#include <linux/wlan_plat.h>
#include <linux/amlogic/dhd_buf.h>
-#define DHD_STATIC_VERSION_STR "100.10.545.3"
+#define DHD_STATIC_VERSION_STR "1.579.77.41.9"
#define BCMDHD_SDIO
#define BCMDHD_PCIE
DHD_PREALLOC_DHD_LOG_DUMP_BUF_EX = 16,
DHD_PREALLOC_DHD_PKTLOG_DUMP_BUF = 17,
DHD_PREALLOC_STAT_REPORT_BUF = 18,
- DHD_PREALLOC_WL_WEXT_INFO = 19,
+ DHD_PREALLOC_WL_ESCAN_INFO = 19,
DHD_PREALLOC_FW_VERBOSE_RING = 20,
DHD_PREALLOC_FW_EVENT_RING = 21,
DHD_PREALLOC_DHD_EVENT_RING = 22,
#define STATIC_BUF_MAX_NUM 20
#define STATIC_BUF_SIZE (PAGE_SIZE*2)
-#define CUSTOM_LOG_DUMP_BUFSIZE_MB 4
+
#define DHD_PREALLOC_PROT_SIZE (16 * 1024)
#define DHD_PREALLOC_RXBUF_SIZE (24 * 1024)
#define DHD_PREALLOC_DATABUF_SIZE (64 * 1024)
#define DHD_PREALLOC_OSL_BUF_SIZE (STATIC_BUF_MAX_NUM * STATIC_BUF_SIZE)
#define DHD_PREALLOC_WIPHY_ESCAN0_SIZE (64 * 1024)
#define DHD_PREALLOC_DHD_INFO_SIZE (32 * 1024)
-#define DHD_PREALLOC_MEMDUMP_RAM_SIZE (1290 * 1024)
+#define DHD_PREALLOC_MEMDUMP_RAM_SIZE (1280 * 1024)
#define DHD_PREALLOC_DHD_WLFC_HANGER_SIZE (73 * 1024)
-#define DHD_PREALLOC_DHD_LOG_DUMP_BUF_SIZE (1024 * 1024 * CUSTOM_LOG_DUMP_BUFSIZE_MB)
-#define DHD_PREALLOC_DHD_LOG_DUMP_BUF_EX_SIZE (8 * 1024)
-#define DHD_PREALLOC_WL_WEXT_INFO_SIZE (70 * 1024)
+#define DHD_PREALLOC_WL_ESCAN_INFO_SIZE (67 * 1024)
#ifdef CONFIG_64BIT
#define DHD_PREALLOC_IF_FLOW_LKUP_SIZE (20 * 1024 * 2)
#else
#define WLAN_DHD_IF_FLOW_LKUP_SIZE (64 * 1024)
#else
#define WLAN_DHD_INFO_BUF_SIZE (16 * 1024)
-#define WLAN_DHD_WLFC_BUF_SIZE (64 * 1024)
+#define WLAN_DHD_WLFC_BUF_SIZE (29 * 1024)
#define WLAN_DHD_IF_FLOW_LKUP_SIZE (20 * 1024)
#endif /* CONFIG_64BIT */
#define WLAN_DHD_MEMDUMP_SIZE (800 * 1024)
void *wlan_static_if_flow_lkup;
void *wlan_static_dhd_memdump_ram_buf;
void *wlan_static_dhd_wlfc_hanger_buf;
-void *wlan_static_dhd_log_dump_buf;
-void *wlan_static_dhd_log_dump_buf_ex;
void *wlan_static_wl_escan_info_buf;
void *wlan_static_fw_verbose_ring_buf;
void *wlan_static_fw_event_ring_buf;
}
return wlan_static_dhd_wlfc_hanger_buf;
}
- if (section == DHD_PREALLOC_DHD_LOG_DUMP_BUF) {
- if (size > DHD_PREALLOC_DHD_LOG_DUMP_BUF_SIZE) {
- pr_err("request DHD_PREALLOC_DHD_LOG_DUMP_BUF_SIZE(%lu) > %d\n",
- size, DHD_PREALLOC_DHD_LOG_DUMP_BUF_SIZE);
- return NULL;
- }
-
- return wlan_static_dhd_log_dump_buf;
- }
- if (section == DHD_PREALLOC_DHD_LOG_DUMP_BUF_EX) {
- if (size > DHD_PREALLOC_DHD_LOG_DUMP_BUF_EX_SIZE) {
- pr_err("request DHD_PREALLOC_DHD_LOG_DUMP_BUF_EX_SIZE(%lu) > %d\n",
- size, DHD_PREALLOC_DHD_LOG_DUMP_BUF_EX_SIZE);
- return NULL;
- }
-
- return wlan_static_dhd_log_dump_buf_ex;
- }
- if (section == DHD_PREALLOC_WL_WEXT_INFO) {
- if (size > DHD_PREALLOC_WL_WEXT_INFO_SIZE) {
- pr_err("request DHD_PREALLOC_WL_WEXT_INFO_SIZE(%lu) > %d\n",
- size, DHD_PREALLOC_WL_WEXT_INFO_SIZE);
+ if (section == DHD_PREALLOC_WL_ESCAN_INFO) {
+ if (size > DHD_PREALLOC_WL_ESCAN_INFO_SIZE) {
+ pr_err("request DHD_PREALLOC_WL_ESCAN_INFO_SIZE(%lu) > %d\n",
+ size, DHD_PREALLOC_WL_ESCAN_INFO_SIZE);
return NULL;
}
pr_err("request section id(%d) is out of max index %d\n",
section, DHD_PREALLOC_MAX);
- pr_err("%s: failed to alloc section %d, size=%ld\n",
- __func__, section, size);
+ pr_err("failed to alloc section %d, size=%ld\n",
+ section, size);
return NULL;
}
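/*
 * Editorial sketch, not part of this patch: each branch above follows the
 * same "validate requested size, return a fixed static pointer" shape. A
 * table-driven equivalent (hypothetical names) would centralize the checks:
 *
 *	struct prealloc_slot { int section; unsigned long max_size; void *buf; };
 *
 *	static void *lookup_prealloc(const struct prealloc_slot *tbl, int n,
 *				     int section, unsigned long size)
 *	{
 *		int i;
 *
 *		for (i = 0; i < n; i++) {
 *			if (tbl[i].section != section)
 *				continue;
 *			if (size > tbl[i].max_size)
 *				return NULL;	// request exceeds reserved size
 *			return tbl[i].buf;
 *		}
 *		return NULL;	// unknown section id
 *	}
 */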
{
int i;
int j;
- pr_info("%s(): %s\n", __func__, DHD_STATIC_VERSION_STR);
+
+ pr_info("bcmdhd_init_wlan_mem %s\n", DHD_STATIC_VERSION_STR);
for (i = 0; i < DHD_SKB_1PAGE_BUF_NUM; i++) {
wlan_static_skb[i] = dev_alloc_skb(DHD_SKB_1PAGE_BUFSIZE);
if (!wlan_static_osl_buf)
goto err_mem_alloc;
- wlan_static_scan_buf0 = kmalloc(DHD_PREALLOC_WIPHY_ESCAN0_SIZE, GFP_KERNEL);
+ wlan_static_scan_buf0 = kmalloc(DHD_PREALLOC_WIPHY_ESCAN0_SIZE,
+ GFP_KERNEL);
if (!wlan_static_scan_buf0)
goto err_mem_alloc;
- wlan_static_dhd_info_buf = kmalloc(DHD_PREALLOC_DHD_INFO_SIZE, GFP_KERNEL);
+ wlan_static_dhd_info_buf = kmalloc(DHD_PREALLOC_DHD_INFO_SIZE,
+ GFP_KERNEL);
if (!wlan_static_dhd_info_buf)
goto err_mem_alloc;
- wlan_static_dhd_wlfc_info_buf = kmalloc(WLAN_DHD_WLFC_BUF_SIZE, GFP_KERNEL);
+ wlan_static_dhd_wlfc_info_buf = kmalloc(WLAN_DHD_WLFC_BUF_SIZE,
+ GFP_KERNEL);
if (!wlan_static_dhd_wlfc_info_buf)
goto err_mem_alloc;
#ifdef BCMDHD_PCIE
- wlan_static_if_flow_lkup = kmalloc(DHD_PREALLOC_IF_FLOW_LKUP_SIZE, GFP_KERNEL);
+ wlan_static_if_flow_lkup = kmalloc(DHD_PREALLOC_IF_FLOW_LKUP_SIZE,
+ GFP_KERNEL);
if (!wlan_static_if_flow_lkup)
goto err_mem_alloc;
#endif /* BCMDHD_PCIE */
- wlan_static_dhd_memdump_ram_buf = kmalloc(DHD_PREALLOC_MEMDUMP_RAM_SIZE, GFP_KERNEL);
+ wlan_static_dhd_memdump_ram_buf = kmalloc(DHD_PREALLOC_MEMDUMP_RAM_SIZE,
+ GFP_KERNEL);
if (!wlan_static_dhd_memdump_ram_buf)
goto err_mem_alloc;
- wlan_static_dhd_wlfc_hanger_buf = kmalloc(DHD_PREALLOC_DHD_WLFC_HANGER_SIZE, GFP_KERNEL);
+ wlan_static_dhd_wlfc_hanger_buf = kmalloc(
+ DHD_PREALLOC_DHD_WLFC_HANGER_SIZE,
+ GFP_KERNEL);
if (!wlan_static_dhd_wlfc_hanger_buf)
goto err_mem_alloc;
- wlan_static_dhd_log_dump_buf = kmalloc(DHD_PREALLOC_DHD_LOG_DUMP_BUF_SIZE, GFP_KERNEL);
- if (!wlan_static_dhd_log_dump_buf)
- goto err_mem_alloc;
-
- wlan_static_dhd_log_dump_buf_ex = kmalloc(DHD_PREALLOC_DHD_LOG_DUMP_BUF_EX_SIZE, GFP_KERNEL);
- if (!wlan_static_dhd_log_dump_buf_ex)
- goto err_mem_alloc;
-
- wlan_static_wl_escan_info_buf = kmalloc(DHD_PREALLOC_WL_WEXT_INFO_SIZE, GFP_KERNEL);
+ wlan_static_wl_escan_info_buf = kmalloc(DHD_PREALLOC_WL_ESCAN_INFO_SIZE,
+ GFP_KERNEL);
if (!wlan_static_wl_escan_info_buf)
goto err_mem_alloc;
- wlan_static_fw_verbose_ring_buf = kmalloc(FW_VERBOSE_RING_SIZE, GFP_KERNEL);
+ wlan_static_fw_verbose_ring_buf = kmalloc(
+ FW_VERBOSE_RING_SIZE,
+ GFP_KERNEL);
if (!wlan_static_fw_verbose_ring_buf)
goto err_mem_alloc;
- wlan_static_fw_event_ring_buf = kmalloc(FW_EVENT_RING_SIZE, GFP_KERNEL);
+ wlan_static_fw_event_ring_buf = kmalloc(FW_EVENT_RING_SIZE,
+ GFP_KERNEL);
if (!wlan_static_fw_event_ring_buf)
goto err_mem_alloc;
- wlan_static_dhd_event_ring_buf = kmalloc(DHD_EVENT_RING_SIZE, GFP_KERNEL);
+ wlan_static_dhd_event_ring_buf = kmalloc(DHD_EVENT_RING_SIZE,
+ GFP_KERNEL);
if (!wlan_static_dhd_event_ring_buf)
goto err_mem_alloc;
- wlan_static_nan_event_ring_buf = kmalloc(NAN_EVENT_RING_SIZE, GFP_KERNEL);
+ wlan_static_nan_event_ring_buf = kmalloc(NAN_EVENT_RING_SIZE,
+ GFP_KERNEL);
if (!wlan_static_nan_event_ring_buf)
goto err_mem_alloc;
err_mem_alloc:
- if (wlan_static_prot)
- kfree(wlan_static_prot);
-
+ kfree(wlan_static_prot);
#if defined(BCMDHD_SDIO)
- if (wlan_static_rxbuf)
- kfree(wlan_static_rxbuf);
-
- if (wlan_static_databuf)
- kfree(wlan_static_databuf);
+ kfree(wlan_static_rxbuf);
+ kfree(wlan_static_databuf);
#endif /* BCMDHD_SDIO */
-
- if (wlan_static_osl_buf)
- kfree(wlan_static_osl_buf);
-
- if (wlan_static_scan_buf0)
- kfree(wlan_static_scan_buf0);
-
- if (wlan_static_scan_buf1)
- kfree(wlan_static_scan_buf1);
-
- if (wlan_static_dhd_info_buf)
- kfree(wlan_static_dhd_info_buf);
-
- if (wlan_static_dhd_wlfc_info_buf)
- kfree(wlan_static_dhd_wlfc_info_buf);
-
+ kfree(wlan_static_osl_buf);
+ kfree(wlan_static_scan_buf0);
+ kfree(wlan_static_scan_buf1);
+ kfree(wlan_static_dhd_info_buf);
+ kfree(wlan_static_dhd_wlfc_info_buf);
#ifdef BCMDHD_PCIE
- if (wlan_static_if_flow_lkup)
- kfree(wlan_static_if_flow_lkup);
+ kfree(wlan_static_if_flow_lkup);
#endif /* BCMDHD_PCIE */
-
- if (wlan_static_dhd_memdump_ram_buf)
- kfree(wlan_static_dhd_memdump_ram_buf);
-
- if (wlan_static_dhd_wlfc_hanger_buf)
- kfree(wlan_static_dhd_wlfc_hanger_buf);
-
- if (wlan_static_dhd_log_dump_buf)
- kfree(wlan_static_dhd_log_dump_buf);
-
- if (wlan_static_dhd_log_dump_buf_ex)
- kfree(wlan_static_dhd_log_dump_buf_ex);
-
- if (wlan_static_wl_escan_info_buf)
- kfree(wlan_static_wl_escan_info_buf);
-
+ kfree(wlan_static_dhd_memdump_ram_buf);
+ kfree(wlan_static_dhd_wlfc_hanger_buf);
+ kfree(wlan_static_wl_escan_info_buf);
#ifdef BCMDHD_PCIE
- if (wlan_static_fw_verbose_ring_buf)
- kfree(wlan_static_fw_verbose_ring_buf);
-
- if (wlan_static_fw_event_ring_buf)
- kfree(wlan_static_fw_event_ring_buf);
-
- if (wlan_static_dhd_event_ring_buf)
- kfree(wlan_static_dhd_event_ring_buf);
-
- if (wlan_static_nan_event_ring_buf)
- kfree(wlan_static_nan_event_ring_buf);
+ kfree(wlan_static_fw_verbose_ring_buf);
+ kfree(wlan_static_fw_event_ring_buf);
+ kfree(wlan_static_dhd_event_ring_buf);
+ kfree(wlan_static_nan_event_ring_buf);
#endif /* BCMDHD_PCIE */
- pr_err("%s: Failed to mem_alloc for WLAN\n", __func__);
+ pr_err("Failed to mem_alloc for WLAN\n");
i = WLAN_SKB_BUF_NUM;
err_skb_alloc:
- pr_err("%s: Failed to skb_alloc for WLAN\n", __func__);
+ pr_err("Failed to skb_alloc for WLAN\n");
for (j = 0; j < i; j++)
dev_kfree_skb(wlan_static_skb[j]);
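/*
 * Editorial note, not part of this patch: the error-unwind hunks above drop
 * the "if (ptr) kfree(ptr)" guards because kfree(NULL) is defined as a no-op,
 * so calling kfree() on a pointer that was never allocated is safe. The skb
 * loop frees entries [0, i): on an skb allocation failure i is the failing
 * index, and on a later kmalloc() failure i is forced to WLAN_SKB_BUF_NUM,
 * so every successfully allocated skb is released exactly once.
 */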
# bcmdhd
+# 1. WL_IFACE_COMB_NUM_CHANNELS must be added if the Android version is 4.4 with kernel version 3.0~3.4;
+#    otherwise, please remove it.
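+#    (See the editorial build example after the SDIO-default block below.)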
# if pci mode is not configured, we use sdio mode as default
ifeq ($(CONFIG_BCMDHD_PCIE),)
$(info bcm SDIO driver configured)
-CONFIG_DHD_USE_STATIC_BUF := y
+CONFIG_DHD_USE_STATIC_BUF :=y
endif
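# Editorial example, not part of this patch: one common way to drive this
# out-of-tree build is to pass the bus selection on the make command line;
# the kernel build directory below is a placeholder:
#   make -C <kernel build dir> M=$(pwd) CONFIG_BCMDHD=m CONFIG_BCMDHD_PCIE=y modules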
-ifeq ($(CONFIG_BCMDHD_SDIO),y)
-MODULE_NAME := dhd
-else
-MODULE_NAME := bcmdhd
-endif
-#CONFIG_BCMDHD := m
#CONFIG_BCMDHD_SDIO := y
#CONFIG_BCMDHD_PCIE := y
#CONFIG_BCMDHD_USB := y
#CONFIG_BCMDHD_CUSB := y
CONFIG_BCMDHD_PROPTXSTATUS := y
-#CONFIG_BCMDHD_DEBUG := y
CONFIG_MACH_PLATFORM := y
#CONFIG_BCMDHD_DTS := y
export CONFIG_VTS_SUPPORT = y
DHDCFLAGS = -Wall -Wstrict-prototypes -Dlinux -DBCMDRIVER \
- -Wno-maybe-uninitialized -Werror \
- -DBCMDONGLEHOST -DBCMDMA32 -DBCMFILEIMAGE \
+ -Wno-maybe-uninitialized \
+ -DBCMDONGLEHOST -DUNRELEASEDCHIP -DBCMDMA32 -DBCMFILEIMAGE \
-DDHDTHREAD -DDHD_DEBUG -DSHOW_EVENTS -DBCMDBG -DGET_OTP_MAC_ENABLE \
-DWIFI_ACT_FRAME -DARP_OFFLOAD_SUPPORT -DSUPPORT_PM2_ONLY \
- -DKEEP_ALIVE -DPKT_FILTER_SUPPORT -DDHDTCPACK_SUPPRESS \
+ -DKEEP_ALIVE -DPKT_FILTER_SUPPORT -DPNO_SUPPORT -DDHDTCPACK_SUPPRESS \
-DDHD_DONOT_FORWARD_BCMEVENT_AS_NETWORK_PKT \
- -DMULTIPLE_SUPPLICANT -DTSQ_MULTIPLIER -DMFP -DDHD_8021X_DUMP \
- -DPOWERUP_MAX_RETRY=0 -DIFACE_HANG_FORCE_DEV_CLOSE -DWAIT_DEQUEUE \
- -DWL_EXT_IAPSTA -DWL_ESCAN -DCCODE_LIST \
- -DENABLE_INSMOD_NO_FW_LOAD -DGET_OTP_MODULE_NAME \
+ -DMULTIPLE_SUPPLICANT -DTSQ_MULTIPLIER -DMFP \
+ -DWL_EXT_IAPSTA \
+ -DENABLE_INSMOD_NO_FW_LOAD \
-I$(src) -I$(src)/include
DHDOFILES = aiutils.o siutils.o sbutils.o bcmutils.o bcmwifi_channels.o \
dhd_linux.o dhd_linux_platdev.o dhd_linux_sched.o dhd_pno.o \
dhd_common.o dhd_ip.o dhd_linux_wq.o dhd_custom_gpio.o \
bcmevent.o hndpmu.o linux_osl.o wldev_common.o wl_android.o \
- dhd_debug_linux.o dhd_debug.o dhd_mschdbg.o dhd_dbg_ring.o \
- hnd_pktq.o hnd_pktpool.o bcmxtlv.o linux_pkt.o bcmstdlib_s.o frag.o \
- dhd_linux_exportfs.o dhd_linux_pktdump.o \
- dhd_config.o dhd_ccode.o wl_event.o wl_android_ext.o wl_escan.o
-
-ifneq ($(CONFIG_WIRELESS_EXT),)
- DHDOFILES += wl_iw.o
- DHDCFLAGS += -DSOFTAP -DWL_WIRELESS_EXT -DUSE_IW
-endif
-ifneq ($(CONFIG_CFG80211),)
- DHDOFILES += wl_cfg80211.o wl_cfgscan.o wl_cfgp2p.o
- DHDOFILES += wl_linux_mon.o wl_cfg_btcoex.o wl_cfgvendor.o
- DHDOFILES += dhd_cfg80211.o
- DHDCFLAGS += -DWL_CFG80211 -DWLP2P -DWL_CFG80211_STA_EVENT
- DHDCFLAGS += -DWL_IFACE_COMB_NUM_CHANNELS
- DHDCFLAGS += -DCUSTOM_PNO_EVENT_LOCK_xTIME=10
- DHDCFLAGS += -DWL_SUPPORT_AUTO_CHANNEL
- DHDCFLAGS += -DWL_SUPPORT_BACKPORTED_KPATCHES
- DHDCFLAGS += -DESCAN_RESULT_PATCH -DESCAN_BUF_OVERFLOW_MGMT
- DHDCFLAGS += -DVSDB -DWL_CFG80211_VSDB_PRIORITIZE_SCAN_REQUEST
- DHDCFLAGS += -DWLTDLS -DMIRACAST_AMPDU_SIZE=8
- DHDCFLAGS += -DWL_VIRTUAL_APSTA
- DHDCFLAGS += -DPNO_SUPPORT -DEXPLICIT_DISCIF_CLEANUP
- DHDCFLAGS += -DDHD_USE_SCAN_WAKELOCK
-# DHDCFLAGS += -DWL_STATIC_IF
-# DHDCFLAGS += -DWL_SAE
-endif
+ dhd_debug_linux.o dhd_debug.o dhd_mschdbg.o \
+ hnd_pktq.o hnd_pktpool.o dhd_config.o wl_android_ext.o
#BCMDHD_SDIO
ifneq ($(CONFIG_BCMDHD_SDIO),)
DHDCFLAGS += -DBCMSDIO -DMMC_SDIO_ABORT -DBCMLXSDMMC -DUSE_SDIOFIFO_IOVAR \
-DSDTEST -DBDC -DDHD_USE_IDLECOUNT -DCUSTOM_SDIO_F2_BLKSIZE=256 \
- -DBCMSDIOH_TXGLOM -DBCMSDIOH_TXGLOM_EXT -DRXFRAME_THREAD \
- -DDHDENABLE_TAILPAD -DSUPPORT_P2P_GO_PS \
- -DBCMSDIO_RXLIM_POST -DCONSOLE_DPC
+ -DBCMSDIOH_TXGLOM -DBCMSDIOH_TXGLOM_EXT -DRXFRAME_THREAD
ifeq ($(CONFIG_BCMDHD_OOB),y)
DHDCFLAGS += -DOOB_INTR_ONLY -DCUSTOMER_OOB -DHW_OOB
ifeq ($(CONFIG_BCMDHD_DISABLE_WOWLAN),y)
else
DHDCFLAGS += -DSDIO_ISR_THREAD
endif
+
DHDOFILES += bcmsdh.o bcmsdh_linux.o bcmsdh_sdmmc.o bcmsdh_sdmmc_linux.o \
dhd_sdio.o dhd_cdc.o dhd_wlfc.o
endif
ifneq ($(CONFIG_BCMDHD_PCIE),)
DHDCFLAGS += -DPCIE_FULL_DONGLE -DBCMPCIE -DCUSTOM_DPC_PRIO_SETTING=-1 \
-DDONGLE_ENABLE_ISOLATION
-DHDCFLAGS += -DDHD_LB -DDHD_LB_RXP -DDHD_LB_STATS -DDHD_LB_TXP
-DHDCFLAGS += -DDHD_PKTID_AUDIT_ENABLED
-ifeq ($(CONFIG_BCMDHD_OOB),y)
- DHDCFLAGS += -DCUSTOMER_OOB -DBCMPCIE_OOB_HOST_WAKE
-endif
ifneq ($(CONFIG_PCI_MSI),)
- DHDCFLAGS += -DDHD_MSI_SUPPORT
+ DHDCFLAGS += -DDHD_USE_MSI
endif
+DHDCFLAGS += -DDHD_PCIE_BAR1_WIN_BASE_FIX=0x200000
+
DHDOFILES += dhd_pcie.o dhd_pcie_linux.o pcie_core.o dhd_flowring.o \
- dhd_msgbuf.o dhd_linux_lb.o
+ dhd_msgbuf.o
endif
#BCMDHD_USB
DHDCFLAGS += -DBCMUSBDEV_COMPOSITE
DHDCFLAGS :=$(filter-out -DENABLE_INSMOD_NO_FW_LOAD,$(DHDCFLAGS))
endif
+
DHDOFILES += dbus.o dbus_usb.o dbus_usb_linux.o dhd_cdc.o dhd_wlfc.o
endif
DHDCFLAGS += -DPROP_TXSTATUS
endif
ifneq ($(CONFIG_BCMDHD_SDIO),)
- DHDCFLAGS += -DPROP_TXSTATUS -DPROPTX_MAXCOUNT
+ DHDCFLAGS += -DPROP_TXSTATUS
endif
ifneq ($(CONFIG_CFG80211),)
DHDCFLAGS += -DPROP_TXSTATUS_VSDB
endif
endif
-ifeq ($(CONFIG_64BIT),y)
- DHDCFLAGS := $(filter-out -DBCMDMA32,$(DHDCFLAGS))
- DHDCFLAGS += -DBCMDMA64OSL
-endif
-
#VTS_SUPPORT
ifeq ($(CONFIG_VTS_SUPPORT),y)
ifneq ($(CONFIG_CFG80211),)
-DHDCFLAGS += -DGSCAN_SUPPORT -DRTT_SUPPORT -DLINKSTAT_SUPPORT \
- -DCUSTOM_COUNTRY_CODE -DDHD_GET_VALID_CHANNELS \
- -DDEBUGABILITY -DDBG_PKT_MON -DDHD_LOG_DUMP -DDHD_FW_COREDUMP \
- -DAPF -DNDO_CONFIG_SUPPORT -DRSSI_MONITOR_SUPPORT -DDHD_WAKE_STATUS
-DHDOFILES += dhd_rtt.o bcm_app_utils.o
-endif
-endif
+DHDCFLAGS += -DGSCAN_SUPPORT -DRTT_SUPPORT -DCUSTOM_FORCE_NODFS_FLAG \
+ -DLINKSTAT_SUPPORT -DDEBUGABILITY -DDBG_PKT_MON -DPKT_FILTER_SUPPORT \
+ -DAPF -DNDO_CONFIG_SUPPORT -DRSSI_MONITOR_SUPPORT -DDHD_WAKE_STATUS \
+ -DCUSTOM_COUNTRY_CODE -DDHD_FW_COREDUMP -DEXPLICIT_DISCIF_CLEANUP
-# For Debug
-ifneq ($(CONFIG_BCMDHD_DEBUG),)
-DHDCFLAGS += -DDEBUGFS_CFG80211
-DHDCFLAGS += -DSHOW_LOGTRACE -DDHD_LOG_DUMP -DDHD_FW_COREDUMP \
- -DBCMASSERT_LOG -DSI_ERROR_ENFORCE
-ifneq ($(CONFIG_BCMDHD_PCIE),)
- DHDCFLAGS += -DEWP_EDL
- DHDCFLAGS += -DDNGL_EVENT_SUPPORT
- DHDCFLAGS += -DDHD_SSSR_DUMP
+DHDOFILES += bcmxtlv.o dhd_rtt.o bcm_app_utils.o
endif
endif
# MESH support for kernel 3.10 later
ifeq ($(CONFIG_WL_MESH),y)
DHDCFLAGS += -DWLMESH
-ifneq ($(CONFIG_CFG80211),)
- DHDCFLAGS += -DWLMESH_CFG80211
-endif
ifneq ($(CONFIG_BCMDHD_PCIE),)
DHDCFLAGS += -DBCM_HOST_BUF -DDMA_HOST_BUFFER_LEN=0x80000
endif
DHDCFLAGS += -DDHD_UPDATE_INTF_MAC
DHDCFLAGS :=$(filter-out -DDHD_FW_COREDUMP,$(DHDCFLAGS))
- DHDCFLAGS :=$(filter-out -DWL_STATIC_IF,$(DHDCFLAGS))
-endif
-
-ifeq ($(CONFIG_WL_EASYMESH),y)
- DHDCFLAGS :=$(filter-out -DDHD_FW_COREDUMP,$(DHDCFLAGS))
- DHDCFLAGS :=$(filter-out -DDHD_LOG_DUMP,$(DHDCFLAGS))
- DHDCFLAGS += -DWLEASYMESH -DWL_STATIC_IF -DWLDWDS -DFOURADDR_AUTO_BRG
+ DHDCFLAGS :=$(filter-out -DSET_RANDOM_MAC_SOFTAP,$(DHDCFLAGS))
endif
-#CSI_SUPPORT
-ifneq ($(CONFIG_CSI_SUPPORT),)
- DHDCFLAGS += -DCSI_SUPPORT
- DHDOFILES += dhd_csi.o
+ifeq ($(CONFIG_BCMDHD_SDIO),y)
+obj-$(CONFIG_BCMDHD) += dhd.o
+dhd-objs += $(DHDOFILES)
+else
+obj-$(CONFIG_BCMDHD) += bcmdhd.o
+bcmdhd-objs += $(DHDOFILES)
endif
-obj-$(CONFIG_BCMDHD) += $(MODULE_NAME).o
-$(MODULE_NAME)-objs += $(DHDOFILES)
-
ifeq ($(CONFIG_MACH_PLATFORM),y)
DHDOFILES += dhd_gpio.o
ifeq ($(CONFIG_BCMDHD_DTS),y)
ifeq ($(CONFIG_DHD_USE_STATIC_BUF),y)
# obj-m += dhd_static_buf.o
DHDCFLAGS += -DSTATIC_WL_PRIV_STRUCT -DENHANCED_STATIC_BUF
- DHDCFLAGS += -DCONFIG_DHD_USE_STATIC_BUF
- DHDCFLAGS += -DDHD_USE_STATIC_MEMDUMP
+ DHDCFLAGS += -DDHD_USE_STATIC_MEMDUMP -DCONFIG_DHD_USE_STATIC_BUF
endif
+ifneq ($(CONFIG_WIRELESS_EXT),)
+ DHDOFILES += wl_iw.o wl_escan.o
+ DHDCFLAGS += -DSOFTAP -DWL_WIRELESS_EXT -DUSE_IW -DWL_ESCAN
+endif
+ifneq ($(CONFIG_CFG80211),)
+ DHDOFILES += wl_cfg80211.o wl_cfgp2p.o wl_linux_mon.o wl_cfg_btcoex.o wl_cfgvendor.o
+ DHDOFILES += dhd_cfg80211.o
+ DHDCFLAGS += -DWL_CFG80211 -DWLP2P -DWL_CFG80211_STA_EVENT -DWL_ENABLE_P2P_IF
+ DHDCFLAGS += -DWL_IFACE_COMB_NUM_CHANNELS
+ DHDCFLAGS += -DCUSTOM_ROAM_TRIGGER_SETTING=-65
+ DHDCFLAGS += -DCUSTOM_ROAM_DELTA_SETTING=15
+ DHDCFLAGS += -DCUSTOM_KEEP_ALIVE_SETTING=28000
+ DHDCFLAGS += -DCUSTOM_PNO_EVENT_LOCK_xTIME=7
+ DHDCFLAGS += -DWL_SUPPORT_AUTO_CHANNEL
+ DHDCFLAGS += -DWL_SUPPORT_BACKPORTED_KPATCHES
+ DHDCFLAGS += -DESCAN_RESULT_PATCH -DESCAN_BUF_OVERFLOW_MGMT
+ DHDCFLAGS += -DVSDB -DWL_CFG80211_VSDB_PRIORITIZE_SCAN_REQUEST
+ DHDCFLAGS += -DWLTDLS -DMIRACAST_AMPDU_SIZE=8
+ DHDCFLAGS += -DWL_VIRTUAL_APSTA
+endif
EXTRA_CFLAGS = $(DHDCFLAGS)
ifeq ($(CONFIG_BCMDHD),m)
EXTRA_LDFLAGS += --strip-debug
* Misc utility routines for accessing chip-specific features
* of the SiliconBackplane-based Broadcom chips.
*
- * Copyright (C) 1999-2019, Broadcom.
- *
+ * Copyright (C) 1999-2017, Broadcom Corporation
+ *
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
* under the terms of the GNU General Public License version 2 (the "GPL"),
* available at http://www.broadcom.com/licenses/GPLv2.php, with the
* following added to such license:
- *
+ *
* As a special exception, the copyright holders of this software give you
* permission to link this software with independent modules, and to copy and
* distribute the resulting executable under terms of your choice, provided that
* the license of that module. An independent module is a module which is not
* derived from this software. The special exception does not apply to any
* modifications of the software.
- *
+ *
* Notwithstanding the above, under no circumstances may you combine this
* software in any way with any other Broadcom software provided under a license
* other than the GPL, without Broadcom's express prior written consent.
*
* <<Broadcom-WL-IPTag/Open:>>
*
- * $Id: aiutils.c 823201 2019-06-03 03:49:36Z $
+ * $Id: aiutils.c 625027 2016-03-15 08:20:18Z $
*/
#include <bcm_cfg.h>
#include <typedefs.h>
#include "siutils_priv.h"
#include <bcmdevs.h>
+#define BCM5357_DMP() (0)
#define BCM53573_DMP() (0)
#define BCM4707_DMP() (0)
#define PMU_DMP() (0)
#define GCI_DMP() (0)
-
-#if defined(BCM_BACKPLANE_TIMEOUT)
-static bool ai_get_apb_bridge(si_t *sih, uint32 coreidx, uint32 *apb_id, uint32 *apb_coreuinit);
-#endif /* BCM_BACKPLANE_TIMEOUT */
-
-#if defined(AXI_TIMEOUTS) || defined(BCM_BACKPLANE_TIMEOUT)
-static void ai_reset_axi_to(si_info_t *sii, aidmp_t *ai);
-#endif /* defined (AXI_TIMEOUTS) || defined (BCM_BACKPLANE_TIMEOUT) */
+#define remap_coreid(sih, coreid) (coreid)
+#define remap_corerev(sih, corerev) (corerev)
/* EROM parsing */
return asd;
}
-/* Parse the enumeration rom to identify all cores
- * Erom content format can be found in:
- * http://hwnbu-twiki.broadcom.com/twiki/pub/Mwgroup/ArmDocumentation/SystemDiscovery.pdf
- */
+
+/* parse the enumeration rom to identify all cores */
void
ai_scan(si_t *sih, void *regs, uint devid)
{
case PCMCIA_BUS:
default:
- SI_ERROR(("Don't know how to do AXI enumeration on bus %d\n", sih->bustype));
+ SI_ERROR(("Don't know how to do AXI enumertion on bus %d\n", sih->bustype));
ASSERT(0);
return;
}
mfg, cid, crev, OSL_OBFUSCATE_BUF(eromptr - 1), nmw, nsw, nmp, nsp));
#else
BCM_REFERENCE(crev);
-#endif // endif
+#endif
- if (BCM4347_CHIP(sih->chip)) {
+ if (CHIPID(sih->chip) == BCM4347_CHIP_ID) {
/* 4347 has more entries for ARM core
* This should apply to all chips but crashes on router
* This is a temp fix to be further analyze
*/
if (nsp == 0)
continue;
- } else
- {
+ } else {
/* Include Default slave wrapper for timeout monitoring */
if ((nsp == 0) ||
#if !defined(AXI_TIMEOUTS) && !defined(BCM_BACKPLANE_TIMEOUT)
((mfg == MFGID_ARM) && (cid == DEF_AI_COMP)) ||
-#else
- ((CHIPTYPE(sii->pub.socitype) == SOCI_NAI) &&
- (mfg == MFGID_ARM) && (cid == DEF_AI_COMP)) ||
#endif /* !defined(AXI_TIMEOUTS) && !defined(BCM_BACKPLANE_TIMEOUT) */
FALSE) {
continue;
asd = get_asd(sih, &eromptr, 0, 0, AD_ST_SLAVE,
&addrl, &addrh, &sizel, &sizeh);
if (asd != 0) {
- if ((sii->oob_router != 0) && (sii->oob_router != addrl)) {
- sii->oob_router1 = addrl;
- } else {
- sii->oob_router = addrl;
- }
+ sii->oob_router = addrl;
}
}
- if (cid != NS_CCB_CORE_ID &&
- cid != PMU_CORE_ID && cid != GCI_CORE_ID && cid != SR_CORE_ID &&
- cid != HUB_CORE_ID && cid != HND_OOBR_CORE_ID)
+ if (cid != GMAC_COMMON_4706_CORE_ID && cid != NS_CCB_CORE_ID &&
+ cid != PMU_CORE_ID && cid != GCI_CORE_ID)
continue;
}
cores_info->cia[idx] = cia;
cores_info->cib[idx] = cib;
- cores_info->coreid[idx] = cid;
+ cores_info->coreid[idx] = remap_coreid(sih, cid);
for (i = 0; i < nmp; i++) {
mpd = get_erom_ent(sih, &eromptr, ER_VALID, ER_VALID);
}
cores_info->coresba[idx] = addrl;
cores_info->coresba_size[idx] = sizel;
- /* Get any more ASDs in first port */
+ /* Get any more ASDs in port 0 */
j = 1;
do {
asd = get_asd(sih, &eromptr, 0, j, AD_ST_SLAVE, &addrl, &addrh,
do {
asd = get_asd(sih, &eromptr, i, j, AD_ST_SLAVE, &addrl, &addrh,
&sizel, &sizeh);
- /* To get the first base address of second slave port */
- if ((asd != 0) && (i == 1) && (j == 0)) {
- cores_info->csp2ba[idx] = addrl;
- cores_info->csp2ba_size[idx] = sizel;
- }
+
if (asd == 0)
break;
j++;
SI_ERROR(("Master wrapper %d is not 4KB\n", i));
goto error;
}
- if (i == 0) {
+ if (i == 0)
cores_info->wrapba[idx] = addrl;
- } else if (i == 1) {
+ else if (i == 1)
cores_info->wrapba2[idx] = addrl;
- } else if (i == 2) {
- cores_info->wrapba3[idx] = addrl;
- }
- if (axi_wrapper &&
- (sii->axi_num_wrappers < SI_MAX_AXI_WRAPPERS)) {
- axi_wrapper[sii->axi_num_wrappers].mfg = mfg;
- axi_wrapper[sii->axi_num_wrappers].cid = cid;
- axi_wrapper[sii->axi_num_wrappers].rev = crev;
- axi_wrapper[sii->axi_num_wrappers].wrapper_type = AI_MASTER_WRAPPER;
- axi_wrapper[sii->axi_num_wrappers].wrapper_addr = addrl;
- sii->axi_num_wrappers++;
- SI_VMSG(("MASTER WRAPPER: %d, mfg:%x, cid:%x,"
- "rev:%x, addr:%x, size:%x\n",
+
+ ASSERT(sii->axi_num_wrappers < SI_MAX_AXI_WRAPPERS);
+ axi_wrapper[sii->axi_num_wrappers].mfg = mfg;
+ axi_wrapper[sii->axi_num_wrappers].cid = cid;
+ axi_wrapper[sii->axi_num_wrappers].rev = crev;
+ axi_wrapper[sii->axi_num_wrappers].wrapper_type = AI_MASTER_WRAPPER;
+ axi_wrapper[sii->axi_num_wrappers].wrapper_addr = addrl;
+ sii->axi_num_wrappers++;
+ SI_VMSG(("MASTER WRAPPER: %d, mfg:%x, cid:%x, rev:%x, addr:%x, size:%x\n",
sii->axi_num_wrappers, mfg, cid, crev, addrl, sizel));
- }
}
/* And finally slave wrappers */
SI_ERROR(("Slave wrapper %d is not 4KB\n", i));
goto error;
}
- if ((nmw == 0) && (i == 0)) {
+ if ((nmw == 0) && (i == 0))
cores_info->wrapba[idx] = addrl;
- } else if ((nmw == 0) && (i == 1)) {
+ else if ((nmw == 0) && (i == 1))
cores_info->wrapba2[idx] = addrl;
- } else if ((nmw == 0) && (i == 2)) {
- cores_info->wrapba3[idx] = addrl;
- }
/* Include all slave wrappers to the list to
* enable and monitor watchdog timeouts
*/
- if (axi_wrapper &&
- (sii->axi_num_wrappers < SI_MAX_AXI_WRAPPERS)) {
- axi_wrapper[sii->axi_num_wrappers].mfg = mfg;
- axi_wrapper[sii->axi_num_wrappers].cid = cid;
- axi_wrapper[sii->axi_num_wrappers].rev = crev;
- axi_wrapper[sii->axi_num_wrappers].wrapper_type = AI_SLAVE_WRAPPER;
-
- /* Software WAR as discussed with hardware team, to ensure proper
- * Slave Wrapper Base address is set for 4364 Chip ID.
- * Current address is 0x1810c000, Corrected the same to 0x1810e000.
- * This ensures AXI default slave wrapper is registered along with
- * other slave wrapper cores and is useful while generating trap info
- * when write operation is tried on Invalid Core / Wrapper register
- */
-
- if ((CHIPID(sih->chip) == BCM4364_CHIP_ID) &&
- (cid == DEF_AI_COMP)) {
- axi_wrapper[sii->axi_num_wrappers].wrapper_addr =
- 0x1810e000;
- } else {
- axi_wrapper[sii->axi_num_wrappers].wrapper_addr = addrl;
- }
-
- sii->axi_num_wrappers++;
+ ASSERT(sii->axi_num_wrappers < SI_MAX_AXI_WRAPPERS);
+ axi_wrapper[sii->axi_num_wrappers].mfg = mfg;
+ axi_wrapper[sii->axi_num_wrappers].cid = cid;
+ axi_wrapper[sii->axi_num_wrappers].rev = crev;
+ axi_wrapper[sii->axi_num_wrappers].wrapper_type = AI_SLAVE_WRAPPER;
+ axi_wrapper[sii->axi_num_wrappers].wrapper_addr = addrl;
+ sii->axi_num_wrappers++;
- SI_VMSG(("SLAVE WRAPPER: %d, mfg:%x, cid:%x,"
- "rev:%x, addr:%x, size:%x\n",
- sii->axi_num_wrappers, mfg, cid, crev, addrl, sizel));
- }
+ SI_VMSG(("SLAVE WRAPPER: %d, mfg:%x, cid:%x, rev:%x, addr:%x, size:%x\n",
+ sii->axi_num_wrappers, mfg, cid, crev, addrl, sizel));
}
+
#ifndef BCM_BACKPLANE_TIMEOUT
/* Don't record bridges */
if (br)
continue;
-#endif // endif
+#endif
/* Done with core */
sii->numcores++;
* Return the current core's virtual address.
*/
static volatile void *
-_ai_setcoreidx(si_t *sih, uint coreidx, uint use_wrapn)
+_ai_setcoreidx(si_t *sih, uint coreidx, uint use_wrap2)
{
si_info_t *sii = SI_INFO(sih);
si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
- uint32 addr, wrap, wrap2, wrap3;
+ uint32 addr, wrap, wrap2;
volatile void *regs;
if (coreidx >= MIN(sii->numcores, SI_MAXCORES))
addr = cores_info->coresba[coreidx];
wrap = cores_info->wrapba[coreidx];
wrap2 = cores_info->wrapba2[coreidx];
- wrap3 = cores_info->wrapba3[coreidx];
#ifdef BCM_BACKPLANE_TIMEOUT
/* No need to disable interrupts while entering/exiting APB bridge core */
cores_info->wrappers2[coreidx] = REG_MAP(wrap2, SI_CORE_SIZE);
ASSERT(GOODREGS(cores_info->wrappers2[coreidx]));
}
- if (!cores_info->wrappers3[coreidx] && (wrap3 != 0)) {
- cores_info->wrappers3[coreidx] = REG_MAP(wrap3, SI_CORE_SIZE);
- ASSERT(GOODREGS(cores_info->wrappers3[coreidx]));
- }
-
- if (use_wrapn == 2) {
- sii->curwrap = cores_info->wrappers3[coreidx];
- } else if (use_wrapn == 1) {
+ if (use_wrap2)
sii->curwrap = cores_info->wrappers2[coreidx];
- } else {
+ else
sii->curwrap = cores_info->wrappers[coreidx];
- }
break;
case PCI_BUS:
regs = sii->curmap;
/* point bar0 2nd 4KB window to the primary wrapper */
- if (use_wrapn)
+ if (use_wrap2)
wrap = wrap2;
if (PCIE_GEN2(sii))
OSL_PCI_WRITE_CONFIG(sii->osh, PCIE2_BAR0_WIN2, 4, wrap);
case SPI_BUS:
case SDIO_BUS:
sii->curmap = regs = (void *)((uintptr)addr);
- if (use_wrapn)
+ if (use_wrap2)
sii->curwrap = (void *)((uintptr)wrap2);
else
sii->curwrap = (void *)((uintptr)wrap);
return _ai_setcoreidx(sih, coreidx, 1);
}
-volatile void *
-ai_setcoreidx_3rdwrap(si_t *sih, uint coreidx)
-{
- return _ai_setcoreidx(sih, coreidx, 2);
-}
-
void
ai_coreaddrspaceX(si_t *sih, uint asidx, uint32 *addr, uint32 *size)
{
if (cc == NULL)
goto error;
- BCM_REFERENCE(erombase);
erombase = R_REG(sii->osh, &cc->eromptr);
eromptr = (uint32 *)REG_MAP(erombase, SI_CORE_SIZE);
eromlim = eromptr + (ER_REMAPCONTROL / sizeof(uint32));
return 2;
}
-/* Return the address of the nth address space in the current core
- * Arguments:
- * sih : Pointer to struct si_t
- * spidx : slave port index
- * baidx : base address index
- */
+/* Return the address of the nth address space in the current core */
uint32
-ai_addrspace(si_t *sih, uint spidx, uint baidx)
+ai_addrspace(si_t *sih, uint asidx)
{
si_info_t *sii = SI_INFO(sih);
si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
cidx = sii->curidx;
- if (spidx == CORE_SLAVE_PORT_0) {
- if (baidx == CORE_BASE_ADDR_0)
- return cores_info->coresba[cidx];
- else if (baidx == CORE_BASE_ADDR_1)
- return cores_info->coresba2[cidx];
- } else if (spidx == CORE_SLAVE_PORT_1) {
- if (baidx == CORE_BASE_ADDR_0)
- return cores_info->csp2ba[cidx];
+ if (asidx == 0)
+ return cores_info->coresba[cidx];
+ else if (asidx == 1)
+ return cores_info->coresba2[cidx];
+ else {
+ SI_ERROR(("%s: Need to parse the erom again to find addr space %d\n",
+ __FUNCTION__, asidx));
+ return 0;
}
-
- SI_ERROR(("%s: Need to parse the erom again to find %d base addr in %d slave port\n",
- __FUNCTION__, baidx, spidx));
-
- return 0;
}
-/* Return the size of the nth address space in the current core
-* Arguments:
-* sih : Pointer to struct si_t
-* spidx : slave port index
-* baidx : base address index
-*/
+/* Return the size of the nth address space in the current core */
uint32
-ai_addrspacesize(si_t *sih, uint spidx, uint baidx)
+ai_addrspacesize(si_t *sih, uint asidx)
{
si_info_t *sii = SI_INFO(sih);
si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
uint cidx;
cidx = sii->curidx;
- if (spidx == CORE_SLAVE_PORT_0) {
- if (baidx == CORE_BASE_ADDR_0)
- return cores_info->coresba_size[cidx];
- else if (baidx == CORE_BASE_ADDR_1)
- return cores_info->coresba2_size[cidx];
- } else if (spidx == CORE_SLAVE_PORT_1) {
- if (baidx == CORE_BASE_ADDR_0)
- return cores_info->csp2ba_size[cidx];
- }
-
- SI_ERROR(("%s: Need to parse the erom again to find %d base addr in %d slave port\n",
- __FUNCTION__, baidx, spidx));
- return 0;
+ if (asidx == 0)
+ return cores_info->coresba_size[cidx];
+ else if (asidx == 1)
+ return cores_info->coresba2_size[cidx];
+ else {
+ SI_ERROR(("%s: Need to parse the erom again to find addr space %d\n",
+ __FUNCTION__, asidx));
+ return 0;
+ }
}
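/*
 * Editorial note, not part of this patch: after this revert both accessors
 * take a single address-space index again -- index 0 resolves to
 * coresba[]/coresba_size[] and index 1 to coresba2[]/coresba2_size[]; any
 * other index logs an error and returns 0, since resolving further address
 * spaces would require re-parsing the EROM.
 */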
uint
si_info_t *sii = SI_INFO(sih);
aidmp_t *ai;
+ if (BCM5357_DMP()) {
+ SI_ERROR(("%s: Attempting to read USB20H DMP registers on 5357b0\n", __FUNCTION__));
+ return sii->curidx;
+ }
if (BCM4707_DMP()) {
SI_ERROR(("%s: Attempting to read CHIPCOMMONB DMP registers on 4707\n",
__FUNCTION__));
SI_ERROR(("%s: Attempting to read DMP registers on 53573\n", __FUNCTION__));
return sii->curidx;
}
+#ifdef REROUTE_OOBINT
+ if (PMU_DMP()) {
+ SI_ERROR(("%s: Attempting to read PMU DMP registers\n",
+ __FUNCTION__));
+ return PMU_OOB_BIT;
+ }
+#else
if (PMU_DMP()) {
uint idx, flag;
idx = sii->curidx;
ai_setcoreidx(sih, idx);
return flag;
}
+#endif /* REROUTE_OOBINT */
ai = sii->curwrap;
ASSERT(ai != NULL);
si_info_t *sii = SI_INFO(sih);
aidmp_t *ai;
+ if (BCM5357_DMP()) {
+ SI_ERROR(("%s: Attempting to read USB20H DMP registers on 5357b0\n", __FUNCTION__));
+ return sii->curidx;
+ }
if (BCM4707_DMP()) {
SI_ERROR(("%s: Attempting to read CHIPCOMMONB DMP registers on 4707\n",
__FUNCTION__));
return sii->curidx;
}
+#ifdef REROUTE_OOBINT
+ if (PMU_DMP()) {
+ SI_ERROR(("%s: Attempting to read PMU DMP registers\n",
+ __FUNCTION__));
+ return PMU_OOB_BIT;
+ }
+#endif /* REROUTE_OOBINT */
ai = sii->curwrap;
ai_wrap_reg(si_t *sih, uint32 offset, uint32 mask, uint32 val)
{
si_info_t *sii = SI_INFO(sih);
- uint32 *addr = (uint32 *) ((uchar *)(sii->curwrap) + offset);
+ uint32 *map = (uint32 *) sii->curwrap;
if (mask || val) {
- uint32 w = R_REG(sii->osh, addr);
+ uint32 w = R_REG(sii->osh, map+(offset/4));
w &= ~mask;
w |= val;
- W_REG(sii->osh, addr, w);
+ W_REG(sii->osh, map+(offset/4), w);
}
- return (R_REG(sii->osh, addr));
+
+ return (R_REG(sii->osh, map+(offset/4)));
}
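/*
 * Editorial note, not part of this patch: both addressing forms reach the
 * same register -- "(uchar *)curwrap + offset" advances in bytes, while
 * "map + (offset / 4)" advances in 32-bit words over a uint32 pointer -- so
 * the rewrite only changes how the byte offset is expressed (and assumes
 * offset is 4-byte aligned). The body is a plain read-modify-write: clear
 * the mask bits, OR in val, write the result back, then return the current
 * register value.
 */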
uint
si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
uint32 cib;
- cib = cores_info->cib[sii->curidx];
- return ((cib & CIB_REV_MASK) >> CIB_REV_SHIFT);
-}
-uint
-ai_corerev_minor(si_t *sih)
-{
- return (ai_core_sflags(sih, 0, 0) >> SISF_MINORREV_D11_SHIFT) &
- SISF_MINORREV_D11_MASK;
+ cib = cores_info->cib[sii->curidx];
+ return remap_corerev(sih, (cib & CIB_REV_MASK) >> CIB_REV_SHIFT);
}
bool
si_info_t *sii = SI_INFO(sih);
si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
+
ASSERT(GOODIDX(coreidx));
ASSERT(regoff < SI_CORE_SIZE);
ASSERT((val & ~mask) == 0);
return (w);
}
-/*
- * Switch to 'coreidx', issue a single arbitrary 32bit register mask&set operation,
- * switch back to the original core, and return the new value.
- *
- * When using the silicon backplane, no fiddling with interrupts or core switches is needed.
- *
- * Also, when using pci/pcie, we can optimize away the core switching for pci registers
- * and (on newer pci cores) chipcommon registers.
- */
-uint
-ai_corereg_writeonly(si_t *sih, uint coreidx, uint regoff, uint mask, uint val)
-{
- uint origidx = 0;
- volatile uint32 *r = NULL;
- uint w = 0;
- uint intr_val = 0;
- bool fast = FALSE;
- si_info_t *sii = SI_INFO(sih);
- si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
-
- ASSERT(GOODIDX(coreidx));
- ASSERT(regoff < SI_CORE_SIZE);
- ASSERT((val & ~mask) == 0);
-
- if (coreidx >= SI_MAXCORES)
- return 0;
-
- if (BUSTYPE(sih->bustype) == SI_BUS) {
- /* If internal bus, we can always get at everything */
- fast = TRUE;
- /* map if does not exist */
- if (!cores_info->regs[coreidx]) {
- cores_info->regs[coreidx] = REG_MAP(cores_info->coresba[coreidx],
- SI_CORE_SIZE);
- ASSERT(GOODREGS(cores_info->regs[coreidx]));
- }
- r = (volatile uint32 *)((volatile uchar *)cores_info->regs[coreidx] + regoff);
- } else if (BUSTYPE(sih->bustype) == PCI_BUS) {
- /* If pci/pcie, we can get at pci/pcie regs and on newer cores to chipc */
-
- if ((cores_info->coreid[coreidx] == CC_CORE_ID) && SI_FAST(sii)) {
- /* Chipc registers are mapped at 12KB */
-
- fast = TRUE;
- r = (volatile uint32 *)((volatile char *)sii->curmap +
- PCI_16KB0_CCREGS_OFFSET + regoff);
- } else if (sii->pub.buscoreidx == coreidx) {
- /* pci registers are at either in the last 2KB of an 8KB window
- * or, in pcie and pci rev 13 at 8KB
- */
- fast = TRUE;
- if (SI_FAST(sii))
- r = (volatile uint32 *)((volatile char *)sii->curmap +
- PCI_16KB0_PCIREGS_OFFSET + regoff);
- else
- r = (volatile uint32 *)((volatile char *)sii->curmap +
- ((regoff >= SBCONFIGOFF) ?
- PCI_BAR0_PCISBR_OFFSET : PCI_BAR0_PCIREGS_OFFSET) +
- regoff);
- }
- }
-
- if (!fast) {
- INTR_OFF(sii, intr_val);
-
- /* save current core index */
- origidx = si_coreidx(&sii->pub);
-
- /* switch core */
- r = (volatile uint32*) ((volatile uchar*) ai_setcoreidx(&sii->pub, coreidx) +
- regoff);
- }
- ASSERT(r != NULL);
-
- /* mask and set */
- if (mask || val) {
- w = (R_REG(sii->osh, r) & ~mask) | val;
- W_REG(sii->osh, r, w);
- }
-
- if (!fast) {
- /* restore core index */
- if (origidx != coreidx)
- ai_setcoreidx(&sii->pub, origidx);
-
- INTR_RESTORE(sii, intr_val);
- }
-
- return (w);
-}
-
/*
* If there is no need for fiddling with interrupts or core switches (typically silicon
* back plane registers, pci registers and chipcommon registers), this function
si_info_t *sii = SI_INFO(sih);
si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
+
ASSERT(GOODIDX(coreidx));
ASSERT(regoff < SI_CORE_SIZE);
uint32 status;
aidmp_t *ai;
+
ASSERT(GOODREGS(sii->curwrap));
ai = sii->curwrap;
_ai_core_reset(si_t *sih, uint32 bits, uint32 resetbits)
{
si_info_t *sii = SI_INFO(sih);
-#if defined(UCM_CORRUPTION_WAR)
- si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
-#endif // endif
aidmp_t *ai;
volatile uint32 dummy;
uint loop_counter = 10;
ASSERT(GOODREGS(sii->curwrap));
ai = sii->curwrap;
+ /* if core is already out of reset, just return */
+
/* ensure there are no pending backplane operations */
SPINWAIT(((dummy = R_REG(sii->osh, &ai->resetstatus)) != 0), 300);
+
/* put core into reset state */
W_REG(sii->osh, &ai->resetctrl, AIRC_RESET);
OSL_DELAY(10);
W_REG(sii->osh, &ai->ioctrl, (bits | resetbits | SICF_FGC | SICF_CLOCK_EN));
dummy = R_REG(sii->osh, &ai->ioctrl);
BCM_REFERENCE(dummy);
-#ifdef UCM_CORRUPTION_WAR
- if (cores_info->coreid[sii->curidx] == D11_CORE_ID) {
- /* Reset FGC */
- OSL_DELAY(1);
- W_REG(sii->osh, &ai->ioctrl, (dummy & (~SICF_FGC)));
- }
-#endif /* UCM_CORRUPTION_WAR */
+
/* ensure there are no pending backplane operations */
SPINWAIT(((dummy = R_REG(sii->osh, &ai->resetstatus)) != 0), 300);
+
while (R_REG(sii->osh, &ai->resetctrl) != 0 && --loop_counter != 0) {
/* ensure there are no pending backplane operations */
SPINWAIT(((dummy = R_REG(sii->osh, &ai->resetstatus)) != 0), 300);
+
/* take core out of reset */
W_REG(sii->osh, &ai->resetctrl, 0);
SPINWAIT((R_REG(sii->osh, &ai->resetstatus) != 0), 300);
}
-#ifdef UCM_CORRUPTION_WAR
- /* Pulse FGC after lifting Reset */
- W_REG(sii->osh, &ai->ioctrl, (bits | SICF_FGC | SICF_CLOCK_EN));
-#else
+
W_REG(sii->osh, &ai->ioctrl, (bits | SICF_CLOCK_EN));
-#endif /* UCM_CORRUPTION_WAR */
dummy = R_REG(sii->osh, &ai->ioctrl);
BCM_REFERENCE(dummy);
-#ifdef UCM_CORRUPTION_WAR
- if (cores_info->coreid[sii->curidx] == D11_CORE_ID) {
- /* Reset FGC */
- OSL_DELAY(1);
- W_REG(sii->osh, &ai->ioctrl, (dummy & (~SICF_FGC)));
- }
-#endif /* UCM_CORRUPTION_WAR */
OSL_DELAY(1);
}
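/*
 * Editorial summary of the sequence above, not part of this patch: wait for
 * pending backplane transactions (resetstatus == 0), assert AIRC_RESET, turn
 * the core clock on with force-gated clocks (SICF_FGC | SICF_CLOCK_EN) plus
 * the caller's reset bits, wait again, clear resetctrl in a bounded loop
 * until the core reports it is out of reset, and finally rewrite ioctrl with
 * SICF_CLOCK_EN only, dropping SICF_FGC and the temporary reset bits.
 */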
si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
uint idx = sii->curidx;
- if (cores_info->wrapba3[idx] != 0) {
- ai_setcoreidx_3rdwrap(sih, idx);
- _ai_core_reset(sih, bits, resetbits);
- ai_setcoreidx(sih, idx);
- }
-
if (cores_info->wrapba2[idx] != 0) {
ai_setcoreidx_2ndwrap(sih, idx);
_ai_core_reset(sih, bits, resetbits);
aidmp_t *ai;
uint32 w;
+ if (BCM5357_DMP()) {
+ SI_ERROR(("%s: Accessing USB20H DMP register (ioctrl) on 5357\n",
+ __FUNCTION__));
+ return;
+ }
if (BCM4707_DMP()) {
SI_ERROR(("%s: Accessing CHIPCOMMONB DMP register (ioctrl) on 4707\n",
__FUNCTION__));
aidmp_t *ai;
uint32 w;
+ if (BCM5357_DMP()) {
+ SI_ERROR(("%s: Accessing USB20H DMP register (ioctrl) on 5357\n",
+ __FUNCTION__));
+ return 0;
+ }
if (BCM4707_DMP()) {
SI_ERROR(("%s: Accessing CHIPCOMMONB DMP register (ioctrl) on 4707\n",
__FUNCTION__));
aidmp_t *ai;
uint32 w;
+ if (BCM5357_DMP()) {
+ SI_ERROR(("%s: Accessing USB20H DMP register (iostatus) on 5357\n",
+ __FUNCTION__));
+ return 0;
+ }
if (BCM4707_DMP()) {
SI_ERROR(("%s: Accessing CHIPCOMMONB DMP register (ioctrl) on 4707\n",
__FUNCTION__));
osh = sii->osh;
+
/* Save and restore wrapper access window */
if (BUSTYPE(sii->pub.bustype) == PCI_BUS) {
if (PCIE_GEN2(sii)) {
axi_wrapper[i].wrapper_type == AI_SLAVE_WRAPPER ? "SLAVE" : "MASTER",
axi_wrapper[i].wrapper_addr);
+ /* BCM5357_DMP() */
+ if (((CHIPID(sih->chip) == BCM5357_CHIP_ID) ||
+ (CHIPID(sih->chip) == BCM4749_CHIP_ID)) &&
+ (sih->chippkg == BCM5357_PKG_ID) &&
+ (axi_wrapper[i].cid == USB20H_CORE_ID)) {
+ bcm_bprintf(b, "Skipping usb20h in 5357\n");
+ continue;
+ }
+
/* BCM4707_DMP() */
if (BCM4707_CHIP(CHIPID(sih->chip)) &&
(axi_wrapper[i].cid == NS_CCB_CORE_ID)) {
}
/* Restore the initial wrapper space */
- if (BUSTYPE(sii->pub.bustype) == PCI_BUS) {
- if (prev_value && cfg_reg) {
- OSL_PCI_WRITE_CONFIG(osh, cfg_reg, 4, prev_value);
- }
+ if (prev_value && cfg_reg) {
+ OSL_PCI_WRITE_CONFIG(osh, cfg_reg, 4, prev_value);
}
+
}
-#endif // endif
+#endif
+
void
-ai_update_backplane_timeouts(si_t *sih, bool enable, uint32 timeout_exp, uint32 cid)
+ai_enable_backplane_timeouts(si_t *sih)
{
#if defined(AXI_TIMEOUTS) || defined(BCM_BACKPLANE_TIMEOUT)
si_info_t *sii = SI_INFO(sih);
aidmp_t *ai;
uint32 i;
axi_wrapper_t * axi_wrapper = sii->axi_wrapper;
- uint32 errlogctrl = (enable << AIELC_TO_ENAB_SHIFT) |
- ((timeout_exp << AIELC_TO_EXP_SHIFT) & AIELC_TO_EXP_MASK);
#ifdef BCM_BACKPLANE_TIMEOUT
uint32 prev_value = 0;
offset = PCIE2_BAR0_CORE2_WIN2_OFFSET;
}
else {
- ASSERT(!"!PCIE_GEN1 && !PCIE_GEN2");
+ osl_panic("!PCIE_GEN1 && !PCIE_GEN2\n");
}
prev_value = OSL_PCI_READ_CONFIG(osh, cfg_reg, 4);
return;
}
}
+
#endif /* BCM_BACKPLANE_TIMEOUT */
for (i = 0; i < sii->axi_num_wrappers; ++i) {
continue;
}
- /* Update only given core if requested */
- if ((cid != 0) && (axi_wrapper[i].cid != cid)) {
- continue;
- }
-
#ifdef BCM_BACKPLANE_TIMEOUT
if (BUSTYPE(sii->pub.bustype) == PCI_BUS) {
- /* Set BAR0_CORE2_WIN2 to bridge wapper base address */
+ /* Set BAR0_CORE2_WIN2 to wapper base address */
OSL_PCI_WRITE_CONFIG(osh,
cfg_reg, 4, axi_wrapper[i].wrapper_addr);
/* set AI to BAR0 + Offset corresponding to Gen1 or gen2 */
- ai = (aidmp_t *) (DISCARD_QUAL(sii->curmap, uint8) + offset);
+ ai = (aidmp_t *) ((uint8*)sii->curmap + offset);
}
else
#endif /* BCM_BACKPLANE_TIMEOUT */
ai = (aidmp_t *)(uintptr) axi_wrapper[i].wrapper_addr;
}
- W_REG(sii->osh, &ai->errlogctrl, errlogctrl);
+ W_REG(sii->osh, &ai->errlogctrl, (1 << AIELC_TO_ENAB_SHIFT) |
+ ((AXI_TO_VAL << AIELC_TO_EXP_SHIFT) & AIELC_TO_EXP_MASK));
SI_VMSG(("ENABLED BPT: MFG:%x, CID:%x, ADDR:%x, ERR_CTRL:%x\n",
axi_wrapper[i].mfg,
static uint32 si_ignore_errlog_cnt = 0;
static bool
-ai_ignore_errlog(si_info_t *sii, aidmp_t *ai,
- uint32 lo_addr, uint32 hi_addr, uint32 err_axi_id, uint32 errsts)
+ai_ignore_errlog(si_info_t *sii, uint32 lo_addr, uint32 hi_addr, uint32 err_axi_id, uint32 errsts)
{
uint32 axi_id;
-#ifdef BCMPCIE_BTLOG
- uint32 axi_id2 = BCM4347_UNUSED_AXI_ID;
-#endif /* BCMPCIE_BTLOG */
- uint32 ignore_errsts = AIELS_SLAVE_ERR;
- uint32 ignore_hi = BT_CC_SPROM_BADREG_HI;
- uint32 ignore_lo = BT_CC_SPROM_BADREG_LO;
- uint32 ignore_size = BT_CC_SPROM_BADREG_SIZE;
/* ignore the BT slave errors if the errlog is to chipcommon addr 0x190 */
switch (CHIPID(sii->pub.chip)) {
case BCM4345_CHIP_ID:
axi_id = BCM4345_BT_AXI_ID;
break;
- case BCM4349_CHIP_GRPID:
- axi_id = BCM4349_BT_AXI_ID;
- break;
- case BCM4364_CHIP_ID:
- case BCM4373_CHIP_ID:
- axi_id = BCM4364_BT_AXI_ID;
- break;
-#ifdef BCMPCIE_BTLOG
- case BCM4347_CHIP_ID:
- case BCM4357_CHIP_ID:
- axi_id = BCM4347_CC_AXI_ID;
- axi_id2 = BCM4347_PCIE_AXI_ID;
- ignore_errsts = AIELS_TIMEOUT;
- ignore_hi = BCM4347_BT_ADDR_HI;
- ignore_lo = BCM4347_BT_ADDR_LO;
- ignore_size = BCM4347_BT_SIZE;
- break;
-#endif /* BCMPCIE_BTLOG */
-
default:
return FALSE;
}
/* AXI ID check */
- err_axi_id &= AI_ERRLOGID_AXI_ID_MASK;
- if (!(err_axi_id == axi_id ||
-#ifdef BCMPCIE_BTLOG
- (axi_id2 != BCM4347_UNUSED_AXI_ID && err_axi_id == axi_id2)))
-#else
- FALSE))
-#endif /* BCMPCIE_BTLOG */
+ if ((err_axi_id & AI_ERRLOGID_AXI_ID_MASK) != axi_id)
return FALSE;
/* slave errors */
- if ((errsts & AIELS_TIMEOUT_MASK) != ignore_errsts)
+ if ((errsts & AIELS_TIMEOUT_MASK) != AIELS_SLAVE_ERR)
return FALSE;
- /* address range check */
- if ((hi_addr != ignore_hi) ||
- (lo_addr < ignore_lo) || (lo_addr >= (ignore_lo + ignore_size)))
+ /* chipc reg 0x190 */
+ if ((hi_addr != BT_CC_SPROM_BADREG_HI) || (lo_addr != BT_CC_SPROM_BADREG_LO))
return FALSE;
-#ifdef BCMPCIE_BTLOG
- if (ignore_errsts == AIELS_TIMEOUT) {
- /* reset AXI timeout */
- ai_reset_axi_to(sii, ai);
- }
-#endif /* BCMPCIE_BTLOG */
-
return TRUE;
}
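/*
 * Editorial summary, not part of this patch: with the 4347/PCIe special
 * cases removed, an error-log entry is ignored only when all three checks
 * pass -- the logged AXI ID matches the chip's known BT AXI ID, the error
 * class is a slave error, and the faulting address is exactly the chipcommon
 * register flagged by BT_CC_SPROM_BADREG_HI/LO (0x190, per the comment
 * above).
 */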
#endif /* defined (AXI_TIMEOUTS) || defined (BCM_BACKPLANE_TIMEOUT) */
#ifdef BCM_BACKPLANE_TIMEOUT
/* Function to return the APB bridge details corresponding to the core */
-static bool
+bool
ai_get_apb_bridge(si_t * sih, uint32 coreidx, uint32 *apb_id, uint32 * apb_coreuinit)
{
uint i;
}
uint32
-ai_clear_backplane_to_fast(si_t *sih, void *addr)
+ai_clear_backplane_to_fast(si_t *sih, void * addr)
{
si_info_t *sii = SI_INFO(sih);
- volatile void *curmap = sii->curmap;
+ void * curmap = sii->curmap;
bool core_reg = FALSE;
/* Use fast path only for core register access */
- if (((uintptr)addr >= (uintptr)curmap) &&
- ((uintptr)addr < ((uintptr)curmap + SI_CORE_SIZE))) {
+ if ((addr >= curmap) && (addr < (curmap + SI_CORE_SIZE))) {
/* address being accessed is within current core reg map */
core_reg = TRUE;
}
#endif /* BCM_BACKPLANE_TIMEOUT */
#if defined(AXI_TIMEOUTS) || defined(BCM_BACKPLANE_TIMEOUT)
-static bool g_disable_backplane_logs = FALSE;
-
-#if defined(ETD)
-static uint32 last_axi_error = AXI_WRAP_STS_NONE;
-static uint32 last_axi_error_core = 0;
-static uint32 last_axi_error_wrap = 0;
-#endif /* ETD */
-
/*
* API to clear the back plane timeout per core.
* Caller may passs optional wrapper address. If present this will be used as
* will be verified.
*/
uint32
-ai_clear_backplane_to_per_core(si_t *sih, uint coreid, uint coreunit, void *wrap)
+ai_clear_backplane_to_per_core(si_t *sih, uint coreid, uint coreunit, void * wrap)
{
int ret = AXI_WRAP_STS_NONE;
aidmp_t *ai = NULL;
uint32 target_coreidx = si_findcoreidx(sih, coreid, coreunit);
#if defined(BCM_BACKPLANE_TIMEOUT)
- si_axi_error_t * axi_error = sih->err_info ?
- &sih->err_info->axi_error[sih->err_info->count] : NULL;
+ si_axi_error_t * axi_error = &sih->err_info->axi_error[sih->err_info->count];
#endif /* BCM_BACKPLANE_TIMEOUT */
bool restore_core = FALSE;
SI_PRINT(("%s, errlogstatus:%x - Slave Wrapper:%x\n",
__FUNCTION__, errlog_status, coreid));
ret = AXI_WRAP_STS_WRAP_RD_ERR;
- errlog_lo = (uint32)(uintptr)&ai->errlogstatus;
+ errlog_lo = (uint32)&ai->errlogstatus;
goto end;
}
SI_PRINT(("%s: prev errlogstatus:%x, errlogstatus:%x\n",
__FUNCTION__, errlog_status, tmp));
ret = AXI_WRAP_STS_WRAP_RD_ERR;
- errlog_lo = (uint32)(uintptr)&ai->errlogstatus;
+ errlog_lo = (uint32)&ai->errlogstatus;
goto end;
}
/*
errlog_flags = R_REG(sii->osh, &ai->errlogflags);
/* we are already in the error path, so OK to check for the slave error */
- if (ai_ignore_errlog(sii, ai, errlog_lo, errlog_hi, errlog_id,
+ if (ai_ignore_errlog(sii, errlog_lo, errlog_hi, errlog_id,
errlog_status)) {
si_ignore_errlog_cnt++;
goto end;
/* only reset APB Bridge on timeout (not slave error, or dec error) */
switch (errlog_status & AIELS_TIMEOUT_MASK) {
case AIELS_SLAVE_ERR:
- SI_PRINT(("AXI slave error\n"));
+ SI_PRINT(("AXI slave error"));
ret = AXI_WRAP_STS_SLAVE_ERR;
break;
case AIELS_TIMEOUT:
- ai_reset_axi_to(sii, ai);
+ /* reset APB Bridge */
+ OR_REG(sii->osh, &ai->resetctrl, AIRC_RESET);
+ /* sync write */
+ (void)R_REG(sii->osh, &ai->resetctrl);
+ /* clear Reset bit */
+ AND_REG(sii->osh, &ai->resetctrl, ~(AIRC_RESET));
+ /* sync write */
+ (void)R_REG(sii->osh, &ai->resetctrl);
+ SI_PRINT(("AXI timeout"));
ret = AXI_WRAP_STS_TIMEOUT;
break;
case AIELS_DECODE:
- SI_PRINT(("AXI decode error\n"));
+ SI_PRINT(("AXI decode error"));
ret = AXI_WRAP_STS_DECODE_ERR;
break;
default:
}
end:
-#if defined(ETD)
- if (ret != AXI_WRAP_STS_NONE) {
- last_axi_error = ret;
- last_axi_error_core = coreid;
- last_axi_error_wrap = (uint32)ai;
- }
-#endif /* ETD */
#if defined(BCM_BACKPLANE_TIMEOUT)
if (axi_error && (ret != AXI_WRAP_STS_NONE)) {
return ret;
}
-
-/* reset AXI timeout */
-static void
-ai_reset_axi_to(si_info_t *sii, aidmp_t *ai)
-{
- /* reset APB Bridge */
- OR_REG(sii->osh, &ai->resetctrl, AIRC_RESET);
- /* sync write */
- (void)R_REG(sii->osh, &ai->resetctrl);
- /* clear Reset bit */
- AND_REG(sii->osh, &ai->resetctrl, ~(AIRC_RESET));
- /* sync write */
- (void)R_REG(sii->osh, &ai->resetctrl);
- SI_PRINT(("AXI timeout\n"));
- if (R_REG(sii->osh, &ai->resetctrl) & AIRC_RESET) {
- SI_PRINT(("reset failed on wrapper %p\n", ai));
- g_disable_backplane_logs = TRUE;
- }
-}
#endif /* AXI_TIMEOUTS || BCM_BACKPLANE_TIMEOUT */
/*
if ((sii->axi_num_wrappers == 0) || (!PCIE(sii)))
#else
if (sii->axi_num_wrappers == 0)
-#endif // endif
+#endif
{
SI_VMSG((" %s, axi_num_wrappers:%d, Is_PCIE:%d, BUS_TYPE:%d, ID:%x\n",
__FUNCTION__, sii->axi_num_wrappers, PCIE(sii),
offset = PCIE2_BAR0_CORE2_WIN2_OFFSET;
}
else {
- ASSERT(!"!PCIE_GEN1 && !PCIE_GEN2");
+ osl_panic("!PCIE_GEN1 && !PCIE_GEN2\n");
}
prev_value = OSL_PCI_READ_CONFIG(osh, cfg_reg, 4);
if (prev_value == ID32_INVALID) {
si_axi_error_t * axi_error =
- sih->err_info ?
- &sih->err_info->axi_error[sih->err_info->count] :
- NULL;
-
+ &sih->err_info->axi_error[sih->err_info->count];
SI_PRINT(("%s, PCI_BAR0_WIN2 - %x\n", __FUNCTION__, prev_value));
- if (axi_error) {
- axi_error->error = ret = AXI_WRAP_STS_PCI_RD_ERR;
- axi_error->errlog_lo = cfg_reg;
- sih->err_info->count++;
-
- if (sih->err_info->count == SI_MAX_ERRLOG_SIZE) {
- sih->err_info->count = SI_MAX_ERRLOG_SIZE - 1;
- SI_PRINT(("AXI Error log overflow\n"));
- }
+
+ axi_error->error = ret = AXI_WRAP_STS_PCI_RD_ERR;
+ axi_error->errlog_lo = cfg_reg;
+ sih->err_info->count++;
+
+ if (sih->err_info->count == SI_MAX_ERRLOG_SIZE) {
+ sih->err_info->count = SI_MAX_ERRLOG_SIZE - 1;
+ SI_PRINT(("AXI Error log overflow\n"));
}
return ret;
}
#ifdef BCM_BACKPLANE_TIMEOUT
+
if (BUSTYPE(sii->pub.bustype) == PCI_BUS) {
/* Set BAR0_CORE2_WIN2 to bridge wapper base address */
OSL_PCI_WRITE_CONFIG(osh,
cfg_reg, 4, axi_wrapper[i].wrapper_addr);
/* set AI to BAR0 + Offset corresponding to Gen1 or gen2 */
- ai = (aidmp_t *) (DISCARD_QUAL(sii->curmap, uint8) + offset);
+ ai = (aidmp_t *) ((uint8*)sii->curmap + offset);
}
else
#endif /* BCM_BACKPLANE_TIMEOUT */
ai = (aidmp_t *)(uintptr) axi_wrapper[i].wrapper_addr;
}
- tmp = ai_clear_backplane_to_per_core(sih, axi_wrapper[i].cid, 0,
- DISCARD_QUAL(ai, void));
+ tmp = ai_clear_backplane_to_per_core(sih, axi_wrapper[i].cid, 0, (void*)ai);
ret |= tmp;
}
cib = cores_info->cib[coreidx];
return ((cib & CIB_NSP_MASK) >> CIB_NSP_SHIFT);
}
-
-#ifdef UART_TRAP_DBG
-void
-ai_dump_APB_Bridge_registers(si_t *sih)
-{
-aidmp_t *ai;
-si_info_t *sii = SI_INFO(sih);
-
- ai = (aidmp_t *) sii->br_wrapba[0];
- printf("APB Bridge 0\n");
- printf("lo 0x%08x, hi 0x%08x, id 0x%08x, flags 0x%08x",
- R_REG(sii->osh, &ai->errlogaddrlo),
- R_REG(sii->osh, &ai->errlogaddrhi),
- R_REG(sii->osh, &ai->errlogid),
- R_REG(sii->osh, &ai->errlogflags));
- printf("\n status 0x%08x\n", R_REG(sii->osh, &ai->errlogstatus));
-}
-#endif /* UART_TRAP_DBG */
-
-void
-ai_force_clocks(si_t *sih, uint clock_state)
-{
-
- si_info_t *sii = SI_INFO(sih);
- aidmp_t *ai, *ai_sec = NULL;
- volatile uint32 dummy;
- uint32 ioctrl;
- si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
-
- ASSERT(GOODREGS(sii->curwrap));
- ai = sii->curwrap;
- if (cores_info->wrapba2[sii->curidx])
- ai_sec = REG_MAP(cores_info->wrapba2[sii->curidx], SI_CORE_SIZE);
-
- /* ensure there are no pending backplane operations */
- SPINWAIT((R_REG(sii->osh, &ai->resetstatus) != 0), 300);
-
- if (clock_state == FORCE_CLK_ON) {
- ioctrl = R_REG(sii->osh, &ai->ioctrl);
- W_REG(sii->osh, &ai->ioctrl, (ioctrl | SICF_FGC));
- dummy = R_REG(sii->osh, &ai->ioctrl);
- BCM_REFERENCE(dummy);
- if (ai_sec) {
- ioctrl = R_REG(sii->osh, &ai_sec->ioctrl);
- W_REG(sii->osh, &ai_sec->ioctrl, (ioctrl | SICF_FGC));
- dummy = R_REG(sii->osh, &ai_sec->ioctrl);
- BCM_REFERENCE(dummy);
- }
- } else {
- ioctrl = R_REG(sii->osh, &ai->ioctrl);
- W_REG(sii->osh, &ai->ioctrl, (ioctrl & (~SICF_FGC)));
- dummy = R_REG(sii->osh, &ai->ioctrl);
- BCM_REFERENCE(dummy);
- if (ai_sec) {
- ioctrl = R_REG(sii->osh, &ai_sec->ioctrl);
- W_REG(sii->osh, &ai_sec->ioctrl, (ioctrl & (~SICF_FGC)));
- dummy = R_REG(sii->osh, &ai_sec->ioctrl);
- BCM_REFERENCE(dummy);
- }
- }
- /* ensure there are no pending backplane operations */
- SPINWAIT((R_REG(sii->osh, &ai->resetstatus) != 0), 300);
-}
* Contents are wifi-specific, used by any kernel or app-level
* software that might want wifi things as it grows.
*
- * Copyright (C) 1999-2019, Broadcom.
- *
+ * Copyright (C) 1999-2017, Broadcom Corporation
+ *
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
* under the terms of the GNU General Public License version 2 (the "GPL"),
* available at http://www.broadcom.com/licenses/GPLv2.php, with the
* following added to such license:
- *
+ *
* As a special exception, the copyright holders of this software give you
* permission to link this software with independent modules, and to copy and
* distribute the resulting executable under terms of your choice, provided that
* the license of that module. An independent module is a module which is not
* derived from this software. The special exception does not apply to any
* modifications of the software.
- *
+ *
* Notwithstanding the above, under no circumstances may you combine this
* software in any way with any other Broadcom software provided under a license
* other than the GPL, without Broadcom's express prior written consent.
*
* <<Broadcom-WL-IPTag/Open:>>
*
- * $Id: bcm_app_utils.c 667243 2016-10-26 11:37:48Z $
+ * $Id: bcm_app_utils.c 623866 2016-03-09 11:58:34Z $
*/
#include <typedefs.h>
#include <ctype.h>
#ifndef ASSERT
#define ASSERT(exp)
-#endif // endif
+#endif
#endif /* BCMDRIVER */
#include <bcmwifi_channels.h>
#if defined(WIN32) && (defined(BCMDLL) || defined(WLMDLL))
#include <bcmstdlib.h> /* For wl/exe/GNUmakefile.brcm_wlu and GNUmakefile.wlm_dll */
-#endif // endif
+#endif
#include <bcmutils.h>
#include <wlioctl.h>
#define NUM_OF_CNT_IN_WL_CNT_VER_11_T \
((sizeof(wl_cnt_ver_11_t) - 2 * sizeof(uint16)) / sizeof(uint32))
/* Exclude 64 macstat cnt variables. */
-#define NUM_OF_WLCCNT_IN_WL_CNT_VER_11_T \
- ((sizeof(wl_cnt_wlc_t)) / sizeof(uint32))
+#define NUM_OF_WLCCNT_IN_WL_CNT_VER_11_T \
+ (NUM_OF_CNT_IN_WL_CNT_VER_11_T - WL_CNT_MCST_VAR_NUM)
/* Index conversion table from wl_cnt_ver_6_t to wl_cnt_wlc_t */
static const uint8 wlcntver6t_to_wlcntwlct[NUM_OF_WLCCNT_IN_WL_CNT_VER_6_T] = {
IDX_IN_WL_CNT_VER_6_T(wepexcluded_mcst)
};
-#define INVALID_IDX ((uint8)(-1))
-
/* Index conversion table from wl_cnt_ver_11_t to wl_cnt_wlc_t */
static const uint8 wlcntver11t_to_wlcntwlct[NUM_OF_WLCCNT_IN_WL_CNT_VER_11_T] = {
IDX_IN_WL_CNT_VER_11_T(txframe),
IDX_IN_WL_CNT_VER_11_T(ampdu_wds),
IDX_IN_WL_CNT_VER_11_T(txlost),
IDX_IN_WL_CNT_VER_11_T(txdatamcast),
- IDX_IN_WL_CNT_VER_11_T(txdatabcast),
- INVALID_IDX,
- IDX_IN_WL_CNT_VER_11_T(rxback),
- IDX_IN_WL_CNT_VER_11_T(txback),
- INVALID_IDX,
- INVALID_IDX,
- INVALID_IDX,
- INVALID_IDX,
- IDX_IN_WL_CNT_VER_11_T(txbcast),
- IDX_IN_WL_CNT_VER_11_T(txdropped),
- IDX_IN_WL_CNT_VER_11_T(rxbcast),
- IDX_IN_WL_CNT_VER_11_T(rxdropped)
+ IDX_IN_WL_CNT_VER_11_T(txdatabcast)
};
/* Index conversion table from wl_cnt_ver_11_t to
IDX_IN_WL_CNT_VER_11_T(bphy_badplcp)
};
+
/* Index conversion table from wl_cnt_ver_6_t to wl_cnt_v_le10_mcst_t */
static const uint8 wlcntver6t_to_wlcntvle10mcstt[WL_CNT_MCST_VAR_NUM] = {
IDX_IN_WL_CNT_VER_6_T(txallfrm),
} else {
for (i = 0; i < NUM_OF_WLCCNT_IN_WL_CNT_VER_11_T; i++) {
if (wlcntver11t_to_wlcntwlct[i] >= src_max_idx) {
- if (wlcntver11t_to_wlcntwlct[i] == INVALID_IDX) {
- continue;
- }
- else {
- /* src buffer does not have counters from here */
- break;
- }
+ /* src buffer does not have counters from here */
+ break;
}
dst[i] = src[wlcntver11t_to_wlcntwlct[i]];
}
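/*
 * Editorial note, not part of this patch: dropping the INVALID_IDX entries
 * assumes wlcntver11t_to_wlcntwlct[] has no gaps relative to the ver-11
 * source layout, so the first mapped index that reaches src_max_idx marks
 * the end of the counters present in the source buffer and the copy loop can
 * simply break there instead of skipping individual slots.
 */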
uint16 mcst_xtlv_id;
int res = BCME_OK;
wl_cnt_info_t *cntinfo = cntbuf;
- uint8 *xtlvbuf_p = cntinfo->data;
+ void *xtlvbuf_p = cntinfo->data;
uint16 ver = cntinfo->version;
uint16 xtlvbuflen = (uint16)buflen;
uint16 src_max_idx;
osl_t *osh = ctx;
#else
BCM_REFERENCE(ctx);
-#endif // endif
+#endif
if (ver >= WL_CNT_VERSION_XTLV) {
/* Already in xtlv format. */
#else
wlccnt = (wl_cnt_wlc_t *)malloc(sizeof(*wlccnt));
macstat = (uint32 *)malloc(WL_CNT_MCST_STRUCT_SZ);
-#endif // endif
+#endif
if (!wlccnt || !macstat) {
printf("%s: malloc fail!\n", __FUNCTION__);
res = BCME_NOMEM;
if (macstat) {
free(macstat);
}
-#endif // endif
+#endif
return res;
}
+++ /dev/null
-/*
- * Bloom filter support
- *
- * Copyright (C) 1999-2019, Broadcom.
- *
- * Unless you and Broadcom execute a separate written software license
- * agreement governing use of this software, this software is licensed to you
- * under the terms of the GNU General Public License version 2 (the "GPL"),
- * available at http://www.broadcom.com/licenses/GPLv2.php, with the
- * following added to such license:
- *
- * As a special exception, the copyright holders of this software give you
- * permission to link this software with independent modules, and to copy and
- * distribute the resulting executable under terms of your choice, provided that
- * you also meet, for each linked independent module, the terms and conditions of
- * the license of that module. An independent module is a module which is not
- * derived from this software. The special exception does not apply to any
- * modifications of the software.
- *
- * Notwithstanding the above, under no circumstances may you combine this
- * software in any way with any other Broadcom software provided under a license
- * other than the GPL, without Broadcom's express prior written consent.
- *
- *
- * <<Broadcom-WL-IPTag/Open:>>
- *
- * $Id: bcmbloom.c 788740 2018-11-13 21:45:01Z $
- */
-
-#include <typedefs.h>
-#include <bcmdefs.h>
-
-#include <stdarg.h>
-
-#ifdef BCMDRIVER
-#include <osl.h>
-#include <bcmutils.h>
-#else /* !BCMDRIVER */
-#include <stdio.h>
-#include <string.h>
-#ifndef ASSERT
-#define ASSERT(exp)
-#endif // endif
-#endif /* !BCMDRIVER */
-#include <bcmutils.h>
-
-#include <bcmbloom.h>
-
-#define BLOOM_BIT_LEN(_x) ((_x) << 3)
-
-struct bcm_bloom_filter {
- void *cb_ctx;
- uint max_hash;
- bcm_bloom_hash_t *hash; /* array of hash functions */
- uint filter_size; /* in bytes */
- uint8 *filter; /* can be NULL for validate only */
-};
-
-/* public interface */
-int
-bcm_bloom_create(bcm_bloom_alloc_t alloc_cb,
- bcm_bloom_free_t free_cb, void *cb_ctx, uint max_hash,
- uint filter_size, bcm_bloom_filter_t **bloom)
-{
- int err = BCME_OK;
- bcm_bloom_filter_t *bp = NULL;
-
- if (!bloom || !alloc_cb || (max_hash == 0)) {
- err = BCME_BADARG;
- goto done;
- }
-
- bp = (*alloc_cb)(cb_ctx, sizeof(*bp));
- if (!bp) {
- err = BCME_NOMEM;
- goto done;
- }
-
- memset(bp, 0, sizeof(*bp));
- bp->cb_ctx = cb_ctx;
- bp->max_hash = max_hash;
- bp->hash = (*alloc_cb)(cb_ctx, sizeof(*bp->hash) * max_hash);
- memset(bp->hash, 0, sizeof(*bp->hash) * max_hash);
-
- if (!bp->hash) {
- err = BCME_NOMEM;
- goto done;
- }
-
- if (filter_size > 0) {
- bp->filter = (*alloc_cb)(cb_ctx, filter_size);
- if (!bp->filter) {
- err = BCME_NOMEM;
- goto done;
- }
- bp->filter_size = filter_size;
- memset(bp->filter, 0, filter_size);
- }
-
- *bloom = bp;
-
-done:
- if (err != BCME_OK)
- bcm_bloom_destroy(&bp, free_cb);
-
- return err;
-}
-
-int
-bcm_bloom_destroy(bcm_bloom_filter_t **bloom, bcm_bloom_free_t free_cb)
-{
- int err = BCME_OK;
- bcm_bloom_filter_t *bp;
-
- if (!bloom || !*bloom || !free_cb)
- goto done;
-
- bp = *bloom;
- *bloom = NULL;
-
- if (bp->filter)
- (*free_cb)(bp->cb_ctx, bp->filter, bp->filter_size);
- if (bp->hash)
- (*free_cb)(bp->cb_ctx, bp->hash,
- sizeof(*bp->hash) * bp->max_hash);
- (*free_cb)(bp->cb_ctx, bp, sizeof(*bp));
-
-done:
- return err;
-}
-
-int
-bcm_bloom_add_hash(bcm_bloom_filter_t *bp, bcm_bloom_hash_t hash, uint *idx)
-{
- uint i;
-
- if (!bp || !hash || !idx)
- return BCME_BADARG;
-
- for (i = 0; i < bp->max_hash; ++i) {
- if (bp->hash[i] == NULL)
- break;
- }
-
- if (i >= bp->max_hash)
- return BCME_NORESOURCE;
-
- bp->hash[i] = hash;
- *idx = i;
- return BCME_OK;
-}
-
-int
-bcm_bloom_remove_hash(bcm_bloom_filter_t *bp, uint idx)
-{
- if (!bp)
- return BCME_BADARG;
-
- if (idx >= bp->max_hash)
- return BCME_NOTFOUND;
-
- bp->hash[idx] = NULL;
- return BCME_OK;
-}
-
-bool
-bcm_bloom_is_member(bcm_bloom_filter_t *bp,
- const uint8 *tag, uint tag_len, const uint8 *buf, uint buf_len)
-{
- uint i;
- int err = BCME_OK;
-
- if (!tag || (tag_len == 0)) /* empty tag is always a member */
- goto done;
-
- /* use internal buffer if none was specified */
- if (!buf || (buf_len == 0)) {
- if (!bp->filter) /* every one is a member of empty filter */
- goto done;
-
- buf = bp->filter;
- buf_len = bp->filter_size;
- }
-
- for (i = 0; i < bp->max_hash; ++i) {
- uint pos;
- if (!bp->hash[i])
- continue;
- pos = (*bp->hash[i])(bp->cb_ctx, i, tag, tag_len);
-
- /* all bits must be set for a match */
- CLANG_DIAGNOSTIC_PUSH_SUPPRESS_CAST()
- if (isclr(buf, pos % BLOOM_BIT_LEN(buf_len))) {
- CLANG_DIAGNOSTIC_POP()
- err = BCME_NOTFOUND;
- break;
- }
- }
-
-done:
- return err;
-}
-
-int
-bcm_bloom_add_member(bcm_bloom_filter_t *bp, const uint8 *tag, uint tag_len)
-{
- uint i;
-
- if (!bp || !tag || (tag_len == 0))
- return BCME_BADARG;
-
- if (!bp->filter) /* validate only */
- return BCME_UNSUPPORTED;
-
- for (i = 0; i < bp->max_hash; ++i) {
- uint pos;
- if (!bp->hash[i])
- continue;
- pos = (*bp->hash[i])(bp->cb_ctx, i, tag, tag_len);
- setbit(bp->filter, pos % BLOOM_BIT_LEN(bp->filter_size));
- }
-
- return BCME_OK;
-}
-
-int bcm_bloom_get_filter_data(bcm_bloom_filter_t *bp,
- uint buf_size, uint8 *buf, uint *buf_len)
-{
- if (!bp)
- return BCME_BADARG;
-
- if (buf_len)
- *buf_len = bp->filter_size;
-
- if (buf_size < bp->filter_size)
- return BCME_BUFTOOSHORT;
-
- if (bp->filter && bp->filter_size)
- memcpy(buf, bp->filter, bp->filter_size);
-
- return BCME_OK;
-}
/*
* bcmevent read-only data shared by kernel or app layers
*
- * Copyright (C) 1999-2019, Broadcom.
- *
+ * Copyright (C) 1999-2017, Broadcom Corporation
+ *
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
* under the terms of the GNU General Public License version 2 (the "GPL"),
* available at http://www.broadcom.com/licenses/GPLv2.php, with the
* following added to such license:
- *
+ *
* As a special exception, the copyright holders of this software give you
* permission to link this software with independent modules, and to copy and
* distribute the resulting executable under terms of your choice, provided that
* the license of that module. An independent module is a module which is not
* derived from this software. The special exception does not apply to any
* modifications of the software.
- *
+ *
* Notwithstanding the above, under no circumstances may you combine this
* software in any way with any other Broadcom software provided under a license
* other than the GPL, without Broadcom's express prior written consent.
*
* <<Broadcom-WL-IPTag/Open:>>
*
- * $Id: bcmevent.c 807989 2019-03-05 07:57:42Z $
+ * $Id: bcmevent.c 707287 2017-06-27 06:44:29Z $
*/
#include <typedefs.h>
BCMEVENT_NAME(WLC_E_IF),
#ifdef WLP2P
BCMEVENT_NAME(WLC_E_P2P_DISC_LISTEN_COMPLETE),
-#endif // endif
+#endif
BCMEVENT_NAME(WLC_E_RSSI),
BCMEVENT_NAME(WLC_E_PFN_SCAN_COMPLETE),
+ BCMEVENT_NAME(WLC_E_EXTLOG_MSG),
BCMEVENT_NAME(WLC_E_ACTION_FRAME),
BCMEVENT_NAME(WLC_E_ACTION_FRAME_RX),
BCMEVENT_NAME(WLC_E_ACTION_FRAME_COMPLETE),
-#ifdef BCMWAPI_WAI
- BCMEVENT_NAME(WLC_E_WAI_STA_EVENT),
- BCMEVENT_NAME(WLC_E_WAI_MSG),
-#endif /* BCMWAPI_WAI */
BCMEVENT_NAME(WLC_E_ESCAN_RESULT),
BCMEVENT_NAME(WLC_E_ACTION_FRAME_OFF_CHAN_COMPLETE),
#ifdef WLP2P
BCMEVENT_NAME(WLC_E_PROBRESP_MSG),
BCMEVENT_NAME(WLC_E_P2P_PROBREQ_MSG),
-#endif // endif
+#endif
#ifdef PROP_TXSTATUS
BCMEVENT_NAME(WLC_E_FIFO_CREDIT_MAP),
-#endif // endif
+#endif
BCMEVENT_NAME(WLC_E_WAKE_EVENT),
BCMEVENT_NAME(WLC_E_DCS_REQUEST),
BCMEVENT_NAME(WLC_E_RM_COMPLETE),
+#ifdef WLMEDIA_HTSF
+ BCMEVENT_NAME(WLC_E_HTSFSYNC),
+#endif
BCMEVENT_NAME(WLC_E_OVERLAY_REQ),
BCMEVENT_NAME(WLC_E_CSA_COMPLETE_IND),
BCMEVENT_NAME(WLC_E_EXCESS_PM_WAKE_EVENT),
BCMEVENT_NAME(WLC_E_PFN_SCAN_ALLGONE),
#ifdef SOFTAP
BCMEVENT_NAME(WLC_E_GTK_PLUMBED),
-#endif // endif
+#endif
BCMEVENT_NAME(WLC_E_ASSOC_REQ_IE),
BCMEVENT_NAME(WLC_E_ASSOC_RESP_IE),
BCMEVENT_NAME(WLC_E_BEACON_FRAME_RX),
#ifdef WLWNM
BCMEVENT_NAME(WLC_E_WNM_STA_SLEEP),
#endif /* WLWNM */
-#if defined(WL_PROXDETECT) || defined(RTT_SUPPORT)
+#if defined(WL_PROXDETECT)
BCMEVENT_NAME(WLC_E_PROXD),
-#endif // endif
+#endif
BCMEVENT_NAME(WLC_E_CCA_CHAN_QUAL),
BCMEVENT_NAME(WLC_E_BSSID),
#ifdef PROP_TXSTATUS
BCMEVENT_NAME(WLC_E_BCMC_CREDIT_SUPPORT),
-#endif // endif
+#endif
BCMEVENT_NAME(WLC_E_PSTA_PRIMARY_INTF_IND),
BCMEVENT_NAME(WLC_E_TXFAIL_THRESH),
#ifdef GSCAN_SUPPORT
#endif /* GSCAN_SUPPORT */
#ifdef WLBSSLOAD_REPORT
BCMEVENT_NAME(WLC_E_BSS_LOAD),
-#endif // endif
+#endif
#if defined(BT_WIFI_HANDOVER) || defined(WL_TBOW)
BCMEVENT_NAME(WLC_E_BT_WIFI_HANDOVER_REQ),
-#endif // endif
-#ifdef WLFBT
- BCMEVENT_NAME(WLC_E_FBT),
-#endif /* WLFBT */
+#endif
BCMEVENT_NAME(WLC_E_AUTHORIZED),
BCMEVENT_NAME(WLC_E_PROBREQ_MSG_RX),
BCMEVENT_NAME(WLC_E_CSA_START_IND),
BCMEVENT_NAME(WLC_E_ALLOW_CREDIT_BORROW),
BCMEVENT_NAME(WLC_E_MSCH),
BCMEVENT_NAME(WLC_E_ULP),
- BCMEVENT_NAME(WLC_E_NAN),
- BCMEVENT_NAME(WLC_E_PKT_FILTER),
- BCMEVENT_NAME(WLC_E_DMA_TXFLUSH_COMPLETE),
BCMEVENT_NAME(WLC_E_PSK_AUTH),
BCMEVENT_NAME(WLC_E_SDB_TRANSITION),
- BCMEVENT_NAME(WLC_E_PFN_SCAN_BACKOFF),
- BCMEVENT_NAME(WLC_E_PFN_BSSID_SCAN_BACKOFF),
- BCMEVENT_NAME(WLC_E_AGGR_EVENT),
- BCMEVENT_NAME(WLC_E_TVPM_MITIGATION),
-#ifdef WL_NAN
- BCMEVENT_NAME(WLC_E_NAN_CRITICAL),
- BCMEVENT_NAME(WLC_E_NAN_NON_CRITICAL),
- BCMEVENT_NAME(WLC_E_NAN),
-#endif /* WL_NAN */
- BCMEVENT_NAME(WLC_E_RPSNOA),
- BCMEVENT_NAME(WLC_E_PHY_CAL),
- BCMEVENT_NAME(WLC_E_WA_LQM),
};
+
const char *bcmevent_get_name(uint event_type)
{
/* note: first coded this as a static const but some
#else
err = BCME_UNSUPPORTED;
break;
-#endif // endif
+#endif
default:
err = BCME_NOTFOUND;
* BCMSDH interface glue
* implement bcmsdh API for SDIOH driver
*
- * Copyright (C) 1999-2019, Broadcom.
- *
+ * Copyright (C) 1999-2017, Broadcom Corporation
+ *
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
* under the terms of the GNU General Public License version 2 (the "GPL"),
* available at http://www.broadcom.com/licenses/GPLv2.php, with the
* following added to such license:
- *
+ *
* As a special exception, the copyright holders of this software give you
* permission to link this software with independent modules, and to copy and
* distribute the resulting executable under terms of your choice, provided that
* the license of that module. An independent module is a module which is not
* derived from this software. The special exception does not apply to any
* modifications of the software.
- *
+ *
* Notwithstanding the above, under no circumstances may you combine this
* software in any way with any other Broadcom software provided under a license
* other than the GPL, without Broadcom's express prior written consent.
*
* <<Broadcom-WL-IPTag/Open:>>
*
- * $Id: bcmsdh.c 727623 2017-10-21 01:00:32Z $
+ * $Id: bcmsdh.c 671319 2016-11-21 14:27:29Z $
*/
/**
static dhd_hang_state_t g_dhd_hang_state = NO_HANG_STATE;
#endif /* defined (BT_OVER_SDIO) */
+
#if defined(OOB_INTR_ONLY) && defined(HW_OOB) || defined(FORCE_WOWLAN)
extern int
sdioh_enable_hw_oob_intr(void *sdioh, bool enable);
{
sdioh_enable_hw_oob_intr(sdh->sdioh, enable);
}
-#endif // endif
+#endif
#if defined(BT_OVER_SDIO)
void bcmsdh_btsdio_process_hang_state(dhd_hang_state_t new_state)
if (HANG_RECOVERY_STATE == new_state ||
NO_HANG_STATE == new_state)
state_change = true;
-
break;
case HANG_RECOVERY_STATE:
bcmsdh->sdioh = sdioh;
bcmsdh->osh = osh;
bcmsdh->init_success = TRUE;
- *regsva = si_enum_base(0);
+ *regsva = SI_ENUM_BASE;
bcmsdh_force_sbwad_calc(bcmsdh, FALSE);
/* Report the BAR, to fix if needed */
- bcmsdh->sbwad = si_enum_base(0);
+ bcmsdh->sbwad = SI_ENUM_BASE;
/* save the handler locally */
l_bcmsdh = bcmsdh;
int
bcmsdh_iovar_op(void *sdh, const char *name,
- void *params, uint plen, void *arg, uint len, bool set)
+ void *params, int plen, void *arg, int len, bool set)
{
bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh;
return sdioh_iovar_op(bcmsdh->sdioh, name, params, plen, arg, len, set);
{
bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh;
SDIOH_API_RC status;
-#ifdef BCMSPI_ANDROID
- uint32 data;
-#endif /* BCMSPI_ANDROID */
ASSERT(bcmsdh);
status = sdioh_interrupt_set(bcmsdh->sdioh, TRUE);
-#ifdef BCMSPI_ANDROID
- data = bcmsdh_cfg_read_word(sdh, 0, 4, NULL);
- data |= 0xE0E70000;
- bcmsdh_cfg_write_word(sdh, 0, 4, data, NULL);
-#endif /* BCMSPI_ANDROID */
return (SDIOH_API_SUCCESS(status) ? 0 : BCME_ERROR);
}
{
bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh;
SDIOH_API_RC status;
-#ifdef BCMSPI_ANDROID
- uint32 data;
-#endif /* BCMSPI_ANDROID */
ASSERT(bcmsdh);
status = sdioh_interrupt_set(bcmsdh->sdioh, FALSE);
-#ifdef BCMSPI_ANDROID
- data = bcmsdh_cfg_read_word(sdh, 0, 4, NULL);
- data &= ~0xE0E70000;
- bcmsdh_cfg_write_word(sdh, 0, 4, data, NULL);
-#endif /* BCMSPI_ANDROID */
return (SDIOH_API_SUCCESS(status) ? 0 : BCME_ERROR);
}
ASSERT(sdh);
return sdioh_interrupt_pending(bcmsdh->sdioh);
}
-#endif // endif
+#endif
+
int
bcmsdh_devremove_reg(void *sdh, bcmsdh_cb_fn_t fn, void *argh)
SDIOH_API_RC status;
#ifdef SDIOH_API_ACCESS_RETRY_LIMIT
int32 retry = 0;
-#endif // endif
+#endif
uint8 data = 0;
if (!bcmsdh)
do {
if (retry) /* wait for 1 ms till bus get settled down */
OSL_DELAY(1000);
-#endif // endif
+#endif
status = sdioh_cfg_read(bcmsdh->sdioh, fnc_num, addr, (uint8 *)&data);
#ifdef SDIOH_API_ACCESS_RETRY_LIMIT
} while (!SDIOH_API_SUCCESS(status) && (retry++ < SDIOH_API_ACCESS_RETRY_LIMIT));
-#endif // endif
+#endif
if (err)
*err = (SDIOH_API_SUCCESS(status) ? 0 : BCME_SDIO_ERROR);
SDIOH_API_RC status;
#ifdef SDIOH_API_ACCESS_RETRY_LIMIT
int32 retry = 0;
-#endif // endif
+#endif
if (!bcmsdh)
bcmsdh = l_bcmsdh;
do {
if (retry) /* wait for 1 ms till bus get settled down */
OSL_DELAY(1000);
-#endif // endif
+#endif
status = sdioh_cfg_write(bcmsdh->sdioh, fnc_num, addr, (uint8 *)&data);
#ifdef SDIOH_API_ACCESS_RETRY_LIMIT
} while (!SDIOH_API_SUCCESS(status) && (retry++ < SDIOH_API_ACCESS_RETRY_LIMIT));
-#endif // endif
+#endif
if (err)
*err = SDIOH_API_SUCCESS(status) ? 0 : BCME_SDIO_ERROR;
addr, data));
}
+
int
bcmsdh_cis_read(void *sdh, uint func, uint8 *cis, uint length)
{
ASSERT(bcmsdh->init_success);
- if (bcmsdhsdio_set_sbaddr_window(bcmsdh, addr, bcmsdh->force_sbwad_calc)) {
+ if (bcmsdhsdio_set_sbaddr_window(bcmsdh, addr, FALSE)) {
bcmsdh->regfail = TRUE; // terence 20130621: prevent dhd_dpc in dead lock
return 0xFFFFFFFF;
}
uint32
bcmsdh_get_dstatus(void *sdh)
{
-#ifdef BCMSPI
- bcmsdh_info_t *p = (bcmsdh_info_t *)sdh;
- sdioh_info_t *sd = (sdioh_info_t *)(p->sdioh);
- return sdioh_get_dstatus(sd);
-#else
return 0;
-#endif /* BCMSPI */
}
uint32
bcmsdh_cur_sbwad(void *sdh)
void
bcmsdh_chipinfo(void *sdh, uint32 chip, uint32 chiprev)
{
-#ifdef BCMSPI
- bcmsdh_info_t *p = (bcmsdh_info_t *)sdh;
- sdioh_info_t *sd = (sdioh_info_t *)(p->sdioh);
- sdioh_chipinfo(sd, chip, chiprev);
-#else
return;
-#endif /* BCMSPI */
}
-#ifdef BCMSPI
-void
-bcmsdh_dwordmode(void *sdh, bool set)
-{
- bcmsdh_info_t *p = (bcmsdh_info_t *)sdh;
- sdioh_info_t *sd = (sdioh_info_t *)(p->sdioh);
- sdioh_dwordmode(sd, set);
- return;
-}
-#endif /* BCMSPI */
int
bcmsdh_sleep(void *sdh, bool enab)
return sdioh_sleep(sd, enab);
#else
return BCME_UNSUPPORTED;
-#endif // endif
+#endif
}
int
/*
* SDIO access interface for drivers - linux specific (pci only)
*
- * Copyright (C) 1999-2019, Broadcom.
- *
+ * Copyright (C) 1999-2017, Broadcom Corporation
+ *
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
* under the terms of the GNU General Public License version 2 (the "GPL"),
* available at http://www.broadcom.com/licenses/GPLv2.php, with the
* following added to such license:
- *
+ *
* As a special exception, the copyright holders of this software give you
* permission to link this software with independent modules, and to copy and
* distribute the resulting executable under terms of your choice, provided that
* the license of that module. An independent module is a module which is not
* derived from this software. The special exception does not apply to any
* modifications of the software.
- *
+ *
* Notwithstanding the above, under no circumstances may you combine this
* software in any way with any other Broadcom software provided under a license
* other than the GPL, without Broadcom's express prior written consent.
*
* <<Broadcom-WL-IPTag/Open:>>
*
- * $Id: bcmsdh_linux.c 689948 2017-03-14 05:21:03Z $
+ * $Id: bcmsdh_linux.c 672609 2016-11-29 07:00:46Z $
*/
/**
#include <bcmutils.h>
#include <dngl_stats.h>
#include <dhd.h>
+#if defined(CONFIG_ARCH_ODIN)
+#include <linux/platform_data/gpio-odin.h>
+#endif /* defined(CONFIG_ARCH_ODIN) */
#include <dhd_linux.h>
/* driver info, initialized when bcmsdh_register is called */
} bcmsdh_os_info_t;
/* debugging macros */
-#define SDLX_ERR(x) printf x
#define SDLX_MSG(x) printf x
/**
bcmsdh = bcmsdh_attach(osh, sdioh, &regs);
if (bcmsdh == NULL) {
- SDLX_ERR(("%s: bcmsdh_attach failed\n", __FUNCTION__));
+ SDLX_MSG(("%s: bcmsdh_attach failed\n", __FUNCTION__));
goto err;
}
bcmsdh_osinfo = MALLOC(osh, sizeof(bcmsdh_os_info_t));
if (bcmsdh_osinfo == NULL) {
- SDLX_ERR(("%s: failed to allocate bcmsdh_os_info_t\n", __FUNCTION__));
+ SDLX_MSG(("%s: failed to allocate bcmsdh_os_info_t\n", __FUNCTION__));
goto err;
}
bzero((char *)bcmsdh_osinfo, sizeof(bcmsdh_os_info_t));
bcmsdh_osinfo->oob_irq_num = wifi_platform_get_irq_number(adapter_info,
&bcmsdh_osinfo->oob_irq_flags);
if (bcmsdh_osinfo->oob_irq_num < 0) {
- SDLX_ERR(("%s: Host OOB irq is not defined\n", __FUNCTION__));
+ SDLX_MSG(("%s: Host OOB irq is not defined\n", __FUNCTION__));
goto err;
}
#endif /* defined(BCMLXSDMMC) */
bcmsdh_osinfo->context = drvinfo.probe((vendevid >> 16), (vendevid & 0xFFFF), bus_num,
slot_num, 0, bus_type, (void *)regs, osh, bcmsdh);
if (bcmsdh_osinfo->context == NULL) {
- SDLX_ERR(("%s: device attach failed\n", __FUNCTION__));
+ SDLX_MSG(("%s: device attach failed\n", __FUNCTION__));
goto err;
}
int bcmsdh_set_get_wake(bcmsdh_info_t *bcmsdh, int flag)
{
-#if defined(OOB_INTR_ONLY)
bcmsdh_os_info_t *bcmsdh_osinfo = bcmsdh->os_cxt;
unsigned long flags;
-#endif
- int ret = 0;
+ int ret;
-#if defined(OOB_INTR_ONLY)
spin_lock_irqsave(&bcmsdh_osinfo->oob_irq_spinlock, flags);
ret = bcmsdh->pkt_wake;
bcmsdh->pkt_wake = flag;
spin_unlock_irqrestore(&bcmsdh_osinfo->oob_irq_spinlock, flags);
-#endif
return ret;
}
#endif /* DHD_WAKE_STATUS */
SDLX_MSG(("%s: register client driver\n", __FUNCTION__));
error = bcmsdh_register_client_driver();
if (error)
- SDLX_ERR(("%s: failed %d\n", __FUNCTION__, error));
+ SDLX_MSG(("%s: failed %d\n", __FUNCTION__, error));
return error;
}
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0))
if (bcmsdh_pci_driver.node.next == NULL)
return;
-#endif // endif
+#endif
bcmsdh_unregister_client_driver();
}
return bcmsdh_osinfo->dev_wake_enabled;
}
-#if defined(OOB_INTR_ONLY) || defined(BCMSPI_ANDROID)
+#if defined(OOB_INTR_ONLY)
void bcmsdh_oob_intr_set(bcmsdh_info_t *bcmsdh, bool enable)
{
unsigned long flags;
bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)dev_id;
bcmsdh_os_info_t *bcmsdh_osinfo = bcmsdh->os_cxt;
-#ifndef BCMSPI_ANDROID
bcmsdh_oob_intr_set(bcmsdh, FALSE);
-#endif /* !BCMSPI_ANDROID */
bcmsdh_osinfo->oob_irq_handler(bcmsdh_osinfo->oob_irq_handler_context);
return IRQ_HANDLED;
bcmsdh_os_info_t *bcmsdh_osinfo = bcmsdh->os_cxt;
if (bcmsdh_osinfo->oob_irq_registered) {
- SDLX_ERR(("%s: irq is already registered\n", __FUNCTION__));
+ SDLX_MSG(("%s: irq is already registered\n", __FUNCTION__));
return -EBUSY;
}
#ifdef HW_OOB
- SDLX_MSG(("%s: HW_OOB irq=%d flags=0x%X\n", __FUNCTION__,
- (int)bcmsdh_osinfo->oob_irq_num, (int)bcmsdh_osinfo->oob_irq_flags));
+ printf("%s: HW_OOB irq=%d flags=0x%X\n", __FUNCTION__,
+ (int)bcmsdh_osinfo->oob_irq_num, (int)bcmsdh_osinfo->oob_irq_flags);
#else
- SDLX_MSG(("%s: SW_OOB irq=%d flags=0x%X\n", __FUNCTION__,
- (int)bcmsdh_osinfo->oob_irq_num, (int)bcmsdh_osinfo->oob_irq_flags));
+ printf("%s: SW_OOB irq=%d flags=0x%X\n", __FUNCTION__,
+ (int)bcmsdh_osinfo->oob_irq_num, (int)bcmsdh_osinfo->oob_irq_flags);
#endif
bcmsdh_osinfo->oob_irq_handler = oob_irq_handler;
bcmsdh_osinfo->oob_irq_handler_context = oob_irq_handler_context;
bcmsdh_osinfo->oob_irq_enabled = TRUE;
bcmsdh_osinfo->oob_irq_registered = TRUE;
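+ /* CONFIG_ARCH_ODIN builds request the OOB IRQ through the Odin-specific GPIO helper instead of request_irq() */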
+#if defined(CONFIG_ARCH_ODIN)
+ err = odin_gpio_sms_request_irq(bcmsdh_osinfo->oob_irq_num, wlan_oob_irq,
+ bcmsdh_osinfo->oob_irq_flags, "bcmsdh_sdmmc", bcmsdh);
+#else
err = request_irq(bcmsdh_osinfo->oob_irq_num, wlan_oob_irq,
bcmsdh_osinfo->oob_irq_flags, "bcmsdh_sdmmc", bcmsdh);
+#endif /* defined(CONFIG_ARCH_ODIN) */
if (err) {
- SDLX_ERR(("%s: request_irq failed with %d\n", __FUNCTION__, err));
+ SDLX_MSG(("%s: request_irq failed with %d\n", __FUNCTION__, err));
bcmsdh_osinfo->oob_irq_enabled = FALSE;
bcmsdh_osinfo->oob_irq_registered = FALSE;
return err;
SDLX_MSG(("%s: disable_irq_wake\n", __FUNCTION__));
bcmsdh_osinfo->oob_irq_wake_enabled = FALSE;
#else
-#if defined(CONFIG_ARCH_RHEA) || defined(CONFIG_ARCH_CAPRI)
- if (device_may_wakeup(bcmsdh_osinfo->dev)) {
-#endif /* CONFIG_ARCH_RHEA || CONFIG_ARCH_CAPRI */
- err = enable_irq_wake(bcmsdh_osinfo->oob_irq_num);
- if (err)
- SDLX_ERR(("%s: enable_irq_wake failed with %d\n", __FUNCTION__, err));
- else
- bcmsdh_osinfo->oob_irq_wake_enabled = TRUE;
-#if defined(CONFIG_ARCH_RHEA) || defined(CONFIG_ARCH_CAPRI)
- }
-#endif /* CONFIG_ARCH_RHEA || CONFIG_ARCH_CAPRI */
+/*
+ err = enable_irq_wake(bcmsdh_osinfo->oob_irq_num);
+ if (err)
+ SDLX_MSG(("%s: enable_irq_wake failed with %d\n", __FUNCTION__, err));
+ else
+*/
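+ /* enable_irq_wake() is commented out above; only the wake-enabled flag is updated here */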
+ bcmsdh_osinfo->oob_irq_wake_enabled = TRUE;
#endif
return 0;
void bcmsdh_oob_intr_unregister(bcmsdh_info_t *bcmsdh)
{
- int err = 0;
+ /*int err = 0;*/
bcmsdh_os_info_t *bcmsdh_osinfo = bcmsdh->os_cxt;
SDLX_MSG(("%s: Enter\n", __FUNCTION__));
SDLX_MSG(("%s: irq is not registered\n", __FUNCTION__));
return;
}
+/*
if (bcmsdh_osinfo->oob_irq_wake_enabled) {
-#if defined(CONFIG_ARCH_RHEA) || defined(CONFIG_ARCH_CAPRI)
- if (device_may_wakeup(bcmsdh_osinfo->dev)) {
-#endif /* CONFIG_ARCH_RHEA || CONFIG_ARCH_CAPRI */
- err = disable_irq_wake(bcmsdh_osinfo->oob_irq_num);
- if (!err)
- bcmsdh_osinfo->oob_irq_wake_enabled = FALSE;
-#if defined(CONFIG_ARCH_RHEA) || defined(CONFIG_ARCH_CAPRI)
- }
-#endif /* CONFIG_ARCH_RHEA || CONFIG_ARCH_CAPRI */
+ err = disable_irq_wake(bcmsdh_osinfo->oob_irq_num);
+ if (!err)
+ bcmsdh_osinfo->oob_irq_wake_enabled = FALSE;
}
+*/
if (bcmsdh_osinfo->oob_irq_enabled) {
disable_irq(bcmsdh_osinfo->oob_irq_num);
bcmsdh_osinfo->oob_irq_enabled = FALSE;
free_irq(bcmsdh_osinfo->oob_irq_num, bcmsdh);
bcmsdh_osinfo->oob_irq_registered = FALSE;
}
-#endif /* defined(OOB_INTR_ONLY) || defined(BCMSPI_ANDROID) */
+#endif
/* Module parameters specific to each host-controller driver */
extern char dhd_sdiod_uhsi_ds_override[2];
module_param_string(dhd_sdiod_uhsi_ds_override, dhd_sdiod_uhsi_ds_override, 2, 0);
-#endif // endif
+#endif
#ifdef BCMSDH_MODULE
EXPORT_SYMBOL(bcmsdh_attach);
#if defined(DHD_DEBUG)
EXPORT_SYMBOL(bcmsdh_intr_pending);
-#endif // endif
+#endif
#if defined(BT_OVER_SDIO)
EXPORT_SYMBOL(bcmsdh_btsdio_interface_init);
/*
* BCMSDH Function Driver for the native SDIO/MMC driver in the Linux Kernel
*
- * Copyright (C) 1999-2019, Broadcom.
- *
+ * Copyright (C) 1999-2017, Broadcom Corporation
+ *
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
* under the terms of the GNU General Public License version 2 (the "GPL"),
* available at http://www.broadcom.com/licenses/GPLv2.php, with the
* following added to such license:
- *
+ *
* As a special exception, the copyright holders of this software give you
* permission to link this software with independent modules, and to copy and
* distribute the resulting executable under terms of your choice, provided that
* the license of that module. An independent module is a module which is not
* derived from this software. The special exception does not apply to any
* modifications of the software.
- *
+ *
* Notwithstanding the above, under no circumstances may you combine this
* software in any way with any other Broadcom software provided under a license
* other than the GPL, without Broadcom's express prior written consent.
*
* <<Broadcom-WL-IPTag/Proprietary,Open:>>
*
- * $Id: bcmsdh_sdmmc.c 782528 2018-09-28 12:15:40Z $
+ * $Id: bcmsdh_sdmmc.c 710913 2017-07-14 10:17:51Z $
*/
#include <typedefs.h>
#include <sdiovar.h> /* ioctl/iovars */
#include <linux/mmc/core.h>
-#if (LINUX_VERSION_CODE <= KERNEL_VERSION(3, 0, 8))
+#if (LINUX_VERSION_CODE <= KERNEL_VERSION(3, 0, 0))
+#include <drivers/mmc/core/host.h>
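+/* No-op stubs for the host clock hold/release helpers on this kernel range */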
+void
+mmc_host_clk_hold(struct mmc_host *host)
+{
+ BCM_REFERENCE(host);
+ return;
+}
+
+void
+mmc_host_clk_release(struct mmc_host *host)
+{
+ BCM_REFERENCE(host);
+ return;
+}
+#elif (LINUX_VERSION_CODE <= KERNEL_VERSION(3, 0, 8))
#include <drivers/mmc/core/host.h>
#else
#include <linux/mmc/host.h>
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && defined(CONFIG_PM_SLEEP)
#include <linux/suspend.h>
extern volatile bool dhd_mmc_suspend;
-#endif // endif
+#endif
#include "bcmsdh_sdmmc.h"
-#if (LINUX_VERSION_CODE <= KERNEL_VERSION(3, 0, 0)) || (LINUX_VERSION_CODE >= \
- KERNEL_VERSION(4, 4, 0))
-static inline void
-mmc_host_clk_hold(struct mmc_host *host)
-{
- BCM_REFERENCE(host);
- return;
-}
-
-static inline void
-mmc_host_clk_release(struct mmc_host *host)
-{
- BCM_REFERENCE(host);
- return;
-}
-
-static inline unsigned int
-mmc_host_clk_rate(struct mmc_host *host)
-{
- return host->ios.clock;
-}
-#endif /* LINUX_VERSION_CODE <= KERNEL_VERSION(3, 0, 0) */
-
#ifndef BCMSDH_MODULE
extern int sdio_function_init(void);
extern void sdio_function_cleanup(void);
static int sdioh_sdmmc_get_cisaddr(sdioh_info_t *sd, uint32 regaddr);
#if defined(ENABLE_INSMOD_NO_FW_LOAD) && !defined(BUS_POWER_RESTORE)
extern int sdio_reset_comm(struct mmc_card *card);
+#else
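+/* Fallback stub: no SDIO bus reset is performed in this configuration */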
+int sdio_reset_comm(struct mmc_card *card)
+{
+ return 0;
+}
#endif
#ifdef GLOBAL_SDMMC_INSTANCE
extern PBCMSDH_SDMMC_INSTANCE gInstance;
#define DEFAULT_SDIO_F2_BLKSIZE 512
#ifndef CUSTOM_SDIO_F2_BLKSIZE
#define CUSTOM_SDIO_F2_BLKSIZE DEFAULT_SDIO_F2_BLKSIZE
-#endif // endif
+#endif
#define DEFAULT_SDIO_F1_BLKSIZE 64
#ifndef CUSTOM_SDIO_F1_BLKSIZE
#define CUSTOM_SDIO_F1_BLKSIZE DEFAULT_SDIO_F1_BLKSIZE
-#endif // endif
+#endif
#define MAX_IO_RW_EXTENDED_BLK 511
uint sd_power = 1; /* Default to SD Slot powered ON */
uint sd_clock = 1; /* Default to SD Clock turned ON */
uint sd_hiok = FALSE; /* Don't use hi-speed mode by default */
-uint sd_msglevel = SDH_ERROR_VAL;
+uint sd_msglevel = 0x01;
uint sd_use_dma = TRUE;
#ifndef CUSTOM_RXCHAIN
#define CUSTOM_RXCHAIN 0
-#endif // endif
+#endif
DHD_PM_RESUME_WAIT_INIT(sdioh_request_byte_wait);
DHD_PM_RESUME_WAIT_INIT(sdioh_request_word_wait);
int sdioh_sdmmc_card_regread(sdioh_info_t *sd, int func, uint32 regaddr, int regsize, uint32 *data);
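+/* Forward declarations for the SDIO clock-control helpers */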
+void sdmmc_set_clock_rate(sdioh_info_t *sd, uint hz);
+uint sdmmc_get_clock_rate(sdioh_info_t *sd);
+void sdmmc_set_clock_divisor(sdioh_info_t *sd, uint sd_div);
#if defined(BT_OVER_SDIO)
extern
void sdioh_sdmmc_card_enable_func_f3(sdioh_info_t *sd, struct sdio_func *func)
}
#endif /* defined (BT_OVER_SDIO) */
-void sdmmc_set_clock_rate(sdioh_info_t *sd, uint hz);
-uint sdmmc_get_clock_rate(sdioh_info_t *sd);
-void sdmmc_set_clock_divisor(sdioh_info_t *sd, uint sd_div);
-
static int
sdioh_sdmmc_card_enablefuncs(sdioh_info_t *sd)
{
return NULL;
}
+
extern SDIOH_API_RC
sdioh_detach(osl_t *osh, sdioh_info_t *sd)
{
reg &= ~(INTR_CTL_FUNC1_EN | INTR_CTL_FUNC2_EN);
#if defined(BT_OVER_SDIO)
reg &= ~INTR_CTL_FUNC3_EN;
-#endif // endif
+#endif
/* Disable master interrupt with the last function interrupt */
if (!(reg & 0xFE))
reg = 0;
{
return (0);
}
-#endif // endif
+#endif
uint
sdioh_query_iofnum(sdioh_info_t *sd)
/* Now set it */
si->client_block_size[func] = blksize;
-#ifdef USE_DYNAMIC_F2_BLKSIZE
if (si->func[func] == NULL) {
sd_err(("%s: SDIO Device not present\n", __FUNCTION__));
bcmerror = BCME_NORESOURCE;
sd_err(("%s: Failed to set F%d blocksize to %d(%d)\n",
__FUNCTION__, func, blksize, bcmerror));
sdio_release_host(si->func[func]);
-#endif /* USE_DYNAMIC_F2_BLKSIZE */
break;
}
int err_ret = 0;
#if defined(MMC_SDIO_ABORT)
int sdio_abort_retry = MMC_SDIO_ABORT_RETRY_LIMIT;
-#endif // endif
- struct osl_timespec now, before;
+#endif
+ struct timespec now, before;
if (sd_msglevel & SDH_COST_VAL)
- osl_do_gettimeofday(&before);
+ getnstimeofday(&before);
sd_info(("%s: rw=%d, func=%d, addr=0x%05x\n", __FUNCTION__, rw, func, regaddr));
}
}
#endif /* MMC_SDIO_ABORT */
- /* to allow abort command through F1 */
-#if defined(SDIO_ISR_THREAD)
- else if (regaddr == SDIOD_CCCR_INTR_EXTN) {
- while (sdio_abort_retry--) {
- if (sd->func[func]) {
- sdio_claim_host(sd->func[func]);
- /*
- * this sdio_f0_writeb() can be replaced with
- * another api depending upon MMC driver change.
- * As of this time, this is temporaray one
- */
- sdio_writeb(sd->func[func],
- *byte, regaddr, &err_ret);
- sdio_release_host(sd->func[func]);
- }
- if (!err_ret)
- break;
- }
- }
-#endif
else if (regaddr < 0xF0) {
sd_err(("bcmsdh_sdmmc: F0 Wr:0x%02x: write disallowed\n", regaddr));
} else {
}
if (sd_msglevel & SDH_COST_VAL) {
- osl_do_gettimeofday(&now);
+ getnstimeofday(&now);
sd_cost(("%s: rw=%d len=1 cost=%lds %luus\n", __FUNCTION__,
rw, now.tv_sec-before.tv_sec, now.tv_nsec/1000-before.tv_nsec/1000));
}
int err_ret2 = SDIOH_API_RC_SUCCESS; // terence 20130621: prevent dhd_dpc in dead lock
#if defined(MMC_SDIO_ABORT)
int sdio_abort_retry = MMC_SDIO_ABORT_RETRY_LIMIT;
-#endif // endif
- struct osl_timespec now, before;
+#endif
+ struct timespec now, before;
if (sd_msglevel & SDH_COST_VAL)
- osl_do_gettimeofday(&before);
+ getnstimeofday(&before);
if (func == 0) {
sd_err(("%s: Only CMD52 allowed to F0.\n", __FUNCTION__));
}
if (sd_msglevel & SDH_COST_VAL) {
- osl_do_gettimeofday(&now);
+ getnstimeofday(&now);
sd_cost(("%s: rw=%d, len=%d cost=%lds %luus\n", __FUNCTION__,
rw, nbytes, now.tv_sec-before.tv_sec, now.tv_nsec/1000 - before.tv_nsec/1000));
}
uint8 *localbuf = NULL;
uint local_plen = 0;
uint pkt_len = 0;
- struct osl_timespec now, before;
+ struct timespec now, before;
sd_trace(("%s: Enter\n", __FUNCTION__));
ASSERT(pkt);
DHD_PM_RESUME_RETURN_ERROR(SDIOH_API_RC_FAIL);
if (sd_msglevel & SDH_COST_VAL)
- osl_do_gettimeofday(&before);
+ getnstimeofday(&before);
blk_size = sd->client_block_size[func];
max_blk_count = min(host->max_blk_count, (uint)MAX_IO_RW_EXTENDED_BLK);
* a restriction on max tx/glom count (based on host->max_segs).
*/
if (sg_count >= ARRAYSIZE(sd->sg_list)) {
- sd_err(("%s: sg list entries(%u) exceed limit(%zu),"
- " sd blk_size=%u\n",
- __FUNCTION__, sg_count, (size_t)ARRAYSIZE(sd->sg_list), blk_size));
+ sd_err(("%s: sg list entries exceed limit %d\n", __FUNCTION__, sg_count));
return (SDIOH_API_RC_FAIL);
}
pdata += pkt_offset;
* DMA descriptor, use multiple sg buffers when xfer_size is bigger than
* max_seg_size
*/
- if (sg_data_size > host->max_seg_size) {
+ if (sg_data_size > host->max_seg_size)
sg_data_size = host->max_seg_size;
- }
sg_set_buf(&sd->sg_list[sg_count++], pdata, sg_data_size);
ttl_len += sg_data_size;
goto txglomfail;
}
}
-
+
bcopy(buf, (localbuf + local_plen), pkt_len);
local_plen += pkt_len;
- if (PKTNEXT(sd->osh, pnext))
+ if (PKTNEXT(sd->osh, pnext))
continue;
buf = localbuf;
pkt_len += blk_size - (pkt_len % blk_size);
if ((write) && (!fifo))
- err_ret = sdio_memcpy_toio(sd->func[func], addr, buf, pkt_len);
+ err_ret = sdio_memcpy_toio(
+ sd->func[func],
+ addr, buf, pkt_len);
else if (write)
- err_ret = sdio_memcpy_toio(sd->func[func], addr, buf, pkt_len);
+ err_ret = sdio_memcpy_toio(
+ sd->func[func],
+ addr, buf, pkt_len);
else if (fifo)
- err_ret = sdio_readsb(sd->func[func], buf, addr, pkt_len);
+ err_ret = sdio_readsb(
+ sd->func[func],
+ buf, addr, pkt_len);
else
- err_ret = sdio_memcpy_fromio(sd->func[func], buf, addr, pkt_len);
+ err_ret = sdio_memcpy_fromio(
+ sd->func[func],
+ buf, addr, pkt_len);
if (err_ret)
sd_err(("%s: %s FAILED %p[%d], addr=0x%05x, pkt_len=%d, ERR=%d\n",
MFREE(sd->osh, localbuf, ttl_len);
if (sd_msglevel & SDH_COST_VAL) {
- osl_do_gettimeofday(&now);
+ getnstimeofday(&now);
sd_cost(("%s: rw=%d, ttl_len=%d, cost=%lds %luus\n", __FUNCTION__,
write, ttl_len, now.tv_sec-before.tv_sec, now.tv_nsec/1000-before.tv_nsec/1000));
}
{
bool fifo = (fix_inc == SDIOH_DATA_FIX);
int err_ret = 0;
- struct osl_timespec now, before;
+ struct timespec now, before;
sd_trace(("%s: Enter\n", __FUNCTION__));
ASSERT(buf);
if (sd_msglevel & SDH_COST_VAL)
- osl_do_gettimeofday(&before);
+ getnstimeofday(&before);
/* NOTE:
* For all writes, each packet length is aligned to 32 (or 4)
sd_trace(("%s: Exit\n", __FUNCTION__));
if (sd_msglevel & SDH_COST_VAL) {
- osl_do_gettimeofday(&now);
+ getnstimeofday(&now);
sd_cost(("%s: rw=%d, len=%d cost=%lds %luus\n", __FUNCTION__,
write, len, now.tv_sec-before.tv_sec, now.tv_nsec/1000 - before.tv_nsec/1000));
}
return ((err_ret == 0) ? SDIOH_API_RC_SUCCESS : SDIOH_API_RC_FAIL);
}
+
/*
* This function takes a buffer or packet, and fixes everything up so that in the
* end, a DMA-able packet is created.
{
SDIOH_API_RC status;
void *tmppkt;
- struct osl_timespec now, before;
+ struct timespec now, before;
sd_trace(("%s: Enter\n", __FUNCTION__));
DHD_PM_RESUME_WAIT(sdioh_request_buffer_wait);
DHD_PM_RESUME_RETURN_ERROR(SDIOH_API_RC_FAIL);
if (sd_msglevel & SDH_COST_VAL)
- osl_do_gettimeofday(&before);
+ getnstimeofday(&before);
if (pkt) {
#ifdef BCMSDIOH_TXGLOM
PKTFREE_STATIC(sd->osh, tmppkt, write ? TRUE : FALSE);
if (sd_msglevel & SDH_COST_VAL) {
- osl_do_gettimeofday(&now);
+ getnstimeofday(&now);
sd_cost(("%s: len=%d cost=%lds %luus\n", __FUNCTION__,
buf_len, now.tv_sec-before.tv_sec, now.tv_nsec/1000 - before.tv_nsec/1000));
}
if (sdioh_request_word(sd, 0, SDIOH_READ, func, regaddr, data, regsize)) {
return BCME_SDIO_ERROR;
}
+
if (regsize == 2)
*data &= 0xffff;
2.6.27. The implementation prior to that is buggy, and needs broadcom's
patch for it
*/
-#if defined(ENABLE_INSMOD_NO_FW_LOAD) && !defined(BUS_POWER_RESTORE)
if ((ret = sdio_reset_comm(sd->func[0]->card))) {
sd_err(("%s Failed, error = %d\n", __FUNCTION__, ret));
return ret;
- } else
-#endif
- {
+ } else {
sd->num_funcs = 2;
sd->sd_blockmode = TRUE;
sd->use_client_ints = TRUE;
#else /* defined(OOB_INTR_ONLY) */
#if defined(HW_OOB)
sdioh_enable_func_intr(sd);
-#endif // endif
+#endif
bcmsdh_oob_intr_set(sd->bcmsdh, TRUE);
#endif /* !defined(OOB_INTR_ONLY) */
}
#else /* defined(OOB_INTR_ONLY) */
#if defined(HW_OOB)
sdioh_disable_func_intr(sd);
-#endif // endif
+#endif
bcmsdh_oob_intr_set(sd->bcmsdh, FALSE);
#endif /* !defined(OOB_INTR_ONLY) */
}
return (1);
}
+
SDIOH_API_RC
sdioh_gpioouten(sdioh_info_t *sd, uint32 gpio)
{
uint
sdmmc_get_clock_rate(sdioh_info_t *sd)
{
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 0)) || (LINUX_VERSION_CODE < KERNEL_VERSION(3, 4, 0))
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 0))
return 0;
#else
struct sdio_func *sdio_func = sd->func[0];
#endif
}
+
void
sdmmc_set_clock_rate(sdioh_info_t *sd, uint hz)
{
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 0)) || (LINUX_VERSION_CODE < KERNEL_VERSION(3, 4, 0))
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 0))
return;
#else
struct sdio_func *sdio_func = sd->func[0];
/*
* BCMSDH Function Driver for the native SDIO/MMC driver in the Linux Kernel
*
- * Copyright (C) 1999-2019, Broadcom.
- *
+ * Copyright (C) 1999-2017, Broadcom Corporation
+ *
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
* under the terms of the GNU General Public License version 2 (the "GPL"),
* available at http://www.broadcom.com/licenses/GPLv2.php, with the
* following added to such license:
- *
+ *
* As a special exception, the copyright holders of this software give you
* permission to link this software with independent modules, and to copy and
* distribute the resulting executable under terms of your choice, provided that
* the license of that module. An independent module is a module which is not
* derived from this software. The special exception does not apply to any
* modifications of the software.
- *
+ *
* Notwithstanding the above, under no circumstances may you combine this
* software in any way with any other Broadcom software provided under a license
* other than the GPL, without Broadcom's express prior written consent.
*
* <<Broadcom-WL-IPTag/Proprietary,Open:>>
*
- * $Id: bcmsdh_sdmmc_linux.c 825481 2019-06-14 10:06:03Z $
+ * $Id: bcmsdh_sdmmc_linux.c 644124 2016-06-17 07:59:34Z $
*/
#include <typedefs.h>
#include <dhd_linux.h>
#include <bcmsdh_sdmmc.h>
#include <dhd_dbg.h>
-#include <bcmdevs.h>
#if !defined(SDIO_VENDOR_ID_BROADCOM)
#define SDIO_VENDOR_ID_BROADCOM 0x02d0
#define SDIO_DEVICE_ID_BROADCOM_DEFAULT 0x0000
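+/* Provide fallback definitions for Broadcom SDIO device IDs the kernel headers may lack */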
+#if !defined(SDIO_DEVICE_ID_BROADCOM_4325_SDGWB)
+#define SDIO_DEVICE_ID_BROADCOM_4325_SDGWB 0x0492 /* BCM94325SDGWB */
+#endif /* !defined(SDIO_DEVICE_ID_BROADCOM_4325_SDGWB) */
+#if !defined(SDIO_DEVICE_ID_BROADCOM_4325)
+#define SDIO_DEVICE_ID_BROADCOM_4325 0x0493
+#endif /* !defined(SDIO_DEVICE_ID_BROADCOM_4325) */
+#if !defined(SDIO_DEVICE_ID_BROADCOM_4329)
+#define SDIO_DEVICE_ID_BROADCOM_4329 0x4329
+#endif /* !defined(SDIO_DEVICE_ID_BROADCOM_4329) */
+#if !defined(SDIO_DEVICE_ID_BROADCOM_4319)
+#define SDIO_DEVICE_ID_BROADCOM_4319 0x4319
+#endif /* !defined(SDIO_DEVICE_ID_BROADCOM_4319) */
+#if !defined(SDIO_DEVICE_ID_BROADCOM_4330)
+#define SDIO_DEVICE_ID_BROADCOM_4330 0x4330
+#endif /* !defined(SDIO_DEVICE_ID_BROADCOM_4330) */
+#if !defined(SDIO_DEVICE_ID_BROADCOM_4334)
+#define SDIO_DEVICE_ID_BROADCOM_4334 0x4334
+#endif /* !defined(SDIO_DEVICE_ID_BROADCOM_4334) */
+#if !defined(SDIO_DEVICE_ID_BROADCOM_4324)
+#define SDIO_DEVICE_ID_BROADCOM_4324 0x4324
+#endif /* !defined(SDIO_DEVICE_ID_BROADCOM_4324) */
+#if !defined(SDIO_DEVICE_ID_BROADCOM_43239)
+#define SDIO_DEVICE_ID_BROADCOM_43239 43239
+#endif /* !defined(SDIO_DEVICE_ID_BROADCOM_43239) */
+
extern void wl_cfg80211_set_parent_dev(void *dev);
extern void sdioh_sdmmc_devintr_off(sdioh_info_t *sd);
extern void sdioh_sdmmc_devintr_on(sdioh_info_t *sd);
#ifdef WL_CFG80211
wl_cfg80211_set_parent_dev(&func->dev);
-#endif // endif
+#endif
/* allocate SDIO Host Controller state info */
osh = osl_attach(&func->dev, SDIO_BUS, TRUE);
/* devices we support, null terminated */
static const struct sdio_device_id bcmsdh_sdmmc_ids[] = {
{ SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, SDIO_DEVICE_ID_BROADCOM_DEFAULT) },
- { SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, BCM4362_CHIP_ID) },
- { SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, BCM43751_CHIP_ID) },
- { SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, BCM43752_CHIP_ID) },
- { SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, BCM43012_CHIP_ID) },
- { SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, BCM43014_CHIP_ID) },
- { SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, BCM43014_D11N_ID) },
- { SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, BCM43014_D11N2G_ID) },
- { SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, BCM43014_D11N5G_ID) },
- /* { SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, SDIO_ANY_ID) }, */
+ { SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, SDIO_DEVICE_ID_BROADCOM_4325_SDGWB) },
+ { SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, SDIO_DEVICE_ID_BROADCOM_4325) },
+ { SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, SDIO_DEVICE_ID_BROADCOM_4329) },
+ { SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, SDIO_DEVICE_ID_BROADCOM_4319) },
+ { SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, SDIO_DEVICE_ID_BROADCOM_4330) },
+ { SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, SDIO_DEVICE_ID_BROADCOM_4334) },
+ { SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, SDIO_DEVICE_ID_BROADCOM_4324) },
+ { SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, SDIO_DEVICE_ID_BROADCOM_43239) },
{ SDIO_DEVICE_CLASS(SDIO_CLASS_NONE) },
- /* end: all zeroes */
- { 0, 0, 0, 0},
+ { 0, 0, 0, 0 }, /* end: all zeroes */
};
MODULE_DEVICE_TABLE(sdio, bcmsdh_sdmmc_ids);
/*
* Broadcom SPI Host Controller Driver - Linux Per-port
*
- * Copyright (C) 1999-2019, Broadcom.
- *
+ * Copyright (C) 1999-2017, Broadcom Corporation
+ *
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
* under the terms of the GNU General Public License version 2 (the "GPL"),
* available at http://www.broadcom.com/licenses/GPLv2.php, with the
* following added to such license:
- *
+ *
* As a special exception, the copyright holders of this software give you
* permission to link this software with independent modules, and to copy and
* distribute the resulting executable under terms of your choice, provided that
* the license of that module. An independent module is a module which is not
* derived from this software. The special exception does not apply to any
* modifications of the software.
- *
+ *
* Notwithstanding the above, under no circumstances may you combine this
* software in any way with any other Broadcom software provided under a license
* other than the GPL, without Broadcom's express prior written consent.
#include <bcmsdbus.h> /* bcmsdh to/from specific controller APIs */
#include <sdiovar.h> /* to get msglevel bit values */
-#ifdef BCMSPI_ANDROID
-#include <bcmsdh.h>
-#include <bcmspibrcm.h>
-#include <linux/spi/spi.h>
-#else
#include <pcicfg.h>
#include <sdio.h> /* SDIO Device and Protocol Specs */
#include <linux/sched.h> /* request_irq(), free_irq() */
#include <bcmsdspi.h>
#include <bcmspi.h>
-#endif /* BCMSPI_ANDROID */
-#ifndef BCMSPI_ANDROID
extern uint sd_crc;
module_param(sd_crc, uint, 0);
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0))
#define KERNEL26
-#endif // endif
-#endif /* !BCMSPI_ANDROID */
+#endif
struct sdos_info {
sdioh_info_t *sd;
spinlock_t lock;
-#ifndef BCMSPI_ANDROID
wait_queue_head_t intr_wait_queue;
-#endif /* !BCMSPI_ANDROID */
};
-#ifndef BCMSPI_ANDROID
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0))
#define BLOCKABLE() (!in_atomic())
#else
#define BLOCKABLE() (!in_interrupt())
-#endif // endif
+#endif
/* Interrupt handler */
static irqreturn_t
sdspi_isr(int irq, void *dev_id
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20)
, struct pt_regs *ptregs
-#endif // endif
+#endif
)
{
sdioh_info_t *sd;
return IRQ_RETVAL(ours);
}
}
-#endif /* !BCMSPI_ANDROID */
-
-#ifdef BCMSPI_ANDROID
-static struct spi_device *gBCMSPI = NULL;
-
-extern int bcmsdh_probe(struct device *dev);
-extern int bcmsdh_remove(struct device *dev);
-
-static int bcmsdh_spi_probe(struct spi_device *spi_dev)
-{
- int ret = 0;
-
- gBCMSPI = spi_dev;
-
-#ifdef SPI_PIO_32BIT_RW
- spi_dev->bits_per_word = 32;
-#else
- spi_dev->bits_per_word = 8;
-#endif /* SPI_PIO_32BIT_RW */
- ret = spi_setup(spi_dev);
-
- if (ret) {
- sd_err(("bcmsdh_spi_probe: spi_setup fail with %d\n", ret));
- }
- sd_err(("bcmsdh_spi_probe: spi_setup with %d, bits_per_word=%d\n",
- ret, spi_dev->bits_per_word));
- ret = bcmsdh_probe(&spi_dev->dev);
-
- return ret;
-}
-static int bcmsdh_spi_remove(struct spi_device *spi_dev)
-{
- int ret = 0;
-
- ret = bcmsdh_remove(&spi_dev->dev);
- gBCMSPI = NULL;
-
- return ret;
-}
-
-static struct spi_driver bcmsdh_spi_driver = {
- .probe = bcmsdh_spi_probe,
- .remove = bcmsdh_spi_remove,
- .driver = {
- .name = "wlan_spi",
- .bus = &spi_bus_type,
- .owner = THIS_MODULE,
- },
-};
-
-/*
- * module init
-*/
-int bcmsdh_register_client_driver(void)
-{
- int error = 0;
- sd_trace(("bcmsdh_gspi: %s Enter\n", __FUNCTION__));
-
- error = spi_register_driver(&bcmsdh_spi_driver);
-
- return error;
-}
-
-/*
- * module cleanup
-*/
-void bcmsdh_unregister_client_driver(void)
-{
- sd_trace(("%s Enter\n", __FUNCTION__));
- spi_unregister_driver(&bcmsdh_spi_driver);
-}
-#endif /* BCMSPI_ANDROID */
/* Register with Linux for interrupts */
int
spi_register_irq(sdioh_info_t *sd, uint irq)
{
-#ifndef BCMSPI_ANDROID
sd_trace(("Entering %s: irq == %d\n", __FUNCTION__, irq));
if (request_irq(irq, sdspi_isr, IRQF_SHARED, "bcmsdspi", sd) < 0) {
sd_err(("%s: request_irq() failed\n", __FUNCTION__));
return ERROR;
}
-#endif /* !BCMSPI_ANDROID */
return SUCCESS;
}
void
spi_free_irq(uint irq, sdioh_info_t *sd)
{
-#ifndef BCMSPI_ANDROID
free_irq(irq, sd);
-#endif /* !BCMSPI_ANDROID */
}
/* Map Host controller registers */
-#ifndef BCMSPI_ANDROID
uint32 *
spi_reg_map(osl_t *osh, uintptr addr, int size)
{
{
REG_UNMAP((void*)(uintptr)addr);
}
-#endif /* !BCMSPI_ANDROID */
int
spi_osinit(sdioh_info_t *sd)
sdos->sd = sd;
spin_lock_init(&sdos->lock);
-#ifndef BCMSPI_ANDROID
init_waitqueue_head(&sdos->intr_wait_queue);
-#endif /* !BCMSPI_ANDROID */
return BCME_OK;
}
return SDIOH_API_RC_FAIL;
}
-#ifndef BCMSPI_ANDROID
if (enable && !(sd->intr_handler && sd->intr_handler_arg)) {
sd_err(("%s: no handler registered, will not enable\n", __FUNCTION__));
return SDIOH_API_RC_FAIL;
}
-#endif /* !BCMSPI_ANDROID */
/* Ensure atomicity for enable/disable calls */
spin_lock_irqsave(&sdos->lock, flags);
sd->client_intr_enabled = enable;
-#ifndef BCMSPI_ANDROID
if (enable && !sd->lockcount)
spi_devintr_on(sd);
else
spi_devintr_off(sd);
-#endif /* !BCMSPI_ANDROID */
spin_unlock_irqrestore(&sdos->lock, flags);
sd_err(("%s: Already locked!\n", __FUNCTION__));
ASSERT(sd->lockcount == 0);
}
-#ifdef BCMSPI_ANDROID
- if (sd->client_intr_enabled)
- bcmsdh_oob_intr_set(0);
-#else
spi_devintr_off(sd);
-#endif /* BCMSPI_ANDROID */
sd->lockcount++;
spin_unlock_irqrestore(&sdos->lock, flags);
}
spin_lock_irqsave(&sdos->lock, flags);
if (--sd->lockcount == 0 && sd->client_intr_enabled) {
-#ifdef BCMSPI_ANDROID
- bcmsdh_oob_intr_set(1);
-#else
spi_devintr_on(sd);
-#endif /* BCMSPI_ANDROID */
}
spin_unlock_irqrestore(&sdos->lock, flags);
}
-#ifndef BCMSPI_ANDROID
void spi_waitbits(sdioh_info_t *sd, bool yield)
{
#ifndef BCMSDYIELD
ASSERT(!yield);
-#endif // endif
+#endif
sd_trace(("%s: yield %d canblock %d\n",
__FUNCTION__, yield, BLOCKABLE()));
}
}
-#else /* !BCMSPI_ANDROID */
-int bcmgspi_dump = 0; /* Set to dump complete trace of all SPI bus transactions */
-
-static void
-hexdump(char *pfx, unsigned char *msg, int msglen)
-{
- int i, col;
- char buf[80];
-
- ASSERT(strlen(pfx) + 49 <= sizeof(buf));
-
- col = 0;
-
- for (i = 0; i < msglen; i++, col++) {
- if (col % 16 == 0)
- strcpy(buf, pfx);
- sprintf(buf + strlen(buf), "%02x", msg[i]);
- if ((col + 1) % 16 == 0)
- printf("%s\n", buf);
- else
- sprintf(buf + strlen(buf), " ");
- }
-
- if (col % 16 != 0)
- printf("%s\n", buf);
-}
-
-/* Send/Receive an SPI Packet */
-void
-spi_sendrecv(sdioh_info_t *sd, uint8 *msg_out, uint8 *msg_in, int msglen)
-{
- int write = 0;
- int tx_len = 0;
- struct spi_message msg;
- struct spi_transfer t[2];
-
- spi_message_init(&msg);
- memset(t, 0, 2*sizeof(struct spi_transfer));
-
- if (sd->wordlen == 2)
-#if !(defined(SPI_PIO_RW_BIGENDIAN) && defined(SPI_PIO_32BIT_RW))
- write = msg_out[2] & 0x80;
-#else
- write = msg_out[1] & 0x80;
-#endif /* !(defined(SPI_PIO_RW_BIGENDIAN) && defined(SPI_PIO_32BIT_RW)) */
- if (sd->wordlen == 4)
-#if !(defined(SPI_PIO_RW_BIGENDIAN) && defined(SPI_PIO_32BIT_RW))
- write = msg_out[0] & 0x80;
-#else
- write = msg_out[3] & 0x80;
-#endif /* !(defined(SPI_PIO_RW_BIGENDIAN) && defined(SPI_PIO_32BIT_RW)) */
-
- if (bcmgspi_dump) {
- hexdump(" OUT: ", msg_out, msglen);
- }
-
- tx_len = write ? msglen-4 : 4;
-
- sd_trace(("spi_sendrecv: %s, wordlen %d, cmd : 0x%02x 0x%02x 0x%02x 0x%02x\n",
- write ? "WR" : "RD", sd->wordlen,
- msg_out[0], msg_out[1], msg_out[2], msg_out[3]));
-
- t[0].tx_buf = (char *)&msg_out[0];
- t[0].rx_buf = 0;
- t[0].len = tx_len;
-
- spi_message_add_tail(&t[0], &msg);
-
- t[1].rx_buf = (char *)&msg_in[tx_len];
- t[1].tx_buf = 0;
- t[1].len = msglen-tx_len;
-
- spi_message_add_tail(&t[1], &msg);
- spi_sync(gBCMSPI, &msg);
-
- if (bcmgspi_dump) {
- hexdump(" IN : ", msg_in, msglen);
- }
-}
-#endif /* !BCMSPI_ANDROID */
/*
* Broadcom BCMSDH to gSPI Protocol Conversion Layer
*
- * Copyright (C) 1999-2019, Broadcom.
- *
+ * Copyright (C) 1999-2017, Broadcom Corporation
+ *
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
* under the terms of the GNU General Public License version 2 (the "GPL"),
* available at http://www.broadcom.com/licenses/GPLv2.php, with the
* following added to such license:
- *
+ *
* As a special exception, the copyright holders of this software give you
* permission to link this software with independent modules, and to copy and
* distribute the resulting executable under terms of your choice, provided that
* the license of that module. An independent module is a module which is not
* derived from this software. The special exception does not apply to any
* modifications of the software.
- *
+ *
* Notwithstanding the above, under no circumstances may you combine this
* software in any way with any other Broadcom software provided under a license
* other than the GPL, without Broadcom's express prior written consent.
*
* <<Broadcom-WL-IPTag/Open:>>
*
- * $Id: bcmspibrcm.c 700323 2017-05-18 16:12:11Z $
+ * $Id: bcmspibrcm.c 611787 2016-01-12 06:07:27Z $
*/
#define HSMODE
#include <pcicfg.h>
+
#include <bcmspibrcm.h>
-#ifdef BCMSPI_ANDROID
-extern void spi_sendrecv(sdioh_info_t *sd, uint8 *msg_out, uint8 *msg_in, int msglen);
-#else
#include <bcmspi.h>
-#endif /* BCMSPI_ANDROID */
/* these are for the older cores... for newer cores we have control for each of them */
#define F0_RESPONSE_DELAY 16
#define F1_RESPONSE_DELAY 16
#define F2_RESPONSE_DELAY F0_RESPONSE_DELAY
+
#define GSPI_F0_RESP_DELAY 0
#define GSPI_F1_RESP_DELAY F1_RESPONSE_DELAY
#define GSPI_F2_RESP_DELAY 0
uint sd_msglevel = SDH_ERROR_VAL;
#else
uint sd_msglevel = 0;
-#endif // endif
+#endif
uint sd_hiok = FALSE; /* Use hi-speed mode if available? */
uint sd_sdmode = SDIOH_MODE_SPI; /* Use SD4 mode by default */
uint sd_f2_blocksize = 64; /* Default blocksize */
+
uint sd_divisor = 2;
uint sd_power = 1; /* Default to SD Slot powered ON */
uint sd_clock = 1; /* Default to SD Clock turned ON */
#define BUF2_PKT_LEN 128
uint8 spi_outbuf2[BUF2_PKT_LEN];
uint8 spi_inbuf2[BUF2_PKT_LEN];
-#ifdef BCMSPI_ANDROID
-uint *dhd_spi_lockcount = NULL;
-#endif /* BCMSPI_ANDROID */
-#if !(defined(SPI_PIO_RW_BIGENDIAN) && defined(SPI_PIO_32BIT_RW))
#define SPISWAP_WD4(x) bcmswap32(x);
#define SPISWAP_WD2(x) (bcmswap16(x & 0xffff)) | \
(bcmswap16((x & 0xffff0000) >> 16) << 16);
-#else
-#define SPISWAP_WD4(x) x;
-#define SPISWAP_WD2(x) bcmswap32by16(x);
-#endif // endif
/* Prototypes */
static bool bcmspi_test_card(sdioh_info_t *sd);
return NULL;
}
-#ifndef BCMSPI_ANDROID
sd->bar0 = bar0;
-#endif /* !BCMSPI_ANDROID */
sd->irq = irq;
-#ifndef BCMSPI_ANDROID
sd->intr_handler = NULL;
sd->intr_handler_arg = NULL;
sd->intr_handler_valid = FALSE;
-#endif /* !BCMSPI_ANDROID */
/* Set defaults */
sd->use_client_ints = TRUE;
*/
sd->wordlen = 2;
-#ifdef BCMSPI_ANDROID
- dhd_spi_lockcount = &sd->lockcount;
-#endif /* BCMSPI_ANDROID */
-#ifndef BCMSPI_ANDROID
if (!spi_hw_attach(sd)) {
sd_err(("%s: spi_hw_attach() failed\n", __FUNCTION__));
spi_osfree(sd);
MFREE(sd->osh, sd, sizeof(sdioh_info_t));
return (NULL);
}
-#endif /* !BCMSPI_ANDROID */
if (bcmspi_driver_init(sd) != SUCCESS) {
sd_err(("%s: bcmspi_driver_init() failed()\n", __FUNCTION__));
-#ifndef BCMSPI_ANDROID
spi_hw_detach(sd);
-#endif /* !BCMSPI_ANDROID */
spi_osfree(sd);
MFREE(sd->osh, sd, sizeof(sdioh_info_t));
return (NULL);
if (spi_register_irq(sd, irq) != SUCCESS) {
sd_err(("%s: spi_register_irq() failed for irq = %d\n", __FUNCTION__, irq));
-#ifndef BCMSPI_ANDROID
spi_hw_detach(sd);
-#endif /* !BCMSPI_ANDROID */
spi_osfree(sd);
MFREE(sd->osh, sd, sizeof(sdioh_info_t));
return (NULL);
if (sd) {
sd_err(("%s: detaching from hardware\n", __FUNCTION__));
spi_free_irq(sd->irq, sd);
-#ifndef BCMSPI_ANDROID
spi_hw_detach(sd);
-#endif /* !BCMSPI_ANDROID */
spi_osfree(sd);
-#ifdef BCMSPI_ANDROID
- dhd_spi_lockcount = NULL;
-#endif /* !BCMSPI_ANDROID */
MFREE(sd->osh, sd, sizeof(sdioh_info_t));
}
return SDIOH_API_RC_SUCCESS;
extern SDIOH_API_RC
sdioh_interrupt_query(sdioh_info_t *sd, bool *onoff)
{
-#ifndef BCMSPI_ANDROID
sd_trace(("%s: Entering\n", __FUNCTION__));
*onoff = sd->client_intr_enabled;
-#endif /* !BCMSPI_ANDROID */
return SDIOH_API_RC_SUCCESS;
}
{
return 0;
}
-#endif // endif
+#endif
/* Provide dstatus bits of spi-transaction for dhd layers. */
extern uint32
}
}
+
uint
sdioh_query_iofnum(sdioh_info_t *sd)
{
bcopy(&int_val, arg, val_size);
break;
-#ifndef BCMSPI_ANDROID
case IOV_SVAL(IOV_DIVISOR):
sd_divisor = int_val;
if (!spi_start_clock(si, (uint16)sd_divisor)) {
bcmerror = BCME_ERROR;
}
break;
-#endif /* !BCMSPI_ANDROID */
case IOV_GVAL(IOV_POWER):
int_val = (uint32)sd_power;
break;
}
+
case IOV_GVAL(IOV_SPIERRSTATS):
{
bcopy(&si->spierrstats, arg, sizeof(struct spierrstats_t));
int offset;
uint32 cis_byte;
uint16 *cis = (uint16 *)cisd;
- uint bar0 = SI_ENUM_BASE(sd->sih);
+ uint bar0 = SI_ENUM_BASE;
int status;
uint8 data;
sd_trace(("%s cmd_arg = 0x%x\n", __FUNCTION__, cmd_arg));
+
/* Set up and issue the SPI command. MSByte goes out on bus first. Increase datalen
* according to the wordlen mode(16/32bit) the device is in.
*/
{
uint32 cmd_arg = GSPI_RESYNC_PATTERN, data = 0, datalen = 0;
+
/* Set up and issue the SPI command. MSByte goes out on bus first. Increase datalen
* according to the wordlen mode(16/32bit) the device is in.
*/
return SUCCESS;
}
+
/*
* Private/Static work routines
*/
uint32 status_en_reg = 0;
sd_trace(("%s: Powering up slot %d\n", __FUNCTION__, sd->adapter_slot));
-#ifndef BCMSPI_ANDROID
#ifdef HSMODE
if (!spi_start_clock(sd, (uint16)sd_divisor)) {
sd_err(("spi_start_clock failed\n"));
return ERROR;
}
#endif /* HSMODE */
-#endif /* !BCMSPI_ANDROID */
if (!bcmspi_host_device_init_adapt(sd)) {
sd_err(("bcmspi_host_device_init_adapt failed\n"));
}
#ifndef HSMODE
-#ifndef BCMSPI_ANDROID
/* After configuring for High-Speed mode, set the desired clock rate. */
if (!spi_start_clock(sd, 4)) {
sd_err(("spi_start_clock failed\n"));
return ERROR;
}
-#endif /* !BCMSPI_ANDROID */
#endif /* HSMODE */
/* check to see if the response delay needs to be programmed properly */
}
}
+
sd->card_init_done = TRUE;
/* get the device rev to program the prop respdelays */
sd_trace(("In %s spih-ctrl = 0x%x \n", __FUNCTION__, regdata));
+
if (hsmode == TRUE) {
sd_trace(("Attempting to enable High-Speed mode.\n"));
return status;
}
}
-#ifndef BCMSPI_ANDROID
spi_controller_highspeed_mode(sd, hsmode);
-#endif /* !BCMSPI_ANDROID */
return TRUE;
}
}
}
+
return TRUE;
}
return FALSE;
}
+
#define RW_PATTERN1 0xA0A1A2A3
#define RW_PATTERN2 0x4B5B6B7B
/* +4 for cmd and +4 for dstatus */
hostlen = datalen + 8 + resp_delay;
hostlen += dstatus_idx;
-#ifdef BCMSPI_ANDROID
- if (hostlen%4) {
- sd_err(("Unaligned data len %d, hostlen %d\n",
- datalen, hostlen));
-#endif /* BCMSPI_ANDROID */
hostlen += (4 - (hostlen & 0x3));
-#ifdef BCMSPI_ANDROID
- }
-#endif /* BCMSPI_ANDROID */
spi_sendrecv(sd, spi_outbuf, spi_inbuf, hostlen);
/* for Read, get the data into the input buffer */
__FUNCTION__, write ? "Wd" : "Rd", func, "INCR",
addr, nbytes, sd->r_cnt, sd->t_cnt));
+
if ((status = bcmspi_cmd_issue(sd, sd->sd_use_dma, cmd_arg, data, nbytes)) != SUCCESS) {
sd_err(("%s: cmd_issue failed for %s\n", __FUNCTION__,
(write ? "write" : "read")));
+++ /dev/null
-/*
- * Broadcom Secure Standard Library.
- *
- * Copyright (C) 1999-2019, Broadcom.
- *
- * Unless you and Broadcom execute a separate written software license
- * agreement governing use of this software, this software is licensed to you
- * under the terms of the GNU General Public License version 2 (the "GPL"),
- * available at http://www.broadcom.com/licenses/GPLv2.php, with the
- * following added to such license:
- *
- * As a special exception, the copyright holders of this software give you
- * permission to link this software with independent modules, and to copy and
- * distribute the resulting executable under terms of your choice, provided that
- * you also meet, for each linked independent module, the terms and conditions of
- * the license of that module. An independent module is a module which is not
- * derived from this software. The special exception does not apply to any
- * modifications of the software.
- *
- * Notwithstanding the above, under no circumstances may you combine this
- * software in any way with any other Broadcom software provided under a license
- * other than the GPL, without Broadcom's express prior written consent.
- *
- *
- * <<Broadcom-WL-IPTag/Open:>>
- *
- * $Id $
- */
-
-#include <bcm_cfg.h>
-#include <typedefs.h>
-#include <bcmdefs.h>
-#ifdef BCMDRIVER
-#include <osl.h>
-#else /* BCMDRIVER */
-#include <stddef.h>
-#include <string.h>
-#endif /* else BCMDRIVER */
-
-#include <bcmstdlib_s.h>
-#include <bcmutils.h>
-
-/*
- * __SIZE_MAX__ value is depending on platform:
- * Firmware Dongle: RAMSIZE (Dongle Specific Limit).
- * LINUX NIC/Windows/MACOSX/Application: OS Native or
- * 0xFFFFFFFFu if not defined.
- */
-#ifndef SIZE_MAX
-#ifndef __SIZE_MAX__
-#define __SIZE_MAX__ 0xFFFFFFFFu
-#endif /* __SIZE_MAX__ */
-#define SIZE_MAX __SIZE_MAX__
-#endif /* SIZE_MAX */
-#define RSIZE_MAX (SIZE_MAX >> 1u)
-
-#if !defined(__STDC_WANT_SECURE_LIB__) && !(defined(__STDC_LIB_EXT1__) && \
- defined(__STDC_WANT_LIB_EXT1__))
-/*
- * memmove_s - secure memmove
- * dest : pointer to the object to copy to
- * destsz : size of the destination buffer
- * src : pointer to the object to copy from
- * n : number of bytes to copy
- * Return Value : zero on success and non-zero on error
- * Also on error, if dest is not a null pointer and destsz not greater
- * than RSIZE_MAX, writes destsz zero bytes into the dest object.
- */
-int
-memmove_s(void *dest, size_t destsz, const void *src, size_t n)
-{
- int err = BCME_OK;
-
- if ((!dest) || (((char *)dest + destsz) < (char *)dest)) {
- err = BCME_BADARG;
- goto exit;
- }
-
- if (destsz > RSIZE_MAX) {
- err = BCME_BADLEN;
- goto exit;
- }
-
- if (destsz < n) {
- memset(dest, 0, destsz);
- err = BCME_BADLEN;
- goto exit;
- }
-
- if ((!src) || (((const char *)src + n) < (const char *)src)) {
- memset(dest, 0, destsz);
- err = BCME_BADARG;
- goto exit;
- }
-
- memmove(dest, src, n);
-exit:
- return err;
-}
-
-/*
- * memcpy_s - secure memcpy
- * dest : pointer to the object to copy to
- * destsz : size of the destination buffer
- * src : pointer to the object to copy from
- * n : number of bytes to copy
- * Return Value : zero on success and non-zero on error
- * Also on error, if dest is not a null pointer and destsz not greater
- * than RSIZE_MAX, writes destsz zero bytes into the dest object.
- */
-int
-memcpy_s(void *dest, size_t destsz, const void *src, size_t n)
-{
- int err = BCME_OK;
- char *d = dest;
- const char *s = src;
-
- if ((!d) || ((d + destsz) < d)) {
- err = BCME_BADARG;
- goto exit;
- }
-
- if (destsz > RSIZE_MAX) {
- err = BCME_BADLEN;
- goto exit;
- }
-
- if (destsz < n) {
- memset(dest, 0, destsz);
- err = BCME_BADLEN;
- goto exit;
- }
-
- if ((!s) || ((s + n) < s)) {
- memset(dest, 0, destsz);
- err = BCME_BADARG;
- goto exit;
- }
-
- /* overlap checking between dest and src */
- if (!(((d + destsz) <= s) || (d >= (s + n)))) {
- memset(dest, 0, destsz);
- err = BCME_BADARG;
- goto exit;
- }
-
- (void)memcpy(dest, src, n);
-exit:
- return err;
-}
-
-/*
- * memset_s - secure memset
- * dest : pointer to the object to be set
- * destsz : size of the destination buffer
- * c : byte value
- * n : number of bytes to be set
- * Return Value : zero on success and non-zero on error
- * Also on error, if dest is not a null pointer and destsz not greater
- * than RSIZE_MAX, writes destsz bytes with value c into the dest object.
- */
-int
-memset_s(void *dest, size_t destsz, int c, size_t n)
-{
- int err = BCME_OK;
- if ((!dest) || (((char *)dest + destsz) < (char *)dest)) {
- err = BCME_BADARG;
- goto exit;
- }
-
- if (destsz > RSIZE_MAX) {
- err = BCME_BADLEN;
- goto exit;
- }
-
- if (destsz < n) {
- (void)memset(dest, c, destsz);
- err = BCME_BADLEN;
- goto exit;
- }
-
- (void)memset(dest, c, n);
-exit:
- return err;
-}
-#endif /* !__STDC_WANT_SECURE_LIB__ && !(__STDC_LIB_EXT1__ && __STDC_WANT_LIB_EXT1__) */
-
-#if 0
-/**
- * strlcpy - Copy a %NUL terminated string into a sized buffer
- * @dest: Where to copy the string to
- * @src: Where to copy the string from
- * @size: size of destination buffer 0 if input parameters are NOK
- * return: string leng of src (assume src is NUL terminated)
- *
- * Compatible with *BSD: the result is always a valid
- * NUL-terminated string that fits in the buffer (unless,
- * of course, the buffer size is zero). It does not pad
- * out the result like strncpy() does.
- */
-size_t strlcpy(char *dest, const char *src, size_t size)
-{
- const char *s = src;
- size_t n;
-
- if (dest == NULL) {
- return 0;
- }
-
- /* terminate dest if src is NULL and return 0 as only NULL was added */
- if (s == NULL) {
- *dest = '\0';
- return 0;
- }
-
- /* allows us to handle size 0 */
- if (size == 0) {
- n = 0;
- } else {
- n = size - 1u;
- }
-
- /* perform copy */
- while (*s && n != 0) {
- *dest++ = *s++;
- n--;
- }
-
- *dest = '\0';
-
- /* count to end of s or compensate for NULL */
- if (n == 0) {
- while (*s++)
- ;
- } else {
- s++;
- }
-
- /* return bytes copied not accounting NUL */
- return (s - src - 1u);
-}
-#endif // endif
-
-/**
- * strlcat_s - Concatenate a %NUL terminated string with a sized buffer
- * @dest: Where to concatenate the string to
- * @src: Where to copy the string from
- * @size: size of destination buffer
- * return: string length of created string (i.e. the initial length of dest plus the length of src)
- * not including the NUL char, up until size
- *
- * Unlike strncat(), strlcat() take the full size of the buffer (not just the number of bytes to
- * copy) and guarantee to NUL-terminate the result (even when there's nothing to concat).
- * If the length of dest string concatinated with the src string >= size, truncation occurs.
- *
- * Compatible with *BSD: the result is always a valid NUL-terminated string that fits in the buffer
- * (unless, of course, the buffer size is zero).
- *
- * If either src or dest is not NUL-terminated, dest[size-1] will be set to NUL.
- * If size < strlen(dest) + strlen(src), dest[size-1] will be set to NUL.
- * If size == 0, dest[0] will be set to NUL.
- */
-size_t
-strlcat_s(char *dest, const char *src, size_t size)
-{
- char *d = dest;
- const char *s = src; /* point to the start of the src string */
- size_t n = size;
- size_t dlen;
- size_t bytes_to_copy = 0;
-
- if (dest == NULL) {
- return 0;
- }
-
- /* set d to point to the end of dest string (up to size) */
- while (n != 0 && *d != '\0') {
- d++;
- n--;
- }
- dlen = (size_t)(d - dest);
-
- if (s != NULL) {
- size_t slen = 0;
-
- /* calculate src len in case it's not null-terminated */
- n = size;
- while (n-- != 0 && *(s + slen) != '\0') {
- ++slen;
- }
-
- n = size - dlen; /* maximum num of chars to copy */
- if (n != 0) {
- /* copy relevant chars (until end of src buf or given size is reached) */
- bytes_to_copy = MIN(slen - (size_t)(s - src), n - 1);
- (void)memcpy(d, s, bytes_to_copy);
- d += bytes_to_copy;
- }
- }
- if (n == 0 && dlen != 0) {
- --d; /* nothing to copy, but NUL-terminate dest anyway */
- }
- *d = '\0'; /* NUL-terminate dest */
-
- return (dlen + bytes_to_copy);
-}
/*
* Driver O/S-independent utility routines
*
- * Copyright (C) 1999-2019, Broadcom.
- *
+ * Copyright (C) 1999-2017, Broadcom Corporation
+ *
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
* under the terms of the GNU General Public License version 2 (the "GPL"),
* available at http://www.broadcom.com/licenses/GPLv2.php, with the
* following added to such license:
- *
+ *
* As a special exception, the copyright holders of this software give you
* permission to link this software with independent modules, and to copy and
* distribute the resulting executable under terms of your choice, provided that
* you also meet, for each linked independent module, the terms and conditions of
* the license of that module. An independent module is a module which is not
* derived from this software. The special exception does not apply to any
* modifications of the software.
- *
+ *
* Notwithstanding the above, under no circumstances may you combine this
* software in any way with any other Broadcom software provided under a license
* other than the GPL, without Broadcom's express prior written consent.
*
* <<Broadcom-WL-IPTag/Open:>>
*
- * $Id: bcmutils.c 813798 2019-04-08 10:20:21Z $
+ * $Id: bcmutils.c 699163 2017-05-12 05:18:23Z $
*/
#include <bcm_cfg.h>
#include <bcmdefs.h>
#include <stdarg.h>
#ifdef BCMDRIVER
+
#include <osl.h>
#include <bcmutils.h>
#include <stdio.h>
#include <string.h>
-#include <bcm_math.h>
#include <bcmutils.h>
#if defined(BCMEXTSUP)
#include <bcm_osl.h>
-#endif // endif
+#endif
#ifndef ASSERT
#define ASSERT(exp)
-#endif // endif
+#endif
#endif /* !BCMDRIVER */
-#ifdef WL_UNITTEST
-#ifdef ASSERT
-#undef ASSERT
-#endif /* ASSERT */
-#define ASSERT(exp)
-#endif /* WL_UNITTEST */
-
-#include <bcmstdlib_s.h>
#include <bcmendian.h>
#include <bcmdevs.h>
#include <ethernet.h>
#include <bcmipv6.h>
#include <bcmtcp.h>
-#ifdef BCMDRIVER
-
-/* return total length of buffer chain */
-uint BCMFASTPATH
-pkttotlen(osl_t *osh, void *p)
-{
- uint total;
- int len;
-
- total = 0;
- for (; p; p = PKTNEXT(osh, p)) {
- len = PKTLEN(osh, p);
- total += (uint)len;
-#ifdef BCMLFRAG
- if (BCMLFRAG_ENAB()) {
- if (PKTISFRAG(osh, p)) {
- total += PKTFRAGTOTLEN(osh, p);
- }
- }
-#endif // endif
- }
+/* Look-up table to calculate head room present in a number */
+static const uint8 msb_table[] = {
+ 0, 1, 2, 2, 3, 3, 3, 3,
+ 4, 4, 4, 4, 4, 4, 4, 4,
+ 5, 5, 5, 5, 5, 5, 5, 5,
+ 5, 5, 5, 5, 5, 5, 5, 5,
+ 6, 6, 6, 6, 6, 6, 6, 6,
+ 6, 6, 6, 6, 6, 6, 6, 6,
+ 6, 6, 6, 6, 6, 6, 6, 6,
+ 6, 6, 6, 6, 6, 6, 6, 6,
+ 7, 7, 7, 7, 7, 7, 7, 7,
+ 7, 7, 7, 7, 7, 7, 7, 7,
+ 7, 7, 7, 7, 7, 7, 7, 7,
+ 7, 7, 7, 7, 7, 7, 7, 7,
+ 7, 7, 7, 7, 7, 7, 7, 7,
+ 7, 7, 7, 7, 7, 7, 7, 7,
+ 7, 7, 7, 7, 7, 7, 7, 7,
+ 7, 7, 7, 7, 7, 7, 7, 7,
+ 8, 8, 8, 8, 8, 8, 8, 8,
+ 8, 8, 8, 8, 8, 8, 8, 8,
+ 8, 8, 8, 8, 8, 8, 8, 8,
+ 8, 8, 8, 8, 8, 8, 8, 8,
+ 8, 8, 8, 8, 8, 8, 8, 8,
+ 8, 8, 8, 8, 8, 8, 8, 8,
+ 8, 8, 8, 8, 8, 8, 8, 8,
+ 8, 8, 8, 8, 8, 8, 8, 8,
+ 8, 8, 8, 8, 8, 8, 8, 8,
+ 8, 8, 8, 8, 8, 8, 8, 8,
+ 8, 8, 8, 8, 8, 8, 8, 8,
+ 8, 8, 8, 8, 8, 8, 8, 8,
+ 8, 8, 8, 8, 8, 8, 8, 8,
+ 8, 8, 8, 8, 8, 8, 8, 8,
+ 8, 8, 8, 8, 8, 8, 8, 8,
+ 8, 8, 8, 8, 8, 8, 8, 8,
+};
- return (total);
-}
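/*
 * Illustrative note (derived from the table values above, not part of the
 * original change): msb_table[v] is the 1-based position of the most
 * significant set bit of a byte value v, i.e. the number of bits needed to
 * represent v, with msb_table[0] == 0. For example:
 *
 *   msb_table[0x01] == 1, msb_table[0x05] == 3, msb_table[0x80] == 8
 *
 * The "head room" of a byte is then presumably 8 - msb_table[v] leading zero
 * bits; the consumer of this table lies outside this hunk.
 */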
+void *_bcmutils_dummy_fn = NULL;
-/* return the last buffer of chained pkt */
-void *
-pktlast(osl_t *osh, void *p)
-{
- for (; PKTNEXT(osh, p); p = PKTNEXT(osh, p))
- ;
- return (p);
-}
-/* count segments of a chained packet */
-uint BCMFASTPATH
-pktsegcnt(osl_t *osh, void *p)
-{
- uint cnt;
- for (cnt = 0; p; p = PKTNEXT(osh, p)) {
- cnt++;
-#ifdef BCMLFRAG
- if (BCMLFRAG_ENAB()) {
- if (PKTISFRAG(osh, p)) {
- cnt += PKTFRAGTOTNUM(osh, p);
- }
- }
-#endif // endif
- }
+#ifdef BCMDRIVER
- return cnt;
-}
/* copy a pkt buffer chain into a buffer */
uint
for (; p && offset; p = PKTNEXT(osh, p)) {
if (offset < (uint)PKTLEN(osh, p))
break;
- offset -= (uint)PKTLEN(osh, p);
+ offset -= PKTLEN(osh, p);
}
if (!p)
{
uint n, ret = 0;
+
/* skip 'offset' bytes */
for (; p && offset; p = PKTNEXT(osh, p)) {
if (offset < (uint)PKTLEN(osh, p))
break;
- offset -= (uint)PKTLEN(osh, p);
+ offset -= PKTLEN(osh, p);
}
if (!p)
return ret;
}
+
+
+/* return total length of buffer chain */
+uint BCMFASTPATH
+pkttotlen(osl_t *osh, void *p)
+{
+ uint total;
+ int len;
+
+ total = 0;
+ for (; p; p = PKTNEXT(osh, p)) {
+ len = PKTLEN(osh, p);
+ total += len;
+#ifdef BCMLFRAG
+ if (BCMLFRAG_ENAB()) {
+ if (PKTISFRAG(osh, p)) {
+ total += PKTFRAGTOTLEN(osh, p);
+ }
+ }
+#endif
+ }
+
+ return (total);
+}
+
+/* return the last buffer of chained pkt */
+void *
+pktlast(osl_t *osh, void *p)
+{
+ for (; PKTNEXT(osh, p); p = PKTNEXT(osh, p))
+ ;
+
+ return (p);
+}
+
+/* count segments of a chained packet */
+uint BCMFASTPATH
+pktsegcnt(osl_t *osh, void *p)
+{
+ uint cnt;
+
+ for (cnt = 0; p; p = PKTNEXT(osh, p)) {
+ cnt++;
+#ifdef BCMLFRAG
+ if (BCMLFRAG_ENAB()) {
+ if (PKTISFRAG(osh, p)) {
+ cnt += PKTFRAGTOTNUM(osh, p);
+ }
+ }
+#endif
+ }
+
+ return cnt;
+}
+
+
+/* count segments of a chained packet */
+uint BCMFASTPATH
+pktsegcnt_war(osl_t *osh, void *p)
+{
+ uint cnt;
+ uint8 *pktdata;
+ uint len, remain, align64;
+
+ for (cnt = 0; p; p = PKTNEXT(osh, p)) {
+ cnt++;
+ len = PKTLEN(osh, p);
+ if (len > 128) {
+ pktdata = (uint8 *)PKTDATA(osh, p); /* starting address of data */
+ /* Check for page boundary straddle (2048B) */
+ if (((uintptr)pktdata & ~0x7ff) != ((uintptr)(pktdata+len) & ~0x7ff))
+ cnt++;
+
+ align64 = (uint)((uintptr)pktdata & 0x3f); /* aligned to 64B */
+ align64 = (64 - align64) & 0x3f;
+ len -= align64; /* bytes from aligned 64B to end */
+ /* if aligned to 128B, check for MOD 128 between 1 to 4B */
+ remain = len % 128;
+ if (remain > 0 && remain <= 4)
+ cnt++; /* add extra seg */
+ }
+ }
+
+ return cnt;
+}
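/*
 * Worked example for the workaround above (illustrative, hypothetical
 * addresses, not part of the original change): with a data pointer at 0x1fc0
 * and PKTLEN == 200, the buffer straddles the 2KB boundary at 0x2000
 * ((0x1fc0 & ~0x7ff) != ((0x1fc0 + 200) & ~0x7ff)), so one extra segment is
 * counted; align64 is 0, and 200 % 128 == 72 falls outside 1..4, so no
 * further segment is added for the MOD-128 case.
 */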
+
uint8 * BCMFASTPATH
pktdataoffset(osl_t *osh, void *p, uint offset)
{
for (; p; p = PKTNEXT(osh, p)) {
pdata = (uint8 *) PKTDATA(osh, p);
pkt_off = offset - len;
- len += (uint)PKTLEN(osh, p);
+ len += PKTLEN(osh, p);
if (len > offset)
break;
}
return (uint8*) (pdata+pkt_off);
}
+
/* given a offset in pdata, find the pkt seg hdr */
void *
pktoffset(osl_t *osh, void *p, uint offset)
return NULL;
for (; p; p = PKTNEXT(osh, p)) {
- len += (uint)PKTLEN(osh, p);
+ len += PKTLEN(osh, p);
if (len > offset)
break;
}
return p;
}
-void
-bcm_mdelay(uint ms)
-{
- uint i;
+#endif /* BCMDRIVER */
- for (i = 0; i < ms; i++) {
- OSL_DELAY(1000);
- }
-}
+#if !defined(BCMROMOFFLOAD_EXCLUDE_BCMUTILS_FUNCS)
+const unsigned char bcm_ctype[] = {
-#if defined(DHD_DEBUG)
-/* pretty hex print a pkt buffer chain */
-void
-prpkt(const char *msg, osl_t *osh, void *p0)
+ _BCM_C,_BCM_C,_BCM_C,_BCM_C,_BCM_C,_BCM_C,_BCM_C,_BCM_C, /* 0-7 */
+ _BCM_C, _BCM_C|_BCM_S, _BCM_C|_BCM_S, _BCM_C|_BCM_S, _BCM_C|_BCM_S, _BCM_C|_BCM_S, _BCM_C,
+ _BCM_C, /* 8-15 */
+ _BCM_C,_BCM_C,_BCM_C,_BCM_C,_BCM_C,_BCM_C,_BCM_C,_BCM_C, /* 16-23 */
+ _BCM_C,_BCM_C,_BCM_C,_BCM_C,_BCM_C,_BCM_C,_BCM_C,_BCM_C, /* 24-31 */
+ _BCM_S|_BCM_SP,_BCM_P,_BCM_P,_BCM_P,_BCM_P,_BCM_P,_BCM_P,_BCM_P, /* 32-39 */
+ _BCM_P,_BCM_P,_BCM_P,_BCM_P,_BCM_P,_BCM_P,_BCM_P,_BCM_P, /* 40-47 */
+ _BCM_D,_BCM_D,_BCM_D,_BCM_D,_BCM_D,_BCM_D,_BCM_D,_BCM_D, /* 48-55 */
+ _BCM_D,_BCM_D,_BCM_P,_BCM_P,_BCM_P,_BCM_P,_BCM_P,_BCM_P, /* 56-63 */
+ _BCM_P, _BCM_U|_BCM_X, _BCM_U|_BCM_X, _BCM_U|_BCM_X, _BCM_U|_BCM_X, _BCM_U|_BCM_X,
+ _BCM_U|_BCM_X, _BCM_U, /* 64-71 */
+ _BCM_U,_BCM_U,_BCM_U,_BCM_U,_BCM_U,_BCM_U,_BCM_U,_BCM_U, /* 72-79 */
+ _BCM_U,_BCM_U,_BCM_U,_BCM_U,_BCM_U,_BCM_U,_BCM_U,_BCM_U, /* 80-87 */
+ _BCM_U,_BCM_U,_BCM_U,_BCM_P,_BCM_P,_BCM_P,_BCM_P,_BCM_P, /* 88-95 */
+ _BCM_P, _BCM_L|_BCM_X, _BCM_L|_BCM_X, _BCM_L|_BCM_X, _BCM_L|_BCM_X, _BCM_L|_BCM_X,
+ _BCM_L|_BCM_X, _BCM_L, /* 96-103 */
+ _BCM_L,_BCM_L,_BCM_L,_BCM_L,_BCM_L,_BCM_L,_BCM_L,_BCM_L, /* 104-111 */
+ _BCM_L,_BCM_L,_BCM_L,_BCM_L,_BCM_L,_BCM_L,_BCM_L,_BCM_L, /* 112-119 */
+ _BCM_L,_BCM_L,_BCM_L,_BCM_P,_BCM_P,_BCM_P,_BCM_P,_BCM_C, /* 120-127 */
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 128-143 */
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 144-159 */
+ _BCM_S|_BCM_SP, _BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P,
+ _BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P, /* 160-175 */
+ _BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P,
+ _BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P, /* 176-191 */
+ _BCM_U, _BCM_U, _BCM_U, _BCM_U, _BCM_U, _BCM_U, _BCM_U, _BCM_U, _BCM_U, _BCM_U, _BCM_U,
+ _BCM_U, _BCM_U, _BCM_U, _BCM_U, _BCM_U, /* 192-207 */
+ _BCM_U, _BCM_U, _BCM_U, _BCM_U, _BCM_U, _BCM_U, _BCM_U, _BCM_P, _BCM_U, _BCM_U, _BCM_U,
+ _BCM_U, _BCM_U, _BCM_U, _BCM_U, _BCM_L, /* 208-223 */
+ _BCM_L, _BCM_L, _BCM_L, _BCM_L, _BCM_L, _BCM_L, _BCM_L, _BCM_L, _BCM_L, _BCM_L, _BCM_L,
+ _BCM_L, _BCM_L, _BCM_L, _BCM_L, _BCM_L, /* 224-239 */
+ _BCM_L, _BCM_L, _BCM_L, _BCM_L, _BCM_L, _BCM_L, _BCM_L, _BCM_P, _BCM_L, _BCM_L, _BCM_L,
+ _BCM_L, _BCM_L, _BCM_L, _BCM_L, _BCM_L /* 240-255 */
+};
+
+ulong
+bcm_strtoul(const char *cp, char **endp, uint base)
{
- void *p;
+ ulong result, last_result = 0, value;
+ bool minus;
- if (msg && (msg[0] != '\0'))
- printf("%s:\n", msg);
+ minus = FALSE;
- for (p = p0; p; p = PKTNEXT(osh, p))
- prhex(NULL, PKTDATA(osh, p), (uint)PKTLEN(osh, p));
-}
-#endif // endif
+ while (bcm_isspace(*cp))
+ cp++;
-/* Takes an Ethernet frame and sets out-of-bound PKTPRIO.
- * Also updates the inplace vlan tag if requested.
- * For debugging, it returns an indication of what it did.
- */
-uint BCMFASTPATH
-pktsetprio(void *pkt, bool update_vtag)
-{
- struct ether_header *eh;
- struct ethervlan_header *evh;
- uint8 *pktdata;
- uint priority = 0;
- uint rc = 0;
+ if (cp[0] == '+')
+ cp++;
+ else if (cp[0] == '-') {
+ minus = TRUE;
+ cp++;
+ }
- pktdata = (uint8 *)PKTDATA(OSH_NULL, pkt);
- ASSERT(ISALIGNED((uintptr)pktdata, sizeof(uint16)));
+ if (base == 0) {
+ if (cp[0] == '0') {
+ if ((cp[1] == 'x') || (cp[1] == 'X')) {
+ base = 16;
+ cp = &cp[2];
+ } else {
+ base = 8;
+ cp = &cp[1];
+ }
+ } else
+ base = 10;
+ } else if (base == 16 && (cp[0] == '0') && ((cp[1] == 'x') || (cp[1] == 'X'))) {
+ cp = &cp[2];
+ }
- eh = (struct ether_header *) pktdata;
+ result = 0;
- if (eh->ether_type == hton16(ETHER_TYPE_8021Q)) {
- uint16 vlan_tag;
- uint vlan_prio, dscp_prio = 0;
+ while (bcm_isxdigit(*cp) &&
+ (value = bcm_isdigit(*cp) ? *cp-'0' : bcm_toupper(*cp)-'A'+10) < base) {
+ result = result*base + value;
+ /* Detected overflow */
+ if (result < last_result && !minus) {
+ if (endp) {
+ /* Go to the end of current number */
+ while (bcm_isxdigit(*cp)) {
+ cp++;
+ }
+ *endp = DISCARD_QUAL(cp, char);
+ }
+ return (ulong)-1;
+ }
+ last_result = result;
+ cp++;
+ }
- evh = (struct ethervlan_header *)eh;
+ if (minus)
+ result = (ulong)(-(long)result);
- vlan_tag = ntoh16(evh->vlan_tag);
- vlan_prio = (vlan_tag >> VLAN_PRI_SHIFT) & VLAN_PRI_MASK;
+ if (endp)
+ *endp = DISCARD_QUAL(cp, char);
- if ((evh->ether_type == hton16(ETHER_TYPE_IP)) ||
- (evh->ether_type == hton16(ETHER_TYPE_IPV6))) {
- uint8 *ip_body = pktdata + sizeof(struct ethervlan_header);
- uint8 tos_tc = (uint8)IP_TOS46(ip_body);
- dscp_prio = tos_tc >> IPV4_TOS_PREC_SHIFT;
- }
+ return (result);
+}
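/*
 * A minimal usage sketch for bcm_strtoul() above (illustrative only, not part
 * of the original change): with base == 0 the prefix selects the radix
 * ("0x" -> hex, leading "0" -> octal, else decimal), and unsigned overflow
 * returns (ulong)-1 with *endp advanced past the offending digits.
 *
 *   char *end;
 *   ulong a = bcm_strtoul("0x1f", &end, 0);    // a == 31, *end == '\0'
 *   ulong b = bcm_strtoul("42abc", &end, 10);  // b == 42, end points at "abc"
 */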
- /* DSCP priority gets precedence over 802.1P (vlan tag) */
- if (dscp_prio != 0) {
- priority = dscp_prio;
- rc |= PKTPRIO_VDSCP;
- } else {
- priority = vlan_prio;
- rc |= PKTPRIO_VLAN;
- }
- /*
- * If the DSCP priority is not the same as the VLAN priority,
- * then overwrite the priority field in the vlan tag, with the
- * DSCP priority value. This is required for Linux APs because
- * the VLAN driver on Linux, overwrites the skb->priority field
- * with the priority value in the vlan tag
- */
- if (update_vtag && (priority != vlan_prio)) {
- vlan_tag &= ~(VLAN_PRI_MASK << VLAN_PRI_SHIFT);
- vlan_tag |= (uint16)priority << VLAN_PRI_SHIFT;
- evh->vlan_tag = hton16(vlan_tag);
- rc |= PKTPRIO_UPD;
- }
-#if defined(EAPOL_PKT_PRIO) || defined(DHD_LOSSLESS_ROAMING)
- } else if (eh->ether_type == hton16(ETHER_TYPE_802_1X)) {
- priority = PRIO_8021D_NC;
- rc = PKTPRIO_DSCP;
-#endif /* EAPOL_PKT_PRIO || DHD_LOSSLESS_ROAMING */
- } else if ((eh->ether_type == hton16(ETHER_TYPE_IP)) ||
- (eh->ether_type == hton16(ETHER_TYPE_IPV6))) {
- uint8 *ip_body = pktdata + sizeof(struct ether_header);
- uint8 tos_tc = (uint8)IP_TOS46(ip_body);
- uint8 dscp = tos_tc >> IPV4_TOS_DSCP_SHIFT;
- switch (dscp) {
- case DSCP_EF:
- case DSCP_VA:
- priority = PRIO_8021D_VO;
- break;
- case DSCP_AF31:
- case DSCP_AF32:
- case DSCP_AF33:
- case DSCP_CS3:
- priority = PRIO_8021D_CL;
- break;
- case DSCP_AF21:
- case DSCP_AF22:
- case DSCP_AF23:
- priority = PRIO_8021D_EE;
- break;
- case DSCP_AF11:
- case DSCP_AF12:
- case DSCP_AF13:
- case DSCP_CS2:
- priority = PRIO_8021D_BE;
- break;
- case DSCP_CS6:
- case DSCP_CS7:
- priority = PRIO_8021D_NC;
- break;
- default:
- priority = tos_tc >> IPV4_TOS_PREC_SHIFT;
- break;
- }
-
- rc |= PKTPRIO_DSCP;
- }
-
- ASSERT(priority <= MAXPRIO);
- PKTSETPRIO(pkt, (int)priority);
- return (rc | priority);
+int
+bcm_atoi(const char *s)
+{
+ return (int)bcm_strtoul(s, NULL, 10);
}
-/* lookup user priority for specified DSCP */
-static uint8
-dscp2up(uint8 *up_table, uint8 dscp)
+/* return pointer to location of substring 'needle' in 'haystack' */
+char *
+bcmstrstr(const char *haystack, const char *needle)
{
- uint8 user_priority = 255;
+ int len, nlen;
+ int i;
- /* lookup up from table if parameters valid */
- if (up_table != NULL && dscp < UP_TABLE_MAX) {
- user_priority = up_table[dscp];
- }
+ if ((haystack == NULL) || (needle == NULL))
+ return DISCARD_QUAL(haystack, char);
- /* 255 is unused value so return up from dscp */
- if (user_priority == 255) {
- user_priority = dscp >> (IPV4_TOS_PREC_SHIFT - IPV4_TOS_DSCP_SHIFT);
- }
+ nlen = (int)strlen(needle);
+ len = (int)strlen(haystack) - nlen + 1;
- return user_priority;
+ for (i = 0; i < len; i++)
+ if (memcmp(needle, &haystack[i], nlen) == 0)
+ return DISCARD_QUAL(&haystack[i], char);
+ return (NULL);
}
-/* set user priority by QoS Map Set table (UP table), table size is UP_TABLE_MAX */
-uint BCMFASTPATH
-pktsetprio_qms(void *pkt, uint8* up_table, bool update_vtag)
+char *
+bcmstrnstr(const char *s, uint s_len, const char *substr, uint substr_len)
{
- if (up_table) {
- uint8 *pktdata;
- uint pktlen;
- uint8 dscp;
- uint user_priority = 0;
- uint rc = 0;
-
- pktdata = (uint8 *)PKTDATA(OSH_NULL, pkt);
- pktlen = (uint)PKTLEN(OSH_NULL, pkt);
-
- if (pktgetdscp(pktdata, pktlen, &dscp)) {
- rc = PKTPRIO_DSCP;
- user_priority = dscp2up(up_table, dscp);
- PKTSETPRIO(pkt, (int)user_priority);
- }
+ for (; s_len >= substr_len; s++, s_len--)
+ if (strncmp(s, substr, substr_len) == 0)
+ return DISCARD_QUAL(s, char);
- return (rc | user_priority);
- } else {
- return pktsetprio(pkt, update_vtag);
- }
+ return NULL;
}
-/* Returns TRUE and DSCP if IP header found, FALSE otherwise.
- */
-bool BCMFASTPATH
-pktgetdscp(uint8 *pktdata, uint pktlen, uint8 *dscp)
+char *
+bcmstrcat(char *dest, const char *src)
{
- struct ether_header *eh;
- struct ethervlan_header *evh;
- uint8 *ip_body;
- bool rc = FALSE;
-
- /* minimum length is ether header and IP header */
- if (pktlen < sizeof(struct ether_header) + IPV4_MIN_HEADER_LEN)
- return FALSE;
-
- eh = (struct ether_header *) pktdata;
+ char *p;
- if (eh->ether_type == HTON16(ETHER_TYPE_IP)) {
- ip_body = pktdata + sizeof(struct ether_header);
- *dscp = (uint8)IP_DSCP46(ip_body);
- rc = TRUE;
- }
- else if (eh->ether_type == HTON16(ETHER_TYPE_8021Q)) {
- evh = (struct ethervlan_header *)eh;
+ p = dest + strlen(dest);
- /* minimum length is ethervlan header and IP header */
- if (pktlen >= sizeof(struct ethervlan_header) + IPV4_MIN_HEADER_LEN &&
- evh->ether_type == HTON16(ETHER_TYPE_IP)) {
- ip_body = pktdata + sizeof(struct ethervlan_header);
- *dscp = (uint8)IP_DSCP46(ip_body);
- rc = TRUE;
- }
- }
+ while ((*p++ = *src++) != '\0')
+ ;
- return rc;
+ return (dest);
}
-/* usr_prio range from low to high with usr_prio value */
-static bool
-up_table_set(uint8 *up_table, uint8 usr_prio, uint8 low, uint8 high)
+char *
+bcmstrncat(char *dest, const char *src, uint size)
{
- int i;
+ char *endp;
+ char *p;
- if (usr_prio > 7 || low > high || low >= UP_TABLE_MAX || high >= UP_TABLE_MAX) {
- return FALSE;
- }
+ p = dest + strlen(dest);
+ endp = p + size;
- for (i = low; i <= high; i++) {
- up_table[i] = usr_prio;
- }
+ while (p != endp && (*p++ = *src++) != '\0')
+ ;
- return TRUE;
+ return (dest);
}
-/* set user priority table */
-int BCMFASTPATH
-wl_set_up_table(uint8 *up_table, bcm_tlv_t *qos_map_ie)
+
+/****************************************************************************
+* Function: bcmstrtok
+*
+* Purpose:
+* Tokenizes a string. This function is conceptually similar to ANSI C strtok(),
+* but allows strToken() to be used by different strings or callers at the same
+* time. Each call modifies '*string' by substituting a NULL character for the
+* first delimiter that is encountered, and updates 'string' to point to the char
+* after the delimiter. Leading delimiters are skipped.
+*
+* Parameters:
+* string (mod) Ptr to string ptr, updated by token.
+* delimiters (in) Set of delimiter characters.
+* tokdelim (out) Character that delimits the returned token. (May
+* be set to NULL if token delimiter is not required).
+*
+* Returns: Pointer to the next token found. NULL when no more tokens are found.
+*****************************************************************************
+*/
+char *
+bcmstrtok(char **string, const char *delimiters, char *tokdelim)
{
- uint8 len;
+ unsigned char *str;
+ unsigned long map[8];
+ int count;
+ char *nextoken;
- if (up_table == NULL || qos_map_ie == NULL) {
- return BCME_ERROR;
+ if (tokdelim != NULL) {
+ /* Prime the token delimiter */
+ *tokdelim = '\0';
}
- /* clear table to check table was set or not */
- memset(up_table, 0xff, UP_TABLE_MAX);
+ /* Clear control map */
+ for (count = 0; count < 8; count++) {
+ map[count] = 0;
+ }
- /* length of QoS Map IE must be 16+n*2, n is number of exceptions */
- if (qos_map_ie != NULL && qos_map_ie->id == DOT11_MNG_QOS_MAP_ID &&
- (len = qos_map_ie->len) >= QOS_MAP_FIXED_LENGTH &&
- (len % 2) == 0) {
- uint8 *except_ptr = (uint8 *)qos_map_ie->data;
- uint8 except_len = len - QOS_MAP_FIXED_LENGTH;
- uint8 *range_ptr = except_ptr + except_len;
- uint8 i;
+ /* Set bits in delimiter table */
+ do {
+ map[*delimiters >> 5] |= (1 << (*delimiters & 31));
+ }
+ while (*delimiters++);
- /* fill in ranges */
- for (i = 0; i < QOS_MAP_FIXED_LENGTH; i += 2) {
- uint8 low = range_ptr[i];
- uint8 high = range_ptr[i + 1];
- if (low == 255 && high == 255) {
- continue;
- }
+ str = (unsigned char*)*string;
- if (!up_table_set(up_table, i / 2, low, high)) {
- /* clear the table on failure */
- memset(up_table, 0xff, UP_TABLE_MAX);
- return BCME_ERROR;
- }
- }
+ /* Find beginning of token (skip over leading delimiters). Note that
+ * there is no token iff this loop sets str to point to the terminal
+ * null (*str == '\0')
+ */
+ while (((map[*str >> 5] & (1 << (*str & 31))) && *str) || (*str == ' ')) {
+ str++;
+ }
- /* update exceptions */
- for (i = 0; i < except_len; i += 2) {
- uint8 dscp = except_ptr[i];
- uint8 usr_prio = except_ptr[i+1];
+ nextoken = (char*)str;
- /* exceptions with invalid dscp/usr_prio are ignored */
- up_table_set(up_table, usr_prio, dscp, dscp);
+ /* Find the end of the token. If it is not the end of the string,
+ * put a null there.
+ */
+ for (; *str; str++) {
+ if (map[*str >> 5] & (1 << (*str & 31))) {
+ if (tokdelim != NULL) {
+ *tokdelim = *str;
+ }
+
+ *str++ = '\0';
+ break;
}
}
- return BCME_OK;
-}
-
-/* The 0.5KB string table is not removed by compiler even though it's unused */
+ *string = (char*)str;
-static char bcm_undeferrstr[32];
-static const char *bcmerrorstrtable[] = BCMERRSTRINGTABLE;
-
-/* Convert the error codes into related error strings */
-const char *
-BCMRAMFN(bcmerrorstr)(int bcmerror)
-{
- /* check if someone added a bcmerror code but forgot to add errorstring */
- ASSERT((uint)ABS(BCME_LAST) == (ARRAYSIZE(bcmerrorstrtable) - 1));
-
- if (bcmerror > 0 || bcmerror < BCME_LAST) {
- snprintf(bcm_undeferrstr, sizeof(bcm_undeferrstr), "Undefined error %d", bcmerror);
- return bcm_undeferrstr;
+ /* Determine if a token has been found. */
+ if (nextoken == (char *) str) {
+ return NULL;
+ }
+ else {
+ return nextoken;
}
-
- ASSERT(strlen(bcmerrorstrtable[-bcmerror]) < BCME_STRLEN);
-
- return bcmerrorstrtable[-bcmerror];
}
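/*
 * A minimal usage sketch for bcmstrtok() above (illustrative only; the buffer
 * contents are hypothetical and not part of the original change). Unlike
 * strtok(), all state lives in the caller's string pointer, so several
 * strings can be tokenized concurrently:
 *
 *   char line[] = "ssid=test wpa=2";
 *   char *cursor = line, *tok, delim;
 *
 *   while ((tok = bcmstrtok(&cursor, " =", &delim)) != NULL)
 *       printf("%s\n", tok);   // prints "ssid", "test", "wpa", "2"
 *
 * 'delim' receives the delimiter that ended each token ('=' or ' ') and is
 * left as '\0' for the final token, which runs to the end of the string.
 */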
-/* iovar table lookup */
-/* could mandate sorted tables and do a binary search */
-const bcm_iovar_t*
-bcm_iovar_lookup(const bcm_iovar_t *table, const char *name)
-{
- const bcm_iovar_t *vi;
- const char *lookup_name;
- /* skip any ':' delimited option prefixes */
- lookup_name = strrchr(name, ':');
- if (lookup_name != NULL)
- lookup_name++;
- else
- lookup_name = name;
+#define xToLower(C) \
+ ((C >= 'A' && C <= 'Z') ? (char)((int)C - (int)'A' + (int)'a') : C)
- ASSERT(table != NULL);
- for (vi = table; vi->name; vi++) {
- if (!strcmp(vi->name, lookup_name))
- return vi;
+/****************************************************************************
+* Function: bcmstricmp
+*
+* Purpose: Compare to strings case insensitively.
+*
+* Parameters: s1 (in) First string to compare.
+* s2 (in) Second string to compare.
+*
+* Returns: 0 if the two strings are equal, -1 if s1 < s2 and 1 if
+* s1 > s2, when ignoring case sensitivity.
+*****************************************************************************
+*/
+int
+bcmstricmp(const char *s1, const char *s2)
+{
+ char dc, sc;
+
+ while (*s2 && *s1) {
+ dc = xToLower(*s1);
+ sc = xToLower(*s2);
+ if (dc < sc) return -1;
+ if (dc > sc) return 1;
+ s1++;
+ s2++;
}
- /* ran to end of table */
- return NULL; /* var name not found */
+ if (*s1 && !*s2) return 1;
+ if (!*s1 && *s2) return -1;
+ return 0;
}
+
+/****************************************************************************
+* Function: bcmstrnicmp
+*
+* Purpose: Compare two strings case-insensitively, up to a max of 'cnt'
+* characters.
+*
+* Parameters: s1 (in) First string to compare.
+* s2 (in) Second string to compare.
+* cnt (in) Max characters to compare.
+*
+* Returns: 0 if the two strings are equal, -1 if s1 < s2 and 1 if
+* s1 > s2, when ignoring case sensitivity.
+*****************************************************************************
+*/
int
-bcm_iovar_lencheck(const bcm_iovar_t *vi, void *arg, int len, bool set)
+bcmstrnicmp(const char* s1, const char* s2, int cnt)
{
- int bcmerror = 0;
- BCM_REFERENCE(arg);
+ char dc, sc;
- /* length check on io buf */
- switch (vi->type) {
- case IOVT_BOOL:
- case IOVT_INT8:
- case IOVT_INT16:
- case IOVT_INT32:
- case IOVT_UINT8:
- case IOVT_UINT16:
- case IOVT_UINT32:
- /* all integers are int32 sized args at the ioctl interface */
- if (len < (int)sizeof(int)) {
- bcmerror = BCME_BUFTOOSHORT;
- }
- break;
+ while (*s2 && *s1 && cnt) {
+ dc = xToLower(*s1);
+ sc = xToLower(*s2);
+ if (dc < sc) return -1;
+ if (dc > sc) return 1;
+ s1++;
+ s2++;
+ cnt--;
+ }
- case IOVT_BUFFER:
- /* buffer must meet minimum length requirement */
- if (len < vi->minlen) {
- bcmerror = BCME_BUFTOOSHORT;
- }
- break;
+ if (!cnt) return 0;
+ if (*s1 && !*s2) return 1;
+ if (!*s1 && *s2) return -1;
+ return 0;
+}
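/*
 * Illustrative calls for the two case-insensitive comparators above (the
 * example strings are hypothetical and not part of the original change):
 *
 *   bcmstricmp("BCM4359", "bcm4359")       -> 0   (equal ignoring case)
 *   bcmstricmp("bcm", "bcm4359")           -> -1  (s1 is a proper prefix of s2)
 *   bcmstrnicmp("bcm4359", "BCM4354", 6)   -> 0   (first 6 chars match)
 *   bcmstrnicmp("bcm4359", "BCM4354", 7)   -> 1   ('9' > '4' at index 6)
 */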
- case IOVT_VOID:
- if (!set) {
- /* Cannot return nil... */
- bcmerror = BCME_UNSUPPORTED;
- }
- break;
+/* parse a xx:xx:xx:xx:xx:xx format ethernet address */
+int
+bcm_ether_atoe(const char *p, struct ether_addr *ea)
+{
+ int i = 0;
+ char *ep;
- default:
- /* unknown type for length check in iovar info */
- ASSERT(0);
- bcmerror = BCME_UNSUPPORTED;
+ for (;;) {
+ ea->octet[i++] = (char) bcm_strtoul(p, &ep, 16);
+ p = ep;
+ if (!*p++ || i == 6)
+ break;
}
- return bcmerror;
+ return (i == 6);
}
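/*
 * A minimal usage sketch for bcm_ether_atoe() above (illustrative only, not
 * part of the original change). The return value is non-zero only when
 * exactly six hex octets were parsed:
 *
 *   struct ether_addr ea;
 *   if (bcm_ether_atoe("00:90:4c:aa:bb:cc", &ea)) {
 *       // ea.octet[] == { 0x00, 0x90, 0x4c, 0xaa, 0xbb, 0xcc }
 *   }
 */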
-#if !defined(_CFEZ_)
-/*
- * Hierarchical Multiword bitmap based small id allocator.
- *
- * Multilevel hierarchy bitmap. (maximum 2 levels)
- * First hierarchy uses a multiword bitmap to identify 32bit words in the
- * second hierarchy that have at least a single bit set. Each bit in a word of
- * the second hierarchy represents a unique ID that may be allocated.
- *
- * BCM_MWBMAP_ITEMS_MAX: Maximum number of IDs managed.
- * BCM_MWBMAP_BITS_WORD: Number of bits in a bitmap word word
- * BCM_MWBMAP_WORDS_MAX: Maximum number of bitmap words needed for free IDs.
- * BCM_MWBMAP_WDMAP_MAX: Maximum number of bitmap wordss identifying first non
- * non-zero bitmap word carrying at least one free ID.
- * BCM_MWBMAP_SHIFT_OP: Used in MOD, DIV and MUL operations.
- * BCM_MWBMAP_INVALID_IDX: Value ~0U is treated as an invalid ID
- *
- * Design Notes:
- * BCM_MWBMAP_USE_CNTSETBITS trades CPU for memory. A runtime count of how many
- * bits are computed each time on allocation and deallocation, requiring 4
- * array indexed access and 3 arithmetic operations. When not defined, a runtime
- * count of set bits state is maintained. Upto 32 Bytes per 1024 IDs is needed.
- * In a 4K max ID allocator, up to 128Bytes are hence used per instantiation.
- * In a memory limited system e.g. dongle builds, a CPU for memory tradeoff may
- * be used by defining BCM_MWBMAP_USE_CNTSETBITS.
- *
- * Note: wd_bitmap[] is statically declared and is not ROM friendly ... array
- * size is fixed. No intention to support larger than 4K indice allocation. ID
- * allocators for ranges smaller than 4K will have a wastage of only 12Bytes
- * with savings in not having to use an indirect access, had it been dynamically
- * allocated.
- */
-#define BCM_MWBMAP_ITEMS_MAX (64 * 1024) /* May increase to 64K */
+int
+bcm_atoipv4(const char *p, struct ipv4_addr *ip)
+{
-#define BCM_MWBMAP_BITS_WORD (NBITS(uint32))
-#define BCM_MWBMAP_WORDS_MAX (BCM_MWBMAP_ITEMS_MAX / BCM_MWBMAP_BITS_WORD)
-#define BCM_MWBMAP_WDMAP_MAX (BCM_MWBMAP_WORDS_MAX / BCM_MWBMAP_BITS_WORD)
-#define BCM_MWBMAP_SHIFT_OP (5)
-#define BCM_MWBMAP_MODOP(ix) ((ix) & (BCM_MWBMAP_BITS_WORD - 1))
-#define BCM_MWBMAP_DIVOP(ix) ((ix) >> BCM_MWBMAP_SHIFT_OP)
-#define BCM_MWBMAP_MULOP(ix) ((ix) << BCM_MWBMAP_SHIFT_OP)
+ int i = 0;
+ char *c;
+ for (;;) {
+ ip->addr[i++] = (uint8)bcm_strtoul(p, &c, 0);
+ if (*c++ != '.' || i == IPV4_ADDR_LEN)
+ break;
+ p = c;
+ }
+ return (i == IPV4_ADDR_LEN);
+}
+#endif /* !BCMROMOFFLOAD_EXCLUDE_BCMUTILS_FUNCS */
-/* Redefine PTR() and/or HDL() conversion to invoke audit for debugging */
-#define BCM_MWBMAP_PTR(hdl) ((struct bcm_mwbmap *)(hdl))
-#define BCM_MWBMAP_HDL(ptr) ((void *)(ptr))
-#if defined(BCM_MWBMAP_DEBUG)
-#define BCM_MWBMAP_AUDIT(mwb) \
- do { \
- ASSERT((mwb != NULL) && \
- (((struct bcm_mwbmap *)(mwb))->magic == (void *)(mwb))); \
- bcm_mwbmap_audit(mwb); \
- } while (0)
-#define MWBMAP_ASSERT(exp) ASSERT(exp)
-#define MWBMAP_DBG(x) printf x
-#else /* !BCM_MWBMAP_DEBUG */
-#define BCM_MWBMAP_AUDIT(mwb) do {} while (0)
-#define MWBMAP_ASSERT(exp) do {} while (0)
-#define MWBMAP_DBG(x)
-#endif /* !BCM_MWBMAP_DEBUG */
+#if defined(CONFIG_USBRNDIS_RETAIL) || defined(NDIS_MINIPORT_DRIVER)
+/* registry routine buffer preparation utility functions:
+ * parameter order is like strncpy, but returns count
+ * of bytes copied. Minimum bytes copied is null char(1)/wchar(2)
+ */
+ulong
+wchar2ascii(char *abuf, ushort *wbuf, ushort wbuflen, ulong abuflen)
+{
+ ulong copyct = 1;
+ ushort i;
-typedef struct bcm_mwbmap { /* Hierarchical multiword bitmap allocator */
- uint16 wmaps; /* Total number of words in free wd bitmap */
- uint16 imaps; /* Total number of words in free id bitmap */
- int32 ifree; /* Count of free indices. Used only in audits */
- uint16 total; /* Total indices managed by multiword bitmap */
+ if (abuflen == 0)
+ return 0;
- void * magic; /* Audit handle parameter from user */
+ /* wbuflen is in bytes */
+ wbuflen /= sizeof(ushort);
- uint32 wd_bitmap[BCM_MWBMAP_WDMAP_MAX]; /* 1st level bitmap of */
-#if !defined(BCM_MWBMAP_USE_CNTSETBITS)
- int8 wd_count[BCM_MWBMAP_WORDS_MAX]; /* free id running count, 1st lvl */
-#endif /* ! BCM_MWBMAP_USE_CNTSETBITS */
+ for (i = 0; i < wbuflen; ++i) {
+ if (--abuflen == 0)
+ break;
+ *abuf++ = (char) *wbuf++;
+ ++copyct;
+ }
+ *abuf = '\0';
- uint32 id_bitmap[0]; /* Second level bitmap */
-} bcm_mwbmap_t;
+ return copyct;
+}
+#endif /* CONFIG_USBRNDIS_RETAIL || NDIS_MINIPORT_DRIVER */
-/* Incarnate a hierarchical multiword bitmap based small index allocator. */
-struct bcm_mwbmap *
-bcm_mwbmap_init(osl_t *osh, uint32 items_max)
+char *
+bcm_ether_ntoa(const struct ether_addr *ea, char *buf)
{
- struct bcm_mwbmap * mwbmap_p;
- uint32 wordix, size, words, extra;
-
- /* Implementation Constraint: Uses 32bit word bitmap */
- MWBMAP_ASSERT(BCM_MWBMAP_BITS_WORD == 32U);
- MWBMAP_ASSERT(BCM_MWBMAP_SHIFT_OP == 5U);
- MWBMAP_ASSERT(ISPOWEROF2(BCM_MWBMAP_ITEMS_MAX));
- MWBMAP_ASSERT((BCM_MWBMAP_ITEMS_MAX % BCM_MWBMAP_BITS_WORD) == 0U);
+ static const char hex[] =
+ {
+ '0', '1', '2', '3', '4', '5', '6', '7',
+ '8', '9', 'a', 'b', 'c', 'd', 'e', 'f'
+ };
+ const uint8 *octet = ea->octet;
+ char *p = buf;
+ int i;
- ASSERT(items_max <= BCM_MWBMAP_ITEMS_MAX);
+ for (i = 0; i < 6; i++, octet++) {
+ *p++ = hex[(*octet >> 4) & 0xf];
+ *p++ = hex[*octet & 0xf];
+ *p++ = ':';
+ }
- /* Determine the number of words needed in the multiword bitmap */
- extra = BCM_MWBMAP_MODOP(items_max);
- words = BCM_MWBMAP_DIVOP(items_max) + ((extra != 0U) ? 1U : 0U);
+ *(p-1) = '\0';
- /* Allocate runtime state of multiword bitmap */
- /* Note: wd_count[] or wd_bitmap[] are not dynamically allocated */
- size = sizeof(bcm_mwbmap_t) + (sizeof(uint32) * words);
- mwbmap_p = (bcm_mwbmap_t *)MALLOC(osh, size);
- if (mwbmap_p == (bcm_mwbmap_t *)NULL) {
- ASSERT(0);
- goto error1;
- }
- memset(mwbmap_p, 0, size);
+ return (buf);
+}
- /* Initialize runtime multiword bitmap state */
- mwbmap_p->imaps = (uint16)words;
- mwbmap_p->ifree = (int32)items_max;
- mwbmap_p->total = (uint16)items_max;
+char *
+bcm_ip_ntoa(struct ipv4_addr *ia, char *buf)
+{
+ snprintf(buf, 16, "%d.%d.%d.%d",
+ ia->addr[0], ia->addr[1], ia->addr[2], ia->addr[3]);
+ return (buf);
+}
- /* Setup magic, for use in audit of handle */
- mwbmap_p->magic = BCM_MWBMAP_HDL(mwbmap_p);
+char *
+bcm_ipv6_ntoa(void *ipv6, char *buf)
+{
+ /* Implementing RFC 5952 Sections 4 + 5 */
+ /* Not thoroughly tested */
+ uint16 tmp[8];
+ uint16 *a = &tmp[0];
+ char *p = buf;
+ int i, i_max = -1, cnt = 0, cnt_max = 1;
+ uint8 *a4 = NULL;
+ memcpy((uint8 *)&tmp[0], (uint8 *)ipv6, IPV6_ADDR_LEN);
- /* Setup the second level bitmap of free indices */
- /* Mark all indices as available */
- for (wordix = 0U; wordix < mwbmap_p->imaps; wordix++) {
- mwbmap_p->id_bitmap[wordix] = (uint32)(~0U);
-#if !defined(BCM_MWBMAP_USE_CNTSETBITS)
- mwbmap_p->wd_count[wordix] = BCM_MWBMAP_BITS_WORD;
-#endif /* ! BCM_MWBMAP_USE_CNTSETBITS */
+ for (i = 0; i < IPV6_ADDR_LEN/2; i++) {
+ if (a[i]) {
+ if (cnt > cnt_max) {
+ cnt_max = cnt;
+ i_max = i - cnt;
+ }
+ cnt = 0;
+ } else
+ cnt++;
}
-
- /* Ensure that extra indices are tagged as un-available */
- if (extra) { /* fixup the free ids in last bitmap and wd_count */
- uint32 * bmap_p = &mwbmap_p->id_bitmap[mwbmap_p->imaps - 1];
- *bmap_p ^= (uint32)(~0U << extra); /* fixup bitmap */
-#if !defined(BCM_MWBMAP_USE_CNTSETBITS)
- mwbmap_p->wd_count[mwbmap_p->imaps - 1] = (int8)extra; /* fixup count */
-#endif /* ! BCM_MWBMAP_USE_CNTSETBITS */
+ if (cnt > cnt_max) {
+ cnt_max = cnt;
+ i_max = i - cnt;
}
+ if (i_max == 0 &&
+ /* IPv4-translated: ::ffff:0:a.b.c.d */
+ ((cnt_max == 4 && a[4] == 0xffff && a[5] == 0) ||
+ /* IPv4-mapped: ::ffff:a.b.c.d */
+ (cnt_max == 5 && a[5] == 0xffff)))
+ a4 = (uint8*) (a + 6);
- /* Setup the first level bitmap hierarchy */
- extra = BCM_MWBMAP_MODOP(mwbmap_p->imaps);
- words = BCM_MWBMAP_DIVOP(mwbmap_p->imaps) + ((extra != 0U) ? 1U : 0U);
-
- mwbmap_p->wmaps = (uint16)words;
-
- for (wordix = 0U; wordix < mwbmap_p->wmaps; wordix++)
- mwbmap_p->wd_bitmap[wordix] = (uint32)(~0U);
- if (extra) {
- uint32 * bmap_p = &mwbmap_p->wd_bitmap[mwbmap_p->wmaps - 1];
- *bmap_p ^= (uint32)(~0U << extra); /* fixup bitmap */
+ for (i = 0; i < IPV6_ADDR_LEN/2; i++) {
+ if ((uint8*) (a + i) == a4) {
+ snprintf(p, 16, ":%u.%u.%u.%u", a4[0], a4[1], a4[2], a4[3]);
+ break;
+ } else if (i == i_max) {
+ *p++ = ':';
+ i += cnt_max - 1;
+ p[0] = ':';
+ p[1] = '\0';
+ } else {
+ if (i)
+ *p++ = ':';
+ p += snprintf(p, 8, "%x", ntoh16(a[i]));
+ }
}
- return mwbmap_p;
-
-error1:
- return BCM_MWBMAP_INVALID_HDL;
+ return buf;
}
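/*
 * Example output of bcm_ipv6_ntoa() above (illustrative, not part of the
 * original change): the longest run of zero groups collapses to "::" and
 * IPv4-mapped/translated tails keep dotted-quad form, per RFC 5952:
 *
 *   2001:0db8:0000:0000:0000:0000:0000:0001  ->  "2001:db8::1"
 *   ::ffff:192.168.1.1 (IPv4-mapped)         ->  "::ffff:192.168.1.1"
 */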
+#ifdef BCMDRIVER
-/* Release resources used by multiword bitmap based small index allocator. */
void
-bcm_mwbmap_fini(osl_t * osh, struct bcm_mwbmap * mwbmap_hdl)
+bcm_mdelay(uint ms)
{
- bcm_mwbmap_t * mwbmap_p;
-
- BCM_MWBMAP_AUDIT(mwbmap_hdl);
- mwbmap_p = BCM_MWBMAP_PTR(mwbmap_hdl);
+ uint i;
- MFREE(osh, mwbmap_p, sizeof(struct bcm_mwbmap)
- + (sizeof(uint32) * mwbmap_p->imaps));
- return;
+ for (i = 0; i < ms; i++) {
+ OSL_DELAY(1000);
+ }
}
-/* Allocate a unique small index using a multiword bitmap index allocator. */
-uint32 BCMFASTPATH
-bcm_mwbmap_alloc(struct bcm_mwbmap * mwbmap_hdl)
-{
- bcm_mwbmap_t * mwbmap_p;
- uint32 wordix, bitmap;
-
- BCM_MWBMAP_AUDIT(mwbmap_hdl);
- mwbmap_p = BCM_MWBMAP_PTR(mwbmap_hdl);
-
- /* Start with the first hierarchy */
- for (wordix = 0; wordix < mwbmap_p->wmaps; ++wordix) {
-
- bitmap = mwbmap_p->wd_bitmap[wordix]; /* get the word bitmap */
-
- if (bitmap != 0U) {
-
- uint32 count, bitix, *bitmap_p;
-
- bitmap_p = &mwbmap_p->wd_bitmap[wordix];
-
- /* clear all except trailing 1 */
- bitmap = (uint32)(((int)(bitmap)) & (-((int)(bitmap))));
- MWBMAP_ASSERT(C_bcm_count_leading_zeros(bitmap) ==
- bcm_count_leading_zeros(bitmap));
- bitix = (BCM_MWBMAP_BITS_WORD - 1)
- - (uint32)bcm_count_leading_zeros(bitmap); /* use asm clz */
- wordix = BCM_MWBMAP_MULOP(wordix) + bitix;
-
- /* Clear bit if wd count is 0, without conditional branch */
-#if defined(BCM_MWBMAP_USE_CNTSETBITS)
- count = bcm_cntsetbits(mwbmap_p->id_bitmap[wordix]) - 1;
-#else /* ! BCM_MWBMAP_USE_CNTSETBITS */
- mwbmap_p->wd_count[wordix]--;
- count = (uint32)mwbmap_p->wd_count[wordix];
- MWBMAP_ASSERT(count ==
- (bcm_cntsetbits(mwbmap_p->id_bitmap[wordix]) - 1));
-#endif /* ! BCM_MWBMAP_USE_CNTSETBITS */
- MWBMAP_ASSERT(count >= 0);
-
- /* clear wd_bitmap bit if id_map count is 0 */
- bitmap = ((uint32)(count == 0)) << BCM_MWBMAP_MODOP(bitix);
-
- MWBMAP_DBG((
- "Lvl1: bitix<%02u> wordix<%02u>: %08x ^ %08x = %08x wfree %d",
- bitix, wordix, *bitmap_p, bitmap, (*bitmap_p) ^ bitmap, count));
-
- *bitmap_p ^= bitmap;
-
- /* Use bitix in the second hierarchy */
- bitmap_p = &mwbmap_p->id_bitmap[wordix];
-
- bitmap = mwbmap_p->id_bitmap[wordix]; /* get the id bitmap */
- MWBMAP_ASSERT(bitmap != 0U);
-
- /* clear all except trailing 1 */
- bitmap = (uint32)(((int)(bitmap)) & (-((int)(bitmap))));
- MWBMAP_ASSERT(C_bcm_count_leading_zeros(bitmap) ==
- bcm_count_leading_zeros(bitmap));
- bitix = BCM_MWBMAP_MULOP(wordix)
- + (BCM_MWBMAP_BITS_WORD - 1)
- - (uint32)bcm_count_leading_zeros(bitmap); /* use asm clz */
- mwbmap_p->ifree--; /* decrement system wide free count */
- MWBMAP_ASSERT(mwbmap_p->ifree >= 0);
- MWBMAP_DBG((
- "Lvl2: bitix<%02u> wordix<%02u>: %08x ^ %08x = %08x ifree %d",
- bitix, wordix, *bitmap_p, bitmap, (*bitmap_p) ^ bitmap,
- mwbmap_p->ifree));
- *bitmap_p ^= bitmap; /* mark as allocated = 1b0 */
- return bitix;
- }
- }
+#if defined(DHD_DEBUG)
+/* pretty hex print a pkt buffer chain */
+void
+prpkt(const char *msg, osl_t *osh, void *p0)
+{
+ void *p;
- ASSERT(mwbmap_p->ifree == 0);
+ if (msg && (msg[0] != '\0'))
+ printf("%s:\n", msg);
- return BCM_MWBMAP_INVALID_IDX;
+ for (p = p0; p; p = PKTNEXT(osh, p))
+ prhex(NULL, PKTDATA(osh, p), PKTLEN(osh, p));
}
+#endif
-/* Force an index at a specified position to be in use */
-void
-bcm_mwbmap_force(struct bcm_mwbmap * mwbmap_hdl, uint32 bitix)
+/* Takes an Ethernet frame and sets out-of-bound PKTPRIO.
+ * Also updates the inplace vlan tag if requested.
+ * For debugging, it returns an indication of what it did.
+ */
+uint BCMFASTPATH
+pktsetprio(void *pkt, bool update_vtag)
{
- bcm_mwbmap_t * mwbmap_p;
- uint32 count, wordix, bitmap, *bitmap_p;
-
- BCM_MWBMAP_AUDIT(mwbmap_hdl);
- mwbmap_p = BCM_MWBMAP_PTR(mwbmap_hdl);
+ struct ether_header *eh;
+ struct ethervlan_header *evh;
+ uint8 *pktdata;
+ int priority = 0;
+ int rc = 0;
- ASSERT(bitix < mwbmap_p->total);
+ pktdata = (uint8 *)PKTDATA(OSH_NULL, pkt);
+ ASSERT(ISALIGNED((uintptr)pktdata, sizeof(uint16)));
- /* Start with second hierarchy */
- wordix = BCM_MWBMAP_DIVOP(bitix);
- bitmap = (uint32)(1U << BCM_MWBMAP_MODOP(bitix));
- bitmap_p = &mwbmap_p->id_bitmap[wordix];
+ eh = (struct ether_header *) pktdata;
- ASSERT((*bitmap_p & bitmap) == bitmap);
+ if (eh->ether_type == hton16(ETHER_TYPE_8021Q)) {
+ uint16 vlan_tag;
+ int vlan_prio, dscp_prio = 0;
- mwbmap_p->ifree--; /* update free count */
- ASSERT(mwbmap_p->ifree >= 0);
+ evh = (struct ethervlan_header *)eh;
- MWBMAP_DBG(("Lvl2: bitix<%u> wordix<%u>: %08x ^ %08x = %08x ifree %d",
- bitix, wordix, *bitmap_p, bitmap, (*bitmap_p) ^ bitmap,
- mwbmap_p->ifree));
+ vlan_tag = ntoh16(evh->vlan_tag);
+ vlan_prio = (int) (vlan_tag >> VLAN_PRI_SHIFT) & VLAN_PRI_MASK;
- *bitmap_p ^= bitmap; /* mark as in use */
+ if ((evh->ether_type == hton16(ETHER_TYPE_IP)) ||
+ (evh->ether_type == hton16(ETHER_TYPE_IPV6))) {
+ uint8 *ip_body = pktdata + sizeof(struct ethervlan_header);
+ uint8 tos_tc = IP_TOS46(ip_body);
+ dscp_prio = (int)(tos_tc >> IPV4_TOS_PREC_SHIFT);
+ }
- /* Update first hierarchy */
- bitix = wordix;
+ /* DSCP priority gets precedence over 802.1P (vlan tag) */
+ if (dscp_prio != 0) {
+ priority = dscp_prio;
+ rc |= PKTPRIO_VDSCP;
+ } else {
+ priority = vlan_prio;
+ rc |= PKTPRIO_VLAN;
+ }
+ /*
+ * If the DSCP priority is not the same as the VLAN priority,
+ * then overwrite the priority field in the vlan tag, with the
+ * DSCP priority value. This is required for Linux APs because
+ * the VLAN driver on Linux, overwrites the skb->priority field
+ * with the priority value in the vlan tag
+ */
+ if (update_vtag && (priority != vlan_prio)) {
+ vlan_tag &= ~(VLAN_PRI_MASK << VLAN_PRI_SHIFT);
+ vlan_tag |= (uint16)priority << VLAN_PRI_SHIFT;
+ evh->vlan_tag = hton16(vlan_tag);
+ rc |= PKTPRIO_UPD;
+ }
+#if defined(EAPOL_PKT_PRIO) || defined(DHD_LOSSLESS_ROAMING)
+ } else if (eh->ether_type == hton16(ETHER_TYPE_802_1X)) {
+ priority = PRIO_8021D_NC;
+ rc = PKTPRIO_DSCP;
+#endif /* EAPOL_PKT_PRIO || DHD_LOSSLESS_ROAMING */
+ } else if ((eh->ether_type == hton16(ETHER_TYPE_IP)) ||
+ (eh->ether_type == hton16(ETHER_TYPE_IPV6))) {
+ uint8 *ip_body = pktdata + sizeof(struct ether_header);
+ uint8 tos_tc = IP_TOS46(ip_body);
+ uint8 dscp = tos_tc >> IPV4_TOS_DSCP_SHIFT;
+ switch (dscp) {
+ case DSCP_EF:
+ priority = PRIO_8021D_VO;
+ break;
+ case DSCP_AF31:
+ case DSCP_AF32:
+ case DSCP_AF33:
+ priority = PRIO_8021D_CL;
+ break;
+ case DSCP_AF21:
+ case DSCP_AF22:
+ case DSCP_AF23:
+ case DSCP_AF11:
+ case DSCP_AF12:
+ case DSCP_AF13:
+ priority = PRIO_8021D_EE;
+ break;
+ default:
+ priority = (int)(tos_tc >> IPV4_TOS_PREC_SHIFT);
+ break;
+ }
- wordix = BCM_MWBMAP_DIVOP(bitix);
- bitmap_p = &mwbmap_p->wd_bitmap[wordix];
+ rc |= PKTPRIO_DSCP;
+ }
-#if defined(BCM_MWBMAP_USE_CNTSETBITS)
- count = bcm_cntsetbits(mwbmap_p->id_bitmap[bitix]);
-#else /* ! BCM_MWBMAP_USE_CNTSETBITS */
- mwbmap_p->wd_count[bitix]--;
- count = (uint32)mwbmap_p->wd_count[bitix];
- MWBMAP_ASSERT(count == bcm_cntsetbits(mwbmap_p->id_bitmap[bitix]));
-#endif /* ! BCM_MWBMAP_USE_CNTSETBITS */
- MWBMAP_ASSERT(count >= 0);
+ ASSERT(priority >= 0 && priority <= MAXPRIO);
+ PKTSETPRIO(pkt, priority);
+ return (rc | priority);
+}
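/*
 * Summary of the mapping implemented above (descriptive only, not part of the
 * original change): for untagged IP frames the DSCP codepoint selects the
 * 802.1D priority as
 *
 *   DSCP_EF                 -> PRIO_8021D_VO
 *   DSCP_AF31/AF32/AF33     -> PRIO_8021D_CL
 *   DSCP_AF2x / DSCP_AF1x   -> PRIO_8021D_EE
 *   anything else           -> IP precedence bits (tos_tc >> IPV4_TOS_PREC_SHIFT)
 *
 * For 802.1Q-tagged frames a non-zero precedence derived from the IP header
 * overrides the VLAN PCP, and the tag itself is rewritten when update_vtag is
 * TRUE and the two disagree.
 */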
- bitmap = (uint32)(count == 0) << BCM_MWBMAP_MODOP(bitix);
+/* lookup user priority for specified DSCP */
+static uint8
+dscp2up(uint8 *up_table, uint8 dscp)
+{
+ uint8 user_priority = 255;
- MWBMAP_DBG(("Lvl1: bitix<%02lu> wordix<%02u>: %08x ^ %08x = %08x wfree %d",
- BCM_MWBMAP_MODOP(bitix), wordix, *bitmap_p, bitmap,
- (*bitmap_p) ^ bitmap, count));
+ /* lookup up from table if parameters valid */
+ if (up_table != NULL && dscp < UP_TABLE_MAX) {
+ user_priority = up_table[dscp];
+ }
- *bitmap_p ^= bitmap; /* mark as in use */
+ /* 255 is unused value so return up from dscp */
+ if (user_priority == 255) {
+ user_priority = dscp >> (IPV4_TOS_PREC_SHIFT - IPV4_TOS_DSCP_SHIFT);
+ }
- return;
+ return user_priority;
}
-/* Free a previously allocated index back into the multiword bitmap allocator */
-void BCMFASTPATH
-bcm_mwbmap_free(struct bcm_mwbmap * mwbmap_hdl, uint32 bitix)
+/* set user priority by QoS Map Set table (UP table), table size is UP_TABLE_MAX */
+uint BCMFASTPATH
+pktsetprio_qms(void *pkt, uint8* up_table, bool update_vtag)
{
- bcm_mwbmap_t * mwbmap_p;
- uint32 wordix, bitmap, *bitmap_p;
-
- BCM_MWBMAP_AUDIT(mwbmap_hdl);
- mwbmap_p = BCM_MWBMAP_PTR(mwbmap_hdl);
-
- ASSERT(bitix < mwbmap_p->total);
+ if (up_table) {
+ uint8 *pktdata;
+ uint pktlen;
+ uint8 dscp;
+ uint user_priority = 0;
+ uint rc = 0;
- /* Start with second level hierarchy */
- wordix = BCM_MWBMAP_DIVOP(bitix);
- bitmap = (1U << BCM_MWBMAP_MODOP(bitix));
- bitmap_p = &mwbmap_p->id_bitmap[wordix];
+ pktdata = (uint8 *)PKTDATA(OSH_NULL, pkt);
+ pktlen = PKTLEN(OSH_NULL, pkt);
- ASSERT((*bitmap_p & bitmap) == 0U); /* ASSERT not a double free */
+ if (pktgetdscp(pktdata, pktlen, &dscp)) {
+ rc = PKTPRIO_DSCP;
+ user_priority = dscp2up(up_table, dscp);
+ PKTSETPRIO(pkt, user_priority);
+ }
- mwbmap_p->ifree++; /* update free count */
- ASSERT(mwbmap_p->ifree <= mwbmap_p->total);
+ return (rc | user_priority);
+ } else {
+ return pktsetprio(pkt, update_vtag);
+ }
+}
- MWBMAP_DBG(("Lvl2: bitix<%02u> wordix<%02u>: %08x | %08x = %08x ifree %d",
- bitix, wordix, *bitmap_p, bitmap, (*bitmap_p) | bitmap,
- mwbmap_p->ifree));
+/* Returns TRUE and DSCP if IP header found, FALSE otherwise.
+ */
+bool BCMFASTPATH
+pktgetdscp(uint8 *pktdata, uint pktlen, uint8 *dscp)
+{
+ struct ether_header *eh;
+ struct ethervlan_header *evh;
+ uint8 *ip_body;
+ bool rc = FALSE;
- *bitmap_p |= bitmap; /* mark as available */
+ /* minimum length is ether header and IP header */
+ if (pktlen < sizeof(struct ether_header) + IPV4_MIN_HEADER_LEN)
+ return FALSE;
- /* Now update first level hierarchy */
+ eh = (struct ether_header *) pktdata;
- bitix = wordix;
+ if (eh->ether_type == HTON16(ETHER_TYPE_IP)) {
+ ip_body = pktdata + sizeof(struct ether_header);
+ *dscp = IP_DSCP46(ip_body);
+ rc = TRUE;
+ }
+ else if (eh->ether_type == HTON16(ETHER_TYPE_8021Q)) {
+ evh = (struct ethervlan_header *)eh;
- wordix = BCM_MWBMAP_DIVOP(bitix); /* first level's word index */
- bitmap = (1U << BCM_MWBMAP_MODOP(bitix));
- bitmap_p = &mwbmap_p->wd_bitmap[wordix];
+ /* minimum length is ethervlan header and IP header */
+ if (pktlen >= sizeof(struct ethervlan_header) + IPV4_MIN_HEADER_LEN &&
+ evh->ether_type == HTON16(ETHER_TYPE_IP)) {
+ ip_body = pktdata + sizeof(struct ethervlan_header);
+ *dscp = IP_DSCP46(ip_body);
+ rc = TRUE;
+ }
+ }
-#if !defined(BCM_MWBMAP_USE_CNTSETBITS)
- mwbmap_p->wd_count[bitix]++;
-#endif // endif
+ return rc;
+}
-#if defined(BCM_MWBMAP_DEBUG)
- {
- uint32 count;
-#if defined(BCM_MWBMAP_USE_CNTSETBITS)
- count = bcm_cntsetbits(mwbmap_p->id_bitmap[bitix]);
-#else /* ! BCM_MWBMAP_USE_CNTSETBITS */
- count = mwbmap_p->wd_count[bitix];
- MWBMAP_ASSERT(count == bcm_cntsetbits(mwbmap_p->id_bitmap[bitix]));
-#endif /* ! BCM_MWBMAP_USE_CNTSETBITS */
+/* Added to adjust the priority of 802.1x packets */
+void
+pktset8021xprio(void *pkt, int prio)
+{
+ struct ether_header *eh;
+ uint8 *pktdata;
+ if (prio == PKTPRIO(pkt))
+ return;
+ pktdata = (uint8 *)PKTDATA(OSH_NULL, pkt);
+ ASSERT(ISALIGNED((uintptr)pktdata, sizeof(uint16)));
+ eh = (struct ether_header *) pktdata;
+ if (eh->ether_type == hton16(ETHER_TYPE_802_1X)) {
+ ASSERT(prio >= 0 && prio <= MAXPRIO);
+ PKTSETPRIO(pkt, prio);
+ }
+}
- MWBMAP_ASSERT(count <= BCM_MWBMAP_BITS_WORD);
+/* usr_prio range from low to high with usr_prio value */
+static bool
+up_table_set(uint8 *up_table, uint8 usr_prio, uint8 low, uint8 high)
+{
+ int i;
- MWBMAP_DBG(("Lvl1: bitix<%02u> wordix<%02u>: %08x | %08x = %08x wfree %d",
- bitix, wordix, *bitmap_p, bitmap, (*bitmap_p) | bitmap, count));
+ if (usr_prio > 7 || low > high || low >= UP_TABLE_MAX || high >= UP_TABLE_MAX) {
+ return FALSE;
}
-#endif /* BCM_MWBMAP_DEBUG */
- *bitmap_p |= bitmap;
+ for (i = low; i <= high; i++) {
+ up_table[i] = usr_prio;
+ }
- return;
+ return TRUE;
}
-/* Fetch the toal number of free indices in the multiword bitmap allocator */
-uint32
-bcm_mwbmap_free_cnt(struct bcm_mwbmap * mwbmap_hdl)
+/* set user priority table */
+int BCMFASTPATH
+wl_set_up_table(uint8 *up_table, bcm_tlv_t *qos_map_ie)
{
- bcm_mwbmap_t * mwbmap_p;
+ uint8 len;
- BCM_MWBMAP_AUDIT(mwbmap_hdl);
- mwbmap_p = BCM_MWBMAP_PTR(mwbmap_hdl);
+ if (up_table == NULL || qos_map_ie == NULL) {
+ return BCME_ERROR;
+ }
- ASSERT(mwbmap_p->ifree >= 0);
+ /* clear table to check table was set or not */
+ memset(up_table, 0xff, UP_TABLE_MAX);
- return (uint32)mwbmap_p->ifree;
-}
+ /* length of QoS Map IE must be 16+n*2, n is number of exceptions */
+ if (qos_map_ie != NULL && qos_map_ie->id == DOT11_MNG_QOS_MAP_ID &&
+ (len = qos_map_ie->len) >= QOS_MAP_FIXED_LENGTH &&
+ (len % 2) == 0) {
+ uint8 *except_ptr = (uint8 *)qos_map_ie->data;
+ uint8 except_len = len - QOS_MAP_FIXED_LENGTH;
+ uint8 *range_ptr = except_ptr + except_len;
+ int i;
-/* Determine whether an index is inuse or free */
-bool
-bcm_mwbmap_isfree(struct bcm_mwbmap * mwbmap_hdl, uint32 bitix)
-{
- bcm_mwbmap_t * mwbmap_p;
- uint32 wordix, bitmap;
+ /* fill in ranges */
+ for (i = 0; i < QOS_MAP_FIXED_LENGTH; i += 2) {
+ uint8 low = range_ptr[i];
+ uint8 high = range_ptr[i + 1];
+ if (low == 255 && high == 255) {
+ continue;
+ }
- BCM_MWBMAP_AUDIT(mwbmap_hdl);
- mwbmap_p = BCM_MWBMAP_PTR(mwbmap_hdl);
+ if (!up_table_set(up_table, i / 2, low, high)) {
+ /* clear the table on failure */
+ memset(up_table, 0xff, UP_TABLE_MAX);
+ return BCME_ERROR;
+ }
+ }
- ASSERT(bitix < mwbmap_p->total);
+ /* update exceptions */
+ for (i = 0; i < except_len; i += 2) {
+ uint8 dscp = except_ptr[i];
+ uint8 usr_prio = except_ptr[i+1];
- wordix = BCM_MWBMAP_DIVOP(bitix);
- bitmap = (1U << BCM_MWBMAP_MODOP(bitix));
+ /* exceptions with invalid dscp/usr_prio are ignored */
+ up_table_set(up_table, usr_prio, dscp, dscp);
+ }
+ }
- return ((mwbmap_p->id_bitmap[wordix] & bitmap) != 0U);
+ return BCME_OK;
}
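/*
 * Layout assumed by the parser above (illustrative, not part of the original
 * change): the QoS Map Set element carries n optional (DSCP, UP) exception
 * pairs followed by eight (low, high) DSCP ranges, one per user priority
 * 0..7 (the QOS_MAP_FIXED_LENGTH tail). A range of 255,255 leaves that UP
 * unmapped. For example, a range pair (48, 63) in the slot for UP 6 maps
 * DSCP 48-63 to UP 6, and an exception pair (46, 6) pins DSCP 46 (EF) to
 * UP 6 regardless of the ranges.
 */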
-/* Debug dump a multiword bitmap allocator */
-void
-bcm_mwbmap_show(struct bcm_mwbmap * mwbmap_hdl)
-{
- uint32 ix, count;
- bcm_mwbmap_t * mwbmap_p;
+/* The 0.5KB string table is not removed by compiler even though it's unused */
- BCM_MWBMAP_AUDIT(mwbmap_hdl);
- mwbmap_p = BCM_MWBMAP_PTR(mwbmap_hdl);
+static char bcm_undeferrstr[32];
+static const char *bcmerrorstrtable[] = BCMERRSTRINGTABLE;
- printf("mwbmap_p %p wmaps %u imaps %u ifree %d total %u\n",
- OSL_OBFUSCATE_BUF((void *)mwbmap_p),
- mwbmap_p->wmaps, mwbmap_p->imaps, mwbmap_p->ifree, mwbmap_p->total);
- for (ix = 0U; ix < mwbmap_p->wmaps; ix++) {
- printf("\tWDMAP:%2u. 0x%08x\t", ix, mwbmap_p->wd_bitmap[ix]);
- bcm_bitprint32(mwbmap_p->wd_bitmap[ix]);
- printf("\n");
- }
- for (ix = 0U; ix < mwbmap_p->imaps; ix++) {
-#if defined(BCM_MWBMAP_USE_CNTSETBITS)
- count = bcm_cntsetbits(mwbmap_p->id_bitmap[ix]);
-#else /* ! BCM_MWBMAP_USE_CNTSETBITS */
- count = (uint32)mwbmap_p->wd_count[ix];
- MWBMAP_ASSERT(count == bcm_cntsetbits(mwbmap_p->id_bitmap[ix]));
-#endif /* ! BCM_MWBMAP_USE_CNTSETBITS */
- printf("\tIDMAP:%2u. 0x%08x %02u\t", ix, mwbmap_p->id_bitmap[ix], count);
- bcm_bitprint32(mwbmap_p->id_bitmap[ix]);
- printf("\n");
+/* Convert the error codes into related error strings */
+const char *
+bcmerrorstr(int bcmerror)
+{
+ /* check if someone added a bcmerror code but forgot to add errorstring */
+ ASSERT(ABS(BCME_LAST) == (ARRAYSIZE(bcmerrorstrtable) - 1));
+
+ if (bcmerror > 0 || bcmerror < BCME_LAST) {
+ snprintf(bcm_undeferrstr, sizeof(bcm_undeferrstr), "Undefined error %d", bcmerror);
+ return bcm_undeferrstr;
}
- return;
+ ASSERT(strlen(bcmerrorstrtable[-bcmerror]) < BCME_STRLEN);
+
+ return bcmerrorstrtable[-bcmerror];
}
-/* Audit a hierarchical multiword bitmap */
-void
-bcm_mwbmap_audit(struct bcm_mwbmap * mwbmap_hdl)
-{
- bcm_mwbmap_t * mwbmap_p;
- uint32 count, free_cnt = 0U, wordix, idmap_ix, bitix, *bitmap_p;
- mwbmap_p = BCM_MWBMAP_PTR(mwbmap_hdl);
+/* iovar table lookup */
+/* could mandate sorted tables and do a binary search */
+const bcm_iovar_t*
+bcm_iovar_lookup(const bcm_iovar_t *table, const char *name)
+{
+ const bcm_iovar_t *vi;
+ const char *lookup_name;
- for (wordix = 0U; wordix < mwbmap_p->wmaps; ++wordix) {
+ /* skip any ':' delimited option prefixes */
+ lookup_name = strrchr(name, ':');
+ if (lookup_name != NULL)
+ lookup_name++;
+ else
+ lookup_name = name;
- bitmap_p = &mwbmap_p->wd_bitmap[wordix];
+ ASSERT(table != NULL);
- for (bitix = 0U; bitix < BCM_MWBMAP_BITS_WORD; bitix++) {
- if ((*bitmap_p) & (1 << bitix)) {
- idmap_ix = BCM_MWBMAP_MULOP(wordix) + bitix;
-#if defined(BCM_MWBMAP_USE_CNTSETBITS)
- count = bcm_cntsetbits(mwbmap_p->id_bitmap[idmap_ix]);
-#else /* ! BCM_MWBMAP_USE_CNTSETBITS */
- count = (uint32)mwbmap_p->wd_count[idmap_ix];
- ASSERT(count == bcm_cntsetbits(mwbmap_p->id_bitmap[idmap_ix]));
-#endif /* ! BCM_MWBMAP_USE_CNTSETBITS */
- ASSERT(count != 0U);
- free_cnt += count;
- }
- }
+ for (vi = table; vi->name; vi++) {
+ if (!strcmp(vi->name, lookup_name))
+ return vi;
}
+ /* ran to end of table */
- ASSERT((int)free_cnt == mwbmap_p->ifree);
+ return NULL; /* var name not found */
}
-/* END : Multiword bitmap based 64bit to Unique 32bit Id allocator. */
-
-/* Simple 16bit Id allocator using a stack implementation. */
-typedef struct id16_map {
- uint32 failures; /* count of failures */
- void *dbg; /* debug placeholder */
- uint16 total; /* total number of ids managed by allocator */
- uint16 start; /* start value of 16bit ids to be managed */
- int stack_idx; /* index into stack of available ids */
- uint16 stack[0]; /* stack of 16 bit ids */
-} id16_map_t;
-
-#define ID16_MAP_SZ(items) (sizeof(id16_map_t) + \
- (sizeof(uint16) * (items)))
-
-#if defined(BCM_DBG)
-
-/* Uncomment BCM_DBG_ID16 to debug double free */
-/* #define BCM_DBG_ID16 */
-
-typedef struct id16_map_dbg {
- uint16 total;
- bool avail[0];
-} id16_map_dbg_t;
-#define ID16_MAP_DBG_SZ(items) (sizeof(id16_map_dbg_t) + \
- (sizeof(bool) * (items)))
-#define ID16_MAP_MSG(x) print x
-#else
-#define ID16_MAP_MSG(x)
-#endif /* BCM_DBG */
-void * /* Construct an id16 allocator: [start_val16 .. start_val16+total_ids) */
-id16_map_init(osl_t *osh, uint16 total_ids, uint16 start_val16)
+int
+bcm_iovar_lencheck(const bcm_iovar_t *vi, void *arg, int len, bool set)
{
- uint16 idx, val16;
- id16_map_t * id16_map;
+ int bcmerror = 0;
+ BCM_REFERENCE(arg);
- ASSERT(total_ids > 0);
+ /* length check on io buf */
+ switch (vi->type) {
+ case IOVT_BOOL:
+ case IOVT_INT8:
+ case IOVT_INT16:
+ case IOVT_INT32:
+ case IOVT_UINT8:
+ case IOVT_UINT16:
+ case IOVT_UINT32:
+ /* all integers are int32 sized args at the ioctl interface */
+ if (len < (int)sizeof(int)) {
+ bcmerror = BCME_BUFTOOSHORT;
+ }
+ break;
- /* A start_val16 of ID16_UNDEFINED, allows the caller to fill the id16 map
- * with random values.
- */
- ASSERT((start_val16 == ID16_UNDEFINED) ||
- (start_val16 + total_ids) < ID16_INVALID);
+ case IOVT_BUFFER:
+ /* buffer must meet minimum length requirement */
+ if (len < vi->minlen) {
+ bcmerror = BCME_BUFTOOSHORT;
+ }
+ break;
- id16_map = (id16_map_t *) MALLOC(osh, ID16_MAP_SZ(total_ids));
- if (id16_map == NULL) {
- return NULL;
- }
+ case IOVT_VOID:
+ if (!set) {
+ /* Cannot return nil... */
+ bcmerror = BCME_UNSUPPORTED;
+ } else if (len) {
+ /* Set is an action w/o parameters */
+ bcmerror = BCME_BUFTOOLONG;
+ }
+ break;
- id16_map->total = total_ids;
- id16_map->start = start_val16;
- id16_map->failures = 0;
- id16_map->dbg = NULL;
+ default:
+ /* unknown type for length check in iovar info */
+ ASSERT(0);
+ bcmerror = BCME_UNSUPPORTED;
+ }
- /*
- * Populate stack with 16bit id values, commencing with start_val16.
- * if start_val16 is ID16_UNDEFINED, then do not populate the id16 map.
- */
- id16_map->stack_idx = -1;
+ return bcmerror;
+}
- if (id16_map->start != ID16_UNDEFINED) {
- val16 = start_val16;
+#endif /* BCMDRIVER */
- for (idx = 0; idx < total_ids; idx++, val16++) {
- id16_map->stack_idx = idx;
- id16_map->stack[id16_map->stack_idx] = val16;
- }
- }
+#ifdef BCM_OBJECT_TRACE
-#if defined(BCM_DBG) && defined(BCM_DBG_ID16)
- if (id16_map->start != ID16_UNDEFINED) {
- id16_map->dbg = MALLOC(osh, ID16_MAP_DBG_SZ(total_ids));
+#define BCM_OBJECT_MERGE_SAME_OBJ 0
- if (id16_map->dbg) {
- id16_map_dbg_t *id16_map_dbg = (id16_map_dbg_t *)id16_map->dbg;
+/* Some places that may add / remove objects on the trace list for Linux: */
+/* add: osl_alloc_skb dev_alloc_skb skb_realloc_headroom dhd_start_xmit */
+/* remove: osl_pktfree dev_kfree_skb netif_rx */
- id16_map_dbg->total = total_ids;
- for (idx = 0; idx < total_ids; idx++) {
- id16_map_dbg->avail[idx] = TRUE;
- }
- }
- }
-#endif /* BCM_DBG && BCM_DBG_ID16 */
+#define BCM_OBJDBG_COUNT (1024 * 100)
+static spinlock_t dbgobj_lock;
+#define BCM_OBJDBG_LOCK_INIT() spin_lock_init(&dbgobj_lock)
+#define BCM_OBJDBG_LOCK_DESTROY()
+#define BCM_OBJDBG_LOCK spin_lock_irqsave
+#define BCM_OBJDBG_UNLOCK spin_unlock_irqrestore
- return (void *)id16_map;
-}
+#define BCM_OBJDBG_ADDTOHEAD 0
+#define BCM_OBJDBG_ADDTOTAIL 1
-void * /* Destruct an id16 allocator instance */
-id16_map_fini(osl_t *osh, void * id16_map_hndl)
-{
- uint16 total_ids;
- id16_map_t * id16_map;
+#define BCM_OBJDBG_CALLER_LEN 32
+struct bcm_dbgobj {
+ struct bcm_dbgobj *prior;
+ struct bcm_dbgobj *next;
+ uint32 flag;
+ void *obj;
+ uint32 obj_sn;
+ uint32 obj_state;
+ uint32 line;
+ char caller[BCM_OBJDBG_CALLER_LEN];
+};
- if (id16_map_hndl == NULL)
- return NULL;
+static struct bcm_dbgobj *dbgobj_freehead = NULL;
+static struct bcm_dbgobj *dbgobj_freetail = NULL;
+static struct bcm_dbgobj *dbgobj_objhead = NULL;
+static struct bcm_dbgobj *dbgobj_objtail = NULL;
- id16_map = (id16_map_t *)id16_map_hndl;
+static uint32 dbgobj_sn = 0;
+static int dbgobj_count = 0;
+static struct bcm_dbgobj bcm_dbg_objs[BCM_OBJDBG_COUNT];
- total_ids = id16_map->total;
- ASSERT(total_ids > 0);
+void
+bcm_object_trace_init(void)
+{
+ int i = 0;
+ BCM_OBJDBG_LOCK_INIT();
+ memset(&bcm_dbg_objs, 0x00, sizeof(struct bcm_dbgobj) * BCM_OBJDBG_COUNT);
+ dbgobj_freehead = &bcm_dbg_objs[0];
+ dbgobj_freetail = &bcm_dbg_objs[BCM_OBJDBG_COUNT - 1];
-#if defined(BCM_DBG) && defined(BCM_DBG_ID16)
- if (id16_map->dbg) {
- MFREE(osh, id16_map->dbg, ID16_MAP_DBG_SZ(total_ids));
- id16_map->dbg = NULL;
+ for (i = 0; i < BCM_OBJDBG_COUNT; ++i) {
+ bcm_dbg_objs[i].next = (i == (BCM_OBJDBG_COUNT - 1)) ?
+ dbgobj_freehead : &bcm_dbg_objs[i + 1];
+ bcm_dbg_objs[i].prior = (i == 0) ?
+ dbgobj_freetail : &bcm_dbg_objs[i - 1];
}
-#endif /* BCM_DBG && BCM_DBG_ID16 */
-
- id16_map->total = 0;
- MFREE(osh, id16_map, ID16_MAP_SZ(total_ids));
-
- return NULL;
}
void
-id16_map_clear(void * id16_map_hndl, uint16 total_ids, uint16 start_val16)
+bcm_object_trace_deinit(void)
{
- uint16 idx, val16;
- id16_map_t * id16_map;
-
- ASSERT(total_ids > 0);
- /* A start_val16 of ID16_UNDEFINED, allows the caller to fill the id16 map
- * with random values.
- */
- ASSERT((start_val16 == ID16_UNDEFINED) ||
- (start_val16 + total_ids) < ID16_INVALID);
-
- id16_map = (id16_map_t *)id16_map_hndl;
- if (id16_map == NULL) {
- return;
+ if (dbgobj_objhead || dbgobj_objtail) {
+ printf("%s: not all objects are released\n", __FUNCTION__);
+ ASSERT(0);
}
+ BCM_OBJDBG_LOCK_DESTROY();
+}
- id16_map->total = total_ids;
- id16_map->start = start_val16;
- id16_map->failures = 0;
-
- /* Populate stack with 16bit id values, commencing with start_val16 */
- id16_map->stack_idx = -1;
-
- if (id16_map->start != ID16_UNDEFINED) {
- val16 = start_val16;
-
- for (idx = 0; idx < total_ids; idx++, val16++) {
- id16_map->stack_idx = idx;
- id16_map->stack[id16_map->stack_idx] = val16;
- }
+static void
+bcm_object_rm_list(struct bcm_dbgobj **head, struct bcm_dbgobj **tail,
+ struct bcm_dbgobj *dbgobj)
+{
+ if ((dbgobj == *head) && (dbgobj == *tail)) {
+ *head = NULL;
+ *tail = NULL;
+ } else if (dbgobj == *head) {
+ *head = (*head)->next;
+ } else if (dbgobj == *tail) {
+ *tail = (*tail)->prior;
}
+ dbgobj->next->prior = dbgobj->prior;
+ dbgobj->prior->next = dbgobj->next;
+}
-#if defined(BCM_DBG) && defined(BCM_DBG_ID16)
- if (id16_map->start != ID16_UNDEFINED) {
- if (id16_map->dbg) {
- id16_map_dbg_t *id16_map_dbg = (id16_map_dbg_t *)id16_map->dbg;
+static void
+bcm_object_add_list(struct bcm_dbgobj **head, struct bcm_dbgobj **tail,
+ struct bcm_dbgobj *dbgobj, int addtotail)
+{
+ if (!(*head) && !(*tail)) {
+ *head = dbgobj;
+ *tail = dbgobj;
+ dbgobj->next = dbgobj;
+ dbgobj->prior = dbgobj;
+ } else if ((*head) && (*tail)) {
+ (*tail)->next = dbgobj;
+ (*head)->prior = dbgobj;
+ dbgobj->next = *head;
+ dbgobj->prior = *tail;
+ if (addtotail == BCM_OBJDBG_ADDTOTAIL)
+ *tail = dbgobj;
+ else
+ *head = dbgobj;
+ } else {
+ ASSERT(0); /* can't be this case */
+ }
+}
- id16_map_dbg->total = total_ids;
- for (idx = 0; idx < total_ids; idx++) {
- id16_map_dbg->avail[idx] = TRUE;
+static INLINE void
+bcm_object_movetoend(struct bcm_dbgobj **head, struct bcm_dbgobj **tail,
+ struct bcm_dbgobj *dbgobj, int movetotail)
+{
+ if ((*head) && (*tail)) {
+ if (movetotail == BCM_OBJDBG_ADDTOTAIL) {
+ if (dbgobj != (*tail)) {
+ bcm_object_rm_list(head, tail, dbgobj);
+ bcm_object_add_list(head, tail, dbgobj, movetotail);
+ }
+ } else {
+ if (dbgobj != (*head)) {
+ bcm_object_rm_list(head, tail, dbgobj);
+ bcm_object_add_list(head, tail, dbgobj, movetotail);
}
}
+ } else {
+ ASSERT(0); /* can't be this case */
}
-#endif /* BCM_DBG && BCM_DBG_ID16 */
}
-uint16 BCMFASTPATH /* Allocate a unique 16bit id */
-id16_map_alloc(void * id16_map_hndl)
+void
+bcm_object_trace_opr(void *obj, uint32 opt, const char *caller, int line)
{
- uint16 val16;
- id16_map_t * id16_map;
-
- ASSERT(id16_map_hndl != NULL);
- if (!id16_map_hndl) {
- return ID16_INVALID;
- }
- id16_map = (id16_map_t *)id16_map_hndl;
-
- ASSERT(id16_map->total > 0);
-
- if (id16_map->stack_idx < 0) {
- id16_map->failures++;
- return ID16_INVALID;
- }
-
- val16 = id16_map->stack[id16_map->stack_idx];
- id16_map->stack_idx--;
+ struct bcm_dbgobj *dbgobj;
+ unsigned long flags;
-#if defined(BCM_DBG) && defined(BCM_DBG_ID16)
- ASSERT((id16_map->start == ID16_UNDEFINED) ||
- (val16 < (id16_map->start + id16_map->total)));
+ BCM_REFERENCE(flags);
+ BCM_OBJDBG_LOCK(&dbgobj_lock, flags);
- if (id16_map->dbg) { /* Validate val16 */
- id16_map_dbg_t *id16_map_dbg = (id16_map_dbg_t *)id16_map->dbg;
+ if (opt == BCM_OBJDBG_ADD_PKT ||
+ opt == BCM_OBJDBG_ADD) {
+ dbgobj = dbgobj_objtail;
+ while (dbgobj) {
+ if (dbgobj->obj == obj) {
+ printf("%s: obj %p allocated from %s(%d),"
+ " allocate again from %s(%d)\n",
+ __FUNCTION__, dbgobj->obj,
+ dbgobj->caller, dbgobj->line,
+ caller, line);
+ ASSERT(0);
+ goto EXIT;
+ }
+ dbgobj = dbgobj->prior;
+ if (dbgobj == dbgobj_objtail)
+ break;
+ }
- ASSERT(id16_map_dbg->avail[val16 - id16_map->start] == TRUE);
- id16_map_dbg->avail[val16 - id16_map->start] = FALSE;
- }
-#endif /* BCM_DBG && BCM_DBG_ID16 */
+#if BCM_OBJECT_MERGE_SAME_OBJ
+ dbgobj = dbgobj_freetail;
+ while (dbgobj) {
+ if (dbgobj->obj == obj) {
+ goto FREED_ENTRY_FOUND;
+ }
+ dbgobj = dbgobj->prior;
+ if (dbgobj == dbgobj_freetail)
+ break;
+ }
+#endif /* BCM_OBJECT_MERGE_SAME_OBJ */
- return val16;
-}
+ dbgobj = dbgobj_freehead;
+#if BCM_OBJECT_MERGE_SAME_OBJ
+FREED_ENTRY_FOUND:
+#endif /* BCM_OBJECT_MERGE_SAME_OBJ */
+ if (!dbgobj) {
+ printf("%s: already got %d objects ?????????????????????\n",
+ __FUNCTION__, BCM_OBJDBG_COUNT);
+ ASSERT(0);
+ goto EXIT;
+ }
-void BCMFASTPATH /* Free a 16bit id value into the id16 allocator */
-id16_map_free(void * id16_map_hndl, uint16 val16)
-{
- id16_map_t * id16_map;
+ bcm_object_rm_list(&dbgobj_freehead, &dbgobj_freetail, dbgobj);
+ dbgobj->obj = obj;
+ strncpy(dbgobj->caller, caller, BCM_OBJDBG_CALLER_LEN);
+ dbgobj->caller[BCM_OBJDBG_CALLER_LEN-1] = '\0';
+ dbgobj->line = line;
+ dbgobj->flag = 0;
+ if (opt == BCM_OBJDBG_ADD_PKT) {
+ dbgobj->obj_sn = dbgobj_sn++;
+ dbgobj->obj_state = 0;
+ /* the first 4 bytes of the pkt tag hold the pkt sn */
+ if (((unsigned long)PKTTAG(obj)) & 0x3)
+ printf("pkt tag address not aligned by 4: %p\n", PKTTAG(obj));
+ *(uint32*)PKTTAG(obj) = dbgobj->obj_sn;
+ }
+ bcm_object_add_list(&dbgobj_objhead, &dbgobj_objtail, dbgobj,
+ BCM_OBJDBG_ADDTOTAIL);
- ASSERT(id16_map_hndl != NULL);
+ dbgobj_count++;
- id16_map = (id16_map_t *)id16_map_hndl;
+ } else if (opt == BCM_OBJDBG_REMOVE) {
+ dbgobj = dbgobj_objtail;
+ while (dbgobj) {
+ if (dbgobj->obj == obj) {
+ if (dbgobj->flag) {
+ printf("%s: rm flagged obj %p flag 0x%08x from %s(%d)\n",
+ __FUNCTION__, obj, dbgobj->flag, caller, line);
+ }
+ bcm_object_rm_list(&dbgobj_objhead, &dbgobj_objtail, dbgobj);
+ memset(dbgobj->caller, 0x00, BCM_OBJDBG_CALLER_LEN);
+ strncpy(dbgobj->caller, caller, BCM_OBJDBG_CALLER_LEN);
+ dbgobj->caller[BCM_OBJDBG_CALLER_LEN-1] = '\0';
+ dbgobj->line = line;
+ bcm_object_add_list(&dbgobj_freehead, &dbgobj_freetail, dbgobj,
+ BCM_OBJDBG_ADDTOTAIL);
+ dbgobj_count--;
+ goto EXIT;
+ }
+ dbgobj = dbgobj->prior;
+ if (dbgobj == dbgobj_objtail)
+ break;
+ }
-#if defined(BCM_DBG) && defined(BCM_DBG_ID16)
- ASSERT((id16_map->start == ID16_UNDEFINED) ||
- (val16 < (id16_map->start + id16_map->total)));
+ dbgobj = dbgobj_freetail;
+ while (dbgobj && dbgobj->obj) {
+ if (dbgobj->obj == obj) {
+ printf("%s: obj %p already freed from from %s(%d),"
+ " try free again from %s(%d)\n",
+ __FUNCTION__, obj,
+ dbgobj->caller, dbgobj->line,
+ caller, line);
+ //ASSERT(0); /* release same obj more than one time? */
+ goto EXIT;
+ }
+ dbgobj = dbgobj->prior;
+ if (dbgobj == dbgobj_freetail)
+ break;
+ }
- if (id16_map->dbg) { /* Validate val16 */
- id16_map_dbg_t *id16_map_dbg = (id16_map_dbg_t *)id16_map->dbg;
+ printf("%s: ################### release none-existing obj %p from %s(%d)\n",
+ __FUNCTION__, obj, caller, line);
+ //ASSERT(0); /* release same obj more than one time? */
- ASSERT(id16_map_dbg->avail[val16 - id16_map->start] == FALSE);
- id16_map_dbg->avail[val16 - id16_map->start] = TRUE;
}
-#endif /* BCM_DBG && BCM_DBG_ID16 */
- id16_map->stack_idx++;
- id16_map->stack[id16_map->stack_idx] = val16;
+EXIT:
+ BCM_OBJDBG_UNLOCK(&dbgobj_lock, flags);
+ return;
}
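
Illustrative call sites only, compiled only when BCM_OBJECT_TRACE is defined: the skb pointer is hypothetical, bcm_object_trace_init()/bcm_object_trace_deinit() are assumed to bracket driver load and unload, and the add/remove points mirror the comment near the top of this block.

static void
example_trace_pkt_lifetime(void *skb)
{
	/* record the packet when it is allocated ... */
	bcm_object_trace_opr(skb, BCM_OBJDBG_ADD_PKT, __FUNCTION__, __LINE__);

	/* ... and take it off the active list again when it is freed */
	bcm_object_trace_opr(skb, BCM_OBJDBG_REMOVE, __FUNCTION__, __LINE__);
}
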
-uint32 /* Returns number of failures to allocate an unique id16 */
-id16_map_failures(void * id16_map_hndl)
+void
+bcm_object_trace_upd(void *obj, void *obj_new)
{
- ASSERT(id16_map_hndl != NULL);
- return ((id16_map_t *)id16_map_hndl)->failures;
-}
+ struct bcm_dbgobj *dbgobj;
+ unsigned long flags;
-bool
-id16_map_audit(void * id16_map_hndl)
-{
- int idx;
- int insane = 0;
- id16_map_t * id16_map;
+ BCM_REFERENCE(flags);
+ BCM_OBJDBG_LOCK(&dbgobj_lock, flags);
- ASSERT(id16_map_hndl != NULL);
- if (!id16_map_hndl) {
- goto done;
+ dbgobj = dbgobj_objtail;
+ while (dbgobj) {
+ if (dbgobj->obj == obj) {
+ dbgobj->obj = obj_new;
+ if (dbgobj != dbgobj_objtail) {
+ bcm_object_movetoend(&dbgobj_objhead, &dbgobj_objtail,
+ dbgobj, BCM_OBJDBG_ADDTOTAIL);
+ }
+ goto EXIT;
+ }
+ dbgobj = dbgobj->prior;
+ if (dbgobj == dbgobj_objtail)
+ break;
}
- id16_map = (id16_map_t *)id16_map_hndl;
- ASSERT(id16_map->stack_idx >= -1);
- ASSERT(id16_map->stack_idx < (int)id16_map->total);
+EXIT:
+ BCM_OBJDBG_UNLOCK(&dbgobj_lock, flags);
+ return;
+}
- if (id16_map->start == ID16_UNDEFINED)
- goto done;
+void
+bcm_object_trace_chk(void *obj, uint32 chksn, uint32 sn,
+ const char *caller, int line)
+{
+ struct bcm_dbgobj *dbgobj;
+ unsigned long flags;
- for (idx = 0; idx <= id16_map->stack_idx; idx++) {
- ASSERT(id16_map->stack[idx] >= id16_map->start);
- ASSERT(id16_map->stack[idx] < (id16_map->start + id16_map->total));
+ BCM_REFERENCE(flags);
+ BCM_OBJDBG_LOCK(&dbgobj_lock, flags);
-#if defined(BCM_DBG) && defined(BCM_DBG_ID16)
- if (id16_map->dbg) {
- uint16 val16 = id16_map->stack[idx];
- if (((id16_map_dbg_t *)(id16_map->dbg))->avail[val16] != TRUE) {
- insane |= 1;
- ID16_MAP_MSG(("id16_map<%p>: stack_idx %u invalid val16 %u\n",
- OSL_OBFUSATE_BUF(id16_map_hndl), idx, val16));
+ dbgobj = dbgobj_objtail;
+ while (dbgobj) {
+ if ((dbgobj->obj == obj) &&
+ ((!chksn) || (dbgobj->obj_sn == sn))) {
+ if (dbgobj != dbgobj_objtail) {
+ bcm_object_movetoend(&dbgobj_objhead, &dbgobj_objtail,
+ dbgobj, BCM_OBJDBG_ADDTOTAIL);
}
+ goto EXIT;
}
-#endif /* BCM_DBG && BCM_DBG_ID16 */
+ dbgobj = dbgobj->prior;
+ if (dbgobj == dbgobj_objtail)
+ break;
}
-#if defined(BCM_DBG) && defined(BCM_DBG_ID16)
- if (id16_map->dbg) {
- uint16 avail = 0; /* Audit available ids counts */
- for (idx = 0; idx < id16_map_dbg->total; idx++) {
- if (((id16_map_dbg_t *)(id16_map->dbg))->avail[idx16] == TRUE)
- avail++;
+ dbgobj = dbgobj_freetail;
+ while (dbgobj) {
+ if ((dbgobj->obj == obj) &&
+ ((!chksn) || (dbgobj->obj_sn == sn))) {
+ printf("%s: (%s:%d) obj %p (sn %d state %d) was freed from %s(%d)\n",
+ __FUNCTION__, caller, line,
+ dbgobj->obj, dbgobj->obj_sn, dbgobj->obj_state,
+ dbgobj->caller, dbgobj->line);
+ goto EXIT;
}
- if (avail && (avail != (id16_map->stack_idx + 1))) {
- insane |= 1;
- ID16_MAP_MSG(("id16_map<%p>: avail %u stack_idx %u\n",
- OSL_OBFUSCATE_BUF(id16_map_hndl),
- avail, id16_map->stack_idx));
+ else if (dbgobj->obj == NULL) {
+ break;
}
+ dbgobj = dbgobj->prior;
+ if (dbgobj == dbgobj_freetail)
+ break;
}
-#endif /* BCM_DBG && BCM_DBG_ID16 */
-done:
- /* invoke any other system audits */
- return (!!insane);
+ printf("%s: obj %p not found, check from %s(%d), chksn %s, sn %d\n",
+ __FUNCTION__, obj, caller, line, chksn ? "yes" : "no", sn);
+ dbgobj = dbgobj_objtail;
+ while (dbgobj) {
+ printf("%s: (%s:%d) obj %p sn %d was allocated from %s(%d)\n",
+ __FUNCTION__, caller, line,
+ dbgobj->obj, dbgobj->obj_sn, dbgobj->caller, dbgobj->line);
+ dbgobj = dbgobj->prior;
+ if (dbgobj == dbgobj_objtail)
+ break;
+ }
+
+EXIT:
+ BCM_OBJDBG_UNLOCK(&dbgobj_lock, flags);
+ return;
}
-/* END: Simple id16 allocator */
void
-dll_pool_detach(void * osh, dll_pool_t * pool, uint16 elems_max, uint16 elem_size)
-{
- uint32 memsize;
- memsize = sizeof(dll_pool_t) + (elems_max * elem_size);
- if (pool)
- MFREE(osh, pool, memsize);
-}
-dll_pool_t *
-dll_pool_init(void * osh, uint16 elems_max, uint16 elem_size)
+bcm_object_feature_set(void *obj, uint32 type, uint32 value)
{
- uint32 memsize, i;
- dll_pool_t * dll_pool_p;
- dll_t * elem_p;
-
- ASSERT(elem_size > sizeof(dll_t));
-
- memsize = sizeof(dll_pool_t) + (elems_max * elem_size);
-
- if ((dll_pool_p = (dll_pool_t *)MALLOCZ(osh, memsize)) == NULL) {
- printf("dll_pool_init: elems_max<%u> elem_size<%u> malloc failure\n",
- elems_max, elem_size);
- ASSERT(0);
- return dll_pool_p;
- }
+ struct bcm_dbgobj *dbgobj;
+ unsigned long flags;
- dll_init(&dll_pool_p->free_list);
- dll_pool_p->elems_max = elems_max;
- dll_pool_p->elem_size = elem_size;
+ BCM_REFERENCE(flags);
+ BCM_OBJDBG_LOCK(&dbgobj_lock, flags);
- elem_p = dll_pool_p->elements;
- for (i = 0; i < elems_max; i++) {
- dll_append(&dll_pool_p->free_list, elem_p);
- elem_p = (dll_t *)((uintptr)elem_p + elem_size);
+ dbgobj = dbgobj_objtail;
+ while (dbgobj) {
+ if (dbgobj->obj == obj) {
+ if (type == BCM_OBJECT_FEATURE_FLAG) {
+ if (value & BCM_OBJECT_FEATURE_CLEAR)
+ dbgobj->flag &= ~(value);
+ else
+ dbgobj->flag |= (value);
+ } else if (type == BCM_OBJECT_FEATURE_PKT_STATE) {
+ dbgobj->obj_state = value;
+ }
+ if (dbgobj != dbgobj_objtail) {
+ bcm_object_movetoend(&dbgobj_objhead, &dbgobj_objtail,
+ dbgobj, BCM_OBJDBG_ADDTOTAIL);
+ }
+ goto EXIT;
+ }
+ dbgobj = dbgobj->prior;
+ if (dbgobj == dbgobj_objtail)
+ break;
}
- dll_pool_p->free_count = elems_max;
+ printf("%s: obj %p not found in active list\n", __FUNCTION__, obj);
+ ASSERT(0);
- return dll_pool_p;
+EXIT:
+ BCM_OBJDBG_UNLOCK(&dbgobj_lock, flags);
+ return;
}
-void *
-dll_pool_alloc(dll_pool_t * dll_pool_p)
+int
+bcm_object_feature_get(void *obj, uint32 type, uint32 value)
{
- dll_t * elem_p;
+ int rtn = 0;
+ struct bcm_dbgobj *dbgobj;
+ unsigned long flags;
- if (dll_pool_p->free_count == 0) {
- ASSERT(dll_empty(&dll_pool_p->free_list));
- return NULL;
+ BCM_REFERENCE(flags);
+ BCM_OBJDBG_LOCK(&dbgobj_lock, flags);
+
+ dbgobj = dbgobj_objtail;
+ while (dbgobj) {
+ if (dbgobj->obj == obj) {
+ if (type == BCM_OBJECT_FEATURE_FLAG) {
+ rtn = (dbgobj->flag & value) & (~BCM_OBJECT_FEATURE_CLEAR);
+ }
+ if (dbgobj != dbgobj_objtail) {
+ bcm_object_movetoend(&dbgobj_objhead, &dbgobj_objtail,
+ dbgobj, BCM_OBJDBG_ADDTOTAIL);
+ }
+ goto EXIT;
+ }
+ dbgobj = dbgobj->prior;
+ if (dbgobj == dbgobj_objtail)
+ break;
}
- elem_p = dll_head_p(&dll_pool_p->free_list);
- dll_delete(elem_p);
- dll_pool_p->free_count -= 1;
+ printf("%s: obj %p not found in active list\n", __FUNCTION__, obj);
+ ASSERT(0);
- return (void *)elem_p;
+EXIT:
+ BCM_OBJDBG_UNLOCK(&dbgobj_lock, flags);
+ return rtn;
}
-void
-dll_pool_free(dll_pool_t * dll_pool_p, void * elem_p)
-{
- dll_t * node_p = (dll_t *)elem_p;
- dll_prepend(&dll_pool_p->free_list, node_p);
- dll_pool_p->free_count += 1;
-}
+#endif /* BCM_OBJECT_TRACE */
-void
-dll_pool_free_tail(dll_pool_t * dll_pool_p, void * elem_p)
+uint8 *
+bcm_write_tlv(int type, const void *data, int datalen, uint8 *dst)
{
- dll_t * node_p = (dll_t *)elem_p;
- dll_append(&dll_pool_p->free_list, node_p);
- dll_pool_p->free_count += 1;
-}
+ uint8 *new_dst = dst;
+ bcm_tlv_t *dst_tlv = (bcm_tlv_t *)dst;
-#endif // endif
+ /* dst buffer should always be valid */
+ ASSERT(dst);
-#endif /* BCMDRIVER */
+ /* data len must be within valid range */
+ ASSERT((datalen >= 0) && (datalen <= BCM_TLV_MAX_DATA_SIZE));
-#if defined(BCMDRIVER) || defined(WL_UNITTEST)
+ /* source data buffer pointer should be valid, unless datalen is 0
+ * meaning no data with this TLV
+ */
+ ASSERT((data != NULL) || (datalen == 0));
-/* triggers bcm_bprintf to print to kernel log */
-bool bcm_bprintf_bypass = FALSE;
+ /* only do work if the inputs are valid
+ * - must have a dst to write to AND
+ * - datalen must be within range AND
+ * - the source data pointer must be non-NULL if datalen is non-zero
+ * (this last condition detects datalen > 0 with a NULL data pointer)
+ */
+ if ((dst != NULL) &&
+ ((datalen >= 0) && (datalen <= BCM_TLV_MAX_DATA_SIZE)) &&
+ ((data != NULL) || (datalen == 0))) {
-/* Initialization of bcmstrbuf structure */
-void
-bcm_binit(struct bcmstrbuf *b, char *buf, uint size)
-{
- b->origsize = b->size = size;
- b->origbuf = b->buf = buf;
- if (size > 0) {
- buf[0] = '\0';
+ /* write type, len fields */
+ dst_tlv->id = (uint8)type;
+ dst_tlv->len = (uint8)datalen;
+
+ /* if data is present, copy to the output buffer and update
+ * pointer to output buffer
+ */
+ if (datalen > 0) {
+
+ memcpy(dst_tlv->data, data, datalen);
+ }
+
+ /* update the output destination pointer to point past
+ * the TLV written
+ */
+ new_dst = dst + BCM_TLV_HDR_SIZE + datalen;
}
+
+ return (new_dst);
}
-/* Buffer sprintf wrapper to guard against buffer overflow */
-int
-bcm_bprintf(struct bcmstrbuf *b, const char *fmt, ...)
+uint8 *
+bcm_write_tlv_safe(int type, const void *data, int datalen, uint8 *dst, int dst_maxlen)
{
- va_list ap;
- int r;
-
- va_start(ap, fmt);
-
- r = vsnprintf(b->buf, b->size, fmt, ap);
- if (bcm_bprintf_bypass == TRUE) {
- printf("%s", b->buf);
- goto exit;
- }
-
- /* Non Ansi C99 compliant returns -1,
- * Ansi compliant return r >= b->size,
- * bcmstdlib returns 0, handle all
- */
- /* r == 0 is also the case when strlen(fmt) is zero.
- * typically the case when "" is passed as argument.
- */
- if ((r == -1) || (r >= (int)b->size)) {
- b->size = 0;
- } else {
- b->size -= (uint)r;
- b->buf += r;
- }
-
-exit:
- va_end(ap);
-
- return r;
-}
-
-void
-bcm_bprhex(struct bcmstrbuf *b, const char *msg, bool newline, const uint8 *buf, int len)
-{
- int i;
-
- if (msg != NULL && msg[0] != '\0')
- bcm_bprintf(b, "%s", msg);
- for (i = 0; i < len; i ++)
- bcm_bprintf(b, "%02X", buf[i]);
- if (newline)
- bcm_bprintf(b, "\n");
-}
-
-void
-bcm_inc_bytes(uchar *num, int num_bytes, uint8 amount)
-{
- int i;
-
- for (i = 0; i < num_bytes; i++) {
- num[i] += amount;
- if (num[i] >= amount)
- break;
- amount = 1;
- }
-}
-
-int
-bcm_cmp_bytes(const uchar *arg1, const uchar *arg2, uint8 nbytes)
-{
- int i;
+ uint8 *new_dst = dst;
- for (i = nbytes - 1; i >= 0; i--) {
- if (arg1[i] != arg2[i])
- return (arg1[i] - arg2[i]);
- }
- return 0;
-}
+ if ((datalen >= 0) && (datalen <= BCM_TLV_MAX_DATA_SIZE)) {
-void
-bcm_print_bytes(const char *name, const uchar *data, int len)
-{
- int i;
- int per_line = 0;
+ /* if len + tlv hdr len is more than destlen, don't do anything
+ * just return the buffer untouched
+ */
+ if ((int)(datalen + BCM_TLV_HDR_SIZE) <= dst_maxlen) {
- printf("%s: %d \n", name ? name : "", len);
- for (i = 0; i < len; i++) {
- printf("%02x ", *data++);
- per_line++;
- if (per_line == 16) {
- per_line = 0;
- printf("\n");
+ new_dst = bcm_write_tlv(type, data, datalen, dst);
}
}
- printf("\n");
+
+ return (new_dst);
}
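
A minimal sketch of filling a scratch buffer with one TLV via the bounded writer above; the buffer size, the 0xDD element ID, and the three data bytes are illustrative values, not part of this change.

static void
example_write_one_tlv(void)
{
	uint8 buf[64];
	const uint8 body[3] = { 0x00, 0x11, 0x22 };
	uint8 *p = buf;

	/* advances p by BCM_TLV_HDR_SIZE + 3 on success; leaves it
	 * untouched if the element would not fit in the buffer
	 */
	p = bcm_write_tlv_safe(0xDD, body, (int)sizeof(body), p, (int)sizeof(buf));
	BCM_REFERENCE(p);
}
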
-/* Look for vendor-specific IE with specified OUI and optional type */
-bcm_tlv_t *
-bcm_find_vendor_ie(const void *tlvs, uint tlvs_len, const char *voui, uint8 *type, uint type_len)
+uint8 *
+bcm_copy_tlv(const void *src, uint8 *dst)
{
- const bcm_tlv_t *ie;
- uint8 ie_len;
+ uint8 *new_dst = dst;
+ const bcm_tlv_t *src_tlv = (const bcm_tlv_t *)src;
+ uint totlen;
- ie = (const bcm_tlv_t*)tlvs;
+ ASSERT(dst && src);
+ if (dst && src) {
- /* make sure we are looking at a valid IE */
- if (ie == NULL || !bcm_valid_tlv(ie, tlvs_len)) {
- return NULL;
+ totlen = BCM_TLV_HDR_SIZE + src_tlv->len;
+ memcpy(dst, src_tlv, totlen);
+ new_dst = dst + totlen;
}
- /* Walk through the IEs looking for an OUI match */
- do {
- ie_len = ie->len;
- if ((ie->id == DOT11_MNG_VS_ID) &&
- (ie_len >= (DOT11_OUI_LEN + type_len)) &&
- !bcmp(ie->data, voui, DOT11_OUI_LEN))
- {
- /* compare optional type */
- if (type_len == 0 ||
- !bcmp(&ie->data[DOT11_OUI_LEN], type, type_len)) {
- GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
- return (bcm_tlv_t *)(ie); /* a match */
- GCC_DIAGNOSTIC_POP();
- }
- }
- } while ((ie = bcm_next_tlv(ie, &tlvs_len)) != NULL);
-
- return NULL;
+ return (new_dst);
}
-#if defined(WLTINYDUMP) || defined(WLMSG_INFORM) || defined(WLMSG_ASSOC) || \
- defined(WLMSG_PRPKT) || defined(WLMSG_WSEC)
-#define SSID_FMT_BUF_LEN ((4 * DOT11_MAX_SSID_LEN) + 1)
-int
-bcm_format_ssid(char* buf, const uchar ssid[], uint ssid_len)
+uint8 *bcm_copy_tlv_safe(const void *src, uint8 *dst, int dst_maxlen)
{
- uint i, c;
- char *p = buf;
- char *endp = buf + SSID_FMT_BUF_LEN;
-
- if (ssid_len > DOT11_MAX_SSID_LEN) ssid_len = DOT11_MAX_SSID_LEN;
+ uint8 *new_dst = dst;
+ const bcm_tlv_t *src_tlv = (const bcm_tlv_t *)src;
- for (i = 0; i < ssid_len; i++) {
- c = (uint)ssid[i];
- if (c == '\\') {
- *p++ = '\\';
- *p++ = '\\';
- } else if (bcm_isprint((uchar)c)) {
- *p++ = (char)c;
- } else {
- p += snprintf(p, (size_t)(endp - p), "\\x%02X", c);
+ ASSERT(src);
+ if (src) {
+ if (bcm_valid_tlv(src_tlv, dst_maxlen)) {
+ new_dst = bcm_copy_tlv(src, dst);
}
}
- *p = '\0';
- ASSERT(p < endp);
- return (int)(p - buf);
+ return (new_dst);
}
-#endif // endif
-
-#endif /* BCMDRIVER || WL_UNITTEST */
-char *
-bcm_ether_ntoa(const struct ether_addr *ea, char *buf)
-{
- static const char hex[] =
- {
- '0', '1', '2', '3', '4', '5', '6', '7',
- '8', '9', 'a', 'b', 'c', 'd', 'e', 'f'
- };
- const uint8 *octet = ea->octet;
- char *p = buf;
- int i;
- for (i = 0; i < 6; i++, octet++) {
- *p++ = hex[(*octet >> 4) & 0xf];
- *p++ = hex[*octet & 0xf];
- *p++ = ':';
- }
+#if !defined(BCMROMOFFLOAD_EXCLUDE_BCMUTILS_FUNCS)
+/*******************************************************************************
+ * crc8
+ *
+ * Computes a crc8 over the input data using the polynomial:
+ *
+ * x^8 + x^7 +x^6 + x^4 + x^2 + 1
+ *
+ * The caller provides the initial value (either CRC8_INIT_VALUE
+ * or the previous returned value) to allow for processing of
+ * discontiguous blocks of data. When generating the CRC the
+ * caller is responsible for complementing the final return value
+ * and inserting it into the byte stream. When checking, a final
+ * return value of CRC8_GOOD_VALUE indicates a valid CRC.
+ *
+ * Reference: Dallas Semiconductor Application Note 27
+ * Williams, Ross N., "A Painless Guide to CRC Error Detection Algorithms",
+ * ver 3, Aug 1993, ross@guest.adelaide.edu.au, Rocksoft Pty Ltd.,
+ * ftp://ftp.rocksoft.com/clients/rocksoft/papers/crc_v3.txt
+ *
+ * ****************************************************************************
+ */
- *(p-1) = '\0';
+static const uint8 crc8_table[256] = {
+ 0x00, 0xF7, 0xB9, 0x4E, 0x25, 0xD2, 0x9C, 0x6B,
+ 0x4A, 0xBD, 0xF3, 0x04, 0x6F, 0x98, 0xD6, 0x21,
+ 0x94, 0x63, 0x2D, 0xDA, 0xB1, 0x46, 0x08, 0xFF,
+ 0xDE, 0x29, 0x67, 0x90, 0xFB, 0x0C, 0x42, 0xB5,
+ 0x7F, 0x88, 0xC6, 0x31, 0x5A, 0xAD, 0xE3, 0x14,
+ 0x35, 0xC2, 0x8C, 0x7B, 0x10, 0xE7, 0xA9, 0x5E,
+ 0xEB, 0x1C, 0x52, 0xA5, 0xCE, 0x39, 0x77, 0x80,
+ 0xA1, 0x56, 0x18, 0xEF, 0x84, 0x73, 0x3D, 0xCA,
+ 0xFE, 0x09, 0x47, 0xB0, 0xDB, 0x2C, 0x62, 0x95,
+ 0xB4, 0x43, 0x0D, 0xFA, 0x91, 0x66, 0x28, 0xDF,
+ 0x6A, 0x9D, 0xD3, 0x24, 0x4F, 0xB8, 0xF6, 0x01,
+ 0x20, 0xD7, 0x99, 0x6E, 0x05, 0xF2, 0xBC, 0x4B,
+ 0x81, 0x76, 0x38, 0xCF, 0xA4, 0x53, 0x1D, 0xEA,
+ 0xCB, 0x3C, 0x72, 0x85, 0xEE, 0x19, 0x57, 0xA0,
+ 0x15, 0xE2, 0xAC, 0x5B, 0x30, 0xC7, 0x89, 0x7E,
+ 0x5F, 0xA8, 0xE6, 0x11, 0x7A, 0x8D, 0xC3, 0x34,
+ 0xAB, 0x5C, 0x12, 0xE5, 0x8E, 0x79, 0x37, 0xC0,
+ 0xE1, 0x16, 0x58, 0xAF, 0xC4, 0x33, 0x7D, 0x8A,
+ 0x3F, 0xC8, 0x86, 0x71, 0x1A, 0xED, 0xA3, 0x54,
+ 0x75, 0x82, 0xCC, 0x3B, 0x50, 0xA7, 0xE9, 0x1E,
+ 0xD4, 0x23, 0x6D, 0x9A, 0xF1, 0x06, 0x48, 0xBF,
+ 0x9E, 0x69, 0x27, 0xD0, 0xBB, 0x4C, 0x02, 0xF5,
+ 0x40, 0xB7, 0xF9, 0x0E, 0x65, 0x92, 0xDC, 0x2B,
+ 0x0A, 0xFD, 0xB3, 0x44, 0x2F, 0xD8, 0x96, 0x61,
+ 0x55, 0xA2, 0xEC, 0x1B, 0x70, 0x87, 0xC9, 0x3E,
+ 0x1F, 0xE8, 0xA6, 0x51, 0x3A, 0xCD, 0x83, 0x74,
+ 0xC1, 0x36, 0x78, 0x8F, 0xE4, 0x13, 0x5D, 0xAA,
+ 0x8B, 0x7C, 0x32, 0xC5, 0xAE, 0x59, 0x17, 0xE0,
+ 0x2A, 0xDD, 0x93, 0x64, 0x0F, 0xF8, 0xB6, 0x41,
+ 0x60, 0x97, 0xD9, 0x2E, 0x45, 0xB2, 0xFC, 0x0B,
+ 0xBE, 0x49, 0x07, 0xF0, 0x9B, 0x6C, 0x22, 0xD5,
+ 0xF4, 0x03, 0x4D, 0xBA, 0xD1, 0x26, 0x68, 0x9F
+};
- return (buf);
-}
+#define CRC_INNER_LOOP(n, c, x) \
+ (c) = ((c) >> 8) ^ crc##n##_table[((c) ^ (x)) & 0xff]
-/* Find the position of first bit set
- * in the given number.
- */
-int
-bcm_find_fsb(uint32 num)
+uint8
+hndcrc8(
+ uint8 *pdata, /* pointer to array of data to process */
+ uint nbytes, /* number of input data bytes to process */
+ uint8 crc /* either CRC8_INIT_VALUE or previous return value */
+)
{
- uint8 pos = 0;
- if (!num)
- return pos;
- while (!(num & 1)) {
- num >>= 1;
- pos++;
- }
- return (pos+1);
-}
+ /* hard code the crc loop instead of using CRC_INNER_LOOP macro
+ * to avoid the undefined and unnecessary (uint8 >> 8) operation.
+ */
+ while (nbytes-- > 0)
+ crc = crc8_table[(crc ^ *pdata++) & 0xff];
-char *
-bcm_ip_ntoa(struct ipv4_addr *ia, char *buf)
-{
- snprintf(buf, 16, "%d.%d.%d.%d",
- ia->addr[0], ia->addr[1], ia->addr[2], ia->addr[3]);
- return (buf);
+ return crc;
}
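
A sketch of the checking usage described in the comment block above; frame and frame_len are hypothetical and are assumed to include the complemented CRC byte at the end of the stream.

static void
example_crc8_check(uint8 *frame, uint frame_len)
{
	/* frame_len must cover the trailing, complemented CRC byte */
	uint8 crc = hndcrc8(frame, frame_len, CRC8_INIT_VALUE);

	if (crc != CRC8_GOOD_VALUE)
		printf("crc8 check failed\n");
}
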
-char *
-bcm_ipv6_ntoa(void *ipv6, char *buf)
-{
- /* Implementing RFC 5952 Sections 4 + 5 */
- /* Not thoroughly tested */
- uint16 tmp[8];
- uint16 *a = &tmp[0];
- char *p = buf;
- int i, i_max = -1, cnt = 0, cnt_max = 1;
- uint8 *a4 = NULL;
- memcpy((uint8 *)&tmp[0], (uint8 *)ipv6, IPV6_ADDR_LEN);
-
- for (i = 0; i < IPV6_ADDR_LEN/2; i++) {
- if (a[i]) {
- if (cnt > cnt_max) {
- cnt_max = cnt;
- i_max = i - cnt;
- }
- cnt = 0;
- } else
- cnt++;
- }
- if (cnt > cnt_max) {
- cnt_max = cnt;
- i_max = i - cnt;
- }
- if (i_max == 0 &&
- /* IPv4-translated: ::ffff:0:a.b.c.d */
- ((cnt_max == 4 && a[4] == 0xffff && a[5] == 0) ||
- /* IPv4-mapped: ::ffff:a.b.c.d */
- (cnt_max == 5 && a[5] == 0xffff)))
- a4 = (uint8*) (a + 6);
+/*******************************************************************************
+ * crc16
+ *
+ * Computes a crc16 over the input data using the polynomial:
+ *
+ * x^16 + x^12 +x^5 + 1
+ *
+ * The caller provides the initial value (either CRC16_INIT_VALUE
+ * or the previous returned value) to allow for processing of
+ * discontiguous blocks of data. When generating the CRC the
+ * caller is responsible for complementing the final return value
+ * and inserting it into the byte stream. When checking, a final
+ * return value of CRC16_GOOD_VALUE indicates a valid CRC.
+ *
+ * Reference: Dallas Semiconductor Application Note 27
+ * Williams, Ross N., "A Painless Guide to CRC Error Detection Algorithms",
+ * ver 3, Aug 1993, ross@guest.adelaide.edu.au, Rocksoft Pty Ltd.,
+ * ftp://ftp.rocksoft.com/clients/rocksoft/papers/crc_v3.txt
+ *
+ * ****************************************************************************
+ */
- for (i = 0; i < IPV6_ADDR_LEN/2; i++) {
- if ((uint8*) (a + i) == a4) {
- snprintf(p, 16, ":%u.%u.%u.%u", a4[0], a4[1], a4[2], a4[3]);
- break;
- } else if (i == i_max) {
- *p++ = ':';
- i += cnt_max - 1;
- p[0] = ':';
- p[1] = '\0';
- } else {
- if (i)
- *p++ = ':';
- p += snprintf(p, 8, "%x", ntoh16(a[i]));
- }
- }
+static const uint16 crc16_table[256] = {
+ 0x0000, 0x1189, 0x2312, 0x329B, 0x4624, 0x57AD, 0x6536, 0x74BF,
+ 0x8C48, 0x9DC1, 0xAF5A, 0xBED3, 0xCA6C, 0xDBE5, 0xE97E, 0xF8F7,
+ 0x1081, 0x0108, 0x3393, 0x221A, 0x56A5, 0x472C, 0x75B7, 0x643E,
+ 0x9CC9, 0x8D40, 0xBFDB, 0xAE52, 0xDAED, 0xCB64, 0xF9FF, 0xE876,
+ 0x2102, 0x308B, 0x0210, 0x1399, 0x6726, 0x76AF, 0x4434, 0x55BD,
+ 0xAD4A, 0xBCC3, 0x8E58, 0x9FD1, 0xEB6E, 0xFAE7, 0xC87C, 0xD9F5,
+ 0x3183, 0x200A, 0x1291, 0x0318, 0x77A7, 0x662E, 0x54B5, 0x453C,
+ 0xBDCB, 0xAC42, 0x9ED9, 0x8F50, 0xFBEF, 0xEA66, 0xD8FD, 0xC974,
+ 0x4204, 0x538D, 0x6116, 0x709F, 0x0420, 0x15A9, 0x2732, 0x36BB,
+ 0xCE4C, 0xDFC5, 0xED5E, 0xFCD7, 0x8868, 0x99E1, 0xAB7A, 0xBAF3,
+ 0x5285, 0x430C, 0x7197, 0x601E, 0x14A1, 0x0528, 0x37B3, 0x263A,
+ 0xDECD, 0xCF44, 0xFDDF, 0xEC56, 0x98E9, 0x8960, 0xBBFB, 0xAA72,
+ 0x6306, 0x728F, 0x4014, 0x519D, 0x2522, 0x34AB, 0x0630, 0x17B9,
+ 0xEF4E, 0xFEC7, 0xCC5C, 0xDDD5, 0xA96A, 0xB8E3, 0x8A78, 0x9BF1,
+ 0x7387, 0x620E, 0x5095, 0x411C, 0x35A3, 0x242A, 0x16B1, 0x0738,
+ 0xFFCF, 0xEE46, 0xDCDD, 0xCD54, 0xB9EB, 0xA862, 0x9AF9, 0x8B70,
+ 0x8408, 0x9581, 0xA71A, 0xB693, 0xC22C, 0xD3A5, 0xE13E, 0xF0B7,
+ 0x0840, 0x19C9, 0x2B52, 0x3ADB, 0x4E64, 0x5FED, 0x6D76, 0x7CFF,
+ 0x9489, 0x8500, 0xB79B, 0xA612, 0xD2AD, 0xC324, 0xF1BF, 0xE036,
+ 0x18C1, 0x0948, 0x3BD3, 0x2A5A, 0x5EE5, 0x4F6C, 0x7DF7, 0x6C7E,
+ 0xA50A, 0xB483, 0x8618, 0x9791, 0xE32E, 0xF2A7, 0xC03C, 0xD1B5,
+ 0x2942, 0x38CB, 0x0A50, 0x1BD9, 0x6F66, 0x7EEF, 0x4C74, 0x5DFD,
+ 0xB58B, 0xA402, 0x9699, 0x8710, 0xF3AF, 0xE226, 0xD0BD, 0xC134,
+ 0x39C3, 0x284A, 0x1AD1, 0x0B58, 0x7FE7, 0x6E6E, 0x5CF5, 0x4D7C,
+ 0xC60C, 0xD785, 0xE51E, 0xF497, 0x8028, 0x91A1, 0xA33A, 0xB2B3,
+ 0x4A44, 0x5BCD, 0x6956, 0x78DF, 0x0C60, 0x1DE9, 0x2F72, 0x3EFB,
+ 0xD68D, 0xC704, 0xF59F, 0xE416, 0x90A9, 0x8120, 0xB3BB, 0xA232,
+ 0x5AC5, 0x4B4C, 0x79D7, 0x685E, 0x1CE1, 0x0D68, 0x3FF3, 0x2E7A,
+ 0xE70E, 0xF687, 0xC41C, 0xD595, 0xA12A, 0xB0A3, 0x8238, 0x93B1,
+ 0x6B46, 0x7ACF, 0x4854, 0x59DD, 0x2D62, 0x3CEB, 0x0E70, 0x1FF9,
+ 0xF78F, 0xE606, 0xD49D, 0xC514, 0xB1AB, 0xA022, 0x92B9, 0x8330,
+ 0x7BC7, 0x6A4E, 0x58D5, 0x495C, 0x3DE3, 0x2C6A, 0x1EF1, 0x0F78
+};
- return buf;
+uint16
+hndcrc16(
+ uint8 *pdata, /* pointer to array of data to process */
+ uint nbytes, /* number of input data bytes to process */
+ uint16 crc /* either CRC16_INIT_VALUE or previous return value */
+)
+{
+ while (nbytes-- > 0)
+ CRC_INNER_LOOP(16, crc, *pdata++);
+ return crc;
}
-#if !defined(BCMROMOFFLOAD_EXCLUDE_BCMUTILS_FUNCS)
-const unsigned char bcm_ctype[] = {
-
- _BCM_C,_BCM_C,_BCM_C,_BCM_C,_BCM_C,_BCM_C,_BCM_C,_BCM_C, /* 0-7 */
- _BCM_C, _BCM_C|_BCM_S, _BCM_C|_BCM_S, _BCM_C|_BCM_S, _BCM_C|_BCM_S, _BCM_C|_BCM_S, _BCM_C,
- _BCM_C, /* 8-15 */
- _BCM_C,_BCM_C,_BCM_C,_BCM_C,_BCM_C,_BCM_C,_BCM_C,_BCM_C, /* 16-23 */
- _BCM_C,_BCM_C,_BCM_C,_BCM_C,_BCM_C,_BCM_C,_BCM_C,_BCM_C, /* 24-31 */
- _BCM_S|_BCM_SP,_BCM_P,_BCM_P,_BCM_P,_BCM_P,_BCM_P,_BCM_P,_BCM_P, /* 32-39 */
- _BCM_P,_BCM_P,_BCM_P,_BCM_P,_BCM_P,_BCM_P,_BCM_P,_BCM_P, /* 40-47 */
- _BCM_D,_BCM_D,_BCM_D,_BCM_D,_BCM_D,_BCM_D,_BCM_D,_BCM_D, /* 48-55 */
- _BCM_D,_BCM_D,_BCM_P,_BCM_P,_BCM_P,_BCM_P,_BCM_P,_BCM_P, /* 56-63 */
- _BCM_P, _BCM_U|_BCM_X, _BCM_U|_BCM_X, _BCM_U|_BCM_X, _BCM_U|_BCM_X, _BCM_U|_BCM_X,
- _BCM_U|_BCM_X, _BCM_U, /* 64-71 */
- _BCM_U,_BCM_U,_BCM_U,_BCM_U,_BCM_U,_BCM_U,_BCM_U,_BCM_U, /* 72-79 */
- _BCM_U,_BCM_U,_BCM_U,_BCM_U,_BCM_U,_BCM_U,_BCM_U,_BCM_U, /* 80-87 */
- _BCM_U,_BCM_U,_BCM_U,_BCM_P,_BCM_P,_BCM_P,_BCM_P,_BCM_P, /* 88-95 */
- _BCM_P, _BCM_L|_BCM_X, _BCM_L|_BCM_X, _BCM_L|_BCM_X, _BCM_L|_BCM_X, _BCM_L|_BCM_X,
- _BCM_L|_BCM_X, _BCM_L, /* 96-103 */
- _BCM_L,_BCM_L,_BCM_L,_BCM_L,_BCM_L,_BCM_L,_BCM_L,_BCM_L, /* 104-111 */
- _BCM_L,_BCM_L,_BCM_L,_BCM_L,_BCM_L,_BCM_L,_BCM_L,_BCM_L, /* 112-119 */
- _BCM_L,_BCM_L,_BCM_L,_BCM_P,_BCM_P,_BCM_P,_BCM_P,_BCM_C, /* 120-127 */
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 128-143 */
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 144-159 */
- _BCM_S|_BCM_SP, _BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P,
- _BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P, /* 160-175 */
- _BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P,
- _BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P, /* 176-191 */
- _BCM_U, _BCM_U, _BCM_U, _BCM_U, _BCM_U, _BCM_U, _BCM_U, _BCM_U, _BCM_U, _BCM_U, _BCM_U,
- _BCM_U, _BCM_U, _BCM_U, _BCM_U, _BCM_U, /* 192-207 */
- _BCM_U, _BCM_U, _BCM_U, _BCM_U, _BCM_U, _BCM_U, _BCM_U, _BCM_P, _BCM_U, _BCM_U, _BCM_U,
- _BCM_U, _BCM_U, _BCM_U, _BCM_U, _BCM_L, /* 208-223 */
- _BCM_L, _BCM_L, _BCM_L, _BCM_L, _BCM_L, _BCM_L, _BCM_L, _BCM_L, _BCM_L, _BCM_L, _BCM_L,
- _BCM_L, _BCM_L, _BCM_L, _BCM_L, _BCM_L, /* 224-239 */
- _BCM_L, _BCM_L, _BCM_L, _BCM_L, _BCM_L, _BCM_L, _BCM_L, _BCM_P, _BCM_L, _BCM_L, _BCM_L,
- _BCM_L, _BCM_L, _BCM_L, _BCM_L, _BCM_L /* 240-255 */
+static const uint32 crc32_table[256] = {
+ 0x00000000, 0x77073096, 0xEE0E612C, 0x990951BA,
+ 0x076DC419, 0x706AF48F, 0xE963A535, 0x9E6495A3,
+ 0x0EDB8832, 0x79DCB8A4, 0xE0D5E91E, 0x97D2D988,
+ 0x09B64C2B, 0x7EB17CBD, 0xE7B82D07, 0x90BF1D91,
+ 0x1DB71064, 0x6AB020F2, 0xF3B97148, 0x84BE41DE,
+ 0x1ADAD47D, 0x6DDDE4EB, 0xF4D4B551, 0x83D385C7,
+ 0x136C9856, 0x646BA8C0, 0xFD62F97A, 0x8A65C9EC,
+ 0x14015C4F, 0x63066CD9, 0xFA0F3D63, 0x8D080DF5,
+ 0x3B6E20C8, 0x4C69105E, 0xD56041E4, 0xA2677172,
+ 0x3C03E4D1, 0x4B04D447, 0xD20D85FD, 0xA50AB56B,
+ 0x35B5A8FA, 0x42B2986C, 0xDBBBC9D6, 0xACBCF940,
+ 0x32D86CE3, 0x45DF5C75, 0xDCD60DCF, 0xABD13D59,
+ 0x26D930AC, 0x51DE003A, 0xC8D75180, 0xBFD06116,
+ 0x21B4F4B5, 0x56B3C423, 0xCFBA9599, 0xB8BDA50F,
+ 0x2802B89E, 0x5F058808, 0xC60CD9B2, 0xB10BE924,
+ 0x2F6F7C87, 0x58684C11, 0xC1611DAB, 0xB6662D3D,
+ 0x76DC4190, 0x01DB7106, 0x98D220BC, 0xEFD5102A,
+ 0x71B18589, 0x06B6B51F, 0x9FBFE4A5, 0xE8B8D433,
+ 0x7807C9A2, 0x0F00F934, 0x9609A88E, 0xE10E9818,
+ 0x7F6A0DBB, 0x086D3D2D, 0x91646C97, 0xE6635C01,
+ 0x6B6B51F4, 0x1C6C6162, 0x856530D8, 0xF262004E,
+ 0x6C0695ED, 0x1B01A57B, 0x8208F4C1, 0xF50FC457,
+ 0x65B0D9C6, 0x12B7E950, 0x8BBEB8EA, 0xFCB9887C,
+ 0x62DD1DDF, 0x15DA2D49, 0x8CD37CF3, 0xFBD44C65,
+ 0x4DB26158, 0x3AB551CE, 0xA3BC0074, 0xD4BB30E2,
+ 0x4ADFA541, 0x3DD895D7, 0xA4D1C46D, 0xD3D6F4FB,
+ 0x4369E96A, 0x346ED9FC, 0xAD678846, 0xDA60B8D0,
+ 0x44042D73, 0x33031DE5, 0xAA0A4C5F, 0xDD0D7CC9,
+ 0x5005713C, 0x270241AA, 0xBE0B1010, 0xC90C2086,
+ 0x5768B525, 0x206F85B3, 0xB966D409, 0xCE61E49F,
+ 0x5EDEF90E, 0x29D9C998, 0xB0D09822, 0xC7D7A8B4,
+ 0x59B33D17, 0x2EB40D81, 0xB7BD5C3B, 0xC0BA6CAD,
+ 0xEDB88320, 0x9ABFB3B6, 0x03B6E20C, 0x74B1D29A,
+ 0xEAD54739, 0x9DD277AF, 0x04DB2615, 0x73DC1683,
+ 0xE3630B12, 0x94643B84, 0x0D6D6A3E, 0x7A6A5AA8,
+ 0xE40ECF0B, 0x9309FF9D, 0x0A00AE27, 0x7D079EB1,
+ 0xF00F9344, 0x8708A3D2, 0x1E01F268, 0x6906C2FE,
+ 0xF762575D, 0x806567CB, 0x196C3671, 0x6E6B06E7,
+ 0xFED41B76, 0x89D32BE0, 0x10DA7A5A, 0x67DD4ACC,
+ 0xF9B9DF6F, 0x8EBEEFF9, 0x17B7BE43, 0x60B08ED5,
+ 0xD6D6A3E8, 0xA1D1937E, 0x38D8C2C4, 0x4FDFF252,
+ 0xD1BB67F1, 0xA6BC5767, 0x3FB506DD, 0x48B2364B,
+ 0xD80D2BDA, 0xAF0A1B4C, 0x36034AF6, 0x41047A60,
+ 0xDF60EFC3, 0xA867DF55, 0x316E8EEF, 0x4669BE79,
+ 0xCB61B38C, 0xBC66831A, 0x256FD2A0, 0x5268E236,
+ 0xCC0C7795, 0xBB0B4703, 0x220216B9, 0x5505262F,
+ 0xC5BA3BBE, 0xB2BD0B28, 0x2BB45A92, 0x5CB36A04,
+ 0xC2D7FFA7, 0xB5D0CF31, 0x2CD99E8B, 0x5BDEAE1D,
+ 0x9B64C2B0, 0xEC63F226, 0x756AA39C, 0x026D930A,
+ 0x9C0906A9, 0xEB0E363F, 0x72076785, 0x05005713,
+ 0x95BF4A82, 0xE2B87A14, 0x7BB12BAE, 0x0CB61B38,
+ 0x92D28E9B, 0xE5D5BE0D, 0x7CDCEFB7, 0x0BDBDF21,
+ 0x86D3D2D4, 0xF1D4E242, 0x68DDB3F8, 0x1FDA836E,
+ 0x81BE16CD, 0xF6B9265B, 0x6FB077E1, 0x18B74777,
+ 0x88085AE6, 0xFF0F6A70, 0x66063BCA, 0x11010B5C,
+ 0x8F659EFF, 0xF862AE69, 0x616BFFD3, 0x166CCF45,
+ 0xA00AE278, 0xD70DD2EE, 0x4E048354, 0x3903B3C2,
+ 0xA7672661, 0xD06016F7, 0x4969474D, 0x3E6E77DB,
+ 0xAED16A4A, 0xD9D65ADC, 0x40DF0B66, 0x37D83BF0,
+ 0xA9BCAE53, 0xDEBB9EC5, 0x47B2CF7F, 0x30B5FFE9,
+ 0xBDBDF21C, 0xCABAC28A, 0x53B39330, 0x24B4A3A6,
+ 0xBAD03605, 0xCDD70693, 0x54DE5729, 0x23D967BF,
+ 0xB3667A2E, 0xC4614AB8, 0x5D681B02, 0x2A6F2B94,
+ 0xB40BBE37, 0xC30C8EA1, 0x5A05DF1B, 0x2D02EF8D
};
-uint64
-bcm_strtoull(const char *cp, char **endp, uint base)
+/*
+ * crc input is CRC32_INIT_VALUE for a fresh start, or previous return value if
+ * accumulating over multiple pieces.
+ */
+uint32
+hndcrc32(uint8 *pdata, uint nbytes, uint32 crc)
{
- uint64 result, last_result = 0, value;
- bool minus;
+ uint8 *pend;
+ pend = pdata + nbytes;
+ while (pdata < pend)
+ CRC_INNER_LOOP(32, crc, *pdata++);
- minus = FALSE;
+ return crc;
+}
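
A sketch of accumulating a CRC32 over two discontiguous fragments, as the comment above allows; frag1/frag2 and their lengths are hypothetical.

static uint32
example_crc32_pieces(uint8 *frag1, uint len1, uint8 *frag2, uint len2)
{
	uint32 crc = hndcrc32(frag1, len1, CRC32_INIT_VALUE);

	/* feed the previous return value back in for the second fragment */
	return hndcrc32(frag2, len2, crc);
}
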
- while (bcm_isspace(*cp))
- cp++;
+#ifdef notdef
+#define CLEN 1499 /* CRC Length */
+#define CBUFSIZ (CLEN+4)
+#define CNBUFS 5 /* # of bufs */
- if (cp[0] == '+')
- cp++;
- else if (cp[0] == '-') {
- minus = TRUE;
- cp++;
+void
+testcrc32(void)
+{
+ uint j, k, l;
+ uint8 *buf;
+ uint len[CNBUFS];
+ uint32 crcr;
+ uint32 crc32tv[CNBUFS] =
+ {0xd2cb1faa, 0xd385c8fa, 0xf5b4f3f3, 0x55789e20, 0x00343110};
+
+ ASSERT((buf = MALLOC(CBUFSIZ*CNBUFS)) != NULL);
+
+ /* step through all possible alignments */
+ for (l = 0; l <= 4; l++) {
+ for (j = 0; j < CNBUFS; j++) {
+ len[j] = CLEN;
+ for (k = 0; k < len[j]; k++)
+ *(buf + j*CBUFSIZ + (k+l)) = (j+k) & 0xff;
+ }
+
+ for (j = 0; j < CNBUFS; j++) {
+ crcr = crc32(buf + j*CBUFSIZ + l, len[j], CRC32_INIT_VALUE);
+ ASSERT(crcr == crc32tv[j]);
+ }
}
- if (base == 0) {
- if (cp[0] == '0') {
- if ((cp[1] == 'x') || (cp[1] == 'X')) {
- base = 16;
- cp = &cp[2];
- } else {
- base = 8;
- cp = &cp[1];
- }
- } else
- base = 10;
- } else if (base == 16 && (cp[0] == '0') && ((cp[1] == 'x') || (cp[1] == 'X'))) {
- cp = &cp[2];
+ MFREE(buf, CBUFSIZ*CNBUFS);
+ return;
+}
+#endif /* notdef */
+
+/*
+ * Advance from the current 1-byte tag/1-byte length/variable-length value
+ * triple, to the next, returning a pointer to the next.
+ * If the current or next TLV is invalid (does not fit in given buffer length),
+ * NULL is returned.
+ * *buflen is not modified if the TLV elt parameter is invalid, or is decremented
+ * by the TLV parameter's length if it is valid.
+ */
+bcm_tlv_t *
+bcm_next_tlv(bcm_tlv_t *elt, int *buflen)
+{
+ int len;
+
+ /* validate current elt */
+ if (!bcm_valid_tlv(elt, *buflen)) {
+ return NULL;
}
- result = 0;
+ /* advance to next elt */
+ len = elt->len;
+ elt = (bcm_tlv_t*)(elt->data + len);
+ *buflen -= (TLV_HDR_LEN + len);
- while (bcm_isxdigit(*cp) &&
- (value = (uint64)(bcm_isdigit(*cp) ? *cp-'0' : bcm_toupper(*cp)-'A'+10)) < base) {
- result = result*base + value;
- /* Detected overflow */
- if (result < last_result && !minus) {
- if (endp) {
- /* Go to the end of current number */
- while (bcm_isxdigit(*cp)) {
- cp++;
- }
- *endp = DISCARD_QUAL(cp, char);
- }
- return (ulong)-1;
- }
- last_result = result;
- cp++;
+ /* validate next elt */
+ if (!bcm_valid_tlv(elt, *buflen)) {
+ return NULL;
}
- if (minus)
- result = (ulong)(-(long)result);
+ return elt;
+}
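
An illustrative walk over every element in a buffer, relying on bcm_next_tlv() to validate each step against the remaining length; ies and ies_len are hypothetical inputs.

static void
example_walk_ies(uint8 *ies, int ies_len)
{
	bcm_tlv_t *elt = (bcm_tlv_t *)ies;
	int rem = ies_len;

	if (!bcm_valid_tlv(elt, rem))
		return;
	do {
		printf("ie id %d len %d\n", elt->id, elt->len);
	} while ((elt = bcm_next_tlv(elt, &rem)) != NULL);
}
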
- if (endp)
- *endp = DISCARD_QUAL(cp, char);
+/*
+ * Traverse a string of 1-byte tag/1-byte length/variable-length value
+ * triples, returning a pointer to the substring whose first element
+ * matches tag
+ */
+bcm_tlv_t *
+bcm_parse_tlvs(void *buf, int buflen, uint key)
+{
+ bcm_tlv_t *elt;
+ int totlen;
- return (result);
+ if ((elt = (bcm_tlv_t*)buf) == NULL) {
+ return NULL;
+ }
+ totlen = buflen;
+
+ /* find tagged parameter */
+ while (totlen >= TLV_HDR_LEN) {
+ int len = elt->len;
+
+ /* validate remaining totlen */
+ if ((elt->id == key) && (totlen >= (int)(len + TLV_HDR_LEN))) {
+
+ return (elt);
+ }
+
+ elt = (bcm_tlv_t*)((uint8*)elt + (len + TLV_HDR_LEN));
+ totlen -= (len + TLV_HDR_LEN);
+ }
+
+ return NULL;
}
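
A sketch of finding one element by ID with the parser above; ies and ies_len are hypothetical, and 0 is used simply because it is the dot11 SSID element ID.

static void
example_find_ssid(uint8 *ies, int ies_len)
{
	/* 0 is the dot11 SSID element ID */
	bcm_tlv_t *ssid_ie = bcm_parse_tlvs(ies, ies_len, 0);

	if (ssid_ie != NULL)
		prhex("ssid", (volatile uchar *)ssid_ie->data, ssid_ie->len);
}
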
-ulong
-bcm_strtoul(const char *cp, char **endp, uint base)
+bcm_tlv_t *
+bcm_parse_tlvs_dot11(void *buf, int buflen, uint key, bool id_ext)
{
- return (ulong) bcm_strtoull(cp, endp, base);
+ bcm_tlv_t *elt;
+ int totlen;
+
+ elt = (bcm_tlv_t*)buf;
+ totlen = buflen;
+
+ /* find tagged parameter */
+ while (totlen >= TLV_HDR_LEN) {
+ int len = elt->len;
+
+ do {
+ /* validate remaining totlen */
+ if (totlen < (int)(len + TLV_HDR_LEN))
+ break;
+
+ if (id_ext) {
+ if (!DOT11_MNG_IE_ID_EXT_MATCH(elt, key))
+ break;
+ } else if (elt->id != key) {
+ break;
+ }
+
+ return (elt);
+ } while (0);
+
+ elt = (bcm_tlv_t*)((uint8*)elt + (len + TLV_HDR_LEN));
+ totlen -= (len + TLV_HDR_LEN);
+ }
+
+ return NULL;
}
-int
-bcm_atoi(const char *s)
+/*
+ * Traverse a string of 1-byte tag/1-byte length/variable-length value
+ * triples, returning a pointer to the substring whose first element
+ * matches tag
+ * return NULL if not found or length field < min_varlen
+ */
+bcm_tlv_t *
+bcm_parse_tlvs_min_bodylen(void *buf, int buflen, uint key, int min_bodylen)
{
- return (int)bcm_strtoul(s, NULL, 10);
+ bcm_tlv_t * ret;
+ ret = bcm_parse_tlvs(buf, buflen, key);
+ if (ret == NULL || ret->len < min_bodylen) {
+ return NULL;
+ }
+ return ret;
}
-/* return pointer to location of substring 'needle' in 'haystack' */
-char *
-bcmstrstr(const char *haystack, const char *needle)
+/*
+ * Traverse a string of 1-byte tag/1-byte length/variable-length value
+ * triples, returning a pointer to the substring whose first element
+ * matches tag. Stop parsing when we see an element whose ID is greater
+ * than the target key.
+ */
+bcm_tlv_t *
+bcm_parse_ordered_tlvs(void *buf, int buflen, uint key)
{
- int len, nlen;
- int i;
+ bcm_tlv_t *elt;
+ int totlen;
- if ((haystack == NULL) || (needle == NULL))
- return DISCARD_QUAL(haystack, char);
+ elt = (bcm_tlv_t*)buf;
+ totlen = buflen;
- nlen = (int)strlen(needle);
- len = (int)strlen(haystack) - nlen + 1;
+ /* find tagged parameter */
+ while (totlen >= TLV_HDR_LEN) {
+ uint id = elt->id;
+ int len = elt->len;
- for (i = 0; i < len; i++)
- if (memcmp(needle, &haystack[i], (size_t)nlen) == 0)
- return DISCARD_QUAL(&haystack[i], char);
- return (NULL);
-}
+ /* Punt if we start seeing IDs > than target key */
+ if (id > key) {
+ return (NULL);
+ }
-char *
-bcmstrnstr(const char *s, uint s_len, const char *substr, uint substr_len)
-{
- for (; s_len >= substr_len; s++, s_len--)
- if (strncmp(s, substr, substr_len) == 0)
- return DISCARD_QUAL(s, char);
+ /* validate remaining totlen */
+ if ((id == key) && (totlen >= (int)(len + TLV_HDR_LEN))) {
+ return (elt);
+ }
+ elt = (bcm_tlv_t*)((uint8*)elt + (len + TLV_HDR_LEN));
+ totlen -= (len + TLV_HDR_LEN);
+ }
return NULL;
}
+#endif /* !BCMROMOFFLOAD_EXCLUDE_BCMUTILS_FUNCS */
-char *
-bcmstrcat(char *dest, const char *src)
+#if defined(WLMSG_PRHDRS) || defined(WLMSG_PRPKT) || defined(WLMSG_ASSOC) || \
+ defined(DHD_DEBUG)
+int
+bcm_format_field(const bcm_bit_desc_ex_t *bd, uint32 flags, char* buf, int len)
{
- char *p;
-
- p = dest + strlen(dest);
+ int i, slen = 0;
+ uint32 bit, mask;
+ const char *name;
+ mask = bd->mask;
+ if (len < 2 || !buf)
+ return 0;
- while ((*p++ = *src++) != '\0')
- ;
+ buf[0] = '\0';
- return (dest);
+ for (i = 0; (name = bd->bitfield[i].name) != NULL; i++) {
+ bit = bd->bitfield[i].bit;
+ if ((flags & mask) == bit) {
+ if (len > (int)strlen(name)) {
+ slen = strlen(name);
+ strncpy(buf, name, slen+1);
+ }
+ break;
+ }
+ }
+ return slen;
}
-char *
-bcmstrncat(char *dest, const char *src, uint size)
+int
+bcm_format_flags(const bcm_bit_desc_t *bd, uint32 flags, char* buf, int len)
{
- char *endp;
- char *p;
-
- p = dest + strlen(dest);
- endp = p + size;
-
- while (p != endp && (*p++ = *src++) != '\0')
- ;
+ int i;
+ char* p = buf;
+ char hexstr[16];
+ int slen = 0, nlen = 0;
+ uint32 bit;
+ const char* name;
- return (dest);
-}
+ if (len < 2 || !buf)
+ return 0;
-/****************************************************************************
-* Function: bcmstrtok
-*
-* Purpose:
-* Tokenizes a string. This function is conceptually similiar to ANSI C strtok(),
-* but allows strToken() to be used by different strings or callers at the same
-* time. Each call modifies '*string' by substituting a NULL character for the
-* first delimiter that is encountered, and updates 'string' to point to the char
-* after the delimiter. Leading delimiters are skipped.
-*
-* Parameters:
-* string (mod) Ptr to string ptr, updated by token.
-* delimiters (in) Set of delimiter characters.
-* tokdelim (out) Character that delimits the returned token. (May
-* be set to NULL if token delimiter is not required).
-*
-* Returns: Pointer to the next token found. NULL when no more tokens are found.
-*****************************************************************************
-*/
-char *
-bcmstrtok(char **string, const char *delimiters, char *tokdelim)
-{
- unsigned char *str;
- unsigned long map[8];
- int count;
- char *nextoken;
+ buf[0] = '\0';
- if (tokdelim != NULL) {
- /* Prime the token delimiter */
- *tokdelim = '\0';
+ for (i = 0; flags != 0; i++) {
+ bit = bd[i].bit;
+ name = bd[i].name;
+ if (bit == 0 && flags != 0) {
+ /* print any unnamed bits */
+ snprintf(hexstr, 16, "0x%X", flags);
+ name = hexstr;
+ flags = 0; /* exit loop */
+ } else if ((flags & bit) == 0)
+ continue;
+ flags &= ~bit;
+ nlen = strlen(name);
+ slen += nlen;
+ /* count btwn flag space */
+ if (flags != 0)
+ slen += 1;
+ /* need NULL char as well */
+ if (len <= slen)
+ break;
+ /* copy NULL char but don't count it */
+ strncpy(p, name, nlen + 1);
+ p += nlen;
+ /* copy btwn flag space and NULL char */
+ if (flags != 0)
+ p += snprintf(p, 2, " ");
}
- /* Clear control map */
- for (count = 0; count < 8; count++) {
- map[count] = 0;
+ /* indicate the str was too short */
+ if (flags != 0) {
+ p += snprintf(p, 2, ">");
}
- /* Set bits in delimiter table */
- do {
- map[*delimiters >> 5] |= (1 << (*delimiters & 31));
- }
- while (*delimiters++);
+ return (int)(p - buf);
+}
+#endif
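
Illustrative only, and only compiled when one of the guarding debug defines above is set: a hypothetical two-entry descriptor table, assuming the usual bit/name layout of bcm_bit_desc_t, and a buffer large enough for both names.

static void
example_format_flags(void)
{
	static const bcm_bit_desc_t example_bits[] = {
		{ 0x01, "UP" },
		{ 0x02, "PROMISC" },
		{ 0, NULL }
	};
	char fbuf[32];

	/* produces "UP PROMISC" for the value 0x03 */
	bcm_format_flags(example_bits, 0x03, fbuf, sizeof(fbuf));
	printf("%s\n", fbuf);
}
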
- str = (unsigned char*)*string;
+/* print bytes formatted as hex to a string. return the resulting string length */
+int
+bcm_format_hex(char *str, const void *bytes, int len)
+{
+ int i;
+ char *p = str;
+ const uint8 *src = (const uint8*)bytes;
- /* Find beginning of token (skip over leading delimiters). Note that
- * there is no token iff this loop sets str to point to the terminal
- * null (*str == '\0')
- */
- while (((map[*str >> 5] & (1 << (*str & 31))) && *str) || (*str == ' ')) {
- str++;
+ for (i = 0; i < len; i++) {
+ p += snprintf(p, 3, "%02X", *src);
+ src++;
}
+ return (int)(p - str);
+}
- nextoken = (char*)str;
+/* pretty hex print a contiguous buffer */
+void
+prhex(const char *msg, volatile uchar *buf, uint nbytes)
+{
+ char line[128], *p;
+ int len = sizeof(line);
+ int nchar;
+ uint i;
- /* Find the end of the token. If it is not the end of the string,
- * put a null there.
- */
- for (; *str; str++) {
- if (map[*str >> 5] & (1 << (*str & 31))) {
- if (tokdelim != NULL) {
- *tokdelim = (char)*str;
- }
+ if (msg && (msg[0] != '\0'))
+ printf("%s:\n", msg);
- *str++ = '\0';
- break;
+ p = line;
+ for (i = 0; i < nbytes; i++) {
+ if (i % 16 == 0) {
+ nchar = snprintf(p, len, " %04x: ", i); /* line prefix */
+ p += nchar;
+ len -= nchar;
+ }
+ if (len > 0) {
+ nchar = snprintf(p, len, "%02x ", buf[i]);
+ p += nchar;
+ len -= nchar;
}
- }
-
- *string = (char*)str;
- /* Determine if a token has been found. */
- if (nextoken == (char *) str) {
- return NULL;
- }
- else {
- return nextoken;
+ if (i % 16 == 15) {
+ printf("%s\n", line); /* flush line */
+ p = line;
+ len = sizeof(line);
+ }
}
+
+ /* flush last partial line */
+ if (p != line)
+ printf("%s\n", line);
}
-#define xToLower(C) \
- ((C >= 'A' && C <= 'Z') ? (char)((int)C - (int)'A' + (int)'a') : C)
+static const char *crypto_algo_names[] = {
+ "NONE",
+ "WEP1",
+ "TKIP",
+ "WEP128",
+ "AES_CCM",
+ "AES_OCB_MSDU",
+ "AES_OCB_MPDU",
+ "NALG",
+ "UNDEF",
+ "UNDEF",
+ "UNDEF",
+ "UNDEF"
+ "PMK",
+ "BIP",
+ "AES_GCM",
+ "AES_CCM256",
+ "AES_GCM256",
+ "BIP_CMAC256",
+ "BIP_GMAC",
+ "BIP_GMAC256",
+ "UNDEF"
+};
-/****************************************************************************
-* Function: bcmstricmp
-*
-* Purpose: Compare to strings case insensitively.
-*
-* Parameters: s1 (in) First string to compare.
-* s2 (in) Second string to compare.
-*
-* Returns: Return 0 if the two strings are equal, -1 if t1 < t2 and 1 if
-* t1 > t2, when ignoring case sensitivity.
-*****************************************************************************
-*/
-int
-bcmstricmp(const char *s1, const char *s2)
+const char *
+bcm_crypto_algo_name(uint algo)
{
- char dc, sc;
+ return (algo < ARRAYSIZE(crypto_algo_names)) ? crypto_algo_names[algo] : "ERR";
+}
- while (*s2 && *s1) {
- dc = xToLower(*s1);
- sc = xToLower(*s2);
- if (dc < sc) return -1;
- if (dc > sc) return 1;
- s1++;
- s2++;
- }
- if (*s1 && !*s2) return 1;
- if (!*s1 && *s2) return -1;
- return 0;
+char *
+bcm_chipname(uint chipid, char *buf, uint len)
+{
+ const char *fmt;
+
+ fmt = ((chipid > 0xa000) || (chipid < 0x4000)) ? "%d" : "%x";
+ snprintf(buf, len, fmt, chipid);
+ return buf;
}
-/****************************************************************************
-* Function: bcmstrnicmp
-*
-* Purpose: Compare to strings case insensitively, upto a max of 'cnt'
-* characters.
-*
-* Parameters: s1 (in) First string to compare.
-* s2 (in) Second string to compare.
-* cnt (in) Max characters to compare.
-*
-* Returns: Return 0 if the two strings are equal, -1 if t1 < t2 and 1 if
-* t1 > t2, when ignoring case sensitivity.
-*****************************************************************************
-*/
-int
-bcmstrnicmp(const char* s1, const char* s2, int cnt)
+/* Produce a human-readable string for boardrev */
+char *
+bcm_brev_str(uint32 brev, char *buf)
{
- char dc, sc;
-
- while (*s2 && *s1 && cnt) {
- dc = xToLower(*s1);
- sc = xToLower(*s2);
- if (dc < sc) return -1;
- if (dc > sc) return 1;
- s1++;
- s2++;
- cnt--;
- }
+ if (brev < 0x100)
+ snprintf(buf, 8, "%d.%d", (brev & 0xf0) >> 4, brev & 0xf);
+ else
+ snprintf(buf, 8, "%c%03x", ((brev & 0xf000) == 0x1000) ? 'P' : 'A', brev & 0xfff);
- if (!cnt) return 0;
- if (*s1 && !*s2) return 1;
- if (!*s1 && *s2) return -1;
- return 0;
+ return (buf);
}
-/* parse a xx:xx:xx:xx:xx:xx format ethernet address */
-int
-bcm_ether_atoe(const char *p, struct ether_addr *ea)
+#define BUFSIZE_TODUMP_ATONCE 512 /* Buffer size */
+
+/* dump large strings to console */
+void
+printbig(char *buf)
{
- int i = 0;
- char *ep;
+ uint len, max_len;
+ char c;
- for (;;) {
- ea->octet[i++] = (uint8) bcm_strtoul(p, &ep, 16);
- p = ep;
- if (!*p++ || i == 6)
- break;
- }
+ len = (uint)strlen(buf);
- return (i == 6);
+ max_len = BUFSIZE_TODUMP_ATONCE;
+
+ while (len > max_len) {
+ c = buf[max_len];
+ buf[max_len] = '\0';
+ printf("%s", buf);
+ buf[max_len] = c;
+
+ buf += max_len;
+ len -= max_len;
+ }
+ /* print the remaining string */
+ printf("%s\n", buf);
+ return;
}
-int
-bcm_atoipv4(const char *p, struct ipv4_addr *ip)
+/* routine to dump fields in a fileddesc structure */
+uint
+bcmdumpfields(bcmutl_rdreg_rtn read_rtn, void *arg0, uint arg1, struct fielddesc *fielddesc_array,
+ char *buf, uint32 bufsize)
{
+ uint filled_len;
+ int len;
+ struct fielddesc *cur_ptr;
- int i = 0;
- char *c;
- for (;;) {
- ip->addr[i++] = (uint8)bcm_strtoul(p, &c, 0);
- if (*c++ != '.' || i == IPV4_ADDR_LEN)
+ filled_len = 0;
+ cur_ptr = fielddesc_array;
+
+ while (bufsize > 1) {
+ if (cur_ptr->nameandfmt == NULL)
break;
- p = c;
+ len = snprintf(buf, bufsize, cur_ptr->nameandfmt,
+ read_rtn(arg0, arg1, cur_ptr->offset));
+ /* check for snprintf overflow or error */
+ if (len < 0 || (uint32)len >= bufsize)
+ len = bufsize - 1;
+ buf += len;
+ bufsize -= len;
+ filled_len += len;
+ cur_ptr++;
}
- return (i == IPV4_ADDR_LEN);
+ return filled_len;
}
-#endif /* !BCMROMOFFLOAD_EXCLUDE_BCMUTILS_FUNCS */
-#if defined(CONFIG_USBRNDIS_RETAIL) || defined(NDIS_MINIPORT_DRIVER)
-/* registry routine buffer preparation utility functions:
- * parameter order is like strncpy, but returns count
- * of bytes copied. Minimum bytes copied is null char(1)/wchar(2)
- */
-ulong
-wchar2ascii(char *abuf, ushort *wbuf, ushort wbuflen, ulong abuflen)
+uint
+bcm_mkiovar(const char *name, const char *data, uint datalen, char *buf, uint buflen)
{
- ulong copyct = 1;
- ushort i;
+ uint len;
- if (abuflen == 0)
+ len = (uint)strlen(name) + 1;
+
+ if ((len + datalen) > buflen)
return 0;
- /* wbuflen is in bytes */
- wbuflen /= sizeof(ushort);
+ strncpy(buf, name, buflen);
- for (i = 0; i < wbuflen; ++i) {
- if (--abuflen == 0)
- break;
- *abuf++ = (char) *wbuf++;
- ++copyct;
+ /* append data onto the end of the name string */
+ if (data && datalen != 0) {
+ memcpy(&buf[len], data, datalen);
+ len += datalen;
}
- *abuf = '\0';
- return copyct;
+ return len;
}
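
A sketch of building an iovar buffer for the ioctl path with the helper above; "mpc" and the 32-bit value are example inputs, not something this change prescribes.

static void
example_build_iovar(void)
{
	char iovbuf[32];
	uint32 val = 1;
	uint len;

	/* on success iovbuf holds "mpc\0" followed by the 4 value bytes */
	len = bcm_mkiovar("mpc", (char *)&val, sizeof(val), iovbuf, sizeof(iovbuf));
	if (len == 0)
		printf("iovar buffer too small\n");
}
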
-#endif /* CONFIG_USBRNDIS_RETAIL || NDIS_MINIPORT_DRIVER */
-
-#ifdef BCM_OBJECT_TRACE
-#define BCM_OBJECT_MERGE_SAME_OBJ 0
+/* Quarter dBm units to mW
+ * Table starts at QDBM_OFFSET, so the first entry is mW for qdBm=153
+ * Table is offset so the last entry is largest mW value that fits in
+ * a uint16.
+ */
-/* some place may add / remove the object to trace list for Linux: */
-/* add: osl_alloc_skb dev_alloc_skb skb_realloc_headroom dhd_start_xmit */
-/* remove: osl_pktfree dev_kfree_skb netif_rx */
+#define QDBM_OFFSET 153 /* Offset for first entry */
+#define QDBM_TABLE_LEN 40 /* Table size */
-#define BCM_OBJDBG_COUNT (1024 * 100)
-static spinlock_t dbgobj_lock;
-#define BCM_OBJDBG_LOCK_INIT() spin_lock_init(&dbgobj_lock)
-#define BCM_OBJDBG_LOCK_DESTROY()
-#define BCM_OBJDBG_LOCK spin_lock_irqsave
-#define BCM_OBJDBG_UNLOCK spin_unlock_irqrestore
+/* Smallest mW value that will round up to the first table entry, QDBM_OFFSET.
+ * Value is ( mW(QDBM_OFFSET - 1) + mW(QDBM_OFFSET) ) / 2
+ */
+#define QDBM_TABLE_LOW_BOUND 6493 /* Low bound */
-#define BCM_OBJDBG_ADDTOHEAD 0
-#define BCM_OBJDBG_ADDTOTAIL 1
+/* Largest mW value that will round down to the last table entry,
+ * QDBM_OFFSET + QDBM_TABLE_LEN-1.
+ * Value is ( mW(QDBM_OFFSET + QDBM_TABLE_LEN - 1) + mW(QDBM_OFFSET + QDBM_TABLE_LEN) ) / 2.
+ */
+#define QDBM_TABLE_HIGH_BOUND 64938 /* High bound */
-#define BCM_OBJDBG_CALLER_LEN 32
-struct bcm_dbgobj {
- struct bcm_dbgobj *prior;
- struct bcm_dbgobj *next;
- uint32 flag;
- void *obj;
- uint32 obj_sn;
- uint32 obj_state;
- uint32 line;
- char caller[BCM_OBJDBG_CALLER_LEN];
+static const uint16 nqdBm_to_mW_map[QDBM_TABLE_LEN] = {
+/* qdBm: +0 +1 +2 +3 +4 +5 +6 +7 */
+/* 153: */ 6683, 7079, 7499, 7943, 8414, 8913, 9441, 10000,
+/* 161: */ 10593, 11220, 11885, 12589, 13335, 14125, 14962, 15849,
+/* 169: */ 16788, 17783, 18836, 19953, 21135, 22387, 23714, 25119,
+/* 177: */ 26607, 28184, 29854, 31623, 33497, 35481, 37584, 39811,
+/* 185: */ 42170, 44668, 47315, 50119, 53088, 56234, 59566, 63096
};
-static struct bcm_dbgobj *dbgobj_freehead = NULL;
-static struct bcm_dbgobj *dbgobj_freetail = NULL;
-static struct bcm_dbgobj *dbgobj_objhead = NULL;
-static struct bcm_dbgobj *dbgobj_objtail = NULL;
+uint16
+bcm_qdbm_to_mw(uint8 qdbm)
+{
+ uint factor = 1;
+ int idx = qdbm - QDBM_OFFSET;
-static uint32 dbgobj_sn = 0;
-static int dbgobj_count = 0;
-static struct bcm_dbgobj bcm_dbg_objs[BCM_OBJDBG_COUNT];
+ if (idx >= QDBM_TABLE_LEN) {
+ /* clamp to max uint16 mW value */
+ return 0xFFFF;
+ }
-void
-bcm_object_trace_init(void)
+ /* scale the qdBm index up into the table's range (0 .. QDBM_TABLE_LEN-1),
+ * where each 40 qdBm (10 dB) step corresponds to a factor of 10 in mW.
+ */
+ while (idx < 0) {
+ idx += 40;
+ factor *= 10;
+ }
+
+ /* return the mW value scaled down to the correct factor of 10,
+ * adding in factor/2 to get proper rounding.
+ */
+ return ((nqdBm_to_mW_map[idx] + factor/2) / factor);
+}
+
+uint8
+bcm_mw_to_qdbm(uint16 mw)
{
- int i = 0;
- BCM_OBJDBG_LOCK_INIT();
- memset(&bcm_dbg_objs, 0x00, sizeof(struct bcm_dbgobj) * BCM_OBJDBG_COUNT);
- dbgobj_freehead = &bcm_dbg_objs[0];
- dbgobj_freetail = &bcm_dbg_objs[BCM_OBJDBG_COUNT - 1];
+ uint8 qdbm;
+ int offset;
+ uint mw_uint = mw;
+ uint boundary;
- for (i = 0; i < BCM_OBJDBG_COUNT; ++i) {
- bcm_dbg_objs[i].next = (i == (BCM_OBJDBG_COUNT - 1)) ?
- dbgobj_freehead : &bcm_dbg_objs[i + 1];
- bcm_dbg_objs[i].prior = (i == 0) ?
- dbgobj_freetail : &bcm_dbg_objs[i - 1];
+ /* handle boundary case */
+ if (mw_uint <= 1)
+ return 0;
+
+ offset = QDBM_OFFSET;
+
+ /* move mw into the range of the table */
+ while (mw_uint < QDBM_TABLE_LOW_BOUND) {
+ mw_uint *= 10;
+ offset -= 40;
+ }
+
+ for (qdbm = 0; qdbm < QDBM_TABLE_LEN-1; qdbm++) {
+ boundary = nqdBm_to_mW_map[qdbm] + (nqdBm_to_mW_map[qdbm+1] -
+ nqdBm_to_mW_map[qdbm])/2;
+ if (mw_uint < boundary) break;
+ }
+
+ qdbm += (uint8)offset;
+
+ return (qdbm);
+}
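
A quick numeric check of the table above: 72 qdBm is 18 dBm, and the worked values below follow from the table entries rather than from anything in this change.

static void
example_qdbm_roundtrip(void)
{
	uint16 mw = bcm_qdbm_to_mw(72);		/* 18 dBm -> 63 mW (63096/1000, rounded) */
	uint8 qdbm = bcm_mw_to_qdbm(mw);	/* rounds back to 72 */

	printf("72 qdBm -> %u mW -> %u qdBm\n", mw, qdbm);
}
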
+
+
+uint
+bcm_bitcount(uint8 *bitmap, uint length)
+{
+ uint bitcount = 0, i;
+ uint8 tmp;
+ for (i = 0; i < length; i++) {
+ tmp = bitmap[i];
+ while (tmp) {
+ bitcount++;
+ tmp &= (tmp - 1);
+ }
}
+ return bitcount;
}
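
The inner loop of bcm_bitcount() is the classic clear-lowest-set-bit popcount: tmp &= (tmp - 1) removes exactly one set bit per iteration, so the loop runs only as many times as there are 1-bits. A standalone illustration follows (hedged sketch, standard C only; popcount8 is a made-up name, not a driver symbol).

#include <stdio.h>
#include <stdint.h>

static unsigned popcount8(uint8_t v)
{
    unsigned n = 0;

    while (v) {
        v &= (uint8_t)(v - 1);   /* clears exactly one set bit */
        n++;
    }
    return n;
}

int main(void)
{
    printf("%u\n", popcount8(0xB5));   /* 0xB5 = 1011 0101b -> prints 5 */
    return 0;
}
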
+#if defined(BCMDRIVER) || defined(WL_UNITTEST)
+
+/* triggers bcm_bprintf to print to kernel log */
+bool bcm_bprintf_bypass = FALSE;
+
+/* Initialization of bcmstrbuf structure */
void
-bcm_object_trace_deinit(void)
+bcm_binit(struct bcmstrbuf *b, char *buf, uint size)
{
- if (dbgobj_objhead || dbgobj_objtail) {
- printf("%s: not all objects are released\n", __FUNCTION__);
- ASSERT(0);
- }
- BCM_OBJDBG_LOCK_DESTROY();
+ b->origsize = b->size = size;
+ b->origbuf = b->buf = buf;
}
-static void
-bcm_object_rm_list(struct bcm_dbgobj **head, struct bcm_dbgobj **tail,
- struct bcm_dbgobj *dbgobj)
+/* Buffer sprintf wrapper to guard against buffer overflow */
+int
+bcm_bprintf(struct bcmstrbuf *b, const char *fmt, ...)
{
- if ((dbgobj == *head) && (dbgobj == *tail)) {
- *head = NULL;
- *tail = NULL;
- } else if (dbgobj == *head) {
- *head = (*head)->next;
- } else if (dbgobj == *tail) {
- *tail = (*tail)->prior;
+ va_list ap;
+ int r;
+
+ va_start(ap, fmt);
+
+ r = vsnprintf(b->buf, b->size, fmt, ap);
+ if (bcm_bprintf_bypass == TRUE) {
+ printf("%s", b->buf);
+ goto exit;
}
- dbgobj->next->prior = dbgobj->prior;
- dbgobj->prior->next = dbgobj->next;
-}
-static void
-bcm_object_add_list(struct bcm_dbgobj **head, struct bcm_dbgobj **tail,
- struct bcm_dbgobj *dbgobj, int addtotail)
-{
- if (!(*head) && !(*tail)) {
- *head = dbgobj;
- *tail = dbgobj;
- dbgobj->next = dbgobj;
- dbgobj->prior = dbgobj;
- } else if ((*head) && (*tail)) {
- (*tail)->next = dbgobj;
- (*head)->prior = dbgobj;
- dbgobj->next = *head;
- dbgobj->prior = *tail;
- if (addtotail == BCM_OBJDBG_ADDTOTAIL)
- *tail = dbgobj;
- else
- *head = dbgobj;
+ /* A non-C99-compliant vsnprintf returns -1 on overflow,
+ * a C99-compliant one returns r >= b->size,
+ * and bcmstdlib returns 0; handle all three.
+ */
+ /* Note that r == 0 is also the case when strlen(fmt) is zero,
+ * typically when "" is passed as the format argument.
+ */
+ if ((r == -1) || (r >= (int)b->size)) {
+ b->size = 0;
} else {
- ASSERT(0); /* can't be this case */
+ b->size -= r;
+ b->buf += r;
}
+
+exit:
+ va_end(ap);
+
+ return r;
}
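
The size bookkeeping above is what makes repeated appends safe: each successful write shrinks the remaining size, and a truncated write pins it to zero. The standalone sketch below shows the same pattern with vsnprintf(); it is illustrative only, assumes just the C library, and the struct/function names are made up rather than the driver's.

#include <stdio.h>
#include <stdarg.h>

struct strbuf { char *buf; int size; };

static int sb_printf(struct strbuf *b, const char *fmt, ...)
{
    va_list ap;
    int r;

    va_start(ap, fmt);
    r = vsnprintf(b->buf, (size_t)b->size, fmt, ap);
    va_end(ap);

    if (r < 0 || r >= b->size) {
        b->size = 0;             /* truncated: stop further appends */
    } else {
        b->buf += r;             /* advance past what was written */
        b->size -= r;
    }
    return r;
}

int main(void)
{
    char out[32];
    struct strbuf b = { out, (int)sizeof(out) };

    sb_printf(&b, "chan=%d ", 6);
    sb_printf(&b, "rssi=%d", -42);
    printf("%s\n", out);         /* "chan=6 rssi=-42" */
    return 0;
}
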
-static INLINE void
-bcm_object_movetoend(struct bcm_dbgobj **head, struct bcm_dbgobj **tail,
- struct bcm_dbgobj *dbgobj, int movetotail)
+void
+bcm_bprhex(struct bcmstrbuf *b, const char *msg, bool newline, const uint8 *buf, int len)
{
- if ((*head) && (*tail)) {
- if (movetotail == BCM_OBJDBG_ADDTOTAIL) {
- if (dbgobj != (*tail)) {
- bcm_object_rm_list(head, tail, dbgobj);
- bcm_object_add_list(head, tail, dbgobj, movetotail);
- }
- } else {
- if (dbgobj != (*head)) {
- bcm_object_rm_list(head, tail, dbgobj);
- bcm_object_add_list(head, tail, dbgobj, movetotail);
- }
- }
- } else {
- ASSERT(0); /* can't be this case */
- }
+ int i;
+
+ if (msg != NULL && msg[0] != '\0')
+ bcm_bprintf(b, "%s", msg);
+ for (i = 0; i < len; i ++)
+ bcm_bprintf(b, "%02X", buf[i]);
+ if (newline)
+ bcm_bprintf(b, "\n");
}
void
-bcm_object_trace_opr(void *obj, uint32 opt, const char *caller, int line)
+bcm_inc_bytes(uchar *num, int num_bytes, uint8 amount)
{
- struct bcm_dbgobj *dbgobj;
- unsigned long flags;
+ int i;
- BCM_REFERENCE(flags);
- BCM_OBJDBG_LOCK(&dbgobj_lock, flags);
+ for (i = 0; i < num_bytes; i++) {
+ num[i] += amount;
+ if (num[i] >= amount)
+ break;
+ amount = 1;
+ }
+}
- if (opt == BCM_OBJDBG_ADD_PKT ||
- opt == BCM_OBJDBG_ADD) {
- dbgobj = dbgobj_objtail;
- while (dbgobj) {
- if (dbgobj->obj == obj) {
- printf("%s: obj %p allocated from %s(%d),"
- " allocate again from %s(%d)\n",
- __FUNCTION__, dbgobj->obj,
- dbgobj->caller, dbgobj->line,
- caller, line);
- ASSERT(0);
- goto EXIT;
- }
- dbgobj = dbgobj->prior;
- if (dbgobj == dbgobj_objtail)
- break;
- }
+int
+bcm_cmp_bytes(const uchar *arg1, const uchar *arg2, uint8 nbytes)
+{
+ int i;
-#if BCM_OBJECT_MERGE_SAME_OBJ
- dbgobj = dbgobj_freetail;
- while (dbgobj) {
- if (dbgobj->obj == obj) {
- goto FREED_ENTRY_FOUND;
- }
- dbgobj = dbgobj->prior;
- if (dbgobj == dbgobj_freetail)
- break;
- }
-#endif /* BCM_OBJECT_MERGE_SAME_OBJ */
+ for (i = nbytes - 1; i >= 0; i--) {
+ if (arg1[i] != arg2[i])
+ return (arg1[i] - arg2[i]);
+ }
+ return 0;
+}
- dbgobj = dbgobj_freehead;
-#if BCM_OBJECT_MERGE_SAME_OBJ
-FREED_ENTRY_FOUND:
-#endif /* BCM_OBJECT_MERGE_SAME_OBJ */
- if (!dbgobj) {
- printf("%s: already got %d objects ?????????????????????\n",
- __FUNCTION__, BCM_OBJDBG_COUNT);
- ASSERT(0);
- goto EXIT;
- }
+void
+bcm_print_bytes(const char *name, const uchar *data, int len)
+{
+ int i;
+ int per_line = 0;
- bcm_object_rm_list(&dbgobj_freehead, &dbgobj_freetail, dbgobj);
- dbgobj->obj = obj;
- strncpy(dbgobj->caller, caller, BCM_OBJDBG_CALLER_LEN);
- dbgobj->caller[BCM_OBJDBG_CALLER_LEN-1] = '\0';
- dbgobj->line = line;
- dbgobj->flag = 0;
- if (opt == BCM_OBJDBG_ADD_PKT) {
- dbgobj->obj_sn = dbgobj_sn++;
- dbgobj->obj_state = 0;
- /* first 4 bytes is pkt sn */
- if (((unsigned long)PKTTAG(obj)) & 0x3)
- printf("pkt tag address not aligned by 4: %p\n", PKTTAG(obj));
- *(uint32*)PKTTAG(obj) = dbgobj->obj_sn;
+ printf("%s: %d \n", name ? name : "", len);
+ for (i = 0; i < len; i++) {
+ printf("%02x ", *data++);
+ per_line++;
+ if (per_line == 16) {
+ per_line = 0;
+ printf("\n");
}
- bcm_object_add_list(&dbgobj_objhead, &dbgobj_objtail, dbgobj,
- BCM_OBJDBG_ADDTOTAIL);
+ }
+ printf("\n");
+}
- dbgobj_count++;
+/* Look for vendor-specific IE with specified OUI and optional type */
+bcm_tlv_t *
+bcm_find_vendor_ie(void *tlvs, int tlvs_len, const char *voui, uint8 *type, int type_len)
+{
+ bcm_tlv_t *ie;
+ uint8 ie_len;
- } else if (opt == BCM_OBJDBG_REMOVE) {
- dbgobj = dbgobj_objtail;
- while (dbgobj) {
- if (dbgobj->obj == obj) {
- if (dbgobj->flag) {
- printf("%s: rm flagged obj %p flag 0x%08x from %s(%d)\n",
- __FUNCTION__, obj, dbgobj->flag, caller, line);
- }
- bcm_object_rm_list(&dbgobj_objhead, &dbgobj_objtail, dbgobj);
- memset(dbgobj->caller, 0x00, BCM_OBJDBG_CALLER_LEN);
- strncpy(dbgobj->caller, caller, BCM_OBJDBG_CALLER_LEN);
- dbgobj->caller[BCM_OBJDBG_CALLER_LEN-1] = '\0';
- dbgobj->line = line;
- bcm_object_add_list(&dbgobj_freehead, &dbgobj_freetail, dbgobj,
- BCM_OBJDBG_ADDTOTAIL);
- dbgobj_count--;
- goto EXIT;
- }
- dbgobj = dbgobj->prior;
- if (dbgobj == dbgobj_objtail)
- break;
- }
+ ie = (bcm_tlv_t*)tlvs;
- dbgobj = dbgobj_freetail;
- while (dbgobj && dbgobj->obj) {
- if (dbgobj->obj == obj) {
- printf("%s: obj %p already freed from from %s(%d),"
- " try free again from %s(%d)\n",
- __FUNCTION__, obj,
- dbgobj->caller, dbgobj->line,
- caller, line);
- //ASSERT(0); /* release same obj more than one time? */
- goto EXIT;
- }
- dbgobj = dbgobj->prior;
- if (dbgobj == dbgobj_freetail)
- break;
+ /* make sure we are looking at a valid IE */
+ if (ie == NULL || !bcm_valid_tlv(ie, tlvs_len)) {
+ return NULL;
+ }
+
+ /* Walk through the IEs looking for an OUI match */
+ do {
+ ie_len = ie->len;
+ if ((ie->id == DOT11_MNG_PROPR_ID) &&
+ (ie_len >= (DOT11_OUI_LEN + type_len)) &&
+ !bcmp(ie->data, voui, DOT11_OUI_LEN))
+ {
+ /* compare optional type */
+ if (type_len == 0 ||
+ !bcmp(&ie->data[DOT11_OUI_LEN], type, type_len)) {
+ return (ie); /* a match */
+ }
}
+ } while ((ie = bcm_next_tlv(ie, &tlvs_len)) != NULL);
- printf("%s: ################### release none-existing obj %p from %s(%d)\n",
- __FUNCTION__, obj, caller, line);
- //ASSERT(0); /* release same obj more than one time? */
-
- }
-
-EXIT:
- BCM_OBJDBG_UNLOCK(&dbgobj_lock, flags);
- return;
+ return NULL;
}
-void
-bcm_object_trace_upd(void *obj, void *obj_new)
+#if defined(WLTINYDUMP) || defined(WLMSG_INFORM) || defined(WLMSG_ASSOC) || \
+ defined(WLMSG_PRPKT) || defined(WLMSG_WSEC)
+#define SSID_FMT_BUF_LEN ((4 * DOT11_MAX_SSID_LEN) + 1)
+
+int
+bcm_format_ssid(char* buf, const uchar ssid[], uint ssid_len)
{
- struct bcm_dbgobj *dbgobj;
- unsigned long flags;
+ uint i, c;
+ char *p = buf;
+ char *endp = buf + SSID_FMT_BUF_LEN;
- BCM_REFERENCE(flags);
- BCM_OBJDBG_LOCK(&dbgobj_lock, flags);
+ if (ssid_len > DOT11_MAX_SSID_LEN) ssid_len = DOT11_MAX_SSID_LEN;
- dbgobj = dbgobj_objtail;
- while (dbgobj) {
- if (dbgobj->obj == obj) {
- dbgobj->obj = obj_new;
- if (dbgobj != dbgobj_objtail) {
- bcm_object_movetoend(&dbgobj_objhead, &dbgobj_objtail,
- dbgobj, BCM_OBJDBG_ADDTOTAIL);
- }
- goto EXIT;
+ for (i = 0; i < ssid_len; i++) {
+ c = (uint)ssid[i];
+ if (c == '\\') {
+ *p++ = '\\';
+ *p++ = '\\';
+ } else if (bcm_isprint((uchar)c)) {
+ *p++ = (char)c;
+ } else {
+ p += snprintf(p, (endp - p), "\\x%02X", c);
}
- dbgobj = dbgobj->prior;
- if (dbgobj == dbgobj_objtail)
- break;
}
+ *p = '\0';
+ ASSERT(p < endp);
-EXIT:
- BCM_OBJDBG_UNLOCK(&dbgobj_lock, flags);
- return;
+ return (int)(p - buf);
}
+#endif
-void
-bcm_object_trace_chk(void *obj, uint32 chksn, uint32 sn,
- const char *caller, int line)
+#endif /* BCMDRIVER || WL_UNITTEST */
+
+/*
+ * process_nvram_vars: Takes a buffer of "<var>=<value>\n" lines read from a file and ending in a NUL.
+ * Also accepts nvram files which are already in the format of <var1>=<value1>\0<var2>=<value2>\0.
+ * Removes carriage returns, empty lines, and comment lines, and converts newlines to NULs.
+ * Shortens the buffer as needed and pads with NULs. The end of the buffer is marked by two NULs.
+ */
+
+unsigned int
+process_nvram_vars(char *varbuf, unsigned int len)
{
- struct bcm_dbgobj *dbgobj;
- unsigned long flags;
+ char *dp;
+ bool findNewline;
+ int column;
+ unsigned int buf_len, n;
+ unsigned int pad = 0;
+ char nv_ver[128];
- BCM_REFERENCE(flags);
- BCM_OBJDBG_LOCK(&dbgobj_lock, flags);
+ dp = varbuf;
- dbgobj = dbgobj_objtail;
- while (dbgobj) {
- if ((dbgobj->obj == obj) &&
- ((!chksn) || (dbgobj->obj_sn == sn))) {
- if (dbgobj != dbgobj_objtail) {
- bcm_object_movetoend(&dbgobj_objhead, &dbgobj_objtail,
- dbgobj, BCM_OBJDBG_ADDTOTAIL);
- }
- goto EXIT;
+ findNewline = FALSE;
+ column = 0;
+
+ // terence 20130914: print out NVRAM version
+ if (varbuf[0] == '#') {
+ memset(nv_ver, 0x00, sizeof(nv_ver));
+ for (n=1; n<len && n<(sizeof(nv_ver)-1); n++) {
+ if (varbuf[n] == '\n')
+ break;
+ nv_ver[n-1] = varbuf[n];
}
- dbgobj = dbgobj->prior;
- if (dbgobj == dbgobj_objtail)
- break;
+ printk("NVRAM version: %s\n", nv_ver);
}
- dbgobj = dbgobj_freetail;
- while (dbgobj) {
- if ((dbgobj->obj == obj) &&
- ((!chksn) || (dbgobj->obj_sn == sn))) {
- printf("%s: (%s:%d) obj %p (sn %d state %d) was freed from %s(%d)\n",
- __FUNCTION__, caller, line,
- dbgobj->obj, dbgobj->obj_sn, dbgobj->obj_state,
- dbgobj->caller, dbgobj->line);
- goto EXIT;
+ for (n = 0; n < len; n++) {
+ if (varbuf[n] == '\r')
+ continue;
+ if (findNewline && varbuf[n] != '\n')
+ continue;
+ findNewline = FALSE;
+ if (varbuf[n] == '#') {
+ findNewline = TRUE;
+ continue;
}
- else if (dbgobj->obj == NULL) {
- break;
+ if (varbuf[n] == '\n') {
+ if (column == 0)
+ continue;
+ *dp++ = 0;
+ column = 0;
+ continue;
}
- dbgobj = dbgobj->prior;
- if (dbgobj == dbgobj_freetail)
- break;
+ *dp++ = varbuf[n];
+ column++;
}
-
- printf("%s: obj %p not found, check from %s(%d), chksn %s, sn %d\n",
- __FUNCTION__, obj, caller, line, chksn ? "yes" : "no", sn);
- dbgobj = dbgobj_objtail;
- while (dbgobj) {
- printf("%s: (%s:%d) obj %p sn %d was allocated from %s(%d)\n",
- __FUNCTION__, caller, line,
- dbgobj->obj, dbgobj->obj_sn, dbgobj->caller, dbgobj->line);
- dbgobj = dbgobj->prior;
- if (dbgobj == dbgobj_objtail)
- break;
+ buf_len = (unsigned int)(dp - varbuf);
+ if (buf_len % 4) {
+ pad = 4 - buf_len % 4;
+ if (pad && (buf_len + pad <= len)) {
+ buf_len += pad;
+ }
}
-EXIT:
- BCM_OBJDBG_UNLOCK(&dbgobj_lock, flags);
- return;
+ while (dp < varbuf + n)
+ *dp++ = 0;
+
+ return buf_len;
}
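
To make the transformation concrete, the standalone demo below is a simplified re-implementation for illustration only (not the driver routine; it omits the version print and the 4-byte padding). It runs the same strip-and-pack loop over a small sample buffer and prints the NUL separators as '|'.

#include <stdio.h>

int main(void)
{
    char buf[] = "# NVRAM sample\nmacaddr=00:90:4c:c5:12:38\r\n\nboardtype=0x0726\n";
    char *dp = buf;
    int skip = 0, col = 0;
    size_t i, n;

    for (i = 0; i < sizeof(buf) - 1; i++) {
        char c = buf[i];
        if (c == '\r')
            continue;                 /* drop carriage returns */
        if (skip && c != '\n')
            continue;                 /* still inside a comment line */
        skip = 0;
        if (c == '#') { skip = 1; continue; }
        if (c == '\n') {
            if (col == 0)
                continue;             /* drop empty lines */
            *dp++ = '\0';             /* newline becomes a NUL separator */
            col = 0;
            continue;
        }
        *dp++ = c;
        col++;
    }

    n = (size_t)(dp - buf);
    for (i = 0; i < n; i++)
        putchar(buf[i] ? buf[i] : '|');   /* show NUL separators as '|' */
    putchar('\n');                        /* macaddr=00:90:4c:c5:12:38|boardtype=0x0726| */
    return 0;
}
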
+/* calculate a * b + c, returning the 64-bit result in r_high:r_low */
void
-bcm_object_feature_set(void *obj, uint32 type, uint32 value)
+bcm_uint64_multiple_add(uint32* r_high, uint32* r_low, uint32 a, uint32 b, uint32 c)
{
- struct bcm_dbgobj *dbgobj;
- unsigned long flags;
+#define FORMALIZE(var) {cc += (var & 0x80000000) ? 1 : 0; var &= 0x7fffffff;}
+ uint32 r1, r0;
+ uint32 a1, a0, b1, b0, t, cc = 0;
- BCM_REFERENCE(flags);
- BCM_OBJDBG_LOCK(&dbgobj_lock, flags);
+ a1 = a >> 16;
+ a0 = a & 0xffff;
+ b1 = b >> 16;
+ b0 = b & 0xffff;
- dbgobj = dbgobj_objtail;
- while (dbgobj) {
- if (dbgobj->obj == obj) {
- if (type == BCM_OBJECT_FEATURE_FLAG) {
- if (value & BCM_OBJECT_FEATURE_CLEAR)
- dbgobj->flag &= ~(value);
- else
- dbgobj->flag |= (value);
- } else if (type == BCM_OBJECT_FEATURE_PKT_STATE) {
- dbgobj->obj_state = value;
- }
- if (dbgobj != dbgobj_objtail) {
- bcm_object_movetoend(&dbgobj_objhead, &dbgobj_objtail,
- dbgobj, BCM_OBJDBG_ADDTOTAIL);
- }
- goto EXIT;
- }
- dbgobj = dbgobj->prior;
- if (dbgobj == dbgobj_objtail)
- break;
+ r0 = a0 * b0;
+ FORMALIZE(r0);
+
+ t = (a1 * b0) << 16;
+ FORMALIZE(t);
+
+ r0 += t;
+ FORMALIZE(r0);
+
+ t = (a0 * b1) << 16;
+ FORMALIZE(t);
+
+ r0 += t;
+ FORMALIZE(r0);
+
+ FORMALIZE(c);
+
+ r0 += c;
+ FORMALIZE(r0);
+
+ r0 |= (cc % 2) ? 0x80000000 : 0;
+ r1 = a1 * b1 + ((a1 * b0) >> 16) + ((b1 * a0) >> 16) + (cc / 2);
+
+ *r_high = r1;
+ *r_low = r0;
+}
+
+/* calculate (a_high:a_low) / b; the result is written only when b >= 2 */
+void
+bcm_uint64_divide(uint32* r, uint32 a_high, uint32 a_low, uint32 b)
+{
+ uint32 a1 = a_high, a0 = a_low, r0 = 0;
+
+ if (b < 2)
+ return;
+
+ while (a1 != 0) {
+ r0 += (0xffffffff / b) * a1;
+ bcm_uint64_multiple_add(&a1, &a0, ((0xffffffff % b) + 1) % b, a1, a0);
}
- printf("%s: obj %p not found in active list\n", __FUNCTION__, obj);
- ASSERT(0);
+ r0 += a0 / b;
+ *r = r0;
+}
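
Both helpers emulate 64-bit arithmetic using 32-bit halves. On a host compiler with native 64-bit integers the expected results can be cross-checked directly, as in this small sketch (illustrative only, standard C; the sample operands are arbitrary).

#include <stdio.h>
#include <stdint.h>
#include <inttypes.h>

int main(void)
{
    uint32_t a = 0xDEADBEEF, b = 1000000, c = 12345;
    uint64_t r = (uint64_t)a * b + c;   /* reference result */

    /* the split implementation should yield exactly these two halves */
    printf("r_high=0x%08" PRIX32 " r_low=0x%08" PRIX32 "\n",
           (uint32_t)(r >> 32), (uint32_t)r);

    /* and for the divide: (r_high:r_low) / b recovers a, since c < b */
    printf("quotient=0x%08" PRIX32 "\n", (uint32_t)(r / b));
    return 0;
}
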
-EXIT:
- BCM_OBJDBG_UNLOCK(&dbgobj_lock, flags);
- return;
+#ifndef setbit /* As in the header file */
+#ifdef BCMUTILS_BIT_MACROS_USE_FUNCS
+/* Set bit in byte array. */
+void
+setbit(void *array, uint bit)
+{
+ ((uint8 *)array)[bit / NBBY] |= 1 << (bit % NBBY);
}
-int
-bcm_object_feature_get(void *obj, uint32 type, uint32 value)
+/* Clear bit in byte array. */
+void
+clrbit(void *array, uint bit)
{
- int rtn = 0;
- struct bcm_dbgobj *dbgobj;
- unsigned long flags;
+ ((uint8 *)array)[bit / NBBY] &= ~(1 << (bit % NBBY));
+}
- BCM_REFERENCE(flags);
- BCM_OBJDBG_LOCK(&dbgobj_lock, flags);
+/* Test if bit is set in byte array. */
+bool
+isset(const void *array, uint bit)
+{
+ return (((const uint8 *)array)[bit / NBBY] & (1 << (bit % NBBY)));
+}
- dbgobj = dbgobj_objtail;
- while (dbgobj) {
- if (dbgobj->obj == obj) {
- if (type == BCM_OBJECT_FEATURE_FLAG) {
- rtn = (dbgobj->flag & value) & (~BCM_OBJECT_FEATURE_CLEAR);
- }
- if (dbgobj != dbgobj_objtail) {
- bcm_object_movetoend(&dbgobj_objhead, &dbgobj_objtail,
- dbgobj, BCM_OBJDBG_ADDTOTAIL);
- }
- goto EXIT;
+/* Test if bit is clear in byte array. */
+bool
+isclr(const void *array, uint bit)
+{
+ return ((((const uint8 *)array)[bit / NBBY] & (1 << (bit % NBBY))) == 0);
+}
+#endif /* BCMUTILS_BIT_MACROS_USE_FUNCS */
+#endif /* setbit */
+
+void
+set_bitrange(void *array, uint start, uint end, uint maxbit)
+{
+ uint startbyte = start/NBBY;
+ uint endbyte = end/NBBY;
+ uint i, startbytelastbit, endbytestartbit;
+
+ if (end >= start) {
+ if (endbyte - startbyte > 1)
+ {
+ startbytelastbit = (startbyte+1)*NBBY - 1;
+ endbytestartbit = endbyte*NBBY;
+ for (i = startbyte+1; i < endbyte; i++)
+ ((uint8 *)array)[i] = 0xFF;
+ for (i = start; i <= startbytelastbit; i++)
+ setbit(array, i);
+ for (i = endbytestartbit; i <= end; i++)
+ setbit(array, i);
+ } else {
+ for (i = start; i <= end; i++)
+ setbit(array, i);
}
- dbgobj = dbgobj->prior;
- if (dbgobj == dbgobj_objtail)
- break;
}
+ else {
+ set_bitrange(array, start, maxbit, maxbit);
+ set_bitrange(array, 0, end, maxbit);
+ }
+}
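
set_bitrange() fills the whole bytes between the end-points with 0xFF and finishes the two partial bytes bit-by-bit, recursing once when the range wraps past maxbit. A standalone illustration of the straight, non-wrapping case is below (hedged sketch, standard C only; setbit8() is a local stand-in for the driver's setbit()).

#include <stdio.h>
#include <stdint.h>
#include <string.h>

#define NBBY 8

static void setbit8(uint8_t *a, unsigned bit)
{
    a[bit / NBBY] |= (uint8_t)(1u << (bit % NBBY));
}

int main(void)
{
    uint8_t map[4];
    unsigned i;

    memset(map, 0, sizeof(map));
    for (i = 5; i <= 18; i++)          /* effect of set_bitrange(map, 5, 18, 31) */
        setbit8(map, i);

    for (i = 0; i < sizeof(map); i++)
        printf("%02X ", map[i]);       /* E0 FF 07 00 */
    printf("\n");
    return 0;
}
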
- printf("%s: obj %p not found in active list\n", __FUNCTION__, obj);
- ASSERT(0);
+void
+bcm_bitprint32(const uint32 u32arg)
+{
+ int i;
+ for (i = NBITS(uint32) - 1; i >= 0; i--) {
+ if (isbitset(u32arg, i)) {
+ printf("1");
+ } else {
+ printf("0");
+ }
-EXIT:
- BCM_OBJDBG_UNLOCK(&dbgobj_lock, flags);
- return rtn;
+ if ((i % NBBY) == 0) printf(" ");
+ }
+ printf("\n");
}
-#endif /* BCM_OBJECT_TRACE */
-
-uint8 *
-bcm_write_tlv(int type, const void *data, int datalen, uint8 *dst)
+/* calculate checksum for ip header, tcp / udp header / data */
+uint16
+bcm_ip_cksum(uint8 *buf, uint32 len, uint32 sum)
{
- uint8 *new_dst = dst;
- bcm_tlv_t *dst_tlv = (bcm_tlv_t *)dst;
-
- /* dst buffer should always be valid */
- ASSERT(dst);
+ while (len > 1) {
+ sum += (buf[0] << 8) | buf[1];
+ buf += 2;
+ len -= 2;
+ }
- /* data len must be within valid range */
- ASSERT((datalen >= 0) && (datalen <= BCM_TLV_MAX_DATA_SIZE));
+ if (len > 0) {
+ sum += (*buf) << 8;
+ }
- /* source data buffer pointer should be valid, unless datalen is 0
- * meaning no data with this TLV
- */
- ASSERT((data != NULL) || (datalen == 0));
+ while (sum >> 16) {
+ sum = (sum & 0xffff) + (sum >> 16);
+ }
- /* only do work if the inputs are valid
- * - must have a dst to write to AND
- * - datalen must be within range AND
- * - the source data pointer must be non-NULL if datalen is non-zero
- * (this last condition detects datalen > 0 with a NULL data pointer)
- */
- if ((dst != NULL) &&
- ((datalen >= 0) && (datalen <= BCM_TLV_MAX_DATA_SIZE)) &&
- ((data != NULL) || (datalen == 0))) {
+ return ((uint16)~sum);
+}
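
This is the standard Internet ones'-complement checksum: sum big-endian 16-bit words, fold the carries back in, invert the result. The standalone sketch below is illustrative only, not the driver routine; the sample header is the widely circulated textbook IPv4 example with its checksum field zeroed, and the program should print 0xB1E6.

#include <stdio.h>
#include <stdint.h>

static uint16_t ip_cksum(const uint8_t *buf, uint32_t len, uint32_t sum)
{
    while (len > 1) {
        sum += ((uint32_t)buf[0] << 8) | buf[1];
        buf += 2;
        len -= 2;
    }
    if (len)
        sum += (uint32_t)buf[0] << 8;      /* odd trailing byte */
    while (sum >> 16)
        sum = (sum & 0xFFFF) + (sum >> 16); /* fold carries */
    return (uint16_t)~sum;
}

int main(void)
{
    /* IPv4 header with the checksum field (bytes 10-11) zeroed */
    uint8_t hdr[20] = {
        0x45, 0x00, 0x00, 0x3c, 0x1c, 0x46, 0x40, 0x00,
        0x40, 0x06, 0x00, 0x00, 0xac, 0x10, 0x0a, 0x63,
        0xac, 0x10, 0x0a, 0x0c
    };
    printf("checksum = 0x%04X\n", (unsigned)ip_cksum(hdr, sizeof(hdr), 0));
    return 0;
}
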
+#if defined(BCMDRIVER) && !defined(_CFEZ_)
+/*
+ * Hierarchical Multiword bitmap based small id allocator.
+ *
+ * Multilevel hierarchy bitmap. (maximum 2 levels)
+ * First hierarchy uses a multiword bitmap to identify 32bit words in the
+ * second hierarchy that have at least a single bit set. Each bit in a word of
+ * the second hierarchy represents a unique ID that may be allocated.
+ *
+ * BCM_MWBMAP_ITEMS_MAX: Maximum number of IDs managed.
+ * BCM_MWBMAP_BITS_WORD: Number of bits in a bitmap word
+ * BCM_MWBMAP_WORDS_MAX: Maximum number of bitmap words needed for free IDs.
+ * BCM_MWBMAP_WDMAP_MAX: Maximum number of bitmap words identifying the first
+ * non-zero bitmap word carrying at least one free ID.
+ * BCM_MWBMAP_SHIFT_OP: Used in MOD, DIV and MUL operations.
+ * BCM_MWBMAP_INVALID_IDX: Value ~0U is treated as an invalid ID
+ *
+ * Design Notes:
+ * BCM_MWBMAP_USE_CNTSETBITS trades CPU for memory: when it is defined, the
+ * number of set bits is recomputed on every allocation and deallocation,
+ * requiring 4 array-indexed accesses and 3 arithmetic operations. When it is
+ * not defined, a running count of set bits per word is maintained instead,
+ * needing up to 32 bytes per 1024 IDs. In a 4K max ID allocator, up to
+ * 128 bytes are hence used per instantiation. In a memory-limited system,
+ * e.g. dongle builds, CPU may be traded for memory by defining
+ * BCM_MWBMAP_USE_CNTSETBITS.
+ *
+ * Note: wd_bitmap[] is statically declared and is not ROM friendly ... the
+ * array size is fixed. There is no intention to support allocation of more
+ * than 4K indices. ID allocators for ranges smaller than 4K waste only
+ * 12 bytes, with the saving of not needing an indirect access had the array
+ * been dynamically allocated.
+ */
+#define BCM_MWBMAP_ITEMS_MAX (64 * 1024) /* May increase to 64K */
- /* write type, len fields */
- dst_tlv->id = (uint8)type;
- dst_tlv->len = (uint8)datalen;
+#define BCM_MWBMAP_BITS_WORD (NBITS(uint32))
+#define BCM_MWBMAP_WORDS_MAX (BCM_MWBMAP_ITEMS_MAX / BCM_MWBMAP_BITS_WORD)
+#define BCM_MWBMAP_WDMAP_MAX (BCM_MWBMAP_WORDS_MAX / BCM_MWBMAP_BITS_WORD)
+#define BCM_MWBMAP_SHIFT_OP (5)
+#define BCM_MWBMAP_MODOP(ix) ((ix) & (BCM_MWBMAP_BITS_WORD - 1))
+#define BCM_MWBMAP_DIVOP(ix) ((ix) >> BCM_MWBMAP_SHIFT_OP)
+#define BCM_MWBMAP_MULOP(ix) ((ix) << BCM_MWBMAP_SHIFT_OP)
- /* if data is present, copy to the output buffer and update
- * pointer to output buffer
- */
- if (datalen > 0) {
+/* Redefine PTR() and/or HDL() conversion to invoke audit for debugging */
+#define BCM_MWBMAP_PTR(hdl) ((struct bcm_mwbmap *)(hdl))
+#define BCM_MWBMAP_HDL(ptr) ((void *)(ptr))
- memcpy(dst_tlv->data, data, (size_t)datalen);
- }
+#if defined(BCM_MWBMAP_DEBUG)
+#define BCM_MWBMAP_AUDIT(mwb) \
+ do { \
+ ASSERT((mwb != NULL) && \
+ (((struct bcm_mwbmap *)(mwb))->magic == (void *)(mwb))); \
+ bcm_mwbmap_audit(mwb); \
+ } while (0)
+#define MWBMAP_ASSERT(exp) ASSERT(exp)
+#define MWBMAP_DBG(x) printf x
+#else /* !BCM_MWBMAP_DEBUG */
+#define BCM_MWBMAP_AUDIT(mwb) do {} while (0)
+#define MWBMAP_ASSERT(exp) do {} while (0)
+#define MWBMAP_DBG(x)
+#endif /* !BCM_MWBMAP_DEBUG */
- /* update the output destination poitner to point past
- * the TLV written
- */
- new_dst = dst + BCM_TLV_HDR_SIZE + datalen;
- }
- return (new_dst);
-}
+typedef struct bcm_mwbmap { /* Hierarchical multiword bitmap allocator */
+ uint16 wmaps; /* Total number of words in free wd bitmap */
+ uint16 imaps; /* Total number of words in free id bitmap */
+ int32 ifree; /* Count of free indices. Used only in audits */
+ uint16 total; /* Total indices managed by multiword bitmap */
-uint8 *
-bcm_write_tlv_ext(uint8 type, uint8 ext, const void *data, uint8 datalen, uint8 *dst)
-{
- uint8 *new_dst = dst;
- bcm_tlv_ext_t *dst_tlv = (bcm_tlv_ext_t *)dst;
+ void * magic; /* Audit handle parameter from user */
- /* dst buffer should always be valid */
- ASSERT(dst);
+ uint32 wd_bitmap[BCM_MWBMAP_WDMAP_MAX]; /* 1st level bitmap: which id_bitmap words have free IDs */
+#if !defined(BCM_MWBMAP_USE_CNTSETBITS)
+ int8 wd_count[BCM_MWBMAP_WORDS_MAX]; /* free id running count, 1st lvl */
+#endif /* ! BCM_MWBMAP_USE_CNTSETBITS */
- /* data len must be within valid range */
- ASSERT(datalen <= BCM_TLV_EXT_MAX_DATA_SIZE);
+ uint32 id_bitmap[0]; /* Second level bitmap */
+} bcm_mwbmap_t;
- /* source data buffer pointer should be valid, unless datalen is 0
- * meaning no data with this TLV
- */
- ASSERT((data != NULL) || (datalen == 0));
+/* Incarnate a hierarchical multiword bitmap based small index allocator. */
+struct bcm_mwbmap *
+bcm_mwbmap_init(osl_t *osh, uint32 items_max)
+{
+ struct bcm_mwbmap * mwbmap_p;
+ uint32 wordix, size, words, extra;
- /* only do work if the inputs are valid
- * - must have a dst to write to AND
- * - datalen must be within range AND
- * - the source data pointer must be non-NULL if datalen is non-zero
- * (this last condition detects datalen > 0 with a NULL data pointer)
- */
- if ((dst != NULL) &&
- (datalen <= BCM_TLV_EXT_MAX_DATA_SIZE) &&
- ((data != NULL) || (datalen == 0))) {
+ /* Implementation Constraint: Uses 32bit word bitmap */
+ MWBMAP_ASSERT(BCM_MWBMAP_BITS_WORD == 32U);
+ MWBMAP_ASSERT(BCM_MWBMAP_SHIFT_OP == 5U);
+ MWBMAP_ASSERT(ISPOWEROF2(BCM_MWBMAP_ITEMS_MAX));
+ MWBMAP_ASSERT((BCM_MWBMAP_ITEMS_MAX % BCM_MWBMAP_BITS_WORD) == 0U);
- /* write type, len fields */
- dst_tlv->id = (uint8)type;
- dst_tlv->ext = ext;
- dst_tlv->len = 1 + (uint8)datalen;
+ ASSERT(items_max <= BCM_MWBMAP_ITEMS_MAX);
- /* if data is present, copy to the output buffer and update
- * pointer to output buffer
- */
- if (datalen > 0) {
- memcpy(dst_tlv->data, data, datalen);
- }
+ /* Determine the number of words needed in the multiword bitmap */
+ extra = BCM_MWBMAP_MODOP(items_max);
+ words = BCM_MWBMAP_DIVOP(items_max) + ((extra != 0U) ? 1U : 0U);
- /* update the output destination poitner to point past
- * the TLV written
- */
- new_dst = dst + BCM_TLV_EXT_HDR_SIZE + datalen;
+ /* Allocate runtime state of multiword bitmap */
+ /* Note: wd_count[] or wd_bitmap[] are not dynamically allocated */
+ size = sizeof(bcm_mwbmap_t) + (sizeof(uint32) * words);
+ mwbmap_p = (bcm_mwbmap_t *)MALLOC(osh, size);
+ if (mwbmap_p == (bcm_mwbmap_t *)NULL) {
+ ASSERT(0);
+ goto error1;
}
+ memset(mwbmap_p, 0, size);
- return (new_dst);
-}
-
-uint8 *
-bcm_write_tlv_safe(int type, const void *data, int datalen, uint8 *dst, int dst_maxlen)
-{
- uint8 *new_dst = dst;
-
- if ((datalen >= 0) && (datalen <= BCM_TLV_MAX_DATA_SIZE)) {
+ /* Initialize runtime multiword bitmap state */
+ mwbmap_p->imaps = (uint16)words;
+ mwbmap_p->ifree = (int32)items_max;
+ mwbmap_p->total = (uint16)items_max;
- /* if len + tlv hdr len is more than destlen, don't do anything
- * just return the buffer untouched
- */
- if ((int)(datalen + (int)BCM_TLV_HDR_SIZE) <= dst_maxlen) {
+ /* Setup magic, for use in audit of handle */
+ mwbmap_p->magic = BCM_MWBMAP_HDL(mwbmap_p);
- new_dst = bcm_write_tlv(type, data, datalen, dst);
- }
+ /* Setup the second level bitmap of free indices */
+ /* Mark all indices as available */
+ for (wordix = 0U; wordix < mwbmap_p->imaps; wordix++) {
+ mwbmap_p->id_bitmap[wordix] = (uint32)(~0U);
+#if !defined(BCM_MWBMAP_USE_CNTSETBITS)
+ mwbmap_p->wd_count[wordix] = BCM_MWBMAP_BITS_WORD;
+#endif /* ! BCM_MWBMAP_USE_CNTSETBITS */
}
- return (new_dst);
-}
+ /* Ensure that extra indices are tagged as un-available */
+ if (extra) { /* fixup the free ids in last bitmap and wd_count */
+ uint32 * bmap_p = &mwbmap_p->id_bitmap[mwbmap_p->imaps - 1];
+ *bmap_p ^= (uint32)(~0U << extra); /* fixup bitmap */
+#if !defined(BCM_MWBMAP_USE_CNTSETBITS)
+ mwbmap_p->wd_count[mwbmap_p->imaps - 1] = (int8)extra; /* fixup count */
+#endif /* ! BCM_MWBMAP_USE_CNTSETBITS */
+ }
-uint8 *
-bcm_copy_tlv(const void *src, uint8 *dst)
-{
- uint8 *new_dst = dst;
- const bcm_tlv_t *src_tlv = (const bcm_tlv_t *)src;
- uint totlen;
+ /* Setup the first level bitmap hierarchy */
+ extra = BCM_MWBMAP_MODOP(mwbmap_p->imaps);
+ words = BCM_MWBMAP_DIVOP(mwbmap_p->imaps) + ((extra != 0U) ? 1U : 0U);
- ASSERT(dst && src);
- if (dst && src) {
+ mwbmap_p->wmaps = (uint16)words;
- totlen = BCM_TLV_HDR_SIZE + src_tlv->len;
- memcpy(dst, src_tlv, totlen);
- new_dst = dst + totlen;
+ for (wordix = 0U; wordix < mwbmap_p->wmaps; wordix++)
+ mwbmap_p->wd_bitmap[wordix] = (uint32)(~0U);
+ if (extra) {
+ uint32 * bmap_p = &mwbmap_p->wd_bitmap[mwbmap_p->wmaps - 1];
+ *bmap_p ^= (uint32)(~0U << extra); /* fixup bitmap */
}
- return (new_dst);
+ return mwbmap_p;
+
+error1:
+ return BCM_MWBMAP_INVALID_HDL;
}
-uint8 *bcm_copy_tlv_safe(const void *src, uint8 *dst, int dst_maxlen)
+/* Release resources used by multiword bitmap based small index allocator. */
+void
+bcm_mwbmap_fini(osl_t * osh, struct bcm_mwbmap * mwbmap_hdl)
{
- uint8 *new_dst = dst;
- const bcm_tlv_t *src_tlv = (const bcm_tlv_t *)src;
+ bcm_mwbmap_t * mwbmap_p;
- ASSERT(src);
- if (src) {
- if (bcm_valid_tlv(src_tlv, dst_maxlen)) {
- new_dst = bcm_copy_tlv(src, dst);
- }
- }
+ BCM_MWBMAP_AUDIT(mwbmap_hdl);
+ mwbmap_p = BCM_MWBMAP_PTR(mwbmap_hdl);
- return (new_dst);
+ MFREE(osh, mwbmap_p, sizeof(struct bcm_mwbmap)
+ + (sizeof(uint32) * mwbmap_p->imaps));
+ return;
}
-#if !defined(BCMROMOFFLOAD_EXCLUDE_BCMUTILS_FUNCS)
-/*******************************************************************************
- * crc8
- *
- * Computes a crc8 over the input data using the polynomial:
- *
- * x^8 + x^7 +x^6 + x^4 + x^2 + 1
- *
- * The caller provides the initial value (either CRC8_INIT_VALUE
- * or the previous returned value) to allow for processing of
- * discontiguous blocks of data. When generating the CRC the
- * caller is responsible for complementing the final return value
- * and inserting it into the byte stream. When checking, a final
- * return value of CRC8_GOOD_VALUE indicates a valid CRC.
- *
- * Reference: Dallas Semiconductor Application Note 27
- * Williams, Ross N., "A Painless Guide to CRC Error Detection Algorithms",
- * ver 3, Aug 1993, ross@guest.adelaide.edu.au, Rocksoft Pty Ltd.,
- * ftp://ftp.rocksoft.com/clients/rocksoft/papers/crc_v3.txt
- *
- * ****************************************************************************
- */
+/* Allocate a unique small index using a multiword bitmap index allocator. */
+uint32 BCMFASTPATH
+bcm_mwbmap_alloc(struct bcm_mwbmap * mwbmap_hdl)
+{
+ bcm_mwbmap_t * mwbmap_p;
+ uint32 wordix, bitmap;
-static const uint8 crc8_table[256] = {
- 0x00, 0xF7, 0xB9, 0x4E, 0x25, 0xD2, 0x9C, 0x6B,
- 0x4A, 0xBD, 0xF3, 0x04, 0x6F, 0x98, 0xD6, 0x21,
- 0x94, 0x63, 0x2D, 0xDA, 0xB1, 0x46, 0x08, 0xFF,
- 0xDE, 0x29, 0x67, 0x90, 0xFB, 0x0C, 0x42, 0xB5,
- 0x7F, 0x88, 0xC6, 0x31, 0x5A, 0xAD, 0xE3, 0x14,
- 0x35, 0xC2, 0x8C, 0x7B, 0x10, 0xE7, 0xA9, 0x5E,
- 0xEB, 0x1C, 0x52, 0xA5, 0xCE, 0x39, 0x77, 0x80,
- 0xA1, 0x56, 0x18, 0xEF, 0x84, 0x73, 0x3D, 0xCA,
- 0xFE, 0x09, 0x47, 0xB0, 0xDB, 0x2C, 0x62, 0x95,
- 0xB4, 0x43, 0x0D, 0xFA, 0x91, 0x66, 0x28, 0xDF,
- 0x6A, 0x9D, 0xD3, 0x24, 0x4F, 0xB8, 0xF6, 0x01,
- 0x20, 0xD7, 0x99, 0x6E, 0x05, 0xF2, 0xBC, 0x4B,
- 0x81, 0x76, 0x38, 0xCF, 0xA4, 0x53, 0x1D, 0xEA,
- 0xCB, 0x3C, 0x72, 0x85, 0xEE, 0x19, 0x57, 0xA0,
- 0x15, 0xE2, 0xAC, 0x5B, 0x30, 0xC7, 0x89, 0x7E,
- 0x5F, 0xA8, 0xE6, 0x11, 0x7A, 0x8D, 0xC3, 0x34,
- 0xAB, 0x5C, 0x12, 0xE5, 0x8E, 0x79, 0x37, 0xC0,
- 0xE1, 0x16, 0x58, 0xAF, 0xC4, 0x33, 0x7D, 0x8A,
- 0x3F, 0xC8, 0x86, 0x71, 0x1A, 0xED, 0xA3, 0x54,
- 0x75, 0x82, 0xCC, 0x3B, 0x50, 0xA7, 0xE9, 0x1E,
- 0xD4, 0x23, 0x6D, 0x9A, 0xF1, 0x06, 0x48, 0xBF,
- 0x9E, 0x69, 0x27, 0xD0, 0xBB, 0x4C, 0x02, 0xF5,
- 0x40, 0xB7, 0xF9, 0x0E, 0x65, 0x92, 0xDC, 0x2B,
- 0x0A, 0xFD, 0xB3, 0x44, 0x2F, 0xD8, 0x96, 0x61,
- 0x55, 0xA2, 0xEC, 0x1B, 0x70, 0x87, 0xC9, 0x3E,
- 0x1F, 0xE8, 0xA6, 0x51, 0x3A, 0xCD, 0x83, 0x74,
- 0xC1, 0x36, 0x78, 0x8F, 0xE4, 0x13, 0x5D, 0xAA,
- 0x8B, 0x7C, 0x32, 0xC5, 0xAE, 0x59, 0x17, 0xE0,
- 0x2A, 0xDD, 0x93, 0x64, 0x0F, 0xF8, 0xB6, 0x41,
- 0x60, 0x97, 0xD9, 0x2E, 0x45, 0xB2, 0xFC, 0x0B,
- 0xBE, 0x49, 0x07, 0xF0, 0x9B, 0x6C, 0x22, 0xD5,
- 0xF4, 0x03, 0x4D, 0xBA, 0xD1, 0x26, 0x68, 0x9F
-};
+ BCM_MWBMAP_AUDIT(mwbmap_hdl);
+ mwbmap_p = BCM_MWBMAP_PTR(mwbmap_hdl);
-#define CRC_INNER_LOOP(n, c, x) \
- (c) = ((c) >> 8) ^ crc##n##_table[((c) ^ (x)) & 0xff]
+ /* Start with the first hierarchy */
+ for (wordix = 0; wordix < mwbmap_p->wmaps; ++wordix) {
-uint8
-hndcrc8(
- const uint8 *pdata, /* pointer to array of data to process */
- uint nbytes, /* number of input data bytes to process */
- uint8 crc /* either CRC8_INIT_VALUE or previous return value */
-)
-{
- /* hard code the crc loop instead of using CRC_INNER_LOOP macro
- * to avoid the undefined and unnecessary (uint8 >> 8) operation.
- */
- while (nbytes-- > 0)
- crc = crc8_table[(crc ^ *pdata++) & 0xff];
+ bitmap = mwbmap_p->wd_bitmap[wordix]; /* get the word bitmap */
- return crc;
-}
+ if (bitmap != 0U) {
-/*******************************************************************************
- * crc16
- *
- * Computes a crc16 over the input data using the polynomial:
- *
- * x^16 + x^12 +x^5 + 1
- *
- * The caller provides the initial value (either CRC16_INIT_VALUE
- * or the previous returned value) to allow for processing of
- * discontiguous blocks of data. When generating the CRC the
- * caller is responsible for complementing the final return value
- * and inserting it into the byte stream. When checking, a final
- * return value of CRC16_GOOD_VALUE indicates a valid CRC.
- *
- * Reference: Dallas Semiconductor Application Note 27
- * Williams, Ross N., "A Painless Guide to CRC Error Detection Algorithms",
- * ver 3, Aug 1993, ross@guest.adelaide.edu.au, Rocksoft Pty Ltd.,
- * ftp://ftp.rocksoft.com/clients/rocksoft/papers/crc_v3.txt
- *
- * ****************************************************************************
- */
+ uint32 count, bitix, *bitmap_p;
-static const uint16 crc16_table[256] = {
- 0x0000, 0x1189, 0x2312, 0x329B, 0x4624, 0x57AD, 0x6536, 0x74BF,
- 0x8C48, 0x9DC1, 0xAF5A, 0xBED3, 0xCA6C, 0xDBE5, 0xE97E, 0xF8F7,
- 0x1081, 0x0108, 0x3393, 0x221A, 0x56A5, 0x472C, 0x75B7, 0x643E,
- 0x9CC9, 0x8D40, 0xBFDB, 0xAE52, 0xDAED, 0xCB64, 0xF9FF, 0xE876,
- 0x2102, 0x308B, 0x0210, 0x1399, 0x6726, 0x76AF, 0x4434, 0x55BD,
- 0xAD4A, 0xBCC3, 0x8E58, 0x9FD1, 0xEB6E, 0xFAE7, 0xC87C, 0xD9F5,
- 0x3183, 0x200A, 0x1291, 0x0318, 0x77A7, 0x662E, 0x54B5, 0x453C,
- 0xBDCB, 0xAC42, 0x9ED9, 0x8F50, 0xFBEF, 0xEA66, 0xD8FD, 0xC974,
- 0x4204, 0x538D, 0x6116, 0x709F, 0x0420, 0x15A9, 0x2732, 0x36BB,
- 0xCE4C, 0xDFC5, 0xED5E, 0xFCD7, 0x8868, 0x99E1, 0xAB7A, 0xBAF3,
- 0x5285, 0x430C, 0x7197, 0x601E, 0x14A1, 0x0528, 0x37B3, 0x263A,
- 0xDECD, 0xCF44, 0xFDDF, 0xEC56, 0x98E9, 0x8960, 0xBBFB, 0xAA72,
- 0x6306, 0x728F, 0x4014, 0x519D, 0x2522, 0x34AB, 0x0630, 0x17B9,
- 0xEF4E, 0xFEC7, 0xCC5C, 0xDDD5, 0xA96A, 0xB8E3, 0x8A78, 0x9BF1,
- 0x7387, 0x620E, 0x5095, 0x411C, 0x35A3, 0x242A, 0x16B1, 0x0738,
- 0xFFCF, 0xEE46, 0xDCDD, 0xCD54, 0xB9EB, 0xA862, 0x9AF9, 0x8B70,
- 0x8408, 0x9581, 0xA71A, 0xB693, 0xC22C, 0xD3A5, 0xE13E, 0xF0B7,
- 0x0840, 0x19C9, 0x2B52, 0x3ADB, 0x4E64, 0x5FED, 0x6D76, 0x7CFF,
- 0x9489, 0x8500, 0xB79B, 0xA612, 0xD2AD, 0xC324, 0xF1BF, 0xE036,
- 0x18C1, 0x0948, 0x3BD3, 0x2A5A, 0x5EE5, 0x4F6C, 0x7DF7, 0x6C7E,
- 0xA50A, 0xB483, 0x8618, 0x9791, 0xE32E, 0xF2A7, 0xC03C, 0xD1B5,
- 0x2942, 0x38CB, 0x0A50, 0x1BD9, 0x6F66, 0x7EEF, 0x4C74, 0x5DFD,
- 0xB58B, 0xA402, 0x9699, 0x8710, 0xF3AF, 0xE226, 0xD0BD, 0xC134,
- 0x39C3, 0x284A, 0x1AD1, 0x0B58, 0x7FE7, 0x6E6E, 0x5CF5, 0x4D7C,
- 0xC60C, 0xD785, 0xE51E, 0xF497, 0x8028, 0x91A1, 0xA33A, 0xB2B3,
- 0x4A44, 0x5BCD, 0x6956, 0x78DF, 0x0C60, 0x1DE9, 0x2F72, 0x3EFB,
- 0xD68D, 0xC704, 0xF59F, 0xE416, 0x90A9, 0x8120, 0xB3BB, 0xA232,
- 0x5AC5, 0x4B4C, 0x79D7, 0x685E, 0x1CE1, 0x0D68, 0x3FF3, 0x2E7A,
- 0xE70E, 0xF687, 0xC41C, 0xD595, 0xA12A, 0xB0A3, 0x8238, 0x93B1,
- 0x6B46, 0x7ACF, 0x4854, 0x59DD, 0x2D62, 0x3CEB, 0x0E70, 0x1FF9,
- 0xF78F, 0xE606, 0xD49D, 0xC514, 0xB1AB, 0xA022, 0x92B9, 0x8330,
- 0x7BC7, 0x6A4E, 0x58D5, 0x495C, 0x3DE3, 0x2C6A, 0x1EF1, 0x0F78
-};
+ bitmap_p = &mwbmap_p->wd_bitmap[wordix];
-uint16
-hndcrc16(
- const uint8 *pdata, /* pointer to array of data to process */
- uint nbytes, /* number of input data bytes to process */
- uint16 crc /* either CRC16_INIT_VALUE or previous return value */
-)
-{
- while (nbytes-- > 0)
- CRC_INNER_LOOP(16, crc, *pdata++);
- return crc;
-}
+ /* clear all except trailing 1 */
+ bitmap = (uint32)(((int)(bitmap)) & (-((int)(bitmap))));
+ MWBMAP_ASSERT(C_bcm_count_leading_zeros(bitmap) ==
+ bcm_count_leading_zeros(bitmap));
+ bitix = (BCM_MWBMAP_BITS_WORD - 1)
+ - bcm_count_leading_zeros(bitmap); /* use asm clz */
+ wordix = BCM_MWBMAP_MULOP(wordix) + bitix;
-static const uint32 crc32_table[256] = {
- 0x00000000, 0x77073096, 0xEE0E612C, 0x990951BA,
- 0x076DC419, 0x706AF48F, 0xE963A535, 0x9E6495A3,
- 0x0EDB8832, 0x79DCB8A4, 0xE0D5E91E, 0x97D2D988,
- 0x09B64C2B, 0x7EB17CBD, 0xE7B82D07, 0x90BF1D91,
- 0x1DB71064, 0x6AB020F2, 0xF3B97148, 0x84BE41DE,
- 0x1ADAD47D, 0x6DDDE4EB, 0xF4D4B551, 0x83D385C7,
- 0x136C9856, 0x646BA8C0, 0xFD62F97A, 0x8A65C9EC,
- 0x14015C4F, 0x63066CD9, 0xFA0F3D63, 0x8D080DF5,
- 0x3B6E20C8, 0x4C69105E, 0xD56041E4, 0xA2677172,
- 0x3C03E4D1, 0x4B04D447, 0xD20D85FD, 0xA50AB56B,
- 0x35B5A8FA, 0x42B2986C, 0xDBBBC9D6, 0xACBCF940,
- 0x32D86CE3, 0x45DF5C75, 0xDCD60DCF, 0xABD13D59,
- 0x26D930AC, 0x51DE003A, 0xC8D75180, 0xBFD06116,
- 0x21B4F4B5, 0x56B3C423, 0xCFBA9599, 0xB8BDA50F,
- 0x2802B89E, 0x5F058808, 0xC60CD9B2, 0xB10BE924,
- 0x2F6F7C87, 0x58684C11, 0xC1611DAB, 0xB6662D3D,
- 0x76DC4190, 0x01DB7106, 0x98D220BC, 0xEFD5102A,
- 0x71B18589, 0x06B6B51F, 0x9FBFE4A5, 0xE8B8D433,
- 0x7807C9A2, 0x0F00F934, 0x9609A88E, 0xE10E9818,
- 0x7F6A0DBB, 0x086D3D2D, 0x91646C97, 0xE6635C01,
- 0x6B6B51F4, 0x1C6C6162, 0x856530D8, 0xF262004E,
- 0x6C0695ED, 0x1B01A57B, 0x8208F4C1, 0xF50FC457,
- 0x65B0D9C6, 0x12B7E950, 0x8BBEB8EA, 0xFCB9887C,
- 0x62DD1DDF, 0x15DA2D49, 0x8CD37CF3, 0xFBD44C65,
- 0x4DB26158, 0x3AB551CE, 0xA3BC0074, 0xD4BB30E2,
- 0x4ADFA541, 0x3DD895D7, 0xA4D1C46D, 0xD3D6F4FB,
- 0x4369E96A, 0x346ED9FC, 0xAD678846, 0xDA60B8D0,
- 0x44042D73, 0x33031DE5, 0xAA0A4C5F, 0xDD0D7CC9,
- 0x5005713C, 0x270241AA, 0xBE0B1010, 0xC90C2086,
- 0x5768B525, 0x206F85B3, 0xB966D409, 0xCE61E49F,
- 0x5EDEF90E, 0x29D9C998, 0xB0D09822, 0xC7D7A8B4,
- 0x59B33D17, 0x2EB40D81, 0xB7BD5C3B, 0xC0BA6CAD,
- 0xEDB88320, 0x9ABFB3B6, 0x03B6E20C, 0x74B1D29A,
- 0xEAD54739, 0x9DD277AF, 0x04DB2615, 0x73DC1683,
- 0xE3630B12, 0x94643B84, 0x0D6D6A3E, 0x7A6A5AA8,
- 0xE40ECF0B, 0x9309FF9D, 0x0A00AE27, 0x7D079EB1,
- 0xF00F9344, 0x8708A3D2, 0x1E01F268, 0x6906C2FE,
- 0xF762575D, 0x806567CB, 0x196C3671, 0x6E6B06E7,
- 0xFED41B76, 0x89D32BE0, 0x10DA7A5A, 0x67DD4ACC,
- 0xF9B9DF6F, 0x8EBEEFF9, 0x17B7BE43, 0x60B08ED5,
- 0xD6D6A3E8, 0xA1D1937E, 0x38D8C2C4, 0x4FDFF252,
- 0xD1BB67F1, 0xA6BC5767, 0x3FB506DD, 0x48B2364B,
- 0xD80D2BDA, 0xAF0A1B4C, 0x36034AF6, 0x41047A60,
- 0xDF60EFC3, 0xA867DF55, 0x316E8EEF, 0x4669BE79,
- 0xCB61B38C, 0xBC66831A, 0x256FD2A0, 0x5268E236,
- 0xCC0C7795, 0xBB0B4703, 0x220216B9, 0x5505262F,
- 0xC5BA3BBE, 0xB2BD0B28, 0x2BB45A92, 0x5CB36A04,
- 0xC2D7FFA7, 0xB5D0CF31, 0x2CD99E8B, 0x5BDEAE1D,
- 0x9B64C2B0, 0xEC63F226, 0x756AA39C, 0x026D930A,
- 0x9C0906A9, 0xEB0E363F, 0x72076785, 0x05005713,
- 0x95BF4A82, 0xE2B87A14, 0x7BB12BAE, 0x0CB61B38,
- 0x92D28E9B, 0xE5D5BE0D, 0x7CDCEFB7, 0x0BDBDF21,
- 0x86D3D2D4, 0xF1D4E242, 0x68DDB3F8, 0x1FDA836E,
- 0x81BE16CD, 0xF6B9265B, 0x6FB077E1, 0x18B74777,
- 0x88085AE6, 0xFF0F6A70, 0x66063BCA, 0x11010B5C,
- 0x8F659EFF, 0xF862AE69, 0x616BFFD3, 0x166CCF45,
- 0xA00AE278, 0xD70DD2EE, 0x4E048354, 0x3903B3C2,
- 0xA7672661, 0xD06016F7, 0x4969474D, 0x3E6E77DB,
- 0xAED16A4A, 0xD9D65ADC, 0x40DF0B66, 0x37D83BF0,
- 0xA9BCAE53, 0xDEBB9EC5, 0x47B2CF7F, 0x30B5FFE9,
- 0xBDBDF21C, 0xCABAC28A, 0x53B39330, 0x24B4A3A6,
- 0xBAD03605, 0xCDD70693, 0x54DE5729, 0x23D967BF,
- 0xB3667A2E, 0xC4614AB8, 0x5D681B02, 0x2A6F2B94,
- 0xB40BBE37, 0xC30C8EA1, 0x5A05DF1B, 0x2D02EF8D
-};
+ /* Clear bit if wd count is 0, without conditional branch */
+#if defined(BCM_MWBMAP_USE_CNTSETBITS)
+ count = bcm_cntsetbits(mwbmap_p->id_bitmap[wordix]) - 1;
+#else /* ! BCM_MWBMAP_USE_CNTSETBITS */
+ mwbmap_p->wd_count[wordix]--;
+ count = mwbmap_p->wd_count[wordix];
+ MWBMAP_ASSERT(count ==
+ (bcm_cntsetbits(mwbmap_p->id_bitmap[wordix]) - 1));
+#endif /* ! BCM_MWBMAP_USE_CNTSETBITS */
+ MWBMAP_ASSERT(count >= 0);
-/*
- * crc input is CRC32_INIT_VALUE for a fresh start, or previous return value if
- * accumulating over multiple pieces.
- */
-uint32
-hndcrc32(const uint8 *pdata, uint nbytes, uint32 crc)
-{
- const uint8 *pend;
- pend = pdata + nbytes;
- while (pdata < pend)
- CRC_INNER_LOOP(32, crc, *pdata++);
+ /* clear wd_bitmap bit if id_map count is 0 */
+ bitmap = (count == 0) << bitix;
- return crc;
-}
+ MWBMAP_DBG((
+ "Lvl1: bitix<%02u> wordix<%02u>: %08x ^ %08x = %08x wfree %d",
+ bitix, wordix, *bitmap_p, bitmap, (*bitmap_p) ^ bitmap, count));
-#ifdef notdef
-#define CLEN 1499 /* CRC Length */
-#define CBUFSIZ (CLEN+4)
-#define CNBUFS 5 /* # of bufs */
+ *bitmap_p ^= bitmap;
-void
-testcrc32(void)
-{
- uint j, k, l;
- uint8 *buf;
- uint len[CNBUFS];
- uint32 crcr;
- uint32 crc32tv[CNBUFS] =
- {0xd2cb1faa, 0xd385c8fa, 0xf5b4f3f3, 0x55789e20, 0x00343110};
+ /* Use bitix in the second hierarchy */
+ bitmap_p = &mwbmap_p->id_bitmap[wordix];
- ASSERT((buf = MALLOC(CBUFSIZ*CNBUFS)) != NULL);
+ bitmap = mwbmap_p->id_bitmap[wordix]; /* get the id bitmap */
+ MWBMAP_ASSERT(bitmap != 0U);
- /* step through all possible alignments */
- for (l = 0; l <= 4; l++) {
- for (j = 0; j < CNBUFS; j++) {
- len[j] = CLEN;
- for (k = 0; k < len[j]; k++)
- *(buf + j*CBUFSIZ + (k+l)) = (j+k) & 0xff;
- }
+ /* clear all except trailing 1 */
+ bitmap = (uint32)(((int)(bitmap)) & (-((int)(bitmap))));
+ MWBMAP_ASSERT(C_bcm_count_leading_zeros(bitmap) ==
+ bcm_count_leading_zeros(bitmap));
+ bitix = BCM_MWBMAP_MULOP(wordix)
+ + (BCM_MWBMAP_BITS_WORD - 1)
+ - bcm_count_leading_zeros(bitmap); /* use asm clz */
- for (j = 0; j < CNBUFS; j++) {
- crcr = crc32(buf + j*CBUFSIZ + l, len[j], CRC32_INIT_VALUE);
- ASSERT(crcr == crc32tv[j]);
+ mwbmap_p->ifree--; /* decrement system wide free count */
+ MWBMAP_ASSERT(mwbmap_p->ifree >= 0);
+
+ MWBMAP_DBG((
+ "Lvl2: bitix<%02u> wordix<%02u>: %08x ^ %08x = %08x ifree %d",
+ bitix, wordix, *bitmap_p, bitmap, (*bitmap_p) ^ bitmap,
+ mwbmap_p->ifree));
+
+ *bitmap_p ^= bitmap; /* mark as allocated = 1b0 */
+
+ return bitix;
}
}
- MFREE(buf, CBUFSIZ*CNBUFS);
- return;
+ ASSERT(mwbmap_p->ifree == 0);
+
+ return BCM_MWBMAP_INVALID_IDX;
}
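
For orientation, a hedged usage sketch of the allocator as it would be called from driver code (not a standalone program; 'osh' is assumed to be an existing osl_t handle owned by the caller, and the wrapper function name is made up for illustration).

static void mwbmap_example(osl_t *osh)
{
    struct bcm_mwbmap *map;
    uint32 id;

    map = bcm_mwbmap_init(osh, 256);        /* manage ids 0..255 */
    if (map == BCM_MWBMAP_INVALID_HDL)
        return;

    id = bcm_mwbmap_alloc(map);             /* lowest free id in the first non-empty word */
    if (id != BCM_MWBMAP_INVALID_IDX) {
        /* ... use id (e.g. as a flowring/pktid style index) ... */
        bcm_mwbmap_free(map, id);           /* return it to the pool */
    }

    bcm_mwbmap_fini(osh, map);              /* release the allocator */
}
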
-#endif /* notdef */
-/*
- * Advance from the current 1-byte tag/1-byte length/variable-length value
- * triple, to the next, returning a pointer to the next.
- * If the current or next TLV is invalid (does not fit in given buffer length),
- * NULL is returned.
- * *buflen is not modified if the TLV elt parameter is invalid, or is decremented
- * by the TLV parameter's length if it is valid.
- */
-bcm_tlv_t *
-bcm_next_tlv(const bcm_tlv_t *elt, uint *buflen)
+/* Force an index at a specified position to be in use */
+void
+bcm_mwbmap_force(struct bcm_mwbmap * mwbmap_hdl, uint32 bitix)
{
- uint len;
+ bcm_mwbmap_t * mwbmap_p;
+ uint32 count, wordix, bitmap, *bitmap_p;
- /* validate current elt */
- if (!bcm_valid_tlv(elt, *buflen)) {
- return NULL;
- }
+ BCM_MWBMAP_AUDIT(mwbmap_hdl);
+ mwbmap_p = BCM_MWBMAP_PTR(mwbmap_hdl);
- /* advance to next elt */
- len = elt->len;
- elt = (const bcm_tlv_t*)(elt->data + len);
- *buflen -= (TLV_HDR_LEN + len);
+ ASSERT(bitix < mwbmap_p->total);
- /* validate next elt */
- if (!bcm_valid_tlv(elt, *buflen)) {
- return NULL;
- }
+ /* Start with second hierarchy */
+ wordix = BCM_MWBMAP_DIVOP(bitix);
+ bitmap = (uint32)(1U << BCM_MWBMAP_MODOP(bitix));
+ bitmap_p = &mwbmap_p->id_bitmap[wordix];
+
+ ASSERT((*bitmap_p & bitmap) == bitmap);
+
+ mwbmap_p->ifree--; /* update free count */
+ ASSERT(mwbmap_p->ifree >= 0);
+
+ MWBMAP_DBG(("Lvl2: bitix<%u> wordix<%u>: %08x ^ %08x = %08x ifree %d",
+ bitix, wordix, *bitmap_p, bitmap, (*bitmap_p) ^ bitmap,
+ mwbmap_p->ifree));
+
+ *bitmap_p ^= bitmap; /* mark as in use */
+
+ /* Update first hierarchy */
+ bitix = wordix;
+
+ wordix = BCM_MWBMAP_DIVOP(bitix);
+ bitmap_p = &mwbmap_p->wd_bitmap[wordix];
+
+#if defined(BCM_MWBMAP_USE_CNTSETBITS)
+ count = bcm_cntsetbits(mwbmap_p->id_bitmap[bitix]);
+#else /* ! BCM_MWBMAP_USE_CNTSETBITS */
+ mwbmap_p->wd_count[bitix]--;
+ count = mwbmap_p->wd_count[bitix];
+ MWBMAP_ASSERT(count == bcm_cntsetbits(mwbmap_p->id_bitmap[bitix]));
+#endif /* ! BCM_MWBMAP_USE_CNTSETBITS */
+ MWBMAP_ASSERT(count >= 0);
+
+ bitmap = (count == 0) << BCM_MWBMAP_MODOP(bitix);
- GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
- return (bcm_tlv_t *)(elt);
- GCC_DIAGNOSTIC_POP();
+ MWBMAP_DBG(("Lvl1: bitix<%02lu> wordix<%02u>: %08x ^ %08x = %08x wfree %d",
+ BCM_MWBMAP_MODOP(bitix), wordix, *bitmap_p, bitmap,
+ (*bitmap_p) ^ bitmap, count));
+
+ *bitmap_p ^= bitmap; /* mark as in use */
+
+ return;
}
-/**
- * Advance a const tlv buffer pointer and length up to the given tlv element pointer
- * 'elt'. The function checks that elt is a valid tlv; the elt pointer and data
- * are all in the range of the buffer/length.
- *
- * @param elt pointer to a valid bcm_tlv_t in the buffer
- * @param buffer pointer to a tlv buffer
- * @param buflen length of the buffer in bytes
- *
- * On return, if elt is not a tlv in the buffer bounds, the *buffer parameter
- * will be set to NULL and *buflen parameter will be set to zero. Otherwise,
- * *buffer will point to elt, and *buflen will have been adjusted by the the
- * difference between *buffer and elt.
- */
-void
-bcm_tlv_buffer_advance_to(const bcm_tlv_t *elt, const uint8 **buffer, uint *buflen)
+/* Free a previously allocated index back into the multiword bitmap allocator */
+void BCMFASTPATH
+bcm_mwbmap_free(struct bcm_mwbmap * mwbmap_hdl, uint32 bitix)
{
- uint new_buflen;
- const uint8 *new_buffer;
+ bcm_mwbmap_t * mwbmap_p;
+ uint32 wordix, bitmap, *bitmap_p;
+
+ BCM_MWBMAP_AUDIT(mwbmap_hdl);
+ mwbmap_p = BCM_MWBMAP_PTR(mwbmap_hdl);
- new_buffer = (const uint8*)elt;
+ ASSERT(bitix < mwbmap_p->total);
- /* make sure the input buffer pointer is non-null, that (buffer + buflen) does not wrap,
- * and that the elt pointer is in the range of [buffer, buffer + buflen]
- */
- if ((*buffer != NULL) &&
- ((uintptr)*buffer < ((uintptr)*buffer + *buflen)) &&
- (new_buffer >= *buffer) &&
- (new_buffer < (*buffer + *buflen))) {
- /* delta between buffer and new_buffer is <= *buflen, so truncating cast to uint
- * from ptrdiff is ok
- */
- uint delta = (uint)(new_buffer - *buffer);
+ /* Start with second level hierarchy */
+ wordix = BCM_MWBMAP_DIVOP(bitix);
+ bitmap = (1U << BCM_MWBMAP_MODOP(bitix));
+ bitmap_p = &mwbmap_p->id_bitmap[wordix];
- /* New buffer length is old len minus the delta from the buffer start to elt.
- * The check just above guarantees that the subtractions does not underflow.
- */
- new_buflen = *buflen - delta;
-
- /* validate current elt */
- if (bcm_valid_tlv(elt, new_buflen)) {
- /* All good, so update the input/output parameters */
- *buffer = new_buffer;
- *buflen = new_buflen;
- return;
- }
+ ASSERT((*bitmap_p & bitmap) == 0U); /* ASSERT not a double free */
+
+ mwbmap_p->ifree++; /* update free count */
+ ASSERT(mwbmap_p->ifree <= mwbmap_p->total);
+
+ MWBMAP_DBG(("Lvl2: bitix<%02u> wordix<%02u>: %08x | %08x = %08x ifree %d",
+ bitix, wordix, *bitmap_p, bitmap, (*bitmap_p) | bitmap,
+ mwbmap_p->ifree));
+
+ *bitmap_p |= bitmap; /* mark as available */
+
+ /* Now update first level hierarchy */
+
+ bitix = wordix;
+
+ wordix = BCM_MWBMAP_DIVOP(bitix); /* first level's word index */
+ bitmap = (1U << BCM_MWBMAP_MODOP(bitix));
+ bitmap_p = &mwbmap_p->wd_bitmap[wordix];
+
+#if !defined(BCM_MWBMAP_USE_CNTSETBITS)
+ mwbmap_p->wd_count[bitix]++;
+#endif
+
+#if defined(BCM_MWBMAP_DEBUG)
+ {
+ uint32 count;
+#if defined(BCM_MWBMAP_USE_CNTSETBITS)
+ count = bcm_cntsetbits(mwbmap_p->id_bitmap[bitix]);
+#else /* ! BCM_MWBMAP_USE_CNTSETBITS */
+ count = mwbmap_p->wd_count[bitix];
+ MWBMAP_ASSERT(count == bcm_cntsetbits(mwbmap_p->id_bitmap[bitix]));
+#endif /* ! BCM_MWBMAP_USE_CNTSETBITS */
+
+ MWBMAP_ASSERT(count <= BCM_MWBMAP_BITS_WORD);
+
+ MWBMAP_DBG(("Lvl1: bitix<%02u> wordix<%02u>: %08x | %08x = %08x wfree %d",
+ bitix, wordix, *bitmap_p, bitmap, (*bitmap_p) | bitmap, count));
}
+#endif /* BCM_MWBMAP_DEBUG */
- /* something did not check out, clear out the buffer info */
- *buffer = NULL;
- *buflen = 0;
+ *bitmap_p |= bitmap;
return;
}
-/**
- * Advance a const tlv buffer pointer and length past the given tlv element pointer
- * 'elt'. The function checks that elt is a valid tlv; the elt pointer and data
- * are all in the range of the buffer/length. The function also checks that the
- * remaining buffer starts with a valid tlv.
- *
- * @param elt pointer to a valid bcm_tlv_t in the buffer
- * @param buffer pointer to a tlv buffer
- * @param buflen length of the buffer in bytes
- *
- * On return, if elt is not a tlv in the buffer bounds, or the remaining buffer
- * following the elt does not begin with a tlv in the buffer bounds, the *buffer
- * parameter will be set to NULL and *buflen parameter will be set to zero.
- * Otherwise, *buffer will point to the first byte past elt, and *buflen will
- * have the remaining buffer length.
- */
-void
-bcm_tlv_buffer_advance_past(const bcm_tlv_t *elt, const uint8 **buffer, uint *buflen)
+/* Fetch the total number of free indices in the multiword bitmap allocator */
+uint32
+bcm_mwbmap_free_cnt(struct bcm_mwbmap * mwbmap_hdl)
{
- /* Start by advancing the buffer up to the given elt */
- bcm_tlv_buffer_advance_to(elt, buffer, buflen);
+ bcm_mwbmap_t * mwbmap_p;
- /* if that did not work, bail out */
- if (*buflen == 0) {
- return;
- }
+ BCM_MWBMAP_AUDIT(mwbmap_hdl);
+ mwbmap_p = BCM_MWBMAP_PTR(mwbmap_hdl);
-#if defined(__COVERITY__)
- /* The elt has been verified by bcm_tlv_buffer_advance_to() to be a valid element,
- * so its elt->len is in the bounds of the buffer. The following check prevents
- * Coverity from flagging the (elt->data + elt->len) statement below as using a
- * tainted elt->len to index into array 'elt->data'.
- */
- if (elt->len > *buflen) {
- return;
- }
-#endif /* __COVERITY__ */
+ ASSERT(mwbmap_p->ifree >= 0);
- /* We know we are advanced up to a good tlv.
- * Now just advance to the following tlv.
- */
- elt = (const bcm_tlv_t*)(elt->data + elt->len);
+ return mwbmap_p->ifree;
+}
- bcm_tlv_buffer_advance_to(elt, buffer, buflen);
+/* Determine whether an index is in use or free */
+bool
+bcm_mwbmap_isfree(struct bcm_mwbmap * mwbmap_hdl, uint32 bitix)
+{
+ bcm_mwbmap_t * mwbmap_p;
+ uint32 wordix, bitmap;
- return;
+ BCM_MWBMAP_AUDIT(mwbmap_hdl);
+ mwbmap_p = BCM_MWBMAP_PTR(mwbmap_hdl);
+
+ ASSERT(bitix < mwbmap_p->total);
+
+ wordix = BCM_MWBMAP_DIVOP(bitix);
+ bitmap = (1U << BCM_MWBMAP_MODOP(bitix));
+
+ return ((mwbmap_p->id_bitmap[wordix] & bitmap) != 0U);
}
-/*
- * Traverse a string of 1-byte tag/1-byte length/variable-length value
- * triples, returning a pointer to the substring whose first element
- * matches tag
- */
-bcm_tlv_t *
-bcm_parse_tlvs(const void *buf, uint buflen, uint key)
+/* Debug dump a multiword bitmap allocator */
+void
+bcm_mwbmap_show(struct bcm_mwbmap * mwbmap_hdl)
+{
+ uint32 ix, count;
+ bcm_mwbmap_t * mwbmap_p;
+
+ BCM_MWBMAP_AUDIT(mwbmap_hdl);
+ mwbmap_p = BCM_MWBMAP_PTR(mwbmap_hdl);
+
+ printf("mwbmap_p %p wmaps %u imaps %u ifree %d total %u\n", mwbmap_p,
+ mwbmap_p->wmaps, mwbmap_p->imaps, mwbmap_p->ifree, mwbmap_p->total);
+ for (ix = 0U; ix < mwbmap_p->wmaps; ix++) {
+ printf("\tWDMAP:%2u. 0x%08x\t", ix, mwbmap_p->wd_bitmap[ix]);
+ bcm_bitprint32(mwbmap_p->wd_bitmap[ix]);
+ printf("\n");
+ }
+ for (ix = 0U; ix < mwbmap_p->imaps; ix++) {
+#if defined(BCM_MWBMAP_USE_CNTSETBITS)
+ count = bcm_cntsetbits(mwbmap_p->id_bitmap[ix]);
+#else /* ! BCM_MWBMAP_USE_CNTSETBITS */
+ count = mwbmap_p->wd_count[ix];
+ MWBMAP_ASSERT(count == bcm_cntsetbits(mwbmap_p->id_bitmap[ix]));
+#endif /* ! BCM_MWBMAP_USE_CNTSETBITS */
+ printf("\tIDMAP:%2u. 0x%08x %02u\t", ix, mwbmap_p->id_bitmap[ix], count);
+ bcm_bitprint32(mwbmap_p->id_bitmap[ix]);
+ printf("\n");
+ }
+
+ return;
+}
+
+/* Audit a hierarchical multiword bitmap */
+void
+bcm_mwbmap_audit(struct bcm_mwbmap * mwbmap_hdl)
{
- const bcm_tlv_t *elt;
- int totlen;
+ bcm_mwbmap_t * mwbmap_p;
+ uint32 count, free_cnt = 0U, wordix, idmap_ix, bitix, *bitmap_p;
- if ((elt = (const bcm_tlv_t*)buf) == NULL) {
- return NULL;
- }
- totlen = (int)buflen;
+ mwbmap_p = BCM_MWBMAP_PTR(mwbmap_hdl);
- /* find tagged parameter */
- while (totlen >= TLV_HDR_LEN) {
- uint len = elt->len;
+ for (wordix = 0U; wordix < mwbmap_p->wmaps; ++wordix) {
- /* validate remaining totlen */
- if ((elt->id == key) && (totlen >= (int)(len + TLV_HDR_LEN))) {
- GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
- return (bcm_tlv_t *)(elt);
- GCC_DIAGNOSTIC_POP();
- }
+ bitmap_p = &mwbmap_p->wd_bitmap[wordix];
- elt = (const bcm_tlv_t*)((const uint8*)elt + (len + TLV_HDR_LEN));
- totlen -= (len + TLV_HDR_LEN);
+ for (bitix = 0U; bitix < BCM_MWBMAP_BITS_WORD; bitix++) {
+ if ((*bitmap_p) & (1 << bitix)) {
+ idmap_ix = BCM_MWBMAP_MULOP(wordix) + bitix;
+#if defined(BCM_MWBMAP_USE_CNTSETBITS)
+ count = bcm_cntsetbits(mwbmap_p->id_bitmap[idmap_ix]);
+#else /* ! BCM_MWBMAP_USE_CNTSETBITS */
+ count = mwbmap_p->wd_count[idmap_ix];
+ ASSERT(count == bcm_cntsetbits(mwbmap_p->id_bitmap[idmap_ix]));
+#endif /* ! BCM_MWBMAP_USE_CNTSETBITS */
+ ASSERT(count != 0U);
+ free_cnt += count;
+ }
+ }
}
- return NULL;
+ ASSERT((int)free_cnt == mwbmap_p->ifree);
}
+/* END : Multiword bitmap based 64bit to Unique 32bit Id allocator. */
-bcm_tlv_t *
-bcm_parse_tlvs_dot11(const void *buf, int buflen, uint key, bool id_ext)
-{
- bcm_tlv_t *elt;
- int totlen;
-
- /*
- ideally, we don't want to do that, but returning a const pointer
- from these parse function spreads casting everywhere in the code
- */
- GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
- elt = (bcm_tlv_t*)buf;
- GCC_DIAGNOSTIC_POP();
+/* Simple 16bit Id allocator using a stack implementation. */
+typedef struct id16_map {
+ uint32 failures; /* count of failures */
+ void *dbg; /* debug placeholder */
+ uint16 total; /* total number of ids managed by allocator */
+ uint16 start; /* start value of 16bit ids to be managed */
+ int stack_idx; /* index into stack of available ids */
+ uint16 stack[0]; /* stack of 16 bit ids */
+} id16_map_t;
- totlen = buflen;
+#define ID16_MAP_SZ(items) (sizeof(id16_map_t) + \
+ (sizeof(uint16) * (items)))
- /* find tagged parameter */
- while (totlen >= TLV_HDR_LEN) {
- int len = elt->len;
+#if defined(BCM_DBG)
- do {
- /* validate remaining totlen */
- if (totlen < (int)(len + TLV_HDR_LEN))
- break;
+/* Uncomment BCM_DBG_ID16 to debug double free */
+/* #define BCM_DBG_ID16 */
- if (id_ext) {
- if (!DOT11_MNG_IE_ID_EXT_MATCH(elt, key))
- break;
- } else if (elt->id != key) {
- break;
- }
+typedef struct id16_map_dbg {
+ uint16 total;
+ bool avail[0];
+} id16_map_dbg_t;
+#define ID16_MAP_DBG_SZ(items) (sizeof(id16_map_dbg_t) + \
+ (sizeof(bool) * (items)))
+#define ID16_MAP_MSG(x) printf x
+#else
+#define ID16_MAP_MSG(x)
+#endif /* BCM_DBG */
- return (bcm_tlv_t *)(elt); /* a match */
- } while (0);
+void * /* Construct an id16 allocator: [start_val16 .. start_val16+total_ids) */
+id16_map_init(osl_t *osh, uint16 total_ids, uint16 start_val16)
+{
+ uint16 idx, val16;
+ id16_map_t * id16_map;
- elt = (bcm_tlv_t*)((uint8*)elt + (len + TLV_HDR_LEN));
- totlen -= (len + TLV_HDR_LEN);
- }
+ ASSERT(total_ids > 0);
- return NULL;
-}
+ /* A start_val16 of ID16_UNDEFINED allows the caller to fill the id16 map
+ * with random values.
+ */
+ ASSERT((start_val16 == ID16_UNDEFINED) ||
+ (start_val16 + total_ids) < ID16_INVALID);
-/*
- * Traverse a string of 1-byte tag/1-byte length/variable-length value
- * triples, returning a pointer to the substring whose first element
- * matches tag
- * return NULL if not found or length field < min_varlen
- */
-bcm_tlv_t *
-bcm_parse_tlvs_min_bodylen(const void *buf, int buflen, uint key, int min_bodylen)
-{
- bcm_tlv_t * ret;
- ret = bcm_parse_tlvs(buf, (uint)buflen, key);
- if (ret == NULL || ret->len < min_bodylen) {
+ id16_map = (id16_map_t *) MALLOC(osh, ID16_MAP_SZ(total_ids));
+ if (id16_map == NULL) {
return NULL;
}
- return ret;
-}
-
-/*
- * Traverse a string of 1-byte tag/1-byte length/variable-length value
- * triples, returning a pointer to the substring whose first element
- * matches tag. Stop parsing when we see an element whose ID is greater
- * than the target key.
- */
-const bcm_tlv_t *
-bcm_parse_ordered_tlvs(const void *buf, int buflen, uint key)
-{
- const bcm_tlv_t *elt;
- int totlen;
- elt = (const bcm_tlv_t*)buf;
- totlen = buflen;
+ id16_map->total = total_ids;
+ id16_map->start = start_val16;
+ id16_map->failures = 0;
+ id16_map->dbg = NULL;
- /* find tagged parameter */
- while (totlen >= TLV_HDR_LEN) {
- uint id = elt->id;
- int len = elt->len;
+ /*
+ * Populate stack with 16bit id values, commencing with start_val16.
+ * if start_val16 is ID16_UNDEFINED, then do not populate the id16 map.
+ */
+ id16_map->stack_idx = -1;
- /* Punt if we start seeing IDs > than target key */
- if (id > key) {
- return (NULL);
- }
+ if (id16_map->start != ID16_UNDEFINED) {
+ val16 = start_val16;
- /* validate remaining totlen */
- if ((id == key) && (totlen >= (int)(len + TLV_HDR_LEN))) {
- return (elt);
+ for (idx = 0; idx < total_ids; idx++, val16++) {
+ id16_map->stack_idx = idx;
+ id16_map->stack[id16_map->stack_idx] = val16;
}
-
- elt = (const bcm_tlv_t*)((const uint8*)elt + (len + TLV_HDR_LEN));
- totlen -= (len + TLV_HDR_LEN);
}
- return NULL;
-}
-#endif /* !BCMROMOFFLOAD_EXCLUDE_BCMUTILS_FUNCS */
-#if defined(WLMSG_PRHDRS) || defined(WLMSG_PRPKT) || defined(WLMSG_ASSOC) || \
- defined(DHD_DEBUG)
-int
-bcm_format_field(const bcm_bit_desc_ex_t *bd, uint32 flags, char* buf, int len)
-{
- int i, slen = 0;
- uint32 bit, mask;
- const char *name;
- mask = bd->mask;
- if (len < 2 || !buf)
- return 0;
+#if defined(BCM_DBG) && defined(BCM_DBG_ID16)
+ if (id16_map->start != ID16_UNDEFINED) {
+ id16_map->dbg = MALLOC(osh, ID16_MAP_DBG_SZ(total_ids));
- buf[0] = '\0';
+ if (id16_map->dbg) {
+ id16_map_dbg_t *id16_map_dbg = (id16_map_dbg_t *)id16_map->dbg;
- for (i = 0; (name = bd->bitfield[i].name) != NULL; i++) {
- bit = bd->bitfield[i].bit;
- if ((flags & mask) == bit) {
- if (len > (int)strlen(name)) {
- slen = (int)strlen(name);
- strncpy(buf, name, (size_t)len);
+ id16_map_dbg->total = total_ids;
+ for (idx = 0; idx < total_ids; idx++) {
+ id16_map_dbg->avail[idx] = TRUE;
}
- break;
}
}
- return slen;
+#endif /* BCM_DBG && BCM_DBG_ID16 */
+
+ return (void *)id16_map;
}
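
A matching hedged usage sketch for the id16 stack allocator (driver context assumed: 'osh' is an existing osl_t handle; the wrapper function name is made up and error handling is minimal).

static void id16_example(osl_t *osh)
{
    void *map;
    uint16 id;

    map = id16_map_init(osh, 256, 0x100);   /* hand out ids 0x100 .. 0x1FF */
    if (map == NULL)
        return;

    id = id16_map_alloc(map);               /* pops the next id off the stack */
    if (id != ID16_INVALID) {
        /* ... use id, then hand it back with the matching free routine ... */
    }

    map = id16_map_fini(osh, map);          /* frees the map, returns NULL */
}
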
-int
-bcm_format_flags(const bcm_bit_desc_t *bd, uint32 flags, char* buf, int len)
+void * /* Destruct an id16 allocator instance */
+id16_map_fini(osl_t *osh, void * id16_map_hndl)
{
- int i;
- char* p = buf;
- char hexstr[16];
- int slen = 0, nlen = 0;
- uint32 bit;
- const char* name;
+ uint16 total_ids;
+ id16_map_t * id16_map;
- if (len < 2 || !buf)
- return 0;
+ if (id16_map_hndl == NULL)
+ return NULL;
- buf[0] = '\0';
+ id16_map = (id16_map_t *)id16_map_hndl;
- for (i = 0; flags != 0; i++) {
- bit = bd[i].bit;
- name = bd[i].name;
- if (bit == 0 && flags != 0) {
- /* print any unnamed bits */
- snprintf(hexstr, 16, "0x%X", flags);
- name = hexstr;
- flags = 0; /* exit loop */
- } else if ((flags & bit) == 0)
- continue;
- flags &= ~bit;
- nlen = (int)strlen(name);
- slen += nlen;
- /* count btwn flag space */
- if (flags != 0)
- slen += 1;
- /* need NULL char as well */
- if (len <= slen)
- break;
- /* copy NULL char but don't count it */
- strncpy(p, name, (size_t)len);
- p += nlen;
- /* copy btwn flag space and NULL char */
- if (flags != 0)
- p += snprintf(p, 2, " ");
- }
+ total_ids = id16_map->total;
+ ASSERT(total_ids > 0);
- /* indicate the str was too short */
- if (flags != 0) {
- p += snprintf(p, 2, ">");
+#if defined(BCM_DBG) && defined(BCM_DBG_ID16)
+ if (id16_map->dbg) {
+ MFREE(osh, id16_map->dbg, ID16_MAP_DBG_SZ(total_ids));
+ id16_map->dbg = NULL;
}
+#endif /* BCM_DBG && BCM_DBG_ID16 */
- return (int)(p - buf);
+ id16_map->total = 0;
+ MFREE(osh, id16_map, ID16_MAP_SZ(total_ids));
+
+ return NULL;
}
-/* print out whcih bits in octet array 'addr' are set. bcm_bit_desc_t:bit is a bit offset. */
-int
-bcm_format_octets(const bcm_bit_desc_t *bd, uint bdsz,
- const uint8 *addr, uint size, char *buf, int len)
+void
+id16_map_clear(void * id16_map_hndl, uint16 total_ids, uint16 start_val16)
{
- uint i;
- char *p = buf;
- int slen = 0, nlen = 0;
- uint32 bit;
- const char* name;
- bool more = FALSE;
+ uint16 idx, val16;
+ id16_map_t * id16_map;
+
+ ASSERT(total_ids > 0);
+ /* A start_val16 of ID16_UNDEFINED allows the caller to fill the id16 map
+ * with random values.
+ */
+ ASSERT((start_val16 == ID16_UNDEFINED) ||
+ (start_val16 + total_ids) < ID16_INVALID);
+
+ id16_map = (id16_map_t *)id16_map_hndl;
+ if (id16_map == NULL) {
+ return;
+ }
- BCM_REFERENCE(size);
+ id16_map->total = total_ids;
+ id16_map->start = start_val16;
+ id16_map->failures = 0;
- if (len < 2 || !buf)
- return 0;
+ /* Populate stack with 16bit id values, commencing with start_val16 */
+ id16_map->stack_idx = -1;
- buf[0] = '\0';
+ if (id16_map->start != ID16_UNDEFINED) {
+ val16 = start_val16;
- for (i = 0; i < bdsz; i++) {
- bit = bd[i].bit;
- name = bd[i].name;
- CLANG_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
- if (isset(addr, bit)) {
- CLANG_DIAGNOSTIC_POP();
- nlen = (int)strlen(name);
- slen += nlen;
- /* need SPACE - for simplicity */
- slen += 1;
- /* need NULL as well */
- if (len < slen + 1) {
- more = TRUE;
- break;
- }
- memcpy(p, name, (size_t)nlen);
- p += nlen;
- p[0] = ' ';
- p += 1;
- p[0] = '\0';
+ for (idx = 0; idx < total_ids; idx++, val16++) {
+ id16_map->stack_idx = idx;
+ id16_map->stack[id16_map->stack_idx] = val16;
}
}
- if (more) {
- p[0] = '>';
- p += 1;
- p[0] = '\0';
- }
-
- return (int)(p - buf);
-}
-#endif // endif
-
-/* print bytes formatted as hex to a string. return the resulting string length */
-int
-bcm_format_hex(char *str, const void *bytes, int len)
-{
- int i;
- char *p = str;
- const uint8 *src = (const uint8*)bytes;
+#if defined(BCM_DBG) && defined(BCM_DBG_ID16)
+ if (id16_map->start != ID16_UNDEFINED) {
+ if (id16_map->dbg) {
+ id16_map_dbg_t *id16_map_dbg = (id16_map_dbg_t *)id16_map->dbg;
- for (i = 0; i < len; i++) {
- p += snprintf(p, 3, "%02X", *src);
- src++;
+ id16_map_dbg->total = total_ids;
+ for (idx = 0; idx < total_ids; idx++) {
+ id16_map_dbg->avail[idx] = TRUE;
+ }
+ }
}
- return (int)(p - str);
+#endif /* BCM_DBG && BCM_DBG_ID16 */
}
-/* pretty hex print a contiguous buffer */
-void
-prhex(const char *msg, const uchar *buf, uint nbytes)
+uint16 BCMFASTPATH /* Allocate a unique 16bit id */
+id16_map_alloc(void * id16_map_hndl)
{
- char line[128], *p;
- int len = sizeof(line);
- int nchar;
- uint i;
-
- if (msg && (msg[0] != '\0'))
- printf("%s:\n", msg);
+ uint16 val16;
+ id16_map_t * id16_map;
- p = line;
- for (i = 0; i < nbytes; i++) {
- if (i % 16 == 0) {
- nchar = snprintf(p, (size_t)len, " %04x: ", i); /* line prefix */
- p += nchar;
- len -= nchar;
- }
- if (len > 0) {
- nchar = snprintf(p, (size_t)len, "%02x ", buf[i]);
- p += nchar;
- len -= nchar;
- }
+ ASSERT(id16_map_hndl != NULL);
- if (i % 16 == 15) {
- printf("%s\n", line); /* flush line */
- p = line;
- len = sizeof(line);
- }
- }
+ id16_map = (id16_map_t *)id16_map_hndl;
- /* flush last partial line */
- if (p != line)
- printf("%s\n", line);
-}
+ ASSERT(id16_map->total > 0);
-static const char *crypto_algo_names[] = {
- "NONE",
- "WEP1",
- "TKIP",
- "WEP128",
- "AES_CCM",
- "AES_OCB_MSDU",
- "AES_OCB_MPDU",
-#ifdef BCMCCX
- "CKIP",
- "CKIP_MMH",
- "WEP_MMH",
- "NALG",
-#else
- "NALG",
- "UNDEF",
- "UNDEF",
- "UNDEF",
-#endif /* BCMCCX */
-#ifdef BCMWAPI_WAI
- "WAPI",
-#else
- "UNDEF",
-#endif // endif
- "PMK",
- "BIP",
- "AES_GCM",
- "AES_CCM256",
- "AES_GCM256",
- "BIP_CMAC256",
- "BIP_GMAC",
- "BIP_GMAC256",
- "UNDEF"
-};
+ if (id16_map->stack_idx < 0) {
+ id16_map->failures++;
+ return ID16_INVALID;
+ }
-const char *
-bcm_crypto_algo_name(uint algo)
-{
- return (algo < ARRAYSIZE(crypto_algo_names)) ? crypto_algo_names[algo] : "ERR";
-}
+ val16 = id16_map->stack[id16_map->stack_idx];
+ id16_map->stack_idx--;
-char *
-bcm_chipname(uint chipid, char *buf, uint len)
-{
- const char *fmt;
+#if defined(BCM_DBG) && defined(BCM_DBG_ID16)
+ ASSERT((id16_map->start == ID16_UNDEFINED) ||
+ (val16 < (id16_map->start + id16_map->total)));
- fmt = ((chipid > 0xa000) || (chipid < 0x4000)) ? "%d" : "%x";
- /*
- * The following call to snprintf generates a compiler warning
- * due to -Wformat-nonliteral. However, the format string is coming
- * from internal callers rather than external data input, and is a
- * useful debugging tool serving a variety of diagnostics. Rather
- * than expand code size by replicating multiple functions with different
- * argument lists, or disabling the warning globally, let's consider
- * if we can just disable the warning for this one instance.
- */
- CLANG_DIAGNOSTIC_PUSH_SUPPRESS_FORMAT()
- snprintf(buf, len, fmt, chipid);
- CLANG_DIAGNOSTIC_POP()
- return buf;
-}
+ if (id16_map->dbg) { /* Validate val16 */
+ id16_map_dbg_t *id16_map_dbg = (id16_map_dbg_t *)id16_map->dbg;
-/* Produce a human-readable string for boardrev */
-char *
-bcm_brev_str(uint32 brev, char *buf)
-{
- if (brev < 0x100)
- snprintf(buf, 8, "%d.%d", (brev & 0xf0) >> 4, brev & 0xf);
- else
- snprintf(buf, 8, "%c%03x", ((brev & 0xf000) == 0x1000) ? 'P' : 'A', brev & 0xfff);
+ ASSERT(id16_map_dbg->avail[val16 - id16_map->start] == TRUE);
+ id16_map_dbg->avail[val16 - id16_map->start] = FALSE;
+ }
+#endif /* BCM_DBG && BCM_DBG_ID16 */
- return (buf);
+ return val16;
}
-#define BUFSIZE_TODUMP_ATONCE 512 /* Buffer size */
-/* dump large strings to console */
-void
-printbig(char *buf)
+void BCMFASTPATH /* Free a 16bit id value into the id16 allocator */
+id16_map_free(void * id16_map_hndl, uint16 val16)
{
- uint len, max_len;
- char c;
+ id16_map_t * id16_map;
- len = (uint)strlen(buf);
+ ASSERT(id16_map_hndl != NULL);
- max_len = BUFSIZE_TODUMP_ATONCE;
+ id16_map = (id16_map_t *)id16_map_hndl;
- while (len > max_len) {
- c = buf[max_len];
- buf[max_len] = '\0';
- printf("%s", buf);
- buf[max_len] = c;
+#if defined(BCM_DBG) && defined(BCM_DBG_ID16)
+ ASSERT((id16_map->start == ID16_UNDEFINED) ||
+ (val16 < (id16_map->start + id16_map->total)));
- buf += max_len;
- len -= max_len;
+ if (id16_map->dbg) { /* Validate val16 */
+ id16_map_dbg_t *id16_map_dbg = (id16_map_dbg_t *)id16_map->dbg;
+
+ ASSERT(id16_map_dbg->avail[val16 - id16_map->start] == FALSE);
+ id16_map_dbg->avail[val16 - id16_map->start] = TRUE;
}
- /* print the remaining string */
- printf("%s\n", buf);
- return;
+#endif /* BCM_DBG && BCM_DBG_ID16 */
+
+ id16_map->stack_idx++;
+ id16_map->stack[id16_map->stack_idx] = val16;
}
-/* routine to dump fields in a fileddesc structure */
-uint
-bcmdumpfields(bcmutl_rdreg_rtn read_rtn, void *arg0, uint arg1, struct fielddesc *fielddesc_array,
- char *buf, uint32 bufsize)
+uint32 /* Returns number of failures to allocate a unique id16 */
+id16_map_failures(void * id16_map_hndl)
{
- uint filled_len;
- int len;
- struct fielddesc *cur_ptr;
+ ASSERT(id16_map_hndl != NULL);
+ return ((id16_map_t *)id16_map_hndl)->failures;
+}
- filled_len = 0;
- cur_ptr = fielddesc_array;
+bool
+id16_map_audit(void * id16_map_hndl)
+{
+ int idx;
+ int insane = 0;
+ id16_map_t * id16_map;
- while (bufsize > 1) {
- if (cur_ptr->nameandfmt == NULL)
- break;
+ ASSERT(id16_map_hndl != NULL);
- /*
- * The following call to snprintf generates a compiler warning
- * due to -Wformat-nonliteral. However, the format string is coming
- * from internal callers rather than external data input, and is a
- * useful debugging tool serving a variety of diagnostics. Rather
- * than expand code size by replicating multiple functions with different
- * argument lists, or disabling the warning globally, let's consider
- * if we can just disable the warning for this one instance.
- */
- CLANG_DIAGNOSTIC_PUSH_SUPPRESS_FORMAT()
- len = snprintf(buf, bufsize, cur_ptr->nameandfmt,
- read_rtn(arg0, arg1, cur_ptr->offset));
- CLANG_DIAGNOSTIC_POP()
- /* check for snprintf overflow or error */
- if (len < 0 || (uint32)len >= bufsize)
- len = (int)(bufsize - 1);
- buf += len;
- bufsize -= (uint32)len;
- filled_len += (uint32)len;
- cur_ptr++;
- }
- return filled_len;
-}
+ id16_map = (id16_map_t *)id16_map_hndl;
-uint
-bcm_mkiovar(const char *name, const char *data, uint datalen, char *buf, uint buflen)
-{
- uint len;
+ ASSERT(id16_map->stack_idx >= -1);
+ ASSERT(id16_map->stack_idx < (int)id16_map->total);
- len = (uint)strlen(name) + 1;
+ if (id16_map->start == ID16_UNDEFINED)
+ goto done;
- if ((len + datalen) > buflen)
- return 0;
+ for (idx = 0; idx <= id16_map->stack_idx; idx++) {
+ ASSERT(id16_map->stack[idx] >= id16_map->start);
+ ASSERT(id16_map->stack[idx] < (id16_map->start + id16_map->total));
- strncpy(buf, name, buflen);
+#if defined(BCM_DBG) && defined(BCM_DBG_ID16)
+ if (id16_map->dbg) {
+ uint16 val16 = id16_map->stack[idx];
+ if (((id16_map_dbg_t *)(id16_map->dbg))->avail[val16 - id16_map->start] != TRUE) {
+ insane |= 1;
+ ID16_MAP_MSG(("id16_map<%p>: stack_idx %u invalid val16 %u\n",
+ id16_map_hndl, idx, val16));
+ }
+ }
+#endif /* BCM_DBG && BCM_DBG_ID16 */
+ }
- /* append data onto the end of the name string */
- if (data && datalen != 0) {
- memcpy(&buf[len], data, datalen);
- len += datalen;
+#if defined(BCM_DBG) && defined(BCM_DBG_ID16)
+ if (id16_map->dbg) {
+ uint16 avail = 0; /* Audit available ids counts */
+ for (idx = 0; idx < id16_map->total; idx++) {
+ if (((id16_map_dbg_t *)(id16_map->dbg))->avail[idx] == TRUE)
+ avail++;
+ }
+ if (avail && (avail != (id16_map->stack_idx + 1))) {
+ insane |= 1;
+ ID16_MAP_MSG(("id16_map<%p>: avail %u stack_idx %u\n",
+ id16_map_hndl, avail, id16_map->stack_idx));
+ }
}
+#endif /* BCM_DBG && BCM_DBG_ID16 */
- return len;
+done:
+ /* invoke any other system audits */
+ return (!!insane);
}
+/* END: Simple id16 allocator */
-/* Quarter dBm units to mW
- * Table starts at QDBM_OFFSET, so the first entry is mW for qdBm=153
- * Table is offset so the last entry is largest mW value that fits in
- * a uint16.
- */
-#define QDBM_OFFSET 153 /* Offset for first entry */
-#define QDBM_TABLE_LEN 40 /* Table size */
+#endif
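
/* Illustration (not part of the patch): the id16 allocator above keeps every
 * free id on a stack, so alloc pops and free pushes in O(1). A minimal,
 * self-contained sketch of that pattern; names here (pool_init, pool_alloc,
 * pool_free) are hypothetical, and the driver's id16_map_* API additionally
 * carries an osl handle, failure counters, and optional debug state.
 */
#include <assert.h>
#include <stdint.h>

#define MAX_IDS 8
#define ID_INVALID 0xFFFFu

static uint16_t stack[MAX_IDS];
static int stack_idx = -1;

static void pool_init(uint16_t start, uint16_t total)
{
	uint16_t i;
	for (i = 0; i < total; i++)
		stack[++stack_idx] = (uint16_t)(start + i); /* push free ids */
}

static uint16_t pool_alloc(void)
{
	return (stack_idx < 0) ? ID_INVALID : stack[stack_idx--]; /* pop */
}

static void pool_free(uint16_t id)
{
	stack[++stack_idx] = id; /* push back */
}

int main(void)
{
	pool_init(100, 4);
	assert(pool_alloc() == 103); /* LIFO: the last pushed id is handed out first */
	pool_free(103);
	assert(pool_alloc() == 103);
	return 0;
}
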
-/* Smallest mW value that will round up to the first table entry, QDBM_OFFSET.
- * Value is ( mW(QDBM_OFFSET - 1) + mW(QDBM_OFFSET) ) / 2
- */
-#define QDBM_TABLE_LOW_BOUND 6493 /* Low bound */
+/* Calculate a >> b and return only the lower 32 bits */
+void
+bcm_uint64_right_shift(uint32* r, uint32 a_high, uint32 a_low, uint32 b)
+{
+ uint32 a1 = a_high, a0 = a_low, r0 = 0;
-/* Largest mW value that will round down to the last table entry,
- * QDBM_OFFSET + QDBM_TABLE_LEN-1.
- * Value is ( mW(QDBM_OFFSET + QDBM_TABLE_LEN - 1) + mW(QDBM_OFFSET + QDBM_TABLE_LEN) ) / 2.
- */
-#define QDBM_TABLE_HIGH_BOUND 64938 /* High bound */
+ if (b == 0) {
+ r0 = a_low;
+ *r = r0;
+ return;
+ }
-static const uint16 nqdBm_to_mW_map[QDBM_TABLE_LEN] = {
-/* qdBm: +0 +1 +2 +3 +4 +5 +6 +7 */
-/* 153: */ 6683, 7079, 7499, 7943, 8414, 8913, 9441, 10000,
-/* 161: */ 10593, 11220, 11885, 12589, 13335, 14125, 14962, 15849,
-/* 169: */ 16788, 17783, 18836, 19953, 21135, 22387, 23714, 25119,
-/* 177: */ 26607, 28184, 29854, 31623, 33497, 35481, 37584, 39811,
-/* 185: */ 42170, 44668, 47315, 50119, 53088, 56234, 59566, 63096
-};
+ if (b < 32) {
+ a0 = a0 >> b;
+ a1 = a1 & ((1 << b) - 1);
+ a1 = a1 << (32 - b);
+ r0 = a0 | a1;
+ *r = r0;
+ return;
+ } else {
+ r0 = a1 >> (b - 32);
+ *r = r0;
+ return;
+ }
-uint16
-bcm_qdbm_to_mw(uint8 qdbm)
+}
+
+/* calculate a + b where a is a 64 bit number and b is a 32 bit number */
+void
+bcm_add_64(uint32* r_hi, uint32* r_lo, uint32 offset)
{
- uint factor = 1;
- int idx = qdbm - QDBM_OFFSET;
+ uint32 r1_lo = *r_lo;
+ (*r_lo) += offset;
+ if (*r_lo < r1_lo)
+ (*r_hi) ++;
+}
- if (idx >= QDBM_TABLE_LEN) {
- /* clamp to max uint16 mW value */
- return 0xFFFF;
- }
+/* calculate a - b where a is a 64 bit number and b is a 32 bit number */
+void
+bcm_sub_64(uint32* r_hi, uint32* r_lo, uint32 offset)
+{
+ uint32 r1_lo = *r_lo;
+ (*r_lo) -= offset;
+ if (*r_lo > r1_lo)
+ (*r_hi) --;
+}
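
/* Illustration (not part of the patch): the carry test in bcm_add_64 relies on
 * unsigned wraparound -- if the new low word is smaller than the old one, the
 * 32-bit addition overflowed and the high word must be incremented (bcm_sub_64
 * uses the mirror-image test for borrow). A self-contained check of that rule:
 */
#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint32_t hi = 0, lo = 0xFFFFFFF0u;
	uint32_t old_lo = lo;

	lo += 0x20u;		/* wraps around to 0x10 */
	if (lo < old_lo)	/* wraparound => carry into the high word */
		hi++;

	assert(hi == 1 && lo == 0x10u);
	assert((((uint64_t)hi << 32) | lo) == (uint64_t)0xFFFFFFF0u + 0x20u);
	return 0;
}
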
- /* scale the qdBm index up to the range of the table 0-40
- * where an offset of 40 qdBm equals a factor of 10 mW.
- */
- while (idx < 0) {
- idx += 40;
- factor *= 10;
- }
+/* Does unsigned 64 bit fixed point multiplication */
+uint64
+fp_mult_64(uint64 val1, uint64 val2, uint8 nf1, uint8 nf2, uint8 nf_res)
+{
+ uint64 mult_out_tmp, mult_out, rnd_val;
+ uint8 shift_amt;
- /* return the mW value scaled down to the correct factor of 10,
- * adding in factor/2 to get proper rounding.
- */
- return (uint16)((nqdBm_to_mW_map[idx] + factor/2) / factor);
+ shift_amt = nf1 + nf2 - nf_res;
+ /* 0.5 in 1.0.shift_amt */
+ rnd_val = (shift_amt == 0) ? 0 : bcm_shl_64(1, (shift_amt - 1));
+ mult_out_tmp = (uint64)((uint64)val1 * (uint64)val2) + (uint64)rnd_val;
+ mult_out = bcm_shr_64(mult_out_tmp, shift_amt);
+
+ return mult_out;
}
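
/* Illustration (not part of the patch): the Q-format arithmetic above computes
 * (val1 * val2 + 2^(nf1+nf2-nf_res-1)) >> (nf1+nf2-nf_res), where the nf*
 * arguments are fractional-bit counts. A self-contained worked example
 * (3.5 in Q4 times 2.25 in Q4, with the result kept in Q4):
 */
#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint64_t a = 56;	/* 3.5  * 2^4 */
	uint64_t b = 36;	/* 2.25 * 2^4 */
	unsigned nf1 = 4, nf2 = 4, nf_res = 4;
	unsigned shift = nf1 + nf2 - nf_res;			/* 4 */
	uint64_t rnd = shift ? (1ull << (shift - 1)) : 0;	/* 8 = 0.5 in the product format */
	uint64_t out = (a * b + rnd) >> shift;

	assert(out == 126);	/* 126 / 2^4 = 7.875 = 3.5 * 2.25 */
	return 0;
}
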
+
+/* Does unsigned 64 bit by 32 bit fixed point division */
uint8
-bcm_mw_to_qdbm(uint16 mw)
+fp_div_64(uint64 num, uint32 den, uint8 nf_num, uint8 nf_den, uint32 *div_out)
{
- uint8 qdbm;
- int offset;
- uint mw_uint = mw;
- uint boundary;
+ uint8 shift_amt1, shift_amt2, shift_amt, nf_res, hd_rm_nr, hd_rm_dr;
+ uint32 num_hi, num_lo;
+ uint64 num_scale;
- /* handle boundary case */
- if (mw_uint <= 1)
- return 0;
-
- offset = QDBM_OFFSET;
+ /* Worst case shift possible */
+ hd_rm_nr = fp_calc_head_room_64(num);
+ hd_rm_dr = fp_calc_head_room_32(den);
- /* move mw into the range of the table */
- while (mw_uint < QDBM_TABLE_LOW_BOUND) {
- mw_uint *= 10;
- offset -= 40;
- }
+ /* (Nr / Dr) <= 2^32 */
+ shift_amt1 = hd_rm_nr - hd_rm_dr - 1;
+ /* Shift <= 32 + N2 - N1 */
+ shift_amt2 = 31 + nf_den - nf_num;
+ shift_amt = MINIMUM(shift_amt1, shift_amt2);
- for (qdbm = 0; qdbm < QDBM_TABLE_LEN-1; qdbm++) {
- boundary = nqdBm_to_mW_map[qdbm] + (nqdBm_to_mW_map[qdbm+1] -
- nqdBm_to_mW_map[qdbm])/2;
- if (mw_uint < boundary) break;
- }
+ /* Scale numerator */
+ num_scale = bcm_shl_64(num, shift_amt);
- qdbm += (uint8)offset;
+ /* Do division */
+ num_hi = (uint32)((uint64)num_scale >> 32) & MASK_32_BITS;
+ num_lo = (uint32)(num_scale & MASK_32_BITS);
+ bcm_uint64_divide(div_out, num_hi, num_lo, den);
- return (qdbm);
+ /* Result format */
+ nf_res = nf_num - nf_den + shift_amt;
+ return nf_res;
}
-uint
-bcm_bitcount(uint8 *bitmap, uint length)
+/* Finds the number of bits available for shifting in unsigned 64 bit number */
+uint8
+fp_calc_head_room_64(uint64 num)
{
- uint bitcount = 0, i;
- uint8 tmp;
- for (i = 0; i < length; i++) {
- tmp = bitmap[i];
- while (tmp) {
- bitcount++;
- tmp &= (tmp - 1);
- }
+ uint8 n_room_bits = 0, msb_pos;
+ uint32 num_hi, num_lo, x;
+
+ num_hi = (uint32)((uint64)num >> 32) & MASK_32_BITS;
+ num_lo = (uint32)(num & MASK_32_BITS);
+
+ if (num_hi > 0) {
+ x = num_hi;
+ n_room_bits = 0;
+ } else {
+ x = num_lo;
+ n_room_bits = 32;
}
- return bitcount;
-}
-/*
- * ProcessVars:Takes a buffer of "<var>=<value>\n" lines read from a file and ending in a NUL.
- * also accepts nvram files which are already in the format of <var1>=<value>\0\<var2>=<value2>\0
- * Removes carriage returns, empty lines, comment lines, and converts newlines to NULs.
- * Shortens buffer as needed and pads with NULs. End of buffer is marked by two NULs.
-*/
+ msb_pos = (x >> 16) ? ((x >> 24) ? (24 + msb_table[(x >> 24) & MASK_8_BITS])
+ : (16 + msb_table[(x >> 16) & MASK_8_BITS]))
+ : ((x >> 8) ? (8 + msb_table[(x >> 8) & MASK_8_BITS])
+ : msb_table[x & MASK_8_BITS]);
-unsigned int
-process_nvram_vars(char *varbuf, unsigned int len)
-{
- char *dp;
- bool findNewline;
- int column;
- unsigned int buf_len, n;
- unsigned int pad = 0;
- char nv_ver[128];
+ return (n_room_bits + 32 - msb_pos);
+}
- dp = varbuf;
+/* Finds the number of bits available for shifting in unsigned 32 bit number */
+uint8
+fp_calc_head_room_32(uint32 x)
+{
+ uint8 msb_pos;
- findNewline = FALSE;
- column = 0;
+ msb_pos = (x >> 16) ? ((x >> 24) ? (24 + msb_table[(x >> 24) & MASK_8_BITS])
+ : (16 + msb_table[(x >> 16) & MASK_8_BITS]))
+ : ((x >> 8) ? (8 + msb_table[(x >> 8) & MASK_8_BITS])
+ : msb_table[x & MASK_8_BITS]);
- // terence 20130914: print out NVRAM version
- if (varbuf[0] == '#') {
- memset(nv_ver, 0x00, sizeof(nv_ver));
- for (n=1; n<len && n<(sizeof(nv_ver)-1); n++) {
- if (varbuf[n] == '\n')
- break;
- nv_ver[n-1] = varbuf[n];
- }
- printf("NVRAM version: %s\n", nv_ver);
- }
+ return (32 - msb_pos);
+}
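
/* Illustration (not part of the patch): fp_calc_head_room_32 counts leading
 * zero bits by locating the most significant set bit with an 8-bit lookup
 * table (msb_table in the driver, which the expressions above imply is indexed
 * 0..255 and returns the 1-based position of the highest set bit). A
 * self-contained equivalent using a small helper in place of the table:
 */
#include <assert.h>
#include <stdint.h>

static uint8_t msb8(uint8_t x)	/* 1-based position of the highest set bit, 0 for 0 */
{
	uint8_t pos = 0;
	while (x) {
		pos++;
		x >>= 1;
	}
	return pos;
}

static uint8_t head_room_32(uint32_t x)
{
	uint8_t msb_pos;

	msb_pos = (x >> 16) ? ((x >> 24) ? (24 + msb8((x >> 24) & 0xFF))
					 : (16 + msb8((x >> 16) & 0xFF)))
			    : ((x >> 8) ? (8 + msb8((x >> 8) & 0xFF))
					: msb8(x & 0xFF));
	return (uint8_t)(32 - msb_pos);
}

int main(void)
{
	assert(head_room_32(1) == 31);		/* only bit 0 set */
	assert(head_room_32(0x00010000u) == 15);
	assert(head_room_32(0x80000000u) == 0);
	return 0;
}
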
- for (n = 0; n < len; n++) {
- if (varbuf[n] == '\r')
- continue;
- if (findNewline && varbuf[n] != '\n')
- continue;
- findNewline = FALSE;
- if (varbuf[n] == '#') {
- findNewline = TRUE;
- continue;
- }
- if (varbuf[n] == '\n') {
- if (column == 0)
- continue;
- *dp++ = 0;
- column = 0;
- continue;
- }
- *dp++ = varbuf[n];
- column++;
- }
- buf_len = (unsigned int)(dp - varbuf);
- if (buf_len % 4) {
- pad = 4 - buf_len % 4;
- if (pad && (buf_len + pad <= len)) {
- buf_len += pad;
- }
- }
+/* Does unsigned 64 bit fixed point floor */
+uint32
+fp_floor_64(uint64 num, uint8 floor_pos)
+{
+ uint32 floor_out;
- while (dp < varbuf + n)
- *dp++ = 0;
+ floor_out = (uint32)bcm_shr_64(num, floor_pos);
- return buf_len;
+ return floor_out;
}
-#ifndef setbit /* As in the header file */
-#ifdef BCMUTILS_BIT_MACROS_USE_FUNCS
-/* Set bit in byte array. */
-void
-setbit(void *array, uint bit)
+/* Does unsigned 32 bit fixed point floor */
+uint32
+fp_floor_32(uint32 num, uint8 floor_pos)
{
- ((uint8 *)array)[bit / NBBY] |= 1 << (bit % NBBY);
+ return num >> floor_pos;
}
-/* Clear bit in byte array. */
-void
-clrbit(void *array, uint bit)
+/* Does unsigned 64 bit fixed point rounding */
+uint32
+fp_round_64(uint64 num, uint8 rnd_pos)
{
- ((uint8 *)array)[bit / NBBY] &= ~(1 << (bit % NBBY));
+ uint64 rnd_val, rnd_out_tmp;
+ uint32 rnd_out;
+
+ /* 0.5 in 1.0.rnd_pos */
+ rnd_val = (rnd_pos == 0) ? 0 : bcm_shl_64(1, (rnd_pos - 1));
+ rnd_out_tmp = num + rnd_val;
+ rnd_out = (uint32)bcm_shr_64(rnd_out_tmp, rnd_pos);
+
+ return rnd_out;
}
-/* Test if bit is set in byte array. */
-bool
-isset(const void *array, uint bit)
+/* Does unsigned 32 bit fixed point rounding */
+uint32
+fp_round_32(uint32 num, uint8 rnd_pos)
{
- return (((const uint8 *)array)[bit / NBBY] & (1 << (bit % NBBY)));
+ uint32 rnd_val, rnd_out_tmp;
+
+ /* 0.5 in 1.0.rnd_pos */
+ rnd_val = (rnd_pos == 0) ? 0 : (1 << (rnd_pos - 1));
+ rnd_out_tmp = num + rnd_val;
+ return (rnd_out_tmp >> rnd_pos);
}
-/* Test if bit is clear in byte array. */
-bool
-isclr(const void *array, uint bit)
+/* Does unsigned 64 bit fixed point ceiling */
+uint32
+fp_ceil_64(uint64 num, uint8 ceil_pos)
{
- return ((((const uint8 *)array)[bit / NBBY] & (1 << (bit % NBBY))) == 0);
+ uint64 ceil_val, ceil_out_tmp;
+ uint32 ceil_out;
+
+ /* 0.999... in 1.0.ceil_pos */
+ ceil_val = bcm_shl_64(1, ceil_pos) - 1;
+ ceil_out_tmp = num + ceil_val;
+ ceil_out = (uint32)bcm_shr_64(ceil_out_tmp, ceil_pos);
+
+ return ceil_out;
}
-#endif /* BCMUTILS_BIT_MACROS_USE_FUNCS */
-#endif /* setbit */
-void
-set_bitrange(void *array, uint start, uint end, uint maxbit)
+/* Does left shift of unsigned 64 bit number */
+uint64
+bcm_shl_64(uint64 input, uint8 shift_amt)
{
- uint startbyte = start/NBBY;
- uint endbyte = end/NBBY;
- uint i, startbytelastbit, endbytestartbit;
+ uint32 in_hi, in_lo;
+ uint32 masked_lo = 0;
+ uint32 mask;
+ uint64 shl_out;
- if (end >= start) {
- if (endbyte - startbyte > 1)
- {
- startbytelastbit = (startbyte+1)*NBBY - 1;
- endbytestartbit = endbyte*NBBY;
- for (i = startbyte+1; i < endbyte; i++)
- ((uint8 *)array)[i] = 0xFF;
- for (i = start; i <= startbytelastbit; i++)
- setbit(array, i);
- for (i = endbytestartbit; i <= end; i++)
- setbit(array, i);
- } else {
- for (i = start; i <= end; i++)
- setbit(array, i);
- }
- }
- else {
- set_bitrange(array, start, maxbit, maxbit);
- set_bitrange(array, 0, end, maxbit);
+ if (shift_amt == 0) {
+ return input;
}
-}
-void
-bcm_bitprint32(const uint32 u32arg)
-{
- int i;
- for (i = NBITS(uint32) - 1; i >= 0; i--) {
- if (isbitset(u32arg, i)) {
- printf("1");
- } else {
- printf("0");
- }
+ /* Get hi and lo part */
+ in_hi = (uint32)((uint64)input >> 32) & MASK_32_BITS;
+ in_lo = (uint32)(input & MASK_32_BITS);
- if ((i % NBBY) == 0) printf(" ");
+ if (shift_amt < 32) {
+ /* Extract the bits of the lo part that move into the hi part after shifting */
+ mask = ((uint32)~0) << (32 - shift_amt);
+ masked_lo = (in_lo & mask) >> (32 - shift_amt);
+
+ /* Shift hi and lo and prepare output */
+ in_hi = (in_hi << shift_amt) | masked_lo;
+ in_lo = in_lo << shift_amt;
+ } else {
+ /* Shift amount >= 32: the lo part moves entirely into the hi part */
+ shift_amt = shift_amt - 32;
+
+ /* Shift hi and lo and prepare output */
+ in_hi = in_lo << shift_amt;
+ in_lo = 0;
}
- printf("\n");
+
+ shl_out = (((uint64)in_hi << 32) | in_lo);
+ return shl_out;
}
-/* calculate checksum for ip header, tcp / udp header / data */
-uint16
-bcm_ip_cksum(uint8 *buf, uint32 len, uint32 sum)
+/* Does right shift of unsigned 64 bit number */
+uint64
+bcm_shr_64(uint64 input, uint8 shift_amt)
{
- while (len > 1) {
- sum += (uint32)((buf[0] << 8) | buf[1]);
- buf += 2;
- len -= 2;
- }
+ uint32 in_hi, in_lo;
+ uint32 masked_hi = 0;
+ uint32 mask;
+ uint64 shr_out;
- if (len > 0) {
- sum += (uint32)((*buf) << 8);
+ if (shift_amt == 0) {
+ return input;
}
- while (sum >> 16) {
- sum = (sum & 0xffff) + (sum >> 16);
- }
+ /* Get hi and lo part */
+ in_hi = (uint32)((uint64)input >> 32) & MASK_32_BITS;
+ in_lo = (uint32)(input & MASK_32_BITS);
- return ((uint16)~sum);
-}
+ if (shift_amt < 32) {
+ /* Extract the bits of the hi part that move into the lo part after shifting */
+ mask = (1 << shift_amt) - 1;
+ masked_hi = in_hi & mask;
+
+ /* Shift hi and lo and prepare output */
+ in_hi = (uint32)in_hi >> shift_amt;
+ in_lo = ((uint32)in_lo >> shift_amt) | (masked_hi << (32 - shift_amt));
+ } else {
+ shift_amt = shift_amt - 32;
+ in_lo = in_hi >> shift_amt;
+ in_hi = 0;
+ }
-int
-BCMRAMFN(valid_bcmerror)(int e)
-{
- return ((e <= 0) && (e >= BCME_LAST));
+ shr_out = (((uint64)in_hi << 32) | in_lo);
+ return shr_out;
}
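
/* Illustration (not part of the patch): bcm_shr_64 splits the 64-bit value
 * into two 32-bit halves; for shifts below 32 the bits that fall out of the
 * high half are masked off and moved into the top of the low half. A
 * self-contained check of that decomposition against a native 64-bit shift:
 */
#include <assert.h>
#include <stdint.h>

static uint64_t shr64_by_halves(uint64_t in, unsigned s)
{
	uint32_t hi = (uint32_t)(in >> 32);
	uint32_t lo = (uint32_t)in;

	if (s == 0)
		return in;
	if (s < 32) {
		uint32_t carry = hi & ((1u << s) - 1);	/* bits moving into lo */
		hi >>= s;
		lo = (lo >> s) | (carry << (32 - s));
	} else {
		lo = hi >> (s - 32);
		hi = 0;
	}
	return ((uint64_t)hi << 32) | lo;
}

int main(void)
{
	uint64_t v = 0x123456789ABCDEF0ull;
	unsigned s;

	for (s = 0; s < 64; s++)
		assert(shr64_by_halves(v, s) == (v >> s));
	return 0;
}
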
#ifdef DEBUG_COUNTER
#endif /* OSL_SYSUPTIME_SUPPORT == TRUE */
#endif /* DEBUG_COUNTER */
+#if defined(BCMDRIVER) && !defined(_CFEZ_)
+void
+dll_pool_detach(void * osh, dll_pool_t * pool, uint16 elems_max, uint16 elem_size)
+{
+ uint32 mem_size;
+ mem_size = sizeof(dll_pool_t) + (elems_max * elem_size);
+ if (pool)
+ MFREE(osh, pool, mem_size);
+}
+dll_pool_t *
+dll_pool_init(void * osh, uint16 elems_max, uint16 elem_size)
+{
+ uint32 mem_size, i;
+ dll_pool_t * dll_pool_p;
+ dll_t * elem_p;
+
+ ASSERT(elem_size > sizeof(dll_t));
+
+ mem_size = sizeof(dll_pool_t) + (elems_max * elem_size);
+
+ if ((dll_pool_p = (dll_pool_t *)MALLOCZ(osh, mem_size)) == NULL) {
+ printf("dll_pool_init: elems_max<%u> elem_size<%u> malloc failure\n",
+ elems_max, elem_size);
+ ASSERT(0);
+ return dll_pool_p;
+ }
+
+ dll_init(&dll_pool_p->free_list);
+ dll_pool_p->elems_max = elems_max;
+ dll_pool_p->elem_size = elem_size;
+
+ elem_p = dll_pool_p->elements;
+ for (i = 0; i < elems_max; i++) {
+ dll_append(&dll_pool_p->free_list, elem_p);
+ elem_p = (dll_t *)((uintptr)elem_p + elem_size);
+ }
+
+ dll_pool_p->free_count = elems_max;
+
+ return dll_pool_p;
+}
+
+
+void *
+dll_pool_alloc(dll_pool_t * dll_pool_p)
+{
+ dll_t * elem_p;
+
+ if (dll_pool_p->free_count == 0) {
+ ASSERT(dll_empty(&dll_pool_p->free_list));
+ return NULL;
+ }
+
+ elem_p = dll_head_p(&dll_pool_p->free_list);
+ dll_delete(elem_p);
+ dll_pool_p->free_count -= 1;
+
+ return (void *)elem_p;
+}
+
+void
+dll_pool_free(dll_pool_t * dll_pool_p, void * elem_p)
+{
+ dll_t * node_p = (dll_t *)elem_p;
+ dll_prepend(&dll_pool_p->free_list, node_p);
+ dll_pool_p->free_count += 1;
+}
+
+
+void
+dll_pool_free_tail(dll_pool_t * dll_pool_p, void * elem_p)
+{
+ dll_t * node_p = (dll_t *)elem_p;
+ dll_append(&dll_pool_p->free_list, node_p);
+ dll_pool_p->free_count += 1;
+}
+
+#endif /* BCMDRIVER && !_CFEZ_ */
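
/* Illustration (not part of the patch): dll_pool_init carves one contiguous
 * allocation into fixed-size elements and threads each element's leading dll_t
 * through a free list, so alloc/free are O(1) list operations with no
 * per-element malloc. A minimal, self-contained sketch of that layout, using a
 * singly linked free list in place of the driver's doubly linked dll_t:
 */
#include <assert.h>
#include <stdlib.h>

struct node { struct node *next; };	/* stands in for the embedded dll_t */

struct pool {
	struct node *free_list;
	unsigned free_count;
	char *mem;
};

static int pool_init(struct pool *p, unsigned elems, size_t elem_size)
{
	unsigned i;

	assert(elem_size >= sizeof(struct node));
	p->mem = calloc(elems, elem_size);
	if (!p->mem)
		return -1;
	p->free_list = NULL;
	for (i = 0; i < elems; i++) {	/* thread each element onto the free list */
		struct node *n = (struct node *)(p->mem + i * elem_size);
		n->next = p->free_list;
		p->free_list = n;
	}
	p->free_count = elems;
	return 0;
}

static void *pool_alloc(struct pool *p)
{
	struct node *n = p->free_list;
	if (!n)
		return NULL;
	p->free_list = n->next;
	p->free_count--;
	return n;
}

static void pool_free(struct pool *p, void *elem)
{
	struct node *n = elem;
	n->next = p->free_list;
	p->free_list = n;
	p->free_count++;
}

int main(void)
{
	struct pool p;
	void *a, *b;

	assert(pool_init(&p, 4, 32) == 0);
	a = pool_alloc(&p);
	b = pool_alloc(&p);
	assert(a && b && p.free_count == 2);
	pool_free(&p, a);
	pool_free(&p, b);
	assert(p.free_count == 4);
	free(p.mem);
	return 0;
}
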
+
/* calculate partial checksum */
static uint32
ip_cksum_partial(uint32 sum, uint8 *val8, uint32 count)
ptr += OFFSETOF(struct ipv4_hdr, hdr_chksum) + 2;
/* return calculated chksum */
- return ip_cksum(sum, ptr, (uint32)((uint)ip_len - OFFSETOF(struct ipv4_hdr, src_ip)));
+ return ip_cksum(sum, ptr, ip_len - OFFSETOF(struct ipv4_hdr, src_ip));
}
/* calculate TCP header checksum using partial sum */
ASSERT(tcp != NULL);
ASSERT(tcp_len >= TCP_MIN_HEADER_LEN);
- if (!ip || !tcp || !(tcp_len >= TCP_MIN_HEADER_LEN))
- return 0;
/* pseudo header cksum */
memset(&tcp_ps, 0, sizeof(tcp_ps));
memcpy(&tcp_ps.dst_ip, ip_hdr->dst_ip, IPV4_ADDR_LEN);
ASSERT(tcp != NULL);
ASSERT(tcp_len >= TCP_MIN_HEADER_LEN);
- if (!ipv6 || !tcp || !(tcp_len >= TCP_MIN_HEADER_LEN))
- return 0;
/* pseudo header cksum */
memset((char *)&ipv6_pseudo, 0, sizeof(ipv6_pseudo));
memcpy((char *)ipv6_pseudo.saddr, (char *)ipv6_hdr->saddr.addr,
/* return calculated TCP header chksum */
return tcp_hdr_chksum(sum, tcp, tcp_len);
}
-
-void *_bcmutils_dummy_fn = NULL;
-
-/* GROUP 1 --- start
- * These function under GROUP 1 are general purpose functions to do complex number
- * calculations and square root calculation.
- */
-
-uint32 sqrt_int(uint32 value)
-{
- uint32 root = 0, shift = 0;
-
- /* Compute integer nearest to square root of input integer value */
- for (shift = 0; shift < 32; shift += 2) {
- if (((0x40000000 >> shift) + root) <= value) {
- value -= ((0x40000000 >> shift) + root);
- root = (root >> 1) | (0x40000000 >> shift);
- }
- else {
- root = root >> 1;
- }
- }
-
- /* round to the nearest integer */
- if (root < value) ++root;
-
- return root;
-}
-/* GROUP 1 --- end */
-
-/* read/write field in a consecutive bits in an octet array.
- * 'addr' is the octet array's start byte address
- * 'size' is the octet array's byte size
- * 'stbit' is the value's start bit offset
- * 'nbits' is the value's bit size
- * This set of utilities are for convenience. Don't use them
- * in time critical/data path as there's a great overhead in them.
- */
-void
-setbits(uint8 *addr, uint size, uint stbit, uint nbits, uint32 val)
-{
- uint fbyte = stbit >> 3; /* first byte */
- uint lbyte = (stbit + nbits - 1) >> 3; /* last byte */
- uint fbit = stbit & 7; /* first bit in the first byte */
- uint rbits = (nbits > 8 - fbit ?
- nbits - (8 - fbit) :
- 0) & 7; /* remaining bits of the last byte when not 0 */
- uint8 mask;
- uint byte;
-
- BCM_REFERENCE(size);
-
- ASSERT(fbyte < size);
- ASSERT(lbyte < size);
- ASSERT(nbits <= (sizeof(val) << 3));
-
- /* all bits are in the same byte */
- if (fbyte == lbyte) {
- mask = (uint8)(((1 << nbits) - 1) << fbit);
- addr[fbyte] &= ~mask;
- addr[fbyte] |= (uint8)(val << fbit);
- return;
- }
-
- /* first partial byte */
- if (fbit > 0) {
- mask = (uint8)(0xff << fbit);
- addr[fbyte] &= ~mask;
- addr[fbyte] |= (uint8)(val << fbit);
- val >>= (8 - fbit);
- nbits -= (8 - fbit);
- fbyte ++; /* first full byte */
- }
-
- /* last partial byte */
- if (rbits > 0) {
- mask = (uint8)((1 << rbits) - 1);
- addr[lbyte] &= ~mask;
- addr[lbyte] |= (uint8)(val >> (nbits - rbits));
- lbyte --; /* last full byte */
- }
-
- /* remaining full byte(s) */
- for (byte = fbyte; byte <= lbyte; byte ++) {
- addr[byte] = (uint8)val;
- val >>= 8;
- }
-}
-
-uint32
-getbits(const uint8 *addr, uint size, uint stbit, uint nbits)
-{
- uint fbyte = stbit >> 3; /* first byte */
- uint lbyte = (stbit + nbits - 1) >> 3; /* last byte */
- uint fbit = stbit & 7; /* first bit in the first byte */
- uint rbits = (nbits > 8 - fbit ?
- nbits - (8 - fbit) :
- 0) & 7; /* remaining bits of the last byte when not 0 */
- uint32 val = 0;
- uint bits = 0; /* bits in first partial byte */
- uint8 mask;
- uint byte;
-
- BCM_REFERENCE(size);
-
- ASSERT(fbyte < size);
- ASSERT(lbyte < size);
- ASSERT(nbits <= (sizeof(val) << 3));
-
- /* all bits are in the same byte */
- if (fbyte == lbyte) {
- mask = (uint8)(((1 << nbits) - 1) << fbit);
- val = (addr[fbyte] & mask) >> fbit;
- return val;
- }
-
- /* first partial byte */
- if (fbit > 0) {
- bits = 8 - fbit;
- mask = (uint8)(0xFFu << fbit);
- val |= (addr[fbyte] & mask) >> fbit;
- fbyte ++; /* first full byte */
- }
-
- /* last partial byte */
- if (rbits > 0) {
- mask = (uint8)((1 << rbits) - 1);
- val |= (uint32)((addr[lbyte] & mask) << (nbits - rbits));
- lbyte --; /* last full byte */
- }
-
- /* remaining full byte(s) */
- for (byte = fbyte; byte <= lbyte; byte ++) {
- val |= (uint32)((addr[byte] << (((byte - fbyte) << 3) + bits)));
- }
-
- return val;
-}
-
-#ifdef BCMDRIVER
-
-/** allocate variable sized data with 'size' bytes. note: vld should NOT be null.
- */
-int
-bcm_vdata_alloc(osl_t *osh, var_len_data_t *vld, uint32 size)
-{
- int ret = BCME_ERROR;
- uint8 *dat = NULL;
-
- if (vld == NULL) {
- ASSERT(0);
- goto done;
- }
-
- /* trying to allocate twice? */
- if (vld->vdata != NULL) {
- ASSERT(0);
- goto done;
- }
-
- /* trying to allocate 0 size? */
- if (size == 0) {
- ASSERT(0);
- ret = BCME_BADARG;
- goto done;
- }
-
- dat = MALLOCZ(osh, size);
- if (dat == NULL) {
- ret = BCME_NOMEM;
- goto done;
- }
- vld->vlen = size;
- vld->vdata = dat;
- ret = BCME_OK;
-done:
- return ret;
-}
-
-/** free memory associated with variable sized data. note: vld should NOT be null.
- */
-int
-bcm_vdata_free(osl_t *osh, var_len_data_t *vld)
-{
- int ret = BCME_ERROR;
-
- if (vld == NULL) {
- ASSERT(0);
- goto done;
- }
-
- if (vld->vdata) {
- MFREE(osh, vld->vdata, vld->vlen);
- vld->vdata = NULL;
- vld->vlen = 0;
- ret = BCME_OK;
- }
-done:
- return ret;
-}
-
-#endif /* BCMDRIVER */
-
-/* Count the number of elements not matching a given value in a null terminated array */
-int
-array_value_mismatch_count(uint8 value, uint8 *array, int array_size)
-{
- int i;
- int count = 0;
-
- for (i = 0; i < array_size; i++) {
- /* exit if a null terminator is found */
- if (array[i] == 0) {
- break;
- }
- if (array[i] != value) {
- count++;
- }
- }
- return count;
-}
-
-/* Count the number of non-zero elements in an uint8 array */
-int
-array_nonzero_count(uint8 *array, int array_size)
-{
- return array_value_mismatch_count(0, array, array_size);
-}
-
-/* Count the number of non-zero elements in an int16 array */
-int
-array_nonzero_count_int16(int16 *array, int array_size)
-{
- int i;
- int count = 0;
-
- for (i = 0; i < array_size; i++) {
- if (array[i] != 0) {
- count++;
- }
- }
- return count;
-}
-
-/* Count the number of zero elements in an uint8 array */
-int
-array_zero_count(uint8 *array, int array_size)
-{
- int i;
- int count = 0;
-
- for (i = 0; i < array_size; i++) {
- if (array[i] == 0) {
- count++;
- }
- }
- return count;
-}
-
-/* Validate an array that can be 1 of 2 data types.
- * One of array1 or array2 should be non-NULL. The other should be NULL.
- */
-static int
-verify_ordered_array(uint8 *array1, int16 *array2, int array_size,
- int range_lo, int range_hi, bool err_if_no_zero_term, bool is_ordered)
-{
- int ret;
- int i;
- int val = 0;
- int prev_val = 0;
-
- ret = err_if_no_zero_term ? BCME_NOTFOUND : BCME_OK;
-
- /* Check that:
- * - values are in strict descending order.
- * - values are within the valid range.
- */
- for (i = 0; i < array_size; i++) {
- if (array1) {
- val = (int)array1[i];
- } else if (array2) {
- val = (int)array2[i];
- } else {
- /* both array parameters are NULL */
- return BCME_NOTFOUND;
- }
- if (val == 0) {
- /* array is zero-terminated */
- ret = BCME_OK;
- break;
- }
-
- if (is_ordered && i > 0 && val >= prev_val) {
- /* array is not in descending order */
- ret = BCME_BADOPTION;
- break;
- }
- prev_val = val;
-
- if (val < range_lo || val > range_hi) {
- /* array value out of range */
- ret = BCME_RANGE;
- break;
- }
- }
-
- return ret;
-}
-
-/* Validate an ordered uint8 configuration array */
-int
-verify_ordered_array_uint8(uint8 *array, int array_size,
- uint8 range_lo, uint8 range_hi)
-{
- return verify_ordered_array(array, NULL, array_size, (int)range_lo, (int)range_hi,
- TRUE, TRUE);
-}
-
-/* Validate an ordered int16 non-zero-terminated configuration array */
-int
-verify_ordered_array_int16(int16 *array, int array_size,
- int16 range_lo, int16 range_hi)
-{
- return verify_ordered_array(NULL, array, array_size, (int)range_lo, (int)range_hi,
- FALSE, TRUE);
-}
-
-/* Validate all values in an array are in range */
-int
-verify_array_values(uint8 *array, int array_size,
- int range_lo, int range_hi, bool zero_terminated)
-{
- int ret = BCME_OK;
- int i;
- int val = 0;
-
- /* Check that:
- * - values are in strict descending order.
- * - values are within the valid range.
- */
- for (i = 0; i < array_size; i++) {
- val = (int)array[i];
- if (val == 0 && zero_terminated) {
- ret = BCME_OK;
- break;
- }
- if (val < range_lo || val > range_hi) {
- /* array value out of range */
- ret = BCME_RANGE;
- break;
- }
- }
- return ret;
-}
-
-/* Adds/replaces NVRAM variable with given value
- * varbuf[in,out] - Buffer with NVRAM variables (sequence of zero-terminated 'name=value' records,
- * terminated with additional zero)
- * buflen[in] - Length of buffer (may, even should, have some unused space)
- * variable[in] - Variable to add/replace in 'name=value' form
- * datalen[out,opt] - Optional output parameter - resulting length of data in buffer
- * Returns TRUE on success, FALSE if buffer too short or variable specified incorrectly
- */
-bool
-replace_nvram_variable(char *varbuf, unsigned int buflen, const char *variable,
- unsigned int *datalen)
-{
- char *p;
- int variable_heading_len, record_len, variable_record_len = (int)strlen(variable) + 1;
- char *buf_end = varbuf + buflen;
- p = strchr(variable, '=');
- if (!p) {
- return FALSE;
- }
- /* Length of given variable name, followed by '=' */
- variable_heading_len = (int)((const char *)(p + 1) - variable);
- /* Scanning NVRAM, record by record up to trailing 0 */
- for (p = varbuf; *p; p += strlen(p) + 1) {
- /* If given variable found - remove it */
- if (!strncmp(p, variable, (size_t)variable_heading_len)) {
- record_len = (int)strlen(p) + 1;
- memmove_s(p, buf_end - p, p + record_len,
- (size_t)(buf_end - (p + record_len)));
- }
- }
- /* If buffer does not have space for given variable - return FALSE */
- if ((p + variable_record_len + 1) > buf_end) {
- return FALSE;
- }
- /* Copy given variable to end of buffer */
- memmove_s(p, buf_end - p, variable, (size_t)variable_record_len);
- /* Adding trailing 0 */
- p[variable_record_len] = 0;
- /* Setting optional output parameter - length of data in buffer */
- if (datalen) {
- *datalen = (unsigned int)(p + variable_record_len + 1 - varbuf);
- }
- return TRUE;
-}
-
-/* Add to adjust the 802.1x priority */
-void
-pktset8021xprio(void *pkt, int prio)
-{
- struct ether_header *eh;
- uint8 *pktdata;
- if(prio == PKTPRIO(pkt))
- return;
- pktdata = (uint8 *)PKTDATA(OSH_NULL, pkt);
- ASSERT(ISALIGNED((uintptr)pktdata, sizeof(uint16)));
- eh = (struct ether_header *) pktdata;
- if (eh->ether_type == hton16(ETHER_TYPE_802_1X)) {
- ASSERT(prio >= 0 && prio <= MAXPRIO);
- PKTSETPRIO(pkt, prio);
- }
-}
* Contents are wifi-specific, used by any kernel or app-level
* software that might want wifi things as it grows.
*
- * Copyright (C) 1999-2019, Broadcom.
- *
+ * Copyright (C) 1999-2017, Broadcom Corporation
+ *
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
* under the terms of the GNU General Public License version 2 (the "GPL"),
* available at http://www.broadcom.com/licenses/GPLv2.php, with the
* following added to such license:
- *
+ *
* As a special exception, the copyright holders of this software give you
* permission to link this software with independent modules, and to copy and
* distribute the resulting executable under terms of your choice, provided that
* the license of that module. An independent module is a module which is not
* derived from this software. The special exception does not apply to any
* modifications of the software.
- *
+ *
* Notwithstanding the above, under no circumstances may you combine this
* software in any way with any other Broadcom software provided under a license
* other than the GPL, without Broadcom's express prior written consent.
*
* <<Broadcom-WL-IPTag/Open:>>
*
- * $Id: bcmwifi_channels.c 806092 2019-02-21 08:19:13Z $
+ * $Id: bcmwifi_channels.c 612483 2016-01-14 03:44:27Z $
*/
#include <bcm_cfg.h>
#include <ctype.h>
#ifndef ASSERT
#define ASSERT(exp)
-#endif // endif
+#endif
#endif /* BCMDRIVER */
#include <bcmwifi_channels.h>
#if defined(WIN32) && (defined(BCMDLL) || defined(WLMDLL))
#include <bcmstdlib.h> /* For wl/exe/GNUmakefile.brcm_wlu and GNUmakefile.wlm_dll */
-#endif // endif
+#endif
-#include <802.11.h>
+/* Definitions for D11AC capable Chanspec type */
-/* Definitions for D11AC capable (80MHz+) Chanspec type */
-
-/* Chanspec ASCII representation:
- * [<band> 'g'] <channel> ['/'<bandwidth> [<primary-sideband>]['/'<1st80channel>'-'<2nd80channel>]]
+/* Chanspec ASCII representation with 802.11ac capability:
+ * [<band> 'g'] <channel> ['/'<bandwidth> [<ctl-sideband>]['/'<1st80channel>'-'<2nd80channel>]]
*
* <band>:
* (optional) 2, 3, 4, 5 for 2.4GHz, 3GHz, 4GHz, and 5GHz respectively.
* <1st80Channel>:
* <2nd80Channel>:
* Required for 80+80, otherwise not allowed.
- * Specifies the center channel of the primary and secondary 80MHz band.
+ * Specifies the center channel of the first and second 80MHz band.
*
* In its simplest form, it is a 20MHz channel number, with the implied band
* of 2.4GHz if channel number <= 14, and 5GHz otherwise.
*
* To allow for backward compatibility with scripts, the old form for
- * 40MHz channels is also allowed: <channel><primary-sideband>
+ * 40MHz channels is also allowed: <channel><ctl-sideband>
*
* <channel>:
* primary channel of 40MHz, channel <= 14 is 2GHz, otherwise 5GHz
- * <primary-sideband>:
- * "U" for upper, "L" for lower (or lower case "u" "l")
+ * <ctl-sideband>:
+ * "U" for upper, "L" for lower (or lower case "u" "l")
*
* 5 GHz Examples:
* Chanspec BW Center Ch Channel Range Primary Ch
"80",
"160",
"80+80",
+#ifdef WL11ULB
+ "2.5"
+#else /* WL11ULB */
"na"
+#endif /* WL11ULB */
};
static const uint8 wf_chspec_bw_mhz[] =
/* 40MHz channels in 5GHz band */
static const uint8 wf_5g_40m_chans[] =
-{38, 46, 54, 62, 102, 110, 118, 126, 134, 142, 151, 159, 167, 175};
+{38, 46, 54, 62, 102, 110, 118, 126, 134, 142, 151, 159};
#define WF_NUM_5G_40M_CHANS \
(sizeof(wf_5g_40m_chans)/sizeof(uint8))
/* 80MHz channels in 5GHz band */
static const uint8 wf_5g_80m_chans[] =
-{42, 58, 106, 122, 138, 155, 171};
+{42, 58, 106, 122, 138, 155};
#define WF_NUM_5G_80M_CHANS \
(sizeof(wf_5g_80m_chans)/sizeof(uint8))
(WL_CHANSPEC_BAND_2G |((WL_CHANSPEC_BW_40)&WL_CHANSPEC_BW_MASK)|WL_CHANSPEC_CTL_SB_UPPER),
};
-/**
- * Return the chanspec bandwidth in MHz
- * Bandwidth of 160 MHz will be returned for 80+80MHz chanspecs.
- *
- * @param chspec chanspec_t
- *
- * @return bandwidth of chspec in MHz units
- */
-uint
-wf_bw_chspec_to_mhz(chanspec_t chspec)
+/* convert bandwidth from chanspec to MHz */
+static uint
+bw_chspec_to_mhz(chanspec_t chspec)
{
uint bw;
return (uint8)(center_ch - center_chan_to_edge(bw));
}
-/* return side band number given center channel and primary20 channel
+/* return side band number given center channel and control channel
* return -1 on error
*/
static int
-channel_to_sb(uint center_ch, uint primary_ch, uint bw)
+channel_to_sb(uint center_ch, uint ctl_ch, uint bw)
{
uint lowest = channel_low_edge(center_ch, bw);
uint sb;
- if ((primary_ch - lowest) % 4) {
- /* bad primary channel, not mult 4 */
+ if ((ctl_ch - lowest) % 4) {
+ /* bad ctl channel, not mult 4 */
return -1;
}
- sb = ((primary_ch - lowest) / 4);
+ sb = ((ctl_ch - lowest) / 4);
/* sb must be a index to a 20MHz channel in range */
if (sb >= (bw / 20)) {
- /* primary_ch must have been too high for the center_ch */
+ /* ctl_ch must have been too high for the center_ch */
return -1;
}
- return (int)sb;
+ return sb;
}
-/* return primary20 channel given center channel and side band */
+/* return control channel given center channel and side band */
static uint8
-channel_to_primary20_chan(uint center_ch, uint bw, uint sb)
+channel_to_ctl_chan(uint center_ch, uint bw, uint sb)
{
return (uint8)(channel_low_edge(center_ch, bw) + sb * 4);
}
uint i;
for (i = 0; i < WF_NUM_5G_80M_CHANS; i ++) {
if (ch == wf_5g_80m_chans[i])
- return (int)i;
+ return i;
}
return -1;
wf_chspec_ntoa(chanspec_t chspec, char *buf)
{
const char *band;
- uint pri_chan;
+ uint ctl_chan;
if (wf_chspec_malformed(chspec))
return NULL;
(CHSPEC_IS5G(chspec) && CHSPEC_CHANNEL(chspec) <= CH_MAX_2G_CHANNEL))
band = (CHSPEC_IS2G(chspec)) ? "2g" : "5g";
- /* primary20 channel */
- pri_chan = wf_chspec_primary20_chan(chspec);
+ /* ctl channel */
+ ctl_chan = wf_chspec_ctlchan(chspec);
- /* bandwidth and primary20 sideband */
+ /* bandwidth and ctl sideband */
if (CHSPEC_IS20(chspec)) {
- snprintf(buf, CHANSPEC_STR_LEN, "%s%d", band, pri_chan);
+ snprintf(buf, CHANSPEC_STR_LEN, "%s%d", band, ctl_chan);
} else if (!CHSPEC_IS8080(chspec)) {
const char *bw;
const char *sb = "";
- bw = wf_chspec_to_bw_str(chspec);
+ bw = wf_chspec_bw_str[(chspec & WL_CHANSPEC_BW_MASK) >> WL_CHANSPEC_BW_SHIFT];
#ifdef CHANSPEC_NEW_40MHZ_FORMAT
- /* primary20 sideband string if needed for 2g 40MHz */
+ /* ctl sideband string if needed for 2g 40MHz */
if (CHSPEC_IS40(chspec) && CHSPEC_IS2G(chspec)) {
sb = CHSPEC_SB_UPPER(chspec) ? "u" : "l";
}
- snprintf(buf, CHANSPEC_STR_LEN, "%s%d/%s%s", band, pri_chan, bw, sb);
+ snprintf(buf, CHANSPEC_STR_LEN, "%s%d/%s%s", band, ctl_chan, bw, sb);
#else
- /* primary20 sideband string instead of BW for 40MHz */
+ /* ctl sideband string instead of BW for 40MHz */
if (CHSPEC_IS40(chspec)) {
sb = CHSPEC_SB_UPPER(chspec) ? "u" : "l";
- snprintf(buf, CHANSPEC_STR_LEN, "%s%d%s", band, pri_chan, sb);
+ snprintf(buf, CHANSPEC_STR_LEN, "%s%d%s", band, ctl_chan, sb);
} else {
- snprintf(buf, CHANSPEC_STR_LEN, "%s%d/%s", band, pri_chan, bw);
+ snprintf(buf, CHANSPEC_STR_LEN, "%s%d/%s", band, ctl_chan, bw);
}
#endif /* CHANSPEC_NEW_40MHZ_FORMAT */
chan2 = (chan2 < WF_NUM_5G_80M_CHANS) ? wf_5g_80m_chans[chan2] : 0;
/* Outputs a max of CHANSPEC_STR_LEN chars including '\0' */
- snprintf(buf, CHANSPEC_STR_LEN, "%d/80+80/%d-%d", pri_chan, chan1, chan2);
+ snprintf(buf, CHANSPEC_STR_LEN, "%d/80+80/%d-%d", ctl_chan, chan1, chan2);
}
return (buf);
{
chanspec_t chspec;
uint chspec_ch, chspec_band, bw, chspec_bw, chspec_sb;
- uint num, pri_ch;
+ uint num, ctl_ch;
uint ch1, ch2;
char c, sb_ul = '\0';
int i;
if (!read_uint(&a, &num))
return 0;
/* if we are looking at a 'g', then the first number was a band */
- c = tolower(a[0]);
+ c = tolower((int)a[0]);
if (c == 'g') {
a++; /* consume the char */
return 0;
/* read the channel number */
- if (!read_uint(&a, &pri_ch))
+ if (!read_uint(&a, &ctl_ch))
return 0;
- c = tolower(a[0]);
+ c = tolower((int)a[0]);
}
else {
/* first number is channel, use default for band */
- pri_ch = num;
- chspec_band = ((pri_ch <= CH_MAX_2G_CHANNEL) ?
+ ctl_ch = num;
+ chspec_band = ((ctl_ch <= CH_MAX_2G_CHANNEL) ?
WL_CHANSPEC_BAND_2G : WL_CHANSPEC_BAND_5G);
}
return 0;
/* convert to chspec value */
- if (bw == 5) {
+ if (bw == 2) {
+ chspec_bw = WL_CHANSPEC_BW_2P5;
+ } else if (bw == 5) {
chspec_bw = WL_CHANSPEC_BW_5;
} else if (bw == 10) {
chspec_bw = WL_CHANSPEC_BW_10;
/* So far we have <band>g<chan>/<bw>
* Can now be followed by u/l if bw = 40,
- * or '+80' if bw = 80, to make '80+80' bw.
+ * or '+80' if bw = 80, to make '80+80' bw,
+ * or '.5' if bw = 2.5 to make '2.5' bw .
*/
- c = (char)tolower((int)a[0]);
+ c = tolower((int)a[0]);
/* if we have a 2g/40 channel, we should have a l/u spec now */
if (chspec_band == WL_CHANSPEC_BAND_2G && bw == 40) {
/* read secondary 80MHz channel */
if (!read_uint(&a, &ch2))
return 0;
+ } else if (c == '.') {
+ /* 2.5 */
+ /* must be looking at '.5'
+ * check and consume this string.
+ */
+ chspec_bw = WL_CHANSPEC_BW_2P5;
+
+ a ++; /* consume the char '.' */
+
+ /* consume the '5' string */
+ if (*a++ != '5') {
+ return 0;
+ }
}
done_read:
return 0;
/* Now have all the chanspec string parts read;
- * chspec_band, pri_ch, chspec_bw, sb_ul, ch1, ch2.
+ * chspec_band, ctl_ch, chspec_bw, sb_ul, ch1, ch2.
* chspec_band and chspec_bw are chanspec values.
- * Need to convert pri_ch, sb_ul, and ch1,ch2 into
+ * Need to convert ctl_ch, sb_ul, and ch1,ch2 into
* a center channel (or two) and sideband.
*/
*/
if (sb_ul != '\0') {
if (sb_ul == 'l') {
- chspec_ch = UPPER_20_SB(pri_ch);
+ chspec_ch = UPPER_20_SB(ctl_ch);
chspec_sb = WL_CHANSPEC_CTL_SB_LLL;
} else if (sb_ul == 'u') {
- chspec_ch = LOWER_20_SB(pri_ch);
+ chspec_ch = LOWER_20_SB(ctl_ch);
chspec_sb = WL_CHANSPEC_CTL_SB_LLU;
}
}
/* if the bw is 20, center and sideband are trivial */
- else if (chspec_bw == WL_CHANSPEC_BW_20) {
- chspec_ch = pri_ch;
+ else if (BW_LE20(chspec_bw)) {
+ chspec_ch = ctl_ch;
chspec_sb = WL_CHANSPEC_CTL_SB_NONE;
}
/* if the bw is 40/80/160, not 80+80, a single method
* can be used to to find the center and sideband
*/
else if (chspec_bw != WL_CHANSPEC_BW_8080) {
- /* figure out primary20 sideband based on primary20 channel and bandwidth */
+ /* figure out ctl sideband based on ctl channel and bandwidth */
const uint8 *center_ch = NULL;
int num_ch = 0;
int sb = -1;
}
for (i = 0; i < num_ch; i ++) {
- sb = channel_to_sb(center_ch[i], pri_ch, bw);
+ sb = channel_to_sb(center_ch[i], ctl_ch, bw);
if (sb >= 0) {
chspec_ch = center_ch[i];
- chspec_sb = (uint)(sb << WL_CHANSPEC_CTL_SB_SHIFT);
+ chspec_sb = sb << WL_CHANSPEC_CTL_SB_SHIFT;
break;
}
}
/* figure out primary 20 MHz sideband */
/* is the primary channel contained in the 1st 80MHz channel? */
- sb = channel_to_sb(ch1, pri_ch, bw);
+ sb = channel_to_sb(ch1, ctl_ch, bw);
if (sb < 0) {
- /* no match for primary channel 'pri_ch' in segment0 80MHz channel */
+ /* no match for primary channel 'ctl_ch' in segment0 80MHz channel */
return 0;
}
- chspec_sb = (uint)(sb << WL_CHANSPEC_CTL_SB_SHIFT);
+ chspec_sb = sb << WL_CHANSPEC_CTL_SB_SHIFT;
}
- chspec = (chanspec_t)(chspec_ch | chspec_band | chspec_bw | chspec_sb);
+ chspec = (chspec_ch | chspec_band | chspec_bw | chspec_sb);
if (wf_chspec_malformed(chspec))
return 0;
/*
* Verify the chanspec is using a legal set of parameters, i.e. that the
- * chanspec specified a band, bw, pri_sb and channel and that the
+ * chanspec specified a band, bw, ctl_sb and channel and that the
* combination could be legal given any set of circumstances.
* RETURNS: TRUE if the chanspec is malformed, false if it looks good.
*/
if (ch1_id >= WF_NUM_5G_80M_CHANS || ch2_id >= WF_NUM_5G_80M_CHANS)
return TRUE;
- } else if (chspec_bw == WL_CHANSPEC_BW_20 || chspec_bw == WL_CHANSPEC_BW_40 ||
- chspec_bw == WL_CHANSPEC_BW_80 || chspec_bw == WL_CHANSPEC_BW_160) {
-
+ } else if (BW_LE160(chspec_bw)) {
if (chspec_ch > MAXCHANNEL) {
return TRUE;
}
}
/* side band needs to be consistent with bandwidth */
- if (chspec_bw == WL_CHANSPEC_BW_20) {
+ if (BW_LE20(chspec_bw)) {
if (CHSPEC_CTL_SB(chanspec) != WL_CHANSPEC_CTL_SB_LLL)
return TRUE;
} else if (chspec_bw == WL_CHANSPEC_BW_40) {
return TRUE;
} else if (chspec_bw == WL_CHANSPEC_BW_80 ||
chspec_bw == WL_CHANSPEC_BW_8080) {
- /* both 80MHz and 80+80MHz use 80MHz side bands.
- * 80+80 SB info is relative to the primary 80MHz sub-band.
- */
if (CHSPEC_CTL_SB(chanspec) > WL_CHANSPEC_CTL_SB_LUU)
return TRUE;
}
if (CHSPEC_IS2G(chanspec)) {
/* must be valid bandwidth and channel range */
- if (chspec_bw == WL_CHANSPEC_BW_20) {
+ if (BW_LE20(chspec_bw)) {
if (chspec_ch >= 1 && chspec_ch <= 14)
return TRUE;
} else if (chspec_bw == WL_CHANSPEC_BW_40) {
const uint8 *center_ch;
uint num_ch, i;
- if (chspec_bw == WL_CHANSPEC_BW_20 || chspec_bw == WL_CHANSPEC_BW_40) {
+ if (BW_LE40(chspec_bw)) {
center_ch = wf_5g_40m_chans;
num_ch = WF_NUM_5G_40M_CHANS;
} else if (chspec_bw == WL_CHANSPEC_BW_80) {
}
/* check for a valid center channel */
- if (chspec_bw == WL_CHANSPEC_BW_20) {
+ if (BW_LE20(chspec_bw)) {
/* We don't have an array of legal 20MHz 5G channels, but they are
* each side of the legal 40MHz channels. Check the chanspec
* channel against either side of the 40MHz channels.
}
/*
- * This function returns TRUE if both the chanspec can co-exist in PHY.
- * Addition to primary20 channel, the function checks for side band for 2g 40 channels
- */
-bool
-wf_chspec_coexist(chanspec_t chspec1, chanspec_t chspec2)
-{
- bool same_primary;
-
- same_primary = (wf_chspec_primary20_chan(chspec1) == wf_chspec_primary20_chan(chspec2));
-
- if (same_primary && CHSPEC_IS2G(chspec1)) {
- if (CHSPEC_IS40(chspec1) && CHSPEC_IS40(chspec2)) {
- return (CHSPEC_CTL_SB(chspec1) == CHSPEC_CTL_SB(chspec2));
- }
- }
- return same_primary;
-}
-
-/**
- * Create a 20MHz chanspec for the given band.
- *
- * This function returns a 20MHz chanspec in the given band.
- *
- * @param channel 20MHz channel number
- * @param band a chanspec band (e.g. WL_CHANSPEC_BAND_2G)
- *
- * @return Returns a 20MHz chanspec, or IVNCHANSPEC in case of error.
- */
-chanspec_t
-wf_create_20MHz_chspec(uint channel, chanspec_band_t band)
-{
- chanspec_t chspec;
-
- if (channel <= WL_CHANSPEC_CHAN_MASK &&
- (band == WL_CHANSPEC_BAND_2G ||
- band == WL_CHANSPEC_BAND_5G)) {
- chspec = band | WL_CHANSPEC_BW_20 | WL_CHANSPEC_CTL_SB_NONE | channel;
- if (!wf_chspec_valid(chspec)) {
- chspec = INVCHANSPEC;
- }
- } else {
- chspec = INVCHANSPEC;
- }
-
- return chspec;
-}
-
-/**
- * Return the primary 20MHz channel.
- *
- * This function returns the channel number of the primary 20MHz channel. For
- * 20MHz channels this is just the channel number. For 40MHz or wider channels
- * it is the primary 20MHz channel specified by the chanspec.
- *
- * @param chspec input chanspec
- *
- * @return Returns the channel number of the primary 20MHz channel
+ * This function returns the channel number that control traffic is sent on. For 20MHz
+ * channels this is just the channel number; for 40MHz, 80MHz, and 160MHz channels it is the
+ * 20MHz sideband selected by the chanspec.
*/
uint8
-wf_chspec_primary20_chan(chanspec_t chspec)
+wf_chspec_ctlchan(chanspec_t chspec)
{
uint center_chan;
uint bw_mhz;
ASSERT(!wf_chspec_malformed(chspec));
/* Is there a sideband ? */
- if (CHSPEC_IS20(chspec)) {
+ if (CHSPEC_BW_LE20(chspec)) {
return CHSPEC_CHANNEL(chspec);
} else {
sb = CHSPEC_CTL_SB(chspec) >> WL_CHANSPEC_CTL_SB_SHIFT;
center_chan = wf_5g_80m_chans[chan_id];
}
else {
- bw_mhz = wf_bw_chspec_to_mhz(chspec);
+ bw_mhz = bw_chspec_to_mhz(chspec);
center_chan = CHSPEC_CHANNEL(chspec) >> WL_CHANSPEC_CHAN_SHIFT;
}
- return (channel_to_primary20_chan(center_chan, bw_mhz, sb));
+ return (channel_to_ctl_chan(center_chan, bw_mhz, sb));
}
}
/* given a chanspec, return the bandwidth string */
const char *
-BCMRAMFN(wf_chspec_to_bw_str)(chanspec_t chspec)
+wf_chspec_to_bw_str(chanspec_t chspec)
{
return wf_chspec_bw_str[(CHSPEC_BW(chspec) >> WL_CHANSPEC_BW_SHIFT)];
}
/*
- * Return the primary 20MHz chanspec of the given chanspec
+ * This function returns the chanspec of the control channel of a given chanspec
*/
chanspec_t
-wf_chspec_primary20_chspec(chanspec_t chspec)
+wf_chspec_ctlchspec(chanspec_t chspec)
{
- chanspec_t pri_chspec = chspec;
- uint8 pri_chan;
+ chanspec_t ctl_chspec = chspec;
+ uint8 ctl_chan;
ASSERT(!wf_chspec_malformed(chspec));
/* Is there a sideband ? */
- if (!CHSPEC_IS20(chspec)) {
- pri_chan = wf_chspec_primary20_chan(chspec);
- pri_chspec = pri_chan | WL_CHANSPEC_BW_20;
- pri_chspec |= CHSPEC_BAND(chspec);
+ if (!CHSPEC_BW_LE20(chspec)) {
+ ctl_chan = wf_chspec_ctlchan(chspec);
+ ctl_chspec = ctl_chan | WL_CHANSPEC_BW_20;
+ ctl_chspec |= CHSPEC_BAND(chspec);
}
- return pri_chspec;
+ return ctl_chspec;
}
-/* return chanspec given primary 20MHz channel and bandwidth
+/* return chanspec given control channel and bandwidth
* return 0 on error
*/
uint16
-wf_channel2chspec(uint pri_ch, uint bw)
+wf_channel2chspec(uint ctl_ch, uint bw)
{
uint16 chspec;
const uint8 *center_ch = NULL;
int sb = -1;
int i = 0;
- chspec = ((pri_ch <= CH_MAX_2G_CHANNEL) ? WL_CHANSPEC_BAND_2G : WL_CHANSPEC_BAND_5G);
+ chspec = ((ctl_ch <= CH_MAX_2G_CHANNEL) ? WL_CHANSPEC_BAND_2G : WL_CHANSPEC_BAND_5G);
chspec |= bw;
center_ch = wf_5g_160m_chans;
num_ch = WF_NUM_5G_160M_CHANS;
bw = 160;
- } else if (bw == WL_CHANSPEC_BW_20) {
- chspec |= pri_ch;
+ } else if (BW_LE20(bw)) {
+ chspec |= ctl_ch;
return chspec;
} else {
return 0;
}
for (i = 0; i < num_ch; i ++) {
- sb = channel_to_sb(center_ch[i], pri_ch, bw);
+ sb = channel_to_sb(center_ch[i], ctl_ch, bw);
if (sb >= 0) {
chspec |= center_ch[i];
chspec |= (sb << WL_CHANSPEC_CTL_SB_SHIFT);
}
/*
- * This function returns the chanspec for the primary 40MHz of an 80MHz or wider channel.
- * The primary 20MHz channel of the returned 40MHz chanspec is the same as the primary 20MHz
- * channel of the input chanspec.
+ * This function returns the chanspec for the primary 40MHz of an 80MHz channel.
+ * The control sideband specifies the same 20MHz channel that the 80MHz channel is using
+ * as the primary 20MHz channel.
*/
extern chanspec_t wf_chspec_primary40_chspec(chanspec_t chspec)
{
}
/* Create primary 40MHz chanspec */
- chspec40 = (chanspec_t)(WL_CHANSPEC_BAND_5G | WL_CHANSPEC_BW_40 |
+ chspec40 = (WL_CHANSPEC_BAND_5G | WL_CHANSPEC_BW_40 |
sb | center_chan);
}
* frequency is not a 2.4 GHz channel, or if the frequency is not and even
* multiple of 5 MHz from the base frequency to the base plus 1 GHz.
*
- * Reference 802.11-2016, section 17.3.8.3 and section 16.3.6.3
+ * Reference 802.11 REVma, section 17.3.8.3, and 802.11B section 18.4.6.2
*/
int
wf_mhz2channel(uint freq, uint start_factor)
if ((freq < base) || (freq > base + 1000))
return -1;
- offset = (int)(freq - base);
+ offset = freq - base;
ch = offset / 5;
/* check that frequency is a 5MHz multiple from the base */
* the answer is rounded down to an integral MHz.
* -1 is returned for an out of range channel.
*
- * Reference 802.11-2016, section 17.3.8.3 and section 16.3.6.3
+ * Reference 802.11 REVma, section 17.3.8.3, and 802.11B section 18.4.6.2
*/
int
wf_channel2mhz(uint ch, uint start_factor)
else if ((start_factor == WF_CHAN_FACTOR_2_4_G) && (ch == 14))
freq = 2484;
else
- freq = (int)(ch * 5 + start_factor / 2);
+ freq = ch * 5 + start_factor / 2;
return freq;
}
for (i = 0; i < WF_NUM_SIDEBANDS_80MHZ; i++) {
chanspec_cur = CH80MHZ_CHSPEC(center_channel, sidebands[i]);
- if (primary_channel == wf_chspec_primary20_chan(chanspec_cur)) {
+ if (primary_channel == wf_chspec_ctlchan(chanspec_cur)) {
chanspec = chanspec_cur;
break;
}
*
* Returns INVCHANSPEC in case of error.
*
- * Refer to 802.11-2016 section 22.3.14 "Channelization".
+ * Refer to IEEE802.11ac section 22.3.14 "Channelization".
*/
chanspec_t
wf_chspec_get8080_chspec(uint8 primary_20mhz, uint8 chan0, uint8 chan1)
/* no, so does the primary channel fit with the 2nd 80MHz channel ? */
sb = channel_to_sb(chan1, primary_20mhz, 80);
if (sb < 0) {
- /* no match for pri_ch to either 80MHz center channel */
+ /* no match for ctl_ch to either 80MHz center channel */
return INVCHANSPEC;
}
/* swapped, so chan1 is frequency segment 0, and chan0 is seg 1 */
seg1 = chan0_id;
}
- chanspec = (uint16)((seg0 << WL_CHANSPEC_CHAN1_SHIFT) |
+ chanspec = ((seg0 << WL_CHANSPEC_CHAN1_SHIFT) |
(seg1 << WL_CHANSPEC_CHAN2_SHIFT) |
(sb << WL_CHANSPEC_CTL_SB_SHIFT) |
WL_CHANSPEC_BW_8080 |
}
/*
- * Returns the center channel of the primary 80 MHz sub-band of the provided chanspec
+ * Returns the primary 80 MHz channel for the provided chanspec
+ *
+ * chanspec - Input chanspec for which the 80MHz primary channel has to be retrieved
+ *
+ * returns -1 if the provided chanspec is a 20/40 MHz chanspec
*/
+
uint8
wf_chspec_primary80_channel(chanspec_t chanspec)
{
- chanspec_t primary80_chspec;
uint8 primary80_chan;
- primary80_chspec = wf_chspec_primary80_chspec(chanspec);
-
- if (primary80_chspec == INVCHANSPEC) {
- primary80_chan = INVCHANNEL;
- } else {
- primary80_chan = CHSPEC_CHANNEL(primary80_chspec);
+ if (CHSPEC_IS80(chanspec)) {
+ primary80_chan = CHSPEC_CHANNEL(chanspec);
}
+ else if (CHSPEC_IS8080(chanspec)) {
+ /* Channel ID 1 corresponds to frequency segment 0, the primary 80 MHz segment */
+ primary80_chan = wf_chspec_get80Mhz_ch(CHSPEC_CHAN1(chanspec));
+ }
+ else if (CHSPEC_IS160(chanspec)) {
+ uint8 center_chan = CHSPEC_CHANNEL(chanspec);
+ uint sb = CHSPEC_CTL_SB(chanspec) >> WL_CHANSPEC_CTL_SB_SHIFT;
+ /* The primary 80MHz channel can be derived from the sideband value:
+ * if sb is in the range 0 to 3, the lower band is the primary 80MHz band
+ */
+ if (sb < 4) {
+ primary80_chan = center_chan - CH_40MHZ_APART;
+ }
+ /* if sb is in range 4 to 7 the upper band is the 80Mhz primary band */
+ else
+ {
+ primary80_chan = center_chan + CH_40MHZ_APART;
+ }
+ }
+ else {
+ /* for 20 and 40 Mhz */
+ primary80_chan = -1;
+ }
return primary80_chan;
}
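/* Illustrative sketch (not a definitive usage): for a 160MHz chanspec centered on
 * channel 50 (spanning 36-64), a primary20 sideband index of 0-3 selects the lower
 * 80MHz half, centered at 50 - CH_40MHZ_APART = 42; an index of 4-7 selects the
 * upper half, centered at 50 + CH_40MHZ_APART = 58.
 */
static void example_primary80_channel(void)
{
	/* sideband bits left at 0 (lowest sub-band), so primary20 is channel 36 */
	chanspec_t cs160 = (chanspec_t)(50 | WL_CHANSPEC_BW_160 | WL_CHANSPEC_BAND_5G);
	uint8 p80 = wf_chspec_primary80_channel(cs160); /* 42 */

	(void)p80;
}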
/*
- * Returns the center channel of the secondary 80 MHz sub-band of the provided chanspec
+ * Returns the secondary 80 MHz channel for the provided chanspec
+ *
+ * chanspec - Input chanspec for which the 80MHz secondary channel has to be retrieved
+ *
+ * returns -1 in case the provided chanspec is a 20/40/80 MHz chanspec
*/
uint8
wf_chspec_secondary80_channel(chanspec_t chanspec)
{
- chanspec_t secondary80_chspec;
uint8 secondary80_chan;
- secondary80_chspec = wf_chspec_secondary80_chspec(chanspec);
-
- if (secondary80_chspec == INVCHANSPEC) {
- secondary80_chan = INVCHANNEL;
- } else {
- secondary80_chan = CHSPEC_CHANNEL(secondary80_chspec);
+ if (CHSPEC_IS8080(chanspec)) {
+ secondary80_chan = wf_chspec_get80Mhz_ch(CHSPEC_CHAN2(chanspec));
}
+ else if (CHSPEC_IS160(chanspec)) {
+ uint8 center_chan = CHSPEC_CHANNEL(chanspec);
+ uint sb = CHSPEC_CTL_SB(chanspec) >> WL_CHANSPEC_CTL_SB_SHIFT;
+ /* based on the sb value secondary 80 channel can be retrieved
+ * if sb is in range 0 to 3 upper band is the secondary 80Mhz band
+ */
+ if (sb < 4) {
+ secondary80_chan = center_chan + CH_40MHZ_APART;
+ }
+ /* if sb is in range 4 to 7 the lower band is the secondary 80Mhz band */
+ else
+ {
+ secondary80_chan = center_chan - CH_40MHZ_APART;
+ }
+ }
+ else {
+ /* for 20, 40, and 80 Mhz */
+ secondary80_chan = -1;
+ }
return secondary80_chan;
}
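/* Illustrative sketch: for the same 160MHz chanspec (center 50, primary20 in the
 * lower half), the secondary 80MHz half is the upper one, centered at
 * 50 + CH_40MHZ_APART = 58. For an 80+80 chanspec the value comes instead from the
 * frequency-segment-1 channel ID via wf_chspec_get80Mhz_ch().
 */
static void example_secondary80_channel(void)
{
	chanspec_t cs160 = (chanspec_t)(50 | WL_CHANSPEC_BW_160 | WL_CHANSPEC_BAND_5G);
	uint8 s80 = wf_chspec_secondary80_channel(cs160); /* 58 */

	(void)s80;
}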
/*
- * Returns the chanspec for the primary 80MHz sub-band of an 160MHz or 80+80 channel
+ * This function returns the chanspec for the primary 80MHz of a 160MHz or 80+80 channel.
+ *
+ * chanspec - Input chanspec for which the primary 80MHz chanspec has to be retrieved
+ *
+ * returns the input chanspec in case the provided chanspec is an 80 MHz chanspec
+ * returns INVCHANSPEC in case the provided chanspec is a 20/40 MHz chanspec
*/
chanspec_t
wf_chspec_primary80_chspec(chanspec_t chspec)
uint sb;
ASSERT(!wf_chspec_malformed(chspec));
-
if (CHSPEC_IS80(chspec)) {
chspec80 = chspec;
}
else if (CHSPEC_IS8080(chspec)) {
- sb = CHSPEC_CTL_SB(chspec);
- /* primary sub-band is stored in seg0 */
+ /* Channel ID 1 corresponds to frequency segment 0, the primary 80 MHz segment */
center_chan = wf_chspec_get80Mhz_ch(CHSPEC_CHAN1(chspec));
+ sb = CHSPEC_CTL_SB(chspec);
+
/* Create primary 80MHz chanspec */
- chspec80 = (chanspec_t)(WL_CHANSPEC_BAND_5G | WL_CHANSPEC_BW_80 | sb | center_chan);
+ chspec80 = (WL_CHANSPEC_BAND_5G | WL_CHANSPEC_BW_80 | sb | center_chan);
}
else if (CHSPEC_IS160(chspec)) {
center_chan = CHSPEC_CHANNEL(chspec);
center_chan += CH_40MHZ_APART;
sb -= WL_CHANSPEC_CTL_SB_ULL;
}
-
/* Create primary 80MHz chanspec */
- chspec80 = (chanspec_t)(WL_CHANSPEC_BAND_5G | WL_CHANSPEC_BW_80 | sb | center_chan);
- }
- else {
- chspec80 = INVCHANSPEC;
- }
-
- return chspec80;
-}
-
-/*
- * Returns the chanspec for the secondary 80MHz sub-band of an 160MHz or 80+80 channel
- */
-chanspec_t
-wf_chspec_secondary80_chspec(chanspec_t chspec)
-{
- chanspec_t chspec80;
- uint center_chan;
-
- ASSERT(!wf_chspec_malformed(chspec));
-
- if (CHSPEC_IS8080(chspec)) {
- /* secondary sub-band is stored in seg1 */
- center_chan = wf_chspec_get80Mhz_ch(CHSPEC_CHAN2(chspec));
-
- /* Create secondary 80MHz chanspec */
- chspec80 = (chanspec_t)(WL_CHANSPEC_BAND_5G |
- WL_CHANSPEC_BW_80 |
- WL_CHANSPEC_CTL_SB_LL |
- center_chan);
- }
- else if (CHSPEC_IS160(chspec)) {
- center_chan = CHSPEC_CHANNEL(chspec);
-
- if (CHSPEC_CTL_SB(chspec) < WL_CHANSPEC_CTL_SB_ULL) {
- /* Primary 80MHz is on lower side */
- center_chan -= CH_40MHZ_APART;
- }
- else {
- /* Primary 80MHz is on upper side */
- center_chan += CH_40MHZ_APART;
- }
-
- /* Create secondary 80MHz chanspec */
- chspec80 = (chanspec_t)(WL_CHANSPEC_BAND_5G |
- WL_CHANSPEC_BW_80 |
- WL_CHANSPEC_CTL_SB_LL |
- center_chan);
+ chspec80 = (WL_CHANSPEC_BAND_5G | WL_CHANSPEC_BW_80 | sb | center_chan);
}
else {
chspec80 = INVCHANSPEC;
return chspec80;
}
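/* Illustrative sketch: feeding the 160MHz chanspec from the examples above
 * (center 50, primary20 = 36) through wf_chspec_primary80_chspec() yields an 80MHz
 * chanspec assembled as WL_CHANSPEC_BAND_5G | WL_CHANSPEC_BW_80 | sb | 42, i.e. the
 * 80MHz channel centered at 42 that contains the same primary 20MHz channel.
 */
static void example_primary80_chspec(void)
{
	chanspec_t cs160 = (chanspec_t)(50 | WL_CHANSPEC_BW_160 | WL_CHANSPEC_BAND_5G);
	chanspec_t p80 = wf_chspec_primary80_chspec(cs160);

	(void)p80;
}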
-/*
- * For 160MHz or 80P80 chanspec, set ch[0]/ch[1] to be the low/high 80 Mhz channels
- *
- * For 20/40/80MHz chanspec, set ch[0] to be the center freq, and chan[1]=-1
- */
-void
-wf_chspec_get_80p80_channels(chanspec_t chspec, uint8 *ch)
-{
-
- if (CHSPEC_IS8080(chspec)) {
- ch[0] = wf_chspec_get80Mhz_ch(CHSPEC_CHAN1(chspec));
- ch[1] = wf_chspec_get80Mhz_ch(CHSPEC_CHAN2(chspec));
- }
- else if (CHSPEC_IS160(chspec)) {
- uint8 center_chan = CHSPEC_CHANNEL(chspec);
- ch[0] = center_chan - CH_40MHZ_APART;
- ch[1] = center_chan + CH_40MHZ_APART;
- }
- else {
- /* for 20, 40, and 80 Mhz */
- ch[0] = CHSPEC_CHANNEL(chspec);
- ch[1] = 0xFFu;
- }
- return;
-
-}
-
#ifdef WL11AC_80P80
uint8
wf_chspec_channel(chanspec_t chspec)
/* TODO: Implement this function ! */
return 12; /* opclass 12 for basic 2G channels */
}
-
-/* Populates array with all 20MHz side bands of a given chanspec_t in the following order:
- * primary20, secondary20, two secondary40s, four secondary80s.
- * 'chspec' is the chanspec of interest
- * 'pext' must point to an uint8 array of long enough to hold all side bands of the given chspec
- *
- * Works with 20, 40, 80, 80p80 and 160MHz chspec
- */
-void
-wf_get_all_ext(chanspec_t chspec, uint8 *pext)
-{
-#ifdef WL11N_20MHZONLY
- GET_ALL_SB(chspec, pext);
-#else /* !WL11N_20MHZONLY */
- chanspec_t t = (CHSPEC_IS160(chspec) || CHSPEC_IS8080(chspec)) ? /* if bw > 80MHz */
- wf_chspec_primary80_chspec(chspec) : (chspec); /* extract primary 80 */
- /* primary20 channel as first element */
- uint8 pri_ch = (pext)[0] = wf_chspec_primary20_chan(t);
- if (CHSPEC_IS20(chspec)) return; /* nothing more to do since 20MHz chspec */
- /* 20MHz EXT */
- (pext)[1] = pri_ch + (uint8)(IS_CTL_IN_L20(t) ? CH_20MHZ_APART : -CH_20MHZ_APART);
- if (CHSPEC_IS40(chspec)) return; /* nothing more to do since 40MHz chspec */
- /* center 40MHz EXT */
- t = wf_channel2chspec((uint)(pri_ch + (IS_CTL_IN_L40(chspec) ?
- CH_40MHZ_APART : -CH_40MHZ_APART)), WL_CHANSPEC_BW_40);
- GET_ALL_SB(t, &((pext)[2])); /* get the 20MHz side bands in 40MHz EXT */
- if (CHSPEC_IS80(chspec)) return; /* nothing more to do since 80MHz chspec */
- t = CH80MHZ_CHSPEC(wf_chspec_secondary80_channel(chspec), WL_CHANSPEC_CTL_SB_LLL);
- /* get the 20MHz side bands in 80MHz EXT (secondary) */
- GET_ALL_SB(t, &((pext)[4]));
-#endif /* !WL11N_20MHZONLY */
-}
-
-/*
- * Given two chanspecs, returns true if they overlap.
- * (Overlap: At least one 20MHz subband is common between the two chanspecs provided)
- */
-bool wf_chspec_overlap(chanspec_t chspec0, chanspec_t chspec1)
-{
- uint8 ch0, ch1;
-
- FOREACH_20_SB(chspec0, ch0) {
- FOREACH_20_SB(chspec1, ch1) {
- if (ABS(ch0 - ch1) < CH_20MHZ_APART) {
- return TRUE;
- }
- }
- }
-
- return FALSE;
-}
-
-uint8
-channel_bw_to_width(chanspec_t chspec)
-{
- uint8 channel_width;
-
- if (CHSPEC_IS80(chspec))
- channel_width = VHT_OP_CHAN_WIDTH_80;
- else if (CHSPEC_IS160(chspec))
- channel_width = VHT_OP_CHAN_WIDTH_160;
- else if (CHSPEC_IS8080(chspec))
- channel_width = VHT_OP_CHAN_WIDTH_80_80;
- else
- channel_width = VHT_OP_CHAN_WIDTH_20_40;
-
- return channel_width;
-}
 * This header file houses the defines and function prototypes used by
 * both the wl driver, tools & Apps.
*
- * Copyright (C) 1999-2019, Broadcom.
- *
+ * Copyright (C) 1999-2017, Broadcom Corporation
+ *
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
* under the terms of the GNU General Public License version 2 (the "GPL"),
* available at http://www.broadcom.com/licenses/GPLv2.php, with the
* following added to such license:
- *
+ *
* As a special exception, the copyright holders of this software give you
* permission to link this software with independent modules, and to copy and
* distribute the resulting executable under terms of your choice, provided that
* the license of that module. An independent module is a module which is not
* derived from this software. The special exception does not apply to any
* modifications of the software.
- *
+ *
* Notwithstanding the above, under no circumstances may you combine this
* software in any way with any other Broadcom software provided under a license
* other than the GPL, without Broadcom's express prior written consent.
*
* <<Broadcom-WL-IPTag/Open:>>
*
- * $Id: bcmwifi_channels.h 806092 2019-02-21 08:19:13Z $
+ * $Id: bcmwifi_channels.h 612483 2016-01-14 03:44:27Z $
*/
#ifndef _bcmwifi_channels_h_
#define _bcmwifi_channels_h_
-/* A chanspec holds the channel number, band, bandwidth and primary 20MHz sideband */
+
+/* A chanspec holds the channel number, band, bandwidth and control sideband */
typedef uint16 chanspec_t;
-typedef uint16 chanspec_band_t;
-typedef uint16 chanspec_bw_t;
-typedef uint16 chanspec_subband_t;
/* channel defines */
+#define CH_UPPER_SB 0x01
+#define CH_LOWER_SB 0x02
+#define CH_EWA_VALID 0x04
#define CH_80MHZ_APART 16
#define CH_40MHZ_APART 8
#define CH_20MHZ_APART 4
#define CH_10MHZ_APART 2
#define CH_5MHZ_APART 1 /* 2G band channels are 5 Mhz apart */
-
-#define CH_MIN_2G_CHANNEL 1u /* Min channel in 2G band */
-#define CH_MAX_2G_CHANNEL 14u /* Max channel in 2G band */
-#define CH_MIN_2G_40M_CHANNEL 3u /* Min 40MHz center channel in 2G band */
-#define CH_MAX_2G_40M_CHANNEL 11u /* Max 40MHz center channel in 2G band */
+#define CH_MAX_2G_CHANNEL 14 /* Max channel in 2G band */
/* maximum # channels the s/w supports */
#define MAXCHANNEL 224 /* max # supported channels. The max channel no is above,
*/
#define MAXCHANNEL_NUM (MAXCHANNEL - 1) /* max channel number */
-#define INVCHANNEL 255 /* error value for a bad channel */
-
/* channel bitvec */
typedef struct {
uint8 vec[MAXCHANNEL/8]; /* bitvec of channels */
#define WL_CHANSPEC_CTL_SB_UPPER WL_CHANSPEC_CTL_SB_LLU
#define WL_CHANSPEC_CTL_SB_NONE WL_CHANSPEC_CTL_SB_LLL
-#define WL_CHANSPEC_BW_MASK 0x3800u
-#define WL_CHANSPEC_BW_SHIFT 11u
-#define WL_CHANSPEC_BW_5 0x0000u
-#define WL_CHANSPEC_BW_10 0x0800u
-#define WL_CHANSPEC_BW_20 0x1000u
-#define WL_CHANSPEC_BW_40 0x1800u
-#define WL_CHANSPEC_BW_80 0x2000u
-#define WL_CHANSPEC_BW_160 0x2800u
-#define WL_CHANSPEC_BW_8080 0x3000u
-
-#define WL_CHANSPEC_BAND_MASK 0xc000u
-#define WL_CHANSPEC_BAND_SHIFT 14u
-#define WL_CHANSPEC_BAND_2G 0x0000u
-#define WL_CHANSPEC_BAND_3G 0x4000u
-#define WL_CHANSPEC_BAND_4G 0x8000u
-#define WL_CHANSPEC_BAND_5G 0xc000u
-#define INVCHANSPEC 255u
-#define MAX_CHANSPEC 0xFFFFu
+#define WL_CHANSPEC_BW_MASK 0x3800
+#define WL_CHANSPEC_BW_SHIFT 11
+#define WL_CHANSPEC_BW_5 0x0000
+#define WL_CHANSPEC_BW_10 0x0800
+#define WL_CHANSPEC_BW_20 0x1000
+#define WL_CHANSPEC_BW_40 0x1800
+#define WL_CHANSPEC_BW_80 0x2000
+#define WL_CHANSPEC_BW_160 0x2800
+#define WL_CHANSPEC_BW_8080 0x3000
+#define WL_CHANSPEC_BW_2P5 0x3800
+
+#define WL_CHANSPEC_BAND_MASK 0xc000
+#define WL_CHANSPEC_BAND_SHIFT 14
+#define WL_CHANSPEC_BAND_2G 0x0000
+#define WL_CHANSPEC_BAND_3G 0x4000
+#define WL_CHANSPEC_BAND_4G 0x8000
+#define WL_CHANSPEC_BAND_5G 0xc000
+#define INVCHANSPEC 255
+#define MAX_CHANSPEC 0xFFFF
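/* Worked decode of a chanspec word using the masks above (a sketch, assuming the
 * channel number occupies the low byte as masked by WL_CHANSPEC_CHAN_MASK):
 *   0xe02a & WL_CHANSPEC_BAND_MASK (0xc000) = 0xc000 -> 5G band
 *   0xe02a & WL_CHANSPEC_BW_MASK   (0x3800) = 0x2000 -> 80MHz bandwidth
 *   0xe02a & 0x00ff (channel field)         = 42     -> center channel 42
 * The control sideband bits are 0 here, so the primary 20MHz channel is the
 * lowest sub-band of the 80MHz channel, i.e. channel 36.
 */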
#define WL_CHANNEL_BAND(ch) (((ch) <= CH_MAX_2G_CHANNEL) ? \
WL_CHANSPEC_BAND_2G : WL_CHANSPEC_BAND_5G)
#define UPPER_20_SB(channel) (((channel) < (MAXCHANNEL - CH_10MHZ_APART)) ? \
((channel) + CH_10MHZ_APART) : 0)
-/* pass a 80MHz channel number (uint8) to get respective LL, UU, LU, UL */
#define LL_20_SB(channel) (((channel) > 3 * CH_10MHZ_APART) ? ((channel) - 3 * CH_10MHZ_APART) : 0)
-#define UU_20_SB(channel) (((channel) < (MAXCHANNEL - 3 * CH_10MHZ_APART)) ? \
+#define UU_20_SB(channel) (((channel) < (MAXCHANNEL - 3 * CH_10MHZ_APART)) ? \
((channel) + 3 * CH_10MHZ_APART) : 0)
#define LU_20_SB(channel) LOWER_20_SB(channel)
#define UL_20_SB(channel) UPPER_20_SB(channel)
#define CH20MHZ_CHSPEC(channel) (chanspec_t)((chanspec_t)(channel) | WL_CHANSPEC_BW_20 | \
(((channel) <= CH_MAX_2G_CHANNEL) ? \
WL_CHANSPEC_BAND_2G : WL_CHANSPEC_BAND_5G))
+#define CH2P5MHZ_CHSPEC(channel) (chanspec_t)((chanspec_t)(channel) | WL_CHANSPEC_BW_2P5 | \
+ (((channel) <= CH_MAX_2G_CHANNEL) ? \
+ WL_CHANSPEC_BAND_2G : WL_CHANSPEC_BAND_5G))
+#define CH5MHZ_CHSPEC(channel) (chanspec_t)((chanspec_t)(channel) | WL_CHANSPEC_BW_5 | \
+ (((channel) <= CH_MAX_2G_CHANNEL) ? \
+ WL_CHANSPEC_BAND_2G : WL_CHANSPEC_BAND_5G))
+#define CH10MHZ_CHSPEC(channel) (chanspec_t)((chanspec_t)(channel) | WL_CHANSPEC_BW_10 | \
+ (((channel) <= CH_MAX_2G_CHANNEL) ? \
+ WL_CHANSPEC_BAND_2G : WL_CHANSPEC_BAND_5G))
#define NEXT_20MHZ_CHAN(channel) (((channel) < (MAXCHANNEL - CH_20MHZ_APART)) ? \
((channel) + CH_20MHZ_APART) : 0)
#define CH40MHZ_CHSPEC(channel, ctlsb) (chanspec_t) \
#define CH160MHZ_CHSPEC(channel, ctlsb) (chanspec_t) \
((channel) | (ctlsb) | \
WL_CHANSPEC_BW_160 | WL_CHANSPEC_BAND_5G)
+#define CHBW_CHSPEC(bw, channel) (chanspec_t)((chanspec_t)(channel) | (bw) | \
+ (((channel) <= CH_MAX_2G_CHANNEL) ? \
+ WL_CHANSPEC_BAND_2G : WL_CHANSPEC_BAND_5G))
/* simple MACROs to get different fields of chanspec */
#ifdef WL11AC_80P80
#define CHSPEC_CHANNEL(chspec) wf_chspec_channel(chspec)
#else
#define CHSPEC_CHANNEL(chspec) ((uint8)((chspec) & WL_CHANSPEC_CHAN_MASK))
-#endif // endif
+#endif
#define CHSPEC_CHAN1(chspec) ((chspec) & WL_CHANSPEC_CHAN1_MASK) >> WL_CHANSPEC_CHAN1_SHIFT
#define CHSPEC_CHAN2(chspec) ((chspec) & WL_CHANSPEC_CHAN2_MASK) >> WL_CHANSPEC_CHAN2_SHIFT
#define CHSPEC_BAND(chspec) ((chspec) & WL_CHANSPEC_BAND_MASK)
#define CHSPEC_BW(chspec) ((chspec) & WL_CHANSPEC_BW_MASK)
#ifdef WL11N_20MHZONLY
+#ifdef WL11ULB
+#define CHSPEC_IS2P5(chspec) (((chspec) & WL_CHANSPEC_BW_MASK) == WL_CHANSPEC_BW_2P5)
+#define CHSPEC_IS5(chspec) (((chspec) & WL_CHANSPEC_BW_MASK) == WL_CHANSPEC_BW_5)
+#define CHSPEC_IS10(chspec) (((chspec) & WL_CHANSPEC_BW_MASK) == WL_CHANSPEC_BW_10)
+#else
+#define CHSPEC_IS2P5(chspec) 0
+#define CHSPEC_IS5(chspec) 0
+#define CHSPEC_IS10(chspec) 0
+#endif
#define CHSPEC_IS20(chspec) 1
#define CHSPEC_IS20_2G(chspec) ((((chspec) & WL_CHANSPEC_BW_MASK) == WL_CHANSPEC_BW_20) && \
CHSPEC_IS2G(chspec))
#ifndef CHSPEC_IS40
#define CHSPEC_IS40(chspec) 0
-#endif // endif
+#endif
#ifndef CHSPEC_IS80
#define CHSPEC_IS80(chspec) 0
-#endif // endif
+#endif
#ifndef CHSPEC_IS160
#define CHSPEC_IS160(chspec) 0
-#endif // endif
+#endif
#ifndef CHSPEC_IS8080
#define CHSPEC_IS8080(chspec) 0
-#endif // endif
-
-/* see FOREACH_20_SB in !WL11N_20MHZONLY section */
-#define FOREACH_20_SB(chspec, channel) \
- for (channel = CHSPEC_CHANNEL(chspec); channel; channel = 0)
-
-/* see GET_ALL_SB in !WL11N_20MHZONLY section */
-#define GET_ALL_SB(chspec, psb) do { \
- psb[0] = CHSPEC_CHANNEL(chspec); \
-} while (0)
-
+#endif
+#define BW_LE20(bw) TRUE
+#define CHSPEC_ISLE20(chspec) TRUE
#else /* !WL11N_20MHZONLY */
+#define CHSPEC_IS2P5(chspec) (((chspec) & WL_CHANSPEC_BW_MASK) == WL_CHANSPEC_BW_2P5)
+#define CHSPEC_IS5(chspec) (((chspec) & WL_CHANSPEC_BW_MASK) == WL_CHANSPEC_BW_5)
+#define CHSPEC_IS10(chspec) (((chspec) & WL_CHANSPEC_BW_MASK) == WL_CHANSPEC_BW_10)
#define CHSPEC_IS20(chspec) (((chspec) & WL_CHANSPEC_BW_MASK) == WL_CHANSPEC_BW_20)
#define CHSPEC_IS20_5G(chspec) ((((chspec) & WL_CHANSPEC_BW_MASK) == WL_CHANSPEC_BW_20) && \
CHSPEC_IS5G(chspec))
#ifndef CHSPEC_IS40
#define CHSPEC_IS40(chspec) (((chspec) & WL_CHANSPEC_BW_MASK) == WL_CHANSPEC_BW_40)
-#endif // endif
+#endif
#ifndef CHSPEC_IS80
#define CHSPEC_IS80(chspec) (((chspec) & WL_CHANSPEC_BW_MASK) == WL_CHANSPEC_BW_80)
-#endif // endif
+#endif
#ifndef CHSPEC_IS160
#define CHSPEC_IS160(chspec) (((chspec) & WL_CHANSPEC_BW_MASK) == WL_CHANSPEC_BW_160)
-#endif // endif
+#endif
#ifndef CHSPEC_IS8080
#define CHSPEC_IS8080(chspec) (((chspec) & WL_CHANSPEC_BW_MASK) == WL_CHANSPEC_BW_8080)
-#endif // endif
-
-/* pass a center channel and get channel offset from it by 10MHz */
-#define CH_OFF_10MHZ_MULTIPLES(channel, offset) ((uint8) (((offset) < 0) ? \
- (((channel) > (WL_CHANSPEC_CHAN_MASK & ((uint16)((-(offset)) * CH_10MHZ_APART)))) ?\
- ((channel) + (offset) * CH_10MHZ_APART) : 0) : \
- (((channel) < (uint16)(MAXCHANNEL - (offset) * CH_10MHZ_APART)) ? \
- ((channel) + (offset) * CH_10MHZ_APART) : 0)))
-
-#if defined(WL11AC_80P80) || defined(WL11AC_160)
-/* pass a 160MHz center channel to get 20MHz subband channel numbers */
-#define LLL_20_SB_160(channel) CH_OFF_10MHZ_MULTIPLES(channel, -7)
-#define LLU_20_SB_160(channel) CH_OFF_10MHZ_MULTIPLES(channel, -5)
-#define LUL_20_SB_160(channel) CH_OFF_10MHZ_MULTIPLES(channel, -3)
-#define LUU_20_SB_160(channel) CH_OFF_10MHZ_MULTIPLES(channel, -1)
-#define ULL_20_SB_160(channel) CH_OFF_10MHZ_MULTIPLES(channel, 1)
-#define ULU_20_SB_160(channel) CH_OFF_10MHZ_MULTIPLES(channel, 3)
-#define UUL_20_SB_160(channel) CH_OFF_10MHZ_MULTIPLES(channel, 5)
-#define UUU_20_SB_160(channel) CH_OFF_10MHZ_MULTIPLES(channel, 7)
-
-/* given an 80p80 channel, return the lower 80MHz sideband */
-#define LOWER_80_SB(chspec) (wf_chspec_primary80_channel(chspec) < \
- wf_chspec_secondary80_channel(chspec) ? \
- wf_chspec_primary80_channel(chspec) : wf_chspec_secondary80_channel(chspec))
-
-/* given an 80p80 channel, return the upper 80MHz sideband */
-#define UPPER_80_SB(chspec) (wf_chspec_primary80_channel(chspec) > \
- wf_chspec_secondary80_channel(chspec) ? \
- wf_chspec_primary80_channel(chspec) : wf_chspec_secondary80_channel(chspec))
-
-/* pass an 80P80 chanspec (not channel) to get 20MHz subnand channel numbers */
-#define LLL_20_SB_8080(chspec) CH_OFF_10MHZ_MULTIPLES(LOWER_80_SB(chspec), -3)
-#define LLU_20_SB_8080(chspec) CH_OFF_10MHZ_MULTIPLES(LOWER_80_SB(chspec), -1)
-#define LUL_20_SB_8080(chspec) CH_OFF_10MHZ_MULTIPLES(LOWER_80_SB(chspec), 1)
-#define LUU_20_SB_8080(chspec) CH_OFF_10MHZ_MULTIPLES(LOWER_80_SB(chspec), 3)
-#define ULL_20_SB_8080(chspec) CH_OFF_10MHZ_MULTIPLES(UPPER_80_SB(chspec), -3)
-#define ULU_20_SB_8080(chspec) CH_OFF_10MHZ_MULTIPLES(UPPER_80_SB(chspec), -1)
-#define UUL_20_SB_8080(chspec) CH_OFF_10MHZ_MULTIPLES(UPPER_80_SB(chspec), 1)
-#define UUU_20_SB_8080(chspec) CH_OFF_10MHZ_MULTIPLES(UPPER_80_SB(chspec), 3)
-
-/* get lowest 20MHz sideband of a given chspec
- * (works with 20, 40, 80, 160, 80p80)
- */
-#define CH_FIRST_20_SB(chspec) ((uint8) (\
- CHSPEC_IS160(chspec) ? LLL_20_SB_160(CHSPEC_CHANNEL(chspec)) : (\
- CHSPEC_IS8080(chspec) ? LLL_20_SB_8080(chspec) : (\
- CHSPEC_IS80(chspec) ? LL_20_SB(CHSPEC_CHANNEL(chspec)) : (\
- CHSPEC_IS40(chspec) ? LOWER_20_SB(CHSPEC_CHANNEL(chspec)) : \
- CHSPEC_CHANNEL(chspec))))))
-
-/* get upper most 20MHz sideband of a given chspec
- * (works with 20, 40, 80, 160, 80p80)
- */
-#define CH_LAST_20_SB(chspec) ((uint8) (\
- CHSPEC_IS160(chspec) ? UUU_20_SB_160(CHSPEC_CHANNEL(chspec)) : (\
- CHSPEC_IS8080(chspec) ? UUU_20_SB_8080(chspec) : (\
- CHSPEC_IS80(chspec) ? UU_20_SB(CHSPEC_CHANNEL(chspec)) : (\
- CHSPEC_IS40(chspec) ? UPPER_20_SB(CHSPEC_CHANNEL(chspec)) : \
- CHSPEC_CHANNEL(chspec))))))
-
-/* call this with chspec and a valid 20MHz sideband of this channel to get the next 20MHz sideband
- * (works with 80p80 only)
- * resolves to 0 if called with upper most channel
- */
-#define CH_NEXT_20_SB_IN_8080(chspec, channel) ((uint8) (\
- ((uint8) ((channel) + CH_20MHZ_APART) > CH_LAST_20_SB(chspec) ? 0 : \
- ((channel) == LUU_20_SB_8080(chspec) ? ULL_20_SB_8080(chspec) : \
- (channel) + CH_20MHZ_APART))))
-
-/* call this with chspec and a valid 20MHz sideband of this channel to get the next 20MHz sideband
- * (works with 20, 40, 80, 160, 80p80)
- * resolves to 0 if called with upper most channel
- */
-#define CH_NEXT_20_SB(chspec, channel) ((uint8) (\
- (CHSPEC_IS8080(chspec) ? CH_NEXT_20_SB_IN_8080((chspec), (channel)) : \
- ((uint8) ((channel) + CH_20MHZ_APART) > CH_LAST_20_SB(chspec) ? 0 : \
- ((channel) + CH_20MHZ_APART)))))
-
-#else /* WL11AC_80P80, WL11AC_160 */
-
-#define LLL_20_SB_160(channel) 0
-#define LLU_20_SB_160(channel) 0
-#define LUL_20_SB_160(channel) 0
-#define LUU_20_SB_160(channel) 0
-#define ULL_20_SB_160(channel) 0
-#define ULU_20_SB_160(channel) 0
-#define UUL_20_SB_160(channel) 0
-#define UUU_20_SB_160(channel) 0
-
-#define LOWER_80_SB(chspec) 0
-
-#define UPPER_80_SB(chspec) 0
-
-#define LLL_20_SB_8080(chspec) 0
-#define LLU_20_SB_8080(chspec) 0
-#define LUL_20_SB_8080(chspec) 0
-#define LUU_20_SB_8080(chspec) 0
-#define ULL_20_SB_8080(chspec) 0
-#define ULU_20_SB_8080(chspec) 0
-#define UUL_20_SB_8080(chspec) 0
-#define UUU_20_SB_8080(chspec) 0
-
-/* get lowest 20MHz sideband of a given chspec
- * (works with 20, 40, 80)
- */
-#define CH_FIRST_20_SB(chspec) ((uint8) (\
- CHSPEC_IS80(chspec) ? LL_20_SB(CHSPEC_CHANNEL(chspec)) : (\
- CHSPEC_IS40(chspec) ? LOWER_20_SB(CHSPEC_CHANNEL(chspec)) : \
- CHSPEC_CHANNEL(chspec))))
-/* get upper most 20MHz sideband of a given chspec
- * (works with 20, 40, 80, 160, 80p80)
- */
-#define CH_LAST_20_SB(chspec) ((uint8) (\
- CHSPEC_IS80(chspec) ? UU_20_SB(CHSPEC_CHANNEL(chspec)) : (\
- CHSPEC_IS40(chspec) ? UPPER_20_SB(CHSPEC_CHANNEL(chspec)) : \
- CHSPEC_CHANNEL(chspec))))
-
-/* call this with chspec and a valid 20MHz sideband of this channel to get the next 20MHz sideband
- * (works with 20, 40, 80, 160, 80p80)
- * resolves to 0 if called with upper most channel
- */
-#define CH_NEXT_20_SB(chspec, channel) ((uint8) (\
- ((uint8) ((channel) + CH_20MHZ_APART) > CH_LAST_20_SB(chspec) ? 0 : \
- ((channel) + CH_20MHZ_APART))))
-
-#endif /* WL11AC_80P80, WL11AC_160 */
-
-/* Iterator for 20MHz side bands of a chanspec: (chanspec_t chspec, uint8 channel)
- * 'chspec' chanspec_t of interest (used in loop, better to pass a resolved value than a macro)
- * 'channel' must be a variable (not an expression).
- */
-#define FOREACH_20_SB(chspec, channel) \
- for (channel = CH_FIRST_20_SB(chspec); channel; \
- channel = CH_NEXT_20_SB((chspec), channel))
-
-/* Uses iterator to populate array with all side bands involved (sorted lower to upper).
- * 'chspec' chanspec_t of interest
- * 'psb' pointer to uint8 array of enough size to hold all side bands for the given chspec
- */
-#define GET_ALL_SB(chspec, psb) do { \
- uint8 channel, idx = 0; \
- chanspec_t chspec_local = chspec; \
- FOREACH_20_SB(chspec_local, channel) \
- (psb)[idx++] = channel; \
-} while (0)
-
-/* given a chanspec of any bw, tests if primary20 SB is in lower 20, 40, 80 respectively */
-#define IS_CTL_IN_L20(chspec) !((chspec) & WL_CHANSPEC_CTL_SB_U) /* CTL SB is in low 20 of any 40 */
-#define IS_CTL_IN_L40(chspec) !((chspec) & WL_CHANSPEC_CTL_SB_UL) /* in low 40 of any 80 */
-#define IS_CTL_IN_L80(chspec) !((chspec) & WL_CHANSPEC_CTL_SB_ULL) /* in low 80 of 80p80/160 */
-
-#endif /* !WL11N_20MHZONLY */
-
-/* ULB introduced macros. Remove once ULB is cleaned from phy code */
-#define CHSPEC_IS2P5(chspec) 0
-#define CHSPEC_IS5(chspec) 0
-#define CHSPEC_IS10(chspec) 0
+#endif
+
+#ifdef WL11ULB
+#define BW_LT20(bw) (((bw) == WL_CHANSPEC_BW_2P5) || \
+ ((bw) == WL_CHANSPEC_BW_5) || \
+ ((bw) == WL_CHANSPEC_BW_10))
+#define CHSPEC_BW_LT20(chspec) (BW_LT20(CHSPEC_BW(chspec)))
+/* This MACRO is strictly to avoid abandons in existing code with the ULB feature and is in no way
+ * optimal to use. It should be replaced with CHSPEC_BW_LE() instead
+ */
+#define BW_LE20(bw) (((bw) == WL_CHANSPEC_BW_2P5) || \
+ ((bw) == WL_CHANSPEC_BW_5) || \
+ ((bw) == WL_CHANSPEC_BW_10) || \
+ ((bw) == WL_CHANSPEC_BW_20))
+#define CHSPEC_ISLE20(chspec) (BW_LE20(CHSPEC_BW(chspec)))
+
+#else /* WL11ULB */
+#define BW_LE20(bw) ((bw) == WL_CHANSPEC_BW_20)
#define CHSPEC_ISLE20(chspec) (CHSPEC_IS20(chspec))
-#define CHSPEC_BW_LE20(chspec) (CHSPEC_IS20(chspec))
+#endif /* WL11ULB */
+#endif /* !WL11N_20MHZONLY */
-#define BW_LE40(bw) ((bw) == WL_CHANSPEC_BW_20 || ((bw) == WL_CHANSPEC_BW_40))
+#define BW_LE40(bw) (BW_LE20(bw) || ((bw) == WL_CHANSPEC_BW_40))
#define BW_LE80(bw) (BW_LE40(bw) || ((bw) == WL_CHANSPEC_BW_80))
#define BW_LE160(bw) (BW_LE80(bw) || ((bw) == WL_CHANSPEC_BW_160))
-
+#define CHSPEC_BW_LE20(chspec) (BW_LE20(CHSPEC_BW(chspec)))
#define CHSPEC_IS5G(chspec) (((chspec) & WL_CHANSPEC_BAND_MASK) == WL_CHANSPEC_BAND_5G)
#define CHSPEC_IS2G(chspec) (((chspec) & WL_CHANSPEC_BAND_MASK) == WL_CHANSPEC_BAND_2G)
#define CHSPEC_SB_UPPER(chspec) \
*/
#define CHANSPEC_STR_LEN 20
-/*
- * This function returns TRUE if both the chanspec can co-exist in PHY.
- * Addition to primary20 channel, the function checks for side band for 2g 40 channels
- */
-extern bool wf_chspec_coexist(chanspec_t chspec1, chanspec_t chspec2);
#define CHSPEC_IS_BW_160_WIDE(chspec) (CHSPEC_BW(chspec) == WL_CHANSPEC_BW_160 ||\
CHSPEC_BW(chspec) == WL_CHANSPEC_BW_8080)
 * The LT/LE/GT/GE macros first check whether both the chspec bandwidth and bw are 160 wide.
 * If they are not both 160 wide, then the numeric comparison is made.
*/
+#ifdef WL11ULB
+#define CHSPEC_BW_GE(chspec, bw) \
+ (((CHSPEC_IS_BW_160_WIDE(chspec) &&\
+ ((bw) == WL_CHANSPEC_BW_160 || (bw) == WL_CHANSPEC_BW_8080)) ||\
+ (CHSPEC_BW(chspec) >= (bw))) && \
+ (!(CHSPEC_BW(chspec) == WL_CHANSPEC_BW_2P5 && (bw) != WL_CHANSPEC_BW_2P5)))
+#else /* WL11ULB */
#define CHSPEC_BW_GE(chspec, bw) \
((CHSPEC_IS_BW_160_WIDE(chspec) &&\
((bw) == WL_CHANSPEC_BW_160 || (bw) == WL_CHANSPEC_BW_8080)) ||\
(CHSPEC_BW(chspec) >= (bw)))
+#endif /* WL11ULB */
+#ifdef WL11ULB
+#define CHSPEC_BW_LE(chspec, bw) \
+ (((CHSPEC_IS_BW_160_WIDE(chspec) &&\
+ ((bw) == WL_CHANSPEC_BW_160 || (bw) == WL_CHANSPEC_BW_8080)) ||\
+ (CHSPEC_BW(chspec) <= (bw))) || \
+ (CHSPEC_BW(chspec) == WL_CHANSPEC_BW_2P5))
+#else /* WL11ULB */
#define CHSPEC_BW_LE(chspec, bw) \
((CHSPEC_IS_BW_160_WIDE(chspec) &&\
((bw) == WL_CHANSPEC_BW_160 || (bw) == WL_CHANSPEC_BW_8080)) ||\
(CHSPEC_BW(chspec) <= (bw)))
+#endif /* WL11ULB */
+#ifdef WL11ULB
+#define CHSPEC_BW_GT(chspec, bw) \
+ ((!(CHSPEC_IS_BW_160_WIDE(chspec) &&\
+ ((bw) == WL_CHANSPEC_BW_160 || (bw) == WL_CHANSPEC_BW_8080)) &&\
+ (CHSPEC_BW(chspec) > (bw))) && \
+ (CHSPEC_BW(chspec) != WL_CHANSPEC_BW_2P5))
+#else /* WL11ULB */
#define CHSPEC_BW_GT(chspec, bw) \
(!(CHSPEC_IS_BW_160_WIDE(chspec) &&\
((bw) == WL_CHANSPEC_BW_160 || (bw) == WL_CHANSPEC_BW_8080)) &&\
(CHSPEC_BW(chspec) > (bw)))
+#endif /* WL11ULB */
+#ifdef WL11ULB
+#define CHSPEC_BW_LT(chspec, bw) \
+ ((!(CHSPEC_IS_BW_160_WIDE(chspec) &&\
+ ((bw) == WL_CHANSPEC_BW_160 || (bw) == WL_CHANSPEC_BW_8080)) &&\
+ (CHSPEC_BW(chspec) < (bw))) || \
+ ((CHSPEC_BW(chspec) == WL_CHANSPEC_BW_2P5 && (bw) != WL_CHANSPEC_BW_2P5)))
+#else /* WL11ULB */
#define CHSPEC_BW_LT(chspec, bw) \
(!(CHSPEC_IS_BW_160_WIDE(chspec) &&\
((bw) == WL_CHANSPEC_BW_160 || (bw) == WL_CHANSPEC_BW_8080)) &&\
(CHSPEC_BW(chspec) < (bw)))
+#endif /* WL11ULB */
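/* Illustrative sketch (would normally live in a .c file): 160MHz and 80+80MHz are
 * both "160 wide", so the comparison macros above treat them as equal even though
 * the raw BW field values differ (WL_CHANSPEC_BW_160 = 0x2800,
 * WL_CHANSPEC_BW_8080 = 0x3000).
 */
static void example_bw_compare(chanspec_t cs160)
{
	/* assume CHSPEC_BW(cs160) == WL_CHANSPEC_BW_160 */
	int ge = CHSPEC_BW_GE(cs160, WL_CHANSPEC_BW_8080); /* TRUE: both are 160 wide */
	int gt = CHSPEC_BW_GT(cs160, WL_CHANSPEC_BW_8080); /* FALSE: not strictly wider */
	int lt = CHSPEC_BW_LT(cs160, WL_CHANSPEC_BW_40);   /* FALSE: 160MHz is wider */

	(void)ge; (void)gt; (void)lt;
}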
/* Legacy Chanspec defines
* These are the defines for the previous format of the chanspec_t
WL_LCHANSPEC_CTL_SB_NONE | (((channel) <= CH_MAX_2G_CHANNEL) ? \
WL_LCHANSPEC_BAND_2G : WL_LCHANSPEC_BAND_5G))
-#define GET_ALL_EXT wf_get_all_ext
-
/*
* WF_CHAN_FACTOR_* constants are used to calculate channel frequency
* given a channel number.
#define WF_NUM_SIDEBANDS_8080MHZ 4
#define WF_NUM_SIDEBANDS_160MHZ 8
-/**
- * Return the chanspec bandwidth in MHz
- * Bandwidth of 160 MHz will be returned for 80+80MHz chanspecs.
- *
- * @param chspec chanspec_t
- *
- * @return bandwidth of chspec in MHz units
- */
-extern uint wf_bw_chspec_to_mhz(chanspec_t chspec);
-
/**
* Convert chanspec to ascii string
*
* Verify the chanspec fields are valid.
*
 * Verify the chanspec is using a legal set of field values, i.e. that the chanspec
- * specified a band, bw, primary_sb, and channel and that the combination could be
+ * specified a band, bw, ctl_sb and channel and that the combination could be
* legal given some set of circumstances.
*
* @param chanspec input chanspec to verify
extern bool wf_chspec_valid(chanspec_t chanspec);
/**
- * Return the primary 20MHz channel.
+ * Return the primary (control) channel.
*
* This function returns the channel number of the primary 20MHz channel. For
* 20MHz channels this is just the channel number. For 40MHz or wider channels
*
* @return Returns the channel number of the primary 20MHz channel
*/
-extern uint8 wf_chspec_primary20_chan(chanspec_t chspec);
-
-/* alias for old function name */
-#define wf_chspec_ctlchan(c) wf_chspec_primary20_chan(c)
+extern uint8 wf_chspec_ctlchan(chanspec_t chspec);
-/**
+/*
* Return the bandwidth string.
*
* This function returns the bandwidth string for the passed chanspec.
*
* @param chspec input chanspec
*
- * @return Returns the bandwidth string:
- * "5", "10", "20", "40", "80", "160", "80+80"
+ * @return Returns the bandwidth string
*/
extern const char *wf_chspec_to_bw_str(chanspec_t chspec);
/**
- * Create a 20MHz chanspec for the given band.
- */
-chanspec_t wf_create_20MHz_chspec(uint channel, chanspec_band_t band);
-
-/**
- * Return the primary 20MHz chanspec.
+ * Return the primary (control) chanspec.
*
* This function returns the chanspec of the primary 20MHz channel. For 20MHz
* channels this is just the chanspec. For 40MHz or wider channels it is the
*
* @return Returns the chanspec of the primary 20MHz channel
*/
-extern chanspec_t wf_chspec_primary20_chspec(chanspec_t chspec);
-
-/* alias for old function name */
-#define wf_chspec_ctlchspec(c) wf_chspec_primary20_chspec(c)
+extern chanspec_t wf_chspec_ctlchspec(chanspec_t chspec);
/**
- * Return the primary 40MHz chanspec.
+ * Return a channel number corresponding to a frequency.
*
- * This function returns the chanspec for the primary 40MHz of an 80MHz or wider channel.
- * The primary 20MHz channel of the returned 40MHz chanspec is the same as the primary 20MHz
- * channel of the input chanspec.
+ * This function returns the chanspec for the primary 40MHz of an 80MHz channel.
+ * The control sideband specifies the same 20MHz channel that the 80MHz channel is using
+ * as the primary 20MHz channel.
*/
extern chanspec_t wf_chspec_primary40_chspec(chanspec_t chspec);
 * frequency is not a 2.4 GHz channel, or if the frequency is not an even
* multiple of 5 MHz from the base frequency to the base plus 1 GHz.
*
- * Reference 802.11-2016, section 17.3.8.3 and section 16.3.6.3
+ * Reference 802.11 REVma, section 17.3.8.3, and 802.11B section 18.4.6.2
*
* @param freq frequency in MHz
* @param start_factor base frequency in 500 kHz units, e.g. 10000 for 5 GHz
* the answer is rounded down to an integral MHz.
* -1 is returned for an out of range channel.
*
- * Reference 802.11-2016, section 17.3.8.3 and section 16.3.6.3
+ * Reference 802.11 REVma, section 17.3.8.3, and 802.11B section 18.4.6.2
*
* @param channel input channel number
* @param start_factor base frequency in 500 kHz units, e.g. 10000 for 5 GHz
*
* Returns INVCHANSPEC in case of error.
*
- * Refer to 802.11-2016 section 22.3.14 "Channelization".
+ * Refer to IEEE802.11ac section 22.3.14 "Channelization".
*/
extern chanspec_t wf_chspec_get8080_chspec(uint8 primary_20mhz,
uint8 chan0_80Mhz, uint8 chan1_80Mhz);
-/**
- * Returns the center channel of the primary 80 MHz sub-band of the provided chanspec
+/*
+ * Returns the primary 80 Mhz channel for the provided chanspec
*
- * @param chspec input chanspec
+ * chanspec - Input chanspec for which the 80MHz primary channel has to be retrieved
*
- * @return center channel number of the primary 80MHz sub-band of the input.
- * Will return the center channel of an input 80MHz chspec.
- * Will return INVCHANNEL if the chspec is malformed or less than 80MHz bw.
+ * returns -1 in case the provided chanspec is a 20/40 MHz chanspec
*/
extern uint8 wf_chspec_primary80_channel(chanspec_t chanspec);
-/**
- * Returns the center channel of the secondary 80 MHz sub-band of the provided chanspec
+/*
+ * Returns the secondary 80 Mhz channel for the provided chanspec
*
- * @param chspec input chanspec
+ * chanspec - Input chanspec for which the 80MHz secondary channel has to be retrieved
*
- * @return center channel number of the secondary 80MHz sub-band of the input.
- * Will return INVCHANNEL if the chspec is malformed or bw is not greater than 80MHz.
+ * returns -1 in case the provided chanspec is a 20/40/80 MHz chanspec
*/
extern uint8 wf_chspec_secondary80_channel(chanspec_t chanspec);
-/**
- * Returns the chanspec for the primary 80MHz sub-band of an 160MHz or 80+80 channel
- *
- * @param chspec input chanspec
- *
- * @return An 80MHz chanspec describing the primary 80MHz sub-band of the input.
- * Will return an input 80MHz chspec as is.
- * Will return INVCHANSPEC if the chspec is malformed or less than 80MHz bw.
- */
-extern chanspec_t wf_chspec_primary80_chspec(chanspec_t chspec);
-
-/**
- * Returns the chanspec for the secondary 80MHz sub-band of an 160MHz or 80+80 channel
- * The sideband in the chanspec is always set to WL_CHANSPEC_CTL_SB_LL since this sub-band
- * does not contain the primary 20MHz channel.
- *
- * @param chspec input chanspec
- *
- * @return An 80MHz chanspec describing the secondary 80MHz sub-band of the input.
- * Will return INVCHANSPEC if the chspec is malformed or bw is not greater than 80MHz.
- */
-extern chanspec_t wf_chspec_secondary80_chspec(chanspec_t chspec);
-
/*
- * For 160MHz or 80P80 chanspec, set ch[0]/ch[1] to be the low/high 80 Mhz channels
- *
- * For 20/40/80MHz chanspec, set ch[0] to be the center freq, and chan[1]=-1
+ * This function returns the chanspec for the primary 80MHz of a 160MHz or 80+80 channel.
*/
-extern void wf_chspec_get_80p80_channels(chanspec_t chspec, uint8 *ch);
+extern chanspec_t wf_chspec_primary80_chspec(chanspec_t chspec);
#ifdef WL11AC_80P80
/*
* In case of 80+80 chanspec it returns the primary 80 Mhz centre channel
*/
extern uint8 wf_chspec_channel(chanspec_t chspec);
-#endif // endif
+#endif
extern chanspec_t wf_channel_create_chspec_frm_opclass(uint8 opclass, uint8 channel);
extern int wf_channel_create_opclass_frm_chspec(chanspec_t chspec);
-
-/* Populates array with all 20MHz side bands of a given chanspec_t in the following order:
- * primary20, ext20, two ext40s, four ext80s.
- * 'chspec' is the chanspec of interest
- * 'pext' must point to an uint8 array of long enough to hold all side bands of the given chspec
- *
- * Works with 20, 40, 80, 80p80 and 160MHz chspec
- */
-
-extern void wf_get_all_ext(chanspec_t chspec, uint8 *chan_ptr);
-
-/*
- * Given two chanspecs, returns true if they overlap.
- * (Overlap: At least one 20MHz subband is common between the two chanspecs provided)
- */
-extern bool wf_chspec_overlap(chanspec_t chspec0, chanspec_t chspec1);
-
-extern uint8 channel_bw_to_width(chanspec_t chspec);
#endif /* _bcmwifi_channels_h_ */
/*
* Indices for 802.11 a/b/g/n/ac 1-3 chain symmetric transmit rates
*
- * Copyright (C) 1999-2019, Broadcom.
- *
+ * Copyright (C) 1999-2017, Broadcom Corporation
+ *
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
* under the terms of the GNU General Public License version 2 (the "GPL"),
* available at http://www.broadcom.com/licenses/GPLv2.php, with the
* following added to such license:
- *
+ *
* As a special exception, the copyright holders of this software give you
* permission to link this software with independent modules, and to copy and
* distribute the resulting executable under terms of your choice, provided that
* the license of that module. An independent module is a module which is not
* derived from this software. The special exception does not apply to any
* modifications of the software.
- *
+ *
* Notwithstanding the above, under no circumstances may you combine this
* software in any way with any other Broadcom software provided under a license
* other than the GPL, without Broadcom's express prior written consent.
*
* <<Broadcom-WL-IPTag/Open:>>
*
- * $Id: bcmwifi_rates.h 697006 2017-05-01 19:13:40Z $
+ * $Id: bcmwifi_rates.h 612483 2016-01-14 03:44:27Z $
*/
#ifndef _bcmwifi_rates_h_
#define _bcmwifi_rates_h_
-#include <typedefs.h>
-
#ifdef __cplusplus
extern "C" {
#endif /* __cplusplus */
+
#define WL_RATESET_SZ_DSSS 4
#define WL_RATESET_SZ_OFDM 8
-#define WL_RATESET_SZ_VHT_MCS 10
-#define WL_RATESET_SZ_VHT_MCS_P 12 /* 10 VHT rates + 2 proprietary rates */
-#define WL_RATESET_SZ_HE_MCS 12 /* 12 HE rates (mcs 0-11) */
+#define WL_RATESET_SZ_VHT_MCS 10
+#define WL_RATESET_SZ_VHT_MCS_P 12
+#if defined(WLPROPRIETARY_11N_RATES)
+#define WL_RATESET_SZ_HT_MCS WL_RATESET_SZ_VHT_MCS
+#else
#define WL_RATESET_SZ_HT_MCS 8
+#endif
#define WL_RATESET_SZ_HT_IOCTL 8 /* MAC histogram, compatibility with wl utility */
WL_TX_BW_10
} wl_tx_bw_t;
+
/*
* Transmit modes.
* Not all modes are listed here, only those required for disambiguation. e.g. SPEXP is not listed
WL_NUM_TX_MODES
} wl_tx_mode_t;
+
/* Number of transmit chains */
typedef enum wl_tx_chains {
WL_TX_CHAINS_1 = 1,
WL_TX_CHAINS_4
} wl_tx_chains_t;
+
/* Number of transmit streams */
typedef enum wl_tx_nss {
WL_TX_NSS_1 = 1,
WL_TX_NSS_4
} wl_tx_nss_t;
+
/* This enum maps each rate to a CLM index */
typedef enum clm_rates {
WL_RATE_P_1X1_VHT10SS1 = 22,
WL_RATE_P_1X1_VHT11SS1 = 23,
+
/************
* 2 chains *
************
WL_RATE_P_2X2_TXBF_VHT10SS2 = 102,
WL_RATE_P_2X2_TXBF_VHT11SS2 = 103,
+
/************
* 3 chains *
************
WL_RATE_P_3X3_VHT10SS3 = 162,
WL_RATE_P_3X3_VHT11SS3 = 163,
+
/****************************
* TX Beamforming, 3 chains *
****************************
WL_RATE_P_3X3_TXBF_VHT10SS3 = 206,
WL_RATE_P_3X3_TXBF_VHT11SS3 = 207,
+
/************
* 4 chains *
************
WL_RATE_P_3X4_VHT10SS3 = 266,
WL_RATE_P_3X4_VHT11SS3 = 267,
+
/* 4 Streams */
WL_RATE_4X4_SDM_MCS24 = 268,
WL_RATE_4X4_SDM_MCS25 = 269,
WL_RATE_P_4X4_VHT10SS4 = 278,
WL_RATE_P_4X4_VHT11SS4 = 279,
+
/****************************
* TX Beamforming, 4 chains *
****************************
/* Number of rate codes */
#define WL_NUMRATES 336
-/* MCS rates */
-#define WLC_MAX_VHT_MCS 11 /**< Std VHT MCS 0-9 plus prop VHT MCS 10-11 */
-#define WLC_MAX_HE_MCS 11 /**< Std HE MCS 0-11 */
-
-/* Convert encoded rate value in plcp header to numerical rates in 500 KHz increments */
-#define OFDM_PHY2MAC_RATE(rlpt) plcp_ofdm_rate_tbl[(rlpt) & 0x7]
-#define CCK_PHY2MAC_RATE(signal) ((signal)/5)
-
-/* given a proprietary MCS, get number of spatial streams */
-#define GET_PROPRIETARY_11N_MCS_NSS(mcs) (1 + ((mcs) - 85) / 8)
-
-#define GET_11N_MCS_NSS(mcs) ((mcs) < 32 ? (1 + ((mcs) / 8)) : \
- ((mcs) == 32 ? 1 : GET_PROPRIETARY_11N_MCS_NSS(mcs)))
-
-#define IS_PROPRIETARY_11N_MCS(mcs) FALSE
-#define IS_PROPRIETARY_11N_SS_MCS(mcs) FALSE /**< is proprietary HT single stream MCS */
-
-/* Store HE mcs map for all NSS in a compact form:
- *
- * bit[0:2] mcs code for NSS 1
- * bit[3:5] mcs code for NSS 2
- * ...
- * bit[21:23] mcs code for NSS 8
- */
-
-/**
- * 3 bits are used for encoding each NSS mcs map (HE MCS MAP is 24 bits)
- */
-#define HE_CAP_MCS_CODE_NONE 7
-
-/* macros to access above compact format */
-#define HE_CAP_MCS_NSS_SET_MASK 0x00ffffff /* Field is to be 24 bits long */
-#define HE_CAP_MCS_NSS_GET_SS_IDX(nss) (((nss)-1) * HE_CAP_MCS_CODE_SIZE)
-#define HE_CAP_MCS_NSS_GET_MCS(nss, mcs_nss_map) \
- (((mcs_nss_map) >> HE_CAP_MCS_NSS_GET_SS_IDX(nss)) & HE_CAP_MCS_CODE_MASK)
-#define HE_CAP_MCS_NSS_SET_MCS(nss, mcs_code, mcs_nss_map) \
- do { \
- (mcs_nss_map) &= (~(HE_CAP_MCS_CODE_MASK << HE_CAP_MCS_NSS_GET_SS_IDX(nss))); \
- (mcs_nss_map) |= (((mcs_code) & HE_CAP_MCS_CODE_MASK) << HE_CAP_MCS_NSS_GET_SS_IDX(nss)); \
- (mcs_nss_map) &= (HE_CAP_MCS_NSS_SET_MASK); \
- } while (0)
-
-extern const uint8 plcp_ofdm_rate_tbl[];
-
-uint8 wf_get_single_stream_mcs(uint mcs);
-
-uint8 wf_vht_plcp_to_rate(uint8 *plcp);
-uint wf_mcs_to_rate(uint mcs, uint nss, uint bw, int sgi);
-uint wf_he_mcs_to_rate(uint mcs, uint nss, uint bw, uint gi, bool dcm);
-uint wf_mcs_to_Ndbps(uint mcs, uint nss, uint bw);
#ifdef __cplusplus
}
#endif /* __cplusplus */
+++ /dev/null
-/*
- * Common OS-independent driver header for rate management.
- *
- * Copyright (C) 1999-2019, Broadcom.
- *
- * Unless you and Broadcom execute a separate written software license
- * agreement governing use of this software, this software is licensed to you
- * under the terms of the GNU General Public License version 2 (the "GPL"),
- * available at http://www.broadcom.com/licenses/GPLv2.php, with the
- * following added to such license:
- *
- * As a special exception, the copyright holders of this software give you
- * permission to link this software with independent modules, and to copy and
- * distribute the resulting executable under terms of your choice, provided that
- * you also meet, for each linked independent module, the terms and conditions of
- * the license of that module. An independent module is a module which is not
- * derived from this software. The special exception does not apply to any
- * modifications of the software.
- *
- * Notwithstanding the above, under no circumstances may you combine this
- * software in any way with any other Broadcom software provided under a license
- * other than the GPL, without Broadcom's express prior written consent.
- *
- *
- * <<Broadcom-WL-IPTag/Open:>>
- *
- * $Id: bcmwifi_rspec.h 736703 2017-12-18 06:55:37Z $
- */
-
-#ifndef _bcmwifi_rspec_h_
-#define _bcmwifi_rspec_h_
-
-#include <typedefs.h>
-
-/**
- * ===================================================================================
- * rate spec : holds rate and mode specific information required to generate a tx frame.
- * Legacy CCK and OFDM information is held in the same manner as was done in the past.
- * (in the lower byte) the upper 3 bytes primarily hold MIMO specific information
- * ===================================================================================
- */
-typedef uint32 ratespec_t;
-
-/* Rate spec. definitions */
-#define WL_RSPEC_RATE_MASK 0x000000FF /**< Legacy rate or MCS or MCS + NSS */
-#define WL_RSPEC_TXEXP_MASK 0x00000300 /**< Tx chain expansion beyond Nsts */
-#define WL_RSPEC_TXEXP_SHIFT 8
-#define WL_RSPEC_HE_GI_MASK 0x00000C00 /* HE GI indices */
-#define WL_RSPEC_HE_GI_SHIFT 10
-#define WL_RSPEC_BW_MASK 0x00070000 /**< Band width */
-#define WL_RSPEC_BW_SHIFT 16
-#define WL_RSPEC_DCM 0x00080000 /**< Dual Carrier Modulation */
-#define WL_RSPEC_STBC 0x00100000 /**< STBC expansion, Nsts = 2 * Nss */
-#define WL_RSPEC_TXBF 0x00200000
-#define WL_RSPEC_LDPC 0x00400000
-#define WL_RSPEC_SGI 0x00800000
-#define WL_RSPEC_SHORT_PREAMBLE 0x00800000 /**< DSSS short preable - Encoding 0 */
-#define WL_RSPEC_ENCODING_MASK 0x03000000 /**< Encoding of RSPEC_RATE field */
-#define WL_RSPEC_ENCODING_SHIFT 24
-
-#define WL_RSPEC_OVERRIDE_RATE 0x40000000 /**< override rate only */
-#define WL_RSPEC_OVERRIDE_MODE 0x80000000 /**< override both rate & mode */
-
-/* ======== RSPEC_HE_GI|RSPEC_SGI fields for HE ======== */
-
-/* GI for HE */
-#define RSPEC_HE_LTF_GI(rspec) (((rspec) & WL_RSPEC_HE_GI_MASK) >> WL_RSPEC_HE_GI_SHIFT)
-#define WL_RSPEC_HE_1x_LTF_GI_0_8us (0x0)
-#define WL_RSPEC_HE_2x_LTF_GI_0_8us (0x1)
-#define WL_RSPEC_HE_2x_LTF_GI_1_6us (0x2)
-#define WL_RSPEC_HE_4x_LTF_GI_3_2us (0x3)
-#define RSPEC_ISHEGI(rspec) (RSPEC_HE_LTF_GI(rspec) > WL_RSPEC_HE_1x_LTF_GI_0_8us)
-#define HE_GI_TO_RSPEC(gi) (((gi) << WL_RSPEC_HE_GI_SHIFT) & WL_RSPEC_HE_GI_MASK)
-/* ======== RSPEC_RATE field ======== */
-
-/* Encoding 0 - legacy rate */
-/* DSSS, CCK, and OFDM rates in [500kbps] units */
-#define WL_RSPEC_LEGACY_RATE_MASK 0x0000007F
-#define WLC_RATE_1M 2
-#define WLC_RATE_2M 4
-#define WLC_RATE_5M5 11
-#define WLC_RATE_11M 22
-#define WLC_RATE_6M 12
-#define WLC_RATE_9M 18
-#define WLC_RATE_12M 24
-#define WLC_RATE_18M 36
-#define WLC_RATE_24M 48
-#define WLC_RATE_36M 72
-#define WLC_RATE_48M 96
-#define WLC_RATE_54M 108
-
-/* Encoding 1 - HT MCS */
-#define WL_RSPEC_HT_MCS_MASK 0x0000007F /**< HT MCS value mask in rspec */
-
-/* Encoding 2 - VHT MCS + NSS */
-#define WL_RSPEC_VHT_MCS_MASK 0x0000000F /**< VHT MCS value mask in rspec */
-#define WL_RSPEC_VHT_NSS_MASK 0x000000F0 /**< VHT Nss value mask in rspec */
-#define WL_RSPEC_VHT_NSS_SHIFT 4 /**< VHT Nss value shift in rspec */
-
-/* Encoding 3 - HE MCS + NSS */
-#define WL_RSPEC_HE_MCS_MASK 0x0000000F /**< HE MCS value mask in rspec */
-#define WL_RSPEC_HE_NSS_MASK 0x000000F0 /**< HE Nss value mask in rspec */
-#define WL_RSPEC_HE_NSS_SHIFT 4 /**< HE Nss value shift in rpsec */
-
-/* ======== RSPEC_BW field ======== */
-
-#define WL_RSPEC_BW_UNSPECIFIED 0
-#define WL_RSPEC_BW_20MHZ 0x00010000
-#define WL_RSPEC_BW_40MHZ 0x00020000
-#define WL_RSPEC_BW_80MHZ 0x00030000
-#define WL_RSPEC_BW_160MHZ 0x00040000
-
-/* ======== RSPEC_ENCODING field ======== */
-
-#define WL_RSPEC_ENCODE_RATE 0x00000000 /**< Legacy rate is stored in RSPEC_RATE */
-#define WL_RSPEC_ENCODE_HT 0x01000000 /**< HT MCS is stored in RSPEC_RATE */
-#define WL_RSPEC_ENCODE_VHT 0x02000000 /**< VHT MCS and NSS are stored in RSPEC_RATE */
-#define WL_RSPEC_ENCODE_HE 0x03000000 /**< HE MCS and NSS are stored in RSPEC_RATE */
-
-/**
- * ===============================
- * Handy macros to parse rate spec
- * ===============================
- */
-#define RSPEC_BW(rspec) ((rspec) & WL_RSPEC_BW_MASK)
-#define RSPEC_IS20MHZ(rspec) (RSPEC_BW(rspec) == WL_RSPEC_BW_20MHZ)
-#define RSPEC_IS40MHZ(rspec) (RSPEC_BW(rspec) == WL_RSPEC_BW_40MHZ)
-#define RSPEC_IS80MHZ(rspec) (RSPEC_BW(rspec) == WL_RSPEC_BW_80MHZ)
-#define RSPEC_IS160MHZ(rspec) (RSPEC_BW(rspec) == WL_RSPEC_BW_160MHZ)
-
-#define RSPEC_ISSGI(rspec) (((rspec) & WL_RSPEC_SGI) != 0)
-#define RSPEC_ISLDPC(rspec) (((rspec) & WL_RSPEC_LDPC) != 0)
-#define RSPEC_ISSTBC(rspec) (((rspec) & WL_RSPEC_STBC) != 0)
-#define RSPEC_ISTXBF(rspec) (((rspec) & WL_RSPEC_TXBF) != 0)
-
-#define RSPEC_TXEXP(rspec) (((rspec) & WL_RSPEC_TXEXP_MASK) >> WL_RSPEC_TXEXP_SHIFT)
-
-#define RSPEC_ENCODE(rspec) (((rspec) & WL_RSPEC_ENCODING_MASK) >> WL_RSPEC_ENCODING_SHIFT)
-#define RSPEC_ISLEGACY(rspec) (((rspec) & WL_RSPEC_ENCODING_MASK) == WL_RSPEC_ENCODE_RATE)
-
-#define RSPEC_ISCCK(rspec) (RSPEC_ISLEGACY(rspec) && \
- (int8)rate_info[(rspec) & WL_RSPEC_LEGACY_RATE_MASK] > 0)
-#define RSPEC_ISOFDM(rspec) (RSPEC_ISLEGACY(rspec) && \
- (int8)rate_info[(rspec) & WL_RSPEC_LEGACY_RATE_MASK] < 0)
-
-#define RSPEC_ISHT(rspec) (((rspec) & WL_RSPEC_ENCODING_MASK) == WL_RSPEC_ENCODE_HT)
-#ifdef WL11AC
-#define RSPEC_ISVHT(rspec) (((rspec) & WL_RSPEC_ENCODING_MASK) == WL_RSPEC_ENCODE_VHT)
-#else /* WL11AC */
-#define RSPEC_ISVHT(rspec) 0
-#endif /* WL11AC */
-#ifdef WL11AX
-#define RSPEC_ISHE(rspec) (((rspec) & WL_RSPEC_ENCODING_MASK) == WL_RSPEC_ENCODE_HE)
-#else /* WL11AX */
-#define RSPEC_ISHE(rspec) 0
-#endif /* WL11AX */
-
-/**
- * ================================
- * Handy macros to create rate spec
- * ================================
- */
-/* create ratespecs */
-#define LEGACY_RSPEC(rate) (WL_RSPEC_ENCODE_RATE | WL_RSPEC_BW_20MHZ | \
- ((rate) & WL_RSPEC_LEGACY_RATE_MASK))
-#define CCK_RSPEC(cck) LEGACY_RSPEC(cck)
-#define OFDM_RSPEC(ofdm) LEGACY_RSPEC(ofdm)
-#define HT_RSPEC(mcs) (WL_RSPEC_ENCODE_HT | ((mcs) & WL_RSPEC_HT_MCS_MASK))
-#define VHT_RSPEC(mcs, nss) (WL_RSPEC_ENCODE_VHT | \
- (((nss) << WL_RSPEC_VHT_NSS_SHIFT) & WL_RSPEC_VHT_NSS_MASK) | \
- ((mcs) & WL_RSPEC_VHT_MCS_MASK))
-#define HE_RSPEC(mcs, nss) (WL_RSPEC_ENCODE_HE | \
- (((nss) << WL_RSPEC_HE_NSS_SHIFT) & WL_RSPEC_HE_NSS_MASK) | \
- ((mcs) & WL_RSPEC_HE_MCS_MASK))
-
-/**
- * ==================
- * Other handy macros
- * ==================
- */
-
-/* return rate in unit of Kbps */
-#define RSPEC2KBPS(rspec) wf_rspec_to_rate(rspec)
-
-/* return rate in unit of 500Kbps */
-#define RSPEC2RATE(rspec) ((rspec) & WL_RSPEC_LEGACY_RATE_MASK)
-
-/**
- * =================================
- * Macros to use the rate_info table
- * =================================
- */
-/* phy_rate table index is in [500kbps] units */
-#define WLC_MAXRATE 108 /**< in 500kbps units */
-extern const uint8 rate_info[];
-/* phy_rate table value is encoded */
-#define RATE_INFO_OFDM_MASK 0x80 /* ofdm mask */
-#define RATE_INFO_RATE_MASK 0x7f /* rate signal index mask */
-#define RATE_INFO_M_RATE_MASK 0x0f /* M_RATE_TABLE index mask */
-#define RATE_INFO_RATE_ISCCK(r) ((r) <= WLC_MAXRATE && (int8)rate_info[r] > 0)
-#define RATE_INFO_RATE_ISOFDM(r) ((r) <= WLC_MAXRATE && (int8)rate_info[r] < 0)
-
-/**
- * ===================
- * function prototypes
- * ===================
- */
-ratespec_t wf_vht_plcp_to_rspec(uint8 *plcp);
-ratespec_t wf_he_plcp_to_rspec(uint8 *plcp);
-uint wf_rspec_to_rate(ratespec_t rspec);
-
-#endif /* _bcmwifi_rspec_h_ */
/*
* Driver O/S-independent utility routines
*
- * Copyright (C) 1999-2019, Broadcom.
- *
+ * Copyright (C) 1999-2017, Broadcom Corporation
+ *
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
* under the terms of the GNU General Public License version 2 (the "GPL"),
* available at http://www.broadcom.com/licenses/GPLv2.php, with the
* following added to such license:
- *
+ *
* As a special exception, the copyright holders of this software give you
* permission to link this software with independent modules, and to copy and
* distribute the resulting executable under terms of your choice, provided that
* the license of that module. An independent module is a module which is not
* derived from this software. The special exception does not apply to any
* modifications of the software.
- *
+ *
* Notwithstanding the above, under no circumstances may you combine this
* software in any way with any other Broadcom software provided under a license
* other than the GPL, without Broadcom's express prior written consent.
*
* <<Broadcom-WL-IPTag/Open:>>
*
- * $Id: bcmxtlv.c 788740 2018-11-13 21:45:01Z $
+ * $Id: bcmxtlv.c 628611 2016-03-31 17:53:25Z $
*/
#include <bcm_cfg.h>
#ifdef BCMDRIVER
#include <osl.h>
#else /* !BCMDRIVER */
+ #include <stdlib.h> /* AS!!! */
#include <stdio.h>
#include <string.h>
#include <stdlib.h>
#ifndef ASSERT
#define ASSERT(exp)
-#endif // endif
+#endif
+INLINE void* MALLOCZ(void *o, size_t s) { BCM_REFERENCE(o); return calloc(1, s); }
+INLINE void MFREE(void *o, void *p, size_t s) { BCM_REFERENCE(o); BCM_REFERENCE(s); free(p); }
#endif /* !BCMDRIVER */
-#include <bcmtlv.h>
#include <bcmendian.h>
#include <bcmutils.h>
-int
-bcm_xtlv_hdr_size(bcm_xtlv_opts_t opts)
+static INLINE int bcm_xtlv_size_for_data(int dlen, bcm_xtlv_opts_t opts)
{
- int len = (int)OFFSETOF(bcm_xtlv_t, data); /* nominal */
- if (opts & BCM_XTLV_OPTION_LENU8) --len;
- if (opts & BCM_XTLV_OPTION_IDU8) --len;
-
- return len;
-}
-
-bool
-bcm_valid_xtlv(const bcm_xtlv_t *elt, int buf_len, bcm_xtlv_opts_t opts)
-{
- return elt != NULL &&
- buf_len >= bcm_xtlv_hdr_size(opts) &&
- buf_len >= bcm_xtlv_size(elt, opts);
-}
-
-int
-bcm_xtlv_size_for_data(int dlen, bcm_xtlv_opts_t opts)
-{
- int hsz;
-
- hsz = bcm_xtlv_hdr_size(opts);
- return ((opts & BCM_XTLV_OPTION_ALIGN32) ? ALIGN_SIZE(dlen + hsz, 4)
- : (dlen + hsz));
-}
-
-int
-bcm_xtlv_size(const bcm_xtlv_t *elt, bcm_xtlv_opts_t opts)
-{
- int size; /* size including header, data, and any pad */
- int len; /* length wthout padding */
-
- len = BCM_XTLV_LEN_EX(elt, opts);
- size = bcm_xtlv_size_for_data(len, opts);
- return size;
-}
-
-int
-bcm_xtlv_len(const bcm_xtlv_t *elt, bcm_xtlv_opts_t opts)
-{
- const uint8 *lenp;
- int len;
-
- lenp = (const uint8 *)&elt->len; /* nominal */
- if (opts & BCM_XTLV_OPTION_IDU8) {
- --lenp;
- }
-
- if (opts & BCM_XTLV_OPTION_LENU8) {
- len = *lenp;
- } else if (opts & BCM_XTLV_OPTION_LENBE) {
- len = (uint32)hton16(elt->len);
- } else {
- len = ltoh16_ua(lenp);
- }
-
- return len;
-}
-
-int
-bcm_xtlv_id(const bcm_xtlv_t *elt, bcm_xtlv_opts_t opts)
-{
- int id = 0;
- if (opts & BCM_XTLV_OPTION_IDU8) {
- id = *(const uint8 *)elt;
- } else if (opts & BCM_XTLV_OPTION_IDBE) {
- id = (uint32)hton16(elt->id);
- } else {
- id = ltoh16_ua((const uint8 *)elt);
- }
-
- return id;
+ return ((opts & BCM_XTLV_OPTION_ALIGN32) ? ALIGN_SIZE(dlen + BCM_XTLV_HDR_SIZE, 4)
+ : (dlen + BCM_XTLV_HDR_SIZE));
}
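/* A small worked example of the size calculation above: with the 4-byte xtlv
 * header (uint16 id + uint16 len), a 5-byte payload takes 4 + 5 = 9 bytes
 * unaligned, or 12 bytes when BCM_XTLV_OPTION_ALIGN32 rounds the total up to a
 * multiple of 4.
 */
static void example_xtlv_sizes(void)
{
	int plain = bcm_xtlv_size_for_data(5, (bcm_xtlv_opts_t)0);        /* 9 */
	int aligned = bcm_xtlv_size_for_data(5, BCM_XTLV_OPTION_ALIGN32); /* 12 */

	(void)plain; (void)aligned;
}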
bcm_xtlv_t *
-bcm_next_xtlv(const bcm_xtlv_t *elt, int *buflen, bcm_xtlv_opts_t opts)
+bcm_next_xtlv(bcm_xtlv_t *elt, int *buflen, bcm_xtlv_opts_t opts)
{
int sz;
/* advance to next elt */
- sz = BCM_XTLV_SIZE_EX(elt, opts);
- elt = (const bcm_xtlv_t*)((const uint8 *)elt + sz);
+ sz = BCM_XTLV_SIZE(elt, opts);
+ elt = (bcm_xtlv_t*)((uint8 *)elt + sz);
*buflen -= sz;
/* validate next elt */
if (!bcm_valid_xtlv(elt, *buflen, opts))
return NULL;
- GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
- return (bcm_xtlv_t *)(elt);
- GCC_DIAGNOSTIC_POP();
+ return elt;
}
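/* Illustrative sketch of walking a packed buffer with bcm_next_xtlv(): validate
 * the first element, then advance until the remaining length no longer holds a
 * valid xtlv (bcm_next_xtlv() then returns NULL).
 */
static void example_walk_xtlvs(uint8 *buf, int buflen, bcm_xtlv_opts_t opts)
{
	bcm_xtlv_t *elt = (bcm_xtlv_t *)buf;

	if (!bcm_valid_xtlv(elt, buflen, opts))
		return;

	while (elt != NULL) {
		/* id and len are packed little-endian in the buffer */
		printf("xtlv id %u len %u\n", ltoh16(elt->id), ltoh16(elt->len));
		elt = bcm_next_xtlv(elt, &buflen, opts);
	}
}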
int
uint16
bcm_xtlv_buf_len(bcm_xtlvbuf_t *tbuf)
{
- uint16 len;
-
- if (tbuf)
- len = (uint16)(tbuf->buf - tbuf->head);
- else
- len = 0;
-
- return len;
+ if (tbuf == NULL) return 0;
+ return (uint16)(tbuf->buf - tbuf->head);
}
-
uint16
bcm_xtlv_buf_rlen(bcm_xtlvbuf_t *tbuf)
{
- uint16 rlen;
- if (tbuf)
- rlen = tbuf->size - bcm_xtlv_buf_len(tbuf);
- else
- rlen = 0;
-
- return rlen;
+ if (tbuf == NULL) return 0;
+ return tbuf->size - bcm_xtlv_buf_len(tbuf);
}
-
uint8 *
bcm_xtlv_buf(bcm_xtlvbuf_t *tbuf)
{
- return tbuf ? tbuf->buf : NULL;
+ if (tbuf == NULL) return NULL;
+ return tbuf->buf;
}
-
uint8 *
bcm_xtlv_head(bcm_xtlvbuf_t *tbuf)
{
- return tbuf ? tbuf->head : NULL;
+ if (tbuf == NULL) return NULL;
+ return tbuf->head;
}
-
-void
-bcm_xtlv_pack_xtlv(bcm_xtlv_t *xtlv, uint16 type, uint16 len, const uint8 *data,
- bcm_xtlv_opts_t opts)
+int
+bcm_xtlv_put_data(bcm_xtlvbuf_t *tbuf, uint16 type, const void *data, uint16 dlen)
{
- uint8 *data_buf;
- bcm_xtlv_opts_t mask = BCM_XTLV_OPTION_IDU8 | BCM_XTLV_OPTION_LENU8;
-
- if (!(opts & mask)) { /* default */
- uint8 *idp = (uint8 *)xtlv;
- uint8 *lenp = idp + sizeof(xtlv->id);
- htol16_ua_store(type, idp);
- htol16_ua_store(len, lenp);
- data_buf = lenp + sizeof(uint16);
- } else if ((opts & mask) == mask) { /* u8 id and u8 len */
- uint8 *idp = (uint8 *)xtlv;
- uint8 *lenp = idp + 1;
- *idp = (uint8)type;
- *lenp = (uint8)len;
- data_buf = lenp + sizeof(uint8);
- } else if (opts & BCM_XTLV_OPTION_IDU8) { /* u8 id, u16 len */
- uint8 *idp = (uint8 *)xtlv;
- uint8 *lenp = idp + 1;
- *idp = (uint8)type;
- htol16_ua_store(len, lenp);
- data_buf = lenp + sizeof(uint16);
- } else if (opts & BCM_XTLV_OPTION_LENU8) { /* u16 id, u8 len */
- uint8 *idp = (uint8 *)xtlv;
- uint8 *lenp = idp + sizeof(uint16);
- htol16_ua_store(type, idp);
- *lenp = (uint8)len;
- data_buf = lenp + sizeof(uint8);
- } else {
- bool Unexpected_xtlv_option = TRUE;
- BCM_REFERENCE(Unexpected_xtlv_option);
- ASSERT(!Unexpected_xtlv_option);
- return;
- }
-
- if (opts & BCM_XTLV_OPTION_LENU8) {
- ASSERT(len <= 0x00ff);
- len &= 0xff;
- }
-
- if (data != NULL)
- memcpy(data_buf, data, len);
-}
+ bcm_xtlv_t *xtlv;
+ int size;
-/* xtlv header is always packed in LE order */
-void
-bcm_xtlv_unpack_xtlv(const bcm_xtlv_t *xtlv, uint16 *type, uint16 *len,
- const uint8 **data, bcm_xtlv_opts_t opts)
-{
- if (type)
- *type = (uint16)bcm_xtlv_id(xtlv, opts);
- if (len)
- *len = (uint16)bcm_xtlv_len(xtlv, opts);
- if (data)
- *data = (const uint8 *)xtlv + BCM_XTLV_HDR_SIZE_EX(opts);
+ if (tbuf == NULL)
+ return BCME_BADARG;
+ size = bcm_xtlv_size_for_data(dlen, tbuf->opts);
+ if (bcm_xtlv_buf_rlen(tbuf) < size)
+ return BCME_NOMEM;
+ xtlv = (bcm_xtlv_t *)bcm_xtlv_buf(tbuf);
+ xtlv->id = htol16(type);
+ xtlv->len = htol16(dlen);
+ memcpy(xtlv->data, data, dlen);
+ tbuf->buf += size;
+ return BCME_OK;
}
-
int
-bcm_xtlv_put_data(bcm_xtlvbuf_t *tbuf, uint16 type, const uint8 *data, int n)
+bcm_xtlv_put_8(bcm_xtlvbuf_t *tbuf, uint16 type, const int8 data)
{
bcm_xtlv_t *xtlv;
int size;
if (tbuf == NULL)
return BCME_BADARG;
-
- size = bcm_xtlv_size_for_data(n, tbuf->opts);
+ size = bcm_xtlv_size_for_data(1, tbuf->opts);
if (bcm_xtlv_buf_rlen(tbuf) < size)
return BCME_NOMEM;
-
xtlv = (bcm_xtlv_t *)bcm_xtlv_buf(tbuf);
- bcm_xtlv_pack_xtlv(xtlv, type, (uint16)n, data, tbuf->opts);
- tbuf->buf += size; /* note: data may be NULL, reserves space */
+ xtlv->id = htol16(type);
+ xtlv->len = htol16(sizeof(data));
+ xtlv->data[0] = data;
+ tbuf->buf += size;
return BCME_OK;
}
-
-static int
-bcm_xtlv_put_int(bcm_xtlvbuf_t *tbuf, uint16 type, const uint8 *data, int n, int int_sz)
+int
+bcm_xtlv_put_16(bcm_xtlvbuf_t *tbuf, uint16 type, const int16 data)
{
bcm_xtlv_t *xtlv;
- int xtlv_len;
- uint8 *xtlv_data;
- int err = BCME_OK;
+ int size;
- if (tbuf == NULL) {
- err = BCME_BADARG;
- goto done;
- }
+ if (tbuf == NULL)
+ return BCME_BADARG;
+ size = bcm_xtlv_size_for_data(2, tbuf->opts);
+ if (bcm_xtlv_buf_rlen(tbuf) < size)
+ return BCME_NOMEM;
xtlv = (bcm_xtlv_t *)bcm_xtlv_buf(tbuf);
-
- /* put type and length in xtlv and reserve data space */
- xtlv_len = n * int_sz;
- err = bcm_xtlv_put_data(tbuf, type, NULL, xtlv_len);
- if (err != BCME_OK)
- goto done;
-
- xtlv_data = (uint8 *)xtlv + bcm_xtlv_hdr_size(tbuf->opts);
-
- /* write data w/ little-endianness into buffer - single loop, aligned access */
- for (; n != 0; --n, xtlv_data += int_sz, data += int_sz) {
- switch (int_sz) {
- case sizeof(uint8):
- break;
- case sizeof(uint16):
- {
- uint16 v = load16_ua(data);
- htol16_ua_store(v, xtlv_data);
- break;
- }
- case sizeof(uint32):
- {
- uint32 v = load32_ua(data);
- htol32_ua_store(v, xtlv_data);
- break;
- }
- case sizeof(uint64):
- {
- uint64 v = load64_ua(data);
- htol64_ua_store(v, xtlv_data);
- break;
- }
- default:
- err = BCME_UNSUPPORTED;
- goto done;
- }
- }
-
-done:
- return err;
-}
-
-int
-bcm_xtlv_put16(bcm_xtlvbuf_t *tbuf, uint16 type, const uint16 *data, int n)
-{
- return bcm_xtlv_put_int(tbuf, type, (const uint8 *)data, n, sizeof(uint16));
+ xtlv->id = htol16(type);
+ xtlv->len = htol16(sizeof(data));
+ htol16_ua_store(data, xtlv->data);
+ tbuf->buf += size;
+ return BCME_OK;
}
-
int
-bcm_xtlv_put32(bcm_xtlvbuf_t *tbuf, uint16 type, const uint32 *data, int n)
+bcm_xtlv_put_32(bcm_xtlvbuf_t *tbuf, uint16 type, const int32 data)
{
- return bcm_xtlv_put_int(tbuf, type, (const uint8 *)data, n, sizeof(uint32));
-}
+ bcm_xtlv_t *xtlv;
+ int size;
-int
-bcm_xtlv_put64(bcm_xtlvbuf_t *tbuf, uint16 type, const uint64 *data, int n)
-{
- return bcm_xtlv_put_int(tbuf, type, (const uint8 *)data, n, sizeof(uint64));
+ if (tbuf == NULL)
+ return BCME_BADARG;
+ size = bcm_xtlv_size_for_data(4, tbuf->opts);
+ if (bcm_xtlv_buf_rlen(tbuf) < size)
+ return BCME_NOMEM;
+ xtlv = (bcm_xtlv_t *)bcm_xtlv_buf(tbuf);
+ xtlv->id = htol16(type);
+ xtlv->len = htol16(sizeof(data));
+ htol32_ua_store(data, xtlv->data);
+ tbuf->buf += size;
+ return BCME_OK;
}
/*
* caller is responsible for dst space check
*/
int
-bcm_unpack_xtlv_entry(const uint8 **tlv_buf, uint16 xpct_type, uint16 xpct_len,
- uint8 *dst_data, bcm_xtlv_opts_t opts)
+bcm_unpack_xtlv_entry(uint8 **tlv_buf, uint16 xpct_type, uint16 xpct_len, void *dst,
+ bcm_xtlv_opts_t opts)
{
- const bcm_xtlv_t *ptlv = (const bcm_xtlv_t *)*tlv_buf;
+ bcm_xtlv_t *ptlv = (bcm_xtlv_t *)*tlv_buf;
uint16 len;
uint16 type;
- const uint8 *data;
ASSERT(ptlv);
-
- bcm_xtlv_unpack_xtlv(ptlv, &type, &len, &data, opts);
- if (len) {
- if ((type != xpct_type) || (len > xpct_len))
+ /* tlv header is always packed in LE order */
+ len = ltoh16(ptlv->len);
+ type = ltoh16(ptlv->id);
+ if (len == 0) {
+ /* z-len tlv headers: allow, but don't process */
+ printf("z-len, skip unpack\n");
+ } else {
+ if ((type != xpct_type) ||
+ (len > xpct_len)) {
+ printf("xtlv_unpack Error: found[type:%d,len:%d] != xpct[type:%d,len:%d]\n",
+ type, len, xpct_type, xpct_len);
return BCME_BADARG;
- if (dst_data && data)
- memcpy(dst_data, data, len); /* copy data to dst */
+ }
+ /* copy tlv record to caller's buffer */
+ memcpy(dst, ptlv->data, len);
}
-
- *tlv_buf += BCM_XTLV_SIZE_EX(ptlv, opts);
+ *tlv_buf = (uint8*)(*tlv_buf) + BCM_XTLV_SIZE(ptlv, opts);
return BCME_OK;
}
/*
- * packs user data into tlv record and advances tlv pointer to next xtlv slot
+ * packs user data into tlv record
+ * advances tlv pointer to next xtlv slot
* buflen is used for tlv_buf space check
*/
int
-bcm_pack_xtlv_entry(uint8 **tlv_buf, uint16 *buflen, uint16 type, uint16 len,
- const uint8 *src_data, bcm_xtlv_opts_t opts)
+bcm_pack_xtlv_entry(uint8 **tlv_buf, uint16 *buflen, uint16 type, uint16 len, void *src,
+ bcm_xtlv_opts_t opts)
{
bcm_xtlv_t *ptlv = (bcm_xtlv_t *)*tlv_buf;
int size;
ASSERT(ptlv);
+ ASSERT(src);
size = bcm_xtlv_size_for_data(len, opts);
/* make sure the packed record fits into the caller-provided tlv buffer */
- if (size > *buflen)
+ if (size > *buflen) {
+ printf("bcm_pack_xtlv_entry: no space tlv_buf: requested:%d, available:%d\n",
+ size, *buflen);
return BCME_BADLEN;
+ }
+ ptlv->id = htol16(type);
+ ptlv->len = htol16(len);
- bcm_xtlv_pack_xtlv(ptlv, type, len, src_data, opts);
+ /* copy callers data */
+ memcpy(ptlv->data, src, len);
/* advance callers pointer to tlv buff */
*tlv_buf = (uint8*)(*tlv_buf) + size;
* one callback invocation per found tlv record
*/
int
-bcm_unpack_xtlv_buf(void *ctx, const uint8 *tlv_buf, uint16 buflen, bcm_xtlv_opts_t opts,
+bcm_unpack_xtlv_buf(void *ctx, uint8 *tlv_buf, uint16 buflen, bcm_xtlv_opts_t opts,
bcm_xtlv_unpack_cbfn_t *cbfn)
{
uint16 len;
uint16 type;
int res = BCME_OK;
int size;
- const bcm_xtlv_t *ptlv;
+ bcm_xtlv_t *ptlv;
int sbuflen = buflen;
- const uint8 *data;
- int hdr_size;
ASSERT(!buflen || tlv_buf);
ASSERT(!buflen || cbfn);
- hdr_size = BCM_XTLV_HDR_SIZE_EX(opts);
- while (sbuflen >= hdr_size) {
- ptlv = (const bcm_xtlv_t *)tlv_buf;
+ while (sbuflen >= (int)BCM_XTLV_HDR_SIZE) {
+ ptlv = (bcm_xtlv_t *)tlv_buf;
+
+ /* tlv header is always packed in LE order */
+ len = ltoh16(ptlv->len);
+ type = ltoh16(ptlv->id);
- bcm_xtlv_unpack_xtlv(ptlv, &type, &len, &data, opts);
size = bcm_xtlv_size_for_data(len, opts);
sbuflen -= size;
- if (sbuflen < 0) /* check for buffer overrun */
+ /* check for possible buffer overrun */
+ if (sbuflen < 0)
break;
- if ((res = cbfn(ctx, data, type, len)) != BCME_OK)
+ if ((res = cbfn(ctx, ptlv->data, type, len)) != BCME_OK)
break;
- tlv_buf += size;
+ tlv_buf = (uint8*)tlv_buf + size;
}
return res;
}
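A hedged sketch of how bcm_unpack_xtlv_buf() above is typically driven (not part of the patch): the callback parameters mirror the cbfn(ctx, ptlv->data, type, len) call in the loop, the exact prototype is whatever bcm_xtlv_unpack_cbfn_t declares, and the context struct and id 0x10 are illustrative.

/* Hypothetical callback: count records and remember the length of id 0x10. */
struct example_unpack_ctx {
	int nrecords;
	uint16 len_of_0x10;
};

static int
example_unpack_cb(void *ctx, uint8 *data, uint16 type, uint16 len)
{
	struct example_unpack_ctx *c = (struct example_unpack_ctx *)ctx;

	BCM_REFERENCE(data);
	c->nrecords++;
	if (type == 0x10)
		c->len_of_0x10 = len;
	return BCME_OK;		/* a non-OK return stops the walk, as in the loop above */
}

/* walked as: bcm_unpack_xtlv_buf(&ctx, buf, buflen, opts, example_unpack_cb); */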
int
-bcm_pack_xtlv_buf(void *ctx, uint8 *tlv_buf, uint16 buflen, bcm_xtlv_opts_t opts,
+bcm_pack_xtlv_buf(void *ctx, void *tlv_buf, uint16 buflen, bcm_xtlv_opts_t opts,
bcm_pack_xtlv_next_info_cbfn_t get_next, bcm_pack_xtlv_pack_next_cbfn_t pack_next,
int *outlen)
{
uint8 *buf;
bool more;
int size;
- int hdr_size;
ASSERT(get_next && pack_next);
- buf = tlv_buf;
+ buf = (uint8 *)tlv_buf;
startp = buf;
endp = (uint8 *)buf + buflen;
more = TRUE;
- hdr_size = BCM_XTLV_HDR_SIZE_EX(opts);
-
while (more && (buf < endp)) {
more = get_next(ctx, &tlv_id, &tlv_len);
size = bcm_xtlv_size_for_data(tlv_len, opts);
goto done;
}
- bcm_xtlv_pack_xtlv((bcm_xtlv_t *)buf, tlv_id, tlv_len, NULL, opts);
- pack_next(ctx, tlv_id, tlv_len, buf + hdr_size);
+ htol16_ua_store(tlv_id, buf);
+ htol16_ua_store(tlv_len, buf + sizeof(tlv_id));
+ pack_next(ctx, tlv_id, tlv_len, buf + BCM_XTLV_HDR_SIZE);
buf += size;
}
* pack xtlv buffer from memory according to xtlv_desc_t
*/
int
-bcm_pack_xtlv_buf_from_mem(uint8 **tlv_buf, uint16 *buflen, const xtlv_desc_t *items,
+bcm_pack_xtlv_buf_from_mem(void **tlv_buf, uint16 *buflen, xtlv_desc_t *items,
bcm_xtlv_opts_t opts)
{
int res = BCME_OK;
- uint8 *ptlv = *tlv_buf;
+ uint8 *ptlv = (uint8 *)*tlv_buf;
while (items->type != 0) {
- if (items->len && items->ptr) {
- res = bcm_pack_xtlv_entry(&ptlv, buflen, items->type,
- items->len, items->ptr, opts);
- if (res != BCME_OK)
- break;
+ if ((items->len > 0) && (res = bcm_pack_xtlv_entry(&ptlv,
+ buflen, items->type,
+ items->len, items->ptr, opts) != BCME_OK)) {
+ break;
}
items++;
}
-
*tlv_buf = ptlv; /* update the external pointer */
return res;
}
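An illustrative descriptor table for bcm_pack_xtlv_buf_from_mem()/bcm_unpack_xtlv_buf_to_mem() (not from the patch). Only the fields the loops above actually touch (type, len, ptr) are used; designated initializers avoid any assumption about field order, and the ids and backing variables are hypothetical.

/* Hypothetical descriptor table: walked until the zero-type terminator is seen. */
static uint32 example_val32;
static uint8 example_blob[16];

static xtlv_desc_t example_items[] = {
	{ .type = 0x01, .len = (uint16)sizeof(example_val32), .ptr = &example_val32 },
	{ .type = 0x02, .len = (uint16)sizeof(example_blob), .ptr = example_blob },
	{ .type = 0, .len = 0, .ptr = NULL }	/* terminator required by the while() above */
};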
*
*/
int
-bcm_unpack_xtlv_buf_to_mem(uint8 *tlv_buf, int *buflen, xtlv_desc_t *items,
- bcm_xtlv_opts_t opts)
+bcm_unpack_xtlv_buf_to_mem(void *tlv_buf, int *buflen, xtlv_desc_t *items, bcm_xtlv_opts_t opts)
{
int res = BCME_OK;
bcm_xtlv_t *elt;
for (; elt != NULL && res == BCME_OK; elt = bcm_next_xtlv(elt, buflen, opts)) {
/* find matches in desc_t items */
xtlv_desc_t *dst_desc = items;
- uint16 len, type;
- const uint8 *data;
+ uint16 len = ltoh16(elt->len);
- bcm_xtlv_unpack_xtlv(elt, &type, &len, &data, opts);
while (dst_desc->type != 0) {
- if (type == dst_desc->type) {
+ if (ltoh16(elt->id) == dst_desc->type) {
if (len != dst_desc->len) {
res = BCME_BADLEN;
} else {
- memcpy(dst_desc->ptr, data, len);
+ memcpy(dst_desc->ptr, elt->data, len);
}
break;
}
/*
* return data pointer of a given ID from xtlv buffer.
- * If the specified xTLV ID is found, on return *datalen will contain
+ * If the specified xTLV ID is found, on return *datalen_out will contain
* the data length of the xTLV ID.
*/
-const uint8*
-bcm_get_data_from_xtlv_buf(const uint8 *tlv_buf, uint16 buflen, uint16 id,
- uint16 *datalen, bcm_xtlv_opts_t opts)
+void *
+bcm_get_data_from_xtlv_buf(uint8 *tlv_buf, uint16 buflen, uint16 id,
+ uint16 *datalen_out, bcm_xtlv_opts_t opts)
{
- const uint8 *retptr = NULL;
+ void *retptr = NULL;
uint16 type, len;
int size;
- const bcm_xtlv_t *ptlv;
+ bcm_xtlv_t *ptlv;
int sbuflen = buflen;
- const uint8 *data;
- int hdr_size;
-
- hdr_size = BCM_XTLV_HDR_SIZE_EX(opts);
- /* Init the datalength */
- if (datalen) {
- *datalen = 0;
- }
- while (sbuflen >= hdr_size) {
- ptlv = (const bcm_xtlv_t *)tlv_buf;
- bcm_xtlv_unpack_xtlv(ptlv, &type, &len, &data, opts);
+ while (sbuflen >= (int)BCM_XTLV_HDR_SIZE) {
+ ptlv = (bcm_xtlv_t *)tlv_buf;
+ /* tlv header is always packed in LE order */
+ type = ltoh16(ptlv->id);
+ len = ltoh16(ptlv->len);
size = bcm_xtlv_size_for_data(len, opts);
+
sbuflen -= size;
- if (sbuflen < 0) /* buffer overrun? */
+ /* check for possible buffer overrun */
+ if (sbuflen < 0) {
+ printf("%s %d: Invalid sbuflen %d\n",
+ __FUNCTION__, __LINE__, sbuflen);
break;
+ }
if (id == type) {
- retptr = data;
- if (datalen)
- *datalen = len;
+ retptr = ptlv->data;
+ if (datalen_out) {
+ *datalen_out = len;
+ }
break;
}
-
tlv_buf += size;
}
return retptr;
}
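A hedged usage sketch for bcm_get_data_from_xtlv_buf() above (not part of the patch): the id 0x23 and the ltoh32_ua() conversion are illustrative assumptions; the null/length checks follow directly from the function's return contract.

/* Hypothetical lookup: fetch a 32-bit value for id 0x23 out of an xtlv response. */
static int
example_get_u32(uint8 *resp_buf, uint16 resp_len, bcm_xtlv_opts_t opts, uint32 *out)
{
	uint16 vlen = 0;
	void *vdata = bcm_get_data_from_xtlv_buf(resp_buf, resp_len, 0x23, &vlen, opts);

	if (vdata == NULL || vlen < sizeof(uint32))
		return BCME_NOTFOUND;
	*out = ltoh32_ua(vdata);	/* value bytes are packed LE, as in the put helpers */
	return BCME_OK;
}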
-bcm_xtlv_t*
-bcm_xtlv_bcopy(const bcm_xtlv_t *src, bcm_xtlv_t *dst,
- int src_buf_len, int dst_buf_len, bcm_xtlv_opts_t opts)
+int bcm_xtlv_size(const bcm_xtlv_t *elt, bcm_xtlv_opts_t opts)
{
- bcm_xtlv_t *dst_next = NULL;
- src = (src && bcm_valid_xtlv(src, src_buf_len, opts)) ? src : NULL;
- if (src && dst) {
- uint16 type;
- uint16 len;
- const uint8 *data;
- int size;
- bcm_xtlv_unpack_xtlv(src, &type, &len, &data, opts);
- size = bcm_xtlv_size_for_data(len, opts);
- if (size <= dst_buf_len) {
- bcm_xtlv_pack_xtlv(dst, type, len, data, opts);
- dst_next = (bcm_xtlv_t *)((uint8 *)dst + size);
- }
- }
+ int size; /* entire size of the XTLV including header, data, and optional padding */
+ int len; /* real length of the XTLV value, without padding */
- return dst_next;
+ len = BCM_XTLV_LEN(elt);
+
+ size = bcm_xtlv_size_for_data(len, opts);
+
+ return size;
}
/* Get the nvram image */
file_exists = ((pnv_path != NULL) && (pnv_path[0] != '\0'));
if (file_exists) {
- nv_image = dhd_os_open_image1(dhd_bus->dhd, pnv_path);
+ nv_image = dhd_os_open_image(pnv_path);
if (nv_image == NULL) {
printf("%s: Open nvram file failed %s\n", __FUNCTION__, pnv_path);
goto err;
goto err;
}
if (nv_image) {
- dhd_os_close_image1(dhd_bus->dhd, nv_image);
+ dhd_os_close_image(nv_image);
nv_image = NULL;
}
/* Get the first block of fw to calculate total_len */
file_exists = ((pfw_path != NULL) && (pfw_path[0] != '\0'));
if (file_exists) {
- fw_image = dhd_os_open_image1(dhd_bus->dhd, pfw_path);
+ fw_image = dhd_os_open_image(pfw_path);
if (fw_image == NULL) {
printf("%s: Open fw file failed %s\n", __FUNCTION__, pfw_path);
goto err;
if (fw_memblock)
MFREE(dhd_bus->pub.osh, fw_memblock, MAX_NVRAMBUF_SIZE);
if (fw_image)
- dhd_os_close_image1(dhd_bus->dhd, fw_image);
+ dhd_os_close_image(fw_image);
if (nv_memblock)
MFREE(dhd_bus->pub.osh, nv_memblock, MAX_NVRAMBUF_SIZE);
if (nv_image)
- dhd_os_close_image1(dhd_bus->dhd, nv_image);
+ dhd_os_close_image(nv_image);
return bcmerror;
}
return bcmerror;
}
+void
+dhd_set_path_params(struct dhd_bus *bus)
+{
+ /* External conf takes precedence if specified */
+ dhd_conf_preinit(bus->dhd);
+
+ if (bus->dhd->conf_path[0] == '\0') {
+ dhd_conf_set_path(bus->dhd, "config.txt", bus->dhd->conf_path, bus->nv_path);
+ }
+ if (bus->dhd->clm_path[0] == '\0') {
+ dhd_conf_set_path(bus->dhd, "clm.blob", bus->dhd->clm_path, bus->fw_path);
+ }
+#ifdef CONFIG_PATH_AUTO_SELECT
+ dhd_conf_set_conf_name_by_chip(bus->dhd, bus->dhd->conf_path);
+#endif
+
+ dhd_conf_read_config(bus->dhd, bus->dhd->conf_path);
+
+ dhd_conf_set_fw_name_by_chip(bus->dhd, bus->fw_path);
+ dhd_conf_set_nv_name_by_chip(bus->dhd, bus->nv_path);
+ dhd_conf_set_clm_name_by_chip(bus->dhd, bus->dhd->clm_path);
+
+ printf("Final fw_path=%s\n", bus->fw_path);
+ printf("Final nv_path=%s\n", bus->nv_path);
+ printf("Final clm_path=%s\n", bus->dhd->clm_path);
+ printf("Final conf_path=%s\n", bus->dhd->conf_path);
+
+}
+
void
dhd_bus_update_fw_nv_path(struct dhd_bus *bus, char *pfw_path,
char *pnv_path, char *pclm_path, char *pconf_path)
bus->dhd->clm_path = pclm_path;
bus->dhd->conf_path = pconf_path;
- dhd_conf_set_path_params(bus->dhd, bus->fw_path, bus->nv_path);
+ dhd_set_path_params(bus);
}
}
} else {
pub = g_pub;
- osh = pub->osh;
}
if (pub->bus) {
if (dbus_download_firmware(bus, bus->fw_path, bus->nv_path) != DBUS_OK)
goto fail;
#endif
- } else {
- goto fail;
}
}
} else {
* Provides type definitions and function prototypes used to link the
* DHD OS, bus, and protocol modules.
*
- * Copyright (C) 1999-2019, Broadcom.
- *
+ * Copyright (C) 1999-2017, Broadcom Corporation
+ *
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
* under the terms of the GNU General Public License version 2 (the "GPL"),
* available at http://www.broadcom.com/licenses/GPLv2.php, with the
* following added to such license:
- *
+ *
* As a special exception, the copyright holders of this software give you
* permission to link this software with independent modules, and to copy and
* distribute the resulting executable under terms of your choice, provided that
* the license of that module. An independent module is a module which is not
* derived from this software. The special exception does not apply to any
* modifications of the software.
- *
+ *
* Notwithstanding the above, under no circumstances may you combine this
* software in any way with any other Broadcom software provided under a license
* other than the GPL, without Broadcom's express prior written consent.
*
* <<Broadcom-WL-IPTag/Open:>>
*
- * $Id: dhd.h 822756 2019-05-30 13:20:26Z $
+ * $Id: dhd.h 711448 2017-07-18 08:27:03Z $
*/
/****************
#include <linux/random.h>
#include <linux/spinlock.h>
#include <linux/ethtool.h>
-#include <linux/fs.h>
-#include <linux/proc_fs.h>
#include <asm/uaccess.h>
#include <asm/unaligned.h>
-#if defined(CONFIG_HAS_WAKELOCK)
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && defined(CONFIG_HAS_WAKELOCK)
#include <linux/wakelock.h>
-#endif /* defined CONFIG_HAS_WAKELOCK */
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 4, 0)
-#include <uapi/linux/sched/types.h>
-#elif LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 0)
+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && defined (CONFIG_HAS_WAKELOCK) */
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 0)
#include <linux/sched/types.h>
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 0) */
/* The kernel threading is sdio-specific */
#define ALL_INTERFACES 0xff
-/* H2D and D2H ring dump is enabled by default */
-#ifdef PCIE_FULL_DONGLE
-#define DHD_DUMP_PCIE_RINGS
-#endif /* PCIE_FULL_DONGLE */
-
#include <wlioctl.h>
-#include <bcmstdlib_s.h>
-#include <dhdioctl.h>
#include <wlfc_proto.h>
#include <hnd_armtrap.h>
#if defined(DUMP_IOCTL_IOV_LIST) || defined(DHD_DEBUG)
#include <bcmutils.h>
#endif /* DUMP_IOCTL_IOV_LIST || DHD_DEBUG */
+#include <hnd_pktq.h>
#if defined(BCMWDF)
#include <wdf.h>
#include <WdfMiniport.h>
#endif /* (BCMWDF) */
-#ifdef DHD_ERPOM
-#include <pom.h>
-#endif /* DHD_ERPOM */
-
-#include <dngl_stats.h>
-
#ifdef DEBUG_DPC_THREAD_WATCHDOG
#define MAX_RESCHED_CNT 600
#endif /* DEBUG_DPC_THREAD_WATCHDOG */
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 14, 0) && LINUX_VERSION_CODE < \
+ KERNEL_VERSION(3, 18, 0) || defined(CONFIG_BCMDHD_VENDOR_EXT))
+#define WL_VENDOR_EXT_SUPPORT
+#endif /* 3.18 > KERNEL_VER >= 3.14 || defined(CONFIG_BCMDHD_VENDOR_EXT) */
+
#if defined(KEEP_ALIVE)
/* Default KEEP_ALIVE Period is 55 sec to prevent AP from sending Keep Alive probe frame */
#define KEEP_ALIVE_PERIOD 55000
#define NULL_PKT_STR "null_pkt"
#endif /* KEEP_ALIVE */
-/* By default enabled from here, later the WQ code will be removed */
-#define DHD_USE_KTHREAD_FOR_LOGTRACE
-
-/*
- * Earlier DHD used to have it own time stamp for printk and
- * Dongle used to have its own time stamp for console messages
- * With this flag, DHD and Dongle console messges will have same time zone
- */
-#define DHD_H2D_LOG_TIME_SYNC
/* Forward decls */
struct dhd_bus;
struct dhd_prot;
struct dhd_ioctl;
struct dhd_dbg;
struct dhd_ts;
-#ifdef DNGL_AXI_ERROR_LOGGING
-struct dhd_axi_error_dump;
-#endif /* DNGL_AXI_ERROR_LOGGING */
/* The level of bus communication with the dongle */
enum dhd_bus_state {
DHD_BUS_REMOVE, /* Bus has been removed */
};
-/* The level of bus communication with the dongle */
-enum dhd_bus_devreset_type {
- DHD_BUS_DEVRESET_ON = 0, /* ON */
- DHD_BUS_DEVRESET_OFF = 1, /* OFF */
- DHD_BUS_DEVRESET_FLR = 2, /* FLR */
- DHD_BUS_DEVRESET_FLR_FORCE_FAIL = 3, /* FLR FORCE FAIL */
- DHD_BUS_DEVRESET_QUIESCE = 4, /* FLR */
-};
-
/*
* Bit fields to indicate cleanup processes that wait till they are finished.
* Future synchronizable processes can add their bit field below and update
DHD_BUS_BUSY_RPM_SUSPEND_IN_PROGRESS | \
DHD_BUS_BUSY_RPM_RESUME_IN_PROGRESS)
#define DHD_BUS_BUSY_IN_CHECKDIED 0x800
-#define DHD_BUS_BUSY_IN_MEMDUMP 0x1000
-#define DHD_BUS_BUSY_IN_SSSRDUMP 0x2000
-#define DHD_BUS_BUSY_IN_LOGDUMP 0x4000
-#define DHD_BUS_BUSY_IN_HALDUMP 0x8000
#define DHD_BUS_BUSY_SET_IN_TX(dhdp) \
(dhdp)->dhd_bus_busy_state |= DHD_BUS_BUSY_IN_TX
(dhdp)->dhd_bus_busy_state |= DHD_BUS_BUSY_RPM_RESUME_IN_PROGRESS
#define DHD_BUS_BUSY_SET_IN_CHECKDIED(dhdp) \
(dhdp)->dhd_bus_busy_state |= DHD_BUS_BUSY_IN_CHECKDIED
-#define DHD_BUS_BUSY_SET_IN_MEMDUMP(dhdp) \
- (dhdp)->dhd_bus_busy_state |= DHD_BUS_BUSY_IN_MEMDUMP
-#define DHD_BUS_BUSY_SET_IN_SSSRDUMP(dhdp) \
- (dhdp)->dhd_bus_busy_state |= DHD_BUS_BUSY_IN_SSSRDUMP
-#define DHD_BUS_BUSY_SET_IN_LOGDUMP(dhdp) \
- (dhdp)->dhd_bus_busy_state |= DHD_BUS_BUSY_IN_LOGDUMP
-#define DHD_BUS_BUSY_SET_IN_HALDUMP(dhdp) \
- (dhdp)->dhd_bus_busy_state |= DHD_BUS_BUSY_IN_HALDUMP
#define DHD_BUS_BUSY_CLEAR_IN_TX(dhdp) \
(dhdp)->dhd_bus_busy_state &= ~DHD_BUS_BUSY_IN_TX
(dhdp)->dhd_bus_busy_state &= ~DHD_BUS_BUSY_RPM_RESUME_IN_PROGRESS
#define DHD_BUS_BUSY_CLEAR_IN_CHECKDIED(dhdp) \
(dhdp)->dhd_bus_busy_state &= ~DHD_BUS_BUSY_IN_CHECKDIED
-#define DHD_BUS_BUSY_CLEAR_IN_MEMDUMP(dhdp) \
- (dhdp)->dhd_bus_busy_state &= ~DHD_BUS_BUSY_IN_MEMDUMP
-#define DHD_BUS_BUSY_CLEAR_IN_SSSRDUMP(dhdp) \
- (dhdp)->dhd_bus_busy_state &= ~DHD_BUS_BUSY_IN_SSSRDUMP
-#define DHD_BUS_BUSY_CLEAR_IN_LOGDUMP(dhdp) \
- (dhdp)->dhd_bus_busy_state &= ~DHD_BUS_BUSY_IN_LOGDUMP
-#define DHD_BUS_BUSY_CLEAR_IN_HALDUMP(dhdp) \
- (dhdp)->dhd_bus_busy_state &= ~DHD_BUS_BUSY_IN_HALDUMP
#define DHD_BUS_BUSY_CHECK_IN_TX(dhdp) \
((dhdp)->dhd_bus_busy_state & DHD_BUS_BUSY_IN_TX)
((dhdp)->dhd_bus_busy_state & DHD_BUS_BUSY_RPM_ALL)
#define DHD_BUS_BUSY_CHECK_IN_CHECKDIED(dhdp) \
((dhdp)->dhd_bus_busy_state & DHD_BUS_BUSY_IN_CHECKDIED)
-#define DHD_BUS_BUSY_CHECK_IN_MEMDUMP(dhdp) \
- ((dhdp)->dhd_bus_busy_state & DHD_BUS_BUSY_IN_MEMDUMP)
-#define DHD_BUS_BUSY_CHECK_IN_SSSRDUMP(dhdp) \
- ((dhdp)->dhd_bus_busy_state & DHD_BUS_BUSY_IN_SSSRDUMP)
-#define DHD_BUS_BUSY_CHECK_IN_LOGDUMP(dhdp) \
- ((dhdp)->dhd_bus_busy_state & DHD_BUS_BUSY_IN_LOGDUMP)
-#define DHD_BUS_BUSY_CHECK_IN_HALDUMP(dhdp) \
- ((dhdp)->dhd_bus_busy_state & DHD_BUS_BUSY_IN_HALDUMP)
#define DHD_BUS_BUSY_CHECK_IDLE(dhdp) \
((dhdp)->dhd_bus_busy_state == 0)
#define DHD_BUS_CHECK_SUSPEND_OR_SUSPEND_IN_PROGRESS(dhdp) \
- ((dhdp)->busstate == DHD_BUS_SUSPEND || DHD_BUS_BUSY_CHECK_SUSPEND_IN_PROGRESS(dhdp))
+ ((dhdp)->busstate == DHD_BUS_SUSPEND || DHD_BUS_BUSY_CHECK_SUSPEND_IN_PROGRESS(dhdp) || \
+ DHD_BUS_BUSY_CHECK_RPM_SUSPEND_IN_PROGRESS(dhdp))
#define DHD_BUS_CHECK_ANY_SUSPEND_IN_PROGRESS(dhdp) \
(DHD_BUS_BUSY_CHECK_SUSPEND_IN_PROGRESS(dhdp) || \
DHD_BUS_BUSY_CHECK_RPM_SUSPEND_IN_PROGRESS(dhdp))
-#define DHD_BUS_CHECK_SUSPEND_OR_ANY_SUSPEND_IN_PROGRESS(dhdp) \
- ((dhdp)->busstate == DHD_BUS_SUSPEND || DHD_BUS_CHECK_ANY_SUSPEND_IN_PROGRESS(dhdp))
-
#define DHD_BUS_CHECK_DOWN_OR_DOWN_IN_PROGRESS(dhdp) \
((dhdp)->busstate == DHD_BUS_DOWN || (dhdp)->busstate == DHD_BUS_DOWN_IN_PROGRESS || \
(dhdp)->busstate == DHD_BUS_REMOVE)
#define DHD_BUS_CHECK_REMOVE(dhdp) \
((dhdp)->busstate == DHD_BUS_REMOVE)
-/* IOVar flags for common error checks */
-#define DHD_IOVF_PWRREQ_BYPASS (1<<0) /* flags to prevent bp access during host sleep state */
-
-#define MAX_MTU_SZ (1600u)
-
-/* (u64)result = (u64)dividend / (u64)divisor */
-#define DIV_U64_BY_U64(dividend, divisor) div64_u64(dividend, divisor)
-
-/* (u64)result = (u64)dividend / (u32)divisor */
-#define DIV_U64_BY_U32(dividend, divisor) div_u64(dividend, divisor)
-
-/* Be careful while using this, as it divides dividend also
- * (u32)remainder = (u64)dividend % (u32)divisor
- * (u64)dividend = (u64)dividend / (u32)divisor
+/* Macro to print Ethernet Address as String
+ * expects both arguments as (char *)
*/
-#define DIV_AND_MOD_U64_BY_U32(dividend, divisor) do_div(dividend, divisor)
-
-/* (u32)remainder = (u64)dividend % (u32)divisor */
-#define MOD_U64_BY_U32(dividend, divisor) ({ \
- uint64 temp_dividend = (dividend); \
- uint32 rem = DIV_AND_MOD_U64_BY_U32(temp_dividend, (divisor)); \
- rem; \
-})
-
-#define SEC_USEC_FMT \
- "%5llu.%06u"
+#define DHD_MAC_TO_STR(mac, str) (snprintf(str, ETHER_ADDR_STR_LEN, \
+ "%02x:%02x:%02x:%02x:%02x:%02x\n", \
+ (uchar)mac[0]&0xff, \
+ (uchar)mac[1]&0xff, \
+ (uchar)mac[2]&0xff, \
+ (uchar)mac[3]&0xff, \
+ (uchar)mac[4]&0xff, \
+ (uchar)mac[5]&0xff))
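A short, hedged usage sketch for the DHD_MAC_TO_STR() macro added above (not part of the patch): the destination must be at least ETHER_ADDR_STR_LEN bytes, and struct ether_addr/printf are assumed to be the names already used elsewhere in this driver.

/* Hypothetical use: format a peer MAC address for a debug print. */
static void
example_print_mac(struct ether_addr *ea)
{
	char eabuf[ETHER_ADDR_STR_LEN];

	DHD_MAC_TO_STR((char *)ea->octet, eabuf);
	printf("peer %s", eabuf);
}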
-/* t: time in nano second */
-#define GET_SEC_USEC(t) \
- DIV_U64_BY_U32(t, NSEC_PER_SEC), \
- ((uint32)(MOD_U64_BY_U32(t, NSEC_PER_SEC) / (uint32)NSEC_PER_USEC))
/* Download Types */
typedef enum download_type {
FW,
NVRAM,
- CLM_BLOB,
- TXCAP_BLOB
+ CLM_BLOB
} download_type_t;
+
/* For supporting multiple interfaces */
-#define DHD_MAX_IFS 16
-#define DHD_MAX_STATIC_IFS 1
-#define DHD_DEL_IF -0xE
-#define DHD_BAD_IF -0xF
-#define DHD_DUMMY_INFO_IF 0xDEAF /* Hack i/f to handle events from INFO Ring */
-#define DHD_EVENT_IF DHD_DUMMY_INFO_IF
+#define DHD_MAX_IFS 16
+#define DHD_DEL_IF -0xE
+#define DHD_BAD_IF -0xF
+#define DHD_EVENT_IF 0xFFFF /* Hack i/f to handle events from INFO Ring */
enum dhd_op_flags {
/* Firmware requested operation mode */
#define DHD_OPMODE_SUPPORTED(dhd, opmode_flag) \
(dhd ? ((((dhd_pub_t *)dhd)->op_mode) & opmode_flag) : -1)
-#define DHD_OPMODE_STA_SOFTAP_CONCURR(dhd) \
- (dhd ? (((dhd->op_mode) & DHD_FLAG_CONCURR_STA_HOSTAP_MODE) == \
- DHD_FLAG_CONCURR_STA_HOSTAP_MODE) : 0)
/* Max sequential TX/RX Control timeouts to set HANG event */
#ifndef MAX_CNTL_TX_TIMEOUT
#define DHD_SCAN_ASSOC_ACTIVE_TIME 40 /* ms: Embedded default Active setting from DHD */
#define DHD_SCAN_UNASSOC_ACTIVE_TIME 80 /* ms: Embedded def. Unassoc Active setting from DHD */
+#define DHD_SCAN_PASSIVE_TIME 130 /* ms: Embedded default Passive setting from DHD */
#define DHD_SCAN_HOME_TIME 45 /* ms: Embedded default Home time setting from DHD */
#define DHD_SCAN_HOME_AWAY_TIME 100 /* ms: Embedded default Home Away time setting from DHD */
-#ifndef CUSTOM_SCAN_PASSIVE_TIME
-#define DHD_SCAN_PASSIVE_TIME 130 /* ms: Embedded default Passive setting from DHD */
-#else
-#define DHD_SCAN_PASSIVE_TIME CUSTOM_SCAN_PASSIVE_TIME /* ms: Custom Passive setting from DHD */
-#endif /* CUSTOM_SCAN_PASSIVE_TIME */
#ifndef POWERUP_MAX_RETRY
#define POWERUP_MAX_RETRY 3 /* how many times we retry to power up the chip */
-#endif // endif
+#endif
#ifndef POWERUP_WAIT_MS
#define POWERUP_WAIT_MS 2000 /* ms: time out in waiting wifi to come up */
-#endif // endif
+#endif
/*
* MAX_NVRAMBUF_SIZE determines the size of the buffer in the DHD that holds
* the NVRAM data. That is the size of the buffer pointed to by bus->vars
*/
#define MAX_NVRAMBUF_SIZE (16 * 1024) /* max nvram buf size */
#define MAX_CLM_BUF_SIZE (48 * 1024) /* max clm blob size */
-#define MAX_TXCAP_BUF_SIZE (16 * 1024) /* max txcap blob size */
#ifdef DHD_DEBUG
#define DHD_JOIN_MAX_TIME_DEFAULT 10000 /* ms: Max time out for joining AP */
#define DHD_SCAN_DEF_TIMEOUT 10000 /* ms: Max time out for scan in progress */
-#endif /* DHD_DEBUG */
+#endif
#ifndef CONFIG_BCMDHD_CLM_PATH
-#define CONFIG_BCMDHD_CLM_PATH "/etc/wifi/bcmdhd_clm.blob"
+#define CONFIG_BCMDHD_CLM_PATH "/system/etc/wifi/bcmdhd_clm.blob"
#endif /* CONFIG_BCMDHD_CLM_PATH */
#define WL_CCODE_NULL_COUNTRY "#n"
#define FW_VER_STR_LEN 128
-#define FWID_STR_LEN 256
#define CLM_VER_STR_LEN 128
#define BUS_API_REV_STR_LEN 128
-#define FW_VER_STR "Version"
-#define FWID_STR_1 "FWID: 01-"
-#define FWID_STR_2 "FWID=01-"
extern char bus_api_revision[];
enum dhd_bus_wake_state {
- WAKE_LOCK_OFF = 0,
- WAKE_LOCK_PRIV = 1,
- WAKE_LOCK_DPC = 2,
- WAKE_LOCK_IOCTL = 3,
- WAKE_LOCK_DOWNLOAD = 4,
- WAKE_LOCK_TMOUT = 5,
- WAKE_LOCK_WATCHDOG = 6,
- WAKE_LOCK_LINK_DOWN_TMOUT = 7,
- WAKE_LOCK_PNO_FIND_TMOUT = 8,
- WAKE_LOCK_SOFTAP_SET = 9,
- WAKE_LOCK_SOFTAP_STOP = 10,
- WAKE_LOCK_SOFTAP_START = 11,
- WAKE_LOCK_SOFTAP_THREAD = 12
+ WAKE_LOCK_OFF,
+ WAKE_LOCK_PRIV,
+ WAKE_LOCK_DPC,
+ WAKE_LOCK_IOCTL,
+ WAKE_LOCK_DOWNLOAD,
+ WAKE_LOCK_TMOUT,
+ WAKE_LOCK_WATCHDOG,
+ WAKE_LOCK_LINK_DOWN_TMOUT,
+ WAKE_LOCK_PNO_FIND_TMOUT,
+ WAKE_LOCK_SOFTAP_SET,
+ WAKE_LOCK_SOFTAP_STOP,
+ WAKE_LOCK_SOFTAP_START,
+ WAKE_LOCK_SOFTAP_THREAD
};
+#ifdef PCIE_INB_DW
+enum dhd_bus_ds_state {
+ DW_DEVICE_DS_INVALID = -1,
+ DW_DEVICE_DS_DEV_SLEEP = 0,
+ DW_DEVICE_DS_DEV_SLEEP_PEND,
+ DW_DEVICE_DS_DISABLED_WAIT,
+ DW_DEVICE_DS_DEV_WAKE,
+ DW_DEVICE_DS_ACTIVE,
+ DW_DEVICE_HOST_SLEEP_WAIT,
+ DW_DEVICE_HOST_SLEEP,
+ DW_DEVICE_HOST_WAKE_WAIT,
+ DW_DEVICE_DS_D3_INFORM_WAIT
+};
+#endif /* PCIE_INB_DW */
+
enum dhd_prealloc_index {
- DHD_PREALLOC_PROT = 0,
- DHD_PREALLOC_RXBUF = 1,
- DHD_PREALLOC_DATABUF = 2,
- DHD_PREALLOC_OSL_BUF = 3,
- DHD_PREALLOC_SKB_BUF = 4,
- DHD_PREALLOC_WIPHY_ESCAN0 = 5,
- DHD_PREALLOC_WIPHY_ESCAN1 = 6,
- DHD_PREALLOC_DHD_INFO = 7,
- DHD_PREALLOC_DHD_WLFC_INFO = 8,
- DHD_PREALLOC_IF_FLOW_LKUP = 9,
+ DHD_PREALLOC_PROT = 0,
+ DHD_PREALLOC_RXBUF,
+ DHD_PREALLOC_DATABUF,
+ DHD_PREALLOC_OSL_BUF,
+#if defined(STATIC_WL_PRIV_STRUCT)
+ DHD_PREALLOC_WIPHY_ESCAN0 = 5,
+#endif /* STATIC_WL_PRIV_STRUCT */
+ DHD_PREALLOC_DHD_INFO = 7,
+ DHD_PREALLOC_DHD_WLFC_INFO = 8,
+ DHD_PREALLOC_IF_FLOW_LKUP = 9,
/* 10 */
- DHD_PREALLOC_MEMDUMP_RAM = 11,
- DHD_PREALLOC_DHD_WLFC_HANGER = 12,
- DHD_PREALLOC_PKTID_MAP = 13,
- DHD_PREALLOC_PKTID_MAP_IOCTL = 14,
- DHD_PREALLOC_DHD_LOG_DUMP_BUF = 15,
- DHD_PREALLOC_DHD_LOG_DUMP_BUF_EX = 16,
- DHD_PREALLOC_DHD_PKTLOG_DUMP_BUF = 17,
+ DHD_PREALLOC_MEMDUMP_RAM = 11,
+ DHD_PREALLOC_DHD_WLFC_HANGER = 12,
+ DHD_PREALLOC_PKTID_MAP = 13,
+ DHD_PREALLOC_PKTID_MAP_IOCTL = 14,
+ DHD_PREALLOC_DHD_LOG_DUMP_BUF = 15,
+ DHD_PREALLOC_DHD_LOG_DUMP_BUF_EX = 16,
+ DHD_PREALLOC_DHD_PKTLOG_DUMP_BUF = 17,
DHD_PREALLOC_STAT_REPORT_BUF = 18,
- DHD_PREALLOC_WL_ESCAN = 19,
+ DHD_PREALLOC_WL_ESCAN_INFO = 19,
DHD_PREALLOC_FW_VERBOSE_RING = 20,
DHD_PREALLOC_FW_EVENT_RING = 21,
DHD_PREALLOC_DHD_EVENT_RING = 22,
};
enum dhd_dongledump_mode {
- DUMP_DISABLED = 0,
- DUMP_MEMONLY = 1,
- DUMP_MEMFILE = 2,
- DUMP_MEMFILE_BUGON = 3,
- DUMP_MEMFILE_MAX = 4
+ DUMP_DISABLED = 0,
+ DUMP_MEMONLY,
+ DUMP_MEMFILE,
+ DUMP_MEMFILE_BUGON,
+ DUMP_MEMFILE_MAX
};
enum dhd_dongledump_type {
- DUMP_TYPE_RESUMED_ON_TIMEOUT = 1,
- DUMP_TYPE_D3_ACK_TIMEOUT = 2,
- DUMP_TYPE_DONGLE_TRAP = 3,
- DUMP_TYPE_MEMORY_CORRUPTION = 4,
- DUMP_TYPE_PKTID_AUDIT_FAILURE = 5,
- DUMP_TYPE_PKTID_INVALID = 6,
- DUMP_TYPE_SCAN_TIMEOUT = 7,
- DUMP_TYPE_SCAN_BUSY = 8,
- DUMP_TYPE_BY_SYSDUMP = 9,
- DUMP_TYPE_BY_LIVELOCK = 10,
- DUMP_TYPE_AP_LINKUP_FAILURE = 11,
- DUMP_TYPE_AP_ABNORMAL_ACCESS = 12,
- DUMP_TYPE_CFG_VENDOR_TRIGGERED = 13,
- DUMP_TYPE_RESUMED_ON_TIMEOUT_TX = 14,
- DUMP_TYPE_RESUMED_ON_TIMEOUT_RX = 15,
- DUMP_TYPE_RESUMED_ON_INVALID_RING_RDWR = 16,
- DUMP_TYPE_TRANS_ID_MISMATCH = 17,
- DUMP_TYPE_IFACE_OP_FAILURE = 18,
- DUMP_TYPE_DONGLE_INIT_FAILURE = 19,
- DUMP_TYPE_READ_SHM_FAIL = 20,
- DUMP_TYPE_DONGLE_HOST_EVENT = 21,
- DUMP_TYPE_SMMU_FAULT = 22,
- DUMP_TYPE_RESUMED_UNKNOWN = 23,
- DUMP_TYPE_DUE_TO_BT = 24,
- DUMP_TYPE_LOGSET_BEYOND_RANGE = 25,
- DUMP_TYPE_BY_USER = 26,
- DUMP_TYPE_CTO_RECOVERY = 27,
- DUMP_TYPE_SEQUENTIAL_PRIVCMD_ERROR = 28,
- DUMP_TYPE_PROXD_TIMEOUT = 29,
- DUMP_TYPE_PKTID_POOL_DEPLETED = 30
+ DUMP_TYPE_RESUMED_ON_TIMEOUT = 1,
+ DUMP_TYPE_D3_ACK_TIMEOUT,
+ DUMP_TYPE_DONGLE_TRAP,
+ DUMP_TYPE_MEMORY_CORRUPTION,
+ DUMP_TYPE_PKTID_AUDIT_FAILURE,
+ DUMP_TYPE_PKTID_INVALID,
+ DUMP_TYPE_SCAN_TIMEOUT,
+ DUMP_TYPE_JOIN_TIMEOUT,
+ DUMP_TYPE_SCAN_BUSY,
+ DUMP_TYPE_BY_SYSDUMP,
+ DUMP_TYPE_BY_LIVELOCK,
+ DUMP_TYPE_AP_LINKUP_FAILURE,
+ DUMP_TYPE_AP_ABNORMAL_ACCESS,
+ DUMP_TYPE_CFG_VENDOR_TRIGGERED,
+ DUMP_TYPE_RESUMED_ON_TIMEOUT_TX,
+ DUMP_TYPE_RESUMED_ON_TIMEOUT_RX,
+ DUMP_TYPE_RESUMED_ON_INVALID_RING_RDWR,
+ DUMP_TYPE_DONGLE_HOST_EVENT,
+ DUMP_TYPE_RESUMED_UNKNOWN,
+ DUMP_TYPE_TRANS_ID_MISMATCH,
+ DUMP_TYPE_HANG_ON_IFACE_OP_FAIL,
+#ifdef SUPPORT_LINKDOWN_RECOVERY
+ DUMP_TYPE_READ_SHM_FAIL
+#endif /* SUPPORT_LINKDOWN_RECOVERY */
};
enum dhd_hang_reason {
- HANG_REASON_MASK = 0x8000,
- HANG_REASON_IOCTL_RESP_TIMEOUT = 0x8001,
- HANG_REASON_DONGLE_TRAP = 0x8002,
- HANG_REASON_D3_ACK_TIMEOUT = 0x8003,
- HANG_REASON_BUS_DOWN = 0x8004,
- HANG_REASON_MSGBUF_LIVELOCK = 0x8006,
- HANG_REASON_IFACE_DEL_FAILURE = 0x8007,
- HANG_REASON_HT_AVAIL_ERROR = 0x8008,
- HANG_REASON_PCIE_RC_LINK_UP_FAIL = 0x8009,
- HANG_REASON_PCIE_PKTID_ERROR = 0x800A,
- HANG_REASON_IFACE_ADD_FAILURE = 0x800B,
- HANG_REASON_IOCTL_RESP_TIMEOUT_SCHED_ERROR = 0x800C,
- HANG_REASON_D3_ACK_TIMEOUT_SCHED_ERROR = 0x800D,
- HANG_REASON_SEQUENTIAL_PRIVCMD_ERROR = 0x800E,
- HANG_REASON_SCAN_BUSY = 0x800F,
- HANG_REASON_BSS_UP_FAILURE = 0x8010,
- HANG_REASON_BSS_DOWN_FAILURE = 0x8011,
- HANG_REASON_PCIE_LINK_DOWN_RC_DETECT = 0x8805,
- HANG_REASON_INVALID_EVENT_OR_DATA = 0x8806,
- HANG_REASON_UNKNOWN = 0x8807,
- HANG_REASON_PCIE_LINK_DOWN_EP_DETECT = 0x8808,
- HANG_REASON_PCIE_CTO_DETECT = 0x8809,
- HANG_REASON_MAX = 0x880A
+ HANG_REASON_MASK = 0x8000,
+ HANG_REASON_IOCTL_RESP_TIMEOUT = 0x8001,
+ HANG_REASON_DONGLE_TRAP = 0x8002,
+ HANG_REASON_D3_ACK_TIMEOUT = 0x8003,
+ HANG_REASON_BUS_DOWN = 0x8004,
+ HANG_REASON_MSGBUF_LIVELOCK = 0x8006,
+ HANG_REASON_IFACE_OP_FAILURE = 0x8007,
+ HANG_REASON_HT_AVAIL_ERROR = 0x8008,
+ HANG_REASON_PCIE_RC_LINK_UP_FAIL = 0x8009,
+ HANG_REASON_PCIE_PKTID_ERROR = 0x800A,
+ HANG_REASON_PCIE_LINK_DOWN = 0x8805,
+ HANG_REASON_INVALID_EVENT_OR_DATA = 0x8806,
+ HANG_REASON_UNKNOWN = 0x8807,
+ HANG_REASON_MAX = 0x8808
};
-#define WLC_E_DEAUTH_MAX_REASON 0x0FFF
-
enum dhd_rsdb_scan_features {
/* Downgraded scan feature for AP active */
RSDB_SCAN_DOWNGRADED_AP_SCAN = 0x01,
RSDB_SCAN_DOWNGRADED_CH_PRUNE_ALL = 0x20
};
-#define VENDOR_SEND_HANG_EXT_INFO_LEN (800 + 1)
-
-#ifdef DHD_EWPR_VER2
-#define VENDOR_SEND_HANG_EXT_INFO_VER 20181111
-#else
-#define VENDOR_SEND_HANG_EXT_INFO_VER 20170905
-#endif // endif
-
-#define HANG_INFO_TRAP_T_NAME_MAX 6
-#define HANG_INFO_TRAP_T_REASON_IDX 0
-#define HANG_INFO_TRAP_T_SUBTYPE_IDX 2
-#define HANG_INFO_TRAP_T_OFFSET_IDX 3
-#define HANG_INFO_TRAP_T_EPC_IDX 4
-#define HANG_FIELD_STR_MAX_LEN 9
-#define HANG_FIELD_CNT_MAX 69
-#define HANG_FIELD_IF_FAILURE_CNT 10
-#define HANG_FIELD_IOCTL_RESP_TIMEOUT_CNT 8
-#define HANG_FIELD_TRAP_T_STACK_CNT_MAX 16
-#define HANG_FIELD_MISMATCH_CNT 10
-#define HANG_INFO_BIGDATA_KEY_STACK_CNT 4
-
-#define DEBUG_DUMP_TIME_BUF_LEN (16 + 1)
-/* delimiter between values */
-#define HANG_KEY_DEL ' '
-#define HANG_RAW_DEL '_'
-
-#ifdef DHD_EWPR_VER2
-#define HANG_INFO_BIGDATA_EXTRA_KEY 4
-#define HANG_INFO_TRAP_T_EXTRA_KEY_IDX 5
-#endif // endif
-
/* Packet alignment for most efficient SDIO (can change based on platform) */
#ifndef DHD_SDALIGN
#define DHD_SDALIGN 32
-#endif // endif
-
-#define DHD_TX_CONTEXT_MASK 0xff
-#define DHD_TX_START_XMIT 0x01
-#define DHD_TX_SEND_PKT 0x02
-#define DHD_IF_SET_TX_ACTIVE(ifp, context) \
- ifp->tx_paths_active |= context;
-#define DHD_IF_CLR_TX_ACTIVE(ifp, context) \
- ifp->tx_paths_active &= ~context;
-#define DHD_IF_IS_TX_ACTIVE(ifp) \
- (ifp->tx_paths_active)
+#endif
+
/**
* DMA-able buffer parameters
* - dmaaddr_t is 32bits on a 32bit host.
uint8 pend_pkts;
} reorder_info_t;
-/* throughput test packet format */
-typedef struct tput_pkt {
- /* header */
- uint8 mac_sta[ETHER_ADDR_LEN];
- uint8 mac_ap[ETHER_ADDR_LEN];
- uint16 pkt_type;
- uint8 PAD[2];
- /* data */
- uint32 crc32;
- uint32 pkt_id;
- uint32 num_pkts;
-} tput_pkt_t;
-
-typedef enum {
- TPUT_PKT_TYPE_NORMAL,
- TPUT_PKT_TYPE_STOP
-} tput_pkt_type_t;
-
-#define TPUT_TEST_MAX_PAYLOAD 1500
-#define TPUT_TEST_WAIT_TIMEOUT_DEFAULT 5000
-
#ifdef DHDTCPACK_SUPPRESS
enum {
TCPACK_SUP_HOLD,
TCPACK_SUP_LAST_MODE
};
+
+#ifdef BCMSDIO
+#define TCPACK_SUP_DEFAULT TCPACK_SUP_DELAYTX
+#elif defined(BCMPCIE)
+#define TCPACK_SUP_DEFAULT TCPACK_SUP_HOLD
+#else
+#define TCPACK_SUP_DEFAULT TCPACK_SUP_OFF
+#endif /* BCMSDIO */
#endif /* DHDTCPACK_SUPPRESS */
+#if defined(TRAFFIC_MGMT_DWM)
+#define DHD_DWM_TBL_SIZE 57
+/* DSCP WMM AC Mapping macros and structures */
+#define DHD_TRF_MGMT_DWM_FILTER_BIT 0x8
+#define DHD_TRF_MGMT_DWM_PRIO_BITS 0x7
+#define DHD_TRF_MGMT_DWM_FAVORED_BIT 0x10
+#define DHD_TRF_MGMT_DWM_PRIO(dwm_tbl_entry) ((dwm_tbl_entry) & DHD_TRF_MGMT_DWM_PRIO_BITS)
+#define DHD_TRF_MGMT_DWM_IS_FAVORED_SET(dwm_tbl_entry) \
+ ((dwm_tbl_entry) & DHD_TRF_MGMT_DWM_FAVORED_BIT)
+#define DHD_TRF_MGMT_DWM_SET_FAVORED(dwm_tbl_entry) \
+ ((dwm_tbl_entry) |= DHD_TRF_MGMT_DWM_FAVORED_BIT)
+#define DHD_TRF_MGMT_DWM_IS_FILTER_SET(dwm_tbl_entry) \
+ ((dwm_tbl_entry) & DHD_TRF_MGMT_DWM_FILTER_BIT)
+#define DHD_TRF_MGMT_DWM_SET_FILTER(dwm_tbl_entry) \
+ ((dwm_tbl_entry) |= DHD_TRF_MGMT_DWM_FILTER_BIT)
+
+typedef struct {
+ uint8 dhd_dwm_enabled;
+ uint8 dhd_dwm_tbl[DHD_DWM_TBL_SIZE];
+} dhd_trf_mgmt_dwm_tbl_t;
+#endif
+
#define DHD_NULL_CHK_AND_RET(cond) \
if (!cond) { \
DHD_ERROR(("%s " #cond " is NULL\n", __FUNCTION__)); \
#endif /* defined(WLTDLS) && defined(PCIE_FULL_DONGLE) */
#ifdef DHD_LOG_DUMP
-#define DUMP_SSSR_ATTR_START 2
-#define DUMP_SSSR_ATTR_COUNT 6
-
-typedef enum {
- SSSR_C0_D11_BEFORE = 0,
- SSSR_C0_D11_AFTER = 1,
- SSSR_C1_D11_BEFORE = 2,
- SSSR_C1_D11_AFTER = 3,
- SSSR_DIG_BEFORE = 4,
- SSSR_DIG_AFTER = 5
-} EWP_SSSR_DUMP;
-
-typedef enum {
- DLD_BUF_TYPE_GENERAL = 0,
- DLD_BUF_TYPE_PRESERVE = 1,
- DLD_BUF_TYPE_SPECIAL = 2,
- DLD_BUF_TYPE_ECNTRS = 3,
- DLD_BUF_TYPE_FILTER = 4,
- DLD_BUF_TYPE_ALL = 5
-} log_dump_type_t;
-
-#define LOG_DUMP_MAGIC 0xDEB3DEB3
-#define HEALTH_CHK_BUF_SIZE 256
-
-#ifdef EWP_ECNTRS_LOGGING
-#define ECNTR_RING_ID 0xECDB
-#define ECNTR_RING_NAME "ewp_ecntr_ring"
-#endif /* EWP_ECNTRS_LOGGING */
-
-#ifdef EWP_RTT_LOGGING
-#define RTT_RING_ID 0xADCD
-#define RTT_RING_NAME "ewp_rtt_ring"
-#endif /* EWP_ECNTRS_LOGGING */
-
-#if defined(DEBUGABILITY) && defined(EWP_ECNTRS_LOGGING)
-#error "Duplicate rings will be created since both the features are enabled"
-#endif /* DEBUGABILITY && EWP_ECNTRS_LOGGING */
-
-typedef enum {
- LOG_DUMP_SECTION_GENERAL = 0,
- LOG_DUMP_SECTION_ECNTRS,
- LOG_DUMP_SECTION_SPECIAL,
- LOG_DUMP_SECTION_DHD_DUMP,
- LOG_DUMP_SECTION_EXT_TRAP,
- LOG_DUMP_SECTION_HEALTH_CHK,
- LOG_DUMP_SECTION_PRESERVE,
- LOG_DUMP_SECTION_COOKIE,
- LOG_DUMP_SECTION_FLOWRING,
- LOG_DUMP_SECTION_STATUS,
- LOG_DUMP_SECTION_RTT
-} log_dump_section_type_t;
-
-/* Each section in the debug_dump log file shall begin with a header */
-typedef struct {
- uint32 magic; /* 0xDEB3DEB3 */
- uint32 type; /* of type log_dump_section_type_t */
- uint64 timestamp;
- uint32 length; /* length of the section that follows */
- uint32 pad;
-} log_dump_section_hdr_t;
-
/* The structure below describes the ring buffer. */
struct dhd_log_dump_buf
{
spinlock_t lock;
- void *dhd_pub;
unsigned int enable;
unsigned int wraparound;
unsigned long max;
};
#define DHD_LOG_DUMP_MAX_TEMP_BUFFER_SIZE 256
-#define DHD_LOG_DUMP_MAX_TAIL_FLUSH_SIZE (80 * 1024)
-
-extern void dhd_log_dump_write(int type, char *binary_data,
- int binary_len, const char *fmt, ...);
+extern void dhd_log_dump_write(int type, const char *fmt, ...);
+extern char *dhd_log_dump_get_timestamp(void);
#endif /* DHD_LOG_DUMP */
-/* DEBUG_DUMP SUB COMMAND */
-enum {
- CMD_DEFAULT,
- CMD_UNWANTED,
- CMD_DISCONNECTED,
- CMD_MAX
-};
-
-#define DHD_LOG_DUMP_TS_MULTIPLIER_VALUE 60
-#define DHD_LOG_DUMP_TS_FMT_YYMMDDHHMMSSMSMS "%02d%02d%02d%02d%02d%02d%04d"
-#define DHD_DEBUG_DUMP_TYPE "debug_dump_FORUSER"
-#define DHD_DUMP_SUBSTR_UNWANTED "_unwanted"
-#define DHD_DUMP_SUBSTR_DISCONNECTED "_disconnected"
-
-#ifdef DNGL_AXI_ERROR_LOGGING
-#define DHD_DUMP_AXI_ERROR_FILENAME "axi_error"
-#define DHD_DUMP_HAL_FILENAME_SUFFIX "_hal"
-#endif /* DNGL_AXI_ERROR_LOGGING */
-
-extern void get_debug_dump_time(char *str);
-extern void clear_debug_dump_time(char *str);
-
-#define FW_LOGSET_MASK_ALL 0xFFFFu
-
-#ifdef WL_MONITOR
-#define MONPKT_EXTRA_LEN 48u
-#endif /* WL_MONITOR */
-
-#define DHDIF_FWDER(dhdif) FALSE
-
+#if defined(CUSTOMER_HW2)
#define DHD_COMMON_DUMP_PATH "/data/misc/wifi/"
+#else
+#define DHD_COMMON_DUMP_PATH "/installmedia/"
+#endif
struct cntry_locales_custom {
char iso_abbrev[WLC_CNTRY_BUF_SZ]; /* ISO 3166-1 country abbreviation */
int32 custom_locale_rev; /* Custom locale revision, default -1 */
};
-int dhd_send_msg_to_daemon(struct sk_buff *skb, void *data, int size);
+#ifdef REPORT_FATAL_TIMEOUTS
+typedef struct timeout_info {
+ void *scan_timer_lock;
+ void *join_timer_lock;
+ void *cmd_timer_lock;
+ void *bus_timer_lock;
+ uint32 scan_timeout_val;
+ uint32 join_timeout_val;
+ uint32 cmd_timeout_val;
+ uint32 bus_timeout_val;
+ bool scan_timer_active;
+ bool join_timer_active;
+ bool cmd_timer_active;
+ bool bus_timer_active;
+ osl_timer_t *scan_timer;
+ osl_timer_t *join_timer;
+ osl_timer_t *cmd_timer;
+ osl_timer_t *bus_timer;
+ uint16 cmd_request_id;
+ uint32 cmd;
+ uint32 cmd_join_error;
+} timeout_info_t;
+#endif /* REPORT_FATAL_TIMEOUTS */
+
+#ifdef HOFFLOAD_MODULES
+/* Metadata structure containing module information */
+struct module_metadata {
+ void *data; /* module data */
+ uint32_t size; /* module size */
+ u64 data_addr; /* address of module data in host */
+};
+#endif
#ifdef DMAMAP_STATS
typedef struct dmamap_stats {
} dma_stats_t;
#endif /* DMAMAP_STATS */
-/* see wlfc_proto.h for tx status details */
-#define DHD_MAX_TX_STATUS_MSGS 9u
-
-#ifdef TX_STATUS_LATENCY_STATS
-typedef struct dhd_if_tx_status_latency {
- /* total number of tx_status received on this interface */
- uint64 num_tx_status;
- /* cumulative tx_status latency for this interface */
- uint64 cum_tx_status_latency;
-} dhd_if_tx_status_latency_t;
-#endif /* TX_STATUS_LATENCY_STATS */
-
-#if defined(SHOW_LOGTRACE) && defined(DHD_USE_KTHREAD_FOR_LOGTRACE)
-/* Timestamps to trace dhd_logtrace_thread() */
-struct dhd_logtrace_thr_ts {
- uint64 entry_time;
- uint64 sem_down_time;
- uint64 flush_time;
- uint64 unexpected_break_time;
- uint64 complete_time;
-};
-#endif /* SHOW_LOGTRACE && DHD_USE_KTHREAD_FOR_LOGTRACE */
-
-/* Enable Reserve STA flowrings only for Android */
-#define DHD_LIMIT_MULTI_CLIENT_FLOWRINGS
-
-typedef enum dhd_induce_error_states
-{
- DHD_INDUCE_ERROR_CLEAR = 0x0,
- DHD_INDUCE_IOCTL_TIMEOUT = 0x1,
- DHD_INDUCE_D3_ACK_TIMEOUT = 0x2,
- DHD_INDUCE_LIVELOCK = 0x3,
- DHD_INDUCE_DROP_OOB_IRQ = 0x4,
- DHD_INDUCE_DROP_AXI_SIG = 0x5,
- DHD_INDUCE_ERROR_MAX = 0x6
-} dhd_induce_error_states_t;
-
-#ifdef DHD_HP2P
-#define MAX_TX_HIST_BIN 16
-#define MAX_RX_HIST_BIN 10
-#define MAX_HP2P_FLOWS 16
-#define HP2P_PRIO 7
-#define HP2P_PKT_THRESH 48
-#define HP2P_TIME_THRESH 200
-#define HP2P_PKT_EXPIRY 40
-#define HP2P_TIME_SCALE 32
-
-typedef struct hp2p_info {
- void *dhd_pub;
- uint16 flowid;
- bool hrtimer_init;
- void *ring;
- struct tasklet_hrtimer timer;
- uint64 num_pkt_limit;
- uint64 num_timer_limit;
- uint64 num_timer_start;
- uint64 tx_t0[MAX_TX_HIST_BIN];
- uint64 tx_t1[MAX_TX_HIST_BIN];
- uint64 rx_t0[MAX_RX_HIST_BIN];
-} hp2p_info_t;
-#endif /* DHD_HP2P */
-
-typedef enum {
- FW_UNLOADED = 0,
- FW_DOWNLOAD_IN_PROGRESS = 1,
- FW_DOWNLOAD_DONE = 2
-} fw_download_status_t;
-
-/**
- * Common structure for module and instance linkage.
- * Instantiated once per hardware (dongle) instance that this DHD manages.
- */
+/* Common structure for module and instance linkage */
typedef struct dhd_pub {
/* Linkage pointers */
osl_t *osh; /* OSL handle */
struct dhd_prot *prot; /* Protocol module handle */
struct dhd_info *info; /* Info module handle */
struct dhd_dbg *dbg; /* Debugability module handle */
-#if defined(SHOW_LOGTRACE) && defined(DHD_USE_KTHREAD_FOR_LOGTRACE)
- struct dhd_logtrace_thr_ts logtrace_thr_ts;
-#endif /* SHOW_LOGTRACE && DHD_USE_KTHREAD_FOR_LOGTRACE */
/* to NDIS developer, the structure dhd_common is redundant,
* please do NOT merge it back from other branches !!!
uint maxctl; /* Max size rxctl request from proto to bus */
uint rxsz; /* Rx buffer size bus module should use */
uint8 wme_dp; /* wme discard priority */
-#ifdef DNGL_AXI_ERROR_LOGGING
- uint32 axierror_logbuf_addr;
- bool axi_error;
- struct dhd_axi_error_dump *axi_err_dump;
-#endif /* DNGL_AXI_ERROR_LOGGING */
+
/* Dongle media info */
bool iswl; /* Dongle-resident driver is wl */
ulong drv_version; /* Version of dongle-resident driver */
ulong rx_readahead_cnt; /* Number of packets where header read-ahead was used. */
ulong tx_realloc; /* Number of tx packets we had to realloc for headroom */
ulong fc_packets; /* Number of flow control pkts recvd */
- ulong tx_big_packets; /* Dropped data packets that are larger than MAX_MTU_SZ */
+
#ifdef DMAMAP_STATS
/* DMA Mapping statistics */
dma_stats_t dma_stats;
#ifdef PKT_FILTER_SUPPORT
int early_suspended; /* Early suspend status */
int dhcp_in_progress; /* DHCP period */
-#endif // endif
+#endif
/* Pkt filter definition */
char * pktfilter[100];
*/
/* #define WL_ENABLE_P2P_IF 1 */
- struct mutex wl_start_stop_lock; /* lock/unlock for Android start/stop */
- struct mutex wl_softap_lock; /* lock/unlock for any SoftAP/STA settings */
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
+ struct mutex wl_start_stop_lock; /* lock/unlock for Android start/stop */
+ struct mutex wl_softap_lock; /* lock/unlock for any SoftAP/STA settings */
+#endif
#ifdef PROP_TXSTATUS
bool wlfc_enabled;
#endif /* PROP_TXSTATUS */
#ifdef PNO_SUPPORT
void *pno_state;
-#endif // endif
+#endif
#ifdef RTT_SUPPORT
void *rtt_state;
bool rtt_supported;
-#endif // endif
-#ifdef ROAM_AP_ENV_DETECTION
- bool roam_env_detection;
-#endif // endif
+#endif
bool dongle_isolation;
bool is_pcie_watchdog_reset;
-
-/* Begin - Variables to track Bus Errors */
bool dongle_trap_occured; /* flag for sending HANG event to upper layer */
bool iovar_timeout_occured; /* flag to indicate iovar resumed on timeout */
- bool is_sched_error; /* flag to indicate timeout due to scheduling issue */
#ifdef PCIE_FULL_DONGLE
bool d3ack_timeout_occured; /* flag to indicate d3ack resumed on timeout */
- bool livelock_occured; /* flag to indicate livelock occured */
- bool pktid_audit_failed; /* flag to indicate pktid audit failure */
#endif /* PCIE_FULL_DONGLE */
- bool iface_op_failed; /* flag to indicate interface operation failed */
- bool scan_timeout_occurred; /* flag to indicate scan has timedout */
- bool scan_busy_occurred; /* flag to indicate scan busy occurred */
#ifdef BT_OVER_SDIO
bool is_bt_recovery_required;
-#endif // endif
- bool smmu_fault_occurred; /* flag to indicate SMMU Fault */
- /*
- * Add any new variables to track Bus errors above
- * this line. Also ensure that the variable is
- * cleared from dhd_clear_bus_errors
- */
-/* End - Variables to track Bus Errors */
-
+#endif
int hang_was_sent;
- int hang_was_pending;
int rxcnt_timeout; /* counter rxcnt timeout to send HANG */
int txcnt_timeout; /* counter txcnt timeout to send HANG */
#ifdef BCMPCIE
#endif /* BCMPCIE */
bool hang_report; /* enable hang report by default */
uint16 hang_reason; /* reason codes for HANG event */
+#if defined(DHD_HANG_SEND_UP_TEST)
+ uint req_hang_type;
+#endif /* DHD_HANG_SEND_UP_TEST */
#if defined(CONFIG_BCM_DETECT_CONSECUTIVE_HANG)
uint hang_counts;
#endif /* CONFIG_BCM_DETECT_CONSECUTIVE_HANG */
+#ifdef WLMEDIA_HTSF
+ uint8 htsfdlystat_sz; /* Size of delay stats, max 255B */
+#endif
#ifdef WLTDLS
bool tdls_enable;
-#endif // endif
+#endif
struct reorder_info *reorder_bufs[WLHOST_REORDERDATA_MAXFLOWS];
- #define WLC_IOCTL_MAXBUF_FWCAP 1024
+ #define WLC_IOCTL_MAXBUF_FWCAP 512
char fw_capabilities[WLC_IOCTL_MAXBUF_FWCAP];
#define MAXSKBPEND 1024
void *skbbuf[MAXSKBPEND];
#endif /* DHDTCPACK_SUPPRESS */
#if defined(ARP_OFFLOAD_SUPPORT)
uint32 arp_version;
- bool hmac_updated;
-#endif // endif
-#if defined(BCMSUP_4WAY_HANDSHAKE)
- bool fw_4way_handshake; /* Whether firmware will to do the 4way handshake. */
-#endif // endif
+#endif
#ifdef DEBUG_DPC_THREAD_WATCHDOG
bool dhd_bug_on;
#endif /* DEBUG_DPC_THREAD_WATCHDOG */
int chan_isvht80;
#endif /* CUSTOM_SET_CPUCORE */
+
void *sta_pool; /* pre-allocated pool of sta objects */
void *staid_allocator; /* allocator of sta indexes */
#ifdef PCIE_FULL_DONGLE
void *if_flow_lkup; /* per interface flowid lkup hash table */
void *flowid_lock; /* per os lock for flowid info protection */
void *flowring_list_lock; /* per os lock for flowring list protection */
- uint8 max_multi_client_flow_rings;
- uint8 multi_client_flow_rings;
uint32 num_flow_rings;
cumm_ctr_t cumm_ctr; /* cumm queue length placeholder */
cumm_ctr_t l2cumm_ctr; /* level 2 cumm queue length placeholder */
bool dma_h2d_ring_upd_support;
bool dma_ring_upd_overwrite; /* host overwrites support setting */
- bool hwa_enable;
- uint hwa_inited;
-
bool idma_enable;
uint idma_inited;
+ bool idma_retention_ds; /* Implicit DMA memory retention */
bool ifrm_enable; /* implicit frm enable */
uint ifrm_inited; /* implicit frm init */
- bool dar_enable; /* use DAR registers */
- uint dar_inited;
-
- bool fast_delete_ring_support; /* fast delete ring supported */
-
+#ifdef DHD_WMF
+ bool wmf_ucast_igmp;
+#ifdef DHD_IGMP_UCQUERY
+ bool wmf_ucast_igmp_query;
+#endif
+#ifdef DHD_UCAST_UPNP
+ bool wmf_ucast_upnp;
+#endif
+#endif /* DHD_WMF */
+#if defined(TRAFFIC_MGMT_DWM)
+ dhd_trf_mgmt_dwm_tbl_t dhd_tm_dwm_tbl;
+#endif
#ifdef DHD_L2_FILTER
unsigned long l2_filter_cnt; /* for L2_FILTER ARP table timeout */
#endif /* DHD_L2_FILTER */
#ifdef DHD_SSSR_DUMP
bool sssr_inited;
- bool sssr_dump_collected; /* Flag to indicate sssr dump is collected */
- sssr_reg_info_v1_t sssr_reg_info;
+ sssr_reg_info_t sssr_reg_info;
uint8 *sssr_mempool;
uint *sssr_d11_before[MAX_NUM_D11CORES];
uint *sssr_d11_after[MAX_NUM_D11CORES];
bool sssr_d11_outofreset[MAX_NUM_D11CORES];
- uint *sssr_dig_buf_before;
- uint *sssr_dig_buf_after;
- uint32 sssr_dump_mode;
- bool collect_sssr; /* Flag to indicate SSSR dump is required */
+ uint *sssr_vasip_buf_before;
+ uint *sssr_vasip_buf_after;
#endif /* DHD_SSSR_DUMP */
uint8 *soc_ram;
uint32 soc_ram_length;
uint32 memdump_type;
#ifdef DHD_FW_COREDUMP
uint32 memdump_enabled;
-#ifdef DHD_DEBUG_UART
bool memdump_success;
-#endif /* DHD_DEBUG_UART */
#endif /* DHD_FW_COREDUMP */
#ifdef PCIE_FULL_DONGLE
#ifdef WLTDLS
#endif /* PCIE_FULL_DONGLE */
#ifdef DHD_ULP
void *dhd_ulp;
-#endif // endif
+#endif
+#ifdef CACHE_FW_IMAGES
+ char *cached_fw;
+ int cached_fw_length;
+ char *cached_nvram;
+ int cached_nvram_length;
+ char *cached_clm;
+ int cached_clm_length;
+#endif
#ifdef WLTDLS
uint32 tdls_mode;
-#endif // endif
+#endif
#ifdef GSCAN_SUPPORT
bool lazy_roam_enable;
-#endif // endif
+#endif
#if defined(PKT_FILTER_SUPPORT) && defined(APF)
bool apf_set;
#endif /* PKT_FILTER_SUPPORT && APF */
- void *macdbg_info;
#ifdef DHD_WET
void *wet_info;
-#endif // endif
+#endif
bool h2d_phase_supported;
bool force_dongletrap_on_bad_h2d_phase;
uint32 dongle_trap_data;
- fw_download_status_t fw_download_status;
+ bool cto_enable; /* enable PCIE CTO Prevention and recovery */
+ uint32 cto_threshold; /* PCIE CTO timeout threshold */
+ bool fw_download_done;
trap_t last_trap_info; /* trap info from the last trap */
uint8 rand_mac_oui[DOT11_OUI_LEN];
#ifdef DHD_LOSSLESS_ROAMING
uint8 dequeue_prec_map;
uint8 prio_8021x;
-#endif // endif
-#ifdef WL_NATOE
- struct dhd_nfct_info *nfct;
- spinlock_t nfct_lock;
-#endif /* WL_NATOE */
+#endif
+#ifdef REPORT_FATAL_TIMEOUTS
+ timeout_info_t *timeout_info;
+#endif /* REPORT_FATAL_TIMEOUTS */
/* timesync link */
struct dhd_ts *ts;
bool d2h_hostrdy_supported;
-#ifdef DHD_PCIE_NATIVE_RUNTIMEPM
- atomic_t block_bus;
-#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
-#if defined(DBG_PKT_MON)
+#if defined(DBG_PKT_MON) || defined(DHD_PKT_LOGGING)
bool d11_tx_status;
-#endif // endif
- uint16 ndo_version; /* ND offload version supported */
+#endif /* DBG_PKT_MON || DHD_PKT_LOGGING */
+ uint16 ndo_version; /* ND offload version supported */
#ifdef NDO_CONFIG_SUPPORT
bool ndo_enable; /* ND offload feature enable */
bool ndo_host_ip_overflow; /* # of host ip addr exceed FW capacity */
uint32 ndo_max_host_ip; /* # of host ip addr supported by FW */
#endif /* NDO_CONFIG_SUPPORT */
-#if defined(DHD_LOG_DUMP)
- /* buffer to hold 'dhd dump' data before dumping to file */
- uint8 *concise_dbg_buf;
- uint64 last_file_posn;
- int logdump_periodic_flush;
- /* ecounter debug ring */
-#ifdef EWP_ECNTRS_LOGGING
- void *ecntr_dbg_ring;
-#endif // endif
-#ifdef EWP_RTT_LOGGING
- void *rtt_dbg_ring;
-#endif // endif
-#ifdef DNGL_EVENT_SUPPORT
- uint8 health_chk_event_data[HEALTH_CHK_BUF_SIZE];
-#endif // endif
- void *logdump_cookie;
-#endif /* DHD_LOG_DUMP */
- uint32 dhd_console_ms; /** interval for polling the dongle for console (log) messages */
- bool ext_trap_data_supported;
- uint32 *extended_trap_data;
+#if defined(DHD_EFI) && defined(DHD_LOG_DUMP)
+ uint8 log_capture_enable;
+#endif /* DHD_EFI && DHD_LOG_DUMP */
+ bool max_dtim_enable; /* use MAX bcn_li_dtim value in suspend mode */
+#ifdef PCIE_OOB
+ bool d2h_no_oob_dw;
+#endif /* PCIE_OOB */
+#ifdef PCIE_INB_DW
+ bool d2h_inband_dw;
+ enum dhd_bus_ds_state ds_state;
+#endif /* PCIE_INB_DW */
+#ifdef CUSTOM_SET_ANTNPM
+ uint32 mimo_ant_set;
+#endif /* CUSTOM_SET_ANTNPM */
+#ifdef CUSTOM_SET_OCLOFF
+ bool ocl_off;
+#endif /* CUSTOM_SET_OCLOFF */
+#ifdef HOFFLOAD_MODULES
+ struct module_metadata hmem;
+#endif
+ bool wbtext_support;
#ifdef DUMP_IOCTL_IOV_LIST
/* dump iovar list */
dll_t dump_iovlist_head;
uint8 dump_iovlist_len;
#endif /* DUMP_IOCTL_IOV_LIST */
-#ifdef CUSTOM_SET_ANTNPM
- uint32 mimo_ant_set;
-#endif /* CUSTOM_SET_ANTNPM */
#ifdef DHD_DEBUG
- /* memwaste feature */
+/* memwaste feature */
dll_t mw_list_head; /* memwaste list head */
uint32 mw_id; /* memwaste list unique id */
#endif /* DHD_DEBUG */
#ifdef WLTDLS
spinlock_t tdls_lock;
#endif /* WLTDLS */
- uint pcie_txs_metadata_enable;
- uint wbtext_policy; /* wbtext policy of dongle */
- bool wbtext_support; /* for product policy only */
- bool max_dtim_enable; /* use MAX bcn_li_dtim value in suspend mode */
- tput_test_t tput_data;
- uint64 tput_start_ts;
- uint64 tput_stop_ts;
-#ifdef WL_MONITOR
- bool monitor_enable;
-#endif // endif
- uint dhd_watchdog_ms_backup;
- void *event_log_filter;
- char debug_dump_time_str[DEBUG_DUMP_TIME_BUF_LEN];
- uint32 logset_prsrv_mask;
- bool wl_event_enabled;
- bool logtrace_pkt_sendup;
-#ifdef DHD_DUMP_MNGR
- struct _dhd_dump_file_manage *dump_file_manage;
-#endif /* DHD_DUMP_MNGR */
- int debug_dump_subcmd;
- uint64 debug_dump_time_sec;
- bool hscb_enable;
- wait_queue_head_t tx_completion_wait;
- uint32 batch_tx_pkts_cmpl;
- uint32 batch_tx_num_pkts;
-#ifdef DHD_ERPOM
- bool enable_erpom;
- pom_func_handler_t pom_wlan_handler;
- int (*pom_func_register)(pom_func_handler_t *func);
- int (*pom_func_deregister)(pom_func_handler_t *func);
- int (*pom_toggle_reg_on)(uchar func_id, uchar reason);
-#endif /* DHD_ERPOM */
-#ifdef EWP_EDL
- bool dongle_edl_support;
- dhd_dma_buf_t edl_ring_mem;
-#endif /* EWP_EDL */
- struct mutex ndev_op_sync;
-
- bool debug_buf_dest_support;
- uint32 debug_buf_dest_stat[DEBUG_BUF_DEST_MAX];
-#if defined(DHD_H2D_LOG_TIME_SYNC)
-#define DHD_H2D_LOG_TIME_STAMP_MATCH (10000) /* 10 Seconds */
- /*
- * Interval for updating the dongle console message time stamp with the Host (DHD)
- * time stamp
- */
- uint32 dhd_rte_time_sync_ms;
-#endif /* DHD_H2D_LOG_TIME_SYNC */
- int wlc_ver_major;
- int wlc_ver_minor;
-#ifdef DHD_STATUS_LOGGING
- void *statlog;
-#endif /* DHD_STATUS_LOGGING */
-#ifdef DHD_HP2P
- bool hp2p_enable;
- bool hp2p_infra_enable;
- bool hp2p_capable;
- bool hp2p_ts_capable;
- uint16 pkt_thresh;
- uint16 time_thresh;
- uint16 pkt_expiry;
- hp2p_info_t hp2p_info[MAX_HP2P_FLOWS];
- bool hp2p_ring_active;
-#endif /* D2H_HP2P */
-#ifdef DHD_DB0TS
- bool db0ts_capable;
-#endif /* DHD_DB0TS */
- bool event_log_max_sets_queried;
- uint32 event_log_max_sets;
- uint16 dhd_induce_error;
-#ifdef CONFIG_SILENT_ROAM
- bool sroam_turn_on; /* Silent roam monitor enable flags */
- bool sroamed; /* Silent roam monitor check flags */
-#endif /* CONFIG_SILENT_ROAM */
- bool extdtxs_in_txcpl;
- bool hostrdy_after_init;
-#ifdef SUPPORT_SET_TID
- uint8 tid_mode;
- uint32 target_uid;
- uint8 target_tid;
-#endif /* SUPPORT_SET_TID */
-#ifdef DHD_PKTDUMP_ROAM
- void *pktcnts;
-#endif /* DHD_PKTDUMP_ROAM */
- bool disable_dtim_in_suspend; /* Disable set bcn_li_dtim in suspend */
-#ifdef CSI_SUPPORT
- struct list_head csi_list;
- int csi_count;
-#endif /* CSI_SUPPORT */
+#ifdef WLADPS_SEAK_AP_WAR
+ uint32 disabled_adps;
+#endif /* WLADPS_SEAK_AP_WAR */
+ bool ext_trap_data_supported;
+ uint32 *extended_trap_data;
+#ifdef DHD_PKT_LOGGING
+ struct dhd_pktlog *pktlog;
+#endif /* DHD_PKT_LOGGING */
+#if defined(STAT_REPORT)
+ void *stat_report_info;
+#endif
char *clm_path; /* module_param: path to clm vars file */
char *conf_path; /* module_param: path to config vars file */
struct dhd_conf *conf; /* Bus module handle */
void *adapter; /* adapter information, interrupt, fw path etc. */
- void *event_params;
#ifdef BCMDBUS
bool dhd_remove;
#endif /* BCMDBUS */
-#ifdef WL_ESCAN
- struct wl_escan_info *escan;
-#endif
-#if defined(WL_WIRELESS_EXT)
- void *wext_info;
-#endif
-#ifdef WL_EXT_IAPSTA
- void *iapsta_params;
-#endif
- int hostsleep;
-#ifdef SENDPROB
- bool recv_probereq;
-#endif
} dhd_pub_t;
typedef struct {
/* Packet Tag for PCIE Full Dongle DHD */
typedef struct dhd_pkttag_fd {
uint16 flowid; /* Flowring Id */
- uint16 ifid;
-#ifndef DHD_PCIE_PKTID
+ uint16 dataoff; /* start of packet */
uint16 dma_len; /* pkt len for DMA_MAP/UNMAP */
dmaaddr_t pa; /* physical address */
void *dmah; /* dma mapper handle */
void *secdma; /* secure dma sec_cma_info handle */
-#endif /* !DHD_PCIE_PKTID */
-#if defined(TX_STATUS_LATENCY_STATS)
- uint64 q_time_us; /* time when tx pkt queued to flowring */
-#endif // endif
} dhd_pkttag_fd_t;
/* Packet Tag for DHD PCIE Full Dongle */
#define DHD_PKT_GET_SECDMA(pkt) ((DHD_PKTTAG_FD(pkt))->secdma)
#define DHD_PKT_SET_SECDMA(pkt, pkt_secdma) \
DHD_PKTTAG_FD(pkt)->secdma = (void *)(pkt_secdma)
-
-#if defined(TX_STATUS_LATENCY_STATS)
-#define DHD_PKT_GET_QTIME(pkt) ((DHD_PKTTAG_FD(pkt))->q_time_us)
-#define DHD_PKT_SET_QTIME(pkt, pkt_q_time_us) \
- DHD_PKTTAG_FD(pkt)->q_time_us = (uint64)(pkt_q_time_us)
-#endif // endif
#endif /* PCIE_FULL_DONGLE */
#if defined(BCMWDF)
WDF_DECLARE_CONTEXT_TYPE_WITH_NAME(dhd_workitem_context_t, dhd_get_dhd_workitem_context)
#endif /* (BCMWDF) */
- #if defined(CONFIG_PM_SLEEP)
+ #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && defined(CONFIG_PM_SLEEP)
#define DHD_PM_RESUME_WAIT_INIT(a) DECLARE_WAIT_QUEUE_HEAD(a);
#define _DHD_PM_RESUME_WAIT(a, b) do {\
} \
} while (0)
- #endif /* CONFIG_PM_SLEEP */
+ #endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && defined(CONFIG_PM_SLEEP) */
#ifndef OSL_SLEEP
#define OSL_SLEEP(ms) OSL_DELAY(ms*1000)
int dhd_pno_clean(dhd_pub_t *dhd);
#endif /* PNO_SUPPORT */
+#ifdef HOFFLOAD_MODULES
+void dhd_linux_get_modfw_address(dhd_pub_t *dhd);
+#endif
+
/*
* Wake locks are an Android power management concept. They are used by applications and services
* to request CPU resources.
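*
* A typical (illustrative) bracketing in the DHD code paths looks like:
*
*     DHD_OS_WAKE_LOCK(&dhd->pub);
*     ... work that must finish before the host is allowed to suspend ...
*     DHD_OS_WAKE_UNLOCK(&dhd->pub);
*
* ("dhd" is assumed here to be the driver's dhd_info instance; the macros defined
* below simply forward to dhd_os_wake_lock()/dhd_os_wake_unlock().)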
inline static void MUTEX_LOCK_SOFTAP_SET_INIT(dhd_pub_t * dhdp)
{
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
mutex_init(&dhdp->wl_softap_lock);
+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) */
}
inline static void MUTEX_LOCK_SOFTAP_SET(dhd_pub_t * dhdp)
{
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
mutex_lock(&dhdp->wl_softap_lock);
+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) */
}
inline static void MUTEX_UNLOCK_SOFTAP_SET(dhd_pub_t * dhdp)
{
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
mutex_unlock(&dhdp->wl_softap_lock);
+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) */
}
-#ifdef DHD_DEBUG_WAKE_LOCK
+#define PRINT_CALL_INFO(str)
+#define PRINT_CALL_INFO_TIMEOUT(str, val)
#define DHD_OS_WAKE_LOCK(pub) \
do { \
- printf("call wake_lock: %s %d\n", \
- __FUNCTION__, __LINE__); \
+ PRINT_CALL_INFO("call wakelock"); \
dhd_os_wake_lock(pub); \
} while (0)
#define DHD_OS_WAKE_UNLOCK(pub) \
do { \
- printf("call wake_unlock: %s %d\n", \
- __FUNCTION__, __LINE__); \
+ PRINT_CALL_INFO("call wake_unlock"); \
dhd_os_wake_unlock(pub); \
} while (0)
#define DHD_EVENT_WAKE_LOCK(pub) \
do { \
- printf("call event wake_lock: %s %d\n", \
- __FUNCTION__, __LINE__); \
- dhd_event_wake_lock(pub); \
+ PRINT_CALL_INFO("call event_wake lock"); \
+ dhd_event_wake_lock(pub); \
} while (0)
#define DHD_EVENT_WAKE_UNLOCK(pub) \
do { \
- printf("call event wake_unlock: %s %d\n", \
- __FUNCTION__, __LINE__); \
- dhd_event_wake_unlock(pub); \
+ PRINT_CALL_INFO("call event_wake unlock"); \
+ dhd_event_wake_unlock(pub); \
} while (0)
#define DHD_PM_WAKE_LOCK_TIMEOUT(pub, val) \
do { \
- printf("call pm_wake_timeout enable\n"); \
+ PRINT_CALL_INFO("call pm_wake_timeout enable"); \
dhd_pm_wake_lock_timeout(pub, val); \
} while (0)
#define DHD_PM_WAKE_UNLOCK(pub) \
do { \
- printf("call pm_wake unlock\n"); \
+ PRINT_CALL_INFO("call pm_wake unlock"); \
dhd_pm_wake_unlock(pub); \
} while (0)
#define DHD_TXFL_WAKE_LOCK_TIMEOUT(pub, val) \
do { \
- printf("call pm_wake_timeout enable\n"); \
+ PRINT_CALL_INFO("call pm_wake_timeout enable"); \
dhd_txfl_wake_lock_timeout(pub, val); \
} while (0)
#define DHD_TXFL_WAKE_UNLOCK(pub) \
do { \
- printf("call pm_wake unlock\n"); \
+ PRINT_CALL_INFO("call pm_wake unlock"); \
dhd_txfl_wake_unlock(pub); \
} while (0)
#define DHD_OS_WAKE_LOCK_TIMEOUT(pub) \
do { \
- printf("call wake_lock_timeout: %s %d\n", \
- __FUNCTION__, __LINE__); \
+ PRINT_CALL_INFO("call wake_lock_timeout"); \
dhd_os_wake_lock_timeout(pub); \
} while (0)
#define DHD_OS_WAKE_LOCK_RX_TIMEOUT_ENABLE(pub, val) \
do { \
- printf("call wake_lock_rx_timeout_enable[%d]: %s %d\n", \
- val, __FUNCTION__, __LINE__); \
+ PRINT_CALL_INFO_TIMEOUT("call wake_lock_rx_timeout_enable", val); \
dhd_os_wake_lock_rx_timeout_enable(pub, val); \
} while (0)
#define DHD_OS_WAKE_LOCK_CTRL_TIMEOUT_ENABLE(pub, val) \
do { \
- printf("call wake_lock_ctrl_timeout_enable[%d]: %s %d\n", \
- val, __FUNCTION__, __LINE__); \
+ PRINT_CALL_INFO_TIMEOUT("call wake_lock_ctrl_timeout_enable", val); \
dhd_os_wake_lock_ctrl_timeout_enable(pub, val); \
} while (0)
#define DHD_OS_WAKE_LOCK_CTRL_TIMEOUT_CANCEL(pub) \
do { \
- printf("call wake_lock_ctrl_timeout_cancel: %s %d\n", \
- __FUNCTION__, __LINE__); \
+ PRINT_CALL_INFO("call wake_lock_ctrl_timeout_cancel"); \
dhd_os_wake_lock_ctrl_timeout_cancel(pub); \
} while (0)
#define DHD_OS_WAKE_LOCK_WAIVE(pub) \
do { \
- printf("call wake_lock_waive: %s %d\n", \
- __FUNCTION__, __LINE__); \
+ PRINT_CALL_INFO("call wake_lock_waive"); \
dhd_os_wake_lock_waive(pub); \
} while (0)
#define DHD_OS_WAKE_LOCK_RESTORE(pub) \
do { \
- printf("call wake_lock_restore: %s %d\n", \
- __FUNCTION__, __LINE__); \
+ PRINT_CALL_INFO("call wake_lock_restore"); \
dhd_os_wake_lock_restore(pub); \
} while (0)
#define DHD_OS_WAKE_LOCK_INIT(dhd) \
do { \
- printf("call wake_lock_init: %s %d\n", \
- __FUNCTION__, __LINE__); \
+ PRINT_CALL_INFO("call wake_lock_init"); \
dhd_os_wake_lock_init(dhd); \
} while (0)
#define DHD_OS_WAKE_LOCK_DESTROY(dhd) \
do { \
- printf("call wake_lock_destroy: %s %d\n", \
- __FUNCTION__, __LINE__); \
+ PRINT_CALL_INFO("call wake_lock_destroy"); \
dhd_os_wake_lock_destroy(dhd); \
} while (0)
-#else
-#define DHD_OS_WAKE_LOCK(pub) dhd_os_wake_lock(pub)
-#define DHD_OS_WAKE_UNLOCK(pub) dhd_os_wake_unlock(pub)
-#define DHD_EVENT_WAKE_LOCK(pub) dhd_event_wake_lock(pub)
-#define DHD_EVENT_WAKE_UNLOCK(pub) dhd_event_wake_unlock(pub)
-#define DHD_PM_WAKE_LOCK_TIMEOUT(pub, val) dhd_pm_wake_lock_timeout(pub, val)
-#define DHD_PM_WAKE_UNLOCK(pub) dhd_pm_wake_unlock(pub)
-#define DHD_TXFL_WAKE_LOCK_TIMEOUT(pub, val) dhd_txfl_wake_lock_timeout(pub, val)
-#define DHD_TXFL_WAKE_UNLOCK(pub) dhd_txfl_wake_unlock(pub)
-#define DHD_OS_WAKE_LOCK_TIMEOUT(pub) dhd_os_wake_lock_timeout(pub)
-#define DHD_OS_WAKE_LOCK_RX_TIMEOUT_ENABLE(pub, val) \
- dhd_os_wake_lock_rx_timeout_enable(pub, val)
-#define DHD_OS_WAKE_LOCK_CTRL_TIMEOUT_ENABLE(pub, val) \
- dhd_os_wake_lock_ctrl_timeout_enable(pub, val)
-#define DHD_OS_WAKE_LOCK_CTRL_TIMEOUT_CANCEL(pub) \
- dhd_os_wake_lock_ctrl_timeout_cancel(pub)
-#define DHD_OS_WAKE_LOCK_WAIVE(pub) dhd_os_wake_lock_waive(pub)
-#define DHD_OS_WAKE_LOCK_RESTORE(pub) dhd_os_wake_lock_restore(pub)
-#define DHD_OS_WAKE_LOCK_INIT(dhd) dhd_os_wake_lock_init(dhd);
-#define DHD_OS_WAKE_LOCK_DESTROY(dhd) dhd_os_wake_lock_destroy(dhd);
-#endif /* DHD_DEBUG_WAKE_LOCK */
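For illustration: the patch makes PRINT_CALL_INFO()/PRINT_CALL_INFO_TIMEOUT() unconditionally empty, so the per-call wake lock tracing that used to live behind DHD_DEBUG_WAKE_LOCK is gone. A minimal sketch of how those two hook definitions could be made conditional again, mirroring the PRINT_SCAN_CALL pattern used just below for scan wake locks; this is not part of the patch itself.

#ifdef DHD_DEBUG_WAKE_LOCK
#define PRINT_CALL_INFO(str) \
	printf("%s: %s %d\n", str, __FUNCTION__, __LINE__)
#define PRINT_CALL_INFO_TIMEOUT(str, val) \
	printf("%s[%d]: %s %d\n", str, val, __FUNCTION__, __LINE__)
#else
#define PRINT_CALL_INFO(str)
#define PRINT_CALL_INFO_TIMEOUT(str, val)
#endif /* DHD_DEBUG_WAKE_LOCK */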
#define DHD_OS_WD_WAKE_LOCK(pub) dhd_os_wd_wake_lock(pub)
#define DHD_OS_WD_WAKE_UNLOCK(pub) dhd_os_wd_wake_unlock(pub)
#ifdef DHD_USE_SCAN_WAKELOCK
#ifdef DHD_DEBUG_SCAN_WAKELOCK
+#define PRINT_SCAN_CALL(str) printf("%s: %s %d\n", \
+ str, __FUNCTION__, __LINE__)
+#else
+#define PRINT_SCAN_CALL(str)
+#endif /* DHD_DEBUG_SCAN_WAKELOCK */
#define DHD_OS_SCAN_WAKE_LOCK_TIMEOUT(pub, val) \
do { \
- printf("call wake_lock_scan: %s %d\n", \
- __FUNCTION__, __LINE__); \
+ PRINT_SCAN_CALL("call wake_lock_scan"); \
dhd_os_scan_wake_lock_timeout(pub, val); \
} while (0)
#define DHD_OS_SCAN_WAKE_UNLOCK(pub) \
do { \
- printf("call wake_unlock_scan: %s %d\n", \
- __FUNCTION__, __LINE__); \
+ PRINT_SCAN_CALL("call wake_unlock_scan"); \
dhd_os_scan_wake_unlock(pub); \
} while (0)
#else
-#define DHD_OS_SCAN_WAKE_LOCK_TIMEOUT(pub, val) dhd_os_scan_wake_lock_timeout(pub, val)
-#define DHD_OS_SCAN_WAKE_UNLOCK(pub) dhd_os_scan_wake_unlock(pub)
-#endif /* DHD_DEBUG_SCAN_WAKELOCK */
-#else
#define DHD_OS_SCAN_WAKE_LOCK_TIMEOUT(pub, val)
#define DHD_OS_SCAN_WAKE_UNLOCK(pub)
#endif /* DHD_USE_SCAN_WAKELOCK */
#define OOB_WAKE_LOCK_TIMEOUT 500
extern void dhd_os_oob_irq_wake_lock_timeout(dhd_pub_t *pub, int val);
extern void dhd_os_oob_irq_wake_unlock(dhd_pub_t *pub);
-
#define DHD_OS_OOB_IRQ_WAKE_LOCK_TIMEOUT(pub, val) dhd_os_oob_irq_wake_lock_timeout(pub, val)
#define DHD_OS_OOB_IRQ_WAKE_UNLOCK(pub) dhd_os_oob_irq_wake_unlock(pub)
#endif /* BCMPCIE_OOB_HOST_WAKE */
*/
void dhd_net_if_lock(struct net_device *dev);
void dhd_net_if_unlock(struct net_device *dev);
-
#if defined(MULTIPLE_SUPPLICANT)
extern void wl_android_post_init(void); // terence 20120530: fix critical section in dhd_open and dhdsdio_probe
#endif
/* Value -2 means we are unsuccessful in both creating the kthread and tasklet */
#define DHD_PID_KT_TL_INVALID -2
-/* default reporting period */
-#define ECOUNTERS_DEFAULT_PERIOD 0
-
-/* default number of reports. '0' indicates forever */
-#define ECOUNTERS_NUM_REPORTS 0
-
-typedef struct ecounters_cfg {
- uint16 type;
- uint16 if_slice_idx;
- uint16 stats_rep;
-} ecounters_cfg_t;
-
-typedef struct event_ecounters_cfg {
- uint16 event_id;
- uint16 type;
- uint16 if_slice_idx;
- uint16 stats_rep;
-} event_ecounters_cfg_t;
-
-typedef struct ecountersv2_xtlv_list_elt {
- /* Not quite the exact bcm_xtlv_t type as data could be pointing to other pieces in
- * memory at the time of parsing arguments.
- */
- uint16 id;
- uint16 len;
- uint8 *data;
- struct ecountersv2_xtlv_list_elt *next;
-} ecountersv2_xtlv_list_elt_t;
-
-typedef struct ecountersv2_processed_xtlv_list_elt {
- uint8 *data;
- struct ecountersv2_processed_xtlv_list_elt *next;
-} ecountersv2_processed_xtlv_list_elt;
-
/*
* Exported from dhd OS modules (dhd_linux/dhd_ndis)
*/
, void *adapter
#endif
);
-extern int dhd_attach_net(dhd_pub_t *dhdp, bool need_rtnl_lock);
#if defined(WLP2P) && defined(WL_CFG80211)
/* To allow attach/detach calls corresponding to p2p0 interface */
extern int dhd_attach_p2p(dhd_pub_t *);
/* Notify tx completion */
extern void dhd_txcomplete(dhd_pub_t *dhdp, void *txp, bool success);
-#ifdef DHD_4WAYM4_FAIL_DISCONNECT
-extern void dhd_eap_txcomplete(dhd_pub_t *dhdp, void *txp, bool success, int ifidx);
-extern void dhd_cleanup_m4_state_work(dhd_pub_t *dhdp, int ifidx);
-#endif /* DHD_4WAYM4_FAIL_DISCONNECT */
-
-#ifdef DHD_PCIE_NATIVE_RUNTIMEPM
-extern void dhd_bus_wakeup_work(dhd_pub_t *dhdp);
-#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
#define WIFI_FEATURE_INFRA 0x0001 /* Basic infrastructure mode */
#define WIFI_FEATURE_INFRA_5G 0x0002 /* Support for 5 GHz Band */
#define WIFI_FEATURE_AP_STA 0x8000 /* Support for AP STA Concurrency */
#define WIFI_FEATURE_LINKSTAT 0x10000 /* Support for Linkstats */
#define WIFI_FEATURE_LOGGER 0x20000 /* WiFi Logger */
-#define WIFI_FEATURE_HAL_EPNO 0x40000 /* WiFi PNO enhanced */
-#define WIFI_FEATURE_RSSI_MONITOR 0x80000 /* RSSI Monitor */
+#define WIFI_FEATURE_HAL_EPNO 0x40000 /* WiFi PNO enhanced */
+#define WIFI_FEATURE_RSSI_MONITOR 0x80000 /* RSSI Monitor */
#define WIFI_FEATURE_MKEEP_ALIVE 0x100000 /* WiFi mkeep_alive */
-#define WIFI_FEATURE_CONFIG_NDO 0x200000 /* ND offload configure */
-#define WIFI_FEATURE_TX_TRANSMIT_POWER 0x400000 /* Capture Tx transmit power levels */
-#define WIFI_FEATURE_CONTROL_ROAMING 0x800000 /* Enable/Disable firmware roaming */
-#define WIFI_FEATURE_FILTER_IE 0x1000000 /* Probe req ie filter */
-#define WIFI_FEATURE_SCAN_RAND 0x2000000 /* Support MAC & Prb SN randomization */
-#define WIFI_FEATURE_INVALID 0xFFFFFFFF /* Invalid Feature */
+#define WIFI_FEATURE_CONFIG_NDO 0x200000 /* ND offload configure */
+#define WIFI_FEATURE_TX_TRANSMIT_POWER 0x400000 /* Capture Tx transmit power levels */
+#define WIFI_FEATURE_INVALID 0xFFFFFFFF /* Invalid Feature */
#define MAX_FEATURE_SET_CONCURRRENT_GROUPS 3
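For illustration, the WIFI_FEATURE_* values are single-bit flags that get OR-ed into a capability word and tested individually; the variable name below is only an example, not something declared in this header.

uint32 feature_set = WIFI_FEATURE_INFRA | WIFI_FEATURE_INFRA_5G |
                     WIFI_FEATURE_AP_STA | WIFI_FEATURE_LINKSTAT;

if (feature_set & WIFI_FEATURE_MKEEP_ALIVE) {
	/* firmware keep-alive offload is advertised to the framework */
}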
#ifdef CUSTOM_FORCE_NODFS_FLAG
extern int dhd_dev_set_nodfs(struct net_device *dev, uint nodfs);
#endif /* CUSTOM_FORCE_NODFS_FLAG */
+
#ifdef NDO_CONFIG_SUPPORT
#ifndef NDO_MAX_HOST_IP_ENTRIES
#define NDO_MAX_HOST_IP_ENTRIES 10
#endif /* NDO_MAX_HOST_IP_ENTRIES */
-
extern int dhd_dev_ndo_cfg(struct net_device *dev, u8 enable);
extern int dhd_dev_ndo_update_inet6addr(struct net_device * dev);
#endif /* NDO_CONFIG_SUPPORT */
extern int dhd_dev_lazy_roam_enable(struct net_device *dev, uint32 enable);
extern int dhd_dev_set_lazy_roam_bssid_pref(struct net_device *dev,
wl_bssid_pref_cfg_t *bssid_pref, uint32 flush);
-#endif /* GSCAN_SUPPORT */
-#if defined(GSCAN_SUPPORT) || defined(ROAMEXP_SUPPORT)
extern int dhd_dev_set_blacklist_bssid(struct net_device *dev, maclist_t *blacklist,
uint32 len, uint32 flush);
extern int dhd_dev_set_whitelist_ssid(struct net_device *dev, wl_ssid_whitelist_t *whitelist,
uint32 len, uint32 flush);
-#endif /* GSCAN_SUPPORT || ROAMEXP_SUPPORT */
+#endif /* GSCAN_SUPPORT */
/* OS independent layer functions */
extern void dhd_os_dhdiovar_lock(dhd_pub_t *pub);
extern void dhd_os_dhdiovar_unlock(dhd_pub_t *pub);
-void dhd_os_logdump_lock(dhd_pub_t *pub);
-void dhd_os_logdump_unlock(dhd_pub_t *pub);
extern int dhd_os_proto_block(dhd_pub_t * pub);
extern int dhd_os_proto_unblock(dhd_pub_t * pub);
-extern int dhd_os_ioctl_resp_wait(dhd_pub_t * pub, uint * condition);
+extern int dhd_os_ioctl_resp_wait(dhd_pub_t * pub, uint * condition, bool resched);
extern int dhd_os_ioctl_resp_wake(dhd_pub_t * pub);
extern unsigned int dhd_os_get_ioctl_resp_timeout(void);
extern void dhd_os_set_ioctl_resp_timeout(unsigned int timeout_msec);
#else
static INLINE void dhd_wakeup_ioctl_event(dhd_pub_t *pub, dhd_ioctl_recieved_status_t reason)
{ printf("%s is NOT implemented for SDIO", __FUNCTION__); return; }
-#endif // endif
+#endif
#ifdef SHOW_LOGTRACE
-/* Bound and delay are fine tuned after several experiments and these
- * are the best case values to handle bombarding of console logs.
- */
-#define DHD_EVENT_LOGTRACE_BOUND 10
-/* since FW has event log rate health check (EVENT_LOG_RATE_HC) we can reduce
- * the reschedule delay to 10ms
-*/
-#define DHD_EVENT_LOGTRACE_RESCHEDULE_DELAY_MS 10u
extern int dhd_os_read_file(void *file, char *buf, uint32 size);
extern int dhd_os_seek_file(void *file, int64 offset);
-void dhd_sendup_info_buf(dhd_pub_t *dhdp, uint8 *msg);
#endif /* SHOW_LOGTRACE */
-int dhd_os_write_file_posn(void *fp, unsigned long *posn,
- void *buf, unsigned long buflen);
-int dhd_msix_message_set(dhd_pub_t *dhdp, uint table_entry,
- uint message_number, bool unmask);
extern void
dhd_pcie_dump_core_regs(dhd_pub_t * pub, uint32 index, uint32 first_addr, uint32 last_addr);
#define DHD_OS_IOCTL_RESP_LOCK(x)
#define DHD_OS_IOCTL_RESP_UNLOCK(x)
+
extern int dhd_os_get_image_block(char * buf, int len, void * image);
extern int dhd_os_get_image_size(void * image);
#if defined(BT_OVER_SDIO)
extern void dhdsdio_bus_usr_cnt_inc(dhd_pub_t *pub);
extern void dhdsdio_bus_usr_cnt_dec(dhd_pub_t *pub);
#endif /* (BT_OVER_SDIO) */
-extern void *dhd_os_open_image1(dhd_pub_t *pub, char *filename); /* rev1 function signature */
-extern void dhd_os_close_image1(dhd_pub_t *pub, void *image);
+extern void * dhd_os_open_image(char * filename);
+extern void dhd_os_close_image(void * image);
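A usage sketch for the image helpers reverted above, assuming dhd_os_get_image_block() returns the number of bytes read and zero or a negative value at end of file or on error; the wrapper function and buffer names are illustrative only.

static int example_load_image(char *path, char *buf, int buflen)
{
	void *image;
	int len, total = 0;

	image = dhd_os_open_image(path);
	if (image == NULL)
		return -1;

	/* pull the file in block by block until the helper reports no more data */
	while (total < buflen &&
	       (len = dhd_os_get_image_block(buf + total, buflen - total, image)) > 0)
		total += len;

	dhd_os_close_image(image);
	return total;
}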
extern void dhd_os_wd_timer(void *bus, uint wdtick);
+#ifdef DHD_PCIE_RUNTIMEPM
+extern void dhd_os_runtimepm_timer(void *bus, uint tick);
+#endif /* DHD_PCIE_RUNTIMEPM */
extern void dhd_os_sdlock(dhd_pub_t * pub);
extern void dhd_os_sdunlock(dhd_pub_t * pub);
extern void dhd_os_sdlock_txq(dhd_pub_t * pub);
void dhd_schedule_memdump(dhd_pub_t *dhdp, uint8 *buf, uint32 size);
#endif /* DHD_FW_COREDUMP */
-void dhd_write_sssr_dump(dhd_pub_t *dhdp, uint32 dump_mode);
-#ifdef DNGL_AXI_ERROR_LOGGING
-void dhd_schedule_axi_error_dump(dhd_pub_t *dhdp, void *type);
-#endif /* DNGL_AXI_ERROR_LOGGING */
-#ifdef BCMPCIE
-void dhd_schedule_cto_recovery(dhd_pub_t *dhdp);
-#endif /* BCMPCIE */
+void dhd_schedule_sssr_dump(dhd_pub_t *dhdp);
+
+#ifdef SUPPORT_AP_POWERSAVE
+extern int dhd_set_ap_powersave(dhd_pub_t *dhdp, int ifidx, int enable);
+#endif /* SUPPORT_AP_POWERSAVE */
#ifdef PKT_FILTER_SUPPORT
#define DHD_UNICAST_FILTER_NUM 0
#define DHD_ARP_FILTER_NUM 5
#define DHD_BROADCAST_ARP_FILTER_NUM 6
#define DHD_IP4BCAST_DROP_FILTER_NUM 7
-#define DHD_LLC_STP_DROP_FILTER_NUM 8
-#define DHD_LLC_XID_DROP_FILTER_NUM 9
#define DISCARD_IPV4_MCAST "102 1 6 IP4_H:16 0xf0 0xe0"
#define DISCARD_IPV6_MCAST "103 1 6 IP6_H:24 0xff 0xff"
-#define DISCARD_IPV4_BCAST "107 1 6 IP4_H:16 0xffffffff 0xffffffff"
-#define DISCARD_LLC_STP "108 1 6 ETH_H:14 0xFFFFFFFFFFFF 0xAAAA0300000C"
-#define DISCARD_LLC_XID "109 1 6 ETH_H:14 0xFFFFFF 0x0001AF"
extern int dhd_os_enable_packet_filter(dhd_pub_t *dhdp, int val);
extern void dhd_enable_packet_filter(int value, dhd_pub_t *dhd);
extern int dhd_packet_filter_add_remove(dhd_pub_t *dhdp, int add_remove, int num);
extern int net_os_enable_packet_filter(struct net_device *dev, int val);
extern int net_os_rxfilter_add_remove(struct net_device *dev, int val, int num);
extern int net_os_set_suspend_bcn_li_dtim(struct net_device *dev, int val);
-
-#define MAX_PKTFLT_BUF_SIZE 2048
-#define MAX_PKTFLT_FIXED_PATTERN_SIZE 32
-#define MAX_PKTFLT_FIXED_BUF_SIZE \
- (WL_PKT_FILTER_FIXED_LEN + MAX_PKTFLT_FIXED_PATTERN_SIZE * 2)
-#define MAXPKT_ARG 16
#endif /* PKT_FILTER_SUPPORT */
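For context, the two DISCARD_* patterns kept above apply a one-byte mask/pattern at an offset into the IP header, where IP4_H:16 is the first byte of the IPv4 destination address and IP6_H:24 the first byte of the IPv6 destination address. The decoding below is an interpretation based on the standard header layouts, not text from the header file:

/* DISCARD_IPV4_MCAST: (dst[0] & 0xf0) == 0xe0  ->  224.0.0.0/4, IPv4 multicast */
/* DISCARD_IPV6_MCAST: (dst[0] & 0xff) == 0xff  ->  ff00::/8,    IPv6 multicast */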
+
#if defined(BCMPCIE)
extern int dhd_get_suspend_bcn_li_dtim(dhd_pub_t *dhd, int *dtim_period, int *bcn_interval);
#else
#endif /* RSSI_MONITOR_SUPPORT */
#ifdef DHDTCPACK_SUPPRESS
-int dhd_dev_set_tcpack_sup_mode_cfg(struct net_device *dev, uint8 enable);
+extern int dhd_dev_set_tcpack_sup_mode_cfg(struct net_device *dev, uint8 enable);
#endif /* DHDTCPACK_SUPPRESS */
#define DHD_RSSI_MONITOR_EVT_VERSION 1
#ifdef SHOW_LOGTRACE
typedef struct {
- uint num_fmts;
+ int num_fmts;
char **fmts;
char *raw_fmts;
char *raw_sstr;
*/
#define PKT_FILTER_APF_ID 200
#define DHD_APF_LOCK(ndev) dhd_apf_lock(ndev)
-#define DHD_APF_UNLOCK(ndev) dhd_apf_unlock(ndev)
+#define DHD_APF_UNLOCK(ndev) dhd_apf_unlock(ndev)
extern void dhd_apf_lock(struct net_device *dev);
extern void dhd_apf_unlock(struct net_device *dev);
extern struct net_device* dhd_allocate_if(dhd_pub_t *dhdpub, int ifidx, const char *name,
uint8 *mac, uint8 bssidx, bool need_rtnl_lock, const char *dngl_name);
extern int dhd_remove_if(dhd_pub_t *dhdpub, int ifidx, bool need_rtnl_lock);
-#ifdef WL_STATIC_IF
-extern s32 dhd_update_iflist_info(dhd_pub_t *dhdp, struct net_device *ndev, int ifidx,
- uint8 *mac, uint8 bssidx, const char *dngl_name, int if_state);
-#endif /* WL_STATIC_IF */
extern void dhd_vif_add(struct dhd_info *dhd, int ifidx, char * name);
extern void dhd_vif_del(struct dhd_info *dhd, int ifidx);
extern void dhd_event(struct dhd_info *dhd, char *evpkt, int evlen, int ifidx);
extern void dhd_vif_sendup(struct dhd_info *dhd, int ifidx, uchar *cp, int len);
-#ifdef WL_NATOE
-extern int dhd_natoe_ct_event(dhd_pub_t *dhd, char *data);
-#endif /* WL_NATOE */
-
/* Send packet to dongle via data channel */
extern int dhd_sendpkt(dhd_pub_t *dhdp, int ifidx, void *pkt);
#ifdef LOG_INTO_TCPDUMP
extern void dhd_sendup_log(dhd_pub_t *dhdp, void *data, int len);
#endif /* LOG_INTO_TCPDUMP */
-#ifdef SHOW_LOGTRACE
-void dhd_sendup_info_buf(dhd_pub_t *dhdp, uint8 *msg);
-#endif // endif
extern int dhd_bus_devreset(dhd_pub_t *dhdp, uint8 flag);
extern uint dhd_bus_status(dhd_pub_t *dhdp);
extern int dhd_bus_start(dhd_pub_t *dhdp);
extern unsigned long dhd_os_spin_lock(void *lock);
void dhd_os_spin_unlock(void *lock, unsigned long flags);
-/* linux is defined for DHD EFI builds also,
-* since its cross-compiled for EFI from linux.
-* dbgring_lock apis are meant only for linux
-* to use mutexes, other OSes will continue to
-* use dhd_os_spin_lock
-*/
-void *dhd_os_dbgring_lock_init(osl_t *osh);
-void dhd_os_dbgring_lock_deinit(osl_t *osh, void *mtx);
-unsigned long dhd_os_dbgring_lock(void *lock);
-void dhd_os_dbgring_unlock(void *lock, unsigned long flags);
-
-static INLINE int dhd_os_tput_test_wait(dhd_pub_t *pub, uint *condition,
- uint timeout_ms)
-{ return 0; }
-static INLINE int dhd_os_tput_test_wake(dhd_pub_t * pub)
+#ifdef DHD_EFI
+extern int dhd_os_ds_enter_wait(dhd_pub_t * pub, uint * condition);
+extern int dhd_os_ds_enter_wake(dhd_pub_t * pub);
+#else
+static INLINE int dhd_os_ds_enter_wait(dhd_pub_t * pub, uint * condition)
+{ printf("%s is Not supported for this platform", __FUNCTION__); return 0; }
+static INLINE int dhd_os_ds_enter_wake(dhd_pub_t * pub)
{ return 0; }
+#endif /* DHD_EFI */
-extern int dhd_os_busbusy_wait_negation(dhd_pub_t * pub, uint * condition);
+#ifdef PCIE_INB_DW
+extern int dhd_os_ds_exit_wait(dhd_pub_t * pub, uint * condition);
+extern int dhd_os_ds_exit_wake(dhd_pub_t * pub);
+#endif /* PCIE_INB_DW */
extern int dhd_os_busbusy_wake(dhd_pub_t * pub);
-extern void dhd_os_tx_completion_wake(dhd_pub_t *dhd);
extern int dhd_os_busbusy_wait_condition(dhd_pub_t *pub, uint *var, uint condition);
-int dhd_os_busbusy_wait_bitmask(dhd_pub_t *pub, uint *var,
- uint bitmask, uint condition);
+extern int dhd_os_busbusy_wait_negation(dhd_pub_t * pub, uint * condition);
extern int dhd_os_d3ack_wait(dhd_pub_t * pub, uint * condition);
extern int dhd_os_d3ack_wake(dhd_pub_t * pub);
-extern int dhd_os_dmaxfer_wait(dhd_pub_t *pub, uint *condition);
-extern int dhd_os_dmaxfer_wake(dhd_pub_t *pub);
/*
* Manage sta objects in an interface. Interface is identified by an ifindex and
extern void dhd_del_sta(void *pub, int ifidx, void *ea);
extern int dhd_get_ap_isolate(dhd_pub_t *dhdp, uint32 idx);
extern int dhd_set_ap_isolate(dhd_pub_t *dhdp, uint32 idx, int val);
+#if defined(BCM_GMAC3)
+extern int dhd_set_dev_def(dhd_pub_t *dhdp, uint32 idx, int val);
+#endif
extern int dhd_bssidx2idx(dhd_pub_t *dhdp, uint32 bssidx);
extern struct net_device *dhd_linux_get_primary_netdev(dhd_pub_t *dhdp);
#ifdef DHD_MCAST_REGEN
extern int dhd_get_mcast_regen_bss_enable(dhd_pub_t *dhdp, uint32 idx);
extern int dhd_set_mcast_regen_bss_enable(dhd_pub_t *dhdp, uint32 idx, int val);
-#endif // endif
+#endif
typedef enum cust_gpio_modes {
WLAN_RESET_ON,
WLAN_RESET_OFF,
extern int wl_iw_iscan_set_scan_broadcast_prep(struct net_device *dev, uint flag);
extern int wl_iw_send_priv_event(struct net_device *dev, char *flag);
-
-#ifdef DHD_PCIE_NATIVE_RUNTIMEPM
-extern void dhd_flush_rx_tx_wq(dhd_pub_t *dhdp);
-#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
-
/*
* Insmod parameters for debug/test
*/
/* Watchdog timer interval */
extern uint dhd_watchdog_ms;
extern bool dhd_os_wd_timer_enabled(void *bus);
+#ifdef DHD_PCIE_RUNTIMEPM
+extern uint dhd_runtimepm_ms;
+#endif /* DHD_PCIE_RUNTIMEPM */
-/** Default console output poll interval */
+/* Console output poll interval */
extern uint dhd_console_ms;
-
extern uint android_msg_level;
extern uint config_msg_level;
extern uint sd_msglevel;
-extern uint dump_msg_level;
#ifdef BCMDBUS
extern uint dbus_msglevel;
#endif /* BCMDBUS */
/* Roaming mode control */
extern uint dhd_radio_up;
-/* TCM verification control */
-extern uint dhd_tcm_test_enable;
-
/* Initial idletime ticks (may be -1 for immediate idle, 0 for no idle) */
extern int dhd_idletime;
#ifdef DHD_USE_IDLECOUNT
#define DEFAULT_BCN_TIMEOUT_VALUE 4
#ifndef CUSTOM_BCN_TIMEOUT_SETTING
#define CUSTOM_BCN_TIMEOUT_SETTING DEFAULT_BCN_TIMEOUT_VALUE
-#endif // endif
+#endif
/* Default KEEP_ALIVE Period is 55 sec to prevent AP from sending Keep Alive probe frame */
#define DEFAULT_KEEP_ALIVE_VALUE 55000 /* msec */
#define DEFAULT_GLOM_VALUE -1
#ifndef CUSTOM_GLOM_SETTING
#define CUSTOM_GLOM_SETTING DEFAULT_GLOM_VALUE
-#endif // endif
+#endif
#define WL_AUTO_ROAM_TRIGGER -75
/* hooks for custom Roaming Trigger setting via Makefile */
#define DEFAULT_ROAM_TRIGGER_VALUE -75 /* dBm default roam trigger all band */
#define DEFAULT_ROAM_TRIGGER_SETTING -1
#ifndef CUSTOM_ROAM_TRIGGER_SETTING
#define CUSTOM_ROAM_TRIGGER_SETTING DEFAULT_ROAM_TRIGGER_VALUE
-#endif // endif
+#endif
/* hooks for custom Roaming Romaing setting via Makefile */
#define DEFAULT_ROAM_DELTA_VALUE 10 /* dBm default roam delta all band */
#define DEFAULT_ROAM_DELTA_SETTING -1
#ifndef CUSTOM_ROAM_DELTA_SETTING
#define CUSTOM_ROAM_DELTA_SETTING DEFAULT_ROAM_DELTA_VALUE
-#endif // endif
+#endif
/* hooks for custom PNO Event wake lock to guarantee enough time
for the Platform to detect Event before system suspended
#define DEFAULT_PNO_EVENT_LOCK_xTIME 2 /* multiplay of DHD_PACKET_TIMEOUT_MS */
#ifndef CUSTOM_PNO_EVENT_LOCK_xTIME
#define CUSTOM_PNO_EVENT_LOCK_xTIME DEFAULT_PNO_EVENT_LOCK_xTIME
-#endif // endif
+#endif
/* hooks for custom dhd_dpc_prio setting option via Makefile */
#define DEFAULT_DHP_DPC_PRIO 1
#ifndef CUSTOM_DPC_PRIO_SETTING
#define CUSTOM_DPC_PRIO_SETTING DEFAULT_DHP_DPC_PRIO
-#endif // endif
+#endif
#ifndef CUSTOM_LISTEN_INTERVAL
#define CUSTOM_LISTEN_INTERVAL LISTEN_INTERVAL
#define DEFAULT_SUSPEND_BCN_LI_DTIM 3
#ifndef CUSTOM_SUSPEND_BCN_LI_DTIM
#define CUSTOM_SUSPEND_BCN_LI_DTIM DEFAULT_SUSPEND_BCN_LI_DTIM
-#endif // endif
+#endif
#ifndef BCN_TIMEOUT_IN_SUSPEND
#define BCN_TIMEOUT_IN_SUSPEND 6 /* bcn timeout value in suspend mode */
-#endif // endif
+#endif
#ifndef CUSTOM_RXF_PRIO_SETTING
#define CUSTOM_RXF_PRIO_SETTING MAX((CUSTOM_DPC_PRIO_SETTING - 1), 1)
-#endif // endif
+#endif
#define DEFAULT_WIFI_TURNOFF_DELAY 0
-#ifndef WIFI_TURNOFF_DELAY
#define WIFI_TURNOFF_DELAY DEFAULT_WIFI_TURNOFF_DELAY
-#endif /* WIFI_TURNOFF_DELAY */
#define DEFAULT_WIFI_TURNON_DELAY 200
#ifndef WIFI_TURNON_DELAY
#ifdef WLTDLS
#ifndef CUSTOM_TDLS_IDLE_MODE_SETTING
#define CUSTOM_TDLS_IDLE_MODE_SETTING 60000 /* 60sec to tear down TDLS of not active */
-#endif // endif
+#endif
#ifndef CUSTOM_TDLS_RSSI_THRESHOLD_HIGH
#define CUSTOM_TDLS_RSSI_THRESHOLD_HIGH -70 /* rssi threshold for establishing TDLS link */
-#endif // endif
+#endif
#ifndef CUSTOM_TDLS_RSSI_THRESHOLD_LOW
#define CUSTOM_TDLS_RSSI_THRESHOLD_LOW -80 /* rssi threshold for tearing down TDLS link */
-#endif // endif
-#ifndef CUSTOM_TDLS_PCKTCNT_THRESHOLD_HIGH
-#define CUSTOM_TDLS_PCKTCNT_THRESHOLD_HIGH 100 /* pkt/sec threshold for establishing TDLS link */
-#endif // endif
-#ifndef CUSTOM_TDLS_PCKTCNT_THRESHOLD_LOW
-#define CUSTOM_TDLS_PCKTCNT_THRESHOLD_LOW 10 /* pkt/sec threshold for tearing down TDLS link */
-#endif // endif
+#endif
#endif /* WLTDLS */
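A sketch of the hysteresis the two TDLS RSSI thresholds are meant to provide; wl_tdls_setup() and wl_tdls_teardown() are placeholder names for illustration, not functions declared in this header.

extern void wl_tdls_setup(void *peer);		/* placeholder */
extern void wl_tdls_teardown(void *peer);	/* placeholder */

static void example_tdls_policy(void *peer, int peer_rssi)
{
	if (peer_rssi >= CUSTOM_TDLS_RSSI_THRESHOLD_HIGH)
		wl_tdls_setup(peer);		/* strong peer: bring up a direct link */
	else if (peer_rssi <= CUSTOM_TDLS_RSSI_THRESHOLD_LOW)
		wl_tdls_teardown(peer);		/* weak peer: fall back to the AP path */
	/* between the two thresholds the current state is kept, so a peer
	 * hovering near a single RSSI value does not cause setup/teardown flapping
	 */
}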
#if defined(VSDB) || defined(ROAM_ENABLE)
#define DEFAULT_BCN_TIMEOUT 8
#else
#define DEFAULT_BCN_TIMEOUT 4
-#endif // endif
+#endif
#ifndef CUSTOM_BCN_TIMEOUT
#define CUSTOM_BCN_TIMEOUT DEFAULT_BCN_TIMEOUT
-#endif // endif
+#endif
#define MAX_DTIM_SKIP_BEACON_INTERVAL 100 /* max allowed associated AP beacon for DTIM skip */
#ifndef MAX_DTIM_ALLOWED_INTERVAL
#define MAX_DTIM_ALLOWED_INTERVAL 600 /* max allowed total beacon interval for DTIM skip */
-#endif // endif
+#endif
#ifndef MIN_DTIM_FOR_ROAM_THRES_EXTEND
#define MIN_DTIM_FOR_ROAM_THRES_EXTEND 600 /* minimum dtim interval to extend roam threshold */
-#endif // endif
+#endif
#define NO_DTIM_SKIP 1
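A worked sketch of how the bounds above interact; this is not the driver's dhd_get_suspend_bcn_li_dtim(), only an illustration under the assumption that both limits are expressed in milliseconds. With a 100 ms beacon and DTIM period 1, the default multiplier of 3 gives a 300 ms listen interval, well under MAX_DTIM_ALLOWED_INTERVAL.

static int example_suspend_bcn_li_dtim(int bcn_interval_ms, int dtim_period)
{
	int li_dtim = CUSTOM_SUSPEND_BCN_LI_DTIM;

	/* do not skip DTIMs at all if the AP's beacon interval is already long */
	if (bcn_interval_ms > MAX_DTIM_SKIP_BEACON_INTERVAL)
		return NO_DTIM_SKIP;

	/* shrink the multiplier until the total listen interval fits the cap */
	while (li_dtim > NO_DTIM_SKIP &&
	       (li_dtim * dtim_period * bcn_interval_ms) > MAX_DTIM_ALLOWED_INTERVAL)
		li_dtim--;

	return li_dtim;
}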
#ifdef SDTEST
/* Echo packet len (0 => sawtooth, max 1800) */
extern uint dhd_pktgen_len;
#define MAX_PKTGEN_LEN 1800
-#endif // endif
+#endif
+
/* optionally set by a module_param_string() */
#define MOD_PARAM_PATHLEN 2048
#define MOD_PARAM_INFOLEN 512
-#define MOD_PARAM_SRLEN 64
+#define MOD_PARAM_SRLEN 64
#ifdef SOFTAP
extern char fw_path2[MOD_PARAM_PATHLEN];
-#endif // endif
-
-#if defined(ANDROID_PLATFORM_VERSION)
-#if (ANDROID_PLATFORM_VERSION < 7)
-#define DHD_LEGACY_FILE_PATH
-#define VENDOR_PATH "/system"
-#elif (ANDROID_PLATFORM_VERSION == 7)
-#define VENDOR_PATH "/system"
-#elif (ANDROID_PLATFORM_VERSION >= 8)
-#define VENDOR_PATH "/vendor"
-#endif /* ANDROID_PLATFORM_VERSION < 7 */
-#else
-#define VENDOR_PATH ""
-#endif /* ANDROID_PLATFORM_VERSION */
-
-#if defined(ANDROID_PLATFORM_VERSION)
-#if (ANDROID_PLATFORM_VERSION < 9)
-#ifdef WL_STATIC_IF
-#undef WL_STATIC_IF
-#endif /* WL_STATIC_IF */
-#ifdef WL_STATIC_IFNAME_PREFIX
-#undef WL_STATIC_IFNAME_PREFIX
-#endif /* WL_STATIC_IFNAME_PREFIX */
-#endif /* ANDROID_PLATFORM_VERSION < 9 */
-#endif /* ANDROID_PLATFORM_VERSION */
-
-#if defined(DHD_LEGACY_FILE_PATH)
+#endif
+
+#ifdef DHD_LEGACY_FILE_PATH
#define PLATFORM_PATH "/data/"
#elif defined(PLATFORM_SLP)
#define PLATFORM_PATH "/opt/etc/"
#else
-#if defined(ANDROID_PLATFORM_VERSION)
-#if (ANDROID_PLATFORM_VERSION >= 9)
-#define PLATFORM_PATH "/data/vendor/conn/"
-#define DHD_MAC_ADDR_EXPORT
-#define DHD_ADPS_BAM_EXPORT
-#define DHD_EXPORT_CNTL_FILE
-#define DHD_SOFTAP_DUAL_IF_INFO
-#define DHD_SEND_HANG_PRIVCMD_ERRORS
-#else
-#define PLATFORM_PATH "/data/misc/conn/"
-#endif /* ANDROID_PLATFORM_VERSION >= 9 */
-#else
-#define PLATFORM_PATH "/data/misc/conn/"
-#endif /* ANDROID_PLATFORM_VERSION */
+#define PLATFORM_PATH "/data/misc/conn/"
#endif /* DHD_LEGACY_FILE_PATH */
-#ifdef DHD_MAC_ADDR_EXPORT
-extern struct ether_addr sysfs_mac_addr;
-#endif /* DHD_MAC_ADDR_EXPORT */
-
/* Flag to indicate if we should download firmware on driver load */
extern uint dhd_download_fw_on_driverload;
#ifndef BCMDBUS
extern int dhd_write_file(const char *filepath, char *buf, int buf_len);
extern int dhd_read_file(const char *filepath, char *buf, int buf_len);
extern int dhd_write_file_and_check(const char *filepath, char *buf, int buf_len);
-extern int dhd_file_delete(char *path);
#ifdef READ_MACADDR
extern int dhd_set_macaddr_from_file(dhd_pub_t *dhdp);
#else
static INLINE int dhd_write_macaddr(struct ether_addr *mac) { return 0; }
#endif /* WRITE_MACADDR */
-#ifdef USE_CID_CHECK
-#define MAX_VNAME_LEN 64
-#ifdef DHD_EXPORT_CNTL_FILE
-extern char cidinfostr[MAX_VNAME_LEN];
-#endif /* DHD_EXPORT_CNTL_FILE */
-extern int dhd_check_module_cid(dhd_pub_t *dhdp);
-extern char *dhd_get_cid_info(unsigned char *vid, int vid_length);
-#else
static INLINE int dhd_check_module_cid(dhd_pub_t *dhdp) { return 0; }
-#endif /* USE_CID_CHECK */
#ifdef GET_MAC_FROM_OTP
extern int dhd_check_module_mac(dhd_pub_t *dhdp);
#else
static INLINE int dhd_check_module_mac(dhd_pub_t *dhdp) { return 0; }
#endif /* GET_MAC_FROM_OTP */
-#if defined(READ_MACADDR) || defined(WRITE_MACADDR) || defined(USE_CID_CHECK) || \
- defined(GET_MAC_FROM_OTP)
+#if defined(READ_MACADDR) || defined(WRITE_MACADDR) || defined(GET_MAC_FROM_OTP)
#define DHD_USE_CISINFO
-#endif /* READ_MACADDR || WRITE_MACADDR || USE_CID_CHECK || GET_MAC_FROM_OTP */
+#endif
#ifdef DHD_USE_CISINFO
int dhd_read_cis(dhd_pub_t *dhdp);
void dhd_clear_cis(dhd_pub_t *dhdp);
-#if defined(SUPPORT_MULTIPLE_MODULE_CIS) && defined(USE_CID_CHECK)
-extern int dhd_check_module_b85a(void);
-extern int dhd_check_module_b90(void);
-#define BCM4359_MODULE_TYPE_B90B 1
-#define BCM4359_MODULE_TYPE_B90S 2
-#endif /* defined(SUPPORT_MULTIPLE_MODULE_CIS) && defined(USE_CID_CHECK) */
-#if defined(USE_CID_CHECK)
-extern int dhd_check_module_bcm(char *module_type, int index, bool *is_murata_fem);
-#endif /* defined(USE_CID_CHECK) */
#else
static INLINE int dhd_read_cis(dhd_pub_t *dhdp) { return 0; }
static INLINE void dhd_clear_cis(dhd_pub_t *dhdp) { }
#endif /* DHD_USE_CISINFO */
-#if defined(WL_CFG80211) && defined(SUPPORT_DEEP_SLEEP)
-/* Flags to indicate if we distingish power off policy when
- * user set the memu "Keep Wi-Fi on during sleep" to "Never"
- */
-extern int trigger_deep_sleep;
-int dhd_deepsleep(struct net_device *dev, int flag);
-#endif /* WL_CFG80211 && SUPPORT_DEEP_SLEEP */
+#define IBSS_COALESCE_DEFAULT 1
+#define IBSS_INITIAL_SCAN_ALLOWED_DEFAULT 1
+
extern void dhd_wait_for_event(dhd_pub_t *dhd, bool *lockvar);
extern void dhd_wait_event_wakeup(dhd_pub_t*dhd);
/* ioctl processing for nl80211 */
int dhd_ioctl_process(dhd_pub_t *pub, int ifidx, struct dhd_ioctl *ioc, void *data_buf);
-#if defined(SUPPORT_MULTIPLE_REVISION)
-extern int
-concate_revision(struct dhd_bus *bus, char *fwpath, char *nvpath);
-#endif /* SUPPORT_MULTIPLE_REVISION */
void dhd_bus_update_fw_nv_path(struct dhd_bus *bus, char *pfw_path, char *pnv_path,
char *pclm_path, char *pconf_path);
void dhd_set_bus_state(void *bus, uint32 state);
int dhd_os_get_socram_dump(struct net_device *dev, char **buf, uint32 *size);
int dhd_common_socram_dump(dhd_pub_t *dhdp);
-int dhd_dump(dhd_pub_t *dhdp, char *buf, int buflen);
-
int dhd_os_get_version(struct net_device *dev, bool dhd_ver, char **buf, uint32 size);
-void dhd_get_memdump_filename(struct net_device *ndev, char *memdump_path, int len, char *fname);
+
uint8* dhd_os_prealloc(dhd_pub_t *dhdpub, int section, uint size, bool kmalloc_if_fail);
void dhd_os_prefree(dhd_pub_t *dhdpub, void *addr, uint size);
PARAM_LAST_VALUE
};
extern int sec_get_param_wfa_cert(dhd_pub_t *dhd, int mode, uint* read_val);
-#ifdef DHD_EXPORT_CNTL_FILE
-#define VALUENOTSET 0xFFFFFFFFu
-extern uint32 bus_txglom;
-extern uint32 roam_off;
-#ifdef USE_WL_FRAMEBURST
-extern uint32 frameburst;
-#endif /* USE_WL_FRAMEBURST */
-#ifdef USE_WL_TXBF
-extern uint32 txbf;
-#endif /* USE_WL_TXBF */
-#ifdef PROP_TXSTATUS
-extern uint32 proptx;
-#endif /* PROP_TXSTATUS */
-#endif /* DHD_EXPORT_CNTL_FILE */
#endif /* USE_WFA_CERT_CONF */
#define dhd_add_flowid(pub, ifidx, ac_prio, ea, flowid) do {} while (0)
#define DHD_SPIN_LOCK(lock, flags) (flags) = dhd_os_spin_lock(lock)
#define DHD_SPIN_UNLOCK(lock, flags) dhd_os_spin_unlock((lock), (flags))
-#define DHD_RING_LOCK(lock, flags) (flags) = dhd_os_spin_lock(lock)
-#define DHD_RING_UNLOCK(lock, flags) dhd_os_spin_unlock((lock), (flags))
-
-#define DHD_BUS_LOCK(lock, flags) (flags) = dhd_os_spin_lock(lock)
-#define DHD_BUS_UNLOCK(lock, flags) dhd_os_spin_unlock((lock), (flags))
-
-/* Enable DHD backplane spin lock/unlock */
-#define DHD_BACKPLANE_ACCESS_LOCK(lock, flags) (flags) = dhd_os_spin_lock(lock)
-#define DHD_BACKPLANE_ACCESS_UNLOCK(lock, flags) dhd_os_spin_unlock((lock), (flags))
-
#define DHD_BUS_INB_DW_LOCK(lock, flags) (flags) = dhd_os_spin_lock(lock)
#define DHD_BUS_INB_DW_UNLOCK(lock, flags) dhd_os_spin_unlock((lock), (flags))
#define DHD_TDLS_UNLOCK(lock, flags) dhd_os_spin_unlock((lock), (flags))
#endif /* WLTDLS */
-#define DHD_BUS_INB_DW_LOCK(lock, flags) (flags) = dhd_os_spin_lock(lock)
-#define DHD_BUS_INB_DW_UNLOCK(lock, flags) dhd_os_spin_unlock((lock), (flags))
-
#ifdef DBG_PKT_MON
/* Enable DHD PKT MON spin lock/unlock */
#define DHD_PKT_MON_LOCK(lock, flags) (flags) = dhd_os_spin_lock(lock)
#define DHD_LINUX_GENERAL_LOCK(dhdp, flags) DHD_GENERAL_LOCK(dhdp, flags)
#define DHD_LINUX_GENERAL_UNLOCK(dhdp, flags) DHD_GENERAL_UNLOCK(dhdp, flags)
-/* linux is defined for DHD EFI builds also,
-* since its cross-compiled for EFI from linux
-*/
-#define DHD_DBG_RING_LOCK_INIT(osh) dhd_os_dbgring_lock_init(osh)
-#define DHD_DBG_RING_LOCK_DEINIT(osh, lock) dhd_os_dbgring_lock_deinit(osh, (lock))
-#define DHD_DBG_RING_LOCK(lock, flags) (flags) = dhd_os_dbgring_lock(lock)
-#define DHD_DBG_RING_UNLOCK(lock, flags) dhd_os_dbgring_unlock((lock), flags)
-
extern void dhd_dump_to_kernelog(dhd_pub_t *dhdp);
-extern void dhd_print_tasklet_status(dhd_pub_t *dhd);
-
#ifdef BCMDBUS
extern uint dhd_get_rxsz(dhd_pub_t *pub);
extern void dhd_set_path(dhd_pub_t *pub);
extern int dhd_set_block_ping_status(dhd_pub_t *dhdp, uint32 idx, int val);
extern int dhd_get_grat_arp_status(dhd_pub_t *dhdp, uint32 idx);
extern int dhd_set_grat_arp_status(dhd_pub_t *dhdp, uint32 idx, int val);
-extern int dhd_get_block_tdls_status(dhd_pub_t *dhdp, uint32 idx);
-extern int dhd_set_block_tdls_status(dhd_pub_t *dhdp, uint32 idx, int val);
#endif /* DHD_L2_FILTER */
+
typedef struct wl_io_pport {
dhd_pub_t *dhd_pub;
uint ifidx;
void custom_rps_map_clear(struct netdev_rx_queue *queue);
#define PRIMARY_INF 0
#define VIRTUAL_INF 1
-#if defined(CONFIG_MACH_UNIVERSAL7420) || defined(CONFIG_SOC_EXYNOS8890)
+#if defined(CONFIG_MACH_UNIVERSAL5433) || defined(CONFIG_MACH_UNIVERSAL7420) || \
+ defined(CONFIG_SOC_EXYNOS8890)
#define RPS_CPUS_MASK "10"
#define RPS_CPUS_MASK_P2P "10"
#define RPS_CPUS_MASK_IBSS "10"
#define RPS_CPUS_MASK "6"
#define RPS_CPUS_MASK_P2P "6"
#define RPS_CPUS_MASK_IBSS "6"
-#endif /* CONFIG_MACH_UNIVERSAL7420 || CONFIG_SOC_EXYNOS8890 */
-#endif // endif
+#endif /* CONFIG_MACH_UNIVERSAL5433 || CONFIG_MACH_UNIVERSAL7420 || CONFIG_SOC_EXYNOS8890 */
+#endif
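For reference, the RPS_CPUS_MASK strings are hexadecimal CPU bitmasks in the same format the kernel accepts in /sys/class/net/<iface>/queues/rx-<n>/rps_cpus; decoding the two values used here:

/* "6"  -> 0x06 -> CPU bitmap 00110 -> receive processing steered to CPU1 and CPU2 */
/* "10" -> 0x10 -> CPU bitmap 10000 -> receive processing steered to CPU4          */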
int dhd_get_download_buffer(dhd_pub_t *dhd, char *file_path, download_type_t component,
char ** buffer, int *length);
void dhd_free_download_buffer(dhd_pub_t *dhd, void *buffer, int length);
-int dhd_download_blob(dhd_pub_t *dhd, unsigned char *buf,
+int dhd_download_blob(dhd_pub_t *dhd, unsigned char *image,
uint32 len, char *iovar);
-int dhd_download_blob_cached(dhd_pub_t *dhd, char *file_path,
- uint32 len, char *iovar);
-
-int dhd_apply_default_txcap(dhd_pub_t *dhd, char *txcap_path);
int dhd_apply_default_clm(dhd_pub_t *dhd, char *clm_path);
#ifdef SHOW_LOGTRACE
#if defined(DHD_LB_STATS)
#include <bcmutils.h>
extern void dhd_lb_stats_init(dhd_pub_t *dhd);
-extern void dhd_lb_stats_deinit(dhd_pub_t *dhd);
extern void dhd_lb_stats_dump(dhd_pub_t *dhdp, struct bcmstrbuf *strbuf);
extern void dhd_lb_stats_update_napi_histo(dhd_pub_t *dhdp, uint32 count);
extern void dhd_lb_stats_update_txc_histo(dhd_pub_t *dhdp, uint32 count);
#endif /* !DHD_LB_STATS */
#ifdef DHD_SSSR_DUMP
-#define DHD_SSSR_MEMPOOL_SIZE (2 * 1024 * 1024) /* 2MB size */
-
-/* used in sssr_dump_mode */
-#define SSSR_DUMP_MODE_SSSR 0 /* dump both *before* and *after* files */
-#define SSSR_DUMP_MODE_FIS 1 /* dump *after* files only */
-
+#define DHD_SSSR_MEMPOOL_SIZE (1024 * 1024) /* 1MB size */
extern int dhd_sssr_mempool_init(dhd_pub_t *dhd);
extern void dhd_sssr_mempool_deinit(dhd_pub_t *dhd);
extern int dhd_sssr_dump_init(dhd_pub_t *dhd);
extern void dhd_sssr_dump_deinit(dhd_pub_t *dhd);
-extern int dhdpcie_sssr_dump(dhd_pub_t *dhd);
-extern void dhd_sssr_print_filepath(dhd_pub_t *dhd, char *path);
-
#define DHD_SSSR_MEMPOOL_INIT(dhdp) dhd_sssr_mempool_init(dhdp)
#define DHD_SSSR_MEMPOOL_DEINIT(dhdp) dhd_sssr_mempool_deinit(dhdp)
#define DHD_SSSR_DUMP_INIT(dhdp) dhd_sssr_dump_init(dhdp)
#define DHD_SSSR_DUMP_DEINIT(dhdp) dhd_sssr_dump_deinit(dhdp)
-#define DHD_SSSR_PRINT_FILEPATH(dhdp, path) dhd_sssr_print_filepath(dhdp, path)
#else
-#define DHD_SSSR_MEMPOOL_INIT(dhdp) do { /* noop */ } while (0)
-#define DHD_SSSR_MEMPOOL_DEINIT(dhdp) do { /* noop */ } while (0)
-#define DHD_SSSR_DUMP_INIT(dhdp) do { /* noop */ } while (0)
-#define DHD_SSSR_DUMP_DEINIT(dhdp) do { /* noop */ } while (0)
-#define DHD_SSSR_PRINT_FILEPATH(dhdp, path) do { /* noop */ } while (0)
+#define DHD_SSSR_MEMPOOL_INIT(dhdp) do { /* noop */ } while (0)
+#define DHD_SSSR_MEMPOOL_DEINIT(dhdp) do { /* noop */ } while (0)
+#define DHD_SSSR_DUMP_INIT(dhdp) do { /* noop */ } while (0)
+#define DHD_SSSR_DUMP_DEINIT(dhdp) do { /* noop */ } while (0)
#endif /* DHD_SSSR_DUMP */
+#ifdef SHOW_LOGTRACE
+void dhd_get_read_buf_ptr(dhd_pub_t *dhd_pub, trace_buf_info_t *read_buf_info);
+#endif /* SHOW_LOGTRACE */
+
#ifdef BCMPCIE
extern int dhd_prot_debug_info_print(dhd_pub_t *dhd);
-extern bool dhd_bus_skip_clm(dhd_pub_t *dhdp);
-extern void dhd_pcie_dump_rc_conf_space_cap(dhd_pub_t *dhd);
-extern bool dhd_pcie_dump_int_regs(dhd_pub_t *dhd);
#else
#define dhd_prot_debug_info_print(x)
-static INLINE bool dhd_bus_skip_clm(dhd_pub_t *dhd_pub)
-{ return 0; }
#endif /* BCMPCIE */
-fw_download_status_t dhd_fw_download_status(dhd_pub_t * dhd_pub);
-void dhd_show_kirqstats(dhd_pub_t *dhd);
+extern bool dhd_prot_is_cmpl_ring_empty(dhd_pub_t *dhd, void *prot_info);
+
+bool dhd_fw_download_status(dhd_pub_t * dhd_pub);
/* Bitmask used for Join Timeout */
#define WLC_SSID_MASK 0x01
extern int dhd_start_join_timer(dhd_pub_t *pub);
extern int dhd_stop_join_timer(dhd_pub_t *pub);
-extern int dhd_start_scan_timer(dhd_pub_t *pub, bool is_escan);
-extern int dhd_stop_scan_timer(dhd_pub_t *pub, bool is_escan, uint16 sync_id);
+extern int dhd_start_scan_timer(dhd_pub_t *pub);
+extern int dhd_stop_scan_timer(dhd_pub_t *pub);
extern int dhd_start_cmd_timer(dhd_pub_t *pub);
extern int dhd_stop_cmd_timer(dhd_pub_t *pub);
extern int dhd_start_bus_timer(dhd_pub_t *pub);
void dhd_pktid_error_handler(dhd_pub_t *dhdp);
#endif /* DHD_PKTID_AUDIT_ENABLED */
-#ifdef DHD_MAP_PKTID_LOGGING
-extern void dhd_pktid_logging_dump(dhd_pub_t *dhdp);
-#endif /* DHD_MAP_PKTID_LOGGING */
-
+#ifdef DHD_PCIE_RUNTIMEPM
+extern bool dhd_runtimepm_state(dhd_pub_t *dhd);
+extern bool dhd_runtime_bus_wake(struct dhd_bus *bus, bool wait, void *func_addr);
+extern bool dhdpcie_runtime_bus_wake(dhd_pub_t *dhdp, bool wait, void *func_addr);
+extern void dhdpcie_block_runtime_pm(dhd_pub_t *dhdp);
+extern bool dhdpcie_is_resume_done(dhd_pub_t *dhdp);
+extern void dhd_runtime_pm_disable(dhd_pub_t *dhdp);
+extern void dhd_runtime_pm_enable(dhd_pub_t *dhdp);
+/* Disable the Runtime PM and wake up if the bus is already in suspend */
+#define DHD_DISABLE_RUNTIME_PM(dhdp) \
+do { \
+ dhd_runtime_pm_disable(dhdp); \
+} while (0)
+
+/* Enable the Runtime PM */
+#define DHD_ENABLE_RUNTIME_PM(dhdp) \
+do { \
+ dhd_runtime_pm_enable(dhdp); \
+} while (0)
+#else
#define DHD_DISABLE_RUNTIME_PM(dhdp)
#define DHD_ENABLE_RUNTIME_PM(dhdp)
+#endif /* DHD_PCIE_RUNTIMEPM */
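A usage sketch for the runtime PM bracket macros above: callers disable runtime PM (which also wakes the bus if it is already suspended) around a direct bus access and re-enable it afterwards. The wrapper function and the backplane address are illustrative only; dhd_bus_readwrite_bp_addr() is the accessor declared in dhd_bus.h.

static int example_read_backplane_reg(dhd_pub_t *dhdp, uint *val)
{
	int ret;

	DHD_DISABLE_RUNTIME_PM(dhdp);	/* hold the bus out of runtime suspend */
	ret = dhd_bus_readwrite_bp_addr(dhdp, 0x18000000, sizeof(*val), val, TRUE);
	DHD_ENABLE_RUNTIME_PM(dhdp);	/* allow the bus to idle again */

	return ret;
}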
-extern bool dhd_prot_is_cmpl_ring_empty(dhd_pub_t *dhd, void *prot_info);
-extern void dhd_prot_dump_ring_ptrs(void *prot_info);
+/*
+ * Enable this macro to track wake lock calls. The recorded calls can be
+ * printed with:
+ *     cat /sys/bcm-dhd/wklock_trace
+ * DHD_TRACE_WAKE_LOCK is only supported on Linux 2.6.0 and later.
+ */
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0))
+#undef DHD_TRACE_WAKE_LOCK
+#endif /* KERNEL_VER < KERNEL_VERSION(2, 6, 0) */
#if defined(DHD_TRACE_WAKE_LOCK)
void dhd_wk_lock_stats_dump(dhd_pub_t *dhdp);
-#endif // endif
+#endif
extern bool dhd_query_bus_erros(dhd_pub_t *dhdp);
-void dhd_clear_bus_errors(dhd_pub_t *dhdp);
-
-#if defined(CONFIG_64BIT)
-#define DHD_SUPPORT_64BIT
-#endif /* (linux || LINUX) && CONFIG_64BIT */
-
-#if defined(DHD_ERPOM)
-extern void dhd_schedule_reset(dhd_pub_t *dhdp);
-#else
-static INLINE void dhd_schedule_reset(dhd_pub_t *dhdp) {;}
-#endif // endif
extern void init_dhd_timeouts(dhd_pub_t *pub);
extern void deinit_dhd_timeouts(dhd_pub_t *pub);
DHD_REASON_OQS_TO
} timeout_reasons_t;
-extern void dhd_prhex(const char *msg, volatile uchar *buf, uint nbytes, uint8 dbg_level);
-int dhd_tput_test(dhd_pub_t *dhd, tput_test_t *tput_data);
-void dhd_tput_test_rx(dhd_pub_t *dhd, void *pkt);
-static INLINE int dhd_get_max_txbufs(dhd_pub_t *dhdp)
-{ return -1; }
+#if defined(PCIE_OOB) || defined(PCIE_INB_DW)
+extern int dhd_bus_set_device_wake(struct dhd_bus *bus, bool val);
+#endif /* defined(PCIE_OOB) || defined(PCIE_INB_DW) */
+
+#ifdef DHD_EFI
+extern void dhd_schedule_reset(dhd_pub_t *dhdp);
+#else
+static INLINE void dhd_schedule_reset(dhd_pub_t *dhdp) {;}
+#endif
+
+#ifdef ENABLE_TEMP_THROTTLING
+#ifndef TEMP_THROTTLE_CONTROL_BIT
+#define TEMP_THROTTLE_CONTROL_BIT 0xd
+#endif
+#endif /* ENABLE_TEMP_THROTTLING */
-#ifdef FILTER_IE
-int dhd_read_from_file(dhd_pub_t *dhd);
-int dhd_parse_filter_ie(dhd_pub_t *dhd, uint8 *buf);
-int dhd_get_filter_ie_count(dhd_pub_t *dhd, uint8 *buf);
-int dhd_parse_oui(dhd_pub_t *dhd, uint8 *inbuf, uint8 *oui, int len);
-int dhd_check_valid_ie(dhd_pub_t *dhdp, uint8 *buf, int len);
-#endif /* FILTER_IE */
+int dhd_send_msg_to_daemon(struct sk_buff *skb, void *data, int size);
+#ifdef REPORT_FATAL_TIMEOUTS
+void dhd_send_trap_to_fw_for_timeout(dhd_pub_t * pub, timeout_reasons_t reason);
+#endif
-uint16 dhd_prot_get_ioctl_trans_id(dhd_pub_t *dhdp);
+#if defined(CONFIG_64BIT)
+#define DHD_SUPPORT_64BIT
+#elif defined(DHD_EFI)
+#define DHD_SUPPORT_64BIT
+/* 64-bit support is disabled by default on other platforms; define DHD_SUPPORT_64BIT explicitly to enable it */
+#endif /* CONFIG_64BIT || DHD_EFI */
#ifdef SET_PCIE_IRQ_CPU_CORE
-enum {
- PCIE_IRQ_AFFINITY_OFF = 0,
- PCIE_IRQ_AFFINITY_BIG_CORE_ANY,
- PCIE_IRQ_AFFINITY_BIG_CORE_EXYNOS,
- PCIE_IRQ_AFFINITY_LAST
-};
-extern void dhd_set_irq_cpucore(dhd_pub_t *dhdp, int affinity_cmd);
+extern void dhd_set_irq_cpucore(dhd_pub_t *dhdp, int set);
+extern void set_irq_cpucore(unsigned int irq, int set);
#endif /* SET_PCIE_IRQ_CPU_CORE */
+#if defined(DHD_HANG_SEND_UP_TEST)
+extern void dhd_make_hang_with_reason(struct net_device *dev, const char *string_num);
+#endif /* DHD_HANG_SEND_UP_TEST */
-#ifdef DHD_WAKE_STATUS
-wake_counts_t* dhd_get_wakecount(dhd_pub_t *dhdp);
-#endif /* DHD_WAKE_STATUS */
-extern int dhd_get_random_bytes(uint8 *buf, uint len);
#if defined(DHD_BLOB_EXISTENCE_CHECK)
extern void dhd_set_blob_support(dhd_pub_t *dhdp, char *fw_path);
#endif /* DHD_BLOB_EXISTENCE_CHECK */
-/* configuration of ecounters. API's tp start/stop. currently supported only for linux */
-extern int dhd_ecounter_configure(dhd_pub_t *dhd, bool enable);
-extern int dhd_start_ecounters(dhd_pub_t *dhd);
-extern int dhd_stop_ecounters(dhd_pub_t *dhd);
-extern int dhd_start_event_ecounters(dhd_pub_t *dhd);
-extern int dhd_stop_event_ecounters(dhd_pub_t *dhd);
-
-int dhd_get_preserve_log_numbers(dhd_pub_t *dhd, uint32 *logset_mask);
-
-#ifdef DHD_LOG_DUMP
-void dhd_schedule_log_dump(dhd_pub_t *dhdp, void *type);
-void dhd_log_dump_trigger(dhd_pub_t *dhdp, int subcmd);
-int dhd_log_dump_ring_to_file(dhd_pub_t *dhdp, void *ring_ptr, void *file,
- unsigned long *file_posn, log_dump_section_hdr_t *sec_hdr, char *text_hdr,
- uint32 sec_type);
-int dhd_dump_debug_ring(dhd_pub_t *dhdp, void *ring_ptr, const void *user_buf,
- log_dump_section_hdr_t *sec_hdr, char *text_hdr, int buflen, uint32 sec_type);
-int dhd_log_dump_cookie_to_file(dhd_pub_t *dhdp, void *fp,
- const void *user_buf, unsigned long *f_pos);
-int dhd_log_dump_cookie(dhd_pub_t *dhdp, const void *user_buf);
-uint32 dhd_log_dump_cookie_len(dhd_pub_t *dhdp);
-int dhd_logdump_cookie_init(dhd_pub_t *dhdp, uint8 *buf, uint32 buf_size);
-void dhd_logdump_cookie_deinit(dhd_pub_t *dhdp);
-void dhd_logdump_cookie_save(dhd_pub_t *dhdp, char *cookie, char *type);
-int dhd_logdump_cookie_get(dhd_pub_t *dhdp, char *ret_cookie, uint32 buf_size);
-int dhd_logdump_cookie_count(dhd_pub_t *dhdp);
-int dhd_get_dld_log_dump(void *dev, dhd_pub_t *dhdp, const void *user_buf, void *fp,
- uint32 len, int type, void *pos);
-int dhd_print_ext_trap_data(void *dev, dhd_pub_t *dhdp, const void *user_buf,
- void *fp, uint32 len, void *pos);
-int dhd_print_dump_data(void *dev, dhd_pub_t *dhdp, const void *user_buf,
- void *fp, uint32 len, void *pos);
-int dhd_print_cookie_data(void *dev, dhd_pub_t *dhdp, const void *user_buf,
- void *fp, uint32 len, void *pos);
-int dhd_print_health_chk_data(void *dev, dhd_pub_t *dhdp, const void *user_buf,
- void *fp, uint32 len, void *pos);
-int dhd_print_time_str(const void *user_buf, void *fp, uint32 len, void *pos);
-#ifdef DHD_DUMP_PCIE_RINGS
-int dhd_print_flowring_data(void *dev, dhd_pub_t *dhdp, const void *user_buf,
- void *fp, uint32 len, void *pos);
-uint32 dhd_get_flowring_len(void *ndev, dhd_pub_t *dhdp);
-#endif /* DHD_DUMP_PCIE_RINGS */
-#ifdef DHD_STATUS_LOGGING
-extern int dhd_print_status_log_data(void *dev, dhd_pub_t *dhdp,
- const void *user_buf, void *fp, uint32 len, void *pos);
-extern uint32 dhd_get_status_log_len(void *ndev, dhd_pub_t *dhdp);
-#endif /* DHD_STATUS_LOGGING */
-int dhd_print_ecntrs_data(void *dev, dhd_pub_t *dhdp, const void *user_buf,
- void *fp, uint32 len, void *pos);
-int dhd_print_rtt_data(void *dev, dhd_pub_t *dhdp, const void *user_buf,
- void *fp, uint32 len, void *pos);
-int dhd_get_debug_dump_file_name(void *dev, dhd_pub_t *dhdp,
- char *dump_path, int size);
-#if defined(BCMPCIE)
-uint32 dhd_get_ext_trap_len(void *ndev, dhd_pub_t *dhdp);
-#endif
-uint32 dhd_get_time_str_len(void);
-uint32 dhd_get_health_chk_len(void *ndev, dhd_pub_t *dhdp);
-uint32 dhd_get_dhd_dump_len(void *ndev, dhd_pub_t *dhdp);
-uint32 dhd_get_cookie_log_len(void *ndev, dhd_pub_t *dhdp);
-uint32 dhd_get_ecntrs_len(void *ndev, dhd_pub_t *dhdp);
-uint32 dhd_get_rtt_len(void *ndev, dhd_pub_t *dhdp);
-uint32 dhd_get_dld_len(int log_type);
-void dhd_init_sec_hdr(log_dump_section_hdr_t *sec_hdr);
-extern char *dhd_log_dump_get_timestamp(void);
-bool dhd_log_dump_ecntr_enabled(void);
-bool dhd_log_dump_rtt_enabled(void);
-void dhd_nla_put_sssr_dump_len(void *ndev, uint32 *arr_len);
-int dhd_get_debug_dump(void *dev, const void *user_buf, uint32 len, int type);
-int
-dhd_sssr_dump_d11_buf_before(void *dev, const void *user_buf, uint32 len, int core);
-int
-dhd_sssr_dump_d11_buf_after(void *dev, const void *user_buf, uint32 len, int core);
-int
-dhd_sssr_dump_dig_buf_before(void *dev, const void *user_buf, uint32 len);
-int
-dhd_sssr_dump_dig_buf_after(void *dev, const void *user_buf, uint32 len);
-
-#ifdef DNGL_AXI_ERROR_LOGGING
-extern int dhd_os_get_axi_error_dump(void *dev, const void *user_buf, uint32 len);
-extern int dhd_os_get_axi_error_dump_size(struct net_device *dev);
-extern void dhd_os_get_axi_error_filename(struct net_device *dev, char *dump_path, int len);
-#endif /* DNGL_AXI_ERROR_LOGGING */
-
-#endif /* DHD_LOG_DUMP */
-int dhd_export_debug_data(void *mem_buf, void *fp, const void *user_buf, int buf_len, void *pos);
-#define DHD_PCIE_CONFIG_SAVE(bus) pci_save_state(bus->dev)
-#define DHD_PCIE_CONFIG_RESTORE(bus) pci_restore_state(bus->dev)
-
-typedef struct dhd_pkt_parse {
- uint32 proto; /* Network layer protocol */
- uint32 t1; /* n-tuple */
- uint32 t2;
-} dhd_pkt_parse_t;
-
-/* ========= RING API functions : exposed to others ============= */
-#define DHD_RING_TYPE_FIXED 1
-#define DHD_RING_TYPE_SINGLE_IDX 2
-uint32 dhd_ring_get_hdr_size(void);
-void *dhd_ring_init(dhd_pub_t *dhdp, uint8 *buf, uint32 buf_size, uint32 elem_size,
- uint32 elem_cnt, uint32 type);
-void dhd_ring_deinit(dhd_pub_t *dhdp, void *_ring);
-void *dhd_ring_get_first(void *_ring);
-void dhd_ring_free_first(void *_ring);
-void dhd_ring_set_read_idx(void *_ring, uint32 read_idx);
-void dhd_ring_set_write_idx(void *_ring, uint32 write_idx);
-uint32 dhd_ring_get_read_idx(void *_ring);
-uint32 dhd_ring_get_write_idx(void *_ring);
-void *dhd_ring_get_last(void *_ring);
-void *dhd_ring_get_next(void *_ring, void *cur);
-void *dhd_ring_get_prev(void *_ring, void *cur);
-void *dhd_ring_get_empty(void *_ring);
-int dhd_ring_get_cur_size(void *_ring);
-void dhd_ring_lock(void *ring, void *fist_ptr, void *last_ptr);
-void dhd_ring_lock_free(void *ring);
-void *dhd_ring_lock_get_first(void *_ring);
-void *dhd_ring_lock_get_last(void *_ring);
-int dhd_ring_lock_get_count(void *_ring);
-void dhd_ring_lock_free_first(void *ring);
-void dhd_ring_whole_lock(void *ring);
-void dhd_ring_whole_unlock(void *ring);
-
-#define DHD_DUMP_TYPE_NAME_SIZE 32
-#define DHD_DUMP_FILE_PATH_SIZE 256
-#define DHD_DUMP_FILE_COUNT_MAX 5
-#define DHD_DUMP_TYPE_COUNT_MAX 10
-
-#ifdef DHD_DUMP_MNGR
-typedef struct _DFM_elem {
- char type_name[DHD_DUMP_TYPE_NAME_SIZE];
- char file_path[DHD_DUMP_FILE_COUNT_MAX][DHD_DUMP_FILE_PATH_SIZE];
- int file_idx;
-} DFM_elem_t;
-
-typedef struct _dhd_dump_file_manage {
- DFM_elem_t elems[DHD_DUMP_TYPE_COUNT_MAX];
-} dhd_dump_file_manage_t;
-
-extern void dhd_dump_file_manage_enqueue(dhd_pub_t *dhd, char *dump_path, char *fname);
-#endif /* DHD_DUMP_MNGR */
-
-#ifdef PKT_FILTER_SUPPORT
-extern void dhd_pktfilter_offload_set(dhd_pub_t * dhd, char *arg);
-extern void dhd_pktfilter_offload_enable(dhd_pub_t * dhd, char *arg, int enable, int master_mode);
-extern void dhd_pktfilter_offload_delete(dhd_pub_t *dhd, int id);
-#endif // endif
-
-#ifdef DHD_DUMP_PCIE_RINGS
-extern int dhd_d2h_h2d_ring_dump(dhd_pub_t *dhd, void *file, const void *user_buf,
- unsigned long *file_posn, bool file_write);
-#endif /* DHD_DUMP_PCIE_RINGS */
-
-#ifdef EWP_EDL
-#define DHD_EDL_RING_SIZE (D2HRING_EDL_MAX_ITEM * D2HRING_EDL_ITEMSIZE)
-int dhd_event_logtrace_process_edl(dhd_pub_t *dhdp, uint8 *data,
- void *evt_decode_data);
-int dhd_edl_mem_init(dhd_pub_t *dhd);
-void dhd_edl_mem_deinit(dhd_pub_t *dhd);
-void dhd_prot_edl_ring_tcm_rd_update(dhd_pub_t *dhd);
-#define DHD_EDL_MEM_INIT(dhdp) dhd_edl_mem_init(dhdp)
-#define DHD_EDL_MEM_DEINIT(dhdp) dhd_edl_mem_deinit(dhdp)
-#define DHD_EDL_RING_TCM_RD_UPDATE(dhdp) \
- dhd_prot_edl_ring_tcm_rd_update(dhdp)
-#else
-#define DHD_EDL_MEM_INIT(dhdp) do { /* noop */ } while (0)
-#define DHD_EDL_MEM_DEINIT(dhdp) do { /* noop */ } while (0)
-#define DHD_EDL_RING_TCM_RD_UPDATE(dhdp) do { /* noop */ } while (0)
-#endif /* EWP_EDL */
-
-void dhd_schedule_logtrace(void *dhd_info);
-int dhd_print_fw_ver_from_file(dhd_pub_t *dhdp, char *fwpath);
-
-#define HD_PREFIX_SIZE 2 /* hexadecimal prefix size */
-#define HD_BYTE_SIZE 2 /* hexadecimal byte size */
-
-#if defined(DHD_H2D_LOG_TIME_SYNC)
-void dhd_h2d_log_time_sync_deferred_wq_schedule(dhd_pub_t *dhdp);
-void dhd_h2d_log_time_sync(dhd_pub_t *dhdp);
-#endif /* DHD_H2D_LOG_TIME_SYNC */
-extern void dhd_cleanup_if(struct net_device *net);
-
-#ifdef DNGL_AXI_ERROR_LOGGING
-extern void dhd_axi_error(dhd_pub_t *dhd);
-#ifdef DHD_USE_WQ_FOR_DNGL_AXI_ERROR
-extern void dhd_axi_error_dispatch(dhd_pub_t *dhdp);
-#endif /* DHD_USE_WQ_FOR_DNGL_AXI_ERROR */
-#endif /* DNGL_AXI_ERROR_LOGGING */
-
-#ifdef DHD_HP2P
-extern unsigned long dhd_os_hp2plock(dhd_pub_t *pub);
-extern void dhd_os_hp2punlock(dhd_pub_t *pub, unsigned long flags);
-#endif /* DHD_HP2P */
-extern struct dhd_if * dhd_get_ifp(dhd_pub_t *dhdp, uint32 ifidx);
-
-#ifdef DHD_STATUS_LOGGING
-#include <dhd_statlog.h>
-#else
-#define ST(x) 0
-#define STDIR(x) 0
-#define DHD_STATLOG_CTRL(dhdp, stat, ifidx, reason) \
- do { /* noop */ } while (0)
-#define DHD_STATLOG_DATA(dhdp, stat, ifidx, dir, cond) \
- do { BCM_REFERENCE(cond); } while (0)
-#define DHD_STATLOG_DATA_RSN(dhdp, stat, ifidx, dir, reason) \
- do { /* noop */ } while (0)
-#endif /* DHD_STATUS_LOGGING */
-
-#ifdef CONFIG_SILENT_ROAM
-extern int dhd_sroam_set_mon(dhd_pub_t *dhd, bool set);
-typedef wlc_sroam_info_v1_t wlc_sroam_info_t;
-#endif /* CONFIG_SILENT_ROAM */
-
-#ifdef SUPPORT_SET_TID
-enum dhd_set_tid_mode {
- /* Disalbe changing TID */
- SET_TID_OFF = 0,
- /* Change TID for all UDP frames */
- SET_TID_ALL_UDP,
- /* Change TID for UDP frames based on UID */
- SET_TID_BASED_ON_UID
-};
-extern void dhd_set_tid_based_on_uid(dhd_pub_t *dhdp, void *pkt);
-#endif /* SUPPORT_SET_TID */
-
-#ifdef DHD_DUMP_FILE_WRITE_FROM_KERNEL
-#define FILE_NAME_HAL_TAG ""
-#else
-#define FILE_NAME_HAL_TAG "_hal" /* The tag name concatenated by HAL */
-#endif /* DHD_DUMP_FILE_WRITE_FROM_KERNEL */
+#ifdef DHD_WAKE_STATUS
+wake_counts_t* dhd_get_wakecount(dhd_pub_t *dhdp);
+#endif /* DHD_WAKE_STATUS */
-#if defined(DISABLE_HE_ENAB) || defined(CUSTOM_CONTROL_HE_ENAB)
-extern int dhd_control_he_enab(dhd_pub_t * dhd, uint8 he_enab);
-extern uint8 control_he_enab;
-#endif /* DISABLE_HE_ENAB || CUSTOM_CONTROL_HE_ENAB */
+#ifdef BCM_ASLR_HEAP
+extern uint32 dhd_get_random_number(void);
+#endif /* BCM_ASLR_HEAP */
#endif /* _dhd_h_ */
* Provides type definitions and function prototypes used to link the
* DHD OS, bus, and protocol modules.
*
- * Copyright (C) 1999-2019, Broadcom.
- *
+ * Copyright (C) 1999-2017, Broadcom Corporation
+ *
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
* under the terms of the GNU General Public License version 2 (the "GPL"),
* available at http://www.broadcom.com/licenses/GPLv2.php, with the
* following added to such license:
- *
+ *
* As a special exception, the copyright holders of this software give you
* permission to link this software with independent modules, and to copy and
* distribute the resulting executable under terms of your choice, provided that
* the license of that module. An independent module is a module which is not
* derived from this software. The special exception does not apply to any
* modifications of the software.
- *
+ *
* Notwithstanding the above, under no circumstances may you combine this
* software in any way with any other Broadcom software provided under a license
* other than the GPL, without Broadcom's express prior written consent.
*
* <<Broadcom-WL-IPTag/Open:>>
*
- * $Id: dhd_bus.h 814378 2019-04-11 02:21:31Z $
+ * $Id: dhd_bus.h 698895 2017-05-11 02:55:17Z $
*/
#ifndef _dhd_bus_h_
* Exported from dhd bus module (dhd_usb, dhd_sdio)
*/
-/* global variable for the bus */
-extern struct dhd_bus *g_dhd_bus;
-
/* Indicate (dis)interest in finding dongles. */
extern int dhd_bus_register(void);
extern void dhd_bus_unregister(void);
/* Set the Bus Idle Time */
extern void dhd_bus_setidletime(dhd_pub_t *dhdp, int idle_time);
+/* Size of Extended Trap data Buffer */
+#ifdef BCMPCIE
+#define BCMPCIE_EXT_TRAP_DATA_MAXLEN 4096
+#endif
+
/* Send a data frame to the dongle. Callee disposes of txp. */
#ifdef BCMPCIE
extern int dhd_bus_txdata(struct dhd_bus *bus, void *txp, uint8 ifidx);
#else
extern int dhd_bus_txdata(struct dhd_bus *bus, void *txp);
-#endif // endif
+#endif
-#ifdef BCMPCIE
-extern void dhdpcie_cto_recovery_handler(dhd_pub_t *dhd);
-#endif /* BCMPCIE */
+extern struct device * dhd_bus_to_dev(struct dhd_bus *bus);
/* Send/receive a control message to/from the dongle.
* Expects caller to enforce a single outstanding transaction.
/* Device console input function */
extern int dhd_bus_console_in(dhd_pub_t *dhd, uchar *msg, uint msglen);
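Relating to the single-outstanding-transaction note above: one way a caller can serialize control traffic is with the proto block helpers declared in dhd.h. The dhd_bus_txctl()/dhd_bus_rxctl() names below stand in for the bus control entry points, which are elided here, so treat this strictly as a sketch.

static int example_ctl_transaction(dhd_pub_t *dhdp, struct dhd_bus *bus,
	uchar *req, uint reqlen, uchar *resp, uint resplen)
{
	int ret;

	dhd_os_proto_block(dhdp);		/* enforce one outstanding transaction */
	ret = dhd_bus_txctl(bus, req, reqlen);
	if (ret >= 0)
		ret = dhd_bus_rxctl(bus, resp, resplen);
	dhd_os_proto_unblock(dhdp);

	return ret;
}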
-#ifdef CONSOLE_DPC
-extern int dhd_bus_txcons(dhd_pub_t *dhd, uchar *msg, uint msglen);
-#endif
/* Deferred processing for the bus, return TRUE requests reschedule */
extern bool dhd_bus_dpc(struct dhd_bus *bus);
extern void dhd_bus_isr(bool * InterruptRecognized, bool * QueueMiniportHandleInterrupt, void *arg);
+
/* Check for and handle local prot-specific iovar commands */
extern int dhd_bus_iovar_op(dhd_pub_t *dhdp, const char *name,
void *params, int plen, void *arg, int len, bool set);
extern void *dhd_bus_pub(struct dhd_bus *bus);
extern void *dhd_bus_txq(struct dhd_bus *bus);
-extern void *dhd_bus_sih(struct dhd_bus *bus);
+extern const void *dhd_bus_sih(struct dhd_bus *bus);
extern uint dhd_bus_hdrlen(struct dhd_bus *bus);
#ifdef BCMSDIO
extern void dhd_bus_set_dotxinrx(struct dhd_bus *bus, bool val);
extern uint8 dhd_bus_is_ioready(struct dhd_bus *bus);
#else
#define dhd_bus_set_dotxinrx(a, b) do {} while (0)
-#endif // endif
+#endif
#define DHD_SET_BUS_STATE_DOWN(_bus) do { \
(_bus)->dhd->busstate = DHD_BUS_DOWN; \
#if defined(DHD_FW_COREDUMP) && (defined(BCMPCIE) || defined(BCMSDIO))
extern int dhd_bus_mem_dump(dhd_pub_t *dhd);
-extern int dhd_bus_get_mem_dump(dhd_pub_t *dhdp);
#else
#define dhd_bus_mem_dump(x)
-#define dhd_bus_get_mem_dump(x)
#endif /* DHD_FW_COREDUMP && (BCMPCIE || BCMSDIO) */
#ifdef BCMPCIE
MAX_HOST_RXBUFS,
HOST_API_VERSION,
DNGL_TO_HOST_TRAP_ADDR,
- HOST_SCB_ADDR, /* update host scb base address to dongle */
+#ifdef HOFFLOAD_MODULES
+ WRT_HOST_MODULE_ADDR
+#endif
};
typedef void (*dhd_mb_ring_t) (struct dhd_bus *, uint32);
extern void dhd_bus_flow_ring_flush_response(struct dhd_bus *bus, uint16 flowid, uint32 status);
extern uint32 dhd_bus_max_h2d_queues(struct dhd_bus *bus);
extern int dhd_bus_schedule_queue(struct dhd_bus *bus, uint16 flow_id, bool txs);
+extern void dhd_bus_set_linkdown(dhd_pub_t *dhdp, bool val);
#ifdef IDLE_TX_FLOW_MGMT
extern void dhd_bus_flow_ring_resume_response(struct dhd_bus *bus, uint16 flowid, int32 status);
#endif /* IDLE_TX_FLOW_MGMT */
+
extern int dhdpcie_bus_clock_start(struct dhd_bus *bus);
extern int dhdpcie_bus_clock_stop(struct dhd_bus *bus);
extern int dhdpcie_bus_enable_device(struct dhd_bus *bus);
extern int dhd_bus_release_dongle(struct dhd_bus *bus);
extern int dhd_bus_request_irq(struct dhd_bus *bus);
extern int dhdpcie_get_pcieirq(struct dhd_bus *bus, unsigned int *irq);
-extern void dhd_bus_aer_config(struct dhd_bus *bus);
-extern struct device * dhd_bus_to_dev(struct dhd_bus *bus);
-
-extern int dhdpcie_cto_init(struct dhd_bus *bus, bool enable);
-extern int dhdpcie_cto_cfg_init(struct dhd_bus *bus, bool enable);
+extern void dhdpcie_cto_init(struct dhd_bus *bus, bool enable);
-extern void dhdpcie_ssreset_dis_enum_rst(struct dhd_bus *bus);
#ifdef DHD_FW_COREDUMP
+extern struct dhd_bus *g_dhd_bus;
extern int dhd_dongle_mem_dump(void);
#endif /* DHD_FW_COREDUMP */
/* dump the device trap information */
extern void dhd_bus_dump_trap_info(struct dhd_bus *bus, struct bcmstrbuf *b);
-extern void dhd_bus_copy_trap_sig(struct dhd_bus *bus, trap_t *tr);
+
/* Function to set default min res mask */
extern bool dhd_bus_set_default_min_res_mask(struct dhd_bus *bus);
/* Function to reset PMU registers */
extern void dhd_bus_pmu_reg_reset(dhd_pub_t *dhdp);
-extern void dhd_bus_ucode_download(struct dhd_bus *bus);
-
#ifdef DHD_ULP
extern void dhd_bus_ulp_disable_console(dhd_pub_t *dhdp);
+extern void dhd_bus_ucode_download(struct dhd_bus *bus);
#endif /* DHD_ULP */
extern int dhd_bus_readwrite_bp_addr(dhd_pub_t *dhdp, uint addr, uint size, uint* data, bool read);
-extern int dhd_get_idletime(dhd_pub_t *dhd);
-#ifdef BCMPCIE
-extern void dhd_bus_dump_console_buffer(struct dhd_bus *bus);
-extern void dhd_bus_intr_count_dump(dhd_pub_t *dhdp);
-extern void dhd_bus_set_dpc_sched_time(dhd_pub_t *dhdp);
-extern bool dhd_bus_query_dpc_sched_errors(dhd_pub_t *dhdp);
-extern int dhd_bus_dmaxfer_lpbk(dhd_pub_t *dhdp, uint32 type);
-extern bool dhd_bus_check_driver_up(void);
-extern int dhd_bus_get_cto(dhd_pub_t *dhdp);
-extern void dhd_bus_set_linkdown(dhd_pub_t *dhdp, bool val);
-extern int dhd_bus_get_linkdown(dhd_pub_t *dhdp);
-#else
-#define dhd_bus_dump_console_buffer(x)
-static INLINE void dhd_bus_intr_count_dump(dhd_pub_t *dhdp) { UNUSED_PARAMETER(dhdp); }
-static INLINE void dhd_bus_set_dpc_sched_time(dhd_pub_t *dhdp) { }
-static INLINE bool dhd_bus_query_dpc_sched_errors(dhd_pub_t *dhdp) { return 0; }
-static INLINE int dhd_bus_dmaxfer_lpbk(dhd_pub_t *dhdp, uint32 type) { return 0; }
-static INLINE bool dhd_bus_check_driver_up(void) { return FALSE; }
-extern INLINE void dhd_bus_set_linkdown(dhd_pub_t *dhdp, bool val) { }
-extern INLINE int dhd_bus_get_linkdown(dhd_pub_t *dhdp) { return 0; }
-static INLINE int dhd_bus_get_cto(dhd_pub_t *dhdp) { return 0; }
-#endif /* BCMPCIE */
-
-#if defined(BCMPCIE) && defined(EWP_ETD_PRSRV_LOGS)
-void dhdpcie_get_etd_preserve_logs(dhd_pub_t *dhd, uint8 *ext_trap_data,
- void *event_decode_data);
-#endif // endif
-
-extern uint16 dhd_get_chipid(dhd_pub_t *dhd);
-
-#ifdef DHD_WAKE_STATUS
-extern wake_counts_t* dhd_bus_get_wakecount(dhd_pub_t *dhd);
-extern int dhd_bus_get_bus_wake(dhd_pub_t * dhd);
-#endif /* DHD_WAKE_STATUS */
#ifdef BT_OVER_SDIO
/*
int __dhdsdio_clk_disable(struct dhd_bus *bus, bus_owner_t owner, int can_wait);
void dhdsdio_reset_bt_use_count(struct dhd_bus *bus);
#endif /* BT_OVER_SDIO */
-
-int dhd_bus_perform_flr(struct dhd_bus *bus, bool force_fail);
-extern bool dhd_bus_get_flr_force_fail(struct dhd_bus *bus);
-
-extern bool dhd_bus_aspm_enable_rc_ep(struct dhd_bus *bus, bool enable);
-extern void dhd_bus_l1ss_enable_rc_ep(struct dhd_bus *bus, bool enable);
-
-bool dhd_bus_is_multibp_capable(struct dhd_bus *bus);
-
#ifdef BCMPCIE
-extern void dhdpcie_advertise_bus_cleanup(dhd_pub_t *dhdp);
-extern void dhd_msgbuf_iovar_timeout_dump(dhd_pub_t *dhd);
+extern void dhd_bus_dump_console_buffer(struct dhd_bus *bus);
+#else
+#define dhd_bus_dump_console_buffer(x)
#endif /* BCMPCIE */
-extern bool dhd_bus_force_bt_quiesce_enabled(struct dhd_bus *bus);
-
-#ifdef DHD_SSSR_DUMP
-extern int dhd_bus_fis_trigger(dhd_pub_t *dhd);
-extern int dhd_bus_fis_dump(dhd_pub_t *dhd);
-#endif /* DHD_SSSR_DUMP */
-
-#ifdef PCIE_FULL_DONGLE
-extern int dhdpcie_set_dma_ring_indices(dhd_pub_t *dhd, int32 int_val);
-#endif /* PCIE_FULL_DONGLE */
+extern uint16 dhd_get_chipid(dhd_pub_t *dhd);
-#ifdef DHD_USE_BP_RESET
-extern int dhd_bus_perform_bp_reset(struct dhd_bus *bus);
-#endif /* DHD_USE_BP_RESET */
+extern int dhd_get_idletime(dhd_pub_t *dhd);
-extern void dhd_bwm_bt_quiesce(struct dhd_bus *bus);
-extern void dhd_bwm_bt_resume(struct dhd_bus *bus);
+#ifdef DHD_WAKE_STATUS
+extern wake_counts_t* dhd_bus_get_wakecount(dhd_pub_t *dhd);
+extern int dhd_bus_get_bus_wake(dhd_pub_t * dhd);
+#endif /* DHD_WAKE_STATUS */
#endif /* _dhd_bus_h_ */
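
The change to dhd_bus_dump_console_buffer() above keeps a real prototype only for PCIe builds and turns the call into an empty macro everywhere else, so call sites need no #ifdef of their own. A self-contained sketch of that compile-out pattern, using a made-up demo_dump_console_buffer() name and the do {} while (0) form already used for dhd_bus_set_dotxinrx():

#include <stdio.h>

#ifdef BCMPCIE
/* The real implementation exists only in the PCIe bus build. */
static void demo_dump_console_buffer(void *bus)
{
	printf("dumping console buffer for bus %p\n", bus);
}
#else
/* Other bus builds compile the call away entirely. */
#define demo_dump_console_buffer(bus) do {} while (0)
#endif

int main(void)
{
	demo_dump_console_buffer(NULL);	/* expands to a real call only with -DBCMPCIE */
	return 0;
}

Built without -DBCMPCIE the call disappears at preprocessing time, which is why the header can also drop the per-function static INLINE stubs it previously carried for the non-PCIe case.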
/*
 * Broadcom logging system - Empty implementation
- * Copyright (C) 1999-2019, Broadcom.
- *
+ * Copyright (C) 1999-2017, Broadcom Corporation
+ *
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
* under the terms of the GNU General Public License version 2 (the "GPL"),
* available at http://www.broadcom.com/licenses/GPLv2.php, with the
* following added to such license:
- *
+ *
* As a special exception, the copyright holders of this software give you
* permission to link this software with independent modules, and to copy and
* distribute the resulting executable under terms of your choice, provided that
* the license of that module. An independent module is a module which is not
* derived from this software. The special exception does not apply to any
* modifications of the software.
- *
+ *
* Notwithstanding the above, under no circumstances may you combine this
* software in any way with any other Broadcom software provided under a license
* other than the GPL, without Broadcom's express prior written consent.
+++ /dev/null
-\r
-#ifdef CCODE_LIST\r
-#ifdef CONFIG_COMPAT\r
-#include <linux/compat.h>\r
-#endif /* COMFIG_COMPAT */\r
-#include <typedefs.h>\r
-#include <dhd_config.h>\r
-\r
-#ifdef BCMSDIO\r
-#define CCODE_43438\r
-#define CCODE_43455C0\r
-#define CCODE_43456C5\r
-#endif\r
-#if defined(BCMSDIO) || defined(BCMPCIE)\r
-#define CCODE_4356A2\r
-#define CCODE_4359C0\r
-#endif\r
-#ifdef BCMDBUS\r
-#define CCODE_4358U\r
-#endif\r
-\r
-#ifdef BCMSDIO\r
-#ifdef CCODE_43438\r
-const char ccode_43438[] = "RU/13";\r
-#else\r
-const char ccode_43438 = "";\r
-#endif\r
-\r
-#ifdef CCODE_43455C0\r
-const char ccode_43455c0[] = \\r
-"AE/6 AG/2 AI/1 AL/2 AS/12 AT/4 AU/6 AW/2 AZ/2 "\\r
-"BA/2 BD/1 BE/4 BG/4 BH/4 BM/12 BN/4 BR/2 BS/2 BY/3 "\\r
-"CA/2 CA/31 CH/4 CN/38 CO/17 CR/17 CY/4 CZ/4 "\\r
-"DE/3 DE/7 DK/4 "\\r
-"EC/21 EE/4 EG/13 ES/4 ET/2 "\\r
-"FI/4 FR/5 "\\r
-"GB/1 GB/6 GD/2 GF/2 GP/2 GR/4 GT/1 GU/30 "\\r
-"HK/2 HR/4 HU/4 "\\r
-"ID/1 IE/5 IL/14 IN/3 IS/4 IT/4 "\\r
-"JO/3 JP/45 JP/58 "\\r
-"KH/2 KR/45 KR/48 KR/49 KR/70 KR/71 KR/96 KW/5 KY/3 "\\r
-"LA/2 LB/5 LI/4 LK/1 LS/2 LT/4 LU/3 LV/4 "\\r
-"MA/2 MC/1 MD/2 ME/2 MK/2 MN/1 MQ/2 MR/2 MT/4 MU/2 MV/3 MW/1 MX/44 MY/3 "\\r
-"NI/2 NL/4 NO/4 NZ/4 "\\r
-"OM/4 "\\r
-"PA/17 PE/20 PH/5 PL/4 PR/38 PT/4 PY/2 "\\r
-"Q2/993 "\\r
-"RE/2 RO/4 RS/2 RU/13 "\\r
-"SE/4 SI/4 SK/4 SV/25 "\\r
-"TH/5 TN/1 TR/7 TT/3 TW/1 "\\r
-"UA/8 US/988 "\\r
-"VA/2 VE/3 VG/2 VN/4 "\\r
-"XZ/11 "\\r
-"YT/2 "\\r
-"ZA/6";\r
-#else\r
-const char ccode_43455c0[] = "";\r
-#endif\r
-\r
-#ifdef CCODE_43456C5\r
-const char ccode_43456c5[] = \\r
-"AE/6 AG/2 AI/1 AL/2 AS/12 AT/4 AU/6 AW/2 AZ/2 "\\r
-"BA/2 BD/1 BE/4 BG/4 BH/4 BM/12 BN/4 BR/4 BS/2 BY/3 "\\r
-"CA/2 CH/4 CN/38 CO/17 CR/17 CY/4 CZ/4 "\\r
-"DE/7 DK/4 "\\r
-"EC/21 EE/4 EG/13 ES/4 ET/2 "\\r
-"FI/4 FR/5 "\\r
-"GB/6 GD/2 GF/2 GP/2 GR/4 GT/1 GU/30 "\\r
-"HK/2 HR/4 HU/4 "\\r
-"ID/1 IE/5 IL/14 IN/3 IS/4 IT/4 "\\r
-"JO/3 JP/58 "\\r
-"KH/2 KR/96 KW/5 KY/3 "\\r
-"LA/2 LB/5 LI/4 LK/1 LS/2 LT/4 LU/3 LV/4 "\\r
-"MA/2 MC/1 MD/2 ME/2 MK/2 MN/1 MQ/2 MR/2 MT/4 MU/2 MV/3 MW/1 MX/44 MY/3 "\\r
-"NI/2 NL/4 NO/4 NZ/4 "\\r
-"OM/4 "\\r
-"PA/17 PE/20 PH/5 PL/4 PR/38 PT/4 PY/2 "\\r
-"Q2/993 "\\r
-"RE/2 RO/4 RS/2 RU/13 "\\r
-"SE/4 SI/4 SK/4 SV/25 "\\r
-"TH/5 TN/1 TR/7 TT/3 TW/65 "\\r
-"UA/8 US/988 "\\r
-"VA/2 VE/3 VG/2 VN/4 "\\r
-"XZ/11 "\\r
-"YT/2 "\\r
-"ZA/6";\r
-#else\r
-const char ccode_43456c5[] = "";\r
-#endif\r
-#endif\r
-\r
-#ifdef CCODE_4356A2\r
-const char ccode_4356a2[] = \\r
-"AE/6 AG/2 AI/1 AL/2 AN/2 AR/21 AS/12 AT/4 AU/6 AW/2 AZ/2 "\\r
-"BA/2 BD/2 BE/4 BG/4 BH/4 BM/12 BN/4 BR/4 BS/2 BY/3 "\\r
-"CA/31 CH/4 CN/38 CO/17 CR/17 CY/4 CZ/4 "\\r
-"DE/7 DK/4 DZ/1 "\\r
-"EC/21 EE/4 ES/4 ET/2 "\\r
-"FI/4 FR/5 "\\r
-"GB/6 GD/2 GF/2 GP/2 GR/4 GT/1 GU/12 "\\r
-"HK/2 HR/4 HU/4 "\\r
-"ID/13 IE/5 IL/7 IN/28 IS/4 IT/4 "\\r
-"JO/3 JP/58 "\\r
-"KH/2 KR/57 KW/5 KY/3 "\\r
-"LA/2 LB/5 LI/4 LK/1 LS/2 LT/4 LU/3 LV/4 "\\r
-"MA/2 MC/1 MD/2 ME/2 MK/2 MN/1 MO/2 MR/2 MT/4 MQ/2 MU/2 MV/3 MW/1 MX/20 MY/16 "\\r
-"NI/2 NL/4 NO/4 NP/3 NZ/4 "\\r
-"OM/4 "\\r
-"PA/17 PE/20 PG/2 PH/5 PL/4 PR/20 PT/4 PY/2 "\\r
-"RE/2 RO/4 RS/2 RU/986 "\\r
-"SE/4 SG/4 SG/19 SI/4 SK/4 SN/2 SV/19 "\\r
-"TH/9 TN/1 TR/7 TT/3 TW/1 "\\r
-"UA/8 UG/2 US/1 UY/1 "\\r
-"VA/2 UA/16 VE/3 VG/2 VI/13 VN/4 "\\r
-"XZ/11 "\\r
-"YT/2 "\\r
-"ZM/2 "\\r
-"E0/32";\r
-#else\r
-const char ccode_4356a2[] = "";\r
-#endif\r
-\r
-#ifdef CCODE_4359C0\r
-const char ccode_4359c0[] = \\r
-"AD/1 AE/6 AG/2 AI/1 AL/3 AS/12 AT/21 AU/6 AW/2 AZ/8 "\\r
-"BA/4 BD/1 BE/19 BG/18 BH/4 BM/12 BN/4 BR/2 BS/2 BY/3 "\\r
-"CN/38 CO/17 CR/17 CY/18 CZ/18 "\\r
-"DE/30 DK/19 "\\r
-"E0/32 EC/21 EE/18 EG/13 ES/21 ET/2 "\\r
-"FI/19 FR/21 "\\r
-"GB/996 GD/2 GE/1 GF/2 GP/2 GR/18 GT/1 GU/30 "\\r
-"HK/2 HR/18 HU/18 "\\r
-"ID/1 IE/21 IL/14 IN/3 IS/17 IT/20 "\\r
-"JO/3 JP/967 "\\r
-"KH/2 KR/70 KW/5 KY/3 "\\r
-"LA/2 LB/5 LI/17 LI/4 LK/1 LS/2 LT/18 LU/18 LV/18 "\\r
-"MA/2 MC/2 MD/3 ME/5 MK/4 MN/1 MQ/2 MR/2 MT/18 MU/2 MV/3 MW/1 MX/44 MY/3 "\\r
-"NI/2 NL/19 NO/18 NZ/4 "\\r
-"OM/4 "\\r
-"PA/17 PE/20 PH/5 PL/18 PR/38 PT/20 PY/2 "\\r
-"Q1/947 Q2/993 "\\r
-"RE/2 RO/18 RS/4 RU/986 "\\r
-"SE/19 SI/18 SK/18 SM/1 SV/25 "\\r
-"TH/5 TN/1 TR/18 TT/3 TW/1 "\\r
-"UA/16 US/988 "\\r
-"VA/3 VE/3 VG/2 VN/4 "\\r
-"XZ/11 "\\r
-"YT/2 "\\r
-"ZA/6";\r
-#else\r
-const char ccode_4359c0[] = "";\r
-#endif\r
-\r
-#ifdef CCODE_4358U\r
-const char ccode_4358u[] = \\r
-"BE/4 BR/4 CA/2 CH/4 CN/38 CY/4 DE/7 DK/4 ES/4 "\\r
-"FI/4 FR/5 GB/6 GR/4 HK/2 HU/4 IE/5 IL/7 IS/4 "\\r
-"IT/4 JP/72 KE/0 MY/3 NL/4 PT/4 SA/5 SE/4 SG/0 "\\r
-"SZ/0 TH/5 TR/7 TW/230 US/0 VN/4";\r
-#else\r
-const char ccode_4358u[] = "";\r
-#endif\r
-\r
-typedef struct ccode_list_map_t {\r
- uint chip;\r
- uint chiprev;\r
- const char *ccode_list;\r
- const char *ccode_ww;\r
-} ccode_list_map_t;\r
-\r
-extern const char ccode_43438[];\r
-extern const char ccode_43455c0[];\r
-extern const char ccode_43456c5[];\r
-extern const char ccode_4356a2[];\r
-extern const char ccode_4359c0[];\r
-extern const char ccode_4358u[];\r
-\r
-const ccode_list_map_t ccode_list_map[] = {\r
- /* ChipID Chiprev ccode */\r
-#ifdef BCMSDIO\r
- {BCM43430_CHIP_ID, 0, ccode_43438, ""},\r
- {BCM43430_CHIP_ID, 1, ccode_43438, ""},\r
- {BCM4345_CHIP_ID, 6, ccode_43455c0, "XZ/11"},\r
- {BCM43454_CHIP_ID, 6, ccode_43455c0, "XZ/11"},\r
- {BCM4345_CHIP_ID, 9, ccode_43456c5, "XZ/11"},\r
- {BCM43454_CHIP_ID, 9, ccode_43456c5, "XZ/11"},\r
- {BCM4354_CHIP_ID, 2, ccode_4356a2, "XZ/11"},\r
- {BCM4356_CHIP_ID, 2, ccode_4356a2, "XZ/11"},\r
- {BCM4371_CHIP_ID, 2, ccode_4356a2, "XZ/11"},\r
- {BCM4359_CHIP_ID, 9, ccode_4359c0, "XZ/11"},\r
-#endif\r
-#ifdef BCMPCIE\r
- {BCM4354_CHIP_ID, 2, ccode_4356a2, "XZ/11"},\r
- {BCM4356_CHIP_ID, 2, ccode_4356a2, "XZ/11"},\r
- {BCM4359_CHIP_ID, 9, ccode_4359c0, "XZ/11"},\r
-#endif\r
-#ifdef BCMDBUS\r
- {BCM43569_CHIP_ID, 2, ccode_4358u, "XW/0"},\r
-#endif\r
-};\r
-\r
-int\r
-dhd_ccode_map_country_list(dhd_pub_t *dhd, wl_country_t *cspec)\r
-{\r
- int bcmerror = -1, i;\r
- uint chip = dhd->conf->chip, chiprev = dhd->conf->chiprev; \r
- const char *ccode_list = NULL, *ccode_ww = NULL;\r
- char *pch;\r
-\r
- for (i=0; i<sizeof(ccode_list_map)/sizeof(ccode_list_map[0]); i++) {\r
- const ccode_list_map_t* row = &ccode_list_map[i];\r
- if (row->chip == chip && row->chiprev == chiprev) {\r
- ccode_list = row->ccode_list;\r
- ccode_ww = row->ccode_ww;\r
- break;\r
- }\r
- }\r
-\r
- if (ccode_list) {\r
- pch = strstr(ccode_list, cspec->ccode);\r
- if (pch) {\r
- cspec->rev = (int)simple_strtol(pch+strlen(cspec->ccode)+1, NULL, 0);\r
- bcmerror = 0;\r
- }\r
- }\r
-\r
- if (bcmerror && ccode_ww && strlen(ccode_ww)>=4) {\r
- memcpy(cspec->ccode, ccode_ww, 2);\r
- cspec->rev = (int)simple_strtol(ccode_ww+3, NULL, 0);\r
- }\r
-\r
- return bcmerror;\r
-}\r
-#endif\r
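
The deleted dhd_ccode_map_country_list() above resolves a country code's regulatory revision by locating the two-letter code in a per-chip, space-separated "CC/rev" list with strstr() and parsing the digits after the slash with simple_strtol(), falling back to the worldwide entry ("XZ/11" or "XW/0") when the code is not listed. A minimal user-space sketch of that string handling, using libc strtol() in place of the kernel's simple_strtol() (an illustration only, not the driver code):

#include <stdlib.h>
#include <string.h>

/* Return the revision for a two-letter country code, or -1 when it is not in the list. */
static int ccode_lookup_rev(const char *list, const char *cc)
{
	const char *p;

	if (list == NULL || cc == NULL || strlen(cc) != 2)
		return -1;
	p = strstr(list, cc);
	if (p == NULL)
		return -1;
	/* each entry has the form "CC/rev", so the revision starts right after "CC/" */
	return (int)strtol(p + 3, NULL, 0);
}

For example, ccode_lookup_rev("DE/7 DK/4 RU/13", "RU") returns 13. Like the deleted code, this accepts the first substring match, so the caller is expected to pass a well-formed two-letter code.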
/*
* DHD Protocol Module for CDC and BDC.
*
- * Copyright (C) 1999-2019, Broadcom.
- *
+ * Copyright (C) 1999-2017, Broadcom Corporation
+ *
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
* under the terms of the GNU General Public License version 2 (the "GPL"),
* available at http://www.broadcom.com/licenses/GPLv2.php, with the
* following added to such license:
- *
+ *
* As a special exception, the copyright holders of this software give you
* permission to link this software with independent modules, and to copy and
* distribute the resulting executable under terms of your choice, provided that
* the license of that module. An independent module is a module which is not
* derived from this software. The special exception does not apply to any
* modifications of the software.
- *
+ *
* Notwithstanding the above, under no circumstances may you combine this
* software in any way with any other Broadcom software provided under a license
* other than the GPL, without Broadcom's express prior written consent.
*
* <<Broadcom-WL-IPTag/Open:>>
*
- * $Id: dhd_cdc.c 752794 2018-03-19 04:00:31Z $
+ * $Id: dhd_cdc.c 699163 2017-05-12 05:18:23Z $
*
* BDC is like CDC, except it includes a header for data packets to convey
* packet priority over the bus, and flags (e.g. to indicate checksum status
#include <dhd_bus.h>
#include <dhd_dbg.h>
+
#ifdef PROP_TXSTATUS
#include <wlfc_proto.h>
#include <dhd_wlfc.h>
-#endif // endif
+#endif
#ifdef BCMDBUS
#include <dhd_config.h>
#endif /* BCMDBUS */
#include <dhd_ulp.h>
#endif /* DHD_ULP */
+
#define RETRIES 2 /* # of retries to retrieve matching ioctl response */
#define BUS_HEADER_LEN (24+DHD_SDALIGN) /* Must be at least SDPCM_RESERVE
 * defined in dhd_sdio.c (amount of header that might be added)
unsigned char buf[WLC_IOCTL_MAXLEN + ROUND_UP_MARGIN];
} dhd_prot_t;
-uint16
-dhd_prot_get_ioctl_trans_id(dhd_pub_t *dhdp)
-{
- /* SDIO does not have ioctl_trans_id yet, so return -1 */
- return -1;
-}
-
static int
dhdcdc_msg(dhd_pub_t *dhd)
{
#endif /* BCMDBUS */
#ifdef BCMDBUS
- timeout = dhd_os_ioctl_resp_wait(dhd, &prot->ctl_completed);
+ timeout = dhd_os_ioctl_resp_wait(dhd, &prot->ctl_completed, false);
if ((!timeout) || (!prot->ctl_completed)) {
DHD_ERROR(("Txctl timeout %d ctl_completed %d\n",
timeout, prot->ctl_completed));
/* interrupt polling is successfully submitted. Wait for dongle to send
* interrupt
*/
- timeout = dhd_os_ioctl_resp_wait(dhd, &prot->ctl_completed);
+ timeout = dhd_os_ioctl_resp_wait(dhd, &prot->ctl_completed, false);
if (!timeout) {
DHD_ERROR(("intr poll wait timed out\n"));
}
DHD_OS_IOCTL_RESP_UNLOCK(dhd);
goto done;
}
- timeout = dhd_os_ioctl_resp_wait(dhd, &prot->ctl_completed);
+ timeout = dhd_os_ioctl_resp_wait(dhd, &prot->ctl_completed, false);
if ((!timeout) || (!prot->ctl_completed)) {
DHD_ERROR(("Rxctl timeout %d ctl_completed %d\n",
timeout, prot->ctl_completed));
DHD_TRACE(("%s: Enter\n", __FUNCTION__));
DHD_CTL(("%s: cmd %d len %d\n", __FUNCTION__, cmd, len));
+
/* Respond "bcmerror" and "bcmerrorstr" with local cache */
if (cmd == WLC_GET_VAR && buf)
{
memset(msg, 0, sizeof(cdc_ioctl_t));
-#ifdef BCMSPI
- /* 11bit gSPI bus allows 2048bytes of max-data. We restrict 'len'
- * value which is 8Kbytes for various 'get' commands to 2000. 48 bytes are
- * left for sw headers and misc.
- */
- if (len > 2000) {
- DHD_ERROR(("dhdcdc_query_ioctl: len is truncated to 2000 bytes\n"));
- len = 2000;
- }
-#endif /* BCMSPI */
msg->cmd = htol32(cmd);
msg->len = htol32(len);
msg->flags = (++prot->reqid << CDCF_IOC_ID_SHIFT);
return ret;
}
-#ifdef DHD_PM_CONTROL_FROM_FILE
-extern bool g_pm_control;
-#endif /* DHD_PM_CONTROL_FROM_FILE */
static int
dhdcdc_set_ioctl(dhd_pub_t *dhd, int ifidx, uint cmd, void *buf, uint len, uint8 action)
}
if (cmd == WLC_SET_PM) {
-#ifdef DHD_PM_CONTROL_FROM_FILE
- if (g_pm_control == TRUE) {
- DHD_ERROR(("%s: SET PM ignored!(Requested:%d)\n",
- __FUNCTION__, buf ? *(char *)buf : 0));
- goto done;
- }
-#endif /* DHD_PM_CONTROL_FROM_FILE */
DHD_TRACE_HW4(("%s: SET PM to %d\n", __FUNCTION__, buf ? *(char *)buf : 0));
}
bcm_bprintf(strbuf, "Protocol CDC: reqid %d\n", dhdp->prot->reqid);
#ifdef PROP_TXSTATUS
dhd_wlfc_dump(dhdp, strbuf);
-#endif // endif
+#endif
}
/* The FreeBSD PKTPUSH could change the packet buf pointer
if (PKTSUMNEEDED(PKTBUF))
h->flags |= BDC_FLAG_SUM_NEEDED;
+
h->priority = (PKTPRIO(PKTBUF) & BDC_PRIORITY_MASK);
h->flags2 = 0;
h->dataOffset = 0;
#ifdef BDC
/* Length of BDC(+WLFC) headers pushed */
hdrlen = BDC_HEADER_LEN + (((struct bdc_header *)PKTBUF)->dataOffset * 4);
-#endif // endif
+#endif
return hdrlen;
}
{
#ifdef BDC
struct bdc_header *h;
-#endif // endif
+#endif
uint8 data_offset = 0;
DHD_TRACE(("%s: Enter\n", __FUNCTION__));
PKTPULL(dhd->osh, pktbuf, BDC_HEADER_LEN);
#endif /* BDC */
+
#ifdef PROP_TXSTATUS
if (!DHD_PKTTAG_PKTDIR(PKTTAG(pktbuf))) {
/*
return 0;
}
+
int
dhd_prot_attach(dhd_pub_t *dhd)
{
dhd->prot = cdc;
#ifdef BDC
dhd->hdrlen += BDC_HEADER_LEN;
-#endif // endif
+#endif
dhd->maxctl = WLC_IOCTL_MAXLEN + sizeof(cdc_ioctl_t) + ROUND_UP_MARGIN;
return 0;
{
#ifdef PROP_TXSTATUS
dhd_wlfc_deinit(dhd);
-#endif // endif
+#endif
DHD_OS_PREFREE(dhd, dhd->prot, sizeof(dhd_prot_t));
dhd->prot = NULL;
}
}
#endif /* BCMDBUS */
+
DHD_SSSR_DUMP_INIT(dhd);
dhd_process_cid_mac(dhd, TRUE);
/* Nothing to do for CDC */
}
+
static void
dhd_get_hostreorder_pkts(void *osh, struct reorder_info *ptr, void **pkt,
uint32 *pkt_count, void **pplast, uint8 start, uint8 end)
cur_idx = reorder_info_buf[WLHOST_REORDERDATA_CURIDX_OFFSET];
exp_idx = reorder_info_buf[WLHOST_REORDERDATA_EXPIDX_OFFSET];
+
if ((exp_idx == ptr->exp_idx) && (cur_idx != ptr->exp_idx)) {
/* still in the current hole */
/* enqueue the current on the buffer chain */
/*
* Linux cfg80211 driver - Dongle Host Driver (DHD) related
*
- * Copyright (C) 1999-2019, Broadcom.
- *
+ * Copyright (C) 1999-2017, Broadcom Corporation
+ *
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
* under the terms of the GNU General Public License version 2 (the "GPL"),
* available at http://www.broadcom.com/licenses/GPLv2.php, with the
* following added to such license:
- *
+ *
* As a special exception, the copyright holders of this software give you
* permission to link this software with independent modules, and to copy and
* distribute the resulting executable under terms of your choice, provided that
* the license of that module. An independent module is a module which is not
* derived from this software. The special exception does not apply to any
* modifications of the software.
- *
+ *
* Notwithstanding the above, under no circumstances may you combine this
* software in any way with any other Broadcom software provided under a license
* other than the GPL, without Broadcom's express prior written consent.
*
* <<Broadcom-WL-IPTag/Open:>>
*
- * $Id: dhd_cfg80211.c 807961 2019-03-05 05:47:47Z $
+ * $Id: dhd_cfg80211.c 699163 2017-05-12 05:18:23Z $
*/
#include <linux/vmalloc.h>
#ifdef PKT_FILTER_SUPPORT
#include <dngl_stats.h>
#include <dhd.h>
-#endif // endif
+#endif
#ifdef PKT_FILTER_SUPPORT
extern uint dhd_pkt_filter_enable;
extern uint dhd_master_mode;
extern void dhd_pktfilter_offload_enable(dhd_pub_t * dhd, char *arg, int enable, int master_mode);
-#endif // endif
+#endif
static int dhd_dongle_up = FALSE;
{
struct net_device *ndev;
s32 err = 0;
- dhd_pub_t *dhd = (dhd_pub_t *)(cfg->pub);
WL_TRACE(("In\n"));
- if ((!dhd_dongle_up) || (!dhd->up)) {
- WL_INFORM_MEM(("Dongle is already down\n"));
- err = 0;
- goto done;
+ if (!dhd_dongle_up) {
+ WL_ERR(("Dongle is already down\n"));
+ return err;
}
+
ndev = bcmcfg_to_prmry_ndev(cfg);
wl_dongle_down(ndev);
-done:
dhd_dongle_up = FALSE;
- return err;
+ return 0;
}
s32 dhd_cfg80211_set_p2p_info(struct bcm_cfg80211 *cfg, int val)
return 0;
}
-#ifdef WL_STATIC_IF
-int32
-wl_cfg80211_update_iflist_info(struct bcm_cfg80211 *cfg, struct net_device *ndev,
- int ifidx, uint8 *addr, int bssidx, char *name, int if_state)
-{
- return dhd_update_iflist_info(cfg->pub, ndev, ifidx, addr, bssidx, name, if_state);
-}
-#endif /* WL_STATIC_IF */
-
struct net_device* wl_cfg80211_allocate_if(struct bcm_cfg80211 *cfg, int ifidx, const char *name,
uint8 *mac, uint8 bssidx, const char *dngl_name)
{
return dhd_remove_if(cfg->pub, ifidx, rtnl_lock_reqd);
}
-void wl_cfg80211_cleanup_if(struct net_device *net)
-{
- struct bcm_cfg80211 *cfg = wl_get_cfg(net);
- BCM_REFERENCE(cfg);
- dhd_cleanup_if(net);
-}
-
struct net_device * dhd_cfg80211_netdev_free(struct net_device *ndev)
{
- struct bcm_cfg80211 *cfg;
-
if (ndev) {
- cfg = wl_get_cfg(ndev);
if (ndev->ieee80211_ptr) {
- MFREE(cfg->osh, ndev->ieee80211_ptr, sizeof(struct wireless_dev));
+ kfree(ndev->ieee80211_ptr);
ndev->ieee80211_ptr = NULL;
}
free_netdev(ndev);
{
#ifdef WL_CFG80211
ndev = dhd_cfg80211_netdev_free(ndev);
-#endif // endif
+#endif
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 11, 0)
if (ndev)
free_netdev(ndev);
+#endif
}
static s32
return err;
}
-s32
-wl_dongle_roam(struct net_device *ndev, u32 roamvar, u32 bcn_timeout)
-{
- s32 err = 0;
-
- /* Setup timeout if Beacons are lost and roam is off to report link down */
- if (roamvar) {
- err = wldev_iovar_setint(ndev, "bcn_timeout", bcn_timeout);
- if (unlikely(err)) {
- WL_ERR(("bcn_timeout error (%d)\n", err));
- goto dongle_rom_out;
- }
- }
- /* Enable/Disable built-in roaming to allow supplicant to take care of roaming */
- err = wldev_iovar_setint(ndev, "roam_off", roamvar);
- if (unlikely(err)) {
- WL_ERR(("roam_off error (%d)\n", err));
- goto dongle_rom_out;
- }
-dongle_rom_out:
- return err;
-}
s32 dhd_config_dongle(struct bcm_cfg80211 *cfg)
{
#ifndef DHD_SDALIGN
#define DHD_SDALIGN 32
-#endif // endif
+#endif
struct net_device *ndev;
s32 err = 0;
dhd = cfg->pub;
DHD_OS_WAKE_LOCK(dhd);
+ /* send to dongle only if we are not waiting for reload already */
+ if (dhd->hang_was_sent) {
+ WL_ERR(("HANG was sent up earlier\n"));
+ DHD_OS_WAKE_LOCK_CTRL_TIMEOUT_ENABLE(dhd, DHD_EVENT_TIMEOUT_MS);
+ DHD_OS_WAKE_UNLOCK(dhd);
+ return OSL_ERROR(BCME_DONGLE_DOWN);
+ }
+
ndev = wdev_to_wlc_ndev(wdev, cfg);
index = dhd_net2idx(dhd->info, ndev);
if (index == DHD_BAD_IF) {
/*
* Linux cfg80211 driver - Dongle Host Driver (DHD) related
*
- * Copyright (C) 1999-2019, Broadcom.
- *
+ * Copyright (C) 1999-2017, Broadcom Corporation
+ *
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
* under the terms of the GNU General Public License version 2 (the "GPL"),
* available at http://www.broadcom.com/licenses/GPLv2.php, with the
* following added to such license:
- *
+ *
* As a special exception, the copyright holders of this software give you
* permission to link this software with independent modules, and to copy and
* distribute the resulting executable under terms of your choice, provided that
* the license of that module. An independent module is a module which is not
* derived from this software. The special exception does not apply to any
* modifications of the software.
- *
+ *
* Notwithstanding the above, under no circumstances may you combine this
* software in any way with any other Broadcom software provided under a license
* other than the GPL, without Broadcom's express prior written consent.
*
* <<Broadcom-WL-IPTag/Open:>>
*
- * $Id: dhd_cfg80211.h 763539 2018-05-19 06:39:21Z $
+ * $Id: dhd_cfg80211.h 612483 2016-01-14 03:44:27Z $
*/
+
#ifndef __DHD_CFG80211__
#define __DHD_CFG80211__
#ifndef WL_ERR
#define WL_ERR CFG80211_ERR
-#endif // endif
+#endif
#ifndef WL_TRACE
#define WL_TRACE CFG80211_TRACE
-#endif // endif
+#endif
s32 dhd_cfg80211_init(struct bcm_cfg80211 *cfg);
s32 dhd_cfg80211_deinit(struct bcm_cfg80211 *cfg);
int dhd_cfgvendor_priv_string_handler(struct bcm_cfg80211 *cfg,
struct wireless_dev *wdev, const struct bcm_nlmsg_hdr *nlioc, void *data);
-s32 wl_dongle_roam(struct net_device *ndev, u32 roamvar, u32 bcn_timeout);
#endif /* __DHD_CFG80211__ */
/*
* Broadcom Dongle Host Driver (DHD), common DHD core.
*
- * Copyright (C) 1999-2019, Broadcom.
- *
+ * Copyright (C) 1999-2017, Broadcom Corporation
+ *
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
* under the terms of the GNU General Public License version 2 (the "GPL"),
* available at http://www.broadcom.com/licenses/GPLv2.php, with the
* following added to such license:
- *
+ *
* As a special exception, the copyright holders of this software give you
* permission to link this software with independent modules, and to copy and
* distribute the resulting executable under terms of your choice, provided that
* the license of that module. An independent module is a module which is not
* derived from this software. The special exception does not apply to any
* modifications of the software.
- *
+ *
* Notwithstanding the above, under no circumstances may you combine this
* software in any way with any other Broadcom software provided under a license
* other than the GPL, without Broadcom's express prior written consent.
*
* <<Broadcom-WL-IPTag/Open:>>
*
- * $Id: dhd_common.c 826445 2019-06-20 04:47:47Z $
+ * $Id: dhd_common.c 710862 2017-07-14 07:43:59Z $
*/
#include <typedefs.h>
#include <osl.h>
#include <epivers.h>
#include <bcmutils.h>
-#include <bcmstdlib_s.h>
#include <bcmendian.h>
#include <dngl_stats.h>
+#include <wlioctl.h>
#include <dhd.h>
#include <dhd_ip.h>
#include <bcmevent.h>
-#include <dhdioctl.h>
#ifdef PCIE_FULL_DONGLE
#include <bcmmsgbuf.h>
#ifdef BCMPCIE
#include <dhd_flowring.h>
-#endif // endif
+#endif
#include <dhd_bus.h>
#include <dhd_proto.h>
+#include <dhd_config.h>
#include <bcmsdbus.h>
#include <dhd_dbg.h>
-#include <802.1d.h>
#include <dhd_debug.h>
-#include <dhd_dbg_ring.h>
#include <dhd_mschdbg.h>
#include <msgtrace.h>
-#include <dhd_config.h>
-#include <wl_android.h>
#ifdef WL_CFG80211
#include <wl_cfg80211.h>
-#endif // endif
-#if defined(PNO_SUPPORT)
+#endif
+#ifdef PNO_SUPPORT
#include <dhd_pno.h>
-#endif /* OEM_ANDROID && PNO_SUPPORT */
+#endif
#ifdef RTT_SUPPORT
#include <dhd_rtt.h>
-#endif // endif
+#endif
#ifdef DNGL_EVENT_SUPPORT
#include <dnglevent.h>
-#endif // endif
+#endif
#define htod32(i) (i)
#define htod16(i) (i)
#ifdef PROP_TXSTATUS
#include <wlfc_proto.h>
#include <dhd_wlfc.h>
-#endif // endif
+#endif
-#if defined(DHD_POST_EAPOL_M1_AFTER_ROAM_EVT)
+#ifdef DHD_WMF
#include <dhd_linux.h>
-#endif // endif
+#include <dhd_wmf_linux.h>
+#endif /* DHD_WMF */
#ifdef DHD_L2_FILTER
#include <dhd_l2_filter.h>
#ifdef DHD_PSTA
#include <dhd_psta.h>
#endif /* DHD_PSTA */
+#ifdef DHD_TIMESYNC
+#include <dhd_timesync.h>
+#endif /* DHD_TIMESYNC */
#ifdef DHD_WET
#include <dhd_wet.h>
#endif /* DHD_WET */
-#ifdef DHD_LOG_DUMP
-#include <dhd_dbg.h>
-#endif /* DHD_LOG_DUMP */
+#if defined(BCMEMBEDIMAGE) && defined(DHD_EFI)
+#include <nvram_4364.h>
+#endif
-#ifdef DHD_LOG_PRINT_RATE_LIMIT
-int log_print_threshold = 0;
-#endif /* DHD_LOG_PRINT_RATE_LIMIT */
-int dhd_msg_level = DHD_ERROR_VAL | DHD_FWLOG_VAL;// | DHD_EVENT_VAL
- /* For CUSTOMER_HW4 do not enable DHD_IOVAR_MEM_VAL by default */
-// | DHD_PKT_MON_VAL;
+#ifdef WLMEDIA_HTSF
+extern void htsf_update(struct dhd_info *dhd, void *data);
+#endif
+
+extern int is_wlc_event_frame(void *pktdata, uint pktlen, uint16 exp_usr_subtype,
+ bcm_event_msg_u_t *out_event);
+
+/* By default all logs are enabled */
+int dhd_msg_level = DHD_ERROR_VAL | DHD_MSGTRACE_VAL | DHD_FWLOG_VAL;
-#if defined(WL_WIRELESS_EXT)
-#include <wl_iw.h>
-#endif // endif
+
+#if defined(WL_WLC_SHIM)
+#include <wl_shim.h>
+#else
+#endif /* WL_WLC_SHIM */
#ifdef DHD_ULP
#include <dhd_ulp.h>
#include <sdiovar.h>
#endif /* DHD_DEBUG */
-#ifdef DHD_PCIE_NATIVE_RUNTIMEPM
-#include <linux/pm_runtime.h>
-#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
-
-#ifdef CSI_SUPPORT
-#include <dhd_csi.h>
-#endif /* CSI_SUPPORT */
-
#ifdef SOFTAP
char fw_path2[MOD_PARAM_PATHLEN];
extern bool softap_enabled;
-#endif // endif
+#endif
+
+#ifdef REPORT_FATAL_TIMEOUTS
+/* Default timeout value in ms */
+#define SCAN_TIMEOUT_DEFAULT 1
+#define JOIN_TIMEOUT_DEFAULT 7500
+#ifdef DHD_EFI
+#define BUS_TIMEOUT_DEFAULT 8000000 /* 800ms, in units of 100ns */
+#define CMD_TIMEOUT_DEFAULT 15000000 /* 1.5sec, in units of 100ns */
+#else
+#define BUS_TIMEOUT_DEFAULT 800
+#define CMD_TIMEOUT_DEFAULT 1200
+#endif /* DHD_EFI */
+#endif /* REPORT_FATAL_TIMEOUTS */
#ifdef SHOW_LOGTRACE
-#define BYTES_AHEAD_NUM 10 /* address in map file is before these many bytes */
+#define BYTES_AHEAD_NUM 11 /* address in map file is before these many bytes */
#define READ_NUM_BYTES 1000 /* read map file each time this No. of bytes */
#define GO_BACK_FILE_POS_NUM_BYTES 100 /* set file pos back to cur pos */
-static char *ramstart_str = " text_start"; /* string in mapfile has addr ramstart */
-static char *rodata_start_str = " rodata_start"; /* string in mapfile has addr rodata start */
-static char *rodata_end_str = " rodata_end"; /* string in mapfile has addr rodata end */
+static char *ramstart_str = "text_start"; /* string in mapfile has addr ramstart */
+static char *rodata_start_str = "rodata_start"; /* string in mapfile has addr rodata start */
+static char *rodata_end_str = "rodata_end"; /* string in mapfile has addr rodata end */
#define RAMSTART_BIT 0x01
#define RDSTART_BIT 0x02
#define RDEND_BIT 0x04
#define ALL_MAP_VAL (RAMSTART_BIT | RDSTART_BIT | RDEND_BIT)
#endif /* SHOW_LOGTRACE */
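
The SHOW_LOGTRACE constants above drive a scan of the firmware map file: it is read READ_NUM_BYTES at a time, each marker string (text_start, rodata_start, rodata_end) is located, the hexadecimal address sitting BYTES_AHEAD_NUM bytes before the marker is parsed, and a bit is set per marker until ALL_MAP_VAL shows that all three addresses were found. A simplified sketch of that accumulation, assuming the whole map file is already in a buffer and that each marker is preceded by an 8-digit hex address plus a symbol-type field (the real code reads in chunks and steps the file position back between reads to catch markers split across chunk boundaries):

#include <stdint.h>
#include <stdlib.h>
#include <string.h>

#define SK_RAMSTART_BIT 0x01
#define SK_RDSTART_BIT  0x02
#define SK_RDEND_BIT    0x04
#define SK_ALL_MAP_VAL  (SK_RAMSTART_BIT | SK_RDSTART_BIT | SK_RDEND_BIT)
#define SK_BYTES_AHEAD  11	/* assumed offset from the marker back to the start of the address field */

/* Parse the hex address that precedes 'marker' in 'buf'; return 0 when not found. */
static uint32_t addr_before_marker(const char *buf, const char *marker)
{
	const char *p = strstr(buf, marker);

	if (p == NULL || (p - buf) < SK_BYTES_AHEAD)
		return 0;
	return (uint32_t)strtoul(p - SK_BYTES_AHEAD, NULL, 16);
}

static int parse_map_addrs(const char *buf, uint32_t *ramstart,
	uint32_t *rdstart, uint32_t *rdend)
{
	int found = 0;

	if ((*ramstart = addr_before_marker(buf, "text_start")) != 0)
		found |= SK_RAMSTART_BIT;
	if ((*rdstart = addr_before_marker(buf, "rodata_start")) != 0)
		found |= SK_RDSTART_BIT;
	if ((*rdend = addr_before_marker(buf, "rodata_end")) != 0)
		found |= SK_RDEND_BIT;

	return (found == SK_ALL_MAP_VAL) ? 0 : -1;	/* 0 only when all three addresses were found */
}

The SK_-prefixed names are local to the sketch so they do not collide with the driver's macros of the same values.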
-#ifdef SHOW_LOGTRACE
-/* the fw file path is taken from either the module parameter at
- * insmod time or is defined as a constant of different values
- * for different platforms
- */
-extern char *st_str_file_path;
-#endif /* SHOW_LOGTRACE */
-
-#define DHD_TPUT_MAX_TX_PKTS_BATCH 1000
-
-#ifdef EWP_EDL
-typedef struct msg_hdr_edl {
- uint32 infobuf_ver;
- info_buf_payload_hdr_t pyld_hdr;
- msgtrace_hdr_t trace_hdr;
-} msg_hdr_edl_t;
-#endif /* EWP_EDL */
-
/* Last connection success/failure status */
uint32 dhd_conn_event;
uint32 dhd_conn_status;
extern int dhd_change_mtu(dhd_pub_t *dhd, int new_mtu, int ifidx);
#if !defined(AP) && defined(WLP2P)
extern int dhd_get_concurrent_capabilites(dhd_pub_t *dhd);
-#endif // endif
+#endif
extern int dhd_socram_dump(struct dhd_bus *bus);
-extern void dhd_set_packet_filter(dhd_pub_t *dhd);
#ifdef DNGL_EVENT_SUPPORT
static void dngl_host_event_process(dhd_pub_t *dhdp, bcm_dngl_event_t *event,
#ifdef DHD_DEBUG
#ifndef SRCBASE
#define SRCBASE "drivers/net/wireless/bcmdhd"
-#endif // endif
+#endif
#define DHD_COMPILED "\nCompiled in " SRCBASE
#endif /* DHD_DEBUG */
const char dhd_version[] = "Dongle Host Driver, version " EPI_VERSION_STR;
#else
const char dhd_version[] = "\nDongle Host Driver, version " EPI_VERSION_STR;
-#endif // endif
+#endif
char fw_version[FW_VER_STR_LEN] = "\0";
char clm_version[CLM_VER_STR_LEN] = "\0";
void dhd_set_timer(void *bus, uint wdtick);
-static char* ioctl2str(uint32 ioctl);
+#if defined(TRAFFIC_MGMT_DWM)
+static int traffic_mgmt_add_dwm_filter(dhd_pub_t *dhd,
+ trf_mgmt_filter_list_t * trf_mgmt_filter_list, int len);
+#endif
/* IOVar table */
enum {
IOV_DHD_JOIN_TIMEOUT_DBG,
IOV_SCAN_TIMEOUT,
IOV_MEM_DEBUG,
-#ifdef BCMPCIE
- IOV_FLOW_RING_DEBUG,
-#endif /* BCMPCIE */
#endif /* defined(DHD_DEBUG) */
#ifdef PROP_TXSTATUS
IOV_PROPTXSTATUS_ENABLE,
IOV_PROPTXSTATUS_RXPKT_CHK,
#endif /* PROP_TXSTATUS */
IOV_BUS_TYPE,
+#ifdef WLMEDIA_HTSF
+ IOV_WLPKTDLYSTAT_SZ,
+#endif
IOV_CHANGEMTU,
IOV_HOSTREORDER_FLOWS,
#ifdef DHDTCPACK_SUPPRESS
IOV_TCPACK_SUPPRESS,
#endif /* DHDTCPACK_SUPPRESS */
+#ifdef DHD_WMF
+ IOV_WMF_BSS_ENAB,
+ IOV_WMF_UCAST_IGMP,
+ IOV_WMF_MCAST_DATA_SENDUP,
+#ifdef WL_IGMP_UCQUERY
+ IOV_WMF_UCAST_IGMP_QUERY,
+#endif /* WL_IGMP_UCQUERY */
+#ifdef DHD_UCAST_UPNP
+ IOV_WMF_UCAST_UPNP,
+#endif /* DHD_UCAST_UPNP */
+ IOV_WMF_PSTA_DISABLE,
+#endif /* DHD_WMF */
+#if defined(TRAFFIC_MGMT_DWM)
+ IOV_TRAFFIC_MGMT_DWM,
+#endif
IOV_AP_ISOLATE,
#ifdef DHD_L2_FILTER
IOV_DHCP_UNICAST,
IOV_BLOCK_PING,
IOV_PROXY_ARP,
IOV_GRAT_ARP,
- IOV_BLOCK_TDLS,
#endif /* DHD_L2_FILTER */
IOV_DHD_IE,
#ifdef DHD_PSTA
IOV_LMTEST,
#ifdef DHD_MCAST_REGEN
IOV_MCAST_REGEN_BSS_ENABLE,
-#endif // endif
+#endif
#ifdef SHOW_LOGTRACE
IOV_DUMP_TRACE_LOG,
#endif /* SHOW_LOGTRACE */
+#ifdef REPORT_FATAL_TIMEOUTS
+ IOV_SCAN_TO,
+ IOV_JOIN_TO,
+ IOV_CMD_TO,
+ IOV_OQS_TO,
+#endif /* REPORT_FATAL_TIMEOUTS */
IOV_DONGLE_TRAP_TYPE,
IOV_DONGLE_TRAP_INFO,
IOV_BPADDR,
- IOV_DUMP_DONGLE, /**< dumps core registers and d11 memories */
-#if defined(DHD_LOG_DUMP)
- IOV_LOG_DUMP,
-#endif /* DHD_LOG_DUMP */
- IOV_TPUT_TEST,
- IOV_FIS_TRIGGER,
- IOV_DEBUG_BUF_DEST_STAT,
-#ifdef DHD_DEBUG
- IOV_INDUCE_ERROR,
-#endif /* DHD_DEBUG */
-#ifdef WL_IFACE_MGMT_CONF
-#ifdef WL_CFG80211
-#ifdef WL_NANP2P
- IOV_CONC_DISC,
-#endif /* WL_NANP2P */
-#ifdef WL_IFACE_MGMT
- IOV_IFACE_POLICY,
-#endif /* WL_IFACE_MGMT */
-#endif /* WL_CFG80211 */
-#endif /* WL_IFACE_MGMT_CONF */
-#ifdef RTT_GEOFENCE_CONT
-#if defined(RTT_SUPPORT) && defined(WL_NAN)
- IOV_RTT_GEOFENCE_TYPE_OVRD,
-#endif /* RTT_SUPPORT && WL_NAN */
-#endif /* RTT_GEOFENCE_CONT */
- IOV_LAST
+ IOV_LAST,
+#if defined(DHD_EFI) && defined(DHD_LOG_DUMP)
+ IOV_LOG_CAPTURE_ENABLE,
+ IOV_LOG_DUMP
+#endif /* DHD_EFI && DHD_LOG_DUMP */
};
const bcm_iovar_t dhd_iovars[] = {
- /* name varid flags flags2 type minlen */
- {"version", IOV_VERSION, 0, 0, IOVT_BUFFER, sizeof(dhd_version)},
+ {"version", IOV_VERSION, 0, 0, IOVT_BUFFER, sizeof(dhd_version) },
{"wlmsglevel", IOV_WLMSGLEVEL, 0, 0, IOVT_UINT32, 0 },
#ifdef DHD_DEBUG
- {"msglevel", IOV_MSGLEVEL, 0, 0, IOVT_UINT32, 0},
+ {"msglevel", IOV_MSGLEVEL, 0, 0, IOVT_UINT32, 0 },
{"mem_debug", IOV_MEM_DEBUG, 0, 0, IOVT_BUFFER, 0 },
-#ifdef BCMPCIE
- {"flow_ring_debug", IOV_FLOW_RING_DEBUG, 0, 0, IOVT_BUFFER, 0 },
-#endif /* BCMPCIE */
#endif /* DHD_DEBUG */
- {"bcmerrorstr", IOV_BCMERRORSTR, 0, 0, IOVT_BUFFER, BCME_STRLEN},
- {"bcmerror", IOV_BCMERROR, 0, 0, IOVT_INT8, 0},
- {"wdtick", IOV_WDTICK, 0, 0, IOVT_UINT32, 0},
- {"dump", IOV_DUMP, 0, 0, IOVT_BUFFER, DHD_IOCTL_MAXLEN},
- {"cons", IOV_CONS, 0, 0, IOVT_BUFFER, 0},
- {"dconpoll", IOV_DCONSOLE_POLL, 0, 0, IOVT_UINT32, 0},
- {"clearcounts", IOV_CLEARCOUNTS, 0, 0, IOVT_VOID, 0},
- {"gpioob", IOV_GPIOOB, 0, 0, IOVT_UINT32, 0},
- {"ioctl_timeout", IOV_IOCTLTIMEOUT, 0, 0, IOVT_UINT32, 0},
+ {"bcmerrorstr", IOV_BCMERRORSTR, 0, 0, IOVT_BUFFER, BCME_STRLEN },
+ {"bcmerror", IOV_BCMERROR, 0, 0, IOVT_INT8, 0 },
+ {"wdtick", IOV_WDTICK, 0, 0, IOVT_UINT32, 0 },
+ {"dump", IOV_DUMP, 0, 0, IOVT_BUFFER, DHD_IOCTL_MAXLEN },
+ {"cons", IOV_CONS, 0, 0, IOVT_BUFFER, 0 },
+ {"dconpoll", IOV_DCONSOLE_POLL, 0, 0, IOVT_UINT32, 0 },
+ {"clearcounts", IOV_CLEARCOUNTS, 0, 0, IOVT_VOID, 0 },
+ {"gpioob", IOV_GPIOOB, 0, 0, IOVT_UINT32, 0 },
+ {"ioctl_timeout", IOV_IOCTLTIMEOUT, 0, 0, IOVT_UINT32, 0 },
#ifdef PROP_TXSTATUS
- {"proptx", IOV_PROPTXSTATUS_ENABLE, 0, 0, IOVT_BOOL, 0 },
+ {"proptx", IOV_PROPTXSTATUS_ENABLE, 0, 0, IOVT_BOOL, 0 },
/*
set the proptxtstatus operation mode:
0 - Do not do any proptxtstatus flow control
{"proptx_opt", IOV_PROPTXSTATUS_OPT, 0, 0, IOVT_UINT32, 0 },
{"pmodule_ignore", IOV_PROPTXSTATUS_MODULE_IGNORE, 0, 0, IOVT_BOOL, 0 },
{"pcredit_ignore", IOV_PROPTXSTATUS_CREDIT_IGNORE, 0, 0, IOVT_BOOL, 0 },
- {"ptxstatus_ignore", IOV_PROPTXSTATUS_TXSTATUS_IGNORE, 0, 0, IOVT_BOOL, 0 },
+ {"ptxstatus_ignore", IOV_PROPTXSTATUS_TXSTATUS_IGNORE, 0, 0, IOVT_BOOL, 0 },
{"rxpkt_chk", IOV_PROPTXSTATUS_RXPKT_CHK, 0, 0, IOVT_BOOL, 0 },
#endif /* PROP_TXSTATUS */
{"bustype", IOV_BUS_TYPE, 0, 0, IOVT_UINT32, 0},
+#ifdef WLMEDIA_HTSF
+ {"pktdlystatsz", IOV_WLPKTDLYSTAT_SZ, 0, 0, IOVT_UINT8, 0 },
+#endif
{"changemtu", IOV_CHANGEMTU, 0, 0, IOVT_UINT32, 0 },
{"host_reorder_flows", IOV_HOSTREORDER_FLOWS, 0, 0, IOVT_BUFFER,
(WLHOST_REORDERDATA_MAXFLOWS + 1) },
#ifdef DHDTCPACK_SUPPRESS
{"tcpack_suppress", IOV_TCPACK_SUPPRESS, 0, 0, IOVT_UINT8, 0 },
#endif /* DHDTCPACK_SUPPRESS */
+#ifdef DHD_WMF
+ {"wmf_bss_enable", IOV_WMF_BSS_ENAB, 0, 0, IOVT_BOOL, 0 },
+ {"wmf_ucast_igmp", IOV_WMF_UCAST_IGMP, 0, 0, IOVT_BOOL, 0 },
+ {"wmf_mcast_data_sendup", IOV_WMF_MCAST_DATA_SENDUP, 0, 0, IOVT_BOOL, 0 },
+#ifdef WL_IGMP_UCQUERY
+ {"wmf_ucast_igmp_query", IOV_WMF_UCAST_IGMP_QUERY, (0), 0, IOVT_BOOL, 0 },
+#endif /* WL_IGMP_UCQUERY */
+#ifdef DHD_UCAST_UPNP
+ {"wmf_ucast_upnp", IOV_WMF_UCAST_UPNP, (0), 0, IOVT_BOOL, 0 },
+#endif /* DHD_UCAST_UPNP */
+ {"wmf_psta_disable", IOV_WMF_PSTA_DISABLE, (0), 0, IOVT_BOOL, 0 },
+#endif /* DHD_WMF */
+#if defined(TRAFFIC_MGMT_DWM)
+ {"trf_mgmt_filters_add", IOV_TRAFFIC_MGMT_DWM, (0), 0, IOVT_BUFFER, 0},
+#endif
#ifdef DHD_L2_FILTER
{"dhcp_unicast", IOV_DHCP_UNICAST, (0), 0, IOVT_BOOL, 0 },
#endif /* DHD_L2_FILTER */
{"block_ping", IOV_BLOCK_PING, (0), 0, IOVT_BOOL, 0},
{"proxy_arp", IOV_PROXY_ARP, (0), 0, IOVT_BOOL, 0},
{"grat_arp", IOV_GRAT_ARP, (0), 0, IOVT_BOOL, 0},
- {"block_tdls", IOV_BLOCK_TDLS, (0), IOVT_BOOL, 0},
#endif /* DHD_L2_FILTER */
{"dhd_ie", IOV_DHD_IE, (0), 0, IOVT_BUFFER, 0},
#ifdef DHD_PSTA
{"lmtest", IOV_LMTEST, 0, 0, IOVT_UINT32, 0 },
#ifdef DHD_MCAST_REGEN
{"mcast_regen_bss_enable", IOV_MCAST_REGEN_BSS_ENABLE, 0, 0, IOVT_BOOL, 0},
-#endif // endif
+#endif
#ifdef SHOW_LOGTRACE
{"dump_trace_buf", IOV_DUMP_TRACE_LOG, 0, 0, IOVT_BUFFER, sizeof(trace_buf_info_t) },
#endif /* SHOW_LOGTRACE */
+#ifdef REPORT_FATAL_TIMEOUTS
+ {"scan_timeout", IOV_SCAN_TO, 0, 0, IOVT_UINT32, 0 },
+ {"join_timeout", IOV_JOIN_TO, 0, 0, IOVT_UINT32, 0 },
+ {"cmd_timeout", IOV_CMD_TO, 0, 0, IOVT_UINT32, 0 },
+ {"oqs_timeout", IOV_OQS_TO, 0, 0, IOVT_UINT32, 0 },
+#endif /* REPORT_FATAL_TIMEOUTS */
{"trap_type", IOV_DONGLE_TRAP_TYPE, 0, 0, IOVT_UINT32, 0 },
{"trap_info", IOV_DONGLE_TRAP_INFO, 0, 0, IOVT_BUFFER, sizeof(trap_t) },
#ifdef DHD_DEBUG
{"bpaddr", IOV_BPADDR, 0, 0, IOVT_BUFFER, sizeof(sdreg_t) },
#endif /* DHD_DEBUG */
- {"dump_dongle", IOV_DUMP_DONGLE, 0, 0, IOVT_BUFFER,
- MAX(sizeof(dump_dongle_in_t), sizeof(dump_dongle_out_t)) },
-#if defined(DHD_LOG_DUMP)
+#if defined(DHD_EFI) && defined(DHD_LOG_DUMP)
+ {"log_capture_enable", IOV_LOG_CAPTURE_ENABLE, 0, 0, IOVT_UINT8, 0},
{"log_dump", IOV_LOG_DUMP, 0, 0, IOVT_UINT8, 0},
-#endif /* DHD_LOG_DUMP */
- {"debug_buf_dest_stat", IOV_DEBUG_BUF_DEST_STAT, 0, 0, IOVT_UINT32, 0 },
-#ifdef DHD_DEBUG
- {"induce_error", IOV_INDUCE_ERROR, (0), 0, IOVT_UINT16, 0 },
-#endif /* DHD_DEBUG */
-#ifdef WL_IFACE_MGMT_CONF
-#ifdef WL_CFG80211
-#ifdef WL_NANP2P
- {"conc_disc", IOV_CONC_DISC, (0), 0, IOVT_UINT16, 0 },
-#endif /* WL_NANP2P */
-#ifdef WL_IFACE_MGMT
- {"if_policy", IOV_IFACE_POLICY, (0), 0, IOVT_BUFFER, sizeof(iface_mgmt_data_t)},
-#endif /* WL_IFACE_MGMT */
-#endif /* WL_CFG80211 */
-#endif /* WL_IFACE_MGMT_CONF */
-#ifdef RTT_GEOFENCE_CONT
-#if defined(RTT_SUPPORT) && defined(WL_NAN)
- {"rtt_geofence_type_ovrd", IOV_RTT_GEOFENCE_TYPE_OVRD, (0), 0, IOVT_BOOL, 0},
-#endif /* RTT_SUPPORT && WL_NAN */
-#endif /* RTT_GEOFENCE_CONT */
+#endif /* DHD_EFI && DHD_LOG_DUMP */
{NULL, 0, 0, 0, 0, 0 }
};
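
Each dhd_iovars[] entry above binds an IOVar name to its id, flags, value type, and minimum payload length, and the {NULL, 0, 0, 0, 0, 0} sentinel terminates the table; at runtime the name taken from the ioctl buffer is resolved against this table before the handler switches on the id. A minimal sketch of that kind of name lookup, with a local struct that mirrors only the fields needed here (the driver itself resolves names through a shared bcmutils helper):

#include <stddef.h>
#include <string.h>

/* Local mirror of the fields used for lookup; illustrative only. */
struct iovar_entry {
	const char *name;
	int varid;
	int type;
	int minlen;
};

/* Return the entry whose name matches exactly, or NULL at the table sentinel. */
static const struct iovar_entry *
iovar_lookup(const struct iovar_entry *table, const char *name)
{
	const struct iovar_entry *vi;

	for (vi = table; vi->name != NULL; vi++) {
		if (strcmp(vi->name, name) == 0)
			return vi;
	}
	return NULL;
}

Because the walk stops at the first NULL name, new entries (such as the REPORT_FATAL_TIMEOUTS timeouts added above) must be placed before the terminating {NULL, 0, 0, 0, 0, 0} row.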
bool ret = FALSE;
if (dhdp->dongle_reset) {
- DHD_ERROR_RLMT(("%s: Dongle Reset occurred, cannot proceed\n",
+ DHD_ERROR(("%s: Dongle Reset occurred, cannot proceed\n",
__FUNCTION__));
ret = TRUE;
}
if (dhdp->dongle_trap_occured) {
- DHD_ERROR_RLMT(("%s: FW TRAP has occurred, cannot proceed\n",
+ DHD_ERROR(("%s: FW TRAP has occurred, cannot proceed\n",
__FUNCTION__));
ret = TRUE;
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
dhdp->hang_reason = HANG_REASON_DONGLE_TRAP;
dhd_os_send_hang_message(dhdp);
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27) */
}
if (dhdp->iovar_timeout_occured) {
- DHD_ERROR_RLMT(("%s: Resumed on timeout for previous IOVAR, cannot proceed\n",
+ DHD_ERROR(("%s: Resumed on timeout for previous IOVAR, cannot proceed\n",
__FUNCTION__));
ret = TRUE;
}
#ifdef PCIE_FULL_DONGLE
if (dhdp->d3ack_timeout_occured) {
- DHD_ERROR_RLMT(("%s: Resumed on timeout for previous D3ACK, cannot proceed\n",
- __FUNCTION__));
- ret = TRUE;
- }
- if (dhdp->livelock_occured) {
- DHD_ERROR_RLMT(("%s: LIVELOCK occurred for previous msg, cannot proceed\n",
- __FUNCTION__));
- ret = TRUE;
- }
-
- if (dhdp->pktid_audit_failed) {
- DHD_ERROR_RLMT(("%s: pktid_audit_failed, cannot proceed\n",
+ DHD_ERROR(("%s: Resumed on timeout for previous D3ACK, cannot proceed\n",
__FUNCTION__));
ret = TRUE;
}
#endif /* PCIE_FULL_DONGLE */
- if (dhdp->iface_op_failed) {
- DHD_ERROR_RLMT(("%s: iface_op_failed, cannot proceed\n",
- __FUNCTION__));
- ret = TRUE;
- }
-
- if (dhdp->scan_timeout_occurred) {
- DHD_ERROR_RLMT(("%s: scan_timeout_occurred, cannot proceed\n",
- __FUNCTION__));
- ret = TRUE;
- }
-
- if (dhdp->scan_busy_occurred) {
- DHD_ERROR_RLMT(("%s: scan_busy_occurred, cannot proceed\n",
- __FUNCTION__));
- ret = TRUE;
- }
-
-#ifdef DNGL_AXI_ERROR_LOGGING
- if (dhdp->axi_error) {
- DHD_ERROR_RLMT(("%s: AXI error occurred, cannot proceed\n",
- __FUNCTION__));
- ret = TRUE;
- }
-#endif /* DNGL_AXI_ERROR_LOGGING */
-
- if (dhd_bus_get_linkdown(dhdp)) {
- DHD_ERROR_RLMT(("%s : PCIE Link down occurred, cannot proceed\n",
- __FUNCTION__));
- ret = TRUE;
- }
-
- if (dhd_bus_get_cto(dhdp)) {
- DHD_ERROR_RLMT(("%s : CTO Recovery reported, cannot proceed\n",
- __FUNCTION__));
- ret = TRUE;
- }
-
return ret;
}
-void
-dhd_clear_bus_errors(dhd_pub_t *dhdp)
-{
- if (!dhdp)
- return;
-
- dhdp->dongle_reset = FALSE;
- dhdp->dongle_trap_occured = FALSE;
- dhdp->iovar_timeout_occured = FALSE;
-#ifdef PCIE_FULL_DONGLE
- dhdp->d3ack_timeout_occured = FALSE;
- dhdp->livelock_occured = FALSE;
- dhdp->pktid_audit_failed = FALSE;
-#endif // endif
- dhdp->iface_op_failed = FALSE;
- dhdp->scan_timeout_occurred = FALSE;
- dhdp->scan_busy_occurred = FALSE;
-}
-
#ifdef DHD_SSSR_DUMP
-
-/* This can be overwritten by module parameter defined in dhd_linux.c */
-uint support_sssr_dump = TRUE;
-
int
dhd_sssr_mempool_init(dhd_pub_t *dhd)
{
}
}
-void
-dhd_dump_sssr_reg_info(sssr_reg_info_v1_t *sssr_reg_info)
-{
-}
-
int
dhd_get_sssr_reg_info(dhd_pub_t *dhd)
{
- int ret;
+ int ret = BCME_ERROR;
+
+ DHD_ERROR(("%s: get sssr_reg_info\n", __FUNCTION__));
/* get sssr_reg_info from firmware */
memset((void *)&dhd->sssr_reg_info, 0, sizeof(dhd->sssr_reg_info));
- ret = dhd_iovar(dhd, 0, "sssr_reg_info", NULL, 0, (char *)&dhd->sssr_reg_info,
- sizeof(dhd->sssr_reg_info), FALSE);
- if (ret < 0) {
- DHD_ERROR(("%s: sssr_reg_info failed (error=%d)\n",
- __FUNCTION__, ret));
- return BCME_ERROR;
+ if (bcm_mkiovar("sssr_reg_info", 0, 0, (char *)&dhd->sssr_reg_info,
+ sizeof(dhd->sssr_reg_info))) {
+ if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, &dhd->sssr_reg_info,
+ sizeof(dhd->sssr_reg_info), FALSE, 0)) < 0) {
+ DHD_ERROR(("%s: dhd_wl_ioctl_cmd failed (error=%d)\n", __FUNCTION__, ret));
+ }
+ } else {
+ DHD_ERROR(("%s: bcm_mkiovar failed\n", __FUNCTION__));
}
- dhd_dump_sssr_reg_info(&dhd->sssr_reg_info);
- return BCME_OK;
+ return ret;
}
uint32
dhd->sssr_inited = FALSE;
- if (!support_sssr_dump) {
- DHD_ERROR(("%s: sssr dump not inited as instructed by mod param\n", __FUNCTION__));
- return BCME_OK;
- }
-
/* check if sssr mempool is allocated */
if (dhd->sssr_mempool == NULL) {
DHD_ERROR(("%s: sssr_mempool is not allocated\n",
}
/* Validate structure version */
- if (dhd->sssr_reg_info.version > SSSR_REG_INFO_VER_1) {
+ if (dhd->sssr_reg_info.version != SSSR_REG_INFO_VER) {
DHD_ERROR(("%s: dhd->sssr_reg_info.version (%d : %d) mismatch\n",
__FUNCTION__, (int)dhd->sssr_reg_info.version, SSSR_REG_INFO_VER));
return BCME_ERROR;
}
/* Validate structure length */
- if (dhd->sssr_reg_info.length < sizeof(sssr_reg_info_v0_t)) {
+ if (dhd->sssr_reg_info.length != sizeof(dhd->sssr_reg_info)) {
DHD_ERROR(("%s: dhd->sssr_reg_info.length (%d : %d) mismatch\n",
__FUNCTION__, (int)dhd->sssr_reg_info.length,
(int)sizeof(dhd->sssr_reg_info)));
dhd->sssr_d11_before[i] = NULL;
dhd->sssr_d11_after[i] = NULL;
}
- dhd->sssr_dig_buf_before = NULL;
- dhd->sssr_dig_buf_after = NULL;
+ dhd->sssr_vasip_buf_before = NULL;
+ dhd->sssr_vasip_buf_after = NULL;
/* Allocate memory */
for (i = 0; i < MAX_NUM_D11CORES; i++) {
}
if (dhd->sssr_reg_info.vasip_regs.vasip_sr_size) {
- dhd->sssr_dig_buf_before = (uint32 *)(dhd->sssr_mempool + mempool_used);
+ dhd->sssr_vasip_buf_before = (uint32 *)(dhd->sssr_mempool + mempool_used);
mempool_used += dhd->sssr_reg_info.vasip_regs.vasip_sr_size;
- dhd->sssr_dig_buf_after = (uint32 *)(dhd->sssr_mempool + mempool_used);
+ dhd->sssr_vasip_buf_after = (uint32 *)(dhd->sssr_mempool + mempool_used);
mempool_used += dhd->sssr_reg_info.vasip_regs.vasip_sr_size;
- } else if ((dhd->sssr_reg_info.length > OFFSETOF(sssr_reg_info_v1_t, dig_mem_info)) &&
- dhd->sssr_reg_info.dig_mem_info.dig_sr_addr) {
- dhd->sssr_dig_buf_before = (uint32 *)(dhd->sssr_mempool + mempool_used);
- mempool_used += dhd->sssr_reg_info.dig_mem_info.dig_sr_size;
-
- dhd->sssr_dig_buf_after = (uint32 *)(dhd->sssr_mempool + mempool_used);
- mempool_used += dhd->sssr_reg_info.dig_mem_info.dig_sr_size;
}
dhd->sssr_inited = TRUE;
dhd->sssr_d11_before[i] = NULL;
dhd->sssr_d11_after[i] = NULL;
}
- dhd->sssr_dig_buf_before = NULL;
- dhd->sssr_dig_buf_after = NULL;
+ dhd->sssr_vasip_buf_before = NULL;
+ dhd->sssr_vasip_buf_after = NULL;
return;
}
-void
-dhd_sssr_print_filepath(dhd_pub_t *dhd, char *path)
-{
- bool print_info = FALSE;
- int dump_mode;
-
- if (!dhd || !path) {
- DHD_ERROR(("%s: dhd or memdump_path is NULL\n",
- __FUNCTION__));
- return;
- }
-
- if (!dhd->sssr_dump_collected) {
- /* SSSR dump is not collected */
- return;
- }
-
- dump_mode = dhd->sssr_dump_mode;
-
- if (bcmstrstr(path, "core_0_before")) {
- if (dhd->sssr_d11_outofreset[0] &&
- dump_mode == SSSR_DUMP_MODE_SSSR) {
- print_info = TRUE;
- }
- } else if (bcmstrstr(path, "core_0_after")) {
- if (dhd->sssr_d11_outofreset[0]) {
- print_info = TRUE;
- }
- } else if (bcmstrstr(path, "core_1_before")) {
- if (dhd->sssr_d11_outofreset[1] &&
- dump_mode == SSSR_DUMP_MODE_SSSR) {
- print_info = TRUE;
- }
- } else if (bcmstrstr(path, "core_1_after")) {
- if (dhd->sssr_d11_outofreset[1]) {
- print_info = TRUE;
- }
- } else {
- print_info = TRUE;
- }
-
- if (print_info) {
- DHD_ERROR(("%s: file_path = %s%s\n", __FUNCTION__,
- path, FILE_NAME_HAL_TAG));
- }
-}
#endif /* DHD_SSSR_DUMP */
#ifdef DHD_FW_COREDUMP
#endif /* BCMDBUS */
}
-int
+static int
dhd_dump(dhd_pub_t *dhdp, char *buf, int buflen)
{
+ char eabuf[ETHER_ADDR_STR_LEN];
+
struct bcmstrbuf b;
struct bcmstrbuf *strbuf = &b;
dhdp->up, dhdp->txoff, dhdp->busstate);
bcm_bprintf(strbuf, "pub.hdrlen %u pub.maxctl %u pub.rxsz %u\n",
dhdp->hdrlen, dhdp->maxctl, dhdp->rxsz);
- bcm_bprintf(strbuf, "pub.iswl %d pub.drv_version %ld pub.mac "MACDBG"\n",
- dhdp->iswl, dhdp->drv_version, MAC2STRDBG(&dhdp->mac));
+ bcm_bprintf(strbuf, "pub.iswl %d pub.drv_version %ld pub.mac %s\n",
+ dhdp->iswl, dhdp->drv_version, bcm_ether_ntoa(&dhdp->mac, eabuf));
bcm_bprintf(strbuf, "pub.bcmerror %d tickcnt %u\n", dhdp->bcmerror, dhdp->tickcnt);
bcm_bprintf(strbuf, "dongle stats:\n");
dhdp->rx_readahead_cnt, dhdp->tx_realloc);
bcm_bprintf(strbuf, "tx_pktgetfail %lu rx_pktgetfail %lu\n",
dhdp->tx_pktgetfail, dhdp->rx_pktgetfail);
- bcm_bprintf(strbuf, "tx_big_packets %lu\n",
- dhdp->tx_big_packets);
bcm_bprintf(strbuf, "\n");
+
#ifdef DMAMAP_STATS
/* Add DMA MAP info */
bcm_bprintf(strbuf, "DMA MAP stats: \n");
dhdp->dma_stats.ioctl_rx_sz + dhdp->dma_stats.event_rx_sz +
dhdp->dma_stats.tsbuf_rx_sz));
#endif /* DMAMAP_STATS */
- bcm_bprintf(strbuf, "dhd_induce_error : %u\n", dhdp->dhd_induce_error);
+
/* Add any prot info */
dhd_prot_dump(dhdp, strbuf);
bcm_bprintf(strbuf, "\n");
/* Add any bus info */
dhd_bus_dump(dhdp, strbuf);
+
#if defined(DHD_LB_STATS)
dhd_lb_stats_dump(dhdp, strbuf);
#endif /* DHD_LB_STATS */
dhd_wet_dump(dhdp, strbuf);
}
#endif /* DHD_WET */
-
- /* return remaining buffer length */
- return (!strbuf->size ? BCME_BUFTOOSHORT : strbuf->size);
+ return (!strbuf->size ? BCME_BUFTOOSHORT : 0);
}
void
dhd_wl_ioctl_set_intiovar(dhd_pub_t *dhd_pub, char *name, uint val,
int cmd, uint8 set, int ifidx)
{
- char iovbuf[WLC_IOCTL_SMLEN];
+ char iovbuf[WLC_IOCTL_SMLEN] = {0};
int ret = -1;
int lval = htol32(val);
uint len;
return ret;
}
-static struct ioctl2str_s {
- uint32 ioctl;
- char *name;
-} ioctl2str_array[] = {
- {WLC_UP, "UP"},
- {WLC_DOWN, "DOWN"},
- {WLC_SET_PROMISC, "SET_PROMISC"},
- {WLC_SET_INFRA, "SET_INFRA"},
- {WLC_SET_AUTH, "SET_AUTH"},
- {WLC_SET_SSID, "SET_SSID"},
- {WLC_RESTART, "RESTART"},
- {WLC_SET_CHANNEL, "SET_CHANNEL"},
- {WLC_SET_RATE_PARAMS, "SET_RATE_PARAMS"},
- {WLC_SET_KEY, "SET_KEY"},
- {WLC_SCAN, "SCAN"},
- {WLC_DISASSOC, "DISASSOC"},
- {WLC_REASSOC, "REASSOC"},
- {WLC_SET_COUNTRY, "SET_COUNTRY"},
- {WLC_SET_WAKE, "SET_WAKE"},
- {WLC_SET_SCANSUPPRESS, "SET_SCANSUPPRESS"},
- {WLC_SCB_DEAUTHORIZE, "SCB_DEAUTHORIZE"},
- {WLC_SET_WSEC, "SET_WSEC"},
- {WLC_SET_INTERFERENCE_MODE, "SET_INTERFERENCE_MODE"},
- {WLC_SET_RADAR, "SET_RADAR"},
- {0, NULL}
-};
-
-static char *
-ioctl2str(uint32 ioctl)
-{
- struct ioctl2str_s *p = ioctl2str_array;
-
- while (p->name != NULL) {
- if (p->ioctl == ioctl) {
- return p->name;
- }
- p++;
- }
-
- return "";
-}
-
-/**
- * @param ioc IO control struct, members are partially used by this function.
- * @param buf [inout] Contains parameters to send to dongle, contains dongle response on return.
- * @param len Maximum number of bytes that dongle is allowed to write into 'buf'.
- */
int
dhd_wl_ioctl(dhd_pub_t *dhd_pub, int ifidx, wl_ioctl_t *ioc, void *buf, int len)
{
#ifdef DUMP_IOCTL_IOV_LIST
dhd_iov_li_t *iov_li;
#endif /* DUMP_IOCTL_IOV_LIST */
- int hostsleep_set = 0;
- int hostsleep_val = 0;
-
-#ifdef DHD_PCIE_NATIVE_RUNTIMEPM
- DHD_OS_WAKE_LOCK(dhd_pub);
- if (pm_runtime_get_sync(dhd_bus_to_dev(dhd_pub->bus)) < 0) {
- DHD_RPM(("%s: pm_runtime_get_sync error. \n", __FUNCTION__));
- DHD_OS_WAKE_UNLOCK(dhd_pub);
- return BCME_ERROR;
- }
-#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
-
#ifdef KEEPIF_ON_DEVICE_RESET
if (ioc->cmd == WLC_GET_VAR) {
dbus_config_t config;
config.general_param = 0;
- if (buf) {
- if (!strcmp(buf, "wowl_activate")) {
- /* 1 (TRUE) after decreased by 1 */
- config.general_param = 2;
- } else if (!strcmp(buf, "wowl_clear")) {
- /* 0 (FALSE) after decreased by 1 */
- config.general_param = 1;
- }
+ if (!strcmp(buf, "wowl_activate")) {
+ config.general_param = 2; /* 1 (TRUE) after decreased by 1 */
+ } else if (!strcmp(buf, "wowl_clear")) {
+ config.general_param = 1; /* 0 (FALSE) after decreased by 1 */
}
if (config.general_param) {
config.config_id = DBUS_CONFIG_ID_KEEPIF_ON_DEVRESET;
if (dhd_os_proto_block(dhd_pub))
{
#ifdef DHD_LOG_DUMP
- int slen, val, lval, min_len;
- char *msg, tmp[64];
+ int slen, i, val, rem, lval, min_len;
+ char *pval, *pos, *msg;
+ char tmp[64];
/* WLC_GET_VAR */
- if (ioc->cmd == WLC_GET_VAR && buf) {
+ if (ioc->cmd == WLC_GET_VAR) {
min_len = MIN(sizeof(tmp) - 1, strlen(buf));
memset(tmp, 0, sizeof(tmp));
bcopy(buf, tmp, min_len);
tmp[min_len] = '\0';
}
#endif /* DHD_LOG_DUMP */
-
-#ifdef DHD_DISCONNECT_TRACE
- if ((WLC_DISASSOC == ioc->cmd) || (WLC_DOWN == ioc->cmd) ||
- (WLC_DISASSOC_MYAP == ioc->cmd)) {
- DHD_ERROR(("IOCTL Disconnect WiFi: %d\n", ioc->cmd));
- }
-#endif /* HW_DISCONNECT_TRACE */
-
- /* logging of iovars that are send to the dongle, ./dhd msglevel +iovar */
- if (ioc->set == TRUE) {
- char *pars = (char *)buf; // points at user buffer
- if (ioc->cmd == WLC_SET_VAR && buf) {
- DHD_DNGL_IOVAR_SET(("iovar:%d: set %s", ifidx, pars));
- if (ioc->len > 1 + sizeof(uint32)) {
- // skip iovar name:
- pars += strnlen(pars, ioc->len - 1 - sizeof(uint32));
- pars++; // skip NULL character
- }
- } else {
- DHD_DNGL_IOVAR_SET(("ioctl:%d: set %d %s",
- ifidx, ioc->cmd, ioctl2str(ioc->cmd)));
- }
- if (pars != NULL) {
- DHD_DNGL_IOVAR_SET((" 0x%x\n", *(uint32*)pars));
- } else {
- DHD_DNGL_IOVAR_SET((" NULL\n"));
- }
- }
-
DHD_LINUX_GENERAL_LOCK(dhd_pub, flags);
if (DHD_BUS_CHECK_DOWN_OR_DOWN_IN_PROGRESS(dhd_pub)) {
+#ifdef DHD_EFI
DHD_INFO(("%s: returning as busstate=%d\n",
__FUNCTION__, dhd_pub->busstate));
+#else
+ DHD_ERROR(("%s: returning as busstate=%d\n",
+ __FUNCTION__, dhd_pub->busstate));
+#endif /* DHD_EFI */
DHD_LINUX_GENERAL_UNLOCK(dhd_pub, flags);
dhd_os_proto_unblock(dhd_pub);
return -ENODEV;
DHD_BUS_BUSY_SET_IN_IOVAR(dhd_pub);
DHD_LINUX_GENERAL_UNLOCK(dhd_pub, flags);
+#ifdef DHD_PCIE_RUNTIMEPM
+ dhdpcie_runtime_bus_wake(dhd_pub, TRUE, dhd_wl_ioctl);
+#endif /* DHD_PCIE_RUNTIMEPM */
+
DHD_LINUX_GENERAL_LOCK(dhd_pub, flags);
if (DHD_BUS_CHECK_SUSPEND_OR_SUSPEND_IN_PROGRESS(dhd_pub)) {
DHD_ERROR(("%s: bus is in suspend(%d) or suspending(0x%x) state!!\n",
}
DHD_LINUX_GENERAL_UNLOCK(dhd_pub, flags);
+#if defined(WL_WLC_SHIM)
+ {
+ struct wl_shim_node *shim = dhd_pub_shim(dhd_pub);
+
+ wl_io_pport_t io_pport;
+ io_pport.dhd_pub = dhd_pub;
+ io_pport.ifidx = ifidx;
+
+ ret = wl_shim_ioctl(shim, ioc, len, &io_pport);
+ if (ret != BCME_OK) {
+ DHD_TRACE(("%s: wl_shim_ioctl(%d) ERR %d\n",
+ __FUNCTION__, ioc->cmd, ret));
+ }
+ }
+#else
#ifdef DUMP_IOCTL_IOV_LIST
if (ioc->cmd != WLC_GET_MAGIC && ioc->cmd != WLC_GET_VERSION && buf) {
if (!(iov_li = MALLOC(dhd_pub->osh, sizeof(*iov_li)))) {
DHD_ERROR(("iovar dump list item allocation Failed\n"));
} else {
iov_li->cmd = ioc->cmd;
- if (buf)
- bcopy((char *)buf, iov_li->buff, strlen((char *)buf)+1);
+ bcopy((char *)buf, iov_li->buff, strlen((char *)buf)+1);
dhd_iov_li_append(dhd_pub, &dhd_pub->dump_iovlist_head,
- &iov_li->list);
+ &iov_li->list);
}
}
#endif /* DUMP_IOCTL_IOV_LIST */
-
- if (dhd_conf_check_hostsleep(dhd_pub, ioc->cmd, ioc->buf, len,
- &hostsleep_set, &hostsleep_val, &ret))
- goto exit;
ret = dhd_prot_ioctl(dhd_pub, ifidx, ioc, buf, len);
- dhd_conf_get_hostsleep(dhd_pub, hostsleep_set, hostsleep_val, ret);
-
#ifdef DUMP_IOCTL_IOV_LIST
if (ret == -ETIMEDOUT) {
DHD_ERROR(("Last %d issued commands: Latest one is at bottom.\n",
dhd_iov_li_print(&dhd_pub->dump_iovlist_head);
}
#endif /* DUMP_IOCTL_IOV_LIST */
+#endif /* defined(WL_WLC_SHIM) */
#ifdef DHD_LOG_DUMP
- if ((ioc->cmd == WLC_GET_VAR || ioc->cmd == WLC_SET_VAR) &&
- buf != NULL) {
- if (buf) {
- lval = 0;
- slen = strlen(buf) + 1;
- msg = (char*)buf;
- if (len >= slen + sizeof(lval)) {
- if (ioc->cmd == WLC_GET_VAR) {
- msg = tmp;
- lval = *(int*)buf;
- } else {
- min_len = MIN(ioc->len - slen, sizeof(int));
- bcopy((msg + slen), &lval, min_len);
- }
- if (!strncmp(msg, "cur_etheraddr",
- strlen("cur_etheraddr"))) {
- lval = 0;
- }
+ if (ioc->cmd == WLC_GET_VAR || ioc->cmd == WLC_SET_VAR) {
+ lval = 0;
+ slen = strlen(buf) + 1;
+ msg = (char*)buf;
+ if (len >= slen + sizeof(lval)) {
+ if (ioc->cmd == WLC_GET_VAR) {
+ msg = tmp;
+ lval = *(int*)buf;
+ } else {
+ min_len = MIN(ioc->len - slen, sizeof(int));
+ bcopy((msg + slen), &lval, min_len);
}
- DHD_IOVAR_MEM((
- "%s: cmd: %d, msg: %s val: 0x%x,"
- " len: %d, set: %d, txn-id: %d\n",
- ioc->cmd == WLC_GET_VAR ?
- "WLC_GET_VAR" : "WLC_SET_VAR",
- ioc->cmd, msg, lval, ioc->len, ioc->set,
- dhd_prot_get_ioctl_trans_id(dhd_pub)));
- } else {
- DHD_IOVAR_MEM(("%s: cmd: %d, len: %d, set: %d, txn-id: %d\n",
- ioc->cmd == WLC_GET_VAR ? "WLC_GET_VAR" : "WLC_SET_VAR",
- ioc->cmd, ioc->len, ioc->set,
- dhd_prot_get_ioctl_trans_id(dhd_pub)));
}
+ DHD_ERROR_MEM(("%s: cmd: %d, msg: %s, val: 0x%x, len: %d, set: %d\n",
+ ioc->cmd == WLC_GET_VAR ? "WLC_GET_VAR" : "WLC_SET_VAR",
+ ioc->cmd, msg, lval, ioc->len, ioc->set));
} else {
slen = ioc->len;
- if (buf != NULL && slen != 0) {
- if (slen >= 4) {
- val = *(int*)buf;
- } else if (slen >= 2) {
- val = *(short*)buf;
- } else {
- val = *(char*)buf;
+ if (buf != NULL) {
+ val = *(int*)buf;
+ pval = (char*)buf;
+ pos = tmp;
+ rem = sizeof(tmp);
+ memset(tmp, 0, sizeof(tmp));
+ for (i = 0; i < slen; i++) {
+ if (rem <= 3) {
+ /* At least 2 byte required + 1 byte(NULL) */
+ break;
+ }
+ pos += snprintf(pos, rem, "%02x ", pval[i]);
+ rem = sizeof(tmp) - (int)(pos - tmp);
}
/* Do not dump for WLC_GET_MAGIC and WLC_GET_VERSION */
if (ioc->cmd != WLC_GET_MAGIC && ioc->cmd != WLC_GET_VERSION)
- DHD_IOVAR_MEM(("WLC_IOCTL: cmd: %d, val: %d, len: %d, "
- "set: %d\n", ioc->cmd, val, ioc->len, ioc->set));
+ DHD_ERROR_MEM(("WLC_IOCTL: cmd: %d, val: %d(%s), "
+ "len: %d, set: %d\n",
+ ioc->cmd, val, tmp, ioc->len, ioc->set));
} else {
- DHD_IOVAR_MEM(("WLC_IOCTL: cmd: %d, buf is NULL\n", ioc->cmd));
+ DHD_ERROR_MEM(("WLC_IOCTL: cmd: %d, buf is NULL\n", ioc->cmd));
}
}
#endif /* DHD_LOG_DUMP */
DHD_ERROR(("%s: 'resumed on timeout' error is "
"occurred before the interface does not"
" bring up\n", __FUNCTION__));
+ dhd_pub->busstate = DHD_BUS_DOWN;
}
-exit:
DHD_LINUX_GENERAL_LOCK(dhd_pub, flags);
DHD_BUS_BUSY_CLEAR_IN_IOVAR(dhd_pub);
dhd_os_busbusy_wake(dhd_pub);
}
-#ifdef DHD_PCIE_NATIVE_RUNTIMEPM
- pm_runtime_mark_last_busy(dhd_bus_to_dev(dhd_pub->bus));
- pm_runtime_put_autosuspend(dhd_bus_to_dev(dhd_pub->bus));
-
- DHD_OS_WAKE_UNLOCK(dhd_pub);
-#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
-
return ret;
}
#ifdef DHD_DEBUG
int
-dhd_mem_debug(dhd_pub_t *dhd, uchar *msg, uint msglen)
+dhd_mem_debug(dhd_pub_t *dhd, char *msg, uint msglen)
{
unsigned long int_arg = 0;
char *p;
dhd_dbg_mwli_t *mw_li;
dll_t *item, *next;
/* check if mwalloc, mwquery or mwfree was supplied an argument separated by a space */
- p = bcmstrstr((char *)msg, " ");
+ p = bcmstrstr(msg, " ");
if (p != NULL) {
/* space should be converted to null as separation flag for firmware */
*p = '\0';
/* let's query the list internally */
if (dll_empty(dll_head_p(&dhd->mw_list_head))) {
DHD_ERROR(("memwaste list is empty, call mwalloc < size > to allocate\n"));
+ /* reset the id */
+ dhd->mw_id = 0;
} else {
for (item = dll_head_p(&dhd->mw_list_head);
!dll_end(&dhd->mw_list_head, item); item = next) {
*(p+1+sizeof(int32)) = '\0';
/* recalculated length -> 5 bytes for "alloc" + 4 bytes for size +
- * 1 bytes for null caracter
+ * 1 byte for the null character
*/
msglen = strlen(msg) + sizeof(int32) + 1;
- if (dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, msg, msglen+1, FALSE, 0) < 0) {
+ if (dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, msg, msglen, FALSE, 0) < 0) {
DHD_ERROR(("IOCTL failed for memdebug alloc\n"));
}
size = mw_li->size;
dll_delete(item);
MFREE(dhd->osh, mw_li, sizeof(*mw_li));
- if (dll_empty(dll_head_p(&dhd->mw_list_head))) {
- /* reset the id */
- dhd->mw_id = 0;
- }
}
}
if (handle) {
}
return 0;
}
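/* Usage note, inferred from the parsing above: the mem_debug IOVAR takes a
 * sub-command string such as "mwalloc <size>", "mwquery" or "mwfree <id>";
 * the space after the sub-command is converted to '\0' before the buffer is
 * handed to the firmware via WLC_GET_VAR.
 */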
+
extern void
dhd_mw_list_delete(dhd_pub_t *dhd, dll_t *list_head)
{
MFREE(dhd->osh, mw_li, sizeof(*mw_li));
}
}
-#ifdef BCMPCIE
-int
-dhd_flow_ring_debug(dhd_pub_t *dhd, char *msg, uint msglen)
-{
- flow_ring_table_t *flow_ring_table;
- char *cmd;
- char *end_ptr = NULL;
- uint8 prio;
- uint16 flowid;
- int i;
- int ret = 0;
- cmd = bcmstrstr(msg, " ");
- BCM_REFERENCE(prio);
- if (cmd != NULL) {
- /* in order to use string operations append null */
- *cmd = '\0';
- } else {
- DHD_ERROR(("missing: create/delete args\n"));
- return BCME_ERROR;
- }
- if (cmd && !strcmp(msg, "create")) {
- /* extract <"source address", "destination address", "priority"> */
- uint8 sa[ETHER_ADDR_LEN], da[ETHER_ADDR_LEN];
- BCM_REFERENCE(sa);
- BCM_REFERENCE(da);
- msg = msg + strlen("create") + 1;
- /* fill ethernet source address */
- for (i = 0; i < ETHER_ADDR_LEN; i++) {
- sa[i] = (uint8)bcm_strtoul(msg, &end_ptr, 16);
- if (*end_ptr == ':') {
- msg = (end_ptr + 1);
- } else if (i != 5) {
- DHD_ERROR(("not a valid source mac addr\n"));
- return BCME_ERROR;
- }
- }
- if (*end_ptr != ' ') {
- DHD_ERROR(("missing: destiantion mac id\n"));
- return BCME_ERROR;
- } else {
- /* skip space */
- msg = end_ptr + 1;
- }
- /* fill ethernet destination address */
- for (i = 0; i < ETHER_ADDR_LEN; i++) {
- da[i] = (uint8)bcm_strtoul(msg, &end_ptr, 16);
- if (*end_ptr == ':') {
- msg = (end_ptr + 1);
- } else if (i != 5) {
- DHD_ERROR(("not a valid destination mac addr\n"));
- return BCME_ERROR;
- }
- }
- if (*end_ptr != ' ') {
- DHD_ERROR(("missing: priority\n"));
- return BCME_ERROR;
- } else {
- msg = end_ptr + 1;
- }
- /* parse priority */
- prio = (uint8)bcm_strtoul(msg, &end_ptr, 10);
- if (prio > MAXPRIO) {
- DHD_ERROR(("%s: invalid priority. Must be between 0-7 inclusive\n",
- __FUNCTION__));
- return BCME_ERROR;
- }
-
- if (*end_ptr != '\0') {
- DHD_ERROR(("msg not truncated with NULL character\n"));
- return BCME_ERROR;
- }
- ret = dhd_flowid_debug_create(dhd, 0, prio, (char *)sa, (char *)da, &flowid);
- if (ret != BCME_OK) {
- DHD_ERROR(("%s: flowring creation failed ret: %d\n", __FUNCTION__, ret));
- return BCME_ERROR;
- }
- return BCME_OK;
-
- } else if (cmd && !strcmp(msg, "delete")) {
- msg = msg + strlen("delete") + 1;
- /* parse flowid */
- flowid = (uint16)bcm_strtoul(msg, &end_ptr, 10);
- if (*end_ptr != '\0') {
- DHD_ERROR(("msg not truncated with NULL character\n"));
- return BCME_ERROR;
- }
-
- /* Find flowid from ifidx 0 since this IOVAR creating flowring with ifidx 0 */
- if (dhd_flowid_find_by_ifidx(dhd, 0, flowid) != BCME_OK)
- {
- DHD_ERROR(("%s : Deleting not created flowid: %u\n", __FUNCTION__, flowid));
- return BCME_ERROR;
- }
-
- flow_ring_table = (flow_ring_table_t *)dhd->flow_ring_table;
- ret = dhd_bus_flow_ring_delete_request(dhd->bus, (void *)&flow_ring_table[flowid]);
- if (ret != BCME_OK) {
- DHD_ERROR(("%s: flowring deletion failed ret: %d\n", __FUNCTION__, ret));
- return BCME_ERROR;
- }
- return BCME_OK;
- }
- DHD_ERROR(("%s: neither create nor delete\n", __FUNCTION__));
- return BCME_ERROR;
-}
-#endif /* BCMPCIE */
#endif /* DHD_DEBUG */
#ifdef PKT_STATICS
break;
}
- dhd_watchdog_ms = (uint)int_val;
+ if (CUSTOM_DHD_WATCHDOG_MS == 0 && int_val == 0) {
+ dhd_watchdog_ms = (uint)int_val;
+ }
dhd_os_wd_timer(dhd_pub, (uint)int_val);
break;
case IOV_GVAL(IOV_DUMP):
- if (dhd_dump(dhd_pub, arg, len) <= 0)
- bcmerror = BCME_ERROR;
- else
- bcmerror = BCME_OK;
+ bcmerror = dhd_dump(dhd_pub, arg, len);
break;
#ifndef BCMDBUS
case IOV_GVAL(IOV_DCONSOLE_POLL):
- int_val = (int32)dhd_pub->dhd_console_ms;
+ int_val = (int32)dhd_console_ms;
bcopy(&int_val, arg, val_size);
break;
case IOV_SVAL(IOV_DCONSOLE_POLL):
- dhd_pub->dhd_console_ms = (uint)int_val;
+ dhd_console_ms = (uint)int_val;
break;
-#if defined(DHD_DEBUG)
case IOV_SVAL(IOV_CONS):
- if (len > 0) {
-#ifdef CONSOLE_DPC
- bcmerror = dhd_bus_txcons(dhd_pub, arg, len - 1);
-#else
+ if (len > 0)
bcmerror = dhd_bus_console_in(dhd_pub, arg, len - 1);
-#endif
- }
break;
-#endif /* DHD_DEBUG */
#endif /* !BCMDBUS */
case IOV_SVAL(IOV_CLEARCOUNTS):
dhd_pub->rx_readahead_cnt = 0;
dhd_pub->tx_realloc = 0;
dhd_pub->wd_dpc_sched = 0;
- dhd_pub->tx_big_packets = 0;
memset(&dhd_pub->dstats, 0, sizeof(dhd_pub->dstats));
dhd_bus_clearcounts(dhd_pub);
#ifdef PROP_TXSTATUS
#endif /* DHD_LB_STATS */
break;
+
case IOV_GVAL(IOV_IOCTLTIMEOUT): {
int_val = (int32)dhd_os_get_ioctl_resp_timeout();
bcopy(&int_val, arg, sizeof(int_val));
/* The dhd application queries the driver to check whether the bus is USB, SDIO, or PCIe. */
#ifdef BCMDBUS
int_val = BUS_TYPE_USB;
-#endif // endif
+#endif /* BCMDBUS */
#ifdef BCMSDIO
int_val = BUS_TYPE_SDIO;
-#endif // endif
+#endif
#ifdef PCIE_FULL_DONGLE
int_val = BUS_TYPE_PCIE;
-#endif // endif
+#endif
+ bcopy(&int_val, arg, val_size);
+ break;
+
+
+#ifdef WLMEDIA_HTSF
+ case IOV_GVAL(IOV_WLPKTDLYSTAT_SZ):
+ int_val = dhd_pub->htsfdlystat_sz;
bcopy(&int_val, arg, val_size);
break;
+ case IOV_SVAL(IOV_WLPKTDLYSTAT_SZ):
+ dhd_pub->htsfdlystat_sz = int_val & 0xff;
+ printf("Setting tsfdlystat_sz:%d\n", dhd_pub->htsfdlystat_sz);
+ break;
+#endif
case IOV_SVAL(IOV_CHANGEMTU):
int_val &= 0xffff;
bcmerror = dhd_change_mtu(dhd_pub, int_val, 0);
break;
}
#endif /* DHDTCPACK_SUPPRESS */
-
-#ifdef DHD_L2_FILTER
- case IOV_GVAL(IOV_DHCP_UNICAST): {
- uint32 bssidx;
+#ifdef DHD_WMF
+ case IOV_GVAL(IOV_WMF_BSS_ENAB): {
+ uint32 bssidx;
+ dhd_wmf_t *wmf;
const char *val;
+
if (dhd_iovar_parse_bssidx(dhd_pub, name, &bssidx, &val) != BCME_OK) {
- DHD_ERROR(("%s: IOV_DHCP_UNICAST: bad parameterand name = %s\n",
- __FUNCTION__, name));
+ DHD_ERROR(("%s: wmf_bss_enable: bad parameter\n", __FUNCTION__));
bcmerror = BCME_BADARG;
break;
}
- int_val = dhd_get_dhcp_unicast_status(dhd_pub, bssidx);
- memcpy(arg, &int_val, val_size);
- break;
+
+ wmf = dhd_wmf_conf(dhd_pub, bssidx);
+ int_val = wmf->wmf_enable ? 1 : 0;
+ bcopy(&int_val, arg, val_size);
+ break;
+ }
+ case IOV_SVAL(IOV_WMF_BSS_ENAB): {
+ /* Enable/Disable WMF */
+ uint32 bssidx;
+ dhd_wmf_t *wmf;
+ const char *val;
+
+ if (dhd_iovar_parse_bssidx(dhd_pub, name, &bssidx, &val) != BCME_OK) {
+ DHD_ERROR(("%s: wmf_bss_enable: bad parameter\n", __FUNCTION__));
+ bcmerror = BCME_BADARG;
+ break;
+ }
+
+ ASSERT(val);
+ bcopy(val, &int_val, sizeof(uint32));
+ wmf = dhd_wmf_conf(dhd_pub, bssidx);
+ if (wmf->wmf_enable == int_val)
+ break;
+ if (int_val) {
+ /* Enable WMF */
+ if (dhd_wmf_instance_add(dhd_pub, bssidx) != BCME_OK) {
+ DHD_ERROR(("%s: Error in creating WMF instance\n",
+ __FUNCTION__));
+ break;
+ }
+ if (dhd_wmf_start(dhd_pub, bssidx) != BCME_OK) {
+ DHD_ERROR(("%s: Failed to start WMF\n", __FUNCTION__));
+ break;
+ }
+ wmf->wmf_enable = TRUE;
+ } else {
+ /* Disable WMF */
+ wmf->wmf_enable = FALSE;
+ dhd_wmf_stop(dhd_pub, bssidx);
+ dhd_wmf_instance_del(dhd_pub, bssidx);
+ }
+ break;
+ }
+ case IOV_GVAL(IOV_WMF_UCAST_IGMP):
+ int_val = dhd_pub->wmf_ucast_igmp ? 1 : 0;
+ bcopy(&int_val, arg, val_size);
+ break;
+ case IOV_SVAL(IOV_WMF_UCAST_IGMP):
+ if (dhd_pub->wmf_ucast_igmp == int_val)
+ break;
+
+ if (int_val >= OFF && int_val <= ON)
+ dhd_pub->wmf_ucast_igmp = int_val;
+ else
+ bcmerror = BCME_RANGE;
+ break;
+ case IOV_GVAL(IOV_WMF_MCAST_DATA_SENDUP):
+ int_val = dhd_wmf_mcast_data_sendup(dhd_pub, 0, FALSE, FALSE);
+ bcopy(&int_val, arg, val_size);
+ break;
+ case IOV_SVAL(IOV_WMF_MCAST_DATA_SENDUP):
+ dhd_wmf_mcast_data_sendup(dhd_pub, 0, TRUE, int_val);
+ break;
+
+#ifdef WL_IGMP_UCQUERY
+ case IOV_GVAL(IOV_WMF_UCAST_IGMP_QUERY):
+ int_val = dhd_pub->wmf_ucast_igmp_query ? 1 : 0;
+ bcopy(&int_val, arg, val_size);
+ break;
+ case IOV_SVAL(IOV_WMF_UCAST_IGMP_QUERY):
+ if (dhd_pub->wmf_ucast_igmp_query == int_val)
+ break;
+
+ if (int_val >= OFF && int_val <= ON)
+ dhd_pub->wmf_ucast_igmp_query = int_val;
+ else
+ bcmerror = BCME_RANGE;
+ break;
+#endif /* WL_IGMP_UCQUERY */
+#ifdef DHD_UCAST_UPNP
+ case IOV_GVAL(IOV_WMF_UCAST_UPNP):
+ int_val = dhd_pub->wmf_ucast_upnp ? 1 : 0;
+ bcopy(&int_val, arg, val_size);
+ break;
+ case IOV_SVAL(IOV_WMF_UCAST_UPNP):
+ if (dhd_pub->wmf_ucast_upnp == int_val)
+ break;
+
+ if (int_val >= OFF && int_val <= ON)
+ dhd_pub->wmf_ucast_upnp = int_val;
+ else
+ bcmerror = BCME_RANGE;
+ break;
+#endif /* DHD_UCAST_UPNP */
+
+ case IOV_GVAL(IOV_WMF_PSTA_DISABLE): {
+ uint32 bssidx;
+ const char *val;
+
+ if (dhd_iovar_parse_bssidx(dhd_pub, (char *)name, &bssidx, &val) != BCME_OK) {
+ DHD_ERROR(("%s: ap isoalate: bad parameter\n", __FUNCTION__));
+ bcmerror = BCME_BADARG;
+ break;
+ }
+
+ int_val = dhd_get_wmf_psta_disable(dhd_pub, bssidx);
+ bcopy(&int_val, arg, val_size);
+ break;
+ }
+
+ case IOV_SVAL(IOV_WMF_PSTA_DISABLE): {
+ uint32 bssidx;
+ const char *val;
+
+ if (dhd_iovar_parse_bssidx(dhd_pub, (char *)name, &bssidx, &val) != BCME_OK) {
+ DHD_ERROR(("%s: ap isolate: bad parameter\n", __FUNCTION__));
+ bcmerror = BCME_BADARG;
+ break;
+ }
+
+ ASSERT(val);
+ bcopy(val, &int_val, sizeof(uint32));
+ dhd_set_wmf_psta_disable(dhd_pub, bssidx, int_val);
+ break;
+ }
+#endif /* DHD_WMF */
+
+#if defined(TRAFFIC_MGMT_DWM)
+ case IOV_SVAL(IOV_TRAFFIC_MGMT_DWM): {
+ trf_mgmt_filter_list_t *trf_mgmt_filter_list =
+ (trf_mgmt_filter_list_t *)(arg);
+ bcmerror = traffic_mgmt_add_dwm_filter(dhd_pub, trf_mgmt_filter_list, len);
+ }
+ break;
+#endif
+
+#ifdef DHD_L2_FILTER
+ case IOV_GVAL(IOV_DHCP_UNICAST): {
+ uint32 bssidx;
+ const char *val;
+ if (dhd_iovar_parse_bssidx(dhd_pub, name, &bssidx, &val) != BCME_OK) {
+ DHD_ERROR(("%s: IOV_DHCP_UNICAST: bad parameterand name = %s\n",
+ __FUNCTION__, name));
+ bcmerror = BCME_BADARG;
+ break;
+ }
+ int_val = dhd_get_dhcp_unicast_status(dhd_pub, bssidx);
+ memcpy(arg, &int_val, val_size);
+ break;
}
case IOV_SVAL(IOV_DHCP_UNICAST): {
uint32 bssidx;
bcmerror = dhd_set_grat_arp_status(dhd_pub, bssidx, int_val ? 1 : 0);
break;
}
- case IOV_GVAL(IOV_BLOCK_TDLS): {
- uint32 bssidx;
- const char *val;
-
- if (dhd_iovar_parse_bssidx(dhd_pub, name, &bssidx, &val) != BCME_OK) {
- DHD_ERROR(("%s: IOV_BLOCK_TDLS: bad parameter\n", __FUNCTION__));
- bcmerror = BCME_BADARG;
- break;
- }
- int_val = dhd_get_block_tdls_status(dhd_pub, bssidx);
- memcpy(arg, &int_val, val_size);
- break;
- }
- case IOV_SVAL(IOV_BLOCK_TDLS): {
- uint32 bssidx;
- const char *val;
-
- if (dhd_iovar_parse_bssidx(dhd_pub, name, &bssidx, &val) != BCME_OK) {
- DHD_ERROR(("%s: IOV_BLOCK_TDLS: bad parameter\n", __FUNCTION__));
- bcmerror = BCME_BADARG;
- break;
- }
- memcpy(&int_val, val, sizeof(int_val));
- bcmerror = dhd_set_block_tdls_status(dhd_pub, bssidx, int_val ? 1 : 0);
- break;
- }
#endif /* DHD_L2_FILTER */
case IOV_SVAL(IOV_DHD_IE): {
uint32 bssidx;
g_assert_type = (uint32)int_val;
break;
+
#if !defined(MACOSX_DHD)
case IOV_GVAL(IOV_LMTEST): {
*(uint32 *)arg = (uint32)lmtest;
}
break;
}
-#endif // endif
+#endif
#ifdef SHOW_LOGTRACE
case IOV_GVAL(IOV_DUMP_TRACE_LOG): {
- trace_buf_info_t *trace_buf_info = (trace_buf_info_t *)arg;
- dhd_dbg_ring_t *dbg_verbose_ring = NULL;
-
- dbg_verbose_ring = dhd_dbg_get_ring_from_ring_id(dhd_pub, FW_VERBOSE_RING_ID);
- if (dbg_verbose_ring == NULL) {
- DHD_ERROR(("dbg_verbose_ring is NULL\n"));
- bcmerror = BCME_UNSUPPORTED;
- break;
- }
+ trace_buf_info_t *trace_buf_info;
+ trace_buf_info = (trace_buf_info_t *)MALLOC(dhd_pub->osh,
+ sizeof(trace_buf_info_t));
if (trace_buf_info != NULL) {
- bzero(trace_buf_info, sizeof(trace_buf_info_t));
- dhd_dbg_read_ring_into_trace_buf(dbg_verbose_ring, trace_buf_info);
+ dhd_get_read_buf_ptr(dhd_pub, trace_buf_info);
+ memcpy((void*)arg, (void*)trace_buf_info, sizeof(trace_buf_info_t));
+ MFREE(dhd_pub->osh, trace_buf_info, sizeof(trace_buf_info_t));
} else {
- DHD_ERROR(("%s: arg is NULL\n", __FUNCTION__));
+ DHD_ERROR(("Memory allocation Failed\n"));
bcmerror = BCME_NOMEM;
}
break;
}
#endif /* SHOW_LOGTRACE */
+#ifdef REPORT_FATAL_TIMEOUTS
+ case IOV_GVAL(IOV_SCAN_TO): {
+ dhd_get_scan_to_val(dhd_pub, (uint32 *)&int_val);
+ bcopy(&int_val, arg, val_size);
+ break;
+ }
+ case IOV_SVAL(IOV_SCAN_TO): {
+ dhd_set_scan_to_val(dhd_pub, (uint32)int_val);
+ break;
+ }
+ case IOV_GVAL(IOV_JOIN_TO): {
+ dhd_get_join_to_val(dhd_pub, (uint32 *)&int_val);
+ bcopy(&int_val, arg, val_size);
+ break;
+ }
+ case IOV_SVAL(IOV_JOIN_TO): {
+ dhd_set_join_to_val(dhd_pub, (uint32)int_val);
+ break;
+ }
+ case IOV_GVAL(IOV_CMD_TO): {
+ dhd_get_cmd_to_val(dhd_pub, (uint32 *)&int_val);
+ bcopy(&int_val, arg, val_size);
+ break;
+ }
+ case IOV_SVAL(IOV_CMD_TO): {
+ dhd_set_cmd_to_val(dhd_pub, (uint32)int_val);
+ break;
+ }
+ case IOV_GVAL(IOV_OQS_TO): {
+ dhd_get_bus_to_val(dhd_pub, (uint32 *)&int_val);
+ bcopy(&int_val, arg, val_size);
+ break;
+ }
+ case IOV_SVAL(IOV_OQS_TO): {
+ dhd_set_bus_to_val(dhd_pub, (uint32)int_val);
+ break;
+ }
+#endif /* REPORT_FATAL_TIMEOUTS */
#ifdef DHD_DEBUG
#if defined(BCMSDIO) || defined(BCMPCIE)
case IOV_GVAL(IOV_DONGLE_TRAP_TYPE):
break;
}
#endif /* BCMSDIO || BCMPCIE */
-#ifdef BCMPCIE
- case IOV_SVAL(IOV_FLOW_RING_DEBUG):
- {
- bcmerror = dhd_flow_ring_debug(dhd_pub, arg, len);
- break;
- }
-#endif /* BCMPCIE */
case IOV_SVAL(IOV_MEM_DEBUG):
if (len > 0) {
bcmerror = dhd_mem_debug(dhd_pub, arg, len - 1);
}
break;
#endif /* DHD_DEBUG */
-#if defined(DHD_LOG_DUMP)
- case IOV_GVAL(IOV_LOG_DUMP):
+#if defined(DHD_EFI) && defined(DHD_LOG_DUMP)
+ case IOV_GVAL(IOV_LOG_CAPTURE_ENABLE):
{
- dhd_prot_debug_info_print(dhd_pub);
- dhd_log_dump_trigger(dhd_pub, CMD_DEFAULT);
+ int_val = dhd_pub->log_capture_enable;
+ bcopy(&int_val, arg, val_size);
break;
}
-#endif /* DHD_LOG_DUMP */
- case IOV_GVAL(IOV_DEBUG_BUF_DEST_STAT):
+
+ case IOV_SVAL(IOV_LOG_CAPTURE_ENABLE):
{
- if (dhd_pub->debug_buf_dest_support) {
- debug_buf_dest_stat_t *debug_buf_dest_stat =
- (debug_buf_dest_stat_t *)arg;
- memcpy(debug_buf_dest_stat, dhd_pub->debug_buf_dest_stat,
- sizeof(dhd_pub->debug_buf_dest_stat));
- } else {
- bcmerror = BCME_DISABLED;
- }
+ dhd_pub->log_capture_enable = (uint8)int_val;
break;
}
-#ifdef DHD_DEBUG
- case IOV_SVAL(IOV_INDUCE_ERROR): {
- if (int_val >= DHD_INDUCE_ERROR_MAX) {
- DHD_ERROR(("%s: Invalid command : %u\n", __FUNCTION__, (uint16)int_val));
- } else {
- dhd_pub->dhd_induce_error = (uint16)int_val;
+ case IOV_GVAL(IOV_LOG_DUMP):
+ {
+ dhd_prot_debug_info_print(dhd_pub);
+ dhd_bus_mem_dump(dhd_pub);
+ break;
}
- break;
- }
-#endif /* DHD_DEBUG */
-
-#ifdef WL_IFACE_MGMT_CONF
-#ifdef WL_CFG80211
-#ifdef WL_NANP2P
- case IOV_GVAL(IOV_CONC_DISC): {
- int_val = wl_cfg80211_get_iface_conc_disc(
- dhd_linux_get_primary_netdev(dhd_pub));
- bcopy(&int_val, arg, sizeof(int_val));
- break;
- }
- case IOV_SVAL(IOV_CONC_DISC): {
- bcmerror = wl_cfg80211_set_iface_conc_disc(
- dhd_linux_get_primary_netdev(dhd_pub), (uint8)int_val);
- break;
- }
-#endif /* WL_NANP2P */
-#ifdef WL_IFACE_MGMT
- case IOV_GVAL(IOV_IFACE_POLICY): {
- int_val = wl_cfg80211_get_iface_policy(
- dhd_linux_get_primary_netdev(dhd_pub));
- bcopy(&int_val, arg, sizeof(int_val));
- break;
- }
- case IOV_SVAL(IOV_IFACE_POLICY): {
- bcmerror = wl_cfg80211_set_iface_policy(
- dhd_linux_get_primary_netdev(dhd_pub),
- arg, len);
- break;
- }
-#endif /* WL_IFACE_MGMT */
-#endif /* WL_CFG80211 */
-#endif /* WL_IFACE_MGMT_CONF */
-#ifdef RTT_GEOFENCE_CONT
-#if defined(RTT_SUPPORT) && defined(WL_NAN)
- case IOV_GVAL(IOV_RTT_GEOFENCE_TYPE_OVRD): {
- bool enable = 0;
- dhd_rtt_get_geofence_cont_ind(dhd_pub, &enable);
- int_val = enable ? 1 : 0;
- bcopy(&int_val, arg, val_size);
- break;
- }
- case IOV_SVAL(IOV_RTT_GEOFENCE_TYPE_OVRD): {
- bool enable = *(bool *)arg;
- dhd_rtt_set_geofence_cont_ind(dhd_pub, enable);
- break;
- }
-#endif /* RTT_SUPPORT && WL_NAN */
-#endif /* RTT_GEOFENCE_CONT */
+#endif /* DHD_EFI && DHD_LOG_DUMP */
default:
bcmerror = BCME_UNSUPPORTED;
break;
/* Fast case, precedence queue is not full and we are also not
* exceeding total queue length
*/
- if (!pktqprec_full(q, prec) && !pktq_full(q)) {
+ if (!pktq_pfull(q, prec) && !pktq_full(q)) {
pktq_penq(q, prec, pkt);
return TRUE;
}
/* Determine precedence from which to evict packet, if any */
- if (pktqprec_full(q, prec))
+ if (pktq_pfull(q, prec))
eprec = prec;
else if (pktq_full(q)) {
p = pktq_peek_tail(q, &eprec);
/* Evict if needed */
if (eprec >= 0) {
/* Detect queueing to unconfigured precedence */
- ASSERT(!pktqprec_empty(q, eprec));
+ ASSERT(!pktq_pempty(q, eprec));
discard_oldest = AC_BITMAP_TST(dhdp->wme_dp, eprec);
if (eprec == prec && !discard_oldest)
return FALSE; /* refuse newer (incoming) packet */
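/* Summary (descriptive comment): eprec ends up as the precedence to evict
 * from - the incoming packet's own precedence when that per-prec queue is
 * full, otherwise the precedence found at the tail of the shared queue. The
 * newer packet is refused instead of evicting when it would displace a packet
 * of its own precedence and the WME discard-policy bitmap (wme_dp) does not
 * allow dropping the oldest entry.
 */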
p = first;
while (p) {
next = PKTLINK(p);
- q->n_pkts--;
- pq->n_pkts_tot--;
-
-#ifdef WL_TXQ_STALL
- q->dequeue_count++;
-#endif // endif
+ q->len--;
+ pq->len--;
PKTSETLINK(p, NULL);
uint arglen;
DHD_LINUX_GENERAL_LOCK(dhd_pub, flags);
- if (DHD_BUS_CHECK_DOWN_OR_DOWN_IN_PROGRESS(dhd_pub) &&
- bcmstricmp((char *)buf, "devreset")) {
+ if (DHD_BUS_CHECK_DOWN_OR_DOWN_IN_PROGRESS(dhd_pub)) {
/* On platforms like FC19, the FW download is done via IOCTL,
* so do not return an error for IOCTLs fired before the FW
* download is complete.
*/
- if (dhd_fw_download_status(dhd_pub) == FW_DOWNLOAD_DONE) {
+ if (dhd_fw_download_status(dhd_pub)) {
DHD_ERROR(("%s: returning as busstate=%d\n",
__FUNCTION__, dhd_pub->busstate));
DHD_LINUX_GENERAL_UNLOCK(dhd_pub, flags);
DHD_BUS_BUSY_SET_IN_DHD_IOVAR(dhd_pub);
DHD_LINUX_GENERAL_UNLOCK(dhd_pub, flags);
+#ifdef DHD_PCIE_RUNTIMEPM
+ dhdpcie_runtime_bus_wake(dhd_pub, TRUE, dhd_ioctl);
+#endif /* DHD_PCIE_RUNTIMEPM */
+
DHD_LINUX_GENERAL_LOCK(dhd_pub, flags);
if (DHD_BUS_CHECK_SUSPEND_OR_SUSPEND_IN_PROGRESS(dhd_pub)) {
/* If Suspend/Resume is tested via pcie_suspend IOVAR
* other IOVARs, also include pciecfgreg and devreset to go
* through.
*/
+#ifdef DHD_EFI
+ if (bcmstricmp((char *)buf, "pcie_suspend") &&
+ bcmstricmp((char *)buf, "pciecfgreg") &&
+ bcmstricmp((char *)buf, "devreset") &&
+ bcmstricmp((char *)buf, "sdio_suspend") &&
+ bcmstricmp((char *)buf, "control_signal"))
+#else
if (bcmstricmp((char *)buf, "pcie_suspend") &&
bcmstricmp((char *)buf, "pciecfgreg") &&
bcmstricmp((char *)buf, "devreset") &&
- bcmstricmp((char *)buf, "sdio_suspend")) {
+ bcmstricmp((char *)buf, "sdio_suspend"))
+#endif /* DHD_EFI */
+ {
DHD_ERROR(("%s: bus is in suspend(%d)"
"or suspending(0x%x) state\n",
__FUNCTION__, dhd_pub->busstate,
* to avoid ASSERT, clear the IOCTL busy state. "devreset" ioctl is
* not used in Production platforms but only used in FC19 setups.
*/
- if (!bcmstricmp((char *)buf, "devreset") ||
-#ifdef BCMPCIE
- (dhd_bus_is_multibp_capable(dhd_pub->bus) &&
- !bcmstricmp((char *)buf, "dwnldstate")) ||
-#endif /* BCMPCIE */
- FALSE)
- {
+ if (!bcmstricmp((char *)buf, "devreset")) {
DHD_BUS_BUSY_CLEAR_IN_DHD_IOVAR(dhd_pub);
}
DHD_LINUX_GENERAL_UNLOCK(dhd_pub, flags);
goto unlock_exit;
}
+#ifdef DHD_TIMESYNC
+ /* check TS module */
+ if (ioc->cmd == DHD_GET_VAR)
+ bcmerror = dhd_timesync_iovar_op(dhd_pub->ts, buf, arg,
+ arglen, buf, buflen, IOV_GET);
+ else
+ bcmerror = dhd_timesync_iovar_op(dhd_pub->ts, buf,
+ NULL, 0, arg, arglen, IOV_SET);
+#endif /* DHD_TIMESYNC */
}
goto unlock_exit;
}
#ifdef SHOW_EVENTS
-
static void
wl_show_host_event(dhd_pub_t *dhd_pub, wl_event_msg_t *event, void *event_data,
void *raw_event_ptr, char *eventmask)
datalen = ntoh32(event->datalen);
/* debug dump of event messages */
- snprintf(eabuf, sizeof(eabuf), MACDBG, MAC2STRDBG(event->addr.octet));
+ snprintf(eabuf, sizeof(eabuf), "%02x:%02x:%02x:%02x:%02x:%02x",
+ (uchar)event->addr.octet[0]&0xff,
+ (uchar)event->addr.octet[1]&0xff,
+ (uchar)event->addr.octet[2]&0xff,
+ (uchar)event->addr.octet[3]&0xff,
+ (uchar)event->addr.octet[4]&0xff,
+ (uchar)event->addr.octet[5]&0xff);
event_name = bcmevent_get_name(event_type);
BCM_REFERENCE(event_name);
case WLC_E_REASSOC_IND:
DHD_EVENT(("MACEVENT: %s, MAC %s\n", event_name, eabuf));
-
break;
case WLC_E_ASSOC:
} else if (status == WLC_E_STATUS_TIMEOUT) {
DHD_EVENT(("MACEVENT: %s, MAC %s, TIMEOUT\n", event_name, eabuf));
} else if (status == WLC_E_STATUS_FAIL) {
- DHD_EVENT(("MACEVENT: %s, MAC %s, FAILURE, status %d reason %d\n",
- event_name, eabuf, (int)status, (int)reason));
+ DHD_EVENT(("MACEVENT: %s, MAC %s, FAILURE, reason %d\n",
+ event_name, eabuf, (int)reason));
} else {
DHD_EVENT(("MACEVENT: %s, MAC %s, unexpected status %d\n",
event_name, eabuf, (int)status));
}
-
break;
case WLC_E_DEAUTH_IND:
auth_str = "Open System";
else if (auth_type == DOT11_SHARED_KEY)
auth_str = "Shared Key";
- else if (auth_type == DOT11_SAE)
- auth_str = "SAE";
else {
snprintf(err_msg, sizeof(err_msg), "AUTH unknown: %d", (int)auth_type);
auth_str = err_msg;
}
- if (event_type == WLC_E_AUTH_IND) {
+ if (event_type == WLC_E_AUTH_IND) {
DHD_EVENT(("MACEVENT: %s, MAC %s, %s\n", event_name, eabuf, auth_str));
} else if (status == WLC_E_STATUS_SUCCESS) {
DHD_EVENT(("MACEVENT: %s, MAC %s, %s, SUCCESS\n",
DHD_EVENT(("MACEVENT: %s, MAC %s, %s, TIMEOUT\n",
event_name, eabuf, auth_str));
} else if (status == WLC_E_STATUS_FAIL) {
- DHD_EVENT(("MACEVENT: %s, MAC %s, %s, FAILURE, status %d reason %d\n",
- event_name, eabuf, auth_str, (int)status, (int)reason));
- } else if (status == WLC_E_STATUS_NO_ACK) {
- DHD_EVENT(("MACEVENT: %s, MAC %s, %s, NOACK\n",
- event_name, eabuf, auth_str));
- } else {
- DHD_EVENT(("MACEVENT: %s, MAC %s, %s, status %d reason %d\n",
- event_name, eabuf, auth_str, (int)status, (int)reason));
+ DHD_EVENT(("MACEVENT: %s, MAC %s, %s, FAILURE, reason %d\n",
+ event_name, eabuf, auth_str, (int)reason));
}
BCM_REFERENCE(auth_str);
case WLC_E_SET_SSID:
if (status == WLC_E_STATUS_SUCCESS) {
DHD_EVENT(("MACEVENT: %s, MAC %s\n", event_name, eabuf));
+#ifdef REPORT_FATAL_TIMEOUTS
+ dhd_clear_join_error(dhd_pub, WLC_SSID_MASK);
+#endif /* REPORT_FATAL_TIMEOUTS */
} else {
+#ifdef REPORT_FATAL_TIMEOUTS
+ dhd_set_join_error(dhd_pub, WLC_SSID_MASK);
+#endif /* REPORT_FATAL_TIMEOUTS */
if (status == WLC_E_STATUS_FAIL) {
- DHD_EVENT(("MACEVENT: %s, failed status %d\n", event_name, status));
+ DHD_EVENT(("MACEVENT: %s, failed\n", event_name));
} else if (status == WLC_E_STATUS_NO_NETWORKS) {
DHD_EVENT(("MACEVENT: %s, no networks found\n", event_name));
} else {
break;
case WLC_E_LINK:
- DHD_EVENT(("MACEVENT: %s %s flags:0x%x status:%d\n",
- event_name, link?"UP":"DOWN", flags, status));
+ DHD_EVENT(("MACEVENT: %s %s\n", event_name, link?"UP":"DOWN"));
BCM_REFERENCE(link);
break;
case WLC_E_SCAN_COMPLETE:
DHD_EVENT(("MACEVENT: %s\n", event_name));
+#ifdef REPORT_FATAL_TIMEOUTS
+ dhd_stop_scan_timer(dhd_pub);
+#endif /* REPORT_FATAL_TIMEOUTS */
break;
case WLC_E_RSSI_LQM:
case WLC_E_PFN_NET_FOUND:
DHD_EVENT(("PNOEVENT: %s\n", event_name));
break;
- case WLC_E_PFN_SCAN_BACKOFF:
- case WLC_E_PFN_BSSID_SCAN_BACKOFF:
- DHD_EVENT(("PNOEVENT: %s, status %d, reason %d\n",
- event_name, (int)status, (int)reason));
- break;
-
case WLC_E_PSK_SUP:
case WLC_E_PRUNE:
DHD_EVENT(("MACEVENT: %s, status %d, reason %d\n",
event_name, (int)status, (int)reason));
+#ifdef REPORT_FATAL_TIMEOUTS
+ if ((status == WLC_E_STATUS_SUCCESS || status == WLC_E_STATUS_UNSOLICITED) &&
+ (reason == WLC_E_SUP_OTHER)) {
+ dhd_clear_join_error(dhd_pub, WLC_WPA_MASK);
+ } else {
+ dhd_set_join_error(dhd_pub, WLC_WPA_MASK);
+ }
+#endif /* REPORT_FATAL_TIMEOUTS */
break;
#ifdef WIFI_ACT_FRAME
#ifdef SHOW_LOGTRACE
case WLC_E_TRACE:
- {
+ DHD_EVENT(("MACEVENT: %s Logtrace\n", event_name));
dhd_dbg_trace_evnt_handler(dhd_pub, event_data, raw_event_ptr, datalen);
break;
- }
#endif /* SHOW_LOGTRACE */
case WLC_E_RSSI:
case WLC_E_BT_WIFI_HANDOVER_REQ:
DHD_EVENT(("MACEVENT: %s, MAC %s\n", event_name, eabuf));
break;
-#endif // endif
+#endif
case WLC_E_CCA_CHAN_QUAL:
if (datalen) {
- cca_chan_qual_event_t *cca_event = (cca_chan_qual_event_t *)event_data;
- if (cca_event->id == WL_CHAN_QUAL_FULLPM_CCA) {
- cca_only_chan_qual_event_t *cca_only_event =
- (cca_only_chan_qual_event_t *)cca_event;
- BCM_REFERENCE(cca_only_event);
- DHD_EVENT((
- "MACEVENT: %s %d, MAC %s, status %d, reason %d, auth %d,"
- " channel 0x%02x\n",
- event_name, event_type, eabuf, (int)status,
- (int)reason, (int)auth_type, cca_event->chanspec));
- DHD_EVENT((
- "\tTOTAL (dur %dms me %dms notme %dms interf %dms"
- " ts 0x%08x)\n",
- cca_only_event->cca_busy_ext.duration,
- cca_only_event->cca_busy_ext.congest_ibss,
- cca_only_event->cca_busy_ext.congest_obss,
- cca_only_event->cca_busy_ext.interference,
- cca_only_event->cca_busy_ext.timestamp));
- DHD_EVENT((
- "\t !PM (dur %dms me %dms notme %dms interf %dms)\n",
- cca_only_event->cca_busy_nopm.duration,
- cca_only_event->cca_busy_nopm.congest_ibss,
- cca_only_event->cca_busy_nopm.congest_obss,
- cca_only_event->cca_busy_nopm.interference));
- DHD_EVENT((
- "\t PM (dur %dms me %dms notme %dms interf %dms)\n",
- cca_only_event->cca_busy_pm.duration,
- cca_only_event->cca_busy_pm.congest_ibss,
- cca_only_event->cca_busy_pm.congest_obss,
- cca_only_event->cca_busy_pm.interference));
- } else if (cca_event->id == WL_CHAN_QUAL_FULL_CCA) {
- DHD_EVENT((
- "MACEVENT: %s %d, MAC %s, status %d, reason %d, auth %d,"
- " channel 0x%02x (dur %dms ibss %dms obss %dms interf %dms"
- " ts 0x%08x)\n",
- event_name, event_type, eabuf, (int)status,
- (int)reason, (int)auth_type, cca_event->chanspec,
- cca_event->cca_busy_ext.duration,
- cca_event->cca_busy_ext.congest_ibss,
- cca_event->cca_busy_ext.congest_obss,
- cca_event->cca_busy_ext.interference,
- cca_event->cca_busy_ext.timestamp));
- } else if (cca_event->id == WL_CHAN_QUAL_CCA) {
- DHD_EVENT((
- "MACEVENT: %s %d, MAC %s, status %d, reason %d, auth %d,"
- " channel 0x%02x (dur %dms busy %dms ts 0x%08x)\n",
- event_name, event_type, eabuf, (int)status,
- (int)reason, (int)auth_type, cca_event->chanspec,
- cca_event->cca_busy.duration,
- cca_event->cca_busy.congest,
- cca_event->cca_busy.timestamp));
- } else if ((cca_event->id == WL_CHAN_QUAL_NF) ||
- (cca_event->id == WL_CHAN_QUAL_NF_LTE)) {
- DHD_EVENT((
- "MACEVENT: %s %d, MAC %s, status %d, reason %d, auth %d,"
- " channel 0x%02x (NF[%d] %ddB)\n",
- event_name, event_type, eabuf, (int)status,
- (int)reason, (int)auth_type, cca_event->chanspec,
- cca_event->id, cca_event->noise));
- } else {
- DHD_EVENT((
- "MACEVENT: %s %d, MAC %s, status %d, reason %d, auth %d,"
- " channel 0x%02x (unknown ID %d)\n",
- event_name, event_type, eabuf, (int)status,
- (int)reason, (int)auth_type, cca_event->chanspec,
- cca_event->id));
- }
+ buf = (uchar *) event_data;
+ DHD_EVENT(("MACEVENT: %s %d, MAC %s, status %d, reason %d, auth %d, "
+ "channel 0x%02x \n", event_name, event_type, eabuf, (int)status,
+ (int)reason, (int)auth_type, *(buf + 4)));
}
break;
case WLC_E_ESCAN_RESULT:
{
- wl_escan_result_v2_t *escan_result =
- (wl_escan_result_v2_t *)event_data;
- BCM_REFERENCE(escan_result);
- if ((status == WLC_E_STATUS_SUCCESS) || (status == WLC_E_STATUS_ABORT)) {
- DHD_EVENT(("MACEVENT: %s %d, status %d sync-id %u\n",
- event_name, event_type, (int)status,
- dtoh16(escan_result->sync_id)));
- } else {
- DHD_TRACE(("MACEVENT: %s %d, MAC %s, status %d \n",
- event_name, event_type, eabuf, (int)status));
- }
-
- break;
+ DHD_EVENT(("MACEVENT: %s %d, MAC %s, status %d \n",
+ event_name, event_type, eabuf, (int)status));
}
+ break;
+ case WLC_E_PSK_AUTH:
+ DHD_EVENT(("MACEVENT: %s, RA %s status %d Reason:%d\n",
+ event_name, eabuf, status, reason));
+ break;
case WLC_E_IF:
{
struct wl_event_data_if *ifevent = (struct wl_event_data_if *)event_data;
BCM_REFERENCE(ifevent);
- DHD_EVENT(("MACEVENT: %s, opcode:0x%d ifidx:%d role:%d\n",
- event_name, ifevent->opcode, ifevent->ifidx, ifevent->role));
+ DHD_EVENT(("MACEVENT: %s, opcode:0x%d ifidx:%d\n",
+ event_name, ifevent->opcode, ifevent->ifidx));
break;
}
+
#ifdef SHOW_LOGTRACE
case WLC_E_MSCH:
{
}
#endif /* SHOW_LOGTRACE */
- case WLC_E_PSK_AUTH:
- DHD_EVENT(("MACEVENT: %s, RA %s status %d Reason:%d\n",
- event_name, eabuf, status, reason));
- break;
- case WLC_E_AGGR_EVENT:
- {
- event_aggr_data_t *aggrbuf = event_data;
- int j = 0, len = 0;
- uint8 *data = aggrbuf->data;
- DHD_EVENT(("MACEVENT: %s, num of events %d total len %d sub events: ",
- event_name, aggrbuf->num_events, aggrbuf->len));
- for (j = 0; j < aggrbuf->num_events; j++)
- {
- wl_event_msg_t * sub_event = (wl_event_msg_t *)data;
- if (len > aggrbuf->len) {
- DHD_ERROR(("%s: Aggr events corrupted!",
- __FUNCTION__));
- break;
- }
- DHD_EVENT(("\n Event type: %d ", ntoh32(sub_event->event_type)));
- len += ALIGN_SIZE((ntoh32(sub_event->datalen) +
- sizeof(wl_event_msg_t)), sizeof(uint64));
- buf = (uchar *)(data + sizeof(wl_event_msg_t));
- BCM_REFERENCE(buf);
- DHD_EVENT((" data (%d) : ", ntoh32(sub_event->datalen)));
- for (i = 0; i < ntoh32(sub_event->datalen); i++) {
- DHD_EVENT((" 0x%02x ", buf[i]));
- }
- data = aggrbuf->data + len;
- }
- DHD_EVENT(("\n"));
- }
- break;
- case WLC_E_NAN_CRITICAL:
- {
- DHD_LOG_MEM(("MACEVENT: %s, type:%d\n", event_name, reason));
- break;
- }
- case WLC_E_NAN_NON_CRITICAL:
- {
- DHD_TRACE(("MACEVENT: %s, type:%d\n", event_name, reason));
- break;
- }
- case WLC_E_PROXD:
- {
- wl_proxd_event_t *proxd = (wl_proxd_event_t*)event_data;
- DHD_LOG_MEM(("MACEVENT: %s, event:%d, status:%d\n",
- event_name, proxd->type, reason));
- break;
- }
- case WLC_E_RPSNOA:
- {
- rpsnoa_stats_t *stat = event_data;
- if (datalen == sizeof(*stat)) {
- DHD_EVENT(("MACEVENT: %s, band %s, status %d, pps %d\n", event_name,
- (stat->band == WLC_BAND_2G) ? "2G":"5G",
- stat->state, stat->last_pps));
- }
- break;
- }
- case WLC_E_PHY_CAL:
- {
- DHD_EVENT(("MACEVENT: %s, reason:%d\n", event_name, reason));
- break;
- }
- case WLC_E_WA_LQM:
- {
- wl_event_wa_lqm_t *event_wa_lqm = (wl_event_wa_lqm_t *)event_data;
- bcm_xtlv_t *subevent;
- wl_event_wa_lqm_basic_t *elqm_basic;
-
- if ((event_wa_lqm->ver != WL_EVENT_WA_LQM_VER) ||
- (event_wa_lqm->len < sizeof(wl_event_wa_lqm_t) + BCM_XTLV_HDR_SIZE)) {
- DHD_ERROR(("MACEVENT: %s invalid (ver=%d len=%d)\n",
- event_name, event_wa_lqm->ver, event_wa_lqm->len));
- break;
- }
-
- subevent = (bcm_xtlv_t *)event_wa_lqm->subevent;
- if ((subevent->id != WL_EVENT_WA_LQM_BASIC) ||
- (subevent->len < sizeof(wl_event_wa_lqm_basic_t))) {
- DHD_ERROR(("MACEVENT: %s invalid sub-type (id=%d len=%d)\n",
- event_name, subevent->id, subevent->len));
- break;
- }
-
- elqm_basic = (wl_event_wa_lqm_basic_t *)subevent->data;
- BCM_REFERENCE(elqm_basic);
- DHD_EVENT(("MACEVENT: %s (RSSI=%d SNR=%d TxRate=%d RxRate=%d)\n",
- event_name, elqm_basic->rssi, elqm_basic->snr,
- elqm_basic->tx_rate, elqm_basic->rx_rate));
- break;
- }
default:
DHD_EVENT(("MACEVENT: %s %d, MAC %s, status %d, reason %d, auth %d\n",
event_name, event_type, eabuf, (int)status, (int)reason,
}
DHD_EVENT(("\n"));
}
-} /* wl_show_host_event */
+}
#endif /* SHOW_EVENTS */
#ifdef DNGL_EVENT_SUPPORT
return BCME_OK;
}
-#ifdef PARSE_DONGLE_HOST_EVENT
-typedef struct hck_id_to_str_s {
- uint32 id;
- char *name;
-} hck_id_to_str_t;
-
-hck_id_to_str_t hck_sw_id_to_str[] = {
- {WL_HC_DD_PCIE, "WL_HC_DD_PCIE"},
- {WL_HC_DD_RX_DMA_STALL, "WL_HC_DD_RX_DMA_STALL"},
- {WL_HC_DD_RX_STALL, "WL_HC_DD_RX_STALL"},
- {WL_HC_DD_TX_STALL, "WL_HC_DD_TX_STALL"},
- {WL_HC_DD_SCAN_STALL, "WL_HC_DD_SCAN_STALL"},
- {WL_HC_DD_PHY, "WL_HC_DD_PHY"},
- {WL_HC_DD_REINIT, "WL_HC_DD_REINIT"},
- {WL_HC_DD_TXQ_STALL, "WL_HC_DD_TXQ_STALL"},
- {0, NULL}
-};
-
-hck_id_to_str_t hck_pcie_module_to_str[] = {
- {HEALTH_CHECK_PCIEDEV_INDUCED_IND, "PCIEDEV_INDUCED_IND"},
- {HEALTH_CHECK_PCIEDEV_H2D_DMA_IND, "PCIEDEV_H2D_DMA_IND"},
- {HEALTH_CHECK_PCIEDEV_D2H_DMA_IND, "PCIEDEV_D2H_DMA_IND"},
- {HEALTH_CHECK_PCIEDEV_IOCTL_STALL_IND, "PCIEDEV_IOCTL_STALL_IND"},
- {HEALTH_CHECK_PCIEDEV_D3ACK_STALL_IND, "PCIEDEV_D3ACK_STALL_IND"},
- {HEALTH_CHECK_PCIEDEV_NODS_IND, "PCIEDEV_NODS_IND"},
- {HEALTH_CHECK_PCIEDEV_LINKSPEED_FALLBACK_IND, "PCIEDEV_LINKSPEED_FALLBACK_IND"},
- {HEALTH_CHECK_PCIEDEV_DSACK_STALL_IND, "PCIEDEV_DSACK_STALL_IND"},
- {0, NULL}
-};
-
-hck_id_to_str_t hck_rx_stall_v2_to_str[] = {
- {BCM_RX_HC_RESERVED, "BCM_RX_HC_RESERVED"},
- {BCM_RX_HC_UNSPECIFIED, "BCM_RX_HC_UNSPECIFIED"},
- {BCM_RX_HC_UNICAST_DECRYPT_FAIL, "BCM_RX_HC_UNICAST_DECRYPT_FAIL"},
- {BCM_RX_HC_BCMC_DECRYPT_FAIL, "BCM_RX_HC_BCMC_DECRYPT_FAIL"},
- {BCM_RX_HC_UNICAST_REPLAY, "BCM_RX_HC_UNICAST_REPLAY"},
- {BCM_RX_HC_BCMC_REPLAY, "BCM_RX_HC_BCMC_REPLAY"},
- {BCM_RX_HC_AMPDU_DUP, "BCM_RX_HC_AMPDU_DUP"},
- {0, NULL}
-};
-
-static void
-dhd_print_dongle_hck_id(uint32 id, hck_id_to_str_t *hck)
-{
- while (hck->name != NULL) {
- if (hck->id == id) {
- DHD_ERROR(("DONGLE_HCK_EVENT: %s\n", hck->name));
- return;
- }
- hck++;
- }
-}
-
void
-dhd_parse_hck_common_sw_event(bcm_xtlv_t *wl_hc)
-{
-
- wl_rx_hc_info_v2_t *hck_rx_stall_v2;
- uint16 id;
-
- id = ltoh16(wl_hc->id);
-
- if (id == WL_HC_DD_RX_STALL_V2) {
- /* map the hck_rx_stall_v2 structure to the value of the XTLV */
- hck_rx_stall_v2 =
- (wl_rx_hc_info_v2_t*)wl_hc;
- DHD_ERROR(("type:%d len:%d if_idx:%d ac:%d pkts:%d"
- " drop:%d alert_th:%d reason:%d peer_ea:"MACF"\n",
- hck_rx_stall_v2->type,
- hck_rx_stall_v2->length,
- hck_rx_stall_v2->if_idx,
- hck_rx_stall_v2->ac,
- hck_rx_stall_v2->rx_hc_pkts,
- hck_rx_stall_v2->rx_hc_dropped_all,
- hck_rx_stall_v2->rx_hc_alert_th,
- hck_rx_stall_v2->reason,
- ETHER_TO_MACF(hck_rx_stall_v2->peer_ea)));
- dhd_print_dongle_hck_id(
- ltoh32(hck_rx_stall_v2->reason),
- hck_rx_stall_v2_to_str);
- } else {
- dhd_print_dongle_hck_id(ltoh16(wl_hc->id),
- hck_sw_id_to_str);
- }
-
-}
-
-#endif /* PARSE_DONGLE_HOST_EVENT */
-
-void
-dngl_host_event_process(dhd_pub_t *dhdp, bcm_dngl_event_t *event,
- bcm_dngl_event_msg_t *dngl_event, size_t pktlen)
+dngl_host_event_process(dhd_pub_t *dhdp, bcm_dngl_event_t *event,
+ bcm_dngl_event_msg_t *dngl_event, size_t pktlen)
{
uint8 *p = (uint8 *)(event + 1);
uint16 type = ntoh16_ua((void *)&dngl_event->event_type);
DHD_EVENT(("Line:%d ", *(uint32 *)p));
p += sizeof(uint32);
DHD_EVENT(("Caller Addr:0x%x\n", *(uint32 *)p));
-#ifdef PARSE_DONGLE_HOST_EVENT
- DHD_ERROR(("DONGLE_HCK_EVENT: SOCRAM_IND_ASSERT_TAG\n"));
-#endif /* PARSE_DONGLE_HOST_EVENT */
break;
}
case SOCRAM_IND_TAG_HEALTH_CHECK:
{
bcm_dngl_healthcheck_t *dngl_hc = (bcm_dngl_healthcheck_t *)p;
- DHD_EVENT(("SOCRAM_IND_HEALTHCHECK_TAG:%d Len:%d datalen:%d\n",
- ltoh32(dngl_hc->top_module_tag),
- ltoh32(dngl_hc->top_module_len),
- datalen));
+ DHD_EVENT(("SOCRAM_IND_HEALTHCHECK_TAG:%d Len:%d\n",
+ ltoh32(dngl_hc->top_module_tag), ltoh32(dngl_hc->top_module_len)));
if (DHD_EVENT_ON()) {
- prhex("HEALTHCHECK", p, MIN(ltoh32(dngl_hc->top_module_len)
- + BCM_XTLV_HDR_SIZE, datalen));
+ prhex("HEALTHCHECK", p, ltoh32(dngl_hc->top_module_len));
}
-#ifdef DHD_LOG_DUMP
- memset(dhdp->health_chk_event_data, 0, HEALTH_CHK_BUF_SIZE);
- memcpy(dhdp->health_chk_event_data, p,
- MIN(ltoh32(dngl_hc->top_module_len),
- HEALTH_CHK_BUF_SIZE));
-#endif /* DHD_LOG_DUMP */
p = (uint8 *)dngl_hc->value;
switch (ltoh32(dngl_hc->top_module_tag)) {
ltoh32(pcie_hc->pcie_err_ind_type),
ltoh32(pcie_hc->pcie_flag),
ltoh32(pcie_hc->pcie_control_reg)));
-#ifdef PARSE_DONGLE_HOST_EVENT
- dhd_print_dongle_hck_id(
- ltoh32(pcie_hc->pcie_err_ind_type),
- hck_pcie_module_to_str);
-#endif /* PARSE_DONGLE_HOST_EVENT */
break;
}
-#ifdef HCHK_COMMON_SW_EVENT
- case HCHK_SW_ENTITY_WL_PRIMARY:
- case HCHK_SW_ENTITY_WL_SECONDARY:
- {
- bcm_xtlv_t *wl_hc = (bcm_xtlv_t*)p;
-
- if (ltoh32(dngl_hc->top_module_len) <
- sizeof(bcm_xtlv_t)) {
- DHD_ERROR(("WL SW HC Wrong length:%d\n",
- ltoh32(dngl_hc->top_module_len)));
- return;
- }
- BCM_REFERENCE(wl_hc);
- DHD_EVENT(("WL SW HC type %d len %d\n",
- ltoh16(wl_hc->id), ltoh16(wl_hc->len)));
-
-#ifdef PARSE_DONGLE_HOST_EVENT
- dhd_parse_hck_common_sw_event(wl_hc);
-#endif /* PARSE_DONGLE_HOST_EVENT */
- break;
-
- }
-#endif /* HCHK_COMMON_SW_EVENT */
default:
- {
DHD_ERROR(("%s:Unknown module TAG:%d\n",
__FUNCTION__,
ltoh32(dngl_hc->top_module_tag)));
break;
- }
}
break;
}
default:
- DHD_ERROR(("%s:Unknown TAG\n", __FUNCTION__));
+ DHD_ERROR(("%s:Unknown TAG", __FUNCTION__));
if (p && DHD_EVENT_ON()) {
prhex("SOCRAMIND", p, taglen);
}
break;
}
default:
- DHD_ERROR(("%s:Unknown DNGL Event Type:%d\n", __FUNCTION__, type));
+ DHD_ERROR(("%s:Unknown DNGL Event Type:%d", __FUNCTION__, type));
if (p && DHD_EVENT_ON()) {
prhex("SOCRAMIND", p, datalen);
}
break;
}
-#ifndef BCMDBUS
#ifdef DHD_FW_COREDUMP
- if (dhdp->memdump_enabled) {
- dhdp->memdump_type = DUMP_TYPE_DONGLE_HOST_EVENT;
- if (dhd_socram_dump(dhdp->bus)) {
- DHD_ERROR(("%s: socram dump failed\n", __FUNCTION__));
- }
- }
-#else
- dhd_dbg_send_urgent_evt(dhdp, p, datalen);
+ dhdp->memdump_type = DUMP_TYPE_DONGLE_HOST_EVENT;
#endif /* DHD_FW_COREDUMP */
+#ifndef BCMDBUS
+ if (dhd_socram_dump(dhdp->bus)) {
+ DHD_ERROR(("%s: socram dump failed\n", __FUNCTION__));
+ } else {
+ /* Notify framework */
+ dhd_dbg_send_urgent_evt(dhdp, p, datalen);
+ }
#endif /* !BCMDBUS */
}
-
#endif /* DNGL_EVENT_SUPPORT */
/* Stub for now. Will become real function as soon as shim
evt_pport.raw_event = raw_event;
evt_pport.data_len = pktlen;
+#if defined(WL_WLC_SHIM) && defined(WL_WLC_SHIM_EVENTS)
+ {
+ struct wl_shim_node *shim = dhd_pub_shim(dhd_pub);
+ if (shim) {
+ ret = wl_shim_event_process(shim, &event, &evt_pport);
+ } else {
+ /* Events can come even before the shim is initialized
+ * (when waiting for the "wlc_ver" response);
+ * handle them in a non-shim way.
+ */
+ DHD_ERROR(("%s: Events coming before shim initialization!\n",
+ __FUNCTION__));
+ ret = wl_event_process_default(&event, &evt_pport);
+ }
+ }
+#else
ret = wl_event_process_default(&event, &evt_pport);
+#endif /* WL_WLC_SHIM && WL_WLC_SHIM_EVENTS */
return ret;
-} /* wl_event_process */
+}
/* Check whether packet is a BRCM event pkt. If it is, record event data. */
int
bcm_event_t *pvt_data = (bcm_event_t *)pktdata;
bcm_event_msg_u_t evu;
uint8 *event_data;
- uint32 type, status, datalen, reason;
+ uint32 type, status, datalen;
uint16 flags;
uint evlen;
int ret;
uint16 usr_subtype;
-#ifdef DHD_POST_EAPOL_M1_AFTER_ROAM_EVT
- dhd_if_t *ifp = NULL;
-#endif /* DHD_POST_EAPOL_M1_AFTER_ROAM_EVT */
+ char macstr[ETHER_ADDR_STR_LEN];
+
+ BCM_REFERENCE(macstr);
ret = wl_host_event_get_data(pktdata, pktlen, &evu);
if (ret != BCME_OK) {
type = ntoh32_ua((void *)&event->event_type);
flags = ntoh16_ua((void *)&event->flags);
status = ntoh32_ua((void *)&event->status);
- reason = ntoh32_ua((void *)&event->reason);
datalen = ntoh32_ua((void *)&event->datalen);
evlen = datalen + sizeof(bcm_event_t);
#endif /* LIMIT_BORROW */
#endif /* PROP_TXSTATUS */
+
case WLC_E_ULP:
#ifdef DHD_ULP
{
{
dhd_tdls_event_handler(dhd_pub, event);
}
-#endif // endif
+#endif
break;
case WLC_E_IF:
#ifdef PCIE_FULL_DONGLE
dhd_update_interface_flow_info(dhd_pub, ifevent->ifidx,
ifevent->opcode, ifevent->role);
-#endif // endif
+#endif
#ifdef PROP_TXSTATUS
{
uint8* ea = pvt_data->eth.ether_dhost;
- WLFC_DBGMESG(("WLC_E_IF: idx:%d, action:%s, iftype:%s, ["MACDBG"]\n"
+ WLFC_DBGMESG(("WLC_E_IF: idx:%d, action:%s, iftype:%s, "
+ "[%02x:%02x:%02x:%02x:%02x:%02x]\n",
ifevent->ifidx,
((ifevent->opcode == WLC_E_IF_ADD) ? "ADD":"DEL"),
((ifevent->role == 0) ? "STA":"AP "),
- MAC2STRDBG(ea)));
+ ea[0], ea[1], ea[2], ea[3], ea[4], ea[5]));
(void)ea;
if (ifevent->opcode == WLC_E_IF_CHANGE)
#endif /* PCIE_FULL_DONGLE */
dhd_event_ifdel(dhd_pub->info, ifevent, event->ifname,
event->addr.octet);
+ /* Return ifidx (for a virtual i/f it will be > 0)
+ * so that no further operations are carried out on
+ * the deleted interface.
+ */
+ ret = ifevent->ifidx;
+ goto exit;
} else if (ifevent->opcode == WLC_E_IF_CHANGE) {
#ifdef WL_CFG80211
dhd_event_ifchange(dhd_pub->info, ifevent, event->ifname,
break;
}
+#ifdef WLMEDIA_HTSF
+ case WLC_E_HTSFSYNC:
+ htsf_update(dhd_pub->info, event_data);
+ break;
+#endif /* WLMEDIA_HTSF */
case WLC_E_NDIS_LINK:
break;
case WLC_E_PFN_NET_FOUND:
case WLC_E_PFN_BEST_BATCHING:
dhd_pno_event_handler(dhd_pub, event, (void *)event_data);
break;
-#endif // endif
+#endif
#if defined(RTT_SUPPORT)
case WLC_E_PROXD:
-#ifndef WL_CFG80211
dhd_rtt_event_handler(dhd_pub, event, (void *)event_data);
-#endif /* WL_CFG80211 */
break;
#endif /* RTT_SUPPORT */
/* These are what external supplicant/authenticator wants */
DHD_ERROR(("%s: socram dump ERROR : \n", __FUNCTION__));
}
break;
-#endif // endif
+#endif
#endif /* !BCMDBUS */
- case WLC_E_NATOE_NFCT:
-#ifdef WL_NATOE
- DHD_EVENT(("%s: WLC_E_NATOE_NFCT event received \n", __FUNCTION__));
- dhd_natoe_ct_event(dhd_pub, event_data);
-#endif /* WL_NATOE */
- break;
-#ifdef WL_NAN
- case WLC_E_SLOTTED_BSS_PEER_OP:
- DHD_EVENT(("%s: WLC_E_SLOTTED_BSS_PEER_OP event received for peer: "
- "" MACDBG ", status = %d\n",
- __FUNCTION__, MAC2STRDBG(event->addr.octet), status));
- if (status == WLC_E_STATUS_SLOTTED_PEER_ADD) {
- dhd_findadd_sta(dhd_pub, dhd_ifname2idx(dhd_pub->info,
- event->ifname), &event->addr.octet);
- } else if (status == WLC_E_STATUS_SLOTTED_PEER_DEL) {
- uint8 ifindex = (uint8)dhd_ifname2idx(dhd_pub->info, event->ifname);
- BCM_REFERENCE(ifindex);
- dhd_del_sta(dhd_pub, dhd_ifname2idx(dhd_pub->info,
- event->ifname), &event->addr.octet);
-#ifdef PCIE_FULL_DONGLE
- dhd_flow_rings_delete_for_peer(dhd_pub, ifindex,
- (char *)&event->addr.octet[0]);
-#endif // endif
- } else {
- DHD_ERROR(("%s: WLC_E_SLOTTED_BSS_PEER_OP: Status is not expected = %d\n",
- __FUNCTION__, status));
- }
- break;
-#endif /* WL_NAN */
-#ifdef DHD_POST_EAPOL_M1_AFTER_ROAM_EVT
- case WLC_E_REASSOC:
- ifp = dhd_get_ifp(dhd_pub, event->ifidx);
-
- if (!ifp)
- break;
-
- /* Consider STA role only since roam is disabled on P2P GC.
- * Drop EAPOL M1 frame only if roam is done to same BSS.
- */
- if ((status == WLC_E_STATUS_SUCCESS) &&
- IS_STA_IFACE(ndev_to_wdev(ifp->net)) &&
- wl_cfg80211_is_event_from_connected_bssid(ifp->net, event, event->ifidx)) {
- ifp->recv_reassoc_evt = TRUE;
- }
- break;
-#endif /* DHD_POST_EAPOL_M1_AFTER_ROAM_EVT */
-#if defined(CSI_SUPPORT)
- case WLC_E_CSI:
- dhd_csi_event_handler(dhd_pub, event, (void *)event_data);
+#ifdef DHD_WMF
+ case WLC_E_PSTA_PRIMARY_INTF_IND:
+ dhd_update_psta_interface_for_sta(dhd_pub, event->ifname,
+ (void *)(event->addr.octet), (void*) event_data);
break;
-#endif /* CSI_SUPPORT */
+#endif
case WLC_E_LINK:
#ifdef PCIE_FULL_DONGLE
+ DHD_EVENT(("%s: Link event %d, flags %x, status %x\n",
+ __FUNCTION__, type, flags, status));
if (dhd_update_interface_link_status(dhd_pub, (uint8)dhd_ifname2idx(dhd_pub->info,
event->ifname), (uint8)flags) != BCME_OK) {
DHD_ERROR(("%s: dhd_update_interface_link_status Failed.\n",
__FUNCTION__, type, flags, status, role, del_sta));
if (del_sta) {
- DHD_EVENT(("%s: Deleting STA " MACDBG "\n",
- __FUNCTION__, MAC2STRDBG(event->addr.octet)));
+ DHD_MAC_TO_STR((event->addr.octet), macstr);
+ DHD_EVENT(("%s: Deleting STA %s\n", __FUNCTION__, macstr));
dhd_del_sta(dhd_pub, dhd_ifname2idx(dhd_pub->info,
event->ifname), &event->addr.octet);
}
}
#endif /* PCIE_FULL_DONGLE */
-#ifdef DHD_POST_EAPOL_M1_AFTER_ROAM_EVT
- /* fall through */
- ifp = dhd_get_ifp(dhd_pub, event->ifidx);
- if (ifp) {
- ifp->recv_reassoc_evt = FALSE;
- ifp->post_roam_evt = FALSE;
- }
-#endif /* DHD_POST_EAPOL_M1_AFTER_ROAM_EVT */
/* fall through */
+
default:
*ifidx = dhd_ifname2idx(dhd_pub->info, event->ifname);
#ifdef DHD_UPDATE_INTF_MAC
__FUNCTION__, type, flags, status));
BCM_REFERENCE(flags);
BCM_REFERENCE(status);
- BCM_REFERENCE(reason);
break;
}
* to host with its registered interface name
*/
memcpy(pvt_data->event.ifname, dhd_ifname(dhd_pub, *ifidx), IFNAMSIZ);
-#endif // endif
+#endif
-#ifdef DHD_STATUS_LOGGING
- if (dhd_pub->statlog) {
- dhd_statlog_process_event(dhd_pub, type, *ifidx,
- status, reason, flags);
- }
-#endif /* DHD_STATUS_LOGGING */
+exit:
#ifdef SHOW_EVENTS
if (DHD_FWLOG_ON() || DHD_EVENT_ON()) {
}
#endif /* SHOW_EVENTS */
- return (BCME_OK);
-} /* wl_process_host_event */
+ return ret;
+}
int
wl_host_event(dhd_pub_t *dhd_pub, int *ifidx, void *pktdata, uint pktlen,
}
#ifndef strtoul
#define strtoul(nptr, endptr, base) bcm_strtoul((nptr), (endptr), (base))
-#endif // endif
+#endif
+#if defined(PKT_FILTER_SUPPORT) || defined(DHD_PKT_LOGGING)
/* Convert user's input in hex pattern to byte-size mask */
int
wl_pattern_atoh(char *src, char *dst)
}
return i;
}
-
-#if defined(PKT_FILTER_SUPPORT)
-int
-pattern_atoh_len(char *src, char *dst, int len)
-{
- int i;
- if (strncmp(src, "0x", HD_PREFIX_SIZE) != 0 &&
- strncmp(src, "0X", HD_PREFIX_SIZE) != 0) {
- DHD_ERROR(("Mask invalid format. Needs to start with 0x\n"));
- return -1;
- }
- src = src + HD_PREFIX_SIZE; /* Skip past 0x */
- if (strlen(src) % HD_BYTE_SIZE != 0) {
- DHD_ERROR(("Mask invalid format. Needs to be of even length\n"));
- return -1;
- }
- for (i = 0; *src != '\0'; i++) {
- char num[HD_BYTE_SIZE + 1];
-
- if (i > len - 1) {
- DHD_ERROR(("pattern not in range, idx: %d len: %d\n", i, len));
- return -1;
- }
- bcm_strncpy_s(num, sizeof(num), src, HD_BYTE_SIZE);
- num[HD_BYTE_SIZE] = '\0';
- dst[i] = (uint8)strtoul(num, NULL, 16);
- src += HD_BYTE_SIZE;
- }
- return i;
-}
-#endif // endif
+#endif /* PKT_FILTER_SUPPORT || DHD_PKT_LOGGING */
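/* Illustrative example, not part of the driver: wl_pattern_atoh() converts a
 * "0x"-prefixed, even-length hex string into raw bytes and returns the byte
 * count (or -1 on a malformed string). A hypothetical caller:
 */
static int example_pattern_to_bytes(void)
{
	char pattern[] = "0x00ff00";	/* local copy, since the parser takes a non-const char* */
	char bytes[8];
	int n = wl_pattern_atoh(pattern, bytes);

	/* on success n == 3 and bytes[] holds { 0x00, 0xff, 0x00 } */
	return n;
}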
#ifdef PKT_FILTER_SUPPORT
void
/* Enable/disable the specified filter. */
rc = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, buf, buf_len, TRUE, 0);
rc = rc >= 0 ? 0 : rc;
- if (rc) {
+ if (rc)
DHD_ERROR(("%s: failed to %s pktfilter %s, retcode = %d\n",
__FUNCTION__, enable?"enable":"disable", arg, rc));
- dhd_set_packet_filter(dhd);
- rc = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, buf, buf_len, TRUE, 0);
- rc = rc >= 0 ? 0 : rc;
- if (rc) {
- DHD_TRACE_HW4(("%s: 2nd retry failed to add pktfilter %s, retcode = %d\n",
- __FUNCTION__, arg, rc));
- } else {
- DHD_TRACE_HW4(("%s: 2nd retry successfully added pktfilter %s\n",
- __FUNCTION__, arg));
- }
- }
else
DHD_TRACE(("%s: successfully %s pktfilter %s\n",
__FUNCTION__, enable?"enable":"disable", arg));
void
dhd_pktfilter_offload_set(dhd_pub_t * dhd, char *arg)
{
- const char *str;
- wl_pkt_filter_t pkt_filter;
- wl_pkt_filter_t *pkt_filterp;
- int buf_len;
- int str_len;
- int rc = -1;
+ const char *str;
+ wl_pkt_filter_t pkt_filter;
+ wl_pkt_filter_t *pkt_filterp;
+ int buf_len;
+ int str_len;
+ int rc;
uint32 mask_size;
uint32 pattern_size;
- char *argv[MAXPKT_ARG] = {0}, * buf = 0;
- int i = 0;
+ char *argv[16], * buf = 0;
+ int i = 0;
char *arg_save = 0, *arg_org = 0;
+#define BUF_SIZE 2048
if (!arg)
return;
arg_org = arg_save;
- if (!(buf = MALLOC(dhd->osh, MAX_PKTFLT_BUF_SIZE))) {
+ if (!(buf = MALLOC(dhd->osh, BUF_SIZE))) {
DHD_ERROR(("%s: malloc failed\n", __FUNCTION__));
goto fail;
}
-
- memset(buf, 0, MAX_PKTFLT_BUF_SIZE);
+ memset(buf, 0, BUF_SIZE);
memcpy(arg_save, arg, strlen(arg) + 1);
- if (strlen(arg) > MAX_PKTFLT_BUF_SIZE) {
+ if (strlen(arg) > BUF_SIZE) {
DHD_ERROR(("Not enough buffer %d < %d\n", (int)strlen(arg), (int)sizeof(buf)));
goto fail;
}
argv[i] = bcmstrtok(&arg_save, " ", 0);
- while (argv[i++]) {
- if (i >= MAXPKT_ARG) {
- DHD_ERROR(("Invalid args provided\n"));
- goto fail;
- }
+ while (argv[i++])
argv[i] = bcmstrtok(&arg_save, " ", 0);
- }
i = 0;
if (argv[i] == NULL) {
str = "pkt_filter_add";
str_len = strlen(str);
- bcm_strncpy_s(buf, MAX_PKTFLT_BUF_SIZE, str, str_len);
+ bcm_strncpy_s(buf, BUF_SIZE, str, str_len);
buf[ str_len ] = '\0';
buf_len = str_len + 1;
/* Parse packet filter id. */
pkt_filter.id = htod32(strtoul(argv[i], NULL, 0));
+ if (dhd_conf_del_pkt_filter(dhd, pkt_filter.id))
+ goto fail;
if (argv[++i] == NULL) {
DHD_ERROR(("Polarity not provided\n"));
}
/* Parse pattern filter mask. */
- rc = wl_pattern_atoh(argv[i],
- (char *) pkt_filterp->u.pattern.mask_and_pattern);
+ mask_size =
+ htod32(wl_pattern_atoh(argv[i],
+ (char *) pkt_filterp->u.pattern.mask_and_pattern));
- if (rc == -1) {
- DHD_ERROR(("Rejecting: %s\n", argv[i]));
- goto fail;
- }
- mask_size = htod32(rc);
if (argv[++i] == NULL) {
DHD_ERROR(("Pattern not provided\n"));
goto fail;
}
/* Parse pattern filter pattern. */
- rc = wl_pattern_atoh(argv[i],
- (char *) &pkt_filterp->u.pattern.mask_and_pattern[mask_size]);
+ pattern_size =
+ htod32(wl_pattern_atoh(argv[i],
+ (char *) &pkt_filterp->u.pattern.mask_and_pattern[mask_size]));
- if (rc == -1) {
- DHD_ERROR(("Rejecting: %s\n", argv[i]));
- goto fail;
- }
- pattern_size = htod32(rc);
if (mask_size != pattern_size) {
DHD_ERROR(("Mask and pattern not the same size\n"));
goto fail;
WL_PKT_FILTER_FIXED_LEN + WL_PKT_FILTER_PATTERN_FIXED_LEN);
} else if ((pkt_filter.type == 2) || (pkt_filter.type == 6)) {
int list_cnt = 0;
- char *endptr = NULL;
- wl_pkt_filter_pattern_listel_t *pf_el =
- (wl_pkt_filter_pattern_listel_t *)&pkt_filterp->u.patlist.patterns[0];
+ char *endptr = NULL;
+ wl_pkt_filter_pattern_listel_t *pf_el = &pkt_filterp->u.patlist.patterns[0];
while (argv[++i] != NULL) {
- /* Check valid buffer size. */
- if ((buf_len + MAX_PKTFLT_FIXED_BUF_SIZE) > MAX_PKTFLT_BUF_SIZE) {
- DHD_ERROR(("buffer over length MAX_PKTFLT_FIXED_BUF_SIZE\n"));
- goto fail;
- }
-
/* Parse pattern filter base and offset. */
if (bcm_isdigit(*argv[i])) {
/* Numeric base */
*endptr = '\0';
rc = wl_pkt_filter_base_parse(argv[i]);
if (rc == -1) {
- printf("Invalid base %s\n", argv[i]);
+ printf("Invalid base %s\n", argv[i]);
goto fail;
}
*endptr = ':';
+ } else {
+ printf("Invalid [base:]offset format: %s\n", argv[i]);
+ goto fail;
}
}
- if (endptr == NULL) {
- printf("Invalid [base:]offset format: %s\n", argv[i]);
- goto fail;
- }
-
if (*endptr == ':') {
- pf_el->base_offs = htod16(rc);
+ pkt_filter.u.patlist.patterns[0].base_offs = htod16(rc);
rc = strtoul(endptr + 1, &endptr, 0);
} else {
/* Must have had a numeric offset only */
- pf_el->base_offs = htod16(0);
+ pkt_filter.u.patlist.patterns[0].base_offs = htod16(0);
}
if (*endptr) {
printf("Offset too large\n");
goto fail;
}
- pf_el->rel_offs = htod16(rc);
+ pkt_filter.u.patlist.patterns[0].rel_offs = htod16(rc);
/* Clear match_flag (may be set in parsing which follows) */
- pf_el->match_flags = htod16(0);
+ pkt_filter.u.patlist.patterns[0].match_flags = htod16(0);
/* Parse pattern filter mask and pattern directly into ioctl buffer */
if (argv[++i] == NULL) {
goto fail;
}
rc = wl_pattern_atoh(argv[i], (char*)pf_el->mask_and_data);
- if ((rc == -1) || (rc > MAX_PKTFLT_FIXED_PATTERN_SIZE)) {
+ if (rc == -1) {
printf("Rejecting: %s\n", argv[i]);
goto fail;
}
}
if (*argv[i] == '!') {
- pf_el->match_flags =
+ pkt_filter.u.patlist.patterns[0].match_flags =
htod16(WL_PKT_FILTER_MFLAG_NEG);
(argv[i])++;
}
goto fail;
}
rc = wl_pattern_atoh(argv[i], (char*)&pf_el->mask_and_data[rc]);
- if ((rc == -1) || (rc > MAX_PKTFLT_FIXED_PATTERN_SIZE)) {
+ if (rc == -1) {
printf("Rejecting: %s\n", argv[i]);
goto fail;
}
goto fail;
}
- pf_el->size_bytes = mask_size;
+ pkt_filter.u.patlist.patterns[0].size_bytes = mask_size;
/* Account for the size of this pattern element */
buf_len += WL_PKT_FILTER_PATTERN_LISTEL_FIXED_LEN + 2 * rc;
+ /* And the pattern element fields that were put in a local for
+ * alignment purposes now get copied to the ioctl buffer.
+ */
+ memcpy((char*)pf_el, &pkt_filter.u.patlist.patterns[0],
+ WL_PKT_FILTER_PATTERN_FIXED_LEN);
+
/* Move to next element location in ioctl buffer */
pf_el = (wl_pkt_filter_pattern_listel_t*)
((uint8*)pf_el + WL_PKT_FILTER_PATTERN_LISTEL_FIXED_LEN + 2 * rc);
/* Account for initial fixed size, and copy initial fixed fields */
buf_len += WL_PKT_FILTER_FIXED_LEN + WL_PKT_FILTER_PATTERN_LIST_FIXED_LEN;
- if (buf_len > MAX_PKTFLT_BUF_SIZE) {
- DHD_ERROR(("buffer over length MAX_PKTFLT_BUF_SIZE\n"));
- goto fail;
- }
/* Update list count and total size */
pkt_filter.u.patlist.list_cnt = list_cnt;
pkt_filter.u.patlist.PAD1[0] = 0;
rc = rc >= 0 ? 0 : rc;
if (rc)
- DHD_ERROR(("%s: failed to add pktfilter %s, retcode = %d\n",
+ DHD_TRACE(("%s: failed to add pktfilter %s, retcode = %d\n",
__FUNCTION__, arg, rc));
else
DHD_TRACE(("%s: successfully added pktfilter %s\n",
MFREE(dhd->osh, arg_org, strlen(arg) + 1);
if (buf)
- MFREE(dhd->osh, buf, MAX_PKTFLT_BUF_SIZE);
+ MFREE(dhd->osh, buf, BUF_SIZE);
}
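/* Usage note, inferred from the parsing above (field order for elided parts of
 * the parser is assumed, not verified here): the argument string handed to
 * dhd_pktfilter_offload_set() is space-separated, roughly
 *     "<id> <polarity> <type> <offset> <bitmask> <pattern>"
 * for the plain pattern type, e.g. "100 0 0 0 0xff 0x00", where <bitmask> and
 * <pattern> are "0x"-prefixed hex strings of equal byte length (enforced by
 * the size check above). The pattern-list types (2 and 6) instead take
 * repeated "[base:]offset mask pattern" groups.
 */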
void
dhd_arp_offload_enable(dhd_pub_t * dhd, int arp_enable)
{
int retcode;
-#ifdef WL_CFG80211
- /* Do not enable arp offload in case of non-STA interfaces active */
- if (arp_enable &&
- (wl_cfg80211_check_vif_in_use(dhd_linux_get_primary_netdev(dhd)))) {
- DHD_TRACE(("%s: Virtual interfaces active, ignore arp offload request \n",
- __FUNCTION__));
- return;
- }
-#endif /* WL_CFG80211 */
+
retcode = dhd_wl_ioctl_set_intiovar(dhd, "arpoe",
arp_enable, WLC_SET_VAR, TRUE, 0);
DHD_ERROR(("%s: failed to enabe ARP offload to %d, retcode = %d\n",
__FUNCTION__, arp_enable, retcode));
else
-#ifdef DHD_LOG_DUMP
- DHD_LOG_MEM(("%s: successfully enabed ARP offload to %d\n",
- __FUNCTION__, arp_enable));
-#else
DHD_ARPOE(("%s: successfully enabed ARP offload to %d\n",
__FUNCTION__, arp_enable));
-#endif /* DHD_LOG_DUMP */
if (arp_enable) {
uint32 version;
retcode = dhd_wl_ioctl_get_intiovar(dhd, "arp_version",
ret = dhd_iovar(dhd, idx, "arp_table_clear", NULL, 0, NULL, 0, TRUE);
if (ret < 0)
DHD_ERROR(("%s failed code %d\n", __FUNCTION__, ret));
- else {
-#ifdef DHD_LOG_DUMP
- DHD_LOG_MEM(("%s: ARP table clear\n", __FUNCTION__));
-#else
- DHD_TRACE(("%s: ARP table clear\n", __FUNCTION__));
-#endif /* DHD_LOG_DUMP */
- }
- /* mac address isn't cleared here but it will be cleared after dongle off */
- dhd->hmac_updated = 0;
}
void
ret = dhd_iovar(dhd, idx, "arp_hostip_clear", NULL, 0, NULL, 0, TRUE);
if (ret < 0)
DHD_ERROR(("%s failed code %d\n", __FUNCTION__, ret));
- else {
-#ifdef DHD_LOG_DUMP
- DHD_LOG_MEM(("%s: ARP host ip clear\n", __FUNCTION__));
-#else
- DHD_TRACE(("%s: ARP host ip clear\n", __FUNCTION__));
-#endif /* DHD_LOG_DUMP */
- }
}
void
ret = dhd_iovar(dhd, idx, "arp_hostip", (char *)&ipaddr, sizeof(ipaddr),
NULL, 0, TRUE);
- if (ret < 0)
+ if (ret)
DHD_ERROR(("%s: ARP ip addr add failed, ret = %d\n", __FUNCTION__, ret));
- else {
- /* mac address is updated in the dongle */
- dhd->hmac_updated = 1;
-#ifdef DHD_LOG_DUMP
- DHD_LOG_MEM(("%s: ARP ip addr entry added \n", __FUNCTION__));
-#else
- DHD_ARPOE(("%s: ARP ip addr entry added \n", __FUNCTION__));
-#endif /* DHD_LOG_DUMP */
- }
+ else
+ DHD_ARPOE(("%s: sARP H ipaddr entry added \n",
+ __FUNCTION__));
}
int
if (dhd == NULL)
return -1;
-#if defined(WL_CFG80211) && defined(WL_NAN)
- if (wl_cfgnan_is_dp_active(dhd_linux_get_primary_netdev(dhd))) {
- /* If nan dp is active, skip NDO */
- DHD_INFO(("Active NAN DP, skip NDO\n"));
- return 0;
- }
-#endif /* WL_CFG80211 && WL_NAN */
-#ifdef WL_CFG80211
- if (dhd->op_mode & DHD_FLAG_HOSTAP_MODE) {
- /* NDO disable on STA+SOFTAP mode */
- ndo_enable = FALSE;
- }
-#endif /* WL_CFG80211 */
retcode = dhd_wl_ioctl_set_intiovar(dhd, "ndoe",
ndo_enable, WLC_SET_VAR, TRUE, 0);
if (retcode)
dhd_ndo_add_ip(dhd_pub_t *dhd, char* ipv6addr, int idx)
{
int iov_len = 0;
- char iovbuf[DHD_IOVAR_BUF_SIZE];
+ char iovbuf[DHD_IOVAR_BUF_SIZE] = {0};
int retcode;
if (dhd == NULL)
dhd_ndo_remove_ip(dhd_pub_t *dhd, int idx)
{
int iov_len = 0;
- char iovbuf[DHD_IOVAR_BUF_SIZE];
+ char iovbuf[DHD_IOVAR_BUF_SIZE] = {0};
int retcode;
if (dhd == NULL)
return retcode;
}
+
/* Enhanced ND offload */
uint16
dhd_ndo_get_version(dhd_pub_t *dhdp)
ndo_get_ver.length = htod32(WL_ND_HOSTIP_FIXED_LEN + sizeof(uint16));
ndo_get_ver.u.version = 0;
iov_len = bcm_mkiovar("nd_hostip", (char *)&ndo_get_ver,
- WL_ND_HOSTIP_FIXED_LEN + sizeof(uint16), iovbuf, sizeof(iovbuf));
-
+ WL_ND_HOSTIP_FIXED_LEN + sizeof(uint16), iovbuf, sizeof(iovbuf));
if (!iov_len) {
DHD_ERROR(("%s: Insufficient iovar buffer size %zu \n",
__FUNCTION__, sizeof(iovbuf)));
}
retcode = dhd_wl_ioctl_cmd(dhdp, WLC_GET_VAR, iovbuf, iov_len, FALSE, 0);
-
if (retcode) {
DHD_ERROR(("%s: failed, retcode = %d\n", __FUNCTION__, retcode));
/* ver iovar not supported. NDO version is 0 */
DHD_INFO(("%s: Host IP count exceed device capacity,"
"ND offload deactivated\n", __FUNCTION__));
dhdp->ndo_host_ip_overflow = TRUE;
- dhd_ndo_enable(dhdp, FALSE);
+ dhd_ndo_enable(dhdp, 0);
}
#endif /* NDO_CONFIG_SUPPORT */
} else {
iov_len = bcm_mkiovar("nd_hostip", (char *)&ndo_del_addr,
WL_ND_HOSTIP_WITH_ADDR_LEN, iovbuf, sizeof(iovbuf));
-
if (!iov_len) {
DHD_ERROR(("%s: Insufficient iovar buffer size %zu \n",
__FUNCTION__, sizeof(iovbuf)));
iov_len = bcm_mkiovar("nd_hostip", (char *)&ndo_del_addr, WL_ND_HOSTIP_FIXED_LEN,
iovbuf, sizeof(iovbuf));
-
if (!iov_len) {
DHD_ERROR(("%s: Insufficient iovar buffer size %zu \n",
__FUNCTION__, sizeof(iovbuf)));
iov_len = bcm_mkiovar("nd_unsolicited_na_filter", (char *)&enable, sizeof(int),
iovbuf, sizeof(iovbuf));
-
if (!iov_len) {
DHD_ERROR(("%s: Insufficient iovar buffer size %zu \n",
__FUNCTION__, sizeof(iovbuf)));
return BCME_ERROR;
}
-
retcode = dhd_wl_ioctl_cmd(dhdp, WLC_SET_VAR, iovbuf, iov_len, TRUE, 0);
if (retcode)
DHD_ERROR(("%s: failed to enable Unsolicited NA filter to %d, retcode = %d\n",
return retcode;
}
-#ifdef SIMPLE_ISCAN
-
-uint iscan_thread_id = 0;
-iscan_buf_t * iscan_chain = 0;
-
-iscan_buf_t *
-dhd_iscan_allocate_buf(dhd_pub_t *dhd, iscan_buf_t **iscanbuf)
-{
- iscan_buf_t *iscanbuf_alloc = 0;
- iscan_buf_t *iscanbuf_head;
-
- DHD_ISCAN(("%s: Entered\n", __FUNCTION__));
- dhd_iscan_lock();
-
- iscanbuf_alloc = (iscan_buf_t*)MALLOC(dhd->osh, sizeof(iscan_buf_t));
- if (iscanbuf_alloc == NULL)
- goto fail;
-
- iscanbuf_alloc->next = NULL;
- iscanbuf_head = *iscanbuf;
-
- DHD_ISCAN(("%s: addr of allocated node = 0x%X"
- "addr of iscanbuf_head = 0x%X dhd = 0x%X\n",
- __FUNCTION__, iscanbuf_alloc, iscanbuf_head, dhd));
-
- if (iscanbuf_head == NULL) {
- *iscanbuf = iscanbuf_alloc;
- DHD_ISCAN(("%s: Head is allocated\n", __FUNCTION__));
- goto fail;
- }
-
- while (iscanbuf_head->next)
- iscanbuf_head = iscanbuf_head->next;
-
- iscanbuf_head->next = iscanbuf_alloc;
-
-fail:
- dhd_iscan_unlock();
- return iscanbuf_alloc;
-}
-
-void
-dhd_iscan_free_buf(void *dhdp, iscan_buf_t *iscan_delete)
-{
- iscan_buf_t *iscanbuf_free = 0;
- iscan_buf_t *iscanbuf_prv = 0;
- iscan_buf_t *iscanbuf_cur;
- dhd_pub_t *dhd = dhd_bus_pub(dhdp);
- DHD_ISCAN(("%s: Entered\n", __FUNCTION__));
-
- dhd_iscan_lock();
-
- iscanbuf_cur = iscan_chain;
-
- /* If iscan_delete is null then delete the entire
- * chain or else delete specific one provided
- */
- if (!iscan_delete) {
- while (iscanbuf_cur) {
- iscanbuf_free = iscanbuf_cur;
- iscanbuf_cur = iscanbuf_cur->next;
- iscanbuf_free->next = 0;
- MFREE(dhd->osh, iscanbuf_free, sizeof(iscan_buf_t));
- }
- iscan_chain = 0;
- } else {
- while (iscanbuf_cur) {
- if (iscanbuf_cur == iscan_delete)
- break;
- iscanbuf_prv = iscanbuf_cur;
- iscanbuf_cur = iscanbuf_cur->next;
- }
- if (iscanbuf_prv)
- iscanbuf_prv->next = iscan_delete->next;
-
- iscan_delete->next = 0;
- MFREE(dhd->osh, iscan_delete, sizeof(iscan_buf_t));
-
- if (!iscanbuf_prv)
- iscan_chain = 0;
- }
- dhd_iscan_unlock();
-}
-
-iscan_buf_t *
-dhd_iscan_result_buf(void)
-{
- return iscan_chain;
-}
-
-int
-dhd_iscan_issue_request(void * dhdp, wl_iscan_params_t *pParams, uint32 size)
-{
- int rc = -1;
- dhd_pub_t *dhd = dhd_bus_pub(dhdp);
- char *buf;
- char iovar[] = "iscan";
- uint32 allocSize = 0;
- wl_ioctl_t ioctl;
- int len;
-
- if (pParams) {
- allocSize = (size + strlen(iovar) + 1);
- if ((allocSize < size) || (allocSize < strlen(iovar)))
- {
- DHD_ERROR(("%s: overflow - allocation size too large %d < %d + %d!\n",
- __FUNCTION__, allocSize, size, strlen(iovar)));
- goto cleanUp;
- }
- buf = MALLOC(dhd->osh, allocSize);
-
- if (buf == NULL)
- {
- DHD_ERROR(("%s: malloc of size %d failed!\n", __FUNCTION__, allocSize));
- goto cleanUp;
- }
- ioctl.cmd = WLC_SET_VAR;
- len = bcm_mkiovar(iovar, (char *)pParams, size, buf, allocSize);
- if (len == 0) {
- rc = BCME_BUFTOOSHORT;
- goto cleanUp;
- }
- rc = dhd_wl_ioctl(dhd, 0, &ioctl, buf, len);
- }
-
-cleanUp:
- if (buf) {
- MFREE(dhd->osh, buf, allocSize);
- }
-
- return rc;
-}
-
-static int
-dhd_iscan_get_partial_result(void *dhdp, uint *scan_count)
-{
- wl_iscan_results_t *list_buf;
- wl_iscan_results_t list;
- wl_scan_results_t *results;
- iscan_buf_t *iscan_cur;
- int status = -1;
- dhd_pub_t *dhd = dhd_bus_pub(dhdp);
- int rc;
- wl_ioctl_t ioctl;
- int len;
-
- DHD_ISCAN(("%s: Enter\n", __FUNCTION__));
-
- iscan_cur = dhd_iscan_allocate_buf(dhd, &iscan_chain);
- if (!iscan_cur) {
- DHD_ERROR(("%s: Failed to allocate node\n", __FUNCTION__));
- dhd_iscan_free_buf(dhdp, 0);
- dhd_iscan_request(dhdp, WL_SCAN_ACTION_ABORT);
- dhd_ind_scan_confirm(dhdp, FALSE);
- goto fail;
- }
-
- dhd_iscan_lock();
-
- memset(iscan_cur->iscan_buf, 0, WLC_IW_ISCAN_MAXLEN);
- list_buf = (wl_iscan_results_t*)iscan_cur->iscan_buf;
- results = &list_buf->results;
- results->buflen = WL_ISCAN_RESULTS_FIXED_SIZE;
- results->version = 0;
- results->count = 0;
-
- memset(&list, 0, sizeof(list));
- list.results.buflen = htod32(WLC_IW_ISCAN_MAXLEN);
- len = bcm_mkiovar("iscanresults", (char *)&list, WL_ISCAN_RESULTS_FIXED_SIZE,
- iscan_cur->iscan_buf, WLC_IW_ISCAN_MAXLEN);
- if (len == 0) {
- dhd_iscan_free_buf(dhdp, 0);
- dhd_iscan_request(dhdp, WL_SCAN_ACTION_ABORT);
- dhd_ind_scan_confirm(dhdp, FALSE);
- status = BCME_BUFTOOSHORT;
- goto fail;
- }
- ioctl.cmd = WLC_GET_VAR;
- ioctl.set = FALSE;
- rc = dhd_wl_ioctl(dhd, 0, &ioctl, iscan_cur->iscan_buf, WLC_IW_ISCAN_MAXLEN);
-
- results->buflen = dtoh32(results->buflen);
- results->version = dtoh32(results->version);
- *scan_count = results->count = dtoh32(results->count);
- status = dtoh32(list_buf->status);
- DHD_ISCAN(("%s: Got %d resuls status = (%x)\n", __FUNCTION__, results->count, status));
-
- dhd_iscan_unlock();
-
- if (!(*scan_count)) {
- /* TODO: race condition when FLUSH already called */
- dhd_iscan_free_buf(dhdp, 0);
- }
-fail:
- return status;
-}
-#endif /* SIMPLE_ISCAN */
/*
* returns = TRUE if associated, FALSE if not associated
int ret = -1;
int allowed_skip_dtim_cnt = 0;
- if (dhd->disable_dtim_in_suspend) {
- DHD_ERROR(("%s Disable bcn_li_dtim in suspend\n", __FUNCTION__));
- bcn_li_dtim = 0;
- return bcn_li_dtim;
- }
-
/* Check if associated */
if (dhd_is_associated(dhd, 0, NULL) == FALSE) {
DHD_TRACE(("%s NOT assoc ret %d\n", __FUNCTION__, ret));
}
}
- if (dhd->conf->suspend_bcn_li_dtim >= 0)
- bcn_li_dtim = dhd->conf->suspend_bcn_li_dtim;
DHD_ERROR(("%s beacon=%d bcn_li_dtim=%d DTIM=%d Listen=%d\n",
__FUNCTION__, *bcn_interval, bcn_li_dtim, *dtim_period, CUSTOM_LISTEN_INTERVAL));
int dtim_period = 0;
int ap_beacon = 0;
int allowed_skip_dtim_cnt = 0;
-
- if (dhd->disable_dtim_in_suspend) {
- DHD_ERROR(("%s Disable bcn_li_dtim in suspend\n", __FUNCTION__));
- bcn_li_dtim = 0;
- goto exit;
- }
-
/* Check if associated */
if (dhd_is_associated(dhd, 0, NULL) == FALSE) {
DHD_TRACE(("%s NOT assoc ret %d\n", __FUNCTION__, ret));
}
if (dhd->max_dtim_enable) {
- bcn_li_dtim =
- (int) (MAX_DTIM_ALLOWED_INTERVAL / (ap_beacon * dtim_period));
+ bcn_li_dtim = (int) (MAX_DTIM_ALLOWED_INTERVAL / (ap_beacon * dtim_period));
if (bcn_li_dtim == 0) {
bcn_li_dtim = 1;
}
+ bcn_li_dtim = MAX(dhd->suspend_bcn_li_dtim, bcn_li_dtim);
} else {
/* attemp to use platform defined dtim skip interval */
bcn_li_dtim = dhd->suspend_bcn_li_dtim;
}
#endif /* OEM_ANDROID && BCMPCIE */
-#ifdef CONFIG_SILENT_ROAM
-int
-dhd_sroam_set_mon(dhd_pub_t *dhd, bool set)
+/* Check if the mode supports STA MODE */
+bool dhd_support_sta_mode(dhd_pub_t *dhd)
{
- int ret = BCME_OK;
- wlc_sroam_t *psroam;
- wlc_sroam_info_t *sroam;
- uint sroamlen = sizeof(*sroam) + SROAM_HDRLEN;
- /* Check if associated */
- if (dhd_is_associated(dhd, 0, NULL) == FALSE) {
- DHD_TRACE(("%s NOT assoc\n", __FUNCTION__));
- return ret;
- }
-
- if (set && (dhd->op_mode &
- (DHD_FLAG_HOSTAP_MODE | DHD_FLAG_P2P_GC_MODE | DHD_FLAG_P2P_GO_MODE))) {
- DHD_INFO((" Failed to set sroam %d, op_mode 0x%04x\n", set, dhd->op_mode));
- return ret;
- }
-
- if (!dhd->sroam_turn_on) {
- DHD_INFO((" Failed to set sroam %d, sroam turn %d\n", set, dhd->sroam_turn_on));
- return ret;
- }
- psroam = (wlc_sroam_t *)MALLOCZ(dhd->osh, sroamlen);
- if (!psroam) {
- DHD_ERROR(("%s Fail to malloc buffer\n", __FUNCTION__));
- return BCME_NOMEM;
- }
-
- ret = dhd_iovar(dhd, 0, "sroam", NULL, 0, (char *)psroam, sroamlen, FALSE);
- if (ret < 0) {
- DHD_ERROR(("%s Failed to Get sroam %d\n", __FUNCTION__, ret));
- goto done;
- }
-
- if (psroam->ver != WLC_SILENT_ROAM_CUR_VER) {
- ret = BCME_VERSION;
- goto done;
- }
-
- sroam = (wlc_sroam_info_t *)psroam->data;
- sroam->sroam_on = set;
- DHD_INFO((" Silent roam monitor mode %s\n", set ? "On" : "Off"));
-
- ret = dhd_iovar(dhd, 0, "sroam", (char *)psroam, sroamlen, NULL, 0, TRUE);
- if (ret < 0) {
- DHD_ERROR(("%s Failed to Set sroam %d\n", __FUNCTION__, ret));
- }
-
-done:
- if (psroam) {
- MFREE(dhd->osh, psroam, sroamlen);
- }
- return ret;
-
-}
-#endif /* CONFIG_SILENT_ROAM */
-
-/* Check if the mode supports STA MODE */
-bool dhd_support_sta_mode(dhd_pub_t *dhd)
-{
-
-#ifdef WL_CFG80211
- if (!(dhd->op_mode & DHD_FLAG_STA_MODE))
- return FALSE;
- else
-#endif /* WL_CFG80211 */
- return TRUE;
-}
+#ifdef WL_CFG80211
+ if (!(dhd->op_mode & DHD_FLAG_STA_MODE))
+ return FALSE;
+ else
+#endif /* WL_CFG80211 */
+ return TRUE;
+}
#if defined(KEEP_ALIVE)
int dhd_keep_alive_onoff(dhd_pub_t *dhd)
return res;
}
#endif /* defined(KEEP_ALIVE) */
-#define CSCAN_TLV_TYPE_SSID_IE 'S'
+
+#define CSCAN_TLV_TYPE_SSID_IE 'S'
/*
* SSIDs list parsing from cscan tlv list
*/
{
char* str;
int idx = 0;
- uint8 len;
if ((list_str == NULL) || (*list_str == NULL) || (*bytes_left < 0)) {
DHD_ERROR(("%s error paramters\n", __FUNCTION__));
- return BCME_BADARG;
+ return -1;
}
str = *list_str;
while (*bytes_left > 0) {
+
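+ /* Each entry in the TLV list is: an 'S' type byte, a one-byte SSID length, then the SSID bytes (length 0 denotes a broadcast/wildcard scan entry). */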
if (str[0] != CSCAN_TLV_TYPE_SSID_IE) {
*list_str = str;
DHD_TRACE(("nssid=%d left_parse=%d %d\n", idx, *bytes_left, str[0]));
return idx;
}
- if (idx >= max) {
- DHD_ERROR(("%s number of SSIDs more than %d\n", __FUNCTION__, idx));
- return BCME_BADARG;
- }
-
/* Get proper CSCAN_TLV_TYPE_SSID_IE */
*bytes_left -= 1;
- if (*bytes_left == 0) {
- DHD_ERROR(("%s no length field.\n", __FUNCTION__));
- return BCME_BADARG;
- }
str += 1;
ssid[idx].rssi_thresh = 0;
ssid[idx].flags = 0;
- len = str[0];
- if (len == 0) {
+ if (str[0] == 0) {
/* Broadcast SSID */
ssid[idx].SSID_len = 0;
memset((char*)ssid[idx].SSID, 0x0, DOT11_MAX_SSID_LEN);
str += 1;
DHD_TRACE(("BROADCAST SCAN left=%d\n", *bytes_left));
- } else if (len <= DOT11_MAX_SSID_LEN) {
+ }
+ else if (str[0] <= DOT11_MAX_SSID_LEN) {
/* Get proper SSID size */
- ssid[idx].SSID_len = len;
+ ssid[idx].SSID_len = str[0];
*bytes_left -= 1;
+ str += 1;
+
/* Get SSID */
if (ssid[idx].SSID_len > *bytes_left) {
DHD_ERROR(("%s out of memory range len=%d but left=%d\n",
__FUNCTION__, ssid[idx].SSID_len, *bytes_left));
- return BCME_BADARG;
+ return -1;
}
- str += 1;
+
memcpy((char*)ssid[idx].SSID, str, ssid[idx].SSID_len);
*bytes_left -= ssid[idx].SSID_len;
DHD_TRACE(("%s :size=%d left=%d\n",
(char*)ssid[idx].SSID, ssid[idx].SSID_len, *bytes_left));
- } else {
- DHD_ERROR(("### SSID size more than %d\n", str[0]));
- return BCME_BADARG;
}
- idx++;
+ else {
+ DHD_ERROR(("### SSID size more than %d\n", str[0]));
+ return -1;
+ }
+
+ if (idx++ > max) {
+ DHD_ERROR(("%s number of SSIDs more than %d\n", __FUNCTION__, idx));
+ return -1;
+ }
}
*list_str = str;
/* Clean all dest bytes */
memset(dst, 0, dst_size);
- if (*bytes_left > 0) {
+ while (*bytes_left > 0) {
if (str[0] != token) {
DHD_TRACE(("%s NOT Type=%d get=%d left_parse=%d \n",
*list_str = str;
return num;
}
-#endif
+
+#endif
+
+#if defined(TRAFFIC_MGMT_DWM)
+static int traffic_mgmt_add_dwm_filter(dhd_pub_t *dhd,
+ trf_mgmt_filter_list_t * trf_mgmt_filter_list, int len)
+{
+ int ret = 0;
+ uint32 i;
+ trf_mgmt_filter_t *trf_mgmt_filter;
+ uint8 dwm_tbl_entry;
+ uint32 dscp = 0;
+ uint16 dwm_filter_enabled = 0;
+
+ /* Check parameter length is adequate */
+ if (len < (OFFSETOF(trf_mgmt_filter_list_t, filter) +
+ trf_mgmt_filter_list->num_filters * sizeof(trf_mgmt_filter_t))) {
+ ret = BCME_BUFTOOSHORT;
+ return ret;
+ }
+
+ bzero(&dhd->dhd_tm_dwm_tbl, sizeof(dhd_trf_mgmt_dwm_tbl_t));
+
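+ /* Walk the caller-supplied filter list and build the per-DSCP DWM table: each enabled entry maps a DSCP value to a WMM priority plus an optional "favored" flag. */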
+ for (i = 0; i < trf_mgmt_filter_list->num_filters; i++) {
+ trf_mgmt_filter = &trf_mgmt_filter_list->filter[i];
+
+ dwm_filter_enabled = (trf_mgmt_filter->flags & TRF_FILTER_DWM);
+
+ if (dwm_filter_enabled) {
+ dscp = trf_mgmt_filter->dscp;
+ if (dscp >= DHD_DWM_TBL_SIZE) {
+ ret = BCME_BADARG;
+ return ret;
+ }
+ }
+
+ dhd->dhd_tm_dwm_tbl.dhd_dwm_enabled = 1;
+ /* set WMM AC bits */
+ dwm_tbl_entry = (uint8) trf_mgmt_filter->priority;
+ DHD_TRF_MGMT_DWM_SET_FILTER(dwm_tbl_entry);
+
+ /* set favored bits */
+ if (trf_mgmt_filter->flags & TRF_FILTER_FAVORED)
+ DHD_TRF_MGMT_DWM_SET_FAVORED(dwm_tbl_entry);
+
+ dhd->dhd_tm_dwm_tbl.dhd_dwm_tbl[dscp] = dwm_tbl_entry;
+ }
+ return ret;
+}
+#endif
/* Given filename and download type, returns a buffer pointer and length
-* for download to f/w. Type can be FW or NVRAM.
-*
-*/
+ * for download to f/w. Type can be FW or NVRAM.
+ *
+ */
int dhd_get_download_buffer(dhd_pub_t *dhd, char *file_path, download_type_t component,
char ** buffer, int *length)
uint8 *buf = NULL;
/* Point to cache if available. */
+#ifdef CACHE_FW_IMAGES
+ if (component == FW) {
+ if (dhd->cached_fw_length) {
+ len = dhd->cached_fw_length;
+ buf = dhd->cached_fw;
+ }
+ }
+ else if (component == NVRAM) {
+ if (dhd->cached_nvram_length) {
+ len = dhd->cached_nvram_length;
+ buf = dhd->cached_nvram;
+ }
+ }
+ else if (component == CLM_BLOB) {
+ if (dhd->cached_clm_length) {
+ len = dhd->cached_clm_length;
+ buf = dhd->cached_clm;
+ }
+ } else {
+ return ret;
+ }
+#endif /* CACHE_FW_IMAGES */
/* No Valid cache found on this call */
if (!len) {
file_len = *length;
*length = 0;
if (file_path) {
- image = dhd_os_open_image1(dhd, file_path);
+ image = dhd_os_open_image(file_path);
if (image == NULL) {
printf("%s: Open image file failed %s\n", __FUNCTION__, file_path);
goto err;
}
/* Download image */
+#if defined(BCMEMBEDIMAGE) && defined(DHD_EFI)
+ if (!image) {
+ memcpy(buf, nvram_arr, sizeof(nvram_arr));
+ len = sizeof(nvram_arr);
+ } else {
+ len = dhd_os_get_image_block((char *)buf, file_len, image);
+ if ((len <= 0 || len > file_len)) {
+ MFREE(dhd->osh, buf, file_len);
+ goto err;
+ }
+ }
+#else
len = dhd_os_get_image_block((char *)buf, file_len, image);
if ((len <= 0 || len > file_len)) {
MFREE(dhd->osh, buf, file_len);
goto err;
}
+#endif /* BCMEMBEDIMAGE && DHD_EFI */
}
ret = BCME_OK;
*buffer = (char *)buf;
/* Cache if first call. */
+#ifdef CACHE_FW_IMAGES
+ if (component == FW) {
+ if (!dhd->cached_fw_length) {
+ dhd->cached_fw = buf;
+ dhd->cached_fw_length = len;
+ }
+ }
+ else if (component == NVRAM) {
+ if (!dhd->cached_nvram_length) {
+ dhd->cached_nvram = buf;
+ dhd->cached_nvram_length = len;
+ }
+ }
+ else if (component == CLM_BLOB) {
+ if (!dhd->cached_clm_length) {
+ dhd->cached_clm = buf;
+ dhd->cached_clm_length = len;
+ }
+ }
+#endif /* CACHE_FW_IMAGES */
err:
if (image)
- dhd_os_close_image1(dhd, image);
+ dhd_os_close_image(image);
return ret;
}
}
int
-dhd_download_blob(dhd_pub_t *dhd, unsigned char *buf,
+dhd_download_blob(dhd_pub_t *dhd, unsigned char *image,
uint32 len, char *iovar)
-
{
int chunk_len;
int size2alloc;
if ((new_buf = (unsigned char *)MALLOCZ(dhd->osh, size2alloc)) != NULL) {
do {
chunk_len = dhd_os_get_image_block((char *)(new_buf + data_offset),
- MAX_CHUNK_LEN, buf);
+ MAX_CHUNK_LEN, image);
if (chunk_len < 0) {
DHD_ERROR(("%s: dhd_os_get_image_block failed (%d)\n",
__FUNCTION__, chunk_len));
err = BCME_ERROR;
goto exit;
}
+
if (len - chunk_len == 0)
dl_flag |= DL_END;
if (new_buf) {
MFREE(dhd->osh, new_buf, size2alloc);
}
- return err;
-}
-int
-dhd_apply_default_txcap(dhd_pub_t *dhd, char *path)
-{
- return 0;
+ return err;
}
int
dhd_check_current_clm_data(dhd_pub_t *dhd)
{
- char iovbuf[WLC_IOCTL_SMLEN];
+ char iovbuf[WLC_IOCTL_SMLEN] = {0};
wl_country_t *cspec;
int err = BCME_OK;
- memset(iovbuf, 0, sizeof(iovbuf));
- err = bcm_mkiovar("country", NULL, 0, iovbuf, sizeof(iovbuf));
- if (err == 0) {
- err = BCME_BUFTOOSHORT;
- DHD_ERROR(("%s: bcm_mkiovar failed.", __FUNCTION__));
- return err;
- }
+ bcm_mkiovar("country", NULL, 0, iovbuf, sizeof(iovbuf));
err = dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, iovbuf, sizeof(iovbuf), FALSE, 0);
if (err) {
DHD_ERROR(("%s: country code get failed\n", __FUNCTION__));
return err;
}
+
cspec = (wl_country_t *)iovbuf;
if ((strncmp(cspec->ccode, WL_CCODE_NULL_COUNTRY, WLC_CNTRY_BUF_SZ)) == 0) {
DHD_ERROR(("%s: ----- This FW is not included CLM data -----\n",
{
char *clm_blob_path;
int len;
- char *memblock = NULL;
+ unsigned char *imgbuf = NULL;
int err = BCME_OK;
- char iovbuf[WLC_IOCTL_SMLEN];
+ char iovbuf[WLC_IOCTL_SMLEN] = {0};
int status = FALSE;
if (clm_path && clm_path[0] != '\0') {
clm_blob_path = clm_path;
DHD_TRACE(("clm path from module param:%s\n", clm_path));
} else {
- clm_blob_path = VENDOR_PATH CONFIG_BCMDHD_CLM_PATH;
+ clm_blob_path = CONFIG_BCMDHD_CLM_PATH;
}
/* If CLM blob file is found on the filesystem, download the file.
* validate the country code before proceeding with the initialization.
* If country code is not valid, fail the initialization.
*/
- memblock = dhd_os_open_image1(dhd, (char *)clm_blob_path);
- if (memblock == NULL) {
+
+ imgbuf = dhd_os_open_image((char *)clm_blob_path);
+ if (imgbuf == NULL) {
printf("%s: Ignore clm file %s\n", __FUNCTION__, clm_path);
#if defined(DHD_BLOB_EXISTENCE_CHECK)
if (dhd->is_blob) {
if (status == TRUE) {
err = BCME_OK;
} else {
- err = status;
+ err = BCME_ERROR;
}
}
#endif /* DHD_BLOB_EXISTENCE_CHECK */
goto exit;
}
- len = dhd_os_get_image_size(memblock);
+ len = dhd_os_get_image_size(imgbuf);
- if ((len > 0) && (len < MAX_CLM_BUF_SIZE) && memblock) {
+ if ((len > 0) && (len < MAX_CLM_BUF_SIZE) && imgbuf) {
status = dhd_check_current_clm_data(dhd);
if (status == TRUE) {
#if defined(DHD_BLOB_EXISTENCE_CHECK)
"new CLM data will be added to the end of existing CLM data!\n",
__FUNCTION__));
#endif /* DHD_BLOB_EXISTENCE_CHECK */
- } else if (status != FALSE) {
- err = status;
- goto exit;
}
/* Found blob file. Download the file */
- DHD_TRACE(("clm file download from %s \n", clm_blob_path));
- err = dhd_download_blob(dhd, (unsigned char*)memblock, len, "clmload");
+ DHD_ERROR(("clm file download from %s \n", clm_blob_path));
+ err = dhd_download_blob(dhd, imgbuf, len, "clmload");
if (err) {
DHD_ERROR(("%s: CLM download failed err=%d\n", __FUNCTION__, err));
/* Retrieve clmload_status and print */
- memset(iovbuf, 0, sizeof(iovbuf));
- len = bcm_mkiovar("clmload_status", NULL, 0, iovbuf, sizeof(iovbuf));
- if (len == 0) {
- err = BCME_BUFTOOSHORT;
- goto exit;
- }
+ bcm_mkiovar("clmload_status", NULL, 0, iovbuf, sizeof(iovbuf));
err = dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, iovbuf, sizeof(iovbuf), FALSE, 0);
if (err) {
DHD_ERROR(("%s: clmload_status get failed err=%d \n",
DHD_INFO(("%s: CLM download succeeded \n", __FUNCTION__));
}
} else {
- DHD_INFO(("Skipping the clm download. len:%d memblk:%p \n", len, memblock));
+ DHD_INFO(("Skipping the clm download. len:%d memblk:%p \n", len, imgbuf));
}
/* Verify country code */
if (status != TRUE) {
/* Country code not initialized or CLM download not proper */
DHD_ERROR(("country code not initialized\n"));
- err = status;
+ err = BCME_ERROR;
}
exit:
- if (memblock) {
- dhd_os_close_image1(dhd, memblock);
+ if (imgbuf) {
+ dhd_os_close_image(imgbuf);
}
return err;
void dhd_free_download_buffer(dhd_pub_t *dhd, void *buffer, int length)
{
+#ifdef CACHE_FW_IMAGES
+ return;
+#endif
MFREE(dhd->osh, buffer, length);
}
-#ifdef SHOW_LOGTRACE
-int
-dhd_parse_logstrs_file(osl_t *osh, char *raw_fmts, int logstrs_size,
- dhd_event_log_t *event_log)
+#if defined(DHD_8021X_DUMP)
+#define EAP_PRINT(str) \
+ DHD_ERROR(("ETHER_TYPE_802_1X[%s] [%s]: " str "\n", \
+ ifname, direction ? "TX" : "RX"));
+/* Parse EAPOL 4 way handshake messages */
+void
+dhd_dump_eapol_4way_message(char *ifname, char *dump_data, bool direction)
{
- uint32 *lognums = NULL;
- char *logstrs = NULL;
- logstr_trailer_t *trailer = NULL;
- int ram_index = 0;
- char **fmts = NULL;
- int num_fmts = 0;
- bool match_fail = TRUE;
- int32 i = 0;
- uint8 *pfw_id = NULL;
- uint32 fwid = 0;
- void *file = NULL;
- int file_len = 0;
- char fwid_str[FWID_STR_LEN];
- uint32 hdr_logstrs_size = 0;
-
- /* Read last three words in the logstrs.bin file */
- trailer = (logstr_trailer_t *) (raw_fmts + logstrs_size -
- sizeof(logstr_trailer_t));
-
- if (trailer->log_magic == LOGSTRS_MAGIC) {
- /*
- * logstrs.bin has a header.
- */
- if (trailer->version == 1) {
- logstr_header_v1_t *hdr_v1 = (logstr_header_v1_t *) (raw_fmts +
- logstrs_size - sizeof(logstr_header_v1_t));
- DHD_INFO(("%s: logstr header version = %u\n",
- __FUNCTION__, hdr_v1->version));
- num_fmts = hdr_v1->rom_logstrs_offset / sizeof(uint32);
- ram_index = (hdr_v1->ram_lognums_offset -
- hdr_v1->rom_lognums_offset) / sizeof(uint32);
- lognums = (uint32 *) &raw_fmts[hdr_v1->rom_lognums_offset];
- logstrs = (char *) &raw_fmts[hdr_v1->rom_logstrs_offset];
- hdr_logstrs_size = hdr_v1->logstrs_size;
- } else if (trailer->version == 2) {
- logstr_header_t *hdr = (logstr_header_t *) (raw_fmts + logstrs_size -
- sizeof(logstr_header_t));
- DHD_INFO(("%s: logstr header version = %u; flags = %x\n",
- __FUNCTION__, hdr->trailer.version, hdr->trailer.flags));
-
- /* For ver. 2 of the header, need to match fwid of
- * both logstrs.bin and fw bin
- */
-
- /* read the FWID from fw bin */
- file = dhd_os_open_image1(NULL, st_str_file_path);
- if (!file) {
- DHD_ERROR(("%s: cannot open fw file !\n", __FUNCTION__));
- goto error;
- }
- file_len = dhd_os_get_image_size(file);
- if (file_len <= 0) {
- DHD_ERROR(("%s: bad fw file length !\n", __FUNCTION__));
- goto error;
- }
- /* fwid is at the end of fw bin in string format */
- if (dhd_os_seek_file(file, file_len - (sizeof(fwid_str) - 1)) < 0) {
- DHD_ERROR(("%s: can't seek file \n", __FUNCTION__));
- goto error;
- }
-
- memset(fwid_str, 0, sizeof(fwid_str));
- if (dhd_os_get_image_block(fwid_str, sizeof(fwid_str) - 1, file) <= 0) {
- DHD_ERROR(("%s: read fw file failed !\n", __FUNCTION__));
- goto error;
- }
- pfw_id = (uint8 *)bcmstrnstr(fwid_str, sizeof(fwid_str) - 1,
- FWID_STR_1, strlen(FWID_STR_1));
- if (!pfw_id) {
- pfw_id = (uint8 *)bcmstrnstr(fwid_str, sizeof(fwid_str) - 1,
- FWID_STR_2, strlen(FWID_STR_2));
- if (!pfw_id) {
- DHD_ERROR(("%s: could not find id in FW bin!\n",
- __FUNCTION__));
- goto error;
+ unsigned char type;
+ int pair, ack, mic, kerr, req, sec, install;
+ unsigned short us_tmp;
+
+ type = dump_data[15];
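+ /* dump_data[15] is the 802.1X packet type (0 = EAP-Packet, 3 = EAPOL-Key); for EAP packets, dump_data[18] carries the EAP code and dump_data[22] the EAP type. */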
+ if (type == 0) {
+ if ((dump_data[22] == 1) && (dump_data[18] == 1)) {
+ EAP_PRINT("EAP Packet, Request, Identity");
+ } else if ((dump_data[22] == 1) && (dump_data[18] == 2)) {
+ EAP_PRINT("EAP Packet, Response, Identity");
+ } else if (dump_data[22] == 254) {
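+ /* EAP expanded type (254) carries WSC/WPS; dump_data[30] selects Start/Msg/Done and dump_data[41] identifies the WPS message (M1..M8). */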
+ if (dump_data[30] == 1) {
+ EAP_PRINT("EAP Packet, WSC Start");
+ } else if (dump_data[30] == 4) {
+ if (dump_data[41] == 4) {
+ EAP_PRINT("EAP Packet, WPS M1");
+ } else if (dump_data[41] == 5) {
+ EAP_PRINT("EAP Packet, WPS M2");
+ } else if (dump_data[41] == 7) {
+ EAP_PRINT("EAP Packet, WPS M3");
+ } else if (dump_data[41] == 8) {
+ EAP_PRINT("EAP Packet, WPS M4");
+ } else if (dump_data[41] == 9) {
+ EAP_PRINT("EAP Packet, WPS M5");
+ } else if (dump_data[41] == 10) {
+ EAP_PRINT("EAP Packet, WPS M6");
+ } else if (dump_data[41] == 11) {
+ EAP_PRINT("EAP Packet, WPS M7");
+ } else if (dump_data[41] == 12) {
+ EAP_PRINT("EAP Packet, WPS M8");
}
- }
- /* search for the '-' in the fw id str, after which the
- * actual 4 byte fw id is present
- */
- while (pfw_id && *pfw_id != '-') {
- ++pfw_id;
- }
- ++pfw_id;
- fwid = bcm_strtoul((char *)pfw_id, NULL, 16);
-
- /* check if fw id in logstrs.bin matches the fw one */
- if (hdr->trailer.fw_id != fwid) {
- DHD_ERROR(("%s: logstr id does not match FW!"
- "logstrs_fwid:0x%x, rtecdc_fwid:0x%x\n",
- __FUNCTION__, hdr->trailer.fw_id, fwid));
- goto error;
- }
-
- match_fail = FALSE;
- num_fmts = hdr->rom_logstrs_offset / sizeof(uint32);
- ram_index = (hdr->ram_lognums_offset -
- hdr->rom_lognums_offset) / sizeof(uint32);
- lognums = (uint32 *) &raw_fmts[hdr->rom_lognums_offset];
- logstrs = (char *) &raw_fmts[hdr->rom_logstrs_offset];
- hdr_logstrs_size = hdr->logstrs_size;
-
-error:
- if (file) {
- dhd_os_close_image1(NULL, file);
- }
- if (match_fail) {
- return BCME_DECERR;
+ } else if (dump_data[30] == 5) {
+ EAP_PRINT("EAP Packet, WSC Done");
}
} else {
- DHD_ERROR(("%s: Invalid logstr version %u\n", __FUNCTION__,
- trailer->version));
- return BCME_ERROR;
- }
- if (logstrs_size != hdr_logstrs_size) {
- DHD_ERROR(("%s: bad logstrs_size %d\n", __FUNCTION__, hdr_logstrs_size));
- return BCME_ERROR;
+ DHD_ERROR(("ETHER_TYPE_802_1X[%s] [%s]: ver %d, type %d, replay %d\n",
+ ifname, direction ? "TX" : "RX",
+ dump_data[14], dump_data[15], dump_data[30]));
+ }
+ } else if (type == 3 && dump_data[18] == 2) {
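+ /* EAPOL-Key with the RSN key descriptor: decode the Key Information field (bytes 19-20) into pairwise/install/ACK/MIC/secure/error/request bits to classify 4-way handshake messages M1..M4. */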
+ us_tmp = (dump_data[19] << 8) | dump_data[20];
+ pair = 0 != (us_tmp & 0x08);
+ ack = 0 != (us_tmp & 0x80);
+ mic = 0 != (us_tmp & 0x100);
+ kerr = 0 != (us_tmp & 0x400);
+ req = 0 != (us_tmp & 0x800);
+ sec = 0 != (us_tmp & 0x200);
+ install = 0 != (us_tmp & 0x40);
+
+ if (!sec && !mic && ack && !install && pair && !kerr && !req) {
+ EAP_PRINT("EAPOL Packet, 4-way handshake, M1");
+ } else if (pair && !install && !ack && mic && !sec && !kerr && !req) {
+ EAP_PRINT("EAPOL Packet, 4-way handshake, M2");
+ } else if (pair && ack && mic && sec && !kerr && !req) {
+ EAP_PRINT("EAPOL Packet, 4-way handshake, M3");
+ } else if (pair && !install && !ack && mic && sec && !req && !kerr) {
+ EAP_PRINT("EAPOL Packet, 4-way handshake, M4");
+ } else {
+ DHD_ERROR(("ETHER_TYPE_802_1X[%s] [%s]: ver %d, type %d, replay %d\n",
+ ifname, direction ? "TX" : "RX",
+ dump_data[14], dump_data[15], dump_data[30]));
}
} else {
- /*
- * Legacy logstrs.bin format without header.
- */
- num_fmts = *((uint32 *) (raw_fmts)) / sizeof(uint32);
-
- /* Legacy RAM-only logstrs.bin format:
- * - RAM 'lognums' section
- * - RAM 'logstrs' section.
- *
- * 'lognums' is an array of indexes for the strings in the
- * 'logstrs' section. The first uint32 is an index to the
- * start of 'logstrs'. Therefore, if this index is divided
- * by 'sizeof(uint32)' it provides the number of logstr
- * entries.
- */
- ram_index = 0;
- lognums = (uint32 *) raw_fmts;
- logstrs = (char *) &raw_fmts[num_fmts << 2];
- }
- if (num_fmts)
- fmts = MALLOC(osh, num_fmts * sizeof(char *));
- if (fmts == NULL) {
- DHD_ERROR(("%s: Failed to allocate fmts memory\n", __FUNCTION__));
- return BCME_ERROR;
+ DHD_ERROR(("ETHER_TYPE_802_1X[%s] [%s]: ver %d, type %d, replay %d\n",
+ ifname, direction ? "TX" : "RX",
+ dump_data[14], dump_data[15], dump_data[30]));
}
- event_log->fmts_size = num_fmts * sizeof(char *);
+}
+#endif /* DHD_8021X_DUMP */
- for (i = 0; i < num_fmts; i++) {
- /* ROM lognums index into logstrs using 'rom_logstrs_offset' as a base
- * (they are 0-indexed relative to 'rom_logstrs_offset').
- *
- * RAM lognums are already indexed to point to the correct RAM logstrs (they
- * are 0-indexed relative to the start of the logstrs.bin file).
- */
- if (i == ram_index) {
- logstrs = raw_fmts;
- }
- fmts[i] = &logstrs[lognums[i]];
+#ifdef REPORT_FATAL_TIMEOUTS
+void init_dhd_timeouts(dhd_pub_t *pub)
+{
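+ /* Allocate the context that tracks the scan/join/cmd/bus watchdog timers, their spinlocks and default timeout values. */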
+ pub->timeout_info = MALLOC(pub->osh, sizeof(timeout_info_t));
+ if (pub->timeout_info == NULL) {
+ DHD_ERROR(("%s: Failed to alloc timeout_info\n", __FUNCTION__));
+ } else {
+ DHD_INFO(("Initializing dhd_timeouts\n"));
+ pub->timeout_info->scan_timer_lock = dhd_os_spin_lock_init(pub->osh);
+ pub->timeout_info->join_timer_lock = dhd_os_spin_lock_init(pub->osh);
+ pub->timeout_info->bus_timer_lock = dhd_os_spin_lock_init(pub->osh);
+ pub->timeout_info->cmd_timer_lock = dhd_os_spin_lock_init(pub->osh);
+ pub->timeout_info->scan_timeout_val = SCAN_TIMEOUT_DEFAULT;
+ pub->timeout_info->join_timeout_val = JOIN_TIMEOUT_DEFAULT;
+ pub->timeout_info->cmd_timeout_val = CMD_TIMEOUT_DEFAULT;
+ pub->timeout_info->bus_timeout_val = BUS_TIMEOUT_DEFAULT;
+ pub->timeout_info->scan_timer_active = FALSE;
+ pub->timeout_info->join_timer_active = FALSE;
+ pub->timeout_info->cmd_timer_active = FALSE;
+ pub->timeout_info->bus_timer_active = FALSE;
+ pub->timeout_info->cmd_join_error = WLC_SSID_MASK;
+ pub->timeout_info->cmd_request_id = 0;
}
- event_log->fmts = fmts;
- event_log->raw_fmts_size = logstrs_size;
- event_log->raw_fmts = raw_fmts;
- event_log->num_fmts = num_fmts;
- return BCME_OK;
-} /* dhd_parse_logstrs_file */
+}
-int dhd_parse_map_file(osl_t *osh, void *file, uint32 *ramstart, uint32 *rodata_start,
- uint32 *rodata_end)
+void
+deinit_dhd_timeouts(dhd_pub_t *pub)
{
- char *raw_fmts = NULL, *raw_fmts_loc = NULL;
- uint32 read_size = READ_NUM_BYTES;
- int error = 0;
- char * cptr = NULL;
- char c;
- uint8 count = 0;
+ /* stop the join, scan bus, cmd timers
+ * as failing to do so may cause a kernel panic if
+ * an rmmod is done
+ */
+ if (!pub->timeout_info) {
+ DHD_ERROR(("timeout_info pointer is NULL\n"));
+ ASSERT(0);
+ return;
+ }
+ if (dhd_stop_scan_timer(pub)) {
+ DHD_ERROR(("dhd_stop_scan_timer failed\n"));
+ ASSERT(0);
+ }
+ if (dhd_stop_bus_timer(pub)) {
+ DHD_ERROR(("dhd_stop_bus_timer failed\n"));
+ ASSERT(0);
+ }
+ if (dhd_stop_cmd_timer(pub)) {
+ DHD_ERROR(("dhd_stop_cmd_timer failed\n"));
+ ASSERT(0);
+ }
+ if (dhd_stop_join_timer(pub)) {
+ DHD_ERROR(("dhd_stop_join_timer failed\n"));
+ ASSERT(0);
+ }
- *ramstart = 0;
- *rodata_start = 0;
- *rodata_end = 0;
+ dhd_os_spin_lock_deinit(pub->osh, pub->timeout_info->scan_timer_lock);
+ dhd_os_spin_lock_deinit(pub->osh, pub->timeout_info->join_timer_lock);
+ dhd_os_spin_lock_deinit(pub->osh, pub->timeout_info->bus_timer_lock);
+ dhd_os_spin_lock_deinit(pub->osh, pub->timeout_info->cmd_timer_lock);
+ MFREE(pub->osh, pub->timeout_info, sizeof(timeout_info_t));
+ pub->timeout_info = NULL;
+}
- /* Allocate 1 byte more than read_size to terminate it with NULL */
- raw_fmts = MALLOCZ(osh, read_size + 1);
- if (raw_fmts == NULL) {
- DHD_ERROR(("%s: Failed to allocate raw_fmts memory \n", __FUNCTION__));
- goto fail;
+static void
+dhd_cmd_timeout(void *ctx)
+{
+ dhd_pub_t *pub = (dhd_pub_t *)ctx;
+ unsigned long flags;
+
+ if (!pub->timeout_info) {
+ DHD_ERROR(("DHD: timeout_info NULL\n"));
+ ASSERT(0);
+ return;
}
- /* read ram start, rodata_start and rodata_end values from map file */
- while (count != ALL_MAP_VAL)
- {
- error = dhd_os_read_file(file, raw_fmts, read_size);
- if (error < 0) {
- DHD_ERROR(("%s: map file read failed err:%d \n", __FUNCTION__,
- error));
- goto fail;
- }
+ DHD_TIMER_LOCK(pub->timeout_info->cmd_timer_lock, flags);
+ if (pub->timeout_info && pub->timeout_info->cmd_timer_active) {
+ DHD_ERROR(("\nERROR COMMAND TIMEOUT TO:%d\n", pub->timeout_info->cmd_timeout_val));
+ DHD_TIMER_UNLOCK(pub->timeout_info->cmd_timer_lock, flags);
+#ifdef PCIE_OOB
+ /* Assert device_wake so that UART_Rx is available */
+ if (dhd_bus_set_device_wake(pub->bus, TRUE)) {
+ DHD_ERROR(("%s: dhd_bus_set_device_wake() failed\n", __FUNCTION__));
+ ASSERT(0);
+ }
+#endif /* PCIE_OOB */
+ if (dhd_stop_cmd_timer(pub)) {
+ DHD_ERROR(("%s: dhd_stop_cmd_timer() failed\n", __FUNCTION__));
+ ASSERT(0);
+ }
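+ /* Unblock the pending IOCTL with an error; if no bus errors are already recorded, request a firmware trap for this command timeout. */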
+ dhd_wakeup_ioctl_event(pub, IOCTL_RETURN_ON_ERROR);
+ if (!dhd_query_bus_erros(pub))
+ dhd_send_trap_to_fw_for_timeout(pub, DHD_REASON_COMMAND_TO);
+ } else {
+ DHD_TIMER_UNLOCK(pub->timeout_info->cmd_timer_lock, flags);
+ }
+}
- /* End raw_fmts with NULL as strstr expects NULL terminated strings */
- raw_fmts[read_size] = '\0';
+int
+dhd_start_cmd_timer(dhd_pub_t *pub)
+{
+ int ret = BCME_OK;
+ unsigned long flags = 0;
+ uint32 cmd_to_ms;
- /* Get ramstart address */
- raw_fmts_loc = raw_fmts;
- if (!(count & RAMSTART_BIT) &&
- (cptr = bcmstrnstr(raw_fmts_loc, read_size, ramstart_str,
- strlen(ramstart_str)))) {
- cptr = cptr - BYTES_AHEAD_NUM;
- sscanf(cptr, "%x %c text_start", ramstart, &c);
- count |= RAMSTART_BIT;
- }
+ if (!pub->timeout_info) {
+ DHD_ERROR(("DHD: timeout_info NULL\n"));
+ ret = BCME_ERROR;
+ ASSERT(0);
+ goto exit_null;
+ }
+ DHD_TIMER_LOCK(pub->timeout_info->cmd_timer_lock, flags);
+ cmd_to_ms = pub->timeout_info->cmd_timeout_val;
- /* Get ram rodata start address */
- raw_fmts_loc = raw_fmts;
- if (!(count & RDSTART_BIT) &&
- (cptr = bcmstrnstr(raw_fmts_loc, read_size, rodata_start_str,
- strlen(rodata_start_str)))) {
- cptr = cptr - BYTES_AHEAD_NUM;
- sscanf(cptr, "%x %c rodata_start", rodata_start, &c);
- count |= RDSTART_BIT;
- }
+ if (pub->timeout_info->cmd_timeout_val == 0) {
+ /* Disable Command timer timeout */
+ DHD_INFO(("DHD: Command Timeout Disabled\n"));
+ goto exit;
+ }
+ if (pub->timeout_info->cmd_timer_active) {
+ DHD_ERROR(("%s:Timer already active\n", __FUNCTION__));
+ ret = BCME_ERROR;
+ ASSERT(0);
+ } else {
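+ /* Create the command watchdog and arm it for cmd_to_ms; dhd_cmd_timeout() runs if it expires before dhd_stop_cmd_timer() is called. */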
+ pub->timeout_info->cmd_timer = osl_timer_init(pub->osh,
+ "cmd_timer", dhd_cmd_timeout, pub);
+ osl_timer_update(pub->osh, pub->timeout_info->cmd_timer,
+ cmd_to_ms, 0);
+ pub->timeout_info->cmd_timer_active = TRUE;
+ }
+ if (ret == BCME_OK) {
+ DHD_INFO(("%s Cmd Timer started\n", __FUNCTION__));
+ }
+exit:
+ DHD_TIMER_UNLOCK(pub->timeout_info->cmd_timer_lock, flags);
+exit_null:
+ return ret;
+}
- /* Get ram rodata end address */
- raw_fmts_loc = raw_fmts;
- if (!(count & RDEND_BIT) &&
- (cptr = bcmstrnstr(raw_fmts_loc, read_size, rodata_end_str,
- strlen(rodata_end_str)))) {
- cptr = cptr - BYTES_AHEAD_NUM;
- sscanf(cptr, "%x %c rodata_end", rodata_end, &c);
- count |= RDEND_BIT;
- }
+int
+dhd_stop_cmd_timer(dhd_pub_t *pub)
+{
+ int ret = BCME_OK;
+ unsigned long flags = 0;
- if (error < (int)read_size) {
- /*
- * since we reset file pos back to earlier pos by
- * GO_BACK_FILE_POS_NUM_BYTES bytes we won't reach EOF.
- * The reason for this is if string is spreaded across
- * bytes, the read function should not miss it.
- * So if ret value is less than read_size, reached EOF don't read further
- */
- break;
- }
- memset(raw_fmts, 0, read_size);
- /*
- * go back to predefined NUM of bytes so that we won't miss
- * the string and addr even if it comes as splited in next read.
- */
- dhd_os_seek_file(file, -GO_BACK_FILE_POS_NUM_BYTES);
- }
-
-fail:
- if (raw_fmts) {
- MFREE(osh, raw_fmts, read_size + 1);
- raw_fmts = NULL;
- }
- if (count == ALL_MAP_VAL) {
- return BCME_OK;
- }
- else {
- DHD_ERROR(("%s: readmap error 0X%x \n", __FUNCTION__,
- count));
- return BCME_ERROR;
- }
-
-} /* dhd_parse_map_file */
-
-#ifdef PCIE_FULL_DONGLE
-int
-dhd_event_logtrace_infobuf_pkt_process(dhd_pub_t *dhdp, void *pktbuf,
- dhd_event_log_t *event_data)
-{
- uint32 infobuf_version;
- info_buf_payload_hdr_t *payload_hdr_ptr;
- uint16 payload_hdr_type;
- uint16 payload_hdr_length;
-
- DHD_TRACE(("%s:Enter\n", __FUNCTION__));
-
- if (PKTLEN(dhdp->osh, pktbuf) < sizeof(uint32)) {
- DHD_ERROR(("%s: infobuf too small for version field\n",
- __FUNCTION__));
- goto exit;
- }
- infobuf_version = *((uint32 *)PKTDATA(dhdp->osh, pktbuf));
- PKTPULL(dhdp->osh, pktbuf, sizeof(uint32));
- if (infobuf_version != PCIE_INFOBUF_V1) {
- DHD_ERROR(("%s: infobuf version %d is not PCIE_INFOBUF_V1\n",
- __FUNCTION__, infobuf_version));
+ if (!pub->timeout_info) {
+ DHD_ERROR(("DHD: timeout_info NULL\n"));
+ ret = BCME_ERROR;
+ ASSERT(0);
goto exit;
}
+ DHD_TIMER_LOCK(pub->timeout_info->cmd_timer_lock, flags);
- /* Version 1 infobuf has a single type/length (and then value) field */
- if (PKTLEN(dhdp->osh, pktbuf) < sizeof(info_buf_payload_hdr_t)) {
- DHD_ERROR(("%s: infobuf too small for v1 type/length fields\n",
- __FUNCTION__));
- goto exit;
+ if (pub->timeout_info->cmd_timer_active) {
+ osl_timer_del(pub->osh, pub->timeout_info->cmd_timer);
+ pub->timeout_info->cmd_timer_active = FALSE;
}
- /* Process/parse the common info payload header (type/length) */
- payload_hdr_ptr = (info_buf_payload_hdr_t *)PKTDATA(dhdp->osh, pktbuf);
- payload_hdr_type = ltoh16(payload_hdr_ptr->type);
- payload_hdr_length = ltoh16(payload_hdr_ptr->length);
- if (payload_hdr_type != PCIE_INFOBUF_V1_TYPE_LOGTRACE) {
- DHD_ERROR(("%s: payload_hdr_type %d is not V1_TYPE_LOGTRACE\n",
- __FUNCTION__, payload_hdr_type));
- goto exit;
+ else {
+ DHD_INFO(("DHD: CMD timer is not active\n"));
}
- PKTPULL(dhdp->osh, pktbuf, sizeof(info_buf_payload_hdr_t));
-
- /* Validate that the specified length isn't bigger than the
- * provided data.
- */
- if (payload_hdr_length > PKTLEN(dhdp->osh, pktbuf)) {
- DHD_ERROR(("%s: infobuf logtrace length is bigger"
- " than actual buffer data\n", __FUNCTION__));
- goto exit;
+ if (ret == BCME_OK) {
+ DHD_INFO(("%s Cmd Timer Stopped\n", __FUNCTION__));
}
- dhd_dbg_trace_evnt_handler(dhdp, PKTDATA(dhdp->osh, pktbuf),
- event_data, payload_hdr_length);
-
- return BCME_OK;
-
+ DHD_TIMER_UNLOCK(pub->timeout_info->cmd_timer_lock, flags);
exit:
- return BCME_ERROR;
-} /* dhd_event_logtrace_infobuf_pkt_process */
-#endif /* PCIE_FULL_DONGLE */
-#endif /* SHOW_LOGTRACE */
-
-#if defined(WLTDLS) && defined(PCIE_FULL_DONGLE)
-
-/* To handle the TDLS event in the dhd_common.c
- */
-int dhd_tdls_event_handler(dhd_pub_t *dhd_pub, wl_event_msg_t *event)
-{
- int ret = BCME_OK;
-#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
-#pragma GCC diagnostic push
-#pragma GCC diagnostic ignored "-Wcast-qual"
-#endif // endif
- ret = dhd_tdls_update_peer_info(dhd_pub, event);
-#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
-#pragma GCC diagnostic pop
-#endif // endif
return ret;
}
-int dhd_free_tdls_peer_list(dhd_pub_t *dhd_pub)
+static int
+__dhd_stop_join_timer(dhd_pub_t *pub)
{
- tdls_peer_node_t *cur = NULL, *prev = NULL;
- if (!dhd_pub)
- return BCME_ERROR;
- cur = dhd_pub->peer_tbl.node;
-
- if ((dhd_pub->peer_tbl.node == NULL) && !dhd_pub->peer_tbl.tdls_peer_count)
+ int ret = BCME_OK;
+ if (!pub->timeout_info) {
+ DHD_ERROR(("DHD: timeout_info NULL\n"));
+ ASSERT(0);
return BCME_ERROR;
+ }
- while (cur != NULL) {
- prev = cur;
- cur = cur->next;
- MFREE(dhd_pub->osh, prev, sizeof(tdls_peer_node_t));
+ if (pub->timeout_info->join_timer_active) {
+ osl_timer_del(pub->osh, pub->timeout_info->join_timer);
+ pub->timeout_info->join_timer_active = FALSE;
+ } else {
+ DHD_INFO(("DHD: JOIN timer is not active\n"));
}
- dhd_pub->peer_tbl.tdls_peer_count = 0;
- dhd_pub->peer_tbl.node = NULL;
- return BCME_OK;
+ if (ret == BCME_OK) {
+ DHD_INFO(("%s: Join Timer Stopped\n", __FUNCTION__));
+ }
+ return ret;
}
-#endif /* #if defined(WLTDLS) && defined(PCIE_FULL_DONGLE) */
-/* pretty hex print a contiguous buffer
-* based on the debug level specified
-*/
-void
-dhd_prhex(const char *msg, volatile uchar *buf, uint nbytes, uint8 dbg_level)
+static void
+dhd_join_timeout(void *ctx)
{
- char line[128], *p;
- int len = sizeof(line);
- int nchar;
- uint i;
+ dhd_pub_t *pub = (dhd_pub_t *)ctx;
+ unsigned long flags;
- if (msg && (msg[0] != '\0')) {
- if (dbg_level == DHD_ERROR_VAL)
- DHD_ERROR(("%s:\n", msg));
- else if (dbg_level == DHD_INFO_VAL)
- DHD_INFO(("%s:\n", msg));
- else if (dbg_level == DHD_TRACE_VAL)
- DHD_TRACE(("%s:\n", msg));
+ if (!pub->timeout_info) {
+ DHD_ERROR(("DHD: timeout_info NULL\n"));
+ ASSERT(0);
+ return;
}
- p = line;
- for (i = 0; i < nbytes; i++) {
- if (i % 16 == 0) {
- nchar = snprintf(p, len, " %04x: ", i); /* line prefix */
- p += nchar;
- len -= nchar;
- }
- if (len > 0) {
- nchar = snprintf(p, len, "%02x ", buf[i]);
- p += nchar;
- len -= nchar;
+ DHD_TIMER_LOCK(pub->timeout_info->join_timer_lock, flags);
+ if (pub->timeout_info->join_timer_active) {
+ DHD_TIMER_UNLOCK(pub->timeout_info->join_timer_lock, flags);
+ if (dhd_stop_join_timer(pub)) {
+ DHD_ERROR(("%s: dhd_stop_join_timer() failed\n", __FUNCTION__));
+ ASSERT(0);
}
+ if (pub->timeout_info->cmd_join_error) {
+ DHD_ERROR(("\nERROR JOIN TIMEOUT TO:%d:0x%x\n",
+ pub->timeout_info->join_timeout_val,
+ pub->timeout_info->cmd_join_error));
+#ifdef DHD_FW_COREDUMP
+ /* collect core dump and crash */
+ pub->memdump_enabled = DUMP_MEMFILE_BUGON;
+ pub->memdump_type = DUMP_TYPE_JOIN_TIMEOUT;
+ dhd_bus_mem_dump(pub);
+#endif /* DHD_FW_COREDUMP */
- if (i % 16 == 15) {
- /* flush line */
- if (dbg_level == DHD_ERROR_VAL)
- DHD_ERROR(("%s:\n", line));
- else if (dbg_level == DHD_INFO_VAL)
- DHD_INFO(("%s:\n", line));
- else if (dbg_level == DHD_TRACE_VAL)
- DHD_TRACE(("%s:\n", line));
- p = line;
- len = sizeof(line);
}
- }
-
- /* flush last partial line */
- if (p != line) {
- if (dbg_level == DHD_ERROR_VAL)
- DHD_ERROR(("%s:\n", line));
- else if (dbg_level == DHD_INFO_VAL)
- DHD_INFO(("%s:\n", line));
- else if (dbg_level == DHD_TRACE_VAL)
- DHD_TRACE(("%s:\n", line));
+ } else {
+ DHD_TIMER_UNLOCK(pub->timeout_info->join_timer_lock, flags);
}
}
-#ifdef DUMP_IOCTL_IOV_LIST
-void
-dhd_iov_li_append(dhd_pub_t *dhd, dll_t *list_head, dll_t *node)
+int
+dhd_start_join_timer(dhd_pub_t *pub)
{
- dll_t *item;
- dhd_iov_li_t *iov_li;
- dhd->dump_iovlist_len++;
+ int ret = BCME_OK;
+ unsigned long flags = 0;
+ uint32 join_to_ms;
- if (dhd->dump_iovlist_len == IOV_LIST_MAX_LEN+1) {
- item = dll_head_p(list_head);
- iov_li = (dhd_iov_li_t *)CONTAINEROF(item, dhd_iov_li_t, list);
- dll_delete(item);
- MFREE(dhd->osh, iov_li, sizeof(*iov_li));
- dhd->dump_iovlist_len--;
+ if (!pub->timeout_info) {
+ DHD_ERROR(("DHD: timeout_info NULL\n"));
+ ret = BCME_ERROR;
+ ASSERT(0);
+ goto exit;
}
- dll_append(list_head, node);
-}
-void
-dhd_iov_li_print(dll_t *list_head)
-{
- dhd_iov_li_t *iov_li;
- dll_t *item, *next;
- uint8 index = 0;
- for (item = dll_head_p(list_head); !dll_end(list_head, item); item = next) {
- next = dll_next_p(item);
- iov_li = (dhd_iov_li_t *)CONTAINEROF(item, dhd_iov_li_t, list);
- DHD_ERROR(("%d:cmd_name = %s, cmd = %d.\n", ++index, iov_li->buff, iov_li->cmd));
+ join_to_ms = pub->timeout_info->join_timeout_val;
+ DHD_TIMER_LOCK(pub->timeout_info->join_timer_lock, flags);
+ if (pub->timeout_info->join_timer_active) {
+ DHD_ERROR(("%s: Stopping active timer\n", __FUNCTION__));
+ __dhd_stop_join_timer(pub);
+ }
+ if (pub->timeout_info->join_timeout_val == 0) {
+ /* Disable Join timer timeout */
+ DHD_INFO(("DHD: Join Timeout Disabled\n"));
+ } else {
+ pub->timeout_info->join_timer = osl_timer_init(pub->osh,
+ "join_timer", dhd_join_timeout, pub);
+ osl_timer_update(pub->osh, pub->timeout_info->join_timer, join_to_ms, 0);
+ pub->timeout_info->join_timer_active = TRUE;
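+ /* Mark the SSID join step as outstanding; a non-zero cmd_join_error when the timer fires is treated as a stalled join (see dhd_join_timeout()). */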
+ pub->timeout_info->cmd_join_error |= WLC_SSID_MASK;
+ }
+ if (ret == BCME_OK) {
+ DHD_INFO(("%s:Join Timer started 0x%x\n", __FUNCTION__,
+ pub->timeout_info->cmd_join_error));
}
+ DHD_TIMER_UNLOCK(pub->timeout_info->join_timer_lock, flags);
+exit:
+ return ret;
}
-void
-dhd_iov_li_delete(dhd_pub_t *dhd, dll_t *list_head)
+int
+dhd_stop_join_timer(dhd_pub_t *pub)
{
- dll_t *item;
- dhd_iov_li_t *iov_li;
- while (!(dll_empty(list_head))) {
- item = dll_head_p(list_head);
- iov_li = (dhd_iov_li_t *)CONTAINEROF(item, dhd_iov_li_t, list);
- dll_delete(item);
- MFREE(dhd->osh, iov_li, sizeof(*iov_li));
- }
+ int ret = BCME_OK;
+ unsigned long flags;
+
+ DHD_TIMER_LOCK(pub->timeout_info->join_timer_lock, flags);
+ ret = __dhd_stop_join_timer(pub);
+ DHD_TIMER_UNLOCK(pub->timeout_info->join_timer_lock, flags);
+ return ret;
}
-#endif /* DUMP_IOCTL_IOV_LIST */
-/* configuations of ecounters to be enabled by default in FW */
-static ecounters_cfg_t ecounters_cfg_tbl[] = {
- /* Global ecounters */
- {ECOUNTERS_STATS_TYPES_FLAG_GLOBAL, 0x0, WL_IFSTATS_XTLV_BUS_PCIE},
- // {ECOUNTERS_STATS_TYPES_FLAG_GLOBAL, 0x0, WL_IFSTATS_XTLV_TX_AMPDU_STATS},
- // {ECOUNTERS_STATS_TYPES_FLAG_GLOBAL, 0x0, WL_IFSTATS_XTLV_RX_AMPDU_STATS},
-
- /* Slice specific ecounters */
- {ECOUNTERS_STATS_TYPES_FLAG_SLICE, 0x0, WL_SLICESTATS_XTLV_PERIODIC_STATE},
- {ECOUNTERS_STATS_TYPES_FLAG_SLICE, 0x1, WL_SLICESTATS_XTLV_PERIODIC_STATE},
- {ECOUNTERS_STATS_TYPES_FLAG_SLICE, 0x1, WL_IFSTATS_XTLV_WL_SLICE_BTCOEX},
-
- /* Interface specific ecounters */
- {ECOUNTERS_STATS_TYPES_FLAG_IFACE, 0x0, WL_IFSTATS_XTLV_IF_PERIODIC_STATE},
- {ECOUNTERS_STATS_TYPES_FLAG_IFACE, 0x0, WL_IFSTATS_XTLV_GENERIC},
- {ECOUNTERS_STATS_TYPES_FLAG_IFACE, 0x0, WL_IFSTATS_XTLV_INFRA_SPECIFIC},
- {ECOUNTERS_STATS_TYPES_FLAG_IFACE, 0x0, WL_IFSTATS_XTLV_MGT_CNT},
-
- /* secondary interface */
-};
+static void
+dhd_scan_timeout(void *ctx)
+{
+ dhd_pub_t *pub = (dhd_pub_t *)ctx;
+ unsigned long flags;
-static event_ecounters_cfg_t event_ecounters_cfg_tbl[] = {
- /* Interface specific event ecounters */
- {WLC_E_DEAUTH_IND, ECOUNTERS_STATS_TYPES_FLAG_IFACE, 0x0, WL_IFSTATS_XTLV_IF_EVENT_STATS},
-};
+ if (pub->timeout_info == NULL) {
+ DHD_ERROR(("timeout_info pointer is NULL\n"));
+ ASSERT(0);
+ return;
+ }
+
+ DHD_TIMER_LOCK(pub->timeout_info->scan_timer_lock, flags);
+ if (pub->timeout_info && pub->timeout_info->scan_timer_active) {
+ DHD_ERROR(("\nERROR SCAN TIMEOUT TO:%d\n", pub->timeout_info->scan_timeout_val));
+ DHD_TIMER_UNLOCK(pub->timeout_info->scan_timer_lock, flags);
+ dhd_stop_scan_timer(pub);
+ if (!dhd_query_bus_erros(pub))
+ dhd_send_trap_to_fw_for_timeout(pub, DHD_REASON_SCAN_TO);
+ } else {
+ DHD_TIMER_UNLOCK(pub->timeout_info->scan_timer_lock, flags);
+ }
+}
-/* Accepts an argument to -s, -g or -f and creates an XTLV */
int
-dhd_create_ecounters_params(dhd_pub_t *dhd, uint16 type, uint16 if_slice_idx,
- uint16 stats_rep, uint8 **xtlv)
+dhd_start_scan_timer(dhd_pub_t *pub)
{
- uint8 *req_xtlv = NULL;
- ecounters_stats_types_report_req_t *req;
- bcm_xtlvbuf_t xtlvbuf, container_xtlvbuf;
- ecountersv2_xtlv_list_elt_t temp;
- uint16 xtlv_len = 0, total_len = 0;
- int rc = BCME_OK;
-
- /* fill in the stat type XTLV. For now there is no explicit TLV for the stat type. */
- temp.id = stats_rep;
- temp.len = 0;
-
- /* Hence len/data = 0/NULL */
- xtlv_len += temp.len + BCM_XTLV_HDR_SIZE;
-
- /* Total length of the container */
- total_len = BCM_XTLV_HDR_SIZE +
- OFFSETOF(ecounters_stats_types_report_req_t, stats_types_req) + xtlv_len;
-
- /* Now allocate a structure for the entire request */
- if ((req_xtlv = (uint8 *)MALLOCZ(dhd->osh, total_len)) == NULL) {
- rc = BCME_NOMEM;
- goto fail;
- }
-
- /* container XTLV context */
- bcm_xtlv_buf_init(&container_xtlvbuf, (uint8 *)req_xtlv, total_len,
- BCM_XTLV_OPTION_ALIGN32);
+ int ret = BCME_OK;
+ unsigned long flags = 0;
+ uint32 scan_to_ms;
- /* Fill other XTLVs in the container. Leave space for XTLV headers */
- req = (ecounters_stats_types_report_req_t *)(req_xtlv + BCM_XTLV_HDR_SIZE);
- req->flags = type;
- if (type == ECOUNTERS_STATS_TYPES_FLAG_SLICE) {
- req->slice_mask = 0x1 << if_slice_idx;
- } else if (type == ECOUNTERS_STATS_TYPES_FLAG_IFACE) {
- req->if_index = if_slice_idx;
+ if (!pub->timeout_info) {
+ DHD_ERROR(("DHD: timeout_info NULL\n"));
+ ret = BCME_ERROR;
+ ASSERT(0);
+ goto exit_null;
}
+ DHD_TIMER_LOCK(pub->timeout_info->scan_timer_lock, flags);
+ scan_to_ms = pub->timeout_info->scan_timeout_val;
- /* Fill remaining XTLVs */
- bcm_xtlv_buf_init(&xtlvbuf, (uint8*) req->stats_types_req, xtlv_len,
- BCM_XTLV_OPTION_ALIGN32);
- if (bcm_xtlv_put_data(&xtlvbuf, temp.id, NULL, temp.len)) {
- DHD_ERROR(("Error creating XTLV for requested stats type = %d\n", temp.id));
- rc = BCME_ERROR;
- goto fail;
+ if (pub->timeout_info->scan_timer_active) {
+ /* NOTE : New scan timeout value will be effective
+ * only once current scan is completed.
+ */
+ DHD_ERROR(("%s:Timer already active\n", __FUNCTION__));
+ ret = BCME_ERROR;
+ goto exit;
}
- /* fill the top level container and get done with the XTLV container */
- rc = bcm_xtlv_put_data(&container_xtlvbuf, WL_ECOUNTERS_XTLV_REPORT_REQ, NULL,
- bcm_xtlv_buf_len(&xtlvbuf) + OFFSETOF(ecounters_stats_types_report_req_t,
- stats_types_req));
-
- if (rc) {
- DHD_ERROR(("Error creating parent XTLV for type = %d\n", req->flags));
- goto fail;
+ if (pub->timeout_info->scan_timeout_val == 0) {
+ /* Disable Scan timer timeout */
+ DHD_INFO(("DHD: Scan Timeout Disabled\n"));
+ } else {
+ pub->timeout_info->scan_timer = osl_timer_init(pub->osh, "scan_timer",
+ dhd_scan_timeout, pub);
+ pub->timeout_info->scan_timer_active = TRUE;
+ osl_timer_update(pub->osh, pub->timeout_info->scan_timer, scan_to_ms, 0);
}
-
-fail:
- if (rc && req_xtlv) {
- MFREE(dhd->osh, req_xtlv, total_len);
- req_xtlv = NULL;
+ if (ret == BCME_OK) {
+ DHD_INFO(("%s Scan Timer started\n", __FUNCTION__));
}
-
- /* update the xtlv pointer */
- *xtlv = req_xtlv;
- return rc;
+exit:
+ DHD_TIMER_UNLOCK(pub->timeout_info->scan_timer_lock, flags);
+exit_null:
+ return ret;
}
int
-dhd_get_preserve_log_numbers(dhd_pub_t *dhd, uint32 *logset_mask)
+dhd_stop_scan_timer(dhd_pub_t *pub)
{
- wl_el_set_type_t logset_type, logset_op;
- int ret = BCME_ERROR;
- int i = 0, err = 0;
+ int ret = BCME_OK;
+ unsigned long flags = 0;
- if (!dhd || !logset_mask)
- return BCME_BADARG;
+ if (!pub->timeout_info) {
+ DHD_ERROR(("DHD: timeout_info NULL\n"));
+ ret = BCME_ERROR;
+ ASSERT(0);
+ goto exit;
+ }
+ DHD_TIMER_LOCK(pub->timeout_info->scan_timer_lock, flags);
- *logset_mask = 0;
- memset(&logset_type, 0, sizeof(logset_type));
- memset(&logset_op, 0, sizeof(logset_op));
- logset_type.version = htod16(EVENT_LOG_SET_TYPE_CURRENT_VERSION);
- logset_type.len = htod16(sizeof(wl_el_set_type_t));
- for (i = 0; i < dhd->event_log_max_sets; i++) {
- logset_type.set = i;
- err = dhd_iovar(dhd, 0, "event_log_set_type", (char *)&logset_type,
- sizeof(logset_type), (char *)&logset_op, sizeof(logset_op), FALSE);
- /* the iovar may return 'unsupported' error if a log set number is not present
- * in the fw, so we should not return on error !
- */
- if (err == BCME_OK &&
- logset_op.type == EVENT_LOG_SET_TYPE_PRSRV) {
- *logset_mask |= 0x01u << i;
- ret = BCME_OK;
- DHD_ERROR(("[INIT] logset:%d is preserve/chatty\n", i));
- }
+ if (pub->timeout_info->scan_timer_active) {
+ osl_timer_del(pub->osh, pub->timeout_info->scan_timer);
+ pub->timeout_info->scan_timer_active = FALSE;
+ }
+ else {
+ DHD_INFO(("DHD: SCAN timer is not active\n"));
}
+ if (ret == BCME_OK) {
+ DHD_INFO(("%s Scan Timer Stopped\n", __FUNCTION__));
+ }
+ DHD_TIMER_UNLOCK(pub->timeout_info->scan_timer_lock, flags);
+exit:
return ret;
}
-static int
-dhd_ecounter_autoconfig(dhd_pub_t *dhd)
+static void
+dhd_bus_timeout(void *ctx)
{
- int rc = BCME_OK;
- uint32 buf;
- rc = dhd_iovar(dhd, 0, "ecounters_autoconfig", NULL, 0, (char *)&buf, sizeof(buf), FALSE);
+ dhd_pub_t *pub = (dhd_pub_t *)ctx;
+ unsigned long flags;
- if (rc != BCME_OK) {
+ if (pub->timeout_info == NULL) {
+ DHD_ERROR(("timeout_info pointer is NULL\n"));
+ ASSERT(0);
+ return;
+ }
- if (rc != BCME_UNSUPPORTED) {
- rc = BCME_OK;
- DHD_ERROR(("%s Ecounter autoconfig in fw failed : %d\n", __FUNCTION__, rc));
- } else {
- DHD_ERROR(("%s Ecounter autoconfig in FW not supported\n", __FUNCTION__));
+ DHD_TIMER_LOCK(pub->timeout_info->bus_timer_lock, flags);
+ if (pub->timeout_info->bus_timer_active) {
+ DHD_ERROR(("\nERROR BUS TIMEOUT TO:%d\n", pub->timeout_info->bus_timeout_val));
+ DHD_TIMER_UNLOCK(pub->timeout_info->bus_timer_lock, flags);
+#ifdef PCIE_OOB
+ /* Assert device_wake so that UART_Rx is available */
+ if (dhd_bus_set_device_wake(pub->bus, TRUE)) {
+ DHD_ERROR(("%s: dhd_bus_set_device_wake() failed\n", __FUNCTION__));
+ ASSERT(0);
}
+#endif /* PCIE_OOB */
+ if (dhd_stop_bus_timer(pub)) {
+ DHD_ERROR(("%s: dhd_stop_bus_timer() failed\n", __FUNCTION__));
+ ASSERT(0);
+ }
+ if (!dhd_query_bus_erros(pub))
+ dhd_send_trap_to_fw_for_timeout(pub, DHD_REASON_OQS_TO);
+ } else {
+ DHD_TIMER_UNLOCK(pub->timeout_info->bus_timer_lock, flags);
}
-
- return rc;
}
int
-dhd_ecounter_configure(dhd_pub_t *dhd, bool enable)
+dhd_start_bus_timer(dhd_pub_t *pub)
{
- int rc = BCME_OK;
- if (enable) {
- if (dhd_ecounter_autoconfig(dhd) != BCME_OK) {
- if ((rc = dhd_start_ecounters(dhd)) != BCME_OK) {
- DHD_ERROR(("%s Ecounters start failed\n", __FUNCTION__));
- } else if ((rc = dhd_start_event_ecounters(dhd)) != BCME_OK) {
- DHD_ERROR(("%s Event_Ecounters start failed\n", __FUNCTION__));
- }
- }
+ int ret = BCME_OK;
+ unsigned long flags = 0;
+ uint32 bus_to_ms;
+
+ if (!pub->timeout_info) {
+ DHD_ERROR(("DHD: timeout_info NULL\n"));
+ ret = BCME_ERROR;
+ ASSERT(0);
+ goto exit_null;
+ }
+ DHD_TIMER_LOCK(pub->timeout_info->bus_timer_lock, flags);
+ bus_to_ms = pub->timeout_info->bus_timeout_val;
+
+ if (pub->timeout_info->bus_timeout_val == 0) {
+ /* Disable Bus timer timeout */
+ DHD_INFO(("DHD: Bus Timeout Disabled\n"));
+ goto exit;
+ }
+ if (pub->timeout_info->bus_timer_active) {
+ DHD_ERROR(("%s:Timer already active\n", __FUNCTION__));
+ ret = BCME_ERROR;
+ ASSERT(0);
} else {
- if ((rc = dhd_stop_ecounters(dhd)) != BCME_OK) {
- DHD_ERROR(("%s Ecounters stop failed\n", __FUNCTION__));
- } else if ((rc = dhd_stop_event_ecounters(dhd)) != BCME_OK) {
- DHD_ERROR(("%s Event_Ecounters stop failed\n", __FUNCTION__));
- }
+ pub->timeout_info->bus_timer = osl_timer_init(pub->osh,
+ "bus_timer", dhd_bus_timeout, pub);
+ pub->timeout_info->bus_timer_active = TRUE;
+ osl_timer_update(pub->osh, pub->timeout_info->bus_timer, bus_to_ms, 0);
+ }
+ if (ret == BCME_OK) {
+ DHD_INFO(("%s: BUS Timer started\n", __FUNCTION__));
}
- return rc;
+exit:
+ DHD_TIMER_UNLOCK(pub->timeout_info->bus_timer_lock, flags);
+exit_null:
+ return ret;
}
int
-dhd_start_ecounters(dhd_pub_t *dhd)
+dhd_stop_bus_timer(dhd_pub_t *pub)
{
- uint8 i = 0;
- uint8 *start_ptr;
- int rc = BCME_OK;
- bcm_xtlv_t *elt;
- ecounters_config_request_v2_t *req = NULL;
- ecountersv2_processed_xtlv_list_elt *list_elt, *tail = NULL;
- ecountersv2_processed_xtlv_list_elt *processed_containers_list = NULL;
- uint16 total_processed_containers_len = 0;
-
- for (i = 0; i < ARRAYSIZE(ecounters_cfg_tbl); i++) {
- ecounters_cfg_t *ecounter_stat = &ecounters_cfg_tbl[i];
-
- if ((list_elt = (ecountersv2_processed_xtlv_list_elt *)
- MALLOCZ(dhd->osh, sizeof(*list_elt))) == NULL) {
- DHD_ERROR(("Ecounters v2: No memory to process\n"));
- goto fail;
- }
-
- rc = dhd_create_ecounters_params(dhd, ecounter_stat->type,
- ecounter_stat->if_slice_idx, ecounter_stat->stats_rep, &list_elt->data);
+ int ret = BCME_OK;
+ unsigned long flags = 0;
- if (rc) {
- DHD_ERROR(("Ecounters v2: Could not process: stat: %d return code: %d\n",
- ecounter_stat->stats_rep, rc));
+ if (!pub->timeout_info) {
+ DHD_ERROR(("DHD: timeout_info NULL\n"));
+ ret = BCME_ERROR;
+ ASSERT(0);
+ goto exit;
+ }
+ DHD_TIMER_LOCK(pub->timeout_info->bus_timer_lock, flags);
- /* Free allocated memory and go to fail to release any memories allocated
- * in previous iterations. Note that list_elt->data gets populated in
- * dhd_create_ecounters_params() and gets freed there itself.
- */
- MFREE(dhd->osh, list_elt, sizeof(*list_elt));
- list_elt = NULL;
- goto fail;
- }
- elt = (bcm_xtlv_t *) list_elt->data;
-
- /* Put the elements in the order they are processed */
- if (processed_containers_list == NULL) {
- processed_containers_list = list_elt;
- } else {
- tail->next = list_elt;
- }
- tail = list_elt;
- /* Size of the XTLV returned */
- total_processed_containers_len += BCM_XTLV_LEN(elt) + BCM_XTLV_HDR_SIZE;
- }
-
- /* Now create ecounters config request with totallength */
- req = (ecounters_config_request_v2_t *)MALLOCZ(dhd->osh, sizeof(*req) +
- total_processed_containers_len);
-
- if (req == NULL) {
- rc = BCME_NOMEM;
- goto fail;
- }
-
- req->version = ECOUNTERS_VERSION_2;
- req->logset = EVENT_LOG_SET_ECOUNTERS;
- req->reporting_period = ECOUNTERS_DEFAULT_PERIOD;
- req->num_reports = ECOUNTERS_NUM_REPORTS;
- req->len = total_processed_containers_len +
- OFFSETOF(ecounters_config_request_v2_t, ecounters_xtlvs);
-
- /* Copy config */
- start_ptr = req->ecounters_xtlvs;
-
- /* Now go element by element in the list */
- while (processed_containers_list) {
- list_elt = processed_containers_list;
-
- elt = (bcm_xtlv_t *)list_elt->data;
-
- memcpy(start_ptr, list_elt->data, BCM_XTLV_LEN(elt) + BCM_XTLV_HDR_SIZE);
- start_ptr += (size_t)(BCM_XTLV_LEN(elt) + BCM_XTLV_HDR_SIZE);
- processed_containers_list = processed_containers_list->next;
-
- /* Free allocated memories */
- MFREE(dhd->osh, elt, elt->len + BCM_XTLV_HDR_SIZE);
- MFREE(dhd->osh, list_elt, sizeof(*list_elt));
- }
-
- if ((rc = dhd_iovar(dhd, 0, "ecounters", (char *)req, req->len, NULL, 0, TRUE)) < 0) {
- DHD_ERROR(("failed to start ecounters\n"));
+ if (pub->timeout_info->bus_timer_active) {
+ osl_timer_del(pub->osh, pub->timeout_info->bus_timer);
+ pub->timeout_info->bus_timer_active = FALSE;
}
-
-fail:
- if (req) {
- MFREE(dhd->osh, req, sizeof(*req) + total_processed_containers_len);
+ else {
+ DHD_INFO(("DHD: BUS timer is not active\n"));
}
-
- /* Now go element by element in the list */
- while (processed_containers_list) {
- list_elt = processed_containers_list;
- elt = (bcm_xtlv_t *)list_elt->data;
- processed_containers_list = processed_containers_list->next;
-
- /* Free allocated memories */
- MFREE(dhd->osh, elt, elt->len + BCM_XTLV_HDR_SIZE);
- MFREE(dhd->osh, list_elt, sizeof(*list_elt));
+ if (ret == BCME_OK) {
+ DHD_INFO(("%s: Bus Timer Stopped\n", __FUNCTION__));
}
- return rc;
+ DHD_TIMER_UNLOCK(pub->timeout_info->bus_timer_lock, flags);
+exit:
+ return ret;
}
int
-dhd_stop_ecounters(dhd_pub_t *dhd)
+dhd_set_request_id(dhd_pub_t *pub, uint16 id, uint32 cmd)
{
- int rc = BCME_OK;
- ecounters_config_request_v2_t *req;
-
- /* Now create ecounters config request with totallength */
- req = (ecounters_config_request_v2_t *)MALLOCZ(dhd->osh, sizeof(*req));
-
- if (req == NULL) {
- rc = BCME_NOMEM;
- goto fail;
- }
-
- req->version = ECOUNTERS_VERSION_2;
- req->len = OFFSETOF(ecounters_config_request_v2_t, ecounters_xtlvs);
-
- if ((rc = dhd_iovar(dhd, 0, "ecounters", (char *)req, req->len, NULL, 0, TRUE)) < 0) {
- DHD_ERROR(("failed to stop ecounters\n"));
- }
-
-fail:
- if (req) {
- MFREE(dhd->osh, req, sizeof(*req));
+ DHD_INFO(("%s: id:%d\n", __FUNCTION__, id));
+ if (pub->timeout_info) {
+ pub->timeout_info->cmd_request_id = id;
+ pub->timeout_info->cmd = cmd;
+ return BCME_OK;
+ } else {
+ return BCME_ERROR;
}
- return rc;
}
-/* configured event_id_array for event ecounters */
-typedef struct event_id_array {
- uint8 event_id;
- uint8 str_idx;
-} event_id_array_t;
-
-/* get event id array only from event_ecounters_cfg_tbl[] */
-static inline int __dhd_event_ecounters_get_event_id_array(event_id_array_t *event_array)
+uint16
+dhd_get_request_id(dhd_pub_t *pub)
{
- uint8 i;
- uint8 idx = 0;
- int32 prev_evt_id = -1;
-
- for (i = 0; i < (uint8)ARRAYSIZE(event_ecounters_cfg_tbl); i++) {
- if (prev_evt_id != event_ecounters_cfg_tbl[i].event_id) {
- if (prev_evt_id >= 0)
- idx++;
- event_array[idx].event_id = event_ecounters_cfg_tbl[i].event_id;
- event_array[idx].str_idx = i;
- }
- prev_evt_id = event_ecounters_cfg_tbl[i].event_id;
+ if (pub->timeout_info) {
+ return (pub->timeout_info->cmd_request_id);
+ } else {
+ return 0;
}
- return idx;
}
-/* One event id has limit xtlv num to request based on wl_ifstats_xtlv_id * 2 interface */
-#define ECNTRS_MAX_XTLV_NUM (31 * 2)
-
-int
-dhd_start_event_ecounters(dhd_pub_t *dhd)
+void
+dhd_set_join_error(dhd_pub_t *pub, uint32 mask)
{
- uint8 i, j = 0;
- uint8 event_id_cnt = 0;
- uint16 processed_containers_len = 0;
- uint16 max_xtlv_len = 0;
- int rc = BCME_OK;
- uint8 *ptr;
- uint8 *data;
- event_id_array_t *id_array;
- bcm_xtlv_t *elt = NULL;
- event_ecounters_config_request_v2_t *req = NULL;
-
- id_array = (event_id_array_t *)MALLOCZ(dhd->osh, sizeof(event_id_array_t) *
- ARRAYSIZE(event_ecounters_cfg_tbl));
-
- if (id_array == NULL) {
- rc = BCME_NOMEM;
- goto fail;
+ DHD_INFO(("Setting join Error %d\n", mask));
+ if (pub->timeout_info) {
+ pub->timeout_info->cmd_join_error |= mask;
}
- event_id_cnt = __dhd_event_ecounters_get_event_id_array(id_array);
-
- max_xtlv_len = ((BCM_XTLV_HDR_SIZE +
- OFFSETOF(event_ecounters_config_request_v2_t, ecounters_xtlvs)) *
- ECNTRS_MAX_XTLV_NUM);
-
- /* Now create ecounters config request with max allowed length */
- req = (event_ecounters_config_request_v2_t *)MALLOCZ(dhd->osh,
- sizeof(event_ecounters_config_request_v2_t *) + max_xtlv_len);
-
- if (req == NULL) {
- rc = BCME_NOMEM;
- goto fail;
- }
-
- for (i = 0; i <= event_id_cnt; i++) {
- /* req initialization by event id */
- req->version = ECOUNTERS_VERSION_2;
- req->logset = EVENT_LOG_SET_ECOUNTERS;
- req->event_id = id_array[i].event_id;
- req->flags = EVENT_ECOUNTERS_FLAGS_ADD;
- req->len = 0;
- processed_containers_len = 0;
-
- /* Copy config */
- ptr = req->ecounters_xtlvs;
-
- for (j = id_array[i].str_idx; j < (uint8)ARRAYSIZE(event_ecounters_cfg_tbl); j++) {
- event_ecounters_cfg_t *event_ecounter_stat = &event_ecounters_cfg_tbl[j];
- if (id_array[i].event_id != event_ecounter_stat->event_id)
- break;
-
- rc = dhd_create_ecounters_params(dhd, event_ecounter_stat->type,
- event_ecounter_stat->if_slice_idx, event_ecounter_stat->stats_rep,
- &data);
-
- if (rc) {
- DHD_ERROR(("%s: Could not process: stat: %d return code: %d\n",
- __FUNCTION__, event_ecounter_stat->stats_rep, rc));
- goto fail;
- }
-
- elt = (bcm_xtlv_t *)data;
-
- memcpy(ptr, elt, BCM_XTLV_LEN(elt) + BCM_XTLV_HDR_SIZE);
- ptr += (size_t)(BCM_XTLV_LEN(elt) + BCM_XTLV_HDR_SIZE);
- processed_containers_len += BCM_XTLV_LEN(elt) + BCM_XTLV_HDR_SIZE;
-
- /* Free allocated memories alloced by dhd_create_ecounters_params */
- MFREE(dhd->osh, elt, elt->len + BCM_XTLV_HDR_SIZE);
-
- if (processed_containers_len > max_xtlv_len) {
- DHD_ERROR(("%s XTLV NUM IS OVERFLOWED THAN ALLOWED!!\n",
- __FUNCTION__));
- rc = BCME_BADLEN;
- goto fail;
- }
- }
-
- req->len = processed_containers_len +
- OFFSETOF(event_ecounters_config_request_v2_t, ecounters_xtlvs);
-
- DHD_INFO(("%s req version %d logset %d event_id %d flags %d len %d\n",
- __FUNCTION__, req->version, req->logset, req->event_id,
- req->flags, req->len));
-
- rc = dhd_iovar(dhd, 0, "event_ecounters", (char *)req, req->len, NULL, 0, TRUE);
-
- if (rc < 0) {
- DHD_ERROR(("failed to start event_ecounters(event id %d) with rc %d\n",
- req->event_id, rc));
- goto fail;
- }
- }
-
-fail:
- /* Free allocated memories */
- if (req) {
- MFREE(dhd->osh, req, sizeof(event_ecounters_config_request_v2_t *) + max_xtlv_len);
- }
- if (id_array) {
- MFREE(dhd->osh, id_array, sizeof(event_id_array_t) *
- ARRAYSIZE(event_ecounters_cfg_tbl));
- }
-
- return rc;
}
-int
-dhd_stop_event_ecounters(dhd_pub_t *dhd)
+void
+dhd_clear_join_error(dhd_pub_t *pub, uint32 mask)
{
- int rc = BCME_OK;
- event_ecounters_config_request_v2_t *req;
-
- /* Now create ecounters config request with totallength */
- req = (event_ecounters_config_request_v2_t *)MALLOCZ(dhd->osh, sizeof(*req));
-
- if (req == NULL) {
- rc = BCME_NOMEM;
- goto fail;
- }
-
- req->version = ECOUNTERS_VERSION_2;
- req->flags = EVENT_ECOUNTERS_FLAGS_DEL_ALL;
- req->len = OFFSETOF(event_ecounters_config_request_v2_t, ecounters_xtlvs);
-
- if ((rc = dhd_iovar(dhd, 0, "event_ecounters", (char *)req, req->len, NULL, 0, TRUE)) < 0) {
- DHD_ERROR(("failed to stop event_ecounters\n"));
+ DHD_INFO(("clear join Error %d\n", mask));
+ if (pub->timeout_info) {
+ pub->timeout_info->cmd_join_error &= ~mask;
}
-
-fail:
- if (req) {
- MFREE(dhd->osh, req, sizeof(*req));
- }
- return rc;
}
-#ifdef DHD_LOG_DUMP
-int
-dhd_dump_debug_ring(dhd_pub_t *dhdp, void *ring_ptr, const void *user_buf,
- log_dump_section_hdr_t *sec_hdr,
- char *text_hdr, int buflen, uint32 sec_type)
+void
+dhd_get_scan_to_val(dhd_pub_t *pub, uint32 *to_val)
{
- uint32 rlen = 0;
- uint32 data_len = 0;
- void *data = NULL;
- unsigned long flags = 0;
- int ret = 0;
- dhd_dbg_ring_t *ring = (dhd_dbg_ring_t *)ring_ptr;
- int pos = 0;
- int fpos_sechdr = 0;
-
- if (!dhdp || !ring || !user_buf || !sec_hdr || !text_hdr) {
- return BCME_BADARG;
- }
- /* do not allow further writes to the ring
- * till we flush it
- */
- DHD_DBG_RING_LOCK(ring->lock, flags);
- ring->state = RING_SUSPEND;
- DHD_DBG_RING_UNLOCK(ring->lock, flags);
-
- if (dhdp->concise_dbg_buf) {
- /* re-use concise debug buffer temporarily
- * to pull ring data, to write
- * record by record to file
- */
- data_len = CONCISE_DUMP_BUFLEN;
- data = dhdp->concise_dbg_buf;
- ret = dhd_export_debug_data(text_hdr, NULL, user_buf, strlen(text_hdr), &pos);
- /* write the section header now with zero length,
- * once the correct length is found out, update
- * it later
- */
- fpos_sechdr = pos;
- sec_hdr->type = sec_type;
- sec_hdr->length = 0;
- ret = dhd_export_debug_data((char *)sec_hdr, NULL, user_buf,
- sizeof(*sec_hdr), &pos);
- do {
- rlen = dhd_dbg_ring_pull_single(ring, data, data_len, TRUE);
- if (rlen > 0) {
- /* write the log */
- ret = dhd_export_debug_data(data, NULL, user_buf, rlen, &pos);
- }
- DHD_DBGIF(("%s: rlen : %d\n", __FUNCTION__, rlen));
- } while ((rlen > 0));
- /* now update the section header length in the file */
- /* Complete ring size is dumped by HAL, hence updating length to ring size */
- sec_hdr->length = ring->ring_size;
- ret = dhd_export_debug_data((char *)sec_hdr, NULL, user_buf,
- sizeof(*sec_hdr), &fpos_sechdr);
+ if (pub->timeout_info) {
+ *to_val = pub->timeout_info->scan_timeout_val;
} else {
- DHD_ERROR(("%s: No concise buffer available !\n", __FUNCTION__));
+ *to_val = 0;
}
- DHD_DBG_RING_LOCK(ring->lock, flags);
- ring->state = RING_ACTIVE;
- /* Resetting both read and write pointer,
- * since all items are read.
- */
- ring->rp = ring->wp = 0;
- DHD_DBG_RING_UNLOCK(ring->lock, flags);
-
- return ret;
}
-int
-dhd_log_dump_ring_to_file(dhd_pub_t *dhdp, void *ring_ptr, void *file,
- unsigned long *file_posn, log_dump_section_hdr_t *sec_hdr,
- char *text_hdr, uint32 sec_type)
+void
+dhd_set_scan_to_val(dhd_pub_t *pub, uint32 to_val)
{
- uint32 rlen = 0;
- uint32 data_len = 0, total_len = 0;
- void *data = NULL;
- unsigned long fpos_sechdr = 0;
- unsigned long flags = 0;
- int ret = 0;
- dhd_dbg_ring_t *ring = (dhd_dbg_ring_t *)ring_ptr;
-
- if (!dhdp || !ring || !file || !sec_hdr ||
- !file_posn || !text_hdr)
- return BCME_BADARG;
-
- /* do not allow further writes to the ring
- * till we flush it
- */
- DHD_DBG_RING_LOCK(ring->lock, flags);
- ring->state = RING_SUSPEND;
- DHD_DBG_RING_UNLOCK(ring->lock, flags);
-
- if (dhdp->concise_dbg_buf) {
- /* re-use concise debug buffer temporarily
- * to pull ring data, to write
- * record by record to file
- */
- data_len = CONCISE_DUMP_BUFLEN;
- data = dhdp->concise_dbg_buf;
- dhd_os_write_file_posn(file, file_posn, text_hdr,
- strlen(text_hdr));
- /* write the section header now with zero length,
- * once the correct length is found out, update
- * it later
- */
- dhd_init_sec_hdr(sec_hdr);
- fpos_sechdr = *file_posn;
- sec_hdr->type = sec_type;
- sec_hdr->length = 0;
- dhd_os_write_file_posn(file, file_posn, (char *)sec_hdr,
- sizeof(*sec_hdr));
- do {
- rlen = dhd_dbg_ring_pull_single(ring, data, data_len, TRUE);
- if (rlen > 0) {
- /* write the log */
- ret = dhd_os_write_file_posn(file, file_posn, data, rlen);
- if (ret < 0) {
- DHD_ERROR(("%s: write file error !\n", __FUNCTION__));
- DHD_DBG_RING_LOCK(ring->lock, flags);
- ring->state = RING_ACTIVE;
- DHD_DBG_RING_UNLOCK(ring->lock, flags);
- return BCME_ERROR;
- }
- }
- total_len += rlen;
- } while (rlen > 0);
- /* now update the section header length in the file */
- sec_hdr->length = total_len;
- dhd_os_write_file_posn(file, &fpos_sechdr, (char *)sec_hdr, sizeof(*sec_hdr));
- } else {
- DHD_ERROR(("%s: No concise buffer available !\n", __FUNCTION__));
+ if (pub->timeout_info) {
+ DHD_INFO(("Setting TO val:%d\n", to_val));
+ pub->timeout_info->scan_timeout_val = to_val;
}
-
- DHD_DBG_RING_LOCK(ring->lock, flags);
- ring->state = RING_ACTIVE;
- /* Resetting both read and write pointer,
- * since all items are read.
- */
- ring->rp = ring->wp = 0;
- DHD_DBG_RING_UNLOCK(ring->lock, flags);
- return BCME_OK;
}
-/* logdump cookie */
-#define MAX_LOGUDMP_COOKIE_CNT 10u
-#define LOGDUMP_COOKIE_STR_LEN 50u
-int
-dhd_logdump_cookie_init(dhd_pub_t *dhdp, uint8 *buf, uint32 buf_size)
+void
+dhd_get_join_to_val(dhd_pub_t *pub, uint32 *to_val)
{
- uint32 ring_size;
-
- if (!dhdp || !buf) {
- DHD_ERROR(("INVALID PTR: dhdp:%p buf:%p\n", dhdp, buf));
- return BCME_ERROR;
- }
-
- ring_size = dhd_ring_get_hdr_size() + LOGDUMP_COOKIE_STR_LEN * MAX_LOGUDMP_COOKIE_CNT;
- if (buf_size < ring_size) {
- DHD_ERROR(("BUF SIZE IS TO SHORT: req:%d buf_size:%d\n",
- ring_size, buf_size));
- return BCME_ERROR;
- }
-
- dhdp->logdump_cookie = dhd_ring_init(dhdp, buf, buf_size,
- LOGDUMP_COOKIE_STR_LEN, MAX_LOGUDMP_COOKIE_CNT,
- DHD_RING_TYPE_FIXED);
- if (!dhdp->logdump_cookie) {
- DHD_ERROR(("FAIL TO INIT COOKIE RING\n"));
- return BCME_ERROR;
+ if (pub->timeout_info) {
+ *to_val = pub->timeout_info->join_timeout_val;
+ } else {
+ *to_val = 0;
}
-
- return BCME_OK;
}
void
-dhd_logdump_cookie_deinit(dhd_pub_t *dhdp)
+dhd_set_join_to_val(dhd_pub_t *pub, uint32 to_val)
{
- if (!dhdp) {
- return;
+ if (pub->timeout_info) {
+ DHD_INFO(("Setting TO val:%d\n", to_val));
+ pub->timeout_info->join_timeout_val = to_val;
}
- if (dhdp->logdump_cookie) {
- dhd_ring_deinit(dhdp, dhdp->logdump_cookie);
- }
-
- return;
}
void
-dhd_logdump_cookie_save(dhd_pub_t *dhdp, char *cookie, char *type)
+dhd_get_cmd_to_val(dhd_pub_t *pub, uint32 *to_val)
{
- char *ptr;
-
- if (!dhdp || !cookie || !type || !dhdp->logdump_cookie) {
- DHD_ERROR(("%s: At least one buffer ptr is NULL dhdp=%p cookie=%p"
- " type = %p, cookie_cfg:%p\n", __FUNCTION__,
- dhdp, cookie, type, dhdp?dhdp->logdump_cookie: NULL));
- return;
- }
- ptr = (char *)dhd_ring_get_empty(dhdp->logdump_cookie);
- if (ptr == NULL) {
- DHD_ERROR(("%s : Skip to save due to locking\n", __FUNCTION__));
- return;
+ if (pub->timeout_info) {
+ *to_val = pub->timeout_info->cmd_timeout_val;
+ } else {
+ *to_val = 0;
}
- scnprintf(ptr, LOGDUMP_COOKIE_STR_LEN, "%s: %s\n", type, cookie);
- return;
}
-int
-dhd_logdump_cookie_get(dhd_pub_t *dhdp, char *ret_cookie, uint32 buf_size)
+void
+dhd_set_cmd_to_val(dhd_pub_t *pub, uint32 to_val)
{
- char *ptr;
-
- if (!dhdp || !ret_cookie || !dhdp->logdump_cookie) {
- DHD_ERROR(("%s: At least one buffer ptr is NULL dhdp=%p"
- "cookie=%p cookie_cfg:%p\n", __FUNCTION__,
- dhdp, ret_cookie, dhdp?dhdp->logdump_cookie: NULL));
- return BCME_ERROR;
- }
- ptr = (char *)dhd_ring_get_first(dhdp->logdump_cookie);
- if (ptr == NULL) {
- DHD_ERROR(("%s : Skip to save due to locking\n", __FUNCTION__));
- return BCME_ERROR;
+ if (pub->timeout_info) {
+ DHD_INFO(("Setting TO val:%d\n", to_val));
+ pub->timeout_info->cmd_timeout_val = to_val;
}
- memcpy(ret_cookie, ptr, MIN(buf_size, strlen(ptr)));
- dhd_ring_free_first(dhdp->logdump_cookie);
- return BCME_OK;
}
-int
-dhd_logdump_cookie_count(dhd_pub_t *dhdp)
+void
+dhd_get_bus_to_val(dhd_pub_t *pub, uint32 *to_val)
{
- if (!dhdp || !dhdp->logdump_cookie) {
- DHD_ERROR(("%s: At least one buffer ptr is NULL dhdp=%p cookie=%p\n",
- __FUNCTION__, dhdp, dhdp?dhdp->logdump_cookie: NULL));
- return 0;
+ if (pub->timeout_info) {
+ *to_val = pub->timeout_info->bus_timeout_val;
+ } else {
+ *to_val = 0;
}
- return dhd_ring_get_cur_size(dhdp->logdump_cookie);
}
-static inline int
-__dhd_log_dump_cookie_to_file(
- dhd_pub_t *dhdp, void *fp, const void *user_buf, unsigned long *f_pos,
- char *buf, uint32 buf_size)
+void
+dhd_set_bus_to_val(dhd_pub_t *pub, uint32 to_val)
{
-
- uint32 remain = buf_size;
- int ret = BCME_ERROR;
- char tmp_buf[LOGDUMP_COOKIE_STR_LEN];
- log_dump_section_hdr_t sec_hdr;
- uint32 read_idx;
- uint32 write_idx;
-
- read_idx = dhd_ring_get_read_idx(dhdp->logdump_cookie);
- write_idx = dhd_ring_get_write_idx(dhdp->logdump_cookie);
- while (dhd_logdump_cookie_count(dhdp) > 0) {
- memset(tmp_buf, 0, sizeof(tmp_buf));
- ret = dhd_logdump_cookie_get(dhdp, tmp_buf, LOGDUMP_COOKIE_STR_LEN);
- if (ret != BCME_OK) {
- return ret;
- }
- remain -= scnprintf(&buf[buf_size - remain], remain, "%s", tmp_buf);
- }
- dhd_ring_set_read_idx(dhdp->logdump_cookie, read_idx);
- dhd_ring_set_write_idx(dhdp->logdump_cookie, write_idx);
-
- ret = dhd_export_debug_data(COOKIE_LOG_HDR, fp, user_buf, strlen(COOKIE_LOG_HDR), f_pos);
- if (ret < 0) {
- DHD_ERROR(("%s : Write file Error for cookie hdr\n", __FUNCTION__));
- return ret;
- }
- sec_hdr.magic = LOG_DUMP_MAGIC;
- sec_hdr.timestamp = local_clock();
- sec_hdr.type = LOG_DUMP_SECTION_COOKIE;
- sec_hdr.length = buf_size - remain;
-
- ret = dhd_export_debug_data((char *)&sec_hdr, fp, user_buf, sizeof(sec_hdr), f_pos);
- if (ret < 0) {
- DHD_ERROR(("%s : Write file Error for section hdr\n", __FUNCTION__));
- return ret;
+ if (pub->timeout_info) {
+ DHD_INFO(("Setting TO val:%d\n", to_val));
+ pub->timeout_info->bus_timeout_val = to_val;
}
-
- ret = dhd_export_debug_data(buf, fp, user_buf, sec_hdr.length, f_pos);
- if (ret < 0) {
- DHD_ERROR(("%s : Write file Error for cookie data\n", __FUNCTION__));
- }
-
- return ret;
}
+#endif /* REPORT_FATAL_TIMEOUTS */
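The REPORT_FATAL_TIMEOUTS accessors added above all follow one pattern: every getter and setter checks pub->timeout_info before touching a field, and the getters report 0 when it is absent. A minimal usage sketch, assuming a dhd_pub_t whose timeout_info has already been allocated; the function name and the timeout values below are illustrative only:

static void
example_configure_timeouts(dhd_pub_t *pub)
{
	uint32 bus_to = 0;

	/* Getters return 0 when timeout_info has not been set up yet */
	dhd_get_bus_to_val(pub, &bus_to);

	/* Only (re)program the bus timeout if nothing is configured */
	if (bus_to == 0)
		dhd_set_bus_to_val(pub, 2000);

	/* Scan/join/cmd timeouts use the same guarded set/get pattern */
	dhd_set_scan_to_val(pub, 2000);
	dhd_set_join_to_val(pub, 2000);
	dhd_set_cmd_to_val(pub, 2000);
}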
-uint32
-dhd_log_dump_cookie_len(dhd_pub_t *dhdp)
+#ifdef SHOW_LOGTRACE
+int
+dhd_parse_logstrs_file(osl_t *osh, char *raw_fmts, int logstrs_size,
+ dhd_event_log_t *event_log)
{
- int len = 0;
- char tmp_buf[LOGDUMP_COOKIE_STR_LEN];
- log_dump_section_hdr_t sec_hdr;
- char *buf = NULL;
- int ret = BCME_ERROR;
- uint32 buf_size = MAX_LOGUDMP_COOKIE_CNT * LOGDUMP_COOKIE_STR_LEN;
- uint32 read_idx;
- uint32 write_idx;
- uint32 remain;
+ logstr_header_t *hdr = NULL;
+ uint32 *lognums = NULL;
+ char *logstrs = NULL;
+ int ram_index = 0;
+ char **fmts;
+ int num_fmts = 0;
+ int32 i = 0;
- remain = buf_size;
+ /* Remember header from the logstrs.bin file */
+ hdr = (logstr_header_t *) (raw_fmts + logstrs_size -
+ sizeof(logstr_header_t));
- if (!dhdp || !dhdp->logdump_cookie) {
- DHD_ERROR(("%s At least one ptr is NULL "
- "dhdp = %p cookie %p\n",
- __FUNCTION__, dhdp, dhdp?dhdp->logdump_cookie:NULL));
- goto exit;
+ if (hdr->log_magic == LOGSTRS_MAGIC) {
+ /*
+ * logstrs.bin starts with a header.
+ */
+ num_fmts = hdr->rom_logstrs_offset / sizeof(uint32);
+ ram_index = (hdr->ram_lognums_offset -
+ hdr->rom_lognums_offset) / sizeof(uint32);
+ lognums = (uint32 *) &raw_fmts[hdr->rom_lognums_offset];
+ logstrs = (char *) &raw_fmts[hdr->rom_logstrs_offset];
+ } else {
+ /*
+ * Legacy logstrs.bin format without header.
+ */
+ num_fmts = *((uint32 *) (raw_fmts)) / sizeof(uint32);
+ if (num_fmts == 0) {
+ /* Legacy ROM/RAM logstrs.bin format:
+ * - ROM 'lognums' section
+ * - RAM 'lognums' section
+ * - ROM 'logstrs' section.
+ * - RAM 'logstrs' section.
+ *
+ * 'lognums' is an array of indexes for the strings in the
+ * 'logstrs' section. The first uint32 is 0 (index of first
+ * string in ROM 'logstrs' section).
+ *
+ * The 4324b5 is the only ROM that uses this legacy format. Use the
+ * fixed number of ROM fmtnums to find the start of the RAM
+ * 'lognums' section. Use the fixed first ROM string ("Con\n") to
+ * find the ROM 'logstrs' section.
+ */
+ #define NUM_4324B5_ROM_FMTS 186
+ #define FIRST_4324B5_ROM_LOGSTR "Con\n"
+ ram_index = NUM_4324B5_ROM_FMTS;
+ lognums = (uint32 *) raw_fmts;
+ num_fmts = ram_index;
+ logstrs = (char *) &raw_fmts[num_fmts << 2];
+ while (strncmp(FIRST_4324B5_ROM_LOGSTR, logstrs, 4)) {
+ num_fmts++;
+ logstrs = (char *) &raw_fmts[num_fmts << 2];
+ }
+ } else {
+ /* Legacy RAM-only logstrs.bin format:
+ * - RAM 'lognums' section
+ * - RAM 'logstrs' section.
+ *
+ * 'lognums' is an array of indexes for the strings in the
+ * 'logstrs' section. The first uint32 is an index to the
+ * start of 'logstrs'. Therefore, if this index is divided
+ * by 'sizeof(uint32)' it provides the number of logstr
+ * entries.
+ */
+ ram_index = 0;
+ lognums = (uint32 *) raw_fmts;
+ logstrs = (char *) &raw_fmts[num_fmts << 2];
+ }
}
-
- buf = (char *)MALLOCZ(dhdp->osh, buf_size);
- if (!buf) {
- DHD_ERROR(("%s Fail to malloc buffer\n", __FUNCTION__));
- goto exit;
+ fmts = MALLOC(osh, num_fmts * sizeof(char *));
+ if (fmts == NULL) {
+ DHD_ERROR(("%s: Failed to allocate fmts memory\n", __FUNCTION__));
+ return BCME_ERROR;
}
+ event_log->fmts_size = num_fmts * sizeof(char *);
- read_idx = dhd_ring_get_read_idx(dhdp->logdump_cookie);
- write_idx = dhd_ring_get_write_idx(dhdp->logdump_cookie);
- while (dhd_logdump_cookie_count(dhdp) > 0) {
- memset(tmp_buf, 0, sizeof(tmp_buf));
- ret = dhd_logdump_cookie_get(dhdp, tmp_buf, LOGDUMP_COOKIE_STR_LEN);
- if (ret != BCME_OK) {
- goto exit;
+ for (i = 0; i < num_fmts; i++) {
+ /* ROM lognums index into logstrs using 'rom_logstrs_offset' as a base
+ * (they are 0-indexed relative to 'rom_logstrs_offset').
+ *
+ * RAM lognums are already indexed to point to the correct RAM logstrs (they
+ * are 0-indexed relative to the start of the logstrs.bin file).
+ */
+ if (i == ram_index) {
+ logstrs = raw_fmts;
}
- remain -= (uint32)strlen(tmp_buf);
- }
- dhd_ring_set_read_idx(dhdp->logdump_cookie, read_idx);
- dhd_ring_set_write_idx(dhdp->logdump_cookie, write_idx);
- len += strlen(COOKIE_LOG_HDR);
- len += sizeof(sec_hdr);
- len += (buf_size - remain);
-exit:
- if (buf)
- MFREE(dhdp->osh, buf, buf_size);
- return len;
-}
-
-int
-dhd_log_dump_cookie(dhd_pub_t *dhdp, const void *user_buf)
-{
- int ret = BCME_ERROR;
- char tmp_buf[LOGDUMP_COOKIE_STR_LEN];
- log_dump_section_hdr_t sec_hdr;
- char *buf = NULL;
- uint32 buf_size = MAX_LOGUDMP_COOKIE_CNT * LOGDUMP_COOKIE_STR_LEN;
- int pos = 0;
- uint32 read_idx;
- uint32 write_idx;
- uint32 remain;
-
- remain = buf_size;
-
- if (!dhdp || !dhdp->logdump_cookie) {
- DHD_ERROR(("%s At least one ptr is NULL "
- "dhdp = %p cookie %p\n",
- __FUNCTION__, dhdp, dhdp?dhdp->logdump_cookie:NULL));
- goto exit;
- }
-
- buf = (char *)MALLOCZ(dhdp->osh, buf_size);
- if (!buf) {
- DHD_ERROR(("%s Fail to malloc buffer\n", __FUNCTION__));
- goto exit;
+ fmts[i] = &logstrs[lognums[i]];
}
+ event_log->fmts = fmts;
+ event_log->raw_fmts_size = logstrs_size;
+ event_log->raw_fmts = raw_fmts;
+ event_log->num_fmts = num_fmts;
- read_idx = dhd_ring_get_read_idx(dhdp->logdump_cookie);
- write_idx = dhd_ring_get_write_idx(dhdp->logdump_cookie);
- while (dhd_logdump_cookie_count(dhdp) > 0) {
- memset(tmp_buf, 0, sizeof(tmp_buf));
- ret = dhd_logdump_cookie_get(dhdp, tmp_buf, LOGDUMP_COOKIE_STR_LEN);
- if (ret != BCME_OK) {
- goto exit;
- }
- remain -= scnprintf(&buf[buf_size - remain], remain, "%s", tmp_buf);
- }
- dhd_ring_set_read_idx(dhdp->logdump_cookie, read_idx);
- dhd_ring_set_write_idx(dhdp->logdump_cookie, write_idx);
- ret = dhd_export_debug_data(COOKIE_LOG_HDR, NULL, user_buf, strlen(COOKIE_LOG_HDR), &pos);
- sec_hdr.magic = LOG_DUMP_MAGIC;
- sec_hdr.timestamp = local_clock();
- sec_hdr.type = LOG_DUMP_SECTION_COOKIE;
- sec_hdr.length = buf_size - remain;
- ret = dhd_export_debug_data((char *)&sec_hdr, NULL, user_buf, sizeof(sec_hdr), &pos);
- ret = dhd_export_debug_data(buf, NULL, user_buf, sec_hdr.length, &pos);
-exit:
- if (buf)
- MFREE(dhdp->osh, buf, buf_size);
- return ret;
+ return BCME_OK;
}
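For the header-based logstrs.bin layout, the parser above resolves each lognum into a pointer inside raw_fmts: ROM entries are offsets from rom_logstrs_offset, while RAM entries (from ram_index onward) are offsets from the start of the file. A minimal sketch of that lookup, using a simplified header that carries only the fields the parser reads; the real logstr_header_t may have a different layout:

typedef struct example_logstr_header {
	uint32 rom_lognums_offset;	/* start of the ROM lognums array */
	uint32 ram_lognums_offset;	/* start of the RAM lognums array */
	uint32 rom_logstrs_offset;	/* start of the ROM format strings */
	uint32 log_magic;		/* LOGSTRS_MAGIC when a header is present */
} example_logstr_header_t;

static const char *
example_fmt_lookup(char *raw_fmts, const example_logstr_header_t *hdr, int i)
{
	uint32 *lognums = (uint32 *)&raw_fmts[hdr->rom_lognums_offset];
	int ram_index = (hdr->ram_lognums_offset - hdr->rom_lognums_offset) /
		sizeof(uint32);

	/* ROM strings are relative to rom_logstrs_offset; RAM strings are
	 * relative to the start of the logstrs.bin image.
	 */
	if (i < ram_index)
		return &raw_fmts[hdr->rom_logstrs_offset + lognums[i]];
	return &raw_fmts[lognums[i]];
}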
-int
-dhd_log_dump_cookie_to_file(dhd_pub_t *dhdp, void *fp, const void *user_buf, unsigned long *f_pos)
+int dhd_parse_map_file(osl_t *osh, void *file, uint32 *ramstart, uint32 *rodata_start,
+ uint32 *rodata_end)
{
- char *buf;
- int ret = BCME_ERROR;
- uint32 buf_size = MAX_LOGUDMP_COOKIE_CNT * LOGDUMP_COOKIE_STR_LEN;
+ char *raw_fmts = NULL;
+ uint32 read_size = READ_NUM_BYTES;
+ int error = 0;
+ char *cptr = NULL;
+ char c;
+ uint8 count = 0;
- if (!dhdp || !dhdp->logdump_cookie || (!fp && !user_buf) || !f_pos) {
- DHD_ERROR(("%s At least one ptr is NULL "
- "dhdp = %p cookie %p fp = %p f_pos = %p\n",
- __FUNCTION__, dhdp, dhdp?dhdp->logdump_cookie:NULL, fp, f_pos));
- return ret;
- }
+ *ramstart = 0;
+ *rodata_start = 0;
+ *rodata_end = 0;
- buf = (char *)MALLOCZ(dhdp->osh, buf_size);
- if (!buf) {
- DHD_ERROR(("%s Fail to malloc buffer\n", __FUNCTION__));
- return ret;
+ /* Allocate 1 byte more than read_size to terminate it with NULL */
+ raw_fmts = MALLOC(osh, read_size + 1);
+ if (raw_fmts == NULL) {
+ DHD_ERROR(("%s: Failed to allocate raw_fmts memory \n", __FUNCTION__));
+ goto fail;
}
- ret = __dhd_log_dump_cookie_to_file(dhdp, fp, user_buf, f_pos, buf, buf_size);
- MFREE(dhdp->osh, buf, buf_size);
-
- return ret;
-}
-
-#endif /* DHD_LOG_DUMP */
-#ifdef DHD_LOG_DUMP
-#define DEBUG_DUMP_TRIGGER_INTERVAL_SEC 4
-void
-dhd_log_dump_trigger(dhd_pub_t *dhdp, int subcmd)
-{
-#if defined(DHD_DUMP_FILE_WRITE_FROM_KERNEL)
- log_dump_type_t *flush_type;
-#endif /* DHD_DUMP_FILE_WRITE_FROM_KERNEL */
- uint64 current_time_sec;
+ /* read ram start, rodata_start and rodata_end values from map file */
+ while (count != ALL_MAP_VAL)
+ {
+ error = dhd_os_read_file(file, raw_fmts, read_size);
+ if (error < 0) {
+ DHD_ERROR(("%s: map file read failed err:%d \n", __FUNCTION__,
+ error));
+ goto fail;
+ }
- if (!dhdp) {
- DHD_ERROR(("dhdp is NULL !\n"));
- return;
- }
+ /* End raw_fmts with NULL as strstr expects NULL terminated strings */
+ raw_fmts[read_size] = '\0';
- if (subcmd >= CMD_MAX || subcmd < CMD_DEFAULT) {
- DHD_ERROR(("%s : Invalid subcmd \n", __FUNCTION__));
- return;
- }
+ /* Get ramstart address */
+ if ((cptr = strstr(raw_fmts, ramstart_str))) {
+ cptr = cptr - BYTES_AHEAD_NUM;
+ sscanf(cptr, "%x %c text_start", ramstart, &c);
+ count |= RAMSTART_BIT;
+ }
- current_time_sec = DIV_U64_BY_U32(OSL_LOCALTIME_NS(), NSEC_PER_SEC);
+ /* Get ram rodata start address */
+ if ((cptr = strstr(raw_fmts, rodata_start_str))) {
+ cptr = cptr - BYTES_AHEAD_NUM;
+ sscanf(cptr, "%x %c rodata_start", rodata_start, &c);
+ count |= RDSTART_BIT;
+ }
- DHD_ERROR(("%s: current_time_sec=%lld debug_dump_time_sec=%lld interval=%d\n",
- __FUNCTION__, current_time_sec, dhdp->debug_dump_time_sec,
- DEBUG_DUMP_TRIGGER_INTERVAL_SEC));
+ /* Get ram rodata end address */
+ if ((cptr = strstr(raw_fmts, rodata_end_str))) {
+ cptr = cptr - BYTES_AHEAD_NUM;
+ sscanf(cptr, "%x %c rodata_end", rodata_end, &c);
+ count |= RDEND_BIT;
+ }
- if ((current_time_sec - dhdp->debug_dump_time_sec) < DEBUG_DUMP_TRIGGER_INTERVAL_SEC) {
- DHD_ERROR(("%s : Last debug dump triggered(%lld) within %d seconds, so SKIP\n",
- __FUNCTION__, dhdp->debug_dump_time_sec, DEBUG_DUMP_TRIGGER_INTERVAL_SEC));
- return;
+ if (error < (int)read_size) {
+ /*
+ * Because the file position is rewound by
+ * GO_BACK_FILE_POS_NUM_BYTES after each pass, a string split
+ * across two reads is still caught in the overlapping window.
+ * A short read (fewer bytes than read_size) means EOF was
+ * reached, so stop reading here.
+ */
+ break;
+ }
+ memset(raw_fmts, 0, read_size);
+ /*
+ * Seek back a predefined number of bytes so that a string and its
+ * address are not missed even if they are split across two reads.
+ */
+ dhd_os_seek_file(file, -GO_BACK_FILE_POS_NUM_BYTES);
}
- clear_debug_dump_time(dhdp->debug_dump_time_str);
- /* */
- dhdp->debug_dump_subcmd = subcmd;
-
- dhdp->debug_dump_time_sec = DIV_U64_BY_U32(OSL_LOCALTIME_NS(), NSEC_PER_SEC);
-#if defined(DHD_DUMP_FILE_WRITE_FROM_KERNEL)
- /* flush_type is freed at do_dhd_log_dump function */
- flush_type = MALLOCZ(dhdp->osh, sizeof(log_dump_type_t));
- if (flush_type) {
- *flush_type = DLD_BUF_TYPE_ALL;
- dhd_schedule_log_dump(dhdp, flush_type);
- } else {
- DHD_ERROR(("%s Fail to malloc flush_type\n", __FUNCTION__));
- return;
+fail:
+ if (raw_fmts) {
+ MFREE(osh, raw_fmts, read_size + 1);
+ raw_fmts = NULL;
}
-#endif /* DHD_DUMP_FILE_WRITE_FROM_KERNEL */
-
- /* Inside dhd_mem_dump, event notification will be sent to HAL and
- * from other context DHD pushes memdump, debug_dump and pktlog dump
- * to HAL and HAL will write into file
- */
-#if (defined(BCMPCIE) || defined(BCMSDIO)) && defined(DHD_FW_COREDUMP)
- dhdp->memdump_type = DUMP_TYPE_BY_SYSDUMP;
- dhd_bus_mem_dump(dhdp);
-#endif /* BCMPCIE && DHD_FW_COREDUMP */
-
-}
-#endif /* DHD_LOG_DUMP */
-
-#ifdef EWP_EDL
-/* For now we are allocating memory for EDL ring using DMA_ALLOC_CONSISTENT
-* The reason being that, in hikey, if we try to DMA_MAP prealloced memory
-* it is failing with an 'out of space in SWIOTLB' error
-*/
-int
-dhd_edl_mem_init(dhd_pub_t *dhd)
-{
- int ret = 0;
-
- memset(&dhd->edl_ring_mem, 0, sizeof(dhd->edl_ring_mem));
- ret = dhd_dma_buf_alloc(dhd, &dhd->edl_ring_mem, DHD_EDL_RING_SIZE);
- if (ret != BCME_OK) {
- DHD_ERROR(("%s: alloc of edl_ring_mem failed\n",
- __FUNCTION__));
+ if (count == ALL_MAP_VAL)
+ return BCME_OK;
+ else {
+ DHD_ERROR(("%s: readmap error 0X%x \n", __FUNCTION__,
+ count));
return BCME_ERROR;
}
- return BCME_OK;
-}
-/* NOTE:- that dhd_edl_mem_deinit need NOT be called explicitly, because the dma_buf
-* for EDL is freed during 'dhd_prot_detach_edl_rings' which is called during de-init.
-*/
-void
-dhd_edl_mem_deinit(dhd_pub_t *dhd)
-{
- if (dhd->edl_ring_mem.va != NULL)
- dhd_dma_buf_free(dhd, &dhd->edl_ring_mem);
}
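dhd_parse_map_file() scans the map file in READ_NUM_BYTES windows and rewinds by GO_BACK_FILE_POS_NUM_BYTES between reads, so a symbol name split across two reads still shows up whole in the next window; each match sets a bit in count until ALL_MAP_VAL is reached. The per-symbol extraction itself is a plain sscanf() on the bytes just before the matched name. A small sketch of that extraction, with a made-up address and the same format string the parser uses:

static void
example_parse_map_line(void)
{
	/* Typical map-file line: "<hex address> <symbol type> <symbol name>" */
	const char *line = "001e8d40 T text_start";
	uint32 ramstart = 0;
	char c = 0;

	if (sscanf(line, "%x %c text_start", &ramstart, &c) == 2) {
		/* ramstart == 0x001e8d40, c == 'T' */
		DHD_INFO(("ramstart=0x%x type=%c\n", ramstart, c));
	}
}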
+#ifdef PCIE_FULL_DONGLE
int
-dhd_event_logtrace_process_edl(dhd_pub_t *dhdp, uint8 *data,
- void *evt_decode_data)
+dhd_event_logtrace_infobuf_pkt_process(dhd_pub_t *dhdp, void *pktbuf,
+ dhd_event_log_t *event_data)
{
- msg_hdr_edl_t *msg = NULL;
- cmn_msg_hdr_t *cmn_msg_hdr = NULL;
- uint8 *buf = NULL;
+ uint32 infobuf_version;
+ info_buf_payload_hdr_t *payload_hdr_ptr;
+ uint16 payload_hdr_type;
+ uint16 payload_hdr_length;
- if (!data || !dhdp || !evt_decode_data) {
- DHD_ERROR(("%s: invalid args ! \n", __FUNCTION__));
- return BCME_ERROR;
- }
+ DHD_TRACE(("%s:Enter\n", __FUNCTION__));
- /* format of data in each work item in the EDL ring:
- * |cmn_msg_hdr_t |payload (var len)|cmn_msg_hdr_t|
- * payload = |infobuf_ver(u32)|info_buf_payload_hdr_t|msgtrace_hdr_t|<var len data>|
- */
- cmn_msg_hdr = (cmn_msg_hdr_t *)data;
- msg = (msg_hdr_edl_t *)(data + sizeof(cmn_msg_hdr_t));
- buf = (uint8 *)msg;
- /* validate the fields */
- if (ltoh32(msg->infobuf_ver) != PCIE_INFOBUF_V1) {
- DHD_ERROR(("%s: Skipping msg with invalid infobuf ver (0x%x)"
- " expected (0x%x)\n", __FUNCTION__,
- msg->infobuf_ver, PCIE_INFOBUF_V1));
- return BCME_VERSION;
- }
-
- /* in EDL, the request_id field of cmn_msg_hdr is overloaded to carry payload length */
- if (sizeof(info_buf_payload_hdr_t) > cmn_msg_hdr->request_id) {
- DHD_ERROR(("%s: infobuf too small for v1 type/length fields\n",
+ if (PKTLEN(dhdp->osh, pktbuf) < sizeof(uint32)) {
+ DHD_ERROR(("%s: infobuf too small for version field\n",
__FUNCTION__));
- return BCME_BUFTOOLONG;
- }
-
- if (ltoh16(msg->pyld_hdr.type) != PCIE_INFOBUF_V1_TYPE_LOGTRACE) {
- DHD_ERROR(("%s: payload_hdr_type %d is not V1_TYPE_LOGTRACE\n",
- __FUNCTION__, ltoh16(msg->pyld_hdr.type)));
- return BCME_BADOPTION;
- }
-
- if (ltoh16(msg->pyld_hdr.length) > cmn_msg_hdr->request_id) {
- DHD_ERROR(("%s: infobuf logtrace length %u is bigger"
- " than available buffer size %u\n", __FUNCTION__,
- ltoh16(msg->pyld_hdr.length), cmn_msg_hdr->request_id));
- return BCME_BADLEN;
- }
-
- /* dhd_dbg_trace_evnt_handler expects the data to start from msgtrace_hdr_t */
- buf += sizeof(msg->infobuf_ver) + sizeof(msg->pyld_hdr);
- dhd_dbg_trace_evnt_handler(dhdp, buf, evt_decode_data,
- ltoh16(msg->pyld_hdr.length));
-
- /* check 'dhdp->logtrace_pkt_sendup' and if true alloc an skb
- * copy the event data to the skb and send it up the stack
- */
- if (dhdp->logtrace_pkt_sendup) {
- DHD_INFO(("%s: send up event log, len %u bytes\n", __FUNCTION__,
- (uint32)(ltoh16(msg->pyld_hdr.length) +
- sizeof(info_buf_payload_hdr_t) + 4)));
- dhd_sendup_info_buf(dhdp, (uint8 *)msg);
- }
-
- return BCME_OK;
-}
-#endif /* EWP_EDL */
-
-#if defined(SHOW_LOGTRACE)
-int
-dhd_print_fw_ver_from_file(dhd_pub_t *dhdp, char *fwpath)
-{
- void *file = NULL;
- int size = 0;
- char buf[FW_VER_STR_LEN];
- char *str = NULL;
- int ret = BCME_OK;
-
- if (!fwpath)
- return BCME_BADARG;
-
- file = dhd_os_open_image1(dhdp, fwpath);
- if (!file) {
- ret = BCME_ERROR;
- goto exit;
- }
- size = dhd_os_get_image_size(file);
- if (!size) {
- ret = BCME_ERROR;
goto exit;
}
-
- /* seek to the last 'X' bytes in the file */
- if (dhd_os_seek_file(file, size - FW_VER_STR_LEN) != BCME_OK) {
- ret = BCME_ERROR;
+ infobuf_version = *((uint32 *)PKTDATA(dhdp->osh, pktbuf));
+ PKTPULL(dhdp->osh, pktbuf, sizeof(uint32));
+ if (infobuf_version != PCIE_INFOBUF_V1) {
+ DHD_ERROR(("%s: infobuf version %d is not PCIE_INFOBUF_V1\n",
+ __FUNCTION__, infobuf_version));
goto exit;
}
- /* read the last 'X' bytes of the file to a buffer */
- memset(buf, 0, FW_VER_STR_LEN);
- if (dhd_os_get_image_block(buf, FW_VER_STR_LEN - 1, file) < 0) {
- ret = BCME_ERROR;
+ /* Version 1 infobuf has a single type/length (and then value) field */
+ if (PKTLEN(dhdp->osh, pktbuf) < sizeof(info_buf_payload_hdr_t)) {
+ DHD_ERROR(("%s: infobuf too small for v1 type/length fields\n",
+ __FUNCTION__));
goto exit;
}
- /* search for 'Version' in the buffer */
- str = bcmstrnstr(buf, FW_VER_STR_LEN, FW_VER_STR, strlen(FW_VER_STR));
- if (!str) {
- ret = BCME_ERROR;
+ /* Process/parse the common info payload header (type/length) */
+ payload_hdr_ptr = (info_buf_payload_hdr_t *)PKTDATA(dhdp->osh, pktbuf);
+ payload_hdr_type = ltoh16(payload_hdr_ptr->type);
+ payload_hdr_length = ltoh16(payload_hdr_ptr->length);
+ if (payload_hdr_type != PCIE_INFOBUF_V1_TYPE_LOGTRACE) {
+ DHD_ERROR(("%s: payload_hdr_type %d is not V1_TYPE_LOGTRACE\n",
+ __FUNCTION__, payload_hdr_type));
goto exit;
}
- /* go back in the buffer to the last ascii character */
- while (str != buf &&
- (*str >= ' ' && *str <= '~')) {
- --str;
- }
- /* reverse the final decrement, so that str is pointing
- * to the first ascii character in the buffer
- */
- ++str;
+ PKTPULL(dhdp->osh, pktbuf, sizeof(info_buf_payload_hdr_t));
- if (strlen(str) > (FW_VER_STR_LEN - 1)) {
- ret = BCME_BADLEN;
+ /* Validate that the specified length isn't bigger than the
+ * provided data.
+ */
+ if (payload_hdr_length > PKTLEN(dhdp->osh, pktbuf)) {
+ DHD_ERROR(("%s: infobuf logtrace length is bigger"
+ " than actual buffer data\n", __FUNCTION__));
goto exit;
}
+ dhd_dbg_trace_evnt_handler(dhdp, PKTDATA(dhdp->osh, pktbuf),
+ event_data, payload_hdr_length);
- DHD_ERROR(("FW version in file '%s': %s\n", fwpath, str));
- /* copy to global variable, so that in case FW load fails, the
- * core capture logs will contain FW version read from the file
- */
- memset(fw_version, 0, FW_VER_STR_LEN);
- strlcpy(fw_version, str, FW_VER_STR_LEN);
+ return BCME_OK;
exit:
- if (file)
- dhd_os_close_image1(dhdp, file);
+ return BCME_ERROR;
+}
+#endif /* PCIE_FULL_DONGLE */
+#endif /* SHOW_LOGTRACE */
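dhd_event_logtrace_infobuf_pkt_process() walks a version-1 infobuf in three steps: a 32-bit version word, a type/length payload header, then the logtrace payload, checking the remaining packet length before each step. A minimal sketch of the same walk over a flat buffer, assuming the payload header really is a pair of little-endian 16-bit type/length fields, as the ltoh16() calls suggest:

struct example_infobuf_hdr {
	uint16 type;	/* PCIE_INFOBUF_V1_TYPE_LOGTRACE for event logs */
	uint16 length;	/* payload bytes that follow this header */
};

static int
example_parse_infobuf_v1(const uint8 *buf, uint32 buflen,
	const uint8 **payload, uint16 *payload_len)
{
	const struct example_infobuf_hdr *hdr;
	uint32 version;

	if (buflen < sizeof(uint32) + sizeof(*hdr))
		return BCME_BUFTOOSHORT;

	memcpy(&version, buf, sizeof(version));
	if (ltoh32(version) != PCIE_INFOBUF_V1)
		return BCME_VERSION;

	hdr = (const struct example_infobuf_hdr *)(buf + sizeof(uint32));
	if (ltoh16(hdr->type) != PCIE_INFOBUF_V1_TYPE_LOGTRACE)
		return BCME_BADOPTION;
	if (ltoh16(hdr->length) > buflen - sizeof(uint32) - sizeof(*hdr))
		return BCME_BADLEN;

	*payload = buf + sizeof(uint32) + sizeof(*hdr);
	*payload_len = ltoh16(hdr->length);
	return BCME_OK;
}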
+
+#if defined(WLTDLS) && defined(PCIE_FULL_DONGLE)
+/* Handle TDLS events in dhd_common.c */
+int dhd_tdls_event_handler(dhd_pub_t *dhd_pub, wl_event_msg_t *event)
+{
+ int ret = BCME_OK;
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wcast-qual"
+#endif
+ ret = dhd_tdls_update_peer_info(dhd_pub, event);
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic pop
+#endif
return ret;
}
-#endif // endif
-#if defined(DHD_H2D_LOG_TIME_SYNC)
-/*
- * Helper function:
- * Used for Dongle console message time syncing with Host printk
- */
-void dhd_h2d_log_time_sync(dhd_pub_t *dhd)
+int dhd_free_tdls_peer_list(dhd_pub_t *dhd_pub)
{
- uint64 ts;
+ tdls_peer_node_t *cur = NULL, *prev = NULL;
+ if (!dhd_pub)
+ return BCME_ERROR;
+ cur = dhd_pub->peer_tbl.node;
- /*
- * local_clock() returns time in nano seconds.
- * Dongle understand only milli seconds time.
- */
- ts = local_clock();
- /* Nano seconds to milli seconds */
- do_div(ts, 1000000);
- if (dhd_wl_ioctl_set_intiovar(dhd, "rte_timesync", ts, WLC_SET_VAR, TRUE, 0)) {
- DHD_ERROR(("%s rte_timesync **** FAILED ****\n", __FUNCTION__));
- /* Stopping HOST Dongle console time syncing */
- dhd->dhd_rte_time_sync_ms = 0;
+ if ((dhd_pub->peer_tbl.node == NULL) && !dhd_pub->peer_tbl.tdls_peer_count)
+ return BCME_ERROR;
+
+ while (cur != NULL) {
+ prev = cur;
+ cur = cur->next;
+ MFREE(dhd_pub->osh, prev, sizeof(tdls_peer_node_t));
}
+ dhd_pub->peer_tbl.tdls_peer_count = 0;
+ dhd_pub->peer_tbl.node = NULL;
+ return BCME_OK;
}
-#endif /* DHD_H2D_LOG_TIME_SYNC */
+#endif /* #if defined(WLTDLS) && defined(PCIE_FULL_DONGLE) */
-#if defined(DISABLE_HE_ENAB) || defined(CUSTOM_CONTROL_HE_ENAB)
-int
-dhd_control_he_enab(dhd_pub_t * dhd, uint8 he_enab)
+#ifdef DUMP_IOCTL_IOV_LIST
+void
+dhd_iov_li_append(dhd_pub_t *dhd, dll_t *list_head, dll_t *node)
{
- int ret = BCME_OK;
- bcm_xtlv_t *pxtlv = NULL;
- uint8 mybuf[DHD_IOVAR_BUF_SIZE];
- uint16 mybuf_len = sizeof(mybuf);
- pxtlv = (bcm_xtlv_t *)mybuf;
-
- ret = bcm_pack_xtlv_entry((uint8**)&pxtlv, &mybuf_len, WL_HE_CMD_ENAB, sizeof(he_enab),
- &he_enab, BCM_XTLV_OPTION_ALIGN32);
+ dll_t *item;
+ dhd_iov_li_t *iov_li;
+ dhd->dump_iovlist_len++;
- if (ret != BCME_OK) {
- ret = -EINVAL;
- DHD_ERROR(("%s failed to pack he enab, err: %s\n", __FUNCTION__, bcmerrorstr(ret)));
- return ret;
+ if (dhd->dump_iovlist_len == IOV_LIST_MAX_LEN+1) {
+ item = dll_head_p(list_head);
+ iov_li = (dhd_iov_li_t *)CONTAINEROF(item, dhd_iov_li_t, list);
+ dll_delete(item);
+ MFREE(dhd->osh, iov_li, sizeof(*iov_li));
+ dhd->dump_iovlist_len--;
}
+ dll_append(list_head, node);
+}
- ret = dhd_iovar(dhd, 0, "he", (char *)&mybuf, sizeof(mybuf), NULL, 0, TRUE);
- if (ret < 0) {
- DHD_ERROR(("%s he_enab (%d) set failed, err: %s\n",
- __FUNCTION__, he_enab, bcmerrorstr(ret)));
- } else {
- DHD_ERROR(("%s he_enab (%d) set successed\n", __FUNCTION__, he_enab));
+void
+dhd_iov_li_print(dll_t *list_head)
+{
+ dhd_iov_li_t *iov_li;
+ dll_t *item, *next;
+ uint8 index = 0;
+ for (item = dll_head_p(list_head); !dll_end(list_head, item); item = next) {
+ next = dll_next_p(item);
+ iov_li = (dhd_iov_li_t *)CONTAINEROF(item, dhd_iov_li_t, list);
+ index++;
+ DHD_ERROR(("%d:cmd_name = %s, cmd = %d.\n", index, iov_li->buff, iov_li->cmd));
}
+}
- return ret;
+void
+dhd_iov_li_delete(dhd_pub_t *dhd, dll_t *list_head)
+{
+ dll_t *item;
+ dhd_iov_li_t *iov_li;
+ while (!(dll_empty(list_head))) {
+ item = dll_head_p(list_head);
+ iov_li = (dhd_iov_li_t *)CONTAINEROF(item, dhd_iov_li_t, list);
+ dll_delete(item);
+ MFREE(dhd->osh, iov_li, sizeof(*iov_li));
+ }
}
-#endif /* DISABLE_HE_ENAB || CUSTOM_CONTROL_HE_ENAB */
+#endif /* DUMP_IOCTL_IOV_LIST */
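dhd_iov_li_append() caps the history at IOV_LIST_MAX_LEN entries: once the list would grow past that, the oldest node at the head is freed before the new one is appended, so only the most recent ioctls are kept. A minimal usage sketch, assuming list_head is an already-initialised dll_t and that dhd_iov_li_t embeds a small buff[] array for the command name alongside its cmd field, as dhd_iov_li_print() implies:

static void
example_record_ioctl(dhd_pub_t *dhd, dll_t *list_head, uint cmd, const char *name)
{
	dhd_iov_li_t *iov_li = MALLOCZ(dhd->osh, sizeof(*iov_li));

	if (iov_li == NULL)
		return;

	iov_li->cmd = cmd;
	/* Assumes buff is an embedded char array, not a pointer */
	strncpy(iov_li->buff, name, sizeof(iov_li->buff) - 1);

	/* The oldest entry is evicted automatically once the cap is hit */
	dhd_iov_li_append(dhd, list_head, &iov_li->list);
}

On an ioctl failure the accumulated list can then be dumped with dhd_iov_li_print() and released with dhd_iov_li_delete().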
-\r
-#include <typedefs.h>\r
-#include <osl.h>\r
-\r
-#include <bcmendian.h>\r
-#include <bcmutils.h>\r
-#include <hndsoc.h>\r
-#include <bcmsdbus.h>\r
-#if defined(HW_OOB) || defined(FORCE_WOWLAN)\r
-#include <bcmdefs.h>\r
-#include <bcmsdh.h>\r
-#include <sdio.h>\r
-#include <sbchipc.h>\r
-#endif\r
-#ifdef WL_CFG80211\r
-#include <wl_cfg80211.h>\r
-#endif\r
-\r
-#include <dhd_config.h>\r
-#include <dhd_dbg.h>\r
-#include <wl_android.h>\r
-\r
-/* message levels */\r
-#define CONFIG_ERROR_LEVEL (1 << 0)\r
-#define CONFIG_TRACE_LEVEL (1 << 1)\r
-#define CONFIG_MSG_LEVEL (1 << 0)\r
-\r
-uint config_msg_level = CONFIG_ERROR_LEVEL | CONFIG_MSG_LEVEL;\r
-uint dump_msg_level = 0;\r
-\r
-#define CONFIG_MSG(x, args...) \\r
- do { \\r
- if (config_msg_level & CONFIG_MSG_LEVEL) { \\r
- printk(KERN_ERR "[dhd] %s : " x, __func__, ## args); \\r
- } \\r
- } while (0)\r
-#define CONFIG_ERROR(x, args...) \\r
- do { \\r
- if (config_msg_level & CONFIG_ERROR_LEVEL) { \\r
- printk(KERN_ERR "[dhd] CONFIG-ERROR) %s : " x, __func__, ## args); \\r
- } \\r
- } while (0)\r
-#define CONFIG_TRACE(x, args...) \\r
- do { \\r
- if (config_msg_level & CONFIG_TRACE_LEVEL) { \\r
- printk(KERN_INFO "[dhd] CONFIG-TRACE) %s : " x, __func__, ## args); \\r
- } \\r
- } while (0)\r
-\r
-#define MAXSZ_BUF 4096\r
-#define MAXSZ_CONFIG 8192\r
-\r
-#ifndef WL_CFG80211\r
-#define htod32(i) i\r
-#define htod16(i) i\r
-#define dtoh32(i) i\r
-#define dtoh16(i) i\r
-#define htodchanspec(i) i\r
-#define dtohchanspec(i) i\r
-#endif\r
-\r
-#if defined(PROP_TXSTATUS)\r
-#include <dhd_wlfc.h>\r
-#endif /* PROP_TXSTATUS */\r
-\r
-#define MAX_EVENT_BUF_NUM 16\r
-typedef struct eventmsg_buf {\r
- u16 num;\r
- struct {\r
- u16 type;\r
- bool set;\r
- } event [MAX_EVENT_BUF_NUM];\r
-} eventmsg_buf_t;\r
-\r
-typedef struct cihp_name_map_t {\r
- uint chip;\r
- uint chiprev;\r
- uint ag_type;\r
- char *chip_name;\r
- char *module_name;\r
-} cihp_name_map_t;\r
-\r
-/* Map of WLC_E events to connection failure strings */\r
-#define DONT_CARE 9999\r
-const cihp_name_map_t chip_name_map[] = {\r
- /* ChipID Chiprev AG ChipName ModuleName */\r
-#ifdef BCMSDIO\r
- {BCM43362_CHIP_ID, 0, DONT_CARE, "bcm40181a0", ""},\r
- {BCM43362_CHIP_ID, 1, DONT_CARE, "bcm40181a2", ""},\r
- {BCM4330_CHIP_ID, 4, FW_TYPE_G, "bcm40183b2", ""},\r
- {BCM4330_CHIP_ID, 4, FW_TYPE_AG, "bcm40183b2_ag", ""},\r
- {BCM43430_CHIP_ID, 0, DONT_CARE, "bcm43438a0", "ap6212"},\r
- {BCM43430_CHIP_ID, 1, DONT_CARE, "bcm43438a1", "ap6212a"},\r
- {BCM43430_CHIP_ID, 2, DONT_CARE, "bcm43436b0", "ap6236"},\r
- {BCM43012_CHIP_ID, 1, FW_TYPE_G, "bcm43013b0", ""},\r
- {BCM43012_CHIP_ID, 1, FW_TYPE_AG, "bcm43013c0_ag", ""},\r
- {BCM43012_CHIP_ID, 2, DONT_CARE, "bcm43013c1_ag", ""},\r
- {BCM4334_CHIP_ID, 3, DONT_CARE, "bcm4334b1_ag", ""},\r
- {BCM43340_CHIP_ID, 2, DONT_CARE, "bcm43341b0_ag", ""},\r
- {BCM43341_CHIP_ID, 2, DONT_CARE, "bcm43341b0_ag", ""},\r
- {BCM4324_CHIP_ID, 5, DONT_CARE, "bcm43241b4_ag", ""},\r
- {BCM4335_CHIP_ID, 2, DONT_CARE, "bcm4339a0_ag", ""},\r
- {BCM4339_CHIP_ID, 1, DONT_CARE, "bcm4339a0_ag", "ap6335"},\r
- {BCM4345_CHIP_ID, 6, DONT_CARE, "bcm43455c0_ag", "ap6255"},\r
- {BCM43454_CHIP_ID, 6, DONT_CARE, "bcm43455c0_ag", ""},\r
- {BCM4345_CHIP_ID, 9, DONT_CARE, "bcm43456c5_ag", "ap6256"},\r
- {BCM43454_CHIP_ID, 9, DONT_CARE, "bcm43456c5_ag", ""},\r
- {BCM4354_CHIP_ID, 1, DONT_CARE, "bcm4354a1_ag", ""},\r
- {BCM4354_CHIP_ID, 2, DONT_CARE, "bcm4356a2_ag", "ap6356"},\r
- {BCM4356_CHIP_ID, 2, DONT_CARE, "bcm4356a2_ag", ""},\r
- {BCM4371_CHIP_ID, 2, DONT_CARE, "bcm4356a2_ag", ""},\r
- {BCM43569_CHIP_ID, 3, DONT_CARE, "bcm4358a3_ag", ""},\r
- {BCM4359_CHIP_ID, 5, DONT_CARE, "bcm4359b1_ag", ""},\r
- {BCM4359_CHIP_ID, 9, DONT_CARE, "bcm4359c0_ag", "ap6398s"},\r
- {BCM43751_CHIP_ID, 1, DONT_CARE, "bcm43751a1_ag", ""},\r
- {BCM43751_CHIP_ID, 2, DONT_CARE, "bcm43751a2_ag", ""},\r
- {BCM43752_CHIP_ID, 1, DONT_CARE, "bcm43752a1_ag", ""},\r
- {BCM43752_CHIP_ID, 2, DONT_CARE, "bcm43752a2_ag", ""},\r
-#endif\r
-#ifdef BCMPCIE\r
- {BCM4354_CHIP_ID, 2, DONT_CARE, "bcm4356a2_pcie_ag", ""},\r
- {BCM4356_CHIP_ID, 2, DONT_CARE, "bcm4356a2_pcie_ag", ""},\r
- {BCM4359_CHIP_ID, 9, DONT_CARE, "bcm4359c0_pcie_ag", ""},\r
- {BCM43751_CHIP_ID, 1, DONT_CARE, "bcm43751a1_pcie_ag", ""},\r
- {BCM43751_CHIP_ID, 2, DONT_CARE, "bcm43751a2_pcie_ag", ""},\r
- {BCM43752_CHIP_ID, 1, DONT_CARE, "bcm43752a1_pcie_ag", ""},\r
- {BCM43752_CHIP_ID, 2, DONT_CARE, "bcm43752a2_pcie_ag", ""},\r
-#endif\r
-#ifdef BCMDBUS\r
- {BCM43143_CHIP_ID, 2, DONT_CARE, "bcm43143b0", ""},\r
- {BCM43242_CHIP_ID, 1, DONT_CARE, "bcm43242a1_ag", ""},\r
- {BCM43569_CHIP_ID, 2, DONT_CARE, "bcm4358u_ag", "ap62x8"},\r
-#endif\r
-};\r
-\r
-void\r
-dhd_conf_free_chip_nv_path_list(wl_chip_nv_path_list_ctrl_t *chip_nv_list)\r
-{\r
- CONFIG_TRACE("called\n");\r
-\r
- if (chip_nv_list->m_chip_nv_path_head) {\r
- CONFIG_TRACE("Free %p\n", chip_nv_list->m_chip_nv_path_head);\r
- kfree(chip_nv_list->m_chip_nv_path_head);\r
- }\r
- chip_nv_list->count = 0;\r
-}\r
-\r
-#ifdef BCMSDIO\r
-void\r
-dhd_conf_free_mac_list(wl_mac_list_ctrl_t *mac_list)\r
-{\r
- int i;\r
-\r
- CONFIG_TRACE("called\n");\r
- if (mac_list->m_mac_list_head) {\r
- for (i=0; i<mac_list->count; i++) {\r
- if (mac_list->m_mac_list_head[i].mac) {\r
- CONFIG_TRACE("Free mac %p\n", mac_list->m_mac_list_head[i].mac);\r
- kfree(mac_list->m_mac_list_head[i].mac);\r
- }\r
- }\r
- CONFIG_TRACE("Free m_mac_list_head %p\n", mac_list->m_mac_list_head);\r
- kfree(mac_list->m_mac_list_head);\r
- }\r
- mac_list->count = 0;\r
-}\r
-\r
-#if defined(HW_OOB) || defined(FORCE_WOWLAN)\r
-void\r
-dhd_conf_set_hw_oob_intr(bcmsdh_info_t *sdh, struct si_pub *sih)\r
-{\r
- uint32 gpiocontrol, addr;\r
-\r
- if (CHIPID(sih->chip) == BCM43362_CHIP_ID) {\r
- CONFIG_MSG("Enable HW OOB for 43362\n");\r
- addr = SI_ENUM_BASE(sih) + OFFSETOF(chipcregs_t, gpiocontrol);\r
- gpiocontrol = bcmsdh_reg_read(sdh, addr, 4);\r
- gpiocontrol |= 0x2;\r
- bcmsdh_reg_write(sdh, addr, 4, gpiocontrol);\r
- bcmsdh_cfg_write(sdh, SDIO_FUNC_1, 0x10005, 0xf, NULL);\r
- bcmsdh_cfg_write(sdh, SDIO_FUNC_1, 0x10006, 0x0, NULL);\r
- bcmsdh_cfg_write(sdh, SDIO_FUNC_1, 0x10007, 0x2, NULL);\r
- }\r
-}\r
-#endif\r
-\r
-#define SBSDIO_CIS_SIZE_LIMIT 0x200\r
-void\r
-dhd_conf_get_otp(dhd_pub_t *dhd, bcmsdh_info_t *sdh, si_t *sih)\r
-{\r
- int i, err = -1;\r
- uint8 *ptr = 0, *ptpl_code = NULL;\r
- unsigned char tpl_code, tpl_link='\0';\r
- uint8 mac_header[3] = {0x80, 0x07, 0x19};\r
- uint8 *cis;\r
-\r
- if (!(cis = MALLOC(dhd->osh, SBSDIO_CIS_SIZE_LIMIT))) {\r
- CONFIG_ERROR("cis malloc failed\n");\r
- }\r
- bzero(cis, SBSDIO_CIS_SIZE_LIMIT);\r
-\r
- if ((err = bcmsdh_cis_read(sdh, 0, cis, SBSDIO_CIS_SIZE_LIMIT))) {\r
- CONFIG_ERROR("cis read err %d\n", err);\r
- MFREE(dhd->osh, cis, SBSDIO_CIS_SIZE_LIMIT);\r
- return;\r
- }\r
- ptr = cis;\r
- do {\r
- /* 0xff means we're done */\r
- tpl_code = *ptr;\r
- ptpl_code = ptr;\r
- ptr++;\r
- if (tpl_code == 0xff)\r
- break;\r
-\r
- /* null entries have no link field or data */\r
- if (tpl_code == 0x00)\r
- continue;\r
-\r
- tpl_link = *ptr;\r
- ptr++;\r
- /* a size of 0xff also means we're done */\r
- if (tpl_link == 0xff)\r
- break;\r
- if (config_msg_level & CONFIG_TRACE_LEVEL) {\r
- prhex("TPL", ptpl_code, tpl_link+2);\r
- }\r
-\r
- if (tpl_code == 0x80 && tpl_link == 0x07 && *ptr == 0x19) {\r
- memcpy(&dhd->conf->otp_mac, ptr+1, 6);\r
- }\r
-#ifdef GET_OTP_MODULE_NAME\r
- else if (tpl_code == 0x8e && *ptr == 0x41) {\r
- int len = tpl_link - 1;\r
- if (len <= sizeof(dhd->conf->module_name) - 1) {\r
- strncpy(dhd->conf->module_name, ptr+1, len);\r
- CONFIG_MSG("module_name=%s\n", dhd->conf->module_name);\r
- } else {\r
- CONFIG_ERROR("len is too long %d >= %d\n",\r
- len, (int)sizeof(dhd->conf->module_name) - 1);\r
- }\r
- }\r
-#endif\r
-\r
- ptr += tpl_link;\r
- } while (1);\r
-\r
- if (!memcmp(&ether_null, &dhd->conf->otp_mac, ETHER_ADDR_LEN)) {\r
- ptr = cis;\r
- /* Special OTP */\r
- if (bcmsdh_reg_read(sdh, SI_ENUM_BASE(sih), 4) == 0x16044330) {\r
- for (i=0; i<SBSDIO_CIS_SIZE_LIMIT; i++) {\r
- if (!memcmp(mac_header, ptr, 3)) {\r
- memcpy(&dhd->conf->otp_mac, ptr+3, 6);\r
- break;\r
- }\r
- ptr++;\r
- }\r
- }\r
- }\r
-\r
- ASSERT(cis);\r
- MFREE(dhd->osh, cis, SBSDIO_CIS_SIZE_LIMIT);\r
-}\r
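The CIS walk above reads tuples of the form <code><link><body>, where link is the body length, and treats code 0x80 with link 0x07 and a leading 0x19 body byte as the OTP MAC tuple. A tiny worked example of that framing with a made-up address; only the tuple layout is taken from the parser above:

static void
example_read_mac_tuple(void)
{
	/* code, link, then a 7-byte body: the 0x19 marker plus the MAC */
	uint8 example_cis[] = {
		0x80, 0x07, 0x19, 0x00, 0x90, 0x4c, 0x11, 0x22, 0x33,
		0xff	/* 0xff terminates the CIS */
	};
	uint8 mac[ETHER_ADDR_LEN];

	if (example_cis[0] == 0x80 && example_cis[1] == 0x07 &&
	    example_cis[2] == 0x19) {
		/* mac now holds 00:90:4c:11:22:33 for this made-up tuple */
		memcpy(mac, &example_cis[3], ETHER_ADDR_LEN);
	}
}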
-\r
-void\r
-dhd_conf_set_fw_name_by_mac(dhd_pub_t *dhd, char *fw_path)\r
-{\r
- int i, j;\r
- uint8 *mac = (uint8 *)&dhd->conf->otp_mac;\r
- int fw_num=0, mac_num=0;\r
- uint32 oui, nic;\r
- wl_mac_list_t *mac_list;\r
- wl_mac_range_t *mac_range;\r
- int fw_type, fw_type_new;\r
- char *name_ptr;\r
-\r
- mac_list = dhd->conf->fw_by_mac.m_mac_list_head;\r
- fw_num = dhd->conf->fw_by_mac.count;\r
- if (!mac_list || !fw_num)\r
- return;\r
-\r
- oui = (mac[0] << 16) | (mac[1] << 8) | (mac[2]);\r
- nic = (mac[3] << 16) | (mac[4] << 8) | (mac[5]);\r
-\r
- /* find out the last '/' */\r
- i = strlen(fw_path);\r
- while (i > 0) {\r
- if (fw_path[i] == '/') {\r
- i++;\r
- break;\r
- }\r
- i--;\r
- }\r
- name_ptr = &fw_path[i];\r
-\r
- if (strstr(name_ptr, "_apsta"))\r
- fw_type = FW_TYPE_APSTA;\r
- else if (strstr(name_ptr, "_p2p"))\r
- fw_type = FW_TYPE_P2P;\r
- else if (strstr(name_ptr, "_mesh"))\r
- fw_type = FW_TYPE_MESH;\r
- else if (strstr(name_ptr, "_es"))\r
- fw_type = FW_TYPE_ES;\r
- else if (strstr(name_ptr, "_mfg"))\r
- fw_type = FW_TYPE_MFG;\r
- else\r
- fw_type = FW_TYPE_STA;\r
-\r
- for (i=0; i<fw_num; i++) {\r
- mac_num = mac_list[i].count;\r
- mac_range = mac_list[i].mac;\r
- if (strstr(mac_list[i].name, "_apsta"))\r
- fw_type_new = FW_TYPE_APSTA;\r
- else if (strstr(mac_list[i].name, "_p2p"))\r
- fw_type_new = FW_TYPE_P2P;\r
- else if (strstr(mac_list[i].name, "_mesh"))\r
- fw_type_new = FW_TYPE_MESH;\r
- else if (strstr(mac_list[i].name, "_es"))\r
- fw_type_new = FW_TYPE_ES;\r
- else if (strstr(mac_list[i].name, "_mfg"))\r
- fw_type_new = FW_TYPE_MFG;\r
- else\r
- fw_type_new = FW_TYPE_STA;\r
- if (fw_type != fw_type_new) {\r
- CONFIG_MSG("fw_typ=%d != fw_type_new=%d\n", fw_type, fw_type_new);\r
- continue;\r
- }\r
- for (j=0; j<mac_num; j++) {\r
- if (oui == mac_range[j].oui) {\r
- if (nic >= mac_range[j].nic_start && nic <= mac_range[j].nic_end) {\r
- strcpy(name_ptr, mac_list[i].name);\r
- CONFIG_MSG("matched oui=0x%06X, nic=0x%06X\n", oui, nic);\r
- CONFIG_MSG("fw_path=%s\n", fw_path);\r
- return;\r
- }\r
- }\r
- }\r
- }\r
-}\r
-\r
-void\r
-dhd_conf_set_nv_name_by_mac(dhd_pub_t *dhd, char *nv_path)\r
-{\r
- int i, j;\r
- uint8 *mac = (uint8 *)&dhd->conf->otp_mac;\r
- int nv_num=0, mac_num=0;\r
- uint32 oui, nic;\r
- wl_mac_list_t *mac_list;\r
- wl_mac_range_t *mac_range;\r
- char *pnv_name;\r
-\r
- mac_list = dhd->conf->nv_by_mac.m_mac_list_head;\r
- nv_num = dhd->conf->nv_by_mac.count;\r
- if (!mac_list || !nv_num)\r
- return;\r
-\r
- oui = (mac[0] << 16) | (mac[1] << 8) | (mac[2]);\r
- nic = (mac[3] << 16) | (mac[4] << 8) | (mac[5]);\r
-\r
- /* find out the last '/' */\r
- i = strlen(nv_path);\r
- while (i > 0) {\r
- if (nv_path[i] == '/') break;\r
- i--;\r
- }\r
- pnv_name = &nv_path[i+1];\r
-\r
- for (i=0; i<nv_num; i++) {\r
- mac_num = mac_list[i].count;\r
- mac_range = mac_list[i].mac;\r
- for (j=0; j<mac_num; j++) {\r
- if (oui == mac_range[j].oui) {\r
- if (nic >= mac_range[j].nic_start && nic <= mac_range[j].nic_end) {\r
- strcpy(pnv_name, mac_list[i].name);\r
- CONFIG_MSG("matched oui=0x%06X, nic=0x%06X\n", oui, nic);\r
- CONFIG_MSG("nv_path=%s\n", nv_path);\r
- return;\r
- }\r
- }\r
- }\r
- }\r
-}\r
-#endif\r
-\r
-void\r
-dhd_conf_free_country_list(struct dhd_conf *conf)\r
-{\r
- country_list_t *country = conf->country_head;\r
- int count = 0;\r
-\r
- CONFIG_TRACE("called\n");\r
- while (country) {\r
- CONFIG_TRACE("Free cspec %s\n", country->cspec.country_abbrev);\r
- conf->country_head = country->next;\r
- kfree(country);\r
- country = conf->country_head;\r
- count++;\r
- }\r
- CONFIG_TRACE("%d country released\n", count);\r
-}\r
-\r
-void\r
-dhd_conf_free_mchan_list(struct dhd_conf *conf)\r
-{\r
- mchan_params_t *mchan = conf->mchan;\r
- int count = 0;\r
-\r
- CONFIG_TRACE("called\n");\r
- while (mchan) {\r
- CONFIG_TRACE("Free cspec %p\n", mchan);\r
- conf->mchan = mchan->next;\r
- kfree(mchan);\r
- mchan = conf->mchan;\r
- count++;\r
- }\r
- CONFIG_TRACE("%d mchan released\n", count);\r
-}\r
-\r
-int\r
-dhd_conf_set_fw_name_by_chip(dhd_pub_t *dhd, char *fw_path)\r
-{\r
- int fw_type, ag_type;\r
- uint chip, chiprev;\r
- int i;\r
- char *name_ptr;\r
-\r
- chip = dhd->conf->chip;\r
- chiprev = dhd->conf->chiprev;\r
-\r
- if (fw_path[0] == '\0') {\r
-#ifdef CONFIG_BCMDHD_FW_PATH\r
- bcm_strncpy_s(fw_path, MOD_PARAM_PATHLEN-1, CONFIG_BCMDHD_FW_PATH, MOD_PARAM_PATHLEN-1);\r
- if (fw_path[0] == '\0')\r
-#endif\r
- {\r
- CONFIG_MSG("firmware path is null\n");\r
- return 0;\r
- }\r
- }\r
-#ifndef FW_PATH_AUTO_SELECT\r
- return DONT_CARE;\r
-#endif\r
-\r
- /* find out the last '/' */\r
- i = strlen(fw_path);\r
- while (i > 0) {\r
- if (fw_path[i] == '/') {\r
- i++;\r
- break;\r
- }\r
- i--;\r
- }\r
- name_ptr = &fw_path[i];\r
-#ifdef BAND_AG\r
- ag_type = FW_TYPE_AG;\r
-#else\r
- ag_type = strstr(name_ptr, "_ag") ? FW_TYPE_AG : FW_TYPE_G;\r
-#endif\r
- if (strstr(name_ptr, "_apsta"))\r
- fw_type = FW_TYPE_APSTA;\r
- else if (strstr(name_ptr, "_p2p"))\r
- fw_type = FW_TYPE_P2P;\r
- else if (strstr(name_ptr, "_mesh"))\r
- fw_type = FW_TYPE_MESH;\r
- else if (strstr(name_ptr, "_es"))\r
- fw_type = FW_TYPE_ES;\r
- else if (strstr(name_ptr, "_mfg"))\r
- fw_type = FW_TYPE_MFG;\r
- else\r
- fw_type = FW_TYPE_STA;\r
-\r
- for (i = 0; i < sizeof(chip_name_map)/sizeof(chip_name_map[0]); i++) {\r
- const cihp_name_map_t* row = &chip_name_map[i];\r
- if (row->chip == chip && row->chiprev == chiprev &&\r
- (row->ag_type == ag_type || row->ag_type == DONT_CARE)) {\r
- strcpy(name_ptr, "fw_");\r
- strcat(fw_path, row->chip_name);\r
-#ifdef BCMUSBDEV_COMPOSITE\r
- strcat(fw_path, "_cusb");\r
-#endif\r
- if (fw_type == FW_TYPE_APSTA)\r
- strcat(fw_path, "_apsta.bin");\r
- else if (fw_type == FW_TYPE_P2P)\r
- strcat(fw_path, "_p2p.bin");\r
- else if (fw_type == FW_TYPE_MESH)\r
- strcat(fw_path, "_mesh.bin");\r
- else if (fw_type == FW_TYPE_ES)\r
- strcat(fw_path, "_es.bin");\r
- else if (fw_type == FW_TYPE_MFG)\r
- strcat(fw_path, "_mfg.bin");\r
- else\r
- strcat(fw_path, ".bin");\r
- }\r
- }\r
-\r
- dhd->conf->fw_type = fw_type;\r
-\r
- CONFIG_TRACE("firmware_path=%s\n", fw_path);\r
- return ag_type;\r
-}\r
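For a row that matches the probed chip/chiprev, the loop above rewrites the basename of fw_path in place as "fw_" + chip_name + a suffix chosen from the detected firmware type. A worked example for the {BCM4345_CHIP_ID, 9, DONT_CARE, "bcm43456c5_ag", "ap6256"} row and an _apsta image; the directory is illustrative and strrchr() stands in for the manual basename scan used above:

static void
example_build_fw_name(void)
{
	char fw_path[64] = "/vendor/etc/firmware/fw_bcmdhd_apsta.bin";
	char *name_ptr = strrchr(fw_path, '/') + 1;	/* basename */

	strcpy(name_ptr, "fw_");
	strcat(fw_path, "bcm43456c5_ag");	/* row->chip_name */
	strcat(fw_path, "_apsta.bin");		/* FW_TYPE_APSTA suffix */
	/* fw_path is now "/vendor/etc/firmware/fw_bcm43456c5_ag_apsta.bin" */
}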
-\r
-void\r
-dhd_conf_set_clm_name_by_chip(dhd_pub_t *dhd, char *clm_path, int ag_type)\r
-{\r
- uint chip, chiprev;\r
- int i;\r
- char *name_ptr;\r
-\r
- chip = dhd->conf->chip;\r
- chiprev = dhd->conf->chiprev;\r
-\r
- if (clm_path[0] == '\0') {\r
- CONFIG_MSG("clm path is null\n");\r
- return;\r
- }\r
-\r
- /* find out the last '/' */\r
- i = strlen(clm_path);\r
- while (i > 0) {\r
- if (clm_path[i] == '/') {\r
- i++;\r
- break;\r
- }\r
- i--;\r
- }\r
- name_ptr = &clm_path[i];\r
-\r
- for (i = 0; i < sizeof(chip_name_map)/sizeof(chip_name_map[0]); i++) {\r
- const cihp_name_map_t* row = &chip_name_map[i];\r
- if (row->chip == chip && row->chiprev == chiprev &&\r
- (row->ag_type == ag_type || row->ag_type == DONT_CARE)) {\r
- strcpy(name_ptr, "clm_");\r
- strcat(clm_path, row->chip_name);\r
- strcat(clm_path, ".blob");\r
- }\r
- }\r
-\r
- CONFIG_TRACE("clm_path=%s\n", clm_path);\r
-}\r
-\r
-void\r
-dhd_conf_set_nv_name_by_chip(dhd_pub_t *dhd, char *nv_path, int ag_type)\r
-{\r
- uint chip, chiprev;\r
- int i;\r
- char *name_ptr;\r
-\r
- chip = dhd->conf->chip;\r
- chiprev = dhd->conf->chiprev;\r
-\r
- if (nv_path[0] == '\0') {\r
-#ifdef CONFIG_BCMDHD_NVRAM_PATH\r
- bcm_strncpy_s(nv_path, MOD_PARAM_PATHLEN-1, CONFIG_BCMDHD_NVRAM_PATH, MOD_PARAM_PATHLEN-1);\r
- if (nv_path[0] == '\0')\r
-#endif\r
- {\r
- CONFIG_MSG("nvram path is null\n");\r
- return;\r
- }\r
- }\r
-\r
- /* find out the last '/' */\r
- i = strlen(nv_path);\r
- while (i > 0) {\r
- if (nv_path[i] == '/') {\r
- i++;\r
- break;\r
- }\r
- i--;\r
- }\r
- name_ptr = &nv_path[i];\r
-\r
- for (i = 0; i < sizeof(chip_name_map)/sizeof(chip_name_map[0]); i++) {\r
- const cihp_name_map_t* row = &chip_name_map[i];\r
- if (row->chip == chip && row->chiprev == chiprev &&\r
- (row->ag_type == ag_type || row->ag_type == DONT_CARE)) {\r
-#ifdef GET_OTP_MODULE_NAME\r
- if (strlen(dhd->conf->module_name)) {\r
- strcpy(name_ptr, "nvram_");\r
- strcat(name_ptr, dhd->conf->module_name);\r
- } else\r
-#endif\r
- if (strlen(row->module_name)){\r
- strcpy(name_ptr, "nvram_");\r
- strcat(name_ptr, row->module_name);\r
- } else\r
- continue;\r
-#ifdef BCMUSBDEV_COMPOSITE\r
- strcat(name_ptr, "_cusb");\r
-#endif\r
- strcat(name_ptr, ".txt");\r
- }\r
- }\r
-\r
- for (i=0; i<dhd->conf->nv_by_chip.count; i++) {\r
- if (chip==dhd->conf->nv_by_chip.m_chip_nv_path_head[i].chip &&\r
- chiprev==dhd->conf->nv_by_chip.m_chip_nv_path_head[i].chiprev) {\r
- strcpy(name_ptr, dhd->conf->nv_by_chip.m_chip_nv_path_head[i].name);\r
- break;\r
- }\r
- }\r
-\r
- CONFIG_TRACE("nvram_path=%s\n", nv_path);\r
-}\r
-\r
-void\r
-dhd_conf_copy_path(dhd_pub_t *dhd, char *dst_name, char *dst_path, char *src_path)\r
-{\r
- int i;\r
-\r
- if (src_path[0] == '\0') {\r
- CONFIG_MSG("src_path is null\n");\r
- return;\r
- } else\r
- strcpy(dst_path, src_path);\r
-\r
- /* find out the last '/' */\r
- i = strlen(dst_path);\r
- while (i > 0) {\r
- if (dst_path[i] == '/') {\r
- i++;\r
- break;\r
- }\r
- i--;\r
- }\r
- strcpy(&dst_path[i], dst_name);\r
-\r
- CONFIG_TRACE("dst_path=%s\n", dst_path);\r
-}\r
-\r
-#ifdef CONFIG_PATH_AUTO_SELECT\r
-void\r
-dhd_conf_set_conf_name_by_chip(dhd_pub_t *dhd, char *conf_path)\r
-{\r
- uint chip, chiprev;\r
- int i;\r
- char *name_ptr;\r
-\r
- chip = dhd->conf->chip;\r
- chiprev = dhd->conf->chiprev;\r
-\r
- if (conf_path[0] == '\0') {\r
- CONFIG_MSG("config path is null\n");\r
- return;\r
- }\r
-\r
- /* find out the last '/' */\r
- i = strlen(conf_path);\r
- while (i > 0) {\r
- if (conf_path[i] == '/') {\r
- i++;\r
- break;\r
- }\r
- i--;\r
- }\r
- name_ptr = &conf_path[i];\r
-\r
- for (i = 0; i < sizeof(chip_name_map)/sizeof(chip_name_map[0]); i++) {\r
- const cihp_name_map_t* row = &chip_name_map[i];\r
- if (row->chip == chip && row->chiprev == chiprev) {\r
- strcpy(name_ptr, "config_");\r
- strcat(conf_path, row->chip_name);\r
- strcat(conf_path, ".txt");\r
- }\r
- }\r
-\r
- CONFIG_TRACE("config_path=%s\n", conf_path);\r
-}\r
-#endif\r
-\r
-void\r
-dhd_conf_set_path_params(dhd_pub_t *dhd, char *fw_path, char *nv_path)\r
-{\r
- int ag_type;\r
-\r
- /* External conf takes precedence if specified */\r
- dhd_conf_preinit(dhd);\r
-\r
- if (dhd->conf_path[0] == '\0') {\r
- dhd_conf_copy_path(dhd, "config.txt", dhd->conf_path, nv_path);\r
- }\r
- if (dhd->clm_path[0] == '\0') {\r
- dhd_conf_copy_path(dhd, "clm.blob", dhd->clm_path, fw_path);\r
- }\r
-#ifdef CONFIG_PATH_AUTO_SELECT\r
- dhd_conf_set_conf_name_by_chip(dhd, dhd->conf_path);\r
-#endif\r
-\r
- dhd_conf_read_config(dhd, dhd->conf_path);\r
-\r
- ag_type = dhd_conf_set_fw_name_by_chip(dhd, fw_path);\r
- dhd_conf_set_nv_name_by_chip(dhd, nv_path, ag_type);\r
- dhd_conf_set_clm_name_by_chip(dhd, dhd->clm_path, ag_type);\r
-#ifdef BCMSDIO\r
- dhd_conf_set_fw_name_by_mac(dhd, fw_path);\r
- dhd_conf_set_nv_name_by_mac(dhd, nv_path);\r
-#endif\r
-\r
- CONFIG_MSG("Final fw_path=%s\n", fw_path);\r
- CONFIG_MSG("Final nv_path=%s\n", nv_path);\r
- CONFIG_MSG("Final clm_path=%s\n", dhd->clm_path);\r
- CONFIG_MSG("Final conf_path=%s\n", dhd->conf_path);\r
-}\r
-\r
-int\r
-dhd_conf_set_intiovar(dhd_pub_t *dhd, uint cmd, char *name, int val,\r
- int def, bool down)\r
-{\r
- int ret = -1;\r
- char iovbuf[WL_EVENTING_MASK_LEN + 12]; /* Room for "event_msgs" + '\0' + bitvec */\r
-\r
- if (val >= def) {\r
- if (down) {\r
- if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_DOWN, NULL, 0, TRUE, 0)) < 0)\r
- CONFIG_ERROR("WLC_DOWN setting failed %d\n", ret);\r
- }\r
- if (cmd == WLC_SET_VAR) {\r
- CONFIG_TRACE("set %s %d\n", name, val);\r
- bcm_mkiovar(name, (char *)&val, sizeof(val), iovbuf, sizeof(iovbuf));\r
- if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0)) < 0)\r
- CONFIG_ERROR("%s setting failed %d\n", name, ret);\r
- } else {\r
- CONFIG_TRACE("set %s %d %d\n", name, cmd, val);\r
- if ((ret = dhd_wl_ioctl_cmd(dhd, cmd, &val, sizeof(val), TRUE, 0)) < 0)\r
- CONFIG_ERROR("%s setting failed %d\n", name, ret);\r
- }\r
- }\r
-\r
- return ret;\r
-}\r
-\r
-int\r
-dhd_conf_set_bufiovar(dhd_pub_t *dhd, int ifidx, uint cmd, char *name,\r
- char *buf, int len, bool down)\r
-{\r
- char iovbuf[WLC_IOCTL_SMLEN];\r
- s32 iovar_len;\r
- int ret = -1;\r
-\r
- if (down) {\r
- if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_DOWN, NULL, 0, TRUE, ifidx)) < 0)\r
- CONFIG_ERROR("WLC_DOWN setting failed %d\n", ret);\r
- }\r
-\r
- if (cmd == WLC_SET_VAR) {\r
- iovar_len = bcm_mkiovar(name, buf, len, iovbuf, sizeof(iovbuf));\r
- if (iovar_len > 0)\r
- ret = dhd_wl_ioctl_cmd(dhd, cmd, iovbuf, iovar_len, TRUE, ifidx);\r
- else\r
- ret = BCME_BUFTOOSHORT;\r
- if (ret < 0)\r
- CONFIG_ERROR("%s setting failed %d, len=%d\n", name, ret, len);\r
- } else {\r
- if ((ret = dhd_wl_ioctl_cmd(dhd, cmd, buf, len, TRUE, ifidx)) < 0)\r
- CONFIG_ERROR("%s setting failed %d\n", name, ret);\r
- }\r
-\r
- return ret;\r
-}\r
-\r
-int\r
-dhd_conf_get_iovar(dhd_pub_t *dhd, int ifidx, int cmd, char *name,\r
- char *buf, int len)\r
-{\r
- char iovbuf[WLC_IOCTL_SMLEN];\r
- int ret = -1;\r
-\r
- if (cmd == WLC_GET_VAR) {\r
- if (bcm_mkiovar(name, NULL, 0, iovbuf, sizeof(iovbuf))) {\r
- ret = dhd_wl_ioctl_cmd(dhd, cmd, iovbuf, sizeof(iovbuf), FALSE, ifidx);\r
- if (!ret) {\r
- memcpy(buf, iovbuf, len);\r
- } else {\r
- CONFIG_ERROR("get iovar %s failed %d\n", name, ret);\r
- }\r
- } else {\r
- CONFIG_ERROR("mkiovar %s failed\n", name);\r
- }\r
- } else {\r
- ret = dhd_wl_ioctl_cmd(dhd, cmd, buf, len, FALSE, 0);\r
- if (ret < 0)\r
- CONFIG_ERROR("get iovar %s failed %d\n", name, ret);\r
- }\r
-\r
- return ret;\r
-}\r
-\r
-static int\r
-dhd_conf_rsdb_mode(dhd_pub_t *dhd, char *buf)\r
-{\r
- wl_config_t rsdb_mode_cfg = {1, 0};\r
-\r
- if (buf) {\r
- rsdb_mode_cfg.config = (int)simple_strtol(buf, NULL, 0);\r
- CONFIG_MSG("rsdb_mode %d\n", rsdb_mode_cfg.config);\r
- dhd_conf_set_bufiovar(dhd, 0, WLC_SET_VAR, "rsdb_mode", (char *)&rsdb_mode_cfg,\r
- sizeof(rsdb_mode_cfg), TRUE);\r
- }\r
-\r
- return 0;\r
-}\r
-\r
-typedef struct sub_cmd_t {\r
- char *name;\r
- uint16 id; /* id for the dongle f/w switch/case */\r
- uint16 type; /* base type of argument IOVT_XXXX */\r
-} sub_cmd_t;\r
-\r
-/* wl he sub cmd list */\r
-static const sub_cmd_t he_cmd_list[] = {\r
- {"enab", WL_HE_CMD_ENAB, IOVT_UINT8},\r
- {"features", WL_HE_CMD_FEATURES, IOVT_UINT32},\r
- {"bsscolor", WL_HE_CMD_BSSCOLOR, IOVT_UINT8},\r
- {"partialbsscolor", WL_HE_CMD_PARTIAL_BSSCOLOR, IOVT_UINT8},\r
- {"cap", WL_HE_CMD_CAP, IOVT_UINT8},\r
- {"staid", WL_HE_CMD_STAID, IOVT_UINT16},\r
- {"rtsdurthresh", WL_HE_CMD_RTSDURTHRESH, IOVT_UINT16},\r
- {"peduration", WL_HE_CMD_PEDURATION, IOVT_UINT8},\r
- {"testbed_mode", WL_HE_CMD_TESTBED_MODE, IOVT_UINT32},\r
- {"omi_ulmu_throttle", WL_HE_CMD_OMI_ULMU_THROTTLE, IOVT_UINT16},\r
- {"omi_dlmu_rr_mpf_map", WL_HE_CMD_OMI_DLMU_RSD_RCM_MPF_MAP, IOVT_UINT32},\r
- {"ulmu_disable_policy", WL_HE_CMD_ULMU_DISABLE_POLICY, IOVT_UINT8},\r
- {"sr_prohibit", WL_HE_CMD_SR_PROHIBIT, IOVT_UINT8},\r
-};\r
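-\r
-/* The table above maps "wl he <subcmd>" names to the dongle XTLV id and value\r
- * width used by dhd_conf_he_cmd() below. As an illustration (not a required\r
- * setting), a config line such as "wl_preinit=he=enab 1" would be routed\r
- * through dhd_conf_set_wl_cmd() into dhd_conf_he_cmd() and packed as a\r
- * WL_HE_CMD_ENAB uint8 XTLV.\r
- */\r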
-\r
-static uint\r
-wl_he_iovt2len(uint iovt)\r
-{\r
- switch (iovt) {\r
- case IOVT_BOOL:\r
- case IOVT_INT8:\r
- case IOVT_UINT8:\r
- return sizeof(uint8);\r
- case IOVT_INT16:\r
- case IOVT_UINT16:\r
- return sizeof(uint16);\r
- case IOVT_INT32:\r
- case IOVT_UINT32:\r
- return sizeof(uint32);\r
- default:\r
- /* ASSERT(0); */\r
- return 0;\r
- }\r
-}\r
-\r
-static int\r
-dhd_conf_he_cmd(dhd_pub_t * dhd, char *buf)\r
-{\r
- int ret = BCME_OK, i;\r
- bcm_xtlv_t *pxtlv = NULL;\r
- uint8 mybuf[128];\r
- uint16 he_id = -1, he_len = 0, mybuf_len = sizeof(mybuf);\r
- uint32 he_val;\r
- const sub_cmd_t *tpl = he_cmd_list;\r
- char sub_cmd[32] = "", he_val_str[10] = "";\r
-\r
- if (buf) {\r
- sscanf(buf, "%31s %9s", sub_cmd, he_val_str);\r
- }\r
-\r
- for (i=0; i<ARRAY_SIZE(he_cmd_list); i++, tpl++) {\r
- if (!strcmp(tpl->name, sub_cmd)) {\r
- he_id = tpl->id;\r
- he_len = wl_he_iovt2len(tpl->type);\r
- break;\r
- }\r
- }\r
- if (i == ARRAY_SIZE(he_cmd_list)) {\r
- CONFIG_ERROR("No he id found for %s\n", sub_cmd);\r
- return 0;\r
- }\r
-\r
- pxtlv = (bcm_xtlv_t *)mybuf;\r
-\r
- if (strlen(he_val_str)) {\r
- he_val = simple_strtol(he_val_str, NULL, 0);\r
- ret = bcm_pack_xtlv_entry((uint8**)&pxtlv, &mybuf_len, he_id,\r
- he_len, (uint8 *)&he_val, BCM_XTLV_OPTION_ALIGN32);\r
- if (ret != BCME_OK) {\r
- CONFIG_ERROR("failed to pack he %s, err: %s\n", sub_cmd, bcmerrorstr(ret));\r
- return 0;\r
- }\r
- CONFIG_MSG("he %s 0x%x\n", sub_cmd, he_val);\r
- dhd_conf_set_bufiovar(dhd, 0, WLC_SET_VAR, "he", (char *)&mybuf,\r
- sizeof(mybuf), TRUE);\r
- }\r
-\r
- return 0;\r
-}\r
-\r
-typedef int (tpl_parse_t)(dhd_pub_t *dhd, char *buf);\r
-\r
-typedef struct iovar_tpl_t {\r
- int cmd;\r
- char *name;\r
- tpl_parse_t *parse;\r
-} iovar_tpl_t;\r
-\r
-const iovar_tpl_t iovar_tpl_list[] = {\r
- {WLC_SET_VAR, "rsdb_mode", dhd_conf_rsdb_mode},\r
- {WLC_SET_VAR, "he", dhd_conf_he_cmd},\r
-};\r
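-\r
-/* Only the iovars listed above get special parsing; any other [cmd]=[val]\r
- * pair coming from wl_preinit/wl_suspend/wl_resume falls through to\r
- * dhd_conf_set_intiovar() as a plain integer in dhd_conf_set_wl_cmd().\r
- */\r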
-\r
-static int iovar_tpl_parse(const iovar_tpl_t *tpl, int tpl_count,\r
- dhd_pub_t *dhd, int cmd, char *name, char *buf)\r
-{\r
- int i, ret = 0;\r
-\r
- /* look for a matching code in the table */\r
- for (i = 0; i < tpl_count; i++, tpl++) {\r
- if (tpl->cmd == cmd && !strcmp(tpl->name, name))\r
- break;\r
- }\r
- if (i < tpl_count && tpl->parse) {\r
- ret = tpl->parse(dhd, buf);\r
- } else {\r
- ret = -1;\r
- }\r
-\r
- return ret;\r
-}\r
-\r
-bool\r
-dhd_conf_set_wl_cmd(dhd_pub_t *dhd, char *data, bool down)\r
-{\r
- int cmd, val, ret = 0, len;\r
- char name[32], *pch, *pick_tmp, *pick_tmp2, *pdata = NULL;\r
-\r
- /* Process wl_preinit:\r
- * wl_preinit=[cmd]=[val], [cmd]=[val]\r
- * Ex: wl_preinit=86=0, mpc=0\r
- */\r
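- /* A numeric [cmd] is sent as a raw WLC_xxx ioctl code (86 above is\r
- * WLC_SET_PM), while a non-numeric [cmd] is treated as an iovar name and\r
- * set through WLC_SET_VAR.\r
- */\r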
-\r
- if (data == NULL)\r
- return FALSE;\r
-\r
- len = strlen(data);\r
- pdata = kmalloc(len+1, GFP_KERNEL);\r
- if (pdata == NULL) {\r
- CONFIG_ERROR("Failed to allocate buffer of %d bytes\n", len+1);\r
- goto exit;\r
- }\r
- memset(pdata, 0, len+1);\r
- strcpy(pdata, data);\r
-\r
- pick_tmp = pdata;\r
- while (pick_tmp && (pick_tmp2 = bcmstrtok(&pick_tmp, ",", 0)) != NULL) {\r
- pch = bcmstrtok(&pick_tmp2, "=", 0);\r
- if (!pch)\r
- break;\r
- if (*pch == ' ') {\r
- pch++;\r
- }\r
- memset(name, 0 , sizeof (name));\r
- cmd = (int)simple_strtol(pch, NULL, 0);\r
- if (cmd == 0) {\r
- cmd = WLC_SET_VAR;\r
- strcpy(name, pch);\r
- }\r
- pch = bcmstrtok(&pick_tmp2, ",", 0);\r
- if (!pch) {\r
- break;\r
- }\r
- ret = iovar_tpl_parse(iovar_tpl_list, ARRAY_SIZE(iovar_tpl_list),\r
- dhd, cmd, name, pch);\r
- if (ret) {\r
- val = (int)simple_strtol(pch, NULL, 0);\r
- dhd_conf_set_intiovar(dhd, cmd, name, val, -1, down);\r
- }\r
- }\r
-\r
-exit:\r
- if (pdata)\r
- kfree(pdata);\r
- return TRUE;\r
-}\r
-\r
-int\r
-dhd_conf_get_band(dhd_pub_t *dhd)\r
-{\r
- int band = -1;\r
-\r
- if (dhd && dhd->conf)\r
- band = dhd->conf->band;\r
- else\r
- CONFIG_ERROR("dhd or conf is NULL\n");\r
-\r
- return band;\r
-}\r
-\r
-int\r
-dhd_conf_get_country(dhd_pub_t *dhd, wl_country_t *cspec)\r
-{\r
- int bcmerror = -1;\r
-\r
- memset(cspec, 0, sizeof(wl_country_t));\r
- bcm_mkiovar("country", NULL, 0, (char*)cspec, sizeof(wl_country_t));\r
- if ((bcmerror = dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, cspec, sizeof(wl_country_t),\r
- FALSE, 0)) < 0)\r
- CONFIG_ERROR("country code getting failed %d\n", bcmerror);\r
-\r
- return bcmerror;\r
-}\r
-\r
-int\r
-dhd_conf_map_country_list(dhd_pub_t *dhd, wl_country_t *cspec)\r
-{\r
- int bcmerror = -1;\r
- struct dhd_conf *conf = dhd->conf;\r
- country_list_t *country = conf->country_head;\r
-\r
-#ifdef CCODE_LIST\r
- bcmerror = dhd_ccode_map_country_list(dhd, cspec);\r
-#endif\r
-\r
- while (country != NULL) {\r
- if (!strncmp("**", country->cspec.country_abbrev, 2)) {\r
- memcpy(cspec->ccode, country->cspec.ccode, WLC_CNTRY_BUF_SZ);\r
- cspec->rev = country->cspec.rev;\r
- bcmerror = 0;\r
- break;\r
- } else if (!strncmp(cspec->country_abbrev,\r
- country->cspec.country_abbrev, 2)) {\r
- memcpy(cspec->ccode, country->cspec.ccode, WLC_CNTRY_BUF_SZ);\r
- cspec->rev = country->cspec.rev;\r
- bcmerror = 0;\r
- break;\r
- }\r
- country = country->next;\r
- }\r
-\r
- if (!bcmerror)\r
- CONFIG_MSG("%s/%d\n", cspec->ccode, cspec->rev);\r
-\r
- return bcmerror;\r
-}\r
-\r
-int\r
-dhd_conf_set_country(dhd_pub_t *dhd, wl_country_t *cspec)\r
-{\r
- int bcmerror = -1;\r
-\r
- memset(&dhd->dhd_cspec, 0, sizeof(wl_country_t));\r
-\r
- CONFIG_MSG("set country %s, revision %d\n", cspec->ccode, cspec->rev);\r
- bcmerror = dhd_conf_set_bufiovar(dhd, 0, WLC_SET_VAR, "country", (char *)cspec,\r
- sizeof(wl_country_t), FALSE);\r
- dhd_conf_get_country(dhd, cspec);\r
- CONFIG_MSG("Country code: %s (%s/%d)\n",\r
- cspec->country_abbrev, cspec->ccode, cspec->rev);\r
-\r
- return bcmerror;\r
-}\r
-\r
-int\r
-dhd_conf_fix_country(dhd_pub_t *dhd)\r
-{\r
- int bcmerror = -1;\r
- int band;\r
- wl_uint32_list_t *list;\r
- u8 valid_chan_list[sizeof(u32)*(WL_NUMCHANNELS + 1)];\r
- wl_country_t cspec;\r
-\r
- if (!(dhd && dhd->conf)) {\r
- return bcmerror;\r
- }\r
-\r
- memset(valid_chan_list, 0, sizeof(valid_chan_list));\r
- list = (wl_uint32_list_t *)(void *) valid_chan_list;\r
- list->count = htod32(WL_NUMCHANNELS);\r
- if ((bcmerror = dhd_wl_ioctl_cmd(dhd, WLC_GET_VALID_CHANNELS, valid_chan_list,\r
- sizeof(valid_chan_list), FALSE, 0)) < 0) {\r
- CONFIG_ERROR("get channels failed with %d\n", bcmerror);\r
- }\r
-\r
- band = dhd_conf_get_band(dhd);\r
-\r
- if (bcmerror || ((band==WLC_BAND_AUTO || band==WLC_BAND_2G || band==-1) &&\r
- dtoh32(list->count)<11)) {\r
- CONFIG_ERROR("bcmerror=%d, # of channels %d\n",\r
- bcmerror, dtoh32(list->count));\r
- dhd_conf_map_country_list(dhd, &dhd->conf->cspec);\r
- if ((bcmerror = dhd_conf_set_country(dhd, &dhd->conf->cspec)) < 0) {\r
- strcpy(cspec.country_abbrev, "US");\r
- cspec.rev = 0;\r
- strcpy(cspec.ccode, "US");\r
- dhd_conf_map_country_list(dhd, &cspec);\r
- dhd_conf_set_country(dhd, &cspec);\r
- }\r
- }\r
-\r
- return bcmerror;\r
-}\r
-\r
-bool\r
-dhd_conf_match_channel(dhd_pub_t *dhd, uint32 channel)\r
-{\r
- int i;\r
- bool match = false;\r
-\r
- if (dhd && dhd->conf) {\r
- if (dhd->conf->channels.count == 0)\r
- return true;\r
- for (i=0; i<dhd->conf->channels.count; i++) {\r
- if (channel == dhd->conf->channels.channel[i])\r
- match = true;\r
- }\r
- } else {\r
- match = true;\r
- CONFIG_ERROR("dhd or conf is NULL\n");\r
- }\r
-\r
- return match;\r
-}\r
-\r
-int\r
-dhd_conf_set_roam(dhd_pub_t *dhd)\r
-{\r
- int bcmerror = -1;\r
- struct dhd_conf *conf = dhd->conf;\r
-\r
- dhd_roam_disable = conf->roam_off;\r
- dhd_conf_set_intiovar(dhd, WLC_SET_VAR, "roam_off", dhd->conf->roam_off, 0, FALSE);\r
-\r
- if (!conf->roam_off || !conf->roam_off_suspend) {\r
- CONFIG_MSG("set roam_trigger %d\n", conf->roam_trigger[0]);\r
- dhd_conf_set_bufiovar(dhd, 0, WLC_SET_ROAM_TRIGGER, "WLC_SET_ROAM_TRIGGER",\r
- (char *)conf->roam_trigger, sizeof(conf->roam_trigger), FALSE);\r
-\r
- CONFIG_MSG("set roam_scan_period %d\n", conf->roam_scan_period[0]);\r
- dhd_conf_set_bufiovar(dhd, 0, WLC_SET_ROAM_SCAN_PERIOD, "WLC_SET_ROAM_SCAN_PERIOD",\r
- (char *)conf->roam_scan_period, sizeof(conf->roam_scan_period), FALSE);\r
-\r
- CONFIG_MSG("set roam_delta %d\n", conf->roam_delta[0]);\r
- dhd_conf_set_bufiovar(dhd, 0, WLC_SET_ROAM_DELTA, "WLC_SET_ROAM_DELTA",\r
- (char *)conf->roam_delta, sizeof(conf->roam_delta), FALSE);\r
-\r
- dhd_conf_set_intiovar(dhd, WLC_SET_VAR, "fullroamperiod",\r
- dhd->conf->fullroamperiod, 1, FALSE);\r
- }\r
-\r
- return bcmerror;\r
-}\r
-\r
-void\r
-dhd_conf_add_to_eventbuffer(struct eventmsg_buf *ev, u16 event, bool set)\r
-{\r
- if (!ev || (event > WLC_E_LAST))\r
- return;\r
-\r
- if (ev->num < MAX_EVENT_BUF_NUM) {\r
- ev->event[ev->num].type = event;\r
- ev->event[ev->num].set = set;\r
- ev->num++;\r
- } else {\r
- CONFIG_ERROR("eventbuffer doesn't support > %u events. Update"\r
- " the define MAX_EVENT_BUF_NUM\n", MAX_EVENT_BUF_NUM);\r
- ASSERT(0);\r
- }\r
-}\r
-\r
-s32\r
-dhd_conf_apply_eventbuffer(dhd_pub_t *dhd, eventmsg_buf_t *ev)\r
-{\r
- char eventmask[WL_EVENTING_MASK_LEN];\r
- int i, ret = 0;\r
-\r
- if (!ev || (!ev->num))\r
- return -EINVAL;\r
-\r
- /* Read event_msgs mask */\r
- ret = dhd_conf_get_iovar(dhd, 0, WLC_GET_VAR, "event_msgs", eventmask,\r
- sizeof(eventmask));\r
- if (unlikely(ret)) {\r
- CONFIG_ERROR("Get event_msgs error (%d)\n", ret);\r
- goto exit;\r
- }\r
-\r
- /* apply the set bits */\r
- for (i = 0; i < ev->num; i++) {\r
- if (ev->event[i].set)\r
- setbit(eventmask, ev->event[i].type);\r
- else\r
- clrbit(eventmask, ev->event[i].type);\r
- }\r
-\r
- /* Write updated Event mask */\r
- ret = dhd_conf_set_bufiovar(dhd, 0, WLC_SET_VAR, "event_msgs", eventmask,\r
- sizeof(eventmask), FALSE);\r
- if (unlikely(ret)) {\r
- CONFIG_ERROR("Set event_msgs error (%d)\n", ret);\r
- }\r
-\r
-exit:\r
- return ret;\r
-}\r
-\r
-int\r
-dhd_conf_enable_roam_offload(dhd_pub_t *dhd, int enable)\r
-{\r
- int err;\r
- eventmsg_buf_t ev_buf;\r
-\r
- if (dhd->conf->roam_off_suspend)\r
- return 0;\r
-\r
- err = dhd_conf_set_intiovar(dhd, WLC_SET_VAR, "roam_offload", enable, 0, FALSE);\r
- if (err)\r
- return err;\r
-\r
- bzero(&ev_buf, sizeof(eventmsg_buf_t));\r
- dhd_conf_add_to_eventbuffer(&ev_buf, WLC_E_PSK_SUP, !enable);\r
- dhd_conf_add_to_eventbuffer(&ev_buf, WLC_E_ASSOC_REQ_IE, !enable);\r
- dhd_conf_add_to_eventbuffer(&ev_buf, WLC_E_ASSOC_RESP_IE, !enable);\r
- dhd_conf_add_to_eventbuffer(&ev_buf, WLC_E_REASSOC, !enable);\r
- dhd_conf_add_to_eventbuffer(&ev_buf, WLC_E_JOIN, !enable);\r
- dhd_conf_add_to_eventbuffer(&ev_buf, WLC_E_ROAM, !enable);\r
- err = dhd_conf_apply_eventbuffer(dhd, &ev_buf);\r
-\r
- CONFIG_TRACE("roam_offload %d\n", enable);\r
-\r
- return err;\r
-}\r
-\r
-void\r
-dhd_conf_set_bw_cap(dhd_pub_t *dhd)\r
-{\r
- struct {\r
- u32 band;\r
- u32 bw_cap;\r
- } param = {0, 0};\r
-\r
- if (dhd->conf->bw_cap[0] >= 0) {\r
- memset(&param, 0, sizeof(param));\r
- param.band = WLC_BAND_2G;\r
- param.bw_cap = (uint)dhd->conf->bw_cap[0];\r
- CONFIG_MSG("set bw_cap 2g 0x%x\n", param.bw_cap);\r
- dhd_conf_set_bufiovar(dhd, 0, WLC_SET_VAR, "bw_cap", (char *)&param,\r
- sizeof(param), TRUE);\r
- }\r
-\r
- if (dhd->conf->bw_cap[1] >= 0) {\r
- memset(&param, 0, sizeof(param));\r
- param.band = WLC_BAND_5G;\r
- param.bw_cap = (uint)dhd->conf->bw_cap[1];\r
- CONFIG_MSG("set bw_cap 5g 0x%x\n", param.bw_cap);\r
- dhd_conf_set_bufiovar(dhd, 0, WLC_SET_VAR, "bw_cap", (char *)&param,\r
- sizeof(param), TRUE);\r
- }\r
-}\r
-\r
-void\r
-dhd_conf_get_wme(dhd_pub_t *dhd, int ifidx, int mode, edcf_acparam_t *acp)\r
-{\r
- int bcmerror = -1;\r
- char iovbuf[WLC_IOCTL_SMLEN];\r
- edcf_acparam_t *acparam;\r
-\r
- bzero(iovbuf, sizeof(iovbuf));\r
-\r
- /*\r
- * Get current acparams, using buf as an input buffer.\r
- * Return data is array of 4 ACs of wme params.\r
- */\r
- if (mode == 0)\r
- bcm_mkiovar("wme_ac_sta", NULL, 0, iovbuf, sizeof(iovbuf));\r
- else\r
- bcm_mkiovar("wme_ac_ap", NULL, 0, iovbuf, sizeof(iovbuf));\r
- if ((bcmerror = dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, iovbuf, sizeof(iovbuf),\r
- FALSE, ifidx)) < 0) {\r
- CONFIG_ERROR("wme_ac_sta getting failed %d\n", bcmerror);\r
- return;\r
- }\r
- memcpy((char*)acp, iovbuf, sizeof(edcf_acparam_t)*AC_COUNT);\r
-\r
- acparam = &acp[AC_BK];\r
- CONFIG_TRACE("BK: aci %d aifsn %d ecwmin %d ecwmax %d txop 0x%x\n",\r
- acparam->ACI, acparam->ACI&EDCF_AIFSN_MASK,\r
- acparam->ECW&EDCF_ECWMIN_MASK, (acparam->ECW&EDCF_ECWMAX_MASK)>>EDCF_ECWMAX_SHIFT,\r
- acparam->TXOP);\r
- acparam = &acp[AC_BE];\r
- CONFIG_TRACE("BE: aci %d aifsn %d ecwmin %d ecwmax %d txop 0x%x\n",\r
- acparam->ACI, acparam->ACI&EDCF_AIFSN_MASK,\r
- acparam->ECW&EDCF_ECWMIN_MASK, (acparam->ECW&EDCF_ECWMAX_MASK)>>EDCF_ECWMAX_SHIFT,\r
- acparam->TXOP);\r
- acparam = &acp[AC_VI];\r
- CONFIG_TRACE("VI: aci %d aifsn %d ecwmin %d ecwmax %d txop 0x%x\n",\r
- acparam->ACI, acparam->ACI&EDCF_AIFSN_MASK,\r
- acparam->ECW&EDCF_ECWMIN_MASK, (acparam->ECW&EDCF_ECWMAX_MASK)>>EDCF_ECWMAX_SHIFT,\r
- acparam->TXOP);\r
- acparam = &acp[AC_VO];\r
- CONFIG_TRACE("VO: aci %d aifsn %d ecwmin %d ecwmax %d txop 0x%x\n",\r
- acparam->ACI, acparam->ACI&EDCF_AIFSN_MASK,\r
- acparam->ECW&EDCF_ECWMIN_MASK, (acparam->ECW&EDCF_ECWMAX_MASK)>>EDCF_ECWMAX_SHIFT,\r
- acparam->TXOP);\r
-\r
- return;\r
-}\r
-\r
-void\r
-dhd_conf_update_wme(dhd_pub_t *dhd, int ifidx, int mode,\r
- edcf_acparam_t *acparam_cur, int aci)\r
-{\r
- int aifsn, ecwmin, ecwmax, txop;\r
- edcf_acparam_t *acp;\r
- struct dhd_conf *conf = dhd->conf;\r
- wme_param_t *wme;\r
-\r
- if (mode == 0)\r
- wme = &conf->wme_sta;\r
- else\r
- wme = &conf->wme_ap;\r
-\r
- /* Default value */\r
- aifsn = acparam_cur->ACI&EDCF_AIFSN_MASK;\r
- ecwmin = acparam_cur->ECW&EDCF_ECWMIN_MASK;\r
- ecwmax = (acparam_cur->ECW&EDCF_ECWMAX_MASK)>>EDCF_ECWMAX_SHIFT;\r
- txop = acparam_cur->TXOP;\r
-\r
- /* Modified value */\r
- if (wme->aifsn[aci] > 0)\r
- aifsn = wme->aifsn[aci];\r
- if (wme->ecwmin[aci] > 0)\r
- ecwmin = wme->ecwmin[aci];\r
- if (wme->ecwmax[aci] > 0)\r
- ecwmax = wme->ecwmax[aci];\r
- if (wme->txop[aci] > 0)\r
- txop = wme->txop[aci];\r
-\r
- if (!(wme->aifsn[aci] || wme->ecwmin[aci] ||\r
- wme->ecwmax[aci] || wme->txop[aci]))\r
- return;\r
-\r
- /* Update */\r
- acp = acparam_cur;\r
- acp->ACI = (acp->ACI & ~EDCF_AIFSN_MASK) | (aifsn & EDCF_AIFSN_MASK);\r
- acp->ECW = ((ecwmax << EDCF_ECWMAX_SHIFT) & EDCF_ECWMAX_MASK) | (acp->ECW & EDCF_ECWMIN_MASK);\r
- acp->ECW = ((acp->ECW & EDCF_ECWMAX_MASK) | (ecwmin & EDCF_ECWMIN_MASK));\r
- acp->TXOP = txop;\r
-\r
- CONFIG_MSG("wme_ac %s aci %d aifsn %d ecwmin %d ecwmax %d txop 0x%x\n",\r
- mode?"ap":"sta", acp->ACI, acp->ACI&EDCF_AIFSN_MASK,\r
- acp->ECW&EDCF_ECWMIN_MASK, (acp->ECW&EDCF_ECWMAX_MASK)>>EDCF_ECWMAX_SHIFT,\r
- acp->TXOP);\r
-\r
- /*\r
- * Now use buf as an output buffer.\r
- * Put WME acparams after "wme_ac\0" in buf.\r
- * NOTE: only one of the four ACs can be set at a time.\r
- */\r
- if (mode == 0)\r
- dhd_conf_set_bufiovar(dhd, ifidx, WLC_SET_VAR, "wme_ac_sta", (char *)acp,\r
- sizeof(edcf_acparam_t), FALSE);\r
- else\r
- dhd_conf_set_bufiovar(dhd, ifidx, WLC_SET_VAR, "wme_ac_ap", (char *)acp,\r
- sizeof(edcf_acparam_t), FALSE);\r
-\r
-}\r
-\r
-void\r
-dhd_conf_set_wme(dhd_pub_t *dhd, int ifidx, int mode)\r
-{\r
- edcf_acparam_t acparam_cur[AC_COUNT];\r
-\r
- if (dhd && dhd->conf) {\r
- if (!dhd->conf->force_wme_ac) {\r
- CONFIG_TRACE("force_wme_ac is not enabled %d\n",\r
- dhd->conf->force_wme_ac);\r
- return;\r
- }\r
-\r
- CONFIG_TRACE("Before change:\n");\r
- dhd_conf_get_wme(dhd, ifidx, mode, acparam_cur);\r
-\r
- dhd_conf_update_wme(dhd, ifidx, mode, &acparam_cur[AC_BK], AC_BK);\r
- dhd_conf_update_wme(dhd, ifidx, mode, &acparam_cur[AC_BE], AC_BE);\r
- dhd_conf_update_wme(dhd, ifidx, mode, &acparam_cur[AC_VI], AC_VI);\r
- dhd_conf_update_wme(dhd, ifidx, mode, &acparam_cur[AC_VO], AC_VO);\r
-\r
- CONFIG_TRACE("After change:\n");\r
- dhd_conf_get_wme(dhd, ifidx, mode, acparam_cur);\r
- } else {\r
- CONFIG_ERROR("dhd or conf is NULL\n");\r
- }\r
-\r
- return;\r
-}\r
-\r
-void\r
-dhd_conf_set_mchan_bw(dhd_pub_t *dhd, int p2p_mode, int miracast_mode)\r
-{\r
- struct dhd_conf *conf = dhd->conf;\r
- mchan_params_t *mchan = conf->mchan;\r
- bool set = true;\r
-\r
- while (mchan != NULL) {\r
- set = true;\r
- set &= (mchan->bw >= 0);\r
- set &= ((mchan->p2p_mode == -1) || (mchan->p2p_mode == p2p_mode));\r
- set &= ((mchan->miracast_mode == -1) || (mchan->miracast_mode == miracast_mode));\r
- if (set) {\r
- dhd_conf_set_intiovar(dhd, WLC_SET_VAR, "mchan_bw", mchan->bw, 0, FALSE);\r
- }\r
- mchan = mchan->next;\r
- }\r
-\r
- return;\r
-}\r
-\r
-#ifdef PKT_FILTER_SUPPORT\r
-void\r
-dhd_conf_add_pkt_filter(dhd_pub_t *dhd)\r
-{\r
- int i, j;\r
- char str[16];\r
-#define MACS "%02x%02x%02x%02x%02x%02x"\r
-\r
- /*\r
- * Let only a few packet types through: ARP (0x0806, filter ID 105), BRCM (0x886C), 802.1X (0x888E)\r
- * 1) dhd_master_mode=1\r
- * 2) pkt_filter_delete=100, 102, 103, 104, 105, 106, 107\r
- * 3) pkt_filter_add=131 0 0 12 0xFFFF 0x886C, 132 0 0 12 0xFFFF 0x888E\r
- * 4) magic_pkt_filter_add=141 0 1 12\r
- */\r
- for(i=0; i<dhd->conf->pkt_filter_add.count; i++) {\r
- dhd->pktfilter[i+dhd->pktfilter_count] = dhd->conf->pkt_filter_add.filter[i];\r
- CONFIG_MSG("%s\n", dhd->pktfilter[i+dhd->pktfilter_count]);\r
- }\r
- dhd->pktfilter_count += i;\r
-\r
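- /* magic_pkt_filter_add is expanded into a Wake-on-LAN magic-packet filter:\r
- * the code below appends a mask/pattern covering 6 bytes of 0xFF followed\r
- * by this interface's MAC address repeated 16 times.\r
- */\r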
- if (dhd->conf->magic_pkt_filter_add) {\r
- strcat(dhd->conf->magic_pkt_filter_add, " 0x");\r
- strcat(dhd->conf->magic_pkt_filter_add, "FFFFFFFFFFFF");\r
- for (j=0; j<16; j++)\r
- strcat(dhd->conf->magic_pkt_filter_add, "FFFFFFFFFFFF");\r
- strcat(dhd->conf->magic_pkt_filter_add, " 0x");\r
- strcat(dhd->conf->magic_pkt_filter_add, "FFFFFFFFFFFF");\r
- sprintf(str, MACS, MAC2STRDBG(dhd->mac.octet));\r
- for (j=0; j<16; j++)\r
- strncat(dhd->conf->magic_pkt_filter_add, str, 12);\r
- dhd->pktfilter[dhd->pktfilter_count] = dhd->conf->magic_pkt_filter_add;\r
- dhd->pktfilter_count += 1;\r
- }\r
-}\r
-\r
-bool\r
-dhd_conf_del_pkt_filter(dhd_pub_t *dhd, uint32 id)\r
-{\r
- int i;\r
-\r
- if (dhd && dhd->conf) {\r
- for (i=0; i<dhd->conf->pkt_filter_del.count; i++) {\r
- if (id == dhd->conf->pkt_filter_del.id[i]) {\r
- CONFIG_MSG("%d\n", dhd->conf->pkt_filter_del.id[i]);\r
- return true;\r
- }\r
- }\r
- return false;\r
- }\r
- return false;\r
-}\r
-\r
-void\r
-dhd_conf_discard_pkt_filter(dhd_pub_t *dhd)\r
-{\r
- dhd->pktfilter_count = 6;\r
- dhd->pktfilter[DHD_UNICAST_FILTER_NUM] = NULL;\r
- dhd->pktfilter[DHD_BROADCAST_FILTER_NUM] = "101 0 0 0 0xFFFFFFFFFFFF 0xFFFFFFFFFFFF";\r
- dhd->pktfilter[DHD_MULTICAST4_FILTER_NUM] = "102 0 0 0 0xFFFFFF 0x01005E";\r
- dhd->pktfilter[DHD_MULTICAST6_FILTER_NUM] = "103 0 0 0 0xFFFF 0x3333";\r
- dhd->pktfilter[DHD_MDNS_FILTER_NUM] = NULL;\r
- /* Do not add ARP to the packet filter if dhd_master_mode is false. */\r
- dhd->pktfilter[DHD_ARP_FILTER_NUM] = NULL;\r
-\r
- /* IPv4 broadcast address XXX.XXX.XXX.255 */\r
- dhd->pktfilter[dhd->pktfilter_count] = "110 0 0 12 0xFFFF00000000000000000000000000000000000000FF 0x080000000000000000000000000000000000000000FF";\r
- dhd->pktfilter_count++;\r
- /* discard IPv4 multicast address 224.0.0.0/4 */\r
- dhd->pktfilter[dhd->pktfilter_count] = "111 0 0 12 0xFFFF00000000000000000000000000000000F0 0x080000000000000000000000000000000000E0";\r
- dhd->pktfilter_count++;\r
- /* discard IPv6 multicast address FF00::/8 */\r
- dhd->pktfilter[dhd->pktfilter_count] = "112 0 0 12 0xFFFF000000000000000000000000000000000000000000000000FF 0x86DD000000000000000000000000000000000000000000000000FF";\r
- dhd->pktfilter_count++;\r
- /* discard Netbios pkt */\r
- dhd->pktfilter[dhd->pktfilter_count] = "121 0 0 12 0xFFFF000000000000000000FF000000000000000000000000FFFF 0x0800000000000000000000110000000000000000000000000089";\r
- dhd->pktfilter_count++;\r
-\r
-}\r
-#endif /* PKT_FILTER_SUPPORT */\r
-\r
-int\r
-dhd_conf_get_pm(dhd_pub_t *dhd)\r
-{\r
- if (dhd && dhd->conf) {\r
- return dhd->conf->pm;\r
- }\r
- return -1;\r
-}\r
-\r
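-/* Meant to be called on the ioctl path before a command is handed to the\r
- * dongle. A return of 0 lets the ioctl proceed; a non-zero return means the\r
- * caller should skip the ioctl and use the status written to *ret (used to\r
- * gate traffic/control while hostsleep or NO_POWER_SAVE handling is active).\r
- */\r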
-int\r
-dhd_conf_check_hostsleep(dhd_pub_t *dhd, int cmd, void *buf, int len,\r
- int *hostsleep_set, int *hostsleep_val, int *ret)\r
-{\r
- if (dhd->conf->insuspend & (NO_TXCTL_IN_SUSPEND | WOWL_IN_SUSPEND)) {\r
- if (cmd == WLC_SET_VAR) {\r
- char *psleep = NULL;\r
- psleep = strstr(buf, "hostsleep");\r
- if (psleep) {\r
- *hostsleep_set = 1;\r
- memcpy(hostsleep_val, psleep+strlen("hostsleep")+1, sizeof(int));\r
- }\r
- }\r
- if (dhd->hostsleep && (!*hostsleep_set || *hostsleep_val)) {\r
- CONFIG_TRACE("block all cmds other than hostsleep clear\n");\r
- *ret = BCME_EPERM;\r
- goto exit;\r
- } else if (*hostsleep_set && *hostsleep_val) {\r
- CONFIG_TRACE("hostsleep %d => %d\n", dhd->hostsleep, *hostsleep_val);\r
- dhd->hostsleep = *hostsleep_val;\r
- if (dhd->conf->insuspend & NO_TXDATA_IN_SUSPEND) {\r
- dhd_txflowcontrol(dhd, ALL_INTERFACES, ON);\r
- }\r
- if (dhd->hostsleep == 2) {\r
- *ret = 0;\r
- goto exit;\r
- }\r
- } else if (dhd->hostsleep == 2 && !*hostsleep_val) {\r
- CONFIG_TRACE("hostsleep %d => %d\n", dhd->hostsleep, *hostsleep_val);\r
- dhd->hostsleep = *hostsleep_val;\r
- if (dhd->conf->insuspend & NO_TXDATA_IN_SUSPEND) {\r
- dhd_txflowcontrol(dhd, ALL_INTERFACES, OFF);\r
- }\r
- *ret = 0;\r
- goto exit;\r
- }\r
- }\r
-#ifdef NO_POWER_SAVE\r
- if (cmd == WLC_SET_PM) {\r
- if (*(const u32*)buf != 0) {\r
- CONFIG_TRACE("skip PM\n");\r
- *ret = BCME_OK;\r
- goto exit;\r
- }\r
- } else if (cmd == WLC_SET_VAR) {\r
- int cmd_len = strlen("mpc");\r
- if (!strncmp(buf, "mpc", cmd_len)) {\r
- if (*((u32 *)((u8*)buf+cmd_len+1)) != 0) {\r
- CONFIG_TRACE("skip mpc\n");\r
- *ret = BCME_OK;\r
- goto exit;\r
- }\r
- }\r
- }\r
-#endif\r
-\r
- return 0;\r
-exit:\r
- return -1;\r
-}\r
-\r
-void\r
-dhd_conf_get_hostsleep(dhd_pub_t *dhd,\r
- int hostsleep_set, int hostsleep_val, int ret)\r
-{\r
- if (dhd->conf->insuspend & (NO_TXCTL_IN_SUSPEND | WOWL_IN_SUSPEND)) {\r
- if (hostsleep_set) {\r
- if (hostsleep_val && ret) {\r
- CONFIG_TRACE("reset hostsleep %d => 0\n", dhd->hostsleep);\r
- dhd->hostsleep = 0;\r
- if (dhd->conf->insuspend & NO_TXDATA_IN_SUSPEND) {\r
- dhd_txflowcontrol(dhd, ALL_INTERFACES, OFF);\r
- }\r
- } else if (!hostsleep_val && !ret) {\r
- CONFIG_TRACE("set hostsleep %d => 0\n", dhd->hostsleep);\r
- dhd->hostsleep = 0;\r
- if (dhd->conf->insuspend & NO_TXDATA_IN_SUSPEND) {\r
- dhd_txflowcontrol(dhd, ALL_INTERFACES, OFF);\r
- }\r
- }\r
- }\r
- }\r
-}\r
-\r
-#ifdef WL_EXT_WOWL\r
-#define WL_WOWL_TCPFIN (1 << 26)\r
-typedef struct wl_wowl_pattern2 {\r
- char cmd[4];\r
- wl_wowl_pattern_t wowl_pattern;\r
-} wl_wowl_pattern2_t;\r
-static int\r
-dhd_conf_wowl_pattern(dhd_pub_t *dhd, bool add, char *data)\r
-{\r
- uint buf_len = 0;\r
- int id, type, polarity, offset;\r
- char cmd[4]="\0", mask[128]="\0", pattern[128]="\0", mask_tmp[128]="\0", *pmask_tmp;\r
- uint32 masksize, patternsize, pad_len = 0;\r
- wl_wowl_pattern2_t *wowl_pattern2 = NULL;\r
- char *mask_and_pattern;\r
- int ret = 0, i, j, v;\r
-\r
- if (data) {\r
- if (add)\r
- strcpy(cmd, "add");\r
- else\r
- strcpy(cmd, "clr");\r
- if (!strcmp(cmd, "clr")) {\r
- CONFIG_TRACE("wowl_pattern clr\n");\r
- ret = dhd_conf_set_bufiovar(dhd, 0, WLC_SET_VAR, "wowl_pattern", cmd,\r
- sizeof(cmd), FALSE);\r
- goto exit;\r
- }\r
- sscanf(data, "%d %d %d %d %s %s", &id, &type, &polarity, &offset,\r
- mask_tmp, pattern);\r
- masksize = strlen(mask_tmp) -2;\r
- CONFIG_TRACE("0 mask_tmp=%s, masksize=%d\n", mask_tmp, masksize);\r
-\r
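- /* Convert the per-byte mask from the config (each byte 00 or non-zero)\r
- * into the per-bit mask passed to the wowl_pattern iovar, LSB first.\r
- * For example, a config mask of 0xff00ff00ff00ff00 ends up as 0x55 here.\r
- */\r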
- // pad the mask to a multiple of 16 hex digits\r
- if (masksize % 16)\r
- pad_len = (16 - masksize % 16);\r
- for (i=0; i<pad_len; i++)\r
- strcat(mask_tmp, "0");\r
- masksize += pad_len;\r
- CONFIG_TRACE("1 mask_tmp=%s, masksize=%d\n", mask_tmp, masksize);\r
-\r
- // translate 0x00 to 0, others to 1\r
- j = 0;\r
- pmask_tmp = &mask_tmp[2];\r
- for (i=0; i<masksize/2; i++) {\r
- if(strncmp(&pmask_tmp[i*2], "00", 2))\r
- pmask_tmp[j] = '1';\r
- else\r
- pmask_tmp[j] = '0';\r
- j++;\r
- }\r
- pmask_tmp[j] = '\0';\r
- masksize = masksize / 2;\r
- CONFIG_TRACE("2 mask_tmp=%s, masksize=%d\n", mask_tmp, masksize);\r
-\r
- // reverse each group of 8 bits so the first pattern byte maps to the LSB\r
- pmask_tmp = &mask_tmp[2];\r
- for (i=0; i<masksize/8; i++) {\r
- char c;\r
- for (j=0; j<4; j++) {\r
- c = pmask_tmp[i*8+j];\r
- pmask_tmp[i*8+j] = pmask_tmp[(i+1)*8-j-1];\r
- pmask_tmp[(i+1)*8-j-1] = c;\r
- }\r
- }\r
- CONFIG_TRACE("3 mask_tmp=%s, masksize=%d\n", mask_tmp, masksize);\r
-\r
- // pack the bit string into hex digits (4 bits per digit)\r
- j = 0; v = 0;\r
- pmask_tmp = &mask_tmp[2];\r
- strcpy(mask, "0x");\r
- for (i=0; i<masksize; i++) {\r
- v = (v<<1) | (pmask_tmp[i]=='1');\r
- if (((i+1)%4) == 0) {\r
- if (v < 10)\r
- mask[j+2] = v + '0';\r
- else\r
- mask[j+2] = (v-10) + 'a';\r
- j++;\r
- v = 0;\r
- }\r
- }\r
- mask[j+2] = '\0';\r
- masksize = j/2;\r
- CONFIG_TRACE("4 mask=%s, masksize=%d\n", mask, masksize);\r
-\r
- patternsize = (strlen(pattern)-2)/2;\r
- buf_len = sizeof(wl_wowl_pattern2_t) + patternsize + masksize;\r
- wowl_pattern2 = kmalloc(buf_len, GFP_KERNEL);\r
- if (wowl_pattern2 == NULL) {\r
- CONFIG_ERROR("Failed to allocate buffer of %d bytes\n", buf_len);\r
- goto exit;\r
- }\r
- memset(wowl_pattern2, 0, sizeof(wl_wowl_pattern2_t));\r
-\r
- strncpy(wowl_pattern2->cmd, cmd, sizeof(cmd));\r
- wowl_pattern2->wowl_pattern.id = id;\r
- wowl_pattern2->wowl_pattern.type = 0;\r
- wowl_pattern2->wowl_pattern.offset = offset;\r
- mask_and_pattern = (char*)wowl_pattern2 + sizeof(wl_wowl_pattern2_t);\r
-\r
- wowl_pattern2->wowl_pattern.masksize = masksize;\r
- ret = wl_pattern_atoh(mask, mask_and_pattern);\r
- if (ret == -1) {\r
- CONFIG_ERROR("rejecting mask=%s\n", mask);\r
- goto exit;\r
- }\r
-\r
- mask_and_pattern += wowl_pattern2->wowl_pattern.masksize;\r
- wowl_pattern2->wowl_pattern.patternoffset = sizeof(wl_wowl_pattern_t) +\r
- wowl_pattern2->wowl_pattern.masksize;\r
-\r
- wowl_pattern2->wowl_pattern.patternsize = patternsize;\r
- ret = wl_pattern_atoh(pattern, mask_and_pattern);\r
- if (ret == -1) {\r
- CONFIG_ERROR("rejecting pattern=%s\n", pattern);\r
- goto exit;\r
- }\r
-\r
- CONFIG_TRACE("%s %d %s %s\n", cmd, offset, mask, pattern);\r
-\r
- ret = dhd_conf_set_bufiovar(dhd, 0, WLC_SET_VAR, "wowl_pattern",\r
- (char *)wowl_pattern2, buf_len, FALSE);\r
- }\r
-\r
-exit:\r
- if (wowl_pattern2)\r
- kfree(wowl_pattern2);\r
- return ret;\r
-}\r
-\r
-static int\r
-dhd_conf_wowl_wakeind(dhd_pub_t *dhd, bool clear)\r
-{\r
- s8 iovar_buf[WLC_IOCTL_SMLEN];\r
- wl_wowl_wakeind_t *wake = NULL;\r
- int ret = -1;\r
- char clr[6]="clear", wakeind_str[32]="\0";\r
-\r
- if (clear) {\r
- CONFIG_TRACE("wowl_wakeind clear\n");\r
- ret = dhd_conf_set_bufiovar(dhd, 0, WLC_SET_VAR, "wowl_wakeind",\r
- clr, sizeof(clr), 0);\r
- } else {\r
- ret = dhd_conf_get_iovar(dhd, 0, WLC_GET_VAR, "wowl_wakeind",\r
- iovar_buf, sizeof(iovar_buf));\r
- if (!ret) {\r
- wake = (wl_wowl_wakeind_t *) iovar_buf;\r
- if (wake->ucode_wakeind & WL_WOWL_MAGIC)\r
- strcpy(wakeind_str, "(MAGIC packet)");\r
- if (wake->ucode_wakeind & WL_WOWL_NET)\r
- strcpy(wakeind_str, "(Netpattern)");\r
- if (wake->ucode_wakeind & WL_WOWL_DIS)\r
- strcpy(wakeind_str, "(Disassoc/Deauth)");\r
- if (wake->ucode_wakeind & WL_WOWL_BCN)\r
- strcpy(wakeind_str, "(Loss of beacon)");\r
- if (wake->ucode_wakeind & WL_WOWL_TCPKEEP_TIME)\r
- strcpy(wakeind_str, "(TCPKA timeout)");\r
- if (wake->ucode_wakeind & WL_WOWL_TCPKEEP_DATA)\r
- strcpy(wakeind_str, "(TCPKA data)");\r
- if (wake->ucode_wakeind & WL_WOWL_TCPFIN)\r
- strcpy(wakeind_str, "(TCP FIN)");\r
- CONFIG_MSG("wakeind=0x%x %s\n", wake->ucode_wakeind, wakeind_str);\r
- }\r
- }\r
-\r
- return ret;\r
-}\r
-#endif\r
-\r
-int\r
-dhd_conf_mkeep_alive(dhd_pub_t *dhd, int ifidx, int id, int period,\r
- char *packet, bool bcast)\r
-{\r
- wl_mkeep_alive_pkt_t *mkeep_alive_pktp;\r
- int ret = 0, len_bytes=0, buf_len=0;\r
- char *buf = NULL, *iovar_buf = NULL;\r
- uint8 *pdata;\r
-\r
- CONFIG_TRACE("id=%d, period=%d, packet=%s\n", id, period, packet);\r
- if (period >= 0) {\r
- buf = kmalloc(WLC_IOCTL_SMLEN, GFP_KERNEL);\r
- if (buf == NULL) {\r
- CONFIG_ERROR("Failed to allocate buffer of %d bytes\n", WLC_IOCTL_SMLEN);\r
- goto exit;\r
- }\r
- iovar_buf = kmalloc(WLC_IOCTL_SMLEN, GFP_KERNEL);\r
- if (iovar_buf == NULL) {\r
- CONFIG_ERROR("Failed to allocate buffer of %d bytes\n", WLC_IOCTL_SMLEN);\r
- goto exit;\r
- }\r
- mkeep_alive_pktp = (wl_mkeep_alive_pkt_t *)buf;\r
- mkeep_alive_pktp->version = htod16(WL_MKEEP_ALIVE_VERSION);\r
- mkeep_alive_pktp->length = htod16(WL_MKEEP_ALIVE_FIXED_LEN);\r
- mkeep_alive_pktp->keep_alive_id = id;\r
- buf_len += WL_MKEEP_ALIVE_FIXED_LEN;\r
- mkeep_alive_pktp->period_msec = period;\r
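- /* data[] holds the raw frame to transmit: wl_pattern_atoh() fills it from\r
- * the hex string, then bytes 0-5 (destination MAC) are optionally forced to\r
- * broadcast and bytes 6-11 (source MAC) are overwritten with cur_etheraddr.\r
- */\r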
- if (packet && strlen(packet)) {\r
- len_bytes = wl_pattern_atoh(packet, (char *)mkeep_alive_pktp->data);\r
- buf_len += len_bytes;\r
- if (bcast) {\r
- memcpy(mkeep_alive_pktp->data, &ether_bcast, ETHER_ADDR_LEN);\r
- }\r
- ret = dhd_conf_get_iovar(dhd, ifidx, WLC_GET_VAR, "cur_etheraddr",\r
- iovar_buf, WLC_IOCTL_SMLEN);\r
- if (!ret) {\r
- pdata = mkeep_alive_pktp->data;\r
- memcpy(pdata+6, iovar_buf, ETHER_ADDR_LEN);\r
- }\r
- }\r
- mkeep_alive_pktp->len_bytes = htod16(len_bytes);\r
- ret = dhd_conf_set_bufiovar(dhd, ifidx, WLC_SET_VAR, "mkeep_alive",\r
- buf, buf_len, FALSE);\r
- }\r
-\r
-exit:\r
- if (buf)\r
- kfree(buf);\r
- if (iovar_buf)\r
- kfree(iovar_buf);\r
- return ret;\r
-}\r
-\r
-#ifdef ARP_OFFLOAD_SUPPORT\r
-void\r
-dhd_conf_set_garp(dhd_pub_t *dhd, int ifidx, uint32 ipa, bool enable)\r
-{\r
- int i, len = 0, total_len = WLC_IOCTL_SMLEN;\r
- char *iovar_buf = NULL, *packet = NULL;\r
-\r
- if (!dhd->conf->garp || ifidx != 0 || !(dhd->op_mode & DHD_FLAG_STA_MODE))\r
- return;\r
-\r
- CONFIG_TRACE("enable=%d\n", enable);\r
-\r
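- /* Build a gratuitous ARP request as a hex string (broadcast destination,\r
- * our MAC/IP as sender, the same IP as target) and install it as\r
- * mkeep_alive id 0, so the firmware keeps announcing the address, e.g.\r
- * while the host is asleep.\r
- */\r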
- if (enable) {\r
- iovar_buf = kmalloc(total_len, GFP_KERNEL);\r
- if (iovar_buf == NULL) {\r
- CONFIG_ERROR("Failed to allocate buffer of %d bytes\n", total_len);\r
- goto exit;\r
- }\r
- packet = kmalloc(total_len, GFP_KERNEL);\r
- if (packet == NULL) {\r
- CONFIG_ERROR("Failed to allocate buffer of %d bytes\n", total_len);\r
- goto exit;\r
- }\r
- dhd_conf_get_iovar(dhd, ifidx, WLC_GET_VAR, "cur_etheraddr", iovar_buf, total_len);\r
-\r
- len += snprintf(packet+len, total_len-len, "0xffffffffffff");\r
- for (i=0; i<ETHER_ADDR_LEN; i++)\r
- len += snprintf(packet+len, total_len-len, "%02x", (uint8)iovar_buf[i]);\r
- len += snprintf(packet+len, total_len-len, "08060001080006040001");\r
- // Sender Hardware Addr.\r
- for (i=0; i<ETHER_ADDR_LEN; i++)\r
- len += snprintf(packet+len, total_len-len, "%02x", (uint8)iovar_buf[i]);\r
- // Sender IP Addr.\r
- len += snprintf(packet+len, total_len-len, "%02x%02x%02x%02x",\r
- ipa&0xff, (ipa>>8)&0xff, (ipa>>16)&0xff, (ipa>>24)&0xff);\r
- // Target Hardware Addr.\r
- len += snprintf(packet+len, total_len-len, "ffffffffffff");\r
- // Target IP Addr.\r
- len += snprintf(packet+len, total_len-len, "%02x%02x%02x%02x",\r
- ipa&0xff, (ipa>>8)&0xff, (ipa>>16)&0xff, (ipa>>24)&0xff);\r
- len += snprintf(packet+len, total_len-len, "000000000000000000000000000000000000");\r
- }\r
-\r
- dhd_conf_mkeep_alive(dhd, ifidx, 0, dhd->conf->keep_alive_period, packet, TRUE);\r
-\r
-exit:\r
- if (iovar_buf)\r
- kfree(iovar_buf);\r
- if (packet)\r
- kfree(packet);\r
- return;\r
-}\r
-#endif\r
-\r
-uint\r
-dhd_conf_get_insuspend(dhd_pub_t *dhd, uint mask)\r
-{\r
- uint insuspend = 0;\r
-\r
- if (dhd->op_mode & DHD_FLAG_STA_MODE) {\r
- insuspend = dhd->conf->insuspend &\r
- (NO_EVENT_IN_SUSPEND | NO_TXDATA_IN_SUSPEND | NO_TXCTL_IN_SUSPEND |\r
- ROAM_OFFLOAD_IN_SUSPEND | WOWL_IN_SUSPEND);\r
- } else if (dhd->op_mode & DHD_FLAG_HOSTAP_MODE) {\r
- insuspend = dhd->conf->insuspend &\r
- (NO_EVENT_IN_SUSPEND | NO_TXDATA_IN_SUSPEND | NO_TXCTL_IN_SUSPEND |\r
- AP_DOWN_IN_SUSPEND | AP_FILTER_IN_SUSPEND);\r
- }\r
-\r
- return (insuspend & mask);\r
-}\r
-\r
-#ifdef SUSPEND_EVENT\r
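-/* On suspend: save the current event mask, leave only WLC_E_ESCAN_RESULT\r
- * enabled and remember the associated BSSID. On resume: restore the mask,\r
- * then either kick the existing link with a null-data frame or, if the\r
- * association was lost while suspended, synthesize a WLC_E_DEAUTH_IND so the\r
- * upper layers notice the disconnect.\r
- */\r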
-void\r
-dhd_conf_set_suspend_event(dhd_pub_t *dhd, int suspend)\r
-{\r
- struct dhd_conf *conf = dhd->conf;\r
- struct ether_addr bssid;\r
- char suspend_eventmask[WL_EVENTING_MASK_LEN];\r
- wl_event_msg_t msg;\r
- int pm;\r
-#ifdef WL_CFG80211\r
- struct net_device *net;\r
-#endif /* defined(WL_CFG80211) */\r
-\r
- CONFIG_TRACE("Enter\n");\r
- if (suspend) {\r
-#ifdef PROP_TXSTATUS\r
-#if defined(BCMSDIO) || defined(BCMDBUS)\r
- if (dhd->wlfc_enabled) {\r
- dhd_wlfc_deinit(dhd);\r
- conf->wlfc = TRUE;\r
- } else {\r
- conf->wlfc = FALSE;\r
- }\r
-#endif /* BCMSDIO || BCMDBUS */\r
-#endif /* PROP_TXSTATUS */\r
- dhd_conf_get_iovar(dhd, 0, WLC_GET_VAR, "event_msgs",\r
- conf->resume_eventmask, sizeof(conf->resume_eventmask));\r
- memset(suspend_eventmask, 0, sizeof(suspend_eventmask));\r
- setbit(suspend_eventmask, WLC_E_ESCAN_RESULT);\r
- dhd_conf_set_bufiovar(dhd, 0, WLC_SET_VAR, "event_msgs",\r
- suspend_eventmask, sizeof(suspend_eventmask), FALSE);\r
- if (dhd->op_mode & DHD_FLAG_STA_MODE) {\r
- memset(&bssid, 0, ETHER_ADDR_LEN);\r
- dhd_wl_ioctl_cmd(dhd, WLC_GET_BSSID, &bssid, ETHER_ADDR_LEN, FALSE, 0);\r
- if (memcmp(&ether_null, &bssid, ETHER_ADDR_LEN))\r
- memcpy(&conf->bssid_insuspend, &bssid, ETHER_ADDR_LEN);\r
- else\r
- memset(&conf->bssid_insuspend, 0, ETHER_ADDR_LEN);\r
- }\r
- }\r
- else {\r
- dhd_conf_set_bufiovar(dhd, 0, WLC_SET_VAR, "event_msgs",\r
- conf->resume_eventmask, sizeof(conf->resume_eventmask), FALSE);\r
- if (dhd->op_mode & DHD_FLAG_STA_MODE) {\r
- if (memcmp(&ether_null, &conf->bssid_insuspend, ETHER_ADDR_LEN)) {\r
- memset(&bssid, 0, ETHER_ADDR_LEN);\r
- dhd_wl_ioctl_cmd(dhd, WLC_GET_BSSID, &bssid, ETHER_ADDR_LEN,\r
- FALSE, 0);\r
- if (memcmp(&ether_null, &bssid, ETHER_ADDR_LEN)) {\r
- dhd_conf_set_intiovar(dhd, WLC_SET_PM, "WLC_SET_PM", 0, 0, FALSE);\r
- dhd_conf_set_bufiovar(dhd, 0, WLC_SET_VAR, "send_nulldata",\r
- (char *)&bssid, ETHER_ADDR_LEN, FALSE);\r
- OSL_SLEEP(100);\r
- if (conf->pm >= 0)\r
- pm = conf->pm;\r
- else\r
- pm = PM_FAST;\r
- dhd_conf_set_intiovar(dhd, WLC_SET_PM, "WLC_SET_PM", pm, 0, FALSE);\r
- } else {\r
- CONFIG_TRACE("send WLC_E_DEAUTH_IND event\n");\r
- bzero(&msg, sizeof(wl_event_msg_t));\r
- memcpy(&msg.addr, &conf->bssid_insuspend, ETHER_ADDR_LEN);\r
- msg.event_type = hton32(WLC_E_DEAUTH_IND);\r
- msg.status = 0;\r
- msg.reason = hton32(DOT11_RC_DEAUTH_LEAVING);\r
-#if defined(WL_EXT_IAPSTA) || defined(USE_IW)\r
- wl_ext_event_send(dhd->event_params, &msg, NULL);\r
-#endif\r
-#ifdef WL_CFG80211\r
- net = dhd_idx2net(dhd, 0);\r
- if (net) {\r
- wl_cfg80211_event(net, &msg, NULL);\r
- }\r
-#endif /* defined(WL_CFG80211) */\r
- }\r
- }\r
-#ifdef PROP_TXSTATUS\r
-#if defined(BCMSDIO) || defined(BCMDBUS)\r
- if (conf->wlfc) {\r
- dhd_wlfc_init(dhd);\r
- dhd_conf_set_intiovar(dhd, WLC_UP, "WLC_UP", 0, 0, FALSE);\r
- }\r
-#endif\r
-#endif /* PROP_TXSTATUS */\r
- }\r
- }\r
-\r
-}\r
-#endif\r
-\r
-#if defined(WL_CFG80211) || defined(WL_ESCAN)\r
-static void\r
-dhd_conf_wait_event_complete(struct dhd_pub *dhd, int ifidx)\r
-{\r
- s32 timeout = -1;\r
-\r
- timeout = wait_event_interruptible_timeout(dhd->conf->event_complete,\r
- wl_ext_event_complete(dhd, ifidx), msecs_to_jiffies(10000));\r
- if (timeout <= 0 || !wl_ext_event_complete(dhd, ifidx)) {\r
- wl_ext_event_complete(dhd, ifidx);\r
- CONFIG_ERROR("timeout\n");\r
- }\r
-}\r
-#endif\r
-\r
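-/* Apply the "insuspend" policy bits on suspend and undo them on resume:\r
- * roam_off_suspend/bcn_li_dtim and optional roam offload for STA, interface\r
- * down for AP, tx flow control, event suppression, and WOWL/hostsleep plus\r
- * bus sleep when configured.\r
- */\r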
-int\r
-dhd_conf_set_suspend_resume(dhd_pub_t *dhd, int suspend)\r
-{\r
- struct dhd_conf *conf = dhd->conf;\r
- uint insuspend = 0;\r
- int pm;\r
-#ifdef BCMSDIO\r
- uint32 intstatus = 0;\r
- int ret = 0;\r
-#endif\r
-#ifdef WL_EXT_WOWL\r
- int i;\r
-#endif\r
-\r
- insuspend = dhd_conf_get_insuspend(dhd, ALL_IN_SUSPEND);\r
- if (insuspend)\r
- CONFIG_MSG("op_mode %d, suspend %d, suspended %d, insuspend 0x%x, suspend_mode=%d\n",\r
- dhd->op_mode, suspend, conf->suspended, insuspend, conf->suspend_mode);\r
-\r
- if (conf->suspended == suspend || !dhd->up) {\r
- return 0;\r
- }\r
-\r
- if (suspend) {\r
- if (dhd->op_mode & DHD_FLAG_STA_MODE) {\r
- dhd_conf_set_intiovar(dhd, WLC_SET_VAR, "roam_off",\r
- dhd->conf->roam_off_suspend, 0, FALSE);\r
- dhd_conf_set_intiovar(dhd, WLC_SET_VAR, "bcn_li_dtim",\r
- dhd->conf->suspend_bcn_li_dtim, 0, FALSE);\r
- if (insuspend & ROAM_OFFLOAD_IN_SUSPEND)\r
- dhd_conf_enable_roam_offload(dhd, 2);\r
- } else if (dhd->op_mode & DHD_FLAG_HOSTAP_MODE) {\r
- if (insuspend & AP_DOWN_IN_SUSPEND) {\r
- dhd_conf_set_intiovar(dhd, WLC_DOWN, "WLC_DOWN", 1, 0, FALSE);\r
- }\r
- }\r
-#if defined(WL_CFG80211) || defined(WL_ESCAN)\r
- if (insuspend & (NO_EVENT_IN_SUSPEND|NO_TXCTL_IN_SUSPEND|WOWL_IN_SUSPEND)) {\r
- if (conf->suspend_mode == PM_NOTIFIER)\r
- dhd_conf_wait_event_complete(dhd, 0);\r
- }\r
-#endif\r
- if (insuspend & NO_TXDATA_IN_SUSPEND) {\r
- dhd_txflowcontrol(dhd, ALL_INTERFACES, ON);\r
- }\r
-#if defined(WL_CFG80211) || defined(WL_ESCAN)\r
- if (insuspend & (NO_EVENT_IN_SUSPEND|NO_TXCTL_IN_SUSPEND|WOWL_IN_SUSPEND)) {\r
- if (conf->suspend_mode == PM_NOTIFIER)\r
- wl_ext_user_sync(dhd, 0, TRUE);\r
- }\r
-#endif\r
-#ifdef SUSPEND_EVENT\r
- if (insuspend & NO_EVENT_IN_SUSPEND) {\r
- dhd_conf_set_suspend_event(dhd, suspend);\r
- }\r
-#endif\r
- if (dhd->op_mode & DHD_FLAG_STA_MODE) {\r
- if (conf->pm_in_suspend >= 0)\r
- pm = conf->pm_in_suspend;\r
- else if (conf->pm >= 0)\r
- pm = conf->pm;\r
- else\r
- pm = PM_FAST;\r
- dhd_conf_set_intiovar(dhd, WLC_SET_PM, "WLC_SET_PM", pm, 0, FALSE);\r
- }\r
- dhd_conf_set_wl_cmd(dhd, conf->wl_suspend, FALSE);\r
-#ifdef WL_EXT_WOWL\r
- if ((insuspend & WOWL_IN_SUSPEND) && dhd_master_mode) {\r
- dhd_conf_wowl_pattern(dhd, FALSE, "clr");\r
- for(i=0; i<conf->pkt_filter_add.count; i++) {\r
- dhd_conf_wowl_pattern(dhd, TRUE, conf->pkt_filter_add.filter[i]);\r
- }\r
- dhd_conf_set_intiovar(dhd, WLC_SET_VAR, "wowl", conf->wowl, 0, FALSE);\r
- dhd_conf_set_intiovar(dhd, WLC_SET_VAR, "wowl_activate", 1, 0, FALSE);\r
- dhd_conf_wowl_wakeind(dhd, TRUE);\r
- dhd_conf_set_intiovar(dhd, WLC_SET_VAR, "hostsleep", 1, 0, FALSE);\r
-#ifdef BCMSDIO\r
- ret = dhd_bus_sleep(dhd, TRUE, &intstatus);\r
- CONFIG_TRACE("ret = %d, intstatus = 0x%x\n", ret, intstatus);\r
-#endif\r
- } else\r
-#endif\r
- if (insuspend & NO_TXCTL_IN_SUSPEND) {\r
- dhd_conf_set_intiovar(dhd, WLC_SET_VAR, "hostsleep", 2, 0, FALSE);\r
-#ifdef BCMSDIO\r
- ret = dhd_bus_sleep(dhd, TRUE, &intstatus);\r
- CONFIG_TRACE("ret = %d, intstatus = 0x%x\n", ret, intstatus);\r
-#endif\r
- }\r
- conf->suspended = TRUE;\r
- } else {\r
- if (insuspend & (WOWL_IN_SUSPEND | NO_TXCTL_IN_SUSPEND)) {\r
- dhd_conf_set_intiovar(dhd, WLC_SET_VAR, "hostsleep", 0, 0, FALSE);\r
- }\r
-#ifdef WL_EXT_WOWL\r
- if (insuspend & WOWL_IN_SUSPEND) {\r
- dhd_conf_wowl_wakeind(dhd, FALSE);\r
- dhd_conf_set_intiovar(dhd, WLC_SET_VAR, "wowl_activate", 0, 0, FALSE);\r
- dhd_conf_set_intiovar(dhd, WLC_SET_VAR, "wowl", 0, 0, FALSE);\r
- dhd_conf_wowl_pattern(dhd, FALSE, "clr");\r
- }\r
-#endif\r
- dhd_conf_set_wl_cmd(dhd, conf->wl_resume, FALSE);\r
- dhd_conf_get_iovar(dhd, 0, WLC_GET_PM, "WLC_GET_PM", (char *)&pm, sizeof(pm));\r
- CONFIG_TRACE("PM in suspend = %d\n", pm);\r
-#ifdef SUSPEND_EVENT\r
- if (insuspend & NO_EVENT_IN_SUSPEND) {\r
- dhd_conf_set_suspend_event(dhd, suspend);\r
- }\r
-#endif\r
-#if defined(WL_CFG80211) || defined(WL_ESCAN)\r
- if (insuspend & (NO_EVENT_IN_SUSPEND|NO_TXCTL_IN_SUSPEND|WOWL_IN_SUSPEND)) {\r
- if (conf->suspend_mode == PM_NOTIFIER)\r
- wl_ext_user_sync(dhd, 0, FALSE);\r
- }\r
-#endif\r
- if (insuspend & NO_TXDATA_IN_SUSPEND) {\r
- dhd_txflowcontrol(dhd, ALL_INTERFACES, OFF);\r
- }\r
- if (dhd->op_mode & DHD_FLAG_STA_MODE) {\r
- if (insuspend & ROAM_OFFLOAD_IN_SUSPEND)\r
- dhd_conf_enable_roam_offload(dhd, 0);\r
- dhd_conf_set_intiovar(dhd, WLC_SET_VAR, "bcn_li_dtim", 0, 0, FALSE);\r
- dhd_conf_set_intiovar(dhd, WLC_SET_VAR, "roam_off",\r
- dhd->conf->roam_off, 0, FALSE);\r
- } else if (dhd->op_mode & DHD_FLAG_HOSTAP_MODE) {\r
- if (insuspend & AP_DOWN_IN_SUSPEND) {\r
- dhd_conf_set_intiovar(dhd, WLC_UP, "WLC_UP", 0, 0, FALSE);\r
- }\r
- }\r
- if (dhd->op_mode & DHD_FLAG_STA_MODE) {\r
- if (conf->pm >= 0)\r
- pm = conf->pm;\r
- else\r
- pm = PM_FAST;\r
- dhd_conf_set_intiovar(dhd, WLC_SET_PM, "WLC_SET_PM", pm, 0, FALSE);\r
- }\r
- conf->suspended = FALSE;\r
- }\r
-\r
- return 0;\r
-}\r
-\r
-#ifdef PROP_TXSTATUS\r
-int\r
-dhd_conf_get_disable_proptx(dhd_pub_t *dhd)\r
-{\r
- struct dhd_conf *conf = dhd->conf;\r
- int disable_proptx = -1;\r
- int fw_proptx = 0;\r
-\r
- /* Check fw proptx support in priority order:\r
- * 1st: check fw support via wl cap\r
- * 2nd: 4334/43340/43341/43241 support proptx but do not advertise it in wl cap,\r
- *      so enable it by default; set disable_proptx=1 in config.txt to turn it off\r
- * 3rd: disable when proptxstatus is not supported in wl cap\r
- */\r
- if (FW_SUPPORTED(dhd, proptxstatus)) {\r
- fw_proptx = 1;\r
- } else if (conf->chip == BCM4334_CHIP_ID || conf->chip == BCM43340_CHIP_ID ||\r
- conf->chip == BCM43341_CHIP_ID || conf->chip == BCM4324_CHIP_ID) {\r
- fw_proptx = 1;\r
- } else {\r
- fw_proptx = 0;\r
- }\r
-\r
- /* returned disable_proptx value:\r
- * -1: disable in STA and enable in P2P (follow the original dhd behavior\r
- *     when PROP_TXSTATUS_VSDB is enabled)\r
- *  0: depend on fw support\r
- *  1: always disable proptx\r
- */\r
- if (conf->disable_proptx == 0) {\r
- // check fw support as well\r
- if (fw_proptx)\r
- disable_proptx = 0;\r
- else\r
- disable_proptx = 1;\r
- } else if (conf->disable_proptx >= 1) {\r
- disable_proptx = 1;\r
- } else {\r
- // check fw support as well\r
- if (fw_proptx)\r
- disable_proptx = -1;\r
- else\r
- disable_proptx = 1;\r
- }\r
-\r
- CONFIG_MSG("fw_proptx=%d, disable_proptx=%d\n", fw_proptx, disable_proptx);\r
-\r
- return disable_proptx;\r
-}\r
-#endif\r
-\r
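-/* Scan varbuf from start_pos and copy the next non-comment "name=value" line\r
- * into pickbuf; '#' starts a comment and a trailing '\' continues the line.\r
- * The return value is the position where the next variable begins, suitable\r
- * as start_pos for the following call.\r
- */\r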
-uint\r
-pick_config_vars(char *varbuf, uint len, uint start_pos, char *pickbuf, int picklen)\r
-{\r
- bool findNewline, changenewline=FALSE, pick=FALSE;\r
- int column;\r
- uint n, pick_column=0;\r
-\r
- findNewline = FALSE;\r
- column = 0;\r
-\r
- if (start_pos >= len) {\r
- CONFIG_ERROR("wrong start pos\n");\r
- return 0;\r
- }\r
-\r
- for (n = start_pos; n < len; n++) {\r
- if (varbuf[n] == '\r')\r
- continue;\r
- if ((findNewline || changenewline) && varbuf[n] != '\n')\r
- continue;\r
- findNewline = FALSE;\r
- if (varbuf[n] == '#') {\r
- findNewline = TRUE;\r
- continue;\r
- }\r
- if (varbuf[n] == '\\') {\r
- changenewline = TRUE;\r
- continue;\r
- }\r
- if (!changenewline && varbuf[n] == '\n') {\r
- if (column == 0)\r
- continue;\r
- column = 0;\r
- continue;\r
- }\r
- if (changenewline && varbuf[n] == '\n') {\r
- changenewline = FALSE;\r
- continue;\r
- }\r
-\r
- if (column==0 && !pick) { // start picking\r
- pick = TRUE;\r
- column++;\r
- pick_column = 0;\r
- } else {\r
- if (pick && column==0) { // stop picking\r
- pick = FALSE;\r
- break;\r
- } else\r
- column++;\r
- }\r
- if (pick) {\r
- if (varbuf[n] == 0x9)\r
- continue;\r
- if (pick_column >= picklen)\r
- break;\r
- pickbuf[pick_column] = varbuf[n];\r
- pick_column++;\r
- }\r
- }\r
-\r
- return n; // return current position\r
-}\r
-\r
-bool\r
-dhd_conf_read_chiprev(dhd_pub_t *dhd, int *chip_match,\r
- char *full_param, uint len_param)\r
-{\r
- char *data = full_param+len_param, *pick_tmp, *pch;\r
- uint chip = 0, rev = 0;\r
-\r
- /* Process chip, regrev:\r
- * chip=[chipid], rev=[rev]\r
- * Ex: chip=0x4359, rev=9\r
- */\r
- if (!strncmp("chip=", full_param, len_param)) {\r
- chip = (int)simple_strtol(data, NULL, 0);\r
- pick_tmp = data;\r
- pch = bcmstrstr(pick_tmp, "rev=");\r
- if (pch) {\r
- rev = (int)simple_strtol(pch+strlen("rev="), NULL, 0);\r
- }\r
- if (chip == dhd->conf->chip && rev == dhd->conf->chiprev)\r
- *chip_match = 1;\r
- else\r
- *chip_match = 0;\r
- CONFIG_MSG("chip=0x%x, rev=%d, chip_match=%d\n", chip, rev, *chip_match);\r
- }\r
-\r
- return TRUE;\r
-}\r
-\r
-bool\r
-dhd_conf_read_log_level(dhd_pub_t *dhd, char *full_param, uint len_param)\r
-{\r
- char *data = full_param+len_param;\r
-\r
- if (!strncmp("dhd_msg_level=", full_param, len_param)) {\r
- dhd_msg_level = (int)simple_strtol(data, NULL, 0);\r
- CONFIG_MSG("dhd_msg_level = 0x%X\n", dhd_msg_level);\r
- }\r
- else if (!strncmp("dump_msg_level=", full_param, len_param)) {\r
- dump_msg_level = (int)simple_strtol(data, NULL, 0);\r
- CONFIG_MSG("dump_msg_level = 0x%X\n", dump_msg_level);\r
- }\r
-#ifdef BCMSDIO\r
- else if (!strncmp("sd_msglevel=", full_param, len_param)) {\r
- sd_msglevel = (int)simple_strtol(data, NULL, 0);\r
- CONFIG_MSG("sd_msglevel = 0x%X\n", sd_msglevel);\r
- }\r
-#endif\r
-#ifdef BCMDBUS\r
- else if (!strncmp("dbus_msglevel=", full_param, len_param)) {\r
- dbus_msglevel = (int)simple_strtol(data, NULL, 0);\r
- CONFIG_MSG("dbus_msglevel = 0x%X\n", dbus_msglevel);\r
- }\r
-#endif\r
- else if (!strncmp("android_msg_level=", full_param, len_param)) {\r
- android_msg_level = (int)simple_strtol(data, NULL, 0);\r
- CONFIG_MSG("android_msg_level = 0x%X\n", android_msg_level);\r
- }\r
- else if (!strncmp("config_msg_level=", full_param, len_param)) {\r
- config_msg_level = (int)simple_strtol(data, NULL, 0);\r
- CONFIG_MSG("config_msg_level = 0x%X\n", config_msg_level);\r
- }\r
-#ifdef WL_CFG80211\r
- else if (!strncmp("wl_dbg_level=", full_param, len_param)) {\r
- wl_dbg_level = (int)simple_strtol(data, NULL, 0);\r
- CONFIG_MSG("wl_dbg_level = 0x%X\n", wl_dbg_level);\r
- }\r
-#endif\r
-#if defined(WL_WIRELESS_EXT)\r
- else if (!strncmp("iw_msg_level=", full_param, len_param)) {\r
- iw_msg_level = (int)simple_strtol(data, NULL, 0);\r
- CONFIG_MSG("iw_msg_level = 0x%X\n", iw_msg_level);\r
- }\r
-#endif\r
-#if defined(DHD_DEBUG)\r
- else if (!strncmp("dhd_console_ms=", full_param, len_param)) {\r
- dhd->dhd_console_ms = (int)simple_strtol(data, NULL, 0);\r
- CONFIG_MSG("dhd_console_ms = 0x%X\n", dhd->dhd_console_ms);\r
- }\r
-#endif\r
- else\r
- return false;\r
-\r
- return true;\r
-}\r
-\r
-void\r
-dhd_conf_read_wme_ac_value(wme_param_t *wme, char *pick, int ac_val)\r
-{\r
- char *pick_tmp, *pch;\r
-\r
- pick_tmp = pick;\r
- pch = bcmstrstr(pick_tmp, "aifsn ");\r
- if (pch) {\r
- wme->aifsn[ac_val] = (int)simple_strtol(pch+strlen("aifsn "), NULL, 0);\r
- CONFIG_MSG("ac_val=%d, aifsn=%d\n", ac_val, wme->aifsn[ac_val]);\r
- }\r
- pick_tmp = pick;\r
- pch = bcmstrstr(pick_tmp, "ecwmin ");\r
- if (pch) {\r
- wme->ecwmin[ac_val] = (int)simple_strtol(pch+strlen("ecwmin "), NULL, 0);\r
- CONFIG_MSG("ac_val=%d, ecwmin=%d\n", ac_val, wme->ecwmin[ac_val]);\r
- }\r
- pick_tmp = pick;\r
- pch = bcmstrstr(pick_tmp, "ecwmax ");\r
- if (pch) {\r
- wme->ecwmax[ac_val] = (int)simple_strtol(pch+strlen("ecwmax "), NULL, 0);\r
- CONFIG_MSG("ac_val=%d, ecwmax=%d\n", ac_val, wme->ecwmax[ac_val]);\r
- }\r
- pick_tmp = pick;\r
- pch = bcmstrstr(pick_tmp, "txop ");\r
- if (pch) {\r
- wme->txop[ac_val] = (int)simple_strtol(pch+strlen("txop "), NULL, 0);\r
- CONFIG_MSG("ac_val=%d, txop=0x%x\n", ac_val, wme->txop[ac_val]);\r
- }\r
-\r
-}\r
-\r
-bool\r
-dhd_conf_read_wme_ac_params(dhd_pub_t *dhd, char *full_param, uint len_param)\r
-{\r
- struct dhd_conf *conf = dhd->conf;\r
- char *data = full_param+len_param;\r
-\r
- // wme_ac_sta_be=aifsn 1 ecwmin 2 ecwmax 3 txop 0x5e\r
- // wme_ac_sta_vo=aifsn 1 ecwmin 1 ecwmax 1 txop 0x5e\r
-\r
- if (!strncmp("force_wme_ac=", full_param, len_param)) {\r
- conf->force_wme_ac = (int)simple_strtol(data, NULL, 10);\r
- CONFIG_MSG("force_wme_ac = %d\n", conf->force_wme_ac);\r
- }\r
- else if (!strncmp("wme_ac_sta_be=", full_param, len_param)) {\r
- dhd_conf_read_wme_ac_value(&conf->wme_sta, data, AC_BE);\r
- }\r
- else if (!strncmp("wme_ac_sta_bk=", full_param, len_param)) {\r
- dhd_conf_read_wme_ac_value(&conf->wme_sta, data, AC_BK);\r
- }\r
- else if (!strncmp("wme_ac_sta_vi=", full_param, len_param)) {\r
- dhd_conf_read_wme_ac_value(&conf->wme_sta, data, AC_VI);\r
- }\r
- else if (!strncmp("wme_ac_sta_vo=", full_param, len_param)) {\r
- dhd_conf_read_wme_ac_value(&conf->wme_sta, data, AC_VO);\r
- }\r
- else if (!strncmp("wme_ac_ap_be=", full_param, len_param)) {\r
- dhd_conf_read_wme_ac_value(&conf->wme_ap, data, AC_BE);\r
- }\r
- else if (!strncmp("wme_ac_ap_bk=", full_param, len_param)) {\r
- dhd_conf_read_wme_ac_value(&conf->wme_ap, data, AC_BK);\r
- }\r
- else if (!strncmp("wme_ac_ap_vi=", full_param, len_param)) {\r
- dhd_conf_read_wme_ac_value(&conf->wme_ap, data, AC_VI);\r
- }\r
- else if (!strncmp("wme_ac_ap_vo=", full_param, len_param)) {\r
- dhd_conf_read_wme_ac_value(&conf->wme_ap, data, AC_VO);\r
- }\r
- else\r
- return false;\r
-\r
- return true;\r
-}\r
-\r
-#ifdef BCMSDIO\r
-bool\r
-dhd_conf_read_fw_by_mac(dhd_pub_t *dhd, char *full_param, uint len_param)\r
-{\r
- int i, j;\r
- char *pch, *pick_tmp;\r
- wl_mac_list_t *mac_list;\r
- wl_mac_range_t *mac_range;\r
- struct dhd_conf *conf = dhd->conf;\r
- char *data = full_param+len_param;\r
-\r
- /* Process fw_by_mac:\r
- * fw_by_mac=[fw_mac_num] \\r
- * [fw_name1] [mac_num1] [oui1-1] [nic_start1-1] [nic_end1-1] \\r
- * [oui1-1] [nic_start1-1] [nic_end1-1]... \\r
- * [oui1-n] [nic_start1-n] [nic_end1-n] \\r
- * [fw_name2] [mac_num2] [oui2-1] [nic_start2-1] [nic_end2-1] \\r
- * [oui2-1] [nic_start2-1] [nic_end2-1]... \\r
- * [oui2-n] [nic_start2-n] [nic_end2-n] \\r
- * Ex: fw_by_mac=2 \\r
- * fw_bcmdhd1.bin 2 0x0022F4 0xE85408 0xE8549D 0x983B16 0x3557A9 0x35582A \\r
- * fw_bcmdhd2.bin 3 0x0022F4 0xE85408 0xE8549D 0x983B16 0x3557A9 0x35582A \\r
- * 0x983B16 0x916157 0x916487\r
- */\r
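- /* Each [oui] [nic_start] [nic_end] triple describes a MAC address range;\r
- * the matching code elsewhere is expected to pick [fw_nameX] when the\r
- * device MAC falls inside one of that firmware's ranges.\r
- */\r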
-\r
- if (!strncmp("fw_by_mac=", full_param, len_param)) {\r
- dhd_conf_free_mac_list(&conf->fw_by_mac);\r
- pick_tmp = data;\r
- pch = bcmstrtok(&pick_tmp, " ", 0);\r
- conf->fw_by_mac.count = (uint32)simple_strtol(pch, NULL, 0);\r
- if (!(mac_list = kmalloc(sizeof(wl_mac_list_t)*conf->fw_by_mac.count,\r
- GFP_KERNEL))) {\r
- conf->fw_by_mac.count = 0;\r
- CONFIG_ERROR("kmalloc failed\n");\r
- }\r
- CONFIG_MSG("fw_count=%d\n", conf->fw_by_mac.count);\r
- conf->fw_by_mac.m_mac_list_head = mac_list;\r
- for (i=0; i<conf->fw_by_mac.count; i++) {\r
- pch = bcmstrtok(&pick_tmp, " ", 0);\r
- strcpy(mac_list[i].name, pch);\r
- pch = bcmstrtok(&pick_tmp, " ", 0);\r
- mac_list[i].count = (uint32)simple_strtol(pch, NULL, 0);\r
- CONFIG_MSG("name=%s, mac_count=%d\n",\r
- mac_list[i].name, mac_list[i].count);\r
- if (!(mac_range = kmalloc(sizeof(wl_mac_range_t)*mac_list[i].count,\r
- GFP_KERNEL))) {\r
- mac_list[i].count = 0;\r
- CONFIG_ERROR("kmalloc failed\n");\r
- break;\r
- }\r
- mac_list[i].mac = mac_range;\r
- for (j=0; j<mac_list[i].count; j++) {\r
- pch = bcmstrtok(&pick_tmp, " ", 0);\r
- mac_range[j].oui = (uint32)simple_strtol(pch, NULL, 0);\r
- pch = bcmstrtok(&pick_tmp, " ", 0);\r
- mac_range[j].nic_start = (uint32)simple_strtol(pch, NULL, 0);\r
- pch = bcmstrtok(&pick_tmp, " ", 0);\r
- mac_range[j].nic_end = (uint32)simple_strtol(pch, NULL, 0);\r
- CONFIG_MSG("oui=0x%06X, nic_start=0x%06X, nic_end=0x%06X\n",\r
- mac_range[j].oui, mac_range[j].nic_start, mac_range[j].nic_end);\r
- }\r
- }\r
- }\r
- else\r
- return false;\r
-\r
- return true;\r
-}\r
-\r
-bool\r
-dhd_conf_read_nv_by_mac(dhd_pub_t *dhd, char *full_param, uint len_param)\r
-{\r
- int i, j;\r
- char *pch, *pick_tmp;\r
- wl_mac_list_t *mac_list;\r
- wl_mac_range_t *mac_range;\r
- struct dhd_conf *conf = dhd->conf;\r
- char *data = full_param+len_param;\r
-\r
- /* Process nv_by_mac:\r
- * [nv_by_mac]: The same format as fw_by_mac\r
- */\r
- if (!strncmp("nv_by_mac=", full_param, len_param)) {\r
- dhd_conf_free_mac_list(&conf->nv_by_mac);\r
- pick_tmp = data;\r
- pch = bcmstrtok(&pick_tmp, " ", 0);\r
- conf->nv_by_mac.count = (uint32)simple_strtol(pch, NULL, 0);\r
- if (!(mac_list = kmalloc(sizeof(wl_mac_list_t)*conf->nv_by_mac.count,\r
- GFP_KERNEL))) {\r
- conf->nv_by_mac.count = 0;\r
- CONFIG_ERROR("kmalloc failed\n");\r
- }\r
- CONFIG_MSG("nv_count=%d\n", conf->nv_by_mac.count);\r
- conf->nv_by_mac.m_mac_list_head = mac_list;\r
- for (i=0; i<conf->nv_by_mac.count; i++) {\r
- pch = bcmstrtok(&pick_tmp, " ", 0);\r
- strcpy(mac_list[i].name, pch);\r
- pch = bcmstrtok(&pick_tmp, " ", 0);\r
- mac_list[i].count = (uint32)simple_strtol(pch, NULL, 0);\r
- CONFIG_MSG("name=%s, mac_count=%d\n",\r
- mac_list[i].name, mac_list[i].count);\r
- if (!(mac_range = kmalloc(sizeof(wl_mac_range_t)*mac_list[i].count,\r
- GFP_KERNEL))) {\r
- mac_list[i].count = 0;\r
- CONFIG_ERROR("kmalloc failed\n");\r
- break;\r
- }\r
- mac_list[i].mac = mac_range;\r
- for (j=0; j<mac_list[i].count; j++) {\r
- pch = bcmstrtok(&pick_tmp, " ", 0);\r
- mac_range[j].oui = (uint32)simple_strtol(pch, NULL, 0);\r
- pch = bcmstrtok(&pick_tmp, " ", 0);\r
- mac_range[j].nic_start = (uint32)simple_strtol(pch, NULL, 0);\r
- pch = bcmstrtok(&pick_tmp, " ", 0);\r
- mac_range[j].nic_end = (uint32)simple_strtol(pch, NULL, 0);\r
- CONFIG_MSG("oui=0x%06X, nic_start=0x%06X, nic_end=0x%06X\n",\r
- mac_range[j].oui, mac_range[j].nic_start, mac_range[j].nic_end);\r
- }\r
- }\r
- }\r
- else\r
- return false;\r
-\r
- return true;\r
-}\r
-#endif\r
-\r
-bool\r
-dhd_conf_read_nv_by_chip(dhd_pub_t *dhd, char *full_param, uint len_param)\r
-{\r
- int i;\r
- char *pch, *pick_tmp;\r
- wl_chip_nv_path_t *chip_nv_path;\r
- struct dhd_conf *conf = dhd->conf;\r
- char *data = full_param+len_param;\r
-\r
- /* Process nv_by_chip:\r
- * nv_by_chip=[nv_chip_num] \\r
- * [chip1] [chiprev1] [nv_name1] [chip2] [chiprev2] [nv_name2] \\r
- * Ex: nv_by_chip=2 \\r
- * 43430 0 nvram_ap6212.txt 43430 1 nvram_ap6212a.txt \\r
- */\r
- if (!strncmp("nv_by_chip=", full_param, len_param)) {\r
- dhd_conf_free_chip_nv_path_list(&conf->nv_by_chip);\r
- pick_tmp = data;\r
- pch = bcmstrtok(&pick_tmp, " ", 0);\r
- conf->nv_by_chip.count = (uint32)simple_strtol(pch, NULL, 0);\r
- if (!(chip_nv_path = kmalloc(sizeof(wl_mac_list_t)*conf->nv_by_chip.count,\r
- GFP_KERNEL))) {\r
- conf->nv_by_chip.count = 0;\r
- CONFIG_ERROR("kmalloc failed\n");\r
- }\r
- CONFIG_MSG("nv_by_chip_count=%d\n", conf->nv_by_chip.count);\r
- conf->nv_by_chip.m_chip_nv_path_head = chip_nv_path;\r
- for (i=0; i<conf->nv_by_chip.count; i++) {\r
- pch = bcmstrtok(&pick_tmp, " ", 0);\r
- chip_nv_path[i].chip = (uint32)simple_strtol(pch, NULL, 0);\r
- pch = bcmstrtok(&pick_tmp, " ", 0);\r
- chip_nv_path[i].chiprev = (uint32)simple_strtol(pch, NULL, 0);\r
- pch = bcmstrtok(&pick_tmp, " ", 0);\r
- strcpy(chip_nv_path[i].name, pch);\r
- CONFIG_MSG("chip=0x%x, chiprev=%d, name=%s\n",\r
- chip_nv_path[i].chip, chip_nv_path[i].chiprev, chip_nv_path[i].name);\r
- }\r
- }\r
- else\r
- return false;\r
-\r
- return true;\r
-}\r
-\r
-bool\r
-dhd_conf_read_roam_params(dhd_pub_t *dhd, char *full_param, uint len_param)\r
-{\r
- struct dhd_conf *conf = dhd->conf;\r
- char *data = full_param+len_param;\r
-\r
- if (!strncmp("roam_off=", full_param, len_param)) {\r
- if (!strncmp(data, "0", 1))\r
- conf->roam_off = 0;\r
- else\r
- conf->roam_off = 1;\r
- CONFIG_MSG("roam_off = %d\n", conf->roam_off);\r
- }\r
- else if (!strncmp("roam_off_suspend=", full_param, len_param)) {\r
- if (!strncmp(data, "0", 1))\r
- conf->roam_off_suspend = 0;\r
- else\r
- conf->roam_off_suspend = 1;\r
- CONFIG_MSG("roam_off_suspend = %d\n", conf->roam_off_suspend);\r
- }\r
- else if (!strncmp("roam_trigger=", full_param, len_param)) {\r
- conf->roam_trigger[0] = (int)simple_strtol(data, NULL, 10);\r
- CONFIG_MSG("roam_trigger = %d\n", conf->roam_trigger[0]);\r
- }\r
- else if (!strncmp("roam_scan_period=", full_param, len_param)) {\r
- conf->roam_scan_period[0] = (int)simple_strtol(data, NULL, 10);\r
- CONFIG_MSG("roam_scan_period = %d\n", conf->roam_scan_period[0]);\r
- }\r
- else if (!strncmp("roam_delta=", full_param, len_param)) {\r
- conf->roam_delta[0] = (int)simple_strtol(data, NULL, 10);\r
- CONFIG_MSG("roam_delta = %d\n", conf->roam_delta[0]);\r
- }\r
- else if (!strncmp("fullroamperiod=", full_param, len_param)) {\r
- conf->fullroamperiod = (int)simple_strtol(data, NULL, 10);\r
- CONFIG_MSG("fullroamperiod = %d\n", conf->fullroamperiod);\r
- } else\r
- return false;\r
-\r
- return true;\r
-}\r
-\r
-bool\r
-dhd_conf_read_country(dhd_pub_t *dhd, char *full_param, uint len_param)\r
-{\r
- struct dhd_conf *conf = dhd->conf;\r
- country_list_t *country_next = NULL, *country;\r
- int i, count = 0;\r
- char *pch, *pick_tmp, *pick_tmp2;\r
- char *data = full_param+len_param;\r
- uint len_data = strlen(data);\r
-\r
- /* Process country_list:\r
- * country_list=[country1]:[ccode1]/[regrev1],\r
- * [country2]:[ccode2]/[regrev2] \\r
- * Ex: country_list=US:US/0, TW:TW/1\r
- */\r
- if (!strncmp("ccode=", full_param, len_param)) {\r
- len_data = min((uint)WLC_CNTRY_BUF_SZ, len_data);\r
- memset(&conf->cspec, 0, sizeof(wl_country_t));\r
- memcpy(conf->cspec.country_abbrev, data, len_data);\r
- memcpy(conf->cspec.ccode, data, len_data);\r
- CONFIG_MSG("ccode = %s\n", conf->cspec.ccode);\r
- }\r
- else if (!strncmp("regrev=", full_param, len_param)) {\r
- conf->cspec.rev = (int32)simple_strtol(data, NULL, 10);\r
- CONFIG_MSG("regrev = %d\n", conf->cspec.rev);\r
- }\r
- else if (!strncmp("country_list=", full_param, len_param)) {\r
- dhd_conf_free_country_list(conf);\r
- pick_tmp = data;\r
- for (i=0; i<CONFIG_COUNTRY_LIST_SIZE; i++) {\r
- pick_tmp2 = bcmstrtok(&pick_tmp, ", ", 0);\r
- if (!pick_tmp2)\r
- break;\r
- pch = bcmstrtok(&pick_tmp2, ":", 0);\r
- if (!pch)\r
- break;\r
- country = NULL;\r
- if (!(country = kmalloc(sizeof(country_list_t), GFP_KERNEL))) {\r
- CONFIG_ERROR("kmalloc failed\n");\r
- break;\r
- }\r
- memset(country, 0, sizeof(country_list_t));\r
-\r
- memcpy(country->cspec.country_abbrev, pch, 2);\r
- pch = bcmstrtok(&pick_tmp2, "/", 0);\r
- if (!pch) {\r
- kfree(country);\r
- break;\r
- }\r
- memcpy(country->cspec.ccode, pch, 2);\r
- pch = bcmstrtok(&pick_tmp2, "/", 0);\r
- if (!pch) {\r
- kfree(country);\r
- break;\r
- }\r
- country->cspec.rev = (int32)simple_strtol(pch, NULL, 10);\r
- count++;\r
- if (!conf->country_head) {\r
- conf->country_head = country;\r
- country_next = country;\r
- } else {\r
- country_next->next = country;\r
- country_next = country;\r
- }\r
- CONFIG_TRACE("abbrev=%s, ccode=%s, regrev=%d\n",\r
- country->cspec.country_abbrev, country->cspec.ccode, country->cspec.rev);\r
- }\r
- CONFIG_MSG("%d country in list\n", count);\r
- }\r
- else\r
- return false;\r
-\r
- return true;\r
-}\r
-\r
-bool\r
-dhd_conf_read_mchan_params(dhd_pub_t *dhd, char *full_param, uint len_param)\r
-{\r
- int i;\r
- char *pch, *pick_tmp, *pick_tmp2;\r
- struct dhd_conf *conf = dhd->conf;\r
- mchan_params_t *mchan_next = NULL, *mchan;\r
- char *data = full_param+len_param;\r
-\r
- /* Process mchan_bw:\r
- * mchan_bw=[val]/[any/go/gc]/[any/source/sink]\r
- * Ex: mchan_bw=80/go/source, 30/gc/sink\r
- */\r
- if (!strncmp("mchan_bw=", full_param, len_param)) {\r
- dhd_conf_free_mchan_list(conf);\r
- pick_tmp = data;\r
- for (i=0; i<MCHAN_MAX_NUM; i++) {\r
- pick_tmp2 = bcmstrtok(&pick_tmp, ", ", 0);\r
- if (!pick_tmp2)\r
- break;\r
- pch = bcmstrtok(&pick_tmp2, "/", 0);\r
- if (!pch)\r
- break;\r
-\r
- mchan = NULL;\r
- if (!(mchan = kmalloc(sizeof(mchan_params_t), GFP_KERNEL))) {\r
- CONFIG_ERROR("kmalloc failed\n");\r
- break;\r
- }\r
- memset(mchan, 0, sizeof(mchan_params_t));\r
-\r
- mchan->bw = (int)simple_strtol(pch, NULL, 0);\r
- if (mchan->bw < 0 || mchan->bw > 100) {\r
- CONFIG_ERROR("wrong bw %d\n", mchan->bw);\r
- kfree(mchan);\r
- break;\r
- }\r
-\r
- pch = bcmstrtok(&pick_tmp2, "/", 0);\r
- if (!pch) {\r
- kfree(mchan);\r
- break;\r
- } else {\r
- if (bcmstrstr(pch, "any")) {\r
- mchan->p2p_mode = -1;\r
- } else if (bcmstrstr(pch, "go")) {\r
- mchan->p2p_mode = WL_P2P_IF_GO;\r
- } else if (bcmstrstr(pch, "gc")) {\r
- mchan->p2p_mode = WL_P2P_IF_CLIENT;\r
- }\r
- }\r
- pch = bcmstrtok(&pick_tmp2, "/", 0);\r
- if (!pch) {\r
- kfree(mchan);\r
- break;\r
- } else {\r
- if (bcmstrstr(pch, "any")) {\r
- mchan->miracast_mode = -1;\r
- } else if (bcmstrstr(pch, "source")) {\r
- mchan->miracast_mode = MIRACAST_SOURCE;\r
- } else if (bcmstrstr(pch, "sink")) {\r
- mchan->miracast_mode = MIRACAST_SINK;\r
- }\r
- }\r
- if (!conf->mchan) {\r
- conf->mchan = mchan;\r
- mchan_next = mchan;\r
- } else {\r
- mchan_next->next = mchan;\r
- mchan_next = mchan;\r
- }\r
- CONFIG_TRACE("mchan_bw=%d/%d/%d\n", mchan->bw,mchan->p2p_mode,\r
- mchan->miracast_mode);\r
- }\r
- }\r
- else\r
- return false;\r
-\r
- return true;\r
-}\r
-\r
-#ifdef PKT_FILTER_SUPPORT\r
-bool\r
-dhd_conf_read_pkt_filter(dhd_pub_t *dhd, char *full_param, uint len_param)\r
-{\r
- struct dhd_conf *conf = dhd->conf;\r
- char *data = full_param+len_param;\r
- char *pch, *pick_tmp;\r
- int i;\r
-\r
- /* Process pkt filter:\r
- * 1) pkt_filter_add=99 0 0 0 0x000000000000 0x000000000000\r
- * 2) pkt_filter_delete=100, 102, 103, 104, 105\r
- * 3) magic_pkt_filter_add=141 0 1 12\r
- */\r
- if (!strncmp("dhd_master_mode=", full_param, len_param)) {\r
- if (!strncmp(data, "0", 1))\r
- dhd_master_mode = FALSE;\r
- else\r
- dhd_master_mode = TRUE;\r
- CONFIG_MSG("dhd_master_mode = %d\n", dhd_master_mode);\r
- }\r
- else if (!strncmp("pkt_filter_add=", full_param, len_param)) {\r
- pick_tmp = data;\r
- pch = bcmstrtok(&pick_tmp, ",.-", 0);\r
- i=0;\r
- while (pch != NULL && i<DHD_CONF_FILTER_MAX) {\r
- strcpy(&conf->pkt_filter_add.filter[i][0], pch);\r
- CONFIG_MSG("pkt_filter_add[%d][] = %s\n",\r
- i, &conf->pkt_filter_add.filter[i][0]);\r
- pch = bcmstrtok(&pick_tmp, ",.-", 0);\r
- i++;\r
- }\r
- conf->pkt_filter_add.count = i;\r
- }\r
- else if (!strncmp("pkt_filter_delete=", full_param, len_param) ||\r
- !strncmp("pkt_filter_del=", full_param, len_param)) {\r
- pick_tmp = data;\r
- pch = bcmstrtok(&pick_tmp, " ,.-", 0);\r
- i=0;\r
- while (pch != NULL && i<DHD_CONF_FILTER_MAX) {\r
- conf->pkt_filter_del.id[i] = (uint32)simple_strtol(pch, NULL, 10);\r
- pch = bcmstrtok(&pick_tmp, " ,.-", 0);\r
- i++;\r
- }\r
- conf->pkt_filter_del.count = i;\r
- CONFIG_MSG("pkt_filter_del id = ");\r
- for (i=0; i<conf->pkt_filter_del.count; i++)\r
- printf("%d ", conf->pkt_filter_del.id[i]);\r
- printf("\n");\r
- }\r
- else if (!strncmp("magic_pkt_filter_add=", full_param, len_param)) {\r
- if (conf->magic_pkt_filter_add) {\r
- kfree(conf->magic_pkt_filter_add);\r
- conf->magic_pkt_filter_add = NULL;\r
- }\r
- if (!(conf->magic_pkt_filter_add = kmalloc(MAGIC_PKT_FILTER_LEN, GFP_KERNEL))) {\r
- CONFIG_ERROR("kmalloc failed\n");\r
- } else {\r
- memset(conf->magic_pkt_filter_add, 0, MAGIC_PKT_FILTER_LEN);\r
- strcpy(conf->magic_pkt_filter_add, data);\r
- CONFIG_MSG("magic_pkt_filter_add = %s\n", conf->magic_pkt_filter_add);\r
- }\r
- }\r
- else\r
- return false;\r
-\r
- return true;\r
-}\r
-#endif\r
-\r
-#ifdef ISAM_PREINIT\r
-#if !defined(WL_EXT_IAPSTA)\r
-#error "WL_EXT_IAPSTA should be defined to enable ISAM_PREINIT"\r
-#endif /* !WL_EXT_IAPSTA */\r
-/*\r
- * isam_init=mode [sta|ap|apsta|dualap] vifname [wlan1]\r
- * isam_config=ifname [wlan0|wlan1] ssid [xxx] chan [x]\r
- hidden [y|n] maxassoc [x]\r
- amode [open|shared|wpapsk|wpa2psk|wpawpa2psk]\r
- emode [none|wep|tkip|aes|tkipaes]\r
- key [xxxxx]\r
- * isam_enable=ifname [wlan0|wlan1]\r
-*/\r
-bool\r
-dhd_conf_read_isam(dhd_pub_t *dhd, char *full_param, uint len_param)\r
-{\r
- struct dhd_conf *conf = dhd->conf;\r
- char *data = full_param+len_param;\r
-\r
- if (!strncmp("isam_init=", full_param, len_param)) {\r
- sprintf(conf->isam_init, "isam_init %s", data);\r
- CONFIG_MSG("isam_init=%s\n", conf->isam_init);\r
- }\r
- else if (!strncmp("isam_config=", full_param, len_param)) {\r
- sprintf(conf->isam_config, "isam_config %s", data);\r
- CONFIG_MSG("isam_config=%s\n", conf->isam_config);\r
- }\r
- else if (!strncmp("isam_enable=", full_param, len_param)) {\r
- sprintf(conf->isam_enable, "isam_enable %s", data);\r
- CONFIG_MSG("isam_enable=%s\n", conf->isam_enable);\r
- }\r
- else\r
- return false;\r
-\r
- return true;\r
-}\r
-#endif\r
-\r
-#ifdef IDHCP\r
-bool\r
-dhd_conf_read_dhcp_params(dhd_pub_t *dhd, char *full_param, uint len_param)\r
-{\r
- struct dhd_conf *conf = dhd->conf;\r
- char *data = full_param+len_param;\r
- struct ipv4_addr ipa_set;\r
-\r
- if (!strncmp("dhcpc_enable=", full_param, len_param)) {\r
- conf->dhcpc_enable = (int)simple_strtol(data, NULL, 10);\r
- CONFIG_MSG("dhcpc_enable = %d\n", conf->dhcpc_enable);\r
- }\r
- else if (!strncmp("dhcpd_enable=", full_param, len_param)) {\r
- conf->dhcpd_enable = (int)simple_strtol(data, NULL, 10);\r
- CONFIG_MSG("dhcpd_enable = %d\n", conf->dhcpd_enable);\r
- }\r
- else if (!strncmp("dhcpd_ip_addr=", full_param, len_param)) {\r
- if (!bcm_atoipv4(data, &ipa_set)) {\r
- CONFIG_ERROR("dhcpd_ip_addr adress setting failed.n");\r
- return false;\r
- }\r
- memcpy(&conf->dhcpd_ip_addr, &ipa_set, sizeof(struct ipv4_addr));\r
- CONFIG_MSG("dhcpd_ip_addr = %s\n", data);\r
- }\r
- else if (!strncmp("dhcpd_ip_mask=", full_param, len_param)) {\r
- if (!bcm_atoipv4(data, &ipa_set)) {\r
- CONFIG_ERROR("dhcpd_ip_mask adress setting failed\n");\r
- return false;\r
- }\r
- memcpy(&conf->dhcpd_ip_mask, &ipa_set, sizeof(struct ipv4_addr));\r
- CONFIG_MSG("dhcpd_ip_mask = %s\n", data);\r
- }\r
- else if (!strncmp("dhcpd_ip_start=", full_param, len_param)) {\r
- if (!bcm_atoipv4(data, &ipa_set)) {\r
- CONFIG_ERROR("dhcpd_ip_start adress setting failed\n");\r
- return false;\r
- }\r
- memcpy(&conf->dhcpd_ip_start, &ipa_set, sizeof(struct ipv4_addr));\r
- CONFIG_MSG("dhcpd_ip_start = %s\n", data);\r
- }\r
- else if (!strncmp("dhcpd_ip_end=", full_param, len_param)) {\r
- if (!bcm_atoipv4(data, &ipa_set)) {\r
- CONFIG_ERROR("dhcpd_ip_end adress setting failed\n");\r
- return false;\r
- }\r
- memcpy(&conf->dhcpd_ip_end, &ipa_set, sizeof(struct ipv4_addr));\r
- CONFIG_MSG("dhcpd_ip_end = %s\n", data);\r
- }\r
- else\r
- return false;\r
-\r
- return true;\r
-}\r
-#endif\r
-\r
-#ifdef BCMSDIO\r
-bool\r
-dhd_conf_read_sdio_params(dhd_pub_t *dhd, char *full_param, uint len_param)\r
-{\r
- struct dhd_conf *conf = dhd->conf;\r
- char *data = full_param+len_param;\r
-\r
- if (!strncmp("dhd_doflow=", full_param, len_param)) {\r
- if (!strncmp(data, "0", 1))\r
- dhd_doflow = FALSE;\r
- else\r
- dhd_doflow = TRUE;\r
- CONFIG_MSG("dhd_doflow = %d\n", dhd_doflow);\r
- }\r
- else if (!strncmp("dhd_slpauto=", full_param, len_param) ||\r
- !strncmp("kso_enable=", full_param, len_param)) {\r
- if (!strncmp(data, "0", 1))\r
- dhd_slpauto = FALSE;\r
- else\r
- dhd_slpauto = TRUE;\r
- CONFIG_MSG("dhd_slpauto = %d\n", dhd_slpauto);\r
- }\r
- else if (!strncmp("use_rxchain=", full_param, len_param)) {\r
- conf->use_rxchain = (int)simple_strtol(data, NULL, 10);\r
- CONFIG_MSG("use_rxchain = %d\n", conf->use_rxchain);\r
- }\r
- else if (!strncmp("dhd_txminmax=", full_param, len_param)) {\r
- conf->dhd_txminmax = (uint)simple_strtol(data, NULL, 10);\r
- CONFIG_MSG("dhd_txminmax = %d\n", conf->dhd_txminmax);\r
- }\r
- else if (!strncmp("txinrx_thres=", full_param, len_param)) {\r
- conf->txinrx_thres = (int)simple_strtol(data, NULL, 10);\r
- CONFIG_MSG("txinrx_thres = %d\n", conf->txinrx_thres);\r
- }\r
-#if defined(HW_OOB)\r
- else if (!strncmp("oob_enabled_later=", full_param, len_param)) {\r
- if (!strncmp(data, "0", 1))\r
- conf->oob_enabled_later = FALSE;\r
- else\r
- conf->oob_enabled_later = TRUE;\r
- CONFIG_MSG("oob_enabled_later = %d\n", conf->oob_enabled_later);\r
- }\r
-#endif\r
- else if (!strncmp("dpc_cpucore=", full_param, len_param)) {\r
- conf->dpc_cpucore = (int)simple_strtol(data, NULL, 10);\r
- CONFIG_MSG("dpc_cpucore = %d\n", conf->dpc_cpucore);\r
- }\r
- else if (!strncmp("rxf_cpucore=", full_param, len_param)) {\r
- conf->rxf_cpucore = (int)simple_strtol(data, NULL, 10);\r
- CONFIG_MSG("rxf_cpucore = %d\n", conf->rxf_cpucore);\r
- }\r
-#if defined(BCMSDIOH_TXGLOM)\r
- else if (!strncmp("txglomsize=", full_param, len_param)) {\r
- conf->txglomsize = (uint)simple_strtol(data, NULL, 10);\r
- if (conf->txglomsize > SDPCM_MAXGLOM_SIZE)\r
- conf->txglomsize = SDPCM_MAXGLOM_SIZE;\r
- CONFIG_MSG("txglomsize = %d\n", conf->txglomsize);\r
- }\r
- else if (!strncmp("txglom_ext=", full_param, len_param)) {\r
- if (!strncmp(data, "0", 1))\r
- conf->txglom_ext = FALSE;\r
- else\r
- conf->txglom_ext = TRUE;\r
- CONFIG_MSG("txglom_ext = %d\n", conf->txglom_ext);\r
- if (conf->txglom_ext) {\r
- if ((conf->chip == BCM43362_CHIP_ID) || (conf->chip == BCM4330_CHIP_ID))\r
- conf->txglom_bucket_size = 1680;\r
- else if (conf->chip == BCM43340_CHIP_ID || conf->chip == BCM43341_CHIP_ID ||\r
- conf->chip == BCM4334_CHIP_ID || conf->chip == BCM4324_CHIP_ID)\r
- conf->txglom_bucket_size = 1684;\r
- }\r
- CONFIG_MSG("txglom_bucket_size = %d\n", conf->txglom_bucket_size);\r
- }\r
- else if (!strncmp("bus:rxglom=", full_param, len_param)) {\r
- if (!strncmp(data, "0", 1))\r
- conf->bus_rxglom = FALSE;\r
- else\r
- conf->bus_rxglom = TRUE;\r
- CONFIG_MSG("bus:rxglom = %d\n", conf->bus_rxglom);\r
- }\r
- else if (!strncmp("deferred_tx_len=", full_param, len_param)) {\r
- conf->deferred_tx_len = (int)simple_strtol(data, NULL, 10);\r
- CONFIG_MSG("deferred_tx_len = %d\n", conf->deferred_tx_len);\r
- }\r
- else if (!strncmp("txctl_tmo_fix=", full_param, len_param)) {\r
- conf->txctl_tmo_fix = (int)simple_strtol(data, NULL, 0);\r
- CONFIG_MSG("txctl_tmo_fix = %d\n", conf->txctl_tmo_fix);\r
- }\r
- else if (!strncmp("tx_max_offset=", full_param, len_param)) {\r
- conf->tx_max_offset = (int)simple_strtol(data, NULL, 10);\r
- CONFIG_MSG("tx_max_offset = %d\n", conf->tx_max_offset);\r
- }\r
- else if (!strncmp("txglom_mode=", full_param, len_param)) {\r
- if (!strncmp(data, "0", 1))\r
- conf->txglom_mode = FALSE;\r
- else\r
- conf->txglom_mode = TRUE;\r
- CONFIG_MSG("txglom_mode = %d\n", conf->txglom_mode);\r
- }\r
-#if defined(SDIO_ISR_THREAD)\r
- else if (!strncmp("intr_extn=", full_param, len_param)) {\r
- if (!strncmp(data, "0", 1))\r
- conf->intr_extn = FALSE;\r
- else\r
- conf->intr_extn = TRUE;\r
- CONFIG_MSG("intr_extn = %d\n", conf->intr_extn);\r
- }\r
-#endif\r
-#ifdef BCMSDIO_RXLIM_POST\r
- else if (!strncmp("rxlim_en=", full_param, len_param)) {\r
- if (!strncmp(data, "0", 1))\r
- conf->rxlim_en = FALSE;\r
- else\r
- conf->rxlim_en = TRUE;\r
- CONFIG_MSG("rxlim_en = %d\n", conf->rxlim_en);\r
- }\r
-#endif\r
-#endif\r
- else\r
- return false;\r
-\r
- return true;\r
-}\r
-#endif\r
-\r
-#ifdef BCMPCIE\r
-bool\r
-dhd_conf_read_pcie_params(dhd_pub_t *dhd, char *full_param, uint len_param)\r
-{\r
- struct dhd_conf *conf = dhd->conf;\r
- char *data = full_param+len_param;\r
-\r
- if (!strncmp("bus:deepsleep_disable=", full_param, len_param)) {\r
- if (!strncmp(data, "0", 1))\r
- conf->bus_deepsleep_disable = 0;\r
- else\r
- conf->bus_deepsleep_disable = 1;\r
- CONFIG_MSG("bus:deepsleep_disable = %d\n", conf->bus_deepsleep_disable);\r
- }\r
- else\r
- return false;\r
-\r
- return true;\r
-}\r
-#endif\r
-\r
-bool\r
-dhd_conf_read_pm_params(dhd_pub_t *dhd, char *full_param, uint len_param)\r
-{\r
- struct dhd_conf *conf = dhd->conf;\r
- char *data = full_param+len_param;\r
-\r
- if (!strncmp("deepsleep=", full_param, len_param)) {\r
- if (!strncmp(data, "1", 1))\r
- conf->deepsleep = TRUE;\r
- else\r
- conf->deepsleep = FALSE;\r
- CONFIG_MSG("deepsleep = %d\n", conf->deepsleep);\r
- }\r
- else if (!strncmp("PM=", full_param, len_param)) {\r
- conf->pm = (int)simple_strtol(data, NULL, 10);\r
- CONFIG_MSG("PM = %d\n", conf->pm);\r
- }\r
- else if (!strncmp("pm_in_suspend=", full_param, len_param)) {\r
- conf->pm_in_suspend = (int)simple_strtol(data, NULL, 10);\r
- CONFIG_MSG("pm_in_suspend = %d\n", conf->pm_in_suspend);\r
- }\r
- else if (!strncmp("suspend_mode=", full_param, len_param)) {\r
- conf->suspend_mode = (int)simple_strtol(data, NULL, 0);\r
- CONFIG_MSG("suspend_mode = %d\n", conf->suspend_mode);\r
- if (conf->suspend_mode == PM_NOTIFIER)\r
- conf->insuspend |= (NO_TXDATA_IN_SUSPEND | NO_TXCTL_IN_SUSPEND);\r
- }\r
- else if (!strncmp("suspend_bcn_li_dtim=", full_param, len_param)) {\r
- conf->suspend_bcn_li_dtim = (int)simple_strtol(data, NULL, 10);\r
- CONFIG_MSG("suspend_bcn_li_dtim = %d\n", conf->suspend_bcn_li_dtim);\r
- }\r
- else if (!strncmp("xmit_in_suspend=", full_param, len_param)) {\r
- if (!strncmp(data, "1", 1))\r
- conf->insuspend &= ~NO_TXDATA_IN_SUSPEND;\r
- else\r
- conf->insuspend |= NO_TXDATA_IN_SUSPEND;\r
- CONFIG_MSG("insuspend = 0x%x\n", conf->insuspend);\r
- }\r
- else if (!strncmp("insuspend=", full_param, len_param)) {\r
- conf->insuspend = (int)simple_strtol(data, NULL, 0);\r
- CONFIG_MSG("insuspend = 0x%x\n", conf->insuspend);\r
- }\r
-#ifdef WL_EXT_WOWL\r
- else if (!strncmp("wowl=", full_param, len_param)) {\r
- conf->wowl = (int)simple_strtol(data, NULL, 0);\r
- CONFIG_MSG("wowl = 0x%x\n", conf->wowl);\r
- }\r
-#endif\r
- else\r
- return false;\r
-\r
- return true;\r
-}\r
-\r
-#ifdef GET_CUSTOM_MAC_FROM_CONFIG\r
-int\r
-bcm_str2hex(const char *p, char *ea, int size)\r
-{\r
- int i = 0;\r
- char *ep;\r
-\r
- for (;;) {\r
- ea[i++] = (char) bcm_strtoul(p, &ep, 16);\r
- p = ep;\r
- if (!*p++ || i == size)\r
- break;\r
- }\r
-\r
- return (i == size);\r
-}\r
-#endif\r
-\r
-bool\r
-dhd_conf_read_others(dhd_pub_t *dhd, char *full_param, uint len_param)\r
-{\r
- struct dhd_conf *conf = dhd->conf;\r
- char *data = full_param+len_param;\r
- char *pch, *pick_tmp;\r
- int i;\r
-#ifdef GET_CUSTOM_MAC_FROM_CONFIG\r
- struct ether_addr ea_addr;\r
- char macpad[56];\r
-#endif\r
-\r
- if (!strncmp("dhd_poll=", full_param, len_param)) {\r
- if (!strncmp(data, "0", 1))\r
- conf->dhd_poll = 0;\r
- else\r
- conf->dhd_poll = 1;\r
- CONFIG_MSG("dhd_poll = %d\n", conf->dhd_poll);\r
- }\r
- else if (!strncmp("dhd_watchdog_ms=", full_param, len_param)) {\r
- dhd_watchdog_ms = (int)simple_strtol(data, NULL, 10);\r
- CONFIG_MSG("dhd_watchdog_ms = %d\n", dhd_watchdog_ms);\r
- }\r
- else if (!strncmp("band=", full_param, len_param)) {\r
- /* Process band:\r
- * band=a for 5GHz only and band=b for 2.4GHz only\r
- */\r
- if (!strcmp(data, "b"))\r
- conf->band = WLC_BAND_2G;\r
- else if (!strcmp(data, "a"))\r
- conf->band = WLC_BAND_5G;\r
- else\r
- conf->band = WLC_BAND_AUTO;\r
- CONFIG_MSG("band = %d\n", conf->band);\r
- }\r
- else if (!strncmp("bw_cap_2g=", full_param, len_param)) {\r
- conf->bw_cap[0] = (uint)simple_strtol(data, NULL, 0);\r
- CONFIG_MSG("bw_cap_2g = %d\n", conf->bw_cap[0]);\r
- }\r
- else if (!strncmp("bw_cap_5g=", full_param, len_param)) {\r
- conf->bw_cap[1] = (uint)simple_strtol(data, NULL, 0);\r
- CONFIG_MSG("bw_cap_5g = %d\n", conf->bw_cap[1]);\r
- }\r
- else if (!strncmp("bw_cap=", full_param, len_param)) {\r
- pick_tmp = data;\r
- pch = bcmstrtok(&pick_tmp, " ,.-", 0);\r
- if (pch != NULL) {\r
- conf->bw_cap[0] = (uint32)simple_strtol(pch, NULL, 0);\r
- CONFIG_MSG("bw_cap 2g = %d\n", conf->bw_cap[0]);\r
- }\r
- pch = bcmstrtok(&pick_tmp, " ,.-", 0);\r
- if (pch != NULL) {\r
- conf->bw_cap[1] = (uint32)simple_strtol(pch, NULL, 0);\r
- CONFIG_MSG("bw_cap 5g = %d\n", conf->bw_cap[1]);\r
- }\r
- }\r
- else if (!strncmp("channels=", full_param, len_param)) {\r
- pick_tmp = data;\r
- pch = bcmstrtok(&pick_tmp, " ,.-", 0);\r
- i=0;\r
- while (pch != NULL && i<WL_NUMCHANNELS) {\r
- conf->channels.channel[i] = (uint32)simple_strtol(pch, NULL, 10);\r
- pch = bcmstrtok(&pick_tmp, " ,.-", 0);\r
- i++;\r
- }\r
- conf->channels.count = i;\r
- CONFIG_MSG("channels = ");\r
- for (i=0; i<conf->channels.count; i++)\r
- printf("%d ", conf->channels.channel[i]);\r
- printf("\n");\r
- }\r
- else if (!strncmp("keep_alive_period=", full_param, len_param)) {\r
- conf->keep_alive_period = (uint)simple_strtol(data, NULL, 10);\r
- CONFIG_MSG("keep_alive_period = %d\n", conf->keep_alive_period);\r
- }\r
-#ifdef ARP_OFFLOAD_SUPPORT\r
- else if (!strncmp("garp=", full_param, len_param)) {\r
- if (!strncmp(data, "0", 1))\r
- conf->garp = FALSE;\r
- else\r
- conf->garp = TRUE;\r
- CONFIG_MSG("garp = %d\n", conf->garp);\r
- }\r
-#endif\r
- else if (!strncmp("srl=", full_param, len_param)) {\r
- conf->srl = (int)simple_strtol(data, NULL, 10);\r
- CONFIG_MSG("srl = %d\n", conf->srl);\r
- }\r
- else if (!strncmp("lrl=", full_param, len_param)) {\r
- conf->lrl = (int)simple_strtol(data, NULL, 10);\r
- CONFIG_MSG("lrl = %d\n", conf->lrl);\r
- }\r
- else if (!strncmp("bcn_timeout=", full_param, len_param)) {\r
- conf->bcn_timeout= (uint)simple_strtol(data, NULL, 10);\r
- CONFIG_MSG("bcn_timeout = %d\n", conf->bcn_timeout);\r
- }\r
- else if (!strncmp("frameburst=", full_param, len_param)) {\r
- conf->frameburst = (int)simple_strtol(data, NULL, 10);\r
- CONFIG_MSG("frameburst = %d\n", conf->frameburst);\r
- }\r
- else if (!strncmp("disable_proptx=", full_param, len_param)) {\r
- conf->disable_proptx = (int)simple_strtol(data, NULL, 10);\r
- CONFIG_MSG("disable_proptx = %d\n", conf->disable_proptx);\r
- }\r
-#ifdef DHDTCPACK_SUPPRESS\r
- else if (!strncmp("tcpack_sup_mode=", full_param, len_param)) {\r
- conf->tcpack_sup_mode = (uint)simple_strtol(data, NULL, 10);\r
- CONFIG_MSG("tcpack_sup_mode = %d\n", conf->tcpack_sup_mode);\r
- }\r
-#endif\r
- else if (!strncmp("pktprio8021x=", full_param, len_param)) {\r
- conf->pktprio8021x = (int)simple_strtol(data, NULL, 10);\r
- CONFIG_MSG("pktprio8021x = %d\n", conf->pktprio8021x);\r
- }\r
-#if defined(BCMSDIO) || defined(BCMPCIE)\r
- else if (!strncmp("dhd_txbound=", full_param, len_param)) {\r
- dhd_txbound = (uint)simple_strtol(data, NULL, 10);\r
- CONFIG_MSG("dhd_txbound = %d\n", dhd_txbound);\r
- }\r
- else if (!strncmp("dhd_rxbound=", full_param, len_param)) {\r
- dhd_rxbound = (uint)simple_strtol(data, NULL, 10);\r
- CONFIG_MSG("dhd_rxbound = %d\n", dhd_rxbound);\r
- }\r
-#endif\r
- else if (!strncmp("orphan_move=", full_param, len_param)) {\r
- conf->orphan_move = (int)simple_strtol(data, NULL, 10);\r
- CONFIG_MSG("orphan_move = %d\n", conf->orphan_move);\r
- }\r
- else if (!strncmp("tsq=", full_param, len_param)) {\r
- conf->tsq = (int)simple_strtol(data, NULL, 10);\r
- CONFIG_MSG("tsq = %d\n", conf->tsq);\r
- }\r
- else if (!strncmp("ctrl_resched=", full_param, len_param)) {\r
- conf->ctrl_resched = (int)simple_strtol(data, NULL, 10);\r
- CONFIG_MSG("ctrl_resched = %d\n", conf->ctrl_resched);\r
- }\r
- else if (!strncmp("in4way=", full_param, len_param)) {\r
- conf->in4way = (int)simple_strtol(data, NULL, 0);\r
- CONFIG_MSG("in4way = 0x%x\n", conf->in4way);\r
- }\r
- else if (!strncmp("wl_preinit=", full_param, len_param)) {\r
- if (conf->wl_preinit) {\r
- kfree(conf->wl_preinit);\r
- conf->wl_preinit = NULL;\r
- }\r
- if (!(conf->wl_preinit = kmalloc(len_param+1, GFP_KERNEL))) {\r
- CONFIG_ERROR("kmalloc failed\n");\r
- } else {\r
- memset(conf->wl_preinit, 0, len_param+1);\r
- strcpy(conf->wl_preinit, data);\r
- CONFIG_MSG("wl_preinit = %s\n", conf->wl_preinit);\r
- }\r
- }\r
- else if (!strncmp("wl_suspend=", full_param, len_param)) {\r
- if (conf->wl_suspend) {\r
- kfree(conf->wl_suspend);\r
- conf->wl_suspend = NULL;\r
- }\r
- if (!(conf->wl_suspend = kmalloc(len_param+1, GFP_KERNEL))) {\r
- CONFIG_ERROR("kmalloc failed\n");\r
- } else {\r
- memset(conf->wl_suspend, 0, len_param+1);\r
- strcpy(conf->wl_suspend, data);\r
- CONFIG_MSG("wl_suspend = %s\n", conf->wl_suspend);\r
- }\r
- }\r
- else if (!strncmp("wl_resume=", full_param, len_param)) {\r
- if (conf->wl_resume) {\r
- kfree(conf->wl_resume);\r
- conf->wl_resume = NULL;\r
- }\r
- if (!(conf->wl_resume = kmalloc(len_param+1, GFP_KERNEL))) {\r
- CONFIG_ERROR("kmalloc failed\n");\r
- } else {\r
- memset(conf->wl_resume, 0, len_param+1);\r
- strcpy(conf->wl_resume, data);\r
- CONFIG_MSG("wl_resume = %s\n", conf->wl_resume);\r
- }\r
- }\r
-#ifdef GET_CUSTOM_MAC_FROM_CONFIG\r
- else if (!strncmp("mac=", full_param, len_param)) {\r
- if (!bcm_ether_atoe(data, &ea_addr)) {\r
- CONFIG_ERROR("mac adress read error");\r
- return false;\r
- }\r
- memcpy(&conf->hw_ether, &ea_addr, ETHER_ADDR_LEN);\r
- CONFIG_MSG("mac = %s\n", data);\r
- }\r
- else if (!strncmp("macpad=", full_param, len_param)) {\r
- if (!bcm_str2hex(data, macpad, sizeof(macpad))) {\r
- CONFIG_ERROR("macpad adress read error");\r
- return false;\r
- }\r
- memcpy(&conf->hw_ether[ETHER_ADDR_LEN], macpad, sizeof(macpad));\r
- if (config_msg_level & CONFIG_TRACE_LEVEL) {\r
- printf("macpad =\n");\r
- for (i=0; i<sizeof(macpad); i++) {\r
- printf("0x%02x, ", conf->hw_ether[ETHER_ADDR_LEN+i]);\r
- if ((i+1)%8 == 0)\r
- printf("\n");\r
- }\r
- }\r
- }\r
-#endif\r
-#ifdef PROPTX_MAXCOUNT\r
- else if (!strncmp("proptx_maxcnt_2g=", full_param, len_param)) {\r
- conf->proptx_maxcnt_2g = (int)simple_strtol(data, NULL, 0);\r
- CONFIG_MSG("proptx_maxcnt_2g = 0x%x\n", conf->proptx_maxcnt_2g);\r
- }\r
- else if (!strncmp("proptx_maxcnt_5g=", full_param, len_param)) {\r
- conf->proptx_maxcnt_5g = (int)simple_strtol(data, NULL, 0);\r
- CONFIG_MSG("proptx_maxcnt_5g = 0x%x\n", conf->proptx_maxcnt_5g);\r
- }\r
-#endif\r
- else\r
- return false;\r
-\r
- return true;\r
-}\r
-\r
-int\r
-dhd_conf_read_config(dhd_pub_t *dhd, char *conf_path)\r
-{\r
- int bcmerror = -1, chip_match = -1;\r
- uint len = 0, start_pos=0, end_pos=0;\r
- void *image = NULL;\r
- char *memblock = NULL;\r
- char *bufp, *pick = NULL, *pch;\r
- bool conf_file_exists;\r
- uint len_param;\r
-\r
- conf_file_exists = ((conf_path != NULL) && (conf_path[0] != '\0'));\r
- if (!conf_file_exists) {\r
- CONFIG_MSG("config path %s\n", conf_path);\r
- return (0);\r
- }\r
-\r
- if (conf_file_exists) {\r
- image = dhd_os_open_image1(dhd, conf_path);\r
- if (image == NULL) {\r
- CONFIG_MSG("Ignore config file %s\n", conf_path);\r
- goto err;\r
- }\r
- }\r
-\r
- memblock = MALLOC(dhd->osh, MAXSZ_CONFIG);\r
- if (memblock == NULL) {\r
- CONFIG_ERROR("Failed to allocate memory %d bytes\n", MAXSZ_CONFIG);\r
- goto err;\r
- }\r
-\r
- pick = MALLOC(dhd->osh, MAXSZ_BUF);\r
- if (!pick) {\r
- CONFIG_ERROR("Failed to allocate memory %d bytes\n", MAXSZ_BUF);\r
- goto err;\r
- }\r
-\r
- /* Read variables */\r
- if (conf_file_exists) {\r
- len = dhd_os_get_image_block(memblock, MAXSZ_CONFIG, image);\r
- }\r
- if (len > 0 && len < MAXSZ_CONFIG) {\r
- bufp = (char *)memblock;\r
- bufp[len] = 0;\r
-\r
- while (start_pos < len) {\r
- memset(pick, 0, MAXSZ_BUF);\r
- end_pos = pick_config_vars(bufp, len, start_pos, pick, MAXSZ_BUF);\r
- if (end_pos - start_pos >= MAXSZ_BUF)\r
- CONFIG_ERROR("out of buf to read MAXSIZ_BUF=%d\n", MAXSZ_BUF);\r
- start_pos = end_pos;\r
- pch = strchr(pick, '=');\r
- if (pch != NULL) {\r
- len_param = pch-pick+1;\r
- if (len_param == strlen(pick)) {\r
- CONFIG_ERROR("not a right parameter %s\n", pick);\r
- continue;\r
- }\r
- } else {\r
- CONFIG_ERROR("not a right parameter %s\n", pick);\r
- continue;\r
- }\r
-\r
- dhd_conf_read_chiprev(dhd, &chip_match, pick, len_param);\r
- if (!chip_match)\r
- continue;\r
-\r
- if (dhd_conf_read_log_level(dhd, pick, len_param))\r
- continue;\r
- else if (dhd_conf_read_roam_params(dhd, pick, len_param))\r
- continue;\r
- else if (dhd_conf_read_wme_ac_params(dhd, pick, len_param))\r
- continue;\r
-#ifdef BCMSDIO\r
- else if (dhd_conf_read_fw_by_mac(dhd, pick, len_param))\r
- continue;\r
- else if (dhd_conf_read_nv_by_mac(dhd, pick, len_param))\r
- continue;\r
-#endif\r
- else if (dhd_conf_read_nv_by_chip(dhd, pick, len_param))\r
- continue;\r
- else if (dhd_conf_read_country(dhd, pick, len_param))\r
- continue;\r
- else if (dhd_conf_read_mchan_params(dhd, pick, len_param))\r
- continue;\r
-#ifdef PKT_FILTER_SUPPORT\r
- else if (dhd_conf_read_pkt_filter(dhd, pick, len_param))\r
- continue;\r
-#endif /* PKT_FILTER_SUPPORT */\r
-#ifdef ISAM_PREINIT\r
- else if (dhd_conf_read_isam(dhd, pick, len_param))\r
- continue;\r
-#endif /* ISAM_PREINIT */\r
-#ifdef IDHCP\r
- else if (dhd_conf_read_dhcp_params(dhd, pick, len_param))\r
- continue;\r
-#endif /* IDHCP */\r
-#ifdef BCMSDIO\r
- else if (dhd_conf_read_sdio_params(dhd, pick, len_param))\r
- continue;\r
-#endif /* BCMSDIO */\r
-#ifdef BCMPCIE\r
- else if (dhd_conf_read_pcie_params(dhd, pick, len_param))\r
- continue;\r
-#endif /* BCMPCIE */\r
- else if (dhd_conf_read_pm_params(dhd, pick, len_param))\r
- continue;\r
- else if (dhd_conf_read_others(dhd, pick, len_param))\r
- continue;\r
- else\r
- continue;\r
- }\r
-\r
- bcmerror = 0;\r
- } else {\r
- CONFIG_ERROR("error reading config file: %d\n", len);\r
- bcmerror = BCME_SDIO_ERROR;\r
- }\r
-\r
-err:\r
- if (pick)\r
- MFREE(dhd->osh, pick, MAXSZ_BUF);\r
-\r
- if (memblock)\r
- MFREE(dhd->osh, memblock, MAXSZ_CONFIG);\r
-\r
- if (image)\r
- dhd_os_close_image1(dhd, image);\r
-\r
- return bcmerror;\r
-}\r
-\r
-int\r
-dhd_conf_set_chiprev(dhd_pub_t *dhd, uint chip, uint chiprev)\r
-{\r
- CONFIG_MSG("chip=0x%x, chiprev=%d\n", chip, chiprev);\r
- dhd->conf->chip = chip;\r
- dhd->conf->chiprev = chiprev;\r
- return 0;\r
-}\r
-\r
-uint\r
-dhd_conf_get_chip(void *context)\r
-{\r
- dhd_pub_t *dhd = context;\r
-\r
- if (dhd && dhd->conf)\r
- return dhd->conf->chip;\r
- return 0;\r
-}\r
-\r
-uint\r
-dhd_conf_get_chiprev(void *context)\r
-{\r
- dhd_pub_t *dhd = context;\r
-\r
- if (dhd && dhd->conf)\r
- return dhd->conf->chiprev;\r
- return 0;\r
-}\r
-\r
-#ifdef BCMSDIO\r
-void\r
-dhd_conf_set_txglom_params(dhd_pub_t *dhd, bool enable)\r
-{\r
- struct dhd_conf *conf = dhd->conf;\r
-\r
- if (enable) {\r
-#if defined(BCMSDIOH_TXGLOM_EXT)\r
- if (conf->chip == BCM43362_CHIP_ID || conf->chip == BCM4330_CHIP_ID ||\r
- conf->chip == BCM43340_CHIP_ID || conf->chip == BCM43341_CHIP_ID ||\r
- conf->chip == BCM4334_CHIP_ID || conf->chip == BCM4324_CHIP_ID) {\r
- conf->txglom_mode = SDPCM_TXGLOM_CPY;\r
- }\r
-#endif\r
- // other parameters set in preinit or config.txt\r
- if (conf->txglom_ext)\r
- CONFIG_MSG("txglom_ext=%d, txglom_bucket_size=%d\n",\r
- conf->txglom_ext, conf->txglom_bucket_size);\r
- CONFIG_MSG("txglom_mode=%s\n",\r
- conf->txglom_mode==SDPCM_TXGLOM_MDESC?"multi-desc":"copy");\r
- CONFIG_MSG("txglomsize=%d, deferred_tx_len=%d\n",\r
- conf->txglomsize, conf->deferred_tx_len);\r
- CONFIG_MSG("txinrx_thres=%d, dhd_txminmax=%d\n",\r
- conf->txinrx_thres, conf->dhd_txminmax);\r
- CONFIG_MSG("tx_max_offset=%d, txctl_tmo_fix=%d\n",\r
- conf->tx_max_offset, conf->txctl_tmo_fix);\r
- } else {\r
- // clear txglom parameters\r
- conf->txglom_ext = FALSE;\r
- conf->txglom_bucket_size = 0;\r
- conf->txglomsize = 0;\r
- conf->deferred_tx_len = 0;\r
- }\r
-\r
-}\r
-#endif\r
-\r
-void\r
-dhd_conf_postinit_ioctls(dhd_pub_t *dhd)\r
-{\r
- struct dhd_conf *conf = dhd->conf;\r
- char wl_preinit[] = "assoc_retry_max=20";\r
-#ifdef NO_POWER_SAVE\r
- char wl_no_power_save[] = "mpc=0, 86=0";\r
- dhd_conf_set_wl_cmd(dhd, wl_no_power_save, FALSE);\r
-#endif\r
-\r
- dhd_conf_set_intiovar(dhd, WLC_UP, "WLC_UP", 0, 0, FALSE);\r
- dhd_conf_map_country_list(dhd, &conf->cspec);\r
- dhd_conf_set_country(dhd, &conf->cspec);\r
- dhd_conf_fix_country(dhd);\r
- dhd_conf_get_country(dhd, &dhd->dhd_cspec);\r
-\r
- dhd_conf_set_intiovar(dhd, WLC_SET_BAND, "WLC_SET_BAND", conf->band, 0, FALSE);\r
- dhd_conf_set_intiovar(dhd, WLC_SET_VAR, "bcn_timeout", conf->bcn_timeout, 0, FALSE);\r
- dhd_conf_set_intiovar(dhd, WLC_SET_PM, "WLC_SET_PM", conf->pm, 0, FALSE);\r
- dhd_conf_set_intiovar(dhd, WLC_SET_SRL, "WLC_SET_SRL", conf->srl, 0, FALSE);\r
- dhd_conf_set_intiovar(dhd, WLC_SET_LRL, "WLC_SET_LRL", conf->lrl, 0, FALSE);\r
- dhd_conf_set_bw_cap(dhd);\r
- dhd_conf_set_roam(dhd);\r
-\r
-#if defined(BCMPCIE)\r
- dhd_conf_set_intiovar(dhd, WLC_SET_VAR, "bus:deepsleep_disable",\r
- conf->bus_deepsleep_disable, 0, FALSE);\r
-#endif /* defined(BCMPCIE) */\r
-\r
-#ifdef IDHCP\r
- dhd_conf_set_intiovar(dhd, WLC_SET_VAR, "dhcpc_enable", conf->dhcpc_enable,\r
- 0, FALSE);\r
- if (conf->dhcpd_enable >= 0) {\r
- dhd_conf_set_bufiovar(dhd, 0, WLC_SET_VAR, "dhcpd_ip_addr",\r
- (char *)&conf->dhcpd_ip_addr, sizeof(conf->dhcpd_ip_addr), FALSE);\r
- dhd_conf_set_bufiovar(dhd, 0, WLC_SET_VAR, "dhcpd_ip_mask",\r
- (char *)&conf->dhcpd_ip_mask, sizeof(conf->dhcpd_ip_mask), FALSE);\r
- dhd_conf_set_bufiovar(dhd, 0, WLC_SET_VAR, "dhcpd_ip_start",\r
- (char *)&conf->dhcpd_ip_start, sizeof(conf->dhcpd_ip_start), FALSE);\r
- dhd_conf_set_bufiovar(dhd, 0, WLC_SET_VAR, "dhcpd_ip_end",\r
- (char *)&conf->dhcpd_ip_end, sizeof(conf->dhcpd_ip_end), FALSE);\r
- dhd_conf_set_intiovar(dhd, WLC_SET_VAR, "dhcpd_enable",\r
- conf->dhcpd_enable, 0, FALSE);\r
- }\r
-#endif\r
- dhd_conf_set_intiovar(dhd, WLC_SET_FAKEFRAG, "WLC_SET_FAKEFRAG",\r
- conf->frameburst, 0, FALSE);\r
-\r
- dhd_conf_set_wl_cmd(dhd, wl_preinit, TRUE);\r
-#if defined(BCMSDIO)\r
- {\r
- char ampdu_mpdu[] = "ampdu_mpdu=16";\r
- dhd_conf_set_wl_cmd(dhd, ampdu_mpdu, TRUE);\r
- }\r
-#endif\r
- if (conf->chip == BCM4354_CHIP_ID || conf->chip == BCM4356_CHIP_ID ||\r
- conf->chip == BCM4371_CHIP_ID || conf->chip == BCM4359_CHIP_ID ||\r
- conf->chip == BCM43569_CHIP_ID ||\r
- conf->chip == BCM43751_CHIP_ID || conf->chip == BCM43752_CHIP_ID) {\r
- dhd_conf_set_intiovar(dhd, WLC_SET_VAR, "txbf", 1, 0, FALSE);\r
- }\r
-#if defined(WLEASYMESH)\r
- {\r
- char ezmesh[] = "mbss=1, rsdb_mode=0";\r
- dhd_conf_set_wl_cmd(dhd, ezmesh, TRUE);\r
- }\r
-#endif\r
- dhd_conf_set_wl_cmd(dhd, conf->wl_preinit, TRUE);\r
-\r
-#ifndef WL_CFG80211\r
- dhd_conf_set_intiovar(dhd, WLC_UP, "WLC_UP", 0, 0, FALSE);\r
-#endif\r
-\r
-}\r
-\r
-int\r
-dhd_conf_preinit(dhd_pub_t *dhd)\r
-{\r
- struct dhd_conf *conf = dhd->conf;\r
-\r
- CONFIG_TRACE("Enter\n");\r
-\r
-#ifdef BCMSDIO\r
- dhd_conf_free_mac_list(&conf->fw_by_mac);\r
- dhd_conf_free_mac_list(&conf->nv_by_mac);\r
-#endif\r
- dhd_conf_free_chip_nv_path_list(&conf->nv_by_chip);\r
- dhd_conf_free_country_list(conf);\r
- dhd_conf_free_mchan_list(conf);\r
- if (conf->magic_pkt_filter_add) {\r
- kfree(conf->magic_pkt_filter_add);\r
- conf->magic_pkt_filter_add = NULL;\r
- }\r
- if (conf->wl_preinit) {\r
- kfree(conf->wl_preinit);\r
- conf->wl_preinit = NULL;\r
- }\r
- if (conf->wl_suspend) {\r
- kfree(conf->wl_suspend);\r
- conf->wl_suspend = NULL;\r
- }\r
- if (conf->wl_resume) {\r
- kfree(conf->wl_resume);\r
- conf->wl_resume = NULL;\r
- }\r
- conf->band = -1;\r
- memset(&conf->bw_cap, -1, sizeof(conf->bw_cap));\r
- if (conf->chip == BCM43362_CHIP_ID || conf->chip == BCM4330_CHIP_ID) {\r
- strcpy(conf->cspec.country_abbrev, "ALL");\r
- strcpy(conf->cspec.ccode, "ALL");\r
- conf->cspec.rev = 0;\r
- } else if (conf->chip == BCM4335_CHIP_ID || conf->chip == BCM4339_CHIP_ID ||\r
- conf->chip == BCM4354_CHIP_ID || conf->chip == BCM4356_CHIP_ID ||\r
- conf->chip == BCM4345_CHIP_ID || conf->chip == BCM4371_CHIP_ID ||\r
- conf->chip == BCM43569_CHIP_ID || conf->chip == BCM4359_CHIP_ID) {\r
- strcpy(conf->cspec.country_abbrev, "CN");\r
- strcpy(conf->cspec.ccode, "CN");\r
- conf->cspec.rev = 38;\r
- } else {\r
- strcpy(conf->cspec.country_abbrev, "CN");\r
- strcpy(conf->cspec.ccode, "CN");\r
- conf->cspec.rev = 0;\r
- }\r
- memset(&conf->channels, 0, sizeof(wl_channel_list_t));\r
- conf->roam_off = 1;\r
- conf->roam_off_suspend = 1;\r
- conf->roam_trigger[0] = -65;\r
- conf->roam_trigger[1] = WLC_BAND_ALL;\r
- conf->roam_scan_period[0] = 10;\r
- conf->roam_scan_period[1] = WLC_BAND_ALL;\r
- conf->roam_delta[0] = 10;\r
- conf->roam_delta[1] = WLC_BAND_ALL;\r
- conf->fullroamperiod = 20;\r
- conf->keep_alive_period = 30000;\r
-#ifdef ARP_OFFLOAD_SUPPORT\r
- conf->garp = FALSE;\r
-#endif\r
- conf->force_wme_ac = 0;\r
- memset(&conf->wme_sta, 0, sizeof(wme_param_t));\r
- memset(&conf->wme_ap, 0, sizeof(wme_param_t));\r
-#ifdef PKT_FILTER_SUPPORT\r
- memset(&conf->pkt_filter_add, 0, sizeof(conf_pkt_filter_add_t));\r
- memset(&conf->pkt_filter_del, 0, sizeof(conf_pkt_filter_del_t));\r
-#endif\r
- conf->srl = -1;\r
- conf->lrl = -1;\r
- conf->bcn_timeout = 16;\r
- conf->disable_proptx = -1;\r
- conf->dhd_poll = -1;\r
-#ifdef BCMSDIO\r
- conf->use_rxchain = 0;\r
- conf->bus_rxglom = TRUE;\r
- conf->txglom_ext = FALSE;\r
- conf->tx_max_offset = 0;\r
- conf->txglomsize = SDPCM_DEFGLOM_SIZE;\r
- conf->txctl_tmo_fix = 300;\r
- conf->txglom_mode = SDPCM_TXGLOM_MDESC;\r
- conf->deferred_tx_len = 0;\r
- conf->dhd_txminmax = 1;\r
- conf->txinrx_thres = -1;\r
-#if defined(SDIO_ISR_THREAD)\r
- conf->intr_extn = FALSE;\r
-#endif\r
-#ifdef BCMSDIO_RXLIM_POST\r
- conf->rxlim_en = TRUE;\r
-#endif\r
-#if defined(HW_OOB)\r
- conf->oob_enabled_later = FALSE;\r
-#endif\r
-#endif\r
-#ifdef BCMPCIE\r
- conf->bus_deepsleep_disable = 1;\r
-#endif\r
- conf->dpc_cpucore = -1;\r
- conf->rxf_cpucore = -1;\r
- conf->frameburst = -1;\r
- conf->deepsleep = FALSE;\r
- conf->pm = -1;\r
- conf->pm_in_suspend = -1;\r
- conf->insuspend = 0;\r
- conf->suspend_mode = EARLY_SUSPEND;\r
- conf->suspend_bcn_li_dtim = -1;\r
-#ifdef WL_EXT_WOWL\r
- dhd_master_mode = TRUE;\r
- conf->wowl = WL_WOWL_NET|WL_WOWL_DIS|WL_WOWL_BCN;\r
- conf->insuspend |= (WOWL_IN_SUSPEND | NO_TXDATA_IN_SUSPEND);\r
-#endif\r
- if (conf->suspend_mode == PM_NOTIFIER)\r
- conf->insuspend |= (NO_TXDATA_IN_SUSPEND | NO_TXCTL_IN_SUSPEND);\r
- conf->suspended = FALSE;\r
-#ifdef SUSPEND_EVENT\r
- memset(&conf->resume_eventmask, 0, sizeof(conf->resume_eventmask));\r
- memset(&conf->bssid_insuspend, 0, ETHER_ADDR_LEN);\r
- conf->wlfc = FALSE;\r
-#endif\r
-#ifdef GET_CUSTOM_MAC_FROM_CONFIG\r
- memset(&conf->hw_ether, 0, sizeof(conf->hw_ether));\r
-#endif\r
-#ifdef IDHCP\r
- conf->dhcpc_enable = -1;\r
- conf->dhcpd_enable = -1;\r
-#endif\r
- conf->orphan_move = 0;\r
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 1, 0))\r
- conf->tsq = 10;\r
-#else\r
- conf->tsq = 0;\r
-#endif\r
-#ifdef DHDTCPACK_SUPPRESS\r
-#ifdef BCMPCIE\r
- conf->tcpack_sup_mode = TCPACK_SUP_HOLD;\r
-#else\r
- conf->tcpack_sup_mode = TCPACK_SUP_OFF;\r
-#endif\r
-#endif\r
- conf->pktprio8021x = -1;\r
- conf->ctrl_resched = 2;\r
- conf->in4way = NO_SCAN_IN4WAY | DONT_DELETE_GC_AFTER_WPS | WAIT_DISCONNECTED;\r
-#ifdef PROPTX_MAXCOUNT\r
- conf->proptx_maxcnt_2g = 46;\r
- conf->proptx_maxcnt_5g = WL_TXSTATUS_FREERUNCTR_MASK;\r
-#endif /* DYNAMIC_PROPTX_MAXCOUNT */\r
-#ifdef ISAM_PREINIT\r
- memset(conf->isam_init, 0, sizeof(conf->isam_init));\r
- memset(conf->isam_config, 0, sizeof(conf->isam_config));\r
- memset(conf->isam_enable, 0, sizeof(conf->isam_enable));\r
-#endif\r
-#ifdef CUSTOMER_HW_AMLOGIC\r
- dhd_slpauto = FALSE;\r
-#ifdef BCMSDIO\r
- conf->txglom_mode = SDPCM_TXGLOM_CPY;\r
-#endif\r
-#endif\r
-#if defined(SDIO_ISR_THREAD)\r
- if (conf->chip == BCM43012_CHIP_ID ||\r
- conf->chip == BCM4335_CHIP_ID || conf->chip == BCM4339_CHIP_ID ||\r
- conf->chip == BCM43454_CHIP_ID || conf->chip == BCM4345_CHIP_ID ||\r
- conf->chip == BCM4354_CHIP_ID || conf->chip == BCM4356_CHIP_ID ||\r
- conf->chip == BCM4345_CHIP_ID || conf->chip == BCM4371_CHIP_ID ||\r
- conf->chip == BCM4359_CHIP_ID ||\r
- conf->chip == BCM43751_CHIP_ID || conf->chip == BCM43752_CHIP_ID) {\r
- conf->intr_extn = TRUE;\r
- }\r
-#endif\r
- if ((conf->chip == BCM43430_CHIP_ID && conf->chiprev == 2) ||\r
- conf->chip == BCM43012_CHIP_ID ||\r
- conf->chip == BCM4335_CHIP_ID || conf->chip == BCM4339_CHIP_ID ||\r
- conf->chip == BCM43454_CHIP_ID || conf->chip == BCM4345_CHIP_ID ||\r
- conf->chip == BCM4354_CHIP_ID || conf->chip == BCM4356_CHIP_ID ||\r
- conf->chip == BCM4345_CHIP_ID || conf->chip == BCM4371_CHIP_ID ||\r
- conf->chip == BCM43569_CHIP_ID || conf->chip == BCM4359_CHIP_ID ||\r
- conf->chip == BCM43751_CHIP_ID || conf->chip == BCM43752_CHIP_ID) {\r
-#ifdef DHDTCPACK_SUPPRESS\r
-#ifdef BCMSDIO\r
- conf->tcpack_sup_mode = TCPACK_SUP_REPLACE;\r
-#endif\r
-#endif\r
-#if defined(BCMSDIO) || defined(BCMPCIE)\r
- dhd_rxbound = 128;\r
- dhd_txbound = 64;\r
-#endif\r
- conf->frameburst = 1;\r
-#ifdef BCMSDIO\r
- conf->dhd_txminmax = -1;\r
- conf->txinrx_thres = 128;\r
-#endif\r
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 1, 0))\r
- conf->orphan_move = 1;\r
-#else\r
- conf->orphan_move = 0;\r
-#endif\r
- }\r
-\r
-#ifdef BCMSDIO\r
-#if defined(BCMSDIOH_TXGLOM_EXT)\r
- if (conf->chip == BCM43362_CHIP_ID || conf->chip == BCM4330_CHIP_ID ||\r
- conf->chip == BCM43340_CHIP_ID || conf->chip == BCM43341_CHIP_ID ||\r
- conf->chip == BCM4334_CHIP_ID || conf->chip == BCM4324_CHIP_ID) {\r
- conf->txglom_ext = TRUE;\r
- } else {\r
- conf->txglom_ext = FALSE;\r
- }\r
- if (conf->chip == BCM43362_CHIP_ID || conf->chip == BCM4330_CHIP_ID) {\r
- conf->txglom_bucket_size = 1680; // fixed value, don't change\r
- conf->txglomsize = 6;\r
- }\r
- if (conf->chip == BCM4334_CHIP_ID || conf->chip == BCM43340_CHIP_ID ||\r
- conf->chip == BCM43341_CHIP_ID || conf->chip == BCM4324_CHIP_ID) {\r
- conf->txglom_bucket_size = 1684; // fixed value, don't change\r
- conf->txglomsize = 16;\r
- }\r
-#endif\r
- if (conf->txglomsize > SDPCM_MAXGLOM_SIZE)\r
- conf->txglomsize = SDPCM_MAXGLOM_SIZE;\r
-#endif\r
- init_waitqueue_head(&conf->event_complete);\r
-\r
- return 0;\r
-}\r
-\r
-int\r
-dhd_conf_reset(dhd_pub_t *dhd)\r
-{\r
- struct dhd_conf *conf = dhd->conf;\r
-\r
-#ifdef BCMSDIO\r
- dhd_conf_free_mac_list(&conf->fw_by_mac);\r
- dhd_conf_free_mac_list(&conf->nv_by_mac);\r
-#endif\r
- dhd_conf_free_chip_nv_path_list(&conf->nv_by_chip);\r
- dhd_conf_free_country_list(conf);\r
- dhd_conf_free_mchan_list(conf);\r
- if (conf->magic_pkt_filter_add) {\r
- kfree(conf->magic_pkt_filter_add);\r
- conf->magic_pkt_filter_add = NULL;\r
- }\r
- if (conf->wl_preinit) {\r
- kfree(conf->wl_preinit);\r
- conf->wl_preinit = NULL;\r
- }\r
- if (conf->wl_suspend) {\r
- kfree(conf->wl_suspend);\r
- conf->wl_suspend = NULL;\r
- }\r
- if (conf->wl_resume) {\r
- kfree(conf->wl_resume);\r
- conf->wl_resume = NULL;\r
- }\r
- memset(conf, 0, sizeof(dhd_conf_t));\r
- return 0;\r
-}\r
-\r
-int\r
-dhd_conf_attach(dhd_pub_t *dhd)\r
-{\r
- dhd_conf_t *conf;\r
-\r
- CONFIG_TRACE("Enter\n");\r
-\r
- if (dhd->conf != NULL) {\r
- CONFIG_MSG("config is attached before!\n");\r
- return 0;\r
- }\r
- /* Allocate private bus interface state */\r
- if (!(conf = MALLOC(dhd->osh, sizeof(dhd_conf_t)))) {\r
- CONFIG_ERROR("MALLOC failed\n");\r
- goto fail;\r
- }\r
- memset(conf, 0, sizeof(dhd_conf_t));\r
-\r
- dhd->conf = conf;\r
-\r
- return 0;\r
-\r
-fail:\r
- if (conf != NULL)\r
- MFREE(dhd->osh, conf, sizeof(dhd_conf_t));\r
- return BCME_NOMEM;\r
-}\r
-\r
-void\r
-dhd_conf_detach(dhd_pub_t *dhd)\r
-{\r
- struct dhd_conf *conf = dhd->conf;\r
-\r
- CONFIG_TRACE("Enter\n");\r
- if (dhd->conf) {\r
-#ifdef BCMSDIO\r
- dhd_conf_free_mac_list(&conf->fw_by_mac);\r
- dhd_conf_free_mac_list(&conf->nv_by_mac);\r
-#endif\r
- dhd_conf_free_chip_nv_path_list(&conf->nv_by_chip);\r
- dhd_conf_free_country_list(conf);\r
- dhd_conf_free_mchan_list(conf);\r
- if (conf->magic_pkt_filter_add) {\r
- kfree(conf->magic_pkt_filter_add);\r
- conf->magic_pkt_filter_add = NULL;\r
- }\r
- if (conf->wl_preinit) {\r
- kfree(conf->wl_preinit);\r
- conf->wl_preinit = NULL;\r
- }\r
- if (conf->wl_suspend) {\r
- kfree(conf->wl_suspend);\r
- conf->wl_suspend = NULL;\r
- }\r
- if (conf->wl_resume) {\r
- kfree(conf->wl_resume);\r
- conf->wl_resume = NULL;\r
- }\r
- MFREE(dhd->osh, conf, sizeof(dhd_conf_t));\r
- }\r
- dhd->conf = NULL;\r
-}\r
+
+#include <typedefs.h>
+#include <osl.h>
+
+#include <bcmutils.h>
+#include <hndsoc.h>
+#include <bcmsdbus.h>
+#if defined(HW_OOB) || defined(FORCE_WOWLAN)
+#include <bcmdefs.h>
+#include <bcmsdh.h>
+#include <sdio.h>
+#include <sbchipc.h>
+#endif
+
+#include <dhd_config.h>
+#include <dhd_dbg.h>
+
+/* message levels */
+#define CONFIG_ERROR_LEVEL 0x0001
+#define CONFIG_TRACE_LEVEL 0x0002
+
+uint config_msg_level = CONFIG_ERROR_LEVEL;
+
+#define CONFIG_ERROR(x) \
+ do { \
+ if (config_msg_level & CONFIG_ERROR_LEVEL) { \
+ printk(KERN_ERR "CONFIG-ERROR) "); \
+ printk x; \
+ } \
+ } while (0)
+#define CONFIG_TRACE(x) \
+ do { \
+ if (config_msg_level & CONFIG_TRACE_LEVEL) { \
+ printk(KERN_ERR "CONFIG-TRACE) "); \
+ printk x; \
+ } \
+ } while (0)
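+
+/*
+ * Usage sketch (illustrative, not part of the driver source): because the
+ * macros expand to a bare "printk x", callers pass the whole argument list
+ * in an extra pair of parentheses, and trace output only appears once the
+ * CONFIG_TRACE_LEVEL bit is set in config_msg_level:
+ *
+ *   config_msg_level |= CONFIG_TRACE_LEVEL;
+ *   CONFIG_TRACE(("%s: blksize=%d\n", __FUNCTION__, blksize));
+ */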
+
+#define MAXSZ_BUF 1000
+#define MAXSZ_CONFIG 4096
+
+#define htod32(i) i
+#define htod16(i) i
+#define dtoh32(i) i
+#define dtoh16(i) i
+#define htodchanspec(i) i
+#define dtohchanspec(i) i
+
+typedef struct cihp_name_map_t {
+ uint chip;
+ uint chiprev;
+ uint ag_type;
+ bool clm;
+ char *chip_name;
+ char *module_name;
+} cihp_name_map_t;
+
+/* Map of chip ID and revision to firmware chip name and module name */
+#define DONT_CARE 9999
+const cihp_name_map_t chip_name_map [] = {
+ /* ChipID Chiprev AG CLM ChipName ModuleName */
+#ifdef BCMSDIO
+ {BCM43362_CHIP_ID, 0, DONT_CARE, FALSE, "bcm40181a0", ""},
+ {BCM43362_CHIP_ID, 1, DONT_CARE, FALSE, "bcm40181a2", ""},
+ {BCM4330_CHIP_ID, 4, FW_TYPE_G, FALSE, "bcm40183b2", ""},
+ {BCM4330_CHIP_ID, 4, FW_TYPE_AG, FALSE, "bcm40183b2_ag", ""},
+ {BCM43430_CHIP_ID, 0, DONT_CARE, FALSE, "bcm43438a0", "ap6212"},
+ {BCM43430_CHIP_ID, 1, DONT_CARE, FALSE, "bcm43438a1", "ap6212a"},
+ {BCM43430_CHIP_ID, 2, DONT_CARE, FALSE, "bcm43436b0", "ap6236"},
+ {BCM43012_CHIP_ID, 1, DONT_CARE, TRUE, "bcm43013b0", ""},
+ {BCM4334_CHIP_ID, 3, DONT_CARE, FALSE, "bcm4334b1_ag", ""},
+ {BCM43340_CHIP_ID, 2, DONT_CARE, FALSE, "bcm43341b0_ag", ""},
+ {BCM43341_CHIP_ID, 2, DONT_CARE, FALSE, "bcm43341b0_ag", ""},
+ {BCM4324_CHIP_ID, 5, DONT_CARE, FALSE, "bcm43241b4_ag", ""},
+ {BCM4335_CHIP_ID, 2, DONT_CARE, FALSE, "bcm4339a0_ag", ""},
+ {BCM4339_CHIP_ID, 1, DONT_CARE, FALSE, "bcm4339a0_ag", "ap6335"},
+ {BCM4345_CHIP_ID, 6, DONT_CARE, FALSE, "bcm43455c0_ag", "ap6255"},
+ {BCM43454_CHIP_ID, 6, DONT_CARE, FALSE, "bcm43455c0_ag", ""},
+ {BCM4345_CHIP_ID, 9, DONT_CARE, FALSE, "bcm43456c5_ag", "ap6256"},
+ {BCM43454_CHIP_ID, 9, DONT_CARE, FALSE, "bcm43456c5_ag", ""},
+ {BCM4354_CHIP_ID, 1, DONT_CARE, FALSE, "bcm4354a1_ag", ""},
+ {BCM4354_CHIP_ID, 2, DONT_CARE, FALSE, "bcm4356a2_ag", "ap6356"},
+ {BCM4356_CHIP_ID, 2, DONT_CARE, FALSE, "bcm4356a2_ag", ""},
+ {BCM4371_CHIP_ID, 2, DONT_CARE, FALSE, "bcm4356a2_ag", ""},
+ {BCM43569_CHIP_ID, 3, DONT_CARE, FALSE, "bcm4358a3_ag", ""},
+ {BCM4359_CHIP_ID, 5, DONT_CARE, FALSE, "bcm4359b1_ag", ""},
+ {BCM4359_CHIP_ID, 9, DONT_CARE, FALSE, "bcm4359c0_ag", "ap6398s"},
+ {BCM4362_CHIP_ID, 0, DONT_CARE, TRUE, "bcm43752a0_ag", ""},
+#endif
+#ifdef BCMPCIE
+ {BCM4354_CHIP_ID, 2, DONT_CARE, FALSE, "bcm4356a2_pcie_ag", ""},
+ {BCM4356_CHIP_ID, 2, DONT_CARE, FALSE, "bcm4356a2_pcie_ag", ""},
+ {BCM4359_CHIP_ID, 9, DONT_CARE, FALSE, "bcm4359c0_pcie_ag", ""},
+ {BCM4362_CHIP_ID, 0, DONT_CARE, TRUE, "bcm43752a0_pcie_ag", ""},
+#endif
+#ifdef BCMDBUS
+ {BCM43143_CHIP_ID, 2, DONT_CARE, FALSE, "bcm43143b0", ""},
+ {BCM43242_CHIP_ID, 1, DONT_CARE, FALSE, "bcm43242a1_ag", ""},
+ {BCM43569_CHIP_ID, 2, DONT_CARE, FALSE, "bcm4358u_ag", "ap62x8"},
+#endif
+};
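+
+/*
+ * Illustrative lookup (editorial note, not part of the driver): the helpers
+ * below use this table to rewrite firmware/NVRAM file names. For example, a
+ * BCM43430 rev 1 (AP6212A module) running a plain STA build resolves to
+ * "fw_bcm43438a1.bin" via dhd_conf_set_fw_name_by_chip() and to
+ * "nvram_ap6212a.txt" via dhd_conf_set_nv_name_by_chip(), both placed in the
+ * directory of the original module-parameter paths.
+ */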
+
+#ifdef BCMSDIO
+void
+dhd_conf_free_mac_list(wl_mac_list_ctrl_t *mac_list)
+{
+ int i;
+
+ CONFIG_TRACE(("%s called\n", __FUNCTION__));
+ if (mac_list->m_mac_list_head) {
+ for (i=0; i<mac_list->count; i++) {
+ if (mac_list->m_mac_list_head[i].mac) {
+ CONFIG_TRACE(("%s Free mac %p\n", __FUNCTION__, mac_list->m_mac_list_head[i].mac));
+ kfree(mac_list->m_mac_list_head[i].mac);
+ }
+ }
+ CONFIG_TRACE(("%s Free m_mac_list_head %p\n", __FUNCTION__, mac_list->m_mac_list_head));
+ kfree(mac_list->m_mac_list_head);
+ }
+ mac_list->count = 0;
+}
+
+void
+dhd_conf_free_chip_nv_path_list(wl_chip_nv_path_list_ctrl_t *chip_nv_list)
+{
+ CONFIG_TRACE(("%s called\n", __FUNCTION__));
+
+ if (chip_nv_list->m_chip_nv_path_head) {
+ CONFIG_TRACE(("%s Free %p\n", __FUNCTION__, chip_nv_list->m_chip_nv_path_head));
+ kfree(chip_nv_list->m_chip_nv_path_head);
+ }
+ chip_nv_list->count = 0;
+}
+
+#if defined(HW_OOB) || defined(FORCE_WOWLAN)
+void
+dhd_conf_set_hw_oob_intr(bcmsdh_info_t *sdh, uint chip)
+{
+ uint32 gpiocontrol, addr;
+
+ if (CHIPID(chip) == BCM43362_CHIP_ID) {
+ printf("%s: Enable HW OOB for 43362\n", __FUNCTION__);
+ addr = SI_ENUM_BASE + OFFSETOF(chipcregs_t, gpiocontrol);
+ gpiocontrol = bcmsdh_reg_read(sdh, addr, 4);
+ gpiocontrol |= 0x2;
+ bcmsdh_reg_write(sdh, addr, 4, gpiocontrol);
+ bcmsdh_cfg_write(sdh, SDIO_FUNC_1, 0x10005, 0xf, NULL);
+ bcmsdh_cfg_write(sdh, SDIO_FUNC_1, 0x10006, 0x0, NULL);
+ bcmsdh_cfg_write(sdh, SDIO_FUNC_1, 0x10007, 0x2, NULL);
+ }
+}
+#endif
+
+#define SBSDIO_CIS_SIZE_LIMIT 0x200
+#define F0_BLOCK_SIZE 32
+int
+dhd_conf_set_blksize(bcmsdh_info_t *sdh)
+{
+ int err = 0;
+ uint fn, numfn;
+ int32 blksize = 0, cur_blksize = 0;
+ uint8 cisd;
+
+ numfn = bcmsdh_query_iofnum(sdh);
+
+ for (fn = 0; fn <= numfn; fn++) {
+ if (!fn)
+ blksize = F0_BLOCK_SIZE;
+ else {
+ bcmsdh_cisaddr_read(sdh, fn, &cisd, 24);
+ blksize = cisd;
+ bcmsdh_cisaddr_read(sdh, fn, &cisd, 25);
+ blksize |= cisd << 8;
+ }
+#ifdef CUSTOM_SDIO_F2_BLKSIZE
+ if (fn == 2 && blksize > CUSTOM_SDIO_F2_BLKSIZE) {
+ blksize = CUSTOM_SDIO_F2_BLKSIZE;
+ }
+#endif
+ bcmsdh_iovar_op(sdh, "sd_blocksize", &fn, sizeof(int32),
+ &cur_blksize, sizeof(int32), FALSE);
+ if (cur_blksize != blksize) {
+ printf("%s: fn=%d, blksize=%d, cur_blksize=%d\n", __FUNCTION__,
+ fn, blksize, cur_blksize);
+ blksize |= (fn<<16);
+ if (bcmsdh_iovar_op(sdh, "sd_blocksize", NULL, 0, &blksize,
+ sizeof(blksize), TRUE) != BCME_OK) {
+ DHD_ERROR(("%s: fail on %s set\n", __FUNCTION__, "sd_blocksize"));
+ err = -1;
+ }
+ }
+ }
+
+ return err;
+}
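+
+/*
+ * Worked example (illustrative): the per-function block size is read as a
+ * little-endian 16-bit value from CIS offsets 24 and 25, so bytes 0x00 and
+ * 0x02 decode to 0x0200 = 512 bytes. Function 0 is fixed at F0_BLOCK_SIZE
+ * (32), and function 2 may additionally be capped by CUSTOM_SDIO_F2_BLKSIZE.
+ */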
+
+int
+dhd_conf_get_mac(dhd_pub_t *dhd, bcmsdh_info_t *sdh, uint8 *mac)
+{
+ int i, err = -1;
+ uint8 *ptr = 0;
+ unsigned char tpl_code, tpl_link='\0';
+ uint8 header[3] = {0x80, 0x07, 0x19};
+ uint8 *cis;
+
+ if (!(cis = MALLOC(dhd->osh, SBSDIO_CIS_SIZE_LIMIT))) {
+ CONFIG_ERROR(("%s: cis malloc failed\n", __FUNCTION__));
+ return err;
+ }
+ bzero(cis, SBSDIO_CIS_SIZE_LIMIT);
+
+ if ((err = bcmsdh_cis_read(sdh, 0, cis, SBSDIO_CIS_SIZE_LIMIT))) {
+ CONFIG_ERROR(("%s: cis read err %d\n", __FUNCTION__, err));
+ MFREE(dhd->osh, cis, SBSDIO_CIS_SIZE_LIMIT);
+ return err;
+ }
+ err = -1; // reset err;
+ ptr = cis;
+ do {
+ /* 0xff means we're done */
+ tpl_code = *ptr;
+ ptr++;
+ if (tpl_code == 0xff)
+ break;
+
+ /* null entries have no link field or data */
+ if (tpl_code == 0x00)
+ continue;
+
+ tpl_link = *ptr;
+ ptr++;
+ /* a size of 0xff also means we're done */
+ if (tpl_link == 0xff)
+ break;
+ if (config_msg_level & CONFIG_TRACE_LEVEL) {
+ printf("%s: tpl_code=0x%02x, tpl_link=0x%02x, tag=0x%02x\n",
+ __FUNCTION__, tpl_code, tpl_link, *ptr);
+ printk("%s: value:", __FUNCTION__);
+ for (i=0; i<tpl_link-1; i++) {
+ printk("%02x ", ptr[i+1]);
+ if ((i+1) % 16 == 0)
+ printk("\n");
+ }
+ printk("\n");
+ }
+
+ if (tpl_code == 0x80 && tpl_link == 0x07 && *ptr == 0x19)
+ break;
+
+ ptr += tpl_link;
+ } while (1);
+
+ if (tpl_code == 0x80 && tpl_link == 0x07 && *ptr == 0x19) {
+ /* Normal OTP */
+ memcpy(mac, ptr+1, 6);
+ err = 0;
+ } else {
+ ptr = cis;
+ /* Special OTP */
+ if (bcmsdh_reg_read(sdh, SI_ENUM_BASE, 4) == 0x16044330) {
+ for (i=0; i<SBSDIO_CIS_SIZE_LIMIT; i++) {
+ if (!memcmp(header, ptr, 3)) {
+ memcpy(mac, ptr+3, 6);
+ err = 0;
+ break;
+ }
+ ptr++;
+ }
+ }
+ }
+
+ ASSERT(cis);
+ MFREE(dhd->osh, cis, SBSDIO_CIS_SIZE_LIMIT);
+
+ return err;
+}
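+
+/*
+ * Illustrative CIS layout (hypothetical bytes): the MAC tuple parsed above
+ * looks like
+ *
+ *   80 07 19 00 90 4c 11 22 33
+ *
+ * i.e. tuple code 0x80, link/length 0x07, tag 0x19, followed by the six
+ * address bytes, giving MAC 00:90:4c:11:22:33. Callers then split this into
+ * oui = 0x00904c and nic = 0x112233.
+ */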
+
+void
+dhd_conf_set_fw_name_by_mac(dhd_pub_t *dhd, bcmsdh_info_t *sdh, char *fw_path)
+{
+ int i, j;
+ uint8 mac[6]={0};
+ int fw_num=0, mac_num=0;
+ uint32 oui, nic;
+ wl_mac_list_t *mac_list;
+ wl_mac_range_t *mac_range;
+ int fw_type, fw_type_new;
+ char *name_ptr;
+
+ mac_list = dhd->conf->fw_by_mac.m_mac_list_head;
+ fw_num = dhd->conf->fw_by_mac.count;
+ if (!mac_list || !fw_num)
+ return;
+
+ if (dhd_conf_get_mac(dhd, sdh, mac)) {
+ CONFIG_ERROR(("%s: Can not read MAC address\n", __FUNCTION__));
+ return;
+ }
+ oui = (mac[0] << 16) | (mac[1] << 8) | (mac[2]);
+ nic = (mac[3] << 16) | (mac[4] << 8) | (mac[5]);
+
+ /* find out the last '/' */
+ i = strlen(fw_path);
+ while (i > 0) {
+ if (fw_path[i] == '/') {
+ i++;
+ break;
+ }
+ i--;
+ }
+ name_ptr = &fw_path[i];
+
+ if (strstr(name_ptr, "_apsta"))
+ fw_type = FW_TYPE_APSTA;
+ else if (strstr(name_ptr, "_p2p"))
+ fw_type = FW_TYPE_P2P;
+ else if (strstr(name_ptr, "_mesh"))
+ fw_type = FW_TYPE_MESH;
+ else if (strstr(name_ptr, "_es"))
+ fw_type = FW_TYPE_ES;
+ else if (strstr(name_ptr, "_mfg"))
+ fw_type = FW_TYPE_MFG;
+ else
+ fw_type = FW_TYPE_STA;
+
+ for (i=0; i<fw_num; i++) {
+ mac_num = mac_list[i].count;
+ mac_range = mac_list[i].mac;
+ if (strstr(mac_list[i].name, "_apsta"))
+ fw_type_new = FW_TYPE_APSTA;
+ else if (strstr(mac_list[i].name, "_p2p"))
+ fw_type_new = FW_TYPE_P2P;
+ else if (strstr(mac_list[i].name, "_mesh"))
+ fw_type_new = FW_TYPE_MESH;
+ else if (strstr(mac_list[i].name, "_es"))
+ fw_type_new = FW_TYPE_ES;
+ else if (strstr(mac_list[i].name, "_mfg"))
+ fw_type_new = FW_TYPE_MFG;
+ else
+ fw_type_new = FW_TYPE_STA;
+ if (fw_type != fw_type_new) {
+ printf("%s: fw_typ=%d != fw_type_new=%d\n", __FUNCTION__, fw_type, fw_type_new);
+ continue;
+ }
+ for (j=0; j<mac_num; j++) {
+ if (oui == mac_range[j].oui) {
+ if (nic >= mac_range[j].nic_start && nic <= mac_range[j].nic_end) {
+ strcpy(name_ptr, mac_list[i].name);
+ printf("%s: matched oui=0x%06X, nic=0x%06X\n",
+ __FUNCTION__, oui, nic);
+ printf("%s: fw_path=%s\n", __FUNCTION__, fw_path);
+ return;
+ }
+ }
+ }
+ }
+}
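+
+/*
+ * Note with a hypothetical config entry (illustrative): a fw_by_mac range of
+ * oui 0x00904c, nic_start 0x112200, nic_end 0x1122ff with the name
+ * "fw_bcm43438a1_apsta.bin" only replaces the current firmware when the
+ * running fw_path is itself an "_apsta" build; entries whose
+ * _apsta/_p2p/_mesh/_es/_mfg suffix class differs are skipped even if the
+ * OUI/NIC range matches.
+ */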
+
+void
+dhd_conf_set_nv_name_by_mac(dhd_pub_t *dhd, bcmsdh_info_t *sdh, char *nv_path)
+{
+ int i, j;
+ uint8 mac[6]={0};
+ int nv_num=0, mac_num=0;
+ uint32 oui, nic;
+ wl_mac_list_t *mac_list;
+ wl_mac_range_t *mac_range;
+ char *pnv_name;
+
+ mac_list = dhd->conf->nv_by_mac.m_mac_list_head;
+ nv_num = dhd->conf->nv_by_mac.count;
+ if (!mac_list || !nv_num)
+ return;
+
+ if (dhd_conf_get_mac(dhd, sdh, mac)) {
+ CONFIG_ERROR(("%s: Can not read MAC address\n", __FUNCTION__));
+ return;
+ }
+ oui = (mac[0] << 16) | (mac[1] << 8) | (mac[2]);
+ nic = (mac[3] << 16) | (mac[4] << 8) | (mac[5]);
+
+ /* find out the last '/' */
+ i = strlen(nv_path);
+ while (i > 0) {
+ if (nv_path[i] == '/') break;
+ i--;
+ }
+ pnv_name = &nv_path[i+1];
+
+ for (i=0; i<nv_num; i++) {
+ mac_num = mac_list[i].count;
+ mac_range = mac_list[i].mac;
+ for (j=0; j<mac_num; j++) {
+ if (oui == mac_range[j].oui) {
+ if (nic >= mac_range[j].nic_start && nic <= mac_range[j].nic_end) {
+ strcpy(pnv_name, mac_list[i].name);
+ printf("%s: matched oui=0x%06X, nic=0x%06X\n",
+ __FUNCTION__, oui, nic);
+ printf("%s: nv_path=%s\n", __FUNCTION__, nv_path);
+ return;
+ }
+ }
+ }
+ }
+}
+#endif
+
+void
+dhd_conf_free_country_list(conf_country_list_t *country_list)
+{
+ int i;
+
+ CONFIG_TRACE(("%s called\n", __FUNCTION__));
+ for (i=0; i<country_list->count; i++) {
+ if (country_list->cspec[i]) {
+ CONFIG_TRACE(("%s Free cspec %p\n", __FUNCTION__, country_list->cspec[i]));
+ kfree(country_list->cspec[i]);
+ }
+ }
+ country_list->count = 0;
+}
+
+void
+dhd_conf_set_fw_name_by_chip(dhd_pub_t *dhd, char *fw_path)
+{
+ int fw_type, ag_type;
+ uint chip, chiprev;
+ int i;
+ char *name_ptr;
+
+ chip = dhd->conf->chip;
+ chiprev = dhd->conf->chiprev;
+
+ if (fw_path[0] == '\0') {
+#ifdef CONFIG_BCMDHD_FW_PATH
+ bcm_strncpy_s(fw_path, MOD_PARAM_PATHLEN-1, CONFIG_BCMDHD_FW_PATH, MOD_PARAM_PATHLEN-1);
+ if (fw_path[0] == '\0')
+#endif
+ {
+ printf("firmware path is null\n");
+ return;
+ }
+ }
+#ifndef FW_PATH_AUTO_SELECT
+ return;
+#endif
+
+ /* find out the last '/' */
+ i = strlen(fw_path);
+ while (i > 0) {
+ if (fw_path[i] == '/') {
+ i++;
+ break;
+ }
+ i--;
+ }
+ name_ptr = &fw_path[i];
+#ifdef BAND_AG
+ ag_type = FW_TYPE_AG;
+#else
+ ag_type = strstr(name_ptr, "_ag") ? FW_TYPE_AG : FW_TYPE_G;
+#endif
+ if (strstr(name_ptr, "_apsta"))
+ fw_type = FW_TYPE_APSTA;
+ else if (strstr(name_ptr, "_p2p"))
+ fw_type = FW_TYPE_P2P;
+ else if (strstr(name_ptr, "_mesh"))
+ fw_type = FW_TYPE_MESH;
+ else if (strstr(name_ptr, "_es"))
+ fw_type = FW_TYPE_ES;
+ else if (strstr(name_ptr, "_mfg"))
+ fw_type = FW_TYPE_MFG;
+ else
+ fw_type = FW_TYPE_STA;
+
+ for (i = 0; i < sizeof(chip_name_map)/sizeof(chip_name_map[0]); i++) {
+ const cihp_name_map_t* row = &chip_name_map[i];
+ if (row->chip == chip && row->chiprev == chiprev &&
+ (row->ag_type == ag_type || row->ag_type == DONT_CARE)) {
+ strcpy(name_ptr, "fw_");
+ strcat(fw_path, row->chip_name);
+#ifdef BCMUSBDEV_COMPOSITE
+ strcat(fw_path, "_cusb");
+#endif
+ if (fw_type == FW_TYPE_APSTA)
+ strcat(fw_path, "_apsta.bin");
+ else if (fw_type == FW_TYPE_P2P)
+ strcat(fw_path, "_p2p.bin");
+ else if (fw_type == FW_TYPE_MESH)
+ strcat(fw_path, "_mesh.bin");
+ else if (fw_type == FW_TYPE_ES)
+ strcat(fw_path, "_es.bin");
+ else if (fw_type == FW_TYPE_MFG)
+ strcat(fw_path, "_mfg.bin");
+ else
+ strcat(fw_path, ".bin");
+ }
+ }
+
+ dhd->conf->fw_type = fw_type;
+
+ CONFIG_TRACE(("%s: firmware_path=%s\n", __FUNCTION__, fw_path));
+}
+
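+/*
+ * Build the CLM blob file name clm_<chip>.blob for chips whose chip_name_map
+ * entry has the clm flag set.
+ */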
+void
+dhd_conf_set_clm_name_by_chip(dhd_pub_t *dhd, char *clm_path)
+{
+ uint chip, chiprev;
+ int i;
+ char *name_ptr;
+
+ chip = dhd->conf->chip;
+ chiprev = dhd->conf->chiprev;
+
+ if (clm_path[0] == '\0') {
+ printf("clm path is null\n");
+ return;
+ }
+
+ /* find out the last '/' */
+ i = strlen(clm_path);
+ while (i > 0) {
+ if (clm_path[i] == '/') {
+ i++;
+ break;
+ }
+ i--;
+ }
+ name_ptr = &clm_path[i];
+
+ for (i = 0; i < sizeof(chip_name_map)/sizeof(chip_name_map[0]); i++) {
+ const cihp_name_map_t* row = &chip_name_map[i];
+ if (row->chip == chip && row->chiprev == chiprev && row->clm) {
+ strcpy(name_ptr, "clm_");
+ strcat(clm_path, row->chip_name);
+ strcat(clm_path, ".blob");
+ }
+ }
+
+ CONFIG_TRACE(("%s: clm_path=%s\n", __FUNCTION__, clm_path));
+}
+
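+/*
+ * Build the NVRAM file name nvram_<module>.txt from chip_name_map, then let
+ * any matching nv_by_chip entry from config.txt override it.
+ */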
+void
+dhd_conf_set_nv_name_by_chip(dhd_pub_t *dhd, char *nv_path)
+{
+ uint chip, chiprev;
+ int i;
+ char *name_ptr;
+
+ chip = dhd->conf->chip;
+ chiprev = dhd->conf->chiprev;
+
+ if (nv_path[0] == '\0') {
+#ifdef CONFIG_BCMDHD_NVRAM_PATH
+ bcm_strncpy_s(nv_path, MOD_PARAM_PATHLEN-1, CONFIG_BCMDHD_NVRAM_PATH, MOD_PARAM_PATHLEN-1);
+ if (nv_path[0] == '\0')
+#endif
+ {
+ printf("nvram path is null\n");
+ return;
+ }
+ }
+
+ /* find out the last '/' */
+ i = strlen(nv_path);
+ while (i > 0) {
+ if (nv_path[i] == '/') {
+ i++;
+ break;
+ }
+ i--;
+ }
+ name_ptr = &nv_path[i];
+
+ for (i = 0; i < sizeof(chip_name_map)/sizeof(chip_name_map[0]); i++) {
+ const cihp_name_map_t* row = &chip_name_map[i];
+ if (row->chip == chip && row->chiprev == chiprev && strlen(row->module_name)) {
+ strcpy(name_ptr, "nvram_");
+ strcat(name_ptr, row->module_name);
+#ifdef BCMUSBDEV_COMPOSITE
+ strcat(name_ptr, "_cusb");
+#endif
+ strcat(name_ptr, ".txt");
+ }
+ }
+
+ for (i=0; i<dhd->conf->nv_by_chip.count; i++) {
+ if (chip==dhd->conf->nv_by_chip.m_chip_nv_path_head[i].chip &&
+ chiprev==dhd->conf->nv_by_chip.m_chip_nv_path_head[i].chiprev) {
+ strcpy(name_ptr, dhd->conf->nv_by_chip.m_chip_nv_path_head[i].name);
+ break;
+ }
+ }
+
+ CONFIG_TRACE(("%s: nvram_path=%s\n", __FUNCTION__, nv_path));
+}
+
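+/* Copy src_path to dst_path and replace its file name portion with dst_name. */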
+void
+dhd_conf_set_path(dhd_pub_t *dhd, char *dst_name, char *dst_path, char *src_path)
+{
+ int i;
+
+ if (src_path[0] == '\0') {
+ printf("src_path is null\n");
+ return;
+ } else
+ strcpy(dst_path, src_path);
+
+ /* find out the last '/' */
+ i = strlen(dst_path);
+ while (i > 0) {
+ if (dst_path[i] == '/') {
+ i++;
+ break;
+ }
+ i--;
+ }
+ strcpy(&dst_path[i], dst_name);
+
+ CONFIG_TRACE(("%s: dst_path=%s\n", __FUNCTION__, dst_path));
+}
+
+#ifdef CONFIG_PATH_AUTO_SELECT
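+/* Build the config file name config_<chip>.txt from chip_name_map. */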
+void
+dhd_conf_set_conf_name_by_chip(dhd_pub_t *dhd, char *conf_path)
+{
+ uint chip, chiprev;
+ int i;
+ char *name_ptr;
+
+ chip = dhd->conf->chip;
+ chiprev = dhd->conf->chiprev;
+
+ if (conf_path[0] == '\0') {
+ printf("config path is null\n");
+ return;
+ }
+
+ /* find out the last '/' */
+ i = strlen(conf_path);
+ while (i > 0) {
+ if (conf_path[i] == '/') {
+ i++;
+ break;
+ }
+ i--;
+ }
+ name_ptr = &conf_path[i];
+
+ for (i = 0; i < sizeof(chip_name_map)/sizeof(chip_name_map[0]); i++) {
+ const cihp_name_map_t* row = &chip_name_map[i];
+ if (row->chip == chip && row->chiprev == chiprev) {
+ strcpy(name_ptr, "config_");
+ strcat(conf_path, row->chip_name);
+ strcat(conf_path, ".txt");
+ }
+ }
+
+ CONFIG_TRACE(("%s: config_path=%s\n", __FUNCTION__, conf_path));
+}
+#endif
+
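+/*
+ * Set an integer iovar (or a plain ioctl when cmd != WLC_SET_VAR), optionally
+ * bringing the interface down first; skipped when val is below def.
+ */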
+int
+dhd_conf_set_intiovar(dhd_pub_t *dhd, uint cmd, char *name, int val,
+ int def, bool down)
+{
+ int ret = -1;
+ char iovbuf[WL_EVENTING_MASK_LEN + 12]; /* Room for "event_msgs" + '\0' + bitvec */
+
+ if (val >= def) {
+ if (down) {
+ if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_DOWN, NULL, 0, TRUE, 0)) < 0)
+ CONFIG_ERROR(("%s: WLC_DOWN setting failed %d\n", __FUNCTION__, ret));
+ }
+ if (cmd == WLC_SET_VAR) {
+ CONFIG_TRACE(("%s: set %s %d\n", __FUNCTION__, name, val));
+ bcm_mkiovar(name, (char *)&val, sizeof(val), iovbuf, sizeof(iovbuf));
+ if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0)) < 0)
+ CONFIG_ERROR(("%s: %s setting failed %d\n", __FUNCTION__, name, ret));
+ } else {
+ CONFIG_TRACE(("%s: set %s %d %d\n", __FUNCTION__, name, cmd, val));
+ if ((ret = dhd_wl_ioctl_cmd(dhd, cmd, &val, sizeof(val), TRUE, 0)) < 0)
+ CONFIG_ERROR(("%s: %s setting failed %d\n", __FUNCTION__, name, ret));
+ }
+ }
+
+ return ret;
+}
+
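+/*
+ * Set a buffer iovar (or a plain ioctl when cmd != WLC_SET_VAR), optionally
+ * bringing the interface down first.
+ */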
+int
+dhd_conf_set_bufiovar(dhd_pub_t *dhd, uint cmd, char *name, char *buf,
+ int len, bool down)
+{
+ char iovbuf[WLC_IOCTL_SMLEN];
+ int ret = -1;
+
+ if (down) {
+ if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_DOWN, NULL, 0, TRUE, 0)) < 0)
+ CONFIG_ERROR(("%s: WLC_DOWN setting failed %d\n", __FUNCTION__, ret));
+ }
+
+ if (cmd == WLC_SET_VAR) {
+ bcm_mkiovar(name, buf, len, iovbuf, sizeof(iovbuf));
+ if ((ret = dhd_wl_ioctl_cmd(dhd, cmd, iovbuf, sizeof(iovbuf), TRUE, 0)) < 0)
+ CONFIG_ERROR(("%s: %s setting failed %d\n", __FUNCTION__, name, ret));
+ } else {
+ if ((ret = dhd_wl_ioctl_cmd(dhd, cmd, buf, len, TRUE, 0)) < 0)
+ CONFIG_ERROR(("%s: %s setting failed %d\n", __FUNCTION__, name, ret));
+ }
+
+ return ret;
+}
+
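+/* Query an iovar (or plain ioctl) and copy up to len bytes of the result into buf. */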
+int
+dhd_conf_get_iovar(dhd_pub_t *dhd, int cmd, char *name, char *buf, int len, int ifidx)
+{
+ char iovbuf[WLC_IOCTL_SMLEN];
+ int ret = -1;
+
+ if (cmd == WLC_GET_VAR) {
+ if (bcm_mkiovar(name, NULL, 0, iovbuf, sizeof(iovbuf))) {
+ ret = dhd_wl_ioctl_cmd(dhd, cmd, iovbuf, sizeof(iovbuf), FALSE, ifidx);
+ if (!ret) {
+ memcpy(buf, iovbuf, len);
+ } else {
+ CONFIG_ERROR(("%s: get iovar %s failed %d\n", __FUNCTION__, name, ret));
+ }
+ } else {
+ CONFIG_ERROR(("%s: mkiovar %s failed\n", __FUNCTION__, name));
+ }
+ } else {
+ ret = dhd_wl_ioctl_cmd(dhd, cmd, buf, len, FALSE, 0);
+ if (ret < 0)
+ CONFIG_ERROR(("%s: get iovar %s failed %d\n", __FUNCTION__, name, ret));
+ }
+
+ return ret;
+}
+
+uint
+dhd_conf_get_band(dhd_pub_t *dhd)
+{
+ int band = -1;
+
+ if (dhd && dhd->conf)
+ band = dhd->conf->band;
+ else
+ CONFIG_ERROR(("%s: dhd or conf is NULL\n", __FUNCTION__));
+
+ return band;
+}
+
+int
+dhd_conf_get_country(dhd_pub_t *dhd, wl_country_t *cspec)
+{
+ int bcmerror = -1;
+
+ memset(cspec, 0, sizeof(wl_country_t));
+ bcm_mkiovar("country", NULL, 0, (char*)cspec, sizeof(wl_country_t));
+ if ((bcmerror = dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, cspec, sizeof(wl_country_t), FALSE, 0)) < 0)
+ CONFIG_ERROR(("%s: country code getting failed %d\n", __FUNCTION__, bcmerror));
+
+ return bcmerror;
+}
+
+int
+dhd_conf_map_country_list(dhd_pub_t *dhd, wl_country_t *cspec)
+{
+ int bcmerror = -1, i;
+ struct dhd_conf *conf = dhd->conf;
+ conf_country_list_t *country_list = &conf->country_list;
+
+ for (i = 0; i < country_list->count; i++) {
+ if (!strncmp(cspec->country_abbrev, country_list->cspec[i]->country_abbrev, 2)) {
+ memcpy(cspec->ccode, country_list->cspec[i]->ccode, WLC_CNTRY_BUF_SZ);
+ cspec->rev = country_list->cspec[i]->rev;
+ bcmerror = 0;
+ }
+ }
+
+ if (!bcmerror)
+ printf("%s: %s/%d\n", __FUNCTION__, cspec->ccode, cspec->rev);
+
+ return bcmerror;
+}
+
+int
+dhd_conf_set_country(dhd_pub_t *dhd, wl_country_t *cspec)
+{
+ int bcmerror = -1;
+
+ memset(&dhd->dhd_cspec, 0, sizeof(wl_country_t));
+
+ printf("%s: set country %s, revision %d\n", __FUNCTION__, cspec->ccode, cspec->rev);
+ dhd_conf_set_bufiovar(dhd, WLC_SET_VAR, "country", (char *)cspec, sizeof(wl_country_t), FALSE);
+ dhd_conf_get_country(dhd, cspec);
+ printf("Country code: %s (%s/%d)\n", cspec->country_abbrev, cspec->ccode, cspec->rev);
+
+ return bcmerror;
+}
+
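+/*
+ * If reading the valid channel list fails, or the 2.4G channel list looks
+ * truncated (fewer than 11 channels), re-apply the configured country code,
+ * falling back to US/0 when that fails.
+ */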
+int
+dhd_conf_fix_country(dhd_pub_t *dhd)
+{
+ int bcmerror = -1;
+ uint band;
+ wl_uint32_list_t *list;
+ u8 valid_chan_list[sizeof(u32)*(WL_NUMCHANNELS + 1)];
+ wl_country_t cspec;
+
+ if (!(dhd && dhd->conf)) {
+ return bcmerror;
+ }
+
+ memset(valid_chan_list, 0, sizeof(valid_chan_list));
+ list = (wl_uint32_list_t *)(void *) valid_chan_list;
+ list->count = htod32(WL_NUMCHANNELS);
+ if ((bcmerror = dhd_wl_ioctl_cmd(dhd, WLC_GET_VALID_CHANNELS, valid_chan_list, sizeof(valid_chan_list), FALSE, 0)) < 0) {
+ CONFIG_ERROR(("%s: get channels failed with %d\n", __FUNCTION__, bcmerror));
+ }
+
+ band = dhd_conf_get_band(dhd);
+
+ if (bcmerror || ((band==WLC_BAND_AUTO || band==WLC_BAND_2G) &&
+ dtoh32(list->count)<11)) {
+ CONFIG_ERROR(("%s: bcmerror=%d, # of channels %d\n",
+ __FUNCTION__, bcmerror, dtoh32(list->count)));
+ dhd_conf_map_country_list(dhd, &dhd->conf->cspec);
+ if ((bcmerror = dhd_conf_set_country(dhd, &dhd->conf->cspec)) < 0) {
+ strcpy(cspec.country_abbrev, "US");
+ cspec.rev = 0;
+ strcpy(cspec.ccode, "US");
+ dhd_conf_map_country_list(dhd, &cspec);
+ dhd_conf_set_country(dhd, &cspec);
+ }
+ }
+
+ return bcmerror;
+}
+
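+/*
+ * Return true if the channel is allowed by the channels= list in config.txt;
+ * an empty list allows every channel.
+ */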
+bool
+dhd_conf_match_channel(dhd_pub_t *dhd, uint32 channel)
+{
+ int i;
+ bool match = false;
+
+ if (dhd && dhd->conf) {
+ if (dhd->conf->channels.count == 0)
+ return true;
+ for (i=0; i<dhd->conf->channels.count; i++) {
+ if (channel == dhd->conf->channels.channel[i])
+ match = true;
+ }
+ } else {
+ match = true;
+ CONFIG_ERROR(("%s: dhd or conf is NULL\n", __FUNCTION__));
+ }
+
+ return match;
+}
+
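+/*
+ * Apply roam_off and, when roaming is not disabled in both active and suspend
+ * state, also push roam_trigger, roam_scan_period, roam_delta and
+ * fullroamperiod from config.txt.
+ */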
+int
+dhd_conf_set_roam(dhd_pub_t *dhd)
+{
+ int bcmerror = -1;
+ struct dhd_conf *conf = dhd->conf;
+
+ dhd_roam_disable = conf->roam_off;
+ dhd_conf_set_intiovar(dhd, WLC_SET_VAR, "roam_off", dhd->conf->roam_off, 0, FALSE);
+
+ if (!conf->roam_off || !conf->roam_off_suspend) {
+ printf("%s: set roam_trigger %d\n", __FUNCTION__, conf->roam_trigger[0]);
+ dhd_conf_set_bufiovar(dhd, WLC_SET_ROAM_TRIGGER, "WLC_SET_ROAM_TRIGGER",
+ (char *)conf->roam_trigger, sizeof(conf->roam_trigger), FALSE);
+
+ printf("%s: set roam_scan_period %d\n", __FUNCTION__, conf->roam_scan_period[0]);
+ dhd_conf_set_bufiovar(dhd, WLC_SET_ROAM_SCAN_PERIOD, "WLC_SET_ROAM_SCAN_PERIOD",
+ (char *)conf->roam_scan_period, sizeof(conf->roam_scan_period), FALSE);
+
+ printf("%s: set roam_delta %d\n", __FUNCTION__, conf->roam_delta[0]);
+ dhd_conf_set_bufiovar(dhd, WLC_SET_ROAM_DELTA, "WLC_SET_ROAM_DELTA",
+ (char *)conf->roam_delta, sizeof(conf->roam_delta), FALSE);
+
+ dhd_conf_set_intiovar(dhd, WLC_SET_VAR, "fullroamperiod", dhd->conf->fullroamperiod, 1, FALSE);
+ }
+
+ return bcmerror;
+}
+
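+/* Push the configured 2G/5G bw_cap values (only when set to >= 0) to the firmware. */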
+void
+dhd_conf_set_bw_cap(dhd_pub_t *dhd)
+{
+ struct {
+ u32 band;
+ u32 bw_cap;
+ } param = {0, 0};
+
+ if (dhd->conf->bw_cap[0] >= 0) {
+ memset(¶m, 0, sizeof(param));
+ param.band = WLC_BAND_2G;
+ param.bw_cap = (uint)dhd->conf->bw_cap[0];
+ printf("%s: set bw_cap 2g 0x%x\n", __FUNCTION__, param.bw_cap);
+ dhd_conf_set_bufiovar(dhd, WLC_SET_VAR, "bw_cap", (char *)¶m, sizeof(param), TRUE);
+ }
+
+ if (dhd->conf->bw_cap[1] >= 0) {
+ memset(¶m, 0, sizeof(param));
+ param.band = WLC_BAND_5G;
+ param.bw_cap = (uint)dhd->conf->bw_cap[1];
+ printf("%s: set bw_cap 5g 0x%x\n", __FUNCTION__, param.bw_cap);
+ dhd_conf_set_bufiovar(dhd, WLC_SET_VAR, "bw_cap", (char *)¶m, sizeof(param), TRUE);
+ }
+}
+
+void
+dhd_conf_get_wme(dhd_pub_t *dhd, int mode, edcf_acparam_t *acp)
+{
+ int bcmerror = -1;
+ char iovbuf[WLC_IOCTL_SMLEN];
+ edcf_acparam_t *acparam;
+
+ bzero(iovbuf, sizeof(iovbuf));
+
+ /*
+ * Get current acparams, using buf as an input buffer.
+ * Return data is array of 4 ACs of wme params.
+ */
+ if (mode == 0)
+ bcm_mkiovar("wme_ac_sta", NULL, 0, iovbuf, sizeof(iovbuf));
+ else
+ bcm_mkiovar("wme_ac_ap", NULL, 0, iovbuf, sizeof(iovbuf));
+ if ((bcmerror = dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, iovbuf, sizeof(iovbuf), FALSE, 0)) < 0) {
+ CONFIG_ERROR(("%s: wme_ac_sta getting failed %d\n", __FUNCTION__, bcmerror));
+ return;
+ }
+ memcpy((char*)acp, iovbuf, sizeof(edcf_acparam_t)*AC_COUNT);
+
+ acparam = &acp[AC_BK];
+ CONFIG_TRACE(("%s: BK: aci %d aifsn %d ecwmin %d ecwmax %d txop 0x%x\n",
+ __FUNCTION__,
+ acparam->ACI, acparam->ACI&EDCF_AIFSN_MASK,
+ acparam->ECW&EDCF_ECWMIN_MASK, (acparam->ECW&EDCF_ECWMAX_MASK)>>EDCF_ECWMAX_SHIFT,
+ acparam->TXOP));
+ acparam = &acp[AC_BE];
+ CONFIG_TRACE(("%s: BE: aci %d aifsn %d ecwmin %d ecwmax %d txop 0x%x\n",
+ __FUNCTION__,
+ acparam->ACI, acparam->ACI&EDCF_AIFSN_MASK,
+ acparam->ECW&EDCF_ECWMIN_MASK, (acparam->ECW&EDCF_ECWMAX_MASK)>>EDCF_ECWMAX_SHIFT,
+ acparam->TXOP));
+ acparam = &acp[AC_VI];
+ CONFIG_TRACE(("%s: VI: aci %d aifsn %d ecwmin %d ecwmax %d txop 0x%x\n",
+ __FUNCTION__,
+ acparam->ACI, acparam->ACI&EDCF_AIFSN_MASK,
+ acparam->ECW&EDCF_ECWMIN_MASK, (acparam->ECW&EDCF_ECWMAX_MASK)>>EDCF_ECWMAX_SHIFT,
+ acparam->TXOP));
+ acparam = &acp[AC_VO];
+ CONFIG_TRACE(("%s: VO: aci %d aifsn %d ecwmin %d ecwmax %d txop 0x%x\n",
+ __FUNCTION__,
+ acparam->ACI, acparam->ACI&EDCF_AIFSN_MASK,
+ acparam->ECW&EDCF_ECWMIN_MASK, (acparam->ECW&EDCF_ECWMAX_MASK)>>EDCF_ECWMAX_SHIFT,
+ acparam->TXOP));
+
+ return;
+}
+
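+/*
+ * Merge the aifsn/ecwmin/ecwmax/txop overrides from config.txt into the
+ * current EDCF parameters for one access category and push the result back
+ * through wme_ac_sta or wme_ac_ap.
+ */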
+void
+dhd_conf_update_wme(dhd_pub_t *dhd, int mode, edcf_acparam_t *acparam_cur, int aci)
+{
+ int aifsn, ecwmin, ecwmax, txop;
+ edcf_acparam_t *acp;
+ struct dhd_conf *conf = dhd->conf;
+ wme_param_t *wme;
+
+ if (mode == 0)
+ wme = &conf->wme_sta;
+ else
+ wme = &conf->wme_ap;
+
+ /* Default value */
+ aifsn = acparam_cur->ACI&EDCF_AIFSN_MASK;
+ ecwmin = acparam_cur->ECW&EDCF_ECWMIN_MASK;
+ ecwmax = (acparam_cur->ECW&EDCF_ECWMAX_MASK)>>EDCF_ECWMAX_SHIFT;
+ txop = acparam_cur->TXOP;
+
+ /* Modified value */
+ if (wme->aifsn[aci] > 0)
+ aifsn = wme->aifsn[aci];
+ if (wme->ecwmin[aci] > 0)
+ ecwmin = wme->ecwmin[aci];
+ if (wme->ecwmax[aci] > 0)
+ ecwmax = wme->ecwmax[aci];
+ if (wme->txop[aci] > 0)
+ txop = wme->txop[aci];
+
+ if (!(wme->aifsn[aci] || wme->ecwmin[aci] ||
+ wme->ecwmax[aci] || wme->txop[aci]))
+ return;
+
+ /* Update */
+ acp = acparam_cur;
+ acp->ACI = (acp->ACI & ~EDCF_AIFSN_MASK) | (aifsn & EDCF_AIFSN_MASK);
+ acp->ECW = ((ecwmax << EDCF_ECWMAX_SHIFT) & EDCF_ECWMAX_MASK) | (acp->ECW & EDCF_ECWMIN_MASK);
+ acp->ECW = ((acp->ECW & EDCF_ECWMAX_MASK) | (ecwmin & EDCF_ECWMIN_MASK));
+ acp->TXOP = txop;
+
+ printf("%s: wme_ac %s aci %d aifsn %d ecwmin %d ecwmax %d txop 0x%x\n",
+ __FUNCTION__, mode?"ap":"sta",
+ acp->ACI, acp->ACI&EDCF_AIFSN_MASK,
+ acp->ECW&EDCF_ECWMIN_MASK, (acp->ECW&EDCF_ECWMAX_MASK)>>EDCF_ECWMAX_SHIFT,
+ acp->TXOP);
+
+ /*
+ * Now use buf as an output buffer.
+ * Put WME acparams after "wme_ac\0" in buf.
+ * NOTE: only one of the four ACs can be set at a time.
+ */
+ if (mode == 0)
+ dhd_conf_set_bufiovar(dhd, WLC_SET_VAR, "wme_ac_sta", (char *)acp, sizeof(edcf_acparam_t), FALSE);
+ else
+ dhd_conf_set_bufiovar(dhd, WLC_SET_VAR, "wme_ac_ap", (char *)acp, sizeof(edcf_acparam_t), FALSE);
+
+}
+
+void
+dhd_conf_set_wme(dhd_pub_t *dhd, int mode)
+{
+ edcf_acparam_t acparam_cur[AC_COUNT];
+
+ if (dhd && dhd->conf) {
+ if (!dhd->conf->force_wme_ac) {
+ CONFIG_TRACE(("%s: force_wme_ac is not enabled %d\n",
+ __FUNCTION__, dhd->conf->force_wme_ac));
+ return;
+ }
+
+ CONFIG_TRACE(("%s: Before change:\n", __FUNCTION__));
+ dhd_conf_get_wme(dhd, mode, acparam_cur);
+
+ dhd_conf_update_wme(dhd, mode, &acparam_cur[AC_BK], AC_BK);
+ dhd_conf_update_wme(dhd, mode, &acparam_cur[AC_BE], AC_BE);
+ dhd_conf_update_wme(dhd, mode, &acparam_cur[AC_VI], AC_VI);
+ dhd_conf_update_wme(dhd, mode, &acparam_cur[AC_VO], AC_VO);
+
+ CONFIG_TRACE(("%s: After change:\n", __FUNCTION__));
+ dhd_conf_get_wme(dhd, mode, acparam_cur);
+ } else {
+ CONFIG_ERROR(("%s: dhd or conf is NULL\n", __FUNCTION__));
+ }
+
+ return;
+}
+
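+/* Apply any mchan_bw entries that match the current p2p and miracast modes. */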
+void
+dhd_conf_set_mchan_bw(dhd_pub_t *dhd, int p2p_mode, int miracast_mode)
+{
+ int i;
+ struct dhd_conf *conf = dhd->conf;
+ bool set = true;
+
+ for (i=0; i<MCHAN_MAX_NUM; i++) {
+ set = true;
+ set &= (conf->mchan[i].bw >= 0);
+ set &= ((conf->mchan[i].p2p_mode == -1) | (conf->mchan[i].p2p_mode == p2p_mode));
+ set &= ((conf->mchan[i].miracast_mode == -1) | (conf->mchan[i].miracast_mode == miracast_mode));
+ if (set) {
+ dhd_conf_set_intiovar(dhd, WLC_SET_VAR, "mchan_bw", conf->mchan[i].bw, 0, FALSE);
+ }
+ }
+
+ return;
+}
+
+#ifdef PKT_FILTER_SUPPORT
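+/*
+ * Append the pkt_filter_add entries from config.txt, plus an optional magic
+ * packet filter built from the device MAC, to dhd->pktfilter[].
+ */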
+void
+dhd_conf_add_pkt_filter(dhd_pub_t *dhd)
+{
+ int i, j;
+ char str[16];
+#define MACS "%02x%02x%02x%02x%02x%02x"
+
+ /*
+ * Filter in less pkt: ARP(0x0806, ID is 105), BRCM(0x886C), 802.1X(0x888E)
+ * 1) dhd_master_mode=1
+ * 2) pkt_filter_del=100, 102, 103, 104, 105
+ * 3) pkt_filter_add=131 0 0 12 0xFFFF 0x886C, 132 0 0 12 0xFFFF 0x888E
+ * 4) magic_pkt_filter_add=141 0 1 12
+ */
+ for(i=0; i<dhd->conf->pkt_filter_add.count; i++) {
+ dhd->pktfilter[i+dhd->pktfilter_count] = dhd->conf->pkt_filter_add.filter[i];
+ printf("%s: %s\n", __FUNCTION__, dhd->pktfilter[i+dhd->pktfilter_count]);
+ }
+ dhd->pktfilter_count += i;
+
+ if (dhd->conf->magic_pkt_filter_add) {
+ strcat(dhd->conf->magic_pkt_filter_add, " 0x");
+ strcat(dhd->conf->magic_pkt_filter_add, "FFFFFFFFFFFF");
+ for (j=0; j<16; j++)
+ strcat(dhd->conf->magic_pkt_filter_add, "FFFFFFFFFFFF");
+ strcat(dhd->conf->magic_pkt_filter_add, " 0x");
+ strcat(dhd->conf->magic_pkt_filter_add, "FFFFFFFFFFFF");
+ sprintf(str, MACS, MAC2STRDBG(dhd->mac.octet));
+ for (j=0; j<16; j++)
+ strncat(dhd->conf->magic_pkt_filter_add, str, 12);
+ dhd->pktfilter[dhd->pktfilter_count] = dhd->conf->magic_pkt_filter_add;
+ dhd->pktfilter_count += 1;
+ }
+}
+
+bool
+dhd_conf_del_pkt_filter(dhd_pub_t *dhd, uint32 id)
+{
+ int i;
+
+ if (dhd && dhd->conf) {
+ for (i=0; i<dhd->conf->pkt_filter_del.count; i++) {
+ if (id == dhd->conf->pkt_filter_del.id[i]) {
+ printf("%s: %d\n", __FUNCTION__, dhd->conf->pkt_filter_del.id[i]);
+ return true;
+ }
+ }
+ return false;
+ }
+ return false;
+}
+
+void
+dhd_conf_discard_pkt_filter(dhd_pub_t *dhd)
+{
+ dhd->pktfilter_count = 6;
+ dhd->pktfilter[DHD_UNICAST_FILTER_NUM] = NULL;
+ dhd->pktfilter[DHD_BROADCAST_FILTER_NUM] = "101 0 0 0 0xFFFFFFFFFFFF 0xFFFFFFFFFFFF";
+ dhd->pktfilter[DHD_MULTICAST4_FILTER_NUM] = "102 0 0 0 0xFFFFFF 0x01005E";
+ dhd->pktfilter[DHD_MULTICAST6_FILTER_NUM] = "103 0 0 0 0xFFFF 0x3333";
+ dhd->pktfilter[DHD_MDNS_FILTER_NUM] = NULL;
+ /* Do not enable ARP to pkt filter if dhd_master_mode is false.*/
+ dhd->pktfilter[DHD_ARP_FILTER_NUM] = NULL;
+
+ /* IPv4 broadcast address XXX.XXX.XXX.255 */
+ dhd->pktfilter[dhd->pktfilter_count] = "110 0 0 12 0xFFFF00000000000000000000000000000000000000FF 0x080000000000000000000000000000000000000000FF";
+ dhd->pktfilter_count++;
+ /* discard IPv4 multicast address 224.0.0.0/4 */
+ dhd->pktfilter[dhd->pktfilter_count] = "111 0 0 12 0xFFFF00000000000000000000000000000000F0 0x080000000000000000000000000000000000E0";
+ dhd->pktfilter_count++;
+ /* discard IPv6 multicast address FF00::/8 */
+ dhd->pktfilter[dhd->pktfilter_count] = "112 0 0 12 0xFFFF000000000000000000000000000000000000000000000000FF 0x86DD000000000000000000000000000000000000000000000000FF";
+ dhd->pktfilter_count++;
+ /* discard Netbios pkt */
+ dhd->pktfilter[dhd->pktfilter_count] = "121 0 0 12 0xFFFF000000000000000000FF000000000000000000000000FFFF 0x0800000000000000000000110000000000000000000000000089";
+ dhd->pktfilter_count++;
+
+}
+#endif /* PKT_FILTER_SUPPORT */
+
+int
+dhd_conf_get_pm(dhd_pub_t *dhd)
+{
+ if (dhd && dhd->conf) {
+ return dhd->conf->pm;
+ }
+ return -1;
+}
+
+#define AP_IN_SUSPEND 1
+#define AP_DOWN_IN_SUSPEND 2
+int
+dhd_conf_get_ap_mode_in_suspend(dhd_pub_t *dhd)
+{
+ int mode = 0;
+
+ /* returned ap_in_suspend value:
+ * 0: nothing
+ * 1: ap enabled in suspend
+ * 2: ap enabled, but down in suspend
+ */
+ if (dhd->op_mode & DHD_FLAG_HOSTAP_MODE) {
+ mode = dhd->conf->ap_in_suspend;
+ }
+
+ return mode;
+}
+
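+/*
+ * Apply the ap_in_suspend policy on suspend/resume: switch the event mask
+ * (AP_IN_SUSPEND) or bring the interface down/up (AP_DOWN_IN_SUSPEND).
+ */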
+int
+dhd_conf_set_ap_in_suspend(dhd_pub_t *dhd, int suspend)
+{
+ int mode = 0;
+ uint wl_down = 1;
+
+ mode = dhd_conf_get_ap_mode_in_suspend(dhd);
+ if (mode)
+ printf("%s: suspend %d, mode %d\n", __FUNCTION__, suspend, mode);
+ if (suspend) {
+ if (mode == AP_IN_SUSPEND) {
+#ifdef SUSPEND_EVENT
+ if (dhd->conf->suspend_eventmask_enable) {
+ char *eventmask = dhd->conf->suspend_eventmask;
+ dhd_conf_set_bufiovar(dhd, WLC_SET_VAR, "event_msgs", eventmask, sizeof(eventmask), TRUE);
+ }
+#endif
+ } else if (mode == AP_DOWN_IN_SUSPEND)
+ dhd_wl_ioctl_cmd(dhd, WLC_DOWN, (char *)&wl_down, sizeof(wl_down), TRUE, 0);
+ } else {
+ if (mode == AP_IN_SUSPEND) {
+#ifdef SUSPEND_EVENT
+ if (dhd->conf->suspend_eventmask_enable) {
+ char *eventmask = dhd->conf->resume_eventmask;
+ dhd_conf_set_bufiovar(dhd, WLC_SET_VAR, "event_msgs", eventmask, sizeof(eventmask), TRUE);
+ }
+#endif
+ } else if (mode == AP_DOWN_IN_SUSPEND) {
+ wl_down = 0;
+ dhd_wl_ioctl_cmd(dhd, WLC_UP, (char *)&wl_down, sizeof(wl_down), TRUE, 0);
+ }
+ }
+
+ return mode;
+}
+
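+/*
+ * Update conf->eapol_status from EAPOL frames (WPS done, 4-way handshake M4)
+ * on p2p interfaces when the DONT_DELETE_GC_AFTER_WPS in4way option is set.
+ */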
+void
+dhd_conf_set_eapol_status(dhd_pub_t *dhd, char *ifname, char *dump_data)
+{
+ unsigned char type;
+ int pair, ack, mic, kerr, req, sec, install;
+ unsigned short us_tmp;
+
+ if (!(dhd->conf->in4way&DONT_DELETE_GC_AFTER_WPS) || strncmp(ifname, "p2p", 3)) {
+ return;
+ }
+
+ type = dump_data[15];
+ if ((type == 0) && (dump_data[22] == 254) && (dump_data[30] == 5)) {
+ dhd->conf->eapol_status = EAPOL_STATUS_WPS_DONE;
+ CONFIG_TRACE(("EAP Packet, WSC Done\n"));
+ } else if (type == 3 && dump_data[18] == 2) {
+ us_tmp = (dump_data[19] << 8) | dump_data[20];
+ pair = 0 != (us_tmp & 0x08);
+ ack = 0 != (us_tmp & 0x80);
+ mic = 0 != (us_tmp & 0x100);
+ kerr = 0 != (us_tmp & 0x400);
+ req = 0 != (us_tmp & 0x800);
+ sec = 0 != (us_tmp & 0x200);
+ install = 0 != (us_tmp & 0x40);
+ if (pair && !install && !ack && mic && sec && !req && !kerr) {
+ dhd->conf->eapol_status = EAPOL_STATUS_M4;
+ CONFIG_TRACE(("EAPOL Packet, 4-way handshake, M4\n"));
+ }
+ }
+
+ return;
+}
+
+#ifdef PROP_TXSTATUS
+int
+dhd_conf_get_disable_proptx(dhd_pub_t *dhd)
+{
+ struct dhd_conf *conf = dhd->conf;
+ int disable_proptx = -1;
+ int fw_proptx = 0;
+
+ /* check fw proptx priority:
+ * 1st: check fw support via wl cap
+ * 2nd: 4334/43340/43341/43241 support proptx but do not report it in wl cap, so enable it by default;
+ * if you would like to disable it, set disable_proptx=1 in config.txt
+ * 3rd: disable when proptxstatus is not supported in wl cap
+ */
+ if (FW_SUPPORTED(dhd, proptxstatus)) {
+ fw_proptx = 1;
+ } else if (conf->chip == BCM4334_CHIP_ID || conf->chip == BCM43340_CHIP_ID ||
+ conf->chip == BCM43341_CHIP_ID || conf->chip == BCM4324_CHIP_ID) {
+ fw_proptx = 1;
+ } else {
+ fw_proptx = 0;
+ }
+
+ /* returned disable_proptx value:
+ * -1: disable in STA and enable in P2P(follow original dhd settings when PROP_TXSTATUS_VSDB enabled)
+ * 0: depend on fw support
+ * 1: always disable proptx
+ */
+ if (conf->disable_proptx == 0) {
+ // check fw support as well
+ if (fw_proptx)
+ disable_proptx = 0;
+ else
+ disable_proptx = 1;
+ } else if (conf->disable_proptx >= 1) {
+ disable_proptx = 1;
+ } else {
+ // check fw support as well
+ if (fw_proptx)
+ disable_proptx = -1;
+ else
+ disable_proptx = 1;
+ }
+
+ printf("%s: fw_proptx=%d, disable_proptx=%d\n", __FUNCTION__, fw_proptx, disable_proptx);
+
+ return disable_proptx;
+}
+#endif
+
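+/*
+ * Extract the next non-comment, non-empty line from varbuf into pickbuf,
+ * honouring '\' line continuations and '#' comments and skipping tabs;
+ * returns the position to resume parsing from.
+ */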
+uint
+pick_config_vars(char *varbuf, uint len, uint start_pos, char *pickbuf)
+{
+ bool findNewline, changenewline=FALSE, pick=FALSE;
+ int column;
+ uint n, pick_column=0;
+
+ findNewline = FALSE;
+ column = 0;
+
+ if (start_pos >= len) {
+ CONFIG_ERROR(("%s: wrong start pos\n", __FUNCTION__));
+ return 0;
+ }
+
+ for (n = start_pos; n < len; n++) {
+ if (varbuf[n] == '\r')
+ continue;
+ if ((findNewline || changenewline) && varbuf[n] != '\n')
+ continue;
+ findNewline = FALSE;
+ if (varbuf[n] == '#') {
+ findNewline = TRUE;
+ continue;
+ }
+ if (varbuf[n] == '\\') {
+ changenewline = TRUE;
+ continue;
+ }
+ if (!changenewline && varbuf[n] == '\n') {
+ if (column == 0)
+ continue;
+ column = 0;
+ continue;
+ }
+ if (changenewline && varbuf[n] == '\n') {
+ changenewline = FALSE;
+ continue;
+ }
+
+ if (column==0 && !pick) { // start to pick
+ pick = TRUE;
+ column++;
+ pick_column = 0;
+ } else {
+ if (pick && column==0) { // stop to pick
+ pick = FALSE;
+ break;
+ } else
+ column++;
+ }
+ if (pick) {
+ if (varbuf[n] == 0x9)
+ continue;
+ pickbuf[pick_column] = varbuf[n];
+ pick_column++;
+ }
+ }
+
+ return n; // return current position
+}
+
+bool
+dhd_conf_read_log_level(dhd_pub_t *dhd, char *full_param, uint len_param)
+{
+ char *data = full_param+len_param;
+
+ if (!strncmp("dhd_msg_level=", full_param, len_param)) {
+ dhd_msg_level = (int)simple_strtol(data, NULL, 0);
+ printf("%s: dhd_msg_level = 0x%X\n", __FUNCTION__, dhd_msg_level);
+ }
+#ifdef BCMSDIO
+ else if (!strncmp("sd_msglevel=", full_param, len_param)) {
+ sd_msglevel = (int)simple_strtol(data, NULL, 0);
+ printf("%s: sd_msglevel = 0x%X\n", __FUNCTION__, sd_msglevel);
+ }
+#endif
+#ifdef BCMDBUS
+ else if (!strncmp("dbus_msglevel=", full_param, len_param)) {
+ dbus_msglevel = (int)simple_strtol(data, NULL, 0);
+ printf("%s: dbus_msglevel = 0x%X\n", __FUNCTION__, dbus_msglevel);
+ }
+#endif
+ else if (!strncmp("android_msg_level=", full_param, len_param)) {
+ android_msg_level = (int)simple_strtol(data, NULL, 0);
+ printf("%s: android_msg_level = 0x%X\n", __FUNCTION__, android_msg_level);
+ }
+ else if (!strncmp("config_msg_level=", full_param, len_param)) {
+ config_msg_level = (int)simple_strtol(data, NULL, 0);
+ printf("%s: config_msg_level = 0x%X\n", __FUNCTION__, config_msg_level);
+ }
+#ifdef WL_CFG80211
+ else if (!strncmp("wl_dbg_level=", full_param, len_param)) {
+ wl_dbg_level = (int)simple_strtol(data, NULL, 0);
+ printf("%s: wl_dbg_level = 0x%X\n", __FUNCTION__, wl_dbg_level);
+ }
+#endif
+#if defined(WL_WIRELESS_EXT)
+ else if (!strncmp("iw_msg_level=", full_param, len_param)) {
+ iw_msg_level = (int)simple_strtol(data, NULL, 0);
+ printf("%s: iw_msg_level = 0x%X\n", __FUNCTION__, iw_msg_level);
+ }
+#endif
+#if defined(DHD_DEBUG)
+ else if (!strncmp("dhd_console_ms=", full_param, len_param)) {
+ dhd_console_ms = (int)simple_strtol(data, NULL, 0);
+ printf("%s: dhd_console_ms = 0x%X\n", __FUNCTION__, dhd_console_ms);
+ }
+#endif
+ else
+ return false;
+
+ return true;
+}
+
+void
+dhd_conf_read_wme_ac_value(wme_param_t *wme, char *pick, int ac_val)
+{
+ char *pick_tmp, *pch;
+
+ pick_tmp = pick;
+ pch = bcmstrstr(pick_tmp, "aifsn ");
+ if (pch) {
+ wme->aifsn[ac_val] = (int)simple_strtol(pch+strlen("aifsn "), NULL, 0);
+ printf("%s: ac_val=%d, aifsn=%d\n", __FUNCTION__, ac_val, wme->aifsn[ac_val]);
+ }
+ pick_tmp = pick;
+ pch = bcmstrstr(pick_tmp, "ecwmin ");
+ if (pch) {
+ wme->ecwmin[ac_val] = (int)simple_strtol(pch+strlen("ecwmin "), NULL, 0);
+ printf("%s: ac_val=%d, ecwmin=%d\n", __FUNCTION__, ac_val, wme->ecwmin[ac_val]);
+ }
+ pick_tmp = pick;
+ pch = bcmstrstr(pick_tmp, "ecwmax ");
+ if (pch) {
+ wme->ecwmax[ac_val] = (int)simple_strtol(pch+strlen("ecwmax "), NULL, 0);
+ printf("%s: ac_val=%d, ecwmax=%d\n", __FUNCTION__, ac_val, wme->ecwmax[ac_val]);
+ }
+ pick_tmp = pick;
+ pch = bcmstrstr(pick_tmp, "txop ");
+ if (pch) {
+ wme->txop[ac_val] = (int)simple_strtol(pch+strlen("txop "), NULL, 0);
+ printf("%s: ac_val=%d, txop=0x%x\n", __FUNCTION__, ac_val, wme->txop[ac_val]);
+ }
+
+}
+
+bool
+dhd_conf_read_wme_ac_params(dhd_pub_t *dhd, char *full_param, uint len_param)
+{
+ struct dhd_conf *conf = dhd->conf;
+ char *data = full_param+len_param;
+
+ // wme_ac_sta_be=aifsn 1 ecwmin 2 ecwmax 3 txop 0x5e
+ // wme_ac_sta_vo=aifsn 1 ecwmin 1 ecwmax 1 txop 0x5e
+
+ if (!strncmp("force_wme_ac=", full_param, len_param)) {
+ conf->force_wme_ac = (int)simple_strtol(data, NULL, 10);
+ printf("%s: force_wme_ac = %d\n", __FUNCTION__, conf->force_wme_ac);
+ }
+ else if (!strncmp("wme_ac_sta_be=", full_param, len_param)) {
+ dhd_conf_read_wme_ac_value(&conf->wme_sta, data, AC_BE);
+ }
+ else if (!strncmp("wme_ac_sta_bk=", full_param, len_param)) {
+ dhd_conf_read_wme_ac_value(&conf->wme_sta, data, AC_BK);
+ }
+ else if (!strncmp("wme_ac_sta_vi=", full_param, len_param)) {
+ dhd_conf_read_wme_ac_value(&conf->wme_sta, data, AC_VI);
+ }
+ else if (!strncmp("wme_ac_sta_vo=", full_param, len_param)) {
+ dhd_conf_read_wme_ac_value(&conf->wme_sta, data, AC_VO);
+ }
+ else if (!strncmp("wme_ac_ap_be=", full_param, len_param)) {
+ dhd_conf_read_wme_ac_value(&conf->wme_ap, data, AC_BE);
+ }
+ else if (!strncmp("wme_ac_ap_bk=", full_param, len_param)) {
+ dhd_conf_read_wme_ac_value(&conf->wme_ap, data, AC_BK);
+ }
+ else if (!strncmp("wme_ac_ap_vi=", full_param, len_param)) {
+ dhd_conf_read_wme_ac_value(&conf->wme_ap, data, AC_VI);
+ }
+ else if (!strncmp("wme_ac_ap_vo=", full_param, len_param)) {
+ dhd_conf_read_wme_ac_value(&conf->wme_ap, data, AC_VO);
+ }
+ else
+ return false;
+
+ return true;
+}
+
+bool
+dhd_conf_read_fw_by_mac(dhd_pub_t *dhd, char *full_param, uint len_param)
+{
+ int i, j;
+ char *pch, *pick_tmp;
+ wl_mac_list_t *mac_list;
+ wl_mac_range_t *mac_range;
+ struct dhd_conf *conf = dhd->conf;
+ char *data = full_param+len_param;
+
+ /* Process fw_by_mac:
+ * fw_by_mac=[fw_mac_num] \
+ * [fw_name1] [mac_num1] [oui1-1] [nic_start1-1] [nic_end1-1] \
+ * [oui1-1] [nic_start1-1] [nic_end1-1]... \
+ * [oui1-n] [nic_start1-n] [nic_end1-n] \
+ * [fw_name2] [mac_num2] [oui2-1] [nic_start2-1] [nic_end2-1] \
+ * [oui2-1] [nic_start2-1] [nic_end2-1]... \
+ * [oui2-n] [nic_start2-n] [nic_end2-n] \
+ * Ex: fw_by_mac=2 \
+ * fw_bcmdhd1.bin 2 0x0022F4 0xE85408 0xE8549D 0x983B16 0x3557A9 0x35582A \
+ * fw_bcmdhd2.bin 3 0x0022F4 0xE85408 0xE8549D 0x983B16 0x3557A9 0x35582A \
+ * 0x983B16 0x916157 0x916487
+ */
+
+ if (!strncmp("fw_by_mac=", full_param, len_param)) {
+ pick_tmp = data;
+ pch = bcmstrtok(&pick_tmp, " ", 0);
+ conf->fw_by_mac.count = (uint32)simple_strtol(pch, NULL, 0);
+ if (!(mac_list = kmalloc(sizeof(wl_mac_list_t)*conf->fw_by_mac.count, GFP_KERNEL))) {
+ conf->fw_by_mac.count = 0;
+ CONFIG_ERROR(("%s: kmalloc failed\n", __FUNCTION__));
+ }
+ printf("%s: fw_count=%d\n", __FUNCTION__, conf->fw_by_mac.count);
+ conf->fw_by_mac.m_mac_list_head = mac_list;
+ for (i=0; i<conf->fw_by_mac.count; i++) {
+ pch = bcmstrtok(&pick_tmp, " ", 0);
+ strcpy(mac_list[i].name, pch);
+ pch = bcmstrtok(&pick_tmp, " ", 0);
+ mac_list[i].count = (uint32)simple_strtol(pch, NULL, 0);
+ printf("%s: name=%s, mac_count=%d\n", __FUNCTION__,
+ mac_list[i].name, mac_list[i].count);
+ if (!(mac_range = kmalloc(sizeof(wl_mac_range_t)*mac_list[i].count, GFP_KERNEL))) {
+ mac_list[i].count = 0;
+ CONFIG_ERROR(("%s: kmalloc failed\n", __FUNCTION__));
+ break;
+ }
+ mac_list[i].mac = mac_range;
+ for (j=0; j<mac_list[i].count; j++) {
+ pch = bcmstrtok(&pick_tmp, " ", 0);
+ mac_range[j].oui = (uint32)simple_strtol(pch, NULL, 0);
+ pch = bcmstrtok(&pick_tmp, " ", 0);
+ mac_range[j].nic_start = (uint32)simple_strtol(pch, NULL, 0);
+ pch = bcmstrtok(&pick_tmp, " ", 0);
+ mac_range[j].nic_end = (uint32)simple_strtol(pch, NULL, 0);
+ printf("%s: oui=0x%06X, nic_start=0x%06X, nic_end=0x%06X\n",
+ __FUNCTION__, mac_range[j].oui,
+ mac_range[j].nic_start, mac_range[j].nic_end);
+ }
+ }
+ }
+ else
+ return false;
+
+ return true;
+}
+
+bool
+dhd_conf_read_nv_by_mac(dhd_pub_t *dhd, char *full_param, uint len_param)
+{
+ int i, j;
+ char *pch, *pick_tmp;
+ wl_mac_list_t *mac_list;
+ wl_mac_range_t *mac_range;
+ struct dhd_conf *conf = dhd->conf;
+ char *data = full_param+len_param;
+
+ /* Process nv_by_mac:
+ * [nv_by_mac]: The same format as fw_by_mac
+ */
+ if (!strncmp("nv_by_mac=", full_param, len_param)) {
+ pick_tmp = data;
+ pch = bcmstrtok(&pick_tmp, " ", 0);
+ conf->nv_by_mac.count = (uint32)simple_strtol(pch, NULL, 0);
+ if (!(mac_list = kmalloc(sizeof(wl_mac_list_t)*conf->nv_by_mac.count, GFP_KERNEL))) {
+ conf->nv_by_mac.count = 0;
+ CONFIG_ERROR(("%s: kmalloc failed\n", __FUNCTION__));
+ }
+ printf("%s: nv_count=%d\n", __FUNCTION__, conf->nv_by_mac.count);
+ conf->nv_by_mac.m_mac_list_head = mac_list;
+ for (i=0; i<conf->nv_by_mac.count; i++) {
+ pch = bcmstrtok(&pick_tmp, " ", 0);
+ strcpy(mac_list[i].name, pch);
+ pch = bcmstrtok(&pick_tmp, " ", 0);
+ mac_list[i].count = (uint32)simple_strtol(pch, NULL, 0);
+ printf("%s: name=%s, mac_count=%d\n", __FUNCTION__,
+ mac_list[i].name, mac_list[i].count);
+ if (!(mac_range = kmalloc(sizeof(wl_mac_range_t)*mac_list[i].count, GFP_KERNEL))) {
+ mac_list[i].count = 0;
+ CONFIG_ERROR(("%s: kmalloc failed\n", __FUNCTION__));
+ break;
+ }
+ mac_list[i].mac = mac_range;
+ for (j=0; j<mac_list[i].count; j++) {
+ pch = bcmstrtok(&pick_tmp, " ", 0);
+ mac_range[j].oui = (uint32)simple_strtol(pch, NULL, 0);
+ pch = bcmstrtok(&pick_tmp, " ", 0);
+ mac_range[j].nic_start = (uint32)simple_strtol(pch, NULL, 0);
+ pch = bcmstrtok(&pick_tmp, " ", 0);
+ mac_range[j].nic_end = (uint32)simple_strtol(pch, NULL, 0);
+ printf("%s: oui=0x%06X, nic_start=0x%06X, nic_end=0x%06X\n",
+ __FUNCTION__, mac_range[j].oui,
+ mac_range[j].nic_start, mac_range[j].nic_end);
+ }
+ }
+ }
+ else
+ return false;
+
+ return true;
+}
+
+bool
+dhd_conf_read_nv_by_chip(dhd_pub_t *dhd, char *full_param, uint len_param)
+{
+ int i;
+ char *pch, *pick_tmp;
+ wl_chip_nv_path_t *chip_nv_path;
+ struct dhd_conf *conf = dhd->conf;
+ char *data = full_param+len_param;
+
+ /* Process nv_by_chip:
+ * nv_by_chip=[nv_chip_num] \
+ * [chip1] [chiprev1] [nv_name1] [chip2] [chiprev2] [nv_name2] \
+ * Ex: nv_by_chip=2 \
+ * 43430 0 nvram_ap6212.txt 43430 1 nvram_ap6212a.txt \
+ */
+ if (!strncmp("nv_by_chip=", full_param, len_param)) {
+ pick_tmp = data;
+ pch = bcmstrtok(&pick_tmp, " ", 0);
+ conf->nv_by_chip.count = (uint32)simple_strtol(pch, NULL, 0);
+ if (!(chip_nv_path = kmalloc(sizeof(wl_mac_list_t)*conf->nv_by_chip.count, GFP_KERNEL))) {
+ conf->nv_by_chip.count = 0;
+ CONFIG_ERROR(("%s: kmalloc failed\n", __FUNCTION__));
+ }
+ printf("%s: nv_by_chip_count=%d\n", __FUNCTION__, conf->nv_by_chip.count);
+ conf->nv_by_chip.m_chip_nv_path_head = chip_nv_path;
+ for (i=0; i<conf->nv_by_chip.count; i++) {
+ pch = bcmstrtok(&pick_tmp, " ", 0);
+ chip_nv_path[i].chip = (uint32)simple_strtol(pch, NULL, 0);
+ pch = bcmstrtok(&pick_tmp, " ", 0);
+ chip_nv_path[i].chiprev = (uint32)simple_strtol(pch, NULL, 0);
+ pch = bcmstrtok(&pick_tmp, " ", 0);
+ strcpy(chip_nv_path[i].name, pch);
+ printf("%s: chip=0x%x, chiprev=%d, name=%s\n", __FUNCTION__,
+ chip_nv_path[i].chip, chip_nv_path[i].chiprev, chip_nv_path[i].name);
+ }
+ }
+ else
+ return false;
+
+ return true;
+}
+
+bool
+dhd_conf_read_roam_params(dhd_pub_t *dhd, char *full_param, uint len_param)
+{
+ struct dhd_conf *conf = dhd->conf;
+ char *data = full_param+len_param;
+
+ if (!strncmp("roam_off=", full_param, len_param)) {
+ if (!strncmp(data, "0", 1))
+ conf->roam_off = 0;
+ else
+ conf->roam_off = 1;
+ printf("%s: roam_off = %d\n", __FUNCTION__, conf->roam_off);
+ }
+ else if (!strncmp("roam_off_suspend=", full_param, len_param)) {
+ if (!strncmp(data, "0", 1))
+ conf->roam_off_suspend = 0;
+ else
+ conf->roam_off_suspend = 1;
+ printf("%s: roam_off_suspend = %d\n", __FUNCTION__, conf->roam_off_suspend);
+ }
+ else if (!strncmp("roam_trigger=", full_param, len_param)) {
+ conf->roam_trigger[0] = (int)simple_strtol(data, NULL, 10);
+ printf("%s: roam_trigger = %d\n", __FUNCTION__,
+ conf->roam_trigger[0]);
+ }
+ else if (!strncmp("roam_scan_period=", full_param, len_param)) {
+ conf->roam_scan_period[0] = (int)simple_strtol(data, NULL, 10);
+ printf("%s: roam_scan_period = %d\n", __FUNCTION__,
+ conf->roam_scan_period[0]);
+ }
+ else if (!strncmp("roam_delta=", full_param, len_param)) {
+ conf->roam_delta[0] = (int)simple_strtol(data, NULL, 10);
+ printf("%s: roam_delta = %d\n", __FUNCTION__, conf->roam_delta[0]);
+ }
+ else if (!strncmp("fullroamperiod=", full_param, len_param)) {
+ conf->fullroamperiod = (int)simple_strtol(data, NULL, 10);
+ printf("%s: fullroamperiod = %d\n", __FUNCTION__,
+ conf->fullroamperiod);
+ } else
+ return false;
+
+ return true;
+}
+
+bool
+dhd_conf_read_country_list(dhd_pub_t *dhd, char *full_param, uint len_param)
+{
+ int i;
+ char *pch, *pick_tmp, *pick_tmp2;
+ struct dhd_conf *conf = dhd->conf;
+ char *data = full_param+len_param;
+ wl_country_t *cspec;
+ conf_country_list_t *country_list = NULL;
+
+ /* Process country_list:
+ * country_list=[country1]:[ccode1]/[regrev1],
+ * [country2]:[ccode2]/[regrev2] \
+ * Ex: country_list=US:US/0, TW:TW/1
+ */
+ if (!strncmp("country_list=", full_param, len_param)) {
+ country_list = &dhd->conf->country_list;
+ }
+ if (country_list) {
+ pick_tmp = data;
+ for (i=0; i<CONFIG_COUNTRY_LIST_SIZE; i++) {
+ pick_tmp2 = bcmstrtok(&pick_tmp, ", ", 0);
+ if (!pick_tmp2)
+ break;
+ pch = bcmstrtok(&pick_tmp2, ":", 0);
+ if (!pch)
+ break;
+ cspec = NULL;
+ if (!(cspec = kmalloc(sizeof(wl_country_t), GFP_KERNEL))) {
+ CONFIG_ERROR(("%s: kmalloc failed\n", __FUNCTION__));
+ break;
+ }
+ memset(cspec, 0, sizeof(wl_country_t));
+
+ strcpy(cspec->country_abbrev, pch);
+ pch = bcmstrtok(&pick_tmp2, "/", 0);
+ if (!pch) {
+ kfree(cspec);
+ break;
+ }
+ memcpy(cspec->ccode, pch, 2);
+ pch = bcmstrtok(&pick_tmp2, "/", 0);
+ if (!pch) {
+ kfree(cspec);
+ break;
+ }
+ cspec->rev = (int32)simple_strtol(pch, NULL, 10);
+ country_list->count++;
+ country_list->cspec[i] = cspec;
+ CONFIG_TRACE(("%s: country_list abbrev=%s, ccode=%s, regrev=%d\n", __FUNCTION__,
+ cspec->country_abbrev, cspec->ccode, cspec->rev));
+ }
+ if (!strncmp("country_list=", full_param, len_param)) {
+ printf("%s: %d country in list\n", __FUNCTION__, conf->country_list.count);
+ }
+ }
+ else
+ return false;
+
+ return true;
+}
+
+bool
+dhd_conf_read_mchan_params(dhd_pub_t *dhd, char *full_param, uint len_param)
+{
+ int i;
+ char *pch, *pick_tmp, *pick_tmp2;
+ struct dhd_conf *conf = dhd->conf;
+ char *data = full_param+len_param;
+
+ /* Process mchan_bw:
+ * mchan_bw=[val]/[any/go/gc]/[any/source/sink]
+ * Ex: mchan_bw=80/go/source, 30/gc/sink
+ */
+ if (!strncmp("mchan_bw=", full_param, len_param)) {
+ pick_tmp = data;
+ for (i=0; i<MCHAN_MAX_NUM; i++) {
+ pick_tmp2 = bcmstrtok(&pick_tmp, ", ", 0);
+ if (!pick_tmp2)
+ break;
+ pch = bcmstrtok(&pick_tmp2, "/", 0);
+ if (!pch) {
+ break;
+ } else {
+ conf->mchan[i].bw = (int)simple_strtol(pch, NULL, 0);
+ if (conf->mchan[i].bw < 0 || conf->mchan[i].bw > 100) {
+ CONFIG_ERROR(("%s: wrong bw %d\n", __FUNCTION__, conf->mchan[i].bw));
+ conf->mchan[i].bw = 0;
+ break;
+ }
+ }
+ pch = bcmstrtok(&pick_tmp2, "/", 0);
+ if (!pch) {
+ break;
+ } else {
+ if (bcmstrstr(pch, "any")) {
+ conf->mchan[i].p2p_mode = -1;
+ } else if (bcmstrstr(pch, "go")) {
+ conf->mchan[i].p2p_mode = WL_P2P_IF_GO;
+ } else if (bcmstrstr(pch, "gc")) {
+ conf->mchan[i].p2p_mode = WL_P2P_IF_CLIENT;
+ }
+ }
+ pch = bcmstrtok(&pick_tmp2, "/", 0);
+ if (!pch) {
+ break;
+ } else {
+ if (bcmstrstr(pch, "any")) {
+ conf->mchan[i].miracast_mode = -1;
+ } else if (bcmstrstr(pch, "source")) {
+ conf->mchan[i].miracast_mode = MIRACAST_SOURCE;
+ } else if (bcmstrstr(pch, "sink")) {
+ conf->mchan[i].miracast_mode = MIRACAST_SINK;
+ }
+ }
+ }
+ for (i=0; i<MCHAN_MAX_NUM; i++) {
+ if (conf->mchan[i].bw >= 0)
+ printf("%s: mchan_bw=%d/%d/%d\n", __FUNCTION__,
+ conf->mchan[i].bw, conf->mchan[i].p2p_mode, conf->mchan[i].miracast_mode);
+ }
+ }
+ else
+ return false;
+
+ return true;
+}
+
+#ifdef PKT_FILTER_SUPPORT
+bool
+dhd_conf_read_pkt_filter(dhd_pub_t *dhd, char *full_param, uint len_param)
+{
+ struct dhd_conf *conf = dhd->conf;
+ char *data = full_param+len_param;
+ char *pch, *pick_tmp;
+ int i;
+
+ /* Process pkt filter:
+ * 1) pkt_filter_add=99 0 0 0 0x000000000000 0x000000000000
+ * 2) pkt_filter_del=100, 102, 103, 104, 105
+ * 3) magic_pkt_filter_add=141 0 1 12
+ */
+ if (!strncmp("dhd_master_mode=", full_param, len_param)) {
+ if (!strncmp(data, "0", 1))
+ dhd_master_mode = FALSE;
+ else
+ dhd_master_mode = TRUE;
+ printf("%s: dhd_master_mode = %d\n", __FUNCTION__, dhd_master_mode);
+ }
+ else if (!strncmp("pkt_filter_add=", full_param, len_param)) {
+ pick_tmp = data;
+ pch = bcmstrtok(&pick_tmp, ",.-", 0);
+ i=0;
+ while (pch != NULL && i<DHD_CONF_FILTER_MAX) {
+ strcpy(&conf->pkt_filter_add.filter[i][0], pch);
+ printf("%s: pkt_filter_add[%d][] = %s\n", __FUNCTION__, i, &conf->pkt_filter_add.filter[i][0]);
+ pch = bcmstrtok(&pick_tmp, ",.-", 0);
+ i++;
+ }
+ conf->pkt_filter_add.count = i;
+ }
+ else if (!strncmp("pkt_filter_del=", full_param, len_param)) {
+ pick_tmp = data;
+ pch = bcmstrtok(&pick_tmp, " ,.-", 0);
+ i=0;
+ while (pch != NULL && i<DHD_CONF_FILTER_MAX) {
+ conf->pkt_filter_del.id[i] = (uint32)simple_strtol(pch, NULL, 10);
+ pch = bcmstrtok(&pick_tmp, " ,.-", 0);
+ i++;
+ }
+ conf->pkt_filter_del.count = i;
+ printf("%s: pkt_filter_del id = ", __FUNCTION__);
+ for (i=0; i<conf->pkt_filter_del.count; i++)
+ printf("%d ", conf->pkt_filter_del.id[i]);
+ printf("\n");
+ }
+ else if (!strncmp("magic_pkt_filter_add=", full_param, len_param)) {
+ if (!(conf->magic_pkt_filter_add = kmalloc(MAGIC_PKT_FILTER_LEN, GFP_KERNEL))) {
+ CONFIG_ERROR(("%s: kmalloc failed\n", __FUNCTION__));
+ } else {
+ memset(conf->magic_pkt_filter_add, 0, MAGIC_PKT_FILTER_LEN);
+ strcpy(conf->magic_pkt_filter_add, data);
+ printf("%s: magic_pkt_filter_add = %s\n", __FUNCTION__, conf->magic_pkt_filter_add);
+ }
+ }
+ else
+ return false;
+
+ return true;
+}
+#endif
+
+#ifdef ISAM_PREINIT
+/*
+ * isam_init=mode [sta|ap|apsta|dualap] vifname [wlan1]
+ * isam_config=ifname [wlan0|wlan1] ssid [xxx] chan [x]
+ hidden [y|n] maxassoc [x]
+ amode [open|shared|wpapsk|wpa2psk|wpawpa2psk]
+ emode [none|wep|tkip|aes|tkipaes]
+ key [xxxxx]
+ * isam_enable=ifname [wlan0|wlan1]
+*/
+bool
+dhd_conf_read_isam(dhd_pub_t *dhd, char *full_param, uint len_param)
+{
+ struct dhd_conf *conf = dhd->conf;
+ char *data = full_param+len_param;
+
+ if (!strncmp("isam_init=", full_param, len_param)) {
+ sprintf(conf->isam_init, "isam_init %s", data);
+ printf("%s: isam_init=%s\n", __FUNCTION__, conf->isam_init);
+ }
+ else if (!strncmp("isam_config=", full_param, len_param)) {
+ sprintf(conf->isam_config, "isam_config %s", data);
+ printf("%s: isam_config=%s\n", __FUNCTION__, conf->isam_config);
+ }
+ else if (!strncmp("isam_enable=", full_param, len_param)) {
+ sprintf(conf->isam_enable, "isam_enable %s", data);
+ printf("%s: isam_enable=%s\n", __FUNCTION__, conf->isam_enable);
+ }
+ else
+ return false;
+
+ return true;
+}
+#endif
+
+#ifdef IDHCP
+bool
+dhd_conf_read_dhcp_params(dhd_pub_t *dhd, char *full_param, uint len_param)
+{
+ struct dhd_conf *conf = dhd->conf;
+ char *data = full_param+len_param;
+ struct ipv4_addr ipa_set;
+
+ if (!strncmp("dhcpc_enable=", full_param, len_param)) {
+ conf->dhcpc_enable = (int)simple_strtol(data, NULL, 10);
+ printf("%s: dhcpc_enable = %d\n", __FUNCTION__, conf->dhcpc_enable);
+ }
+ else if (!strncmp("dhcpd_enable=", full_param, len_param)) {
+ conf->dhcpd_enable = (int)simple_strtol(data, NULL, 10);
+ printf("%s: dhcpd_enable = %d\n", __FUNCTION__, conf->dhcpd_enable);
+ }
+ else if (!strncmp("dhcpd_ip_addr=", full_param, len_param)) {
+ if (!bcm_atoipv4(data, &ipa_set))
+ printf("%s : dhcpd_ip_addr adress setting failed.\n", __FUNCTION__);
+ conf->dhcpd_ip_addr = ipa_set;
+ printf("%s: dhcpd_ip_addr = %s\n",__FUNCTION__, data);
+ }
+ else if (!strncmp("dhcpd_ip_mask=", full_param, len_param)) {
+ if (!bcm_atoipv4(data, &ipa_set))
+ printf("%s : dhcpd_ip_mask adress setting failed.\n", __FUNCTION__);
+ conf->dhcpd_ip_mask = ipa_set;
+ printf("%s: dhcpd_ip_mask = %s\n",__FUNCTION__, data);
+ }
+ else if (!strncmp("dhcpd_ip_start=", full_param, len_param)) {
+ if (!bcm_atoipv4(data, &ipa_set))
+ printf("%s : dhcpd_ip_start adress setting failed.\n", __FUNCTION__);
+ conf->dhcpd_ip_start = ipa_set;
+ printf("%s: dhcpd_ip_start = %s\n",__FUNCTION__, data);
+ }
+ else if (!strncmp("dhcpd_ip_end=", full_param, len_param)) {
+ if (!bcm_atoipv4(data, &ipa_set))
+ printf("%s : dhcpd_ip_end adress setting failed.\n", __FUNCTION__);
+ conf->dhcpd_ip_end = ipa_set;
+ printf("%s: dhcpd_ip_end = %s\n",__FUNCTION__, data);
+ }
+ else
+ return false;
+
+ return true;
+}
+#endif
+
+#ifdef BCMSDIO
+bool
+dhd_conf_read_sdio_params(dhd_pub_t *dhd, char *full_param, uint len_param)
+{
+ struct dhd_conf *conf = dhd->conf;
+ char *data = full_param+len_param;
+
+ if (!strncmp("dhd_doflow=", full_param, len_param)) {
+ if (!strncmp(data, "0", 1))
+ dhd_doflow = FALSE;
+ else
+ dhd_doflow = TRUE;
+ printf("%s: dhd_doflow = %d\n", __FUNCTION__, dhd_doflow);
+ }
+ else if (!strncmp("dhd_slpauto=", full_param, len_param) ||
+ !strncmp("kso_enable=", full_param, len_param)) {
+ if (!strncmp(data, "0", 1))
+ dhd_slpauto = FALSE;
+ else
+ dhd_slpauto = TRUE;
+ printf("%s: dhd_slpauto = %d\n", __FUNCTION__, dhd_slpauto);
+ }
+ else if (!strncmp("use_rxchain=", full_param, len_param)) {
+ conf->use_rxchain = (int)simple_strtol(data, NULL, 10);
+ printf("%s: use_rxchain = %d\n", __FUNCTION__, conf->use_rxchain);
+ }
+ else if (!strncmp("dhd_txminmax=", full_param, len_param)) {
+ conf->dhd_txminmax = (uint)simple_strtol(data, NULL, 10);
+ printf("%s: dhd_txminmax = %d\n", __FUNCTION__, conf->dhd_txminmax);
+ }
+ else if (!strncmp("txinrx_thres=", full_param, len_param)) {
+ conf->txinrx_thres = (int)simple_strtol(data, NULL, 10);
+ printf("%s: txinrx_thres = %d\n", __FUNCTION__, conf->txinrx_thres);
+ }
+ else if (!strncmp("sd_f2_blocksize=", full_param, len_param)) {
+ conf->sd_f2_blocksize = (int)simple_strtol(data, NULL, 10);
+ printf("%s: sd_f2_blocksize = %d\n", __FUNCTION__, conf->sd_f2_blocksize);
+ }
+#if defined(HW_OOB)
+ else if (!strncmp("oob_enabled_later=", full_param, len_param)) {
+ if (!strncmp(data, "0", 1))
+ conf->oob_enabled_later = FALSE;
+ else
+ conf->oob_enabled_later = TRUE;
+ printf("%s: oob_enabled_later = %d\n", __FUNCTION__, conf->oob_enabled_later);
+ }
+#endif
+ else if (!strncmp("dpc_cpucore=", full_param, len_param)) {
+ conf->dpc_cpucore = (int)simple_strtol(data, NULL, 10);
+ printf("%s: dpc_cpucore = %d\n", __FUNCTION__, conf->dpc_cpucore);
+ }
+ else if (!strncmp("rxf_cpucore=", full_param, len_param)) {
+ conf->rxf_cpucore = (int)simple_strtol(data, NULL, 10);
+ printf("%s: rxf_cpucore = %d\n", __FUNCTION__, conf->rxf_cpucore);
+ }
+ else if (!strncmp("orphan_move=", full_param, len_param)) {
+ conf->orphan_move = (int)simple_strtol(data, NULL, 10);
+ printf("%s: orphan_move = %d\n", __FUNCTION__, conf->orphan_move);
+ }
+#if defined(BCMSDIOH_TXGLOM)
+ else if (!strncmp("txglomsize=", full_param, len_param)) {
+ conf->txglomsize = (uint)simple_strtol(data, NULL, 10);
+ if (conf->txglomsize > SDPCM_MAXGLOM_SIZE)
+ conf->txglomsize = SDPCM_MAXGLOM_SIZE;
+ printf("%s: txglomsize = %d\n", __FUNCTION__, conf->txglomsize);
+ }
+ else if (!strncmp("txglom_ext=", full_param, len_param)) {
+ if (!strncmp(data, "0", 1))
+ conf->txglom_ext = FALSE;
+ else
+ conf->txglom_ext = TRUE;
+ printf("%s: txglom_ext = %d\n", __FUNCTION__, conf->txglom_ext);
+ if (conf->txglom_ext) {
+ if ((conf->chip == BCM43362_CHIP_ID) || (conf->chip == BCM4330_CHIP_ID))
+ conf->txglom_bucket_size = 1680;
+ else if (conf->chip == BCM43340_CHIP_ID || conf->chip == BCM43341_CHIP_ID ||
+ conf->chip == BCM4334_CHIP_ID || conf->chip == BCM4324_CHIP_ID)
+ conf->txglom_bucket_size = 1684;
+ }
+ printf("%s: txglom_bucket_size = %d\n", __FUNCTION__, conf->txglom_bucket_size);
+ }
+ else if (!strncmp("bus:rxglom=", full_param, len_param)) {
+ if (!strncmp(data, "0", 1))
+ conf->bus_rxglom = FALSE;
+ else
+ conf->bus_rxglom = TRUE;
+ printf("%s: bus:rxglom = %d\n", __FUNCTION__, conf->bus_rxglom);
+ }
+ else if (!strncmp("deferred_tx_len=", full_param, len_param)) {
+ conf->deferred_tx_len = (int)simple_strtol(data, NULL, 10);
+ printf("%s: deferred_tx_len = %d\n", __FUNCTION__, conf->deferred_tx_len);
+ }
+ else if (!strncmp("txctl_tmo_fix=", full_param, len_param)) {
+ conf->txctl_tmo_fix = (int)simple_strtol(data, NULL, 0);
+ printf("%s: txctl_tmo_fix = %d\n", __FUNCTION__, conf->txctl_tmo_fix);
+ }
+ else if (!strncmp("tx_max_offset=", full_param, len_param)) {
+ conf->tx_max_offset = (int)simple_strtol(data, NULL, 10);
+ printf("%s: tx_max_offset = %d\n", __FUNCTION__, conf->tx_max_offset);
+ }
+ else if (!strncmp("txglom_mode=", full_param, len_param)) {
+ if (!strncmp(data, "0", 1))
+ conf->txglom_mode = FALSE;
+ else
+ conf->txglom_mode = TRUE;
+ printf("%s: txglom_mode = %d\n", __FUNCTION__, conf->txglom_mode);
+ }
+#endif
+ else
+ return false;
+
+ return true;
+}
+#endif
+
+#ifdef BCMPCIE
+bool
+dhd_conf_read_pcie_params(dhd_pub_t *dhd, char *full_param, uint len_param)
+{
+ struct dhd_conf *conf = dhd->conf;
+ char *data = full_param+len_param;
+
+ if (!strncmp("bus:deepsleep_disable=", full_param, len_param)) {
+ if (!strncmp(data, "0", 1))
+ conf->bus_deepsleep_disable = 0;
+ else
+ conf->bus_deepsleep_disable = 1;
+ printf("%s: bus:deepsleep_disable = %d\n", __FUNCTION__, conf->bus_deepsleep_disable);
+ }
+ else
+ return false;
+
+ return true;
+}
+#endif
+
+bool
+dhd_conf_read_pm_params(dhd_pub_t *dhd, char *full_param, uint len_param)
+{
+ struct dhd_conf *conf = dhd->conf;
+ char *data = full_param+len_param;
+
+ if (!strncmp("deepsleep=", full_param, len_param)) {
+ if (!strncmp(data, "1", 1))
+ conf->deepsleep = TRUE;
+ else
+ conf->deepsleep = FALSE;
+ printf("%s: deepsleep = %d\n", __FUNCTION__, conf->deepsleep);
+ }
+ else if (!strncmp("PM=", full_param, len_param)) {
+ conf->pm = (int)simple_strtol(data, NULL, 10);
+ printf("%s: PM = %d\n", __FUNCTION__, conf->pm);
+ }
+ else if (!strncmp("pm_in_suspend=", full_param, len_param)) {
+ conf->pm_in_suspend = (int)simple_strtol(data, NULL, 10);
+ printf("%s: pm_in_suspend = %d\n", __FUNCTION__, conf->pm_in_suspend);
+ }
+ else if (!strncmp("suspend_bcn_li_dtim=", full_param, len_param)) {
+ conf->suspend_bcn_li_dtim = (int)simple_strtol(data, NULL, 10);
+ printf("%s: suspend_bcn_li_dtim = %d\n", __FUNCTION__, conf->suspend_bcn_li_dtim);
+ }
+ else if (!strncmp("xmit_in_suspend=", full_param, len_param)) {
+ if (!strncmp(data, "1", 1))
+ conf->xmit_in_suspend = TRUE;
+ else
+ conf->xmit_in_suspend = FALSE;
+ printf("%s: xmit_in_suspend = %d\n", __FUNCTION__, conf->xmit_in_suspend);
+ }
+ else if (!strncmp("ap_in_suspend=", full_param, len_param)) {
+ conf->ap_in_suspend = (int)simple_strtol(data, NULL, 10);
+ printf("%s: ap_in_suspend = %d\n", __FUNCTION__, conf->ap_in_suspend);
+ }
+#ifdef SUSPEND_EVENT
+ else if (!strncmp("suspend_eventmask_enable=", full_param, len_param)) {
+ if (!strncmp(data, "1", 1))
+ conf->suspend_eventmask_enable = TRUE;
+ else
+ conf->suspend_eventmask_enable = FALSE;
+ printf("%s: suspend_eventmask_enable = %d\n", __FUNCTION__, conf->suspend_eventmask_enable);
+ }
+#endif
+ else
+ return false;
+
+ return true;
+}
+
+bool
+dhd_conf_read_others(dhd_pub_t *dhd, char *full_param, uint len_param)
+{
+ struct dhd_conf *conf = dhd->conf;
+ char *data = full_param+len_param;
+ uint len_data = strlen(data);
+ char *pch, *pick_tmp;
+ int i;
+
+ if (!strncmp("dhd_poll=", full_param, len_param)) {
+ if (!strncmp(data, "0", 1))
+ conf->dhd_poll = 0;
+ else
+ conf->dhd_poll = 1;
+ printf("%s: dhd_poll = %d\n", __FUNCTION__, conf->dhd_poll);
+ }
+ else if (!strncmp("dhd_watchdog_ms=", full_param, len_param)) {
+ dhd_watchdog_ms = (int)simple_strtol(data, NULL, 10);
+ printf("%s: dhd_watchdog_ms = %d\n", __FUNCTION__, dhd_watchdog_ms);
+ }
+ else if (!strncmp("band=", full_param, len_param)) {
+ /* Process band:
+ * band=a for 5GHz only and band=b for 2.4GHz only
+ */
+ if (!strcmp(data, "b"))
+ conf->band = WLC_BAND_2G;
+ else if (!strcmp(data, "a"))
+ conf->band = WLC_BAND_5G;
+ else
+ conf->band = WLC_BAND_AUTO;
+ printf("%s: band = %d\n", __FUNCTION__, conf->band);
+ }
+ else if (!strncmp("bw_cap_2g=", full_param, len_param)) {
+ conf->bw_cap[0] = (uint)simple_strtol(data, NULL, 0);
+ printf("%s: bw_cap_2g = %d\n", __FUNCTION__, conf->bw_cap[0]);
+ }
+ else if (!strncmp("bw_cap_5g=", full_param, len_param)) {
+ conf->bw_cap[1] = (uint)simple_strtol(data, NULL, 0);
+ printf("%s: bw_cap_5g = %d\n", __FUNCTION__, conf->bw_cap[1]);
+ }
+ else if (!strncmp("bw_cap=", full_param, len_param)) {
+ pick_tmp = data;
+ pch = bcmstrtok(&pick_tmp, " ,.-", 0);
+ if (pch != NULL) {
+ conf->bw_cap[0] = (uint32)simple_strtol(pch, NULL, 0);
+ printf("%s: bw_cap 2g = %d\n", __FUNCTION__, conf->bw_cap[0]);
+ }
+ pch = bcmstrtok(&pick_tmp, " ,.-", 0);
+ if (pch != NULL) {
+ conf->bw_cap[1] = (uint32)simple_strtol(pch, NULL, 0);
+ printf("%s: bw_cap 5g = %d\n", __FUNCTION__, conf->bw_cap[1]);
+ }
+ }
+ else if (!strncmp("ccode=", full_param, len_param)) {
+ memset(&conf->cspec, 0, sizeof(wl_country_t));
+ memcpy(conf->cspec.country_abbrev, data, len_data);
+ memcpy(conf->cspec.ccode, data, len_data);
+ printf("%s: ccode = %s\n", __FUNCTION__, conf->cspec.ccode);
+ }
+ else if (!strncmp("regrev=", full_param, len_param)) {
+ conf->cspec.rev = (int32)simple_strtol(data, NULL, 10);
+ printf("%s: regrev = %d\n", __FUNCTION__, conf->cspec.rev);
+ }
+ else if (!strncmp("channels=", full_param, len_param)) {
+ pick_tmp = data;
+ pch = bcmstrtok(&pick_tmp, " ,.-", 0);
+ i=0;
+ while (pch != NULL && i<WL_NUMCHANNELS) {
+ conf->channels.channel[i] = (uint32)simple_strtol(pch, NULL, 10);
+ pch = bcmstrtok(&pick_tmp, " ,.-", 0);
+ i++;
+ }
+ conf->channels.count = i;
+ printf("%s: channels = ", __FUNCTION__);
+ for (i=0; i<conf->channels.count; i++)
+ printf("%d ", conf->channels.channel[i]);
+ printf("\n");
+ }
+ else if (!strncmp("keep_alive_period=", full_param, len_param)) {
+ conf->keep_alive_period = (uint)simple_strtol(data, NULL, 10);
+ printf("%s: keep_alive_period = %d\n", __FUNCTION__,
+ conf->keep_alive_period);
+ }
+ else if (!strncmp("phy_oclscdenable=", full_param, len_param)) {
+ conf->phy_oclscdenable = (int)simple_strtol(data, NULL, 10);
+ printf("%s: phy_oclscdenable = %d\n", __FUNCTION__, conf->phy_oclscdenable);
+ }
+ else if (!strncmp("srl=", full_param, len_param)) {
+ conf->srl = (int)simple_strtol(data, NULL, 10);
+ printf("%s: srl = %d\n", __FUNCTION__, conf->srl);
+ }
+ else if (!strncmp("lrl=", full_param, len_param)) {
+ conf->lrl = (int)simple_strtol(data, NULL, 10);
+ printf("%s: lrl = %d\n", __FUNCTION__, conf->lrl);
+ }
+ else if (!strncmp("bcn_timeout=", full_param, len_param)) {
+ conf->bcn_timeout= (uint)simple_strtol(data, NULL, 10);
+ printf("%s: bcn_timeout = %d\n", __FUNCTION__, conf->bcn_timeout);
+ }
+ else if (!strncmp("txbf=", full_param, len_param)) {
+ conf->txbf = (int)simple_strtol(data, NULL, 10);
+ printf("%s: txbf = %d\n", __FUNCTION__, conf->txbf);
+ }
+ else if (!strncmp("frameburst=", full_param, len_param)) {
+ conf->frameburst = (int)simple_strtol(data, NULL, 10);
+ printf("%s: frameburst = %d\n", __FUNCTION__, conf->frameburst);
+ }
+ else if (!strncmp("disable_proptx=", full_param, len_param)) {
+ conf->disable_proptx = (int)simple_strtol(data, NULL, 10);
+ printf("%s: disable_proptx = %d\n", __FUNCTION__, conf->disable_proptx);
+ }
+#ifdef DHDTCPACK_SUPPRESS
+ else if (!strncmp("tcpack_sup_mode=", full_param, len_param)) {
+ conf->tcpack_sup_mode = (uint)simple_strtol(data, NULL, 10);
+ printf("%s: tcpack_sup_mode = %d\n", __FUNCTION__, conf->tcpack_sup_mode);
+ }
+#endif
+ else if (!strncmp("pktprio8021x=", full_param, len_param)) {
+ conf->pktprio8021x = (int)simple_strtol(data, NULL, 10);
+ printf("%s: pktprio8021x = %d\n", __FUNCTION__, conf->pktprio8021x);
+ }
+#if defined(BCMSDIO) || defined(BCMPCIE)
+ else if (!strncmp("dhd_txbound=", full_param, len_param)) {
+ dhd_txbound = (uint)simple_strtol(data, NULL, 10);
+ printf("%s: dhd_txbound = %d\n", __FUNCTION__, dhd_txbound);
+ }
+ else if (!strncmp("dhd_rxbound=", full_param, len_param)) {
+ dhd_rxbound = (uint)simple_strtol(data, NULL, 10);
+ printf("%s: dhd_rxbound = %d\n", __FUNCTION__, dhd_rxbound);
+ }
+#endif
+ else if (!strncmp("tsq=", full_param, len_param)) {
+ conf->tsq = (int)simple_strtol(data, NULL, 10);
+ printf("%s: tsq = %d\n", __FUNCTION__, conf->tsq);
+ }
+ else if (!strncmp("ctrl_resched=", full_param, len_param)) {
+ conf->ctrl_resched = (int)simple_strtol(data, NULL, 10);
+ printf("%s: ctrl_resched = %d\n", __FUNCTION__, conf->ctrl_resched);
+ }
+ else if (!strncmp("dhd_ioctl_timeout_msec=", full_param, len_param)) {
+ conf->dhd_ioctl_timeout_msec = (int)simple_strtol(data, NULL, 10);
+ printf("%s: dhd_ioctl_timeout_msec = %d\n", __FUNCTION__, conf->dhd_ioctl_timeout_msec);
+ }
+ else if (!strncmp("in4way=", full_param, len_param)) {
+ conf->in4way = (int)simple_strtol(data, NULL, 0);
+ printf("%s: in4way = 0x%x\n", __FUNCTION__, conf->in4way);
+ }
+ else if (!strncmp("max_wait_gc_time=", full_param, len_param)) {
+ conf->max_wait_gc_time = (int)simple_strtol(data, NULL, 0);
+ printf("%s: max_wait_gc_time = %d\n", __FUNCTION__, conf->max_wait_gc_time);
+ }
+ else if (!strncmp("wl_preinit=", full_param, len_param)) {
+ if (!(conf->wl_preinit = kmalloc(len_data+1, GFP_KERNEL))) {
+ CONFIG_ERROR(("%s: kmalloc failed\n", __FUNCTION__));
+ } else {
+ memset(conf->wl_preinit, 0, len_param+1);
+ strcpy(conf->wl_preinit, data);
+ printf("%s: wl_preinit = %s\n", __FUNCTION__, conf->wl_preinit);
+ }
+ }
+ else
+ return false;
+
+ return true;
+}
+
+int
+dhd_conf_read_config(dhd_pub_t *dhd, char *conf_path)
+{
+ int bcmerror = -1;
+ uint len = 0, start_pos=0;
+ void * image = NULL;
+ char * memblock = NULL;
+ char *bufp, *pick = NULL, *pch;
+ bool conf_file_exists;
+ uint len_param;
+
+ conf_file_exists = ((conf_path != NULL) && (conf_path[0] != '\0'));
+ if (!conf_file_exists) {
+ printf("%s: config path is null or empty, skip reading config\n", __FUNCTION__);
+ return (0);
+ }
+
+ if (conf_file_exists) {
+ image = dhd_os_open_image(conf_path);
+ if (image == NULL) {
+ printf("%s: Ignore config file %s\n", __FUNCTION__, conf_path);
+ goto err;
+ }
+ }
+
+ memblock = MALLOC(dhd->osh, MAXSZ_CONFIG);
+ if (memblock == NULL) {
+ CONFIG_ERROR(("%s: Failed to allocate memory %d bytes\n",
+ __FUNCTION__, MAXSZ_CONFIG));
+ goto err;
+ }
+
+ pick = MALLOC(dhd->osh, MAXSZ_BUF);
+ if (!pick) {
+ CONFIG_ERROR(("%s: Failed to allocate memory %d bytes\n",
+ __FUNCTION__, MAXSZ_BUF));
+ goto err;
+ }
+
+ /* Read variables */
+ if (conf_file_exists) {
+ len = dhd_os_get_image_block(memblock, MAXSZ_CONFIG, image);
+ }
+ if (len > 0 && len < MAXSZ_CONFIG) {
+ bufp = (char *)memblock;
+ bufp[len] = 0;
+
+ while (start_pos < len) {
+ memset(pick, 0, MAXSZ_BUF);
+ start_pos = pick_config_vars(bufp, len, start_pos, pick);
+ pch = strchr(pick, '=');
+ if (pch != NULL) {
+ len_param = pch-pick+1;
+ if (len_param == strlen(pick)) {
+ CONFIG_ERROR(("%s: invalid parameter %s\n", __FUNCTION__, pick));
+ continue;
+ }
+ } else {
+ CONFIG_ERROR(("%s: invalid parameter %s\n", __FUNCTION__, pick));
+ continue;
+ }
+
+ if (dhd_conf_read_log_level(dhd, pick, len_param))
+ continue;
+ else if (dhd_conf_read_roam_params(dhd, pick, len_param))
+ continue;
+ else if (dhd_conf_read_wme_ac_params(dhd, pick, len_param))
+ continue;
+ else if (dhd_conf_read_fw_by_mac(dhd, pick, len_param))
+ continue;
+ else if (dhd_conf_read_nv_by_mac(dhd, pick, len_param))
+ continue;
+ else if (dhd_conf_read_nv_by_chip(dhd, pick, len_param))
+ continue;
+ else if (dhd_conf_read_country_list(dhd, pick, len_param))
+ continue;
+ else if (dhd_conf_read_mchan_params(dhd, pick, len_param))
+ continue;
+#ifdef PKT_FILTER_SUPPORT
+ else if (dhd_conf_read_pkt_filter(dhd, pick, len_param))
+ continue;
+#endif /* PKT_FILTER_SUPPORT */
+#ifdef ISAM_PREINIT
+ else if (dhd_conf_read_isam(dhd, pick, len_param))
+ continue;
+#endif /* ISAM_PREINIT */
+#ifdef IDHCP
+ else if (dhd_conf_read_dhcp_params(dhd, pick, len_param))
+ continue;
+#endif /* IDHCP */
+#ifdef BCMSDIO
+ else if (dhd_conf_read_sdio_params(dhd, pick, len_param))
+ continue;
+#endif /* BCMSDIO */
+#ifdef BCMPCIE
+ else if (dhd_conf_read_pcie_params(dhd, pick, len_param))
+ continue;
+#endif /* BCMPCIE */
+ else if (dhd_conf_read_pm_params(dhd, pick, len_param))
+ continue;
+ else if (dhd_conf_read_others(dhd, pick, len_param))
+ continue;
+ else
+ continue;
+ }
+
+ bcmerror = 0;
+ } else {
+ CONFIG_ERROR(("%s: error reading config file: %d\n", __FUNCTION__, len));
+ bcmerror = BCME_SDIO_ERROR;
+ }
+
+err:
+ if (pick)
+ MFREE(dhd->osh, pick, MAXSZ_BUF);
+
+ if (memblock)
+ MFREE(dhd->osh, memblock, MAXSZ_CONFIG);
+
+ if (image)
+ dhd_os_close_image(image);
+
+ return bcmerror;
+}
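+
+/*
+ * Illustrative config fragment for dhd_conf_read_config(). The parser above
+ * splits the file into "name=value" entries (see pick_config_vars()) and
+ * hands each one to the dhd_conf_read_*() helpers. The parameter names
+ * below match handlers in this file; the values are examples only, not
+ * recommended settings:
+ *
+ *   keep_alive_period=30000
+ *   bcn_timeout=20
+ *   frameburst=1
+ *   wl_preinit=mpc=0, 86=0
+ */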
+
+int
+dhd_conf_set_chiprev(dhd_pub_t *dhd, uint chip, uint chiprev)
+{
+ printf("%s: chip=0x%x, chiprev=%d\n", __FUNCTION__, chip, chiprev);
+ dhd->conf->chip = chip;
+ dhd->conf->chiprev = chiprev;
+ return 0;
+}
+
+uint
+dhd_conf_get_chip(void *context)
+{
+ dhd_pub_t *dhd = context;
+
+ if (dhd && dhd->conf)
+ return dhd->conf->chip;
+ return 0;
+}
+
+uint
+dhd_conf_get_chiprev(void *context)
+{
+ dhd_pub_t *dhd = context;
+
+ if (dhd && dhd->conf)
+ return dhd->conf->chiprev;
+ return 0;
+}
+
+#ifdef BCMSDIO
+void
+dhd_conf_set_txglom_params(dhd_pub_t *dhd, bool enable)
+{
+ struct dhd_conf *conf = dhd->conf;
+
+ if (enable) {
+#if defined(BCMSDIOH_TXGLOM_EXT)
+ if (conf->chip == BCM43362_CHIP_ID || conf->chip == BCM4330_CHIP_ID ||
+ conf->chip == BCM43340_CHIP_ID || conf->chip == BCM43341_CHIP_ID ||
+ conf->chip == BCM4334_CHIP_ID || conf->chip == BCM4324_CHIP_ID) {
+ conf->txglom_mode = SDPCM_TXGLOM_CPY;
+ }
+#endif
+ // other parameters set in preinit or config.txt
+ } else {
+ // clear txglom parameters
+ conf->txglom_ext = FALSE;
+ conf->txglom_bucket_size = 0;
+ conf->txglomsize = 0;
+ conf->deferred_tx_len = 0;
+ }
+ if (conf->txglom_ext)
+ printf("%s: txglom_ext=%d, txglom_bucket_size=%d\n", __FUNCTION__,
+ conf->txglom_ext, conf->txglom_bucket_size);
+ printf("%s: txglom_mode=%s\n", __FUNCTION__,
+ conf->txglom_mode == SDPCM_TXGLOM_MDESC ? "multi-desc" : "copy");
+ printf("%s: txglomsize=%d, deferred_tx_len=%d\n", __FUNCTION__,
+ conf->txglomsize, conf->deferred_tx_len);
+ printf("%s: txinrx_thres=%d, dhd_txminmax=%d\n", __FUNCTION__,
+ conf->txinrx_thres, conf->dhd_txminmax);
+ printf("%s: tx_max_offset=%d, txctl_tmo_fix=%d\n", __FUNCTION__,
+ conf->tx_max_offset, conf->txctl_tmo_fix);
+
+}
+#endif
+
+static int
+dhd_conf_rsdb_mode(dhd_pub_t *dhd, char *buf)
+{
+ char *pch;
+ wl_config_t rsdb_mode_cfg = {1, 0};
+
+ pch = buf;
+ if (pch) {
+ rsdb_mode_cfg.config = (int)simple_strtol(pch, NULL, 0);
+ dhd_conf_set_bufiovar(dhd, WLC_SET_VAR, "rsdb_mode", (char *)&rsdb_mode_cfg,
+ sizeof(rsdb_mode_cfg), TRUE);
+ printf("%s: rsdb_mode %d\n", __FUNCTION__, rsdb_mode_cfg.config);
+ }
+
+ return 0;
+}
+
+typedef int (tpl_parse_t)(dhd_pub_t *dhd, char *buf);
+
+typedef struct iovar_tpl_t {
+ int cmd;
+ char *name;
+ tpl_parse_t *parse;
+} iovar_tpl_t;
+
+const iovar_tpl_t iovar_tpl_list[] = {
+ {WLC_SET_VAR, "rsdb_mode", dhd_conf_rsdb_mode},
+};
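+
+/*
+ * iovar_tpl_list routes selected wl_preinit entries to dedicated parsers:
+ * e.g. "wl_preinit=rsdb_mode=0" is handled by dhd_conf_rsdb_mode() above,
+ * while names without a template fall back to dhd_conf_set_intiovar() in
+ * dhd_conf_set_wl_preinit() below. To special-case another iovar, add a
+ * (cmd, name, parse) entry here.
+ */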
+
+static int iovar_tpl_parse(const iovar_tpl_t *tpl, int tpl_count,
+ dhd_pub_t *dhd, int cmd, char *name, char *buf)
+{
+ int i, ret = 0;
+
+ /* look for a matching code in the table */
+ for (i = 0; i < tpl_count; i++, tpl++) {
+ if (tpl->cmd == cmd && !strcmp(tpl->name, name))
+ break;
+ }
+ if (i < tpl_count && tpl->parse) {
+ ret = tpl->parse(dhd, buf);
+ } else {
+ ret = -1;
+ }
+
+ return ret;
+}
+
+bool
+dhd_conf_set_wl_preinit(dhd_pub_t *dhd, char *data)
+{
+ int cmd, val, ret = 0;
+ char name[32], *pch, *pick_tmp, *pick_tmp2;
+
+ /* Process wl_preinit:
+ * wl_preinit=[cmd]=[val], [cmd]=[val]
+ * Ex: wl_preinit=86=0, mpc=0
+ */
+ pick_tmp = data;
+ while (pick_tmp && (pick_tmp2 = bcmstrtok(&pick_tmp, ",", 0)) != NULL) {
+ pch = bcmstrtok(&pick_tmp2, "=", 0);
+ if (!pch)
+ break;
+ if (*pch == ' ') {
+ pch++;
+ }
+ memset(name, 0, sizeof(name));
+ cmd = (int)simple_strtol(pch, NULL, 0);
+ if (cmd == 0) {
+ cmd = WLC_SET_VAR;
+ strncpy(name, pch, sizeof(name) - 1);
+ }
+ pch = bcmstrtok(&pick_tmp2, ",", 0);
+ if (!pch) {
+ break;
+ }
+ ret = iovar_tpl_parse(iovar_tpl_list, ARRAY_SIZE(iovar_tpl_list),
+ dhd, cmd, name, pch);
+ if (ret) {
+ val = (int)simple_strtol(pch, NULL, 0);
+ dhd_conf_set_intiovar(dhd, cmd, name, val, -1, TRUE);
+ }
+ }
+
+ return true;
+}
+
+void
+dhd_conf_postinit_ioctls(dhd_pub_t *dhd)
+{
+ struct dhd_conf *conf = dhd->conf;
+ char wl_preinit[] = "assoc_retry_max=30";
+
+ dhd_conf_set_intiovar(dhd, WLC_UP, "up", 0, 0, FALSE);
+ dhd_conf_map_country_list(dhd, &conf->cspec);
+ dhd_conf_set_country(dhd, &conf->cspec);
+ dhd_conf_fix_country(dhd);
+ dhd_conf_get_country(dhd, &dhd->dhd_cspec);
+
+ dhd_conf_set_intiovar(dhd, WLC_SET_BAND, "WLC_SET_BAND", conf->band, 0, FALSE);
+ dhd_conf_set_intiovar(dhd, WLC_SET_VAR, "bcn_timeout", conf->bcn_timeout, 0, FALSE);
+ dhd_conf_set_intiovar(dhd, WLC_SET_PM, "PM", conf->pm, 0, FALSE);
+ dhd_conf_set_intiovar(dhd, WLC_SET_SRL, "WLC_SET_SRL", conf->srl, 0, TRUE);
+ dhd_conf_set_intiovar(dhd, WLC_SET_LRL, "WLC_SET_LRL", conf->lrl, 0, FALSE);
+ dhd_conf_set_bw_cap(dhd);
+ dhd_conf_set_roam(dhd);
+
+#if defined(BCMPCIE)
+ dhd_conf_set_intiovar(dhd, WLC_SET_VAR, "bus:deepsleep_disable",
+ conf->bus_deepsleep_disable, 0, FALSE);
+#endif /* defined(BCMPCIE) */
+
+#ifdef IDHCP
+ dhd_conf_set_intiovar(dhd, WLC_SET_VAR, "dhcpc_enable", conf->dhcpc_enable, 0, FALSE);
+ if (dhd->conf->dhcpd_enable >= 0) {
+ dhd_conf_set_bufiovar(dhd, WLC_SET_VAR, "dhcpd_ip_addr",
+ (char *)&conf->dhcpd_ip_addr, sizeof(conf->dhcpd_ip_addr), FALSE);
+ dhd_conf_set_bufiovar(dhd, WLC_SET_VAR, "dhcpd_ip_mask",
+ (char *)&conf->dhcpd_ip_mask, sizeof(conf->dhcpd_ip_mask), FALSE);
+ dhd_conf_set_bufiovar(dhd, WLC_SET_VAR, "dhcpd_ip_start",
+ (char *)&conf->dhcpd_ip_start, sizeof(conf->dhcpd_ip_start), FALSE);
+ dhd_conf_set_bufiovar(dhd, WLC_SET_VAR, "dhcpd_ip_end",
+ (char *)&conf->dhcpd_ip_end, sizeof(conf->dhcpd_ip_end), FALSE);
+ dhd_conf_set_intiovar(dhd, WLC_SET_VAR, "dhcpd_enable",
+ conf->dhcpd_enable, 0, FALSE);
+ }
+#endif
+ dhd_conf_set_intiovar(dhd, WLC_SET_VAR, "txbf", conf->txbf, 0, FALSE);
+ dhd_conf_set_intiovar(dhd, WLC_SET_FAKEFRAG, "WLC_SET_FAKEFRAG", conf->frameburst, 0, FALSE);
+ dhd_conf_set_wl_preinit(dhd, wl_preinit);
+ dhd_conf_set_wl_preinit(dhd, conf->wl_preinit);
+
+#ifndef WL_CFG80211
+ dhd_conf_set_intiovar(dhd, WLC_UP, "up", 0, 0, FALSE);
+#endif
+
+}
+
+int
+dhd_conf_preinit(dhd_pub_t *dhd)
+{
+ struct dhd_conf *conf = dhd->conf;
+ int i;
+
+ CONFIG_TRACE(("%s: Enter\n", __FUNCTION__));
+
+#ifdef BCMSDIO
+ dhd_conf_free_mac_list(&conf->fw_by_mac);
+ dhd_conf_free_mac_list(&conf->nv_by_mac);
+ dhd_conf_free_chip_nv_path_list(&conf->nv_by_chip);
+#endif
+ dhd_conf_free_country_list(&conf->country_list);
+ if (conf->magic_pkt_filter_add) {
+ kfree(conf->magic_pkt_filter_add);
+ conf->magic_pkt_filter_add = NULL;
+ }
+ if (conf->wl_preinit) {
+ kfree(conf->wl_preinit);
+ conf->wl_preinit = NULL;
+ }
+ memset(&conf->country_list, 0, sizeof(conf_country_list_t));
+ conf->band = -1;
+ memset(&conf->bw_cap, -1, sizeof(conf->bw_cap));
+ if (conf->chip == BCM43362_CHIP_ID || conf->chip == BCM4330_CHIP_ID) {
+ strcpy(conf->cspec.country_abbrev, "ALL");
+ strcpy(conf->cspec.ccode, "ALL");
+ conf->cspec.rev = 0;
+ } else if (conf->chip == BCM4335_CHIP_ID || conf->chip == BCM4339_CHIP_ID ||
+ conf->chip == BCM4354_CHIP_ID || conf->chip == BCM4356_CHIP_ID ||
+ conf->chip == BCM4345_CHIP_ID || conf->chip == BCM4371_CHIP_ID ||
+ conf->chip == BCM43569_CHIP_ID || conf->chip == BCM4359_CHIP_ID ||
+ conf->chip == BCM4362_CHIP_ID) {
+ strcpy(conf->cspec.country_abbrev, "CN");
+ strcpy(conf->cspec.ccode, "CN");
+ conf->cspec.rev = 38;
+ } else {
+ strcpy(conf->cspec.country_abbrev, "CN");
+ strcpy(conf->cspec.ccode, "CN");
+ conf->cspec.rev = 0;
+ }
+ memset(&conf->channels, 0, sizeof(wl_channel_list_t));
+ conf->roam_off = 1;
+ conf->roam_off_suspend = 1;
+#ifdef CUSTOM_ROAM_TRIGGER_SETTING
+ conf->roam_trigger[0] = CUSTOM_ROAM_TRIGGER_SETTING;
+#else
+ conf->roam_trigger[0] = -65;
+#endif
+ conf->roam_trigger[1] = WLC_BAND_ALL;
+ conf->roam_scan_period[0] = 10;
+ conf->roam_scan_period[1] = WLC_BAND_ALL;
+#ifdef CUSTOM_ROAM_DELTA_SETTING
+ conf->roam_delta[0] = CUSTOM_ROAM_DELTA_SETTING;
+#else
+ conf->roam_delta[0] = 15;
+#endif
+ conf->roam_delta[1] = WLC_BAND_ALL;
+#ifdef FULL_ROAMING_SCAN_PERIOD_60_SEC
+ conf->fullroamperiod = 60;
+#else /* FULL_ROAMING_SCAN_PERIOD_60_SEC */
+ conf->fullroamperiod = 120;
+#endif /* FULL_ROAMING_SCAN_PERIOD_60_SEC */
+#ifdef CUSTOM_KEEP_ALIVE_SETTING
+ conf->keep_alive_period = CUSTOM_KEEP_ALIVE_SETTING;
+#else
+ conf->keep_alive_period = 28000;
+#endif
+ conf->force_wme_ac = 0;
+ memset(&conf->wme_sta, 0, sizeof(wme_param_t));
+ memset(&conf->wme_ap, 0, sizeof(wme_param_t));
+ conf->phy_oclscdenable = -1;
+#ifdef PKT_FILTER_SUPPORT
+ memset(&conf->pkt_filter_add, 0, sizeof(conf_pkt_filter_add_t));
+ memset(&conf->pkt_filter_del, 0, sizeof(conf_pkt_filter_del_t));
+#endif
+ conf->srl = -1;
+ conf->lrl = -1;
+ conf->bcn_timeout = 16;
+ conf->txbf = -1;
+ conf->disable_proptx = -1;
+ conf->dhd_poll = -1;
+#ifdef BCMSDIO
+ conf->use_rxchain = 0;
+ conf->bus_rxglom = TRUE;
+ conf->txglom_ext = FALSE;
+ conf->tx_max_offset = 0;
+ conf->txglomsize = SDPCM_DEFGLOM_SIZE;
+ conf->txctl_tmo_fix = 300;
+ conf->txglom_mode = SDPCM_TXGLOM_CPY;
+ conf->deferred_tx_len = 0;
+ conf->dhd_txminmax = 1;
+ conf->txinrx_thres = -1;
+ conf->sd_f2_blocksize = 0;
+#if defined(HW_OOB)
+ conf->oob_enabled_later = FALSE;
+#endif
+ conf->orphan_move = 0;
+#endif
+#ifdef BCMPCIE
+ conf->bus_deepsleep_disable = 1;
+#endif
+ conf->dpc_cpucore = -1;
+ conf->rxf_cpucore = -1;
+ conf->frameburst = -1;
+ conf->deepsleep = FALSE;
+ conf->pm = -1;
+ conf->pm_in_suspend = -1;
+ conf->suspend_bcn_li_dtim = -1;
+ conf->xmit_in_suspend = TRUE;
+ conf->ap_in_suspend = 0;
+#ifdef SUSPEND_EVENT
+ conf->suspend_eventmask_enable = FALSE;
+ memset(&conf->suspend_eventmask, 0, sizeof(conf->suspend_eventmask));
+ memset(&conf->resume_eventmask, 0, sizeof(conf->resume_eventmask));
+#endif
+#ifdef IDHCP
+ conf->dhcpc_enable = -1;
+ conf->dhcpd_enable = -1;
+#endif
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 1, 0))
+ conf->tsq = 10;
+#else
+ conf->tsq = 0;
+#endif
+#ifdef DHDTCPACK_SUPPRESS
+#ifdef BCMPCIE
+ conf->tcpack_sup_mode = TCPACK_SUP_DEFAULT;
+#else
+ conf->tcpack_sup_mode = TCPACK_SUP_OFF;
+#endif
+#endif
+ conf->pktprio8021x = -1;
+ conf->ctrl_resched = 2;
+ conf->dhd_ioctl_timeout_msec = 0;
+ conf->in4way = NO_SCAN_IN4WAY;
+ conf->max_wait_gc_time = 300;
+#ifdef ISAM_PREINIT
+ memset(conf->isam_init, 0, sizeof(conf->isam_init));
+ memset(conf->isam_config, 0, sizeof(conf->isam_config));
+ memset(conf->isam_enable, 0, sizeof(conf->isam_enable));
+#endif
+ for (i=0; i<MCHAN_MAX_NUM; i++) {
+ memset(&conf->mchan[i], -1, sizeof(mchan_params_t));
+ }
+#ifdef CUSTOMER_HW_AMLOGIC
+ dhd_slpauto = FALSE;
+#endif
+ if (conf->chip == BCM4335_CHIP_ID || conf->chip == BCM4339_CHIP_ID ||
+ conf->chip == BCM4354_CHIP_ID || conf->chip == BCM4356_CHIP_ID ||
+ conf->chip == BCM4345_CHIP_ID || conf->chip == BCM4371_CHIP_ID ||
+ conf->chip == BCM43569_CHIP_ID || conf->chip == BCM4359_CHIP_ID ||
+ conf->chip == BCM4362_CHIP_ID) {
+#ifdef DHDTCPACK_SUPPRESS
+#ifdef BCMSDIO
+ conf->tcpack_sup_mode = TCPACK_SUP_REPLACE;
+#endif
+#endif
+#if defined(BCMSDIO) || defined(BCMPCIE)
+ dhd_rxbound = 128;
+ dhd_txbound = 64;
+#endif
+ conf->txbf = 1;
+ conf->frameburst = 1;
+#ifdef BCMSDIO
+ conf->dhd_txminmax = -1;
+ conf->txinrx_thres = 128;
+ conf->sd_f2_blocksize = CUSTOM_SDIO_F2_BLKSIZE;
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 1, 0))
+ conf->orphan_move = 1;
+#else
+ conf->orphan_move = 0;
+#endif
+#endif
+ }
+
+#ifdef BCMSDIO
+#if defined(BCMSDIOH_TXGLOM_EXT)
+ if (conf->chip == BCM43362_CHIP_ID || conf->chip == BCM4330_CHIP_ID ||
+ conf->chip == BCM43340_CHIP_ID || conf->chip == BCM43341_CHIP_ID ||
+ conf->chip == BCM4334_CHIP_ID || conf->chip == BCM4324_CHIP_ID) {
+ conf->txglom_ext = TRUE;
+ } else {
+ conf->txglom_ext = FALSE;
+ }
+ if (conf->chip == BCM43362_CHIP_ID || conf->chip == BCM4330_CHIP_ID) {
+ conf->txglom_bucket_size = 1680; // fixed value, don't change
+ conf->txglomsize = 6;
+ }
+ if (conf->chip == BCM4334_CHIP_ID || conf->chip == BCM43340_CHIP_ID ||
+ conf->chip == BCM43341_CHIP_ID || conf->chip == BCM4324_CHIP_ID) {
+ conf->txglom_bucket_size = 1684; // fixed value, don't change
+ conf->txglomsize = 16;
+ }
+#endif
+ if (conf->txglomsize > SDPCM_MAXGLOM_SIZE)
+ conf->txglomsize = SDPCM_MAXGLOM_SIZE;
+#endif
+
+ return 0;
+}
+
+int
+dhd_conf_reset(dhd_pub_t *dhd)
+{
+#ifdef BCMSDIO
+ dhd_conf_free_mac_list(&dhd->conf->fw_by_mac);
+ dhd_conf_free_mac_list(&dhd->conf->nv_by_mac);
+ dhd_conf_free_chip_nv_path_list(&dhd->conf->nv_by_chip);
+#endif
+ dhd_conf_free_country_list(&dhd->conf->country_list);
+ if (dhd->conf->magic_pkt_filter_add) {
+ kfree(dhd->conf->magic_pkt_filter_add);
+ dhd->conf->magic_pkt_filter_add = NULL;
+ }
+ if (dhd->conf->wl_preinit) {
+ kfree(dhd->conf->wl_preinit);
+ dhd->conf->wl_preinit = NULL;
+ }
+ memset(dhd->conf, 0, sizeof(dhd_conf_t));
+ return 0;
+}
+
+int
+dhd_conf_attach(dhd_pub_t *dhd)
+{
+ dhd_conf_t *conf;
+
+ CONFIG_TRACE(("%s: Enter\n", __FUNCTION__));
+
+ if (dhd->conf != NULL) {
+ printf("%s: config is already attached\n", __FUNCTION__);
+ return 0;
+ }
+ /* Allocate private bus interface state */
+ if (!(conf = MALLOC(dhd->osh, sizeof(dhd_conf_t)))) {
+ CONFIG_ERROR(("%s: MALLOC failed\n", __FUNCTION__));
+ goto fail;
+ }
+ memset(conf, 0, sizeof(dhd_conf_t));
+
+ dhd->conf = conf;
+
+ return 0;
+
+fail:
+ if (conf != NULL)
+ MFREE(dhd->osh, conf, sizeof(dhd_conf_t));
+ return BCME_NOMEM;
+}
+
+void
+dhd_conf_detach(dhd_pub_t *dhd)
+{
+ CONFIG_TRACE(("%s: Enter\n", __FUNCTION__));
+
+ if (dhd->conf) {
+#ifdef BCMSDIO
+ dhd_conf_free_mac_list(&dhd->conf->fw_by_mac);
+ dhd_conf_free_mac_list(&dhd->conf->nv_by_mac);
+ dhd_conf_free_chip_nv_path_list(&dhd->conf->nv_by_chip);
+#endif
+ dhd_conf_free_country_list(&dhd->conf->country_list);
+ if (dhd->conf->magic_pkt_filter_add) {
+ kfree(dhd->conf->magic_pkt_filter_add);
+ dhd->conf->magic_pkt_filter_add = NULL;
+ }
+ if (dhd->conf->wl_preinit) {
+ kfree(dhd->conf->wl_preinit);
+ dhd->conf->wl_preinit = NULL;
+ }
+ MFREE(dhd->osh, dhd->conf, sizeof(dhd_conf_t));
+ }
+ dhd->conf = NULL;
+}
#define _dhd_config_
#include <bcmdevs.h>
-#include <siutils.h>
#include <dngl_stats.h>
#include <dhd.h>
#include <wlioctl.h>
} conf_pkt_filter_del_t;
#endif
-#define CONFIG_COUNTRY_LIST_SIZE 500
-typedef struct country_list {
- struct country_list *next;
- wl_country_t cspec;
-} country_list_t;
+#define CONFIG_COUNTRY_LIST_SIZE 100
+typedef struct conf_country_list {
+ uint32 count;
+ wl_country_t *cspec[CONFIG_COUNTRY_LIST_SIZE];
+} conf_country_list_t;
/* mchan_params */
#define MCHAN_MAX_NUM 4
#define MIRACAST_SOURCE 1
#define MIRACAST_SINK 2
typedef struct mchan_params {
- struct mchan_params *next;
int bw;
int p2p_mode;
int miracast_mode;
enum in4way_flags {
NO_SCAN_IN4WAY = (1 << (0)),
NO_BTC_IN4WAY = (1 << (1)),
- DONT_DELETE_GC_AFTER_WPS = (1 << (2)),
- WAIT_DISCONNECTED = (1 << (3)),
-};
-
-enum in_suspend_flags {
- NO_EVENT_IN_SUSPEND = (1 << (0)),
- NO_TXDATA_IN_SUSPEND = (1 << (1)),
- NO_TXCTL_IN_SUSPEND = (1 << (2)),
- AP_DOWN_IN_SUSPEND = (1 << (3)),
- ROAM_OFFLOAD_IN_SUSPEND = (1 << (4)),
- AP_FILTER_IN_SUSPEND = (1 << (5)),
- WOWL_IN_SUSPEND = (1 << (6)),
- ALL_IN_SUSPEND = 0xFFFFFFFF,
-};
-
-enum in_suspend_mode {
- EARLY_SUSPEND = 0,
- PM_NOTIFIER = 1
+ DONT_DELETE_GC_AFTER_WPS = (1 << (2))
};
enum eapol_status {
EAPOL_STATUS_NONE = 0,
- EAPOL_STATUS_REQID = 1,
- EAPOL_STATUS_RSPID = 2,
- EAPOL_STATUS_WSC_START = 3,
- EAPOL_STATUS_WPS_M1 = 4,
- EAPOL_STATUS_WPS_M2 = 5,
- EAPOL_STATUS_WPS_M3 = 6,
- EAPOL_STATUS_WPS_M4 = 7,
- EAPOL_STATUS_WPS_M5 = 8,
- EAPOL_STATUS_WPS_M6 = 9,
- EAPOL_STATUS_WPS_M7 = 10,
- EAPOL_STATUS_WPS_M8 = 11,
- EAPOL_STATUS_WSC_DONE = 12,
- EAPOL_STATUS_4WAY_START = 13,
- EAPOL_STATUS_4WAY_M1 = 14,
- EAPOL_STATUS_4WAY_M2 = 15,
- EAPOL_STATUS_4WAY_M3 = 16,
- EAPOL_STATUS_4WAY_M4 = 17,
- EAPOL_STATUS_GROUPKEY_M1 = 18,
- EAPOL_STATUS_GROUPKEY_M2 = 19,
- EAPOL_STATUS_4WAY_DONE = 20
+ EAPOL_STATUS_WPS_DONE,
+ EAPOL_STATUS_M4
};
typedef struct dhd_conf {
uint chip;
uint chiprev;
-#ifdef GET_OTP_MODULE_NAME
- char module_name[16];
-#endif
- struct ether_addr otp_mac;
int fw_type;
-#ifdef BCMSDIO
wl_mac_list_ctrl_t fw_by_mac;
wl_mac_list_ctrl_t nv_by_mac;
-#endif
wl_chip_nv_path_list_ctrl_t nv_by_chip;
- country_list_t *country_head;
+ conf_country_list_t country_list;
int band;
int bw_cap[2];
wl_country_t cspec;
int roam_delta[2];
int fullroamperiod;
uint keep_alive_period;
-#ifdef ARP_OFFLOAD_SUPPORT
- bool garp;
-#endif
int force_wme_ac;
wme_param_t wme_sta;
wme_param_t wme_ap;
+ int phy_oclscdenable;
#ifdef PKT_FILTER_SUPPORT
conf_pkt_filter_add_t pkt_filter_add;
conf_pkt_filter_del_t pkt_filter_del;
int srl;
int lrl;
uint bcn_timeout;
+ int txbf;
int disable_proptx;
int dhd_poll;
#ifdef BCMSDIO
int txglom_bucket_size;
int txinrx_thres;
int dhd_txminmax; // -1=DATABUFCNT(bus)
+ uint sd_f2_blocksize;
bool oob_enabled_later;
-#if defined(SDIO_ISR_THREAD)
- bool intr_extn;
-#endif
-#ifdef BCMSDIO_RXLIM_POST
- bool rxlim_en;
-#endif
+ int orphan_move;
#endif
#ifdef BCMPCIE
int bus_deepsleep_disable;
bool deepsleep;
int pm;
int pm_in_suspend;
- int suspend_mode;
int suspend_bcn_li_dtim;
#ifdef DHDTCPACK_SUPPRESS
uint8 tcpack_sup_mode;
#endif
int pktprio8021x;
- uint insuspend;
- bool suspended;
+ int xmit_in_suspend;
+ int ap_in_suspend;
#ifdef SUSPEND_EVENT
+ bool suspend_eventmask_enable;
+ char suspend_eventmask[WL_EVENTING_MASK_LEN];
char resume_eventmask[WL_EVENTING_MASK_LEN];
- struct ether_addr bssid_insuspend;
- bool wlfc;
#endif
#ifdef IDHCP
int dhcpc_enable;
char isam_enable[50];
#endif
int ctrl_resched;
- mchan_params_t *mchan;
+ int dhd_ioctl_timeout_msec;
+ struct mchan_params mchan[MCHAN_MAX_NUM];
char *wl_preinit;
- char *wl_suspend;
- char *wl_resume;
int tsq;
- int orphan_move;
uint eapol_status;
uint in4way;
-#ifdef WL_EXT_WOWL
- uint wowl;
-#endif
-#ifdef GET_CUSTOM_MAC_FROM_CONFIG
- char hw_ether[62];
-#endif
- wait_queue_head_t event_complete;
-#ifdef PROPTX_MAXCOUNT
- int proptx_maxcnt_2g;
- int proptx_maxcnt_5g;
-#endif /* DYNAMIC_PROPTX_MAXCOUNT */
+ uint max_wait_gc_time;
} dhd_conf_t;
#ifdef BCMSDIO
-void dhd_conf_get_otp(dhd_pub_t *dhd, bcmsdh_info_t *sdh, si_t *sih);
+int dhd_conf_get_mac(dhd_pub_t *dhd, bcmsdh_info_t *sdh, uint8 *mac);
+void dhd_conf_set_fw_name_by_mac(dhd_pub_t *dhd, bcmsdh_info_t *sdh, char *fw_path);
+void dhd_conf_set_nv_name_by_mac(dhd_pub_t *dhd, bcmsdh_info_t *sdh, char *nv_path);
#if defined(HW_OOB) || defined(FORCE_WOWLAN)
-void dhd_conf_set_hw_oob_intr(bcmsdh_info_t *sdh, struct si_pub *sih);
+void dhd_conf_set_hw_oob_intr(bcmsdh_info_t *sdh, uint chip);
#endif
void dhd_conf_set_txglom_params(dhd_pub_t *dhd, bool enable);
+int dhd_conf_set_blksize(bcmsdh_info_t *sdh);
+#endif
+void dhd_conf_set_fw_name_by_chip(dhd_pub_t *dhd, char *fw_path);
+void dhd_conf_set_clm_name_by_chip(dhd_pub_t *dhd, char *clm_path);
+void dhd_conf_set_nv_name_by_chip(dhd_pub_t *dhd, char *nv_path);
+void dhd_conf_set_path(dhd_pub_t *dhd, char *dst_name, char *dst_path, char *src_path);
+#ifdef CONFIG_PATH_AUTO_SELECT
+void dhd_conf_set_conf_name_by_chip(dhd_pub_t *dhd, char *conf_path);
#endif
-void dhd_conf_set_path_params(dhd_pub_t *dhd, char *fw_path, char *nv_path);
-int dhd_conf_set_intiovar(dhd_pub_t *dhd, uint cmd, char *name, int val,
- int def, bool down);
-int dhd_conf_get_band(dhd_pub_t *dhd);
+int dhd_conf_set_intiovar(dhd_pub_t *dhd, uint cmd, char *name, int val, int def, bool down);
+int dhd_conf_get_iovar(dhd_pub_t *dhd, int cmd, char *name, char *buf, int len, int ifidx);
+int dhd_conf_set_bufiovar(dhd_pub_t *dhd, uint cmd, char *name, char *buf, int len, bool down);
+uint dhd_conf_get_band(dhd_pub_t *dhd);
int dhd_conf_set_country(dhd_pub_t *dhd, wl_country_t *cspec);
int dhd_conf_get_country(dhd_pub_t *dhd, wl_country_t *cspec);
int dhd_conf_map_country_list(dhd_pub_t *dhd, wl_country_t *cspec);
-#ifdef CCODE_LIST
-int dhd_ccode_map_country_list(dhd_pub_t *dhd, wl_country_t *cspec);
-#endif
int dhd_conf_fix_country(dhd_pub_t *dhd);
bool dhd_conf_match_channel(dhd_pub_t *dhd, uint32 channel);
-void dhd_conf_set_wme(dhd_pub_t *dhd, int ifidx, int mode);
+void dhd_conf_set_wme(dhd_pub_t *dhd, int mode);
void dhd_conf_set_mchan_bw(dhd_pub_t *dhd, int go, int source);
void dhd_conf_add_pkt_filter(dhd_pub_t *dhd);
bool dhd_conf_del_pkt_filter(dhd_pub_t *dhd, uint32 id);
uint dhd_conf_get_chip(void *context);
uint dhd_conf_get_chiprev(void *context);
int dhd_conf_get_pm(dhd_pub_t *dhd);
-int dhd_conf_check_hostsleep(dhd_pub_t *dhd, int cmd, void *buf, int len,
- int *hostsleep_set, int *hostsleep_val, int *ret);
-void dhd_conf_get_hostsleep(dhd_pub_t *dhd,
- int hostsleep_set, int hostsleep_val, int ret);
-int dhd_conf_mkeep_alive(dhd_pub_t *dhd, int ifidx, int id, int period,
- char *packet, bool bcast);
-#ifdef ARP_OFFLOAD_SUPPORT
-void dhd_conf_set_garp(dhd_pub_t *dhd, int ifidx, uint32 ipa, bool enable);
-#endif
+void dhd_conf_set_eapol_status(dhd_pub_t *dhd, char *ifname,
+ char *dump_data);
+
#ifdef PROP_TXSTATUS
int dhd_conf_get_disable_proptx(dhd_pub_t *dhd);
#endif
-uint dhd_conf_get_insuspend(dhd_pub_t *dhd, uint mask);
-int dhd_conf_set_suspend_resume(dhd_pub_t *dhd, int suspend);
+int dhd_conf_get_ap_mode_in_suspend(dhd_pub_t *dhd);
+int dhd_conf_set_ap_in_suspend(dhd_pub_t *dhd, int suspend);
void dhd_conf_postinit_ioctls(dhd_pub_t *dhd);
int dhd_conf_preinit(dhd_pub_t *dhd);
int dhd_conf_reset(dhd_pub_t *dhd);
int dhd_conf_attach(dhd_pub_t *dhd);
void dhd_conf_detach(dhd_pub_t *dhd);
void *dhd_get_pub(struct net_device *dev);
-int wl_pattern_atoh(char *src, char *dst);
-#ifdef BCMSDIO
-extern int dhd_bus_sleep(dhd_pub_t *dhdp, bool sleep, uint32 *intstatus);
-#endif
+void *dhd_get_conf(struct net_device *dev);
#endif /* _dhd_config_ */
+++ /dev/null
-/*\r
- * Broadcom Dongle Host Driver (DHD)\r
- *\r
- * Copyright (C) 1999-2018, Broadcom.\r
- *\r
- * Unless you and Broadcom execute a separate written software license\r
- * agreement governing use of this software, this software is licensed to you\r
- * under the terms of the GNU General Public License version 2 (the "GPL"),\r
- * available at http://www.broadcom.com/licenses/GPLv2.php, with the\r
- * following added to such license:\r
- *\r
- * As a special exception, the copyright holders of this software give you\r
- * permission to link this software with independent modules, and to copy and\r
- * distribute the resulting executable under terms of your choice, provided that\r
- * you also meet, for each linked independent module, the terms and conditions of\r
- * the license of that module. An independent module is a module which is not\r
- * derived from this software. The special exception does not apply to any\r
- * modifications of the software.\r
- *\r
- * Notwithstanding the above, under no circumstances may you combine this\r
- * software in any way with any other Broadcom software provided under a license\r
- * other than the GPL, without Broadcom's express prior written consent.\r
- *\r
- * $Id: dhd_csi.c 606280 2015-12-15 05:28:25Z $\r
- */\r
-#include <osl.h>\r
-\r
-#include <bcmutils.h>\r
-\r
-#include <bcmendian.h>\r
-#include <linuxver.h>\r
-#include <linux/list.h>\r
-#include <linux/sort.h>\r
-#include <dngl_stats.h>\r
-#include <wlioctl.h>\r
-\r
-#include <bcmevent.h>\r
-#include <dhd.h>\r
-#include <dhd_dbg.h>\r
-#include <dhd_csi.h>\r
-\r
-#define NULL_CHECK(p, s, err) \\r
- do { \\r
- if (!(p)) { \\r
- printf("NULL POINTER (%s) : %s\n", __FUNCTION__, (s)); \\r
- err = BCME_ERROR; \\r
- return err; \\r
- } \\r
- } while (0)\r
-\r
-#define TIMESPEC_TO_US(ts) (((uint64)(ts).tv_sec * USEC_PER_SEC) + \\r
- (ts).tv_nsec / NSEC_PER_USEC)\r
-\r
-#define NULL_ADDR "\x00\x00\x00\x00\x00\x00"\r
-\r
-int\r
-dhd_csi_event_handler(dhd_pub_t *dhd, wl_event_msg_t *event, void *event_data)\r
-{\r
- int ret = BCME_OK;\r
- bool is_new = TRUE;\r
- cfr_dump_data_t *p_event;\r
- cfr_dump_list_t *ptr, *next, *new;\r
-\r
- NULL_CHECK(dhd, "dhd is NULL", ret);\r
-\r
- DHD_TRACE(("Enter %s\n", __FUNCTION__));\r
-\r
- if (!event_data) {\r
- DHD_ERROR(("%s: event_data is NULL\n", __FUNCTION__));\r
- return -EINVAL;\r
- }\r
- p_event = (cfr_dump_data_t *)event_data;\r
-\r
- /* check if this addr exist */\r
- if (!list_empty(&dhd->csi_list)) {\r
- list_for_each_entry_safe(ptr, next, &dhd->csi_list, list) {\r
- if (bcmp(&ptr->entry.header.peer_macaddr, &p_event->header.peer_macaddr,\r
- ETHER_ADDR_LEN) == 0) {\r
- int pos = 0, dump_len = 0, remain = 0;\r
- is_new = FALSE;\r
- DHD_INFO(("CSI data exist\n"));\r
- if (p_event->header.status == 0) {\r
- bcopy(&p_event->header, &ptr->entry.header, sizeof(cfr_dump_header_t));\r
- dump_len = p_event->header.cfr_dump_length;\r
- if (dump_len < MAX_EVENT_SIZE) {\r
- bcopy(&p_event->data, &ptr->entry.data, dump_len);\r
- } else { \r
- /* for big csi data */\r
- uint8 *p = (uint8 *)&ptr->entry.data;\r
- remain = p_event->header.remain_length;\r
- if (remain) {\r
- pos = dump_len - remain - MAX_EVENT_SIZE;\r
- p += pos;\r
- bcopy(&p_event->data, p, MAX_EVENT_SIZE);\r
- }\r
- /* copy rest of csi data */\r
- else {\r
- pos = dump_len - (dump_len % MAX_EVENT_SIZE);\r
- p += pos;\r
- bcopy(&p_event->data, p, (dump_len % MAX_EVENT_SIZE));\r
- }\r
- }\r
- return BCME_OK;\r
- }\r
- }\r
- }\r
- }\r
- if (is_new) {\r
- if (dhd->csi_count < MAX_CSI_NUM) {\r
- new = (cfr_dump_list_t *)MALLOCZ(dhd->osh, sizeof(cfr_dump_list_t));\r
- if (!new){\r
- DHD_ERROR(("Malloc cfr dump list error\n"));\r
- return BCME_NOMEM;\r
- }\r
- bcopy(&p_event->header, &new->entry.header, sizeof(cfr_dump_header_t));\r
- DHD_INFO(("New entry data size %d\n", p_event->header.cfr_dump_length));\r
- /* for big csi data */\r
- if (p_event->header.remain_length) {\r
- DHD_TRACE(("remain %d\n", p_event->header.remain_length));\r
- bcopy(&p_event->data, &new->entry.data, MAX_EVENT_SIZE);\r
- }\r
- else\r
- bcopy(&p_event->data, &new->entry.data, p_event->header.cfr_dump_length);\r
- INIT_LIST_HEAD(&(new->list));\r
- list_add_tail(&(new->list), &dhd->csi_list);\r
- dhd->csi_count++;\r
- }\r
- else {\r
- DHD_TRACE(("Over maximum CSI Number 8. SKIP it.\n"));\r
- }\r
- }\r
- return ret;\r
-}\r
-\r
-int\r
-dhd_csi_init(dhd_pub_t *dhd)\r
-{\r
- int err = BCME_OK;\r
-\r
- NULL_CHECK(dhd, "dhd is NULL", err);\r
- INIT_LIST_HEAD(&dhd->csi_list);\r
- dhd->csi_count = 0;\r
-\r
- return err;\r
-}\r
-\r
-int\r
-dhd_csi_deinit(dhd_pub_t *dhd)\r
-{\r
- int err = BCME_OK;\r
- cfr_dump_list_t *ptr, *next;\r
-\r
- NULL_CHECK(dhd, "dhd is NULL", err);\r
-\r
- if (!list_empty(&dhd->csi_list)) {\r
- list_for_each_entry_safe(ptr, next, &dhd->csi_list, list) {\r
- list_del(&ptr->list);\r
- MFREE(dhd->osh, ptr, sizeof(cfr_dump_list_t));\r
- }\r
- }\r
- return err;\r
-}\r
-\r
-void\r
-dhd_csi_clean_list(dhd_pub_t *dhd)\r
-{\r
- cfr_dump_list_t *ptr, *next;\r
- int num = 0;\r
-\r
- if (!dhd) {\r
- DHD_ERROR(("NULL POINTER: %s\n", __FUNCTION__));\r
- return;\r
- }\r
-\r
- if (!list_empty(&dhd->csi_list)) {\r
- list_for_each_entry_safe(ptr, next, &dhd->csi_list, list) {\r
- if (0 == ptr->entry.header.remain_length) {\r
- list_del(&ptr->list);\r
- num++;\r
- MFREE(dhd->osh, ptr, sizeof(cfr_dump_list_t));\r
- }\r
- }\r
- }\r
- dhd->csi_count = 0;\r
- DHD_TRACE(("Clean up %d record\n", num));\r
-}\r
-\r
-int\r
-dhd_csi_dump_list(dhd_pub_t *dhd, char *buf)\r
-{\r
- int ret = BCME_OK;\r
- cfr_dump_list_t *ptr, *next;\r
- uint8 * pbuf = buf;\r
- int num = 0;\r
- int length = 0;\r
-\r
- NULL_CHECK(dhd, "dhd is NULL", ret);\r
-\r
- /* check if this addr exist */\r
- if (!list_empty(&dhd->csi_list)) {\r
- list_for_each_entry_safe(ptr, next, &dhd->csi_list, list) {\r
- if (ptr->entry.header.remain_length) {\r
- DHD_ERROR(("data not ready %d\n", ptr->entry.header.remain_length));\r
- continue;\r
- }\r
- bcopy(&ptr->entry.header, pbuf, sizeof(cfr_dump_header_t));\r
- length += sizeof(cfr_dump_header_t);\r
- pbuf += sizeof(cfr_dump_header_t);\r
- DHD_TRACE(("Copy data size %d\n", ptr->entry.header.cfr_dump_length));\r
- bcopy(&ptr->entry.data, pbuf, ptr->entry.header.cfr_dump_length);\r
- length += ptr->entry.header.cfr_dump_length;\r
- pbuf += ptr->entry.header.cfr_dump_length;\r
- num++;\r
- }\r
- }\r
- DHD_TRACE(("dump %d record %d bytes\n", num, length));\r
-\r
- return length;\r
-}\r
-\r
+++ /dev/null
-/*\r
- * Broadcom Dongle Host Driver (DHD), CSI\r
- *\r
- * Copyright (C) 1999-2018, Broadcom.\r
- *\r
- * Unless you and Broadcom execute a separate written software license\r
- * agreement governing use of this software, this software is licensed to you\r
- * under the terms of the GNU General Public License version 2 (the "GPL"),\r
- * available at http://www.broadcom.com/licenses/GPLv2.php, with the\r
- * following added to such license:\r
- *\r
- * As a special exception, the copyright holders of this software give you\r
- * permission to link this software with independent modules, and to copy and\r
- * distribute the resulting executable under terms of your choice, provided that\r
- * you also meet, for each linked independent module, the terms and conditions of\r
- * the license of that module. An independent module is a module which is not\r
- * derived from this software. The special exception does not apply to any\r
- * modifications of the software.\r
- *\r
- * Notwithstanding the above, under no circumstances may you combine this\r
- * software in any way with any other Broadcom software provided under a license\r
- * other than the GPL, without Broadcom's express prior written consent.\r
- *\r
- * $Id: dhd_csi.h 558438 2015-05-22 06:05:11Z $\r
- */\r
-#ifndef __DHD_CSI_H__\r
-#define __DHD_CSI_H__\r
-\r
-/* Maxinum csi file dump size */\r
-#define MAX_CSI_FILESZ (32 * 1024)\r
-/* Maxinum subcarrier number */\r
-#define MAXINUM_CFR_DATA 256 * 4\r
-#define CSI_DUMP_PATH "/sys/bcm-dhd/csi"\r
-#define MAX_EVENT_SIZE 1400\r
-/* maximun csi number stored at dhd */\r
-#define MAX_CSI_NUM 8\r
-\r
-typedef struct cfr_dump_header {\r
- /* 0 - successful; 1 - Failed */\r
- uint8 status;\r
- /* Peer MAC address */\r
- uint8 peer_macaddr[6];\r
- /* Number of Space Time Streams */\r
- uint8 sts;\r
- /* Number of RX chain */\r
- uint8 num_rx;\r
- /* Number of subcarrier */\r
- uint16 num_carrier;\r
- /* Length of the CSI dump */\r
- uint32 cfr_dump_length;\r
- /* remain unsend CSI data length */\r
- uint32 remain_length;\r
- /* RSSI */\r
- int8 rssi;\r
-} __attribute__((packed)) cfr_dump_header_t;\r
-\r
-typedef struct cfr_dump_data {\r
- cfr_dump_header_t header;\r
- uint32 data[MAXINUM_CFR_DATA];\r
-} cfr_dump_data_t;\r
-\r
-typedef struct {\r
- struct list_head list;\r
- cfr_dump_data_t entry;\r
-} cfr_dump_list_t;\r
-\r
-int dhd_csi_event_handler(dhd_pub_t *dhd, wl_event_msg_t *event, void *event_data);\r
-\r
-int dhd_csi_init(dhd_pub_t *dhd);\r
-\r
-int dhd_csi_deinit(dhd_pub_t *dhd);\r
-\r
-void dhd_csi_clean_list(dhd_pub_t *dhd);\r
-\r
-int dhd_csi_dump_list(dhd_pub_t *dhd, char *buf);\r
-#endif /* __DHD_CSI_H__ */\r
-\r
/*
* Customer code to add GPIO control during WLAN start/stop
*
- * Copyright (C) 1999-2019, Broadcom.
- *
+ * Copyright (C) 1999-2017, Broadcom Corporation
+ *
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
* under the terms of the GNU General Public License version 2 (the "GPL"),
* available at http://www.broadcom.com/licenses/GPLv2.php, with the
* following added to such license:
- *
+ *
* As a special exception, the copyright holders of this software give you
* permission to link this software with independent modules, and to copy and
* distribute the resulting executable under terms of your choice, provided that
* the license of that module. An independent module is a module which is not
* derived from this software. The special exception does not apply to any
* modifications of the software.
- *
+ *
* Notwithstanding the above, under no circumstances may you combine this
* software in any way with any other Broadcom software provided under a license
* other than the GPL, without Broadcom's express prior written consent.
*
* <<Broadcom-WL-IPTag/Open:>>
*
- * $Id: dhd_custom_gpio.c 717227 2017-08-23 13:51:13Z $
+ * $Id: dhd_custom_gpio.c 664997 2016-10-14 11:56:35Z $
*/
#include <typedefs.h>
#include <dhd_linux.h>
#include <wlioctl.h>
-#if defined(WL_WIRELESS_EXT)
-#include <wl_iw.h>
-#endif // endif
#define WL_ERROR(x) printf x
#define WL_TRACE(x)
-#if defined(OOB_INTR_ONLY) || defined(BCMSPI_ANDROID)
+#if defined(OOB_INTR_ONLY)
#if defined(BCMLXSDMMC)
extern int sdioh_mmc_irq(int irq);
{
int host_oob_irq = 0;
+#if defined(CUSTOMER_HW2)
host_oob_irq = wifi_platform_get_irq_number(adapter, irq_flags_ptr);
+#else
+#if defined(CUSTOM_OOB_GPIO_NUM)
+ if (dhd_oob_gpio_num < 0) {
+ dhd_oob_gpio_num = CUSTOM_OOB_GPIO_NUM;
+ }
+#endif /* CUSTOM_OOB_GPIO_NUM */
+
+ if (dhd_oob_gpio_num < 0) {
+ WL_ERROR(("%s: ERROR customer specific Host GPIO is NOT defined \n",
+ __FUNCTION__));
+ return (dhd_oob_gpio_num);
+ }
+
+ WL_ERROR(("%s: customer specific Host GPIO number is (%d)\n",
+ __FUNCTION__, dhd_oob_gpio_num));
+
+#endif
+
return (host_oob_irq);
}
-#endif /* defined(OOB_INTR_ONLY) || defined(BCMSPI_ANDROID) */
+#endif
/* Customer function to control hw specific wlan gpios */
int
return err;
}
-#if 0
+#ifdef GET_CUSTOM_MAC_ENABLE
/* Function to get custom MAC address */
int
dhd_custom_get_mac_address(void *adapter, unsigned char *buf)
return -EINVAL;
/* Customer access to MAC address stored outside of DHD driver */
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35))
+#if defined(CUSTOMER_HW2) && (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35))
ret = wifi_platform_get_mac_addr(adapter, buf);
-#endif // endif
+#endif
#ifdef EXAMPLE_GET_MAC
/* EXAMPLE code */
{"TR", "TR", 0},
{"NO", "NO", 0},
#endif /* EXMAPLE_TABLE */
+#if defined(CUSTOMER_HW2)
#if defined(BCM4335_CHIP)
{"", "XZ", 11}, /* Universal if Country code is unknown or empty */
-#endif // endif
+#endif
{"AE", "AE", 1},
{"AR", "AR", 1},
{"AT", "AT", 1},
{"PS", "XZ", 11}, /* Universal if Country code is PALESTINIAN TERRITORY, OCCUPIED */
{"TL", "XZ", 11}, /* Universal if Country code is TIMOR-LESTE (EAST TIMOR) */
{"MH", "XZ", 11}, /* Universal if Country code is MARSHALL ISLANDS */
+#ifdef BCM4330_CHIP
+ {"RU", "RU", 1},
+ {"US", "US", 5}
+#endif
+#endif
};
+
/* Customized Locale convertor
* input : ISO 3166-1 country abbreviation
* output: customized cspec
cspec->rev = translate_custom_table[0].custom_locale_rev;
#endif /* EXMAPLE_TABLE */
return;
-#endif /* (defined(CUSTOMER_HW2) || defined(BOARD_HIKEY)) &&
- * (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 36))
- */
+#endif /* defined(CUSTOMER_HW2) && (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 36)) */
}
+++ /dev/null
-/*
- * Platform Dependent file for Hikey
- *
- * Copyright (C) 1999-2019, Broadcom.
- *
- * Unless you and Broadcom execute a separate written software license
- * agreement governing use of this software, this software is licensed to you
- * under the terms of the GNU General Public License version 2 (the "GPL"),
- * available at http://www.broadcom.com/licenses/GPLv2.php, with the
- * following added to such license:
- *
- * As a special exception, the copyright holders of this software give you
- * permission to link this software with independent modules, and to copy and
- * distribute the resulting executable under terms of your choice, provided that
- * you also meet, for each linked independent module, the terms and conditions of
- * the license of that module. An independent module is a module which is not
- * derived from this software. The special exception does not apply to any
- * modifications of the software.
- *
- * Notwithstanding the above, under no circumstances may you combine this
- * software in any way with any other Broadcom software provided under a license
- * other than the GPL, without Broadcom's express prior written consent.
- *
- * <<Broadcom-WL-IPTag/Open:>>
- *
- * $Id$
- *
- */
-
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/platform_device.h>
-#include <linux/delay.h>
-#include <linux/err.h>
-#include <linux/gpio.h>
-#include <linux/skbuff.h>
-#include <linux/wlan_plat.h>
-#include <linux/fcntl.h>
-#include <linux/fs.h>
-#include <linux/of_gpio.h>
-
-#ifdef CONFIG_BROADCOM_WIFI_RESERVED_MEM
-extern int dhd_init_wlan_mem(void);
-extern void *dhd_wlan_mem_prealloc(int section, unsigned long size);
-#endif /* CONFIG_BROADCOM_WIFI_RESERVED_MEM */
-
-#define WIFI_TURNON_DELAY 200
-#define WLAN_REG_ON_GPIO 491
-#define WLAN_HOST_WAKE_GPIO 493
-
-static int wlan_reg_on = -1;
-#define DHD_DT_COMPAT_ENTRY "android,bcmdhd_wlan"
-#define WIFI_WL_REG_ON_PROPNAME "wl_reg_on"
-
-static int wlan_host_wake_up = -1;
-static int wlan_host_wake_irq = 0;
-#define WIFI_WLAN_HOST_WAKE_PROPNAME "wl_host_wake"
-
-int
-dhd_wifi_init_gpio(void)
-{
- int gpio_reg_on_val;
- /* ========== WLAN_PWR_EN ============ */
- char *wlan_node = DHD_DT_COMPAT_ENTRY;
- struct device_node *root_node = NULL;
-
- root_node = of_find_compatible_node(NULL, NULL, wlan_node);
- if (root_node) {
- wlan_reg_on = of_get_named_gpio(root_node, WIFI_WL_REG_ON_PROPNAME, 0);
- wlan_host_wake_up = of_get_named_gpio(root_node, WIFI_WLAN_HOST_WAKE_PROPNAME, 0);
- } else {
- printk(KERN_ERR "failed to get device node of BRCM WLAN, use default GPIOs\n");
- wlan_reg_on = WLAN_REG_ON_GPIO;
- wlan_host_wake_up = WLAN_HOST_WAKE_GPIO;
- }
-
- /* ========== WLAN_PWR_EN ============ */
- printk(KERN_INFO "%s: gpio_wlan_power : %d\n", __FUNCTION__, wlan_reg_on);
-
- /*
- * For reg_on, gpio_request will fail if the gpio is configured to output-high
- * in the dts using gpio-hog, so do not return error for failure.
- */
- if (gpio_request_one(wlan_reg_on, GPIOF_OUT_INIT_HIGH, "WL_REG_ON")) {
- printk(KERN_ERR "%s: Failed to request gpio %d for WL_REG_ON, "
- "might have configured in the dts\n",
- __FUNCTION__, wlan_reg_on);
- } else {
- printk(KERN_ERR "%s: gpio_request WL_REG_ON done - WLAN_EN: GPIO %d\n",
- __FUNCTION__, wlan_reg_on);
- }
-
- gpio_reg_on_val = gpio_get_value(wlan_reg_on);
- printk(KERN_INFO "%s: Initial WL_REG_ON: [%d]\n",
- __FUNCTION__, gpio_get_value(wlan_reg_on));
-
- if (gpio_reg_on_val == 0) {
- printk(KERN_INFO "%s: WL_REG_ON is LOW, drive it HIGH\n", __FUNCTION__);
- if (gpio_direction_output(wlan_reg_on, 1)) {
- printk(KERN_ERR "%s: WL_REG_ON is failed to pull up\n", __FUNCTION__);
- return -EIO;
- }
- }
-
- printk(KERN_ERR "%s: WL_REG_ON is pulled up\n", __FUNCTION__);
-
- /* Wait for WIFI_TURNON_DELAY due to power stability */
- msleep(WIFI_TURNON_DELAY);
-
- /* ========== WLAN_HOST_WAKE ============ */
- printk(KERN_INFO "%s: gpio_wlan_host_wake : %d\n", __FUNCTION__, wlan_host_wake_up);
-
- if (gpio_request_one(wlan_host_wake_up, GPIOF_IN, "WLAN_HOST_WAKE")) {
- printk(KERN_ERR "%s: Failed to request gpio %d for WLAN_HOST_WAKE\n",
- __FUNCTION__, wlan_host_wake_up);
- return -ENODEV;
- } else {
- printk(KERN_ERR "%s: gpio_request WLAN_HOST_WAKE done"
- " - WLAN_HOST_WAKE: GPIO %d\n",
- __FUNCTION__, wlan_host_wake_up);
- }
-
- if (gpio_direction_input(wlan_host_wake_up)) {
- printk(KERN_ERR "%s: Failed to set WL_HOST_WAKE gpio direction\n", __FUNCTION__);
- }
-
- wlan_host_wake_irq = gpio_to_irq(wlan_host_wake_up);
-
- return 0;
-}
-
-extern void kirin_pcie_power_on_atu_fixup(void) __attribute__ ((weak));
-extern int kirin_pcie_lp_ctrl(u32 enable) __attribute__ ((weak));
-
-#ifndef BOARD_HIKEY_MODULAR
-int
-dhd_wlan_power(int onoff)
-{
- printk(KERN_INFO"------------------------------------------------");
- printk(KERN_INFO"------------------------------------------------\n");
- printk(KERN_INFO"%s Enter: power %s\n", __func__, onoff ? "on" : "off");
-
- if (onoff) {
- if (gpio_direction_output(wlan_reg_on, 1)) {
- printk(KERN_ERR "%s: WL_REG_ON is failed to pull up\n", __FUNCTION__);
- return -EIO;
- }
- if (gpio_get_value(wlan_reg_on)) {
- printk(KERN_INFO"WL_REG_ON on-step-2 : [%d]\n",
- gpio_get_value(wlan_reg_on));
- } else {
- printk("[%s] gpio value is 0. We need reinit.\n", __func__);
- if (gpio_direction_output(wlan_reg_on, 1)) {
- printk(KERN_ERR "%s: WL_REG_ON is "
- "failed to pull up\n", __func__);
- }
- }
-
- /* Wait for WIFI_TURNON_DELAY due to power stability */
- msleep(WIFI_TURNON_DELAY);
-
- /*
- * Call Kiric RC ATU fixup else si_attach will fail due to
- * improper BAR0/1 address translations
- */
- if (kirin_pcie_power_on_atu_fixup) {
- kirin_pcie_power_on_atu_fixup();
- } else {
- printk(KERN_ERR "[%s] kirin_pcie_power_on_atu_fixup is NULL. "
- "REG_ON may not work\n", __func__);
- }
- /* Enable ASPM after powering ON */
- if (kirin_pcie_lp_ctrl) {
- kirin_pcie_lp_ctrl(onoff);
- } else {
- printk(KERN_ERR "[%s] kirin_pcie_lp_ctrl is NULL. "
- "ASPM may not work\n", __func__);
- }
- } else {
- /* Disable ASPM before powering off */
- if (kirin_pcie_lp_ctrl) {
- kirin_pcie_lp_ctrl(onoff);
- } else {
- printk(KERN_ERR "[%s] kirin_pcie_lp_ctrl is NULL. "
- "ASPM may not work\n", __func__);
- }
- if (gpio_direction_output(wlan_reg_on, 0)) {
- printk(KERN_ERR "%s: WL_REG_ON is failed to pull up\n", __FUNCTION__);
- return -EIO;
- }
- if (gpio_get_value(wlan_reg_on)) {
- printk(KERN_INFO"WL_REG_ON on-step-2 : [%d]\n",
- gpio_get_value(wlan_reg_on));
- }
- }
- return 0;
-}
-EXPORT_SYMBOL(dhd_wlan_power);
-#endif /* BOARD_HIKEY_MODULAR */
-
-static int
-dhd_wlan_reset(int onoff)
-{
- return 0;
-}
-
-static int
-dhd_wlan_set_carddetect(int val)
-{
- return 0;
-}
-
-#ifdef BCMSDIO
-static int dhd_wlan_get_wake_irq(void)
-{
- return gpio_to_irq(wlan_host_wake_up);
-}
-#endif /* BCMSDIO */
-
-#if defined(CONFIG_BCMDHD_OOB_HOST_WAKE) && defined(CONFIG_BCMDHD_GET_OOB_STATE)
-int
-dhd_get_wlan_oob_gpio(void)
-{
- return gpio_is_valid(wlan_host_wake_up) ?
- gpio_get_value(wlan_host_wake_up) : -1;
-}
-EXPORT_SYMBOL(dhd_get_wlan_oob_gpio);
-#endif /* CONFIG_BCMDHD_OOB_HOST_WAKE && CONFIG_BCMDHD_GET_OOB_STATE */
-
-struct resource dhd_wlan_resources = {
- .name = "bcmdhd_wlan_irq",
- .start = 0, /* Dummy */
- .end = 0, /* Dummy */
- .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_SHAREABLE |
- IORESOURCE_IRQ_HIGHEDGE,
-};
-EXPORT_SYMBOL(dhd_wlan_resources);
-
-struct wifi_platform_data dhd_wlan_control = {
-#ifndef BOARD_HIKEY_MODULAR
- .set_power = dhd_wlan_power,
-#endif /* BOARD_HIKEY_MODULAR */
- .set_reset = dhd_wlan_reset,
- .set_carddetect = dhd_wlan_set_carddetect,
-#ifdef CONFIG_BROADCOM_WIFI_RESERVED_MEM
- .mem_prealloc = dhd_wlan_mem_prealloc,
-#endif /* CONFIG_BROADCOM_WIFI_RESERVED_MEM */
-#ifdef BCMSDIO
- .get_wake_irq = dhd_wlan_get_wake_irq,
-#endif // endif
-};
-EXPORT_SYMBOL(dhd_wlan_control);
-
-int
-dhd_wlan_init(void)
-{
- int ret;
-
- printk(KERN_INFO"%s: START.......\n", __FUNCTION__);
- ret = dhd_wifi_init_gpio();
- if (ret < 0) {
- printk(KERN_ERR "%s: failed to initiate GPIO, ret=%d\n",
- __FUNCTION__, ret);
- goto fail;
- }
-
- dhd_wlan_resources.start = wlan_host_wake_irq;
- dhd_wlan_resources.end = wlan_host_wake_irq;
-
-#ifdef CONFIG_BROADCOM_WIFI_RESERVED_MEM
- ret = dhd_init_wlan_mem();
- if (ret < 0) {
- printk(KERN_ERR "%s: failed to alloc reserved memory,"
- " ret=%d\n", __FUNCTION__, ret);
- }
-#endif /* CONFIG_BROADCOM_WIFI_RESERVED_MEM */
-
-fail:
- printk(KERN_INFO"%s: FINISH.......\n", __FUNCTION__);
- return ret;
-}
-
-int
-dhd_wlan_deinit(void)
-{
- gpio_free(wlan_host_wake_up);
- gpio_free(wlan_reg_on);
- return 0;
-}
-#ifndef BOARD_HIKEY_MODULAR
-/* Required only for Built-in DHD */
-device_initcall(dhd_wlan_init);
-#endif /* BOARD_HIKEY_MODULAR */
/*
* Platform Dependent file for usage of Preallocted Memory
*
- * Copyright (C) 1999-2019, Broadcom.
- *
+ * Copyright (C) 1999-2017, Broadcom Corporation
+ *
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
* under the terms of the GNU General Public License version 2 (the "GPL"),
* available at http://www.broadcom.com/licenses/GPLv2.php, with the
* following added to such license:
- *
+ *
* As a special exception, the copyright holders of this software give you
* permission to link this software with independent modules, and to copy and
* distribute the resulting executable under terms of your choice, provided that
* the license of that module. An independent module is a module which is not
* derived from this software. The special exception does not apply to any
* modifications of the software.
- *
+ *
* Notwithstanding the above, under no circumstances may you combine this
* software in any way with any other Broadcom software provided under a license
* other than the GPL, without Broadcom's express prior written consent.
*
* <<Broadcom-WL-IPTag/Open:>>
*
- * $Id: dhd_custom_memprealloc.c 805764 2019-02-20 08:46:57Z $
+ * $Id: dhd_custom_memprealloc.c 707595 2017-06-28 08:28:30Z $
*/
#include <linux/device.h>
#define WLAN_STATIC_DHD_LOG_DUMP_BUF 15
#define WLAN_STATIC_DHD_LOG_DUMP_BUF_EX 16
#define WLAN_STATIC_DHD_PKTLOG_DUMP_BUF 17
+#define WLAN_STATIC_STAT_REPORT_BUF 18
#define WLAN_SCAN_BUF_SIZE (64 * 1024)
#define WLAN_DHD_WLFC_BUF_SIZE (16 * 1024)
#define WLAN_DHD_IF_FLOW_LKUP_SIZE (20 * 1024)
#endif /* CONFIG_64BIT */
-/* Have 2MB ramsize to accomodate future chips */
-#define WLAN_DHD_MEMDUMP_SIZE (2048 * 1024)
+#define WLAN_DHD_MEMDUMP_SIZE (1536 * 1024)
#define PREALLOC_WLAN_SEC_NUM 4
#define PREALLOC_WLAN_BUF_NUM 160
#define WLAN_DHD_PKTID_IOCTL_MAP_SIZE ((WLAN_DHD_PKTID_IOCTL_MAP_HDR_SIZE) + \
((WLAN_MAX_PKTID_IOCTL_ITEMS+1) * WLAN_DHD_PKTID_IOCTL_MAP_ITEM_SIZE))
-#define DHD_LOG_DUMP_BUF_SIZE (1024 * 1024 * 4)
-#define DHD_LOG_DUMP_BUF_EX_SIZE (1024 * 1024 * 4)
+#define DHD_LOG_DUMP_BUF_SIZE (1024 * 1024)
+#define DHD_LOG_DUMP_BUF_EX_SIZE (8 * 1024)
#define DHD_PKTLOG_DUMP_BUF_SIZE (64 * 1024)
+#define DHD_STAT_REPORT_BUF_SIZE (128 * 1024)
+
#define WLAN_DHD_WLFC_HANGER_MAXITEMS 3072
#define WLAN_DHD_WLFC_HANGER_ITEM_SIZE 32
#define WLAN_DHD_WLFC_HANGER_SIZE ((WLAN_DHD_WLFC_HANGER_ITEM_SIZE) + \
static void *wlan_static_dhd_log_dump_buf = NULL;
static void *wlan_static_dhd_log_dump_buf_ex = NULL;
static void *wlan_static_dhd_pktlog_dump_buf = NULL;
+static void *wlan_static_stat_report_buf = NULL;
+
+#define GET_STATIC_BUF(section, config_size, req_size, buf) ({ \
+ void *__ret; \
+ if ((req_size) > (config_size)) { \
+ pr_err("request " #section " size(%lu) is bigger than" \
+ " static size(%d)\n", \
+ req_size, config_size); \
+ __ret = NULL; \
+ } else { \
+ __ret = (buf); \
+ } \
+ __ret; \
+})
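+
+/*
+ * GET_STATIC_BUF() is a GNU statement expression: it evaluates to the
+ * preallocated buffer when the requested size fits the configured size,
+ * and to NULL (after logging the mismatch) otherwise. Typical use:
+ *
+ *   return GET_STATIC_BUF(WLAN_STATIC_STAT_REPORT_BUF,
+ *       DHD_STAT_REPORT_BUF_SIZE, size, wlan_static_stat_report_buf);
+ */
+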
void
*dhd_wlan_mem_prealloc(int section, unsigned long size)
return wlan_static_dhd_pktid_map;
}
+
if (section == WLAN_STATIC_DHD_PKTID_IOCTL_MAP) {
if (size > WLAN_DHD_PKTID_IOCTL_MAP_SIZE) {
pr_err("request DHD_PKTID_IOCTL_MAP size(%lu) is bigger than"
return wlan_static_dhd_pktlog_dump_buf;
}
+ if (section == WLAN_STATIC_STAT_REPORT_BUF) {
+ return GET_STATIC_BUF(WLAN_STATIC_STAT_REPORT_BUF,
+ DHD_STAT_REPORT_BUF_SIZE, size, wlan_static_stat_report_buf);
+ }
+
if ((section < 0) || (section >= PREALLOC_WLAN_SEC_NUM)) {
return NULL;
}
int i;
int j;
-#if !defined(CONFIG_BCMDHD_PCIE)
for (i = 0; i < DHD_SKB_1PAGE_BUF_NUM; i++) {
- wlan_static_skb[i] = __dev_alloc_skb(DHD_SKB_1PAGE_BUFSIZE, GFP_KERNEL);
+ wlan_static_skb[i] = dev_alloc_skb(DHD_SKB_1PAGE_BUFSIZE);
if (!wlan_static_skb[i]) {
goto err_skb_alloc;
}
}
-#endif /* !CONFIG_BCMDHD_PCIE */
for (i = DHD_SKB_1PAGE_BUF_NUM; i < WLAN_SKB_1_2PAGE_BUF_NUM; i++) {
- wlan_static_skb[i] = __dev_alloc_skb(DHD_SKB_2PAGE_BUFSIZE, GFP_KERNEL);
+ wlan_static_skb[i] = dev_alloc_skb(DHD_SKB_2PAGE_BUFSIZE);
if (!wlan_static_skb[i]) {
goto err_skb_alloc;
}
}
#if !defined(CONFIG_BCMDHD_PCIE)
- wlan_static_skb[i] = __dev_alloc_skb(DHD_SKB_4PAGE_BUFSIZE, GFP_KERNEL);
+ wlan_static_skb[i] = dev_alloc_skb(DHD_SKB_4PAGE_BUFSIZE);
if (!wlan_static_skb[i]) {
goto err_skb_alloc;
}
goto err_mem_alloc;
}
+ wlan_static_stat_report_buf = kmalloc(DHD_STAT_REPORT_BUF_SIZE, GFP_KERNEL);
+ if (!wlan_static_stat_report_buf) {
+ pr_err("Failed to alloc wlan_static_stat_report_buf\n");
+ goto err_mem_alloc;
+ }
+
pr_err("%s: WIFI MEM Allocated\n", __FUNCTION__);
return 0;
kfree(wlan_static_dhd_pktlog_dump_buf);
}
+ if (wlan_static_stat_report_buf) {
+ kfree(wlan_static_stat_report_buf);
+ }
+
pr_err("Failed to mem_alloc for WLAN\n");
for (j = 0; j < i; j++) {
return -ENOMEM;
}
EXPORT_SYMBOL(dhd_init_wlan_mem);
-
-void
-dhd_exit_wlan_mem(void)
-{
- int i = 0;
-
-#ifdef CONFIG_BCMDHD_PREALLOC_MEMDUMP
- if (wlan_static_dhd_memdump_ram) {
- kfree(wlan_static_dhd_memdump_ram);
- }
-
-#endif /* CONFIG_BCMDHD_PREALLOC_MEMDUMP */
-
-#ifdef CONFIG_BCMDHD_PCIE
- if (wlan_static_if_flow_lkup) {
- kfree(wlan_static_if_flow_lkup);
- }
-
-#ifdef CONFIG_BCMDHD_PREALLOC_PKTIDMAP
- if (wlan_static_dhd_pktid_map) {
- kfree(wlan_static_dhd_pktid_map);
- }
-
- if (wlan_static_dhd_pktid_ioctl_map) {
- kfree(wlan_static_dhd_pktid_ioctl_map);
- }
-#endif /* CONFIG_BCMDHD_PREALLOC_PKTIDMAP */
-#else
- if (wlan_static_dhd_wlfc_buf) {
- kfree(wlan_static_dhd_wlfc_buf);
- }
-
- if (wlan_static_dhd_wlfc_hanger) {
- kfree(wlan_static_dhd_wlfc_hanger);
- }
-#endif /* CONFIG_BCMDHD_PCIE */
- if (wlan_static_dhd_info_buf) {
- kfree(wlan_static_dhd_info_buf);
- }
-
- if (wlan_static_dhd_log_dump_buf) {
- kfree(wlan_static_dhd_log_dump_buf);
- }
-
- if (wlan_static_dhd_log_dump_buf_ex) {
- kfree(wlan_static_dhd_log_dump_buf_ex);
- }
-
- if (wlan_static_scan_buf1) {
- kfree(wlan_static_scan_buf1);
- }
-
- if (wlan_static_scan_buf0) {
- kfree(wlan_static_scan_buf0);
- }
-
- if (wlan_static_dhd_pktlog_dump_buf) {
- kfree(wlan_static_dhd_pktlog_dump_buf);
- }
-
- pr_err("Failed to mem_alloc for WLAN\n");
-
- for (i = 0; i < PREALLOC_WLAN_SEC_NUM; i++) {
- if (wlan_mem_array[i].mem_ptr) {
- kfree(wlan_mem_array[i].mem_ptr);
- }
- }
-
- pr_err("Failed to skb_alloc for WLAN\n");
- for (i = 0; i < WLAN_SKB_BUF_NUM; i++) {
- dev_kfree_skb(wlan_static_skb[i]);
- }
-
- return;
-}
-EXPORT_SYMBOL(dhd_exit_wlan_mem);
#endif /* CONFIG_BROADCOM_WIFI_RESERVED_MEM */
--- /dev/null
+/*
+ * Platform Dependent file for Qualcomm MSM/APQ
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id: dhd_custom_msm.c 674523 2016-12-09 04:05:27Z $
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/gpio.h>
+#include <linux/skbuff.h>
+#include <linux/wlan_plat.h>
+#include <linux/mmc/host.h>
+#include <linux/msm_pcie.h>
+#include <linux/fcntl.h>
+#include <linux/fs.h>
+#include <linux/of_gpio.h>
+#if defined(CONFIG_ARCH_MSM8996) || defined(CONFIG_ARCH_MSM8998)
+#include <linux/msm_pcie.h>
+#endif /* CONFIG_ARCH_MSM8996 || CONFIG_ARCH_MSM8998 */
+
+#ifdef CONFIG_BROADCOM_WIFI_RESERVED_MEM
+extern int dhd_init_wlan_mem(void);
+extern void *dhd_wlan_mem_prealloc(int section, unsigned long size);
+#endif /* CONFIG_BROADCOM_WIFI_RESERVED_MEM */
+
+#define WIFI_TURNON_DELAY 200
+static int wlan_reg_on = -1;
+#define DHD_DT_COMPAT_ENTRY "android,bcmdhd_wlan"
+#ifdef CUSTOMER_HW2
+#define WIFI_WL_REG_ON_PROPNAME "wl_reg_on"
+#else
+#define WIFI_WL_REG_ON_PROPNAME "wlan-en-gpio"
+#endif /* CUSTOMER_HW2 */
+
+#if defined(CONFIG_ARCH_MSM8996) || defined(CONFIG_ARCH_MSM8998)
+#define MSM_PCIE_CH_NUM 0
+#else
+#define MSM_PCIE_CH_NUM 1
+#endif /* CONFIG_ARCH_MSM8996 || CONFIG_ARCH_MSM8998 */
+
+#ifdef CONFIG_BCMDHD_OOB_HOST_WAKE
+static int wlan_host_wake_up = -1;
+static int wlan_host_wake_irq = 0;
+#ifdef CUSTOMER_HW2
+#define WIFI_WLAN_HOST_WAKE_PROPNAME "wl_host_wake"
+#else
+#define WIFI_WLAN_HOST_WAKE_PROPNAME "wlan-host-wake-gpio"
+#endif /* CUSTOMER_HW2 */
+#endif /* CONFIG_BCMDHD_OOB_HOST_WAKE */
+
+int __init
+dhd_wifi_init_gpio(void)
+{
+ char *wlan_node = DHD_DT_COMPAT_ENTRY;
+ struct device_node *root_node = NULL;
+
+ root_node = of_find_compatible_node(NULL, NULL, wlan_node);
+ if (!root_node) {
+ WARN(1, "failed to get device node of BRCM WLAN\n");
+ return -ENODEV;
+ }
+
+ /* ========== WLAN_PWR_EN ============ */
+ wlan_reg_on = of_get_named_gpio(root_node, WIFI_WL_REG_ON_PROPNAME, 0);
+ printk(KERN_INFO "%s: gpio_wlan_power : %d\n", __FUNCTION__, wlan_reg_on);
+
+ if (gpio_request_one(wlan_reg_on, GPIOF_OUT_INIT_LOW, "WL_REG_ON")) {
+ printk(KERN_ERR "%s: Failed to request gpio %d for WL_REG_ON\n",
+ __FUNCTION__, wlan_reg_on);
+ } else {
+ printk(KERN_ERR "%s: gpio_request WL_REG_ON done - WLAN_EN: GPIO %d\n",
+ __FUNCTION__, wlan_reg_on);
+ }
+
+ if (gpio_direction_output(wlan_reg_on, 1)) {
+ printk(KERN_ERR "%s: WL_REG_ON failed to pull up\n", __FUNCTION__);
+ } else {
+ printk(KERN_ERR "%s: WL_REG_ON is pulled up\n", __FUNCTION__);
+ }
+
+ if (gpio_get_value(wlan_reg_on)) {
+ printk(KERN_INFO "%s: Initial WL_REG_ON: [%d]\n",
+ __FUNCTION__, gpio_get_value(wlan_reg_on));
+ }
+
+ /* Wait for WIFI_TURNON_DELAY due to power stability */
+ msleep(WIFI_TURNON_DELAY);
+
+#ifdef CONFIG_BCMDHD_OOB_HOST_WAKE
+ /* ========== WLAN_HOST_WAKE ============ */
+ wlan_host_wake_up = of_get_named_gpio(root_node, WIFI_WLAN_HOST_WAKE_PROPNAME, 0);
+ printk(KERN_INFO "%s: gpio_wlan_host_wake : %d\n", __FUNCTION__, wlan_host_wake_up);
+
+#ifndef CUSTOMER_HW2
+ if (gpio_request_one(wlan_host_wake_up, GPIOF_IN, "WLAN_HOST_WAKE")) {
+ printk(KERN_ERR "%s: Failed to request gpio %d for WLAN_HOST_WAKE\n",
+ __FUNCTION__, wlan_host_wake_up);
+ return -ENODEV;
+ } else {
+ printk(KERN_ERR "%s: gpio_request WLAN_HOST_WAKE done"
+ " - WLAN_HOST_WAKE: GPIO %d\n",
+ __FUNCTION__, wlan_host_wake_up);
+ }
+#endif /* !CUSTOMER_HW2 */
+
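+ /* Configure the host-wake GPIO as an input and resolve its IRQ; the IRQ
+ * number is later exported to the DHD core via dhd_wlan_resources
+ */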
+ gpio_direction_input(wlan_host_wake_up);
+ wlan_host_wake_irq = gpio_to_irq(wlan_host_wake_up);
+#endif /* CONFIG_BCMDHD_OOB_HOST_WAKE */
+
+#if defined(CONFIG_BCM4359) || defined(CONFIG_BCM4361)
+ printk(KERN_INFO "%s: Call msm_pcie_enumerate\n", __FUNCTION__);
+ msm_pcie_enumerate(MSM_PCIE_CH_NUM);
+#endif /* CONFIG_BCM4359 || CONFIG_BCM4361 */
+
+ return 0;
+}
+
+int
+dhd_wlan_power(int onoff)
+{
+ printk(KERN_INFO"------------------------------------------------");
+ printk(KERN_INFO"------------------------------------------------\n");
+ printk(KERN_INFO"%s Enter: power %s\n", __func__, onoff ? "on" : "off");
+
+ if (onoff) {
+ if (gpio_direction_output(wlan_reg_on, 1)) {
+ printk(KERN_ERR "%s: WL_REG_ON is failed to pull up\n", __FUNCTION__);
+ return -EIO;
+ }
+ if (gpio_get_value(wlan_reg_on)) {
+ printk(KERN_INFO"WL_REG_ON on-step-2 : [%d]\n",
+ gpio_get_value(wlan_reg_on));
+ } else {
+ printk("[%s] gpio value is 0. We need reinit.\n", __func__);
+ if (gpio_direction_output(wlan_reg_on, 1)) {
+ printk(KERN_ERR "%s: WL_REG_ON is "
+ "failed to pull up\n", __func__);
+ }
+ }
+ } else {
+ if (gpio_direction_output(wlan_reg_on, 0)) {
+ printk(KERN_ERR "%s: WL_REG_ON is failed to pull up\n", __FUNCTION__);
+ return -EIO;
+ }
+ if (gpio_get_value(wlan_reg_on)) {
+ printk(KERN_INFO"WL_REG_ON on-step-2 : [%d]\n",
+ gpio_get_value(wlan_reg_on));
+ }
+ }
+ return 0;
+}
+EXPORT_SYMBOL(dhd_wlan_power);
+
+static int
+dhd_wlan_reset(int onoff)
+{
+ return 0;
+}
+
+static int
+dhd_wlan_set_carddetect(int val)
+{
+ return 0;
+}
+
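+/* IRQ resource handed to the DHD core; with CONFIG_BCMDHD_OOB_HOST_WAKE the
+ * start/end fields are filled with the host-wake IRQ in dhd_wlan_init()
+ */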
+struct resource dhd_wlan_resources = {
+ .name = "bcmdhd_wlan_irq",
+ .start = 0, /* Dummy */
+ .end = 0, /* Dummy */
+ .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_SHAREABLE |
+#ifdef CONFIG_BCMDHD_PCIE
+ IORESOURCE_IRQ_HIGHEDGE,
+#else
+ IORESOURCE_IRQ_HIGHLEVEL,
+#endif /* CONFIG_BCMDHD_PCIE */
+};
+EXPORT_SYMBOL(dhd_wlan_resources);
+
+struct wifi_platform_data dhd_wlan_control = {
+ .set_power = dhd_wlan_power,
+ .set_reset = dhd_wlan_reset,
+ .set_carddetect = dhd_wlan_set_carddetect,
+#ifdef CONFIG_BROADCOM_WIFI_RESERVED_MEM
+ .mem_prealloc = dhd_wlan_mem_prealloc,
+#endif /* CONFIG_BROADCOM_WIFI_RESERVED_MEM */
+};
+EXPORT_SYMBOL(dhd_wlan_control);
+
+int __init
+dhd_wlan_init(void)
+{
+ int ret;
+
+ printk(KERN_INFO"%s: START.......\n", __FUNCTION__);
+ ret = dhd_wifi_init_gpio();
+ if (ret < 0) {
+ printk(KERN_ERR "%s: failed to initiate GPIO, ret=%d\n",
+ __FUNCTION__, ret);
+ goto fail;
+ }
+
+#ifdef CONFIG_BCMDHD_OOB_HOST_WAKE
+ dhd_wlan_resources.start = wlan_host_wake_irq;
+ dhd_wlan_resources.end = wlan_host_wake_irq;
+#endif /* CONFIG_BCMDHD_OOB_HOST_WAKE */
+
+#ifdef CONFIG_BROADCOM_WIFI_RESERVED_MEM
+ ret = dhd_init_wlan_mem();
+ if (ret < 0) {
+ printk(KERN_ERR "%s: failed to alloc reserved memory,"
+ " ret=%d\n", __FUNCTION__, ret);
+ }
+#endif /* CONFIG_BROADCOM_WIFI_RESERVED_MEM */
+
+fail:
+ printk(KERN_INFO"%s: FINISH.......\n", __FUNCTION__);
+ return ret;
+}
+#if defined(CONFIG_ARCH_MSM8996) || defined(CONFIG_ARCH_MSM8998)
+#if defined(CONFIG_DEFERRED_INITCALLS)
+deferred_module_init(dhd_wlan_init);
+#else
+late_initcall(dhd_wlan_init);
+#endif /* CONFIG_DEFERRED_INITCALLS */
+#else
+device_initcall(dhd_wlan_init);
+#endif /* CONFIG_ARCH_MSM8996 || CONFIG_ARCH_MSM8998 */
/*
* Debug/trace/assert driver definitions for Dongle Host Driver.
*
- * Copyright (C) 1999-2019, Broadcom.
- *
+ * Copyright (C) 1999-2017, Broadcom Corporation
+ *
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
* under the terms of the GNU General Public License version 2 (the "GPL"),
* available at http://www.broadcom.com/licenses/GPLv2.php, with the
* following added to such license:
- *
+ *
* As a special exception, the copyright holders of this software give you
* permission to link this software with independent modules, and to copy and
* distribute the resulting executable under terms of your choice, provided that
* the license of that module. An independent module is a module which is not
* derived from this software. The special exception does not apply to any
* modifications of the software.
- *
+ *
* Notwithstanding the above, under no circumstances may you combine this
* software in any way with any other Broadcom software provided under a license
* other than the GPL, without Broadcom's express prior written consent.
*
* <<Broadcom-WL-IPTag/Open:>>
*
- * $Id: dhd_dbg.h 798329 2019-01-08 05:40:39Z $
+ * $Id: dhd_dbg.h 667145 2016-10-26 04:27:53Z $
*/
#ifndef _dhd_dbg_
#define _dhd_dbg_
+
+#if defined(DHD_EFI) && defined(DHD_LOG_DUMP)
+extern void dhd_log_dump_print(const char *fmt, ...);
+extern void dhd_log_dump_print_drv(const char *fmt, ...);
+#endif
+
+#if defined(DHD_DEBUG)
+
#ifdef DHD_LOG_DUMP
+extern void dhd_log_dump_write(int type, const char *fmt, ...);
extern char *dhd_log_dump_get_timestamp(void);
-extern void dhd_log_dump_write(int type, char *binary_data,
- int binary_len, const char *fmt, ...);
#ifndef _DHD_LOG_DUMP_DEFINITIONS_
#define _DHD_LOG_DUMP_DEFINITIONS_
-#define GENERAL_LOG_HDR "\n-------------------- General log ---------------------------\n"
-#define PRESERVE_LOG_HDR "\n-------------------- Preserve log ---------------------------\n"
-#define SPECIAL_LOG_HDR "\n-------------------- Special log ---------------------------\n"
-#define DHD_DUMP_LOG_HDR "\n-------------------- 'dhd dump' log -----------------------\n"
-#define EXT_TRAP_LOG_HDR "\n-------------------- Extended trap data -------------------\n"
-#define HEALTH_CHK_LOG_HDR "\n-------------------- Health check data --------------------\n"
-#ifdef DHD_DUMP_PCIE_RINGS
-#define FLOWRING_DUMP_HDR "\n-------------------- Flowring dump --------------------\n"
-#endif /* DHD_DUMP_PCIE_RINGS */
-#define DHD_LOG_DUMP_WRITE(fmt, ...) \
- dhd_log_dump_write(DLD_BUF_TYPE_GENERAL, NULL, 0, fmt, ##__VA_ARGS__)
-#define DHD_LOG_DUMP_WRITE_EX(fmt, ...) \
- dhd_log_dump_write(DLD_BUF_TYPE_SPECIAL, NULL, 0, fmt, ##__VA_ARGS__)
-#define DHD_LOG_DUMP_WRITE_PRSRV(fmt, ...) \
- dhd_log_dump_write(DLD_BUF_TYPE_PRESERVE, NULL, 0, fmt, ##__VA_ARGS__)
+#define DLD_BUF_TYPE_GENERAL 0
+#define DLD_BUF_TYPE_SPECIAL 1
+#define DHD_LOG_DUMP_WRITE(fmt, ...) dhd_log_dump_write(DLD_BUF_TYPE_GENERAL, fmt, ##__VA_ARGS__)
+#define DHD_LOG_DUMP_WRITE_EX(fmt, ...) dhd_log_dump_write(DLD_BUF_TYPE_SPECIAL, fmt, ##__VA_ARGS__)
#endif /* !_DHD_LOG_DUMP_DEFINITIONS_ */
-#define CONCISE_DUMP_BUFLEN 16 * 1024
-#define ECNTRS_LOG_HDR "\n-------------------- Ecounters log --------------------------\n"
-#ifdef DHD_STATUS_LOGGING
-#define STATUS_LOG_HDR "\n-------------------- Status log -----------------------\n"
-#endif /* DHD_STATUS_LOGGING */
-#define RTT_LOG_HDR "\n-------------------- RTT log --------------------------\n"
-#define COOKIE_LOG_HDR "\n-------------------- Cookie List ----------------------------\n"
-#endif /* DHD_LOG_DUMP */
-
-#if defined(DHD_DEBUG)
-/* NON-NDIS cases */
-#ifdef DHD_LOG_DUMP
-/* Common case for EFI and non EFI */
+#ifdef DHD_EFI
#define DHD_ERROR(args) \
do { \
if (dhd_msg_level & DHD_ERROR_VAL) { \
printf args; \
- DHD_LOG_DUMP_WRITE("[%s]: ", dhd_log_dump_get_timestamp()); \
+ dhd_log_dump_print_drv("[%s] %s: ", dhd_log_dump_get_timestamp(), __FUNCTION__); \
+ dhd_log_dump_print_drv args; \
+ } \
+} while (0)
+#define DHD_INFO(args) \
+do { \
+ if (dhd_msg_level & DHD_INFO_VAL) { \
+ printf args; \
+ dhd_log_dump_print_drv("[%s] %s: ", dhd_log_dump_get_timestamp(), __FUNCTION__); \
+ dhd_log_dump_print_drv args; \
+ } \
+} while (0)
+#else /* DHD_EFI */
+#define DHD_ERROR(args) \
+do { \
+ if (dhd_msg_level & DHD_ERROR_VAL) { \
+ printf args; \
+ DHD_LOG_DUMP_WRITE("[%s] %s: ", dhd_log_dump_get_timestamp(), __func__); \
DHD_LOG_DUMP_WRITE args; \
} \
} while (0)
-
-/* !defined(DHD_EFI) and defined(DHD_LOG_DUMP) */
#define DHD_INFO(args) do {if (dhd_msg_level & DHD_INFO_VAL) printf args;} while (0)
+#endif /* DHD_EFI */
#else /* DHD_LOG_DUMP */
-/* !defined(DHD_LOG_DUMP cases) */
#define DHD_ERROR(args) do {if (dhd_msg_level & DHD_ERROR_VAL) printf args;} while (0)
#define DHD_INFO(args) do {if (dhd_msg_level & DHD_INFO_VAL) printf args;} while (0)
#endif /* DHD_LOG_DUMP */
-
#define DHD_TRACE(args) do {if (dhd_msg_level & DHD_TRACE_VAL) printf args;} while (0)
+#define DHD_DATA(args) do {if (dhd_msg_level & DHD_DATA_VAL) printf args;} while (0)
+#define DHD_CTL(args) do {if (dhd_msg_level & DHD_CTL_VAL) printf args;} while (0)
+#define DHD_TIMER(args) do {if (dhd_msg_level & DHD_TIMER_VAL) printf args;} while (0)
+#define DHD_HDRS(args) do {if (dhd_msg_level & DHD_HDRS_VAL) printf args;} while (0)
+#define DHD_BYTES(args) do {if (dhd_msg_level & DHD_BYTES_VAL) printf args;} while (0)
+#define DHD_INTR(args) do {if (dhd_msg_level & DHD_INTR_VAL) printf args;} while (0)
+#define DHD_GLOM(args) do {if (dhd_msg_level & DHD_GLOM_VAL) printf args;} while (0)
#ifdef DHD_LOG_DUMP
-/* LOG_DUMP defines common to EFI and NON-EFI */
-#define DHD_ERROR_MEM(args) \
+#ifndef DHD_EFI
+#define DHD_EVENT(args) \
do { \
- if (dhd_msg_level & DHD_ERROR_VAL) { \
- if (dhd_msg_level & DHD_ERROR_MEM_VAL) { \
- printf args; \
- } \
- DHD_LOG_DUMP_WRITE("[%s]: ", dhd_log_dump_get_timestamp()); \
+ if (dhd_msg_level & DHD_EVENT_VAL) { \
+ printf args; \
+ DHD_LOG_DUMP_WRITE("[%s] %s: ", dhd_log_dump_get_timestamp(), __func__); \
DHD_LOG_DUMP_WRITE args; \
} \
} while (0)
-#define DHD_IOVAR_MEM(args) \
+#else
+#define DHD_EVENT(args) \
do { \
- if (dhd_msg_level & DHD_ERROR_VAL) { \
- if (dhd_msg_level & DHD_IOVAR_MEM_VAL) { \
- printf args; \
- } \
- DHD_LOG_DUMP_WRITE("[%s]: ", dhd_log_dump_get_timestamp()); \
- DHD_LOG_DUMP_WRITE args; \
+ if (dhd_msg_level & DHD_EVENT_VAL) { \
+ dhd_log_dump_print("[%s] %s: ", dhd_log_dump_get_timestamp(), __FUNCTION__); \
+ dhd_log_dump_print args; \
} \
} while (0)
-#define DHD_LOG_MEM(args) \
+#endif /* !DHD_EFI */
+#else
+#define DHD_EVENT(args) do {if (dhd_msg_level & DHD_EVENT_VAL) printf args;} while (0)
+#endif /* DHD_LOG_DUMP */
+#define DHD_ISCAN(args) do {if (dhd_msg_level & DHD_ISCAN_VAL) printf args;} while (0)
+#define DHD_ARPOE(args) do {if (dhd_msg_level & DHD_ARPOE_VAL) printf args;} while (0)
+#define DHD_REORDER(args) do {if (dhd_msg_level & DHD_REORDER_VAL) printf args;} while (0)
+#define DHD_PNO(args) do {if (dhd_msg_level & DHD_PNO_VAL) printf args;} while (0)
+#define DHD_RTT(args) do {if (dhd_msg_level & DHD_RTT_VAL) printf args;} while (0)
+#define DHD_PKT_MON(args) do {if (dhd_msg_level & DHD_PKT_MON_VAL) printf args;} while (0)
+#ifdef DHD_LOG_DUMP
+#ifndef DHD_EFI
+#define DHD_MSGTRACE_LOG(args) \
do { \
- if (dhd_msg_level & DHD_ERROR_VAL) { \
- DHD_LOG_DUMP_WRITE("[%s]: ", dhd_log_dump_get_timestamp()); \
+ if (dhd_msg_level & DHD_MSGTRACE_VAL) { \
+ printf args; \
+ DHD_LOG_DUMP_WRITE("[%s] %s: ", dhd_log_dump_get_timestamp(), __func__); \
DHD_LOG_DUMP_WRITE args; \
- } \
+ } \
} while (0)
-/* NON-EFI builds with LOG DUMP enabled */
-#define DHD_EVENT(args) \
+#else
+#define DHD_MSGTRACE_LOG(args) \
do { \
- if (dhd_msg_level & DHD_EVENT_VAL) { \
- printf args; \
- DHD_LOG_DUMP_WRITE("[%s]: ", dhd_log_dump_get_timestamp()); \
- DHD_LOG_DUMP_WRITE args; \
- } \
+ if (dhd_msg_level & DHD_MSGTRACE_VAL) { \
+ dhd_log_dump_print("[%s] %s: ", dhd_log_dump_get_timestamp(), __FUNCTION__); \
+ dhd_log_dump_print args; \
+ } \
} while (0)
-#define DHD_PRSRV_MEM(args) \
-do { \
- if (dhd_msg_level & DHD_EVENT_VAL) { \
- if (dhd_msg_level & DHD_PRSRV_MEM_VAL) \
- printf args; \
- DHD_LOG_DUMP_WRITE_PRSRV("[%s]: ", dhd_log_dump_get_timestamp()); \
- DHD_LOG_DUMP_WRITE_PRSRV args; \
+#endif /* !DHD_EFI */
+#else
+#define DHD_MSGTRACE_LOG(args) do {if (dhd_msg_level & DHD_MSGTRACE_VAL) printf args;} while (0)
+#endif /* DHD_LOG_DUMP */
+
+#if defined(DHD_LOG_DUMP) && defined(DHD_EFI)
+#define DHD_FWLOG(args) DHD_MSGTRACE_LOG(args)
+#else
+
+#define DHD_FWLOG(args) do {if (dhd_msg_level & DHD_FWLOG_VAL) printf args;} while (0)
+#endif /* DHD_LOG_DUMP & DHD_EFI */
+#define DHD_DBGIF(args) do {if (dhd_msg_level & DHD_DBGIF_VAL) printf args;} while (0)
+
+#ifdef DHD_LOG_DUMP
+#ifdef DHD_EFI
+#define DHD_ERROR_MEM(args) \
+do { \
+ if (dhd_msg_level & DHD_ERROR_VAL) { \
+ dhd_log_dump_print_drv("[%s] %s: ", dhd_log_dump_get_timestamp(), __FUNCTION__); \
+ dhd_log_dump_print_drv args; \
} \
} while (0)
-
-/* Re-using 'DHD_MSGTRACE_VAL' for controlling printing of ecounter binary event
-* logs to console and debug dump -- need to cleanup in the future to use separate
-* 'DHD_ECNTR_VAL' bitmap flag. 'DHD_MSGTRACE_VAL' will be defined only
-* for non-android builds.
-*/
-#define DHD_ECNTR_LOG(args) \
-do { \
- if (dhd_msg_level & DHD_EVENT_VAL) { \
- if (dhd_msg_level & DHD_MSGTRACE_VAL) { \
- printf args; \
- DHD_LOG_DUMP_WRITE("[%s]: ", dhd_log_dump_get_timestamp()); \
- DHD_LOG_DUMP_WRITE args; \
- } \
+#define DHD_ERROR_EX(args) DHD_ERROR(args)
+#else
+#define DHD_ERROR_MEM(args) \
+do { \
+ if (dhd_msg_level & DHD_ERROR_VAL) { \
+ if (dhd_msg_level & DHD_ERROR_MEM_VAL) { \
+ printf args; \
+ } \
+ DHD_LOG_DUMP_WRITE("[%s] %s: ", dhd_log_dump_get_timestamp(), __FUNCTION__); \
+ DHD_LOG_DUMP_WRITE args; \
} \
} while (0)
-
#define DHD_ERROR_EX(args) \
-do { \
+do { \
if (dhd_msg_level & DHD_ERROR_VAL) { \
printf args; \
- DHD_LOG_DUMP_WRITE_EX("[%s]: ", dhd_log_dump_get_timestamp()); \
+ DHD_LOG_DUMP_WRITE_EX("[%s] %s: ", dhd_log_dump_get_timestamp(), __FUNCTION__); \
DHD_LOG_DUMP_WRITE_EX args; \
} \
} while (0)
-
-#define DHD_MSGTRACE_LOG(args) \
-do { \
- if (dhd_msg_level & DHD_MSGTRACE_VAL) { \
- printf args; \
- } \
- DHD_LOG_DUMP_WRITE("[%s]: ", dhd_log_dump_get_timestamp()); \
- DHD_LOG_DUMP_WRITE args; \
-} while (0)
-#else /* DHD_LOG_DUMP */
-/* !DHD_LOG_DUMP */
-#define DHD_MSGTRACE_LOG(args) do {if (dhd_msg_level & DHD_MSGTRACE_VAL) printf args;} while (0)
+#endif /* DHD_EFI */
+#else
#define DHD_ERROR_MEM(args) DHD_ERROR(args)
-#define DHD_IOVAR_MEM(args) DHD_ERROR(args)
-#define DHD_LOG_MEM(args) DHD_ERROR(args)
-#define DHD_EVENT(args) do {if (dhd_msg_level & DHD_EVENT_VAL) printf args;} while (0)
-#define DHD_ECNTR_LOG(args) DHD_EVENT(args)
-#define DHD_PRSRV_MEM(args) DHD_EVENT(args)
#define DHD_ERROR_EX(args) DHD_ERROR(args)
#endif /* DHD_LOG_DUMP */
-#define DHD_DATA(args) do {if (dhd_msg_level & DHD_DATA_VAL) printf args;} while (0)
-#define DHD_CTL(args) do {if (dhd_msg_level & DHD_CTL_VAL) printf args;} while (0)
-#define DHD_TIMER(args) do {if (dhd_msg_level & DHD_TIMER_VAL) printf args;} while (0)
-#define DHD_HDRS(args) do {if (dhd_msg_level & DHD_HDRS_VAL) printf args;} while (0)
-#define DHD_BYTES(args) do {if (dhd_msg_level & DHD_BYTES_VAL) printf args;} while (0)
-#define DHD_INTR(args) do {if (dhd_msg_level & DHD_INTR_VAL) printf args;} while (0)
-#define DHD_GLOM(args) do {if (dhd_msg_level & DHD_GLOM_VAL) printf args;} while (0)
-#define DHD_BTA(args) do {if (dhd_msg_level & DHD_BTA_VAL) printf args;} while (0)
-#define DHD_ISCAN(args) do {if (dhd_msg_level & DHD_ISCAN_VAL) printf args;} while (0)
-#define DHD_ARPOE(args) do {if (dhd_msg_level & DHD_ARPOE_VAL) printf args;} while (0)
-#define DHD_REORDER(args) do {if (dhd_msg_level & DHD_REORDER_VAL) printf args;} while (0)
-#define DHD_PNO(args) do {if (dhd_msg_level & DHD_PNO_VAL) printf args;} while (0)
-#define DHD_RTT(args) do {if (dhd_msg_level & DHD_RTT_VAL) printf args;} while (0)
-#define DHD_PKT_MON(args) do {if (dhd_msg_level & DHD_PKT_MON_VAL) printf args;} while (0)
-
-#if defined(DHD_LOG_DUMP)
-#if defined(DHD_LOG_PRINT_RATE_LIMIT)
-#define DHD_FWLOG(args) \
- do { \
- if (dhd_msg_level & DHD_FWLOG_VAL) { \
- if (!log_print_threshold) \
- printf args; \
- DHD_LOG_DUMP_WRITE args; \
- } \
- } while (0)
+#ifdef CUSTOMER_HW4_DEBUG
+#define DHD_TRACE_HW4 DHD_ERROR
+#define DHD_INFO_HW4 DHD_ERROR
#else
-#define DHD_FWLOG(args) \
- do { \
- if (dhd_msg_level & DHD_FWLOG_VAL) { \
- printf args; \
- DHD_LOG_DUMP_WRITE args; \
- } \
- } while (0)
-#endif // endif
-#else /* DHD_LOG_DUMP */
-#define DHD_FWLOG(args) do {if (dhd_msg_level & DHD_FWLOG_VAL) printf args;} while (0)
-#endif /* DHD_LOG_DUMP */
-
-#define DHD_DBGIF(args) do {if (dhd_msg_level & DHD_DBGIF_VAL) printf args;} while (0)
-
-#ifdef DHD_PCIE_NATIVE_RUNTIMEPM
-#define DHD_RPM(args) do {if (dhd_msg_level & DHD_RPM_VAL) printf args;} while (0)
-#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
-
#define DHD_TRACE_HW4 DHD_TRACE
#define DHD_INFO_HW4 DHD_INFO
-#define DHD_ERROR_NO_HW4 DHD_ERROR
+#endif /* CUSTOMER_HW4_DEBUG */
#define DHD_ERROR_ON() (dhd_msg_level & DHD_ERROR_VAL)
#define DHD_TRACE_ON() (dhd_msg_level & DHD_TRACE_VAL)
#define DHD_INTR_ON() (dhd_msg_level & DHD_INTR_VAL)
#define DHD_GLOM_ON() (dhd_msg_level & DHD_GLOM_VAL)
#define DHD_EVENT_ON() (dhd_msg_level & DHD_EVENT_VAL)
-#define DHD_BTA_ON() (dhd_msg_level & DHD_BTA_VAL)
#define DHD_ISCAN_ON() (dhd_msg_level & DHD_ISCAN_VAL)
#define DHD_ARPOE_ON() (dhd_msg_level & DHD_ARPOE_VAL)
#define DHD_REORDER_ON() (dhd_msg_level & DHD_REORDER_VAL)
#define DHD_DBGIF_ON() (dhd_msg_level & DHD_DBGIF_VAL)
#define DHD_PKT_MON_ON() (dhd_msg_level & DHD_PKT_MON_VAL)
#define DHD_PKT_MON_DUMP_ON() (dhd_msg_level & DHD_PKT_MON_DUMP_VAL)
-#ifdef DHD_PCIE_NATIVE_RUNTIMEPM
-#define DHD_RPM_ON() (dhd_msg_level & DHD_RPM_VAL)
-#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
#else /* defined(BCMDBG) || defined(DHD_DEBUG) */
+#if defined(DHD_EFI)
+extern void dhd_log_dump_print_drv(const char *fmt, ...);
+extern char *dhd_log_dump_get_timestamp(void);
+#define DHD_ERROR(args) \
+do { \
+ if (dhd_msg_level & DHD_ERROR_VAL) { \
+ printf args; \
+ dhd_log_dump_print_drv("[%s] %s: ", dhd_log_dump_get_timestamp(), __FUNCTION__); \
+ dhd_log_dump_print_drv args; \
+ } \
+} while (0)
+#define DHD_INFO(args) \
+do { \
+ if (dhd_msg_level & DHD_INFO_VAL) { \
+ printf args; \
+ dhd_log_dump_print_drv("[%s] %s: ", dhd_log_dump_get_timestamp(), __FUNCTION__); \
+ dhd_log_dump_print_drv args; \
+ } \
+} while (0)
+#define DHD_TRACE(args)
+#else /* DHD_EFI */
+
#define DHD_ERROR(args) do {if (dhd_msg_level & DHD_ERROR_VAL) \
printf args;} while (0)
#define DHD_TRACE(args)
#define DHD_INFO(args)
+#endif /* DHD_EFI */
#define DHD_DATA(args)
#define DHD_CTL(args)
#define DHD_INTR(args)
#define DHD_GLOM(args)
+#if defined(DHD_EFI) && defined(DHD_LOG_DUMP)
+#define DHD_EVENT(args) \
+do { \
+ if (dhd_msg_level & DHD_EVENT_VAL) { \
+ dhd_log_dump_print("[%s] %s: ", dhd_log_dump_get_timestamp(), __FUNCTION__); \
+ dhd_log_dump_print args; \
+ } \
+} while (0)
+#else
#define DHD_EVENT(args)
-#define DHD_ECNTR_LOG(args) DHD_EVENT(args)
-
-#define DHD_PRSRV_MEM(args) DHD_EVENT(args)
+#endif /* DHD_EFI && DHD_LOG_DUMP */
-#define DHD_BTA(args)
#define DHD_ISCAN(args)
#define DHD_ARPOE(args)
#define DHD_REORDER(args)
#define DHD_RTT(args)
#define DHD_PKT_MON(args)
+#if defined(DHD_EFI) && defined(DHD_LOG_DUMP)
+#define DHD_MSGTRACE_LOG(args) \
+do { \
+ if (dhd_msg_level & DHD_MSGTRACE_VAL) { \
+ dhd_log_dump_print("[%s] %s: ", dhd_log_dump_get_timestamp(), __FUNCTION__); \
+ dhd_log_dump_print args; \
+ } \
+} while (0)
+#define DHD_FWLOG(args) DHD_MSGTRACE_LOG(args)
+#else
#define DHD_MSGTRACE_LOG(args)
#define DHD_FWLOG(args)
+#endif /* DHD_EFI && DHD_LOG_DUMP */
#define DHD_DBGIF(args)
-#define DHD_ERROR_MEM(args) DHD_ERROR(args)
-#define DHD_IOVAR_MEM(args) DHD_ERROR(args)
-#define DHD_LOG_MEM(args) DHD_ERROR(args)
+#if defined(DHD_EFI) && defined(DHD_LOG_DUMP)
+#define DHD_ERROR_MEM(args) \
+do { \
+ if (dhd_msg_level & DHD_ERROR_VAL) { \
+ dhd_log_dump_print("[%s] %s: ", dhd_log_dump_get_timestamp(), __FUNCTION__); \
+ dhd_log_dump_print args; \
+ } \
+} while (0)
#define DHD_ERROR_EX(args) DHD_ERROR(args)
+#else
+#define DHD_ERROR_MEM(args) DHD_ERROR(args)
+#define DHD_ERROR_EX(args) DHD_ERROR(args)
+#endif /* DHD_EFI && DHD_LOG_DUMP */
+#ifdef CUSTOMER_HW4_DEBUG
+#define DHD_TRACE_HW4 DHD_ERROR
+#define DHD_INFO_HW4 DHD_ERROR
+#else
#define DHD_TRACE_HW4 DHD_TRACE
#define DHD_INFO_HW4 DHD_INFO
-#define DHD_ERROR_NO_HW4 DHD_ERROR
+#endif /* CUSTOMER_HW4_DEBUG */
#define DHD_ERROR_ON() 0
#define DHD_TRACE_ON() 0
#define DHD_INTR_ON() 0
#define DHD_GLOM_ON() 0
#define DHD_EVENT_ON() 0
-#define DHD_BTA_ON() 0
#define DHD_ISCAN_ON() 0
#define DHD_ARPOE_ON() 0
#define DHD_REORDER_ON() 0
#define DHD_MSGTRACE_ON() 0
#define DHD_FWLOG_ON() 0
#define DHD_DBGIF_ON() 0
-#ifdef DHD_PCIE_NATIVE_RUNTIMEPM
-#define DHD_RPM_ON() 0
-#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
-#endif // endif
-
-#define PRINT_RATE_LIMIT_PERIOD 5000000u /* 5s in units of us */
-#define DHD_ERROR_RLMT(args) \
-do { \
- if (dhd_msg_level & DHD_ERROR_VAL) { \
- static uint64 __err_ts = 0; \
- static uint32 __err_cnt = 0; \
- uint64 __cur_ts = 0; \
- __cur_ts = OSL_SYSUPTIME_US(); \
- if (__err_ts == 0 || (__cur_ts > __err_ts && \
- (__cur_ts - __err_ts > PRINT_RATE_LIMIT_PERIOD))) { \
- __err_ts = __cur_ts; \
- DHD_ERROR(args); \
- DHD_ERROR(("[Repeats %u times]\n", __err_cnt)); \
- __err_cnt = 0; \
- } else { \
- ++__err_cnt; \
- } \
- } \
-} while (0)
-
-/* even in non-BCMDBG builds, logging of dongle iovars should be available */
-#define DHD_DNGL_IOVAR_SET(args) \
- do {if (dhd_msg_level & DHD_DNGL_IOVAR_SET_VAL) printf args;} while (0)
+#endif /* DHD_DEBUG */
#define DHD_LOG(args)
#define DHD_NONE(args)
extern int dhd_msg_level;
-#ifdef DHD_LOG_PRINT_RATE_LIMIT
-extern int log_print_threshold;
-#endif /* DHD_LOG_PRINT_RATE_LIMIT */
-
-#define DHD_RTT_MEM(args) DHD_LOG_MEM(args)
-#define DHD_RTT_ERR(args) DHD_ERROR(args)
/* Defines msg bits */
#include <dhdioctl.h>
+++ /dev/null
-/*
- * DHD debug ring API and structures
- *
- * <<Broadcom-WL-IPTag/Open:>>
- *
- * Copyright (C) 1999-2019, Broadcom.
- *
- * Unless you and Broadcom execute a separate written software license
- * agreement governing use of this software, this software is licensed to you
- * under the terms of the GNU General Public License version 2 (the "GPL"),
- * available at http://www.broadcom.com/licenses/GPLv2.php, with the
- * following added to such license:
- *
- * As a special exception, the copyright holders of this software give you
- * permission to link this software with independent modules, and to copy and
- * distribute the resulting executable under terms of your choice, provided that
- * you also meet, for each linked independent module, the terms and conditions of
- * the license of that module. An independent module is a module which is not
- * derived from this software. The special exception does not apply to any
- * modifications of the software.
- *
- * Notwithstanding the above, under no circumstances may you combine this
- * software in any way with any other Broadcom software provided under a license
- * other than the GPL, without Broadcom's express prior written consent.
- *
- * $Id: dhd_dbg_ring.c 792099 2018-12-03 15:45:56Z $
- */
-#include <typedefs.h>
-#include <osl.h>
-#include <bcmutils.h>
-#include <bcmendian.h>
-#include <dngl_stats.h>
-#include <dhd.h>
-#include <dhd_dbg.h>
-#include <dhd_dbg_ring.h>
-
-int
-dhd_dbg_ring_init(dhd_pub_t *dhdp, dhd_dbg_ring_t *ring, uint16 id, uint8 *name,
- uint32 ring_sz, void *allocd_buf, bool pull_inactive)
-{
- void *buf;
- unsigned long flags = 0;
-
- if (allocd_buf == NULL) {
- return BCME_NOMEM;
- } else {
- buf = allocd_buf;
- }
-
- ring->lock = DHD_DBG_RING_LOCK_INIT(dhdp->osh);
- if (!ring->lock)
- return BCME_NOMEM;
-
- DHD_DBG_RING_LOCK(ring->lock, flags);
- ring->id = id;
- strncpy(ring->name, name, DBGRING_NAME_MAX);
- ring->name[DBGRING_NAME_MAX - 1] = 0;
- ring->ring_size = ring_sz;
- ring->wp = ring->rp = 0;
- ring->ring_buf = buf;
- ring->threshold = DBGRING_FLUSH_THRESHOLD(ring);
- ring->state = RING_SUSPEND;
- ring->rem_len = 0;
- ring->sched_pull = TRUE;
- ring->pull_inactive = pull_inactive;
- DHD_DBG_RING_UNLOCK(ring->lock, flags);
-
- return BCME_OK;
-}
-
-void
-dhd_dbg_ring_deinit(dhd_pub_t *dhdp, dhd_dbg_ring_t *ring)
-{
- unsigned long flags = 0;
- DHD_DBG_RING_LOCK(ring->lock, flags);
- ring->id = 0;
- ring->name[0] = 0;
- ring->wp = ring->rp = 0;
- memset(&ring->stat, 0, sizeof(ring->stat));
- ring->threshold = 0;
- ring->state = RING_STOP;
- DHD_DBG_RING_UNLOCK(ring->lock, flags);
-
- DHD_DBG_RING_LOCK_DEINIT(dhdp->osh, ring->lock);
-}
-
-void
-dhd_dbg_ring_sched_pull(dhd_dbg_ring_t *ring, uint32 pending_len,
- os_pullreq_t pull_fn, void *os_pvt, const int id)
-{
- unsigned long flags = 0;
- DHD_DBG_RING_LOCK(ring->lock, flags);
- /* if the current pending size is bigger than threshold and
- * threshold is set
- */
- if (ring->threshold > 0 &&
- (pending_len >= ring->threshold) && ring->sched_pull) {
- /*
- * Update the state and release the lock before calling
- * the pull_fn. Do not transfer control to other layers
- * with locks held. If the call back again calls into
- * the same layer fro this context, can lead to deadlock.
- */
- ring->sched_pull = FALSE;
- DHD_DBG_RING_UNLOCK(ring->lock, flags);
- pull_fn(os_pvt, id);
- } else {
- DHD_DBG_RING_UNLOCK(ring->lock, flags);
- }
-}
-
-uint32
-dhd_dbg_ring_get_pending_len(dhd_dbg_ring_t *ring)
-{
- uint32 pending_len = 0;
- unsigned long flags = 0;
- DHD_DBG_RING_LOCK(ring->lock, flags);
- if (ring->stat.written_bytes > ring->stat.read_bytes) {
- pending_len = ring->stat.written_bytes - ring->stat.read_bytes;
- } else if (ring->stat.written_bytes < ring->stat.read_bytes) {
- pending_len = PENDING_LEN_MAX - ring->stat.read_bytes + ring->stat.written_bytes;
- } else {
- pending_len = 0;
- }
- DHD_DBG_RING_UNLOCK(ring->lock, flags);
-
- return pending_len;
-}
-
-int
-dhd_dbg_ring_push(dhd_dbg_ring_t *ring, dhd_dbg_ring_entry_t *hdr, void *data)
-{
- unsigned long flags;
- uint32 w_len;
- uint32 avail_size;
- dhd_dbg_ring_entry_t *w_entry, *r_entry;
-
- if (!ring || !hdr || !data) {
- return BCME_BADARG;
- }
-
- DHD_DBG_RING_LOCK(ring->lock, flags);
-
- if (ring->state != RING_ACTIVE) {
- DHD_DBG_RING_UNLOCK(ring->lock, flags);
- return BCME_OK;
- }
-
- w_len = ENTRY_LENGTH(hdr);
-
- DHD_DBGIF(("%s: RING%d[%s] hdr->len=%u, w_len=%u, wp=%d, rp=%d, ring_start=0x%p;"
- " ring_size=%u\n",
- __FUNCTION__, ring->id, ring->name, hdr->len, w_len, ring->wp, ring->rp,
- ring->ring_buf, ring->ring_size));
-
- if (w_len > ring->ring_size) {
- DHD_DBG_RING_UNLOCK(ring->lock, flags);
- DHD_ERROR(("%s: RING%d[%s] w_len=%u, ring_size=%u,"
- " write size exceeds ring size !\n",
- __FUNCTION__, ring->id, ring->name, w_len, ring->ring_size));
- return BCME_BUFTOOLONG;
- }
- /* Claim the space */
- do {
- avail_size = DBG_RING_CHECK_WRITE_SPACE(ring->rp, ring->wp, ring->ring_size);
- if (avail_size <= w_len) {
- /* Prepare the space */
- if (ring->rp <= ring->wp) {
- ring->tail_padded = TRUE;
- ring->rem_len = ring->ring_size - ring->wp;
- DHD_DBGIF(("%s: RING%d[%s] Insuffient tail space,"
- " rp=%d, wp=%d, rem_len=%d, ring_size=%d,"
- " avail_size=%d, w_len=%d\n", __FUNCTION__,
- ring->id, ring->name, ring->rp, ring->wp,
- ring->rem_len, ring->ring_size, avail_size,
- w_len));
-
- /* 0 pad insufficient tail space */
- memset((uint8 *)ring->ring_buf + ring->wp, 0, ring->rem_len);
- /* If read pointer is still at the beginning, make some room */
- if (ring->rp == 0) {
- r_entry = (dhd_dbg_ring_entry_t *)((uint8 *)ring->ring_buf +
- ring->rp);
- ring->rp += ENTRY_LENGTH(r_entry);
- ring->stat.read_bytes += ENTRY_LENGTH(r_entry);
- DHD_DBGIF(("%s: rp at 0, move by one entry length"
- " (%u bytes)\n",
- __FUNCTION__, (uint32)ENTRY_LENGTH(r_entry)));
- }
- if (ring->rp == ring->wp) {
- ring->rp = 0;
- }
- ring->wp = 0;
- DHD_DBGIF(("%s: new rp=%u, wp=%u\n",
- __FUNCTION__, ring->rp, ring->wp));
- } else {
- /* Not enough space for new entry, free some up */
- r_entry = (dhd_dbg_ring_entry_t *)((uint8 *)ring->ring_buf +
- ring->rp);
- /* check bounds before incrementing read ptr */
- if (ring->rp + ENTRY_LENGTH(r_entry) >= ring->ring_size) {
- DHD_ERROR(("%s: RING%d[%s] rp points out of boundary, "
- "ring->wp=%u, ring->rp=%u, ring->ring_size=%d\n",
- __FUNCTION__, ring->id, ring->name, ring->wp,
- ring->rp, ring->ring_size));
- ASSERT(0);
- DHD_DBG_RING_UNLOCK(ring->lock, flags);
- return BCME_BUFTOOSHORT;
- }
- ring->rp += ENTRY_LENGTH(r_entry);
- /* skip padding if there is one */
- if (ring->tail_padded &&
- ((ring->rp + ring->rem_len) == ring->ring_size)) {
- DHD_DBGIF(("%s: RING%d[%s] Found padding,"
- " avail_size=%d, w_len=%d, set rp=0\n",
- __FUNCTION__, ring->id, ring->name,
- avail_size, w_len));
- ring->rp = 0;
- ring->tail_padded = FALSE;
- ring->rem_len = 0;
- }
- ring->stat.read_bytes += ENTRY_LENGTH(r_entry);
- DHD_DBGIF(("%s: RING%d[%s] read_bytes=%d, wp=%d, rp=%d\n",
- __FUNCTION__, ring->id, ring->name, ring->stat.read_bytes,
- ring->wp, ring->rp));
- }
- } else {
- break;
- }
- } while (TRUE);
-
- /* check before writing to the ring */
- if (ring->wp + w_len >= ring->ring_size) {
- DHD_ERROR(("%s: RING%d[%s] wp pointed out of ring boundary, "
- "wp=%d, ring_size=%d, w_len=%u\n", __FUNCTION__, ring->id,
- ring->name, ring->wp, ring->ring_size, w_len));
- ASSERT(0);
- DHD_DBG_RING_UNLOCK(ring->lock, flags);
- return BCME_BUFTOOLONG;
- }
-
- w_entry = (dhd_dbg_ring_entry_t *)((uint8 *)ring->ring_buf + ring->wp);
- /* header */
- memcpy(w_entry, hdr, DBG_RING_ENTRY_SIZE);
- w_entry->len = hdr->len;
- /* payload */
- memcpy((char *)w_entry + DBG_RING_ENTRY_SIZE, data, w_entry->len);
- /* update write pointer */
- ring->wp += w_len;
-
- /* update statistics */
- ring->stat.written_records++;
- ring->stat.written_bytes += w_len;
- DHD_DBGIF(("%s : RING%d[%s] written_records %d, written_bytes %d, read_bytes=%d,"
- " ring->threshold=%d, wp=%d, rp=%d\n", __FUNCTION__, ring->id, ring->name,
- ring->stat.written_records, ring->stat.written_bytes, ring->stat.read_bytes,
- ring->threshold, ring->wp, ring->rp));
-
- DHD_DBG_RING_UNLOCK(ring->lock, flags);
- return BCME_OK;
-}
-
-/*
- * This function folds ring->lock, so callers of this function
- * should not hold ring->lock.
- */
-int
-dhd_dbg_ring_pull_single(dhd_dbg_ring_t *ring, void *data, uint32 buf_len, bool strip_header)
-{
- dhd_dbg_ring_entry_t *r_entry = NULL;
- uint32 rlen = 0;
- char *buf = NULL;
- unsigned long flags;
-
- if (!ring || !data || buf_len <= 0) {
- return 0;
- }
-
- DHD_DBG_RING_LOCK(ring->lock, flags);
-
- /* pull from ring is allowed for inactive (suspended) ring
- * in case of ecounters only, this is because, for ecounters
- * when a trap occurs the ring is suspended and data is then
- * pulled to dump it to a file. For other rings if ring is
- * not in active state return without processing (as before)
- */
- if (!ring->pull_inactive && (ring->state != RING_ACTIVE)) {
- goto exit;
- }
-
- if (ring->rp == ring->wp) {
- goto exit;
- }
-
- DHD_DBGIF(("%s: RING%d[%s] buf_len=%u, wp=%d, rp=%d, ring_start=0x%p; ring_size=%u\n",
- __FUNCTION__, ring->id, ring->name, buf_len, ring->wp, ring->rp,
- ring->ring_buf, ring->ring_size));
-
- r_entry = (dhd_dbg_ring_entry_t *)((uint8 *)ring->ring_buf + ring->rp);
-
- /* Boundary Check */
- rlen = ENTRY_LENGTH(r_entry);
- if ((ring->rp + rlen) > ring->ring_size) {
- DHD_ERROR(("%s: entry len %d is out of boundary of ring size %d,"
- " current ring %d[%s] - rp=%d\n", __FUNCTION__, rlen,
- ring->ring_size, ring->id, ring->name, ring->rp));
- rlen = 0;
- goto exit;
- }
-
- if (strip_header) {
- rlen = r_entry->len;
- buf = (char *)r_entry + DBG_RING_ENTRY_SIZE;
- } else {
- rlen = ENTRY_LENGTH(r_entry);
- buf = (char *)r_entry;
- }
- if (rlen > buf_len) {
- DHD_ERROR(("%s: buf len %d is too small for entry len %d\n",
- __FUNCTION__, buf_len, rlen));
- DHD_ERROR(("%s: ring %d[%s] - ring size=%d, wp=%d, rp=%d\n",
- __FUNCTION__, ring->id, ring->name, ring->ring_size,
- ring->wp, ring->rp));
- ASSERT(0);
- rlen = 0;
- goto exit;
- }
-
- memcpy(data, buf, rlen);
- /* update ring context */
- ring->rp += ENTRY_LENGTH(r_entry);
- /* don't pass wp but skip padding if there is one */
- if (ring->rp != ring->wp &&
- ring->tail_padded && ((ring->rp + ring->rem_len) >= ring->ring_size)) {
- DHD_DBGIF(("%s: RING%d[%s] Found padding, rp=%d, wp=%d\n",
- __FUNCTION__, ring->id, ring->name, ring->rp, ring->wp));
- ring->rp = 0;
- ring->tail_padded = FALSE;
- ring->rem_len = 0;
- }
- if (ring->rp >= ring->ring_size) {
- DHD_ERROR(("%s: RING%d[%s] rp pointed out of ring boundary,"
- " rp=%d, ring_size=%d\n", __FUNCTION__, ring->id,
- ring->name, ring->rp, ring->ring_size));
- ASSERT(0);
- rlen = 0;
- goto exit;
- }
- ring->stat.read_bytes += ENTRY_LENGTH(r_entry);
- DHD_DBGIF(("%s RING%d[%s]read_bytes %d, wp=%d, rp=%d\n", __FUNCTION__,
- ring->id, ring->name, ring->stat.read_bytes, ring->wp, ring->rp));
-
-exit:
- DHD_DBG_RING_UNLOCK(ring->lock, flags);
-
- return rlen;
-}
-
-int
-dhd_dbg_ring_pull(dhd_dbg_ring_t *ring, void *data, uint32 buf_len, bool strip_hdr)
-{
- int32 r_len, total_r_len = 0;
- unsigned long flags;
-
- if (!ring || !data)
- return 0;
-
- DHD_DBG_RING_LOCK(ring->lock, flags);
- if (!ring->pull_inactive && (ring->state != RING_ACTIVE)) {
- DHD_DBG_RING_UNLOCK(ring->lock, flags);
- return 0;
- }
- DHD_DBG_RING_UNLOCK(ring->lock, flags);
-
- while (buf_len > 0) {
- r_len = dhd_dbg_ring_pull_single(ring, data, buf_len, strip_hdr);
- if (r_len == 0)
- break;
- data = (uint8 *)data + r_len;
- buf_len -= r_len;
- total_r_len += r_len;
- }
-
- return total_r_len;
-}
-
-int
-dhd_dbg_ring_config(dhd_dbg_ring_t *ring, int log_level, uint32 threshold)
-{
- unsigned long flags = 0;
- if (!ring)
- return BCME_BADADDR;
-
- if (ring->state == RING_STOP)
- return BCME_UNSUPPORTED;
-
- DHD_DBG_RING_LOCK(ring->lock, flags);
-
- if (log_level == 0)
- ring->state = RING_SUSPEND;
- else
- ring->state = RING_ACTIVE;
-
- ring->log_level = log_level;
- ring->threshold = MIN(threshold, DBGRING_FLUSH_THRESHOLD(ring));
-
- DHD_DBG_RING_UNLOCK(ring->lock, flags);
-
- return BCME_OK;
-}
-
-void
-dhd_dbg_ring_start(dhd_dbg_ring_t *ring)
-{
- if (!ring)
- return;
-
- /* Initialize the information for the ring */
- ring->state = RING_SUSPEND;
- ring->log_level = 0;
- ring->rp = ring->wp = 0;
- ring->threshold = 0;
- memset(&ring->stat, 0, sizeof(struct ring_statistics));
- memset(ring->ring_buf, 0, ring->ring_size);
-}
+++ /dev/null
-/*
- * DHD debug ring header file
- *
- * <<Broadcom-WL-IPTag/Open:>>
- *
- * Copyright (C) 1999-2019, Broadcom.
- *
- * Unless you and Broadcom execute a separate written software license
- * agreement governing use of this software, this software is licensed to you
- * under the terms of the GNU General Public License version 2 (the "GPL"),
- * available at http://www.broadcom.com/licenses/GPLv2.php, with the
- * following added to such license:
- *
- * As a special exception, the copyright holders of this software give you
- * permission to link this software with independent modules, and to copy and
- * distribute the resulting executable under terms of your choice, provided that
- * you also meet, for each linked independent module, the terms and conditions of
- * the license of that module. An independent module is a module which is not
- * derived from this software. The special exception does not apply to any
- * modifications of the software.
- *
- * Notwithstanding the above, under no circumstances may you combine this
- * software in any way with any other Broadcom software provided under a license
- * other than the GPL, without Broadcom's express prior written consent.
- *
- * $Id: dhd_dbg_ring.h 795094 2018-12-17 08:56:58Z $
- */
-
-#ifndef __DHD_DBG_RING_H__
-#define __DHD_DBG_RING_H__
-
-#include <bcmutils.h>
-
-#define PACKED_STRUCT __attribute__ ((packed))
-
-#define DBGRING_NAME_MAX 32
-
-enum dbg_ring_state {
- RING_STOP = 0, /* ring is not initialized */
- RING_ACTIVE, /* ring is live and logging */
- RING_SUSPEND /* ring is initialized but not logging */
-};
-
-/* each entry in dbg ring has below header, to handle
- * variable length records in ring
- */
-typedef struct dhd_dbg_ring_entry {
- uint16 len; /* payload length excluding the header */
- uint8 flags;
- uint8 type; /* Per ring specific */
- uint64 timestamp; /* present if has_timestamp bit is set. */
-} PACKED_STRUCT dhd_dbg_ring_entry_t;
-
-struct ring_statistics {
- /* number of bytes that was written to the buffer by driver */
- uint32 written_bytes;
- /* number of bytes that was read from the buffer by user land */
- uint32 read_bytes;
- /* number of records that was written to the buffer by driver */
- uint32 written_records;
-};
-
-typedef struct dhd_dbg_ring_status {
- uint8 name[DBGRING_NAME_MAX];
- uint32 flags;
- int ring_id; /* unique integer representing the ring */
- /* total memory size allocated for the buffer */
- uint32 ring_buffer_byte_size;
- uint32 verbose_level;
- /* number of bytes that was written to the buffer by driver */
- uint32 written_bytes;
- /* number of bytes that was read from the buffer by user land */
- uint32 read_bytes;
- /* number of records that was read from the buffer by user land */
- uint32 written_records;
-} dhd_dbg_ring_status_t;
-
-typedef struct dhd_dbg_ring {
- int id; /* ring id */
- uint8 name[DBGRING_NAME_MAX]; /* name string */
- uint32 ring_size; /* numbers of item in ring */
- uint32 wp; /* write pointer */
- uint32 rp; /* read pointer */
- uint32 rp_tmp; /* tmp read pointer */
- uint32 log_level; /* log_level */
- uint32 threshold; /* threshold bytes */
- void * ring_buf; /* pointer of actually ring buffer */
- void * lock; /* lock for ring access */
- struct ring_statistics stat; /* statistics */
- enum dbg_ring_state state; /* ring state enum */
- bool tail_padded; /* writer does not have enough space */
- uint32 rem_len; /* number of bytes from wp_pad to end */
- bool sched_pull; /* schedule reader immediately */
- bool pull_inactive; /* pull contents from ring even if it is inactive */
-} dhd_dbg_ring_t;
-
-#define DBGRING_FLUSH_THRESHOLD(ring) (ring->ring_size / 3)
-#define RING_STAT_TO_STATUS(ring, status) \
- do { \
- strncpy(status.name, ring->name, \
- sizeof(status.name) - 1); \
- status.ring_id = ring->id; \
- status.ring_buffer_byte_size = ring->ring_size; \
- status.written_bytes = ring->stat.written_bytes; \
- status.written_records = ring->stat.written_records; \
- status.read_bytes = ring->stat.read_bytes; \
- status.verbose_level = ring->log_level; \
- } while (0)
-
-#define DBG_RING_ENTRY_SIZE (sizeof(dhd_dbg_ring_entry_t))
-#define ENTRY_LENGTH(hdr) ((hdr)->len + DBG_RING_ENTRY_SIZE)
-#define PAYLOAD_MAX_LEN 65535
-#define PAYLOAD_ECNTR_MAX_LEN 1648u
-#define PAYLOAD_RTT_MAX_LEN 1648u
-#define PENDING_LEN_MAX 0xFFFFFFFF
-#define DBG_RING_STATUS_SIZE (sizeof(dhd_dbg_ring_status_t))
-
-#define TXACTIVESZ(r, w, d) (((r) <= (w)) ? ((w) - (r)) : ((d) - (r) + (w)))
-#define DBG_RING_READ_AVAIL_SPACE(w, r, d) (((w) >= (r)) ? ((w) - (r)) : ((d) - (r)))
-#define DBG_RING_WRITE_SPACE_AVAIL_CONT(r, w, d) (((w) >= (r)) ? ((d) - (w)) : ((r) - (w)))
-#define DBG_RING_WRITE_SPACE_AVAIL(r, w, d) (d - (TXACTIVESZ(r, w, d)))
-#define DBG_RING_CHECK_WRITE_SPACE(r, w, d) \
- MIN(DBG_RING_WRITE_SPACE_AVAIL(r, w, d), DBG_RING_WRITE_SPACE_AVAIL_CONT(r, w, d))
-
-typedef void (*os_pullreq_t)(void *os_priv, const int ring_id);
-
-int dhd_dbg_ring_init(dhd_pub_t *dhdp, dhd_dbg_ring_t *ring, uint16 id, uint8 *name,
- uint32 ring_sz, void *allocd_buf, bool pull_inactive);
-void dhd_dbg_ring_deinit(dhd_pub_t *dhdp, dhd_dbg_ring_t *ring);
-int dhd_dbg_ring_push(dhd_dbg_ring_t *ring, dhd_dbg_ring_entry_t *hdr, void *data);
-int dhd_dbg_ring_pull(dhd_dbg_ring_t *ring, void *data, uint32 buf_len,
- bool strip_hdr);
-int dhd_dbg_ring_pull_single(dhd_dbg_ring_t *ring, void *data, uint32 buf_len,
- bool strip_header);
-uint32 dhd_dbg_ring_get_pending_len(dhd_dbg_ring_t *ring);
-void dhd_dbg_ring_sched_pull(dhd_dbg_ring_t *ring, uint32 pending_len,
- os_pullreq_t pull_fn, void *os_pvt, const int id);
-int dhd_dbg_ring_config(dhd_dbg_ring_t *ring, int log_level, uint32 threshold);
-void dhd_dbg_ring_start(dhd_dbg_ring_t *ring);
-#endif /* __DHD_DBG_RING_H__ */
*
* <<Broadcom-WL-IPTag/Open:>>
*
- * Copyright (C) 1999-2019, Broadcom.
- *
+ * Copyright (C) 1999-2017, Broadcom Corporation
+ *
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
* under the terms of the GNU General Public License version 2 (the "GPL"),
* available at http://www.broadcom.com/licenses/GPLv2.php, with the
* following added to such license:
- *
+ *
* As a special exception, the copyright holders of this software give you
* permission to link this software with independent modules, and to copy and
* distribute the resulting executable under terms of your choice, provided that
* the license of that module. An independent module is a module which is not
* derived from this software. The special exception does not apply to any
* modifications of the software.
- *
+ *
* Notwithstanding the above, under no circumstances may you combine this
* software in any way with any other Broadcom software provided under a license
* other than the GPL, without Broadcom's express prior written consent.
*
- * $Id: dhd_debug.c 823976 2019-06-06 11:39:07Z $
+ * $Id: dhd_debug.c 711908 2017-07-20 10:37:34Z $
*/
#include <typedefs.h>
#include <dngl_stats.h>
#include <dhd.h>
#include <dhd_dbg.h>
-#include <dhd_dbg_ring.h>
#include <dhd_debug.h>
#include <dhd_mschdbg.h>
-#include <dhd_bus.h>
#include <event_log.h>
#include <event_trace.h>
#include <msgtrace.h>
+#if defined(DHD_EFI)
+#define offsetof(TYPE, MEMBER) ((size_t) &((TYPE *)0)->MEMBER)
+#define container_of(ptr, type, member) \
+ ((type *)((char *)(ptr) - offsetof(type, member)))
+#endif
+
+#define DBGRING_FLUSH_THRESHOLD(ring) (ring->ring_size / 3)
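+/* Snapshot a ring's counters and verbose level into a dhd_dbg_ring_status_t */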
+#define RING_STAT_TO_STATUS(ring, status) \
+ do { \
+ strncpy(status.name, ring->name, \
+ sizeof(status.name) - 1); \
+ status.ring_id = ring->id; \
+ status.ring_buffer_byte_size = ring->ring_size; \
+ status.written_bytes = ring->stat.written_bytes; \
+ status.written_records = ring->stat.written_records; \
+ status.read_bytes = ring->stat.read_bytes; \
+ status.verbose_level = ring->log_level; \
+ } while (0)
+
#define DHD_PKT_INFO DHD_ERROR
struct map_table {
uint16 fw_id;
/* define log level per ring type */
struct log_level_table fw_verbose_level_map[] = {
- {1, EVENT_LOG_TAG_PCI_ERROR, "PCI_ERROR"},
- {1, EVENT_LOG_TAG_PCI_WARN, "PCI_WARN"},
- {2, EVENT_LOG_TAG_PCI_INFO, "PCI_INFO"},
- {3, EVENT_LOG_TAG_PCI_DBG, "PCI_DEBUG"},
- {3, EVENT_LOG_TAG_BEACON_LOG, "BEACON_LOG"},
- {2, EVENT_LOG_TAG_WL_ASSOC_LOG, "ASSOC_LOG"},
- {2, EVENT_LOG_TAG_WL_ROAM_LOG, "ROAM_LOG"},
- {1, EVENT_LOG_TAG_TRACE_WL_INFO, "WL INFO"},
- {1, EVENT_LOG_TAG_TRACE_BTCOEX_INFO, "BTCOEX INFO"},
- {1, EVENT_LOG_TAG_SCAN_WARN, "SCAN_WARN"},
- {1, EVENT_LOG_TAG_SCAN_ERROR, "SCAN_ERROR"},
- {2, EVENT_LOG_TAG_SCAN_TRACE_LOW, "SCAN_TRACE_LOW"},
- {2, EVENT_LOG_TAG_SCAN_TRACE_HIGH, "SCAN_TRACE_HIGH"}
+ {1, EVENT_LOG_TAG_PCI_ERROR, EVENT_LOG_SET_BUS, "PCI_ERROR"},
+ {1, EVENT_LOG_TAG_PCI_WARN, EVENT_LOG_SET_BUS, "PCI_WARN"},
+ {2, EVENT_LOG_TAG_PCI_INFO, EVENT_LOG_SET_BUS, "PCI_INFO"},
+ {3, EVENT_LOG_TAG_PCI_DBG, EVENT_LOG_SET_BUS, "PCI_DEBUG"},
+ {3, EVENT_LOG_TAG_BEACON_LOG, EVENT_LOG_SET_WL, "BEACON_LOG"},
+ {2, EVENT_LOG_TAG_WL_ASSOC_LOG, EVENT_LOG_SET_WL, "ASSOC_LOG"},
+ {2, EVENT_LOG_TAG_WL_ROAM_LOG, EVENT_LOG_SET_WL, "ROAM_LOG"},
+ {1, EVENT_LOG_TAG_TRACE_WL_INFO, EVENT_LOG_SET_WL, "WL_INFO"},
+ {1, EVENT_LOG_TAG_TRACE_BTCOEX_INFO, EVENT_LOG_SET_WL, "BTCOEX_INFO"},
+#ifdef CUSTOMER_HW4_DEBUG
+ {3, EVENT_LOG_TAG_SCAN_WARN, EVENT_LOG_SET_WL, "SCAN_WARN"},
+#else
+ {1, EVENT_LOG_TAG_SCAN_WARN, EVENT_LOG_SET_WL, "SCAN_WARN"},
+#endif /* CUSTOMER_HW4_DEBUG */
+ {1, EVENT_LOG_TAG_SCAN_ERROR, EVENT_LOG_SET_WL, "SCAN_ERROR"},
+ {2, EVENT_LOG_TAG_SCAN_TRACE_LOW, EVENT_LOG_SET_WL, "SCAN_TRACE_LOW"},
+ {2, EVENT_LOG_TAG_SCAN_TRACE_HIGH, EVENT_LOG_SET_WL, "SCAN_TRACE_HIGH"}
+};
+
+struct log_level_table fw_event_level_map[] = {
+ {1, EVENT_LOG_TAG_TRACE_WL_INFO, EVENT_LOG_SET_WL, "WL_INFO"},
+ {1, EVENT_LOG_TAG_TRACE_BTCOEX_INFO, EVENT_LOG_SET_WL, "BTCOEX_INFO"},
+#ifdef CUSTOMER_HW4_DEBUG
+ {3, EVENT_LOG_TAG_BEACON_LOG, EVENT_LOG_SET_WL, "BEACON LOG"},
+#else
+ {2, EVENT_LOG_TAG_BEACON_LOG, EVENT_LOG_SET_WL, "BEACON LOG"},
+#endif /* CUSTOMER_HW4_DEBUG */
+};
+
+struct map_table nan_event_map[] = {
+ {TRACE_NAN_CLUSTER_STARTED, NAN_EVENT_CLUSTER_STARTED, "NAN_CLUSTER_STARTED"},
+ {TRACE_NAN_CLUSTER_JOINED, NAN_EVENT_CLUSTER_JOINED, "NAN_CLUSTER_JOINED"},
+ {TRACE_NAN_CLUSTER_MERGED, NAN_EVENT_CLUSTER_MERGED, "NAN_CLUSTER_MERGED"},
+ {TRACE_NAN_ROLE_CHANGED, NAN_EVENT_ROLE_CHANGED, "NAN_ROLE_CHANGED"},
+ {TRACE_NAN_SCAN_COMPLETE, NAN_EVENT_SCAN_COMPLETE, "NAN_SCAN_COMPLETE"},
+ {TRACE_NAN_STATUS_CHNG, NAN_EVENT_STATUS_CHNG, "NAN_STATUS_CHNG"},
+};
+
+struct log_level_table nan_event_level_map[] = {
+ {1, EVENT_LOG_TAG_NAN_ERROR, 0, "NAN_ERROR"},
+ {2, EVENT_LOG_TAG_NAN_INFO, 0, "NAN_INFO"},
+ {3, EVENT_LOG_TAG_NAN_DBG, 0, "NAN_DEBUG"},
+};
+
+struct map_table nan_evt_tag_map[] = {
+ {TRACE_TAG_BSSID, WIFI_TAG_BSSID, "BSSID"},
+ {TRACE_TAG_ADDR, WIFI_TAG_ADDR, "ADDR_0"},
};
/* reference tab table */
typedef struct dhddbg_loglist_item {
dll_t list;
- prcd_event_log_hdr_t prcd_log_hdr;
+ event_log_hdr_t *hdr;
} loglist_item_t;
typedef struct dhbdbg_pending_item {
};
#define TRACE_LOG_MAGIC_NUMBER 0xEAE47C06
-void print_roam_enhanced_log(prcd_event_log_hdr_t *plog_hdr);
int
-dhd_dbg_push_to_ring(dhd_pub_t *dhdp, int ring_id, dhd_dbg_ring_entry_t *hdr, void *data)
+dhd_dbg_ring_pull_single(dhd_pub_t *dhdp, int ring_id, void *data, uint32 buf_len,
+ bool strip_header)
{
dhd_dbg_ring_t *ring;
- int ret = 0;
- uint32 pending_len = 0;
+ dhd_dbg_ring_entry_t *r_entry;
+ uint32 rlen;
+ char *buf;
if (!dhdp || !dhdp->dbg) {
- return BCME_BADADDR;
- }
-
- if (!VALID_RING(ring_id)) {
- DHD_ERROR(("%s : invalid ring_id : %d\n", __FUNCTION__, ring_id));
- return BCME_RANGE;
+ return 0;
}
ring = &dhdp->dbg->dbg_rings[ring_id];
- ret = dhd_dbg_ring_push(ring, hdr, data);
- if (ret != BCME_OK)
- return ret;
+ if (ring->state != RING_ACTIVE) {
+ return 0;
+ }
- pending_len = dhd_dbg_ring_get_pending_len(ring);
- dhd_dbg_ring_sched_pull(ring, pending_len, dhdp->dbg->pullreq,
- dhdp->dbg->private, ring->id);
+ if (ring->rp == ring->wp) {
+ return 0;
+ }
- return ret;
-}
+ r_entry = (dhd_dbg_ring_entry_t *)((uint8 *)ring->ring_buf + ring->rp);
-dhd_dbg_ring_t *
-dhd_dbg_get_ring_from_ring_id(dhd_pub_t *dhdp, int ring_id)
-{
- if (!dhdp || !dhdp->dbg) {
- return NULL;
+ /* Boundary Check */
+ rlen = ENTRY_LENGTH(r_entry);
+ if ((ring->rp + rlen) > ring->ring_size) {
+ DHD_ERROR(("%s: entry len %d is out of boundary of ring size %d,"
+ " current ring %d[%s] - rp=%d\n", __FUNCTION__, rlen,
+ ring->ring_size, ring->id, ring->name, ring->rp));
+ return 0;
}
- if (!VALID_RING(ring_id)) {
- DHD_ERROR(("%s : invalid ring_id : %d\n", __FUNCTION__, ring_id));
- return NULL;
+ if (strip_header) {
+ rlen = r_entry->len;
+ buf = (char *)r_entry + DBG_RING_ENTRY_SIZE;
+ } else {
+ rlen = ENTRY_LENGTH(r_entry);
+ buf = (char *)r_entry;
+ }
+ if (rlen > buf_len) {
+ DHD_ERROR(("%s: buf len %d is too small for entry len %d\n",
+ __FUNCTION__, buf_len, rlen));
+ DHD_ERROR(("%s: ring %d[%s] - ring size=%d, wp=%d, rp=%d\n",
+ __FUNCTION__, ring->id, ring->name, ring->ring_size,
+ ring->wp, ring->rp));
+ ASSERT(0);
+ return 0;
}
- return &dhdp->dbg->dbg_rings[ring_id];
+ memcpy(data, buf, rlen);
+ /* update ring context */
+ ring->rp += ENTRY_LENGTH(r_entry);
+ /* skip padding if there is one */
+ if (ring->tail_padded && ((ring->rp + ring->rem_len) == ring->ring_size)) {
+ DHD_DBGIF(("%s: RING%d[%s] Found padding, rp=%d, wp=%d\n",
+ __FUNCTION__, ring->id, ring->name, ring->rp, ring->wp));
+ ring->rp = 0;
+ ring->tail_padded = FALSE;
+ ring->rem_len = 0;
+ }
+ if (ring->rp >= ring->ring_size) {
+ DHD_ERROR(("%s: RING%d[%s] rp pointed out of ring boundary,"
+ " rp=%d, ring_size=%d\n", __FUNCTION__, ring->id,
+ ring->name, ring->rp, ring->ring_size));
+ ASSERT(0);
+ }
+ ring->stat.read_bytes += ENTRY_LENGTH(r_entry);
+ DHD_DBGIF(("%s RING%d[%s]read_bytes %d, wp=%d, rp=%d\n", __FUNCTION__,
+ ring->id, ring->name, ring->stat.read_bytes, ring->wp, ring->rp));
+
+ return rlen;
}
int
-dhd_dbg_pull_single_from_ring(dhd_pub_t *dhdp, int ring_id, void *data, uint32 buf_len,
- bool strip_header)
+dhd_dbg_ring_pull(dhd_pub_t *dhdp, int ring_id, void *data, uint32 buf_len)
{
+ int32 r_len, total_r_len = 0;
dhd_dbg_ring_t *ring;
- if (!dhdp || !dhdp->dbg) {
+ if (!dhdp || !dhdp->dbg)
+ return 0;
+ ring = &dhdp->dbg->dbg_rings[ring_id];
+ if (ring->state != RING_ACTIVE)
return 0;
- }
- if (!VALID_RING(ring_id)) {
- DHD_ERROR(("%s : invalid ring_id : %d\n", __FUNCTION__, ring_id));
- return BCME_RANGE;
+ while (buf_len > 0) {
+ r_len = dhd_dbg_ring_pull_single(dhdp, ring_id, data, buf_len, FALSE);
+ if (r_len == 0)
+ break;
+ data = (uint8 *)data + r_len;
+ buf_len -= r_len;
+ total_r_len += r_len;
}
- ring = &dhdp->dbg->dbg_rings[ring_id];
-
- return dhd_dbg_ring_pull_single(ring, data, buf_len, strip_header);
+ return total_r_len;
}
int
-dhd_dbg_pull_from_ring(dhd_pub_t *dhdp, int ring_id, void *data, uint32 buf_len)
+dhd_dbg_ring_push(dhd_pub_t *dhdp, int ring_id, dhd_dbg_ring_entry_t *hdr, void *data)
{
+ unsigned long flags;
+ uint32 pending_len;
+ uint32 w_len;
+ uint32 avail_size;
dhd_dbg_ring_t *ring;
+ dhd_dbg_ring_entry_t *w_entry, *r_entry;
- if (!dhdp || !dhdp->dbg)
- return 0;
- if (!VALID_RING(ring_id)) {
- DHD_ERROR(("%s : invalid ring_id : %d\n", __FUNCTION__, ring_id));
- return BCME_RANGE;
+ if (!dhdp || !dhdp->dbg) {
+ return BCME_BADADDR;
}
+
ring = &dhdp->dbg->dbg_rings[ring_id];
- return dhd_dbg_ring_pull(ring, data, buf_len, FALSE);
+
+ if (ring->state != RING_ACTIVE) {
+ return BCME_OK;
+ }
+
+ flags = dhd_os_spin_lock(ring->lock);
+
+ w_len = ENTRY_LENGTH(hdr);
+
+ if (w_len > ring->ring_size) {
+ dhd_os_spin_unlock(ring->lock, flags);
+ return BCME_ERROR;
+ }
+
+ /* Claim the space */
+ do {
+ avail_size = DBG_RING_CHECK_WRITE_SPACE(ring->rp, ring->wp, ring->ring_size);
+ if (avail_size <= w_len) {
+ /* Prepare the space */
+ if (ring->rp <= ring->wp) {
+ ring->tail_padded = TRUE;
+ ring->rem_len = ring->ring_size - ring->wp;
+ DHD_DBGIF(("%s: RING%d[%s] Insuffient tail space,"
+ " rp=%d, wp=%d, rem_len=%d, ring_size=%d,"
+ " avail_size=%d, w_len=%d\n", __FUNCTION__,
+ ring->id, ring->name, ring->rp, ring->wp,
+ ring->rem_len, ring->ring_size, avail_size,
+ w_len));
+
+ /* 0 pad insufficient tail space */
+ memset((uint8 *)ring->ring_buf + ring->wp, 0, ring->rem_len);
+ if (ring->rp == ring->wp) {
+ ring->rp = 0;
+ }
+ ring->wp = 0;
+ } else {
+ /* Not enough space for new entry, free some up */
+ r_entry = (dhd_dbg_ring_entry_t *)((uint8 *)ring->ring_buf +
+ ring->rp);
+ ring->rp += ENTRY_LENGTH(r_entry);
+ /* skip padding if there is one */
+ if (ring->tail_padded &&
+ ((ring->rp + ring->rem_len) == ring->ring_size)) {
+ DHD_DBGIF(("%s: RING%d[%s] Found padding,"
+ " avail_size=%d, w_len=%d\n", __FUNCTION__,
+ ring->id, ring->name, avail_size, w_len));
+ ring->rp = 0;
+ ring->tail_padded = FALSE;
+ ring->rem_len = 0;
+ }
+ if (ring->rp >= ring->ring_size) {
+ DHD_ERROR(("%s: RING%d[%s] rp points out of boundary,"
+ " ring->rp = %d, ring->ring_size=%d\n",
+ __FUNCTION__, ring->id, ring->name, ring->rp,
+ ring->ring_size));
+ ASSERT(0);
+ }
+ ring->stat.read_bytes += ENTRY_LENGTH(r_entry);
+ DHD_DBGIF(("%s: RING%d[%s] read_bytes %d, wp=%d, rp=%d\n",
+ __FUNCTION__, ring->id, ring->name, ring->stat.read_bytes,
+ ring->wp, ring->rp));
+ }
+ } else {
+ break;
+ }
+ } while (TRUE);
+
+ w_entry = (dhd_dbg_ring_entry_t *)((uint8 *)ring->ring_buf + ring->wp);
+ /* header */
+ memcpy(w_entry, hdr, DBG_RING_ENTRY_SIZE);
+ w_entry->len = hdr->len;
+ /* payload */
+ memcpy((char *)w_entry + DBG_RING_ENTRY_SIZE, data, w_entry->len);
+ /* update write pointer */
+ ring->wp += w_len;
+ if (ring->wp >= ring->ring_size) {
+ DHD_ERROR(("%s: RING%d[%s] wp pointed out of ring boundary, "
+ "wp=%d, ring_size=%d\n", __FUNCTION__, ring->id,
+ ring->name, ring->wp, ring->ring_size));
+ ASSERT(0);
+ }
+ /* update statistics */
+ ring->stat.written_records++;
+ ring->stat.written_bytes += w_len;
+ DHD_DBGIF(("%s : RING%d[%s] written_records %d, written_bytes %d, read_bytes=%d,"
+ " ring->threshold=%d, wp=%d, rp=%d\n", __FUNCTION__, ring->id, ring->name,
+ ring->stat.written_records, ring->stat.written_bytes, ring->stat.read_bytes,
+ ring->threshold, ring->wp, ring->rp));
+
+ /* Calculate current pending size */
+ if (ring->stat.written_bytes > ring->stat.read_bytes) {
+ pending_len = ring->stat.written_bytes - ring->stat.read_bytes;
+ } else if (ring->stat.written_bytes < ring->stat.read_bytes) {
+ pending_len = 0xFFFFFFFF - ring->stat.read_bytes + ring->stat.written_bytes;
+ } else {
+ pending_len = 0;
+ }
+
+ /* if the current pending size is bigger than threshold */
+ if (ring->threshold > 0 &&
+ (pending_len >= ring->threshold) && ring->sched_pull) {
+ dhdp->dbg->pullreq(dhdp->dbg->private, ring->id);
+ ring->sched_pull = FALSE;
+ }
+ dhd_os_spin_unlock(ring->lock, flags);
+ return BCME_OK;
}
static int
char *data, *s;
static uint32 seqnum_prev = 0;
- if (!event_data) {
- DHD_ERROR(("%s: event_data is NULL\n", __FUNCTION__));
- return;
- }
-
hdr = (msgtrace_hdr_t *)event_data;
data = (char *)event_data + MSGTRACE_HDRLEN;
}
#endif /* MACOSX_DHD */
#ifdef SHOW_LOGTRACE
-#define DATA_UNIT_FOR_LOG_CNT 4
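+/* Return the first TLV with tag 'id' in a packed TLV buffer, or NULL if not found */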
+static const uint8 *
+event_get_tlv(uint16 id, const char* tlvs, uint tlvs_len)
+{
+ const uint8 *pos = (const uint8 *)tlvs;
+ const uint8 *end = pos + tlvs_len;
+ const tlv_log *tlv;
+ int rest;
-#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
-#pragma GCC diagnostic pop
-#endif // endif
+ while (pos + 1 < end) {
+ if (pos + 4 + pos[1] > end)
+ break;
+ tlv = (const tlv_log *) pos;
+ if (tlv->tag == id)
+ return pos;
+ rest = tlv->len % 4; /* padding values */
+ pos += 4 + tlv->len + rest;
+ }
+ return NULL;
+}
-int
-replace_percent_p_to_x(char *fmt)
+#define DATA_UNIT_FOR_LOG_CNT 4
+/* The #pragma below is used as a workaround (WAR) for a build failure: it
+ * ignores the dropping of the 'const' qualifier in the tlv_data assignment.
+ * The pragma disables the warning only for the following function.
+ */
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wcast-qual"
+#endif
+static int
+dhd_dbg_nan_event_handler(dhd_pub_t *dhdp, event_log_hdr_t *hdr, uint32 *data)
{
- int p_to_x_done = FALSE;
+ int ret = BCME_OK;
+ wl_event_log_id_ver_t nan_hdr;
+ log_nan_event_t *evt_payload;
+ uint16 evt_payload_len = 0, tot_payload_len = 0;
+ dhd_dbg_ring_entry_t msg_hdr;
+ bool evt_match = FALSE;
+ event_log_hdr_t *ts_hdr;
+ uint32 *ts_data;
+ char *tlvs, *dest_tlvs;
+ tlv_log *tlv_data;
+ int tlv_len = 0;
+ int i = 0, evt_idx = 0;
+ char eaddr_buf[ETHER_ADDR_STR_LEN];
- while (*fmt != '\0')
- {
- /* Skip characters will we see a % */
- if (*fmt++ != '%')
- {
- continue;
- }
+ BCM_REFERENCE(eaddr_buf);
- /*
- * Skip any flags, field width and precision:
- *Flags: Followed by %
- * #, 0, -, ' ', +
- */
- if (*fmt == '#')
- fmt++;
+ nan_hdr.t = *data;
+ DHD_DBGIF(("%s: version %u event %x\n", __FUNCTION__, nan_hdr.version,
+ nan_hdr.event));
- if (*fmt == '0' || *fmt == '-' || *fmt == '+')
- fmt++;
+ if (nan_hdr.version != DIAG_VERSION) {
+ DHD_ERROR(("Event payload version %u mismatch with current version %u\n",
+ nan_hdr.version, DIAG_VERSION));
+ return BCME_VERSION;
+ }
- /*
- * Field width:
- * An optional decimal digit string (with non-zero first digit)
- * specifying a minimum field width
- */
- while (*fmt && bcm_isdigit(*fmt))
- fmt++;
+ /* A NAN event log record should contain at least a wl_event_log_id_ver_t
+ * header and an ARM cycle count
+ */
+ if (hdr->count < NAN_EVENT_LOG_MIN_LENGTH) {
+ return BCME_BADLEN;
+ }
- /*
- * Precision:
- * An optional precision, in the form of a period ('.') followed by an
- * optional decimal digit string.
- */
- if (*fmt == '.')
- {
- fmt++;
- while (*fmt && bcm_isdigit(*fmt)) fmt++;
+ memset(&msg_hdr, 0, sizeof(dhd_dbg_ring_entry_t));
+ ts_hdr = (event_log_hdr_t *)((uint8 *)data - sizeof(event_log_hdr_t));
+ if (ts_hdr->tag == EVENT_LOG_TAG_TS) {
+ ts_data = (uint32 *)ts_hdr - ts_hdr->count;
+ msg_hdr.timestamp = (uint64)ts_data[0];
+ msg_hdr.flags |= DBG_RING_ENTRY_FLAGS_HAS_TIMESTAMP;
+ }
+ msg_hdr.type = DBG_RING_ENTRY_NAN_EVENT_TYPE;
+ for (i = 0; i < ARRAYSIZE(nan_event_map); i++) {
+ if (nan_event_map[i].fw_id == nan_hdr.event) {
+ evt_match = TRUE;
+ evt_idx = i;
+ break;
}
-
- /* If %p is seen, change it to %x */
- if (*fmt == 'p')
- {
- *fmt = 'x';
- p_to_x_done = TRUE;
+ }
+ if (evt_match) {
+ DHD_DBGIF(("%s : event (%s)\n", __FUNCTION__, nan_event_map[evt_idx].desc));
+ /* payload length for nan event data */
+ evt_payload_len = sizeof(log_nan_event_t) +
+ (hdr->count - 2) * DATA_UNIT_FOR_LOG_CNT;
+ if ((evt_payload = MALLOC(dhdp->osh, evt_payload_len)) == NULL) {
+ DHD_ERROR(("Memory allocation failed for nan evt log (%u)\n",
+ evt_payload_len));
+ return BCME_NOMEM;
}
- if (*fmt)
- fmt++;
+ evt_payload->version = NAN_EVENT_VERSION;
+ evt_payload->event = nan_event_map[evt_idx].host_id;
+ dest_tlvs = (char *)evt_payload->tlvs;
+ tot_payload_len = sizeof(log_nan_event_t);
+ tlvs = (char *)(&data[1]);
+ tlv_len = (hdr->count - 2) * DATA_UNIT_FOR_LOG_CNT;
+ for (i = 0; i < ARRAYSIZE(nan_evt_tag_map); i++) {
+ tlv_data = (tlv_log *)event_get_tlv(nan_evt_tag_map[i].fw_id,
+ tlvs, tlv_len);
+ if (tlv_data) {
+ DHD_DBGIF(("NAN evt tlv.tag(%s), tlv.len : %d, tlv.data : ",
+ nan_evt_tag_map[i].desc, tlv_data->len));
+ memcpy(dest_tlvs, tlv_data, sizeof(tlv_log) + tlv_data->len);
+ tot_payload_len += tlv_data->len + sizeof(tlv_log);
+ switch (tlv_data->tag) {
+ case TRACE_TAG_BSSID:
+ case TRACE_TAG_ADDR:
+ DHD_DBGIF(("%s\n",
+ bcm_ether_ntoa(
+ (const struct ether_addr *)tlv_data->value,
+ eaddr_buf)));
+ break;
+ default:
+ if (DHD_DBGIF_ON()) {
+ prhex(NULL, &tlv_data->value[0],
+ tlv_data->len);
+ }
+ break;
+ }
+ dest_tlvs += tlv_data->len + sizeof(tlv_log);
+ }
+ }
+ msg_hdr.flags |= DBG_RING_ENTRY_FLAGS_HAS_BINARY;
+ msg_hdr.len = tot_payload_len;
+ dhd_dbg_ring_push(dhdp, NAN_EVENT_RING_ID, &msg_hdr, evt_payload);
+ MFREE(dhdp->osh, evt_payload, evt_payload_len);
+ }
+ return ret;
+}
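+
+/* Sizing note (illustrative): evt_payload_len above reserves room for the
+ * log_nan_event_t header plus (hdr->count - 2) 32-bit words, i.e. the raw log
+ * entry minus the leading wl_event_log_id_ver_t word and the trailing ARM
+ * cycle count; tlvs/tlv_len are derived from the same two-word adjustment.
+ */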
+
+static int
+dhd_dbg_custom_evnt_handler(dhd_pub_t *dhdp, event_log_hdr_t *hdr, uint32 *data)
+{
+ int i = 0, match_idx = 0;
+ int payload_len, tlv_len;
+ uint16 tot_payload_len = 0;
+ int ret = BCME_OK;
+ int log_level;
+ wl_event_log_id_ver_t wl_log_id;
+ dhd_dbg_ring_entry_t msg_hdr;
+ log_conn_event_t *event_data;
+ bool evt_match = FALSE;
+ event_log_hdr_t *ts_hdr;
+ uint32 *ts_data;
+ char *tlvs, *dest_tlvs;
+ tlv_log *tlv_data;
+ static uint64 ts_saved = 0;
+ char eabuf[ETHER_ADDR_STR_LEN];
+ char chanbuf[CHANSPEC_STR_LEN];
+
+ BCM_REFERENCE(eabuf);
+ BCM_REFERENCE(chanbuf);
+	/* get the event type and version */
+ wl_log_id.t = *data;
+ if (wl_log_id.version != DIAG_VERSION)
+ return BCME_VERSION;
+
+	/* a custom event log should at least contain a wl_event_log_id_ver_t
+	 * header and an ARM cycle count
+	 */
+ if (hdr->count < NAN_EVENT_LOG_MIN_LENGTH) {
+ return BCME_BADLEN;
}
- return p_to_x_done;
+ ts_hdr = (event_log_hdr_t *)((uint8 *)data - sizeof(event_log_hdr_t));
+ if (ts_hdr->tag == EVENT_LOG_TAG_TS) {
+ ts_data = (uint32 *)ts_hdr - ts_hdr->count;
+ ts_saved = (uint64)ts_data[0];
+ }
+ memset(&msg_hdr, 0, sizeof(dhd_dbg_ring_entry_t));
+ msg_hdr.timestamp = ts_saved;
+
+ DHD_DBGIF(("Android Event ver %d, payload %d words, ts %llu\n",
+ (*data >> 16), hdr->count - 1, ts_saved));
+
+	/* Perform endian conversion */
+ for (i = 0; i < hdr->count; i++) {
+ /* *(data + i) = ntoh32(*(data + i)); */
+ DHD_DATA(("%08x ", *(data + i)));
+ }
+ DHD_DATA(("\n"));
+ msg_hdr.flags |= DBG_RING_ENTRY_FLAGS_HAS_TIMESTAMP;
+ msg_hdr.flags |= DBG_RING_ENTRY_FLAGS_HAS_BINARY;
+ msg_hdr.type = DBG_RING_ENTRY_EVENT_TYPE;
+
+ /* convert the data to log_conn_event_t format */
+ for (i = 0; i < ARRAYSIZE(event_map); i++) {
+ if (event_map[i].fw_id == wl_log_id.event) {
+ evt_match = TRUE;
+ match_idx = i;
+ break;
+ }
+ }
+ if (evt_match) {
+ log_level = dhdp->dbg->dbg_rings[FW_EVENT_RING_ID].log_level;
+ /* filter the data based on log_level */
+ for (i = 0; i < ARRAYSIZE(fw_event_level_map); i++) {
+ if ((fw_event_level_map[i].tag == hdr->tag) &&
+ (fw_event_level_map[i].log_level > log_level)) {
+ return BCME_OK;
+ }
+ }
+ DHD_DBGIF(("%s : event (%s)\n", __FUNCTION__, event_map[match_idx].desc));
+		/* get the payload length for event data (skip the log header and timestamp) */
+ payload_len = sizeof(log_conn_event_t) + DATA_UNIT_FOR_LOG_CNT * (hdr->count - 2);
+ event_data = MALLOC(dhdp->osh, payload_len);
+ if (!event_data) {
+ DHD_ERROR(("failed to allocate the log_conn_event_t with length(%d)\n",
+ payload_len));
+ return BCME_NOMEM;
+ }
+ event_data->event = event_map[match_idx].host_id;
+ dest_tlvs = (char *)event_data->tlvs;
+ tot_payload_len = sizeof(log_conn_event_t);
+ tlvs = (char *)(&data[1]);
+ tlv_len = (hdr->count - 2) * DATA_UNIT_FOR_LOG_CNT;
+ for (i = 0; i < ARRAYSIZE(event_tag_map); i++) {
+ tlv_data = (tlv_log *)event_get_tlv(event_tag_map[i].fw_id,
+ tlvs, tlv_len);
+ if (tlv_data) {
+ DHD_DBGIF(("tlv.tag(%s), tlv.len : %d, tlv.data : ",
+ event_tag_map[i].desc, tlv_data->len));
+ memcpy(dest_tlvs, tlv_data, sizeof(tlv_log) + tlv_data->len);
+ tot_payload_len += tlv_data->len + sizeof(tlv_log);
+ switch (tlv_data->tag) {
+ case TRACE_TAG_BSSID:
+ case TRACE_TAG_ADDR:
+ case TRACE_TAG_ADDR1:
+ case TRACE_TAG_ADDR2:
+ case TRACE_TAG_ADDR3:
+ case TRACE_TAG_ADDR4:
+ DHD_DBGIF(("%s\n",
+ bcm_ether_ntoa((const struct ether_addr *)tlv_data->value,
+ eabuf)));
+ break;
+ case TRACE_TAG_SSID:
+ DHD_DBGIF(("%s\n", tlv_data->value));
+ break;
+ case TRACE_TAG_STATUS:
+ DHD_DBGIF(("%d\n", ltoh32_ua(&tlv_data->value[0])));
+ break;
+ case TRACE_TAG_REASON_CODE:
+ DHD_DBGIF(("%d\n", ltoh16_ua(&tlv_data->value[0])));
+ break;
+ case TRACE_TAG_RATE_MBPS:
+ DHD_DBGIF(("%d Kbps\n",
+ ltoh16_ua(&tlv_data->value[0]) * 500));
+ break;
+ case TRACE_TAG_CHANNEL_SPEC:
+ DHD_DBGIF(("%s\n",
+ wf_chspec_ntoa(
+ ltoh16_ua(&tlv_data->value[0]), chanbuf)));
+ break;
+ default:
+ if (DHD_DBGIF_ON()) {
+ prhex(NULL, &tlv_data->value[0], tlv_data->len);
+ }
+ }
+ dest_tlvs += tlv_data->len + sizeof(tlv_log);
+ }
+ }
+ msg_hdr.len = tot_payload_len;
+ dhd_dbg_ring_push(dhdp, FW_EVENT_RING_ID, &msg_hdr, event_data);
+ MFREE(dhdp->osh, event_data, payload_len);
+ }
+ return ret;
}
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic pop
+#endif
/* To identify format of types %Ns where N >= 0 is a number */
bool
}
}
-/* To identify format of non string format types */
-bool
-check_valid_non_string_format(char *curr_ptr)
-{
- char *next_ptr;
- char *next_fmt_stptr;
- char valid_fmt_types[17] = {'d', 'i', 'x', 'X', 'c', 'p', 'u',
- 'f', 'F', 'e', 'E', 'g', 'G', 'o',
- 'a', 'A', 'n'};
- int i;
- bool valid = FALSE;
-
- /* Check for next % in the fmt str */
- next_fmt_stptr = bcmstrstr(curr_ptr, "%");
-
- for (next_ptr = curr_ptr; *next_ptr != '\0'; next_ptr++) {
- for (i = 0; i < (int)((sizeof(valid_fmt_types))/sizeof(valid_fmt_types[0])); i++) {
- if (*next_ptr == valid_fmt_types[i]) {
- /* Check whether format type found corresponds to current %
- * and not the next one, if exists.
- */
- if ((next_fmt_stptr == NULL) ||
- (next_fmt_stptr && (next_ptr < next_fmt_stptr))) {
- /* Not validating for length/width fields in
- * format specifier.
- */
- valid = TRUE;
- }
- goto done;
- }
- }
- }
-
-done:
- return valid;
-}
-
#define MAX_NO_OF_ARG 16
-#define FMTSTR_SIZE 200
-#define ROMSTR_SIZE 268
+#define FMTSTR_SIZE 132
+#define ROMSTR_SIZE 200
#define SIZE_LOC_STR 50
-#define LOG_PRINT_CNT_MAX 16u
-#define EL_PARSE_VER "V02"
-#define EL_MSEC_PER_SEC 1000
-#ifdef DHD_LOG_PRINT_RATE_LIMIT
-#define MAX_LOG_PRINT_COUNT 100u
-#define LOG_PRINT_THRESH (1u * USEC_PER_SEC)
-#endif // endif
-
-bool
-dhd_dbg_process_event_log_hdr(event_log_hdr_t *log_hdr, prcd_event_log_hdr_t *prcd_log_hdr)
-{
- event_log_extended_hdr_t *ext_log_hdr;
- uint16 event_log_fmt_num;
- uint8 event_log_hdr_type;
-
- /* Identify the type of event tag, payload type etc.. */
- event_log_hdr_type = log_hdr->fmt_num & DHD_EVENT_LOG_HDR_MASK;
- event_log_fmt_num = (log_hdr->fmt_num >> DHD_EVENT_LOG_FMT_NUM_OFFSET) &
- DHD_EVENT_LOG_FMT_NUM_MASK;
-
- switch (event_log_hdr_type) {
- case DHD_OW_NB_EVENT_LOG_HDR:
- prcd_log_hdr->ext_event_log_hdr = FALSE;
- prcd_log_hdr->binary_payload = FALSE;
- break;
- case DHD_TW_NB_EVENT_LOG_HDR:
- prcd_log_hdr->ext_event_log_hdr = TRUE;
- prcd_log_hdr->binary_payload = FALSE;
- break;
- case DHD_BI_EVENT_LOG_HDR:
- if (event_log_fmt_num == DHD_OW_BI_EVENT_FMT_NUM) {
- prcd_log_hdr->ext_event_log_hdr = FALSE;
- prcd_log_hdr->binary_payload = TRUE;
- } else if (event_log_fmt_num == DHD_TW_BI_EVENT_FMT_NUM) {
- prcd_log_hdr->ext_event_log_hdr = TRUE;
- prcd_log_hdr->binary_payload = TRUE;
- } else {
- DHD_ERROR(("%s: invalid format number 0x%X\n",
- __FUNCTION__, event_log_fmt_num));
- return FALSE;
- }
- break;
- case DHD_INVALID_EVENT_LOG_HDR:
- default:
- DHD_ERROR(("%s: invalid event log header type 0x%X\n",
- __FUNCTION__, event_log_hdr_type));
- return FALSE;
- }
-
- /* Parse extended and legacy event log headers and populate prcd_event_log_hdr_t */
- if (prcd_log_hdr->ext_event_log_hdr) {
- ext_log_hdr = (event_log_extended_hdr_t *)
- ((uint8 *)log_hdr - sizeof(event_log_hdr_t));
- prcd_log_hdr->tag = ((ext_log_hdr->extended_tag &
- DHD_TW_VALID_TAG_BITS_MASK) << DHD_TW_EVENT_LOG_TAG_OFFSET) | log_hdr->tag;
- } else {
- prcd_log_hdr->tag = log_hdr->tag;
- }
- prcd_log_hdr->count = log_hdr->count;
- prcd_log_hdr->fmt_num_raw = log_hdr->fmt_num;
- prcd_log_hdr->fmt_num = event_log_fmt_num;
-
- /* update arm cycle */
- /*
- * For loegacy event tag :-
- * |payload........|Timestamp| Tag
- *
- * For extended event tag:-
- * |payload........|Timestamp|extended Tag| Tag.
- *
- */
- prcd_log_hdr->armcycle = prcd_log_hdr->ext_event_log_hdr ?
- *(uint32 *)(log_hdr - EVENT_TAG_TIMESTAMP_EXT_OFFSET) :
- *(uint32 *)(log_hdr - EVENT_TAG_TIMESTAMP_OFFSET);
-
- /* update event log data pointer address */
- prcd_log_hdr->log_ptr =
- (uint32 *)log_hdr - log_hdr->count - prcd_log_hdr->ext_event_log_hdr;
-
- /* handle error cases above this */
- return TRUE;
-}
-
+static uint64 verboselog_ts_saved = 0;
static void
-dhd_dbg_verboselog_handler(dhd_pub_t *dhdp, prcd_event_log_hdr_t *plog_hdr,
- void *raw_event_ptr, uint32 logset, uint16 block, uint32* data)
+dhd_dbg_verboselog_handler(dhd_pub_t *dhdp, event_log_hdr_t *hdr,
+ void *raw_event_ptr)
{
event_log_hdr_t *ts_hdr;
- uint32 *log_ptr = plog_hdr->log_ptr;
+ uint32 *log_ptr = (uint32 *)hdr - hdr->count;
char fmtstr_loc_buf[ROMSTR_SIZE] = { 0 };
uint32 rom_str_len = 0;
uint32 *ts_data;
return;
}
- if (log_ptr < data) {
- DHD_ERROR(("Invalid log pointer, logptr : %p data : %p \n", log_ptr, data));
- return;
+ /* Get time stamp if it's updated */
+ ts_hdr = (event_log_hdr_t *)((char *)log_ptr - sizeof(event_log_hdr_t));
+ if (ts_hdr->tag == EVENT_LOG_TAG_TS) {
+ ts_data = (uint32 *)ts_hdr - ts_hdr->count;
+ verboselog_ts_saved = (uint64)ts_data[0];
+ DHD_MSGTRACE_LOG(("EVENT_LOG_TS[0x%08x]: SYS:%08x CPU:%08x\n",
+ ts_data[ts_hdr->count - 1], ts_data[0], ts_data[1]));
}
- BCM_REFERENCE(ts_hdr);
- BCM_REFERENCE(ts_data);
-
- if (log_ptr > data) {
- /* Get time stamp if it's updated */
- ts_hdr = (event_log_hdr_t *)((char *)log_ptr - sizeof(event_log_hdr_t));
- if (ts_hdr->tag == EVENT_LOG_TAG_TS) {
- ts_data = (uint32 *)ts_hdr - ts_hdr->count;
- if (ts_data >= data) {
- DHD_MSGTRACE_LOG(("EVENT_LOG_TS[0x%08x]: SYS:%08x CPU:%08x\n",
- ts_data[ts_hdr->count - 1], ts_data[0], ts_data[1]));
- }
- }
- }
-
- if (plog_hdr->tag == EVENT_LOG_TAG_ROM_PRINTF) {
- rom_str_len = (plog_hdr->count - 1) * sizeof(uint32);
+ if (hdr->tag == EVENT_LOG_TAG_ROM_PRINTF) {
+ rom_str_len = (hdr->count - 1) * sizeof(uint32);
if (rom_str_len >= (ROMSTR_SIZE -1))
rom_str_len = ROMSTR_SIZE - 1;
fmtstr_loc_buf[rom_str_len] = '\0';
DHD_MSGTRACE_LOG(("EVENT_LOG_ROM[0x%08x]: %s",
- log_ptr[plog_hdr->count - 1], fmtstr_loc_buf));
+ log_ptr[hdr->count - 1], fmtstr_loc_buf));
/* Add newline if missing */
if (fmtstr_loc_buf[strlen(fmtstr_loc_buf) - 1] != '\n')
return;
}
- if (plog_hdr->tag == EVENT_LOG_TAG_MSCHPROFILE ||
- plog_hdr->tag == EVENT_LOG_TAG_MSCHPROFILE_TLV) {
- wl_mschdbg_verboselog_handler(dhdp, raw_event_ptr, plog_hdr, log_ptr);
+ if (hdr->tag == EVENT_LOG_TAG_MSCHPROFILE || hdr->tag == EVENT_LOG_TAG_MSCHPROFILE_TLV) {
+ wl_mschdbg_verboselog_handler(dhdp, raw_event_ptr, hdr->tag, log_ptr);
return;
}
/* print the message out in a logprint */
- dhd_dbg_verboselog_printf(dhdp, plog_hdr, raw_event_ptr, log_ptr, logset, block);
+ dhd_dbg_verboselog_printf(dhdp, hdr, raw_event_ptr, log_ptr);
}
void
-dhd_dbg_verboselog_printf(dhd_pub_t *dhdp, prcd_event_log_hdr_t *plog_hdr,
- void *raw_event_ptr, uint32 *log_ptr, uint32 logset, uint16 block)
+dhd_dbg_verboselog_printf(dhd_pub_t *dhdp, event_log_hdr_t *hdr,
+ void *raw_event_ptr, uint32 *log_ptr)
{
dhd_event_log_t *raw_event = (dhd_event_log_t *)raw_event_ptr;
uint16 count;
} u_arg;
u_arg arg[MAX_NO_OF_ARG] = {{0}};
char *c_ptr = NULL;
- struct bcmstrbuf b;
-#ifdef DHD_LOG_PRINT_RATE_LIMIT
- static int log_print_count = 0;
- static uint64 ts0 = 0;
- uint64 ts1 = 0;
-#endif /* DHD_LOG_PRINT_RATE_LIMIT */
BCM_REFERENCE(arg);
-#ifdef DHD_LOG_PRINT_RATE_LIMIT
- if (!ts0)
- ts0 = OSL_SYSUPTIME_US();
-
- ts1 = OSL_SYSUPTIME_US();
-
- if (((ts1 - ts0) <= LOG_PRINT_THRESH) && (log_print_count >= MAX_LOG_PRINT_COUNT)) {
- log_print_threshold = 1;
- ts0 = 0;
- log_print_count = 0;
- DHD_ERROR(("%s: Log print water mark is reached,"
- " console logs are dumped only to debug_dump file\n", __FUNCTION__));
- } else if ((ts1 - ts0) > LOG_PRINT_THRESH) {
- log_print_threshold = 0;
- ts0 = 0;
- log_print_count = 0;
+ if (!raw_event) {
+ return;
}
-#endif /* DHD_LOG_PRINT_RATE_LIMIT */
- /* print the message out in a logprint. Logprint expects raw format number */
- if (!(raw_event->fmts)) {
+ /* print the message out in a logprint */
+ if (!(raw_event->fmts) || hdr->fmt_num == 0xffff) {
if (dhdp->dbg) {
log_level = dhdp->dbg->dbg_rings[FW_VERBOSE_RING_ID].log_level;
for (id = 0; id < ARRAYSIZE(fw_verbose_level_map); id++) {
- if ((fw_verbose_level_map[id].tag == plog_hdr->tag) &&
+ if ((fw_verbose_level_map[id].tag == hdr->tag) &&
(fw_verbose_level_map[id].log_level > log_level))
return;
}
}
- if (plog_hdr->binary_payload) {
- DHD_ECNTR_LOG(("%06d.%03d EL:tag=%d len=%d fmt=0x%x",
- (uint32)(log_ptr[plog_hdr->count - 1] / EL_MSEC_PER_SEC),
- (uint32)(log_ptr[plog_hdr->count - 1] % EL_MSEC_PER_SEC),
- plog_hdr->tag,
- plog_hdr->count,
- plog_hdr->fmt_num_raw));
-
- for (count = 0; count < (plog_hdr->count - 1); count++) {
- if (count && (count % LOG_PRINT_CNT_MAX == 0)) {
- DHD_ECNTR_LOG(("\n\t%08x", log_ptr[count]));
- } else {
- DHD_ECNTR_LOG((" %08x", log_ptr[count]));
- }
- }
- DHD_ECNTR_LOG(("\n"));
+ DHD_EVENT(("%d.%d EL:tag=%d len=%d fmt=0x%x",
+ (uint32)verboselog_ts_saved / 1000,
+ (uint32)verboselog_ts_saved % 1000,
+ hdr->tag,
+ hdr->count,
+ hdr->fmt_num));
+
+ for (count = 0; count < (hdr->count-1); count++) {
+ if (count % 8 == 0)
+ DHD_EVENT(("\n\t%08x", log_ptr[count]));
+ else
+ DHD_EVENT((" %08x", log_ptr[count]));
}
- else {
- bcm_binit(&b, fmtstr_loc_buf, FMTSTR_SIZE);
- bcm_bprintf(&b, "%06d.%03d EL:%s:%u:%u %d %d 0x%x",
- (uint32)(log_ptr[plog_hdr->count - 1] / EL_MSEC_PER_SEC),
- (uint32)(log_ptr[plog_hdr->count - 1] % EL_MSEC_PER_SEC),
- EL_PARSE_VER, logset, block,
- plog_hdr->tag,
- plog_hdr->count,
- plog_hdr->fmt_num_raw);
- for (count = 0; count < (plog_hdr->count - 1); count++) {
- bcm_bprintf(&b, " %x", log_ptr[count]);
- }
+ DHD_EVENT(("\n"));
- /* ensure preserve fw logs go to debug_dump only in case of customer4 */
- if (logset < dhdp->event_log_max_sets &&
- ((0x01u << logset) & dhdp->logset_prsrv_mask)) {
- DHD_PRSRV_MEM(("%s\n", b.origbuf));
- } else {
- DHD_FWLOG(("%s\n", b.origbuf));
-#ifdef DHD_LOG_PRINT_RATE_LIMIT
- log_print_count++;
-#endif /* DHD_LOG_PRINT_RATE_LIMIT */
- }
- }
return;
}
return;
}
- if ((plog_hdr->fmt_num) < raw_event->num_fmts) {
- if (plog_hdr->tag == EVENT_LOG_TAG_MSCHPROFILE) {
+ if ((hdr->fmt_num >> 2) < raw_event->num_fmts) {
+ if (hdr->tag == EVENT_LOG_TAG_MSCHPROFILE) {
snprintf(fmtstr_loc_buf, FMTSTR_SIZE, "%s",
- raw_event->fmts[plog_hdr->fmt_num]);
- plog_hdr->count++;
+ raw_event->fmts[hdr->fmt_num >> 2]);
+ hdr->count++;
} else {
- snprintf(fmtstr_loc_buf, FMTSTR_SIZE, "CONSOLE_E:%u:%u %06d.%03d %s",
- logset, block,
- (uint32)(log_ptr[plog_hdr->count - 1] / EL_MSEC_PER_SEC),
- (uint32)(log_ptr[plog_hdr->count - 1] % EL_MSEC_PER_SEC),
- raw_event->fmts[plog_hdr->fmt_num]);
+ snprintf(fmtstr_loc_buf, FMTSTR_SIZE, "CONSOLE_E: %6d.%3d %s",
+ log_ptr[hdr->count-1]/1000, (log_ptr[hdr->count - 1] % 1000),
+ raw_event->fmts[hdr->fmt_num >> 2]);
}
c_ptr = fmtstr_loc_buf;
} else {
- /* for ecounters, don't print the error as it will flood */
- if ((plog_hdr->fmt_num != DHD_OW_BI_EVENT_FMT_NUM) &&
- (plog_hdr->fmt_num != DHD_TW_BI_EVENT_FMT_NUM)) {
- DHD_ERROR(("%s: fmt number: 0x%x out of range\n",
- __FUNCTION__, plog_hdr->fmt_num));
- } else {
- DHD_INFO(("%s: fmt number: 0x%x out of range\n",
- __FUNCTION__, plog_hdr->fmt_num));
- }
-
- goto exit;
- }
-
- if (plog_hdr->count > MAX_NO_OF_ARG) {
- DHD_ERROR(("%s: plog_hdr->count(%d) out of range\n",
- __FUNCTION__, plog_hdr->count));
+ DHD_ERROR(("%s: fmt number out of range \n", __FUNCTION__));
goto exit;
}
- /* print the format string which will be needed for debugging incorrect formats */
- DHD_INFO(("%s: fmtstr_loc_buf = %s\n", __FUNCTION__, fmtstr_loc_buf));
-
- /* Replace all %p to %x to handle 32 bit %p */
- replace_percent_p_to_x(fmtstr_loc_buf);
-
- for (count = 0; count < (plog_hdr->count - 1); count++) {
+ for (count = 0; count < (hdr->count - 1); count++) {
if (c_ptr != NULL)
if ((c_ptr = bcmstrstr(c_ptr, "%")) != NULL)
c_ptr++;
"(s)0x%x", log_ptr[count]);
arg[count].addr = str_buf[count];
}
- } else if (check_valid_non_string_format(c_ptr)) {
- /* Other than string format */
- arg[count].val = log_ptr[count];
} else {
- *(c_ptr - 1) = '\0';
- break;
+ /* Other than string */
+ arg[count].val = log_ptr[count];
}
}
}
- /* ensure preserve fw logs go to debug_dump only in case of customer4 */
- if (logset < dhdp->event_log_max_sets &&
- ((0x01u << logset) & dhdp->logset_prsrv_mask)) {
- DHD_PRSRV_MEM((fmtstr_loc_buf, arg[0], arg[1], arg[2], arg[3],
- arg[4], arg[5], arg[6], arg[7], arg[8], arg[9], arg[10],
- arg[11], arg[12], arg[13], arg[14], arg[15]));
- } else {
- DHD_FWLOG((fmtstr_loc_buf, arg[0], arg[1], arg[2], arg[3],
- arg[4], arg[5], arg[6], arg[7], arg[8], arg[9], arg[10],
- arg[11], arg[12], arg[13], arg[14], arg[15]));
-#ifdef DHD_LOG_PRINT_RATE_LIMIT
- log_print_count++;
-#endif /* DHD_LOG_PRINT_RATE_LIMIT */
- }
+ /* Print FW logs */
+ DHD_FWLOG((fmtstr_loc_buf, arg[0], arg[1], arg[2], arg[3],
+ arg[4], arg[5], arg[6], arg[7], arg[8], arg[9], arg[10],
+ arg[11], arg[12], arg[13], arg[14], arg[15]));
exit:
MFREE(dhdp->osh, str_buf, (MAX_NO_OF_ARG * SIZE_LOC_STR));
}
-void
+static void
dhd_dbg_msgtrace_log_parser(dhd_pub_t *dhdp, void *event_data,
- void *raw_event_ptr, uint datalen, bool msgtrace_hdr_present,
- uint32 msgtrace_seqnum)
+ void *raw_event_ptr, uint datalen)
{
msgtrace_hdr_t *hdr;
- char *data, *tmpdata;
- const uint32 log_hdr_len = sizeof(event_log_hdr_t);
+ char *data;
+ int id;
+ uint32 log_hdr_len = sizeof(event_log_hdr_t);
uint32 log_pyld_len;
static uint32 seqnum_prev = 0;
event_log_hdr_t *log_hdr;
bool msg_processed = FALSE;
- prcd_event_log_hdr_t prcd_log_hdr;
- prcd_event_log_hdr_t *plog_hdr;
+ uint32 *log_ptr = NULL;
dll_t list_head, *cur;
loglist_item_t *log_item;
+ int32 nan_evt_ring_log_level = 0;
dhd_dbg_ring_entry_t msg_hdr;
char *logbuf;
struct tracelog_header *logentry_header;
- uint ring_data_len = 0;
- bool ecntr_pushed = FALSE;
- bool rtt_pushed = FALSE;
- bool dll_inited = FALSE;
- uint32 logset = 0;
- uint16 block = 0;
- bool event_log_max_sets_queried;
- uint32 event_log_max_sets;
- uint min_expected_len = 0;
- uint16 len_chk = 0;
-
- BCM_REFERENCE(ecntr_pushed);
- BCM_REFERENCE(rtt_pushed);
- BCM_REFERENCE(len_chk);
-
- /* store event_logset_queried and event_log_max_sets in local variables
- * to avoid race conditions as they were set from different contexts(preinit)
- */
- event_log_max_sets_queried = dhdp->event_log_max_sets_queried;
- /* Make sure queried is read first with wmb and then max_sets,
- * as it is done in reverse order during preinit ioctls.
- */
- OSL_SMP_WMB();
- event_log_max_sets = dhdp->event_log_max_sets;
-
- if (msgtrace_hdr_present)
- min_expected_len = (MSGTRACE_HDRLEN + EVENT_LOG_BLOCK_LEN);
- else
- min_expected_len = EVENT_LOG_BLOCK_LEN;
/* log trace event consists of:
* msgtrace header
* event log block header
* event log payload
*/
- if (!event_data || (datalen <= min_expected_len)) {
- DHD_ERROR(("%s: Not processing due to invalid event_data : %p or length : %d\n",
- __FUNCTION__, event_data, datalen));
- if (event_data && msgtrace_hdr_present) {
- prhex("event_data dump", event_data, datalen);
- tmpdata = (char *)event_data + MSGTRACE_HDRLEN;
- if (tmpdata) {
- DHD_ERROR(("EVENT_LOG_HDR[0x%x]: Set: 0x%08x length = %d\n",
- ltoh16(*((uint16 *)(tmpdata+2))),
- ltoh32(*((uint32 *)(tmpdata + 4))),
- ltoh16(*((uint16 *)(tmpdata)))));
- }
- } else if (!event_data) {
- DHD_ERROR(("%s: event_data is NULL, cannot dump prhex\n", __FUNCTION__));
- }
+ if (datalen <= MSGTRACE_HDRLEN + EVENT_LOG_BLOCK_HDRLEN) {
return;
}
+ hdr = (msgtrace_hdr_t *)event_data;
+ data = (char *)event_data + MSGTRACE_HDRLEN;
+ datalen -= MSGTRACE_HDRLEN;
- if (msgtrace_hdr_present) {
- hdr = (msgtrace_hdr_t *)event_data;
- data = (char *)event_data + MSGTRACE_HDRLEN;
- datalen -= MSGTRACE_HDRLEN;
- msgtrace_seqnum = ntoh32(hdr->seqnum);
- } else {
- data = (char *)event_data;
- }
-
- if (dhd_dbg_msgtrace_seqchk(&seqnum_prev, msgtrace_seqnum))
+ if (dhd_dbg_msgtrace_seqchk(&seqnum_prev, ntoh32(hdr->seqnum)))
return;
/* Save the whole message to event log ring */
logentry_header = (struct tracelog_header *)logbuf;
logentry_header->magic_num = TRACE_LOG_MAGIC_NUMBER;
logentry_header->buf_size = datalen;
- logentry_header->seq_num = msgtrace_seqnum;
+ logentry_header->seq_num = hdr->seqnum;
msg_hdr.type = DBG_RING_ENTRY_DATA_TYPE;
- ring_data_len = datalen + sizeof(*logentry_header);
-
if ((sizeof(*logentry_header) + datalen) > PAYLOAD_MAX_LEN) {
DHD_ERROR(("%s:Payload len=%u exceeds max len\n", __FUNCTION__,
((uint)sizeof(*logentry_header) + datalen)));
- goto exit;
+ VMFREE(dhdp->osh, logbuf, sizeof(*logentry_header) + datalen);
+ return;
}
msg_hdr.len = sizeof(*logentry_header) + datalen;
memcpy(logbuf + sizeof(*logentry_header), data, datalen);
- DHD_DBGIF(("%s: datalen %d %d\n", __FUNCTION__, msg_hdr.len, datalen));
- dhd_dbg_push_to_ring(dhdp, FW_VERBOSE_RING_ID, &msg_hdr, logbuf);
+ dhd_dbg_ring_push(dhdp, FW_VERBOSE_RING_ID, &msg_hdr, logbuf);
+ VMFREE(dhdp->osh, logbuf, sizeof(*logentry_header) + datalen);
/* Print sequence number, originating set and length of received
* event log buffer. Refer to event log buffer structure in
DHD_MSGTRACE_LOG(("EVENT_LOG_HDR[0x%x]: Set: 0x%08x length = %d\n",
ltoh16(*((uint16 *)(data+2))), ltoh32(*((uint32 *)(data + 4))),
ltoh16(*((uint16 *)(data)))));
-
- logset = ltoh32(*((uint32 *)(data + 4)));
-
- if (logset >= event_log_max_sets) {
- DHD_ERROR(("%s logset: %d max: %d out of range queried: %d\n",
- __FUNCTION__, logset, event_log_max_sets, event_log_max_sets_queried));
-#ifdef DHD_FW_COREDUMP
- if (event_log_max_sets_queried) {
- DHD_ERROR(("%s: collect socram for DUMP_TYPE_LOGSET_BEYOND_RANGE\n",
- __FUNCTION__));
- dhdp->memdump_type = DUMP_TYPE_LOGSET_BEYOND_RANGE;
- dhd_bus_mem_dump(dhdp);
- }
-#endif /* DHD_FW_COREDUMP */
- }
-
- block = ltoh16(*((uint16 *)(data+2)));
-
data += EVENT_LOG_BLOCK_HDRLEN;
datalen -= EVENT_LOG_BLOCK_HDRLEN;
* data log_hdr
*/
dll_init(&list_head);
- dll_inited = TRUE;
-
while (datalen > log_hdr_len) {
log_hdr = (event_log_hdr_t *)(data + datalen - log_hdr_len);
- memset(&prcd_log_hdr, 0, sizeof(prcd_log_hdr));
- if (!dhd_dbg_process_event_log_hdr(log_hdr, &prcd_log_hdr)) {
- DHD_ERROR(("%s: Error while parsing event log header\n",
- __FUNCTION__));
- }
-
/* skip zero padding at end of frame */
- if (prcd_log_hdr.tag == EVENT_LOG_TAG_NULL) {
+ if (log_hdr->tag == EVENT_LOG_TAG_NULL) {
datalen -= log_hdr_len;
continue;
}
- /* Check argument count (for non-ecounter events only),
- * any event log should contain at least
+ /* Check argument count, any event log should contain at least
* one argument (4 bytes) for arm cycle count and up to 16
- * arguments except EVENT_LOG_TAG_STATS which could use the
- * whole payload of 256 words
+ * arguments when the format is valid
*/
- if (prcd_log_hdr.count == 0) {
+ if (log_hdr->count == 0) {
break;
}
- /* Both tag_stats and proxd are binary payloads so skip
- * argument count check for these.
- */
- if ((prcd_log_hdr.tag != EVENT_LOG_TAG_STATS) &&
- (prcd_log_hdr.tag != EVENT_LOG_TAG_PROXD_SAMPLE_COLLECT) &&
- (prcd_log_hdr.tag != EVENT_LOG_TAG_ROAM_ENHANCED_LOG) &&
- (prcd_log_hdr.count > MAX_NO_OF_ARG)) {
+ if ((log_hdr->count > MAX_NO_OF_ARG) && (log_hdr->fmt_num != 0xffff)) {
break;
}
- log_pyld_len = (prcd_log_hdr.count + prcd_log_hdr.ext_event_log_hdr) *
- DATA_UNIT_FOR_LOG_CNT;
+ log_pyld_len = log_hdr->count * DATA_UNIT_FOR_LOG_CNT;
/* log data should not cross the event data boundary */
- if ((uint32)((char *)log_hdr - data) < log_pyld_len) {
+ if ((char *)log_hdr - data < log_pyld_len)
break;
- }
/* skip 4 bytes time stamp packet */
- if (prcd_log_hdr.tag == EVENT_LOG_TAG_TS) {
- datalen -= (log_pyld_len + log_hdr_len);
+ if (log_hdr->tag == EVENT_LOG_TAG_TS) {
+ datalen -= log_pyld_len + log_hdr_len;
continue;
}
if (!(log_item = MALLOC(dhdp->osh, sizeof(*log_item)))) {
__FUNCTION__));
break;
}
-
- log_item->prcd_log_hdr.tag = prcd_log_hdr.tag;
- log_item->prcd_log_hdr.count = prcd_log_hdr.count;
- log_item->prcd_log_hdr.fmt_num = prcd_log_hdr.fmt_num;
- log_item->prcd_log_hdr.fmt_num_raw = prcd_log_hdr.fmt_num_raw;
- log_item->prcd_log_hdr.armcycle = prcd_log_hdr.armcycle;
- log_item->prcd_log_hdr.log_ptr = prcd_log_hdr.log_ptr;
- log_item->prcd_log_hdr.payload_len = prcd_log_hdr.payload_len;
- log_item->prcd_log_hdr.ext_event_log_hdr = prcd_log_hdr.ext_event_log_hdr;
- log_item->prcd_log_hdr.binary_payload = prcd_log_hdr.binary_payload;
-
+ log_item->hdr = log_hdr;
dll_insert(&log_item->list, &list_head);
datalen -= (log_pyld_len + log_hdr_len);
}
#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wcast-qual"
-#endif // endif
+#endif
log_item = (loglist_item_t *)container_of(cur, loglist_item_t, list);
#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
#pragma GCC diagnostic pop
-#endif // endif
-
- plog_hdr = &log_item->prcd_log_hdr;
-
-#if defined(EWP_ECNTRS_LOGGING) && defined(DHD_LOG_DUMP)
- /* Ecounter tag can be time_data or log_stats+binary paloaod */
- if ((plog_hdr->tag == EVENT_LOG_TAG_ECOUNTERS_TIME_DATA) ||
- ((plog_hdr->tag == EVENT_LOG_TAG_STATS) &&
- (plog_hdr->binary_payload))) {
- if (!ecntr_pushed && dhd_log_dump_ecntr_enabled()) {
- /*
- * check msg hdr len before pushing.
- * FW msg_hdr.len includes length of event log hdr,
- * logentry header and payload.
- */
- len_chk = (sizeof(*logentry_header) + sizeof(*log_hdr) +
- PAYLOAD_ECNTR_MAX_LEN);
- /* account extended event log header(extended_event_log_hdr) */
- if (plog_hdr->ext_event_log_hdr) {
- len_chk += sizeof(*log_hdr);
- }
- if (msg_hdr.len > len_chk) {
- DHD_ERROR(("%s: EVENT_LOG_VALIDATION_FAILS: "
- "msg_hdr.len=%u, max allowed for ecntrs=%u\n",
- __FUNCTION__, msg_hdr.len, len_chk));
- goto exit;
- }
- dhd_dbg_ring_push(dhdp->ecntr_dbg_ring, &msg_hdr, logbuf);
- ecntr_pushed = TRUE;
- }
- }
-#endif /* EWP_ECNTRS_LOGGING && DHD_LOG_DUMP */
+#endif
+ log_hdr = log_item->hdr;
+ log_ptr = (uint32 *)log_hdr - log_hdr->count;
+ dll_delete(cur);
+ MFREE(dhdp->osh, log_item, sizeof(*log_item));
- if (plog_hdr->tag == EVENT_LOG_TAG_ROAM_ENHANCED_LOG) {
- print_roam_enhanced_log(plog_hdr);
- msg_processed = TRUE;
- }
-#if defined(EWP_RTT_LOGGING) && defined(DHD_LOG_DUMP)
- if ((plog_hdr->tag == EVENT_LOG_TAG_PROXD_SAMPLE_COLLECT) &&
- plog_hdr->binary_payload) {
- if (!rtt_pushed && dhd_log_dump_rtt_enabled()) {
- /*
- * check msg hdr len before pushing.
- * FW msg_hdr.len includes length of event log hdr,
- * logentry header and payload.
- */
- len_chk = (sizeof(*logentry_header) + sizeof(*log_hdr) +
- PAYLOAD_RTT_MAX_LEN);
- /* account extended event log header(extended_event_log_hdr) */
- if (plog_hdr->ext_event_log_hdr) {
- len_chk += sizeof(*log_hdr);
+		/* Before DHD debugability was implemented, WLC_E_TRACE had been
+		 * used to carry verbose logging from firmware. We need to
+		 * be able to handle those messages even without an initialized
+		 * debug layer.
+		 */
+ if (dhdp->dbg) {
+		/* check the data against the NAN event ring first, since its table is small */
+		/* process NAN events only if the user has enabled logging for that ring */
+ nan_evt_ring_log_level = dhdp->dbg->dbg_rings[NAN_EVENT_RING_ID].log_level;
+ if (dhdp->dbg->dbg_rings[NAN_EVENT_RING_ID].log_level) {
+ for (id = 0; id < ARRAYSIZE(nan_event_level_map); id++) {
+ if (nan_event_level_map[id].tag == log_hdr->tag) {
+					/* don't process if the tag's log level is greater
+					 * than the ring's log level
+					 */
+ if (nan_event_level_map[id].log_level >
+ nan_evt_ring_log_level) {
+ msg_processed = TRUE;
+ break;
+ }
+ /* In case of BCME_VERSION error,
+ * this is not NAN event type data
+ */
+ if (dhd_dbg_nan_event_handler(dhdp,
+ log_hdr, log_ptr) != BCME_VERSION) {
+ msg_processed = TRUE;
+ }
+ break;
+ }
}
- if (msg_hdr.len > len_chk) {
- DHD_ERROR(("%s: EVENT_LOG_VALIDATION_FAILS: "
- "msg_hdr.len=%u, max allowed for ecntrs=%u\n",
- __FUNCTION__, msg_hdr.len, len_chk));
- goto exit;
+ }
+ if (!msg_processed) {
+ /* check the data for event ring */
+ for (id = 0; id < ARRAYSIZE(fw_event_level_map); id++) {
+ if (fw_event_level_map[id].tag == log_hdr->tag) {
+ /* In case of BCME_VERSION error,
+ * this is not event type data
+ */
+ if (dhd_dbg_custom_evnt_handler(dhdp,
+ log_hdr, log_ptr) != BCME_VERSION) {
+ msg_processed = TRUE;
+ }
+ break;
+ }
}
- dhd_dbg_ring_push(dhdp->rtt_dbg_ring, &msg_hdr, logbuf);
- rtt_pushed = TRUE;
}
}
-#endif /* EWP_RTT_LOGGING && DHD_LOG_DUMP */
-
- if (!msg_processed) {
- dhd_dbg_verboselog_handler(dhdp, plog_hdr, raw_event_ptr,
- logset, block, (uint32 *)data);
- }
- dll_delete(cur);
- MFREE(dhdp->osh, log_item, sizeof(*log_item));
-
- }
- BCM_REFERENCE(log_hdr);
+ if (!msg_processed)
+ dhd_dbg_verboselog_handler(dhdp, log_hdr, raw_event_ptr);
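+			/* Illustrative summary: each log record is matched against the
+			 * NAN event ring first, then the firmware event ring, and
+			 * anything still unprocessed falls through to the generic
+			 * verbose-log handler above.
+			 */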
-exit:
- while (dll_inited && (!dll_empty(&list_head))) {
- cur = dll_head_p(&list_head);
-#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
-#pragma GCC diagnostic push
-#pragma GCC diagnostic ignored "-Wcast-qual"
-#endif // endif
- log_item = (loglist_item_t *)container_of(cur, loglist_item_t, list);
-#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
-#pragma GCC diagnostic pop
-#endif // endif
- dll_delete(cur);
- MFREE(dhdp->osh, log_item, sizeof(*log_item));
}
- VMFREE(dhdp->osh, logbuf, ring_data_len);
}
#else /* !SHOW_LOGTRACE */
static INLINE void dhd_dbg_verboselog_handler(dhd_pub_t *dhdp,
- prcd_event_log_hdr_t *plog_hdr, void *raw_event_ptr, uint32 logset, uint16 block,
- uint32 *data) {};
-INLINE void dhd_dbg_msgtrace_log_parser(dhd_pub_t *dhdp,
- void *event_data, void *raw_event_ptr, uint datalen,
- bool msgtrace_hdr_present, uint32 msgtrace_seqnum) {};
+ event_log_hdr_t *hdr, void *raw_event_ptr) {};
+static INLINE void dhd_dbg_msgtrace_log_parser(dhd_pub_t *dhdp,
+ void *event_data, void *raw_event_ptr, uint datalen) {};
#endif /* SHOW_LOGTRACE */
#ifndef MACOSX_DHD
void
if (hdr->trace_type == MSGTRACE_HDR_TYPE_MSG)
dhd_dbg_msgtrace_msg_parser(event_data);
else if (hdr->trace_type == MSGTRACE_HDR_TYPE_LOG)
- dhd_dbg_msgtrace_log_parser(dhdp, event_data, raw_event_ptr, datalen,
- TRUE, 0);
+ dhd_dbg_msgtrace_log_parser(dhdp, event_data, raw_event_ptr, datalen);
}
-
#endif /* MACOSX_DHD */
+static int
+dhd_dbg_ring_init(dhd_pub_t *dhdp, dhd_dbg_ring_t *ring, uint16 id, uint8 *name,
+ uint32 ring_sz, int section)
+{
+ void *buf;
+ unsigned long flags;
+#ifdef CONFIG_DHD_USE_STATIC_BUF
+ buf = DHD_OS_PREALLOC(dhdp, section, ring_sz);
+#else
+ buf = MALLOCZ(dhdp->osh, ring_sz);
+#endif
+ if (!buf)
+ return BCME_NOMEM;
+
+ ring->lock = dhd_os_spin_lock_init(dhdp->osh);
+
+ flags = dhd_os_spin_lock(ring->lock);
+ ring->id = id;
+ strncpy(ring->name, name, DBGRING_NAME_MAX);
+ ring->name[DBGRING_NAME_MAX - 1] = 0;
+ ring->ring_size = ring_sz;
+ ring->wp = ring->rp = 0;
+ ring->ring_buf = buf;
+ ring->threshold = DBGRING_FLUSH_THRESHOLD(ring);
+ ring->state = RING_SUSPEND;
+ ring->sched_pull = TRUE;
+ ring->rem_len = 0;
+ dhd_os_spin_unlock(ring->lock, flags);
+
+ return BCME_OK;
+}
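+
+/* Note (assumption): with CONFIG_DHD_USE_STATIC_BUF the ring storage is carved
+ * from one of the preallocated DHD_PREALLOC_* sections rather than the heap,
+ * which is why dhd_dbg_ring_deinit() below only MFREEs the buffer when the
+ * static-buffer option is not set.
+ */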
+
+static void
+dhd_dbg_ring_deinit(dhd_pub_t *dhdp, dhd_dbg_ring_t *ring)
+{
+ void *buf;
+ uint32 ring_sz;
+ unsigned long flags;
+
+ if (!ring->ring_buf)
+ return;
+
+ flags = dhd_os_spin_lock(ring->lock);
+ ring->id = 0;
+ ring->name[0] = 0;
+ ring_sz = ring->ring_size;
+ ring->ring_size = 0;
+ ring->wp = ring->rp = 0;
+ buf = ring->ring_buf;
+ ring->ring_buf = NULL;
+ memset(&ring->stat, 0, sizeof(ring->stat));
+ ring->threshold = 0;
+ ring->state = RING_STOP;
+ dhd_os_spin_unlock(ring->lock, flags);
+
+ dhd_os_spin_lock_deinit(dhdp->osh, ring->lock);
+#ifndef CONFIG_DHD_USE_STATIC_BUF
+ MFREE(dhdp->osh, buf, ring_sz);
+#endif
+}
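+
+/* Rough ring lifecycle as wired up in this file (illustrative summary):
+ *   dhd_dbg_ring_init()         - buffer allocated, state starts at RING_SUSPEND
+ *   dhd_dbg_set_configuration() - a non-zero log_level moves the ring to RING_ACTIVE
+ *   dhd_dbg_ring_push()/_pull() - producer/consumer traffic while active
+ *   dhd_dbg_ring_deinit()       - state set to RING_STOP, buffer freed on
+ *                                 non-static-buffer builds
+ */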
+
+uint8
+dhd_dbg_find_sets_by_tag(uint16 tag)
+{
+ uint i;
+ uint8 sets = 0;
+
+ for (i = 0; i < ARRAYSIZE(fw_verbose_level_map); i++) {
+ if (fw_verbose_level_map[i].tag == tag) {
+ sets |= fw_verbose_level_map[i].sets;
+ }
+ }
+
+ for (i = 0; i < ARRAYSIZE(fw_event_level_map); i++) {
+ if (fw_event_level_map[i].tag == tag) {
+ sets |= fw_event_level_map[i].sets;
+ }
+ }
+
+ return sets;
+}
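+
+/* Note (assumption): the returned bitmap is the union of the 'sets' masks of
+ * every fw_verbose/fw_event level-map entry carrying this tag; it is passed to
+ * firmware below in dhd_dbg_set_event_log_tag() as pars.set.
+ */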
/*
* dhd_dbg_set_event_log_tag : modify the state of an event log tag
memset(&pars, 0, sizeof(pars));
pars.tag = tag;
- pars.set = set;
- pars.flags = EVENT_LOG_TAG_FLAG_LOG;
+ pars.set = dhd_dbg_find_sets_by_tag(tag);
+ pars.flags = set ? EVENT_LOG_TAG_FLAG_LOG : EVENT_LOG_TAG_FLAG_NONE;
if (!bcm_mkiovar(cmd, (char *)&pars, sizeof(pars), iovbuf, sizeof(iovbuf))) {
DHD_ERROR(("%s mkiovar failed\n", __FUNCTION__));
}
ret = dhd_wl_ioctl_cmd(dhdp, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
- if (ret) {
- DHD_ERROR(("%s set log tag iovar failed %d\n", __FUNCTION__, ret));
- }
+// if (ret) {
+// DHD_ERROR(("%s set log tag iovar failed %d\n", __FUNCTION__, ret));
+// }
}
int
{
dhd_dbg_ring_t *ring;
uint8 set = 1;
+ unsigned long lock_flags;
int i, array_len = 0;
struct log_level_table *log_level_tbl = NULL;
-
if (!dhdp || !dhdp->dbg)
return BCME_BADADDR;
- if (!VALID_RING(ring_id)) {
- DHD_ERROR(("%s : invalid ring_id : %d\n", __FUNCTION__, ring_id));
- return BCME_RANGE;
- }
-
ring = &dhdp->dbg->dbg_rings[ring_id];
- dhd_dbg_ring_config(ring, log_level, threshold);
+ if (ring->state == RING_STOP)
+ return BCME_UNSUPPORTED;
+
+ lock_flags = dhd_os_spin_lock(ring->lock);
+ if (log_level == 0)
+ ring->state = RING_SUSPEND;
+ else
+ ring->state = RING_ACTIVE;
+ ring->log_level = log_level;
+
+ ring->threshold = MIN(threshold, DBGRING_FLUSH_THRESHOLD(ring));
+ dhd_os_spin_unlock(ring->lock, lock_flags);
if (log_level > 0)
set = TRUE;
- if (ring->id == FW_VERBOSE_RING_ID) {
+ if (ring->id == FW_EVENT_RING_ID) {
+ log_level_tbl = fw_event_level_map;
+ array_len = ARRAYSIZE(fw_event_level_map);
+ } else if (ring->id == FW_VERBOSE_RING_ID) {
log_level_tbl = fw_verbose_level_map;
array_len = ARRAYSIZE(fw_verbose_level_map);
+ } else if (ring->id == NAN_EVENT_RING_ID) {
+ log_level_tbl = nan_event_level_map;
+ array_len = ARRAYSIZE(nan_event_level_map);
}
for (i = 0; i < array_len; i++) {
return BCME_OK;
}
-int
-__dhd_dbg_get_ring_status(dhd_dbg_ring_t *ring, dhd_dbg_ring_status_t *get_ring_status)
-{
- dhd_dbg_ring_status_t ring_status;
- int ret = BCME_OK;
-
- if (ring == NULL) {
- return BCME_BADADDR;
- }
-
- bzero(&ring_status, sizeof(dhd_dbg_ring_status_t));
- RING_STAT_TO_STATUS(ring, ring_status);
- *get_ring_status = ring_status;
-
- return ret;
-}
-
/*
 * dhd_dbg_get_ring_status : get the ring status from the corresponding ring buffer
* Return: An error code or 0 on success.
int id = 0;
dhd_dbg_t *dbg;
dhd_dbg_ring_t *dbg_ring;
+ dhd_dbg_ring_status_t ring_status;
if (!dhdp || !dhdp->dbg)
return BCME_BADADDR;
dbg = dhdp->dbg;
+ memset(&ring_status, 0, sizeof(dhd_dbg_ring_status_t));
for (id = DEBUG_RING_ID_INVALID + 1; id < DEBUG_RING_ID_MAX; id++) {
dbg_ring = &dbg->dbg_rings[id];
if (VALID_RING(dbg_ring->id) && (dbg_ring->id == ring_id)) {
- __dhd_dbg_get_ring_status(dbg_ring, dbg_ring_status);
+ RING_STAT_TO_STATUS(dbg_ring, ring_status);
+ *dbg_ring_status = ring_status;
break;
}
}
return ret;
}
-#ifdef SHOW_LOGTRACE
-void
-dhd_dbg_read_ring_into_trace_buf(dhd_dbg_ring_t *ring, trace_buf_info_t *trace_buf_info)
-{
- dhd_dbg_ring_status_t ring_status;
- uint32 rlen = 0;
-
- rlen = dhd_dbg_ring_pull_single(ring, trace_buf_info->buf, TRACE_LOG_BUF_MAX_SIZE, TRUE);
-
- trace_buf_info->size = rlen;
- trace_buf_info->availability = NEXT_BUF_NOT_AVAIL;
- if (rlen == 0) {
- trace_buf_info->availability = BUF_NOT_AVAILABLE;
- return;
- }
-
- __dhd_dbg_get_ring_status(ring, &ring_status);
-
- if (ring_status.written_bytes != ring_status.read_bytes) {
- trace_buf_info->availability = NEXT_BUF_AVAIL;
- }
-}
-#endif /* SHOW_LOGTRACE */
-
/*
* dhd_dbg_find_ring_id : return ring_id based on ring_name
* Return: An invalid ring id for failure or valid ring id on success.
dbg_ring = &dbg->dbg_rings[ring_id];
if (!start) {
if (VALID_RING(dbg_ring->id)) {
- dhd_dbg_ring_start(dbg_ring);
+ /* Initialize the information for the ring */
+ dbg_ring->state = RING_SUSPEND;
+ dbg_ring->log_level = 0;
+ dbg_ring->rp = dbg_ring->wp = 0;
+ dbg_ring->threshold = 0;
+ memset(&dbg_ring->stat, 0, sizeof(struct ring_statistics));
+ memset(dbg_ring->ring_buf, 0, dbg_ring->ring_size);
}
}
}
return ret;
}
-#if defined(DBG_PKT_MON)
+#if defined(DBG_PKT_MON) || defined(DHD_PKT_LOGGING)
uint32
__dhd_dbg_pkt_hash(uintptr_t pkt, uint32 pktid)
{
uint32
__dhd_dbg_driver_ts_usec(void)
{
- struct osl_timespec ts;
+ struct timespec ts;
- osl_get_monotonic_boottime(&ts);
+ get_monotonic_boottime(&ts);
return ((uint32)(__TIMESPEC_TO_US(ts)));
}
case WLFC_CTL_PKTFLAG_DISCARD_NOACK:
pkt_fate = TX_PKT_FATE_SENT;
break;
- case WLFC_CTL_PKTFLAG_EXPIRED:
- pkt_fate = TX_PKT_FATE_FW_DROP_EXPTIME;
- break;
- case WLFC_CTL_PKTFLAG_MKTFREE:
- pkt_fate = TX_PKT_FATE_FW_PKT_FREE;
- break;
default:
pkt_fate = TX_PKT_FATE_FW_DROP_OTHER;
break;
return pkt_fate;
}
-#endif // endif
+#endif /* DBG_PKT_MON || DHD_PKT_LOGGING */
#ifdef DBG_PKT_MON
static int
DHD_PKT_INFO(("%s, %d\n", __FUNCTION__, __LINE__));
count = 0;
while ((count < pkt_count) && tx_pkts) {
- if (tx_pkts->info.pkt) {
+ if (tx_pkts->info.pkt)
PKTFREE(dhdp->osh, tx_pkts->info.pkt, TRUE);
- }
tx_pkts++;
count++;
}
DHD_PKT_INFO(("%s, %d\n", __FUNCTION__, __LINE__));
count = 0;
while ((count < pkt_count) && rx_pkts) {
- if (rx_pkts->info.pkt) {
+ if (rx_pkts->info.pkt)
PKTFREE(dhdp->osh, rx_pkts->info.pkt, TRUE);
- }
rx_pkts++;
count++;
}
int
dhd_dbg_attach_pkt_monitor(dhd_pub_t *dhdp,
- dbg_mon_tx_pkts_t tx_pkt_mon,
- dbg_mon_tx_status_t tx_status_mon,
- dbg_mon_rx_pkts_t rx_pkt_mon)
+ dbg_mon_tx_pkts_t tx_pkt_mon,
+ dbg_mon_tx_status_t tx_status_mon,
+ dbg_mon_rx_pkts_t rx_pkt_mon)
{
dhd_dbg_tx_report_t *tx_report = NULL;
dhd_dbg_pkt_mon_state_t tx_pkt_state;
dhd_dbg_pkt_mon_state_t tx_status_state;
dhd_dbg_pkt_mon_state_t rx_pkt_state;
+ gfp_t kflags;
uint32 alloc_len;
int ret = BCME_OK;
unsigned long flags;
return BCME_OK;
}
+ kflags = in_atomic() ? GFP_ATOMIC : GFP_KERNEL;
+
/* allocate and initialize tx packet monitoring */
alloc_len = sizeof(*tx_report);
- tx_report = (dhd_dbg_tx_report_t *)MALLOCZ(dhdp->osh, alloc_len);
+ tx_report = (dhd_dbg_tx_report_t *)kzalloc(alloc_len, kflags);
if (unlikely(!tx_report)) {
DHD_ERROR(("%s(): could not allocate memory for - "
"dhd_dbg_tx_report_t\n", __FUNCTION__));
}
alloc_len = (sizeof(*tx_pkts) * MAX_FATE_LOG_LEN);
- tx_pkts = (dhd_dbg_tx_info_t *)MALLOCZ(dhdp->osh, alloc_len);
+ tx_pkts = (dhd_dbg_tx_info_t *)kzalloc(alloc_len, kflags);
if (unlikely(!tx_pkts)) {
DHD_ERROR(("%s(): could not allocate memory for - "
"dhd_dbg_tx_info_t\n", __FUNCTION__));
/* allocate and initialze rx packet monitoring */
alloc_len = sizeof(*rx_report);
- rx_report = (dhd_dbg_rx_report_t *)MALLOCZ(dhdp->osh, alloc_len);
+ rx_report = (dhd_dbg_rx_report_t *)kzalloc(alloc_len, kflags);
if (unlikely(!rx_report)) {
DHD_ERROR(("%s(): could not allocate memory for - "
"dhd_dbg_rx_report_t\n", __FUNCTION__));
}
alloc_len = (sizeof(*rx_pkts) * MAX_FATE_LOG_LEN);
- rx_pkts = (dhd_dbg_rx_info_t *)MALLOCZ(dhdp->osh, alloc_len);
+ rx_pkts = (dhd_dbg_rx_info_t *)kzalloc(alloc_len, kflags);
if (unlikely(!rx_pkts)) {
DHD_ERROR(("%s(): could not allocate memory for - "
"dhd_dbg_rx_info_t\n", __FUNCTION__));
fail:
/* tx packet monitoring */
if (tx_pkts) {
- alloc_len = (sizeof(*tx_pkts) * MAX_FATE_LOG_LEN);
- MFREE(dhdp->osh, tx_pkts, alloc_len);
+ kfree(tx_pkts);
}
if (tx_report) {
- alloc_len = sizeof(*tx_report);
- MFREE(dhdp->osh, tx_report, alloc_len);
+ kfree(tx_report);
}
dhdp->dbg->pkt_mon.tx_report = NULL;
dhdp->dbg->pkt_mon.tx_report->tx_pkts = NULL;
/* rx packet monitoring */
if (rx_pkts) {
- alloc_len = (sizeof(*rx_pkts) * MAX_FATE_LOG_LEN);
- MFREE(dhdp->osh, rx_pkts, alloc_len);
+ kfree(rx_pkts);
}
if (rx_report) {
- alloc_len = sizeof(*rx_report);
- MFREE(dhdp->osh, rx_report, alloc_len);
+ kfree(rx_report);
}
dhdp->dbg->pkt_mon.rx_report = NULL;
dhdp->dbg->pkt_mon.rx_report->rx_pkts = NULL;
return -EINVAL;
}
+
tx_pkt_state = dhdp->dbg->pkt_mon.tx_pkt_state;
tx_status_state = dhdp->dbg->pkt_mon.tx_status_state;
rx_pkt_state = dhdp->dbg->pkt_mon.rx_pkt_state;
}
} else
#endif /* CONFIG_COMPAT */
+
{
ptr = (wifi_tx_report_t *)user_buf;
while ((count < pkt_count) && tx_pkt && ptr) {
if (tx_report->tx_pkts) {
__dhd_dbg_free_tx_pkts(dhdp, tx_report->tx_pkts,
tx_report->pkt_pos);
- MFREE(dhdp->osh, tx_report->tx_pkts,
- (sizeof(*tx_report->tx_pkts) * MAX_FATE_LOG_LEN));
+ kfree(tx_report->tx_pkts);
dhdp->dbg->pkt_mon.tx_report->tx_pkts = NULL;
}
- MFREE(dhdp->osh, tx_report, sizeof(*tx_report));
+ kfree(tx_report);
dhdp->dbg->pkt_mon.tx_report = NULL;
}
dhdp->dbg->pkt_mon.tx_pkt_mon = NULL;
if (rx_report->rx_pkts) {
__dhd_dbg_free_rx_pkts(dhdp, rx_report->rx_pkts,
rx_report->pkt_pos);
- MFREE(dhdp->osh, rx_report->rx_pkts,
- (sizeof(*rx_report->rx_pkts) * MAX_FATE_LOG_LEN));
+ kfree(rx_report->rx_pkts);
dhdp->dbg->pkt_mon.rx_report->rx_pkts = NULL;
}
- MFREE(dhdp->osh, rx_report, sizeof(*rx_report));
+ kfree(rx_report);
dhdp->dbg->pkt_mon.rx_report = NULL;
}
dhdp->dbg->pkt_mon.rx_pkt_mon = NULL;
}
#endif /* DBG_PKT_MON */
-#if defined(DBG_PKT_MON)
-bool
-dhd_dbg_process_tx_status(dhd_pub_t *dhdp, void *pkt, uint32 pktid,
- uint16 status)
-{
- bool pkt_fate = TRUE;
- if (dhdp->d11_tx_status) {
- pkt_fate = (status == WLFC_CTL_PKTFLAG_DISCARD) ? TRUE : FALSE;
- DHD_DBG_PKT_MON_TX_STATUS(dhdp, pkt, pktid, status);
- }
- return pkt_fate;
-}
-#else /* DBG_PKT_MON || DHD_PKT_LOGGING */
-bool
-dhd_dbg_process_tx_status(dhd_pub_t *dhdp, void *pkt,
- uint32 pktid, uint16 status)
-{
- return TRUE;
-}
-#endif // endif
-
-#define EL_LOG_STR_LEN 512
-
-void
-print_roam_enhanced_log(prcd_event_log_hdr_t *plog_hdr)
-{
- prsv_periodic_log_hdr_t *hdr = (prsv_periodic_log_hdr_t *)plog_hdr->log_ptr;
- char chanspec_buf[CHANSPEC_STR_LEN];
-
- if (hdr->version != ROAM_LOG_VER_1) {
- DHD_ERROR(("ROAM_LOG ENHANCE: version is not matched\n"));
- goto default_print;
- return;
- }
-
- switch (hdr->id) {
- case ROAM_LOG_SCANSTART:
- {
- roam_log_trig_v1_t *log = (roam_log_trig_v1_t *)plog_hdr->log_ptr;
- DHD_ERROR(("ROAM_LOG_SCANSTART time: %d,"
- " version:%d reason: %d rssi:%d cu:%d result:%d\n",
- plog_hdr->armcycle, log->hdr.version, log->reason,
- log->rssi, log->current_cu, log->result));
- if (log->reason == WLC_E_REASON_DEAUTH ||
- log->reason == WLC_E_REASON_DISASSOC) {
- DHD_ERROR((" ROAM_LOG_PRT_ROAM: RCVD reason:%d\n",
- log->prt_roam.rcvd_reason));
- } else if (log->reason == WLC_E_REASON_BSSTRANS_REQ) {
- DHD_ERROR((" ROAM_LOG_BSS_REQ: mode:%d candidate:%d token:%d "
- "duration disassoc:%d valid:%d term:%d\n",
- log->bss_trans.req_mode, log->bss_trans.nbrlist_size,
- log->bss_trans.token, log->bss_trans.disassoc_dur,
- log->bss_trans.validity_dur, log->bss_trans.bss_term_dur));
- }
- break;
- }
- case ROAM_LOG_SCAN_CMPLT:
- {
- int i;
- roam_log_scan_cmplt_v1_t *log =
- (roam_log_scan_cmplt_v1_t *)plog_hdr->log_ptr;
-
- DHD_ERROR(("ROAM_LOG_SCAN_CMPL: time:%d version:%d"
- "is_full:%d scan_count:%d score_delta:%d",
- plog_hdr->armcycle, log->hdr.version, log->full_scan,
- log->scan_count, log->score_delta));
- DHD_ERROR((" ROAM_LOG_CUR_AP: " MACDBG "rssi:%d score:%d channel:%s\n",
- MAC2STRDBG((uint8 *)&log->cur_info.addr),
- log->cur_info.rssi,
- log->cur_info.score,
- wf_chspec_ntoa_ex(log->cur_info.chanspec, chanspec_buf)));
- for (i = 0; i < log->scan_list_size; i++) {
- DHD_ERROR((" ROAM_LOG_CANDIDATE %d: " MACDBG
- "rssi:%d score:%d channel:%s TPUT:%dkbps\n",
- i, MAC2STRDBG((uint8 *)&log->scan_list[i].addr),
- log->scan_list[i].rssi, log->scan_list[i].score,
- wf_chspec_ntoa_ex(log->scan_list[i].chanspec,
- chanspec_buf),
- log->scan_list[i].estm_tput != ROAM_LOG_INVALID_TPUT?
- log->scan_list[i].estm_tput:0));
- }
- break;
- }
- case ROAM_LOG_ROAM_CMPLT:
- {
- roam_log_cmplt_v1_t *log = (roam_log_cmplt_v1_t *)plog_hdr->log_ptr;
- DHD_ERROR(("ROAM_LOG_ROAM_CMPL: time: %d, version:%d"
- "status: %d reason: %d channel:%s retry:%d " MACDBG "\n",
- plog_hdr->armcycle, log->hdr.version, log->status, log->reason,
- wf_chspec_ntoa_ex(log->chanspec, chanspec_buf),
- log->retry, MAC2STRDBG((uint8 *)&log->addr)));
- break;
- }
- case ROAM_LOG_NBR_REQ:
- {
- roam_log_nbrreq_v1_t *log = (roam_log_nbrreq_v1_t *)plog_hdr->log_ptr;
- DHD_ERROR(("ROAM_LOG_NBR_REQ: time: %d, version:%d token:%d\n",
- plog_hdr->armcycle, log->hdr.version, log->token));
- break;
- }
- case ROAM_LOG_NBR_REP:
- {
- roam_log_nbrrep_v1_t *log = (roam_log_nbrrep_v1_t *)plog_hdr->log_ptr;
- DHD_ERROR(("ROAM_LOG_NBR_REP: time:%d, veresion:%d chan_num:%d\n",
- plog_hdr->armcycle, log->hdr.version, log->channel_num));
- break;
- }
- case ROAM_LOG_BCN_REQ:
- {
- roam_log_bcnrpt_req_v1_t *log =
- (roam_log_bcnrpt_req_v1_t *)plog_hdr->log_ptr;
- DHD_ERROR(("ROAM_LOG_BCN_REQ: time:%d, version:%d ret:%d"
- "class:%d num_chan:%d ",
- plog_hdr->armcycle, log->hdr.version,
- log->result, log->reg, log->channel));
- DHD_ERROR(("ROAM_LOG_BCN_REQ: mode:%d is_wild:%d duration:%d"
- "ssid_len:%d\n", log->mode, log->bssid_wild,
- log->duration, log->ssid_len));
- break;
- }
- case ROAM_LOG_BCN_REP:
- {
- roam_log_bcnrpt_rep_v1_t *log =
- (roam_log_bcnrpt_rep_v1_t *)plog_hdr->log_ptr;
- DHD_ERROR(("ROAM_LOG_BCN_REP: time:%d, verseion:%d count:%d\n",
- plog_hdr->armcycle, log->hdr.version,
- log->count));
- break;
- }
- default:
- goto default_print;
- }
-
- return;
-
-default_print:
- {
- uint32 *ptr = (uint32 *)plog_hdr->log_ptr;
- int i;
- int loop_cnt = hdr->length / sizeof(uint32);
- struct bcmstrbuf b;
- char pr_buf[EL_LOG_STR_LEN] = { 0 };
-
- bcm_binit(&b, pr_buf, EL_LOG_STR_LEN);
- bcm_bprintf(&b, "ROAM_LOG_UNKNOWN ID:%d ver:%d armcycle:%d",
- hdr->id, hdr->version, plog_hdr->armcycle);
- for (i = 0; i < loop_cnt && b.size > 0; i++) {
- bcm_bprintf(&b, " %x", *ptr);
- ptr++;
- }
- DHD_ERROR(("%s\n", b.origbuf));
- }
-}
-
/*
 * dhd_dbg_attach: initialization of the DHD debugability module
*
dhd_dbg_attach(dhd_pub_t *dhdp, dbg_pullreq_t os_pullreq,
dbg_urgent_noti_t os_urgent_notifier, void *os_priv)
{
- dhd_dbg_t *dbg = NULL;
- dhd_dbg_ring_t *ring = NULL;
- int ret = BCME_ERROR, ring_id = 0;
- void *buf = NULL;
+ dhd_dbg_t *dbg;
+ int ret, ring_id;
dbg = MALLOCZ(dhdp->osh, sizeof(dhd_dbg_t));
if (!dbg)
return BCME_NOMEM;
-#ifdef CONFIG_DHD_USE_STATIC_BUF
- buf = DHD_OS_PREALLOC(dhdp, DHD_PREALLOC_FW_VERBOSE_RING, FW_VERBOSE_RING_SIZE);
-#else
- buf = MALLOCZ(dhdp->osh, FW_VERBOSE_RING_SIZE);
-#endif
- if (!buf)
- goto error;
ret = dhd_dbg_ring_init(dhdp, &dbg->dbg_rings[FW_VERBOSE_RING_ID], FW_VERBOSE_RING_ID,
- (uint8 *)FW_VERBOSE_RING_NAME, FW_VERBOSE_RING_SIZE, buf, FALSE);
+ (uint8 *)FW_VERBOSE_RING_NAME, FW_VERBOSE_RING_SIZE, DHD_PREALLOC_FW_VERBOSE_RING);
if (ret)
goto error;
-#ifdef CONFIG_DHD_USE_STATIC_BUF
- buf = DHD_OS_PREALLOC(dhdp, DHD_PREALLOC_DHD_EVENT_RING, DHD_EVENT_RING_SIZE);
-#else
- buf = MALLOCZ(dhdp->osh, DHD_EVENT_RING_SIZE);
-#endif
- if (!buf)
+ ret = dhd_dbg_ring_init(dhdp, &dbg->dbg_rings[FW_EVENT_RING_ID], FW_EVENT_RING_ID,
+ (uint8 *)FW_EVENT_RING_NAME, FW_EVENT_RING_SIZE, DHD_PREALLOC_FW_EVENT_RING);
+ if (ret)
goto error;
+
ret = dhd_dbg_ring_init(dhdp, &dbg->dbg_rings[DHD_EVENT_RING_ID], DHD_EVENT_RING_ID,
- (uint8 *)DHD_EVENT_RING_NAME, DHD_EVENT_RING_SIZE, buf, FALSE);
+ (uint8 *)DHD_EVENT_RING_NAME, DHD_EVENT_RING_SIZE, DHD_PREALLOC_DHD_EVENT_RING);
+ if (ret)
+ goto error;
+
+ ret = dhd_dbg_ring_init(dhdp, &dbg->dbg_rings[NAN_EVENT_RING_ID], NAN_EVENT_RING_ID,
+ (uint8 *)NAN_EVENT_RING_NAME, NAN_EVENT_RING_SIZE, DHD_PREALLOC_NAN_EVENT_RING);
if (ret)
goto error;
error:
for (ring_id = DEBUG_RING_ID_INVALID + 1; ring_id < DEBUG_RING_ID_MAX; ring_id++) {
if (VALID_RING(dbg->dbg_rings[ring_id].id)) {
- ring = &dbg->dbg_rings[ring_id];
- dhd_dbg_ring_deinit(dhdp, ring);
- if (ring->ring_buf) {
-#ifndef CONFIG_DHD_USE_STATIC_BUF
- MFREE(dhdp->osh, ring->ring_buf, ring->ring_size);
-#endif
- ring->ring_buf = NULL;
- }
- ring->ring_size = 0;
+ dhd_dbg_ring_deinit(dhdp, &dbg->dbg_rings[ring_id]);
}
}
MFREE(dhdp->osh, dhdp->dbg, sizeof(dhd_dbg_t));
dhd_dbg_detach(dhd_pub_t *dhdp)
{
int ring_id;
- dhd_dbg_ring_t *ring = NULL;
dhd_dbg_t *dbg;
-
if (!dhdp->dbg)
return;
dbg = dhdp->dbg;
for (ring_id = DEBUG_RING_ID_INVALID + 1; ring_id < DEBUG_RING_ID_MAX; ring_id++) {
if (VALID_RING(dbg->dbg_rings[ring_id].id)) {
- ring = &dbg->dbg_rings[ring_id];
- dhd_dbg_ring_deinit(dhdp, ring);
- if (ring->ring_buf) {
-#ifndef CONFIG_DHD_USE_STATIC_BUF
- MFREE(dhdp->osh, ring->ring_buf, ring->ring_size);
-#endif
- ring->ring_buf = NULL;
- }
- ring->ring_size = 0;
+ dhd_dbg_ring_deinit(dhdp, &dbg->dbg_rings[ring_id]);
}
}
MFREE(dhdp->osh, dhdp->dbg, sizeof(dhd_dbg_t));
*
* <<Broadcom-WL-IPTag/Open:>>
*
- * Copyright (C) 1999-2019, Broadcom.
- *
+ * Copyright (C) 1999-2017, Broadcom Corporation
+ *
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
* under the terms of the GNU General Public License version 2 (the "GPL"),
* available at http://www.broadcom.com/licenses/GPLv2.php, with the
* following added to such license:
- *
+ *
* As a special exception, the copyright holders of this software give you
* permission to link this software with independent modules, and to copy and
* distribute the resulting executable under terms of your choice, provided that
* the license of that module. An independent module is a module which is not
* derived from this software. The special exception does not apply to any
* modifications of the software.
- *
+ *
* Notwithstanding the above, under no circumstances may you combine this
* software in any way with any other Broadcom software provided under a license
* other than the GPL, without Broadcom's express prior written consent.
*
- * $Id: dhd_debug.h 783721 2018-10-08 13:05:26Z $
+ * $Id: dhd_debug.h 705824 2017-06-19 13:58:39Z $
*/
#ifndef _dhd_debug_h_
#define _dhd_debug_h_
#include <event_log.h>
#include <bcmutils.h>
-#include <dhd_dbg_ring.h>
enum {
DEBUG_RING_ID_INVALID = 0,
FW_VERBOSE_RING_ID,
+ FW_EVENT_RING_ID,
DHD_EVENT_RING_ID,
+ NAN_EVENT_RING_ID,
/* add new id here */
DEBUG_RING_ID_MAX
};
DBG_RING_ENTRY_FLAGS_HAS_TIMESTAMP = (1 << (1))
};
+#define DBGRING_NAME_MAX 32
/* firmware verbose ring, ring id 1 */
#define FW_VERBOSE_RING_NAME "fw_verbose"
-#define FW_VERBOSE_RING_SIZE (256 * 1024)
+#define FW_VERBOSE_RING_SIZE (64 * 1024)
/* firmware event ring, ring id 2 */
#define FW_EVENT_RING_NAME "fw_event"
#define FW_EVENT_RING_SIZE (64 * 1024)
/* DHD connection event ring, ring id 3 */
#define DHD_EVENT_RING_NAME "dhd_event"
#define DHD_EVENT_RING_SIZE (64 * 1024)
+
/* NAN event ring, ring id 4 */
#define NAN_EVENT_RING_NAME "nan_event"
#define NAN_EVENT_RING_SIZE (64 * 1024)
#define TLV_LOG_NEXT(tlv) \
((tlv) ? ((tlv_log *)((uint8 *)tlv + TLV_LOG_SIZE(tlv))) : 0)
+#define DBG_RING_STATUS_SIZE (sizeof(dhd_dbg_ring_status_t))
+
#define VALID_RING(id) \
((id > DEBUG_RING_ID_INVALID) && (id < DEBUG_RING_ID_MAX))
#define DBG_RING_ACTIVE(dhdp, ring_id) 0
#endif /* DEBUGABILITY */
+#define TXACTIVESZ(r, w, d) (((r) <= (w)) ? ((w) - (r)) : ((d) - (r) + (w)))
+#define DBG_RING_READ_AVAIL_SPACE(w, r, d) (((w) >= (r)) ? ((w) - (r)) : ((d) - (r)))
+#define DBG_RING_WRITE_SPACE_AVAIL_CONT(r, w, d) (((w) >= (r)) ? ((d) - (w)) : ((r) - (w)))
+#define DBG_RING_WRITE_SPACE_AVAIL(r, w, d) (d - (TXACTIVESZ(r, w, d)))
+#define DBG_RING_CHECK_WRITE_SPACE(r, w, d) \
+ MIN(DBG_RING_WRITE_SPACE_AVAIL(r, w, d), DBG_RING_WRITE_SPACE_AVAIL_CONT(r, w, d))
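+/* Worked example (illustrative): with rp r = 10, wp w = 50 and ring size
+ * d = 100, TXACTIVESZ = 40 bytes are in use, DBG_RING_READ_AVAIL_SPACE = 40,
+ * DBG_RING_WRITE_SPACE_AVAIL_CONT = 50 (room up to the end of the buffer),
+ * DBG_RING_WRITE_SPACE_AVAIL = 60, and DBG_RING_CHECK_WRITE_SPACE =
+ * MIN(60, 50) = 50, i.e. the largest write that fits without wrapping.
+ */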
+
enum {
/* driver receive association command from kernel */
WIFI_EVENT_ASSOCIATION_REQUESTED = 0,
} per_packet_status_entry_t;
#define PACKED_STRUCT __attribute__ ((packed))
-
typedef struct log_conn_event {
uint16 event;
tlv_log *tlvs;
DBG_RING_ENTRY_NAN_EVENT_TYPE
};
+typedef struct dhd_dbg_ring_entry {
+ uint16 len; /* payload length excluding the header */
+ uint8 flags;
+ uint8 type; /* Per ring specific */
+ uint64 timestamp; /* present if has_timestamp bit is set. */
+} PACKED_STRUCT dhd_dbg_ring_entry_t;
+
+#define DBG_RING_ENTRY_SIZE (sizeof(dhd_dbg_ring_entry_t))
+
+#define ENTRY_LENGTH(hdr) ((hdr)->len + DBG_RING_ENTRY_SIZE)
+
+#define PAYLOAD_MAX_LEN 65535
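+/* Illustrative layout: each record in a ring is a dhd_dbg_ring_entry_t header
+ * immediately followed by its payload, so a record with hdr->len == 100
+ * occupies ENTRY_LENGTH(hdr) = 100 + DBG_RING_ENTRY_SIZE bytes in the buffer,
+ * and a single payload may not exceed PAYLOAD_MAX_LEN bytes.
+ */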
+
+typedef struct dhd_dbg_ring_status {
+ uint8 name[DBGRING_NAME_MAX];
+ uint32 flags;
+ int ring_id; /* unique integer representing the ring */
+ /* total memory size allocated for the buffer */
+ uint32 ring_buffer_byte_size;
+ uint32 verbose_level;
+	/* number of bytes that were written to the buffer by the driver */
+	uint32 written_bytes;
+	/* number of bytes that were read from the buffer by user land */
+	uint32 read_bytes;
+	/* number of records that were written to the buffer by the driver */
+	uint32 written_records;
+} dhd_dbg_ring_status_t;
+
struct log_level_table {
int log_level;
uint16 tag;
+ uint8 sets;
char *desc;
};
-/*
- * Assuming that the Ring lock is mutex, bailing out if the
- * callers are from atomic context. On a long term, one has to
- * schedule a job to execute in sleepable context so that
- * contents are pushed to the ring.
- */
+#ifdef DEBUGABILITY
#define DBG_EVENT_LOG(dhdp, connect_state) \
{ \
do { \
uint16 state = connect_state; \
- if (CAN_SLEEP() && DBG_RING_ACTIVE(dhdp, DHD_EVENT_RING_ID)) \
+ if (DBG_RING_ACTIVE(dhdp, DHD_EVENT_RING_ID)) \
dhd_os_push_push_ring_data(dhdp, DHD_EVENT_RING_ID, \
&state, sizeof(state)); \
} while (0); \
}
+#else
+#define DBG_EVENT_LOG(dhdp, connect_state)
+#endif /* DEBUGABILITY */
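+/* Illustrative usage, assuming a caller in the driver connection path:
+ *   DBG_EVENT_LOG(dhdp, WIFI_EVENT_ASSOCIATION_REQUESTED);
+ * pushes the 16-bit state into the DHD event ring when DEBUGABILITY is
+ * defined and the ring is active, and compiles to nothing otherwise.
+ */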
+
#define MD5_PREFIX_LEN 4
#define MAX_FATE_LOG_LEN 32
+
#define MAX_FRAME_LEN_ETHERNET 1518
#define MAX_FRAME_LEN_80211_MGMT 2352 /* 802.11-2012 Fig. 8-34 */
*/
TX_PKT_FATE_FW_DROP_INVALID,
- /* Dropped by firmware due to lifetime expiration. */
- TX_PKT_FATE_FW_DROP_EXPTIME,
+ /* Dropped by firmware due to lack of buffer space. */
+ TX_PKT_FATE_FW_DROP_NOBUFS,
/*
* Dropped by firmware for any other reason. Includes
/* Dropped by driver for any other reason. */
TX_PKT_FATE_DRV_DROP_OTHER,
- /* Packet free by firmware. */
- TX_PKT_FATE_FW_PKT_FREE,
-
} wifi_tx_packet_fate;
typedef enum {
} frame_content;
} compat_wifi_frame_info_t;
+
typedef struct compat_wifi_tx_report {
char md5_prefix[MD5_PREFIX_LEN];
wifi_tx_packet_fate fate;
compat_wifi_frame_info_t frame_inf;
} compat_wifi_rx_report_t;
+
/*
* Packet logging - internal data
*/
dbg_mon_rx_pkts_t rx_pkt_mon;
} dhd_dbg_pkt_mon_t;
+enum dbg_ring_state {
+ RING_STOP = 0, /* ring is not initialized */
+ RING_ACTIVE, /* ring is live and logging */
+ RING_SUSPEND /* ring is initialized but not logging */
+};
+
+struct ring_statistics {
+	/* number of bytes that were written to the buffer by the driver */
+	uint32 written_bytes;
+	/* number of bytes that were read from the buffer by user land */
+	uint32 read_bytes;
+	/* number of records that were written to the buffer by the driver */
+	uint32 written_records;
+};
+
+typedef struct dhd_dbg_ring {
+ int id; /* ring id */
+ uint8 name[DBGRING_NAME_MAX]; /* name string */
+	uint32 ring_size; /* size of the ring buffer in bytes */
+ uint32 wp; /* write pointer */
+ uint32 rp; /* read pointer */
+ uint32 log_level; /* log_level */
+ uint32 threshold; /* threshold bytes */
+ void * ring_buf; /* pointer of actually ring buffer */
+ void * lock; /* spin lock for ring access */
+ struct ring_statistics stat; /* statistics */
+ enum dbg_ring_state state; /* ring state enum */
+ bool tail_padded; /* writer does not have enough space */
+ uint32 rem_len; /* number of bytes from wp_pad to end */
+ bool sched_pull; /* schedule reader immediately */
+} dhd_dbg_ring_t;
+
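/*
 * Simplified illustration of the wp/rp bookkeeping in dhd_dbg_ring_t above,
 * treating ring_size as the buffer size in bytes. The driver's real push
 * path also handles tail padding (tail_padded/rem_len), omitted here; the
 * function name is hypothetical.
 */
static inline uint32 dbg_ring_free_space_example(const dhd_dbg_ring_t *ring)
{
	if (ring->wp >= ring->rp)
		return ring->ring_size - (ring->wp - ring->rp);
	return ring->rp - ring->wp;
}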
typedef struct dhd_dbg {
dhd_dbg_ring_t dbg_rings[DEBUG_RING_ID_MAX];
void *private; /* os private_data */
#ifdef DUMP_IOCTL_IOV_LIST
typedef struct dhd_iov_li {
dll_t list;
- uint32 cmd; /* command number */
- char buff[100]; /* command name */
+ uint32 cmd;
+ char buff[100];
} dhd_iov_li_t;
-#endif /* DUMP_IOCTL_IOV_LIST */
#define IOV_LIST_MAX_LEN 5
+#endif /* DUMP_IOCTL_IOV_LIST */
#ifdef DHD_DEBUG
typedef struct {
} dhd_dbg_mwli_t;
#endif /* DHD_DEBUG */
-#define DHD_OW_BI_RAW_EVENT_LOG_FMT 0xFFFF
-
-/* LSB 2 bits of format number to identify the type of event log */
-#define DHD_EVENT_LOG_HDR_MASK 0x3
-
-#define DHD_EVENT_LOG_FMT_NUM_OFFSET 2
-#define DHD_EVENT_LOG_FMT_NUM_MASK 0x3FFF
-/**
- * OW:- one word
- * TW:- two word
- * NB:- non binary
- * BI:- binary
- */
-#define DHD_OW_NB_EVENT_LOG_HDR 0
-#define DHD_TW_NB_EVENT_LOG_HDR 1
-#define DHD_BI_EVENT_LOG_HDR 3
-#define DHD_INVALID_EVENT_LOG_HDR 2
-
-#define DHD_TW_VALID_TAG_BITS_MASK 0xF
-#define DHD_OW_BI_EVENT_FMT_NUM 0x3FFF
-#define DHD_TW_BI_EVENT_FMT_NUM 0x3FFE
-
-#define DHD_TW_EVENT_LOG_TAG_OFFSET 8
-
-#define EVENT_TAG_TIMESTAMP_OFFSET 1
-#define EVENT_TAG_TIMESTAMP_EXT_OFFSET 2
-
-typedef struct prcd_event_log_hdr {
- uint32 tag; /* Event_log entry tag */
- uint32 count; /* Count of 4-byte entries */
- uint32 fmt_num_raw; /* Format number */
- uint32 fmt_num; /* Format number >> 2 */
- uint32 armcycle; /* global ARM CYCLE for TAG */
- uint32 *log_ptr; /* start of payload */
- uint32 payload_len;
- /* Extended event log header info
- * 0 - legacy, 1 - extended event log header present
- */
- bool ext_event_log_hdr;
- bool binary_payload; /* 0 - non binary payload, 1 - binary payload */
-} prcd_event_log_hdr_t; /* Processed event log header */
-
/* dhd_dbg functions */
extern void dhd_dbg_trace_evnt_handler(dhd_pub_t *dhdp, void *event_data,
void *raw_event_ptr, uint datalen);
-void dhd_dbg_msgtrace_log_parser(dhd_pub_t *dhdp, void *event_data,
- void *raw_event_ptr, uint datalen, bool msgtrace_hdr_present,
- uint32 msgtrace_seqnum);
-
extern int dhd_dbg_attach(dhd_pub_t *dhdp, dbg_pullreq_t os_pullreq,
dbg_urgent_noti_t os_urgent_notifier, void *os_priv);
extern void dhd_dbg_detach(dhd_pub_t *dhdp);
extern int dhd_dbg_start(dhd_pub_t *dhdp, bool start);
extern int dhd_dbg_set_configuration(dhd_pub_t *dhdp, int ring_id,
int log_level, int flags, uint32 threshold);
+extern int dhd_dbg_get_ring_status(dhd_pub_t *dhdp, int ring_id,
+ dhd_dbg_ring_status_t *dbg_ring_status);
+extern int dhd_dbg_ring_push(dhd_pub_t *dhdp, int ring_id, dhd_dbg_ring_entry_t *hdr, void *data);
+extern int dhd_dbg_ring_pull(dhd_pub_t *dhdp, int ring_id, void *data, uint32 buf_len);
+extern int dhd_dbg_ring_pull_single(dhd_pub_t *dhdp, int ring_id, void *data, uint32 buf_len,
+ bool strip_header);
extern int dhd_dbg_find_ring_id(dhd_pub_t *dhdp, char *ring_name);
-extern dhd_dbg_ring_t *dhd_dbg_get_ring_from_ring_id(dhd_pub_t *dhdp, int ring_id);
extern void *dhd_dbg_get_priv(dhd_pub_t *dhdp);
extern int dhd_dbg_send_urgent_evt(dhd_pub_t *dhdp, const void *data, const uint32 len);
-extern void dhd_dbg_verboselog_printf(dhd_pub_t *dhdp, prcd_event_log_hdr_t *plog_hdr,
- void *raw_event_ptr, uint32 *log_ptr, uint32 logset, uint16 block);
-int dhd_dbg_pull_from_ring(dhd_pub_t *dhdp, int ring_id, void *data, uint32 buf_len);
-int dhd_dbg_pull_single_from_ring(dhd_pub_t *dhdp, int ring_id, void *data, uint32 buf_len,
- bool strip_header);
-int dhd_dbg_push_to_ring(dhd_pub_t *dhdp, int ring_id, dhd_dbg_ring_entry_t *hdr,
- void *data);
-int __dhd_dbg_get_ring_status(dhd_dbg_ring_t *ring, dhd_dbg_ring_status_t *ring_status);
-int dhd_dbg_get_ring_status(dhd_pub_t *dhdp, int ring_id,
- dhd_dbg_ring_status_t *dbg_ring_status);
-#ifdef SHOW_LOGTRACE
-void dhd_dbg_read_ring_into_trace_buf(dhd_dbg_ring_t *ring, trace_buf_info_t *trace_buf_info);
-#endif /* SHOW_LOGTRACE */
+extern void dhd_dbg_verboselog_printf(dhd_pub_t *dhdp, event_log_hdr_t *hdr,
+ void *raw_event_ptr, uint32 *log_ptr);
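/*
 * Illustrative call sketch for the push API declared above (function name
 * and error handling are hypothetical): resolve the ring id from its name,
 * fill a dhd_dbg_ring_entry_t header, then push header plus payload.
 */
static inline int dbg_ring_push_example(dhd_pub_t *dhdp, char *ring_name,
	void *payload, uint16 payload_len)
{
	dhd_dbg_ring_entry_t hdr;
	int ring_id = dhd_dbg_find_ring_id(dhdp, ring_name);

	if (!VALID_RING(ring_id))
		return BCME_ERROR;

	memset(&hdr, 0, sizeof(hdr));
	hdr.len = payload_len;

	return dhd_dbg_ring_push(dhdp, ring_id, &hdr, payload);
}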
#ifdef DBG_PKT_MON
extern int dhd_dbg_attach_pkt_monitor(dhd_pub_t *dhdp,
extern int dhd_dbg_detach_pkt_monitor(dhd_pub_t *dhdp);
#endif /* DBG_PKT_MON */
-extern bool dhd_dbg_process_tx_status(dhd_pub_t *dhdp, void *pkt,
- uint32 pktid, uint16 status);
-
/* os wrapper function */
extern int dhd_os_dbg_attach(dhd_pub_t *dhdp);
extern void dhd_os_dbg_detach(dhd_pub_t *dhdp);
*
* <<Broadcom-WL-IPTag/Open:>>
*
- * Copyright (C) 1999-2019, Broadcom.
- *
+ * Copyright (C) 1999-2017, Broadcom Corporation
+ *
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
* under the terms of the GNU General Public License version 2 (the "GPL"),
* available at http://www.broadcom.com/licenses/GPLv2.php, with the
* following added to such license:
- *
+ *
* As a special exception, the copyright holders of this software give you
* permission to link this software with independent modules, and to copy and
* distribute the resulting executable under terms of your choice, provided that
* the license of that module. An independent module is a module which is not
* derived from this software. The special exception does not apply to any
* modifications of the software.
- *
+ *
* Notwithstanding the above, under no circumstances may you combine this
* software in any way with any other Broadcom software provided under a license
* other than the GPL, without Broadcom's express prior written consent.
*
- * $Id: dhd_debug_linux.c 769272 2018-06-25 09:23:27Z $
+ * $Id: dhd_debug_linux.c 710862 2017-07-14 07:43:59Z $
*/
#include <typedefs.h>
} linux_dbgring_info_t;
struct log_level_table dhd_event_map[] = {
- {1, WIFI_EVENT_DRIVER_EAPOL_FRAME_TRANSMIT_REQUESTED, "DRIVER EAPOL TX REQ"},
- {1, WIFI_EVENT_DRIVER_EAPOL_FRAME_RECEIVED, "DRIVER EAPOL RX"},
- {2, WIFI_EVENT_DRIVER_SCAN_REQUESTED, "SCAN_REQUESTED"},
- {2, WIFI_EVENT_DRIVER_SCAN_COMPLETE, "SCAN COMPELETE"},
- {3, WIFI_EVENT_DRIVER_SCAN_RESULT_FOUND, "SCAN RESULT FOUND"},
- {2, WIFI_EVENT_DRIVER_PNO_ADD, "PNO ADD"},
- {2, WIFI_EVENT_DRIVER_PNO_REMOVE, "PNO REMOVE"},
- {2, WIFI_EVENT_DRIVER_PNO_NETWORK_FOUND, "PNO NETWORK FOUND"},
- {2, WIFI_EVENT_DRIVER_PNO_SCAN_REQUESTED, "PNO SCAN_REQUESTED"},
- {1, WIFI_EVENT_DRIVER_PNO_SCAN_RESULT_FOUND, "PNO SCAN RESULT FOUND"},
- {1, WIFI_EVENT_DRIVER_PNO_SCAN_COMPLETE, "PNO SCAN COMPELETE"}
+ {1, WIFI_EVENT_DRIVER_EAPOL_FRAME_TRANSMIT_REQUESTED, 0, "DRIVER EAPOL TX REQ"},
+ {1, WIFI_EVENT_DRIVER_EAPOL_FRAME_RECEIVED, 0, "DRIVER EAPOL RX"},
+ {2, WIFI_EVENT_DRIVER_SCAN_REQUESTED, 0, "SCAN_REQUESTED"},
+ {2, WIFI_EVENT_DRIVER_SCAN_COMPLETE, 0, "SCAN COMPELETE"},
+ {3, WIFI_EVENT_DRIVER_SCAN_RESULT_FOUND, 0, "SCAN RESULT FOUND"},
+ {2, WIFI_EVENT_DRIVER_PNO_ADD, 0, "PNO ADD"},
+ {2, WIFI_EVENT_DRIVER_PNO_REMOVE, 0, "PNO REMOVE"},
+ {2, WIFI_EVENT_DRIVER_PNO_NETWORK_FOUND, 0, "PNO NETWORK FOUND"},
+ {2, WIFI_EVENT_DRIVER_PNO_SCAN_REQUESTED, 0, "PNO SCAN_REQUESTED"},
+ {1, WIFI_EVENT_DRIVER_PNO_SCAN_RESULT_FOUND, 0, "PNO SCAN RESULT FOUND"},
+ {1, WIFI_EVENT_DRIVER_PNO_SCAN_COMPLETE, 0, "PNO SCAN COMPELETE"}
};
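/*
 * Illustrative filter sketch over the table above (the comparison direction
 * is an assumption and the function name is hypothetical): an event tag is
 * forwarded only when its per-event log_level does not exceed the level
 * currently configured for the ring; unknown tags are dropped.
 */
static bool dhd_event_passes_level_example(uint16 tag, int ring_log_level)
{
	int i;

	for (i = 0; i < ARRAYSIZE(dhd_event_map); i++) {
		if (dhd_event_map[i].tag == tag)
			return (dhd_event_map[i].log_level <= ring_log_level);
	}
	return FALSE;
}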
static void
ndev = dhd_linux_get_primary_netdev(dhdp);
if (!ndev)
return;
- if (!VALID_RING(ring_id))
- return;
if (ring_send_sub_cb[ring_id]) {
ring_sub_send = ring_send_sub_cb[ring_id];
ring_sub_send(ndev, ring_id, data, len, ring_status);
#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wcast-qual"
-#endif // endif
+#endif
linux_dbgring_info_t *ring_info =
container_of(d_work, linux_dbgring_info_t, work);
#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
#pragma GCC diagnostic pop
-#endif // endif
+#endif
dhd_pub_t *dhdp = ring_info->dhdp;
int ringid = ring_info->ring_id;
dhd_dbg_ring_status_t ring_status;
unsigned long flags;
ring = &dhdp->dbg->dbg_rings[ringid];
- DHD_DBG_RING_LOCK(ring->lock, flags);
+ flags = dhd_os_spin_lock(ring->lock);
dhd_dbg_get_ring_status(dhdp, ringid, &ring_status);
if (ring->wp > ring->rp) {
goto exit;
}
- DHD_DBG_RING_UNLOCK(ring->lock, flags);
- rlen = dhd_dbg_pull_from_ring(dhdp, ringid, buf, buflen);
- DHD_DBG_RING_LOCK(ring->lock, flags);
-
+ rlen = dhd_dbg_ring_pull(dhdp, ringid, buf, buflen);
if (!ring->sched_pull) {
ring->sched_pull = TRUE;
}
}
}
- DHD_DBG_RING_UNLOCK(ring->lock, flags);
+ dhd_os_spin_unlock(ring->lock, flags);
return;
}
int ret = BCME_OK;
int ring_id;
linux_dbgring_info_t *os_priv, *ring_info;
+ uint32 ms;
ring_id = dhd_dbg_find_ring_id(dhdp, ring_name);
if (!VALID_RING(ring_id))
return BCME_ERROR;
ring_info = &os_priv[ring_id];
ring_info->log_level = log_level;
-
+ if (ring_id == FW_VERBOSE_RING_ID || ring_id == FW_EVENT_RING_ID) {
+ ring_info->tsoffset = local_clock();
+ if (dhd_wl_ioctl_get_intiovar(dhdp, "rte_timesync", &ms, WLC_GET_VAR,
+ FALSE, 0))
+ DHD_ERROR(("%s rte_timesync failed\n", __FUNCTION__));
+ do_div(ring_info->tsoffset, 1000000);
+ ring_info->tsoffset -= ms;
+ }
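/*
 * Worked illustration of the offset computed above (the helper and its use
 * are assumptions): local_clock() returns nanoseconds, do_div(.., 1000000)
 * turns that into milliseconds, and subtracting the firmware "rte_timesync"
 * value leaves host_ms_at_config - fw_ms_at_config. A firmware-relative
 * millisecond timestamp can then be mapped back to host time by adding the
 * offset.
 */
static inline uint64 fw_ms_to_host_ms_example(linux_dbgring_info_t *ri, uint32 fw_ms)
{
	return (uint64)fw_ms + ri->tsoffset;
}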
if (time_intval == 0 || log_level == 0) {
ring_info->interval = 0;
cancel_delayed_work_sync(&ring_info->work);
if (!os_priv)
return BCME_ERROR;
- max_log_level = os_priv[FW_VERBOSE_RING_ID].log_level;
-
+ max_log_level = MAX(os_priv[FW_VERBOSE_RING_ID].log_level,
+ os_priv[FW_EVENT_RING_ID].log_level);
if (max_log_level == SUPPRESS_LOG_LEVEL) {
/* suppress the logging in FW not to wake up host while device in suspend mode */
ret = dhd_iovar(dhdp, 0, "logtrace", (char *)&enable, sizeof(enable), NULL, 0,
msg_hdr.flags |= DBG_RING_ENTRY_FLAGS_HAS_BINARY;
msg_hdr.timestamp = local_clock();
/* convert to ms */
- msg_hdr.timestamp = DIV_U64_BY_U32(msg_hdr.timestamp, NSEC_PER_MSEC);
+ do_div(msg_hdr.timestamp, 1000000);
msg_hdr.len = data_len;
/* filter the event for higher log level with current log level */
for (i = 0; i < ARRAYSIZE(dhd_event_map); i++) {
}
}
}
- ret = dhd_dbg_push_to_ring(dhdp, ring_id, &msg_hdr, event_data);
+ ret = dhd_dbg_ring_push(dhdp, ring_id, &msg_hdr, event_data);
if (ret) {
DHD_ERROR(("%s : failed to push data into the ring (%d) with ret(%d)\n",
__FUNCTION__, ring_id, ret));
{
int ret = BCME_OK;
*features = 0;
-#ifdef DEBUGABILITY
*features |= DBG_MEMORY_DUMP_SUPPORTED;
if (FW_SUPPORTED(dhdp, logtrace)) {
*features |= DBG_CONNECT_EVENT_SUPPORTED;
*features |= DBG_PACKET_FATE_SUPPORTED;
}
#endif /* DBG_PKT_MON */
-#endif /* DEBUGABILITY */
return ret;
}
* Flow rings are transmit traffic (=propagating towards antenna) related entities
*
*
- * Copyright (C) 1999-2019, Broadcom.
- *
+ * Copyright (C) 1999-2017, Broadcom Corporation
+ *
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
* under the terms of the GNU General Public License version 2 (the "GPL"),
* available at http://www.broadcom.com/licenses/GPLv2.php, with the
* following added to such license:
- *
+ *
* As a special exception, the copyright holders of this software give you
* permission to link this software with independent modules, and to copy and
* distribute the resulting executable under terms of your choice, provided that
* the license of that module. An independent module is a module which is not
* derived from this software. The special exception does not apply to any
* modifications of the software.
- *
+ *
* Notwithstanding the above, under no circumstances may you combine this
* software in any way with any other Broadcom software provided under a license
* other than the GPL, without Broadcom's express prior written consent.
*
* <<Broadcom-WL-IPTag/Open:>>
*
- * $Id: dhd_flowring.c 808473 2019-03-07 07:35:30Z $
+ * $Id: dhd_flowring.c 710862 2017-07-14 07:43:59Z $
*/
+
#include <typedefs.h>
#include <bcmutils.h>
#include <bcmendian.h>
#define FLOW_QUEUE_PKT_NEXT(p) PKTLINK(p)
#define FLOW_QUEUE_PKT_SETNEXT(p, x) PKTSETLINK((p), (x))
+#if defined(EAPOL_PKT_PRIO) || defined(DHD_LOSSLESS_ROAMING)
+const uint8 prio2ac[8] = { 0, 1, 1, 0, 2, 2, 3, 7 };
+#else
const uint8 prio2ac[8] = { 0, 1, 1, 0, 2, 2, 3, 3 };
+#endif /* EAPOL_PKT_PRIO || DHD_LOSSLESS_ROAMING */
const uint8 prio2tid[8] = { 0, 1, 2, 3, 4, 5, 6, 7 };
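/*
 * Reference note for the tables above (standard 802.1D-to-WMM mapping,
 * AC_BE=0, AC_BK=1, AC_VI=2, AC_VO=3): priorities 0 and 3 map to BE, 1 and 2
 * to BK, 4 and 5 to VI, 6 and 7 to VO. The EAPOL/lossless-roaming variant
 * maps priority 7 to a separate value (7) so those frames can be steered to
 * their own flow ring. The wrapper below is hypothetical.
 */
static inline uint8 prio_to_ac_example(uint8 dot1d_prio)
{
	return prio2ac[dot1d_prio & 7];	/* mask keeps the table index in range */
}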
/** Queue overflow throttle. Return value: TRUE if throttle needs to be applied */
ASSERT(dhdp != (dhd_pub_t*)NULL);
ASSERT(flowid < dhdp->num_flow_rings);
- if (flowid >= dhdp->num_flow_rings) {
- return NULL;
- }
flow_ring_node = &(((flow_ring_node_t*)(dhdp->flow_ring_table))[flowid]);
flow_queue_t *
dhd_flow_queue(dhd_pub_t *dhdp, uint16 flowid)
{
- flow_ring_node_t * flow_ring_node = NULL;
+ flow_ring_node_t * flow_ring_node;
flow_ring_node = dhd_flow_ring_node(dhdp, flowid);
- if (flow_ring_node)
- return &flow_ring_node->queue;
- else
- return NULL;
+ return &flow_ring_node->queue;
}
/* Flow ring's queue management functions */
int queue_budget, int cumm_threshold, void *cumm_ctr,
int l2cumm_threshold, void *l2cumm_ctr)
{
- flow_queue_t * queue = NULL;
+ flow_queue_t * queue;
ASSERT(dhdp != (dhd_pub_t*)NULL);
ASSERT(queue_budget > 1);
ASSERT(l2cumm_ctr != (void*)NULL);
queue = dhd_flow_queue(dhdp, flowid);
- if (queue) {
- DHD_FLOW_QUEUE_SET_MAX(queue, queue_budget); /* Max queue length */
- /* Set the queue's parent threshold and cummulative counter */
- DHD_FLOW_QUEUE_SET_THRESHOLD(queue, cumm_threshold);
- DHD_FLOW_QUEUE_SET_CLEN(queue, cumm_ctr);
+ DHD_FLOW_QUEUE_SET_MAX(queue, queue_budget); /* Max queue length */
- /* Set the queue's grandparent threshold and cummulative counter */
- DHD_FLOW_QUEUE_SET_L2THRESHOLD(queue, l2cumm_threshold);
- DHD_FLOW_QUEUE_SET_L2CLEN(queue, l2cumm_ctr);
- }
-}
+ /* Set the queue's parent threshold and cumulative counter */
+ DHD_FLOW_QUEUE_SET_THRESHOLD(queue, cumm_threshold);
+ DHD_FLOW_QUEUE_SET_CLEN(queue, cumm_ctr);
-uint8
-dhd_num_prio_supported_per_flow_ring(dhd_pub_t *dhdp)
-{
- uint8 prio_count = 0;
- int i;
- // Pick all elements one by one
- for (i = 0; i < NUMPRIO; i++)
- {
- // Check if the picked element is already counted
- int j;
- for (j = 0; j < i; j++) {
- if (dhdp->flow_prio_map[i] == dhdp->flow_prio_map[j]) {
- break;
- }
- }
- // If not counted earlier, then count it
- if (i == j) {
- prio_count++;
- }
- }
-
-#ifdef DHD_LOSSLESS_ROAMING
- /* For LLR, we are using flowring with prio 7 which is not considered
- * in prio2ac array. But in __dhd_sendpkt, it is hardcoded hardcoded
- * prio to PRIO_8021D_NC and send to dhd_flowid_update.
- * So add 1 to prio_count.
- */
- prio_count++;
-#endif /* DHD_LOSSLESS_ROAMING */
-
- return prio_count;
-}
-
-uint8
-dhd_get_max_multi_client_flow_rings(dhd_pub_t *dhdp)
-{
- uint8 reserved_infra_sta_flow_rings = dhd_num_prio_supported_per_flow_ring(dhdp);
- uint8 total_tx_flow_rings = dhdp->num_flow_rings - dhdp->bus->max_cmn_rings;
- uint8 max_multi_client_flow_rings = total_tx_flow_rings - reserved_infra_sta_flow_rings;
- return max_multi_client_flow_rings;
+ /* Set the queue's grandparent threshold and cumulative counter */
+ DHD_FLOW_QUEUE_SET_L2THRESHOLD(queue, l2cumm_threshold);
+ DHD_FLOW_QUEUE_SET_L2CLEN(queue, l2cumm_ctr);
}
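/*
 * Illustrative restatement (assumed semantics) of how the budget and the
 * parent/grandparent cumulative thresholds set above feed the overflow
 * throttle: the queue backs off when its own length exceeds its budget or
 * when either shared cumulative counter crosses its threshold. The plain
 * integer arguments stand in for the driver's accessor macros.
 */
static inline bool flow_queue_should_throttle_example(int qlen, int budget,
	int cumm_len, int cumm_threshold, int l2cumm_len, int l2cumm_threshold)
{
	return (qlen > budget) ||
		(cumm_len > cumm_threshold) ||
		(l2cumm_len > l2cumm_threshold);
}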
/** Initializes data structures of multiple flow rings */
dhdp->flow_prio_map_type = DHD_FLOW_PRIO_AC_MAP;
bcopy(prio2ac, dhdp->flow_prio_map, sizeof(uint8) * NUMPRIO);
-
- dhdp->max_multi_client_flow_rings = dhd_get_max_multi_client_flow_rings(dhdp);
- dhdp->multi_client_flow_rings = 0U;
-
#ifdef DHD_LOSSLESS_ROAMING
dhdp->dequeue_prec_map = ALLPRIO;
-#endif // endif
+#endif
/* Now populate into dhd pub */
DHD_FLOWID_LOCK(lock, flags);
dhdp->num_flow_rings = num_flow_rings;
dhdp->num_flow_rings = 0U;
bzero(dhdp->flow_prio_map, sizeof(uint8) * NUMPRIO);
- dhdp->max_multi_client_flow_rings = 0U;
- dhdp->multi_client_flow_rings = 0U;
-
lock = dhdp->flowid_lock;
dhdp->flowid_lock = NULL;
if_flow_lkup_t *if_flow_lkup;
unsigned long flags;
- ASSERT(ifindex < DHD_MAX_IFS);
- if (ifindex >= DHD_MAX_IFS)
- return FLOWID_INVALID;
-
DHD_FLOWID_LOCK(dhdp->flowid_lock, flags);
if_flow_lkup = (if_flow_lkup_t *)dhdp->if_flow_lkup;
ASSERT(if_flow_lkup);
- if (DHD_IF_ROLE_GENERIC_STA(dhdp, ifindex)) {
+ if ((if_flow_lkup[ifindex].role == WLC_E_IF_ROLE_STA) ||
+ (if_flow_lkup[ifindex].role == WLC_E_IF_ROLE_WDS)) {
#ifdef WLTDLS
if (dhdp->peer_tbl.tdls_peer_count && !(ETHER_ISMULTI(da)) &&
is_tdls_destination(dhdp, da)) {
if (flowid == FLOWID_INVALID) {
MFREE(dhdp->osh, fl_hash_node, sizeof(flow_hash_info_t));
- DHD_ERROR_RLMT(("%s: cannot get free flowid \n", __FUNCTION__));
+ DHD_ERROR(("%s: cannot get free flowid \n", __FUNCTION__));
return FLOWID_INVALID;
}
DHD_FLOWID_LOCK(dhdp->flowid_lock, flags);
if_flow_lkup = (if_flow_lkup_t *)dhdp->if_flow_lkup;
- if (DHD_IF_ROLE_GENERIC_STA(dhdp, ifindex)) {
- /* For STA/GC non TDLS dest and WDS dest we allocate entry based on prio only */
+ if ((if_flow_lkup[ifindex].role == WLC_E_IF_ROLE_STA) ||
+ (if_flow_lkup[ifindex].role == WLC_E_IF_ROLE_WDS)) {
+ /* For STA non TDLS dest and WDS dest we allocate entry based on prio only */
#ifdef WLTDLS
if (dhdp->peer_tbl.tdls_peer_count &&
(is_tdls_destination(dhdp, da))) {
DHD_INFO(("%s: allocated flowid %d\n", __FUNCTION__, fl_hash_node->flowid));
- if (fl_hash_node->flowid >= dhdp->num_flow_rings) {
- DHD_ERROR(("%s: flowid=%d num_flow_rings=%d ifindex=%d prio=%d role=%d\n",
- __FUNCTION__, fl_hash_node->flowid, dhdp->num_flow_rings,
- ifindex, prio, if_flow_lkup[ifindex].role));
- dhd_prhex("da", (uchar *)da, ETHER_ADDR_LEN, DHD_ERROR_VAL);
- dhd_prhex("sa", (uchar *)sa, ETHER_ADDR_LEN, DHD_ERROR_VAL);
- return FLOWID_INVALID;
- }
-
return fl_hash_node->flowid;
} /* dhd_flowid_alloc */
flow_ring_table_t *flow_ring_table;
unsigned long flags;
int ret;
+ bool is_sta_assoc;
- DHD_TRACE(("%s\n", __FUNCTION__));
-
+ DHD_INFO(("%s\n", __FUNCTION__));
if (!dhdp->flow_ring_table) {
return BCME_ERROR;
}
- ASSERT(ifindex < DHD_MAX_IFS);
- if (ifindex >= DHD_MAX_IFS)
- return BCME_BADARG;
-
flow_ring_table = (flow_ring_table_t *)dhdp->flow_ring_table;
id = dhd_flowid_find(dhdp, ifindex, prio, sa, da);
if (id == FLOWID_INVALID) {
- bool if_role_multi_client;
+
if_flow_lkup_t *if_flow_lkup;
if_flow_lkup = (if_flow_lkup_t *)dhdp->if_flow_lkup;
if (!if_flow_lkup[ifindex].status)
return BCME_ERROR;
-
- /* check role for multi client case */
- if_role_multi_client = DHD_IF_ROLE_MULTI_CLIENT(dhdp, ifindex);
-
- /* Abort Flowring creation if multi client flowrings crossed the threshold */
-#ifdef DHD_LIMIT_MULTI_CLIENT_FLOWRINGS
- if (if_role_multi_client &&
- (dhdp->multi_client_flow_rings >= dhdp->max_multi_client_flow_rings)) {
- DHD_ERROR_RLMT(("%s: Max multi client flow rings reached: %d:%d\n",
- __FUNCTION__, dhdp->multi_client_flow_rings,
- dhdp->max_multi_client_flow_rings));
- return BCME_ERROR;
- }
-#endif /* DHD_LIMIT_MULTI_CLIENT_FLOWRINGS */
-
- /* Do not create Flowring if peer is not associated */
+ BCM_REFERENCE(is_sta_assoc);
#if defined(PCIE_FULL_DONGLE)
- if (if_role_multi_client && !ETHER_ISMULTI(da) &&
- !dhd_sta_associated(dhdp, ifindex, (uint8 *)da)) {
- DHD_ERROR_RLMT(("%s: Skip send pkt without peer addition\n", __FUNCTION__));
+ is_sta_assoc = dhd_sta_associated(dhdp, ifindex, (uint8 *)da);
+ DHD_ERROR(("%s: multi %x ifindex %d role %x assoc %d\n", __FUNCTION__,
+ ETHER_ISMULTI(da), ifindex, if_flow_lkup[ifindex].role,
+ is_sta_assoc));
+ if (!ETHER_ISMULTI(da) &&
+ ((if_flow_lkup[ifindex].role == WLC_E_IF_ROLE_AP) ||
+ (if_flow_lkup[ifindex].role == WLC_E_IF_ROLE_P2P_GO)) &&
+ (!is_sta_assoc))
return BCME_ERROR;
- }
#endif /* (linux || LINUX) && PCIE_FULL_DONGLE */
id = dhd_flowid_alloc(dhdp, ifindex, prio, sa, da);
if (id == FLOWID_INVALID) {
- DHD_ERROR_RLMT(("%s: alloc flowid ifindex %u status %u\n",
+ DHD_ERROR(("%s: alloc flowid ifindex %u status %u\n",
__FUNCTION__, ifindex, if_flow_lkup[ifindex].status));
return BCME_ERROR;
}
ASSERT(id < dhdp->num_flow_rings);
- /* Only after flowid alloc, increment multi_client_flow_rings */
- if (if_role_multi_client) {
- dhdp->multi_client_flow_rings++;
- }
-
/* register this flowid in dhd_pub */
dhd_add_flowid(dhdp, ifindex, prio, da, id);
flow_ring_node->active = TRUE;
flow_ring_node->status = FLOW_RING_STATUS_CREATE_PENDING;
-#ifdef TX_STATUS_LATENCY_STATS
- flow_ring_node->flow_info.num_tx_status = 0;
- flow_ring_node->flow_info.cum_tx_status_latency = 0;
- flow_ring_node->flow_info.num_tx_pkts = 0;
-#endif /* TX_STATUS_LATENCY_STATS */
+#ifdef DEVICE_TX_STUCK_DETECT
+ flow_ring_node->tx_cmpl = flow_ring_node->tx_cmpl_prev = OSL_SYSUPTIME();
+ flow_ring_node->stuck_count = 0;
+#endif /* DEVICE_TX_STUCK_DETECT */
+
DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags);
/* Create and inform device about the new flow */
if (dhd_bus_flow_ring_create_request(dhdp->bus, (void *)flow_ring_node)
!= BCME_OK) {
- DHD_FLOWRING_LOCK(flow_ring_node->lock, flags);
- flow_ring_node->status = FLOW_RING_STATUS_CLOSED;
- flow_ring_node->active = FALSE;
- DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags);
DHD_ERROR(("%s: create error %d\n", __FUNCTION__, id));
return BCME_ERROR;
}
return BCME_OK;
} else {
/* if the Flow id was found in the hash */
-
- if (id >= dhdp->num_flow_rings) {
- DHD_ERROR(("%s: Invalid flow id : %u, num_flow_rings : %u\n",
- __FUNCTION__, id, dhdp->num_flow_rings));
- *flowid = FLOWID_INVALID;
- ASSERT(0);
- return BCME_ERROR;
- }
+ ASSERT(id < dhdp->num_flow_rings);
flow_ring_node = (flow_ring_node_t *) &flow_ring_table[id];
DHD_FLOWRING_LOCK(flow_ring_node->lock, flags);
} /* Flow Id found in the hash */
} /* dhd_flowid_lookup */
-int
-dhd_flowid_find_by_ifidx(dhd_pub_t *dhdp, uint8 ifindex, uint16 flowid)
-{
- int hashidx = 0;
- bool found = FALSE;
- flow_hash_info_t *cur;
- if_flow_lkup_t *if_flow_lkup;
- unsigned long flags;
-
- if (!dhdp->flow_ring_table) {
- DHD_ERROR(("%s : dhd->flow_ring_table is NULL\n", __FUNCTION__));
- return BCME_ERROR;
- }
-
- DHD_FLOWID_LOCK(dhdp->flowid_lock, flags);
- if_flow_lkup = (if_flow_lkup_t *)dhdp->if_flow_lkup;
- for (hashidx = 0; hashidx < DHD_FLOWRING_HASH_SIZE; hashidx++) {
- cur = if_flow_lkup[ifindex].fl_hash[hashidx];
- if (cur) {
- if (cur->flowid == flowid) {
- found = TRUE;
- }
-
- while (!found && cur) {
- if (cur->flowid == flowid) {
- found = TRUE;
- break;
- }
- cur = cur->next;
- }
-
- if (found) {
- DHD_FLOWID_UNLOCK(dhdp->flowid_lock, flags);
- return BCME_OK;
- }
- }
- }
- DHD_FLOWID_UNLOCK(dhdp->flowid_lock, flags);
-
- return BCME_ERROR;
-}
-
-int
-dhd_flowid_debug_create(dhd_pub_t *dhdp, uint8 ifindex,
- uint8 prio, char *sa, char *da, uint16 *flowid)
-{
- return dhd_flowid_lookup(dhdp, ifindex, prio, sa, da, flowid);
-}
-
/**
* Assign existing or newly created flowid to an 802.3 packet. This flowid is later on used to
* select the flowring to send the packet to the dongle.
{
uint8 *pktdata = (uint8 *)PKTDATA(dhdp->osh, pktbuf);
struct ether_header *eh = (struct ether_header *)pktdata;
- uint16 flowid = 0;
+ uint16 flowid;
ASSERT(ifindex < DHD_MAX_IFS);
flow_hash_info_t *cur, *prev;
if_flow_lkup_t *if_flow_lkup;
unsigned long flags;
- bool if_role_multi_client;
-
- ASSERT(ifindex < DHD_MAX_IFS);
- if (ifindex >= DHD_MAX_IFS)
- return;
DHD_FLOWID_LOCK(dhdp->flowid_lock, flags);
if_flow_lkup = (if_flow_lkup_t *)dhdp->if_flow_lkup;
- if_role_multi_client = DHD_IF_ROLE_MULTI_CLIENT(dhdp, ifindex);
-
for (hashix = 0; hashix < DHD_FLOWRING_HASH_SIZE; hashix++) {
cur = if_flow_lkup[ifindex].fl_hash[hashix];
prev->next = cur->next;
}
- /* Decrement multi_client_flow_rings */
- if (if_role_multi_client) {
- dhdp->multi_client_flow_rings--;
- }
-
/* deregister flowid from dhd_pub. */
dhd_del_flowid(dhdp, ifindex, flowid);
return;
flow_ring_table = (flow_ring_table_t *)dhdp->flow_ring_table;
- for (id = 0; id < dhdp->num_flow_rings; id++) {
+ for (id = 0; id <= dhdp->num_flow_rings; id++) {
if (flow_ring_table[id].active &&
(flow_ring_table[id].flow_info.ifindex == ifindex) &&
(flow_ring_table[id].status == FLOW_RING_STATUS_OPEN)) {
}
}
+
/** Delete flow ring(s) for given peer address. Related to AP/AWDL/TDLS functionality. */
void
dhd_flow_rings_delete_for_peer(dhd_pub_t *dhdp, uint8 ifindex, char *addr)
flow_ring_table = (flow_ring_table_t *)dhdp->flow_ring_table;
for (id = 0; id < dhdp->num_flow_rings; id++) {
- /*
- * Send flowring delete request even if flowring status is
- * FLOW_RING_STATUS_CREATE_PENDING, to handle cases where DISASSOC_IND
- * event comes ahead of flowring create response.
- * Otherwise the flowring will not be deleted later as there will not be any
- * DISASSOC_IND event. With this change, when create response event comes to DHD,
- * it will change the status to FLOW_RING_STATUS_OPEN and soon delete response
- * event will come, upon which DHD will delete the flowring.
- */
if (flow_ring_table[id].active &&
(flow_ring_table[id].flow_info.ifindex == ifindex) &&
(!memcmp(flow_ring_table[id].flow_info.da, addr, ETHER_ADDR_LEN)) &&
- ((flow_ring_table[id].status == FLOW_RING_STATUS_OPEN) ||
- (flow_ring_table[id].status == FLOW_RING_STATUS_CREATE_PENDING))) {
+ (flow_ring_table[id].status == FLOW_RING_STATUS_OPEN)) {
DHD_ERROR(("%s: deleting flowid %d\n",
__FUNCTION__, flow_ring_table[id].flowid));
dhd_bus_flow_ring_delete_request(dhdp->bus,
if (ifindex >= DHD_MAX_IFS)
return;
- DHD_INFO(("%s: ifindex %u op %u role is %u \n",
+ DHD_ERROR(("%s: ifindex %u op %u role is %u \n",
__FUNCTION__, ifindex, op, role));
if (!dhdp->flowid_allocator) {
DHD_ERROR(("%s: Flow ring not intited yet \n", __FUNCTION__));
if (ifindex >= DHD_MAX_IFS)
return BCME_BADARG;
- DHD_INFO(("%s: ifindex %d status %d\n", __FUNCTION__, ifindex, status));
+ DHD_ERROR(("%s: ifindex %d status %d\n", __FUNCTION__, ifindex, status));
DHD_FLOWID_LOCK(dhdp->flowid_lock, flags);
if_flow_lkup = (if_flow_lkup_t *)dhdp->if_flow_lkup;
else
bcopy(prio2ac, dhdp->flow_prio_map, sizeof(uint8) * NUMPRIO);
- dhdp->max_multi_client_flow_rings = dhd_get_max_multi_client_flow_rings(dhdp);
-
return BCME_OK;
}
/** Inform firmware on updated flow priority mapping, called on IOVAR */
int dhd_flow_prio_map(dhd_pub_t *dhd, uint8 *map, bool set)
{
- uint8 iovbuf[24];
- int len;
+ uint8 iovbuf[24] = {0};
if (!set) {
- memset(&iovbuf, 0, sizeof(iovbuf));
- len = bcm_mkiovar("bus:fl_prio_map", NULL, 0, (char*)iovbuf, sizeof(iovbuf));
- if (len == 0) {
- return BCME_BUFTOOSHORT;
- }
+ bcm_mkiovar("bus:fl_prio_map", NULL, 0, (char*)iovbuf, sizeof(iovbuf));
if (dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, iovbuf, sizeof(iovbuf), FALSE, 0) < 0) {
DHD_ERROR(("%s: failed to get fl_prio_map\n", __FUNCTION__));
return BCME_ERROR;
*map = iovbuf[0];
return BCME_OK;
}
- len = bcm_mkiovar("bus:fl_prio_map", (char *)map, 4, (char*)iovbuf, sizeof(iovbuf));
- if (len == 0) {
- return BCME_BUFTOOSHORT;
- }
- if (dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, len, TRUE, 0) < 0) {
+ bcm_mkiovar("bus:fl_prio_map", (char *)map, 4, (char*)iovbuf, sizeof(iovbuf));
+ if (dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0) < 0) {
DHD_ERROR(("%s: failed to set fl_prio_map \n",
__FUNCTION__));
return BCME_ERROR;
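/*
 * Worked size check for the bcm_mkiovar() calls above (assumed packing:
 * iovar name, its terminating NUL, then the parameter bytes); the function
 * name below is hypothetical.
 */
static inline int fl_prio_map_iov_len_example(void)
{
	/* "bus:fl_prio_map" (15) + NUL (1) + 4-byte map = 20 bytes, which
	 * fits the 24-byte iovbuf used by dhd_flow_prio_map().
	 */
	return (int)strlen("bus:fl_prio_map") + 1 + 4;
}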
* Provides type definitions and function prototypes used to create, delete and manage flow rings at
* high level.
*
- * Copyright (C) 1999-2019, Broadcom.
- *
+ * Copyright (C) 1999-2017, Broadcom Corporation
+ *
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
* under the terms of the GNU General Public License version 2 (the "GPL"),
* available at http://www.broadcom.com/licenses/GPLv2.php, with the
* following added to such license:
- *
+ *
* As a special exception, the copyright holders of this software give you
* permission to link this software with independent modules, and to copy and
* distribute the resulting executable under terms of your choice, provided that
* the license of that module. An independent module is a module which is not
* derived from this software. The special exception does not apply to any
* modifications of the software.
- *
+ *
* Notwithstanding the above, under no circumstances may you combine this
* software in any way with any other Broadcom software provided under a license
* other than the GPL, without Broadcom's express prior written consent.
*
* <<Broadcom-WL-IPTag/Open:>>
*
- * $Id: dhd_flowring.h 786596 2018-10-26 22:54:51Z $
+ * $Id: dhd_flowring.h 672438 2016-11-28 12:35:24Z $
*/
+
/****************
* Common types *
*/
#endif /* IDLE_TX_FLOW_MGMT */
#define FLOW_RING_STATUS_STA_FREEING 7
+#ifdef DHD_EFI
+#define DHD_FLOWRING_RX_BUFPOST_PKTSZ 1600
+#else
#define DHD_FLOWRING_RX_BUFPOST_PKTSZ 2048
-#define DHD_FLOWRING_RX_BUFPOST_PKTSZ_MAX 4096
+#endif
#define DHD_FLOW_PRIO_AC_MAP 0
#define DHD_FLOW_PRIO_TID_MAP 1
#define DHD_IF_ROLE(pub, idx) (((if_flow_lkup_t *)(pub)->if_flow_lkup)[idx].role)
#define DHD_IF_ROLE_AP(pub, idx) (DHD_IF_ROLE(pub, idx) == WLC_E_IF_ROLE_AP)
#define DHD_IF_ROLE_STA(pub, idx) (DHD_IF_ROLE(pub, idx) == WLC_E_IF_ROLE_STA)
-#define DHD_IF_ROLE_P2PGC(pub, idx) (DHD_IF_ROLE(pub, idx) == WLC_E_IF_ROLE_P2P_CLIENT)
#define DHD_IF_ROLE_P2PGO(pub, idx) (DHD_IF_ROLE(pub, idx) == WLC_E_IF_ROLE_P2P_GO)
#define DHD_IF_ROLE_WDS(pub, idx) (DHD_IF_ROLE(pub, idx) == WLC_E_IF_ROLE_WDS)
-#define DHD_IF_ROLE_IBSS(pub, idx) (DHD_IF_ROLE(pub, idx) == WLC_E_IF_ROLE_IBSS)
-#ifdef WL_NAN
-#define DHD_IF_ROLE_NAN(pub, idx) (DHD_IF_ROLE(pub, idx) == WLC_E_IF_ROLE_NAN)
-#else
-#define DHD_IF_ROLE_NAN(pub, idx) (FALSE)
-#endif /* WL_NAN */
-#define DHD_IF_ROLE_AWDL(pub, idx) (FALSE)
-
-#define DHD_IF_ROLE_GENERIC_STA(pub, idx) \
- (DHD_IF_ROLE_STA(pub, idx) || DHD_IF_ROLE_P2PGC(pub, idx) || DHD_IF_ROLE_WDS(pub, idx))
-
-#define DHD_IF_ROLE_MULTI_CLIENT(pub, idx) \
- (DHD_IF_ROLE_AP(pub, idx) || DHD_IF_ROLE_P2PGO(pub, idx) || DHD_IF_ROLE_AWDL(pub, idx) ||\
- DHD_IF_ROLE_NAN(pub, idx))
-
#define DHD_FLOW_RING(dhdp, flowid) \
(flow_ring_node_t *)&(((flow_ring_node_t *)((dhdp)->flow_ring_table))[flowid])
#define DHD_FLOW_QUEUE_SET_L2CLEN(queue, grandparent_clen_ptr) \
((queue)->l2clen_ptr) = (void *)(grandparent_clen_ptr)
+/* see wlfc_proto.h for tx status details */
+#define DHD_FLOWRING_MAXSTATUS_MSGS 5
#define DHD_FLOWRING_TXSTATUS_CNT_UPDATE(bus, flowid, txstatus)
/* Pkttag not compatible with PROP_TXSTATUS or WLFC */
typedef struct dhd_pkttag_fr {
uint16 flowid;
uint16 ifid;
-#ifdef DHD_LB_TXC
int dataoff;
dmaaddr_t physaddr;
uint32 pa_len;
-#endif /* DHD_LB_TXC */
} dhd_pkttag_fr_t;
#define DHD_PKTTAG_SET_IFID(tag, idx) ((tag)->ifid = (uint16)(idx))
#define DHD_PKTTAG_PA(tag) ((tag)->physaddr)
#define DHD_PKTTAG_PA_LEN(tag) ((tag)->pa_len)
+
/** each flow ring is dedicated to a tid/sa/da combination */
typedef struct flow_info {
uint8 tid;
uint8 ifindex;
- uchar sa[ETHER_ADDR_LEN];
- uchar da[ETHER_ADDR_LEN];
-#ifdef TX_STATUS_LATENCY_STATS
- /* total number of tx_status received on this flowid */
- uint64 num_tx_status;
- /* cumulative tx_status latency for this flowid */
- uint64 cum_tx_status_latency;
- /* num tx packets sent on this flowring */
- uint64 num_tx_pkts;
-#endif /* TX_STATUS_LATENCY_STATS */
+ char sa[ETHER_ADDR_LEN];
+ char da[ETHER_ADDR_LEN];
} flow_info_t;
/** a flow ring is used for outbound (towards antenna) 802.3 packets */
#ifdef IDLE_TX_FLOW_MGMT
uint64 last_active_ts; /* contains last active timestamp */
#endif /* IDLE_TX_FLOW_MGMT */
-#ifdef DHD_HP2P
- bool hp2p_ring;
-#endif /* DHD_HP2P */
+#ifdef DEVICE_TX_STUCK_DETECT
+ /* Timestamp (msec) of the most recent Tx packet completion received on this flow ring */
+ uint32 tx_cmpl;
+ /*
+ * Holds the tx_cmpl which was read during the previous
+ * iteration of the stuck detection algo
+ */
+ uint32 tx_cmpl_prev;
+ /* counter to decide if this particular flow is stuck or not */
+ uint32 stuck_count;
+#endif /* DEVICE_TX_STUCK_DETECT */
+
} flow_ring_node_t;
typedef flow_ring_node_t flow_ring_table_t;
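/*
 * Illustrative sketch (assumed logic, the iteration threshold is
 * hypothetical) of how a periodic monitor could evaluate the
 * DEVICE_TX_STUCK_DETECT fields above: no new Tx completion since the last
 * iteration grows stuck_count, any progress resets it.
 */
static inline bool flow_ring_tx_stuck_example(flow_ring_node_t *flow,
	uint32 stuck_iterations)
{
	if (flow->tx_cmpl == flow->tx_cmpl_prev)
		flow->stuck_count++;
	else
		flow->stuck_count = 0;

	flow->tx_cmpl_prev = flow->tx_cmpl;
	return (flow->stuck_count >= stuck_iterations);
}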
extern int dhd_flowid_update(dhd_pub_t *dhdp, uint8 ifindex, uint8 prio,
void *pktbuf);
-extern int dhd_flowid_debug_create(dhd_pub_t *dhdp, uint8 ifindex,
- uint8 prio, char *sa, char *da, uint16 *flowid);
-extern int dhd_flowid_find_by_ifidx(dhd_pub_t *dhdp, uint8 ifidex, uint16 flowid);
extern void dhd_flowid_free(dhd_pub_t *dhdp, uint8 ifindex, uint16 flowid);
#include <dhd_linux.h>
#include <linux/gpio.h>
+#ifdef CUSTOMER_HW_PLATFORM
+#include <plat/sdhci.h>
+#define sdmmc_channel sdmmc_device_mmc0
+#endif /* CUSTOMER_HW_PLATFORM */
+
#if defined(BUS_POWER_RESTORE) && defined(BCMSDIO)
#include <linux/mmc/core.h>
#include <linux/mmc/card.h>
extern int wifi_irq_trigger_level(void);
extern u8 *wifi_get_mac(void);
#endif
-extern void sdio_reinit(void);
extern void set_usb_bt_power(int is_power);
+extern void sdio_reinit(void);
extern void extern_wifi_set_enable(int is_on);
-extern void pci_remove_reinit(unsigned int vid, unsigned int pid, int delBus);
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 14, 0))
extern int wifi_irq_num(void);
#endif
#endif
#endif
#if defined(BUS_POWER_RESTORE)
-#if defined(BCMSDIO) && (LINUX_VERSION_CODE < KERNEL_VERSION(4, 19, 0))
+#if defined(BCMSDIO)
if (adapter->sdio_func && adapter->sdio_func->card && adapter->sdio_func->card->host) {
- mdelay(100);
printf("======== mmc_power_restore_host! ========\n");
mmc_power_restore_host(adapter->sdio_func->card->host);
}
#elif defined(BCMPCIE)
+ OSL_SLEEP(50); /* delay needed to be able to restore PCIe configuration registers */
if (adapter->pci_dev) {
- mdelay(100);
printf("======== pci_set_power_state PCI_D0! ========\n");
pci_set_power_state(adapter->pci_dev, PCI_D0);
if (adapter->pci_saved_state)
mdelay(100);
} else {
#if defined(BUS_POWER_RESTORE)
-#if defined(BCMSDIO) && (LINUX_VERSION_CODE < KERNEL_VERSION(4, 19, 0))
+#if defined(BCMSDIO)
if (adapter->sdio_func && adapter->sdio_func->card && adapter->sdio_func->card->host) {
printf("======== mmc_power_save_host! ========\n");
mmc_power_save_host(adapter->sdio_func->card->host);
}
}
#ifdef CUSTOMER_HW_AMLOGIC
-#ifdef BCMSIDO
+#ifdef BCMSDIO
extern_wifi_set_enable(0);
mdelay(200);
#endif
#endif
#elif defined(BCMPCIE)
printf("======== Card detection to remove PCIE card! ========\n");
-#ifdef CUSTOMER_HW_AMLOGIC
- extern_wifi_set_enable(0);
- mdelay(200);
-#endif
#endif
}
#endif /* BUS_POWER_RESTORE */
return err;
}
-static int dhd_wlan_get_mac_addr(unsigned char *buf
-#ifdef CUSTOM_MULTI_MAC
- , char *name
-#endif
-)
+static int dhd_wlan_get_mac_addr(unsigned char *buf)
{
int err = 0;
-#ifdef CUSTOM_MULTI_MAC
- if (!strcmp("wlan1", name)) {
+ printf("======== %s ========\n", __FUNCTION__);
#ifdef EXAMPLE_GET_MAC
- struct ether_addr ea_example = {{0x00, 0x11, 0x22, 0x33, 0x44, 0xFF}};
- bcopy((char *)&ea_example, buf, sizeof(struct ether_addr));
-#endif /* EXAMPLE_GET_MAC */
- } else
-#endif /* CUSTOM_MULTI_MAC */
+ /* EXAMPLE code */
{
-#ifdef EXAMPLE_GET_MAC
- struct ether_addr ea_example = {{0x02, 0x11, 0x22, 0x33, 0x44, 0x55}};
+ struct ether_addr ea_example = {{0x00, 0x11, 0x22, 0x33, 0x44, 0xFF}};
bcopy((char *)&ea_example, buf, sizeof(struct ether_addr));
-#endif /* EXAMPLE_GET_MAC */
}
-
+#endif /* EXAMPLE_GET_MAC */
#ifdef EXAMPLE_GET_MAC_VER2
/* EXAMPLE code */
{
}
#endif /* EXAMPLE_GET_MAC_VER2 */
- printf("======== %s err=%d ========\n", __FUNCTION__, err);
-
return err;
}
gpio_wl_host_wake = -1;
#endif
-#ifdef CUSTOMER_HW_AMLOGIC
-#if defined(BCMPCIE)
- printf("======== Card detection to detect PCIE card! ========\n");
- pci_remove_reinit(0x14e4, 0x43ec, 1);
-#endif
-#endif
-
if (gpio_wl_reg_on >= 0) {
err = gpio_request(gpio_wl_reg_on, "WL_REG_ON");
if (err < 0) {
/*
* IP Packet Parser Module.
*
- * Copyright (C) 1999-2019, Broadcom.
- *
+ * Copyright (C) 1999-2017, Broadcom Corporation
+ *
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
* under the terms of the GNU General Public License version 2 (the "GPL"),
* available at http://www.broadcom.com/licenses/GPLv2.php, with the
* following added to such license:
- *
+ *
* As a special exception, the copyright holders of this software give you
* permission to link this software with independent modules, and to copy and
* distribute the resulting executable under terms of your choice, provided that
* the license of that module. An independent module is a module which is not
* derived from this software. The special exception does not apply to any
* modifications of the software.
- *
+ *
* Notwithstanding the above, under no circumstances may you combine this
* software in any way with any other Broadcom software provided under a license
* other than the GPL, without Broadcom's express prior written consent.
*
* <<Broadcom-WL-IPTag/Open:>>
*
- * $Id: dhd_ip.c 813282 2019-04-04 09:42:28Z $
+ * $Id: dhd_ip.c 700317 2017-05-18 15:13:29Z $
*/
#include <typedefs.h>
#include <osl.h>
#include <dhd_ip.h>
-#if defined(DHDTCPACK_SUPPRESS) || defined(DHDTCPSYNC_FLOOD_BLK)
+#ifdef DHDTCPACK_SUPPRESS
#include <dhd_bus.h>
#include <dhd_proto.h>
#include <bcmtcp.h>
-#endif /* DHDTCPACK_SUPPRESS || DHDTCPSYNC_FLOOD_BLK */
+#endif /* DHDTCPACK_SUPPRESS */
/* special values */
/* 802.3 llc/snap header */
int ifidx;
uint8 supp_cnt;
dhd_pub_t *dhdp;
-#ifndef TCPACK_SUPPRESS_HOLD_HRT
- timer_list_compat_t timer;
-#else
- struct tasklet_hrtimer timer;
-#endif /* TCPACK_SUPPRESS_HOLD_HRT */
+ struct timer_list timer;
} tcpack_info_t;
typedef struct _tdata_psh_info_t {
tcpack_sup_mod->tdata_psh_info_free = tdata_psh_info;
#ifdef DHDTCPACK_SUP_DBG
tcpack_sup_mod->psh_info_enq_num++;
-#endif // endif
+#endif
}
static tdata_psh_info_t*
return tdata_psh_info;
}
-#ifdef BCMSDIO
static int _tdata_psh_info_pool_init(dhd_pub_t *dhdp,
tcpack_sup_module_t *tcpack_sup_mod)
{
return;
}
-#endif /* BCMSDIO */
-#ifdef BCMPCIE
-#ifndef TCPACK_SUPPRESS_HOLD_HRT
-static void dhd_tcpack_send(ulong data)
+static void dhd_tcpack_send(
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 15, 0)
+ struct timer_list *t
#else
-static enum hrtimer_restart dhd_tcpack_send(struct hrtimer *timer)
-#endif /* TCPACK_SUPPRESS_HOLD_HRT */
+ ulong data
+#endif
+)
{
tcpack_sup_module_t *tcpack_sup_mod;
- tcpack_info_t *cur_tbl;
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 15, 0)
+ tcpack_info_t *cur_tbl = from_timer(cur_tbl, t, timer);
+#else
+ tcpack_info_t *cur_tbl = (tcpack_info_t *)data;
+#endif
dhd_pub_t *dhdp;
int ifidx;
void* pkt;
unsigned long flags;
-#ifndef TCPACK_SUPPRESS_HOLD_HRT
- cur_tbl = (tcpack_info_t *)data;
-#else
- cur_tbl = container_of(timer, tcpack_info_t, timer.timer);
-#endif /* TCPACK_SUPPRESS_HOLD_HRT */
-
if (!cur_tbl) {
- goto done;
+ return;
}
dhdp = cur_tbl->dhdp;
if (!dhdp) {
- goto done;
+ return;
}
flags = dhd_os_tcpacklock(dhdp);
- if (unlikely(dhdp->tcpack_sup_mode != TCPACK_SUP_HOLD)) {
- dhd_os_tcpackunlock(dhdp, flags);
- goto done;
- }
-
tcpack_sup_mod = dhdp->tcpack_sup_module;
if (!tcpack_sup_mod) {
DHD_ERROR(("%s %d: tcpack suppress module NULL!!\n",
__FUNCTION__, __LINE__));
dhd_os_tcpackunlock(dhdp, flags);
- goto done;
+ return;
}
pkt = cur_tbl->pkt_in_q;
ifidx = cur_tbl->ifidx;
if (!pkt) {
dhd_os_tcpackunlock(dhdp, flags);
- goto done;
+ return;
}
cur_tbl->pkt_in_q = NULL;
cur_tbl->pkt_ether_hdr = NULL;
dhd_os_tcpackunlock(dhdp, flags);
dhd_sendpkt(dhdp, ifidx, pkt);
-
-done:
-#ifndef TCPACK_SUPPRESS_HOLD_HRT
- return;
-#else
- return HRTIMER_NORESTART;
-#endif /* TCPACK_SUPPRESS_HOLD_HRT */
}
-#endif /* BCMPCIE */
int dhd_tcpack_suppress_set(dhd_pub_t *dhdp, uint8 mode)
{
tcpack_sup_module = dhdp->tcpack_sup_module;
prev_mode = dhdp->tcpack_sup_mode;
+ /* Check a new mode */
if (prev_mode == mode) {
DHD_ERROR(("%s %d: already set to %d\n", __FUNCTION__, __LINE__, mode));
goto exit;
}
bzero(tcpack_sup_module, sizeof(tcpack_sup_module_t));
break;
-#ifdef BCMSDIO
case TCPACK_SUP_DELAYTX:
if (tcpack_sup_module) {
/* We won't need tdata_psh_info pool and
goto exit;
}
break;
-#endif /* BCMSDIO */
}
/* Update a new mode */
for (i = 0; i < TCPACK_INFO_MAXNUM; i++) {
tcpack_info_t *tcpack_info_tbl =
&tcpack_sup_module->tcpack_info_tbl[i];
-#ifndef TCPACK_SUPPRESS_HOLD_HRT
del_timer(&tcpack_info_tbl->timer);
-#else
- hrtimer_cancel(&tcpack_info_tbl->timer.timer);
-#endif /* TCPACK_SUPPRESS_HOLD_HRT */
if (tcpack_info_tbl->pkt_in_q) {
PKTFREE(dhdp->osh,
tcpack_info_tbl->pkt_in_q, TRUE);
__FUNCTION__, __LINE__));
}
break;
-#ifdef BCMSDIO
case TCPACK_SUP_REPLACE:
/* There is nothing to configure for this mode */
break;
dhd_bus_set_dotxinrx(dhdp->bus, FALSE);
}
break;
-#endif /* BCMSDIO */
-#ifdef BCMPCIE
case TCPACK_SUP_HOLD:
dhdp->tcpack_sup_ratio = CUSTOM_TCPACK_SUPP_RATIO;
dhdp->tcpack_sup_delay = CUSTOM_TCPACK_DELAY_TIME;
tcpack_info_t *tcpack_info_tbl =
&tcpack_sup_module->tcpack_info_tbl[i];
tcpack_info_tbl->dhdp = dhdp;
-#ifndef TCPACK_SUPPRESS_HOLD_HRT
- init_timer_compat(&tcpack_info_tbl->timer,
- dhd_tcpack_send, tcpack_info_tbl);
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 15, 0)
+ timer_setup(&tcpack_info_tbl->timer, dhd_tcpack_send, 0);
#else
- tasklet_hrtimer_init(&tcpack_info_tbl->timer,
- dhd_tcpack_send, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
-#endif /* TCPACK_SUPPRESS_HOLD_HRT */
+ init_timer(&tcpack_info_tbl->timer);
+ tcpack_info_tbl->timer.data = (ulong)tcpack_info_tbl;
+ tcpack_info_tbl->timer.function = dhd_tcpack_send;
+#endif
}
break;
-#endif /* BCMPCIE */
}
exit:
if (dhdp->tcpack_sup_mode == TCPACK_SUP_HOLD) {
for (i = 0; i < TCPACK_INFO_MAXNUM; i++) {
-#ifndef TCPACK_SUPPRESS_HOLD_HRT
del_timer_sync(&tcpack_sup_mod->tcpack_info_tbl[i].timer);
-#else
- hrtimer_cancel(&tcpack_sup_mod->tcpack_info_tbl[i].timer.timer);
-#endif /* TCPACK_SUPPRESS_HOLD_HRT */
}
}
bool set_dotxinrx = TRUE;
unsigned long flags;
+
if (dhdp->tcpack_sup_mode == TCPACK_SUP_OFF)
goto exit;
dhd_os_tcpackunlock(dhdp, flags);
if (!hold) {
-#ifndef TCPACK_SUPPRESS_HOLD_HRT
del_timer_sync(&tcpack_info_tbl[i].timer);
-#else
- hrtimer_cancel(&tcpack_sup_mod->tcpack_info_tbl[i].timer.timer);
-#endif /* TCPACK_SUPPRESS_HOLD_HRT */
}
goto exit;
}
tcpack_info_tbl[free_slot].pkt_ether_hdr = new_ether_hdr;
tcpack_info_tbl[free_slot].ifidx = ifidx;
tcpack_info_tbl[free_slot].supp_cnt = 1;
-#ifndef TCPACK_SUPPRESS_HOLD_HRT
mod_timer(&tcpack_sup_mod->tcpack_info_tbl[free_slot].timer,
jiffies + msecs_to_jiffies(dhdp->tcpack_sup_delay));
-#else
- tasklet_hrtimer_start(&tcpack_sup_mod->tcpack_info_tbl[free_slot].timer,
- ktime_set(0, dhdp->tcpack_sup_delay*1000000),
- HRTIMER_MODE_REL);
-#endif /* TCPACK_SUPPRESS_HOLD_HRT */
tcpack_sup_mod->tcpack_info_cnt++;
} else {
DHD_TRACE(("%s %d: No empty tcp ack info tbl\n",
return hold;
}
#endif /* DHDTCPACK_SUPPRESS */
-
-#ifdef DHDTCPSYNC_FLOOD_BLK
-tcp_hdr_flag_t
-dhd_tcpdata_get_flag(dhd_pub_t *dhdp, void *pkt)
-{
- uint8 *ether_hdr; /* Ethernet header of the new packet */
- uint16 ether_type; /* Ethernet type of the new packet */
- uint8 *ip_hdr; /* IP header of the new packet */
- uint8 *tcp_hdr; /* TCP header of the new packet */
- uint32 ip_hdr_len; /* IP header length of the new packet */
- uint32 cur_framelen;
- uint8 flags;
-
- ether_hdr = PKTDATA(dhdp->osh, pkt);
- cur_framelen = PKTLEN(dhdp->osh, pkt);
-
- ether_type = ether_hdr[12] << 8 | ether_hdr[13];
-
- if (ether_type != ETHER_TYPE_IP) {
- DHD_TRACE(("%s %d: Not a IP packet 0x%x\n",
- __FUNCTION__, __LINE__, ether_type));
- return FLAG_OTHERS;
- }
-
- ip_hdr = ether_hdr + ETHER_HDR_LEN;
- cur_framelen -= ETHER_HDR_LEN;
-
- if (cur_framelen < IPV4_MIN_HEADER_LEN) {
- return FLAG_OTHERS;
- }
-
- ip_hdr_len = IPV4_HLEN(ip_hdr);
- if (IP_VER(ip_hdr) != IP_VER_4 || IPV4_PROT(ip_hdr) != IP_PROT_TCP) {
- DHD_TRACE(("%s %d: Not IPv4 nor TCP! ip ver %d, prot %d\n",
- __FUNCTION__, __LINE__, IP_VER(ip_hdr), IPV4_PROT(ip_hdr)));
- return FLAG_OTHERS;
- }
-
- tcp_hdr = ip_hdr + ip_hdr_len;
-
- flags = (uint8)tcp_hdr[TCP_FLAGS_OFFSET];
-
- if (flags & TCP_FLAG_SYN) {
- if (flags & TCP_FLAG_ACK) {
- return FLAG_SYNCACK;
- }
- return FLAG_SYNC;
- }
- return FLAG_OTHERS;
-}
-#endif /* DHDTCPSYNC_FLOOD_BLK */
*
* Provides type definitions and function prototypes used to parse ip packet.
*
- * Copyright (C) 1999-2019, Broadcom.
- *
+ * Copyright (C) 1999-2017, Broadcom Corporation
+ *
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
* under the terms of the GNU General Public License version 2 (the "GPL"),
* available at http://www.broadcom.com/licenses/GPLv2.php, with the
* following added to such license:
- *
+ *
* As a special exception, the copyright holders of this software give you
* permission to link this software with independent modules, and to copy and
* distribute the resulting executable under terms of your choice, provided that
* the license of that module. An independent module is a module which is not
* derived from this software. The special exception does not apply to any
* modifications of the software.
- *
+ *
* Notwithstanding the above, under no circumstances may you combine this
* software in any way with any other Broadcom software provided under a license
* other than the GPL, without Broadcom's express prior written consent.
*
* <<Broadcom-WL-IPTag/Open:>>
*
- * $Id: dhd_ip.h 790572 2018-11-26 11:03:46Z $
+ * $Id: dhd_ip.h 536854 2015-02-24 13:17:29Z $
*/
#ifndef _dhd_ip_h_
#define _dhd_ip_h_
-#if defined(DHDTCPACK_SUPPRESS) || defined(DHDTCPSYNC_FLOOD_BLK)
+#ifdef DHDTCPACK_SUPPRESS
#include <dngl_stats.h>
#include <bcmutils.h>
#include <dhd.h>
-#endif /* DHDTCPACK_SUPPRESS || DHDTCPSYNC_FLOOD_BLK */
+#endif /* DHDTCPACK_SUPPRESS */
typedef enum pkt_frag
{
extern pkt_frag_t pkt_frag_info(osl_t *osh, void *p);
-#ifdef DHDTCPSYNC_FLOOD_BLK
-typedef enum tcp_hdr_flags {
- FLAG_SYNC,
- FLAG_SYNCACK,
- FLAG_RST,
- FLAG_OTHERS
-} tcp_hdr_flag_t;
-
-extern tcp_hdr_flag_t dhd_tcpdata_get_flag(dhd_pub_t *dhdp, void *pkt);
-#endif /* DHDTCPSYNC_FLOOD_BLK */
-
#ifdef DHDTCPACK_SUPPRESS
#define TCPACKSZMIN (ETHER_HDR_LEN + IPV4_MIN_HEADER_LEN + TCP_MIN_HEADER_LEN)
/* Size of MAX possible TCP ACK packet. Extra bytes for IP/TCP option fields */
* Broadcom Dongle Host Driver (DHD), Linux-specific network interface
* Basically selected code segments from usb-cdc.c and usb-rndis.c
*
- * Copyright (C) 1999-2019, Broadcom.
- *
+ * Copyright (C) 1999-2017, Broadcom Corporation
+ *
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
* under the terms of the GNU General Public License version 2 (the "GPL"),
* available at http://www.broadcom.com/licenses/GPLv2.php, with the
* following added to such license:
- *
+ *
* As a special exception, the copyright holders of this software give you
* permission to link this software with independent modules, and to copy and
* distribute the resulting executable under terms of your choice, provided that
* the license of that module. An independent module is a module which is not
* derived from this software. The special exception does not apply to any
* modifications of the software.
- *
+ *
* Notwithstanding the above, under no circumstances may you combine this
* software in any way with any other Broadcom software provided under a license
* other than the GPL, without Broadcom's express prior written consent.
*
* <<Broadcom-WL-IPTag/Open:>>
*
- * $Id: dhd_linux.c 822756 2019-05-30 13:20:26Z $
+ * $Id: dhd_linux.c 710862 2017-07-14 07:43:59Z $
*/
#include <typedefs.h>
#include <linuxver.h>
#include <osl.h>
-#include <bcmstdlib_s.h>
#ifdef SHOW_LOGTRACE
#include <linux/syscalls.h>
#include <event_log.h>
#endif /* SHOW_LOGTRACE */
-#if defined(PCIE_FULL_DONGLE) || defined(SHOW_LOGTRACE)
-#include <bcmmsgbuf.h>
-#endif /* PCIE_FULL_DONGLE */
-
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/ip.h>
#include <linux/reboot.h>
#include <linux/notifier.h>
-#include <linux/irq.h>
#include <net/addrconf.h>
#ifdef ENABLE_ADAPTIVE_SCHED
#include <linux/cpufreq.h>
#endif /* ENABLE_ADAPTIVE_SCHED */
-#include <linux/rtc.h>
-#include <linux/namei.h>
+
#include <asm/uaccess.h>
#include <asm/unaligned.h>
-#include <dhd_linux_priv.h>
#include <epivers.h>
#include <bcmutils.h>
#include <bcmendian.h>
#include <bcmdevs.h>
-#include <bcmiov.h>
+
#include <ethernet.h>
#include <bcmevent.h>
#include <vlan.h>
#include <802.3.h>
+#include <dngl_stats.h>
#include <dhd_linux_wq.h>
#include <dhd.h>
#include <dhd_linux.h>
-#include <dhd_linux_pktdump.h>
#ifdef DHD_WET
#include <dhd_wet.h>
#endif /* DHD_WET */
#ifdef PCIE_FULL_DONGLE
#include <dhd_flowring.h>
-#endif // endif
+#endif
#include <dhd_bus.h>
#include <dhd_proto.h>
#include <dhd_config.h>
#include <wl_escan.h>
#endif
#include <dhd_dbg.h>
-#include <dhd_dbg_ring.h>
#include <dhd_debug.h>
#ifdef CONFIG_HAS_WAKELOCK
#include <linux/wakelock.h>
-#endif // endif
-#if defined(WL_CFG80211)
+#endif
+#ifdef WL_CFG80211
#include <wl_cfg80211.h>
-#endif /* WL_CFG80211 */
+#endif
#ifdef PNO_SUPPORT
#include <dhd_pno.h>
-#endif // endif
+#endif
#ifdef RTT_SUPPORT
#include <dhd_rtt.h>
-#endif // endif
-
-#ifdef CSI_SUPPORT
-#include <dhd_csi.h>
-#endif /* CSI_SUPPORT */
+#endif
+#ifdef DHD_TIMESYNC
+#include <dhd_timesync.h>
+#endif /* DHD_TIMESYNC */
#ifdef CONFIG_COMPAT
#include <linux/compat.h>
-#endif // endif
+#endif
-#if defined(CONFIG_SOC_EXYNOS8895) || defined(CONFIG_SOC_EXYNOS9810) || \
- defined(CONFIG_SOC_EXYNOS9820)
+#if defined(CONFIG_SOC_EXYNOS8895)
#include <linux/exynos-pci-ctrl.h>
-#endif /* CONFIG_SOC_EXYNOS8895 || CONFIG_SOC_EXYNOS9810 || CONFIG_SOC_EXYNOS9820 */
+#endif /* CONFIG_SOC_EXYNOS8895 */
+
+#ifdef DHD_WMF
+#include <dhd_wmf_linux.h>
+#endif /* DHD_WMF */
#ifdef DHD_L2_FILTER
#include <bcmicmp.h>
#include <dhd_psta.h>
#endif /* DHD_PSTA */
-#ifdef AMPDU_VO_ENABLE
-#include <802.1d.h>
-#endif /* AMPDU_VO_ENABLE */
-#if defined(DHDTCPACK_SUPPRESS) || defined(DHDTCPSYNC_FLOOD_BLK)
+#ifdef DHDTCPACK_SUPPRESS
#include <dhd_ip.h>
-#endif /* DHDTCPACK_SUPPRESS || DHDTCPSYNC_FLOOD_BLK */
+#endif /* DHDTCPACK_SUPPRESS */
#include <dhd_daemon.h>
-#ifdef DHD_4WAYM4_FAIL_DISCONNECT
-#include <eapol.h>
-#endif /* DHD_4WAYM4_FAIL_DISCONNECT */
+#ifdef DHD_PKT_LOGGING
+#include <dhd_pktlog.h>
+#endif /* DHD_PKT_LOGGING */
+#if defined(STAT_REPORT)
+#include <wl_statreport.h>
+#endif /* STAT_REPORT */
#ifdef DHD_DEBUG_PAGEALLOC
typedef void (*page_corrupt_cb_t)(void *handle, void *addr_corrupt, size_t len);
void dhd_page_corrupt_cb(void *handle, void *addr_corrupt, size_t len);
extern void register_page_corrupt_cb(page_corrupt_cb_t cb, void* handle);
#endif /* DHD_DEBUG_PAGEALLOC */
-#define IP_PROT_RESERVED 0xFF
-#ifdef DHD_4WAYM4_FAIL_DISCONNECT
-static void dhd_m4_state_handler(struct work_struct * work);
-#endif /* DHD_4WAYM4_FAIL_DISCONNECT */
+#if defined(DHD_LB)
+#if !defined(PCIE_FULL_DONGLE)
+#error "DHD Loadbalancing only supported on PCIE_FULL_DONGLE"
+#endif /* !PCIE_FULL_DONGLE */
+#endif /* DHD_LB */
+
+#if defined(DHD_LB_RXP) || defined(DHD_LB_RXC) || defined(DHD_LB_TXC) || \
+ defined(DHD_LB_STATS)
+#if !defined(DHD_LB)
+#error "DHD loadbalance derivatives are supported only if DHD_LB is defined"
+#endif /* !DHD_LB */
+#endif /* DHD_LB_RXP || DHD_LB_RXC || DHD_LB_TXC || DHD_LB_STATS */
+
+#if defined(DHD_LB)
+/* Dynamic CPU selection for load balancing */
+#include <linux/cpu.h>
+#include <linux/cpumask.h>
+#include <linux/notifier.h>
+#include <linux/workqueue.h>
+#include <asm/atomic.h>
+
+#if !defined(DHD_LB_PRIMARY_CPUS)
+#define DHD_LB_PRIMARY_CPUS 0x0 /* Big CPU coreids mask */
+#endif
+#if !defined(DHD_LB_SECONDARY_CPUS)
+#define DHD_LB_SECONDARY_CPUS 0xFE /* Little CPU coreids mask */
+#endif
+
+#define HIST_BIN_SIZE 9
+
+static void dhd_rx_napi_dispatcher_fn(struct work_struct * work);
+
+#if defined(DHD_LB_TXP)
+static void dhd_lb_tx_handler(unsigned long data);
+static void dhd_tx_dispatcher_work(struct work_struct * work);
+static void dhd_tx_dispatcher_fn(dhd_pub_t *dhdp);
+static void dhd_lb_tx_dispatch(dhd_pub_t *dhdp);
+
+/* Pkttag not compatible with PROP_TXSTATUS or WLFC */
+typedef struct dhd_tx_lb_pkttag_fr {
+ struct net_device *net;
+ int ifidx;
+} dhd_tx_lb_pkttag_fr_t;
+
+#define DHD_LB_TX_PKTTAG_SET_NETDEV(tag, netdevp) ((tag)->net = netdevp)
+#define DHD_LB_TX_PKTTAG_NETDEV(tag) ((tag)->net)
+
+#define DHD_LB_TX_PKTTAG_SET_IFIDX(tag, ifidx) ((tag)->ifidx = ifidx)
+#define DHD_LB_TX_PKTTAG_IFIDX(tag) ((tag)->ifidx)
+#endif /* DHD_LB_TXP */
+#endif /* DHD_LB */
+
+#ifdef HOFFLOAD_MODULES
+#include <linux/firmware.h>
+#endif
+
+#ifdef WLMEDIA_HTSF
+#include <linux/time.h>
+#include <htsf.h>
+
+#define HTSF_MINLEN 200 /* min. packet length to timestamp */
+#define HTSF_BUS_DELAY 150 /* assume a fix propagation in us */
+#define TSMAX 1000 /* max no. of timing record kept */
+#define NUMBIN 34
+
+static uint32 tsidx = 0;
+static uint32 htsf_seqnum = 0;
+uint32 tsfsync;
+struct timeval tsync;
+static uint32 tsport = 5010;
+
+typedef struct histo_ {
+ uint32 bin[NUMBIN];
+} histo_t;
+
+#if !ISPOWEROF2(DHD_SDALIGN)
+#error DHD_SDALIGN is not a power of 2!
+#endif
+
+static histo_t vi_d1, vi_d2, vi_d3, vi_d4;
+#endif /* WLMEDIA_HTSF */
+
+#ifdef WL_MONITOR
+#include <bcmmsgbuf.h>
+#include <bcmwifi_monitor.h>
+#endif
+
+#define htod32(i) (i)
+#define htod16(i) (i)
+#define dtoh32(i) (i)
+#define dtoh16(i) (i)
+#define htodchanspec(i) (i)
+#define dtohchanspec(i) (i)
+
+#ifdef STBLINUX
+#ifdef quote_str
+#undef quote_str
+#endif /* quote_str */
+#ifdef to_str
+#undef to_str
+#endif /* quote_str */
+#define to_str(s) #s
+#define quote_str(s) to_str(s)
+
+static char *driver_target = "driver_target: "quote_str(BRCM_DRIVER_TARGET);
+#endif /* STBLINUX */
-#ifdef DHDTCPSYNC_FLOOD_BLK
-static void dhd_blk_tsfl_handler(struct work_struct * work);
-#endif /* DHDTCPSYNC_FLOOD_BLK */
-#ifdef WL_NATOE
-#include <dhd_linux_nfct.h>
-#endif /* WL_NATOE */
#if defined(SOFTAP)
extern bool ap_cfg_running;
extern bool ap_fw_loaded;
-#endif // endif
+#endif
+
+#ifdef DHD_8021X_DUMP
+extern void dhd_dump_eapol_4way_message(char *ifname, char *dump_data, bool direction);
+#endif /* DHD_8021X_DUMP */
#ifdef FIX_CPU_MIN_CLOCK
#include <linux/pm_qos.h>
#ifdef SET_RANDOM_MAC_SOFTAP
#ifndef CONFIG_DHD_SET_RANDOM_MAC_VAL
#define CONFIG_DHD_SET_RANDOM_MAC_VAL 0x001A11
-#endif // endif
+#endif
static u32 vendor_oui = CONFIG_DHD_SET_RANDOM_MAC_VAL;
#endif /* SET_RANDOM_MAC_SOFTAP */
/* enable HOSTIP cache update from the host side when an eth0:N is up */
#define AOE_IP_ALIAS_SUPPORT 1
+#ifdef BCM_FD_AGGR
+#include <bcm_rpc.h>
+#include <bcm_rpc_tp.h>
+#endif
#ifdef PROP_TXSTATUS
#include <wlfc_proto.h>
#include <dhd_wlfc.h>
-#endif // endif
+#endif
#include <wl_android.h>
#include <linux/amlogic/wifi_dt.h>
#endif
+
const uint8 wme_fifo2ac[] = { 0, 1, 2, 3, 1, 1 };
const uint8 prio2fifo[8] = { 1, 0, 0, 1, 2, 2, 3, 3 };
#define WME_PRIO2AC(prio) wme_fifo2ac[prio2fifo[(prio)]]
static bool dhd_inet6addr_notifier_registered = FALSE;
#endif /* CONFIG_IPV6 && IPV6_NDO_SUPPORT */
-#if defined(CONFIG_PM_SLEEP)
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && defined(CONFIG_PM_SLEEP)
#include <linux/suspend.h>
volatile bool dhd_mmc_suspend = FALSE;
DECLARE_WAIT_QUEUE_HEAD(dhd_dpc_wait);
-#endif /* defined(CONFIG_PM_SLEEP) */
+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && defined(CONFIG_PM_SLEEP) */
-#if defined(OOB_INTR_ONLY) || defined(BCMSPI_ANDROID) || defined(FORCE_WOWLAN)
+#if defined(OOB_INTR_ONLY) || defined(FORCE_WOWLAN)
extern void dhd_enable_oob_intr(struct dhd_bus *bus, bool enable);
-#endif /* defined(OOB_INTR_ONLY) || defined(BCMSPI_ANDROID) */
-static void dhd_hang_process(struct work_struct *work_data);
+#endif
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
+static void dhd_hang_process(void *dhd_info, void *event_data, u8 event);
+#endif
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0))
MODULE_LICENSE("GPL and additional rights");
+#endif /* LinuxVer */
#if defined(MULTIPLE_SUPPLICANT)
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
DEFINE_MUTEX(_dhd_mutex_lock_);
#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) */
-#endif
-static int dhd_suspend_resume_helper(struct dhd_info *dhd, int val, int force);
+#endif
#ifdef CONFIG_BCM_DETECT_CONSECUTIVE_HANG
#define MAX_CONSECUTIVE_HANG_COUNTS 5
#include <dhd_ulp.h>
#endif /* DHD_ULP */
+#ifdef BCM_FD_AGGR
+#define DBUS_RX_BUFFER_SIZE_DHD(net) (BCM_RPC_TP_DNGL_AGG_MAX_BYTE)
+#else
#ifndef PROP_TXSTATUS
#define DBUS_RX_BUFFER_SIZE_DHD(net) (net->mtu + net->hard_header_len + dhd->pub.hdrlen)
#else
#define DBUS_RX_BUFFER_SIZE_DHD(net) (net->mtu + net->hard_header_len + dhd->pub.hdrlen + 128)
-#endif // endif
+#endif
+#endif /* BCM_FD_AGGR */
#ifdef PROP_TXSTATUS
extern bool dhd_wlfc_skip_fc(void * dhdp, uint8 idx);
extern void dhd_wlfc_plat_init(void *dhd);
extern void dhd_wlfc_plat_deinit(void *dhd);
#endif /* PROP_TXSTATUS */
-#ifdef USE_DYNAMIC_F2_BLKSIZE
extern uint sd_f2_blocksize;
extern int dhdsdio_func_blocksize(dhd_pub_t *dhd, int function_num, int block_size);
-#endif /* USE_DYNAMIC_F2_BLKSIZE */
+
+#if LINUX_VERSION_CODE == KERNEL_VERSION(2, 6, 15)
+const char *
+print_tainted()
+{
+ return "";
+}
+#endif /* LINUX_VERSION_CODE == KERNEL_VERSION(2, 6, 15) */
/* Linux wireless extension support */
#if defined(WL_WIRELESS_EXT)
#include <linux/nl80211.h>
#endif /* OEM_ANDROID && (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 11, 0)) */
+#if defined(BCMPCIE)
+extern int dhd_get_suspend_bcn_li_dtim(dhd_pub_t *dhd, int *dtim_period, int *bcn_interval);
+#else
+extern int dhd_get_suspend_bcn_li_dtim(dhd_pub_t *dhd);
+#endif /* BCMPCIE */
+
+#ifdef PKT_FILTER_SUPPORT
+extern void dhd_pktfilter_offload_set(dhd_pub_t * dhd, char *arg);
+extern void dhd_pktfilter_offload_enable(dhd_pub_t * dhd, char *arg, int enable, int master_mode);
+extern void dhd_pktfilter_offload_delete(dhd_pub_t *dhd, int id);
+#endif
+
#if defined(PKT_FILTER_SUPPORT) && defined(APF)
static int __dhd_apf_add_filter(struct net_device *ndev, uint32 filter_id,
u8* program, uint32 program_len);
static int __dhd_apf_delete_filter(struct net_device *ndev, uint32 filter_id);
#endif /* PKT_FILTER_SUPPORT && APF */
-#if defined(WL_CFG80211) && defined(DHD_FILE_DUMP_EVENT) && defined(DHD_FW_COREDUMP)
-static int dhd_wait_for_file_dump(dhd_pub_t *dhdp);
-#endif /* WL_CFG80211 && DHD_FILE_DUMP_EVENT && DHD_FW_COREDUMP */
-#if defined(ARGOS_NOTIFY_CB)
-/* ARGOS notifer data */
-static struct notifier_block argos_wifi; /* STA */
-static struct notifier_block argos_p2p; /* P2P */
-argos_rps_ctrl argos_rps_ctrl_data;
-#endif // endif
-#ifdef DHD_FW_COREDUMP
-static int dhd_mem_dump(void *dhd_info, void *event_info, u8 event);
-#endif /* DHD_FW_COREDUMP */
+static INLINE int argos_register_notifier_init(struct net_device *net) { return 0; }
+static INLINE int argos_register_notifier_deinit(void) { return 0; }
-#ifdef DHD_LOG_DUMP
+#if defined(BT_OVER_SDIO)
+extern void wl_android_set_wifi_on_flag(bool enable);
+#endif /* BT_OVER_SDIO */
-struct dhd_log_dump_buf g_dld_buf[DLD_BUFFER_NUM];
-/* Only header for log dump buffers is stored in array
- * header for sections like 'dhd dump', 'ext trap'
- * etc, is not in the array, because they are not log
- * ring buffers
- */
-dld_hdr_t dld_hdrs[DLD_BUFFER_NUM] = {
- {GENERAL_LOG_HDR, LOG_DUMP_SECTION_GENERAL},
- {PRESERVE_LOG_HDR, LOG_DUMP_SECTION_PRESERVE},
- {SPECIAL_LOG_HDR, LOG_DUMP_SECTION_SPECIAL}
-};
+#if defined(TRAFFIC_MGMT_DWM)
+void traffic_mgmt_pkt_set_prio(dhd_pub_t *dhdp, void * pktbuf);
+#endif
-static int dld_buf_size[DLD_BUFFER_NUM] = {
- LOG_DUMP_GENERAL_MAX_BUFSIZE, /* DLD_BUF_TYPE_GENERAL */
- LOG_DUMP_PRESERVE_MAX_BUFSIZE, /* DLD_BUF_TYPE_PRESERVE */
- LOG_DUMP_SPECIAL_MAX_BUFSIZE, /* DLD_BUF_TYPE_SPECIAL */
+#ifdef DHD_FW_COREDUMP
+static void dhd_mem_dump(void *dhd_info, void *event_info, u8 event);
+#endif /* DHD_FW_COREDUMP */
+#ifdef DHD_LOG_DUMP
+#define DLD_BUFFER_NUM 2
+/* [0]: General, [1]: Special */
+struct dhd_log_dump_buf g_dld_buf[DLD_BUFFER_NUM];
+static const int dld_buf_size[] = {
+ (1024 * 1024), /* DHD_LOG_DUMP_BUFFER_SIZE */
+ (8 * 1024) /* DHD_LOG_DUMP_BUFFER_EX_SIZE */
};
-
static void dhd_log_dump_init(dhd_pub_t *dhd);
static void dhd_log_dump_deinit(dhd_pub_t *dhd);
static void dhd_log_dump(void *handle, void *event_info, u8 event);
-static int do_dhd_log_dump(dhd_pub_t *dhdp, log_dump_type_t *type);
-static int dhd_log_flush(dhd_pub_t *dhdp, log_dump_type_t *type);
-static void dhd_get_time_str(dhd_pub_t *dhdp, char *time_str, int size);
-void dhd_get_debug_dump_len(void *handle, struct sk_buff *skb, void *event_info, u8 event);
-void cfgvendor_log_dump_len(dhd_pub_t *dhdp, log_dump_type_t *type, struct sk_buff *skb);
-static void dhd_print_buf_addr(dhd_pub_t *dhdp, char *name, void *buf, unsigned int size);
-static void dhd_log_dump_buf_addr(dhd_pub_t *dhdp, log_dump_type_t *type);
+void dhd_schedule_log_dump(dhd_pub_t *dhdp);
+static int do_dhd_log_dump(dhd_pub_t *dhdp);
#endif /* DHD_LOG_DUMP */
-#ifdef DHD_PCIE_NATIVE_RUNTIMEPM
-#include <linux/workqueue.h>
-#include <linux/pm_runtime.h>
-#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
-
#ifdef DHD_DEBUG_UART
#include <linux/kmod.h>
#define DHD_DEBUG_UART_EXEC_PATH "/system/bin/wldu"
static int is_reboot = 0;
#endif /* BCMPCIE */
-dhd_pub_t *g_dhd_pub = NULL;
-
#if defined(BT_OVER_SDIO)
#include "dhd_bt_interface.h"
+dhd_pub_t *g_dhd_pub = NULL;
#endif /* defined (BT_OVER_SDIO) */
-#ifdef WL_STATIC_IF
-bool dhd_is_static_ndev(dhd_pub_t *dhdp, struct net_device *ndev);
-#endif /* WL_STATIC_IF */
-
atomic_t exit_in_progress = ATOMIC_INIT(0);
+typedef struct dhd_if_event {
+ struct list_head list;
+ wl_event_data_if_t event;
+ char name[IFNAMSIZ+1];
+ uint8 mac[ETHER_ADDR_LEN];
+} dhd_if_event_t;
+
+/* Interface control information */
+typedef struct dhd_if {
+ struct dhd_info *info; /* back pointer to dhd_info */
+ /* OS/stack specifics */
+ struct net_device *net;
+ int idx; /* iface idx in dongle */
+ uint subunit; /* subunit */
+ uint8 mac_addr[ETHER_ADDR_LEN]; /* assigned MAC address */
+ bool set_macaddress;
+ bool set_multicast;
+ uint8 bssidx; /* bsscfg index for the interface */
+ bool attached; /* Delayed attachment when unset */
+ bool txflowcontrol; /* Per interface flow control indicator */
+ char name[IFNAMSIZ+1]; /* linux interface name */
+ char dngl_name[IFNAMSIZ+1]; /* corresponding dongle interface name */
+ struct net_device_stats stats;
+#ifdef DHD_WMF
+ dhd_wmf_t wmf; /* per bsscfg wmf setting */
+ bool wmf_psta_disable; /* enable/disable MC pkt to each mac
+ * of MC group behind PSTA
+ */
+#endif /* DHD_WMF */
+#ifdef PCIE_FULL_DONGLE
+ struct list_head sta_list; /* sll of associated stations */
+#if !defined(BCM_GMAC3)
+ spinlock_t sta_list_lock; /* lock for manipulating sll */
+#endif /* ! BCM_GMAC3 */
+#endif /* PCIE_FULL_DONGLE */
+ uint32 ap_isolate; /* ap-isolation settings */
+#ifdef DHD_L2_FILTER
+ bool parp_enable;
+ bool parp_discard;
+ bool parp_allnode;
+ arp_table_t *phnd_arp_table;
+ /* for Per BSS modification */
+ bool dhcp_unicast;
+ bool block_ping;
+ bool grat_arp;
+#endif /* DHD_L2_FILTER */
+#ifdef DHD_MCAST_REGEN
+ bool mcast_regen_bss_enable;
+#endif
+ bool rx_pkt_chainable; /* set all rx packet to chainable config by default */
+	cumm_ctr_t cumm_ctr; /* cumulative queue length of child flowrings */
+} dhd_if_t;
+
+#ifdef WLMEDIA_HTSF
+typedef struct {
+ uint32 low;
+ uint32 high;
+} tsf_t;
+
+typedef struct {
+ uint32 last_cycle;
+ uint32 last_sec;
+ uint32 last_tsf;
+ uint32 coef; /* scaling factor */
+ uint32 coefdec1; /* first decimal */
+ uint32 coefdec2; /* second decimal */
+} htsf_t;
+
+typedef struct {
+ uint32 t1;
+ uint32 t2;
+ uint32 t3;
+ uint32 t4;
+} tstamp_t;
+
+static tstamp_t ts[TSMAX];
+static tstamp_t maxdelayts;
+static uint32 maxdelay = 0, tspktcnt = 0, maxdelaypktno = 0;
+
+#endif /* WLMEDIA_HTSF */
+
+struct ipv6_work_info_t {
+ uint8 if_idx;
+ char ipv6_addr[IPV6_ADDR_LEN];
+ unsigned long event;
+};
static void dhd_process_daemon_msg(struct sk_buff *skb);
static void dhd_destroy_to_notifier_skt(void);
static int dhd_create_to_notifier_skt(void);
int sender_pid = 0;
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 6, 0))
-struct netlink_kernel_cfg dhd_netlink_cfg = {
+struct netlink_kernel_cfg g_cfg = {
.groups = 1,
.input = dhd_process_daemon_msg,
};
-#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 6, 0)) */
+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 6, 0)) */
+
+typedef struct dhd_dump {
+ uint8 *buf;
+ int bufsize;
+} dhd_dump_t;
+
+
+/* When Perimeter locks are deployed, any blocking calls must be preceded
+ * with a PERIM UNLOCK and followed by a PERIM LOCK.
+ * Examples of blocking calls are: schedule_timeout(), down_interruptible(),
+ * wait_event_timeout().
+ */
+
+/* Local private structure (extension of pub) */
+typedef struct dhd_info {
+#if defined(WL_WIRELESS_EXT)
+ wl_iw_t iw; /* wireless extensions state (must be first) */
+#endif /* defined(WL_WIRELESS_EXT) */
+ dhd_pub_t pub;
+ dhd_if_t *iflist[DHD_MAX_IFS]; /* for supporting multiple interfaces */
+
+ wifi_adapter_info_t *adapter; /* adapter information, interrupt, fw path etc. */
+ char fw_path[PATH_MAX]; /* path to firmware image */
+ char nv_path[PATH_MAX]; /* path to nvram vars file */
+ char clm_path[PATH_MAX]; /* path to clm vars file */
+ char conf_path[PATH_MAX]; /* path to config vars file */
+#ifdef DHD_UCODE_DOWNLOAD
+ char uc_path[PATH_MAX]; /* path to ucode image */
+#endif /* DHD_UCODE_DOWNLOAD */
+
+ /* serialize dhd iovars */
+ struct mutex dhd_iovar_mutex;
+
+ struct semaphore proto_sem;
+#ifdef PROP_TXSTATUS
+ spinlock_t wlfc_spinlock;
+
+#ifdef BCMDBUS
+ ulong wlfc_lock_flags;
+ ulong wlfc_pub_lock_flags;
+#endif /* BCMDBUS */
+#endif /* PROP_TXSTATUS */
+#ifdef WLMEDIA_HTSF
+ htsf_t htsf;
+#endif
+ wait_queue_head_t ioctl_resp_wait;
+ wait_queue_head_t d3ack_wait;
+ wait_queue_head_t dhd_bus_busy_state_wait;
+ uint32 default_wd_interval;
+
+ struct timer_list timer;
+ bool wd_timer_valid;
+#ifdef DHD_PCIE_RUNTIMEPM
+ struct timer_list rpm_timer;
+ bool rpm_timer_valid;
+ tsk_ctl_t thr_rpm_ctl;
+#endif /* DHD_PCIE_RUNTIMEPM */
+ struct tasklet_struct tasklet;
+ spinlock_t sdlock;
+ spinlock_t txqlock;
+ spinlock_t rxqlock;
+ spinlock_t dhd_lock;
+#ifdef BCMDBUS
+ ulong txqlock_flags;
+#else
+
+ struct semaphore sdsem;
+ tsk_ctl_t thr_dpc_ctl;
+ tsk_ctl_t thr_wdt_ctl;
+#endif /* BCMDBUS */
+
+ tsk_ctl_t thr_rxf_ctl;
+ spinlock_t rxf_lock;
+ bool rxthread_enabled;
+
+ /* Wakelocks */
+#if defined(CONFIG_HAS_WAKELOCK) && (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
+ struct wake_lock wl_wifi; /* Wifi wakelock */
+ struct wake_lock wl_rxwake; /* Wifi rx wakelock */
+ struct wake_lock wl_ctrlwake; /* Wifi ctrl wakelock */
+ struct wake_lock wl_wdwake; /* Wifi wd wakelock */
+ struct wake_lock wl_evtwake; /* Wifi event wakelock */
+ struct wake_lock wl_pmwake; /* Wifi pm handler wakelock */
+ struct wake_lock wl_txflwake; /* Wifi tx flow wakelock */
+#ifdef BCMPCIE_OOB_HOST_WAKE
+ struct wake_lock wl_intrwake; /* Host wakeup wakelock */
+#endif /* BCMPCIE_OOB_HOST_WAKE */
+#ifdef DHD_USE_SCAN_WAKELOCK
+ struct wake_lock wl_scanwake; /* Wifi scan wakelock */
+#endif /* DHD_USE_SCAN_WAKELOCK */
+#endif /* CONFIG_HAS_WAKELOCK && LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27) */
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
+ /* net_device interface lock, prevent race conditions among net_dev interface
+ * calls and wifi_on or wifi_off
+ */
+ struct mutex dhd_net_if_mutex;
+ struct mutex dhd_suspend_mutex;
+#if defined(PKT_FILTER_SUPPORT) && defined(APF)
+ struct mutex dhd_apf_mutex;
+#endif /* PKT_FILTER_SUPPORT && APF */
+#endif
+ spinlock_t wakelock_spinlock;
+ spinlock_t wakelock_evt_spinlock;
+ uint32 wakelock_counter;
+ int wakelock_wd_counter;
+ int wakelock_rx_timeout_enable;
+ int wakelock_ctrl_timeout_enable;
+ bool waive_wakelock;
+ uint32 wakelock_before_waive;
+
+ /* Thread to issue ioctl for multicast */
+ wait_queue_head_t ctrl_wait;
+ atomic_t pend_8021x_cnt;
+ dhd_attach_states_t dhd_state;
+#ifdef SHOW_LOGTRACE
+ dhd_event_log_t event_data;
+#endif /* SHOW_LOGTRACE */
+
+#if defined(CONFIG_HAS_EARLYSUSPEND) && defined(DHD_USE_EARLYSUSPEND)
+ struct early_suspend early_suspend;
+#endif /* CONFIG_HAS_EARLYSUSPEND && DHD_USE_EARLYSUSPEND */
+
+#ifdef ARP_OFFLOAD_SUPPORT
+ u32 pend_ipaddr;
+#endif /* ARP_OFFLOAD_SUPPORT */
+#ifdef BCM_FD_AGGR
+ void *rpc_th;
+ void *rpc_osh;
+ struct timer_list rpcth_timer;
+ bool rpcth_timer_active;
+ uint8 fdaggr;
+#endif
+#ifdef DHDTCPACK_SUPPRESS
+ spinlock_t tcpack_lock;
+#endif /* DHDTCPACK_SUPPRESS */
+#ifdef FIX_CPU_MIN_CLOCK
+ bool cpufreq_fix_status;
+ struct mutex cpufreq_fix;
+ struct pm_qos_request dhd_cpu_qos;
+#ifdef FIX_BUS_MIN_CLOCK
+ struct pm_qos_request dhd_bus_qos;
+#endif /* FIX_BUS_MIN_CLOCK */
+#endif /* FIX_CPU_MIN_CLOCK */
+ void *dhd_deferred_wq;
+#ifdef DEBUG_CPU_FREQ
+ struct notifier_block freq_trans;
+ int __percpu *new_freq;
+#endif
+ unsigned int unit;
+ struct notifier_block pm_notifier;
+#ifdef DHD_PSTA
+ uint32 psta_mode; /* PSTA or PSR */
+#endif /* DHD_PSTA */
+#ifdef DHD_WET
+ uint32 wet_mode;
+#endif /* DHD_WET */
+#ifdef DHD_DEBUG
+ dhd_dump_t *dump;
+ struct timer_list join_timer;
+ u32 join_timeout_val;
+ bool join_timer_active;
+ uint scan_time_count;
+ struct timer_list scan_timer;
+ bool scan_timer_active;
+#endif
+#if defined(DHD_LB)
+ /* CPU Load Balance dynamic CPU selection */
+
+	/* Variable that tracks the current CPUs available for candidacy */
+ cpumask_var_t cpumask_curr_avail;
+
+ /* Primary and secondary CPU mask */
+ cpumask_var_t cpumask_primary, cpumask_secondary; /* configuration */
+ cpumask_var_t cpumask_primary_new, cpumask_secondary_new; /* temp */
+
+ struct notifier_block cpu_notifier;
+
+ /* Tasklet to handle Tx Completion packet freeing */
+ struct tasklet_struct tx_compl_tasklet;
+ atomic_t tx_compl_cpu;
+
+ /* Tasklet to handle RxBuf Post during Rx completion */
+ struct tasklet_struct rx_compl_tasklet;
+ atomic_t rx_compl_cpu;
+
+ /* Napi struct for handling rx packet sendup. Packets are removed from
+ * H2D RxCompl ring and placed into rx_pend_queue. rx_pend_queue is then
+ * appended to rx_napi_queue (w/ lock) and the rx_napi_struct is scheduled
+	 * to run on rx_napi_cpu.
+ */
+ struct sk_buff_head rx_pend_queue ____cacheline_aligned;
+ struct sk_buff_head rx_napi_queue ____cacheline_aligned;
+ struct napi_struct rx_napi_struct ____cacheline_aligned;
+ atomic_t rx_napi_cpu; /* cpu on which the napi is dispatched */
+ struct net_device *rx_napi_netdev; /* netdev of primary interface */
+
+ struct work_struct rx_napi_dispatcher_work;
+ struct work_struct tx_compl_dispatcher_work;
+ struct work_struct tx_dispatcher_work;
+
+ /* Number of times DPC Tasklet ran */
+ uint32 dhd_dpc_cnt;
+ /* Number of times NAPI processing got scheduled */
+ uint32 napi_sched_cnt;
+ /* Number of times NAPI processing ran on each available core */
+ uint32 *napi_percpu_run_cnt;
+ /* Number of times RX Completions got scheduled */
+ uint32 rxc_sched_cnt;
+ /* Number of times RX Completion ran on each available core */
+ uint32 *rxc_percpu_run_cnt;
+ /* Number of times TX Completions got scheduled */
+ uint32 txc_sched_cnt;
+ /* Number of times TX Completions ran on each available core */
+ uint32 *txc_percpu_run_cnt;
+ /* CPU status */
+ /* Number of times each CPU came online */
+ uint32 *cpu_online_cnt;
+ /* Number of times each CPU went offline */
+ uint32 *cpu_offline_cnt;
+
+ /* Number of times TX processing run on each core */
+ uint32 *txp_percpu_run_cnt;
+ /* Number of times TX start run on each core */
+ uint32 *tx_start_percpu_run_cnt;
+
+ /* Tx load balancing */
+
+ /* TODO: Need to see if batch processing is really required in case of TX
+ * processing. In case of RX the Dongle can send a bunch of rx completions,
+	 * hence we took a 3 queue approach:
+	 * enqueue - adds the skbs to rx_pend_queue
+ * dispatch - uses a lock and adds the list of skbs from pend queue to
+ * napi queue
+ * napi processing - copies the pend_queue into a local queue and works
+ * on it.
+	 * But for TX it is going to be 1 skb at a time, so we are just thinking
+	 * of using only one queue and using the locking skb queue functions
+	 * to add and process it. If it is inefficient we'll revisit the queue
+ * design.
+ */
+
+ /* When the NET_TX tries to send a TX packet put it into tx_pend_queue */
+ /* struct sk_buff_head tx_pend_queue ____cacheline_aligned; */
+ /*
+ * From the Tasklet that actually sends out data
+	 * copy the list tx_pend_queue into tx_active_queue. Thereby we only need
+	 * the spinlock to perform the copy; the rest of the code, i.e. constructing
+	 * the tx_pend_queue and processing tx_active_queue, can be lockless.
+	 * The concept is borrowed as is from RX processing.
+ */
+ /* struct sk_buff_head tx_active_queue ____cacheline_aligned; */
+
+ /* Control TXP in runtime, enable by default */
+ atomic_t lb_txp_active;
+
+ /*
+ * When the NET_TX tries to send a TX packet put it into tx_pend_queue
+	 * For now, the processing tasklet will also directly operate on this
+ * queue
+ */
+ struct sk_buff_head tx_pend_queue ____cacheline_aligned;
+
+	/* cpu on which the DHD Tx is happening */
+ atomic_t tx_cpu;
+
+ /* CPU on which the Network stack is calling the DHD's xmit function */
+ atomic_t net_tx_cpu;
+
+ /* Tasklet context from which the DHD's TX processing happens */
+ struct tasklet_struct tx_tasklet;
+
+ /*
+ * Consumer Histogram - NAPI RX Packet processing
+ * -----------------------------------------------
+	 * On each CPU, this captures how many packets were processed each time
+	 * the NAPI RX packet processing callback was invoked.
+	 * It is difficult to capture the "exact" number of packets processed, so,
+	 * considering the packet counter to be a 32 bit one, we keep a set of
+	 * power-of-two bins (2^0, 2^1 ... 2^7, plus an overflow bin). The number
+	 * of packets processed is rounded up to the next power of 2 and the value
+	 * in the appropriate bin gets incremented.
+	 * For example, assume that on CPU 1 NAPI Rx runs 3 times and the packet
+	 * counts processed are as follows (assume the bin counters start at 0):
+ * iteration 1 - 10 (the bin counter 2^4 increments to 1)
+ * iteration 2 - 30 (the bin counter 2^5 increments to 1)
+ * iteration 3 - 15 (the bin counter 2^4 increments by 1 to become 2)
+ */
+ uint32 *napi_rx_hist[HIST_BIN_SIZE];
+ uint32 *txc_hist[HIST_BIN_SIZE];
+ uint32 *rxc_hist[HIST_BIN_SIZE];
+#endif /* DHD_LB */
+
+#ifdef SHOW_LOGTRACE
+ struct work_struct event_log_dispatcher_work;
+#endif /* SHOW_LOGTRACE */
+
+#if defined(BCM_DNGL_EMBEDIMAGE) || defined(BCM_REQUEST_FW)
+#endif /* defined(BCM_DNGL_EMBEDIMAGE) || defined(BCM_REQUEST_FW) */
+ struct kobject dhd_kobj;
+#ifdef SHOW_LOGTRACE
+ struct sk_buff_head evt_trace_queue ____cacheline_aligned;
+#endif
+ struct timer_list timesync_timer;
+#if defined(BT_OVER_SDIO)
+ char btfw_path[PATH_MAX];
+#endif /* defined (BT_OVER_SDIO) */
+
+#ifdef WL_MONITOR
+ struct net_device *monitor_dev; /* monitor pseudo device */
+ struct sk_buff *monitor_skb;
+ uint monitor_len;
+ uint monitor_type; /* monitor pseudo device */
+ monitor_info_t *monitor_info;
+#endif /* WL_MONITOR */
+ uint32 shub_enable;
+#if defined(BT_OVER_SDIO)
+ struct mutex bus_user_lock; /* lock for sdio bus apis shared between WLAN & BT */
+ int bus_user_count; /* User counts of sdio bus shared between WLAN & BT */
+#endif /* BT_OVER_SDIO */
+#ifdef DHD_DEBUG_UART
+ bool duart_execute;
+#endif
+#ifdef PCIE_INB_DW
+ wait_queue_head_t ds_exit_wait;
+#endif /* PCIE_INB_DW */
+} dhd_info_t;
+
+#ifdef WL_MONITOR
+#define MONPKT_EXTRA_LEN 48
+#endif
+
+#define DHDIF_FWDER(dhdif) FALSE
#if defined(BT_OVER_SDIO)
/* Flag to indicate if driver is initialized */
#else
/* Flag to indicate if driver is initialized */
uint dhd_driver_init_done = FALSE;
-#endif // endif
+#endif
/* Flag to indicate if we should download firmware on driver load */
uint dhd_download_fw_on_driverload = TRUE;
module_param_string(clm_path, clm_path, MOD_PARAM_PATHLEN, 0660);
+
/* backup buffer for firmware and nvram path */
char fw_bak_path[MOD_PARAM_PATHLEN];
char nv_bak_path[MOD_PARAM_PATHLEN];
int disable_proptx = 0;
module_param(op_mode, int, 0644);
extern int wl_control_wl_start(struct net_device *dev);
-#if defined(BCMLXSDMMC) || defined(BCMDBUS)
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && (defined(BCMLXSDMMC) || defined(BCMDBUS))
struct semaphore dhd_registration_sem;
-#endif /* BCMXSDMMC */
-
-#ifdef DHD_LOG_DUMP
-int logdump_max_filesize = LOG_DUMP_MAX_FILESIZE;
-module_param(logdump_max_filesize, int, 0644);
-int logdump_max_bufsize = LOG_DUMP_GENERAL_MAX_BUFSIZE;
-module_param(logdump_max_bufsize, int, 0644);
-int logdump_prsrv_tailsize = DHD_LOG_DUMP_MAX_TAIL_FLUSH_SIZE;
-int logdump_periodic_flush = FALSE;
-module_param(logdump_periodic_flush, int, 0644);
-#ifdef EWP_ECNTRS_LOGGING
-int logdump_ecntr_enable = TRUE;
-#else
-int logdump_ecntr_enable = FALSE;
-#endif /* EWP_ECNTRS_LOGGING */
-module_param(logdump_ecntr_enable, int, 0644);
-#ifdef EWP_RTT_LOGGING
-int logdump_rtt_enable = TRUE;
-#else
-int logdump_rtt_enable = FALSE;
-#endif /* EWP_RTT_LOGGING */
-module_param(logdump_rtt_enable, int, 0644);
-#endif /* DHD_LOG_DUMP */
-#ifdef EWP_EDL
-int host_edl_support = TRUE;
-module_param(host_edl_support, int, 0644);
-#endif // endif
+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) */
/* deferred handlers */
static void dhd_ifadd_event_handler(void *handle, void *event_info, u8 event);
static void dhd_ifdel_event_handler(void *handle, void *event_info, u8 event);
static void dhd_set_mac_addr_handler(void *handle, void *event_info, u8 event);
static void dhd_set_mcast_list_handler(void *handle, void *event_info, u8 event);
-#ifdef WL_NATOE
-static void dhd_natoe_ct_event_hanlder(void *handle, void *event_info, u8 event);
-static void dhd_natoe_ct_ioctl_handler(void *handle, void *event_info, uint8 event);
-#endif /* WL_NATOE */
#ifdef DHD_UPDATE_INTF_MAC
static void dhd_ifupdate_event_handler(void *handle, void *event_info, u8 event);
#ifdef WL_CFG80211
extern void dhd_netdev_free(struct net_device *ndev);
#endif /* WL_CFG80211 */
-static dhd_if_t * dhd_get_ifp_by_ndev(dhd_pub_t *dhdp, struct net_device *ndev);
-
-#if defined(WLDWDS) && defined(FOURADDR_AUTO_BRG)
-static void dhd_bridge_dev_set(dhd_info_t * dhd, int ifidx, struct net_device * dev);
-#endif /* defiend(WLDWDS) && defined(FOURADDR_AUTO_BRG) */
#if (defined(DHD_WET) || defined(DHD_MCAST_REGEN) || defined(DHD_L2_FILTER))
/* update rx_pkt_chainable state of dhd interface */
static void dhd_update_rx_pkt_chainable_state(dhd_pub_t* dhdp, uint32 idx);
#endif /* DHD_WET || DHD_MCAST_REGEN || DHD_L2_FILTER */
+#ifdef HOFFLOAD_MODULES
+char dhd_hmem_module_string[MOD_PARAM_SRLEN];
+module_param_string(dhd_hmem_module_string, dhd_hmem_module_string, MOD_PARAM_SRLEN, 0660);
+#endif
/* Error bits */
module_param(dhd_msg_level, int, 0);
#if defined(WL_WIRELESS_EXT)
/* ARP offload agent mode : Enable ARP Host Auto-Reply and ARP Peer Auto-Reply */
#ifdef ENABLE_ARP_SNOOP_MODE
-uint dhd_arp_mode = (ARP_OL_AGENT | ARP_OL_PEER_AUTO_REPLY | ARP_OL_SNOOP | ARP_OL_HOST_AUTO_REPLY |
- ARP_OL_UPDATE_HOST_CACHE);
+uint dhd_arp_mode = ARP_OL_AGENT | ARP_OL_PEER_AUTO_REPLY | ARP_OL_SNOOP | ARP_OL_HOST_AUTO_REPLY;
#else
-uint dhd_arp_mode = ARP_OL_AGENT | ARP_OL_PEER_AUTO_REPLY | ARP_OL_UPDATE_HOST_CACHE;
+uint dhd_arp_mode = ARP_OL_AGENT | ARP_OL_PEER_AUTO_REPLY;
#endif /* ENABLE_ARP_SNOOP_MODE */
module_param(dhd_arp_mode, uint, 0);
module_param_string(ucode_path, ucode_path, MOD_PARAM_PATHLEN, 0660);
#endif /* DHD_UCODE_DOWNLOAD */
-/* wl event forwarding */
-#ifdef WL_EVENT_ENAB
-uint wl_event_enable = true;
-#else
-uint wl_event_enable = false;
-#endif /* WL_EVENT_ENAB */
-module_param(wl_event_enable, uint, 0660);
-
-/* wl event forwarding */
-#ifdef LOGTRACE_PKT_SENDUP
-uint logtrace_pkt_sendup = true;
-#else
-uint logtrace_pkt_sendup = false;
-#endif /* LOGTRACE_PKT_SENDUP */
-module_param(logtrace_pkt_sendup, uint, 0660);
-
/* Watchdog interval */
+
/* extend watchdog expiration to 2 seconds when DPC is running */
#define WATCHDOG_EXTEND_INTERVAL (2000)
uint dhd_watchdog_ms = CUSTOM_DHD_WATCHDOG_MS;
module_param(dhd_watchdog_ms, uint, 0);
+#ifdef DHD_PCIE_RUNTIMEPM
+uint dhd_runtimepm_ms = CUSTOM_DHD_RUNTIME_MS;
+#endif /* DHD_PCIE_RUNTIMEPM */
#if defined(DHD_DEBUG)
/* Console poll interval */
uint dhd_console_ms = 0;
/* Global Pkt filter enable control */
uint dhd_pkt_filter_enable = TRUE;
module_param(dhd_pkt_filter_enable, uint, 0);
-#endif // endif
+#endif
/* Pkt filter init setup */
uint dhd_pkt_filter_init = 0;
module_param(passive_channel_skip, int, (S_IRUSR|S_IWUSR));
#endif /* WL_CFG80211 */
-#ifdef DHD_MSI_SUPPORT
-uint enable_msi = TRUE;
-module_param(enable_msi, uint, 0);
-#endif /* PCIE_FULL_DONGLE */
-
-#ifdef DHD_SSSR_DUMP
-int dhdpcie_sssr_dump_get_before_after_len(dhd_pub_t *dhd, uint32 *arr_len);
-extern uint support_sssr_dump;
-module_param(support_sssr_dump, uint, 0);
-#endif /* DHD_SSSR_DUMP */
-
/* Keep track of number of instances */
static int dhd_found = 0;
static int instance_base = 0; /* Starting instance number */
module_param(instance_base, int, 0644);
-#if defined(DHD_LB_RXP)
+#if defined(DHD_LB_RXP) && defined(PCIE_FULL_DONGLE)
static int dhd_napi_weight = 32;
module_param(dhd_napi_weight, int, 0644);
-#endif /* DHD_LB_RXP */
+#endif /* DHD_LB_RXP && PCIE_FULL_DONGLE */
#ifdef PCIE_FULL_DONGLE
extern int h2d_max_txpost;
module_param(h2d_max_txpost, int, 0644);
+#endif /* PCIE_FULL_DONGLE */
-extern uint dma_ring_indices;
-module_param(dma_ring_indices, uint, 0644);
+#ifdef DHD_DHCP_DUMP
+struct bootp_fmt {
+ struct iphdr ip_header;
+ struct udphdr udp_header;
+ uint8 op;
+ uint8 htype;
+ uint8 hlen;
+ uint8 hops;
+ uint32 transaction_id;
+ uint16 secs;
+ uint16 flags;
+ uint32 client_ip;
+ uint32 assigned_ip;
+ uint32 server_ip;
+ uint32 relay_ip;
+ uint8 hw_address[16];
+ uint8 server_name[64];
+ uint8 file_name[128];
+ uint8 options[312];
+};
-extern bool h2d_phase;
-module_param(h2d_phase, bool, 0644);
-extern bool force_trap_bad_h2d_phase;
-module_param(force_trap_bad_h2d_phase, bool, 0644);
-#endif /* PCIE_FULL_DONGLE */
+static const uint8 bootp_magic_cookie[4] = { 99, 130, 83, 99 };
+static const char dhcp_ops[][10] = {
+ "NA", "REQUEST", "REPLY"
+};
+static const char dhcp_types[][10] = {
+ "NA", "DISCOVER", "OFFER", "REQUEST", "DECLINE", "ACK", "NAK", "RELEASE", "INFORM"
+};
+static void dhd_dhcp_dump(char *ifname, uint8 *pktdata, bool tx);
+#endif /* DHD_DHCP_DUMP */
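/*
 * Minimal sketch (an illustration, not the patch's dhd_dhcp_dump()
 * implementation, which is declared above and defined elsewhere in this
 * file): how a parser built on struct bootp_fmt could resolve the DHCP
 * message type (option 53) to one of the dhcp_types[] strings. The helper
 * name and the length handling are assumptions for illustration only.
 */
static const char *dhd_dhcp_type_str_sketch(uint8 *pktdata, int len)
{
	struct bootp_fmt *b = (struct bootp_fmt *)pktdata;
	uint8 *opt = b->options;
	uint8 *end = pktdata + len;

	/* options must start with the 4-byte magic cookie 99.130.83.99 */
	if (opt + sizeof(bootp_magic_cookie) > end ||
	    memcmp(opt, bootp_magic_cookie, sizeof(bootp_magic_cookie)))
		return dhcp_types[0];	/* "NA" */
	opt += sizeof(bootp_magic_cookie);

	/* walk the TLV options until option 53 (message type) or END (255) */
	while (opt + 2 < end && *opt != 255) {
		if (*opt == 0) {	/* PAD option has no length byte */
			opt++;
			continue;
		}
		if (*opt == 53 && opt[1] >= 1 &&
		    opt[2] < (sizeof(dhcp_types) / sizeof(dhcp_types[0])))
			return dhcp_types[opt[2]];
		opt += 2 + opt[1];
	}
	return dhcp_types[0];
}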
-#ifdef FORCE_TPOWERON
-/*
- * On Fire's reference platform, coming out of L1.2,
- * there is a constant delay of 45us between CLKREQ# and stable REFCLK
- * Due to this delay, with tPowerOn < 50
- * there is a chance of the refclk sense to trigger on noise.
- *
- * 0x29 when written to L1SSControl2 translates to 50us.
- */
-#define FORCE_TPOWERON_50US 0x29
-uint32 tpoweron_scale = FORCE_TPOWERON_50US; /* default 50us */
-module_param(tpoweron_scale, uint, 0644);
-#endif /* FORCE_TPOWERON */
+#ifdef DHD_ICMP_DUMP
+#include <net/icmp.h>
+static void dhd_icmp_dump(char *ifname, uint8 *pktdata, bool tx);
+#endif /* DHD_ICMP_DUMP */
+
+/* Functions to manage sysfs interface for dhd */
+static int dhd_sysfs_init(dhd_info_t *dhd);
+static void dhd_sysfs_exit(dhd_info_t *dhd);
#ifdef SHOW_LOGTRACE
-static char *logstrs_path = "/data/vendor/misc/wifi/logstrs.bin";
-char *st_str_file_path = "/data/vendor/misc/wifi/rtecdc.bin";
-static char *map_file_path = "/data/vendor/misc/wifi/rtecdc.map";
-static char *rom_st_str_file_path = "/data/vendor/misc/wifi/roml.bin";
-static char *rom_map_file_path = "/data/vendor/misc/wifi/roml.map";
+#if defined(CUSTOMER_HW4_DEBUG)
+static char *logstrs_path = PLATFORM_PATH"logstrs.bin";
+static char *st_str_file_path = PLATFORM_PATH"rtecdc.bin";
+static char *map_file_path = PLATFORM_PATH"rtecdc.map";
+static char *rom_st_str_file_path = PLATFORM_PATH"roml.bin";
+static char *rom_map_file_path = PLATFORM_PATH"roml.map";
+#elif defined(CUSTOMER_HW2)
+static char *logstrs_path = "/data/misc/wifi/logstrs.bin";
+static char *st_str_file_path = "/data/misc/wifi/rtecdc.bin";
+static char *map_file_path = "/data/misc/wifi/rtecdc.map";
+static char *rom_st_str_file_path = "/data/misc/wifi/roml.bin";
+static char *rom_map_file_path = "/data/misc/wifi/roml.map";
+#else
+static char *logstrs_path = "/installmedia/logstrs.bin";
+static char *st_str_file_path = "/installmedia/rtecdc.bin";
+static char *map_file_path = "/installmedia/rtecdc.map";
+static char *rom_st_str_file_path = "/installmedia/roml.bin";
+static char *rom_map_file_path = "/installmedia/roml.map";
+#endif /* CUSTOMER_HW4_DEBUG || CUSTOMER_HW2 */
static char *ram_file_str = "rtecdc";
static char *rom_file_str = "roml";
char *map_file);
#endif /* SHOW_LOGTRACE */
-#ifdef USE_WFA_CERT_CONF
-int g_frameburst = 1;
-#endif /* USE_WFA_CERT_CONF */
+#if defined(DHD_LB)
-static int dhd_get_pend_8021x_cnt(dhd_info_t *dhd);
-
-/* DHD Perimiter lock only used in router with bypass forwarding. */
-#define DHD_PERIM_RADIO_INIT() do { /* noop */ } while (0)
-#define DHD_PERIM_LOCK_TRY(unit, flag) do { /* noop */ } while (0)
-#define DHD_PERIM_UNLOCK_TRY(unit, flag) do { /* noop */ } while (0)
-
-#ifdef PCIE_FULL_DONGLE
-#define DHD_IF_STA_LIST_LOCK_INIT(ifp) spin_lock_init(&(ifp)->sta_list_lock)
-#define DHD_IF_STA_LIST_LOCK(ifp, flags) \
- spin_lock_irqsave(&(ifp)->sta_list_lock, (flags))
-#define DHD_IF_STA_LIST_UNLOCK(ifp, flags) \
- spin_unlock_irqrestore(&(ifp)->sta_list_lock, (flags))
-
-#if defined(DHD_IGMP_UCQUERY) || defined(DHD_UCAST_UPNP)
-static struct list_head * dhd_sta_list_snapshot(dhd_info_t *dhd, dhd_if_t *ifp,
- struct list_head *snapshot_list);
-static void dhd_sta_list_snapshot_free(dhd_info_t *dhd, struct list_head *snapshot_list);
-#define DHD_IF_WMF_UCFORWARD_LOCK(dhd, ifp, slist) ({ dhd_sta_list_snapshot(dhd, ifp, slist); })
-#define DHD_IF_WMF_UCFORWARD_UNLOCK(dhd, slist) ({ dhd_sta_list_snapshot_free(dhd, slist); })
-#endif /* DHD_IGMP_UCQUERY || DHD_UCAST_UPNP */
-#endif /* PCIE_FULL_DONGLE */
-
-/* Control fw roaming */
-#ifdef BCMCCX
-uint dhd_roam_disable = 0;
-#else
-uint dhd_roam_disable = 0;
-#endif /* BCMCCX */
-
-#ifdef BCMDBGFS
-extern void dhd_dbgfs_init(dhd_pub_t *dhdp);
-extern void dhd_dbgfs_remove(void);
-#endif // endif
+static void
+dhd_lb_set_default_cpus(dhd_info_t *dhd)
+{
+ /* Default CPU allocation for the jobs */
+ atomic_set(&dhd->rx_napi_cpu, 1);
+ atomic_set(&dhd->rx_compl_cpu, 2);
+ atomic_set(&dhd->tx_compl_cpu, 2);
+ atomic_set(&dhd->tx_cpu, 2);
+ atomic_set(&dhd->net_tx_cpu, 0);
+}
-static uint pcie_txs_metadata_enable = 0; /* Enable TX status metadta report */
-module_param(pcie_txs_metadata_enable, int, 0);
+static void
+dhd_cpumasks_deinit(dhd_info_t *dhd)
+{
+ free_cpumask_var(dhd->cpumask_curr_avail);
+ free_cpumask_var(dhd->cpumask_primary);
+ free_cpumask_var(dhd->cpumask_primary_new);
+ free_cpumask_var(dhd->cpumask_secondary);
+ free_cpumask_var(dhd->cpumask_secondary_new);
+}
-/* Control radio state */
-uint dhd_radio_up = 1;
+static int
+dhd_cpumasks_init(dhd_info_t *dhd)
+{
+ int id;
+ uint32 cpus, num_cpus = num_possible_cpus();
+ int ret = 0;
-/* Network inteface name */
-char iface_name[IFNAMSIZ] = {'\0'};
-module_param_string(iface_name, iface_name, IFNAMSIZ, 0);
+ DHD_ERROR(("%s CPU masks primary(big)=0x%x secondary(little)=0x%x\n", __FUNCTION__,
+ DHD_LB_PRIMARY_CPUS, DHD_LB_SECONDARY_CPUS));
-/* The following are specific to the SDIO dongle */
+ if (!alloc_cpumask_var(&dhd->cpumask_curr_avail, GFP_KERNEL) ||
+ !alloc_cpumask_var(&dhd->cpumask_primary, GFP_KERNEL) ||
+ !alloc_cpumask_var(&dhd->cpumask_primary_new, GFP_KERNEL) ||
+ !alloc_cpumask_var(&dhd->cpumask_secondary, GFP_KERNEL) ||
+ !alloc_cpumask_var(&dhd->cpumask_secondary_new, GFP_KERNEL)) {
+ DHD_ERROR(("%s Failed to init cpumasks\n", __FUNCTION__));
+ ret = -ENOMEM;
+ goto fail;
+ }
-/* IOCTL response timeout */
-int dhd_ioctl_timeout_msec = IOCTL_RESP_TIMEOUT;
+ cpumask_copy(dhd->cpumask_curr_avail, cpu_online_mask);
+ cpumask_clear(dhd->cpumask_primary);
+ cpumask_clear(dhd->cpumask_secondary);
-/* DS Exit response timeout */
-int ds_exit_timeout_msec = DS_EXIT_TIMEOUT;
+ if (num_cpus > 32) {
+		DHD_ERROR(("%s: max number of cpus supported is 32, %d is too big\n",
+			__FUNCTION__, num_cpus));
+ ASSERT(0);
+ }
-/* Idle timeout for backplane clock */
-int dhd_idletime = DHD_IDLETIME_TICKS;
-module_param(dhd_idletime, int, 0);
+ cpus = DHD_LB_PRIMARY_CPUS;
+ for (id = 0; id < num_cpus; id++) {
+ if (isset(&cpus, id))
+ cpumask_set_cpu(id, dhd->cpumask_primary);
+ }
-/* Use polling */
-uint dhd_poll = FALSE;
-module_param(dhd_poll, uint, 0);
+ cpus = DHD_LB_SECONDARY_CPUS;
+ for (id = 0; id < num_cpus; id++) {
+ if (isset(&cpus, id))
+ cpumask_set_cpu(id, dhd->cpumask_secondary);
+ }
-/* Use interrupts */
-uint dhd_intr = TRUE;
-module_param(dhd_intr, uint, 0);
+ return ret;
+fail:
+ dhd_cpumasks_deinit(dhd);
+ return ret;
+}
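/*
 * Worked example (mask values are hypothetical, not taken from this patch):
 * with DHD_LB_PRIMARY_CPUS = 0xF0 and DHD_LB_SECONDARY_CPUS = 0x0E on an
 * 8-core big.LITTLE system, dhd_cpumasks_init() ends up with
 *
 *   cpumask_primary   = { 4, 5, 6, 7 }   (big cores)
 *   cpumask_secondary = { 1, 2, 3 }      (LITTLE cores, CPU0 left out)
 *
 * while cpumask_curr_avail simply snapshots cpu_online_mask and is then kept
 * up to date by dhd_cpu_callback() as CPUs go on-line/off-line.
 */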
-/* SDIO Drive Strength (in milliamps) */
-uint dhd_sdiod_drive_strength = 6;
-module_param(dhd_sdiod_drive_strength, uint, 0);
+/*
+ * The CPU Candidacy Algorithm
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ * The available CPUs for selection are divided into two groups
+ * Primary Set - A CPU mask that carries the First Choice CPUs
+ * Secondary Set - A CPU mask that carries the Second Choice CPUs.
+ *
+ * There are two types of Job, that needs to be assigned to
+ * the CPUs, from one of the above mentioned CPU group. The Jobs are
+ * 1) Rx Packet Processing - napi_cpu
+ * 2) Completion Processing (Tx, RX) - compl_cpu
+ *
+ * To begin with both napi_cpu and compl_cpu are on CPU0. Whenever a CPU goes
+ * on-line/off-line the CPU candidacy algorithm is triggered. The candidacy
+ * algo tries to pick the first available non-boot CPU (i.e. not CPU0) for napi_cpu.
+ * If there are more processors free, it assigns one to compl_cpu.
+ * It also tries to ensure that both napi_cpu and compl_cpu are not on the same
+ * CPU, as much as possible.
+ *
+ * By design, both Tx and Rx completion jobs are run on the same CPU core, as it
+ * would allow Tx completion skb's to be released into a local free pool from
+ * which the rx buffer posts could have been serviced. It is important to note
+ * that a Tx packet may not have a large enough buffer for rx posting.
+ */
+void dhd_select_cpu_candidacy(dhd_info_t *dhd)
+{
+ uint32 primary_available_cpus; /* count of primary available cpus */
+ uint32 secondary_available_cpus; /* count of secondary available cpus */
+ uint32 napi_cpu = 0; /* cpu selected for napi rx processing */
+ uint32 compl_cpu = 0; /* cpu selected for completion jobs */
+ uint32 tx_cpu = 0; /* cpu selected for tx processing job */
-#ifdef BCMSDIO
-/* Tx/Rx bounds */
-extern uint dhd_txbound;
-extern uint dhd_rxbound;
-module_param(dhd_txbound, uint, 0);
-module_param(dhd_rxbound, uint, 0);
+ cpumask_clear(dhd->cpumask_primary_new);
+ cpumask_clear(dhd->cpumask_secondary_new);
-/* Deferred transmits */
-extern uint dhd_deferred_tx;
-module_param(dhd_deferred_tx, uint, 0);
+ /*
+ * Now select from the primary mask. Even if a Job is
+ * already running on a CPU in secondary group, we still move
+ * to primary CPU. So no conditional checks.
+ */
+ cpumask_and(dhd->cpumask_primary_new, dhd->cpumask_primary,
+ dhd->cpumask_curr_avail);
-#endif /* BCMSDIO */
+ cpumask_and(dhd->cpumask_secondary_new, dhd->cpumask_secondary,
+ dhd->cpumask_curr_avail);
-#ifdef SDTEST
-/* Echo packet generator (pkts/s) */
-uint dhd_pktgen = 0;
-module_param(dhd_pktgen, uint, 0);
+ primary_available_cpus = cpumask_weight(dhd->cpumask_primary_new);
-/* Echo packet len (0 => sawtooth, max 2040) */
-uint dhd_pktgen_len = 0;
-module_param(dhd_pktgen_len, uint, 0);
-#endif /* SDTEST */
+ if (primary_available_cpus > 0) {
+ napi_cpu = cpumask_first(dhd->cpumask_primary_new);
-#if defined(BCMSUP_4WAY_HANDSHAKE)
-/* Use in dongle supplicant for 4-way handshake */
-#if defined(WLFBT) || defined(WL_ENABLE_IDSUP)
-/* Enable idsup by default (if supported in fw) */
-uint dhd_use_idsup = 1;
-#else
-uint dhd_use_idsup = 0;
-#endif /* WLFBT || WL_ENABLE_IDSUP */
-module_param(dhd_use_idsup, uint, 0);
-#endif /* BCMSUP_4WAY_HANDSHAKE */
+ /* If no further CPU is available,
+ * cpumask_next returns >= nr_cpu_ids
+ */
+ tx_cpu = cpumask_next(napi_cpu, dhd->cpumask_primary_new);
+ if (tx_cpu >= nr_cpu_ids)
+ tx_cpu = 0;
-#ifndef BCMDBUS
-/* Allow delayed firmware download for debug purpose */
-int allow_delay_fwdl = FALSE;
-module_param(allow_delay_fwdl, int, 0);
-#endif /* !BCMDBUS */
+ /* In case there are no more CPUs, do completions & Tx in same CPU */
+ compl_cpu = cpumask_next(tx_cpu, dhd->cpumask_primary_new);
+ if (compl_cpu >= nr_cpu_ids)
+ compl_cpu = tx_cpu;
+ }
-#ifdef ECOUNTER_PERIODIC_DISABLE
-uint enable_ecounter = FALSE;
-#else
-uint enable_ecounter = TRUE;
-#endif // endif
-module_param(enable_ecounter, uint, 0);
+ DHD_INFO(("%s After primary CPU check napi_cpu %d compl_cpu %d tx_cpu %d\n",
+ __FUNCTION__, napi_cpu, compl_cpu, tx_cpu));
-/* TCM verification flag */
-uint dhd_tcm_test_enable = FALSE;
-module_param(dhd_tcm_test_enable, uint, 0644);
+ /* -- Now check for the CPUs from the secondary mask -- */
+ secondary_available_cpus = cpumask_weight(dhd->cpumask_secondary_new);
-extern char dhd_version[];
-extern char fw_version[];
-extern char clm_version[];
+ DHD_INFO(("%s Available secondary cpus %d nr_cpu_ids %d\n",
+ __FUNCTION__, secondary_available_cpus, nr_cpu_ids));
-int dhd_net_bus_devreset(struct net_device *dev, uint8 flag);
-static void dhd_net_if_lock_local(dhd_info_t *dhd);
-static void dhd_net_if_unlock_local(dhd_info_t *dhd);
-static void dhd_suspend_lock(dhd_pub_t *dhdp);
-static void dhd_suspend_unlock(dhd_pub_t *dhdp);
+ if (secondary_available_cpus > 0) {
+ /* At this point if napi_cpu is unassigned it means no CPU
+ * is online from Primary Group
+ */
+ if (napi_cpu == 0) {
+ napi_cpu = cpumask_first(dhd->cpumask_secondary_new);
+ tx_cpu = cpumask_next(napi_cpu, dhd->cpumask_secondary_new);
+ compl_cpu = cpumask_next(tx_cpu, dhd->cpumask_secondary_new);
+ } else if (tx_cpu == 0) {
+ tx_cpu = cpumask_first(dhd->cpumask_secondary_new);
+ compl_cpu = cpumask_next(tx_cpu, dhd->cpumask_secondary_new);
+ } else if (compl_cpu == 0) {
+ compl_cpu = cpumask_first(dhd->cpumask_secondary_new);
+ }
-/* Monitor interface */
-int dhd_monitor_init(void *dhd_pub);
-int dhd_monitor_uninit(void);
+ /* If no CPU was available for tx processing, choose CPU 0 */
+ if (tx_cpu >= nr_cpu_ids)
+ tx_cpu = 0;
-#ifdef DHD_PM_CONTROL_FROM_FILE
-bool g_pm_control;
-#ifdef DHD_EXPORT_CNTL_FILE
-int pmmode_val;
-#endif /* DHD_EXPORT_CNTL_FILE */
-void sec_control_pm(dhd_pub_t *dhd, uint *);
-#endif /* DHD_PM_CONTROL_FROM_FILE */
+ /* If no CPU was available for completion, choose CPU 0 */
+ if (compl_cpu >= nr_cpu_ids)
+ compl_cpu = 0;
+ }
+ if ((primary_available_cpus == 0) &&
+ (secondary_available_cpus == 0)) {
+ /* No CPUs available from primary or secondary mask */
+ napi_cpu = 1;
+ compl_cpu = 0;
+ tx_cpu = 2;
+ }
-#if defined(WL_WIRELESS_EXT)
-struct iw_statistics *dhd_get_wireless_stats(struct net_device *dev);
-#endif /* defined(WL_WIRELESS_EXT) */
+ DHD_INFO(("%s After secondary CPU check napi_cpu %d compl_cpu %d tx_cpu %d\n",
+ __FUNCTION__, napi_cpu, compl_cpu, tx_cpu));
-#ifndef BCMDBUS
-static void dhd_dpc(ulong data);
-#endif /* !BCMDBUS */
-/* forward decl */
-extern int dhd_wait_pend8021x(struct net_device *dev);
-void dhd_os_wd_timer_extend(void *bus, bool extend);
+ ASSERT(napi_cpu < nr_cpu_ids);
+ ASSERT(compl_cpu < nr_cpu_ids);
+ ASSERT(tx_cpu < nr_cpu_ids);
-#ifdef TOE
-#ifndef BDC
-#error TOE requires BDC
-#endif /* !BDC */
-static int dhd_toe_get(dhd_info_t *dhd, int idx, uint32 *toe_ol);
-static int dhd_toe_set(dhd_info_t *dhd, int idx, uint32 toe_ol);
-#endif /* TOE */
+ atomic_set(&dhd->rx_napi_cpu, napi_cpu);
+ atomic_set(&dhd->tx_compl_cpu, compl_cpu);
+ atomic_set(&dhd->rx_compl_cpu, compl_cpu);
+ atomic_set(&dhd->tx_cpu, tx_cpu);
-static int dhd_wl_host_event(dhd_info_t *dhd, int ifidx, void *pktdata, uint16 pktlen,
- wl_event_msg_t *event_ptr, void **data_ptr);
+ return;
+}
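/*
 * Worked example (continuing the hypothetical masks above): with
 * cpumask_primary = {4,5,6,7}, cpumask_secondary = {1,2,3} and CPUs
 * {0,1,2,4,5} online, dhd_select_cpu_candidacy() computes
 *
 *   cpumask_primary_new   = {4,5}  ->  napi_cpu = 4, tx_cpu = 5,
 *                                      compl_cpu wraps back to tx_cpu = 5
 *   cpumask_secondary_new = {1,2}  ->  left unused, since napi/tx/compl are
 *                                      already assigned from the primary set
 *
 * i.e. NAPI rx lands on CPU4 while tx and the completion jobs share CPU5.
 */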
-#if defined(CONFIG_PM_SLEEP)
-static int dhd_pm_callback(struct notifier_block *nfb, unsigned long action, void *ignored)
+/*
+ * Function to handle CPU Hotplug notifications.
+ * One of the task it does is to trigger the CPU Candidacy algorithm
+ * for load balancing.
+ */
+int
+dhd_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
{
- int ret = NOTIFY_DONE;
- bool suspend = FALSE;
+ unsigned long int cpu = (unsigned long int)hcpu;
#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wcast-qual"
-#endif // endif
- dhd_info_t *dhdinfo = (dhd_info_t*)container_of(nfb, struct dhd_info, pm_notifier);
+#endif
+ dhd_info_t *dhd = container_of(nfb, dhd_info_t, cpu_notifier);
#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
#pragma GCC diagnostic pop
-#endif // endif
+#endif
- BCM_REFERENCE(dhdinfo);
- BCM_REFERENCE(suspend);
+ if (!dhd || !(dhd->dhd_state & DHD_ATTACH_STATE_LB_ATTACH_DONE)) {
+ DHD_INFO(("%s(): LB data is not initialized yet.\n",
+ __FUNCTION__));
+ return NOTIFY_BAD;
+ }
- switch (action) {
- case PM_HIBERNATION_PREPARE:
- case PM_SUSPEND_PREPARE:
- suspend = TRUE;
- break;
+ switch (action)
+ {
+ case CPU_ONLINE:
+ case CPU_ONLINE_FROZEN:
+ DHD_LB_STATS_INCR(dhd->cpu_online_cnt[cpu]);
+ cpumask_set_cpu(cpu, dhd->cpumask_curr_avail);
+ dhd_select_cpu_candidacy(dhd);
+ break;
- case PM_POST_HIBERNATION:
- case PM_POST_SUSPEND:
- suspend = FALSE;
- break;
+ case CPU_DOWN_PREPARE:
+ case CPU_DOWN_PREPARE_FROZEN:
+ DHD_LB_STATS_INCR(dhd->cpu_offline_cnt[cpu]);
+ cpumask_clear_cpu(cpu, dhd->cpumask_curr_avail);
+ dhd_select_cpu_candidacy(dhd);
+ break;
+ default:
+ break;
}
- printf("%s: action=%ld, suspend=%d, suspend_mode=%d\n",
- __FUNCTION__, action, suspend, dhdinfo->pub.conf->suspend_mode);
- if (suspend) {
- DHD_OS_WAKE_LOCK_WAIVE(&dhdinfo->pub);
- if (dhdinfo->pub.conf->suspend_mode == PM_NOTIFIER)
- dhd_suspend_resume_helper(dhdinfo, suspend, 0);
-#if defined(SUPPORT_P2P_GO_PS) && defined(PROP_TXSTATUS)
- dhd_wlfc_suspend(&dhdinfo->pub);
-#endif /* defined(SUPPORT_P2P_GO_PS) && defined(PROP_TXSTATUS) */
- if (dhdinfo->pub.conf->suspend_mode == PM_NOTIFIER)
- dhd_conf_set_suspend_resume(&dhdinfo->pub, suspend);
- DHD_OS_WAKE_LOCK_RESTORE(&dhdinfo->pub);
- } else {
- if (dhdinfo->pub.conf->suspend_mode == PM_NOTIFIER)
- dhd_conf_set_suspend_resume(&dhdinfo->pub, suspend);
-#if defined(SUPPORT_P2P_GO_PS) && defined(PROP_TXSTATUS)
- dhd_wlfc_resume(&dhdinfo->pub);
-#endif /* defined(SUPPORT_P2P_GO_PS) && defined(PROP_TXSTATUS) */
- if (dhdinfo->pub.conf->suspend_mode == PM_NOTIFIER)
- dhd_suspend_resume_helper(dhdinfo, suspend, 0);
+ return NOTIFY_OK;
+}
+
+#if defined(DHD_LB_STATS)
+void dhd_lb_stats_init(dhd_pub_t *dhdp)
+{
+ dhd_info_t *dhd;
+ int i, j, num_cpus = num_possible_cpus();
+ int alloc_size = sizeof(uint32) * num_cpus;
+
+ if (dhdp == NULL) {
+		DHD_ERROR(("%s(): Invalid argument dhd pub pointer is NULL \n",
+ __FUNCTION__));
+ return;
}
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && (LINUX_VERSION_CODE <= \
- KERNEL_VERSION(2, 6, 39))
- dhd_mmc_suspend = suspend;
- smp_mb();
-#endif
+ dhd = dhdp->info;
+ if (dhd == NULL) {
+ DHD_ERROR(("%s(): DHD pointer is NULL \n", __FUNCTION__));
+ return;
+ }
- return ret;
-}
+ DHD_LB_STATS_CLR(dhd->dhd_dpc_cnt);
+ DHD_LB_STATS_CLR(dhd->napi_sched_cnt);
-/* to make sure we won't register the same notifier twice, otherwise a loop is likely to be
- * created in kernel notifier link list (with 'next' pointing to itself)
- */
-static bool dhd_pm_notifier_registered = FALSE;
+ dhd->napi_percpu_run_cnt = (uint32 *)MALLOC(dhdp->osh, alloc_size);
+ if (!dhd->napi_percpu_run_cnt) {
+ DHD_ERROR(("%s(): napi_percpu_run_cnt malloc failed \n",
+ __FUNCTION__));
+ return;
+ }
+ for (i = 0; i < num_cpus; i++)
+ DHD_LB_STATS_CLR(dhd->napi_percpu_run_cnt[i]);
-extern int register_pm_notifier(struct notifier_block *nb);
-extern int unregister_pm_notifier(struct notifier_block *nb);
-#endif /* CONFIG_PM_SLEEP */
+ DHD_LB_STATS_CLR(dhd->rxc_sched_cnt);
-/* Request scheduling of the bus rx frame */
-static void dhd_sched_rxf(dhd_pub_t *dhdp, void *skb);
-static void dhd_os_rxflock(dhd_pub_t *pub);
-static void dhd_os_rxfunlock(dhd_pub_t *pub);
+ dhd->rxc_percpu_run_cnt = (uint32 *)MALLOC(dhdp->osh, alloc_size);
+ if (!dhd->rxc_percpu_run_cnt) {
+ DHD_ERROR(("%s(): rxc_percpu_run_cnt malloc failed \n",
+ __FUNCTION__));
+ return;
+ }
+ for (i = 0; i < num_cpus; i++)
+ DHD_LB_STATS_CLR(dhd->rxc_percpu_run_cnt[i]);
-#if defined(DHD_H2D_LOG_TIME_SYNC)
-static void
-dhd_deferred_work_rte_log_time_sync(void *handle, void *event_info, u8 event);
-#endif /* DHD_H2D_LOG_TIME_SYNC */
+ DHD_LB_STATS_CLR(dhd->txc_sched_cnt);
-/** priv_link is the link between netdev and the dhdif and dhd_info structs. */
-typedef struct dhd_dev_priv {
- dhd_info_t * dhd; /* cached pointer to dhd_info in netdevice priv */
- dhd_if_t * ifp; /* cached pointer to dhd_if in netdevice priv */
- int ifidx; /* interface index */
- void * lkup;
-} dhd_dev_priv_t;
+ dhd->txc_percpu_run_cnt = (uint32 *)MALLOC(dhdp->osh, alloc_size);
+ if (!dhd->txc_percpu_run_cnt) {
+ DHD_ERROR(("%s(): txc_percpu_run_cnt malloc failed \n",
+ __FUNCTION__));
+ return;
+ }
+ for (i = 0; i < num_cpus; i++)
+ DHD_LB_STATS_CLR(dhd->txc_percpu_run_cnt[i]);
-#define DHD_DEV_PRIV_SIZE (sizeof(dhd_dev_priv_t))
-#define DHD_DEV_PRIV(dev) ((dhd_dev_priv_t *)DEV_PRIV(dev))
-#define DHD_DEV_INFO(dev) (((dhd_dev_priv_t *)DEV_PRIV(dev))->dhd)
-#define DHD_DEV_IFP(dev) (((dhd_dev_priv_t *)DEV_PRIV(dev))->ifp)
-#define DHD_DEV_IFIDX(dev) (((dhd_dev_priv_t *)DEV_PRIV(dev))->ifidx)
-#define DHD_DEV_LKUP(dev) (((dhd_dev_priv_t *)DEV_PRIV(dev))->lkup)
+ dhd->cpu_online_cnt = (uint32 *)MALLOC(dhdp->osh, alloc_size);
+ if (!dhd->cpu_online_cnt) {
+ DHD_ERROR(("%s(): cpu_online_cnt malloc failed \n",
+ __FUNCTION__));
+ return;
+ }
+ for (i = 0; i < num_cpus; i++)
+ DHD_LB_STATS_CLR(dhd->cpu_online_cnt[i]);
-/** Clear the dhd net_device's private structure. */
-static inline void
-dhd_dev_priv_clear(struct net_device * dev)
-{
- dhd_dev_priv_t * dev_priv;
- ASSERT(dev != (struct net_device *)NULL);
- dev_priv = DHD_DEV_PRIV(dev);
- dev_priv->dhd = (dhd_info_t *)NULL;
- dev_priv->ifp = (dhd_if_t *)NULL;
- dev_priv->ifidx = DHD_BAD_IF;
- dev_priv->lkup = (void *)NULL;
-}
+ dhd->cpu_offline_cnt = (uint32 *)MALLOC(dhdp->osh, alloc_size);
+ if (!dhd->cpu_offline_cnt) {
+ DHD_ERROR(("%s(): cpu_offline_cnt malloc failed \n",
+ __FUNCTION__));
+ return;
+ }
+ for (i = 0; i < num_cpus; i++)
+ DHD_LB_STATS_CLR(dhd->cpu_offline_cnt[i]);
-/** Setup the dhd net_device's private structure. */
-static inline void
-dhd_dev_priv_save(struct net_device * dev, dhd_info_t * dhd, dhd_if_t * ifp,
- int ifidx)
-{
- dhd_dev_priv_t * dev_priv;
- ASSERT(dev != (struct net_device *)NULL);
- dev_priv = DHD_DEV_PRIV(dev);
- dev_priv->dhd = dhd;
- dev_priv->ifp = ifp;
- dev_priv->ifidx = ifidx;
+ dhd->txp_percpu_run_cnt = (uint32 *)MALLOC(dhdp->osh, alloc_size);
+ if (!dhd->txp_percpu_run_cnt) {
+ DHD_ERROR(("%s(): txp_percpu_run_cnt malloc failed \n",
+ __FUNCTION__));
+ return;
+ }
+ for (i = 0; i < num_cpus; i++)
+ DHD_LB_STATS_CLR(dhd->txp_percpu_run_cnt[i]);
+
+ dhd->tx_start_percpu_run_cnt = (uint32 *)MALLOC(dhdp->osh, alloc_size);
+ if (!dhd->tx_start_percpu_run_cnt) {
+ DHD_ERROR(("%s(): tx_start_percpu_run_cnt malloc failed \n",
+ __FUNCTION__));
+ return;
+ }
+ for (i = 0; i < num_cpus; i++)
+ DHD_LB_STATS_CLR(dhd->tx_start_percpu_run_cnt[i]);
+
+ for (j = 0; j < HIST_BIN_SIZE; j++) {
+ dhd->napi_rx_hist[j] = (uint32 *)MALLOC(dhdp->osh, alloc_size);
+ if (!dhd->napi_rx_hist[j]) {
+ DHD_ERROR(("%s(): dhd->napi_rx_hist[%d] malloc failed \n",
+ __FUNCTION__, j));
+ return;
+ }
+ for (i = 0; i < num_cpus; i++) {
+ DHD_LB_STATS_CLR(dhd->napi_rx_hist[j][i]);
+ }
+ }
+#ifdef DHD_LB_TXC
+ for (j = 0; j < HIST_BIN_SIZE; j++) {
+ dhd->txc_hist[j] = (uint32 *)MALLOC(dhdp->osh, alloc_size);
+ if (!dhd->txc_hist[j]) {
+ DHD_ERROR(("%s(): dhd->txc_hist[%d] malloc failed \n",
+ __FUNCTION__, j));
+ return;
+ }
+ for (i = 0; i < num_cpus; i++) {
+ DHD_LB_STATS_CLR(dhd->txc_hist[j][i]);
+ }
+ }
+#endif /* DHD_LB_TXC */
+#ifdef DHD_LB_RXC
+ for (j = 0; j < HIST_BIN_SIZE; j++) {
+ dhd->rxc_hist[j] = (uint32 *)MALLOC(dhdp->osh, alloc_size);
+ if (!dhd->rxc_hist[j]) {
+ DHD_ERROR(("%s(): dhd->rxc_hist[%d] malloc failed \n",
+ __FUNCTION__, j));
+ return;
+ }
+ for (i = 0; i < num_cpus; i++) {
+ DHD_LB_STATS_CLR(dhd->rxc_hist[j][i]);
+ }
+ }
+#endif /* DHD_LB_RXC */
+ return;
}
-/* Return interface pointer */
-struct dhd_if * dhd_get_ifp(dhd_pub_t *dhdp, uint32 ifidx)
+void dhd_lb_stats_deinit(dhd_pub_t *dhdp)
{
- ASSERT(ifidx < DHD_MAX_IFS);
+ dhd_info_t *dhd;
+ int j, num_cpus = num_possible_cpus();
+ int alloc_size = sizeof(uint32) * num_cpus;
- if (!dhdp || !dhdp->info || ifidx >= DHD_MAX_IFS)
- return NULL;
+ if (dhdp == NULL) {
+		DHD_ERROR(("%s(): Invalid argument dhd pub pointer is NULL \n",
+ __FUNCTION__));
+ return;
+ }
- return dhdp->info->iflist[ifidx];
-}
+ dhd = dhdp->info;
+ if (dhd == NULL) {
+ DHD_ERROR(("%s(): DHD pointer is NULL \n", __FUNCTION__));
+ return;
+ }
-#ifdef PCIE_FULL_DONGLE
+ if (dhd->napi_percpu_run_cnt) {
+ MFREE(dhdp->osh, dhd->napi_percpu_run_cnt, alloc_size);
+ dhd->napi_percpu_run_cnt = NULL;
+ }
+ if (dhd->rxc_percpu_run_cnt) {
+ MFREE(dhdp->osh, dhd->rxc_percpu_run_cnt, alloc_size);
+ dhd->rxc_percpu_run_cnt = NULL;
+ }
+ if (dhd->txc_percpu_run_cnt) {
+ MFREE(dhdp->osh, dhd->txc_percpu_run_cnt, alloc_size);
+ dhd->txc_percpu_run_cnt = NULL;
+ }
+ if (dhd->cpu_online_cnt) {
+ MFREE(dhdp->osh, dhd->cpu_online_cnt, alloc_size);
+ dhd->cpu_online_cnt = NULL;
+ }
+ if (dhd->cpu_offline_cnt) {
+ MFREE(dhdp->osh, dhd->cpu_offline_cnt, alloc_size);
+ dhd->cpu_offline_cnt = NULL;
+ }
-/** Dummy objects are defined with state representing bad|down.
- * Performance gains from reducing branch conditionals, instruction parallelism,
- * dual issue, reducing load shadows, avail of larger pipelines.
- * Use DHD_XXX_NULL instead of (dhd_xxx_t *)NULL, whenever an object pointer
- * is accessed via the dhd_sta_t.
- */
+ if (dhd->txp_percpu_run_cnt) {
+ MFREE(dhdp->osh, dhd->txp_percpu_run_cnt, alloc_size);
+ dhd->txp_percpu_run_cnt = NULL;
+ }
+ if (dhd->tx_start_percpu_run_cnt) {
+ MFREE(dhdp->osh, dhd->tx_start_percpu_run_cnt, alloc_size);
+ dhd->tx_start_percpu_run_cnt = NULL;
+ }
-/* Dummy dhd_info object */
-dhd_info_t dhd_info_null = {
- .pub = {
- .info = &dhd_info_null,
-#ifdef DHDTCPACK_SUPPRESS
- .tcpack_sup_mode = TCPACK_SUP_REPLACE,
-#endif /* DHDTCPACK_SUPPRESS */
- .up = FALSE,
- .busstate = DHD_BUS_DOWN
+ for (j = 0; j < HIST_BIN_SIZE; j++) {
+ if (dhd->napi_rx_hist[j]) {
+ MFREE(dhdp->osh, dhd->napi_rx_hist[j], alloc_size);
+ dhd->napi_rx_hist[j] = NULL;
+ }
+#ifdef DHD_LB_TXC
+ if (dhd->txc_hist[j]) {
+ MFREE(dhdp->osh, dhd->txc_hist[j], alloc_size);
+ dhd->txc_hist[j] = NULL;
+ }
+#endif /* DHD_LB_TXC */
+#ifdef DHD_LB_RXC
+ if (dhd->rxc_hist[j]) {
+ MFREE(dhdp->osh, dhd->rxc_hist[j], alloc_size);
+ dhd->rxc_hist[j] = NULL;
+ }
+#endif /* DHD_LB_RXC */
}
-};
-#define DHD_INFO_NULL (&dhd_info_null)
-#define DHD_PUB_NULL (&dhd_info_null.pub)
-/* Dummy netdevice object */
-struct net_device dhd_net_dev_null = {
- .reg_state = NETREG_UNREGISTERED
-};
-#define DHD_NET_DEV_NULL (&dhd_net_dev_null)
+ return;
+}
-/* Dummy dhd_if object */
-dhd_if_t dhd_if_null = {
-#ifdef WMF
- .wmf = { .wmf_enable = TRUE },
-#endif // endif
- .info = DHD_INFO_NULL,
- .net = DHD_NET_DEV_NULL,
- .idx = DHD_BAD_IF
-};
-#define DHD_IF_NULL (&dhd_if_null)
+static void dhd_lb_stats_dump_histo(
+ struct bcmstrbuf *strbuf, uint32 **hist)
+{
+ int i, j;
+ uint32 *per_cpu_total;
+ uint32 total = 0;
+ uint32 num_cpus = num_possible_cpus();
-#define DHD_STA_NULL ((dhd_sta_t *)NULL)
+ per_cpu_total = (uint32 *)kmalloc(sizeof(uint32) * num_cpus, GFP_ATOMIC);
+ if (!per_cpu_total) {
+		DHD_ERROR(("%s(): per_cpu_total malloc failed \n", __FUNCTION__));
+ return;
+ }
+ bzero(per_cpu_total, sizeof(uint32) * num_cpus);
-/** Interface STA list management. */
+ bcm_bprintf(strbuf, "CPU: \t\t");
+ for (i = 0; i < num_cpus; i++)
+ bcm_bprintf(strbuf, "%d\t", i);
+ bcm_bprintf(strbuf, "\nBin\n");
-/** Alloc/Free a dhd_sta object from the dhd instances' sta_pool. */
-static void dhd_sta_free(dhd_pub_t *pub, dhd_sta_t *sta);
-static dhd_sta_t * dhd_sta_alloc(dhd_pub_t * dhdp);
+ for (i = 0; i < HIST_BIN_SIZE; i++) {
+ bcm_bprintf(strbuf, "%d:\t\t", 1<<i);
+ for (j = 0; j < num_cpus; j++) {
+ bcm_bprintf(strbuf, "%d\t", hist[i][j]);
+ }
+ bcm_bprintf(strbuf, "\n");
+ }
+ bcm_bprintf(strbuf, "Per CPU Total \t");
+ total = 0;
+ for (i = 0; i < num_cpus; i++) {
+ for (j = 0; j < HIST_BIN_SIZE; j++) {
+ per_cpu_total[i] += (hist[j][i] * (1<<j));
+ }
+ bcm_bprintf(strbuf, "%d\t", per_cpu_total[i]);
+ total += per_cpu_total[i];
+ }
+ bcm_bprintf(strbuf, "\nTotal\t\t%d \n", total);
-/* Delete a dhd_sta or flush all dhd_sta in an interface's sta_list. */
-static void dhd_if_del_sta_list(dhd_if_t * ifp);
-static void dhd_if_flush_sta(dhd_if_t * ifp);
+ kfree(per_cpu_total);
+ return;
+}
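/*
 * Note on the "Per CPU Total" printed above (illustrative, under the same
 * binning assumptions as dhd_lb_stats_update_histo() below): bin[j] counts
 * runs whose packet count rounded up to 2^j, so weighting each bin by (1<<j)
 * yields an upper-bound estimate rather than an exact packet total. E.g.
 * three NAPI runs of 10, 30 and 15 packets land in the 2^4, 2^5 and 2^4 bins
 * and are reported as 16 + 32 + 16 = 64, while the true total is 55.
 */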
-/* Construct/Destruct a sta pool. */
-static int dhd_sta_pool_init(dhd_pub_t *dhdp, int max_sta);
-static void dhd_sta_pool_fini(dhd_pub_t *dhdp, int max_sta);
-/* Clear the pool of dhd_sta_t objects for built-in type driver */
-static void dhd_sta_pool_clear(dhd_pub_t *dhdp, int max_sta);
+static inline void dhd_lb_stats_dump_cpu_array(struct bcmstrbuf *strbuf, uint32 *p)
+{
+ int i, num_cpus = num_possible_cpus();
-/** Reset a dhd_sta object and free into the dhd pool. */
-static void
-dhd_sta_free(dhd_pub_t * dhdp, dhd_sta_t * sta)
+ bcm_bprintf(strbuf, "CPU: \t");
+ for (i = 0; i < num_cpus; i++)
+ bcm_bprintf(strbuf, "%d\t", i);
+ bcm_bprintf(strbuf, "\n");
+
+ bcm_bprintf(strbuf, "Val: \t");
+ for (i = 0; i < num_cpus; i++)
+ bcm_bprintf(strbuf, "%u\t", *(p+i));
+ bcm_bprintf(strbuf, "\n");
+ return;
+}
+
+void dhd_lb_stats_dump(dhd_pub_t *dhdp, struct bcmstrbuf *strbuf)
{
- int prio;
+ dhd_info_t *dhd;
- ASSERT((sta != DHD_STA_NULL) && (sta->idx != ID16_INVALID));
+ if (dhdp == NULL || strbuf == NULL) {
+ DHD_ERROR(("%s(): Invalid argument dhdp %p strbuf %p \n",
+ __FUNCTION__, dhdp, strbuf));
+ return;
+ }
- ASSERT((dhdp->staid_allocator != NULL) && (dhdp->sta_pool != NULL));
+ dhd = dhdp->info;
+ if (dhd == NULL) {
+ DHD_ERROR(("%s(): DHD pointer is NULL \n", __FUNCTION__));
+ return;
+ }
- /*
- * Flush and free all packets in all flowring's queues belonging to sta.
- * Packets in flow ring will be flushed later.
- */
- for (prio = 0; prio < (int)NUMPRIO; prio++) {
- uint16 flowid = sta->flowid[prio];
+ bcm_bprintf(strbuf, "\ncpu_online_cnt:\n");
+ dhd_lb_stats_dump_cpu_array(strbuf, dhd->cpu_online_cnt);
- if (flowid != FLOWID_INVALID) {
- unsigned long flags;
- flow_ring_node_t * flow_ring_node;
+ bcm_bprintf(strbuf, "\ncpu_offline_cnt:\n");
+ dhd_lb_stats_dump_cpu_array(strbuf, dhd->cpu_offline_cnt);
-#ifdef DHDTCPACK_SUPPRESS
- /* Clean tcp_ack_info_tbl in order to prevent access to flushed pkt,
- * when there is a newly coming packet from network stack.
- */
- dhd_tcpack_info_tbl_clean(dhdp);
-#endif /* DHDTCPACK_SUPPRESS */
+ bcm_bprintf(strbuf, "\nsched_cnt: dhd_dpc %u napi %u rxc %u txc %u\n",
+ dhd->dhd_dpc_cnt, dhd->napi_sched_cnt, dhd->rxc_sched_cnt,
+ dhd->txc_sched_cnt);
- flow_ring_node = dhd_flow_ring_node(dhdp, flowid);
- if (flow_ring_node) {
- flow_queue_t *queue = &flow_ring_node->queue;
+#ifdef DHD_LB_RXP
+ bcm_bprintf(strbuf, "\nnapi_percpu_run_cnt:\n");
+ dhd_lb_stats_dump_cpu_array(strbuf, dhd->napi_percpu_run_cnt);
+ bcm_bprintf(strbuf, "\nNAPI Packets Received Histogram:\n");
+ dhd_lb_stats_dump_histo(strbuf, dhd->napi_rx_hist);
+#endif /* DHD_LB_RXP */
- DHD_FLOWRING_LOCK(flow_ring_node->lock, flags);
- flow_ring_node->status = FLOW_RING_STATUS_STA_FREEING;
+#ifdef DHD_LB_RXC
+ bcm_bprintf(strbuf, "\nrxc_percpu_run_cnt:\n");
+ dhd_lb_stats_dump_cpu_array(strbuf, dhd->rxc_percpu_run_cnt);
+ bcm_bprintf(strbuf, "\nRX Completions (Buffer Post) Histogram:\n");
+ dhd_lb_stats_dump_histo(strbuf, dhd->rxc_hist);
+#endif /* DHD_LB_RXC */
- if (!DHD_FLOW_QUEUE_EMPTY(queue)) {
- void * pkt;
- while ((pkt = dhd_flow_queue_dequeue(dhdp, queue)) !=
- NULL) {
- PKTFREE(dhdp->osh, pkt, TRUE);
- }
- }
+#ifdef DHD_LB_TXC
+ bcm_bprintf(strbuf, "\ntxc_percpu_run_cnt:\n");
+ dhd_lb_stats_dump_cpu_array(strbuf, dhd->txc_percpu_run_cnt);
+ bcm_bprintf(strbuf, "\nTX Completions (Buffer Free) Histogram:\n");
+ dhd_lb_stats_dump_histo(strbuf, dhd->txc_hist);
+#endif /* DHD_LB_TXC */
- DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags);
- ASSERT(DHD_FLOW_QUEUE_EMPTY(queue));
- }
- }
+#ifdef DHD_LB_TXP
+ bcm_bprintf(strbuf, "\ntxp_percpu_run_cnt:\n");
+ dhd_lb_stats_dump_cpu_array(strbuf, dhd->txp_percpu_run_cnt);
- sta->flowid[prio] = FLOWID_INVALID;
- }
+ bcm_bprintf(strbuf, "\ntx_start_percpu_run_cnt:\n");
+ dhd_lb_stats_dump_cpu_array(strbuf, dhd->tx_start_percpu_run_cnt);
+#endif /* DHD_LB_TXP */
+
+ bcm_bprintf(strbuf, "\nCPU masks primary(big)=0x%x secondary(little)=0x%x\n",
+ DHD_LB_PRIMARY_CPUS, DHD_LB_SECONDARY_CPUS);
+
+ bcm_bprintf(strbuf, "napi_cpu %x tx_cpu %x\n",
+ atomic_read(&dhd->rx_napi_cpu), atomic_read(&dhd->tx_cpu));
- id16_map_free(dhdp->staid_allocator, sta->idx);
- DHD_CUMM_CTR_INIT(&sta->cumm_ctr);
- sta->ifp = DHD_IF_NULL; /* dummy dhd_if object */
- sta->ifidx = DHD_BAD_IF;
- bzero(sta->ea.octet, ETHER_ADDR_LEN);
- INIT_LIST_HEAD(&sta->list);
- sta->idx = ID16_INVALID; /* implying free */
}
-/** Allocate a dhd_sta object from the dhd pool. */
-static dhd_sta_t *
-dhd_sta_alloc(dhd_pub_t * dhdp)
+/* Given a number 'n', return the smallest power of 2 that is >= 'n' */
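+/*
+ * Worked example (illustrative only): next_larger_power2(33): 33 - 1 = 32
+ * (0b100000); OR-ing in the shifted copies smears the top bit downward,
+ * giving 0b111111 = 63; adding 1 yields 64. Exact powers of 2 map to
+ * themselves, e.g. next_larger_power2(64) == 64.
+ */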
+static inline uint32 next_larger_power2(uint32 num)
{
- uint16 idx;
- dhd_sta_t * sta;
- dhd_sta_pool_t * sta_pool;
+ num--;
+ num |= (num >> 1);
+ num |= (num >> 2);
+ num |= (num >> 4);
+ num |= (num >> 8);
+ num |= (num >> 16);
- ASSERT((dhdp->staid_allocator != NULL) && (dhdp->sta_pool != NULL));
+ return (num + 1);
+}
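+/*
+ * Bucket 'count' into a per-CPU histogram: bin[i] counts invocations whose
+ * packet count rounded up to 2^i (1, 2, 4, ... 128); anything larger falls
+ * into bin[8]. Note that a count of 0 also lands in bin[8], since
+ * next_larger_power2(0) wraps to 0 and matches no explicit case.
+ */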
- idx = id16_map_alloc(dhdp->staid_allocator);
- if (idx == ID16_INVALID) {
- DHD_ERROR(("%s: cannot get free staid\n", __FUNCTION__));
- return DHD_STA_NULL;
- }
+static void dhd_lb_stats_update_histo(uint32 **bin, uint32 count, uint32 cpu)
+{
+ uint32 bin_power;
+ uint32 *p;
+ bin_power = next_larger_power2(count);
- sta_pool = (dhd_sta_pool_t *)(dhdp->sta_pool);
- sta = &sta_pool[idx];
+ switch (bin_power) {
+ case 1: p = bin[0] + cpu; break;
+ case 2: p = bin[1] + cpu; break;
+ case 4: p = bin[2] + cpu; break;
+ case 8: p = bin[3] + cpu; break;
+ case 16: p = bin[4] + cpu; break;
+ case 32: p = bin[5] + cpu; break;
+ case 64: p = bin[6] + cpu; break;
+ case 128: p = bin[7] + cpu; break;
+ default : p = bin[8] + cpu; break;
+ }
- ASSERT((sta->idx == ID16_INVALID) &&
- (sta->ifp == DHD_IF_NULL) && (sta->ifidx == DHD_BAD_IF));
+ *p = *p + 1;
+ return;
+}
- DHD_CUMM_CTR_INIT(&sta->cumm_ctr);
+extern void dhd_lb_stats_update_napi_histo(dhd_pub_t *dhdp, uint32 count)
+{
+ int cpu;
+ dhd_info_t *dhd = dhdp->info;
- sta->idx = idx; /* implying allocated */
+ cpu = get_cpu();
+ put_cpu();
+ dhd_lb_stats_update_histo(dhd->napi_rx_hist, count, cpu);
- return sta;
+ return;
}
-/** Delete all STAs in an interface's STA list. */
-static void
-dhd_if_del_sta_list(dhd_if_t *ifp)
+extern void dhd_lb_stats_update_txc_histo(dhd_pub_t *dhdp, uint32 count)
{
- dhd_sta_t *sta, *next;
- unsigned long flags;
+ int cpu;
+ dhd_info_t *dhd = dhdp->info;
- DHD_IF_STA_LIST_LOCK(ifp, flags);
-#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
-#pragma GCC diagnostic push
-#pragma GCC diagnostic ignored "-Wcast-qual"
-#endif // endif
- list_for_each_entry_safe(sta, next, &ifp->sta_list, list) {
- list_del(&sta->list);
- dhd_sta_free(&ifp->info->pub, sta);
- }
-#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
-#pragma GCC diagnostic pop
-#endif // endif
- DHD_IF_STA_LIST_UNLOCK(ifp, flags);
+ cpu = get_cpu();
+ put_cpu();
+ dhd_lb_stats_update_histo(dhd->txc_hist, count, cpu);
return;
}
-/** Router/GMAC3: Flush all station entries in the forwarder's WOFA database. */
-static void
-dhd_if_flush_sta(dhd_if_t * ifp)
+extern void dhd_lb_stats_update_rxc_histo(dhd_pub_t *dhdp, uint32 count)
{
+ int cpu;
+ dhd_info_t *dhd = dhdp->info;
+
+ cpu = get_cpu();
+ put_cpu();
+ dhd_lb_stats_update_histo(dhd->rxc_hist, count, cpu);
+
+ return;
}
-/** Construct a pool of dhd_sta_t objects to be used by interfaces. */
-static int
-dhd_sta_pool_init(dhd_pub_t *dhdp, int max_sta)
+extern void dhd_lb_stats_txc_percpu_cnt_incr(dhd_pub_t *dhdp)
{
- int idx, prio, sta_pool_memsz;
- dhd_sta_t * sta;
- dhd_sta_pool_t * sta_pool;
- void * staid_allocator;
+ dhd_info_t *dhd = dhdp->info;
+ DHD_LB_STATS_PERCPU_ARR_INCR(dhd->txc_percpu_run_cnt);
+}
- ASSERT(dhdp != (dhd_pub_t *)NULL);
- ASSERT((dhdp->staid_allocator == NULL) && (dhdp->sta_pool == NULL));
+extern void dhd_lb_stats_rxc_percpu_cnt_incr(dhd_pub_t *dhdp)
+{
+ dhd_info_t *dhd = dhdp->info;
+ DHD_LB_STATS_PERCPU_ARR_INCR(dhd->rxc_percpu_run_cnt);
+}
+#endif /* DHD_LB_STATS */
- /* dhd_sta objects per radio are managed in a table. id#0 reserved. */
- staid_allocator = id16_map_init(dhdp->osh, max_sta, 1);
- if (staid_allocator == NULL) {
- DHD_ERROR(("%s: sta id allocator init failure\n", __FUNCTION__));
- return BCME_ERROR;
- }
+#endif /* DHD_LB */
- /* Pre allocate a pool of dhd_sta objects (one extra). */
- sta_pool_memsz = ((max_sta + 1) * sizeof(dhd_sta_t)); /* skip idx 0 */
- sta_pool = (dhd_sta_pool_t *)MALLOC(dhdp->osh, sta_pool_memsz);
- if (sta_pool == NULL) {
- DHD_ERROR(("%s: sta table alloc failure\n", __FUNCTION__));
- id16_map_fini(dhdp->osh, staid_allocator);
- return BCME_ERROR;
- }
+#if defined(DISABLE_FRAMEBURST_VSDB) && defined(USE_WFA_CERT_CONF)
+int g_frameburst = 1;
+#endif /* DISABLE_FRAMEBURST_VSDB && USE_WFA_CERT_CONF */
- dhdp->sta_pool = sta_pool;
- dhdp->staid_allocator = staid_allocator;
+static int dhd_get_pend_8021x_cnt(dhd_info_t *dhd);
- /* Initialize all sta(s) for the pre-allocated free pool. */
- bzero((uchar *)sta_pool, sta_pool_memsz);
- for (idx = max_sta; idx >= 1; idx--) { /* skip sta_pool[0] */
- sta = &sta_pool[idx];
- sta->idx = id16_map_alloc(staid_allocator);
- ASSERT(sta->idx <= max_sta);
- }
+/* DHD Perimeter lock only used in routers with bypass forwarding. */
+#define DHD_PERIM_RADIO_INIT() do { /* noop */ } while (0)
+#define DHD_PERIM_LOCK_TRY(unit, flag) do { /* noop */ } while (0)
+#define DHD_PERIM_UNLOCK_TRY(unit, flag) do { /* noop */ } while (0)
- /* Now place them into the pre-allocated free pool. */
- for (idx = 1; idx <= max_sta; idx++) {
- sta = &sta_pool[idx];
- for (prio = 0; prio < (int)NUMPRIO; prio++) {
- sta->flowid[prio] = FLOWID_INVALID; /* Flow rings do not exist */
- }
- dhd_sta_free(dhdp, sta);
- }
+#ifdef PCIE_FULL_DONGLE
+#if defined(BCM_GMAC3)
+#define DHD_IF_STA_LIST_LOCK_INIT(ifp) do { /* noop */ } while (0)
+#define DHD_IF_STA_LIST_LOCK(ifp, flags) ({ BCM_REFERENCE(flags); })
+#define DHD_IF_STA_LIST_UNLOCK(ifp, flags) ({ BCM_REFERENCE(flags); })
- return BCME_OK;
-}
+#if defined(DHD_IGMP_UCQUERY) || defined(DHD_UCAST_UPNP)
+#define DHD_IF_WMF_UCFORWARD_LOCK(dhd, ifp, slist) ({ BCM_REFERENCE(slist); &(ifp)->sta_list; })
+#define DHD_IF_WMF_UCFORWARD_UNLOCK(dhd, slist) ({ BCM_REFERENCE(slist); })
+#endif /* DHD_IGMP_UCQUERY || DHD_UCAST_UPNP */
-/** Destruct the pool of dhd_sta_t objects.
- * Caller must ensure that no STA objects are currently associated with an if.
- */
-static void
-dhd_sta_pool_fini(dhd_pub_t *dhdp, int max_sta)
-{
- dhd_sta_pool_t * sta_pool = (dhd_sta_pool_t *)dhdp->sta_pool;
+#else /* ! BCM_GMAC3 */
+#define DHD_IF_STA_LIST_LOCK_INIT(ifp) spin_lock_init(&(ifp)->sta_list_lock)
+#define DHD_IF_STA_LIST_LOCK(ifp, flags) \
+ spin_lock_irqsave(&(ifp)->sta_list_lock, (flags))
+#define DHD_IF_STA_LIST_UNLOCK(ifp, flags) \
+ spin_unlock_irqrestore(&(ifp)->sta_list_lock, (flags))
- if (sta_pool) {
- int idx;
- int sta_pool_memsz = ((max_sta + 1) * sizeof(dhd_sta_t));
- for (idx = 1; idx <= max_sta; idx++) {
- ASSERT(sta_pool[idx].ifp == DHD_IF_NULL);
- ASSERT(sta_pool[idx].idx == ID16_INVALID);
- }
- MFREE(dhdp->osh, dhdp->sta_pool, sta_pool_memsz);
- dhdp->sta_pool = NULL;
- }
+#if defined(DHD_IGMP_UCQUERY) || defined(DHD_UCAST_UPNP)
+static struct list_head * dhd_sta_list_snapshot(dhd_info_t *dhd, dhd_if_t *ifp,
+ struct list_head *snapshot_list);
+static void dhd_sta_list_snapshot_free(dhd_info_t *dhd, struct list_head *snapshot_list);
+#define DHD_IF_WMF_UCFORWARD_LOCK(dhd, ifp, slist) ({ dhd_sta_list_snapshot(dhd, ifp, slist); })
+#define DHD_IF_WMF_UCFORWARD_UNLOCK(dhd, slist) ({ dhd_sta_list_snapshot_free(dhd, slist); })
+#endif /* DHD_IGMP_UCQUERY || DHD_UCAST_UPNP */
- id16_map_fini(dhdp->osh, dhdp->staid_allocator);
- dhdp->staid_allocator = NULL;
-}
+#endif /* ! BCM_GMAC3 */
+#endif /* PCIE_FULL_DONGLE */
-/* Clear the pool of dhd_sta_t objects for built-in type driver */
-static void
-dhd_sta_pool_clear(dhd_pub_t *dhdp, int max_sta)
-{
- int idx, prio, sta_pool_memsz;
- dhd_sta_t * sta;
- dhd_sta_pool_t * sta_pool;
- void *staid_allocator;
+/* Control fw roaming */
+uint dhd_roam_disable = 0;
- if (!dhdp) {
- DHD_ERROR(("%s: dhdp is NULL\n", __FUNCTION__));
- return;
- }
+#ifdef BCMDBGFS
+extern void dhd_dbgfs_init(dhd_pub_t *dhdp);
+extern void dhd_dbgfs_remove(void);
+#endif
- sta_pool = (dhd_sta_pool_t *)dhdp->sta_pool;
- staid_allocator = dhdp->staid_allocator;
- if (!sta_pool) {
- DHD_ERROR(("%s: sta_pool is NULL\n", __FUNCTION__));
- return;
- }
+/* Control radio state */
+uint dhd_radio_up = 1;
- if (!staid_allocator) {
- DHD_ERROR(("%s: staid_allocator is NULL\n", __FUNCTION__));
- return;
- }
+/* Network interface name */
+char iface_name[IFNAMSIZ] = {'\0'};
+module_param_string(iface_name, iface_name, IFNAMSIZ, 0);
- /* clear free pool */
- sta_pool_memsz = ((max_sta + 1) * sizeof(dhd_sta_t));
- bzero((uchar *)sta_pool, sta_pool_memsz);
+/* The following are specific to the SDIO dongle */
- /* dhd_sta objects per radio are managed in a table. id#0 reserved. */
- id16_map_clear(staid_allocator, max_sta, 1);
+/* IOCTL response timeout */
+int dhd_ioctl_timeout_msec = IOCTL_RESP_TIMEOUT;
- /* Initialize all sta(s) for the pre-allocated free pool. */
- for (idx = max_sta; idx >= 1; idx--) { /* skip sta_pool[0] */
- sta = &sta_pool[idx];
- sta->idx = id16_map_alloc(staid_allocator);
- ASSERT(sta->idx <= max_sta);
- }
- /* Now place them into the pre-allocated free pool. */
- for (idx = 1; idx <= max_sta; idx++) {
- sta = &sta_pool[idx];
- for (prio = 0; prio < (int)NUMPRIO; prio++) {
- sta->flowid[prio] = FLOWID_INVALID; /* Flow rings do not exist */
- }
- dhd_sta_free(dhdp, sta);
- }
-}
+/* DS Exit response timeout */
+int ds_exit_timeout_msec = DS_EXIT_TIMEOUT;
-/** Find STA with MAC address ea in an interface's STA list. */
-dhd_sta_t *
-dhd_find_sta(void *pub, int ifidx, void *ea)
-{
- dhd_sta_t *sta;
- dhd_if_t *ifp;
- unsigned long flags;
+/* Idle timeout for backplane clock */
+int dhd_idletime = DHD_IDLETIME_TICKS;
+module_param(dhd_idletime, int, 0);
- ASSERT(ea != NULL);
- ifp = dhd_get_ifp((dhd_pub_t *)pub, ifidx);
- if (ifp == NULL)
- return DHD_STA_NULL;
+/* Use polling */
+uint dhd_poll = FALSE;
+module_param(dhd_poll, uint, 0);
- DHD_IF_STA_LIST_LOCK(ifp, flags);
-#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
-#pragma GCC diagnostic push
-#pragma GCC diagnostic ignored "-Wcast-qual"
-#endif // endif
- list_for_each_entry(sta, &ifp->sta_list, list) {
- if (!memcmp(sta->ea.octet, ea, ETHER_ADDR_LEN)) {
- DHD_INFO(("%s: Found STA " MACDBG "\n",
- __FUNCTION__, MAC2STRDBG((char *)ea)));
- DHD_IF_STA_LIST_UNLOCK(ifp, flags);
- return sta;
- }
- }
-#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
-#pragma GCC diagnostic pop
-#endif // endif
- DHD_IF_STA_LIST_UNLOCK(ifp, flags);
+/* Use interrupts */
+uint dhd_intr = TRUE;
+module_param(dhd_intr, uint, 0);
- return DHD_STA_NULL;
-}
+/* SDIO Drive Strength (in milliamps) */
+uint dhd_sdiod_drive_strength = 6;
+module_param(dhd_sdiod_drive_strength, uint, 0);
-/** Add STA into the interface's STA list. */
-dhd_sta_t *
-dhd_add_sta(void *pub, int ifidx, void *ea)
-{
- dhd_sta_t *sta;
- dhd_if_t *ifp;
- unsigned long flags;
+#ifdef BCMSDIO
+/* Tx/Rx bounds */
+extern uint dhd_txbound;
+extern uint dhd_rxbound;
+module_param(dhd_txbound, uint, 0);
+module_param(dhd_rxbound, uint, 0);
- ASSERT(ea != NULL);
- ifp = dhd_get_ifp((dhd_pub_t *)pub, ifidx);
- if (ifp == NULL)
- return DHD_STA_NULL;
+/* Deferred transmits */
+extern uint dhd_deferred_tx;
+module_param(dhd_deferred_tx, uint, 0);
- if (!memcmp(ifp->net->dev_addr, ea, ETHER_ADDR_LEN)) {
- DHD_ERROR(("%s: Serious FAILURE, receive own MAC %pM !!\n", __FUNCTION__, ea));
- return DHD_STA_NULL;
- }
+#endif /* BCMSDIO */
- sta = dhd_sta_alloc((dhd_pub_t *)pub);
- if (sta == DHD_STA_NULL) {
- DHD_ERROR(("%s: Alloc failed\n", __FUNCTION__));
- return DHD_STA_NULL;
- }
- memcpy(sta->ea.octet, ea, ETHER_ADDR_LEN);
+#ifdef SDTEST
+/* Echo packet generator (pkts/s) */
+uint dhd_pktgen = 0;
+module_param(dhd_pktgen, uint, 0);
- /* link the sta and the dhd interface */
- sta->ifp = ifp;
- sta->ifidx = ifidx;
- INIT_LIST_HEAD(&sta->list);
+/* Echo packet len (0 => sawtooth, max 2040) */
+uint dhd_pktgen_len = 0;
+module_param(dhd_pktgen_len, uint, 0);
+#endif /* SDTEST */
- DHD_IF_STA_LIST_LOCK(ifp, flags);
- list_add_tail(&sta->list, &ifp->sta_list);
- DHD_ERROR(("%s: Adding STA " MACDBG "\n",
- __FUNCTION__, MAC2STRDBG((char *)ea)));
+#ifndef BCMDBUS
+/* Allow delayed firmware download for debug purposes */
+int allow_delay_fwdl = FALSE;
+module_param(allow_delay_fwdl, int, 0);
+#endif /* !BCMDBUS */
- DHD_IF_STA_LIST_UNLOCK(ifp, flags);
+extern char dhd_version[];
+extern char fw_version[];
+extern char clm_version[];
- return sta;
-}
+int dhd_net_bus_devreset(struct net_device *dev, uint8 flag);
+static void dhd_net_if_lock_local(dhd_info_t *dhd);
+static void dhd_net_if_unlock_local(dhd_info_t *dhd);
+static void dhd_suspend_lock(dhd_pub_t *dhdp);
+static void dhd_suspend_unlock(dhd_pub_t *dhdp);
-/** Delete all STAs from the interface's STA list. */
-void
-dhd_del_all_sta(void *pub, int ifidx)
-{
- dhd_sta_t *sta, *next;
- dhd_if_t *ifp;
- unsigned long flags;
+#ifdef WLMEDIA_HTSF
+void htsf_update(dhd_info_t *dhd, void *data);
+tsf_t prev_tsf, cur_tsf;
- ifp = dhd_get_ifp((dhd_pub_t *)pub, ifidx);
- if (ifp == NULL)
- return;
+uint32 dhd_get_htsf(dhd_info_t *dhd, int ifidx);
+static int dhd_ioctl_htsf_get(dhd_info_t *dhd, int ifidx);
+static void dhd_dump_latency(void);
+static void dhd_htsf_addtxts(dhd_pub_t *dhdp, void *pktbuf);
+static void dhd_htsf_addrxts(dhd_pub_t *dhdp, void *pktbuf);
+static void dhd_dump_htsfhisto(histo_t *his, char *s);
+#endif /* WLMEDIA_HTSF */
- DHD_IF_STA_LIST_LOCK(ifp, flags);
-#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
-#pragma GCC diagnostic push
-#pragma GCC diagnostic ignored "-Wcast-qual"
-#endif // endif
- list_for_each_entry_safe(sta, next, &ifp->sta_list, list) {
+/* Monitor interface */
+int dhd_monitor_init(void *dhd_pub);
+int dhd_monitor_uninit(void);
- list_del(&sta->list);
- dhd_sta_free(&ifp->info->pub, sta);
-#ifdef DHD_L2_FILTER
- if (ifp->parp_enable) {
- /* clear Proxy ARP cache of specific Ethernet Address */
- bcm_l2_filter_arp_table_update(((dhd_pub_t*)pub)->osh,
- ifp->phnd_arp_table, FALSE,
- sta->ea.octet, FALSE, ((dhd_pub_t*)pub)->tickcnt);
- }
-#endif /* DHD_L2_FILTER */
- }
-#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
-#pragma GCC diagnostic pop
-#endif // endif
- DHD_IF_STA_LIST_UNLOCK(ifp, flags);
- return;
-}
+#if defined(WL_WIRELESS_EXT)
+struct iw_statistics *dhd_get_wireless_stats(struct net_device *dev);
+#endif /* defined(WL_WIRELESS_EXT) */
-/** Delete STA from the interface's STA list. */
-void
-dhd_del_sta(void *pub, int ifidx, void *ea)
-{
- dhd_sta_t *sta, *next;
- dhd_if_t *ifp;
- unsigned long flags;
+#ifndef BCMDBUS
+static void dhd_dpc(ulong data);
+#endif /* !BCMDBUS */
+/* forward decl */
+extern int dhd_wait_pend8021x(struct net_device *dev);
+void dhd_os_wd_timer_extend(void *bus, bool extend);
- ASSERT(ea != NULL);
- ifp = dhd_get_ifp((dhd_pub_t *)pub, ifidx);
- if (ifp == NULL)
- return;
+#ifdef TOE
+#ifndef BDC
+#error TOE requires BDC
+#endif /* !BDC */
+static int dhd_toe_get(dhd_info_t *dhd, int idx, uint32 *toe_ol);
+static int dhd_toe_set(dhd_info_t *dhd, int idx, uint32 toe_ol);
+#endif /* TOE */
+
+static int dhd_wl_host_event(dhd_info_t *dhd, int ifidx, void *pktdata, uint16 pktlen,
+ wl_event_msg_t *event_ptr, void **data_ptr);
+
+#if defined(CONFIG_PM_SLEEP)
+static int dhd_pm_callback(struct notifier_block *nfb, unsigned long action, void *ignored)
+{
+ int ret = NOTIFY_DONE;
+ bool suspend = FALSE;
- DHD_IF_STA_LIST_LOCK(ifp, flags);
#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wcast-qual"
-#endif // endif
- list_for_each_entry_safe(sta, next, &ifp->sta_list, list) {
- if (!memcmp(sta->ea.octet, ea, ETHER_ADDR_LEN)) {
- DHD_ERROR(("%s: Deleting STA " MACDBG "\n",
- __FUNCTION__, MAC2STRDBG(sta->ea.octet)));
- list_del(&sta->list);
- dhd_sta_free(&ifp->info->pub, sta);
- }
- }
+#endif
+ dhd_info_t *dhdinfo = (dhd_info_t*)container_of(nfb, struct dhd_info, pm_notifier);
#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
#pragma GCC diagnostic pop
-#endif // endif
- DHD_IF_STA_LIST_UNLOCK(ifp, flags);
-#ifdef DHD_L2_FILTER
- if (ifp->parp_enable) {
- /* clear Proxy ARP cache of specific Ethernet Address */
- bcm_l2_filter_arp_table_update(((dhd_pub_t*)pub)->osh, ifp->phnd_arp_table, FALSE,
- ea, FALSE, ((dhd_pub_t*)pub)->tickcnt);
- }
-#endif /* DHD_L2_FILTER */
- return;
-}
+#endif
-/** Add STA if it doesn't exist. Not reentrant. */
-dhd_sta_t*
-dhd_findadd_sta(void *pub, int ifidx, void *ea)
-{
- dhd_sta_t *sta;
+ BCM_REFERENCE(dhdinfo);
+ BCM_REFERENCE(suspend);
- sta = dhd_find_sta(pub, ifidx, ea);
+ switch (action) {
+ case PM_HIBERNATION_PREPARE:
+ case PM_SUSPEND_PREPARE:
+ suspend = TRUE;
+ break;
- if (!sta) {
- /* Add entry */
- sta = dhd_add_sta(pub, ifidx, ea);
+ case PM_POST_HIBERNATION:
+ case PM_POST_SUSPEND:
+ suspend = FALSE;
+ break;
}
- return sta;
-}
+#if defined(SUPPORT_P2P_GO_PS) && defined(PROP_TXSTATUS)
+ if (suspend) {
+ DHD_OS_WAKE_LOCK_WAIVE(&dhdinfo->pub);
+ dhd_wlfc_suspend(&dhdinfo->pub);
+ DHD_OS_WAKE_LOCK_RESTORE(&dhdinfo->pub);
+ } else {
+ dhd_wlfc_resume(&dhdinfo->pub);
+ }
+#endif /* defined(SUPPORT_P2P_GO_PS) && defined(PROP_TXSTATUS) */
-#if defined(DHD_IGMP_UCQUERY) || defined(DHD_UCAST_UPNP)
-static struct list_head *
-dhd_sta_list_snapshot(dhd_info_t *dhd, dhd_if_t *ifp, struct list_head *snapshot_list)
-{
- unsigned long flags;
- dhd_sta_t *sta, *snapshot;
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && (LINUX_VERSION_CODE <= \
+ KERNEL_VERSION(2, 6, 39))
+ dhd_mmc_suspend = suspend;
+ smp_mb();
+#endif
- INIT_LIST_HEAD(snapshot_list);
+ return ret;
+}
- DHD_IF_STA_LIST_LOCK(ifp, flags);
+/* Make sure we don't register the same notifier twice; otherwise a loop is likely to be
+ * created in the kernel notifier linked list (with 'next' pointing to itself)
+ */
+static bool dhd_pm_notifier_registered = FALSE;
- list_for_each_entry(sta, &ifp->sta_list, list) {
- /* allocate one and add to snapshot */
- snapshot = (dhd_sta_t *)MALLOC(dhd->pub.osh, sizeof(dhd_sta_t));
- if (snapshot == NULL) {
- DHD_ERROR(("%s: Cannot allocate memory\n", __FUNCTION__));
- continue;
- }
+extern int register_pm_notifier(struct notifier_block *nb);
+extern int unregister_pm_notifier(struct notifier_block *nb);
+#endif /* CONFIG_PM_SLEEP */
- memcpy(snapshot->ea.octet, sta->ea.octet, ETHER_ADDR_LEN);
+/* Request scheduling of the bus rx frame */
+static void dhd_sched_rxf(dhd_pub_t *dhdp, void *skb);
+static void dhd_os_rxflock(dhd_pub_t *pub);
+static void dhd_os_rxfunlock(dhd_pub_t *pub);
- INIT_LIST_HEAD(&snapshot->list);
- list_add_tail(&snapshot->list, snapshot_list);
- }
+/** priv_link is the link between the netdev and the dhd_if and dhd_info structs. */
+typedef struct dhd_dev_priv {
+ dhd_info_t * dhd; /* cached pointer to dhd_info in netdevice priv */
+ dhd_if_t * ifp; /* cached pointer to dhd_if in netdevice priv */
+ int ifidx; /* interface index */
+ void * lkup;
+} dhd_dev_priv_t;
- DHD_IF_STA_LIST_UNLOCK(ifp, flags);
+#define DHD_DEV_PRIV_SIZE (sizeof(dhd_dev_priv_t))
+#define DHD_DEV_PRIV(dev) ((dhd_dev_priv_t *)DEV_PRIV(dev))
+#define DHD_DEV_INFO(dev) (((dhd_dev_priv_t *)DEV_PRIV(dev))->dhd)
+#define DHD_DEV_IFP(dev) (((dhd_dev_priv_t *)DEV_PRIV(dev))->ifp)
+#define DHD_DEV_IFIDX(dev) (((dhd_dev_priv_t *)DEV_PRIV(dev))->ifidx)
+#define DHD_DEV_LKUP(dev) (((dhd_dev_priv_t *)DEV_PRIV(dev))->lkup)
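+
+/*
+ * Illustrative usage (hypothetical call site): a netdev callback that only
+ * has the struct net_device can recover driver state from the cached
+ * private area, e.g.:
+ *   dhd_info_t *dhd = DHD_DEV_INFO(net);
+ *   int ifidx = DHD_DEV_IFIDX(net);
+ */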
- return snapshot_list;
+#if defined(DHD_OF_SUPPORT)
+extern int dhd_wlan_init(void);
+#endif /* defined(DHD_OF_SUPPORT) */
+/** Clear the dhd net_device's private structure. */
+static inline void
+dhd_dev_priv_clear(struct net_device * dev)
+{
+ dhd_dev_priv_t * dev_priv;
+ ASSERT(dev != (struct net_device *)NULL);
+ dev_priv = DHD_DEV_PRIV(dev);
+ dev_priv->dhd = (dhd_info_t *)NULL;
+ dev_priv->ifp = (dhd_if_t *)NULL;
+ dev_priv->ifidx = DHD_BAD_IF;
+ dev_priv->lkup = (void *)NULL;
}
-static void
-dhd_sta_list_snapshot_free(dhd_info_t *dhd, struct list_head *snapshot_list)
+/** Setup the dhd net_device's private structure. */
+static inline void
+dhd_dev_priv_save(struct net_device * dev, dhd_info_t * dhd, dhd_if_t * ifp,
+ int ifidx)
{
- dhd_sta_t *sta, *next;
+ dhd_dev_priv_t * dev_priv;
+ ASSERT(dev != (struct net_device *)NULL);
+ dev_priv = DHD_DEV_PRIV(dev);
+ dev_priv->dhd = dhd;
+ dev_priv->ifp = ifp;
+ dev_priv->ifidx = ifidx;
+}
- list_for_each_entry_safe(sta, next, snapshot_list, list) {
- list_del(&sta->list);
- MFREE(dhd->pub.osh, sta, sizeof(dhd_sta_t));
+#ifdef PCIE_FULL_DONGLE
+
+/** Dummy objects are defined with state representing bad|down.
+ * Using them in place of NULL pointers removes branch conditionals and load
+ * shadows on fast paths, improving instruction parallelism, dual issue and
+ * use of larger pipelines.
+ * Use DHD_XXX_NULL instead of (dhd_xxx_t *)NULL whenever an object pointer
+ * is accessed via the dhd_sta_t.
+ */
+
+/* Dummy dhd_info object */
+dhd_info_t dhd_info_null = {
+#if defined(BCM_GMAC3)
+ .fwdh = FWDER_NULL,
+#endif
+ .pub = {
+ .info = &dhd_info_null,
+#ifdef DHDTCPACK_SUPPRESS
+ .tcpack_sup_mode = TCPACK_SUP_REPLACE,
+#endif /* DHDTCPACK_SUPPRESS */
+#if defined(TRAFFIC_MGMT_DWM)
+ .dhd_tm_dwm_tbl = { .dhd_dwm_enabled = TRUE },
+#endif
+ .up = FALSE,
+ .busstate = DHD_BUS_DOWN
}
-}
-#endif /* DHD_IGMP_UCQUERY || DHD_UCAST_UPNP */
+};
+#define DHD_INFO_NULL (&dhd_info_null)
+#define DHD_PUB_NULL (&dhd_info_null.pub)
-#else
-static inline void dhd_if_flush_sta(dhd_if_t * ifp) { }
-static inline void dhd_if_del_sta_list(dhd_if_t *ifp) {}
-static inline int dhd_sta_pool_init(dhd_pub_t *dhdp, int max_sta) { return BCME_OK; }
-static inline void dhd_sta_pool_fini(dhd_pub_t *dhdp, int max_sta) {}
-static inline void dhd_sta_pool_clear(dhd_pub_t *dhdp, int max_sta) {}
-dhd_sta_t *dhd_findadd_sta(void *pub, int ifidx, void *ea) { return NULL; }
-dhd_sta_t *dhd_find_sta(void *pub, int ifidx, void *ea) { return NULL; }
-void dhd_del_sta(void *pub, int ifidx, void *ea) {}
-#endif /* PCIE_FULL_DONGLE */
+/* Dummy netdevice object */
+struct net_device dhd_net_dev_null = {
+ .reg_state = NETREG_UNREGISTERED
+};
+#define DHD_NET_DEV_NULL (&dhd_net_dev_null)
-#if defined(DNGL_AXI_ERROR_LOGGING) && defined(DHD_USE_WQ_FOR_DNGL_AXI_ERROR)
-void
-dhd_axi_error_dispatch(dhd_pub_t *dhdp)
-{
- dhd_info_t *dhd = dhdp->info;
- schedule_work(&dhd->axi_error_dispatcher_work);
-}
+/* Dummy dhd_if object */
+dhd_if_t dhd_if_null = {
+#if defined(BCM_GMAC3)
+ .fwdh = FWDER_NULL,
+#endif
+#ifdef WMF
+ .wmf = { .wmf_enable = TRUE },
+#endif
+ .info = DHD_INFO_NULL,
+ .net = DHD_NET_DEV_NULL,
+ .idx = DHD_BAD_IF
+};
+#define DHD_IF_NULL (&dhd_if_null)
-static void dhd_axi_error_dispatcher_fn(struct work_struct * work)
-{
- struct dhd_info *dhd =
- container_of(work, struct dhd_info, axi_error_dispatcher_work);
- dhd_axi_error(&dhd->pub);
-}
-#endif /* DNGL_AXI_ERROR_LOGGING && DHD_USE_WQ_FOR_DNGL_AXI_ERROR */
+#define DHD_STA_NULL ((dhd_sta_t *)NULL)
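+
+/*
+ * Note (illustrative): dhd_sta_free() below resets freed entries to point at
+ * these dummies (sta->ifp = DHD_IF_NULL) and to sentinel values
+ * (sta->ifidx = DHD_BAD_IF, sta->idx = ID16_INVALID), so a stale reference
+ * reaches a well-defined "down" object rather than a NULL pointer.
+ */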
-/** Returns dhd iflist index corresponding the the bssidx provided by apps */
-int dhd_bssidx2idx(dhd_pub_t *dhdp, uint32 bssidx)
-{
- dhd_if_t *ifp;
- dhd_info_t *dhd = dhdp->info;
- int i;
+/** Interface STA list management. */
- ASSERT(bssidx < DHD_MAX_IFS);
- ASSERT(dhdp);
+/** Fetch the dhd_if object, given the interface index in the dhd. */
+static inline dhd_if_t *dhd_get_ifp(dhd_pub_t *dhdp, uint32 ifidx);
- for (i = 0; i < DHD_MAX_IFS; i++) {
- ifp = dhd->iflist[i];
- if (ifp && (ifp->bssidx == bssidx)) {
- DHD_TRACE(("Index manipulated for %s from %d to %d\n",
- ifp->name, bssidx, i));
- break;
- }
- }
- return i;
-}
+/** Alloc/Free a dhd_sta object from the dhd instance's sta_pool. */
+static void dhd_sta_free(dhd_pub_t *pub, dhd_sta_t *sta);
+static dhd_sta_t * dhd_sta_alloc(dhd_pub_t * dhdp);
-static inline int dhd_rxf_enqueue(dhd_pub_t *dhdp, void* skb)
-{
- uint32 store_idx;
- uint32 sent_idx;
+/* Delete a dhd_sta or flush all dhd_sta in an interface's sta_list. */
+static void dhd_if_del_sta_list(dhd_if_t * ifp);
+static void dhd_if_flush_sta(dhd_if_t * ifp);
- if (!skb) {
- DHD_ERROR(("dhd_rxf_enqueue: NULL skb!!!\n"));
- return BCME_ERROR;
- }
+/* Construct/Destruct a sta pool. */
+static int dhd_sta_pool_init(dhd_pub_t *dhdp, int max_sta);
+static void dhd_sta_pool_fini(dhd_pub_t *dhdp, int max_sta);
+/* Clear the pool of dhd_sta_t objects for built-in type driver */
+static void dhd_sta_pool_clear(dhd_pub_t *dhdp, int max_sta);
- dhd_os_rxflock(dhdp);
- store_idx = dhdp->store_idx;
- sent_idx = dhdp->sent_idx;
- if (dhdp->skbbuf[store_idx] != NULL) {
- /* Make sure the previous packets are processed */
- dhd_os_rxfunlock(dhdp);
- DHD_ERROR(("dhd_rxf_enqueue: pktbuf not consumed %p, store idx %d sent idx %d\n",
- skb, store_idx, sent_idx));
- /* removed msleep here, should use wait_event_timeout if we
- * want to give rx frame thread a chance to run
- */
-#if defined(WAIT_DEQUEUE)
- OSL_SLEEP(1);
-#endif // endif
- return BCME_ERROR;
- }
- DHD_TRACE(("dhd_rxf_enqueue: Store SKB %p. idx %d -> %d\n",
- skb, store_idx, (store_idx + 1) & (MAXSKBPEND - 1)));
- dhdp->skbbuf[store_idx] = skb;
- dhdp->store_idx = (store_idx + 1) & (MAXSKBPEND - 1);
- dhd_os_rxfunlock(dhdp);
- return BCME_OK;
+/* Return interface pointer */
+static inline dhd_if_t *dhd_get_ifp(dhd_pub_t *dhdp, uint32 ifidx)
+{
+ ASSERT(ifidx < DHD_MAX_IFS);
+
+ if (ifidx >= DHD_MAX_IFS)
+ return NULL;
+
+ return dhdp->info->iflist[ifidx];
}
-static inline void* dhd_rxf_dequeue(dhd_pub_t *dhdp)
+/** Reset a dhd_sta object and free into the dhd pool. */
+static void
+dhd_sta_free(dhd_pub_t * dhdp, dhd_sta_t * sta)
{
- uint32 store_idx;
- uint32 sent_idx;
- void *skb;
+ int prio;
- dhd_os_rxflock(dhdp);
+ ASSERT((sta != DHD_STA_NULL) && (sta->idx != ID16_INVALID));
- store_idx = dhdp->store_idx;
- sent_idx = dhdp->sent_idx;
- skb = dhdp->skbbuf[sent_idx];
+ ASSERT((dhdp->staid_allocator != NULL) && (dhdp->sta_pool != NULL));
- if (skb == NULL) {
- dhd_os_rxfunlock(dhdp);
- DHD_ERROR(("dhd_rxf_dequeue: Dequeued packet is NULL, store idx %d sent idx %d\n",
- store_idx, sent_idx));
- return NULL;
- }
+ /*
+ * Flush and free all packets in all flowring's queues belonging to sta.
+ * Packets in flow ring will be flushed later.
+ */
+ for (prio = 0; prio < (int)NUMPRIO; prio++) {
+ uint16 flowid = sta->flowid[prio];
- dhdp->skbbuf[sent_idx] = NULL;
- dhdp->sent_idx = (sent_idx + 1) & (MAXSKBPEND - 1);
+ if (flowid != FLOWID_INVALID) {
+ unsigned long flags;
+ flow_queue_t * queue = dhd_flow_queue(dhdp, flowid);
+ flow_ring_node_t * flow_ring_node;
- DHD_TRACE(("dhd_rxf_dequeue: netif_rx_ni(%p), sent idx %d\n",
- skb, sent_idx));
+#ifdef DHDTCPACK_SUPPRESS
+ /* Clean tcp_ack_info_tbl in order to prevent access to flushed pkt,
+ * when there is a newly coming packet from network stack.
+ */
+ dhd_tcpack_info_tbl_clean(dhdp);
+#endif /* DHDTCPACK_SUPPRESS */
- dhd_os_rxfunlock(dhdp);
+ flow_ring_node = dhd_flow_ring_node(dhdp, flowid);
+ DHD_FLOWRING_LOCK(flow_ring_node->lock, flags);
+ flow_ring_node->status = FLOW_RING_STATUS_STA_FREEING;
- return skb;
-}
+ if (!DHD_FLOW_QUEUE_EMPTY(queue)) {
+ void * pkt;
+ while ((pkt = dhd_flow_queue_dequeue(dhdp, queue)) != NULL) {
+ PKTFREE(dhdp->osh, pkt, TRUE);
+ }
+ }
-int dhd_process_cid_mac(dhd_pub_t *dhdp, bool prepost)
-{
- if (prepost) { /* pre process */
- dhd_read_cis(dhdp);
- dhd_check_module_cid(dhdp);
- dhd_check_module_mac(dhdp);
- dhd_set_macaddr_from_file(dhdp);
- } else { /* post process */
- dhd_write_macaddr(&dhdp->mac);
- dhd_clear_cis(dhdp);
+ DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags);
+ ASSERT(DHD_FLOW_QUEUE_EMPTY(queue));
+ }
+
+ sta->flowid[prio] = FLOWID_INVALID;
}
- return 0;
+ id16_map_free(dhdp->staid_allocator, sta->idx);
+ DHD_CUMM_CTR_INIT(&sta->cumm_ctr);
+ sta->ifp = DHD_IF_NULL; /* dummy dhd_if object */
+ sta->ifidx = DHD_BAD_IF;
+ bzero(sta->ea.octet, ETHER_ADDR_LEN);
+ INIT_LIST_HEAD(&sta->list);
+ sta->idx = ID16_INVALID; /* implying free */
}
-#if defined(WL_CFG80211) && defined(DHD_FILE_DUMP_EVENT) && defined(DHD_FW_COREDUMP)
-static int dhd_wait_for_file_dump(dhd_pub_t *dhdp)
+/** Allocate a dhd_sta object from the dhd pool. */
+static dhd_sta_t *
+dhd_sta_alloc(dhd_pub_t * dhdp)
{
- struct net_device *primary_ndev;
- struct bcm_cfg80211 *cfg;
- unsigned long flags = 0;
- primary_ndev = dhd_linux_get_primary_netdev(dhdp);
+ uint16 idx;
+ dhd_sta_t * sta;
+ dhd_sta_pool_t * sta_pool;
- if (!primary_ndev) {
- DHD_ERROR(("%s: Cannot find primary netdev\n", __FUNCTION__));
- return BCME_ERROR;
- }
- cfg = wl_get_cfg(primary_ndev);
+ ASSERT((dhdp->staid_allocator != NULL) && (dhdp->sta_pool != NULL));
- if (!cfg) {
- DHD_ERROR(("%s: Cannot find cfg\n", __FUNCTION__));
- return BCME_ERROR;
+ idx = id16_map_alloc(dhdp->staid_allocator);
+ if (idx == ID16_INVALID) {
+ DHD_ERROR(("%s: cannot get free staid\n", __FUNCTION__));
+ return DHD_STA_NULL;
}
- DHD_GENERAL_LOCK(dhdp, flags);
- if (DHD_BUS_CHECK_DOWN_OR_DOWN_IN_PROGRESS(dhdp)) {
- DHD_BUS_BUSY_CLEAR_IN_HALDUMP(dhdp);
- dhd_os_busbusy_wake(dhdp);
- DHD_GENERAL_UNLOCK(dhdp, flags);
- DHD_ERROR(("%s: bus is down! can't collect log dump. \n", __FUNCTION__));
- return BCME_ERROR;
- }
- DHD_BUS_BUSY_SET_IN_HALDUMP(dhdp);
- DHD_GENERAL_UNLOCK(dhdp, flags);
+ sta_pool = (dhd_sta_pool_t *)(dhdp->sta_pool);
+ sta = &sta_pool[idx];
- DHD_OS_WAKE_LOCK(dhdp);
- /* check for hal started and only then send event if not clear dump state here */
- if (wl_cfg80211_is_hal_started(cfg)) {
- int timeleft = 0;
+ ASSERT((sta->idx == ID16_INVALID) &&
+ (sta->ifp == DHD_IF_NULL) && (sta->ifidx == DHD_BAD_IF));
- DHD_ERROR(("[DUMP] %s: HAL started. send urgent event\n", __FUNCTION__));
- dhd_dbg_send_urgent_evt(dhdp, NULL, 0);
+ DHD_CUMM_CTR_INIT(&sta->cumm_ctr);
- DHD_ERROR(("%s: wait to clear dhd_bus_busy_state: 0x%x\n",
- __FUNCTION__, dhdp->dhd_bus_busy_state));
- timeleft = dhd_os_busbusy_wait_bitmask(dhdp,
- &dhdp->dhd_bus_busy_state, DHD_BUS_BUSY_IN_HALDUMP, 0);
- if ((dhdp->dhd_bus_busy_state & DHD_BUS_BUSY_IN_HALDUMP) != 0) {
- DHD_ERROR(("%s: Timed out dhd_bus_busy_state=0x%x\n",
- __FUNCTION__, dhdp->dhd_bus_busy_state));
- }
- } else {
- DHD_ERROR(("[DUMP] %s: HAL Not started. skip urgent event\n", __FUNCTION__));
- }
- DHD_OS_WAKE_UNLOCK(dhdp);
- /* In case of dhd_os_busbusy_wait_bitmask() timeout,
- * hal dump bit will not be cleared. Hence clearing it here.
- */
- DHD_GENERAL_LOCK(dhdp, flags);
- DHD_BUS_BUSY_CLEAR_IN_HALDUMP(dhdp);
- dhd_os_busbusy_wake(dhdp);
- DHD_GENERAL_UNLOCK(dhdp, flags);
+ sta->idx = idx; /* implying allocated */
- return BCME_OK;
+ return sta;
}
-#endif /* WL_CFG80211 && DHD_FILE_DUMP_EVENT && DHD_FW_CORE_DUMP */
-// terence 20160615: fix building error if ARP_OFFLOAD_SUPPORT removed
-#if defined(PKT_FILTER_SUPPORT)
-#if defined(ARP_OFFLOAD_SUPPORT) && !defined(GAN_LITE_NAT_KEEPALIVE_FILTER)
-static bool
-_turn_on_arp_filter(dhd_pub_t *dhd, int op_mode_param)
+/** Delete all STAs in an interface's STA list. */
+static void
+dhd_if_del_sta_list(dhd_if_t *ifp)
{
- bool _apply = FALSE;
- /* In case of IBSS mode, apply arp pkt filter */
- if (op_mode_param & DHD_FLAG_IBSS_MODE) {
- _apply = TRUE;
- goto exit;
- }
- /* In case of P2P GO or GC, apply pkt filter to pass arp pkt to host */
- if (op_mode_param & (DHD_FLAG_P2P_GC_MODE | DHD_FLAG_P2P_GO_MODE)) {
- _apply = TRUE;
- goto exit;
+ dhd_sta_t *sta, *next;
+ unsigned long flags;
+
+ DHD_IF_STA_LIST_LOCK(ifp, flags);
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wcast-qual"
+#endif
+ list_for_each_entry_safe(sta, next, &ifp->sta_list, list) {
+#if defined(BCM_GMAC3)
+ if (ifp->fwdh) {
+ /* Remove sta from WOFA forwarder. */
+ fwder_deassoc(ifp->fwdh, (uint16 *)(sta->ea.octet), (uintptr_t)sta);
+ }
+#endif /* BCM_GMAC3 */
+ list_del(&sta->list);
+ dhd_sta_free(&ifp->info->pub, sta);
}
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic pop
+#endif
+ DHD_IF_STA_LIST_UNLOCK(ifp, flags);
-exit:
- return _apply;
+ return;
}
-#endif /* !GAN_LITE_NAT_KEEPALIVE_FILTER */
-void
-dhd_set_packet_filter(dhd_pub_t *dhd)
+/** Router/GMAC3: Flush all station entries in the forwarder's WOFA database. */
+static void
+dhd_if_flush_sta(dhd_if_t * ifp)
{
- int i;
+#if defined(BCM_GMAC3)
- DHD_TRACE(("%s: enter\n", __FUNCTION__));
- if (dhd_pkt_filter_enable) {
- for (i = 0; i < dhd->pktfilter_count; i++) {
- dhd_pktfilter_offload_set(dhd, dhd->pktfilter[i]);
+ if (ifp && (ifp->fwdh != FWDER_NULL)) {
+ dhd_sta_t *sta, *next;
+ unsigned long flags;
+
+ DHD_IF_STA_LIST_LOCK(ifp, flags);
+
+ list_for_each_entry_safe(sta, next, &ifp->sta_list, list) {
+ /* Remove any sta entry from WOFA forwarder. */
+ fwder_flush(ifp->fwdh, (uintptr_t)sta);
}
+
+ DHD_IF_STA_LIST_UNLOCK(ifp, flags);
}
+#endif /* BCM_GMAC3 */
}
-void
-dhd_enable_packet_filter(int value, dhd_pub_t *dhd)
+/** Construct a pool of dhd_sta_t objects to be used by interfaces. */
+static int
+dhd_sta_pool_init(dhd_pub_t *dhdp, int max_sta)
{
- int i;
+ int idx, prio, sta_pool_memsz;
+ dhd_sta_t * sta;
+ dhd_sta_pool_t * sta_pool;
+ void * staid_allocator;
- DHD_ERROR(("%s: enter, value = %d\n", __FUNCTION__, value));
- if ((dhd->op_mode & DHD_FLAG_HOSTAP_MODE) && value &&
- !dhd_conf_get_insuspend(dhd, AP_FILTER_IN_SUSPEND)) {
- DHD_ERROR(("%s: DHD_FLAG_HOSTAP_MODE\n", __FUNCTION__));
- return;
+ ASSERT(dhdp != (dhd_pub_t *)NULL);
+ ASSERT((dhdp->staid_allocator == NULL) && (dhdp->sta_pool == NULL));
+
+ /* dhd_sta objects per radio are managed in a table. id#0 reserved. */
+ staid_allocator = id16_map_init(dhdp->osh, max_sta, 1);
+ if (staid_allocator == NULL) {
+ DHD_ERROR(("%s: sta id allocator init failure\n", __FUNCTION__));
+ return BCME_ERROR;
}
- /* 1 - Enable packet filter, only allow unicast packet to send up */
- /* 0 - Disable packet filter */
- if (dhd_pkt_filter_enable && (!value ||
- (dhd_support_sta_mode(dhd) && !dhd->dhcp_in_progress) ||
- dhd_conf_get_insuspend(dhd, AP_FILTER_IN_SUSPEND)))
- {
- for (i = 0; i < dhd->pktfilter_count; i++) {
-// terence 20160615: fix building error if ARP_OFFLOAD_SUPPORT removed
-#if defined(ARP_OFFLOAD_SUPPORT) && !defined(GAN_LITE_NAT_KEEPALIVE_FILTER)
- if (value && (i == DHD_ARP_FILTER_NUM) &&
- !_turn_on_arp_filter(dhd, dhd->op_mode)) {
- DHD_TRACE(("Do not turn on ARP white list pkt filter:"
- "val %d, cnt %d, op_mode 0x%x\n",
- value, i, dhd->op_mode));
- continue;
- }
-#endif /* !GAN_LITE_NAT_KEEPALIVE_FILTER */
- dhd_pktfilter_offload_enable(dhd, dhd->pktfilter[i],
- value, dhd_master_mode);
+
+ /* Pre-allocate a pool of dhd_sta objects (one extra). */
+ sta_pool_memsz = ((max_sta + 1) * sizeof(dhd_sta_t)); /* skip idx 0 */
+ sta_pool = (dhd_sta_pool_t *)MALLOC(dhdp->osh, sta_pool_memsz);
+ if (sta_pool == NULL) {
+ DHD_ERROR(("%s: sta table alloc failure\n", __FUNCTION__));
+ id16_map_fini(dhdp->osh, staid_allocator);
+ return BCME_ERROR;
+ }
+
+ dhdp->sta_pool = sta_pool;
+ dhdp->staid_allocator = staid_allocator;
+
+ /* Initialize all sta(s) for the pre-allocated free pool. */
+ bzero((uchar *)sta_pool, sta_pool_memsz);
+ for (idx = max_sta; idx >= 1; idx--) { /* skip sta_pool[0] */
+ sta = &sta_pool[idx];
+ sta->idx = id16_map_alloc(staid_allocator);
+ ASSERT(sta->idx <= max_sta);
+ }
+ /* Now place them into the pre-allocated free pool. */
+ for (idx = 1; idx <= max_sta; idx++) {
+ sta = &sta_pool[idx];
+ for (prio = 0; prio < (int)NUMPRIO; prio++) {
+ sta->flowid[prio] = FLOWID_INVALID; /* Flow rings do not exist */
}
+ dhd_sta_free(dhdp, sta);
}
+
+ return BCME_OK;
}
-int
-dhd_packet_filter_add_remove(dhd_pub_t *dhdp, int add_remove, int num)
+/** Destruct the pool of dhd_sta_t objects.
+ * Caller must ensure that no STA objects are currently associated with an if.
+ */
+static void
+dhd_sta_pool_fini(dhd_pub_t *dhdp, int max_sta)
{
- char *filterp = NULL;
- int filter_id = 0;
+ dhd_sta_pool_t * sta_pool = (dhd_sta_pool_t *)dhdp->sta_pool;
- switch (num) {
- case DHD_BROADCAST_FILTER_NUM:
- filterp = "101 0 0 0 0xFFFFFFFFFFFF 0xFFFFFFFFFFFF";
- filter_id = 101;
- break;
- case DHD_MULTICAST4_FILTER_NUM:
- filter_id = 102;
- if (FW_SUPPORTED((dhdp), pf6)) {
- if (dhdp->pktfilter[num] != NULL) {
- dhd_pktfilter_offload_delete(dhdp, filter_id);
- dhdp->pktfilter[num] = NULL;
- }
- if (!add_remove) {
- filterp = DISCARD_IPV4_MCAST;
- add_remove = 1;
- break;
- }
- }
- filterp = "102 0 0 0 0xFFFFFF 0x01005E";
- break;
- case DHD_MULTICAST6_FILTER_NUM:
- filter_id = 103;
- if (FW_SUPPORTED((dhdp), pf6)) {
- if (dhdp->pktfilter[num] != NULL) {
- dhd_pktfilter_offload_delete(dhdp, filter_id);
- dhdp->pktfilter[num] = NULL;
- }
- if (!add_remove) {
- filterp = DISCARD_IPV6_MCAST;
- add_remove = 1;
- break;
- }
- }
- filterp = "103 0 0 0 0xFFFF 0x3333";
- break;
- case DHD_MDNS_FILTER_NUM:
- filterp = "104 0 0 0 0xFFFFFFFFFFFF 0x01005E0000FB";
- filter_id = 104;
- break;
- case DHD_ARP_FILTER_NUM:
- filterp = "105 0 0 12 0xFFFF 0x0806";
- filter_id = 105;
- break;
- case DHD_BROADCAST_ARP_FILTER_NUM:
- filterp = "106 0 0 0 0xFFFFFFFFFFFF0000000000000806"
- " 0xFFFFFFFFFFFF0000000000000806";
- filter_id = 106;
- break;
- default:
- return -EINVAL;
+ if (sta_pool) {
+ int idx;
+ int sta_pool_memsz = ((max_sta + 1) * sizeof(dhd_sta_t));
+ for (idx = 1; idx <= max_sta; idx++) {
+ ASSERT(sta_pool[idx].ifp == DHD_IF_NULL);
+ ASSERT(sta_pool[idx].idx == ID16_INVALID);
+ }
+ MFREE(dhdp->osh, dhdp->sta_pool, sta_pool_memsz);
+ dhdp->sta_pool = NULL;
}
- /* Add filter */
- if (add_remove) {
- dhdp->pktfilter[num] = filterp;
- dhd_pktfilter_offload_set(dhdp, dhdp->pktfilter[num]);
- } else { /* Delete filter */
- if (dhdp->pktfilter[num]) {
- dhd_pktfilter_offload_delete(dhdp, filter_id);
- dhdp->pktfilter[num] = NULL;
+ id16_map_fini(dhdp->osh, dhdp->staid_allocator);
+ dhdp->staid_allocator = NULL;
+}
+
+/* Clear the pool of dhd_sta_t objects for built-in type driver */
+static void
+dhd_sta_pool_clear(dhd_pub_t *dhdp, int max_sta)
+{
+ int idx, prio, sta_pool_memsz;
+ dhd_sta_t * sta;
+ dhd_sta_pool_t * sta_pool;
+ void *staid_allocator;
+
+ if (!dhdp) {
+ DHD_ERROR(("%s: dhdp is NULL\n", __FUNCTION__));
+ return;
+ }
+
+ sta_pool = (dhd_sta_pool_t *)dhdp->sta_pool;
+ staid_allocator = dhdp->staid_allocator;
+
+ if (!sta_pool) {
+ DHD_ERROR(("%s: sta_pool is NULL\n", __FUNCTION__));
+ return;
+ }
+
+ if (!staid_allocator) {
+ DHD_ERROR(("%s: staid_allocator is NULL\n", __FUNCTION__));
+ return;
+ }
+
+ /* clear free pool */
+ sta_pool_memsz = ((max_sta + 1) * sizeof(dhd_sta_t));
+ bzero((uchar *)sta_pool, sta_pool_memsz);
+
+ /* dhd_sta objects per radio are managed in a table. id#0 reserved. */
+ id16_map_clear(staid_allocator, max_sta, 1);
+
+ /* Initialize all sta(s) for the pre-allocated free pool. */
+ for (idx = max_sta; idx >= 1; idx--) { /* skip sta_pool[0] */
+ sta = &sta_pool[idx];
+ sta->idx = id16_map_alloc(staid_allocator);
+ ASSERT(sta->idx <= max_sta);
+ }
+ /* Now place them into the pre-allocated free pool. */
+ for (idx = 1; idx <= max_sta; idx++) {
+ sta = &sta_pool[idx];
+ for (prio = 0; prio < (int)NUMPRIO; prio++) {
+ sta->flowid[prio] = FLOWID_INVALID; /* Flow rings do not exist */
}
+ dhd_sta_free(dhdp, sta);
}
+}
- return 0;
+/** Find STA with MAC address ea in an interface's STA list. */
+dhd_sta_t *
+dhd_find_sta(void *pub, int ifidx, void *ea)
+{
+ dhd_sta_t *sta;
+ dhd_if_t *ifp;
+ unsigned long flags;
+
+ ASSERT(ea != NULL);
+ ifp = dhd_get_ifp((dhd_pub_t *)pub, ifidx);
+ if (ifp == NULL)
+ return DHD_STA_NULL;
+
+ DHD_IF_STA_LIST_LOCK(ifp, flags);
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wcast-qual"
+#endif
+ list_for_each_entry(sta, &ifp->sta_list, list) {
+ if (!memcmp(sta->ea.octet, ea, ETHER_ADDR_LEN)) {
+ DHD_INFO(("%s: found STA " MACDBG "\n",
+ __FUNCTION__, MAC2STRDBG((char *)ea)));
+ DHD_IF_STA_LIST_UNLOCK(ifp, flags);
+ return sta;
+ }
+ }
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic pop
+#endif
+ DHD_IF_STA_LIST_UNLOCK(ifp, flags);
+
+ return DHD_STA_NULL;
}
-#endif /* PKT_FILTER_SUPPORT */
-static int dhd_set_suspend(int value, dhd_pub_t *dhd)
+/** Add STA into the interface's STA list. */
+dhd_sta_t *
+dhd_add_sta(void *pub, int ifidx, void *ea)
{
-#ifndef SUPPORT_PM2_ONLY
- int power_mode = PM_MAX;
-#endif /* SUPPORT_PM2_ONLY */
- /* wl_pkt_filter_enable_t enable_parm; */
- int bcn_li_dtim = 0; /* Default bcn_li_dtim in resume mode is 0 */
- int ret = 0;
-#ifdef DHD_USE_EARLYSUSPEND
-#ifdef CUSTOM_BCN_TIMEOUT_IN_SUSPEND
- int bcn_timeout = 0;
-#endif /* CUSTOM_BCN_TIMEOUT_IN_SUSPEND */
-#ifdef CUSTOM_ROAM_TIME_THRESH_IN_SUSPEND
- int roam_time_thresh = 0; /* (ms) */
-#endif /* CUSTOM_ROAM_TIME_THRESH_IN_SUSPEND */
-#ifndef ENABLE_FW_ROAM_SUSPEND
- uint roamvar = 1;
-#endif /* ENABLE_FW_ROAM_SUSPEND */
-#ifdef ENABLE_BCN_LI_BCN_WAKEUP
- int bcn_li_bcn = 1;
-#endif /* ENABLE_BCN_LI_BCN_WAKEUP */
- uint nd_ra_filter = 0;
-#ifdef ENABLE_IPMCAST_FILTER
- int ipmcast_l2filter;
-#endif /* ENABLE_IPMCAST_FILTER */
-#ifdef CUSTOM_EVENT_PM_WAKE
- uint32 pm_awake_thresh = CUSTOM_EVENT_PM_WAKE;
-#endif /* CUSTOM_EVENT_PM_WAKE */
-#endif /* DHD_USE_EARLYSUSPEND */
-#ifdef PASS_ALL_MCAST_PKTS
- struct dhd_info *dhdinfo;
- uint32 allmulti;
- uint i;
-#endif /* PASS_ALL_MCAST_PKTS */
-#ifdef DYNAMIC_SWOOB_DURATION
-#ifndef CUSTOM_INTR_WIDTH
-#define CUSTOM_INTR_WIDTH 100
- int intr_width = 0;
-#endif /* CUSTOM_INTR_WIDTH */
-#endif /* DYNAMIC_SWOOB_DURATION */
+ dhd_sta_t *sta;
+ dhd_if_t *ifp;
+ unsigned long flags;
-#if defined(BCMPCIE)
- int lpas = 0;
- int dtim_period = 0;
- int bcn_interval = 0;
- int bcn_to_dly = 0;
-#if defined(CUSTOM_BCN_TIMEOUT_IN_SUSPEND) && defined(DHD_USE_EARLYSUSPEND)
- bcn_timeout = CUSTOM_BCN_TIMEOUT_SETTING;
-#else
- int bcn_timeout = CUSTOM_BCN_TIMEOUT_SETTING;
-#endif /* CUSTOM_BCN_TIMEOUT_IN_SUSPEND && DHD_USE_EARLYSUSPEND */
-#endif /* OEM_ANDROID && BCMPCIE */
+ ASSERT(ea != NULL);
+ ifp = dhd_get_ifp((dhd_pub_t *)pub, ifidx);
+ if (ifp == NULL)
+ return DHD_STA_NULL;
- if (!dhd)
- return -ENODEV;
+ sta = dhd_sta_alloc((dhd_pub_t *)pub);
+ if (sta == DHD_STA_NULL) {
+ DHD_ERROR(("%s: Alloc failed\n", __FUNCTION__));
+ return DHD_STA_NULL;
+ }
-#ifdef PASS_ALL_MCAST_PKTS
- dhdinfo = dhd->info;
-#endif /* PASS_ALL_MCAST_PKTS */
+ memcpy(sta->ea.octet, ea, ETHER_ADDR_LEN);
- DHD_TRACE(("%s: enter, value = %d in_suspend=%d\n",
- __FUNCTION__, value, dhd->in_suspend));
+ /* link the sta and the dhd interface */
+ sta->ifp = ifp;
+ sta->ifidx = ifidx;
+#ifdef DHD_WMF
+ sta->psta_prim = NULL;
+#endif
+ INIT_LIST_HEAD(&sta->list);
- dhd_suspend_lock(dhd);
+ DHD_IF_STA_LIST_LOCK(ifp, flags);
-#ifdef CUSTOM_SET_CPUCORE
- DHD_TRACE(("%s set cpucore(suspend%d)\n", __FUNCTION__, value));
- /* set specific cpucore */
- dhd_set_cpucore(dhd, TRUE);
-#endif /* CUSTOM_SET_CPUCORE */
- if (dhd->up) {
- if (value && dhd->in_suspend) {
-#ifdef PKT_FILTER_SUPPORT
- dhd->early_suspended = 1;
-#endif // endif
- /* Kernel suspended */
- DHD_ERROR(("%s: force extra Suspend setting\n", __FUNCTION__));
+ list_add_tail(&sta->list, &ifp->sta_list);
-#ifndef SUPPORT_PM2_ONLY
- dhd_wl_ioctl_cmd(dhd, WLC_SET_PM, (char *)&power_mode,
- sizeof(power_mode), TRUE, 0);
-#endif /* SUPPORT_PM2_ONLY */
+#if defined(BCM_GMAC3)
+ if (ifp->fwdh) {
+ ASSERT(ISALIGNED(ea, 2));
+ /* Add sta to WOFA forwarder. */
+ fwder_reassoc(ifp->fwdh, (uint16 *)ea, (uintptr_t)sta);
+ }
+#endif /* BCM_GMAC3 */
-#ifdef PKT_FILTER_SUPPORT
- /* Enable packet filter,
- * only allow unicast packet to send up
- */
- dhd_enable_packet_filter(1, dhd);
-#ifdef APF
- dhd_dev_apf_enable_filter(dhd_linux_get_primary_netdev(dhd));
-#endif /* APF */
-#endif /* PKT_FILTER_SUPPORT */
-#ifdef ARP_OFFLOAD_SUPPORT
- dhd_arp_offload_enable(dhd, TRUE);
-#endif /* ARP_OFFLOAD_SUPPORT */
+ DHD_IF_STA_LIST_UNLOCK(ifp, flags);
-#ifdef PASS_ALL_MCAST_PKTS
- allmulti = 0;
- for (i = 0; i < DHD_MAX_IFS; i++) {
- if (dhdinfo->iflist[i] && dhdinfo->iflist[i]->net)
- ret = dhd_iovar(dhd, i, "allmulti", (char *)&allmulti,
- sizeof(allmulti), NULL, 0, TRUE);
- if (ret < 0) {
- DHD_ERROR(("%s allmulti failed %d\n", __FUNCTION__, ret));
- }
- }
-#endif /* PASS_ALL_MCAST_PKTS */
-
- /* If DTIM skip is set up as default, force it to wake
- * each third DTIM for better power savings. Note that
- * one side effect is a chance to miss BC/MC packet.
- */
-#ifdef WLTDLS
- /* Do not set bcn_li_ditm on WFD mode */
- if (dhd->tdls_mode) {
- bcn_li_dtim = 0;
- } else
-#endif /* WLTDLS */
-#if defined(BCMPCIE)
- bcn_li_dtim = dhd_get_suspend_bcn_li_dtim(dhd, &dtim_period,
- &bcn_interval);
- ret = dhd_iovar(dhd, 0, "bcn_li_dtim", (char *)&bcn_li_dtim,
- sizeof(bcn_li_dtim), NULL, 0, TRUE);
- if (ret < 0) {
- DHD_ERROR(("%s bcn_li_dtim failed %d\n", __FUNCTION__, ret));
- }
- if ((bcn_li_dtim * dtim_period * bcn_interval) >=
- MIN_DTIM_FOR_ROAM_THRES_EXTEND) {
- /*
- * Increase max roaming threshold from 2 secs to 8 secs
- * the real roam threshold is MIN(max_roam_threshold,
- * bcn_timeout/2)
- */
- lpas = 1;
- ret = dhd_iovar(dhd, 0, "lpas", (char *)&lpas, sizeof(lpas),
- NULL, 0, TRUE);
- if (ret < 0) {
- DHD_ERROR(("%s lpas failed %d\n", __FUNCTION__, ret));
- }
- bcn_to_dly = 1;
- /*
- * if bcn_to_dly is 1, the real roam threshold is
- * MIN(max_roam_threshold, bcn_timeout -1);
- * notify link down event after roaming procedure complete
- * if we hit bcn_timeout while we are in roaming progress.
- */
- ret = dhd_iovar(dhd, 0, "bcn_to_dly", (char *)&bcn_to_dly,
- sizeof(bcn_to_dly), NULL, 0, TRUE);
- if (ret < 0) {
- DHD_ERROR(("%s bcn_to_dly failed %d\n", __FUNCTION__, ret));
- }
- /* Increase beacon timeout to 6 secs or use bigger one */
- bcn_timeout = max(bcn_timeout, BCN_TIMEOUT_IN_SUSPEND);
- ret = dhd_iovar(dhd, 0, "bcn_timeout", (char *)&bcn_timeout,
- sizeof(bcn_timeout), NULL, 0, TRUE);
- if (ret < 0) {
- DHD_ERROR(("%s bcn_timeout failed %d\n", __FUNCTION__, ret));
- }
- }
-#else
- bcn_li_dtim = dhd_get_suspend_bcn_li_dtim(dhd);
- if (dhd_iovar(dhd, 0, "bcn_li_dtim", (char *)&bcn_li_dtim,
- sizeof(bcn_li_dtim), NULL, 0, TRUE) < 0)
- DHD_ERROR(("%s: set dtim failed\n", __FUNCTION__));
-#endif /* OEM_ANDROID && BCMPCIE */
-#ifdef WL_CFG80211
- /* Disable cfg80211 feature events during suspend */
- ret = wl_cfg80211_config_suspend_events(
- dhd_linux_get_primary_netdev(dhd), FALSE);
- if (ret < 0) {
- DHD_ERROR(("failed to disable events (%d)\n", ret));
- }
-#endif /* WL_CFG80211 */
-#ifdef DHD_USE_EARLYSUSPEND
-#ifdef CUSTOM_BCN_TIMEOUT_IN_SUSPEND
- bcn_timeout = CUSTOM_BCN_TIMEOUT_IN_SUSPEND;
- ret = dhd_iovar(dhd, 0, "bcn_timeout", (char *)&bcn_timeout,
- sizeof(bcn_timeout), NULL, 0, TRUE);
- if (ret < 0) {
- DHD_ERROR(("%s bcn_timeout failed %d\n", __FUNCTION__, ret));
- }
-#endif /* CUSTOM_BCN_TIMEOUT_IN_SUSPEND */
-#ifdef CUSTOM_ROAM_TIME_THRESH_IN_SUSPEND
- roam_time_thresh = CUSTOM_ROAM_TIME_THRESH_IN_SUSPEND;
- ret = dhd_iovar(dhd, 0, "roam_time_thresh", (char *)&roam_time_thresh,
- sizeof(roam_time_thresh), NULL, 0, TRUE);
- if (ret < 0) {
- DHD_ERROR(("%s roam_time_thresh failed %d\n", __FUNCTION__, ret));
- }
-#endif /* CUSTOM_ROAM_TIME_THRESH_IN_SUSPEND */
-#ifndef ENABLE_FW_ROAM_SUSPEND
- /* Disable firmware roaming during suspend */
- ret = dhd_iovar(dhd, 0, "roam_off", (char *)&roamvar,
- sizeof(roamvar), NULL, 0, TRUE);
- if (ret < 0) {
- DHD_ERROR(("%s roam_off failed %d\n", __FUNCTION__, ret));
- }
-#endif /* ENABLE_FW_ROAM_SUSPEND */
-#ifdef ENABLE_BCN_LI_BCN_WAKEUP
- if (bcn_li_dtim) {
- bcn_li_bcn = 0;
- }
- ret = dhd_iovar(dhd, 0, "bcn_li_bcn", (char *)&bcn_li_bcn,
- sizeof(bcn_li_bcn), NULL, 0, TRUE);
- if (ret < 0) {
- DHD_ERROR(("%s bcn_li_bcn failed %d\n", __FUNCTION__, ret));
- }
-#endif /* ENABLE_BCN_LI_BCN_WAKEUP */
-#if defined(WL_CFG80211) && defined(WL_BCNRECV)
- ret = wl_android_bcnrecv_suspend(dhd_linux_get_primary_netdev(dhd));
- if (ret != BCME_OK) {
- DHD_ERROR(("failed to stop beacon recv event on"
- " suspend state (%d)\n", ret));
- }
-#endif /* WL_CFG80211 && WL_BCNRECV */
-#ifdef NDO_CONFIG_SUPPORT
- if (dhd->ndo_enable) {
- if (!dhd->ndo_host_ip_overflow) {
- /* enable ND offload on suspend */
- ret = dhd_ndo_enable(dhd, TRUE);
- if (ret < 0) {
- DHD_ERROR(("%s: failed to enable NDO\n",
- __FUNCTION__));
- }
- } else {
- DHD_INFO(("%s: NDO disabled on suspend due to"
- "HW capacity\n", __FUNCTION__));
- }
- }
-#endif /* NDO_CONFIG_SUPPORT */
-#ifndef APF
- if (FW_SUPPORTED(dhd, ndoe))
-#else
- if (FW_SUPPORTED(dhd, ndoe) && !FW_SUPPORTED(dhd, apf))
-#endif /* APF */
- {
- /* enable IPv6 RA filter in firmware during suspend */
- nd_ra_filter = 1;
- ret = dhd_iovar(dhd, 0, "nd_ra_filter_enable",
- (char *)&nd_ra_filter, sizeof(nd_ra_filter),
- NULL, 0, TRUE);
- if (ret < 0)
- DHD_ERROR(("failed to set nd_ra_filter (%d)\n",
- ret));
- }
- dhd_os_suppress_logging(dhd, TRUE);
-#ifdef ENABLE_IPMCAST_FILTER
- ipmcast_l2filter = 1;
- ret = dhd_iovar(dhd, 0, "ipmcast_l2filter",
- (char *)&ipmcast_l2filter, sizeof(ipmcast_l2filter),
- NULL, 0, TRUE);
- if (ret < 0) {
- DHD_ERROR(("failed to set ipmcast_l2filter (%d)\n", ret));
- }
-#endif /* ENABLE_IPMCAST_FILTER */
-#ifdef DYNAMIC_SWOOB_DURATION
- intr_width = CUSTOM_INTR_WIDTH;
- ret = dhd_iovar(dhd, 0, "bus:intr_width", (char *)&intr_width,
- sizeof(intr_width), NULL, 0, TRUE);
- if (ret < 0) {
- DHD_ERROR(("failed to set intr_width (%d)\n", ret));
- }
-#endif /* DYNAMIC_SWOOB_DURATION */
-#ifdef CUSTOM_EVENT_PM_WAKE
- pm_awake_thresh = CUSTOM_EVENT_PM_WAKE * 4;
- ret = dhd_iovar(dhd, 0, "const_awake_thresh",
- (char *)&pm_awake_thresh,
- sizeof(pm_awake_thresh), NULL, 0, TRUE);
- if (ret < 0) {
- DHD_ERROR(("%s set const_awake_thresh failed %d\n",
- __FUNCTION__, ret));
- }
-#endif /* CUSTOM_EVENT_PM_WAKE */
-#ifdef CONFIG_SILENT_ROAM
- if (!dhd->sroamed) {
- ret = dhd_sroam_set_mon(dhd, TRUE);
- if (ret < 0) {
- DHD_ERROR(("%s set sroam failed %d\n",
- __FUNCTION__, ret));
- }
- }
- dhd->sroamed = FALSE;
-#endif /* CONFIG_SILENT_ROAM */
-#endif /* DHD_USE_EARLYSUSPEND */
- } else {
-#ifdef PKT_FILTER_SUPPORT
- dhd->early_suspended = 0;
-#endif // endif
- /* Kernel resumed */
- DHD_ERROR(("%s: Remove extra suspend setting \n", __FUNCTION__));
-#ifdef DYNAMIC_SWOOB_DURATION
- intr_width = 0;
- ret = dhd_iovar(dhd, 0, "bus:intr_width", (char *)&intr_width,
- sizeof(intr_width), NULL, 0, TRUE);
- if (ret < 0) {
- DHD_ERROR(("failed to set intr_width (%d)\n", ret));
- }
-#endif /* DYNAMIC_SWOOB_DURATION */
-#ifndef SUPPORT_PM2_ONLY
- power_mode = PM_FAST;
- dhd_wl_ioctl_cmd(dhd, WLC_SET_PM, (char *)&power_mode,
- sizeof(power_mode), TRUE, 0);
-#endif /* SUPPORT_PM2_ONLY */
-#if defined(WL_CFG80211) && defined(WL_BCNRECV)
- ret = wl_android_bcnrecv_resume(dhd_linux_get_primary_netdev(dhd));
- if (ret != BCME_OK) {
- DHD_ERROR(("failed to resume beacon recv state (%d)\n",
- ret));
- }
-#endif /* WL_CF80211 && WL_BCNRECV */
-#ifdef ARP_OFFLOAD_SUPPORT
- dhd_arp_offload_enable(dhd, FALSE);
-#endif /* ARP_OFFLOAD_SUPPORT */
-#ifdef PKT_FILTER_SUPPORT
- /* disable pkt filter */
- dhd_enable_packet_filter(0, dhd);
-#ifdef APF
- dhd_dev_apf_disable_filter(dhd_linux_get_primary_netdev(dhd));
-#endif /* APF */
-#endif /* PKT_FILTER_SUPPORT */
-#ifdef PASS_ALL_MCAST_PKTS
- allmulti = 1;
- for (i = 0; i < DHD_MAX_IFS; i++) {
- if (dhdinfo->iflist[i] && dhdinfo->iflist[i]->net)
- ret = dhd_iovar(dhd, i, "allmulti", (char *)&allmulti,
- sizeof(allmulti), NULL, 0, TRUE);
- if (ret < 0) {
- DHD_ERROR(("%s: allmulti failed:%d\n", __FUNCTION__, ret));
- }
- }
-#endif /* PASS_ALL_MCAST_PKTS */
-#if defined(BCMPCIE)
- /* restore pre-suspend setting */
- ret = dhd_iovar(dhd, 0, "bcn_li_dtim", (char *)&bcn_li_dtim,
- sizeof(bcn_li_dtim), NULL, 0, TRUE);
- if (ret < 0) {
- DHD_ERROR(("%s:bcn_li_ditm failed:%d\n", __FUNCTION__, ret));
- }
- ret = dhd_iovar(dhd, 0, "lpas", (char *)&lpas, sizeof(lpas), NULL,
- 0, TRUE);
- if (ret < 0) {
- DHD_ERROR(("%s:lpas failed:%d\n", __FUNCTION__, ret));
- }
- ret = dhd_iovar(dhd, 0, "bcn_to_dly", (char *)&bcn_to_dly,
- sizeof(bcn_to_dly), NULL, 0, TRUE);
- if (ret < 0) {
- DHD_ERROR(("%s:bcn_to_dly failed:%d\n", __FUNCTION__, ret));
- }
- ret = dhd_iovar(dhd, 0, "bcn_timeout", (char *)&bcn_timeout,
- sizeof(bcn_timeout), NULL, 0, TRUE);
- if (ret < 0) {
- DHD_ERROR(("%s:bcn_timeout failed:%d\n", __FUNCTION__, ret));
- }
-#else
- /* restore pre-suspend setting for dtim_skip */
- ret = dhd_iovar(dhd, 0, "bcn_li_dtim", (char *)&bcn_li_dtim,
- sizeof(bcn_li_dtim), NULL, 0, TRUE);
- if (ret < 0) {
- DHD_ERROR(("%s:bcn_li_ditm fail:%d\n", __FUNCTION__, ret));
- }
-#endif /* OEM_ANDROID && BCMPCIE */
-#ifdef DHD_USE_EARLYSUSPEND
-#ifdef CUSTOM_BCN_TIMEOUT_IN_SUSPEND
- bcn_timeout = CUSTOM_BCN_TIMEOUT;
- ret = dhd_iovar(dhd, 0, "bcn_timeout", (char *)&bcn_timeout,
- sizeof(bcn_timeout), NULL, 0, TRUE);
- if (ret < 0) {
- DHD_ERROR(("%s:bcn_timeout failed:%d\n", __FUNCTION__, ret));
- }
-#endif /* CUSTOM_BCN_TIMEOUT_IN_SUSPEND */
-#ifdef CUSTOM_ROAM_TIME_THRESH_IN_SUSPEND
- roam_time_thresh = 2000;
- ret = dhd_iovar(dhd, 0, "roam_time_thresh", (char *)&roam_time_thresh,
- sizeof(roam_time_thresh), NULL, 0, TRUE);
- if (ret < 0) {
- DHD_ERROR(("%s:roam_time_thresh failed:%d\n", __FUNCTION__, ret));
- }
-
-#endif /* CUSTOM_ROAM_TIME_THRESH_IN_SUSPEND */
-#ifndef ENABLE_FW_ROAM_SUSPEND
- roamvar = dhd_roam_disable;
- ret = dhd_iovar(dhd, 0, "roam_off", (char *)&roamvar,
- sizeof(roamvar), NULL, 0, TRUE);
- if (ret < 0) {
- DHD_ERROR(("%s: roam_off fail:%d\n", __FUNCTION__, ret));
- }
-#endif /* ENABLE_FW_ROAM_SUSPEND */
-#ifdef ENABLE_BCN_LI_BCN_WAKEUP
- ret = dhd_iovar(dhd, 0, "bcn_li_bcn", (char *)&bcn_li_bcn,
- sizeof(bcn_li_bcn), NULL, 0, TRUE);
- if (ret < 0) {
- DHD_ERROR(("%s: bcn_li_bcn failed:%d\n", __FUNCTION__, ret));
- }
-#endif /* ENABLE_BCN_LI_BCN_WAKEUP */
-#ifdef NDO_CONFIG_SUPPORT
- if (dhd->ndo_enable) {
- /* Disable ND offload on resume */
- ret = dhd_ndo_enable(dhd, FALSE);
- if (ret < 0) {
- DHD_ERROR(("%s: failed to disable NDO\n",
- __FUNCTION__));
- }
- }
-#endif /* NDO_CONFIG_SUPPORT */
-#ifndef APF
- if (FW_SUPPORTED(dhd, ndoe))
-#else
- if (FW_SUPPORTED(dhd, ndoe) && !FW_SUPPORTED(dhd, apf))
-#endif /* APF */
- {
- /* disable IPv6 RA filter in firmware during suspend */
- nd_ra_filter = 0;
- ret = dhd_iovar(dhd, 0, "nd_ra_filter_enable",
- (char *)&nd_ra_filter, sizeof(nd_ra_filter),
- NULL, 0, TRUE);
- if (ret < 0) {
- DHD_ERROR(("failed to set nd_ra_filter (%d)\n",
- ret));
- }
- }
- dhd_os_suppress_logging(dhd, FALSE);
-#ifdef ENABLE_IPMCAST_FILTER
- ipmcast_l2filter = 0;
- ret = dhd_iovar(dhd, 0, "ipmcast_l2filter",
- (char *)&ipmcast_l2filter, sizeof(ipmcast_l2filter),
- NULL, 0, TRUE);
- if (ret < 0) {
- DHD_ERROR(("failed to clear ipmcast_l2filter ret:%d", ret));
- }
-#endif /* ENABLE_IPMCAST_FILTER */
-#ifdef CUSTOM_EVENT_PM_WAKE
- ret = dhd_iovar(dhd, 0, "const_awake_thresh",
- (char *)&pm_awake_thresh,
- sizeof(pm_awake_thresh), NULL, 0, TRUE);
- if (ret < 0) {
- DHD_ERROR(("%s set const_awake_thresh failed %d\n",
- __FUNCTION__, ret));
- }
-#endif /* CUSTOM_EVENT_PM_WAKE */
-#ifdef CONFIG_SILENT_ROAM
- ret = dhd_sroam_set_mon(dhd, FALSE);
- if (ret < 0) {
- DHD_ERROR(("%s set sroam failed %d\n", __FUNCTION__, ret));
- }
-#endif /* CONFIG_SILENT_ROAM */
-#endif /* DHD_USE_EARLYSUSPEND */
-#ifdef WL_CFG80211
- /* Enable cfg80211 feature events during resume */
- ret = wl_cfg80211_config_suspend_events(
- dhd_linux_get_primary_netdev(dhd), TRUE);
- if (ret < 0) {
- DHD_ERROR(("failed to enable events (%d)\n", ret));
- }
-#endif /* WL_CFG80211 */
-#ifdef DHD_LB_IRQSET
- dhd_irq_set_affinity(dhd, dhd->info->cpumask_primary);
-#endif /* DHD_LB_IRQSET */
- }
- }
- dhd_suspend_unlock(dhd);
-
- return 0;
-}
-
-static int dhd_suspend_resume_helper(struct dhd_info *dhd, int val, int force)
-{
- dhd_pub_t *dhdp = &dhd->pub;
- int ret = 0;
-
- DHD_OS_WAKE_LOCK(dhdp);
- DHD_PERIM_LOCK(dhdp);
-
- /* Set flag when early suspend was called */
- dhdp->in_suspend = val;
- if ((force || !dhdp->suspend_disable_flag) &&
- (dhd_support_sta_mode(dhdp) || dhd_conf_get_insuspend(dhdp, ALL_IN_SUSPEND)))
- {
- ret = dhd_set_suspend(val, dhdp);
- }
-
- DHD_PERIM_UNLOCK(dhdp);
- DHD_OS_WAKE_UNLOCK(dhdp);
- return ret;
-}
-
-#if defined(CONFIG_HAS_EARLYSUSPEND) && defined(DHD_USE_EARLYSUSPEND)
-static void dhd_early_suspend(struct early_suspend *h)
-{
- struct dhd_info *dhd = container_of(h, struct dhd_info, early_suspend);
- DHD_TRACE_HW4(("%s: enter\n", __FUNCTION__));
-
- if (dhd && dhd->pub.conf->suspend_mode == EARLY_SUSPEND) {
- dhd_suspend_resume_helper(dhd, 1, 0);
- dhd_conf_set_suspend_resume(&dhd->pub, 1);
- }
-}
-
-static void dhd_late_resume(struct early_suspend *h)
-{
- struct dhd_info *dhd = container_of(h, struct dhd_info, early_suspend);
- DHD_TRACE_HW4(("%s: enter\n", __FUNCTION__));
-
- if (dhd && dhd->pub.conf->suspend_mode == EARLY_SUSPEND) {
- dhd_conf_set_suspend_resume(&dhd->pub, 0);
- dhd_suspend_resume_helper(dhd, 0, 0);
- }
-}
-#endif /* CONFIG_HAS_EARLYSUSPEND && DHD_USE_EARLYSUSPEND */
-
-/*
- * Generalized timeout mechanism. Uses spin sleep with exponential back-off until
- * the sleep time reaches one jiffy, then switches over to task delay. Usage:
- *
- * dhd_timeout_start(&tmo, usec);
- * while (!dhd_timeout_expired(&tmo))
- * if (poll_something())
- * break;
- * if (dhd_timeout_expired(&tmo))
- * fatal();
- */
-
-void
-dhd_timeout_start(dhd_timeout_t *tmo, uint usec)
-{
- tmo->limit = usec;
- tmo->increment = 0;
- tmo->elapsed = 0;
- tmo->tick = jiffies_to_usecs(1);
-}
-
-int
-dhd_timeout_expired(dhd_timeout_t *tmo)
-{
- /* Does nothing on the first call */
- if (tmo->increment == 0) {
- tmo->increment = 1;
- return 0;
- }
-
- if (tmo->elapsed >= tmo->limit)
- return 1;
-
- /* Add the delay that's about to take place */
- tmo->elapsed += tmo->increment;
-
- if ((!CAN_SLEEP()) || tmo->increment < tmo->tick) {
- OSL_DELAY(tmo->increment);
- tmo->increment *= 2;
- if (tmo->increment > tmo->tick)
- tmo->increment = tmo->tick;
- } else {
- /*
- * OSL_SLEEP() corresponds to usleep_range(). In non-atomic context,
- * where the exact wakeup time is flexible, usleep_range() is preferable
- * to udelay(): it improves responsiveness and reduces power consumption.
- */
- OSL_SLEEP(jiffies_to_msecs(1));
- }
-
- return 0;
-}
-
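-/*
- * Back-off illustration (added for clarity; the HZ value is an assumption,
- * not taken from this source): with HZ = 250 one jiffy is 4000 us, so
- * successive dhd_timeout_expired() calls busy-wait for 1, 2, 4, ... 2048 us,
- * and once the doubled increment is capped at the tick each further call
- * sleeps roughly one jiffy via OSL_SLEEP(jiffies_to_msecs(1)) when sleeping
- * is allowed.
- */
-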
-int
-dhd_net2idx(dhd_info_t *dhd, struct net_device *net)
-{
- int i = 0;
-
- if (!dhd) {
- DHD_ERROR(("%s : DHD_BAD_IF return\n", __FUNCTION__));
- return DHD_BAD_IF;
- }
-
- while (i < DHD_MAX_IFS) {
- if (dhd->iflist[i] && dhd->iflist[i]->net && (dhd->iflist[i]->net == net))
- return i;
- i++;
- }
-
- return DHD_BAD_IF;
-}
-
-struct net_device * dhd_idx2net(void *pub, int ifidx)
-{
- struct dhd_pub *dhd_pub = (struct dhd_pub *)pub;
- struct dhd_info *dhd_info;
-
- if (!dhd_pub || ifidx < 0 || ifidx >= DHD_MAX_IFS)
- return NULL;
- dhd_info = dhd_pub->info;
- if (dhd_info && dhd_info->iflist[ifidx])
- return dhd_info->iflist[ifidx]->net;
- return NULL;
-}
-
-int
-dhd_ifname2idx(dhd_info_t *dhd, char *name)
-{
- int i = DHD_MAX_IFS;
-
- ASSERT(dhd);
-
- if (name == NULL || *name == '\0')
- return 0;
-
- while (--i > 0)
- if (dhd->iflist[i] && !strncmp(dhd->iflist[i]->dngl_name, name, IFNAMSIZ))
- break;
-
- DHD_TRACE(("%s: return idx %d for \"%s\"\n", __FUNCTION__, i, name));
-
- return i; /* default - the primary interface */
-}
-
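-/*
- * Usage sketch (illustrative only, not part of the original source): the
- * helpers above translate between a net_device, its interface index and a
- * dongle interface name; "wl0.1" is a hypothetical name.
- *
- *	int idx = dhd_net2idx(dhd, net);
- *	if (idx != DHD_BAD_IF && dhd_idx2net(&dhd->pub, idx) == net)
- *		idx = dhd_ifname2idx(dhd, "wl0.1");
- */
-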
-char *
-dhd_ifname(dhd_pub_t *dhdp, int ifidx)
-{
- dhd_info_t *dhd = (dhd_info_t *)dhdp->info;
-
- ASSERT(dhd);
-
- if (ifidx < 0 || ifidx >= DHD_MAX_IFS) {
- DHD_ERROR(("%s: ifidx %d out of range\n", __FUNCTION__, ifidx));
- return "<if_bad>";
- }
-
- if (dhd->iflist[ifidx] == NULL) {
- DHD_ERROR(("%s: null i/f %d\n", __FUNCTION__, ifidx));
- return "<if_null>";
- }
-
- if (dhd->iflist[ifidx]->net)
- return dhd->iflist[ifidx]->net->name;
-
- return "<if_none>";
-}
-
-uint8 *
-dhd_bssidx2bssid(dhd_pub_t *dhdp, int idx)
-{
- int i;
- dhd_info_t *dhd = (dhd_info_t *)dhdp;
-
- ASSERT(dhd);
- for (i = 0; i < DHD_MAX_IFS; i++)
- if (dhd->iflist[i] && dhd->iflist[i]->bssidx == idx)
- return dhd->iflist[i]->mac_addr;
-
- return NULL;
-}
-
-static void
-_dhd_set_multicast_list(dhd_info_t *dhd, int ifidx)
-{
- struct net_device *dev;
- struct netdev_hw_addr *ha;
- uint32 allmulti, cnt;
-
- wl_ioctl_t ioc;
- char *buf, *bufp;
- uint buflen;
- int ret;
-
-#ifdef MCAST_LIST_ACCUMULATION
- int i;
- uint32 cnt_iface[DHD_MAX_IFS];
- cnt = 0;
- allmulti = 0;
-
- for (i = 0; i < DHD_MAX_IFS; i++) {
- if (dhd->iflist[i]) {
- dev = dhd->iflist[i]->net;
- if (!dev)
- continue;
- netif_addr_lock_bh(dev);
- cnt_iface[i] = netdev_mc_count(dev);
- cnt += cnt_iface[i];
- netif_addr_unlock_bh(dev);
-
- /* Determine initial value of allmulti flag */
- allmulti |= (dev->flags & IFF_ALLMULTI) ? TRUE : FALSE;
- }
- }
-#else /* !MCAST_LIST_ACCUMULATION */
- if (!dhd->iflist[ifidx]) {
- DHD_ERROR(("%s : dhd->iflist[%d] was NULL\n", __FUNCTION__, ifidx));
- return;
- }
- dev = dhd->iflist[ifidx]->net;
- if (!dev)
- return;
- netif_addr_lock_bh(dev);
- cnt = netdev_mc_count(dev);
- netif_addr_unlock_bh(dev);
-
- /* Determine initial value of allmulti flag */
- allmulti = (dev->flags & IFF_ALLMULTI) ? TRUE : FALSE;
-#endif /* MCAST_LIST_ACCUMULATION */
-
-#ifdef PASS_ALL_MCAST_PKTS
-#ifdef PKT_FILTER_SUPPORT
- if (!dhd->pub.early_suspended)
-#endif /* PKT_FILTER_SUPPORT */
- allmulti = TRUE;
-#endif /* PASS_ALL_MCAST_PKTS */
-
- /* Send down the multicast list first. */
-
- buflen = sizeof("mcast_list") + sizeof(cnt) + (cnt * ETHER_ADDR_LEN);
- if (!(bufp = buf = MALLOC(dhd->pub.osh, buflen))) {
- DHD_ERROR(("%s: out of memory for mcast_list, cnt %d\n",
- dhd_ifname(&dhd->pub, ifidx), cnt));
- return;
- }
-
- strncpy(bufp, "mcast_list", buflen - 1);
- bufp[buflen - 1] = '\0';
- bufp += strlen("mcast_list") + 1;
-
- cnt = htol32(cnt);
- memcpy(bufp, &cnt, sizeof(cnt));
- bufp += sizeof(cnt);
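- /*
- * Layout of the iovar buffer assembled above (inferred from this code,
- * shown only as an illustration):
- *
- *   | "mcast_list\0" | uint32 cnt (LE) | cnt x ETHER_ADDR_LEN bytes |
- *   ^ buf                               ^ bufp now points here
- */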
-
-#ifdef MCAST_LIST_ACCUMULATION
- for (i = 0; i < DHD_MAX_IFS; i++) {
- if (dhd->iflist[i]) {
- DHD_TRACE(("_dhd_set_multicast_list: ifidx %d\n", i));
- dev = dhd->iflist[i]->net;
-
- netif_addr_lock_bh(dev);
-#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
-#pragma GCC diagnostic push
-#pragma GCC diagnostic ignored "-Wcast-qual"
-#endif // endif
- netdev_for_each_mc_addr(ha, dev) {
-#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
-#pragma GCC diagnostic pop
-#endif // endif
- if (!cnt_iface[i])
- break;
- memcpy(bufp, ha->addr, ETHER_ADDR_LEN);
- bufp += ETHER_ADDR_LEN;
- DHD_TRACE(("_dhd_set_multicast_list: cnt "
- "%d " MACDBG "\n",
- cnt_iface[i], MAC2STRDBG(ha->addr)));
- cnt_iface[i]--;
- }
- netif_addr_unlock_bh(dev);
- }
- }
-#else /* !MCAST_LIST_ACCUMULATION */
- netif_addr_lock_bh(dev);
-#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
-#pragma GCC diagnostic push
-#pragma GCC diagnostic ignored "-Wcast-qual"
-#endif // endif
- netdev_for_each_mc_addr(ha, dev) {
- if (!cnt)
- break;
- memcpy(bufp, ha->addr, ETHER_ADDR_LEN);
- bufp += ETHER_ADDR_LEN;
- cnt--;
- }
-#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
-#pragma GCC diagnostic pop
-#endif // endif
- netif_addr_unlock_bh(dev);
-#endif /* MCAST_LIST_ACCUMULATION */
-
- memset(&ioc, 0, sizeof(ioc));
- ioc.cmd = WLC_SET_VAR;
- ioc.buf = buf;
- ioc.len = buflen;
- ioc.set = TRUE;
-
- ret = dhd_wl_ioctl(&dhd->pub, ifidx, &ioc, ioc.buf, ioc.len);
- if (ret < 0) {
- DHD_ERROR(("%s: set mcast_list failed, cnt %d\n",
- dhd_ifname(&dhd->pub, ifidx), cnt));
- allmulti = cnt ? TRUE : allmulti;
- }
-
- MFREE(dhd->pub.osh, buf, buflen);
-
- /* Now send the allmulti setting. This is based on the setting in the
- * net_device flags, but might be modified above to be turned on if we
- * were trying to set some addresses and dongle rejected it...
- */
-
- allmulti = htol32(allmulti);
- ret = dhd_iovar(&dhd->pub, ifidx, "allmulti", (char *)&allmulti,
- sizeof(allmulti), NULL, 0, TRUE);
- if (ret < 0) {
- DHD_ERROR(("%s: set allmulti %d failed\n",
- dhd_ifname(&dhd->pub, ifidx), ltoh32(allmulti)));
- }
-
- /* Finally, pick up the PROMISC flag as well, like the NIC driver does */
-
-#ifdef MCAST_LIST_ACCUMULATION
- allmulti = 0;
- for (i = 0; i < DHD_MAX_IFS; i++) {
- if (dhd->iflist[i]) {
- dev = dhd->iflist[i]->net;
- allmulti |= (dev->flags & IFF_PROMISC) ? TRUE : FALSE;
- }
- }
-#else
- allmulti = (dev->flags & IFF_PROMISC) ? TRUE : FALSE;
-#endif /* MCAST_LIST_ACCUMULATION */
-
- allmulti = htol32(allmulti);
-
- memset(&ioc, 0, sizeof(ioc));
- ioc.cmd = WLC_SET_PROMISC;
- ioc.buf = &allmulti;
- ioc.len = sizeof(allmulti);
- ioc.set = TRUE;
-
- ret = dhd_wl_ioctl(&dhd->pub, ifidx, &ioc, ioc.buf, ioc.len);
- if (ret < 0) {
- DHD_ERROR(("%s: set promisc %d failed\n",
- dhd_ifname(&dhd->pub, ifidx), ltoh32(allmulti)));
- }
-}
-
-int
-_dhd_set_mac_address(dhd_info_t *dhd, int ifidx, uint8 *addr)
-{
- int ret;
-
- ret = dhd_iovar(&dhd->pub, ifidx, "cur_etheraddr", (char *)addr,
- ETHER_ADDR_LEN, NULL, 0, TRUE);
- if (ret < 0) {
- DHD_ERROR(("%s: set cur_etheraddr failed\n", dhd_ifname(&dhd->pub, ifidx)));
- } else {
- memcpy(dhd->iflist[ifidx]->net->dev_addr, addr, ETHER_ADDR_LEN);
- if (ifidx == 0)
- memcpy(dhd->pub.mac.octet, addr, ETHER_ADDR_LEN);
- }
-
- return ret;
-}
-
-#ifdef DHD_PSTA
-/* Get psta/psr configuration */
-int dhd_get_psta_mode(dhd_pub_t *dhdp)
-{
- dhd_info_t *dhd = dhdp->info;
- return (int)dhd->psta_mode;
-}
-/* Set psta/psr configuration */
-int dhd_set_psta_mode(dhd_pub_t *dhdp, uint32 val)
-{
- dhd_info_t *dhd = dhdp->info;
- dhd->psta_mode = val;
- return 0;
-}
-#endif /* DHD_PSTA */
-
-#if (defined(DHD_WET) || defined(DHD_MCAST_REGEN) || defined(DHD_L2_FILTER))
-static void
-dhd_update_rx_pkt_chainable_state(dhd_pub_t* dhdp, uint32 idx)
-{
- dhd_info_t *dhd = dhdp->info;
- dhd_if_t *ifp;
-
- ASSERT(idx < DHD_MAX_IFS);
-
- ifp = dhd->iflist[idx];
-
- if (
-#ifdef DHD_L2_FILTER
- (ifp->block_ping) ||
-#endif // endif
-#ifdef DHD_WET
- (dhd->wet_mode) ||
-#endif // endif
-#ifdef DHD_MCAST_REGEN
- (ifp->mcast_regen_bss_enable) ||
-#endif // endif
- FALSE) {
- ifp->rx_pkt_chainable = FALSE;
- }
-}
-#endif /* DHD_WET || DHD_MCAST_REGEN || DHD_L2_FILTER */
-
-#ifdef DHD_WET
-/* Get wet configuration */
-int dhd_get_wet_mode(dhd_pub_t *dhdp)
-{
- dhd_info_t *dhd = dhdp->info;
- return (int)dhd->wet_mode;
-}
-
-/* Set wet configuration */
-int dhd_set_wet_mode(dhd_pub_t *dhdp, uint32 val)
-{
- dhd_info_t *dhd = dhdp->info;
- dhd->wet_mode = val;
- dhd_update_rx_pkt_chainable_state(dhdp, 0);
- return 0;
-}
-#endif /* DHD_WET */
-
-#if defined(WL_CFG80211) && (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0))
-int32 dhd_role_to_nl80211_iftype(int32 role)
-{
- switch (role) {
- case WLC_E_IF_ROLE_STA:
- return NL80211_IFTYPE_STATION;
- case WLC_E_IF_ROLE_AP:
- return NL80211_IFTYPE_AP;
- case WLC_E_IF_ROLE_WDS:
- return NL80211_IFTYPE_WDS;
- case WLC_E_IF_ROLE_P2P_GO:
- return NL80211_IFTYPE_P2P_GO;
- case WLC_E_IF_ROLE_P2P_CLIENT:
- return NL80211_IFTYPE_P2P_CLIENT;
- case WLC_E_IF_ROLE_IBSS:
- case WLC_E_IF_ROLE_NAN:
- return NL80211_IFTYPE_ADHOC;
- default:
- return NL80211_IFTYPE_UNSPECIFIED;
- }
-}
-#endif /* WL_CFG80211 && LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0) */
-
-static void
-dhd_ifadd_event_handler(void *handle, void *event_info, u8 event)
-{
- dhd_info_t *dhd = handle;
- dhd_if_event_t *if_event = event_info;
- int ifidx, bssidx;
- int ret;
-#if defined(WL_CFG80211) && (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0))
- struct wl_if_event_info info;
-#if defined(WLDWDS) && defined(FOURADDR_AUTO_BRG)
- struct net_device *ndev = NULL;
-#endif
-#else
- struct net_device *ndev;
-#endif /* WL_CFG80211 && LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0) */
-
- BCM_REFERENCE(ret);
- if (event != DHD_WQ_WORK_IF_ADD) {
- DHD_ERROR(("%s: unexpected event \n", __FUNCTION__));
- return;
- }
-
- if (!dhd) {
- DHD_ERROR(("%s: dhd info not available \n", __FUNCTION__));
- return;
- }
-
- if (!if_event) {
- DHD_ERROR(("%s: event data is null \n", __FUNCTION__));
- return;
- }
-
- dhd_net_if_lock_local(dhd);
- DHD_OS_WAKE_LOCK(&dhd->pub);
- DHD_PERIM_LOCK(&dhd->pub);
-
- ifidx = if_event->event.ifidx;
- bssidx = if_event->event.bssidx;
- DHD_TRACE(("%s: registering if with ifidx %d\n", __FUNCTION__, ifidx));
-
-#if defined(WL_CFG80211) && (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0))
- if (if_event->event.ifidx > 0) {
- u8 *mac_addr;
- bzero(&info, sizeof(info));
- info.ifidx = ifidx;
- info.bssidx = bssidx;
- info.role = if_event->event.role;
- strncpy(info.name, if_event->name, IFNAMSIZ);
- if (is_valid_ether_addr(if_event->mac)) {
- mac_addr = if_event->mac;
- } else {
- mac_addr = NULL;
- }
-
-#if defined(WLDWDS) && defined(FOURADDR_AUTO_BRG)
- if ((ndev = wl_cfg80211_post_ifcreate(dhd->pub.info->iflist[0]->net,
- &info, mac_addr, NULL, true)) == NULL)
-#else
- if (wl_cfg80211_post_ifcreate(dhd->pub.info->iflist[0]->net,
- &info, mac_addr, NULL, true) == NULL)
-#endif
- {
- /* Do the post interface create ops */
- DHD_ERROR(("Post ifcreate ops failed. Returning \n"));
- goto done;
- }
- }
-#else
- /* This path is for the non-Android case. The interface name on the host
- * and in the event message are the same; the name in the event message is
- * used to create the dongle interface list on the host.
- */
- ndev = dhd_allocate_if(&dhd->pub, ifidx, if_event->name,
- if_event->mac, bssidx, TRUE, if_event->name);
- if (!ndev) {
- DHD_ERROR(("%s: net device alloc failed \n", __FUNCTION__));
- goto done;
- }
-
- DHD_PERIM_UNLOCK(&dhd->pub);
- ret = dhd_register_if(&dhd->pub, ifidx, TRUE);
- DHD_PERIM_LOCK(&dhd->pub);
- if (ret != BCME_OK) {
- DHD_ERROR(("%s: dhd_register_if failed\n", __FUNCTION__));
- dhd_remove_if(&dhd->pub, ifidx, TRUE);
- goto done;
- }
-#endif /* WL_CFG80211 && LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0) */
-
-#ifndef PCIE_FULL_DONGLE
- /* Turn on AP isolation in the firmware for interfaces operating in AP mode */
- if (FW_SUPPORTED((&dhd->pub), ap) && (if_event->event.role != WLC_E_IF_ROLE_STA)) {
- uint32 var_int = 1;
- ret = dhd_iovar(&dhd->pub, ifidx, "ap_isolate", (char *)&var_int, sizeof(var_int),
- NULL, 0, TRUE);
- if (ret != BCME_OK) {
- DHD_ERROR(("%s: Failed to set ap_isolate to dongle\n", __FUNCTION__));
- dhd_remove_if(&dhd->pub, ifidx, TRUE);
- }
- }
-#endif /* PCIE_FULL_DONGLE */
-
-done:
- MFREE(dhd->pub.osh, if_event, sizeof(dhd_if_event_t));
-#if defined(WLDWDS) && defined(FOURADDR_AUTO_BRG)
- dhd_bridge_dev_set(dhd, ifidx, ndev);
-#endif /* defined(WLDWDS) && defined(FOURADDR_AUTO_BRG) */
-
- DHD_PERIM_UNLOCK(&dhd->pub);
- DHD_OS_WAKE_UNLOCK(&dhd->pub);
- dhd_net_if_unlock_local(dhd);
-}
-
-static void
-dhd_ifdel_event_handler(void *handle, void *event_info, u8 event)
-{
- dhd_info_t *dhd = handle;
- int ifidx;
- dhd_if_event_t *if_event = event_info;
-
- if (event != DHD_WQ_WORK_IF_DEL) {
- DHD_ERROR(("%s: unexpected event \n", __FUNCTION__));
- return;
- }
-
- if (!dhd) {
- DHD_ERROR(("%s: dhd info not available \n", __FUNCTION__));
- return;
- }
-
- if (!if_event) {
- DHD_ERROR(("%s: event data is null \n", __FUNCTION__));
- return;
- }
-
- dhd_net_if_lock_local(dhd);
- DHD_OS_WAKE_LOCK(&dhd->pub);
- DHD_PERIM_LOCK(&dhd->pub);
-
- ifidx = if_event->event.ifidx;
- DHD_TRACE(("Removing interface with idx %d\n", ifidx));
-#if defined(WLDWDS) && defined(FOURADDR_AUTO_BRG)
- dhd_bridge_dev_set(dhd, ifidx, NULL);
-#endif /* defined(WLDWDS) && defined(FOURADDR_AUTO_BRG) */
-
- DHD_PERIM_UNLOCK(&dhd->pub);
- if (!dhd->pub.info->iflist[ifidx]) {
- /* No matching netdev found */
- DHD_ERROR(("Netdev not found! Do nothing.\n"));
- goto done;
- }
-#if defined(WL_CFG80211) && (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0))
- if (if_event->event.ifidx > 0) {
- /* Do the post interface del ops */
- if (wl_cfg80211_post_ifdel(dhd->pub.info->iflist[ifidx]->net,
- true, if_event->event.ifidx) != 0) {
- DHD_TRACE(("Post ifdel ops failed. Returning \n"));
- goto done;
- }
- }
-#else
- /* For non-cfg80211 drivers */
- dhd_remove_if(&dhd->pub, ifidx, TRUE);
-#endif /* WL_CFG80211 && LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0) */
-
-done:
- DHD_PERIM_LOCK(&dhd->pub);
- MFREE(dhd->pub.osh, if_event, sizeof(dhd_if_event_t));
- DHD_PERIM_UNLOCK(&dhd->pub);
- DHD_OS_WAKE_UNLOCK(&dhd->pub);
- dhd_net_if_unlock_local(dhd);
-}
-
-#ifdef DHD_UPDATE_INTF_MAC
-static void
-dhd_ifupdate_event_handler(void *handle, void *event_info, u8 event)
-{
- dhd_info_t *dhd = handle;
- int ifidx;
- dhd_if_event_t *if_event = event_info;
-
- if (event != DHD_WQ_WORK_IF_UPDATE) {
- DHD_ERROR(("%s: unexpected event \n", __FUNCTION__));
- return;
- }
-
- if (!dhd) {
- DHD_ERROR(("%s: dhd info not available \n", __FUNCTION__));
- return;
- }
-
- if (!if_event) {
- DHD_ERROR(("%s: event data is null \n", __FUNCTION__));
- return;
- }
-
- dhd_net_if_lock_local(dhd);
- DHD_OS_WAKE_LOCK(&dhd->pub);
-
- ifidx = if_event->event.ifidx;
- DHD_TRACE(("%s: Update interface with idx %d\n", __FUNCTION__, ifidx));
-
- dhd_op_if_update(&dhd->pub, ifidx);
-
- MFREE(dhd->pub.osh, if_event, sizeof(dhd_if_event_t));
-
- DHD_OS_WAKE_UNLOCK(&dhd->pub);
- dhd_net_if_unlock_local(dhd);
-}
-
-int dhd_op_if_update(dhd_pub_t *dhdpub, int ifidx)
-{
- dhd_info_t * dhdinfo = NULL;
- dhd_if_t * ifp = NULL;
- int ret = 0;
- char buf[128];
-
- if ((NULL==dhdpub)||(NULL==dhdpub->info)) {
- DHD_ERROR(("%s: *** DHD handler is NULL!\n", __FUNCTION__));
- return -1;
- } else {
- dhdinfo = (dhd_info_t *)dhdpub->info;
- ifp = dhdinfo->iflist[ifidx];
- if (NULL==ifp) {
- DHD_ERROR(("%s: *** ifp handler is NULL!\n", __FUNCTION__));
- return -2;
- }
- }
-
- DHD_TRACE(("%s: idx %d\n", __FUNCTION__, ifidx));
- // Get MAC address
- strcpy(buf, "cur_etheraddr");
- ret = dhd_wl_ioctl_cmd(&dhdinfo->pub, WLC_GET_VAR, buf, sizeof(buf), FALSE, ifp->idx);
- if (0>ret) {
- DHD_ERROR(("Failed to upudate the MAC address for itf=%s, ret=%d\n", ifp->name, ret));
- // avoid collision
- dhdinfo->iflist[ifp->idx]->mac_addr[5] += 1;
- // force locally administrate address
- ETHER_SET_LOCALADDR(&dhdinfo->iflist[ifp->idx]->mac_addr);
- } else {
- DHD_EVENT(("Got mac for itf %s, idx %d, MAC=%02X:%02X:%02X:%02X:%02X:%02X\n",
- ifp->name, ifp->idx,
- (unsigned char)buf[0], (unsigned char)buf[1], (unsigned char)buf[2],
- (unsigned char)buf[3], (unsigned char)buf[4], (unsigned char)buf[5]));
- memcpy(dhdinfo->iflist[ifp->idx]->mac_addr, buf, ETHER_ADDR_LEN);
- if (dhdinfo->iflist[ifp->idx]->net) {
- memcpy(dhdinfo->iflist[ifp->idx]->net->dev_addr, buf, ETHER_ADDR_LEN);
- }
- }
-
- return ret;
-}
-#endif /* DHD_UPDATE_INTF_MAC */
-
-static void
-dhd_set_mac_addr_handler(void *handle, void *event_info, u8 event)
-{
- dhd_info_t *dhd = handle;
- dhd_if_t *ifp = event_info;
-
- if (event != DHD_WQ_WORK_SET_MAC) {
- DHD_ERROR(("%s: unexpected event \n", __FUNCTION__));
- }
-
- if (!dhd) {
- DHD_ERROR(("%s: dhd info not available \n", __FUNCTION__));
- return;
- }
-
- dhd_net_if_lock_local(dhd);
- DHD_OS_WAKE_LOCK(&dhd->pub);
- DHD_PERIM_LOCK(&dhd->pub);
-
- // terence 20160907: fix for not being able to set the MAC when wlan0 is down
- if (ifp == NULL || !ifp->set_macaddress) {
- goto done;
- }
- if (ifp == NULL || !dhd->pub.up) {
- DHD_ERROR(("%s: interface info not available/down \n", __FUNCTION__));
- goto done;
- }
-
- DHD_ERROR(("%s: MACID is overwritten\n", __FUNCTION__));
- ifp->set_macaddress = FALSE;
- if (_dhd_set_mac_address(dhd, ifp->idx, ifp->mac_addr) == 0)
- DHD_INFO(("%s: MACID is overwritten\n", __FUNCTION__));
- else
- DHD_ERROR(("%s: _dhd_set_mac_address() failed\n", __FUNCTION__));
-
-done:
- DHD_PERIM_UNLOCK(&dhd->pub);
- DHD_OS_WAKE_UNLOCK(&dhd->pub);
- dhd_net_if_unlock_local(dhd);
-}
-
-static void
-dhd_set_mcast_list_handler(void *handle, void *event_info, u8 event)
-{
- dhd_info_t *dhd = handle;
- int ifidx = (int)((long int)event_info);
- dhd_if_t *ifp = NULL;
-
- if (event != DHD_WQ_WORK_SET_MCAST_LIST) {
- DHD_ERROR(("%s: unexpected event \n", __FUNCTION__));
- return;
- }
-
- if (!dhd) {
- DHD_ERROR(("%s: dhd info not available \n", __FUNCTION__));
- return;
- }
-
- dhd_net_if_lock_local(dhd);
- DHD_OS_WAKE_LOCK(&dhd->pub);
- DHD_PERIM_LOCK(&dhd->pub);
-
- ifp = dhd->iflist[ifidx];
-
- if (ifp == NULL || !dhd->pub.up) {
- DHD_ERROR(("%s: interface info not available/down \n", __FUNCTION__));
- goto done;
- }
-
- ifidx = ifp->idx;
-
-#ifdef MCAST_LIST_ACCUMULATION
- ifidx = 0;
-#endif /* MCAST_LIST_ACCUMULATION */
-
- _dhd_set_multicast_list(dhd, ifidx);
- DHD_INFO(("%s: set multicast list for if %d\n", __FUNCTION__, ifidx));
-
-done:
- DHD_PERIM_UNLOCK(&dhd->pub);
- DHD_OS_WAKE_UNLOCK(&dhd->pub);
- dhd_net_if_unlock_local(dhd);
-}
-
-static int
-dhd_set_mac_address(struct net_device *dev, void *addr)
-{
- int ret = 0;
-
- dhd_info_t *dhd = DHD_DEV_INFO(dev);
- struct sockaddr *sa = (struct sockaddr *)addr;
- int ifidx;
- dhd_if_t *dhdif;
-
- ifidx = dhd_net2idx(dhd, dev);
- if (ifidx == DHD_BAD_IF)
- return -1;
-
- dhdif = dhd->iflist[ifidx];
-
- dhd_net_if_lock_local(dhd);
- memcpy(dhdif->mac_addr, sa->sa_data, ETHER_ADDR_LEN);
- dhdif->set_macaddress = TRUE;
- dhd_net_if_unlock_local(dhd);
- dhd_deferred_schedule_work(dhd->dhd_deferred_wq, (void *)dhdif, DHD_WQ_WORK_SET_MAC,
- dhd_set_mac_addr_handler, DHD_WQ_WORK_PRIORITY_LOW);
- return ret;
-}
-
-static void
-dhd_set_multicast_list(struct net_device *dev)
-{
- dhd_info_t *dhd = DHD_DEV_INFO(dev);
- int ifidx;
-
- ifidx = dhd_net2idx(dhd, dev);
- if (ifidx == DHD_BAD_IF)
- return;
-
- dhd->iflist[ifidx]->set_multicast = TRUE;
- dhd_deferred_schedule_work(dhd->dhd_deferred_wq, (void *)((long int)ifidx),
- DHD_WQ_WORK_SET_MCAST_LIST, dhd_set_mcast_list_handler, DHD_WQ_WORK_PRIORITY_LOW);
-
- // terence 20160907: fix for not being able to set the MAC when wlan0 is down
- dhd_deferred_schedule_work(dhd->dhd_deferred_wq, (void *)dhd->iflist[ifidx],
- DHD_WQ_WORK_SET_MAC, dhd_set_mac_addr_handler, DHD_WQ_WORK_PRIORITY_LOW);
-}
-
-#ifdef DHD_UCODE_DOWNLOAD
-/* Get ucode path */
-char *
-dhd_get_ucode_path(dhd_pub_t *dhdp)
-{
- dhd_info_t *dhd = dhdp->info;
- return dhd->uc_path;
-}
-#endif /* DHD_UCODE_DOWNLOAD */
-
-#ifdef PROP_TXSTATUS
-int
-dhd_os_wlfc_block(dhd_pub_t *pub)
-{
- dhd_info_t *di = (dhd_info_t *)(pub->info);
- ASSERT(di != NULL);
- /* terence 20161229: don't do spin lock if proptx not enabled */
- if (disable_proptx)
- return 1;
-#ifdef BCMDBUS
- spin_lock_irqsave(&di->wlfc_spinlock, di->wlfc_lock_flags);
-#else
- spin_lock_bh(&di->wlfc_spinlock);
-#endif /* BCMDBUS */
- return 1;
-}
-
-int
-dhd_os_wlfc_unblock(dhd_pub_t *pub)
-{
- dhd_info_t *di = (dhd_info_t *)(pub->info);
-
- ASSERT(di != NULL);
- /* terence 20161229: don't do spin lock if proptx not enabled */
- if (disable_proptx)
- return 1;
-#ifdef BCMDBUS
- spin_unlock_irqrestore(&di->wlfc_spinlock, di->wlfc_lock_flags);
-#else
- spin_unlock_bh(&di->wlfc_spinlock);
-#endif /* BCMDBUS */
- return 1;
-}
-
-#endif /* PROP_TXSTATUS */
-
-/* This routine does not support the packet chain feature. It is currently
- * tested only for the proxy ARP feature.
- */
-int dhd_sendup(dhd_pub_t *dhdp, int ifidx, void *p)
-{
- struct sk_buff *skb;
- void *skbhead = NULL;
- void *skbprev = NULL;
- dhd_if_t *ifp;
- ASSERT(!PKTISCHAINED(p));
- skb = PKTTONATIVE(dhdp->osh, p);
-
- ifp = dhdp->info->iflist[ifidx];
- skb->dev = ifp->net;
-
- skb->protocol = eth_type_trans(skb, skb->dev);
-
- if (in_interrupt()) {
- bcm_object_trace_opr(skb, BCM_OBJDBG_REMOVE,
- __FUNCTION__, __LINE__);
- netif_rx(skb);
- } else {
- if (dhdp->info->rxthread_enabled) {
- if (!skbhead) {
- skbhead = skb;
- } else {
- PKTSETNEXT(dhdp->osh, skbprev, skb);
- }
- skbprev = skb;
- } else {
- /* If the receive is not processed inside an ISR,
- * the softirqd must be woken explicitly to service
- * the NET_RX_SOFTIRQ. In 2.6 kernels, this is handled
- * by netif_rx_ni(), but in earlier kernels, we need
- * to do it manually.
- */
- bcm_object_trace_opr(skb, BCM_OBJDBG_REMOVE,
- __FUNCTION__, __LINE__);
- netif_rx_ni(skb);
- }
- }
-
- if (dhdp->info->rxthread_enabled && skbhead)
- dhd_sched_rxf(dhdp, skbhead);
-
- return BCME_OK;
-}
-
-int BCMFASTPATH
-__dhd_sendpkt(dhd_pub_t *dhdp, int ifidx, void *pktbuf)
-{
- int ret = BCME_OK;
- dhd_info_t *dhd = (dhd_info_t *)(dhdp->info);
- struct ether_header *eh = NULL;
- bool pkt_ether_type_802_1x = FALSE;
- uint8 pkt_flow_prio;
-
-#if defined(DHD_L2_FILTER)
- dhd_if_t *ifp = dhd_get_ifp(dhdp, ifidx);
-#endif // endif
-
- /* Reject if down */
- if (!dhdp->up || (dhdp->busstate == DHD_BUS_DOWN)) {
- /* free the packet here since the caller won't */
- PKTCFREE(dhdp->osh, pktbuf, TRUE);
- return -ENODEV;
- }
-
-#ifdef PCIE_FULL_DONGLE
- if (dhdp->busstate == DHD_BUS_SUSPEND) {
- DHD_ERROR(("%s : pcie is still in suspend state!!\n", __FUNCTION__));
- PKTCFREE(dhdp->osh, pktbuf, TRUE);
- return NETDEV_TX_BUSY;
- }
-#endif /* PCIE_FULL_DONGLE */
-
- /* Reject if pktlen > MAX_MTU_SZ */
- if (PKTLEN(dhdp->osh, pktbuf) > MAX_MTU_SZ) {
- /* free the packet here since the caller won't */
- dhdp->tx_big_packets++;
- PKTCFREE(dhdp->osh, pktbuf, TRUE);
- return BCME_ERROR;
- }
-
-#ifdef DHD_L2_FILTER
- /* if dhcp_unicast is enabled, we need to convert the */
- /* broadcast DHCP ACK/REPLY packets to Unicast. */
- if (ifp->dhcp_unicast) {
- uint8* mac_addr;
- uint8* ehptr = NULL;
- int ret;
- ret = bcm_l2_filter_get_mac_addr_dhcp_pkt(dhdp->osh, pktbuf, ifidx, &mac_addr);
- if (ret == BCME_OK) {
- /* If the given MAC address has a valid entry in the STA list,
- * copy it into the packet's destination address.
- */
- if (dhd_find_sta(dhdp, ifidx, mac_addr)) {
- ehptr = PKTDATA(dhdp->osh, pktbuf);
- bcopy(mac_addr, ehptr + ETHER_DEST_OFFSET, ETHER_ADDR_LEN);
- }
- }
- }
-
- if (ifp->grat_arp && DHD_IF_ROLE_AP(dhdp, ifidx)) {
- if (bcm_l2_filter_gratuitous_arp(dhdp->osh, pktbuf) == BCME_OK) {
- PKTCFREE(dhdp->osh, pktbuf, TRUE);
- return BCME_ERROR;
- }
- }
-
- if (ifp->parp_enable && DHD_IF_ROLE_AP(dhdp, ifidx)) {
- ret = dhd_l2_filter_pkt_handle(dhdp, ifidx, pktbuf, TRUE);
-
- /* Drop the packets if l2 filter has processed it already
- * otherwise continue with the normal path
- */
- if (ret == BCME_OK) {
- PKTCFREE(dhdp->osh, pktbuf, TRUE);
- return BCME_ERROR;
- }
- }
-#endif /* DHD_L2_FILTER */
- /* Update multicast statistic */
- if (PKTLEN(dhdp->osh, pktbuf) >= ETHER_HDR_LEN) {
- uint8 *pktdata = (uint8 *)PKTDATA(dhdp->osh, pktbuf);
- eh = (struct ether_header *)pktdata;
-
- if (ETHER_ISMULTI(eh->ether_dhost))
- dhdp->tx_multicast++;
- if (ntoh16(eh->ether_type) == ETHER_TYPE_802_1X) {
-#ifdef DHD_LOSSLESS_ROAMING
- uint8 prio = (uint8)PKTPRIO(pktbuf);
-
- /* back up 802.1x's priority */
- dhdp->prio_8021x = prio;
-#endif /* DHD_LOSSLESS_ROAMING */
- pkt_ether_type_802_1x = TRUE;
- DBG_EVENT_LOG(dhdp, WIFI_EVENT_DRIVER_EAPOL_FRAME_TRANSMIT_REQUESTED);
- atomic_inc(&dhd->pend_8021x_cnt);
-#if defined(WL_CFG80211) && defined(WL_WPS_SYNC)
- wl_handle_wps_states(dhd_idx2net(dhdp, ifidx),
- pktdata, PKTLEN(dhdp->osh, pktbuf), TRUE);
-#endif /* WL_CFG80211 && WL_WPS_SYNC */
- }
- dhd_dump_pkt(dhdp, ifidx, pktdata,
- (uint32)PKTLEN(dhdp->osh, pktbuf), TRUE, NULL, NULL);
- } else {
- PKTCFREE(dhdp->osh, pktbuf, TRUE);
- return BCME_ERROR;
- }
-
- {
- /* Look into the packet and update the packet priority */
-#ifndef PKTPRIO_OVERRIDE
- if (PKTPRIO(pktbuf) == 0)
-#endif /* !PKTPRIO_OVERRIDE */
- {
-#if defined(QOS_MAP_SET)
- pktsetprio_qms(pktbuf, wl_get_up_table(dhdp, ifidx), FALSE);
-#else
- pktsetprio(pktbuf, FALSE);
-#endif /* QOS_MAP_SET */
- }
-#ifndef PKTPRIO_OVERRIDE
- else {
- /* Some protocols like OZMO use priority values from 256..263.
- * these are magic values to indicate a specific 802.1d priority.
- * make sure that priority field is in range of 0..7
- */
- PKTSETPRIO(pktbuf, PKTPRIO(pktbuf) & 0x7);
- }
-#endif /* !PKTPRIO_OVERRIDE */
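- /*
- * Example of the masking above (illustrative only): an OZMO-style
- * priority value of 260 (0x104) is ANDed with 0x7 and becomes 802.1d
- * priority 4.
- */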
- }
-
- BCM_REFERENCE(pkt_ether_type_802_1x);
- BCM_REFERENCE(pkt_flow_prio);
-
-#ifdef SUPPORT_SET_TID
- dhd_set_tid_based_on_uid(dhdp, pktbuf);
-#endif /* SUPPORT_SET_TID */
-
-#ifdef PCIE_FULL_DONGLE
- /*
- * Lkup the per interface hash table, for a matching flowring. If one is not
- * available, allocate a unique flowid and add a flowring entry.
- * The found or newly created flowid is placed into the pktbuf's tag.
- */
-
-#ifdef DHD_LOSSLESS_ROAMING
- /* For LLR, override and use a flowring with priority 7 for 802.1X packets */
- if (pkt_ether_type_802_1x) {
- pkt_flow_prio = PRIO_8021D_NC;
- } else
-#endif /* DHD_LOSSLESS_ROAMING */
- {
- pkt_flow_prio = dhdp->flow_prio_map[(PKTPRIO(pktbuf))];
- }
-
- ret = dhd_flowid_update(dhdp, ifidx, pkt_flow_prio, pktbuf);
- if (ret != BCME_OK) {
- PKTCFREE(dhd->pub.osh, pktbuf, TRUE);
- return ret;
- }
-#endif /* PCIE_FULL_DONGLE */
- /* terence 20150901: Micky add to adjust the 802.1X priority */
- /* Set the 802.1X packet to the highest priority 7 */
- if (dhdp->conf->pktprio8021x >= 0)
- pktset8021xprio(pktbuf, dhdp->conf->pktprio8021x);
-
-#ifdef PROP_TXSTATUS
- if (dhd_wlfc_is_supported(dhdp)) {
- /* store the interface ID */
- DHD_PKTTAG_SETIF(PKTTAG(pktbuf), ifidx);
-
- /* store destination MAC in the tag as well */
- DHD_PKTTAG_SETDSTN(PKTTAG(pktbuf), eh->ether_dhost);
-
- /* decide which FIFO this packet belongs to */
- if (ETHER_ISMULTI(eh->ether_dhost))
- /* one additional queue index (highest AC + 1) is used for bc/mc queue */
- DHD_PKTTAG_SETFIFO(PKTTAG(pktbuf), AC_COUNT);
- else
- DHD_PKTTAG_SETFIFO(PKTTAG(pktbuf), WME_PRIO2AC(PKTPRIO(pktbuf)));
- } else
-#endif /* PROP_TXSTATUS */
- {
- /* If the protocol uses a data header, apply it */
- dhd_prot_hdrpush(dhdp, ifidx, pktbuf);
- }
-
- /* Use bus module to send data frame */
-#ifdef PROP_TXSTATUS
- {
- if (dhd_wlfc_commit_packets(dhdp, (f_commitpkt_t)dhd_bus_txdata,
- dhdp->bus, pktbuf, TRUE) == WLFC_UNSUPPORTED) {
- /* non-proptxstatus way */
-#ifdef BCMPCIE
- ret = dhd_bus_txdata(dhdp->bus, pktbuf, (uint8)ifidx);
-#else
- ret = dhd_bus_txdata(dhdp->bus, pktbuf);
-#endif /* BCMPCIE */
- }
- }
-#else
-#ifdef BCMPCIE
- ret = dhd_bus_txdata(dhdp->bus, pktbuf, (uint8)ifidx);
-#else
- ret = dhd_bus_txdata(dhdp->bus, pktbuf);
-#endif /* BCMPCIE */
-#endif /* PROP_TXSTATUS */
-#ifdef BCMDBUS
- if (ret)
- PKTCFREE(dhdp->osh, pktbuf, TRUE);
-#endif /* BCMDBUS */
-
- return ret;
-}
-
-int BCMFASTPATH
-dhd_sendpkt(dhd_pub_t *dhdp, int ifidx, void *pktbuf)
-{
- int ret = 0;
- unsigned long flags;
- dhd_if_t *ifp;
-
- DHD_GENERAL_LOCK(dhdp, flags);
- ifp = dhd_get_ifp(dhdp, ifidx);
- if (!ifp || ifp->del_in_progress) {
- DHD_ERROR(("%s: ifp:%p del_in_progress:%d\n",
- __FUNCTION__, ifp, ifp ? ifp->del_in_progress : 0));
- DHD_GENERAL_UNLOCK(dhdp, flags);
- PKTCFREE(dhdp->osh, pktbuf, TRUE);
- return -ENODEV;
- }
- if (DHD_BUS_CHECK_DOWN_OR_DOWN_IN_PROGRESS(dhdp)) {
- DHD_ERROR(("%s: returning as busstate=%d\n",
- __FUNCTION__, dhdp->busstate));
- DHD_GENERAL_UNLOCK(dhdp, flags);
- PKTCFREE(dhdp->osh, pktbuf, TRUE);
- return -ENODEV;
- }
- DHD_IF_SET_TX_ACTIVE(ifp, DHD_TX_SEND_PKT);
- DHD_BUS_BUSY_SET_IN_SEND_PKT(dhdp);
- DHD_GENERAL_UNLOCK(dhdp, flags);
-
- DHD_GENERAL_LOCK(dhdp, flags);
- if (DHD_BUS_CHECK_SUSPEND_OR_SUSPEND_IN_PROGRESS(dhdp)) {
- DHD_ERROR(("%s: bus is in suspend(%d) or suspending(0x%x) state!!\n",
- __FUNCTION__, dhdp->busstate, dhdp->dhd_bus_busy_state));
- DHD_BUS_BUSY_CLEAR_IN_SEND_PKT(dhdp);
- DHD_IF_CLR_TX_ACTIVE(ifp, DHD_TX_SEND_PKT);
- dhd_os_tx_completion_wake(dhdp);
- dhd_os_busbusy_wake(dhdp);
- DHD_GENERAL_UNLOCK(dhdp, flags);
- PKTCFREE(dhdp->osh, pktbuf, TRUE);
- return -ENODEV;
- }
- DHD_GENERAL_UNLOCK(dhdp, flags);
-
- ret = __dhd_sendpkt(dhdp, ifidx, pktbuf);
-
- DHD_GENERAL_LOCK(dhdp, flags);
- DHD_BUS_BUSY_CLEAR_IN_SEND_PKT(dhdp);
- DHD_IF_CLR_TX_ACTIVE(ifp, DHD_TX_SEND_PKT);
- dhd_os_tx_completion_wake(dhdp);
- dhd_os_busbusy_wake(dhdp);
- DHD_GENERAL_UNLOCK(dhdp, flags);
- return ret;
-}
-
-int BCMFASTPATH
-dhd_start_xmit(struct sk_buff *skb, struct net_device *net)
-{
- int ret;
- uint datalen;
- void *pktbuf;
- dhd_info_t *dhd = DHD_DEV_INFO(net);
- dhd_if_t *ifp = NULL;
- int ifidx;
- unsigned long flags;
- uint8 htsfdlystat_sz = 0;
-
- DHD_TRACE(("%s: Enter\n", __FUNCTION__));
-
- if (dhd_query_bus_erros(&dhd->pub)) {
- return -ENODEV;
- }
-
- DHD_GENERAL_LOCK(&dhd->pub, flags);
- DHD_BUS_BUSY_SET_IN_TX(&dhd->pub);
- DHD_GENERAL_UNLOCK(&dhd->pub, flags);
-
- DHD_GENERAL_LOCK(&dhd->pub, flags);
-#ifdef BCMPCIE
- if (DHD_BUS_CHECK_SUSPEND_OR_SUSPEND_IN_PROGRESS(&dhd->pub)) {
- DHD_ERROR(("%s: bus is in suspend(%d) or suspending(0x%x) state!!\n",
- __FUNCTION__, dhd->pub.busstate, dhd->pub.dhd_bus_busy_state));
- DHD_BUS_BUSY_CLEAR_IN_TX(&dhd->pub);
-#ifdef PCIE_FULL_DONGLE
- /* Stop tx queues if suspend is in progress */
- if (DHD_BUS_CHECK_ANY_SUSPEND_IN_PROGRESS(&dhd->pub)) {
- dhd_bus_stop_queue(dhd->pub.bus);
- }
-#endif /* PCIE_FULL_DONGLE */
- dhd_os_busbusy_wake(&dhd->pub);
- DHD_GENERAL_UNLOCK(&dhd->pub, flags);
- return NETDEV_TX_BUSY;
- }
-#else
- if (DHD_BUS_CHECK_SUSPEND_OR_SUSPEND_IN_PROGRESS(&dhd->pub)) {
- DHD_ERROR(("%s: bus is in suspend(%d) or suspending(0x%x) state!!\n",
- __FUNCTION__, dhd->pub.busstate, dhd->pub.dhd_bus_busy_state));
- }
-#endif
-
- DHD_OS_WAKE_LOCK(&dhd->pub);
- DHD_PERIM_LOCK_TRY(DHD_FWDER_UNIT(dhd), lock_taken);
-
- /* Reject if down */
- if (dhd->pub.hang_was_sent || DHD_BUS_CHECK_DOWN_OR_DOWN_IN_PROGRESS(&dhd->pub)) {
- DHD_ERROR(("%s: xmit rejected pub.up=%d busstate=%d \n",
- __FUNCTION__, dhd->pub.up, dhd->pub.busstate));
- netif_stop_queue(net);
- /* Send Event when bus down detected during data session */
- if (dhd->pub.up && !dhd->pub.hang_was_sent && !DHD_BUS_CHECK_REMOVE(&dhd->pub)) {
- DHD_ERROR(("%s: Event HANG sent up\n", __FUNCTION__));
- dhd->pub.hang_reason = HANG_REASON_BUS_DOWN;
- net_os_send_hang_message(net);
- }
- DHD_BUS_BUSY_CLEAR_IN_TX(&dhd->pub);
- dhd_os_busbusy_wake(&dhd->pub);
- DHD_GENERAL_UNLOCK(&dhd->pub, flags);
- DHD_PERIM_UNLOCK_TRY(DHD_FWDER_UNIT(dhd), lock_taken);
- DHD_OS_WAKE_UNLOCK(&dhd->pub);
- return NETDEV_TX_BUSY;
- }
-
- ifp = DHD_DEV_IFP(net);
- ifidx = DHD_DEV_IFIDX(net);
- if (!ifp || (ifidx == DHD_BAD_IF) ||
- ifp->del_in_progress) {
- DHD_ERROR(("%s: ifidx %d ifp:%p del_in_progress:%d\n",
- __FUNCTION__, ifidx, ifp, (ifp ? ifp->del_in_progress : 0)));
- netif_stop_queue(net);
- DHD_BUS_BUSY_CLEAR_IN_TX(&dhd->pub);
- dhd_os_busbusy_wake(&dhd->pub);
- DHD_GENERAL_UNLOCK(&dhd->pub, flags);
- DHD_PERIM_UNLOCK_TRY(DHD_FWDER_UNIT(dhd), lock_taken);
- DHD_OS_WAKE_UNLOCK(&dhd->pub);
- return NETDEV_TX_BUSY;
- }
-
- DHD_IF_SET_TX_ACTIVE(ifp, DHD_TX_START_XMIT);
- DHD_GENERAL_UNLOCK(&dhd->pub, flags);
-
- ASSERT(ifidx == dhd_net2idx(dhd, net));
- ASSERT((ifp != NULL) && ((ifidx < DHD_MAX_IFS) && (ifp == dhd->iflist[ifidx])));
-
- bcm_object_trace_opr(skb, BCM_OBJDBG_ADD_PKT, __FUNCTION__, __LINE__);
-
- /* re-align socket buffer if "skb->data" is odd address */
- if (((unsigned long)(skb->data)) & 0x1) {
- unsigned char *data = skb->data;
- uint32 length = skb->len;
- PKTPUSH(dhd->pub.osh, skb, 1);
- memmove(skb->data, data, length);
- PKTSETLEN(dhd->pub.osh, skb, length);
- }
-
- datalen = PKTLEN(dhd->pub.osh, skb);
-
- /* Make sure there's enough room for any header */
- if (skb_headroom(skb) < dhd->pub.hdrlen + htsfdlystat_sz) {
- struct sk_buff *skb2;
-
- DHD_INFO(("%s: insufficient headroom\n",
- dhd_ifname(&dhd->pub, ifidx)));
- dhd->pub.tx_realloc++;
-
- bcm_object_trace_opr(skb, BCM_OBJDBG_REMOVE, __FUNCTION__, __LINE__);
- skb2 = skb_realloc_headroom(skb, dhd->pub.hdrlen + htsfdlystat_sz);
-
- dev_kfree_skb(skb);
- if ((skb = skb2) == NULL) {
- DHD_ERROR(("%s: skb_realloc_headroom failed\n",
- dhd_ifname(&dhd->pub, ifidx)));
- ret = -ENOMEM;
- goto done;
- }
- bcm_object_trace_opr(skb, BCM_OBJDBG_ADD_PKT, __FUNCTION__, __LINE__);
- }
-
- /* move from dhdsdio_sendfromq(), try to orphan skb early */
- if (dhd->pub.conf->orphan_move == 2)
- PKTORPHAN(skb, dhd->pub.conf->tsq);
- else if (dhd->pub.conf->orphan_move == 3)
- skb_orphan(skb);
-
- /* Convert to packet */
- if (!(pktbuf = PKTFRMNATIVE(dhd->pub.osh, skb))) {
- DHD_ERROR(("%s: PKTFRMNATIVE failed\n",
- dhd_ifname(&dhd->pub, ifidx)));
- bcm_object_trace_opr(skb, BCM_OBJDBG_REMOVE, __FUNCTION__, __LINE__);
- dev_kfree_skb_any(skb);
- ret = -ENOMEM;
- goto done;
- }
-
-#ifdef DHD_WET
- /* wet related packet proto manipulation should be done in DHD
- since dongle doesn't have complete payload
- */
- if (WET_ENABLED(&dhd->pub) &&
- (dhd_wet_send_proc(dhd->pub.wet_info, pktbuf, &pktbuf) < 0)) {
- DHD_INFO(("%s:%s: wet send proc failed\n",
- __FUNCTION__, dhd_ifname(&dhd->pub, ifidx)));
- PKTFREE(dhd->pub.osh, pktbuf, FALSE);
- ret = -EFAULT;
- goto done;
- }
-#endif /* DHD_WET */
-
-#ifdef DHD_PSTA
- /* PSR related packet proto manipulation should be done in DHD
- * since dongle doesn't have complete payload
- */
- if (PSR_ENABLED(&dhd->pub) &&
- (dhd_psta_proc(&dhd->pub, ifidx, &pktbuf, TRUE) < 0)) {
-
- DHD_ERROR(("%s:%s: psta send proc failed\n", __FUNCTION__,
- dhd_ifname(&dhd->pub, ifidx)));
- }
-#endif /* DHD_PSTA */
-
-#ifdef DHDTCPSYNC_FLOOD_BLK
- if (dhd_tcpdata_get_flag(&dhd->pub, pktbuf) == FLAG_SYNCACK) {
- ifp->tsyncack_txed ++;
- }
-#endif /* DHDTCPSYNC_FLOOD_BLK */
-
-#ifdef DHDTCPACK_SUPPRESS
- if (dhd->pub.tcpack_sup_mode == TCPACK_SUP_HOLD) {
- /* If this packet has been held or freed, just return */
- if (dhd_tcpack_hold(&dhd->pub, pktbuf, ifidx)) {
- ret = 0;
- goto done;
- }
- } else {
- /* If this packet has replaced another packet and got freed, just return */
- if (dhd_tcpack_suppress(&dhd->pub, pktbuf)) {
- ret = 0;
- goto done;
- }
- }
-#endif /* DHDTCPACK_SUPPRESS */
-
- /*
- * If Load Balance is enabled queue the packet
- * else send directly from here.
- */
-#if defined(DHD_LB_TXP)
- ret = dhd_lb_sendpkt(dhd, net, ifidx, pktbuf);
-#else
- ret = __dhd_sendpkt(&dhd->pub, ifidx, pktbuf);
-#endif // endif
-
-done:
- if (ret) {
- ifp->stats.tx_dropped++;
- dhd->pub.tx_dropped++;
- } else {
-#ifdef PROP_TXSTATUS
- /* tx_packets counter can be counted only when wlfc is disabled */
- if (!dhd_wlfc_is_supported(&dhd->pub))
-#endif // endif
- {
- dhd->pub.tx_packets++;
- ifp->stats.tx_packets++;
- ifp->stats.tx_bytes += datalen;
- }
- }
-
- DHD_GENERAL_LOCK(&dhd->pub, flags);
- DHD_BUS_BUSY_CLEAR_IN_TX(&dhd->pub);
- DHD_IF_CLR_TX_ACTIVE(ifp, DHD_TX_START_XMIT);
- dhd_os_tx_completion_wake(&dhd->pub);
- dhd_os_busbusy_wake(&dhd->pub);
- DHD_GENERAL_UNLOCK(&dhd->pub, flags);
- DHD_PERIM_UNLOCK_TRY(DHD_FWDER_UNIT(dhd), lock_taken);
- DHD_OS_WAKE_UNLOCK(&dhd->pub);
- /* Return ok: we always eat the packet */
- return NETDEV_TX_OK;
-}
-
-#ifdef DHD_PCIE_NATIVE_RUNTIMEPM
-void dhd_rx_wq_wakeup(struct work_struct *ptr)
-{
- struct dhd_rx_tx_work *work;
- struct dhd_pub * pub;
-
- work = container_of(ptr, struct dhd_rx_tx_work, work);
-
- pub = work->pub;
-
- DHD_RPM(("%s: ENTER. \n", __FUNCTION__));
-
- if (atomic_read(&pub->block_bus) || pub->busstate == DHD_BUS_DOWN) {
- return;
- }
-
- DHD_OS_WAKE_LOCK(pub);
- if (pm_runtime_get_sync(dhd_bus_to_dev(pub->bus)) >= 0) {
-
- // do nothing but wakeup the bus.
- pm_runtime_mark_last_busy(dhd_bus_to_dev(pub->bus));
- pm_runtime_put_autosuspend(dhd_bus_to_dev(pub->bus));
- }
- DHD_OS_WAKE_UNLOCK(pub);
- kfree(work);
-}
-
-void dhd_start_xmit_wq_adapter(struct work_struct *ptr)
-{
- struct dhd_rx_tx_work *work;
- int ret = 0;
- dhd_info_t *dhd;
- struct dhd_bus * bus;
-
- work = container_of(ptr, struct dhd_rx_tx_work, work);
-
- dhd = DHD_DEV_INFO(work->net);
-
- bus = dhd->pub.bus;
-
- if (atomic_read(&dhd->pub.block_bus)) {
- kfree_skb(work->skb);
- kfree(work);
- dhd_netif_start_queue(bus);
- return;
- }
-
- if (pm_runtime_get_sync(dhd_bus_to_dev(bus)) >= 0) {
- ret = dhd_start_xmit(work->skb, work->net);
- pm_runtime_mark_last_busy(dhd_bus_to_dev(bus));
- pm_runtime_put_autosuspend(dhd_bus_to_dev(bus));
- }
- if (ret)
- netdev_err(work->net,
- "error: dhd_start_xmit():%d\n", ret);
-
- kfree(work);
- dhd_netif_start_queue(bus);
-}
-
-int BCMFASTPATH
-dhd_start_xmit_wrapper(struct sk_buff *skb, struct net_device *net)
-{
- struct dhd_rx_tx_work *start_xmit_work;
- int ret;
- dhd_info_t *dhd = DHD_DEV_INFO(net);
-
- if (dhd->pub.busstate == DHD_BUS_SUSPEND) {
- DHD_RPM(("%s: wakeup the bus using workqueue.\n", __FUNCTION__));
-
- dhd_netif_stop_queue(dhd->pub.bus);
-
- start_xmit_work = (struct dhd_rx_tx_work*)
- kmalloc(sizeof(*start_xmit_work), GFP_ATOMIC);
-
- if (!start_xmit_work) {
- netdev_err(net,
- "error: failed to alloc start_xmit_work\n");
- ret = -ENOMEM;
- goto exit;
- }
-
- INIT_WORK(&start_xmit_work->work, dhd_start_xmit_wq_adapter);
- start_xmit_work->skb = skb;
- start_xmit_work->net = net;
- queue_work(dhd->tx_wq, &start_xmit_work->work);
- ret = NET_XMIT_SUCCESS;
-
- } else if (dhd->pub.busstate == DHD_BUS_DATA) {
- ret = dhd_start_xmit(skb, net);
- } else {
- /* when bus is down */
- ret = -ENODEV;
- }
-
-exit:
- return ret;
-}
-void
-dhd_bus_wakeup_work(dhd_pub_t *dhdp)
-{
- struct dhd_rx_tx_work *rx_work;
- dhd_info_t *dhd = (dhd_info_t *)dhdp->info;
-
- rx_work = kmalloc(sizeof(*rx_work), GFP_ATOMIC);
- if (!rx_work) {
- DHD_ERROR(("%s: start_rx_work alloc error. \n", __FUNCTION__));
- return;
- }
-
- INIT_WORK(&rx_work->work, dhd_rx_wq_wakeup);
- rx_work->pub = dhdp;
- queue_work(dhd->rx_wq, &rx_work->work);
-
-}
-#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
-
-static void
-__dhd_txflowcontrol(dhd_pub_t *dhdp, struct net_device *net, bool state)
-{
-
- if ((state == ON) && (dhdp->txoff == FALSE)) {
- netif_stop_queue(net);
- dhd_prot_update_pktid_txq_stop_cnt(dhdp);
- } else if (state == ON) {
- DHD_INFO(("%s: Netif Queue has already stopped\n", __FUNCTION__));
- }
- if ((state == OFF) && (dhdp->txoff == TRUE)) {
- netif_wake_queue(net);
- dhd_prot_update_pktid_txq_start_cnt(dhdp);
- } else if (state == OFF) {
- DHD_INFO(("%s: Netif Queue has already started\n", __FUNCTION__));
- }
-}
-
-void
-dhd_txflowcontrol(dhd_pub_t *dhdp, int ifidx, bool state)
-{
- struct net_device *net;
- dhd_info_t *dhd = dhdp->info;
- int i;
-
- DHD_TRACE(("%s: Enter\n", __FUNCTION__));
-
- ASSERT(dhd);
-
-#ifdef DHD_LOSSLESS_ROAMING
- /* block flowcontrol during roaming */
- if ((dhdp->dequeue_prec_map == 1 << PRIO_8021D_NC) && state == ON) {
- return;
- }
-#endif // endif
-
- if (ifidx == ALL_INTERFACES) {
- for (i = 0; i < DHD_MAX_IFS; i++) {
- if (dhd->iflist[i]) {
- net = dhd->iflist[i]->net;
- __dhd_txflowcontrol(dhdp, net, state);
- }
- }
- } else {
- if (dhd->iflist[ifidx]) {
- net = dhd->iflist[ifidx]->net;
- __dhd_txflowcontrol(dhdp, net, state);
- }
- }
- dhdp->txoff = state;
-}
-
-#ifdef DHD_MCAST_REGEN
-/*
- * Description: This function is called to do the reverse translation
- *
- * Input eh - pointer to the ethernet header
- */
-int32
-dhd_mcast_reverse_translation(struct ether_header *eh)
-{
- uint8 *iph;
- uint32 dest_ip;
-
- iph = (uint8 *)eh + ETHER_HDR_LEN;
- dest_ip = ntoh32(*((uint32 *)(iph + IPV4_DEST_IP_OFFSET)));
-
- /* Only IP packets are handled */
- if (eh->ether_type != hton16(ETHER_TYPE_IP))
- return BCME_ERROR;
-
- /* Non-IPv4 multicast packets are not handled */
- if (IP_VER(iph) != IP_VER_4)
- return BCME_ERROR;
-
- /*
- * The packet has a multicast IP and unicast MAC. That means
- * we have to do the reverse translation
- */
- if (IPV4_ISMULTI(dest_ip) && !ETHER_ISMULTI(&eh->ether_dhost)) {
- ETHER_FILL_MCAST_ADDR_FROM_IP(eh->ether_dhost, dest_ip);
- return BCME_OK;
- }
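- /*
- * Illustrative mapping (standard IPv4 multicast-to-MAC rule, added for
- * clarity): 239.1.2.3 becomes 01:00:5e:01:02:03, i.e. the fixed prefix
- * 01:00:5e followed by the low 23 bits of the destination IP address.
- */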
-
- return BCME_ERROR;
-}
-#endif /* DHD_MCAST_REGEN */
-
-#ifdef SHOW_LOGTRACE
-static void
-dhd_netif_rx_ni(struct sk_buff * skb)
-{
- /* Do not call netif_receive_skb(), as this workqueue scheduler is
- * not invoked from NAPI. Also, since we are not in interrupt context,
- * do not call netif_rx(); instead call netif_rx_ni() (for kernel >= 2.6),
- * which does netif_rx(), disables IRQs, raises the NET_RX softirq and
- * re-enables interrupts.
- */
- netif_rx_ni(skb);
-}
-
-static int
-dhd_event_logtrace_pkt_process(dhd_pub_t *dhdp, struct sk_buff * skb)
-{
- dhd_info_t *dhd = (dhd_info_t *)dhdp->info;
- int ret = BCME_OK;
- uint datalen;
- bcm_event_msg_u_t evu;
- void *data = NULL;
- void *pktdata = NULL;
- bcm_event_t *pvt_data;
- uint pktlen;
-
- DHD_TRACE(("%s:Enter\n", __FUNCTION__));
-
- /* In dhd_rx_frame, header is stripped using skb_pull
- * of size ETH_HLEN, so adjust pktlen accordingly
- */
- pktlen = skb->len + ETH_HLEN;
-
- pktdata = (void *)skb_mac_header(skb);
- ret = wl_host_event_get_data(pktdata, pktlen, &evu);
-
- if (ret != BCME_OK) {
- DHD_ERROR(("%s: wl_host_event_get_data err = %d\n",
- __FUNCTION__, ret));
- goto exit;
- }
-
- datalen = ntoh32(evu.event.datalen);
-
- pvt_data = (bcm_event_t *)pktdata;
- data = &pvt_data[1];
-
- dhd_dbg_trace_evnt_handler(dhdp, data, &dhd->event_data, datalen);
-
-exit:
- return ret;
-}
-
-/*
- * dhd_event_logtrace_process_items processes
- * each skb from evt_trace_queue.
- * Returns TRUE if more packets to be processed
- * else returns FALSE
- */
-
-static int
-dhd_event_logtrace_process_items(dhd_info_t *dhd)
-{
- dhd_pub_t *dhdp;
- struct sk_buff *skb;
- uint32 qlen;
- uint32 process_len;
-
- if (!dhd) {
- DHD_ERROR(("%s: dhd info not available \n", __FUNCTION__));
- return 0;
- }
-
- dhdp = &dhd->pub;
-
- if (!dhdp) {
- DHD_ERROR(("%s: dhd pub is null \n", __FUNCTION__));
- return 0;
- }
-
- qlen = skb_queue_len(&dhd->evt_trace_queue);
- process_len = MIN(qlen, DHD_EVENT_LOGTRACE_BOUND);
-
- /* Run while loop till bound is reached or skb queue is empty */
- while (process_len--) {
- int ifid = 0;
- skb = skb_dequeue(&dhd->evt_trace_queue);
- if (skb == NULL) {
- DHD_ERROR(("%s: skb is NULL, which is not valid case\n",
- __FUNCTION__));
- break;
- }
- BCM_REFERENCE(ifid);
-#ifdef PCIE_FULL_DONGLE
- /* Check if pkt is from INFO ring or WLC_E_TRACE */
- ifid = DHD_PKTTAG_IFID((dhd_pkttag_fr_t *)PKTTAG(skb));
- if (ifid == DHD_DUMMY_INFO_IF) {
- /* Process logtrace from info rings */
- dhd_event_logtrace_infobuf_pkt_process(dhdp, skb, &dhd->event_data);
- } else
-#endif /* PCIE_FULL_DONGLE */
- {
- /* Process the WLC_E_TRACE case, or the non-PCIE_FULL_DONGLE case */
- dhd_event_logtrace_pkt_process(dhdp, skb);
- }
-
- /* Dummy sleep so that scheduler kicks in after processing any logprints */
- OSL_SLEEP(0);
-
- /* Send packet up if logtrace_pkt_sendup is TRUE */
- if (dhdp->logtrace_pkt_sendup) {
-#ifdef DHD_USE_STATIC_CTRLBUF
- /* If bufs are allocated via static buf pool
- * and logtrace_pkt_sendup enabled, make a copy,
- * free the local one and send the copy up.
- */
- void *npkt = PKTDUP(dhdp->osh, skb);
- /* Clone event and send it up */
- PKTFREE_STATIC(dhdp->osh, skb, FALSE);
- if (npkt) {
- skb = npkt;
- } else {
- DHD_ERROR(("skb clone failed. dropping logtrace pkt.\n"));
- /* Packet is already freed, go to next packet */
- continue;
- }
-#endif /* DHD_USE_STATIC_CTRLBUF */
-#ifdef PCIE_FULL_DONGLE
- /* For infobuf packets, where ifid is DHD_DUMMY_INFO_IF, assign
- * skb->dev to the primary interface's net device before sending
- * the skb to the network layer.
- */
- if (ifid == DHD_DUMMY_INFO_IF) {
- skb = PKTTONATIVE(dhdp->osh, skb);
- skb->dev = dhd->iflist[0]->net;
- }
-#endif /* PCIE_FULL_DONGLE */
- /* Send pkt UP */
- dhd_netif_rx_ni(skb);
- } else {
- /* Don't send up. Free up the packet. */
-#ifdef DHD_USE_STATIC_CTRLBUF
- PKTFREE_STATIC(dhdp->osh, skb, FALSE);
-#else
- PKTFREE(dhdp->osh, skb, FALSE);
-#endif /* DHD_USE_STATIC_CTRLBUF */
- }
- }
-
- /* Reschedule if more packets to be processed */
- return (qlen >= DHD_EVENT_LOGTRACE_BOUND);
-}
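-
-/*
- * Rescheduling illustration (the bound value is assumed purely for the
- * example and is not taken from this source): if DHD_EVENT_LOGTRACE_BOUND
- * were 10 and 25 skbs were queued, the caller would end up running this
- * function three times, draining 10, 10 and 5 packets, since TRUE is
- * returned whenever the queue length sampled at entry is at least the bound
- * (assuming nothing new is enqueued in the meantime).
- */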
-
-#ifdef DHD_USE_KTHREAD_FOR_LOGTRACE
-static int
-dhd_logtrace_thread(void *data)
-{
- tsk_ctl_t *tsk = (tsk_ctl_t *)data;
- dhd_info_t *dhd = (dhd_info_t *)tsk->parent;
- dhd_pub_t *dhdp = (dhd_pub_t *)&dhd->pub;
- int ret;
-
- while (1) {
- dhdp->logtrace_thr_ts.entry_time = OSL_LOCALTIME_NS();
- if (!binary_sema_down(tsk)) {
- dhdp->logtrace_thr_ts.sem_down_time = OSL_LOCALTIME_NS();
- SMP_RD_BARRIER_DEPENDS();
- if (dhd->pub.dongle_reset == FALSE) {
- do {
- /* Check terminated before processing the items */
- if (tsk->terminated) {
- DHD_ERROR(("%s: task terminated\n", __FUNCTION__));
- goto exit;
- }
-#ifdef EWP_EDL
- /* check if EDL is being used */
- if (dhd->pub.dongle_edl_support) {
- ret = dhd_prot_process_edl_complete(&dhd->pub,
- &dhd->event_data);
- } else {
- ret = dhd_event_logtrace_process_items(dhd);
- }
-#else
- ret = dhd_event_logtrace_process_items(dhd);
-#endif /* EWP_EDL */
- /* If ret > 0, the bound has been reached, so to be fair to other
- * processes we need to yield the scheduler.
- * The comment above yield()'s definition says:
- * If you want to use yield() to wait for something,
- * use wait_event().
- * If you want to use yield() to be 'nice' for others,
- * use cond_resched().
- * If you still want to use yield(), do not!
- */
- if (ret > 0) {
- cond_resched();
- OSL_SLEEP(DHD_EVENT_LOGTRACE_RESCHEDULE_DELAY_MS);
- } else if (ret < 0) {
- DHD_ERROR(("%s: ERROR should not reach here\n",
- __FUNCTION__));
- }
- } while (ret > 0);
- }
- if (tsk->flush_ind) {
- DHD_ERROR(("%s: flushed\n", __FUNCTION__));
- dhdp->logtrace_thr_ts.flush_time = OSL_LOCALTIME_NS();
- tsk->flush_ind = 0;
- complete(&tsk->flushed);
- }
- } else {
- DHD_ERROR(("%s: unexpted break\n", __FUNCTION__));
- dhdp->logtrace_thr_ts.unexpected_break_time = OSL_LOCALTIME_NS();
- break;
- }
- }
-exit:
- complete_and_exit(&tsk->completed, 0);
- dhdp->logtrace_thr_ts.complete_time = OSL_LOCALTIME_NS();
-}
-#else
-static void
-dhd_event_logtrace_process(struct work_struct * work)
-{
- int ret = 0;
-/* Ignore compiler warnings due to -Werror=cast-qual */
-#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
-#pragma GCC diagnostic push
-#pragma GCC diagnostic ignored "-Wcast-qual"
-#endif // endif
- struct delayed_work *dw = to_delayed_work(work);
- struct dhd_info *dhd =
- container_of(dw, struct dhd_info, event_log_dispatcher_work);
-#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
-#pragma GCC diagnostic pop
-#endif // endif
-#ifdef EWP_EDL
- if (dhd->pub.dongle_edl_support) {
- ret = dhd_prot_process_edl_complete(&dhd->pub, &dhd->event_data);
- } else {
- ret = dhd_event_logtrace_process_items(dhd);
- }
-#else
- ret = dhd_event_logtrace_process_items(dhd);
-#endif /* EWP_EDL */
-
- if (ret > 0) {
- schedule_delayed_work(&(dhd)->event_log_dispatcher_work,
- msecs_to_jiffies(DHD_EVENT_LOGTRACE_RESCHEDULE_DELAY_MS));
- }
-
- return;
-}
-#endif /* DHD_USE_KTHREAD_FOR_LOGTRACE */
-
-void
-dhd_schedule_logtrace(void *dhd_info)
-{
- dhd_info_t *dhd = (dhd_info_t *)dhd_info;
-
-#ifdef DHD_USE_KTHREAD_FOR_LOGTRACE
- if (dhd->thr_logtrace_ctl.thr_pid >= 0) {
- binary_sema_up(&dhd->thr_logtrace_ctl);
- } else {
- DHD_ERROR(("%s: thr_logtrace_ctl(%ld) not inited\n", __FUNCTION__,
- dhd->thr_logtrace_ctl.thr_pid));
- }
-#else
- schedule_delayed_work(&dhd->event_log_dispatcher_work, 0);
-#endif /* DHD_USE_KTHREAD_FOR_LOGTRACE */
- return;
-}
-
-void
-dhd_cancel_logtrace_process_sync(dhd_info_t *dhd)
-{
-#ifdef DHD_USE_KTHREAD_FOR_LOGTRACE
- if (dhd->thr_logtrace_ctl.thr_pid >= 0) {
- PROC_STOP_USING_BINARY_SEMA(&dhd->thr_logtrace_ctl);
- } else {
- DHD_ERROR(("%s: thr_logtrace_ctl(%ld) not inited\n", __FUNCTION__,
- dhd->thr_logtrace_ctl.thr_pid));
- }
-#else
- cancel_delayed_work_sync(&dhd->event_log_dispatcher_work);
-#endif /* DHD_USE_KTHREAD_FOR_LOGTRACE */
-}
-
-void
-dhd_flush_logtrace_process(dhd_info_t *dhd)
-{
-#ifdef DHD_USE_KTHREAD_FOR_LOGTRACE
- if (dhd->thr_logtrace_ctl.thr_pid >= 0) {
- PROC_FLUSH_USING_BINARY_SEMA(&dhd->thr_logtrace_ctl);
- } else {
- DHD_ERROR(("%s: thr_logtrace_ctl(%ld) not inited\n", __FUNCTION__,
- dhd->thr_logtrace_ctl.thr_pid));
- }
-#else
- flush_delayed_work(&dhd->event_log_dispatcher_work);
-#endif /* DHD_USE_KTHREAD_FOR_LOGTRACE */
-}
-
-int
-dhd_init_logtrace_process(dhd_info_t *dhd)
-{
-#ifdef DHD_USE_KTHREAD_FOR_LOGTRACE
- dhd->thr_logtrace_ctl.thr_pid = DHD_PID_KT_INVALID;
- PROC_START(dhd_logtrace_thread, dhd, &dhd->thr_logtrace_ctl, 0, "dhd_logtrace_thread");
- if (dhd->thr_logtrace_ctl.thr_pid < 0) {
- DHD_ERROR(("%s: init logtrace process failed\n", __FUNCTION__));
- return BCME_ERROR;
- } else {
- DHD_ERROR(("%s: thr_logtrace_ctl(%ld) succedded\n", __FUNCTION__,
- dhd->thr_logtrace_ctl.thr_pid));
- }
-#else
- INIT_DELAYED_WORK(&dhd->event_log_dispatcher_work, dhd_event_logtrace_process);
-#endif /* DHD_USE_KTHREAD_FOR_LOGTRACE */
- return BCME_OK;
-}
-
-int
-dhd_reinit_logtrace_process(dhd_info_t *dhd)
-{
-#ifdef DHD_USE_KTHREAD_FOR_LOGTRACE
- /* Re-init only if PROC_STOP from dhd_stop was called
- * which can be checked via thr_pid
- */
- if (dhd->thr_logtrace_ctl.thr_pid < 0) {
- PROC_START(dhd_logtrace_thread, dhd, &dhd->thr_logtrace_ctl,
- 0, "dhd_logtrace_thread");
- if (dhd->thr_logtrace_ctl.thr_pid < 0) {
- DHD_ERROR(("%s: reinit logtrace process failed\n", __FUNCTION__));
- return BCME_ERROR;
- } else {
- DHD_ERROR(("%s: thr_logtrace_ctl(%ld) succedded\n", __FUNCTION__,
- dhd->thr_logtrace_ctl.thr_pid));
- }
- }
-#else
- /* No need to re-init for the WQ case, as cancel_delayed_work_sync()
- * does not delete the work queue.
- */
-#endif /* DHD_USE_KTHREAD_FOR_LOGTRACE */
- return BCME_OK;
-}
-
-void
-dhd_event_logtrace_enqueue(dhd_pub_t *dhdp, int ifidx, void *pktbuf)
-{
- dhd_info_t *dhd = (dhd_info_t *)dhdp->info;
-
-#ifdef PCIE_FULL_DONGLE
- /* Add ifidx in the PKTTAG */
- DHD_PKTTAG_SET_IFID((dhd_pkttag_fr_t *)PKTTAG(pktbuf), ifidx);
-#endif /* PCIE_FULL_DONGLE */
- skb_queue_tail(&dhd->evt_trace_queue, pktbuf);
-
- dhd_schedule_logtrace(dhd);
-}
-
-void
-dhd_event_logtrace_flush_queue(dhd_pub_t *dhdp)
-{
- dhd_info_t *dhd = (dhd_info_t *)dhdp->info;
- struct sk_buff *skb;
-
- while ((skb = skb_dequeue(&dhd->evt_trace_queue)) != NULL) {
-#ifdef DHD_USE_STATIC_CTRLBUF
- PKTFREE_STATIC(dhdp->osh, skb, FALSE);
-#else
- PKTFREE(dhdp->osh, skb, FALSE);
-#endif /* DHD_USE_STATIC_CTRLBUF */
- }
-}
-
-void
-dhd_sendup_info_buf(dhd_pub_t *dhdp, uint8 *msg)
-{
- struct sk_buff *skb = NULL;
- uint32 pktsize = 0;
- void *pkt = NULL;
- info_buf_payload_hdr_t *infobuf = NULL;
- dhd_info_t *dhd = dhdp->info;
- uint8 *pktdata = NULL;
-
- if (!msg)
- return;
-
- /* msg = |infobuf_ver(u32)|info_buf_payload_hdr_t|msgtrace_hdr_t|<var len data>| */
- infobuf = (info_buf_payload_hdr_t *)(msg + sizeof(uint32));
- pktsize = (uint32)(ltoh16(infobuf->length) + sizeof(info_buf_payload_hdr_t) +
- sizeof(uint32));
- pkt = PKTGET(dhdp->osh, pktsize, FALSE);
- if (!pkt) {
- DHD_ERROR(("%s: skb alloc failed ! not sending event log up.\n", __FUNCTION__));
- } else {
- PKTSETLEN(dhdp->osh, pkt, pktsize);
- pktdata = PKTDATA(dhdp->osh, pkt);
- memcpy(pktdata, msg, pktsize);
- /* For infobuf packets assign skb->dev with
- * Primary interface n/w device
- */
- skb = PKTTONATIVE(dhdp->osh, pkt);
- skb->dev = dhd->iflist[0]->net;
- /* Send pkt UP */
- dhd_netif_rx_ni(skb);
- }
-}
-#endif /* SHOW_LOGTRACE */
-
-/** Called when a frame is received by the dongle on interface 'ifidx' */
-void
-dhd_rx_frame(dhd_pub_t *dhdp, int ifidx, void *pktbuf, int numpkt, uint8 chan)
-{
- dhd_info_t *dhd = (dhd_info_t *)dhdp->info;
- struct sk_buff *skb;
- uchar *eth;
- uint len;
- void *data, *pnext = NULL;
- int i;
- dhd_if_t *ifp;
- wl_event_msg_t event;
- int tout_rx = 0;
- int tout_ctrl = 0;
- void *skbhead = NULL;
- void *skbprev = NULL;
- uint16 protocol;
- unsigned char *dump_data;
-#ifdef DHD_MCAST_REGEN
- uint8 interface_role;
- if_flow_lkup_t *if_flow_lkup;
- unsigned long flags;
-#endif // endif
-#ifdef DHD_WAKE_STATUS
- int pkt_wake = 0;
- wake_counts_t *wcp = NULL;
-#endif /* DHD_WAKE_STATUS */
-
- DHD_TRACE(("%s: Enter\n", __FUNCTION__));
- BCM_REFERENCE(dump_data);
-
- for (i = 0; pktbuf && i < numpkt; i++, pktbuf = pnext) {
- struct ether_header *eh;
-
- pnext = PKTNEXT(dhdp->osh, pktbuf);
- PKTSETNEXT(dhdp->osh, pktbuf, NULL);
-
- /* info ring "debug" data, which is not an 802.3 frame, is sent/hacked with a
- * special ifidx of DHD_DUMMY_INFO_IF. This is just internal to dhd to get the data
- * from dhd_msgbuf.c:dhd_prot_infobuf_cmplt_process() to here (dhd_rx_frame).
- */
- if (ifidx == DHD_DUMMY_INFO_IF) {
- /* Event message printing is called from dhd_rx_frame, which runs in
- * tasklet context for PCIe FD and in DPC context for other buses. If we
- * get a burst of events from the dongle, printing all of them from
- * tasklet/DPC context, in the data path, is costly. Also, in newer
- * dongle SW (4359, 4355 onwards) console prints arrive as events of
- * type WLC_E_TRACE. So these console logs are printed from workqueue
- * context: the SKB is enqueued here, dequeued in the workqueue, and
- * sent up only if logtrace_pkt_sendup is TRUE (otherwise freed there).
- */
-#ifdef SHOW_LOGTRACE
- dhd_event_logtrace_enqueue(dhdp, ifidx, pktbuf);
-#else /* !SHOW_LOGTRACE */
- /* If SHOW_LOGTRACE is not defined and ifidx is DHD_DUMMY_INFO_IF,
- * free the packet right here.
- */
-#ifdef DHD_USE_STATIC_CTRLBUF
- PKTFREE_STATIC(dhdp->osh, pktbuf, FALSE);
-#else
- PKTFREE(dhdp->osh, pktbuf, FALSE);
-#endif /* DHD_USE_STATIC_CTRLBUF */
-#endif /* SHOW_LOGTRACE */
- continue;
- }
-#ifdef DHD_WAKE_STATUS
-#ifdef BCMDBUS
- wcp = NULL;
-#else
- pkt_wake = dhd_bus_get_bus_wake(dhdp);
- wcp = dhd_bus_get_wakecount(dhdp);
-#endif /* BCMDBUS */
- if (wcp == NULL) {
- /* If wakeinfo count buffer is null do not update wake count values */
- pkt_wake = 0;
- }
-#endif /* DHD_WAKE_STATUS */
-
- eh = (struct ether_header *)PKTDATA(dhdp->osh, pktbuf);
-
- if (ifidx >= DHD_MAX_IFS) {
- DHD_ERROR(("%s: ifidx(%d) Out of bound. drop packet\n",
- __FUNCTION__, ifidx));
- if (ntoh16(eh->ether_type) == ETHER_TYPE_BRCM) {
-#ifdef DHD_USE_STATIC_CTRLBUF
- PKTFREE_STATIC(dhdp->osh, pktbuf, FALSE);
-#else
- PKTFREE(dhdp->osh, pktbuf, FALSE);
-#endif /* DHD_USE_STATIC_CTRLBUF */
- } else {
- PKTCFREE(dhdp->osh, pktbuf, FALSE);
- }
- continue;
- }
-
- ifp = dhd->iflist[ifidx];
- if (ifp == NULL) {
- DHD_ERROR(("%s: ifp is NULL. drop packet\n",
- __FUNCTION__));
- if (ntoh16(eh->ether_type) == ETHER_TYPE_BRCM) {
-#ifdef DHD_USE_STATIC_CTRLBUF
- PKTFREE_STATIC(dhdp->osh, pktbuf, FALSE);
-#else
- PKTFREE(dhdp->osh, pktbuf, FALSE);
-#endif /* DHD_USE_STATIC_CTRLBUF */
- } else {
- PKTCFREE(dhdp->osh, pktbuf, FALSE);
- }
- continue;
- }
-
- /* Drop only data packets before the net device is registered, to avoid a kernel panic */
-#ifndef PROP_TXSTATUS_VSDB
- if ((!ifp->net || ifp->net->reg_state != NETREG_REGISTERED) &&
- (ntoh16(eh->ether_type) != ETHER_TYPE_BRCM))
-#else
- if ((!ifp->net || ifp->net->reg_state != NETREG_REGISTERED || !dhd->pub.up) &&
- (ntoh16(eh->ether_type) != ETHER_TYPE_BRCM))
-#endif /* PROP_TXSTATUS_VSDB */
- {
- DHD_ERROR(("%s: net device is NOT registered yet. drop packet\n",
- __FUNCTION__));
- PKTCFREE(dhdp->osh, pktbuf, FALSE);
- continue;
- }
-
-#ifdef PROP_TXSTATUS
- if (dhd_wlfc_is_header_only_pkt(dhdp, pktbuf)) {
- /* WLFC may send header only packet when
- there is an urgent message but no packet to
- piggy-back on
- */
- PKTCFREE(dhdp->osh, pktbuf, FALSE);
- continue;
- }
-#endif // endif
-#ifdef DHD_L2_FILTER
- /* If block_ping is enabled drop the ping packet */
- if (ifp->block_ping) {
- if (bcm_l2_filter_block_ping(dhdp->osh, pktbuf) == BCME_OK) {
- PKTCFREE(dhdp->osh, pktbuf, FALSE);
- continue;
- }
- }
- if (ifp->grat_arp && DHD_IF_ROLE_STA(dhdp, ifidx)) {
- if (bcm_l2_filter_gratuitous_arp(dhdp->osh, pktbuf) == BCME_OK) {
- PKTCFREE(dhdp->osh, pktbuf, FALSE);
- continue;
- }
- }
- if (ifp->parp_enable && DHD_IF_ROLE_AP(dhdp, ifidx)) {
- int ret = dhd_l2_filter_pkt_handle(dhdp, ifidx, pktbuf, FALSE);
-
- /* Drop the packets if l2 filter has processed it already
- * otherwise continue with the normal path
- */
- if (ret == BCME_OK) {
- PKTCFREE(dhdp->osh, pktbuf, TRUE);
- continue;
- }
- }
- if (ifp->block_tdls) {
- if (bcm_l2_filter_block_tdls(dhdp->osh, pktbuf) == BCME_OK) {
- PKTCFREE(dhdp->osh, pktbuf, FALSE);
- continue;
- }
- }
-#endif /* DHD_L2_FILTER */
-
-#ifdef DHD_MCAST_REGEN
- DHD_FLOWID_LOCK(dhdp->flowid_lock, flags);
- if_flow_lkup = (if_flow_lkup_t *)dhdp->if_flow_lkup;
- ASSERT(if_flow_lkup);
-
- interface_role = if_flow_lkup[ifidx].role;
- DHD_FLOWID_UNLOCK(dhdp->flowid_lock, flags);
-
- if (ifp->mcast_regen_bss_enable && (interface_role != WLC_E_IF_ROLE_WDS) &&
- !DHD_IF_ROLE_AP(dhdp, ifidx) &&
- ETHER_ISUCAST(eh->ether_dhost)) {
- if (dhd_mcast_reverse_translation(eh) == BCME_OK) {
-#ifdef DHD_PSTA
- /* Change bsscfg to primary bsscfg for unicast-multicast packets */
- if ((dhd_get_psta_mode(dhdp) == DHD_MODE_PSTA) ||
- (dhd_get_psta_mode(dhdp) == DHD_MODE_PSR)) {
- if (ifidx != 0) {
- /* Let the primary in PSTA interface handle this
- * frame after unicast to Multicast conversion
- */
- ifp = dhd_get_ifp(dhdp, 0);
- ASSERT(ifp);
- }
- }
- }
-#endif /* PSTA */
- }
-#endif /* MCAST_REGEN */
-
-#ifdef DHDTCPSYNC_FLOOD_BLK
- if (dhd_tcpdata_get_flag(dhdp, pktbuf) == FLAG_SYNC) {
- int delta_sec;
- int delta_sync;
- int sync_per_sec;
- u64 curr_time = DIV_U64_BY_U32(OSL_LOCALTIME_NS(), NSEC_PER_SEC);
- ifp->tsync_rcvd ++;
- delta_sync = ifp->tsync_rcvd - ifp->tsyncack_txed;
- delta_sec = curr_time - ifp->last_sync;
- if (delta_sec > 1) {
- sync_per_sec = delta_sync/delta_sec;
- if (sync_per_sec > TCP_SYNC_FLOOD_LIMIT) {
- schedule_work(&ifp->blk_tsfl_work);
- DHD_ERROR(("ifx %d TCP SYNC Flood attack suspected! "
- "sync recvied %d pkt/sec \n",
- ifidx, sync_per_sec));
- }
- dhd_reset_tcpsync_info_by_ifp(ifp);
- }
-
- }
-#endif /* DHDTCPSYNC_FLOOD_BLK */
-
-#ifdef DHDTCPACK_SUPPRESS
- dhd_tcpdata_info_get(dhdp, pktbuf);
-#endif // endif
- skb = PKTTONATIVE(dhdp->osh, pktbuf);
-
- ASSERT(ifp);
- skb->dev = ifp->net;
-#ifdef DHD_WET
- /* wet related packet proto manipulation should be done in DHD
- * since dongle doesn't have complete payload
- */
- if (WET_ENABLED(&dhd->pub) && (dhd_wet_recv_proc(dhd->pub.wet_info,
- pktbuf) < 0)) {
- DHD_INFO(("%s:%s: wet recv proc failed\n",
- __FUNCTION__, dhd_ifname(dhdp, ifidx)));
- }
-#endif /* DHD_WET */
-
-#ifdef DHD_PSTA
- if (PSR_ENABLED(dhdp) &&
- (dhd_psta_proc(dhdp, ifidx, &pktbuf, FALSE) < 0)) {
- DHD_ERROR(("%s:%s: psta recv proc failed\n", __FUNCTION__,
- dhd_ifname(dhdp, ifidx)));
- }
-#endif /* DHD_PSTA */
-
-#ifdef PCIE_FULL_DONGLE
- if ((DHD_IF_ROLE_AP(dhdp, ifidx) || DHD_IF_ROLE_P2PGO(dhdp, ifidx)) &&
- (!ifp->ap_isolate)) {
- eh = (struct ether_header *)PKTDATA(dhdp->osh, pktbuf);
- if (ETHER_ISUCAST(eh->ether_dhost)) {
- if (dhd_find_sta(dhdp, ifidx, (void *)eh->ether_dhost)) {
- dhd_sendpkt(dhdp, ifidx, pktbuf);
- continue;
- }
- } else {
- void *npktbuf = NULL;
- if ((ntoh16(eh->ether_type) != ETHER_TYPE_IAPP_L2_UPDATE) &&
- (npktbuf = PKTDUP(dhdp->osh, pktbuf)) != NULL) {
- dhd_sendpkt(dhdp, ifidx, npktbuf);
- }
- }
- }
-#endif /* PCIE_FULL_DONGLE */
-#ifdef DHD_POST_EAPOL_M1_AFTER_ROAM_EVT
- if (IS_STA_IFACE(ndev_to_wdev(ifp->net)) &&
- (ifp->recv_reassoc_evt == TRUE) && (ifp->post_roam_evt == FALSE) &&
- (dhd_is_4way_msg((char *)(skb->data)) == EAPOL_4WAY_M1)) {
- DHD_ERROR(("%s: Reassoc is in progress. "
- "Drop EAPOL M1 frame\n", __FUNCTION__));
- PKTFREE(dhdp->osh, pktbuf, FALSE);
- continue;
- }
-#endif /* DHD_POST_EAPOL_M1_AFTER_ROAM_EVT */
- /* Get the protocol, maintain skb around eth_type_trans()
- * The main reason for this hack is for the limitation of
- * Linux 2.4 where 'eth_type_trans' uses the 'net->hard_header_len'
- * to perform skb_pull inside vs ETH_HLEN. Since to avoid
- * coping of the packet coming from the network stack to add
- * BDC, Hardware header etc, during network interface registration
- * we set the 'net->hard_header_len' to ETH_HLEN + extra space required
- * for BDC, Hardware header etc. and not just the ETH_HLEN
- */
- eth = skb->data;
- len = skb->len;
- dump_data = skb->data;
- protocol = (skb->data[12] << 8) | skb->data[13];
-
- if (protocol == ETHER_TYPE_802_1X) {
- DBG_EVENT_LOG(dhdp, WIFI_EVENT_DRIVER_EAPOL_FRAME_RECEIVED);
-#if defined(WL_CFG80211) && defined(WL_WPS_SYNC)
- wl_handle_wps_states(ifp->net, dump_data, len, FALSE);
-#endif /* WL_CFG80211 && WL_WPS_SYNC */
-#ifdef DHD_4WAYM4_FAIL_DISCONNECT
- if (dhd_is_4way_msg((uint8 *)(skb->data)) == EAPOL_4WAY_M3) {
- OSL_ATOMIC_SET(dhdp->osh, &ifp->m4state, M3_RXED);
- }
-#endif /* DHD_4WAYM4_FAIL_DISCONNECT */
- }
- dhd_dump_pkt(dhdp, ifidx, dump_data, len, FALSE, NULL, NULL);
-
- skb->protocol = eth_type_trans(skb, skb->dev);
-
- if (skb->pkt_type == PACKET_MULTICAST) {
- dhd->pub.rx_multicast++;
- ifp->stats.multicast++;
- }
-
- skb->data = eth;
- skb->len = len;
-
- DHD_DBG_PKT_MON_RX(dhdp, skb);
- /* Strip header, count, deliver upward */
- skb_pull(skb, ETH_HLEN);
-
- /* Process special event packets and then discard them */
- memset(&event, 0, sizeof(event));
-
- if (ntoh16(skb->protocol) == ETHER_TYPE_BRCM) {
- bcm_event_msg_u_t evu;
- int ret_event, event_type;
- void *pkt_data = skb_mac_header(skb);
-
- ret_event = wl_host_event_get_data(pkt_data, len, &evu);
-
- if (ret_event != BCME_OK) {
- DHD_ERROR(("%s: wl_host_event_get_data err = %d\n",
- __FUNCTION__, ret_event));
-#ifdef DHD_USE_STATIC_CTRLBUF
- PKTFREE_STATIC(dhdp->osh, pktbuf, FALSE);
-#else
- PKTFREE(dhdp->osh, pktbuf, FALSE);
-#endif // endif
- continue;
- }
-
- memcpy(&event, &evu.event, sizeof(wl_event_msg_t));
- event_type = ntoh32_ua((void *)&event.event_type);
-#ifdef SHOW_LOGTRACE
- /* Event msg printing is called from dhd_rx_frame which is in Tasklet
- * context in case of PCIe FD, in case of other bus this will be from
- * DPC context. If we get bunch of events from Dongle then printing all
- * of them from Tasklet/DPC context that too in data path is costly.
- * Also in the new Dongle SW(4359, 4355 onwards) console prints too come as
- * events with type WLC_E_TRACE.
- * We'll print this console logs from the WorkQueue context by enqueing SKB
- * here and Dequeuing will be done in WorkQueue and will be freed only if
- * logtrace_pkt_sendup is true
- */
- if (event_type == WLC_E_TRACE) {
- DHD_EVENT(("%s: WLC_E_TRACE\n", __FUNCTION__));
- dhd_event_logtrace_enqueue(dhdp, ifidx, pktbuf);
- continue;
- }
-#endif /* SHOW_LOGTRACE */
-
- ret_event = dhd_wl_host_event(dhd, ifidx, pkt_data, len, &event, &data);
-
- wl_event_to_host_order(&event);
- if (!tout_ctrl)
- tout_ctrl = DHD_PACKET_TIMEOUT_MS;
-
-#if defined(PNO_SUPPORT)
- if (event_type == WLC_E_PFN_NET_FOUND) {
- /* enforce custom wake lock to garantee that Kernel not suspended */
- tout_ctrl = CUSTOM_PNO_EVENT_LOCK_xTIME * DHD_PACKET_TIMEOUT_MS;
- }
-#endif /* PNO_SUPPORT */
- if (numpkt != 1) {
- DHD_TRACE(("%s: Got BRCM event packet in a chained packet.\n",
- __FUNCTION__));
- }
-
-#ifdef DHD_WAKE_STATUS
- if (unlikely(pkt_wake)) {
-#ifdef DHD_WAKE_EVENT_STATUS
- if (event.event_type < WLC_E_LAST) {
- wcp->rc_event[event.event_type]++;
- wcp->rcwake++;
- pkt_wake = 0;
- }
-#endif /* DHD_WAKE_EVENT_STATUS */
- }
-#endif /* DHD_WAKE_STATUS */
-
- /* For delete virtual interface event, wl_host_event returns positive
- * i/f index, do not proceed. just free the pkt.
- */
- if ((event_type == WLC_E_IF) && (ret_event > 0)) {
- DHD_ERROR(("%s: interface is deleted. Free event packet\n",
- __FUNCTION__));
-#ifdef DHD_USE_STATIC_CTRLBUF
- PKTFREE_STATIC(dhdp->osh, pktbuf, FALSE);
-#else
- PKTFREE(dhdp->osh, pktbuf, FALSE);
-#endif // endif
- continue;
- }
-
- /*
- * For the event packets, there is a possibility
- * of ifidx getting modifed.Thus update the ifp
- * once again.
- */
- ASSERT(ifidx < DHD_MAX_IFS && dhd->iflist[ifidx]);
- ifp = dhd->iflist[ifidx];
-#ifndef PROP_TXSTATUS_VSDB
- if (!(ifp && ifp->net && (ifp->net->reg_state == NETREG_REGISTERED)))
-#else
- if (!(ifp && ifp->net && (ifp->net->reg_state == NETREG_REGISTERED) &&
- dhd->pub.up))
-#endif /* PROP_TXSTATUS_VSDB */
- {
- DHD_ERROR(("%s: net device is NOT registered. drop event packet\n",
- __FUNCTION__));
-#ifdef DHD_USE_STATIC_CTRLBUF
- PKTFREE_STATIC(dhdp->osh, pktbuf, FALSE);
-#else
- PKTFREE(dhdp->osh, pktbuf, FALSE);
-#endif // endif
- continue;
- }
-
-#ifdef SENDPROB
- if (dhdp->wl_event_enabled ||
- (dhdp->recv_probereq && (event.event_type == WLC_E_PROBREQ_MSG)))
-#else
- if (dhdp->wl_event_enabled)
-#endif
- {
-#ifdef DHD_USE_STATIC_CTRLBUF
- /* If event bufs are allocated via static buf pool
- * and wl events are enabled, make a copy, free the
- * local one and send the copy up.
- */
- void *npkt = PKTDUP(dhdp->osh, skb);
- /* Clone event and send it up */
- PKTFREE_STATIC(dhdp->osh, pktbuf, FALSE);
- if (npkt) {
- skb = npkt;
- } else {
- DHD_ERROR(("skb clone failed. dropping event.\n"));
- continue;
- }
-#endif /* DHD_USE_STATIC_CTRLBUF */
- } else {
- /* If event enabled not explictly set, drop events */
-#ifdef DHD_USE_STATIC_CTRLBUF
- PKTFREE_STATIC(dhdp->osh, pktbuf, FALSE);
-#else
- PKTFREE(dhdp->osh, pktbuf, FALSE);
-#endif /* DHD_USE_STATIC_CTRLBUF */
- continue;
- }
- } else {
- tout_rx = DHD_PACKET_TIMEOUT_MS;
-
-#ifdef PROP_TXSTATUS
- dhd_wlfc_save_rxpath_ac_time(dhdp, (uint8)PKTPRIO(skb));
-#endif /* PROP_TXSTATUS */
-
-#ifdef DHD_WAKE_STATUS
- if (unlikely(pkt_wake)) {
- wcp->rxwake++;
-#ifdef DHD_WAKE_RX_STATUS
-#define ETHER_ICMP6_HEADER 20
-#define ETHER_IPV6_SADDR (ETHER_ICMP6_HEADER + 2)
-#define ETHER_IPV6_DAADR (ETHER_IPV6_SADDR + IPV6_ADDR_LEN)
-#define ETHER_ICMPV6_TYPE (ETHER_IPV6_DAADR + IPV6_ADDR_LEN)
-
- if (ntoh16(skb->protocol) == ETHER_TYPE_ARP) /* ARP */
- wcp->rx_arp++;
- if (dump_data[0] == 0xFF) { /* Broadcast */
- wcp->rx_bcast++;
- } else if (dump_data[0] & 0x01) { /* Multicast */
- wcp->rx_mcast++;
- if (ntoh16(skb->protocol) == ETHER_TYPE_IPV6) {
- wcp->rx_multi_ipv6++;
- if ((skb->len > ETHER_ICMP6_HEADER) &&
- (dump_data[ETHER_ICMP6_HEADER] == IPPROTO_ICMPV6)) {
- wcp->rx_icmpv6++;
- if (skb->len > ETHER_ICMPV6_TYPE) {
- switch (dump_data[ETHER_ICMPV6_TYPE]) {
- case NDISC_ROUTER_ADVERTISEMENT:
- wcp->rx_icmpv6_ra++;
- break;
- case NDISC_NEIGHBOUR_ADVERTISEMENT:
- wcp->rx_icmpv6_na++;
- break;
- case NDISC_NEIGHBOUR_SOLICITATION:
- wcp->rx_icmpv6_ns++;
- break;
- }
- }
- }
- } else if (dump_data[2] == 0x5E) {
- wcp->rx_multi_ipv4++;
- } else {
- wcp->rx_multi_other++;
- }
- } else { /* Unicast */
- wcp->rx_ucast++;
- }
-#undef ETHER_ICMP6_HEADER
-#undef ETHER_IPV6_SADDR
-#undef ETHER_IPV6_DAADR
-#undef ETHER_ICMPV6_TYPE
-#endif /* DHD_WAKE_RX_STATUS */
- pkt_wake = 0;
- }
-#endif /* DHD_WAKE_STATUS */
- }
-
-#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 11, 0)
- ifp->net->last_rx = jiffies;
-#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(4, 11, 0) */
-
- if (ntoh16(skb->protocol) != ETHER_TYPE_BRCM) {
- dhdp->dstats.rx_bytes += skb->len;
- dhdp->rx_packets++; /* Local count */
- ifp->stats.rx_bytes += skb->len;
- ifp->stats.rx_packets++;
- }
-
- if (in_interrupt()) {
- bcm_object_trace_opr(skb, BCM_OBJDBG_REMOVE,
- __FUNCTION__, __LINE__);
- DHD_PERIM_UNLOCK_ALL((dhd->fwder_unit % FWDER_MAX_UNIT));
-#if defined(DHD_LB_RXP)
- netif_receive_skb(skb);
-#else /* !defined(DHD_LB_RXP) */
- netif_rx(skb);
-#endif /* !defined(DHD_LB_RXP) */
- DHD_PERIM_LOCK_ALL((dhd->fwder_unit % FWDER_MAX_UNIT));
- } else {
- if (dhd->rxthread_enabled) {
- if (!skbhead)
- skbhead = skb;
- else
- PKTSETNEXT(dhdp->osh, skbprev, skb);
- skbprev = skb;
- } else {
-
- /* If the receive is not processed inside an ISR,
- * the softirqd must be woken explicitly to service
- * the NET_RX_SOFTIRQ. In 2.6 kernels, this is handled
- * by netif_rx_ni(), but in earlier kernels, we need
- * to do it manually.
- */
- bcm_object_trace_opr(skb, BCM_OBJDBG_REMOVE,
- __FUNCTION__, __LINE__);
-
-#if defined(ARGOS_NOTIFY_CB)
- argos_register_notifier_deinit();
-#endif // endif
-#if defined(BCMPCIE) && defined(DHDTCPACK_SUPPRESS)
- dhd_tcpack_suppress_set(&dhd->pub, TCPACK_SUP_OFF);
-#endif /* BCMPCIE && DHDTCPACK_SUPPRESS */
- DHD_PERIM_UNLOCK_ALL((dhd->fwder_unit % FWDER_MAX_UNIT));
-#if defined(DHD_LB_RXP)
- netif_receive_skb(skb);
-#else /* !defined(DHD_LB_RXP) */
- netif_rx_ni(skb);
-#endif /* defined(DHD_LB_RXP) */
- DHD_PERIM_LOCK_ALL((dhd->fwder_unit % FWDER_MAX_UNIT));
- }
- }
- }
-
- if (dhd->rxthread_enabled && skbhead)
- dhd_sched_rxf(dhdp, skbhead);
-
- DHD_OS_WAKE_LOCK_RX_TIMEOUT_ENABLE(dhdp, tout_rx);
- DHD_OS_WAKE_LOCK_CTRL_TIMEOUT_ENABLE(dhdp, tout_ctrl);
-}
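/*
 * Illustrative sketch (not part of this patch): the removed dhd_rx_frame()
 * saves skb->data/skb->len around eth_type_trans() and restores them before
 * doing its own skb_pull(ETH_HLEN). As the original comment explains, this
 * hack exists because old (2.4-era) kernels had eth_type_trans() pull
 * net->hard_header_len - which DHD inflates to reserve room for BDC/bus
 * headers - rather than ETH_HLEN. The helper name below
 * (demo_classify_and_strip) is invented for the example.
 */
#include <linux/etherdevice.h>
#include <linux/if_ether.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>

static void demo_classify_and_strip(struct sk_buff *skb, struct net_device *dev)
{
	unsigned char *mac_hdr = skb->data;	/* remember the MAC header start */
	unsigned int frame_len = skb->len;

	/* fills in skb->protocol/pkt_type; may pull more than ETH_HLEN on
	 * the old kernels this workaround targets
	 */
	skb->protocol = eth_type_trans(skb, dev);

	/* undo whatever eth_type_trans() pulled ... */
	skb->data = mac_hdr;
	skb->len = frame_len;

	/* ... and strip exactly the 14-byte Ethernet header ourselves */
	skb_pull(skb, ETH_HLEN);
}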
-
-void
-dhd_event(struct dhd_info *dhd, char *evpkt, int evlen, int ifidx)
-{
- /* Linux version has nothing to do */
- return;
-}
-
-void
-dhd_txcomplete(dhd_pub_t *dhdp, void *txp, bool success)
-{
- dhd_info_t *dhd = (dhd_info_t *)(dhdp->info);
- struct ether_header *eh;
- uint16 type;
-
- dhd_prot_hdrpull(dhdp, NULL, txp, NULL, NULL);
-
- eh = (struct ether_header *)PKTDATA(dhdp->osh, txp);
- type = ntoh16(eh->ether_type);
-
- if (type == ETHER_TYPE_802_1X) {
- atomic_dec(&dhd->pend_8021x_cnt);
- }
-
-#ifdef PROP_TXSTATUS
- if (dhdp->wlfc_state && (dhdp->proptxstatus_mode != WLFC_FCMODE_NONE)) {
- dhd_if_t *ifp = dhd->iflist[DHD_PKTTAG_IF(PKTTAG(txp))];
- uint datalen = PKTLEN(dhd->pub.osh, txp);
- if (ifp != NULL) {
- if (success) {
- dhd->pub.tx_packets++;
- ifp->stats.tx_packets++;
- ifp->stats.tx_bytes += datalen;
- } else {
- ifp->stats.tx_dropped++;
- }
- }
- }
-#endif // endif
-}
-
-static struct net_device_stats *
-dhd_get_stats(struct net_device *net)
-{
- dhd_info_t *dhd = DHD_DEV_INFO(net);
- dhd_if_t *ifp;
-
- DHD_TRACE(("%s: Enter\n", __FUNCTION__));
-
- if (!dhd) {
- DHD_ERROR(("%s : dhd is NULL\n", __FUNCTION__));
- goto error;
- }
-
- ifp = dhd_get_ifp_by_ndev(&dhd->pub, net);
- if (!ifp) {
- /* return empty stats */
- DHD_ERROR(("%s: BAD_IF\n", __FUNCTION__));
- goto error;
- }
-
- if (dhd->pub.up) {
- /* Use the protocol to get dongle stats */
- dhd_prot_dstats(&dhd->pub);
- }
- return &ifp->stats;
-
-error:
- memset(&net->stats, 0, sizeof(net->stats));
- return &net->stats;
-}
-
-#ifndef BCMDBUS
-static int
-dhd_watchdog_thread(void *data)
-{
- tsk_ctl_t *tsk = (tsk_ctl_t *)data;
- dhd_info_t *dhd = (dhd_info_t *)tsk->parent;
- /* This thread doesn't need any user-level access,
- * so get rid of all our resources
- */
- if (dhd_watchdog_prio > 0) {
- struct sched_param param;
- param.sched_priority = (dhd_watchdog_prio < MAX_RT_PRIO)?
- dhd_watchdog_prio:(MAX_RT_PRIO-1);
- setScheduler(current, SCHED_FIFO, &param);
- }
-
- while (1) {
- if (down_interruptible (&tsk->sema) == 0) {
- unsigned long flags;
- unsigned long jiffies_at_start = jiffies;
- unsigned long time_lapse;
-#ifdef BCMPCIE
- DHD_OS_WD_WAKE_LOCK(&dhd->pub);
-#endif /* BCMPCIE */
-
- SMP_RD_BARRIER_DEPENDS();
- if (tsk->terminated) {
-#ifdef BCMPCIE
- DHD_OS_WD_WAKE_UNLOCK(&dhd->pub);
-#endif /* BCMPCIE */
- break;
- }
-
- if (dhd->pub.dongle_reset == FALSE) {
- DHD_TIMER(("%s:\n", __FUNCTION__));
- dhd_bus_watchdog(&dhd->pub);
-
- DHD_GENERAL_LOCK(&dhd->pub, flags);
- /* Count the tick for reference */
- dhd->pub.tickcnt++;
-#ifdef DHD_L2_FILTER
- dhd_l2_filter_watchdog(&dhd->pub);
-#endif /* DHD_L2_FILTER */
- time_lapse = jiffies - jiffies_at_start;
-
- /* Reschedule the watchdog */
- if (dhd->wd_timer_valid) {
- mod_timer(&dhd->timer,
- jiffies +
- msecs_to_jiffies(dhd_watchdog_ms) -
- min(msecs_to_jiffies(dhd_watchdog_ms), time_lapse));
- }
- DHD_GENERAL_UNLOCK(&dhd->pub, flags);
- }
-#ifdef BCMPCIE
- DHD_OS_WD_WAKE_UNLOCK(&dhd->pub);
-#endif /* BCMPCIE */
- } else {
- break;
- }
- }
-
- complete_and_exit(&tsk->completed, 0);
-}
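/*
 * Illustrative sketch (not part of this patch): the removed watchdog thread
 * re-arms its timer for "interval minus the time already spent in the
 * handler", clamped so that the expiry never lands in the past. The helper
 * below shows the same arithmetic with an invented timer and interval.
 */
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/timer.h>

static void demo_watchdog_rearm(struct timer_list *t, unsigned int interval_ms,
	unsigned long jiffies_at_start)
{
	unsigned long budget = msecs_to_jiffies(interval_ms);
	unsigned long spent = jiffies - jiffies_at_start;

	/* if the handler overran the interval, fire again as soon as possible */
	mod_timer(t, jiffies + budget - min(budget, spent));
}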
-
-static void dhd_watchdog(ulong data)
-{
- dhd_info_t *dhd = (dhd_info_t *)data;
- unsigned long flags;
-
- if (dhd->pub.dongle_reset) {
- return;
- }
-
- if (dhd->thr_wdt_ctl.thr_pid >= 0) {
- up(&dhd->thr_wdt_ctl.sema);
- return;
- }
-
-#ifdef BCMPCIE
- DHD_OS_WD_WAKE_LOCK(&dhd->pub);
-#endif /* BCMPCIE */
- /* Call the bus module watchdog */
- dhd_bus_watchdog(&dhd->pub);
-
- DHD_GENERAL_LOCK(&dhd->pub, flags);
- /* Count the tick for reference */
- dhd->pub.tickcnt++;
-
-#ifdef DHD_L2_FILTER
- dhd_l2_filter_watchdog(&dhd->pub);
-#endif /* DHD_L2_FILTER */
- /* Reschedule the watchdog */
- if (dhd->wd_timer_valid)
- mod_timer(&dhd->timer, jiffies + msecs_to_jiffies(dhd_watchdog_ms));
- DHD_GENERAL_UNLOCK(&dhd->pub, flags);
-#ifdef BCMPCIE
- DHD_OS_WD_WAKE_UNLOCK(&dhd->pub);
-#endif /* BCMPCIE */
-}
-
-#ifdef ENABLE_ADAPTIVE_SCHED
-static void
-dhd_sched_policy(int prio)
-{
- struct sched_param param;
- if (cpufreq_quick_get(0) <= CUSTOM_CPUFREQ_THRESH) {
- param.sched_priority = 0;
- setScheduler(current, SCHED_NORMAL, &param);
- } else {
- if (get_scheduler_policy(current) != SCHED_FIFO) {
- param.sched_priority = (prio < MAX_RT_PRIO)? prio : (MAX_RT_PRIO-1);
- setScheduler(current, SCHED_FIFO, &param);
- }
- }
-}
-#endif /* ENABLE_ADAPTIVE_SCHED */
-#ifdef DEBUG_CPU_FREQ
-static int dhd_cpufreq_notifier(struct notifier_block *nb, unsigned long val, void *data)
-{
- dhd_info_t *dhd = container_of(nb, struct dhd_info, freq_trans);
- struct cpufreq_freqs *freq = data;
- if (dhd) {
- if (!dhd->new_freq)
- goto exit;
- if (val == CPUFREQ_POSTCHANGE) {
- DHD_ERROR(("cpu freq is changed to %u kHZ on CPU %d\n",
- freq->new, freq->cpu));
- *per_cpu_ptr(dhd->new_freq, freq->cpu) = freq->new;
- }
- }
-exit:
- return 0;
-}
-#endif /* DEBUG_CPU_FREQ */
-
-static int
-dhd_dpc_thread(void *data)
-{
- tsk_ctl_t *tsk = (tsk_ctl_t *)data;
- dhd_info_t *dhd = (dhd_info_t *)tsk->parent;
-
- /* This thread doesn't need any user-level access,
- * so get rid of all our resources
- */
- if (dhd_dpc_prio > 0)
- {
- struct sched_param param;
- param.sched_priority = (dhd_dpc_prio < MAX_RT_PRIO)?dhd_dpc_prio:(MAX_RT_PRIO-1);
- setScheduler(current, SCHED_FIFO, &param);
- }
-
-#ifdef CUSTOM_DPC_CPUCORE
- set_cpus_allowed_ptr(current, cpumask_of(CUSTOM_DPC_CPUCORE));
-#endif // endif
-#ifdef CUSTOM_SET_CPUCORE
- dhd->pub.current_dpc = current;
-#endif /* CUSTOM_SET_CPUCORE */
- /* Run until signal received */
- while (1) {
- if (dhd->pub.conf->dpc_cpucore >= 0) {
- printf("%s: set dpc_cpucore %d\n", __FUNCTION__, dhd->pub.conf->dpc_cpucore);
- set_cpus_allowed_ptr(current, cpumask_of(dhd->pub.conf->dpc_cpucore));
- dhd->pub.conf->dpc_cpucore = -1;
- }
- if (!binary_sema_down(tsk)) {
-#ifdef ENABLE_ADAPTIVE_SCHED
- dhd_sched_policy(dhd_dpc_prio);
-#endif /* ENABLE_ADAPTIVE_SCHED */
- SMP_RD_BARRIER_DEPENDS();
- if (tsk->terminated) {
- break;
- }
-
- /* Call bus dpc unless it indicated down (then clean stop) */
- if (dhd->pub.busstate != DHD_BUS_DOWN) {
-#ifdef DEBUG_DPC_THREAD_WATCHDOG
- int resched_cnt = 0;
-#endif /* DEBUG_DPC_THREAD_WATCHDOG */
- dhd_os_wd_timer_extend(&dhd->pub, TRUE);
- while (dhd_bus_dpc(dhd->pub.bus)) {
- /* process all data */
-#ifdef DEBUG_DPC_THREAD_WATCHDOG
- resched_cnt++;
- if (resched_cnt > MAX_RESCHED_CNT) {
- DHD_INFO(("%s Calling msleep to"
- "let other processes run. \n",
- __FUNCTION__));
- dhd->pub.dhd_bug_on = true;
- resched_cnt = 0;
- OSL_SLEEP(1);
- }
-#endif /* DEBUG_DPC_THREAD_WATCHDOG */
- }
- dhd_os_wd_timer_extend(&dhd->pub, FALSE);
- DHD_OS_WAKE_UNLOCK(&dhd->pub);
- } else {
- if (dhd->pub.up)
- dhd_bus_stop(dhd->pub.bus, TRUE);
- DHD_OS_WAKE_UNLOCK(&dhd->pub);
- }
- } else {
- break;
- }
- }
- complete_and_exit(&tsk->completed, 0);
-}
-
-static int
-dhd_rxf_thread(void *data)
-{
- tsk_ctl_t *tsk = (tsk_ctl_t *)data;
- dhd_info_t *dhd = (dhd_info_t *)tsk->parent;
-#if defined(WAIT_DEQUEUE)
-#define RXF_WATCHDOG_TIME 250 /* BARK_TIME(1000) / */
- ulong watchdogTime = OSL_SYSUPTIME(); /* msec */
-#endif // endif
- dhd_pub_t *pub = &dhd->pub;
-
- /* This thread doesn't need any user-level access,
- * so get rid of all our resources
- */
- if (dhd_rxf_prio > 0)
- {
- struct sched_param param;
- param.sched_priority = (dhd_rxf_prio < MAX_RT_PRIO)?dhd_rxf_prio:(MAX_RT_PRIO-1);
- setScheduler(current, SCHED_FIFO, &param);
- }
-
-#ifdef CUSTOM_SET_CPUCORE
- dhd->pub.current_rxf = current;
-#endif /* CUSTOM_SET_CPUCORE */
- /* Run until signal received */
- while (1) {
- if (dhd->pub.conf->rxf_cpucore >= 0) {
- printf("%s: set rxf_cpucore %d\n", __FUNCTION__, dhd->pub.conf->rxf_cpucore);
- set_cpus_allowed_ptr(current, cpumask_of(dhd->pub.conf->rxf_cpucore));
- dhd->pub.conf->rxf_cpucore = -1;
- }
- if (down_interruptible(&tsk->sema) == 0) {
- void *skb;
-#ifdef ENABLE_ADAPTIVE_SCHED
- dhd_sched_policy(dhd_rxf_prio);
-#endif /* ENABLE_ADAPTIVE_SCHED */
-
- SMP_RD_BARRIER_DEPENDS();
-
- if (tsk->terminated) {
- break;
- }
- skb = dhd_rxf_dequeue(pub);
-
- if (skb == NULL) {
- continue;
- }
- while (skb) {
- void *skbnext = PKTNEXT(pub->osh, skb);
- PKTSETNEXT(pub->osh, skb, NULL);
- bcm_object_trace_opr(skb, BCM_OBJDBG_REMOVE,
- __FUNCTION__, __LINE__);
- netif_rx_ni(skb);
- skb = skbnext;
- }
-#if defined(WAIT_DEQUEUE)
- if (OSL_SYSUPTIME() - watchdogTime > RXF_WATCHDOG_TIME) {
- OSL_SLEEP(1);
- watchdogTime = OSL_SYSUPTIME();
- }
-#endif // endif
-
- DHD_OS_WAKE_UNLOCK(pub);
- } else {
- break;
- }
- }
- complete_and_exit(&tsk->completed, 0);
-}
-
-#ifdef BCMPCIE
-void dhd_dpc_enable(dhd_pub_t *dhdp)
-{
-#if defined(DHD_LB_RXP) || defined(DHD_LB_TXP)
- dhd_info_t *dhd;
-
- if (!dhdp || !dhdp->info)
- return;
- dhd = dhdp->info;
-#endif /* DHD_LB_RXP || DHD_LB_TXP */
-
-#ifdef DHD_LB_RXP
- __skb_queue_head_init(&dhd->rx_pend_queue);
-#endif /* DHD_LB_RXP */
-
-#ifdef DHD_LB_TXP
- skb_queue_head_init(&dhd->tx_pend_queue);
-#endif /* DHD_LB_TXP */
-}
-#endif /* BCMPCIE */
-
-#ifdef BCMPCIE
-void
-dhd_dpc_kill(dhd_pub_t *dhdp)
-{
- dhd_info_t *dhd;
-
- if (!dhdp) {
- return;
- }
-
- dhd = dhdp->info;
-
- if (!dhd) {
- return;
- }
-
- if (dhd->thr_dpc_ctl.thr_pid < 0) {
- tasklet_kill(&dhd->tasklet);
- DHD_ERROR(("%s: tasklet disabled\n", __FUNCTION__));
- }
-
-#ifdef DHD_LB
-#ifdef DHD_LB_RXP
- cancel_work_sync(&dhd->rx_napi_dispatcher_work);
- __skb_queue_purge(&dhd->rx_pend_queue);
-#endif /* DHD_LB_RXP */
-#ifdef DHD_LB_TXP
- cancel_work_sync(&dhd->tx_dispatcher_work);
- skb_queue_purge(&dhd->tx_pend_queue);
-#endif /* DHD_LB_TXP */
-
- /* Kill the Load Balancing Tasklets */
-#if defined(DHD_LB_TXC)
- tasklet_kill(&dhd->tx_compl_tasklet);
-#endif /* DHD_LB_TXC */
-#if defined(DHD_LB_RXC)
- tasklet_kill(&dhd->rx_compl_tasklet);
-#endif /* DHD_LB_RXC */
-#if defined(DHD_LB_TXP)
- tasklet_kill(&dhd->tx_tasklet);
-#endif /* DHD_LB_TXP */
-#endif /* DHD_LB */
-}
-
-void
-dhd_dpc_tasklet_kill(dhd_pub_t *dhdp)
-{
- dhd_info_t *dhd;
-
- if (!dhdp) {
- return;
- }
-
- dhd = dhdp->info;
-
- if (!dhd) {
- return;
- }
-
- if (dhd->thr_dpc_ctl.thr_pid < 0) {
- tasklet_kill(&dhd->tasklet);
- }
-}
-#endif /* BCMPCIE */
-
-static void
-dhd_dpc(ulong data)
-{
- dhd_info_t *dhd;
-
- dhd = (dhd_info_t *)data;
-
- /* this (tasklet) can be scheduled in dhd_sched_dpc[dhd_linux.c]
- * down below , wake lock is set,
- * the tasklet is initialized in dhd_attach()
- */
- /* Call bus dpc unless it indicated down (then clean stop) */
- if (dhd->pub.busstate != DHD_BUS_DOWN) {
-#if defined(DHD_LB_STATS) && defined(PCIE_FULL_DONGLE)
- DHD_LB_STATS_INCR(dhd->dhd_dpc_cnt);
-#endif /* DHD_LB_STATS && PCIE_FULL_DONGLE */
- if (dhd_bus_dpc(dhd->pub.bus)) {
- tasklet_schedule(&dhd->tasklet);
- }
- } else {
- dhd_bus_stop(dhd->pub.bus, TRUE);
- }
-}
-
-void
-dhd_sched_dpc(dhd_pub_t *dhdp)
-{
- dhd_info_t *dhd = (dhd_info_t *)dhdp->info;
-
- if (dhd->thr_dpc_ctl.thr_pid >= 0) {
- DHD_OS_WAKE_LOCK(dhdp);
- /* If the semaphore does not get up,
- * wake unlock should be done here
- */
- if (!binary_sema_up(&dhd->thr_dpc_ctl)) {
- DHD_OS_WAKE_UNLOCK(dhdp);
- }
- return;
- } else {
- dhd_bus_set_dpc_sched_time(dhdp);
- tasklet_schedule(&dhd->tasklet);
- }
-}
-#endif /* BCMDBUS */
-
-static void
-dhd_sched_rxf(dhd_pub_t *dhdp, void *skb)
-{
- dhd_info_t *dhd = (dhd_info_t *)dhdp->info;
-
- DHD_OS_WAKE_LOCK(dhdp);
-
- DHD_TRACE(("dhd_sched_rxf: Enter\n"));
- do {
- if (dhd_rxf_enqueue(dhdp, skb) == BCME_OK)
- break;
- } while (1);
- if (dhd->thr_rxf_ctl.thr_pid >= 0) {
- up(&dhd->thr_rxf_ctl.sema);
- }
- return;
-}
-
-#if defined(BCM_DNGL_EMBEDIMAGE) || defined(BCM_REQUEST_FW)
-#endif /* defined(BCM_DNGL_EMBEDIMAGE) || defined(BCM_REQUEST_FW) */
-
-#ifdef TOE
-/* Retrieve current toe component enables, which are kept as a bitmap in toe_ol iovar */
-static int
-dhd_toe_get(dhd_info_t *dhd, int ifidx, uint32 *toe_ol)
-{
- char buf[32];
- int ret;
-
- ret = dhd_iovar(&dhd->pub, ifidx, "toe_ol", NULL, 0, (char *)&buf, sizeof(buf), FALSE);
-
- if (ret < 0) {
- if (ret == -EIO) {
- DHD_ERROR(("%s: toe not supported by device\n", dhd_ifname(&dhd->pub,
- ifidx)));
- return -EOPNOTSUPP;
- }
-
- DHD_INFO(("%s: could not get toe_ol: ret=%d\n", dhd_ifname(&dhd->pub, ifidx), ret));
- return ret;
- }
-
- memcpy(toe_ol, buf, sizeof(uint32));
- return 0;
-}
-
-/* Set current toe component enables in toe_ol iovar, and set toe global enable iovar */
-static int
-dhd_toe_set(dhd_info_t *dhd, int ifidx, uint32 toe_ol)
-{
- int toe, ret;
-
- /* Set toe_ol as requested */
- ret = dhd_iovar(&dhd->pub, ifidx, "toe_ol", (char *)&toe_ol, sizeof(toe_ol), NULL, 0, TRUE);
- if (ret < 0) {
- DHD_ERROR(("%s: could not set toe_ol: ret=%d\n",
- dhd_ifname(&dhd->pub, ifidx), ret));
- return ret;
- }
-
- /* Enable toe globally only if any components are enabled. */
- toe = (toe_ol != 0);
- ret = dhd_iovar(&dhd->pub, ifidx, "toe", (char *)&toe, sizeof(toe), NULL, 0, TRUE);
- if (ret < 0) {
- DHD_ERROR(("%s: could not set toe: ret=%d\n", dhd_ifname(&dhd->pub, ifidx), ret));
- return ret;
- }
-
- return 0;
+ return sta;
}
-#endif /* TOE */
-#if defined(WL_CFG80211) && defined(NUM_SCB_MAX_PROBE)
-void dhd_set_scb_probe(dhd_pub_t *dhd)
+/** Delete all STAs from the interface's STA list. */
+void
+dhd_del_all_sta(void *pub, int ifidx)
{
- wl_scb_probe_t scb_probe;
- char iovbuf[WL_EVENTING_MASK_LEN + sizeof(wl_scb_probe_t)];
- int ret;
-
- if (dhd->op_mode & DHD_FLAG_HOSTAP_MODE) {
- return;
- }
-
- ret = dhd_iovar(dhd, 0, "scb_probe", NULL, 0, iovbuf, sizeof(iovbuf), FALSE);
- if (ret < 0) {
- DHD_ERROR(("%s: GET max_scb_probe failed\n", __FUNCTION__));
- }
-
- memcpy(&scb_probe, iovbuf, sizeof(wl_scb_probe_t));
-
- scb_probe.scb_max_probe = NUM_SCB_MAX_PROBE;
+ dhd_sta_t *sta, *next;
+ dhd_if_t *ifp;
+ unsigned long flags;
- ret = dhd_iovar(dhd, 0, "scb_probe", (char *)&scb_probe, sizeof(wl_scb_probe_t), NULL, 0,
- TRUE);
- if (ret < 0) {
- DHD_ERROR(("%s: max_scb_probe setting failed\n", __FUNCTION__));
+ ifp = dhd_get_ifp((dhd_pub_t *)pub, ifidx);
+ if (ifp == NULL)
return;
- }
-}
-#endif /* WL_CFG80211 && NUM_SCB_MAX_PROBE */
-
-static void
-dhd_ethtool_get_drvinfo(struct net_device *net, struct ethtool_drvinfo *info)
-{
- dhd_info_t *dhd = DHD_DEV_INFO(net);
-
- snprintf(info->driver, sizeof(info->driver), "wl");
- snprintf(info->version, sizeof(info->version), "%lu", dhd->pub.drv_version);
-}
-
-struct ethtool_ops dhd_ethtool_ops = {
- .get_drvinfo = dhd_ethtool_get_drvinfo
-};
-
-static int
-dhd_ethtool(dhd_info_t *dhd, void *uaddr)
-{
- struct ethtool_drvinfo info;
- char drvname[sizeof(info.driver)];
- uint32 cmd;
-#ifdef TOE
- struct ethtool_value edata;
- uint32 toe_cmpnt, csum_dir;
- int ret;
-#endif // endif
-
- DHD_TRACE(("%s: Enter\n", __FUNCTION__));
- /* all ethtool calls start with a cmd word */
- if (copy_from_user(&cmd, uaddr, sizeof (uint32)))
- return -EFAULT;
-
- switch (cmd) {
- case ETHTOOL_GDRVINFO:
- /* Copy out any request driver name */
- if (copy_from_user(&info, uaddr, sizeof(info)))
- return -EFAULT;
- strncpy(drvname, info.driver, sizeof(drvname) - 1);
- drvname[sizeof(drvname) - 1] = '\0';
-
- /* clear struct for return */
- memset(&info, 0, sizeof(info));
- info.cmd = cmd;
-
- /* if dhd requested, identify ourselves */
- if (strcmp(drvname, "?dhd") == 0) {
- snprintf(info.driver, sizeof(info.driver), "dhd");
- strncpy(info.version, EPI_VERSION_STR, sizeof(info.version) - 1);
- info.version[sizeof(info.version) - 1] = '\0';
- }
-
- /* otherwise, require dongle to be up */
- else if (!dhd->pub.up) {
- DHD_ERROR(("%s: dongle is not up\n", __FUNCTION__));
- return -ENODEV;
+ DHD_IF_STA_LIST_LOCK(ifp, flags);
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wcast-qual"
+#endif
+ list_for_each_entry_safe(sta, next, &ifp->sta_list, list) {
+#if defined(BCM_GMAC3)
+ if (ifp->fwdh) { /* Found a sta, remove from WOFA forwarder. */
+ ASSERT(ISALIGNED(sta->ea.octet, 2));
+ fwder_deassoc(ifp->fwdh, (uint16 *)sta->ea.octet, (uintptr_t)sta);
}
+#endif /* BCM_GMAC3 */
- /* finally, report dongle driver type */
- else if (dhd->pub.iswl)
- snprintf(info.driver, sizeof(info.driver), "wl");
- else
- snprintf(info.driver, sizeof(info.driver), "xx");
-
- snprintf(info.version, sizeof(info.version), "%lu", dhd->pub.drv_version);
- if (copy_to_user(uaddr, &info, sizeof(info)))
- return -EFAULT;
- DHD_CTL(("%s: given %*s, returning %s\n", __FUNCTION__,
- (int)sizeof(drvname), drvname, info.driver));
- break;
-
-#ifdef TOE
- /* Get toe offload components from dongle */
- case ETHTOOL_GRXCSUM:
- case ETHTOOL_GTXCSUM:
- if ((ret = dhd_toe_get(dhd, 0, &toe_cmpnt)) < 0)
- return ret;
-
- csum_dir = (cmd == ETHTOOL_GTXCSUM) ? TOE_TX_CSUM_OL : TOE_RX_CSUM_OL;
-
- edata.cmd = cmd;
- edata.data = (toe_cmpnt & csum_dir) ? 1 : 0;
-
- if (copy_to_user(uaddr, &edata, sizeof(edata)))
- return -EFAULT;
- break;
-
- /* Set toe offload components in dongle */
- case ETHTOOL_SRXCSUM:
- case ETHTOOL_STXCSUM:
- if (copy_from_user(&edata, uaddr, sizeof(edata)))
- return -EFAULT;
-
- /* Read the current settings, update and write back */
- if ((ret = dhd_toe_get(dhd, 0, &toe_cmpnt)) < 0)
- return ret;
-
- csum_dir = (cmd == ETHTOOL_STXCSUM) ? TOE_TX_CSUM_OL : TOE_RX_CSUM_OL;
-
- if (edata.data != 0)
- toe_cmpnt |= csum_dir;
- else
- toe_cmpnt &= ~csum_dir;
-
- if ((ret = dhd_toe_set(dhd, 0, toe_cmpnt)) < 0)
- return ret;
-
- /* If setting TX checksum mode, tell Linux the new mode */
- if (cmd == ETHTOOL_STXCSUM) {
- if (edata.data)
- dhd->iflist[0]->net->features |= NETIF_F_IP_CSUM;
- else
- dhd->iflist[0]->net->features &= ~NETIF_F_IP_CSUM;
+ list_del(&sta->list);
+ dhd_sta_free(&ifp->info->pub, sta);
+#ifdef DHD_L2_FILTER
+ if (ifp->parp_enable) {
+ /* clear Proxy ARP cache of specific Ethernet Address */
+ bcm_l2_filter_arp_table_update(((dhd_pub_t*)pub)->osh,
+ ifp->phnd_arp_table, FALSE,
+ sta->ea.octet, FALSE, ((dhd_pub_t*)pub)->tickcnt);
}
-
- break;
-#endif /* TOE */
-
- default:
- return -EOPNOTSUPP;
+#endif /* DHD_L2_FILTER */
}
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic pop
+#endif
+ DHD_IF_STA_LIST_UNLOCK(ifp, flags);
- return 0;
+ return;
}
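/*
 * Illustrative sketch (not part of this patch): dhd_del_all_sta() above uses
 * list_for_each_entry_safe() so that the node being visited can be unlinked
 * and freed without breaking the traversal, all while holding the interface
 * spinlock. The structure and helper below (struct demo_sta, demo_flush_all)
 * are invented for the example.
 */
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/types.h>

struct demo_sta {
	u8 ea[6];		/* station MAC address */
	struct list_head list;	/* linkage into the per-interface list */
};

static void demo_flush_all(struct list_head *sta_list, spinlock_t *lock)
{
	struct demo_sta *sta, *next;
	unsigned long flags;

	spin_lock_irqsave(lock, flags);
	/* the "_safe" variant caches the next node, so list_del() plus
	 * kfree() of the current node cannot corrupt the walk
	 */
	list_for_each_entry_safe(sta, next, sta_list, list) {
		list_del(&sta->list);
		kfree(sta);	/* kfree() does not sleep, so this is lock-safe */
	}
	spin_unlock_irqrestore(lock, flags);
}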
-static bool dhd_check_hang(struct net_device *net, dhd_pub_t *dhdp, int error)
+/** Delete STA from the interface's STA list. */
+void
+dhd_del_sta(void *pub, int ifidx, void *ea)
{
- if (!dhdp) {
- DHD_ERROR(("%s: dhdp is NULL\n", __FUNCTION__));
- return FALSE;
- }
-
- if (!dhdp->up)
- return FALSE;
+ dhd_sta_t *sta, *next;
+ dhd_if_t *ifp;
+ unsigned long flags;
+ char macstr[ETHER_ADDR_STR_LEN];
-#if !defined(BCMPCIE) && !defined(BCMDBUS)
- if (dhdp->info->thr_dpc_ctl.thr_pid < 0) {
- DHD_ERROR(("%s : skipped due to negative pid - unloading?\n", __FUNCTION__));
- return FALSE;
- }
-#endif /* !BCMPCIE && !BCMDBUS */
+ ASSERT(ea != NULL);
+ ifp = dhd_get_ifp((dhd_pub_t *)pub, ifidx);
+ if (ifp == NULL)
+ return;
- if ((error == -ETIMEDOUT) || (error == -EREMOTEIO) ||
- ((dhdp->busstate == DHD_BUS_DOWN) && (!dhdp->dongle_reset))) {
-#ifdef BCMPCIE
- DHD_ERROR(("%s: Event HANG send up due to re=%d te=%d d3acke=%d e=%d s=%d\n",
- __FUNCTION__, dhdp->rxcnt_timeout, dhdp->txcnt_timeout,
- dhdp->d3ackcnt_timeout, error, dhdp->busstate));
-#else
- DHD_ERROR(("%s: Event HANG send up due to re=%d te=%d e=%d s=%d\n", __FUNCTION__,
- dhdp->rxcnt_timeout, dhdp->txcnt_timeout, error, dhdp->busstate));
-#endif /* BCMPCIE */
- if (dhdp->hang_reason == 0) {
- if (dhdp->dongle_trap_occured) {
- dhdp->hang_reason = HANG_REASON_DONGLE_TRAP;
-#ifdef BCMPCIE
- } else if (dhdp->d3ackcnt_timeout) {
- dhdp->hang_reason = dhdp->is_sched_error ?
- HANG_REASON_D3_ACK_TIMEOUT_SCHED_ERROR :
- HANG_REASON_D3_ACK_TIMEOUT;
-#endif /* BCMPCIE */
- } else {
- dhdp->hang_reason = dhdp->is_sched_error ?
- HANG_REASON_IOCTL_RESP_TIMEOUT_SCHED_ERROR :
- HANG_REASON_IOCTL_RESP_TIMEOUT;
+ DHD_IF_STA_LIST_LOCK(ifp, flags);
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wcast-qual"
+#endif
+ list_for_each_entry_safe(sta, next, &ifp->sta_list, list) {
+ if (!memcmp(sta->ea.octet, ea, ETHER_ADDR_LEN)) {
+#if defined(BCM_GMAC3)
+ if (ifp->fwdh) { /* Found a sta, remove from WOFA forwarder. */
+ ASSERT(ISALIGNED(ea, 2));
+ fwder_deassoc(ifp->fwdh, (uint16 *)ea, (uintptr_t)sta);
}
+#endif /* BCM_GMAC3 */
+ DHD_MAC_TO_STR(((char *)ea), macstr);
+ DHD_ERROR(("%s: Deleting STA %s\n", __FUNCTION__, macstr));
+ list_del(&sta->list);
+ dhd_sta_free(&ifp->info->pub, sta);
}
- printf("%s\n", info_string);
- net_os_send_hang_message(net);
- return TRUE;
}
- return FALSE;
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic pop
+#endif
+ DHD_IF_STA_LIST_UNLOCK(ifp, flags);
+#ifdef DHD_L2_FILTER
+ if (ifp->parp_enable) {
+ /* clear Proxy ARP cache of specific Ethernet Address */
+ bcm_l2_filter_arp_table_update(((dhd_pub_t*)pub)->osh, ifp->phnd_arp_table, FALSE,
+ ea, FALSE, ((dhd_pub_t*)pub)->tickcnt);
+ }
+#endif /* DHD_L2_FILTER */
+ return;
}
-#ifdef WL_MONITOR
-bool
-dhd_monitor_enabled(dhd_pub_t *dhd, int ifidx)
+/** Add STA if it doesn't exist. Not reentrant. */
+dhd_sta_t*
+dhd_findadd_sta(void *pub, int ifidx, void *ea)
{
- return (dhd->info->monitor_type != 0);
+ dhd_sta_t *sta;
+
+ sta = dhd_find_sta(pub, ifidx, ea);
+
+ if (!sta) {
+ /* Add entry */
+ sta = dhd_add_sta(pub, ifidx, ea);
+ }
+
+ return sta;
}
-void
-dhd_rx_mon_pkt(dhd_pub_t *dhdp, host_rxbuf_cmpl_t* msg, void *pkt, int ifidx)
+#if defined(DHD_IGMP_UCQUERY) || defined(DHD_UCAST_UPNP)
+#if !defined(BCM_GMAC3)
+static struct list_head *
+dhd_sta_list_snapshot(dhd_info_t *dhd, dhd_if_t *ifp, struct list_head *snapshot_list)
{
- dhd_info_t *dhd = (dhd_info_t *)dhdp->info;
- {
- uint8 amsdu_flag = (msg->flags & BCMPCIE_PKT_FLAGS_MONITOR_MASK) >>
- BCMPCIE_PKT_FLAGS_MONITOR_SHIFT;
- switch (amsdu_flag) {
- case BCMPCIE_PKT_FLAGS_MONITOR_NO_AMSDU:
- default:
- if (!dhd->monitor_skb) {
- if ((dhd->monitor_skb = PKTTONATIVE(dhdp->osh, pkt))
- == NULL)
- return;
- }
- if (dhd->monitor_type && dhd->monitor_dev)
- dhd->monitor_skb->dev = dhd->monitor_dev;
- else {
- PKTFREE(dhdp->osh, pkt, FALSE);
- dhd->monitor_skb = NULL;
- return;
- }
- dhd->monitor_skb->protocol =
- eth_type_trans(dhd->monitor_skb, dhd->monitor_skb->dev);
- dhd->monitor_len = 0;
- break;
+ unsigned long flags;
+ dhd_sta_t *sta, *snapshot;
- case BCMPCIE_PKT_FLAGS_MONITOR_FIRST_PKT:
- if (!dhd->monitor_skb) {
- if ((dhd->monitor_skb = dev_alloc_skb(MAX_MON_PKT_SIZE))
- == NULL)
- return;
- dhd->monitor_len = 0;
- }
- if (dhd->monitor_type && dhd->monitor_dev)
- dhd->monitor_skb->dev = dhd->monitor_dev;
- else {
- PKTFREE(dhdp->osh, pkt, FALSE);
- dev_kfree_skb(dhd->monitor_skb);
- return;
- }
- memcpy(PKTDATA(dhdp->osh, dhd->monitor_skb),
- PKTDATA(dhdp->osh, pkt), PKTLEN(dhdp->osh, pkt));
- dhd->monitor_len = PKTLEN(dhdp->osh, pkt);
- PKTFREE(dhdp->osh, pkt, FALSE);
- return;
+ INIT_LIST_HEAD(snapshot_list);
- case BCMPCIE_PKT_FLAGS_MONITOR_INTER_PKT:
- memcpy(PKTDATA(dhdp->osh, dhd->monitor_skb) + dhd->monitor_len,
- PKTDATA(dhdp->osh, pkt), PKTLEN(dhdp->osh, pkt));
- dhd->monitor_len += PKTLEN(dhdp->osh, pkt);
- PKTFREE(dhdp->osh, pkt, FALSE);
- return;
+ DHD_IF_STA_LIST_LOCK(ifp, flags);
- case BCMPCIE_PKT_FLAGS_MONITOR_LAST_PKT:
- memcpy(PKTDATA(dhdp->osh, dhd->monitor_skb) + dhd->monitor_len,
- PKTDATA(dhdp->osh, pkt), PKTLEN(dhdp->osh, pkt));
- dhd->monitor_len += PKTLEN(dhdp->osh, pkt);
- PKTFREE(dhdp->osh, pkt, FALSE);
- skb_put(dhd->monitor_skb, dhd->monitor_len);
- dhd->monitor_skb->protocol =
- eth_type_trans(dhd->monitor_skb, dhd->monitor_skb->dev);
- dhd->monitor_len = 0;
- break;
+ list_for_each_entry(sta, &ifp->sta_list, list) {
+ /* allocate one and add to snapshot */
+ snapshot = (dhd_sta_t *)MALLOC(dhd->pub.osh, sizeof(dhd_sta_t));
+ if (snapshot == NULL) {
+ DHD_ERROR(("%s: Cannot allocate memory\n", __FUNCTION__));
+ continue;
}
- }
- if (in_interrupt()) {
- bcm_object_trace_opr(skb, BCM_OBJDBG_REMOVE,
- __FUNCTION__, __LINE__);
- DHD_PERIM_UNLOCK_ALL((dhd->fwder_unit % FWDER_MAX_UNIT));
- netif_rx(dhd->monitor_skb);
- DHD_PERIM_LOCK_ALL((dhd->fwder_unit % FWDER_MAX_UNIT));
- } else {
- /* If the receive is not processed inside an ISR,
- * the softirqd must be woken explicitly to service
- * the NET_RX_SOFTIRQ. In 2.6 kernels, this is handled
- * by netif_rx_ni(), but in earlier kernels, we need
- * to do it manually.
- */
- bcm_object_trace_opr(dhd->monitor_skb, BCM_OBJDBG_REMOVE,
- __FUNCTION__, __LINE__);
+ memcpy(snapshot->ea.octet, sta->ea.octet, ETHER_ADDR_LEN);
- DHD_PERIM_UNLOCK_ALL((dhd->fwder_unit % FWDER_MAX_UNIT));
- netif_rx_ni(dhd->monitor_skb);
- DHD_PERIM_LOCK_ALL((dhd->fwder_unit % FWDER_MAX_UNIT));
+ INIT_LIST_HEAD(&snapshot->list);
+ list_add_tail(&snapshot->list, snapshot_list);
}
- dhd->monitor_skb = NULL;
-}
-
-typedef struct dhd_mon_dev_priv {
- struct net_device_stats stats;
-} dhd_mon_dev_priv_t;
-
-#define DHD_MON_DEV_PRIV_SIZE (sizeof(dhd_mon_dev_priv_t))
-#define DHD_MON_DEV_PRIV(dev) ((dhd_mon_dev_priv_t *)DEV_PRIV(dev))
-#define DHD_MON_DEV_STATS(dev) (((dhd_mon_dev_priv_t *)DEV_PRIV(dev))->stats)
+ DHD_IF_STA_LIST_UNLOCK(ifp, flags);
-static int
-dhd_monitor_start(struct sk_buff *skb, struct net_device *dev)
-{
- PKTFREE(NULL, skb, FALSE);
- return 0;
+ return snapshot_list;
}
-#if defined(BT_OVER_SDIO)
-
-void
-dhdsdio_bus_usr_cnt_inc(dhd_pub_t *dhdp)
+static void
+dhd_sta_list_snapshot_free(dhd_info_t *dhd, struct list_head *snapshot_list)
{
- dhdp->info->bus_user_count++;
-}
+ dhd_sta_t *sta, *next;
-void
-dhdsdio_bus_usr_cnt_dec(dhd_pub_t *dhdp)
-{
- dhdp->info->bus_user_count--;
+ list_for_each_entry_safe(sta, next, snapshot_list, list) {
+ list_del(&sta->list);
+ MFREE(dhd->pub.osh, sta, sizeof(dhd_sta_t));
+ }
}
+#endif /* !BCM_GMAC3 */
+#endif /* DHD_IGMP_UCQUERY || DHD_UCAST_UPNP */
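/*
 * Illustrative sketch (not part of this patch): dhd_sta_list_snapshot() above
 * copies the STA list while holding the spinlock so that callers can walk the
 * copy afterwards without the lock held. GFP_ATOMIC is required because the
 * allocation happens under the spinlock. The names below (struct snap_sta,
 * demo_snapshot) are invented for the example.
 */
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/types.h>

struct snap_sta {
	u8 ea[6];
	struct list_head list;
};

static void demo_snapshot(struct list_head *src, spinlock_t *lock,
	struct list_head *snapshot)
{
	struct snap_sta *sta, *copy;
	unsigned long flags;

	INIT_LIST_HEAD(snapshot);

	spin_lock_irqsave(lock, flags);
	list_for_each_entry(sta, src, list) {
		/* cannot sleep with the lock held, hence GFP_ATOMIC */
		copy = kmalloc(sizeof(*copy), GFP_ATOMIC);
		if (!copy)
			continue;	/* skip this entry on failure, as the driver does */
		memcpy(copy->ea, sta->ea, sizeof(copy->ea));
		list_add_tail(&copy->list, snapshot);
	}
	spin_unlock_irqrestore(lock, flags);
	/* the caller iterates 'snapshot' lock-free and frees it when done */
}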
-/* Return values:
- * Success: Returns 0
- * Failure: Returns -1 or errono code
- */
-int
-dhd_bus_get(wlan_bt_handle_t handle, bus_owner_t owner)
-{
- dhd_pub_t *dhdp = (dhd_pub_t *)handle;
- dhd_info_t *dhd = (dhd_info_t *)dhdp->info;
- int ret = 0;
+#else
+static inline void dhd_if_flush_sta(dhd_if_t * ifp) { }
+static inline void dhd_if_del_sta_list(dhd_if_t *ifp) {}
+static inline int dhd_sta_pool_init(dhd_pub_t *dhdp, int max_sta) { return BCME_OK; }
+static inline void dhd_sta_pool_fini(dhd_pub_t *dhdp, int max_sta) {}
+static inline void dhd_sta_pool_clear(dhd_pub_t *dhdp, int max_sta) {}
+dhd_sta_t *dhd_findadd_sta(void *pub, int ifidx, void *ea) { return NULL; }
+dhd_sta_t *dhd_find_sta(void *pub, int ifidx, void *ea) { return NULL; }
+void dhd_del_sta(void *pub, int ifidx, void *ea) {}
+#endif /* PCIE_FULL_DONGLE */
- mutex_lock(&dhd->bus_user_lock);
- ++dhd->bus_user_count;
- if (dhd->bus_user_count < 0) {
- DHD_ERROR(("%s(): bus_user_count is negative, which is invalid\n", __FUNCTION__));
- ret = -1;
- goto exit;
- }
- if (dhd->bus_user_count == 1) {
- dhd->pub.hang_was_sent = 0;
+#if defined(DHD_LB)
- /* First user, turn on WL_REG, start the bus */
- DHD_ERROR(("%s(): First user Turn On WL_REG & start the bus", __FUNCTION__));
+#if defined(DHD_LB_TXC) || defined(DHD_LB_RXC) || defined(DHD_LB_TXP)
+/**
+ * dhd_tasklet_schedule - Function that runs in IPI context of the destination
+ * CPU and schedules a tasklet.
+ * @tasklet: opaque pointer to the tasklet
+ */
+INLINE void
+dhd_tasklet_schedule(void *tasklet)
+{
+ tasklet_schedule((struct tasklet_struct *)tasklet);
+}
+/**
+ * dhd_tasklet_schedule_on - Executes the passed tasklet on a given CPU
+ * @tasklet: tasklet to be scheduled
+ * @on_cpu: cpu core id
+ *
+ * If the requested cpu is online, then an IPI is sent to this cpu via the
+ * smp_call_function_single with no wait and the tasklet_schedule function
+ * will be invoked to schedule the specified tasklet on the requested CPU.
+ */
+INLINE void
+dhd_tasklet_schedule_on(struct tasklet_struct *tasklet, int on_cpu)
+{
+ const int wait = 0;
+ smp_call_function_single(on_cpu,
+ dhd_tasklet_schedule, (void *)tasklet, wait);
+}
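/*
 * Illustrative sketch (not part of this patch): dhd_tasklet_schedule_on()
 * above relies on smp_call_function_single() with wait=0 to fire an
 * asynchronous IPI whose handler simply calls tasklet_schedule() on the
 * target CPU, so the softirq work runs there rather than on the sender.
 * The tasklet below uses the classic unsigned-long tasklet API of this
 * driver's era; the demo_* names are invented for the example.
 */
#include <linux/interrupt.h>
#include <linux/printk.h>
#include <linux/smp.h>

static void demo_tasklet_fn(unsigned long data)
{
	pr_info("demo tasklet ran on cpu %d\n", smp_processor_id());
}

static DECLARE_TASKLET(demo_tasklet, demo_tasklet_fn, 0);

/* IPI callback: executes on the destination CPU in interrupt context */
static void demo_ipi_schedule(void *info)
{
	tasklet_schedule((struct tasklet_struct *)info);
}

static void demo_tasklet_schedule_on(int cpu)
{
	/* wait=0: do not block until the remote CPU has run the callback */
	smp_call_function_single(cpu, demo_ipi_schedule, &demo_tasklet, 0);
}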
- if (!wifi_platform_set_power(dhd->adapter, TRUE, WIFI_TURNON_DELAY)) {
- /* Enable F1 */
- ret = dhd_bus_resume(dhdp, 0);
- if (ret) {
- DHD_ERROR(("%s(): Failed to enable F1, err=%d\n",
- __FUNCTION__, ret));
- goto exit;
- }
- }
+/**
+ * dhd_work_schedule_on - Executes the passed work item on a given CPU
+ * @work: work to be scheduled
+ * @on_cpu: cpu core id
+ *
+ * If the requested CPU is online, the work item is queued on that CPU's
+ * worker via schedule_work_on() and the work function will be invoked
+ * there in process context.
+ */
- dhd_update_fw_nv_path(dhd);
- /* update firmware and nvram path to sdio bus */
- dhd_bus_update_fw_nv_path(dhd->pub.bus,
- dhd->fw_path, dhd->nv_path);
- /* download the firmware, Enable F2 */
- /* TODO: Should be done only in case of FW switch */
- ret = dhd_bus_devreset(dhdp, FALSE);
- dhd_bus_resume(dhdp, 1);
- if (!ret) {
- if (dhd_sync_with_dongle(&dhd->pub) < 0) {
- DHD_ERROR(("%s(): Sync with dongle failed!!\n", __FUNCTION__));
- ret = -EFAULT;
- }
- } else {
- DHD_ERROR(("%s(): Failed to download, err=%d\n", __FUNCTION__, ret));
- }
- } else {
- DHD_ERROR(("%s(): BUS is already acquired, just increase the count %d \r\n",
- __FUNCTION__, dhd->bus_user_count));
- }
-exit:
- mutex_unlock(&dhd->bus_user_lock);
- return ret;
+INLINE void
+dhd_work_schedule_on(struct work_struct *work, int on_cpu)
+{
+ schedule_work_on(on_cpu, work);
}
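/*
 * Illustrative sketch (not part of this patch): unlike the tasklet path,
 * dhd_work_schedule_on() above defers to the kernel workqueue; the work
 * item is queued on the requested CPU's worker and later runs there in
 * process context. The demo_* names are invented for the example;
 * raw_smp_processor_id() is used because work runs with preemption enabled.
 */
#include <linux/printk.h>
#include <linux/smp.h>
#include <linux/workqueue.h>

static void demo_work_fn(struct work_struct *work)
{
	pr_info("demo work ran on cpu %d\n", raw_smp_processor_id());
}

static DECLARE_WORK(demo_work, demo_work_fn);

static void demo_work_schedule_on(int cpu)
{
	/* queued on the system workqueue, bound to 'cpu' */
	schedule_work_on(cpu, &demo_work);
}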
-EXPORT_SYMBOL(dhd_bus_get);
+#endif /* DHD_LB_TXC || DHD_LB_RXC || DHD_LB_TXP */
-/* Return values:
- * Success: Returns 0
- * Failure: Returns -1 or errono code
+#if defined(DHD_LB_TXC)
+/**
+ * dhd_lb_tx_compl_dispatch - load balance by dispatching the tx_compl_tasklet
+ * on another cpu. The tx_compl_tasklet will take care of DMA unmapping and
+ * freeing the packets placed in the tx_compl workq
*/
-int
-dhd_bus_put(wlan_bt_handle_t handle, bus_owner_t owner)
+void
+dhd_lb_tx_compl_dispatch(dhd_pub_t *dhdp)
{
- dhd_pub_t *dhdp = (dhd_pub_t *)handle;
- dhd_info_t *dhd = (dhd_info_t *)dhdp->info;
- int ret = 0;
- BCM_REFERENCE(owner);
+ dhd_info_t *dhd = dhdp->info;
+ int curr_cpu, on_cpu;
- mutex_lock(&dhd->bus_user_lock);
- --dhd->bus_user_count;
- if (dhd->bus_user_count < 0) {
- DHD_ERROR(("%s(): bus_user_count is negative, which is invalid\n", __FUNCTION__));
- dhd->bus_user_count = 0;
- ret = -1;
- goto exit;
+ if (dhd->rx_napi_netdev == NULL) {
+ DHD_ERROR(("%s: dhd->rx_napi_netdev is NULL\n", __FUNCTION__));
+ return;
}
- if (dhd->bus_user_count == 0) {
- /* Last user, stop the bus and turn Off WL_REG */
- DHD_ERROR(("%s(): There are no owners left Trunf Off WL_REG & stop the bus \r\n",
- __FUNCTION__));
-#ifdef PROP_TXSTATUS
- if (dhd->pub.wlfc_enabled) {
- dhd_wlfc_deinit(&dhd->pub);
- }
-#endif /* PROP_TXSTATUS */
-#ifdef PNO_SUPPORT
- if (dhd->pub.pno_state) {
- dhd_pno_deinit(&dhd->pub);
- }
-#endif /* PNO_SUPPORT */
-#ifdef RTT_SUPPORT
- if (dhd->pub.rtt_state) {
- dhd_rtt_deinit(&dhd->pub);
- }
-#endif /* RTT_SUPPORT */
- ret = dhd_bus_devreset(dhdp, TRUE);
- if (!ret) {
- dhd_bus_suspend(dhdp);
- wifi_platform_set_power(dhd->adapter, FALSE, WIFI_TURNOFF_DELAY);
- }
- } else {
- DHD_ERROR(("%s(): Other owners using bus, decrease the count %d \r\n",
- __FUNCTION__, dhd->bus_user_count));
- }
-exit:
- mutex_unlock(&dhd->bus_user_lock);
- return ret;
-}
-EXPORT_SYMBOL(dhd_bus_put);
+ DHD_LB_STATS_INCR(dhd->txc_sched_cnt);
+ /*
+ * If the destination CPU is NOT online, or is the same as the current
+ * CPU, there is no need to schedule the work remotely.
+ */
+ curr_cpu = get_cpu();
+ put_cpu();
-int
-dhd_net_bus_get(struct net_device *dev)
-{
- dhd_info_t *dhd = DHD_DEV_INFO(dev);
- return dhd_bus_get(&dhd->pub, WLAN_MODULE);
+ on_cpu = atomic_read(&dhd->tx_compl_cpu);
+
+ if ((on_cpu == curr_cpu) || (!cpu_online(on_cpu))) {
+ dhd_tasklet_schedule(&dhd->tx_compl_tasklet);
+ } else {
+ schedule_work(&dhd->tx_compl_dispatcher_work);
+ }
}
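/*
 * Illustrative sketch (not part of this patch): the dispatch helpers in this
 * load-balancing block all make the same decision: read the CPU picked by
 * the balancer and, if it is offline or is the CPU we are already on, run
 * the bottom half locally instead of paying for a cross-CPU hand-off. The
 * helper below captures only that decision; the callbacks are invented.
 */
#include <linux/atomic.h>
#include <linux/cpumask.h>
#include <linux/smp.h>

static void demo_dispatch(atomic_t *target_cpu,
	void (*run_local)(void), void (*run_remote)(int cpu))
{
	int curr_cpu, on_cpu;

	/* get_cpu()/put_cpu() just sample the current CPU id; the value may
	 * go stale right after, which the driver tolerates - a wrong guess
	 * only costs one extra dispatch.
	 */
	curr_cpu = get_cpu();
	put_cpu();

	on_cpu = atomic_read(target_cpu);

	if (on_cpu == curr_cpu || !cpu_online(on_cpu))
		run_local();		/* cheap path: no IPI, no work queueing */
	else
		run_remote(on_cpu);	/* hand the bottom half to the chosen CPU */
}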
-int
-dhd_net_bus_put(struct net_device *dev)
+static void dhd_tx_compl_dispatcher_fn(struct work_struct * work)
{
- dhd_info_t *dhd = DHD_DEV_INFO(dev);
- return dhd_bus_put(&dhd->pub, WLAN_MODULE);
+ struct dhd_info *dhd =
+ container_of(work, struct dhd_info, tx_compl_dispatcher_work);
+ int cpu;
+
+ get_online_cpus();
+ cpu = atomic_read(&dhd->tx_compl_cpu);
+ if (!cpu_online(cpu))
+ dhd_tasklet_schedule(&dhd->tx_compl_tasklet);
+ else
+ dhd_tasklet_schedule_on(&dhd->tx_compl_tasklet, cpu);
+ put_online_cpus();
}
+#endif /* DHD_LB_TXC */
-/*
- * Function to enable the Bus Clock
- * Returns BCME_OK on success and BCME_xxx on failure
+#if defined(DHD_LB_RXC)
+/**
+ * dhd_lb_rx_compl_dispatch - load balance by dispatching the rx_compl_tasklet
+ * on another cpu. The rx_compl_tasklet will take care of reposting rx buffers
+ * in the H2D RxBuffer Post common ring, by using the recycled pktids that were
+ * placed in the rx_compl workq.
*
- * This function is not callable from non-sleepable context
+ * @dhdp: pointer to dhd_pub object
*/
-int dhd_bus_clk_enable(wlan_bt_handle_t handle, bus_owner_t owner)
+void
+dhd_lb_rx_compl_dispatch(dhd_pub_t *dhdp)
{
- dhd_pub_t *dhdp = (dhd_pub_t *)handle;
+ dhd_info_t *dhd = dhdp->info;
+ int curr_cpu, on_cpu;
- int ret;
+ if (dhd->rx_napi_netdev == NULL) {
+ DHD_ERROR(("%s: dhd->rx_napi_netdev is NULL\n", __FUNCTION__));
+ return;
+ }
- dhd_os_sdlock(dhdp);
+ DHD_LB_STATS_INCR(dhd->rxc_sched_cnt);
/*
- * The second argument is TRUE, that means, we expect
- * the function to "wait" until the clocks are really
- * available
+ * If the destination CPU is NOT online, or is the same as the current
+ * CPU, there is no need to schedule the work remotely.
*/
- ret = __dhdsdio_clk_enable(dhdp->bus, owner, TRUE);
- dhd_os_sdunlock(dhdp);
+ curr_cpu = get_cpu();
+ put_cpu();
+ on_cpu = atomic_read(&dhd->rx_compl_cpu);
- return ret;
+ if ((on_cpu == curr_cpu) || (!cpu_online(on_cpu))) {
+ dhd_tasklet_schedule(&dhd->rx_compl_tasklet);
+ } else {
+ dhd_rx_compl_dispatcher_fn(dhdp);
+ }
}
-EXPORT_SYMBOL(dhd_bus_clk_enable);
-/*
- * Function to disable the Bus Clock
- * Returns BCME_OK on success and BCME_xxx on failure
- *
- * This function is not callable from non-sleepable context
- */
-int dhd_bus_clk_disable(wlan_bt_handle_t handle, bus_owner_t owner)
+static void dhd_rx_compl_dispatcher_fn(dhd_pub_t *dhdp)
{
- dhd_pub_t *dhdp = (dhd_pub_t *)handle;
+ struct dhd_info *dhd = dhdp->info;
+ int cpu;
- int ret;
+ preempt_disable();
+ cpu = atomic_read(&dhd->rx_compl_cpu);
+ if (!cpu_online(cpu))
+ dhd_tasklet_schedule(&dhd->rx_compl_tasklet);
+ else {
+ dhd_tasklet_schedule_on(&dhd->rx_compl_tasklet, cpu);
+ }
+ preempt_enable();
+}
+#endif /* DHD_LB_RXC */
+
+#if defined(DHD_LB_TXP)
+static void dhd_tx_dispatcher_work(struct work_struct * work)
+{
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wcast-qual"
+#endif
+ struct dhd_info *dhd =
+ container_of(work, struct dhd_info, tx_dispatcher_work);
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic pop
+#endif
+ dhd_tasklet_schedule(&dhd->tx_tasklet);
+}
+
+static void dhd_tx_dispatcher_fn(dhd_pub_t *dhdp)
+{
+ int cpu;
+ int net_tx_cpu;
+ dhd_info_t *dhd = dhdp->info;
+
+ preempt_disable();
+ cpu = atomic_read(&dhd->tx_cpu);
+ net_tx_cpu = atomic_read(&dhd->net_tx_cpu);
- dhd_os_sdlock(dhdp);
/*
- * The second argument is TRUE, that means, we expect
- * the function to "wait" until the clocks are really
- * disabled
+ * If the network stack pushed the packet on the same CPU that was
+ * chosen for Tx processing, separate the two, i.e. run the Tx
+ * processing tasklet on the tx_compl_cpu instead.
*/
- ret = __dhdsdio_clk_disable(dhdp->bus, owner, TRUE);
- dhd_os_sdunlock(dhdp);
+ if (net_tx_cpu == cpu)
+ cpu = atomic_read(&dhd->tx_compl_cpu);
- return ret;
+ if (!cpu_online(cpu)) {
+ /*
+ * The chosen CPU is not online, so do the job
+ * on the current CPU instead.
+ */
+ dhd_tasklet_schedule(&dhd->tx_tasklet);
+ } else {
+ /*
+ * Schedule tx_dispatcher_work on the chosen CPU, which
+ * in turn will schedule tx_tasklet there.
+ */
+ dhd_work_schedule_on(&dhd->tx_dispatcher_work, cpu);
+ }
+ preempt_enable();
}
-EXPORT_SYMBOL(dhd_bus_clk_disable);
-/*
- * Function to reset bt_use_count counter to zero.
+/**
+ * dhd_lb_tx_dispatch - load balance by dispatching the tx_tasklet
+ * on another cpu. The tx_tasklet will take care of actually putting
+ * the skbs into appropriate flow ring and ringing H2D interrupt
*
- * This function is not callable from non-sleepable context
+ * @dhdp: pointer to dhd_pub object
*/
-void dhd_bus_reset_bt_use_count(wlan_bt_handle_t handle)
+static void
+dhd_lb_tx_dispatch(dhd_pub_t *dhdp)
{
- dhd_pub_t *dhdp = (dhd_pub_t *)handle;
+ dhd_info_t *dhd = dhdp->info;
+ int curr_cpu;
- /* take the lock and reset bt use count */
- dhd_os_sdlock(dhdp);
- dhdsdio_reset_bt_use_count(dhdp->bus);
- dhd_os_sdunlock(dhdp);
-}
-EXPORT_SYMBOL(dhd_bus_reset_bt_use_count);
+ curr_cpu = get_cpu();
+ put_cpu();
-void dhd_bus_retry_hang_recovery(wlan_bt_handle_t handle)
-{
- dhd_pub_t *dhdp = (dhd_pub_t *)handle;
- dhd_info_t *dhd = (dhd_info_t*)dhdp->info;
+ /* Record the CPU on which the TX request from the network stack came in */
+ atomic_set(&dhd->net_tx_cpu, curr_cpu);
- dhdp->hang_was_sent = 0;
+ /* Schedule the work to dispatch ... */
+ dhd_tx_dispatcher_fn(dhdp);
- dhd_os_send_hang_message(&dhd->pub);
}
-EXPORT_SYMBOL(dhd_bus_retry_hang_recovery);
-
-#endif /* BT_OVER_SDIO */
+#endif /* DHD_LB_TXP */
+#if defined(DHD_LB_RXP)
+/**
+ * dhd_napi_poll - Load balance napi poll function to process received
+ * packets and send up the network stack using netif_receive_skb()
+ *
+ * @napi: napi object in which context this poll function is invoked
+ * @budget: number of packets to be processed.
+ *
+ * Fetch the dhd_info given the rx_napi_struct. Move all packets from the
+ * rx_napi_queue into a local rx_process_queue (lock and queue move and unlock).
+ * Dequeue each packet from the head of rx_process_queue, fetch the ifid from
+ * the packet tag and send it up the stack.
+ */
static int
-dhd_monitor_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
-{
- return 0;
-}
-
-static struct net_device_stats*
-dhd_monitor_get_stats(struct net_device *dev)
+dhd_napi_poll(struct napi_struct *napi, int budget)
{
- return &DHD_MON_DEV_STATS(dev);
-}
+ int ifid;
+ const int pkt_count = 1;
+ const int chan = 0;
+ struct sk_buff * skb;
+ unsigned long flags;
+ struct dhd_info *dhd;
+ int processed = 0;
+ struct sk_buff_head rx_process_queue;
-static const struct net_device_ops netdev_monitor_ops =
-{
- .ndo_start_xmit = dhd_monitor_start,
- .ndo_get_stats = dhd_monitor_get_stats,
- .ndo_do_ioctl = dhd_monitor_ioctl
-};
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wcast-qual"
+#endif
+ dhd = container_of(napi, struct dhd_info, rx_napi_struct);
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic pop
+#endif
-static void
-dhd_add_monitor_if(dhd_info_t *dhd)
-{
- struct net_device *dev;
- char *devname;
- uint32 scan_suppress = FALSE;
- int ret = BCME_OK;
+ DHD_INFO(("%s napi_queue<%d> budget<%d>\n",
+ __FUNCTION__, skb_queue_len(&dhd->rx_napi_queue), budget));
+ __skb_queue_head_init(&rx_process_queue);
- if (!dhd) {
- DHD_ERROR(("%s: dhd info not available \n", __FUNCTION__));
- return;
- }
+ /* extract the entire rx_napi_queue into local rx_process_queue */
+ spin_lock_irqsave(&dhd->rx_napi_queue.lock, flags);
+ skb_queue_splice_tail_init(&dhd->rx_napi_queue, &rx_process_queue);
+ spin_unlock_irqrestore(&dhd->rx_napi_queue.lock, flags);
- if (dhd->monitor_dev) {
- DHD_ERROR(("%s: monitor i/f already exists", __FUNCTION__));
- return;
- }
+ while ((skb = __skb_dequeue(&rx_process_queue)) != NULL) {
+ OSL_PREFETCH(skb->data);
- dev = alloc_etherdev(DHD_MON_DEV_PRIV_SIZE);
- if (!dev) {
- DHD_ERROR(("%s: alloc wlif failed\n", __FUNCTION__));
- return;
- }
+ ifid = DHD_PKTTAG_IFID((dhd_pkttag_fr_t *)PKTTAG(skb));
- devname = "radiotap";
+ DHD_INFO(("%s dhd_rx_frame pkt<%p> ifid<%d>\n",
+ __FUNCTION__, skb, ifid));
- snprintf(dev->name, sizeof(dev->name), "%s%u", devname, dhd->unit);
+ dhd_rx_frame(&dhd->pub, ifid, skb, pkt_count, chan);
+ processed++;
+ }
-#ifndef ARPHRD_IEEE80211_PRISM /* From Linux 2.4.18 */
-#define ARPHRD_IEEE80211_PRISM 802
-#endif // endif
+ DHD_LB_STATS_UPDATE_NAPI_HISTO(&dhd->pub, processed);
-#ifndef ARPHRD_IEEE80211_RADIOTAP
-#define ARPHRD_IEEE80211_RADIOTAP 803 /* IEEE 802.11 + radiotap header */
-#endif /* ARPHRD_IEEE80211_RADIOTAP */
+ DHD_INFO(("%s processed %d\n", __FUNCTION__, processed));
+ napi_complete(napi);
- dev->type = ARPHRD_IEEE80211_RADIOTAP;
+ return budget - 1;
+}
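/*
 * Illustrative sketch (not part of this patch): a minimal NAPI poll in the
 * same shape as dhd_napi_poll() above: splice the producer's queue into a
 * local queue under its lock, push each skb up the stack, and complete NAPI
 * once the work fits in the budget. 'struct demo_ctx' and its fields are
 * invented for the example; unlike the driver, this sketch also honours
 * 'budget'.
 */
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>

struct demo_ctx {
	struct napi_struct napi;
	struct sk_buff_head rxq;	/* filled by the producer (e.g. the DPC) */
};

static int demo_napi_poll(struct napi_struct *napi, int budget)
{
	struct demo_ctx *ctx = container_of(napi, struct demo_ctx, napi);
	struct sk_buff_head process_q;
	struct sk_buff *skb;
	unsigned long flags;
	int done = 0;

	__skb_queue_head_init(&process_q);

	/* grab everything the producer has queued so far */
	spin_lock_irqsave(&ctx->rxq.lock, flags);
	skb_queue_splice_tail_init(&ctx->rxq, &process_q);
	spin_unlock_irqrestore(&ctx->rxq.lock, flags);

	while (done < budget && (skb = __skb_dequeue(&process_q)) != NULL) {
		netif_receive_skb(skb);
		done++;
	}

	if (!skb_queue_empty(&process_q)) {
		/* out of budget: put the leftovers back for the next poll */
		spin_lock_irqsave(&ctx->rxq.lock, flags);
		skb_queue_splice_init(&process_q, &ctx->rxq);
		spin_unlock_irqrestore(&ctx->rxq.lock, flags);
	} else if (done < budget) {
		napi_complete(napi);	/* done; wait for the next napi_schedule() */
	}

	return done;
}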
- dev->netdev_ops = &netdev_monitor_ops;
+/**
+ * dhd_napi_schedule - Place the napi struct into the current cpus softnet napi
+ * poll list. This function may be invoked via the smp_call_function_single
+ * from a remote CPU.
+ *
+ * This function will essentially invoke __raise_softirq_irqoff(NET_RX_SOFTIRQ)
+ * after the napi_struct is added to the softnet data's poll_list
+ *
+ * @info: pointer to a dhd_info struct
+ */
+static void
+dhd_napi_schedule(void *info)
+{
+ dhd_info_t *dhd = (dhd_info_t *)info;
- if (register_netdevice(dev)) {
- DHD_ERROR(("%s, register_netdev failed for %s\n",
- __FUNCTION__, dev->name));
- free_netdev(dev);
- return;
- }
+ DHD_INFO(("%s rx_napi_struct<%p> on cpu<%d>\n",
+ __FUNCTION__, &dhd->rx_napi_struct, atomic_read(&dhd->rx_napi_cpu)));
- if (FW_SUPPORTED((&dhd->pub), monitor)) {
- scan_suppress = TRUE;
- /* Set the SCAN SUPPRESS Flag in the firmware to disable scan in Monitor mode */
- ret = dhd_iovar(&dhd->pub, 0, "scansuppress", (char *)&scan_suppress,
- sizeof(scan_suppress), NULL, 0, TRUE);
- if (ret < 0) {
- DHD_ERROR(("%s: scansuppress set failed, ret=%d\n", __FUNCTION__, ret));
- }
+ /* add napi_struct to softnet data poll list and raise NET_RX_SOFTIRQ */
+ if (napi_schedule_prep(&dhd->rx_napi_struct)) {
+ __napi_schedule(&dhd->rx_napi_struct);
+ DHD_LB_STATS_PERCPU_ARR_INCR(dhd->napi_percpu_run_cnt);
}
- dhd->monitor_dev = dev;
+ /*
+ * If the rx_napi_struct was already running, then we let it complete
+ * processing all its packets. The rx_napi_struct may only run on one
+ * core at a time, to avoid out-of-order handling.
+ */
}
-static void
-dhd_del_monitor_if(dhd_info_t *dhd)
+/**
+ * dhd_napi_schedule_on - API to schedule a NET_RX_SOFTIRQ action on a desired
+ * CPU core after placing the dhd's rx_process napi object in the remote CPU's
+ * softnet data's poll_list.
+ *
+ * @dhd: dhd_info which has the rx_process napi object
+ * @on_cpu: desired remote CPU id
+ */
+static INLINE int
+dhd_napi_schedule_on(dhd_info_t *dhd, int on_cpu)
{
- int ret = BCME_OK;
- uint32 scan_suppress = FALSE;
+ int wait = 0; /* asynchronous IPI */
+ DHD_INFO(("%s dhd<%p> napi<%p> on_cpu<%d>\n",
+ __FUNCTION__, dhd, &dhd->rx_napi_struct, on_cpu));
- if (!dhd) {
- DHD_ERROR(("%s: dhd info not available \n", __FUNCTION__));
- return;
+ if (smp_call_function_single(on_cpu, dhd_napi_schedule, dhd, wait)) {
+ DHD_ERROR(("%s smp_call_function_single on_cpu<%d> failed\n",
+ __FUNCTION__, on_cpu));
}
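+ /*
+ * With wait == 0 the cross-CPU call above is an asynchronous IPI:
+ * it returns without waiting for dhd_napi_schedule() to finish on
+ * the remote CPU. A non-zero return generally means the target CPU
+ * could not be reached (e.g. it went offline), which is why the
+ * dispatcher checks cpu_online() under get_online_cpus() first.
+ */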
- if (!dhd->monitor_dev) {
- DHD_ERROR(("%s: monitor i/f doesn't exist", __FUNCTION__));
- return;
- }
+ DHD_LB_STATS_INCR(dhd->napi_sched_cnt);
- if (FW_SUPPORTED((&dhd->pub), monitor)) {
- scan_suppress = FALSE;
- /* Unset the SCAN SUPPRESS Flag in the firmware to enable scan */
- ret = dhd_iovar(&dhd->pub, 0, "scansuppress", (char *)&scan_suppress,
- sizeof(scan_suppress), NULL, 0, TRUE);
- if (ret < 0) {
- DHD_ERROR(("%s: scansuppress set failed, ret=%d\n", __FUNCTION__, ret));
- }
- }
+ return 0;
+}
- if (dhd->monitor_dev) {
- if (dhd->monitor_dev->reg_state == NETREG_UNINITIALIZED) {
- free_netdev(dhd->monitor_dev);
- } else {
- unregister_netdevice(dhd->monitor_dev);
- }
- dhd->monitor_dev = NULL;
- }
+/*
+ * Call get_online_cpus/put_online_cpus around dhd_napi_schedule_on.
+ * Why is this needed?
+ * The candidacy algorithm runs from the callback registered with the CPU
+ * hotplug notifier, and that callback runs in worker context. The
+ * dhd_napi_schedule_on path also runs from worker context, so the two can
+ * execute on different CPUs at the same time. That leaves a window where a
+ * given CPUn is being brought down from CPUm while we try to run a function
+ * on CPUn.
+ * To prevent this it is better to execute the whole SMP function call under
+ * get_online_cpus, which ensures that the hotplug mechanism does not kick in
+ * until we are done dealing with online CPUs.
+ * If the hotplug worker is already running there is no problem, because the
+ * candidacy algorithm will then reflect the change in dhd->rx_napi_cpu.
+ *
+ * This code structure follows the recommendation in
+ * https://www.kernel.org/doc/Documentation/cpu-hotplug.txt
+ * for the question
+ * Q: I need to ensure that a particular cpu is not removed when there is some
+ * work specific to this cpu in progress.
+ *
+ * According to that documentation, calling get_online_cpus is NOT required
+ * when running from tasklet context. Since dhd_rx_napi_dispatcher_fn can run
+ * from workqueue context, we do have to call these functions.
+ */
+static void dhd_rx_napi_dispatcher_fn(struct work_struct * work)
+{
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wcast-qual"
+#endif
+ struct dhd_info *dhd =
+ container_of(work, struct dhd_info, rx_napi_dispatcher_work);
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic pop
+#endif
+ int cpu;
+
+ get_online_cpus();
+ cpu = atomic_read(&dhd->rx_napi_cpu);
+
+ if (!cpu_online(cpu))
+ dhd_napi_schedule(dhd);
+ else
+ dhd_napi_schedule_on(dhd, cpu);
+
+ put_online_cpus();
}
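+/*
+ * Note: newer kernels expose the same hotplug read-side protection as
+ * cpus_read_lock()/cpus_read_unlock(); get_online_cpus()/put_online_cpus()
+ * are the older names for the same mechanism.
+ */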
-static void
-dhd_set_monitor(dhd_pub_t *pub, int ifidx, int val)
+/**
+ * dhd_lb_rx_napi_dispatch - load balance by dispatching the rx_napi_struct
+ * to run on another CPU. The rx_napi_struct's poll function will retrieve all
+ * the packets enqueued into the rx_napi_queue and send them up the stack.
+ * The producer's rx packet queue is appended to the rx_napi_queue before
+ * dispatching the rx_napi_struct.
+ */
+void
+dhd_lb_rx_napi_dispatch(dhd_pub_t *dhdp)
{
- dhd_info_t *dhd = pub->info;
+ unsigned long flags;
+ dhd_info_t *dhd = dhdp->info;
+ int curr_cpu;
+ int on_cpu;
- DHD_TRACE(("%s: val %d\n", __FUNCTION__, val));
+ if (dhd->rx_napi_netdev == NULL) {
+ DHD_ERROR(("%s: dhd->rx_napi_netdev is NULL\n", __FUNCTION__));
+ return;
+ }
- dhd_net_if_lock_local(dhd);
- if (!val) {
- /* Delete monitor */
- dhd_del_monitor_if(dhd);
+ DHD_INFO(("%s append napi_queue<%d> pend_queue<%d>\n", __FUNCTION__,
+ skb_queue_len(&dhd->rx_napi_queue), skb_queue_len(&dhd->rx_pend_queue)));
+
+ /* append the producer's queue of packets to the napi's rx process queue */
+ spin_lock_irqsave(&dhd->rx_napi_queue.lock, flags);
+ skb_queue_splice_tail_init(&dhd->rx_pend_queue, &dhd->rx_napi_queue);
+ spin_unlock_irqrestore(&dhd->rx_napi_queue.lock, flags);
+
+ /*
+ * If the destination CPU is NOT online or is same as current CPU
+ * no need to schedule the work
+ */
+ curr_cpu = get_cpu();
+ put_cpu();
+
+ on_cpu = atomic_read(&dhd->rx_napi_cpu);
+ if ((on_cpu == curr_cpu) || (!cpu_online(on_cpu))) {
+ dhd_napi_schedule(dhd);
} else {
- /* Add monitor */
- dhd_add_monitor_if(dhd);
+ schedule_work(&dhd->rx_napi_dispatcher_work);
}
- dhd->monitor_type = val;
- dhd_net_if_unlock_local(dhd);
}
-#endif /* WL_MONITOR */
-#if defined(DHD_H2D_LOG_TIME_SYNC)
-/*
- * Helper function:
- * Used for RTE console message time syncing with Host printk
+/**
+ * dhd_lb_rx_pkt_enqueue - Enqueue the packet into the producer's queue
*/
-void dhd_h2d_log_time_sync_deferred_wq_schedule(dhd_pub_t *dhdp)
+void
+dhd_lb_rx_pkt_enqueue(dhd_pub_t *dhdp, void *pkt, int ifidx)
{
- dhd_info_t *info = dhdp->info;
+ dhd_info_t *dhd = dhdp->info;
- /* Ideally the "state" should be always TRUE */
- dhd_deferred_schedule_work(info->dhd_deferred_wq, NULL,
- DHD_WQ_WORK_H2D_CONSOLE_TIME_STAMP_MATCH,
- dhd_deferred_work_rte_log_time_sync,
- DHD_WQ_WORK_PRIORITY_LOW);
+ DHD_INFO(("%s enqueue pkt<%p> ifidx<%d> pend_queue<%d>\n", __FUNCTION__,
+ pkt, ifidx, skb_queue_len(&dhd->rx_pend_queue)));
+ DHD_PKTTAG_SET_IFID((dhd_pkttag_fr_t *)PKTTAG(pkt), ifidx);
+ __skb_queue_tail(&dhd->rx_pend_queue, pkt);
}
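+/*
+ * The lock-free __skb_queue_tail() above presumably relies on rx_pend_queue
+ * being filled from a single producer context; the pending packets are later
+ * spliced in one shot into rx_napi_queue under rx_napi_queue.lock in
+ * dhd_lb_rx_napi_dispatch().
+ */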
+#endif /* DHD_LB_RXP */
-void
-dhd_deferred_work_rte_log_time_sync(void *handle, void *event_info, u8 event)
+#endif /* DHD_LB */
+
+/** Returns the dhd iflist index corresponding to the bssidx provided by apps */
+int dhd_bssidx2idx(dhd_pub_t *dhdp, uint32 bssidx)
{
- dhd_info_t *dhd_info = handle;
- dhd_pub_t *dhd;
+ dhd_if_t *ifp;
+ dhd_info_t *dhd = dhdp->info;
+ int i;
- if (event != DHD_WQ_WORK_H2D_CONSOLE_TIME_STAMP_MATCH) {
- DHD_ERROR(("%s: unexpected event \n", __FUNCTION__));
- return;
- }
+ ASSERT(bssidx < DHD_MAX_IFS);
+ ASSERT(dhdp);
- if (!dhd_info) {
- DHD_ERROR(("%s: dhd info not available \n", __FUNCTION__));
- return;
+ for (i = 0; i < DHD_MAX_IFS; i++) {
+ ifp = dhd->iflist[i];
+ if (ifp && (ifp->bssidx == bssidx)) {
+ DHD_TRACE(("Index manipulated for %s from %d to %d\n",
+ ifp->name, bssidx, i));
+ break;
+ }
}
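+ /*
+ * If nothing matched, i equals DHD_MAX_IFS when we fall out of the
+ * loop; the ASSERTs above suggest callers are expected to pass a
+ * bssidx that actually exists.
+ */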
-
- dhd = &dhd_info->pub;
-
- /*
- * Function to send IOVAR for console timesyncing
- * between Host and Dongle.
- * If the IOVAR fails,
- * 1. dhd_rte_time_sync_ms is set to 0 and
- * 2. HOST Dongle console time sync will *not* happen.
- */
- dhd_h2d_log_time_sync(dhd);
+ return i;
}
-#endif /* DHD_H2D_LOG_TIME_SYNC */
-int dhd_ioctl_process(dhd_pub_t *pub, int ifidx, dhd_ioctl_t *ioc, void *data_buf)
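+/*
+ * dhd_rxf_enqueue/dhd_rxf_dequeue implement a simple ring over
+ * dhdp->skbbuf[]: the producer writes at store_idx, the rxf thread consumes
+ * at sent_idx, and both wrap with "(idx + 1) & (MAXSKBPEND - 1)", which
+ * assumes MAXSKBPEND is a power of two.
+ */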
+static inline int dhd_rxf_enqueue(dhd_pub_t *dhdp, void* skb)
{
- int bcmerror = BCME_OK;
- int buflen = 0;
- struct net_device *net;
+ uint32 store_idx;
+ uint32 sent_idx;
- net = dhd_idx2net(pub, ifidx);
- if (!net) {
- bcmerror = BCME_BADARG;
- /*
- * The netdev pointer is bad means the DHD can't communicate
- * to higher layers, so just return from here
- */
- return bcmerror;
+ if (!skb) {
+ DHD_ERROR(("dhd_rxf_enqueue: NULL skb!!!\n"));
+ return BCME_ERROR;
}
- /* check for local dhd ioctl and handle it */
- if (ioc->driver == DHD_IOCTL_MAGIC) {
- /* This is a DHD IOVAR, truncate buflen to DHD_IOCTL_MAXLEN */
- if (data_buf)
- buflen = MIN(ioc->len, DHD_IOCTL_MAXLEN);
- bcmerror = dhd_ioctl((void *)pub, ioc, data_buf, buflen);
- if (bcmerror)
- pub->bcmerror = bcmerror;
- goto done;
+ dhd_os_rxflock(dhdp);
+ store_idx = dhdp->store_idx;
+ sent_idx = dhdp->sent_idx;
+ if (dhdp->skbbuf[store_idx] != NULL) {
+ /* Make sure the previous packets are processed */
+ dhd_os_rxfunlock(dhdp);
+#ifdef RXF_DEQUEUE_ON_BUSY
+ DHD_TRACE(("dhd_rxf_enqueue: pktbuf not consumed %p, store idx %d sent idx %d\n",
+ skb, store_idx, sent_idx));
+ return BCME_BUSY;
+#else /* RXF_DEQUEUE_ON_BUSY */
+ DHD_ERROR(("dhd_rxf_enqueue: pktbuf not consumed %p, store idx %d sent idx %d\n",
+ skb, store_idx, sent_idx));
+ /* removed msleep here, should use wait_event_timeout if we
+ * want to give rx frame thread a chance to run
+ */
+#if defined(WAIT_DEQUEUE)
+ OSL_SLEEP(1);
+#endif
+ return BCME_ERROR;
+#endif /* RXF_DEQUEUE_ON_BUSY */
}
+ DHD_TRACE(("dhd_rxf_enqueue: Store SKB %p. idx %d -> %d\n",
+ skb, store_idx, (store_idx + 1) & (MAXSKBPEND - 1)));
+ dhdp->skbbuf[store_idx] = skb;
+ dhdp->store_idx = (store_idx + 1) & (MAXSKBPEND - 1);
+ dhd_os_rxfunlock(dhdp);
- /* This is a WL IOVAR, truncate buflen to WLC_IOCTL_MAXLEN */
- if (data_buf)
- buflen = MIN(ioc->len, WLC_IOCTL_MAXLEN);
+ return BCME_OK;
+}
-#ifndef BCMDBUS
- /* send to dongle (must be up, and wl). */
- if (pub->busstate == DHD_BUS_DOWN || pub->busstate == DHD_BUS_LOAD) {
- if ((!pub->dongle_trap_occured) && allow_delay_fwdl) {
- int ret;
- if (atomic_read(&exit_in_progress)) {
- DHD_ERROR(("%s module exit in progress\n", __func__));
- bcmerror = BCME_DONGLE_DOWN;
- goto done;
- }
- ret = dhd_bus_start(pub);
- if (ret != 0) {
- DHD_ERROR(("%s: failed with code %d\n", __FUNCTION__, ret));
- bcmerror = BCME_DONGLE_DOWN;
- goto done;
- }
- } else {
- bcmerror = BCME_DONGLE_DOWN;
- goto done;
- }
- }
+static inline void* dhd_rxf_dequeue(dhd_pub_t *dhdp)
+{
+ uint32 store_idx;
+ uint32 sent_idx;
+ void *skb;
- if (!pub->iswl) {
- bcmerror = BCME_DONGLE_DOWN;
- goto done;
- }
-#endif /* !BCMDBUS */
+ dhd_os_rxflock(dhdp);
- /*
- * Flush the TX queue if required for proper message serialization:
- * Intercept WLC_SET_KEY IOCTL - serialize M4 send and set key IOCTL to
- * prevent M4 encryption and
- * intercept WLC_DISASSOC IOCTL - serialize WPS-DONE and WLC_DISASSOC IOCTL to
- * prevent disassoc frame being sent before WPS-DONE frame.
- */
- if (ioc->cmd == WLC_SET_KEY ||
- (ioc->cmd == WLC_SET_VAR && data_buf != NULL &&
- strncmp("wsec_key", data_buf, 9) == 0) ||
- (ioc->cmd == WLC_SET_VAR && data_buf != NULL &&
- strncmp("bsscfg:wsec_key", data_buf, 15) == 0) ||
- ioc->cmd == WLC_DISASSOC)
- dhd_wait_pend8021x(net);
+ store_idx = dhdp->store_idx;
+ sent_idx = dhdp->sent_idx;
+ skb = dhdp->skbbuf[sent_idx];
- if ((ioc->cmd == WLC_SET_VAR || ioc->cmd == WLC_GET_VAR) &&
- data_buf != NULL && strncmp("rpc_", data_buf, 4) == 0) {
- bcmerror = BCME_UNSUPPORTED;
- goto done;
+ if (skb == NULL) {
+ dhd_os_rxfunlock(dhdp);
+ DHD_ERROR(("dhd_rxf_dequeue: Dequeued packet is NULL, store idx %d sent idx %d\n",
+ store_idx, sent_idx));
+ return NULL;
}
- bcmerror = dhd_wl_ioctl(pub, ifidx, (wl_ioctl_t *)ioc, data_buf, buflen);
+ dhdp->skbbuf[sent_idx] = NULL;
+ dhdp->sent_idx = (sent_idx + 1) & (MAXSKBPEND - 1);
-#ifdef WL_MONITOR
- /* Intercept monitor ioctl here, add/del monitor if */
- if (bcmerror == BCME_OK && ioc->cmd == WLC_SET_MONITOR) {
- int val = 0;
- if (data_buf != NULL && buflen != 0) {
- if (buflen >= 4) {
- val = *(int*)data_buf;
- } else if (buflen >= 2) {
- val = *(short*)data_buf;
- } else {
- val = *(char*)data_buf;
- }
- }
- dhd_set_monitor(pub, ifidx, val);
- }
-#endif /* WL_MONITOR */
+ DHD_TRACE(("dhd_rxf_dequeue: netif_rx_ni(%p), sent idx %d\n",
+ skb, sent_idx));
-done:
- dhd_check_hang(net, pub, bcmerror);
+ dhd_os_rxfunlock(dhdp);
- return bcmerror;
+ return skb;
}
-/**
- * Called by the OS (optionally via a wrapper function).
- * @param net Linux per dongle instance
- * @param ifr Linux request structure
- * @param cmd e.g. SIOCETHTOOL
- */
-static int
-dhd_ioctl_entry(struct net_device *net, struct ifreq *ifr, int cmd)
+int dhd_process_cid_mac(dhd_pub_t *dhdp, bool prepost)
{
- dhd_info_t *dhd = DHD_DEV_INFO(net);
- dhd_ioctl_t ioc;
- int bcmerror = 0;
- int ifidx;
- int ret;
- void *local_buf = NULL; /**< buffer in kernel space */
- void __user *ioc_buf_user = NULL; /**< buffer in user space */
- u16 buflen = 0;
-
- if (atomic_read(&exit_in_progress)) {
- DHD_ERROR(("%s module exit in progress\n", __func__));
- bcmerror = BCME_DONGLE_DOWN;
- return OSL_ERROR(bcmerror);
+ if (prepost) { /* pre process */
+ dhd_read_cis(dhdp);
+ dhd_check_module_cid(dhdp);
+ dhd_check_module_mac(dhdp);
+ dhd_set_macaddr_from_file(dhdp);
+ } else { /* post process */
+ dhd_write_macaddr(&dhdp->mac);
+ dhd_clear_cis(dhdp);
}
- DHD_OS_WAKE_LOCK(&dhd->pub);
- DHD_PERIM_LOCK(&dhd->pub);
+ return 0;
+}
- /* Interface up check for built-in type */
- if (!dhd_download_fw_on_driverload && dhd->pub.up == FALSE) {
- DHD_ERROR(("%s: Interface is down \n", __FUNCTION__));
- DHD_PERIM_UNLOCK(&dhd->pub);
- DHD_OS_WAKE_UNLOCK(&dhd->pub);
- return OSL_ERROR(BCME_NOTUP);
+// terence 20160615: fix build error if ARP_OFFLOAD_SUPPORT is removed
+#if defined(PKT_FILTER_SUPPORT)
+#if defined(ARP_OFFLOAD_SUPPORT) && !defined(GAN_LITE_NAT_KEEPALIVE_FILTER)
+static bool
+_turn_on_arp_filter(dhd_pub_t *dhd, int op_mode_param)
+{
+ bool _apply = FALSE;
+ /* In case of IBSS mode, apply arp pkt filter */
+ if (op_mode_param & DHD_FLAG_IBSS_MODE) {
+ _apply = TRUE;
+ goto exit;
}
-
- ifidx = dhd_net2idx(dhd, net);
- DHD_TRACE(("%s: ifidx %d, cmd 0x%04x\n", __FUNCTION__, ifidx, cmd));
-
-#if defined(WL_STATIC_IF)
- /* skip for static ndev when it is down */
- if (dhd_is_static_ndev(&dhd->pub, net) && !(net->flags & IFF_UP)) {
- DHD_PERIM_UNLOCK(&dhd->pub);
- DHD_OS_WAKE_UNLOCK(&dhd->pub);
- return -1;
+ /* In case of P2P GO or GC, apply pkt filter to pass arp pkt to host */
+ if (op_mode_param & (DHD_FLAG_P2P_GC_MODE | DHD_FLAG_P2P_GO_MODE)) {
+ _apply = TRUE;
+ goto exit;
}
-#endif /* WL_STATIC_iF */
- if (ifidx == DHD_BAD_IF) {
- DHD_ERROR(("%s: BAD IF\n", __FUNCTION__));
- DHD_PERIM_UNLOCK(&dhd->pub);
- DHD_OS_WAKE_UNLOCK(&dhd->pub);
- return -1;
- }
+exit:
+ return _apply;
+}
+#endif /* !GAN_LITE_NAT_KEEPALIVE_FILTER */
-#if defined(WL_WIRELESS_EXT)
- /* linux wireless extensions */
- if ((cmd >= SIOCIWFIRST) && (cmd <= SIOCIWLAST)) {
- /* may recurse, do NOT lock */
- ret = wl_iw_ioctl(net, ifr, cmd);
- DHD_PERIM_UNLOCK(&dhd->pub);
- DHD_OS_WAKE_UNLOCK(&dhd->pub);
- return ret;
- }
-#endif /* defined(WL_WIRELESS_EXT) */
+void
+dhd_set_packet_filter(dhd_pub_t *dhd)
+{
+ int i;
- if (cmd == SIOCETHTOOL) {
- ret = dhd_ethtool(dhd, (void*)ifr->ifr_data);
- DHD_PERIM_UNLOCK(&dhd->pub);
- DHD_OS_WAKE_UNLOCK(&dhd->pub);
- return ret;
+ DHD_TRACE(("%s: enter\n", __FUNCTION__));
+ if (dhd_pkt_filter_enable) {
+ for (i = 0; i < dhd->pktfilter_count; i++) {
+ dhd_pktfilter_offload_set(dhd, dhd->pktfilter[i]);
+ }
}
+}
- if (cmd == SIOCDEVPRIVATE+1) {
- ret = wl_android_priv_cmd(net, ifr);
- dhd_check_hang(net, &dhd->pub, ret);
- DHD_PERIM_UNLOCK(&dhd->pub);
- DHD_OS_WAKE_UNLOCK(&dhd->pub);
- return ret;
- }
+void
+dhd_enable_packet_filter(int value, dhd_pub_t *dhd)
+{
+ int i;
- if (cmd != SIOCDEVPRIVATE) {
- DHD_PERIM_UNLOCK(&dhd->pub);
- DHD_OS_WAKE_UNLOCK(&dhd->pub);
- return -EOPNOTSUPP;
+ DHD_ERROR(("%s: enter, value = %d\n", __FUNCTION__, value));
+ if ((dhd->op_mode & DHD_FLAG_HOSTAP_MODE) && value) {
+ DHD_ERROR(("%s: DHD_FLAG_HOSTAP_MODE\n", __FUNCTION__));
+ return;
+ }
+ /* 1 - Enable packet filter, only allow unicast packet to send up */
+ /* 0 - Disable packet filter */
+ if (dhd_pkt_filter_enable && (!value ||
+ (dhd_support_sta_mode(dhd) && !dhd->dhcp_in_progress)))
+ {
+ for (i = 0; i < dhd->pktfilter_count; i++) {
+// terence 20160615: fix build error if ARP_OFFLOAD_SUPPORT is removed
+#if defined(ARP_OFFLOAD_SUPPORT) && !defined(GAN_LITE_NAT_KEEPALIVE_FILTER)
+ if (value && (i == DHD_ARP_FILTER_NUM) &&
+ !_turn_on_arp_filter(dhd, dhd->op_mode)) {
+ DHD_TRACE(("Do not turn on ARP white list pkt filter:"
+ "val %d, cnt %d, op_mode 0x%x\n",
+ value, i, dhd->op_mode));
+ continue;
+ }
+#endif /* !GAN_LITE_NAT_KEEPALIVE_FILTER */
+ dhd_pktfilter_offload_enable(dhd, dhd->pktfilter[i],
+ value, dhd_master_mode);
+ }
}
+}
- memset(&ioc, 0, sizeof(ioc));
-
-#ifdef CONFIG_COMPAT
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 6, 0))
- if (in_compat_syscall())
-#else
- if (is_compat_task())
-#endif /* LINUX_VER >= 4.6 */
- {
- compat_wl_ioctl_t compat_ioc;
- if (copy_from_user(&compat_ioc, ifr->ifr_data, sizeof(compat_wl_ioctl_t))) {
- bcmerror = BCME_BADADDR;
- goto done;
- }
- ioc.cmd = compat_ioc.cmd;
- if (ioc.cmd & WLC_SPEC_FLAG) {
- memset(&ioc, 0, sizeof(ioc));
- /* Copy the ioc control structure part of ioctl request */
- if (copy_from_user(&ioc, ifr->ifr_data, sizeof(wl_ioctl_t))) {
- bcmerror = BCME_BADADDR;
- goto done;
- }
- ioc.cmd &= ~WLC_SPEC_FLAG; /* Clear the FLAG */
+int
+dhd_packet_filter_add_remove(dhd_pub_t *dhdp, int add_remove, int num)
+{
+ char *filterp = NULL;
+ int filter_id = 0;
- /* To differentiate between wl and dhd read 4 more byes */
- if ((copy_from_user(&ioc.driver, (char *)ifr->ifr_data + sizeof(wl_ioctl_t),
- sizeof(uint)) != 0)) {
- bcmerror = BCME_BADADDR;
- goto done;
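+ /*
+ * The filter strings below appear to follow the usual wl pkt_filter_add
+ * form "<id> <polarity> <type> <offset> <bitmask> <pattern>"; e.g. filter
+ * 105 matches EtherType 0x0806 (ARP) at offset 12 under mask 0xFFFF.
+ */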
+ switch (num) {
+ case DHD_BROADCAST_FILTER_NUM:
+ filterp = "101 0 0 0 0xFFFFFFFFFFFF 0xFFFFFFFFFFFF";
+ filter_id = 101;
+ break;
+ case DHD_MULTICAST4_FILTER_NUM:
+ filter_id = 102;
+ if (FW_SUPPORTED((dhdp), pf6)) {
+ if (dhdp->pktfilter[num] != NULL) {
+ dhd_pktfilter_offload_delete(dhdp, filter_id);
+ dhdp->pktfilter[num] = NULL;
+ }
+ if (!add_remove) {
+ filterp = DISCARD_IPV4_MCAST;
+ add_remove = 1;
+ break;
+ }
}
-
- } else { /* ioc.cmd & WLC_SPEC_FLAG */
- ioc.buf = compat_ptr(compat_ioc.buf);
- ioc.len = compat_ioc.len;
- ioc.set = compat_ioc.set;
- ioc.used = compat_ioc.used;
- ioc.needed = compat_ioc.needed;
- /* To differentiate between wl and dhd read 4 more byes */
- if ((copy_from_user(&ioc.driver, (char *)ifr->ifr_data + sizeof(compat_wl_ioctl_t),
- sizeof(uint)) != 0)) {
- bcmerror = BCME_BADADDR;
- goto done;
+ filterp = "102 0 0 0 0xFFFFFF 0x01005E";
+ break;
+ case DHD_MULTICAST6_FILTER_NUM:
+ filter_id = 103;
+ if (FW_SUPPORTED((dhdp), pf6)) {
+ if (dhdp->pktfilter[num] != NULL) {
+ dhd_pktfilter_offload_delete(dhdp, filter_id);
+ dhdp->pktfilter[num] = NULL;
+ }
+ if (!add_remove) {
+ filterp = DISCARD_IPV6_MCAST;
+ add_remove = 1;
+ break;
+ }
}
- } /* ioc.cmd & WLC_SPEC_FLAG */
- } else
-#endif /* CONFIG_COMPAT */
- {
- /* Copy the ioc control structure part of ioctl request */
- if (copy_from_user(&ioc, ifr->ifr_data, sizeof(wl_ioctl_t))) {
- bcmerror = BCME_BADADDR;
- goto done;
- }
-#ifdef CONFIG_COMPAT
- ioc.cmd &= ~WLC_SPEC_FLAG; /* make sure it was clear when it isn't a compat task*/
-#endif
+ filterp = "103 0 0 0 0xFFFF 0x3333";
+ break;
+ case DHD_MDNS_FILTER_NUM:
+ filterp = "104 0 0 0 0xFFFFFFFFFFFF 0x01005E0000FB";
+ filter_id = 104;
+ break;
+ case DHD_ARP_FILTER_NUM:
+ filterp = "105 0 0 12 0xFFFF 0x0806";
+ filter_id = 105;
+ break;
+ case DHD_BROADCAST_ARP_FILTER_NUM:
+ filterp = "106 0 0 0 0xFFFFFFFFFFFF0000000000000806"
+ " 0xFFFFFFFFFFFF0000000000000806";
+ filter_id = 106;
+ break;
+ default:
+ return -EINVAL;
+ }
- /* To differentiate between wl and dhd read 4 more byes */
- if ((copy_from_user(&ioc.driver, (char *)ifr->ifr_data + sizeof(wl_ioctl_t),
- sizeof(uint)) != 0)) {
- bcmerror = BCME_BADADDR;
- goto done;
+ /* Add filter */
+ if (add_remove) {
+ dhdp->pktfilter[num] = filterp;
+ dhd_pktfilter_offload_set(dhdp, dhdp->pktfilter[num]);
+ } else { /* Delete filter */
+ if (dhdp->pktfilter[num]) {
+ dhd_pktfilter_offload_delete(dhdp, filter_id);
+ dhdp->pktfilter[num] = NULL;
}
}
-#ifndef CONFIG_VTS_SUPPORT
- if (!capable(CAP_NET_ADMIN)) {
- bcmerror = BCME_EPERM;
- goto done;
- }
-#endif
+ return 0;
+}
+#endif /* PKT_FILTER_SUPPORT */
- /* Take backup of ioc.buf and restore later */
- ioc_buf_user = ioc.buf;
+static int dhd_set_suspend(int value, dhd_pub_t *dhd)
+{
+ int power_mode = PM_MAX;
+#ifdef SUPPORT_SENSORHUB
+ shub_control_t shub_ctl;
+#endif /* SUPPORT_SENSORHUB */
+ /* wl_pkt_filter_enable_t enable_parm; */
+ int bcn_li_dtim = 0; /* Default bcn_li_dtim in resume mode is 0 */
+ int ret = 0;
+#ifdef DHD_USE_EARLYSUSPEND
+#ifdef CUSTOM_BCN_TIMEOUT_IN_SUSPEND
+ int bcn_timeout = 0;
+#endif /* CUSTOM_BCN_TIMEOUT_IN_SUSPEND */
+#ifdef CUSTOM_ROAM_TIME_THRESH_IN_SUSPEND
+ int roam_time_thresh = 0; /* (ms) */
+#endif /* CUSTOM_ROAM_TIME_THRESH_IN_SUSPEND */
+#ifndef ENABLE_FW_ROAM_SUSPEND
+ uint roamvar = dhd->conf->roam_off_suspend;
+#endif /* ENABLE_FW_ROAM_SUSPEND */
+#ifdef ENABLE_BCN_LI_BCN_WAKEUP
+ int bcn_li_bcn;
+#endif /* ENABLE_BCN_LI_BCN_WAKEUP */
+ uint nd_ra_filter = 0;
+#endif /* DHD_USE_EARLYSUSPEND */
+#ifdef PASS_ALL_MCAST_PKTS
+ struct dhd_info *dhdinfo;
+ uint32 allmulti;
+ uint i;
+#endif /* PASS_ALL_MCAST_PKTS */
+#ifdef ENABLE_IPMCAST_FILTER
+ int ipmcast_l2filter;
+#endif /* ENABLE_IPMCAST_FILTER */
+#ifdef DYNAMIC_SWOOB_DURATION
+#ifndef CUSTOM_INTR_WIDTH
+#define CUSTOM_INTR_WIDTH 100
+ int intr_width = 0;
+#endif /* CUSTOM_INTR_WIDTH */
+#endif /* DYNAMIC_SWOOB_DURATION */
- if (ioc.len > 0) {
- buflen = MIN(ioc.len, DHD_IOCTL_MAXLEN);
- if (!(local_buf = MALLOC(dhd->pub.osh, buflen+1))) {
- bcmerror = BCME_NOMEM;
- goto done;
- }
+#if defined(BCMPCIE)
+ int lpas = 0;
+ int dtim_period = 0;
+ int bcn_interval = 0;
+ int bcn_to_dly = 0;
+#ifndef CUSTOM_ROAM_TIME_THRESH_IN_SUSPEND
+ int bcn_timeout = CUSTOM_BCN_TIMEOUT_SETTING;
+#else
+ bcn_timeout = CUSTOM_BCN_TIMEOUT_SETTING;
+#endif /* CUSTOM_ROAM_TIME_THRESH_IN_SUSPEND */
+#endif /* BCMPCIE */
- DHD_PERIM_UNLOCK(&dhd->pub);
- if (copy_from_user(local_buf, ioc.buf, buflen)) {
- DHD_PERIM_LOCK(&dhd->pub);
- bcmerror = BCME_BADADDR;
- goto done;
- }
- DHD_PERIM_LOCK(&dhd->pub);
+ if (!dhd)
+ return -ENODEV;
- *((char *)local_buf + buflen) = '\0';
+#ifdef PASS_ALL_MCAST_PKTS
+ dhdinfo = dhd->info;
+#endif /* PASS_ALL_MCAST_PKTS */
- /* For some platforms accessing userspace memory
- * of ioc.buf is causing kernel panic, so to avoid that
- * make ioc.buf pointing to kernel space memory local_buf
- */
- ioc.buf = local_buf;
- }
+ DHD_TRACE(("%s: enter, value = %d in_suspend=%d\n",
+ __FUNCTION__, value, dhd->in_suspend));
- /* Skip all the non DHD iovars (wl iovars) after f/w hang */
- if (ioc.driver != DHD_IOCTL_MAGIC && dhd->pub.hang_was_sent) {
- DHD_TRACE(("%s: HANG was sent up earlier\n", __FUNCTION__));
- DHD_OS_WAKE_LOCK_CTRL_TIMEOUT_ENABLE(&dhd->pub, DHD_EVENT_TIMEOUT_MS);
- bcmerror = BCME_DONGLE_DOWN;
- goto done;
- }
+ dhd_suspend_lock(dhd);
- bcmerror = dhd_ioctl_process(&dhd->pub, ifidx, &ioc, local_buf);
+#ifdef CUSTOM_SET_CPUCORE
+ DHD_TRACE(("%s set cpucore(suspend%d)\n", __FUNCTION__, value));
+ /* set specific cpucore */
+ dhd_set_cpucore(dhd, TRUE);
+#endif /* CUSTOM_SET_CPUCORE */
- /* Restore back userspace pointer to ioc.buf */
- ioc.buf = ioc_buf_user;
+ if (dhd->conf->pm >= 0)
+ power_mode = dhd->conf->pm;
+ else
+ power_mode = PM_FAST;
- if (!bcmerror && buflen && local_buf && ioc.buf) {
- DHD_PERIM_UNLOCK(&dhd->pub);
- if (copy_to_user(ioc.buf, local_buf, buflen))
- bcmerror = -EFAULT;
- DHD_PERIM_LOCK(&dhd->pub);
- }
+ if (dhd->up) {
+ if (value && dhd->in_suspend) {
+#ifdef PKT_FILTER_SUPPORT
+ dhd->early_suspended = 1;
+#endif
+ /* Kernel suspended */
+ DHD_ERROR(("%s: force extra suspend setting\n", __FUNCTION__));
-done:
- if (local_buf)
- MFREE(dhd->pub.osh, local_buf, buflen+1);
+ if (dhd->conf->pm_in_suspend >= 0)
+ power_mode = dhd->conf->pm_in_suspend;
+ dhd_wl_ioctl_cmd(dhd, WLC_SET_PM, (char *)&power_mode,
+ sizeof(power_mode), TRUE, 0);
- DHD_PERIM_UNLOCK(&dhd->pub);
- DHD_OS_WAKE_UNLOCK(&dhd->pub);
+#ifdef PKT_FILTER_SUPPORT
+ /* Enable packet filter,
+ * only allow unicast packet to send up
+ */
+ dhd_enable_packet_filter(1, dhd);
+#ifdef APF
+ dhd_dev_apf_enable_filter(dhd_linux_get_primary_netdev(dhd));
+#endif /* APF */
+#endif /* PKT_FILTER_SUPPORT */
- return OSL_ERROR(bcmerror);
-}
+#ifdef SUPPORT_SENSORHUB
+ shub_ctl.enable = 1;
+ shub_ctl.cmd = 0x000;
+ shub_ctl.op_mode = 1;
+ shub_ctl.interval = 0;
+ if (dhd->info->shub_enable == 1) {
+ ret = dhd_iovar(dhd, 0, "shub_msreq",
+ (char *)&shub_ctl, sizeof(shub_ctl), NULL, 0, TRUE);
+ if (ret < 0) {
+ DHD_ERROR(("%s SensorHub MS start: failed %d\n",
+ __FUNCTION__, ret));
+ }
+ }
+#endif /* SUPPORT_SENSORHUB */
+
+#ifdef PASS_ALL_MCAST_PKTS
+ allmulti = 0;
+ for (i = 0; i < DHD_MAX_IFS; i++) {
+ if (dhdinfo->iflist[i] && dhdinfo->iflist[i]->net)
+ dhd_iovar(dhd, i, "allmulti", (char *)&allmulti,
+ sizeof(allmulti), NULL, 0, TRUE);
-#if defined(WL_CFG80211) && defined(SUPPORT_DEEP_SLEEP)
-/* Flags to indicate if we distingish power off policy when
- * user set the memu "Keep Wi-Fi on during sleep" to "Never"
- */
-int trigger_deep_sleep = 0;
-#endif /* WL_CFG80211 && SUPPORT_DEEP_SLEEP */
+ }
+#endif /* PASS_ALL_MCAST_PKTS */
-#ifdef FIX_CPU_MIN_CLOCK
-static int dhd_init_cpufreq_fix(dhd_info_t *dhd)
-{
- if (dhd) {
- mutex_init(&dhd->cpufreq_fix);
- dhd->cpufreq_fix_status = FALSE;
- }
- return 0;
-}
+ /* If DTIM skip is set up as default, force it to wake
+ * on every third DTIM for better power savings. Note that
+ * one side effect is a chance of missing BC/MC packets.
+ */
+#ifdef WLTDLS
+ /* Do not set bcn_li_dtim in WFD mode */
+ if (dhd->tdls_mode) {
+ bcn_li_dtim = 0;
+ } else
+#endif /* WLTDLS */
+#if defined(BCMPCIE)
+ bcn_li_dtim = dhd_get_suspend_bcn_li_dtim(dhd, &dtim_period,
+ &bcn_interval);
+ dhd_iovar(dhd, 0, "bcn_li_dtim", (char *)&bcn_li_dtim,
+ sizeof(bcn_li_dtim), NULL, 0, TRUE);
-static void dhd_fix_cpu_freq(dhd_info_t *dhd)
-{
- mutex_lock(&dhd->cpufreq_fix);
- if (dhd && !dhd->cpufreq_fix_status) {
- pm_qos_add_request(&dhd->dhd_cpu_qos, PM_QOS_CPU_FREQ_MIN, 300000);
-#ifdef FIX_BUS_MIN_CLOCK
- pm_qos_add_request(&dhd->dhd_bus_qos, PM_QOS_BUS_THROUGHPUT, 400000);
-#endif /* FIX_BUS_MIN_CLOCK */
- DHD_ERROR(("pm_qos_add_requests called\n"));
+ if ((bcn_li_dtim * dtim_period * bcn_interval) >=
+ MIN_DTIM_FOR_ROAM_THRES_EXTEND) {
+ /*
+ * Increase the max roaming threshold from 2 secs to 8 secs;
+ * the real roam threshold is MIN(max_roam_threshold,
+ * bcn_timeout/2).
+ */
+ lpas = 1;
+ dhd_iovar(dhd, 0, "lpas", (char *)&lpas, sizeof(lpas), NULL,
+ 0, TRUE);
- dhd->cpufreq_fix_status = TRUE;
- }
- mutex_unlock(&dhd->cpufreq_fix);
-}
+ bcn_to_dly = 1;
+ /*
+ * If bcn_to_dly is 1, the real roam threshold is
+ * MIN(max_roam_threshold, bcn_timeout - 1); the link down
+ * event is notified after the roaming procedure completes
+ * if we hit bcn_timeout while roaming is in progress.
+ */
+ dhd_iovar(dhd, 0, "bcn_to_dly", (char *)&bcn_to_dly,
+ sizeof(bcn_to_dly), NULL, 0, TRUE);
+ /* Increase beacon timeout to 6 secs or use bigger one */
+ bcn_timeout = max(bcn_timeout, BCN_TIMEOUT_IN_SUSPEND);
+ dhd_iovar(dhd, 0, "bcn_timeout", (char *)&bcn_timeout,
+ sizeof(bcn_timeout), NULL, 0, TRUE);
+ }
+#else
+ bcn_li_dtim = dhd_get_suspend_bcn_li_dtim(dhd);
+ if (dhd_iovar(dhd, 0, "bcn_li_dtim", (char *)&bcn_li_dtim,
+ sizeof(bcn_li_dtim), NULL, 0, TRUE) < 0)
+ DHD_ERROR(("%s: set dtim failed\n", __FUNCTION__));
+#endif /* BCMPCIE */
-static void dhd_rollback_cpu_freq(dhd_info_t *dhd)
-{
- mutex_lock(&dhd ->cpufreq_fix);
- if (dhd && dhd->cpufreq_fix_status != TRUE) {
- mutex_unlock(&dhd->cpufreq_fix);
- return;
- }
+#ifdef DHD_USE_EARLYSUSPEND
+#ifdef CUSTOM_BCN_TIMEOUT_IN_SUSPEND
+ bcn_timeout = CUSTOM_BCN_TIMEOUT_IN_SUSPEND;
+ dhd_iovar(dhd, 0, "bcn_timeout", (char *)&bcn_timeout,
+ sizeof(bcn_timeout), NULL, 0, TRUE);
+#endif /* CUSTOM_BCN_TIMEOUT_IN_SUSPEND */
+#ifdef CUSTOM_ROAM_TIME_THRESH_IN_SUSPEND
+ roam_time_thresh = CUSTOM_ROAM_TIME_THRESH_IN_SUSPEND;
+ dhd_iovar(dhd, 0, "roam_time_thresh", (char *)&roam_time_thresh,
+ sizeof(roam_time_thresh), NULL, 0, TRUE);
+#endif /* CUSTOM_ROAM_TIME_THRESH_IN_SUSPEND */
+#ifndef ENABLE_FW_ROAM_SUSPEND
+ /* Disable firmware roaming during suspend */
+ dhd_iovar(dhd, 0, "roam_off", (char *)&roamvar, sizeof(roamvar),
+ NULL, 0, TRUE);
+#endif /* ENABLE_FW_ROAM_SUSPEND */
+#ifdef ENABLE_BCN_LI_BCN_WAKEUP
+ bcn_li_bcn = 0;
+ dhd_iovar(dhd, 0, "bcn_li_bcn", (char *)&bcn_li_bcn,
+ sizeof(bcn_li_bcn), NULL, 0, TRUE);
+#endif /* ENABLE_BCN_LI_BCN_WAKEUP */
+#ifdef NDO_CONFIG_SUPPORT
+ if (dhd->ndo_enable) {
+ if (!dhd->ndo_host_ip_overflow) {
+ /* enable ND offload on suspend */
+ ret = dhd_ndo_enable(dhd, 1);
+ if (ret < 0) {
+ DHD_ERROR(("%s: failed to enable NDO\n",
+ __FUNCTION__));
+ }
+ } else {
+ DHD_INFO(("%s: NDO disabled on suspend due to"
+ "HW capacity\n", __FUNCTION__));
+ }
+ }
+#endif /* NDO_CONFIG_SUPPORT */
+#ifndef APF
+ if (FW_SUPPORTED(dhd, ndoe))
+#else
+ if (FW_SUPPORTED(dhd, ndoe) && !FW_SUPPORTED(dhd, apf))
+#endif /* APF */
+ {
+ /* enable IPv6 RA filter in firmware during suspend */
+ nd_ra_filter = 1;
+ ret = dhd_iovar(dhd, 0, "nd_ra_filter_enable",
+ (char *)&nd_ra_filter, sizeof(nd_ra_filter),
+ NULL, 0, TRUE);
+ if (ret < 0)
+ DHD_ERROR(("failed to set nd_ra_filter (%d)\n",
+ ret));
+ }
+ dhd_os_suppress_logging(dhd, TRUE);
+#ifdef ENABLE_IPMCAST_FILTER
+ ipmcast_l2filter = 1;
+ ret = dhd_iovar(dhd, 0, "ipmcast_l2filter",
+ (char *)&ipmcast_l2filter, sizeof(ipmcast_l2filter),
+ NULL, 0, TRUE);
+#endif /* ENABLE_IPMCAST_FILTER */
+#ifdef DYNAMIC_SWOOB_DURATION
+ intr_width = CUSTOM_INTR_WIDTH;
+ ret = dhd_iovar(dhd, 0, "bus:intr_width", (char *)&intr_width,
+ sizeof(intr_width), NULL, 0, TRUE);
+ if (ret < 0) {
+ DHD_ERROR(("failed to set intr_width (%d)\n", ret));
+ }
+#endif /* DYNAMIC_SWOOB_DURATION */
+#endif /* DHD_USE_EARLYSUSPEND */
+ dhd_conf_set_ap_in_suspend(dhd, value);
+ } else {
+ dhd_conf_set_ap_in_suspend(dhd, value);
+#ifdef PKT_FILTER_SUPPORT
+ dhd->early_suspended = 0;
+#endif
+ /* Kernel resumed */
+ DHD_ERROR(("%s: Remove extra suspend setting \n", __FUNCTION__));
- pm_qos_remove_request(&dhd->dhd_cpu_qos);
-#ifdef FIX_BUS_MIN_CLOCK
- pm_qos_remove_request(&dhd->dhd_bus_qos);
-#endif /* FIX_BUS_MIN_CLOCK */
- DHD_ERROR(("pm_qos_add_requests called\n"));
+#ifdef SUPPORT_SENSORHUB
+ shub_ctl.enable = 1;
+ shub_ctl.cmd = 0x000;
+ shub_ctl.op_mode = 0;
+ shub_ctl.interval = 0;
+ if (dhd->info->shub_enable == 1) {
+ ret = dhd_iovar(dhd, 0, "shub_msreq",
+ (char *)&shub_ctl, sizeof(shub_ctl),
+ NULL, 0, TRUE);
+ if (ret < 0) {
+ DHD_ERROR(("%s SensorHub MS stop: failed %d\n",
+ __FUNCTION__, ret));
+ }
+ }
+#endif /* SUPPORT_SENSORHUB */
- dhd->cpufreq_fix_status = FALSE;
- mutex_unlock(&dhd->cpufreq_fix);
-}
-#endif /* FIX_CPU_MIN_CLOCK */
+#ifdef DYNAMIC_SWOOB_DURATION
+ intr_width = 0;
+ ret = dhd_iovar(dhd, 0, "bus:intr_width", (char *)&intr_width,
+ sizeof(intr_width), NULL, 0, TRUE);
+ if (ret < 0) {
+ DHD_ERROR(("failed to set intr_width (%d)\n", ret));
+ }
+#endif /* DYNAMIC_SWOOB_DURATION */
+#ifndef SUPPORT_PM2_ONLY
+ power_mode = PM_FAST;
+ dhd_wl_ioctl_cmd(dhd, WLC_SET_PM, (char *)&power_mode,
+ sizeof(power_mode), TRUE, 0);
+#endif /* SUPPORT_PM2_ONLY */
+#ifdef PKT_FILTER_SUPPORT
+ /* disable pkt filter */
+ dhd_enable_packet_filter(0, dhd);
+#ifdef APF
+ dhd_dev_apf_disable_filter(dhd_linux_get_primary_netdev(dhd));
+#endif /* APF */
+#endif /* PKT_FILTER_SUPPORT */
+#ifdef PASS_ALL_MCAST_PKTS
+ allmulti = 1;
+ for (i = 0; i < DHD_MAX_IFS; i++) {
+ if (dhdinfo->iflist[i] && dhdinfo->iflist[i]->net)
+ dhd_iovar(dhd, i, "allmulti", (char *)&allmulti,
+ sizeof(allmulti), NULL, 0, TRUE);
+ }
+#endif /* PASS_ALL_MCAST_PKTS */
+#if defined(BCMPCIE)
+ /* restore pre-suspend setting */
+ ret = dhd_iovar(dhd, 0, "bcn_li_dtim", (char *)&bcn_li_dtim,
+ sizeof(bcn_li_dtim), NULL, 0, TRUE);
+ if (ret < 0) {
+ DHD_ERROR(("%s:bcn_li_ditm fail:%d\n", __FUNCTION__, ret));
+ }
-#ifdef DHD_PCIE_NATIVE_RUNTIMEPM
-static int
-dhd_ioctl_entry_wrapper(struct net_device *net, struct ifreq *ifr, int cmd)
-{
- int error;
- dhd_info_t *dhd = DHD_DEV_INFO(net);
+ dhd_iovar(dhd, 0, "lpas", (char *)&lpas, sizeof(lpas), NULL, 0,
+ TRUE);
- if (atomic_read(&dhd->pub.block_bus))
- return -EHOSTDOWN;
+ dhd_iovar(dhd, 0, "bcn_to_dly", (char *)&bcn_to_dly,
+ sizeof(bcn_to_dly), NULL, 0, TRUE);
- if (pm_runtime_get_sync(dhd_bus_to_dev(dhd->pub.bus)) < 0)
- return BCME_ERROR;
+ dhd_iovar(dhd, 0, "bcn_timeout", (char *)&bcn_timeout,
+ sizeof(bcn_timeout), NULL, 0, TRUE);
+#else
+ /* restore pre-suspend setting for dtim_skip */
+ ret = dhd_iovar(dhd, 0, "bcn_li_dtim", (char *)&bcn_li_dtim,
+ sizeof(bcn_li_dtim), NULL, 0, TRUE);
+ if (ret < 0) {
+ DHD_ERROR(("%s:bcn_li_ditm fail:%d\n", __FUNCTION__, ret));
+ }
+#endif /* BCMPCIE */
+#ifdef DHD_USE_EARLYSUSPEND
+#ifdef CUSTOM_BCN_TIMEOUT_IN_SUSPEND
+ bcn_timeout = CUSTOM_BCN_TIMEOUT;
+ dhd_iovar(dhd, 0, "bcn_timeout", (char *)&bcn_timeout,
+ sizeof(bcn_timeout), NULL, 0, TRUE);
+#endif /* CUSTOM_BCN_TIMEOUT_IN_SUSPEND */
+#ifdef CUSTOM_ROAM_TIME_THRESH_IN_SUSPEND
+ roam_time_thresh = 2000;
+ dhd_iovar(dhd, 0, "roam_time_thresh", (char *)&roam_time_thresh,
+ sizeof(roam_time_thresh), NULL, 0, TRUE);
- error = dhd_ioctl_entry(net, ifr, cmd);
+#endif /* CUSTOM_ROAM_TIME_THRESH_IN_SUSPEND */
+#ifndef ENABLE_FW_ROAM_SUSPEND
+ roamvar = dhd_roam_disable;
+ dhd_iovar(dhd, 0, "roam_off", (char *)&roamvar, sizeof(roamvar),
+ NULL, 0, TRUE);
+#endif /* ENABLE_FW_ROAM_SUSPEND */
+#ifdef ENABLE_BCN_LI_BCN_WAKEUP
+ bcn_li_bcn = 1;
+ dhd_iovar(dhd, 0, "bcn_li_bcn", (char *)&bcn_li_bcn,
+ sizeof(bcn_li_bcn), NULL, 0, TRUE);
+#endif /* ENABLE_BCN_LI_BCN_WAKEUP */
+#ifdef NDO_CONFIG_SUPPORT
+ if (dhd->ndo_enable) {
+ /* Disable ND offload on resume */
+ ret = dhd_ndo_enable(dhd, 0);
+ if (ret < 0) {
+ DHD_ERROR(("%s: failed to disable NDO\n",
+ __FUNCTION__));
+ }
+ }
+#endif /* NDO_CONFIG_SUPPORT */
+#ifndef APF
+ if (FW_SUPPORTED(dhd, ndoe))
+#else
+ if (FW_SUPPORTED(dhd, ndoe) && !FW_SUPPORTED(dhd, apf))
+#endif /* APF */
+ {
+ /* disable IPv6 RA filter in firmware during suspend */
+ nd_ra_filter = 0;
+ ret = dhd_iovar(dhd, 0, "nd_ra_filter_enable",
+ (char *)&nd_ra_filter, sizeof(nd_ra_filter),
+ NULL, 0, TRUE);
+ if (ret < 0) {
+ DHD_ERROR(("failed to set nd_ra_filter (%d)\n",
+ ret));
+ }
+ }
+ dhd_os_suppress_logging(dhd, FALSE);
+#ifdef ENABLE_IPMCAST_FILTER
+ ipmcast_l2filter = 0;
+ ret = dhd_iovar(dhd, 0, "ipmcast_l2filter",
+ (char *)&ipmcast_l2filter, sizeof(ipmcast_l2filter),
+ NULL, 0, TRUE);
+#endif /* ENABLE_IPMCAST_FILTER */
+#endif /* DHD_USE_EARLYSUSPEND */
- pm_runtime_mark_last_busy(dhd_bus_to_dev(dhd->pub.bus));
- pm_runtime_put_autosuspend(dhd_bus_to_dev(dhd->pub.bus));
+ /* terence 2017029: Reject in early suspend */
+ if (!dhd->conf->xmit_in_suspend) {
+ dhd_txflowcontrol(dhd, ALL_INTERFACES, OFF);
+ }
+ }
+ }
+ dhd_suspend_unlock(dhd);
- return error;
+ return 0;
}
-#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
-static int
-dhd_stop(struct net_device *net)
+static int dhd_suspend_resume_helper(struct dhd_info *dhd, int val, int force)
{
- int ifidx = 0;
- bool skip_reset = false;
-#if defined(WL_CFG80211)
- unsigned long flags = 0;
-#ifdef WL_STATIC_IF
- struct bcm_cfg80211 *cfg = wl_get_cfg(net);
-#endif /* WL_STATIC_IF */
-#endif /* WL_CFG80211 */
- dhd_info_t *dhd = DHD_DEV_INFO(net);
- DHD_OS_WAKE_LOCK(&dhd->pub);
- DHD_PERIM_LOCK(&dhd->pub);
- printf("%s: Enter %s\n", __FUNCTION__, net->name);
- dhd->pub.rxcnt_timeout = 0;
- dhd->pub.txcnt_timeout = 0;
-
-#ifdef BCMPCIE
- dhd->pub.d3ackcnt_timeout = 0;
-#endif /* BCMPCIE */
+ dhd_pub_t *dhdp = &dhd->pub;
+ int ret = 0;
- mutex_lock(&dhd->pub.ndev_op_sync);
+ DHD_OS_WAKE_LOCK(dhdp);
+ DHD_PERIM_LOCK(dhdp);
- if (dhd->pub.up == 0) {
- goto exit;
+ /* Set flag when early suspend was called */
+ dhdp->in_suspend = val;
+ if ((force || !dhdp->suspend_disable_flag) &&
+ (dhd_support_sta_mode(dhdp) || dhd_conf_get_ap_mode_in_suspend(dhdp)))
+ {
+ ret = dhd_set_suspend(val, dhdp);
}
- dhd_if_flush_sta(DHD_DEV_IFP(net));
-
-#ifdef FIX_CPU_MIN_CLOCK
- if (dhd_get_fw_mode(dhd) == DHD_FLAG_HOSTAP_MODE)
- dhd_rollback_cpu_freq(dhd);
-#endif /* FIX_CPU_MIN_CLOCK */
-
- ifidx = dhd_net2idx(dhd, net);
- BCM_REFERENCE(ifidx);
+ DHD_PERIM_UNLOCK(dhdp);
+ DHD_OS_WAKE_UNLOCK(dhdp);
+ return ret;
+}
- DHD_ERROR(("%s: ######### dhd_stop called for ifidx=%d #########\n", __FUNCTION__, ifidx));
+#if defined(CONFIG_HAS_EARLYSUSPEND) && defined(DHD_USE_EARLYSUSPEND)
+static void dhd_early_suspend(struct early_suspend *h)
+{
+ struct dhd_info *dhd = container_of(h, struct dhd_info, early_suspend);
+ DHD_TRACE_HW4(("%s: enter\n", __FUNCTION__));
-#if defined(WL_STATIC_IF) && defined(WL_CFG80211)
- /* If static if is operational, don't reset the chip */
- if (IS_CFG80211_STATIC_IF_ACTIVE(cfg)) {
- DHD_ERROR(("static if operational. skip chip reset.\n"));
- skip_reset = true;
- wl_cfg80211_sta_ifdown(net);
- goto exit;
- }
-#endif /* WL_STATIC_IF && WL_CFG80211 */
+ if (dhd)
+ dhd_suspend_resume_helper(dhd, 1, 0);
+}
- DHD_ERROR(("%s: making dhdpub up FALSE\n", __FUNCTION__));
-#ifdef WL_CFG80211
+static void dhd_late_resume(struct early_suspend *h)
+{
+ struct dhd_info *dhd = container_of(h, struct dhd_info, early_suspend);
+ DHD_TRACE_HW4(("%s: enter\n", __FUNCTION__));
- /* Disable Runtime PM before interface down */
- DHD_DISABLE_RUNTIME_PM(&dhd->pub);
+ if (dhd)
+ dhd_suspend_resume_helper(dhd, 0, 0);
+}
+#endif /* CONFIG_HAS_EARLYSUSPEND && DHD_USE_EARLYSUSPEND */
- spin_lock_irqsave(&dhd->pub.up_lock, flags);
- dhd->pub.up = 0;
- spin_unlock_irqrestore(&dhd->pub.up_lock, flags);
-#else
- dhd->pub.up = 0;
-#endif /* WL_CFG80211 */
+/*
+ * Generalized timeout mechanism. Uses spin sleep with exponential back-off until
+ * the sleep time reaches one jiffy, then switches over to task delay. Usage:
+ *
+ * dhd_timeout_start(&tmo, usec);
+ * while (!dhd_timeout_expired(&tmo))
+ * if (poll_something())
+ * break;
+ * if (dhd_timeout_expired(&tmo))
+ * fatal();
+ */
-#ifdef WL_CFG80211
- if (ifidx == 0) {
- dhd_if_t *ifp;
- wl_cfg80211_down(net);
+void
+dhd_timeout_start(dhd_timeout_t *tmo, uint usec)
+{
+ tmo->limit = usec;
+ tmo->increment = 0;
+ tmo->elapsed = 0;
+ tmo->tick = jiffies_to_usecs(1);
+}
- ifp = dhd->iflist[0];
- /*
- * For CFG80211: Clean up all the left over virtual interfaces
- * when the primary Interface is brought down. [ifconfig wlan0 down]
- */
- if (!dhd_download_fw_on_driverload) {
- DHD_STATLOG_CTRL(&dhd->pub, ST(WLAN_POWER_OFF), ifidx, 0);
- if ((dhd->dhd_state & DHD_ATTACH_STATE_ADD_IF) &&
- (dhd->dhd_state & DHD_ATTACH_STATE_CFG80211)) {
- int i;
-#ifdef WL_CFG80211_P2P_DEV_IF
- wl_cfg80211_del_p2p_wdev(net);
-#endif /* WL_CFG80211_P2P_DEV_IF */
-#ifdef DHD_4WAYM4_FAIL_DISCONNECT
- dhd_cleanup_m4_state_work(&dhd->pub, ifidx);
-#endif /* DHD_4WAYM4_FAIL_DISCONNECT */
-#ifdef DHD_PKTDUMP_ROAM
- dhd_dump_pkt_clear(&dhd->pub);
-#endif /* DHD_PKTDUMP_ROAM */
+int
+dhd_timeout_expired(dhd_timeout_t *tmo)
+{
+ /* Does nothing on the first call */
+ if (tmo->increment == 0) {
+ tmo->increment = 1;
+ return 0;
+ }
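+ /*
+ * Skipping the delay on the first call means the polling pattern in
+ * the comment above always gets one immediate poll attempt before any
+ * spin/sleep back-off starts.
+ */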
- dhd_net_if_lock_local(dhd);
- for (i = 1; i < DHD_MAX_IFS; i++)
- dhd_remove_if(&dhd->pub, i, FALSE);
+ if (tmo->elapsed >= tmo->limit)
+ return 1;
- if (ifp && ifp->net) {
- dhd_if_del_sta_list(ifp);
- }
-#ifdef ARP_OFFLOAD_SUPPORT
- if (dhd_inetaddr_notifier_registered) {
- dhd_inetaddr_notifier_registered = FALSE;
- unregister_inetaddr_notifier(&dhd_inetaddr_notifier);
- }
-#endif /* ARP_OFFLOAD_SUPPORT */
-#if defined(CONFIG_IPV6) && defined(IPV6_NDO_SUPPORT)
- if (dhd_inet6addr_notifier_registered) {
- dhd_inet6addr_notifier_registered = FALSE;
- unregister_inet6addr_notifier(&dhd_inet6addr_notifier);
- }
-#endif /* CONFIG_IPV6 && IPV6_NDO_SUPPORT */
- dhd_net_if_unlock_local(dhd);
- }
-#if 0
- // terence 20161024: remove this to prevent dev_close() get stuck in dhd_hang_process
- cancel_work_sync(dhd->dhd_deferred_wq);
-#endif
+ /* Add the delay that's about to take place */
+ tmo->elapsed += tmo->increment;
-#ifdef SHOW_LOGTRACE
- /* Wait till event logs work/kthread finishes */
- dhd_cancel_logtrace_process_sync(dhd);
-#endif /* SHOW_LOGTRACE */
+ if ((!CAN_SLEEP()) || tmo->increment < tmo->tick) {
+ OSL_DELAY(tmo->increment);
+ tmo->increment *= 2;
+ if (tmo->increment > tmo->tick)
+ tmo->increment = tmo->tick;
+ } else {
+ wait_queue_head_t delay_wait;
+ DECLARE_WAITQUEUE(wait, current);
+ init_waitqueue_head(&delay_wait);
+ add_wait_queue(&delay_wait, &wait);
+ set_current_state(TASK_INTERRUPTIBLE);
+ (void)schedule_timeout(1);
+ remove_wait_queue(&delay_wait, &wait);
+ set_current_state(TASK_RUNNING);
+ }
-#if defined(DHD_LB_RXP)
- __skb_queue_purge(&dhd->rx_pend_queue);
-#endif /* DHD_LB_RXP */
+ return 0;
+}
-#if defined(DHD_LB_TXP)
- skb_queue_purge(&dhd->tx_pend_queue);
-#endif /* DHD_LB_TXP */
- }
+int
+dhd_net2idx(dhd_info_t *dhd, struct net_device *net)
+{
+ int i = 0;
-#if defined(ARGOS_NOTIFY_CB)
- argos_register_notifier_deinit();
-#endif // endif
-#ifdef DHDTCPACK_SUPPRESS
- dhd_tcpack_suppress_set(&dhd->pub, TCPACK_SUP_OFF);
-#endif /* DHDTCPACK_SUPPRESS */
-#if defined(DHD_LB_RXP)
- if (ifp && ifp->net == dhd->rx_napi_netdev) {
- DHD_INFO(("%s napi<%p> disabled ifp->net<%p,%s>\n",
- __FUNCTION__, &dhd->rx_napi_struct, net, net->name));
- skb_queue_purge(&dhd->rx_napi_queue);
- napi_disable(&dhd->rx_napi_struct);
- netif_napi_del(&dhd->rx_napi_struct);
- dhd->rx_napi_netdev = NULL;
- }
-#endif /* DHD_LB_RXP */
+ if (!dhd) {
+ DHD_ERROR(("%s : DHD_BAD_IF return\n", __FUNCTION__));
+ return DHD_BAD_IF;
}
-#endif /* WL_CFG80211 */
-
- DHD_SSSR_DUMP_DEINIT(&dhd->pub);
-#ifdef PROP_TXSTATUS
- dhd_wlfc_cleanup(&dhd->pub, NULL, 0);
-#endif // endif
-#ifdef SHOW_LOGTRACE
- if (!dhd_download_fw_on_driverload) {
- /* Release the skbs from queue for WLC_E_TRACE event */
- dhd_event_logtrace_flush_queue(&dhd->pub);
- if (dhd->dhd_state & DHD_ATTACH_LOGTRACE_INIT) {
- if (dhd->event_data.fmts) {
- MFREE(dhd->pub.osh, dhd->event_data.fmts,
- dhd->event_data.fmts_size);
- dhd->event_data.fmts = NULL;
- }
- if (dhd->event_data.raw_fmts) {
- MFREE(dhd->pub.osh, dhd->event_data.raw_fmts,
- dhd->event_data.raw_fmts_size);
- dhd->event_data.raw_fmts = NULL;
- }
- if (dhd->event_data.raw_sstr) {
- MFREE(dhd->pub.osh, dhd->event_data.raw_sstr,
- dhd->event_data.raw_sstr_size);
- dhd->event_data.raw_sstr = NULL;
- }
- if (dhd->event_data.rom_raw_sstr) {
- MFREE(dhd->pub.osh, dhd->event_data.rom_raw_sstr,
- dhd->event_data.rom_raw_sstr_size);
- dhd->event_data.rom_raw_sstr = NULL;
- }
- dhd->dhd_state &= ~DHD_ATTACH_LOGTRACE_INIT;
- }
+ while (i < DHD_MAX_IFS) {
+ if (dhd->iflist[i] && dhd->iflist[i]->net && (dhd->iflist[i]->net == net))
+ return i;
+ i++;
}
-#endif /* SHOW_LOGTRACE */
-#ifdef APF
- dhd_dev_apf_delete_filter(net);
-#endif /* APF */
- /* Stop the protocol module */
- dhd_prot_stop(&dhd->pub);
+ return DHD_BAD_IF;
+}
- OLD_MOD_DEC_USE_COUNT;
-exit:
- if (skip_reset == false) {
-#if defined(WL_WIRELESS_EXT)
- if (ifidx == 0) {
- wl_iw_down(net, &dhd->pub);
- }
-#endif /* defined(WL_WIRELESS_EXT) */
-#ifdef WL_ESCAN
- if (ifidx == 0) {
- wl_escan_down(net, &dhd->pub);
- }
-#endif /* WL_ESCAN */
- if (ifidx == 0 && !dhd_download_fw_on_driverload) {
-#if defined(BT_OVER_SDIO)
- dhd_bus_put(&dhd->pub, WLAN_MODULE);
- wl_android_set_wifi_on_flag(FALSE);
-#else
- wl_android_wifi_off(net, TRUE);
-#if defined(WL_EXT_IAPSTA) || defined(USE_IW) || defined(WL_ESCAN)
-#ifdef WL_EXT_IAPSTA
- wl_ext_iapsta_dettach_netdev(net, ifidx);
-#endif /* WL_EXT_IAPSTA */
-#ifdef WL_ESCAN
- wl_escan_event_dettach(net, &dhd->pub);
-#endif /* WL_ESCAN */
- wl_ext_event_dettach_netdev(net, ifidx);
-#endif /* WL_EXT_IAPSTA || USE_IW || WL_ESCAN */
-#endif /* BT_OVER_SDIO */
- }
-#ifdef SUPPORT_DEEP_SLEEP
- else {
- /* CSP#505233: Flags to indicate if we distingish
- * power off policy when user set the memu
- * "Keep Wi-Fi on during sleep" to "Never"
- */
- if (trigger_deep_sleep) {
- dhd_deepsleep(net, 1);
- trigger_deep_sleep = 0;
- }
- }
-#endif /* SUPPORT_DEEP_SLEEP */
- dhd->pub.hang_was_sent = 0;
- dhd->pub.hang_was_pending = 0;
+struct net_device * dhd_idx2net(void *pub, int ifidx)
+{
+ struct dhd_pub *dhd_pub = (struct dhd_pub *)pub;
+ struct dhd_info *dhd_info;
+
+ if (!dhd_pub || ifidx < 0 || ifidx >= DHD_MAX_IFS)
+ return NULL;
+ dhd_info = dhd_pub->info;
+ if (dhd_info && dhd_info->iflist[ifidx])
+ return dhd_info->iflist[ifidx]->net;
+ return NULL;
+}
- /* Clear country spec for for built-in type driver */
- if (!dhd_download_fw_on_driverload) {
- dhd->pub.dhd_cspec.country_abbrev[0] = 0x00;
- dhd->pub.dhd_cspec.rev = 0;
- dhd->pub.dhd_cspec.ccode[0] = 0x00;
- }
+int
+dhd_ifname2idx(dhd_info_t *dhd, char *name)
+{
+ int i = DHD_MAX_IFS;
-#ifdef BCMDBGFS
- dhd_dbgfs_remove();
-#endif // endif
- }
+ ASSERT(dhd);
- DHD_PERIM_UNLOCK(&dhd->pub);
- DHD_OS_WAKE_UNLOCK(&dhd->pub);
+ if (name == NULL || *name == '\0')
+ return 0;
- /* Destroy wakelock */
- if (!dhd_download_fw_on_driverload &&
- (dhd->dhd_state & DHD_ATTACH_STATE_WAKELOCKS_INIT) &&
- (skip_reset == false)) {
- DHD_OS_WAKE_LOCK_DESTROY(dhd);
- dhd->dhd_state &= ~DHD_ATTACH_STATE_WAKELOCKS_INIT;
- }
- printf("%s: Exit %s\n", __FUNCTION__, net->name);
+ while (--i > 0)
+ if (dhd->iflist[i] && !strncmp(dhd->iflist[i]->dngl_name, name, IFNAMSIZ))
+ break;
- mutex_unlock(&dhd->pub.ndev_op_sync);
- return 0;
-}
+ DHD_TRACE(("%s: return idx %d for \"%s\"\n", __FUNCTION__, i, name));
-#if defined(WL_CFG80211) && (defined(USE_INITIAL_2G_SCAN) || \
- defined(USE_INITIAL_SHORT_DWELL_TIME))
-extern bool g_first_broadcast_scan;
-#endif /* OEM_ANDROID && WL_CFG80211 && (USE_INITIAL_2G_SCAN || USE_INITIAL_SHORT_DWELL_TIME) */
+ return i; /* default - the primary interface */
+}
-#ifdef WL11U
-static int dhd_interworking_enable(dhd_pub_t *dhd)
+char *
+dhd_ifname(dhd_pub_t *dhdp, int ifidx)
{
- uint32 enable = true;
- int ret = BCME_OK;
+ dhd_info_t *dhd = (dhd_info_t *)dhdp->info;
- ret = dhd_iovar(dhd, 0, "interworking", (char *)&enable, sizeof(enable), NULL, 0, TRUE);
- if (ret < 0) {
- DHD_ERROR(("%s: enableing interworking failed, ret=%d\n", __FUNCTION__, ret));
+ ASSERT(dhd);
+
+ if (ifidx < 0 || ifidx >= DHD_MAX_IFS) {
+ DHD_ERROR(("%s: ifidx %d out of range\n", __FUNCTION__, ifidx));
+ return "<if_bad>";
}
- return ret;
+ if (dhd->iflist[ifidx] == NULL) {
+ DHD_ERROR(("%s: null i/f %d\n", __FUNCTION__, ifidx));
+ return "<if_null>";
+ }
+
+ if (dhd->iflist[ifidx]->net)
+ return dhd->iflist[ifidx]->net->name;
+
+ return "<if_none>";
}
-#endif /* WL11u */
-static int
-dhd_open(struct net_device *net)
+uint8 *
+dhd_bssidx2bssid(dhd_pub_t *dhdp, int idx)
{
- dhd_info_t *dhd = DHD_DEV_INFO(net);
-#ifdef TOE
- uint32 toe_ol;
-#endif // endif
- int ifidx;
- int32 ret = 0;
-#if defined(OOB_INTR_ONLY)
- uint32 bus_type = -1;
- uint32 bus_num = -1;
- uint32 slot_num = -1;
- wifi_adapter_info_t *adapter = NULL;
-#endif
-#if defined(WL_EXT_IAPSTA) && defined(ISAM_PREINIT)
- int bytes_written = 0;
-#endif
+ int i;
+ dhd_info_t *dhd = (dhd_info_t *)dhdp;
- mutex_lock(&dhd->pub.ndev_op_sync);
+ ASSERT(dhd);
+ for (i = 0; i < DHD_MAX_IFS; i++)
+ if (dhd->iflist[i] && dhd->iflist[i]->bssidx == idx)
+ return dhd->iflist[i]->mac_addr;
- if (dhd->pub.up == 1) {
- /* already up */
- DHD_ERROR(("Primary net_device is already up \n"));
- mutex_unlock(&dhd->pub.ndev_op_sync);
- return BCME_OK;
- }
+ return NULL;
+}
- if (!dhd_download_fw_on_driverload) {
- if (!dhd_driver_init_done) {
- DHD_ERROR(("%s: WLAN driver is not initialized\n", __FUNCTION__));
- mutex_unlock(&dhd->pub.ndev_op_sync);
- return -1;
- }
- }
- printf("%s: Enter %s\n", __FUNCTION__, net->name);
- DHD_MUTEX_LOCK();
- /* Init wakelock */
- if (!dhd_download_fw_on_driverload) {
- if (!(dhd->dhd_state & DHD_ATTACH_STATE_WAKELOCKS_INIT)) {
- DHD_OS_WAKE_LOCK_INIT(dhd);
- dhd->dhd_state |= DHD_ATTACH_STATE_WAKELOCKS_INIT;
- }
+static void
+_dhd_set_multicast_list(dhd_info_t *dhd, int ifidx)
+{
+ struct net_device *dev;
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35)
+ struct netdev_hw_addr *ha;
+#else
+ struct dev_mc_list *mclist;
+#endif
+ uint32 allmulti, cnt;
-#ifdef SHOW_LOGTRACE
- skb_queue_head_init(&dhd->evt_trace_queue);
+ wl_ioctl_t ioc;
+ char *buf, *bufp;
+ uint buflen;
+ int ret;
- if (!(dhd->dhd_state & DHD_ATTACH_LOGTRACE_INIT)) {
- ret = dhd_init_logstrs_array(dhd->pub.osh, &dhd->event_data);
- if (ret == BCME_OK) {
- dhd_init_static_strs_array(dhd->pub.osh, &dhd->event_data,
- st_str_file_path, map_file_path);
- dhd_init_static_strs_array(dhd->pub.osh, &dhd->event_data,
- rom_st_str_file_path, rom_map_file_path);
- dhd->dhd_state |= DHD_ATTACH_LOGTRACE_INIT;
- }
- }
-#endif /* SHOW_LOGTRACE */
+ if (!dhd->iflist[ifidx]) {
+ DHD_ERROR(("%s : dhd->iflist[%d] was NULL\n", __FUNCTION__, ifidx));
+ return;
}
+ dev = dhd->iflist[ifidx]->net;
+ if (!dev)
+ return;
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
+ netif_addr_lock_bh(dev);
+#endif /* LINUX >= 2.6.27 */
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35)
+ cnt = netdev_mc_count(dev);
+#else
+ cnt = dev->mc_count;
+#endif /* LINUX >= 2.6.35 */
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
+ netif_addr_unlock_bh(dev);
+#endif /* LINUX >= 2.6.27 */
- DHD_OS_WAKE_LOCK(&dhd->pub);
- DHD_PERIM_LOCK(&dhd->pub);
- dhd->pub.dongle_trap_occured = 0;
- dhd->pub.hang_was_sent = 0;
- dhd->pub.hang_was_pending = 0;
- dhd->pub.hang_reason = 0;
- dhd->pub.iovar_timeout_occured = 0;
-#ifdef PCIE_FULL_DONGLE
- dhd->pub.d3ack_timeout_occured = 0;
- dhd->pub.livelock_occured = 0;
- dhd->pub.pktid_audit_failed = 0;
-#endif /* PCIE_FULL_DONGLE */
- dhd->pub.iface_op_failed = 0;
- dhd->pub.scan_timeout_occurred = 0;
- dhd->pub.scan_busy_occurred = 0;
- dhd->pub.smmu_fault_occurred = 0;
-
-#ifdef DHD_LOSSLESS_ROAMING
- dhd->pub.dequeue_prec_map = ALLPRIO;
-#endif // endif
+ /* Determine initial value of allmulti flag */
+ allmulti = (dev->flags & IFF_ALLMULTI) ? TRUE : FALSE;
-#if 0
- /*
- * Force start if ifconfig_up gets called before START command
- * We keep WEXT's wl_control_wl_start to provide backward compatibility
- * This should be removed in the future
- */
- ret = wl_control_wl_start(net);
- if (ret != 0) {
- DHD_ERROR(("%s: failed with code %d\n", __FUNCTION__, ret));
- ret = -1;
- goto exit;
- }
+#ifdef PASS_ALL_MCAST_PKTS
+#ifdef PKT_FILTER_SUPPORT
+ if (!dhd->pub.early_suspended)
+#endif /* PKT_FILTER_SUPPORT */
+ allmulti = TRUE;
+#endif /* PASS_ALL_MCAST_PKTS */
-#endif // endif
+ /* Send down the multicast list first. */
- ifidx = dhd_net2idx(dhd, net);
- DHD_TRACE(("%s: ifidx %d\n", __FUNCTION__, ifidx));
- if (ifidx < 0) {
- DHD_ERROR(("%s: Error: called with invalid IF\n", __FUNCTION__));
- ret = -1;
- goto exit;
+ buflen = sizeof("mcast_list") + sizeof(cnt) + (cnt * ETHER_ADDR_LEN);
+ if (!(bufp = buf = MALLOC(dhd->pub.osh, buflen))) {
+ DHD_ERROR(("%s: out of memory for mcast_list, cnt %d\n",
+ dhd_ifname(&dhd->pub, ifidx), cnt));
+ return;
}
- if (!dhd->iflist[ifidx]) {
- DHD_ERROR(("%s: Error: called when IF already deleted\n", __FUNCTION__));
- ret = -1;
- goto exit;
- }
+ strncpy(bufp, "mcast_list", buflen - 1);
+ bufp[buflen - 1] = '\0';
+ bufp += strlen("mcast_list") + 1;
- if (ifidx == 0) {
- atomic_set(&dhd->pend_8021x_cnt, 0);
- if (!dhd_download_fw_on_driverload) {
- DHD_ERROR(("\n%s\n", dhd_version));
- DHD_STATLOG_CTRL(&dhd->pub, ST(WLAN_POWER_ON), ifidx, 0);
-#if defined(WL_EXT_IAPSTA) || defined(USE_IW) || defined(WL_ESCAN)
- wl_ext_event_attach_netdev(net, ifidx, dhd->iflist[ifidx]->bssidx);
-#ifdef WL_ESCAN
- wl_escan_event_attach(net, &dhd->pub);
-#endif /* WL_ESCAN */
-#ifdef WL_EXT_IAPSTA
- wl_ext_iapsta_attach_netdev(net, ifidx, dhd->iflist[ifidx]->bssidx);
-#endif /* WL_EXT_IAPSTA */
-#endif /* WL_EXT_IAPSTA || USE_IW || WL_ESCAN */
-#if defined(USE_INITIAL_2G_SCAN) || defined(USE_INITIAL_SHORT_DWELL_TIME)
- g_first_broadcast_scan = TRUE;
-#endif /* USE_INITIAL_2G_SCAN || USE_INITIAL_SHORT_DWELL_TIME */
-#ifdef SHOW_LOGTRACE
- /* dhd_cancel_logtrace_process_sync is called in dhd_stop
- * for built-in models. Need to start logtrace kthread before
- * calling wifi on, because once wifi is on, EDL will be in action
- * any moment, and if kthread is not active, FW event logs will
- * not be available
- */
- if (dhd_reinit_logtrace_process(dhd) != BCME_OK) {
- goto exit;
- }
-#endif /* SHOW_LOGTRACE */
-#if defined(BT_OVER_SDIO)
- ret = dhd_bus_get(&dhd->pub, WLAN_MODULE);
- wl_android_set_wifi_on_flag(TRUE);
-#else
- ret = wl_android_wifi_on(net);
-#endif /* BT_OVER_SDIO */
- if (ret != 0) {
- DHD_ERROR(("%s : wl_android_wifi_on failed (%d)\n",
- __FUNCTION__, ret));
- ret = -1;
- goto exit;
- }
- }
-#ifdef SUPPORT_DEEP_SLEEP
- else {
- /* Flags to indicate if we distingish
- * power off policy when user set the memu
- * "Keep Wi-Fi on during sleep" to "Never"
- */
- if (trigger_deep_sleep) {
-#if defined(USE_INITIAL_2G_SCAN) || defined(USE_INITIAL_SHORT_DWELL_TIME)
- g_first_broadcast_scan = TRUE;
-#endif /* USE_INITIAL_2G_SCAN || USE_INITIAL_SHORT_DWELL_TIME */
- dhd_deepsleep(net, 0);
- trigger_deep_sleep = 0;
- }
- }
-#endif /* SUPPORT_DEEP_SLEEP */
-#ifdef FIX_CPU_MIN_CLOCK
- if (dhd_get_fw_mode(dhd) == DHD_FLAG_HOSTAP_MODE) {
- dhd_init_cpufreq_fix(dhd);
- dhd_fix_cpu_freq(dhd);
- }
-#endif /* FIX_CPU_MIN_CLOCK */
-#if defined(OOB_INTR_ONLY)
- if (dhd->pub.conf->dpc_cpucore >= 0) {
- dhd_bus_get_ids(dhd->pub.bus, &bus_type, &bus_num, &slot_num);
- adapter = dhd_wifi_platform_get_adapter(bus_type, bus_num, slot_num);
- if (adapter) {
- printf("%s: set irq affinity hit %d\n", __FUNCTION__, dhd->pub.conf->dpc_cpucore);
- irq_set_affinity_hint(adapter->irq_num, cpumask_of(dhd->pub.conf->dpc_cpucore));
- }
- }
-#endif
+ cnt = htol32(cnt);
+ memcpy(bufp, &cnt, sizeof(cnt));
+ bufp += sizeof(cnt);
- if (dhd->pub.busstate != DHD_BUS_DATA) {
-#ifdef BCMDBUS
- dhd_set_path(&dhd->pub);
- DHD_MUTEX_UNLOCK();
- wait_event_interruptible_timeout(dhd->adapter->status_event,
- wifi_get_adapter_status(dhd->adapter, WIFI_STATUS_FW_READY),
- msecs_to_jiffies(DHD_FW_READY_TIMEOUT));
- DHD_MUTEX_LOCK();
- if ((ret = dbus_up(dhd->pub.bus)) != 0) {
- DHD_ERROR(("%s: failed to dbus_up with code %d\n", __FUNCTION__, ret));
- goto exit;
- } else {
- dhd->pub.busstate = DHD_BUS_DATA;
- }
- if ((ret = dhd_sync_with_dongle(&dhd->pub)) < 0) {
- DHD_ERROR(("%s: failed with code %d\n", __FUNCTION__, ret));
- goto exit;
- }
-#else
- /* try to bring up bus */
- DHD_PERIM_UNLOCK(&dhd->pub);
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
+ netif_addr_lock_bh(dev);
+#endif /* LINUX >= 2.6.27 */
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35)
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wcast-qual"
+#endif
+ netdev_for_each_mc_addr(ha, dev) {
+ if (!cnt)
+ break;
+ memcpy(bufp, ha->addr, ETHER_ADDR_LEN);
+ bufp += ETHER_ADDR_LEN;
+ cnt--;
+ }
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic pop
+#endif
+#else /* LINUX < 2.6.35 */
+ for (mclist = dev->mc_list; (mclist && (cnt > 0));
+ cnt--, mclist = mclist->next) {
+ memcpy(bufp, (void *)mclist->dmi_addr, ETHER_ADDR_LEN);
+ bufp += ETHER_ADDR_LEN;
+ }
+#endif /* LINUX >= 2.6.35 */
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
+ netif_addr_unlock_bh(dev);
+#endif /* LINUX >= 2.6.27 */
-#ifdef DHD_PCIE_NATIVE_RUNTIMEPM
- if (pm_runtime_get_sync(dhd_bus_to_dev(dhd->pub.bus)) >= 0) {
- ret = dhd_bus_start(&dhd->pub);
- pm_runtime_mark_last_busy(dhd_bus_to_dev(dhd->pub.bus));
- pm_runtime_put_autosuspend(dhd_bus_to_dev(dhd->pub.bus));
- }
-#else
- ret = dhd_bus_start(&dhd->pub);
-#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
+ memset(&ioc, 0, sizeof(ioc));
+ ioc.cmd = WLC_SET_VAR;
+ ioc.buf = buf;
+ ioc.len = buflen;
+ ioc.set = TRUE;
- DHD_PERIM_LOCK(&dhd->pub);
- if (ret) {
- DHD_ERROR(("%s: failed with code %d\n", __FUNCTION__, ret));
- ret = -1;
- goto exit;
- }
-#endif /* !BCMDBUS */
+ ret = dhd_wl_ioctl(&dhd->pub, ifidx, &ioc, ioc.buf, ioc.len);
+ if (ret < 0) {
+ DHD_ERROR(("%s: set mcast_list failed, cnt %d\n",
+ dhd_ifname(&dhd->pub, ifidx), cnt));
+ allmulti = cnt ? TRUE : allmulti;
+ }
- }
-#ifdef WL_EXT_IAPSTA
- wl_ext_iapsta_attach_name(net, ifidx);
-#endif
+ MFREE(dhd->pub.osh, buf, buflen);
-#ifdef BT_OVER_SDIO
- if (dhd->pub.is_bt_recovery_required) {
- DHD_ERROR(("%s: Send Hang Notification 2 to BT\n", __FUNCTION__));
- bcmsdh_btsdio_process_dhd_hang_notification(TRUE);
- }
- dhd->pub.is_bt_recovery_required = FALSE;
-#endif // endif
+ /* Now send the allmulti setting. This is based on the setting in the
+ * net_device flags, but might be modified above to be turned on if we
+ * were trying to set some addresses and the dongle rejected them.
+ */
- /* dhd_sync_with_dongle has been called in dhd_bus_start or wl_android_wifi_on */
- memcpy(net->dev_addr, dhd->pub.mac.octet, ETHER_ADDR_LEN);
+ allmulti = htol32(allmulti);
+ ret = dhd_iovar(&dhd->pub, ifidx, "allmulti", (char *)&allmulti,
+ sizeof(allmulti), NULL, 0, TRUE);
+ if (ret < 0) {
+ DHD_ERROR(("%s: set allmulti %d failed\n",
+ dhd_ifname(&dhd->pub, ifidx), ltoh32(allmulti)));
+ }
-#ifdef TOE
- /* Get current TOE mode from dongle */
- if (dhd_toe_get(dhd, ifidx, &toe_ol) >= 0 && (toe_ol & TOE_TX_CSUM_OL) != 0) {
- dhd->iflist[ifidx]->net->features |= NETIF_F_IP_CSUM;
- } else {
- dhd->iflist[ifidx]->net->features &= ~NETIF_F_IP_CSUM;
- }
-#endif /* TOE */
+ /* Finally, pick up the PROMISC flag as well, like the NIC driver does */
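+ /* The 'allmulti' local is reused below to carry the PROMISC value for
+ * the WLC_SET_PROMISC ioctl; it no longer reflects the multicast
+ * setting at this point.
+ */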
-#if defined(DHD_LB_RXP)
- __skb_queue_head_init(&dhd->rx_pend_queue);
- if (dhd->rx_napi_netdev == NULL) {
- dhd->rx_napi_netdev = dhd->iflist[ifidx]->net;
- memset(&dhd->rx_napi_struct, 0, sizeof(struct napi_struct));
- netif_napi_add(dhd->rx_napi_netdev, &dhd->rx_napi_struct,
- dhd_napi_poll, dhd_napi_weight);
- DHD_INFO(("%s napi<%p> enabled ifp->net<%p,%s>\n",
- __FUNCTION__, &dhd->rx_napi_struct, net, net->name));
- napi_enable(&dhd->rx_napi_struct);
- DHD_INFO(("%s load balance init rx_napi_struct\n", __FUNCTION__));
- skb_queue_head_init(&dhd->rx_napi_queue);
- } /* rx_napi_netdev == NULL */
-#endif /* DHD_LB_RXP */
+ allmulti = (dev->flags & IFF_PROMISC) ? TRUE : FALSE;
-#if defined(DHD_LB_TXP)
- /* Use the variant that uses locks */
- skb_queue_head_init(&dhd->tx_pend_queue);
-#endif /* DHD_LB_TXP */
+ allmulti = htol32(allmulti);
-#if defined(WL_CFG80211)
- if (unlikely(wl_cfg80211_up(net))) {
- DHD_ERROR(("%s: failed to bring up cfg80211\n", __FUNCTION__));
- ret = -1;
- goto exit;
- }
- if (!dhd_download_fw_on_driverload) {
-#ifdef ARP_OFFLOAD_SUPPORT
- dhd->pend_ipaddr = 0;
- if (!dhd_inetaddr_notifier_registered) {
- dhd_inetaddr_notifier_registered = TRUE;
- register_inetaddr_notifier(&dhd_inetaddr_notifier);
- }
-#endif /* ARP_OFFLOAD_SUPPORT */
-#if defined(CONFIG_IPV6) && defined(IPV6_NDO_SUPPORT)
- if (!dhd_inet6addr_notifier_registered) {
- dhd_inet6addr_notifier_registered = TRUE;
- register_inet6addr_notifier(&dhd_inet6addr_notifier);
- }
-#endif /* CONFIG_IPV6 && IPV6_NDO_SUPPORT */
- }
+ memset(&ioc, 0, sizeof(ioc));
+ ioc.cmd = WLC_SET_PROMISC;
+ ioc.buf = &allmulti;
+ ioc.len = sizeof(allmulti);
+ ioc.set = TRUE;
-#if defined(DHD_CONTROL_PCIE_ASPM_WIFI_TURNON)
- dhd_bus_aspm_enable_rc_ep(dhd->pub.bus, TRUE);
-#endif /* DHD_CONTROL_PCIE_ASPM_WIFI_TURNON */
-#if defined(DHD_CONTROL_PCIE_CPUCORE_WIFI_TURNON)
- dhd_irq_set_affinity(&dhd->pub, cpumask_of(0));
-#endif /* DHD_CONTROL_PCIE_CPUCORE_WIFI_TURNON */
-#ifdef DHD_LB_IRQSET
- dhd_irq_set_affinity(&dhd->pub, dhd->cpumask_primary);
-#endif /* DHD_LB_IRQSET */
-#if defined(ARGOS_NOTIFY_CB)
- argos_register_notifier_init(net);
-#endif // endif
-#if defined(NUM_SCB_MAX_PROBE)
- dhd_set_scb_probe(&dhd->pub);
-#endif /* NUM_SCB_MAX_PROBE */
-#endif /* WL_CFG80211 */
-#if defined(WL_WIRELESS_EXT)
- if (unlikely(wl_iw_up(net, &dhd->pub))) {
- DHD_ERROR(("%s: failed to bring up wext\n", __FUNCTION__));
- ret = -1;
- goto exit;
- }
-#endif
-#ifdef WL_ESCAN
- if (unlikely(wl_escan_up(net, &dhd->pub))) {
- DHD_ERROR(("%s: failed to bring up escan\n", __FUNCTION__));
- ret = -1;
- goto exit;
- }
-#endif /* WL_ESCAN */
-#if defined(ISAM_PREINIT)
- if (!dhd_download_fw_on_driverload) {
- if (dhd->pub.conf) {
- wl_android_ext_priv_cmd(net, dhd->pub.conf->isam_init, 0, &bytes_written);
- wl_android_ext_priv_cmd(net, dhd->pub.conf->isam_config, 0, &bytes_written);
- wl_android_ext_priv_cmd(net, dhd->pub.conf->isam_enable, 0, &bytes_written);
- }
- }
-#endif
+ ret = dhd_wl_ioctl(&dhd->pub, ifidx, &ioc, ioc.buf, ioc.len);
+ if (ret < 0) {
+ DHD_ERROR(("%s: set promisc %d failed\n",
+ dhd_ifname(&dhd->pub, ifidx), ltoh32(allmulti)));
}
+}
- dhd->pub.up = 1;
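+/* Program "cur_etheraddr" into the dongle and, on success, mirror the new
+ * address into the net_device (and into the primary MAC for ifidx 0).
+ */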
+int
+_dhd_set_mac_address(dhd_info_t *dhd, int ifidx, uint8 *addr)
+{
+ int ret;
- if (wl_event_enable) {
- /* For wl utility to receive events */
- dhd->pub.wl_event_enabled = true;
+ ret = dhd_iovar(&dhd->pub, ifidx, "cur_etheraddr", (char *)addr,
+ ETHER_ADDR_LEN, NULL, 0, TRUE);
+ if (ret < 0) {
+ DHD_ERROR(("%s: set cur_etheraddr failed\n", dhd_ifname(&dhd->pub, ifidx)));
} else {
- dhd->pub.wl_event_enabled = false;
+ memcpy(dhd->iflist[ifidx]->net->dev_addr, addr, ETHER_ADDR_LEN);
+ if (ifidx == 0)
+ memcpy(dhd->pub.mac.octet, addr, ETHER_ADDR_LEN);
}
- if (logtrace_pkt_sendup) {
- /* For any deamon to recieve logtrace */
- dhd->pub.logtrace_pkt_sendup = true;
- } else {
- dhd->pub.logtrace_pkt_sendup = false;
+ return ret;
+}
+
+#ifdef SOFTAP
+extern struct net_device *ap_net_dev;
+extern tsk_ctl_t ap_eth_ctl; /* ap netdev helper thread ctl */
+#endif
+
+#ifdef DHD_WMF
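+/* Link a proxy-STA client's station entry to the station entry of its
+ * primary interface, based on the firmware PSTA primary-interface event.
+ */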
+void dhd_update_psta_interface_for_sta(dhd_pub_t* dhdp, char* ifname, void* ea,
+ void* event_data)
+{
+ struct wl_psta_primary_intf_event *psta_prim_event =
+ (struct wl_psta_primary_intf_event*)event_data;
+ dhd_sta_t *psta_interface = NULL;
+ dhd_sta_t *sta = NULL;
+ uint8 ifindex;
+ ASSERT(ifname);
+ ASSERT(psta_prim_event);
+ ASSERT(ea);
+
+ ifindex = (uint8)dhd_ifname2idx(dhdp->info, ifname);
+ sta = dhd_find_sta(dhdp, ifindex, ea);
+ if (sta != NULL) {
+ psta_interface = dhd_find_sta(dhdp, ifindex,
+ (void *)(psta_prim_event->prim_ea.octet));
+ if (psta_interface != NULL) {
+ sta->psta_prim = psta_interface;
+ }
}
+}
- OLD_MOD_INC_USE_COUNT;
+/* Get wmf_psta_disable configuration */
+int dhd_get_wmf_psta_disable(dhd_pub_t *dhdp, uint32 idx)
+{
+ dhd_info_t *dhd = dhdp->info;
+ dhd_if_t *ifp;
+ ASSERT(idx < DHD_MAX_IFS);
+ ifp = dhd->iflist[idx];
+ return ifp->wmf_psta_disable;
+}
-#ifdef BCMDBGFS
- dhd_dbgfs_init(&dhd->pub);
-#endif // endif
+/* Set wmf_psta_disable configuration */
+int dhd_set_wmf_psta_disable(dhd_pub_t *dhdp, uint32 idx, int val)
+{
+ dhd_info_t *dhd = dhdp->info;
+ dhd_if_t *ifp;
+ ASSERT(idx < DHD_MAX_IFS);
+ ifp = dhd->iflist[idx];
+ ifp->wmf_psta_disable = val;
+ return 0;
+}
+#endif /* DHD_WMF */
-exit:
- mutex_unlock(&dhd->pub.ndev_op_sync);
- if (ret) {
- dhd_stop(net);
+#ifdef DHD_PSTA
+/* Get psta/psr configuration */
+int dhd_get_psta_mode(dhd_pub_t *dhdp)
+{
+ dhd_info_t *dhd = dhdp->info;
+ return (int)dhd->psta_mode;
+}
+/* Set psta/psr configuration */
+int dhd_set_psta_mode(dhd_pub_t *dhdp, uint32 val)
+{
+ dhd_info_t *dhd = dhdp->info;
+ dhd->psta_mode = val;
+ return 0;
+}
+#endif /* DHD_PSTA */
+
+#if (defined(DHD_WET) || defined(DHD_MCAST_REGEN) || defined(DHD_L2_FILTER))
+static void
+dhd_update_rx_pkt_chainable_state(dhd_pub_t* dhdp, uint32 idx)
+{
+ dhd_info_t *dhd = dhdp->info;
+ dhd_if_t *ifp;
+
+ ASSERT(idx < DHD_MAX_IFS);
+
+ ifp = dhd->iflist[idx];
+
+ if (
+#ifdef DHD_L2_FILTER
+ (ifp->block_ping) ||
+#endif
+#ifdef DHD_WET
+ (dhd->wet_mode) ||
+#endif
+#ifdef DHD_MCAST_REGEN
+ (ifp->mcast_regen_bss_enable) ||
+#endif
+ FALSE) {
+ ifp->rx_pkt_chainable = FALSE;
}
+}
+#endif /* DHD_WET || DHD_MCAST_REGEN || DHD_L2_FILTER */
- DHD_PERIM_UNLOCK(&dhd->pub);
- DHD_OS_WAKE_UNLOCK(&dhd->pub);
- DHD_MUTEX_UNLOCK();
+#ifdef DHD_WET
+/* Get wet configuration */
+int dhd_get_wet_mode(dhd_pub_t *dhdp)
+{
+ dhd_info_t *dhd = dhdp->info;
+ return (int)dhd->wet_mode;
+}
- printf("%s: Exit %s ret=%d\n", __FUNCTION__, net->name, ret);
- return ret;
+/* Set wet configuration */
+int dhd_set_wet_mode(dhd_pub_t *dhdp, uint32 val)
+{
+ dhd_info_t *dhd = dhdp->info;
+ dhd->wet_mode = val;
+ dhd_update_rx_pkt_chainable_state(dhdp, 0);
+ return 0;
+}
+#endif /* DHD_WET */
+
+#if defined(WL_CFG80211) && (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0))
+int32 dhd_role_to_nl80211_iftype(int32 role)
+{
+ switch (role) {
+ case WLC_E_IF_ROLE_STA:
+ return NL80211_IFTYPE_STATION;
+ case WLC_E_IF_ROLE_AP:
+ return NL80211_IFTYPE_AP;
+ case WLC_E_IF_ROLE_WDS:
+ return NL80211_IFTYPE_WDS;
+ case WLC_E_IF_ROLE_P2P_GO:
+ return NL80211_IFTYPE_P2P_GO;
+ case WLC_E_IF_ROLE_P2P_CLIENT:
+ return NL80211_IFTYPE_P2P_CLIENT;
+ case WLC_E_IF_ROLE_IBSS:
+ case WLC_E_IF_ROLE_NAN:
+ return NL80211_IFTYPE_ADHOC;
+ default:
+ return NL80211_IFTYPE_UNSPECIFIED;
+ }
}
+#endif /* WL_CFG80211 && LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0) */
-/*
- * ndo_start handler for primary ndev
- */
-static int
-dhd_pri_open(struct net_device *net)
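+/* Deferred-work handler for firmware IF_ADD events: lets cfg80211 finish the
+ * post-create steps when possible, otherwise allocates and registers the new
+ * host interface.
+ */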
+static void
+dhd_ifadd_event_handler(void *handle, void *event_info, u8 event)
{
- s32 ret;
+ dhd_info_t *dhd = handle;
+ dhd_if_event_t *if_event = event_info;
+ struct net_device *ndev;
+ int ifidx, bssidx;
+ int ret;
+#if defined(WL_CFG80211) && (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0))
+ struct wl_if_event_info info;
+#endif /* WL_CFG80211 && LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0) */
- ret = dhd_open(net);
- if (unlikely(ret)) {
- DHD_ERROR(("Failed to open primary dev ret %d\n", ret));
- return ret;
+ if (event != DHD_WQ_WORK_IF_ADD) {
+ DHD_ERROR(("%s: unexpected event \n", __FUNCTION__));
+ return;
}
- /* Allow transmit calls */
- netif_start_queue(net);
- DHD_ERROR(("[%s] tx queue started\n", net->name));
- return ret;
-}
-
-/*
- * ndo_stop handler for primary ndev
- */
-static int
-dhd_pri_stop(struct net_device *net)
-{
- s32 ret;
-
- /* stop tx queue */
- netif_stop_queue(net);
- DHD_ERROR(("[%s] tx queue stopped\n", net->name));
+ if (!dhd) {
+ DHD_ERROR(("%s: dhd info not available \n", __FUNCTION__));
+ return;
+ }
- ret = dhd_stop(net);
- if (unlikely(ret)) {
- DHD_ERROR(("dhd_stop failed: %d\n", ret));
- return ret;
+ if (!if_event) {
+ DHD_ERROR(("%s: event data is null \n", __FUNCTION__));
+ return;
}
- return ret;
-}
+ dhd_net_if_lock_local(dhd);
+ DHD_OS_WAKE_LOCK(&dhd->pub);
+ DHD_PERIM_LOCK(&dhd->pub);
-#if defined(WL_STATIC_IF) && defined(WL_CFG80211)
-/*
- * For static I/Fs, the firmware interface init
- * is done from the IFF_UP context.
- */
-static int
-dhd_static_if_open(struct net_device *net)
-{
- s32 ret = 0;
- struct bcm_cfg80211 *cfg;
- struct net_device *primary_netdev = NULL;
+ ifidx = if_event->event.ifidx;
+ bssidx = if_event->event.bssidx;
+ DHD_TRACE(("%s: registering if with ifidx %d\n", __FUNCTION__, ifidx));
- cfg = wl_get_cfg(net);
- primary_netdev = bcmcfg_to_prmry_ndev(cfg);
- if (!IS_CFG80211_STATIC_IF(cfg, net)) {
- DHD_TRACE(("non-static interface (%s)..do nothing \n", net->name));
- ret = BCME_OK;
- goto done;
+#if defined(WL_CFG80211) && (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0))
+ if (if_event->event.ifidx > 0) {
+ bzero(&info, sizeof(info));
+ info.ifidx = if_event->event.ifidx;
+ info.bssidx = if_event->event.bssidx;
+ info.role = if_event->event.role;
+ strncpy(info.name, if_event->name, IFNAMSIZ);
+ if (wl_cfg80211_post_ifcreate(dhd->pub.info->iflist[0]->net,
+ &info, if_event->mac, NULL, true) != NULL) {
+ /* Do the post interface create ops */
+ DHD_ERROR(("Post ifcreate ops done. Returning \n"));
+ goto done;
+ }
}
+#endif /* WL_CFG80211 && LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0) */
- printf("%s: Enter %s\n", __FUNCTION__, net->name);
- /* Ensure fw is initialized. If it is already initialized,
- * dhd_open will return success.
- */
- ret = dhd_open(primary_netdev);
- if (unlikely(ret)) {
- DHD_ERROR(("Failed to open primary dev ret %d\n", ret));
+ /* This path is for the non-Android case. */
+ /* The interface name on the host and in the event message are the same; */
+ /* the i/f name in the event message is used to create the dongle i/f list on the host. */
+ ndev = dhd_allocate_if(&dhd->pub, ifidx, if_event->name,
+ if_event->mac, bssidx, TRUE, if_event->name);
+ if (!ndev) {
+ DHD_ERROR(("%s: net device alloc failed \n", __FUNCTION__));
goto done;
}
- ret = wl_cfg80211_static_if_open(net);
- if (!ret) {
- /* Allow transmit calls */
- netif_start_queue(net);
+ DHD_PERIM_UNLOCK(&dhd->pub);
+ ret = dhd_register_if(&dhd->pub, ifidx, TRUE);
+ DHD_PERIM_LOCK(&dhd->pub);
+ if (ret != BCME_OK) {
+ DHD_ERROR(("%s: dhd_register_if failed\n", __FUNCTION__));
+ dhd_remove_if(&dhd->pub, ifidx, TRUE);
+ goto done;
+ }
+#ifndef PCIE_FULL_DONGLE
+ /* Turn on AP isolation in the firmware for interfaces operating in AP mode */
+ if (FW_SUPPORTED((&dhd->pub), ap) && (if_event->event.role != WLC_E_IF_ROLE_STA)) {
+ uint32 var_int = 1;
+ ret = dhd_iovar(&dhd->pub, ifidx, "ap_isolate", (char *)&var_int, sizeof(var_int),
+ NULL, 0, TRUE);
+ if (ret != BCME_OK) {
+ DHD_ERROR(("%s: Failed to set ap_isolate to dongle\n", __FUNCTION__));
+ dhd_remove_if(&dhd->pub, ifidx, TRUE);
+ }
}
+#endif /* PCIE_FULL_DONGLE */
+
done:
- printf("%s: Exit %s ret=%d\n", __FUNCTION__, net->name, ret);
- return ret;
+ MFREE(dhd->pub.osh, if_event, sizeof(dhd_if_event_t));
+
+ DHD_PERIM_UNLOCK(&dhd->pub);
+ DHD_OS_WAKE_UNLOCK(&dhd->pub);
+ dhd_net_if_unlock_local(dhd);
}
-static int
-dhd_static_if_stop(struct net_device *net)
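+/* Deferred-work handler for firmware IF_DEL events: removes the matching host
+ * interface (cfg80211 gets a chance to run its post-delete path first).
+ */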
+static void
+dhd_ifdel_event_handler(void *handle, void *event_info, u8 event)
{
- struct bcm_cfg80211 *cfg;
- struct net_device *primary_netdev = NULL;
- int ret = BCME_OK;
- dhd_info_t *dhd = DHD_DEV_INFO(net);
-
- printf("%s: Enter %s\n", __FUNCTION__, net->name);
+ dhd_info_t *dhd = handle;
+ int ifidx;
+ dhd_if_event_t *if_event = event_info;
- /* Ensure queue is disabled */
- netif_tx_disable(net);
- cfg = wl_get_cfg(net);
- if (!IS_CFG80211_STATIC_IF(cfg, net)) {
- DHD_TRACE(("non-static interface (%s)..do nothing \n", net->name));
- return BCME_OK;
+ if (event != DHD_WQ_WORK_IF_DEL) {
+ DHD_ERROR(("%s: unexpected event \n", __FUNCTION__));
+ return;
}
- ret = wl_cfg80211_static_if_close(net);
-
- if (dhd->pub.up == 0) {
- /* If fw is down, return */
- DHD_ERROR(("fw down\n"));
- return BCME_OK;
- }
- /* If STA iface is not in operational, invoke dhd_close from this
- * context.
- */
- primary_netdev = bcmcfg_to_prmry_ndev(cfg);
- if (!(primary_netdev->flags & IFF_UP)) {
- ret = dhd_stop(primary_netdev);
- } else {
- DHD_ERROR(("Skipped dhd_stop, as sta is operational\n"));
+ if (!dhd) {
+ DHD_ERROR(("%s: dhd info not available \n", __FUNCTION__));
+ return;
}
- printf("%s: Exit %s ret=%d\n", __FUNCTION__, net->name, ret);
-
- return ret;
-}
-#endif /* WL_STATIC_IF && WL_CF80211 */
-
-int dhd_do_driver_init(struct net_device *net)
-{
- dhd_info_t *dhd = NULL;
- if (!net) {
- DHD_ERROR(("Primary Interface not initialized \n"));
- return -EINVAL;
+ if (!if_event) {
+ DHD_ERROR(("%s: event data is null \n", __FUNCTION__));
+ return;
}
- DHD_MUTEX_IS_LOCK_RETURN();
+ dhd_net_if_lock_local(dhd);
+ DHD_OS_WAKE_LOCK(&dhd->pub);
+ DHD_PERIM_LOCK(&dhd->pub);
- /* && defined(OEM_ANDROID) && defined(BCMSDIO) */
- dhd = DHD_DEV_INFO(net);
+ ifidx = if_event->event.ifidx;
+ DHD_TRACE(("Removing interface with idx %d\n", ifidx));
- /* If driver is already initialized, do nothing
- */
- if (dhd->pub.busstate == DHD_BUS_DATA) {
- DHD_TRACE(("Driver already Inititalized. Nothing to do"));
- return 0;
+ DHD_PERIM_UNLOCK(&dhd->pub);
+#if defined(WL_CFG80211) && (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0))
+ if (if_event->event.ifidx > 0) {
+ /* Do the post interface del ops */
+ if (wl_cfg80211_post_ifdel(dhd->pub.info->iflist[ifidx]->net, true) == 0) {
+ DHD_TRACE(("Post ifdel ops done. Returning \n"));
+ DHD_PERIM_LOCK(&dhd->pub);
+ goto done;
+ }
}
+#endif /* WL_CFG80211 && LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0) */
- if (dhd_open(net) < 0) {
- DHD_ERROR(("Driver Init Failed \n"));
- return -1;
- }
+ dhd_remove_if(&dhd->pub, ifidx, TRUE);
+ DHD_PERIM_LOCK(&dhd->pub);
- return 0;
+#if defined(WL_CFG80211) && (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0))
+done:
+#endif /* WL_CFG80211 && LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0) */
+ MFREE(dhd->pub.osh, if_event, sizeof(dhd_if_event_t));
+
+ DHD_PERIM_UNLOCK(&dhd->pub);
+ DHD_OS_WAKE_UNLOCK(&dhd->pub);
+ dhd_net_if_unlock_local(dhd);
}
-int
-dhd_event_ifadd(dhd_info_t *dhdinfo, wl_event_data_if_t *ifevent, char *name, uint8 *mac)
+#ifdef DHD_UPDATE_INTF_MAC
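+/* Deferred-work handler for interface update events: refreshes the cached MAC
+ * address of the interface via dhd_op_if_update().
+ */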
+static void
+dhd_ifupdate_event_handler(void *handle, void *event_info, u8 event)
{
+ dhd_info_t *dhd = handle;
+ int ifidx;
+ dhd_if_event_t *if_event = event_info;
-#ifdef WL_CFG80211
- if (wl_cfg80211_notify_ifadd(dhd_linux_get_primary_netdev(&dhdinfo->pub),
- ifevent->ifidx, name, mac, ifevent->bssidx, ifevent->role) == BCME_OK)
- return BCME_OK;
-#endif // endif
+ if (event != DHD_WQ_WORK_IF_UPDATE) {
+ DHD_ERROR(("%s: unexpected event \n", __FUNCTION__));
+ return;
+ }
- /* handle IF event caused by wl commands, SoftAP, WEXT and
- * anything else. This has to be done asynchronously otherwise
- * DPC will be blocked (and iovars will timeout as DPC has no chance
- * to read the response back)
- */
- if (ifevent->ifidx > 0) {
- dhd_if_event_t *if_event = MALLOC(dhdinfo->pub.osh, sizeof(dhd_if_event_t));
- if (if_event == NULL) {
- DHD_ERROR(("dhd_event_ifadd: Failed MALLOC, malloced %d bytes",
- MALLOCED(dhdinfo->pub.osh)));
- return BCME_NOMEM;
- }
+ if (!dhd) {
+ DHD_ERROR(("%s: dhd info not available \n", __FUNCTION__));
+ return;
+ }
- memcpy(&if_event->event, ifevent, sizeof(if_event->event));
- memcpy(if_event->mac, mac, ETHER_ADDR_LEN);
- strncpy(if_event->name, name, IFNAMSIZ);
- if_event->name[IFNAMSIZ - 1] = '\0';
- dhd_deferred_schedule_work(dhdinfo->dhd_deferred_wq, (void *)if_event,
- DHD_WQ_WORK_IF_ADD, dhd_ifadd_event_handler, DHD_WQ_WORK_PRIORITY_LOW);
+ if (!if_event) {
+ DHD_ERROR(("%s: event data is null \n", __FUNCTION__));
+ return;
}
- return BCME_OK;
-}
+ dhd_net_if_lock_local(dhd);
+ DHD_OS_WAKE_LOCK(&dhd->pub);
-int
-dhd_event_ifdel(dhd_info_t *dhdinfo, wl_event_data_if_t *ifevent, char *name, uint8 *mac)
-{
- dhd_if_event_t *if_event;
+ ifidx = if_event->event.ifidx;
+ DHD_TRACE(("%s: Update interface with idx %d\n", __FUNCTION__, ifidx));
-#ifdef WL_CFG80211
- if (wl_cfg80211_notify_ifdel(dhd_linux_get_primary_netdev(&dhdinfo->pub),
- ifevent->ifidx, name, mac, ifevent->bssidx) == BCME_OK)
- return BCME_OK;
-#endif /* WL_CFG80211 */
+ dhd_op_if_update(&dhd->pub, ifidx);
- /* handle IF event caused by wl commands, SoftAP, WEXT and
- * anything else
- */
- if_event = MALLOC(dhdinfo->pub.osh, sizeof(dhd_if_event_t));
- if (if_event == NULL) {
- DHD_ERROR(("dhd_event_ifdel: malloc failed for if_event, malloced %d bytes",
- MALLOCED(dhdinfo->pub.osh)));
- return BCME_NOMEM;
- }
- memcpy(&if_event->event, ifevent, sizeof(if_event->event));
- memcpy(if_event->mac, mac, ETHER_ADDR_LEN);
- strncpy(if_event->name, name, IFNAMSIZ);
- if_event->name[IFNAMSIZ - 1] = '\0';
- dhd_deferred_schedule_work(dhdinfo->dhd_deferred_wq, (void *)if_event, DHD_WQ_WORK_IF_DEL,
- dhd_ifdel_event_handler, DHD_WQ_WORK_PRIORITY_LOW);
+ MFREE(dhd->pub.osh, if_event, sizeof(dhd_if_event_t));
- return BCME_OK;
+ DHD_OS_WAKE_UNLOCK(&dhd->pub);
+ dhd_net_if_unlock_local(dhd);
}
-int
-dhd_event_ifchange(dhd_info_t *dhdinfo, wl_event_data_if_t *ifevent, char *name, uint8 *mac)
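+/* Query the dongle for the interface MAC ("cur_etheraddr") and refresh the
+ * cached host copies; on failure, fall back to a locally administered address.
+ */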
+int dhd_op_if_update(dhd_pub_t *dhdpub, int ifidx)
{
-#ifdef DHD_UPDATE_INTF_MAC
- dhd_if_event_t *if_event;
-#endif /* DHD_UPDATE_INTF_MAC */
+ dhd_info_t * dhdinfo = NULL;
+ dhd_if_t * ifp = NULL;
+ int ret = 0;
+ char buf[128];
-#ifdef WL_CFG80211
- wl_cfg80211_notify_ifchange(dhd_linux_get_primary_netdev(&dhdinfo->pub),
- ifevent->ifidx, name, mac, ifevent->bssidx);
-#endif /* WL_CFG80211 */
+ if ((NULL==dhdpub)||(NULL==dhdpub->info)) {
+ DHD_ERROR(("%s: *** DHD handler is NULL!\n", __FUNCTION__));
+ return -1;
+ } else {
+ dhdinfo = (dhd_info_t *)dhdpub->info;
+ ifp = dhdinfo->iflist[ifidx];
+ if (NULL==ifp) {
+ DHD_ERROR(("%s: *** ifp handler is NULL!\n", __FUNCTION__));
+ return -2;
+ }
+ }
-#ifdef DHD_UPDATE_INTF_MAC
- /* handle IF event caused by wl commands, SoftAP, WEXT, MBSS and
- * anything else
- */
- if_event = MALLOC(dhdinfo->pub.osh, sizeof(dhd_if_event_t));
- if (if_event == NULL) {
- DHD_ERROR(("dhd_event_ifdel: malloc failed for if_event, malloced %d bytes",
- MALLOCED(dhdinfo->pub.osh)));
- return BCME_NOMEM;
+ DHD_TRACE(("%s: idx %d\n", __FUNCTION__, ifidx));
+ // Get MAC address
+ strcpy(buf, "cur_etheraddr");
+ ret = dhd_wl_ioctl_cmd(&dhdinfo->pub, WLC_GET_VAR, buf, sizeof(buf), FALSE, ifp->idx);
+ if (0>ret) {
+ DHD_ERROR(("Failed to upudate the MAC address for itf=%s, ret=%d\n", ifp->name, ret));
+ // avoid collision
+ dhdinfo->iflist[ifp->idx]->mac_addr[5] += 1;
+ // force a locally administered address
+ ETHER_SET_LOCALADDR(&dhdinfo->iflist[ifp->idx]->mac_addr);
+ } else {
+ DHD_EVENT(("Got mac for itf %s, idx %d, MAC=%02X:%02X:%02X:%02X:%02X:%02X\n",
+ ifp->name, ifp->idx,
+ (unsigned char)buf[0], (unsigned char)buf[1], (unsigned char)buf[2],
+ (unsigned char)buf[3], (unsigned char)buf[4], (unsigned char)buf[5]));
+ memcpy(dhdinfo->iflist[ifp->idx]->mac_addr, buf, ETHER_ADDR_LEN);
+ if (dhdinfo->iflist[ifp->idx]->net) {
+ memcpy(dhdinfo->iflist[ifp->idx]->net->dev_addr, buf, ETHER_ADDR_LEN);
+ }
}
- memcpy(&if_event->event, ifevent, sizeof(if_event->event));
- // construct a change event
- if_event->event.ifidx = dhd_ifname2idx(dhdinfo, name);
- if_event->event.opcode = WLC_E_IF_CHANGE;
- memcpy(if_event->mac, mac, ETHER_ADDR_LEN);
- strncpy(if_event->name, name, IFNAMSIZ);
- if_event->name[IFNAMSIZ - 1] = '\0';
- dhd_deferred_schedule_work(dhdinfo->dhd_deferred_wq, (void *)if_event, DHD_WQ_WORK_IF_UPDATE,
- dhd_ifupdate_event_handler, DHD_WQ_WORK_PRIORITY_LOW);
-#endif /* DHD_UPDATE_INTF_MAC */
- return BCME_OK;
+ return ret;
}
+#endif /* DHD_UPDATE_INTF_MAC */
-#ifdef WL_NATOE
-/* Handler to update natoe info and bind with new subscriptions if there is change in config */
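+/* Deferred-work handler: applies a pending MAC address change for an interface */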
static void
-dhd_natoe_ct_event_hanlder(void *handle, void *event_info, u8 event)
+dhd_set_mac_addr_handler(void *handle, void *event_info, u8 event)
{
dhd_info_t *dhd = handle;
- wl_event_data_natoe_t *natoe = event_info;
- dhd_nfct_info_t *nfct = dhd->pub.nfct;
+ dhd_if_t *ifp = event_info;
- if (event != DHD_WQ_WORK_NATOE_EVENT) {
+ if (event != DHD_WQ_WORK_SET_MAC) {
DHD_ERROR(("%s: unexpected event \n", __FUNCTION__));
- return;
}
if (!dhd) {
DHD_ERROR(("%s: dhd info not available \n", __FUNCTION__));
return;
}
- if (natoe->natoe_active && natoe->sta_ip && natoe->start_port && natoe->end_port &&
- (natoe->start_port < natoe->end_port)) {
- /* Rebind subscriptions to start receiving notifications from groups */
- if (dhd_ct_nl_bind(nfct, nfct->subscriptions) < 0) {
- dhd_ct_close(nfct);
- }
- dhd_ct_send_dump_req(nfct);
- } else if (!natoe->natoe_active) {
- /* Rebind subscriptions to stop receiving notifications from groups */
- if (dhd_ct_nl_bind(nfct, CT_NULL_SUBSCRIPTION) < 0) {
- dhd_ct_close(nfct);
- }
- }
-}
-
-/* As NATOE enable/disbale event is received, we have to bind with new NL subscriptions.
- * Scheduling workq to switch from tasklet context as bind call may sleep in handler
- */
-int
-dhd_natoe_ct_event(dhd_pub_t *dhd, char *data)
-{
- wl_event_data_natoe_t *event_data = (wl_event_data_natoe_t *)data;
- if (dhd->nfct) {
- wl_event_data_natoe_t *natoe = dhd->nfct->natoe_info;
- uint8 prev_enable = natoe->natoe_active;
+ dhd_net_if_lock_local(dhd);
+ DHD_OS_WAKE_LOCK(&dhd->pub);
+ DHD_PERIM_LOCK(&dhd->pub);
- spin_lock_bh(&dhd->nfct_lock);
- memcpy(natoe, event_data, sizeof(*event_data));
- spin_unlock_bh(&dhd->nfct_lock);
+#ifdef SOFTAP
+ {
+ unsigned long flags;
+ bool in_ap = FALSE;
+ DHD_GENERAL_LOCK(&dhd->pub, flags);
+ in_ap = (ap_net_dev != NULL);
+ DHD_GENERAL_UNLOCK(&dhd->pub, flags);
- if (prev_enable != event_data->natoe_active) {
- dhd_deferred_schedule_work(dhd->info->dhd_deferred_wq,
- (void *)natoe, DHD_WQ_WORK_NATOE_EVENT,
- dhd_natoe_ct_event_hanlder, DHD_WQ_WORK_PRIORITY_LOW);
+ if (in_ap) {
+ DHD_ERROR(("attempt to set MAC for %s in AP Mode, blocked. \n",
+ ifp->net->name));
+ goto done;
}
- return BCME_OK;
}
- DHD_ERROR(("%s ERROR NFCT is not enabled \n", __FUNCTION__));
- return BCME_ERROR;
+#endif /* SOFTAP */
+
+ // terence 20160907: fix for not being able to set the MAC when wlan0 is down
+ if (ifp == NULL || !ifp->set_macaddress) {
+ goto done;
+ }
+ if (ifp == NULL || !dhd->pub.up) {
+ DHD_ERROR(("%s: interface info not available/down \n", __FUNCTION__));
+ goto done;
+ }
+
+ DHD_ERROR(("%s: MACID is overwritten\n", __FUNCTION__));
+ ifp->set_macaddress = FALSE;
+ if (_dhd_set_mac_address(dhd, ifp->idx, ifp->mac_addr) == 0)
+ DHD_INFO(("%s: MACID is overwritten\n", __FUNCTION__));
+ else
+ DHD_ERROR(("%s: _dhd_set_mac_address() failed\n", __FUNCTION__));
+
+done:
+ DHD_PERIM_UNLOCK(&dhd->pub);
+ DHD_OS_WAKE_UNLOCK(&dhd->pub);
+ dhd_net_if_unlock_local(dhd);
}
-/* Handler to send natoe ioctl to dongle */
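+/* Deferred-work handler: programs the pending multicast list for an interface */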
static void
-dhd_natoe_ct_ioctl_handler(void *handle, void *event_info, uint8 event)
+dhd_set_mcast_list_handler(void *handle, void *event_info, u8 event)
{
dhd_info_t *dhd = handle;
- dhd_ct_ioc_t *ct_ioc = event_info;
+ int ifidx = (int)((long int)event_info);
+ dhd_if_t *ifp = NULL;
- if (event != DHD_WQ_WORK_NATOE_IOCTL) {
+ if (event != DHD_WQ_WORK_SET_MCAST_LIST) {
DHD_ERROR(("%s: unexpected event \n", __FUNCTION__));
return;
}
return;
}
- if (dhd_natoe_prep_send_exception_port_ioctl(&dhd->pub, ct_ioc) < 0) {
- DHD_ERROR(("%s: Error in sending NATOE IOCTL \n", __FUNCTION__));
+ dhd_net_if_lock_local(dhd);
+ DHD_OS_WAKE_LOCK(&dhd->pub);
+ DHD_PERIM_LOCK(&dhd->pub);
+
+ ifp = dhd->iflist[ifidx];
+
+ if (ifp == NULL || !dhd->pub.up) {
+ DHD_ERROR(("%s: interface info not available/down \n", __FUNCTION__));
+ goto done;
+ }
+
+#ifdef SOFTAP
+ {
+ bool in_ap = FALSE;
+ unsigned long flags;
+ DHD_GENERAL_LOCK(&dhd->pub, flags);
+ in_ap = (ap_net_dev != NULL);
+ DHD_GENERAL_UNLOCK(&dhd->pub, flags);
+
+ if (in_ap) {
+ DHD_ERROR(("set MULTICAST list for %s in AP Mode, blocked. \n",
+ ifp->net->name));
+ ifp->set_multicast = FALSE;
+ goto done;
+ }
+ }
+#endif /* SOFTAP */
+
+ if (ifp == NULL || !dhd->pub.up) {
+ DHD_ERROR(("%s: interface info not available/down \n", __FUNCTION__));
+ goto done;
}
+
+ ifidx = ifp->idx;
+
+
+ _dhd_set_multicast_list(dhd, ifidx);
+ DHD_INFO(("%s: set multicast list for if %d\n", __FUNCTION__, ifidx));
+
+done:
+ DHD_PERIM_UNLOCK(&dhd->pub);
+ DHD_OS_WAKE_UNLOCK(&dhd->pub);
+ dhd_net_if_unlock_local(dhd);
}
-/* When Netlink message contains port collision info, the info must be sent to dongle FW
- * For that we have to switch context from softirq/tasklet by scheduling workq for natoe_ct ioctl
- */
-void
-dhd_natoe_ct_ioctl_schedule_work(dhd_pub_t *dhd, dhd_ct_ioc_t *ioc)
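+/* ndo_set_mac_address handler: caches the requested address and defers the
+ * dongle update to the workqueue (dhd_set_mac_addr_handler).
+ */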
+static int
+dhd_set_mac_address(struct net_device *dev, void *addr)
{
+ int ret = 0;
- dhd_deferred_schedule_work(dhd->info->dhd_deferred_wq, (void *)ioc,
- DHD_WQ_WORK_NATOE_IOCTL, dhd_natoe_ct_ioctl_handler,
- DHD_WQ_WORK_PRIORITY_HIGH);
+ dhd_info_t *dhd = DHD_DEV_INFO(dev);
+ struct sockaddr *sa = (struct sockaddr *)addr;
+ int ifidx;
+ dhd_if_t *dhdif;
+
+ ifidx = dhd_net2idx(dhd, dev);
+ if (ifidx == DHD_BAD_IF)
+ return -1;
+
+ dhdif = dhd->iflist[ifidx];
+
+ dhd_net_if_lock_local(dhd);
+ memcpy(dhdif->mac_addr, sa->sa_data, ETHER_ADDR_LEN);
+ dhdif->set_macaddress = TRUE;
+ dhd_net_if_unlock_local(dhd);
+ dhd_deferred_schedule_work(dhd->dhd_deferred_wq, (void *)dhdif, DHD_WQ_WORK_SET_MAC,
+ dhd_set_mac_addr_handler, DHD_WQ_WORK_PRIORITY_LOW);
+ return ret;
}
-#endif /* WL_NATOE */
-/* This API maps ndev to ifp inclusive of static IFs */
-static dhd_if_t *
-dhd_get_ifp_by_ndev(dhd_pub_t *dhdp, struct net_device *ndev)
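+/* ndo_set_rx_mode handler: marks the interface and schedules deferred work to
+ * push the multicast list (and any pending MAC change) down to the dongle.
+ */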
+static void
+dhd_set_multicast_list(struct net_device *dev)
{
- dhd_if_t *ifp = NULL;
-#ifdef WL_STATIC_IF
- u32 ifidx = (DHD_MAX_IFS + DHD_MAX_STATIC_IFS - 1);
-#else
- u32 ifidx = (DHD_MAX_IFS - 1);
-#endif /* WL_STATIC_IF */
+ dhd_info_t *dhd = DHD_DEV_INFO(dev);
+ int ifidx;
- dhd_info_t *dhdinfo = (dhd_info_t *)dhdp->info;
- do {
- ifp = dhdinfo->iflist[ifidx];
- if (ifp && (ifp->net == ndev)) {
- DHD_TRACE(("match found for %s. ifidx:%d\n",
- ndev->name, ifidx));
- return ifp;
- }
- } while (ifidx--);
+ ifidx = dhd_net2idx(dhd, dev);
+ if (ifidx == DHD_BAD_IF)
+ return;
- DHD_ERROR(("no entry found for %s\n", ndev->name));
- return NULL;
+ dhd->iflist[ifidx]->set_multicast = TRUE;
+ dhd_deferred_schedule_work(dhd->dhd_deferred_wq, (void *)((long int)ifidx),
+ DHD_WQ_WORK_SET_MCAST_LIST, dhd_set_mcast_list_handler, DHD_WQ_WORK_PRIORITY_LOW);
+
+ // terence 20160907: fix for not being able to set the MAC when wlan0 is down
+ dhd_deferred_schedule_work(dhd->dhd_deferred_wq, (void *)dhd->iflist[ifidx],
+ DHD_WQ_WORK_SET_MAC, dhd_set_mac_addr_handler, DHD_WQ_WORK_PRIORITY_LOW);
}
-bool
-dhd_is_static_ndev(dhd_pub_t *dhdp, struct net_device *ndev)
+#ifdef DHD_UCODE_DOWNLOAD
+/* Get ucode path */
+char *
+dhd_get_ucode_path(dhd_pub_t *dhdp)
{
- dhd_if_t *ifp = NULL;
-
- if (!dhdp || !ndev) {
- DHD_ERROR(("wrong input\n"));
- ASSERT(0);
- return false;
- }
+ dhd_info_t *dhd = dhdp->info;
+ return dhd->uc_path;
+}
+#endif /* DHD_UCODE_DOWNLOAD */
- ifp = dhd_get_ifp_by_ndev(dhdp, ndev);
- return (ifp && (ifp->static_if == true));
+#ifdef PROP_TXSTATUS
+int
+dhd_os_wlfc_block(dhd_pub_t *pub)
+{
+ dhd_info_t *di = (dhd_info_t *)(pub->info);
+ ASSERT(di != NULL);
+ /* terence 20161229: don't take the spin lock if proptx is not enabled */
+ if (disable_proptx)
+ return 1;
+#ifdef BCMDBUS
+ spin_lock_irqsave(&di->wlfc_spinlock, di->wlfc_lock_flags);
+#else
+ spin_lock_bh(&di->wlfc_spinlock);
+#endif /* BCMDBUS */
+ return 1;
}
-#ifdef WL_STATIC_IF
-/* In some cases, while registering I/F, the actual ifidx, bssidx and dngl_name
- * are not known. For e.g: static i/f case. This function lets to update it once
- * it is known.
- */
-s32
-dhd_update_iflist_info(dhd_pub_t *dhdp, struct net_device *ndev, int ifidx,
- uint8 *mac, uint8 bssidx, const char *dngl_name, int if_state)
+int
+dhd_os_wlfc_unblock(dhd_pub_t *pub)
{
- dhd_info_t *dhdinfo = (dhd_info_t *)dhdp->info;
- dhd_if_t *ifp, *ifp_new;
- s32 cur_idx;
- dhd_dev_priv_t * dev_priv;
+ dhd_info_t *di = (dhd_info_t *)(pub->info);
- DHD_TRACE(("[STATIC_IF] update ifinfo for state:%d ifidx:%d\n",
- if_state, ifidx));
+ ASSERT(di != NULL);
+ /* terence 20161229: don't take the spin lock if proptx is not enabled */
+ if (disable_proptx)
+ return 1;
+#ifdef BCMDBUS
+ spin_unlock_irqrestore(&di->wlfc_spinlock, di->wlfc_lock_flags);
+#else
+ spin_unlock_bh(&di->wlfc_spinlock);
+#endif /* BCMDBUS */
+ return 1;
+}
- ASSERT(dhdinfo && (ifidx < (DHD_MAX_IFS + DHD_MAX_STATIC_IFS)));
+#endif /* PROP_TXSTATUS */
- if ((ifp = dhd_get_ifp_by_ndev(dhdp, ndev)) == NULL) {
- return -ENODEV;
- }
- cur_idx = ifp->idx;
+#if defined(DHD_RX_DUMP) || defined(DHD_TX_DUMP)
+typedef struct {
+ uint16 type;
+ const char *str;
+} PKTTYPE_INFO;
- if (if_state == NDEV_STATE_OS_IF_CREATED) {
- /* mark static if */
- ifp->static_if = TRUE;
- return BCME_OK;
- }
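+/* Ethertype-to-name lookup table used by the Tx/Rx packet dump helpers */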
+static const PKTTYPE_INFO packet_type_info[] =
+{
+ { ETHER_TYPE_IP, "IP" },
+ { ETHER_TYPE_ARP, "ARP" },
+ { ETHER_TYPE_BRCM, "BRCM" },
+ { ETHER_TYPE_802_1X, "802.1X" },
+ { ETHER_TYPE_WAI, "WAPI" },
+ { 0, ""}
+};
- ifp_new = dhdinfo->iflist[ifidx];
- if (ifp_new && (ifp_new != ifp)) {
- /* There should be only one entry for a given ifidx. */
- DHD_ERROR(("ifp ptr already present for ifidx:%d\n", ifidx));
- ASSERT(0);
- dhdp->hang_reason = HANG_REASON_IFACE_ADD_FAILURE;
- net_os_send_hang_message(ifp->net);
- return -EINVAL;
- }
+static const char *_get_packet_type_str(uint16 type)
+{
+ int i;
+ int n = sizeof(packet_type_info)/sizeof(packet_type_info[1]) - 1;
- /* For static if delete case, cleanup the if before ifidx update */
- if ((if_state == NDEV_STATE_FW_IF_DELETED) ||
- (if_state == NDEV_STATE_FW_IF_FAILED)) {
- dhd_cleanup_if(ifp->net);
- dev_priv = DHD_DEV_PRIV(ndev);
- dev_priv->ifidx = ifidx;
+ for (i = 0; i < n; i++) {
+ if (packet_type_info[i].type == type)
+ return packet_type_info[i].str;
}
- /* update the iflist ifidx slot with cached info */
- dhdinfo->iflist[ifidx] = ifp;
- dhdinfo->iflist[cur_idx] = NULL;
-
- /* update the values */
- ifp->idx = ifidx;
- ifp->bssidx = bssidx;
+ return packet_type_info[n].str;
+}
- if (if_state == NDEV_STATE_FW_IF_CREATED) {
- dhd_dev_priv_save(ndev, dhdinfo, ifp, ifidx);
- /* initialize the dongle provided if name */
- if (dngl_name) {
- strlcpy(ifp->dngl_name, dngl_name, IFNAMSIZ);
- } else if (ndev->name[0] != '\0') {
- strlcpy(ifp->dngl_name, ndev->name, IFNAMSIZ);
- }
- if (mac != NULL) {
- (void)memcpy_s(&ifp->mac_addr, ETHER_ADDR_LEN, mac, ETHER_ADDR_LEN);
- }
-#if defined(WL_EXT_IAPSTA) || defined(USE_IW) || defined(WL_ESCAN)
- wl_ext_event_attach_netdev(ndev, ifidx, bssidx);
-#ifdef WL_ESCAN
- wl_escan_event_attach(ndev, dhdp);
-#endif /* WL_ESCAN */
-#ifdef WL_EXT_IAPSTA
- wl_ext_iapsta_ifadding(ndev, ifidx);
- wl_ext_iapsta_attach_netdev(ndev, ifidx, bssidx);
- wl_ext_iapsta_attach_name(ndev, ifidx);
-#endif /* WL_EXT_IAPSTA */
- } else if (if_state == NDEV_STATE_FW_IF_DELETED) {
-#ifdef WL_EXT_IAPSTA
- wl_ext_iapsta_dettach_netdev(ndev, cur_idx);
-#endif /* WL_EXT_IAPSTA */
-#ifdef WL_ESCAN
- wl_escan_event_dettach(ndev, dhdp);
-#endif /* WL_ESCAN */
- wl_ext_event_dettach_netdev(ndev, cur_idx);
-#endif /* WL_EXT_IAPSTA || USE_IW || WL_ESCAN */
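+/* Log the ethertype of a non-BRCM Tx/Rx frame; with the FULL_DUMP options the
+ * whole payload is hex-dumped as well.
+ */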
+void
+dhd_trx_dump(struct net_device *ndev, uint8 *dump_data, uint datalen, bool tx)
+{
+ uint16 protocol;
+ char *ifname;
+
+ protocol = (dump_data[12] << 8) | dump_data[13];
+ ifname = ndev ? ndev->name : "N/A";
+
+ if (protocol != ETHER_TYPE_BRCM) {
+ DHD_ERROR(("%s DUMP[%s] - %s\n", tx?"Tx":"Rx", ifname,
+ _get_packet_type_str(protocol)));
+#if defined(DHD_TX_FULL_DUMP) || defined(DHD_RX_FULL_DUMP)
+ prhex("Data", dump_data, datalen);
+#endif /* DHD_TX_FULL_DUMP || DHD_RX_FULL_DUMP */
}
- DHD_INFO(("[STATIC_IF] ifp ptr updated for ifidx:%d curidx:%d if_state:%d\n",
- ifidx, cur_idx, if_state));
- return BCME_OK;
}
-#endif /* WL_STATIC_IF */
+#endif /* DHD_TX_DUMP || DHD_RX_DUMP */
-/* unregister and free the existing net_device interface (if any) in iflist and
- * allocate a new one. the slot is reused. this function does NOT register the
- * new interface to linux kernel. dhd_register_if does the job
+/* This routine do not support Packet chain feature, Currently tested for
+ * proxy arp feature
*/
-struct net_device*
-dhd_allocate_if(dhd_pub_t *dhdpub, int ifidx, const char *name,
- uint8 *mac, uint8 bssidx, bool need_rtnl_lock, const char *dngl_name)
+int dhd_sendup(dhd_pub_t *dhdp, int ifidx, void *p)
{
- dhd_info_t *dhdinfo = (dhd_info_t *)dhdpub->info;
+ struct sk_buff *skb;
+ void *skbhead = NULL;
+ void *skbprev = NULL;
dhd_if_t *ifp;
+ ASSERT(!PKTISCHAINED(p));
+ skb = PKTTONATIVE(dhdp->osh, p);
- ASSERT(dhdinfo && (ifidx < (DHD_MAX_IFS + DHD_MAX_STATIC_IFS)));
-
- ifp = dhdinfo->iflist[ifidx];
-
- if (ifp != NULL) {
- if (ifp->net != NULL) {
- DHD_ERROR(("%s: free existing IF %s ifidx:%d \n",
- __FUNCTION__, ifp->net->name, ifidx));
-
- if (ifidx == 0) {
- /* For primary ifidx (0), there shouldn't be
- * any netdev present already.
- */
- DHD_ERROR(("Primary ifidx populated already\n"));
- ASSERT(0);
- return NULL;
+ ifp = dhdp->info->iflist[ifidx];
+ skb->dev = ifp->net;
+#if defined(BCM_GMAC3)
+ /* Forwarder capable interfaces use WOFA based forwarding */
+ if (ifp->fwdh) {
+ struct ether_header *eh = (struct ether_header *)PKTDATA(dhdp->osh, p);
+ uint16 * da = (uint16 *)(eh->ether_dhost);
+ uintptr_t wofa_data;
+ ASSERT(ISALIGNED(da, 2));
+
+ wofa_data = fwder_lookup(ifp->fwdh->mate, da, ifp->idx);
+ if (wofa_data == WOFA_DATA_INVALID) { /* Unknown MAC address */
+ if (fwder_transmit(ifp->fwdh, skb, 1, skb->dev) == FWDER_SUCCESS) {
+ return BCME_OK;
}
+ }
+ PKTFRMNATIVE(dhdp->osh, p);
+ PKTFREE(dhdp->osh, p, FALSE);
+ return BCME_OK;
+ }
+#endif /* BCM_GMAC3 */
- dhd_dev_priv_clear(ifp->net); /* clear net_device private */
+ skb->protocol = eth_type_trans(skb, skb->dev);
- /* in unregister_netdev case, the interface gets freed by net->destructor
- * (which is set to free_netdev)
- */
- if (ifp->net->reg_state == NETREG_UNINITIALIZED) {
- free_netdev(ifp->net);
+ if (in_interrupt()) {
+ bcm_object_trace_opr(skb, BCM_OBJDBG_REMOVE,
+ __FUNCTION__, __LINE__);
+ netif_rx(skb);
+ } else {
+ if (dhdp->info->rxthread_enabled) {
+ if (!skbhead) {
+ skbhead = skb;
} else {
- netif_stop_queue(ifp->net);
- if (need_rtnl_lock)
- unregister_netdev(ifp->net);
- else
- unregister_netdevice(ifp->net);
+ PKTSETNEXT(dhdp->osh, skbprev, skb);
}
- ifp->net = NULL;
- }
- } else {
- ifp = MALLOC(dhdinfo->pub.osh, sizeof(dhd_if_t));
- if (ifp == NULL) {
- DHD_ERROR(("%s: OOM - dhd_if_t(%zu)\n", __FUNCTION__, sizeof(dhd_if_t)));
- return NULL;
+ skbprev = skb;
+ } else {
+ /* If the receive is not processed inside an ISR,
+ * the softirqd must be woken explicitly to service
+ * the NET_RX_SOFTIRQ. In 2.6 kernels, this is handled
+ * by netif_rx_ni(), but in earlier kernels, we need
+ * to do it manually.
+ */
+ bcm_object_trace_opr(skb, BCM_OBJDBG_REMOVE,
+ __FUNCTION__, __LINE__);
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)
+ netif_rx_ni(skb);
+#else
+ ulong flags;
+ netif_rx(skb);
+ local_irq_save(flags);
+ RAISE_RX_SOFTIRQ();
+ local_irq_restore(flags);
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0) */
}
}
- memset(ifp, 0, sizeof(dhd_if_t));
- ifp->info = dhdinfo;
- ifp->idx = ifidx;
- ifp->bssidx = bssidx;
-#ifdef DHD_MCAST_REGEN
- ifp->mcast_regen_bss_enable = FALSE;
-#endif // endif
- /* set to TRUE rx_pkt_chainable at alloc time */
- ifp->rx_pkt_chainable = TRUE;
-
- if (mac != NULL)
- memcpy(&ifp->mac_addr, mac, ETHER_ADDR_LEN);
+ if (dhdp->info->rxthread_enabled && skbhead)
+ dhd_sched_rxf(dhdp, skbhead);
- /* Allocate etherdev, including space for private structure */
- ifp->net = alloc_etherdev(DHD_DEV_PRIV_SIZE);
- if (ifp->net == NULL) {
- DHD_ERROR(("%s: OOM - alloc_etherdev(%zu)\n", __FUNCTION__, sizeof(dhdinfo)));
- goto fail;
- }
+ return BCME_OK;
+}
- /* Setup the dhd interface's netdevice private structure. */
- dhd_dev_priv_save(ifp->net, dhdinfo, ifp, ifidx);
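+/* Core Tx path: run the L2 filters, update statistics, set the packet
+ * priority and flow ring (PCIe), then hand the packet to wlfc or the bus.
+ */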
+int BCMFASTPATH
+__dhd_sendpkt(dhd_pub_t *dhdp, int ifidx, void *pktbuf)
+{
+ int ret = BCME_OK;
+ dhd_info_t *dhd = (dhd_info_t *)(dhdp->info);
+ struct ether_header *eh = NULL;
+#if defined(DHD_L2_FILTER)
+ dhd_if_t *ifp = dhd_get_ifp(dhdp, ifidx);
+#endif
- if (name && name[0]) {
- strncpy(ifp->net->name, name, IFNAMSIZ);
- ifp->net->name[IFNAMSIZ - 1] = '\0';
+ /* Reject if down */
+ if (!dhdp->up || (dhdp->busstate == DHD_BUS_DOWN)) {
+ /* free the packet here since the caller won't */
+ PKTCFREE(dhdp->osh, pktbuf, TRUE);
+ return -ENODEV;
}
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 9))
-#define IFP_NET_DESTRUCTOR ifp->net->priv_destructor
+#ifdef PCIE_FULL_DONGLE
+ if (dhdp->busstate == DHD_BUS_SUSPEND) {
+ DHD_ERROR(("%s : pcie is still in suspend state!!\n", __FUNCTION__));
+ PKTCFREE(dhdp->osh, pktbuf, TRUE);
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20))
+ return -ENODEV;
#else
-#define IFP_NET_DESTRUCTOR ifp->net->destructor
-#endif // endif
+ return NETDEV_TX_BUSY;
+#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20) */
+ }
+#endif /* PCIE_FULL_DONGLE */
-#ifdef WL_CFG80211
- if (ifidx == 0) {
- IFP_NET_DESTRUCTOR = free_netdev;
- } else {
- IFP_NET_DESTRUCTOR = dhd_netdev_free;
+#ifdef DHD_L2_FILTER
+ /* if dhcp_unicast is enabled, we need to convert the */
+ /* broadcast DHCP ACK/REPLY packets to Unicast. */
+ if (ifp->dhcp_unicast) {
+ uint8* mac_addr;
+ uint8* ehptr = NULL;
+ int ret;
+ ret = bcm_l2_filter_get_mac_addr_dhcp_pkt(dhdp->osh, pktbuf, ifidx, &mac_addr);
+ if (ret == BCME_OK) {
+ /* if given mac address having valid entry in sta list
+ * copy the given mac address, and return with BCME_OK
+ */
+ if (dhd_find_sta(dhdp, ifidx, mac_addr)) {
+ ehptr = PKTDATA(dhdp->osh, pktbuf);
+ bcopy(mac_addr, ehptr + ETHER_DEST_OFFSET, ETHER_ADDR_LEN);
+ }
+ }
}
-#else
- IFP_NET_DESTRUCTOR = free_netdev;
-#endif /* WL_CFG80211 */
- strncpy(ifp->name, ifp->net->name, IFNAMSIZ);
- ifp->name[IFNAMSIZ - 1] = '\0';
- dhdinfo->iflist[ifidx] = ifp;
- /* initialize the dongle provided if name */
- if (dngl_name) {
- strncpy(ifp->dngl_name, dngl_name, IFNAMSIZ);
- } else if (name) {
- strncpy(ifp->dngl_name, name, IFNAMSIZ);
+ if (ifp->grat_arp && DHD_IF_ROLE_AP(dhdp, ifidx)) {
+ if (bcm_l2_filter_gratuitous_arp(dhdp->osh, pktbuf) == BCME_OK) {
+ PKTCFREE(dhdp->osh, pktbuf, TRUE);
+ return BCME_ERROR;
+ }
}
-#ifdef PCIE_FULL_DONGLE
- /* Initialize STA info list */
- INIT_LIST_HEAD(&ifp->sta_list);
- DHD_IF_STA_LIST_LOCK_INIT(ifp);
-#endif /* PCIE_FULL_DONGLE */
+ if (ifp->parp_enable && DHD_IF_ROLE_AP(dhdp, ifidx)) {
+ ret = dhd_l2_filter_pkt_handle(dhdp, ifidx, pktbuf, TRUE);
-#ifdef DHD_L2_FILTER
- ifp->phnd_arp_table = init_l2_filter_arp_table(dhdpub->osh);
- ifp->parp_allnode = TRUE;
+ /* Drop the packet if the L2 filter has already processed it;
+ * otherwise continue with the normal path.
+ */
+ if (ret == BCME_OK) {
+ PKTCFREE(dhdp->osh, pktbuf, TRUE);
+ return BCME_ERROR;
+ }
+ }
#endif /* DHD_L2_FILTER */
+ /* Update multicast statistic */
+ if (PKTLEN(dhdp->osh, pktbuf) >= ETHER_HDR_LEN) {
+ uint8 *pktdata = (uint8 *)PKTDATA(dhdp->osh, pktbuf);
+ eh = (struct ether_header *)pktdata;
- DHD_CUMM_CTR_INIT(&ifp->cumm_ctr);
-
-#ifdef DHD_4WAYM4_FAIL_DISCONNECT
- INIT_DELAYED_WORK(&ifp->m4state_work, dhd_m4_state_handler);
-#endif /* DHD_4WAYM4_FAIL_DISCONNECT */
-
-#ifdef DHD_POST_EAPOL_M1_AFTER_ROAM_EVT
- ifp->recv_reassoc_evt = FALSE;
- ifp->post_roam_evt = FALSE;
-#endif /* DHD_POST_EAPOL_M1_AFTER_ROAM_EVT */
+ if (ETHER_ISMULTI(eh->ether_dhost))
+ dhdp->tx_multicast++;
+ if (ntoh16(eh->ether_type) == ETHER_TYPE_802_1X) {
+#ifdef DHD_LOSSLESS_ROAMING
+ uint8 prio = (uint8)PKTPRIO(pktbuf);
-#ifdef DHDTCPSYNC_FLOOD_BLK
- INIT_WORK(&ifp->blk_tsfl_work, dhd_blk_tsfl_handler);
- dhd_reset_tcpsync_info_by_ifp(ifp);
-#endif /* DHDTCPSYNC_FLOOD_BLK */
+ /* back up 802.1x's priority */
+ dhdp->prio_8021x = prio;
+#endif /* DHD_LOSSLESS_ROAMING */
+ DBG_EVENT_LOG(dhdp, WIFI_EVENT_DRIVER_EAPOL_FRAME_TRANSMIT_REQUESTED);
+ atomic_inc(&dhd->pend_8021x_cnt);
+#if defined(DHD_8021X_DUMP)
+ dhd_dump_eapol_4way_message(dhd_ifname(dhdp, ifidx), pktdata, TRUE);
+#endif /* DHD_8021X_DUMP */
+ dhd_conf_set_eapol_status(dhdp, dhd_ifname(dhdp, ifidx), pktdata);
+ }
- return ifp->net;
+ if (ntoh16(eh->ether_type) == ETHER_TYPE_IP) {
+#ifdef DHD_DHCP_DUMP
+ dhd_dhcp_dump(dhd_ifname(dhdp, ifidx), pktdata, TRUE);
+#endif /* DHD_DHCP_DUMP */
+#ifdef DHD_ICMP_DUMP
+ dhd_icmp_dump(dhd_ifname(dhdp, ifidx), pktdata, TRUE);
+#endif /* DHD_ICMP_DUMP */
+ }
+ } else {
+ PKTCFREE(dhdp->osh, pktbuf, TRUE);
+ return BCME_ERROR;
+ }
-fail:
- if (ifp != NULL) {
- if (ifp->net != NULL) {
-#if defined(DHD_LB_RXP) && defined(PCIE_FULL_DONGLE)
- if (ifp->net == dhdinfo->rx_napi_netdev) {
- napi_disable(&dhdinfo->rx_napi_struct);
- netif_napi_del(&dhdinfo->rx_napi_struct);
- skb_queue_purge(&dhdinfo->rx_napi_queue);
- dhdinfo->rx_napi_netdev = NULL;
- }
-#endif /* DHD_LB_RXP && PCIE_FULL_DONGLE */
- dhd_dev_priv_clear(ifp->net);
- free_netdev(ifp->net);
- ifp->net = NULL;
+ {
+ /* Look into the packet and update the packet priority */
+#ifndef PKTPRIO_OVERRIDE
+ if (PKTPRIO(pktbuf) == 0)
+#endif /* !PKTPRIO_OVERRIDE */
+ {
+#if defined(QOS_MAP_SET)
+ pktsetprio_qms(pktbuf, wl_get_up_table(dhdp, ifidx), FALSE);
+#else
+ pktsetprio(pktbuf, FALSE);
+#endif /* QOS_MAP_SET */
}
- MFREE(dhdinfo->pub.osh, ifp, sizeof(*ifp));
- ifp = NULL;
}
- dhdinfo->iflist[ifidx] = NULL;
- return NULL;
-}
-static void
-dhd_cleanup_ifp(dhd_pub_t *dhdp, dhd_if_t *ifp)
-{
+#if defined(TRAFFIC_MGMT_DWM)
+ traffic_mgmt_pkt_set_prio(dhdp, pktbuf);
+
+#ifdef BCM_GMAC3
+ DHD_PKT_SET_DATAOFF(pktbuf, 0);
+#endif /* BCM_GMAC3 */
+#endif
+
#ifdef PCIE_FULL_DONGLE
- s32 ifidx = 0;
- if_flow_lkup_t *if_flow_lkup = (if_flow_lkup_t *)dhdp->if_flow_lkup;
-#endif /* PCIE_FULL_DONGLE */
+ /*
+ * Lkup the per interface hash table, for a matching flowring. If one is not
+ * available, allocate a unique flowid and add a flowring entry.
+ * The found or newly created flowid is placed into the pktbuf's tag.
+ */
+ ret = dhd_flowid_update(dhdp, ifidx, dhdp->flow_prio_map[(PKTPRIO(pktbuf))], pktbuf);
+ if (ret != BCME_OK) {
+ PKTCFREE(dhd->pub.osh, pktbuf, TRUE);
+ return ret;
+ }
+#endif
+
+#if defined(DHD_TX_DUMP)
+ dhd_trx_dump(dhd_idx2net(dhdp, ifidx), PKTDATA(dhdp->osh, pktbuf),
+ PKTLEN(dhdp->osh, pktbuf), TRUE);
+#endif
+ /* terence 20150901: Micky added this to adjust the 802.1X priority */
+ /* Set the 802.1X packet with the highest priority 7 */
+ if (dhdp->conf->pktprio8021x >= 0)
+ pktset8021xprio(pktbuf, dhdp->conf->pktprio8021x);
+
+#ifdef PROP_TXSTATUS
+ if (dhd_wlfc_is_supported(dhdp)) {
+ /* store the interface ID */
+ DHD_PKTTAG_SETIF(PKTTAG(pktbuf), ifidx);
- if (ifp != NULL) {
- if ((ifp->idx < 0) || (ifp->idx >= DHD_MAX_IFS)) {
- DHD_ERROR(("Wrong idx:%d \n", ifp->idx));
- ASSERT(0);
- return;
- }
-#ifdef DHD_L2_FILTER
- bcm_l2_filter_arp_table_update(dhdpub->osh, ifp->phnd_arp_table, TRUE,
- NULL, FALSE, dhdpub->tickcnt);
- deinit_l2_filter_arp_table(dhdpub->osh, ifp->phnd_arp_table);
- ifp->phnd_arp_table = NULL;
-#endif /* DHD_L2_FILTER */
+ /* store destination MAC in the tag as well */
+ DHD_PKTTAG_SETDSTN(PKTTAG(pktbuf), eh->ether_dhost);
- dhd_if_del_sta_list(ifp);
-#ifdef PCIE_FULL_DONGLE
- /* Delete flowrings of virtual interface */
- ifidx = ifp->idx;
- if ((ifidx != 0) && (if_flow_lkup[ifidx].role != WLC_E_IF_ROLE_AP)) {
- dhd_flow_rings_delete(dhdp, ifidx);
- }
-#endif /* PCIE_FULL_DONGLE */
+ /* decide which FIFO this packet belongs to */
+ if (ETHER_ISMULTI(eh->ether_dhost))
+ /* one additional queue index (highest AC + 1) is used for bc/mc queue */
+ DHD_PKTTAG_SETFIFO(PKTTAG(pktbuf), AC_COUNT);
+ else
+ DHD_PKTTAG_SETFIFO(PKTTAG(pktbuf), WME_PRIO2AC(PKTPRIO(pktbuf)));
+ } else
+#endif /* PROP_TXSTATUS */
+ {
+ /* If the protocol uses a data header, apply it */
+ dhd_prot_hdrpush(dhdp, ifidx, pktbuf);
}
-}
-
-void
-dhd_cleanup_if(struct net_device *net)
-{
- dhd_info_t *dhdinfo = DHD_DEV_INFO(net);
- dhd_pub_t *dhdp = &dhdinfo->pub;
- dhd_if_t *ifp;
- if (!(ifp = dhd_get_ifp_by_ndev(dhdp, net)) ||
- (ifp->idx >= DHD_MAX_IFS)) {
- DHD_ERROR(("Wrong ifidx: %p, %d\n", ifp, ifp ? ifp->idx : -1));
- ASSERT(0);
- return;
+ /* Use bus module to send data frame */
+#ifdef WLMEDIA_HTSF
+ dhd_htsf_addtxts(dhdp, pktbuf);
+#endif
+#ifdef PROP_TXSTATUS
+ {
+ if (dhd_wlfc_commit_packets(dhdp, (f_commitpkt_t)dhd_bus_txdata,
+ dhdp->bus, pktbuf, TRUE) == WLFC_UNSUPPORTED) {
+ /* non-proptxstatus way */
+#ifdef BCMPCIE
+ ret = dhd_bus_txdata(dhdp->bus, pktbuf, (uint8)ifidx);
+#else
+ ret = dhd_bus_txdata(dhdp->bus, pktbuf);
+#endif /* BCMPCIE */
+ }
}
+#else
+#ifdef BCMPCIE
+ ret = dhd_bus_txdata(dhdp->bus, pktbuf, (uint8)ifidx);
+#else
+ ret = dhd_bus_txdata(dhdp->bus, pktbuf);
+#endif /* BCMPCIE */
+#endif /* PROP_TXSTATUS */
+#ifdef BCMDBUS
+ if (ret)
+ PKTCFREE(dhdp->osh, pktbuf, TRUE);
+#endif /* BCMDBUS */
- dhd_cleanup_ifp(dhdp, ifp);
+ return ret;
}
-/* unregister and free the the net_device interface associated with the indexed
- * slot, also free the slot memory and set the slot pointer to NULL
- */
-#define DHD_TX_COMPLETION_TIMEOUT 5000
-int
-dhd_remove_if(dhd_pub_t *dhdpub, int ifidx, bool need_rtnl_lock)
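+/* Wrapper around __dhd_sendpkt(): tracks the bus-busy state and drops the
+ * packet if the bus is down, suspending, or in runtime suspend.
+ */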
+int BCMFASTPATH
+dhd_sendpkt(dhd_pub_t *dhdp, int ifidx, void *pktbuf)
{
- dhd_info_t *dhdinfo = (dhd_info_t *)dhdpub->info;
- dhd_if_t *ifp;
+ int ret = 0;
unsigned long flags;
- long timeout;
-
- ifp = dhdinfo->iflist[ifidx];
-
- if (ifp != NULL) {
-#ifdef DHD_4WAYM4_FAIL_DISCONNECT
- cancel_delayed_work_sync(&ifp->m4state_work);
-#endif /* DHD_4WAYM4_FAIL_DISCONNECT */
-
-#ifdef DHDTCPSYNC_FLOOD_BLK
- cancel_work_sync(&ifp->blk_tsfl_work);
-#endif /* DHDTCPSYNC_FLOOD_BLK */
-
-#ifdef WL_STATIC_IF
- /* static IF will be handled in detach */
- if (ifp->static_if) {
- DHD_TRACE(("Skip del iface for static interface\n"));
- return BCME_OK;
- }
-#endif /* WL_STATIC_IF */
- if (ifp->net != NULL) {
- DHD_ERROR(("deleting interface '%s' idx %d\n", ifp->net->name, ifp->idx));
- DHD_GENERAL_LOCK(dhdpub, flags);
- ifp->del_in_progress = true;
- DHD_GENERAL_UNLOCK(dhdpub, flags);
-
- /* If TX is in progress, hold the if del */
- if (DHD_IF_IS_TX_ACTIVE(ifp)) {
- DHD_INFO(("TX in progress. Wait for it to be complete."));
- timeout = wait_event_timeout(dhdpub->tx_completion_wait,
- ((ifp->tx_paths_active & DHD_TX_CONTEXT_MASK) == 0),
- msecs_to_jiffies(DHD_TX_COMPLETION_TIMEOUT));
- if (!timeout) {
- /* Tx completion timeout. Attempt proceeding ahead */
- DHD_ERROR(("Tx completion timed out!\n"));
- ASSERT(0);
- }
- } else {
- DHD_TRACE(("No outstanding TX!\n"));
- }
- dhdinfo->iflist[ifidx] = NULL;
- /* in unregister_netdev case, the interface gets freed by net->destructor
- * (which is set to free_netdev)
- */
- if (ifp->net->reg_state == NETREG_UNINITIALIZED) {
- free_netdev(ifp->net);
- } else {
- netif_tx_disable(ifp->net);
+ DHD_GENERAL_LOCK(dhdp, flags);
+ if (DHD_BUS_CHECK_DOWN_OR_DOWN_IN_PROGRESS(dhdp)) {
+ DHD_ERROR(("%s: returning as busstate=%d\n",
+ __FUNCTION__, dhdp->busstate));
+ DHD_GENERAL_UNLOCK(dhdp, flags);
+ PKTCFREE(dhdp->osh, pktbuf, TRUE);
+ return -ENODEV;
+ }
+ DHD_BUS_BUSY_SET_IN_SEND_PKT(dhdp);
+ DHD_GENERAL_UNLOCK(dhdp, flags);
-#if defined(SET_RPS_CPUS)
- custom_rps_map_clear(ifp->net->_rx);
-#endif /* SET_RPS_CPUS */
-#if defined(SET_RPS_CPUS)
-#if (defined(DHDTCPACK_SUPPRESS) && defined(BCMPCIE))
- dhd_tcpack_suppress_set(dhdpub, TCPACK_SUP_OFF);
-#endif /* DHDTCPACK_SUPPRESS && BCMPCIE */
-#endif // endif
- if (need_rtnl_lock)
- unregister_netdev(ifp->net);
- else
- unregister_netdevice(ifp->net);
-#if defined(WL_EXT_IAPSTA) || defined(USE_IW) || defined(WL_ESCAN)
-#ifdef WL_EXT_IAPSTA
- wl_ext_iapsta_dettach_netdev(ifp->net, ifidx);
-#endif /* WL_EXT_IAPSTA */
-#ifdef WL_ESCAN
- wl_escan_event_dettach(ifp->net, dhdpub);
-#endif /* WL_ESCAN */
- wl_ext_event_dettach_netdev(ifp->net, ifidx);
-#endif /* WL_EXT_IAPSTA || USE_IW || WL_ESCAN */
- }
- ifp->net = NULL;
- DHD_GENERAL_LOCK(dhdpub, flags);
- ifp->del_in_progress = false;
- DHD_GENERAL_UNLOCK(dhdpub, flags);
- }
- dhd_cleanup_ifp(dhdpub, ifp);
- DHD_CUMM_CTR_INIT(&ifp->cumm_ctr);
+#ifdef DHD_PCIE_RUNTIMEPM
+ if (dhdpcie_runtime_bus_wake(dhdp, FALSE, __builtin_return_address(0))) {
+ DHD_ERROR(("%s : pcie is still in suspend state!!\n", __FUNCTION__));
+ PKTCFREE(dhdp->osh, pktbuf, TRUE);
+ ret = -EBUSY;
+ goto exit;
+ }
+#endif /* DHD_PCIE_RUNTIMEPM */
- MFREE(dhdinfo->pub.osh, ifp, sizeof(*ifp));
- ifp = NULL;
+ DHD_GENERAL_LOCK(dhdp, flags);
+ if (DHD_BUS_CHECK_SUSPEND_OR_SUSPEND_IN_PROGRESS(dhdp)) {
+ DHD_ERROR(("%s: bus is in suspend(%d) or suspending(0x%x) state!!\n",
+ __FUNCTION__, dhdp->busstate, dhdp->dhd_bus_busy_state));
+ DHD_BUS_BUSY_CLEAR_IN_SEND_PKT(dhdp);
+ dhd_os_busbusy_wake(dhdp);
+ DHD_GENERAL_UNLOCK(dhdp, flags);
+ PKTCFREE(dhdp->osh, pktbuf, TRUE);
+ return -ENODEV;
}
+ DHD_GENERAL_UNLOCK(dhdp, flags);
- return BCME_OK;
-}
+ ret = __dhd_sendpkt(dhdp, ifidx, pktbuf);
-static struct net_device_ops dhd_ops_pri = {
- .ndo_open = dhd_pri_open,
- .ndo_stop = dhd_pri_stop,
- .ndo_get_stats = dhd_get_stats,
-#ifdef DHD_PCIE_NATIVE_RUNTIMEPM
- .ndo_do_ioctl = dhd_ioctl_entry_wrapper,
- .ndo_start_xmit = dhd_start_xmit_wrapper,
-#else
- .ndo_do_ioctl = dhd_ioctl_entry,
- .ndo_start_xmit = dhd_start_xmit,
-#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
- .ndo_set_mac_address = dhd_set_mac_address,
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 2, 0))
- .ndo_set_rx_mode = dhd_set_multicast_list,
-#else
- .ndo_set_multicast_list = dhd_set_multicast_list,
-#endif // endif
-};
+#ifdef DHD_PCIE_RUNTIMEPM
+exit:
+#endif
+ DHD_GENERAL_LOCK(dhdp, flags);
+ DHD_BUS_BUSY_CLEAR_IN_SEND_PKT(dhdp);
+ dhd_os_busbusy_wake(dhdp);
+ DHD_GENERAL_UNLOCK(dhdp, flags);
+ return ret;
+}
-static struct net_device_ops dhd_ops_virt = {
-#if defined(WL_CFG80211) && defined(WL_STATIC_IF)
- .ndo_open = dhd_static_if_open,
- .ndo_stop = dhd_static_if_stop,
-#endif // endif
- .ndo_get_stats = dhd_get_stats,
-#ifdef DHD_PCIE_NATIVE_RUNTIMEPM
- .ndo_do_ioctl = dhd_ioctl_entry_wrapper,
- .ndo_start_xmit = dhd_start_xmit_wrapper,
-#else
- .ndo_do_ioctl = dhd_ioctl_entry,
- .ndo_start_xmit = dhd_start_xmit,
-#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
- .ndo_set_mac_address = dhd_set_mac_address,
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 2, 0))
- .ndo_set_rx_mode = dhd_set_multicast_list,
-#else
- .ndo_set_multicast_list = dhd_set_multicast_list,
-#endif // endif
-};
+#if defined(DHD_LB_TXP)
-int
-dhd_os_write_file_posn(void *fp, unsigned long *posn, void *buf,
- unsigned long buflen)
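+/* Queue the skb for the load-balanced Tx tasklet, or transmit it directly
+ * when LB TXP is disabled at run time.
+ */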
+int BCMFASTPATH
+dhd_lb_sendpkt(dhd_info_t *dhd, struct net_device *net,
+ int ifidx, void *skb)
{
- loff_t wr_posn = *posn;
+ DHD_LB_STATS_PERCPU_ARR_INCR(dhd->tx_start_percpu_run_cnt);
- if (!fp || !buf || buflen == 0)
- return -1;
+ /* If the feature is disabled at run time, do the TX from here */
+ if (atomic_read(&dhd->lb_txp_active) == 0) {
+ DHD_LB_STATS_PERCPU_ARR_INCR(dhd->txp_percpu_run_cnt);
+ return __dhd_sendpkt(&dhd->pub, ifidx, skb);
+ }
- if (compat_vfs_write((struct file *)fp, buf, buflen, &wr_posn) < 0)
- return -1;
+ /* Store the address of net device and interface index in the Packet tag */
+ DHD_LB_TX_PKTTAG_SET_NETDEV((dhd_tx_lb_pkttag_fr_t *)PKTTAG(skb), net);
+ DHD_LB_TX_PKTTAG_SET_IFIDX((dhd_tx_lb_pkttag_fr_t *)PKTTAG(skb), ifidx);
- *posn = wr_posn;
- return 0;
-}
+ /* Enqueue the skb into tx_pend_queue */
+ skb_queue_tail(&dhd->tx_pend_queue, skb);
-#ifdef SHOW_LOGTRACE
-int
-dhd_os_read_file(void *file, char *buf, uint32 size)
-{
- struct file *filep = (struct file *)file;
+ DHD_TRACE(("%s(): Added skb %p for netdev %p \r\n", __FUNCTION__, skb, net));
- if (!file || !buf)
- return -1;
+ /* Dispatch the Tx job to be processed by the tx_tasklet */
+ dhd_lb_tx_dispatch(&dhd->pub);
- return vfs_read(filep, buf, size, &filep->f_pos);
+ return NETDEV_TX_OK;
}
+#endif /* DHD_LB_TXP */
-int
-dhd_os_seek_file(void *file, int64 offset)
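+/* ndo_start_xmit handler: validates the bus/suspend state and prepares the
+ * skb (alignment, headroom) before handing it to the send path.
+ */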
+int BCMFASTPATH
+dhd_start_xmit(struct sk_buff *skb, struct net_device *net)
{
- struct file *filep = (struct file *)file;
- if (!file)
- return -1;
+ int ret;
+ uint datalen;
+ void *pktbuf;
+ dhd_info_t *dhd = DHD_DEV_INFO(net);
+ dhd_if_t *ifp = NULL;
+ int ifidx;
+ unsigned long flags;
+#ifdef WLMEDIA_HTSF
+ uint8 htsfdlystat_sz = dhd->pub.htsfdlystat_sz;
+#else
+ uint8 htsfdlystat_sz = 0;
+#endif
+#ifdef DHD_WMF
+ struct ether_header *eh;
+ uint8 *iph;
+#endif /* DHD_WMF */
- /* offset can be -ve */
- filep->f_pos = filep->f_pos + offset;
+ DHD_TRACE(("%s: Enter\n", __FUNCTION__));
- return 0;
-}
+ if (dhd_query_bus_erros(&dhd->pub)) {
+ return -ENODEV;
+ }
-static int
-dhd_init_logstrs_array(osl_t *osh, dhd_event_log_t *temp)
-{
- struct file *filep = NULL;
- struct kstat stat;
- mm_segment_t fs;
- char *raw_fmts = NULL;
- int logstrs_size = 0;
- int error = 0;
+ /* terence 2017029: Reject in early suspend */
+ if (!dhd->pub.conf->xmit_in_suspend && dhd->pub.early_suspended) {
+ dhd_txflowcontrol(&dhd->pub, ALL_INTERFACES, ON);
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20))
+ return -ENODEV;
+#else
+ return NETDEV_TX_BUSY;
+#endif
+ }
- fs = get_fs();
- set_fs(KERNEL_DS);
+ DHD_GENERAL_LOCK(&dhd->pub, flags);
+ DHD_BUS_BUSY_SET_IN_TX(&dhd->pub);
+ DHD_GENERAL_UNLOCK(&dhd->pub, flags);
- filep = filp_open(logstrs_path, O_RDONLY, 0);
+#ifdef DHD_PCIE_RUNTIMEPM
+ if (dhdpcie_runtime_bus_wake(&dhd->pub, FALSE, dhd_start_xmit)) {
+ /* To avoid packet loss, return NETDEV_TX_BUSY until runtime resume is done. */
+ /* Stop the network queue temporarily until the resume completes. */
+ DHD_GENERAL_LOCK(&dhd->pub, flags);
+ if (!dhdpcie_is_resume_done(&dhd->pub)) {
+ dhd_bus_stop_queue(dhd->pub.bus);
+ }
+ DHD_BUS_BUSY_CLEAR_IN_TX(&dhd->pub);
+ dhd_os_busbusy_wake(&dhd->pub);
+ DHD_GENERAL_UNLOCK(&dhd->pub, flags);
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20))
+ return -ENODEV;
+#else
+ return NETDEV_TX_BUSY;
+#endif
+ }
+#endif /* DHD_PCIE_RUNTIMEPM */
- if (IS_ERR(filep)) {
- DHD_ERROR_NO_HW4(("%s: Failed to open the file %s \n", __FUNCTION__, logstrs_path));
- goto fail;
+ DHD_GENERAL_LOCK(&dhd->pub, flags);
+#ifdef BCMPCIE
+ if (DHD_BUS_CHECK_SUSPEND_OR_SUSPEND_IN_PROGRESS(&dhd->pub)) {
+ DHD_ERROR(("%s: bus is in suspend(%d) or suspending(0x%x) state!!\n",
+ __FUNCTION__, dhd->pub.busstate, dhd->pub.dhd_bus_busy_state));
+ DHD_BUS_BUSY_CLEAR_IN_TX(&dhd->pub);
+#ifdef PCIE_FULL_DONGLE
+ /* Stop tx queues if suspend is in progress */
+ if (DHD_BUS_CHECK_ANY_SUSPEND_IN_PROGRESS(&dhd->pub)) {
+ dhd_bus_stop_queue(dhd->pub.bus);
+ }
+#endif /* PCIE_FULL_DONGLE */
+ dhd_os_busbusy_wake(&dhd->pub);
+ DHD_GENERAL_UNLOCK(&dhd->pub, flags);
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20))
+ return -ENODEV;
+#else
+ return NETDEV_TX_BUSY;
+#endif
}
- error = vfs_stat(logstrs_path, &stat);
- if (error) {
- DHD_ERROR_NO_HW4(("%s: Failed to stat file %s \n", __FUNCTION__, logstrs_path));
- goto fail;
+#else
+ if (DHD_BUS_CHECK_SUSPEND_OR_SUSPEND_IN_PROGRESS(&dhd->pub)) {
+ DHD_ERROR(("%s: bus is in suspend(%d) or suspending(0x%x) state!!\n",
+ __FUNCTION__, dhd->pub.busstate, dhd->pub.dhd_bus_busy_state));
}
- logstrs_size = (int) stat.size;
+#endif
- if (logstrs_size == 0) {
- DHD_ERROR(("%s: return as logstrs_size is 0\n", __FUNCTION__));
- goto fail1;
- }
+ DHD_OS_WAKE_LOCK(&dhd->pub);
+ DHD_PERIM_LOCK_TRY(DHD_FWDER_UNIT(dhd), lock_taken);
- raw_fmts = MALLOC(osh, logstrs_size);
- if (raw_fmts == NULL) {
- DHD_ERROR(("%s: Failed to allocate memory \n", __FUNCTION__));
- goto fail;
- }
- if (vfs_read(filep, raw_fmts, logstrs_size, &filep->f_pos) != logstrs_size) {
- DHD_ERROR_NO_HW4(("%s: Failed to read file %s\n", __FUNCTION__, logstrs_path));
- goto fail;
+#if defined(DHD_HANG_SEND_UP_TEST)
+ if (dhd->pub.req_hang_type == HANG_REASON_BUS_DOWN) {
+ dhd->pub.busstate = DHD_BUS_DOWN;
}
+#endif /* DHD_HANG_SEND_UP_TEST */
- if (dhd_parse_logstrs_file(osh, raw_fmts, logstrs_size, temp)
- == BCME_OK) {
- filp_close(filep, NULL);
- set_fs(fs);
- return BCME_OK;
+ /* Reject if down */
+ if (dhd->pub.hang_was_sent || DHD_BUS_CHECK_DOWN_OR_DOWN_IN_PROGRESS(&dhd->pub)) {
+ DHD_ERROR(("%s: xmit rejected pub.up=%d busstate=%d \n",
+ __FUNCTION__, dhd->pub.up, dhd->pub.busstate));
+ netif_stop_queue(net);
+ /* Send Event when bus down detected during data session */
+ if (dhd->pub.up && !dhd->pub.hang_was_sent && !DHD_BUS_CHECK_REMOVE(&dhd->pub)) {
+ DHD_ERROR(("%s: Event HANG sent up\n", __FUNCTION__));
+ dhd->pub.hang_reason = HANG_REASON_BUS_DOWN;
+ net_os_send_hang_message(net);
+ }
+ DHD_BUS_BUSY_CLEAR_IN_TX(&dhd->pub);
+ dhd_os_busbusy_wake(&dhd->pub);
+ DHD_GENERAL_UNLOCK(&dhd->pub, flags);
+ DHD_PERIM_UNLOCK_TRY(DHD_FWDER_UNIT(dhd), lock_taken);
+ DHD_OS_WAKE_UNLOCK(&dhd->pub);
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20))
+ return -ENODEV;
+#else
+ return NETDEV_TX_BUSY;
+#endif
}
-fail:
- if (raw_fmts) {
- MFREE(osh, raw_fmts, logstrs_size);
- raw_fmts = NULL;
+ ifp = DHD_DEV_IFP(net);
+ ifidx = DHD_DEV_IFIDX(net);
+ if (ifidx == DHD_BAD_IF) {
+ DHD_ERROR(("%s: bad ifidx %d\n", __FUNCTION__, ifidx));
+ netif_stop_queue(net);
+ DHD_BUS_BUSY_CLEAR_IN_TX(&dhd->pub);
+ dhd_os_busbusy_wake(&dhd->pub);
+ DHD_GENERAL_UNLOCK(&dhd->pub, flags);
+ DHD_PERIM_UNLOCK_TRY(DHD_FWDER_UNIT(dhd), lock_taken);
+ DHD_OS_WAKE_UNLOCK(&dhd->pub);
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20))
+ return -ENODEV;
+#else
+ return NETDEV_TX_BUSY;
+#endif
}
-fail1:
- if (!IS_ERR(filep))
- filp_close(filep, NULL);
-
- set_fs(fs);
- temp->fmts = NULL;
- return BCME_ERROR;
-}
-
-static int
-dhd_read_map(osl_t *osh, char *fname, uint32 *ramstart, uint32 *rodata_start,
- uint32 *rodata_end)
-{
- struct file *filep = NULL;
- mm_segment_t fs;
- int err = BCME_ERROR;
+ DHD_GENERAL_UNLOCK(&dhd->pub, flags);
- if (fname == NULL) {
- DHD_ERROR(("%s: ERROR fname is NULL \n", __FUNCTION__));
- return BCME_ERROR;
- }
+ ASSERT(ifidx == dhd_net2idx(dhd, net));
+ ASSERT((ifp != NULL) && ((ifidx < DHD_MAX_IFS) && (ifp == dhd->iflist[ifidx])));
- fs = get_fs();
- set_fs(KERNEL_DS);
+ bcm_object_trace_opr(skb, BCM_OBJDBG_ADD_PKT, __FUNCTION__, __LINE__);
- filep = filp_open(fname, O_RDONLY, 0);
- if (IS_ERR(filep)) {
- DHD_ERROR_NO_HW4(("%s: Failed to open %s \n", __FUNCTION__, fname));
- goto fail;
+ /* re-align the socket buffer if "skb->data" is at an odd address */
+ if (((unsigned long)(skb->data)) & 0x1) {
+ unsigned char *data = skb->data;
+ uint32 length = skb->len;
+ PKTPUSH(dhd->pub.osh, skb, 1);
+ memmove(skb->data, data, length);
+ PKTSETLEN(dhd->pub.osh, skb, length);
}
- if ((err = dhd_parse_map_file(osh, filep, ramstart,
- rodata_start, rodata_end)) < 0)
- goto fail;
-
-fail:
- if (!IS_ERR(filep))
- filp_close(filep, NULL);
+ datalen = PKTLEN(dhd->pub.osh, skb);
- set_fs(fs);
+ /* Make sure there's enough room for any header */
+ if (skb_headroom(skb) < dhd->pub.hdrlen + htsfdlystat_sz) {
+ struct sk_buff *skb2;
- return err;
-}
+ DHD_INFO(("%s: insufficient headroom\n",
+ dhd_ifname(&dhd->pub, ifidx)));
+ dhd->pub.tx_realloc++;
-static int
-dhd_init_static_strs_array(osl_t *osh, dhd_event_log_t *temp, char *str_file, char *map_file)
-{
- struct file *filep = NULL;
- mm_segment_t fs;
- char *raw_fmts = NULL;
- uint32 logstrs_size = 0;
- int error = 0;
- uint32 ramstart = 0;
- uint32 rodata_start = 0;
- uint32 rodata_end = 0;
- uint32 logfilebase = 0;
+ bcm_object_trace_opr(skb, BCM_OBJDBG_REMOVE, __FUNCTION__, __LINE__);
+ skb2 = skb_realloc_headroom(skb, dhd->pub.hdrlen + htsfdlystat_sz);
- error = dhd_read_map(osh, map_file, &ramstart, &rodata_start, &rodata_end);
- if (error != BCME_OK) {
- DHD_ERROR(("readmap Error!! \n"));
- /* don't do event log parsing in actual case */
- if (strstr(str_file, ram_file_str) != NULL) {
- temp->raw_sstr = NULL;
- } else if (strstr(str_file, rom_file_str) != NULL) {
- temp->rom_raw_sstr = NULL;
+ dev_kfree_skb(skb);
+ if ((skb = skb2) == NULL) {
+ DHD_ERROR(("%s: skb_realloc_headroom failed\n",
+ dhd_ifname(&dhd->pub, ifidx)));
+ ret = -ENOMEM;
+ goto done;
}
- return error;
- }
- DHD_ERROR(("ramstart: 0x%x, rodata_start: 0x%x, rodata_end:0x%x\n",
- ramstart, rodata_start, rodata_end));
-
- fs = get_fs();
- set_fs(KERNEL_DS);
-
- filep = filp_open(str_file, O_RDONLY, 0);
- if (IS_ERR(filep)) {
- DHD_ERROR(("%s: Failed to open the file %s \n", __FUNCTION__, str_file));
- goto fail;
- }
-
- if (TRUE) {
- /* Full file size is huge. Just read required part */
- logstrs_size = rodata_end - rodata_start;
- logfilebase = rodata_start - ramstart;
+ bcm_object_trace_opr(skb, BCM_OBJDBG_ADD_PKT, __FUNCTION__, __LINE__);
}
- if (logstrs_size == 0) {
- DHD_ERROR(("%s: return as logstrs_size is 0\n", __FUNCTION__));
- goto fail1;
+ /* Convert to packet */
+ if (!(pktbuf = PKTFRMNATIVE(dhd->pub.osh, skb))) {
+ DHD_ERROR(("%s: PKTFRMNATIVE failed\n",
+ dhd_ifname(&dhd->pub, ifidx)));
+ bcm_object_trace_opr(skb, BCM_OBJDBG_REMOVE, __FUNCTION__, __LINE__);
+ dev_kfree_skb_any(skb);
+ ret = -ENOMEM;
+ goto done;
}
- raw_fmts = MALLOC(osh, logstrs_size);
- if (raw_fmts == NULL) {
- DHD_ERROR(("%s: Failed to allocate raw_fmts memory \n", __FUNCTION__));
- goto fail;
- }
+#if defined(WLMEDIA_HTSF)
+ if (htsfdlystat_sz && PKTLEN(dhd->pub.osh, pktbuf) >= ETHER_ADDR_LEN) {
+ uint8 *pktdata = (uint8 *)PKTDATA(dhd->pub.osh, pktbuf);
+ struct ether_header *eh = (struct ether_header *)pktdata;
- if (TRUE) {
- error = generic_file_llseek(filep, logfilebase, SEEK_SET);
- if (error < 0) {
- DHD_ERROR(("%s: %s llseek failed %d \n", __FUNCTION__, str_file, error));
- goto fail;
+ if (!ETHER_ISMULTI(eh->ether_dhost) &&
+ (ntoh16(eh->ether_type) == ETHER_TYPE_IP)) {
+ eh->ether_type = hton16(ETHER_TYPE_BRCM_PKTDLYSTATS);
}
}
-
- error = vfs_read(filep, raw_fmts, logstrs_size, (&filep->f_pos));
- if (error != logstrs_size) {
- DHD_ERROR(("%s: %s read failed %d \n", __FUNCTION__, str_file, error));
- goto fail;
- }
-
- if (strstr(str_file, ram_file_str) != NULL) {
- temp->raw_sstr = raw_fmts;
- temp->raw_sstr_size = logstrs_size;
- temp->rodata_start = rodata_start;
- temp->rodata_end = rodata_end;
- } else if (strstr(str_file, rom_file_str) != NULL) {
- temp->rom_raw_sstr = raw_fmts;
- temp->rom_raw_sstr_size = logstrs_size;
- temp->rom_rodata_start = rodata_start;
- temp->rom_rodata_end = rodata_end;
+#endif
+#ifdef DHD_WET
+ /* wet related packet proto manipulation should be done in DHD
+ since dongle doesn't have complete payload
+ */
+ if (WET_ENABLED(&dhd->pub) &&
+ (dhd_wet_send_proc(dhd->pub.wet_info, pktbuf, &pktbuf) < 0)) {
+ DHD_INFO(("%s:%s: wet send proc failed\n",
+ __FUNCTION__, dhd_ifname(&dhd->pub, ifidx)));
+ PKTFREE(dhd->pub.osh, pktbuf, FALSE);
+ ret = -EFAULT;
+ goto done;
}
+#endif /* DHD_WET */
- filp_close(filep, NULL);
- set_fs(fs);
+#ifdef DHD_WMF
+ eh = (struct ether_header *)PKTDATA(dhd->pub.osh, pktbuf);
+ iph = (uint8 *)eh + ETHER_HDR_LEN;
- return BCME_OK;
+ /* WMF processing for multicast packets
+ * Only IPv4 packets are handled
+ */
+ if (ifp->wmf.wmf_enable && (ntoh16(eh->ether_type) == ETHER_TYPE_IP) &&
+ (IP_VER(iph) == IP_VER_4) && (ETHER_ISMULTI(eh->ether_dhost) ||
+ ((IPV4_PROT(iph) == IP_PROT_IGMP) && dhd->pub.wmf_ucast_igmp))) {
+#if defined(DHD_IGMP_UCQUERY) || defined(DHD_UCAST_UPNP)
+ void *sdu_clone;
+ bool ucast_convert = FALSE;
+#ifdef DHD_UCAST_UPNP
+ uint32 dest_ip;
+
+ dest_ip = ntoh32(*((uint32 *)(iph + IPV4_DEST_IP_OFFSET)));
+ ucast_convert = dhd->pub.wmf_ucast_upnp && MCAST_ADDR_UPNP_SSDP(dest_ip);
+#endif /* DHD_UCAST_UPNP */
+#ifdef DHD_IGMP_UCQUERY
+ ucast_convert |= dhd->pub.wmf_ucast_igmp_query &&
+ (IPV4_PROT(iph) == IP_PROT_IGMP) &&
+ (*(iph + IPV4_HLEN(iph)) == IGMPV2_HOST_MEMBERSHIP_QUERY);
+#endif /* DHD_IGMP_UCQUERY */
+ if (ucast_convert) {
+ dhd_sta_t *sta;
+ unsigned long flags;
+ struct list_head snapshot_list;
+ struct list_head *wmf_ucforward_list;
-fail:
- if (raw_fmts) {
- MFREE(osh, raw_fmts, logstrs_size);
- raw_fmts = NULL;
- }
+ ret = NETDEV_TX_OK;
-fail1:
- if (!IS_ERR(filep))
- filp_close(filep, NULL);
+ /* For non-BCM_GMAC3 platforms we need a snapshot sta_list to
+ * resolve the double DHD_IF_STA_LIST_LOCK call deadlock issue.
+ */
+ wmf_ucforward_list = DHD_IF_WMF_UCFORWARD_LOCK(dhd, ifp, &snapshot_list);
- set_fs(fs);
+ /* Convert upnp/igmp query to unicast for each assoc STA */
+ list_for_each_entry(sta, wmf_ucforward_list, list) {
+ /* Skip sending to proxy interfaces of proxySTA */
+ if (sta->psta_prim != NULL && !ifp->wmf_psta_disable) {
+ continue;
+ }
+ if ((sdu_clone = PKTDUP(dhd->pub.osh, pktbuf)) == NULL) {
+ ret = WMF_NOP;
+ break;
+ }
+ dhd_wmf_forward(ifp->wmf.wmfh, sdu_clone, 0, sta, 1);
+ }
+ DHD_IF_WMF_UCFORWARD_UNLOCK(dhd, wmf_ucforward_list);
- if (strstr(str_file, ram_file_str) != NULL) {
- temp->raw_sstr = NULL;
- } else if (strstr(str_file, rom_file_str) != NULL) {
- temp->rom_raw_sstr = NULL;
- }
+ DHD_GENERAL_LOCK(&dhd->pub, flags);
+ DHD_BUS_BUSY_CLEAR_IN_TX(&dhd->pub);
+ dhd_os_busbusy_wake(&dhd->pub);
+ DHD_GENERAL_UNLOCK(&dhd->pub, flags);
+ DHD_PERIM_UNLOCK_TRY(DHD_FWDER_UNIT(dhd), lock_taken);
+ DHD_OS_WAKE_UNLOCK(&dhd->pub);
- return error;
-} /* dhd_init_static_strs_array */
+ if (ret == NETDEV_TX_OK)
+ PKTFREE(dhd->pub.osh, pktbuf, TRUE);
-#endif /* SHOW_LOGTRACE */
+ return ret;
+ } else
+#endif /* defined(DHD_IGMP_UCQUERY) || defined(DHD_UCAST_UPNP) */
+ {
+ /* There will be no STA info if the packet is coming from a LAN host,
+ * so pass NULL
+ */
+ ret = dhd_wmf_packets_handle(&dhd->pub, pktbuf, NULL, ifidx, 0);
+ switch (ret) {
+ case WMF_TAKEN:
+ case WMF_DROP:
+ /* Either taken by WMF or we should drop it.
+ * Exiting send path
+ */
-#ifdef DHD_ERPOM
-uint enable_erpom = 0;
-module_param(enable_erpom, int, 0);
+ DHD_GENERAL_LOCK(&dhd->pub, flags);
+ DHD_BUS_BUSY_CLEAR_IN_TX(&dhd->pub);
+ dhd_os_busbusy_wake(&dhd->pub);
+ DHD_GENERAL_UNLOCK(&dhd->pub, flags);
+ DHD_PERIM_UNLOCK_TRY(DHD_FWDER_UNIT(dhd), lock_taken);
+ DHD_OS_WAKE_UNLOCK(&dhd->pub);
+ return NETDEV_TX_OK;
+ default:
+ /* Continue the transmit path */
+ break;
+ }
+ }
+ }
+#endif /* DHD_WMF */
+#ifdef DHD_PSTA
+ /* PSR related packet proto manipulation should be done in DHD
+ * since dongle doesn't have complete payload
+ */
+ if (PSR_ENABLED(&dhd->pub) && (dhd_psta_proc(&dhd->pub,
+ ifidx, &pktbuf, TRUE) < 0)) {
+ DHD_ERROR(("%s:%s: psta send proc failed\n", __FUNCTION__,
+ dhd_ifname(&dhd->pub, ifidx)));
+ }
+#endif /* DHD_PSTA */
-int
-dhd_wlan_power_off_handler(void *handler, unsigned char reason)
-{
- dhd_pub_t *dhdp = (dhd_pub_t *)handler;
- bool dongle_isolation = dhdp->dongle_isolation;
+#ifdef DHDTCPACK_SUPPRESS
+ if (dhd->pub.tcpack_sup_mode == TCPACK_SUP_HOLD) {
+ /* If this packet has been held or freed, just return */
+ if (dhd_tcpack_hold(&dhd->pub, pktbuf, ifidx)) {
+ ret = 0;
+ goto done;
+ }
+ } else {
+ /* If this packet has replaced another packet and got freed, just return */
+ if (dhd_tcpack_suppress(&dhd->pub, pktbuf)) {
+ ret = 0;
+ goto done;
+ }
+ }
+#endif /* DHDTCPACK_SUPPRESS */
- DHD_ERROR(("%s: WLAN DHD cleanup reason: %d\n", __FUNCTION__, reason));
+ /*
+ * If Load Balance is enabled queue the packet
+ * else send directly from here.
+ */
+#if defined(DHD_LB_TXP)
+ ret = dhd_lb_sendpkt(dhd, net, ifidx, pktbuf);
+#else
+ ret = __dhd_sendpkt(&dhd->pub, ifidx, pktbuf);
+#endif
- if ((reason == BY_BT_DUE_TO_BT) || (reason == BY_BT_DUE_TO_WLAN)) {
-#if defined(DHD_FW_COREDUMP)
- /* save core dump to a file */
- if (dhdp->memdump_enabled) {
-#ifdef DHD_SSSR_DUMP
- dhdp->collect_sssr = TRUE;
-#endif /* DHD_SSSR_DUMP */
- dhdp->memdump_type = DUMP_TYPE_DUE_TO_BT;
- dhd_bus_mem_dump(dhdp);
+done:
+ if (ret) {
+ ifp->stats.tx_dropped++;
+ dhd->pub.tx_dropped++;
+ } else {
+#ifdef PROP_TXSTATUS
+ /* the tx_packets counters are updated here only when wlfc is disabled */
+ if (!dhd_wlfc_is_supported(&dhd->pub))
+#endif
+ {
+ dhd->pub.tx_packets++;
+ ifp->stats.tx_packets++;
+ ifp->stats.tx_bytes += datalen;
}
-#endif /* DHD_FW_COREDUMP */
}
- /* pause data on all the interfaces */
- dhd_bus_stop_queue(dhdp->bus);
- /* Devreset function will perform FLR again, to avoid it set dongle_isolation */
- dhdp->dongle_isolation = TRUE;
- dhd_bus_devreset(dhdp, 1); /* DHD structure cleanup */
- dhdp->dongle_isolation = dongle_isolation; /* Restore the old value */
+ DHD_GENERAL_LOCK(&dhd->pub, flags);
+ DHD_BUS_BUSY_CLEAR_IN_TX(&dhd->pub);
+ dhd_os_busbusy_wake(&dhd->pub);
+ DHD_GENERAL_UNLOCK(&dhd->pub, flags);
+ DHD_PERIM_UNLOCK_TRY(DHD_FWDER_UNIT(dhd), lock_taken);
+ DHD_OS_WAKE_UNLOCK(&dhd->pub);
+ /* Return ok: we always eat the packet */
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20))
return 0;
+#else
+ return NETDEV_TX_OK;
+#endif
}
-int
-dhd_wlan_power_on_handler(void *handler, unsigned char reason)
-{
- dhd_pub_t *dhdp = (dhd_pub_t *)handler;
- bool dongle_isolation = dhdp->dongle_isolation;
-
- DHD_ERROR(("%s: WLAN DHD re-init reason: %d\n", __FUNCTION__, reason));
- /* Devreset function will perform FLR again, to avoid it set dongle_isolation */
- dhdp->dongle_isolation = TRUE;
- dhd_bus_devreset(dhdp, 0); /* DHD structure re-init */
- dhdp->dongle_isolation = dongle_isolation; /* Restore the old value */
- /* resume data on all the interfaces */
- dhd_bus_start_queue(dhdp->bus);
- return 0;
-}
+void
+dhd_txflowcontrol(dhd_pub_t *dhdp, int ifidx, bool state)
+{
+ struct net_device *net;
+ dhd_info_t *dhd = dhdp->info;
+ int i;
-#endif /* DHD_ERPOM */
+ DHD_TRACE(("%s: Enter\n", __FUNCTION__));
-#ifdef BCMDBUS
-uint
-dhd_get_rxsz(dhd_pub_t *pub)
-{
- struct net_device *net = NULL;
- dhd_info_t *dhd = NULL;
- uint rxsz;
+ ASSERT(dhd);
- /* Assign rxsz for dbus_attach */
- dhd = pub->info;
- net = dhd->iflist[0]->net;
- net->hard_header_len = ETH_HLEN + pub->hdrlen;
- rxsz = DBUS_RX_BUFFER_SIZE_DHD(net);
+#ifdef DHD_LOSSLESS_ROAMING
+ /* block flowcontrol during roaming */
+ if ((dhdp->dequeue_prec_map == 1 << PRIO_8021D_NC) && state == ON) {
+ return;
+ }
+#endif
- return rxsz;
+ if (ifidx == ALL_INTERFACES) {
+ /* Flow control on all active interfaces */
+ dhdp->txoff = state;
+ for (i = 0; i < DHD_MAX_IFS; i++) {
+ if (dhd->iflist[i]) {
+ net = dhd->iflist[i]->net;
+ if (state == ON)
+ netif_stop_queue(net);
+ else
+ netif_wake_queue(net);
+ }
+ }
+ } else {
+ if (dhd->iflist[ifidx]) {
+ net = dhd->iflist[ifidx]->net;
+ if (state == ON)
+ netif_stop_queue(net);
+ else
+ netif_wake_queue(net);
+ }
+ }
}
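/*
 * Illustrative usage sketch, not part of the patch: a caller that wants to
 * pause every transmit queue while the bus is being suspended, and wake them
 * again afterwards, could wrap dhd_txflowcontrol() like this (the helper name
 * is hypothetical; ALL_INTERFACES, ON and OFF are existing driver symbols).
 */
static void example_pause_all_tx(dhd_pub_t *dhdp, bool pause)
{
	/* ON stops every registered netif queue, OFF wakes them */
	dhd_txflowcontrol(dhdp, ALL_INTERFACES, pause ? ON : OFF);
}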
-void
-dhd_set_path(dhd_pub_t *pub)
-{
- dhd_info_t *dhd = NULL;
- dhd = pub->info;
+#ifdef DHD_WMF
+bool
+dhd_is_rxthread_enabled(dhd_pub_t *dhdp)
+{
+ dhd_info_t *dhd = dhdp->info;
- /* try to download image and nvram to the dongle */
- if (dhd_update_fw_nv_path(dhd) && dhd->pub.bus) {
- DHD_INFO(("%s: fw %s, nv %s, conf %s\n",
- __FUNCTION__, dhd->fw_path, dhd->nv_path, dhd->conf_path));
- dhd_bus_update_fw_nv_path(dhd->pub.bus,
- dhd->fw_path, dhd->nv_path, dhd->clm_path, dhd->conf_path);
- }
+ return dhd->rxthread_enabled;
}
-#endif
+#endif /* DHD_WMF */
-/** Called once for each hardware (dongle) instance that this DHD manages */
-dhd_pub_t *
-dhd_attach(osl_t *osh, struct dhd_bus *bus, uint bus_hdrlen
-#ifdef BCMDBUS
- , void *data
-#endif
-)
+#ifdef DHD_MCAST_REGEN
+/*
+ * Description: This function is called to do the reverse (unicast-to-multicast MAC) translation
+ *
+ * Input eh - pointer to the ethernet header
+ */
+int32
+dhd_mcast_reverse_translation(struct ether_header *eh)
{
- dhd_info_t *dhd = NULL;
- struct net_device *net = NULL;
- char if_name[IFNAMSIZ] = {'\0'};
-#ifdef SHOW_LOGTRACE
- int ret;
-#endif /* SHOW_LOGTRACE */
-#ifdef DHD_ERPOM
- pom_func_handler_t *pom_handler;
-#endif /* DHD_ERPOM */
-#if defined(BCMSDIO) || defined(BCMPCIE)
- uint32 bus_type = -1;
- uint32 bus_num = -1;
- uint32 slot_num = -1;
- wifi_adapter_info_t *adapter = NULL;
-#elif defined(BCMDBUS)
- wifi_adapter_info_t *adapter = data;
-#endif
-#ifdef GET_CUSTOM_MAC_ENABLE
- char hw_ether[62];
-#endif /* GET_CUSTOM_MAC_ENABLE */
+ uint8 *iph;
+ uint32 dest_ip;
- dhd_attach_states_t dhd_state = DHD_ATTACH_STATE_INIT;
- DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+ iph = (uint8 *)eh + ETHER_HDR_LEN;
+ dest_ip = ntoh32(*((uint32 *)(iph + IPV4_DEST_IP_OFFSET)));
-#ifdef PCIE_FULL_DONGLE
- ASSERT(sizeof(dhd_pkttag_fd_t) <= OSL_PKTTAG_SZ);
- ASSERT(sizeof(dhd_pkttag_fr_t) <= OSL_PKTTAG_SZ);
-#endif /* PCIE_FULL_DONGLE */
+ /* Only IP packets are handled */
+ if (eh->ether_type != hton16(ETHER_TYPE_IP))
+ return BCME_ERROR;
- /* will implement get_ids for DBUS later */
-#if defined(BCMSDIO)
- dhd_bus_get_ids(bus, &bus_type, &bus_num, &slot_num);
-#endif // endif
-#if defined(BCMSDIO) || defined(BCMPCIE)
- adapter = dhd_wifi_platform_get_adapter(bus_type, bus_num, slot_num);
-#endif
+ /* Non-IPv4 multicast packets are not handled */
+ if (IP_VER(iph) != IP_VER_4)
+ return BCME_ERROR;
- /* Allocate primary dhd_info */
- dhd = wifi_platform_prealloc(adapter, DHD_PREALLOC_DHD_INFO, sizeof(dhd_info_t));
- if (dhd == NULL) {
- dhd = MALLOC(osh, sizeof(dhd_info_t));
- if (dhd == NULL) {
- DHD_ERROR(("%s: OOM - alloc dhd_info\n", __FUNCTION__));
- goto dhd_null_flag;
- }
+ /*
+ * The packet has a multicast IP and unicast MAC. That means
+ * we have to do the reverse translation
+ */
+ if (IPV4_ISMULTI(dest_ip) && !ETHER_ISMULTI(&eh->ether_dhost)) {
+ ETHER_FILL_MCAST_ADDR_FROM_IP(eh->ether_dhost, dest_ip);
+ return BCME_OK;
}
- memset(dhd, 0, sizeof(dhd_info_t));
- dhd_state |= DHD_ATTACH_STATE_DHD_ALLOC;
-
- dhd->unit = dhd_found + instance_base; /* do not increment dhd_found, yet */
- dhd->pub.osh = osh;
-#ifdef DUMP_IOCTL_IOV_LIST
- dll_init(&(dhd->pub.dump_iovlist_head));
-#endif /* DUMP_IOCTL_IOV_LIST */
- dhd->adapter = adapter;
- dhd->pub.adapter = (void *)adapter;
-#ifdef BT_OVER_SDIO
- dhd->pub.is_bt_recovery_required = FALSE;
- mutex_init(&dhd->bus_user_lock);
-#endif /* BT_OVER_SDIO */
+ return BCME_ERROR;
+}
+#endif /* DHD_MCAST_REGEN */
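/*
 * Illustrative sketch, not part of the patch: the standard IPv4 multicast to
 * Ethernet mapping that ETHER_FILL_MCAST_ADDR_FROM_IP is expected to perform
 * for the translation above -- 01:00:5E followed by the low 23 bits of the
 * group address (RFC 1112). The helper name is hypothetical.
 */
static void example_ip_mcast_to_ether(uint32 group, uint8 mac[ETHER_ADDR_LEN])
{
	mac[0] = 0x01;
	mac[1] = 0x00;
	mac[2] = 0x5E;
	mac[3] = (group >> 16) & 0x7F;	/* bit 23 of the group address is dropped */
	mac[4] = (group >> 8) & 0xFF;
	mac[5] = group & 0xFF;
}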
- g_dhd_pub = &dhd->pub;
+#ifdef SHOW_LOGTRACE
+static int
+dhd_event_logtrace_pkt_process(dhd_pub_t *dhdp, struct sk_buff * skb)
+{
+ dhd_info_t *dhd = (dhd_info_t *)dhdp->info;
+ int ret = BCME_OK;
+ uint datalen;
+ bcm_event_msg_u_t evu;
+ void *data = NULL;
+ void *pktdata = NULL;
+ bcm_event_t *pvt_data;
+ uint pktlen;
-#ifdef DHD_DEBUG
- dll_init(&(dhd->pub.mw_list_head));
-#endif /* DHD_DEBUG */
+ DHD_TRACE(("%s:Enter\n", __FUNCTION__));
-#ifdef GET_CUSTOM_MAC_ENABLE
- wifi_platform_get_mac_addr(dhd->adapter, hw_ether, iface_name);
- bcopy(hw_ether, dhd->pub.mac.octet, sizeof(struct ether_addr));
-#endif /* GET_CUSTOM_MAC_ENABLE */
-#ifdef CUSTOM_FORCE_NODFS_FLAG
- dhd->pub.dhd_cflags |= WLAN_PLAT_NODFS_FLAG;
- dhd->pub.force_country_change = TRUE;
-#endif /* CUSTOM_FORCE_NODFS_FLAG */
-#ifdef CUSTOM_COUNTRY_CODE
- get_customized_country_code(dhd->adapter,
- dhd->pub.dhd_cspec.country_abbrev, &dhd->pub.dhd_cspec,
- dhd->pub.dhd_cflags);
-#endif /* CUSTOM_COUNTRY_CODE */
-#ifndef BCMDBUS
- dhd->thr_dpc_ctl.thr_pid = DHD_PID_KT_TL_INVALID;
- dhd->thr_wdt_ctl.thr_pid = DHD_PID_KT_INVALID;
-#ifdef DHD_WET
- dhd->pub.wet_info = dhd_get_wet_info(&dhd->pub);
-#endif /* DHD_WET */
- /* Initialize thread based operation and lock */
- sema_init(&dhd->sdsem, 1);
-#endif /* !BCMDBUS */
- dhd->pub.pcie_txs_metadata_enable = pcie_txs_metadata_enable;
+ /* In dhd_rx_frame, header is stripped using skb_pull
+ * of size ETH_HLEN, so adjust pktlen accordingly
+ */
+ pktlen = skb->len + ETH_HLEN;
- /* Link to info module */
- dhd->pub.info = dhd;
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 22)
+ pktdata = (void *)skb_mac_header(skb);
+#else
+ pktdata = (void *)skb->mac.raw;
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 22) */
- /* Link to bus module */
- dhd->pub.bus = bus;
- dhd->pub.hdrlen = bus_hdrlen;
- dhd->pub.txoff = FALSE;
+ ret = wl_host_event_get_data(pktdata, pktlen, &evu);
- /* dhd_conf must be attached after linking dhd to dhd->pub.info,
- * because dhd_detech will check .info is NULL or not.
- */
- if (dhd_conf_attach(&dhd->pub) != 0) {
- DHD_ERROR(("dhd_conf_attach failed\n"));
- goto fail;
+ if (ret != BCME_OK) {
+ DHD_ERROR(("%s: wl_host_event_get_data err = %d\n",
+ __FUNCTION__, ret));
+ goto exit;
}
-#ifndef BCMDBUS
- dhd_conf_reset(&dhd->pub);
- dhd_conf_set_chiprev(&dhd->pub, dhd_bus_chip(bus), dhd_bus_chiprev(bus));
- dhd_conf_preinit(&dhd->pub);
-#endif /* !BCMDBUS */
- /* Some DHD modules (e.g. cfg80211) configures operation mode based on firmware name.
- * This is indeed a hack but we have to make it work properly before we have a better
- * solution
- */
- dhd_update_fw_nv_path(dhd);
+ datalen = ntoh32(evu.event.datalen);
- /* Set network interface name if it was provided as module parameter */
- if (iface_name[0]) {
- int len;
- char ch;
- strncpy(if_name, iface_name, IFNAMSIZ);
- if_name[IFNAMSIZ - 1] = 0;
- len = strlen(if_name);
- ch = if_name[len - 1];
- if ((ch > '9' || ch < '0') && (len < IFNAMSIZ - 2))
- strncat(if_name, "%d", IFNAMSIZ - len - 1);
- }
+ pvt_data = (bcm_event_t *)pktdata;
+ data = &pvt_data[1];
- /* Passing NULL to dngl_name to ensure host gets if_name in dngl_name member */
- net = dhd_allocate_if(&dhd->pub, 0, if_name, NULL, 0, TRUE, NULL);
- if (net == NULL) {
- goto fail;
+ dhd_dbg_trace_evnt_handler(dhdp, data, &dhd->event_data, datalen);
+
+exit:
+ return ret;
+}
+
+static void
+dhd_event_logtrace_process(struct work_struct * work)
+{
+/* Ignore compiler warnings due to -Werror=cast-qual */
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wcast-qual"
+#endif
+ struct dhd_info *dhd =
+ container_of(work, struct dhd_info, event_log_dispatcher_work);
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic pop
+#endif
+
+ dhd_pub_t *dhdp;
+ struct sk_buff *skb;
+
+ if (!dhd) {
+ DHD_ERROR(("%s: dhd info not available \n", __FUNCTION__));
+ return;
}
- mutex_init(&dhd->pub.ndev_op_sync);
- dhd_state |= DHD_ATTACH_STATE_ADD_IF;
-#ifdef DHD_L2_FILTER
- /* initialize the l2_filter_cnt */
- dhd->pub.l2_filter_cnt = 0;
-#endif // endif
- net->netdev_ops = NULL;
+ dhdp = &dhd->pub;
- mutex_init(&dhd->dhd_iovar_mutex);
- sema_init(&dhd->proto_sem, 1);
-#ifdef DHD_ULP
- if (!(dhd_ulp_init(osh, &dhd->pub)))
- goto fail;
-#endif /* DHD_ULP */
+ if (!dhdp) {
+ DHD_ERROR(("%s: dhd pub is null \n", __FUNCTION__));
+ return;
+ }
-#ifdef PROP_TXSTATUS
- spin_lock_init(&dhd->wlfc_spinlock);
+ DHD_TRACE(("%s:Enter\n", __FUNCTION__));
- dhd->pub.skip_fc = dhd_wlfc_skip_fc;
- dhd->pub.plat_init = dhd_wlfc_plat_init;
- dhd->pub.plat_deinit = dhd_wlfc_plat_deinit;
+ /* Loop until all queued skbs are dequeued */
+ while ((skb = skb_dequeue(&dhd->evt_trace_queue)) != NULL) {
+#ifdef PCIE_FULL_DONGLE
+ int ifid;
+ ifid = DHD_PKTTAG_IFID((dhd_pkttag_fr_t *)PKTTAG(skb));
+ if (ifid == DHD_EVENT_IF) {
+ dhd_event_logtrace_infobuf_pkt_process(dhdp, skb, &dhd->event_data);
+ /* To send the skb to the network layer, convert it to a native PKT
+ * and then assign skb->dev to the primary interface's net device,
+ * since infobuf events are delivered on the special DHD_EVENT_IF
+ */
+#ifdef DHD_USE_STATIC_CTRLBUF
+ PKTFREE_STATIC(dhdp->osh, skb, FALSE);
+#else
+ PKTFREE(dhdp->osh, skb, FALSE);
+#endif /* DHD_USE_STATIC_CTRLBUF */
+ continue;
+ }
+ else {
+ dhd_event_logtrace_pkt_process(dhdp, skb);
+ }
+#else
+ dhd_event_logtrace_pkt_process(dhdp, skb);
+#endif /* PCIE_FULL_DONGLE */
-#ifdef DHD_WLFC_THREAD
- init_waitqueue_head(&dhd->pub.wlfc_wqhead);
- dhd->pub.wlfc_thread = kthread_create(dhd_wlfc_transfer_packets, &dhd->pub, "wlfc-thread");
- if (IS_ERR(dhd->pub.wlfc_thread)) {
- DHD_ERROR(("create wlfc thread failed\n"));
- goto fail;
- } else {
- wake_up_process(dhd->pub.wlfc_thread);
+ /* If the DHD_DONOT_FORWARD_BCMEVENT_AS_NETWORK_PKT macro is defined,
+ * the Info Ring event and WLC_E_TRACE event are freed here in DHD;
+ * otherwise the skb is always sent up to the network layers.
+ */
+#ifdef DHD_DONOT_FORWARD_BCMEVENT_AS_NETWORK_PKT
+#ifdef DHD_USE_STATIC_CTRLBUF
+ PKTFREE_STATIC(dhdp->osh, skb, FALSE);
+#else
+ PKTFREE(dhdp->osh, skb, FALSE);
+#endif /* DHD_USE_STATIC_CTRLBUF */
+#else /* !DHD_DONOT_FORWARD_BCMEVENT_AS_NETWORK_PKT */
+ /* Do not call netif_receive_skb, as this workqueue scheduler is not from NAPI.
+ * Also, since we are not in interrupt context, do not call netif_rx; instead call
+ * netif_rx_ni (for kernel >= 2.6), which does netif_rx, disables irqs, raises the
+ * NET_RX softirq and re-enables interrupts
+ */
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)
+ netif_rx_ni(skb);
+#else
+ {
+ ulong flags;
+ netif_rx(skb);
+ local_irq_save(flags);
+ RAISE_RX_SOFTIRQ();
+ local_irq_restore(flags);
+ }
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0) */
+#endif /* DHD_DONOT_FORWARD_BCMEVENT_AS_NETWORK_PKT */
}
-#endif /* DHD_WLFC_THREAD */
-#endif /* PROP_TXSTATUS */
+}
- /* Initialize other structure content */
- init_waitqueue_head(&dhd->ioctl_resp_wait);
- init_waitqueue_head(&dhd->d3ack_wait);
- init_waitqueue_head(&dhd->ctrl_wait);
- init_waitqueue_head(&dhd->dhd_bus_busy_state_wait);
- init_waitqueue_head(&dhd->dmaxfer_wait);
- init_waitqueue_head(&dhd->pub.tx_completion_wait);
- dhd->pub.dhd_bus_busy_state = 0;
- /* Initialize the spinlocks */
- spin_lock_init(&dhd->sdlock);
- spin_lock_init(&dhd->txqlock);
- spin_lock_init(&dhd->dhd_lock);
- spin_lock_init(&dhd->rxf_lock);
-#ifdef WLTDLS
- spin_lock_init(&dhd->pub.tdls_lock);
-#endif /* WLTDLS */
-#if defined(RXFRAME_THREAD)
- dhd->rxthread_enabled = TRUE;
-#endif /* defined(RXFRAME_THREAD) */
+void
+dhd_event_logtrace_enqueue(dhd_pub_t *dhdp, int ifidx, void *pktbuf)
+{
+ dhd_info_t *dhd = (dhd_info_t *)dhdp->info;
-#ifdef DHDTCPACK_SUPPRESS
- spin_lock_init(&dhd->tcpack_lock);
-#endif /* DHDTCPACK_SUPPRESS */
+#ifdef PCIE_FULL_DONGLE
+ /* Add ifidx in the PKTTAG */
+ DHD_PKTTAG_SET_IFID((dhd_pkttag_fr_t *)PKTTAG(pktbuf), ifidx);
+#endif /* PCIE_FULL_DONGLE */
+ skb_queue_tail(&dhd->evt_trace_queue, pktbuf);
- /* Initialize Wakelock stuff */
- spin_lock_init(&dhd->wakelock_spinlock);
- spin_lock_init(&dhd->wakelock_evt_spinlock);
- DHD_OS_WAKE_LOCK_INIT(dhd);
- dhd->wakelock_counter = 0;
- /* wakelocks prevent a system from going into a low power state */
-#ifdef CONFIG_HAS_WAKELOCK
- // terence 20161023: can not destroy wl_wifi when wlan down, it will happen null pointer in dhd_ioctl_entry
- wake_lock_init(&dhd->wl_wifi, WAKE_LOCK_SUSPEND, "wlan_wake");
- wake_lock_init(&dhd->wl_wdwake, WAKE_LOCK_SUSPEND, "wlan_wd_wake");
-#endif /* CONFIG_HAS_WAKELOCK */
+ schedule_work(&dhd->event_log_dispatcher_work);
+}
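/*
 * Illustrative pattern sketch, not part of the patch: the enqueue side above
 * runs in tasklet/DPC context and only appends to an skb queue and schedules
 * the work item; all parsing is deferred to process context. A stripped-down
 * version of the same producer side, using only generic kernel APIs and
 * hypothetical names:
 */
struct example_logtrace_ctx {
	struct sk_buff_head queue;	/* lock-protected FIFO of raw event skbs */
	struct work_struct work;	/* dispatcher that dequeues and parses */
};

static void example_logtrace_enqueue(struct example_logtrace_ctx *ctx, struct sk_buff *skb)
{
	skb_queue_tail(&ctx->queue, skb);	/* safe from atomic context */
	schedule_work(&ctx->work);		/* run the dispatcher later in process context */
}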
- mutex_init(&dhd->dhd_net_if_mutex);
- mutex_init(&dhd->dhd_suspend_mutex);
-#if defined(PKT_FILTER_SUPPORT) && defined(APF)
- mutex_init(&dhd->dhd_apf_mutex);
-#endif /* PKT_FILTER_SUPPORT && APF */
- dhd_state |= DHD_ATTACH_STATE_WAKELOCKS_INIT;
+void
+dhd_event_logtrace_flush_queue(dhd_pub_t *dhdp)
+{
+ dhd_info_t *dhd = (dhd_info_t *)dhdp->info;
+ struct sk_buff *skb;
- /* Attach and link in the protocol */
- if (dhd_prot_attach(&dhd->pub) != 0) {
- DHD_ERROR(("dhd_prot_attach failed\n"));
- goto fail;
+ while ((skb = skb_dequeue(&dhd->evt_trace_queue)) != NULL) {
+#ifdef DHD_USE_STATIC_CTRLBUF
+ PKTFREE_STATIC(dhdp->osh, skb, FALSE);
+#else
+ PKTFREE(dhdp->osh, skb, FALSE);
+#endif /* DHD_USE_STATIC_CTRLBUF */
}
- dhd_state |= DHD_ATTACH_STATE_PROT_ATTACH;
+}
+#endif /* SHOW_LOGTRACE */
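/*
 * Illustrative teardown sketch, not shown in this hunk and therefore an
 * assumption: on detach the dispatcher work is normally cancelled before the
 * queue is flushed, so nothing dequeues concurrently with the flush. The
 * wrapper name is hypothetical; cancel_work_sync() is the standard kernel API.
 */
static void example_logtrace_teardown(dhd_pub_t *dhdp)
{
	dhd_info_t *dhd = dhdp->info;

	cancel_work_sync(&dhd->event_log_dispatcher_work);	/* wait for a running dispatch */
	dhd_event_logtrace_flush_queue(dhdp);			/* then free anything still queued */
}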
-#ifdef WL_CFG80211
- spin_lock_init(&dhd->pub.up_lock);
- /* Attach and link in the cfg80211 */
- if (unlikely(wl_cfg80211_attach(net, &dhd->pub))) {
- DHD_ERROR(("wl_cfg80211_attach failed\n"));
- goto fail;
- }
+/** Called when a frame is received by the dongle on interface 'ifidx' */
+void
+dhd_rx_frame(dhd_pub_t *dhdp, int ifidx, void *pktbuf, int numpkt, uint8 chan)
+{
+ dhd_info_t *dhd = (dhd_info_t *)dhdp->info;
+ struct sk_buff *skb;
+ uchar *eth;
+ uint len;
+ void *data, *pnext = NULL;
+ int i;
+ dhd_if_t *ifp;
+ wl_event_msg_t event;
+ int tout_rx = 0;
+ int tout_ctrl = 0;
+ void *skbhead = NULL;
+ void *skbprev = NULL;
+ uint16 protocol;
+ unsigned char *dump_data;
+#ifdef DHD_MCAST_REGEN
+ uint8 interface_role;
+ if_flow_lkup_t *if_flow_lkup;
+ unsigned long flags;
+#endif
+#ifdef DHD_WAKE_STATUS
+ int pkt_wake = 0;
+ wake_counts_t *wcp = NULL;
+#endif /* DHD_WAKE_STATUS */
- dhd_monitor_init(&dhd->pub);
- dhd_state |= DHD_ATTACH_STATE_CFG80211;
-#endif // endif
+ DHD_TRACE(("%s: Enter\n", __FUNCTION__));
-#if defined(WL_EXT_IAPSTA) || defined(USE_IW) || defined(WL_ESCAN)
- if (wl_ext_event_attach(net, &dhd->pub) != 0) {
- DHD_ERROR(("wl_ext_event_attach failed\n"));
- goto fail;
- }
-#ifdef WL_ESCAN
- /* Attach and link in the escan */
- if (wl_escan_attach(net, &dhd->pub) != 0) {
- DHD_ERROR(("wl_escan_attach failed\n"));
- goto fail;
- }
-#endif /* WL_ESCAN */
-#ifdef WL_EXT_IAPSTA
- if (wl_ext_iapsta_attach(&dhd->pub) != 0) {
- DHD_ERROR(("wl_ext_iapsta_attach failed\n"));
- goto fail;
- }
-#endif /* WL_EXT_IAPSTA */
-#endif /* WL_EXT_IAPSTA || USE_IW || WL_ESCAN */
-#if defined(WL_WIRELESS_EXT)
- /* Attach and link in the iw */
- if (wl_iw_attach(net, &dhd->pub) != 0) {
- DHD_ERROR(("wl_iw_attach failed\n"));
- goto fail;
- }
- dhd_state |= DHD_ATTACH_STATE_WL_ATTACH;
-#endif /* defined(WL_WIRELESS_EXT) */
+ for (i = 0; pktbuf && i < numpkt; i++, pktbuf = pnext) {
+ struct ether_header *eh;
+
+ pnext = PKTNEXT(dhdp->osh, pktbuf);
+ PKTSETNEXT(dhdp->osh, pktbuf, NULL);
+ /* info ring "debug" data, which is not a 802.3 frame, is sent/hacked with a
+ * special ifidx of DHD_EVENT_IF. This is just internal to dhd to get the data from
+ * dhd_msgbuf.c:dhd_prot_infobuf_cmplt_process() to here (dhd_rx_frame).
+ */
+ if (ifidx == DHD_EVENT_IF) {
+ /* Event msg printing is called from dhd_rx_frame, which runs in Tasklet
+ * context for PCIe FD and in DPC context for other buses. If we get a
+ * bunch of events from the dongle, printing all of them from Tasklet/DPC
+ * context, in the data path no less, is costly.
+ * Also, in the newer dongle SW (4359, 4355 onwards) console prints come
+ * up as events of type WLC_E_TRACE.
+ * We therefore print these console logs from WorkQueue context: the SKB is
+ * enqueued here, dequeued in the WorkQueue, and freed there only if
+ * DHD_DONOT_FORWARD_BCMEVENT_AS_NETWORK_PKT is defined
+ */
#ifdef SHOW_LOGTRACE
- ret = dhd_init_logstrs_array(osh, &dhd->event_data);
- if (ret == BCME_OK) {
- dhd_init_static_strs_array(osh, &dhd->event_data, st_str_file_path, map_file_path);
- dhd_init_static_strs_array(osh, &dhd->event_data, rom_st_str_file_path,
- rom_map_file_path);
- dhd_state |= DHD_ATTACH_LOGTRACE_INIT;
- }
+ dhd_event_logtrace_enqueue(dhdp, ifidx, pktbuf);
+#else /* !SHOW_LOGTRACE */
+ /* If SHOW_LOGTRACE not defined and ifidx is DHD_EVENT_IF,
+ * free the PKT here itself
+ */
+#ifdef DHD_USE_STATIC_CTRLBUF
+ PKTFREE_STATIC(dhdp->osh, pktbuf, FALSE);
+#else
+ PKTFREE(dhdp->osh, pktbuf, FALSE);
+#endif /* DHD_USE_STATIC_CTRLBUF */
#endif /* SHOW_LOGTRACE */
+ continue;
+ }
+#ifdef DHD_WAKE_STATUS
+#ifdef BCMDBUS
+ wcp = NULL;
+#else
+ pkt_wake = dhd_bus_get_bus_wake(dhdp);
+ wcp = dhd_bus_get_wakecount(dhdp);
+#endif /* BCMDBUS */
+ if (wcp == NULL) {
+ /* If wakeinfo count buffer is null do not update wake count values */
+ pkt_wake = 0;
+ }
+#endif /* DHD_WAKE_STATUS */
- /* attach debug if support */
- if (dhd_os_dbg_attach(&dhd->pub)) {
- DHD_ERROR(("%s debug module attach failed\n", __FUNCTION__));
- goto fail;
- }
-#ifdef DEBUGABILITY
-#if defined(SHOW_LOGTRACE) && defined(DBG_RING_LOG_INIT_DEFAULT)
- /* enable verbose ring to support dump_trace_buf */
- dhd_os_start_logging(&dhd->pub, FW_VERBOSE_RING_NAME, 3, 0, 0, 0);
-#endif /* SHOW_LOGTRACE */
+ ifp = dhd->iflist[ifidx];
+ if (ifp == NULL) {
+ DHD_ERROR(("%s: ifp is NULL. drop packet\n",
+ __FUNCTION__));
+ PKTCFREE(dhdp->osh, pktbuf, FALSE);
+ continue;
+ }
-#ifdef DBG_PKT_MON
- dhd->pub.dbg->pkt_mon_lock = dhd_os_spin_lock_init(dhd->pub.osh);
-#ifdef DBG_PKT_MON_INIT_DEFAULT
- dhd_os_dbg_attach_pkt_monitor(&dhd->pub);
-#endif /* DBG_PKT_MON_INIT_DEFAULT */
-#endif /* DBG_PKT_MON */
-#endif /* DEBUGABILITY */
+ eh = (struct ether_header *)PKTDATA(dhdp->osh, pktbuf);
-#ifdef DHD_STATUS_LOGGING
- dhd->pub.statlog = dhd_attach_statlog(&dhd->pub, MAX_STATLOG_ITEM,
- MAX_STATLOG_REQ_ITEM, STATLOG_LOGBUF_LEN);
- if (dhd->pub.statlog == NULL) {
- DHD_ERROR(("%s: alloc statlog failed\n", __FUNCTION__));
- }
-#endif /* DHD_STATUS_LOGGING */
+ /* Drop only data packets received before the net device is registered, to avoid a kernel panic */
+#ifndef PROP_TXSTATUS_VSDB
+ if ((!ifp->net || ifp->net->reg_state != NETREG_REGISTERED) &&
+ (ntoh16(eh->ether_type) != ETHER_TYPE_BRCM))
+#else
+ if ((!ifp->net || ifp->net->reg_state != NETREG_REGISTERED || !dhd->pub.up) &&
+ (ntoh16(eh->ether_type) != ETHER_TYPE_BRCM))
+#endif /* PROP_TXSTATUS_VSDB */
+ {
+ DHD_ERROR(("%s: net device is NOT registered yet. drop packet\n",
+ __FUNCTION__));
+ PKTCFREE(dhdp->osh, pktbuf, FALSE);
+ continue;
+ }
-#ifdef DHD_LOG_DUMP
- dhd_log_dump_init(&dhd->pub);
-#endif /* DHD_LOG_DUMP */
-#ifdef DHD_PKTDUMP_ROAM
- dhd_dump_pkt_init(&dhd->pub);
-#endif /* DHD_PKTDUMP_ROAM */
+#ifdef PROP_TXSTATUS
+ if (dhd_wlfc_is_header_only_pkt(dhdp, pktbuf)) {
+ /* WLFC may send a header-only packet when
+ there is an urgent message but no packet to
+ piggy-back on
+ */
+ PKTCFREE(dhdp->osh, pktbuf, FALSE);
+ continue;
+ }
+#endif
+#ifdef DHD_L2_FILTER
+ /* If block_ping is enabled drop the ping packet */
+ if (ifp->block_ping) {
+ if (bcm_l2_filter_block_ping(dhdp->osh, pktbuf) == BCME_OK) {
+ PKTCFREE(dhdp->osh, pktbuf, FALSE);
+ continue;
+ }
+ }
+ if (ifp->grat_arp && DHD_IF_ROLE_STA(dhdp, ifidx)) {
+ if (bcm_l2_filter_gratuitous_arp(dhdp->osh, pktbuf) == BCME_OK) {
+ PKTCFREE(dhdp->osh, pktbuf, FALSE);
+ continue;
+ }
+ }
+ if (ifp->parp_enable && DHD_IF_ROLE_AP(dhdp, ifidx)) {
+ int ret = dhd_l2_filter_pkt_handle(dhdp, ifidx, pktbuf, FALSE);
- if (dhd_sta_pool_init(&dhd->pub, DHD_MAX_STA) != BCME_OK) {
- DHD_ERROR(("%s: Initializing %u sta\n", __FUNCTION__, DHD_MAX_STA));
- goto fail;
- }
+ /* Drop the packet if the l2 filter has already processed it,
+ * otherwise continue with the normal path
+ */
+ if (ret == BCME_OK) {
+ PKTCFREE(dhdp->osh, pktbuf, TRUE);
+ continue;
+ }
+ }
+#endif /* DHD_L2_FILTER */
-#ifdef DHD_PCIE_NATIVE_RUNTIMEPM
- dhd->tx_wq = alloc_workqueue("bcmdhd-tx-wq", WQ_HIGHPRI | WQ_UNBOUND | WQ_MEM_RECLAIM, 1);
- if (!dhd->tx_wq) {
- DHD_ERROR(("%s: alloc_workqueue(bcmdhd-tx-wq) failed\n", __FUNCTION__));
- goto fail;
- }
- dhd->rx_wq = alloc_workqueue("bcmdhd-rx-wq", WQ_HIGHPRI | WQ_UNBOUND | WQ_MEM_RECLAIM, 1);
- if (!dhd->rx_wq) {
- DHD_ERROR(("%s: alloc_workqueue(bcmdhd-rx-wq) failed\n", __FUNCTION__));
- destroy_workqueue(dhd->tx_wq);
- dhd->tx_wq = NULL;
- goto fail;
- }
-#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
+#ifdef DHD_MCAST_REGEN
+ DHD_FLOWID_LOCK(dhdp->flowid_lock, flags);
+ if_flow_lkup = (if_flow_lkup_t *)dhdp->if_flow_lkup;
+ ASSERT(if_flow_lkup);
-#ifndef BCMDBUS
- /* Set up the watchdog timer */
- init_timer_compat(&dhd->timer, dhd_watchdog, dhd);
- dhd->default_wd_interval = dhd_watchdog_ms;
+ interface_role = if_flow_lkup[ifidx].role;
+ DHD_FLOWID_UNLOCK(dhdp->flowid_lock, flags);
- if (dhd_watchdog_prio >= 0) {
- /* Initialize watchdog thread */
- PROC_START(dhd_watchdog_thread, dhd, &dhd->thr_wdt_ctl, 0, "dhd_watchdog_thread");
- if (dhd->thr_wdt_ctl.thr_pid < 0) {
- goto fail;
+ if (ifp->mcast_regen_bss_enable && (interface_role != WLC_E_IF_ROLE_WDS) &&
+ !DHD_IF_ROLE_AP(dhdp, ifidx) &&
+ ETHER_ISUCAST(eh->ether_dhost)) {
+ if (dhd_mcast_reverse_translation(eh) == BCME_OK) {
+#ifdef DHD_PSTA
+ /* Change bsscfg to primary bsscfg for unicast-multicast packets */
+ if ((dhd_get_psta_mode(dhdp) == DHD_MODE_PSTA) ||
+ (dhd_get_psta_mode(dhdp) == DHD_MODE_PSR)) {
+ if (ifidx != 0) {
+ /* Let the primary PSTA interface handle this
+ * frame after the unicast-to-multicast conversion
+ */
+ ifp = dhd_get_ifp(dhdp, 0);
+ ASSERT(ifp);
+ }
+ }
+ }
+#endif /* DHD_PSTA */
}
+#endif /* DHD_MCAST_REGEN */
- } else {
- dhd->thr_wdt_ctl.thr_pid = -1;
- }
+#ifdef DHD_WMF
+ /* WMF processing for multicast packets */
+ if (ifp->wmf.wmf_enable && (ETHER_ISMULTI(eh->ether_dhost))) {
+ dhd_sta_t *sta;
+ int ret;
-#ifdef SHOW_LOGTRACE
- skb_queue_head_init(&dhd->evt_trace_queue);
+ sta = dhd_find_sta(dhdp, ifidx, (void *)eh->ether_shost);
+ ret = dhd_wmf_packets_handle(dhdp, pktbuf, sta, ifidx, 1);
+ switch (ret) {
+ case WMF_TAKEN:
+ /* The packet is taken by WMF. Continue to next iteration */
+ continue;
+ case WMF_DROP:
+ /* Packet DROP decision by WMF. Toss it */
+ DHD_ERROR(("%s: WMF decides to drop packet\n",
+ __FUNCTION__));
+ PKTCFREE(dhdp->osh, pktbuf, FALSE);
+ continue;
+ default:
+ /* Continue the receive path */
+ break;
+ }
+ }
+#endif /* DHD_WMF */
- /* Create ring proc entries */
- dhd_dbg_ring_proc_create(&dhd->pub);
-#endif /* SHOW_LOGTRACE */
+#ifdef DHDTCPACK_SUPPRESS
+ dhd_tcpdata_info_get(dhdp, pktbuf);
+#endif
+ skb = PKTTONATIVE(dhdp->osh, pktbuf);
- /* Set up the bottom half handler */
- if (dhd_dpc_prio >= 0) {
- /* Initialize DPC thread */
- PROC_START(dhd_dpc_thread, dhd, &dhd->thr_dpc_ctl, 0, "dhd_dpc");
- if (dhd->thr_dpc_ctl.thr_pid < 0) {
- goto fail;
+ ASSERT(ifp);
+ skb->dev = ifp->net;
+#ifdef DHD_WET
+ /* wet related packet proto manipulation should be done in DHD
+ * since dongle doesn't have complete payload
+ */
+ if (WET_ENABLED(&dhd->pub) && (dhd_wet_recv_proc(dhd->pub.wet_info,
+ pktbuf) < 0)) {
+ DHD_INFO(("%s:%s: wet recv proc failed\n",
+ __FUNCTION__, dhd_ifname(dhdp, ifidx)));
}
- } else {
- /* use tasklet for dpc */
- tasklet_init(&dhd->tasklet, dhd_dpc, (ulong)dhd);
- dhd->thr_dpc_ctl.thr_pid = -1;
- }
+#endif /* DHD_WET */
- if (dhd->rxthread_enabled) {
- bzero(&dhd->pub.skbbuf[0], sizeof(void *) * MAXSKBPEND);
- /* Initialize RXF thread */
- PROC_START(dhd_rxf_thread, dhd, &dhd->thr_rxf_ctl, 0, "dhd_rxf");
- if (dhd->thr_rxf_ctl.thr_pid < 0) {
- goto fail;
+#ifdef DHD_PSTA
+ if (PSR_ENABLED(dhdp) && (dhd_psta_proc(dhdp, ifidx, &pktbuf, FALSE) < 0)) {
+ DHD_ERROR(("%s:%s: psta recv proc failed\n", __FUNCTION__,
+ dhd_ifname(dhdp, ifidx)));
}
- }
-#endif /* !BCMDBUS */
-
- dhd_state |= DHD_ATTACH_STATE_THREADS_CREATED;
+#endif /* DHD_PSTA */
-#if defined(CONFIG_PM_SLEEP)
- if (!dhd_pm_notifier_registered) {
- dhd_pm_notifier_registered = TRUE;
- dhd->pm_notifier.notifier_call = dhd_pm_callback;
- dhd->pm_notifier.priority = 10;
- register_pm_notifier(&dhd->pm_notifier);
- }
+#ifdef PCIE_FULL_DONGLE
+ if ((DHD_IF_ROLE_AP(dhdp, ifidx) || DHD_IF_ROLE_P2PGO(dhdp, ifidx)) &&
+ (!ifp->ap_isolate)) {
+ eh = (struct ether_header *)PKTDATA(dhdp->osh, pktbuf);
+ if (ETHER_ISUCAST(eh->ether_dhost)) {
+ if (dhd_find_sta(dhdp, ifidx, (void *)eh->ether_dhost)) {
+ dhd_sendpkt(dhdp, ifidx, pktbuf);
+ continue;
+ }
+ } else {
+ void *npktbuf = PKTDUP(dhdp->osh, pktbuf);
+ if (npktbuf)
+ dhd_sendpkt(dhdp, ifidx, npktbuf);
+ }
+ }
+#endif /* PCIE_FULL_DONGLE */
-#endif /* CONFIG_PM_SLEEP */
+ /* Get the protocol, maintaining the skb around eth_type_trans().
+ * The main reason for this hack is a limitation of Linux 2.4, where
+ * 'eth_type_trans' uses 'net->hard_header_len' instead of ETH_HLEN
+ * for its internal skb_pull. To avoid copying the packet coming from
+ * the network stack when adding the BDC header, hardware header etc.,
+ * during network interface registration we set 'net->hard_header_len'
+ * to ETH_HLEN plus the extra space required for the BDC header,
+ * hardware header etc., and not just ETH_HLEN
+ */
+ eth = skb->data;
+ len = skb->len;
-#if defined(CONFIG_HAS_EARLYSUSPEND) && defined(DHD_USE_EARLYSUSPEND)
- dhd->early_suspend.level = EARLY_SUSPEND_LEVEL_BLANK_SCREEN + 20;
- dhd->early_suspend.suspend = dhd_early_suspend;
- dhd->early_suspend.resume = dhd_late_resume;
- register_early_suspend(&dhd->early_suspend);
- dhd_state |= DHD_ATTACH_STATE_EARLYSUSPEND_DONE;
-#endif /* CONFIG_HAS_EARLYSUSPEND && DHD_USE_EARLYSUSPEND */
+ dump_data = skb->data;
-#ifdef ARP_OFFLOAD_SUPPORT
- dhd->pend_ipaddr = 0;
- if (!dhd_inetaddr_notifier_registered) {
- dhd_inetaddr_notifier_registered = TRUE;
- register_inetaddr_notifier(&dhd_inetaddr_notifier);
- }
-#endif /* ARP_OFFLOAD_SUPPORT */
+ protocol = (skb->data[12] << 8) | skb->data[13];
+ if (protocol == ETHER_TYPE_802_1X) {
+ DBG_EVENT_LOG(dhdp, WIFI_EVENT_DRIVER_EAPOL_FRAME_RECEIVED);
+#ifdef DHD_8021X_DUMP
+ dhd_dump_eapol_4way_message(dhd_ifname(dhdp, ifidx), dump_data, FALSE);
+#endif /* DHD_8021X_DUMP */
+ dhd_conf_set_eapol_status(dhdp, dhd_ifname(dhdp, ifidx), dump_data);
+ }
-#if defined(CONFIG_IPV6) && defined(IPV6_NDO_SUPPORT)
- if (!dhd_inet6addr_notifier_registered) {
- dhd_inet6addr_notifier_registered = TRUE;
- register_inet6addr_notifier(&dhd_inet6addr_notifier);
- }
-#endif /* CONFIG_IPV6 && IPV6_NDO_SUPPORT */
- dhd->dhd_deferred_wq = dhd_deferred_work_init((void *)dhd);
- INIT_WORK(&dhd->dhd_hang_process_work, dhd_hang_process);
-#ifdef DEBUG_CPU_FREQ
- dhd->new_freq = alloc_percpu(int);
- dhd->freq_trans.notifier_call = dhd_cpufreq_notifier;
- cpufreq_register_notifier(&dhd->freq_trans, CPUFREQ_TRANSITION_NOTIFIER);
-#endif // endif
-#ifdef DHDTCPACK_SUPPRESS
-#ifdef BCMSDIO
- dhd_tcpack_suppress_set(&dhd->pub, TCPACK_SUP_DELAYTX);
-#elif defined(BCMPCIE)
- dhd_tcpack_suppress_set(&dhd->pub, TCPACK_SUP_HOLD);
-#else
- dhd_tcpack_suppress_set(&dhd->pub, TCPACK_SUP_OFF);
-#endif /* BCMSDIO */
-#endif /* DHDTCPACK_SUPPRESS */
+ if (protocol != ETHER_TYPE_BRCM && protocol == ETHER_TYPE_IP) {
+#ifdef DHD_DHCP_DUMP
+ dhd_dhcp_dump(dhd_ifname(dhdp, ifidx), dump_data, FALSE);
+#endif /* DHD_DHCP_DUMP */
+#ifdef DHD_ICMP_DUMP
+ dhd_icmp_dump(dhd_ifname(dhdp, ifidx), dump_data, FALSE);
+#endif /* DHD_ICMP_DUMP */
+ }
+#ifdef DHD_RX_DUMP
+ dhd_trx_dump(dhd_idx2net(dhdp, ifidx), dump_data, skb->len, FALSE);
+#endif /* DHD_RX_DUMP */
+#if defined(DHD_WAKE_STATUS) && defined(DHD_WAKEPKT_DUMP)
+ if (pkt_wake) {
+ prhex("[wakepkt_dump]", (char*)dump_data, MIN(len, 32));
+ }
+#endif /* DHD_WAKE_STATUS && DHD_WAKEPKT_DUMP */
-#if defined(BCM_DNGL_EMBEDIMAGE) || defined(BCM_REQUEST_FW)
-#endif /* defined(BCM_DNGL_EMBEDIMAGE) || defined(BCM_REQUEST_FW) */
+ skb->protocol = eth_type_trans(skb, skb->dev);
-#ifdef DHD_DEBUG_PAGEALLOC
- register_page_corrupt_cb(dhd_page_corrupt_cb, &dhd->pub);
-#endif /* DHD_DEBUG_PAGEALLOC */
+ if (skb->pkt_type == PACKET_MULTICAST) {
+ dhd->pub.rx_multicast++;
+ ifp->stats.multicast++;
+ }
-#if defined(DHD_LB)
+ skb->data = eth;
+ skb->len = len;
- dhd_lb_set_default_cpus(dhd);
- DHD_LB_STATS_INIT(&dhd->pub);
+#ifdef WLMEDIA_HTSF
+ dhd_htsf_addrxts(dhdp, pktbuf);
+#endif
+#ifdef DBG_PKT_MON
+ DHD_DBG_PKT_MON_RX(dhdp, skb);
+#endif /* DBG_PKT_MON */
+#ifdef DHD_PKT_LOGGING
+ DHD_PKTLOG_RX(dhdp, skb);
+#endif /* DHD_PKT_LOGGING */
+ /* Strip header, count, deliver upward */
+ skb_pull(skb, ETH_HLEN);
- /* Initialize the CPU Masks */
- if (dhd_cpumasks_init(dhd) == 0) {
- /* Now we have the current CPU maps, run through candidacy */
- dhd_select_cpu_candidacy(dhd);
+ /* Process special event packets and then discard them */
+ memset(&event, 0, sizeof(event));
- /* Register the call backs to CPU Hotplug sub-system */
- dhd_register_cpuhp_callback(dhd);
+ if (ntoh16(skb->protocol) == ETHER_TYPE_BRCM) {
+ bcm_event_msg_u_t evu;
+ int ret_event;
+ int event_type;
- } else {
- /*
- * We are unable to initialize CPU masks, so candidacy algorithm
- * won't run, but still Load Balancing will be honoured based
- * on the CPUs allocated for a given job statically during init
- */
- dhd->cpu_notifier.notifier_call = NULL;
- DHD_ERROR(("%s():dhd_cpumasks_init failed CPUs for JOB would be static\n",
- __FUNCTION__));
- }
+ ret_event = wl_host_event_get_data(
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 22)
+ skb_mac_header(skb),
+#else
+ skb->mac.raw,
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 22) */
+ len, &evu);
-#ifdef DHD_LB_TXP
-#ifdef DHD_LB_TXP_DEFAULT_ENAB
- /* Trun ON the feature by default */
- atomic_set(&dhd->lb_txp_active, 1);
+ if (ret_event != BCME_OK) {
+ DHD_ERROR(("%s: wl_host_event_get_data err = %d\n",
+ __FUNCTION__, ret_event));
+#ifdef DHD_USE_STATIC_CTRLBUF
+ PKTFREE_STATIC(dhdp->osh, pktbuf, FALSE);
#else
- /* Trun OFF the feature by default */
- atomic_set(&dhd->lb_txp_active, 0);
-#endif /* DHD_LB_TXP_DEFAULT_ENAB */
-#endif /* DHD_LB_TXP */
+ PKTFREE(dhdp->osh, pktbuf, FALSE);
+#endif
+ continue;
+ }
-#ifdef DHD_LB_RXP
- /* Trun ON the feature by default */
- atomic_set(&dhd->lb_rxp_active, 1);
-#endif /* DHD_LB_RXP */
+ memcpy(&event, &evu.event, sizeof(wl_event_msg_t));
+ event_type = ntoh32_ua((void *)&event.event_type);
+#ifdef SHOW_LOGTRACE
+ /* Event msg printing is called from dhd_rx_frame, which runs in Tasklet
+ * context for PCIe FD and in DPC context for other buses. If we get a
+ * bunch of events from the dongle, printing all of them from Tasklet/DPC
+ * context, in the data path no less, is costly.
+ * Also, in the newer dongle SW (4359, 4355 onwards) console prints come
+ * up as events of type WLC_E_TRACE.
+ * We therefore print these console logs from WorkQueue context: the SKB is
+ * enqueued here, dequeued in the WorkQueue, and freed there only if
+ * DHD_DONOT_FORWARD_BCMEVENT_AS_NETWORK_PKT is defined
+ */
+ if (event_type == WLC_E_TRACE) {
+ DHD_TRACE(("%s: WLC_E_TRACE\n", __FUNCTION__));
+ dhd_event_logtrace_enqueue(dhdp, ifidx, pktbuf);
+ continue;
+ }
+#endif /* SHOW_LOGTRACE */
- /* Initialize the Load Balancing Tasklets and Napi object */
-#if defined(DHD_LB_TXC)
- tasklet_init(&dhd->tx_compl_tasklet,
- dhd_lb_tx_compl_handler, (ulong)(&dhd->pub));
- INIT_WORK(&dhd->tx_compl_dispatcher_work, dhd_tx_compl_dispatcher_fn);
- DHD_INFO(("%s load balance init tx_compl_tasklet\n", __FUNCTION__));
-#endif /* DHD_LB_TXC */
-#if defined(DHD_LB_RXC)
- tasklet_init(&dhd->rx_compl_tasklet,
- dhd_lb_rx_compl_handler, (ulong)(&dhd->pub));
- INIT_WORK(&dhd->rx_compl_dispatcher_work, dhd_rx_compl_dispatcher_fn);
- DHD_INFO(("%s load balance init rx_compl_tasklet\n", __FUNCTION__));
-#endif /* DHD_LB_RXC */
+ ret_event = dhd_wl_host_event(dhd, ifidx,
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 22)
+ skb_mac_header(skb),
+#else
+ skb->mac.raw,
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 22) */
+ len, &event, &data);
-#if defined(DHD_LB_RXP)
- __skb_queue_head_init(&dhd->rx_pend_queue);
- skb_queue_head_init(&dhd->rx_napi_queue);
- /* Initialize the work that dispatches NAPI job to a given core */
- INIT_WORK(&dhd->rx_napi_dispatcher_work, dhd_rx_napi_dispatcher_fn);
- DHD_INFO(("%s load balance init rx_napi_queue\n", __FUNCTION__));
-#endif /* DHD_LB_RXP */
+ wl_event_to_host_order(&event);
+ if (!tout_ctrl)
+ tout_ctrl = DHD_PACKET_TIMEOUT_MS;
-#if defined(DHD_LB_TXP)
- INIT_WORK(&dhd->tx_dispatcher_work, dhd_tx_dispatcher_work);
- skb_queue_head_init(&dhd->tx_pend_queue);
- /* Initialize the work that dispatches TX job to a given core */
- tasklet_init(&dhd->tx_tasklet,
- dhd_lb_tx_handler, (ulong)(dhd));
- DHD_INFO(("%s load balance init tx_pend_queue\n", __FUNCTION__));
-#endif /* DHD_LB_TXP */
+#if defined(PNO_SUPPORT)
+ if (event_type == WLC_E_PFN_NET_FOUND) {
+ /* enforce custom wake lock to guarantee that the kernel is not suspended */
+ tout_ctrl = CUSTOM_PNO_EVENT_LOCK_xTIME * DHD_PACKET_TIMEOUT_MS;
+ }
+#endif /* PNO_SUPPORT */
+ if (numpkt != 1) {
+ DHD_TRACE(("%s: Got BRCM event packet in a chained packet.\n",
+ __FUNCTION__));
+ }
- dhd_state |= DHD_ATTACH_STATE_LB_ATTACH_DONE;
-#endif /* DHD_LB */
+#ifdef DHD_WAKE_STATUS
+ if (unlikely(pkt_wake)) {
+#ifdef DHD_WAKE_EVENT_STATUS
+ if (event.event_type < WLC_E_LAST) {
+ wcp->rc_event[event.event_type]++;
+ wcp->rcwake++;
+ pkt_wake = 0;
+ }
+#endif /* DHD_WAKE_EVENT_STATUS */
+ }
+#endif /* DHD_WAKE_STATUS */
-#if defined(DNGL_AXI_ERROR_LOGGING) && defined(DHD_USE_WQ_FOR_DNGL_AXI_ERROR)
- INIT_WORK(&dhd->axi_error_dispatcher_work, dhd_axi_error_dispatcher_fn);
-#endif /* DNGL_AXI_ERROR_LOGGING && DHD_USE_WQ_FOR_DNGL_AXI_ERROR */
+ /* For a delete virtual interface event, wl_host_event returns a positive
+ * i/f index; do not proceed, just free the pkt.
+ */
+ if ((event_type == WLC_E_IF) && (ret_event > 0)) {
+ DHD_ERROR(("%s: interface is deleted. Free event packet\n",
+ __FUNCTION__));
+#ifdef DHD_USE_STATIC_CTRLBUF
+ PKTFREE_STATIC(dhdp->osh, pktbuf, FALSE);
+#else
+ PKTFREE(dhdp->osh, pktbuf, FALSE);
+#endif
+ continue;
+ }
-#if defined(BCMPCIE)
- dhd->pub.extended_trap_data = MALLOCZ(osh, BCMPCIE_EXT_TRAP_DATA_MAXLEN);
- if (dhd->pub.extended_trap_data == NULL) {
- DHD_ERROR(("%s: Failed to alloc extended_trap_data\n", __FUNCTION__));
- }
-#ifdef DNGL_AXI_ERROR_LOGGING
- dhd->pub.axi_err_dump = MALLOCZ(osh, sizeof(dhd_axi_error_dump_t));
- if (dhd->pub.axi_err_dump == NULL) {
- DHD_ERROR(("%s: Failed to alloc axi_err_dump\n", __FUNCTION__));
- }
-#endif /* DNGL_AXI_ERROR_LOGGING */
-#endif /* BCMPCIE && ETD */
+#ifdef DHD_DONOT_FORWARD_BCMEVENT_AS_NETWORK_PKT
+#ifdef DHD_USE_STATIC_CTRLBUF
+ PKTFREE_STATIC(dhdp->osh, pktbuf, FALSE);
+#else
+ PKTFREE(dhdp->osh, pktbuf, FALSE);
+#endif /* DHD_USE_STATIC_CTRLBUF */
+ continue;
+#else
+ /*
+ * For the event packets, there is a possibility
+ * of ifidx getting modified. Thus update the ifp
+ * once again.
+ */
+ ASSERT(ifidx < DHD_MAX_IFS && dhd->iflist[ifidx]);
+ ifp = dhd->iflist[ifidx];
+#ifndef PROP_TXSTATUS_VSDB
+ if (!(ifp && ifp->net && (ifp->net->reg_state == NETREG_REGISTERED)))
+#else
+ if (!(ifp && ifp->net && (ifp->net->reg_state == NETREG_REGISTERED) &&
+ dhd->pub.up))
+#endif /* PROP_TXSTATUS_VSDB */
+ {
+ DHD_ERROR(("%s: net device is NOT registered. drop event packet\n",
+ __FUNCTION__));
+#ifdef DHD_USE_STATIC_CTRLBUF
+ PKTFREE_STATIC(dhdp->osh, pktbuf, FALSE);
+#else
+ PKTFREE(dhdp->osh, pktbuf, FALSE);
+#endif
+ continue;
+ }
+#endif /* DHD_DONOT_FORWARD_BCMEVENT_AS_NETWORK_PKT */
+ } else {
+ tout_rx = DHD_PACKET_TIMEOUT_MS;
-#ifdef SHOW_LOGTRACE
- if (dhd_init_logtrace_process(dhd) != BCME_OK) {
- goto fail;
- }
-#endif /* SHOW_LOGTRACE */
+#ifdef PROP_TXSTATUS
+ dhd_wlfc_save_rxpath_ac_time(dhdp, (uint8)PKTPRIO(skb));
+#endif /* PROP_TXSTATUS */
- DHD_SSSR_MEMPOOL_INIT(&dhd->pub);
+#ifdef DHD_WAKE_STATUS
+ if (unlikely(pkt_wake)) {
+ wcp->rxwake++;
+#ifdef DHD_WAKE_RX_STATUS
+#define ETHER_ICMP6_HEADER 20
+#define ETHER_IPV6_SADDR (ETHER_ICMP6_HEADER + 2)
+#define ETHER_IPV6_DAADR (ETHER_IPV6_SADDR + IPV6_ADDR_LEN)
+#define ETHER_ICMPV6_TYPE (ETHER_IPV6_DAADR + IPV6_ADDR_LEN)
-#ifdef EWP_EDL
- if (host_edl_support) {
- if (DHD_EDL_MEM_INIT(&dhd->pub) != BCME_OK) {
- host_edl_support = FALSE;
+ if (ntoh16(skb->protocol) == ETHER_TYPE_ARP) /* ARP */
+ wcp->rx_arp++;
+ if (dump_data[0] == 0xFF) { /* Broadcast */
+ wcp->rx_bcast++;
+ } else if (dump_data[0] & 0x01) { /* Multicast */
+ wcp->rx_mcast++;
+ if (ntoh16(skb->protocol) == ETHER_TYPE_IPV6) {
+ wcp->rx_multi_ipv6++;
+ if ((skb->len > ETHER_ICMP6_HEADER) &&
+ (dump_data[ETHER_ICMP6_HEADER] == IPPROTO_ICMPV6)) {
+ wcp->rx_icmpv6++;
+ if (skb->len > ETHER_ICMPV6_TYPE) {
+ switch (dump_data[ETHER_ICMPV6_TYPE]) {
+ case NDISC_ROUTER_ADVERTISEMENT:
+ wcp->rx_icmpv6_ra++;
+ break;
+ case NDISC_NEIGHBOUR_ADVERTISEMENT:
+ wcp->rx_icmpv6_na++;
+ break;
+ case NDISC_NEIGHBOUR_SOLICITATION:
+ wcp->rx_icmpv6_ns++;
+ break;
+ }
+ }
+ }
+ } else if (dump_data[2] == 0x5E) {
+ wcp->rx_multi_ipv4++;
+ } else {
+ wcp->rx_multi_other++;
+ }
+ } else { /* Unicast */
+ wcp->rx_ucast++;
+ }
+#undef ETHER_ICMP6_HEADER
+#undef ETHER_IPV6_SADDR
+#undef ETHER_IPV6_DAADR
+#undef ETHER_ICMPV6_TYPE
+#endif /* DHD_WAKE_RX_STATUS */
+ pkt_wake = 0;
+ }
+#endif /* DHD_WAKE_STATUS */
}
- }
-#endif /* EWP_EDL */
- (void)dhd_sysfs_init(dhd);
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 11, 0)
+ if (ifp->net)
+ ifp->net->last_rx = jiffies;
+#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(4, 11, 0) */
-#ifdef WL_NATOE
- /* Open Netlink socket for NF_CONNTRACK notifications */
- dhd->pub.nfct = dhd_ct_open(&dhd->pub, NFNL_SUBSYS_CTNETLINK | NFNL_SUBSYS_CTNETLINK_EXP,
- CT_ALL);
-#endif /* WL_NATOE */
+ if (ntoh16(skb->protocol) != ETHER_TYPE_BRCM) {
+ dhdp->dstats.rx_bytes += skb->len;
+ dhdp->rx_packets++; /* Local count */
+ ifp->stats.rx_bytes += skb->len;
+ ifp->stats.rx_packets++;
+ }
- dhd_state |= DHD_ATTACH_STATE_DONE;
- dhd->dhd_state = dhd_state;
+ if (in_interrupt()) {
+ bcm_object_trace_opr(skb, BCM_OBJDBG_REMOVE,
+ __FUNCTION__, __LINE__);
+ DHD_PERIM_UNLOCK_ALL((dhd->fwder_unit % FWDER_MAX_UNIT));
+#if defined(DHD_LB_RXP)
+ netif_receive_skb(skb);
+#else /* !defined(DHD_LB_RXP) */
+ netif_rx(skb);
+#endif /* !defined(DHD_LB_RXP) */
+ DHD_PERIM_LOCK_ALL((dhd->fwder_unit % FWDER_MAX_UNIT));
+ } else {
+ if (dhd->rxthread_enabled) {
+ if (!skbhead)
+ skbhead = skb;
+ else
+ PKTSETNEXT(dhdp->osh, skbprev, skb);
+ skbprev = skb;
+ } else {
- dhd_found++;
-
-#ifdef CSI_SUPPORT
- dhd_csi_init(&dhd->pub);
-#endif /* CSI_SUPPORT */
-
-#ifdef DHD_DUMP_MNGR
- dhd->pub.dump_file_manage =
- (dhd_dump_file_manage_t *)MALLOCZ(dhd->pub.osh, sizeof(dhd_dump_file_manage_t));
- if (unlikely(!dhd->pub.dump_file_manage)) {
- DHD_ERROR(("%s(): could not allocate memory for - "
- "dhd_dump_file_manage_t\n", __FUNCTION__));
- }
-#endif /* DHD_DUMP_MNGR */
-#ifdef DHD_FW_COREDUMP
- /* Set memdump default values */
- dhd->pub.memdump_enabled = DUMP_MEMFILE_BUGON;
- /* Check the memdump capability */
- dhd_get_memdump_info(&dhd->pub);
-#endif /* DHD_FW_COREDUMP */
+ /* If the receive is not processed inside an ISR,
+ * the softirqd must be woken explicitly to service
+ * the NET_RX_SOFTIRQ. In 2.6 kernels, this is handled
+ * by netif_rx_ni(), but in earlier kernels, we need
+ * to do it manually.
+ */
+ bcm_object_trace_opr(skb, BCM_OBJDBG_REMOVE,
+ __FUNCTION__, __LINE__);
-#ifdef DHD_ERPOM
- if (enable_erpom) {
- pom_handler = &dhd->pub.pom_wlan_handler;
- pom_handler->func_id = WLAN_FUNC_ID;
- pom_handler->handler = (void *)g_dhd_pub;
- pom_handler->power_off = dhd_wlan_power_off_handler;
- pom_handler->power_on = dhd_wlan_power_on_handler;
-
- dhd->pub.pom_func_register = NULL;
- dhd->pub.pom_func_deregister = NULL;
- dhd->pub.pom_toggle_reg_on = NULL;
-
- dhd->pub.pom_func_register = symbol_get(pom_func_register);
- dhd->pub.pom_func_deregister = symbol_get(pom_func_deregister);
- dhd->pub.pom_toggle_reg_on = symbol_get(pom_toggle_reg_on);
-
- symbol_put(pom_func_register);
- symbol_put(pom_func_deregister);
- symbol_put(pom_toggle_reg_on);
-
- if (!dhd->pub.pom_func_register ||
- !dhd->pub.pom_func_deregister ||
- !dhd->pub.pom_toggle_reg_on) {
- DHD_ERROR(("%s, enable_erpom enabled through module parameter but "
- "POM is not loaded\n", __FUNCTION__));
- ASSERT(0);
- goto fail;
+#if defined(DHD_LB_RXP)
+ DHD_PERIM_UNLOCK_ALL((dhd->fwder_unit % FWDER_MAX_UNIT));
+ netif_receive_skb(skb);
+ DHD_PERIM_LOCK_ALL((dhd->fwder_unit % FWDER_MAX_UNIT));
+#else /* !defined(DHD_LB_RXP) */
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)
+ DHD_PERIM_UNLOCK_ALL((dhd->fwder_unit % FWDER_MAX_UNIT));
+ netif_rx_ni(skb);
+ DHD_PERIM_LOCK_ALL((dhd->fwder_unit % FWDER_MAX_UNIT));
+#else
+ ulong flags;
+ DHD_PERIM_UNLOCK_ALL((dhd->fwder_unit % FWDER_MAX_UNIT));
+ netif_rx(skb);
+ DHD_PERIM_LOCK_ALL((dhd->fwder_unit % FWDER_MAX_UNIT));
+ local_irq_save(flags);
+ RAISE_RX_SOFTIRQ();
+ local_irq_restore(flags);
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0) */
+#endif /* !defined(DHD_LB_RXP) */
+ }
}
- dhd->pub.pom_func_register(pom_handler);
- dhd->pub.enable_erpom = TRUE;
-
}
-#endif /* DHD_ERPOM */
- return &dhd->pub;
-fail:
- if (dhd_state >= DHD_ATTACH_STATE_DHD_ALLOC) {
- DHD_TRACE(("%s: Calling dhd_detach dhd_state 0x%x &dhd->pub %p\n",
- __FUNCTION__, dhd_state, &dhd->pub));
- dhd->dhd_state = dhd_state;
- dhd_detach(&dhd->pub);
- dhd_free(&dhd->pub);
- }
+ if (dhd->rxthread_enabled && skbhead)
+ dhd_sched_rxf(dhdp, skbhead);
-dhd_null_flag:
- return NULL;
+ DHD_OS_WAKE_LOCK_RX_TIMEOUT_ENABLE(dhdp, tout_rx);
+ DHD_OS_WAKE_LOCK_CTRL_TIMEOUT_ENABLE(dhdp, tout_ctrl);
}
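/*
 * Illustrative sketch, not part of the patch: the delivery primitive
 * dhd_rx_frame() ends up using for an ordinary data skb, assuming a >= 2.6
 * kernel without DHD_LB_RXP and without the rx thread. The helper name is
 * hypothetical.
 */
static inline void example_deliver_rx_skb(struct sk_buff *skb)
{
	if (in_interrupt())
		netif_rx(skb);		/* tasklet/ISR context: hand off to the NET_RX softirq */
	else
		netif_rx_ni(skb);	/* process context: netif_rx plus explicit softirq kick */
}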
-int dhd_get_fw_mode(dhd_info_t *dhdinfo)
+void
+dhd_event(struct dhd_info *dhd, char *evpkt, int evlen, int ifidx)
{
- if (strstr(dhdinfo->fw_path, "_apsta") != NULL)
- return DHD_FLAG_HOSTAP_MODE;
- if (strstr(dhdinfo->fw_path, "_p2p") != NULL)
- return DHD_FLAG_P2P_MODE;
- if (strstr(dhdinfo->fw_path, "_ibss") != NULL)
- return DHD_FLAG_IBSS_MODE;
- if (strstr(dhdinfo->fw_path, "_mfg") != NULL)
- return DHD_FLAG_MFG_MODE;
-
- return DHD_FLAG_STA_MODE;
+ /* Linux version has nothing to do */
+ return;
}
-int dhd_bus_get_fw_mode(dhd_pub_t *dhdp)
+void
+dhd_txcomplete(dhd_pub_t *dhdp, void *txp, bool success)
{
- return dhd_get_fw_mode(dhdp->info);
-}
+ dhd_info_t *dhd = (dhd_info_t *)(dhdp->info);
+ struct ether_header *eh;
+ uint16 type;
-extern char * nvram_get(const char *name);
-bool dhd_update_fw_nv_path(dhd_info_t *dhdinfo)
-{
- int fw_len;
- int nv_len;
- int clm_len;
- int conf_len;
- const char *fw = NULL;
- const char *nv = NULL;
- const char *clm = NULL;
- const char *conf = NULL;
-#ifdef DHD_UCODE_DOWNLOAD
- int uc_len;
- const char *uc = NULL;
-#endif /* DHD_UCODE_DOWNLOAD */
- wifi_adapter_info_t *adapter = dhdinfo->adapter;
- int fw_path_len = sizeof(dhdinfo->fw_path);
- int nv_path_len = sizeof(dhdinfo->nv_path);
+ dhd_prot_hdrpull(dhdp, NULL, txp, NULL, NULL);
- /* Update firmware and nvram path. The path may be from adapter info or module parameter
- * The path from adapter info is used for initialization only (as it won't change).
- *
- * The firmware_path/nvram_path module parameter may be changed by the system at run
- * time. When it changes we need to copy it to dhdinfo->fw_path. Also Android private
- * command may change dhdinfo->fw_path. As such we need to clear the path info in
- * module parameter after it is copied. We won't update the path until the module parameter
- * is changed again (first character is not '\0')
- */
- /* set default firmware and nvram path for built-in type driver */
-// if (!dhd_download_fw_on_driverload) {
-#ifdef CONFIG_BCMDHD_FW_PATH
- fw = VENDOR_PATH CONFIG_BCMDHD_FW_PATH;
-#endif /* CONFIG_BCMDHD_FW_PATH */
-#ifdef CONFIG_BCMDHD_NVRAM_PATH
- nv = VENDOR_PATH CONFIG_BCMDHD_NVRAM_PATH;
-#endif /* CONFIG_BCMDHD_NVRAM_PATH */
-// }
+ eh = (struct ether_header *)PKTDATA(dhdp->osh, txp);
+ type = ntoh16(eh->ether_type);
- /* check if we need to initialize the path */
- if (dhdinfo->fw_path[0] == '\0') {
- if (adapter && adapter->fw_path && adapter->fw_path[0] != '\0')
- fw = adapter->fw_path;
- }
- if (dhdinfo->nv_path[0] == '\0') {
- if (adapter && adapter->nv_path && adapter->nv_path[0] != '\0')
- nv = adapter->nv_path;
- }
- if (dhdinfo->clm_path[0] == '\0') {
- if (adapter && adapter->clm_path && adapter->clm_path[0] != '\0')
- clm = adapter->clm_path;
+ if ((type == ETHER_TYPE_802_1X) && (dhd_get_pend_8021x_cnt(dhd) > 0)) {
+ atomic_dec(&dhd->pend_8021x_cnt);
}
- if (dhdinfo->conf_path[0] == '\0') {
- if (adapter && adapter->conf_path && adapter->conf_path[0] != '\0')
- conf = adapter->conf_path;
+
+#ifdef PROP_TXSTATUS
+ if (dhdp->wlfc_state && (dhdp->proptxstatus_mode != WLFC_FCMODE_NONE)) {
+ dhd_if_t *ifp = dhd->iflist[DHD_PKTTAG_IF(PKTTAG(txp))];
+ uint datalen = PKTLEN(dhd->pub.osh, txp);
+ if (ifp != NULL) {
+ if (success) {
+ dhd->pub.tx_packets++;
+ ifp->stats.tx_packets++;
+ ifp->stats.tx_bytes += datalen;
+ } else {
+ ifp->stats.tx_dropped++;
+ }
+ }
}
+#endif
+}
- /* Use module parameter if it is valid, EVEN IF the path has not been initialized
- *
- * TODO: need a solution for multi-chip, can't use the same firmware for all chips
- */
- if (firmware_path[0] != '\0')
- fw = firmware_path;
+static struct net_device_stats *
+dhd_get_stats(struct net_device *net)
+{
+ dhd_info_t *dhd = DHD_DEV_INFO(net);
+ dhd_if_t *ifp;
+ int ifidx;
- if (nvram_path[0] != '\0')
- nv = nvram_path;
- if (clm_path[0] != '\0')
- clm = clm_path;
- if (config_path[0] != '\0')
- conf = config_path;
+ DHD_TRACE(("%s: Enter\n", __FUNCTION__));
-#ifdef DHD_UCODE_DOWNLOAD
- if (ucode_path[0] != '\0')
- uc = ucode_path;
-#endif /* DHD_UCODE_DOWNLOAD */
+ if (!dhd) {
+ DHD_ERROR(("%s : dhd is NULL\n", __FUNCTION__));
+ goto error;
+ }
- if (fw && fw[0] != '\0') {
- fw_len = strlen(fw);
- if (fw_len >= fw_path_len) {
- DHD_ERROR(("fw path len exceeds max len of dhdinfo->fw_path\n"));
- return FALSE;
- }
- strncpy(dhdinfo->fw_path, fw, fw_path_len);
- if (dhdinfo->fw_path[fw_len-1] == '\n')
- dhdinfo->fw_path[fw_len-1] = '\0';
+ ifidx = dhd_net2idx(dhd, net);
+ if (ifidx == DHD_BAD_IF) {
+ DHD_ERROR(("%s: BAD_IF\n", __FUNCTION__));
+ goto error;
}
- if (nv && nv[0] != '\0') {
- nv_len = strlen(nv);
- if (nv_len >= nv_path_len) {
- DHD_ERROR(("nvram path len exceeds max len of dhdinfo->nv_path\n"));
- return FALSE;
- }
- memset(dhdinfo->nv_path, 0, nv_path_len);
- strncpy(dhdinfo->nv_path, nv, nv_path_len);
- dhdinfo->nv_path[nv_len] = '\0';
-#ifdef DHD_USE_SINGLE_NVRAM_FILE
- /* Remove "_net" or "_mfg" tag from current nvram path */
- {
- char *nvram_tag = "nvram_";
- char *ext_tag = ".txt";
- char *sp_nvram = strnstr(dhdinfo->nv_path, nvram_tag, nv_path_len);
- bool valid_buf = sp_nvram && ((uint32)(sp_nvram + strlen(nvram_tag) +
- strlen(ext_tag) - dhdinfo->nv_path) <= nv_path_len);
- if (valid_buf) {
- char *sp = sp_nvram + strlen(nvram_tag) - 1;
- uint32 padding_size = (uint32)(dhdinfo->nv_path +
- nv_path_len - sp);
- memset(sp, 0, padding_size);
- strncat(dhdinfo->nv_path, ext_tag, strlen(ext_tag));
- nv_len = strlen(dhdinfo->nv_path);
- DHD_INFO(("%s: new nvram path = %s\n",
- __FUNCTION__, dhdinfo->nv_path));
- } else if (sp_nvram) {
- DHD_ERROR(("%s: buffer space for nvram path is not enough\n",
- __FUNCTION__));
- return FALSE;
- } else {
- DHD_ERROR(("%s: Couldn't find the nvram tag. current"
- " nvram path = %s\n", __FUNCTION__, dhdinfo->nv_path));
- }
- }
-#endif /* DHD_USE_SINGLE_NVRAM_FILE */
- if (dhdinfo->nv_path[nv_len-1] == '\n')
- dhdinfo->nv_path[nv_len-1] = '\0';
+
+ ifp = dhd->iflist[ifidx];
+
+ if (!ifp) {
+ ASSERT(ifp);
+ DHD_ERROR(("%s: ifp is NULL\n", __FUNCTION__));
+ goto error;
}
- if (clm && clm[0] != '\0') {
- clm_len = strlen(clm);
- if (clm_len >= sizeof(dhdinfo->clm_path)) {
- DHD_ERROR(("clm path len exceeds max len of dhdinfo->clm_path\n"));
- return FALSE;
- }
- strncpy(dhdinfo->clm_path, clm, sizeof(dhdinfo->clm_path));
- if (dhdinfo->clm_path[clm_len-1] == '\n')
- dhdinfo->clm_path[clm_len-1] = '\0';
+
+ if (dhd->pub.up) {
+ /* Use the protocol to get dongle stats */
+ dhd_prot_dstats(&dhd->pub);
}
- if (conf && conf[0] != '\0') {
- conf_len = strlen(conf);
- if (conf_len >= sizeof(dhdinfo->conf_path)) {
- DHD_ERROR(("config path len exceeds max len of dhdinfo->conf_path\n"));
- return FALSE;
- }
- strncpy(dhdinfo->conf_path, conf, sizeof(dhdinfo->conf_path));
- if (dhdinfo->conf_path[conf_len-1] == '\n')
- dhdinfo->conf_path[conf_len-1] = '\0';
+ return &ifp->stats;
+
+error:
+ memset(&net->stats, 0, sizeof(net->stats));
+ return &net->stats;
+}
+
+#ifndef BCMDBUS
+static int
+dhd_watchdog_thread(void *data)
+{
+ tsk_ctl_t *tsk = (tsk_ctl_t *)data;
+ dhd_info_t *dhd = (dhd_info_t *)tsk->parent;
+ /* This thread doesn't need any user-level access,
+ * so get rid of all our resources
+ */
+ if (dhd_watchdog_prio > 0) {
+ struct sched_param param;
+ param.sched_priority = (dhd_watchdog_prio < MAX_RT_PRIO)?
+ dhd_watchdog_prio:(MAX_RT_PRIO-1);
+ setScheduler(current, SCHED_FIFO, &param);
}
-#ifdef DHD_UCODE_DOWNLOAD
- if (uc && uc[0] != '\0') {
- uc_len = strlen(uc);
- if (uc_len >= sizeof(dhdinfo->uc_path)) {
- DHD_ERROR(("uc path len exceeds max len of dhdinfo->uc_path\n"));
- return FALSE;
+
+ while (1) {
+ if (down_interruptible (&tsk->sema) == 0) {
+ unsigned long flags;
+ unsigned long jiffies_at_start = jiffies;
+ unsigned long time_lapse;
+ DHD_OS_WD_WAKE_LOCK(&dhd->pub);
+
+ SMP_RD_BARRIER_DEPENDS();
+ if (tsk->terminated) {
+ break;
+ }
+
+ if (dhd->pub.dongle_reset == FALSE) {
+ DHD_TIMER(("%s:\n", __FUNCTION__));
+ dhd_bus_watchdog(&dhd->pub);
+
+#ifdef DHD_TIMESYNC
+ /* Call the timesync module watchdog */
+ dhd_timesync_watchdog(&dhd->pub);
+#endif /* DHD_TIMESYNC */
+
+ DHD_GENERAL_LOCK(&dhd->pub, flags);
+ /* Count the tick for reference */
+ dhd->pub.tickcnt++;
+#ifdef DHD_L2_FILTER
+ dhd_l2_filter_watchdog(&dhd->pub);
+#endif /* DHD_L2_FILTER */
+ time_lapse = jiffies - jiffies_at_start;
+
+ /* Reschedule the watchdog */
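+ /* Subtract the time already spent in this pass so the effective
+  * period stays close to dhd_watchdog_ms.
+  */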
+ if (dhd->wd_timer_valid) {
+ mod_timer(&dhd->timer,
+ jiffies +
+ msecs_to_jiffies(dhd_watchdog_ms) -
+ min(msecs_to_jiffies(dhd_watchdog_ms), time_lapse));
+ }
+ DHD_GENERAL_UNLOCK(&dhd->pub, flags);
+ }
+ DHD_OS_WD_WAKE_UNLOCK(&dhd->pub);
+ } else {
+ break;
}
- strncpy(dhdinfo->uc_path, uc, sizeof(dhdinfo->uc_path));
- if (dhdinfo->uc_path[uc_len-1] == '\n')
- dhdinfo->uc_path[uc_len-1] = '\0';
}
-#endif /* DHD_UCODE_DOWNLOAD */
-#if 0
- /* clear the path in module parameter */
- if (dhd_download_fw_on_driverload) {
- firmware_path[0] = '\0';
- nvram_path[0] = '\0';
- clm_path[0] = '\0';
- config_path[0] = '\0';
- }
+ complete_and_exit(&tsk->completed, 0);
+}
+
+static void dhd_watchdog(
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 15, 0)
+ struct timer_list *t
+#else
+ ulong data
#endif
-#ifdef DHD_UCODE_DOWNLOAD
- ucode_path[0] = '\0';
- DHD_ERROR(("ucode path: %s\n", dhdinfo->uc_path));
-#endif /* DHD_UCODE_DOWNLOAD */
+)
+{
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 15, 0)
+ dhd_info_t *dhd = from_timer(dhd, t, timer);
+#else
+ dhd_info_t *dhd = (dhd_info_t *)data;
+#endif
+ unsigned long flags;
- /* fw_path and nv_path are not mandatory for BCMEMBEDIMAGE */
- if (dhdinfo->fw_path[0] == '\0') {
- DHD_ERROR(("firmware path not found\n"));
- return FALSE;
+ if (dhd->pub.dongle_reset) {
+ return;
}
- if (dhdinfo->nv_path[0] == '\0') {
- DHD_ERROR(("nvram path not found\n"));
- return FALSE;
+
+ if (dhd->thr_wdt_ctl.thr_pid >= 0) {
+ up(&dhd->thr_wdt_ctl.sema);
+ return;
}
- return TRUE;
+ DHD_OS_WD_WAKE_LOCK(&dhd->pub);
+ /* Call the bus module watchdog */
+ dhd_bus_watchdog(&dhd->pub);
+
+#ifdef DHD_TIMESYNC
+ /* Call the timesync module watchdog */
+ dhd_timesync_watchdog(&dhd->pub);
+#endif /* DHD_TIMESYNC */
+
+ DHD_GENERAL_LOCK(&dhd->pub, flags);
+ /* Count the tick for reference */
+ dhd->pub.tickcnt++;
+
+#ifdef DHD_L2_FILTER
+ dhd_l2_filter_watchdog(&dhd->pub);
+#endif /* DHD_L2_FILTER */
+ /* Reschedule the watchdog */
+ if (dhd->wd_timer_valid)
+ mod_timer(&dhd->timer, jiffies + msecs_to_jiffies(dhd_watchdog_ms));
+ DHD_GENERAL_UNLOCK(&dhd->pub, flags);
+ DHD_OS_WD_WAKE_UNLOCK(&dhd->pub);
}
-#if defined(BT_OVER_SDIO)
-extern bool dhd_update_btfw_path(dhd_info_t *dhdinfo, char* btfw_path)
+#ifdef DHD_PCIE_RUNTIMEPM
+static int
+dhd_rpm_state_thread(void *data)
{
- int fw_len;
- const char *fw = NULL;
- wifi_adapter_info_t *adapter = dhdinfo->adapter;
+ tsk_ctl_t *tsk = (tsk_ctl_t *)data;
+ dhd_info_t *dhd = (dhd_info_t *)tsk->parent;
- /* Update bt firmware path. The path may be from adapter info or module parameter
- * The path from adapter info is used for initialization only (as it won't change).
- *
- * The btfw_path module parameter may be changed by the system at run
- * time. When it changes we need to copy it to dhdinfo->btfw_path. Also Android private
- * command may change dhdinfo->btfw_path. As such we need to clear the path info in
- * module parameter after it is copied. We won't update the path until the module parameter
- * is changed again (first character is not '\0')
- */
+ while (1) {
+ if (down_interruptible (&tsk->sema) == 0) {
+ unsigned long flags;
+ unsigned long jiffies_at_start = jiffies;
+ unsigned long time_lapse;
- /* set default firmware and nvram path for built-in type driver */
- if (!dhd_download_fw_on_driverload) {
-#ifdef CONFIG_BCMDHD_BTFW_PATH
- fw = CONFIG_BCMDHD_BTFW_PATH;
-#endif /* CONFIG_BCMDHD_FW_PATH */
- }
+ SMP_RD_BARRIER_DEPENDS();
+ if (tsk->terminated) {
+ break;
+ }
- /* check if we need to initialize the path */
- if (dhdinfo->btfw_path[0] == '\0') {
- if (adapter && adapter->btfw_path && adapter->btfw_path[0] != '\0')
- fw = adapter->btfw_path;
- }
+ if (dhd->pub.dongle_reset == FALSE) {
+ DHD_TIMER(("%s:\n", __FUNCTION__));
+ if (dhd->pub.up) {
+ dhd_runtimepm_state(&dhd->pub);
+ }
- /* Use module parameter if it is valid, EVEN IF the path has not been initialized
- */
- if (btfw_path[0] != '\0')
- fw = btfw_path;
+ DHD_GENERAL_LOCK(&dhd->pub, flags);
+ time_lapse = jiffies - jiffies_at_start;
- if (fw && fw[0] != '\0') {
- fw_len = strlen(fw);
- if (fw_len >= sizeof(dhdinfo->btfw_path)) {
- DHD_ERROR(("fw path len exceeds max len of dhdinfo->btfw_path\n"));
- return FALSE;
+ /* Reschedule the watchdog */
+ if (dhd->rpm_timer_valid) {
+ mod_timer(&dhd->rpm_timer,
+ jiffies +
+ msecs_to_jiffies(dhd_runtimepm_ms) -
+ min(msecs_to_jiffies(dhd_runtimepm_ms),
+ time_lapse));
+ }
+ DHD_GENERAL_UNLOCK(&dhd->pub, flags);
+ }
+ } else {
+ break;
}
- strncpy(dhdinfo->btfw_path, fw, sizeof(dhdinfo->btfw_path));
- if (dhdinfo->btfw_path[fw_len-1] == '\n')
- dhdinfo->btfw_path[fw_len-1] = '\0';
}
- /* clear the path in module parameter */
- btfw_path[0] = '\0';
+ complete_and_exit(&tsk->completed, 0);
+}
- if (dhdinfo->btfw_path[0] == '\0') {
- DHD_ERROR(("bt firmware path not found\n"));
- return FALSE;
+static void dhd_runtimepm(
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 15, 0)
+ struct timer_list *t
+#else
+ ulong data
+#endif
+)
+{
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 15, 0)
+ dhd_info_t *dhd = from_timer(dhd, t, rpm_timer);
+#else
+ dhd_info_t *dhd = (dhd_info_t *)data;
+#endif
+
+ if (dhd->pub.dongle_reset) {
+ return;
}
- return TRUE;
+ if (dhd->thr_rpm_ctl.thr_pid >= 0) {
+ up(&dhd->thr_rpm_ctl.sema);
+ return;
+ }
}
-#endif /* defined (BT_OVER_SDIO) */
-#if defined(BT_OVER_SDIO)
-wlan_bt_handle_t dhd_bt_get_pub_hndl(void)
+void dhd_runtime_pm_disable(dhd_pub_t *dhdp)
{
- DHD_ERROR(("%s: g_dhd_pub %p\n", __FUNCTION__, g_dhd_pub));
- /* assuming that dhd_pub_t type pointer is available from a global variable */
- return (wlan_bt_handle_t) g_dhd_pub;
-} EXPORT_SYMBOL(dhd_bt_get_pub_hndl);
+ dhd_os_runtimepm_timer(dhdp, 0);
+ dhdpcie_runtime_bus_wake(dhdp, TRUE, __builtin_return_address(0));
+ DHD_ERROR(("DHD Runtime PM Disabled \n"));
+}
-int dhd_download_btfw(wlan_bt_handle_t handle, char* btfw_path)
+void dhd_runtime_pm_enable(dhd_pub_t *dhdp)
{
- int ret = -1;
- dhd_pub_t *dhdp = (dhd_pub_t *)handle;
- dhd_info_t *dhd = (dhd_info_t*)dhdp->info;
+ if (dhd_get_idletime(dhdp)) {
+ dhd_os_runtimepm_timer(dhdp, dhd_runtimepm_ms);
+ DHD_ERROR(("DHD Runtime PM Enabled \n"));
+ }
+}
- /* Download BT firmware image to the dongle */
- if (dhd->pub.busstate == DHD_BUS_DATA && dhd_update_btfw_path(dhd, btfw_path)) {
- DHD_INFO(("%s: download btfw from: %s\n", __FUNCTION__, dhd->btfw_path));
- ret = dhd_bus_download_btfw(dhd->pub.bus, dhd->pub.osh, dhd->btfw_path);
- if (ret < 0) {
- DHD_ERROR(("%s: failed to download btfw from: %s\n",
- __FUNCTION__, dhd->btfw_path));
- return ret;
+#endif /* DHD_PCIE_RUNTIMEPM */
+
+
+#ifdef ENABLE_ADAPTIVE_SCHED
+static void
+dhd_sched_policy(int prio)
+{
+ struct sched_param param;
+ if (cpufreq_quick_get(0) <= CUSTOM_CPUFREQ_THRESH) {
+ param.sched_priority = 0;
+ setScheduler(current, SCHED_NORMAL, &param);
+ } else {
+ if (get_scheduler_policy(current) != SCHED_FIFO) {
+ param.sched_priority = (prio < MAX_RT_PRIO)? prio : (MAX_RT_PRIO-1);
+ setScheduler(current, SCHED_FIFO, &param);
}
}
- return ret;
-} EXPORT_SYMBOL(dhd_download_btfw);
-#endif /* defined (BT_OVER_SDIO) */
-
-#ifndef BCMDBUS
-int
-dhd_bus_start(dhd_pub_t *dhdp)
+}
+#endif /* ENABLE_ADAPTIVE_SCHED */
+#ifdef DEBUG_CPU_FREQ
+static int dhd_cpufreq_notifier(struct notifier_block *nb, unsigned long val, void *data)
{
- int ret = -1;
- dhd_info_t *dhd = (dhd_info_t*)dhdp->info;
- unsigned long flags;
+ dhd_info_t *dhd = container_of(nb, struct dhd_info, freq_trans);
+ struct cpufreq_freqs *freq = data;
+ if (dhd) {
+ if (!dhd->new_freq)
+ goto exit;
+ if (val == CPUFREQ_POSTCHANGE) {
+ DHD_ERROR(("cpu freq is changed to %u kHZ on CPU %d\n",
+ freq->new, freq->cpu));
+ *per_cpu_ptr(dhd->new_freq, freq->cpu) = freq->new;
+ }
+ }
+exit:
+ return 0;
+}
+#endif /* DEBUG_CPU_FREQ */
-#if defined(DHD_DEBUG) && defined(BCMSDIO)
- int fw_download_start = 0, fw_download_end = 0, f2_sync_start = 0, f2_sync_end = 0;
-#endif /* DHD_DEBUG && BCMSDIO */
- ASSERT(dhd);
+static int
+dhd_dpc_thread(void *data)
+{
+ tsk_ctl_t *tsk = (tsk_ctl_t *)data;
+ dhd_info_t *dhd = (dhd_info_t *)tsk->parent;
- DHD_TRACE(("Enter %s:\n", __FUNCTION__));
- dhdp->dongle_trap_occured = 0;
-#ifdef DHD_SSSR_DUMP
- /* Flag to indicate sssr dump is collected */
- dhdp->sssr_dump_collected = 0;
-#endif /* DHD_SSSR_DUMP */
- dhdp->iovar_timeout_occured = 0;
-#ifdef PCIE_FULL_DONGLE
- dhdp->d3ack_timeout_occured = 0;
- dhdp->livelock_occured = 0;
- dhdp->pktid_audit_failed = 0;
-#endif /* PCIE_FULL_DONGLE */
- dhd->pub.iface_op_failed = 0;
- dhd->pub.scan_timeout_occurred = 0;
- dhd->pub.scan_busy_occurred = 0;
- /* Clear induced error during initialize */
- dhd->pub.dhd_induce_error = DHD_INDUCE_ERROR_CLEAR;
-
- /* set default value for now. Will be updated again in dhd_preinit_ioctls()
- * after querying FW
+ /* This thread doesn't need any user-level access,
+ * so get rid of all our resources
*/
- dhdp->event_log_max_sets = NUM_EVENT_LOG_SETS;
- dhdp->event_log_max_sets_queried = FALSE;
- dhdp->smmu_fault_occurred = 0;
-#ifdef DNGL_AXI_ERROR_LOGGING
- dhdp->axi_error = FALSE;
-#endif /* DNGL_AXI_ERROR_LOGGING */
-
- DHD_PERIM_LOCK(dhdp);
- /* try to download image and nvram to the dongle */
- if (dhd->pub.busstate == DHD_BUS_DOWN && dhd_update_fw_nv_path(dhd)) {
- /* Indicate FW Download has not yet done */
- dhd->pub.fw_download_status = FW_DOWNLOAD_IN_PROGRESS;
- DHD_INFO(("%s download fw %s, nv %s, conf %s\n",
- __FUNCTION__, dhd->fw_path, dhd->nv_path, dhd->conf_path));
-#if defined(DHD_DEBUG) && defined(BCMSDIO)
- fw_download_start = OSL_SYSUPTIME();
-#endif /* DHD_DEBUG && BCMSDIO */
- ret = dhd_bus_download_firmware(dhd->pub.bus, dhd->pub.osh,
- dhd->fw_path, dhd->nv_path, dhd->clm_path, dhd->conf_path);
-#if defined(DHD_DEBUG) && defined(BCMSDIO)
- fw_download_end = OSL_SYSUPTIME();
-#endif /* DHD_DEBUG && BCMSDIO */
- if (ret < 0) {
- DHD_ERROR(("%s: failed to download firmware %s\n",
- __FUNCTION__, dhd->fw_path));
- DHD_PERIM_UNLOCK(dhdp);
- return ret;
- }
- /* Indicate FW Download has succeeded */
- dhd->pub.fw_download_status = FW_DOWNLOAD_DONE;
- }
- if (dhd->pub.busstate != DHD_BUS_LOAD) {
- DHD_PERIM_UNLOCK(dhdp);
- return -ENETDOWN;
+ if (dhd_dpc_prio > 0)
+ {
+ struct sched_param param;
+ param.sched_priority = (dhd_dpc_prio < MAX_RT_PRIO)?dhd_dpc_prio:(MAX_RT_PRIO-1);
+ setScheduler(current, SCHED_FIFO, &param);
}
-#ifdef BCMSDIO
- dhd_os_sdlock(dhdp);
-#endif /* BCMSDIO */
+#ifdef CUSTOM_DPC_CPUCORE
+ set_cpus_allowed_ptr(current, cpumask_of(CUSTOM_DPC_CPUCORE));
+#endif
+#ifdef CUSTOM_SET_CPUCORE
+ dhd->pub.current_dpc = current;
+#endif /* CUSTOM_SET_CPUCORE */
+ /* Run until signal received */
+ while (1) {
+ if (dhd->pub.conf->dpc_cpucore >= 0) {
+ printf("%s: set dpc_cpucore %d\n", __FUNCTION__, dhd->pub.conf->dpc_cpucore);
+ set_cpus_allowed_ptr(current, cpumask_of(dhd->pub.conf->dpc_cpucore));
+ dhd->pub.conf->dpc_cpucore = -1;
+ }
+ if (!binary_sema_down(tsk)) {
+#ifdef ENABLE_ADAPTIVE_SCHED
+ dhd_sched_policy(dhd_dpc_prio);
+#endif /* ENABLE_ADAPTIVE_SCHED */
+ SMP_RD_BARRIER_DEPENDS();
+ if (tsk->terminated) {
+ break;
+ }
- /* Start the watchdog timer */
- dhd->pub.tickcnt = 0;
- dhd_os_wd_timer(&dhd->pub, dhd_watchdog_ms);
+ /* Call bus dpc unless it indicated down (then clean stop) */
+ if (dhd->pub.busstate != DHD_BUS_DOWN) {
+#ifdef DEBUG_DPC_THREAD_WATCHDOG
+ int resched_cnt = 0;
+#endif /* DEBUG_DPC_THREAD_WATCHDOG */
+ dhd_os_wd_timer_extend(&dhd->pub, TRUE);
+ while (dhd_bus_dpc(dhd->pub.bus)) {
+ /* process all data */
+#ifdef DEBUG_DPC_THREAD_WATCHDOG
+ resched_cnt++;
+ if (resched_cnt > MAX_RESCHED_CNT) {
+ DHD_INFO(("%s Calling msleep to"
+ "let other processes run. \n",
+ __FUNCTION__));
+ dhd->pub.dhd_bug_on = true;
+ resched_cnt = 0;
+ OSL_SLEEP(1);
+ }
+#endif /* DEBUG_DPC_THREAD_WATCHDOG */
+ }
+ dhd_os_wd_timer_extend(&dhd->pub, FALSE);
+ DHD_OS_WAKE_UNLOCK(&dhd->pub);
+ } else {
+ if (dhd->pub.up)
+ dhd_bus_stop(dhd->pub.bus, TRUE);
+ DHD_OS_WAKE_UNLOCK(&dhd->pub);
+ }
+ } else {
+ break;
+ }
+ }
+ complete_and_exit(&tsk->completed, 0);
+}
- /* Bring up the bus */
- if ((ret = dhd_bus_init(&dhd->pub, FALSE)) != 0) {
+static int
+dhd_rxf_thread(void *data)
+{
+ tsk_ctl_t *tsk = (tsk_ctl_t *)data;
+ dhd_info_t *dhd = (dhd_info_t *)tsk->parent;
+#if defined(WAIT_DEQUEUE)
+#define RXF_WATCHDOG_TIME 250 /* BARK_TIME(1000) / */
+ ulong watchdogTime = OSL_SYSUPTIME(); /* msec */
+#endif
+ dhd_pub_t *pub = &dhd->pub;
- DHD_ERROR(("%s, dhd_bus_init failed %d\n", __FUNCTION__, ret));
-#ifdef BCMSDIO
- dhd_os_sdunlock(dhdp);
-#endif /* BCMSDIO */
- DHD_PERIM_UNLOCK(dhdp);
- return ret;
+ /* This thread doesn't need any user-level access,
+ * so get rid of all our resources
+ */
+ if (dhd_rxf_prio > 0)
+ {
+ struct sched_param param;
+ param.sched_priority = (dhd_rxf_prio < MAX_RT_PRIO)?dhd_rxf_prio:(MAX_RT_PRIO-1);
+ setScheduler(current, SCHED_FIFO, &param);
}
- DHD_ENABLE_RUNTIME_PM(&dhd->pub);
+#ifdef CUSTOM_SET_CPUCORE
+ dhd->pub.current_rxf = current;
+#endif /* CUSTOM_SET_CPUCORE */
+ /* Run until signal received */
+ while (1) {
+ if (dhd->pub.conf->rxf_cpucore >= 0) {
+ printf("%s: set rxf_cpucore %d\n", __FUNCTION__, dhd->pub.conf->rxf_cpucore);
+ set_cpus_allowed_ptr(current, cpumask_of(dhd->pub.conf->rxf_cpucore));
+ dhd->pub.conf->rxf_cpucore = -1;
+ }
+ if (down_interruptible(&tsk->sema) == 0) {
+ void *skb;
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0)
+ ulong flags;
+#endif
+#ifdef ENABLE_ADAPTIVE_SCHED
+ dhd_sched_policy(dhd_rxf_prio);
+#endif /* ENABLE_ADAPTIVE_SCHED */
-#ifdef DHD_ULP
- dhd_ulp_set_ulp_state(dhdp, DHD_ULP_DISABLED);
-#endif /* DHD_ULP */
-#if defined(OOB_INTR_ONLY) || defined(BCMSPI_ANDROID) || defined(BCMPCIE_OOB_HOST_WAKE)
- /* Host registration for OOB interrupt */
- if (dhd_bus_oob_intr_register(dhdp)) {
- /* deactivate timer and wait for the handler to finish */
-#if !defined(BCMPCIE_OOB_HOST_WAKE)
- DHD_GENERAL_LOCK(&dhd->pub, flags);
- dhd->wd_timer_valid = FALSE;
- DHD_GENERAL_UNLOCK(&dhd->pub, flags);
- del_timer_sync(&dhd->timer);
+ SMP_RD_BARRIER_DEPENDS();
-#endif /* !BCMPCIE_OOB_HOST_WAKE */
- DHD_DISABLE_RUNTIME_PM(&dhd->pub);
- DHD_PERIM_UNLOCK(dhdp);
- DHD_ERROR(("%s Host failed to register for OOB\n", __FUNCTION__));
- DHD_OS_WD_WAKE_UNLOCK(&dhd->pub);
- return -ENODEV;
- }
+ if (tsk->terminated) {
+ break;
+ }
+ skb = dhd_rxf_dequeue(pub);
-#if defined(BCMPCIE_OOB_HOST_WAKE)
- dhd_bus_oob_intr_set(dhdp, TRUE);
+ if (skb == NULL) {
+ continue;
+ }
+ while (skb) {
+ void *skbnext = PKTNEXT(pub->osh, skb);
+ PKTSETNEXT(pub->osh, skb, NULL);
+ bcm_object_trace_opr(skb, BCM_OBJDBG_REMOVE,
+ __FUNCTION__, __LINE__);
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)
+ netif_rx_ni(skb);
#else
- /* Enable oob at firmware */
- dhd_enable_oob_intr(dhd->pub.bus, TRUE);
-#endif /* BCMPCIE_OOB_HOST_WAKE */
-#elif defined(FORCE_WOWLAN)
- /* Enable oob at firmware */
- dhd_enable_oob_intr(dhd->pub.bus, TRUE);
-#endif /* OOB_INTR_ONLY || BCMSPI_ANDROID || BCMPCIE_OOB_HOST_WAKE */
-#ifdef PCIE_FULL_DONGLE
- {
- /* max_h2d_rings includes H2D common rings */
- uint32 max_h2d_rings = dhd_bus_max_h2d_queues(dhd->pub.bus);
+ netif_rx(skb);
+ local_irq_save(flags);
+ RAISE_RX_SOFTIRQ();
+ local_irq_restore(flags);
- DHD_ERROR(("%s: Initializing %u h2drings\n", __FUNCTION__,
- max_h2d_rings));
- if ((ret = dhd_flow_rings_init(&dhd->pub, max_h2d_rings)) != BCME_OK) {
-#ifdef BCMSDIO
- dhd_os_sdunlock(dhdp);
-#endif /* BCMSDIO */
- DHD_PERIM_UNLOCK(dhdp);
- return ret;
+#endif
+ skb = skbnext;
+ }
+#if defined(WAIT_DEQUEUE)
+ if (OSL_SYSUPTIME() - watchdogTime > RXF_WATCHDOG_TIME) {
+ OSL_SLEEP(1);
+ watchdogTime = OSL_SYSUPTIME();
+ }
+#endif
+
+ DHD_OS_WAKE_UNLOCK(pub);
+ } else {
+ break;
}
}
-#endif /* PCIE_FULL_DONGLE */
+ complete_and_exit(&tsk->completed, 0);
+}
- /* Do protocol initialization necessary for IOCTL/IOVAR */
- ret = dhd_prot_init(&dhd->pub);
- if (unlikely(ret) != BCME_OK) {
- DHD_PERIM_UNLOCK(dhdp);
- DHD_OS_WD_WAKE_UNLOCK(&dhd->pub);
- return ret;
- }
+#ifdef BCMPCIE
+void dhd_dpc_enable(dhd_pub_t *dhdp)
+{
+#if defined(DHD_LB_RXP) || defined(DHD_LB_TXP)
+ dhd_info_t *dhd;
- /* If bus is not ready, can't come up */
- if (dhd->pub.busstate != DHD_BUS_DATA) {
- DHD_GENERAL_LOCK(&dhd->pub, flags);
- dhd->wd_timer_valid = FALSE;
- DHD_GENERAL_UNLOCK(&dhd->pub, flags);
- del_timer_sync(&dhd->timer);
- DHD_ERROR(("%s failed bus is not ready\n", __FUNCTION__));
- DHD_DISABLE_RUNTIME_PM(&dhd->pub);
-#ifdef BCMSDIO
- dhd_os_sdunlock(dhdp);
-#endif /* BCMSDIO */
- DHD_PERIM_UNLOCK(dhdp);
- DHD_OS_WD_WAKE_UNLOCK(&dhd->pub);
- return -ENODEV;
- }
+ if (!dhdp || !dhdp->info)
+ return;
+ dhd = dhdp->info;
+#endif /* DHD_LB_RXP || DHD_LB_TXP */
+
+#ifdef DHD_LB_RXP
+ __skb_queue_head_init(&dhd->rx_pend_queue);
+#endif /* DHD_LB_RXP */
-#ifdef BCMSDIO
- dhd_os_sdunlock(dhdp);
-#endif /* BCMSDIO */
+#ifdef DHD_LB_TXP
+ skb_queue_head_init(&dhd->tx_pend_queue);
+#endif /* DHD_LB_TXP */
+}
+#endif /* BCMPCIE */
- /* Bus is ready, query any dongle information */
-#if defined(DHD_DEBUG) && defined(BCMSDIO)
- f2_sync_start = OSL_SYSUPTIME();
-#endif /* DHD_DEBUG && BCMSDIO */
- if ((ret = dhd_sync_with_dongle(&dhd->pub)) < 0) {
- DHD_GENERAL_LOCK(&dhd->pub, flags);
- dhd->wd_timer_valid = FALSE;
- DHD_GENERAL_UNLOCK(&dhd->pub, flags);
- del_timer_sync(&dhd->timer);
- DHD_ERROR(("%s failed to sync with dongle\n", __FUNCTION__));
- DHD_OS_WD_WAKE_UNLOCK(&dhd->pub);
- DHD_PERIM_UNLOCK(dhdp);
- return ret;
+#ifdef BCMPCIE
+void
+dhd_dpc_kill(dhd_pub_t *dhdp)
+{
+ dhd_info_t *dhd;
+
+ if (!dhdp) {
+ return;
}
-#if defined(CONFIG_SOC_EXYNOS8895) || defined(CONFIG_SOC_EXYNOS9810) || \
- defined(CONFIG_SOC_EXYNOS9820)
- DHD_ERROR(("%s: Enable L1ss EP side\n", __FUNCTION__));
- exynos_pcie_l1ss_ctrl(1, PCIE_L1SS_CTRL_WIFI);
-#endif /* CONFIG_SOC_EXYNOS8895 || CONFIG_SOC_EXYNOS9810 || CONFIG_SOC_EXYNOS9820 */
+ dhd = dhdp->info;
-#if defined(DHD_DEBUG) && defined(BCMSDIO)
- f2_sync_end = OSL_SYSUPTIME();
- DHD_ERROR(("Time taken for FW download and F2 ready is: %d msec\n",
- (fw_download_end - fw_download_start) + (f2_sync_end - f2_sync_start)));
-#endif /* DHD_DEBUG && BCMSDIO */
+ if (!dhd) {
+ return;
+ }
-#ifdef ARP_OFFLOAD_SUPPORT
- if (dhd->pend_ipaddr) {
-#ifdef AOE_IP_ALIAS_SUPPORT
- aoe_update_host_ipv4_table(&dhd->pub, dhd->pend_ipaddr, TRUE, 0);
-#endif /* AOE_IP_ALIAS_SUPPORT */
- dhd->pend_ipaddr = 0;
+ if (dhd->thr_dpc_ctl.thr_pid < 0) {
+ tasklet_kill(&dhd->tasklet);
+ DHD_ERROR(("%s: tasklet disabled\n", __FUNCTION__));
}
-#endif /* ARP_OFFLOAD_SUPPORT */
- DHD_PERIM_UNLOCK(dhdp);
+#ifdef DHD_LB
+#ifdef DHD_LB_RXP
+ cancel_work_sync(&dhd->rx_napi_dispatcher_work);
+ __skb_queue_purge(&dhd->rx_pend_queue);
+#endif /* DHD_LB_RXP */
+#ifdef DHD_LB_TXP
+ cancel_work_sync(&dhd->tx_dispatcher_work);
+ skb_queue_purge(&dhd->tx_pend_queue);
+#endif /* DHD_LB_TXP */
- return 0;
+ /* Kill the Load Balancing Tasklets */
+#if defined(DHD_LB_TXC)
+ tasklet_kill(&dhd->tx_compl_tasklet);
+#endif /* DHD_LB_TXC */
+#if defined(DHD_LB_RXC)
+ tasklet_kill(&dhd->rx_compl_tasklet);
+#endif /* DHD_LB_RXC */
+#if defined(DHD_LB_TXP)
+ tasklet_kill(&dhd->tx_tasklet);
+#endif /* DHD_LB_TXP */
+#endif /* DHD_LB */
}
-#endif /* !BCMDBUS */
-#ifdef WLTDLS
-int _dhd_tdls_enable(dhd_pub_t *dhd, bool tdls_on, bool auto_on, struct ether_addr *mac)
+void
+dhd_dpc_tasklet_kill(dhd_pub_t *dhdp)
{
- uint32 tdls = tdls_on;
- int ret = 0;
- uint32 tdls_auto_op = 0;
- uint32 tdls_idle_time = CUSTOM_TDLS_IDLE_MODE_SETTING;
- int32 tdls_rssi_high = CUSTOM_TDLS_RSSI_THRESHOLD_HIGH;
- int32 tdls_rssi_low = CUSTOM_TDLS_RSSI_THRESHOLD_LOW;
- uint32 tdls_pktcnt_high = CUSTOM_TDLS_PCKTCNT_THRESHOLD_HIGH;
- uint32 tdls_pktcnt_low = CUSTOM_TDLS_PCKTCNT_THRESHOLD_LOW;
+ dhd_info_t *dhd;
- BCM_REFERENCE(mac);
- if (!FW_SUPPORTED(dhd, tdls))
- return BCME_ERROR;
+ if (!dhdp) {
+ return;
+ }
- if (dhd->tdls_enable == tdls_on)
- goto auto_mode;
- ret = dhd_iovar(dhd, 0, "tdls_enable", (char *)&tdls, sizeof(tdls), NULL, 0, TRUE);
- if (ret < 0) {
- DHD_ERROR(("%s: tdls %d failed %d\n", __FUNCTION__, tdls, ret));
- goto exit;
+ dhd = dhdp->info;
+
+ if (!dhd) {
+ return;
}
- dhd->tdls_enable = tdls_on;
-auto_mode:
- tdls_auto_op = auto_on;
- ret = dhd_iovar(dhd, 0, "tdls_auto_op", (char *)&tdls_auto_op, sizeof(tdls_auto_op), NULL,
- 0, TRUE);
- if (ret < 0) {
- DHD_ERROR(("%s: tdls_auto_op failed %d\n", __FUNCTION__, ret));
- goto exit;
+ if (dhd->thr_dpc_ctl.thr_pid < 0) {
+ tasklet_kill(&dhd->tasklet);
}
+}
+#endif /* BCMPCIE */
- if (tdls_auto_op) {
- ret = dhd_iovar(dhd, 0, "tdls_idle_time", (char *)&tdls_idle_time,
- sizeof(tdls_idle_time), NULL, 0, TRUE);
- if (ret < 0) {
- DHD_ERROR(("%s: tdls_idle_time failed %d\n", __FUNCTION__, ret));
- goto exit;
- }
- ret = dhd_iovar(dhd, 0, "tdls_rssi_high", (char *)&tdls_rssi_high,
- sizeof(tdls_rssi_high), NULL, 0, TRUE);
- if (ret < 0) {
- DHD_ERROR(("%s: tdls_rssi_high failed %d\n", __FUNCTION__, ret));
- goto exit;
- }
- ret = dhd_iovar(dhd, 0, "tdls_rssi_low", (char *)&tdls_rssi_low,
- sizeof(tdls_rssi_low), NULL, 0, TRUE);
- if (ret < 0) {
- DHD_ERROR(("%s: tdls_rssi_low failed %d\n", __FUNCTION__, ret));
- goto exit;
- }
- ret = dhd_iovar(dhd, 0, "tdls_trigger_pktcnt_high", (char *)&tdls_pktcnt_high,
- sizeof(tdls_pktcnt_high), NULL, 0, TRUE);
- if (ret < 0) {
- DHD_ERROR(("%s: tdls_trigger_pktcnt_high failed %d\n", __FUNCTION__, ret));
- goto exit;
- }
- ret = dhd_iovar(dhd, 0, "tdls_trigger_pktcnt_low", (char *)&tdls_pktcnt_low,
- sizeof(tdls_pktcnt_low), NULL, 0, TRUE);
- if (ret < 0) {
- DHD_ERROR(("%s: tdls_trigger_pktcnt_low failed %d\n", __FUNCTION__, ret));
- goto exit;
+static void
+dhd_dpc(ulong data)
+{
+ dhd_info_t *dhd;
+
+ dhd = (dhd_info_t *)data;
+
+ /* this (tasklet) can be scheduled in dhd_sched_dpc[dhd_linux.c]
+ * down below , wake lock is set,
+ * the tasklet is initialized in dhd_attach()
+ */
+ /* Call bus dpc unless it indicated down (then clean stop) */
+ if (dhd->pub.busstate != DHD_BUS_DOWN) {
+#if defined(DHD_LB_STATS) && defined(PCIE_FULL_DONGLE)
+ DHD_LB_STATS_INCR(dhd->dhd_dpc_cnt);
+#endif /* DHD_LB_STATS && PCIE_FULL_DONGLE */
+ if (dhd_bus_dpc(dhd->pub.bus)) {
+ tasklet_schedule(&dhd->tasklet);
}
+ } else {
+ dhd_bus_stop(dhd->pub.bus, TRUE);
}
-
-exit:
- return ret;
}
-int dhd_tdls_enable(struct net_device *dev, bool tdls_on, bool auto_on, struct ether_addr *mac)
+void
+dhd_sched_dpc(dhd_pub_t *dhdp)
{
- dhd_info_t *dhd = DHD_DEV_INFO(dev);
- int ret = 0;
- if (dhd)
- ret = _dhd_tdls_enable(&dhd->pub, tdls_on, auto_on, mac);
- else
- ret = BCME_ERROR;
- return ret;
+ dhd_info_t *dhd = (dhd_info_t *)dhdp->info;
+
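+ /* Prefer the dedicated DPC kthread when it exists; otherwise fall back
+  * to scheduling the tasklet.
+  */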
+ if (dhd->thr_dpc_ctl.thr_pid >= 0) {
+ DHD_OS_WAKE_LOCK(dhdp);
+ /* If the semaphore does not get up,
+ * wake unlock should be done here
+ */
+ if (!binary_sema_up(&dhd->thr_dpc_ctl)) {
+ DHD_OS_WAKE_UNLOCK(dhdp);
+ }
+ return;
+ } else {
+ tasklet_schedule(&dhd->tasklet);
+ }
}
+#endif /* BCMDBUS */
-int
-dhd_tdls_set_mode(dhd_pub_t *dhd, bool wfd_mode)
+static void
+dhd_sched_rxf(dhd_pub_t *dhdp, void *skb)
{
- int ret = 0;
- bool auto_on = false;
- uint32 mode = wfd_mode;
+ dhd_info_t *dhd = (dhd_info_t *)dhdp->info;
+#ifdef RXF_DEQUEUE_ON_BUSY
+ int ret = BCME_OK;
+ int retry = 2;
+#endif /* RXF_DEQUEUE_ON_BUSY */
-#ifdef ENABLE_TDLS_AUTO_MODE
- if (wfd_mode) {
- auto_on = false;
+ DHD_OS_WAKE_LOCK(dhdp);
+
+ DHD_TRACE(("dhd_sched_rxf: Enter\n"));
+#ifdef RXF_DEQUEUE_ON_BUSY
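+ /* Retry enqueueing to the rxf thread a couple of times while it is busy;
+  * if it stays busy, push the chain to the kernel backlog directly below.
+  */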
+ do {
+ ret = dhd_rxf_enqueue(dhdp, skb);
+ if (ret == BCME_OK || ret == BCME_ERROR)
+ break;
+ else
+ OSL_SLEEP(50); /* waiting for dequeueing */
+ } while (retry-- > 0);
+
+ if (retry <= 0 && ret == BCME_BUSY) {
+ void *skbp = skb;
+
+ while (skbp) {
+ void *skbnext = PKTNEXT(dhdp->osh, skbp);
+ PKTSETNEXT(dhdp->osh, skbp, NULL);
+ bcm_object_trace_opr(skb, BCM_OBJDBG_REMOVE,
+ __FUNCTION__, __LINE__);
+ netif_rx_ni(skbp);
+ skbp = skbnext;
+ }
+ DHD_ERROR(("send skb to kernel backlog without rxf_thread\n"));
} else {
- auto_on = true;
+ if (dhd->thr_rxf_ctl.thr_pid >= 0) {
+ up(&dhd->thr_rxf_ctl.sema);
+ }
}
-#else
- auto_on = false;
-#endif /* ENABLE_TDLS_AUTO_MODE */
- ret = _dhd_tdls_enable(dhd, false, auto_on, NULL);
+#else /* RXF_DEQUEUE_ON_BUSY */
+ do {
+ if (dhd_rxf_enqueue(dhdp, skb) == BCME_OK)
+ break;
+ } while (1);
+ if (dhd->thr_rxf_ctl.thr_pid >= 0) {
+ up(&dhd->thr_rxf_ctl.sema);
+ }
+ return;
+#endif /* RXF_DEQUEUE_ON_BUSY */
+}
+
+#if defined(BCM_DNGL_EMBEDIMAGE) || defined(BCM_REQUEST_FW)
+#endif /* defined(BCM_DNGL_EMBEDIMAGE) || defined(BCM_REQUEST_FW) */
+
+#ifdef TOE
+/* Retrieve current toe component enables, which are kept as a bitmap in toe_ol iovar */
+static int
+dhd_toe_get(dhd_info_t *dhd, int ifidx, uint32 *toe_ol)
+{
+ char buf[32];
+ int ret;
+
+ ret = dhd_iovar(&dhd->pub, ifidx, "toe_ol", NULL, 0, (char *)&buf, sizeof(buf), FALSE);
+
if (ret < 0) {
- DHD_ERROR(("Disable tdls_auto_op failed. %d\n", ret));
+ if (ret == -EIO) {
+ DHD_ERROR(("%s: toe not supported by device\n", dhd_ifname(&dhd->pub,
+ ifidx)));
+ return -EOPNOTSUPP;
+ }
+
+ DHD_INFO(("%s: could not get toe_ol: ret=%d\n", dhd_ifname(&dhd->pub, ifidx), ret));
return ret;
}
- ret = dhd_iovar(dhd, 0, "tdls_wfd_mode", (char *)&mode, sizeof(mode), NULL, 0, TRUE);
- if ((ret < 0) && (ret != BCME_UNSUPPORTED)) {
- DHD_ERROR(("%s: tdls_wfd_mode faile_wfd_mode %d\n", __FUNCTION__, ret));
+ memcpy(toe_ol, buf, sizeof(uint32));
+ return 0;
+}
+
+/* Set current toe component enables in toe_ol iovar, and set toe global enable iovar */
+static int
+dhd_toe_set(dhd_info_t *dhd, int ifidx, uint32 toe_ol)
+{
+ int toe, ret;
+
+ /* Set toe_ol as requested */
+ ret = dhd_iovar(&dhd->pub, ifidx, "toe_ol", (char *)&toe_ol, sizeof(toe_ol), NULL, 0, TRUE);
+ if (ret < 0) {
+ DHD_ERROR(("%s: could not set toe_ol: ret=%d\n",
+ dhd_ifname(&dhd->pub, ifidx), ret));
return ret;
}
- ret = _dhd_tdls_enable(dhd, true, auto_on, NULL);
+ /* Enable toe globally only if any components are enabled. */
+ toe = (toe_ol != 0);
+ ret = dhd_iovar(&dhd->pub, ifidx, "toe", (char *)&toe, sizeof(toe), NULL, 0, TRUE);
if (ret < 0) {
- DHD_ERROR(("enable tdls_auto_op failed. %d\n", ret));
+ DHD_ERROR(("%s: could not set toe: ret=%d\n", dhd_ifname(&dhd->pub, ifidx), ret));
return ret;
}
- dhd->tdls_mode = mode;
- return ret;
+ return 0;
}
-#ifdef PCIE_FULL_DONGLE
-int dhd_tdls_update_peer_info(dhd_pub_t *dhdp, wl_event_msg_t *event)
+#endif /* TOE */
+
+#if defined(WL_CFG80211) && defined(NUM_SCB_MAX_PROBE)
+void dhd_set_scb_probe(dhd_pub_t *dhd)
{
- dhd_pub_t *dhd_pub = dhdp;
- tdls_peer_node_t *cur = dhd_pub->peer_tbl.node;
- tdls_peer_node_t *new = NULL, *prev = NULL;
- int ifindex = dhd_ifname2idx(dhd_pub->info, event->ifname);
- uint8 *da = (uint8 *)&event->addr.octet[0];
- bool connect = FALSE;
- uint32 reason = ntoh32(event->reason);
- unsigned long flags;
+ wl_scb_probe_t scb_probe;
+ int ret;
- /* No handling needed for peer discovered reason */
- if (reason == WLC_E_TDLS_PEER_DISCOVERED) {
- return BCME_ERROR;
- }
- if (reason == WLC_E_TDLS_PEER_CONNECTED)
- connect = TRUE;
- else if (reason == WLC_E_TDLS_PEER_DISCONNECTED)
- connect = FALSE;
- else
- {
- DHD_ERROR(("%s: TDLS Event reason is unknown\n", __FUNCTION__));
- return BCME_ERROR;
+ if (dhd->op_mode & DHD_FLAG_HOSTAP_MODE) {
+ return;
}
- if (ifindex == DHD_BAD_IF)
- return BCME_ERROR;
- if (connect) {
- while (cur != NULL) {
- if (!memcmp(da, cur->addr, ETHER_ADDR_LEN)) {
- DHD_ERROR(("%s: TDLS Peer exist already %d\n",
- __FUNCTION__, __LINE__));
- return BCME_ERROR;
- }
- cur = cur->next;
- }
+ ret = dhd_iovar(dhd, 0, "scb_probe", NULL, 0,
+ (char *)&scb_probe, sizeof(scb_probe), FALSE);
+ if (ret < 0) {
+ DHD_ERROR(("%s: GET max_scb_probe failed\n", __FUNCTION__));
+ }
- new = MALLOC(dhd_pub->osh, sizeof(tdls_peer_node_t));
- if (new == NULL) {
- DHD_ERROR(("%s: Failed to allocate memory\n", __FUNCTION__));
- return BCME_ERROR;
- }
- memcpy(new->addr, da, ETHER_ADDR_LEN);
- DHD_TDLS_LOCK(&dhdp->tdls_lock, flags);
- new->next = dhd_pub->peer_tbl.node;
- dhd_pub->peer_tbl.node = new;
- dhd_pub->peer_tbl.tdls_peer_count++;
- DHD_TDLS_UNLOCK(&dhdp->tdls_lock, flags);
+ scb_probe.scb_max_probe = NUM_SCB_MAX_PROBE;
- } else {
- while (cur != NULL) {
- if (!memcmp(da, cur->addr, ETHER_ADDR_LEN)) {
- dhd_flow_rings_delete_for_peer(dhd_pub, (uint8)ifindex, da);
- DHD_TDLS_LOCK(&dhdp->tdls_lock, flags);
- if (prev)
- prev->next = cur->next;
- else
- dhd_pub->peer_tbl.node = cur->next;
- MFREE(dhd_pub->osh, cur, sizeof(tdls_peer_node_t));
- dhd_pub->peer_tbl.tdls_peer_count--;
- DHD_TDLS_UNLOCK(&dhdp->tdls_lock, flags);
- return BCME_OK;
- }
- prev = cur;
- cur = cur->next;
- }
- DHD_ERROR(("%s: TDLS Peer Entry Not found\n", __FUNCTION__));
+ ret = dhd_iovar(dhd, 0, "scb_probe", (char *)&scb_probe, sizeof(scb_probe),
+ NULL, 0, TRUE);
+ if (ret < 0) {
+ DHD_ERROR(("%s: max_scb_probe setting failed\n", __FUNCTION__));
+ return;
}
- return BCME_OK;
}
-#endif /* PCIE_FULL_DONGLE */
-#endif // endif
+#endif /* WL_CFG80211 && NUM_SCB_MAX_PROBE */
-bool dhd_is_concurrent_mode(dhd_pub_t *dhd)
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 24)
+static void
+dhd_ethtool_get_drvinfo(struct net_device *net, struct ethtool_drvinfo *info)
{
- if (!dhd)
- return FALSE;
+ dhd_info_t *dhd = DHD_DEV_INFO(net);
- if (dhd->op_mode & DHD_FLAG_CONCURR_MULTI_CHAN_MODE)
- return TRUE;
- else if ((dhd->op_mode & DHD_FLAG_CONCURR_SINGLE_CHAN_MODE) ==
- DHD_FLAG_CONCURR_SINGLE_CHAN_MODE)
- return TRUE;
- else
- return FALSE;
+ snprintf(info->driver, sizeof(info->driver), "wl");
+ snprintf(info->version, sizeof(info->version), "%lu", dhd->pub.drv_version);
}
-#if !defined(AP) && defined(WLP2P)
-/* From Android JerryBean release, the concurrent mode is enabled by default and the firmware
- * name would be fw_bcmdhd.bin. So we need to determine whether P2P is enabled in the STA
- * firmware and accordingly enable concurrent mode (Apply P2P settings). SoftAP firmware
- * would still be named as fw_bcmdhd_apsta.
- */
-uint32
-dhd_get_concurrent_capabilites(dhd_pub_t *dhd)
+
+struct ethtool_ops dhd_ethtool_ops = {
+ .get_drvinfo = dhd_ethtool_get_drvinfo
+};
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 24) */
+
+
+#if LINUX_VERSION_CODE > KERNEL_VERSION(2, 4, 2)
+static int
+dhd_ethtool(dhd_info_t *dhd, void *uaddr)
{
- int32 ret = 0;
- char buf[WLC_IOCTL_SMLEN];
- bool mchan_supported = FALSE;
- /* if dhd->op_mode is already set for HOSTAP and Manufacturing
- * test mode, that means we only will use the mode as it is
- */
- if (dhd->op_mode & (DHD_FLAG_HOSTAP_MODE | DHD_FLAG_MFG_MODE))
- return 0;
- if (FW_SUPPORTED(dhd, vsdb)) {
- mchan_supported = TRUE;
- }
- if (!FW_SUPPORTED(dhd, p2p)) {
- DHD_TRACE(("Chip does not support p2p\n"));
- return 0;
- } else {
- /* Chip supports p2p but ensure that p2p is really implemented in firmware or not */
- memset(buf, 0, sizeof(buf));
- ret = dhd_iovar(dhd, 0, "p2p", NULL, 0, (char *)&buf,
- sizeof(buf), FALSE);
- if (ret < 0) {
- DHD_ERROR(("%s: Get P2P failed (error=%d)\n", __FUNCTION__, ret));
- return 0;
- } else {
- if (buf[0] == 1) {
- /* By default, chip supports single chan concurrency,
- * now lets check for mchan
- */
- ret = DHD_FLAG_CONCURR_SINGLE_CHAN_MODE;
- if (mchan_supported)
- ret |= DHD_FLAG_CONCURR_MULTI_CHAN_MODE;
- if (FW_SUPPORTED(dhd, rsdb)) {
- ret |= DHD_FLAG_RSDB_MODE;
- }
-#ifdef WL_SUPPORT_MULTIP2P
- if (FW_SUPPORTED(dhd, mp2p)) {
- ret |= DHD_FLAG_MP2P_MODE;
- }
-#endif /* WL_SUPPORT_MULTIP2P */
-#if defined(WL_ENABLE_P2P_IF) || defined(WL_CFG80211_P2P_DEV_IF)
- return ret;
-#else
- return 0;
-#endif /* WL_ENABLE_P2P_IF || WL_CFG80211_P2P_DEV_IF */
- }
+ struct ethtool_drvinfo info;
+ char drvname[sizeof(info.driver)];
+ uint32 cmd;
+#ifdef TOE
+ struct ethtool_value edata;
+ uint32 toe_cmpnt, csum_dir;
+ int ret;
+#endif
+
+ DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+ /* all ethtool calls start with a cmd word */
+ if (copy_from_user(&cmd, uaddr, sizeof (uint32)))
+ return -EFAULT;
+
+ switch (cmd) {
+ case ETHTOOL_GDRVINFO:
+ /* Copy out any request driver name */
+ if (copy_from_user(&info, uaddr, sizeof(info)))
+ return -EFAULT;
+ strncpy(drvname, info.driver, sizeof(info.driver));
+ drvname[sizeof(info.driver)-1] = '\0';
+
+ /* clear struct for return */
+ memset(&info, 0, sizeof(info));
+ info.cmd = cmd;
+
+ /* if dhd requested, identify ourselves */
+ if (strcmp(drvname, "?dhd") == 0) {
+ snprintf(info.driver, sizeof(info.driver), "dhd");
+ strncpy(info.version, EPI_VERSION_STR, sizeof(info.version) - 1);
+ info.version[sizeof(info.version) - 1] = '\0';
+ }
+
+ /* otherwise, require dongle to be up */
+ else if (!dhd->pub.up) {
+ DHD_ERROR(("%s: dongle is not up\n", __FUNCTION__));
+ return -ENODEV;
+ }
+
+ /* finally, report dongle driver type */
+ else if (dhd->pub.iswl)
+ snprintf(info.driver, sizeof(info.driver), "wl");
+ else
+ snprintf(info.driver, sizeof(info.driver), "xx");
+
+ snprintf(info.version, sizeof(info.version), "%lu", dhd->pub.drv_version);
+ if (copy_to_user(uaddr, &info, sizeof(info)))
+ return -EFAULT;
+ DHD_CTL(("%s: given %*s, returning %s\n", __FUNCTION__,
+ (int)sizeof(drvname), drvname, info.driver));
+ break;
+
+#ifdef TOE
+ /* Get toe offload components from dongle */
+ case ETHTOOL_GRXCSUM:
+ case ETHTOOL_GTXCSUM:
+ if ((ret = dhd_toe_get(dhd, 0, &toe_cmpnt)) < 0)
+ return ret;
+
+ csum_dir = (cmd == ETHTOOL_GTXCSUM) ? TOE_TX_CSUM_OL : TOE_RX_CSUM_OL;
+
+ edata.cmd = cmd;
+ edata.data = (toe_cmpnt & csum_dir) ? 1 : 0;
+
+ if (copy_to_user(uaddr, &edata, sizeof(edata)))
+ return -EFAULT;
+ break;
+
+ /* Set toe offload components in dongle */
+ case ETHTOOL_SRXCSUM:
+ case ETHTOOL_STXCSUM:
+ if (copy_from_user(&edata, uaddr, sizeof(edata)))
+ return -EFAULT;
+
+ /* Read the current settings, update and write back */
+ if ((ret = dhd_toe_get(dhd, 0, &toe_cmpnt)) < 0)
+ return ret;
+
+ csum_dir = (cmd == ETHTOOL_STXCSUM) ? TOE_TX_CSUM_OL : TOE_RX_CSUM_OL;
+
+ if (edata.data != 0)
+ toe_cmpnt |= csum_dir;
+ else
+ toe_cmpnt &= ~csum_dir;
+
+ if ((ret = dhd_toe_set(dhd, 0, toe_cmpnt)) < 0)
+ return ret;
+
+ /* If setting TX checksum mode, tell Linux the new mode */
+ if (cmd == ETHTOOL_STXCSUM) {
+ if (edata.data)
+ dhd->iflist[0]->net->features |= NETIF_F_IP_CSUM;
+ else
+ dhd->iflist[0]->net->features &= ~NETIF_F_IP_CSUM;
}
+
+ break;
+#endif /* TOE */
+
+ default:
+ return -EOPNOTSUPP;
}
+
return 0;
}
-#endif // endif
+#endif /* LINUX_VERSION_CODE > KERNEL_VERSION(2, 4, 2) */
-#if defined(WLADPS)
-
-int
-dhd_enable_adps(dhd_pub_t *dhd, uint8 on)
+static bool dhd_check_hang(struct net_device *net, dhd_pub_t *dhdp, int error)
{
- int i;
- int len;
- int ret = BCME_OK;
-
- bcm_iov_buf_t *iov_buf = NULL;
- wl_adps_params_v1_t *data = NULL;
-
- len = OFFSETOF(bcm_iov_buf_t, data) + sizeof(*data);
- iov_buf = MALLOC(dhd->osh, len);
- if (iov_buf == NULL) {
- DHD_ERROR(("%s - failed to allocate %d bytes for iov_buf\n", __FUNCTION__, len));
- ret = BCME_NOMEM;
- goto exit;
+ if (!dhdp) {
+ DHD_ERROR(("%s: dhdp is NULL\n", __FUNCTION__));
+ return FALSE;
}
- iov_buf->version = WL_ADPS_IOV_VER;
- iov_buf->len = sizeof(*data);
- iov_buf->id = WL_ADPS_IOV_MODE;
+ if (!dhdp->up)
+ return FALSE;
- data = (wl_adps_params_v1_t *)iov_buf->data;
- data->version = ADPS_SUB_IOV_VERSION_1;
- data->length = sizeof(*data);
- data->mode = on;
+#if !defined(BCMPCIE) && !defined(BCMDBUS)
+ if (dhdp->info->thr_dpc_ctl.thr_pid < 0) {
+ DHD_ERROR(("%s : skipped due to negative pid - unloading?\n", __FUNCTION__));
+ return FALSE;
+ }
+#endif /* !BCMPCIE && !BCMDBUS */
- for (i = 1; i <= MAX_BANDS; i++) {
- data->band = i;
- ret = dhd_iovar(dhd, 0, "adps", (char *)iov_buf, len, NULL, 0, TRUE);
- if (ret < 0) {
- if (ret == BCME_UNSUPPORTED) {
- DHD_ERROR(("%s adps is not supported\n", __FUNCTION__));
- ret = BCME_OK;
- goto exit;
- }
- else {
- DHD_ERROR(("%s fail to set adps %s for band %d (%d)\n",
- __FUNCTION__, on ? "On" : "Off", i, ret));
- goto exit;
+ if ((error == -ETIMEDOUT) || (error == -EREMOTEIO) ||
+ ((dhdp->busstate == DHD_BUS_DOWN) && (!dhdp->dongle_reset))) {
+#ifdef BCMPCIE
+ DHD_ERROR(("%s: Event HANG send up due to re=%d te=%d d3acke=%d e=%d s=%d\n",
+ __FUNCTION__, dhdp->rxcnt_timeout, dhdp->txcnt_timeout,
+ dhdp->d3ackcnt_timeout, error, dhdp->busstate));
+#else
+ DHD_ERROR(("%s: Event HANG send up due to re=%d te=%d e=%d s=%d\n", __FUNCTION__,
+ dhdp->rxcnt_timeout, dhdp->txcnt_timeout, error, dhdp->busstate));
+#endif /* BCMPCIE */
+ if (dhdp->hang_reason == 0) {
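+ /* Pick the most specific reason that applies: a dongle trap wins,
+  * then a D3 ACK timeout (PCIe only), else an IOCTL response timeout.
+  */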
+ if (dhdp->dongle_trap_occured) {
+ dhdp->hang_reason = HANG_REASON_DONGLE_TRAP;
+#ifdef BCMPCIE
+ } else if (dhdp->d3ackcnt_timeout) {
+ dhdp->hang_reason = HANG_REASON_D3_ACK_TIMEOUT;
+#endif /* BCMPCIE */
+ } else {
+ dhdp->hang_reason = HANG_REASON_IOCTL_RESP_TIMEOUT;
}
}
+ net_os_send_hang_message(net);
+ return TRUE;
}
+ return FALSE;
+}
-exit:
- if (iov_buf) {
- MFREE(dhd->osh, iov_buf, len);
- iov_buf = NULL;
- }
- return ret;
+#ifdef WL_MONITOR
+bool
+dhd_monitor_enabled(dhd_pub_t *dhd, int ifidx)
+{
+ return (dhd->info->monitor_type != 0);
}
-#endif // endif
-int
-dhd_preinit_ioctls(dhd_pub_t *dhd)
+void
+dhd_rx_mon_pkt(dhd_pub_t *dhdp, host_rxbuf_cmpl_t* msg, void *pkt, int ifidx)
{
- int ret = 0;
- char eventmask[WL_EVENTING_MASK_LEN];
- char iovbuf[WL_EVENTING_MASK_LEN + 12]; /* Room for "event_msgs" + '\0' + bitvec */
- uint32 buf_key_b4_m4 = 1;
- uint8 msglen;
- eventmsgs_ext_t *eventmask_msg = NULL;
- uint32 event_log_max_sets = 0;
- char* iov_buf = NULL;
- int ret2 = 0;
- uint32 wnm_cap = 0;
-#if defined(BCMSUP_4WAY_HANDSHAKE)
- uint32 sup_wpa = 1;
-#endif /* BCMSUP_4WAY_HANDSHAKE */
-#if defined(CUSTOM_AMPDU_BA_WSIZE)
- uint32 ampdu_ba_wsize = 0;
-#endif // endif
-#if defined(CUSTOM_AMPDU_MPDU)
- int32 ampdu_mpdu = 0;
-#endif // endif
-#if defined(CUSTOM_AMPDU_RELEASE)
- int32 ampdu_release = 0;
-#endif // endif
-#if defined(CUSTOM_AMSDU_AGGSF)
- int32 amsdu_aggsf = 0;
-#endif // endif
+ dhd_info_t *dhd = (dhd_info_t *)dhdp->info;
+#ifdef HOST_RADIOTAP_CONV
+ uint16 len = 0, offset = 0;
+ monitor_pkt_info_t pkt_info;
+ memcpy(&pkt_info.marker, &msg->marker, sizeof(msg->marker));
+ memcpy(&pkt_info.ts, &msg->ts, sizeof(monitor_pkt_ts_t));
+
+ if (!dhd->monitor_skb) {
+ if ((dhd->monitor_skb = dev_alloc_skb(MAX_MON_PKT_SIZE)) == NULL)
+ return;
+ }
-#if defined(BCMSDIO) || defined(BCMDBUS)
-#ifdef PROP_TXSTATUS
- int wlfc_enable = TRUE;
-#ifndef DISABLE_11N
- uint32 hostreorder = 1;
- uint wl_down = 1;
-#endif /* DISABLE_11N */
-#endif /* PROP_TXSTATUS */
-#endif /* BCMSDIO || BCMDBUS */
-#ifndef PCIE_FULL_DONGLE
- uint32 wl_ap_isolate;
-#endif /* PCIE_FULL_DONGLE */
- uint32 frameburst = CUSTOM_FRAMEBURST_SET;
- uint wnm_bsstrans_resp = 0;
-#ifdef SUPPORT_SET_CAC
- uint32 cac = 1;
-#endif /* SUPPORT_SET_CAC */
+ len = bcmwifi_monitor(dhd->monitor_info, &pkt_info, PKTDATA(dhdp->osh, pkt),
+ PKTLEN(dhdp->osh, pkt), PKTDATA(dhdp->osh, dhd->monitor_skb), &offset);
-#ifdef DHD_ENABLE_LPC
- uint32 lpc = 1;
-#endif /* DHD_ENABLE_LPC */
- uint power_mode = PM_FAST;
-#if defined(BCMSDIO)
- uint32 dongle_align = DHD_SDALIGN;
- uint32 glom = CUSTOM_GLOM_SETTING;
-#endif /* defined(BCMSDIO) */
-#if defined(USE_WL_CREDALL)
- uint32 credall = 1;
-#endif // endif
- uint bcn_timeout = CUSTOM_BCN_TIMEOUT;
- uint scancache_enab = TRUE;
-#ifdef ENABLE_BCN_LI_BCN_WAKEUP
- uint32 bcn_li_bcn = 1;
-#endif /* ENABLE_BCN_LI_BCN_WAKEUP */
- uint retry_max = CUSTOM_ASSOC_RETRY_MAX;
-#if defined(ARP_OFFLOAD_SUPPORT)
- int arpoe = 0;
-#endif // endif
- int scan_assoc_time = DHD_SCAN_ASSOC_ACTIVE_TIME;
- int scan_unassoc_time = DHD_SCAN_UNASSOC_ACTIVE_TIME;
- int scan_passive_time = DHD_SCAN_PASSIVE_TIME;
- char buf[WLC_IOCTL_SMLEN];
- char *ptr;
- uint32 listen_interval = CUSTOM_LISTEN_INTERVAL; /* Default Listen Interval in Beacons */
-#if defined(DHD_8021X_DUMP) && defined(SHOW_LOGTRACE)
- wl_el_tag_params_t *el_tag = NULL;
-#endif /* DHD_8021X_DUMP */
-#ifdef ROAM_ENABLE
- uint roamvar = 0;
- int roam_trigger[2] = {CUSTOM_ROAM_TRIGGER_SETTING, WLC_BAND_ALL};
- int roam_scan_period[2] = {10, WLC_BAND_ALL};
- int roam_delta[2] = {CUSTOM_ROAM_DELTA_SETTING, WLC_BAND_ALL};
-#ifdef ROAM_AP_ENV_DETECTION
- int roam_env_mode = AP_ENV_INDETERMINATE;
-#endif /* ROAM_AP_ENV_DETECTION */
-#ifdef FULL_ROAMING_SCAN_PERIOD_60_SEC
- int roam_fullscan_period = 60;
-#else /* FULL_ROAMING_SCAN_PERIOD_60_SEC */
- int roam_fullscan_period = 120;
-#endif /* FULL_ROAMING_SCAN_PERIOD_60_SEC */
-#ifdef DISABLE_BCNLOSS_ROAM
- uint roam_bcnloss_off = 1;
-#endif /* DISABLE_BCNLOSS_ROAM */
-#else
-#ifdef DISABLE_BUILTIN_ROAM
- uint roamvar = 1;
-#endif /* DISABLE_BUILTIN_ROAM */
-#endif /* ROAM_ENABLE */
+ if (dhd->monitor_type && dhd->monitor_dev)
+ dhd->monitor_skb->dev = dhd->monitor_dev;
+ else {
+ PKTFREE(dhdp->osh, pkt, FALSE);
+ dev_kfree_skb(dhd->monitor_skb);
+ return;
+ }
-#if defined(SOFTAP)
- uint dtim = 1;
-#endif // endif
-#if (defined(AP) && !defined(WLP2P)) || (!defined(AP) && defined(WL_CFG80211))
- struct ether_addr p2p_ea;
-#endif // endif
-#ifdef BCMCCX
- uint32 ccx = 1;
-#endif // endif
-#ifdef SOFTAP_UAPSD_OFF
- uint32 wme_apsd = 0;
-#endif /* SOFTAP_UAPSD_OFF */
-#if (defined(AP) || defined(WLP2P)) && !defined(SOFTAP_AND_GC)
- uint32 apsta = 1; /* Enable APSTA mode */
-#elif defined(SOFTAP_AND_GC)
- uint32 apsta = 0;
- int ap_mode = 1;
-#endif /* (defined(AP) || defined(WLP2P)) && !defined(SOFTAP_AND_GC) */
-#ifdef GET_CUSTOM_MAC_ENABLE
- struct ether_addr ea_addr;
- char hw_ether[62];
-#endif /* GET_CUSTOM_MAC_ENABLE */
-#ifdef OKC_SUPPORT
- uint32 okc = 1;
-#endif // endif
+ PKTFREE(dhdp->osh, pkt, FALSE);
-#ifdef DISABLE_11N
- uint32 nmode = 0;
-#endif /* DISABLE_11N */
+ if (!len) {
+ return;
+ }
-#ifdef USE_WL_TXBF
- uint32 txbf = 1;
-#endif /* USE_WL_TXBF */
-#ifdef DISABLE_TXBFR
- uint32 txbf_bfr_cap = 0;
-#endif /* DISABLE_TXBFR */
-#ifdef AMPDU_VO_ENABLE
- struct ampdu_tid_control tid;
-#endif // endif
-#if defined(PROP_TXSTATUS)
-#ifdef USE_WFA_CERT_CONF
- uint32 proptx = 0;
-#endif /* USE_WFA_CERT_CONF */
-#endif /* PROP_TXSTATUS */
-#ifdef DHD_SET_FW_HIGHSPEED
- uint32 ack_ratio = 250;
- uint32 ack_ratio_depth = 64;
-#endif /* DHD_SET_FW_HIGHSPEED */
-#if defined(SUPPORT_2G_VHT) || defined(SUPPORT_5G_1024QAM_VHT)
- uint32 vht_features = 0; /* init to 0, will be set based on each support */
-#endif /* SUPPORT_2G_VHT || SUPPORT_5G_1024QAM_VHT */
-#ifdef DISABLE_11N_PROPRIETARY_RATES
- uint32 ht_features = 0;
-#endif /* DISABLE_11N_PROPRIETARY_RATES */
-#ifdef CUSTOM_PSPRETEND_THR
- uint32 pspretend_thr = CUSTOM_PSPRETEND_THR;
-#endif // endif
-#ifdef CUSTOM_EVENT_PM_WAKE
- uint32 pm_awake_thresh = CUSTOM_EVENT_PM_WAKE;
-#endif /* CUSTOM_EVENT_PM_WAKE */
-#ifdef DISABLE_PRUNED_SCAN
- uint32 scan_features = 0;
-#endif /* DISABLE_PRUNED_SCAN */
-#ifdef BCMPCIE_OOB_HOST_WAKE
- uint32 hostwake_oob = 0;
-#endif /* BCMPCIE_OOB_HOST_WAKE */
-#ifdef EVENT_LOG_RATE_HC
- /* threshold number of lines per second */
-#define EVENT_LOG_RATE_HC_THRESHOLD 1000
- uint32 event_log_rate_hc = EVENT_LOG_RATE_HC_THRESHOLD;
-#endif /* EVENT_LOG_RATE_HC */
- wl_wlc_version_t wlc_ver;
+ skb_put(dhd->monitor_skb, len);
+ skb_pull(dhd->monitor_skb, offset);
-#ifdef PKT_FILTER_SUPPORT
- dhd_pkt_filter_enable = TRUE;
-#ifdef APF
- dhd->apf_set = FALSE;
-#endif /* APF */
-#endif /* PKT_FILTER_SUPPORT */
- dhd->suspend_bcn_li_dtim = CUSTOM_SUSPEND_BCN_LI_DTIM;
-#ifdef ENABLE_MAX_DTIM_IN_SUSPEND
- dhd->max_dtim_enable = TRUE;
+ dhd->monitor_skb->protocol = eth_type_trans(dhd->monitor_skb, dhd->monitor_skb->dev);
#else
- dhd->max_dtim_enable = FALSE;
-#endif /* ENABLE_MAX_DTIM_IN_SUSPEND */
- dhd->disable_dtim_in_suspend = FALSE;
-#ifdef SUPPORT_SET_TID
- dhd->tid_mode = SET_TID_OFF;
- dhd->target_uid = 0;
- dhd->target_tid = 0;
-#endif /* SUPPORT_SET_TID */
- DHD_TRACE(("Enter %s\n", __FUNCTION__));
+ uint8 amsdu_flag = (msg->flags & BCMPCIE_PKT_FLAGS_MONITOR_MASK) >>
+ BCMPCIE_PKT_FLAGS_MONITOR_SHIFT;
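+ /* Non-AMSDU frames are handed up as-is; FIRST/INTER/LAST fragments are
+  * accumulated into monitor_skb and delivered once the last one arrives.
+  */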
+ switch (amsdu_flag) {
+ case BCMPCIE_PKT_FLAGS_MONITOR_NO_AMSDU:
+ default:
+ if (!dhd->monitor_skb) {
+ if ((dhd->monitor_skb = PKTTONATIVE(dhdp->osh, pkt)) == NULL)
+ return;
+ }
-#ifdef DHDTCPACK_SUPPRESS
- dhd_tcpack_suppress_set(dhd, dhd->conf->tcpack_sup_mode);
-#endif
- dhd->op_mode = 0;
+ if (dhd->monitor_type && dhd->monitor_dev)
+ dhd->monitor_skb->dev = dhd->monitor_dev;
+ else {
+ PKTFREE(dhdp->osh, pkt, FALSE);
+ dhd->monitor_skb = NULL;
+ return;
+ }
+
+ dhd->monitor_skb->protocol =
+ eth_type_trans(dhd->monitor_skb, dhd->monitor_skb->dev);
+ dhd->monitor_len = 0;
+ break;
+ case BCMPCIE_PKT_FLAGS_MONITOR_FIRST_PKT:
+ if (!dhd->monitor_skb) {
+ if ((dhd->monitor_skb = dev_alloc_skb(MAX_MON_PKT_SIZE)) == NULL)
+ return;
+ dhd->monitor_len = 0;
+ }
+ if (dhd->monitor_type && dhd->monitor_dev)
+ dhd->monitor_skb->dev = dhd->monitor_dev;
+ else {
+ PKTFREE(dhdp->osh, pkt, FALSE);
+ dev_kfree_skb(dhd->monitor_skb);
+ return;
+ }
+ memcpy(PKTDATA(dhdp->osh, dhd->monitor_skb),
+ PKTDATA(dhdp->osh, pkt), PKTLEN(dhdp->osh, pkt));
-#if defined(CUSTOM_COUNTRY_CODE)
- /* clear AP flags */
- dhd->dhd_cflags &= ~WLAN_PLAT_AP_FLAG;
-#endif /* CUSTOM_COUNTRY_CODE && (CUSTOMER_HW2 || BOARD_HIKEY) */
+ dhd->monitor_len = PKTLEN(dhdp->osh, pkt);
+ PKTFREE(dhdp->osh, pkt, FALSE);
+ return;
+ case BCMPCIE_PKT_FLAGS_MONITOR_INTER_PKT:
+ memcpy(PKTDATA(dhdp->osh, dhd->monitor_skb) + dhd->monitor_len,
+ PKTDATA(dhdp->osh, pkt), PKTLEN(dhdp->osh, pkt));
+ dhd->monitor_len += PKTLEN(dhdp->osh, pkt);
- /* query for 'ver' to get version info from firmware */
- memset(buf, 0, sizeof(buf));
- ptr = buf;
- ret = dhd_iovar(dhd, 0, "ver", NULL, 0, (char *)&buf, sizeof(buf), FALSE);
- if (ret < 0)
- DHD_ERROR(("%s failed %d\n", __FUNCTION__, ret));
- else {
- bcmstrtok(&ptr, "\n", 0);
- /* Print fw version info */
- strncpy(fw_version, buf, FW_VER_STR_LEN);
- fw_version[FW_VER_STR_LEN-1] = '\0';
- }
+ PKTFREE(dhdp->osh, pkt, FALSE);
+ return;
+ case BCMPCIE_PKT_FLAGS_MONITOR_LAST_PKT:
+ memcpy(PKTDATA(dhdp->osh, dhd->monitor_skb) + dhd->monitor_len,
+ PKTDATA(dhdp->osh, pkt), PKTLEN(dhdp->osh, pkt));
+ dhd->monitor_len += PKTLEN(dhdp->osh, pkt);
- /* Set op_mode as MFG_MODE if WLTEST is present in "wl ver" */
- if (strstr(fw_version, "WLTEST") != NULL) {
- DHD_ERROR(("%s: wl ver has WLTEST, setting op_mode as DHD_FLAG_MFG_MODE\n",
- __FUNCTION__));
- op_mode = DHD_FLAG_MFG_MODE;
+ PKTFREE(dhdp->osh, pkt, FALSE);
+ skb_put(dhd->monitor_skb, dhd->monitor_len);
+ dhd->monitor_skb->protocol =
+ eth_type_trans(dhd->monitor_skb, dhd->monitor_skb->dev);
+ dhd->monitor_len = 0;
+ break;
}
- if ((!op_mode && dhd_get_fw_mode(dhd->info) == DHD_FLAG_MFG_MODE) ||
- (op_mode == DHD_FLAG_MFG_MODE)) {
- dhd->op_mode = DHD_FLAG_MFG_MODE;
-#ifdef DHD_PCIE_NATIVE_RUNTIMEPM
- /* disable runtimePM by default in MFG mode. */
- pm_runtime_disable(dhd_bus_to_dev(dhd->bus));
-#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
- /* Check and adjust IOCTL response timeout for Manufactring firmware */
- dhd_os_set_ioctl_resp_timeout(MFG_IOCTL_RESP_TIMEOUT);
- DHD_ERROR(("%s : Set IOCTL response time for Manufactring Firmware\n",
- __FUNCTION__));
- } else {
- dhd_os_set_ioctl_resp_timeout(IOCTL_RESP_TIMEOUT);
- DHD_INFO(("%s : Set IOCTL response time.\n", __FUNCTION__));
- }
-#ifdef BCMPCIE_OOB_HOST_WAKE
- ret = dhd_iovar(dhd, 0, "bus:hostwake_oob", NULL, 0, (char *)&hostwake_oob,
- sizeof(hostwake_oob), FALSE);
- if (ret < 0) {
- DHD_ERROR(("%s: hostwake_oob IOVAR not present, proceed\n", __FUNCTION__));
+#endif /* HOST_RADIOTAP_CONV */
+ if (in_interrupt()) {
+ bcm_object_trace_opr(skb, BCM_OBJDBG_REMOVE,
+ __FUNCTION__, __LINE__);
+ DHD_PERIM_UNLOCK_ALL((dhd->fwder_unit % FWDER_MAX_UNIT));
+ netif_rx(dhd->monitor_skb);
+ DHD_PERIM_LOCK_ALL((dhd->fwder_unit % FWDER_MAX_UNIT));
} else {
- if (hostwake_oob == 0) {
- DHD_ERROR(("%s: hostwake_oob is not enabled in the NVRAM, STOP\n",
- __FUNCTION__));
- ret = BCME_UNSUPPORTED;
- goto done;
- } else {
- DHD_ERROR(("%s: hostwake_oob enabled\n", __FUNCTION__));
- }
- }
-#endif /* BCMPCIE_OOB_HOST_WAKE */
+ /* If the receive is not processed inside an ISR,
+ * the softirqd must be woken explicitly to service
+ * the NET_RX_SOFTIRQ. In 2.6 kernels, this is handled
+ * by netif_rx_ni(), but in earlier kernels, we need
+ * to do it manually.
+ */
+ bcm_object_trace_opr(dhd->monitor_skb, BCM_OBJDBG_REMOVE,
+ __FUNCTION__, __LINE__);
-#ifdef DNGL_AXI_ERROR_LOGGING
- ret = dhd_iovar(dhd, 0, "axierror_logbuf_addr", NULL, 0, (char *)&dhd->axierror_logbuf_addr,
- sizeof(dhd->axierror_logbuf_addr), FALSE);
- if (ret < 0) {
- DHD_ERROR(("%s: axierror_logbuf_addr IOVAR not present, proceed\n", __FUNCTION__));
- dhd->axierror_logbuf_addr = 0;
- } else {
- DHD_ERROR(("%s: axierror_logbuf_addr : 0x%x\n", __FUNCTION__,
- dhd->axierror_logbuf_addr));
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)
+ DHD_PERIM_UNLOCK_ALL((dhd->fwder_unit % FWDER_MAX_UNIT));
+ netif_rx_ni(dhd->monitor_skb);
+ DHD_PERIM_LOCK_ALL((dhd->fwder_unit % FWDER_MAX_UNIT));
+#else
+ ulong flags;
+ DHD_PERIM_UNLOCK_ALL((dhd->fwder_unit % FWDER_MAX_UNIT));
+ netif_rx(dhd->monitor_skb);
+ DHD_PERIM_LOCK_ALL((dhd->fwder_unit % FWDER_MAX_UNIT));
+ local_irq_save(flags);
+ RAISE_RX_SOFTIRQ();
+ local_irq_restore(flags);
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0) */
}
-#endif /* DNGL_AXI_ERROR_LOGGING */
-#ifdef EVENT_LOG_RATE_HC
- ret = dhd_iovar(dhd, 0, "event_log_rate_hc", (char *)&event_log_rate_hc,
- sizeof(event_log_rate_hc), NULL, 0, TRUE);
- if (ret < 0) {
- DHD_ERROR(("%s event_log_rate_hc set failed %d\n", __FUNCTION__, ret));
- } else {
- DHD_ERROR(("%s event_log_rate_hc set with threshold:%d\n", __FUNCTION__,
- event_log_rate_hc));
- }
-#endif /* EVENT_LOG_RATE_HC */
+ dhd->monitor_skb = NULL;
+}
-#ifdef GET_CUSTOM_MAC_ENABLE
- memset(hw_ether, 0, sizeof(hw_ether));
- ret = wifi_platform_get_mac_addr(dhd->info->adapter, hw_ether, iface_name);
-#ifdef GET_CUSTOM_MAC_FROM_CONFIG
- if (!memcmp(&ether_null, &dhd->conf->hw_ether, ETHER_ADDR_LEN)) {
- ret = 0;
- } else
-#endif
- if (!ret) {
- memset(buf, 0, sizeof(buf));
-#ifdef GET_CUSTOM_MAC_FROM_CONFIG
- memcpy(hw_ether, &dhd->conf->hw_ether, sizeof(dhd->conf->hw_ether));
-#endif
- bcopy(hw_ether, ea_addr.octet, sizeof(struct ether_addr));
- bcm_mkiovar("cur_etheraddr", (void *)&ea_addr, ETHER_ADDR_LEN, buf, sizeof(buf));
- ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, buf, sizeof(buf), TRUE, 0);
- if (ret < 0) {
- memset(buf, 0, sizeof(buf));
- bcm_mkiovar("hw_ether", hw_ether, sizeof(hw_ether), buf, sizeof(buf));
- ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, buf, sizeof(buf), TRUE, 0);
- if (ret) {
- DHD_ERROR(("%s: can't set MAC address MAC="MACDBG", error=%d\n",
- __FUNCTION__, MAC2STRDBG(hw_ether), ret));
- prhex("MACPAD", &hw_ether[ETHER_ADDR_LEN], sizeof(hw_ether)-ETHER_ADDR_LEN);
- ret = BCME_NOTUP;
- goto done;
- }
- }
- } else {
- DHD_ERROR(("%s: can't get custom MAC address, ret=%d\n", __FUNCTION__, ret));
- ret = BCME_NOTUP;
- goto done;
- }
-#endif /* GET_CUSTOM_MAC_ENABLE */
- /* Get the default device MAC address directly from firmware */
- memset(buf, 0, sizeof(buf));
- bcm_mkiovar("cur_etheraddr", 0, 0, buf, sizeof(buf));
- if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, buf, sizeof(buf),
- FALSE, 0)) < 0) {
- DHD_ERROR(("%s: can't get MAC address , error=%d\n", __FUNCTION__, ret));
- ret = BCME_NOTUP;
- goto done;
- }
- /* Update public MAC address after reading from Firmware */
- memcpy(dhd->mac.octet, buf, ETHER_ADDR_LEN);
+typedef struct dhd_mon_dev_priv {
+ struct net_device_stats stats;
+} dhd_mon_dev_priv_t;
- if ((ret = dhd_apply_default_clm(dhd, dhd->clm_path)) < 0) {
- DHD_ERROR(("%s: CLM set failed. Abort initialization.\n", __FUNCTION__));
- goto done;
+#define DHD_MON_DEV_PRIV_SIZE (sizeof(dhd_mon_dev_priv_t))
+#define DHD_MON_DEV_PRIV(dev) ((dhd_mon_dev_priv_t *)DEV_PRIV(dev))
+#define DHD_MON_DEV_STATS(dev) (((dhd_mon_dev_priv_t *)DEV_PRIV(dev))->stats)
+
+static int
+dhd_monitor_start(struct sk_buff *skb, struct net_device *dev)
+{
+ PKTFREE(NULL, skb, FALSE);
+ return 0;
+}
+
+static int
+dhd_monitor_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+{
+ return 0;
+}
+
+static struct net_device_stats*
+dhd_monitor_get_stats(struct net_device *dev)
+{
+ return &DHD_MON_DEV_STATS(dev);
+}
+
+static const struct net_device_ops netdev_monitor_ops =
+{
+ .ndo_start_xmit = dhd_monitor_start,
+ .ndo_get_stats = dhd_monitor_get_stats,
+ .ndo_do_ioctl = dhd_monitor_ioctl
+};
+
+static void
+dhd_add_monitor_if(void *handle, void *event_info, u8 event)
+{
+ dhd_info_t *dhd = handle;
+ struct net_device *dev;
+ char *devname;
+
+ if (event != DHD_WQ_WORK_IF_ADD) {
+ DHD_ERROR(("%s: unexpected event \n", __FUNCTION__));
+ return;
}
- /* get a capabilities from firmware */
- {
- uint32 cap_buf_size = sizeof(dhd->fw_capabilities);
- memset(dhd->fw_capabilities, 0, cap_buf_size);
- ret = dhd_iovar(dhd, 0, "cap", NULL, 0, dhd->fw_capabilities, (cap_buf_size - 1),
- FALSE);
- if (ret < 0) {
- DHD_ERROR(("%s: Get Capability failed (error=%d)\n",
- __FUNCTION__, ret));
- return 0;
- }
+ if (!dhd) {
+ DHD_ERROR(("%s: dhd info not available \n", __FUNCTION__));
+ return;
+ }
- memmove(&dhd->fw_capabilities[1], dhd->fw_capabilities, (cap_buf_size - 1));
- dhd->fw_capabilities[0] = ' ';
- dhd->fw_capabilities[cap_buf_size - 2] = ' ';
- dhd->fw_capabilities[cap_buf_size - 1] = '\0';
+ dev = alloc_etherdev(DHD_MON_DEV_PRIV_SIZE);
+ if (!dev) {
+ DHD_ERROR(("%s: alloc wlif failed\n", __FUNCTION__));
+ return;
}
- if ((!op_mode && dhd_get_fw_mode(dhd->info) == DHD_FLAG_HOSTAP_MODE) ||
- (op_mode == DHD_FLAG_HOSTAP_MODE)) {
-#ifdef SET_RANDOM_MAC_SOFTAP
- uint rand_mac;
-#endif /* SET_RANDOM_MAC_SOFTAP */
- dhd->op_mode = DHD_FLAG_HOSTAP_MODE;
-#if defined(ARP_OFFLOAD_SUPPORT)
- arpoe = 0;
-#endif // endif
-#ifdef PKT_FILTER_SUPPORT
- if (dhd_conf_get_insuspend(dhd, AP_FILTER_IN_SUSPEND))
- dhd_pkt_filter_enable = TRUE;
- else
- dhd_pkt_filter_enable = FALSE;
-#endif // endif
-#ifdef SET_RANDOM_MAC_SOFTAP
- SRANDOM32((uint)jiffies);
- rand_mac = RANDOM32();
- iovbuf[0] = (unsigned char)(vendor_oui >> 16) | 0x02; /* local admin bit */
- iovbuf[1] = (unsigned char)(vendor_oui >> 8);
- iovbuf[2] = (unsigned char)vendor_oui;
- iovbuf[3] = (unsigned char)(rand_mac & 0x0F) | 0xF0;
- iovbuf[4] = (unsigned char)(rand_mac >> 8);
- iovbuf[5] = (unsigned char)(rand_mac >> 16);
+ devname = "radiotap";
- ret = dhd_iovar(dhd, 0, "cur_etheraddr", (char *)&iovbuf, ETHER_ADDR_LEN, NULL, 0,
- TRUE);
- if (ret < 0) {
- DHD_ERROR(("%s: can't set MAC address , error=%d\n", __FUNCTION__, ret));
- } else
- memcpy(dhd->mac.octet, iovbuf, ETHER_ADDR_LEN);
-#endif /* SET_RANDOM_MAC_SOFTAP */
-#ifdef USE_DYNAMIC_F2_BLKSIZE
- dhdsdio_func_blocksize(dhd, 2, DYNAMIC_F2_BLKSIZE_FOR_NONLEGACY);
-#endif /* USE_DYNAMIC_F2_BLKSIZE */
-#ifdef SOFTAP_UAPSD_OFF
- ret = dhd_iovar(dhd, 0, "wme_apsd", (char *)&wme_apsd, sizeof(wme_apsd), NULL, 0,
- TRUE);
- if (ret < 0) {
- DHD_ERROR(("%s: set wme_apsd 0 fail (error=%d)\n",
- __FUNCTION__, ret));
- }
-#endif /* SOFTAP_UAPSD_OFF */
-#if defined(CUSTOM_COUNTRY_CODE)
- /* set AP flag for specific country code of SOFTAP */
- dhd->dhd_cflags |= WLAN_PLAT_AP_FLAG | WLAN_PLAT_NODFS_FLAG;
-#endif /* CUSTOM_COUNTRY_CODE && (CUSTOMER_HW2 || BOARD_HIKEY) */
- } else if ((!op_mode && dhd_get_fw_mode(dhd->info) == DHD_FLAG_MFG_MODE) ||
- (op_mode == DHD_FLAG_MFG_MODE)) {
-#if defined(ARP_OFFLOAD_SUPPORT)
- arpoe = 0;
-#endif /* ARP_OFFLOAD_SUPPORT */
-#ifdef PKT_FILTER_SUPPORT
- dhd_pkt_filter_enable = FALSE;
-#endif /* PKT_FILTER_SUPPORT */
- dhd->op_mode = DHD_FLAG_MFG_MODE;
-#ifdef USE_DYNAMIC_F2_BLKSIZE
- dhdsdio_func_blocksize(dhd, 2, DYNAMIC_F2_BLKSIZE_FOR_NONLEGACY);
-#endif /* USE_DYNAMIC_F2_BLKSIZE */
-#ifndef CUSTOM_SET_ANTNPM
- if (FW_SUPPORTED(dhd, rsdb)) {
- wl_config_t rsdb_mode;
- memset(&rsdb_mode, 0, sizeof(rsdb_mode));
- ret = dhd_iovar(dhd, 0, "rsdb_mode", (char *)&rsdb_mode, sizeof(rsdb_mode),
- NULL, 0, TRUE);
- if (ret < 0) {
- DHD_ERROR(("%s Disable rsdb_mode is failed ret= %d\n",
- __FUNCTION__, ret));
- }
- }
-#endif /* !CUSTOM_SET_ANTNPM */
- } else {
- uint32 concurrent_mode = 0;
- if ((!op_mode && dhd_get_fw_mode(dhd->info) == DHD_FLAG_P2P_MODE) ||
- (op_mode == DHD_FLAG_P2P_MODE)) {
-#if defined(ARP_OFFLOAD_SUPPORT)
- arpoe = 0;
-#endif // endif
-#ifdef PKT_FILTER_SUPPORT
- dhd_pkt_filter_enable = FALSE;
-#endif // endif
- dhd->op_mode = DHD_FLAG_P2P_MODE;
- } else if ((!op_mode && dhd_get_fw_mode(dhd->info) == DHD_FLAG_IBSS_MODE) ||
- (op_mode == DHD_FLAG_IBSS_MODE)) {
- dhd->op_mode = DHD_FLAG_IBSS_MODE;
- } else
- dhd->op_mode = DHD_FLAG_STA_MODE;
-#if !defined(AP) && defined(WLP2P)
- if (dhd->op_mode != DHD_FLAG_IBSS_MODE &&
- (concurrent_mode = dhd_get_concurrent_capabilites(dhd))) {
-#if defined(ARP_OFFLOAD_SUPPORT)
- arpoe = 1;
-#endif // endif
- dhd->op_mode |= concurrent_mode;
- }
+ snprintf(dev->name, sizeof(dev->name), "%s%u", devname, dhd->unit);
- /* Check if we are enabling p2p */
- if (dhd->op_mode & DHD_FLAG_P2P_MODE) {
- ret = dhd_iovar(dhd, 0, "apsta", (char *)&apsta, sizeof(apsta), NULL, 0,
- TRUE);
- if (ret < 0)
- DHD_ERROR(("%s APSTA for P2P failed ret= %d\n", __FUNCTION__, ret));
+#ifndef ARPHRD_IEEE80211_PRISM /* From Linux 2.4.18 */
+#define ARPHRD_IEEE80211_PRISM 802
+#endif
-#if defined(SOFTAP_AND_GC)
- if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_AP,
- (char *)&ap_mode, sizeof(ap_mode), TRUE, 0)) < 0) {
- DHD_ERROR(("%s WLC_SET_AP failed %d\n", __FUNCTION__, ret));
- }
-#endif // endif
- memcpy(&p2p_ea, &dhd->mac, ETHER_ADDR_LEN);
- ETHER_SET_LOCALADDR(&p2p_ea);
- ret = dhd_iovar(dhd, 0, "p2p_da_override", (char *)&p2p_ea, sizeof(p2p_ea),
- NULL, 0, TRUE);
- if (ret < 0)
- DHD_ERROR(("%s p2p_da_override ret= %d\n", __FUNCTION__, ret));
- else
- DHD_INFO(("dhd_preinit_ioctls: p2p_da_override succeeded\n"));
- }
+#ifndef ARPHRD_IEEE80211_RADIOTAP
+#define ARPHRD_IEEE80211_RADIOTAP 803 /* IEEE 802.11 + radiotap header */
+#endif /* ARPHRD_IEEE80211_RADIOTAP */
+
+ dev->type = ARPHRD_IEEE80211_RADIOTAP;
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 31))
+ dev->hard_start_xmit = dhd_monitor_start;
+ dev->do_ioctl = dhd_monitor_ioctl;
+ dev->get_stats = dhd_monitor_get_stats;
#else
- (void)concurrent_mode;
-#endif // endif
+ dev->netdev_ops = &netdev_monitor_ops;
+#endif
+
+ if (register_netdev(dev)) {
+ DHD_ERROR(("%s, register_netdev failed for %s\n",
+ __FUNCTION__, dev->name));
+		free_netdev(dev);
+		return;
}
-#ifdef DISABLE_PRUNED_SCAN
- if (FW_SUPPORTED(dhd, rsdb)) {
- ret = dhd_iovar(dhd, 0, "scan_features", (char *)&scan_features,
- sizeof(scan_features), iovbuf, sizeof(iovbuf), FALSE);
- if (ret < 0) {
- DHD_ERROR(("%s get scan_features is failed ret=%d\n",
- __FUNCTION__, ret));
- } else {
- memcpy(&scan_features, iovbuf, 4);
- scan_features &= ~RSDB_SCAN_DOWNGRADED_CH_PRUNE_ROAM;
- ret = dhd_iovar(dhd, 0, "scan_features", (char *)&scan_features,
- sizeof(scan_features), NULL, 0, TRUE);
- if (ret < 0) {
- DHD_ERROR(("%s set scan_features is failed ret=%d\n",
- __FUNCTION__, ret));
- }
- }
+ bcmwifi_monitor_create(&dhd->monitor_info);
+ dhd->monitor_dev = dev;
+}
+
+static void
+dhd_del_monitor_if(void *handle, void *event_info, u8 event)
+{
+ dhd_info_t *dhd = handle;
+
+ if (event != DHD_WQ_WORK_IF_DEL) {
+ DHD_ERROR(("%s: unexpected event \n", __FUNCTION__));
+ return;
}
-#endif /* DISABLE_PRUNED_SCAN */
- DHD_ERROR(("Firmware up: op_mode=0x%04x, MAC="MACDBG"\n",
- dhd->op_mode, MAC2STRDBG(dhd->mac.octet)));
-#if defined(DHD_BLOB_EXISTENCE_CHECK)
- if (!dhd->is_blob)
-#endif /* DHD_BLOB_EXISTENCE_CHECK */
- {
- /* get a ccode and revision for the country code */
-#if defined(CUSTOM_COUNTRY_CODE)
- get_customized_country_code(dhd->info->adapter, dhd->dhd_cspec.country_abbrev,
- &dhd->dhd_cspec, dhd->dhd_cflags);
+ if (!dhd) {
+ DHD_ERROR(("%s: dhd info not available \n", __FUNCTION__));
+ return;
+ }
+
+ if (dhd->monitor_dev) {
+ unregister_netdev(dhd->monitor_dev);
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 24))
+ MFREE(dhd->osh, dhd->monitor_dev->priv, DHD_MON_DEV_PRIV_SIZE);
+ MFREE(dhd->osh, dhd->monitor_dev, sizeof(struct net_device));
#else
- get_customized_country_code(dhd->info->adapter, dhd->dhd_cspec.country_abbrev,
- &dhd->dhd_cspec);
-#endif /* CUSTOM_COUNTRY_CODE */
+ free_netdev(dhd->monitor_dev);
+#endif /* 2.6.24 */
+
+ dhd->monitor_dev = NULL;
}
-#if defined(RXFRAME_THREAD) && defined(RXTHREAD_ONLYSTA)
- if (dhd->op_mode == DHD_FLAG_HOSTAP_MODE)
- dhd->info->rxthread_enabled = FALSE;
- else
- dhd->info->rxthread_enabled = TRUE;
-#endif // endif
- /* Set Country code */
- if (dhd->dhd_cspec.ccode[0] != 0) {
- ret = dhd_iovar(dhd, 0, "country", (char *)&dhd->dhd_cspec, sizeof(wl_country_t),
- NULL, 0, TRUE);
- if (ret < 0)
- DHD_ERROR(("%s: country code setting failed\n", __FUNCTION__));
+ if (dhd->monitor_info) {
+ bcmwifi_monitor_delete(dhd->monitor_info);
+ dhd->monitor_info = NULL;
}
+}
- /* Set Listen Interval */
- ret = dhd_iovar(dhd, 0, "assoc_listen", (char *)&listen_interval, sizeof(listen_interval),
- NULL, 0, TRUE);
- if (ret < 0)
- DHD_ERROR(("%s assoc_listen failed %d\n", __FUNCTION__, ret));
+static void
+dhd_set_monitor(dhd_pub_t *dhd, int ifidx, int val)
+{
+ dhd_info_t *info = dhd->info;
-#if defined(ROAM_ENABLE) || defined(DISABLE_BUILTIN_ROAM)
-#ifdef USE_WFA_CERT_CONF
- if (sec_get_param_wfa_cert(dhd, SET_PARAM_ROAMOFF, &roamvar) == BCME_OK) {
- DHD_ERROR(("%s: read roam_off param =%d\n", __FUNCTION__, roamvar));
- }
-#endif /* USE_WFA_CERT_CONF */
- /* Disable built-in roaming to allowed ext supplicant to take care of roaming */
- ret = dhd_iovar(dhd, 0, "roam_off", (char *)&roamvar, sizeof(roamvar), NULL, 0, TRUE);
-#endif /* ROAM_ENABLE || DISABLE_BUILTIN_ROAM */
-#if defined(ROAM_ENABLE)
-#ifdef DISABLE_BCNLOSS_ROAM
- ret = dhd_iovar(dhd, 0, "roam_bcnloss_off", (char *)&roam_bcnloss_off,
- sizeof(roam_bcnloss_off), NULL, 0, TRUE);
-#endif /* DISABLE_BCNLOSS_ROAM */
- if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_ROAM_TRIGGER, roam_trigger,
- sizeof(roam_trigger), TRUE, 0)) < 0)
- DHD_ERROR(("%s: roam trigger set failed %d\n", __FUNCTION__, ret));
- if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_ROAM_SCAN_PERIOD, roam_scan_period,
- sizeof(roam_scan_period), TRUE, 0)) < 0)
- DHD_ERROR(("%s: roam scan period set failed %d\n", __FUNCTION__, ret));
- if ((dhd_wl_ioctl_cmd(dhd, WLC_SET_ROAM_DELTA, roam_delta,
- sizeof(roam_delta), TRUE, 0)) < 0)
- DHD_ERROR(("%s: roam delta set failed %d\n", __FUNCTION__, ret));
- ret = dhd_iovar(dhd, 0, "fullroamperiod", (char *)&roam_fullscan_period,
- sizeof(roam_fullscan_period), NULL, 0, TRUE);
- if (ret < 0)
- DHD_ERROR(("%s: roam fullscan period set failed %d\n", __FUNCTION__, ret));
-#ifdef ROAM_AP_ENV_DETECTION
- if (roam_trigger[0] == WL_AUTO_ROAM_TRIGGER) {
- if (dhd_iovar(dhd, 0, "roam_env_detection", (char *)&roam_env_mode,
- sizeof(roam_env_mode), NULL, 0, TRUE) == BCME_OK)
- dhd->roam_env_detection = TRUE;
- else
- dhd->roam_env_detection = FALSE;
+ DHD_TRACE(("%s: val %d\n", __FUNCTION__, val));
+ if ((val && info->monitor_dev) || (!val && !info->monitor_dev)) {
+ DHD_ERROR(("%s: Mismatched params, return\n", __FUNCTION__));
+ return;
}
-#endif /* ROAM_AP_ENV_DETECTION */
-#endif /* ROAM_ENABLE */
-#ifdef CUSTOM_EVENT_PM_WAKE
- ret = dhd_iovar(dhd, 0, "const_awake_thresh", (char *)&pm_awake_thresh,
- sizeof(pm_awake_thresh), NULL, 0, TRUE);
- if (ret < 0) {
- DHD_ERROR(("%s set const_awake_thresh failed %d\n", __FUNCTION__, ret));
+ /* Delete monitor */
+ if (!val) {
+ info->monitor_type = val;
+ dhd_deferred_schedule_work(info->dhd_deferred_wq, NULL, DHD_WQ_WORK_IF_DEL,
+ dhd_del_monitor_if, DHD_WQ_WORK_PRIORITY_LOW);
+ return;
}
-#endif /* CUSTOM_EVENT_PM_WAKE */
-#ifdef OKC_SUPPORT
- ret = dhd_iovar(dhd, 0, "okc_enable", (char *)&okc, sizeof(okc), NULL, 0, TRUE);
-#endif // endif
-#ifdef BCMCCX
- ret = dhd_iovar(dhd, 0, "ccx_enable", (char *)&ccx, sizeof(ccx), NULL, 0, TRUE);
-#endif /* BCMCCX */
-#ifdef WLTDLS
- dhd->tdls_enable = FALSE;
- dhd_tdls_set_mode(dhd, false);
-#endif /* WLTDLS */
+ /* Add monitor */
+ info->monitor_type = val;
+ dhd_deferred_schedule_work(info->dhd_deferred_wq, NULL, DHD_WQ_WORK_IF_ADD,
+ dhd_add_monitor_if, DHD_WQ_WORK_PRIORITY_LOW);
+}
+#endif /* WL_MONITOR */
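/*
 * Illustrative sketch (not part of the patch): dhd_set_monitor() above is
 * normally driven by the WLC_SET_MONITOR interception in dhd_ioctl_process()
 * further below. A hypothetical driver-internal helper could reuse the same
 * entry point for teardown; the helper name is an assumption, only
 * dhd_set_monitor() and the dhd_info_t fields come from this file.
 */
#ifdef WL_MONITOR
static void example_teardown_monitor_if(dhd_info_t *dhd)
{
	/* val == 0 schedules dhd_del_monitor_if() on the deferred workqueue */
	if (dhd->monitor_dev)
		dhd_set_monitor(&dhd->pub, 0, 0);
}
#endif /* WL_MONITOR */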
-#ifdef DHD_ENABLE_LPC
- /* Set lpc 1 */
- ret = dhd_iovar(dhd, 0, "lpc", (char *)&lpc, sizeof(lpc), NULL, 0, TRUE);
- if (ret < 0) {
- DHD_ERROR(("%s Set lpc failed %d\n", __FUNCTION__, ret));
+int dhd_ioctl_process(dhd_pub_t *pub, int ifidx, dhd_ioctl_t *ioc, void *data_buf)
+{
+ int bcmerror = BCME_OK;
+ int buflen = 0;
+ struct net_device *net;
- if (ret == BCME_NOTDOWN) {
- uint wl_down = 1;
- ret = dhd_wl_ioctl_cmd(dhd, WLC_DOWN,
- (char *)&wl_down, sizeof(wl_down), TRUE, 0);
- DHD_ERROR(("%s lpc fail WL_DOWN : %d, lpc = %d\n", __FUNCTION__, ret, lpc));
+#ifdef REPORT_FATAL_TIMEOUTS
+ if (ioc->cmd == WLC_SET_WPA_AUTH) {
+ int wpa_auth;
- ret = dhd_iovar(dhd, 0, "lpc", (char *)&lpc, sizeof(lpc), NULL, 0, TRUE);
- DHD_ERROR(("%s Set lpc ret --> %d\n", __FUNCTION__, ret));
+ wpa_auth = *((int *)ioc->buf);
+ DHD_INFO(("wpa_auth:%d\n", wpa_auth));
+ if (wpa_auth != WPA_AUTH_DISABLED) {
+ /* If AP is with security then enable WLC_E_PSK_SUP event checking */
+ dhd_set_join_error(pub, WLC_WPA_MASK);
+ } else {
+ /* If AP is with open then disable WLC_E_PSK_SUP event checking */
+ dhd_clear_join_error(pub, WLC_WPA_MASK);
}
}
-#endif /* DHD_ENABLE_LPC */
-#ifdef WLADPS
- if (dhd->op_mode & DHD_FLAG_STA_MODE) {
- if ((ret = dhd_enable_adps(dhd, ADPS_ENABLE)) != BCME_OK) {
- DHD_ERROR(("%s dhd_enable_adps failed %d\n",
- __FUNCTION__, ret));
+ if (ioc->cmd == WLC_SET_AUTH) {
+ int auth;
+ auth = *((int *)ioc->buf);
+ DHD_INFO(("Auth:%d\n", auth));
+
+ if (auth != WL_AUTH_OPEN_SYSTEM) {
+ /* If AP is with security then enable WLC_E_PSK_SUP event checking */
+ dhd_set_join_error(pub, WLC_WPA_MASK);
+ } else {
+ /* If AP is with open then disable WLC_E_PSK_SUP event checking */
+ dhd_clear_join_error(pub, WLC_WPA_MASK);
}
}
-#endif /* WLADPS */
+#endif /* REPORT_FATAL_TIMEOUTS */
+ net = dhd_idx2net(pub, ifidx);
+ if (!net) {
+ bcmerror = BCME_BADARG;
+ goto done;
+ }
-#ifdef DHD_PM_CONTROL_FROM_FILE
- sec_control_pm(dhd, &power_mode);
-#else
- /* Set PowerSave mode */
- (void) dhd_wl_ioctl_cmd(dhd, WLC_SET_PM, (char *)&power_mode, sizeof(power_mode), TRUE, 0);
-#endif /* DHD_PM_CONTROL_FROM_FILE */
+ /* check for local dhd ioctl and handle it */
+ if (ioc->driver == DHD_IOCTL_MAGIC) {
+ /* This is a DHD IOVAR, truncate buflen to DHD_IOCTL_MAXLEN */
+ if (data_buf)
+ buflen = MIN(ioc->len, DHD_IOCTL_MAXLEN);
+ bcmerror = dhd_ioctl((void *)pub, ioc, data_buf, buflen);
+ if (bcmerror)
+ pub->bcmerror = bcmerror;
+ goto done;
+ }
+
+ /* This is a WL IOVAR, truncate buflen to WLC_IOCTL_MAXLEN */
+ if (data_buf)
+ buflen = MIN(ioc->len, WLC_IOCTL_MAXLEN);
+
+#ifndef BCMDBUS
+ /* send to dongle (must be up, and wl). */
+ if (pub->busstate == DHD_BUS_DOWN || pub->busstate == DHD_BUS_LOAD) {
+ if ((!pub->dongle_trap_occured) && allow_delay_fwdl) {
+ int ret;
+ if (atomic_read(&exit_in_progress)) {
+ DHD_ERROR(("%s module exit in progress\n", __func__));
+ bcmerror = BCME_DONGLE_DOWN;
+ goto done;
+ }
+ ret = dhd_bus_start(pub);
+ if (ret != 0) {
+ DHD_ERROR(("%s: failed with code %d\n", __FUNCTION__, ret));
+ bcmerror = BCME_DONGLE_DOWN;
+ goto done;
+ }
+ } else {
+ bcmerror = BCME_DONGLE_DOWN;
+ goto done;
+ }
+ }
+
+ if (!pub->iswl) {
+ bcmerror = BCME_DONGLE_DOWN;
+ goto done;
+ }
+#endif /* !BCMDBUS */
+
+ /*
+ * Flush the TX queue if required for proper message serialization:
+ * Intercept WLC_SET_KEY IOCTL - serialize M4 send and set key IOCTL to
+ * prevent M4 encryption and
+ * intercept WLC_DISASSOC IOCTL - serialize WPS-DONE and WLC_DISASSOC IOCTL to
+ * prevent disassoc frame being sent before WPS-DONE frame.
+ */
+ if (ioc->cmd == WLC_SET_KEY ||
+ (ioc->cmd == WLC_SET_VAR && data_buf != NULL &&
+ strncmp("wsec_key", data_buf, 9) == 0) ||
+ (ioc->cmd == WLC_SET_VAR && data_buf != NULL &&
+ strncmp("bsscfg:wsec_key", data_buf, 15) == 0) ||
+ ioc->cmd == WLC_DISASSOC)
+ dhd_wait_pend8021x(net);
+
+#ifdef WLMEDIA_HTSF
+ if (data_buf) {
+ /* short cut wl ioctl calls here */
+ if (strcmp("htsf", data_buf) == 0) {
+ dhd_ioctl_htsf_get(dhd, 0);
+ return BCME_OK;
+ }
+
+ if (strcmp("htsflate", data_buf) == 0) {
+ if (ioc->set) {
+ memset(ts, 0, sizeof(tstamp_t)*TSMAX);
+ memset(&maxdelayts, 0, sizeof(tstamp_t));
+ maxdelay = 0;
+ tspktcnt = 0;
+ maxdelaypktno = 0;
+ memset(&vi_d1.bin, 0, sizeof(uint32)*NUMBIN);
+ memset(&vi_d2.bin, 0, sizeof(uint32)*NUMBIN);
+ memset(&vi_d3.bin, 0, sizeof(uint32)*NUMBIN);
+ memset(&vi_d4.bin, 0, sizeof(uint32)*NUMBIN);
+ } else {
+ dhd_dump_latency();
+ }
+ return BCME_OK;
+ }
+ if (strcmp("htsfclear", data_buf) == 0) {
+ memset(&vi_d1.bin, 0, sizeof(uint32)*NUMBIN);
+ memset(&vi_d2.bin, 0, sizeof(uint32)*NUMBIN);
+ memset(&vi_d3.bin, 0, sizeof(uint32)*NUMBIN);
+ memset(&vi_d4.bin, 0, sizeof(uint32)*NUMBIN);
+ htsf_seqnum = 0;
+ return BCME_OK;
+ }
+ if (strcmp("htsfhis", data_buf) == 0) {
+ dhd_dump_htsfhisto(&vi_d1, "H to D");
+ dhd_dump_htsfhisto(&vi_d2, "D to D");
+ dhd_dump_htsfhisto(&vi_d3, "D to H");
+ dhd_dump_htsfhisto(&vi_d4, "H to H");
+ return BCME_OK;
+ }
+ if (strcmp("tsport", data_buf) == 0) {
+ if (ioc->set) {
+ memcpy(&tsport, data_buf + 7, 4);
+ } else {
+ DHD_ERROR(("current timestamp port: %d \n", tsport));
+ }
+ return BCME_OK;
+ }
+ }
+#endif /* WLMEDIA_HTSF */
-#if defined(BCMSDIO)
- /* Match Host and Dongle rx alignment */
- ret = dhd_iovar(dhd, 0, "bus:txglomalign", (char *)&dongle_align, sizeof(dongle_align),
- NULL, 0, TRUE);
+ if ((ioc->cmd == WLC_SET_VAR || ioc->cmd == WLC_GET_VAR) &&
+ data_buf != NULL && strncmp("rpc_", data_buf, 4) == 0) {
+#ifdef BCM_FD_AGGR
+ bcmerror = dhd_fdaggr_ioctl(pub, ifidx, (wl_ioctl_t *)ioc, data_buf, buflen);
+#else
+ bcmerror = BCME_UNSUPPORTED;
+#endif
+ goto done;
+ }
+ bcmerror = dhd_wl_ioctl(pub, ifidx, (wl_ioctl_t *)ioc, data_buf, buflen);
-#if defined(USE_WL_CREDALL)
- /* enable credall to reduce the chance of no bus credit happened. */
- ret = dhd_iovar(dhd, 0, "bus:credall", (char *)&credall, sizeof(credall), NULL, 0, TRUE);
-#endif // endif
+#ifdef WL_MONITOR
+ /* Intercept monitor ioctl here, add/del monitor if */
+ if (bcmerror == BCME_OK && ioc->cmd == WLC_SET_MONITOR) {
+ dhd_set_monitor(pub, ifidx, *(int32*)data_buf);
+ }
+#endif
-#ifdef USE_WFA_CERT_CONF
- if (sec_get_param_wfa_cert(dhd, SET_PARAM_BUS_TXGLOM_MODE, &glom) == BCME_OK) {
- DHD_ERROR(("%s, read txglom param =%d\n", __FUNCTION__, glom));
+#ifdef REPORT_FATAL_TIMEOUTS
+ if (ioc->cmd == WLC_SCAN && bcmerror == 0) {
+ dhd_start_scan_timer(pub);
}
-#endif /* USE_WFA_CERT_CONF */
- if (glom != DEFAULT_GLOM_VALUE) {
- DHD_INFO(("%s set glom=0x%X\n", __FUNCTION__, glom));
- ret = dhd_iovar(dhd, 0, "bus:txglom", (char *)&glom, sizeof(glom), NULL, 0, TRUE);
+ if (ioc->cmd == WLC_SET_SSID && bcmerror == 0) {
+ dhd_start_join_timer(pub);
}
-#endif /* defined(BCMSDIO) */
+#endif /* REPORT_FATAL_TIMEOUTS */
- /* Setup timeout if Beacons are lost and roam is off to report link down */
- ret = dhd_iovar(dhd, 0, "bcn_timeout", (char *)&bcn_timeout, sizeof(bcn_timeout), NULL, 0,
- TRUE);
-
- /* Setup assoc_retry_max count to reconnect target AP in dongle */
- ret = dhd_iovar(dhd, 0, "assoc_retry_max", (char *)&retry_max, sizeof(retry_max), NULL, 0,
- TRUE);
-
-#if defined(AP) && !defined(WLP2P)
- ret = dhd_iovar(dhd, 0, "apsta", (char *)&apsta, sizeof(apsta), NULL, 0, TRUE);
+done:
+ dhd_check_hang(net, pub, bcmerror);
-#endif /* defined(AP) && !defined(WLP2P) */
+ return bcmerror;
+}
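/*
 * Illustrative sketch (not part of the patch): a minimal user-space caller
 * that reaches dhd_ioctl_process() through dhd_ioctl_entry() below. Field
 * names follow the compat handling in dhd_ioctl_entry(); the wl_ioctl_t
 * layout, WLC_GET_MAGIC and the trailing "driver" word are assumptions
 * about the standard wl utility environment (wlioctl.h), not taken from
 * this file. Shown commented out because it is user-space code.
 *
 *	#include <string.h>
 *	#include <unistd.h>
 *	#include <sys/ioctl.h>
 *	#include <sys/socket.h>
 *	#include <net/if.h>
 *	#include <linux/sockios.h>
 *
 *	int wl_get_magic(const char *ifname)
 *	{
 *		char buf[16] = {0};
 *		struct { wl_ioctl_t ioc; unsigned int driver; } arg = {0};
 *		struct ifreq ifr = {0};
 *		int s, ret;
 *
 *		arg.ioc.cmd = WLC_GET_MAGIC;	// any WLC_* command code
 *		arg.ioc.buf = buf;		// copied to local_buf in dhd_ioctl_entry()
 *		arg.ioc.len = sizeof(buf);
 *		arg.ioc.set = 0;		// GET
 *		arg.driver = 0;			// not DHD_IOCTL_MAGIC, so routed as a wl ioctl
 *
 *		s = socket(AF_INET, SOCK_DGRAM, 0);
 *		strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
 *		ifr.ifr_data = (void *)&arg;
 *		ret = ioctl(s, SIOCDEVPRIVATE, &ifr);	// lands in dhd_ioctl_entry()
 *		close(s);
 *		return ret;
 *	}
 */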
-#ifdef MIMO_ANT_SETTING
- dhd_sel_ant_from_file(dhd);
-#endif /* MIMO_ANT_SETTING */
+static int
+dhd_ioctl_entry(struct net_device *net, struct ifreq *ifr, int cmd)
+{
+ dhd_info_t *dhd = DHD_DEV_INFO(net);
+ dhd_ioctl_t ioc;
+ int bcmerror = 0;
+ int ifidx;
+ int ret;
+ void *local_buf = NULL;
+ void __user *ioc_buf_user = NULL;
+ u16 buflen = 0;
-#if defined(SOFTAP)
- if (ap_fw_loaded == TRUE) {
- dhd_wl_ioctl_cmd(dhd, WLC_SET_DTIMPRD, (char *)&dtim, sizeof(dtim), TRUE, 0);
+ if (atomic_read(&exit_in_progress)) {
+ DHD_ERROR(("%s module exit in progress\n", __func__));
+ bcmerror = BCME_DONGLE_DOWN;
+ return OSL_ERROR(bcmerror);
}
-#endif // endif
-#if defined(KEEP_ALIVE)
- {
- /* Set Keep Alive : be sure to use FW with -keepalive */
- int res;
+ DHD_OS_WAKE_LOCK(&dhd->pub);
+ DHD_PERIM_LOCK(&dhd->pub);
-#if defined(SOFTAP)
- if (ap_fw_loaded == FALSE)
-#endif // endif
- if (!(dhd->op_mode &
- (DHD_FLAG_HOSTAP_MODE | DHD_FLAG_MFG_MODE))) {
- if ((res = dhd_keep_alive_onoff(dhd)) < 0)
- DHD_ERROR(("%s set keeplive failed %d\n",
- __FUNCTION__, res));
- }
+ /* Interface up check for built-in type */
+ if (!dhd_download_fw_on_driverload && dhd->pub.up == FALSE) {
+ DHD_ERROR(("%s: Interface is down \n", __FUNCTION__));
+ DHD_PERIM_UNLOCK(&dhd->pub);
+ DHD_OS_WAKE_UNLOCK(&dhd->pub);
+ return OSL_ERROR(BCME_NOTUP);
}
-#endif /* defined(KEEP_ALIVE) */
-
-#ifdef USE_WL_TXBF
- ret = dhd_iovar(dhd, 0, "txbf", (char *)&txbf, sizeof(txbf), NULL, 0, TRUE);
- if (ret < 0)
- DHD_ERROR(("%s Set txbf failed %d\n", __FUNCTION__, ret));
-#endif /* USE_WL_TXBF */
+ ifidx = dhd_net2idx(dhd, net);
+ DHD_TRACE(("%s: ifidx %d, cmd 0x%04x\n", __FUNCTION__, ifidx, cmd));
- ret = dhd_iovar(dhd, 0, "scancache", (char *)&scancache_enab, sizeof(scancache_enab), NULL,
- 0, TRUE);
- if (ret < 0) {
- DHD_ERROR(("%s Set scancache failed %d\n", __FUNCTION__, ret));
+ if (ifidx == DHD_BAD_IF) {
+ DHD_ERROR(("%s: BAD IF\n", __FUNCTION__));
+ DHD_PERIM_UNLOCK(&dhd->pub);
+ DHD_OS_WAKE_UNLOCK(&dhd->pub);
+ return -1;
}
- ret = dhd_iovar(dhd, 0, "event_log_max_sets", NULL, 0, (char *)&event_log_max_sets,
- sizeof(event_log_max_sets), FALSE);
- if (ret == BCME_OK) {
- dhd->event_log_max_sets = event_log_max_sets;
- } else {
- dhd->event_log_max_sets = NUM_EVENT_LOG_SETS;
+#if defined(WL_WIRELESS_EXT)
+ /* linux wireless extensions */
+ if ((cmd >= SIOCIWFIRST) && (cmd <= SIOCIWLAST)) {
+ /* may recurse, do NOT lock */
+ ret = wl_iw_ioctl(net, ifr, cmd);
+ DHD_PERIM_UNLOCK(&dhd->pub);
+ DHD_OS_WAKE_UNLOCK(&dhd->pub);
+ return ret;
}
- /* Make sure max_sets is set first with wmb and then sets_queried,
- * this will be used during parsing the logsets in the reverse order.
- */
- OSL_SMP_WMB();
- dhd->event_log_max_sets_queried = TRUE;
- DHD_ERROR(("%s: event_log_max_sets: %d ret: %d\n",
- __FUNCTION__, dhd->event_log_max_sets, ret));
+#endif /* defined(WL_WIRELESS_EXT) */
-#ifdef DISABLE_TXBFR
- ret = dhd_iovar(dhd, 0, "txbf_bfr_cap", (char *)&txbf_bfr_cap, sizeof(txbf_bfr_cap), NULL,
- 0, TRUE);
- if (ret < 0) {
- DHD_ERROR(("%s Clear txbf_bfr_cap failed %d\n", __FUNCTION__, ret));
+#if LINUX_VERSION_CODE > KERNEL_VERSION(2, 4, 2)
+ if (cmd == SIOCETHTOOL) {
+ ret = dhd_ethtool(dhd, (void*)ifr->ifr_data);
+ DHD_PERIM_UNLOCK(&dhd->pub);
+ DHD_OS_WAKE_UNLOCK(&dhd->pub);
+ return ret;
}
-#endif /* DISABLE_TXBFR */
+#endif /* LINUX_VERSION_CODE > KERNEL_VERSION(2, 4, 2) */
-#ifdef USE_WFA_CERT_CONF
-#ifdef USE_WL_FRAMEBURST
- if (sec_get_param_wfa_cert(dhd, SET_PARAM_FRAMEBURST, &frameburst) == BCME_OK) {
- DHD_ERROR(("%s, read frameburst param=%d\n", __FUNCTION__, frameburst));
- }
-#endif /* USE_WL_FRAMEBURST */
- g_frameburst = frameburst;
-#endif /* USE_WFA_CERT_CONF */
-#ifdef DISABLE_WL_FRAMEBURST_SOFTAP
- /* Disable Framebursting for SofAP */
- if (dhd->op_mode & DHD_FLAG_HOSTAP_MODE) {
- frameburst = 0;
- }
-#endif /* DISABLE_WL_FRAMEBURST_SOFTAP */
- /* Set frameburst to value */
- if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_FAKEFRAG, (char *)&frameburst,
- sizeof(frameburst), TRUE, 0)) < 0) {
- DHD_INFO(("%s frameburst not supported %d\n", __FUNCTION__, ret));
- }
-#ifdef DHD_SET_FW_HIGHSPEED
- /* Set ack_ratio */
- ret = dhd_iovar(dhd, 0, "ack_ratio", (char *)&ack_ratio, sizeof(ack_ratio), NULL, 0, TRUE);
- if (ret < 0) {
- DHD_ERROR(("%s Set ack_ratio failed %d\n", __FUNCTION__, ret));
+ if (cmd == SIOCDEVPRIVATE+1) {
+ ret = wl_android_priv_cmd(net, ifr, cmd);
+ dhd_check_hang(net, &dhd->pub, ret);
+ DHD_PERIM_UNLOCK(&dhd->pub);
+ DHD_OS_WAKE_UNLOCK(&dhd->pub);
+ return ret;
}
- /* Set ack_ratio_depth */
- ret = dhd_iovar(dhd, 0, "ack_ratio_depth", (char *)&ack_ratio_depth,
- sizeof(ack_ratio_depth), NULL, 0, TRUE);
- if (ret < 0) {
- DHD_ERROR(("%s Set ack_ratio_depth failed %d\n", __FUNCTION__, ret));
+ if (cmd != SIOCDEVPRIVATE) {
+ DHD_PERIM_UNLOCK(&dhd->pub);
+ DHD_OS_WAKE_UNLOCK(&dhd->pub);
+ return -EOPNOTSUPP;
}
-#endif /* DHD_SET_FW_HIGHSPEED */
- iov_buf = (char*)MALLOC(dhd->osh, WLC_IOCTL_SMLEN);
- if (iov_buf == NULL) {
- DHD_ERROR(("failed to allocate %d bytes for iov_buf\n", WLC_IOCTL_SMLEN));
- ret = BCME_NOMEM;
- goto done;
- }
+ memset(&ioc, 0, sizeof(ioc));
-#if defined(CUSTOM_AMPDU_BA_WSIZE)
- /* Set ampdu ba wsize to 64 or 16 */
-#ifdef CUSTOM_AMPDU_BA_WSIZE
- ampdu_ba_wsize = CUSTOM_AMPDU_BA_WSIZE;
-#endif // endif
- if (ampdu_ba_wsize != 0) {
- ret = dhd_iovar(dhd, 0, "ampdu_ba_wsize", (char *)&du_ba_wsize,
- sizeof(ampdu_ba_wsize), NULL, 0, TRUE);
- if (ret < 0) {
- DHD_ERROR(("%s Set ampdu_ba_wsize to %d failed %d\n",
- __FUNCTION__, ampdu_ba_wsize, ret));
+#ifdef CONFIG_COMPAT
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 6, 0))
+ if (in_compat_syscall())
+#else
+ if (is_compat_task())
+#endif
+ {
+ compat_wl_ioctl_t compat_ioc;
+ if (copy_from_user(&compat_ioc, ifr->ifr_data, sizeof(compat_wl_ioctl_t))) {
+ bcmerror = BCME_BADADDR;
+ goto done;
}
- }
-#endif // endif
+ ioc.cmd = compat_ioc.cmd;
+ if (ioc.cmd & WLC_SPEC_FLAG) {
+ memset(&ioc, 0, sizeof(ioc));
+ /* Copy the ioc control structure part of ioctl request */
+ if (copy_from_user(&ioc, ifr->ifr_data, sizeof(wl_ioctl_t))) {
+ bcmerror = BCME_BADADDR;
+ goto done;
+ }
+ ioc.cmd &= ~WLC_SPEC_FLAG; /* Clear the FLAG */
-#if defined(CUSTOM_AMPDU_MPDU)
- ampdu_mpdu = CUSTOM_AMPDU_MPDU;
- if (ampdu_mpdu != 0 && (ampdu_mpdu <= ampdu_ba_wsize)) {
- ret = dhd_iovar(dhd, 0, "ampdu_mpdu", (char *)&du_mpdu, sizeof(ampdu_mpdu),
- NULL, 0, TRUE);
- if (ret < 0) {
- DHD_ERROR(("%s Set ampdu_mpdu to %d failed %d\n",
- __FUNCTION__, CUSTOM_AMPDU_MPDU, ret));
+			/* To differentiate between wl and dhd, read 4 more bytes */
+ if ((copy_from_user(&ioc.driver, (char *)ifr->ifr_data + sizeof(wl_ioctl_t),
+ sizeof(uint)) != 0)) {
+ bcmerror = BCME_BADADDR;
+ goto done;
+ }
+
+ } else { /* ioc.cmd & WLC_SPEC_FLAG */
+ ioc.buf = compat_ptr(compat_ioc.buf);
+ ioc.len = compat_ioc.len;
+ ioc.set = compat_ioc.set;
+ ioc.used = compat_ioc.used;
+ ioc.needed = compat_ioc.needed;
+			/* To differentiate between wl and dhd, read 4 more bytes */
+ if ((copy_from_user(&ioc.driver, (char *)ifr->ifr_data + sizeof(compat_wl_ioctl_t),
+ sizeof(uint)) != 0)) {
+ bcmerror = BCME_BADADDR;
+ goto done;
+ }
+ } /* ioc.cmd & WLC_SPEC_FLAG */
+ } else
+#endif /* CONFIG_COMPAT */
+ {
+ /* Copy the ioc control structure part of ioctl request */
+ if (copy_from_user(&ioc, ifr->ifr_data, sizeof(wl_ioctl_t))) {
+ bcmerror = BCME_BADADDR;
+ goto done;
}
- }
-#endif /* CUSTOM_AMPDU_MPDU */
+#ifdef CONFIG_COMPAT
+		ioc.cmd &= ~WLC_SPEC_FLAG; /* make sure the flag is clear when this isn't a compat task */
+#endif
-#if defined(CUSTOM_AMPDU_RELEASE)
- ampdu_release = CUSTOM_AMPDU_RELEASE;
- if (ampdu_release != 0 && (ampdu_release <= ampdu_ba_wsize)) {
- ret = dhd_iovar(dhd, 0, "ampdu_release", (char *)&du_release,
- sizeof(ampdu_release), NULL, 0, TRUE);
- if (ret < 0) {
- DHD_ERROR(("%s Set ampdu_release to %d failed %d\n",
- __FUNCTION__, CUSTOM_AMPDU_RELEASE, ret));
+		/* To differentiate between wl and dhd, read 4 more bytes */
+ if ((copy_from_user(&ioc.driver, (char *)ifr->ifr_data + sizeof(wl_ioctl_t),
+ sizeof(uint)) != 0)) {
+ bcmerror = BCME_BADADDR;
+ goto done;
}
}
-#endif /* CUSTOM_AMPDU_RELEASE */
+/*
+ if (!capable(CAP_NET_ADMIN)) {
+ bcmerror = BCME_EPERM;
+ goto done;
+ }
+*/
+ /* Take backup of ioc.buf and restore later */
+ ioc_buf_user = ioc.buf;
-#if defined(CUSTOM_AMSDU_AGGSF)
- amsdu_aggsf = CUSTOM_AMSDU_AGGSF;
- if (amsdu_aggsf != 0) {
- ret = dhd_iovar(dhd, 0, "amsdu_aggsf", (char *)&amsdu_aggsf, sizeof(amsdu_aggsf),
- NULL, 0, TRUE);
- if (ret < 0) {
- DHD_ERROR(("%s Set amsdu_aggsf to %d failed %d\n",
- __FUNCTION__, CUSTOM_AMSDU_AGGSF, ret));
+ if (ioc.len > 0) {
+ buflen = MIN(ioc.len, DHD_IOCTL_MAXLEN);
+ if (!(local_buf = MALLOC(dhd->pub.osh, buflen+1))) {
+ bcmerror = BCME_NOMEM;
+ goto done;
}
- }
-#endif /* CUSTOM_AMSDU_AGGSF */
-#if defined(BCMSUP_4WAY_HANDSHAKE)
- /* Read 4-way handshake requirements */
- if (dhd_use_idsup == 1) {
- ret = dhd_iovar(dhd, 0, "sup_wpa", (char *)&sup_wpa, sizeof(sup_wpa),
- (char *)&iovbuf, sizeof(iovbuf), FALSE);
- /* sup_wpa iovar returns NOTREADY status on some platforms using modularized
- * in-dongle supplicant.
+ DHD_PERIM_UNLOCK(&dhd->pub);
+ if (copy_from_user(local_buf, ioc.buf, buflen)) {
+ DHD_PERIM_LOCK(&dhd->pub);
+ bcmerror = BCME_BADADDR;
+ goto done;
+ }
+ DHD_PERIM_LOCK(&dhd->pub);
+
+ *((char *)local_buf + buflen) = '\0';
+
+		/* On some platforms, accessing the user-space memory behind
+		 * ioc.buf from this context causes a kernel panic, so point
+		 * ioc.buf at the kernel-space copy in local_buf instead
*/
- if (ret >= 0 || ret == BCME_NOTREADY)
- dhd->fw_4way_handshake = TRUE;
- DHD_TRACE(("4-way handshake mode is: %d\n", dhd->fw_4way_handshake));
- }
-#endif /* BCMSUP_4WAY_HANDSHAKE */
-#if defined(SUPPORT_2G_VHT) || defined(SUPPORT_5G_1024QAM_VHT)
- ret = dhd_iovar(dhd, 0, "vht_features", (char *)&vht_features, sizeof(vht_features),
- NULL, 0, FALSE);
- if (ret < 0) {
- DHD_ERROR(("%s vht_features get failed %d\n", __FUNCTION__, ret));
- vht_features = 0;
- } else {
-#ifdef SUPPORT_2G_VHT
- vht_features |= 0x3; /* 2G support */
-#endif /* SUPPORT_2G_VHT */
-#ifdef SUPPORT_5G_1024QAM_VHT
- vht_features |= 0x6; /* 5G 1024 QAM support */
-#endif /* SUPPORT_5G_1024QAM_VHT */
+ ioc.buf = local_buf;
}
- if (vht_features) {
- ret = dhd_iovar(dhd, 0, "vht_features", (char *)&vht_features, sizeof(vht_features),
- NULL, 0, TRUE);
- if (ret < 0) {
- DHD_ERROR(("%s vht_features set failed %d\n", __FUNCTION__, ret));
- if (ret == BCME_NOTDOWN) {
- uint wl_down = 1;
- ret = dhd_wl_ioctl_cmd(dhd, WLC_DOWN,
- (char *)&wl_down, sizeof(wl_down), TRUE, 0);
- DHD_ERROR(("%s vht_features fail WL_DOWN : %d,"
- " vht_features = 0x%x\n",
- __FUNCTION__, ret, vht_features));
+ /* Skip all the non DHD iovars (wl iovars) after f/w hang */
+ if (ioc.driver != DHD_IOCTL_MAGIC && dhd->pub.hang_was_sent) {
+ DHD_TRACE(("%s: HANG was sent up earlier\n", __FUNCTION__));
+ DHD_OS_WAKE_LOCK_CTRL_TIMEOUT_ENABLE(&dhd->pub, DHD_EVENT_TIMEOUT_MS);
+ bcmerror = BCME_DONGLE_DOWN;
+ goto done;
+ }
- ret = dhd_iovar(dhd, 0, "vht_features", (char *)&vht_features,
- sizeof(vht_features), NULL, 0, TRUE);
+ bcmerror = dhd_ioctl_process(&dhd->pub, ifidx, &ioc, local_buf);
- DHD_ERROR(("%s vht_features set. ret --> %d\n", __FUNCTION__, ret));
- }
- }
- }
-#endif /* SUPPORT_2G_VHT || SUPPORT_5G_1024QAM_VHT */
-#ifdef DISABLE_11N_PROPRIETARY_RATES
- ret = dhd_iovar(dhd, 0, "ht_features", (char *)&ht_features, sizeof(ht_features), NULL, 0,
- TRUE);
- if (ret < 0) {
- DHD_ERROR(("%s ht_features set failed %d\n", __FUNCTION__, ret));
- }
-#endif /* DISABLE_11N_PROPRIETARY_RATES */
-#if defined(DISABLE_HE_ENAB) || defined(CUSTOM_CONTROL_HE_ENAB)
-#if defined(DISABLE_HE_ENAB)
- control_he_enab = 0;
-#endif /* DISABLE_HE_ENAB */
- dhd_control_he_enab(dhd, control_he_enab);
-#endif /* DISABLE_HE_ENAB || CUSTOM_CONTROL_HE_ENAB */
+ /* Restore back userspace pointer to ioc.buf */
+ ioc.buf = ioc_buf_user;
-#ifdef CUSTOM_PSPRETEND_THR
- /* Turn off MPC in AP mode */
- ret = dhd_iovar(dhd, 0, "pspretend_threshold", (char *)&pspretend_thr,
- sizeof(pspretend_thr), NULL, 0, TRUE);
- if (ret < 0) {
- DHD_ERROR(("%s pspretend_threshold for HostAPD failed %d\n",
- __FUNCTION__, ret));
+ if (!bcmerror && buflen && local_buf && ioc.buf) {
+ DHD_PERIM_UNLOCK(&dhd->pub);
+ if (copy_to_user(ioc.buf, local_buf, buflen))
+ bcmerror = -EFAULT;
+ DHD_PERIM_LOCK(&dhd->pub);
}
-#endif // endif
- ret = dhd_iovar(dhd, 0, "buf_key_b4_m4", (char *)&buf_key_b4_m4, sizeof(buf_key_b4_m4),
- NULL, 0, TRUE);
- if (ret < 0) {
- DHD_ERROR(("%s buf_key_b4_m4 set failed %d\n", __FUNCTION__, ret));
- }
-#ifdef SUPPORT_SET_CAC
- ret = dhd_iovar(dhd, 0, "cac", (char *)&cac, sizeof(cac), NULL, 0, TRUE);
- if (ret < 0) {
- DHD_ERROR(("%s Failed to set cac to %d, %d\n", __FUNCTION__, cac, ret));
- }
-#endif /* SUPPORT_SET_CAC */
-#ifdef DHD_ULP
- /* Get the required details from dongle during preinit ioctl */
- dhd_ulp_preinit(dhd);
-#endif /* DHD_ULP */
+done:
+ if (local_buf)
+ MFREE(dhd->pub.osh, local_buf, buflen+1);
- /* Read event_msgs mask */
- ret = dhd_iovar(dhd, 0, "event_msgs", eventmask, WL_EVENTING_MASK_LEN, iovbuf,
- sizeof(iovbuf), FALSE);
- if (ret < 0) {
- DHD_ERROR(("%s read Event mask failed %d\n", __FUNCTION__, ret));
- goto done;
+ DHD_PERIM_UNLOCK(&dhd->pub);
+ DHD_OS_WAKE_UNLOCK(&dhd->pub);
+
+ return OSL_ERROR(bcmerror);
+}
+
+
+#ifdef FIX_CPU_MIN_CLOCK
+static int dhd_init_cpufreq_fix(dhd_info_t *dhd)
+{
+ if (dhd) {
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
+ mutex_init(&dhd->cpufreq_fix);
+#endif
+ dhd->cpufreq_fix_status = FALSE;
}
- bcopy(iovbuf, eventmask, WL_EVENTING_MASK_LEN);
+ return 0;
+}
- /* Setup event_msgs */
- setbit(eventmask, WLC_E_SET_SSID);
- setbit(eventmask, WLC_E_PRUNE);
- setbit(eventmask, WLC_E_AUTH);
- setbit(eventmask, WLC_E_AUTH_IND);
- setbit(eventmask, WLC_E_ASSOC);
- setbit(eventmask, WLC_E_REASSOC);
- setbit(eventmask, WLC_E_REASSOC_IND);
- if (!(dhd->op_mode & DHD_FLAG_IBSS_MODE))
- setbit(eventmask, WLC_E_DEAUTH);
- setbit(eventmask, WLC_E_DEAUTH_IND);
- setbit(eventmask, WLC_E_DISASSOC_IND);
- setbit(eventmask, WLC_E_DISASSOC);
- setbit(eventmask, WLC_E_JOIN);
- setbit(eventmask, WLC_E_START);
- setbit(eventmask, WLC_E_ASSOC_IND);
- setbit(eventmask, WLC_E_PSK_SUP);
- setbit(eventmask, WLC_E_LINK);
- setbit(eventmask, WLC_E_MIC_ERROR);
- setbit(eventmask, WLC_E_ASSOC_REQ_IE);
- setbit(eventmask, WLC_E_ASSOC_RESP_IE);
-#ifdef LIMIT_BORROW
- setbit(eventmask, WLC_E_ALLOW_CREDIT_BORROW);
-#endif // endif
-#ifndef WL_CFG80211
- setbit(eventmask, WLC_E_PMKID_CACHE);
-// setbit(eventmask, WLC_E_TXFAIL); // terence 20181106: remove unnecessary event
-#endif // endif
- setbit(eventmask, WLC_E_JOIN_START);
-// setbit(eventmask, WLC_E_SCAN_COMPLETE); // terence 20150628: remove redundant event
-#ifdef DHD_DEBUG
- setbit(eventmask, WLC_E_SCAN_CONFIRM_IND);
-#endif // endif
-#ifdef PNO_SUPPORT
- setbit(eventmask, WLC_E_PFN_NET_FOUND);
- setbit(eventmask, WLC_E_PFN_BEST_BATCHING);
- setbit(eventmask, WLC_E_PFN_BSSID_NET_FOUND);
- setbit(eventmask, WLC_E_PFN_BSSID_NET_LOST);
-#endif /* PNO_SUPPORT */
- /* enable dongle roaming event */
-#ifdef WL_CFG80211
-#if !defined(ROAM_EVT_DISABLE)
- setbit(eventmask, WLC_E_ROAM);
-#endif /* !ROAM_EVT_DISABLE */
- setbit(eventmask, WLC_E_BSSID);
-#endif /* WL_CFG80211 */
-#ifdef BCMCCX
- setbit(eventmask, WLC_E_ADDTS_IND);
- setbit(eventmask, WLC_E_DELTS_IND);
-#endif /* BCMCCX */
-#ifdef WLTDLS
- setbit(eventmask, WLC_E_TDLS_PEER_EVENT);
-#endif /* WLTDLS */
-#ifdef WL_ESCAN
- setbit(eventmask, WLC_E_ESCAN_RESULT);
-#endif /* WL_ESCAN */
-#ifdef CSI_SUPPORT
- setbit(eventmask, WLC_E_CSI);
-#endif /* CSI_SUPPORT */
-#ifdef RTT_SUPPORT
- setbit(eventmask, WLC_E_PROXD);
-#endif /* RTT_SUPPORT */
-#ifdef WL_CFG80211
- setbit(eventmask, WLC_E_ESCAN_RESULT);
- setbit(eventmask, WLC_E_AP_STARTED);
- setbit(eventmask, WLC_E_ACTION_FRAME_RX);
- if (dhd->op_mode & DHD_FLAG_P2P_MODE) {
- setbit(eventmask, WLC_E_P2P_DISC_LISTEN_COMPLETE);
+static void dhd_fix_cpu_freq(dhd_info_t *dhd)
+{
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
+ mutex_lock(&dhd->cpufreq_fix);
+#endif
+ if (dhd && !dhd->cpufreq_fix_status) {
+ pm_qos_add_request(&dhd->dhd_cpu_qos, PM_QOS_CPU_FREQ_MIN, 300000);
+#ifdef FIX_BUS_MIN_CLOCK
+ pm_qos_add_request(&dhd->dhd_bus_qos, PM_QOS_BUS_THROUGHPUT, 400000);
+#endif /* FIX_BUS_MIN_CLOCK */
+ DHD_ERROR(("pm_qos_add_requests called\n"));
+
+ dhd->cpufreq_fix_status = TRUE;
}
-#endif /* WL_CFG80211 */
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
+ mutex_unlock(&dhd->cpufreq_fix);
+#endif
+}
-#if defined(SHOW_LOGTRACE) && defined(LOGTRACE_FROM_FILE)
- if (dhd_logtrace_from_file(dhd)) {
- setbit(eventmask, WLC_E_TRACE);
- } else {
- clrbit(eventmask, WLC_E_TRACE);
+static void dhd_rollback_cpu_freq(dhd_info_t *dhd)
+{
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
+	mutex_lock(&dhd->cpufreq_fix);
+#endif
+ if (dhd && dhd->cpufreq_fix_status != TRUE) {
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
+ mutex_unlock(&dhd->cpufreq_fix);
+#endif
+ return;
}
-#elif defined(SHOW_LOGTRACE)
- setbit(eventmask, WLC_E_TRACE);
-#else
- clrbit(eventmask, WLC_E_TRACE);
- if (dhd->conf->chip == BCM43752_CHIP_ID)
- setbit(eventmask, WLC_E_TRACE);
-#endif /* defined(SHOW_LOGTRACE) && defined(LOGTRACE_FROM_FILE) */
- setbit(eventmask, WLC_E_CSA_COMPLETE_IND);
-#ifdef CUSTOM_EVENT_PM_WAKE
- setbit(eventmask, WLC_E_EXCESS_PM_WAKE_EVENT);
-#endif /* CUSTOM_EVENT_PM_WAKE */
-#ifdef DHD_LOSSLESS_ROAMING
- setbit(eventmask, WLC_E_ROAM_PREP);
-#endif // endif
- /* nan events */
- setbit(eventmask, WLC_E_NAN);
-#if defined(PCIE_FULL_DONGLE) && defined(DHD_LOSSLESS_ROAMING)
- dhd_update_flow_prio_map(dhd, DHD_FLOW_PRIO_LLR_MAP);
-#endif /* defined(PCIE_FULL_DONGLE) && defined(DHD_LOSSLESS_ROAMING) */
+ pm_qos_remove_request(&dhd->dhd_cpu_qos);
+#ifdef FIX_BUS_MIN_CLOCK
+ pm_qos_remove_request(&dhd->dhd_bus_qos);
+#endif /* FIX_BUS_MIN_CLOCK */
+ DHD_ERROR(("pm_qos_add_requests called\n"));
-#if defined(BCMPCIE) && defined(EAPOL_PKT_PRIO)
- dhd_update_flow_prio_map(dhd, DHD_FLOW_PRIO_LLR_MAP);
-#endif /* defined(BCMPCIE) && defined(EAPOL_PKT_PRIO) */
+ dhd->cpufreq_fix_status = FALSE;
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
+ mutex_unlock(&dhd->cpufreq_fix);
+#endif
+}
+#endif /* FIX_CPU_MIN_CLOCK */
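/*
 * Illustrative sketch (not part of the patch): the fix/rollback pair above
 * is meant to bracket latency-sensitive bus activity with a pm_qos floor.
 * The call site and helper name here are assumptions, not taken from this
 * file.
 */
#ifdef FIX_CPU_MIN_CLOCK
static void example_bracket_heavy_bus_traffic(dhd_info_t *dhd)
{
	dhd_fix_cpu_freq(dhd);		/* adds the CPU (and optional bus) pm_qos requests */
	/* ... e.g. firmware download or sustained throughput test ... */
	dhd_rollback_cpu_freq(dhd);	/* removes the pm_qos requests again */
}
#endif /* FIX_CPU_MIN_CLOCK */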
- /* Write updated Event mask */
- ret = dhd_iovar(dhd, 0, "event_msgs", eventmask, WL_EVENTING_MASK_LEN, NULL, 0, TRUE);
- if (ret < 0) {
- DHD_ERROR(("%s Set Event mask failed %d\n", __FUNCTION__, ret));
- goto done;
- }
+#if defined(BT_OVER_SDIO)
+
+void
+dhdsdio_bus_usr_cnt_inc(dhd_pub_t *dhdp)
+{
+ dhdp->info->bus_user_count++;
+}
+
+void
+dhdsdio_bus_usr_cnt_dec(dhd_pub_t *dhdp)
+{
+ dhdp->info->bus_user_count--;
+}
+
+/* Return values:
+ * Success: Returns 0
+ * Failure: Returns -1 or an errno code
+ */
+int
+dhd_bus_get(wlan_bt_handle_t handle, bus_owner_t owner)
+{
+ dhd_pub_t *dhdp = (dhd_pub_t *)handle;
+ dhd_info_t *dhd = (dhd_info_t *)dhdp->info;
+ int ret = 0;
- /* make up event mask ext message iovar for event larger than 128 */
- msglen = ROUNDUP(WLC_E_LAST, NBBY)/NBBY + EVENTMSGS_EXT_STRUCT_SIZE;
- eventmask_msg = (eventmsgs_ext_t*)MALLOC(dhd->osh, msglen);
- if (eventmask_msg == NULL) {
- DHD_ERROR(("failed to allocate %d bytes for event_msg_ext\n", msglen));
- ret = BCME_NOMEM;
- goto done;
+ mutex_lock(&dhd->bus_user_lock);
+ ++dhd->bus_user_count;
+ if (dhd->bus_user_count < 0) {
+ DHD_ERROR(("%s(): bus_user_count is negative, which is invalid\n", __FUNCTION__));
+ ret = -1;
+ goto exit;
}
- bzero(eventmask_msg, msglen);
- eventmask_msg->ver = EVENTMSGS_VER;
- eventmask_msg->len = ROUNDUP(WLC_E_LAST, NBBY)/NBBY;
- /* Read event_msgs_ext mask */
- ret2 = dhd_iovar(dhd, 0, "event_msgs_ext", (char *)eventmask_msg, msglen, iov_buf,
- WLC_IOCTL_SMLEN, FALSE);
+ if (dhd->bus_user_count == 1) {
- if (ret2 == 0) { /* event_msgs_ext must be supported */
- bcopy(iov_buf, eventmask_msg, msglen);
-#ifdef RSSI_MONITOR_SUPPORT
- setbit(eventmask_msg->mask, WLC_E_RSSI_LQM);
-#endif /* RSSI_MONITOR_SUPPORT */
-#ifdef GSCAN_SUPPORT
- setbit(eventmask_msg->mask, WLC_E_PFN_GSCAN_FULL_RESULT);
- setbit(eventmask_msg->mask, WLC_E_PFN_SCAN_COMPLETE);
- setbit(eventmask_msg->mask, WLC_E_PFN_SSID_EXT);
- setbit(eventmask_msg->mask, WLC_E_ROAM_EXP_EVENT);
-#endif /* GSCAN_SUPPORT */
- setbit(eventmask_msg->mask, WLC_E_RSSI_LQM);
-#ifdef BT_WIFI_HANDOVER
- setbit(eventmask_msg->mask, WLC_E_BT_WIFI_HANDOVER_REQ);
-#endif /* BT_WIFI_HANDOVER */
-#ifdef DBG_PKT_MON
- setbit(eventmask_msg->mask, WLC_E_ROAM_PREP);
-#endif /* DBG_PKT_MON */
-#ifdef DHD_ULP
- setbit(eventmask_msg->mask, WLC_E_ULP);
-#endif // endif
-#ifdef WL_NATOE
- setbit(eventmask_msg->mask, WLC_E_NATOE_NFCT);
-#endif /* WL_NATOE */
-#ifdef WL_NAN
- setbit(eventmask_msg->mask, WLC_E_SLOTTED_BSS_PEER_OP);
-#endif /* WL_NAN */
-#ifdef WL_MBO
- setbit(eventmask_msg->mask, WLC_E_MBO);
-#endif /* WL_MBO */
-#ifdef WL_CLIENT_SAE
- setbit(eventmask_msg->mask, WLC_E_JOIN_START);
-#endif /* WL_CLIENT_SAE */
-#ifdef WL_BCNRECV
- setbit(eventmask_msg->mask, WLC_E_BCNRECV_ABORTED);
-#endif /* WL_BCNRECV */
-#ifdef WL_CAC_TS
- setbit(eventmask_msg->mask, WLC_E_ADDTS_IND);
- setbit(eventmask_msg->mask, WLC_E_DELTS_IND);
-#endif /* WL_CAC_TS */
-#ifdef WL_CHAN_UTIL
- setbit(eventmask_msg->mask, WLC_E_BSS_LOAD);
-#endif /* WL_CHAN_UTIL */
+ dhd->pub.hang_was_sent = 0;
- /* Write updated Event mask */
- eventmask_msg->ver = EVENTMSGS_VER;
- eventmask_msg->command = EVENTMSGS_SET_MASK;
- eventmask_msg->len = ROUNDUP(WLC_E_LAST, NBBY)/NBBY;
- ret = dhd_iovar(dhd, 0, "event_msgs_ext", (char *)eventmask_msg, msglen, NULL, 0,
- TRUE);
- if (ret < 0) {
- DHD_ERROR(("%s write event mask ext failed %d\n", __FUNCTION__, ret));
- goto done;
+ /* First user, turn on WL_REG, start the bus */
+ DHD_ERROR(("%s(): First user Turn On WL_REG & start the bus", __FUNCTION__));
+
+ if (!wifi_platform_set_power(dhd->adapter, TRUE, WIFI_TURNON_DELAY)) {
+ /* Enable F1 */
+ ret = dhd_bus_resume(dhdp, 0);
+ if (ret) {
+ DHD_ERROR(("%s(): Failed to enable F1, err=%d\n",
+ __FUNCTION__, ret));
+ goto exit;
+ }
}
- } else if (ret2 == BCME_UNSUPPORTED || ret2 == BCME_VERSION) {
- /* Skip for BCME_UNSUPPORTED or BCME_VERSION */
- DHD_ERROR(("%s event_msgs_ext not support or version mismatch %d\n",
- __FUNCTION__, ret2));
- } else {
- DHD_ERROR(("%s read event mask ext failed %d\n", __FUNCTION__, ret2));
- ret = ret2;
- goto done;
- }
-#if defined(DHD_8021X_DUMP) && defined(SHOW_LOGTRACE)
- /* Enabling event log trace for EAP events */
- el_tag = (wl_el_tag_params_t *)MALLOC(dhd->osh, sizeof(wl_el_tag_params_t));
- if (el_tag == NULL) {
- DHD_ERROR(("failed to allocate %d bytes for event_msg_ext\n",
- (int)sizeof(wl_el_tag_params_t)));
- ret = BCME_NOMEM;
- goto done;
+ dhd_update_fw_nv_path(dhd);
+ /* update firmware and nvram path to sdio bus */
+ dhd_bus_update_fw_nv_path(dhd->pub.bus,
+ dhd->fw_path, dhd->nv_path);
+ /* download the firmware, Enable F2 */
+ /* TODO: Should be done only in case of FW switch */
+ ret = dhd_bus_devreset(dhdp, FALSE);
+ dhd_bus_resume(dhdp, 1);
+ if (!ret) {
+ if (dhd_sync_with_dongle(&dhd->pub) < 0) {
+ DHD_ERROR(("%s(): Sync with dongle failed!!\n", __FUNCTION__));
+ ret = -EFAULT;
+ }
+ } else {
+ DHD_ERROR(("%s(): Failed to download, err=%d\n", __FUNCTION__, ret));
+ }
+ } else {
+ DHD_ERROR(("%s(): BUS is already acquired, just increase the count %d \r\n",
+ __FUNCTION__, dhd->bus_user_count));
}
- el_tag->tag = EVENT_LOG_TAG_4WAYHANDSHAKE;
- el_tag->set = 1;
- el_tag->flags = EVENT_LOG_TAG_FLAG_LOG;
- ret = dhd_iovar(dhd, 0, "event_log_tag_control", (char *)el_tag, sizeof(*el_tag), NULL, 0,
- TRUE);
-#endif /* DHD_8021X_DUMP */
+exit:
+ mutex_unlock(&dhd->bus_user_lock);
+ return ret;
+}
+EXPORT_SYMBOL(dhd_bus_get);
- dhd_wl_ioctl_cmd(dhd, WLC_SET_SCAN_CHANNEL_TIME, (char *)&scan_assoc_time,
- sizeof(scan_assoc_time), TRUE, 0);
- dhd_wl_ioctl_cmd(dhd, WLC_SET_SCAN_UNASSOC_TIME, (char *)&scan_unassoc_time,
- sizeof(scan_unassoc_time), TRUE, 0);
- dhd_wl_ioctl_cmd(dhd, WLC_SET_SCAN_PASSIVE_TIME, (char *)&scan_passive_time,
- sizeof(scan_passive_time), TRUE, 0);
+/* Return values:
+ * Success: Returns 0
+ * Failure: Returns -1 or an errno code
+ */
+int
+dhd_bus_put(wlan_bt_handle_t handle, bus_owner_t owner)
+{
+ dhd_pub_t *dhdp = (dhd_pub_t *)handle;
+ dhd_info_t *dhd = (dhd_info_t *)dhdp->info;
+ int ret = 0;
+ BCM_REFERENCE(owner);
-#ifdef ARP_OFFLOAD_SUPPORT
- /* Set and enable ARP offload feature for STA only */
-#if defined(SOFTAP)
- if (arpoe && !ap_fw_loaded)
-#else
- if (arpoe)
-#endif // endif
- {
- dhd_arp_offload_enable(dhd, TRUE);
- dhd_arp_offload_set(dhd, dhd_arp_mode);
- } else {
- dhd_arp_offload_enable(dhd, FALSE);
- dhd_arp_offload_set(dhd, 0);
+ mutex_lock(&dhd->bus_user_lock);
+ --dhd->bus_user_count;
+ if (dhd->bus_user_count < 0) {
+ DHD_ERROR(("%s(): bus_user_count is negative, which is invalid\n", __FUNCTION__));
+ dhd->bus_user_count = 0;
+ ret = -1;
+ goto exit;
}
- dhd_arp_enable = arpoe;
-#endif /* ARP_OFFLOAD_SUPPORT */
-#ifdef PKT_FILTER_SUPPORT
- /* Setup default defintions for pktfilter , enable in suspend */
- if (dhd_master_mode) {
- dhd->pktfilter_count = 6;
- dhd->pktfilter[DHD_BROADCAST_FILTER_NUM] = NULL;
- if (!FW_SUPPORTED(dhd, pf6)) {
- dhd->pktfilter[DHD_MULTICAST4_FILTER_NUM] = NULL;
- dhd->pktfilter[DHD_MULTICAST6_FILTER_NUM] = NULL;
- } else {
- /* Immediately pkt filter TYPE 6 Discard IPv4/IPv6 Multicast Packet */
- dhd->pktfilter[DHD_MULTICAST4_FILTER_NUM] = DISCARD_IPV4_MCAST;
- dhd->pktfilter[DHD_MULTICAST6_FILTER_NUM] = DISCARD_IPV6_MCAST;
+ if (dhd->bus_user_count == 0) {
+ /* Last user, stop the bus and turn Off WL_REG */
+ DHD_ERROR(("%s(): There are no owners left Trunf Off WL_REG & stop the bus \r\n",
+ __FUNCTION__));
+#ifdef PROP_TXSTATUS
+ if (dhd->pub.wlfc_enabled) {
+ dhd_wlfc_deinit(&dhd->pub);
}
- /* apply APP pktfilter */
- dhd->pktfilter[DHD_ARP_FILTER_NUM] = "105 0 0 12 0xFFFF 0x0806";
-
-#ifdef BLOCK_IPV6_PACKET
- /* Setup filter to allow only IPv4 unicast frames */
- dhd->pktfilter[DHD_UNICAST_FILTER_NUM] = "100 0 0 0 "
- HEX_PREF_STR UNI_FILTER_STR ZERO_ADDR_STR ETHER_TYPE_STR IPV6_FILTER_STR
- " "
- HEX_PREF_STR ZERO_ADDR_STR ZERO_ADDR_STR ETHER_TYPE_STR ZERO_TYPE_STR;
-#else
- /* Setup filter to allow only unicast */
- dhd->pktfilter[DHD_UNICAST_FILTER_NUM] = "100 0 0 0 0x01 0x00";
-#endif /* BLOCK_IPV6_PACKET */
-
-#ifdef PASS_IPV4_SUSPEND
- dhd->pktfilter[DHD_MDNS_FILTER_NUM] = "104 0 0 0 0xFFFFFF 0x01005E";
-#else
- /* Add filter to pass multicastDNS packet and NOT filter out as Broadcast */
- dhd->pktfilter[DHD_MDNS_FILTER_NUM] = NULL;
-#endif /* PASS_IPV4_SUSPEND */
- if (FW_SUPPORTED(dhd, pf6)) {
- /* Immediately pkt filter TYPE 6 Dicard Broadcast IP packet */
- dhd->pktfilter[DHD_IP4BCAST_DROP_FILTER_NUM] = DISCARD_IPV4_BCAST;
- /* Immediately pkt filter TYPE 6 Dicard Cisco STP packet */
- dhd->pktfilter[DHD_LLC_STP_DROP_FILTER_NUM] = DISCARD_LLC_STP;
- /* Immediately pkt filter TYPE 6 Dicard Cisco XID protocol */
- dhd->pktfilter[DHD_LLC_XID_DROP_FILTER_NUM] = DISCARD_LLC_XID;
- dhd->pktfilter_count = 10;
+#endif /* PROP_TXSTATUS */
+#ifdef PNO_SUPPORT
+ if (dhd->pub.pno_state) {
+ dhd_pno_deinit(&dhd->pub);
+ }
+#endif /* PNO_SUPPORT */
+#ifdef RTT_SUPPORT
+ if (dhd->pub.rtt_state) {
+ dhd_rtt_deinit(&dhd->pub);
+ }
+#endif /* RTT_SUPPORT */
+ ret = dhd_bus_devreset(dhdp, TRUE);
+ if (!ret) {
+ dhd_bus_suspend(dhdp);
+ wifi_platform_set_power(dhd->adapter, FALSE, WIFI_TURNOFF_DELAY);
}
+ } else {
+ DHD_ERROR(("%s(): Other owners using bus, decrease the count %d \r\n",
+ __FUNCTION__, dhd->bus_user_count));
+ }
+exit:
+ mutex_unlock(&dhd->bus_user_lock);
+ return ret;
+}
+EXPORT_SYMBOL(dhd_bus_put);
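/*
 * Illustrative sketch (not part of the patch): the expected get/put pairing
 * for a second bus user such as the BT driver. The handle comes from the
 * BT-over-SDIO registration path; the BT_MODULE owner value is an
 * assumption (only WLAN_MODULE appears in this file).
 */
static int example_bt_bus_user(wlan_bt_handle_t handle)
{
	int err;

	err = dhd_bus_get(handle, BT_MODULE);	/* first user powers WL_REG and starts the bus */
	if (err)
		return err;

	/* ... BT traffic over the shared SDIO bus ... */

	return dhd_bus_put(handle, BT_MODULE);	/* last user stops the bus and powers off */
}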
-#ifdef GAN_LITE_NAT_KEEPALIVE_FILTER
- dhd->pktfilter_count = 4;
- /* Setup filter to block broadcast and NAT Keepalive packets */
- /* discard all broadcast packets */
- dhd->pktfilter[DHD_UNICAST_FILTER_NUM] = "100 0 0 0 0xffffff 0xffffff";
- /* discard NAT Keepalive packets */
- dhd->pktfilter[DHD_BROADCAST_FILTER_NUM] = "102 0 0 36 0xffffffff 0x11940009";
- /* discard NAT Keepalive packets */
- dhd->pktfilter[DHD_MULTICAST4_FILTER_NUM] = "104 0 0 38 0xffffffff 0x11940009";
- dhd->pktfilter[DHD_MULTICAST6_FILTER_NUM] = NULL;
-#endif /* GAN_LITE_NAT_KEEPALIVE_FILTER */
- } else
- dhd_conf_discard_pkt_filter(dhd);
- dhd_conf_add_pkt_filter(dhd);
+int
+dhd_net_bus_get(struct net_device *dev)
+{
+ dhd_info_t *dhd = DHD_DEV_INFO(dev);
+ return dhd_bus_get(&dhd->pub, WLAN_MODULE);
+}
-#if defined(SOFTAP)
- if (ap_fw_loaded) {
- dhd_enable_packet_filter(0, dhd);
- }
-#endif /* defined(SOFTAP) */
- dhd_set_packet_filter(dhd);
-#endif /* PKT_FILTER_SUPPORT */
-#ifdef DISABLE_11N
- ret = dhd_iovar(dhd, 0, "nmode", (char *)&nmode, sizeof(nmode), NULL, 0, TRUE);
- if (ret < 0)
- DHD_ERROR(("%s wl nmode 0 failed %d\n", __FUNCTION__, ret));
-#endif /* DISABLE_11N */
+int
+dhd_net_bus_put(struct net_device *dev)
+{
+ dhd_info_t *dhd = DHD_DEV_INFO(dev);
+ return dhd_bus_put(&dhd->pub, WLAN_MODULE);
+}
-#ifdef ENABLE_BCN_LI_BCN_WAKEUP
- ret = dhd_iovar(dhd, 0, "bcn_li_bcn", (char *)&bcn_li_bcn, sizeof(bcn_li_bcn), NULL, 0,
- TRUE);
-#endif /* ENABLE_BCN_LI_BCN_WAKEUP */
-#ifdef AMPDU_VO_ENABLE
- tid.tid = PRIO_8021D_VO; /* Enable TID(6) for voice */
- tid.enable = TRUE;
- ret = dhd_iovar(dhd, 0, "ampdu_tid", (char *)&tid, sizeof(tid), NULL, 0, TRUE);
-
- tid.tid = PRIO_8021D_NC; /* Enable TID(7) for voice */
- tid.enable = TRUE;
- ret = dhd_iovar(dhd, 0, "ampdu_tid", (char *)&tid, sizeof(tid), NULL, 0, TRUE);
-#endif // endif
- /* query for 'clmver' to get clm version info from firmware */
- memset(buf, 0, sizeof(buf));
- ret = dhd_iovar(dhd, 0, "clmver", NULL, 0, buf, sizeof(buf), FALSE);
- if (ret < 0)
- DHD_ERROR(("%s clmver failed %d\n", __FUNCTION__, ret));
- else {
- char *ver_temp_buf = NULL, *ver_date_buf = NULL;
- int len;
+/*
+ * Function to enable the Bus Clock
+ * Returns BCME_OK on success and BCME_xxx on failure
+ *
+ * This function must be called from sleepable context (it may block)
+ */
+int dhd_bus_clk_enable(wlan_bt_handle_t handle, bus_owner_t owner)
+{
+ dhd_pub_t *dhdp = (dhd_pub_t *)handle;
+
+ int ret;
- if ((ver_temp_buf = bcmstrstr(buf, "Data:")) == NULL) {
- DHD_ERROR(("Couldn't find \"Data:\"\n"));
- } else {
- ver_date_buf = bcmstrstr(buf, "Creation:");
- ptr = (ver_temp_buf + strlen("Data:"));
- if ((ver_temp_buf = bcmstrtok(&ptr, "\n", 0)) == NULL) {
- DHD_ERROR(("Couldn't find New line character\n"));
- } else {
- memset(clm_version, 0, CLM_VER_STR_LEN);
- len = snprintf(clm_version, CLM_VER_STR_LEN - 1, "%s", ver_temp_buf);
- if (ver_date_buf) {
- ptr = (ver_date_buf + strlen("Creation:"));
- ver_date_buf = bcmstrtok(&ptr, "\n", 0);
- if (ver_date_buf)
- snprintf(clm_version+len, CLM_VER_STR_LEN-1-len,
- " (%s)", ver_date_buf);
- }
- DHD_INFO(("CLM version = %s\n", clm_version));
- }
- }
+ dhd_os_sdlock(dhdp);
+ /*
+	 * The second argument is TRUE, meaning we expect the
+	 * function to wait until the clocks are actually
+	 * available
+ */
+ ret = __dhdsdio_clk_enable(dhdp->bus, owner, TRUE);
+ dhd_os_sdunlock(dhdp);
- if (strlen(clm_version)) {
- DHD_INFO(("CLM version = %s\n", clm_version));
- } else {
- DHD_ERROR(("Couldn't find CLM version!\n"));
- }
- }
- dhd_set_version_info(dhd, fw_version);
+ return ret;
+}
+EXPORT_SYMBOL(dhd_bus_clk_enable);
-#ifdef WRITE_WLANINFO
- sec_save_wlinfo(fw_version, EPI_VERSION_STR, dhd->info->nv_path, clm_version);
-#endif /* WRITE_WLANINFO */
+/*
+ * Function to disable the Bus Clock
+ * Returns BCME_OK on success and BCME_xxx on failure
+ *
+ * This function must be called from sleepable context (it may block)
+ */
+int dhd_bus_clk_disable(wlan_bt_handle_t handle, bus_owner_t owner)
+{
+ dhd_pub_t *dhdp = (dhd_pub_t *)handle;
- /* query for 'wlc_ver' to get version info from firmware */
- memset(&wlc_ver, 0, sizeof(wl_wlc_version_t));
- ret2 = dhd_iovar(dhd, 0, "wlc_ver", NULL, 0, (char *)&wlc_ver,
- sizeof(wl_wlc_version_t), FALSE);
- if (ret2 < 0) {
- DHD_ERROR(("%s wlc_ver failed %d\n", __FUNCTION__, ret2));
- if (ret2 != BCME_UNSUPPORTED)
- ret = ret2;
- } else {
- dhd->wlc_ver_major = wlc_ver.wlc_ver_major;
- dhd->wlc_ver_minor = wlc_ver.wlc_ver_minor;
- }
-#ifdef GEN_SOFTAP_INFO_FILE
- sec_save_softap_info();
-#endif /* GEN_SOFTAP_INFO_FILE */
+ int ret;
-#if defined(BCMSDIO)
- dhd_txglom_enable(dhd, dhd->conf->bus_rxglom);
-#endif /* defined(BCMSDIO) */
+ dhd_os_sdlock(dhdp);
+ /*
+	 * The second argument is TRUE, meaning we expect the
+	 * function to wait until the clocks are actually
+	 * disabled
+ */
+ ret = __dhdsdio_clk_disable(dhdp->bus, owner, TRUE);
+ dhd_os_sdunlock(dhdp);
-#if defined(BCMSDIO) || defined(BCMDBUS)
-#ifdef PROP_TXSTATUS
- if (disable_proptx ||
-#ifdef PROP_TXSTATUS_VSDB
- /* enable WLFC only if the firmware is VSDB when it is in STA mode */
- (dhd->op_mode != DHD_FLAG_HOSTAP_MODE &&
- dhd->op_mode != DHD_FLAG_IBSS_MODE) ||
-#endif /* PROP_TXSTATUS_VSDB */
- FALSE) {
- wlfc_enable = FALSE;
- }
- ret = dhd_conf_get_disable_proptx(dhd);
- if (ret == 0){
- disable_proptx = 0;
- wlfc_enable = TRUE;
- } else if (ret >= 1) {
- disable_proptx = 1;
- wlfc_enable = FALSE;
- /* terence 20161229: we should set ampdu_hostreorder=0 when disable_proptx=1 */
- hostreorder = 0;
- }
+ return ret;
+}
+EXPORT_SYMBOL(dhd_bus_clk_disable);
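/*
 * Illustrative sketch (not part of the patch): dhd_bus_clk_enable() and
 * dhd_bus_clk_disable() are meant to bracket individual bursts of BT
 * traffic from sleepable context; the owner value is assumed as in the
 * sketch above.
 */
static int example_bt_clk_bracket(wlan_bt_handle_t handle)
{
	int err = dhd_bus_clk_enable(handle, BT_MODULE);	/* blocks until the SDIO clock is up */

	if (err != BCME_OK)
		return err;

	/* ... perform the SDIO transfer ... */

	return dhd_bus_clk_disable(handle, BT_MODULE);
}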
-#if defined(PROP_TXSTATUS)
-#ifdef USE_WFA_CERT_CONF
- if (sec_get_param_wfa_cert(dhd, SET_PARAM_PROPTX, &proptx) == BCME_OK) {
- DHD_ERROR(("%s , read proptx param=%d\n", __FUNCTION__, proptx));
- wlfc_enable = proptx;
- }
-#endif /* USE_WFA_CERT_CONF */
-#endif /* PROP_TXSTATUS */
+/*
+ * Function to reset bt_use_count counter to zero.
+ *
+ * This function must be called from sleepable context (it may block)
+ */
+void dhd_bus_reset_bt_use_count(wlan_bt_handle_t handle)
+{
+ dhd_pub_t *dhdp = (dhd_pub_t *)handle;
-#ifndef DISABLE_11N
- ret = dhd_wl_ioctl_cmd(dhd, WLC_DOWN, (char *)&wl_down, sizeof(wl_down), TRUE, 0);
- ret2 = dhd_iovar(dhd, 0, "ampdu_hostreorder", (char *)&hostreorder, sizeof(hostreorder),
- NULL, 0, TRUE);
- if (ret2 < 0) {
- DHD_ERROR(("%s wl ampdu_hostreorder failed %d\n", __FUNCTION__, ret2));
- if (ret2 != BCME_UNSUPPORTED)
- ret = ret2;
+ /* take the lock and reset bt use count */
+ dhd_os_sdlock(dhdp);
+ dhdsdio_reset_bt_use_count(dhdp->bus);
+ dhd_os_sdunlock(dhdp);
+}
+EXPORT_SYMBOL(dhd_bus_reset_bt_use_count);
- if (ret == BCME_NOTDOWN) {
- uint wl_down = 1;
- ret2 = dhd_wl_ioctl_cmd(dhd, WLC_DOWN, (char *)&wl_down,
- sizeof(wl_down), TRUE, 0);
- DHD_ERROR(("%s ampdu_hostreorder fail WL_DOWN : %d, hostreorder :%d\n",
- __FUNCTION__, ret2, hostreorder));
+#endif /* BT_OVER_SDIO */
- ret2 = dhd_iovar(dhd, 0, "ampdu_hostreorder", (char *)&hostreorder,
- sizeof(hostreorder), NULL, 0, TRUE);
- DHD_ERROR(("%s wl ampdu_hostreorder. ret --> %d\n", __FUNCTION__, ret2));
- if (ret2 != BCME_UNSUPPORTED)
- ret = ret2;
- }
- if (ret2 != BCME_OK)
- hostreorder = 0;
- }
-#endif /* DISABLE_11N */
+#define MAX_TRY_CNT 5 /* Number of tries to disable deepsleep */
+int dhd_deepsleep(dhd_info_t *dhd, int flag)
+{
+ char iovbuf[20];
+ uint powervar = 0;
+ dhd_pub_t *dhdp;
+ int cnt = 0;
+ int ret = 0;
- if (wlfc_enable) {
- dhd_wlfc_init(dhd);
- /* terence 20161229: enable ampdu_hostreorder if tlv enabled */
- dhd_conf_set_intiovar(dhd, WLC_SET_VAR, "ampdu_hostreorder", 1, 0, TRUE);
- }
-#ifndef DISABLE_11N
- else if (hostreorder)
- dhd_wlfc_hostreorder_init(dhd);
-#endif /* DISABLE_11N */
-#else
- /* terence 20161229: disable ampdu_hostreorder if PROP_TXSTATUS not defined */
- printf("%s: not define PROP_TXSTATUS\n", __FUNCTION__);
- dhd_conf_set_intiovar(dhd, WLC_SET_VAR, "ampdu_hostreorder", 0, 0, TRUE);
-#endif /* PROP_TXSTATUS */
-#endif /* BCMSDIO || BCMDBUS */
-#ifndef PCIE_FULL_DONGLE
- /* For FD we need all the packets at DHD to handle intra-BSS forwarding */
- if (FW_SUPPORTED(dhd, ap)) {
- wl_ap_isolate = AP_ISOLATE_SENDUP_ALL;
- ret = dhd_iovar(dhd, 0, "ap_isolate", (char *)&wl_ap_isolate, sizeof(wl_ap_isolate),
- NULL, 0, TRUE);
- if (ret < 0)
- DHD_ERROR(("%s failed %d\n", __FUNCTION__, ret));
- }
-#endif /* PCIE_FULL_DONGLE */
-#ifdef PNO_SUPPORT
- if (!dhd->pno_state) {
- dhd_pno_init(dhd);
- }
-#endif // endif
-#ifdef RTT_SUPPORT
- if (!dhd->rtt_state) {
- ret = dhd_rtt_init(dhd);
- if (ret < 0) {
- DHD_ERROR(("%s failed to initialize RTT\n", __FUNCTION__));
- }
- }
-#endif // endif
-#ifdef FILTER_IE
- /* Failure to configure filter IE is not a fatal error, ignore it. */
- if (!(dhd->op_mode & (DHD_FLAG_HOSTAP_MODE | DHD_FLAG_MFG_MODE)))
- dhd_read_from_file(dhd);
-#endif /* FILTER_IE */
-#ifdef WL11U
- dhd_interworking_enable(dhd);
-#endif /* WL11U */
+ dhdp = &dhd->pub;
-#ifdef NDO_CONFIG_SUPPORT
- dhd->ndo_enable = FALSE;
- dhd->ndo_host_ip_overflow = FALSE;
- dhd->ndo_max_host_ip = NDO_MAX_HOST_IP_ENTRIES;
-#endif /* NDO_CONFIG_SUPPORT */
+ switch (flag) {
+ case 1 : /* Deepsleep on */
+ DHD_ERROR(("dhd_deepsleep: ON\n"));
+ /* give some time to sysioc_work before deepsleep */
+ OSL_SLEEP(200);
+#ifdef PKT_FILTER_SUPPORT
+ /* disable pkt filter */
+ dhd_enable_packet_filter(0, dhdp);
+#endif /* PKT_FILTER_SUPPORT */
+ /* Disable MPC */
+ powervar = 0;
+ memset(iovbuf, 0, sizeof(iovbuf));
+ bcm_mkiovar("mpc", (char *)&powervar, 4, iovbuf, sizeof(iovbuf));
+ dhd_wl_ioctl_cmd(dhdp, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
- /* ND offload version supported */
- dhd->ndo_version = dhd_ndo_get_version(dhd);
- if (dhd->ndo_version > 0) {
- DHD_INFO(("%s: ndo version %d\n", __FUNCTION__, dhd->ndo_version));
+ /* Enable Deepsleep */
+ powervar = 1;
+ memset(iovbuf, 0, sizeof(iovbuf));
+ bcm_mkiovar("deepsleep", (char *)&powervar, 4, iovbuf, sizeof(iovbuf));
+ dhd_wl_ioctl_cmd(dhdp, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
+ break;
-#ifdef NDO_CONFIG_SUPPORT
- /* enable Unsolicited NA filter */
- ret = dhd_ndo_unsolicited_na_filter_enable(dhd, 1);
- if (ret < 0) {
- DHD_ERROR(("%s failed to enable Unsolicited NA filter\n", __FUNCTION__));
- }
-#endif /* NDO_CONFIG_SUPPORT */
- }
+ case 0: /* Deepsleep Off */
+ DHD_ERROR(("dhd_deepsleep: OFF\n"));
- /* check dongle supports wbtext (product policy) or not */
- dhd->wbtext_support = FALSE;
- if (dhd_wl_ioctl_get_intiovar(dhd, "wnm_bsstrans_resp", &wnm_bsstrans_resp,
- WLC_GET_VAR, FALSE, 0) != BCME_OK) {
- DHD_ERROR(("failed to get wnm_bsstrans_resp\n"));
- }
- dhd->wbtext_policy = wnm_bsstrans_resp;
- if (dhd->wbtext_policy == WL_BSSTRANS_POLICY_PRODUCT_WBTEXT) {
- dhd->wbtext_support = TRUE;
- }
- /* driver can turn off wbtext feature through makefile */
- if (dhd->wbtext_support) {
- if (dhd_wl_ioctl_set_intiovar(dhd, "wnm_bsstrans_resp",
- WL_BSSTRANS_POLICY_ROAM_ALWAYS,
- WLC_SET_VAR, FALSE, 0) != BCME_OK) {
- DHD_ERROR(("failed to disable WBTEXT\n"));
- }
- }
+ /* Disable Deepsleep */
+ for (cnt = 0; cnt < MAX_TRY_CNT; cnt++) {
+ powervar = 0;
+ memset(iovbuf, 0, sizeof(iovbuf));
+ bcm_mkiovar("deepsleep", (char *)&powervar, 4,
+ iovbuf, sizeof(iovbuf));
+ dhd_wl_ioctl_cmd(dhdp, WLC_SET_VAR, iovbuf,
+ sizeof(iovbuf), TRUE, 0);
+
+ memset(iovbuf, 0, sizeof(iovbuf));
+ bcm_mkiovar("deepsleep", (char *)&powervar, 4,
+ iovbuf, sizeof(iovbuf));
+ if ((ret = dhd_wl_ioctl_cmd(dhdp, WLC_GET_VAR, iovbuf,
+ sizeof(iovbuf), FALSE, 0)) < 0) {
+ DHD_ERROR(("failed to get dhd deepsleep status,"
+ " ret value: %d\n", ret));
+ } else {
+ if (!(*(int *)iovbuf)) {
+ DHD_ERROR(("deepsleep mode is 0,"
+ " count: %d\n", cnt));
+ break;
+ }
+ }
+ }
-#ifdef DHD_NON_DMA_M2M_CORRUPTION
- /* check pcie non dma loopback */
- if (dhd->op_mode == DHD_FLAG_MFG_MODE &&
- (dhd_bus_dmaxfer_lpbk(dhd, M2M_NON_DMA_LPBK) < 0)) {
- goto done;
+ /* Enable MPC */
+ powervar = 1;
+ memset(iovbuf, 0, sizeof(iovbuf));
+ bcm_mkiovar("mpc", (char *)&powervar, 4, iovbuf, sizeof(iovbuf));
+ dhd_wl_ioctl_cmd(dhdp, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
+ break;
}
-#endif /* DHD_NON_DMA_M2M_CORRUPTION */
- /* WNM capabilities */
- wnm_cap = 0
-#ifdef WL11U
- | WL_WNM_BSSTRANS | WL_WNM_NOTIF
-#endif // endif
- ;
-#if defined(WL_MBO) && defined(WL_OCE)
- if (FW_SUPPORTED(dhd, estm)) {
- wnm_cap |= WL_WNM_ESTM;
- }
-#endif /* WL_MBO && WL_OCE */
- if (dhd_iovar(dhd, 0, "wnm", (char *)&wnm_cap, sizeof(wnm_cap), NULL, 0, TRUE) < 0) {
- DHD_ERROR(("failed to set WNM capabilities\n"));
- }
+ return 0;
+}
+
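+/*
+ * .ndo_stop handler for the primary interface: stop the TX queue, mark
+ * the interface down, bring down cfg80211 (removing any leftover
+ * virtual interfaces when firmware is not downloaded at driver load),
+ * flush the load-balancing queues, stop the protocol layer and finally
+ * power the chip off - or, when the chip stays powered, drop it into
+ * deepsleep if conf->deepsleep is set.
+ */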
+static int
+dhd_stop(struct net_device *net)
+{
+ int ifidx = 0;
+#ifdef WL_CFG80211
+ unsigned long flags = 0;
+#endif /* WL_CFG80211 */
+ dhd_info_t *dhd = DHD_DEV_INFO(net);
+ DHD_OS_WAKE_LOCK(&dhd->pub);
+ DHD_PERIM_LOCK(&dhd->pub);
+ printf("%s: Enter %p\n", __FUNCTION__, net);
+ dhd->pub.rxcnt_timeout = 0;
+ dhd->pub.txcnt_timeout = 0;
- if (FW_SUPPORTED(dhd, ecounters) && enable_ecounter) {
- dhd_ecounter_configure(dhd, TRUE);
- }
+#ifdef BCMPCIE
+ dhd->pub.d3ackcnt_timeout = 0;
+#endif /* BCMPCIE */
- /* store the preserve log set numbers */
- if (dhd_get_preserve_log_numbers(dhd, &dhd->logset_prsrv_mask)
- != BCME_OK) {
- DHD_ERROR(("%s: Failed to get preserve log # !\n", __FUNCTION__));
+ if (dhd->pub.up == 0) {
+ goto exit;
}
-
-#ifdef WL_MONITOR
- if (FW_SUPPORTED(dhd, monitor)) {
- dhd->monitor_enable = TRUE;
- DHD_ERROR(("%s: Monitor mode is enabled in FW cap\n", __FUNCTION__));
- } else {
- dhd->monitor_enable = FALSE;
- DHD_ERROR(("%s: Monitor mode is not enabled in FW cap\n", __FUNCTION__));
+#if defined(DHD_HANG_SEND_UP_TEST)
+ if (dhd->pub.req_hang_type) {
+ DHD_ERROR(("%s, Clear HANG test request 0x%x\n",
+ __FUNCTION__, dhd->pub.req_hang_type));
+ dhd->pub.req_hang_type = 0;
}
-#endif /* WL_MONITOR */
-
-#ifdef CONFIG_SILENT_ROAM
- dhd->sroam_turn_on = TRUE;
- dhd->sroamed = FALSE;
-#endif /* CONFIG_SILENT_ROAM */
+#endif /* DHD_HANG_SEND_UP_TEST */
- dhd_conf_postinit_ioctls(dhd);
-done:
+ dhd_if_flush_sta(DHD_DEV_IFP(net));
- if (eventmask_msg) {
- MFREE(dhd->osh, eventmask_msg, msglen);
- eventmask_msg = NULL;
- }
- if (iov_buf) {
- MFREE(dhd->osh, iov_buf, WLC_IOCTL_SMLEN);
- iov_buf = NULL;
- }
-#if defined(DHD_8021X_DUMP) && defined(SHOW_LOGTRACE)
- if (el_tag) {
- MFREE(dhd->osh, el_tag, sizeof(wl_el_tag_params_t));
- el_tag = NULL;
- }
-#endif /* DHD_8021X_DUMP */
- return ret;
-}
+ /* Disable Runtime PM before interface down */
+ DHD_DISABLE_RUNTIME_PM(&dhd->pub);
-int
-dhd_iovar(dhd_pub_t *pub, int ifidx, char *name, char *param_buf, uint param_len, char *res_buf,
- uint res_len, int set)
-{
- char *buf = NULL;
- int input_len;
- wl_ioctl_t ioc;
- int ret;
+#ifdef FIX_CPU_MIN_CLOCK
+ if (dhd_get_fw_mode(dhd) == DHD_FLAG_HOSTAP_MODE)
+ dhd_rollback_cpu_freq(dhd);
+#endif /* FIX_CPU_MIN_CLOCK */
- if (res_len > WLC_IOCTL_MAXLEN || param_len > WLC_IOCTL_MAXLEN)
- return BCME_BADARG;
+ ifidx = dhd_net2idx(dhd, net);
+ BCM_REFERENCE(ifidx);
- input_len = strlen(name) + 1 + param_len;
- if (input_len > WLC_IOCTL_MAXLEN)
- return BCME_BADARG;
+ /* Set state and stop OS transmissions */
+ netif_stop_queue(net);
+#ifdef WL_CFG80211
+ spin_lock_irqsave(&dhd->pub.up_lock, flags);
+ dhd->pub.up = 0;
+ spin_unlock_irqrestore(&dhd->pub.up_lock, flags);
+#else
+ dhd->pub.up = 0;
+#endif /* WL_CFG80211 */
- buf = NULL;
- if (set) {
- if (res_buf || res_len != 0) {
- DHD_ERROR(("%s: SET wrong arguemnet\n", __FUNCTION__));
- ret = BCME_BADARG;
- goto exit;
- }
- buf = MALLOCZ(pub->osh, input_len);
- if (!buf) {
- DHD_ERROR(("%s: mem alloc failed\n", __FUNCTION__));
- ret = BCME_NOMEM;
- goto exit;
- }
- ret = bcm_mkiovar(name, param_buf, param_len, buf, input_len);
- if (!ret) {
- ret = BCME_NOMEM;
- goto exit;
- }
+#ifdef WL_CFG80211
+ if (ifidx == 0) {
+ dhd_if_t *ifp;
+ wl_cfg80211_down(net);
- ioc.cmd = WLC_SET_VAR;
- ioc.buf = buf;
- ioc.len = input_len;
- ioc.set = set;
+ ifp = dhd->iflist[0];
+ ASSERT(ifp && ifp->net);
+ /*
+ * For CFG80211: Clean up all the left over virtual interfaces
+ * when the primary Interface is brought down. [ifconfig wlan0 down]
+ */
+ if (!dhd_download_fw_on_driverload) {
+ if ((dhd->dhd_state & DHD_ATTACH_STATE_ADD_IF) &&
+ (dhd->dhd_state & DHD_ATTACH_STATE_CFG80211)) {
+ int i;
+#ifdef WL_CFG80211_P2P_DEV_IF
+ wl_cfg80211_del_p2p_wdev(net);
+#endif /* WL_CFG80211_P2P_DEV_IF */
- ret = dhd_wl_ioctl(pub, ifidx, &ioc, ioc.buf, ioc.len);
- } else {
- if (!res_buf || !res_len) {
- DHD_ERROR(("%s: GET failed. resp_buf NULL or length 0.\n", __FUNCTION__));
- ret = BCME_BADARG;
- goto exit;
- }
+ dhd_net_if_lock_local(dhd);
+ for (i = 1; i < DHD_MAX_IFS; i++)
+ dhd_remove_if(&dhd->pub, i, FALSE);
- if (res_len < input_len) {
- DHD_INFO(("%s: res_len(%d) < input_len(%d)\n", __FUNCTION__,
- res_len, input_len));
- buf = MALLOCZ(pub->osh, input_len);
- if (!buf) {
- DHD_ERROR(("%s: mem alloc failed\n", __FUNCTION__));
- ret = BCME_NOMEM;
- goto exit;
- }
- ret = bcm_mkiovar(name, param_buf, param_len, buf, input_len);
- if (!ret) {
- ret = BCME_NOMEM;
- goto exit;
+ if (ifp && ifp->net) {
+ dhd_if_del_sta_list(ifp);
+ }
+#ifdef ARP_OFFLOAD_SUPPORT
+ if (dhd_inetaddr_notifier_registered) {
+ dhd_inetaddr_notifier_registered = FALSE;
+ unregister_inetaddr_notifier(&dhd_inetaddr_notifier);
+ }
+#endif /* ARP_OFFLOAD_SUPPORT */
+#if defined(CONFIG_IPV6) && defined(IPV6_NDO_SUPPORT)
+ if (dhd_inet6addr_notifier_registered) {
+ dhd_inet6addr_notifier_registered = FALSE;
+ unregister_inet6addr_notifier(&dhd_inet6addr_notifier);
+ }
+#endif /* CONFIG_IPV6 && IPV6_NDO_SUPPORT */
+ dhd_net_if_unlock_local(dhd);
}
+#if 0
+ // terence 20161024: remove this to prevent dev_close() from getting stuck in dhd_hang_process
+ cancel_work_sync(dhd->dhd_deferred_wq);
+#endif
- ioc.cmd = WLC_GET_VAR;
- ioc.buf = buf;
- ioc.len = input_len;
- ioc.set = set;
-
- ret = dhd_wl_ioctl(pub, ifidx, &ioc, ioc.buf, ioc.len);
+#ifdef SHOW_LOGTRACE
+ /* Wait till event_log_dispatcher_work finishes */
+ cancel_work_sync(&dhd->event_log_dispatcher_work);
+#endif /* SHOW_LOGTRACE */
- if (ret == BCME_OK) {
- memcpy(res_buf, buf, res_len);
- }
- } else {
- memset(res_buf, 0, res_len);
- ret = bcm_mkiovar(name, param_buf, param_len, res_buf, res_len);
- if (!ret) {
- ret = BCME_NOMEM;
- goto exit;
- }
+#if defined(DHD_LB_RXP)
+ __skb_queue_purge(&dhd->rx_pend_queue);
+#endif /* DHD_LB_RXP */
- ioc.cmd = WLC_GET_VAR;
- ioc.buf = res_buf;
- ioc.len = res_len;
- ioc.set = set;
+#if defined(DHD_LB_TXP)
+ skb_queue_purge(&dhd->tx_pend_queue);
+#endif /* DHD_LB_TXP */
+ }
- ret = dhd_wl_ioctl(pub, ifidx, &ioc, ioc.buf, ioc.len);
+ argos_register_notifier_deinit();
+#ifdef DHDTCPACK_SUPPRESS
+ dhd_tcpack_suppress_set(&dhd->pub, TCPACK_SUP_OFF);
+#endif /* DHDTCPACK_SUPPRESS */
+#if defined(DHD_LB_RXP)
+ if (ifp->net == dhd->rx_napi_netdev) {
+ DHD_INFO(("%s napi<%p> disabled ifp->net<%p,%s>\n",
+ __FUNCTION__, &dhd->rx_napi_struct, net, net->name));
+ skb_queue_purge(&dhd->rx_napi_queue);
+ napi_disable(&dhd->rx_napi_struct);
+ netif_napi_del(&dhd->rx_napi_struct);
+ dhd->rx_napi_netdev = NULL;
}
+#endif /* DHD_LB_RXP */
}
-exit:
- if (buf) {
- MFREE(pub->osh, buf, input_len);
- buf = NULL;
- }
- return ret;
-}
-
-int
-dhd_getiovar(dhd_pub_t *pub, int ifidx, char *name, char *cmd_buf,
- uint cmd_len, char **resptr, uint resp_len)
-{
- int len = resp_len;
- int ret;
- char *buf = *resptr;
- wl_ioctl_t ioc;
- if (resp_len > WLC_IOCTL_MAXLEN)
- return BCME_BADARG;
+#endif /* WL_CFG80211 */
- memset(buf, 0, resp_len);
+ DHD_SSSR_DUMP_DEINIT(&dhd->pub);
- ret = bcm_mkiovar(name, cmd_buf, cmd_len, buf, len);
- if (ret == 0) {
- return BCME_BUFTOOSHORT;
+#ifdef PROP_TXSTATUS
+ dhd_wlfc_cleanup(&dhd->pub, NULL, 0);
+#endif
+#ifdef SHOW_LOGTRACE
+ if (!dhd_download_fw_on_driverload) {
+ /* Release the skbs from queue for WLC_E_TRACE event */
+ dhd_event_logtrace_flush_queue(&dhd->pub);
+ if (dhd->dhd_state & DHD_ATTACH_LOGTRACE_INIT) {
+ if (dhd->event_data.fmts) {
+ MFREE(dhd->pub.osh, dhd->event_data.fmts,
+ dhd->event_data.fmts_size);
+ dhd->event_data.fmts = NULL;
+ }
+ if (dhd->event_data.raw_fmts) {
+ MFREE(dhd->pub.osh, dhd->event_data.raw_fmts,
+ dhd->event_data.raw_fmts_size);
+ dhd->event_data.raw_fmts = NULL;
+ }
+ if (dhd->event_data.raw_sstr) {
+ MFREE(dhd->pub.osh, dhd->event_data.raw_sstr,
+ dhd->event_data.raw_sstr_size);
+ dhd->event_data.raw_sstr = NULL;
+ }
+ if (dhd->event_data.rom_raw_sstr) {
+ MFREE(dhd->pub.osh, dhd->event_data.rom_raw_sstr,
+ dhd->event_data.rom_raw_sstr_size);
+ dhd->event_data.rom_raw_sstr = NULL;
+ }
+ dhd->dhd_state &= ~DHD_ATTACH_LOGTRACE_INIT;
+ }
}
+#endif /* SHOW_LOGTRACE */
+#ifdef APF
+ dhd_dev_apf_delete_filter(net);
+#endif /* APF */
- memset(&ioc, 0, sizeof(ioc));
-
- ioc.cmd = WLC_GET_VAR;
- ioc.buf = buf;
- ioc.len = len;
- ioc.set = 0;
-
- ret = dhd_wl_ioctl(pub, ifidx, &ioc, ioc.buf, ioc.len);
-
- return ret;
-}
-
-int dhd_change_mtu(dhd_pub_t *dhdp, int new_mtu, int ifidx)
-{
- struct dhd_info *dhd = dhdp->info;
- struct net_device *dev = NULL;
+ /* Stop the protocol module */
+ dhd_prot_stop(&dhd->pub);
- ASSERT(dhd && dhd->iflist[ifidx]);
- dev = dhd->iflist[ifidx]->net;
- ASSERT(dev);
+ OLD_MOD_DEC_USE_COUNT;
+exit:
+ if (ifidx == 0 && !dhd_download_fw_on_driverload) {
+#if defined(BT_OVER_SDIO)
+ dhd_bus_put(&dhd->pub, WLAN_MODULE);
+ wl_android_set_wifi_on_flag(FALSE);
+#else
+ wl_android_wifi_off(net, TRUE);
+#ifdef WL_EXT_IAPSTA
+ wl_ext_iapsta_dettach_netdev();
+#endif
+ } else {
+ if (dhd->pub.conf->deepsleep)
+ dhd_deepsleep(dhd, 1);
+#endif /* BT_OVER_SDIO */
+ }
+ dhd->pub.hang_was_sent = 0;
- if (netif_running(dev)) {
- DHD_ERROR(("%s: Must be down to change its MTU\n", dev->name));
- return BCME_NOTDOWN;
+ /* Clear country spec for built-in type driver */
+ if (!dhd_download_fw_on_driverload) {
+ dhd->pub.dhd_cspec.country_abbrev[0] = 0x00;
+ dhd->pub.dhd_cspec.rev = 0;
+ dhd->pub.dhd_cspec.ccode[0] = 0x00;
}
-#define DHD_MIN_MTU 1500
-#define DHD_MAX_MTU 1752
+#ifdef BCMDBGFS
+ dhd_dbgfs_remove();
+#endif
- if ((new_mtu < DHD_MIN_MTU) || (new_mtu > DHD_MAX_MTU)) {
- DHD_ERROR(("%s: MTU size %d is invalid.\n", __FUNCTION__, new_mtu));
- return BCME_BADARG;
+ DHD_PERIM_UNLOCK(&dhd->pub);
+ DHD_OS_WAKE_UNLOCK(&dhd->pub);
+
+ /* Destroy wakelock */
+ if (!dhd_download_fw_on_driverload &&
+ (dhd->dhd_state & DHD_ATTACH_STATE_WAKELOCKS_INIT)) {
+ DHD_OS_WAKE_LOCK_DESTROY(dhd);
+ dhd->dhd_state &= ~DHD_ATTACH_STATE_WAKELOCKS_INIT;
}
+ printf("%s: Exit\n", __FUNCTION__);
- dev->mtu = new_mtu;
return 0;
}
-#ifdef ARP_OFFLOAD_SUPPORT
-/* add or remove AOE host ip(s) (up to 8 IPs on the interface) */
-void
-aoe_update_host_ipv4_table(dhd_pub_t *dhd_pub, u32 ipa, bool add, int idx)
-{
- u32 ipv4_buf[MAX_IPV4_ENTRIES]; /* temp save for AOE host_ip table */
- int i;
- int ret;
-
- bzero(ipv4_buf, sizeof(ipv4_buf));
+#if defined(WL_CFG80211) && defined(USE_INITIAL_SHORT_DWELL_TIME)
+extern bool g_first_broadcast_scan;
+#endif
- /* display what we've got */
- ret = dhd_arp_get_arp_hostip_table(dhd_pub, ipv4_buf, sizeof(ipv4_buf), idx);
- DHD_ARPOE(("%s: hostip table read from Dongle:\n", __FUNCTION__));
-#ifdef AOE_DBG
- dhd_print_buf(ipv4_buf, 32, 4); /* max 8 IPs 4b each */
-#endif // endif
- /* now we saved hoste_ip table, clr it in the dongle AOE */
- dhd_aoe_hostip_clr(dhd_pub, idx);
+#ifdef WL11U
+static int dhd_interworking_enable(dhd_pub_t *dhd)
+{
+ uint32 enable = true;
+ int ret = BCME_OK;
- if (ret) {
- DHD_ERROR(("%s failed\n", __FUNCTION__));
- return;
+ ret = dhd_iovar(dhd, 0, "interworking", (char *)&enable, sizeof(enable), NULL, 0, TRUE);
+ if (ret < 0) {
+ DHD_ERROR(("%s: enabling interworking failed, ret=%d\n", __FUNCTION__, ret));
}
- for (i = 0; i < MAX_IPV4_ENTRIES; i++) {
- if (add && (ipv4_buf[i] == 0)) {
- ipv4_buf[i] = ipa;
- add = FALSE; /* added ipa to local table */
- DHD_ARPOE(("%s: Saved new IP in temp arp_hostip[%d]\n",
- __FUNCTION__, i));
- } else if (ipv4_buf[i] == ipa) {
- ipv4_buf[i] = 0;
- DHD_ARPOE(("%s: removed IP:%x from temp table %d\n",
- __FUNCTION__, ipa, i));
- }
-
- if (ipv4_buf[i] != 0) {
- /* add back host_ip entries from our local cache */
- dhd_arp_offload_add_ip(dhd_pub, ipv4_buf[i], idx);
- DHD_ARPOE(("%s: added IP:%x to dongle arp_hostip[%d]\n\n",
- __FUNCTION__, ipv4_buf[i], i));
- }
- }
-#ifdef AOE_DBG
- /* see the resulting hostip table */
- dhd_arp_get_arp_hostip_table(dhd_pub, ipv4_buf, sizeof(ipv4_buf), idx);
- DHD_ARPOE(("%s: read back arp_hostip table:\n", __FUNCTION__));
- dhd_print_buf(ipv4_buf, 32, 4); /* max 8 IPs 4b each */
-#endif // endif
+ return ret;
}
+#endif /* WL11U */
-/*
- * Notification mechanism from kernel to our driver. This function is called by the Linux kernel
- * whenever there is an event related to an IP address.
- * ptr : kernel provided pointer to IP address that has changed
- */
-static int dhd_inetaddr_notifier_call(struct notifier_block *this,
- unsigned long event,
- void *ptr)
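+/*
+ * .ndo_open handler: power the chip on via wl_android_wifi_on() when
+ * firmware is not downloaded at driver load, bring up the bus and sync
+ * with the dongle, set up the NAPI/load-balancing queues, bring up
+ * cfg80211, register the inetaddr/inet6addr notifiers and finally
+ * start the TX queue. Any failure unwinds through dhd_stop().
+ */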
+static int
+dhd_open(struct net_device *net)
{
- struct in_ifaddr *ifa = (struct in_ifaddr *)ptr;
-
- dhd_info_t *dhd;
- dhd_pub_t *dhd_pub;
- int idx;
-
- if (!dhd_arp_enable)
- return NOTIFY_DONE;
- if (!ifa || !(ifa->ifa_dev->dev))
- return NOTIFY_DONE;
+ dhd_info_t *dhd = DHD_DEV_INFO(net);
+#ifdef TOE
+ uint32 toe_ol;
+#endif
+#ifdef BCM_FD_AGGR
+ char iovbuf[WLC_IOCTL_SMLEN];
+ dbus_config_t config;
+ uint32 agglimit = 0;
+ uint32 rpc_agg = BCM_RPC_TP_DNGL_AGG_DPC; /* host aggr not enabled yet */
+#endif /* BCM_FD_AGGR */
+ int ifidx;
+ int32 ret = 0;
+#if defined(OOB_INTR_ONLY)
+ uint32 bus_type = -1;
+ uint32 bus_num = -1;
+ uint32 slot_num = -1;
+ wifi_adapter_info_t *adapter = NULL;
+#endif
+#if defined(WL_EXT_IAPSTA) && defined(ISAM_PREINIT)
+ int bytes_written = 0;
+ struct dhd_conf *conf;
+#endif
- /* Filter notifications meant for non Broadcom devices */
- if ((ifa->ifa_dev->dev->netdev_ops != &dhd_ops_pri) &&
- (ifa->ifa_dev->dev->netdev_ops != &dhd_ops_virt)) {
-#if defined(WL_ENABLE_P2P_IF)
- if (!wl_cfgp2p_is_ifops(ifa->ifa_dev->dev->netdev_ops))
-#endif /* WL_ENABLE_P2P_IF */
- return NOTIFY_DONE;
+ if (!dhd_download_fw_on_driverload) {
+ if (!dhd_driver_init_done) {
+ DHD_ERROR(("%s: WLAN driver is not initialized\n", __FUNCTION__));
+ return -1;
+ }
}
- dhd = DHD_DEV_INFO(ifa->ifa_dev->dev);
- if (!dhd)
- return NOTIFY_DONE;
-
- dhd_pub = &dhd->pub;
-
- if (dhd_pub->arp_version == 1) {
- idx = 0;
- } else {
- for (idx = 0; idx < DHD_MAX_IFS; idx++) {
- if (dhd->iflist[idx] && dhd->iflist[idx]->net == ifa->ifa_dev->dev)
- break;
+ printf("%s: Enter %p\n", __FUNCTION__, net);
+ DHD_MUTEX_LOCK();
+ /* Init wakelock */
+ if (!dhd_download_fw_on_driverload) {
+ if (!(dhd->dhd_state & DHD_ATTACH_STATE_WAKELOCKS_INIT)) {
+ DHD_OS_WAKE_LOCK_INIT(dhd);
+ dhd->dhd_state |= DHD_ATTACH_STATE_WAKELOCKS_INIT;
}
- if (idx < DHD_MAX_IFS)
- DHD_TRACE(("ifidx : %p %s %d\n", dhd->iflist[idx]->net,
- dhd->iflist[idx]->name, dhd->iflist[idx]->idx));
- else {
- DHD_ERROR(("Cannot find ifidx for(%s) set to 0\n", ifa->ifa_label));
- idx = 0;
+#ifdef SHOW_LOGTRACE
+ skb_queue_head_init(&dhd->evt_trace_queue);
+
+ if (!(dhd->dhd_state & DHD_ATTACH_LOGTRACE_INIT)) {
+ ret = dhd_init_logstrs_array(dhd->pub.osh, &dhd->event_data);
+ if (ret == BCME_OK) {
+ dhd_init_static_strs_array(dhd->pub.osh, &dhd->event_data,
+ st_str_file_path, map_file_path);
+ dhd_init_static_strs_array(dhd->pub.osh, &dhd->event_data,
+ rom_st_str_file_path, rom_map_file_path);
+ dhd->dhd_state |= DHD_ATTACH_LOGTRACE_INIT;
+ }
}
+#endif /* SHOW_LOGTRACE */
}
- switch (event) {
- case NETDEV_UP:
- DHD_ARPOE(("%s: [%s] Up IP: 0x%x\n",
- __FUNCTION__, ifa->ifa_label, ifa->ifa_address));
-
- /*
- * Skip if Bus is not in a state to transport the IOVAR
- * (or) the Dongle is not ready.
- */
- if (DHD_BUS_CHECK_DOWN_OR_DOWN_IN_PROGRESS(&dhd->pub) ||
- dhd->pub.busstate == DHD_BUS_LOAD) {
- DHD_ERROR(("%s: bus not ready, exit NETDEV_UP : %d\n",
- __FUNCTION__, dhd->pub.busstate));
- if (dhd->pend_ipaddr) {
- DHD_ERROR(("%s: overwrite pending ipaddr: 0x%x\n",
- __FUNCTION__, dhd->pend_ipaddr));
- }
- dhd->pend_ipaddr = ifa->ifa_address;
- break;
- }
+#if defined(PREVENT_REOPEN_DURING_HANG)
+ /* WAR : to prevent calling dhd_open abnormally in quick succession after hang event */
+ if (dhd->pub.hang_was_sent == 1) {
+ DHD_ERROR(("%s: HANG was sent up earlier\n", __FUNCTION__));
+ /* Force to bring down WLAN interface in case dhd_stop() is not called
+ * from the upper layer when HANG event is triggered.
+ */
+ if (!dhd_download_fw_on_driverload && dhd->pub.up == 1) {
+ DHD_ERROR(("%s: WLAN interface is not brought down\n", __FUNCTION__));
+ dhd_stop(net);
+ } else {
+ return -1;
+ }
+ }
+#endif /* PREVENT_REOPEN_DURING_HANG */
-#ifdef AOE_IP_ALIAS_SUPPORT
- DHD_ARPOE(("%s:add aliased IP to AOE hostip cache\n",
- __FUNCTION__));
- aoe_update_host_ipv4_table(dhd_pub, ifa->ifa_address, TRUE, idx);
-#endif /* AOE_IP_ALIAS_SUPPORT */
- dhd_conf_set_garp(dhd_pub, idx, ifa->ifa_address, TRUE);
- break;
- case NETDEV_DOWN:
- DHD_ARPOE(("%s: [%s] Down IP: 0x%x\n",
- __FUNCTION__, ifa->ifa_label, ifa->ifa_address));
- dhd->pend_ipaddr = 0;
-#ifdef AOE_IP_ALIAS_SUPPORT
- DHD_ARPOE(("%s:interface is down, AOE clr all for this if\n",
- __FUNCTION__));
- if ((dhd_pub->op_mode & DHD_FLAG_HOSTAP_MODE) ||
- (ifa->ifa_dev->dev != dhd_linux_get_primary_netdev(dhd_pub))) {
- aoe_update_host_ipv4_table(dhd_pub, ifa->ifa_address, FALSE, idx);
- } else
-#endif /* AOE_IP_ALIAS_SUPPORT */
- {
- dhd_aoe_hostip_clr(&dhd->pub, idx);
- dhd_aoe_arp_clr(&dhd->pub, idx);
- }
- dhd_conf_set_garp(dhd_pub, idx, ifa->ifa_address, FALSE);
- break;
+ DHD_OS_WAKE_LOCK(&dhd->pub);
+ DHD_PERIM_LOCK(&dhd->pub);
+ dhd->pub.dongle_trap_occured = 0;
+ dhd->pub.hang_was_sent = 0;
+ dhd->pub.hang_reason = 0;
+ dhd->pub.iovar_timeout_occured = 0;
+#ifdef PCIE_FULL_DONGLE
+ dhd->pub.d3ack_timeout_occured = 0;
+#endif /* PCIE_FULL_DONGLE */
- default:
- DHD_ARPOE(("%s: do noting for [%s] Event: %lu\n",
- __func__, ifa->ifa_label, event));
- break;
+#ifdef DHD_LOSSLESS_ROAMING
+ dhd->pub.dequeue_prec_map = ALLPRIO;
+#endif
+#if 0
+ /*
+ * Force start if ifconfig_up gets called before START command
+ * We keep WEXT's wl_control_wl_start to provide backward compatibility
+ * This should be removed in the future
+ */
+ ret = wl_control_wl_start(net);
+ if (ret != 0) {
+ DHD_ERROR(("%s: failed with code %d\n", __FUNCTION__, ret));
+ ret = -1;
+ goto exit;
}
- return NOTIFY_DONE;
-}
-#endif /* ARP_OFFLOAD_SUPPORT */
-
-#if defined(CONFIG_IPV6) && defined(IPV6_NDO_SUPPORT)
-/* Neighbor Discovery Offload: defered handler */
-static void
-dhd_inet6_work_handler(void *dhd_info, void *event_data, u8 event)
-{
- struct ipv6_work_info_t *ndo_work = (struct ipv6_work_info_t *)event_data;
- dhd_info_t *dhd = (dhd_info_t *)dhd_info;
- dhd_pub_t *dhdp;
- int ret;
+#endif
- if (!dhd) {
- DHD_ERROR(("%s: invalid dhd_info\n", __FUNCTION__));
- goto done;
- }
- dhdp = &dhd->pub;
+ ifidx = dhd_net2idx(dhd, net);
+ DHD_TRACE(("%s: ifidx %d\n", __FUNCTION__, ifidx));
- if (event != DHD_WQ_WORK_IPV6_NDO) {
- DHD_ERROR(("%s: unexpected event\n", __FUNCTION__));
- goto done;
+ if (ifidx < 0) {
+ DHD_ERROR(("%s: Error: called with invalid IF\n", __FUNCTION__));
+ ret = -1;
+ goto exit;
}
- if (!ndo_work) {
- DHD_ERROR(("%s: ipv6 work info is not initialized\n", __FUNCTION__));
- return;
+ if (!dhd->iflist[ifidx]) {
+ DHD_ERROR(("%s: Error: called when IF already deleted\n", __FUNCTION__));
+ ret = -1;
+ goto exit;
}
- switch (ndo_work->event) {
- case NETDEV_UP:
-#ifndef NDO_CONFIG_SUPPORT
- DHD_TRACE(("%s: Enable NDO \n ", __FUNCTION__));
- ret = dhd_ndo_enable(dhdp, TRUE);
- if (ret < 0) {
- DHD_ERROR(("%s: Enabling NDO Failed %d\n", __FUNCTION__, ret));
+ if (ifidx == 0) {
+ atomic_set(&dhd->pend_8021x_cnt, 0);
+ if (!dhd_download_fw_on_driverload) {
+ DHD_ERROR(("\n%s\n", dhd_version));
+#ifdef WL_EXT_IAPSTA
+ wl_ext_iapsta_attach_netdev(net, ifidx, dhd->iflist[ifidx]->bssidx);
+#endif
+#if defined(USE_INITIAL_SHORT_DWELL_TIME)
+ g_first_broadcast_scan = TRUE;
+#endif
+#if defined(BT_OVER_SDIO)
+ ret = dhd_bus_get(&dhd->pub, WLAN_MODULE);
+ wl_android_set_wifi_on_flag(TRUE);
+#else
+ ret = wl_android_wifi_on(net);
+#endif /* BT_OVER_SDIO */
+ if (ret != 0) {
+ DHD_ERROR(("%s : wl_android_wifi_on failed (%d)\n",
+ __FUNCTION__, ret));
+ ret = -1;
+ goto exit;
}
-#endif /* !NDO_CONFIG_SUPPORT */
- DHD_TRACE(("%s: Add a host ip for NDO\n", __FUNCTION__));
- if (dhdp->ndo_version > 0) {
- /* inet6 addr notifier called only for unicast address */
- ret = dhd_ndo_add_ip_with_type(dhdp, &ndo_work->ipv6_addr[0],
- WL_ND_IPV6_ADDR_TYPE_UNICAST, ndo_work->if_idx);
- } else {
- ret = dhd_ndo_add_ip(dhdp, &ndo_work->ipv6_addr[0],
- ndo_work->if_idx);
+#if defined(WL_EXT_IAPSTA) && defined(ISAM_PREINIT)
+ conf = dhd_get_conf(net);
+ if (conf) {
+ wl_android_ext_priv_cmd(net, conf->isam_init, 0, &bytes_written);
+ wl_android_ext_priv_cmd(net, conf->isam_config, 0, &bytes_written);
+ wl_android_ext_priv_cmd(net, conf->isam_enable, 0, &bytes_written);
}
- if (ret < 0) {
- DHD_ERROR(("%s: Adding a host ip for NDO failed %d\n",
- __FUNCTION__, ret));
+#endif
+ }
+#ifdef FIX_CPU_MIN_CLOCK
+ if (dhd_get_fw_mode(dhd) == DHD_FLAG_HOSTAP_MODE) {
+ dhd_init_cpufreq_fix(dhd);
+ dhd_fix_cpu_freq(dhd);
+ }
+#endif /* FIX_CPU_MIN_CLOCK */
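+ /*
+ * If a DPC core is configured (dpc_cpucore >= 0) and out-of-band
+ * interrupts are in use, look up the platform adapter for this bus
+ * and publish an affinity hint so the adapter's IRQ is preferably
+ * serviced on that core.
+ */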
+#if defined(OOB_INTR_ONLY)
+ if (dhd->pub.conf->dpc_cpucore >= 0) {
+ dhd_bus_get_ids(dhd->pub.bus, &bus_type, &bus_num, &slot_num);
+ adapter = dhd_wifi_platform_get_adapter(bus_type, bus_num, slot_num);
+ if (adapter) {
+ printf("%s: set irq affinity hint %d\n", __FUNCTION__, dhd->pub.conf->dpc_cpucore);
+ irq_set_affinity_hint(adapter->irq_num, cpumask_of(dhd->pub.conf->dpc_cpucore));
}
- break;
- case NETDEV_DOWN:
- if (dhdp->ndo_version > 0) {
- DHD_TRACE(("%s: Remove a host ip for NDO\n", __FUNCTION__));
- ret = dhd_ndo_remove_ip_by_addr(dhdp,
- &ndo_work->ipv6_addr[0], ndo_work->if_idx);
+ }
+#endif
+
+ if (dhd->pub.busstate != DHD_BUS_DATA) {
+#ifdef BCMDBUS
+ dhd_set_path(&dhd->pub);
+ DHD_MUTEX_UNLOCK();
+ wait_event_interruptible_timeout(dhd->adapter->status_event,
+ wifi_get_adapter_status(dhd->adapter, WIFI_STATUS_FW_READY),
+ msecs_to_jiffies(DHD_FW_READY_TIMEOUT));
+ DHD_MUTEX_LOCK();
+ if ((ret = dbus_up(dhd->pub.bus)) != 0) {
+ DHD_ERROR(("%s: failed to dbus_up with code %d\n", __FUNCTION__, ret));
+ goto exit;
} else {
- DHD_TRACE(("%s: Clear host ip table for NDO \n", __FUNCTION__));
- ret = dhd_ndo_remove_ip(dhdp, ndo_work->if_idx);
- }
- if (ret < 0) {
- DHD_ERROR(("%s: Removing host ip for NDO failed %d\n",
- __FUNCTION__, ret));
- goto done;
+ dhd->pub.busstate = DHD_BUS_DATA;
}
-#ifdef NDO_CONFIG_SUPPORT
- if (dhdp->ndo_host_ip_overflow) {
- ret = dhd_dev_ndo_update_inet6addr(
- dhd_idx2net(dhdp, ndo_work->if_idx));
- if ((ret < 0) && (ret != BCME_NORESOURCE)) {
- DHD_ERROR(("%s: Updating host ip for NDO failed %d\n",
- __FUNCTION__, ret));
- goto done;
- }
+ if ((ret = dhd_sync_with_dongle(&dhd->pub)) < 0) {
+ DHD_ERROR(("%s: failed with code %d\n", __FUNCTION__, ret));
+ goto exit;
}
-#else /* !NDO_CONFIG_SUPPORT */
- DHD_TRACE(("%s: Disable NDO\n ", __FUNCTION__));
- ret = dhd_ndo_enable(dhdp, FALSE);
- if (ret < 0) {
- DHD_ERROR(("%s: disabling NDO Failed %d\n", __FUNCTION__, ret));
- goto done;
+#else
+ /* try to bring up bus */
+ DHD_PERIM_UNLOCK(&dhd->pub);
+ ret = dhd_bus_start(&dhd->pub);
+ DHD_PERIM_LOCK(&dhd->pub);
+ if (ret) {
+ DHD_ERROR(("%s: failed with code %d\n", __FUNCTION__, ret));
+ ret = -1;
+ goto exit;
}
-#endif /* NDO_CONFIG_SUPPORT */
- break;
+#endif /* !BCMDBUS */
- default:
- DHD_ERROR(("%s: unknown notifier event \n", __FUNCTION__));
- break;
- }
-done:
+ }
+#ifdef WL_EXT_IAPSTA
+ wl_ext_iapsta_attach_name(net, ifidx);
+#endif
+ if (dhd_download_fw_on_driverload) {
+ if (dhd->pub.conf->deepsleep)
+ dhd_deepsleep(dhd, 0);
+ }
- /* free ndo_work. alloced while scheduling the work */
- if (ndo_work) {
- kfree(ndo_work);
- }
+#ifdef BCM_FD_AGGR
+ config.config_id = DBUS_CONFIG_ID_AGGR_LIMIT;
- return;
-} /* dhd_init_logstrs_array */
-/*
- * Neighbor Discovery Offload: Called when an interface
- * is assigned with ipv6 address.
- * Handles only primary interface
- */
-int dhd_inet6addr_notifier_call(struct notifier_block *this, unsigned long event, void *ptr)
-{
- dhd_info_t *dhd;
- dhd_pub_t *dhdp;
- struct inet6_ifaddr *inet6_ifa = ptr;
- struct ipv6_work_info_t *ndo_info;
- int idx;
+ memset(iovbuf, 0, sizeof(iovbuf));
+ bcm_mkiovar("rpc_dngl_agglimit", (char *)&agglimit, 4,
+ iovbuf, sizeof(iovbuf));
- /* Filter notifications meant for non Broadcom devices */
- if (inet6_ifa->idev->dev->netdev_ops != &dhd_ops_pri) {
- return NOTIFY_DONE;
- }
+ if (!dhd_wl_ioctl_cmd(&dhd->pub, WLC_GET_VAR, iovbuf, sizeof(iovbuf), FALSE, 0)) {
+ agglimit = *(uint32 *)iovbuf;
+ config.aggr_param.maxrxsf = agglimit >> BCM_RPC_TP_AGG_SF_SHIFT;
+ config.aggr_param.maxrxsize = agglimit & BCM_RPC_TP_AGG_BYTES_MASK;
+ DHD_ERROR(("rpc_dngl_agglimit %x : sf_limit %d bytes_limit %d\n",
+ agglimit, config.aggr_param.maxrxsf, config.aggr_param.maxrxsize));
+ if (bcm_rpc_tp_set_config(dhd->pub.info->rpc_th, &config)) {
+ DHD_ERROR(("set tx/rx queue size and buffersize failed\n"));
+ }
+ } else {
+ DHD_ERROR(("get rpc_dngl_agglimit failed\n"));
+ rpc_agg &= ~BCM_RPC_TP_DNGL_AGG_DPC;
+ }
+
+ /* Set aggregation for TX */
+ bcm_rpc_tp_agg_set(dhd->pub.info->rpc_th, BCM_RPC_TP_HOST_AGG_MASK,
+ rpc_agg & BCM_RPC_TP_HOST_AGG_MASK);
+
+ /* Set aggregation for RX */
+ memset(iovbuf, 0, sizeof(iovbuf));
+ bcm_mkiovar("rpc_agg", (char *)&rpc_agg, sizeof(rpc_agg), iovbuf, sizeof(iovbuf));
+ if (!dhd_wl_ioctl_cmd(&dhd->pub, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0)) {
+ dhd->pub.info->fdaggr = 0;
+ if (rpc_agg & BCM_RPC_TP_HOST_AGG_MASK)
+ dhd->pub.info->fdaggr |= BCM_FDAGGR_H2D_ENABLED;
+ if (rpc_agg & BCM_RPC_TP_DNGL_AGG_MASK)
+ dhd->pub.info->fdaggr |= BCM_FDAGGR_D2H_ENABLED;
+ } else {
+ DHD_ERROR(("%s(): Setting RX aggregation failed %d\n", __FUNCTION__, ret));
+ }
+#endif /* BCM_FD_AGGR */
- dhd = DHD_DEV_INFO(inet6_ifa->idev->dev);
- if (!dhd) {
- return NOTIFY_DONE;
- }
- dhdp = &dhd->pub;
+#ifdef BT_OVER_SDIO
+ if (dhd->pub.is_bt_recovery_required) {
+ DHD_ERROR(("%s: Send Hang Notification 2 to BT\n", __FUNCTION__));
+ bcmsdh_btsdio_process_dhd_hang_notification(TRUE);
+ }
+ dhd->pub.is_bt_recovery_required = FALSE;
+#endif
- /* Supports only primary interface */
- idx = dhd_net2idx(dhd, inet6_ifa->idev->dev);
- if (idx != 0) {
- return NOTIFY_DONE;
- }
+ /* dhd_sync_with_dongle has been called in dhd_bus_start or wl_android_wifi_on */
+ memcpy(net->dev_addr, dhd->pub.mac.octet, ETHER_ADDR_LEN);
- /* FW capability */
- if (!FW_SUPPORTED(dhdp, ndoe)) {
- return NOTIFY_DONE;
- }
+#ifdef TOE
+ /* Get current TOE mode from dongle */
+ if (dhd_toe_get(dhd, ifidx, &toe_ol) >= 0 && (toe_ol & TOE_TX_CSUM_OL) != 0) {
+ dhd->iflist[ifidx]->net->features |= NETIF_F_IP_CSUM;
+ } else {
+ dhd->iflist[ifidx]->net->features &= ~NETIF_F_IP_CSUM;
+ }
+#endif /* TOE */
- ndo_info = (struct ipv6_work_info_t *)kzalloc(sizeof(struct ipv6_work_info_t), GFP_ATOMIC);
- if (!ndo_info) {
- DHD_ERROR(("%s: ipv6 work alloc failed\n", __FUNCTION__));
- return NOTIFY_DONE;
- }
+#if defined(DHD_LB_RXP)
+ __skb_queue_head_init(&dhd->rx_pend_queue);
+ if (dhd->rx_napi_netdev == NULL) {
+ dhd->rx_napi_netdev = dhd->iflist[ifidx]->net;
+ memset(&dhd->rx_napi_struct, 0, sizeof(struct napi_struct));
+ netif_napi_add(dhd->rx_napi_netdev, &dhd->rx_napi_struct,
+ dhd_napi_poll, dhd_napi_weight);
+ DHD_INFO(("%s napi<%p> enabled ifp->net<%p,%s>\n",
+ __FUNCTION__, &dhd->rx_napi_struct, net, net->name));
+ napi_enable(&dhd->rx_napi_struct);
+ DHD_INFO(("%s load balance init rx_napi_struct\n", __FUNCTION__));
+ skb_queue_head_init(&dhd->rx_napi_queue);
+ } /* rx_napi_netdev == NULL */
+#endif /* DHD_LB_RXP */
- /* fill up ndo_info */
- ndo_info->event = event;
- ndo_info->if_idx = idx;
- memcpy(ndo_info->ipv6_addr, &inet6_ifa->addr, IPV6_ADDR_LEN);
+#if defined(DHD_LB_TXP)
+ /* Use the variant that uses locks */
+ skb_queue_head_init(&dhd->tx_pend_queue);
+#endif /* DHD_LB_TXP */
- /* defer the work to thread as it may block kernel */
- dhd_deferred_schedule_work(dhd->dhd_deferred_wq, (void *)ndo_info, DHD_WQ_WORK_IPV6_NDO,
- dhd_inet6_work_handler, DHD_WQ_WORK_PRIORITY_LOW);
- return NOTIFY_DONE;
-}
+#if defined(WL_CFG80211)
+ if (unlikely(wl_cfg80211_up(net))) {
+ DHD_ERROR(("%s: failed to bring up cfg80211\n", __FUNCTION__));
+ ret = -1;
+ goto exit;
+ }
+ if (!dhd_download_fw_on_driverload) {
+#ifdef ARP_OFFLOAD_SUPPORT
+ dhd->pend_ipaddr = 0;
+ if (!dhd_inetaddr_notifier_registered) {
+ dhd_inetaddr_notifier_registered = TRUE;
+ register_inetaddr_notifier(&dhd_inetaddr_notifier);
+ }
+#endif /* ARP_OFFLOAD_SUPPORT */
+#if defined(CONFIG_IPV6) && defined(IPV6_NDO_SUPPORT)
+ if (!dhd_inet6addr_notifier_registered) {
+ dhd_inet6addr_notifier_registered = TRUE;
+ register_inet6addr_notifier(&dhd_inet6addr_notifier);
+ }
#endif /* CONFIG_IPV6 && IPV6_NDO_SUPPORT */
+ }
-/* Network attach to be invoked from the bus probe handlers */
-int
-dhd_attach_net(dhd_pub_t *dhdp, bool need_rtnl_lock)
-{
- struct net_device *primary_ndev;
- BCM_REFERENCE(primary_ndev);
+ argos_register_notifier_init(net);
+#if defined(NUM_SCB_MAX_PROBE)
+ dhd_set_scb_probe(&dhd->pub);
+#endif /* NUM_SCB_MAX_PROBE */
+#endif /* WL_CFG80211 */
+ }
+
+ /* Allow transmit calls */
+ netif_start_queue(net);
+ dhd->pub.up = 1;
+
+ OLD_MOD_INC_USE_COUNT;
+
+#ifdef BCMDBGFS
+ dhd_dbgfs_init(&dhd->pub);
+#endif
- /* Register primary net device */
- if (dhd_register_if(dhdp, 0, need_rtnl_lock) != 0) {
- return BCME_ERROR;
+exit:
+ if (ret) {
+ dhd_stop(net);
}
-#if defined(WL_CFG80211)
- primary_ndev = dhd_linux_get_primary_netdev(dhdp);
- if (wl_cfg80211_net_attach(primary_ndev) < 0) {
- /* fail the init */
- dhd_remove_if(dhdp, 0, TRUE);
- return BCME_ERROR;
- }
-#endif /* WL_CFG80211 */
- return BCME_OK;
+ DHD_PERIM_UNLOCK(&dhd->pub);
+ DHD_OS_WAKE_UNLOCK(&dhd->pub);
+ DHD_MUTEX_UNLOCK();
+
+ printf("%s: Exit ret=%d\n", __FUNCTION__, ret);
+ return ret;
}
-int
-dhd_register_if(dhd_pub_t *dhdp, int ifidx, bool need_rtnl_lock)
+int dhd_do_driver_init(struct net_device *net)
{
- dhd_info_t *dhd = (dhd_info_t *)dhdp->info;
- dhd_if_t *ifp;
- struct net_device *net = NULL;
- int err = 0;
- uint8 temp_addr[ETHER_ADDR_LEN] = { 0x00, 0x90, 0x4c, 0x11, 0x22, 0x33 };
-
- DHD_TRACE(("%s: ifidx %d\n", __FUNCTION__, ifidx));
+ dhd_info_t *dhd = NULL;
- if (dhd == NULL || dhd->iflist[ifidx] == NULL) {
- DHD_ERROR(("%s: Invalid Interface\n", __FUNCTION__));
- return BCME_ERROR;
+ if (!net) {
+ DHD_ERROR(("Primary Interface not initialized \n"));
+ return -EINVAL;
}
- ASSERT(dhd && dhd->iflist[ifidx]);
- ifp = dhd->iflist[ifidx];
- net = ifp->net;
- ASSERT(net && (ifp->idx == ifidx));
+ DHD_MUTEX_IS_LOCK_RETURN();
- ASSERT(!net->netdev_ops);
- net->netdev_ops = &dhd_ops_virt;
+ /* && defined(OEM_ANDROID) && defined(BCMSDIO) */
+ dhd = DHD_DEV_INFO(net);
- /* Ok, link into the network layer... */
- if (ifidx == 0) {
- /*
- * device functions for the primary interface only
- */
- net->netdev_ops = &dhd_ops_pri;
- if (!ETHER_ISNULLADDR(dhd->pub.mac.octet))
- memcpy(temp_addr, dhd->pub.mac.octet, ETHER_ADDR_LEN);
- } else {
- /*
- * We have to use the primary MAC for virtual interfaces
- */
- memcpy(temp_addr, ifp->mac_addr, ETHER_ADDR_LEN);
- /*
- * Android sets the locally administered bit to indicate that this is a
- * portable hotspot. This will not work in simultaneous AP/STA mode,
- * nor with P2P. Need to set the Donlge's MAC address, and then use that.
- */
- if (!memcmp(temp_addr, dhd->iflist[0]->mac_addr,
- ETHER_ADDR_LEN)) {
- DHD_ERROR(("%s interface [%s]: set locally administered bit in MAC\n",
- __func__, net->name));
- temp_addr[0] |= 0x02;
- }
+ /* If driver is already initialized, do nothing */
+ if (dhd->pub.busstate == DHD_BUS_DATA) {
+ DHD_TRACE(("Driver already initialized. Nothing to do"));
+ return 0;
}
- net->hard_header_len = ETH_HLEN + dhd->pub.hdrlen;
- net->ethtool_ops = &dhd_ethtool_ops;
+ if (dhd_open(net) < 0) {
+ DHD_ERROR(("Driver Init Failed \n"));
+ return -1;
+ }
-#if defined(WL_WIRELESS_EXT)
-#if WIRELESS_EXT < 19
- net->get_wireless_stats = dhd_get_wireless_stats;
-#endif /* WIRELESS_EXT < 19 */
-#if WIRELESS_EXT > 12
- net->wireless_handlers = &wl_iw_handler_def;
-#endif /* WIRELESS_EXT > 12 */
-#endif /* defined(WL_WIRELESS_EXT) */
+ return 0;
+}
- dhd->pub.rxsz = DBUS_RX_BUFFER_SIZE_DHD(net);
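+/*
+ * Dongle "interface add" event. If cfg80211 consumes the notification
+ * the call returns immediately; otherwise (virtual interfaces only,
+ * ifidx > 0) the event is copied into a dhd_if_event_t and deferred to
+ * dhd_ifadd_event_handler on the DHD work queue, keeping the DPC
+ * context from blocking on netdev registration.
+ */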
+int
+dhd_event_ifadd(dhd_info_t *dhdinfo, wl_event_data_if_t *ifevent, char *name, uint8 *mac)
+{
-#ifdef WLMESH
- if (ifidx >= 2 && dhdp->conf->fw_type == FW_TYPE_MESH) {
- temp_addr[4] ^= 0x80;
- temp_addr[4] += ifidx;
- temp_addr[5] += ifidx;
- }
+#ifdef WL_CFG80211
+ if (wl_cfg80211_notify_ifadd(dhd_linux_get_primary_netdev(&dhdinfo->pub),
+ ifevent->ifidx, name, mac, ifevent->bssidx) == BCME_OK)
+ return BCME_OK;
#endif
- memcpy(net->dev_addr, temp_addr, ETHER_ADDR_LEN);
-
- if (ifidx == 0)
- printf("%s\n", dhd_version);
- else {
-#ifdef WL_EXT_IAPSTA
- wl_ext_iapsta_update_net_device(net, ifidx);
-#endif /* WL_EXT_IAPSTA */
- if (_dhd_set_mac_address(dhd, ifidx, net->dev_addr) == 0)
- DHD_INFO(("%s: MACID is overwritten\n", __FUNCTION__));
- else
- DHD_ERROR(("%s: _dhd_set_mac_address() failed\n", __FUNCTION__));
- }
- if (need_rtnl_lock)
- err = register_netdev(net);
- else
- err = register_netdevice(net);
+ /* handle IF event caused by wl commands, SoftAP, WEXT and
+ * anything else. This has to be done asynchronously, otherwise
+ * DPC will be blocked (and iovars will time out as DPC has no chance
+ * to read the response back)
+ */
+ if (ifevent->ifidx > 0) {
+ dhd_if_event_t *if_event = MALLOC(dhdinfo->pub.osh, sizeof(dhd_if_event_t));
+ if (if_event == NULL) {
+ DHD_ERROR(("dhd_event_ifadd: Failed MALLOC, malloced %d bytes",
+ MALLOCED(dhdinfo->pub.osh)));
+ return BCME_NOMEM;
+ }
- if (err != 0) {
- DHD_ERROR(("couldn't register the net device [%s], err %d\n", net->name, err));
- goto fail;
+ memcpy(&if_event->event, ifevent, sizeof(if_event->event));
+ memcpy(if_event->mac, mac, ETHER_ADDR_LEN);
+ strncpy(if_event->name, name, IFNAMSIZ);
+ if_event->name[IFNAMSIZ - 1] = '\0';
+ dhd_deferred_schedule_work(dhdinfo->dhd_deferred_wq, (void *)if_event,
+ DHD_WQ_WORK_IF_ADD, dhd_ifadd_event_handler, DHD_WQ_WORK_PRIORITY_LOW);
}
-#if defined(WL_EXT_IAPSTA) || defined(USE_IW) || defined(WL_ESCAN)
- wl_ext_event_attach_netdev(net, ifidx, ifp->bssidx);
-#ifdef WL_ESCAN
- wl_escan_event_attach(net, dhdp);
-#endif /* WL_ESCAN */
-#ifdef WL_EXT_IAPSTA
- wl_ext_iapsta_attach_netdev(net, ifidx, ifp->bssidx);
- wl_ext_iapsta_attach_name(net, ifidx);
-#endif /* WL_EXT_IAPSTA */
-#endif /* WL_EXT_IAPSTA || USE_IW || WL_ESCAN */
- printf("Register interface [%s] MAC: "MACDBG"\n\n", net->name,
- MAC2STRDBG(net->dev_addr));
+ return BCME_OK;
+}
-#if defined(SOFTAP) && defined(WL_WIRELESS_EXT) && !defined(WL_CFG80211)
-// wl_iw_iscan_set_scan_broadcast_prep(net, 1);
-#endif // endif
+int
+dhd_event_ifdel(dhd_info_t *dhdinfo, wl_event_data_if_t *ifevent, char *name, uint8 *mac)
+{
+ dhd_if_event_t *if_event;
-#if (defined(BCMPCIE) || defined(BCMLXSDMMC) || defined(BCMDBUS))
- if (ifidx == 0) {
-#if defined(BCMLXSDMMC) && !defined(DHD_PRELOAD)
- up(&dhd_registration_sem);
-#endif /* BCMLXSDMMC */
- if (!dhd_download_fw_on_driverload) {
#ifdef WL_CFG80211
- wl_terminate_event_handler(net);
+ if (wl_cfg80211_notify_ifdel(dhd_linux_get_primary_netdev(&dhdinfo->pub),
+ ifevent->ifidx, name, mac, ifevent->bssidx) == BCME_OK)
+ return BCME_OK;
#endif /* WL_CFG80211 */
-#if defined(DHD_LB_RXP)
- __skb_queue_purge(&dhd->rx_pend_queue);
-#endif /* DHD_LB_RXP */
-
-#if defined(DHD_LB_TXP)
- skb_queue_purge(&dhd->tx_pend_queue);
-#endif /* DHD_LB_TXP */
-
-#ifdef SHOW_LOGTRACE
- /* Release the skbs from queue for WLC_E_TRACE event */
- dhd_event_logtrace_flush_queue(dhdp);
-#endif /* SHOW_LOGTRACE */
-#if defined(BCMPCIE) && defined(DHDTCPACK_SUPPRESS)
- dhd_tcpack_suppress_set(dhdp, TCPACK_SUP_OFF);
-#endif /* BCMPCIE && DHDTCPACK_SUPPRESS */
- dhd_net_bus_devreset(net, TRUE);
-#ifdef BCMLXSDMMC
- dhd_net_bus_suspend(net);
-#endif /* BCMLXSDMMC */
- wifi_platform_set_power(dhdp->info->adapter, FALSE, WIFI_TURNOFF_DELAY);
-#if defined(BT_OVER_SDIO)
- dhd->bus_user_count--;
-#endif /* BT_OVER_SDIO */
- }
-#if defined(WL_WIRELESS_EXT)
- wl_iw_down(net, &dhd->pub);
-#endif /* defined(WL_WIRELESS_EXT) */
+ /* handle IF event caused by wl commands, SoftAP, WEXT and
+ * anything else
+ */
+ if_event = MALLOC(dhdinfo->pub.osh, sizeof(dhd_if_event_t));
+ if (if_event == NULL) {
+ DHD_ERROR(("dhd_event_ifdel: malloc failed for if_event, malloced %d bytes",
+ MALLOCED(dhdinfo->pub.osh)));
+ return BCME_NOMEM;
}
-#endif /* OEM_ANDROID && (BCMPCIE || BCMLXSDMMC) */
- return 0;
+ memcpy(&if_event->event, ifevent, sizeof(if_event->event));
+ memcpy(if_event->mac, mac, ETHER_ADDR_LEN);
+ strncpy(if_event->name, name, IFNAMSIZ);
+ if_event->name[IFNAMSIZ - 1] = '\0';
+ dhd_deferred_schedule_work(dhdinfo->dhd_deferred_wq, (void *)if_event, DHD_WQ_WORK_IF_DEL,
+ dhd_ifdel_event_handler, DHD_WQ_WORK_PRIORITY_LOW);
-fail:
- net->netdev_ops = NULL;
- return err;
+ return BCME_OK;
}
-void
-dhd_bus_detach(dhd_pub_t *dhdp)
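+/*
+ * Dongle "interface change" event: cfg80211 is notified synchronously;
+ * a deferred name/MAC update is scheduled in addition only when
+ * DHD_UPDATE_INTF_MAC is enabled.
+ */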
+int
+dhd_event_ifchange(dhd_info_t *dhdinfo, wl_event_data_if_t *ifevent, char *name, uint8 *mac)
{
- dhd_info_t *dhd;
-
- DHD_TRACE(("%s: Enter\n", __FUNCTION__));
-
- if (dhdp) {
- dhd = (dhd_info_t *)dhdp->info;
- if (dhd) {
+#ifdef DHD_UPDATE_INTF_MAC
+ dhd_if_event_t *if_event;
+#endif /* DHD_UPDATE_INTF_MAC */
- /*
- * In case of Android cfg80211 driver, the bus is down in dhd_stop,
- * calling stop again will cuase SD read/write errors.
- */
- if (dhd->pub.busstate != DHD_BUS_DOWN && dhd_download_fw_on_driverload) {
- /* Stop the protocol module */
- dhd_prot_stop(&dhd->pub);
+#ifdef WL_CFG80211
+ wl_cfg80211_notify_ifchange(dhd_linux_get_primary_netdev(&dhdinfo->pub),
+ ifevent->ifidx, name, mac, ifevent->bssidx);
+#endif /* WL_CFG80211 */
- /* Stop the bus module */
-#ifdef BCMDBUS
- /* Force Dongle terminated */
- if (dhd_wl_ioctl_cmd(dhdp, WLC_TERMINATED, NULL, 0, TRUE, 0) < 0)
- DHD_ERROR(("%s Setting WLC_TERMINATED failed\n",
- __FUNCTION__));
- dbus_stop(dhd->pub.bus);
- dhd->pub.busstate = DHD_BUS_DOWN;
-#else
- dhd_bus_stop(dhd->pub.bus, TRUE);
-#endif /* BCMDBUS */
- }
+#ifdef DHD_UPDATE_INTF_MAC
+ /* handle IF event caused by wl commands, SoftAP, WEXT, MBSS and
+ * anything else
+ */
+ if_event = MALLOC(dhdinfo->pub.osh, sizeof(dhd_if_event_t));
+ if (if_event == NULL) {
+ DHD_ERROR(("dhd_event_ifchange: malloc failed for if_event, malloced %d bytes",
+ MALLOCED(dhdinfo->pub.osh)));
+ return BCME_NOMEM;
+ }
+ memcpy(&if_event->event, ifevent, sizeof(if_event->event));
+ // construct a change event
+ if_event->event.ifidx = dhd_ifname2idx(dhdinfo, name);
+ if_event->event.opcode = WLC_E_IF_CHANGE;
+ memcpy(if_event->mac, mac, ETHER_ADDR_LEN);
+ strncpy(if_event->name, name, IFNAMSIZ);
+ if_event->name[IFNAMSIZ - 1] = '\0';
+ dhd_deferred_schedule_work(dhdinfo->dhd_deferred_wq, (void *)if_event, DHD_WQ_WORK_IF_UPDATE,
+ dhd_ifupdate_event_handler, DHD_WQ_WORK_PRIORITY_LOW);
+#endif /* DHD_UPDATE_INTF_MAC */
-#if defined(OOB_INTR_ONLY) || defined(BCMSPI_ANDROID) || defined(BCMPCIE_OOB_HOST_WAKE)
- dhd_bus_oob_intr_unregister(dhdp);
-#endif /* OOB_INTR_ONLY || BCMSPI_ANDROID || BCMPCIE_OOB_HOST_WAKE */
- }
- }
+ return BCME_OK;
}
-void dhd_detach(dhd_pub_t *dhdp)
+/* unregister and free the existing net_device interface (if any) in iflist and
+ * allocate a new one. The slot is reused. This function does NOT register the
+ * new interface to the Linux kernel; dhd_register_if does that job.
+ */
+struct net_device*
+dhd_allocate_if(dhd_pub_t *dhdpub, int ifidx, const char *name,
+ uint8 *mac, uint8 bssidx, bool need_rtnl_lock, const char *dngl_name)
{
- dhd_info_t *dhd;
- unsigned long flags;
- int timer_valid = FALSE;
- struct net_device *dev;
-#ifdef WL_CFG80211
- struct bcm_cfg80211 *cfg = NULL;
-#endif // endif
- if (!dhdp)
- return;
+ dhd_info_t *dhdinfo = (dhd_info_t *)dhdpub->info;
+ dhd_if_t *ifp;
- dhd = (dhd_info_t *)dhdp->info;
- if (!dhd)
- return;
+ ASSERT(dhdinfo && (ifidx < DHD_MAX_IFS));
+ ifp = dhdinfo->iflist[ifidx];
- dev = dhd->iflist[0]->net;
+ if (ifp != NULL) {
+ if (ifp->net != NULL) {
+ DHD_ERROR(("%s: free existing IF %s ifidx:%d \n",
+ __FUNCTION__, ifp->net->name, ifidx));
- if (dev) {
- rtnl_lock();
-#if defined(WL_CFG80211) && defined(WL_STATIC_IF)
- if (dhd->dhd_state & DHD_ATTACH_STATE_CFG80211) {
- cfg = wl_get_cfg(dev);
- if (cfg && cfg->static_ndev && (cfg->static_ndev->flags & IFF_UP)) {
- dev_close(cfg->static_ndev);
+ if (ifidx == 0) {
+ /* For primary ifidx (0), there shouldn't be
+ * any netdev present already.
+ */
+ DHD_ERROR(("Primary ifidx populated already\n"));
+ ASSERT(0);
+ return NULL;
}
- }
-#endif /* WL_CFG80211 && WL_STATIC_IF */
- if (dev->flags & IFF_UP) {
- /* If IFF_UP is still up, it indicates that
- * "ifconfig wlan0 down" hasn't been called.
- * So invoke dev_close explicitly here to
- * bring down the interface.
+
+ dhd_dev_priv_clear(ifp->net); /* clear net_device private */
+
+ /* in unregister_netdev case, the interface gets freed by net->destructor
+ * (which is set to free_netdev)
*/
- DHD_TRACE(("IFF_UP flag is up. Enforcing dev_close from detach \n"));
- dev_close(dev);
+ if (ifp->net->reg_state == NETREG_UNINITIALIZED) {
+ free_netdev(ifp->net);
+ } else {
+ netif_stop_queue(ifp->net);
+ if (need_rtnl_lock)
+ unregister_netdev(ifp->net);
+ else
+ unregister_netdevice(ifp->net);
+ }
+ ifp->net = NULL;
+ }
+ } else {
+ ifp = MALLOC(dhdinfo->pub.osh, sizeof(dhd_if_t));
+ if (ifp == NULL) {
+ DHD_ERROR(("%s: OOM - dhd_if_t(%zu)\n", __FUNCTION__, sizeof(dhd_if_t)));
+ return NULL;
}
- rtnl_unlock();
}
- DHD_TRACE(("%s: Enter state 0x%x\n", __FUNCTION__, dhd->dhd_state));
+ memset(ifp, 0, sizeof(dhd_if_t));
+ ifp->info = dhdinfo;
+ ifp->idx = ifidx;
+ ifp->bssidx = bssidx;
+#ifdef DHD_MCAST_REGEN
+ ifp->mcast_regen_bss_enable = FALSE;
+#endif
+ /* set rx_pkt_chainable to TRUE at alloc time */
+ ifp->rx_pkt_chainable = TRUE;
- DHD_ERROR(("%s: making dhdpub up FALSE\n", __FUNCTION__));
- dhd->pub.up = 0;
- if (!(dhd->dhd_state & DHD_ATTACH_STATE_DONE)) {
- /* Give sufficient time for threads to start running in case
- * dhd_attach() has failed
- */
- OSL_SLEEP(100);
+ if (mac != NULL)
+ memcpy(&ifp->mac_addr, mac, ETHER_ADDR_LEN);
+
+ /* Allocate etherdev, including space for private structure */
+ ifp->net = alloc_etherdev(DHD_DEV_PRIV_SIZE);
+ if (ifp->net == NULL) {
+ DHD_ERROR(("%s: OOM - alloc_etherdev(%zu)\n", __FUNCTION__, sizeof(dhdinfo)));
+ goto fail;
}
-#ifdef DHD_WET
- dhd_free_wet_info(&dhd->pub, dhd->pub.wet_info);
-#endif /* DHD_WET */
-#if defined(BCM_DNGL_EMBEDIMAGE) || defined(BCM_REQUEST_FW)
-#endif /* defined(BCM_DNGL_EMBEDIMAGE) || defined(BCM_REQUEST_FW) */
-#ifdef PROP_TXSTATUS
-#ifdef DHD_WLFC_THREAD
- if (dhd->pub.wlfc_thread) {
- kthread_stop(dhd->pub.wlfc_thread);
- dhdp->wlfc_thread_go = TRUE;
- wake_up_interruptible(&dhdp->wlfc_wqhead);
+ /* Setup the dhd interface's netdevice private structure. */
+ dhd_dev_priv_save(ifp->net, dhdinfo, ifp, ifidx);
+
+ if (name && name[0]) {
+ strncpy(ifp->net->name, name, IFNAMSIZ);
+ ifp->net->name[IFNAMSIZ - 1] = '\0';
}
- dhd->pub.wlfc_thread = NULL;
-#endif /* DHD_WLFC_THREAD */
-#endif /* PROP_TXSTATUS */
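+ /*
+ * Pick the netdev teardown hook for this kernel: before 4.11 the
+ * ->destructor callback is used to free the net_device, while newer
+ * kernels rely on needs_free_netdev plus an optional priv_destructor
+ * (the exact cut-over point varies between stable branches).
+ */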
#ifdef WL_CFG80211
- if (dev)
- wl_cfg80211_down(dev);
+ if (ifidx == 0) {
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 11, 0)
+ ifp->net->destructor = free_netdev;
+#else
+ ifp->net->needs_free_netdev = true;
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 0) */
+ } else {
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 11, 0)
+ ifp->net->destructor = dhd_netdev_free;
+#else
+ ifp->net->needs_free_netdev = true;
+ ifp->net->priv_destructor = dhd_netdev_free;
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 0) */
+ }
+#else
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 11, 0)
+ ifp->net->destructor = free_netdev;
+#else
+ ifp->net->needs_free_netdev = true;
+#endif
#endif /* WL_CFG80211 */
+ strncpy(ifp->name, ifp->net->name, IFNAMSIZ);
+ ifp->name[IFNAMSIZ - 1] = '\0';
+ dhdinfo->iflist[ifidx] = ifp;
- if (dhd->dhd_state & DHD_ATTACH_STATE_PROT_ATTACH) {
+ /* initialize the dongle-provided interface name */
+ if (dngl_name)
+ strncpy(ifp->dngl_name, dngl_name, IFNAMSIZ);
+ else if (name)
+ strncpy(ifp->dngl_name, name, IFNAMSIZ);
- dhd_bus_detach(dhdp);
-#ifdef BCMPCIE
- if (is_reboot == SYS_RESTART) {
- extern bcmdhd_wifi_platdata_t *dhd_wifi_platdata;
- if (dhd_wifi_platdata && !dhdp->dongle_reset) {
- dhdpcie_bus_clock_stop(dhdp->bus);
- wifi_platform_set_power(dhd_wifi_platdata->adapters,
- FALSE, WIFI_TURNOFF_DELAY);
- }
- }
-#endif /* BCMPCIE */
-#ifndef PCIE_FULL_DONGLE
- if (dhdp->prot)
- dhd_prot_detach(dhdp);
-#endif /* !PCIE_FULL_DONGLE */
- }
+#ifdef PCIE_FULL_DONGLE
+ /* Initialize STA info list */
+ INIT_LIST_HEAD(&ifp->sta_list);
+ DHD_IF_STA_LIST_LOCK_INIT(ifp);
+#endif /* PCIE_FULL_DONGLE */
-#ifdef ARP_OFFLOAD_SUPPORT
- if (dhd_inetaddr_notifier_registered) {
- dhd_inetaddr_notifier_registered = FALSE;
- unregister_inetaddr_notifier(&dhd_inetaddr_notifier);
- }
-#endif /* ARP_OFFLOAD_SUPPORT */
-#if defined(CONFIG_IPV6) && defined(IPV6_NDO_SUPPORT)
- if (dhd_inet6addr_notifier_registered) {
- dhd_inet6addr_notifier_registered = FALSE;
- unregister_inet6addr_notifier(&dhd_inet6addr_notifier);
- }
-#endif /* CONFIG_IPV6 && IPV6_NDO_SUPPORT */
-#if defined(CONFIG_HAS_EARLYSUSPEND) && defined(DHD_USE_EARLYSUSPEND)
- if (dhd->dhd_state & DHD_ATTACH_STATE_EARLYSUSPEND_DONE) {
- if (dhd->early_suspend.suspend)
- unregister_early_suspend(&dhd->early_suspend);
- }
-#endif /* CONFIG_HAS_EARLYSUSPEND && DHD_USE_EARLYSUSPEND */
+#ifdef DHD_L2_FILTER
+ ifp->phnd_arp_table = init_l2_filter_arp_table(dhdpub->osh);
+ ifp->parp_allnode = TRUE;
+#endif /* DHD_L2_FILTER */
-#if defined(WL_WIRELESS_EXT)
- if (dhd->dhd_state & DHD_ATTACH_STATE_WL_ATTACH) {
- /* Detatch and unlink in the iw */
- wl_iw_detach(dev, dhdp);
- }
-#endif /* defined(WL_WIRELESS_EXT) */
-#if defined(WL_EXT_IAPSTA) || defined(USE_IW) || defined(WL_ESCAN)
-#ifdef WL_EXT_IAPSTA
- wl_ext_iapsta_dettach(dhdp);
-#endif /* WL_EXT_IAPSTA */
-#ifdef WL_ESCAN
- wl_escan_detach(dev, dhdp);
-#endif /* WL_ESCAN */
- wl_ext_event_dettach(dhdp);
-#endif /* WL_EXT_IAPSTA || USE_IW || WL_ESCAN */
-#ifdef DHD_ULP
- dhd_ulp_deinit(dhd->pub.osh, dhdp);
-#endif /* DHD_ULP */
+ DHD_CUMM_CTR_INIT(&ifp->cumm_ctr);
- /* delete all interfaces, start with virtual */
- if (dhd->dhd_state & DHD_ATTACH_STATE_ADD_IF) {
- int i = 1;
- dhd_if_t *ifp;
+ return ifp->net;
- /* Cleanup virtual interfaces */
- dhd_net_if_lock_local(dhd);
- for (i = 1; i < DHD_MAX_IFS; i++) {
- if (dhd->iflist[i]) {
- dhd_remove_if(&dhd->pub, i, TRUE);
+fail:
+ if (ifp != NULL) {
+ if (ifp->net != NULL) {
+#if defined(DHD_LB_RXP) && defined(PCIE_FULL_DONGLE)
+ if (ifp->net == dhdinfo->rx_napi_netdev) {
+ napi_disable(&dhdinfo->rx_napi_struct);
+ netif_napi_del(&dhdinfo->rx_napi_struct);
+ skb_queue_purge(&dhdinfo->rx_napi_queue);
+ dhdinfo->rx_napi_netdev = NULL;
}
+#endif /* DHD_LB_RXP && PCIE_FULL_DONGLE */
+ dhd_dev_priv_clear(ifp->net);
+ free_netdev(ifp->net);
+ ifp->net = NULL;
}
- dhd_net_if_unlock_local(dhd);
+ MFREE(dhdinfo->pub.osh, ifp, sizeof(*ifp));
+ ifp = NULL;
+ }
+ dhdinfo->iflist[ifidx] = NULL;
+ return NULL;
+}
- /* delete primary interface 0 */
- ifp = dhd->iflist[0];
- if (ifp && ifp->net) {
+/* unregister and free the net_device interface associated with the indexed
+ * slot, also free the slot memory and set the slot pointer to NULL
+ */
+int
+dhd_remove_if(dhd_pub_t *dhdpub, int ifidx, bool need_rtnl_lock)
+{
+ dhd_info_t *dhdinfo = (dhd_info_t *)dhdpub->info;
+ dhd_if_t *ifp;
+#ifdef PCIE_FULL_DONGLE
+ if_flow_lkup_t *if_flow_lkup = (if_flow_lkup_t *)dhdpub->if_flow_lkup;
+#endif /* PCIE_FULL_DONGLE */
-#ifdef WL_CFG80211
- cfg = wl_get_cfg(ifp->net);
-#endif // endif
+ ifp = dhdinfo->iflist[ifidx];
+
+ if (ifp != NULL) {
+ if (ifp->net != NULL) {
+ DHD_ERROR(("deleting interface '%s' idx %d\n", ifp->net->name, ifp->idx));
+
+ dhdinfo->iflist[ifidx] = NULL;
/* in unregister_netdev case, the interface gets freed by net->destructor
* (which is set to free_netdev)
*/
if (ifp->net->reg_state == NETREG_UNINITIALIZED) {
free_netdev(ifp->net);
} else {
-#if defined(ARGOS_NOTIFY_CB)
- argos_register_notifier_deinit();
-#endif // endif
-#ifdef SET_RPS_CPUS
+ netif_tx_disable(ifp->net);
+
+#if defined(SET_RPS_CPUS)
custom_rps_map_clear(ifp->net->_rx);
#endif /* SET_RPS_CPUS */
- netif_tx_disable(ifp->net);
- unregister_netdev(ifp->net);
+ if (need_rtnl_lock)
+ unregister_netdev(ifp->net);
+ else
+ unregister_netdevice(ifp->net);
}
-#ifdef PCIE_FULL_DONGLE
- ifp->net = DHD_NET_DEV_NULL;
-#else
ifp->net = NULL;
-#endif /* PCIE_FULL_DONGLE */
-
+ }
+#ifdef DHD_WMF
+ dhd_wmf_cleanup(dhdpub, ifidx);
+#endif /* DHD_WMF */
#ifdef DHD_L2_FILTER
- bcm_l2_filter_arp_table_update(dhdp->osh, ifp->phnd_arp_table, TRUE,
- NULL, FALSE, dhdp->tickcnt);
- deinit_l2_filter_arp_table(dhdp->osh, ifp->phnd_arp_table);
- ifp->phnd_arp_table = NULL;
+ bcm_l2_filter_arp_table_update(dhdpub->osh, ifp->phnd_arp_table, TRUE,
+ NULL, FALSE, dhdpub->tickcnt);
+ deinit_l2_filter_arp_table(dhdpub->osh, ifp->phnd_arp_table);
+ ifp->phnd_arp_table = NULL;
#endif /* DHD_L2_FILTER */
- dhd_if_del_sta_list(ifp);
- MFREE(dhd->pub.osh, ifp, sizeof(*ifp));
- dhd->iflist[0] = NULL;
-#ifdef WL_CFG80211
- if (cfg && cfg->wdev)
- cfg->wdev->netdev = NULL;
-#endif
+ dhd_if_del_sta_list(ifp);
+#ifdef PCIE_FULL_DONGLE
+ /* Delete flowrings of WDS interface */
+ if (if_flow_lkup[ifidx].role == WLC_E_IF_ROLE_WDS) {
+ dhd_flow_rings_delete(dhdpub, ifidx);
}
+#endif /* PCIE_FULL_DONGLE */
+ DHD_CUMM_CTR_INIT(&ifp->cumm_ctr);
+
+ MFREE(dhdinfo->pub.osh, ifp, sizeof(*ifp));
+ ifp = NULL;
}
- /* Clear the watchdog timer */
- DHD_GENERAL_LOCK(&dhd->pub, flags);
- timer_valid = dhd->wd_timer_valid;
- dhd->wd_timer_valid = FALSE;
- DHD_GENERAL_UNLOCK(&dhd->pub, flags);
- if (timer_valid)
- del_timer_sync(&dhd->timer);
- DHD_DISABLE_RUNTIME_PM(&dhd->pub);
+ return BCME_OK;
+}
-#ifdef BCMDBUS
- tasklet_kill(&dhd->tasklet);
+
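+/*
+ * Kernel net_device_ops for the primary and virtual interfaces.
+ * .ndo_open/.ndo_stop are only wired up for the primary interface;
+ * the multicast hook moved to .ndo_set_rx_mode around kernel 3.2,
+ * hence the version split below.
+ */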
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 31))
+static struct net_device_ops dhd_ops_pri = {
+ .ndo_open = dhd_open,
+ .ndo_stop = dhd_stop,
+ .ndo_get_stats = dhd_get_stats,
+ .ndo_do_ioctl = dhd_ioctl_entry,
+ .ndo_start_xmit = dhd_start_xmit,
+ .ndo_set_mac_address = dhd_set_mac_address,
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 2, 0))
+ .ndo_set_rx_mode = dhd_set_multicast_list,
#else
- if (dhd->dhd_state & DHD_ATTACH_STATE_THREADS_CREATED) {
- if (dhd->thr_wdt_ctl.thr_pid >= 0) {
- PROC_STOP(&dhd->thr_wdt_ctl);
- }
+ .ndo_set_multicast_list = dhd_set_multicast_list,
+#endif
+};
- if (dhd->rxthread_enabled && dhd->thr_rxf_ctl.thr_pid >= 0) {
- PROC_STOP(&dhd->thr_rxf_ctl);
- }
+static struct net_device_ops dhd_ops_virt = {
+ .ndo_get_stats = dhd_get_stats,
+ .ndo_do_ioctl = dhd_ioctl_entry,
+ .ndo_start_xmit = dhd_start_xmit,
+ .ndo_set_mac_address = dhd_set_mac_address,
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 2, 0))
+ .ndo_set_rx_mode = dhd_set_multicast_list,
+#else
+ .ndo_set_multicast_list = dhd_set_multicast_list,
+#endif
+};
+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 31)) */
- if (dhd->thr_dpc_ctl.thr_pid >= 0) {
- PROC_STOP(&dhd->thr_dpc_ctl);
- } else
- {
- tasklet_kill(&dhd->tasklet);
- }
- }
-#endif /* BCMDBUS */
+#ifdef DEBUGGER
+extern void debugger_init(void *bus_handle);
+#endif
-#ifdef WL_NATOE
- if (dhd->pub.nfct) {
- dhd_ct_close(dhd->pub.nfct);
- }
-#endif /* WL_NATOE */
-#ifdef DHD_LB
- if (dhd->dhd_state & DHD_ATTACH_STATE_LB_ATTACH_DONE) {
- /* Clear the flag first to avoid calling the cpu notifier */
- dhd->dhd_state &= ~DHD_ATTACH_STATE_LB_ATTACH_DONE;
+#ifdef SHOW_LOGTRACE
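+/*
+ * Thin wrappers around the in-kernel file API used by the logtrace
+ * string parsing below. They rely on vfs_read()/f_pos (and, further
+ * down, set_fs(KERNEL_DS)), which ties them to the older kernels this
+ * driver targets; newer kernels drop set_fs() and would need
+ * kernel_read()-style accessors instead.
+ */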
+int
+dhd_os_read_file(void *file, char *buf, uint32 size)
+{
+ struct file *filep = (struct file *)file;
- /* Kill the Load Balancing Tasklets */
-#ifdef DHD_LB_RXP
- cancel_work_sync(&dhd->rx_napi_dispatcher_work);
- __skb_queue_purge(&dhd->rx_pend_queue);
-#endif /* DHD_LB_RXP */
-#ifdef DHD_LB_TXP
- cancel_work_sync(&dhd->tx_dispatcher_work);
- tasklet_kill(&dhd->tx_tasklet);
- __skb_queue_purge(&dhd->tx_pend_queue);
-#endif /* DHD_LB_TXP */
-#ifdef DHD_LB_TXC
- cancel_work_sync(&dhd->tx_compl_dispatcher_work);
- tasklet_kill(&dhd->tx_compl_tasklet);
-#endif /* DHD_LB_TXC */
-#ifdef DHD_LB_RXC
- tasklet_kill(&dhd->rx_compl_tasklet);
-#endif /* DHD_LB_RXC */
+ if (!file || !buf)
+ return -1;
- /* Unregister from CPU Hotplug framework */
- dhd_unregister_cpuhp_callback(dhd);
+ return vfs_read(filep, buf, size, &filep->f_pos);
+}
- dhd_cpumasks_deinit(dhd);
- DHD_LB_STATS_DEINIT(&dhd->pub);
- }
-#endif /* DHD_LB */
+int
+dhd_os_seek_file(void *file, int64 offset)
+{
+ struct file *filep = (struct file *)file;
+ if (!file)
+ return -1;
-#ifdef CSI_SUPPORT
- dhd_csi_deinit(dhdp);
-#endif /* CSI_SUPPORT */
+ /* offset can be -ve */
+ filep->f_pos = filep->f_pos + offset;
-#if defined(DNGL_AXI_ERROR_LOGGING) && defined(DHD_USE_WQ_FOR_DNGL_AXI_ERROR)
- cancel_work_sync(&dhd->axi_error_dispatcher_work);
-#endif /* DNGL_AXI_ERROR_LOGGING && DHD_USE_WQ_FOR_DNGL_AXI_ERROR */
+ return 0;
+}
- DHD_SSSR_MEMPOOL_DEINIT(&dhd->pub);
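+/* Read the logstrs file (logstrs_path) into memory and hand it to dhd_parse_logstrs_file(). */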
+static int
+dhd_init_logstrs_array(osl_t *osh, dhd_event_log_t *temp)
+{
+ struct file *filep = NULL;
+ struct kstat stat;
+ mm_segment_t fs;
+ char *raw_fmts = NULL;
+ int logstrs_size = 0;
+ int error = 0;
-#ifdef WL_CFG80211
- if (dhd->dhd_state & DHD_ATTACH_STATE_CFG80211) {
- if (!cfg) {
- DHD_ERROR(("cfg NULL!\n"));
- ASSERT(0);
- } else {
- wl_cfg80211_detach(cfg);
- dhd_monitor_uninit();
- }
- }
-#endif // endif
+ fs = get_fs();
+ set_fs(KERNEL_DS);
-#ifdef DHD_PCIE_NATIVE_RUNTIMEPM
- destroy_workqueue(dhd->tx_wq);
- dhd->tx_wq = NULL;
- destroy_workqueue(dhd->rx_wq);
- dhd->rx_wq = NULL;
-#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
-#ifdef DEBUGABILITY
- if (dhdp->dbg) {
-#ifdef DBG_PKT_MON
- dhd_os_dbg_detach_pkt_monitor(dhdp);
- dhd_os_spin_lock_deinit(dhd->pub.osh, dhd->pub.dbg->pkt_mon_lock);
-#endif /* DBG_PKT_MON */
+ filep = filp_open(logstrs_path, O_RDONLY, 0);
+
+ if (IS_ERR(filep)) {
+ DHD_ERROR(("%s: Failed to open the file %s \n", __FUNCTION__, logstrs_path));
+ goto fail;
}
-#endif /* DEBUGABILITY */
- if (dhdp->dbg) {
- dhd_os_dbg_detach(dhdp);
+ error = vfs_stat(logstrs_path, &stat);
+ if (error) {
+ DHD_ERROR(("%s: Failed to stat file %s \n", __FUNCTION__, logstrs_path));
+ goto fail;
}
-#ifdef DHD_STATUS_LOGGING
- dhd_detach_statlog(dhdp);
-#endif /* DHD_STATUS_LOGGING */
-#ifdef DHD_PKTDUMP_ROAM
- dhd_dump_pkt_deinit(dhdp);
-#endif /* DHD_PKTDUMP_ROAM */
-#ifdef SHOW_LOGTRACE
- /* Release the skbs from queue for WLC_E_TRACE event */
- dhd_event_logtrace_flush_queue(dhdp);
+ logstrs_size = (int) stat.size;
- /* Wait till event logtrace context finishes */
- dhd_cancel_logtrace_process_sync(dhd);
+ if (logstrs_size == 0) {
+ DHD_ERROR(("%s: return as logstrs_size is 0\n", __FUNCTION__));
+ goto fail1;
+ }
- /* Remove ring proc entries */
- dhd_dbg_ring_proc_destroy(&dhd->pub);
+ raw_fmts = MALLOC(osh, logstrs_size);
+ if (raw_fmts == NULL) {
+ DHD_ERROR(("%s: Failed to allocate memory \n", __FUNCTION__));
+ goto fail;
+ }
+ if (vfs_read(filep, raw_fmts, logstrs_size, &filep->f_pos) != logstrs_size) {
+ DHD_ERROR(("%s: Failed to read file %s\n", __FUNCTION__, logstrs_path));
+ goto fail;
+ }
- if (dhd->dhd_state & DHD_ATTACH_LOGTRACE_INIT) {
- if (dhd->event_data.fmts) {
- MFREE(dhd->pub.osh, dhd->event_data.fmts,
- dhd->event_data.fmts_size);
- dhd->event_data.fmts = NULL;
- }
- if (dhd->event_data.raw_fmts) {
- MFREE(dhd->pub.osh, dhd->event_data.raw_fmts,
- dhd->event_data.raw_fmts_size);
- dhd->event_data.raw_fmts = NULL;
- }
- if (dhd->event_data.raw_sstr) {
- MFREE(dhd->pub.osh, dhd->event_data.raw_sstr,
- dhd->event_data.raw_sstr_size);
- dhd->event_data.raw_sstr = NULL;
- }
- if (dhd->event_data.rom_raw_sstr) {
- MFREE(dhd->pub.osh, dhd->event_data.rom_raw_sstr,
- dhd->event_data.rom_raw_sstr_size);
- dhd->event_data.rom_raw_sstr = NULL;
- }
- dhd->dhd_state &= ~DHD_ATTACH_LOGTRACE_INIT;
+ if (dhd_parse_logstrs_file(osh, raw_fmts, logstrs_size, temp)
+ == BCME_OK) {
+ filp_close(filep, NULL);
+ set_fs(fs);
+ return BCME_OK;
}
-#endif /* SHOW_LOGTRACE */
-#ifdef PNO_SUPPORT
- if (dhdp->pno_state)
- dhd_pno_deinit(dhdp);
-#endif // endif
-#ifdef RTT_SUPPORT
- if (dhdp->rtt_state) {
- dhd_rtt_deinit(dhdp);
+
+fail:
+ if (raw_fmts) {
+ MFREE(osh, raw_fmts, logstrs_size);
+ raw_fmts = NULL;
}
-#endif // endif
-#if defined(CONFIG_PM_SLEEP)
- if (dhd_pm_notifier_registered) {
- unregister_pm_notifier(&dhd->pm_notifier);
- dhd_pm_notifier_registered = FALSE;
+
+fail1:
+ if (!IS_ERR(filep))
+ filp_close(filep, NULL);
+
+ set_fs(fs);
+ temp->fmts = NULL;
+ return BCME_ERROR;
+}
+
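+/* Parse the firmware map file to obtain the RAM base address and the rodata start/end
+ * addresses needed to locate the static log strings in the image.
+ */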
+static int
+dhd_read_map(osl_t *osh, char *fname, uint32 *ramstart, uint32 *rodata_start,
+ uint32 *rodata_end)
+{
+ struct file *filep = NULL;
+ mm_segment_t fs;
+ int err = BCME_ERROR;
+
+ if (fname == NULL) {
+ DHD_ERROR(("%s: ERROR fname is NULL \n", __FUNCTION__));
+ return BCME_ERROR;
}
-#endif /* CONFIG_PM_SLEEP */
-#ifdef DEBUG_CPU_FREQ
- if (dhd->new_freq)
- free_percpu(dhd->new_freq);
- dhd->new_freq = NULL;
- cpufreq_unregister_notifier(&dhd->freq_trans, CPUFREQ_TRANSITION_NOTIFIER);
-#endif // endif
- DHD_TRACE(("wd wakelock count:%d\n", dhd->wakelock_wd_counter));
-#ifdef CONFIG_HAS_WAKELOCK
- dhd->wakelock_wd_counter = 0;
- wake_lock_destroy(&dhd->wl_wdwake);
- // terence 20161023: can not destroy wl_wifi when wlan down, it will happen null pointer in dhd_ioctl_entry
- wake_lock_destroy(&dhd->wl_wifi);
-#endif /* CONFIG_HAS_WAKELOCK */
- if (dhd->dhd_state & DHD_ATTACH_STATE_WAKELOCKS_INIT) {
- DHD_OS_WAKE_LOCK_DESTROY(dhd);
+ fs = get_fs();
+ set_fs(KERNEL_DS);
+
+ filep = filp_open(fname, O_RDONLY, 0);
+ if (IS_ERR(filep)) {
+ DHD_ERROR(("%s: Failed to open %s \n", __FUNCTION__, fname));
+ goto fail;
}
-#ifdef DHDTCPACK_SUPPRESS
- /* This will free all MEM allocated for TCPACK SUPPRESS */
- dhd_tcpack_suppress_set(&dhd->pub, TCPACK_SUP_OFF);
-#endif /* DHDTCPACK_SUPPRESS */
+ if ((err = dhd_parse_map_file(osh, filep, ramstart,
+ rodata_start, rodata_end)) < 0)
+ goto fail;
-#ifdef PCIE_FULL_DONGLE
- dhd_flow_rings_deinit(dhdp);
- if (dhdp->prot)
- dhd_prot_detach(dhdp);
-#endif // endif
+fail:
+ if (!IS_ERR(filep))
+ filp_close(filep, NULL);
-#if defined(WLTDLS) && defined(PCIE_FULL_DONGLE)
- dhd_free_tdls_peer_list(dhdp);
-#endif // endif
+ set_fs(fs);
-#ifdef DUMP_IOCTL_IOV_LIST
- dhd_iov_li_delete(dhdp, &(dhdp->dump_iovlist_head));
-#endif /* DUMP_IOCTL_IOV_LIST */
-#ifdef DHD_DEBUG
- /* memory waste feature list initilization */
- dhd_mw_list_delete(dhdp, &(dhdp->mw_list_head));
-#endif /* DHD_DEBUG */
-#ifdef WL_MONITOR
- dhd_del_monitor_if(dhd);
-#endif /* WL_MONITOR */
+ return err;
+}
-#ifdef DHD_ERPOM
- if (dhdp->enable_erpom) {
- dhdp->pom_func_deregister(&dhdp->pom_wlan_handler);
- }
-#endif /* DHD_ERPOM */
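+/* Load the rodata region of a RAM or ROM image (located via its map file) so static
+ * event-log strings can be resolved on the host.
+ */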
+static int
+dhd_init_static_strs_array(osl_t *osh, dhd_event_log_t *temp, char *str_file, char *map_file)
+{
+ struct file *filep = NULL;
+ mm_segment_t fs;
+ char *raw_fmts = NULL;
+ uint32 logstrs_size = 0;
- cancel_work_sync(&dhd->dhd_hang_process_work);
+ int error = 0;
+ uint32 ramstart = 0;
+ uint32 rodata_start = 0;
+ uint32 rodata_end = 0;
+ uint32 logfilebase = 0;
- /* Prefer adding de-init code above this comment unless necessary.
- * The idea is to cancel work queue, sysfs and flags at the end.
- */
- dhd_deferred_work_deinit(dhd->dhd_deferred_wq);
- dhd->dhd_deferred_wq = NULL;
+ error = dhd_read_map(osh, map_file, &ramstart, &rodata_start, &rodata_end);
+ if (error != BCME_OK) {
+ DHD_ERROR(("readmap Error!! \n"));
+ /* map file read failed; skip event-log string parsing for this image */
+ if (strstr(str_file, ram_file_str) != NULL) {
+ temp->raw_sstr = NULL;
+ } else if (strstr(str_file, rom_file_str) != NULL) {
+ temp->rom_raw_sstr = NULL;
+ }
+ return error;
+ }
+ DHD_ERROR(("ramstart: 0x%x, rodata_start: 0x%x, rodata_end:0x%x\n",
+ ramstart, rodata_start, rodata_end));
- /* log dump related buffers should be freed after wq is purged */
-#ifdef DHD_LOG_DUMP
- dhd_log_dump_deinit(&dhd->pub);
-#endif /* DHD_LOG_DUMP */
-#if defined(BCMPCIE)
- if (dhdp->extended_trap_data)
- {
- MFREE(dhdp->osh, dhdp->extended_trap_data, BCMPCIE_EXT_TRAP_DATA_MAXLEN);
- dhdp->extended_trap_data = NULL;
+ fs = get_fs();
+ set_fs(KERNEL_DS);
+
+ filep = filp_open(str_file, O_RDONLY, 0);
+ if (IS_ERR(filep)) {
+ DHD_ERROR(("%s: Failed to open the file %s \n", __FUNCTION__, str_file));
+ goto fail;
}
-#ifdef DNGL_AXI_ERROR_LOGGING
- if (dhdp->axi_err_dump)
- {
- MFREE(dhdp->osh, dhdp->axi_err_dump, sizeof(dhd_axi_error_dump_t));
- dhdp->axi_err_dump = NULL;
+
+ /* Full file size is huge. Just read required part */
+ logstrs_size = rodata_end - rodata_start;
+
+ if (logstrs_size == 0) {
+ DHD_ERROR(("%s: return as logstrs_size is 0\n", __FUNCTION__));
+ goto fail1;
}
-#endif /* DNGL_AXI_ERROR_LOGGING */
-#endif /* BCMPCIE */
-#ifdef DHD_DUMP_MNGR
- if (dhd->pub.dump_file_manage) {
- MFREE(dhd->pub.osh, dhd->pub.dump_file_manage,
- sizeof(dhd_dump_file_manage_t));
+ raw_fmts = MALLOC(osh, logstrs_size);
+ if (raw_fmts == NULL) {
+ DHD_ERROR(("%s: Failed to allocate raw_fmts memory \n", __FUNCTION__));
+ goto fail;
}
-#endif /* DHD_DUMP_MNGR */
- dhd_sysfs_exit(dhd);
- dhd->pub.fw_download_status = FW_UNLOADED;
-#if defined(BT_OVER_SDIO)
- mutex_destroy(&dhd->bus_user_lock);
-#endif /* BT_OVER_SDIO */
- dhd_conf_detach(dhdp);
+ logfilebase = rodata_start - ramstart;
-} /* dhd_detach */
+ error = generic_file_llseek(filep, logfilebase, SEEK_SET);
+ if (error < 0) {
+ DHD_ERROR(("%s: %s llseek failed %d \n", __FUNCTION__, str_file, error));
+ goto fail;
+ }
-void
-dhd_free(dhd_pub_t *dhdp)
-{
- dhd_info_t *dhd;
- DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+ error = vfs_read(filep, raw_fmts, logstrs_size, (&filep->f_pos));
+ if (error != logstrs_size) {
+ DHD_ERROR(("%s: %s read failed %d \n", __FUNCTION__, str_file, error));
+ goto fail;
+ }
- if (dhdp) {
- int i;
- for (i = 0; i < ARRAYSIZE(dhdp->reorder_bufs); i++) {
- if (dhdp->reorder_bufs[i]) {
- reorder_info_t *ptr;
- uint32 buf_size = sizeof(struct reorder_info);
+ if (strstr(str_file, ram_file_str) != NULL) {
+ temp->raw_sstr = raw_fmts;
+ temp->raw_sstr_size = logstrs_size;
+ temp->ramstart = ramstart;
+ temp->rodata_start = rodata_start;
+ temp->rodata_end = rodata_end;
+ } else if (strstr(str_file, rom_file_str) != NULL) {
+ temp->rom_raw_sstr = raw_fmts;
+ temp->rom_raw_sstr_size = logstrs_size;
+ temp->rom_ramstart = ramstart;
+ temp->rom_rodata_start = rodata_start;
+ temp->rom_rodata_end = rodata_end;
+ }
- ptr = dhdp->reorder_bufs[i];
+ filp_close(filep, NULL);
+ set_fs(fs);
- buf_size += ((ptr->max_idx + 1) * sizeof(void*));
- DHD_REORDER(("free flow id buf %d, maxidx is %d, buf_size %d\n",
- i, ptr->max_idx, buf_size));
+ return BCME_OK;
- MFREE(dhdp->osh, dhdp->reorder_bufs[i], buf_size);
- dhdp->reorder_bufs[i] = NULL;
- }
- }
+fail:
+ if (raw_fmts) {
+ MFREE(osh, raw_fmts, logstrs_size);
+ raw_fmts = NULL;
+ }
- dhd_sta_pool_fini(dhdp, DHD_MAX_STA);
+fail1:
+ if (!IS_ERR(filep))
+ filp_close(filep, NULL);
- dhd = (dhd_info_t *)dhdp->info;
- if (dhdp->soc_ram) {
-#if defined(CONFIG_DHD_USE_STATIC_BUF) && defined(DHD_USE_STATIC_MEMDUMP)
- DHD_OS_PREFREE(dhdp, dhdp->soc_ram, dhdp->soc_ram_length);
-#else
- MFREE(dhdp->osh, dhdp->soc_ram, dhdp->soc_ram_length);
-#endif /* CONFIG_DHD_USE_STATIC_BUF && DHD_USE_STATIC_MEMDUMP */
- dhdp->soc_ram = NULL;
- }
- if (dhd != NULL) {
+ set_fs(fs);
- /* If pointer is allocated by dhd_os_prealloc then avoid MFREE */
- if (dhd != (dhd_info_t *)dhd_os_prealloc(dhdp,
- DHD_PREALLOC_DHD_INFO, 0, FALSE))
- MFREE(dhd->pub.osh, dhd, sizeof(*dhd));
- dhd = NULL;
- }
+ if (strstr(str_file, ram_file_str) != NULL) {
+ temp->raw_sstr = NULL;
+ } else if (strstr(str_file, rom_file_str) != NULL) {
+ temp->rom_raw_sstr = NULL;
}
+
+ return error;
}
-void
-dhd_clear(dhd_pub_t *dhdp)
+#endif /* SHOW_LOGTRACE */
+
+#ifdef BCMDBUS
+uint
+dhd_get_rxsz(dhd_pub_t *pub)
{
- DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+ struct net_device *net = NULL;
+ dhd_info_t *dhd = NULL;
+ uint rxsz;
- if (dhdp) {
- int i;
-#ifdef DHDTCPACK_SUPPRESS
- /* Clean up timer/data structure for any remaining/pending packet or timer. */
- dhd_tcpack_info_tbl_clean(dhdp);
-#endif /* DHDTCPACK_SUPPRESS */
- for (i = 0; i < ARRAYSIZE(dhdp->reorder_bufs); i++) {
- if (dhdp->reorder_bufs[i]) {
- reorder_info_t *ptr;
- uint32 buf_size = sizeof(struct reorder_info);
+ /* Assign rxsz for dbus_attach */
+ dhd = pub->info;
+ net = dhd->iflist[0]->net;
+ net->hard_header_len = ETH_HLEN + pub->hdrlen;
+ rxsz = DBUS_RX_BUFFER_SIZE_DHD(net);
- ptr = dhdp->reorder_bufs[i];
+ return rxsz;
+}
- buf_size += ((ptr->max_idx + 1) * sizeof(void*));
- DHD_REORDER(("free flow id buf %d, maxidx is %d, buf_size %d\n",
- i, ptr->max_idx, buf_size));
+void
+dhd_set_path(dhd_pub_t *pub)
+{
+ dhd_info_t *dhd = NULL;
- MFREE(dhdp->osh, dhdp->reorder_bufs[i], buf_size);
- dhdp->reorder_bufs[i] = NULL;
- }
- }
+ dhd = pub->info;
- dhd_sta_pool_clear(dhdp, DHD_MAX_STA);
+ /* update the firmware/nvram paths and pass them to the bus layer */
+ if (dhd_update_fw_nv_path(dhd) && dhd->pub.bus) {
+ DHD_INFO(("%s: fw %s, nv %s, conf %s\n",
+ __FUNCTION__, dhd->fw_path, dhd->nv_path, dhd->conf_path));
+ dhd_bus_update_fw_nv_path(dhd->pub.bus,
+ dhd->fw_path, dhd->nv_path, dhd->clm_path, dhd->conf_path);
+ }
+}
+#endif
- if (dhdp->soc_ram) {
-#if defined(CONFIG_DHD_USE_STATIC_BUF) && defined(DHD_USE_STATIC_MEMDUMP)
- DHD_OS_PREFREE(dhdp, dhdp->soc_ram, dhdp->soc_ram_length);
-#else
- MFREE(dhdp->osh, dhdp->soc_ram, dhdp->soc_ram_length);
-#endif /* CONFIG_DHD_USE_STATIC_BUF && DHD_USE_STATIC_MEMDUMP */
- dhdp->soc_ram = NULL;
+dhd_pub_t *
+dhd_attach(osl_t *osh, struct dhd_bus *bus, uint bus_hdrlen
+#ifdef BCMDBUS
+ , void *data
+#endif
+)
+{
+ dhd_info_t *dhd = NULL;
+ struct net_device *net = NULL;
+ char if_name[IFNAMSIZ] = {'\0'};
+#ifdef SHOW_LOGTRACE
+ int ret;
+#endif /* SHOW_LOGTRACE */
+#if defined(BCMSDIO) || defined(BCMPCIE)
+ uint32 bus_type = -1;
+ uint32 bus_num = -1;
+ uint32 slot_num = -1;
+ wifi_adapter_info_t *adapter = NULL;
+#elif defined(BCMDBUS)
+ wifi_adapter_info_t *adapter = data;
+#endif
+#ifdef GET_CUSTOM_MAC_ENABLE
+ char hw_ether[62];
+#endif /* GET_CUSTOM_MAC_ENABLE */
+
+ dhd_attach_states_t dhd_state = DHD_ATTACH_STATE_INIT;
+ DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+#ifdef STBLINUX
+ DHD_ERROR(("%s\n", driver_target));
+#endif /* STBLINUX */
+ /* will implement get_ids for DBUS later */
+#if defined(BCMSDIO)
+ dhd_bus_get_ids(bus, &bus_type, &bus_num, &slot_num);
+#endif
+#if defined(BCMSDIO) || defined(BCMPCIE)
+ adapter = dhd_wifi_platform_get_adapter(bus_type, bus_num, slot_num);
+#endif
+
+ /* Allocate primary dhd_info */
+ dhd = wifi_platform_prealloc(adapter, DHD_PREALLOC_DHD_INFO, sizeof(dhd_info_t));
+ if (dhd == NULL) {
+ dhd = MALLOC(osh, sizeof(dhd_info_t));
+ if (dhd == NULL) {
+ DHD_ERROR(("%s: OOM - alloc dhd_info\n", __FUNCTION__));
+ goto dhd_null_flag;
}
}
-}
-
-static void
-dhd_module_cleanup(void)
-{
- printf("%s: Enter\n", __FUNCTION__);
+ memset(dhd, 0, sizeof(dhd_info_t));
+ dhd_state |= DHD_ATTACH_STATE_DHD_ALLOC;
- dhd_bus_unregister();
+ dhd->unit = dhd_found + instance_base; /* do not increment dhd_found, yet */
- wl_android_exit();
+ dhd->pub.osh = osh;
+#ifdef DUMP_IOCTL_IOV_LIST
+ dll_init(&(dhd->pub.dump_iovlist_head));
+#endif /* DUMP_IOCTL_IOV_LIST */
+ dhd->adapter = adapter;
+ dhd->pub.adapter = (void *)adapter;
+#ifdef DHD_DEBUG
+ dll_init(&(dhd->pub.mw_list_head));
+#endif /* DHD_DEBUG */
+#ifdef BT_OVER_SDIO
+ dhd->pub.is_bt_recovery_required = FALSE;
+ mutex_init(&dhd->bus_user_lock);
+#endif /* BT_OVER_SDIO */
- dhd_wifi_platform_unregister_drv();
+#ifdef GET_CUSTOM_MAC_ENABLE
+ wifi_platform_get_mac_addr(dhd->adapter, hw_ether);
+ bcopy(hw_ether, dhd->pub.mac.octet, sizeof(struct ether_addr));
+#endif /* GET_CUSTOM_MAC_ENABLE */
+#ifdef CUSTOM_FORCE_NODFS_FLAG
+ dhd->pub.dhd_cflags |= WLAN_PLAT_NODFS_FLAG;
+ dhd->pub.force_country_change = TRUE;
+#endif /* CUSTOM_FORCE_NODFS_FLAG */
+#ifdef CUSTOM_COUNTRY_CODE
+ get_customized_country_code(dhd->adapter,
+ dhd->pub.dhd_cspec.country_abbrev, &dhd->pub.dhd_cspec,
+ dhd->pub.dhd_cflags);
+#endif /* CUSTOM_COUNTRY_CODE */
+#ifndef BCMDBUS
+ dhd->thr_dpc_ctl.thr_pid = DHD_PID_KT_TL_INVALID;
+ dhd->thr_wdt_ctl.thr_pid = DHD_PID_KT_INVALID;
+#ifdef DHD_WET
+ dhd->pub.wet_info = dhd_get_wet_info(&dhd->pub);
+#endif /* DHD_WET */
+ /* Initialize thread based operation and lock */
+ sema_init(&dhd->sdsem, 1);
+#endif /* !BCMDBUS */
-#ifdef CUSTOMER_HW_AMLOGIC
-#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 14, 0))
- wifi_teardown_dt();
-#endif
-#endif
- printf("%s: Exit\n", __FUNCTION__);
-}
+ /* Link to info module */
+ dhd->pub.info = dhd;
-static void __exit
-dhd_module_exit(void)
-{
- atomic_set(&exit_in_progress, 1);
- dhd_module_cleanup();
- unregister_reboot_notifier(&dhd_reboot_notifier);
- dhd_destroy_to_notifier_skt();
-}
-static int __init
-dhd_module_init(void)
-{
- int err;
- int retry = POWERUP_MAX_RETRY;
+ /* Link to bus module */
+ dhd->pub.bus = bus;
+ dhd->pub.hdrlen = bus_hdrlen;
- printf("%s: in %s\n", __FUNCTION__, dhd_version);
-#ifdef CUSTOMER_HW_AMLOGIC
-#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 14, 0))
- if (wifi_setup_dt()) {
- printf("wifi_dt : fail to setup dt\n");
+ /* dhd_conf must be attached after linking dhd to dhd->pub.info,
+ * because dhd_detach checks whether .info is NULL.
+ */
+ if (dhd_conf_attach(&dhd->pub) != 0) {
+ DHD_ERROR(("dhd_conf_attach failed\n"));
+ goto fail;
}
-#endif
-#endif
+#ifndef BCMDBUS
+ dhd_conf_reset(&dhd->pub);
+ dhd_conf_set_chiprev(&dhd->pub, dhd_bus_chip(bus), dhd_bus_chiprev(bus));
+ dhd_conf_preinit(&dhd->pub);
+#endif /* !BCMDBUS */
- DHD_PERIM_RADIO_INIT();
+ /* Some DHD modules (e.g. cfg80211) configure the operation mode based on the firmware name.
+ * This is a hack, but we have to make it work properly until a better solution is available.
+ */
+ dhd_update_fw_nv_path(dhd);
- if (firmware_path[0] != '\0') {
- strncpy(fw_bak_path, firmware_path, MOD_PARAM_PATHLEN);
- fw_bak_path[MOD_PARAM_PATHLEN-1] = '\0';
+ /* Set network interface name if it was provided as module parameter */
+ if (iface_name[0]) {
+ int len;
+ char ch;
+ strncpy(if_name, iface_name, IFNAMSIZ);
+ if_name[IFNAMSIZ - 1] = 0;
+ len = strlen(if_name);
+ ch = if_name[len - 1];
+ if ((ch > '9' || ch < '0') && (len < IFNAMSIZ - 2))
+ strncat(if_name, "%d", 2);
}
- if (nvram_path[0] != '\0') {
- strncpy(nv_bak_path, nvram_path, MOD_PARAM_PATHLEN);
- nv_bak_path[MOD_PARAM_PATHLEN-1] = '\0';
+ /* Passing NULL to dngl_name to ensure host gets if_name in dngl_name member */
+ net = dhd_allocate_if(&dhd->pub, 0, if_name, NULL, 0, TRUE, NULL);
+ if (net == NULL) {
+ goto fail;
}
- do {
- err = dhd_wifi_platform_register_drv();
- if (!err) {
- register_reboot_notifier(&dhd_reboot_notifier);
- break;
- } else {
- DHD_ERROR(("%s: Failed to load the driver, try cnt %d\n",
- __FUNCTION__, retry));
- strncpy(firmware_path, fw_bak_path, MOD_PARAM_PATHLEN);
- firmware_path[MOD_PARAM_PATHLEN-1] = '\0';
- strncpy(nvram_path, nv_bak_path, MOD_PARAM_PATHLEN);
- nvram_path[MOD_PARAM_PATHLEN-1] = '\0';
- }
- } while (retry--);
-
- dhd_create_to_notifier_skt();
- if (err) {
-#ifdef CUSTOMER_HW_AMLOGIC
-#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 14, 0))
- wifi_teardown_dt();
+ dhd_state |= DHD_ATTACH_STATE_ADD_IF;
+#ifdef DHD_L2_FILTER
+ /* initialize the l2_filter_cnt */
+ dhd->pub.l2_filter_cnt = 0;
#endif
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 31))
+ net->open = NULL;
+#else
+ net->netdev_ops = NULL;
#endif
- DHD_ERROR(("%s: Failed to load driver max retry reached**\n", __FUNCTION__));
- } else {
- if (!dhd_download_fw_on_driverload) {
- dhd_driver_init_done = TRUE;
- }
- }
- printf("%s: Exit err=%d\n", __FUNCTION__, err);
- return err;
-}
+ mutex_init(&dhd->dhd_iovar_mutex);
+ sema_init(&dhd->proto_sem, 1);
+#ifdef DHD_ULP
+ if (!(dhd_ulp_init(osh, &dhd->pub)))
+ goto fail;
+#endif /* DHD_ULP */
-static int
-dhd_reboot_callback(struct notifier_block *this, unsigned long code, void *unused)
-{
- DHD_TRACE(("%s: code = %ld\n", __FUNCTION__, code));
- if (code == SYS_RESTART) {
-#ifdef BCMPCIE
- is_reboot = code;
-#endif /* BCMPCIE */
+#if defined(DHD_HANG_SEND_UP_TEST)
+ dhd->pub.req_hang_type = 0;
+#endif /* DHD_HANG_SEND_UP_TEST */
+
+#ifdef PROP_TXSTATUS
+ spin_lock_init(&dhd->wlfc_spinlock);
+
+ dhd->pub.skip_fc = dhd_wlfc_skip_fc;
+ dhd->pub.plat_init = dhd_wlfc_plat_init;
+ dhd->pub.plat_deinit = dhd_wlfc_plat_deinit;
+
+#ifdef DHD_WLFC_THREAD
+ init_waitqueue_head(&dhd->pub.wlfc_wqhead);
+ dhd->pub.wlfc_thread = kthread_create(dhd_wlfc_transfer_packets, &dhd->pub, "wlfc-thread");
+ if (IS_ERR(dhd->pub.wlfc_thread)) {
+ DHD_ERROR(("create wlfc thread failed\n"));
+ goto fail;
+ } else {
+ wake_up_process(dhd->pub.wlfc_thread);
}
- return NOTIFY_DONE;
-}
+#endif /* DHD_WLFC_THREAD */
+#endif /* PROP_TXSTATUS */
-#if defined(CONFIG_DEFERRED_INITCALLS) && !defined(EXYNOS_PCIE_MODULE_PATCH)
-#if defined(CONFIG_MACH_UNIVERSAL7420) || defined(CONFIG_SOC_EXYNOS8890) || \
- defined(CONFIG_ARCH_MSM8996) || defined(CONFIG_ARCH_MSM8998) || \
- defined(CONFIG_SOC_EXYNOS8895) || defined(CONFIG_SOC_EXYNOS9810) || \
- defined(CONFIG_ARCH_SDM845) || defined(CONFIG_SOC_EXYNOS9820) || \
- defined(CONFIG_ARCH_SM8150)
-deferred_module_init_sync(dhd_module_init);
-#else
-deferred_module_init(dhd_module_init);
-#endif /* CONFIG_MACH_UNIVERSAL7420 || CONFIG_SOC_EXYNOS8890 ||
- * CONFIG_ARCH_MSM8996 || CONFIG_ARCH_MSM8998 || CONFIG_SOC_EXYNOS8895
- * CONFIG_SOC_EXYNOS9810 || CONFIG_ARCH_SDM845 || CONFIG_SOC_EXYNOS9820
- * CONFIG_ARCH_SM8150
- */
-#elif defined(USE_LATE_INITCALL_SYNC)
-late_initcall_sync(dhd_module_init);
-#else
-late_initcall(dhd_module_init);
-#endif /* USE_LATE_INITCALL_SYNC */
+ /* Initialize other structure content */
+ init_waitqueue_head(&dhd->ioctl_resp_wait);
+ init_waitqueue_head(&dhd->d3ack_wait);
+#ifdef PCIE_INB_DW
+ init_waitqueue_head(&dhd->ds_exit_wait);
+#endif /* PCIE_INB_DW */
+ init_waitqueue_head(&dhd->ctrl_wait);
+ init_waitqueue_head(&dhd->dhd_bus_busy_state_wait);
+ dhd->pub.dhd_bus_busy_state = 0;
-module_exit(dhd_module_exit);
+ /* Initialize the spinlocks */
+ spin_lock_init(&dhd->sdlock);
+ spin_lock_init(&dhd->txqlock);
+ spin_lock_init(&dhd->rxqlock);
+ spin_lock_init(&dhd->dhd_lock);
+ spin_lock_init(&dhd->rxf_lock);
+#ifdef WLTDLS
+ spin_lock_init(&dhd->pub.tdls_lock);
+#endif /* WLTDLS */
+#if defined(RXFRAME_THREAD)
+ dhd->rxthread_enabled = TRUE;
+#endif /* defined(RXFRAME_THREAD) */
-/*
- * OS specific functions required to implement DHD driver in OS independent way
- */
-int
-dhd_os_proto_block(dhd_pub_t *pub)
-{
- dhd_info_t * dhd = (dhd_info_t *)(pub->info);
+#ifdef DHDTCPACK_SUPPRESS
+ spin_lock_init(&dhd->tcpack_lock);
+#endif /* DHDTCPACK_SUPPRESS */
- if (dhd) {
- DHD_PERIM_UNLOCK(pub);
+ /* Initialize Wakelock stuff */
+ spin_lock_init(&dhd->wakelock_spinlock);
+ spin_lock_init(&dhd->wakelock_evt_spinlock);
+ DHD_OS_WAKE_LOCK_INIT(dhd);
+ dhd->wakelock_counter = 0;
+#ifdef CONFIG_HAS_WAKELOCK
+ // terence 20161023: do not destroy wl_wifi while WLAN is down, otherwise a NULL pointer dereference occurs in dhd_ioctl_entry
+ wake_lock_init(&dhd->wl_wifi, WAKE_LOCK_SUSPEND, "wlan_wake");
+ wake_lock_init(&dhd->wl_wdwake, WAKE_LOCK_SUSPEND, "wlan_wd_wake");
+#endif /* CONFIG_HAS_WAKELOCK */
- down(&dhd->proto_sem);
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
+ mutex_init(&dhd->dhd_net_if_mutex);
+ mutex_init(&dhd->dhd_suspend_mutex);
+#if defined(PKT_FILTER_SUPPORT) && defined(APF)
+ mutex_init(&dhd->dhd_apf_mutex);
+#endif /* PKT_FILTER_SUPPORT && APF */
+#endif
+ dhd_state |= DHD_ATTACH_STATE_WAKELOCKS_INIT;
- DHD_PERIM_LOCK(pub);
- return 1;
+ /* Attach and link in the protocol */
+ if (dhd_prot_attach(&dhd->pub) != 0) {
+ DHD_ERROR(("dhd_prot_attach failed\n"));
+ goto fail;
}
+ dhd_state |= DHD_ATTACH_STATE_PROT_ATTACH;
- return 0;
-}
-
-int
-dhd_os_proto_unblock(dhd_pub_t *pub)
-{
- dhd_info_t * dhd = (dhd_info_t *)(pub->info);
+#ifdef DHD_TIMESYNC
+ /* attach the timesync module */
+ if (dhd_timesync_attach(&dhd->pub) != 0) {
+ DHD_ERROR(("dhd_timesync_attach failed\n"));
+ goto fail;
+ }
+ dhd_state |= DHD_ATTACH_TIMESYNC_ATTACH_DONE;
+#endif /* DHD_TIMESYNC */
- if (dhd) {
- up(&dhd->proto_sem);
- return 1;
+#ifdef WL_CFG80211
+ spin_lock_init(&dhd->pub.up_lock);
+ /* Attach and link in the cfg80211 */
+ if (unlikely(wl_cfg80211_attach(net, &dhd->pub))) {
+ DHD_ERROR(("wl_cfg80211_attach failed\n"));
+ goto fail;
}
- return 0;
-}
+ dhd_monitor_init(&dhd->pub);
+ dhd_state |= DHD_ATTACH_STATE_CFG80211;
+#endif
+#ifdef DHD_LOG_DUMP
+ dhd_log_dump_init(&dhd->pub);
+#endif /* DHD_LOG_DUMP */
+#if defined(WL_WIRELESS_EXT)
+ /* Attach and link in the iw */
+ if (!(dhd_state & DHD_ATTACH_STATE_CFG80211)) {
+ if (wl_iw_attach(net, (void *)&dhd->pub) != 0) {
+ DHD_ERROR(("wl_iw_attach failed\n"));
+ goto fail;
+ }
+ dhd_state |= DHD_ATTACH_STATE_WL_ATTACH;
+ }
+#ifdef WL_ESCAN
+ wl_escan_attach(net, &dhd->pub);
+#endif /* WL_ESCAN */
+#endif /* defined(WL_WIRELESS_EXT) */
-void
-dhd_os_dhdiovar_lock(dhd_pub_t *pub)
-{
- dhd_info_t * dhd = (dhd_info_t *)(pub->info);
+#ifdef SHOW_LOGTRACE
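+ /* Pre-load the event-log format strings so firmware log traces can be decoded on the host */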
+ ret = dhd_init_logstrs_array(osh, &dhd->event_data);
+ if (ret == BCME_OK) {
+ dhd_init_static_strs_array(osh, &dhd->event_data, st_str_file_path, map_file_path);
+ dhd_init_static_strs_array(osh, &dhd->event_data, rom_st_str_file_path,
+ rom_map_file_path);
+ dhd_state |= DHD_ATTACH_LOGTRACE_INIT;
+ }
+#endif /* SHOW_LOGTRACE */
- if (dhd) {
- mutex_lock(&dhd->dhd_iovar_mutex);
+#ifdef DEBUGABILITY
+ /* attach debug if support */
+ if (dhd_os_dbg_attach(&dhd->pub)) {
+ DHD_ERROR(("%s debug module attach failed\n", __FUNCTION__));
+ goto fail;
}
-}
-void
-dhd_os_dhdiovar_unlock(dhd_pub_t *pub)
-{
- dhd_info_t * dhd = (dhd_info_t *)(pub->info);
+#ifdef DBG_PKT_MON
+ dhd->pub.dbg->pkt_mon_lock = dhd_os_spin_lock_init(dhd->pub.osh);
+#ifdef DBG_PKT_MON_INIT_DEFAULT
+ dhd_os_dbg_attach_pkt_monitor(&dhd->pub);
+#endif /* DBG_PKT_MON_INIT_DEFAULT */
+#endif /* DBG_PKT_MON */
+#endif /* DEBUGABILITY */
+#ifdef DHD_PKT_LOGGING
+ dhd_os_attach_pktlog(&dhd->pub);
+#endif /* DHD_PKT_LOGGING */
- if (dhd) {
- mutex_unlock(&dhd->dhd_iovar_mutex);
+ if (dhd_sta_pool_init(&dhd->pub, DHD_MAX_STA) != BCME_OK) {
+ DHD_ERROR(("%s: Failed to initialize %u STA pool\n", __FUNCTION__, DHD_MAX_STA));
+ goto fail;
}
-}
-void
-dhd_os_logdump_lock(dhd_pub_t *pub)
-{
- dhd_info_t *dhd = NULL;
- if (!pub)
- return;
- dhd = (dhd_info_t *)(pub->info);
+#ifndef BCMDBUS
+ /* Set up the watchdog timer */
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 15, 0)
+ timer_setup(&dhd->timer, dhd_watchdog, 0);
+#else
+ init_timer(&dhd->timer);
+ dhd->timer.data = (ulong)dhd;
+ dhd->timer.function = dhd_watchdog;
+#endif
+ dhd->default_wd_interval = dhd_watchdog_ms;
- if (dhd) {
- mutex_lock(&dhd->logdump_lock);
+ if (dhd_watchdog_prio >= 0) {
+ /* Initialize watchdog thread */
+ PROC_START(dhd_watchdog_thread, dhd, &dhd->thr_wdt_ctl, 0, "dhd_watchdog_thread");
+ if (dhd->thr_wdt_ctl.thr_pid < 0) {
+ goto fail;
+ }
+
+ } else {
+ dhd->thr_wdt_ctl.thr_pid = -1;
}
-}
-void
-dhd_os_logdump_unlock(dhd_pub_t *pub)
-{
- dhd_info_t *dhd = NULL;
+#ifdef DHD_PCIE_RUNTIMEPM
+ /* Set up the runtime PM idle-count timer */
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 15, 0)
+ timer_setup(&dhd->rpm_timer, dhd_runtimepm, 0);
+#else
+ init_timer(&dhd->rpm_timer);
+ dhd->rpm_timer.data = (ulong)dhd;
+ dhd->rpm_timer.function = dhd_runtimepm;
+#endif
+ dhd->rpm_timer_valid = FALSE;
- if (!pub)
- return;
+ dhd->thr_rpm_ctl.thr_pid = DHD_PID_KT_INVALID;
+ PROC_START(dhd_rpm_state_thread, dhd, &dhd->thr_rpm_ctl, 0, "dhd_rpm_state_thread");
+ if (dhd->thr_rpm_ctl.thr_pid < 0) {
+ goto fail;
+ }
+#endif /* DHD_PCIE_RUNTIMEPM */
- dhd = (dhd_info_t *)(pub->info);
+#ifdef DEBUGGER
+ debugger_init((void *) bus);
+#endif
- if (dhd) {
- mutex_unlock(&dhd->logdump_lock);
+ /* Set up the bottom half handler */
+ if (dhd_dpc_prio >= 0) {
+ /* Initialize DPC thread */
+ PROC_START(dhd_dpc_thread, dhd, &dhd->thr_dpc_ctl, 0, "dhd_dpc");
+ if (dhd->thr_dpc_ctl.thr_pid < 0) {
+ goto fail;
+ }
+ } else {
+ /* use tasklet for dpc */
+ tasklet_init(&dhd->tasklet, dhd_dpc, (ulong)dhd);
+ dhd->thr_dpc_ctl.thr_pid = -1;
}
-}
-unsigned long
-dhd_os_dbgring_lock(void *lock)
-{
- if (!lock)
- return 0;
+ if (dhd->rxthread_enabled) {
+ bzero(&dhd->pub.skbbuf[0], sizeof(void *) * MAXSKBPEND);
+ /* Initialize RXF thread */
+ PROC_START(dhd_rxf_thread, dhd, &dhd->thr_rxf_ctl, 0, "dhd_rxf");
+ if (dhd->thr_rxf_ctl.thr_pid < 0) {
+ goto fail;
+ }
+ }
+#endif /* !BCMDBUS */
+#ifdef SHOW_LOGTRACE
+ skb_queue_head_init(&dhd->evt_trace_queue);
+#endif /* SHOW_LOGTRACE */
- mutex_lock((struct mutex *)lock);
+ dhd_state |= DHD_ATTACH_STATE_THREADS_CREATED;
- return 0;
-}
+#if defined(CONFIG_PM_SLEEP)
+ if (!dhd_pm_notifier_registered) {
+ dhd_pm_notifier_registered = TRUE;
+ dhd->pm_notifier.notifier_call = dhd_pm_callback;
+ dhd->pm_notifier.priority = 10;
+ register_pm_notifier(&dhd->pm_notifier);
+ }
-void
-dhd_os_dbgring_unlock(void *lock, unsigned long flags)
-{
- BCM_REFERENCE(flags);
+#endif /* CONFIG_PM_SLEEP */
- if (!lock)
- return;
+#if defined(CONFIG_HAS_EARLYSUSPEND) && defined(DHD_USE_EARLYSUSPEND)
+ dhd->early_suspend.level = EARLY_SUSPEND_LEVEL_BLANK_SCREEN + 20;
+ dhd->early_suspend.suspend = dhd_early_suspend;
+ dhd->early_suspend.resume = dhd_late_resume;
+ register_early_suspend(&dhd->early_suspend);
+ dhd_state |= DHD_ATTACH_STATE_EARLYSUSPEND_DONE;
+#endif /* CONFIG_HAS_EARLYSUSPEND && DHD_USE_EARLYSUSPEND */
- mutex_unlock((struct mutex *)lock);
-}
+#ifdef ARP_OFFLOAD_SUPPORT
+ dhd->pend_ipaddr = 0;
+ if (!dhd_inetaddr_notifier_registered) {
+ dhd_inetaddr_notifier_registered = TRUE;
+ register_inetaddr_notifier(&dhd_inetaddr_notifier);
+ }
+#endif /* ARP_OFFLOAD_SUPPORT */
-unsigned int
-dhd_os_get_ioctl_resp_timeout(void)
-{
- return ((unsigned int)dhd_ioctl_timeout_msec);
-}
+#if defined(CONFIG_IPV6) && defined(IPV6_NDO_SUPPORT)
+ if (!dhd_inet6addr_notifier_registered) {
+ dhd_inet6addr_notifier_registered = TRUE;
+ register_inet6addr_notifier(&dhd_inet6addr_notifier);
+ }
+#endif /* CONFIG_IPV6 && IPV6_NDO_SUPPORT */
+ dhd->dhd_deferred_wq = dhd_deferred_work_init((void *)dhd);
+#ifdef DEBUG_CPU_FREQ
+ dhd->new_freq = alloc_percpu(int);
+ dhd->freq_trans.notifier_call = dhd_cpufreq_notifier;
+ cpufreq_register_notifier(&dhd->freq_trans, CPUFREQ_TRANSITION_NOTIFIER);
+#endif
+#ifdef DHDTCPACK_SUPPRESS
+ dhd_tcpack_suppress_set(&dhd->pub, TCPACK_SUP_DEFAULT);
+#endif /* DHDTCPACK_SUPPRESS */
-void
-dhd_os_set_ioctl_resp_timeout(unsigned int timeout_msec)
-{
- dhd_ioctl_timeout_msec = (int)timeout_msec;
-}
+#if defined(BCM_DNGL_EMBEDIMAGE) || defined(BCM_REQUEST_FW)
+#endif /* defined(BCM_DNGL_EMBEDIMAGE) || defined(BCM_REQUEST_FW) */
-int
-dhd_os_ioctl_resp_wait(dhd_pub_t *pub, uint *condition)
-{
- dhd_info_t * dhd = (dhd_info_t *)(pub->info);
- int timeout;
- /* Convert timeout in millsecond to jiffies */
- timeout = msecs_to_jiffies(dhd_ioctl_timeout_msec);
+#ifdef DHD_DEBUG_PAGEALLOC
+ register_page_corrupt_cb(dhd_page_corrupt_cb, &dhd->pub);
+#endif /* DHD_DEBUG_PAGEALLOC */
- DHD_PERIM_UNLOCK(pub);
+#if defined(DHD_LB)
- timeout = wait_event_timeout(dhd->ioctl_resp_wait, (*condition), timeout);
+ dhd_lb_set_default_cpus(dhd);
- DHD_PERIM_LOCK(pub);
+ /* Initialize the CPU Masks */
+ if (dhd_cpumasks_init(dhd) == 0) {
+ /* Now we have the current CPU maps, run through candidacy */
+ dhd_select_cpu_candidacy(dhd);
+ /*
+ * If we are able to initialize the CPU masks, register with the
+ * CPU hotplug framework so the CPU for each job can be changed
+ * dynamically using the candidacy algorithm.
+ */
+ dhd->cpu_notifier.notifier_call = dhd_cpu_callback;
+ register_hotcpu_notifier(&dhd->cpu_notifier); /* Register a callback */
+ } else {
+ /*
+ * We are unable to initialize the CPU masks, so the candidacy algorithm
+ * won't run; load balancing will still be honoured based on the CPUs
+ * statically allocated to each job during init.
+ */
+ dhd->cpu_notifier.notifier_call = NULL;
+ DHD_ERROR(("%s():dhd_cpumasks_init failed CPUs for JOB would be static\n",
+ __FUNCTION__));
+ }
- return timeout;
-}
+#ifdef DHD_LB_TXP
+#ifdef DHD_LB_TXP_DEFAULT_ENAB
+ /* Turn ON the feature by default */
+ atomic_set(&dhd->lb_txp_active, 1);
+#else
+ /* Turn OFF the feature by default */
+ atomic_set(&dhd->lb_txp_active, 0);
+#endif /* DHD_LB_TXP_DEFAULT_ENAB */
+#endif /* DHD_LB_TXP */
-int
-dhd_os_ioctl_resp_wake(dhd_pub_t *pub)
-{
- dhd_info_t *dhd = (dhd_info_t *)(pub->info);
+ DHD_LB_STATS_INIT(&dhd->pub);
- wake_up(&dhd->ioctl_resp_wait);
- return 0;
-}
+ /* Initialize the Load Balancing Tasklets and Napi object */
+#if defined(DHD_LB_TXC)
+ tasklet_init(&dhd->tx_compl_tasklet,
+ dhd_lb_tx_compl_handler, (ulong)(&dhd->pub));
+ INIT_WORK(&dhd->tx_compl_dispatcher_work, dhd_tx_compl_dispatcher_fn);
+ DHD_INFO(("%s load balance init tx_compl_tasklet\n", __FUNCTION__));
+#endif /* DHD_LB_TXC */
-int
-dhd_os_d3ack_wait(dhd_pub_t *pub, uint *condition)
-{
- dhd_info_t * dhd = (dhd_info_t *)(pub->info);
- int timeout;
+#if defined(DHD_LB_RXC)
+ tasklet_init(&dhd->rx_compl_tasklet,
+ dhd_lb_rx_compl_handler, (ulong)(&dhd->pub));
+ DHD_INFO(("%s load balance init rx_compl_tasklet\n", __FUNCTION__));
+#endif /* DHD_LB_RXC */
- /* Convert timeout in millsecond to jiffies */
- timeout = msecs_to_jiffies(D3_ACK_RESP_TIMEOUT);
+#if defined(DHD_LB_RXP)
+ __skb_queue_head_init(&dhd->rx_pend_queue);
+ skb_queue_head_init(&dhd->rx_napi_queue);
+ /* Initialize the work that dispatches NAPI job to a given core */
+ INIT_WORK(&dhd->rx_napi_dispatcher_work, dhd_rx_napi_dispatcher_fn);
+ DHD_INFO(("%s load balance init rx_napi_queue\n", __FUNCTION__));
+#endif /* DHD_LB_RXP */
- DHD_PERIM_UNLOCK(pub);
+#if defined(DHD_LB_TXP)
+ INIT_WORK(&dhd->tx_dispatcher_work, dhd_tx_dispatcher_work);
+ skb_queue_head_init(&dhd->tx_pend_queue);
+ /* Initialize the work that dispatches TX job to a given core */
+ tasklet_init(&dhd->tx_tasklet,
+ dhd_lb_tx_handler, (ulong)(dhd));
+ DHD_INFO(("%s load balance init tx_pend_queue\n", __FUNCTION__));
+#endif /* DHD_LB_TXP */
- timeout = wait_event_timeout(dhd->d3ack_wait, (*condition), timeout);
+ dhd_state |= DHD_ATTACH_STATE_LB_ATTACH_DONE;
+#endif /* DHD_LB */
- DHD_PERIM_LOCK(pub);
+#ifdef SHOW_LOGTRACE
+ INIT_WORK(&dhd->event_log_dispatcher_work, dhd_event_logtrace_process);
+#endif /* SHOW_LOGTRACE */
- return timeout;
-}
+ DHD_SSSR_MEMPOOL_INIT(&dhd->pub);
-int
-dhd_os_d3ack_wake(dhd_pub_t *pub)
-{
- dhd_info_t *dhd = (dhd_info_t *)(pub->info);
+#ifdef REPORT_FATAL_TIMEOUTS
+ init_dhd_timeouts(&dhd->pub);
+#endif /* REPORT_FATAL_TIMEOUTS */
+#ifdef BCMPCIE
+ dhd->pub.extended_trap_data = MALLOCZ(osh, BCMPCIE_EXT_TRAP_DATA_MAXLEN);
+ if (dhd->pub.extended_trap_data == NULL) {
+ DHD_ERROR(("%s: Failed to alloc extended_trap_data\n", __FUNCTION__));
+ }
+#endif /* BCMPCIE */
- wake_up(&dhd->d3ack_wait);
- return 0;
-}
+ (void)dhd_sysfs_init(dhd);
-int
-dhd_os_busbusy_wait_negation(dhd_pub_t *pub, uint *condition)
-{
- dhd_info_t * dhd = (dhd_info_t *)(pub->info);
- int timeout;
+ dhd_state |= DHD_ATTACH_STATE_DONE;
+ dhd->dhd_state = dhd_state;
- /* Wait for bus usage contexts to gracefully exit within some timeout value
- * Set time out to little higher than dhd_ioctl_timeout_msec,
- * so that IOCTL timeout should not get affected.
- */
- /* Convert timeout in millsecond to jiffies */
- timeout = msecs_to_jiffies(DHD_BUS_BUSY_TIMEOUT);
+ dhd_found++;
- timeout = wait_event_timeout(dhd->dhd_bus_busy_state_wait, !(*condition), timeout);
+ return &dhd->pub;
- return timeout;
+fail:
+ if (dhd_state >= DHD_ATTACH_STATE_DHD_ALLOC) {
+ DHD_TRACE(("%s: Calling dhd_detach dhd_state 0x%x &dhd->pub %p\n",
+ __FUNCTION__, dhd_state, &dhd->pub));
+ dhd->dhd_state = dhd_state;
+ dhd_detach(&dhd->pub);
+ dhd_free(&dhd->pub);
+ }
+dhd_null_flag:
+ return NULL;
}
-/*
- * Wait until the condition *var == condition is met.
- * Returns 0 if the @condition evaluated to false after the timeout elapsed
- * Returns 1 if the @condition evaluated to true
- */
-int
-dhd_os_busbusy_wait_condition(dhd_pub_t *pub, uint *var, uint condition)
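+/* Infer the driver operating mode from the firmware file name suffix
+ * (_apsta, _p2p, _ibss, _mfg); default to STA mode.
+ */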
+int dhd_get_fw_mode(dhd_info_t *dhdinfo)
{
- dhd_info_t * dhd = (dhd_info_t *)(pub->info);
- int timeout;
-
- /* Convert timeout in millsecond to jiffies */
- timeout = msecs_to_jiffies(DHD_BUS_BUSY_TIMEOUT);
-
- timeout = wait_event_timeout(dhd->dhd_bus_busy_state_wait, (*var == condition), timeout);
+ if (strstr(dhdinfo->fw_path, "_apsta") != NULL)
+ return DHD_FLAG_HOSTAP_MODE;
+ if (strstr(dhdinfo->fw_path, "_p2p") != NULL)
+ return DHD_FLAG_P2P_MODE;
+ if (strstr(dhdinfo->fw_path, "_ibss") != NULL)
+ return DHD_FLAG_IBSS_MODE;
+ if (strstr(dhdinfo->fw_path, "_mfg") != NULL)
+ return DHD_FLAG_MFG_MODE;
- return timeout;
+ return DHD_FLAG_STA_MODE;
}
-/*
- * Wait until the '(*var & bitmask) == condition' is met.
- * Returns 0 if the @condition evaluated to false after the timeout elapsed
- * Returns 1 if the @condition evaluated to true
- */
-int
-dhd_os_busbusy_wait_bitmask(dhd_pub_t *pub, uint *var,
- uint bitmask, uint condition)
+int dhd_bus_get_fw_mode(dhd_pub_t *dhdp)
{
- dhd_info_t * dhd = (dhd_info_t *)(pub->info);
- int timeout;
-
- /* Convert timeout in millsecond to jiffies */
- timeout = msecs_to_jiffies(DHD_BUS_BUSY_TIMEOUT);
-
- timeout = wait_event_timeout(dhd->dhd_bus_busy_state_wait,
- ((*var & bitmask) == condition), timeout);
-
- return timeout;
+ return dhd_get_fw_mode(dhdp->info);
}
-int
-dhd_os_dmaxfer_wait(dhd_pub_t *pub, uint *condition)
+bool dhd_update_fw_nv_path(dhd_info_t *dhdinfo)
{
- int ret = 0;
- dhd_info_t * dhd = (dhd_info_t *)(pub->info);
- int timeout;
+ int fw_len;
+ int nv_len;
+ int clm_len;
+ int conf_len;
+ const char *fw = NULL;
+ const char *nv = NULL;
+ const char *clm = NULL;
+ const char *conf = NULL;
+#ifdef DHD_UCODE_DOWNLOAD
+ int uc_len;
+ const char *uc = NULL;
+#endif /* DHD_UCODE_DOWNLOAD */
+ wifi_adapter_info_t *adapter = dhdinfo->adapter;
+ int fw_path_len = sizeof(dhdinfo->fw_path);
+ int nv_path_len = sizeof(dhdinfo->nv_path);
- timeout = msecs_to_jiffies(IOCTL_DMAXFER_TIMEOUT);
- DHD_PERIM_UNLOCK(pub);
- ret = wait_event_timeout(dhd->dmaxfer_wait, (*condition), timeout);
- DHD_PERIM_LOCK(pub);
+ /* Update the firmware and nvram paths. A path may come from the adapter info or from a
+ * module parameter. The path from the adapter info is used for initialization only
+ * (as it won't change).
+ *
+ * The firmware_path/nvram_path module parameters may be changed by the system at run
+ * time. When one changes we need to copy it to dhdinfo->fw_path. An Android private
+ * command may also change dhdinfo->fw_path. As such, we need to clear the path info in
+ * the module parameter after it is copied; we won't update the path again until the
+ * module parameter changes (its first character is not '\0').
+ */
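+ /* Effective precedence (highest first): the firmware_path/nvram_path/clm_path/config_path
+ * module parameters, then the adapter platform data, then the build-time
+ * CONFIG_BCMDHD_FW_PATH/CONFIG_BCMDHD_NVRAM_PATH defaults assigned below.
+ */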
- return ret;
+ /* set default firmware and nvram path for built-in type driver */
+// if (!dhd_download_fw_on_driverload) {
+#ifdef CONFIG_BCMDHD_FW_PATH
+ fw = CONFIG_BCMDHD_FW_PATH;
+#endif /* CONFIG_BCMDHD_FW_PATH */
+#ifdef CONFIG_BCMDHD_NVRAM_PATH
+ nv = CONFIG_BCMDHD_NVRAM_PATH;
+#endif /* CONFIG_BCMDHD_NVRAM_PATH */
+// }
-}
+ /* check if we need to initialize the path */
+ if (dhdinfo->fw_path[0] == '\0') {
+ if (adapter && adapter->fw_path && adapter->fw_path[0] != '\0')
+ fw = adapter->fw_path;
-int
-dhd_os_dmaxfer_wake(dhd_pub_t *pub)
-{
- dhd_info_t *dhd = (dhd_info_t *)(pub->info);
+ }
+ if (dhdinfo->nv_path[0] == '\0') {
+ if (adapter && adapter->nv_path && adapter->nv_path[0] != '\0')
+ nv = adapter->nv_path;
+ }
+ if (dhdinfo->clm_path[0] == '\0') {
+ if (adapter && adapter->clm_path && adapter->clm_path[0] != '\0')
+ clm = adapter->clm_path;
+ }
+ if (dhdinfo->conf_path[0] == '\0') {
+ if (adapter && adapter->conf_path && adapter->conf_path[0] != '\0')
+ conf = adapter->conf_path;
+ }
+
+ /* Use module parameter if it is valid, EVEN IF the path has not been initialized
+ *
+ * TODO: need a solution for multi-chip, can't use the same firmware for all chips
+ */
+ if (firmware_path[0] != '\0')
+ fw = firmware_path;
+ if (nvram_path[0] != '\0')
+ nv = nvram_path;
+ if (clm_path[0] != '\0')
+ clm = clm_path;
+ if (config_path[0] != '\0')
+ conf = config_path;
+#ifdef DHD_UCODE_DOWNLOAD
+ if (ucode_path[0] != '\0')
+ uc = ucode_path;
+#endif /* DHD_UCODE_DOWNLOAD */
+
+ if (fw && fw[0] != '\0') {
+ fw_len = strlen(fw);
+ if (fw_len >= fw_path_len) {
+ DHD_ERROR(("fw path len exceeds max len of dhdinfo->fw_path\n"));
+ return FALSE;
+ }
+ strncpy(dhdinfo->fw_path, fw, fw_path_len);
+ if (dhdinfo->fw_path[fw_len-1] == '\n')
+ dhdinfo->fw_path[fw_len-1] = '\0';
+ }
+ if (nv && nv[0] != '\0') {
+ nv_len = strlen(nv);
+ if (nv_len >= nv_path_len) {
+ DHD_ERROR(("nvram path len exceeds max len of dhdinfo->nv_path\n"));
+ return FALSE;
+ }
+ memset(dhdinfo->nv_path, 0, nv_path_len);
+ strncpy(dhdinfo->nv_path, nv, nv_path_len);
+#ifdef DHD_USE_SINGLE_NVRAM_FILE
+ /* Remove "_net" or "_mfg" tag from current nvram path */
+ {
+ char *nvram_tag = "nvram_";
+ char *ext_tag = ".txt";
+ char *sp_nvram = strnstr(dhdinfo->nv_path, nvram_tag, nv_path_len);
+ bool valid_buf = sp_nvram && ((uint32)(sp_nvram + strlen(nvram_tag) +
+ strlen(ext_tag) - dhdinfo->nv_path) <= nv_path_len);
+ if (valid_buf) {
+ char *sp = sp_nvram + strlen(nvram_tag) - 1;
+ uint32 padding_size = (uint32)(dhdinfo->nv_path +
+ nv_path_len - sp);
+ memset(sp, 0, padding_size);
+ strncat(dhdinfo->nv_path, ext_tag, strlen(ext_tag));
+ nv_len = strlen(dhdinfo->nv_path);
+ DHD_INFO(("%s: new nvram path = %s\n",
+ __FUNCTION__, dhdinfo->nv_path));
+ } else if (sp_nvram) {
+ DHD_ERROR(("%s: buffer space for nvram path is not enough\n",
+ __FUNCTION__));
+ return FALSE;
+ } else {
+ DHD_ERROR(("%s: Couldn't find the nvram tag. current"
+ " nvram path = %s\n", __FUNCTION__, dhdinfo->nv_path));
+ }
+ }
+#endif /* DHD_USE_SINGLE_NVRAM_FILE */
+ if (dhdinfo->nv_path[nv_len-1] == '\n')
+ dhdinfo->nv_path[nv_len-1] = '\0';
+ }
+ if (clm && clm[0] != '\0') {
+ clm_len = strlen(clm);
+ if (clm_len >= sizeof(dhdinfo->clm_path)) {
+ DHD_ERROR(("clm path len exceeds max len of dhdinfo->clm_path\n"));
+ return FALSE;
+ }
+ strncpy(dhdinfo->clm_path, clm, sizeof(dhdinfo->clm_path));
+ if (dhdinfo->clm_path[clm_len-1] == '\n')
+ dhdinfo->clm_path[clm_len-1] = '\0';
+ }
+ if (conf && conf[0] != '\0') {
+ conf_len = strlen(conf);
+ if (conf_len >= sizeof(dhdinfo->conf_path)) {
+ DHD_ERROR(("config path len exceeds max len of dhdinfo->conf_path\n"));
+ return FALSE;
+ }
+ strncpy(dhdinfo->conf_path, conf, sizeof(dhdinfo->conf_path));
+ if (dhdinfo->conf_path[conf_len-1] == '\n')
+ dhdinfo->conf_path[conf_len-1] = '\0';
+ }
+#ifdef DHD_UCODE_DOWNLOAD
+ if (uc && uc[0] != '\0') {
+ uc_len = strlen(uc);
+ if (uc_len >= sizeof(dhdinfo->uc_path)) {
+ DHD_ERROR(("uc path len exceeds max len of dhdinfo->uc_path\n"));
+ return FALSE;
+ }
+ strncpy(dhdinfo->uc_path, uc, sizeof(dhdinfo->uc_path));
+ if (dhdinfo->uc_path[uc_len-1] == '\n')
+ dhdinfo->uc_path[uc_len-1] = '\0';
+ }
+#endif /* DHD_UCODE_DOWNLOAD */
- wake_up(&dhd->dmaxfer_wait);
- return 0;
-}
+#if 0
+ /* clear the path in module parameter */
+ if (dhd_download_fw_on_driverload) {
+ firmware_path[0] = '\0';
+ nvram_path[0] = '\0';
+ clm_path[0] = '\0';
+ config_path[0] = '\0';
+ }
+#endif
+#ifdef DHD_UCODE_DOWNLOAD
+ ucode_path[0] = '\0';
+ DHD_ERROR(("ucode path: %s\n", dhdinfo->uc_path));
+#endif /* DHD_UCODE_DOWNLOAD */
-void
-dhd_os_tx_completion_wake(dhd_pub_t *dhd)
-{
- /* Call wmb() to make sure before waking up the other event value gets updated */
- OSL_SMP_WMB();
- wake_up(&dhd->tx_completion_wait);
-}
+#ifndef BCMEMBEDIMAGE
+ /* fw_path and nv_path are not mandatory for BCMEMBEDIMAGE */
+ if (dhdinfo->fw_path[0] == '\0') {
+ DHD_ERROR(("firmware path not found\n"));
+ return FALSE;
+ }
+ if (dhdinfo->nv_path[0] == '\0') {
+ DHD_ERROR(("nvram path not found\n"));
+ return FALSE;
+ }
+#endif /* BCMEMBEDIMAGE */
-/* Fix compilation error for FC11 */
-INLINE int
-dhd_os_busbusy_wake(dhd_pub_t *pub)
-{
- dhd_info_t *dhd = (dhd_info_t *)(pub->info);
- /* Call wmb() to make sure before waking up the other event value gets updated */
- OSL_SMP_WMB();
- wake_up(&dhd->dhd_bus_busy_state_wait);
- return 0;
+ return TRUE;
}
-void
-dhd_os_wd_timer_extend(void *bus, bool extend)
+#if defined(BT_OVER_SDIO)
+extern bool dhd_update_btfw_path(dhd_info_t *dhdinfo, char* btfw_path)
{
-#ifndef BCMDBUS
- dhd_pub_t *pub = bus;
- dhd_info_t *dhd = (dhd_info_t *)pub->info;
+ int fw_len;
+ const char *fw = NULL;
+ wifi_adapter_info_t *adapter = dhdinfo->adapter;
- if (extend)
- dhd_os_wd_timer(bus, WATCHDOG_EXTEND_INTERVAL);
- else
- dhd_os_wd_timer(bus, dhd->default_wd_interval);
-#endif /* !BCMDBUS */
-}
-void
-dhd_os_wd_timer(void *bus, uint wdtick)
-{
-#ifndef BCMDBUS
- dhd_pub_t *pub = bus;
- dhd_info_t *dhd = (dhd_info_t *)pub->info;
- unsigned long flags;
+ /* Update the BT firmware path. The path may come from the adapter info or from the
+ * module parameter. The path from the adapter info is used for initialization only
+ * (as it won't change).
+ *
+ * The btfw_path module parameter may be changed by the system at run
+ * time. When it changes we need to copy it to dhdinfo->btfw_path. An Android private
+ * command may also change dhdinfo->btfw_path. As such, we need to clear the path info in
+ * the module parameter after it is copied; we won't update the path again until the
+ * module parameter changes (its first character is not '\0').
+ */
- DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+ /* set default firmware and nvram path for built-in type driver */
+ if (!dhd_download_fw_on_driverload) {
+#ifdef CONFIG_BCMDHD_BTFW_PATH
+ fw = CONFIG_BCMDHD_BTFW_PATH;
+#endif /* CONFIG_BCMDHD_FW_PATH */
+ }
- if (!dhd) {
- DHD_ERROR(("%s: dhd NULL\n", __FUNCTION__));
- return;
+ /* check if we need to initialize the path */
+ if (dhdinfo->btfw_path[0] == '\0') {
+ if (adapter && adapter->btfw_path && adapter->btfw_path[0] != '\0')
+ fw = adapter->btfw_path;
}
- DHD_GENERAL_LOCK(pub, flags);
+ /* Use module parameter if it is valid, EVEN IF the path has not been initialized
+ */
+ if (btfw_path[0] != '\0')
+ fw = btfw_path;
- /* don't start the wd until fw is loaded */
- if (pub->busstate == DHD_BUS_DOWN) {
- DHD_GENERAL_UNLOCK(pub, flags);
-#ifdef BCMSDIO
- if (!wdtick) {
- DHD_OS_WD_WAKE_UNLOCK(pub);
+ if (fw && fw[0] != '\0') {
+ fw_len = strlen(fw);
+ if (fw_len >= sizeof(dhdinfo->btfw_path)) {
+ DHD_ERROR(("fw path len exceeds max len of dhdinfo->btfw_path\n"));
+ return FALSE;
}
-#endif /* BCMSDIO */
- return;
+ strncpy(dhdinfo->btfw_path, fw, sizeof(dhdinfo->btfw_path));
+ if (dhdinfo->btfw_path[fw_len-1] == '\n')
+ dhdinfo->btfw_path[fw_len-1] = '\0';
}
- /* Totally stop the timer */
- if (!wdtick && dhd->wd_timer_valid == TRUE) {
- dhd->wd_timer_valid = FALSE;
- DHD_GENERAL_UNLOCK(pub, flags);
- del_timer_sync(&dhd->timer);
-#ifdef BCMSDIO
- DHD_OS_WD_WAKE_UNLOCK(pub);
-#endif /* BCMSDIO */
- return;
- }
+ /* clear the path in module parameter */
+ btfw_path[0] = '\0';
- if (wdtick) {
-#ifdef BCMSDIO
- DHD_OS_WD_WAKE_LOCK(pub);
- dhd_watchdog_ms = (uint)wdtick;
-#endif /* BCMSDIO */
- /* Re arm the timer, at last watchdog period */
- mod_timer(&dhd->timer, jiffies + msecs_to_jiffies(dhd_watchdog_ms));
- dhd->wd_timer_valid = TRUE;
+ if (dhdinfo->btfw_path[0] == '\0') {
+ DHD_ERROR(("bt firmware path not found\n"));
+ return FALSE;
}
- DHD_GENERAL_UNLOCK(pub, flags);
-#endif /* !BCMDBUS */
-}
-void *
-dhd_os_open_image1(dhd_pub_t *pub, char *filename)
-{
- struct file *fp;
- int size;
+ return TRUE;
+}
+#endif /* defined (BT_OVER_SDIO) */
- fp = filp_open(filename, O_RDONLY, 0);
- /*
- * 2.6.11 (FC4) supports filp_open() but later revs don't?
- * Alternative:
- * fp = open_namei(AT_FDCWD, filename, O_RD, 0);
- * ???
- */
- if (IS_ERR(fp)) {
- fp = NULL;
- goto err;
- }
- if (!S_ISREG(file_inode(fp)->i_mode)) {
- DHD_ERROR(("%s: %s is not regular file\n", __FUNCTION__, filename));
- fp = NULL;
- goto err;
- }
+#ifdef CUSTOMER_HW4_DEBUG
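+/* Sanity-check that the chip ID reported by the bus matches the chip selected at build time. */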
+bool dhd_validate_chipid(dhd_pub_t *dhdp)
+{
+ uint chipid = dhd_bus_chip_id(dhdp);
+ uint config_chipid;
+
+#ifdef BCM4361_CHIP
+ config_chipid = BCM4361_CHIP_ID;
+#elif defined(BCM4359_CHIP)
+ config_chipid = BCM4359_CHIP_ID;
+#elif defined(BCM4358_CHIP)
+ config_chipid = BCM4358_CHIP_ID;
+#elif defined(BCM4354_CHIP)
+ config_chipid = BCM4354_CHIP_ID;
+#elif defined(BCM4339_CHIP)
+ config_chipid = BCM4339_CHIP_ID;
+#elif defined(BCM43349_CHIP)
+ config_chipid = BCM43349_CHIP_ID;
+#elif defined(BCM4335_CHIP)
+ config_chipid = BCM4335_CHIP_ID;
+#elif defined(BCM43241_CHIP)
+ config_chipid = BCM4324_CHIP_ID;
+#elif defined(BCM4330_CHIP)
+ config_chipid = BCM4330_CHIP_ID;
+#elif defined(BCM43430_CHIP)
+ config_chipid = BCM43430_CHIP_ID;
+#elif defined(BCM43018_CHIP)
+ config_chipid = BCM43018_CHIP_ID;
+#elif defined(BCM43455_CHIP)
+ config_chipid = BCM4345_CHIP_ID;
+#elif defined(BCM4334W_CHIP)
+ config_chipid = BCM43342_CHIP_ID;
+#elif defined(BCM43454_CHIP)
+ config_chipid = BCM43454_CHIP_ID;
+#elif defined(BCM43012_CHIP_)
+ config_chipid = BCM43012_CHIP_ID;
+#else
+ DHD_ERROR(("%s: Unknown chip id, if you use new chipset,"
+ " please add CONFIG_BCMXXXX into the Kernel and"
+ " BCMXXXX_CHIP definition into the DHD driver\n",
+ __FUNCTION__));
+ config_chipid = 0;
- size = i_size_read(file_inode(fp));
- if (size <= 0) {
- DHD_ERROR(("%s: %s file size invalid %d\n", __FUNCTION__, filename, size));
- fp = NULL;
- goto err;
- }
+ return FALSE;
+#endif /* BCM4361_CHIP */
- DHD_ERROR(("%s: %s (%d bytes) open success\n", __FUNCTION__, filename, size));
+#ifdef SUPPORT_MULTIPLE_CHIP_4345X
+ if (config_chipid == BCM43454_CHIP_ID || config_chipid == BCM4345_CHIP_ID) {
+ return TRUE;
+ }
+#endif /* SUPPORT_MULTIPLE_CHIP_4345X */
+#if defined(BCM4359_CHIP)
+ if (chipid == BCM4355_CHIP_ID && config_chipid == BCM4359_CHIP_ID) {
+ return TRUE;
+ }
+#endif /* BCM4359_CHIP */
+#if defined(BCM4361_CHIP)
+ if (chipid == BCM4347_CHIP_ID && config_chipid == BCM4361_CHIP_ID) {
+ return TRUE;
+ }
+#endif /* BCM4361_CHIP */
-err:
- return fp;
+ return config_chipid == chipid;
}
+#endif /* CUSTOMER_HW4_DEBUG */
-int
-dhd_os_get_image_block(char *buf, int len, void *image)
+#if defined(BT_OVER_SDIO)
+wlan_bt_handle_t dhd_bt_get_pub_hndl(void)
{
- struct file *fp = (struct file *)image;
- int rdlen;
- int size;
-
- if (!image) {
- return 0;
- }
+ DHD_ERROR(("%s: g_dhd_pub %p\n", __FUNCTION__, g_dhd_pub));
+ /* assuming that dhd_pub_t type pointer is available from a global variable */
+ return (wlan_bt_handle_t) g_dhd_pub;
+} EXPORT_SYMBOL(dhd_bt_get_pub_hndl);
- size = i_size_read(file_inode(fp));
- rdlen = compat_kernel_read(fp, fp->f_pos, buf, MIN(len, size));
+int dhd_download_btfw(wlan_bt_handle_t handle, char* btfw_path)
+{
+ int ret = -1;
+ dhd_pub_t *dhdp = (dhd_pub_t *)handle;
+ dhd_info_t *dhd = (dhd_info_t*)dhdp->info;
- if (len >= size && size != rdlen) {
- return -EIO;
- }
- if (rdlen > 0) {
- fp->f_pos += rdlen;
+ /* Download BT firmware image to the dongle */
+ if (dhd->pub.busstate == DHD_BUS_DATA && dhd_update_btfw_path(dhd, btfw_path)) {
+ DHD_INFO(("%s: download btfw from: %s\n", __FUNCTION__, dhd->btfw_path));
+ ret = dhd_bus_download_btfw(dhd->pub.bus, dhd->pub.osh, dhd->btfw_path);
+ if (ret < 0) {
+ DHD_ERROR(("%s: failed to download btfw from: %s\n",
+ __FUNCTION__, dhd->btfw_path));
+ return ret;
+ }
}
+ return ret;
+} EXPORT_SYMBOL(dhd_download_btfw);
+#endif /* defined (BT_OVER_SDIO) */
- return rdlen;
-}
-
-#if defined(BT_OVER_SDIO)
+#ifndef BCMDBUS
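+/* Bring the bus up: download firmware/NVRAM, start the watchdog, initialize the bus,
+ * register the OOB interrupt and flow rings, run protocol init, then sync with the dongle.
+ */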
int
-dhd_os_gets_image(dhd_pub_t *pub, char *str, int len, void *image)
+dhd_bus_start(dhd_pub_t *dhdp)
{
- struct file *fp = (struct file *)image;
- int rd_len;
- uint str_len = 0;
- char *str_end = NULL;
+ int ret = -1;
+ dhd_info_t *dhd = (dhd_info_t*)dhdp->info;
+ unsigned long flags;
- if (!image)
- return 0;
+#if defined(DHD_DEBUG) && defined(BCMSDIO)
+ int fw_download_start = 0, fw_download_end = 0, f2_sync_start = 0, f2_sync_end = 0;
+#endif /* DHD_DEBUG && BCMSDIO */
+ ASSERT(dhd);
- rd_len = compat_kernel_read(fp, fp->f_pos, str, len);
- str_end = strnchr(str, len, '\n');
- if (str_end == NULL) {
- goto err;
+ DHD_TRACE(("Enter %s:\n", __FUNCTION__));
+
+ DHD_PERIM_LOCK(dhdp);
+#ifdef HOFFLOAD_MODULES
+ dhd_linux_get_modfw_address(dhdp);
+#endif
+ /* try to download image and nvram to the dongle */
+ if (dhd->pub.busstate == DHD_BUS_DOWN && dhd_update_fw_nv_path(dhd)) {
+ /* Indicate FW Download has not yet done */
+ dhd->pub.fw_download_done = FALSE;
+ DHD_INFO(("%s download fw %s, nv %s, conf %s\n",
+ __FUNCTION__, dhd->fw_path, dhd->nv_path, dhd->conf_path));
+#if defined(DHD_DEBUG) && defined(BCMSDIO)
+ fw_download_start = OSL_SYSUPTIME();
+#endif /* DHD_DEBUG && BCMSDIO */
+ ret = dhd_bus_download_firmware(dhd->pub.bus, dhd->pub.osh,
+ dhd->fw_path, dhd->nv_path, dhd->clm_path, dhd->conf_path);
+#if defined(DHD_DEBUG) && defined(BCMSDIO)
+ fw_download_end = OSL_SYSUPTIME();
+#endif /* DHD_DEBUG && BCMSDIO */
+ if (ret < 0) {
+ DHD_ERROR(("%s: failed to download firmware %s\n",
+ __FUNCTION__, dhd->fw_path));
+ DHD_PERIM_UNLOCK(dhdp);
+ return ret;
+ }
+ /* Indicate FW Download has succeeded */
+ dhd->pub.fw_download_done = TRUE;
+ }
+ if (dhd->pub.busstate != DHD_BUS_LOAD) {
+ DHD_PERIM_UNLOCK(dhdp);
+ return -ENETDOWN;
}
- str_len = (uint)(str_end - str);
- /* Advance file pointer past the string length */
- fp->f_pos += str_len + 1;
- bzero(str_end, rd_len - str_len);
+#ifdef BCMSDIO
+ dhd_os_sdlock(dhdp);
+#endif /* BCMSDIO */
-err:
- return str_len;
-}
-#endif /* defined (BT_OVER_SDIO) */
+ /* Start the watchdog timer */
+ dhd->pub.tickcnt = 0;
+ dhd_os_wd_timer(&dhd->pub, dhd_watchdog_ms);
-int
-dhd_os_get_image_size(void *image)
-{
- struct file *fp = (struct file *)image;
- int size;
- if (!image) {
- return 0;
+ /* Bring up the bus */
+ if ((ret = dhd_bus_init(&dhd->pub, FALSE)) != 0) {
+
+ DHD_ERROR(("%s, dhd_bus_init failed %d\n", __FUNCTION__, ret));
+#ifdef BCMSDIO
+ dhd_os_sdunlock(dhdp);
+#endif /* BCMSDIO */
+ DHD_PERIM_UNLOCK(dhdp);
+ return ret;
}
- size = i_size_read(file_inode(fp));
+ DHD_ENABLE_RUNTIME_PM(&dhd->pub);
- return size;
-}
+#ifdef DHD_ULP
+ dhd_ulp_set_ulp_state(dhdp, DHD_ULP_DISABLED);
+#endif /* DHD_ULP */
+#if defined(OOB_INTR_ONLY) || defined(BCMPCIE_OOB_HOST_WAKE)
+ /* Host registration for OOB interrupt */
+ if (dhd_bus_oob_intr_register(dhdp)) {
+ /* deactivate timer and wait for the handler to finish */
+#if !defined(BCMPCIE_OOB_HOST_WAKE)
+ DHD_GENERAL_LOCK(&dhd->pub, flags);
+ dhd->wd_timer_valid = FALSE;
+ DHD_GENERAL_UNLOCK(&dhd->pub, flags);
+ del_timer_sync(&dhd->timer);
-void
-dhd_os_close_image1(dhd_pub_t *pub, void *image)
-{
- if (image) {
- filp_close((struct file *)image, NULL);
+#endif /* !BCMPCIE_OOB_HOST_WAKE */
+ DHD_DISABLE_RUNTIME_PM(&dhd->pub);
+ DHD_PERIM_UNLOCK(dhdp);
+ DHD_ERROR(("%s Host failed to register for OOB\n", __FUNCTION__));
+ return -ENODEV;
}
-}
-void
-dhd_os_sdlock(dhd_pub_t *pub)
-{
- dhd_info_t *dhd;
-
- dhd = (dhd_info_t *)(pub->info);
-
-#ifdef BCMDBUS
- spin_lock_bh(&dhd->sdlock);
+#if defined(BCMPCIE_OOB_HOST_WAKE)
+ dhd_bus_oob_intr_set(dhdp, TRUE);
#else
- if (dhd_dpc_prio >= 0)
- down(&dhd->sdsem);
- else
- spin_lock_bh(&dhd->sdlock);
-#endif /* !BCMDBUS */
-}
+ /* Enable oob at firmware */
+ dhd_enable_oob_intr(dhd->pub.bus, TRUE);
+#endif /* BCMPCIE_OOB_HOST_WAKE */
+#elif defined(FORCE_WOWLAN)
+ /* Enable oob at firmware */
+ dhd_enable_oob_intr(dhd->pub.bus, TRUE);
+#endif
+#ifdef PCIE_FULL_DONGLE
+ {
+ /* max_h2d_rings includes H2D common rings */
+ uint32 max_h2d_rings = dhd_bus_max_h2d_queues(dhd->pub.bus);
-void
-dhd_os_sdunlock(dhd_pub_t *pub)
-{
- dhd_info_t *dhd;
+ DHD_ERROR(("%s: Initializing %u h2drings\n", __FUNCTION__,
+ max_h2d_rings));
+ if ((ret = dhd_flow_rings_init(&dhd->pub, max_h2d_rings)) != BCME_OK) {
+#ifdef BCMSDIO
+ dhd_os_sdunlock(dhdp);
+#endif /* BCMSDIO */
+ DHD_PERIM_UNLOCK(dhdp);
+ return ret;
+ }
+ }
+#endif /* PCIE_FULL_DONGLE */
- dhd = (dhd_info_t *)(pub->info);
+ /* Do protocol initialization necessary for IOCTL/IOVAR */
+ ret = dhd_prot_init(&dhd->pub);
+ if (unlikely(ret) != BCME_OK) {
+ DHD_PERIM_UNLOCK(dhdp);
+ DHD_OS_WD_WAKE_UNLOCK(&dhd->pub);
+ return ret;
+ }
-#ifdef BCMDBUS
- spin_unlock_bh(&dhd->sdlock);
-#else
- if (dhd_dpc_prio >= 0)
- up(&dhd->sdsem);
- else
- spin_unlock_bh(&dhd->sdlock);
-#endif /* !BCMDBUS */
-}
+ /* If bus is not ready, can't come up */
+ if (dhd->pub.busstate != DHD_BUS_DATA) {
+ DHD_GENERAL_LOCK(&dhd->pub, flags);
+ dhd->wd_timer_valid = FALSE;
+ DHD_GENERAL_UNLOCK(&dhd->pub, flags);
+ del_timer_sync(&dhd->timer);
+ DHD_ERROR(("%s failed bus is not ready\n", __FUNCTION__));
+ DHD_DISABLE_RUNTIME_PM(&dhd->pub);
+#ifdef BCMSDIO
+ dhd_os_sdunlock(dhdp);
+#endif /* BCMSDIO */
+ DHD_PERIM_UNLOCK(dhdp);
+ return -ENODEV;
+ }
-void
-dhd_os_sdlock_txq(dhd_pub_t *pub)
-{
- dhd_info_t *dhd;
+#ifdef BCMSDIO
+ dhd_os_sdunlock(dhdp);
+#endif /* BCMSDIO */
- dhd = (dhd_info_t *)(pub->info);
-#ifdef BCMDBUS
- spin_lock_irqsave(&dhd->txqlock, dhd->txqlock_flags);
-#else
- spin_lock_bh(&dhd->txqlock);
-#endif /* BCMDBUS */
-}
+ /* Bus is ready, query any dongle information */
+#if defined(DHD_DEBUG) && defined(BCMSDIO)
+ f2_sync_start = OSL_SYSUPTIME();
+#endif /* DHD_DEBUG && BCMSDIO */
+ if ((ret = dhd_sync_with_dongle(&dhd->pub)) < 0) {
+ DHD_GENERAL_LOCK(&dhd->pub, flags);
+ dhd->wd_timer_valid = FALSE;
+ DHD_GENERAL_UNLOCK(&dhd->pub, flags);
+ del_timer_sync(&dhd->timer);
+ DHD_ERROR(("%s failed to sync with dongle\n", __FUNCTION__));
+ DHD_OS_WD_WAKE_UNLOCK(&dhd->pub);
+ DHD_PERIM_UNLOCK(dhdp);
+ return ret;
+ }
+#if defined(CONFIG_SOC_EXYNOS8895)
+ DHD_ERROR(("%s: Enable L1ss EP side\n", __FUNCTION__));
+ exynos_pcie_l1ss_ctrl(1, PCIE_L1SS_CTRL_WIFI);
+#endif /* CONFIG_SOC_EXYNOS8895 */
-void
-dhd_os_sdunlock_txq(dhd_pub_t *pub)
-{
- dhd_info_t *dhd;
+#if defined(DHD_DEBUG) && defined(BCMSDIO)
+ f2_sync_end = OSL_SYSUPTIME();
+ DHD_ERROR(("Time taken for FW download and F2 ready is: %d msec\n",
+ (fw_download_end - fw_download_start) + (f2_sync_end - f2_sync_start)));
+#endif /* DHD_DEBUG && BCMSDIO */
- dhd = (dhd_info_t *)(pub->info);
-#ifdef BCMDBUS
- spin_unlock_irqrestore(&dhd->txqlock, dhd->txqlock_flags);
-#else
- spin_unlock_bh(&dhd->txqlock);
-#endif /* BCMDBUS */
-}
+#ifdef ARP_OFFLOAD_SUPPORT
+ if (dhd->pend_ipaddr) {
+#ifdef AOE_IP_ALIAS_SUPPORT
+ aoe_update_host_ipv4_table(&dhd->pub, dhd->pend_ipaddr, TRUE, 0);
+#endif /* AOE_IP_ALIAS_SUPPORT */
+ dhd->pend_ipaddr = 0;
+ }
+#endif /* ARP_OFFLOAD_SUPPORT */
-void
-dhd_os_sdlock_rxq(dhd_pub_t *pub)
-{
+#if defined(TRAFFIC_MGMT_DWM)
+ bzero(&dhd->pub.dhd_tm_dwm_tbl, sizeof(dhd_trf_mgmt_dwm_tbl_t));
+#endif
+ DHD_PERIM_UNLOCK(dhdp);
+ return 0;
}
+#endif /* !BCMDBUS */
-void
-dhd_os_sdunlock_rxq(dhd_pub_t *pub)
+#ifdef WLTDLS
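+/* Enable or disable TDLS in the dongle and, when auto mode is requested,
+ * program the idle-time and RSSI thresholds that drive automatic TDLS setup.
+ */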
+int _dhd_tdls_enable(dhd_pub_t *dhd, bool tdls_on, bool auto_on, struct ether_addr *mac)
{
-}
+ uint32 tdls = tdls_on;
+ int ret = 0;
+ uint32 tdls_auto_op = 0;
+ uint32 tdls_idle_time = CUSTOM_TDLS_IDLE_MODE_SETTING;
+ int32 tdls_rssi_high = CUSTOM_TDLS_RSSI_THRESHOLD_HIGH;
+ int32 tdls_rssi_low = CUSTOM_TDLS_RSSI_THRESHOLD_LOW;
+ BCM_REFERENCE(mac);
+ if (!FW_SUPPORTED(dhd, tdls))
+ return BCME_ERROR;
-static void
-dhd_os_rxflock(dhd_pub_t *pub)
-{
- dhd_info_t *dhd;
+ if (dhd->tdls_enable == tdls_on)
+ goto auto_mode;
+ ret = dhd_iovar(dhd, 0, "tdls_enable", (char *)&tdls, sizeof(tdls), NULL, 0, TRUE);
+ if (ret < 0) {
+ DHD_ERROR(("%s: tdls %d failed %d\n", __FUNCTION__, tdls, ret));
+ goto exit;
+ }
+ dhd->tdls_enable = tdls_on;
+auto_mode:
- dhd = (dhd_info_t *)(pub->info);
- spin_lock_bh(&dhd->rxf_lock);
+ tdls_auto_op = auto_on;
+ ret = dhd_iovar(dhd, 0, "tdls_auto_op", (char *)&tdls_auto_op, sizeof(tdls_auto_op), NULL,
+ 0, TRUE);
+ if (ret < 0) {
+ DHD_ERROR(("%s: tdls_auto_op failed %d\n", __FUNCTION__, ret));
+ goto exit;
+ }
+
+ if (tdls_auto_op) {
+ ret = dhd_iovar(dhd, 0, "tdls_idle_time", (char *)&tdls_idle_time,
+ sizeof(tdls_idle_time), NULL, 0, TRUE);
+ if (ret < 0) {
+ DHD_ERROR(("%s: tdls_idle_time failed %d\n", __FUNCTION__, ret));
+ goto exit;
+ }
+ ret = dhd_iovar(dhd, 0, "tdls_rssi_high", (char *)&tdls_rssi_high,
+ sizeof(tdls_rssi_high), NULL, 0, TRUE);
+ if (ret < 0) {
+ DHD_ERROR(("%s: tdls_rssi_high failed %d\n", __FUNCTION__, ret));
+ goto exit;
+ }
+ ret = dhd_iovar(dhd, 0, "tdls_rssi_low", (char *)&tdls_rssi_low,
+ sizeof(tdls_rssi_low), NULL, 0, TRUE);
+ if (ret < 0) {
+ DHD_ERROR(("%s: tdls_rssi_low failed %d\n", __FUNCTION__, ret));
+ goto exit;
+ }
+ }
+exit:
+ return ret;
}
-static void
-dhd_os_rxfunlock(dhd_pub_t *pub)
+int dhd_tdls_enable(struct net_device *dev, bool tdls_on, bool auto_on, struct ether_addr *mac)
{
- dhd_info_t *dhd;
-
- dhd = (dhd_info_t *)(pub->info);
- spin_unlock_bh(&dhd->rxf_lock);
+ dhd_info_t *dhd = DHD_DEV_INFO(dev);
+ int ret = 0;
+ if (dhd)
+ ret = _dhd_tdls_enable(&dhd->pub, tdls_on, auto_on, mac);
+ else
+ ret = BCME_ERROR;
+ return ret;
}
-#ifdef DHDTCPACK_SUPPRESS
-unsigned long
-dhd_os_tcpacklock(dhd_pub_t *pub)
+int
+dhd_tdls_set_mode(dhd_pub_t *dhd, bool wfd_mode)
{
- dhd_info_t *dhd;
- unsigned long flags = 0;
-
- dhd = (dhd_info_t *)(pub->info);
+ int ret = 0;
+ bool auto_on = false;
+ uint32 mode = wfd_mode;
- if (dhd) {
-#ifdef BCMSDIO
- spin_lock_bh(&dhd->tcpack_lock);
+#ifdef ENABLE_TDLS_AUTO_MODE
+ if (wfd_mode) {
+ auto_on = false;
+ } else {
+ auto_on = true;
+ }
#else
- spin_lock_irqsave(&dhd->tcpack_lock, flags);
-#endif /* BCMSDIO */
+ auto_on = false;
+#endif /* ENABLE_TDLS_AUTO_MODE */
+ ret = _dhd_tdls_enable(dhd, false, auto_on, NULL);
+ if (ret < 0) {
+ DHD_ERROR(("Disable tdls_auto_op failed. %d\n", ret));
+ return ret;
}
- return flags;
-}
+ ret = dhd_iovar(dhd, 0, "tdls_wfd_mode", (char *)&mode, sizeof(mode), NULL, 0, TRUE);
+ if ((ret < 0) && (ret != BCME_UNSUPPORTED)) {
+ DHD_ERROR(("%s: tdls_wfd_mode faile_wfd_mode %d\n", __FUNCTION__, ret));
+ return ret;
+ }
-void
-dhd_os_tcpackunlock(dhd_pub_t *pub, unsigned long flags)
+ ret = _dhd_tdls_enable(dhd, true, auto_on, NULL);
+ if (ret < 0) {
+ DHD_ERROR(("enable tdls_auto_op failed. %d\n", ret));
+ return ret;
+ }
+
+ dhd->tdls_mode = mode;
+ return ret;
+}
+#ifdef PCIE_FULL_DONGLE
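+/* Maintain the host-side TDLS peer table from TDLS peer events: add the peer
+ * on connect; on disconnect, delete its flow rings and free the table entry.
+ */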
+int dhd_tdls_update_peer_info(dhd_pub_t *dhdp, wl_event_msg_t *event)
{
- dhd_info_t *dhd;
+ dhd_pub_t *dhd_pub = dhdp;
+ tdls_peer_node_t *cur = dhd_pub->peer_tbl.node;
+ tdls_peer_node_t *new = NULL, *prev = NULL;
+ int ifindex = dhd_ifname2idx(dhd_pub->info, event->ifname);
+ uint8 *da = (uint8 *)&event->addr.octet[0];
+ bool connect = FALSE;
+ uint32 reason = ntoh32(event->reason);
+ unsigned long flags;
-#ifdef BCMSDIO
- BCM_REFERENCE(flags);
-#endif /* BCMSDIO */
+ if (reason == WLC_E_TDLS_PEER_CONNECTED)
+ connect = TRUE;
+ else if (reason == WLC_E_TDLS_PEER_DISCONNECTED)
+ connect = FALSE;
+ else
+ {
+ DHD_ERROR(("%s: TDLS Event reason is unknown\n", __FUNCTION__));
+ return BCME_ERROR;
+ }
+ if (ifindex == DHD_BAD_IF)
+ return BCME_ERROR;
- dhd = (dhd_info_t *)(pub->info);
+ if (connect) {
+ while (cur != NULL) {
+ if (!memcmp(da, cur->addr, ETHER_ADDR_LEN)) {
+ DHD_ERROR(("%s: TDLS Peer exist already %d\n",
+ __FUNCTION__, __LINE__));
+ return BCME_ERROR;
+ }
+ cur = cur->next;
+ }
- if (dhd) {
-#ifdef BCMSDIO
- spin_unlock_bh(&dhd->tcpack_lock);
-#else
- spin_unlock_irqrestore(&dhd->tcpack_lock, flags);
-#endif /* BCMSDIO */
+ new = MALLOC(dhd_pub->osh, sizeof(tdls_peer_node_t));
+ if (new == NULL) {
+ DHD_ERROR(("%s: Failed to allocate memory\n", __FUNCTION__));
+ return BCME_ERROR;
+ }
+ memcpy(new->addr, da, ETHER_ADDR_LEN);
+ DHD_TDLS_LOCK(&dhdp->tdls_lock, flags);
+ new->next = dhd_pub->peer_tbl.node;
+ dhd_pub->peer_tbl.node = new;
+ dhd_pub->peer_tbl.tdls_peer_count++;
+ DHD_TDLS_UNLOCK(&dhdp->tdls_lock, flags);
+
+ } else {
+ while (cur != NULL) {
+ if (!memcmp(da, cur->addr, ETHER_ADDR_LEN)) {
+ dhd_flow_rings_delete_for_peer(dhd_pub, (uint8)ifindex, da);
+ DHD_TDLS_LOCK(&dhdp->tdls_lock, flags);
+ if (prev)
+ prev->next = cur->next;
+ else
+ dhd_pub->peer_tbl.node = cur->next;
+ MFREE(dhd_pub->osh, cur, sizeof(tdls_peer_node_t));
+ dhd_pub->peer_tbl.tdls_peer_count--;
+ DHD_TDLS_UNLOCK(&dhdp->tdls_lock, flags);
+ return BCME_OK;
+ }
+ prev = cur;
+ cur = cur->next;
+ }
+ DHD_ERROR(("%s: TDLS Peer Entry Not found\n", __FUNCTION__));
}
+ return BCME_OK;
}
-#endif /* DHDTCPACK_SUPPRESS */
+#endif /* PCIE_FULL_DONGLE */
+#endif
-uint8* dhd_os_prealloc(dhd_pub_t *dhdpub, int section, uint size, bool kmalloc_if_fail)
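+/* Return TRUE when the driver is operating in any concurrent (multi- or single-channel) mode */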
+bool dhd_is_concurrent_mode(dhd_pub_t *dhd)
{
- uint8* buf;
- gfp_t flags = CAN_SLEEP() ? GFP_KERNEL: GFP_ATOMIC;
-
- buf = (uint8*)wifi_platform_prealloc(dhdpub->info->adapter, section, size);
- if (buf == NULL && kmalloc_if_fail)
- buf = kmalloc(size, flags);
+ if (!dhd)
+ return FALSE;
- return buf;
+ if (dhd->op_mode & DHD_FLAG_CONCURR_MULTI_CHAN_MODE)
+ return TRUE;
+ else if ((dhd->op_mode & DHD_FLAG_CONCURR_SINGLE_CHAN_MODE) ==
+ DHD_FLAG_CONCURR_SINGLE_CHAN_MODE)
+ return TRUE;
+ else
+ return FALSE;
}
-
-void dhd_os_prefree(dhd_pub_t *dhdpub, void *addr, uint size)
+#if !defined(AP) && defined(WLP2P)
+/* Since the Android Jelly Bean release, concurrent mode is enabled by default and the firmware
+ * is named fw_bcmdhd.bin. We therefore need to determine whether P2P is enabled in the STA
+ * firmware and, if so, enable concurrent mode (apply P2P settings). SoftAP firmware
+ * is still named fw_bcmdhd_apsta.
+ */
+uint32
+dhd_get_concurrent_capabilites(dhd_pub_t *dhd)
{
+ int32 ret = 0;
+ char buf[WLC_IOCTL_SMLEN];
+ bool mchan_supported = FALSE;
+ /* If dhd->op_mode is already set for HOSTAP or Manufacturing
+ * test mode, we will just use that mode as it is.
+ */
+ if (dhd->op_mode & (DHD_FLAG_HOSTAP_MODE | DHD_FLAG_MFG_MODE))
+ return 0;
+ if (FW_SUPPORTED(dhd, vsdb)) {
+ mchan_supported = TRUE;
+ }
+ if (!FW_SUPPORTED(dhd, p2p)) {
+ DHD_TRACE(("Chip does not support p2p\n"));
+ return 0;
+ } else {
+ /* Chip supports p2p, but verify that p2p is really implemented in the firmware */
+ memset(buf, 0, sizeof(buf));
+ ret = dhd_iovar(dhd, 0, "p2p", NULL, 0, (char *)&buf,
+ sizeof(buf), FALSE);
+ if (ret < 0) {
+ DHD_ERROR(("%s: Get P2P failed (error=%d)\n", __FUNCTION__, ret));
+ return 0;
+ } else {
+ if (buf[0] == 1) {
+ /* By default, chip supports single chan concurrency,
+ * now let's check for mchan
+ */
+ ret = DHD_FLAG_CONCURR_SINGLE_CHAN_MODE;
+ if (mchan_supported)
+ ret |= DHD_FLAG_CONCURR_MULTI_CHAN_MODE;
+ if (FW_SUPPORTED(dhd, rsdb)) {
+ ret |= DHD_FLAG_RSDB_MODE;
+ }
+#ifdef WL_SUPPORT_MULTIP2P
+ if (FW_SUPPORTED(dhd, mp2p)) {
+ ret |= DHD_FLAG_MP2P_MODE;
+ }
+#endif /* WL_SUPPORT_MULTIP2P */
+#if defined(WL_ENABLE_P2P_IF) || defined(WL_CFG80211_P2P_DEV_IF)
+ return ret;
+#else
+ return 0;
+#endif /* WL_ENABLE_P2P_IF || WL_CFG80211_P2P_DEV_IF */
+ }
+ }
+ }
+ return 0;
}
+#endif
-#if defined(WL_WIRELESS_EXT)
-struct iw_statistics *
-dhd_get_wireless_stats(struct net_device *dev)
+#ifdef SUPPORT_AP_POWERSAVE
+#define RXCHAIN_PWRSAVE_PPS 10
+#define RXCHAIN_PWRSAVE_QUIET_TIME 10
+#define RXCHAIN_PWRSAVE_STAS_ASSOC_CHECK 0
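+/* Enable or disable rxchain power save for SoftAP; when enabling, also program
+ * the packets-per-second, quiet-time and associated-STA-check parameters.
+ */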
+int dhd_set_ap_powersave(dhd_pub_t *dhdp, int ifidx, int enable)
{
- int res = 0;
- dhd_info_t *dhd = DHD_DEV_INFO(dev);
+ int32 pps = RXCHAIN_PWRSAVE_PPS;
+ int32 quiet_time = RXCHAIN_PWRSAVE_QUIET_TIME;
+ int32 stas_assoc_check = RXCHAIN_PWRSAVE_STAS_ASSOC_CHECK;
+ int ret;
- if (!dhd->pub.up) {
- return NULL;
+ if (enable) {
+ ret = dhd_iovar(dhdp, 0, "rxchain_pwrsave_enable", (char *)&enable, sizeof(enable),
+ NULL, 0, TRUE);
+ if (ret != BCME_OK) {
+ DHD_ERROR(("Failed to enable AP power save\n"));
+ }
+ ret = dhd_iovar(dhdp, 0, "rxchain_pwrsave_pps", (char *)&pps, sizeof(pps), NULL, 0,
+ TRUE);
+ if (ret != BCME_OK) {
+ DHD_ERROR(("Failed to set pps\n"));
+ }
+ ret = dhd_iovar(dhdp, 0, "rxchain_pwrsave_quiet_time", (char *)&quiet_time,
+ sizeof(quiet_time), NULL, 0, TRUE);
+ if (ret != BCME_OK) {
+ DHD_ERROR(("Failed to set quiet time\n"));
+ }
+ ret = dhd_iovar(dhdp, 0, "rxchain_pwrsave_stas_assoc_check",
+ (char *)&stas_assoc_check, sizeof(stas_assoc_check), NULL, 0, TRUE);
+ if (ret != BCME_OK) {
+ DHD_ERROR(("Failed to set stas assoc check\n"));
+ }
+ } else {
+ ret = dhd_iovar(dhdp, 0, "rxchain_pwrsave_enable", (char *)&enable, sizeof(enable),
+ NULL, 0, TRUE);
+ if (ret != BCME_OK) {
+ DHD_ERROR(("Failed to disable AP power save\n"));
+ }
}
- if (!(dev->flags & IFF_UP)) {
- return NULL;
- }
+ return 0;
+}
+#endif /* SUPPORT_AP_POWERSAVE */
- res = wl_iw_get_wireless_stats(dev, &dhd->iw.wstats);
- if (res == 0)
- return &dhd->iw.wstats;
- else
- return NULL;
-}
-#endif /* defined(WL_WIRELESS_EXT) */
-static int
-dhd_wl_host_event(dhd_info_t *dhd, int ifidx, void *pktdata, uint16 pktlen,
- wl_event_msg_t *event, void **data)
+
+#if defined(WLADPS) || defined(WLADPS_PRIVATE_CMD)
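+/* Build a bcm_iov_buf_t carrying wl_adps_params_v1_t and apply the requested
+ * ADPS mode to each band through the "adps" iovar.
+ */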
+int
+dhd_enable_adps(dhd_pub_t *dhd, uint8 on)
{
- int bcmerror = 0;
-#ifdef WL_CFG80211
- unsigned long flags = 0;
-#endif /* WL_CFG80211 */
- ASSERT(dhd != NULL);
+ int i;
+ int len;
+ int ret = BCME_OK;
-#ifdef SHOW_LOGTRACE
- bcmerror = wl_process_host_event(&dhd->pub, &ifidx, pktdata, pktlen, event, data,
- &dhd->event_data);
-#else
- bcmerror = wl_process_host_event(&dhd->pub, &ifidx, pktdata, pktlen, event, data,
- NULL);
-#endif /* SHOW_LOGTRACE */
- if (unlikely(bcmerror != BCME_OK)) {
- return bcmerror;
- }
+ bcm_iov_buf_t *iov_buf = NULL;
+ wl_adps_params_v1_t *data = NULL;
+ char buf[WL_EVENTING_MASK_LEN + 12]; /* Room for the "adps" iovar name and its payload */
- if (ntoh32(event->event_type) == WLC_E_IF) {
- /* WLC_E_IF event types are consumed by wl_process_host_event.
- * For ifadd/del ops, the netdev ptr may not be valid at this
- * point. so return before invoking cfg80211/wext handlers.
- */
- return BCME_OK;
+ len = OFFSETOF(bcm_iov_buf_t, data) + sizeof(*data);
+ iov_buf = kmalloc(len, GFP_KERNEL);
+ if (iov_buf == NULL) {
+ DHD_ERROR(("%s - failed to allocate %d bytes for iov_buf\n", __FUNCTION__, len));
+ ret = BCME_NOMEM;
+ goto exit;
}
-#if defined(WL_EXT_IAPSTA) || defined(USE_IW)
- wl_ext_event_send(dhd->pub.event_params, event, *data);
-#endif
+ iov_buf->version = WL_ADPS_IOV_VER;
+ iov_buf->len = sizeof(*data);
+ iov_buf->id = WL_ADPS_IOV_MODE;
-#ifdef WL_CFG80211
- if (dhd->iflist[ifidx]->net) {
- spin_lock_irqsave(&dhd->pub.up_lock, flags);
- if (dhd->pub.up) {
- wl_cfg80211_event(dhd->iflist[ifidx]->net, event, *data);
+ data = (wl_adps_params_v1_t *)iov_buf->data;
+ data->version = ADPS_SUB_IOV_VERSION_1;
+ data->length = sizeof(*data);
+ data->mode = on;
+
+ for (i = 1; i <= MAX_BANDS; i++) {
+ data->band = i;
+ bcm_mkiovar("adps", (char *)iov_buf, len, buf, sizeof(buf));
+ if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, buf, sizeof(buf), TRUE, 0)) < 0) {
+ if (ret == BCME_UNSUPPORTED) {
+ DHD_ERROR(("%s adps is not supported\n", __FUNCTION__));
+ ret = BCME_OK;
+ goto exit;
+ }
+ else {
+ DHD_ERROR(("%s fail to set adps %s for band %d (%d)\n",
+ __FUNCTION__, on ? "On" : "Off", i, ret));
+ goto exit;
+ }
}
- spin_unlock_irqrestore(&dhd->pub.up_lock, flags);
}
-#endif /* defined(WL_CFG80211) */
-
- return (bcmerror);
-}
-/* send up locally generated event */
-void
-dhd_sendup_event(dhd_pub_t *dhdp, wl_event_msg_t *event, void *data)
-{
- switch (ntoh32(event->event_type)) {
- /* Handle error case or further events here */
- default:
- break;
+exit:
+ if (iov_buf) {
+ kfree(iov_buf);
}
+ return ret;
}
+#endif /* WLADPS || WLADPS_PRIVATE_CMD */
-#ifdef LOG_INTO_TCPDUMP
-void
-dhd_sendup_log(dhd_pub_t *dhdp, void *data, int data_len)
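+/* One-time dongle configuration performed right after firmware download:
+ * programs the MAC address, operating mode, country code, roaming, AMPDU
+ * parameters, offloads and event masks via IOCTLs/IOVARs.
+ */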
+int
+dhd_preinit_ioctls(dhd_pub_t *dhd)
{
- struct sk_buff *p, *skb;
- uint32 pktlen;
- int len;
- dhd_if_t *ifp;
- dhd_info_t *dhd;
- uchar *skb_data;
- int ifidx = 0;
- struct ether_header eth;
-
- pktlen = sizeof(eth) + data_len;
- dhd = dhdp->info;
-
- if ((p = PKTGET(dhdp->osh, pktlen, FALSE))) {
- ASSERT(ISALIGNED((uintptr)PKTDATA(dhdp->osh, p), sizeof(uint32)));
+ int ret = 0;
+ char eventmask[WL_EVENTING_MASK_LEN];
+ char iovbuf[WL_EVENTING_MASK_LEN + 12]; /* Room for "event_msgs" + '\0' + bitvec */
+ uint32 buf_key_b4_m4 = 1;
+ uint8 msglen;
+ eventmsgs_ext_t *eventmask_msg = NULL;
+ char* iov_buf = NULL;
+ int ret2 = 0;
+ uint32 wnm_cap = 0;
+#if defined(CUSTOM_AMPDU_BA_WSIZE)
+ uint32 ampdu_ba_wsize = 0;
+#endif
+#if defined(CUSTOM_AMPDU_MPDU)
+ int32 ampdu_mpdu = 0;
+#endif
+#if defined(CUSTOM_AMPDU_RELEASE)
+ int32 ampdu_release = 0;
+#endif
+#if defined(CUSTOM_AMSDU_AGGSF)
+ int32 amsdu_aggsf = 0;
+#endif
+ shub_control_t shub_ctl;
- bcopy(&dhdp->mac, ð.ether_dhost, ETHER_ADDR_LEN);
- bcopy(&dhdp->mac, ð.ether_shost, ETHER_ADDR_LEN);
- ETHER_TOGGLE_LOCALADDR(ð.ether_shost);
- eth.ether_type = hton16(ETHER_TYPE_BRCM);
+#if defined(BCMSDIO) || defined(BCMDBUS)
+#ifdef PROP_TXSTATUS
+ int wlfc_enable = TRUE;
+#ifndef DISABLE_11N
+ uint32 hostreorder = 1;
+ uint wl_down = 1;
+#endif /* DISABLE_11N */
+#endif /* PROP_TXSTATUS */
+#endif /* BCMSDIO || BCMDBUS */
+#ifndef PCIE_FULL_DONGLE
+ uint32 wl_ap_isolate;
+#endif /* PCIE_FULL_DONGLE */
+ uint32 frameburst = CUSTOM_FRAMEBURST_SET;
+ uint wnm_bsstrans_resp = 0;
+#ifdef SUPPORT_SET_CAC
+ uint32 cac = 1;
+#endif /* SUPPORT_SET_CAC */
+#ifdef DHD_ENABLE_LPC
+ uint32 lpc = 1;
+#endif /* DHD_ENABLE_LPC */
+ uint power_mode = PM_FAST;
+#if defined(BCMSDIO)
+ uint32 dongle_align = DHD_SDALIGN;
+ uint32 glom = CUSTOM_GLOM_SETTING;
+#endif /* defined(BCMSDIO) */
+#if defined(CUSTOMER_HW2) && defined(USE_WL_CREDALL)
+ uint32 credall = 1;
+#endif
+ uint bcn_timeout = CUSTOM_BCN_TIMEOUT;
+ uint scancache_enab = TRUE;
+#ifdef ENABLE_BCN_LI_BCN_WAKEUP
+ uint32 bcn_li_bcn = 1;
+#endif /* ENABLE_BCN_LI_BCN_WAKEUP */
+ uint retry_max = CUSTOM_ASSOC_RETRY_MAX;
+#if defined(ARP_OFFLOAD_SUPPORT)
+ int arpoe = 1;
+#endif
+ int scan_assoc_time = DHD_SCAN_ASSOC_ACTIVE_TIME;
+ int scan_unassoc_time = DHD_SCAN_UNASSOC_ACTIVE_TIME;
+ int scan_passive_time = DHD_SCAN_PASSIVE_TIME;
+ char buf[WLC_IOCTL_SMLEN];
+ char *ptr;
+ uint32 listen_interval = CUSTOM_LISTEN_INTERVAL; /* Default Listen Interval in Beacons */
+#if defined(DHD_8021X_DUMP) && defined(SHOW_LOGTRACE)
+ wl_el_tag_params_t *el_tag = NULL;
+#endif /* DHD_8021X_DUMP */
+#ifdef ROAM_ENABLE
+ uint roamvar = 0;
+ int roam_trigger[2] = {CUSTOM_ROAM_TRIGGER_SETTING, WLC_BAND_ALL};
+ int roam_scan_period[2] = {10, WLC_BAND_ALL};
+ int roam_delta[2] = {CUSTOM_ROAM_DELTA_SETTING, WLC_BAND_ALL};
+#ifdef FULL_ROAMING_SCAN_PERIOD_60_SEC
+ int roam_fullscan_period = 60;
+#else /* FULL_ROAMING_SCAN_PERIOD_60_SEC */
+ int roam_fullscan_period = 120;
+#endif /* FULL_ROAMING_SCAN_PERIOD_60_SEC */
+#ifdef DISABLE_BCNLOSS_ROAM
+ uint roam_bcnloss_off = 1;
+#endif /* DISABLE_BCNLOSS_ROAM */
+#else
+#ifdef DISABLE_BUILTIN_ROAM
+ uint roamvar = 1;
+#endif /* DISABLE_BUILTIN_ROAM */
+#endif /* ROAM_ENABLE */
- bcopy((void *)ð, PKTDATA(dhdp->osh, p), sizeof(eth));
- bcopy(data, PKTDATA(dhdp->osh, p) + sizeof(eth), data_len);
- skb = PKTTONATIVE(dhdp->osh, p);
- skb_data = skb->data;
- len = skb->len;
+#if defined(SOFTAP)
+ uint dtim = 1;
+#endif
+#if (defined(AP) && !defined(WLP2P)) || (!defined(AP) && defined(WL_CFG80211))
+ struct ether_addr p2p_ea;
+#endif
+#ifdef SOFTAP_UAPSD_OFF
+ uint32 wme_apsd = 0;
+#endif /* SOFTAP_UAPSD_OFF */
+#if (defined(AP) || defined(WLP2P)) && !defined(SOFTAP_AND_GC)
+ uint32 apsta = 1; /* Enable APSTA mode */
+#elif defined(SOFTAP_AND_GC)
+ uint32 apsta = 0;
+ int ap_mode = 1;
+#endif /* (defined(AP) || defined(WLP2P)) && !defined(SOFTAP_AND_GC) */
+#ifdef GET_CUSTOM_MAC_ENABLE
+ struct ether_addr ea_addr;
+ char hw_ether[62];
+#endif /* GET_CUSTOM_MAC_ENABLE */
- ifidx = dhd_ifname2idx(dhd, "wlan0");
- ifp = dhd->iflist[ifidx];
- if (ifp == NULL)
- ifp = dhd->iflist[0];
+#ifdef DISABLE_11N
+ uint32 nmode = 0;
+#endif /* DISABLE_11N */
- ASSERT(ifp);
- skb->dev = ifp->net;
- skb->protocol = eth_type_trans(skb, skb->dev);
- skb->data = skb_data;
- skb->len = len;
+#ifdef USE_WL_TXBF
+ uint32 txbf = 1;
+#endif /* USE_WL_TXBF */
+#ifdef DISABLE_TXBFR
+ uint32 txbf_bfr_cap = 0;
+#endif /* DISABLE_TXBFR */
+#if defined(PROP_TXSTATUS)
+#ifdef USE_WFA_CERT_CONF
+ uint32 proptx = 0;
+#endif /* USE_WFA_CERT_CONF */
+#endif /* PROP_TXSTATUS */
+#if defined(SUPPORT_5G_1024QAM_VHT)
+ uint32 vht_features = 0; /* init to 0, will be set based on each support */
+#endif
+#ifdef DISABLE_11N_PROPRIETARY_RATES
+ uint32 ht_features = 0;
+#endif /* DISABLE_11N_PROPRIETARY_RATES */
+#ifdef CUSTOM_PSPRETEND_THR
+ uint32 pspretend_thr = CUSTOM_PSPRETEND_THR;
+#endif
+#ifdef CUSTOM_EVENT_PM_WAKE
+ uint32 pm_awake_thresh = CUSTOM_EVENT_PM_WAKE;
+#endif /* CUSTOM_EVENT_PM_WAKE */
+ uint32 rsdb_mode = 0;
+#ifdef ENABLE_TEMP_THROTTLING
+ wl_temp_control_t temp_control;
+#endif /* ENABLE_TEMP_THROTTLING */
+#ifdef DISABLE_PRUNED_SCAN
+ uint32 scan_features = 0;
+#endif /* DISABLE_PRUNED_SCAN */
+#ifdef PKT_FILTER_SUPPORT
+ dhd_pkt_filter_enable = TRUE;
+#ifdef APF
+ dhd->apf_set = FALSE;
+#endif /* APF */
+#endif /* PKT_FILTER_SUPPORT */
+#ifdef WLTDLS
+ dhd->tdls_enable = FALSE;
+ dhd_tdls_set_mode(dhd, false);
+#endif /* WLTDLS */
+ dhd->suspend_bcn_li_dtim = CUSTOM_SUSPEND_BCN_LI_DTIM;
+#ifdef ENABLE_MAX_DTIM_IN_SUSPEND
+ dhd->max_dtim_enable = TRUE;
+#else
+ dhd->max_dtim_enable = FALSE;
+#endif /* ENABLE_MAX_DTIM_IN_SUSPEND */
+#ifdef CUSTOM_SET_OCLOFF
+ dhd->ocl_off = FALSE;
+#endif /* CUSTOM_SET_OCLOFF */
+ DHD_TRACE(("Enter %s\n", __FUNCTION__));
- /* Strip header, count, deliver upward */
- skb_pull(skb, ETH_HLEN);
+#ifdef DHDTCPACK_SUPPRESS
+ dhd_tcpack_suppress_set(dhd, dhd->conf->tcpack_sup_mode);
+#endif
+ dhd->op_mode = 0;
- bcm_object_trace_opr(skb, BCM_OBJDBG_REMOVE,
- __FUNCTION__, __LINE__);
- /* Send the packet */
- if (in_interrupt()) {
- netif_rx(skb);
- } else {
- netif_rx_ni(skb);
+#if defined(CUSTOM_COUNTRY_CODE) && defined(CUSTOMER_HW2)
+ /* clear AP flags */
+ dhd->dhd_cflags &= ~WLAN_PLAT_AP_FLAG;
+#endif /* CUSTOM_COUNTRY_CODE && CUSTOMER_HW2 */
+
+#ifdef CUSTOMER_HW4_DEBUG
+ if (!dhd_validate_chipid(dhd)) {
+ DHD_ERROR(("%s: CONFIG_BCMXXX and CHIP ID(%x) is mismatched\n",
+ __FUNCTION__, dhd_bus_chip_id(dhd)));
+#ifndef SUPPORT_MULTIPLE_CHIPS
+ ret = BCME_BADARG;
+ goto done;
+#endif /* !SUPPORT_MULTIPLE_CHIPS */
+ }
+#endif /* CUSTOMER_HW4_DEBUG */
+ if ((!op_mode && dhd_get_fw_mode(dhd->info) == DHD_FLAG_MFG_MODE) ||
+ (op_mode == DHD_FLAG_MFG_MODE)) {
+ dhd->op_mode = DHD_FLAG_MFG_MODE;
+#ifdef DHD_PCIE_RUNTIMEPM
+ /* Disable RuntimePM in mfg mode */
+ DHD_DISABLE_RUNTIME_PM(dhd);
+ DHD_ERROR(("%s : Disable RuntimePM in Manufactring Firmware\n", __FUNCTION__));
+#endif /* DHD_PCIE_RUNTIME_PM */
+ /* Check and adjust IOCTL response timeout for Manufactring firmware */
+ dhd_os_set_ioctl_resp_timeout(MFG_IOCTL_RESP_TIMEOUT);
+ DHD_ERROR(("%s : Set IOCTL response time for Manufactring Firmware\n",
+ __FUNCTION__));
+ } else {
+ dhd_os_set_ioctl_resp_timeout(IOCTL_RESP_TIMEOUT);
+ DHD_INFO(("%s : Set IOCTL response time.\n", __FUNCTION__));
+ }
+#ifdef GET_CUSTOM_MAC_ENABLE
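+ /* Program the platform-provided MAC address; if cur_etheraddr is rejected,
+ * fall back to pushing the full hw_ether blob to the dongle.
+ */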
+ ret = wifi_platform_get_mac_addr(dhd->info->adapter, hw_ether);
+ if (!ret) {
+ memset(buf, 0, sizeof(buf));
+ bcopy(hw_ether, ea_addr.octet, sizeof(struct ether_addr));
+ bcm_mkiovar("cur_etheraddr", (void *)&ea_addr, ETHER_ADDR_LEN, buf, sizeof(buf));
+ ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, buf, sizeof(buf), TRUE, 0);
+ if (ret < 0) {
+ memset(buf, 0, sizeof(buf));
+ bcm_mkiovar("hw_ether", hw_ether, sizeof(hw_ether), buf, sizeof(buf));
+ ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, buf, sizeof(buf), TRUE, 0);
+ if (ret) {
+ int i;
+ DHD_ERROR(("%s: can't set MAC address MAC="MACDBG", error=%d\n",
+ __FUNCTION__, MAC2STRDBG(hw_ether), ret));
+ for (i = 0; i < sizeof(hw_ether) - ETHER_ADDR_LEN; i++) {
+ printf("0x%02x,", hw_ether[i + ETHER_ADDR_LEN]);
+ if ((i + 1) % 8 == 0)
+ printf("\n");
+ }
+ ret = BCME_NOTUP;
+ goto done;
+ }
}
} else {
- /* Could not allocate a sk_buf */
- DHD_ERROR(("%s: unable to alloc sk_buf\n", __FUNCTION__));
+ DHD_ERROR(("%s: can't get custom MAC address, ret=%d\n", __FUNCTION__, ret));
+ ret = BCME_NOTUP;
+ goto done;
}
-}
-#endif /* LOG_INTO_TCPDUMP */
-
-void dhd_wait_for_event(dhd_pub_t *dhd, bool *lockvar)
-{
-#if defined(BCMSDIO)
- struct dhd_info *dhdinfo = dhd->info;
+#endif /* GET_CUSTOM_MAC_ENABLE */
+ /* Get the default device MAC address directly from firmware */
+ memset(buf, 0, sizeof(buf));
+ bcm_mkiovar("cur_etheraddr", 0, 0, buf, sizeof(buf));
+ if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, buf, sizeof(buf),
+ FALSE, 0)) < 0) {
+ DHD_ERROR(("%s: can't get MAC address , error=%d\n", __FUNCTION__, ret));
+ ret = BCME_NOTUP;
+ goto done;
+ }
+ /* Update public MAC address after reading from Firmware */
+ memcpy(dhd->mac.octet, buf, ETHER_ADDR_LEN);
- int timeout = msecs_to_jiffies(IOCTL_RESP_TIMEOUT);
+ if ((ret = dhd_apply_default_clm(dhd, dhd->clm_path)) < 0) {
+ DHD_ERROR(("%s: CLM set failed. Abort initialization.\n", __FUNCTION__));
+ goto done;
+ }
- dhd_os_sdunlock(dhd);
- wait_event_timeout(dhdinfo->ctrl_wait, (*lockvar == FALSE), timeout);
- dhd_os_sdlock(dhd);
-#endif /* defined(BCMSDIO) */
- return;
-} /* dhd_init_static_strs_array */
+ /* get the capabilities string from firmware */
+ {
+ uint32 cap_buf_size = sizeof(dhd->fw_capabilities);
+ memset(dhd->fw_capabilities, 0, cap_buf_size);
+ ret = dhd_iovar(dhd, 0, "cap", NULL, 0, dhd->fw_capabilities, (cap_buf_size - 1),
+ FALSE);
+ if (ret < 0) {
+ DHD_ERROR(("%s: Get Capability failed (error=%d)\n",
+ __FUNCTION__, ret));
+ return 0;
+ }
-void dhd_wait_event_wakeup(dhd_pub_t *dhd)
-{
-#if defined(BCMSDIO)
- struct dhd_info *dhdinfo = dhd->info;
- if (waitqueue_active(&dhdinfo->ctrl_wait))
- wake_up(&dhdinfo->ctrl_wait);
-#endif // endif
- return;
-}
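+ /* Shift the capability string right by one and pad it with spaces so that
+ * space-delimited token searches (e.g. FW_SUPPORTED()) match whole names.
+ */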
+ memmove(&dhd->fw_capabilities[1], dhd->fw_capabilities, (cap_buf_size - 1));
+ dhd->fw_capabilities[0] = ' ';
+ dhd->fw_capabilities[cap_buf_size - 2] = ' ';
+ dhd->fw_capabilities[cap_buf_size - 1] = '\0';
+ }
-#if defined(BCMSDIO) || defined(BCMPCIE) || defined(BCMDBUS)
-int
-dhd_net_bus_devreset(struct net_device *dev, uint8 flag)
-{
- int ret;
+ if ((!op_mode && dhd_get_fw_mode(dhd->info) == DHD_FLAG_HOSTAP_MODE) ||
+ (op_mode == DHD_FLAG_HOSTAP_MODE)) {
+#ifdef SET_RANDOM_MAC_SOFTAP
+ uint rand_mac;
+#endif /* SET_RANDOM_MAC_SOFTAP */
+ dhd->op_mode = DHD_FLAG_HOSTAP_MODE;
+#if defined(ARP_OFFLOAD_SUPPORT)
+ arpoe = 0;
+#endif
+#ifdef PKT_FILTER_SUPPORT
+ dhd_pkt_filter_enable = FALSE;
+#endif
+#ifdef SET_RANDOM_MAC_SOFTAP
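+ /* Compose a locally-administered MAC for SoftAP: keep the vendor OUI (with
+ * the local-admin bit set) and derive the low three octets from a random value.
+ */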
+ SRANDOM32((uint)jiffies);
+ rand_mac = RANDOM32();
+ iovbuf[0] = (unsigned char)(vendor_oui >> 16) | 0x02; /* local admin bit */
+ iovbuf[1] = (unsigned char)(vendor_oui >> 8);
+ iovbuf[2] = (unsigned char)vendor_oui;
+ iovbuf[3] = (unsigned char)(rand_mac & 0x0F) | 0xF0;
+ iovbuf[4] = (unsigned char)(rand_mac >> 8);
+ iovbuf[5] = (unsigned char)(rand_mac >> 16);
- dhd_info_t *dhd = DHD_DEV_INFO(dev);
+ ret = dhd_iovar(dhd, 0, "cur_etheraddr", (char *)&iovbuf, ETHER_ADDR_LEN, NULL, 0,
+ TRUE);
+ if (ret < 0) {
+ DHD_ERROR(("%s: can't set MAC address , error=%d\n", __FUNCTION__, ret));
+ } else
+ memcpy(dhd->mac.octet, iovbuf, ETHER_ADDR_LEN);
+#endif /* SET_RANDOM_MAC_SOFTAP */
+#ifdef USE_DYNAMIC_F2_BLKSIZE
+ dhdsdio_func_blocksize(dhd, 2, DYNAMIC_F2_BLKSIZE_FOR_NONLEGACY);
+#endif /* USE_DYNAMIC_F2_BLKSIZE */
+#ifdef SUPPORT_AP_POWERSAVE
+ dhd_set_ap_powersave(dhd, 0, TRUE);
+#endif /* SUPPORT_AP_POWERSAVE */
+#ifdef SOFTAP_UAPSD_OFF
+ ret = dhd_iovar(dhd, 0, "wme_apsd", (char *)&wme_apsd, sizeof(wme_apsd), NULL, 0,
+ TRUE);
+ if (ret < 0) {
+ DHD_ERROR(("%s: set wme_apsd 0 fail (error=%d)\n",
+ __FUNCTION__, ret));
+ }
+#endif /* SOFTAP_UAPSD_OFF */
+#if defined(CUSTOM_COUNTRY_CODE) && defined(CUSTOMER_HW2)
+ /* set AP flag for specific country code of SOFTAP */
+ dhd->dhd_cflags |= WLAN_PLAT_AP_FLAG | WLAN_PLAT_NODFS_FLAG;
+#endif /* CUSTOM_COUNTRY_CODE && CUSTOMER_HW2 */
+ } else if ((!op_mode && dhd_get_fw_mode(dhd->info) == DHD_FLAG_MFG_MODE) ||
+ (op_mode == DHD_FLAG_MFG_MODE)) {
+#if defined(ARP_OFFLOAD_SUPPORT)
+ arpoe = 0;
+#endif /* ARP_OFFLOAD_SUPPORT */
+#ifdef PKT_FILTER_SUPPORT
+ dhd_pkt_filter_enable = FALSE;
+#endif /* PKT_FILTER_SUPPORT */
+ dhd->op_mode = DHD_FLAG_MFG_MODE;
+#ifdef USE_DYNAMIC_F2_BLKSIZE
+ dhdsdio_func_blocksize(dhd, 2, DYNAMIC_F2_BLKSIZE_FOR_NONLEGACY);
+#endif /* USE_DYNAMIC_F2_BLKSIZE */
+ if (FW_SUPPORTED(dhd, rsdb)) {
+ rsdb_mode = 0;
+ ret = dhd_iovar(dhd, 0, "rsdb_mode", (char *)&rsdb_mode, sizeof(rsdb_mode),
+ NULL, 0, TRUE);
+ if (ret < 0) {
+ DHD_ERROR(("%s Disable rsdb_mode is failed ret= %d\n",
+ __FUNCTION__, ret));
+ }
+ }
+ } else {
+ uint32 concurrent_mode = 0;
+ if ((!op_mode && dhd_get_fw_mode(dhd->info) == DHD_FLAG_P2P_MODE) ||
+ (op_mode == DHD_FLAG_P2P_MODE)) {
+#if defined(ARP_OFFLOAD_SUPPORT)
+ arpoe = 0;
+#endif
+#ifdef PKT_FILTER_SUPPORT
+ dhd_pkt_filter_enable = FALSE;
+#endif
+ dhd->op_mode = DHD_FLAG_P2P_MODE;
+ } else if ((!op_mode && dhd_get_fw_mode(dhd->info) == DHD_FLAG_IBSS_MODE) ||
+ (op_mode == DHD_FLAG_IBSS_MODE)) {
+ dhd->op_mode = DHD_FLAG_IBSS_MODE;
+ } else
+ dhd->op_mode = DHD_FLAG_STA_MODE;
+#if !defined(AP) && defined(WLP2P)
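+ /* For STA builds with P2P, probe the firmware for concurrent-mode support
+ * and fold the detected capabilities into op_mode.
+ */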
+ if (dhd->op_mode != DHD_FLAG_IBSS_MODE &&
+ (concurrent_mode = dhd_get_concurrent_capabilites(dhd))) {
+#if defined(ARP_OFFLOAD_SUPPORT)
+ arpoe = 1;
+#endif
+ dhd->op_mode |= concurrent_mode;
+ }
-#ifdef DHD_PCIE_NATIVE_RUNTIMEPM
- if (pm_runtime_get_sync(dhd_bus_to_dev(dhd->pub.bus)) < 0)
- return BCME_ERROR;
-#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
+ /* Check if we are enabling p2p */
+ if (dhd->op_mode & DHD_FLAG_P2P_MODE) {
+ ret = dhd_iovar(dhd, 0, "apsta", (char *)&apsta, sizeof(apsta), NULL, 0,
+ TRUE);
+ if (ret < 0)
+ DHD_ERROR(("%s APSTA for P2P failed ret= %d\n", __FUNCTION__, ret));
- if (flag == TRUE) {
- /* Issue wl down command before resetting the chip */
- if (dhd_wl_ioctl_cmd(&dhd->pub, WLC_DOWN, NULL, 0, TRUE, 0) < 0) {
- DHD_TRACE(("%s: wl down failed\n", __FUNCTION__));
- }
-#ifdef PROP_TXSTATUS
- if (dhd->pub.wlfc_enabled) {
- dhd_wlfc_deinit(&dhd->pub);
+#if defined(SOFTAP_AND_GC)
+ if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_AP,
+ (char *)&ap_mode, sizeof(ap_mode), TRUE, 0)) < 0) {
+ DHD_ERROR(("%s WLC_SET_AP failed %d\n", __FUNCTION__, ret));
}
-#endif /* PROP_TXSTATUS */
-#ifdef PNO_SUPPORT
- if (dhd->pub.pno_state) {
- dhd_pno_deinit(&dhd->pub);
+#endif
+ memcpy(&p2p_ea, &dhd->mac, ETHER_ADDR_LEN);
+ ETHER_SET_LOCALADDR(&p2p_ea);
+ ret = dhd_iovar(dhd, 0, "p2p_da_override", (char *)&p2p_ea, sizeof(p2p_ea),
+ NULL, 0, TRUE);
+ if (ret < 0)
+ DHD_ERROR(("%s p2p_da_override ret= %d\n", __FUNCTION__, ret));
+ else
+ DHD_INFO(("dhd_preinit_ioctls: p2p_da_override succeeded\n"));
}
-#endif // endif
-#ifdef RTT_SUPPORT
- if (dhd->pub.rtt_state) {
- dhd_rtt_deinit(&dhd->pub);
+#else
+ (void)concurrent_mode;
+#endif
+ }
+#ifdef BCMSDIO
+ if (dhd->conf->sd_f2_blocksize)
+ dhdsdio_func_blocksize(dhd, 2, dhd->conf->sd_f2_blocksize);
+#endif
+
+#if defined(RSDB_MODE_FROM_FILE)
+ (void)dhd_rsdb_mode_from_file(dhd);
+#endif
+
+#ifdef DISABLE_PRUNED_SCAN
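+ /* Read scan_features, clear the RSDB downgraded-channel prune bit and
+ * write the mask back to the dongle.
+ */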
+ if (FW_SUPPORTED(dhd, rsdb)) {
+ ret = dhd_iovar(dhd, 0, "scan_features", (char *)&scan_features,
+ sizeof(scan_features), iovbuf, sizeof(iovbuf), FALSE);
+ if (ret < 0) {
+ DHD_ERROR(("%s get scan_features is failed ret=%d\n",
+ __FUNCTION__, ret));
+ } else {
+ memcpy(&scan_features, iovbuf, 4);
+ scan_features &= ~RSDB_SCAN_DOWNGRADED_CH_PRUNE_ROAM;
+ ret = dhd_iovar(dhd, 0, "scan_features", (char *)&scan_features,
+ sizeof(scan_features), NULL, 0, TRUE);
+ if (ret < 0) {
+ DHD_ERROR(("%s set scan_features is failed ret=%d\n",
+ __FUNCTION__, ret));
+ }
}
-#endif /* RTT_SUPPORT */
+ }
+#endif /* DISABLE_PRUNED_SCAN */
-#if defined(DBG_PKT_MON) && !defined(DBG_PKT_MON_INIT_DEFAULT)
- dhd_os_dbg_detach_pkt_monitor(&dhd->pub);
-#endif /* DBG_PKT_MON */
+ DHD_ERROR(("Firmware up: op_mode=0x%04x, MAC="MACDBG"\n",
+ dhd->op_mode, MAC2STRDBG(dhd->mac.octet)));
+#ifdef CUSTOMER_HW2
+#if defined(DHD_BLOB_EXISTENCE_CHECK)
+ if (!dhd->pub.is_blob)
+#endif /* DHD_BLOB_EXISTENCE_CHECK */
+ {
+ /* get a ccode and revision for the country code */
+#if defined(CUSTOM_COUNTRY_CODE)
+ get_customized_country_code(dhd->info->adapter, dhd->dhd_cspec.country_abbrev,
+ &dhd->dhd_cspec, dhd->dhd_cflags);
+#else
+ get_customized_country_code(dhd->info->adapter, dhd->dhd_cspec.country_abbrev,
+ &dhd->dhd_cspec);
+#endif /* CUSTOM_COUNTRY_CODE */
}
+#endif /* CUSTOMER_HW2 */
-#ifdef BCMSDIO
- if (!flag) {
- dhd_update_fw_nv_path(dhd);
- /* update firmware and nvram path to sdio bus */
- dhd_bus_update_fw_nv_path(dhd->pub.bus,
- dhd->fw_path, dhd->nv_path, dhd->clm_path, dhd->conf_path);
+#if defined(RXFRAME_THREAD) && defined(RXTHREAD_ONLYSTA)
+ if (dhd->op_mode == DHD_FLAG_HOSTAP_MODE)
+ dhd->info->rxthread_enabled = FALSE;
+ else
+ dhd->info->rxthread_enabled = TRUE;
+#endif
+ /* Set Country code */
+ if (dhd->dhd_cspec.ccode[0] != 0) {
+ ret = dhd_iovar(dhd, 0, "country", (char *)&dhd->dhd_cspec, sizeof(wl_country_t),
+ NULL, 0, TRUE);
+ if (ret < 0)
+ DHD_ERROR(("%s: country code setting failed\n", __FUNCTION__));
}
-#endif /* BCMSDIO */
- ret = dhd_bus_devreset(&dhd->pub, flag);
-#ifdef DHD_PCIE_NATIVE_RUNTIMEPM
- pm_runtime_mark_last_busy(dhd_bus_to_dev(dhd->pub.bus));
- pm_runtime_put_autosuspend(dhd_bus_to_dev(dhd->pub.bus));
-#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
+ /* Set Listen Interval */
+ ret = dhd_iovar(dhd, 0, "assoc_listen", (char *)&listen_interval, sizeof(listen_interval),
+ NULL, 0, TRUE);
+ if (ret < 0)
+ DHD_ERROR(("%s assoc_listen failed %d\n", __FUNCTION__, ret));
- if (flag) {
- /* Clear some flags for recovery logic */
- dhd->pub.dongle_trap_occured = 0;
- dhd->pub.iovar_timeout_occured = 0;
-#ifdef PCIE_FULL_DONGLE
- dhd->pub.d3ack_timeout_occured = 0;
- dhd->pub.livelock_occured = 0;
- dhd->pub.pktid_audit_failed = 0;
-#endif /* PCIE_FULL_DONGLE */
- dhd->pub.iface_op_failed = 0;
- dhd->pub.scan_timeout_occurred = 0;
- dhd->pub.scan_busy_occurred = 0;
- dhd->pub.smmu_fault_occurred = 0;
+#if defined(ROAM_ENABLE) || defined(DISABLE_BUILTIN_ROAM)
+#ifdef USE_WFA_CERT_CONF
+ if (sec_get_param_wfa_cert(dhd, SET_PARAM_ROAMOFF, &roamvar) == BCME_OK) {
+ DHD_ERROR(("%s: read roam_off param =%d\n", __FUNCTION__, roamvar));
}
+#endif /* USE_WFA_CERT_CONF */
+ /* Disable built-in roaming to allow the external supplicant to take care of roaming */
+ dhd_iovar(dhd, 0, "roam_off", (char *)&roamvar, sizeof(roamvar), NULL, 0, TRUE);
+#endif /* ROAM_ENABLE || DISABLE_BUILTIN_ROAM */
+#if defined(ROAM_ENABLE)
+#ifdef DISABLE_BCNLOSS_ROAM
+ dhd_iovar(dhd, 0, "roam_bcnloss_off", (char *)&roam_bcnloss_off, sizeof(roam_bcnloss_off),
+ NULL, 0, TRUE);
+#endif /* DISABLE_BCNLOSS_ROAM */
+ if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_ROAM_TRIGGER, roam_trigger,
+ sizeof(roam_trigger), TRUE, 0)) < 0)
+ DHD_ERROR(("%s: roam trigger set failed %d\n", __FUNCTION__, ret));
+ if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_ROAM_SCAN_PERIOD, roam_scan_period,
+ sizeof(roam_scan_period), TRUE, 0)) < 0)
+ DHD_ERROR(("%s: roam scan period set failed %d\n", __FUNCTION__, ret));
+ if ((dhd_wl_ioctl_cmd(dhd, WLC_SET_ROAM_DELTA, roam_delta,
+ sizeof(roam_delta), TRUE, 0)) < 0)
+ DHD_ERROR(("%s: roam delta set failed %d\n", __FUNCTION__, ret));
+ ret = dhd_iovar(dhd, 0, "fullroamperiod", (char *)&roam_fullscan_period,
+ sizeof(roam_fullscan_period), NULL, 0, TRUE);
+ if (ret < 0)
+ DHD_ERROR(("%s: roam fullscan period set failed %d\n", __FUNCTION__, ret));
+#endif /* ROAM_ENABLE */
- if (ret) {
- DHD_ERROR(("%s: dhd_bus_devreset: %d\n", __FUNCTION__, ret));
+#ifdef CUSTOM_EVENT_PM_WAKE
+ ret = dhd_iovar(dhd, 0, "const_awake_thresh", (char *)&pm_awake_thresh,
+ sizeof(pm_awake_thresh), NULL, 0, TRUE);
+ if (ret < 0) {
+ DHD_ERROR(("%s set const_awake_thresh failed %d\n", __FUNCTION__, ret));
}
+#endif /* CUSTOM_EVENT_PM_WAKE */
+#ifdef WLTDLS
+#ifdef ENABLE_TDLS_AUTO_MODE
+ /* by default TDLS on and auto mode on */
+ _dhd_tdls_enable(dhd, true, true, NULL);
+#else
+ /* by default TDLS on and auto mode off */
+ _dhd_tdls_enable(dhd, true, false, NULL);
+#endif /* ENABLE_TDLS_AUTO_MODE */
+#endif /* WLTDLS */
- return ret;
-}
+#ifdef DHD_ENABLE_LPC
+ /* Set lpc 1 */
+ ret = dhd_iovar(dhd, 0, "lpc", (char *)&lpc, sizeof(lpc), NULL, 0, TRUE);
+ if (ret < 0) {
+ DHD_ERROR(("%s Set lpc failed %d\n", __FUNCTION__, ret));
-#ifdef BCMSDIO
-int
-dhd_net_bus_suspend(struct net_device *dev)
-{
- dhd_info_t *dhd = DHD_DEV_INFO(dev);
- return dhd_bus_suspend(&dhd->pub);
-}
+ if (ret == BCME_NOTDOWN) {
+ uint wl_down = 1;
+ ret = dhd_wl_ioctl_cmd(dhd, WLC_DOWN,
+ (char *)&wl_down, sizeof(wl_down), TRUE, 0);
+ DHD_ERROR(("%s lpc fail WL_DOWN : %d, lpc = %d\n", __FUNCTION__, ret, lpc));
-int
-dhd_net_bus_resume(struct net_device *dev, uint8 stage)
-{
- dhd_info_t *dhd = DHD_DEV_INFO(dev);
- return dhd_bus_resume(&dhd->pub, stage);
-}
+ ret = dhd_iovar(dhd, 0, "lpc", (char *)&lpc, sizeof(lpc), NULL, 0, TRUE);
+ DHD_ERROR(("%s Set lpc ret --> %d\n", __FUNCTION__, ret));
+ }
+ }
+#endif /* DHD_ENABLE_LPC */
-#endif /* BCMSDIO */
-#endif /* BCMSDIO || BCMPCIE || BCMDBUS */
+#ifdef WLADPS
+#ifdef WLADPS_SEAK_AP_WAR
+ dhd->disabled_adps = FALSE;
+#endif /* WLADPS_SEAK_AP_WAR */
+ if (dhd->op_mode & DHD_FLAG_STA_MODE) {
+#ifdef ADPS_MODE_FROM_FILE
+ dhd_adps_mode_from_file(dhd);
+#else
+ if ((ret = dhd_enable_adps(dhd, ADPS_ENABLE)) != BCME_OK) {
+ DHD_ERROR(("%s dhd_enable_adps failed %d\n",
+ __FUNCTION__, ret));
+ }
+#endif /* ADPS_MODE_FROM_FILE */
+ }
+#endif /* WLADPS */
-int net_os_set_suspend_disable(struct net_device *dev, int val)
-{
- dhd_info_t *dhd = DHD_DEV_INFO(dev);
- int ret = 0;
+ /* Set PowerSave mode */
+ (void) dhd_wl_ioctl_cmd(dhd, WLC_SET_PM, (char *)&power_mode, sizeof(power_mode), TRUE, 0);
- if (dhd) {
- ret = dhd->pub.suspend_disable_flag;
- dhd->pub.suspend_disable_flag = val;
- }
- return ret;
-}
+#if defined(BCMSDIO)
+ /* Match Host and Dongle rx alignment */
+ dhd_iovar(dhd, 0, "bus:txglomalign", (char *)&dongle_align, sizeof(dongle_align),
+ NULL, 0, TRUE);
-int net_os_set_suspend(struct net_device *dev, int val, int force)
-{
- int ret = 0;
- dhd_info_t *dhd = DHD_DEV_INFO(dev);
+#if defined(CUSTOMER_HW2) && defined(USE_WL_CREDALL)
+ /* enable credall to reduce the chance of running out of bus credits */
+ dhd_iovar(dhd, 0, "bus:credall", (char *)&credall, sizeof(credall), NULL, 0, TRUE);
+#endif
- if (dhd && dhd->pub.conf->suspend_mode == EARLY_SUSPEND) {
- if (!val)
- dhd_conf_set_suspend_resume(&dhd->pub, val);
-#ifdef CONFIG_MACH_UNIVERSAL7420
-#endif /* CONFIG_MACH_UNIVERSAL7420 */
-#if defined(CONFIG_HAS_EARLYSUSPEND) && defined(DHD_USE_EARLYSUSPEND)
- ret = dhd_set_suspend(val, &dhd->pub);
-#else
- ret = dhd_suspend_resume_helper(dhd, val, force);
-#endif // endif
-#ifdef WL_CFG80211
- wl_cfg80211_update_power_mode(dev);
-#endif // endif
- if (val)
- dhd_conf_set_suspend_resume(&dhd->pub, val);
+#ifdef USE_WFA_CERT_CONF
+ if (sec_get_param_wfa_cert(dhd, SET_PARAM_BUS_TXGLOM_MODE, &glom) == BCME_OK) {
+ DHD_ERROR(("%s, read txglom param =%d\n", __FUNCTION__, glom));
}
- return ret;
-}
+#endif /* USE_WFA_CERT_CONF */
+ if (glom != DEFAULT_GLOM_VALUE) {
+ DHD_INFO(("%s set glom=0x%X\n", __FUNCTION__, glom));
+ dhd_iovar(dhd, 0, "bus:txglom", (char *)&glom, sizeof(glom), NULL, 0, TRUE);
+ }
+#endif /* defined(BCMSDIO) */
-int net_os_set_suspend_bcn_li_dtim(struct net_device *dev, int val)
-{
- dhd_info_t *dhd = DHD_DEV_INFO(dev);
+ /* Set a beacon-loss timeout so that link down is reported when roaming is off */
+ dhd_iovar(dhd, 0, "bcn_timeout", (char *)&bcn_timeout, sizeof(bcn_timeout), NULL, 0, TRUE);
- if (dhd) {
- DHD_ERROR(("%s: Set bcn_li_dtim in suspend %d\n",
- __FUNCTION__, val));
- dhd->pub.suspend_bcn_li_dtim = val;
- }
+ /* Setup assoc_retry_max count to reconnect target AP in dongle */
+ dhd_iovar(dhd, 0, "assoc_retry_max", (char *)&retry_max, sizeof(retry_max), NULL, 0, TRUE);
- return 0;
-}
+#if defined(AP) && !defined(WLP2P)
+ dhd_iovar(dhd, 0, "apsta", (char *)&apsta, sizeof(apsta), NULL, 0, TRUE);
-int net_os_set_max_dtim_enable(struct net_device *dev, int val)
-{
- dhd_info_t *dhd = DHD_DEV_INFO(dev);
+#endif /* defined(AP) && !defined(WLP2P) */
- if (dhd) {
- DHD_ERROR(("%s: use MAX bcn_li_dtim in suspend %s\n",
- __FUNCTION__, (val ? "Enable" : "Disable")));
- if (val) {
- dhd->pub.max_dtim_enable = TRUE;
- } else {
- dhd->pub.max_dtim_enable = FALSE;
- }
- } else {
- return -1;
- }
+#ifdef MIMO_ANT_SETTING
+ dhd_sel_ant_from_file(dhd);
+#endif /* MIMO_ANT_SETTING */
- return 0;
-}
+#if defined(SOFTAP)
+ if (ap_fw_loaded == TRUE) {
+ dhd_wl_ioctl_cmd(dhd, WLC_SET_DTIMPRD, (char *)&dtim, sizeof(dtim), TRUE, 0);
+ }
+#endif
-#ifdef DISABLE_DTIM_IN_SUSPEND
-int net_os_set_disable_dtim_in_suspend(struct net_device *dev, int val)
-{
- dhd_info_t *dhd = DHD_DEV_INFO(dev);
+#if defined(KEEP_ALIVE)
+ {
+ /* Set Keep Alive : be sure to use FW with -keepalive */
+ int res;
- if (dhd) {
- DHD_ERROR(("%s: Disable bcn_li_dtim in suspend %s\n",
- __FUNCTION__, (val ? "Enable" : "Disable")));
- if (val) {
- dhd->pub.disable_dtim_in_suspend = TRUE;
- } else {
- dhd->pub.disable_dtim_in_suspend = FALSE;
+#if defined(SOFTAP)
+ if (ap_fw_loaded == FALSE)
+#endif
+ if (!(dhd->op_mode &
+ (DHD_FLAG_HOSTAP_MODE | DHD_FLAG_MFG_MODE))) {
+ if ((res = dhd_keep_alive_onoff(dhd)) < 0)
+ DHD_ERROR(("%s set keeplive failed %d\n",
+ __FUNCTION__, res));
}
- } else {
- return -1;
}
+#endif /* defined(KEEP_ALIVE) */
- return 0;
-}
-#endif /* DISABLE_DTIM_IN_SUSPEND */
-
-#ifdef PKT_FILTER_SUPPORT
-int net_os_rxfilter_add_remove(struct net_device *dev, int add_remove, int num)
-{
- int ret = 0;
+#ifdef USE_WL_TXBF
+ ret = dhd_iovar(dhd, 0, "txbf", (char *)&txbf, sizeof(txbf), NULL, 0, TRUE);
+ if (ret < 0)
+ DHD_ERROR(("%s Set txbf failed %d\n", __FUNCTION__, ret));
-#ifndef GAN_LITE_NAT_KEEPALIVE_FILTER
- dhd_info_t *dhd = DHD_DEV_INFO(dev);
+#endif /* USE_WL_TXBF */
- if (!dhd_master_mode)
- add_remove = !add_remove;
- DHD_ERROR(("%s: add_remove = %d, num = %d\n", __FUNCTION__, add_remove, num));
- if (!dhd || (num == DHD_UNICAST_FILTER_NUM)) {
- return 0;
+ ret = dhd_iovar(dhd, 0, "scancache", (char *)&scancache_enab, sizeof(scancache_enab), NULL,
+ 0, TRUE);
+ if (ret < 0) {
+ DHD_ERROR(("%s Set scancache failed %d\n", __FUNCTION__, ret));
}
-#ifdef BLOCK_IPV6_PACKET
- /* customer want to use NO IPV6 packets only */
- if (num == DHD_MULTICAST6_FILTER_NUM) {
- return 0;
+#ifdef DISABLE_TXBFR
+ ret = dhd_iovar(dhd, 0, "txbf_bfr_cap", (char *)&txbf_bfr_cap, sizeof(txbf_bfr_cap), NULL,
+ 0, TRUE);
+ if (ret < 0) {
+ DHD_ERROR(("%s Clear txbf_bfr_cap failed %d\n", __FUNCTION__, ret));
}
-#endif /* BLOCK_IPV6_PACKET */
+#endif /* DISABLE_TXBFR */
- if (num >= dhd->pub.pktfilter_count) {
- return -EINVAL;
+#ifdef USE_WFA_CERT_CONF
+#ifdef USE_WL_FRAMEBURST
+ if (sec_get_param_wfa_cert(dhd, SET_PARAM_FRAMEBURST, &frameburst) == BCME_OK) {
+ DHD_ERROR(("%s, read frameburst param=%d\n", __FUNCTION__, frameburst));
+ }
+#endif /* USE_WL_FRAMEBURST */
+#ifdef DISABLE_FRAMEBURST_VSDB
+ g_frameburst = frameburst;
+#endif /* DISABLE_FRAMEBURST_VSDB */
+#endif /* USE_WFA_CERT_CONF */
+#ifdef DISABLE_WL_FRAMEBURST_SOFTAP
+ /* Disable frame bursting for SoftAP */
+ if (dhd->op_mode & DHD_FLAG_HOSTAP_MODE) {
+ frameburst = 0;
+ }
+#endif /* DISABLE_WL_FRAMEBURST_SOFTAP */
+ /* Set frameburst to value */
+ if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_FAKEFRAG, (char *)&frameburst,
+ sizeof(frameburst), TRUE, 0)) < 0) {
+ DHD_INFO(("%s frameburst not supported %d\n", __FUNCTION__, ret));
}
- ret = dhd_packet_filter_add_remove(&dhd->pub, add_remove, num);
-#endif /* !GAN_LITE_NAT_KEEPALIVE_FILTER */
-
- return ret;
-}
-
-int dhd_os_enable_packet_filter(dhd_pub_t *dhdp, int val)
+ iov_buf = (char*)kmalloc(WLC_IOCTL_SMLEN, GFP_KERNEL);
+ if (iov_buf == NULL) {
+ DHD_ERROR(("failed to allocate %d bytes for iov_buf\n", WLC_IOCTL_SMLEN));
+ ret = BCME_NOMEM;
+ goto done;
+ }
-{
- int ret = 0;
- /* Packet filtering is set only if we still in early-suspend and
- * we need either to turn it ON or turn it OFF
- * We can always turn it OFF in case of early-suspend, but we turn it
- * back ON only if suspend_disable_flag was not set
- */
- if (dhdp && dhdp->up) {
- if (dhdp->in_suspend) {
- if (!val || (val && !dhdp->suspend_disable_flag))
- dhd_enable_packet_filter(val, dhdp);
+#if defined(CUSTOM_AMPDU_BA_WSIZE)
+ /* Set ampdu ba wsize to 64 or 16 */
+#ifdef CUSTOM_AMPDU_BA_WSIZE
+ ampdu_ba_wsize = CUSTOM_AMPDU_BA_WSIZE;
+#endif
+ if (ampdu_ba_wsize != 0) {
+ ret = dhd_iovar(dhd, 0, "ampdu_ba_wsize", (char *)&du_ba_wsize,
+ sizeof(ampdu_ba_wsize), NULL, 0, TRUE);
+ if (ret < 0) {
+ DHD_ERROR(("%s Set ampdu_ba_wsize to %d failed %d\n",
+ __FUNCTION__, ampdu_ba_wsize, ret));
}
}
- return ret;
-}
+#endif
-/* function to enable/disable packet for Network device */
-int net_os_enable_packet_filter(struct net_device *dev, int val)
-{
- dhd_info_t *dhd = DHD_DEV_INFO(dev);
+#ifdef ENABLE_TEMP_THROTTLING
+ if (dhd->op_mode & DHD_FLAG_STA_MODE) {
+ memset(&temp_control, 0, sizeof(temp_control));
+ temp_control.enable = 1;
+ temp_control.control_bit = TEMP_THROTTLE_CONTROL_BIT;
+ ret = dhd_iovar(dhd, 0, "temp_throttle_control", (char *)&temp_control,
+ sizeof(temp_control), NULL, 0, TRUE);
+ if (ret < 0) {
+ DHD_ERROR(("%s Set temp_throttle_control to %d failed \n",
+ __FUNCTION__, ret));
+ }
+ }
+#endif /* ENABLE_TEMP_THROTTLING */
- DHD_ERROR(("%s: val = %d\n", __FUNCTION__, val));
- return dhd_os_enable_packet_filter(&dhd->pub, val);
-}
-#endif /* PKT_FILTER_SUPPORT */
+#if defined(CUSTOM_AMPDU_MPDU)
+ ampdu_mpdu = CUSTOM_AMPDU_MPDU;
+ if (ampdu_mpdu != 0 && (ampdu_mpdu <= ampdu_ba_wsize)) {
+ ret = dhd_iovar(dhd, 0, "ampdu_mpdu", (char *)&du_mpdu, sizeof(ampdu_mpdu),
+ NULL, 0, TRUE);
+ if (ret < 0) {
+ DHD_ERROR(("%s Set ampdu_mpdu to %d failed %d\n",
+ __FUNCTION__, CUSTOM_AMPDU_MPDU, ret));
+ }
+ }
+#endif /* CUSTOM_AMPDU_MPDU */
-int
-dhd_dev_init_ioctl(struct net_device *dev)
-{
- dhd_info_t *dhd = DHD_DEV_INFO(dev);
- int ret;
+#if defined(CUSTOM_AMPDU_RELEASE)
+ ampdu_release = CUSTOM_AMPDU_RELEASE;
+ if (ampdu_release != 0 && (ampdu_release <= ampdu_ba_wsize)) {
+ ret = dhd_iovar(dhd, 0, "ampdu_release", (char *)&du_release,
+ sizeof(ampdu_release), NULL, 0, TRUE);
+ if (ret < 0) {
+ DHD_ERROR(("%s Set ampdu_release to %d failed %d\n",
+ __FUNCTION__, CUSTOM_AMPDU_RELEASE, ret));
+ }
+ }
+#endif /* CUSTOM_AMPDU_RELEASE */
- if ((ret = dhd_sync_with_dongle(&dhd->pub)) < 0)
- goto done;
+#if defined(CUSTOM_AMSDU_AGGSF)
+ amsdu_aggsf = CUSTOM_AMSDU_AGGSF;
+ if (amsdu_aggsf != 0) {
+ ret = dhd_iovar(dhd, 0, "amsdu_aggsf", (char *)&amsdu_aggsf, sizeof(amsdu_aggsf),
+ NULL, 0, TRUE);
+ if (ret < 0) {
+ DHD_ERROR(("%s Set amsdu_aggsf to %d failed %d\n",
+ __FUNCTION__, CUSTOM_AMSDU_AGGSF, ret));
+ }
+ }
+#endif /* CUSTOM_AMSDU_AGGSF */
-done:
- return ret;
-}
+#if defined(SUPPORT_5G_1024QAM_VHT)
+#ifdef SUPPORT_5G_1024QAM_VHT
+ if (dhd_get_chipid(dhd) == BCM4361_CHIP_ID) {
+ vht_features |= 0x6; /* 5G 1024 QAM support */
+ }
+#endif /* SUPPORT_5G_1024QAM_VHT */
+ if (vht_features) {
+ ret = dhd_iovar(dhd, 0, "vht_features", (char *)&vht_features, sizeof(vht_features),
+ NULL, 0, TRUE);
+ if (ret < 0) {
+ DHD_ERROR(("%s vht_features set failed %d\n", __FUNCTION__, ret));
-int
-dhd_dev_get_feature_set(struct net_device *dev)
-{
- dhd_info_t *ptr = *(dhd_info_t **)netdev_priv(dev);
- dhd_pub_t *dhd = (&ptr->pub);
- int feature_set = 0;
+ if (ret == BCME_NOTDOWN) {
+ uint wl_down = 1;
+ ret = dhd_wl_ioctl_cmd(dhd, WLC_DOWN,
+ (char *)&wl_down, sizeof(wl_down), TRUE, 0);
+ DHD_ERROR(("%s vht_features fail WL_DOWN : %d,"
+ " vht_features = 0x%x\n",
+ __FUNCTION__, ret, vht_features));
- if (FW_SUPPORTED(dhd, sta))
- feature_set |= WIFI_FEATURE_INFRA;
- if (FW_SUPPORTED(dhd, dualband))
- feature_set |= WIFI_FEATURE_INFRA_5G;
- if (FW_SUPPORTED(dhd, p2p))
- feature_set |= WIFI_FEATURE_P2P;
- if (dhd->op_mode & DHD_FLAG_HOSTAP_MODE)
- feature_set |= WIFI_FEATURE_SOFT_AP;
- if (FW_SUPPORTED(dhd, tdls))
- feature_set |= WIFI_FEATURE_TDLS;
- if (FW_SUPPORTED(dhd, vsdb))
- feature_set |= WIFI_FEATURE_TDLS_OFFCHANNEL;
- if (FW_SUPPORTED(dhd, nan)) {
- feature_set |= WIFI_FEATURE_NAN;
- /* NAN is essentail for d2d rtt */
- if (FW_SUPPORTED(dhd, rttd2d))
- feature_set |= WIFI_FEATURE_D2D_RTT;
- }
-#ifdef RTT_SUPPORT
- if (dhd->rtt_supported) {
- feature_set |= WIFI_FEATURE_D2D_RTT;
- feature_set |= WIFI_FEATURE_D2AP_RTT;
+ ret = dhd_iovar(dhd, 0, "vht_features", (char *)&vht_features,
+ sizeof(vht_features), NULL, 0, TRUE);
+ DHD_ERROR(("%s vht_features set. ret --> %d\n", __FUNCTION__, ret));
+ }
+ }
}
-#endif /* RTT_SUPPORT */
-#ifdef LINKSTAT_SUPPORT
- feature_set |= WIFI_FEATURE_LINKSTAT;
-#endif /* LINKSTAT_SUPPORT */
-
-#if defined(PNO_SUPPORT) && !defined(DISABLE_ANDROID_PNO)
- if (dhd_is_pno_supported(dhd)) {
- feature_set |= WIFI_FEATURE_PNO;
-#ifdef GSCAN_SUPPORT
- /* terence 20171115: remove to get GTS PASS
- * com.google.android.gts.wifi.WifiHostTest#testWifiScannerBatchTimestamp
- */
-// feature_set |= WIFI_FEATURE_GSCAN;
-// feature_set |= WIFI_FEATURE_HAL_EPNO;
-#endif /* GSCAN_SUPPORT */
+#endif
+#ifdef DISABLE_11N_PROPRIETARY_RATES
+ ret = dhd_iovar(dhd, 0, "ht_features", (char *)&ht_features, sizeof(ht_features), NULL, 0,
+ TRUE);
+ if (ret < 0) {
+ DHD_ERROR(("%s ht_features set failed %d\n", __FUNCTION__, ret));
}
-#endif /* PNO_SUPPORT && !DISABLE_ANDROID_PNO */
-#ifdef RSSI_MONITOR_SUPPORT
- if (FW_SUPPORTED(dhd, rssi_mon)) {
- feature_set |= WIFI_FEATURE_RSSI_MONITOR;
+#endif /* DISABLE_11N_PROPRIETARY_RATES */
+#ifdef CUSTOM_PSPRETEND_THR
+ /* Set the ps-pretend threshold used in HostAPD mode */
+ ret = dhd_iovar(dhd, 0, "pspretend_threshold", (char *)&pspretend_thr,
+ sizeof(pspretend_thr), NULL, 0, TRUE);
+ if (ret < 0) {
+ DHD_ERROR(("%s pspretend_threshold for HostAPD failed %d\n",
+ __FUNCTION__, ret));
}
-#endif /* RSSI_MONITOR_SUPPORT */
-#ifdef WL11U
- feature_set |= WIFI_FEATURE_HOTSPOT;
-#endif /* WL11U */
-#ifdef NDO_CONFIG_SUPPORT
- feature_set |= WIFI_FEATURE_CONFIG_NDO;
-#endif /* NDO_CONFIG_SUPPORT */
-#ifdef KEEP_ALIVE
- feature_set |= WIFI_FEATURE_MKEEP_ALIVE;
-#endif /* KEEP_ALIVE */
-#ifdef FILTER_IE
- if (FW_SUPPORTED(dhd, fie)) {
- feature_set |= WIFI_FEATURE_FILTER_IE;
+#endif
+
+ ret = dhd_iovar(dhd, 0, "buf_key_b4_m4", (char *)&buf_key_b4_m4, sizeof(buf_key_b4_m4),
+ NULL, 0, TRUE);
+ if (ret < 0) {
+ DHD_ERROR(("%s buf_key_b4_m4 set failed %d\n", __FUNCTION__, ret));
}
-#endif /* FILTER_IE */
-#ifdef ROAMEXP_SUPPORT
- /* Check if the Android O roam feature is supported by FW */
- if (!(BCME_UNSUPPORTED == dhd_dev_set_whitelist_ssid(dev, NULL, 0, true))) {
- feature_set |= WIFI_FEATURE_CONTROL_ROAMING;
+#ifdef SUPPORT_SET_CAC
+ bcm_mkiovar("cac", (char *)&cac, sizeof(cac), iovbuf, sizeof(iovbuf));
+ if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0)) < 0) {
+ DHD_ERROR(("%s Failed to set cac to %d, %d\n", __FUNCTION__, cac, ret));
}
-#endif /* ROAMEXP_SUPPORT */
- return feature_set;
-}
-
-int
-dhd_dev_get_feature_set_matrix(struct net_device *dev, int num)
-{
- int feature_set_full;
- int ret = 0;
-
- feature_set_full = dhd_dev_get_feature_set(dev);
-
- /* Common feature set for all interface */
- ret = (feature_set_full & WIFI_FEATURE_INFRA) |
- (feature_set_full & WIFI_FEATURE_INFRA_5G) |
- (feature_set_full & WIFI_FEATURE_D2D_RTT) |
- (feature_set_full & WIFI_FEATURE_D2AP_RTT) |
- (feature_set_full & WIFI_FEATURE_RSSI_MONITOR) |
- (feature_set_full & WIFI_FEATURE_EPR);
-
- /* Specific feature group for each interface */
- switch (num) {
- case 0:
- ret |= (feature_set_full & WIFI_FEATURE_P2P) |
- /* Not supported yet */
- /* (feature_set_full & WIFI_FEATURE_NAN) | */
- (feature_set_full & WIFI_FEATURE_TDLS) |
- (feature_set_full & WIFI_FEATURE_PNO) |
- (feature_set_full & WIFI_FEATURE_HAL_EPNO) |
- (feature_set_full & WIFI_FEATURE_BATCH_SCAN) |
- (feature_set_full & WIFI_FEATURE_GSCAN) |
- (feature_set_full & WIFI_FEATURE_HOTSPOT) |
- (feature_set_full & WIFI_FEATURE_ADDITIONAL_STA);
- break;
-
- case 1:
- ret |= (feature_set_full & WIFI_FEATURE_P2P);
- /* Not yet verified NAN with P2P */
- /* (feature_set_full & WIFI_FEATURE_NAN) | */
- break;
+#endif /* SUPPORT_SET_CAC */
+#ifdef DHD_ULP
+ /* Get the required details from dongle during preinit ioctl */
+ dhd_ulp_preinit(dhd);
+#endif /* DHD_ULP */
- case 2:
- ret |= (feature_set_full & WIFI_FEATURE_NAN) |
- (feature_set_full & WIFI_FEATURE_TDLS) |
- (feature_set_full & WIFI_FEATURE_TDLS_OFFCHANNEL);
- break;
+ /* Read event_msgs mask */
+ ret = dhd_iovar(dhd, 0, "event_msgs", eventmask, WL_EVENTING_MASK_LEN, iovbuf,
+ sizeof(iovbuf), FALSE);
+ if (ret < 0) {
+ DHD_ERROR(("%s read Event mask failed %d\n", __FUNCTION__, ret));
+ goto done;
+ }
+ bcopy(iovbuf, eventmask, WL_EVENTING_MASK_LEN);
- default:
- ret = WIFI_FEATURE_INVALID;
- DHD_ERROR(("%s: Out of index(%d) for get feature set\n", __FUNCTION__, num));
- break;
+ /* Setup event_msgs */
+ setbit(eventmask, WLC_E_SET_SSID);
+ setbit(eventmask, WLC_E_PRUNE);
+ setbit(eventmask, WLC_E_AUTH);
+ setbit(eventmask, WLC_E_AUTH_IND);
+ setbit(eventmask, WLC_E_ASSOC);
+ setbit(eventmask, WLC_E_REASSOC);
+ setbit(eventmask, WLC_E_REASSOC_IND);
+ if (!(dhd->op_mode & DHD_FLAG_IBSS_MODE))
+ setbit(eventmask, WLC_E_DEAUTH);
+ setbit(eventmask, WLC_E_DEAUTH_IND);
+ setbit(eventmask, WLC_E_DISASSOC_IND);
+ setbit(eventmask, WLC_E_DISASSOC);
+ setbit(eventmask, WLC_E_JOIN);
+ setbit(eventmask, WLC_E_BSSID);
+ setbit(eventmask, WLC_E_START);
+ setbit(eventmask, WLC_E_ASSOC_IND);
+ setbit(eventmask, WLC_E_PSK_SUP);
+ setbit(eventmask, WLC_E_LINK);
+ setbit(eventmask, WLC_E_MIC_ERROR);
+ setbit(eventmask, WLC_E_ASSOC_REQ_IE);
+ setbit(eventmask, WLC_E_ASSOC_RESP_IE);
+#ifdef LIMIT_BORROW
+ setbit(eventmask, WLC_E_ALLOW_CREDIT_BORROW);
+#endif
+#ifndef WL_CFG80211
+ setbit(eventmask, WLC_E_PMKID_CACHE);
+ setbit(eventmask, WLC_E_TXFAIL);
+#endif
+ setbit(eventmask, WLC_E_JOIN_START);
+// setbit(eventmask, WLC_E_SCAN_COMPLETE); // terence 20150628: remove redundant event
+#ifdef DHD_DEBUG
+ setbit(eventmask, WLC_E_SCAN_CONFIRM_IND);
+#endif
+#ifdef WLMEDIA_HTSF
+ setbit(eventmask, WLC_E_HTSFSYNC);
+#endif /* WLMEDIA_HTSF */
+#ifdef PNO_SUPPORT
+ setbit(eventmask, WLC_E_PFN_NET_FOUND);
+ setbit(eventmask, WLC_E_PFN_BEST_BATCHING);
+ setbit(eventmask, WLC_E_PFN_BSSID_NET_FOUND);
+ setbit(eventmask, WLC_E_PFN_BSSID_NET_LOST);
+#endif /* PNO_SUPPORT */
+ /* enable dongle roaming event */
+ setbit(eventmask, WLC_E_ROAM);
+#ifdef WLTDLS
+ setbit(eventmask, WLC_E_TDLS_PEER_EVENT);
+#endif /* WLTDLS */
+#ifdef WL_ESCAN
+ setbit(eventmask, WLC_E_ESCAN_RESULT);
+#endif /* WL_ESCAN */
+#ifdef RTT_SUPPORT
+ setbit(eventmask, WLC_E_PROXD);
+#endif /* RTT_SUPPORT */
+#ifdef WL_CFG80211
+ setbit(eventmask, WLC_E_ESCAN_RESULT);
+ setbit(eventmask, WLC_E_AP_STARTED);
+ setbit(eventmask, WLC_E_ACTION_FRAME_RX);
+ if (dhd->op_mode & DHD_FLAG_P2P_MODE) {
+ setbit(eventmask, WLC_E_P2P_DISC_LISTEN_COMPLETE);
}
+#endif /* WL_CFG80211 */
- return ret;
-}
-
-#ifdef CUSTOM_FORCE_NODFS_FLAG
-int
-dhd_dev_set_nodfs(struct net_device *dev, u32 nodfs)
-{
- dhd_info_t *dhd = DHD_DEV_INFO(dev);
-
- if (nodfs)
- dhd->pub.dhd_cflags |= WLAN_PLAT_NODFS_FLAG;
- else
- dhd->pub.dhd_cflags &= ~WLAN_PLAT_NODFS_FLAG;
- dhd->pub.force_country_change = TRUE;
- return 0;
-}
-#endif /* CUSTOM_FORCE_NODFS_FLAG */
-
-#ifdef NDO_CONFIG_SUPPORT
-int
-dhd_dev_ndo_cfg(struct net_device *dev, u8 enable)
-{
- dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
- dhd_pub_t *dhdp = &dhd->pub;
- int ret = 0;
+#if defined(SHOW_LOGTRACE) && defined(LOGTRACE_FROM_FILE)
+ if (dhd_logtrace_from_file(dhd)) {
+ setbit(eventmask, WLC_E_TRACE);
+ } else {
+ clrbit(eventmask, WLC_E_TRACE);
+ }
+#elif defined(SHOW_LOGTRACE)
+ setbit(eventmask, WLC_E_TRACE);
+#else
+ clrbit(eventmask, WLC_E_TRACE);
+#endif /* defined(SHOW_LOGTRACE) && defined(LOGTRACE_FROM_FILE) */
- if (enable) {
- /* enable ND offload feature (will be enabled in FW on suspend) */
- dhdp->ndo_enable = TRUE;
+ setbit(eventmask, WLC_E_CSA_COMPLETE_IND);
+#ifdef DHD_WMF
+ setbit(eventmask, WLC_E_PSTA_PRIMARY_INTF_IND);
+#endif
+#ifdef CUSTOM_EVENT_PM_WAKE
+ setbit(eventmask, WLC_E_EXCESS_PM_WAKE_EVENT);
+#endif /* CUSTOM_EVENT_PM_WAKE */
+#ifdef DHD_LOSSLESS_ROAMING
+ setbit(eventmask, WLC_E_ROAM_PREP);
+#endif
+#if defined(PCIE_FULL_DONGLE) && defined(DHD_LOSSLESS_ROAMING)
+ dhd_update_flow_prio_map(dhd, DHD_FLOW_PRIO_LLR_MAP);
+#endif /* defined(PCIE_FULL_DONGLE) && defined(DHD_LOSSLESS_ROAMING) */
- /* Update changes of anycast address & DAD failed address */
- ret = dhd_dev_ndo_update_inet6addr(dev);
- if ((ret < 0) && (ret != BCME_NORESOURCE)) {
- DHD_ERROR(("%s: failed to update host ip addr: %d\n", __FUNCTION__, ret));
- return ret;
- }
- } else {
- /* disable ND offload feature */
- dhdp->ndo_enable = FALSE;
+#if defined(BCMPCIE) && defined(EAPOL_PKT_PRIO)
+ dhd_update_flow_prio_map(dhd, DHD_FLOW_PRIO_LLR_MAP);
+#endif /* defined(BCMPCIE) && defined(EAPOL_PKT_PRIO) */
- /* disable ND offload in FW */
- ret = dhd_ndo_enable(dhdp, FALSE);
- if (ret < 0) {
- DHD_ERROR(("%s: failed to disable NDO: %d\n", __FUNCTION__, ret));
- }
+#ifdef SUSPEND_EVENT
+ bcopy(eventmask, dhd->conf->resume_eventmask, WL_EVENTING_MASK_LEN);
+#endif
+ /* Write updated Event mask */
+ ret = dhd_iovar(dhd, 0, "event_msgs", eventmask, WL_EVENTING_MASK_LEN, NULL, 0, TRUE);
+ if (ret < 0) {
+ DHD_ERROR(("%s Set Event mask failed %d\n", __FUNCTION__, ret));
+ goto done;
}
- return ret;
-}
-/* #pragma used as a WAR to fix build failure,
-* ignore dropping of 'const' qualifier in 'list_entry' macro
-* this pragma disables the warning only for the following function
-*/
-#pragma GCC diagnostic push
-#pragma GCC diagnostic ignored "-Wcast-qual"
+ /* make up event mask ext message iovar for event larger than 128 */
+ msglen = ROUNDUP(WLC_E_LAST, NBBY)/NBBY + EVENTMSGS_EXT_STRUCT_SIZE;
+ eventmask_msg = (eventmsgs_ext_t*)kmalloc(msglen, GFP_KERNEL);
+ if (eventmask_msg == NULL) {
+ DHD_ERROR(("failed to allocate %d bytes for event_msg_ext\n", msglen));
+ ret = BCME_NOMEM;
+ goto done;
+ }
+ bzero(eventmask_msg, msglen);
+ eventmask_msg->ver = EVENTMSGS_VER;
+ eventmask_msg->len = ROUNDUP(WLC_E_LAST, NBBY)/NBBY;
-static int
-dhd_dev_ndo_get_valid_inet6addr_count(struct inet6_dev *inet6)
-{
- struct inet6_ifaddr *ifa;
- struct ifacaddr6 *acaddr = NULL;
- int addr_count = 0;
+ /* Read event_msgs_ext mask */
+ ret2 = dhd_iovar(dhd, 0, "event_msgs_ext", (char *)eventmask_msg, msglen, iov_buf,
+ WLC_IOCTL_SMLEN, FALSE);
- /* lock */
- read_lock_bh(&inet6->lock);
+ if (ret2 == 0) { /* event_msgs_ext must be supported */
+ bcopy(iov_buf, eventmask_msg, msglen);
+#ifdef RSSI_MONITOR_SUPPORT
+ setbit(eventmask_msg->mask, WLC_E_RSSI_LQM);
+#endif /* RSSI_MONITOR_SUPPORT */
+#ifdef GSCAN_SUPPORT
+ setbit(eventmask_msg->mask, WLC_E_PFN_GSCAN_FULL_RESULT);
+ setbit(eventmask_msg->mask, WLC_E_PFN_SCAN_COMPLETE);
+ setbit(eventmask_msg->mask, WLC_E_PFN_SSID_EXT);
+ setbit(eventmask_msg->mask, WLC_E_ROAM_EXP_EVENT);
+#endif /* GSCAN_SUPPORT */
+ setbit(eventmask_msg->mask, WLC_E_RSSI_LQM);
+#ifdef BT_WIFI_HANDOVER
+ setbit(eventmask_msg->mask, WLC_E_BT_WIFI_HANDOVER_REQ);
+#endif /* BT_WIFI_HANDOVER */
+#ifdef DBG_PKT_MON
+ setbit(eventmask_msg->mask, WLC_E_ROAM_PREP);
+#endif /* DBG_PKT_MON */
+#ifdef DHD_ULP
+ setbit(eventmask_msg->mask, WLC_E_ULP);
+#endif
+#ifdef ENABLE_TEMP_THROTTLING
+ setbit(eventmask_msg->mask, WLC_E_TEMP_THROTTLE);
+#endif /* ENABLE_TEMP_THROTTLING */
- /* Count valid unicast address */
- list_for_each_entry(ifa, &inet6->addr_list, if_list) {
- if ((ifa->flags & IFA_F_DADFAILED) == 0) {
- addr_count++;
+ /* Write updated Event mask */
+ eventmask_msg->ver = EVENTMSGS_VER;
+ eventmask_msg->command = EVENTMSGS_SET_MASK;
+ eventmask_msg->len = ROUNDUP(WLC_E_LAST, NBBY)/NBBY;
+ ret = dhd_iovar(dhd, 0, "event_msgs_ext", (char *)eventmask_msg, msglen, NULL, 0,
+ TRUE);
+ if (ret < 0) {
+ DHD_ERROR(("%s write event mask ext failed %d\n", __FUNCTION__, ret));
+ goto done;
}
+ } else if (ret2 == BCME_UNSUPPORTED || ret2 == BCME_VERSION) {
+ /* Skip for BCME_UNSUPPORTED or BCME_VERSION */
+ DHD_ERROR(("%s event_msgs_ext not support or version mismatch %d\n",
+ __FUNCTION__, ret2));
+ } else {
+ DHD_ERROR(("%s read event mask ext failed %d\n", __FUNCTION__, ret2));
+ ret = ret2;
+ goto done;
}
- /* Count anycast address */
- acaddr = inet6->ac_list;
- while (acaddr) {
- addr_count++;
- acaddr = acaddr->aca_next;
+#if defined(DHD_8021X_DUMP) && defined(SHOW_LOGTRACE)
+ /* Enabling event log trace for EAP events */
+ el_tag = (wl_el_tag_params_t *)kmalloc(sizeof(wl_el_tag_params_t), GFP_KERNEL);
+ if (el_tag == NULL) {
+ DHD_ERROR(("failed to allocate %d bytes for event_msg_ext\n",
+ (int)sizeof(wl_el_tag_params_t)));
+ ret = BCME_NOMEM;
+ goto done;
}
+ el_tag->tag = EVENT_LOG_TAG_4WAYHANDSHAKE;
+ el_tag->set = 1;
+ el_tag->flags = EVENT_LOG_TAG_FLAG_LOG;
+ bcm_mkiovar("event_log_tag_control", (char *)el_tag,
+ sizeof(*el_tag), iovbuf, sizeof(iovbuf));
+ dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
+#endif /* DHD_8021X_DUMP */
- /* unlock */
- read_unlock_bh(&inet6->lock);
+ dhd_wl_ioctl_cmd(dhd, WLC_SET_SCAN_CHANNEL_TIME, (char *)&scan_assoc_time,
+ sizeof(scan_assoc_time), TRUE, 0);
+ dhd_wl_ioctl_cmd(dhd, WLC_SET_SCAN_UNASSOC_TIME, (char *)&scan_unassoc_time,
+ sizeof(scan_unassoc_time), TRUE, 0);
+ dhd_wl_ioctl_cmd(dhd, WLC_SET_SCAN_PASSIVE_TIME, (char *)&scan_passive_time,
+ sizeof(scan_passive_time), TRUE, 0);
- return addr_count;
-}
+#ifdef ARP_OFFLOAD_SUPPORT
+ /* Set and enable ARP offload feature for STA only */
+#if defined(SOFTAP)
+ if (arpoe && !ap_fw_loaded)
+#else
+ if (arpoe)
+#endif
+ {
+ dhd_arp_offload_enable(dhd, TRUE);
+ dhd_arp_offload_set(dhd, dhd_arp_mode);
+ } else {
+ dhd_arp_offload_enable(dhd, FALSE);
+ dhd_arp_offload_set(dhd, 0);
+ }
+ dhd_arp_enable = arpoe;
+#endif /* ARP_OFFLOAD_SUPPORT */
-int
-dhd_dev_ndo_update_inet6addr(struct net_device *dev)
-{
- dhd_info_t *dhd;
- dhd_pub_t *dhdp;
- struct inet6_dev *inet6;
- struct inet6_ifaddr *ifa;
- struct ifacaddr6 *acaddr = NULL;
- struct in6_addr *ipv6_addr = NULL;
- int cnt, i;
- int ret = BCME_OK;
+#ifdef PKT_FILTER_SUPPORT
+ /* Set up default pktfilter definitions; they are enabled in suspend */
+ if (dhd_master_mode) {
+ dhd->pktfilter_count = 6;
+ dhd->pktfilter[DHD_BROADCAST_FILTER_NUM] = NULL;
+ if (!FW_SUPPORTED(dhd, pf6)) {
+ dhd->pktfilter[DHD_MULTICAST4_FILTER_NUM] = NULL;
+ dhd->pktfilter[DHD_MULTICAST6_FILTER_NUM] = NULL;
+ } else {
+ /* Immediately pkt filter TYPE 6: discard IPv4/IPv6 multicast packets */
+ dhd->pktfilter[DHD_MULTICAST4_FILTER_NUM] = DISCARD_IPV4_MCAST;
+ dhd->pktfilter[DHD_MULTICAST6_FILTER_NUM] = DISCARD_IPV6_MCAST;
+ }
+ /* apply APP pktfilter */
+ dhd->pktfilter[DHD_ARP_FILTER_NUM] = "105 0 0 12 0xFFFF 0x0806";
- /*
- * this function evaulates host ip address in struct inet6_dev
- * unicast addr in inet6_dev->addr_list
- * anycast addr in inet6_dev->ac_list
- * while evaluating inet6_dev, read_lock_bh() is required to prevent
- * access on null(freed) pointer.
- */
+ /* Setup filter to allow only unicast */
+ dhd->pktfilter[DHD_UNICAST_FILTER_NUM] = "100 0 0 0 0x01 0x00";
- if (dev) {
- inet6 = dev->ip6_ptr;
- if (!inet6) {
- DHD_ERROR(("%s: Invalid inet6_dev\n", __FUNCTION__));
- return BCME_ERROR;
- }
+ /* Add filter to pass multicastDNS packet and NOT filter out as Broadcast */
+ dhd->pktfilter[DHD_MDNS_FILTER_NUM] = NULL;
- dhd = DHD_DEV_INFO(dev);
- if (!dhd) {
- DHD_ERROR(("%s: Invalid dhd_info\n", __FUNCTION__));
- return BCME_ERROR;
+ dhd->pktfilter[DHD_BROADCAST_ARP_FILTER_NUM] = NULL;
+ if (FW_SUPPORTED(dhd, pf6)) {
+ /* Immediately pkt filter TYPE 6: discard broadcast IP packets */
+ dhd->pktfilter[DHD_IP4BCAST_DROP_FILTER_NUM] =
+ "107 1 6 IP4_H:16 0xf0 !0xe0 IP4_H:19 0xff 0xff";
+ dhd->pktfilter_count = 8;
}
- dhdp = &dhd->pub;
- if (dhd_net2idx(dhd, dev) != 0) {
- DHD_ERROR(("%s: Not primary interface\n", __FUNCTION__));
- return BCME_ERROR;
- }
- } else {
- DHD_ERROR(("%s: Invalid net_device\n", __FUNCTION__));
- return BCME_ERROR;
+#ifdef GAN_LITE_NAT_KEEPALIVE_FILTER
+ dhd->pktfilter_count = 4;
+ /* Setup filter to block broadcast and NAT Keepalive packets */
+ /* discard all broadcast packets */
+ dhd->pktfilter[DHD_UNICAST_FILTER_NUM] = "100 0 0 0 0xffffff 0xffffff";
+ /* discard NAT Keepalive packets */
+ dhd->pktfilter[DHD_BROADCAST_FILTER_NUM] = "102 0 0 36 0xffffffff 0x11940009";
+ /* discard NAT Keepalive packets */
+ dhd->pktfilter[DHD_MULTICAST4_FILTER_NUM] = "104 0 0 38 0xffffffff 0x11940009";
+ dhd->pktfilter[DHD_MULTICAST6_FILTER_NUM] = NULL;
+#endif /* GAN_LITE_NAT_KEEPALIVE_FILTER */
+ } else
+ dhd_conf_discard_pkt_filter(dhd);
+ dhd_conf_add_pkt_filter(dhd);
+
+#if defined(SOFTAP)
+ if (ap_fw_loaded) {
+ dhd_enable_packet_filter(0, dhd);
}
+#endif /* defined(SOFTAP) */
+ dhd_set_packet_filter(dhd);
+#endif /* PKT_FILTER_SUPPORT */
+#ifdef DISABLE_11N
+ ret = dhd_iovar(dhd, 0, "nmode", (char *)&nmode, sizeof(nmode), NULL, 0, TRUE);
+ if (ret < 0)
+ DHD_ERROR(("%s wl nmode 0 failed %d\n", __FUNCTION__, ret));
+#endif /* DISABLE_11N */
- /* Check host IP overflow */
- cnt = dhd_dev_ndo_get_valid_inet6addr_count(inet6);
- if (cnt > dhdp->ndo_max_host_ip) {
- if (!dhdp->ndo_host_ip_overflow) {
- dhdp->ndo_host_ip_overflow = TRUE;
- /* Disable ND offload in FW */
- DHD_INFO(("%s: Host IP overflow, disable NDO\n", __FUNCTION__));
- ret = dhd_ndo_enable(dhdp, FALSE);
- }
+#ifdef ENABLE_BCN_LI_BCN_WAKEUP
+ dhd_iovar(dhd, 0, "bcn_li_bcn", (char *)&bcn_li_bcn, sizeof(bcn_li_bcn), NULL, 0, TRUE);
+#endif /* ENABLE_BCN_LI_BCN_WAKEUP */
+ /* query for 'clmver' to get clm version info from firmware */
+ memset(buf, 0, sizeof(buf));
+ ret = dhd_iovar(dhd, 0, "clmver", NULL, 0, buf, sizeof(buf), FALSE);
+ if (ret < 0)
+ DHD_ERROR(("%s failed %d\n", __FUNCTION__, ret));
+ else {
+ char *clmver_temp_buf = NULL;
- return ret;
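+ /* The 'clmver' response carries a "Data: <version>" line; copy the token
+ * up to the newline into clm_version.
+ */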
+ if ((clmver_temp_buf = bcmstrstr(buf, "Data:")) == NULL) {
+ DHD_ERROR(("Couldn't find \"Data:\"\n"));
+ } else {
+ ptr = (clmver_temp_buf + strlen("Data:"));
+ if ((clmver_temp_buf = bcmstrtok(&ptr, "\n", 0)) == NULL) {
+ DHD_ERROR(("Couldn't find New line character\n"));
+ } else {
+ memset(clm_version, 0, CLM_VER_STR_LEN);
+ strncpy(clm_version, clmver_temp_buf,
+ MIN(strlen(clmver_temp_buf), CLM_VER_STR_LEN - 1));
+ }
+ }
}
- /*
- * Allocate ipv6 addr buffer to store addresses to be added/removed.
- * driver need to lock inet6_dev while accessing structure. but, driver
- * cannot use ioctl while inet6_dev locked since it requires scheduling
- * hence, copy addresses to the buffer and do ioctl after unlock.
- */
- ipv6_addr = (struct in6_addr *)MALLOC(dhdp->osh,
- sizeof(struct in6_addr) * dhdp->ndo_max_host_ip);
- if (!ipv6_addr) {
- DHD_ERROR(("%s: failed to alloc ipv6 addr buffer\n", __FUNCTION__));
- return BCME_NOMEM;
+ /* query for 'ver' to get version info from firmware */
+ memset(buf, 0, sizeof(buf));
+ ptr = buf;
+ ret = dhd_iovar(dhd, 0, "ver", NULL, 0, (char *)&buf, sizeof(buf), FALSE);
+ if (ret < 0)
+ DHD_ERROR(("%s failed %d\n", __FUNCTION__, ret));
+ else {
+ bcmstrtok(&ptr, "\n", 0);
+ strncpy(fw_version, buf, FW_VER_STR_LEN);
+ fw_version[FW_VER_STR_LEN-1] = '\0';
+ dhd_set_version_info(dhd, buf);
+#ifdef WRITE_WLANINFO
+ sec_save_wlinfo(buf, EPI_VERSION_STR, dhd->info->nv_path, clm_version);
+#endif /* WRITE_WLANINFO */
}
+#ifdef GEN_SOFTAP_INFO_FILE
+ sec_save_softap_info();
+#endif /* GEN_SOFTAP_INFO_FILE */
- /* Find DAD failed unicast address to be removed */
- cnt = 0;
- read_lock_bh(&inet6->lock);
- list_for_each_entry(ifa, &inet6->addr_list, if_list) {
- /* DAD failed unicast address */
- if ((ifa->flags & IFA_F_DADFAILED) &&
- (cnt < dhdp->ndo_max_host_ip)) {
- memcpy(&ipv6_addr[cnt], &ifa->addr, sizeof(struct in6_addr));
- cnt++;
- }
- }
- read_unlock_bh(&inet6->lock);
+#if defined(BCMSDIO)
+ dhd_txglom_enable(dhd, dhd->conf->bus_rxglom);
+#endif /* defined(BCMSDIO) */
- /* Remove DAD failed unicast address */
- for (i = 0; i < cnt; i++) {
- DHD_INFO(("%s: Remove DAD failed addr\n", __FUNCTION__));
- ret = dhd_ndo_remove_ip_by_addr(dhdp, (char *)&ipv6_addr[i], 0);
- if (ret < 0) {
- goto done;
- }
+#if defined(BCMSDIO) || defined(BCMDBUS)
+#ifdef PROP_TXSTATUS
+ if (disable_proptx ||
+#ifdef PROP_TXSTATUS_VSDB
+ /* enable WLFC only if the firmware is VSDB when it is in STA mode */
+ (dhd->op_mode != DHD_FLAG_HOSTAP_MODE &&
+ dhd->op_mode != DHD_FLAG_IBSS_MODE) ||
+#endif /* PROP_TXSTATUS_VSDB */
+ FALSE) {
+ wlfc_enable = FALSE;
+ }
+ ret = dhd_conf_get_disable_proptx(dhd);
+ if (ret == 0) {
+ disable_proptx = 0;
+ wlfc_enable = TRUE;
+ } else if (ret >= 1) {
+ disable_proptx = 1;
+ wlfc_enable = FALSE;
+ /* terence 20161229: we should set ampdu_hostreorder=0 when disable_proptx=1 */
+ hostreorder = 0;
}
- /* Remove all anycast address */
- ret = dhd_ndo_remove_ip_by_type(dhdp, WL_ND_IPV6_ADDR_TYPE_ANYCAST, 0);
- if (ret < 0) {
- goto done;
+#if defined(PROP_TXSTATUS)
+#ifdef USE_WFA_CERT_CONF
+ if (sec_get_param_wfa_cert(dhd, SET_PARAM_PROPTX, &proptx) == BCME_OK) {
+ DHD_ERROR(("%s , read proptx param=%d\n", __FUNCTION__, proptx));
+ wlfc_enable = proptx;
}
+#endif /* USE_WFA_CERT_CONF */
+#endif /* PROP_TXSTATUS */
- /*
- * if ND offload was disabled due to host ip overflow,
- * attempt to add valid unicast address.
- */
- if (dhdp->ndo_host_ip_overflow) {
- /* Find valid unicast address */
- cnt = 0;
- read_lock_bh(&inet6->lock);
- list_for_each_entry(ifa, &inet6->addr_list, if_list) {
- /* valid unicast address */
- if (!(ifa->flags & IFA_F_DADFAILED) &&
- (cnt < dhdp->ndo_max_host_ip)) {
- memcpy(&ipv6_addr[cnt], &ifa->addr,
- sizeof(struct in6_addr));
- cnt++;
- }
- }
- read_unlock_bh(&inet6->lock);
+#ifndef DISABLE_11N
+ ret = dhd_wl_ioctl_cmd(dhd, WLC_DOWN, (char *)&wl_down, sizeof(wl_down), TRUE, 0);
+ ret2 = dhd_iovar(dhd, 0, "ampdu_hostreorder", (char *)&hostreorder, sizeof(hostreorder),
+ NULL, 0, TRUE);
+ if (ret2 < 0) {
+ DHD_ERROR(("%s wl ampdu_hostreorder failed %d\n", __FUNCTION__, ret2));
+ if (ret2 != BCME_UNSUPPORTED)
+ ret = ret2;
- /* Add valid unicast address */
- for (i = 0; i < cnt; i++) {
- ret = dhd_ndo_add_ip_with_type(dhdp,
- (char *)&ipv6_addr[i], WL_ND_IPV6_ADDR_TYPE_UNICAST, 0);
- if (ret < 0) {
- goto done;
- }
- }
- }
+ if (ret == BCME_NOTDOWN) {
+ uint wl_down = 1;
+ ret2 = dhd_wl_ioctl_cmd(dhd, WLC_DOWN, (char *)&wl_down,
+ sizeof(wl_down), TRUE, 0);
+ DHD_ERROR(("%s ampdu_hostreorder fail WL_DOWN : %d, hostreorder :%d\n",
+ __FUNCTION__, ret2, hostreorder));
- /* Find anycast address */
- cnt = 0;
- read_lock_bh(&inet6->lock);
- acaddr = inet6->ac_list;
- while (acaddr) {
- if (cnt < dhdp->ndo_max_host_ip) {
- memcpy(&ipv6_addr[cnt], &acaddr->aca_addr, sizeof(struct in6_addr));
- cnt++;
+ ret2 = dhd_iovar(dhd, 0, "ampdu_hostreorder", (char *)&hostreorder,
+ sizeof(hostreorder), NULL, 0, TRUE);
+ DHD_ERROR(("%s wl ampdu_hostreorder. ret --> %d\n", __FUNCTION__, ret2));
+ if (ret2 != BCME_UNSUPPORTED)
+ ret = ret2;
}
- acaddr = acaddr->aca_next;
+ if (ret2 != BCME_OK)
+ hostreorder = 0;
}
- read_unlock_bh(&inet6->lock);
+#endif /* DISABLE_11N */
- /* Add anycast address */
- for (i = 0; i < cnt; i++) {
- ret = dhd_ndo_add_ip_with_type(dhdp,
- (char *)&ipv6_addr[i], WL_ND_IPV6_ADDR_TYPE_ANYCAST, 0);
+
+ if (wlfc_enable) {
+ dhd_wlfc_init(dhd);
+ /* terence 20161229: enable ampdu_hostreorder if tlv enabled */
+ dhd_conf_set_intiovar(dhd, WLC_SET_VAR, "ampdu_hostreorder", 1, 0, TRUE);
+ }
+#ifndef DISABLE_11N
+ else if (hostreorder)
+ dhd_wlfc_hostreorder_init(dhd);
+#endif /* DISABLE_11N */
+#else
+ /* terence 20161229: disable ampdu_hostreorder if PROP_TXSTATUS not defined */
+ printf("%s: not define PROP_TXSTATUS\n", __FUNCTION__);
+ dhd_conf_set_intiovar(dhd, WLC_SET_VAR, "ampdu_hostreorder", 0, 0, TRUE);
+#endif /* PROP_TXSTATUS */
+#endif /* BCMSDIO || BCMDBUS */
+#ifndef PCIE_FULL_DONGLE
+ /* For FD we need all the packets at DHD to handle intra-BSS forwarding */
+ if (FW_SUPPORTED(dhd, ap)) {
+ wl_ap_isolate = AP_ISOLATE_SENDUP_ALL;
+ ret = dhd_iovar(dhd, 0, "ap_isolate", (char *)&wl_ap_isolate, sizeof(wl_ap_isolate),
+ NULL, 0, TRUE);
+ if (ret < 0)
+ DHD_ERROR(("%s failed %d\n", __FUNCTION__, ret));
+ }
+#endif /* PCIE_FULL_DONGLE */
+#ifdef PNO_SUPPORT
+ if (!dhd->pno_state) {
+ dhd_pno_init(dhd);
+ }
+#endif
+#ifdef RTT_SUPPORT
+ if (!dhd->rtt_state) {
+ ret = dhd_rtt_init(dhd);
if (ret < 0) {
- goto done;
+ DHD_ERROR(("%s failed to initialize RTT\n", __FUNCTION__));
}
}
+#endif
+#ifdef WL11U
+ dhd_interworking_enable(dhd);
+#endif /* WL11U */
- /* Now All host IP addr were added successfully */
- if (dhdp->ndo_host_ip_overflow) {
- dhdp->ndo_host_ip_overflow = FALSE;
- if (dhdp->in_suspend) {
- /* drvier is in (early) suspend state, need to enable ND offload in FW */
- DHD_INFO(("%s: enable NDO\n", __FUNCTION__));
- ret = dhd_ndo_enable(dhdp, TRUE);
- }
+#ifdef SUPPORT_SENSORHUB
+ DHD_ERROR(("%s: SensorHub enabled %d\n",
+ __FUNCTION__, dhd->info->shub_enable));
+ ret2 = dhd_iovar(dhd, 0, "shub", NULL, 0,
+ (char *)&shub_ctl, sizeof(shub_ctl), FALSE);
+ if (ret2 < 0) {
+ DHD_ERROR(("%s failed to get shub hub enable information %d\n",
+ __FUNCTION__, ret2));
+ dhd->info->shub_enable = 0;
+ } else {
+ dhd->info->shub_enable = shub_ctl.enable;
+ DHD_ERROR(("%s: checking sensorhub enable %d\n",
+ __FUNCTION__, dhd->info->shub_enable));
}
-
-done:
- if (ipv6_addr) {
- MFREE(dhdp->osh, ipv6_addr, sizeof(struct in6_addr) * dhdp->ndo_max_host_ip);
+#else
+ DHD_ERROR(("%s: SensorHub diabled %d\n",
+ __FUNCTION__, dhd->info->shub_enable));
+ dhd->info->shub_enable = FALSE;
+ shub_ctl.enable = FALSE;
+ ret2 = dhd_iovar(dhd, 0, "shub", (char *)&shub_ctl, sizeof(shub_ctl),
+ NULL, 0, TRUE);
+ if (ret2 < 0) {
+ DHD_ERROR(("%s failed to set ShubHub disable\n",
+ __FUNCTION__));
}
+#endif /* SUPPORT_SENSORHUB */
- return ret;
-}
-#pragma GCC diagnostic pop
+#ifdef NDO_CONFIG_SUPPORT
+ dhd->ndo_enable = FALSE;
+ dhd->ndo_host_ip_overflow = FALSE;
+ dhd->ndo_max_host_ip = NDO_MAX_HOST_IP_ENTRIES;
#endif /* NDO_CONFIG_SUPPORT */
-#ifdef PNO_SUPPORT
-/* Linux wrapper to call common dhd_pno_stop_for_ssid */
-int
-dhd_dev_pno_stop_for_ssid(struct net_device *dev)
-{
- dhd_info_t *dhd = DHD_DEV_INFO(dev);
-
- return (dhd_pno_stop_for_ssid(&dhd->pub));
-}
-
-/* Linux wrapper to call common dhd_pno_set_for_ssid */
-int
-dhd_dev_pno_set_for_ssid(struct net_device *dev, wlc_ssid_ext_t* ssids_local, int nssid,
- uint16 scan_fr, int pno_repeat, int pno_freq_expo_max, uint16 *channel_list, int nchan)
-{
- dhd_info_t *dhd = DHD_DEV_INFO(dev);
-
- return (dhd_pno_set_for_ssid(&dhd->pub, ssids_local, nssid, scan_fr,
- pno_repeat, pno_freq_expo_max, channel_list, nchan));
-}
-
-/* Linux wrapper to call common dhd_pno_enable */
-int
-dhd_dev_pno_enable(struct net_device *dev, int enable)
-{
- dhd_info_t *dhd = DHD_DEV_INFO(dev);
-
- return (dhd_pno_enable(&dhd->pub, enable));
-}
-
-/* Linux wrapper to call common dhd_pno_set_for_hotlist */
-int
-dhd_dev_pno_set_for_hotlist(struct net_device *dev, wl_pfn_bssid_t *p_pfn_bssid,
- struct dhd_pno_hotlist_params *hotlist_params)
-{
- dhd_info_t *dhd = DHD_DEV_INFO(dev);
- return (dhd_pno_set_for_hotlist(&dhd->pub, p_pfn_bssid, hotlist_params));
-}
-/* Linux wrapper to call common dhd_dev_pno_stop_for_batch */
-int
-dhd_dev_pno_stop_for_batch(struct net_device *dev)
-{
- dhd_info_t *dhd = DHD_DEV_INFO(dev);
- return (dhd_pno_stop_for_batch(&dhd->pub));
-}
-
-/* Linux wrapper to call common dhd_dev_pno_set_for_batch */
-int
-dhd_dev_pno_set_for_batch(struct net_device *dev, struct dhd_pno_batch_params *batch_params)
-{
- dhd_info_t *dhd = DHD_DEV_INFO(dev);
- return (dhd_pno_set_for_batch(&dhd->pub, batch_params));
-}
-
-/* Linux wrapper to call common dhd_dev_pno_get_for_batch */
-int
-dhd_dev_pno_get_for_batch(struct net_device *dev, char *buf, int bufsize)
-{
- dhd_info_t *dhd = DHD_DEV_INFO(dev);
- return (dhd_pno_get_for_batch(&dhd->pub, buf, bufsize, PNO_STATUS_NORMAL));
-}
-#endif /* PNO_SUPPORT */
-
-#if defined(PNO_SUPPORT)
-#ifdef GSCAN_SUPPORT
-bool
-dhd_dev_is_legacy_pno_enabled(struct net_device *dev)
-{
- dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
+ /* ND offload version supported */
+ dhd->ndo_version = dhd_ndo_get_version(dhd);
+ if (dhd->ndo_version > 0) {
+ DHD_INFO(("%s: ndo version %d\n", __FUNCTION__, dhd->ndo_version));
- return (dhd_is_legacy_pno_enabled(&dhd->pub));
-}
+#ifdef NDO_CONFIG_SUPPORT
+ /* enable Unsolicited NA filter */
+ ret = dhd_ndo_unsolicited_na_filter_enable(dhd, 1);
+ if (ret < 0) {
+ DHD_ERROR(("%s failed to enable Unsolicited NA filter\n", __FUNCTION__));
+ }
+#endif /* NDO_CONFIG_SUPPORT */
+ }
-int
-dhd_dev_set_epno(struct net_device *dev)
-{
- dhd_info_t *dhd = DHD_DEV_INFO(dev);
- if (!dhd) {
- return BCME_ERROR;
+ /* check whether the dongle supports wbtext */
+ dhd->wbtext_support = FALSE;
+ if (dhd_wl_ioctl_get_intiovar(dhd, "wnm_bsstrans_resp", &wnm_bsstrans_resp,
+ WLC_GET_VAR, FALSE, 0) != BCME_OK) {
+ DHD_ERROR(("failed to get wnm_bsstrans_resp\n"));
}
- return dhd_pno_set_epno(&dhd->pub);
-}
-int
-dhd_dev_flush_fw_epno(struct net_device *dev)
-{
- dhd_info_t *dhd = DHD_DEV_INFO(dev);
- if (!dhd) {
- return BCME_ERROR;
+ if (wnm_bsstrans_resp == WL_BSSTRANS_POLICY_PRODUCT_WBTEXT) {
+ dhd->wbtext_support = TRUE;
}
- return dhd_pno_flush_fw_epno(&dhd->pub);
-}
-
-/* Linux wrapper to call common dhd_pno_set_cfg_gscan */
-int
-dhd_dev_pno_set_cfg_gscan(struct net_device *dev, dhd_pno_gscan_cmd_cfg_t type,
- void *buf, bool flush)
-{
- dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
-
- return (dhd_pno_set_cfg_gscan(&dhd->pub, type, buf, flush));
-}
-
-/* Linux wrapper to call common dhd_wait_batch_results_complete */
-int
-dhd_dev_wait_batch_results_complete(struct net_device *dev)
-{
- dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
+#ifndef WBTEXT
+ /* the driver can turn off the wbtext feature through the makefile */
+ if (dhd->wbtext_support) {
+ if (dhd_wl_ioctl_set_intiovar(dhd, "wnm_bsstrans_resp",
+ WL_BSSTRANS_POLICY_ROAM_ALWAYS,
+ WLC_SET_VAR, FALSE, 0) != BCME_OK) {
+ DHD_ERROR(("failed to disable WBTEXT\n"));
+ }
+ }
+#endif /* !WBTEXT */
- return (dhd_wait_batch_results_complete(&dhd->pub));
-}
+ /* WNM capabilities */
+ wnm_cap = 0
+#ifdef WL11U
+ | WL_WNM_BSSTRANS | WL_WNM_NOTIF
+#endif
+#ifdef WBTEXT
+ | WL_WNM_BSSTRANS | WL_WNM_MAXIDLE
+#endif
+ ;
+ if (dhd_iovar(dhd, 0, "wnm", (char *)&wnm_cap, sizeof(wnm_cap), NULL, 0, TRUE) < 0) {
+ DHD_ERROR(("failed to set WNM capabilities\n"));
+ }
-/* Linux wrapper to call common dhd_pno_lock_batch_results */
-int
-dhd_dev_pno_lock_access_batch_results(struct net_device *dev)
-{
- dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
+ dhd_conf_postinit_ioctls(dhd);
+done:
- return (dhd_pno_lock_batch_results(&dhd->pub));
+ if (eventmask_msg)
+ kfree(eventmask_msg);
+ if (iov_buf)
+ kfree(iov_buf);
+#if defined(DHD_8021X_DUMP) && defined(SHOW_LOGTRACE)
+ if (el_tag)
+ kfree(el_tag);
+#endif /* DHD_8021X_DUMP */
+ return ret;
}
-/* Linux wrapper to call common dhd_pno_unlock_batch_results */
-void
-dhd_dev_pno_unlock_access_batch_results(struct net_device *dev)
-{
- dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
- return (dhd_pno_unlock_batch_results(&dhd->pub));
-}
-/* Linux wrapper to call common dhd_pno_initiate_gscan_request */
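+/*
+ * Generic iovar helper: bcm_mkiovar() packs "name" plus param_buf, and the
+ * request is issued through dhd_wl_ioctl() as WLC_SET_VAR or WLC_GET_VAR.
+ * For GET, the response is copied back into res_buf; a scratch buffer is
+ * used when res_buf is smaller than the packed iovar.
+ */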
int
-dhd_dev_pno_run_gscan(struct net_device *dev, bool run, bool flush)
+dhd_iovar(dhd_pub_t *pub, int ifidx, char *name, char *param_buf, uint param_len, char *res_buf,
+ uint res_len, int set)
{
- dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
+ char *buf = NULL;
+ int input_len;
+ wl_ioctl_t ioc;
+ int ret;
- return (dhd_pno_initiate_gscan_request(&dhd->pub, run, flush));
-}
+ if (res_len > WLC_IOCTL_MAXLEN || param_len > WLC_IOCTL_MAXLEN)
+ return BCME_BADARG;
-/* Linux wrapper to call common dhd_pno_enable_full_scan_result */
-int
-dhd_dev_pno_enable_full_scan_result(struct net_device *dev, bool real_time_flag)
-{
- dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
+ input_len = strlen(name) + 1 + param_len;
+ if (input_len > WLC_IOCTL_MAXLEN)
+ return BCME_BADARG;
- return (dhd_pno_enable_full_scan_result(&dhd->pub, real_time_flag));
-}
+ buf = NULL;
+ if (set) {
+ if (res_buf || res_len != 0) {
+ DHD_ERROR(("%s: SET wrong arguemnet\n", __FUNCTION__));
+ ret = BCME_BADARG;
+ goto exit;
+ }
+ buf = kzalloc(input_len, GFP_KERNEL);
+ if (!buf) {
+ DHD_ERROR(("%s: mem alloc failed\n", __FUNCTION__));
+ ret = BCME_NOMEM;
+ goto exit;
+ }
+ ret = bcm_mkiovar(name, param_buf, param_len, buf, input_len);
+ if (!ret) {
+ ret = BCME_NOMEM;
+ goto exit;
+ }
-/* Linux wrapper to call common dhd_handle_hotlist_scan_evt */
-void *
-dhd_dev_hotlist_scan_event(struct net_device *dev,
- const void *data, int *send_evt_bytes, hotlist_type_t type, u32 *buf_len)
-{
- dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
+ ioc.cmd = WLC_SET_VAR;
+ ioc.buf = buf;
+ ioc.len = input_len;
+ ioc.set = set;
- return (dhd_handle_hotlist_scan_evt(&dhd->pub, data, send_evt_bytes, type, buf_len));
-}
+ ret = dhd_wl_ioctl(pub, ifidx, &ioc, ioc.buf, ioc.len);
+ } else {
+ if (!res_buf || !res_len) {
+ DHD_ERROR(("%s: GET failed. resp_buf NULL or length 0.\n", __FUNCTION__));
+ ret = BCME_BADARG;
+ goto exit;
+ }
+
+ if (res_len < input_len) {
+ DHD_INFO(("%s: res_len(%d) < input_len(%d)\n", __FUNCTION__,
+ res_len, input_len));
+ buf = kzalloc(input_len, GFP_KERNEL);
+ if (!buf) {
+ DHD_ERROR(("%s: mem alloc failed\n", __FUNCTION__));
+ ret = BCME_NOMEM;
+ goto exit;
+ }
+ ret = bcm_mkiovar(name, param_buf, param_len, buf, input_len);
+ if (!ret) {
+ ret = BCME_NOMEM;
+ goto exit;
+ }
-/* Linux wrapper to call common dhd_process_full_gscan_result */
-void *
-dhd_dev_process_full_gscan_result(struct net_device *dev,
-const void *data, uint32 len, int *send_evt_bytes)
-{
- dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
+ ioc.cmd = WLC_GET_VAR;
+ ioc.buf = buf;
+ ioc.len = input_len;
+ ioc.set = set;
- return (dhd_process_full_gscan_result(&dhd->pub, data, len, send_evt_bytes));
-}
+ ret = dhd_wl_ioctl(pub, ifidx, &ioc, ioc.buf, ioc.len);
-void
-dhd_dev_gscan_hotlist_cache_cleanup(struct net_device *dev, hotlist_type_t type)
-{
- dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
+ if (ret == BCME_OK) {
+ memcpy(res_buf, buf, res_len);
+ }
+ } else {
+ memset(res_buf, 0, res_len);
+ ret = bcm_mkiovar(name, param_buf, param_len, res_buf, res_len);
+ if (!ret) {
+ ret = BCME_NOMEM;
+ goto exit;
+ }
- dhd_gscan_hotlist_cache_cleanup(&dhd->pub, type);
+ ioc.cmd = WLC_GET_VAR;
+ ioc.buf = res_buf;
+ ioc.len = res_len;
+ ioc.set = set;
- return;
+ ret = dhd_wl_ioctl(pub, ifidx, &ioc, ioc.buf, ioc.len);
+ }
+ }
+exit:
+ kfree(buf);
+ return ret;
}
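+/*
+ * GET-only variant: the iovar is packed directly into the caller-supplied
+ * *resptr buffer and WLC_GET_VAR is issued on it, so the response is
+ * returned in place.
+ */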
int
-dhd_dev_gscan_batch_cache_cleanup(struct net_device *dev)
+dhd_getiovar(dhd_pub_t *pub, int ifidx, char *name, char *cmd_buf,
+ uint cmd_len, char **resptr, uint resp_len)
{
- dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
+ int len = resp_len;
+ int ret;
+ char *buf = *resptr;
+ wl_ioctl_t ioc;
+ if (resp_len > WLC_IOCTL_MAXLEN)
+ return BCME_BADARG;
- return (dhd_gscan_batch_cache_cleanup(&dhd->pub));
-}
+ memset(buf, 0, resp_len);
-/* Linux wrapper to call common dhd_retreive_batch_scan_results */
-int
-dhd_dev_retrieve_batch_scan(struct net_device *dev)
-{
- dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
+ ret = bcm_mkiovar(name, cmd_buf, cmd_len, buf, len);
+ if (ret == 0) {
+ return BCME_BUFTOOSHORT;
+ }
- return (dhd_retreive_batch_scan_results(&dhd->pub));
-}
+ memset(&ioc, 0, sizeof(ioc));
-/* Linux wrapper to call common dhd_pno_process_epno_result */
-void * dhd_dev_process_epno_result(struct net_device *dev,
- const void *data, uint32 event, int *send_evt_bytes)
-{
- dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
+ ioc.cmd = WLC_GET_VAR;
+ ioc.buf = buf;
+ ioc.len = len;
+ ioc.set = 0;
- return (dhd_pno_process_epno_result(&dhd->pub, data, event, send_evt_bytes));
+ ret = dhd_wl_ioctl(pub, ifidx, &ioc, ioc.buf, ioc.len);
+
+ return ret;
}
-int
-dhd_dev_set_lazy_roam_cfg(struct net_device *dev,
- wlc_roam_exp_params_t *roam_param)
-{
- dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
- wl_roam_exp_cfg_t roam_exp_cfg;
- int err;
- if (!roam_param) {
- return BCME_BADARG;
- }
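+/*
+ * Change the MTU of a DHD interface. The netdev must be down and the new
+ * size must fall within [DHD_MIN_MTU, DHD_MAX_MTU] (1500 to 1752 bytes).
+ */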
+int dhd_change_mtu(dhd_pub_t *dhdp, int new_mtu, int ifidx)
+{
+ struct dhd_info *dhd = dhdp->info;
+ struct net_device *dev = NULL;
- DHD_INFO(("a_band_boost_thr %d a_band_penalty_thr %d\n",
- roam_param->a_band_boost_threshold, roam_param->a_band_penalty_threshold));
- DHD_INFO(("a_band_boost_factor %d a_band_penalty_factor %d cur_bssid_boost %d\n",
- roam_param->a_band_boost_factor, roam_param->a_band_penalty_factor,
- roam_param->cur_bssid_boost));
- DHD_INFO(("alert_roam_trigger_thr %d a_band_max_boost %d\n",
- roam_param->alert_roam_trigger_threshold, roam_param->a_band_max_boost));
+ ASSERT(dhd && dhd->iflist[ifidx]);
+ dev = dhd->iflist[ifidx]->net;
+ ASSERT(dev);
- memcpy(&roam_exp_cfg.params, roam_param, sizeof(*roam_param));
- roam_exp_cfg.version = ROAM_EXP_CFG_VERSION;
- roam_exp_cfg.flags = ROAM_EXP_CFG_PRESENT;
- if (dhd->pub.lazy_roam_enable) {
- roam_exp_cfg.flags |= ROAM_EXP_ENABLE_FLAG;
- }
- err = dhd_iovar(&dhd->pub, 0, "roam_exp_params",
- (char *)&roam_exp_cfg, sizeof(roam_exp_cfg), NULL, 0,
- TRUE);
- if (err < 0) {
- DHD_ERROR(("%s : Failed to execute roam_exp_params %d\n", __FUNCTION__, err));
+ if (netif_running(dev)) {
+ DHD_ERROR(("%s: Must be down to change its MTU\n", dev->name));
+ return BCME_NOTDOWN;
}
- return err;
-}
-int
-dhd_dev_lazy_roam_enable(struct net_device *dev, uint32 enable)
-{
- int err;
- dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
- wl_roam_exp_cfg_t roam_exp_cfg;
+#define DHD_MIN_MTU 1500
+#define DHD_MAX_MTU 1752
- memset(&roam_exp_cfg, 0, sizeof(roam_exp_cfg));
- roam_exp_cfg.version = ROAM_EXP_CFG_VERSION;
- if (enable) {
- roam_exp_cfg.flags = ROAM_EXP_ENABLE_FLAG;
+ if ((new_mtu < DHD_MIN_MTU) || (new_mtu > DHD_MAX_MTU)) {
+ DHD_ERROR(("%s: MTU size %d is invalid.\n", __FUNCTION__, new_mtu));
+ return BCME_BADARG;
}
- err = dhd_iovar(&dhd->pub, 0, "roam_exp_params",
- (char *)&roam_exp_cfg, sizeof(roam_exp_cfg), NULL, 0,
- TRUE);
- if (err < 0) {
- DHD_ERROR(("%s : Failed to execute roam_exp_params %d\n", __FUNCTION__, err));
- } else {
- dhd->pub.lazy_roam_enable = (enable != 0);
- }
- return err;
+ dev->mtu = new_mtu;
+ return 0;
}
-int
-dhd_dev_set_lazy_roam_bssid_pref(struct net_device *dev,
- wl_bssid_pref_cfg_t *bssid_pref, uint32 flush)
+#ifdef ARP_OFFLOAD_SUPPORT
+/* add or remove AOE host ip(s) (up to 8 IPs on the interface) */
+void
+aoe_update_host_ipv4_table(dhd_pub_t *dhd_pub, u32 ipa, bool add, int idx)
{
- int err;
- uint len;
- dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
+ u32 ipv4_buf[MAX_IPV4_ENTRIES]; /* temp save for AOE host_ip table */
+ int i;
+ int ret;
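+ /*
+ * Read the current host_ip table from the dongle, clear it, patch the
+ * local copy (add or remove 'ipa'), then write the surviving entries back.
+ */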
- bssid_pref->version = BSSID_PREF_LIST_VERSION;
- /* By default programming bssid pref flushes out old values */
- bssid_pref->flags = (flush && !bssid_pref->count) ? ROAM_EXP_CLEAR_BSSID_PREF: 0;
- len = sizeof(wl_bssid_pref_cfg_t);
- if (bssid_pref->count) {
- len += (bssid_pref->count - 1) * sizeof(wl_bssid_pref_list_t);
- }
- err = dhd_iovar(&dhd->pub, 0, "roam_exp_bssid_pref",
- (char *)bssid_pref, len, NULL, 0, TRUE);
- if (err != BCME_OK) {
- DHD_ERROR(("%s : Failed to execute roam_exp_bssid_pref %d\n", __FUNCTION__, err));
- }
- return err;
-}
-#endif /* GSCAN_SUPPORT */
+ bzero(ipv4_buf, sizeof(ipv4_buf));
-#if defined(GSCAN_SUPPORT) || defined(ROAMEXP_SUPPORT)
-int
-dhd_dev_set_blacklist_bssid(struct net_device *dev, maclist_t *blacklist,
- uint32 len, uint32 flush)
-{
- int err;
- dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
- int macmode;
+ /* display what we've got */
+ ret = dhd_arp_get_arp_hostip_table(dhd_pub, ipv4_buf, sizeof(ipv4_buf), idx);
+ DHD_ARPOE(("%s: hostip table read from Dongle:\n", __FUNCTION__));
+#ifdef AOE_DBG
+ dhd_print_buf(ipv4_buf, 32, 4); /* max 8 IPs 4b each */
+#endif
+ /* now that the host_ip table is saved, clear it in the dongle AOE */
+ dhd_aoe_hostip_clr(dhd_pub, idx);
- if (blacklist) {
- err = dhd_wl_ioctl_cmd(&(dhd->pub), WLC_SET_MACLIST, (char *)blacklist,
- len, TRUE, 0);
- if (err != BCME_OK) {
- DHD_ERROR(("%s : WLC_SET_MACLIST failed %d\n", __FUNCTION__, err));
- return err;
- }
- }
- /* By default programming blacklist flushes out old values */
- macmode = (flush && !blacklist) ? WLC_MACMODE_DISABLED : WLC_MACMODE_DENY;
- err = dhd_wl_ioctl_cmd(&(dhd->pub), WLC_SET_MACMODE, (char *)&macmode,
- sizeof(macmode), TRUE, 0);
- if (err != BCME_OK) {
- DHD_ERROR(("%s : WLC_SET_MACMODE failed %d\n", __FUNCTION__, err));
+ if (ret) {
+ DHD_ERROR(("%s failed\n", __FUNCTION__));
+ return;
}
- return err;
-}
-int
-dhd_dev_set_whitelist_ssid(struct net_device *dev, wl_ssid_whitelist_t *ssid_whitelist,
- uint32 len, uint32 flush)
-{
- int err;
- dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
- wl_ssid_whitelist_t whitelist_ssid_flush;
+ for (i = 0; i < MAX_IPV4_ENTRIES; i++) {
+ if (add && (ipv4_buf[i] == 0)) {
+ ipv4_buf[i] = ipa;
+ add = FALSE; /* added ipa to local table */
+ DHD_ARPOE(("%s: Saved new IP in temp arp_hostip[%d]\n",
+ __FUNCTION__, i));
+ } else if (ipv4_buf[i] == ipa) {
+ ipv4_buf[i] = 0;
+ DHD_ARPOE(("%s: removed IP:%x from temp table %d\n",
+ __FUNCTION__, ipa, i));
+ }
- if (!ssid_whitelist) {
- if (flush) {
- ssid_whitelist = &whitelist_ssid_flush;
- ssid_whitelist->ssid_count = 0;
- } else {
- DHD_ERROR(("%s : Nothing to do here\n", __FUNCTION__));
- return BCME_BADARG;
+ if (ipv4_buf[i] != 0) {
+ /* add back host_ip entries from our local cache */
+ dhd_arp_offload_add_ip(dhd_pub, ipv4_buf[i], idx);
+ DHD_ARPOE(("%s: added IP:%x to dongle arp_hostip[%d]\n\n",
+ __FUNCTION__, ipv4_buf[i], i));
}
}
- ssid_whitelist->version = SSID_WHITELIST_VERSION;
- ssid_whitelist->flags = flush ? ROAM_EXP_CLEAR_SSID_WHITELIST : 0;
- err = dhd_iovar(&dhd->pub, 0, "roam_exp_ssid_whitelist", (char *)ssid_whitelist, len, NULL,
- 0, TRUE);
- if (err != BCME_OK) {
- DHD_ERROR(("%s : Failed to execute roam_exp_bssid_pref %d\n", __FUNCTION__, err));
- }
- return err;
+#ifdef AOE_DBG
+ /* see the resulting hostip table */
+ dhd_arp_get_arp_hostip_table(dhd_pub, ipv4_buf, sizeof(ipv4_buf), idx);
+ DHD_ARPOE(("%s: read back arp_hostip table:\n", __FUNCTION__));
+ dhd_print_buf(ipv4_buf, 32, 4); /* max 8 IPs 4b each */
+#endif
}
-#endif /* GSCAN_SUPPORT || ROAMEXP_SUPPORT */
-#if defined(GSCAN_SUPPORT) || defined(DHD_GET_VALID_CHANNELS)
-/* Linux wrapper to call common dhd_pno_get_gscan */
-void *
-dhd_dev_pno_get_gscan(struct net_device *dev, dhd_pno_gscan_cmd_cfg_t type,
- void *info, uint32 *len)
+/*
+ * Notification mechanism from kernel to our driver. This function is called by the Linux kernel
+ * whenever there is an event related to an IP address.
+ * ptr : kernel provided pointer to IP address that has changed
+ */
+static int dhd_inetaddr_notifier_call(struct notifier_block *this,
+ unsigned long event,
+ void *ptr)
{
- dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
+ struct in_ifaddr *ifa = (struct in_ifaddr *)ptr;
- return (dhd_pno_get_gscan(&dhd->pub, type, info, len));
-}
-#endif /* GSCAN_SUPPORT || DHD_GET_VALID_CHANNELS */
-#endif // endif
+ dhd_info_t *dhd;
+ dhd_pub_t *dhd_pub;
+ int idx;
-#ifdef RSSI_MONITOR_SUPPORT
-int
-dhd_dev_set_rssi_monitor_cfg(struct net_device *dev, int start,
- int8 max_rssi, int8 min_rssi)
-{
- int err;
- wl_rssi_monitor_cfg_t rssi_monitor;
- dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
+ if (!dhd_arp_enable)
+ return NOTIFY_DONE;
+ if (!ifa || !(ifa->ifa_dev->dev))
+ return NOTIFY_DONE;
- rssi_monitor.version = RSSI_MONITOR_VERSION;
- rssi_monitor.max_rssi = max_rssi;
- rssi_monitor.min_rssi = min_rssi;
- rssi_monitor.flags = start ? 0: RSSI_MONITOR_STOP;
- err = dhd_iovar(&dhd->pub, 0, "rssi_monitor", (char *)&rssi_monitor, sizeof(rssi_monitor),
- NULL, 0, TRUE);
- if (err < 0 && err != BCME_UNSUPPORTED) {
- DHD_ERROR(("%s : Failed to execute rssi_monitor %d\n", __FUNCTION__, err));
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 31))
+ /* Filter notifications meant for non-Broadcom devices */
+ if ((ifa->ifa_dev->dev->netdev_ops != &dhd_ops_pri) &&
+ (ifa->ifa_dev->dev->netdev_ops != &dhd_ops_virt)) {
+#if defined(WL_ENABLE_P2P_IF)
+ if (!wl_cfgp2p_is_ifops(ifa->ifa_dev->dev->netdev_ops))
+#endif /* WL_ENABLE_P2P_IF */
+ return NOTIFY_DONE;
}
- return err;
-}
-#endif /* RSSI_MONITOR_SUPPORT */
+#endif /* LINUX_VERSION_CODE */
-#ifdef DHDTCPACK_SUPPRESS
-int
-dhd_dev_set_tcpack_sup_mode_cfg(struct net_device *dev, uint8 enable)
-{
- int err;
- dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
+ dhd = DHD_DEV_INFO(ifa->ifa_dev->dev);
+ if (!dhd)
+ return NOTIFY_DONE;
- err = dhd_tcpack_suppress_set(&dhd->pub, enable);
- if (err != BCME_OK) {
- DHD_ERROR(("%s : Failed to set tcpack_suppress mode: %d\n", __FUNCTION__, err));
+ dhd_pub = &dhd->pub;
+
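+ /* Map the notifying netdev to a DHD interface index; ARP offload
+ * version 1 only supports the primary interface (idx 0).
+ */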
+ if (dhd_pub->arp_version == 1) {
+ idx = 0;
+ } else {
+ for (idx = 0; idx < DHD_MAX_IFS; idx++) {
+ if (dhd->iflist[idx] && dhd->iflist[idx]->net == ifa->ifa_dev->dev)
+ break;
+ }
+ if (idx < DHD_MAX_IFS)
+ DHD_TRACE(("ifidx : %p %s %d\n", dhd->iflist[idx]->net,
+ dhd->iflist[idx]->name, dhd->iflist[idx]->idx));
+ else {
+ DHD_ERROR(("Cannot find ifidx for(%s) set to 0\n", ifa->ifa_label));
+ idx = 0;
+ }
}
- return err;
-}
-#endif /* DHDTCPACK_SUPPRESS */
-int
-dhd_dev_cfg_rand_mac_oui(struct net_device *dev, uint8 *oui)
-{
- dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
- dhd_pub_t *dhdp = &dhd->pub;
+ switch (event) {
+ case NETDEV_UP:
+ DHD_ARPOE(("%s: [%s] Up IP: 0x%x\n",
+ __FUNCTION__, ifa->ifa_label, ifa->ifa_address));
- if (!dhdp || !oui) {
- DHD_ERROR(("NULL POINTER : %s\n",
- __FUNCTION__));
- return BCME_ERROR;
- }
- if (ETHER_ISMULTI(oui)) {
- DHD_ERROR(("Expected unicast OUI\n"));
- return BCME_ERROR;
- } else {
- uint8 *rand_mac_oui = dhdp->rand_mac_oui;
- memcpy(rand_mac_oui, oui, DOT11_OUI_LEN);
- DHD_ERROR(("Random MAC OUI to be used - "MACOUIDBG"\n",
- MACOUI2STRDBG(rand_mac_oui)));
+ if (dhd->pub.busstate != DHD_BUS_DATA) {
+ DHD_ERROR(("%s: bus not ready, exit\n", __FUNCTION__));
+ if (dhd->pend_ipaddr) {
+ DHD_ERROR(("%s: overwrite pending ipaddr: 0x%x\n",
+ __FUNCTION__, dhd->pend_ipaddr));
+ }
+ dhd->pend_ipaddr = ifa->ifa_address;
+ break;
+ }
+
+#ifdef AOE_IP_ALIAS_SUPPORT
+ DHD_ARPOE(("%s:add aliased IP to AOE hostip cache\n",
+ __FUNCTION__));
+ aoe_update_host_ipv4_table(dhd_pub, ifa->ifa_address, TRUE, idx);
+#endif /* AOE_IP_ALIAS_SUPPORT */
+ break;
+
+ case NETDEV_DOWN:
+ DHD_ARPOE(("%s: [%s] Down IP: 0x%x\n",
+ __FUNCTION__, ifa->ifa_label, ifa->ifa_address));
+ dhd->pend_ipaddr = 0;
+#ifdef AOE_IP_ALIAS_SUPPORT
+ DHD_ARPOE(("%s:interface is down, AOE clr all for this if\n",
+ __FUNCTION__));
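+ /* In AP mode, or for a non-primary netdev, only the matching entry is
+ * removed; for the primary STA interface the whole host_ip and ARP
+ * tables are cleared.
+ */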
+ if ((dhd_pub->op_mode & DHD_FLAG_HOSTAP_MODE) ||
+ (ifa->ifa_dev->dev != dhd_linux_get_primary_netdev(dhd_pub))) {
+ aoe_update_host_ipv4_table(dhd_pub, ifa->ifa_address, FALSE, idx);
+ } else
+#endif /* AOE_IP_ALIAS_SUPPORT */
+ {
+ dhd_aoe_hostip_clr(&dhd->pub, idx);
+ dhd_aoe_arp_clr(&dhd->pub, idx);
+ }
+ break;
+
+ default:
+ DHD_ARPOE(("%s: do noting for [%s] Event: %lu\n",
+ __func__, ifa->ifa_label, event));
+ break;
}
- return BCME_OK;
+ return NOTIFY_DONE;
}
+#endif /* ARP_OFFLOAD_SUPPORT */
-int
-dhd_set_rand_mac_oui(dhd_pub_t *dhd)
+#if defined(CONFIG_IPV6) && defined(IPV6_NDO_SUPPORT)
+/* Neighbor Discovery Offload: deferred handler */
+static void
+dhd_inet6_work_handler(void *dhd_info, void *event_data, u8 event)
{
- int err;
- wl_pfn_macaddr_cfg_t wl_cfg;
- uint8 *rand_mac_oui = dhd->rand_mac_oui;
+ struct ipv6_work_info_t *ndo_work = (struct ipv6_work_info_t *)event_data;
+ dhd_info_t *dhd = (dhd_info_t *)dhd_info;
+ dhd_pub_t *dhdp;
+ int ret;
- memset(&wl_cfg.macaddr, 0, ETHER_ADDR_LEN);
- memcpy(&wl_cfg.macaddr, rand_mac_oui, DOT11_OUI_LEN);
- wl_cfg.version = WL_PFN_MACADDR_CFG_VER;
- if (ETHER_ISNULLADDR(&wl_cfg.macaddr)) {
- wl_cfg.flags = 0;
- } else {
- wl_cfg.flags = (WL_PFN_MAC_OUI_ONLY_MASK | WL_PFN_SET_MAC_UNASSOC_MASK);
+ if (!dhd) {
+ DHD_ERROR(("%s: invalid dhd_info\n", __FUNCTION__));
+ goto done;
}
+ dhdp = &dhd->pub;
- DHD_ERROR(("Setting rand mac oui to FW - "MACOUIDBG"\n",
- MACOUI2STRDBG(rand_mac_oui)));
+ if (event != DHD_WQ_WORK_IPV6_NDO) {
+ DHD_ERROR(("%s: unexpected event\n", __FUNCTION__));
+ goto done;
+ }
- err = dhd_iovar(dhd, 0, "pfn_macaddr", (char *)&wl_cfg, sizeof(wl_cfg), NULL, 0, TRUE);
- if (err < 0) {
- DHD_ERROR(("%s : failed to execute pfn_macaddr %d\n", __FUNCTION__, err));
+ if (!ndo_work) {
+ DHD_ERROR(("%s: ipv6 work info is not initialized\n", __FUNCTION__));
+ return;
}
- return err;
-}
-#if defined(RTT_SUPPORT) && defined(WL_CFG80211)
-/* Linux wrapper to call common dhd_pno_set_cfg_gscan */
-int
-dhd_dev_rtt_set_cfg(struct net_device *dev, void *buf)
-{
- dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
+ switch (ndo_work->event) {
+ case NETDEV_UP:
+#ifndef NDO_CONFIG_SUPPORT
+ DHD_TRACE(("%s: Enable NDO \n ", __FUNCTION__));
+ ret = dhd_ndo_enable(dhdp, TRUE);
+ if (ret < 0) {
+ DHD_ERROR(("%s: Enabling NDO Failed %d\n", __FUNCTION__, ret));
+ }
+#endif /* !NDO_CONFIG_SUPPORT */
+ DHD_TRACE(("%s: Add a host ip for NDO\n", __FUNCTION__));
+ if (dhdp->ndo_version > 0) {
+ /* inet6 addr notifier called only for unicast address */
+ ret = dhd_ndo_add_ip_with_type(dhdp, &ndo_work->ipv6_addr[0],
+ WL_ND_IPV6_ADDR_TYPE_UNICAST, ndo_work->if_idx);
+ } else {
+ ret = dhd_ndo_add_ip(dhdp, &ndo_work->ipv6_addr[0],
+ ndo_work->if_idx);
+ }
+ if (ret < 0) {
+ DHD_ERROR(("%s: Adding a host ip for NDO failed %d\n",
+ __FUNCTION__, ret));
+ }
+ break;
+ case NETDEV_DOWN:
+ if (dhdp->ndo_version > 0) {
+ DHD_TRACE(("%s: Remove a host ip for NDO\n", __FUNCTION__));
+ ret = dhd_ndo_remove_ip_by_addr(dhdp,
+ &ndo_work->ipv6_addr[0], ndo_work->if_idx);
+ } else {
+ DHD_TRACE(("%s: Clear host ip table for NDO \n", __FUNCTION__));
+ ret = dhd_ndo_remove_ip(dhdp, ndo_work->if_idx);
+ }
+ if (ret < 0) {
+ DHD_ERROR(("%s: Removing host ip for NDO failed %d\n",
+ __FUNCTION__, ret));
+ goto done;
+ }
+#ifdef NDO_CONFIG_SUPPORT
+ if (dhdp->ndo_host_ip_overflow) {
+ ret = dhd_dev_ndo_update_inet6addr(
+ dhd_idx2net(dhdp, ndo_work->if_idx));
+ if ((ret < 0) && (ret != BCME_NORESOURCE)) {
+ DHD_ERROR(("%s: Updating host ip for NDO failed %d\n",
+ __FUNCTION__, ret));
+ goto done;
+ }
+ }
+#else /* !NDO_CONFIG_SUPPORT */
+ DHD_TRACE(("%s: Disable NDO\n ", __FUNCTION__));
+ ret = dhd_ndo_enable(dhdp, FALSE);
+ if (ret < 0) {
+ DHD_ERROR(("%s: disabling NDO Failed %d\n", __FUNCTION__, ret));
+ goto done;
+ }
+#endif /* NDO_CONFIG_SUPPORT */
+ break;
- return (dhd_rtt_set_cfg(&dhd->pub, buf));
+ default:
+ DHD_ERROR(("%s: unknown notifier event \n", __FUNCTION__));
+ break;
+ }
+done:
+ /* free ndo_work. alloced while scheduling the work */
+ if (ndo_work) {
+ kfree(ndo_work);
+ }
+
+ return;
}
-int
-dhd_dev_rtt_cancel_cfg(struct net_device *dev, struct ether_addr *mac_list, int mac_cnt)
+/*
+ * Neighbor Discovery Offload: called when an interface
+ * is assigned an IPv6 address.
+ * Handles only the primary interface.
+ */
+int dhd_inet6addr_notifier_call(struct notifier_block *this, unsigned long event, void *ptr)
{
- dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
+ dhd_info_t *dhd;
+ dhd_pub_t *dhdp;
+ struct inet6_ifaddr *inet6_ifa = ptr;
+ struct ipv6_work_info_t *ndo_info;
+ int idx;
- return (dhd_rtt_stop(&dhd->pub, mac_list, mac_cnt));
-}
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 31))
+ /* Filter notifications meant for non-Broadcom devices */
+ if (inet6_ifa->idev->dev->netdev_ops != &dhd_ops_pri) {
+ return NOTIFY_DONE;
+ }
+#endif /* LINUX_VERSION_CODE */
-int
-dhd_dev_rtt_register_noti_callback(struct net_device *dev, void *ctx, dhd_rtt_compl_noti_fn noti_fn)
-{
- dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
+ dhd = DHD_DEV_INFO(inet6_ifa->idev->dev);
+ if (!dhd) {
+ return NOTIFY_DONE;
+ }
+ dhdp = &dhd->pub;
- return (dhd_rtt_register_noti_callback(&dhd->pub, ctx, noti_fn));
-}
+ /* Supports only primary interface */
+ idx = dhd_net2idx(dhd, inet6_ifa->idev->dev);
+ if (idx != 0) {
+ return NOTIFY_DONE;
+ }
-int
-dhd_dev_rtt_unregister_noti_callback(struct net_device *dev, dhd_rtt_compl_noti_fn noti_fn)
-{
- dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
+ /* FW capability */
+ if (!FW_SUPPORTED(dhdp, ndoe)) {
+ return NOTIFY_DONE;
+ }
- return (dhd_rtt_unregister_noti_callback(&dhd->pub, noti_fn));
-}
+ ndo_info = (struct ipv6_work_info_t *)kzalloc(sizeof(struct ipv6_work_info_t), GFP_ATOMIC);
+ if (!ndo_info) {
+ DHD_ERROR(("%s: ipv6 work alloc failed\n", __FUNCTION__));
+ return NOTIFY_DONE;
+ }
-int
-dhd_dev_rtt_capability(struct net_device *dev, rtt_capabilities_t *capa)
-{
- dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
+ /* fill up ndo_info */
+ ndo_info->event = event;
+ ndo_info->if_idx = idx;
+ memcpy(ndo_info->ipv6_addr, &inet6_ifa->addr, IPV6_ADDR_LEN);
- return (dhd_rtt_capability(&dhd->pub, capa));
+ /* defer the work to a worker thread since it may block inside the kernel */
+ dhd_deferred_schedule_work(dhd->dhd_deferred_wq, (void *)ndo_info, DHD_WQ_WORK_IPV6_NDO,
+ dhd_inet6_work_handler, DHD_WQ_WORK_PRIORITY_LOW);
+ return NOTIFY_DONE;
}
+#endif /* CONFIG_IPV6 && IPV6_NDO_SUPPORT */
int
-dhd_dev_rtt_avail_channel(struct net_device *dev, wifi_channel_info *channel_info)
+dhd_register_if(dhd_pub_t *dhdp, int ifidx, bool need_rtnl_lock)
{
- dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
- return (dhd_rtt_avail_channel(&dhd->pub, channel_info));
-}
+ dhd_info_t *dhd = (dhd_info_t *)dhdp->info;
+ dhd_if_t *ifp;
+ struct net_device *net = NULL;
+ int err = 0;
+ uint8 temp_addr[ETHER_ADDR_LEN] = { 0x00, 0x90, 0x4c, 0x11, 0x22, 0x33 };
-int
-dhd_dev_rtt_enable_responder(struct net_device *dev, wifi_channel_info *channel_info)
-{
- dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
- return (dhd_rtt_enable_responder(&dhd->pub, channel_info));
-}
+ DHD_TRACE(("%s: ifidx %d\n", __FUNCTION__, ifidx));
-int dhd_dev_rtt_cancel_responder(struct net_device *dev)
-{
- dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
- return (dhd_rtt_cancel_responder(&dhd->pub));
-}
+ if (dhd == NULL || dhd->iflist[ifidx] == NULL) {
+ DHD_ERROR(("%s: Invalid Interface\n", __FUNCTION__));
+ return BCME_ERROR;
+ }
-#endif /* RTT_SUPPORT */
+ ASSERT(dhd && dhd->iflist[ifidx]);
+ ifp = dhd->iflist[ifidx];
+ net = ifp->net;
+ ASSERT(net && (ifp->idx == ifidx));
-#ifdef KEEP_ALIVE
-#define KA_TEMP_BUF_SIZE 512
-#define KA_FRAME_SIZE 300
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 31))
+ ASSERT(!net->open);
+ net->get_stats = dhd_get_stats;
+ net->do_ioctl = dhd_ioctl_entry;
+ net->hard_start_xmit = dhd_start_xmit;
+ net->set_mac_address = dhd_set_mac_address;
+ net->set_multicast_list = dhd_set_multicast_list;
+ net->open = net->stop = NULL;
+#else
+ ASSERT(!net->netdev_ops);
+ net->netdev_ops = &dhd_ops_virt;
+#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 31) */
-int
-dhd_dev_start_mkeep_alive(dhd_pub_t *dhd_pub, uint8 mkeep_alive_id, uint8 *ip_pkt,
- uint16 ip_pkt_len, uint8* src_mac, uint8* dst_mac, uint32 period_msec)
-{
- const int ETHERTYPE_LEN = 2;
- char *pbuf = NULL;
- const char *str;
- wl_mkeep_alive_pkt_t mkeep_alive_pkt;
- wl_mkeep_alive_pkt_t *mkeep_alive_pktp = NULL;
- int buf_len = 0;
- int str_len = 0;
- int res = BCME_ERROR;
- int len_bytes = 0;
- int i = 0;
+ /* Ok, link into the network layer... */
+ if (ifidx == 0) {
+ /*
+ * device functions for the primary interface only
+ */
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 31))
+ net->open = dhd_open;
+ net->stop = dhd_stop;
+#else
+ net->netdev_ops = &dhd_ops_pri;
+#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 31) */
+ if (!ETHER_ISNULLADDR(dhd->pub.mac.octet))
+ memcpy(temp_addr, dhd->pub.mac.octet, ETHER_ADDR_LEN);
+ } else {
+ /*
+ * We have to use the primary MAC for virtual interfaces
+ */
+ memcpy(temp_addr, ifp->mac_addr, ETHER_ADDR_LEN);
+ /*
+ * Android sets the locally administered bit to indicate that this is a
+ * portable hotspot. This will not work in simultaneous AP/STA mode,
+ * nor with P2P. Need to set the Dongle's MAC address, and then use that.
+ */
+ if (!memcmp(temp_addr, dhd->iflist[0]->mac_addr,
+ ETHER_ADDR_LEN)) {
+ DHD_ERROR(("%s interface [%s]: set locally administered bit in MAC\n",
+ __func__, net->name));
+ temp_addr[0] |= 0x02;
+ }
+ }
- /* ether frame to have both max IP pkt (256 bytes) and ether header */
- char *pmac_frame = NULL;
- char *pmac_frame_begin = NULL;
+ net->hard_header_len = ETH_HLEN + dhd->pub.hdrlen;
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 24)
+ net->ethtool_ops = &dhd_ethtool_ops;
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 24) */
- /*
- * The mkeep_alive packet is for STA interface only; if the bss is configured as AP,
- * dongle shall reject a mkeep_alive request.
- */
- if (!dhd_support_sta_mode(dhd_pub))
- return res;
+#if defined(WL_WIRELESS_EXT)
+#if WIRELESS_EXT < 19
+ net->get_wireless_stats = dhd_get_wireless_stats;
+#endif /* WIRELESS_EXT < 19 */
+#if WIRELESS_EXT > 12
+ net->wireless_handlers = &wl_iw_handler_def;
+#endif /* WIRELESS_EXT > 12 */
+#endif /* defined(WL_WIRELESS_EXT) */
- DHD_TRACE(("%s execution\n", __FUNCTION__));
+ dhd->pub.rxsz = DBUS_RX_BUFFER_SIZE_DHD(net);
- if ((pbuf = MALLOCZ(dhd_pub->osh, KA_TEMP_BUF_SIZE)) == NULL) {
- DHD_ERROR(("failed to allocate buf with size %d\n", KA_TEMP_BUF_SIZE));
- res = BCME_NOMEM;
- return res;
+#ifdef WLMESH
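+ /* Derive a distinct MAC address for mesh interfaces (ifidx >= 2) from the primary address */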
+ if (ifidx >= 2 && dhdp->conf->fw_type == FW_TYPE_MESH) {
+ temp_addr[4] ^= 0x80;
+ temp_addr[4] += ifidx;
+ temp_addr[5] += ifidx;
}
+#endif
+ memcpy(net->dev_addr, temp_addr, ETHER_ADDR_LEN);
- if ((pmac_frame = MALLOCZ(dhd_pub->osh, KA_FRAME_SIZE)) == NULL) {
- DHD_ERROR(("failed to allocate mac_frame with size %d\n", KA_FRAME_SIZE));
- res = BCME_NOMEM;
- goto exit;
+ if (ifidx == 0)
+ printf("%s\n", dhd_version);
+#ifdef WL_EXT_IAPSTA
+ else
+ wl_ext_iapsta_attach_netdev(net, ifidx, ifp->bssidx);
+#endif
+ if (ifidx != 0) {
+ if (_dhd_set_mac_address(dhd, ifidx, net->dev_addr) == 0)
+ DHD_INFO(("%s: MACID is overwritten\n", __FUNCTION__));
+ else
+ DHD_ERROR(("%s: _dhd_set_mac_address() failed\n", __FUNCTION__));
}
- pmac_frame_begin = pmac_frame;
- /*
- * Get current mkeep-alive status.
- */
- res = dhd_iovar(dhd_pub, 0, "mkeep_alive", &mkeep_alive_id, sizeof(mkeep_alive_id), pbuf,
- KA_TEMP_BUF_SIZE, FALSE);
- if (res < 0) {
- DHD_ERROR(("%s: Get mkeep_alive failed (error=%d)\n", __FUNCTION__, res));
- goto exit;
- } else {
- /* Check available ID whether it is occupied */
- mkeep_alive_pktp = (wl_mkeep_alive_pkt_t *) pbuf;
- if (dtoh32(mkeep_alive_pktp->period_msec != 0)) {
- DHD_ERROR(("%s: Get mkeep_alive failed, ID %u is in use.\n",
- __FUNCTION__, mkeep_alive_id));
+ if (need_rtnl_lock)
+ err = register_netdev(net);
+ else
+ err = register_netdevice(net);
- /* Current occupied ID info */
- DHD_ERROR(("%s: mkeep_alive\n", __FUNCTION__));
- DHD_ERROR((" Id : %d\n"
- " Period: %d msec\n"
- " Length: %d\n"
- " Packet: 0x",
- mkeep_alive_pktp->keep_alive_id,
- dtoh32(mkeep_alive_pktp->period_msec),
- dtoh16(mkeep_alive_pktp->len_bytes)));
+ if (err != 0) {
+ DHD_ERROR(("couldn't register the net device [%s], err %d\n", net->name, err));
+ goto fail;
+ }
+#ifdef WL_EXT_IAPSTA
+ if (ifidx == 0)
+ wl_ext_iapsta_attach_netdev(net, ifidx, ifp->bssidx);
+ wl_ext_iapsta_attach_name(net, ifidx);
+#endif
- for (i = 0; i < mkeep_alive_pktp->len_bytes; i++) {
- DHD_ERROR(("%02x", mkeep_alive_pktp->data[i]));
- }
- DHD_ERROR(("\n"));
- res = BCME_NOTFOUND;
- goto exit;
- }
- }
- /* Request the specified ID */
- memset(&mkeep_alive_pkt, 0, sizeof(wl_mkeep_alive_pkt_t));
- memset(pbuf, 0, KA_TEMP_BUF_SIZE);
- str = "mkeep_alive";
- str_len = strlen(str);
- strncpy(pbuf, str, str_len);
- pbuf[str_len] = '\0';
+ printf("Register interface [%s] MAC: "MACDBG"\n\n", net->name,
+#if defined(CUSTOMER_HW4_DEBUG)
+ MAC2STRDBG(dhd->pub.mac.octet));
+#else
+ MAC2STRDBG(net->dev_addr));
+#endif /* CUSTOMER_HW4_DEBUG */
- mkeep_alive_pktp = (wl_mkeep_alive_pkt_t *) (pbuf + str_len + 1);
- mkeep_alive_pkt.period_msec = htod32(period_msec);
- buf_len = str_len + 1;
- mkeep_alive_pkt.version = htod16(WL_MKEEP_ALIVE_VERSION);
- mkeep_alive_pkt.length = htod16(WL_MKEEP_ALIVE_FIXED_LEN);
+#if defined(SOFTAP) && defined(WL_WIRELESS_EXT) && !defined(WL_CFG80211)
+// wl_iw_iscan_set_scan_broadcast_prep(net, 1);
+#endif
- /* ID assigned */
- mkeep_alive_pkt.keep_alive_id = mkeep_alive_id;
+#if (defined(BCMPCIE) || (defined(BCMLXSDMMC) && (LINUX_VERSION_CODE >= \
+ KERNEL_VERSION(2, 6, 27))) || defined(BCMDBUS))
+ if (ifidx == 0) {
+#if defined(BCMLXSDMMC) && !defined(DHD_PRELOAD)
+ up(&dhd_registration_sem);
+#endif /* BCMLXSDMMC */
+ if (!dhd_download_fw_on_driverload) {
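+ /* Firmware is not downloaded at driver load: flush any pending
+ * work and power the chip back down after registering the netdev.
+ */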
+#ifdef WL_CFG80211
+ wl_terminate_event_handler(net);
+#endif /* WL_CFG80211 */
+#if defined(DHD_LB_RXP)
+ __skb_queue_purge(&dhd->rx_pend_queue);
+#endif /* DHD_LB_RXP */
- buf_len += WL_MKEEP_ALIVE_FIXED_LEN;
+#if defined(DHD_LB_TXP)
+ skb_queue_purge(&dhd->tx_pend_queue);
+#endif /* DHD_LB_TXP */
- /*
- * Build up Ethernet Frame
- */
+#ifdef SHOW_LOGTRACE
+ /* Release the skbs from queue for WLC_E_TRACE event */
+ dhd_event_logtrace_flush_queue(dhdp);
+#endif /* SHOW_LOGTRACE */
+
+#ifdef DHDTCPACK_SUPPRESS
+ dhd_tcpack_suppress_set(dhdp, TCPACK_SUP_OFF);
+#endif /* DHDTCPACK_SUPPRESS */
+ dhd_net_bus_devreset(net, TRUE);
+#ifdef BCMLXSDMMC
+ dhd_net_bus_suspend(net);
+#endif /* BCMLXSDMMC */
+ wifi_platform_set_power(dhdp->info->adapter, FALSE, WIFI_TURNOFF_DELAY);
+#if defined(BT_OVER_SDIO)
+ dhd->bus_user_count--;
+#endif /* BT_OVER_SDIO */
+ }
+ }
+#endif /* BCMPCIE || (BCMLXSDMMC && KERNEL_VERSION >= 2.6.27) || BCMDBUS */
+ return 0;
- /* Mapping dest mac addr */
- memcpy(pmac_frame, dst_mac, ETHER_ADDR_LEN);
- pmac_frame += ETHER_ADDR_LEN;
+fail:
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 31)
+ net->open = NULL;
+#else
+ net->netdev_ops = NULL;
+#endif
+ return err;
+}
- /* Mapping src mac addr */
- memcpy(pmac_frame, src_mac, ETHER_ADDR_LEN);
- pmac_frame += ETHER_ADDR_LEN;
+void
+dhd_bus_detach(dhd_pub_t *dhdp)
+{
+ dhd_info_t *dhd;
- /* Mapping Ethernet type (ETHERTYPE_IP: 0x0800) */
- *(pmac_frame++) = 0x08;
- *(pmac_frame++) = 0x00;
+ DHD_TRACE(("%s: Enter\n", __FUNCTION__));
- /* Mapping IP pkt */
- memcpy(pmac_frame, ip_pkt, ip_pkt_len);
- pmac_frame += ip_pkt_len;
+ if (dhdp) {
+ dhd = (dhd_info_t *)dhdp->info;
+ if (dhd) {
- /*
- * Length of ether frame (assume to be all hexa bytes)
- * = src mac + dst mac + ether type + ip pkt len
- */
- len_bytes = ETHER_ADDR_LEN*2 + ETHERTYPE_LEN + ip_pkt_len;
- memcpy(mkeep_alive_pktp->data, pmac_frame_begin, len_bytes);
- buf_len += len_bytes;
- mkeep_alive_pkt.len_bytes = htod16(len_bytes);
+ /*
+ * In the case of the Android cfg80211 driver, the bus is already down
+ * in dhd_stop, so calling stop again would cause SD read/write errors.
+ */
+ if (dhd->pub.busstate != DHD_BUS_DOWN && dhd_download_fw_on_driverload) {
+ /* Stop the protocol module */
+ dhd_prot_stop(&dhd->pub);
- /*
- * Keep-alive attributes are set in local variable (mkeep_alive_pkt), and
- * then memcpy'ed into buffer (mkeep_alive_pktp) since there is no
- * guarantee that the buffer is properly aligned.
- */
- memcpy((char *)mkeep_alive_pktp, &mkeep_alive_pkt, WL_MKEEP_ALIVE_FIXED_LEN);
+ /* Stop the bus module */
+#ifdef BCMDBUS
+ /* Force Dongle terminated */
+ if (dhd_wl_ioctl_cmd(dhdp, WLC_TERMINATED, NULL, 0, TRUE, 0) < 0)
+ DHD_ERROR(("%s Setting WLC_TERMINATED failed\n",
+ __FUNCTION__));
+ dbus_stop(dhd->pub.bus);
+ dhd->pub.busstate = DHD_BUS_DOWN;
+#else
+ dhd_bus_stop(dhd->pub.bus, TRUE);
+#endif /* BCMDBUS */
+ }
- res = dhd_wl_ioctl_cmd(dhd_pub, WLC_SET_VAR, pbuf, buf_len, TRUE, 0);
-exit:
- if (pmac_frame_begin) {
- MFREE(dhd_pub->osh, pmac_frame_begin, KA_FRAME_SIZE);
- pmac_frame_begin = NULL;
- }
- if (pbuf) {
- MFREE(dhd_pub->osh, pbuf, KA_TEMP_BUF_SIZE);
- pbuf = NULL;
+#if defined(OOB_INTR_ONLY) || defined(BCMPCIE_OOB_HOST_WAKE)
+ dhd_bus_oob_intr_unregister(dhdp);
+#endif
+ }
}
- return res;
}
-int
-dhd_dev_stop_mkeep_alive(dhd_pub_t *dhd_pub, uint8 mkeep_alive_id)
+
+void dhd_detach(dhd_pub_t *dhdp)
{
- char *pbuf = NULL;
- wl_mkeep_alive_pkt_t mkeep_alive_pkt;
- wl_mkeep_alive_pkt_t *mkeep_alive_pktp = NULL;
- int res = BCME_ERROR;
- int i = 0;
+ dhd_info_t *dhd;
+ unsigned long flags;
+ int timer_valid = FALSE;
+ struct net_device *dev;
+#ifdef WL_CFG80211
+ struct bcm_cfg80211 *cfg = NULL;
+#endif
+#ifdef HOFFLOAD_MODULES
+ struct module_metadata *hmem = NULL;
+#endif
+ if (!dhdp)
+ return;
- /*
- * The mkeep_alive packet is for STA interface only; if the bss is configured as AP,
- * dongle shall reject a mkeep_alive request.
- */
- if (!dhd_support_sta_mode(dhd_pub))
- return res;
+ dhd = (dhd_info_t *)dhdp->info;
+ if (!dhd)
+ return;
- DHD_TRACE(("%s execution\n", __FUNCTION__));
+ dev = dhd->iflist[0]->net;
- /*
- * Get current mkeep-alive status. Skip ID 0 which is being used for NULL pkt.
- */
- if ((pbuf = MALLOC(dhd_pub->osh, KA_TEMP_BUF_SIZE)) == NULL) {
- DHD_ERROR(("failed to allocate buf with size %d\n", KA_TEMP_BUF_SIZE));
- return res;
+ if (dev) {
+ rtnl_lock();
+ if (dev->flags & IFF_UP) {
+ /* If IFF_UP is still up, it indicates that
+ * "ifconfig wlan0 down" hasn't been called.
+ * So invoke dev_close explicitly here to
+ * bring down the interface.
+ */
+ DHD_TRACE(("IFF_UP flag is up. Enforcing dev_close from detach \n"));
+ dev_close(dev);
+ }
+ rtnl_unlock();
}
- res = dhd_iovar(dhd_pub, 0, "mkeep_alive", &mkeep_alive_id,
- sizeof(mkeep_alive_id), pbuf, KA_TEMP_BUF_SIZE, FALSE);
- if (res < 0) {
- DHD_ERROR(("%s: Get mkeep_alive failed (error=%d)\n", __FUNCTION__, res));
- goto exit;
- } else {
- /* Check occupied ID */
- mkeep_alive_pktp = (wl_mkeep_alive_pkt_t *) pbuf;
- DHD_INFO(("%s: mkeep_alive\n", __FUNCTION__));
- DHD_INFO((" Id : %d\n"
- " Period: %d msec\n"
- " Length: %d\n"
- " Packet: 0x",
- mkeep_alive_pktp->keep_alive_id,
- dtoh32(mkeep_alive_pktp->period_msec),
- dtoh16(mkeep_alive_pktp->len_bytes)));
+ DHD_TRACE(("%s: Enter state 0x%x\n", __FUNCTION__, dhd->dhd_state));
- for (i = 0; i < mkeep_alive_pktp->len_bytes; i++) {
- DHD_INFO(("%02x", mkeep_alive_pktp->data[i]));
- }
- DHD_INFO(("\n"));
+ dhd->pub.up = 0;
+ if (!(dhd->dhd_state & DHD_ATTACH_STATE_DONE)) {
+ /* Give sufficient time for threads to start running in case
+ * dhd_attach() has failed
+ */
+ OSL_SLEEP(100);
}
+#ifdef DHD_WET
+ dhd_free_wet_info(&dhd->pub, dhd->pub.wet_info);
+#endif /* DHD_WET */
+#if defined(BCM_DNGL_EMBEDIMAGE) || defined(BCM_REQUEST_FW)
+#endif /* defined(BCM_DNGL_EMBEDIMAGE) || defined(BCM_REQUEST_FW) */
- /* Make it stop if available */
- if (dtoh32(mkeep_alive_pktp->period_msec != 0)) {
- DHD_INFO(("stop mkeep_alive on ID %d\n", mkeep_alive_id));
- memset(&mkeep_alive_pkt, 0, sizeof(wl_mkeep_alive_pkt_t));
-
- mkeep_alive_pkt.period_msec = 0;
- mkeep_alive_pkt.version = htod16(WL_MKEEP_ALIVE_VERSION);
- mkeep_alive_pkt.length = htod16(WL_MKEEP_ALIVE_FIXED_LEN);
- mkeep_alive_pkt.keep_alive_id = mkeep_alive_id;
+#ifdef PROP_TXSTATUS
+#ifdef DHD_WLFC_THREAD
+ if (dhd->pub.wlfc_thread) {
+ kthread_stop(dhd->pub.wlfc_thread);
+ dhdp->wlfc_thread_go = TRUE;
+ wake_up_interruptible(&dhdp->wlfc_wqhead);
+ }
+ dhd->pub.wlfc_thread = NULL;
+#endif /* DHD_WLFC_THREAD */
+#endif /* PROP_TXSTATUS */
- res = dhd_iovar(dhd_pub, 0, "mkeep_alive",
- (char *)&mkeep_alive_pkt,
- WL_MKEEP_ALIVE_FIXED_LEN, NULL, 0, TRUE);
- } else {
- DHD_ERROR(("%s: ID %u does not exist.\n", __FUNCTION__, mkeep_alive_id));
- res = BCME_NOTFOUND;
+#ifdef DHD_TIMESYNC
+ if (dhd->dhd_state & DHD_ATTACH_TIMESYNC_ATTACH_DONE) {
+ dhd_timesync_detach(dhdp);
}
-exit:
- if (pbuf) {
- MFREE(dhd_pub->osh, pbuf, KA_TEMP_BUF_SIZE);
- pbuf = NULL;
+#endif /* DHD_TIMESYNC */
+#ifdef WL_CFG80211
+ if (dev) {
+ wl_cfg80211_down(dev);
}
- return res;
-}
-#endif /* KEEP_ALIVE */
+#endif /* WL_CFG80211 */
-#if defined(PKT_FILTER_SUPPORT) && defined(APF)
-static void _dhd_apf_lock_local(dhd_info_t *dhd)
-{
- if (dhd) {
- mutex_lock(&dhd->dhd_apf_mutex);
+ if (dhd->dhd_state & DHD_ATTACH_STATE_PROT_ATTACH) {
+ dhd_bus_detach(dhdp);
+#ifdef BCMPCIE
+ if (is_reboot == SYS_RESTART) {
+ extern bcmdhd_wifi_platdata_t *dhd_wifi_platdata;
+ if (dhd_wifi_platdata && !dhdp->dongle_reset) {
+ dhdpcie_bus_clock_stop(dhdp->bus);
+ wifi_platform_set_power(dhd_wifi_platdata->adapters,
+ FALSE, WIFI_TURNOFF_DELAY);
+ }
+ }
+#endif /* BCMPCIE */
+#ifndef PCIE_FULL_DONGLE
+ if (dhdp->prot)
+ dhd_prot_detach(dhdp);
+#endif /* !PCIE_FULL_DONGLE */
}
-}
-static void _dhd_apf_unlock_local(dhd_info_t *dhd)
-{
- if (dhd) {
- mutex_unlock(&dhd->dhd_apf_mutex);
+#ifdef ARP_OFFLOAD_SUPPORT
+ if (dhd_inetaddr_notifier_registered) {
+ dhd_inetaddr_notifier_registered = FALSE;
+ unregister_inetaddr_notifier(&dhd_inetaddr_notifier);
}
-}
-
-static int
-__dhd_apf_add_filter(struct net_device *ndev, uint32 filter_id,
- u8* program, uint32 program_len)
-{
- dhd_info_t *dhd = DHD_DEV_INFO(ndev);
- dhd_pub_t *dhdp = &dhd->pub;
- wl_pkt_filter_t * pkt_filterp;
- wl_apf_program_t *apf_program;
- char *buf;
- u32 cmd_len, buf_len;
- int ifidx, ret;
- char cmd[] = "pkt_filter_add";
-
- ifidx = dhd_net2idx(dhd, ndev);
- if (ifidx == DHD_BAD_IF) {
- DHD_ERROR(("%s: bad ifidx\n", __FUNCTION__));
- return -ENODEV;
+#endif /* ARP_OFFLOAD_SUPPORT */
+#if defined(CONFIG_IPV6) && defined(IPV6_NDO_SUPPORT)
+ if (dhd_inet6addr_notifier_registered) {
+ dhd_inet6addr_notifier_registered = FALSE;
+ unregister_inet6addr_notifier(&dhd_inet6addr_notifier);
}
-
- cmd_len = sizeof(cmd);
-
- /* Check if the program_len is more than the expected len
- * and if the program is NULL return from here.
- */
- if ((program_len > WL_APF_PROGRAM_MAX_SIZE) || (program == NULL)) {
- DHD_ERROR(("%s Invalid program_len: %d, program: %pK\n",
- __FUNCTION__, program_len, program));
- return -EINVAL;
+#endif /* CONFIG_IPV6 && IPV6_NDO_SUPPORT */
+#if defined(CONFIG_HAS_EARLYSUSPEND) && defined(DHD_USE_EARLYSUSPEND)
+ if (dhd->dhd_state & DHD_ATTACH_STATE_EARLYSUSPEND_DONE) {
+ if (dhd->early_suspend.suspend)
+ unregister_early_suspend(&dhd->early_suspend);
}
- buf_len = cmd_len + WL_PKT_FILTER_FIXED_LEN +
- WL_APF_PROGRAM_FIXED_LEN + program_len;
+#endif /* CONFIG_HAS_EARLYSUSPEND && DHD_USE_EARLYSUSPEND */
- buf = MALLOCZ(dhdp->osh, buf_len);
- if (unlikely(!buf)) {
- DHD_ERROR(("%s: MALLOC failure, %d bytes\n", __FUNCTION__, buf_len));
- return -ENOMEM;
+#if defined(WL_WIRELESS_EXT)
+ if (dhd->dhd_state & DHD_ATTACH_STATE_WL_ATTACH) {
+ /* Detach and unlink in the iw */
+ wl_iw_detach();
}
+#ifdef WL_ESCAN
+ wl_escan_detach(dhdp);
+#endif /* WL_ESCAN */
+#endif /* defined(WL_WIRELESS_EXT) */
- memcpy(buf, cmd, cmd_len);
-
- pkt_filterp = (wl_pkt_filter_t *) (buf + cmd_len);
- pkt_filterp->id = htod32(filter_id);
- pkt_filterp->negate_match = htod32(FALSE);
- pkt_filterp->type = htod32(WL_PKT_FILTER_TYPE_APF_MATCH);
+#ifdef DHD_ULP
+ dhd_ulp_deinit(dhd->pub.osh, dhdp);
+#endif /* DHD_ULP */
- apf_program = &pkt_filterp->u.apf_program;
- apf_program->version = htod16(WL_APF_INTERNAL_VERSION);
- apf_program->instr_len = htod16(program_len);
- memcpy(apf_program->instrs, program, program_len);
+ /* delete all interfaces, start with virtual */
+ if (dhd->dhd_state & DHD_ATTACH_STATE_ADD_IF) {
+ int i = 1;
+ dhd_if_t *ifp;
- ret = dhd_wl_ioctl_cmd(dhdp, WLC_SET_VAR, buf, buf_len, TRUE, ifidx);
- if (unlikely(ret)) {
- DHD_ERROR(("%s: failed to add APF filter, id=%d, ret=%d\n",
- __FUNCTION__, filter_id, ret));
- }
+ /* Cleanup virtual interfaces */
+ dhd_net_if_lock_local(dhd);
+ for (i = 1; i < DHD_MAX_IFS; i++) {
+ if (dhd->iflist[i]) {
+ dhd_remove_if(&dhd->pub, i, TRUE);
+ }
+ }
+ dhd_net_if_unlock_local(dhd);
- if (buf) {
- MFREE(dhdp->osh, buf, buf_len);
- }
- return ret;
-}
+ /* delete primary interface 0 */
+ ifp = dhd->iflist[0];
+ ASSERT(ifp);
+ ASSERT(ifp->net);
+ if (ifp && ifp->net) {
+#ifdef WL_CFG80211
+ cfg = wl_get_cfg(ifp->net);
+#endif
+ /* in unregister_netdev case, the interface gets freed by net->destructor
+ * (which is set to free_netdev)
+ */
+ if (ifp->net->reg_state == NETREG_UNINITIALIZED) {
+ free_netdev(ifp->net);
+ } else {
+ argos_register_notifier_deinit();
+#ifdef SET_RPS_CPUS
+ custom_rps_map_clear(ifp->net->_rx);
+#endif /* SET_RPS_CPUS */
+ netif_tx_disable(ifp->net);
+ unregister_netdev(ifp->net);
+ }
+#ifdef PCIE_FULL_DONGLE
+ ifp->net = DHD_NET_DEV_NULL;
+#else
+ ifp->net = NULL;
+#endif /* PCIE_FULL_DONGLE */
-static int
-__dhd_apf_config_filter(struct net_device *ndev, uint32 filter_id,
- uint32 mode, uint32 enable)
-{
- dhd_info_t *dhd = DHD_DEV_INFO(ndev);
- dhd_pub_t *dhdp = &dhd->pub;
- wl_pkt_filter_enable_t * pkt_filterp;
- char *buf;
- u32 cmd_len, buf_len;
- int ifidx, ret;
- char cmd[] = "pkt_filter_enable";
+#ifdef DHD_WMF
+ dhd_wmf_cleanup(dhdp, 0);
+#endif /* DHD_WMF */
+#ifdef DHD_L2_FILTER
+ bcm_l2_filter_arp_table_update(dhdp->osh, ifp->phnd_arp_table, TRUE,
+ NULL, FALSE, dhdp->tickcnt);
+ deinit_l2_filter_arp_table(dhdp->osh, ifp->phnd_arp_table);
+ ifp->phnd_arp_table = NULL;
+#endif /* DHD_L2_FILTER */
- ifidx = dhd_net2idx(dhd, ndev);
- if (ifidx == DHD_BAD_IF) {
- DHD_ERROR(("%s: bad ifidx\n", __FUNCTION__));
- return -ENODEV;
- }
- cmd_len = sizeof(cmd);
- buf_len = cmd_len + sizeof(*pkt_filterp);
+ dhd_if_del_sta_list(ifp);
- buf = MALLOCZ(dhdp->osh, buf_len);
- if (unlikely(!buf)) {
- DHD_ERROR(("%s: MALLOC failure, %d bytes\n", __FUNCTION__, buf_len));
- return -ENOMEM;
+ MFREE(dhd->pub.osh, ifp, sizeof(*ifp));
+ dhd->iflist[0] = NULL;
+ }
}
- memcpy(buf, cmd, cmd_len);
-
- pkt_filterp = (wl_pkt_filter_enable_t *) (buf + cmd_len);
- pkt_filterp->id = htod32(filter_id);
- pkt_filterp->enable = htod32(enable);
+ /* Clear the watchdog timer */
+ DHD_GENERAL_LOCK(&dhd->pub, flags);
+ timer_valid = dhd->wd_timer_valid;
+ dhd->wd_timer_valid = FALSE;
+ DHD_GENERAL_UNLOCK(&dhd->pub, flags);
+ if (timer_valid)
+ del_timer_sync(&dhd->timer);
+ DHD_DISABLE_RUNTIME_PM(&dhd->pub);
- ret = dhd_wl_ioctl_cmd(dhdp, WLC_SET_VAR, buf, buf_len, TRUE, ifidx);
- if (unlikely(ret)) {
- DHD_ERROR(("%s: failed to enable APF filter, id=%d, ret=%d\n",
- __FUNCTION__, filter_id, ret));
- goto exit;
- }
+#ifdef BCMDBUS
+ tasklet_kill(&dhd->tasklet);
+#else
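+ /* Stop the DHD kernel threads (runtime PM, watchdog, rxf, dpc) created at attach time; if no DPC thread exists, kill the tasklet instead */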
+ if (dhd->dhd_state & DHD_ATTACH_STATE_THREADS_CREATED) {
+#ifdef DHD_PCIE_RUNTIMEPM
+ if (dhd->thr_rpm_ctl.thr_pid >= 0) {
+ PROC_STOP(&dhd->thr_rpm_ctl);
+ }
+#endif /* DHD_PCIE_RUNTIMEPM */
+ if (dhd->thr_wdt_ctl.thr_pid >= 0) {
+ PROC_STOP(&dhd->thr_wdt_ctl);
+ }
- ret = dhd_wl_ioctl_set_intiovar(dhdp, "pkt_filter_mode", dhd_master_mode,
- WLC_SET_VAR, TRUE, ifidx);
- if (unlikely(ret)) {
- DHD_ERROR(("%s: failed to set APF filter mode, id=%d, ret=%d\n",
- __FUNCTION__, filter_id, ret));
- }
+ if (dhd->rxthread_enabled && dhd->thr_rxf_ctl.thr_pid >= 0) {
+ PROC_STOP(&dhd->thr_rxf_ctl);
+ }
-exit:
- if (buf) {
- MFREE(dhdp->osh, buf, buf_len);
+ if (dhd->thr_dpc_ctl.thr_pid >= 0) {
+ PROC_STOP(&dhd->thr_dpc_ctl);
+ } else {
+ tasklet_kill(&dhd->tasklet);
+ }
}
- return ret;
-}
+#endif /* BCMDBUS */
-static int
-__dhd_apf_delete_filter(struct net_device *ndev, uint32 filter_id)
-{
- dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(ndev);
- dhd_pub_t *dhdp = &dhd->pub;
- int ifidx, ret;
+#ifdef DHD_LB
+ if (dhd->dhd_state & DHD_ATTACH_STATE_LB_ATTACH_DONE) {
+ /* Clear the flag first to avoid calling the cpu notifier */
+ dhd->dhd_state &= ~DHD_ATTACH_STATE_LB_ATTACH_DONE;
- ifidx = dhd_net2idx(dhd, ndev);
- if (ifidx == DHD_BAD_IF) {
- DHD_ERROR(("%s: bad ifidx\n", __FUNCTION__));
- return -ENODEV;
- }
+ /* Kill the Load Balancing Tasklets */
+#ifdef DHD_LB_RXP
+ cancel_work_sync(&dhd->rx_napi_dispatcher_work);
+ __skb_queue_purge(&dhd->rx_pend_queue);
+#endif /* DHD_LB_RXP */
+#ifdef DHD_LB_TXP
+ cancel_work_sync(&dhd->tx_dispatcher_work);
+ tasklet_kill(&dhd->tx_tasklet);
+ __skb_queue_purge(&dhd->tx_pend_queue);
+#endif /* DHD_LB_TXP */
+#ifdef DHD_LB_TXC
+ cancel_work_sync(&dhd->tx_compl_dispatcher_work);
+ tasklet_kill(&dhd->tx_compl_tasklet);
+#endif /* DHD_LB_TXC */
+#ifdef DHD_LB_RXC
+ tasklet_kill(&dhd->rx_compl_tasklet);
+#endif /* DHD_LB_RXC */
- ret = dhd_wl_ioctl_set_intiovar(dhdp, "pkt_filter_delete",
- htod32(filter_id), WLC_SET_VAR, TRUE, ifidx);
- if (unlikely(ret)) {
- DHD_ERROR(("%s: failed to delete APF filter, id=%d, ret=%d\n",
- __FUNCTION__, filter_id, ret));
+ if (dhd->cpu_notifier.notifier_call != NULL) {
+ unregister_cpu_notifier(&dhd->cpu_notifier);
+ }
+ dhd_cpumasks_deinit(dhd);
+ DHD_LB_STATS_DEINIT(&dhd->pub);
}
+#endif /* DHD_LB */
- return ret;
-}
-
-void dhd_apf_lock(struct net_device *dev)
-{
- dhd_info_t *dhd = DHD_DEV_INFO(dev);
- _dhd_apf_lock_local(dhd);
-}
-
-void dhd_apf_unlock(struct net_device *dev)
-{
- dhd_info_t *dhd = DHD_DEV_INFO(dev);
- _dhd_apf_unlock_local(dhd);
-}
+ DHD_SSSR_MEMPOOL_DEINIT(&dhd->pub);
-int
-dhd_dev_apf_get_version(struct net_device *ndev, uint32 *version)
-{
- dhd_info_t *dhd = DHD_DEV_INFO(ndev);
- dhd_pub_t *dhdp = &dhd->pub;
- int ifidx, ret;
+#ifdef DHD_LOG_DUMP
+ dhd_log_dump_deinit(&dhd->pub);
+#endif /* DHD_LOG_DUMP */
+#ifdef WL_CFG80211
+ if (dhd->dhd_state & DHD_ATTACH_STATE_CFG80211) {
+ if (!cfg) {
+ DHD_ERROR(("cfg NULL!\n"));
+ ASSERT(0);
+ } else {
+ wl_cfg80211_detach(cfg);
+ dhd_monitor_uninit();
+ }
+ }
+#endif
- if (!FW_SUPPORTED(dhdp, apf)) {
- DHD_ERROR(("%s: firmware doesn't support APF\n", __FUNCTION__));
+#ifdef DEBUGABILITY
+ if (dhdp->dbg) {
+#ifdef DBG_PKT_MON
+ dhd_os_dbg_detach_pkt_monitor(dhdp);
+ dhd_os_spin_lock_deinit(dhd->pub.osh, dhd->pub.dbg->pkt_mon_lock);
+#endif /* DBG_PKT_MON */
+ dhd_os_dbg_detach(dhdp);
+ }
+#endif /* DEBUGABILITY */
+#ifdef SHOW_LOGTRACE
+#ifdef DHD_PKT_LOGGING
+ dhd_os_detach_pktlog(dhdp);
+#endif /* DHD_PKT_LOGGING */
+ /* Release the skbs from queue for WLC_E_TRACE event */
+ dhd_event_logtrace_flush_queue(dhdp);
- /*
- * Notify Android framework that APF is not supported by setting
- * version as zero.
- */
- *version = 0;
- return BCME_OK;
+ if (dhd->dhd_state & DHD_ATTACH_LOGTRACE_INIT) {
+ if (dhd->event_data.fmts) {
+ MFREE(dhd->pub.osh, dhd->event_data.fmts,
+ dhd->event_data.fmts_size);
+ dhd->event_data.fmts = NULL;
+ }
+ if (dhd->event_data.raw_fmts) {
+ MFREE(dhd->pub.osh, dhd->event_data.raw_fmts,
+ dhd->event_data.raw_fmts_size);
+ dhd->event_data.raw_fmts = NULL;
+ }
+ if (dhd->event_data.raw_sstr) {
+ MFREE(dhd->pub.osh, dhd->event_data.raw_sstr,
+ dhd->event_data.raw_sstr_size);
+ dhd->event_data.raw_sstr = NULL;
+ }
+ if (dhd->event_data.rom_raw_sstr) {
+ MFREE(dhd->pub.osh, dhd->event_data.rom_raw_sstr,
+ dhd->event_data.rom_raw_sstr_size);
+ dhd->event_data.rom_raw_sstr = NULL;
+ }
+ dhd->dhd_state &= ~DHD_ATTACH_LOGTRACE_INIT;
}
-
- ifidx = dhd_net2idx(dhd, ndev);
- if (ifidx == DHD_BAD_IF) {
- DHD_ERROR(("%s: bad ifidx\n", __FUNCTION__));
- return -ENODEV;
+#endif /* SHOW_LOGTRACE */
+#ifdef BCMPCIE
+ if (dhdp->extended_trap_data)
+ {
+ MFREE(dhdp->osh, dhdp->extended_trap_data, BCMPCIE_EXT_TRAP_DATA_MAXLEN);
+ dhdp->extended_trap_data = NULL;
}
-
- ret = dhd_wl_ioctl_get_intiovar(dhdp, "apf_ver", version,
- WLC_GET_VAR, FALSE, ifidx);
- if (unlikely(ret)) {
- DHD_ERROR(("%s: failed to get APF version, ret=%d\n",
- __FUNCTION__, ret));
+#endif /* BCMPCIE */
+#ifdef PNO_SUPPORT
+ if (dhdp->pno_state)
+ dhd_pno_deinit(dhdp);
+#endif
+#ifdef RTT_SUPPORT
+ if (dhdp->rtt_state) {
+ dhd_rtt_deinit(dhdp);
}
-
- return ret;
-}
-
-int
-dhd_dev_apf_get_max_len(struct net_device *ndev, uint32 *max_len)
-{
- dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(ndev);
- dhd_pub_t *dhdp = &dhd->pub;
- int ifidx, ret;
-
- if (!FW_SUPPORTED(dhdp, apf)) {
- DHD_ERROR(("%s: firmware doesn't support APF\n", __FUNCTION__));
- *max_len = 0;
- return BCME_OK;
+#endif
+#if defined(CONFIG_PM_SLEEP)
+ if (dhd_pm_notifier_registered) {
+ unregister_pm_notifier(&dhd->pm_notifier);
+ dhd_pm_notifier_registered = FALSE;
}
+#endif /* CONFIG_PM_SLEEP */
- ifidx = dhd_net2idx(dhd, ndev);
- if (ifidx == DHD_BAD_IF) {
- DHD_ERROR(("%s bad ifidx\n", __FUNCTION__));
- return -ENODEV;
+#ifdef DEBUG_CPU_FREQ
+ if (dhd->new_freq)
+ free_percpu(dhd->new_freq);
+ dhd->new_freq = NULL;
+ cpufreq_unregister_notifier(&dhd->freq_trans, CPUFREQ_TRANSITION_NOTIFIER);
+#endif
+ DHD_TRACE(("wd wakelock count:%d\n", dhd->wakelock_wd_counter));
+#ifdef CONFIG_HAS_WAKELOCK
+ dhd->wakelock_wd_counter = 0;
+ wake_lock_destroy(&dhd->wl_wdwake);
+ // terence 20161023: wl_wifi cannot be destroyed when the WLAN interface goes down; doing it there triggers a NULL pointer dereference in dhd_ioctl_entry, so destroy it here in detach
+ wake_lock_destroy(&dhd->wl_wifi);
+#endif /* CONFIG_HAS_WAKELOCK */
+ if (dhd->dhd_state & DHD_ATTACH_STATE_WAKELOCKS_INIT) {
+ DHD_OS_WAKE_LOCK_DESTROY(dhd);
}
- ret = dhd_wl_ioctl_get_intiovar(dhdp, "apf_size_limit", max_len,
- WLC_GET_VAR, FALSE, ifidx);
- if (unlikely(ret)) {
- DHD_ERROR(("%s: failed to get APF size limit, ret=%d\n",
- __FUNCTION__, ret));
- }
- return ret;
-}
-int
-dhd_dev_apf_add_filter(struct net_device *ndev, u8* program,
- uint32 program_len)
-{
- dhd_info_t *dhd = DHD_DEV_INFO(ndev);
- dhd_pub_t *dhdp = &dhd->pub;
- int ret;
+#ifdef DHDTCPACK_SUPPRESS
+ /* This will free all MEM allocated for TCPACK SUPPRESS */
+ dhd_tcpack_suppress_set(&dhd->pub, TCPACK_SUP_OFF);
+#endif /* DHDTCPACK_SUPPRESS */
- DHD_APF_LOCK(ndev);
+#ifdef PCIE_FULL_DONGLE
+ dhd_flow_rings_deinit(dhdp);
+ if (dhdp->prot)
+ dhd_prot_detach(dhdp);
+#endif
- /* delete, if filter already exists */
- if (dhdp->apf_set) {
- ret = __dhd_apf_delete_filter(ndev, PKT_FILTER_APF_ID);
- if (unlikely(ret)) {
- goto exit;
- }
- dhdp->apf_set = FALSE;
- }
+#if defined(WLTDLS) && defined(PCIE_FULL_DONGLE)
+ dhd_free_tdls_peer_list(dhdp);
+#endif
- ret = __dhd_apf_add_filter(ndev, PKT_FILTER_APF_ID, program, program_len);
- if (ret) {
- goto exit;
- }
- dhdp->apf_set = TRUE;
+#ifdef HOFFLOAD_MODULES
+ hmem = &dhdp->hmem;
+ dhd_free_module_memory(dhdp->bus, hmem);
+#endif /* HOFFLOAD_MODULES */
+#if defined(BT_OVER_SDIO)
+ mutex_destroy(&dhd->bus_user_lock);
+#endif /* BT_OVER_SDIO */
+#ifdef DUMP_IOCTL_IOV_LIST
+ dhd_iov_li_delete(dhdp, &(dhdp->dump_iovlist_head));
+#endif /* DUMP_IOCTL_IOV_LIST */
+#ifdef DHD_DEBUG
+ /* memory waste feature list cleanup */
+ dhd_mw_list_delete(dhdp, &(dhdp->mw_list_head));
+#endif /* DHD_DEBUG */
+#ifdef WL_MONITOR
+ dhd_del_monitor_if(dhd, NULL, DHD_WQ_WORK_IF_DEL);
+#endif /* WL_MONITOR */
- if (dhdp->in_suspend && dhdp->apf_set && !(dhdp->op_mode & DHD_FLAG_HOSTAP_MODE)) {
- /* Driver is still in (early) suspend state, enable APF filter back */
- ret = __dhd_apf_config_filter(ndev, PKT_FILTER_APF_ID,
- PKT_FILTER_MODE_FORWARD_ON_MATCH, TRUE);
- }
-exit:
- DHD_APF_UNLOCK(ndev);
+ /* Prefer adding de-init code above this comment unless necessary.
+ * The idea is to cancel work queue, sysfs and flags at the end.
+ */
+ dhd_deferred_work_deinit(dhd->dhd_deferred_wq);
+ dhd->dhd_deferred_wq = NULL;
- return ret;
+#ifdef SHOW_LOGTRACE
+ /* Wait till event_log_dispatcher_work finishes */
+ cancel_work_sync(&dhd->event_log_dispatcher_work);
+#endif /* SHOW_LOGTRACE */
+
+ dhd_sysfs_exit(dhd);
+ dhd->pub.fw_download_done = FALSE;
+ dhd_conf_detach(dhdp);
}
-int
-dhd_dev_apf_enable_filter(struct net_device *ndev)
+
+void
+dhd_free(dhd_pub_t *dhdp)
{
- dhd_info_t *dhd = DHD_DEV_INFO(ndev);
- dhd_pub_t *dhdp = &dhd->pub;
- int ret = 0;
- bool nan_dp_active = false;
+ dhd_info_t *dhd;
+ DHD_TRACE(("%s: Enter\n", __FUNCTION__));
- DHD_APF_LOCK(ndev);
-#ifdef WL_NAN
- nan_dp_active = wl_cfgnan_is_dp_active(ndev);
-#endif /* WL_NAN */
- if (dhdp->apf_set && (!(dhdp->op_mode & DHD_FLAG_HOSTAP_MODE) &&
- !nan_dp_active)) {
- ret = __dhd_apf_config_filter(ndev, PKT_FILTER_APF_ID,
- PKT_FILTER_MODE_FORWARD_ON_MATCH, TRUE);
- }
+ if (dhdp) {
+ int i;
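+ /* Free the rx reorder buffers: each holds a reorder_info header plus (max_idx + 1) packet pointers */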
+ for (i = 0; i < ARRAYSIZE(dhdp->reorder_bufs); i++) {
+ if (dhdp->reorder_bufs[i]) {
+ reorder_info_t *ptr;
+ uint32 buf_size = sizeof(struct reorder_info);
- DHD_APF_UNLOCK(ndev);
+ ptr = dhdp->reorder_bufs[i];
- return ret;
-}
+ buf_size += ((ptr->max_idx + 1) * sizeof(void*));
+ DHD_REORDER(("free flow id buf %d, maxidx is %d, buf_size %d\n",
+ i, ptr->max_idx, buf_size));
-int
-dhd_dev_apf_disable_filter(struct net_device *ndev)
-{
- dhd_info_t *dhd = DHD_DEV_INFO(ndev);
- dhd_pub_t *dhdp = &dhd->pub;
- int ret = 0;
+ MFREE(dhdp->osh, dhdp->reorder_bufs[i], buf_size);
+ dhdp->reorder_bufs[i] = NULL;
+ }
+ }
- DHD_APF_LOCK(ndev);
+ dhd_sta_pool_fini(dhdp, DHD_MAX_STA);
- if (dhdp->apf_set) {
- ret = __dhd_apf_config_filter(ndev, PKT_FILTER_APF_ID,
- PKT_FILTER_MODE_FORWARD_ON_MATCH, FALSE);
- }
+ dhd = (dhd_info_t *)dhdp->info;
+ if (dhdp->soc_ram) {
+#if defined(CONFIG_DHD_USE_STATIC_BUF) && defined(DHD_USE_STATIC_MEMDUMP)
+ DHD_OS_PREFREE(dhdp, dhdp->soc_ram, dhdp->soc_ram_length);
+#else
+ MFREE(dhdp->osh, dhdp->soc_ram, dhdp->soc_ram_length);
+#endif /* CONFIG_DHD_USE_STATIC_BUF && DHD_USE_STATIC_MEMDUMP */
+ dhdp->soc_ram = NULL;
+ }
+#ifdef CACHE_FW_IMAGES
+ if (dhdp->cached_fw) {
+ MFREE(dhdp->osh, dhdp->cached_fw, dhdp->bus->ramsize);
+ dhdp->cached_fw = NULL;
+ }
- DHD_APF_UNLOCK(ndev);
+ if (dhdp->cached_nvram) {
+ MFREE(dhdp->osh, dhdp->cached_nvram, MAX_NVRAMBUF_SIZE);
+ dhdp->cached_nvram = NULL;
+ }
+#endif
+ if (dhd) {
+#ifdef REPORT_FATAL_TIMEOUTS
+ deinit_dhd_timeouts(&dhd->pub);
+#endif /* REPORT_FATAL_TIMEOUTS */
- return ret;
+ /* If pointer is allocated by dhd_os_prealloc then avoid MFREE */
+ if (dhd != (dhd_info_t *)dhd_os_prealloc(dhdp,
+ DHD_PREALLOC_DHD_INFO, 0, FALSE))
+ MFREE(dhd->pub.osh, dhd, sizeof(*dhd));
+ dhd = NULL;
+ }
+ }
}
-int
-dhd_dev_apf_delete_filter(struct net_device *ndev)
+void
+dhd_clear(dhd_pub_t *dhdp)
{
- dhd_info_t *dhd = DHD_DEV_INFO(ndev);
- dhd_pub_t *dhdp = &dhd->pub;
- int ret = 0;
+ DHD_TRACE(("%s: Enter\n", __FUNCTION__));
- DHD_APF_LOCK(ndev);
+ if (dhdp) {
+ int i;
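+ /* Unlike dhd_free(), only per-connection state (reorder buffers, STA pool, soc_ram) is released; the dhd_info itself is kept */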
+#ifdef DHDTCPACK_SUPPRESS
+ /* Clean up the timers and data structures for any remaining/pending TCP ACK packets. */
+ dhd_tcpack_info_tbl_clean(dhdp);
+#endif /* DHDTCPACK_SUPPRESS */
+ for (i = 0; i < ARRAYSIZE(dhdp->reorder_bufs); i++) {
+ if (dhdp->reorder_bufs[i]) {
+ reorder_info_t *ptr;
+ uint32 buf_size = sizeof(struct reorder_info);
- if (dhdp->apf_set) {
- ret = __dhd_apf_delete_filter(ndev, PKT_FILTER_APF_ID);
- if (!ret) {
- dhdp->apf_set = FALSE;
+ ptr = dhdp->reorder_bufs[i];
+
+ buf_size += ((ptr->max_idx + 1) * sizeof(void*));
+ DHD_REORDER(("free flow id buf %d, maxidx is %d, buf_size %d\n",
+ i, ptr->max_idx, buf_size));
+
+ MFREE(dhdp->osh, dhdp->reorder_bufs[i], buf_size);
+ dhdp->reorder_bufs[i] = NULL;
+ }
}
- }
- DHD_APF_UNLOCK(ndev);
+ dhd_sta_pool_clear(dhdp, DHD_MAX_STA);
- return ret;
+ if (dhdp->soc_ram) {
+#if defined(CONFIG_DHD_USE_STATIC_BUF) && defined(DHD_USE_STATIC_MEMDUMP)
+ DHD_OS_PREFREE(dhdp, dhdp->soc_ram, dhdp->soc_ram_length);
+#else
+ MFREE(dhdp->osh, dhdp->soc_ram, dhdp->soc_ram_length);
+#endif /* CONFIG_DHD_USE_STATIC_BUF && DHD_USE_STATIC_MEMDUMP */
+ dhdp->soc_ram = NULL;
+ }
+ }
}
-#endif /* PKT_FILTER_SUPPORT && APF */
-static void dhd_hang_process(struct work_struct *work_data)
+static void
+dhd_module_cleanup(void)
{
- struct net_device *dev;
-#ifdef IFACE_HANG_FORCE_DEV_CLOSE
- struct net_device *ndev;
- uint8 i = 0;
-#endif /* IFACE_HANG_FORCE_DEV_CLOSE */
-/* Ignore compiler warnings due to -Werror=cast-qual */
-#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
-#pragma GCC diagnostic push
-#pragma GCC diagnostic ignored "-Wcast-qual"
-#endif // endif
- struct dhd_info *dhd =
- container_of(work_data, dhd_info_t, dhd_hang_process_work);
-#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
-#pragma GCC diagnostic pop
-#endif // endif
+ printf("%s: Enter\n", __FUNCTION__);
- if (!dhd || !dhd->iflist[0])
- return;
- dev = dhd->iflist[0]->net;
+ dhd_bus_unregister();
- if (dev) {
-#if defined(WL_WIRELESS_EXT)
- wl_iw_send_priv_event(dev, "HANG");
-#endif // endif
-#if defined(WL_CFG80211)
- wl_cfg80211_hang(dev, WLAN_REASON_UNSPECIFIED);
-#endif // endif
- }
-#ifdef IFACE_HANG_FORCE_DEV_CLOSE
- /*
- * For HW2, dev_close need to be done to recover
- * from upper layer after hang. For Interposer skip
- * dev_close so that dhd iovars can be used to take
- * socramdump after crash, also skip for HW4 as
- * handling of hang event is different
- */
+ wl_android_exit();
- rtnl_lock();
- for (i = 0; i < DHD_MAX_IFS; i++) {
- ndev = dhd->iflist[i] ? dhd->iflist[i]->net : NULL;
- if (ndev && (ndev->flags & IFF_UP)) {
- DHD_ERROR(("ndev->name : %s dev close\n",
- ndev->name));
- dev_close(ndev);
- }
- }
- rtnl_unlock();
-#endif /* IFACE_HANG_FORCE_DEV_CLOSE */
+ dhd_wifi_platform_unregister_drv();
+#ifdef CUSTOMER_HW_AMLOGIC
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 14, 0))
+ wifi_teardown_dt();
+#endif
+#endif
+ printf("%s: Exit\n", __FUNCTION__);
}
-#ifdef EXYNOS_PCIE_LINKDOWN_RECOVERY
-extern dhd_pub_t *link_recovery;
-void dhd_host_recover_link(void)
+static void __exit
+dhd_module_exit(void)
{
- DHD_ERROR(("****** %s ******\n", __FUNCTION__));
- link_recovery->hang_reason = HANG_REASON_PCIE_LINK_DOWN_RC_DETECT;
- dhd_bus_set_linkdown(link_recovery, TRUE);
- dhd_os_send_hang_message(link_recovery);
+ atomic_set(&exit_in_progress, 1);
+ dhd_module_cleanup();
+ unregister_reboot_notifier(&dhd_reboot_notifier);
+ dhd_destroy_to_notifier_skt();
}
-EXPORT_SYMBOL(dhd_host_recover_link);
-#endif /* EXYNOS_PCIE_LINKDOWN_RECOVERY */
-int dhd_os_send_hang_message(dhd_pub_t *dhdp)
+static int __init
+dhd_module_init(void)
{
- int ret = 0;
-#ifdef WL_CFG80211
- struct net_device *primary_ndev;
- struct bcm_cfg80211 *cfg;
-#ifdef DHD_FILE_DUMP_EVENT
- dhd_info_t *dhd_info = NULL;
-#endif /* DHD_FILE_DUMP_EVENT */
-#endif /* WL_CFG80211 */
+ int err;
+ int retry = POWERUP_MAX_RETRY;
- if (!dhdp) {
- DHD_ERROR(("%s: dhdp is null\n", __FUNCTION__));
- return -EINVAL;
+ printf("%s: in %s\n", __FUNCTION__, dhd_version);
+#ifdef CUSTOMER_HW_AMLOGIC
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 14, 0))
+ if (wifi_setup_dt()) {
+ printf("wifi_dt : fail to setup dt\n");
}
+#endif
+#endif
-#if defined(WL_CFG80211) && defined(DHD_FILE_DUMP_EVENT)
- dhd_info = (dhd_info_t *)dhdp->info;
+ DHD_PERIM_RADIO_INIT();
- if (dhd_info->scheduled_memdump) {
- DHD_ERROR_RLMT(("[DUMP]:%s, memdump in progress. return\n", __FUNCTION__));
- dhdp->hang_was_pending = 1;
- return BCME_OK;
- }
-#endif /* WL_CFG80211 && DHD_FILE_DUMP_EVENT */
-#ifdef WL_CFG80211
- primary_ndev = dhd_linux_get_primary_netdev(dhdp);
- if (!primary_ndev) {
- DHD_ERROR(("%s: Cannot find primary netdev\n", __FUNCTION__));
- return -ENODEV;
- }
- cfg = wl_get_cfg(primary_ndev);
- if (!cfg) {
- DHD_ERROR(("%s: Cannot find cfg\n", __FUNCTION__));
- return -EINVAL;
+ if (firmware_path[0] != '\0') {
+ strncpy(fw_bak_path, firmware_path, MOD_PARAM_PATHLEN);
+ fw_bak_path[MOD_PARAM_PATHLEN-1] = '\0';
}
- /* Skip sending HANG event to framework if driver is not ready */
- if (!wl_get_drv_status(cfg, READY, primary_ndev)) {
- DHD_ERROR(("%s: device is not ready\n", __FUNCTION__));
- return -ENODEV;
+ if (nvram_path[0] != '\0') {
+ strncpy(nv_bak_path, nvram_path, MOD_PARAM_PATHLEN);
+ nv_bak_path[MOD_PARAM_PATHLEN-1] = '\0';
}
-#endif /* WL_CFG80211 */
- if (!dhdp->hang_was_sent) {
-#if defined(CONFIG_BCM_DETECT_CONSECUTIVE_HANG)
- dhdp->hang_counts++;
- if (dhdp->hang_counts >= MAX_CONSECUTIVE_HANG_COUNTS) {
- DHD_ERROR(("%s, Consecutive hang from Dongle :%u\n",
- __func__, dhdp->hang_counts));
- BUG_ON(1);
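+ /* Retry platform driver registration up to POWERUP_MAX_RETRY times, restoring the firmware/nvram paths between attempts */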
+ do {
+ err = dhd_wifi_platform_register_drv();
+ if (!err) {
+ register_reboot_notifier(&dhd_reboot_notifier);
+ break;
+ } else {
+ DHD_ERROR(("%s: Failed to load the driver, try cnt %d\n",
+ __FUNCTION__, retry));
+ strncpy(firmware_path, fw_bak_path, MOD_PARAM_PATHLEN);
+ firmware_path[MOD_PARAM_PATHLEN-1] = '\0';
+ strncpy(nvram_path, nv_bak_path, MOD_PARAM_PATHLEN);
+ nvram_path[MOD_PARAM_PATHLEN-1] = '\0';
}
-#endif /* CONFIG_BCM_DETECT_CONSECUTIVE_HANG */
-#ifdef DHD_DEBUG_UART
- /* If PCIe lane has broken, execute the debug uart application
- * to gether a ramdump data from dongle via uart
- */
- if (!dhdp->info->duart_execute) {
- dhd_deferred_schedule_work(dhdp->info->dhd_deferred_wq,
- (void *)dhdp, DHD_WQ_WORK_DEBUG_UART_DUMP,
- dhd_debug_uart_exec_rd, DHD_WQ_WORK_PRIORITY_HIGH);
+ } while (retry--);
+
+ dhd_create_to_notifier_skt();
+
+ if (err) {
+#ifdef CUSTOMER_HW_AMLOGIC
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 14, 0))
+ wifi_teardown_dt();
+#endif
+#endif
+ DHD_ERROR(("%s: Failed to load driver max retry reached**\n", __FUNCTION__));
+ } else {
+ if (!dhd_download_fw_on_driverload) {
+ dhd_driver_init_done = TRUE;
}
-#endif /* DHD_DEBUG_UART */
- dhdp->hang_was_sent = 1;
-#ifdef BT_OVER_SDIO
- dhdp->is_bt_recovery_required = TRUE;
-#endif // endif
- schedule_work(&dhdp->info->dhd_hang_process_work);
- DHD_ERROR(("%s: Event HANG send up due to re=%d te=%d s=%d\n", __FUNCTION__,
- dhdp->rxcnt_timeout, dhdp->txcnt_timeout, dhdp->busstate));
}
- return ret;
+
+ printf("%s: Exit err=%d\n", __FUNCTION__, err);
+ return err;
}
-int net_os_send_hang_message(struct net_device *dev)
+static int
+dhd_reboot_callback(struct notifier_block *this, unsigned long code, void *unused)
{
- dhd_info_t *dhd = DHD_DEV_INFO(dev);
- int ret = 0;
-
- if (dhd) {
- /* Report FW problem when enabled */
- if (dhd->pub.hang_report) {
-#ifdef BT_OVER_SDIO
- if (netif_running(dev)) {
-#endif /* BT_OVER_SDIO */
- ret = dhd_os_send_hang_message(&dhd->pub);
-#ifdef BT_OVER_SDIO
- }
- DHD_ERROR(("%s: HANG -> Reset BT\n", __FUNCTION__));
- bcmsdh_btsdio_process_dhd_hang_notification(!netif_running(dev));
-#endif /* BT_OVER_SDIO */
- } else {
- DHD_ERROR(("%s: FW HANG ignored (for testing purpose) and not sent up\n",
- __FUNCTION__));
- }
+ DHD_TRACE(("%s: code = %ld\n", __FUNCTION__, code));
+ if (code == SYS_RESTART) {
+#ifdef BCMPCIE
+ is_reboot = code;
+#endif /* BCMPCIE */
}
- return ret;
+ return NOTIFY_DONE;
}
-int net_os_send_hang_message_reason(struct net_device *dev, const char *string_num)
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)
+#if defined(CONFIG_DEFERRED_INITCALLS) && !defined(EXYNOS_PCIE_MODULE_PATCH)
+#if defined(CONFIG_MACH_UNIVERSAL7420) || defined(CONFIG_SOC_EXYNOS8890) || \
+ defined(CONFIG_ARCH_MSM8996) || defined(CONFIG_SOC_EXYNOS8895) || \
+ defined(CONFIG_ARCH_MSM8998)
+deferred_module_init_sync(dhd_module_init);
+#else
+deferred_module_init(dhd_module_init);
+#endif /* CONFIG_MACH_UNIVERSAL7420 || CONFIG_SOC_EXYNOS8890 ||
+ * CONFIG_ARCH_MSM8996 || CONFIG_SOC_EXYNOS8895 || CONFIG_ARCH_MSM8998
+ */
+#elif defined(USE_LATE_INITCALL_SYNC)
+late_initcall_sync(dhd_module_init);
+#else
+late_initcall(dhd_module_init);
+#endif /* USE_LATE_INITCALL_SYNC */
+#else
+module_init(dhd_module_init);
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0) */
+
+module_exit(dhd_module_exit);
+
+/*
+ * OS specific functions required to implement DHD driver in OS independent way
+ */
+int
+dhd_os_proto_block(dhd_pub_t *pub)
{
- dhd_info_t *dhd = NULL;
- dhd_pub_t *dhdp = NULL;
- int reason;
+ dhd_info_t * dhd = (dhd_info_t *)(pub->info);
- dhd = DHD_DEV_INFO(dev);
if (dhd) {
- dhdp = &dhd->pub;
- }
-
- if (!dhd || !dhdp) {
- return 0;
- }
+ DHD_PERIM_UNLOCK(pub);
- reason = bcm_strtoul(string_num, NULL, 0);
- DHD_INFO(("%s: Enter, reason=0x%x\n", __FUNCTION__, reason));
+ down(&dhd->proto_sem);
- if ((reason <= HANG_REASON_MASK) || (reason >= HANG_REASON_MAX)) {
- reason = 0;
+ DHD_PERIM_LOCK(pub);
+ return 1;
}
- dhdp->hang_reason = reason;
-
- return net_os_send_hang_message(dev);
+ return 0;
}
-int dhd_net_wifi_platform_set_power(struct net_device *dev, bool on, unsigned long delay_msec)
+int
+dhd_os_proto_unblock(dhd_pub_t *pub)
{
- dhd_info_t *dhd = DHD_DEV_INFO(dev);
- return wifi_platform_set_power(dhd->adapter, on, delay_msec);
-}
+ dhd_info_t * dhd = (dhd_info_t *)(pub->info);
-bool dhd_force_country_change(struct net_device *dev)
-{
- dhd_info_t *dhd = DHD_DEV_INFO(dev);
+ if (dhd) {
+ up(&dhd->proto_sem);
+ return 1;
+ }
- if (dhd && dhd->pub.up)
- return dhd->pub.force_country_change;
- return FALSE;
+ return 0;
}
-void dhd_get_customized_country_code(struct net_device *dev, char *country_iso_code,
- wl_country_t *cspec)
+void
+dhd_os_dhdiovar_lock(dhd_pub_t *pub)
{
- dhd_info_t *dhd = DHD_DEV_INFO(dev);
-#if defined(DHD_BLOB_EXISTENCE_CHECK)
- if (!dhd->pub.is_blob)
-#endif /* DHD_BLOB_EXISTENCE_CHECK */
- {
-#if defined(CUSTOM_COUNTRY_CODE)
- get_customized_country_code(dhd->adapter, country_iso_code, cspec,
- dhd->pub.dhd_cflags);
-#else
- get_customized_country_code(dhd->adapter, country_iso_code, cspec);
-#endif /* CUSTOM_COUNTRY_CODE */
- }
-#if defined(DHD_BLOB_EXISTENCE_CHECK) && !defined(CUSTOM_COUNTRY_CODE)
- else {
- /* Replace the ccode to XZ if ccode is undefined country */
- if (strncmp(country_iso_code, "", WLC_CNTRY_BUF_SZ) == 0) {
- strlcpy(country_iso_code, "XZ", WLC_CNTRY_BUF_SZ);
- strlcpy(cspec->country_abbrev, country_iso_code, WLC_CNTRY_BUF_SZ);
- strlcpy(cspec->ccode, country_iso_code, WLC_CNTRY_BUF_SZ);
- DHD_ERROR(("%s: ccode change to %s\n", __FUNCTION__, country_iso_code));
- }
- }
-#endif /* DHD_BLOB_EXISTENCE_CHECK && !CUSTOM_COUNTRY_CODE */
+ dhd_info_t * dhd = (dhd_info_t *)(pub->info);
- BCM_REFERENCE(dhd);
+ if (dhd) {
+ mutex_lock(&dhd->dhd_iovar_mutex);
+ }
}
-void dhd_bus_country_set(struct net_device *dev, wl_country_t *cspec, bool notify)
+void
+dhd_os_dhdiovar_unlock(dhd_pub_t *pub)
{
- dhd_info_t *dhd = DHD_DEV_INFO(dev);
-#ifdef WL_CFG80211
- struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
-#endif // endif
+ dhd_info_t * dhd = (dhd_info_t *)(pub->info);
- if (dhd && dhd->pub.up) {
- memcpy(&dhd->pub.dhd_cspec, cspec, sizeof(wl_country_t));
-#ifdef WL_CFG80211
- wl_update_wiphybands(cfg, notify);
-#endif // endif
+ if (dhd) {
+ mutex_unlock(&dhd->dhd_iovar_mutex);
}
}
-void dhd_bus_band_set(struct net_device *dev, uint band)
+unsigned int
+dhd_os_get_ioctl_resp_timeout(void)
+{
+ return ((unsigned int)dhd_ioctl_timeout_msec);
+}
+
+void
+dhd_os_set_ioctl_resp_timeout(unsigned int timeout_msec)
{
- dhd_info_t *dhd = DHD_DEV_INFO(dev);
-#ifdef WL_CFG80211
- struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
-#endif // endif
- if (dhd && dhd->pub.up) {
-#ifdef WL_CFG80211
- wl_update_wiphybands(cfg, true);
-#endif // endif
- }
+ dhd_ioctl_timeout_msec = (int)timeout_msec;
}
-int dhd_net_set_fw_path(struct net_device *dev, char *fw)
+int
+dhd_os_ioctl_resp_wait(dhd_pub_t *pub, uint *condition, bool resched)
{
- dhd_info_t *dhd = DHD_DEV_INFO(dev);
+ dhd_info_t * dhd = (dhd_info_t *)(pub->info);
+ int timeout, timeout_tmp = dhd_ioctl_timeout_msec;
- if (!fw || fw[0] == '\0')
- return -EINVAL;
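+ /* Optionally override the IOCTL response timeout from the per-board config; the original value is restored after the wait */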
+ if (!resched && pub->conf->ctrl_resched>0 && pub->conf->dhd_ioctl_timeout_msec>0) {
+ timeout_tmp = dhd_ioctl_timeout_msec;
+ dhd_ioctl_timeout_msec = pub->conf->dhd_ioctl_timeout_msec;
+ }
- strncpy(dhd->fw_path, fw, sizeof(dhd->fw_path) - 1);
- dhd->fw_path[sizeof(dhd->fw_path)-1] = '\0';
+ /* Convert timeout in milliseconds to jiffies */
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
+ timeout = msecs_to_jiffies(dhd_ioctl_timeout_msec);
+#else
+ timeout = dhd_ioctl_timeout_msec * HZ / 1000;
+#endif
-#if defined(SOFTAP)
- if (strstr(fw, "apsta") != NULL) {
- DHD_INFO(("GOT APSTA FIRMWARE\n"));
- ap_fw_loaded = TRUE;
- } else {
- DHD_INFO(("GOT STA FIRMWARE\n"));
- ap_fw_loaded = FALSE;
+ DHD_PERIM_UNLOCK(pub);
+
+ timeout = wait_event_timeout(dhd->ioctl_resp_wait, (*condition), timeout);
+
+ if (!resched && pub->conf->ctrl_resched>0 && pub->conf->dhd_ioctl_timeout_msec>0) {
+ dhd_ioctl_timeout_msec = timeout_tmp;
}
-#endif // endif
- return 0;
-}
-void dhd_net_if_lock(struct net_device *dev)
-{
- dhd_info_t *dhd = DHD_DEV_INFO(dev);
- dhd_net_if_lock_local(dhd);
+ DHD_PERIM_LOCK(pub);
+
+ return timeout;
}
-void dhd_net_if_unlock(struct net_device *dev)
+int
+dhd_os_ioctl_resp_wake(dhd_pub_t *pub)
{
- dhd_info_t *dhd = DHD_DEV_INFO(dev);
- dhd_net_if_unlock_local(dhd);
+ dhd_info_t *dhd = (dhd_info_t *)(pub->info);
+
+ wake_up(&dhd->ioctl_resp_wait);
+ return 0;
}
-static void dhd_net_if_lock_local(dhd_info_t *dhd)
+int
+dhd_os_d3ack_wait(dhd_pub_t *pub, uint *condition)
{
- if (dhd)
- mutex_lock(&dhd->dhd_net_if_mutex);
+ dhd_info_t * dhd = (dhd_info_t *)(pub->info);
+ int timeout;
+
+ /* Convert timeout in milliseconds to jiffies */
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
+ timeout = msecs_to_jiffies(dhd_ioctl_timeout_msec);
+#else
+ timeout = dhd_ioctl_timeout_msec * HZ / 1000;
+#endif
+
+ DHD_PERIM_UNLOCK(pub);
+
+ timeout = wait_event_timeout(dhd->d3ack_wait, (*condition), timeout);
+
+ DHD_PERIM_LOCK(pub);
+
+ return timeout;
}
-static void dhd_net_if_unlock_local(dhd_info_t *dhd)
+#ifdef PCIE_INB_DW
+int
+dhd_os_ds_exit_wait(dhd_pub_t *pub, uint *condition)
{
- if (dhd)
- mutex_unlock(&dhd->dhd_net_if_mutex);
+ dhd_info_t * dhd = (dhd_info_t *)(pub->info);
+ int timeout;
+
+ /* Convert timeout in milliseconds to jiffies */
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
+ timeout = msecs_to_jiffies(ds_exit_timeout_msec);
+#else
+ timeout = ds_exit_timeout_msec * HZ / 1000;
+#endif
+
+ DHD_PERIM_UNLOCK(pub);
+
+ timeout = wait_event_timeout(dhd->ds_exit_wait, (*condition), timeout);
+
+ DHD_PERIM_LOCK(pub);
+
+ return timeout;
}
-static void dhd_suspend_lock(dhd_pub_t *pub)
+int
+dhd_os_ds_exit_wake(dhd_pub_t *pub)
{
dhd_info_t *dhd = (dhd_info_t *)(pub->info);
- if (dhd)
- mutex_lock(&dhd->dhd_suspend_mutex);
+
+ wake_up(&dhd->ds_exit_wait);
+ return 0;
}
-static void dhd_suspend_unlock(dhd_pub_t *pub)
+#endif /* PCIE_INB_DW */
+
+int
+dhd_os_d3ack_wake(dhd_pub_t *pub)
{
dhd_info_t *dhd = (dhd_info_t *)(pub->info);
- if (dhd)
- mutex_unlock(&dhd->dhd_suspend_mutex);
+
+ wake_up(&dhd->d3ack_wait);
+ return 0;
}
-unsigned long dhd_os_general_spin_lock(dhd_pub_t *pub)
+int
+dhd_os_busbusy_wait_negation(dhd_pub_t *pub, uint *condition)
{
- dhd_info_t *dhd = (dhd_info_t *)(pub->info);
- unsigned long flags = 0;
+ dhd_info_t * dhd = (dhd_info_t *)(pub->info);
+ int timeout;
- if (dhd)
- spin_lock_irqsave(&dhd->dhd_lock, flags);
+ /* Wait for bus usage contexts to exit gracefully within the timeout.
+ * The timeout is set slightly higher than dhd_ioctl_timeout_msec
+ * so that IOCTL timeouts are not affected.
+ */
+ /* Convert timeout in milliseconds to jiffies */
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
+ timeout = msecs_to_jiffies(DHD_BUS_BUSY_TIMEOUT);
+#else
+ timeout = DHD_BUS_BUSY_TIMEOUT * HZ / 1000;
+#endif
- return flags;
+ timeout = wait_event_timeout(dhd->dhd_bus_busy_state_wait, !(*condition), timeout);
+
+ return timeout;
}
-void dhd_os_general_spin_unlock(dhd_pub_t *pub, unsigned long flags)
+/*
+ * Wait until the condition *var == condition is met.
+ * Returns 0 if the condition was still false when the timeout elapsed
+ * Returns non-zero if the condition evaluated to true
+ */
+int
+dhd_os_busbusy_wait_condition(dhd_pub_t *pub, uint *var, uint condition)
{
- dhd_info_t *dhd = (dhd_info_t *)(pub->info);
+ dhd_info_t * dhd = (dhd_info_t *)(pub->info);
+ int timeout;
- if (dhd)
- spin_unlock_irqrestore(&dhd->dhd_lock, flags);
+ /* Convert timeout in milliseconds to jiffies */
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
+ timeout = msecs_to_jiffies(DHD_BUS_BUSY_TIMEOUT);
+#else
+ timeout = DHD_BUS_BUSY_TIMEOUT * HZ / 1000;
+#endif
+
+ timeout = wait_event_timeout(dhd->dhd_bus_busy_state_wait, (*var == condition), timeout);
+
+ return timeout;
}
-/* Linux specific multipurpose spinlock API */
-void *
-dhd_os_spin_lock_init(osl_t *osh)
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 36))
+/* Fix compilation error for FC11 */
+INLINE
+#endif
+int
+dhd_os_busbusy_wake(dhd_pub_t *pub)
{
- /* Adding 4 bytes since the sizeof(spinlock_t) could be 0 */
- /* if CONFIG_SMP and CONFIG_DEBUG_SPINLOCK are not defined */
- /* and this results in kernel asserts in internal builds */
- spinlock_t * lock = MALLOC(osh, sizeof(spinlock_t) + 4);
- if (lock)
- spin_lock_init(lock);
- return ((void *)lock);
+ dhd_info_t *dhd = (dhd_info_t *)(pub->info);
+ /* Memory barrier: make sure the updated event value is visible before waking the waiter */
+ OSL_SMP_WMB();
+ wake_up(&dhd->dhd_bus_busy_state_wait);
+ return 0;
}
+
void
-dhd_os_spin_lock_deinit(osl_t *osh, void *lock)
+dhd_os_wd_timer_extend(void *bus, bool extend)
{
- if (lock)
- MFREE(osh, lock, sizeof(spinlock_t) + 4);
+#ifndef BCMDBUS
+ dhd_pub_t *pub = bus;
+ dhd_info_t *dhd = (dhd_info_t *)pub->info;
+
+ if (extend)
+ dhd_os_wd_timer(bus, WATCHDOG_EXTEND_INTERVAL);
+ else
+ dhd_os_wd_timer(bus, dhd->default_wd_interval);
+#endif /* !BCMDBUS */
}
-unsigned long
-dhd_os_spin_lock(void *lock)
+
+
+void
+dhd_os_wd_timer(void *bus, uint wdtick)
{
- unsigned long flags = 0;
+#ifndef BCMDBUS
+ dhd_pub_t *pub = bus;
+ dhd_info_t *dhd = (dhd_info_t *)pub->info;
+ unsigned long flags;
- if (lock)
- spin_lock_irqsave((spinlock_t *)lock, flags);
+ DHD_TRACE(("%s: Enter\n", __FUNCTION__));
- return flags;
+ if (!dhd) {
+ DHD_ERROR(("%s: dhd NULL\n", __FUNCTION__));
+ return;
+ }
+
+ DHD_GENERAL_LOCK(pub, flags);
+
+ /* don't start the wd until fw is loaded */
+ if (pub->busstate == DHD_BUS_DOWN) {
+ DHD_GENERAL_UNLOCK(pub, flags);
+ return;
+ }
+
+ /* Totally stop the timer */
+ if (!wdtick && dhd->wd_timer_valid == TRUE) {
+ dhd->wd_timer_valid = FALSE;
+ DHD_GENERAL_UNLOCK(pub, flags);
+ del_timer_sync(&dhd->timer);
+ return;
+ }
+
+ if (wdtick) {
+ dhd_watchdog_ms = (uint)wdtick;
+ /* Re-arm the timer with the latest watchdog period */
+ mod_timer(&dhd->timer, jiffies + msecs_to_jiffies(dhd_watchdog_ms));
+ dhd->wd_timer_valid = TRUE;
+ }
+ DHD_GENERAL_UNLOCK(pub, flags);
+#endif /* !BCMDBUS */
}
+
+#ifdef DHD_PCIE_RUNTIMEPM
void
-dhd_os_spin_unlock(void *lock, unsigned long flags)
+dhd_os_runtimepm_timer(void *bus, uint tick)
{
- if (lock)
- spin_unlock_irqrestore((spinlock_t *)lock, flags);
+ dhd_pub_t *pub = bus;
+ dhd_info_t *dhd = (dhd_info_t *)pub->info;
+ unsigned long flags;
+
+ DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+ if (!dhd) {
+ DHD_ERROR(("%s: dhd is NULL\n", __FUNCTION__));
+ return;
+ }
+
+ DHD_GENERAL_LOCK(pub, flags);
+
+ /* don't start the RPM until fw is loaded */
+ if (DHD_BUS_CHECK_DOWN_OR_DOWN_IN_PROGRESS(pub)) {
+ DHD_GENERAL_UNLOCK(pub, flags);
+ return;
+ }
+
+ /* If tick is non-zero, the request is to start the timer */
+ if (tick) {
+ /* Start the timer only if it's not already running */
+ if (dhd->rpm_timer_valid == FALSE) {
+ mod_timer(&dhd->rpm_timer, jiffies + msecs_to_jiffies(dhd_runtimepm_ms));
+ dhd->rpm_timer_valid = TRUE;
+ }
+ } else {
+ /* tick is zero, we have to stop the timer */
+ /* Stop the timer only if it's running; otherwise there is nothing to do */
+ if (dhd->rpm_timer_valid == TRUE) {
+ dhd->rpm_timer_valid = FALSE;
+ DHD_GENERAL_UNLOCK(pub, flags);
+ del_timer_sync(&dhd->rpm_timer);
+ /* we have already released the lock, so just go to exit */
+ goto exit;
+ }
+ }
+
+ DHD_GENERAL_UNLOCK(pub, flags);
+exit:
+ return;
+
}
+#endif /* DHD_PCIE_RUNTIMEPM */
+
void *
-dhd_os_dbgring_lock_init(osl_t *osh)
+dhd_os_open_image(char *filename)
{
- struct mutex *mtx = NULL;
+ struct file *fp;
+ int size;
+
+ fp = filp_open(filename, O_RDONLY, 0);
+ /*
+ * 2.6.11 (FC4) supports filp_open() but later revs don't?
+ * Alternative:
+ * fp = open_namei(AT_FDCWD, filename, O_RD, 0);
+ * ???
+ */
+ if (IS_ERR(fp)) {
+ fp = NULL;
+ goto err;
+ }
+
+ if (!S_ISREG(file_inode(fp)->i_mode)) {
+ DHD_ERROR(("%s: %s is not regular file\n", __FUNCTION__, filename));
+ fp = NULL;
+ goto err;
+ }
+
+ size = i_size_read(file_inode(fp));
+ if (size <= 0) {
+ DHD_ERROR(("%s: %s file size invalid %d\n", __FUNCTION__, filename, size));
+ fp = NULL;
+ goto err;
+ }
- mtx = MALLOCZ(osh, sizeof(*mtx));
- if (mtx)
- mutex_init(mtx);
+ DHD_ERROR(("%s: %s (%d bytes) open success\n", __FUNCTION__, filename, size));
- return mtx;
+err:
+ return fp;
}
-void
-dhd_os_dbgring_lock_deinit(osl_t *osh, void *mtx)
+int
+dhd_os_get_image_block(char *buf, int len, void *image)
{
- if (mtx) {
- mutex_destroy(mtx);
- MFREE(osh, mtx, sizeof(struct mutex));
+ struct file *fp = (struct file *)image;
+ int rdlen;
+ int size;
+
+ if (!image) {
+ return 0;
+ }
+
+ size = i_size_read(file_inode(fp));
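+ /* kernel_read() changed its signature in Linux 4.14: it now takes a loff_t pointer and advances the file position itself */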
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 0)
+ rdlen = kernel_read(fp, buf, MIN(len, size), &fp->f_pos);
+#else
+ rdlen = kernel_read(fp, fp->f_pos, buf, MIN(len, size));
+#endif
+
+ if (len >= size && size != rdlen) {
+ return -EIO;
}
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 14, 0)
+ if (rdlen > 0) {
+ fp->f_pos += rdlen;
+ }
+#endif
+
+ return rdlen;
}
-static int
-dhd_get_pend_8021x_cnt(dhd_info_t *dhd)
+int
+dhd_os_get_image_size(void *image)
{
- return (atomic_read(&dhd->pend_8021x_cnt));
-}
+ struct file *fp = (struct file *)image;
+ int size;
+ if (!image) {
+ return 0;
+ }
-#define MAX_WAIT_FOR_8021X_TX 100
+ size = i_size_read(file_inode(fp));
+ return size;
+}
+
+#if defined(BT_OVER_SDIO)
int
-dhd_wait_pend8021x(struct net_device *dev)
+dhd_os_gets_image(dhd_pub_t *pub, char *str, int len, void *image)
{
- dhd_info_t *dhd = DHD_DEV_INFO(dev);
- int timeout = msecs_to_jiffies(10);
- int ntimes = MAX_WAIT_FOR_8021X_TX;
- int pend = dhd_get_pend_8021x_cnt(dhd);
+ struct file *fp = (struct file *)image;
+ int rd_len;
+ uint str_len = 0;
+ char *str_end = NULL;
- while (ntimes && pend) {
- if (pend) {
- set_current_state(TASK_INTERRUPTIBLE);
- DHD_PERIM_UNLOCK(&dhd->pub);
- schedule_timeout(timeout);
- DHD_PERIM_LOCK(&dhd->pub);
- set_current_state(TASK_RUNNING);
- ntimes--;
- }
- pend = dhd_get_pend_8021x_cnt(dhd);
- }
- if (ntimes == 0)
- {
- atomic_set(&dhd->pend_8021x_cnt, 0);
- WL_MSG(dev->name, "TIMEOUT\n");
+ if (!image)
+ return 0;
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 0)
+ rd_len = kernel_read(fp, str, len, &fp->f_pos);
+#else
+ rd_len = kernel_read(fp, fp->f_pos, str, len);
+#endif
+ str_end = strnchr(str, len, '\n');
+ if (str_end == NULL) {
+ goto err;
}
- return pend;
+ str_len = (uint)(str_end - str);
+
+ /* Advance file pointer past the string length */
+ fp->f_pos += str_len + 1;
+ bzero(str_end, rd_len - str_len);
+
+err:
+ return str_len;
}
+#endif /* defined (BT_OVER_SDIO) */
-#if defined(DHD_DEBUG)
-int write_file(const char * file_name, uint32 flags, uint8 *buf, int size)
+
+void
+dhd_os_close_image(void *image)
{
- int ret = 0;
- struct file *fp = NULL;
- mm_segment_t old_fs;
- loff_t pos = 0;
- /* change to KERNEL_DS address limit */
- old_fs = get_fs();
- set_fs(KERNEL_DS);
+ if (image)
+ filp_close((struct file *)image, NULL);
+}
- /* open file to write */
- fp = filp_open(file_name, flags, 0664);
- if (IS_ERR(fp)) {
- DHD_ERROR(("open file error, err = %ld\n", PTR_ERR(fp)));
- goto exit;
- }
+void
+dhd_os_sdlock(dhd_pub_t *pub)
+{
+ dhd_info_t *dhd;
- /* Write buf to file */
- ret = compat_vfs_write(fp, buf, size, &pos);
- if (ret < 0) {
- DHD_ERROR(("write file error, err = %d\n", ret));
- goto exit;
- }
+ dhd = (dhd_info_t *)(pub->info);
- /* Sync file from filesystem to physical media */
- ret = vfs_fsync(fp, 0);
- if (ret < 0) {
- DHD_ERROR(("sync file error, error = %d\n", ret));
- goto exit;
- }
- ret = BCME_OK;
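+ /* With a threaded DPC (dhd_dpc_prio >= 0) the lock holder may sleep, so a semaphore is used; otherwise a BH spinlock suffices */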
+#ifdef BCMDBUS
+ spin_lock_bh(&dhd->sdlock);
+#else
+ if (dhd_dpc_prio >= 0)
+ down(&dhd->sdsem);
+ else
+ spin_lock_bh(&dhd->sdlock);
+#endif /* !BCMDBUS */
+}
-exit:
- /* close file before return */
- if (!IS_ERR(fp))
- filp_close(fp, current->files);
+void
+dhd_os_sdunlock(dhd_pub_t *pub)
+{
+ dhd_info_t *dhd;
- /* restore previous address limit */
- set_fs(old_fs);
+ dhd = (dhd_info_t *)(pub->info);
- return ret;
+#ifdef BCMDBUS
+ spin_unlock_bh(&dhd->sdlock);
+#else
+ if (dhd_dpc_prio >= 0)
+ up(&dhd->sdsem);
+ else
+ spin_unlock_bh(&dhd->sdlock);
+#endif /* !BCMDBUS */
}
-#endif // endif
-#ifdef DHD_DEBUG
-static void
-dhd_convert_memdump_type_to_str(uint32 type, char *buf, int substr_type)
+void
+dhd_os_sdlock_txq(dhd_pub_t *pub)
{
- char *type_str = NULL;
+ dhd_info_t *dhd;
- switch (type) {
- case DUMP_TYPE_RESUMED_ON_TIMEOUT:
- type_str = "resumed_on_timeout";
- break;
- case DUMP_TYPE_D3_ACK_TIMEOUT:
- type_str = "D3_ACK_timeout";
- break;
- case DUMP_TYPE_DONGLE_TRAP:
- type_str = "Dongle_Trap";
- break;
- case DUMP_TYPE_MEMORY_CORRUPTION:
- type_str = "Memory_Corruption";
- break;
- case DUMP_TYPE_PKTID_AUDIT_FAILURE:
- type_str = "PKTID_AUDIT_Fail";
- break;
- case DUMP_TYPE_PKTID_INVALID:
- type_str = "PKTID_INVALID";
- break;
- case DUMP_TYPE_SCAN_TIMEOUT:
- type_str = "SCAN_timeout";
- break;
- case DUMP_TYPE_SCAN_BUSY:
- type_str = "SCAN_Busy";
- break;
- case DUMP_TYPE_BY_SYSDUMP:
- if (substr_type == CMD_UNWANTED) {
- type_str = "BY_SYSDUMP_FORUSER_unwanted";
- } else if (substr_type == CMD_DISCONNECTED) {
- type_str = "BY_SYSDUMP_FORUSER_disconnected";
- } else {
- type_str = "BY_SYSDUMP_FORUSER";
- }
- break;
- case DUMP_TYPE_BY_LIVELOCK:
- type_str = "BY_LIVELOCK";
- break;
- case DUMP_TYPE_AP_LINKUP_FAILURE:
- type_str = "BY_AP_LINK_FAILURE";
- break;
- case DUMP_TYPE_AP_ABNORMAL_ACCESS:
- type_str = "INVALID_ACCESS";
- break;
- case DUMP_TYPE_RESUMED_ON_TIMEOUT_RX:
- type_str = "ERROR_RX_TIMED_OUT";
- break;
- case DUMP_TYPE_RESUMED_ON_TIMEOUT_TX:
- type_str = "ERROR_TX_TIMED_OUT";
- break;
- case DUMP_TYPE_CFG_VENDOR_TRIGGERED:
- type_str = "CFG_VENDOR_TRIGGERED";
- break;
- case DUMP_TYPE_RESUMED_ON_INVALID_RING_RDWR:
- type_str = "BY_INVALID_RING_RDWR";
- break;
- case DUMP_TYPE_IFACE_OP_FAILURE:
- type_str = "BY_IFACE_OP_FAILURE";
- break;
- case DUMP_TYPE_TRANS_ID_MISMATCH:
- type_str = "BY_TRANS_ID_MISMATCH";
- break;
-#ifdef DEBUG_DNGL_INIT_FAIL
- case DUMP_TYPE_DONGLE_INIT_FAILURE:
- type_str = "DONGLE_INIT_FAIL";
- break;
-#endif /* DEBUG_DNGL_INIT_FAIL */
- case DUMP_TYPE_DONGLE_HOST_EVENT:
- type_str = "BY_DONGLE_HOST_EVENT";
- break;
- case DUMP_TYPE_SMMU_FAULT:
- type_str = "SMMU_FAULT";
- break;
- case DUMP_TYPE_BY_USER:
- type_str = "BY_USER";
- break;
-#ifdef DHD_ERPOM
- case DUMP_TYPE_DUE_TO_BT:
- type_str = "DUE_TO_BT";
- break;
-#endif /* DHD_ERPOM */
- case DUMP_TYPE_LOGSET_BEYOND_RANGE:
- type_str = "LOGSET_BEYOND_RANGE";
- break;
- case DUMP_TYPE_CTO_RECOVERY:
- type_str = "CTO_RECOVERY";
- break;
- case DUMP_TYPE_SEQUENTIAL_PRIVCMD_ERROR:
- type_str = "SEQUENTIAL_PRIVCMD_ERROR";
- break;
- case DUMP_TYPE_PROXD_TIMEOUT:
- type_str = "PROXD_TIMEOUT";
- break;
- case DUMP_TYPE_PKTID_POOL_DEPLETED:
- type_str = "PKTID_POOL_DEPLETED";
- break;
- default:
- type_str = "Unknown_type";
- break;
- }
+ dhd = (dhd_info_t *)(pub->info);
+#ifdef BCMDBUS
+ spin_lock_irqsave(&dhd->txqlock, dhd->txqlock_flags);
+#else
+ spin_lock_bh(&dhd->txqlock);
+#endif /* BCMDBUS */
+}
- strncpy(buf, type_str, strlen(type_str));
- buf[strlen(type_str)] = 0;
+void
+dhd_os_sdunlock_txq(dhd_pub_t *pub)
+{
+ dhd_info_t *dhd;
+
+ dhd = (dhd_info_t *)(pub->info);
+#ifdef BCMDBUS
+ spin_unlock_irqrestore(&dhd->txqlock, dhd->txqlock_flags);
+#else
+ spin_unlock_bh(&dhd->txqlock);
+#endif /* BCMDBUS */
}
void
-dhd_get_memdump_filename(struct net_device *ndev, char *memdump_path, int len, char *fname)
+dhd_os_sdlock_rxq(dhd_pub_t *pub)
{
- char memdump_type[32];
- dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(ndev);
- dhd_pub_t *dhdp = &dhd->pub;
+#if 0
+ dhd_info_t *dhd;
- /* Init file name */
- memset(memdump_path, 0, len);
- memset(memdump_type, 0, sizeof(memdump_type));
- dhd_convert_memdump_type_to_str(dhdp->memdump_type, memdump_type, dhdp->debug_dump_subcmd);
- clear_debug_dump_time(dhdp->debug_dump_time_str);
- get_debug_dump_time(dhdp->debug_dump_time_str);
- snprintf(memdump_path, len, "%s%s_%s_" "%s",
- DHD_COMMON_DUMP_PATH, fname, memdump_type, dhdp->debug_dump_time_str);
- if (strstr(fname, "sssr_dump")) {
- DHD_SSSR_PRINT_FILEPATH(dhdp, memdump_path);
- } else {
- DHD_ERROR(("%s: file_path = %s%s\n", __FUNCTION__,
- memdump_path, FILE_NAME_HAL_TAG));
- }
+ dhd = (dhd_info_t *)(pub->info);
+ spin_lock_bh(&dhd->rxqlock);
+#endif
}
-int
-write_dump_to_file(dhd_pub_t *dhd, uint8 *buf, int size, char *fname)
+void
+dhd_os_sdunlock_rxq(dhd_pub_t *pub)
{
- int ret = 0;
- char memdump_path[128];
- char memdump_type[32];
- uint32 file_mode;
+#if 0
+ dhd_info_t *dhd;
- /* Init file name */
- memset(memdump_path, 0, sizeof(memdump_path));
- memset(memdump_type, 0, sizeof(memdump_type));
- dhd_convert_memdump_type_to_str(dhd->memdump_type, memdump_type, dhd->debug_dump_subcmd);
- clear_debug_dump_time(dhd->debug_dump_time_str);
- get_debug_dump_time(dhd->debug_dump_time_str);
- snprintf(memdump_path, sizeof(memdump_path), "%s%s_%s_" "%s",
- DHD_COMMON_DUMP_PATH, fname, memdump_type, dhd->debug_dump_time_str);
- file_mode = O_CREAT | O_WRONLY | O_SYNC;
+ dhd = (dhd_info_t *)(pub->info);
+ spin_unlock_bh(&dhd->rxqlock);
+#endif
+}
- /* print SOCRAM dump file path */
- DHD_ERROR(("%s: file_path = %s\n", __FUNCTION__, memdump_path));
+static void
+dhd_os_rxflock(dhd_pub_t *pub)
+{
+ dhd_info_t *dhd;
-#ifdef DHD_LOG_DUMP
- dhd_print_buf_addr(dhd, "write_dump_to_file", buf, size);
-#endif /* DHD_LOG_DUMP */
+ dhd = (dhd_info_t *)(pub->info);
+ spin_lock_bh(&dhd->rxf_lock);
- /* Write file */
- ret = write_file(memdump_path, file_mode, buf, size);
+}
-#ifdef DHD_DUMP_MNGR
- if (ret == BCME_OK) {
- dhd_dump_file_manage_enqueue(dhd, memdump_path, fname);
- }
-#endif /* DHD_DUMP_MNGR */
+static void
+dhd_os_rxfunlock(dhd_pub_t *pub)
+{
+ dhd_info_t *dhd;
- return ret;
+ dhd = (dhd_info_t *)(pub->info);
+ spin_unlock_bh(&dhd->rxf_lock);
}
-#endif /* DHD_DEBUG */
-int dhd_os_wake_lock_timeout(dhd_pub_t *pub)
+#ifdef DHDTCPACK_SUPPRESS
+unsigned long
+dhd_os_tcpacklock(dhd_pub_t *pub)
{
- dhd_info_t *dhd = (dhd_info_t *)(pub->info);
- unsigned long flags;
- int ret = 0;
+ dhd_info_t *dhd;
+ unsigned long flags = 0;
- if (dhd && (dhd->dhd_state & DHD_ATTACH_STATE_WAKELOCKS_INIT)) {
- spin_lock_irqsave(&dhd->wakelock_spinlock, flags);
- ret = dhd->wakelock_rx_timeout_enable > dhd->wakelock_ctrl_timeout_enable ?
- dhd->wakelock_rx_timeout_enable : dhd->wakelock_ctrl_timeout_enable;
-#ifdef CONFIG_HAS_WAKELOCK
- if (dhd->wakelock_rx_timeout_enable)
- wake_lock_timeout(&dhd->wl_rxwake,
- msecs_to_jiffies(dhd->wakelock_rx_timeout_enable));
- if (dhd->wakelock_ctrl_timeout_enable)
- wake_lock_timeout(&dhd->wl_ctrlwake,
- msecs_to_jiffies(dhd->wakelock_ctrl_timeout_enable));
-#endif // endif
- dhd->wakelock_rx_timeout_enable = 0;
- dhd->wakelock_ctrl_timeout_enable = 0;
- spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
+ dhd = (dhd_info_t *)(pub->info);
+
+ if (dhd) {
+#ifdef BCMSDIO
+ spin_lock_bh(&dhd->tcpack_lock);
+#else
+ spin_lock_irqsave(&dhd->tcpack_lock, flags);
+#endif /* BCMSDIO */
}
- return ret;
+
+ return flags;
}
-int net_os_wake_lock_timeout(struct net_device *dev)
+void
+dhd_os_tcpackunlock(dhd_pub_t *pub, unsigned long flags)
{
- dhd_info_t *dhd = DHD_DEV_INFO(dev);
- int ret = 0;
+ dhd_info_t *dhd;
- if (dhd)
- ret = dhd_os_wake_lock_timeout(&dhd->pub);
- return ret;
+#ifdef BCMSDIO
+ BCM_REFERENCE(flags);
+#endif /* BCMSDIO */
+
+ dhd = (dhd_info_t *)(pub->info);
+
+ if (dhd) {
+#ifdef BCMSDIO
+ spin_unlock_bh(&dhd->tcpack_lock);
+#else
+ spin_unlock_irqrestore(&dhd->tcpack_lock, flags);
+#endif /* BCMSDIO */
+ }
}
+#endif /* DHDTCPACK_SUPPRESS */
-int dhd_os_wake_lock_rx_timeout_enable(dhd_pub_t *pub, int val)
+uint8* dhd_os_prealloc(dhd_pub_t *dhdpub, int section, uint size, bool kmalloc_if_fail)
{
- dhd_info_t *dhd = (dhd_info_t *)(pub->info);
- unsigned long flags;
+ uint8* buf;
+ gfp_t flags = CAN_SLEEP() ? GFP_KERNEL: GFP_ATOMIC;
- if (dhd && (dhd->dhd_state & DHD_ATTACH_STATE_WAKELOCKS_INIT)) {
- spin_lock_irqsave(&dhd->wakelock_spinlock, flags);
- if (val > dhd->wakelock_rx_timeout_enable)
- dhd->wakelock_rx_timeout_enable = val;
- spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
- }
- return 0;
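+ /* Try the platform preallocated pool first; fall back to kmalloc() only when the caller allows it */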
+ buf = (uint8*)wifi_platform_prealloc(dhdpub->info->adapter, section, size);
+ if (buf == NULL && kmalloc_if_fail)
+ buf = kmalloc(size, flags);
+
+ return buf;
}
-int dhd_os_wake_lock_ctrl_timeout_enable(dhd_pub_t *pub, int val)
+void dhd_os_prefree(dhd_pub_t *dhdpub, void *addr, uint size)
{
- dhd_info_t *dhd = (dhd_info_t *)(pub->info);
- unsigned long flags;
+}
- if (dhd && (dhd->dhd_state & DHD_ATTACH_STATE_WAKELOCKS_INIT)) {
- spin_lock_irqsave(&dhd->wakelock_spinlock, flags);
- if (val > dhd->wakelock_ctrl_timeout_enable)
- dhd->wakelock_ctrl_timeout_enable = val;
- spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
+#if defined(WL_WIRELESS_EXT)
+struct iw_statistics *
+dhd_get_wireless_stats(struct net_device *dev)
+{
+ int res = 0;
+ dhd_info_t *dhd = DHD_DEV_INFO(dev);
+
+ if (!dhd->pub.up) {
+ return NULL;
}
- return 0;
+
+ res = wl_iw_get_wireless_stats(dev, &dhd->iw.wstats);
+
+ if (res == 0)
+ return &dhd->iw.wstats;
+ else
+ return NULL;
}
+#endif /* defined(WL_WIRELESS_EXT) */
-int dhd_os_wake_lock_ctrl_timeout_cancel(dhd_pub_t *pub)
+static int
+dhd_wl_host_event(dhd_info_t *dhd, int ifidx, void *pktdata, uint16 pktlen,
+ wl_event_msg_t *event, void **data)
{
- dhd_info_t *dhd = (dhd_info_t *)(pub->info);
- unsigned long flags;
+ int bcmerror = 0;
+#ifdef WL_CFG80211
+ unsigned long flags = 0;
+#endif /* WL_CFG80211 */
+ ASSERT(dhd != NULL);
- if (dhd && (dhd->dhd_state & DHD_ATTACH_STATE_WAKELOCKS_INIT)) {
- spin_lock_irqsave(&dhd->wakelock_spinlock, flags);
- dhd->wakelock_ctrl_timeout_enable = 0;
-#ifdef CONFIG_HAS_WAKELOCK
- if (wake_lock_active(&dhd->wl_ctrlwake))
- wake_unlock(&dhd->wl_ctrlwake);
-#endif // endif
- spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
+#ifdef SHOW_LOGTRACE
+ bcmerror = wl_process_host_event(&dhd->pub, &ifidx, pktdata, pktlen, event, data,
+ &dhd->event_data);
+#else
+ bcmerror = wl_process_host_event(&dhd->pub, &ifidx, pktdata, pktlen, event, data,
+ NULL);
+#endif /* SHOW_LOGTRACE */
+
+ if (bcmerror != BCME_OK)
+ return (bcmerror);
+
+#if defined(WL_EXT_IAPSTA)
+ wl_ext_iapsta_event(dhd->iflist[ifidx]->net, event, *data);
+#endif /* defined(WL_EXT_IAPSTA) */
+#if defined(WL_WIRELESS_EXT)
+ if (event->bsscfgidx == 0) {
+ /*
+ * Wireless ext is on primary interface only
+ */
+
+ ASSERT(dhd->iflist[ifidx] != NULL);
+ ASSERT(dhd->iflist[ifidx]->net != NULL);
+
+ if (dhd->iflist[ifidx]->net) {
+ wl_iw_event(dhd->iflist[ifidx]->net, event, *data);
+ }
}
- return 0;
+#endif /* defined(WL_WIRELESS_EXT) */
+
+#ifdef WL_CFG80211
+ ASSERT(dhd->iflist[ifidx] != NULL);
+ ASSERT(dhd->iflist[ifidx]->net != NULL);
+ if (dhd->iflist[ifidx]->net) {
+ spin_lock_irqsave(&dhd->pub.up_lock, flags);
+ if (dhd->pub.up) {
+ wl_cfg80211_event(dhd->iflist[ifidx]->net, event, *data);
+ }
+ spin_unlock_irqrestore(&dhd->pub.up_lock, flags);
+ }
+#endif /* defined(WL_CFG80211) */
+
+ return (bcmerror);
}
-int net_os_wake_lock_rx_timeout_enable(struct net_device *dev, int val)
+/* send up locally generated event */
+void
+dhd_sendup_event(dhd_pub_t *dhdp, wl_event_msg_t *event, void *data)
{
- dhd_info_t *dhd = DHD_DEV_INFO(dev);
- int ret = 0;
-
- if (dhd)
- ret = dhd_os_wake_lock_rx_timeout_enable(&dhd->pub, val);
- return ret;
+ /* Just return from here */
+ return;
}
-int net_os_wake_lock_ctrl_timeout_enable(struct net_device *dev, int val)
+#ifdef LOG_INTO_TCPDUMP
+void
+dhd_sendup_log(dhd_pub_t *dhdp, void *data, int data_len)
{
- dhd_info_t *dhd = DHD_DEV_INFO(dev);
- int ret = 0;
+ struct sk_buff *p, *skb;
+ uint32 pktlen;
+ int len;
+ dhd_if_t *ifp;
+ dhd_info_t *dhd;
+ uchar *skb_data;
+ int ifidx = 0;
+ struct ether_header eth;
- if (dhd)
- ret = dhd_os_wake_lock_ctrl_timeout_enable(&dhd->pub, val);
- return ret;
-}
+ pktlen = sizeof(eth) + data_len;
+ dhd = dhdp->info;
+
+ if ((p = PKTGET(dhdp->osh, pktlen, FALSE))) {
+ ASSERT(ISALIGNED((uintptr)PKTDATA(dhdp->osh, p), sizeof(uint32)));
+
+ bcopy(&dhdp->mac, ð.ether_dhost, ETHER_ADDR_LEN);
+ bcopy(&dhdp->mac, ð.ether_shost, ETHER_ADDR_LEN);
+ ETHER_TOGGLE_LOCALADDR(ð.ether_shost);
+ eth.ether_type = hton16(ETHER_TYPE_BRCM);
-#if defined(DHD_TRACE_WAKE_LOCK)
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0))
-#include <linux/hashtable.h>
-#else
-#include <linux/hash.h>
-#endif /* KERNEL_VER >= KERNEL_VERSION(3, 7, 0) */
+ bcopy((void *)ð, PKTDATA(dhdp->osh, p), sizeof(eth));
+ bcopy(data, PKTDATA(dhdp->osh, p) + sizeof(eth), data_len);
+ skb = PKTTONATIVE(dhdp->osh, p);
+ skb_data = skb->data;
+ len = skb->len;
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0))
-/* Define 2^5 = 32 bucket size hash table */
-DEFINE_HASHTABLE(wklock_history, 5);
-#else
-/* Define 2^5 = 32 bucket size hash table */
-struct hlist_head wklock_history[32] = { [0 ... 31] = HLIST_HEAD_INIT };
-#endif /* KERNEL_VER >= KERNEL_VERSION(3, 7, 0) */
+ ifidx = dhd_ifname2idx(dhd, "wlan0");
+ ifp = dhd->iflist[ifidx];
+ if (ifp == NULL)
+ ifp = dhd->iflist[0];
-atomic_t trace_wklock_onoff;
-typedef enum dhd_wklock_type {
- DHD_WAKE_LOCK,
- DHD_WAKE_UNLOCK,
- DHD_WAIVE_LOCK,
- DHD_RESTORE_LOCK
-} dhd_wklock_t;
+ ASSERT(ifp);
+ skb->dev = ifp->net;
+ skb->protocol = eth_type_trans(skb, skb->dev);
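+ /* eth_type_trans() advanced data/len past the header; restore them
+ * here and strip the header explicitly below.
+ */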
+ skb->data = skb_data;
+ skb->len = len;
-struct wk_trace_record {
- unsigned long addr; /* Address of the instruction */
- dhd_wklock_t lock_type; /* lock_type */
- unsigned long long counter; /* counter information */
- struct hlist_node wklock_node; /* hash node */
-};
+ /* Strip header, count, deliver upward */
+ skb_pull(skb, ETH_HLEN);
-static struct wk_trace_record *find_wklock_entry(unsigned long addr)
-{
- struct wk_trace_record *wklock_info;
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0))
- hash_for_each_possible(wklock_history, wklock_info, wklock_node, addr)
-#else
- struct hlist_node *entry;
- int index = hash_long(addr, ilog2(ARRAY_SIZE(wklock_history)));
- hlist_for_each_entry(wklock_info, entry, &wklock_history[index], wklock_node)
-#endif /* KERNEL_VER >= KERNEL_VERSION(3, 7, 0) */
- {
- if (wklock_info->addr == addr) {
- return wklock_info;
+ bcm_object_trace_opr(skb, BCM_OBJDBG_REMOVE,
+ __FUNCTION__, __LINE__);
+ /* Send the packet */
+ if (in_interrupt()) {
+ netif_rx(skb);
+ } else {
+ netif_rx_ni(skb);
}
+ } else {
+ /* Could not allocate a sk_buf */
+ DHD_ERROR(("%s: unable to alloc sk_buf\n", __FUNCTION__));
}
- return NULL;
}
+#endif /* LOG_INTO_TCPDUMP */
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0))
-#define HASH_ADD(hashtable, node, key) \
- do { \
- hash_add(hashtable, node, key); \
- } while (0);
-#else
-#define HASH_ADD(hashtable, node, key) \
- do { \
- int index = hash_long(key, ilog2(ARRAY_SIZE(hashtable))); \
- hlist_add_head(node, &hashtable[index]); \
- } while (0);
-#endif /* KERNEL_VER < KERNEL_VERSION(3, 7, 0) */
-
-#define STORE_WKLOCK_RECORD(wklock_type) \
- do { \
- struct wk_trace_record *wklock_info = NULL; \
- unsigned long func_addr = (unsigned long)__builtin_return_address(0); \
- wklock_info = find_wklock_entry(func_addr); \
- if (wklock_info) { \
- if (wklock_type == DHD_WAIVE_LOCK || wklock_type == DHD_RESTORE_LOCK) { \
- wklock_info->counter = dhd->wakelock_counter; \
- } else { \
- wklock_info->counter++; \
- } \
- } else { \
- wklock_info = kzalloc(sizeof(*wklock_info), GFP_ATOMIC); \
- if (!wklock_info) {\
- printk("Can't allocate wk_trace_record \n"); \
- } else { \
- wklock_info->addr = func_addr; \
- wklock_info->lock_type = wklock_type; \
- if (wklock_type == DHD_WAIVE_LOCK || \
- wklock_type == DHD_RESTORE_LOCK) { \
- wklock_info->counter = dhd->wakelock_counter; \
- } else { \
- wklock_info->counter++; \
- } \
- HASH_ADD(wklock_history, &wklock_info->wklock_node, func_addr); \
- } \
- } \
- } while (0);
-
-static inline void dhd_wk_lock_rec_dump(void)
+void dhd_wait_for_event(dhd_pub_t *dhd, bool *lockvar)
{
- int bkt;
- struct wk_trace_record *wklock_info;
+#if defined(BCMSDIO) && (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0))
+ struct dhd_info *dhdinfo = dhd->info;
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0))
- hash_for_each(wklock_history, bkt, wklock_info, wklock_node)
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
+ int timeout = msecs_to_jiffies(IOCTL_RESP_TIMEOUT);
#else
- struct hlist_node *entry = NULL;
- int max_index = ARRAY_SIZE(wklock_history);
- for (bkt = 0; bkt < max_index; bkt++)
- hlist_for_each_entry(wklock_info, entry, &wklock_history[bkt], wklock_node)
-#endif /* KERNEL_VER >= KERNEL_VERSION(3, 7, 0) */
- {
- switch (wklock_info->lock_type) {
- case DHD_WAKE_LOCK:
- printk("wakelock lock : %pS lock_counter : %llu \n",
- (void *)wklock_info->addr, wklock_info->counter);
- break;
- case DHD_WAKE_UNLOCK:
- printk("wakelock unlock : %pS, unlock_counter : %llu \n",
- (void *)wklock_info->addr, wklock_info->counter);
- break;
- case DHD_WAIVE_LOCK:
- printk("wakelock waive : %pS before_waive : %llu \n",
- (void *)wklock_info->addr, wklock_info->counter);
- break;
- case DHD_RESTORE_LOCK:
- printk("wakelock restore : %pS, after_waive : %llu \n",
- (void *)wklock_info->addr, wklock_info->counter);
- break;
- }
- }
+ int timeout = (IOCTL_RESP_TIMEOUT / 1000) * HZ;
+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) */
+
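+ /* Release the SDIO lock while waiting so the event can be delivered,
+ * then re-acquire it before returning.
+ */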
+ dhd_os_sdunlock(dhd);
+ wait_event_timeout(dhdinfo->ctrl_wait, (*lockvar == FALSE), timeout);
+ dhd_os_sdlock(dhd);
+#endif /* defined(BCMSDIO) && (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)) */
+ return;
}
-static void dhd_wk_lock_trace_init(struct dhd_info *dhd)
+void dhd_wait_event_wakeup(dhd_pub_t *dhd)
{
- unsigned long flags;
-#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 7, 0))
- int i;
-#endif /* KERNEL_VER >= KERNEL_VERSION(3, 7, 0) */
-
- spin_lock_irqsave(&dhd->wakelock_spinlock, flags);
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0))
- hash_init(wklock_history);
-#else
- for (i = 0; i < ARRAY_SIZE(wklock_history); i++)
- INIT_HLIST_HEAD(&wklock_history[i]);
-#endif /* KERNEL_VER >= KERNEL_VERSION(3, 7, 0) */
- spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
- atomic_set(&trace_wklock_onoff, 1);
+#if defined(BCMSDIO) && (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0))
+ struct dhd_info *dhdinfo = dhd->info;
+ if (waitqueue_active(&dhdinfo->ctrl_wait))
+ wake_up(&dhdinfo->ctrl_wait);
+#endif
+ return;
}
-static void dhd_wk_lock_trace_deinit(struct dhd_info *dhd)
+#if defined(BCMSDIO) || defined(BCMPCIE) || defined(BCMDBUS)
+int
+dhd_net_bus_devreset(struct net_device *dev, uint8 flag)
{
- int bkt;
- struct wk_trace_record *wklock_info;
- struct hlist_node *tmp;
- unsigned long flags;
-#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 7, 0))
- struct hlist_node *entry = NULL;
- int max_index = ARRAY_SIZE(wklock_history);
-#endif /* KERNEL_VER >= KERNEL_VERSION(3, 7, 0) */
+ int ret;
- spin_lock_irqsave(&dhd->wakelock_spinlock, flags);
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0))
- hash_for_each_safe(wklock_history, bkt, tmp, wklock_info, wklock_node)
-#else
- for (bkt = 0; bkt < max_index; bkt++)
- hlist_for_each_entry_safe(wklock_info, entry, tmp,
- &wklock_history[bkt], wklock_node)
-#endif /* KERNEL_VER >= KERNEL_VERSION(3, 7, 0)) */
- {
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0))
- hash_del(&wklock_info->wklock_node);
-#else
- hlist_del_init(&wklock_info->wklock_node);
-#endif /* KERNEL_VER >= KERNEL_VERSION(3, 7, 0)) */
- kfree(wklock_info);
+ dhd_info_t *dhd = DHD_DEV_INFO(dev);
+
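+ /* flag TRUE means the WLAN chip is being brought down: tear down the
+ * optional subsystems before resetting the bus.
+ */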
+ if (flag == TRUE) {
+ /* Issue wl down command before resetting the chip */
+ if (dhd_wl_ioctl_cmd(&dhd->pub, WLC_DOWN, NULL, 0, TRUE, 0) < 0) {
+ DHD_TRACE(("%s: wl down failed\n", __FUNCTION__));
}
- spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
+#ifdef PROP_TXSTATUS
+ if (dhd->pub.wlfc_enabled) {
+ dhd_wlfc_deinit(&dhd->pub);
+ }
+#endif /* PROP_TXSTATUS */
+#ifdef PNO_SUPPORT
+ if (dhd->pub.pno_state) {
+ dhd_pno_deinit(&dhd->pub);
+ }
+#endif
+#ifdef RTT_SUPPORT
+ if (dhd->pub.rtt_state) {
+ dhd_rtt_deinit(&dhd->pub);
+ }
+#endif /* RTT_SUPPORT */
+
+#if defined(DBG_PKT_MON) && !defined(DBG_PKT_MON_INIT_DEFAULT)
+ dhd_os_dbg_detach_pkt_monitor(&dhd->pub);
+#endif /* DBG_PKT_MON */
+ }
+
+#ifdef BCMSDIO
+ if (!flag) {
+ dhd_update_fw_nv_path(dhd);
+ /* update firmware and nvram path to sdio bus */
+ dhd_bus_update_fw_nv_path(dhd->pub.bus,
+ dhd->fw_path, dhd->nv_path, dhd->clm_path, dhd->conf_path);
+ }
+#endif /* BCMSDIO */
+
+ ret = dhd_bus_devreset(&dhd->pub, flag);
+ if (ret) {
+ DHD_ERROR(("%s: dhd_bus_devreset: %d\n", __FUNCTION__, ret));
+ return ret;
+ }
+
+ return ret;
}
-void dhd_wk_lock_stats_dump(dhd_pub_t *dhdp)
+#ifdef BCMSDIO
+int
+dhd_net_bus_suspend(struct net_device *dev)
{
- dhd_info_t *dhd = (dhd_info_t *)(dhdp->info);
- unsigned long flags;
-
- printk(KERN_ERR"DHD Printing wl_wake Lock/Unlock Record \r\n");
- spin_lock_irqsave(&dhd->wakelock_spinlock, flags);
- dhd_wk_lock_rec_dump();
- spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
+ dhd_info_t *dhd = DHD_DEV_INFO(dev);
+ return dhd_bus_suspend(&dhd->pub);
+}
+int
+dhd_net_bus_resume(struct net_device *dev, uint8 stage)
+{
+ dhd_info_t *dhd = DHD_DEV_INFO(dev);
+ return dhd_bus_resume(&dhd->pub, stage);
}
-#else
-#define STORE_WKLOCK_RECORD(wklock_type)
-#endif /* ! DHD_TRACE_WAKE_LOCK */
-int dhd_os_wake_lock(dhd_pub_t *pub)
+#endif /* BCMSDIO */
+#endif /* BCMSDIO || BCMPCIE || BCMDBUS */
+
+int net_os_set_suspend_disable(struct net_device *dev, int val)
{
- dhd_info_t *dhd = (dhd_info_t *)(pub->info);
- unsigned long flags;
+ dhd_info_t *dhd = DHD_DEV_INFO(dev);
int ret = 0;
- if (dhd && (dhd->dhd_state & DHD_ATTACH_STATE_WAKELOCKS_INIT)) {
- spin_lock_irqsave(&dhd->wakelock_spinlock, flags);
- if (dhd->wakelock_counter == 0 && !dhd->waive_wakelock) {
-#ifdef CONFIG_HAS_WAKELOCK
- wake_lock(&dhd->wl_wifi);
-#elif defined(BCMSDIO)
- dhd_bus_dev_pm_stay_awake(pub);
-#endif // endif
- }
-#ifdef DHD_TRACE_WAKE_LOCK
- if (atomic_read(&trace_wklock_onoff)) {
- STORE_WKLOCK_RECORD(DHD_WAKE_LOCK);
- }
-#endif /* DHD_TRACE_WAKE_LOCK */
- dhd->wakelock_counter++;
- ret = dhd->wakelock_counter;
- spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
+ if (dhd) {
+ ret = dhd->pub.suspend_disable_flag;
+ dhd->pub.suspend_disable_flag = val;
}
-
return ret;
}
-void dhd_event_wake_lock(dhd_pub_t *pub)
+int net_os_set_suspend(struct net_device *dev, int val, int force)
{
- dhd_info_t *dhd = (dhd_info_t *)(pub->info);
+ int ret = 0;
+ dhd_info_t *dhd = DHD_DEV_INFO(dev);
if (dhd) {
-#ifdef CONFIG_HAS_WAKELOCK
- wake_lock(&dhd->wl_evtwake);
-#elif defined(BCMSDIO)
- dhd_bus_dev_pm_stay_awake(pub);
-#endif // endif
+#ifdef CONFIG_MACH_UNIVERSAL7420
+#endif /* CONFIG_MACH_UNIVERSAL7420 */
+#if defined(CONFIG_HAS_EARLYSUSPEND) && defined(DHD_USE_EARLYSUSPEND)
+ ret = dhd_set_suspend(val, &dhd->pub);
+#else
+ ret = dhd_suspend_resume_helper(dhd, val, force);
+#endif
+#ifdef WL_CFG80211
+ wl_cfg80211_update_power_mode(dev);
+#endif
}
+ return ret;
}
-void
-dhd_pm_wake_lock_timeout(dhd_pub_t *pub, int val)
+int net_os_set_suspend_bcn_li_dtim(struct net_device *dev, int val)
{
-#ifdef CONFIG_HAS_WAKELOCK
- dhd_info_t *dhd = (dhd_info_t *)(pub->info);
+ dhd_info_t *dhd = DHD_DEV_INFO(dev);
- if (dhd) {
- wake_lock_timeout(&dhd->wl_pmwake, msecs_to_jiffies(val));
- }
-#endif /* CONFIG_HAS_WAKE_LOCK */
+ if (dhd)
+ dhd->pub.suspend_bcn_li_dtim = val;
+
+ return 0;
}
-void
-dhd_txfl_wake_lock_timeout(dhd_pub_t *pub, int val)
+int net_os_set_max_dtim_enable(struct net_device *dev, int val)
{
-#ifdef CONFIG_HAS_WAKELOCK
- dhd_info_t *dhd = (dhd_info_t *)(pub->info);
+ dhd_info_t *dhd = DHD_DEV_INFO(dev);
if (dhd) {
- wake_lock_timeout(&dhd->wl_txflwake, msecs_to_jiffies(val));
+ DHD_ERROR(("%s: use MAX bcn_li_dtim in suspend %s\n",
+ __FUNCTION__, (val ? "Enable" : "Disable")));
+ if (val) {
+ dhd->pub.max_dtim_enable = TRUE;
+ } else {
+ dhd->pub.max_dtim_enable = FALSE;
+ }
+ } else {
+ return -1;
}
-#endif /* CONFIG_HAS_WAKE_LOCK */
+
+ return 0;
}
-int net_os_wake_lock(struct net_device *dev)
+#ifdef PKT_FILTER_SUPPORT
+int net_os_rxfilter_add_remove(struct net_device *dev, int add_remove, int num)
{
- dhd_info_t *dhd = DHD_DEV_INFO(dev);
int ret = 0;
- if (dhd)
- ret = dhd_os_wake_lock(&dhd->pub);
+#ifndef GAN_LITE_NAT_KEEPALIVE_FILTER
+ dhd_info_t *dhd = DHD_DEV_INFO(dev);
+
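+ /* In non-master mode the add/remove sense is inverted */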
+ if (!dhd_master_mode)
+ add_remove = !add_remove;
+ DHD_ERROR(("%s: add_remove = %d, num = %d\n", __FUNCTION__, add_remove, num));
+ if (!dhd || (num == DHD_UNICAST_FILTER_NUM)) {
+ return 0;
+ }
+
+
+ if (num >= dhd->pub.pktfilter_count) {
+ return -EINVAL;
+ }
+
+ ret = dhd_packet_filter_add_remove(&dhd->pub, add_remove, num);
+#endif /* !GAN_LITE_NAT_KEEPALIVE_FILTER */
+
return ret;
}
-int dhd_os_wake_unlock(dhd_pub_t *pub)
+int dhd_os_enable_packet_filter(dhd_pub_t *dhdp, int val)
{
- dhd_info_t *dhd = (dhd_info_t *)(pub->info);
- unsigned long flags;
int ret = 0;
- dhd_os_wake_lock_timeout(pub);
- if (dhd && (dhd->dhd_state & DHD_ATTACH_STATE_WAKELOCKS_INIT)) {
- spin_lock_irqsave(&dhd->wakelock_spinlock, flags);
-
- if (dhd->wakelock_counter > 0) {
- dhd->wakelock_counter--;
-#ifdef DHD_TRACE_WAKE_LOCK
- if (atomic_read(&trace_wklock_onoff)) {
- STORE_WKLOCK_RECORD(DHD_WAKE_UNLOCK);
- }
-#endif /* DHD_TRACE_WAKE_LOCK */
- if (dhd->wakelock_counter == 0 && !dhd->waive_wakelock) {
-#ifdef CONFIG_HAS_WAKELOCK
- wake_unlock(&dhd->wl_wifi);
-#elif defined(BCMSDIO)
- dhd_bus_dev_pm_relax(pub);
-#endif // endif
- }
- ret = dhd->wakelock_counter;
+ /* Packet filtering is set only if we are still in early-suspend and
+ * we need either to turn it ON or to turn it OFF.
+ * We can always turn it OFF in early-suspend, but we turn it
+ * back ON only if suspend_disable_flag was not set.
+ */
+ if (dhdp && dhdp->up) {
+ if (dhdp->in_suspend) {
+ if (!val || (val && !dhdp->suspend_disable_flag))
+ dhd_enable_packet_filter(val, dhdp);
}
- spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
}
return ret;
}
-void dhd_event_wake_unlock(dhd_pub_t *pub)
+/* function to enable/disable packet for Network device */
+int net_os_enable_packet_filter(struct net_device *dev, int val)
{
- dhd_info_t *dhd = (dhd_info_t *)(pub->info);
+ dhd_info_t *dhd = DHD_DEV_INFO(dev);
- if (dhd) {
-#ifdef CONFIG_HAS_WAKELOCK
- wake_unlock(&dhd->wl_evtwake);
-#elif defined(BCMSDIO)
- dhd_bus_dev_pm_relax(pub);
-#endif // endif
- }
+ DHD_ERROR(("%s: val = %d\n", __FUNCTION__, val));
+ return dhd_os_enable_packet_filter(&dhd->pub, val);
}
+#endif /* PKT_FILTER_SUPPORT */
-void dhd_pm_wake_unlock(dhd_pub_t *pub)
+int
+dhd_dev_init_ioctl(struct net_device *dev)
{
-#ifdef CONFIG_HAS_WAKELOCK
- dhd_info_t *dhd = (dhd_info_t *)(pub->info);
+ dhd_info_t *dhd = DHD_DEV_INFO(dev);
+ int ret;
- if (dhd) {
- /* if wl_pmwake is active, unlock it */
- if (wake_lock_active(&dhd->wl_pmwake)) {
- wake_unlock(&dhd->wl_pmwake);
- }
- }
-#endif /* CONFIG_HAS_WAKELOCK */
+ if ((ret = dhd_sync_with_dongle(&dhd->pub)) < 0)
+ goto done;
+
+done:
+ return ret;
}
-void dhd_txfl_wake_unlock(dhd_pub_t *pub)
+int
+dhd_dev_get_feature_set(struct net_device *dev)
{
-#ifdef CONFIG_HAS_WAKELOCK
- dhd_info_t *dhd = (dhd_info_t *)(pub->info);
+ dhd_info_t *ptr = *(dhd_info_t **)netdev_priv(dev);
+ dhd_pub_t *dhd = (&ptr->pub);
+ int feature_set = 0;
- if (dhd) {
- /* if wl_txflwake is active, unlock it */
- if (wake_lock_active(&dhd->wl_txflwake)) {
- wake_unlock(&dhd->wl_txflwake);
- }
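+ /* Map firmware capability flags (FW_SUPPORTED) onto WIFI_FEATURE_* bits */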
+ if (FW_SUPPORTED(dhd, sta))
+ feature_set |= WIFI_FEATURE_INFRA;
+ if (FW_SUPPORTED(dhd, dualband))
+ feature_set |= WIFI_FEATURE_INFRA_5G;
+ if (FW_SUPPORTED(dhd, p2p))
+ feature_set |= WIFI_FEATURE_P2P;
+ if (dhd->op_mode & DHD_FLAG_HOSTAP_MODE)
+ feature_set |= WIFI_FEATURE_SOFT_AP;
+ if (FW_SUPPORTED(dhd, tdls))
+ feature_set |= WIFI_FEATURE_TDLS;
+ if (FW_SUPPORTED(dhd, vsdb))
+ feature_set |= WIFI_FEATURE_TDLS_OFFCHANNEL;
+ if (FW_SUPPORTED(dhd, nan)) {
+ feature_set |= WIFI_FEATURE_NAN;
+ /* NAN is essential for D2D RTT */
+ if (FW_SUPPORTED(dhd, rttd2d))
+ feature_set |= WIFI_FEATURE_D2D_RTT;
}
-#endif /* CONFIG_HAS_WAKELOCK */
-}
-
-int dhd_os_check_wakelock(dhd_pub_t *pub)
-{
-#if defined(CONFIG_HAS_WAKELOCK) || defined(BCMSDIO)
- dhd_info_t *dhd;
+#ifdef RTT_SUPPORT
+ if (dhd->rtt_supported) {
+ feature_set |= WIFI_FEATURE_D2D_RTT;
+ feature_set |= WIFI_FEATURE_D2AP_RTT;
+ }
+#endif /* RTT_SUPPORT */
+#ifdef LINKSTAT_SUPPORT
+ feature_set |= WIFI_FEATURE_LINKSTAT;
+#endif /* LINKSTAT_SUPPORT */
- if (!pub)
- return 0;
- dhd = (dhd_info_t *)(pub->info);
-#endif /* CONFIG_HAS_WAKELOCK || BCMSDIO */
+#ifdef PNO_SUPPORT
+ if (dhd_is_pno_supported(dhd)) {
+ feature_set |= WIFI_FEATURE_PNO;
+#ifdef GSCAN_SUPPORT
+ /* terence 20171115: remove to get GTS PASS
+ * com.google.android.gts.wifi.WifiHostTest#testWifiScannerBatchTimestamp
+ */
+// feature_set |= WIFI_FEATURE_GSCAN;
+// feature_set |= WIFI_FEATURE_HAL_EPNO;
+#endif /* GSCAN_SUPPORT */
+ }
+#endif /* PNO_SUPPORT */
+#ifdef RSSI_MONITOR_SUPPORT
+ if (FW_SUPPORTED(dhd, rssi_mon)) {
+ feature_set |= WIFI_FEATURE_RSSI_MONITOR;
+ }
+#endif /* RSSI_MONITOR_SUPPORT */
+#ifdef WL11U
+ feature_set |= WIFI_FEATURE_HOTSPOT;
+#endif /* WL11U */
+#ifdef NDO_CONFIG_SUPPORT
+ feature_set |= WIFI_FEATURE_CONFIG_NDO;
+#endif /* NDO_CONFIG_SUPPORT */
+#ifdef KEEP_ALIVE
+ feature_set |= WIFI_FEATURE_MKEEP_ALIVE;
+#endif /* KEEP_ALIVE */
-#ifdef CONFIG_HAS_WAKELOCK
- /* Indicate to the SD Host to avoid going to suspend if internal locks are up */
- if (dhd && (wake_lock_active(&dhd->wl_wifi) ||
- (wake_lock_active(&dhd->wl_wdwake))))
- return 1;
-#elif defined(BCMSDIO)
- if (dhd && (dhd->wakelock_counter > 0) && dhd_bus_dev_pm_enabled(pub))
- return 1;
-#endif // endif
- return 0;
+ return feature_set;
}
int
-dhd_os_check_wakelock_all(dhd_pub_t *pub)
+dhd_dev_get_feature_set_matrix(struct net_device *dev, int num)
{
-#if defined(CONFIG_HAS_WAKELOCK) || defined(BCMSDIO)
-#if defined(CONFIG_HAS_WAKELOCK)
- int l1, l2, l3, l4, l7, l8, l9;
- int l5 = 0, l6 = 0;
- int c, lock_active;
-#endif /* CONFIG_HAS_WAKELOCK */
- dhd_info_t *dhd;
+ int feature_set_full;
+ int ret = 0;
- if (!pub) {
- return 0;
- }
- dhd = (dhd_info_t *)(pub->info);
- if (!dhd) {
- return 0;
- }
-#endif /* CONFIG_HAS_WAKELOCK || BCMSDIO */
+ feature_set_full = dhd_dev_get_feature_set(dev);
-#ifdef CONFIG_HAS_WAKELOCK
- c = dhd->wakelock_counter;
- l1 = wake_lock_active(&dhd->wl_wifi);
- l2 = wake_lock_active(&dhd->wl_wdwake);
- l3 = wake_lock_active(&dhd->wl_rxwake);
- l4 = wake_lock_active(&dhd->wl_ctrlwake);
- l7 = wake_lock_active(&dhd->wl_evtwake);
-#ifdef BCMPCIE_OOB_HOST_WAKE
- l5 = wake_lock_active(&dhd->wl_intrwake);
-#endif /* BCMPCIE_OOB_HOST_WAKE */
-#ifdef DHD_USE_SCAN_WAKELOCK
- l6 = wake_lock_active(&dhd->wl_scanwake);
-#endif /* DHD_USE_SCAN_WAKELOCK */
- l8 = wake_lock_active(&dhd->wl_pmwake);
- l9 = wake_lock_active(&dhd->wl_txflwake);
- lock_active = (l1 || l2 || l3 || l4 || l5 || l6 || l7 || l8 || l9);
+ /* Common feature set for all interface */
+ ret = (feature_set_full & WIFI_FEATURE_INFRA) |
+ (feature_set_full & WIFI_FEATURE_INFRA_5G) |
+ (feature_set_full & WIFI_FEATURE_D2D_RTT) |
+ (feature_set_full & WIFI_FEATURE_D2AP_RTT) |
+ (feature_set_full & WIFI_FEATURE_RSSI_MONITOR) |
+ (feature_set_full & WIFI_FEATURE_EPR);
- /* Indicate to the Host to avoid going to suspend if internal locks are up */
- if (lock_active) {
- DHD_ERROR(("%s wakelock c-%d wl-%d wd-%d rx-%d "
- "ctl-%d intr-%d scan-%d evt-%d, pm-%d, txfl-%d\n",
- __FUNCTION__, c, l1, l2, l3, l4, l5, l6, l7, l8, l9));
- return 1;
- }
-#elif defined(BCMSDIO)
- if (dhd && (dhd->wakelock_counter > 0) && dhd_bus_dev_pm_enabled(pub)) {
- return 1;
+ /* Specific feature group for each interface */
+ switch (num) {
+ case 0:
+ ret |= (feature_set_full & WIFI_FEATURE_P2P) |
+ /* Not supported yet */
+ /* (feature_set_full & WIFI_FEATURE_NAN) | */
+ (feature_set_full & WIFI_FEATURE_TDLS) |
+ (feature_set_full & WIFI_FEATURE_PNO) |
+ (feature_set_full & WIFI_FEATURE_HAL_EPNO) |
+ (feature_set_full & WIFI_FEATURE_BATCH_SCAN) |
+ (feature_set_full & WIFI_FEATURE_GSCAN) |
+ (feature_set_full & WIFI_FEATURE_HOTSPOT) |
+ (feature_set_full & WIFI_FEATURE_ADDITIONAL_STA);
+ break;
+
+ case 1:
+ ret |= (feature_set_full & WIFI_FEATURE_P2P);
+ /* Not yet verified NAN with P2P */
+ /* (feature_set_full & WIFI_FEATURE_NAN) | */
+ break;
+
+ case 2:
+ ret |= (feature_set_full & WIFI_FEATURE_NAN) |
+ (feature_set_full & WIFI_FEATURE_TDLS) |
+ (feature_set_full & WIFI_FEATURE_TDLS_OFFCHANNEL);
+ break;
+
+ default:
+ ret = WIFI_FEATURE_INVALID;
+ DHD_ERROR(("%s: Out of index(%d) for get feature set\n", __FUNCTION__, num));
+ break;
}
-#endif /* defined(BCMSDIO) */
- return 0;
+
+ return ret;
}
-int net_os_wake_unlock(struct net_device *dev)
+#ifdef CUSTOM_FORCE_NODFS_FLAG
+int
+dhd_dev_set_nodfs(struct net_device *dev, u32 nodfs)
{
dhd_info_t *dhd = DHD_DEV_INFO(dev);
- int ret = 0;
- if (dhd)
- ret = dhd_os_wake_unlock(&dhd->pub);
- return ret;
+ if (nodfs)
+ dhd->pub.dhd_cflags |= WLAN_PLAT_NODFS_FLAG;
+ else
+ dhd->pub.dhd_cflags &= ~WLAN_PLAT_NODFS_FLAG;
+ dhd->pub.force_country_change = TRUE;
+ return 0;
}
+#endif /* CUSTOM_FORCE_NODFS_FLAG */
-int dhd_os_wd_wake_lock(dhd_pub_t *pub)
+#ifdef NDO_CONFIG_SUPPORT
+int
+dhd_dev_ndo_cfg(struct net_device *dev, u8 enable)
{
- dhd_info_t *dhd = (dhd_info_t *)(pub->info);
- unsigned long flags;
+ dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
+ dhd_pub_t *dhdp = &dhd->pub;
int ret = 0;
- if (dhd) {
- spin_lock_irqsave(&dhd->wakelock_spinlock, flags);
- if (dhd->wakelock_wd_counter == 0 && !dhd->waive_wakelock) {
-#ifdef CONFIG_HAS_WAKELOCK
- /* if wakelock_wd_counter was never used : lock it at once */
- wake_lock(&dhd->wl_wdwake);
-#endif // endif
- }
- dhd->wakelock_wd_counter++;
- ret = dhd->wakelock_wd_counter;
- spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
- }
- return ret;
-}
+ if (enable) {
+ /* enable ND offload feature (will be enabled in FW on suspend) */
+ dhdp->ndo_enable = TRUE;
-int dhd_os_wd_wake_unlock(dhd_pub_t *pub)
-{
- dhd_info_t *dhd = (dhd_info_t *)(pub->info);
- unsigned long flags;
- int ret = 0;
+ /* Update changes of anycast address & DAD failed address */
+ ret = dhd_dev_ndo_update_inet6addr(dev);
+ if ((ret < 0) && (ret != BCME_NORESOURCE)) {
+ DHD_ERROR(("%s: failed to update host ip addr: %d\n", __FUNCTION__, ret));
+ return ret;
+ }
+ } else {
+ /* disable ND offload feature */
+ dhdp->ndo_enable = FALSE;
- if (dhd) {
- spin_lock_irqsave(&dhd->wakelock_spinlock, flags);
- if (dhd->wakelock_wd_counter > 0) {
- dhd->wakelock_wd_counter = 0;
- if (!dhd->waive_wakelock) {
-#ifdef CONFIG_HAS_WAKELOCK
- wake_unlock(&dhd->wl_wdwake);
-#endif // endif
- }
+ /* disable ND offload in FW */
+ ret = dhd_ndo_enable(dhdp, 0);
+ if (ret < 0) {
+ DHD_ERROR(("%s: failed to disable NDO: %d\n", __FUNCTION__, ret));
}
- spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
}
return ret;
}
-#ifdef BCMPCIE_OOB_HOST_WAKE
-void
-dhd_os_oob_irq_wake_lock_timeout(dhd_pub_t *pub, int val)
-{
-#ifdef CONFIG_HAS_WAKELOCK
- dhd_info_t *dhd = (dhd_info_t *)(pub->info);
-
- if (dhd) {
- wake_lock_timeout(&dhd->wl_intrwake, msecs_to_jiffies(val));
- }
-#endif /* CONFIG_HAS_WAKELOCK */
-}
+/* #pragma used as a WAR to fix a build failure:
+* ignore dropping of the 'const' qualifier in the 'list_entry' macro.
+* This pragma disables the warning only for the following function.
+*/
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wcast-qual"
-void
-dhd_os_oob_irq_wake_unlock(dhd_pub_t *pub)
+static int
+dhd_dev_ndo_get_valid_inet6addr_count(struct inet6_dev *inet6)
{
-#ifdef CONFIG_HAS_WAKELOCK
- dhd_info_t *dhd = (dhd_info_t *)(pub->info);
+ struct inet6_ifaddr *ifa;
+ struct ifacaddr6 *acaddr = NULL;
+ int addr_count = 0;
- if (dhd) {
- /* if wl_intrwake is active, unlock it */
- if (wake_lock_active(&dhd->wl_intrwake)) {
- wake_unlock(&dhd->wl_intrwake);
+ /* lock */
+ read_lock_bh(&inet6->lock);
+
+ /* Count valid unicast address */
+ list_for_each_entry(ifa, &inet6->addr_list, if_list) {
+ if ((ifa->flags & IFA_F_DADFAILED) == 0) {
+ addr_count++;
}
}
-#endif /* CONFIG_HAS_WAKELOCK */
-}
-#endif /* BCMPCIE_OOB_HOST_WAKE */
-
-#ifdef DHD_USE_SCAN_WAKELOCK
-void
-dhd_os_scan_wake_lock_timeout(dhd_pub_t *pub, int val)
-{
-#ifdef CONFIG_HAS_WAKELOCK
- dhd_info_t *dhd = (dhd_info_t *)(pub->info);
- if (dhd) {
- wake_lock_timeout(&dhd->wl_scanwake, msecs_to_jiffies(val));
+ /* Count anycast address */
+ acaddr = inet6->ac_list;
+ while (acaddr) {
+ addr_count++;
+ acaddr = acaddr->aca_next;
}
-#endif /* CONFIG_HAS_WAKELOCK */
-}
-void
-dhd_os_scan_wake_unlock(dhd_pub_t *pub)
-{
-#ifdef CONFIG_HAS_WAKELOCK
- dhd_info_t *dhd = (dhd_info_t *)(pub->info);
+ /* unlock */
+ read_unlock_bh(&inet6->lock);
- if (dhd) {
- /* if wl_scanwake is active, unlock it */
- if (wake_lock_active(&dhd->wl_scanwake)) {
- wake_unlock(&dhd->wl_scanwake);
- }
- }
-#endif /* CONFIG_HAS_WAKELOCK */
+ return addr_count;
}
-#endif /* DHD_USE_SCAN_WAKELOCK */
-/* waive wakelocks for operations such as IOVARs in suspend function, must be closed
- * by a paired function call to dhd_wakelock_restore. returns current wakelock counter
- */
-int dhd_os_wake_lock_waive(dhd_pub_t *pub)
+int
+dhd_dev_ndo_update_inet6addr(struct net_device *dev)
{
- dhd_info_t *dhd = (dhd_info_t *)(pub->info);
- unsigned long flags;
- int ret = 0;
+ dhd_info_t *dhd;
+ dhd_pub_t *dhdp;
+ struct inet6_dev *inet6;
+ struct inet6_ifaddr *ifa;
+ struct ifacaddr6 *acaddr = NULL;
+ struct in6_addr *ipv6_addr = NULL;
+ int cnt, i;
+ int ret = BCME_OK;
- if (dhd && (dhd->dhd_state & DHD_ATTACH_STATE_WAKELOCKS_INIT)) {
- spin_lock_irqsave(&dhd->wakelock_spinlock, flags);
+ /*
+ * This function evaluates the host IP addresses in struct inet6_dev:
+ * unicast addresses in inet6_dev->addr_list and
+ * anycast addresses in inet6_dev->ac_list.
+ * While walking inet6_dev, read_lock_bh() is required to prevent
+ * access to a NULL (freed) pointer.
+ */
- /* dhd_wakelock_waive/dhd_wakelock_restore must be paired */
- if (dhd->waive_wakelock == FALSE) {
-#ifdef DHD_TRACE_WAKE_LOCK
- if (atomic_read(&trace_wklock_onoff)) {
- STORE_WKLOCK_RECORD(DHD_WAIVE_LOCK);
- }
-#endif /* DHD_TRACE_WAKE_LOCK */
- /* record current lock status */
- dhd->wakelock_before_waive = dhd->wakelock_counter;
- dhd->waive_wakelock = TRUE;
+ if (dev) {
+ inet6 = dev->ip6_ptr;
+ if (!inet6) {
+ DHD_ERROR(("%s: Invalid inet6_dev\n", __FUNCTION__));
+ return BCME_ERROR;
}
- ret = dhd->wakelock_wd_counter;
- spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
- }
- return ret;
-}
-int dhd_os_wake_lock_restore(dhd_pub_t *pub)
-{
- dhd_info_t *dhd = (dhd_info_t *)(pub->info);
- unsigned long flags;
- int ret = 0;
+ dhd = DHD_DEV_INFO(dev);
+ if (!dhd) {
+ DHD_ERROR(("%s: Invalid dhd_info\n", __FUNCTION__));
+ return BCME_ERROR;
+ }
+ dhdp = &dhd->pub;
- if (!dhd)
- return 0;
- if ((dhd->dhd_state & DHD_ATTACH_STATE_WAKELOCKS_INIT) == 0)
- return 0;
+ if (dhd_net2idx(dhd, dev) != 0) {
+ DHD_ERROR(("%s: Not primary interface\n", __FUNCTION__));
+ return BCME_ERROR;
+ }
+ } else {
+ DHD_ERROR(("%s: Invalid net_device\n", __FUNCTION__));
+ return BCME_ERROR;
+ }
- spin_lock_irqsave(&dhd->wakelock_spinlock, flags);
+ /* Check host IP overflow */
+ cnt = dhd_dev_ndo_get_valid_inet6addr_count(inet6);
+ if (cnt > dhdp->ndo_max_host_ip) {
+ if (!dhdp->ndo_host_ip_overflow) {
+ dhdp->ndo_host_ip_overflow = TRUE;
+ /* Disable ND offload in FW */
+ DHD_INFO(("%s: Host IP overflow, disable NDO\n", __FUNCTION__));
+ ret = dhd_ndo_enable(dhdp, 0);
+ }
- /* dhd_wakelock_waive/dhd_wakelock_restore must be paired */
- if (!dhd->waive_wakelock)
- goto exit;
+ return ret;
+ }
- dhd->waive_wakelock = FALSE;
- /* if somebody else acquires wakelock between dhd_wakelock_waive/dhd_wakelock_restore,
- * we need to make it up by calling wake_lock or pm_stay_awake. or if somebody releases
- * the lock in between, do the same by calling wake_unlock or pm_relax
+ /*
+ * Allocate an IPv6 address buffer to store the addresses to be added/removed.
+ * The driver needs to lock inet6_dev while accessing the structure, but it
+ * cannot issue an ioctl while inet6_dev is locked since that requires
+ * scheduling; hence, copy the addresses to the buffer and do the ioctl after unlocking.
*/
-#ifdef DHD_TRACE_WAKE_LOCK
- if (atomic_read(&trace_wklock_onoff)) {
- STORE_WKLOCK_RECORD(DHD_RESTORE_LOCK);
+ ipv6_addr = (struct in6_addr *)MALLOC(dhdp->osh,
+ sizeof(struct in6_addr) * dhdp->ndo_max_host_ip);
+ if (!ipv6_addr) {
+ DHD_ERROR(("%s: failed to alloc ipv6 addr buffer\n", __FUNCTION__));
+ return BCME_NOMEM;
}
-#endif /* DHD_TRACE_WAKE_LOCK */
- if (dhd->wakelock_before_waive == 0 && dhd->wakelock_counter > 0) {
-#ifdef CONFIG_HAS_WAKELOCK
- wake_lock(&dhd->wl_wifi);
-#elif defined(BCMSDIO)
- dhd_bus_dev_pm_stay_awake(&dhd->pub);
-#endif // endif
- } else if (dhd->wakelock_before_waive > 0 && dhd->wakelock_counter == 0) {
-#ifdef CONFIG_HAS_WAKELOCK
- wake_unlock(&dhd->wl_wifi);
-#elif defined(BCMSDIO)
- dhd_bus_dev_pm_relax(&dhd->pub);
-#endif // endif
+ /* Find DAD failed unicast address to be removed */
+ cnt = 0;
+ read_lock_bh(&inet6->lock);
+ list_for_each_entry(ifa, &inet6->addr_list, if_list) {
+ /* DAD failed unicast address */
+ if ((ifa->flags & IFA_F_DADFAILED) &&
+ (cnt < dhdp->ndo_max_host_ip)) {
+ memcpy(&ipv6_addr[cnt], &ifa->addr, sizeof(struct in6_addr));
+ cnt++;
+ }
}
- dhd->wakelock_before_waive = 0;
-exit:
- ret = dhd->wakelock_wd_counter;
- spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
- return ret;
-}
-
-void dhd_os_wake_lock_init(struct dhd_info *dhd)
-{
- DHD_TRACE(("%s: initialize wake_lock_counters\n", __FUNCTION__));
- dhd->wakelock_counter = 0;
- dhd->wakelock_rx_timeout_enable = 0;
- dhd->wakelock_ctrl_timeout_enable = 0;
- /* wakelocks prevent a system from going into a low power state */
-#ifdef CONFIG_HAS_WAKELOCK
- // terence 20161023: can not destroy wl_wifi when wlan down, it will happen null pointer in dhd_ioctl_entry
- wake_lock_init(&dhd->wl_rxwake, WAKE_LOCK_SUSPEND, "wlan_rx_wake");
- wake_lock_init(&dhd->wl_ctrlwake, WAKE_LOCK_SUSPEND, "wlan_ctrl_wake");
- wake_lock_init(&dhd->wl_evtwake, WAKE_LOCK_SUSPEND, "wlan_evt_wake");
- wake_lock_init(&dhd->wl_pmwake, WAKE_LOCK_SUSPEND, "wlan_pm_wake");
- wake_lock_init(&dhd->wl_txflwake, WAKE_LOCK_SUSPEND, "wlan_txfl_wake");
-#ifdef BCMPCIE_OOB_HOST_WAKE
- wake_lock_init(&dhd->wl_intrwake, WAKE_LOCK_SUSPEND, "wlan_oob_irq_wake");
-#endif /* BCMPCIE_OOB_HOST_WAKE */
-#ifdef DHD_USE_SCAN_WAKELOCK
- wake_lock_init(&dhd->wl_scanwake, WAKE_LOCK_SUSPEND, "wlan_scan_wake");
-#endif /* DHD_USE_SCAN_WAKELOCK */
-#endif /* CONFIG_HAS_WAKELOCK */
-#ifdef DHD_TRACE_WAKE_LOCK
- dhd_wk_lock_trace_init(dhd);
-#endif /* DHD_TRACE_WAKE_LOCK */
-}
-
-void dhd_os_wake_lock_destroy(struct dhd_info *dhd)
-{
- DHD_TRACE(("%s: deinit wake_lock_counters\n", __FUNCTION__));
-#ifdef CONFIG_HAS_WAKELOCK
- dhd->wakelock_counter = 0;
- dhd->wakelock_rx_timeout_enable = 0;
- dhd->wakelock_ctrl_timeout_enable = 0;
- // terence 20161023: can not destroy wl_wifi when wlan down, it will happen null pointer in dhd_ioctl_entry
- wake_lock_destroy(&dhd->wl_rxwake);
- wake_lock_destroy(&dhd->wl_ctrlwake);
- wake_lock_destroy(&dhd->wl_evtwake);
- wake_lock_destroy(&dhd->wl_pmwake);
- wake_lock_destroy(&dhd->wl_txflwake);
-#ifdef BCMPCIE_OOB_HOST_WAKE
- wake_lock_destroy(&dhd->wl_intrwake);
-#endif /* BCMPCIE_OOB_HOST_WAKE */
-#ifdef DHD_USE_SCAN_WAKELOCK
- wake_lock_destroy(&dhd->wl_scanwake);
-#endif /* DHD_USE_SCAN_WAKELOCK */
-#ifdef DHD_TRACE_WAKE_LOCK
- dhd_wk_lock_trace_deinit(dhd);
-#endif /* DHD_TRACE_WAKE_LOCK */
-#endif /* CONFIG_HAS_WAKELOCK */
-}
-
-bool dhd_os_check_if_up(dhd_pub_t *pub)
-{
- if (!pub)
- return FALSE;
- return pub->up;
-}
+ read_unlock_bh(&inet6->lock);
-/* function to collect firmware, chip id and chip version info */
-void dhd_set_version_info(dhd_pub_t *dhdp, char *fw)
-{
- int i;
+ /* Remove DAD failed unicast address */
+ for (i = 0; i < cnt; i++) {
+ DHD_INFO(("%s: Remove DAD failed addr\n", __FUNCTION__));
+ ret = dhd_ndo_remove_ip_by_addr(dhdp, (char *)&ipv6_addr[i], 0);
+ if (ret < 0) {
+ goto done;
+ }
+ }
- i = snprintf(info_string, sizeof(info_string),
- " Driver: %s\n Firmware: %s\n CLM: %s ", EPI_VERSION_STR, fw, clm_version);
- printf("%s\n", info_string);
+ /* Remove all anycast address */
+ ret = dhd_ndo_remove_ip_by_type(dhdp, WL_ND_IPV6_ADDR_TYPE_ANYCAST, 0);
+ if (ret < 0) {
+ goto done;
+ }
- if (!dhdp)
- return;
+ /*
+ * if ND offload was disabled due to host ip overflow,
+ * attempt to add valid unicast address.
+ */
+ if (dhdp->ndo_host_ip_overflow) {
+ /* Find valid unicast address */
+ cnt = 0;
+ read_lock_bh(&inet6->lock);
+ list_for_each_entry(ifa, &inet6->addr_list, if_list) {
+ /* valid unicast address */
+ if (!(ifa->flags & IFA_F_DADFAILED) &&
+ (cnt < dhdp->ndo_max_host_ip)) {
+ memcpy(&ipv6_addr[cnt], &ifa->addr,
+ sizeof(struct in6_addr));
+ cnt++;
+ }
+ }
+ read_unlock_bh(&inet6->lock);
- i = snprintf(&info_string[i], sizeof(info_string) - i,
- "\n Chip: %x Rev %x", dhd_conf_get_chip(dhdp),
- dhd_conf_get_chiprev(dhdp));
-}
+ /* Add valid unicast address */
+ for (i = 0; i < cnt; i++) {
+ ret = dhd_ndo_add_ip_with_type(dhdp,
+ (char *)&ipv6_addr[i], WL_ND_IPV6_ADDR_TYPE_UNICAST, 0);
+ if (ret < 0) {
+ goto done;
+ }
+ }
+ }
-int dhd_ioctl_entry_local(struct net_device *net, wl_ioctl_t *ioc, int cmd)
-{
- int ifidx;
- int ret = 0;
- dhd_info_t *dhd = NULL;
+ /* Find anycast address */
+ cnt = 0;
+ read_lock_bh(&inet6->lock);
+ acaddr = inet6->ac_list;
+ while (acaddr) {
+ if (cnt < dhdp->ndo_max_host_ip) {
+ memcpy(&ipv6_addr[cnt], &acaddr->aca_addr, sizeof(struct in6_addr));
+ cnt++;
+ }
+ acaddr = acaddr->aca_next;
+ }
+ read_unlock_bh(&inet6->lock);
- if (!net || !DEV_PRIV(net)) {
- DHD_ERROR(("%s invalid parameter net %p dev_priv %p\n",
- __FUNCTION__, net, DEV_PRIV(net)));
- return -EINVAL;
+ /* Add anycast address */
+ for (i = 0; i < cnt; i++) {
+ ret = dhd_ndo_add_ip_with_type(dhdp,
+ (char *)&ipv6_addr[i], WL_ND_IPV6_ADDR_TYPE_ANYCAST, 0);
+ if (ret < 0) {
+ goto done;
+ }
}
- dhd = DHD_DEV_INFO(net);
- if (!dhd)
- return -EINVAL;
+ /* Now all host IP addresses have been added successfully */
+ if (dhdp->ndo_host_ip_overflow) {
+ dhdp->ndo_host_ip_overflow = FALSE;
+ if (dhdp->in_suspend) {
+ /* driver is in (early) suspend state, need to enable ND offload in FW */
+ DHD_INFO(("%s: enable NDO\n", __FUNCTION__));
+ ret = dhd_ndo_enable(dhdp, 1);
+ }
+ }
- ifidx = dhd_net2idx(dhd, net);
- if (ifidx == DHD_BAD_IF) {
- DHD_ERROR(("%s bad ifidx\n", __FUNCTION__));
- return -ENODEV;
+done:
+ if (ipv6_addr) {
+ MFREE(dhdp->osh, ipv6_addr, sizeof(struct in6_addr) * dhdp->ndo_max_host_ip);
}
- DHD_OS_WAKE_LOCK(&dhd->pub);
- DHD_PERIM_LOCK(&dhd->pub);
+ return ret;
+}
+#pragma GCC diagnostic pop
- ret = dhd_wl_ioctl(&dhd->pub, ifidx, ioc, ioc->buf, ioc->len);
- dhd_check_hang(net, &dhd->pub, ret);
+#endif /* NDO_CONFIG_SUPPORT */
- DHD_PERIM_UNLOCK(&dhd->pub);
- DHD_OS_WAKE_UNLOCK(&dhd->pub);
+#ifdef PNO_SUPPORT
+/* Linux wrapper to call common dhd_pno_stop_for_ssid */
+int
+dhd_dev_pno_stop_for_ssid(struct net_device *dev)
+{
+ dhd_info_t *dhd = DHD_DEV_INFO(dev);
- return ret;
+ return (dhd_pno_stop_for_ssid(&dhd->pub));
}
-bool dhd_os_check_hang(dhd_pub_t *dhdp, int ifidx, int ret)
+/* Linux wrapper to call common dhd_pno_set_for_ssid */
+int
+dhd_dev_pno_set_for_ssid(struct net_device *dev, wlc_ssid_ext_t* ssids_local, int nssid,
+ uint16 scan_fr, int pno_repeat, int pno_freq_expo_max, uint16 *channel_list, int nchan)
{
- struct net_device *net;
+ dhd_info_t *dhd = DHD_DEV_INFO(dev);
- net = dhd_idx2net(dhdp, ifidx);
- if (!net) {
- DHD_ERROR(("%s : Invalid index : %d\n", __FUNCTION__, ifidx));
- return -EINVAL;
- }
+ return (dhd_pno_set_for_ssid(&dhd->pub, ssids_local, nssid, scan_fr,
+ pno_repeat, pno_freq_expo_max, channel_list, nchan));
+}
- return dhd_check_hang(net, dhdp, ret);
+/* Linux wrapper to call common dhd_pno_enable */
+int
+dhd_dev_pno_enable(struct net_device *dev, int enable)
+{
+ dhd_info_t *dhd = DHD_DEV_INFO(dev);
+
+ return (dhd_pno_enable(&dhd->pub, enable));
}
-/* Return instance */
-int dhd_get_instance(dhd_pub_t *dhdp)
+/* Linux wrapper to call common dhd_pno_set_for_hotlist */
+int
+dhd_dev_pno_set_for_hotlist(struct net_device *dev, wl_pfn_bssid_t *p_pfn_bssid,
+ struct dhd_pno_hotlist_params *hotlist_params)
{
- return dhdp->info->unit;
+ dhd_info_t *dhd = DHD_DEV_INFO(dev);
+ return (dhd_pno_set_for_hotlist(&dhd->pub, p_pfn_bssid, hotlist_params));
+}
+/* Linux wrapper to call common dhd_dev_pno_stop_for_batch */
+int
+dhd_dev_pno_stop_for_batch(struct net_device *dev)
+{
+ dhd_info_t *dhd = DHD_DEV_INFO(dev);
+ return (dhd_pno_stop_for_batch(&dhd->pub));
}
-#if defined(WL_CFG80211) && defined(SUPPORT_DEEP_SLEEP)
-#define MAX_TRY_CNT 5 /* Number of tries to disable deepsleep */
-int dhd_deepsleep(struct net_device *dev, int flag)
+/* Linux wrapper to call common dhd_dev_pno_set_for_batch */
+int
+dhd_dev_pno_set_for_batch(struct net_device *dev, struct dhd_pno_batch_params *batch_params)
{
- char iovbuf[20];
- uint powervar = 0;
- dhd_info_t *dhd;
- dhd_pub_t *dhdp;
- int cnt = 0;
- int ret = 0;
+ dhd_info_t *dhd = DHD_DEV_INFO(dev);
+ return (dhd_pno_set_for_batch(&dhd->pub, batch_params));
+}
- dhd = DHD_DEV_INFO(dev);
- dhdp = &dhd->pub;
+/* Linux wrapper to call common dhd_dev_pno_get_for_batch */
+int
+dhd_dev_pno_get_for_batch(struct net_device *dev, char *buf, int bufsize)
+{
+ dhd_info_t *dhd = DHD_DEV_INFO(dev);
+ return (dhd_pno_get_for_batch(&dhd->pub, buf, bufsize, PNO_STATUS_NORMAL));
+}
+#endif /* PNO_SUPPORT */
- switch (flag) {
- case 1 : /* Deepsleep on */
- DHD_ERROR(("[WiFi] Deepsleep On\n"));
- /* give some time to sysioc_work before deepsleep */
- OSL_SLEEP(200);
-#ifdef PKT_FILTER_SUPPORT
- /* disable pkt filter */
- dhd_enable_packet_filter(0, dhdp);
-#endif /* PKT_FILTER_SUPPORT */
- /* Disable MPC */
- powervar = 0;
- ret = dhd_iovar(dhdp, 0, "mpc", (char *)&powervar, sizeof(powervar), NULL,
- 0, TRUE);
+#if defined(PNO_SUPPORT)
+#ifdef GSCAN_SUPPORT
+bool
+dhd_dev_is_legacy_pno_enabled(struct net_device *dev)
+{
+ dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
- /* Enable Deepsleep */
- powervar = 1;
- ret = dhd_iovar(dhdp, 0, "deepsleep", (char *)&powervar, sizeof(powervar),
- NULL, 0, TRUE);
- break;
+ return (dhd_is_legacy_pno_enabled(&dhd->pub));
+}
- case 0: /* Deepsleep Off */
- DHD_ERROR(("[WiFi] Deepsleep Off\n"));
+int
+dhd_dev_set_epno(struct net_device *dev)
+{
+ dhd_info_t *dhd = DHD_DEV_INFO(dev);
+ if (!dhd) {
+ return BCME_ERROR;
+ }
+ return dhd_pno_set_epno(&dhd->pub);
+}
+int
+dhd_dev_flush_fw_epno(struct net_device *dev)
+{
+ dhd_info_t *dhd = DHD_DEV_INFO(dev);
+ if (!dhd) {
+ return BCME_ERROR;
+ }
+ return dhd_pno_flush_fw_epno(&dhd->pub);
+}
- /* Disable Deepsleep */
- for (cnt = 0; cnt < MAX_TRY_CNT; cnt++) {
- powervar = 0;
- ret = dhd_iovar(dhdp, 0, "deepsleep", (char *)&powervar,
- sizeof(powervar), NULL, 0, TRUE);
+/* Linux wrapper to call common dhd_pno_set_cfg_gscan */
+int
+dhd_dev_pno_set_cfg_gscan(struct net_device *dev, dhd_pno_gscan_cmd_cfg_t type,
+ void *buf, bool flush)
+{
+ dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
- ret = dhd_iovar(dhdp, 0, "deepsleep", (char *)&powervar,
- sizeof(powervar), iovbuf, sizeof(iovbuf), FALSE);
- if (ret < 0) {
- DHD_ERROR(("the error of dhd deepsleep status"
- " ret value :%d\n", ret));
- } else {
- if (!(*(int *)iovbuf)) {
- DHD_ERROR(("deepsleep mode is 0,"
- " count: %d\n", cnt));
- break;
- }
- }
- }
+ return (dhd_pno_set_cfg_gscan(&dhd->pub, type, buf, flush));
+}
- /* Enable MPC */
- powervar = 1;
- ret = dhd_iovar(dhdp, 0, "mpc", (char *)&powervar, sizeof(powervar), NULL,
- 0, TRUE);
- break;
- }
+/* Linux wrapper to call common dhd_wait_batch_results_complete */
+int
+dhd_dev_wait_batch_results_complete(struct net_device *dev)
+{
+ dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
- return 0;
+ return (dhd_wait_batch_results_complete(&dhd->pub));
}
-#endif /* WL_CFG80211 && SUPPORT_DEEP_SLEEP */
-
-#ifdef PROP_TXSTATUS
-void dhd_wlfc_plat_init(void *dhd)
+/* Linux wrapper to call common dhd_pno_lock_batch_results */
+int
+dhd_dev_pno_lock_access_batch_results(struct net_device *dev)
{
-#ifdef USE_DYNAMIC_F2_BLKSIZE
- dhdsdio_func_blocksize((dhd_pub_t *)dhd, 2, DYNAMIC_F2_BLKSIZE_FOR_NONLEGACY);
-#endif /* USE_DYNAMIC_F2_BLKSIZE */
- return;
-}
+ dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
-void dhd_wlfc_plat_deinit(void *dhd)
+ return (dhd_pno_lock_batch_results(&dhd->pub));
+}
+/* Linux wrapper to call common dhd_pno_unlock_batch_results */
+void
+dhd_dev_pno_unlock_access_batch_results(struct net_device *dev)
{
-#ifdef USE_DYNAMIC_F2_BLKSIZE
- dhdsdio_func_blocksize((dhd_pub_t *)dhd, 2, sd_f2_blocksize);
-#endif /* USE_DYNAMIC_F2_BLKSIZE */
- return;
+ dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
+
+ return (dhd_pno_unlock_batch_results(&dhd->pub));
}
-bool dhd_wlfc_skip_fc(void * dhdp, uint8 idx)
+/* Linux wrapper to call common dhd_pno_initiate_gscan_request */
+int
+dhd_dev_pno_run_gscan(struct net_device *dev, bool run, bool flush)
{
-#ifdef SKIP_WLFC_ON_CONCURRENT
-
-#ifdef WL_CFG80211
- struct net_device * net = dhd_idx2net((dhd_pub_t *)dhdp, idx);
- if (net)
- /* enable flow control in vsdb mode */
- return !(wl_cfg80211_is_concurrent_mode(net));
-#else
- return TRUE; /* skip flow control */
-#endif /* WL_CFG80211 */
+ dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
-#else
- return FALSE;
-#endif /* SKIP_WLFC_ON_CONCURRENT */
- return FALSE;
+ return (dhd_pno_initiate_gscan_request(&dhd->pub, run, flush));
}
-#endif /* PROP_TXSTATUS */
-#ifdef BCMDBGFS
-#include <linux/debugfs.h>
+/* Linux wrapper to call common dhd_pno_enable_full_scan_result */
+int
+dhd_dev_pno_enable_full_scan_result(struct net_device *dev, bool real_time_flag)
+{
+ dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
-typedef struct dhd_dbgfs {
- struct dentry *debugfs_dir;
- struct dentry *debugfs_mem;
- dhd_pub_t *dhdp;
- uint32 size;
-} dhd_dbgfs_t;
+ return (dhd_pno_enable_full_scan_result(&dhd->pub, real_time_flag));
+}
-dhd_dbgfs_t g_dbgfs;
+/* Linux wrapper to call common dhd_handle_hotlist_scan_evt */
+void *
+dhd_dev_hotlist_scan_event(struct net_device *dev,
+ const void *data, int *send_evt_bytes, hotlist_type_t type)
+{
+ dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
-extern uint32 dhd_readregl(void *bp, uint32 addr);
-extern uint32 dhd_writeregl(void *bp, uint32 addr, uint32 data);
+ return (dhd_handle_hotlist_scan_evt(&dhd->pub, data, send_evt_bytes, type));
+}
-static int
-dhd_dbg_state_open(struct inode *inode, struct file *file)
+/* Linux wrapper to call common dhd_process_full_gscan_result */
+void *
+dhd_dev_process_full_gscan_result(struct net_device *dev,
+const void *data, uint32 len, int *send_evt_bytes)
{
- file->private_data = inode->i_private;
- return 0;
+ dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
+
+ return (dhd_process_full_gscan_result(&dhd->pub, data, len, send_evt_bytes));
}
-static ssize_t
-dhd_dbg_state_read(struct file *file, char __user *ubuf,
- size_t count, loff_t *ppos)
+void
+dhd_dev_gscan_hotlist_cache_cleanup(struct net_device *dev, hotlist_type_t type)
{
- ssize_t rval;
- uint32 tmp;
- loff_t pos = *ppos;
- size_t ret;
+ dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
- if (pos < 0)
- return -EINVAL;
- if (pos >= g_dbgfs.size || !count)
- return 0;
- if (count > g_dbgfs.size - pos)
- count = g_dbgfs.size - pos;
+ dhd_gscan_hotlist_cache_cleanup(&dhd->pub, type);
- /* Basically enforce aligned 4 byte reads. It's up to the user to work out the details */
- tmp = dhd_readregl(g_dbgfs.dhdp->bus, file->f_pos & (~3));
+ return;
+}
- ret = copy_to_user(ubuf, &tmp, 4);
- if (ret == count)
- return -EFAULT;
+int
+dhd_dev_gscan_batch_cache_cleanup(struct net_device *dev)
+{
+ dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
- count -= ret;
- *ppos = pos + count;
- rval = count;
+ return (dhd_gscan_batch_cache_cleanup(&dhd->pub));
+}
+
+/* Linux wrapper to call common dhd_retreive_batch_scan_results */
+int
+dhd_dev_retrieve_batch_scan(struct net_device *dev)
+{
+ dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
- return rval;
+ return (dhd_retreive_batch_scan_results(&dhd->pub));
}
-static ssize_t
-dhd_debugfs_write(struct file *file, const char __user *ubuf, size_t count, loff_t *ppos)
+/* Linux wrapper to call common dhd_pno_process_epno_result */
+void * dhd_dev_process_epno_result(struct net_device *dev,
+ const void *data, uint32 event, int *send_evt_bytes)
{
- loff_t pos = *ppos;
- size_t ret;
- uint32 buf;
+ dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
- if (pos < 0)
- return -EINVAL;
- if (pos >= g_dbgfs.size || !count)
- return 0;
- if (count > g_dbgfs.size - pos)
- count = g_dbgfs.size - pos;
+ return (dhd_pno_process_epno_result(&dhd->pub, data, event, send_evt_bytes));
+}
- ret = copy_from_user(&buf, ubuf, sizeof(uint32));
- if (ret == count)
- return -EFAULT;
+int
+dhd_dev_set_lazy_roam_cfg(struct net_device *dev,
+ wlc_roam_exp_params_t *roam_param)
+{
+ dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
+ wl_roam_exp_cfg_t roam_exp_cfg;
+ int err;
- /* Basically enforce aligned 4 byte writes. It's up to the user to work out the details */
- dhd_writeregl(g_dbgfs.dhdp->bus, file->f_pos & (~3), buf);
+ if (!roam_param) {
+ return BCME_BADARG;
+ }
- return count;
+ DHD_ERROR(("a_band_boost_thr %d a_band_penalty_thr %d\n",
+ roam_param->a_band_boost_threshold, roam_param->a_band_penalty_threshold));
+ DHD_ERROR(("a_band_boost_factor %d a_band_penalty_factor %d cur_bssid_boost %d\n",
+ roam_param->a_band_boost_factor, roam_param->a_band_penalty_factor,
+ roam_param->cur_bssid_boost));
+ DHD_ERROR(("alert_roam_trigger_thr %d a_band_max_boost %d\n",
+ roam_param->alert_roam_trigger_threshold, roam_param->a_band_max_boost));
+
+ memcpy(&roam_exp_cfg.params, roam_param, sizeof(*roam_param));
+ roam_exp_cfg.version = ROAM_EXP_CFG_VERSION;
+ roam_exp_cfg.flags = ROAM_EXP_CFG_PRESENT;
+ if (dhd->pub.lazy_roam_enable) {
+ roam_exp_cfg.flags |= ROAM_EXP_ENABLE_FLAG;
+ }
+ err = dhd_iovar(&dhd->pub, 0, "roam_exp_params",
+ (char *)&roam_exp_cfg, sizeof(roam_exp_cfg), NULL, 0,
+ TRUE);
+ if (err < 0) {
+ DHD_ERROR(("%s : Failed to execute roam_exp_params %d\n", __FUNCTION__, err));
+ }
+ return err;
}
-loff_t
-dhd_debugfs_lseek(struct file *file, loff_t off, int whence)
+int
+dhd_dev_lazy_roam_enable(struct net_device *dev, uint32 enable)
{
- loff_t pos = -1;
+ int err;
+ dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
+ wl_roam_exp_cfg_t roam_exp_cfg;
- switch (whence) {
- case 0:
- pos = off;
- break;
- case 1:
- pos = file->f_pos + off;
- break;
- case 2:
- pos = g_dbgfs.size - off;
+ memset(&roam_exp_cfg, 0, sizeof(roam_exp_cfg));
+ roam_exp_cfg.version = ROAM_EXP_CFG_VERSION;
+ if (enable) {
+ roam_exp_cfg.flags = ROAM_EXP_ENABLE_FLAG;
}
- return (pos < 0 || pos > g_dbgfs.size) ? -EINVAL : (file->f_pos = pos);
+
+ err = dhd_iovar(&dhd->pub, 0, "roam_exp_params",
+ (char *)&roam_exp_cfg, sizeof(roam_exp_cfg), NULL, 0,
+ TRUE);
+ if (err < 0) {
+ DHD_ERROR(("%s : Failed to execute roam_exp_params %d\n", __FUNCTION__, err));
+ } else {
+ dhd->pub.lazy_roam_enable = (enable != 0);
+ }
+ return err;
}
-static const struct file_operations dhd_dbg_state_ops = {
- .read = dhd_dbg_state_read,
- .write = dhd_debugfs_write,
- .open = dhd_dbg_state_open,
- .llseek = dhd_debugfs_lseek
-};
+int
+dhd_dev_set_lazy_roam_bssid_pref(struct net_device *dev,
+ wl_bssid_pref_cfg_t *bssid_pref, uint32 flush)
+{
+ int err;
+ int len;
+ dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
-static void dhd_dbgfs_create(void)
+ bssid_pref->version = BSSID_PREF_LIST_VERSION;
+ /* By default programming bssid pref flushes out old values */
+ bssid_pref->flags = (flush && !bssid_pref->count) ? ROAM_EXP_CLEAR_BSSID_PREF: 0;
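+ /* wl_bssid_pref_cfg_t embeds one list entry, so only count - 1
+ * additional entries are needed.
+ */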
+ len = sizeof(wl_bssid_pref_cfg_t);
+ len += (bssid_pref->count - 1) * sizeof(wl_bssid_pref_list_t);
+ err = dhd_iovar(&(dhd->pub), 0, "roam_exp_bssid_pref", (char *)bssid_pref,
+ len, NULL, 0, TRUE);
+ if (err != BCME_OK) {
+ DHD_ERROR(("%s : Failed to execute roam_exp_bssid_pref %d\n", __FUNCTION__, err));
+ }
+ return err;
+}
+
+int
+dhd_dev_set_blacklist_bssid(struct net_device *dev, maclist_t *blacklist,
+ uint32 len, uint32 flush)
{
- if (g_dbgfs.debugfs_dir) {
- g_dbgfs.debugfs_mem = debugfs_create_file("mem", 0644, g_dbgfs.debugfs_dir,
- NULL, &dhd_dbg_state_ops);
+ int err;
+ dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
+ int macmode;
+
+ if (blacklist) {
+ err = dhd_wl_ioctl_cmd(&(dhd->pub), WLC_SET_MACLIST, (char *)blacklist,
+ len, TRUE, 0);
+ if (err != BCME_OK) {
+ DHD_ERROR(("%s : WLC_SET_MACLIST failed %d\n", __FUNCTION__, err));
+ return err;
+ }
+ }
+ /* By default programming blacklist flushes out old values */
+ macmode = (flush && !blacklist) ? WLC_MACMODE_DISABLED : WLC_MACMODE_DENY;
+ err = dhd_wl_ioctl_cmd(&(dhd->pub), WLC_SET_MACMODE, (char *)&macmode,
+ sizeof(macmode), TRUE, 0);
+ if (err != BCME_OK) {
+ DHD_ERROR(("%s : WLC_SET_MACMODE failed %d\n", __FUNCTION__, err));
}
+ return err;
}
-void dhd_dbgfs_init(dhd_pub_t *dhdp)
+int
+dhd_dev_set_whitelist_ssid(struct net_device *dev, wl_ssid_whitelist_t *ssid_whitelist,
+ uint32 len, uint32 flush)
{
- g_dbgfs.dhdp = dhdp;
- g_dbgfs.size = 0x20000000; /* Allow access to various cores regs */
+ int err;
+ dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
+ wl_ssid_whitelist_t whitelist_ssid_flush;
- g_dbgfs.debugfs_dir = debugfs_create_dir("dhd", 0);
- if (IS_ERR(g_dbgfs.debugfs_dir)) {
- g_dbgfs.debugfs_dir = NULL;
- return;
+ if (!ssid_whitelist) {
+ if (flush) {
+ ssid_whitelist = &whitelist_ssid_flush;
+ ssid_whitelist->ssid_count = 0;
+ } else {
+ DHD_ERROR(("%s : Nothing to do here\n", __FUNCTION__));
+ return BCME_BADARG;
+ }
+ }
+ ssid_whitelist->version = SSID_WHITELIST_VERSION;
+ ssid_whitelist->flags = flush ? ROAM_EXP_CLEAR_SSID_WHITELIST : 0;
+ err = dhd_iovar(&(dhd->pub), 0, "roam_exp_ssid_whitelist", (char *)ssid_whitelist,
+ len, NULL, 0, TRUE);
+ if (err != BCME_OK) {
+ DHD_ERROR(("%s : Failed to execute roam_exp_ssid_whitelist %d\n", __FUNCTION__, err));
}
+ return err;
+}
+#endif /* GSCAN_SUPPORT */
- dhd_dbgfs_create();
+#if defined(GSCAN_SUPPORT) || defined(DHD_GET_VALID_CHANNELS)
+/* Linux wrapper to call common dhd_pno_get_gscan */
+void *
+dhd_dev_pno_get_gscan(struct net_device *dev, dhd_pno_gscan_cmd_cfg_t type,
+ void *info, uint32 *len)
+{
+ dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
- return;
+ return (dhd_pno_get_gscan(&dhd->pub, type, info, len));
}
+#endif /* GSCAN_SUPPORT || DHD_GET_VALID_CHANNELS */
+#endif /* PNO_SUPPORT */
-void dhd_dbgfs_remove(void)
+#ifdef RSSI_MONITOR_SUPPORT
+int
+dhd_dev_set_rssi_monitor_cfg(struct net_device *dev, int start,
+ int8 max_rssi, int8 min_rssi)
{
- debugfs_remove(g_dbgfs.debugfs_mem);
- debugfs_remove(g_dbgfs.debugfs_dir);
+ int err;
+ wl_rssi_monitor_cfg_t rssi_monitor;
+ dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
- bzero((unsigned char *) &g_dbgfs, sizeof(g_dbgfs));
+ rssi_monitor.version = RSSI_MONITOR_VERSION;
+ rssi_monitor.max_rssi = max_rssi;
+ rssi_monitor.min_rssi = min_rssi;
+ rssi_monitor.flags = start ? 0: RSSI_MONITOR_STOP;
+ err = dhd_iovar(&(dhd->pub), 0, "rssi_monitor", (char *)&rssi_monitor,
+ sizeof(rssi_monitor), NULL, 0, TRUE);
+ if (err < 0 && err != BCME_UNSUPPORTED) {
+ DHD_ERROR(("%s : Failed to execute rssi_monitor %d\n", __FUNCTION__, err));
+ }
+ return err;
}
-#endif /* BCMDBGFS */
+#endif /* RSSI_MONITOR_SUPPORT */
-#ifdef CUSTOM_SET_CPUCORE
-void dhd_set_cpucore(dhd_pub_t *dhd, int set)
+#ifdef DHDTCPACK_SUPPRESS
+int dhd_dev_set_tcpack_sup_mode_cfg(struct net_device *dev, uint8 enable)
{
- int e_dpc = 0, e_rxf = 0, retry_set = 0;
+ int err;
+ dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
- if (!(dhd->chan_isvht80)) {
- DHD_ERROR(("%s: chan_status(%d) cpucore!!!\n", __FUNCTION__, dhd->chan_isvht80));
- return;
+ err = dhd_tcpack_suppress_set(&(dhd->pub), enable);
+ if (err != BCME_OK) {
+ DHD_ERROR(("%s : Failed to set tcpack_suppress mode %d\n", __FUNCTION__, err));
}
+ return err;
+}
+#endif /* DHDTCPACK_SUPPRESS */
- if (DPC_CPUCORE) {
- do {
- if (set == TRUE) {
- e_dpc = set_cpus_allowed_ptr(dhd->current_dpc,
- cpumask_of(DPC_CPUCORE));
- } else {
- e_dpc = set_cpus_allowed_ptr(dhd->current_dpc,
- cpumask_of(PRIMARY_CPUCORE));
- }
- if (retry_set++ > MAX_RETRY_SET_CPUCORE) {
- DHD_ERROR(("%s: dpc(%d) invalid cpu!\n", __FUNCTION__, e_dpc));
- return;
- }
- if (e_dpc < 0)
- OSL_SLEEP(1);
- } while (e_dpc < 0);
+int
+dhd_dev_cfg_rand_mac_oui(struct net_device *dev, uint8 *oui)
+{
+ dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
+ dhd_pub_t *dhdp = &dhd->pub;
+
+ if (!dhdp || !oui) {
+ DHD_ERROR(("NULL POINTER : %s\n",
+ __FUNCTION__));
+ return BCME_ERROR;
}
- if (RXF_CPUCORE) {
- do {
- if (set == TRUE) {
- e_rxf = set_cpus_allowed_ptr(dhd->current_rxf,
- cpumask_of(RXF_CPUCORE));
- } else {
- e_rxf = set_cpus_allowed_ptr(dhd->current_rxf,
- cpumask_of(PRIMARY_CPUCORE));
- }
- if (retry_set++ > MAX_RETRY_SET_CPUCORE) {
- DHD_ERROR(("%s: rxf(%d) invalid cpu!\n", __FUNCTION__, e_rxf));
- return;
- }
- if (e_rxf < 0)
- OSL_SLEEP(1);
- } while (e_rxf < 0);
+ if (ETHER_ISMULTI(oui)) {
+ DHD_ERROR(("Expected unicast OUI\n"));
+ return BCME_ERROR;
+ } else {
+ uint8 *rand_mac_oui = dhdp->rand_mac_oui;
+ memcpy(rand_mac_oui, oui, DOT11_OUI_LEN);
+ DHD_ERROR(("Random MAC OUI to be used - %02x:%02x:%02x\n", rand_mac_oui[0],
+ rand_mac_oui[1], rand_mac_oui[2]));
}
- DHD_TRACE(("%s: set(%d) cpucore success!\n", __FUNCTION__, set));
-
- return;
+ return BCME_OK;
}
-#endif /* CUSTOM_SET_CPUCORE */
-#ifdef DHD_MCAST_REGEN
-/* Get interface specific ap_isolate configuration */
-int dhd_get_mcast_regen_bss_enable(dhd_pub_t *dhdp, uint32 idx)
+int
+dhd_set_rand_mac_oui(dhd_pub_t *dhd)
{
- dhd_info_t *dhd = dhdp->info;
- dhd_if_t *ifp;
+ int err;
+ wl_pfn_macaddr_cfg_t wl_cfg;
+ uint8 *rand_mac_oui = dhd->rand_mac_oui;
- ASSERT(idx < DHD_MAX_IFS);
+ memset(&wl_cfg.macaddr, 0, ETHER_ADDR_LEN);
+ memcpy(&wl_cfg.macaddr, rand_mac_oui, DOT11_OUI_LEN);
+ wl_cfg.version = WL_PFN_MACADDR_CFG_VER;
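+ /* The OUI-only and unassoc flags are set only when a non-null OUI is configured */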
+ if (ETHER_ISNULLADDR(&wl_cfg.macaddr)) {
+ wl_cfg.flags = 0;
+ } else {
+ wl_cfg.flags = (WL_PFN_MAC_OUI_ONLY_MASK | WL_PFN_SET_MAC_UNASSOC_MASK);
+ }
- ifp = dhd->iflist[idx];
+ DHD_ERROR(("Setting rand mac oui to FW - %02x:%02x:%02x\n", rand_mac_oui[0],
+ rand_mac_oui[1], rand_mac_oui[2]));
- return ifp->mcast_regen_bss_enable;
+ err = dhd_iovar(dhd, 0, "pfn_macaddr", (char *)&wl_cfg, sizeof(wl_cfg), NULL, 0, TRUE);
+ if (err < 0) {
+ DHD_ERROR(("%s : failed to execute pfn_macaddr %d\n", __FUNCTION__, err));
+ }
+ return err;
}
-/* Set interface specific mcast_regen configuration */
-int dhd_set_mcast_regen_bss_enable(dhd_pub_t *dhdp, uint32 idx, int val)
+#ifdef RTT_SUPPORT
+#ifdef WL_CFG80211
+/* Linux wrapper to call common dhd_pno_set_cfg_gscan */
+int
+dhd_dev_rtt_set_cfg(struct net_device *dev, void *buf)
{
- dhd_info_t *dhd = dhdp->info;
- dhd_if_t *ifp;
-
- ASSERT(idx < DHD_MAX_IFS);
+ dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
- ifp = dhd->iflist[idx];
+ return (dhd_rtt_set_cfg(&dhd->pub, buf));
+}
- ifp->mcast_regen_bss_enable = val;
+int
+dhd_dev_rtt_cancel_cfg(struct net_device *dev, struct ether_addr *mac_list, int mac_cnt)
+{
+ dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
- /* Disable rx_pkt_chain feature for interface, if mcast_regen feature
- * is enabled
- */
- dhd_update_rx_pkt_chainable_state(dhdp, idx);
- return BCME_OK;
+ return (dhd_rtt_stop(&dhd->pub, mac_list, mac_cnt));
}
-#endif /* DHD_MCAST_REGEN */
-/* Get interface specific ap_isolate configuration */
-int dhd_get_ap_isolate(dhd_pub_t *dhdp, uint32 idx)
+int
+dhd_dev_rtt_register_noti_callback(struct net_device *dev, void *ctx, dhd_rtt_compl_noti_fn noti_fn)
{
- dhd_info_t *dhd = dhdp->info;
- dhd_if_t *ifp;
+ dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
- ASSERT(idx < DHD_MAX_IFS);
+ return (dhd_rtt_register_noti_callback(&dhd->pub, ctx, noti_fn));
+}
- ifp = dhd->iflist[idx];
+int
+dhd_dev_rtt_unregister_noti_callback(struct net_device *dev, dhd_rtt_compl_noti_fn noti_fn)
+{
+ dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
- return ifp->ap_isolate;
+ return (dhd_rtt_unregister_noti_callback(&dhd->pub, noti_fn));
}
-/* Set interface specific ap_isolate configuration */
-int dhd_set_ap_isolate(dhd_pub_t *dhdp, uint32 idx, int val)
+int
+dhd_dev_rtt_capability(struct net_device *dev, rtt_capabilities_t *capa)
{
- dhd_info_t *dhd = dhdp->info;
- dhd_if_t *ifp;
+ dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
- ASSERT(idx < DHD_MAX_IFS);
+ return (dhd_rtt_capability(&dhd->pub, capa));
+}
- ifp = dhd->iflist[idx];
+int
+dhd_dev_rtt_avail_channel(struct net_device *dev, wifi_channel_info *channel_info)
+{
+ dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
+ return (dhd_rtt_avail_channel(&dhd->pub, channel_info));
+}
- if (ifp)
- ifp->ap_isolate = val;
+int
+dhd_dev_rtt_enable_responder(struct net_device *dev, wifi_channel_info *channel_info)
+{
+ dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
+ return (dhd_rtt_enable_responder(&dhd->pub, channel_info));
+}
- return 0;
+int dhd_dev_rtt_cancel_responder(struct net_device *dev)
+{
+ dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
+ return (dhd_rtt_cancel_responder(&dhd->pub));
}
+#endif /* WL_CFG80211 */
+#endif /* RTT_SUPPORT */
-#ifdef DHD_FW_COREDUMP
-void dhd_schedule_memdump(dhd_pub_t *dhdp, uint8 *buf, uint32 size)
+#ifdef KEEP_ALIVE
+#define KA_TEMP_BUF_SIZE 512
+#define KA_FRAME_SIZE 300
+
+int
+dhd_dev_start_mkeep_alive(dhd_pub_t *dhd_pub, uint8 mkeep_alive_id, uint8 *ip_pkt,
+ uint16 ip_pkt_len, uint8* src_mac, uint8* dst_mac, uint32 period_msec)
{
- unsigned long flags = 0;
- dhd_dump_t *dump = NULL;
- dhd_info_t *dhd_info = NULL;
-#if !defined(DHD_DUMP_FILE_WRITE_FROM_KERNEL)
- log_dump_type_t type = DLD_BUF_TYPE_ALL;
-#endif /* !DHD_DUMP_FILE_WRITE_FROM_KERNEL */
+ const int ETHERTYPE_LEN = 2;
+ char *pbuf = NULL;
+ const char *str;
+ wl_mkeep_alive_pkt_t mkeep_alive_pkt;
+ wl_mkeep_alive_pkt_t *mkeep_alive_pktp = NULL;
+ int buf_len = 0;
+ int str_len = 0;
+ int res = BCME_ERROR;
+ int len_bytes = 0;
+ int i = 0;
- dhd_info = (dhd_info_t *)dhdp->info;
- dump = (dhd_dump_t *)MALLOC(dhdp->osh, sizeof(dhd_dump_t));
- if (dump == NULL) {
- DHD_ERROR(("%s: dhd dump memory allocation failed\n", __FUNCTION__));
- return;
- }
- dump->buf = buf;
- dump->bufsize = size;
-#ifdef BCMPCIE
- dhd_get_hscb_info(dhdp, (void*)(&dump->hscb_buf),
- (uint32 *)(&dump->hscb_bufsize));
-#else /* BCMPCIE */
- dump->hscb_bufsize = 0;
-#endif /* BCMPCIE */
+ /* Ethernet frame buffer large enough for the max IP pkt (256 bytes) plus the Ethernet header */
+ char *pmac_frame = NULL;
+ char *pmac_frame_begin = NULL;
-#ifdef DHD_LOG_DUMP
- dhd_print_buf_addr(dhdp, "memdump", buf, size);
-#if !defined(DHD_DUMP_FILE_WRITE_FROM_KERNEL)
- /* Print out buffer infomation */
- dhd_log_dump_buf_addr(dhdp, &type);
-#endif /* !DHD_DUMP_FILE_WRITE_FROM_KERNEL */
-#endif /* DHD_LOG_DUMP */
+ /*
+ * The mkeep_alive packet is for the STA interface only; if the BSS is
+ * configured as an AP, the dongle will reject the mkeep_alive request.
+ */
+ if (!dhd_support_sta_mode(dhd_pub))
+ return res;
- if (dhdp->memdump_enabled == DUMP_MEMONLY) {
- BUG_ON(1);
+ DHD_TRACE(("%s execution\n", __FUNCTION__));
+
+ if ((pbuf = kzalloc(KA_TEMP_BUF_SIZE, GFP_KERNEL)) == NULL) {
+ DHD_ERROR(("failed to allocate buf with size %d\n", KA_TEMP_BUF_SIZE));
+ res = BCME_NOMEM;
+ return res;
}
-#if defined(DEBUG_DNGL_INIT_FAIL) || defined(DHD_ERPOM) || \
- defined(DNGL_AXI_ERROR_LOGGING)
- if (
-#if defined(DEBUG_DNGL_INIT_FAIL)
- (dhdp->memdump_type == DUMP_TYPE_DONGLE_INIT_FAILURE) ||
-#endif /* DEBUG_DNGL_INIT_FAIL */
-#ifdef DHD_ERPOM
- (dhdp->memdump_type == DUMP_TYPE_DUE_TO_BT) ||
-#endif /* DHD_ERPOM */
-#ifdef DNGL_AXI_ERROR_LOGGING
- (dhdp->memdump_type == DUMP_TYPE_SMMU_FAULT) ||
-#endif /* DNGL_AXI_ERROR_LOGGING */
- FALSE)
- {
-#if defined(DHD_DUMP_FILE_WRITE_FROM_KERNEL) && defined(DHD_LOG_DUMP)
- log_dump_type_t *flush_type = NULL;
-#endif /* DHD_DUMP_FILE_WRITE_FROM_KERNEL && DHD_LOG_DUMP */
- dhd_info->scheduled_memdump = FALSE;
- (void)dhd_mem_dump((void *)dhdp->info, (void *)dump, 0);
-#if defined(DHD_DUMP_FILE_WRITE_FROM_KERNEL) && defined(DHD_LOG_DUMP)
- /* for dongle init fail cases, 'dhd_mem_dump' does
- * not call 'dhd_log_dump', so call it here.
- */
- flush_type = MALLOCZ(dhdp->osh,
- sizeof(log_dump_type_t));
- if (flush_type) {
- *flush_type = DLD_BUF_TYPE_ALL;
- DHD_ERROR(("%s: calling log dump.. \n", __FUNCTION__));
- dhd_log_dump(dhdp->info, flush_type, 0);
- }
-#endif /* DHD_DUMP_FILE_WRITE_FROM_KERNEL && DHD_LOG_DUMP */
- return;
+ if ((pmac_frame = kzalloc(KA_FRAME_SIZE, GFP_KERNEL)) == NULL) {
+ DHD_ERROR(("failed to allocate mac_frame with size %d\n", KA_FRAME_SIZE));
+ res = BCME_NOMEM;
+ goto exit;
}
-#endif /* DEBUG_DNGL_INIT_FAIL || DHD_ERPOM || DNGL_AXI_ERROR_LOGGING */
+ pmac_frame_begin = pmac_frame;
- dhd_info->scheduled_memdump = TRUE;
- /* bus busy bit for mem dump will be cleared in mem dump
- * work item context, after mem dump file is written
- */
- DHD_GENERAL_LOCK(dhdp, flags);
- DHD_BUS_BUSY_SET_IN_MEMDUMP(dhdp);
- DHD_GENERAL_UNLOCK(dhdp, flags);
- DHD_ERROR(("%s: scheduling mem dump.. \n", __FUNCTION__));
- dhd_deferred_schedule_work(dhdp->info->dhd_deferred_wq, (void *)dump,
- DHD_WQ_WORK_SOC_RAM_DUMP, (void *)dhd_mem_dump, DHD_WQ_WORK_PRIORITY_HIGH);
-}
+ /*
+ * Get current mkeep-alive status.
+ */
+ res = dhd_iovar(dhd_pub, 0, "mkeep_alive", &mkeep_alive_id, sizeof(mkeep_alive_id), pbuf,
+ KA_TEMP_BUF_SIZE, FALSE);
+ if (res < 0) {
+ DHD_ERROR(("%s: Get mkeep_alive failed (error=%d)\n", __FUNCTION__, res));
+ goto exit;
+ } else {
+ /* Check whether the requested ID is already occupied */
+ mkeep_alive_pktp = (wl_mkeep_alive_pkt_t *) pbuf;
+ if (dtoh32(mkeep_alive_pktp->period_msec) != 0) {
+ DHD_ERROR(("%s: Get mkeep_alive failed, ID %u is in use.\n",
+ __FUNCTION__, mkeep_alive_id));
-static int
-dhd_mem_dump(void *handle, void *event_info, u8 event)
-{
- dhd_info_t *dhd = handle;
- dhd_pub_t *dhdp = NULL;
- unsigned long flags = 0;
- int ret = 0;
- dhd_dump_t *dump = NULL;
+ /* Current occupied ID info */
+ DHD_ERROR(("%s: mkeep_alive\n", __FUNCTION__));
+ DHD_ERROR((" Id : %d\n"
+ " Period: %d msec\n"
+ " Length: %d\n"
+ " Packet: 0x",
+ mkeep_alive_pktp->keep_alive_id,
+ dtoh32(mkeep_alive_pktp->period_msec),
+ dtoh16(mkeep_alive_pktp->len_bytes)));
- DHD_ERROR(("%s: ENTER, memdump type %u\n", __FUNCTION__, dhd->pub.memdump_type));
+ for (i = 0; i < mkeep_alive_pktp->len_bytes; i++) {
+ DHD_ERROR(("%02x", mkeep_alive_pktp->data[i]));
+ }
+ DHD_ERROR(("\n"));
- if (!dhd) {
- DHD_ERROR(("%s: dhd is NULL\n", __FUNCTION__));
- return -ENODEV;
+ res = BCME_NOTFOUND;
+ goto exit;
+ }
}
- dhdp = &dhd->pub;
- if (!dhdp) {
- DHD_ERROR(("%s: dhdp is NULL\n", __FUNCTION__));
- return -ENODEV;
- }
+ /* Request the specified ID */
+ memset(&mkeep_alive_pkt, 0, sizeof(wl_mkeep_alive_pkt_t));
+ memset(pbuf, 0, KA_TEMP_BUF_SIZE);
+ str = "mkeep_alive";
+ str_len = strlen(str);
+ strncpy(pbuf, str, str_len);
+ pbuf[str_len] = '\0';
- DHD_GENERAL_LOCK(dhdp, flags);
- if (DHD_BUS_CHECK_DOWN_OR_DOWN_IN_PROGRESS(dhdp)) {
- DHD_GENERAL_UNLOCK(dhdp, flags);
- DHD_ERROR(("%s: bus is down! can't collect mem dump. \n", __FUNCTION__));
- ret = -ENODEV;
- goto exit;
- }
- DHD_GENERAL_UNLOCK(dhdp, flags);
+ mkeep_alive_pktp = (wl_mkeep_alive_pkt_t *) (pbuf + str_len + 1);
+ mkeep_alive_pkt.period_msec = htod32(period_msec);
+ buf_len = str_len + 1;
+ mkeep_alive_pkt.version = htod16(WL_MKEEP_ALIVE_VERSION);
+ mkeep_alive_pkt.length = htod16(WL_MKEEP_ALIVE_FIXED_LEN);
-#ifdef DHD_SSSR_DUMP
- if (dhdp->sssr_inited && dhdp->collect_sssr) {
- dhdpcie_sssr_dump(dhdp);
- }
- dhdp->collect_sssr = FALSE;
-#endif /* DHD_SSSR_DUMP */
-#if defined(WL_CFG80211) && defined(DHD_FILE_DUMP_EVENT)
- dhd_wait_for_file_dump(dhdp);
-#endif /* WL_CFG80211 && DHD_FILE_DUMP_EVENT */
+ /* ID assigned */
+ mkeep_alive_pkt.keep_alive_id = mkeep_alive_id;
- dump = (dhd_dump_t *)event_info;
- if (!dump) {
- DHD_ERROR(("%s: dump is NULL\n", __FUNCTION__));
- ret = -EINVAL;
- goto exit;
- }
+ buf_len += WL_MKEEP_ALIVE_FIXED_LEN;
/*
- * If kernel does not have file write access enabled
- * then skip writing dumps to files.
- * The dumps will be pushed to HAL layer which will
- * write into files
+ * Build up the Ethernet frame
*/
-#ifdef DHD_DUMP_FILE_WRITE_FROM_KERNEL
-
- if (write_dump_to_file(&dhd->pub, dump->buf, dump->bufsize, "mem_dump")) {
- DHD_ERROR(("%s: writing SoC_RAM dump to the file failed\n", __FUNCTION__));
-#ifdef DHD_DEBUG_UART
- dhd->pub.memdump_success = FALSE;
-#endif /* DHD_DEBUG_UART */
- }
-
- /* directly call dhd_log_dump for debug_dump collection from the mem_dump work queue
- * context, no need to schedule another work queue for log dump. In case of
- * user initiated DEBUG_DUMP wpa_cli command (DUMP_TYPE_BY_SYSDUMP),
- * cfg layer is itself scheduling the log_dump work queue.
- * that path is not disturbed. If 'dhd_mem_dump' is called directly then we will not
- * collect debug_dump as it may be called from non-sleepable context.
- */
-#ifdef DHD_LOG_DUMP
- if (dhd->scheduled_memdump &&
- dhdp->memdump_type != DUMP_TYPE_BY_SYSDUMP) {
- log_dump_type_t *flush_type = MALLOCZ(dhdp->osh,
- sizeof(log_dump_type_t));
- if (flush_type) {
- *flush_type = DLD_BUF_TYPE_ALL;
- DHD_ERROR(("%s: calling log dump.. \n", __FUNCTION__));
- dhd_log_dump(dhd, flush_type, 0);
- }
- }
-#endif /* DHD_LOG_DUMP */
- clear_debug_dump_time(dhdp->debug_dump_time_str);
+ /* Copy the destination MAC address */
+ memcpy(pmac_frame, dst_mac, ETHER_ADDR_LEN);
+ pmac_frame += ETHER_ADDR_LEN;
- /* before calling bug on, wait for other logs to be dumped.
- * we cannot wait in case dhd_mem_dump is called directly
- * as it may not be in a sleepable context
- */
- if (dhd->scheduled_memdump) {
- uint bitmask = 0;
- int timeleft = 0;
-#ifdef DHD_SSSR_DUMP
- bitmask |= DHD_BUS_BUSY_IN_SSSRDUMP;
-#endif // endif
- if (bitmask != 0) {
- DHD_ERROR(("%s: wait to clear dhd_bus_busy_state: 0x%x\n",
- __FUNCTION__, dhdp->dhd_bus_busy_state));
- timeleft = dhd_os_busbusy_wait_bitmask(dhdp,
- &dhdp->dhd_bus_busy_state, bitmask, 0);
- if ((timeleft == 0) || (timeleft == 1)) {
- DHD_ERROR(("%s: Timed out dhd_bus_busy_state=0x%x\n",
- __FUNCTION__, dhdp->dhd_bus_busy_state));
- }
- }
- }
+ /* Copy the source MAC address */
+ memcpy(pmac_frame, src_mac, ETHER_ADDR_LEN);
+ pmac_frame += ETHER_ADDR_LEN;
- if (dump->hscb_buf && dump->hscb_bufsize) {
- DHD_ERROR(("%s: write HSCB dump... \n", __FUNCTION__));
- if (write_dump_to_file(&dhd->pub, dump->hscb_buf,
- dump->hscb_bufsize, "mem_dump_hscb")) {
- DHD_ERROR(("%s: writing HSCB dump to the file failed\n", __FUNCTION__));
-#ifdef DHD_DEBUG_UART
- dhd->pub.memdump_success = FALSE;
-#endif /* DHD_DEBUG_UART */
- }
- }
-#endif /* DHD_DUMP_FILE_WRITE_FROM_KERNEL */
+ /* Write the EtherType (ETHERTYPE_IP: 0x0800) */
+ *(pmac_frame++) = 0x08;
+ *(pmac_frame++) = 0x00;
- DHD_ERROR(("%s: memdump type %u\n", __FUNCTION__, dhd->pub.memdump_type));
- if (dhd->pub.memdump_enabled == DUMP_MEMFILE_BUGON &&
-#ifdef DHD_LOG_DUMP
- dhd->pub.memdump_type != DUMP_TYPE_BY_SYSDUMP &&
-#endif /* DHD_LOG_DUMP */
- dhd->pub.memdump_type != DUMP_TYPE_BY_USER &&
-#ifdef DHD_DEBUG_UART
- dhd->pub.memdump_success == TRUE &&
-#endif /* DHD_DEBUG_UART */
-#ifdef DNGL_EVENT_SUPPORT
- dhd->pub.memdump_type != DUMP_TYPE_DONGLE_HOST_EVENT &&
-#endif /* DNGL_EVENT_SUPPORT */
- dhd->pub.memdump_type != DUMP_TYPE_CFG_VENDOR_TRIGGERED) {
+ /* Copy the IP packet */
+ memcpy(pmac_frame, ip_pkt, ip_pkt_len);
+ pmac_frame += ip_pkt_len;
-#ifdef SHOW_LOGTRACE
- /* Wait till logtrace context is flushed */
- dhd_flush_logtrace_process(dhd);
-#endif /* SHOW_LOGTRACE */
+ /*
+ * Length of the Ethernet frame (raw bytes)
+ * = dst MAC + src MAC + EtherType + IP packet length
+ */
+ len_bytes = ETHER_ADDR_LEN*2 + ETHERTYPE_LEN + ip_pkt_len;
+ memcpy(mkeep_alive_pktp->data, pmac_frame_begin, len_bytes);
+ buf_len += len_bytes;
+ mkeep_alive_pkt.len_bytes = htod16(len_bytes);
- DHD_ERROR(("%s: call BUG_ON \n", __FUNCTION__));
- BUG_ON(1);
- }
- DHD_ERROR(("%s: No BUG ON, memdump type %u \n", __FUNCTION__, dhd->pub.memdump_type));
+ /*
+ * Keep-alive attributes are set in local variable (mkeep_alive_pkt), and
+ * then memcpy'ed into buffer (mkeep_alive_pktp) since there is no
+ * guarantee that the buffer is properly aligned.
+ */
+ memcpy((char *)mkeep_alive_pktp, &mkeep_alive_pkt, WL_MKEEP_ALIVE_FIXED_LEN);
+ res = dhd_wl_ioctl_cmd(dhd_pub, WLC_SET_VAR, pbuf, buf_len, TRUE, 0);
exit:
- if (dump) {
- MFREE(dhd->pub.osh, dump, sizeof(dhd_dump_t));
- }
- DHD_GENERAL_LOCK(dhdp, flags);
- DHD_BUS_BUSY_CLEAR_IN_MEMDUMP(&dhd->pub);
- dhd_os_busbusy_wake(dhdp);
- DHD_GENERAL_UNLOCK(dhdp, flags);
- dhd->scheduled_memdump = FALSE;
- if (dhdp->hang_was_pending) {
- DHD_ERROR(("%s: Send pending HANG event...\n", __FUNCTION__));
- dhd_os_send_hang_message(dhdp);
- dhdp->hang_was_pending = 0;
- }
- DHD_ERROR(("%s: EXIT \n", __FUNCTION__));
- return ret;
+ kfree(pmac_frame_begin);
+ kfree(pbuf);
+ return res;
}
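For reference, the iovar buffer assembled above is laid out as the NUL-terminated command name, the fixed-length wl_mkeep_alive_pkt_t header, and then the raw Ethernet frame (dst MAC, src MAC, EtherType 0x0800, IP packet). The userspace-style sketch below only works through that size arithmetic; ka_hdr is a simplified stand-in, not the real wl_mkeep_alive_pkt_t definition.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct ka_hdr {                  /* simplified stand-in, fixed part only */
	uint16_t version;
	uint16_t length;
	uint32_t period_msec;
	uint16_t len_bytes;
	uint8_t  keep_alive_id;
	uint8_t  data[1];        /* Ethernet frame bytes follow here */
};

int main(void)
{
	size_t ip_pkt_len = 40;                       /* example IP packet */
	size_t frame_len  = 6 + 6 + 2 + ip_pkt_len;   /* MACs + EtherType + IP */
	size_t fixed_len  = offsetof(struct ka_hdr, data);
	size_t buf_len    = strlen("mkeep_alive") + 1 + fixed_len + frame_len;

	printf("frame=%zu fixed=%zu iovar buffer=%zu bytes\n",
		frame_len, fixed_len, buf_len);
	return 0;
}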
-#endif /* DHD_FW_COREDUMP */
-#ifdef DHD_SSSR_DUMP
int
-dhd_sssr_dump_dig_buf_before(void *dev, const void *user_buf, uint32 len)
+dhd_dev_stop_mkeep_alive(dhd_pub_t *dhd_pub, uint8 mkeep_alive_id)
{
- dhd_info_t *dhd_info = *(dhd_info_t **)netdev_priv((struct net_device *)dev);
- dhd_pub_t *dhdp = &dhd_info->pub;
- int pos = 0, ret = BCME_ERROR;
- uint dig_buf_size = 0;
+ char *pbuf;
+ wl_mkeep_alive_pkt_t mkeep_alive_pkt;
+ wl_mkeep_alive_pkt_t *mkeep_alive_pktp;
+ int res = BCME_ERROR;
+ int i;
- if (dhdp->sssr_reg_info.vasip_regs.vasip_sr_size) {
- dig_buf_size = dhdp->sssr_reg_info.vasip_regs.vasip_sr_size;
- } else if ((dhdp->sssr_reg_info.length > OFFSETOF(sssr_reg_info_v1_t, dig_mem_info)) &&
- dhdp->sssr_reg_info.dig_mem_info.dig_sr_size) {
- dig_buf_size = dhdp->sssr_reg_info.dig_mem_info.dig_sr_size;
- }
+ /*
+ * The mkeep_alive packet is for the STA interface only; if the BSS is
+ * configured as an AP, the dongle will reject the mkeep_alive request.
+ */
+ if (!dhd_support_sta_mode(dhd_pub))
+ return res;
- if (dhdp->sssr_dig_buf_before && (dhdp->sssr_dump_mode == SSSR_DUMP_MODE_SSSR)) {
- ret = dhd_export_debug_data((char *)dhdp->sssr_dig_buf_before,
- NULL, user_buf, dig_buf_size, &pos);
+ DHD_TRACE(("%s execution\n", __FUNCTION__));
+
+ /*
+ * Get the current mkeep_alive status. Skip ID 0, which is used for the NULL pkt.
+ */
+ if ((pbuf = kmalloc(KA_TEMP_BUF_SIZE, GFP_KERNEL)) == NULL) {
+ DHD_ERROR(("failed to allocate buf with size %d\n", KA_TEMP_BUF_SIZE));
+ return res;
}
- return ret;
-}
-int
-dhd_sssr_dump_dig_buf_after(void *dev, const void *user_buf, uint32 len)
-{
- dhd_info_t *dhd_info = *(dhd_info_t **)netdev_priv((struct net_device *)dev);
- dhd_pub_t *dhdp = &dhd_info->pub;
- int pos = 0, ret = BCME_ERROR;
- uint dig_buf_size = 0;
+ res = dhd_iovar(dhd_pub, 0, "mkeep_alive", &mkeep_alive_id,
+ sizeof(mkeep_alive_id), pbuf, KA_TEMP_BUF_SIZE, FALSE);
+ if (res < 0) {
+ DHD_ERROR(("%s: Get mkeep_alive failed (error=%d)\n", __FUNCTION__, res));
+ goto exit;
+ } else {
+ /* Check occupied ID */
+ mkeep_alive_pktp = (wl_mkeep_alive_pkt_t *) pbuf;
+ DHD_INFO(("%s: mkeep_alive\n", __FUNCTION__));
+ DHD_INFO((" Id : %d\n"
+ " Period: %d msec\n"
+ " Length: %d\n"
+ " Packet: 0x",
+ mkeep_alive_pktp->keep_alive_id,
+ dtoh32(mkeep_alive_pktp->period_msec),
+ dtoh16(mkeep_alive_pktp->len_bytes)));
- if (dhdp->sssr_reg_info.vasip_regs.vasip_sr_size) {
- dig_buf_size = dhdp->sssr_reg_info.vasip_regs.vasip_sr_size;
- } else if ((dhdp->sssr_reg_info.length > OFFSETOF(sssr_reg_info_v1_t, dig_mem_info)) &&
- dhdp->sssr_reg_info.dig_mem_info.dig_sr_size) {
- dig_buf_size = dhdp->sssr_reg_info.dig_mem_info.dig_sr_size;
+ for (i = 0; i < mkeep_alive_pktp->len_bytes; i++) {
+ DHD_INFO(("%02x", mkeep_alive_pktp->data[i]));
+ }
+ DHD_INFO(("\n"));
}
- if (dhdp->sssr_dig_buf_after) {
- ret = dhd_export_debug_data((char *)dhdp->sssr_dig_buf_after,
- NULL, user_buf, dig_buf_size, &pos);
- }
- return ret;
-}
+ /* Stop the keep-alive if this ID is currently active */
+ if (dtoh32(mkeep_alive_pktp->period_msec) != 0) {
+ DHD_INFO(("stop mkeep_alive on ID %d\n", mkeep_alive_id));
+ memset(&mkeep_alive_pkt, 0, sizeof(wl_mkeep_alive_pkt_t));
-int
-dhd_sssr_dump_d11_buf_before(void *dev, const void *user_buf, uint32 len, int core)
-{
- dhd_info_t *dhd_info = *(dhd_info_t **)netdev_priv((struct net_device *)dev);
- dhd_pub_t *dhdp = &dhd_info->pub;
- int pos = 0, ret = BCME_ERROR;
+ mkeep_alive_pkt.period_msec = 0;
+ mkeep_alive_pkt.version = htod16(WL_MKEEP_ALIVE_VERSION);
+ mkeep_alive_pkt.length = htod16(WL_MKEEP_ALIVE_FIXED_LEN);
+ mkeep_alive_pkt.keep_alive_id = mkeep_alive_id;
- if (dhdp->sssr_d11_before[core] &&
- dhdp->sssr_d11_outofreset[core] &&
- (dhdp->sssr_dump_mode == SSSR_DUMP_MODE_SSSR)) {
- ret = dhd_export_debug_data((char *)dhdp->sssr_d11_before[core],
- NULL, user_buf, len, &pos);
+ res = dhd_iovar(dhd_pub, 0, "mkeep_alive",
+ (char *)&mkeep_alive_pkt,
+ WL_MKEEP_ALIVE_FIXED_LEN, NULL, 0, TRUE);
+ } else {
+ DHD_ERROR(("%s: ID %u does not exist.\n", __FUNCTION__, mkeep_alive_id));
+ res = BCME_NOTFOUND;
}
- return ret;
+exit:
+ kfree(pbuf);
+ return res;
}
+#endif /* KEEP_ALIVE */
-int
-dhd_sssr_dump_d11_buf_after(void *dev, const void *user_buf, uint32 len, int core)
+#if defined(PKT_FILTER_SUPPORT) && defined(APF)
+static void _dhd_apf_lock_local(dhd_info_t *dhd)
{
- dhd_info_t *dhd_info = *(dhd_info_t **)netdev_priv((struct net_device *)dev);
- dhd_pub_t *dhdp = &dhd_info->pub;
- int pos = 0, ret = BCME_ERROR;
-
- if (dhdp->sssr_d11_after[core] &&
- dhdp->sssr_d11_outofreset[core]) {
- ret = dhd_export_debug_data((char *)dhdp->sssr_d11_after[core],
- NULL, user_buf, len, &pos);
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
+ if (dhd) {
+ mutex_lock(&dhd->dhd_apf_mutex);
}
- return ret;
+#endif
}
-static void
-dhd_sssr_dump_to_file(dhd_info_t* dhdinfo)
+static void _dhd_apf_unlock_local(dhd_info_t *dhd)
{
- dhd_info_t *dhd = dhdinfo;
- dhd_pub_t *dhdp;
- int i;
- char before_sr_dump[128];
- char after_sr_dump[128];
- unsigned long flags = 0;
- uint dig_buf_size = 0;
-
- DHD_ERROR(("%s: ENTER \n", __FUNCTION__));
-
- if (!dhd) {
- DHD_ERROR(("%s: dhd is NULL\n", __FUNCTION__));
- return;
- }
-
- dhdp = &dhd->pub;
-
- DHD_GENERAL_LOCK(dhdp, flags);
- DHD_BUS_BUSY_SET_IN_SSSRDUMP(dhdp);
- if (DHD_BUS_CHECK_DOWN_OR_DOWN_IN_PROGRESS(dhdp)) {
- DHD_GENERAL_UNLOCK(dhdp, flags);
- DHD_ERROR(("%s: bus is down! can't collect sssr dump. \n", __FUNCTION__));
- goto exit;
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
+ if (dhd) {
+ mutex_unlock(&dhd->dhd_apf_mutex);
}
- DHD_GENERAL_UNLOCK(dhdp, flags);
-
- for (i = 0; i < MAX_NUM_D11CORES; i++) {
- /* Init file name */
- memset(before_sr_dump, 0, sizeof(before_sr_dump));
- memset(after_sr_dump, 0, sizeof(after_sr_dump));
+#endif
+}
- snprintf(before_sr_dump, sizeof(before_sr_dump), "%s_%d_%s",
- "sssr_dump_core", i, "before_SR");
- snprintf(after_sr_dump, sizeof(after_sr_dump), "%s_%d_%s",
- "sssr_dump_core", i, "after_SR");
+static int
+__dhd_apf_add_filter(struct net_device *ndev, uint32 filter_id,
+ u8* program, uint32 program_len)
+{
+ dhd_info_t *dhd = DHD_DEV_INFO(ndev);
+ dhd_pub_t *dhdp = &dhd->pub;
+ wl_pkt_filter_t * pkt_filterp;
+ wl_apf_program_t *apf_program;
+ char *buf;
+ u32 cmd_len, buf_len;
+ int ifidx, ret;
+ gfp_t kflags;
+ char cmd[] = "pkt_filter_add";
- if (dhdp->sssr_d11_before[i] && dhdp->sssr_d11_outofreset[i] &&
- (dhdp->sssr_dump_mode == SSSR_DUMP_MODE_SSSR)) {
- if (write_dump_to_file(dhdp, (uint8 *)dhdp->sssr_d11_before[i],
- dhdp->sssr_reg_info.mac_regs[i].sr_size, before_sr_dump)) {
- DHD_ERROR(("%s: writing SSSR MAIN dump before to the file failed\n",
- __FUNCTION__));
- }
- }
- if (dhdp->sssr_d11_after[i] && dhdp->sssr_d11_outofreset[i]) {
- if (write_dump_to_file(dhdp, (uint8 *)dhdp->sssr_d11_after[i],
- dhdp->sssr_reg_info.mac_regs[i].sr_size, after_sr_dump)) {
- DHD_ERROR(("%s: writing SSSR AUX dump after to the file failed\n",
- __FUNCTION__));
- }
- }
+ ifidx = dhd_net2idx(dhd, ndev);
+ if (ifidx == DHD_BAD_IF) {
+ DHD_ERROR(("%s: bad ifidx\n", __FUNCTION__));
+ return -ENODEV;
}
- if (dhdp->sssr_reg_info.vasip_regs.vasip_sr_size) {
- dig_buf_size = dhdp->sssr_reg_info.vasip_regs.vasip_sr_size;
- } else if ((dhdp->sssr_reg_info.length > OFFSETOF(sssr_reg_info_v1_t, dig_mem_info)) &&
- dhdp->sssr_reg_info.dig_mem_info.dig_sr_size) {
- dig_buf_size = dhdp->sssr_reg_info.dig_mem_info.dig_sr_size;
- }
+ cmd_len = sizeof(cmd);
- if (dhdp->sssr_dig_buf_before && (dhdp->sssr_dump_mode == SSSR_DUMP_MODE_SSSR)) {
- if (write_dump_to_file(dhdp, (uint8 *)dhdp->sssr_dig_buf_before,
- dig_buf_size, "sssr_dump_dig_before_SR")) {
- DHD_ERROR(("%s: writing SSSR Dig dump before to the file failed\n",
- __FUNCTION__));
- }
+ /* Reject the request if program_len exceeds the maximum APF program
+ * size or if the program pointer is NULL.
+ */
+ if ((program_len > WL_APF_PROGRAM_MAX_SIZE) || (program == NULL)) {
+ DHD_ERROR(("%s Invalid program_len: %d, program: %pK\n",
+ __FUNCTION__, program_len, program));
+ return -EINVAL;
}
+ buf_len = cmd_len + WL_PKT_FILTER_FIXED_LEN +
+ WL_APF_PROGRAM_FIXED_LEN + program_len;
- if (dhdp->sssr_dig_buf_after) {
- if (write_dump_to_file(dhdp, (uint8 *)dhdp->sssr_dig_buf_after,
- dig_buf_size, "sssr_dump_dig_after_SR")) {
- DHD_ERROR(("%s: writing SSSR Dig VASIP dump after to the file failed\n",
- __FUNCTION__));
- }
+ kflags = in_atomic() ? GFP_ATOMIC : GFP_KERNEL;
+ buf = kzalloc(buf_len, kflags);
+ if (unlikely(!buf)) {
+ DHD_ERROR(("%s: MALLOC failure, %d bytes\n", __FUNCTION__, buf_len));
+ return -ENOMEM;
}
-exit:
- DHD_GENERAL_LOCK(dhdp, flags);
- DHD_BUS_BUSY_CLEAR_IN_SSSRDUMP(dhdp);
- dhd_os_busbusy_wake(dhdp);
- DHD_GENERAL_UNLOCK(dhdp, flags);
-}
-
-void
-dhd_write_sssr_dump(dhd_pub_t *dhdp, uint32 dump_mode)
-{
- dhdp->sssr_dump_mode = dump_mode;
-
- /*
- * If kernel does not have file write access enabled
- * then skip writing dumps to files.
- * The dumps will be pushed to HAL layer which will
- * write into files
- */
-#if !defined(DHD_DUMP_FILE_WRITE_FROM_KERNEL)
- return;
-#endif /* !DHD_DUMP_FILE_WRITE_FROM_KERNEL */
-
- /*
- * dhd_mem_dump -> dhd_sssr_dump -> dhd_write_sssr_dump
- * Without workqueue -
- * DUMP_TYPE_DONGLE_INIT_FAILURE/DUMP_TYPE_DUE_TO_BT/DUMP_TYPE_SMMU_FAULT
- * : These are called in own handler, not in the interrupt context
- * With workqueue - all other DUMP_TYPEs : dhd_mem_dump is called in workqueue
- * Thus, it doesn't neeed to dump SSSR in workqueue
- */
- DHD_ERROR(("%s: writing sssr dump to file... \n", __FUNCTION__));
- dhd_sssr_dump_to_file(dhdp->info);
-
-}
-#endif /* DHD_SSSR_DUMP */
+ memcpy(buf, cmd, cmd_len);
-#ifdef DHD_LOG_DUMP
-static void
-dhd_log_dump(void *handle, void *event_info, u8 event)
-{
- dhd_info_t *dhd = handle;
- log_dump_type_t *type = (log_dump_type_t *)event_info;
+ pkt_filterp = (wl_pkt_filter_t *) (buf + cmd_len);
+ pkt_filterp->id = htod32(filter_id);
+ pkt_filterp->negate_match = htod32(FALSE);
+ pkt_filterp->type = htod32(WL_PKT_FILTER_TYPE_APF_MATCH);
- if (!dhd || !type) {
- DHD_ERROR(("%s: dhd is NULL\n", __FUNCTION__));
- return;
- }
+ apf_program = &pkt_filterp->u.apf_program;
+ apf_program->version = htod16(WL_APF_INTERNAL_VERSION);
+ apf_program->instr_len = htod16(program_len);
+ memcpy(apf_program->instrs, program, program_len);
-#ifdef WL_CFG80211
- /* flush the fw side logs */
- wl_flush_fw_log_buffer(dhd_linux_get_primary_netdev(&dhd->pub),
- FW_LOGSET_MASK_ALL);
-#endif // endif
- /* there are currently 3 possible contexts from which
- * log dump can be scheduled -
- * 1.TRAP 2.supplicant DEBUG_DUMP pvt driver command
- * 3.HEALTH CHECK event
- * The concise debug info buffer is a shared resource
- * and in case a trap is one of the contexts then both the
- * scheduled work queues need to run because trap data is
- * essential for debugging. Hence a mutex lock is acquired
- * before calling do_dhd_log_dump().
- */
- DHD_ERROR(("%s: calling log dump.. \n", __FUNCTION__));
- dhd_os_logdump_lock(&dhd->pub);
- DHD_OS_WAKE_LOCK(&dhd->pub);
- if (do_dhd_log_dump(&dhd->pub, type) != BCME_OK) {
- DHD_ERROR(("%s: writing debug dump to the file failed\n", __FUNCTION__));
+ ret = dhd_wl_ioctl_cmd(dhdp, WLC_SET_VAR, buf, buf_len, TRUE, ifidx);
+ if (unlikely(ret)) {
+ DHD_ERROR(("%s: failed to add APF filter, id=%d, ret=%d\n",
+ __FUNCTION__, filter_id, ret));
}
- DHD_OS_WAKE_UNLOCK(&dhd->pub);
- dhd_os_logdump_unlock(&dhd->pub);
-}
-
-void dhd_schedule_log_dump(dhd_pub_t *dhdp, void *type)
-{
- DHD_ERROR(("%s: scheduling log dump.. \n", __FUNCTION__));
- dhd_deferred_schedule_work(dhdp->info->dhd_deferred_wq,
- type, DHD_WQ_WORK_DHD_LOG_DUMP,
- dhd_log_dump, DHD_WQ_WORK_PRIORITY_HIGH);
-}
-static void
-dhd_print_buf_addr(dhd_pub_t *dhdp, char *name, void *buf, unsigned int size)
-{
-#ifdef DHD_FW_COREDUMP
- if ((dhdp->memdump_enabled == DUMP_MEMONLY) ||
- (dhdp->memdump_enabled == DUMP_MEMFILE_BUGON) ||
- (dhdp->memdump_type == DUMP_TYPE_SMMU_FAULT))
-#else
- if (dhdp->memdump_type == DUMP_TYPE_SMMU_FAULT)
-#endif
- {
-#if defined(CONFIG_ARM64)
- DHD_ERROR(("-------- %s: buf(va)=%llx, buf(pa)=%llx, bufsize=%d\n",
- name, (uint64)buf, (uint64)__virt_to_phys((ulong)buf), size));
-#elif defined(__ARM_ARCH_7A__)
- DHD_ERROR(("-------- %s: buf(va)=%x, buf(pa)=%x, bufsize=%d\n",
- name, (uint32)buf, (uint32)__virt_to_phys((ulong)buf), size));
-#endif /* __ARM_ARCH_7A__ */
+ if (buf) {
+ kfree(buf);
}
+ return ret;
}
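The buffer built above follows the usual WLC_SET_VAR convention: the NUL-terminated iovar name followed immediately by the binary payload (here the wl_pkt_filter_t header plus the APF program). A minimal, hypothetical helper restating that packing (not an existing DHD function):

static int example_pack_iovar(char *buf, u32 buf_len, const char *name,
	const void *payload, u32 payload_len)
{
	u32 name_len = strlen(name) + 1;        /* keep the terminating NUL */

	if (buf_len < name_len + payload_len)
		return BCME_ERROR;
	memcpy(buf, name, name_len);
	memcpy(buf + name_len, payload, payload_len);
	return (int)(name_len + payload_len);   /* length handed to WLC_SET_VAR */
}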
-static void
-dhd_log_dump_buf_addr(dhd_pub_t *dhdp, log_dump_type_t *type)
+static int
+__dhd_apf_config_filter(struct net_device *ndev, uint32 filter_id,
+ uint32 mode, uint32 enable)
{
- int i;
- unsigned long wr_size = 0;
- struct dhd_log_dump_buf *dld_buf = &g_dld_buf[0];
- size_t log_size = 0;
- char buf_name[DHD_PRINT_BUF_NAME_LEN];
- dhd_dbg_ring_t *ring = NULL;
-
- BCM_REFERENCE(ring);
+ dhd_info_t *dhd = DHD_DEV_INFO(ndev);
+ dhd_pub_t *dhdp = &dhd->pub;
+ wl_pkt_filter_enable_t * pkt_filterp;
+ char *buf;
+ u32 cmd_len, buf_len;
+ int ifidx, ret;
+ gfp_t kflags;
+ char cmd[] = "pkt_filter_enable";
- for (i = 0; i < DLD_BUFFER_NUM; i++) {
- dld_buf = &g_dld_buf[i];
- log_size = (unsigned long)dld_buf->max -
- (unsigned long)dld_buf->buffer;
- if (dld_buf->wraparound) {
- wr_size = log_size;
- } else {
- wr_size = (unsigned long)dld_buf->present -
- (unsigned long)dld_buf->front;
- }
- scnprintf(buf_name, sizeof(buf_name), "dlb_buf[%d]", i);
- dhd_print_buf_addr(dhdp, buf_name, dld_buf, dld_buf_size[i]);
- scnprintf(buf_name, sizeof(buf_name), "dlb_buf[%d] buffer", i);
- dhd_print_buf_addr(dhdp, buf_name, dld_buf->buffer, wr_size);
- scnprintf(buf_name, sizeof(buf_name), "dlb_buf[%d] present", i);
- dhd_print_buf_addr(dhdp, buf_name, dld_buf->present, wr_size);
- scnprintf(buf_name, sizeof(buf_name), "dlb_buf[%d] front", i);
- dhd_print_buf_addr(dhdp, buf_name, dld_buf->front, wr_size);
+ ifidx = dhd_net2idx(dhd, ndev);
+ if (ifidx == DHD_BAD_IF) {
+ DHD_ERROR(("%s: bad ifidx\n", __FUNCTION__));
+ return -ENODEV;
}
-#ifdef EWP_ECNTRS_LOGGING
- /* periodic flushing of ecounters is NOT supported */
- if (*type == DLD_BUF_TYPE_ALL &&
- logdump_ecntr_enable &&
- dhdp->ecntr_dbg_ring) {
-
- ring = (dhd_dbg_ring_t *)dhdp->ecntr_dbg_ring;
- dhd_print_buf_addr(dhdp, "ecntr_dbg_ring", ring, LOG_DUMP_ECNTRS_MAX_BUFSIZE);
- dhd_print_buf_addr(dhdp, "ecntr_dbg_ring ring_buf", ring->ring_buf,
- LOG_DUMP_ECNTRS_MAX_BUFSIZE);
- }
-#endif /* EWP_ECNTRS_LOGGING */
+ cmd_len = sizeof(cmd);
+ buf_len = cmd_len + sizeof(*pkt_filterp);
-#ifdef DHD_STATUS_LOGGING
- if (dhdp->statlog) {
- dhd_print_buf_addr(dhdp, "statlog_logbuf", dhd_statlog_get_logbuf(dhdp),
- dhd_statlog_get_logbuf_len(dhdp));
+ kflags = in_atomic() ? GFP_ATOMIC : GFP_KERNEL;
+ buf = kzalloc(buf_len, kflags);
+ if (unlikely(!buf)) {
+ DHD_ERROR(("%s: MALLOC failure, %d bytes\n", __FUNCTION__, buf_len));
+ return -ENOMEM;
}
-#endif /* DHD_STATUS_LOGGING */
-#ifdef EWP_RTT_LOGGING
- /* periodic flushing of ecounters is NOT supported */
- if (*type == DLD_BUF_TYPE_ALL &&
- logdump_rtt_enable &&
- dhdp->rtt_dbg_ring) {
+ memcpy(buf, cmd, cmd_len);
- ring = (dhd_dbg_ring_t *)dhdp->rtt_dbg_ring;
- dhd_print_buf_addr(dhdp, "rtt_dbg_ring", ring, LOG_DUMP_RTT_MAX_BUFSIZE);
- dhd_print_buf_addr(dhdp, "rtt_dbg_ring ring_buf", ring->ring_buf,
- LOG_DUMP_RTT_MAX_BUFSIZE);
- }
-#endif /* EWP_RTT_LOGGING */
+ pkt_filterp = (wl_pkt_filter_enable_t *) (buf + cmd_len);
+ pkt_filterp->id = htod32(filter_id);
+ pkt_filterp->enable = htod32(enable);
-#ifdef BCMPCIE
- if (dhdp->dongle_trap_occured && dhdp->extended_trap_data) {
- dhd_print_buf_addr(dhdp, "extended_trap_data", dhdp->extended_trap_data,
- BCMPCIE_EXT_TRAP_DATA_MAXLEN);
+ ret = dhd_wl_ioctl_cmd(dhdp, WLC_SET_VAR, buf, buf_len, TRUE, ifidx);
+ if (unlikely(ret)) {
+ DHD_ERROR(("%s: failed to enable APF filter, id=%d, ret=%d\n",
+ __FUNCTION__, filter_id, ret));
+ goto exit;
}
-#endif /* BCMPCIE */
-#if defined(DHD_FW_COREDUMP) && defined(DNGL_EVENT_SUPPORT)
- /* if health check event was received */
- if (dhdp->memdump_type == DUMP_TYPE_DONGLE_HOST_EVENT) {
- dhd_print_buf_addr(dhdp, "health_chk_event_data", dhdp->health_chk_event_data,
- HEALTH_CHK_BUF_SIZE);
+ ret = dhd_wl_ioctl_set_intiovar(dhdp, "pkt_filter_mode", dhd_master_mode,
+ WLC_SET_VAR, TRUE, ifidx);
+ if (unlikely(ret)) {
+ DHD_ERROR(("%s: failed to set APF filter mode, id=%d, ret=%d\n",
+ __FUNCTION__, filter_id, ret));
}
-#endif /* DHD_FW_COREDUMP && DNGL_EVENT_SUPPORT */
- /* append the concise debug information */
- if (dhdp->concise_dbg_buf) {
- dhd_print_buf_addr(dhdp, "concise_dbg_buf", dhdp->concise_dbg_buf,
- CONCISE_DUMP_BUFLEN);
+exit:
+ if (buf) {
+ kfree(buf);
}
+ return ret;
}
-#ifdef DHD_SSSR_DUMP
-int
-dhdpcie_sssr_dump_get_before_after_len(dhd_pub_t *dhd, uint32 *arr_len)
+static int
+__dhd_apf_delete_filter(struct net_device *ndev, uint32 filter_id)
{
- int i = 0;
-
- DHD_ERROR(("%s\n", __FUNCTION__));
+ dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(ndev);
+ dhd_pub_t *dhdp = &dhd->pub;
+ int ifidx, ret;
- /* core 0 */
- i = 0;
- if (dhd->sssr_d11_before[i] && dhd->sssr_d11_outofreset[i] &&
- (dhd->sssr_dump_mode == SSSR_DUMP_MODE_SSSR)) {
- arr_len[SSSR_C0_D11_BEFORE] = (dhd->sssr_reg_info.mac_regs[i].sr_size);
- DHD_ERROR(("%s: arr_len[SSSR_C0_D11_BEFORE] : %d\n", __FUNCTION__,
- arr_len[SSSR_C0_D11_BEFORE]));
-#ifdef DHD_LOG_DUMP
- dhd_print_buf_addr(dhd, "SSSR_C0_D11_BEFORE",
- dhd->sssr_d11_before[i], arr_len[SSSR_C0_D11_BEFORE]);
-#endif /* DHD_LOG_DUMP */
- }
- if (dhd->sssr_d11_after[i] && dhd->sssr_d11_outofreset[i]) {
- arr_len[SSSR_C0_D11_AFTER] = (dhd->sssr_reg_info.mac_regs[i].sr_size);
- DHD_ERROR(("%s: arr_len[SSSR_C0_D11_AFTER] : %d\n", __FUNCTION__,
- arr_len[SSSR_C0_D11_AFTER]));
-#ifdef DHD_LOG_DUMP
- dhd_print_buf_addr(dhd, "SSSR_C0_D11_AFTER",
- dhd->sssr_d11_after[i], arr_len[SSSR_C0_D11_AFTER]);
-#endif /* DHD_LOG_DUMP */
+ ifidx = dhd_net2idx(dhd, ndev);
+ if (ifidx == DHD_BAD_IF) {
+ DHD_ERROR(("%s: bad ifidx\n", __FUNCTION__));
+ return -ENODEV;
}
- /* core 1 */
- i = 1;
- if (dhd->sssr_d11_before[i] && dhd->sssr_d11_outofreset[i] &&
- (dhd->sssr_dump_mode == SSSR_DUMP_MODE_SSSR)) {
- arr_len[SSSR_C1_D11_BEFORE] = (dhd->sssr_reg_info.mac_regs[i].sr_size);
- DHD_ERROR(("%s: arr_len[SSSR_C1_D11_BEFORE] : %d\n", __FUNCTION__,
- arr_len[SSSR_C1_D11_BEFORE]));
-#ifdef DHD_LOG_DUMP
- dhd_print_buf_addr(dhd, "SSSR_C1_D11_BEFORE",
- dhd->sssr_d11_before[i], arr_len[SSSR_C1_D11_BEFORE]);
-#endif /* DHD_LOG_DUMP */
- }
- if (dhd->sssr_d11_after[i] && dhd->sssr_d11_outofreset[i]) {
- arr_len[SSSR_C1_D11_AFTER] = (dhd->sssr_reg_info.mac_regs[i].sr_size);
- DHD_ERROR(("%s: arr_len[SSSR_C1_D11_AFTER] : %d\n", __FUNCTION__,
- arr_len[SSSR_C1_D11_AFTER]));
-#ifdef DHD_LOG_DUMP
- dhd_print_buf_addr(dhd, "SSSR_C1_D11_AFTER",
- dhd->sssr_d11_after[i], arr_len[SSSR_C1_D11_AFTER]);
-#endif /* DHD_LOG_DUMP */
+ ret = dhd_wl_ioctl_set_intiovar(dhdp, "pkt_filter_delete",
+ htod32(filter_id), WLC_SET_VAR, TRUE, ifidx);
+ if (unlikely(ret)) {
+ DHD_ERROR(("%s: failed to delete APF filter, id=%d, ret=%d\n",
+ __FUNCTION__, filter_id, ret));
}
- if (dhd->sssr_reg_info.vasip_regs.vasip_sr_size) {
- arr_len[SSSR_DIG_BEFORE] = (dhd->sssr_reg_info.vasip_regs.vasip_sr_size);
- arr_len[SSSR_DIG_AFTER] = (dhd->sssr_reg_info.vasip_regs.vasip_sr_size);
- DHD_ERROR(("%s: arr_len[SSSR_DIG_BEFORE] : %d\n", __FUNCTION__,
- arr_len[SSSR_DIG_BEFORE]));
- DHD_ERROR(("%s: arr_len[SSSR_DIG_AFTER] : %d\n", __FUNCTION__,
- arr_len[SSSR_DIG_AFTER]));
-#ifdef DHD_LOG_DUMP
- if (dhd->sssr_dig_buf_before) {
- dhd_print_buf_addr(dhd, "SSSR_DIG_BEFORE",
- dhd->sssr_dig_buf_before, arr_len[SSSR_DIG_BEFORE]);
- }
- if (dhd->sssr_dig_buf_after) {
- dhd_print_buf_addr(dhd, "SSSR_DIG_AFTER",
- dhd->sssr_dig_buf_after, arr_len[SSSR_DIG_AFTER]);
- }
-#endif /* DHD_LOG_DUMP */
- } else if ((dhd->sssr_reg_info.length > OFFSETOF(sssr_reg_info_v1_t, dig_mem_info)) &&
- dhd->sssr_reg_info.dig_mem_info.dig_sr_addr) {
- arr_len[SSSR_DIG_BEFORE] = (dhd->sssr_reg_info.dig_mem_info.dig_sr_size);
- arr_len[SSSR_DIG_AFTER] = (dhd->sssr_reg_info.dig_mem_info.dig_sr_size);
- DHD_ERROR(("%s: arr_len[SSSR_DIG_BEFORE] : %d\n", __FUNCTION__,
- arr_len[SSSR_DIG_BEFORE]));
- DHD_ERROR(("%s: arr_len[SSSR_DIG_AFTER] : %d\n", __FUNCTION__,
- arr_len[SSSR_DIG_AFTER]));
-#ifdef DHD_LOG_DUMP
- if (dhd->sssr_dig_buf_before) {
- dhd_print_buf_addr(dhd, "SSSR_DIG_BEFORE",
- dhd->sssr_dig_buf_before, arr_len[SSSR_DIG_BEFORE]);
- }
- if (dhd->sssr_dig_buf_after) {
- dhd_print_buf_addr(dhd, "SSSR_DIG_AFTER",
- dhd->sssr_dig_buf_after, arr_len[SSSR_DIG_AFTER]);
- }
-#endif /* DHD_LOG_DUMP */
- }
- return BCME_OK;
+ return ret;
}
-void
-dhd_nla_put_sssr_dump_len(void *ndev, uint32 *arr_len)
+void dhd_apf_lock(struct net_device *dev)
{
- dhd_info_t *dhd_info = *(dhd_info_t **)netdev_priv((struct net_device *)ndev);
- dhd_pub_t *dhdp = &dhd_info->pub;
-
- if (dhdp->sssr_dump_collected) {
- dhdpcie_sssr_dump_get_before_after_len(dhdp, arr_len);
- }
+ dhd_info_t *dhd = DHD_DEV_INFO(dev);
+ _dhd_apf_lock_local(dhd);
}
-#endif /* DHD_SSSR_DUMP */
-uint32
-dhd_get_time_str_len()
+void dhd_apf_unlock(struct net_device *dev)
{
- char *ts = NULL, time_str[128];
-
- ts = dhd_log_dump_get_timestamp();
- snprintf(time_str, sizeof(time_str),
- "\n\n ========== LOG DUMP TAKEN AT : %s =========\n", ts);
- return strlen(time_str);
+ dhd_info_t *dhd = DHD_DEV_INFO(dev);
+ _dhd_apf_unlock_local(dhd);
}
-#if defined(BCMPCIE)
-uint32
-dhd_get_ext_trap_len(void *ndev, dhd_pub_t *dhdp)
+int
+dhd_dev_apf_get_version(struct net_device *ndev, uint32 *version)
{
- int length = 0;
- log_dump_section_hdr_t sec_hdr;
- dhd_info_t *dhd_info;
-
- if (ndev) {
- dhd_info = *(dhd_info_t **)netdev_priv((struct net_device *)ndev);
- dhdp = &dhd_info->pub;
- }
-
- if (!dhdp)
- return length;
-
- if (dhdp->extended_trap_data) {
- length = (strlen(EXT_TRAP_LOG_HDR)
- + sizeof(sec_hdr) + BCMPCIE_EXT_TRAP_DATA_MAXLEN);
- }
- return length;
-}
-#endif
+ dhd_info_t *dhd = DHD_DEV_INFO(ndev);
+ dhd_pub_t *dhdp = &dhd->pub;
+ int ifidx, ret;
-#if defined(DHD_FW_COREDUMP) && defined(DNGL_EVENT_SUPPORT)
-uint32
-dhd_get_health_chk_len(void *ndev, dhd_pub_t *dhdp)
-{
- int length = 0;
- log_dump_section_hdr_t sec_hdr;
- dhd_info_t *dhd_info;
+ if (!FW_SUPPORTED(dhdp, apf)) {
+ DHD_ERROR(("%s: firmware doesn't support APF\n", __FUNCTION__));
- if (ndev) {
- dhd_info = *(dhd_info_t **)netdev_priv((struct net_device *)ndev);
- dhdp = &dhd_info->pub;
+ /*
+ * Notify the Android framework that APF is not supported by reporting
+ * the version as zero.
+ */
+ *version = 0;
+ return BCME_OK;
}
- if (!dhdp)
- return length;
-
- if (dhdp->memdump_type == DUMP_TYPE_DONGLE_HOST_EVENT) {
- length = (strlen(HEALTH_CHK_LOG_HDR)
- + sizeof(sec_hdr) + HEALTH_CHK_BUF_SIZE);
+ ifidx = dhd_net2idx(dhd, ndev);
+ if (ifidx == DHD_BAD_IF) {
+ DHD_ERROR(("%s: bad ifidx\n", __FUNCTION__));
+ return -ENODEV;
}
- return length;
-}
-#endif /* DHD_FW_COREDUMP && DNGL_EVENT_SUPPORT */
-
-uint32
-dhd_get_dhd_dump_len(void *ndev, dhd_pub_t *dhdp)
-{
- int length = 0;
- log_dump_section_hdr_t sec_hdr;
- dhd_info_t *dhd_info;
- uint32 remain_len = 0;
- if (ndev) {
- dhd_info = *(dhd_info_t **)netdev_priv((struct net_device *)ndev);
- dhdp = &dhd_info->pub;
+ ret = dhd_wl_ioctl_get_intiovar(dhdp, "apf_ver", version,
+ WLC_GET_VAR, FALSE, ifidx);
+ if (unlikely(ret)) {
+ DHD_ERROR(("%s: failed to get APF version, ret=%d\n",
+ __FUNCTION__, ret));
}
- if (!dhdp)
- return length;
-
- if (dhdp->concise_dbg_buf) {
- remain_len = dhd_dump(dhdp, (char *)dhdp->concise_dbg_buf, CONCISE_DUMP_BUFLEN);
- if (remain_len <= 0) {
- DHD_ERROR(("%s: error getting concise debug info !\n",
- __FUNCTION__));
- return length;
- }
- length = (strlen(DHD_DUMP_LOG_HDR) + sizeof(sec_hdr) +
- (CONCISE_DUMP_BUFLEN - remain_len));
- }
- return length;
+ return ret;
}
-uint32
-dhd_get_cookie_log_len(void *ndev, dhd_pub_t *dhdp)
+int
+dhd_dev_apf_get_max_len(struct net_device *ndev, uint32 *max_len)
{
- int length = 0;
- dhd_info_t *dhd_info;
+ dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(ndev);
+ dhd_pub_t *dhdp = &dhd->pub;
+ int ifidx, ret;
- if (ndev) {
- dhd_info = *(dhd_info_t **)netdev_priv((struct net_device *)ndev);
- dhdp = &dhd_info->pub;
+ if (!FW_SUPPORTED(dhdp, apf)) {
+ DHD_ERROR(("%s: firmware doesn't support APF\n", __FUNCTION__));
+ *max_len = 0;
+ return BCME_OK;
}
- if (!dhdp)
- return length;
-
- if (dhdp->logdump_cookie && dhd_logdump_cookie_count(dhdp) > 0) {
- length = dhd_log_dump_cookie_len(dhdp);
+ ifidx = dhd_net2idx(dhd, ndev);
+ if (ifidx == DHD_BAD_IF) {
+ DHD_ERROR(("%s: bad ifidx\n", __FUNCTION__));
+ return -ENODEV;
}
- return length;
-}
-
-#ifdef DHD_DUMP_PCIE_RINGS
-uint32
-dhd_get_flowring_len(void *ndev, dhd_pub_t *dhdp)
-{
- int length = 0;
- log_dump_section_hdr_t sec_hdr;
- dhd_info_t *dhd_info;
- uint16 h2d_flowrings_total;
- uint32 remain_len = 0;
-
- if (ndev) {
- dhd_info = *(dhd_info_t **)netdev_priv((struct net_device *)ndev);
- dhdp = &dhd_info->pub;
+ ret = dhd_wl_ioctl_get_intiovar(dhdp, "apf_size_limit", max_len,
+ WLC_GET_VAR, FALSE, ifidx);
+ if (unlikely(ret)) {
+ DHD_ERROR(("%s: failed to get APF size limit, ret=%d\n",
+ __FUNCTION__, ret));
}
- if (!dhdp)
- return length;
-
- if (dhdp->concise_dbg_buf) {
- remain_len = dhd_dump(dhdp, (char *)dhdp->concise_dbg_buf, CONCISE_DUMP_BUFLEN);
- if (remain_len <= 0) {
- DHD_ERROR(("%s: error getting concise debug info !\n",
- __FUNCTION__));
- return length;
- }
- }
-
- length += strlen(FLOWRING_DUMP_HDR);
- length += CONCISE_DUMP_BUFLEN - remain_len;
- length += sizeof(sec_hdr);
- h2d_flowrings_total = dhd_get_max_flow_rings(dhdp);
- length += ((H2DRING_TXPOST_ITEMSIZE
- * H2DRING_TXPOST_MAX_ITEM * h2d_flowrings_total)
- + (D2HRING_TXCMPLT_ITEMSIZE * D2HRING_TXCMPLT_MAX_ITEM)
- + (H2DRING_RXPOST_ITEMSIZE * H2DRING_RXPOST_MAX_ITEM)
- + (D2HRING_RXCMPLT_ITEMSIZE * D2HRING_RXCMPLT_MAX_ITEM)
- + (H2DRING_CTRL_SUB_ITEMSIZE * H2DRING_CTRL_SUB_MAX_ITEM)
- + (D2HRING_CTRL_CMPLT_ITEMSIZE * D2HRING_CTRL_CMPLT_MAX_ITEM)
-#ifdef EWP_EDL
- + (D2HRING_EDL_HDR_SIZE * D2HRING_EDL_MAX_ITEM));
-#else
- + (H2DRING_INFO_BUFPOST_ITEMSIZE * H2DRING_DYNAMIC_INFO_MAX_ITEM)
- + (D2HRING_INFO_BUFCMPLT_ITEMSIZE * D2HRING_DYNAMIC_INFO_MAX_ITEM));
-#endif /* EWP_EDL */
- return length;
+ return ret;
}
-#endif /* DHD_DUMP_PCIE_RINGS */
-#ifdef EWP_ECNTRS_LOGGING
-uint32
-dhd_get_ecntrs_len(void *ndev, dhd_pub_t *dhdp)
+int
+dhd_dev_apf_add_filter(struct net_device *ndev, u8* program,
+ uint32 program_len)
{
- dhd_info_t *dhd_info;
- log_dump_section_hdr_t sec_hdr;
- int length = 0;
- dhd_dbg_ring_t *ring;
-
- if (ndev) {
- dhd_info = *(dhd_info_t **)netdev_priv((struct net_device *)ndev);
- dhdp = &dhd_info->pub;
- }
+ dhd_info_t *dhd = DHD_DEV_INFO(ndev);
+ dhd_pub_t *dhdp = &dhd->pub;
+ int ret;
- if (!dhdp)
- return length;
+ DHD_APF_LOCK(ndev);
- if (logdump_ecntr_enable && dhdp->ecntr_dbg_ring) {
- ring = (dhd_dbg_ring_t *)dhdp->ecntr_dbg_ring;
- length = ring->ring_size + strlen(ECNTRS_LOG_HDR) + sizeof(sec_hdr);
+ /* Delete the existing filter, if any */
+ if (dhdp->apf_set) {
+ ret = __dhd_apf_delete_filter(ndev, PKT_FILTER_APF_ID);
+ if (unlikely(ret)) {
+ goto exit;
+ }
+ dhdp->apf_set = FALSE;
}
- return length;
-}
-#endif /* EWP_ECNTRS_LOGGING */
-
-#ifdef EWP_RTT_LOGGING
-uint32
-dhd_get_rtt_len(void *ndev, dhd_pub_t *dhdp)
-{
- dhd_info_t *dhd_info;
- log_dump_section_hdr_t sec_hdr;
- int length = 0;
- dhd_dbg_ring_t *ring;
- if (ndev) {
- dhd_info = *(dhd_info_t **)netdev_priv((struct net_device *)ndev);
- dhdp = &dhd_info->pub;
+ ret = __dhd_apf_add_filter(ndev, PKT_FILTER_APF_ID, program, program_len);
+ if (ret) {
+ goto exit;
}
+ dhdp->apf_set = TRUE;
- if (!dhdp)
- return length;
-
- if (logdump_rtt_enable && dhdp->rtt_dbg_ring) {
- ring = (dhd_dbg_ring_t *)dhdp->rtt_dbg_ring;
- length = ring->ring_size + strlen(RTT_LOG_HDR) + sizeof(sec_hdr);
+ if (dhdp->in_suspend && dhdp->apf_set && !(dhdp->op_mode & DHD_FLAG_HOSTAP_MODE)) {
+ /* Driver is still in (early) suspend state; re-enable the APF filter */
+ ret = __dhd_apf_config_filter(ndev, PKT_FILTER_APF_ID,
+ PKT_FILTER_MODE_FORWARD_ON_MATCH, TRUE);
}
- return length;
+exit:
+ DHD_APF_UNLOCK(ndev);
+
+ return ret;
}
-#endif /* EWP_RTT_LOGGING */
int
-dhd_get_dld_log_dump(void *dev, dhd_pub_t *dhdp, const void *user_buf,
- void *fp, uint32 len, int type, void *pos)
+dhd_dev_apf_enable_filter(struct net_device *ndev)
{
- int ret = BCME_OK;
- struct dhd_log_dump_buf *dld_buf;
- log_dump_section_hdr_t sec_hdr;
- dhd_info_t *dhd_info;
+ dhd_info_t *dhd = DHD_DEV_INFO(ndev);
+ dhd_pub_t *dhdp = &dhd->pub;
+ int ret = 0;
- dld_buf = &g_dld_buf[type];
+ DHD_APF_LOCK(ndev);
- if (dev) {
- dhd_info = *(dhd_info_t **)netdev_priv((struct net_device *)dev);
- dhdp = &dhd_info->pub;
- } else if (!dhdp) {
- return BCME_ERROR;
+ if (dhdp->apf_set && !(dhdp->op_mode & DHD_FLAG_HOSTAP_MODE)) {
+ ret = __dhd_apf_config_filter(ndev, PKT_FILTER_APF_ID,
+ PKT_FILTER_MODE_FORWARD_ON_MATCH, TRUE);
}
- DHD_ERROR(("%s: ENTER \n", __FUNCTION__));
-
- dhd_init_sec_hdr(&sec_hdr);
-
- /* write the section header first */
- ret = dhd_export_debug_data(dld_hdrs[type].hdr_str, fp, user_buf,
- strlen(dld_hdrs[type].hdr_str), pos);
- if (ret < 0)
- goto exit;
- len -= (uint32)strlen(dld_hdrs[type].hdr_str);
- len -= (uint32)sizeof(sec_hdr);
- sec_hdr.type = dld_hdrs[type].sec_type;
- sec_hdr.length = len;
- ret = dhd_export_debug_data((char *)&sec_hdr, fp, user_buf, sizeof(sec_hdr), pos);
- if (ret < 0)
- goto exit;
- ret = dhd_export_debug_data(dld_buf->buffer, fp, user_buf, len, pos);
- if (ret < 0)
- goto exit;
+ DHD_APF_UNLOCK(ndev);
-exit:
return ret;
}
-static int
-dhd_log_flush(dhd_pub_t *dhdp, log_dump_type_t *type)
+int
+dhd_dev_apf_disable_filter(struct net_device *ndev)
{
- unsigned long flags = 0;
-#ifdef EWP_EDL
- int i = 0;
-#endif /* EWP_EDL */
- dhd_info_t *dhd_info = NULL;
+ dhd_info_t *dhd = DHD_DEV_INFO(ndev);
+ dhd_pub_t *dhdp = &dhd->pub;
+ int ret = 0;
- /* if dhdp is null, its extremely unlikely that log dump will be scheduled
- * so not freeing 'type' here is ok, even if we want to free 'type'
- * we cannot do so, since 'dhdp->osh' is unavailable
- * as dhdp is null
- */
- if (!dhdp || !type) {
- if (dhdp) {
- DHD_GENERAL_LOCK(dhdp, flags);
- DHD_BUS_BUSY_CLEAR_IN_LOGDUMP(dhdp);
- dhd_os_busbusy_wake(dhdp);
- DHD_GENERAL_UNLOCK(dhdp, flags);
- }
- return BCME_ERROR;
- }
+ DHD_APF_LOCK(ndev);
- dhd_info = (dhd_info_t *)dhdp->info;
- /* in case of trap get preserve logs from ETD */
-#if defined(BCMPCIE) && defined(EWP_ETD_PRSRV_LOGS)
- if (dhdp->dongle_trap_occured &&
- dhdp->extended_trap_data) {
- dhdpcie_get_etd_preserve_logs(dhdp, (uint8 *)dhdp->extended_trap_data,
- &dhd_info->event_data);
+ if (dhdp->apf_set) {
+ ret = __dhd_apf_config_filter(ndev, PKT_FILTER_APF_ID,
+ PKT_FILTER_MODE_FORWARD_ON_MATCH, FALSE);
}
-#endif /* BCMPCIE */
- /* flush the event work items to get any fw events/logs
- * flush_work is a blocking call
- */
-#ifdef SHOW_LOGTRACE
-#ifdef EWP_EDL
- if (dhd_info->pub.dongle_edl_support) {
- /* wait till existing edl items are processed */
- dhd_flush_logtrace_process(dhd_info);
- /* dhd_flush_logtrace_process will ensure the work items in the ring
- * (EDL ring) from rd to wr are processed. But if wr had
- * wrapped around, only the work items from rd to ring-end are processed.
- * So to ensure that the work items at the
- * beginning of ring are also processed in the wrap around case, call
- * it twice
- */
- for (i = 0; i < 2; i++) {
- /* blocks till the edl items are processed */
- dhd_flush_logtrace_process(dhd_info);
- }
- } else {
- dhd_flush_logtrace_process(dhd_info);
- }
-#else
- dhd_flush_logtrace_process(dhd_info);
-#endif /* EWP_EDL */
-#endif /* SHOW_LOGTRACE */
+ DHD_APF_UNLOCK(ndev);
- return BCME_OK;
+ return ret;
}
int
-dhd_get_debug_dump_file_name(void *dev, dhd_pub_t *dhdp, char *dump_path, int size)
+dhd_dev_apf_delete_filter(struct net_device *ndev)
{
- dhd_info_t *dhd_info;
-
- if (dev) {
- dhd_info = *(dhd_info_t **)netdev_priv((struct net_device *)dev);
- dhdp = &dhd_info->pub;
- }
-
- if (!dhdp)
- return BCME_ERROR;
+ dhd_info_t *dhd = DHD_DEV_INFO(ndev);
+ dhd_pub_t *dhdp = &dhd->pub;
+ int ret = 0;
- memset(dump_path, 0, size);
+ DHD_APF_LOCK(ndev);
- switch (dhdp->debug_dump_subcmd) {
- case CMD_UNWANTED:
- snprintf(dump_path, size, "%s",
- DHD_COMMON_DUMP_PATH DHD_DEBUG_DUMP_TYPE
- DHD_DUMP_SUBSTR_UNWANTED);
- break;
- case CMD_DISCONNECTED:
- snprintf(dump_path, size, "%s",
- DHD_COMMON_DUMP_PATH DHD_DEBUG_DUMP_TYPE
- DHD_DUMP_SUBSTR_DISCONNECTED);
- break;
- default:
- snprintf(dump_path, size, "%s",
- DHD_COMMON_DUMP_PATH DHD_DEBUG_DUMP_TYPE);
+ if (dhdp->apf_set) {
+ ret = __dhd_apf_delete_filter(ndev, PKT_FILTER_APF_ID);
+ if (!ret) {
+ dhdp->apf_set = FALSE;
+ }
}
- if (!dhdp->logdump_periodic_flush) {
- get_debug_dump_time(dhdp->debug_dump_time_str);
- snprintf(dump_path + strlen(dump_path),
- size - strlen(dump_path),
- "_%s", dhdp->debug_dump_time_str);
- }
- return BCME_OK;
+ DHD_APF_UNLOCK(ndev);
+
+ return ret;
}
+#endif /* PKT_FILTER_SUPPORT && APF */
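A hedged sketch of how a caller, for example a cfg80211 vendor-command handler outside this patch, might chain the APF entry points above; the function name and the error handling are illustrative only.

static int example_apf_install(struct net_device *ndev,
	u8 *program, uint32 program_len)
{
	uint32 ver = 0, max_len = 0;
	int ret;

	ret = dhd_dev_apf_get_version(ndev, &ver);
	if (ret || ver == 0)
		return ret;                     /* error, or APF unsupported */

	ret = dhd_dev_apf_get_max_len(ndev, &max_len);
	if (ret)
		return ret;
	if (program_len > max_len)
		return -EINVAL;

	ret = dhd_dev_apf_add_filter(ndev, program, program_len);
	if (ret)
		return ret;

	/* optionally arm the filter right away; the add path above only
	 * re-enables it when the driver is already in early suspend
	 */
	return dhd_dev_apf_enable_filter(ndev);
}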
-uint32
-dhd_get_dld_len(int log_type)
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
+static void dhd_hang_process(void *dhd_info, void *event_info, u8 event)
{
- unsigned long wr_size = 0;
- unsigned long buf_size = 0;
- unsigned long flags = 0;
- struct dhd_log_dump_buf *dld_buf;
- log_dump_section_hdr_t sec_hdr;
+ dhd_info_t *dhd;
+ struct net_device *dev;
- /* calculate the length of the log */
- dld_buf = &g_dld_buf[log_type];
- buf_size = (unsigned long)dld_buf->max -
- (unsigned long)dld_buf->buffer;
+ dhd = (dhd_info_t *)dhd_info;
+ dev = dhd->iflist[0]->net;
- if (dld_buf->wraparound) {
- wr_size = buf_size;
- } else {
- /* need to hold the lock before accessing 'present' and 'remain' ptrs */
- spin_lock_irqsave(&dld_buf->lock, flags);
- wr_size = (unsigned long)dld_buf->present -
- (unsigned long)dld_buf->front;
- spin_unlock_irqrestore(&dld_buf->lock, flags);
+ if (dev) {
+ /*
+ * For HW2, dev_close needs to be done so the upper layer can recover
+ * after a hang. For the Interposer configuration, skip dev_close so
+ * that dhd iovars can still be used to take a socramdump after the
+ * crash; also skip it for HW4, where hang-event handling is different.
+ */
+#if !defined(CUSTOMER_HW2_INTERPOSER)
+ rtnl_lock();
+ dev_close(dev);
+ rtnl_unlock();
+#endif
+#if defined(WL_WIRELESS_EXT)
+ wl_iw_send_priv_event(dev, "HANG");
+#endif
+#if defined(WL_CFG80211)
+ wl_cfg80211_hang(dev, WLAN_REASON_UNSPECIFIED);
+#endif
}
- return (wr_size + sizeof(sec_hdr) + strlen(dld_hdrs[log_type].hdr_str));
}
-static void
-dhd_get_time_str(dhd_pub_t *dhdp, char *time_str, int size)
+#ifdef EXYNOS_PCIE_LINKDOWN_RECOVERY
+extern dhd_pub_t *link_recovery;
+void dhd_host_recover_link(void)
{
- char *ts = NULL;
- memset(time_str, 0, size);
- ts = dhd_log_dump_get_timestamp();
- snprintf(time_str, size,
- "\n\n ========== LOG DUMP TAKEN AT : %s =========\n", ts);
+ DHD_ERROR(("****** %s ******\n", __FUNCTION__));
+ link_recovery->hang_reason = HANG_REASON_PCIE_LINK_DOWN;
+ dhd_bus_set_linkdown(link_recovery, TRUE);
+ dhd_os_send_hang_message(link_recovery);
}
+EXPORT_SYMBOL(dhd_host_recover_link);
+#endif /* EXYNOS_PCIE_LINKDOWN_RECOVERY */
-int
-dhd_print_time_str(const void *user_buf, void *fp, uint32 len, void *pos)
+int dhd_os_send_hang_message(dhd_pub_t *dhdp)
{
- char *ts = NULL;
int ret = 0;
- char time_str[128];
-
- memset_s(time_str, sizeof(time_str), 0, sizeof(time_str));
- ts = dhd_log_dump_get_timestamp();
- snprintf(time_str, sizeof(time_str),
- "\n\n ========== LOG DUMP TAKEN AT : %s =========\n", ts);
+ if (dhdp) {
+#if defined(DHD_HANG_SEND_UP_TEST)
+ if (dhdp->req_hang_type) {
+ DHD_ERROR(("%s, Clear HANG test request 0x%x\n",
+ __FUNCTION__, dhdp->req_hang_type));
+ dhdp->req_hang_type = 0;
+ }
+#endif /* DHD_HANG_SEND_UP_TEST */
- /* write the timestamp hdr to the file first */
- ret = dhd_export_debug_data(time_str, fp, user_buf, strlen(time_str), pos);
- if (ret < 0) {
- DHD_ERROR(("write file error, err = %d\n", ret));
+ if (!dhdp->hang_was_sent) {
+#if defined(CONFIG_BCM_DETECT_CONSECUTIVE_HANG)
+ dhdp->hang_counts++;
+ if (dhdp->hang_counts >= MAX_CONSECUTIVE_HANG_COUNTS) {
+ DHD_ERROR(("%s: Consecutive hang from dongle: %u\n",
+ __FUNCTION__, dhdp->hang_counts));
+ BUG_ON(1);
+ }
+#endif /* CONFIG_BCM_DETECT_CONSECUTIVE_HANG */
+#ifdef DHD_DEBUG_UART
+ /* If the PCIe link has gone down, run the debug UART application
+ * to gather ramdump data from the dongle over UART
+ */
+ if (!dhdp->info->duart_execute) {
+ dhd_deferred_schedule_work(dhdp->info->dhd_deferred_wq,
+ (void *)dhdp, DHD_WQ_WORK_DEBUG_UART_DUMP,
+ dhd_debug_uart_exec_rd, DHD_WQ_WORK_PRIORITY_HIGH);
+ }
+#endif /* DHD_DEBUG_UART */
+ dhdp->hang_was_sent = 1;
+#ifdef BT_OVER_SDIO
+ dhdp->is_bt_recovery_required = TRUE;
+#endif
+ dhd_deferred_schedule_work(dhdp->info->dhd_deferred_wq, (void *)dhdp,
+ DHD_WQ_WORK_HANG_MSG, dhd_hang_process, DHD_WQ_WORK_PRIORITY_HIGH);
+ DHD_ERROR(("%s: Event HANG sent up due to re=%d te=%d s=%d\n", __FUNCTION__,
+ dhdp->rxcnt_timeout, dhdp->txcnt_timeout, dhdp->busstate));
+ }
}
return ret;
}
-#if defined(DHD_FW_COREDUMP) && defined(DNGL_EVENT_SUPPORT)
-int
-dhd_print_health_chk_data(void *dev, dhd_pub_t *dhdp, const void *user_buf,
- void *fp, uint32 len, void *pos)
+int net_os_send_hang_message(struct net_device *dev)
{
- int ret = BCME_OK;
- log_dump_section_hdr_t sec_hdr;
- dhd_info_t *dhd_info;
+ dhd_info_t *dhd = DHD_DEV_INFO(dev);
+ int ret = 0;
- if (dev) {
- dhd_info = *(dhd_info_t **)netdev_priv((struct net_device *)dev);
- dhdp = &dhd_info->pub;
+ if (dhd) {
+ /* Report FW problem when enabled */
+ if (dhd->pub.hang_report) {
+#ifdef BT_OVER_SDIO
+ if (netif_running(dev)) {
+#endif /* BT_OVER_SDIO */
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
+ ret = dhd_os_send_hang_message(&dhd->pub);
+#else
+ ret = wl_cfg80211_hang(dev, WLAN_REASON_UNSPECIFIED);
+#endif
+#ifdef BT_OVER_SDIO
+ }
+ DHD_ERROR(("%s: HANG -> Reset BT\n", __FUNCTION__));
+ bcmsdh_btsdio_process_dhd_hang_notification(!netif_running(dev));
+#endif /* BT_OVER_SDIO */
+ } else {
+ DHD_ERROR(("%s: FW HANG ignored (for testing purposes) and not sent up\n",
+ __FUNCTION__));
+ }
}
+ return ret;
+}
- if (!dhdp)
- return BCME_ERROR;
+int net_os_send_hang_message_reason(struct net_device *dev, const char *string_num)
+{
+ dhd_info_t *dhd = NULL;
+ dhd_pub_t *dhdp = NULL;
+ int reason;
- dhd_init_sec_hdr(&sec_hdr);
+ dhd = DHD_DEV_INFO(dev);
+ if (dhd) {
+ dhdp = &dhd->pub;
+ }
- if (dhdp->memdump_type == DUMP_TYPE_DONGLE_HOST_EVENT) {
- /* write the section header first */
- ret = dhd_export_debug_data(HEALTH_CHK_LOG_HDR, fp, user_buf,
- strlen(HEALTH_CHK_LOG_HDR), pos);
- if (ret < 0)
- goto exit;
+ if (!dhd || !dhdp) {
+ return 0;
+ }
- len -= (uint32)strlen(HEALTH_CHK_LOG_HDR);
- sec_hdr.type = LOG_DUMP_SECTION_HEALTH_CHK;
- sec_hdr.length = HEALTH_CHK_BUF_SIZE;
- ret = dhd_export_debug_data((char *)&sec_hdr, fp, user_buf, sizeof(sec_hdr), pos);
- if (ret < 0)
- goto exit;
+ reason = bcm_strtoul(string_num, NULL, 0);
+ DHD_INFO(("%s: Enter, reason=0x%x\n", __FUNCTION__, reason));
- len -= (uint32)sizeof(sec_hdr);
- /* write the log */
- ret = dhd_export_debug_data((char *)dhdp->health_chk_event_data, fp,
- user_buf, len, pos);
- if (ret < 0)
- goto exit;
+ if ((reason <= HANG_REASON_MASK) || (reason >= HANG_REASON_MAX)) {
+ reason = 0;
}
-exit:
- return ret;
-}
-#endif /* DHD_FW_COREDUMP && DNGL_EVENT_SUPPORT */
-#ifdef BCMPCIE
-int
-dhd_print_ext_trap_data(void *dev, dhd_pub_t *dhdp, const void *user_buf,
- void *fp, uint32 len, void *pos)
-{
- int ret = BCME_OK;
- log_dump_section_hdr_t sec_hdr;
- dhd_info_t *dhd_info;
+ dhdp->hang_reason = reason;
- if (dev) {
- dhd_info = *(dhd_info_t **)netdev_priv((struct net_device *)dev);
- dhdp = &dhd_info->pub;
- }
+ return net_os_send_hang_message(dev);
+}
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27) */
- if (!dhdp)
- return BCME_ERROR;
- dhd_init_sec_hdr(&sec_hdr);
+int dhd_net_wifi_platform_set_power(struct net_device *dev, bool on, unsigned long delay_msec)
+{
+ dhd_info_t *dhd = DHD_DEV_INFO(dev);
+ return wifi_platform_set_power(dhd->adapter, on, delay_msec);
+}
- /* append extended trap data to the file in case of traps */
- if (dhdp->dongle_trap_occured &&
- dhdp->extended_trap_data) {
- /* write the section header first */
- ret = dhd_export_debug_data(EXT_TRAP_LOG_HDR, fp, user_buf,
- strlen(EXT_TRAP_LOG_HDR), pos);
- if (ret < 0)
- goto exit;
+bool dhd_force_country_change(struct net_device *dev)
+{
+ dhd_info_t *dhd = DHD_DEV_INFO(dev);
- len -= (uint32)strlen(EXT_TRAP_LOG_HDR);
- sec_hdr.type = LOG_DUMP_SECTION_EXT_TRAP;
- sec_hdr.length = BCMPCIE_EXT_TRAP_DATA_MAXLEN;
- ret = dhd_export_debug_data((uint8 *)&sec_hdr, fp, user_buf, sizeof(sec_hdr), pos);
- if (ret < 0)
- goto exit;
+ if (dhd && dhd->pub.up)
+ return dhd->pub.force_country_change;
+ return FALSE;
+}
- len -= (uint32)sizeof(sec_hdr);
- /* write the log */
- ret = dhd_export_debug_data((uint8 *)dhdp->extended_trap_data, fp,
- user_buf, len, pos);
- if (ret < 0)
- goto exit;
+void dhd_get_customized_country_code(struct net_device *dev, char *country_iso_code,
+ wl_country_t *cspec)
+{
+ dhd_info_t *dhd = DHD_DEV_INFO(dev);
+#if defined(DHD_BLOB_EXISTENCE_CHECK)
+ if (!dhd->pub.is_blob)
+#endif /* DHD_BLOB_EXISTENCE_CHECK */
+ {
+#if defined(CUSTOM_COUNTRY_CODE)
+ get_customized_country_code(dhd->adapter, country_iso_code, cspec,
+ dhd->pub.dhd_cflags);
+#else
+ get_customized_country_code(dhd->adapter, country_iso_code, cspec);
+#endif /* CUSTOM_COUNTRY_CODE */
}
-exit:
- return ret;
+
+ BCM_REFERENCE(dhd);
}
-#endif /* BCMPCIE */
-int
-dhd_print_dump_data(void *dev, dhd_pub_t *dhdp, const void *user_buf,
- void *fp, uint32 len, void *pos)
+void dhd_bus_country_set(struct net_device *dev, wl_country_t *cspec, bool notify)
{
- int ret = BCME_OK;
- log_dump_section_hdr_t sec_hdr;
- dhd_info_t *dhd_info;
+ dhd_info_t *dhd = DHD_DEV_INFO(dev);
+#ifdef WL_CFG80211
+ struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
+#endif
- if (dev) {
- dhd_info = *(dhd_info_t **)netdev_priv((struct net_device *)dev);
- dhdp = &dhd_info->pub;
+ if (dhd && dhd->pub.up) {
+ memcpy(&dhd->pub.dhd_cspec, cspec, sizeof(wl_country_t));
+#ifdef WL_CFG80211
+ wl_update_wiphybands(cfg, notify);
+#endif
}
+}
- if (!dhdp)
- return BCME_ERROR;
-
- dhd_init_sec_hdr(&sec_hdr);
+void dhd_bus_band_set(struct net_device *dev, uint band)
+{
+ dhd_info_t *dhd = DHD_DEV_INFO(dev);
+#ifdef WL_CFG80211
+ struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
+#endif
+ if (dhd && dhd->pub.up) {
+#ifdef WL_CFG80211
+ wl_update_wiphybands(cfg, true);
+#endif
+ }
+}
- ret = dhd_export_debug_data(DHD_DUMP_LOG_HDR, fp, user_buf, strlen(DHD_DUMP_LOG_HDR), pos);
- if (ret < 0)
- goto exit;
+int dhd_net_set_fw_path(struct net_device *dev, char *fw)
+{
+ dhd_info_t *dhd = DHD_DEV_INFO(dev);
- len -= (uint32)strlen(DHD_DUMP_LOG_HDR);
- sec_hdr.type = LOG_DUMP_SECTION_DHD_DUMP;
- sec_hdr.length = len;
- ret = dhd_export_debug_data((char *)&sec_hdr, fp, user_buf, sizeof(sec_hdr), pos);
- if (ret < 0)
- goto exit;
+ if (!fw || fw[0] == '\0')
+ return -EINVAL;
- len -= (uint32)sizeof(sec_hdr);
+ strncpy(dhd->fw_path, fw, sizeof(dhd->fw_path) - 1);
+ dhd->fw_path[sizeof(dhd->fw_path)-1] = '\0';
- if (dhdp->concise_dbg_buf) {
- dhd_dump(dhdp, (char *)dhdp->concise_dbg_buf, CONCISE_DUMP_BUFLEN);
- ret = dhd_export_debug_data(dhdp->concise_dbg_buf, fp, user_buf, len, pos);
- if (ret < 0)
- goto exit;
+#if defined(SOFTAP)
+ if (strstr(fw, "apsta") != NULL) {
+ DHD_INFO(("GOT APSTA FIRMWARE\n"));
+ ap_fw_loaded = TRUE;
+ } else {
+ DHD_INFO(("GOT STA FIRMWARE\n"));
+ ap_fw_loaded = FALSE;
}
+#endif
+ return 0;
+}
-exit:
- return ret;
+void dhd_net_if_lock(struct net_device *dev)
+{
+ dhd_info_t *dhd = DHD_DEV_INFO(dev);
+ dhd_net_if_lock_local(dhd);
}
-int
-dhd_print_cookie_data(void *dev, dhd_pub_t *dhdp, const void *user_buf,
- void *fp, uint32 len, void *pos)
+void dhd_net_if_unlock(struct net_device *dev)
{
- int ret = BCME_OK;
- dhd_info_t *dhd_info;
+ dhd_info_t *dhd = DHD_DEV_INFO(dev);
+ dhd_net_if_unlock_local(dhd);
+}
- if (dev) {
- dhd_info = *(dhd_info_t **)netdev_priv((struct net_device *)dev);
- dhdp = &dhd_info->pub;
- }
+static void dhd_net_if_lock_local(dhd_info_t *dhd)
+{
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
+ if (dhd)
+ mutex_lock(&dhd->dhd_net_if_mutex);
+#endif
+}
- if (!dhdp)
- return BCME_ERROR;
+static void dhd_net_if_unlock_local(dhd_info_t *dhd)
+{
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
+ if (dhd)
+ mutex_unlock(&dhd->dhd_net_if_mutex);
+#endif
+}
- if (dhdp->logdump_cookie && dhd_logdump_cookie_count(dhdp) > 0) {
- ret = dhd_log_dump_cookie_to_file(dhdp, fp, user_buf, (unsigned long *)pos);
- }
- return ret;
+static void dhd_suspend_lock(dhd_pub_t *pub)
+{
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
+ dhd_info_t *dhd = (dhd_info_t *)(pub->info);
+ if (dhd)
+ mutex_lock(&dhd->dhd_suspend_mutex);
+#endif
}
-#ifdef DHD_DUMP_PCIE_RINGS
-int
-dhd_print_flowring_data(void *dev, dhd_pub_t *dhdp, const void *user_buf,
- void *fp, uint32 len, void *pos)
+static void dhd_suspend_unlock(dhd_pub_t *pub)
{
- log_dump_section_hdr_t sec_hdr;
- int ret = BCME_OK;
- uint32 remain_len = 0;
- dhd_info_t *dhd_info;
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
+ dhd_info_t *dhd = (dhd_info_t *)(pub->info);
+ if (dhd)
+ mutex_unlock(&dhd->dhd_suspend_mutex);
+#endif
+}
- if (dev) {
- dhd_info = *(dhd_info_t **)netdev_priv((struct net_device *)dev);
- dhdp = &dhd_info->pub;
- }
+unsigned long dhd_os_general_spin_lock(dhd_pub_t *pub)
+{
+ dhd_info_t *dhd = (dhd_info_t *)(pub->info);
+ unsigned long flags = 0;
- if (!dhdp)
- return BCME_ERROR;
+ if (dhd)
+ spin_lock_irqsave(&dhd->dhd_lock, flags);
- dhd_init_sec_hdr(&sec_hdr);
+ return flags;
+}
- remain_len = dhd_dump(dhdp, (char *)dhdp->concise_dbg_buf, CONCISE_DUMP_BUFLEN);
- memset(dhdp->concise_dbg_buf, 0, CONCISE_DUMP_BUFLEN);
+void dhd_os_general_spin_unlock(dhd_pub_t *pub, unsigned long flags)
+{
+ dhd_info_t *dhd = (dhd_info_t *)(pub->info);
- /* write the section header first */
- ret = dhd_export_debug_data(FLOWRING_DUMP_HDR, fp, user_buf,
- strlen(FLOWRING_DUMP_HDR), pos);
- if (ret < 0)
- goto exit;
+ if (dhd)
+ spin_unlock_irqrestore(&dhd->dhd_lock, flags);
+}
- /* Write the ring summary */
- ret = dhd_export_debug_data(dhdp->concise_dbg_buf, fp, user_buf,
- (CONCISE_DUMP_BUFLEN - remain_len), pos);
- if (ret < 0)
- goto exit;
+/* Linux specific multipurpose spinlock API */
+void *
+dhd_os_spin_lock_init(osl_t *osh)
+{
+ /* Adding 4 bytes since the sizeof(spinlock_t) could be 0 */
+ /* if CONFIG_SMP and CONFIG_DEBUG_SPINLOCK are not defined */
+ /* and this results in kernel asserts in internal builds */
+ spinlock_t * lock = MALLOC(osh, sizeof(spinlock_t) + 4);
+ if (lock)
+ spin_lock_init(lock);
+ return ((void *)lock);
+}
+void
+dhd_os_spin_lock_deinit(osl_t *osh, void *lock)
+{
+ if (lock)
+ MFREE(osh, lock, sizeof(spinlock_t) + 4);
+}
+unsigned long
+dhd_os_spin_lock(void *lock)
+{
+ unsigned long flags = 0;
- sec_hdr.type = LOG_DUMP_SECTION_FLOWRING;
- sec_hdr.length = len;
- ret = dhd_export_debug_data((char *)&sec_hdr, fp, user_buf, sizeof(sec_hdr), pos);
- if (ret < 0)
- goto exit;
+ if (lock)
+ spin_lock_irqsave((spinlock_t *)lock, flags);
- /* write the log */
- ret = dhd_d2h_h2d_ring_dump(dhdp, fp, user_buf, (unsigned long *)pos, TRUE);
- if (ret < 0)
- goto exit;
+ return flags;
+}
+void
+dhd_os_spin_unlock(void *lock, unsigned long flags)
+{
+ if (lock)
+ spin_unlock_irqrestore((spinlock_t *)lock, flags);
+}
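/*
 * Minimal usage sketch of the multipurpose spinlock API above: init, lock,
 * unlock, deinit. The osl handle and the protected counter are hypothetical.
 */
static void example_spinlock_usage(osl_t *osh)
{
	void *lock;
	unsigned long flags;
	int protected_counter = 0;

	lock = dhd_os_spin_lock_init(osh);	/* MALLOC + spin_lock_init */
	if (!lock)
		return;

	flags = dhd_os_spin_lock(lock);		/* spin_lock_irqsave */
	protected_counter++;			/* critical section */
	dhd_os_spin_unlock(lock, flags);	/* spin_unlock_irqrestore */

	dhd_os_spin_lock_deinit(osh, lock);	/* MFREE */
}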
-exit:
- return ret;
+static int
+dhd_get_pend_8021x_cnt(dhd_info_t *dhd)
+{
+ return (atomic_read(&dhd->pend_8021x_cnt));
}
-#endif /* DHD_DUMP_PCIE_RINGS */
-#ifdef EWP_ECNTRS_LOGGING
+#define MAX_WAIT_FOR_8021X_TX 100
+
int
-dhd_print_ecntrs_data(void *dev, dhd_pub_t *dhdp, const void *user_buf,
- void *fp, uint32 len, void *pos)
+dhd_wait_pend8021x(struct net_device *dev)
{
- log_dump_section_hdr_t sec_hdr;
- int ret = BCME_OK;
- dhd_info_t *dhd_info;
+ dhd_info_t *dhd = DHD_DEV_INFO(dev);
+ int timeout = msecs_to_jiffies(10);
+ int ntimes = MAX_WAIT_FOR_8021X_TX;
+ int pend = dhd_get_pend_8021x_cnt(dhd);
- if (dev) {
- dhd_info = *(dhd_info_t **)netdev_priv((struct net_device *)dev);
- dhdp = &dhd_info->pub;
+ while (ntimes && pend) {
+ if (pend) {
+ set_current_state(TASK_INTERRUPTIBLE);
+ DHD_PERIM_UNLOCK(&dhd->pub);
+ schedule_timeout(timeout);
+ DHD_PERIM_LOCK(&dhd->pub);
+ set_current_state(TASK_RUNNING);
+ ntimes--;
+ }
+ pend = dhd_get_pend_8021x_cnt(dhd);
}
-
- if (!dhdp)
- return BCME_ERROR;
-
- dhd_init_sec_hdr(&sec_hdr);
-
- if (logdump_ecntr_enable &&
- dhdp->ecntr_dbg_ring) {
- sec_hdr.type = LOG_DUMP_SECTION_ECNTRS;
- ret = dhd_dump_debug_ring(dhdp, dhdp->ecntr_dbg_ring,
- user_buf, &sec_hdr, ECNTRS_LOG_HDR, len, LOG_DUMP_SECTION_ECNTRS);
+ if (ntimes == 0)
+ {
+ atomic_set(&dhd->pend_8021x_cnt, 0);
+ DHD_ERROR(("%s: TIMEOUT\n", __FUNCTION__));
}
- return ret;
-
+ return pend;
}
-#endif /* EWP_ECNTRS_LOGGING */
-#ifdef EWP_RTT_LOGGING
-int
-dhd_print_rtt_data(void *dev, dhd_pub_t *dhdp, const void *user_buf,
- void *fp, uint32 len, void *pos)
+#if defined(DHD_DEBUG)
+int write_file(const char * file_name, uint32 flags, uint8 *buf, int size)
{
- log_dump_section_hdr_t sec_hdr;
- int ret = BCME_OK;
- dhd_info_t *dhd_info;
+ int ret = 0;
+ struct file *fp = NULL;
+ mm_segment_t old_fs;
+ loff_t pos = 0;
+ /* change to KERNEL_DS address limit */
+ old_fs = get_fs();
+ set_fs(KERNEL_DS);
- if (dev) {
- dhd_info = *(dhd_info_t **)netdev_priv((struct net_device *)dev);
- dhdp = &dhd_info->pub;
+ /* open file to write */
+ fp = filp_open(file_name, flags, 0664);
+ if (IS_ERR(fp)) {
+ DHD_ERROR(("open file error, err = %ld\n", PTR_ERR(fp)));
+ ret = -1;
+ goto exit;
}
- if (!dhdp)
- return BCME_ERROR;
-
- dhd_init_sec_hdr(&sec_hdr);
-
- if (logdump_rtt_enable && dhdp->rtt_dbg_ring) {
- ret = dhd_dump_debug_ring(dhdp, dhdp->rtt_dbg_ring,
- user_buf, &sec_hdr, RTT_LOG_HDR, len, LOG_DUMP_SECTION_RTT);
+ /* Write buf to file */
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 0)
+ ret = kernel_write(fp, buf, size, &pos);
+#else
+ ret = vfs_write(fp, buf, size, &pos);
+#endif
+ if (ret < 0) {
+ DHD_ERROR(("write file error, err = %d\n", ret));
+ goto exit;
}
- return ret;
-
-}
-#endif /* EWP_RTT_LOGGING */
-
-#ifdef DHD_STATUS_LOGGING
-int
-dhd_print_status_log_data(void *dev, dhd_pub_t *dhdp, const void *user_buf,
- void *fp, uint32 len, void *pos)
-{
- dhd_info_t *dhd_info;
- if (dev) {
- dhd_info = *(dhd_info_t **)netdev_priv((struct net_device *)dev);
- dhdp = &dhd_info->pub;
+ /* Sync file from filesystem to physical media */
+ ret = vfs_fsync(fp, 0);
+ if (ret < 0) {
+ DHD_ERROR(("sync file error, error = %d\n", ret));
+ goto exit;
}
+ ret = BCME_OK;
- if (!dhdp) {
- return BCME_ERROR;
- }
+exit:
+ /* close file before return */
+ if (!IS_ERR(fp))
+ filp_close(fp, current->files);
- return dhd_statlog_write_logdump(dhdp, user_buf, fp, len, pos);
+ /* restore previous address limit */
+ set_fs(old_fs);
+
+ return ret;
}
+#endif /* DHD_DEBUG */
-uint32
-dhd_get_status_log_len(void *ndev, dhd_pub_t *dhdp)
+#ifdef DHD_DEBUG
+static void
+dhd_convert_memdump_type_to_str(uint32 type, char *buf)
{
- dhd_info_t *dhd_info;
- uint32 length = 0;
-
- if (ndev) {
- dhd_info = *(dhd_info_t **)netdev_priv((struct net_device *)ndev);
- dhdp = &dhd_info->pub;
- }
+ char *type_str = NULL;
- if (dhdp) {
- length = dhd_statlog_get_logbuf_len(dhdp);
+ switch (type) {
+ case DUMP_TYPE_RESUMED_ON_TIMEOUT:
+ type_str = "resumed_on_timeout";
+ break;
+ case DUMP_TYPE_D3_ACK_TIMEOUT:
+ type_str = "D3_ACK_timeout";
+ break;
+ case DUMP_TYPE_DONGLE_TRAP:
+ type_str = "Dongle_Trap";
+ break;
+ case DUMP_TYPE_MEMORY_CORRUPTION:
+ type_str = "Memory_Corruption";
+ break;
+ case DUMP_TYPE_PKTID_AUDIT_FAILURE:
+ type_str = "PKTID_AUDIT_Fail";
+ break;
+ case DUMP_TYPE_PKTID_INVALID:
+ type_str = "PKTID_INVALID";
+ break;
+ case DUMP_TYPE_SCAN_TIMEOUT:
+ type_str = "SCAN_timeout";
+ break;
+ case DUMP_TYPE_JOIN_TIMEOUT:
+ type_str = "JOIN_timeout";
+ break;
+ case DUMP_TYPE_SCAN_BUSY:
+ type_str = "SCAN_Busy";
+ break;
+ case DUMP_TYPE_BY_SYSDUMP:
+ type_str = "BY_SYSDUMP";
+ break;
+ case DUMP_TYPE_BY_LIVELOCK:
+ type_str = "BY_LIVELOCK";
+ break;
+ case DUMP_TYPE_AP_LINKUP_FAILURE:
+ type_str = "BY_AP_LINK_FAILURE";
+ break;
+ case DUMP_TYPE_AP_ABNORMAL_ACCESS:
+ type_str = "INVALID_ACCESS";
+ break;
+ case DUMP_TYPE_CFG_VENDOR_TRIGGERED:
+ type_str = "CFG_VENDOR_TRIGGERED";
+ break;
+ case DUMP_TYPE_RESUMED_ON_TIMEOUT_RX:
+ type_str = "ERROR_RX_TIMED_OUT";
+ break;
+ case DUMP_TYPE_RESUMED_ON_TIMEOUT_TX:
+ type_str = "ERROR_TX_TIMED_OUT";
+ break;
+ case DUMP_TYPE_RESUMED_ON_INVALID_RING_RDWR:
+ type_str = "BY_INVALID_RING_RDWR";
+ break;
+ case DUMP_TYPE_DONGLE_HOST_EVENT:
+ type_str = "BY_DONGLE_HOST_EVENT";
+ break;
+ case DUMP_TYPE_TRANS_ID_MISMATCH:
+ type_str = "BY_TRANS_ID_MISMATCH";
+ break;
+ case DUMP_TYPE_HANG_ON_IFACE_OP_FAIL:
+ type_str = "HANG_IFACE_OP_FAIL";
+ break;
+#ifdef SUPPORT_LINKDOWN_RECOVERY
+ case DUMP_TYPE_READ_SHM_FAIL:
+ type_str = "READ_SHM_FAIL";
+ break;
+#endif /* SUPPORT_LINKDOWN_RECOVERY */
+ default:
+ type_str = "Unknown_type";
+ break;
}
- return length;
-}
-#endif /* DHD_STATUS_LOGGING */
-
-void
-dhd_init_sec_hdr(log_dump_section_hdr_t *sec_hdr)
-{
- /* prep the section header */
- memset(sec_hdr, 0, sizeof(*sec_hdr));
- sec_hdr->magic = LOG_DUMP_MAGIC;
- sec_hdr->timestamp = local_clock();
+ strncpy(buf, type_str, strlen(type_str));
+ buf[strlen(type_str)] = 0;
}
-/* Must hold 'dhd_os_logdump_lock' before calling this function ! */
-static int
-do_dhd_log_dump(dhd_pub_t *dhdp, log_dump_type_t *type)
+int
+write_dump_to_file(dhd_pub_t *dhd, uint8 *buf, int size, char *fname)
{
- int ret = 0, i = 0;
- struct file *fp = NULL;
- mm_segment_t old_fs;
- loff_t pos = 0;
- char dump_path[128];
+ int ret = 0;
+ char memdump_path[128];
+ char memdump_type[32];
+ struct timeval curtime;
uint32 file_mode;
- unsigned long flags = 0;
- size_t log_size = 0;
- size_t fspace_remain = 0;
- struct kstat stat;
- char time_str[128];
- unsigned int len = 0;
- log_dump_section_hdr_t sec_hdr;
-
- DHD_ERROR(("%s: ENTER \n", __FUNCTION__));
-
- DHD_GENERAL_LOCK(dhdp, flags);
- if (DHD_BUS_CHECK_DOWN_OR_DOWN_IN_PROGRESS(dhdp)) {
- DHD_GENERAL_UNLOCK(dhdp, flags);
- DHD_ERROR(("%s: bus is down! can't collect log dump. \n", __FUNCTION__));
- goto exit1;
- }
- DHD_BUS_BUSY_SET_IN_LOGDUMP(dhdp);
- DHD_GENERAL_UNLOCK(dhdp, flags);
-
- if ((ret = dhd_log_flush(dhdp, type)) < 0) {
- goto exit1;
- }
- /* change to KERNEL_DS address limit */
- old_fs = get_fs();
- set_fs(KERNEL_DS);
-
- dhd_get_debug_dump_file_name(NULL, dhdp, dump_path, sizeof(dump_path));
-
- DHD_ERROR(("debug_dump_path = %s\n", dump_path));
- DHD_ERROR(("DHD version: %s\n", dhd_version));
- DHD_ERROR(("F/W version: %s\n", fw_version));
-
- dhd_log_dump_buf_addr(dhdp, type);
- dhd_get_time_str(dhdp, time_str, 128);
-
- /* if this is the first time after dhd is loaded,
- * or, if periodic flush is disabled, clear the log file
+ /* Init file name */
+ memset(memdump_path, 0, sizeof(memdump_path));
+ memset(memdump_type, 0, sizeof(memdump_type));
+ do_gettimeofday(&curtime);
+ dhd_convert_memdump_type_to_str(dhd->memdump_type, memdump_type);
+#ifdef CUSTOMER_HW4_DEBUG
+ snprintf(memdump_path, sizeof(memdump_path), "%s%s_%s_%ld.%ld",
+ DHD_COMMON_DUMP_PATH, fname, memdump_type,
+ (unsigned long)curtime.tv_sec, (unsigned long)curtime.tv_usec);
+ file_mode = O_CREAT | O_WRONLY | O_SYNC;
+#elif defined(CUSTOMER_HW2)
+ snprintf(memdump_path, sizeof(memdump_path), "%s%s_%s_%ld.%ld",
+ "/data/misc/wifi/", fname, memdump_type,
+ (unsigned long)curtime.tv_sec, (unsigned long)curtime.tv_usec);
+ file_mode = O_CREAT | O_WRONLY | O_SYNC;
+#elif (defined(BOARD_PANDA) || defined(__ARM_ARCH_7A__))
+ snprintf(memdump_path, sizeof(memdump_path), "%s%s_%s_%ld.%ld",
+ "/data/misc/wifi/", fname, memdump_type,
+ (unsigned long)curtime.tv_sec, (unsigned long)curtime.tv_usec);
+ file_mode = O_CREAT | O_WRONLY;
+#else
+ snprintf(memdump_path, sizeof(memdump_path), "%s%s_%s_%ld.%ld",
+ "/installmedia/", fname, memdump_type,
+ (unsigned long)curtime.tv_sec, (unsigned long)curtime.tv_usec);
+ /* Extra flags O_DIRECT and O_SYNC are required for Brix Android, as we are
+ * calling BUG_ON immediately after collecting the socram dump.
+ * So the file write operation should directly write the contents into the
+ * file instead of caching it. O_TRUNC flag ensures that file will be re-written
+ * instead of appending.
*/
- if (!dhdp->logdump_periodic_flush || dhdp->last_file_posn == 0)
- file_mode = O_CREAT | O_WRONLY | O_SYNC | O_TRUNC;
- else
- file_mode = O_CREAT | O_RDWR | O_SYNC;
-
- fp = filp_open(dump_path, file_mode, 0664);
- if (IS_ERR(fp)) {
- /* If android installed image, try '/data' directory */
-#if defined(CONFIG_X86)
- DHD_ERROR(("%s: File open error on Installed android image, trying /data...\n",
- __FUNCTION__));
- snprintf(dump_path, sizeof(dump_path), "/data/" DHD_DEBUG_DUMP_TYPE);
- if (!dhdp->logdump_periodic_flush) {
- snprintf(dump_path + strlen(dump_path),
- sizeof(dump_path) - strlen(dump_path),
- "_%s", dhdp->debug_dump_time_str);
- }
- fp = filp_open(dump_path, file_mode, 0664);
+ file_mode = O_CREAT | O_WRONLY | O_SYNC;
+ {
+ struct file *fp = filp_open(memdump_path, file_mode, 0664);
+ /* Check if it is live Brix image having /installmedia, else use /data */
if (IS_ERR(fp)) {
- ret = PTR_ERR(fp);
- DHD_ERROR(("open file error, err = %d\n", ret));
- goto exit2;
- }
- DHD_ERROR(("debug_dump_path = %s\n", dump_path));
-#else
- ret = PTR_ERR(fp);
- DHD_ERROR(("open file error, err = %d\n", ret));
- goto exit2;
-#endif /* CONFIG_X86 && OEM_ANDROID */
- }
-
- ret = vfs_stat(dump_path, &stat);
- if (ret < 0) {
- DHD_ERROR(("file stat error, err = %d\n", ret));
- goto exit2;
- }
-
- /* if some one else has changed the file */
- if (dhdp->last_file_posn != 0 &&
- stat.size < dhdp->last_file_posn) {
- dhdp->last_file_posn = 0;
- }
-
- if (dhdp->logdump_periodic_flush) {
- log_size = strlen(time_str) + strlen(DHD_DUMP_LOG_HDR) + sizeof(sec_hdr);
- /* calculate the amount of space required to dump all logs */
- for (i = 0; i < DLD_BUFFER_NUM; ++i) {
- if (*type != DLD_BUF_TYPE_ALL && i != *type)
- continue;
-
- if (g_dld_buf[i].wraparound) {
- log_size += (unsigned long)g_dld_buf[i].max
- - (unsigned long)g_dld_buf[i].buffer;
- } else {
- spin_lock_irqsave(&g_dld_buf[i].lock, flags);
- log_size += (unsigned long)g_dld_buf[i].present -
- (unsigned long)g_dld_buf[i].front;
- spin_unlock_irqrestore(&g_dld_buf[i].lock, flags);
- }
- log_size += strlen(dld_hdrs[i].hdr_str) + sizeof(sec_hdr);
-
- if (*type != DLD_BUF_TYPE_ALL && i == *type)
- break;
- }
-
- ret = generic_file_llseek(fp, dhdp->last_file_posn, SEEK_CUR);
- if (ret < 0) {
- DHD_ERROR(("file seek last posn error ! err = %d \n", ret));
- goto exit2;
- }
- pos = fp->f_pos;
-
- /* if the max file size is reached, wrap around to beginning of the file
- * we're treating the file as a large ring buffer
- */
- fspace_remain = logdump_max_filesize - pos;
- if (log_size > fspace_remain) {
- fp->f_pos -= pos;
- pos = fp->f_pos;
+ DHD_ERROR(("open file %s, try /data/\n", memdump_path));
+ snprintf(memdump_path, sizeof(memdump_path), "%s%s_%s_%ld.%ld",
+ "/data/", fname, memdump_type,
+ (unsigned long)curtime.tv_sec, (unsigned long)curtime.tv_usec);
+ } else {
+ filp_close(fp, NULL);
}
}
+#endif /* CUSTOMER_HW4_DEBUG */
- dhd_print_time_str(0, fp, len, &pos);
-
- for (i = 0; i < DLD_BUFFER_NUM; ++i) {
-
- if (*type != DLD_BUF_TYPE_ALL && i != *type)
- continue;
-
- len = dhd_get_dld_len(i);
- dhd_get_dld_log_dump(NULL, dhdp, 0, fp, len, i, &pos);
- if (*type != DLD_BUF_TYPE_ALL)
- break;
- }
+ /* print SOCRAM dump file path */
+ DHD_ERROR(("%s: file_path = %s\n", __FUNCTION__, memdump_path));
-#ifdef EWP_ECNTRS_LOGGING
- /* periodic flushing of ecounters is NOT supported */
- if (*type == DLD_BUF_TYPE_ALL &&
- logdump_ecntr_enable &&
- dhdp->ecntr_dbg_ring) {
- dhd_log_dump_ring_to_file(dhdp, dhdp->ecntr_dbg_ring,
- fp, (unsigned long *)&pos,
- &sec_hdr, ECNTRS_LOG_HDR, LOG_DUMP_SECTION_ECNTRS);
- }
-#endif /* EWP_ECNTRS_LOGGING */
-
-#ifdef DHD_STATUS_LOGGING
- if (dhdp->statlog) {
- /* write the statlog */
- len = dhd_get_status_log_len(NULL, dhdp);
- if (len) {
- if (dhd_print_status_log_data(NULL, dhdp, 0, fp,
- len, &pos) < 0) {
- goto exit2;
- }
- }
- }
-#endif /* DHD_STATUS_LOGGING */
+ /* Write file */
+ ret = write_file(memdump_path, file_mode, buf, size);
-#ifdef EWP_RTT_LOGGING
- /* periodic flushing of ecounters is NOT supported */
- if (*type == DLD_BUF_TYPE_ALL &&
- logdump_rtt_enable &&
- dhdp->rtt_dbg_ring) {
- dhd_log_dump_ring_to_file(dhdp, dhdp->rtt_dbg_ring,
- fp, (unsigned long *)&pos,
- &sec_hdr, RTT_LOG_HDR, LOG_DUMP_SECTION_RTT);
- }
-#endif /* EWP_RTT_LOGGING */
+ return ret;
+}
+#endif /* DHD_DEBUG */
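/*
 * Minimal usage sketch for write_dump_to_file() above, assuming the SOCRAM
 * image has already been copied into dhdp->soc_ram/soc_ram_length (fields used
 * elsewhere in this file). The wrapper itself is hypothetical.
 */
#ifdef DHD_DEBUG
static int example_flush_socram(dhd_pub_t *dhdp)
{
	if (!dhdp->soc_ram || !dhdp->soc_ram_length)
		return BCME_NOTREADY;

	/* write_dump_to_file() builds "<path><fname>_<type>_<sec>.<usec>" from
	 * dhdp->memdump_type and then hands the buffer to write_file().
	 */
	return write_dump_to_file(dhdp, (uint8 *)dhdp->soc_ram,
		dhdp->soc_ram_length, "mem_dump");
}
#endif /* DHD_DEBUG */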
-#ifdef BCMPCIE
- len = dhd_get_ext_trap_len(NULL, dhdp);
- if (len) {
- if (dhd_print_ext_trap_data(NULL, dhdp, 0, fp, len, &pos) < 0)
- goto exit2;
- }
-#endif /* BCMPCIE */
+int dhd_os_wake_lock_timeout(dhd_pub_t *pub)
+{
+ dhd_info_t *dhd = (dhd_info_t *)(pub->info);
+ unsigned long flags;
+ int ret = 0;
-#if defined(DHD_FW_COREDUMP) && defined(DNGL_EVENT_SUPPORT)
- len = dhd_get_health_chk_len(NULL, dhdp);
- if (len) {
- if (dhd_print_ext_trap_data(NULL, dhdp, 0, fp, len, &pos) < 0)
- goto exit2;
+ if (dhd && (dhd->dhd_state & DHD_ATTACH_STATE_WAKELOCKS_INIT)) {
+ spin_lock_irqsave(&dhd->wakelock_spinlock, flags);
+ ret = dhd->wakelock_rx_timeout_enable > dhd->wakelock_ctrl_timeout_enable ?
+ dhd->wakelock_rx_timeout_enable : dhd->wakelock_ctrl_timeout_enable;
+#ifdef CONFIG_HAS_WAKELOCK
+ if (dhd->wakelock_rx_timeout_enable)
+ wake_lock_timeout(&dhd->wl_rxwake,
+ msecs_to_jiffies(dhd->wakelock_rx_timeout_enable));
+ if (dhd->wakelock_ctrl_timeout_enable)
+ wake_lock_timeout(&dhd->wl_ctrlwake,
+ msecs_to_jiffies(dhd->wakelock_ctrl_timeout_enable));
+#endif
+ dhd->wakelock_rx_timeout_enable = 0;
+ dhd->wakelock_ctrl_timeout_enable = 0;
+ spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
}
-#endif /* DHD_FW_COREDUMP && DNGL_EVENT_SUPPORT */
+ return ret;
+}
- len = dhd_get_dhd_dump_len(NULL, dhdp);
- if (len) {
- if (dhd_print_dump_data(NULL, dhdp, 0, fp, len, &pos) < 0)
- goto exit2;
- }
+int net_os_wake_lock_timeout(struct net_device *dev)
+{
+ dhd_info_t *dhd = DHD_DEV_INFO(dev);
+ int ret = 0;
- len = dhd_get_cookie_log_len(NULL, dhdp);
- if (len) {
- if (dhd_print_cookie_data(NULL, dhdp, 0, fp, len, &pos) < 0)
- goto exit2;
- }
+ if (dhd)
+ ret = dhd_os_wake_lock_timeout(&dhd->pub);
+ return ret;
+}
-#ifdef DHD_DUMP_PCIE_RINGS
- len = dhd_get_flowring_len(NULL, dhdp);
- if (len) {
- if (dhd_print_flowring_data(NULL, dhdp, 0, fp, len, &pos) < 0)
- goto exit2;
- }
-#endif // endif
+int dhd_os_wake_lock_rx_timeout_enable(dhd_pub_t *pub, int val)
+{
+ dhd_info_t *dhd = (dhd_info_t *)(pub->info);
+ unsigned long flags;
- if (dhdp->logdump_periodic_flush) {
- /* store the last position written to in the file for future use */
- dhdp->last_file_posn = pos;
+ if (dhd && (dhd->dhd_state & DHD_ATTACH_STATE_WAKELOCKS_INIT)) {
+ spin_lock_irqsave(&dhd->wakelock_spinlock, flags);
+ if (val > dhd->wakelock_rx_timeout_enable)
+ dhd->wakelock_rx_timeout_enable = val;
+ spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
}
+ return 0;
+}
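/*
 * Sketch of the deferred-timeout pattern used above: an RX/event path records
 * how long the host should stay awake, and the timeout is only armed later
 * when dhd_os_wake_lock_timeout() runs (e.g. from dhd_os_wake_unlock()). The
 * wrapper and the 500 ms value are hypothetical.
 */
static void example_hold_awake_for_rx(dhd_pub_t *pub)
{
	/* Request ~500 ms of wakefulness; only the largest request is kept. */
	dhd_os_wake_lock_rx_timeout_enable(pub, 500);

	/* ... receive-path processing ... */

	/* Arms wl_rxwake for the recorded 500 ms and clears the request. */
	dhd_os_wake_lock_timeout(pub);
}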
-exit2:
- if (!IS_ERR(fp) && fp != NULL) {
- filp_close(fp, NULL);
- DHD_ERROR(("%s: Finished writing log dump to file - '%s' \n",
- __FUNCTION__, dump_path));
- }
- set_fs(old_fs);
-exit1:
- if (type) {
- MFREE(dhdp->osh, type, sizeof(*type));
- }
- DHD_GENERAL_LOCK(dhdp, flags);
- DHD_BUS_BUSY_CLEAR_IN_LOGDUMP(dhdp);
- dhd_os_busbusy_wake(dhdp);
- DHD_GENERAL_UNLOCK(dhdp, flags);
+int dhd_os_wake_lock_ctrl_timeout_enable(dhd_pub_t *pub, int val)
+{
+ dhd_info_t *dhd = (dhd_info_t *)(pub->info);
+ unsigned long flags;
-#ifdef DHD_DUMP_MNGR
- if (ret >= 0) {
- dhd_dump_file_manage_enqueue(dhdp, dump_path, DHD_DEBUG_DUMP_TYPE);
+ if (dhd && (dhd->dhd_state & DHD_ATTACH_STATE_WAKELOCKS_INIT)) {
+ spin_lock_irqsave(&dhd->wakelock_spinlock, flags);
+ if (val > dhd->wakelock_ctrl_timeout_enable)
+ dhd->wakelock_ctrl_timeout_enable = val;
+ spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
}
-#endif /* DHD_DUMP_MNGR */
-
- return (ret < 0) ? BCME_ERROR : BCME_OK;
+ return 0;
}
-#endif /* DHD_LOG_DUMP */
-/* This function writes data to the file pointed by fp, OR
- * copies data to the user buffer sent by upper layer(HAL).
- */
-int
-dhd_export_debug_data(void *mem_buf, void *fp, const void *user_buf, int buf_len, void *pos)
+int dhd_os_wake_lock_ctrl_timeout_cancel(dhd_pub_t *pub)
{
- int ret = BCME_OK;
+ dhd_info_t *dhd = (dhd_info_t *)(pub->info);
+ unsigned long flags;
- if (fp) {
- ret = compat_vfs_write(fp, mem_buf, buf_len, (loff_t *)pos);
- if (ret < 0) {
- DHD_ERROR(("write file error, err = %d\n", ret));
- goto exit;
- }
- } else {
-#ifdef CONFIG_COMPAT
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 6, 0))
- if (in_compat_syscall()) {
-#else
- if (is_compat_task()) {
-#endif /* LINUX_VER >= 4.6 */
- void * usr_ptr = compat_ptr((uintptr_t) user_buf);
- ret = copy_to_user((void *)((uintptr_t)usr_ptr + (*(int *)pos)),
- mem_buf, buf_len);
- if (ret) {
- DHD_ERROR(("failed to copy into user buffer : %d\n", ret));
- goto exit;
- }
- }
- else
-#endif /* CONFIG_COMPAT */
- {
- ret = copy_to_user((void *)((uintptr_t)user_buf + (*(int *)pos)),
- mem_buf, buf_len);
- if (ret) {
- DHD_ERROR(("failed to copy into user buffer : %d\n", ret));
- goto exit;
- }
- }
- (*(int *)pos) += buf_len;
+ if (dhd && (dhd->dhd_state & DHD_ATTACH_STATE_WAKELOCKS_INIT)) {
+ spin_lock_irqsave(&dhd->wakelock_spinlock, flags);
+ dhd->wakelock_ctrl_timeout_enable = 0;
+#ifdef CONFIG_HAS_WAKELOCK
+ if (wake_lock_active(&dhd->wl_ctrlwake))
+ wake_unlock(&dhd->wl_ctrlwake);
+#endif
+ spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
}
-exit:
- return ret;
+ return 0;
}
-/*
- * This call is to get the memdump size so that,
- * halutil can alloc that much buffer in user space.
- */
-int
-dhd_os_socram_dump(struct net_device *dev, uint32 *dump_size)
+int net_os_wake_lock_rx_timeout_enable(struct net_device *dev, int val)
{
- int ret = BCME_OK;
- dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
- dhd_pub_t *dhdp = &dhd->pub;
-
- if (dhdp->busstate == DHD_BUS_DOWN) {
- DHD_ERROR(("%s: bus is down\n", __FUNCTION__));
- return BCME_ERROR;
- }
+ dhd_info_t *dhd = DHD_DEV_INFO(dev);
+ int ret = 0;
- if (DHD_BUS_CHECK_SUSPEND_OR_SUSPEND_IN_PROGRESS(dhdp)) {
- DHD_ERROR(("%s: bus is in suspend(%d) or suspending(0x%x) state, so skip\n",
- __FUNCTION__, dhdp->busstate, dhdp->dhd_bus_busy_state));
- return BCME_ERROR;
- }
- ret = dhd_common_socram_dump(dhdp);
- if (ret == BCME_OK) {
- *dump_size = dhdp->soc_ram_length;
- }
+ if (dhd)
+ ret = dhd_os_wake_lock_rx_timeout_enable(&dhd->pub, val);
return ret;
}
-/*
- * This is to get the actual memdup after getting the memdump size
- */
-int
-dhd_os_get_socram_dump(struct net_device *dev, char **buf, uint32 *size)
+int net_os_wake_lock_ctrl_timeout_enable(struct net_device *dev, int val)
{
- int ret = BCME_OK;
- int orig_len = 0;
- dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
- dhd_pub_t *dhdp = &dhd->pub;
- if (buf == NULL)
- return BCME_ERROR;
- orig_len = *size;
- if (dhdp->soc_ram) {
- if (orig_len >= dhdp->soc_ram_length) {
- *buf = dhdp->soc_ram;
- *size = dhdp->soc_ram_length;
- } else {
- ret = BCME_BUFTOOSHORT;
- DHD_ERROR(("The length of the buffer is too short"
- " to save the memory dump with %d\n", dhdp->soc_ram_length));
- }
- } else {
- DHD_ERROR(("socram_dump is not ready to get\n"));
- ret = BCME_NOTREADY;
- }
+ dhd_info_t *dhd = DHD_DEV_INFO(dev);
+ int ret = 0;
+
+ if (dhd)
+ ret = dhd_os_wake_lock_ctrl_timeout_enable(&dhd->pub, val);
return ret;
}
-int
-dhd_os_get_version(struct net_device *dev, bool dhd_ver, char **buf, uint32 size)
-{
- char *fw_str;
- if (size == 0)
- return BCME_BADARG;
+#if defined(DHD_TRACE_WAKE_LOCK)
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0))
+#include <linux/hashtable.h>
+#else
+#include <linux/hash.h>
+#endif /* KERNEL_VER >= KERNEL_VERSION(3, 7, 0) */
- fw_str = strstr(info_string, "Firmware: ");
- if (fw_str == NULL) {
- return BCME_ERROR;
- }
- memset(*buf, 0, size);
- if (dhd_ver) {
- strncpy(*buf, dhd_version, size - 1);
- } else {
- strncpy(*buf, fw_str, size - 1);
- }
- return BCME_OK;
-}
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0))
+/* Define 2^5 = 32 bucket size hash table */
+DEFINE_HASHTABLE(wklock_history, 5);
+#else
+/* Define 2^5 = 32 bucket size hash table */
+struct hlist_head wklock_history[32] = { [0 ... 31] = HLIST_HEAD_INIT };
+#endif /* KERNEL_VER >= KERNEL_VERSION(3, 7, 0) */
-#ifdef DNGL_AXI_ERROR_LOGGING
-int
-dhd_os_get_axi_error_dump(void *dev, const void *user_buf, uint32 len)
-{
- int ret = BCME_OK;
- dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
- dhd_pub_t *dhdp = &dhd->pub;
- loff_t pos = 0;
- if (user_buf == NULL) {
- DHD_ERROR(("%s(): user buffer is NULL\n", __FUNCTION__));
- return BCME_ERROR;
- }
+int trace_wklock_onoff = 1;
+typedef enum dhd_wklock_type {
+ DHD_WAKE_LOCK,
+ DHD_WAKE_UNLOCK,
+ DHD_WAIVE_LOCK,
+ DHD_RESTORE_LOCK
+} dhd_wklock_t;
- ret = dhd_export_debug_data((char *)dhdp->axi_err_dump,
- NULL, user_buf, sizeof(dhd_axi_error_dump_t), &pos);
+struct wk_trace_record {
+ unsigned long addr; /* Address of the instruction */
+ dhd_wklock_t lock_type; /* lock_type */
+ unsigned long long counter; /* counter information */
+ struct hlist_node wklock_node; /* hash node */
+};
- if (ret < 0) {
- DHD_ERROR(("%s(): fail to dump pktlog, err = %d\n", __FUNCTION__, ret));
- return ret;
+static struct wk_trace_record *find_wklock_entry(unsigned long addr)
+{
+ struct wk_trace_record *wklock_info;
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0))
+ hash_for_each_possible(wklock_history, wklock_info, wklock_node, addr)
+#else
+ struct hlist_node *entry;
+ int index = hash_long(addr, ilog2(ARRAY_SIZE(wklock_history)));
+ hlist_for_each_entry(wklock_info, entry, &wklock_history[index], wklock_node)
+#endif /* KERNEL_VER >= KERNEL_VERSION(3, 7, 0) */
+ {
+ if (wklock_info->addr == addr) {
+ return wklock_info;
+ }
}
- return ret;
+ return NULL;
}
-int
-dhd_os_get_axi_error_dump_size(struct net_device *dev)
-{
- int size = -1;
- size = sizeof(dhd_axi_error_dump_t);
- if (size < 0) {
- DHD_ERROR(("%s(): fail to get axi error size, err = %d\n", __FUNCTION__, size));
- }
- return size;
-}
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0))
+#define HASH_ADD(hashtable, node, key) \
+ do { \
+ hash_add(hashtable, node, key); \
+ } while (0);
+#else
+#define HASH_ADD(hashtable, node, key) \
+ do { \
+ int index = hash_long(key, ilog2(ARRAY_SIZE(hashtable))); \
+ hlist_add_head(node, &hashtable[index]); \
+ } while (0);
+#endif /* KERNEL_VER < KERNEL_VERSION(3, 7, 0) */
-void
-dhd_os_get_axi_error_filename(struct net_device *dev, char *dump_path, int len)
+#define STORE_WKLOCK_RECORD(wklock_type) \
+ do { \
+ struct wk_trace_record *wklock_info = NULL; \
+ unsigned long func_addr = (unsigned long)__builtin_return_address(0); \
+ wklock_info = find_wklock_entry(func_addr); \
+ if (wklock_info) { \
+ if (wklock_type == DHD_WAIVE_LOCK || wklock_type == DHD_RESTORE_LOCK) { \
+ wklock_info->counter = dhd->wakelock_counter; \
+ } else { \
+ wklock_info->counter++; \
+ } \
+ } else { \
+ wklock_info = kzalloc(sizeof(*wklock_info), GFP_ATOMIC); \
+ if (!wklock_info) {\
+ printk("Can't allocate wk_trace_record \n"); \
+ } else { \
+ wklock_info->addr = func_addr; \
+ wklock_info->lock_type = wklock_type; \
+ if (wklock_type == DHD_WAIVE_LOCK || \
+ wklock_type == DHD_RESTORE_LOCK) { \
+ wklock_info->counter = dhd->wakelock_counter; \
+ } else { \
+ wklock_info->counter++; \
+ } \
+ HASH_ADD(wklock_history, &wklock_info->wklock_node, func_addr); \
+ } \
+ } \
+ } while (0);
+
+static inline void dhd_wk_lock_rec_dump(void)
{
- snprintf(dump_path, len, "%s",
- DHD_COMMON_DUMP_PATH DHD_DUMP_AXI_ERROR_FILENAME);
+ int bkt;
+ struct wk_trace_record *wklock_info;
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0))
+ hash_for_each(wklock_history, bkt, wklock_info, wklock_node)
+#else
+ struct hlist_node *entry = NULL;
+ int max_index = ARRAY_SIZE(wklock_history);
+ for (bkt = 0; bkt < max_index; bkt++)
+ hlist_for_each_entry(wklock_info, entry, &wklock_history[bkt], wklock_node)
+#endif /* KERNEL_VER >= KERNEL_VERSION(3, 7, 0) */
+ {
+ switch (wklock_info->lock_type) {
+ case DHD_WAKE_LOCK:
+ printk("wakelock lock : %pS lock_counter : %llu \n",
+ (void *)wklock_info->addr, wklock_info->counter);
+ break;
+ case DHD_WAKE_UNLOCK:
+ printk("wakelock unlock : %pS, unlock_counter : %llu \n",
+ (void *)wklock_info->addr, wklock_info->counter);
+ break;
+ case DHD_WAIVE_LOCK:
+ printk("wakelock waive : %pS before_waive : %llu \n",
+ (void *)wklock_info->addr, wklock_info->counter);
+ break;
+ case DHD_RESTORE_LOCK:
+ printk("wakelock restore : %pS, after_waive : %llu \n",
+ (void *)wklock_info->addr, wklock_info->counter);
+ break;
+ }
+ }
}
-#endif /* DNGL_AXI_ERROR_LOGGING */
-bool dhd_sta_associated(dhd_pub_t *dhdp, uint32 bssidx, uint8 *mac)
+static void dhd_wk_lock_trace_init(struct dhd_info *dhd)
{
- return dhd_find_sta(dhdp, bssidx, mac) ? TRUE : FALSE;
+ unsigned long flags;
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 7, 0))
+ int i;
+#endif /* KERNEL_VER < KERNEL_VERSION(3, 7, 0) */
+
+ spin_lock_irqsave(&dhd->wakelock_spinlock, flags);
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0))
+ hash_init(wklock_history);
+#else
+ for (i = 0; i < ARRAY_SIZE(wklock_history); i++)
+ INIT_HLIST_HEAD(&wklock_history[i]);
+#endif /* KERNEL_VER >= KERNEL_VERSION(3, 7, 0) */
+ spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
}
-#ifdef DHD_L2_FILTER
-arp_table_t*
-dhd_get_ifp_arp_table_handle(dhd_pub_t *dhdp, uint32 bssidx)
+static void dhd_wk_lock_trace_deinit(struct dhd_info *dhd)
{
- dhd_info_t *dhd = dhdp->info;
- dhd_if_t *ifp;
-
- ASSERT(bssidx < DHD_MAX_IFS);
+ int bkt;
+ struct wk_trace_record *wklock_info;
+ struct hlist_node *tmp;
+ unsigned long flags;
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 7, 0))
+ struct hlist_node *entry = NULL;
+ int max_index = ARRAY_SIZE(wklock_history);
+#endif /* KERNEL_VER < KERNEL_VERSION(3, 7, 0) */
- ifp = dhd->iflist[bssidx];
- return ifp->phnd_arp_table;
+ spin_lock_irqsave(&dhd->wakelock_spinlock, flags);
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0))
+ hash_for_each_safe(wklock_history, bkt, tmp, wklock_info, wklock_node)
+#else
+ for (bkt = 0; bkt < max_index; bkt++)
+ hlist_for_each_entry_safe(wklock_info, entry, tmp,
+ &wklock_history[bkt], wklock_node)
+#endif /* KERNEL_VER >= KERNEL_VERSION(3, 7, 0) */
+ {
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0))
+ hash_del(&wklock_info->wklock_node);
+#else
+ hlist_del_init(&wklock_info->wklock_node);
+#endif /* KERNEL_VER >= KERNEL_VERSION(3, 7, 0) */
+ kfree(wklock_info);
+ }
+ spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
}
-int dhd_get_parp_status(dhd_pub_t *dhdp, uint32 idx)
+void dhd_wk_lock_stats_dump(dhd_pub_t *dhdp)
{
- dhd_info_t *dhd = dhdp->info;
- dhd_if_t *ifp;
-
- ASSERT(idx < DHD_MAX_IFS);
+ dhd_info_t *dhd = (dhd_info_t *)(dhdp->info);
+ unsigned long flags;
- ifp = dhd->iflist[idx];
+ printk(KERN_ERR "DHD Printing wl_wake Lock/Unlock Record\n");
+ spin_lock_irqsave(&dhd->wakelock_spinlock, flags);
+ dhd_wk_lock_rec_dump();
+ spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
- if (ifp)
- return ifp->parp_enable;
- else
- return FALSE;
}
+#else
+#define STORE_WKLOCK_RECORD(wklock_type)
+#endif /* DHD_TRACE_WAKE_LOCK */
-/* Set interface specific proxy arp configuration */
-int dhd_set_parp_status(dhd_pub_t *dhdp, uint32 idx, int val)
+int dhd_os_wake_lock(dhd_pub_t *pub)
{
- dhd_info_t *dhd = dhdp->info;
- dhd_if_t *ifp;
- ASSERT(idx < DHD_MAX_IFS);
- ifp = dhd->iflist[idx];
-
- if (!ifp)
- return BCME_ERROR;
-
- /* At present all 3 variables are being
- * handled at once
- */
- ifp->parp_enable = val;
- ifp->parp_discard = val;
- ifp->parp_allnode = val;
+ dhd_info_t *dhd = (dhd_info_t *)(pub->info);
+ unsigned long flags;
+ int ret = 0;
- /* Flush ARP entries when disabled */
- if (val == FALSE) {
- bcm_l2_filter_arp_table_update(dhdp->osh, ifp->phnd_arp_table, TRUE, NULL,
- FALSE, dhdp->tickcnt);
+ if (dhd && (dhd->dhd_state & DHD_ATTACH_STATE_WAKELOCKS_INIT)) {
+ spin_lock_irqsave(&dhd->wakelock_spinlock, flags);
+ if (dhd->wakelock_counter == 0 && !dhd->waive_wakelock) {
+#ifdef CONFIG_HAS_WAKELOCK
+ wake_lock(&dhd->wl_wifi);
+#elif defined(BCMSDIO) && (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36))
+ dhd_bus_dev_pm_stay_awake(pub);
+#endif
+ }
+#ifdef DHD_TRACE_WAKE_LOCK
+ if (trace_wklock_onoff) {
+ STORE_WKLOCK_RECORD(DHD_WAKE_LOCK);
+ }
+#endif /* DHD_TRACE_WAKE_LOCK */
+ dhd->wakelock_counter++;
+ ret = dhd->wakelock_counter;
+ spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
}
- return BCME_OK;
+
+ return ret;
}
-bool dhd_parp_discard_is_enabled(dhd_pub_t *dhdp, uint32 idx)
+void dhd_event_wake_lock(dhd_pub_t *pub)
{
- dhd_info_t *dhd = dhdp->info;
- dhd_if_t *ifp;
-
- ASSERT(idx < DHD_MAX_IFS);
-
- ifp = dhd->iflist[idx];
+ dhd_info_t *dhd = (dhd_info_t *)(pub->info);
- ASSERT(ifp);
- return ifp->parp_discard;
+ if (dhd) {
+#ifdef CONFIG_HAS_WAKELOCK
+ wake_lock(&dhd->wl_evtwake);
+#elif defined(BCMSDIO) && (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36))
+ dhd_bus_dev_pm_stay_awake(pub);
+#endif
+ }
}
-bool
-dhd_parp_allnode_is_enabled(dhd_pub_t *dhdp, uint32 idx)
+void
+dhd_pm_wake_lock_timeout(dhd_pub_t *pub, int val)
{
- dhd_info_t *dhd = dhdp->info;
- dhd_if_t *ifp;
-
- ASSERT(idx < DHD_MAX_IFS);
+#ifdef CONFIG_HAS_WAKELOCK
+ dhd_info_t *dhd = (dhd_info_t *)(pub->info);
- ifp = dhd->iflist[idx];
+ if (dhd) {
+ wake_lock_timeout(&dhd->wl_pmwake, msecs_to_jiffies(val));
+ }
+#endif /* CONFIG_HAS_WAKELOCK */
+}
- ASSERT(ifp);
+void
+dhd_txfl_wake_lock_timeout(dhd_pub_t *pub, int val)
+{
+#ifdef CONFIG_HAS_WAKELOCK
+ dhd_info_t *dhd = (dhd_info_t *)(pub->info);
- return ifp->parp_allnode;
+ if (dhd) {
+ wake_lock_timeout(&dhd->wl_txflwake, msecs_to_jiffies(val));
+ }
+#endif /* CONFIG_HAS_WAKELOCK */
}
-int dhd_get_dhcp_unicast_status(dhd_pub_t *dhdp, uint32 idx)
+int net_os_wake_lock(struct net_device *dev)
{
- dhd_info_t *dhd = dhdp->info;
- dhd_if_t *ifp;
+ dhd_info_t *dhd = DHD_DEV_INFO(dev);
+ int ret = 0;
- ASSERT(idx < DHD_MAX_IFS);
+ if (dhd)
+ ret = dhd_os_wake_lock(&dhd->pub);
+ return ret;
+}
- ifp = dhd->iflist[idx];
+int dhd_os_wake_unlock(dhd_pub_t *pub)
+{
+ dhd_info_t *dhd = (dhd_info_t *)(pub->info);
+ unsigned long flags;
+ int ret = 0;
- ASSERT(ifp);
+ dhd_os_wake_lock_timeout(pub);
+ if (dhd && (dhd->dhd_state & DHD_ATTACH_STATE_WAKELOCKS_INIT)) {
+ spin_lock_irqsave(&dhd->wakelock_spinlock, flags);
- return ifp->dhcp_unicast;
+ if (dhd->wakelock_counter > 0) {
+ dhd->wakelock_counter--;
+#ifdef DHD_TRACE_WAKE_LOCK
+ if (trace_wklock_onoff) {
+ STORE_WKLOCK_RECORD(DHD_WAKE_UNLOCK);
+ }
+#endif /* DHD_TRACE_WAKE_LOCK */
+ if (dhd->wakelock_counter == 0 && !dhd->waive_wakelock) {
+#ifdef CONFIG_HAS_WAKELOCK
+ wake_unlock(&dhd->wl_wifi);
+#elif defined(BCMSDIO) && (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36))
+ dhd_bus_dev_pm_relax(pub);
+#endif
+ }
+ ret = dhd->wakelock_counter;
+ }
+ spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
+ }
+ return ret;
}
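/*
 * Sketch of the reference counting implemented by dhd_os_wake_lock()/
 * dhd_os_wake_unlock(): the OS wakelock (or pm_stay_awake) is taken only on
 * the 0->1 transition and released only on 1->0, so nesting is safe. The
 * wrapper is hypothetical.
 */
static void example_nested_wake_lock(dhd_pub_t *pub)
{
	dhd_os_wake_lock(pub);		/* counter 0 -> 1: wake_lock(&wl_wifi) */
	dhd_os_wake_lock(pub);		/* counter 1 -> 2: no extra OS call */

	/* ... work that must not race with system suspend ... */

	dhd_os_wake_unlock(pub);	/* counter 2 -> 1: still held */
	dhd_os_wake_unlock(pub);	/* counter 1 -> 0: wake_unlock(&wl_wifi) */
}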
-int dhd_set_dhcp_unicast_status(dhd_pub_t *dhdp, uint32 idx, int val)
+void dhd_event_wake_unlock(dhd_pub_t *pub)
{
- dhd_info_t *dhd = dhdp->info;
- dhd_if_t *ifp;
- ASSERT(idx < DHD_MAX_IFS);
- ifp = dhd->iflist[idx];
-
- ASSERT(ifp);
+ dhd_info_t *dhd = (dhd_info_t *)(pub->info);
- ifp->dhcp_unicast = val;
- return BCME_OK;
+ if (dhd) {
+#ifdef CONFIG_HAS_WAKELOCK
+ wake_unlock(&dhd->wl_evtwake);
+#elif defined(BCMSDIO) && (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36))
+ dhd_bus_dev_pm_relax(pub);
+#endif
+ }
}
-int dhd_get_block_ping_status(dhd_pub_t *dhdp, uint32 idx)
+void dhd_pm_wake_unlock(dhd_pub_t *pub)
{
- dhd_info_t *dhd = dhdp->info;
- dhd_if_t *ifp;
-
- ASSERT(idx < DHD_MAX_IFS);
+#ifdef CONFIG_HAS_WAKELOCK
+ dhd_info_t *dhd = (dhd_info_t *)(pub->info);
- ifp = dhd->iflist[idx];
+ if (dhd) {
+ /* if wl_pmwake is active, unlock it */
+ if (wake_lock_active(&dhd->wl_pmwake)) {
+ wake_unlock(&dhd->wl_pmwake);
+ }
+ }
+#endif /* CONFIG_HAS_WAKELOCK */
+}
- ASSERT(ifp);
+void dhd_txfl_wake_unlock(dhd_pub_t *pub)
+{
+#ifdef CONFIG_HAS_WAKELOCK
+ dhd_info_t *dhd = (dhd_info_t *)(pub->info);
- return ifp->block_ping;
+ if (dhd) {
+ /* if wl_txflwake is active, unlock it */
+ if (wake_lock_active(&dhd->wl_txflwake)) {
+ wake_unlock(&dhd->wl_txflwake);
+ }
+ }
+#endif /* CONFIG_HAS_WAKELOCK */
}
-int dhd_set_block_ping_status(dhd_pub_t *dhdp, uint32 idx, int val)
+int dhd_os_check_wakelock(dhd_pub_t *pub)
{
- dhd_info_t *dhd = dhdp->info;
- dhd_if_t *ifp;
- ASSERT(idx < DHD_MAX_IFS);
- ifp = dhd->iflist[idx];
+#if defined(CONFIG_HAS_WAKELOCK) || (defined(BCMSDIO) && (LINUX_VERSION_CODE > \
+ KERNEL_VERSION(2, 6, 36)))
+ dhd_info_t *dhd;
- ASSERT(ifp);
+ if (!pub)
+ return 0;
+ dhd = (dhd_info_t *)(pub->info);
+#endif /* CONFIG_HAS_WAKELOCK || BCMSDIO */
- ifp->block_ping = val;
- /* Disable rx_pkt_chain feature for interface if block_ping option is
- * enabled
- */
- dhd_update_rx_pkt_chainable_state(dhdp, idx);
- return BCME_OK;
+#ifdef CONFIG_HAS_WAKELOCK
+ /* Indicate to the SD Host to avoid going to suspend if internal locks are up */
+ if (dhd && (wake_lock_active(&dhd->wl_wifi) ||
+ (wake_lock_active(&dhd->wl_wdwake))))
+ return 1;
+#elif defined(BCMSDIO) && (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36))
+ if (dhd && (dhd->wakelock_counter > 0) && dhd_bus_dev_pm_enabled(pub))
+ return 1;
+#endif
+ return 0;
}
-int dhd_get_grat_arp_status(dhd_pub_t *dhdp, uint32 idx)
+int
+dhd_os_check_wakelock_all(dhd_pub_t *pub)
{
- dhd_info_t *dhd = dhdp->info;
- dhd_if_t *ifp;
+#if defined(CONFIG_HAS_WAKELOCK) || (defined(BCMSDIO) && (LINUX_VERSION_CODE > \
+ KERNEL_VERSION(2, 6, 36)))
+#if defined(CONFIG_HAS_WAKELOCK)
+ int l1, l2, l3, l4, l7, l8, l9;
+ int l5 = 0, l6 = 0;
+ int c, lock_active;
+#endif /* CONFIG_HAS_WAKELOCK */
+ dhd_info_t *dhd;
- ASSERT(idx < DHD_MAX_IFS);
+ if (!pub) {
+ return 0;
+ }
+ dhd = (dhd_info_t *)(pub->info);
+ if (!dhd) {
+ return 0;
+ }
+#endif /* CONFIG_HAS_WAKELOCK || BCMSDIO */
- ifp = dhd->iflist[idx];
+#ifdef CONFIG_HAS_WAKELOCK
+ c = dhd->wakelock_counter;
+ l1 = wake_lock_active(&dhd->wl_wifi);
+ l2 = wake_lock_active(&dhd->wl_wdwake);
+ l3 = wake_lock_active(&dhd->wl_rxwake);
+ l4 = wake_lock_active(&dhd->wl_ctrlwake);
+ l7 = wake_lock_active(&dhd->wl_evtwake);
+#ifdef BCMPCIE_OOB_HOST_WAKE
+ l5 = wake_lock_active(&dhd->wl_intrwake);
+#endif /* BCMPCIE_OOB_HOST_WAKE */
+#ifdef DHD_USE_SCAN_WAKELOCK
+ l6 = wake_lock_active(&dhd->wl_scanwake);
+#endif /* DHD_USE_SCAN_WAKELOCK */
+ l8 = wake_lock_active(&dhd->wl_pmwake);
+ l9 = wake_lock_active(&dhd->wl_txflwake);
+ lock_active = (l1 || l2 || l3 || l4 || l5 || l6 || l7 || l8 || l9);
- ASSERT(ifp);
+ /* Indicate to the Host to avoid going to suspend if internal locks are up */
+ if (lock_active) {
+ DHD_ERROR(("%s wakelock c-%d wl-%d wd-%d rx-%d "
+ "ctl-%d intr-%d scan-%d evt-%d, pm-%d, txfl-%d\n",
+ __FUNCTION__, c, l1, l2, l3, l4, l5, l6, l7, l8, l9));
+ return 1;
+ }
+#elif defined(BCMSDIO) && (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36))
+ if (dhd && (dhd->wakelock_counter > 0) && dhd_bus_dev_pm_enabled(pub)) {
+ return 1;
+ }
+#endif /* defined(BCMSDIO) && (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36)) */
+ return 0;
+}
- return ifp->grat_arp;
+int net_os_wake_unlock(struct net_device *dev)
+{
+ dhd_info_t *dhd = DHD_DEV_INFO(dev);
+ int ret = 0;
+
+ if (dhd)
+ ret = dhd_os_wake_unlock(&dhd->pub);
+ return ret;
}
-int dhd_set_grat_arp_status(dhd_pub_t *dhdp, uint32 idx, int val)
+int dhd_os_wd_wake_lock(dhd_pub_t *pub)
{
- dhd_info_t *dhd = dhdp->info;
- dhd_if_t *ifp;
- ASSERT(idx < DHD_MAX_IFS);
- ifp = dhd->iflist[idx];
+ dhd_info_t *dhd = (dhd_info_t *)(pub->info);
+ unsigned long flags;
+ int ret = 0;
- ASSERT(ifp);
+ if (dhd) {
+ spin_lock_irqsave(&dhd->wakelock_spinlock, flags);
+#ifdef CONFIG_HAS_WAKELOCK
+ /* if the watchdog wakelock is not currently held, acquire it now */
+ if (!dhd->wakelock_wd_counter)
+ wake_lock(&dhd->wl_wdwake);
+#endif
+ dhd->wakelock_wd_counter++;
+ ret = dhd->wakelock_wd_counter;
+ spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
+ }
+ return ret;
+}
- ifp->grat_arp = val;
+int dhd_os_wd_wake_unlock(dhd_pub_t *pub)
+{
+ dhd_info_t *dhd = (dhd_info_t *)(pub->info);
+ unsigned long flags;
+ int ret = 0;
- return BCME_OK;
+ if (dhd) {
+ spin_lock_irqsave(&dhd->wakelock_spinlock, flags);
+ if (dhd->wakelock_wd_counter) {
+ dhd->wakelock_wd_counter = 0;
+#ifdef CONFIG_HAS_WAKELOCK
+ wake_unlock(&dhd->wl_wdwake);
+#endif
+ }
+ spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
+ }
+ return ret;
}
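/*
 * Sketch of the watchdog wakelock semantics above: unlike the wl_wifi counter,
 * dhd_os_wd_wake_unlock() zeroes the counter and drops the lock on the first
 * call, regardless of nesting depth. The wrapper is hypothetical.
 */
static void example_wd_wakelock(dhd_pub_t *pub)
{
	dhd_os_wd_wake_lock(pub);	/* counter 0 -> 1: wake_lock(&wl_wdwake) */
	dhd_os_wd_wake_lock(pub);	/* counter 1 -> 2 */
	dhd_os_wd_wake_unlock(pub);	/* counter forced to 0, lock released */
}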
-int dhd_get_block_tdls_status(dhd_pub_t *dhdp, uint32 idx)
+#ifdef BCMPCIE_OOB_HOST_WAKE
+void
+dhd_os_oob_irq_wake_lock_timeout(dhd_pub_t *pub, int val)
{
- dhd_info_t *dhd = dhdp->info;
- dhd_if_t *ifp;
-
- ASSERT(idx < DHD_MAX_IFS);
-
- ifp = dhd->iflist[idx];
-
- ASSERT(ifp);
+#ifdef CONFIG_HAS_WAKELOCK
+ dhd_info_t *dhd = (dhd_info_t *)(pub->info);
- return ifp->block_tdls;
+ if (dhd) {
+ wake_lock_timeout(&dhd->wl_intrwake, msecs_to_jiffies(val));
+ }
+#endif /* CONFIG_HAS_WAKELOCK */
}
-int dhd_set_block_tdls_status(dhd_pub_t *dhdp, uint32 idx, int val)
+void
+dhd_os_oob_irq_wake_unlock(dhd_pub_t *pub)
{
- dhd_info_t *dhd = dhdp->info;
- dhd_if_t *ifp;
- ASSERT(idx < DHD_MAX_IFS);
- ifp = dhd->iflist[idx];
-
- ASSERT(ifp);
-
- ifp->block_tdls = val;
+#ifdef CONFIG_HAS_WAKELOCK
+ dhd_info_t *dhd = (dhd_info_t *)(pub->info);
- return BCME_OK;
+ if (dhd) {
+ /* if wl_intrwake is active, unlock it */
+ if (wake_lock_active(&dhd->wl_intrwake)) {
+ wake_unlock(&dhd->wl_intrwake);
+ }
+ }
+#endif /* CONFIG_HAS_WAKELOCK */
}
-#endif /* DHD_L2_FILTER */
+#endif /* BCMPCIE_OOB_HOST_WAKE */
-#if defined(SET_RPS_CPUS)
-int dhd_rps_cpus_enable(struct net_device *net, int enable)
+#ifdef DHD_USE_SCAN_WAKELOCK
+void
+dhd_os_scan_wake_lock_timeout(dhd_pub_t *pub, int val)
{
- dhd_info_t *dhd = DHD_DEV_INFO(net);
- dhd_if_t *ifp;
- int ifidx;
- char * RPS_CPU_SETBUF;
+#ifdef CONFIG_HAS_WAKELOCK
+ dhd_info_t *dhd = (dhd_info_t *)(pub->info);
- ifidx = dhd_net2idx(dhd, net);
- if (ifidx == DHD_BAD_IF) {
- DHD_ERROR(("%s bad ifidx\n", __FUNCTION__));
- return -ENODEV;
+ if (dhd) {
+ wake_lock_timeout(&dhd->wl_scanwake, msecs_to_jiffies(val));
}
+#endif /* CONFIG_HAS_WAKELOCK */
+}
- if (ifidx == PRIMARY_INF) {
- if (dhd->pub.op_mode == DHD_FLAG_IBSS_MODE) {
- DHD_INFO(("%s : set for IBSS.\n", __FUNCTION__));
- RPS_CPU_SETBUF = RPS_CPUS_MASK_IBSS;
- } else {
- DHD_INFO(("%s : set for BSS.\n", __FUNCTION__));
- RPS_CPU_SETBUF = RPS_CPUS_MASK;
- }
- } else if (ifidx == VIRTUAL_INF) {
- DHD_INFO(("%s : set for P2P.\n", __FUNCTION__));
- RPS_CPU_SETBUF = RPS_CPUS_MASK_P2P;
- } else {
- DHD_ERROR(("%s : Invalid index : %d.\n", __FUNCTION__, ifidx));
- return -EINVAL;
- }
+void
+dhd_os_scan_wake_unlock(dhd_pub_t *pub)
+{
+#ifdef CONFIG_HAS_WAKELOCK
+ dhd_info_t *dhd = (dhd_info_t *)(pub->info);
- ifp = dhd->iflist[ifidx];
- if (ifp) {
- if (enable) {
- DHD_INFO(("%s : set rps_cpus as [%s]\n", __FUNCTION__, RPS_CPU_SETBUF));
- custom_rps_map_set(ifp->net->_rx, RPS_CPU_SETBUF, strlen(RPS_CPU_SETBUF));
- } else {
- custom_rps_map_clear(ifp->net->_rx);
+ if (dhd) {
+ /* if wl_scanwake is active, unlock it */
+ if (wake_lock_active(&dhd->wl_scanwake)) {
+ wake_unlock(&dhd->wl_scanwake);
}
- } else {
- DHD_ERROR(("%s : ifp is NULL!!\n", __FUNCTION__));
- return -ENODEV;
}
- return BCME_OK;
+#endif /* CONFIG_HAS_WAKELOCK */
}
+#endif /* DHD_USE_SCAN_WAKELOCK */
-int custom_rps_map_set(struct netdev_rx_queue *queue, char *buf, size_t len)
+/* Waive wakelocks for operations such as IOVARs issued on the suspend path; must be
+ * balanced by a paired call to dhd_os_wake_lock_restore(). Returns the current
+ * wakelock counter.
+ */
+int dhd_os_wake_lock_waive(dhd_pub_t *pub)
{
- struct rps_map *old_map, *map;
- cpumask_var_t mask;
- int err, cpu, i;
- static DEFINE_SPINLOCK(rps_map_lock);
-
- DHD_INFO(("%s : Entered.\n", __FUNCTION__));
+ dhd_info_t *dhd = (dhd_info_t *)(pub->info);
+ unsigned long flags;
+ int ret = 0;
- if (!alloc_cpumask_var(&mask, GFP_KERNEL)) {
- DHD_ERROR(("%s : alloc_cpumask_var fail.\n", __FUNCTION__));
- return -ENOMEM;
- }
+ if (dhd && (dhd->dhd_state & DHD_ATTACH_STATE_WAKELOCKS_INIT)) {
+ spin_lock_irqsave(&dhd->wakelock_spinlock, flags);
- err = bitmap_parse(buf, len, cpumask_bits(mask), nr_cpumask_bits);
- if (err) {
- free_cpumask_var(mask);
- DHD_ERROR(("%s : bitmap_parse fail.\n", __FUNCTION__));
- return err;
+ /* dhd_os_wake_lock_waive/dhd_os_wake_lock_restore must be paired */
+ if (dhd->waive_wakelock == FALSE) {
+#ifdef DHD_TRACE_WAKE_LOCK
+ if (trace_wklock_onoff) {
+ STORE_WKLOCK_RECORD(DHD_WAIVE_LOCK);
+ }
+#endif /* DHD_TRACE_WAKE_LOCK */
+ /* record current lock status */
+ dhd->wakelock_before_waive = dhd->wakelock_counter;
+ dhd->waive_wakelock = TRUE;
+ }
+ ret = dhd->wakelock_wd_counter;
+ spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
}
+ return ret;
+}
- map = kzalloc(max_t(unsigned int,
- RPS_MAP_SIZE(cpumask_weight(mask)), L1_CACHE_BYTES),
- GFP_KERNEL);
- if (!map) {
- free_cpumask_var(mask);
- DHD_ERROR(("%s : map malloc fail.\n", __FUNCTION__));
- return -ENOMEM;
- }
+int dhd_os_wake_lock_restore(dhd_pub_t *pub)
+{
+ dhd_info_t *dhd = (dhd_info_t *)(pub->info);
+ unsigned long flags;
+ int ret = 0;
- i = 0;
- for_each_cpu(cpu, mask) {
- map->cpus[i++] = cpu;
- }
+ if (!dhd)
+ return 0;
+ if ((dhd->dhd_state & DHD_ATTACH_STATE_WAKELOCKS_INIT) == 0)
+ return 0;
- if (i) {
- map->len = i;
- } else {
- kfree(map);
- map = NULL;
- free_cpumask_var(mask);
- DHD_ERROR(("%s : mapping cpu fail.\n", __FUNCTION__));
- return -1;
- }
+ spin_lock_irqsave(&dhd->wakelock_spinlock, flags);
- spin_lock(&rps_map_lock);
- old_map = rcu_dereference_protected(queue->rps_map,
- lockdep_is_held(&rps_map_lock));
- rcu_assign_pointer(queue->rps_map, map);
- spin_unlock(&rps_map_lock);
+ /* dhd_os_wake_lock_waive/dhd_os_wake_lock_restore must be paired */
+ if (!dhd->waive_wakelock)
+ goto exit;
- if (map) {
- static_key_slow_inc(&rps_needed);
+ dhd->waive_wakelock = FALSE;
+ /* If another caller acquired a wakelock between dhd_os_wake_lock_waive/restore,
+ * make it up now by calling wake_lock or pm_stay_awake; likewise, if the lock was
+ * released in between, compensate with wake_unlock or pm_relax.
+ */
+#ifdef DHD_TRACE_WAKE_LOCK
+ if (trace_wklock_onoff) {
+ STORE_WKLOCK_RECORD(DHD_RESTORE_LOCK);
}
- if (old_map) {
- kfree_rcu(old_map, rcu);
- static_key_slow_dec(&rps_needed);
+#endif /* DHD_TRACE_WAKE_LOCK */
+
+ if (dhd->wakelock_before_waive == 0 && dhd->wakelock_counter > 0) {
+#ifdef CONFIG_HAS_WAKELOCK
+ wake_lock(&dhd->wl_wifi);
+#elif defined(BCMSDIO) && (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36))
+ dhd_bus_dev_pm_stay_awake(&dhd->pub);
+#endif
+ } else if (dhd->wakelock_before_waive > 0 && dhd->wakelock_counter == 0) {
+#ifdef CONFIG_HAS_WAKELOCK
+ wake_unlock(&dhd->wl_wifi);
+#elif defined(BCMSDIO) && (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36))
+ dhd_bus_dev_pm_relax(&dhd->pub);
+#endif
}
- free_cpumask_var(mask);
+ dhd->wakelock_before_waive = 0;
+exit:
+ ret = dhd->wakelock_wd_counter;
+ spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
+ return ret;
+}
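/*
 * Sketch of the waive/restore pairing around suspend-time IOVARs. Between the
 * two calls, dhd_os_wake_lock() only bumps the counter without touching the OS
 * wakelock; restore reconciles the counter afterwards. The suspend helper and
 * its body are hypothetical.
 */
static int example_suspend_iovar(dhd_pub_t *pub)
{
	int err = 0;

	dhd_os_wake_lock_waive(pub);	/* stop taking real wakelocks */

	/* Issue suspend-related IOVARs here; any dhd_os_wake_lock() calls on
	 * this path are absorbed by the waived counter.
	 */

	dhd_os_wake_lock_restore(pub);	/* reconcile counter vs. OS wakelock */
	return err;
}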
- DHD_INFO(("%s : Done. mapping cpu nummber : %d\n", __FUNCTION__, map->len));
- return map->len;
+void dhd_os_wake_lock_init(struct dhd_info *dhd)
+{
+ DHD_TRACE(("%s: initialize wake_lock_counters\n", __FUNCTION__));
+ dhd->wakelock_counter = 0;
+ dhd->wakelock_rx_timeout_enable = 0;
+ dhd->wakelock_ctrl_timeout_enable = 0;
+#ifdef CONFIG_HAS_WAKELOCK
+ // terence 20161023: do not destroy wl_wifi while wlan is down; doing so causes a NULL pointer dereference in dhd_ioctl_entry
+ wake_lock_init(&dhd->wl_rxwake, WAKE_LOCK_SUSPEND, "wlan_rx_wake");
+ wake_lock_init(&dhd->wl_ctrlwake, WAKE_LOCK_SUSPEND, "wlan_ctrl_wake");
+ wake_lock_init(&dhd->wl_evtwake, WAKE_LOCK_SUSPEND, "wlan_evt_wake");
+ wake_lock_init(&dhd->wl_pmwake, WAKE_LOCK_SUSPEND, "wlan_pm_wake");
+ wake_lock_init(&dhd->wl_txflwake, WAKE_LOCK_SUSPEND, "wlan_txfl_wake");
+#ifdef BCMPCIE_OOB_HOST_WAKE
+ wake_lock_init(&dhd->wl_intrwake, WAKE_LOCK_SUSPEND, "wlan_oob_irq_wake");
+#endif /* BCMPCIE_OOB_HOST_WAKE */
+#ifdef DHD_USE_SCAN_WAKELOCK
+ wake_lock_init(&dhd->wl_scanwake, WAKE_LOCK_SUSPEND, "wlan_scan_wake");
+#endif /* DHD_USE_SCAN_WAKELOCK */
+#endif /* CONFIG_HAS_WAKELOCK */
+#ifdef DHD_TRACE_WAKE_LOCK
+ dhd_wk_lock_trace_init(dhd);
+#endif /* DHD_TRACE_WAKE_LOCK */
+}
+
+void dhd_os_wake_lock_destroy(struct dhd_info *dhd)
+{
+ DHD_TRACE(("%s: deinit wake_lock_counters\n", __FUNCTION__));
+#ifdef CONFIG_HAS_WAKELOCK
+ dhd->wakelock_counter = 0;
+ dhd->wakelock_rx_timeout_enable = 0;
+ dhd->wakelock_ctrl_timeout_enable = 0;
+ // terence 20161023: do not destroy wl_wifi while wlan is down; doing so causes a NULL pointer dereference in dhd_ioctl_entry
+ wake_lock_destroy(&dhd->wl_rxwake);
+ wake_lock_destroy(&dhd->wl_ctrlwake);
+ wake_lock_destroy(&dhd->wl_evtwake);
+ wake_lock_destroy(&dhd->wl_pmwake);
+ wake_lock_destroy(&dhd->wl_txflwake);
+#ifdef BCMPCIE_OOB_HOST_WAKE
+ wake_lock_destroy(&dhd->wl_intrwake);
+#endif /* BCMPCIE_OOB_HOST_WAKE */
+#ifdef DHD_USE_SCAN_WAKELOCK
+ wake_lock_destroy(&dhd->wl_scanwake);
+#endif /* DHD_USE_SCAN_WAKELOCK */
+#ifdef DHD_TRACE_WAKE_LOCK
+ dhd_wk_lock_trace_deinit(dhd);
+#endif /* DHD_TRACE_WAKE_LOCK */
+#endif /* CONFIG_HAS_WAKELOCK */
}
-void custom_rps_map_clear(struct netdev_rx_queue *queue)
+bool dhd_os_check_if_up(dhd_pub_t *pub)
{
- struct rps_map *map;
+ if (!pub)
+ return FALSE;
+ return pub->up;
+}
- DHD_INFO(("%s : Entered.\n", __FUNCTION__));
+/* function to collect firmware, chip id and chip version info */
+void dhd_set_version_info(dhd_pub_t *dhdp, char *fw)
+{
+ int i;
- map = rcu_dereference_protected(queue->rps_map, 1);
- if (map) {
- RCU_INIT_POINTER(queue->rps_map, NULL);
- kfree_rcu(map, rcu);
- DHD_INFO(("%s : rps_cpus map clear.\n", __FUNCTION__));
- }
-}
-#endif // endif
+ i = snprintf(info_string, sizeof(info_string),
+ " Driver: %s\n Firmware: %s\n CLM: %s ", EPI_VERSION_STR, fw, clm_version);
+ printf("%s\n", info_string);
-#if defined(ARGOS_NOTIFY_CB)
+ if (!dhdp)
+ return;
-static int argos_status_notifier_wifi_cb(struct notifier_block *notifier,
- unsigned long speed, void *v);
-static int argos_status_notifier_p2p_cb(struct notifier_block *notifier,
- unsigned long speed, void *v);
+ i = snprintf(&info_string[i], sizeof(info_string) - i,
+ "\n Chip: %x Rev %x", dhd_conf_get_chip(dhdp),
+ dhd_conf_get_chiprev(dhdp));
+}
-int
-argos_register_notifier_init(struct net_device *net)
+int dhd_ioctl_entry_local(struct net_device *net, wl_ioctl_t *ioc, int cmd)
{
+ int ifidx;
int ret = 0;
+ dhd_info_t *dhd = NULL;
- DHD_INFO(("DHD: %s: \n", __FUNCTION__));
- argos_rps_ctrl_data.wlan_primary_netdev = net;
- argos_rps_ctrl_data.argos_rps_cpus_enabled = 0;
-
- if (argos_wifi.notifier_call == NULL) {
- argos_wifi.notifier_call = argos_status_notifier_wifi_cb;
- ret = sec_argos_register_notifier(&argos_wifi, ARGOS_WIFI_TABLE_LABEL);
- if (ret < 0) {
- DHD_ERROR(("DHD:Failed to register WIFI notifier, ret=%d\n", ret));
- goto exit;
- }
+ if (!net || !DEV_PRIV(net)) {
+ DHD_ERROR(("%s invalid parameter\n", __FUNCTION__));
+ return -EINVAL;
}
- if (argos_p2p.notifier_call == NULL) {
- argos_p2p.notifier_call = argos_status_notifier_p2p_cb;
- ret = sec_argos_register_notifier(&argos_p2p, ARGOS_P2P_TABLE_LABEL);
- if (ret < 0) {
- DHD_ERROR(("DHD:Failed to register P2P notifier, ret=%d\n", ret));
- sec_argos_unregister_notifier(&argos_wifi, ARGOS_WIFI_TABLE_LABEL);
- goto exit;
- }
+ dhd = DHD_DEV_INFO(net);
+ if (!dhd)
+ return -EINVAL;
+
+ ifidx = dhd_net2idx(dhd, net);
+ if (ifidx == DHD_BAD_IF) {
+ DHD_ERROR(("%s bad ifidx\n", __FUNCTION__));
+ return -ENODEV;
}
- return 0;
+ DHD_OS_WAKE_LOCK(&dhd->pub);
+ DHD_PERIM_LOCK(&dhd->pub);
-exit:
- if (argos_wifi.notifier_call) {
- argos_wifi.notifier_call = NULL;
- }
+ ret = dhd_wl_ioctl(&dhd->pub, ifidx, ioc, ioc->buf, ioc->len);
+ dhd_check_hang(net, &dhd->pub, ret);
- if (argos_p2p.notifier_call) {
- argos_p2p.notifier_call = NULL;
- }
+ DHD_PERIM_UNLOCK(&dhd->pub);
+ DHD_OS_WAKE_UNLOCK(&dhd->pub);
return ret;
}
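/*
 * Sketch of driving dhd_ioctl_entry_local() with a query-style wl_ioctl_t.
 * The field names (cmd/buf/len/set) and WLC_GET_MAGIC are assumed from the
 * usual Broadcom wlioctl definitions; the wrapper is hypothetical and the
 * third argument is unused by the body above.
 */
static int example_query_magic(struct net_device *net)
{
	int magic = 0;
	wl_ioctl_t ioc;

	memset(&ioc, 0, sizeof(ioc));
	ioc.cmd = WLC_GET_MAGIC;	/* simple "get" ioctl */
	ioc.buf = &magic;
	ioc.len = sizeof(magic);
	ioc.set = 0;			/* 0 = query, 1 = set */

	/* Takes the wake/perim locks, resolves ifidx and runs dhd_check_hang(). */
	return dhd_ioctl_entry_local(net, &ioc, 0);
}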
-int
-argos_register_notifier_deinit(void)
+bool dhd_os_check_hang(dhd_pub_t *dhdp, int ifidx, int ret)
{
- DHD_INFO(("DHD: %s: \n", __FUNCTION__));
+ struct net_device *net;
- if (argos_rps_ctrl_data.wlan_primary_netdev == NULL) {
- DHD_ERROR(("DHD: primary_net_dev is null %s: \n", __FUNCTION__));
- return -1;
+ net = dhd_idx2net(dhdp, ifidx);
+ if (!net) {
+ DHD_ERROR(("%s : Invalid index : %d\n", __FUNCTION__, ifidx));
+ return -EINVAL;
}
-#ifndef DHD_LB
- custom_rps_map_clear(argos_rps_ctrl_data.wlan_primary_netdev->_rx);
-#endif /* !DHD_LB */
- if (argos_p2p.notifier_call) {
- sec_argos_unregister_notifier(&argos_p2p, ARGOS_P2P_TABLE_LABEL);
- argos_p2p.notifier_call = NULL;
- }
+ return dhd_check_hang(net, dhdp, ret);
+}
- if (argos_wifi.notifier_call) {
- sec_argos_unregister_notifier(&argos_wifi, ARGOS_WIFI_TABLE_LABEL);
- argos_wifi.notifier_call = NULL;
- }
+/* Return instance */
+int dhd_get_instance(dhd_pub_t *dhdp)
+{
+ return dhdp->info->unit;
+}
- argos_rps_ctrl_data.wlan_primary_netdev = NULL;
- argos_rps_ctrl_data.argos_rps_cpus_enabled = 0;
- return 0;
+#ifdef PROP_TXSTATUS
+
+void dhd_wlfc_plat_init(void *dhd)
+{
+#ifdef USE_DYNAMIC_F2_BLKSIZE
+ dhdsdio_func_blocksize((dhd_pub_t *)dhd, 2, DYNAMIC_F2_BLKSIZE_FOR_NONLEGACY);
+#endif /* USE_DYNAMIC_F2_BLKSIZE */
+ return;
}
-int
-argos_status_notifier_wifi_cb(struct notifier_block *notifier,
- unsigned long speed, void *v)
+void dhd_wlfc_plat_deinit(void *dhd)
{
- dhd_info_t *dhd;
- dhd_pub_t *dhdp;
-#if defined(ARGOS_NOTIFY_CB)
- unsigned int pcie_irq = 0;
-#endif /* ARGOS_NOTIFY_CB */
- DHD_INFO(("DHD: %s: speed=%ld\n", __FUNCTION__, speed));
+#ifdef USE_DYNAMIC_F2_BLKSIZE
+ dhdsdio_func_blocksize((dhd_pub_t *)dhd, 2, sd_f2_blocksize);
+#endif /* USE_DYNAMIC_F2_BLKSIZE */
+ return;
+}
- if (argos_rps_ctrl_data.wlan_primary_netdev == NULL) {
- goto exit;
- }
+bool dhd_wlfc_skip_fc(void * dhdp, uint8 idx)
+{
+#ifdef SKIP_WLFC_ON_CONCURRENT
- dhd = DHD_DEV_INFO(argos_rps_ctrl_data.wlan_primary_netdev);
- if (dhd == NULL) {
- goto exit;
- }
+#ifdef WL_CFG80211
+ struct net_device * net = dhd_idx2net((dhd_pub_t *)dhdp, idx);
+ if (net)
+ /* enable flow control in vsdb mode */
+ return !(wl_cfg80211_is_concurrent_mode(net));
+#else
+ return TRUE; /* skip flow control */
+#endif /* WL_CFG80211 */
- dhdp = &dhd->pub;
- if (dhdp == NULL || !dhdp->up) {
- goto exit;
- }
- /* Check if reported TPut value is more than threshold value */
- if (speed > RPS_TPUT_THRESHOLD) {
- if (argos_rps_ctrl_data.argos_rps_cpus_enabled == 0) {
- /* It does not need to configre rps_cpus
- * if Load Balance is enabled
- */
-#ifndef DHD_LB
- int err = 0;
+#else
+ return FALSE;
+#endif /* SKIP_WLFC_ON_CONCURRENT */
+ return FALSE;
+}
+#endif /* PROP_TXSTATUS */
- if (cpu_online(RPS_CPUS_WLAN_CORE_ID)) {
- err = custom_rps_map_set(
- argos_rps_ctrl_data.wlan_primary_netdev->_rx,
- RPS_CPUS_MASK, strlen(RPS_CPUS_MASK));
- } else {
- DHD_ERROR(("DHD: %s: RPS_Set fail,"
- " Core=%d Offline\n", __FUNCTION__,
- RPS_CPUS_WLAN_CORE_ID));
- err = -1;
- }
+#ifdef BCMDBGFS
+#include <linux/debugfs.h>
- if (err < 0) {
- DHD_ERROR(("DHD: %s: Failed to RPS_CPUs. "
- "speed=%ld, error=%d\n",
- __FUNCTION__, speed, err));
- } else {
-#endif /* !DHD_LB */
-#if (defined(DHDTCPACK_SUPPRESS) && defined(BCMPCIE))
- if (dhdp->tcpack_sup_mode != TCPACK_SUP_HOLD) {
- DHD_ERROR(("%s : set ack suppress. TCPACK_SUP_ON(%d)\n",
- __FUNCTION__, TCPACK_SUP_HOLD));
- dhd_tcpack_suppress_set(dhdp, TCPACK_SUP_HOLD);
- }
-#endif /* DHDTCPACK_SUPPRESS && BCMPCIE */
- argos_rps_ctrl_data.argos_rps_cpus_enabled = 1;
-#ifndef DHD_LB
- DHD_ERROR(("DHD: %s: Set RPS_CPUs, speed=%ld\n",
- __FUNCTION__, speed));
- }
-#endif /* !DHD_LB */
- }
- } else {
- if (argos_rps_ctrl_data.argos_rps_cpus_enabled == 1) {
-#if (defined(DHDTCPACK_SUPPRESS) && defined(BCMPCIE))
- if (dhdp->tcpack_sup_mode != TCPACK_SUP_OFF) {
- DHD_ERROR(("%s : set ack suppress. TCPACK_SUP_OFF\n",
- __FUNCTION__));
- dhd_tcpack_suppress_set(dhdp, TCPACK_SUP_OFF);
- }
-#endif /* DHDTCPACK_SUPPRESS && BCMPCIE */
-#ifndef DHD_LB
- /* It does not need to configre rps_cpus
- * if Load Balance is enabled
- */
- custom_rps_map_clear(argos_rps_ctrl_data.wlan_primary_netdev->_rx);
- DHD_ERROR(("DHD: %s: Clear RPS_CPUs, speed=%ld\n", __FUNCTION__, speed));
- OSL_SLEEP(DELAY_TO_CLEAR_RPS_CPUS);
-#endif /* !DHD_LB */
- argos_rps_ctrl_data.argos_rps_cpus_enabled = 0;
- }
- }
+typedef struct dhd_dbgfs {
+ struct dentry *debugfs_dir;
+ struct dentry *debugfs_mem;
+ dhd_pub_t *dhdp;
+ uint32 size;
+} dhd_dbgfs_t;
-exit:
- return NOTIFY_OK;
+dhd_dbgfs_t g_dbgfs;
+
+extern uint32 dhd_readregl(void *bp, uint32 addr);
+extern uint32 dhd_writeregl(void *bp, uint32 addr, uint32 data);
+
+static int
+dhd_dbg_state_open(struct inode *inode, struct file *file)
+{
+ file->private_data = inode->i_private;
+ return 0;
}
-int
-argos_status_notifier_p2p_cb(struct notifier_block *notifier,
- unsigned long speed, void *v)
+static ssize_t
+dhd_dbg_state_read(struct file *file, char __user *ubuf,
+ size_t count, loff_t *ppos)
{
- DHD_INFO(("DHD: %s: speed=%ld\n", __FUNCTION__, speed));
- return argos_status_notifier_wifi_cb(notifier, speed, v);
+ ssize_t rval;
+ uint32 tmp;
+ loff_t pos = *ppos;
+ size_t ret;
+
+ if (pos < 0)
+ return -EINVAL;
+ if (pos >= g_dbgfs.size || !count)
+ return 0;
+ if (count > g_dbgfs.size - pos)
+ count = g_dbgfs.size - pos;
+
+ /* Basically enforce aligned 4 byte reads. It's up to the user to work out the details */
+ tmp = dhd_readregl(g_dbgfs.dhdp->bus, file->f_pos & (~3));
+
+ ret = copy_to_user(ubuf, &tmp, 4);
+ if (ret == count)
+ return -EFAULT;
+
+ count -= ret;
+ *ppos = pos + count;
+ rval = count;
+
+ return rval;
}
-#endif // endif
-#ifdef DHD_DEBUG_PAGEALLOC
-void
-dhd_page_corrupt_cb(void *handle, void *addr_corrupt, size_t len)
+static ssize_t
+dhd_debugfs_write(struct file *file, const char __user *ubuf, size_t count, loff_t *ppos)
{
- dhd_pub_t *dhdp = (dhd_pub_t *)handle;
+ loff_t pos = *ppos;
+ size_t ret;
+ uint32 buf;
- DHD_ERROR(("%s: Got dhd_page_corrupt_cb 0x%p %d\n",
- __FUNCTION__, addr_corrupt, (uint32)len));
+ if (pos < 0)
+ return -EINVAL;
+ if (pos >= g_dbgfs.size || !count)
+ return 0;
+ if (count > g_dbgfs.size - pos)
+ count = g_dbgfs.size - pos;
- DHD_OS_WAKE_LOCK(dhdp);
- prhex("Page Corruption:", addr_corrupt, len);
- dhd_dump_to_kernelog(dhdp);
-#if defined(BCMPCIE) && defined(DHD_FW_COREDUMP)
- /* Load the dongle side dump to host memory and then BUG_ON() */
- dhdp->memdump_enabled = DUMP_MEMONLY;
- dhdp->memdump_type = DUMP_TYPE_MEMORY_CORRUPTION;
- dhd_bus_mem_dump(dhdp);
-#endif /* BCMPCIE && DHD_FW_COREDUMP */
- DHD_OS_WAKE_UNLOCK(dhdp);
+ ret = copy_from_user(&buf, ubuf, sizeof(uint32));
+ if (ret == count)
+ return -EFAULT;
+
+ /* Basically enforce aligned 4 byte writes. It's up to the user to work out the details */
+ dhd_writeregl(g_dbgfs.dhdp->bus, file->f_pos & (~3), buf);
+
+ return count;
}
-EXPORT_SYMBOL(dhd_page_corrupt_cb);
-#endif /* DHD_DEBUG_PAGEALLOC */
-#if defined(BCMPCIE) && defined(DHD_PKTID_AUDIT_ENABLED)
-void
-dhd_pktid_error_handler(dhd_pub_t *dhdp)
+
+loff_t
+dhd_debugfs_lseek(struct file *file, loff_t off, int whence)
{
- DHD_ERROR(("%s: Got Pkt Id Audit failure \n", __FUNCTION__));
- DHD_OS_WAKE_LOCK(dhdp);
- dhd_dump_to_kernelog(dhdp);
-#ifdef DHD_FW_COREDUMP
- /* Load the dongle side dump to host memory */
- if (dhdp->memdump_enabled == DUMP_DISABLED) {
- dhdp->memdump_enabled = DUMP_MEMFILE;
+ loff_t pos = -1;
+
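+ /* whence: 0 = SEEK_SET, 1 = SEEK_CUR, 2 = SEEK_END (offset is taken back from the end) */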
+ switch (whence) {
+ case 0:
+ pos = off;
+ break;
+ case 1:
+ pos = file->f_pos + off;
+ break;
+ case 2:
+ pos = g_dbgfs.size - off;
+ }
+ return (pos < 0 || pos > g_dbgfs.size) ? -EINVAL : (file->f_pos = pos);
+}
+
+static const struct file_operations dhd_dbg_state_ops = {
+ .read = dhd_dbg_state_read,
+ .write = dhd_debugfs_write,
+ .open = dhd_dbg_state_open,
+ .llseek = dhd_debugfs_lseek
+};
+
+static void dhd_dbgfs_create(void)
+{
+ if (g_dbgfs.debugfs_dir) {
+ g_dbgfs.debugfs_mem = debugfs_create_file("mem", 0644, g_dbgfs.debugfs_dir,
+ NULL, &dhd_dbg_state_ops);
}
- dhdp->memdump_type = DUMP_TYPE_PKTID_AUDIT_FAILURE;
- dhd_bus_mem_dump(dhdp);
-#endif /* DHD_FW_COREDUMP */
- dhdp->hang_reason = HANG_REASON_PCIE_PKTID_ERROR;
- dhd_os_check_hang(dhdp, 0, -EREMOTEIO);
- DHD_OS_WAKE_UNLOCK(dhdp);
}
-#endif /* BCMPCIE && DHD_PKTID_AUDIT_ENABLED */
-struct net_device *
-dhd_linux_get_primary_netdev(dhd_pub_t *dhdp)
+void dhd_dbgfs_init(dhd_pub_t *dhdp)
{
- dhd_info_t *dhd = dhdp->info;
+ g_dbgfs.dhdp = dhdp;
+ g_dbgfs.size = 0x20000000; /* Allow access to various cores regs */
- if (dhd->iflist[0] && dhd->iflist[0]->net)
- return dhd->iflist[0]->net;
- else
- return NULL;
+ g_dbgfs.debugfs_dir = debugfs_create_dir("dhd", 0);
+ if (IS_ERR(g_dbgfs.debugfs_dir)) {
+ g_dbgfs.debugfs_dir = NULL;
+ return;
+ }
+
+ dhd_dbgfs_create();
+
+ return;
}
-fw_download_status_t
-dhd_fw_download_status(dhd_pub_t * dhd_pub)
+void dhd_dbgfs_remove(void)
{
- return dhd_pub->fw_download_status;
+ debugfs_remove(g_dbgfs.debugfs_mem);
+ debugfs_remove(g_dbgfs.debugfs_dir);
+
+ bzero((unsigned char *) &g_dbgfs, sizeof(g_dbgfs));
}
+#endif /* BCMDBGFS */
-static int
-dhd_create_to_notifier_skt(void)
+#ifdef WLMEDIA_HTSF
+
+static
+void dhd_htsf_addtxts(dhd_pub_t *dhdp, void *pktbuf)
{
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0))
- /* Kernel 3.7 onwards this API accepts only 3 arguments. */
- /* Kernel version 3.6 is a special case which accepts 4 arguments */
- nl_to_event_sk = netlink_kernel_create(&init_net, BCM_NL_USER, &dhd_netlink_cfg);
-#elif (LINUX_VERSION_CODE < KERNEL_VERSION(3, 6, 0))
- /* Kernel version 3.5 and below use this old API format */
- nl_to_event_sk = netlink_kernel_create(&init_net, BCM_NL_USER, 0,
- dhd_process_daemon_msg, NULL, THIS_MODULE);
-#else
- nl_to_event_sk = netlink_kernel_create(&init_net, BCM_NL_USER, THIS_MODULE,
- &dhd_netlink_cfg);
-#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0)) */
- if (!nl_to_event_sk)
- {
- printf("Error creating socket.\n");
- return -1;
+ dhd_info_t *dhd = (dhd_info_t *)(dhdp->info);
+ struct sk_buff *skb;
+ uint32 htsf = 0;
+ uint16 dport = 0, oldmagic = 0xACAC;
+ char *p1;
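+ /* Write the saved D11 core register snapshots captured before and after
+ * save/restore (SR) to per-core dump files; the VASIP before/after buffers
+ * are written out below in the same way.
+ */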
+ htsfts_t ts;
+
+ /* timestamp packet */
+
+ p1 = (char*) PKTDATA(dhdp->osh, pktbuf);
+
+ if (PKTLEN(dhdp->osh, pktbuf) > HTSF_MINLEN) {
+/* memcpy(&proto, p1+26, 4); */
+ memcpy(&dport, p1+40, 2);
+/* proto = ((ntoh32(proto))>> 16) & 0xFF; */
+ dport = ntoh16(dport);
}
- DHD_INFO(("nl_to socket created successfully...\n"));
- return 0;
-}
-void
-dhd_destroy_to_notifier_skt(void)
-{
- DHD_INFO(("Destroying nl_to socket\n"));
- netlink_kernel_release(nl_to_event_sk);
+ /* timestamp only if icmp or udp iperf with port 5555 */
+/* if (proto == 17 && dport == tsport) { */
+ if (dport >= tsport && dport <= tsport + 20) {
+
+ skb = (struct sk_buff *) pktbuf;
+
+ htsf = dhd_get_htsf(dhd, 0);
+ memset(skb->data + 44, 0, 2); /* clear checksum */
+ memcpy(skb->data+82, &oldmagic, 2);
+ memcpy(skb->data+84, &htsf, 4);
+
+ memset(&ts, 0, sizeof(htsfts_t));
+ ts.magic = HTSFMAGIC;
+ ts.prio = PKTPRIO(pktbuf);
+ ts.seqnum = htsf_seqnum++;
+ ts.c10 = get_cycles();
+ ts.t10 = htsf;
+ ts.endmagic = HTSFENDMAGIC;
+
+ memcpy(skb->data + HTSF_HOSTOFFSET, &ts, sizeof(ts));
+ }
}
-static void
-dhd_recv_msg_from_daemon(struct sk_buff *skb)
+static void dhd_dump_htsfhisto(histo_t *his, char *s)
{
- struct nlmsghdr *nlh;
- bcm_to_info_t *cmd;
-
- nlh = (struct nlmsghdr *)skb->data;
- cmd = (bcm_to_info_t *)nlmsg_data(nlh);
- if ((cmd->magic == BCM_TO_MAGIC) && (cmd->reason == REASON_DAEMON_STARTED)) {
- sender_pid = ((struct nlmsghdr *)(skb->data))->nlmsg_pid;
- DHD_INFO(("DHD Daemon Started\n"));
+ int pktcnt = 0, curval = 0, i;
+ for (i = 0; i < (NUMBIN-2); i++) {
+ curval += 500;
+ printf("%d ", his->bin[i]);
+ pktcnt += his->bin[i];
}
+ printf(" max: %d TotPkt: %d neg: %d [%s]\n", his->bin[NUMBIN-2], pktcnt,
+ his->bin[NUMBIN-1], s);
}
-int
-dhd_send_msg_to_daemon(struct sk_buff *skb, void *data, int size)
+static
+void sorttobin(int value, histo_t *histo)
{
- struct nlmsghdr *nlh;
- struct sk_buff *skb_out;
- int ret = BCME_ERROR;
+ int i, binval = 0;
- BCM_REFERENCE(skb);
- if (sender_pid == 0) {
- DHD_INFO(("Invalid PID 0\n"));
- skb_out = NULL;
- goto err;
+ if (value < 0) {
+ histo->bin[NUMBIN-1]++;
+ return;
}
+ if (value > histo->bin[NUMBIN-2]) /* store the max value */
+ histo->bin[NUMBIN-2] = value;
- if ((skb_out = nlmsg_new(size, 0)) == NULL) {
- DHD_ERROR(("%s: skb alloc failed\n", __FUNCTION__));
- ret = BCME_NOMEM;
- goto err;
+ for (i = 0; i < (NUMBIN-2); i++) {
+ binval += 500; /* 500 ms bins */
+ if (value <= binval) {
+ histo->bin[i]++;
+ return;
+ }
}
- nlh = nlmsg_put(skb_out, 0, 0, NLMSG_DONE, size, 0);
- if (nlh == NULL) {
- DHD_ERROR(("%s: nlmsg_put failed\n", __FUNCTION__));
- goto err;
+ histo->bin[NUMBIN-3]++;
+}
+
+static
+void dhd_htsf_addrxts(dhd_pub_t *dhdp, void *pktbuf)
+{
+ dhd_info_t *dhd = (dhd_info_t *)dhdp->info;
+ struct sk_buff *skb;
+ char *p1;
+ uint16 old_magic;
+ int d1, d2, d3, end2end;
+ htsfts_t *htsf_ts;
+ uint32 htsf;
+
+ skb = PKTTONATIVE(dhdp->osh, pktbuf);
+ p1 = (char*)PKTDATA(dhdp->osh, pktbuf);
+
+ if (PKTLEN(dhdp->osh, pktbuf) > HTSF_MINLEN) {
+ memcpy(&old_magic, p1+78, 2);
+ htsf_ts = (htsfts_t*) (p1 + HTSF_HOSTOFFSET - 4);
+ } else {
+ return;
}
- NETLINK_CB(skb_out).dst_group = 0; /* Unicast */
- (void)memcpy_s(nlmsg_data(nlh), size, (char *)data, size);
- if ((ret = nlmsg_unicast(nl_to_event_sk, skb_out, sender_pid)) < 0) {
- DHD_ERROR(("Error sending message, ret:%d\n", ret));
- /* skb is already freed inside nlmsg_unicast() on error case */
- /* explicitly making skb_out to NULL to avoid double free */
- skb_out = NULL;
- goto err;
+ if (htsf_ts->magic == HTSFMAGIC) {
+ htsf_ts->tE0 = dhd_get_htsf(dhd, 0);
+ htsf_ts->cE0 = get_cycles();
}
- return BCME_OK;
-err:
- if (skb_out) {
- nlmsg_free(skb_out);
+
+ if (old_magic == 0xACAC) {
+
+ tspktcnt++;
+ htsf = dhd_get_htsf(dhd, 0);
+ memcpy(skb->data+92, &htsf, sizeof(uint32));
+
+ memcpy(&ts[tsidx].t1, skb->data+80, 16);
+
+ d1 = ts[tsidx].t2 - ts[tsidx].t1;
+ d2 = ts[tsidx].t3 - ts[tsidx].t2;
+ d3 = ts[tsidx].t4 - ts[tsidx].t3;
+ end2end = ts[tsidx].t4 - ts[tsidx].t1;
+
+ sorttobin(d1, &vi_d1);
+ sorttobin(d2, &vi_d2);
+ sorttobin(d3, &vi_d3);
+ sorttobin(end2end, &vi_d4);
+
+ if (end2end > 0 && end2end > maxdelay) {
+ maxdelay = end2end;
+ maxdelaypktno = tspktcnt;
+ memcpy(&maxdelayts, &ts[tsidx], 16);
+ }
+ if (++tsidx >= TSMAX)
+ tsidx = 0;
}
- return ret;
}
-static void
-dhd_process_daemon_msg(struct sk_buff *skb)
+uint32 dhd_get_htsf(dhd_info_t *dhd, int ifidx)
{
- bcm_to_info_t to_info;
+ uint32 htsf = 0, cur_cycle, delta, delta_us;
+ uint32 factor, baseval, baseval2;
+ cycles_t t;
- to_info.magic = BCM_TO_MAGIC;
- to_info.reason = REASON_DAEMON_STARTED;
- to_info.trap = NO_TRAP;
+ t = get_cycles();
+ cur_cycle = t;
- dhd_recv_msg_from_daemon(skb);
- dhd_send_msg_to_daemon(skb, &to_info, sizeof(to_info));
-}
+ if (cur_cycle > dhd->htsf.last_cycle)
+ delta = cur_cycle - dhd->htsf.last_cycle;
+ else {
+ delta = cur_cycle + (0xFFFFFFFF - dhd->htsf.last_cycle);
+ }
-#ifdef DHD_LOG_DUMP
-bool
-dhd_log_dump_ecntr_enabled(void)
-{
- return (bool)logdump_ecntr_enable;
+ delta = delta >> 4;
+
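+ /* Divide the cycle delta by the calibrated cycles-per-TSF-tick coefficient
+ * (coef.coefdec1); the second decimal digit (coefdec2) is applied by
+ * interpolating between factor and factor+1.
+ */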
+ if (dhd->htsf.coef) {
+ /* times ten to get the first digit */
+ factor = (dhd->htsf.coef*10 + dhd->htsf.coefdec1);
+ baseval = (delta*10)/factor;
+ baseval2 = (delta*10)/(factor+1);
+ delta_us = (baseval - (((baseval - baseval2) * dhd->htsf.coefdec2)) / 10);
+ htsf = (delta_us << 4) + dhd->htsf.last_tsf + HTSF_BUS_DELAY;
+ } else {
+ DHD_ERROR(("-------dhd->htsf.coef = 0 -------\n"));
+ }
+
+ return htsf;
}
-bool
-dhd_log_dump_rtt_enabled(void)
+static void dhd_dump_latency(void)
{
- return (bool)logdump_rtt_enable;
+ int i, max = 0;
+ int d1, d2, d3, d4, d5;
+
+ printf("T1 T2 T3 T4 d1 d2 t4-t1 i \n");
+ for (i = 0; i < TSMAX; i++) {
+ d1 = ts[i].t2 - ts[i].t1;
+ d2 = ts[i].t3 - ts[i].t2;
+ d3 = ts[i].t4 - ts[i].t3;
+ d4 = ts[i].t4 - ts[i].t1;
+ d5 = ts[max].t4-ts[max].t1;
+ if (d4 > d5 && d4 > 0) {
+ max = i;
+ }
+ printf("%08X %08X %08X %08X \t%d %d %d %d i=%d\n",
+ ts[i].t1, ts[i].t2, ts[i].t3, ts[i].t4,
+ d1, d2, d3, d4, i);
+ }
+
+ printf("current idx = %d \n", tsidx);
+
+ printf("Highest latency %d pkt no.%d total=%d\n", maxdelay, maxdelaypktno, tspktcnt);
+ printf("%08X %08X %08X %08X \t%d %d %d %d\n",
+ maxdelayts.t1, maxdelayts.t2, maxdelayts.t3, maxdelayts.t4,
+ maxdelayts.t2 - maxdelayts.t1,
+ maxdelayts.t3 - maxdelayts.t2,
+ maxdelayts.t4 - maxdelayts.t3,
+ maxdelayts.t4 - maxdelayts.t1);
}
-void
-dhd_log_dump_init(dhd_pub_t *dhd)
+
+static int
+dhd_ioctl_htsf_get(dhd_info_t *dhd, int ifidx)
{
- struct dhd_log_dump_buf *dld_buf, *dld_buf_special;
- int i = 0;
- uint8 *prealloc_buf = NULL, *bufptr = NULL;
-#if defined(CONFIG_DHD_USE_STATIC_BUF) && defined(DHD_USE_STATIC_MEMDUMP)
- int prealloc_idx = DHD_PREALLOC_DHD_LOG_DUMP_BUF;
-#endif /* CONFIG_DHD_USE_STATIC_BUF && DHD_USE_STATIC_MEMDUMP */
+ char buf[32];
int ret;
- dhd_dbg_ring_t *ring = NULL;
- unsigned long flags = 0;
- dhd_info_t *dhd_info = dhd->info;
- void *cookie_buf = NULL;
+ uint32 s1, s2;
- BCM_REFERENCE(ret);
- BCM_REFERENCE(ring);
- BCM_REFERENCE(flags);
+ struct tsf {
+ uint32 low;
+ uint32 high;
+ } tsf_buf;
- /* sanity check */
- if (logdump_prsrv_tailsize <= 0 ||
- logdump_prsrv_tailsize > DHD_LOG_DUMP_MAX_TAIL_FLUSH_SIZE) {
- logdump_prsrv_tailsize = DHD_LOG_DUMP_MAX_TAIL_FLUSH_SIZE;
- }
- /* now adjust the preserve log flush size based on the
- * kernel printk log buffer size
- */
-#ifdef CONFIG_LOG_BUF_SHIFT
- DHD_ERROR(("%s: kernel log buf size = %uKB; logdump_prsrv_tailsize = %uKB;"
- " limit prsrv tail size to = %uKB\n",
- __FUNCTION__, (1 << CONFIG_LOG_BUF_SHIFT)/1024,
- logdump_prsrv_tailsize/1024, LOG_DUMP_KERNEL_TAIL_FLUSH_SIZE/1024));
+ memset(&tsf_buf, 0, sizeof(tsf_buf));
- if (logdump_prsrv_tailsize > LOG_DUMP_KERNEL_TAIL_FLUSH_SIZE) {
- logdump_prsrv_tailsize = LOG_DUMP_KERNEL_TAIL_FLUSH_SIZE;
+ s1 = dhd_get_htsf(dhd, 0);
+ ret = dhd_iovar(&dhd->pub, ifidx, "tsf", NULL, 0, buf, sizeof(buf), FALSE);
+ if (ret < 0) {
+ if (ret == -EIO) {
+ DHD_ERROR(("%s: tsf is not supported by device\n",
+ dhd_ifname(&dhd->pub, ifidx)));
+ return -EOPNOTSUPP;
+ }
+ return ret;
}
-#else
- DHD_ERROR(("%s: logdump_prsrv_tailsize = %uKB \n",
- __FUNCTION__, logdump_prsrv_tailsize/1024);
-#endif /* CONFIG_LOG_BUF_SHIFT */
+ s2 = dhd_get_htsf(dhd, 0);
- mutex_init(&dhd_info->logdump_lock);
+ memcpy(&tsf_buf, buf, sizeof(tsf_buf));
+ printf(" TSF_h=%04X lo=%08X Calc:htsf=%08X, coef=%d.%d%d delta=%d ",
+ tsf_buf.high, tsf_buf.low, s2, dhd->htsf.coef, dhd->htsf.coefdec1,
+ dhd->htsf.coefdec2, s2-tsf_buf.low);
+ printf("lasttsf=%08X lastcycle=%08X\n", dhd->htsf.last_tsf, dhd->htsf.last_cycle);
+ return 0;
+}
- /* initialize log dump buf structures */
- memset(g_dld_buf, 0, sizeof(struct dhd_log_dump_buf) * DLD_BUFFER_NUM);
+void htsf_update(dhd_info_t *dhd, void *data)
+{
+ static ulong cur_cycle = 0, prev_cycle = 0;
+ uint32 htsf, tsf_delta = 0;
+ uint32 hfactor = 0, cyc_delta, dec1 = 0, dec2, dec3, tmp;
+ ulong b, a;
+ cycles_t t;
- /* set the log dump buffer size based on the module_param */
- if (logdump_max_bufsize > LOG_DUMP_GENERAL_MAX_BUFSIZE ||
- logdump_max_bufsize <= 0)
- dld_buf_size[DLD_BUF_TYPE_GENERAL] = LOG_DUMP_GENERAL_MAX_BUFSIZE;
- else
- dld_buf_size[DLD_BUF_TYPE_GENERAL] = logdump_max_bufsize;
+ /* cycles_t in include/mips/timex.h */
- /* pre-alloc the memory for the log buffers & 'special' buffer */
- dld_buf_special = &g_dld_buf[DLD_BUF_TYPE_SPECIAL];
-#if defined(CONFIG_DHD_USE_STATIC_BUF) && defined(DHD_USE_STATIC_MEMDUMP)
- DHD_ERROR(("%s : Try to allocate memory total(%d) special(%d)\n",
- __FUNCTION__, LOG_DUMP_TOTAL_BUFSIZE, LOG_DUMP_SPECIAL_MAX_BUFSIZE));
- prealloc_buf = DHD_OS_PREALLOC(dhd, prealloc_idx++, LOG_DUMP_TOTAL_BUFSIZE);
- dld_buf_special->buffer = DHD_OS_PREALLOC(dhd, prealloc_idx++,
- dld_buf_size[DLD_BUF_TYPE_SPECIAL]);
-#else
- prealloc_buf = MALLOCZ(dhd->osh, LOG_DUMP_TOTAL_BUFSIZE);
- dld_buf_special->buffer = MALLOCZ(dhd->osh, dld_buf_size[DLD_BUF_TYPE_SPECIAL]);
-#endif /* CONFIG_DHD_USE_STATIC_BUF && DHD_USE_STATIC_MEMDUMP */
- if (!prealloc_buf) {
- DHD_ERROR(("Failed to pre-allocate memory for log buffers !\n"));
- goto fail;
+ t = get_cycles();
+
+ prev_cycle = cur_cycle;
+ cur_cycle = t;
+
+ if (cur_cycle > prev_cycle)
+ cyc_delta = cur_cycle - prev_cycle;
+ else {
+ b = cur_cycle;
+ a = prev_cycle;
+ cyc_delta = cur_cycle + (0xFFFFFFFF - prev_cycle);
}
- if (!dld_buf_special->buffer) {
- DHD_ERROR(("Failed to pre-allocate memory for special buffer !\n"));
- goto fail;
+
+ if (data == NULL)
+ printf(" tsf update ata point er is null \n");
+
+ memcpy(&prev_tsf, &cur_tsf, sizeof(tsf_t));
+ memcpy(&cur_tsf, data, sizeof(tsf_t));
+
+ if (cur_tsf.low == 0) {
+ DHD_INFO((" ---- 0 TSF, do not update, return\n"));
+ return;
}
- bufptr = prealloc_buf;
- for (i = 0; i < DLD_BUFFER_NUM; i++) {
- dld_buf = &g_dld_buf[i];
- dld_buf->dhd_pub = dhd;
- spin_lock_init(&dld_buf->lock);
- dld_buf->wraparound = 0;
- if (i != DLD_BUF_TYPE_SPECIAL) {
- dld_buf->buffer = bufptr;
- dld_buf->max = (unsigned long)dld_buf->buffer + dld_buf_size[i];
- bufptr = (uint8 *)dld_buf->max;
+ if (cur_tsf.low > prev_tsf.low)
+ tsf_delta = (cur_tsf.low - prev_tsf.low);
+ else {
+ DHD_INFO((" ---- tsf low is smaller cur_tsf= %08X, prev_tsf=%08X, \n",
+ cur_tsf.low, prev_tsf.low));
+ if (cur_tsf.high > prev_tsf.high) {
+ tsf_delta = cur_tsf.low + (0xFFFFFFFF - prev_tsf.low);
+ DHD_INFO((" ---- Wrap around tsf coutner adjusted TSF=%08X\n", tsf_delta));
} else {
- dld_buf->max = (unsigned long)dld_buf->buffer + dld_buf_size[i];
+ return; /* do not update */
}
- dld_buf->present = dld_buf->front = dld_buf->buffer;
- dld_buf->remain = dld_buf_size[i];
- dld_buf->enable = 1;
}
-#ifdef EWP_ECNTRS_LOGGING
- /* now use the rest of the pre-alloc'd memory for filter and ecounter log */
- dhd->ecntr_dbg_ring = MALLOCZ(dhd->osh, sizeof(dhd_dbg_ring_t));
- if (!dhd->ecntr_dbg_ring)
- goto fail;
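+ /* hfactor is the integer part of cyc_delta/tsf_delta; dec1 and dec2 are its
+ * first two decimal digits, and dec3 is computed only to round dec2 up.
+ */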
+ if (tsf_delta) {
+ hfactor = cyc_delta / tsf_delta;
+ tmp = (cyc_delta - (hfactor * tsf_delta))*10;
+ dec1 = tmp/tsf_delta;
+ dec2 = ((tmp - dec1*tsf_delta)*10) / tsf_delta;
+ tmp = (tmp - (dec1*tsf_delta))*10;
+ dec3 = ((tmp - dec2*tsf_delta)*10) / tsf_delta;
- ring = (dhd_dbg_ring_t *)dhd->ecntr_dbg_ring;
- ret = dhd_dbg_ring_init(dhd, ring, ECNTR_RING_ID,
- ECNTR_RING_NAME, LOG_DUMP_ECNTRS_MAX_BUFSIZE,
- bufptr, TRUE);
- if (ret != BCME_OK) {
- DHD_ERROR(("%s: unable to init ecntr ring !\n",
- __FUNCTION__));
- goto fail;
+ if (dec3 > 4) {
+ if (dec2 == 9) {
+ dec2 = 0;
+ if (dec1 == 9) {
+ dec1 = 0;
+ hfactor++;
+ } else {
+ dec1++;
+ }
+ } else {
+ dec2++;
+ }
+ }
}
- DHD_DBG_RING_LOCK(ring->lock, flags);
- ring->state = RING_ACTIVE;
- ring->threshold = 0;
- DHD_DBG_RING_UNLOCK(ring->lock, flags);
-
- bufptr += LOG_DUMP_ECNTRS_MAX_BUFSIZE;
-#endif /* EWP_ECNTRS_LOGGING */
-#ifdef EWP_RTT_LOGGING
- /* now use the rest of the pre-alloc'd memory for filter and ecounter log */
- dhd->rtt_dbg_ring = MALLOCZ(dhd->osh, sizeof(dhd_dbg_ring_t));
- if (!dhd->rtt_dbg_ring)
- goto fail;
-
- ring = (dhd_dbg_ring_t *)dhd->rtt_dbg_ring;
- ret = dhd_dbg_ring_init(dhd, ring, RTT_RING_ID,
- RTT_RING_NAME, LOG_DUMP_RTT_MAX_BUFSIZE,
- bufptr, TRUE);
- if (ret != BCME_OK) {
- DHD_ERROR(("%s: unable to init ecntr ring !\n",
- __FUNCTION__));
- goto fail;
+ if (hfactor) {
+ htsf = ((cyc_delta * 10) / (hfactor*10+dec1)) + prev_tsf.low;
+ dhd->htsf.coef = hfactor;
+ dhd->htsf.last_cycle = cur_cycle;
+ dhd->htsf.last_tsf = cur_tsf.low;
+ dhd->htsf.coefdec1 = dec1;
+ dhd->htsf.coefdec2 = dec2;
+ } else {
+ htsf = prev_tsf.low;
}
- DHD_DBG_RING_LOCK(ring->lock, flags);
- ring->state = RING_ACTIVE;
- ring->threshold = 0;
- DHD_DBG_RING_UNLOCK(ring->lock, flags);
+}
- bufptr += LOG_DUMP_RTT_MAX_BUFSIZE;
-#endif /* EWP_RTT_LOGGING */
+#endif /* WLMEDIA_HTSF */
- /* Concise buffer is used as intermediate buffer for following purposes
- * a) pull ecounters records temporarily before
- * writing it to file
- * b) to store dhd dump data before putting it to file
- * It should have a size equal to
- * MAX(largest possible ecntr record, 'dhd dump' data size)
- */
- dhd->concise_dbg_buf = MALLOC(dhd->osh, CONCISE_DUMP_BUFLEN);
- if (!dhd->concise_dbg_buf) {
- DHD_ERROR(("%s: unable to alloc mem for concise debug info !\n",
- __FUNCTION__));
- goto fail;
- }
+#ifdef CUSTOM_SET_CPUCORE
+void dhd_set_cpucore(dhd_pub_t *dhd, int set)
+{
+ int e_dpc = 0, e_rxf = 0, retry_set = 0;
- cookie_buf = MALLOC(dhd->osh, LOG_DUMP_COOKIE_BUFSIZE);
- if (!cookie_buf) {
- DHD_ERROR(("%s: unable to alloc mem for logdump cookie buffer\n",
- __FUNCTION__));
- goto fail;
- }
- ret = dhd_logdump_cookie_init(dhd, cookie_buf, LOG_DUMP_COOKIE_BUFSIZE);
- if (ret != BCME_OK) {
- MFREE(dhd->osh, cookie_buf, LOG_DUMP_COOKIE_BUFSIZE);
- goto fail;
+ if (!(dhd->chan_isvht80)) {
+ DHD_ERROR(("%s: chan_status(%d) cpucore!!!\n", __FUNCTION__, dhd->chan_isvht80));
+ return;
}
- return;
-fail:
-
- if (dhd->logdump_cookie) {
- dhd_logdump_cookie_deinit(dhd);
- MFREE(dhd->osh, dhd->logdump_cookie, LOG_DUMP_COOKIE_BUFSIZE);
- dhd->logdump_cookie = NULL;
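+ /* Pin the DPC and RXF threads to their configured cores (or back to the
+ * primary core when 'set' is FALSE), retrying briefly if the affinity call fails.
+ */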
+ if (DPC_CPUCORE) {
+ do {
+ if (set == TRUE) {
+ e_dpc = set_cpus_allowed_ptr(dhd->current_dpc,
+ cpumask_of(DPC_CPUCORE));
+ } else {
+ e_dpc = set_cpus_allowed_ptr(dhd->current_dpc,
+ cpumask_of(PRIMARY_CPUCORE));
+ }
+ if (retry_set++ > MAX_RETRY_SET_CPUCORE) {
+ DHD_ERROR(("%s: dpc(%d) invalid cpu!\n", __FUNCTION__, e_dpc));
+ return;
+ }
+ if (e_dpc < 0)
+ OSL_SLEEP(1);
+ } while (e_dpc < 0);
+ }
+ if (RXF_CPUCORE) {
+ do {
+ if (set == TRUE) {
+ e_rxf = set_cpus_allowed_ptr(dhd->current_rxf,
+ cpumask_of(RXF_CPUCORE));
+ } else {
+ e_rxf = set_cpus_allowed_ptr(dhd->current_rxf,
+ cpumask_of(PRIMARY_CPUCORE));
+ }
+ if (retry_set++ > MAX_RETRY_SET_CPUCORE) {
+ DHD_ERROR(("%s: rxf(%d) invalid cpu!\n", __FUNCTION__, e_rxf));
+ return;
+ }
+ if (e_rxf < 0)
+ OSL_SLEEP(1);
+ } while (e_rxf < 0);
}
+#ifdef DHD_OF_SUPPORT
+ interrupt_set_cpucore(set, DPC_CPUCORE, PRIMARY_CPUCORE);
+#endif /* DHD_OF_SUPPORT */
+ DHD_TRACE(("%s: set(%d) cpucore success!\n", __FUNCTION__, set));
- if (dhd->concise_dbg_buf) {
- MFREE(dhd->osh, dhd->concise_dbg_buf, CONCISE_DUMP_BUFLEN);
- }
+ return;
+}
+#endif /* CUSTOM_SET_CPUCORE */
-#ifdef EWP_ECNTRS_LOGGING
- if (dhd->ecntr_dbg_ring) {
- ring = (dhd_dbg_ring_t *)dhd->ecntr_dbg_ring;
- dhd_dbg_ring_deinit(dhd, ring);
- ring->ring_buf = NULL;
- ring->ring_size = 0;
- MFREE(dhd->osh, ring, sizeof(dhd_dbg_ring_t));
- dhd->ecntr_dbg_ring = NULL;
- }
-#endif /* EWP_ECNTRS_LOGGING */
+#ifdef DHD_MCAST_REGEN
+/* Get interface specific mcast_regen configuration */
+int dhd_get_mcast_regen_bss_enable(dhd_pub_t *dhdp, uint32 idx)
+{
+ dhd_info_t *dhd = dhdp->info;
+ dhd_if_t *ifp;
-#ifdef EWP_RTT_LOGGING
- if (dhd->rtt_dbg_ring) {
- ring = (dhd_dbg_ring_t *)dhd->rtt_dbg_ring;
- dhd_dbg_ring_deinit(dhd, ring);
- ring->ring_buf = NULL;
- ring->ring_size = 0;
- MFREE(dhd->osh, ring, sizeof(dhd_dbg_ring_t));
- dhd->rtt_dbg_ring = NULL;
- }
-#endif /* EWP_RTT_LOGGING */
+ ASSERT(idx < DHD_MAX_IFS);
-#if defined(CONFIG_DHD_USE_STATIC_BUF) && defined(DHD_USE_STATIC_MEMDUMP)
- if (prealloc_buf) {
- DHD_OS_PREFREE(dhd, prealloc_buf, LOG_DUMP_TOTAL_BUFSIZE);
- }
- if (dld_buf_special->buffer) {
- DHD_OS_PREFREE(dhd, dld_buf_special->buffer,
- dld_buf_size[DLD_BUF_TYPE_SPECIAL]);
- }
-#else
- if (prealloc_buf) {
- MFREE(dhd->osh, prealloc_buf, LOG_DUMP_TOTAL_BUFSIZE);
- }
- if (dld_buf_special->buffer) {
- MFREE(dhd->osh, dld_buf_special->buffer,
- dld_buf_size[DLD_BUF_TYPE_SPECIAL]);
- }
-#endif /* CONFIG_DHD_USE_STATIC_BUF */
- for (i = 0; i < DLD_BUFFER_NUM; i++) {
- dld_buf = &g_dld_buf[i];
- dld_buf->enable = 0;
- dld_buf->buffer = NULL;
- }
+ ifp = dhd->iflist[idx];
- mutex_destroy(&dhd_info->logdump_lock);
+ return ifp->mcast_regen_bss_enable;
}
-void
-dhd_log_dump_deinit(dhd_pub_t *dhd)
+/* Set interface specific mcast_regen configuration */
+int dhd_set_mcast_regen_bss_enable(dhd_pub_t *dhdp, uint32 idx, int val)
{
- struct dhd_log_dump_buf *dld_buf = NULL, *dld_buf_special = NULL;
- int i = 0;
- dhd_info_t *dhd_info = dhd->info;
- dhd_dbg_ring_t *ring = NULL;
+ dhd_info_t *dhd = dhdp->info;
+ dhd_if_t *ifp;
- BCM_REFERENCE(ring);
+ ASSERT(idx < DHD_MAX_IFS);
- if (dhd->concise_dbg_buf) {
- MFREE(dhd->osh, dhd->concise_dbg_buf, CONCISE_DUMP_BUFLEN);
- dhd->concise_dbg_buf = NULL;
- }
+ ifp = dhd->iflist[idx];
- if (dhd->logdump_cookie) {
- dhd_logdump_cookie_deinit(dhd);
- MFREE(dhd->osh, dhd->logdump_cookie, LOG_DUMP_COOKIE_BUFSIZE);
- dhd->logdump_cookie = NULL;
- }
+ ifp->mcast_regen_bss_enable = val;
-#ifdef EWP_ECNTRS_LOGGING
- if (dhd->ecntr_dbg_ring) {
- ring = (dhd_dbg_ring_t *)dhd->ecntr_dbg_ring;
- dhd_dbg_ring_deinit(dhd, ring);
- ring->ring_buf = NULL;
- ring->ring_size = 0;
- MFREE(dhd->osh, ring, sizeof(dhd_dbg_ring_t));
- dhd->ecntr_dbg_ring = NULL;
- }
-#endif /* EWP_ECNTRS_LOGGING */
+ /* Disable rx_pkt_chain feature for interface, if mcast_regen feature
+ * is enabled
+ */
+ dhd_update_rx_pkt_chainable_state(dhdp, idx);
+ return BCME_OK;
+}
+#endif /* DHD_MCAST_REGEN */
-#ifdef EWP_RTT_LOGGING
- if (dhd->rtt_dbg_ring) {
- ring = (dhd_dbg_ring_t *)dhd->rtt_dbg_ring;
- dhd_dbg_ring_deinit(dhd, ring);
- ring->ring_buf = NULL;
- ring->ring_size = 0;
- MFREE(dhd->osh, ring, sizeof(dhd_dbg_ring_t));
- dhd->rtt_dbg_ring = NULL;
- }
-#endif /* EWP_RTT_LOGGING */
+/* Get interface specific ap_isolate configuration */
+int dhd_get_ap_isolate(dhd_pub_t *dhdp, uint32 idx)
+{
+ dhd_info_t *dhd = dhdp->info;
+ dhd_if_t *ifp;
- /* 'general' buffer points to start of the pre-alloc'd memory */
- dld_buf = &g_dld_buf[DLD_BUF_TYPE_GENERAL];
- dld_buf_special = &g_dld_buf[DLD_BUF_TYPE_SPECIAL];
-#if defined(CONFIG_DHD_USE_STATIC_BUF) && defined(DHD_USE_STATIC_MEMDUMP)
- if (dld_buf->buffer) {
- DHD_OS_PREFREE(dhd, dld_buf->buffer, LOG_DUMP_TOTAL_BUFSIZE);
- }
- if (dld_buf_special->buffer) {
- DHD_OS_PREFREE(dhd, dld_buf_special->buffer,
- dld_buf_size[DLD_BUF_TYPE_SPECIAL]);
- }
-#else
- if (dld_buf->buffer) {
- MFREE(dhd->osh, dld_buf->buffer, LOG_DUMP_TOTAL_BUFSIZE);
- }
- if (dld_buf_special->buffer) {
- MFREE(dhd->osh, dld_buf_special->buffer,
- dld_buf_size[DLD_BUF_TYPE_SPECIAL]);
- }
-#endif /* CONFIG_DHD_USE_STATIC_BUF */
- for (i = 0; i < DLD_BUFFER_NUM; i++) {
- dld_buf = &g_dld_buf[i];
- dld_buf->enable = 0;
- dld_buf->buffer = NULL;
- }
+ ASSERT(idx < DHD_MAX_IFS);
+
+ ifp = dhd->iflist[idx];
- mutex_destroy(&dhd_info->logdump_lock);
+ return ifp->ap_isolate;
}
-void
-dhd_log_dump_write(int type, char *binary_data,
- int binary_len, const char *fmt, ...)
+/* Set interface specific ap_isolate configuration */
+int dhd_set_ap_isolate(dhd_pub_t *dhdp, uint32 idx, int val)
{
- int len = 0;
- char tmp_buf[DHD_LOG_DUMP_MAX_TEMP_BUFFER_SIZE] = {0, };
- va_list args;
- unsigned long flags = 0;
- struct dhd_log_dump_buf *dld_buf = NULL;
- bool flush_log = FALSE;
-
- if (type < 0 || type >= DLD_BUFFER_NUM) {
- DHD_INFO(("%s: Unknown DHD_LOG_DUMP_BUF_TYPE(%d).\n",
- __FUNCTION__, type));
- return;
- }
+ dhd_info_t *dhd = dhdp->info;
+ dhd_if_t *ifp;
- dld_buf = &g_dld_buf[type];
+ ASSERT(idx < DHD_MAX_IFS);
- if (dld_buf->enable != 1) {
- return;
- }
+ ifp = dhd->iflist[idx];
- va_start(args, fmt);
- len = vsnprintf(tmp_buf, DHD_LOG_DUMP_MAX_TEMP_BUFFER_SIZE, fmt, args);
- /* Non ANSI C99 compliant returns -1,
- * ANSI compliant return len >= DHD_LOG_DUMP_MAX_TEMP_BUFFER_SIZE
- */
- va_end(args);
- if (len < 0) {
- return;
- }
+ if (ifp)
+ ifp->ap_isolate = val;
- if (len >= DHD_LOG_DUMP_MAX_TEMP_BUFFER_SIZE) {
- len = DHD_LOG_DUMP_MAX_TEMP_BUFFER_SIZE - 1;
- tmp_buf[len] = '\0';
- }
+ return 0;
+}
- /* make a critical section to eliminate race conditions */
- spin_lock_irqsave(&dld_buf->lock, flags);
- if (dld_buf->remain < len) {
- dld_buf->wraparound = 1;
- dld_buf->present = dld_buf->front;
- dld_buf->remain = dld_buf_size[type];
- /* if wrap around happens, flush the ring buffer to the file */
- flush_log = TRUE;
- }
+#ifdef DHD_FW_COREDUMP
+#if defined(CONFIG_X86)
+#define MEMDUMPINFO_LIVE "/installmedia/.memdump.info"
+#define MEMDUMPINFO_INST "/data/.memdump.info"
+#endif /* CONFIG_X86 */
- memcpy(dld_buf->present, tmp_buf, len);
- dld_buf->remain -= len;
- dld_buf->present += len;
- spin_unlock_irqrestore(&dld_buf->lock, flags);
+#ifdef CUSTOMER_HW4_DEBUG
+#define MEMDUMPINFO PLATFORM_PATH".memdump.info"
+#elif defined(CUSTOMER_HW2)
+#define MEMDUMPINFO "/data/misc/wifi/.memdump.info"
+#elif (defined(BOARD_PANDA) || defined(__ARM_ARCH_7A__))
+#define MEMDUMPINFO "/data/misc/wifi/.memdump.info"
+#else
+#define MEMDUMPINFO "/data/misc/wifi/.memdump.info"
+#endif /* CUSTOMER_HW4_DEBUG */
- /* double check invalid memory operation */
- ASSERT((unsigned long)dld_buf->present <= dld_buf->max);
+void dhd_get_memdump_info(dhd_pub_t *dhd)
+{
+ struct file *fp = NULL;
+ uint32 mem_val = DUMP_MEMFILE_MAX;
+ int ret = 0;
+ char *filepath = MEMDUMPINFO;
- if (dld_buf->dhd_pub) {
- dhd_pub_t *dhdp = (dhd_pub_t *)dld_buf->dhd_pub;
- dhdp->logdump_periodic_flush =
- logdump_periodic_flush;
- if (logdump_periodic_flush && flush_log) {
- log_dump_type_t *flush_type = MALLOCZ(dhdp->osh,
- sizeof(log_dump_type_t));
- if (flush_type) {
- *flush_type = type;
- dhd_schedule_log_dump(dld_buf->dhd_pub, flush_type);
- }
+ /* Read memdump info from the file */
+ fp = filp_open(filepath, O_RDONLY, 0);
+ if (IS_ERR(fp)) {
+ DHD_ERROR(("%s: File [%s] doesn't exist\n", __FUNCTION__, filepath));
+#if defined(CONFIG_X86)
+ /* Check if it is Live Brix Image */
+ if (strcmp(filepath, MEMDUMPINFO_LIVE) != 0) {
+ goto done;
}
+ /* Try if it is Installed Brix Image */
+ filepath = MEMDUMPINFO_INST;
+ DHD_ERROR(("%s: Try File [%s]\n", __FUNCTION__, filepath));
+ fp = filp_open(filepath, O_RDONLY, 0);
+ if (IS_ERR(fp)) {
+ DHD_ERROR(("%s: File [%s] doesn't exist\n", __FUNCTION__, filepath));
+ goto done;
+ }
+#else /* Non Brix Android platform */
+ goto done;
+#endif /* CONFIG_X86 */
}
-}
-char*
-dhd_log_dump_get_timestamp(void)
-{
- static char buf[16];
- u64 ts_nsec;
- unsigned long rem_nsec;
+ /* Handle success case */
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 0)
+ ret = kernel_read(fp, (char *)&mem_val, 4, NULL);
+#else
+ ret = kernel_read(fp, 0, (char *)&mem_val, 4);
+#endif
+ if (ret < 0) {
+ DHD_ERROR(("%s: File read error, ret=%d\n", __FUNCTION__, ret));
+ filp_close(fp, NULL);
+ goto done;
+ }
- ts_nsec = local_clock();
- rem_nsec = DIV_AND_MOD_U64_BY_U32(ts_nsec, NSEC_PER_SEC);
- snprintf(buf, sizeof(buf), "%5lu.%06lu",
- (unsigned long)ts_nsec, rem_nsec / NSEC_PER_USEC);
+ mem_val = bcm_atoi((char *)&mem_val);
- return buf;
-}
-#endif /* DHD_LOG_DUMP */
+ filp_close(fp, NULL);
-#ifdef DHD_PCIE_NATIVE_RUNTIMEPM
-void
-dhd_flush_rx_tx_wq(dhd_pub_t *dhdp)
-{
- dhd_info_t * dhd;
+#ifdef DHD_INIT_DEFAULT_MEMDUMP
+ if (mem_val == 0 || mem_val == DUMP_MEMFILE_MAX)
+ mem_val = DUMP_MEMFILE_BUGON;
+#endif /* DHD_INIT_DEFAULT_MEMDUMP */
- if (dhdp) {
- dhd = dhdp->info;
- if (dhd) {
- flush_workqueue(dhd->tx_wq);
- flush_workqueue(dhd->rx_wq);
- }
- }
+done:
+#ifdef CUSTOMER_HW4_DEBUG
+ dhd->memdump_enabled = (mem_val < DUMP_MEMFILE_MAX) ? mem_val : DUMP_DISABLED;
+#else
+ dhd->memdump_enabled = (mem_val < DUMP_MEMFILE_MAX) ? mem_val : DUMP_MEMFILE;
+#endif /* CUSTOMER_HW4_DEBUG */
- return;
+ DHD_ERROR(("%s: MEMDUMP ENABLED = %d\n", __FUNCTION__, dhd->memdump_enabled));
}
-#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
-#ifdef DHD_DEBUG_UART
-bool
-dhd_debug_uart_is_running(struct net_device *dev)
+void dhd_schedule_memdump(dhd_pub_t *dhdp, uint8 *buf, uint32 size)
{
- dhd_info_t *dhd = DHD_DEV_INFO(dev);
+ dhd_dump_t *dump = NULL;
+ dump = (dhd_dump_t *)MALLOC(dhdp->osh, sizeof(dhd_dump_t));
+ if (dump == NULL) {
+ DHD_ERROR(("%s: dhd dump memory allocation failed\n", __FUNCTION__));
+ return;
+ }
+ dump->buf = buf;
+ dump->bufsize = size;
- if (dhd->duart_execute) {
- return TRUE;
+#if defined(CONFIG_ARM64)
+ DHD_ERROR(("%s: buf(va)=%llx, buf(pa)=%llx, bufsize=%d\n", __FUNCTION__,
+ (uint64)buf, (uint64)__virt_to_phys((ulong)buf), size));
+#elif defined(__ARM_ARCH_7A__)
+ DHD_ERROR(("%s: buf(va)=%x, buf(pa)=%x, bufsize=%d\n", __FUNCTION__,
+ (uint32)buf, (uint32)__virt_to_phys((ulong)buf), size));
+#endif /* __ARM_ARCH_7A__ */
+ if (dhdp->memdump_enabled == DUMP_MEMONLY) {
+ BUG_ON(1);
}
- return FALSE;
+#ifdef DHD_LOG_DUMP
+ if (dhdp->memdump_type != DUMP_TYPE_BY_SYSDUMP) {
+ dhd_schedule_log_dump(dhdp);
+ }
+#endif /* DHD_LOG_DUMP */
+ dhd_deferred_schedule_work(dhdp->info->dhd_deferred_wq, (void *)dump,
+ DHD_WQ_WORK_SOC_RAM_DUMP, dhd_mem_dump, DHD_WQ_WORK_PRIORITY_HIGH);
}
static void
-dhd_debug_uart_exec_rd(void *handle, void *event_info, u8 event)
+dhd_mem_dump(void *handle, void *event_info, u8 event)
{
- dhd_pub_t *dhdp = handle;
- dhd_debug_uart_exec(dhdp, "rd");
-}
+ dhd_info_t *dhd = handle;
+ dhd_dump_t *dump = event_info;
-static void
-dhd_debug_uart_exec(dhd_pub_t *dhdp, char *cmd)
-{
- int ret;
+ if (!dhd) {
+ DHD_ERROR(("%s: dhd is NULL\n", __FUNCTION__));
+ return;
+ }
- char *argv[] = {DHD_DEBUG_UART_EXEC_PATH, cmd, NULL};
- char *envp[] = {"HOME=/", "TERM=linux", "PATH=/sbin:/system/bin", NULL};
+ if (!dump) {
+ DHD_ERROR(("%s: dump is NULL\n", __FUNCTION__));
+ return;
+ }
-#ifdef DHD_FW_COREDUMP
- if (dhdp->memdump_enabled == DUMP_MEMFILE_BUGON)
-#endif // endif
- {
- if (dhdp->hang_reason == HANG_REASON_PCIE_LINK_DOWN_RC_DETECT ||
- dhdp->hang_reason == HANG_REASON_PCIE_LINK_DOWN_EP_DETECT ||
-#ifdef DHD_FW_COREDUMP
- dhdp->memdump_success == FALSE ||
-#endif // endif
- FALSE) {
- dhdp->info->duart_execute = TRUE;
- DHD_ERROR(("DHD: %s - execute %s %s\n",
- __FUNCTION__, DHD_DEBUG_UART_EXEC_PATH, cmd));
- ret = call_usermodehelper(argv[0], argv, envp, UMH_WAIT_PROC);
- DHD_ERROR(("DHD: %s - %s %s ret = %d\n",
- __FUNCTION__, DHD_DEBUG_UART_EXEC_PATH, cmd, ret));
- dhdp->info->duart_execute = FALSE;
+ if (write_dump_to_file(&dhd->pub, dump->buf, dump->bufsize, "mem_dump")) {
+ DHD_ERROR(("%s: writing SoC_RAM dump to the file failed\n", __FUNCTION__));
+ dhd->pub.memdump_success = FALSE;
+ }
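+ /* When memdump mode is DUMP_MEMFILE_BUGON, force a kernel BUG() after the
+ * SoC RAM dump has been written, unless one of the conditions below excludes it.
+ */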
+ if (dhd->pub.memdump_enabled == DUMP_MEMFILE_BUGON &&
#ifdef DHD_LOG_DUMP
- if (dhdp->memdump_type != DUMP_TYPE_BY_SYSDUMP)
-#endif // endif
- {
- BUG_ON(1);
- }
- }
- }
-}
+ dhd->pub.memdump_type != DUMP_TYPE_BY_SYSDUMP &&
+#endif /* DHD_LOG_DUMP */
+#ifdef DHD_DEBUG_UART
+ dhd->pub.memdump_success == TRUE &&
#endif /* DHD_DEBUG_UART */
+ dhd->pub.memdump_type != DUMP_TYPE_CFG_VENDOR_TRIGGERED) {
-#if defined(DHD_BLOB_EXISTENCE_CHECK)
-void
-dhd_set_blob_support(dhd_pub_t *dhdp, char *fw_path)
-{
- struct file *fp;
- char *filepath = VENDOR_PATH CONFIG_BCMDHD_CLM_PATH;
- fp = filp_open(filepath, O_RDONLY, 0);
- if (IS_ERR(fp)) {
- DHD_ERROR(("%s: ----- blob file doesn't exist (%s) -----\n", __FUNCTION__,
- filepath));
- dhdp->is_blob = FALSE;
- } else {
- DHD_ERROR(("%s: ----- blob file exists (%s)-----\n", __FUNCTION__, filepath));
- dhdp->is_blob = TRUE;
-#if defined(CONCATE_BLOB)
- strncat(fw_path, "_blob", strlen("_blob"));
-#else
- BCM_REFERENCE(fw_path);
-#endif /* SKIP_CONCATE_BLOB */
- filp_close(fp, NULL);
+#ifdef SHOW_LOGTRACE
+ /* Wait till event_log_dispatcher_work finishes */
+ cancel_work_sync(&dhd->event_log_dispatcher_work);
+#endif /* SHOW_LOGTRACE */
+
+ BUG_ON(1);
}
+ MFREE(dhd->pub.osh, dump, sizeof(dhd_dump_t));
}
-#endif /* DHD_BLOB_EXISTENCE_CHECK */
+#endif /* DHD_FW_COREDUMP */
+
+#ifdef DHD_SSSR_DUMP
-#if defined(PCIE_FULL_DONGLE)
-/** test / loopback */
-void
-dmaxfer_free_dmaaddr_handler(void *handle, void *event_info, u8 event)
+static void
+dhd_sssr_dump(void *handle, void *event_info, u8 event)
{
- dmaxref_mem_map_t *dmmap = (dmaxref_mem_map_t *)event_info;
- dhd_info_t *dhd_info = (dhd_info_t *)handle;
+ dhd_info_t *dhd = handle;
+ dhd_pub_t *dhdp;
+ int i;
+ char before_sr_dump[128];
+ char after_sr_dump[128];
- if (event != DHD_WQ_WORK_DMA_LB_MEM_REL) {
- DHD_ERROR(("%s: Unexpected event \n", __FUNCTION__));
+ if (!dhd) {
+ DHD_ERROR(("%s: dhd is NULL\n", __FUNCTION__));
return;
}
- if (dhd_info == NULL) {
- DHD_ERROR(("%s: Invalid dhd_info\n", __FUNCTION__));
- return;
+
+ dhdp = &dhd->pub;
+
+ for (i = 0; i < MAX_NUM_D11CORES; i++) {
+ /* Init file name */
+ memset(before_sr_dump, 0, sizeof(before_sr_dump));
+ memset(after_sr_dump, 0, sizeof(after_sr_dump));
+
+ snprintf(before_sr_dump, sizeof(before_sr_dump), "%s_%d_%s",
+ "sssr_core", i, "before_SR");
+ snprintf(after_sr_dump, sizeof(after_sr_dump), "%s_%d_%s",
+ "sssr_core", i, "after_SR");
+
+ if (dhdp->sssr_d11_before[i] && dhdp->sssr_d11_outofreset[i]) {
+ if (write_dump_to_file(dhdp, (uint8 *)dhdp->sssr_d11_before[i],
+ dhdp->sssr_reg_info.mac_regs[i].sr_size, before_sr_dump)) {
+ DHD_ERROR(("%s: writing SSSR MAIN dump before to the file failed\n",
+ __FUNCTION__));
+ }
+ }
+ if (dhdp->sssr_d11_after[i] && dhdp->sssr_d11_outofreset[i]) {
+ if (write_dump_to_file(dhdp, (uint8 *)dhdp->sssr_d11_after[i],
+ dhdp->sssr_reg_info.mac_regs[i].sr_size, after_sr_dump)) {
+ DHD_ERROR(("%s: writing SSSR AUX dump after to the file failed\n",
+ __FUNCTION__));
+ }
+ }
}
- if (dmmap == NULL) {
- DHD_ERROR(("%s: dmmap is null\n", __FUNCTION__));
- return;
+
+ if (dhdp->sssr_vasip_buf_before) {
+ if (write_dump_to_file(dhdp, (uint8 *)dhdp->sssr_vasip_buf_before,
+ dhdp->sssr_reg_info.vasip_regs.vasip_sr_size, "sssr_vasip_before_SR")) {
+ DHD_ERROR(("%s: writing SSSR VASIP dump before to the file failed\n",
+ __FUNCTION__));
+ }
}
- dmaxfer_free_prev_dmaaddr(&dhd_info->pub, dmmap);
-}
-void
-dhd_schedule_dmaxfer_free(dhd_pub_t *dhdp, dmaxref_mem_map_t *dmmap)
-{
- dhd_info_t *dhd_info = dhdp->info;
+ if (dhdp->sssr_vasip_buf_after) {
+ if (write_dump_to_file(dhdp, (uint8 *)dhdp->sssr_vasip_buf_after,
+ dhdp->sssr_reg_info.vasip_regs.vasip_sr_size, "sssr_vasip_after_SR")) {
+ DHD_ERROR(("%s: writing SSSR VASIP dump after to the file failed\n",
+ __FUNCTION__));
+ }
+ }
- dhd_deferred_schedule_work(dhd_info->dhd_deferred_wq, (void *)dmmap,
- DHD_WQ_WORK_DMA_LB_MEM_REL, dmaxfer_free_dmaaddr_handler, DHD_WQ_WORK_PRIORITY_LOW);
}
-#endif /* PCIE_FULL_DONGLE */
-/* ---------------------------- End of sysfs implementation ------------------------------------- */
-#ifdef SET_PCIE_IRQ_CPU_CORE
void
-dhd_set_irq_cpucore(dhd_pub_t *dhdp, int affinity_cmd)
+dhd_schedule_sssr_dump(dhd_pub_t *dhdp)
{
- unsigned int pcie_irq = 0;
+ dhd_deferred_schedule_work(dhdp->info->dhd_deferred_wq, NULL,
+ DHD_WQ_WORK_SSSR_DUMP, dhd_sssr_dump, DHD_WQ_WORK_PRIORITY_HIGH);
+}
+#endif /* DHD_SSSR_DUMP */
- if (!dhdp) {
- DHD_ERROR(("%s : dhd is NULL\n", __FUNCTION__));
- return;
- }
+#ifdef DHD_LOG_DUMP
+static void
+dhd_log_dump(void *handle, void *event_info, u8 event)
+{
+ dhd_info_t *dhd = handle;
- if (!dhdp->bus) {
- DHD_ERROR(("%s : dhd->bus is NULL\n", __FUNCTION__));
+ if (!dhd) {
+ DHD_ERROR(("%s: dhd is NULL\n", __FUNCTION__));
return;
}
- DHD_ERROR(("Enter %s, PCIe affinity cmd=0x%x\n", __FUNCTION__, affinity_cmd));
-
- if (dhdpcie_get_pcieirq(dhdp->bus, &pcie_irq)) {
- DHD_ERROR(("%s : Can't get interrupt number\n", __FUNCTION__));
+ if (do_dhd_log_dump(&dhd->pub)) {
+ DHD_ERROR(("%s: writing debug dump to the file failed\n", __FUNCTION__));
return;
}
+}
- /*
- irq_set_affinity() assign dedicated CPU core PCIe interrupt
- If dedicated CPU core is not on-line,
- PCIe interrupt scheduled on CPU core 0
- */
- switch (affinity_cmd) {
- case PCIE_IRQ_AFFINITY_OFF:
- break;
- case PCIE_IRQ_AFFINITY_BIG_CORE_ANY:
-#if defined(CONFIG_ARCH_SM8150)
- irq_set_affinity_hint(pcie_irq, dhdp->info->cpumask_primary);
- irq_set_affinity(pcie_irq, dhdp->info->cpumask_primary);
-#else /* Exynos and Others */
- irq_set_affinity(pcie_irq, dhdp->info->cpumask_primary);
-#endif /* CONFIG_ARCH_SM8150 */
- break;
-#if defined(CONFIG_SOC_EXYNOS9810) || defined(CONFIG_SOC_EXYNOS9820)
- case PCIE_IRQ_AFFINITY_BIG_CORE_EXYNOS:
- DHD_ERROR(("%s, PCIe IRQ:%u set Core %d\n",
- __FUNCTION__, pcie_irq, PCIE_IRQ_CPU_CORE));
- irq_set_affinity(pcie_irq, cpumask_of(PCIE_IRQ_CPU_CORE));
- break;
-#endif /* CONFIG_SOC_EXYNOS9810 || CONFIG_SOC_EXYNOS9820 */
- default:
- DHD_ERROR(("%s, Unknown PCIe affinity cmd=0x%x\n",
- __FUNCTION__, affinity_cmd));
- }
+void dhd_schedule_log_dump(dhd_pub_t *dhdp)
+{
+ dhd_deferred_schedule_work(dhdp->info->dhd_deferred_wq,
+ (void*)NULL, DHD_WQ_WORK_DHD_LOG_DUMP,
+ dhd_log_dump, DHD_WQ_WORK_PRIORITY_HIGH);
}
-#endif /* SET_PCIE_IRQ_CPU_CORE */
-int
-dhd_write_file(const char *filepath, char *buf, int buf_len)
+static int
+do_dhd_log_dump(dhd_pub_t *dhdp)
{
+ int ret = 0, i = 0;
struct file *fp = NULL;
mm_segment_t old_fs;
- int ret = 0;
+ loff_t pos = 0;
+ unsigned int wr_size = 0;
+ char dump_path[128];
+ struct timeval curtime;
+ uint32 file_mode;
+ unsigned long flags = 0;
+ struct dhd_log_dump_buf *dld_buf = &g_dld_buf[0];
+
+ const char *pre_strs =
+ "-------------------- General log ---------------------------\n";
+
+ const char *post_strs =
+ "-------------------- Specific log --------------------------\n";
+
+ if (!dhdp) {
+ return -1;
+ }
+
+ DHD_ERROR(("DHD version: %s\n", dhd_version));
+ DHD_ERROR(("F/W version: %s\n", fw_version));
/* change to KERNEL_DS address limit */
old_fs = get_fs();
set_fs(KERNEL_DS);
- /* File is always created. */
- fp = filp_open(filepath, O_RDWR | O_CREAT, 0664);
+ /* Init file name */
+ memset(dump_path, 0, sizeof(dump_path));
+ do_gettimeofday(&curtime);
+ snprintf(dump_path, sizeof(dump_path), "%s_%ld.%ld",
+ DHD_COMMON_DUMP_PATH "debug_dump",
+ (unsigned long)curtime.tv_sec, (unsigned long)curtime.tv_usec);
+ file_mode = O_CREAT | O_WRONLY | O_SYNC;
+
+ DHD_ERROR(("debug_dump_path = %s\n", dump_path));
+ fp = filp_open(dump_path, file_mode, 0664);
if (IS_ERR(fp)) {
- DHD_ERROR(("%s: Couldn't open file '%s' err %ld\n",
- __FUNCTION__, filepath, PTR_ERR(fp)));
- ret = BCME_ERROR;
- } else {
- if (fp->f_mode & FMODE_WRITE) {
- ret = compat_vfs_write(fp, buf, buf_len, &fp->f_pos);
- if (ret < 0) {
- DHD_ERROR(("%s: Couldn't write file '%s'\n",
- __FUNCTION__, filepath));
- ret = BCME_ERROR;
- } else {
- ret = BCME_OK;
+ ret = PTR_ERR(fp);
+ DHD_ERROR(("open file error, err = %d\n", ret));
+ goto exit;
+ }
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 0)
+ ret = kernel_write(fp, pre_strs, strlen(pre_strs), &pos);
+#else
+ ret = vfs_write(fp, pre_strs, strlen(pre_strs), &pos);
+#endif
+ if (ret < 0) {
+ DHD_ERROR(("write file error, err = %d\n", ret));
+ goto exit;
+ }
+
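+ /* Walk every dhd_log_dump_buf: write its valid region to the file and then
+ * re-initialize the ring so logging continues into an empty buffer.
+ */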
+ do {
+ unsigned int buf_size = (unsigned int)(dld_buf->max -
+ (unsigned long)dld_buf->buffer);
+ if (dld_buf->wraparound) {
+ wr_size = buf_size;
+ } else {
+ if (!dld_buf->buffer[0]) { /* print log if buf is empty. */
+ DHD_ERROR_EX(("Buffer is empty. No event/log.\n"));
}
+ wr_size = (unsigned int)(dld_buf->present - dld_buf->front);
}
- filp_close(fp, NULL);
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 0)
+ ret = kernel_write(fp, dld_buf->buffer, wr_size, &pos);
+#else
+ ret = vfs_write(fp, dld_buf->buffer, wr_size, &pos);
+#endif
+ if (ret < 0) {
+ DHD_ERROR(("write file error, err = %d\n", ret));
+ goto exit;
+ }
+
+ /* re-init dhd_log_dump_buf structure */
+ spin_lock_irqsave(&dld_buf->lock, flags);
+ dld_buf->wraparound = 0;
+ dld_buf->present = dld_buf->front;
+ dld_buf->remain = buf_size;
+ bzero(dld_buf->buffer, buf_size);
+ spin_unlock_irqrestore(&dld_buf->lock, flags);
+ ret = BCME_OK;
+
+ if (++i < DLD_BUFFER_NUM) {
+ dld_buf = &g_dld_buf[i];
+ } else {
+ break;
+ }
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 0)
+ ret = kernel_write(fp, post_strs, strlen(post_strs), &pos);
+#else
+ ret = vfs_write(fp, post_strs, strlen(post_strs), &pos);
+#endif
+ if (ret < 0) {
+ DHD_ERROR(("write file error, err = %d\n", ret));
+ goto exit;
+ }
+ } while (1);
+
+exit:
+#if defined(STAT_REPORT)
+ if (!IS_ERR(fp) && ret >= 0) {
+ wl_stat_report_file_save(dhdp, fp);
}
+#endif /* STAT_REPORT */
- /* restore previous address limit */
+ if (!IS_ERR(fp)) {
+ filp_close(fp, NULL);
+ }
set_fs(old_fs);
return ret;
}
+#endif /* DHD_LOG_DUMP */
-int
-dhd_read_file(const char *filepath, char *buf, int buf_len)
+
+#ifdef BCMASSERT_LOG
+#ifdef CUSTOMER_HW4_DEBUG
+#define ASSERTINFO PLATFORM_PATH".assert.info"
+#elif defined(CUSTOMER_HW2)
+#define ASSERTINFO "/data/misc/wifi/.assert.info"
+#else
+#define ASSERTINFO "/installmedia/.assert.info"
+#endif /* CUSTOMER_HW4_DEBUG */
+void dhd_get_assert_info(dhd_pub_t *dhd)
{
struct file *fp = NULL;
- mm_segment_t old_fs;
- int ret;
-
- /* change to KERNEL_DS address limit */
- old_fs = get_fs();
- set_fs(KERNEL_DS);
+ char *filepath = ASSERTINFO;
+ int mem_val = -1;
+ /*
+ * Read assert info from the file
+ * 0: Trigger Kernel crash by panic()
+ * 1: Print out the logs and don't trigger Kernel panic. (default)
+ * 2: Trigger Kernel crash by BUG()
+ * File doesn't exist: Keep default value (1).
+ */
fp = filp_open(filepath, O_RDONLY, 0);
if (IS_ERR(fp)) {
- set_fs(old_fs);
- DHD_ERROR(("%s: File %s doesn't exist\n", __FUNCTION__, filepath));
+ DHD_ERROR(("%s: File [%s] doesn't exist\n", __FUNCTION__, filepath));
+ } else {
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 0)
+ ssize_t ret = kernel_read(fp, (char *)&mem_val, 4, NULL);
+#else
+ int ret = kernel_read(fp, 0, (char *)&mem_val, 4);
+#endif
+ if (ret < 0) {
+ DHD_ERROR(("%s: File read error, ret=%d\n", __FUNCTION__, ret));
+ } else {
+ mem_val = bcm_atoi((char *)&mem_val);
+ DHD_ERROR(("%s: ASSERT ENABLED = %d\n", __FUNCTION__, mem_val));
+ }
+ filp_close(fp, NULL);
+ }
+#ifdef CUSTOMER_HW4_DEBUG
+ /* By default, set to 1: no Kernel Panic */
+ g_assert_type = (mem_val >= 0) ? mem_val : 1;
+#else
+ /* By default, set to 0: Kernel Panic */
+ g_assert_type = (mem_val >= 0) ? mem_val : 0;
+#endif
+}
+#endif /* BCMASSERT_LOG */
+
+/*
+ * This call is to get the memdump size so that
+ * halutil can alloc that much buffer in user space.
+ */
+int
+dhd_os_socram_dump(struct net_device *dev, uint32 *dump_size)
+{
+ int ret = BCME_OK;
+ dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
+ dhd_pub_t *dhdp = &dhd->pub;
+
+ if (dhdp->busstate == DHD_BUS_DOWN) {
+ DHD_ERROR(("%s: bus is down\n", __FUNCTION__));
return BCME_ERROR;
}
- ret = compat_kernel_read(fp, 0, buf, buf_len);
- filp_close(fp, NULL);
+ if (DHD_BUS_CHECK_SUSPEND_OR_SUSPEND_IN_PROGRESS(dhdp)) {
+ DHD_ERROR(("%s: bus is in suspend(%d) or suspending(0x%x) state, so skip\n",
+ __FUNCTION__, dhdp->busstate, dhdp->dhd_bus_busy_state));
+ return BCME_ERROR;
+ }
- /* restore previous address limit */
- set_fs(old_fs);
+ ret = dhd_common_socram_dump(dhdp);
+ if (ret == BCME_OK) {
+ *dump_size = dhdp->soc_ram_length;
+ }
+ return ret;
+}
- /* Return the number of bytes read */
- if (ret > 0) {
- /* Success to read */
- ret = 0;
+/*
+ * This is to get the actual memdump after getting the memdump size
+ */
+int
+dhd_os_get_socram_dump(struct net_device *dev, char **buf, uint32 *size)
+{
+ int ret = BCME_OK;
+ int orig_len = 0;
+ dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
+ dhd_pub_t *dhdp = &dhd->pub;
+ if (buf == NULL)
+ return BCME_ERROR;
+ orig_len = *size;
+ if (dhdp->soc_ram) {
+ if (orig_len >= dhdp->soc_ram_length) {
+ memcpy(*buf, dhdp->soc_ram, dhdp->soc_ram_length);
+ /* reset the storage of dump */
+ memset(dhdp->soc_ram, 0, dhdp->soc_ram_length);
+ *size = dhdp->soc_ram_length;
+ } else {
+ ret = BCME_BUFTOOSHORT;
+ DHD_ERROR(("The length of the buffer is too short"
+ " to save the memory dump with %d\n", dhdp->soc_ram_length));
+ }
} else {
- DHD_ERROR(("%s: Couldn't read the file %s, ret=%d\n",
- __FUNCTION__, filepath, ret));
- ret = BCME_ERROR;
+ DHD_ERROR(("socram_dump is not ready to get\n"));
+ ret = BCME_NOTREADY;
}
-
return ret;
}
int
-dhd_write_file_and_check(const char *filepath, char *buf, int buf_len)
+dhd_os_get_version(struct net_device *dev, bool dhd_ver, char **buf, uint32 size)
{
- int ret;
-
- ret = dhd_write_file(filepath, buf, buf_len);
- if (ret < 0) {
- return ret;
- }
-
- /* Read the file again and check if the file size is not zero */
- memset(buf, 0, buf_len);
- ret = dhd_read_file(filepath, buf, buf_len);
-
- return ret;
-}
+ char *fw_str;
-#ifdef FILTER_IE
-int dhd_read_from_file(dhd_pub_t *dhd)
-{
- int ret = 0, nread = 0;
- void *fd;
- uint8 *buf;
- NULL_CHECK(dhd, "dhd is NULL", ret);
+ if (size == 0)
+ return BCME_BADARG;
- buf = MALLOCZ(dhd->osh, FILE_BLOCK_READ_SIZE);
- if (!buf) {
- DHD_ERROR(("error: failed to alllocate buf.\n"));
- return BCME_NOMEM;
+ fw_str = strstr(info_string, "Firmware: ");
+ if (fw_str == NULL) {
+ return BCME_ERROR;
}
- /* open file to read */
- fd = dhd_os_open_image1(dhd, FILTER_IE_PATH);
- if (!fd) {
- DHD_ERROR(("error: failed to open %s\n", FILTER_IE_PATH));
- ret = BCME_EPERM;
- goto exit;
- }
- nread = dhd_os_get_image_block(buf, (FILE_BLOCK_READ_SIZE - 1), fd);
- if (nread > 0) {
- buf[nread] = '\0';
- if ((ret = dhd_parse_filter_ie(dhd, buf)) < 0) {
- DHD_ERROR(("error: failed to parse filter ie\n"));
- }
+ memset(*buf, 0, size);
+ if (dhd_ver) {
+ strncpy(*buf, dhd_version, size - 1);
} else {
- DHD_ERROR(("error: zero length file.failed to read\n"));
- ret = BCME_ERROR;
- }
- dhd_os_close_image1(dhd, fd);
-exit:
- if (buf) {
- MFREE(dhd->osh, buf, FILE_BLOCK_READ_SIZE);
- buf = NULL;
+ strncpy(*buf, fw_str, size - 1);
}
- return ret;
+ return BCME_OK;
}
-int dhd_get_filter_ie_count(dhd_pub_t *dhdp, uint8* buf)
+#ifdef DHD_WMF
+/* Returns interface specific WMF configuration */
+dhd_wmf_t* dhd_wmf_conf(dhd_pub_t *dhdp, uint32 idx)
{
- uint8* pstr = buf;
- int element_count = 0;
-
- if (buf == NULL) {
- return BCME_ERROR;
- }
+ dhd_info_t *dhd = dhdp->info;
+ dhd_if_t *ifp;
- while (*pstr != '\0') {
- if (*pstr == '\n') {
- element_count++;
- }
- pstr++;
- }
- /*
- * New line character must not be present after last line.
- * To count last line
- */
- element_count++;
+ ASSERT(idx < DHD_MAX_IFS);
- return element_count;
+ ifp = dhd->iflist[idx];
+ return &ifp->wmf;
}
+#endif /* DHD_WMF */
-int dhd_parse_oui(dhd_pub_t *dhd, uint8 *inbuf, uint8 *oui, int len)
+#if defined(TRAFFIC_MGMT_DWM)
+void traffic_mgmt_pkt_set_prio(dhd_pub_t *dhdp, void * pktbuf)
{
- uint8 i, j, msb, lsb, oui_len = 0;
- /*
- * OUI can vary from 3 bytes to 5 bytes.
- * While reading from file as ascii input it can
- * take maximum size of 14 bytes and minumum size of
- * 8 bytes including ":"
- * Example 5byte OUI <AB:DE:BE:CD:FA>
- * Example 3byte OUI <AB:DC:EF>
- */
-
- if ((inbuf == NULL) || (len < 8) || (len > 14)) {
- DHD_ERROR(("error: failed to parse OUI \n"));
- return BCME_ERROR;
- }
+ struct ether_header *eh;
+ struct ethervlan_header *evh;
+ uint8 *pktdata, *ip_body;
+ uint8 dwm_filter;
+ uint8 tos_tc = 0;
+ uint8 dscp = 0;
+ pktdata = (uint8 *)PKTDATA(dhdp->osh, pktbuf);
+ eh = (struct ether_header *) pktdata;
+ ip_body = NULL;
+
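+ /* Locate the IP/IPv6 header (skipping an optional 802.1Q VLAN tag), extract
+ * the DSCP field and, if a DWM filter is set for it, apply the mapped
+ * packet priority.
+ */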
+ if (dhdp->dhd_tm_dwm_tbl.dhd_dwm_enabled) {
+ if (eh->ether_type == hton16(ETHER_TYPE_8021Q)) {
+ evh = (struct ethervlan_header *)eh;
+ if ((evh->ether_type == hton16(ETHER_TYPE_IP)) ||
+ (evh->ether_type == hton16(ETHER_TYPE_IPV6))) {
+ ip_body = pktdata + sizeof(struct ethervlan_header);
+ }
+ } else if ((eh->ether_type == hton16(ETHER_TYPE_IP)) ||
+ (eh->ether_type == hton16(ETHER_TYPE_IPV6))) {
+ ip_body = pktdata + sizeof(struct ether_header);
+ }
+ if (ip_body) {
+ tos_tc = IP_TOS46(ip_body);
+ dscp = tos_tc >> IPV4_TOS_DSCP_SHIFT;
+ }
- for (j = 0, i = 0; i < len; i += 3, ++j) {
- if (!bcm_isxdigit(inbuf[i]) || !bcm_isxdigit(inbuf[i + 1])) {
- DHD_ERROR(("error: invalid OUI format \n"));
- return BCME_ERROR;
+ if (dscp < DHD_DWM_TBL_SIZE) {
+ dwm_filter = dhdp->dhd_tm_dwm_tbl.dhd_dwm_tbl[dscp];
+ if (DHD_TRF_MGMT_DWM_IS_FILTER_SET(dwm_filter)) {
+ PKTSETPRIO(pktbuf, DHD_TRF_MGMT_DWM_PRIO(dwm_filter));
+ }
}
- msb = inbuf[i] > '9' ? bcm_toupper(inbuf[i]) - 'A' + 10 : inbuf[i] - '0';
- lsb = inbuf[i + 1] > '9' ? bcm_toupper(inbuf[i + 1]) -
- 'A' + 10 : inbuf[i + 1] - '0';
- oui[j] = (msb << 4) | lsb;
}
- /* Size of oui.It can vary from 3/4/5 */
- oui_len = j;
+}
+#endif
- return oui_len;
+bool dhd_sta_associated(dhd_pub_t *dhdp, uint32 bssidx, uint8 *mac)
+{
+ return dhd_find_sta(dhdp, bssidx, mac) ? TRUE : FALSE;
}
-int dhd_check_valid_ie(dhd_pub_t *dhdp, uint8* buf, int len)
+#ifdef DHD_L2_FILTER
+arp_table_t*
+dhd_get_ifp_arp_table_handle(dhd_pub_t *dhdp, uint32 bssidx)
{
- int i = 0;
+ dhd_info_t *dhd = dhdp->info;
+ dhd_if_t *ifp;
- while (i < len) {
- if (!bcm_isdigit(buf[i])) {
- DHD_ERROR(("error: non digit value found in filter_ie \n"));
- return BCME_ERROR;
- }
- i++;
- }
- if (bcm_atoi((char*)buf) > 255) {
- DHD_ERROR(("error: element id cannot be greater than 255 \n"));
- return BCME_ERROR;
- }
+ ASSERT(bssidx < DHD_MAX_IFS);
- return BCME_OK;
+ ifp = dhd->iflist[bssidx];
+ return ifp->phnd_arp_table;
}
-int dhd_parse_filter_ie(dhd_pub_t *dhd, uint8 *buf)
+int dhd_get_parp_status(dhd_pub_t *dhdp, uint32 idx)
{
- int element_count = 0, i = 0, oui_size = 0, ret = 0;
- uint16 bufsize, buf_space_left, id = 0, len = 0;
- uint16 filter_iovsize, all_tlvsize;
- wl_filter_ie_tlv_t *p_ie_tlv = NULL;
- wl_filter_ie_iov_v1_t *p_filter_iov = (wl_filter_ie_iov_v1_t *) NULL;
- char *token = NULL, *ele_token = NULL, *oui_token = NULL, *type = NULL;
- uint8 data[20];
+ dhd_info_t *dhd = dhdp->info;
+ dhd_if_t *ifp;
- element_count = dhd_get_filter_ie_count(dhd, buf);
- DHD_INFO(("total element count %d \n", element_count));
- /* Calculate the whole buffer size */
- filter_iovsize = sizeof(wl_filter_ie_iov_v1_t) + FILTER_IE_BUFSZ;
- p_filter_iov = MALLOCZ(dhd->osh, filter_iovsize);
+ ASSERT(idx < DHD_MAX_IFS);
- if (p_filter_iov == NULL) {
- DHD_ERROR(("error: failed to allocate %d bytes of memory\n", filter_iovsize));
- return BCME_ERROR;
- }
+ ifp = dhd->iflist[idx];
- /* setup filter iovar header */
- p_filter_iov->version = WL_FILTER_IE_VERSION;
- p_filter_iov->len = filter_iovsize;
- p_filter_iov->fixed_length = p_filter_iov->len - FILTER_IE_BUFSZ;
- p_filter_iov->pktflag = FC_PROBE_REQ;
- p_filter_iov->option = WL_FILTER_IE_CHECK_SUB_OPTION;
- /* setup TLVs */
- bufsize = filter_iovsize - WL_FILTER_IE_IOV_HDR_SIZE; /* adjust available size for TLVs */
- p_ie_tlv = (wl_filter_ie_tlv_t *)&p_filter_iov->tlvs[0];
- buf_space_left = bufsize;
-
- while ((i < element_count) && (buf != NULL)) {
- len = 0;
- /* token contains one line of input data */
- token = bcmstrtok((char**)&buf, "\n", NULL);
- if (token == NULL) {
- break;
- }
- if ((ele_token = bcmstrstr(token, ",")) == NULL) {
- /* only element id is present */
- if (dhd_check_valid_ie(dhd, token, strlen(token)) == BCME_ERROR) {
- DHD_ERROR(("error: Invalid element id \n"));
- ret = BCME_ERROR;
- goto exit;
- }
- id = bcm_atoi((char*)token);
- data[len++] = WL_FILTER_IE_SET;
- } else {
- /* oui is present */
- ele_token = bcmstrtok(&token, ",", NULL);
- if ((ele_token == NULL) || (dhd_check_valid_ie(dhd, ele_token,
- strlen(ele_token)) == BCME_ERROR)) {
- DHD_ERROR(("error: Invalid element id \n"));
- ret = BCME_ERROR;
- goto exit;
- }
- id = bcm_atoi((char*)ele_token);
- data[len++] = WL_FILTER_IE_SET;
- if ((oui_token = bcmstrstr(token, ",")) == NULL) {
- oui_size = dhd_parse_oui(dhd, token, &(data[len]), strlen(token));
- if (oui_size == BCME_ERROR) {
- DHD_ERROR(("error: Invalid OUI \n"));
- ret = BCME_ERROR;
- goto exit;
- }
- len += oui_size;
- } else {
- /* type is present */
- oui_token = bcmstrtok(&token, ",", NULL);
- if ((oui_token == NULL) || ((oui_size =
- dhd_parse_oui(dhd, oui_token,
- &(data[len]), strlen(oui_token))) == BCME_ERROR)) {
- DHD_ERROR(("error: Invalid OUI \n"));
- ret = BCME_ERROR;
- goto exit;
- }
- len += oui_size;
- if ((type = bcmstrstr(token, ",")) == NULL) {
- if (dhd_check_valid_ie(dhd, token,
- strlen(token)) == BCME_ERROR) {
- DHD_ERROR(("error: Invalid type \n"));
- ret = BCME_ERROR;
- goto exit;
- }
- data[len++] = bcm_atoi((char*)token);
- } else {
- /* subtype is present */
- type = bcmstrtok(&token, ",", NULL);
- if ((type == NULL) || (dhd_check_valid_ie(dhd, type,
- strlen(type)) == BCME_ERROR)) {
- DHD_ERROR(("error: Invalid type \n"));
- ret = BCME_ERROR;
- goto exit;
- }
- data[len++] = bcm_atoi((char*)type);
- /* subtype is last element */
- if ((token == NULL) || (*token == '\0') ||
- (dhd_check_valid_ie(dhd, token,
- strlen(token)) == BCME_ERROR)) {
- DHD_ERROR(("error: Invalid subtype \n"));
- ret = BCME_ERROR;
- goto exit;
- }
- data[len++] = bcm_atoi((char*)token);
- }
- }
- }
- ret = bcm_pack_xtlv_entry((uint8 **)&p_ie_tlv,
- &buf_space_left, id, len, data, BCM_XTLV_OPTION_ALIGN32);
- if (ret != BCME_OK) {
- DHD_ERROR(("%s : bcm_pack_xtlv_entry() failed ,"
- "status=%d\n", __FUNCTION__, ret));
- goto exit;
- }
- i++;
- }
- if (i == 0) {
- /* file is empty or first line is blank */
- DHD_ERROR(("error: filter_ie file is empty or first line is blank \n"));
- ret = BCME_ERROR;
- goto exit;
- }
- /* update the iov header, set len to include all TLVs + header */
- all_tlvsize = (bufsize - buf_space_left);
- p_filter_iov->len = htol16(all_tlvsize + WL_FILTER_IE_IOV_HDR_SIZE);
- ret = dhd_iovar(dhd, 0, "filter_ie", (void *)p_filter_iov,
- p_filter_iov->len, NULL, 0, TRUE);
- if (ret != BCME_OK) {
- DHD_ERROR(("error: IOVAR failed, status=%d\n", ret));
- }
-exit:
- /* clean up */
- if (p_filter_iov) {
- MFREE(dhd->osh, p_filter_iov, filter_iovsize);
- p_filter_iov = NULL;
- }
- return ret;
-}
-#endif /* FILTER_IE */
-#ifdef DHD_WAKE_STATUS
-wake_counts_t*
-dhd_get_wakecount(dhd_pub_t *dhdp)
-{
-#ifdef BCMDBUS
- return NULL;
-#else
- return dhd_bus_get_wakecount(dhdp);
-#endif /* BCMDBUS */
+ if (ifp)
+ return ifp->parp_enable;
+ else
+ return FALSE;
}
-#endif /* DHD_WAKE_STATUS */
-int
-dhd_get_random_bytes(uint8 *buf, uint len)
+/* Set interface specific proxy arp configuration */
+int dhd_set_parp_status(dhd_pub_t *dhdp, uint32 idx, int val)
{
-#ifdef BCMPCIE
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 19, 0))
- int rndlen = get_random_bytes_arch(buf, len);
- if (rndlen != len) {
- bzero(buf, len);
- get_random_bytes(buf, len);
+ dhd_info_t *dhd = dhdp->info;
+ dhd_if_t *ifp;
+ ASSERT(idx < DHD_MAX_IFS);
+ ifp = dhd->iflist[idx];
+
+ if (!ifp)
+ return BCME_ERROR;
+
+ /* At present all 3 variables are being
+ * handled at once
+ */
+ ifp->parp_enable = val;
+ ifp->parp_discard = val;
+ ifp->parp_allnode = val;
+
+ /* Flush ARP entries when disabled */
+ if (val == FALSE) {
+ bcm_l2_filter_arp_table_update(dhdp->osh, ifp->phnd_arp_table, TRUE, NULL,
+ FALSE, dhdp->tickcnt);
}
-#else
- get_random_bytes_arch(buf, len);
-#endif // endif
-#endif /* BCMPCIE */
return BCME_OK;
}
-#ifdef DHD_ERPOM
-static void
-dhd_error_recovery(void *handle, void *event_info, u8 event)
+bool dhd_parp_discard_is_enabled(dhd_pub_t *dhdp, uint32 idx)
{
- dhd_info_t *dhd = handle;
- dhd_pub_t *dhdp;
- int ret = 0;
-
- if (!dhd) {
- DHD_ERROR(("%s: dhd is NULL\n", __FUNCTION__));
- return;
- }
+ dhd_info_t *dhd = dhdp->info;
+ dhd_if_t *ifp;
- dhdp = &dhd->pub;
+ ASSERT(idx < DHD_MAX_IFS);
- if (!(dhd->dhd_state & DHD_ATTACH_STATE_DONE)) {
- DHD_ERROR(("%s: init not completed, cannot initiate recovery\n",
- __FUNCTION__));
- return;
- }
+ ifp = dhd->iflist[idx];
- ret = dhd_bus_perform_flr_with_quiesce(dhdp, dhdp->bus, FALSE);
- if (ret != BCME_DNGL_DEVRESET) {
- DHD_ERROR(("%s: dhd_bus_perform_flr_with_quiesce failed with ret: %d,"
- "toggle REG_ON\n", __FUNCTION__, ret));
- /* toggle REG_ON */
- dhdp->pom_toggle_reg_on(WLAN_FUNC_ID, BY_WLAN_DUE_TO_WLAN);
- return;
- }
+ ASSERT(ifp);
+ return ifp->parp_discard;
}
-void
-dhd_schedule_reset(dhd_pub_t *dhdp)
+bool
+dhd_parp_allnode_is_enabled(dhd_pub_t *dhdp, uint32 idx)
{
- if (dhdp->enable_erpom) {
- dhd_deferred_schedule_work(dhdp->info->dhd_deferred_wq, NULL,
- DHD_WQ_WORK_ERROR_RECOVERY, dhd_error_recovery, DHD_WQ_WORK_PRIORITY_HIGH);
- }
+ dhd_info_t *dhd = dhdp->info;
+ dhd_if_t *ifp;
+
+ ASSERT(idx < DHD_MAX_IFS);
+
+ ifp = dhd->iflist[idx];
+
+ ASSERT(ifp);
+
+ return ifp->parp_allnode;
}
-#endif /* DHD_ERPOM */
-void
-get_debug_dump_time(char *str)
+int dhd_get_dhcp_unicast_status(dhd_pub_t *dhdp, uint32 idx)
{
- struct osl_timespec curtime;
- unsigned long local_time;
- struct rtc_time tm;
+ dhd_info_t *dhd = dhdp->info;
+ dhd_if_t *ifp;
- if (!strlen(str)) {
- osl_do_gettimeofday(&curtime);
- local_time = (u32)(curtime.tv_sec -
- (sys_tz.tz_minuteswest * DHD_LOG_DUMP_TS_MULTIPLIER_VALUE));
- rtc_time_to_tm(local_time, &tm);
+ ASSERT(idx < DHD_MAX_IFS);
- snprintf(str, DEBUG_DUMP_TIME_BUF_LEN, DHD_LOG_DUMP_TS_FMT_YYMMDDHHMMSSMSMS,
- tm.tm_year - 100, tm.tm_mon + 1, tm.tm_mday, tm.tm_hour, tm.tm_min,
- tm.tm_sec, (int)(curtime.tv_usec/NSEC_PER_USEC));
- }
+ ifp = dhd->iflist[idx];
+
+ ASSERT(ifp);
+
+ return ifp->dhcp_unicast;
}
-void
-clear_debug_dump_time(char *str)
+int dhd_set_dhcp_unicast_status(dhd_pub_t *dhdp, uint32 idx, int val)
{
- memset(str, 0, DEBUG_DUMP_TIME_BUF_LEN);
+ dhd_info_t *dhd = dhdp->info;
+ dhd_if_t *ifp;
+ ASSERT(idx < DHD_MAX_IFS);
+ ifp = dhd->iflist[idx];
+
+ ASSERT(ifp);
+
+ ifp->dhcp_unicast = val;
+ return BCME_OK;
}
-void
-dhd_print_tasklet_status(dhd_pub_t *dhd)
+int dhd_get_block_ping_status(dhd_pub_t *dhdp, uint32 idx)
{
- dhd_info_t *dhdinfo;
+ dhd_info_t *dhd = dhdp->info;
+ dhd_if_t *ifp;
- if (!dhd) {
- DHD_ERROR(("%s : DHD is null\n", __FUNCTION__));
- return;
- }
+ ASSERT(idx < DHD_MAX_IFS);
- dhdinfo = dhd->info;
+ ifp = dhd->iflist[idx];
- if (!dhdinfo) {
- DHD_ERROR(("%s : DHD INFO is null \n", __FUNCTION__));
- return;
- }
+ ASSERT(ifp);
- DHD_ERROR(("DHD Tasklet status : 0x%lx\n", dhdinfo->tasklet.state));
+ return ifp->block_ping;
}
-/*
- * DHD RING
- */
-#define DHD_RING_ERR_INTERNAL(fmt, ...) DHD_ERROR(("EWPF-" fmt, ##__VA_ARGS__))
-#define DHD_RING_TRACE_INTERNAL(fmt, ...) DHD_INFO(("EWPF-" fmt, ##__VA_ARGS__))
+int dhd_set_block_ping_status(dhd_pub_t *dhdp, uint32 idx, int val)
+{
+ dhd_info_t *dhd = dhdp->info;
+ dhd_if_t *ifp;
+ ASSERT(idx < DHD_MAX_IFS);
+ ifp = dhd->iflist[idx];
-#define DHD_RING_ERR(x) DHD_RING_ERR_INTERNAL x
-#define DHD_RING_TRACE(x) DHD_RING_TRACE_INTERNAL x
+ ASSERT(ifp);
-#define DHD_RING_MAGIC 0x20170910
-#define DHD_RING_IDX_INVALID 0xffffffff
+ ifp->block_ping = val;
+ /* Disable rx_pkt_chain feature for interface if block_ping option is
+ * enabled
+ */
+ dhd_update_rx_pkt_chainable_state(dhdp, idx);
+ return BCME_OK;
+}
-#define DHD_RING_SYNC_LOCK_INIT(osh) dhd_os_spin_lock_init(osh)
-#define DHD_RING_SYNC_LOCK_DEINIT(osh, lock) dhd_os_spin_lock_deinit(osh, lock)
-#define DHD_RING_SYNC_LOCK(lock, flags) (flags) = dhd_os_spin_lock(lock)
-#define DHD_RING_SYNC_UNLOCK(lock, flags) dhd_os_spin_unlock(lock, flags)
+int dhd_get_grat_arp_status(dhd_pub_t *dhdp, uint32 idx)
+{
+ dhd_info_t *dhd = dhdp->info;
+ dhd_if_t *ifp;
-typedef struct {
- uint32 elem_size;
- uint32 elem_cnt;
- uint32 write_idx; /* next write index, -1 : not started */
- uint32 read_idx; /* next read index, -1 : not start */
+ ASSERT(idx < DHD_MAX_IFS);
- /* protected elements during serialization */
- int lock_idx; /* start index of locked, element will not be overried */
- int lock_count; /* number of locked, from lock idx */
+ ifp = dhd->iflist[idx];
- /* saved data elements */
- void *elem;
-} dhd_fixed_ring_info_t;
+ ASSERT(ifp);
-typedef struct {
- uint32 elem_size;
- uint32 elem_cnt;
- uint32 idx; /* -1 : not started */
- uint32 rsvd; /* reserved for future use */
+ return ifp->grat_arp;
+}
- /* protected elements during serialization */
- atomic_t ring_locked;
- /* check the overwriting */
- uint32 ring_overwrited;
+int dhd_set_grat_arp_status(dhd_pub_t *dhdp, uint32 idx, int val)
+{
+ dhd_info_t *dhd = dhdp->info;
+ dhd_if_t *ifp;
+ ASSERT(idx < DHD_MAX_IFS);
+ ifp = dhd->iflist[idx];
- /* saved data elements */
- void *elem;
-} dhd_singleidx_ring_info_t;
+ ASSERT(ifp);
-typedef struct {
- uint32 magic;
- uint32 type;
- void *ring_sync; /* spinlock for sync */
- union {
- dhd_fixed_ring_info_t fixed;
- dhd_singleidx_ring_info_t single;
- };
-} dhd_ring_info_t;
+ ifp->grat_arp = val;
-uint32
-dhd_ring_get_hdr_size(void)
-{
- return sizeof(dhd_ring_info_t);
+ return BCME_OK;
}
+#endif /* DHD_L2_FILTER */
-void *
-dhd_ring_init(dhd_pub_t *dhdp, uint8 *buf, uint32 buf_size, uint32 elem_size,
- uint32 elem_cnt, uint32 type)
-{
- dhd_ring_info_t *ret_ring;
- if (!buf) {
- DHD_RING_ERR(("NO RING BUFFER\n"));
- return NULL;
- }
+#if defined(SET_RPS_CPUS)
+int dhd_rps_cpus_enable(struct net_device *net, int enable)
+{
+ dhd_info_t *dhd = DHD_DEV_INFO(net);
+ dhd_if_t *ifp;
+ int ifidx;
+ char * RPS_CPU_SETBUF;
- if (buf_size < dhd_ring_get_hdr_size() + elem_size * elem_cnt) {
- DHD_RING_ERR(("RING SIZE IS TOO SMALL\n"));
- return NULL;
+ ifidx = dhd_net2idx(dhd, net);
+ if (ifidx == DHD_BAD_IF) {
+ DHD_ERROR(("%s bad ifidx\n", __FUNCTION__));
+ return -ENODEV;
}
- if (type != DHD_RING_TYPE_FIXED && type != DHD_RING_TYPE_SINGLE_IDX) {
- DHD_RING_ERR(("UNSUPPORTED RING TYPE\n"));
- return NULL;
+ if (ifidx == PRIMARY_INF) {
+ if (dhd->pub.op_mode == DHD_FLAG_IBSS_MODE) {
+ DHD_INFO(("%s : set for IBSS.\n", __FUNCTION__));
+ RPS_CPU_SETBUF = RPS_CPUS_MASK_IBSS;
+ } else {
+ DHD_INFO(("%s : set for BSS.\n", __FUNCTION__));
+ RPS_CPU_SETBUF = RPS_CPUS_MASK;
+ }
+ } else if (ifidx == VIRTUAL_INF) {
+ DHD_INFO(("%s : set for P2P.\n", __FUNCTION__));
+ RPS_CPU_SETBUF = RPS_CPUS_MASK_P2P;
+ } else {
+ DHD_ERROR(("%s : Invalid index : %d.\n", __FUNCTION__, ifidx));
+ return -EINVAL;
}
- ret_ring = (dhd_ring_info_t *)buf;
- ret_ring->type = type;
- ret_ring->ring_sync = DHD_RING_SYNC_LOCK_INIT(dhdp->osh);
- ret_ring->magic = DHD_RING_MAGIC;
-
- if (type == DHD_RING_TYPE_FIXED) {
- ret_ring->fixed.read_idx = DHD_RING_IDX_INVALID;
- ret_ring->fixed.write_idx = DHD_RING_IDX_INVALID;
- ret_ring->fixed.lock_idx = DHD_RING_IDX_INVALID;
- ret_ring->fixed.elem = buf + sizeof(dhd_ring_info_t);
- ret_ring->fixed.elem_size = elem_size;
- ret_ring->fixed.elem_cnt = elem_cnt;
+ ifp = dhd->iflist[ifidx];
+ if (ifp) {
+ if (enable) {
+ DHD_INFO(("%s : set rps_cpus as [%s]\n", __FUNCTION__, RPS_CPU_SETBUF));
+ custom_rps_map_set(ifp->net->_rx, RPS_CPU_SETBUF, strlen(RPS_CPU_SETBUF));
+ } else {
+ custom_rps_map_clear(ifp->net->_rx);
+ }
} else {
- ret_ring->single.idx = DHD_RING_IDX_INVALID;
- atomic_set(&ret_ring->single.ring_locked, 0);
- ret_ring->single.ring_overwrited = 0;
- ret_ring->single.rsvd = 0;
- ret_ring->single.elem = buf + sizeof(dhd_ring_info_t);
- ret_ring->single.elem_size = elem_size;
- ret_ring->single.elem_cnt = elem_cnt;
+ DHD_ERROR(("%s : ifp is NULL!!\n", __FUNCTION__));
+ return -ENODEV;
}
-
- return ret_ring;
+ return BCME_OK;
}
-void
-dhd_ring_deinit(dhd_pub_t *dhdp, void *_ring)
+int custom_rps_map_set(struct netdev_rx_queue *queue, char *buf, size_t len)
{
- dhd_ring_info_t *ring = (dhd_ring_info_t *)_ring;
- if (!ring) {
- return;
- }
+ struct rps_map *old_map, *map;
+ cpumask_var_t mask;
+ int err, cpu, i;
+ static DEFINE_SPINLOCK(rps_map_lock);
- if (ring->magic != DHD_RING_MAGIC) {
- return;
- }
+ DHD_INFO(("%s : Entered.\n", __FUNCTION__));
- if (ring->type != DHD_RING_TYPE_FIXED &&
- ring->type != DHD_RING_TYPE_SINGLE_IDX) {
- return;
+ if (!alloc_cpumask_var(&mask, GFP_KERNEL)) {
+ DHD_ERROR(("%s : alloc_cpumask_var fail.\n", __FUNCTION__));
+ return -ENOMEM;
}
- DHD_RING_SYNC_LOCK_DEINIT(dhdp->osh, ring->ring_sync);
- ring->ring_sync = NULL;
- if (ring->type == DHD_RING_TYPE_FIXED) {
- dhd_fixed_ring_info_t *fixed = &ring->fixed;
- memset(fixed->elem, 0, fixed->elem_size * fixed->elem_cnt);
- fixed->elem_size = fixed->elem_cnt = 0;
- } else {
- dhd_singleidx_ring_info_t *single = &ring->single;
- memset(single->elem, 0, single->elem_size * single->elem_cnt);
- single->elem_size = single->elem_cnt = 0;
- }
- ring->type = 0;
- ring->magic = 0;
-}
-
-static inline uint32
-__dhd_ring_ptr2idx(void *ring, void *ptr, char *sig, uint32 type)
-{
- uint32 diff;
- uint32 ret_idx = (uint32)DHD_RING_IDX_INVALID;
- uint32 elem_size, elem_cnt;
- void *elem;
-
- if (type == DHD_RING_TYPE_FIXED) {
- dhd_fixed_ring_info_t *fixed = (dhd_fixed_ring_info_t *)ring;
- elem_size = fixed->elem_size;
- elem_cnt = fixed->elem_cnt;
- elem = fixed->elem;
- } else if (type == DHD_RING_TYPE_SINGLE_IDX) {
- dhd_singleidx_ring_info_t *single = (dhd_singleidx_ring_info_t *)ring;
- elem_size = single->elem_size;
- elem_cnt = single->elem_cnt;
- elem = single->elem;
- } else {
- DHD_RING_ERR(("UNSUPPORTED RING TYPE %d\n", type));
- return ret_idx;
+ err = bitmap_parse(buf, len, cpumask_bits(mask), nr_cpumask_bits);
+ if (err) {
+ free_cpumask_var(mask);
+ DHD_ERROR(("%s : bitmap_parse fail.\n", __FUNCTION__));
+ return err;
}
- if (ptr < elem) {
- DHD_RING_ERR(("INVALID POINTER %s:%p, ring->elem:%p\n", sig, ptr, elem));
- return ret_idx;
- }
- diff = (uint32)((uint8 *)ptr - (uint8 *)elem);
- if (diff % elem_size != 0) {
- DHD_RING_ERR(("INVALID POINTER %s:%p, ring->elem:%p\n", sig, ptr, elem));
- return ret_idx;
- }
- ret_idx = diff / elem_size;
- if (ret_idx >= elem_cnt) {
- DHD_RING_ERR(("INVALID POINTER max:%d cur:%d\n", elem_cnt, ret_idx));
+ map = kzalloc(max_t(unsigned int,
+ RPS_MAP_SIZE(cpumask_weight(mask)), L1_CACHE_BYTES),
+ GFP_KERNEL);
+ if (!map) {
+ free_cpumask_var(mask);
+ DHD_ERROR(("%s : map malloc fail.\n", __FUNCTION__));
+ return -ENOMEM;
}
- return ret_idx;
-}
-/* Sub functions for fixed ring */
-/* get counts between two indexes of ring buffer (internal only) */
-static inline int
-__dhd_fixed_ring_get_count(dhd_fixed_ring_info_t *ring, int start, int end)
-{
- if (start == DHD_RING_IDX_INVALID || end == DHD_RING_IDX_INVALID) {
- return 0;
+ i = 0;
+ for_each_cpu(cpu, mask) {
+ map->cpus[i++] = cpu;
}
- return (ring->elem_cnt + end - start) % ring->elem_cnt + 1;
-}
-
-static inline int
-__dhd_fixed_ring_get_cur_size(dhd_fixed_ring_info_t *ring)
-{
- return __dhd_fixed_ring_get_count(ring, ring->read_idx, ring->write_idx);
-}
-
-static inline void *
-__dhd_fixed_ring_get_first(dhd_fixed_ring_info_t *ring)
-{
- if (ring->read_idx == DHD_RING_IDX_INVALID) {
- return NULL;
+ if (i) {
+ map->len = i;
+ } else {
+ kfree(map);
+ map = NULL;
+ free_cpumask_var(mask);
+ DHD_ERROR(("%s : mapping cpu fail.\n", __FUNCTION__));
+ return -1;
}
- return (uint8 *)ring->elem + (ring->elem_size * ring->read_idx);
-}
-static inline void
-__dhd_fixed_ring_free_first(dhd_fixed_ring_info_t *ring)
-{
- uint32 next_idx;
+ spin_lock(&rps_map_lock);
+ old_map = rcu_dereference_protected(queue->rps_map,
+ lockdep_is_held(&rps_map_lock));
+ rcu_assign_pointer(queue->rps_map, map);
+ spin_unlock(&rps_map_lock);
- if (ring->read_idx == DHD_RING_IDX_INVALID) {
- DHD_RING_ERR(("EMPTY RING\n"));
- return;
+ if (map) {
+ static_key_slow_inc(&rps_needed);
}
-
- next_idx = (ring->read_idx + 1) % ring->elem_cnt;
- if (ring->read_idx == ring->write_idx) {
- /* Become empty */
- ring->read_idx = ring->write_idx = DHD_RING_IDX_INVALID;
- return;
+ if (old_map) {
+ kfree_rcu(old_map, rcu);
+ static_key_slow_dec(&rps_needed);
}
+ free_cpumask_var(mask);
- ring->read_idx = next_idx;
- return;
+ DHD_INFO(("%s : Done. mapping cpu nummber : %d\n", __FUNCTION__, map->len));
+ return map->len;
}
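
For reference, custom_rps_map_set() above hands the mask string to bitmap_parse(), which interprets it as a hexadecimal CPU mask. The standalone sketch below only mimics that interpretation in user space to show which CPUs a given string would select; it is not driver code, the mask string "6" is hypothetical, and the real RPS_CPUS_MASK* strings are defined elsewhere in the driver (a single-word mask is assumed here).

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	const char *mask = "6";	/* hypothetical RPS mask string */
	unsigned long bits = strtoul(mask, NULL, 16);	/* bitmap_parse() also reads hex */
	int cpu;

	/* "6" = 0b0110 selects CPUs 1 and 2, so map->cpus[] would become {1, 2} */
	printf("mask \"%s\" steers RX processing to CPUs:", mask);
	for (cpu = 0; cpu < (int)(8 * sizeof(bits)); cpu++) {
		if (bits & (1UL << cpu)) {
			printf(" %d", cpu);
		}
	}
	printf("\n");
	return 0;
}
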
-static inline void *
-__dhd_fixed_ring_get_last(dhd_fixed_ring_info_t *ring)
+void custom_rps_map_clear(struct netdev_rx_queue *queue)
{
- if (ring->read_idx == DHD_RING_IDX_INVALID) {
- return NULL;
- }
- return (uint8 *)ring->elem + (ring->elem_size * ring->write_idx);
-}
+ struct rps_map *map;
-static inline void *
-__dhd_fixed_ring_get_empty(dhd_fixed_ring_info_t *ring)
-{
- uint32 tmp_idx;
+ DHD_INFO(("%s : Entered.\n", __FUNCTION__));
- if (ring->read_idx == DHD_RING_IDX_INVALID) {
- ring->read_idx = ring->write_idx = 0;
- return (uint8 *)ring->elem;
+ map = rcu_dereference_protected(queue->rps_map, 1);
+ if (map) {
+ RCU_INIT_POINTER(queue->rps_map, NULL);
+ kfree_rcu(map, rcu);
+ DHD_INFO(("%s : rps_cpus map clear.\n", __FUNCTION__));
}
+}
+#endif
- /* check next index is not locked */
- tmp_idx = (ring->write_idx + 1) % ring->elem_cnt;
- if (ring->lock_idx == tmp_idx) {
- return NULL;
- }
- ring->write_idx = tmp_idx;
- if (ring->write_idx == ring->read_idx) {
- /* record is full, drop oldest one */
- ring->read_idx = (ring->read_idx + 1) % ring->elem_cnt;
- }
- return (uint8 *)ring->elem + (ring->elem_size * ring->write_idx);
-}
+#ifdef DHD_DEBUG_PAGEALLOC
-static inline void *
-__dhd_fixed_ring_get_next(dhd_fixed_ring_info_t *ring, void *prev, uint32 type)
+void
+dhd_page_corrupt_cb(void *handle, void *addr_corrupt, size_t len)
{
- uint32 cur_idx;
+ dhd_pub_t *dhdp = (dhd_pub_t *)handle;
- if (ring->read_idx == DHD_RING_IDX_INVALID) {
- DHD_RING_ERR(("EMPTY RING\n"));
- return NULL;
- }
+ DHD_ERROR(("%s: Got dhd_page_corrupt_cb 0x%p %d\n",
+ __FUNCTION__, addr_corrupt, (uint32)len));
- cur_idx = __dhd_ring_ptr2idx(ring, prev, "NEXT", type);
- if (cur_idx >= ring->elem_cnt) {
- return NULL;
- }
+ DHD_OS_WAKE_LOCK(dhdp);
+ prhex("Page Corruption:", addr_corrupt, len);
+ dhd_dump_to_kernelog(dhdp);
+#if defined(BCMPCIE) && defined(DHD_FW_COREDUMP)
+ /* Load the dongle side dump to host memory and then BUG_ON() */
+ dhdp->memdump_enabled = DUMP_MEMONLY;
+ dhdp->memdump_type = DUMP_TYPE_MEMORY_CORRUPTION;
+ dhd_bus_mem_dump(dhdp);
+#endif /* BCMPCIE && DHD_FW_COREDUMP */
+ DHD_OS_WAKE_UNLOCK(dhdp);
+}
+EXPORT_SYMBOL(dhd_page_corrupt_cb);
+#endif /* DHD_DEBUG_PAGEALLOC */
- if (cur_idx == ring->write_idx) {
- /* no more new record */
- return NULL;
+#if defined(BCMPCIE) && defined(DHD_PKTID_AUDIT_ENABLED)
+void
+dhd_pktid_error_handler(dhd_pub_t *dhdp)
+{
+ DHD_ERROR(("%s: Got Pkt Id Audit failure \n", __FUNCTION__));
+ DHD_OS_WAKE_LOCK(dhdp);
+ dhd_dump_to_kernelog(dhdp);
+#ifdef DHD_FW_COREDUMP
+ /* Load the dongle side dump to host memory */
+ if (dhdp->memdump_enabled == DUMP_DISABLED) {
+ dhdp->memdump_enabled = DUMP_MEMFILE;
}
-
- cur_idx = (cur_idx + 1) % ring->elem_cnt;
- return (uint8 *)ring->elem + ring->elem_size * cur_idx;
+ dhdp->memdump_type = DUMP_TYPE_PKTID_AUDIT_FAILURE;
+ dhd_bus_mem_dump(dhdp);
+#endif /* DHD_FW_COREDUMP */
+ dhdp->hang_reason = HANG_REASON_PCIE_PKTID_ERROR;
+ dhd_os_check_hang(dhdp, 0, -EREMOTEIO);
+ DHD_OS_WAKE_UNLOCK(dhdp);
}
+#endif /* BCMPCIE && DHD_PKTID_AUDIT_ENABLED */
-static inline void *
-__dhd_fixed_ring_get_prev(dhd_fixed_ring_info_t *ring, void *prev, uint32 type)
+struct net_device *
+dhd_linux_get_primary_netdev(dhd_pub_t *dhdp)
{
- uint32 cur_idx;
+ dhd_info_t *dhd = dhdp->info;
- if (ring->read_idx == DHD_RING_IDX_INVALID) {
- DHD_RING_ERR(("EMPTY RING\n"));
- return NULL;
- }
- cur_idx = __dhd_ring_ptr2idx(ring, prev, "PREV", type);
- if (cur_idx >= ring->elem_cnt) {
- return NULL;
- }
- if (cur_idx == ring->read_idx) {
- /* no more new record */
+ if (dhd->iflist[0] && dhd->iflist[0]->net)
+ return dhd->iflist[0]->net;
+ else
return NULL;
- }
-
- cur_idx = (cur_idx + ring->elem_cnt - 1) % ring->elem_cnt;
- return (uint8 *)ring->elem + ring->elem_size * cur_idx;
}
-static inline void
-__dhd_fixed_ring_lock(dhd_fixed_ring_info_t *ring, void *first_ptr, void *last_ptr, uint32 type)
+#ifdef DHD_DHCP_DUMP
+static void
+dhd_dhcp_dump(char *ifname, uint8 *pktdata, bool tx)
{
- uint32 first_idx;
- uint32 last_idx;
- uint32 ring_filled_cnt;
- uint32 tmp_cnt;
-
- if (ring->read_idx == DHD_RING_IDX_INVALID) {
- DHD_RING_ERR(("EMPTY RING\n"));
- return;
- }
-
- if (first_ptr) {
- first_idx = __dhd_ring_ptr2idx(ring, first_ptr, "LCK FIRST", type);
- if (first_idx >= ring->elem_cnt) {
- return;
- }
- } else {
- first_idx = ring->read_idx;
- }
-
- if (last_ptr) {
- last_idx = __dhd_ring_ptr2idx(ring, last_ptr, "LCK LAST", type);
- if (last_idx >= ring->elem_cnt) {
- return;
- }
- } else {
- last_idx = ring->write_idx;
- }
+ struct bootp_fmt *b = (struct bootp_fmt *) &pktdata[ETHER_HDR_LEN];
+ struct iphdr *h = &b->ip_header;
+ uint8 *ptr, *opt, *end = (uint8 *) b + ntohs(b->ip_header.tot_len);
+ int dhcp_type = 0, len, opt_len;
- ring_filled_cnt = __dhd_fixed_ring_get_count(ring, ring->read_idx, ring->write_idx);
- tmp_cnt = __dhd_fixed_ring_get_count(ring, ring->read_idx, first_idx);
- if (tmp_cnt > ring_filled_cnt) {
- DHD_RING_ERR(("LOCK FIRST IS TO EMPTY ELEM: write: %d read: %d cur:%d\n",
- ring->write_idx, ring->read_idx, first_idx));
+ /* check IP header */
+ if (h->ihl != 5 || h->version != 4 || h->protocol != IPPROTO_UDP) {
return;
}
- tmp_cnt = __dhd_fixed_ring_get_count(ring, ring->read_idx, last_idx);
- if (tmp_cnt > ring_filled_cnt) {
- DHD_RING_ERR(("LOCK LAST IS TO EMPTY ELEM: write: %d read: %d cur:%d\n",
- ring->write_idx, ring->read_idx, last_idx));
+ /* check UDP port for bootp (67, 68) */
+ if (b->udp_header.source != htons(67) && b->udp_header.source != htons(68) &&
+ b->udp_header.dest != htons(67) && b->udp_header.dest != htons(68)) {
return;
}
- ring->lock_idx = first_idx;
- ring->lock_count = __dhd_fixed_ring_get_count(ring, first_idx, last_idx);
- return;
-}
-
-static inline void
-__dhd_fixed_ring_lock_free(dhd_fixed_ring_info_t *ring)
-{
- if (ring->read_idx == DHD_RING_IDX_INVALID) {
- DHD_RING_ERR(("EMPTY RING\n"));
+ /* check header length */
+ if (ntohs(h->tot_len) < ntohs(b->udp_header.len) + sizeof(struct iphdr)) {
return;
}
- ring->lock_idx = DHD_RING_IDX_INVALID;
- ring->lock_count = 0;
- return;
-}
-static inline void *
-__dhd_fixed_ring_lock_get_first(dhd_fixed_ring_info_t *ring)
-{
- if (ring->read_idx == DHD_RING_IDX_INVALID) {
- DHD_RING_ERR(("EMPTY RING\n"));
- return NULL;
- }
- if (ring->lock_idx == DHD_RING_IDX_INVALID) {
- DHD_RING_ERR(("NO LOCK POINT\n"));
- return NULL;
- }
- return (uint8 *)ring->elem + ring->elem_size * ring->lock_idx;
-}
-
-static inline void *
-__dhd_fixed_ring_lock_get_last(dhd_fixed_ring_info_t *ring)
-{
- int lock_last_idx;
- if (ring->read_idx == DHD_RING_IDX_INVALID) {
- DHD_RING_ERR(("EMPTY RING\n"));
- return NULL;
- }
- if (ring->lock_idx == DHD_RING_IDX_INVALID) {
- DHD_RING_ERR(("NO LOCK POINT\n"));
- return NULL;
- }
-
- lock_last_idx = (ring->lock_idx + ring->lock_count - 1) % ring->elem_cnt;
- return (uint8 *)ring->elem + ring->elem_size * lock_last_idx;
-}
+ len = ntohs(b->udp_header.len) - sizeof(struct udphdr);
+ opt_len = len
+ - (sizeof(*b) - sizeof(struct iphdr) - sizeof(struct udphdr) - sizeof(b->options));
-static inline int
-__dhd_fixed_ring_lock_get_count(dhd_fixed_ring_info_t *ring)
-{
- if (ring->read_idx == DHD_RING_IDX_INVALID) {
- DHD_RING_ERR(("EMPTY RING\n"));
- return BCME_ERROR;
- }
- if (ring->lock_idx == DHD_RING_IDX_INVALID) {
- DHD_RING_ERR(("NO LOCK POINT\n"));
- return BCME_ERROR;
+ /* parse bootp options */
+ if (opt_len >= 4 && !memcmp(b->options, bootp_magic_cookie, 4)) {
+ ptr = &b->options[4];
+ while (ptr < end && *ptr != 0xff) {
+ opt = ptr++;
+ if (*opt == 0) {
+ continue;
+ }
+ ptr += *ptr + 1;
+ if (ptr >= end) {
+ break;
+ }
+ /* 53 is dhcp type */
+ if (*opt == 53) {
+ if (opt[1]) {
+ dhcp_type = opt[2];
+ DHD_ERROR(("DHCP[%s] - %s [%s] [%s]\n",
+ ifname, dhcp_types[dhcp_type],
+ tx ? "TX" : "RX", dhcp_ops[b->op]));
+ break;
+ }
+ }
+ }
}
- return ring->lock_count;
}
+#endif /* DHD_DHCP_DUMP */
-static inline void
-__dhd_fixed_ring_lock_free_first(dhd_fixed_ring_info_t *ring)
+#ifdef DHD_ICMP_DUMP
+static void
+dhd_icmp_dump(char *ifname, uint8 *pktdata, bool tx)
{
- if (ring->read_idx == DHD_RING_IDX_INVALID) {
- DHD_RING_ERR(("EMPTY RING\n"));
- return;
- }
- if (ring->lock_idx == DHD_RING_IDX_INVALID) {
- DHD_RING_ERR(("NO LOCK POINT\n"));
+ uint8 *pkt = (uint8 *)&pktdata[ETHER_HDR_LEN];
+ struct iphdr *iph = (struct iphdr *)pkt;
+ struct icmphdr *icmph;
+
+ /* check IP header */
+ if (iph->ihl != 5 || iph->version != 4 || iph->protocol != IP_PROT_ICMP) {
return;
}
- ring->lock_count--;
- if (ring->lock_count <= 0) {
- ring->lock_idx = DHD_RING_IDX_INVALID;
+ icmph = (struct icmphdr *)((uint8 *)pkt + sizeof(struct iphdr));
+ if (icmph->type == ICMP_ECHO) {
+ DHD_ERROR(("PING REQUEST[%s] [%s] : SEQNUM=%d\n",
+ ifname, tx ? "TX" : "RX", ntoh16(icmph->un.echo.sequence)));
+ } else if (icmph->type == ICMP_ECHOREPLY) {
+ DHD_ERROR(("PING REPLY[%s] [%s] : SEQNUM=%d\n",
+ ifname, tx ? "TX" : "RX", ntoh16(icmph->un.echo.sequence)));
} else {
- ring->lock_idx = (ring->lock_idx + 1) % ring->elem_cnt;
+ DHD_ERROR(("ICMP [%s] [%s] : TYPE=%d, CODE=%d\n",
+ ifname, tx ? "TX" : "RX", icmph->type, icmph->code));
}
- return;
-}
-
-static inline void
-__dhd_fixed_ring_set_read_idx(dhd_fixed_ring_info_t *ring, uint32 idx)
-{
- ring->read_idx = idx;
}
+#endif /* DHD_ICMP_DUMP */
-static inline void
-__dhd_fixed_ring_set_write_idx(dhd_fixed_ring_info_t *ring, uint32 idx)
+#ifdef SHOW_LOGTRACE
+void
+dhd_get_read_buf_ptr(dhd_pub_t *dhd_pub, trace_buf_info_t *trace_buf_info)
{
- ring->write_idx = idx;
-}
+ dhd_dbg_ring_status_t ring_status;
+ uint32 rlen;
-static inline uint32
-__dhd_fixed_ring_get_read_idx(dhd_fixed_ring_info_t *ring)
-{
- return ring->read_idx;
+ rlen = dhd_dbg_ring_pull_single(dhd_pub, FW_VERBOSE_RING_ID, trace_buf_info->buf,
+ TRACE_LOG_BUF_MAX_SIZE, TRUE);
+ trace_buf_info->size = rlen;
+ trace_buf_info->availability = NEXT_BUF_NOT_AVAIL;
+ if (rlen == 0) {
+ trace_buf_info->availability = BUF_NOT_AVAILABLE;
+ return;
+ }
+ dhd_dbg_get_ring_status(dhd_pub, FW_VERBOSE_RING_ID, &ring_status);
+ if (ring_status.written_bytes != ring_status.read_bytes) {
+ trace_buf_info->availability = NEXT_BUF_AVAIL;
+ }
}
+#endif /* SHOW_LOGTRACE */
-static inline uint32
-__dhd_fixed_ring_get_write_idx(dhd_fixed_ring_info_t *ring)
+bool
+dhd_fw_download_status(dhd_pub_t * dhd_pub)
{
- return ring->write_idx;
+ return dhd_pub->fw_download_done;
}
-/* Sub functions for single index ring */
-static inline void *
-__dhd_singleidx_ring_get_first(dhd_singleidx_ring_info_t *ring)
+int
+dhd_create_to_notifier_skt(void)
{
- uint32 tmp_idx = 0;
-
- if (ring->idx == DHD_RING_IDX_INVALID) {
- return NULL;
- }
-
- if (ring->ring_overwrited) {
- tmp_idx = (ring->idx + 1) % ring->elem_cnt;
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0))
+ /* From kernel 3.7 onwards this API accepts only 3 arguments. */
+ /* Kernel version 3.6 is a special case which accepts 4 arguments. */
+ nl_to_event_sk = netlink_kernel_create(&init_net, BCM_NL_USER, &g_cfg);
+#elif (LINUX_VERSION_CODE < KERNEL_VERSION(3, 6, 0))
+ /* Kernel version 3.5 and below use this old API format */
+ nl_to_event_sk = netlink_kernel_create(&init_net, BCM_NL_USER, 0,
+ dhd_process_daemon_msg, NULL, THIS_MODULE);
+#else
+ nl_to_event_sk = netlink_kernel_create(&init_net, BCM_NL_USER, THIS_MODULE, &g_cfg);
+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0)) */
+ if (!nl_to_event_sk)
+ {
+ printf("Error creating socket.\n");
+ return -1;
}
-
- return (uint8 *)ring->elem + (ring->elem_size * tmp_idx);
+ DHD_INFO(("nl_to socket created successfully...\n"));
+ return 0;
}
-static inline void *
-__dhd_singleidx_ring_get_last(dhd_singleidx_ring_info_t *ring)
+void
+dhd_destroy_to_notifier_skt(void)
{
- if (ring->idx == DHD_RING_IDX_INVALID) {
- return NULL;
+ DHD_INFO(("Destroying nl_to socket\n"));
+ if (nl_to_event_sk) {
+ netlink_kernel_release(nl_to_event_sk);
}
-
- return (uint8 *)ring->elem + (ring->elem_size * ring->idx);
}
-static inline void *
-__dhd_singleidx_ring_get_empty(dhd_singleidx_ring_info_t *ring)
+static void
+dhd_recv_msg_from_daemon(struct sk_buff *skb)
{
- if (ring->idx == DHD_RING_IDX_INVALID) {
- ring->idx = 0;
- return (uint8 *)ring->elem;
- }
-
- /* check the lock is held */
- if (atomic_read(&ring->ring_locked)) {
- return NULL;
- }
+ struct nlmsghdr *nlh;
+ bcm_to_info_t *cmd;
- /* check the index rollover */
- if (!ring->ring_overwrited && ring->idx == (ring->elem_cnt - 1)) {
- ring->ring_overwrited = 1;
+ nlh = (struct nlmsghdr *)skb->data;
+ cmd = (bcm_to_info_t *)nlmsg_data(nlh);
+ if ((cmd->magic == BCM_TO_MAGIC) && (cmd->reason == REASON_DAEMON_STARTED)) {
+ sender_pid = ((struct nlmsghdr *)(skb->data))->nlmsg_pid;
+ DHD_INFO(("DHD Daemon Started\n"));
}
-
- ring->idx = (ring->idx + 1) % ring->elem_cnt;
-
- return (uint8 *)ring->elem + (ring->elem_size * ring->idx);
}
-static inline void *
-__dhd_singleidx_ring_get_next(dhd_singleidx_ring_info_t *ring, void *prev, uint32 type)
+int
+dhd_send_msg_to_daemon(struct sk_buff *skb, void *data, int size)
{
- uint32 cur_idx;
+ struct nlmsghdr *nlh;
+ struct sk_buff *skb_out;
- if (ring->idx == DHD_RING_IDX_INVALID) {
- DHD_RING_ERR(("EMPTY RING\n"));
- return NULL;
+ if (!nl_to_event_sk) {
+ DHD_INFO(("No socket available\n"));
+ return -1;
}
- cur_idx = __dhd_ring_ptr2idx(ring, prev, "NEXT", type);
- if (cur_idx >= ring->elem_cnt) {
- return NULL;
+ BCM_REFERENCE(skb);
+ if (sender_pid == 0) {
+ DHD_INFO(("Invalid PID 0\n"));
+ return -1;
}
- if (cur_idx == ring->idx) {
- /* no more new record */
- return NULL;
+ if ((skb_out = nlmsg_new(size, 0)) == NULL) {
+ DHD_ERROR(("%s: skb alloc failed\n", __FUNCTION__));
+ return -1;
}
+ nlh = nlmsg_put(skb_out, 0, 0, NLMSG_DONE, size, 0);
+ NETLINK_CB(skb_out).dst_group = 0; /* Unicast */
+ memcpy(nlmsg_data(nlh), (char *)data, size);
- cur_idx = (cur_idx + 1) % ring->elem_cnt;
-
- return (uint8 *)ring->elem + ring->elem_size * cur_idx;
+ if ((nlmsg_unicast(nl_to_event_sk, skb_out, sender_pid)) < 0) {
+ DHD_INFO(("Error sending message\n"));
+ }
+ return 0;
}
-static inline void *
-__dhd_singleidx_ring_get_prev(dhd_singleidx_ring_info_t *ring, void *prev, uint32 type)
-{
- uint32 cur_idx;
-
- if (ring->idx == DHD_RING_IDX_INVALID) {
- DHD_RING_ERR(("EMPTY RING\n"));
- return NULL;
- }
- cur_idx = __dhd_ring_ptr2idx(ring, prev, "PREV", type);
- if (cur_idx >= ring->elem_cnt) {
- return NULL;
- }
- if (!ring->ring_overwrited && cur_idx == 0) {
- /* no more new record */
- return NULL;
- }
+static void
+dhd_process_daemon_msg(struct sk_buff *skb)
+{
+ bcm_to_info_t to_info;
- cur_idx = (cur_idx + ring->elem_cnt - 1) % ring->elem_cnt;
- if (ring->ring_overwrited && cur_idx == ring->idx) {
- /* no more new record */
- return NULL;
- }
+ to_info.magic = BCM_TO_MAGIC;
+ to_info.reason = REASON_DAEMON_STARTED;
+ to_info.trap = NO_TRAP;
- return (uint8 *)ring->elem + ring->elem_size * cur_idx;
+ dhd_recv_msg_from_daemon(skb);
+ dhd_send_msg_to_daemon(skb, &to_info, sizeof(to_info));
}
-static inline void
-__dhd_singleidx_ring_whole_lock(dhd_singleidx_ring_info_t *ring)
+#ifdef REPORT_FATAL_TIMEOUTS
+static void
+dhd_send_trap_to_fw(dhd_pub_t * pub, int reason, int trap)
{
- if (!atomic_read(&ring->ring_locked)) {
- atomic_set(&ring->ring_locked, 1);
- }
-}
+ bcm_to_info_t to_info;
-static inline void
-__dhd_singleidx_ring_whole_unlock(dhd_singleidx_ring_info_t *ring)
-{
- if (atomic_read(&ring->ring_locked)) {
- atomic_set(&ring->ring_locked, 0);
- }
+ to_info.magic = BCM_TO_MAGIC;
+ to_info.reason = reason;
+ to_info.trap = trap;
+
+ DHD_ERROR(("Sending Event reason:%d trap:%d\n", reason, trap));
+ dhd_send_msg_to_daemon(NULL, (void *)&to_info, sizeof(bcm_to_info_t));
}
-/* Get first element : oldest element */
-void *
-dhd_ring_get_first(void *_ring)
+void
+dhd_send_trap_to_fw_for_timeout(dhd_pub_t * pub, timeout_reasons_t reason)
+{
+ int to_reason;
+ int trap = NO_TRAP;
+ switch (reason) {
+ case DHD_REASON_COMMAND_TO:
+ to_reason = REASON_COMMAND_TO;
+ trap = DO_TRAP;
+ break;
+ case DHD_REASON_JOIN_TO:
+ to_reason = REASON_JOIN_TO;
+ break;
+ case DHD_REASON_SCAN_TO:
+ to_reason = REASON_SCAN_TO;
+ break;
+ case DHD_REASON_OQS_TO:
+ to_reason = REASON_OQS_TO;
+ trap = DO_TRAP;
+ break;
+ default:
+ to_reason = REASON_UNKOWN;
+ }
+ dhd_send_trap_to_fw(pub, to_reason, trap);
+}
+#endif /* REPORT_FATAL_TIMEOUTS */
+
+#ifdef DHD_LOG_DUMP
+void
+dhd_log_dump_init(dhd_pub_t *dhd)
{
- dhd_ring_info_t *ring = (dhd_ring_info_t *)_ring;
- void *ret = NULL;
- unsigned long flags;
+ struct dhd_log_dump_buf *dld_buf;
+ int i = 0;
+#if defined(CONFIG_DHD_USE_STATIC_BUF) && defined(DHD_USE_STATIC_MEMDUMP)
+ int prealloc_idx = DHD_PREALLOC_DHD_LOG_DUMP_BUF;
+#endif /* CONFIG_DHD_USE_STATIC_BUF && DHD_USE_STATIC_MEMDUMP */
- if (!ring || ring->magic != DHD_RING_MAGIC) {
- DHD_RING_ERR(("%s :INVALID RING INFO\n", __FUNCTION__));
- return NULL;
- }
+ for (i = 0; i < DLD_BUFFER_NUM; i++) {
+ dld_buf = &g_dld_buf[i];
+ spin_lock_init(&dld_buf->lock);
+#if defined(CONFIG_DHD_USE_STATIC_BUF) && defined(DHD_USE_STATIC_MEMDUMP)
+ dld_buf->buffer = DHD_OS_PREALLOC(dhd, prealloc_idx++, dld_buf_size[i]);
+#else
+ dld_buf->buffer = kmalloc(dld_buf_size[i], GFP_KERNEL);
+#endif /* CONFIG_DHD_USE_STATIC_BUF && DHD_USE_STATIC_MEMDUMP */
- DHD_RING_SYNC_LOCK(ring->ring_sync, flags);
- if (ring->type == DHD_RING_TYPE_FIXED) {
- ret = __dhd_fixed_ring_get_first(&ring->fixed);
- }
- if (ring->type == DHD_RING_TYPE_SINGLE_IDX) {
- ret = __dhd_singleidx_ring_get_first(&ring->single);
- }
- DHD_RING_SYNC_UNLOCK(ring->ring_sync, flags);
- return ret;
-}
+ if (!dld_buf->buffer) {
+ dld_buf->buffer = kmalloc(dld_buf_size[i], GFP_KERNEL);
+ DHD_ERROR(("Try to allocate memory using kmalloc().\n"));
-/* Free first element : oldest element */
-void
-dhd_ring_free_first(void *_ring)
-{
- dhd_ring_info_t *ring = (dhd_ring_info_t *)_ring;
- unsigned long flags;
+ if (!dld_buf->buffer) {
+ DHD_ERROR(("Failed to allocate memory for dld_buf[%d].\n", i));
+ goto fail;
+ }
+ }
- if (!ring || ring->magic != DHD_RING_MAGIC) {
- DHD_RING_ERR(("%s :INVALID RING INFO\n", __FUNCTION__));
- return;
+ dld_buf->wraparound = 0;
+ dld_buf->max = (unsigned long)dld_buf->buffer + dld_buf_size[i];
+ dld_buf->present = dld_buf->front = dld_buf->buffer;
+ dld_buf->remain = dld_buf_size[i];
+ dld_buf->enable = 1;
}
+ return;
- DHD_RING_SYNC_LOCK(ring->ring_sync, flags);
- if (ring->type == DHD_RING_TYPE_FIXED) {
- __dhd_fixed_ring_free_first(&ring->fixed);
+fail:
+ for (i = 0; i < DLD_BUFFER_NUM; i++) {
+ if (g_dld_buf[i].buffer) {
+ kfree(g_dld_buf[i].buffer);
+ }
}
- DHD_RING_SYNC_UNLOCK(ring->ring_sync, flags);
}
void
-dhd_ring_set_read_idx(void *_ring, uint32 read_idx)
+dhd_log_dump_deinit(dhd_pub_t *dhd)
{
- dhd_ring_info_t *ring = (dhd_ring_info_t *)_ring;
- unsigned long flags;
-
- if (!ring || ring->magic != DHD_RING_MAGIC) {
- DHD_RING_ERR(("%s :INVALID RING INFO\n", __FUNCTION__));
- return;
- }
+ struct dhd_log_dump_buf *dld_buf;
+ int i = 0;
- DHD_RING_SYNC_LOCK(ring->ring_sync, flags);
- if (ring->type == DHD_RING_TYPE_FIXED) {
- __dhd_fixed_ring_set_read_idx(&ring->fixed, read_idx);
+ for (i = 0; i < DLD_BUFFER_NUM; i++) {
+ dld_buf = &g_dld_buf[i];
+ dld_buf->enable = 0;
+#if defined(CONFIG_DHD_USE_STATIC_BUF) && defined(DHD_USE_STATIC_MEMDUMP)
+ DHD_OS_PREFREE(dhd, dld_buf->buffer, dld_buf_size[i]);
+#else
+ kfree(dld_buf->buffer);
+#endif /* CONFIG_DHD_USE_STATIC_BUF && DHD_USE_STATIC_MEMDUMP */
}
- DHD_RING_SYNC_UNLOCK(ring->ring_sync, flags);
}
void
-dhd_ring_set_write_idx(void *_ring, uint32 write_idx)
+dhd_log_dump_write(int type, const char *fmt, ...)
{
- dhd_ring_info_t *ring = (dhd_ring_info_t *)_ring;
- unsigned long flags;
+ int len = 0;
+ char tmp_buf[DHD_LOG_DUMP_MAX_TEMP_BUFFER_SIZE] = {0, };
+ va_list args;
+ unsigned long flags = 0;
+ struct dhd_log_dump_buf *dld_buf = NULL;
- if (!ring || ring->magic != DHD_RING_MAGIC) {
- DHD_RING_ERR(("%s :INVALID RING INFO\n", __FUNCTION__));
- return;
+ switch (type)
+ {
+ case DLD_BUF_TYPE_GENERAL:
+ dld_buf = &g_dld_buf[type];
+ break;
+ case DLD_BUF_TYPE_SPECIAL:
+ dld_buf = &g_dld_buf[type];
+ break;
+ default:
+ DHD_ERROR(("%s: Unknown DHD_LOG_DUMP_BUF_TYPE(%d).\n",
+ __FUNCTION__, type));
+ return;
}
- DHD_RING_SYNC_LOCK(ring->ring_sync, flags);
- if (ring->type == DHD_RING_TYPE_FIXED) {
- __dhd_fixed_ring_set_write_idx(&ring->fixed, write_idx);
+ if (dld_buf->enable != 1) {
+ return;
}
- DHD_RING_SYNC_UNLOCK(ring->ring_sync, flags);
-}
-uint32
-dhd_ring_get_read_idx(void *_ring)
-{
- dhd_ring_info_t *ring = (dhd_ring_info_t *)_ring;
- uint32 read_idx = DHD_RING_IDX_INVALID;
- unsigned long flags;
+ va_start(args, fmt);
- if (!ring || ring->magic != DHD_RING_MAGIC) {
- DHD_RING_ERR(("%s :INVALID RING INFO\n", __FUNCTION__));
- return read_idx;
+ len = vsnprintf(tmp_buf, DHD_LOG_DUMP_MAX_TEMP_BUFFER_SIZE, fmt, args);
+ /* A non-ANSI-C99-compliant vsnprintf returns -1 on truncation,
+ * an ANSI-compliant one returns len >= DHD_LOG_DUMP_MAX_TEMP_BUFFER_SIZE
+ */
+ if (len < 0) {
+ return;
}
- DHD_RING_SYNC_LOCK(ring->ring_sync, flags);
- if (ring->type == DHD_RING_TYPE_FIXED) {
- read_idx = __dhd_fixed_ring_get_read_idx(&ring->fixed);
+ if (len >= DHD_LOG_DUMP_MAX_TEMP_BUFFER_SIZE) {
+ len = DHD_LOG_DUMP_MAX_TEMP_BUFFER_SIZE - 1;
+ tmp_buf[len] = '\0';
}
- DHD_RING_SYNC_UNLOCK(ring->ring_sync, flags);
-
- return read_idx;
-}
-
-uint32
-dhd_ring_get_write_idx(void *_ring)
-{
- dhd_ring_info_t *ring = (dhd_ring_info_t *)_ring;
- uint32 write_idx = DHD_RING_IDX_INVALID;
- unsigned long flags;
- if (!ring || ring->magic != DHD_RING_MAGIC) {
- DHD_RING_ERR(("%s :INVALID RING INFO\n", __FUNCTION__));
- return write_idx;
+ /* make a critical section to eliminate race conditions */
+ spin_lock_irqsave(&dld_buf->lock, flags);
+ if (dld_buf->remain < len) {
+ dld_buf->wraparound = 1;
+ dld_buf->present = dld_buf->front;
+ dld_buf->remain = dld_buf_size[type];
}
- DHD_RING_SYNC_LOCK(ring->ring_sync, flags);
- if (ring->type == DHD_RING_TYPE_FIXED) {
- write_idx = __dhd_fixed_ring_get_write_idx(&ring->fixed);
- }
- DHD_RING_SYNC_UNLOCK(ring->ring_sync, flags);
+ strncpy(dld_buf->present, tmp_buf, len);
+ dld_buf->remain -= len;
+ dld_buf->present += len;
+ spin_unlock_irqrestore(&dld_buf->lock, flags);
- return write_idx;
+ /* double check invalid memory operation */
+ ASSERT((unsigned long)dld_buf->present <= dld_buf->max);
+ va_end(args);
}
-/* Get latest element */
-void *
-dhd_ring_get_last(void *_ring)
+char*
+dhd_log_dump_get_timestamp(void)
{
- dhd_ring_info_t *ring = (dhd_ring_info_t *)_ring;
- void *ret = NULL;
- unsigned long flags;
+ static char buf[16];
+ u64 ts_nsec;
+ unsigned long rem_nsec;
- if (!ring || ring->magic != DHD_RING_MAGIC) {
- DHD_RING_ERR(("%s :INVALID RING INFO\n", __FUNCTION__));
- return NULL;
- }
+ ts_nsec = local_clock();
+ rem_nsec = do_div(ts_nsec, 1000000000);
+ snprintf(buf, sizeof(buf), "%5lu.%06lu",
+ (unsigned long)ts_nsec, rem_nsec / 1000);
- DHD_RING_SYNC_LOCK(ring->ring_sync, flags);
- if (ring->type == DHD_RING_TYPE_FIXED) {
- ret = __dhd_fixed_ring_get_last(&ring->fixed);
- }
- if (ring->type == DHD_RING_TYPE_SINGLE_IDX) {
- ret = __dhd_singleidx_ring_get_last(&ring->single);
- }
- DHD_RING_SYNC_UNLOCK(ring->ring_sync, flags);
- return ret;
+ return buf;
}
+#endif /* DHD_LOG_DUMP */
-/* Get next point can be written
- * will overwrite which doesn't read
- * will return NULL if next pointer is locked
- */
-void *
-dhd_ring_get_empty(void *_ring)
+int
+dhd_write_file(const char *filepath, char *buf, int buf_len)
{
- dhd_ring_info_t *ring = (dhd_ring_info_t *)_ring;
- void *ret = NULL;
- unsigned long flags;
+ struct file *fp = NULL;
+ mm_segment_t old_fs;
+ int ret = 0;
- if (!ring || ring->magic != DHD_RING_MAGIC) {
- DHD_RING_ERR(("%s :INVALID RING INFO\n", __FUNCTION__));
- return NULL;
+ /* change to KERNEL_DS address limit */
+ old_fs = get_fs();
+ set_fs(KERNEL_DS);
+
+ /* File is always created. */
+ fp = filp_open(filepath, O_RDWR | O_CREAT, 0664);
+ if (IS_ERR(fp)) {
+ DHD_ERROR(("%s: Couldn't open file '%s' err %ld\n",
+ __FUNCTION__, filepath, PTR_ERR(fp)));
+ ret = BCME_ERROR;
+ } else {
+ if (fp->f_mode & FMODE_WRITE) {
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 0)
+ ret = kernel_write(fp, buf, buf_len, &fp->f_pos);
+#else
+ ret = vfs_write(fp, buf, buf_len, &fp->f_pos);
+#endif
+ if (ret < 0) {
+ DHD_ERROR(("%s: Couldn't write file '%s'\n",
+ __FUNCTION__, filepath));
+ ret = BCME_ERROR;
+ } else {
+ ret = BCME_OK;
+ }
+ }
+ filp_close(fp, NULL);
}
- DHD_RING_SYNC_LOCK(ring->ring_sync, flags);
- if (ring->type == DHD_RING_TYPE_FIXED) {
- ret = __dhd_fixed_ring_get_empty(&ring->fixed);
- }
- if (ring->type == DHD_RING_TYPE_SINGLE_IDX) {
- ret = __dhd_singleidx_ring_get_empty(&ring->single);
- }
- DHD_RING_SYNC_UNLOCK(ring->ring_sync, flags);
+ /* restore previous address limit */
+ set_fs(old_fs);
+
return ret;
}
-void *
-dhd_ring_get_next(void *_ring, void *cur)
+int
+dhd_read_file(const char *filepath, char *buf, int buf_len)
{
- dhd_ring_info_t *ring = (dhd_ring_info_t *)_ring;
- void *ret = NULL;
- unsigned long flags;
+ struct file *fp = NULL;
+ mm_segment_t old_fs;
+ int ret;
- if (!ring || ring->magic != DHD_RING_MAGIC) {
- DHD_RING_ERR(("%s :INVALID RING INFO\n", __FUNCTION__));
- return NULL;
- }
+ /* change to KERNEL_DS address limit */
+ old_fs = get_fs();
+ set_fs(KERNEL_DS);
- DHD_RING_SYNC_LOCK(ring->ring_sync, flags);
- if (ring->type == DHD_RING_TYPE_FIXED) {
- ret = __dhd_fixed_ring_get_next(&ring->fixed, cur, ring->type);
- }
- if (ring->type == DHD_RING_TYPE_SINGLE_IDX) {
- ret = __dhd_singleidx_ring_get_next(&ring->single, cur, ring->type);
+ fp = filp_open(filepath, O_RDONLY, 0);
+ if (IS_ERR(fp)) {
+ set_fs(old_fs);
+ DHD_ERROR(("%s: File %s doesn't exist\n", __FUNCTION__, filepath));
+ return BCME_ERROR;
}
- DHD_RING_SYNC_UNLOCK(ring->ring_sync, flags);
- return ret;
-}
-void *
-dhd_ring_get_prev(void *_ring, void *cur)
-{
- dhd_ring_info_t *ring = (dhd_ring_info_t *)_ring;
- void *ret = NULL;
- unsigned long flags;
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 0)
+ ret = kernel_read(fp, buf, buf_len, NULL);
+#else
+ ret = kernel_read(fp, 0, buf, buf_len);
+#endif
+ filp_close(fp, NULL);
- if (!ring || ring->magic != DHD_RING_MAGIC) {
- DHD_RING_ERR(("%s :INVALID RING INFO\n", __FUNCTION__));
- return NULL;
- }
+ /* restore previous address limit */
+ set_fs(old_fs);
- DHD_RING_SYNC_LOCK(ring->ring_sync, flags);
- if (ring->type == DHD_RING_TYPE_FIXED) {
- ret = __dhd_fixed_ring_get_prev(&ring->fixed, cur, ring->type);
- }
- if (ring->type == DHD_RING_TYPE_SINGLE_IDX) {
- ret = __dhd_singleidx_ring_get_prev(&ring->single, cur, ring->type);
+ /* Return the number of bytes read */
+ if (ret > 0) {
+ /* Success to read */
+ ret = 0;
+ } else {
+ DHD_ERROR(("%s: Couldn't read the file %s, ret=%d\n",
+ __FUNCTION__, filepath, ret));
+ ret = BCME_ERROR;
}
- DHD_RING_SYNC_UNLOCK(ring->ring_sync, flags);
+
return ret;
}
int
-dhd_ring_get_cur_size(void *_ring)
+dhd_write_file_and_check(const char *filepath, char *buf, int buf_len)
{
- dhd_ring_info_t *ring = (dhd_ring_info_t *)_ring;
- int cnt = 0;
- unsigned long flags;
-
- if (!ring || ring->magic != DHD_RING_MAGIC) {
- DHD_RING_ERR(("%s :INVALID RING INFO\n", __FUNCTION__));
- return cnt;
- }
+ int ret;
- DHD_RING_SYNC_LOCK(ring->ring_sync, flags);
- if (ring->type == DHD_RING_TYPE_FIXED) {
- cnt = __dhd_fixed_ring_get_cur_size(&ring->fixed);
+ ret = dhd_write_file(filepath, buf, buf_len);
+ if (ret < 0) {
+ return ret;
}
- DHD_RING_SYNC_UNLOCK(ring->ring_sync, flags);
- return cnt;
-}
-
-/* protect element between lock_ptr and write_idx */
-void
-dhd_ring_lock(void *_ring, void *first_ptr, void *last_ptr)
-{
- dhd_ring_info_t *ring = (dhd_ring_info_t *)_ring;
- unsigned long flags;
- if (!ring || ring->magic != DHD_RING_MAGIC) {
- DHD_RING_ERR(("%s :INVALID RING INFO\n", __FUNCTION__));
- return;
- }
+ /* Read the file again and check if the file size is not zero */
+ memset(buf, 0, buf_len);
+ ret = dhd_read_file(filepath, buf, buf_len);
- DHD_RING_SYNC_LOCK(ring->ring_sync, flags);
- if (ring->type == DHD_RING_TYPE_FIXED) {
- __dhd_fixed_ring_lock(&ring->fixed, first_ptr, last_ptr, ring->type);
- }
- DHD_RING_SYNC_UNLOCK(ring->ring_sync, flags);
+ return ret;
}
-/* free all lock */
-void
-dhd_ring_lock_free(void *_ring)
+#ifdef DHD_LB_TXP
+#define DHD_LB_TXBOUND 64
+/*
+ * Function that performs the TX processing on a given CPU
+ */
+bool
+dhd_lb_tx_process(dhd_info_t *dhd)
{
- dhd_ring_info_t *ring = (dhd_ring_info_t *)_ring;
- unsigned long flags;
-
- if (!ring || ring->magic != DHD_RING_MAGIC) {
- DHD_RING_ERR(("%s :INVALID RING INFO\n", __FUNCTION__));
- return;
- }
+ struct sk_buff *skb;
+ int cnt = 0;
+ struct net_device *net;
+ int ifidx;
+ bool resched = FALSE;
- DHD_RING_SYNC_LOCK(ring->ring_sync, flags);
- if (ring->type == DHD_RING_TYPE_FIXED) {
- __dhd_fixed_ring_lock_free(&ring->fixed);
+ DHD_TRACE(("%s(): TX Processing \r\n", __FUNCTION__));
+ if (dhd == NULL) {
+ DHD_ERROR((" Null pointer DHD \r\n"));
+ return resched;
}
- DHD_RING_SYNC_UNLOCK(ring->ring_sync, flags);
-}
-void *
-dhd_ring_lock_get_first(void *_ring)
-{
- dhd_ring_info_t *ring = (dhd_ring_info_t *)_ring;
- void *ret = NULL;
- unsigned long flags;
+ DHD_LB_STATS_PERCPU_ARR_INCR(dhd->txp_percpu_run_cnt);
- if (!ring || ring->magic != DHD_RING_MAGIC) {
- DHD_RING_ERR(("%s :INVALID RING INFO\n", __FUNCTION__));
- return NULL;
- }
+ /* Base Loop to perform the actual Tx */
+ do {
+ skb = skb_dequeue(&dhd->tx_pend_queue);
+ if (skb == NULL) {
+ DHD_TRACE(("Dequeued a Null Packet \r\n"));
+ break;
+ }
+ cnt++;
- DHD_RING_SYNC_LOCK(ring->ring_sync, flags);
- if (ring->type == DHD_RING_TYPE_FIXED) {
- ret = __dhd_fixed_ring_lock_get_first(&ring->fixed);
- }
- DHD_RING_SYNC_UNLOCK(ring->ring_sync, flags);
- return ret;
-}
+ net = DHD_LB_TX_PKTTAG_NETDEV((dhd_tx_lb_pkttag_fr_t *)PKTTAG(skb));
+ ifidx = DHD_LB_TX_PKTTAG_IFIDX((dhd_tx_lb_pkttag_fr_t *)PKTTAG(skb));
-void *
-dhd_ring_lock_get_last(void *_ring)
-{
- dhd_ring_info_t *ring = (dhd_ring_info_t *)_ring;
- void *ret = NULL;
- unsigned long flags;
+ BCM_REFERENCE(net);
+ DHD_TRACE(("Processing skb %p for net %p index %d \r\n", skb,
+ net, ifidx));
- if (!ring || ring->magic != DHD_RING_MAGIC) {
- DHD_RING_ERR(("%s :INVALID RING INFO\n", __FUNCTION__));
- return NULL;
- }
+ __dhd_sendpkt(&dhd->pub, ifidx, skb);
- DHD_RING_SYNC_LOCK(ring->ring_sync, flags);
- if (ring->type == DHD_RING_TYPE_FIXED) {
- ret = __dhd_fixed_ring_lock_get_last(&ring->fixed);
- }
- DHD_RING_SYNC_UNLOCK(ring->ring_sync, flags);
- return ret;
-}
+ if (cnt >= DHD_LB_TXBOUND) {
+ resched = TRUE;
+ break;
+ }
-int
-dhd_ring_lock_get_count(void *_ring)
-{
- dhd_ring_info_t *ring = (dhd_ring_info_t *)_ring;
- int ret = BCME_ERROR;
- unsigned long flags;
+ } while (1);
- if (!ring || ring->magic != DHD_RING_MAGIC) {
- DHD_RING_ERR(("%s :INVALID RING INFO\n", __FUNCTION__));
- return ret;
- }
+ DHD_INFO(("%s(): Processed %d packets \r\n", __FUNCTION__, cnt));
- DHD_RING_SYNC_LOCK(ring->ring_sync, flags);
- if (ring->type == DHD_RING_TYPE_FIXED) {
- ret = __dhd_fixed_ring_lock_get_count(&ring->fixed);
- }
- DHD_RING_SYNC_UNLOCK(ring->ring_sync, flags);
- return ret;
+ return resched;
}
-/* free first locked element */
void
-dhd_ring_lock_free_first(void *_ring)
+dhd_lb_tx_handler(unsigned long data)
{
- dhd_ring_info_t *ring = (dhd_ring_info_t *)_ring;
- unsigned long flags;
-
- if (!ring || ring->magic != DHD_RING_MAGIC) {
- DHD_RING_ERR(("%s :INVALID RING INFO\n", __FUNCTION__));
- return;
- }
+ dhd_info_t *dhd = (dhd_info_t *)data;
- DHD_RING_SYNC_LOCK(ring->ring_sync, flags);
- if (ring->type == DHD_RING_TYPE_FIXED) {
- __dhd_fixed_ring_lock_free_first(&ring->fixed);
+ if (dhd_lb_tx_process(dhd)) {
+ dhd_tasklet_schedule(&dhd->tx_tasklet);
}
- DHD_RING_SYNC_UNLOCK(ring->ring_sync, flags);
}
-void
-dhd_ring_whole_lock(void *_ring)
-{
- dhd_ring_info_t *ring = (dhd_ring_info_t *)_ring;
- unsigned long flags;
+#endif /* DHD_LB_TXP */
- if (!ring || ring->magic != DHD_RING_MAGIC) {
- DHD_RING_ERR(("%s :INVALID RING INFO\n", __FUNCTION__));
- return;
- }
+/* ----------------------------------------------------------------------------
+ * Infrastructure code for sysfs interface support for DHD
+ *
+ * What is sysfs interface?
+ * https://www.kernel.org/doc/Documentation/filesystems/sysfs.txt
+ *
+ * Why sysfs interface?
+ * This is the standard Linux way of changing/configuring run-time parameters
+ * for a driver. We can use this interface to control Linux-specific driver
+ * parameters.
+ *
+ * -----------------------------------------------------------------------------
+ */
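
As a concrete illustration of the run-time control described above: the kobject is registered further down in this patch as "bcm-dhd" with a NULL parent, so the attributes land under /sys/bcm-dhd/. The user-space sketch below reads and toggles the lbtxp node; it is illustrative only and assumes a build with DHD_LB_TXP enabled.

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	/* path assumes the kobject registered as "bcm-dhd" with a NULL parent */
	char val[8] = {0};
	int fd;

	/* read the current TX load-balancing on/off state */
	fd = open("/sys/bcm-dhd/lbtxp", O_RDONLY);
	if (fd < 0) {
		perror("open lbtxp");
		return 1;
	}
	if (read(fd, val, sizeof(val) - 1) > 0)
		printf("lbtxp: %s", val);
	close(fd);

	/* enable it; the store handler accepts "0" or "1" */
	fd = open("/sys/bcm-dhd/lbtxp", O_WRONLY);
	if (fd < 0) {
		perror("open lbtxp");
		return 1;
	}
	if (write(fd, "1", 1) != 1)
		perror("write lbtxp");
	close(fd);
	return 0;
}
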
- DHD_RING_SYNC_LOCK(ring->ring_sync, flags);
- if (ring->type == DHD_RING_TYPE_SINGLE_IDX) {
- __dhd_singleidx_ring_whole_lock(&ring->single);
- }
- DHD_RING_SYNC_UNLOCK(ring->ring_sync, flags);
-}
+#include <linux/sysfs.h>
+#include <linux/kobject.h>
-void
-dhd_ring_whole_unlock(void *_ring)
+#if defined(DHD_TRACE_WAKE_LOCK)
+
+/* Function to show the history buffer */
+static ssize_t
+show_wklock_trace(struct dhd_info *dev, char *buf)
{
- dhd_ring_info_t *ring = (dhd_ring_info_t *)_ring;
- unsigned long flags;
+ ssize_t ret = 0;
+ dhd_info_t *dhd = (dhd_info_t *)dev;
- if (!ring || ring->magic != DHD_RING_MAGIC) {
- DHD_RING_ERR(("%s :INVALID RING INFO\n", __FUNCTION__));
- return;
- }
+ buf[ret] = '\n';
+ buf[ret+1] = 0;
- DHD_RING_SYNC_LOCK(ring->ring_sync, flags);
- if (ring->type == DHD_RING_TYPE_SINGLE_IDX) {
- __dhd_singleidx_ring_whole_unlock(&ring->single);
- }
- DHD_RING_SYNC_UNLOCK(ring->ring_sync, flags);
+ dhd_wk_lock_stats_dump(&dhd->pub);
+ return ret+1;
}
-#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 19, 0))
-#define DHD_VFS_INODE(dir) (dir->d_inode)
-#else
-#define DHD_VFS_INODE(dir) d_inode(dir)
-#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(3, 19, 0) */
-
-#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 13, 0))
-#define DHD_VFS_UNLINK(dir, b, c) vfs_unlink(DHD_VFS_INODE(dir), b)
-#else
-#define DHD_VFS_UNLINK(dir, b, c) vfs_unlink(DHD_VFS_INODE(dir), b, c)
-#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(3, 13, 0) */
-int
-dhd_file_delete(char *path)
+/* Function to enable/disable wakelock trace */
+static ssize_t
+wklock_trace_onoff(struct dhd_info *dev, const char *buf, size_t count)
{
- struct path file_path;
- int err;
- struct dentry *dir;
-
- err = kern_path(path, 0, &file_path);
+ unsigned long onoff;
+ unsigned long flags;
+ dhd_info_t *dhd = (dhd_info_t *)dev;
- if (err < 0) {
- DHD_ERROR(("Failed to get kern-path delete file: %s error: %d\n", path, err));
- return err;
+ onoff = bcm_strtoul(buf, NULL, 10);
+ if (onoff != 0 && onoff != 1) {
+ return -EINVAL;
}
- if (
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 13, 0))
- !d_is_file(file_path.dentry) ||
-#if (LINUX_VERSION_CODE > KERNEL_VERSION(4, 1, 0))
- d_really_is_negative(file_path.dentry) ||
-#endif /* LINUX_VERSION_CODE > KERNEL_VERSION(4, 1, 0) */
-#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 13, 0) */
- FALSE)
- {
- err = -EINVAL;
- } else {
- dir = dget_parent(file_path.dentry);
- if (!IS_ERR(dir)) {
- err = DHD_VFS_UNLINK(dir, file_path.dentry, NULL);
- dput(dir);
- } else {
- err = PTR_ERR(dir);
- }
+ spin_lock_irqsave(&dhd->wakelock_spinlock, flags);
+ trace_wklock_onoff = onoff;
+ spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
+ if (trace_wklock_onoff) {
+ printk("ENABLE WAKLOCK TRACE\n");
+ } else {
+ printk("DISABLE WAKELOCK TRACE\n");
}
- path_put(&file_path);
+ return (ssize_t)(onoff+1);
+}
+#endif /* DHD_TRACE_WAKE_LOCK */
- if (err < 0) {
- DHD_ERROR(("Failed to delete file: %s error: %d\n", path, err));
- }
+#if defined(DHD_LB_TXP)
+static ssize_t
+show_lbtxp(struct dhd_info *dev, char *buf)
+{
+ ssize_t ret = 0;
+ unsigned long onoff;
+ dhd_info_t *dhd = (dhd_info_t *)dev;
- return err;
+ onoff = atomic_read(&dhd->lb_txp_active);
+ ret = scnprintf(buf, PAGE_SIZE - 1, "%lu \n",
+ onoff);
+ return ret;
}
-#ifdef DHD_DUMP_MNGR
-static int
-dhd_dump_file_manage_idx(dhd_dump_file_manage_t *fm_ptr, char *fname)
+
+static ssize_t
+lbtxp_onoff(struct dhd_info *dev, const char *buf, size_t count)
{
+ unsigned long onoff;
+ dhd_info_t *dhd = (dhd_info_t *)dev;
int i;
- int fm_idx = -1;
- for (i = 0; i < DHD_DUMP_TYPE_COUNT_MAX; i++) {
- if (strlen(fm_ptr->elems[i].type_name) == 0) {
- fm_idx = i;
- break;
- }
- if (!(strncmp(fname, fm_ptr->elems[i].type_name, strlen(fname)))) {
- fm_idx = i;
- break;
- }
- }
+ onoff = bcm_strtoul(buf, NULL, 10);
- if (fm_idx == -1) {
- return fm_idx;
+ sscanf(buf, "%lu", &onoff);
+ if (onoff != 0 && onoff != 1) {
+ return -EINVAL;
}
+ atomic_set(&dhd->lb_txp_active, onoff);
- if (strlen(fm_ptr->elems[fm_idx].type_name) == 0) {
- strncpy(fm_ptr->elems[fm_idx].type_name, fname, DHD_DUMP_TYPE_NAME_SIZE);
- fm_ptr->elems[fm_idx].type_name[DHD_DUMP_TYPE_NAME_SIZE - 1] = '\0';
- fm_ptr->elems[fm_idx].file_idx = 0;
+ /* Since the scheme is changed clear the counters */
+ for (i = 0; i < NR_CPUS; i++) {
+ DHD_LB_STATS_CLR(dhd->txp_percpu_run_cnt[i]);
+ DHD_LB_STATS_CLR(dhd->tx_start_percpu_run_cnt[i]);
}
- return fm_idx;
+ return count;
}
+#endif /* DHD_LB_TXP */
/*
- * dhd_dump_file_manage_enqueue - enqueue dump file path
- * and delete odest file if file count is max.
-*/
-void
-dhd_dump_file_manage_enqueue(dhd_pub_t *dhd, char *dump_path, char *fname)
-{
- int fm_idx;
- int fp_idx;
- dhd_dump_file_manage_t *fm_ptr;
- DFM_elem_t *elem;
+ * Generic attribute structure for DHD.
+ * To add a new sysfs entry under /sys/bcm-dhd/, instantiate an object of
+ * type dhd_attr, populate it with the required show/store handlers
+ * (ex:- dhd_attr_cpumask_primary) and add the object to the default_attrs[]
+ * array, which gets registered with the dhd kobject (named bcm-dhd).
+ * An illustrative sketch follows the structure definition below.
+ */
- if (!dhd || !dhd->dump_file_manage) {
- DHD_ERROR(("%s(): dhdp=%p dump_file_manage=%p\n",
- __FUNCTION__, dhd, (dhd ? dhd->dump_file_manage : NULL)));
- return;
- }
+struct dhd_attr {
+ struct attribute attr;
+ ssize_t(*show)(struct dhd_info *, char *);
+ ssize_t(*store)(struct dhd_info *, const char *, size_t count);
+};
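
A minimal sketch of the recipe from the comment above, using a hypothetical "dummy_knob" entry. The handler names and the backing variable are illustrative only (the real attributes added by this patch are dhd_attr_wklock and dhd_attr_lbtxp), and the snippet assumes the same file context as the surrounding code.

static int dummy_knob_val;	/* hypothetical state backing the node */

static ssize_t
show_dummy_knob(struct dhd_info *dev, char *buf)
{
	BCM_REFERENCE(dev);
	/* bounded by PAGE_SIZE, like the other show handlers in this file */
	return scnprintf(buf, PAGE_SIZE - 1, "%d\n", dummy_knob_val);
}

static ssize_t
dummy_knob_onoff(struct dhd_info *dev, const char *buf, size_t count)
{
	unsigned long onoff = bcm_strtoul(buf, NULL, 10);

	BCM_REFERENCE(dev);
	if (onoff != 0 && onoff != 1)
		return -EINVAL;
	dummy_knob_val = (int)onoff;
	return count;
}

/* 1. instantiate the attribute object */
static struct dhd_attr dhd_attr_dummy_knob =
	__ATTR(dummy_knob, 0660, show_dummy_knob, dummy_knob_onoff);

/* 2. then append &dhd_attr_dummy_knob.attr to the default_attrs[] array below */
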
- fm_ptr = dhd->dump_file_manage;
+#if defined(DHD_TRACE_WAKE_LOCK)
+static struct dhd_attr dhd_attr_wklock =
+ __ATTR(wklock_trace, 0660, show_wklock_trace, wklock_trace_onoff);
+#endif /* defined(DHD_TRACE_WAKE_LOCK) */
- /* find file_manage idx */
- DHD_INFO(("%s(): fname: %s dump_path: %s\n", __FUNCTION__, fname, dump_path));
- if ((fm_idx = dhd_dump_file_manage_idx(fm_ptr, fname)) < 0) {
- DHD_ERROR(("%s(): Out of file manager entries, fname: %s\n",
- __FUNCTION__, fname));
- return;
- }
+#if defined(DHD_LB_TXP)
+static struct dhd_attr dhd_attr_lbtxp =
+ __ATTR(lbtxp, 0660, show_lbtxp, lbtxp_onoff);
+#endif /* DHD_LB_TXP */
- elem = &fm_ptr->elems[fm_idx];
- fp_idx = elem->file_idx;
- DHD_INFO(("%s(): fm_idx: %d fp_idx: %d path: %s\n",
- __FUNCTION__, fm_idx, fp_idx, elem->file_path[fp_idx]));
+/* Attribute object that gets registered with "bcm-dhd" kobject tree */
+static struct attribute *default_attrs[] = {
+#if defined(DHD_TRACE_WAKE_LOCK)
+ &dhd_attr_wklock.attr,
+#endif /* DHD_TRACE_WAKE_LOCK */
+#if defined(DHD_LB_TXP)
+ &dhd_attr_lbtxp.attr,
+#endif /* DHD_LB_TXP */
+ NULL
+};
- /* delete oldest file */
- if (strlen(elem->file_path[fp_idx]) != 0) {
- if (dhd_file_delete(elem->file_path[fp_idx]) < 0) {
- DHD_ERROR(("%s(): Failed to delete file: %s\n",
- __FUNCTION__, elem->file_path[fp_idx]));
- } else {
- DHD_ERROR(("%s(): Successed to delete file: %s\n",
- __FUNCTION__, elem->file_path[fp_idx]));
- }
- }
+#define to_dhd(k) container_of(k, struct dhd_info, dhd_kobj)
+#define to_attr(a) container_of(a, struct dhd_attr, attr)
+
+/*
+ * bcm-dhd kobject show function; the "attr" argument specifies for which
+ * node under "bcm-dhd" the show function is called.
+ */
+static ssize_t dhd_show(struct kobject *kobj, struct attribute *attr, char *buf)
+{
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wcast-qual"
+#endif
+ dhd_info_t *dhd = to_dhd(kobj);
+ struct dhd_attr *d_attr = to_attr(attr);
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic pop
+#endif
+ int ret;
- /* save dump file path */
- strncpy(elem->file_path[fp_idx], dump_path, DHD_DUMP_FILE_PATH_SIZE);
- elem->file_path[fp_idx][DHD_DUMP_FILE_PATH_SIZE - 1] = '\0';
+ if (d_attr->show)
+ ret = d_attr->show(dhd, buf);
+ else
+ ret = -EIO;
- /* change file index to next file index */
- elem->file_idx = (elem->file_idx + 1) % DHD_DUMP_FILE_COUNT_MAX;
+ return ret;
}
-#endif /* DHD_DUMP_MNGR */
-#ifdef DHD_MAP_LOGGING
-/* Will be called from SMMU fault handler */
-void
-dhd_smmu_fault_handler(uint32 axid, ulong fault_addr)
+/*
+ * bcm-dhd kobject store function; the "attr" argument specifies for which
+ * node under "bcm-dhd" the store function is called.
+ */
+static ssize_t dhd_store(struct kobject *kobj, struct attribute *attr,
+ const char *buf, size_t count)
{
- dhd_pub_t *dhdp = (dhd_pub_t *)g_dhd_pub;
- uint32 irq = (uint32)-1;
-
- DHD_ERROR(("%s: Trigger SMMU Fault\n", __FUNCTION__));
- DHD_ERROR(("%s: axid:0x%x, fault_addr:0x%lx", __FUNCTION__, axid, fault_addr));
- dhdp->smmu_fault_occurred = TRUE;
-#ifdef DNGL_AXI_ERROR_LOGGING
- dhdp->axi_error = TRUE;
- dhdp->axi_err_dump->axid = axid;
- dhdp->axi_err_dump->fault_address = fault_addr;
-#endif /* DNGL_AXI_ERROR_LOGGING */
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wcast-qual"
+#endif
+ dhd_info_t *dhd = to_dhd(kobj);
+ struct dhd_attr *d_attr = to_attr(attr);
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic pop
+#endif
+ int ret;
- /* Disable PCIe IRQ */
- dhdpcie_get_pcieirq(dhdp->bus, &irq);
- if (irq != (uint32)-1) {
- disable_irq_nosync(irq);
- }
+ if (d_attr->store)
+ ret = d_attr->store(dhd, buf, count);
+ else
+ ret = -EIO;
- /* Take debug information first */
- DHD_OS_WAKE_LOCK(dhdp);
- dhd_prot_smmu_fault_dump(dhdp);
- DHD_OS_WAKE_UNLOCK(dhdp);
+ return ret;
- /* Take AXI information if possible */
-#ifdef DNGL_AXI_ERROR_LOGGING
-#ifdef DHD_USE_WQ_FOR_DNGL_AXI_ERROR
- dhd_axi_error_dispatch(dhdp);
-#else
- dhd_axi_error(dhdp);
-#endif /* DHD_USE_WQ_FOR_DNGL_AXI_ERROR */
-#endif /* DNGL_AXI_ERROR_LOGGING */
}
-EXPORT_SYMBOL(dhd_smmu_fault_handler);
-#endif /* DHD_MAP_LOGGING */
-
-#ifdef DHD_WIFI_SHUTDOWN
-void wifi_plat_dev_drv_shutdown(struct platform_device *pdev)
-{
- dhd_pub_t *dhd_pub = NULL;
- dhd_info_t *dhd_info = NULL;
- dhd_if_t *dhd_if = NULL;
- DHD_ERROR(("%s enter\n", __FUNCTION__));
- dhd_pub = g_dhd_pub;
+static struct sysfs_ops dhd_sysfs_ops = {
+ .show = dhd_show,
+ .store = dhd_store,
+};
- if (dhd_os_check_if_up(dhd_pub)) {
- dhd_info = (dhd_info_t *)dhd_pub->info;
- dhd_if = dhd_info->iflist[0];
- ASSERT(dhd_if);
- ASSERT(dhd_if->net);
- if (dhd_if && dhd_if->net) {
- dhd_stop(dhd_if->net);
- }
- }
-}
-#endif /* DHD_WIFI_SHUTDOWN */
+static struct kobj_type dhd_ktype = {
+ .sysfs_ops = &dhd_sysfs_ops,
+ .default_attrs = default_attrs,
+};
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 0))
-int
-compat_kernel_read(struct file *file, loff_t offset, char *addr, unsigned long count)
-{
- return (int)kernel_read(file, addr, (size_t)count, &offset);
-}
-int
-compat_vfs_write(struct file *file, char *addr, int count, loff_t *offset)
-{
- return (int)kernel_write(file, addr, count, offset);
-}
-#else
-int
-compat_kernel_read(struct file *file, loff_t offset, char *addr, unsigned long count)
+/* Create a kobject and attach to sysfs interface */
+static int dhd_sysfs_init(dhd_info_t *dhd)
{
- return kernel_read(file, offset, addr, count);
-}
-int
-compat_vfs_write(struct file *file, char *addr, int count, loff_t *offset)
-{
- return (int)vfs_write(file, addr, count, offset);
-}
-#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 0)) */
+ int ret = -1;
-#ifdef DHDTCPSYNC_FLOOD_BLK
-static void dhd_blk_tsfl_handler(struct work_struct * work)
-{
- dhd_if_t *ifp = NULL;
- dhd_pub_t *dhdp = NULL;
- /* Ignore compiler warnings due to -Werror=cast-qual */
-#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
-#pragma GCC diagnostic push
-#pragma GCC diagnostic ignored "-Wcast-qual"
-#endif /* STRICT_GCC_WARNINGS && __GNUC__ */
- ifp = container_of(work, dhd_if_t, blk_tsfl_work);
-#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
-#pragma GCC diagnostic pop
-#endif /* STRICT_GCC_WARNINGS && __GNUC__ */
- if (ifp) {
- dhdp = &ifp->info->pub;
- if (dhdp) {
- if ((dhdp->op_mode & DHD_FLAG_P2P_GO_MODE)||
- (dhdp->op_mode & DHD_FLAG_HOSTAP_MODE)) {
- DHD_ERROR(("Disassoc due to TCP SYNC FLOOD ATTACK\n"));
- wl_cfg80211_del_all_sta(ifp->net, WLAN_REASON_UNSPECIFIED);
- } else if ((dhdp->op_mode & DHD_FLAG_P2P_GC_MODE)||
- (dhdp->op_mode & DHD_FLAG_STA_MODE)) {
- DHD_ERROR(("Diconnect due to TCP SYNC FLOOD ATTACK\n"));
- wl_cfg80211_disassoc(ifp->net, WLAN_REASON_UNSPECIFIED);
- }
- }
+ if (dhd == NULL) {
+ DHD_ERROR(("%s(): dhd is NULL \r\n", __FUNCTION__));
+ return ret;
}
+
+ /* Initialize the kobject */
+ ret = kobject_init_and_add(&dhd->dhd_kobj, &dhd_ktype, NULL, "bcm-dhd");
+ if (ret) {
+ kobject_put(&dhd->dhd_kobj);
+ DHD_ERROR(("%s(): Unable to allocate kobject \r\n", __FUNCTION__));
+ return ret;
+ }
+
+ /*
+ * We are always responsible for sending the uevent that the kobject
+ * was added to the system.
+ */
+ kobject_uevent(&dhd->dhd_kobj, KOBJ_ADD);
+
+ return ret;
}
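/*
 * Hedged illustration (annotation, not part of the patch): dhd_ktype above
 * routes sysfs reads/writes through dhd_show()/dhd_store() to per-attribute
 * handlers kept in default_attrs[]. A new node under /sys/bcm-dhd/ would
 * follow the same __ATTR() pattern. The names show_example/set_example/
 * dhd_attr_example are hypothetical placeholders; only the shape mirrors
 * the driver code.
 */
static ssize_t
show_example(struct dhd_info *dhd, char *buf)
{
	/* Report a single value; scnprintf() bounds the write to one page. */
	return scnprintf(buf, PAGE_SIZE - 1, "%u\n", 0u);
}

static ssize_t
set_example(struct dhd_info *dhd, const char *buf, size_t count)
{
	unsigned long val;

	if (kstrtoul(buf, 10, &val) || val > 1)
		return -EINVAL;
	/* ... apply val to the driver state here ... */
	return count;
}

static struct dhd_attr dhd_attr_example =
	__ATTR(example, 0660, show_example, set_example);
/* The attribute would also need an entry in default_attrs[] to be visible. */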
-void dhd_reset_tcpsync_info_by_ifp(dhd_if_t *ifp)
-{
- ifp->tsync_rcvd = 0;
- ifp->tsyncack_txed = 0;
- ifp->last_sync = DIV_U64_BY_U32(OSL_LOCALTIME_NS(), NSEC_PER_SEC);
-}
-void dhd_reset_tcpsync_info_by_dev(struct net_device *dev)
+
+/* Done with the kobject and detach the sysfs interface */
+static void dhd_sysfs_exit(dhd_info_t *dhd)
{
- dhd_if_t *ifp = NULL;
- if (dev) {
- ifp = DHD_DEV_IFP(dev);
- }
- if (ifp) {
- ifp->tsync_rcvd = 0;
- ifp->tsyncack_txed = 0;
- ifp->last_sync = DIV_U64_BY_U32(OSL_LOCALTIME_NS(), NSEC_PER_SEC);
+ if (dhd == NULL) {
+ DHD_ERROR(("%s(): dhd is NULL \r\n", __FUNCTION__));
+ return;
}
+
+ /* Release the kobject */
+ if (dhd->dhd_kobj.state_initialized)
+ kobject_put(&dhd->dhd_kobj);
}
-#endif /* DHDTCPSYNC_FLOOD_BLK */
-#ifdef DHD_4WAYM4_FAIL_DISCONNECT
-static void dhd_m4_state_handler(struct work_struct *work)
+#ifdef DHD_DEBUG_UART
+bool
+dhd_debug_uart_is_running(struct net_device *dev)
{
- dhd_if_t *ifp = NULL;
- /* Ignore compiler warnings due to -Werror=cast-qual */
-#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
-#pragma GCC diagnostic push
-#pragma GCC diagnostic ignored "-Wcast-qual"
-#endif // endif
- struct delayed_work *dw = to_delayed_work(work);
- ifp = container_of(dw, dhd_if_t, m4state_work);
-#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
-#pragma GCC diagnostic pop
-#endif // endif
+ dhd_info_t *dhd = DHD_DEV_INFO(dev);
- if (ifp && ifp->net &&
- (OSL_ATOMIC_READ(ifp->info->pub->osh, &ifp->m4state) == M4_TXFAILED)) {
- DHD_ERROR(("Disassoc for 4WAY_HANDSHAKE_TIMEOUT at %s\n",
- ifp->net->name));
- wl_cfg80211_disassoc(ifp->net, WLAN_REASON_4WAY_HANDSHAKE_TIMEOUT);
+ if (dhd->duart_execute) {
+ return TRUE;
}
+
+ return FALSE;
}
-void
-dhd_eap_txcomplete(dhd_pub_t *dhdp, void *txp, bool success, int ifidx)
+static void
+dhd_debug_uart_exec_rd(void *handle, void *event_info, u8 event)
{
- dhd_info_t *dhd = (dhd_info_t *)(dhdp->info);
- struct ether_header *eh;
- uint16 type;
+ dhd_pub_t *dhdp = handle;
+ dhd_debug_uart_exec(dhdp, "rd");
+}
- if (!success) {
- dhd_prot_hdrpull(dhdp, NULL, txp, NULL, NULL);
+static void
+dhd_debug_uart_exec(dhd_pub_t *dhdp, char *cmd)
+{
+ int ret;
- eh = (struct ether_header *)PKTDATA(dhdp->osh, txp);
- type = ntoh16(eh->ether_type);
- if (type == ETHER_TYPE_802_1X) {
- if (dhd_is_4way_msg((uint8 *)eh) == EAPOL_4WAY_M4) {
- dhd_if_t *ifp = NULL;
- ifp = dhd->iflist[ifidx];
- if (!ifp || !ifp->net) {
- return;
- }
+ char *argv[] = {DHD_DEBUG_UART_EXEC_PATH, cmd, NULL};
+ char *envp[] = {"HOME=/", "TERM=linux", "PATH=/sbin:/system/bin", NULL};
- DHD_INFO(("%s: M4 TX failed on %d.\n",
- __FUNCTION__, ifidx));
+#ifdef DHD_FW_COREDUMP
+ if (dhdp->memdump_enabled == DUMP_MEMFILE_BUGON)
+#endif
+ {
+ if (dhdp->hang_reason == HANG_REASON_PCIE_LINK_DOWN ||
+#ifdef DHD_FW_COREDUMP
+ dhdp->memdump_success == FALSE ||
+#endif
+ FALSE) {
+ dhdp->info->duart_execute = TRUE;
+ DHD_ERROR(("DHD: %s - execute %s %s\n",
+ __FUNCTION__, DHD_DEBUG_UART_EXEC_PATH, cmd));
+ ret = call_usermodehelper(argv[0], argv, envp, UMH_WAIT_PROC);
+ DHD_ERROR(("DHD: %s - %s %s ret = %d\n",
+ __FUNCTION__, DHD_DEBUG_UART_EXEC_PATH, cmd, ret));
+ dhdp->info->duart_execute = FALSE;
- OSL_ATOMIC_SET(dhdp->osh, &ifp->m4state, M4_TXFAILED);
- schedule_delayed_work(&ifp->m4state_work,
- msecs_to_jiffies(MAX_4WAY_TIMEOUT_MS));
+#ifdef DHD_LOG_DUMP
+ if (dhdp->memdump_type != DUMP_TYPE_BY_SYSDUMP)
+#endif
+ {
+ BUG_ON(1);
}
}
}
}
+#endif /* DHD_DEBUG_UART */
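/*
 * Hedged sketch (annotation, not part of the patch): dhd_debug_uart_exec()
 * above hands a command to a userspace helper via call_usermodehelper().
 * Reduced to a self-contained helper, the call looks like the following;
 * "/vendor/bin/debug_uart.sh" and run_uart_helper() are placeholders, not
 * the driver's DHD_DEBUG_UART_EXEC_PATH.
 */
#include <linux/kmod.h>

static int run_uart_helper(const char *cmd)
{
	char *argv[] = { "/vendor/bin/debug_uart.sh", (char *)cmd, NULL };
	char *envp[] = { "HOME=/", "TERM=linux", "PATH=/sbin:/system/bin", NULL };

	/* UMH_WAIT_PROC blocks until the helper process has exited. */
	return call_usermodehelper(argv[0], argv, envp, UMH_WAIT_PROC);
}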
+#if defined(DHD_BLOB_EXISTENCE_CHECK)
void
-dhd_cleanup_m4_state_work(dhd_pub_t *dhdp, int ifidx)
+dhd_set_blob_support(dhd_pub_t *dhdp, char *fw_path)
{
- dhd_info_t *dhdinfo;
- dhd_if_t *ifp;
+ struct file *fp;
+ char *filepath = CONFIG_BCMDHD_CLM_PATH;
+
+ fp = filp_open(filepath, O_RDONLY, 0);
+ if (IS_ERR(fp)) {
+ DHD_ERROR(("%s: ----- blob file dosen't exist -----\n", __FUNCTION__));
+ dhdp->is_blob = FALSE;
+ } else {
+ DHD_ERROR(("%s: ----- blob file exist -----\n", __FUNCTION__));
+ dhdp->is_blob = TRUE;
+#if defined(CONCATE_BLOB)
+ strncat(fw_path, "_blob", strlen("_blob"));
+#else
+ BCM_REFERENCE(fw_path);
+#endif /* CONCATE_BLOB */
+ filp_close(fp, NULL);
+ }
+}
+#endif /* DHD_BLOB_EXISTENCE_CHECK */
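/*
 * Hedged sketch (illustration only): the blob detection above relies on
 * filp_open() returning an ERR_PTR when the file is absent, so IS_ERR()
 * distinguishes "present" from "missing". The same check as a standalone
 * helper looks like this; file_exists() is a hypothetical name.
 */
#include <linux/fs.h>
#include <linux/err.h>

static bool file_exists(const char *path)
{
	struct file *fp = filp_open(path, O_RDONLY, 0);

	if (IS_ERR(fp))
		return false;		/* open failed: treat as "not present" */
	filp_close(fp, NULL);
	return true;
}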
+
+#if defined(PCIE_FULL_DONGLE)
+/** test / loopback */
+void
+dmaxfer_free_dmaaddr_handler(void *handle, void *event_info, u8 event)
+{
+ dmaxref_mem_map_t *dmmap = (dmaxref_mem_map_t *)event_info;
+ dhd_info_t *dhd_info = (dhd_info_t *)handle;
+ dhd_pub_t *dhdp = &dhd_info->pub;
- if ((ifidx < 0) || (ifidx >= DHD_MAX_IFS)) {
- DHD_ERROR(("%s: invalid ifidx %d\n", __FUNCTION__, ifidx));
+ if (event != DHD_WQ_WORK_DMA_LB_MEM_REL) {
+ DHD_ERROR(("%s: unexpected event \n", __FUNCTION__));
return;
}
- dhdinfo = (dhd_info_t *)(dhdp->info);
- if (!dhdinfo) {
- DHD_ERROR(("%s: dhdinfo is NULL\n", __FUNCTION__));
+ if ((dhd_info == NULL) || (dhdp == NULL)) {
+ DHD_ERROR(("%s: invalid dhd_info\n", __FUNCTION__));
return;
}
- ifp = dhdinfo->iflist[ifidx];
- if (ifp) {
- cancel_delayed_work_sync(&ifp->m4state_work);
+ if (dmmap == NULL) {
+ DHD_ERROR(("%s: dmmap is null\n", __FUNCTION__));
+ return;
}
+ dmaxfer_free_prev_dmaaddr(dhdp, dmmap);
}
-#endif /* DHD_4WAYM4_FAIL_DISCONNECT */
-#ifdef DHD_HP2P
-unsigned long
-dhd_os_hp2plock(dhd_pub_t *pub)
+
+void
+dhd_schedule_dmaxfer_free(dhd_pub_t *dhdp, dmaxref_mem_map_t *dmmap)
{
- dhd_info_t *dhd;
- unsigned long flags = 0;
+ dhd_info_t *dhd_info = dhdp->info;
- dhd = (dhd_info_t *)(pub->info);
+ dhd_deferred_schedule_work(dhd_info->dhd_deferred_wq, (void *)dmmap,
+ DHD_WQ_WORK_DMA_LB_MEM_REL, dmaxfer_free_dmaaddr_handler, DHD_WQ_WORK_PRIORITY_LOW);
+}
+#endif /* PCIE_FULL_DONGLE */
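/*
 * Hedged sketch (annotation only): dhd_schedule_dmaxfer_free() above defers
 * the unmap to the driver's deferred work queue, and the handler re-validates
 * both the event code and its pointers before touching anything. A generic
 * handler following the same convention might look like this;
 * DHD_WQ_WORK_EXAMPLE and example_handler() are hypothetical placeholders.
 */
static void
example_handler(void *handle, void *event_info, u8 event)
{
	dhd_info_t *dhd_info = (dhd_info_t *)handle;

	if (event != DHD_WQ_WORK_EXAMPLE) {	/* hypothetical event id */
		DHD_ERROR(("%s: unexpected event\n", __FUNCTION__));
		return;
	}
	if (dhd_info == NULL || event_info == NULL) {
		DHD_ERROR(("%s: invalid arguments\n", __FUNCTION__));
		return;
	}
	/* ... operate on event_info under &dhd_info->pub ... */
}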
+/* ---------------------------- End of sysfs implementation ------------------------------------- */
+#ifdef HOFFLOAD_MODULES
+void
+dhd_linux_get_modfw_address(dhd_pub_t *dhd)
+{
+ const char* module_name = NULL;
+ const struct firmware *module_fw;
+ struct module_metadata *hmem = &dhd->hmem;
- if (dhd) {
- spin_lock_irqsave(&dhd->hp2p_lock, flags);
+ if (dhd_hmem_module_string[0] != '\0') {
+ module_name = dhd_hmem_module_string;
+ } else {
+ DHD_ERROR(("%s No module image name specified\n", __FUNCTION__));
+ return;
}
-
- return flags;
+ if (request_firmware(&module_fw, module_name, dhd_bus_to_dev(dhd->bus))) {
+ DHD_ERROR(("modules.img not available\n"));
+ return;
+ }
+ if (!dhd_alloc_module_memory(dhd->bus, module_fw->size, hmem)) {
+ release_firmware(module_fw);
+ return;
+ }
+ memcpy(hmem->data, module_fw->data, module_fw->size);
+ release_firmware(module_fw);
}
+#endif /* HOFFLOAD_MODULES */
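/*
 * Hedged sketch (not the driver's code): dhd_linux_get_modfw_address() above
 * follows the standard request_firmware()/release_firmware() contract -- the
 * firmware core resolves the image by name, the caller copies what it needs,
 * then releases the blob. "example.img", load_image_into(), dst and dst_len
 * are placeholders.
 */
#include <linux/firmware.h>
#include <linux/device.h>
#include <linux/string.h>

static int load_image_into(struct device *dev, void *dst, size_t dst_len)
{
	const struct firmware *fw;
	int err;

	err = request_firmware(&fw, "example.img", dev);
	if (err)
		return err;

	if (fw->size > dst_len) {
		release_firmware(fw);
		return -ENOSPC;
	}
	memcpy(dst, fw->data, fw->size);
	release_firmware(fw);
	return 0;
}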
+#ifdef SET_PCIE_IRQ_CPU_CORE
void
-dhd_os_hp2punlock(dhd_pub_t *pub, unsigned long flags)
+dhd_set_irq_cpucore(dhd_pub_t *dhdp, int set)
{
- dhd_info_t *dhd;
+ unsigned int irq;
+ if (!dhdp) {
+ DHD_ERROR(("%s : dhd is NULL\n", __FUNCTION__));
+ return;
+ }
- dhd = (dhd_info_t *)(pub->info);
+ if (!dhdp->bus) {
+ DHD_ERROR(("%s : dhd->bus is NULL\n", __FUNCTION__));
+ return;
+ }
- if (dhd) {
- spin_unlock_irqrestore(&dhd->hp2p_lock, flags);
+ if (dhdpcie_get_pcieirq(dhdp->bus, &irq)) {
+ return;
}
+
+ set_irq_cpucore(irq, set);
}
-#endif /* DHD_HP2P */
-#ifdef DNGL_AXI_ERROR_LOGGING
-static void
-dhd_axi_error_dump(void *handle, void *event_info, u8 event)
+#endif /* SET_PCIE_IRQ_CPU_CORE */
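/*
 * Hedged illustration: set_irq_cpucore() above is a platform helper that is
 * not shown in this patch. One common way to pin an IRQ with the stock kernel
 * API is an affinity hint, sketched below; pin_irq_to_cpu() is a hypothetical
 * name, not the driver's implementation.
 */
#include <linux/interrupt.h>
#include <linux/cpumask.h>

static void pin_irq_to_cpu(unsigned int irq, int cpu)
{
	if (cpu_online(cpu))
		irq_set_affinity_hint(irq, cpumask_of(cpu));
	else
		irq_set_affinity_hint(irq, NULL);	/* clear the hint */
}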
+
+#if defined(DHD_HANG_SEND_UP_TEST)
+void
+dhd_make_hang_with_reason(struct net_device *dev, const char *string_num)
{
- dhd_info_t *dhd = (dhd_info_t *)handle;
+ dhd_info_t *dhd = NULL;
dhd_pub_t *dhdp = NULL;
+ uint reason = HANG_REASON_MAX;
+ char buf[WLC_IOCTL_SMLEN] = {0, };
+ uint32 fw_test_code = 0;
+ dhd = DHD_DEV_INFO(dev);
- if (!dhd) {
- DHD_ERROR(("%s: dhd is NULL\n", __FUNCTION__));
- goto exit;
+ if (dhd) {
+ dhdp = &dhd->pub;
}
- dhdp = &dhd->pub;
- if (!dhdp) {
- DHD_ERROR(("%s: dhdp is NULL\n", __FUNCTION__));
- goto exit;
+ if (!dhd || !dhdp) {
+ return;
}
- /**
- * First save axi error information to a file
- * because panic should happen right after this.
- * After dhd reset, dhd reads the file, and do hang event process
- * to send axi error stored on the file to Bigdata server
- */
- if (dhdp->axi_err_dump->etd_axi_error_v1.version != HND_EXT_TRAP_AXIERROR_VERSION_1) {
- DHD_ERROR(("%s: Invalid AXI version: 0x%x\n",
- __FUNCTION__, dhdp->axi_err_dump->etd_axi_error_v1.version));
+ reason = (uint) bcm_strtoul(string_num, NULL, 0);
+ DHD_ERROR(("Enter %s, reason=0x%x\n", __FUNCTION__, reason));
+
+ if (reason == 0) {
+ if (dhdp->req_hang_type) {
+ DHD_ERROR(("%s, Clear HANG test request 0x%x\n",
+ __FUNCTION__, dhdp->req_hang_type));
+ dhdp->req_hang_type = 0;
+ return;
+ } else {
+ DHD_ERROR(("%s, No requested HANG test\n", __FUNCTION__));
+ return;
+ }
+ } else if ((reason <= HANG_REASON_MASK) || (reason >= HANG_REASON_MAX)) {
+ DHD_ERROR(("Invalid HANG request, reason 0x%x\n", reason));
+ return;
}
- DHD_OS_WAKE_LOCK(dhdp);
-#ifdef DHD_FW_COREDUMP
-#ifdef DHD_SSSR_DUMP
- dhdp->collect_sssr = TRUE;
-#endif /* DHD_SSSR_DUMP */
- DHD_ERROR(("%s: scheduling mem dump.. \n", __FUNCTION__));
- dhd_schedule_memdump(dhdp, dhdp->soc_ram, dhdp->soc_ram_length);
-#endif /* DHD_FW_COREDUMP */
- DHD_OS_WAKE_UNLOCK(dhdp);
+ if (dhdp->req_hang_type != 0) {
+ DHD_ERROR(("Already HANG requested for test\n"));
+ return;
+ }
-exit:
- /* Trigger kernel panic after taking necessary dumps */
- BUG_ON(1);
+ switch (reason) {
+ case HANG_REASON_IOCTL_RESP_TIMEOUT:
+ DHD_ERROR(("Make HANG!!!: IOCTL response timeout(0x%x)\n", reason));
+ dhdp->req_hang_type = reason;
+ fw_test_code = 102; /* resumed on timeout */
+ bcm_mkiovar("bus:disconnect", (void *)&fw_test_code, 4, buf, sizeof(buf));
+ dhd_wl_ioctl_cmd(dhdp, WLC_SET_VAR, buf, sizeof(buf), TRUE, 0);
+ break;
+ case HANG_REASON_DONGLE_TRAP:
+ DHD_ERROR(("Make HANG!!!: Dongle trap (0x%x)\n", reason));
+ dhdp->req_hang_type = reason;
+ fw_test_code = 99; /* dongle trap */
+ bcm_mkiovar("bus:disconnect", (void *)&fw_test_code, 4, buf, sizeof(buf));
+ dhd_wl_ioctl_cmd(dhdp, WLC_SET_VAR, buf, sizeof(buf), TRUE, 0);
+ break;
+ case HANG_REASON_D3_ACK_TIMEOUT:
+ DHD_ERROR(("Make HANG!!!: D3 ACK timeout (0x%x)\n", reason));
+ dhdp->req_hang_type = reason;
+ break;
+ case HANG_REASON_BUS_DOWN:
+ DHD_ERROR(("Make HANG!!!: BUS down(0x%x)\n", reason));
+ dhdp->req_hang_type = reason;
+ break;
+ case HANG_REASON_PCIE_LINK_DOWN:
+ case HANG_REASON_MSGBUF_LIVELOCK:
+ dhdp->req_hang_type = 0;
+ DHD_ERROR(("Does not support requested HANG(0x%x)\n", reason));
+ break;
+ case HANG_REASON_IFACE_OP_FAILURE:
+ DHD_ERROR(("Make HANG!!!: P2P inrerface delete failure(0x%x)\n", reason));
+ dhdp->req_hang_type = reason;
+ break;
+ case HANG_REASON_HT_AVAIL_ERROR:
+ dhdp->req_hang_type = 0;
+ DHD_ERROR(("PCIe does not support requested HANG(0x%x)\n", reason));
+ break;
+ case HANG_REASON_PCIE_RC_LINK_UP_FAIL:
+ DHD_ERROR(("Make HANG!!!:Link Up(0x%x)\n", reason));
+ dhdp->req_hang_type = reason;
+ break;
+ default:
+ dhdp->req_hang_type = 0;
+ DHD_ERROR(("Unknown HANG request (0x%x)\n", reason));
+ break;
+ }
+}
+#endif /* DHD_HANG_SEND_UP_TEST */
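/*
 * Hedged sketch: each hang-test case above packs a 32-bit value into an
 * iovar buffer with bcm_mkiovar() and pushes it to the firmware with
 * WLC_SET_VAR. The repeated two-line pattern, factored into a wrapper, would
 * look like this; set_u32_iovar() is a hypothetical helper name.
 */
static int
set_u32_iovar(dhd_pub_t *dhdp, const char *name, uint32 val)
{
	char buf[WLC_IOCTL_SMLEN] = {0, };

	/* Pack "<name>\0<value>" into buf, exactly as the cases above do. */
	bcm_mkiovar(name, (void *)&val, sizeof(val), buf, sizeof(buf));
	return dhd_wl_ioctl_cmd(dhdp, WLC_SET_VAR, buf, sizeof(buf), TRUE, 0);
}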
+#ifdef DHD_WAKE_STATUS
+wake_counts_t*
+dhd_get_wakecount(dhd_pub_t *dhdp)
+{
+#ifdef BCMDBUS
+ return NULL;
+#else
+ return dhd_bus_get_wakecount(dhdp);
+#endif /* BCMDBUS */
}
+#endif /* DHD_WAKE_STATUS */
-void dhd_schedule_axi_error_dump(dhd_pub_t *dhdp, void *type)
+#ifdef BCM_ASLR_HEAP
+uint32
+dhd_get_random_number(void)
{
- DHD_ERROR(("%s: scheduling axi_error_dump.. \n", __FUNCTION__));
- dhd_deferred_schedule_work(dhdp->info->dhd_deferred_wq,
- type, DHD_WQ_WORK_AXI_ERROR_DUMP,
- dhd_axi_error_dump, DHD_WQ_WORK_PRIORITY_HIGH);
+ uint32 rand = 0;
+ get_random_bytes_arch(&rand, sizeof(rand));
+ return rand;
}
-#endif /* DNGL_AXI_ERROR_LOGGING */
+#endif /* BCM_ASLR_HEAP */
-#ifdef BCMPCIE
-static void
-dhd_cto_recovery_handler(void *handle, void *event_info, u8 event)
+#ifdef DHD_PKT_LOGGING
+void
+dhd_pktlog_dump(void *handle, void *event_info, u8 event)
{
dhd_info_t *dhd = handle;
- dhd_pub_t *dhdp = NULL;
if (!dhd) {
DHD_ERROR(("%s: dhd is NULL\n", __FUNCTION__));
- BUG_ON(1);
return;
}
- dhdp = &dhd->pub;
- dhdpcie_cto_recovery_handler(dhdp);
+ if (dhd_pktlog_write_file(&dhd->pub)) {
+ DHD_ERROR(("%s: writing pktlog dump to the file failed\n", __FUNCTION__));
+ return;
+ }
}
void
-dhd_schedule_cto_recovery(dhd_pub_t *dhdp)
+dhd_schedule_pktlog_dump(dhd_pub_t *dhdp)
{
- DHD_ERROR(("%s: scheduling cto recovery.. \n", __FUNCTION__));
dhd_deferred_schedule_work(dhdp->info->dhd_deferred_wq,
- NULL, DHD_WQ_WORK_CTO_RECOVERY,
- dhd_cto_recovery_handler, DHD_WQ_WORK_PRIORITY_HIGH);
-}
-#endif /* BCMPCIE */
-
-#ifdef SUPPORT_SET_TID
-/*
- * Set custom TID value for UDP frame based on UID value.
- * This will be triggered by android private command below.
- * DRIVER SET_TID <Mode:uint8> <Target UID:uint32> <Custom TID:uint8>
- * Mode 0(SET_TID_OFF) : Disable changing TID
- * Mode 1(SET_TID_ALL_UDP) : Change TID for all UDP frames
- * Mode 2(SET_TID_BASED_ON_UID) : Change TID for UDP frames based on target UID
-*/
-void
-dhd_set_tid_based_on_uid(dhd_pub_t *dhdp, void *pkt)
-{
- struct ether_header *eh = NULL;
- struct sock *sk = NULL;
- uint8 *pktdata = NULL;
- uint8 *ip_hdr = NULL;
- uint8 cur_prio;
- uint8 prio;
- uint32 uid;
-
- if (dhdp->tid_mode == SET_TID_OFF) {
- return;
- }
-
- pktdata = (uint8 *)PKTDATA(dhdp->osh, pkt);
- eh = (struct ether_header *) pktdata;
- ip_hdr = (uint8 *)eh + ETHER_HDR_LEN;
-
- if (IPV4_PROT(ip_hdr) != IP_PROT_UDP) {
- return;
- }
-
- cur_prio = PKTPRIO(pkt);
- prio = dhdp->target_tid;
- uid = dhdp->target_uid;
-
- if ((cur_prio == prio) ||
- (cur_prio != PRIO_8021D_BE)) {
- return;
- }
-
- sk = ((struct sk_buff*)(pkt))->sk;
-
- if ((dhdp->tid_mode == SET_TID_ALL_UDP) ||
- (sk && (uid == __kuid_val(sock_i_uid(sk))))) {
- PKTSETPRIO(pkt, prio);
- }
+ (void*)NULL, DHD_WQ_WORK_PKTLOG_DUMP,
+ dhd_pktlog_dump, DHD_WQ_WORK_PRIORITY_HIGH);
}
-#endif /* SUPPORT_SET_TID */
+#endif /* DHD_PKT_LOGGING */
void *dhd_get_pub(struct net_device *dev)
{
}
return dhd->wd_timer_valid;
}
-
-#if defined(WLDWDS) && defined(FOURADDR_AUTO_BRG)
-/* This function is to automatically add/del interface to the bridged dev that priamy dev is in */
-static void dhd_bridge_dev_set(dhd_info_t *dhd, int ifidx, struct net_device *dev)
-{
- struct net_device *primary_ndev = NULL, *br_dev = NULL;
- int cmd;
- struct ifreq ifr;
-
- /* add new interface to bridge dev */
- if (dev) {
- int found = 0, i;
- DHD_ERROR(("bssidx %d\n", dhd->pub.info->iflist[ifidx]->bssidx));
- for (i = 0 ; i < ifidx; i++) {
- DHD_ERROR(("bssidx %d %d\n", i, dhd->pub.info->iflist[i]->bssidx));
- /* search the primary interface */
- if (dhd->pub.info->iflist[i]->bssidx == dhd->pub.info->iflist[ifidx]->bssidx) {
- primary_ndev = dhd->pub.info->iflist[i]->net;
- DHD_ERROR(("%dst is primary dev %s\n", i, primary_ndev->name));
- found = 1;
- break;
- }
- }
- if (found == 0) {
- DHD_ERROR(("Can not find primary dev %s\n", dev->name));
- return;
- }
- cmd = SIOCBRADDIF;
- ifr.ifr_ifindex = dev->ifindex;
- } else { /* del interface from bridge dev */
- primary_ndev = dhd->pub.info->iflist[ifidx]->net;
- cmd = SIOCBRDELIF;
- ifr.ifr_ifindex = primary_ndev->ifindex;
- }
- /* if primary net device is bridged */
- if (primary_ndev->priv_flags & IFF_BRIDGE_PORT) {
- rtnl_lock();
- /* get bridge device */
- br_dev = netdev_master_upper_dev_get(primary_ndev);
- if (br_dev) {
- const struct net_device_ops *ops = br_dev->netdev_ops;
- DHD_ERROR(("br %s pri %s\n", br_dev->name, primary_ndev->name));
- if (ops) {
- if (cmd == SIOCBRADDIF) {
- DHD_ERROR(("br call ndo_add_slave\n"));
- ops->ndo_add_slave(br_dev, dev);
- /* Also bring wds0.x interface up automatically */
- dev_change_flags(dev, dev->flags | IFF_UP);
- }
- else {
- DHD_ERROR(("br call ndo_del_slave\n"));
- ops->ndo_del_slave(br_dev, primary_ndev);
- }
- }
- }
- else {
- DHD_ERROR(("no br dev\n"));
- }
- rtnl_unlock();
- }
- else {
- DHD_ERROR(("device %s is not bridged\n", primary_ndev->name));
- }
-}
-#endif /* defiend(WLDWDS) && defined(FOURADDR_AUTO_BRG) */
/*
* DHD Linux header file (dhd_linux exports for cfg80211 and other components)
*
- * Copyright (C) 1999-2019, Broadcom.
- *
+ * Copyright (C) 1999-2017, Broadcom Corporation
+ *
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
* under the terms of the GNU General Public License version 2 (the "GPL"),
* available at http://www.broadcom.com/licenses/GPLv2.php, with the
* following added to such license:
- *
+ *
* As a special exception, the copyright holders of this software give you
* permission to link this software with independent modules, and to copy and
 * distribute the resulting executable under terms of your choice, provided that
 * you also meet, for each linked independent module, the terms and conditions of
 * the license of that module. An independent module is a module which is not
* derived from this software. The special exception does not apply to any
* modifications of the software.
- *
+ *
* Notwithstanding the above, under no circumstances may you combine this
* software in any way with any other Broadcom software provided under a license
* other than the GPL, without Broadcom's express prior written consent.
*
* <<Broadcom-WL-IPTag/Open:>>
*
- * $Id: dhd_linux.h 816392 2019-04-24 14:39:02Z $
+ * $Id: dhd_linux.h 699532 2017-05-15 11:00:39Z $
*/
/* wifi platform functions for power, interrupt and pre-alloc, either
#include <linux/fs.h>
#include <dngl_stats.h>
#include <dhd.h>
+#ifdef DHD_WMF
+#include <dhd_wmf_linux.h>
+#endif
/* Linux wireless extension support */
#if defined(WL_WIRELESS_EXT)
#include <wl_iw.h>
#if defined(CONFIG_HAS_EARLYSUSPEND) && defined(DHD_USE_EARLYSUSPEND)
#include <linux/earlysuspend.h>
#endif /* defined(CONFIG_HAS_EARLYSUSPEND) && defined(DHD_USE_EARLYSUSPEND) */
-#ifdef PCIE_FULL_DONGLE
-#include <etd.h>
-#endif /* PCIE_FULL_DONGLE */
-#ifdef WL_MONITOR
-#include <bcmmsgbuf.h>
-#define MAX_RADIOTAP_SIZE 256 /* Maximum size to hold HE Radiotap header format */
-#define MAX_MON_PKT_SIZE (4096 + MAX_RADIOTAP_SIZE)
-#endif /* WL_MONITOR */
-
-#define FILE_DUMP_MAX_WAIT_TIME 4000
-
-#define htod32(i) (i)
-#define htod16(i) (i)
-#define dtoh32(i) (i)
-#define dtoh16(i) (i)
-#define htodchanspec(i) (i)
-#define dtohchanspec(i) (i)
-
-#ifdef BLOCK_IPV6_PACKET
-#define HEX_PREF_STR "0x"
-#define UNI_FILTER_STR "010000000000"
-#define ZERO_ADDR_STR "000000000000"
-#define ETHER_TYPE_STR "0000"
-#define IPV6_FILTER_STR "20"
-#define ZERO_TYPE_STR "00"
-#endif /* BLOCK_IPV6_PACKET */
-
-typedef struct dhd_if_event {
- struct list_head list;
- wl_event_data_if_t event;
- char name[IFNAMSIZ+1];
- uint8 mac[ETHER_ADDR_LEN];
-} dhd_if_event_t;
-
-/* Interface control information */
-typedef struct dhd_if {
- struct dhd_info *info; /* back pointer to dhd_info */
- /* OS/stack specifics */
- struct net_device *net;
- int idx; /* iface idx in dongle */
- uint subunit; /* subunit */
- uint8 mac_addr[ETHER_ADDR_LEN]; /* assigned MAC address */
- bool set_macaddress;
- bool set_multicast;
- uint8 bssidx; /* bsscfg index for the interface */
- bool attached; /* Delayed attachment when unset */
- bool txflowcontrol; /* Per interface flow control indicator */
- char name[IFNAMSIZ+1]; /* linux interface name */
- char dngl_name[IFNAMSIZ+1]; /* corresponding dongle interface name */
- struct net_device_stats stats;
-#ifdef PCIE_FULL_DONGLE
- struct list_head sta_list; /* sll of associated stations */
- spinlock_t sta_list_lock; /* lock for manipulating sll */
-#endif /* PCIE_FULL_DONGLE */
- uint32 ap_isolate; /* ap-isolation settings */
-#ifdef DHD_L2_FILTER
- bool parp_enable;
- bool parp_discard;
- bool parp_allnode;
- arp_table_t *phnd_arp_table;
- /* for Per BSS modification */
- bool dhcp_unicast;
- bool block_ping;
- bool grat_arp;
- bool block_tdls;
-#endif /* DHD_L2_FILTER */
-#ifdef DHD_MCAST_REGEN
- bool mcast_regen_bss_enable;
-#endif // endif
- bool rx_pkt_chainable; /* set all rx packet to chainable config by default */
- cumm_ctr_t cumm_ctr; /* cummulative queue length of child flowrings */
- uint8 tx_paths_active;
- bool del_in_progress;
- bool static_if; /* used to avoid some operations on static_if */
-#ifdef DHD_4WAYM4_FAIL_DISCONNECT
- struct delayed_work m4state_work;
- atomic_t m4state;
-#endif /* DHD_4WAYM4_FAIL_DISCONNECT */
-#ifdef DHD_POST_EAPOL_M1_AFTER_ROAM_EVT
- bool recv_reassoc_evt;
- bool post_roam_evt;
-#endif /* DHD_POST_EAPOL_M1_AFTER_ROAM_EVT */
-#ifdef DHDTCPSYNC_FLOOD_BLK
- uint32 tsync_rcvd;
- uint32 tsyncack_txed;
- u64 last_sync;
- struct work_struct blk_tsfl_work;
-#endif /* DHDTCPSYNC_FLOOD_BLK */
-} dhd_if_t;
-
-struct ipv6_work_info_t {
- uint8 if_idx;
- char ipv6_addr[IPV6_ADDR_LEN];
- unsigned long event;
-};
-
-typedef struct dhd_dump {
- uint8 *buf;
- int bufsize;
- uint8 *hscb_buf;
- int hscb_bufsize;
-} dhd_dump_t;
-#ifdef DNGL_AXI_ERROR_LOGGING
-typedef struct dhd_axi_error_dump {
- ulong fault_address;
- uint32 axid;
- struct hnd_ext_trap_axi_error_v1 etd_axi_error_v1;
-} dhd_axi_error_dump_t;
-#endif /* DNGL_AXI_ERROR_LOGGING */
-
-#ifdef DHD_PCIE_NATIVE_RUNTIMEPM
-struct dhd_rx_tx_work {
- struct work_struct work;
- struct sk_buff *skb;
- struct net_device *net;
- struct dhd_pub *pub;
-};
-#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
-
-#if defined(DHD_LB)
-#if !defined(PCIE_FULL_DONGLE)
-#error "DHD Loadbalancing only supported on PCIE_FULL_DONGLE"
-#endif /* !PCIE_FULL_DONGLE */
-#endif /* DHD_LB */
-
-#if defined(DHD_LB_RXP) || defined(DHD_LB_RXC) || defined(DHD_LB_TXC) || \
- defined(DHD_LB_STATS)
-#if !defined(DHD_LB)
-#error "DHD loadbalance derivatives are supported only if DHD_LB is defined"
-#endif /* !DHD_LB */
-#endif /* DHD_LB_RXP || DHD_LB_RXC || DHD_LB_TXC || DHD_LB_STATS */
-
-#if defined(DHD_LB)
-/* Dynamic CPU selection for load balancing */
-#include <linux/cpu.h>
-#include <linux/cpumask.h>
-#include <linux/notifier.h>
-#include <linux/workqueue.h>
-#include <asm/atomic.h>
-
-#if !defined(DHD_LB_PRIMARY_CPUS)
-#define DHD_LB_PRIMARY_CPUS 0x0 /* Big CPU coreids mask */
-#endif // endif
-#if !defined(DHD_LB_SECONDARY_CPUS)
-#define DHD_LB_SECONDARY_CPUS 0xFE /* Little CPU coreids mask */
-#endif // endif
-
-#define HIST_BIN_SIZE 9
-
-#if defined(DHD_LB_TXP)
-/* Pkttag not compatible with PROP_TXSTATUS or WLFC */
-typedef struct dhd_tx_lb_pkttag_fr {
- struct net_device *net;
- int ifidx;
-} dhd_tx_lb_pkttag_fr_t;
-
-#define DHD_LB_TX_PKTTAG_SET_NETDEV(tag, netdevp) ((tag)->net = netdevp)
-#define DHD_LB_TX_PKTTAG_NETDEV(tag) ((tag)->net)
-
-#define DHD_LB_TX_PKTTAG_SET_IFIDX(tag, ifidx) ((tag)->ifidx = ifidx)
-#define DHD_LB_TX_PKTTAG_IFIDX(tag) ((tag)->ifidx)
-#endif /* DHD_LB_TXP */
-
-#endif /* DHD_LB */
-
-#ifdef FILTER_IE
-#define FILTER_IE_PATH "/etc/wifi/filter_ie"
-#define FILTER_IE_BUFSZ 1024 /* ioc buffsize for FILTER_IE */
-#define FILE_BLOCK_READ_SIZE 256
-#define WL_FILTER_IE_IOV_HDR_SIZE OFFSETOF(wl_filter_ie_iov_v1_t, tlvs)
-#endif /* FILTER_IE */
-
-#define NULL_CHECK(p, s, err) \
- do { \
- if (!(p)) { \
- printk("NULL POINTER (%s) : %s\n", __FUNCTION__, (s)); \
- err = BCME_ERROR; \
- return err; \
- } \
- } while (0)
/* dongle status */
enum wifi_adapter_status {
#define WLAN_PLAT_NODFS_FLAG 0x01
#define WLAN_PLAT_AP_FLAG 0x02
-#if !defined(CONFIG_WIFI_CONTROL_FUNC)
struct wifi_platform_data {
#ifdef BUS_POWER_RESTORE
int (*set_power)(int val, wifi_adapter_info_t *adapter);
int (*set_reset)(int val);
int (*set_carddetect)(int val);
void *(*mem_prealloc)(int section, unsigned long size);
-#ifdef CUSTOM_MULTI_MAC
- int (*get_mac_addr)(unsigned char *buf, char *name);
-#else
int (*get_mac_addr)(unsigned char *buf);
-#endif
-#ifdef BCMSDIO
- int (*get_wake_irq)(void);
-#endif // endif
-#ifdef CUSTOM_FORCE_NODFS_FLAG
+#if defined(CUSTOM_COUNTRY_CODE)
void *(*get_country_code)(char *ccode, u32 flags);
-#else /* defined (CUSTOM_FORCE_NODFS_FLAG) */
+#else /* defined (CUSTOM_COUNTRY_CODE) */
void *(*get_country_code)(char *ccode);
#endif
};
-#endif
typedef struct bcmdhd_wifi_platdata {
uint num_adapters;
struct list_head list; /* link into dhd_if::sta_list */
int idx; /* index of self in dhd_pub::sta_pool[] */
int ifidx; /* index of interface in dhd */
+#ifdef DHD_WMF
+ struct dhd_sta *psta_prim; /* primary index of psta interface */
+#endif /* DHD_WMF */
} dhd_sta_t;
typedef dhd_sta_t dhd_sta_pool_t;
-#ifdef DHD_4WAYM4_FAIL_DISCONNECT
-typedef enum {
- M3_RXED,
- M4_TXFAILED
-} msg_4way_state_t;
-#define MAX_4WAY_TIMEOUT_MS 2000
-#endif /* DHD_4WAYM4_FAIL_DISCONNECT */
-
-#ifdef DHD_SEND_HANG_PRIVCMD_ERRORS
-extern uint32 report_hang_privcmd_err;
-#endif /* DHD_SEND_HANG_PRIVCMD_ERRORS */
-
-#if defined(ARGOS_NOTIFY_CB)
-int argos_register_notifier_init(struct net_device *net);
-int argos_register_notifier_deinit(void);
-
-extern int sec_argos_register_notifier(struct notifier_block *n, char *label);
-extern int sec_argos_unregister_notifier(struct notifier_block *n, char *label);
-
-typedef struct {
- struct net_device *wlan_primary_netdev;
- int argos_rps_cpus_enabled;
-} argos_rps_ctrl;
-
-#define RPS_TPUT_THRESHOLD 300
-#define DELAY_TO_CLEAR_RPS_CPUS 300
-#endif // endif
-
-#if defined(BT_OVER_SDIO)
-extern void wl_android_set_wifi_on_flag(bool enable);
-#endif /* BT_OVER_SDIO */
-
-#ifdef DHD_LOG_DUMP
-/* 0: DLD_BUF_TYPE_GENERAL, 1: DLD_BUF_TYPE_PRESERVE
-* 2: DLD_BUF_TYPE_SPECIAL
-*/
-#define DLD_BUFFER_NUM 3
-
-#ifndef CUSTOM_LOG_DUMP_BUFSIZE_MB
-#define CUSTOM_LOG_DUMP_BUFSIZE_MB 4 /* DHD_LOG_DUMP_BUF_SIZE 4 MB static memory in kernel */
-#endif /* CUSTOM_LOG_DUMP_BUFSIZE_MB */
-
-#define LOG_DUMP_TOTAL_BUFSIZE (1024 * 1024 * CUSTOM_LOG_DUMP_BUFSIZE_MB)
-
-/*
- * Below are different sections that use the prealloced buffer
- * and sum of the sizes of these should not cross LOG_DUMP_TOTAL_BUFSIZE
- */
-#define LOG_DUMP_GENERAL_MAX_BUFSIZE (256 * 1024 * CUSTOM_LOG_DUMP_BUFSIZE_MB)
-#define LOG_DUMP_PRESERVE_MAX_BUFSIZE (128 * 1024 * CUSTOM_LOG_DUMP_BUFSIZE_MB)
-#define LOG_DUMP_ECNTRS_MAX_BUFSIZE (256 * 1024 * CUSTOM_LOG_DUMP_BUFSIZE_MB)
-#define LOG_DUMP_RTT_MAX_BUFSIZE (256 * 1024 * CUSTOM_LOG_DUMP_BUFSIZE_MB)
-#define LOG_DUMP_FILTER_MAX_BUFSIZE (128 * 1024 * CUSTOM_LOG_DUMP_BUFSIZE_MB)
-
-#if LOG_DUMP_TOTAL_BUFSIZE < (LOG_DUMP_GENERAL_MAX_BUFSIZE + \
- LOG_DUMP_PRESERVE_MAX_BUFSIZE + LOG_DUMP_ECNTRS_MAX_BUFSIZE + LOG_DUMP_RTT_MAX_BUFSIZE \
- + LOG_DUMP_FILTER_MAX_BUFSIZE)
-#error "LOG_DUMP_TOTAL_BUFSIZE is lesser than sum of all rings"
-#endif // endif
-
-/* Special buffer is allocated as separately in prealloc */
-#define LOG_DUMP_SPECIAL_MAX_BUFSIZE (8 * 1024)
-
-#define LOG_DUMP_MAX_FILESIZE (8 *1024 * 1024) /* 8 MB default */
-#ifdef CONFIG_LOG_BUF_SHIFT
-/* 15% of kernel log buf size, if for example klog buf size is 512KB
-* 15% of 512KB ~= 80KB
-*/
-#define LOG_DUMP_KERNEL_TAIL_FLUSH_SIZE \
- (15 * ((1 << CONFIG_LOG_BUF_SHIFT)/100))
-#endif /* CONFIG_LOG_BUF_SHIFT */
-
-#define LOG_DUMP_COOKIE_BUFSIZE 1024u
-
-typedef struct {
- char *hdr_str;
- log_dump_section_type_t sec_type;
-} dld_hdr_t;
-
-typedef struct {
- int attr;
- char *hdr_str;
- log_dump_section_type_t sec_type;
- int log_type;
-} dld_log_hdr_t;
-
-#define DHD_PRINT_BUF_NAME_LEN 30
-#endif /* DHD_LOG_DUMP */
-
int dhd_wifi_platform_register_drv(void);
void dhd_wifi_platform_unregister_drv(void);
wifi_adapter_info_t* dhd_wifi_platform_attach_adapter(uint32 bus_type,
int wifi_platform_set_power(wifi_adapter_info_t *adapter, bool on, unsigned long msec);
int wifi_platform_bus_enumerate(wifi_adapter_info_t *adapter, bool device_present);
int wifi_platform_get_irq_number(wifi_adapter_info_t *adapter, unsigned long *irq_flags_ptr);
-int wifi_platform_get_mac_addr(wifi_adapter_info_t *adapter, unsigned char *buf, char *name);
+int wifi_platform_get_mac_addr(wifi_adapter_info_t *adapter, unsigned char *buf);
#ifdef CUSTOM_COUNTRY_CODE
void *wifi_platform_get_country_code(wifi_adapter_info_t *adapter, char *ccode,
u32 flags);
int dhd_get_fw_mode(struct dhd_info *dhdinfo);
bool dhd_update_fw_nv_path(struct dhd_info *dhdinfo);
+#ifdef DHD_WMF
+dhd_wmf_t* dhd_wmf_conf(dhd_pub_t *dhdp, uint32 idx);
+int dhd_get_wmf_psta_disable(dhd_pub_t *dhdp, uint32 idx);
+int dhd_set_wmf_psta_disable(dhd_pub_t *dhdp, uint32 idx, int val);
+void dhd_update_psta_interface_for_sta(dhd_pub_t *dhdp, char* ifname,
+ void* mac_addr, void* event_data);
+#endif /* DHD_WMF */
#if defined(BT_OVER_SDIO)
int dhd_net_bus_get(struct net_device *dev);
int dhd_net_bus_put(struct net_device *dev);
#endif /* BT_OVER_SDIO */
-#if defined(WLADPS)
+#ifdef HOFFLOAD_MODULES
+extern void dhd_free_module_memory(struct dhd_bus *bus, struct module_metadata *hmem);
+extern void* dhd_alloc_module_memory(struct dhd_bus *bus, uint32_t size,
+ struct module_metadata *hmem);
+#endif /* HOFFLOAD_MODULES */
+#if defined(WLADPS) || defined(WLADPS_PRIVATE_CMD)
#define ADPS_ENABLE 1
#define ADPS_DISABLE 0
+typedef struct bcm_iov_buf {
+ uint16 version;
+ uint16 len;
+ uint16 id;
+ uint16 data[1];
+} bcm_iov_buf_t;
int dhd_enable_adps(dhd_pub_t *dhd, uint8 on);
-#endif // endif
-#ifdef DHDTCPSYNC_FLOOD_BLK
-extern void dhd_reset_tcpsync_info_by_ifp(dhd_if_t *ifp);
-extern void dhd_reset_tcpsync_info_by_dev(struct net_device *dev);
-#endif /* DHDTCPSYNC_FLOOD_BLK */
-
-int compat_kernel_read(struct file *file, loff_t offset, char *addr, unsigned long count);
-int compat_vfs_write(struct file *file, char *addr, int count, loff_t *offset);
-
+#endif /* WLADPS || WLADPS_PRIVATE_CMD */
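/*
 * Hedged illustration of how the bcm_iov_buf_t declared above is typically
 * populated: a small header (version/len/id) followed by a variable-length
 * payload starting at data[]. The version value and fill_iov_buf() name are
 * placeholders, not the real ADPS definitions.
 */
#include <linux/string.h>

static void
fill_iov_buf(bcm_iov_buf_t *iov, uint16 id, const void *payload, uint16 payload_len)
{
	iov->version = 1;		/* placeholder; use the iovar's real version */
	iov->id = id;
	iov->len = payload_len;
	/* data[1] acts as a flexible tail: the caller must allocate the
	 * header plus payload_len bytes of room behind it. */
	memcpy((void *)iov->data, payload, payload_len);
}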
#endif /* __DHD_LINUX_H__ */
+++ /dev/null
-/*
- * Broadcom Dongle Host Driver (DHD), Linux-specific network interface
- * Basically selected code segments from usb-cdc.c and usb-rndis.c
- *
- * Copyright (C) 1999-2019, Broadcom.
- *
- * Unless you and Broadcom execute a separate written software license
- * agreement governing use of this software, this software is licensed to you
- * under the terms of the GNU General Public License version 2 (the "GPL"),
- * available at http://www.broadcom.com/licenses/GPLv2.php, with the
- * following added to such license:
- *
- * As a special exception, the copyright holders of this software give you
- * permission to link this software with independent modules, and to copy and
- * distribute the resulting executable under terms of your choice, provided that
- * you also meet, for each linked independent module, the terms and conditions of
- * the license of that module. An independent module is a module which is not
- * derived from this software. The special exception does not apply to any
- * modifications of the software.
- *
- * Notwithstanding the above, under no circumstances may you combine this
- * software in any way with any other Broadcom software provided under a license
- * other than the GPL, without Broadcom's express prior written consent.
- *
- *
- * <<Broadcom-WL-IPTag/Open:>>
- *
- * $Id: dhd_linux_exportfs.c 808905 2019-03-11 10:32:39Z $
- */
-#include <linux/kobject.h>
-#include <linux/proc_fs.h>
-#include <linux/sysfs.h>
-#include <osl.h>
-#include <dhd_dbg.h>
-#include <dhd_linux_priv.h>
-#ifdef DHD_ADPS_BAM_EXPORT
-#include <wl_bam.h>
-#endif // endif
-#ifdef CSI_SUPPORT
-#include <dhd_csi.h>
-#endif /* CSI_SUPPORT */
-
-#ifdef SHOW_LOGTRACE
-extern dhd_pub_t* g_dhd_pub;
-static int dhd_ring_proc_open(struct inode *inode, struct file *file);
-ssize_t dhd_ring_proc_read(struct file *file, char *buffer, size_t tt, loff_t *loff);
-
-static const struct file_operations dhd_ring_proc_fops = {
- .open = dhd_ring_proc_open,
- .read = dhd_ring_proc_read,
- .release = single_release,
-};
-
-static int
-dhd_ring_proc_open(struct inode *inode, struct file *file)
-{
- int ret = BCME_ERROR;
- if (inode) {
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 10, 0))
- ret = single_open(file, 0, PDE_DATA(inode));
-#else
- /* This feature is not supported for lower kernel versions */
- ret = single_open(file, 0, NULL);
-#endif // endif
- } else {
- DHD_ERROR(("%s: inode is NULL\n", __FUNCTION__));
- }
- return ret;
-}
-
-ssize_t
-dhd_ring_proc_read(struct file *file, char __user *buffer, size_t tt, loff_t *loff)
-{
- trace_buf_info_t *trace_buf_info;
- int ret = BCME_ERROR;
- dhd_dbg_ring_t *ring = (dhd_dbg_ring_t *)((struct seq_file *)(file->private_data))->private;
-
- if (ring == NULL) {
- DHD_ERROR(("%s: ring is NULL\n", __FUNCTION__));
- return ret;
- }
-
- ASSERT(g_dhd_pub);
-
- trace_buf_info = (trace_buf_info_t *)MALLOCZ(g_dhd_pub->osh, sizeof(trace_buf_info_t));
- if (trace_buf_info) {
- dhd_dbg_read_ring_into_trace_buf(ring, trace_buf_info);
- if (copy_to_user(buffer, (void*)trace_buf_info->buf, MIN(trace_buf_info->size, tt)))
- {
- ret = -EFAULT;
- goto exit;
- }
- if (trace_buf_info->availability == BUF_NOT_AVAILABLE)
- ret = BUF_NOT_AVAILABLE;
- else
- ret = trace_buf_info->size;
- } else
- DHD_ERROR(("Memory allocation Failed\n"));
-
-exit:
- if (trace_buf_info) {
- MFREE(g_dhd_pub->osh, trace_buf_info, sizeof(trace_buf_info_t));
- }
- return ret;
-}
-
-void
-dhd_dbg_ring_proc_create(dhd_pub_t *dhdp)
-{
-#ifdef DEBUGABILITY
- dhd_dbg_ring_t *dbg_verbose_ring = NULL;
-
- dbg_verbose_ring = dhd_dbg_get_ring_from_ring_id(dhdp, FW_VERBOSE_RING_ID);
- if (dbg_verbose_ring) {
- if (!proc_create_data("dhd_trace", S_IRUSR, NULL, &dhd_ring_proc_fops,
- dbg_verbose_ring)) {
- DHD_ERROR(("Failed to create /proc/dhd_trace procfs interface\n"));
- } else {
- DHD_ERROR(("Created /proc/dhd_trace procfs interface\n"));
- }
- } else {
- DHD_ERROR(("dbg_verbose_ring is NULL, /proc/dhd_trace not created\n"));
- }
-#endif /* DEBUGABILITY */
-
-#ifdef EWP_ECNTRS_LOGGING
- if (!proc_create_data("dhd_ecounters", S_IRUSR, NULL, &dhd_ring_proc_fops,
- dhdp->ecntr_dbg_ring)) {
- DHD_ERROR(("Failed to create /proc/dhd_ecounters procfs interface\n"));
- } else {
- DHD_ERROR(("Created /proc/dhd_ecounters procfs interface\n"));
- }
-#endif /* EWP_ECNTRS_LOGGING */
-
-#ifdef EWP_RTT_LOGGING
- if (!proc_create_data("dhd_rtt", S_IRUSR, NULL, &dhd_ring_proc_fops,
- dhdp->rtt_dbg_ring)) {
- DHD_ERROR(("Failed to create /proc/dhd_rtt procfs interface\n"));
- } else {
- DHD_ERROR(("Created /proc/dhd_rtt procfs interface\n"));
- }
-#endif /* EWP_RTT_LOGGING */
-}
-
-void
-dhd_dbg_ring_proc_destroy(dhd_pub_t *dhdp)
-{
-#ifdef DEBUGABILITY
- remove_proc_entry("dhd_trace", NULL);
-#endif /* DEBUGABILITY */
-
-#ifdef EWP_ECNTRS_LOGGING
- remove_proc_entry("dhd_ecounters", NULL);
-#endif /* EWP_ECNTRS_LOGGING */
-
-#ifdef EWP_RTT_LOGGING
- remove_proc_entry("dhd_rtt", NULL);
-#endif /* EWP_RTT_LOGGING */
-
-}
-#endif /* SHOW_LOGTRACE */
-
-/* ----------------------------------------------------------------------------
- * Infrastructure code for sysfs interface support for DHD
- *
- * What is sysfs interface?
- * https://www.kernel.org/doc/Documentation/filesystems/sysfs.txt
- *
- * Why sysfs interface?
- * This is the Linux standard way of changing/configuring Run Time parameters
- * for a driver. We can use this interface to control "linux" specific driver
- * parameters.
- *
- * -----------------------------------------------------------------------------
- */
-
-#if defined(DHD_TRACE_WAKE_LOCK)
-extern atomic_t trace_wklock_onoff;
-
-/* Function to show the history buffer */
-static ssize_t
-show_wklock_trace(struct dhd_info *dev, char *buf)
-{
- ssize_t ret = 0;
- dhd_info_t *dhd = (dhd_info_t *)dev;
-
- buf[ret] = '\n';
- buf[ret+1] = 0;
-
- dhd_wk_lock_stats_dump(&dhd->pub);
- return ret+1;
-}
-
-/* Function to enable/disable wakelock trace */
-static ssize_t
-wklock_trace_onoff(struct dhd_info *dev, const char *buf, size_t count)
-{
- unsigned long onoff;
- dhd_info_t *dhd = (dhd_info_t *)dev;
- BCM_REFERENCE(dhd);
-
- onoff = bcm_strtoul(buf, NULL, 10);
- if (onoff != 0 && onoff != 1) {
- return -EINVAL;
- }
-
- atomic_set(&trace_wklock_onoff, onoff);
- if (atomic_read(&trace_wklock_onoff)) {
- printk("ENABLE WAKLOCK TRACE\n");
- } else {
- printk("DISABLE WAKELOCK TRACE\n");
- }
-
- return (ssize_t)(onoff+1);
-}
-#endif /* DHD_TRACE_WAKE_LOCK */
-
-#if defined(DHD_LB_TXP)
-static ssize_t
-show_lbtxp(struct dhd_info *dev, char *buf)
-{
- ssize_t ret = 0;
- unsigned long onoff;
- dhd_info_t *dhd = (dhd_info_t *)dev;
-
- onoff = atomic_read(&dhd->lb_txp_active);
- ret = scnprintf(buf, PAGE_SIZE - 1, "%lu \n",
- onoff);
- return ret;
-}
-
-static ssize_t
-lbtxp_onoff(struct dhd_info *dev, const char *buf, size_t count)
-{
- unsigned long onoff;
- dhd_info_t *dhd = (dhd_info_t *)dev;
- int i;
-
- onoff = bcm_strtoul(buf, NULL, 10);
-
- sscanf(buf, "%lu", &onoff);
- if (onoff != 0 && onoff != 1) {
- return -EINVAL;
- }
- atomic_set(&dhd->lb_txp_active, onoff);
-
- /* Since the scheme is changed clear the counters */
- for (i = 0; i < NR_CPUS; i++) {
- DHD_LB_STATS_CLR(dhd->txp_percpu_run_cnt[i]);
- DHD_LB_STATS_CLR(dhd->tx_start_percpu_run_cnt[i]);
- }
-
- return count;
-}
-
-#endif /* DHD_LB_TXP */
-
-#if defined(DHD_LB_RXP)
-static ssize_t
-show_lbrxp(struct dhd_info *dev, char *buf)
-{
- ssize_t ret = 0;
- unsigned long onoff;
- dhd_info_t *dhd = (dhd_info_t *)dev;
-
- onoff = atomic_read(&dhd->lb_rxp_active);
- ret = scnprintf(buf, PAGE_SIZE - 1, "%lu \n",
- onoff);
- return ret;
-}
-
-static ssize_t
-lbrxp_onoff(struct dhd_info *dev, const char *buf, size_t count)
-{
- unsigned long onoff;
- dhd_info_t *dhd = (dhd_info_t *)dev;
- int i, j;
-
- onoff = bcm_strtoul(buf, NULL, 10);
-
- sscanf(buf, "%lu", &onoff);
- if (onoff != 0 && onoff != 1) {
- return -EINVAL;
- }
- atomic_set(&dhd->lb_rxp_active, onoff);
-
- /* Since the scheme is changed clear the counters */
- for (i = 0; i < NR_CPUS; i++) {
- DHD_LB_STATS_CLR(dhd->napi_percpu_run_cnt[i]);
- for (j = 0; j < HIST_BIN_SIZE; j++) {
- DHD_LB_STATS_CLR(dhd->napi_rx_hist[j][i]);
- }
- }
-
- return count;
-}
-#endif /* DHD_LB_RXP */
-
-#ifdef DHD_LOG_DUMP
-extern int logdump_periodic_flush;
-extern int logdump_ecntr_enable;
-static ssize_t
-show_logdump_periodic_flush(struct dhd_info *dev, char *buf)
-{
- ssize_t ret = 0;
- unsigned long val;
-
- val = logdump_periodic_flush;
- ret = scnprintf(buf, PAGE_SIZE - 1, "%lu \n", val);
- return ret;
-}
-
-static ssize_t
-logdump_periodic_flush_onoff(struct dhd_info *dev, const char *buf, size_t count)
-{
- unsigned long val;
-
- val = bcm_strtoul(buf, NULL, 10);
-
- sscanf(buf, "%lu", &val);
- if (val != 0 && val != 1) {
- return -EINVAL;
- }
- logdump_periodic_flush = val;
- return count;
-}
-
-static ssize_t
-show_logdump_ecntr(struct dhd_info *dev, char *buf)
-{
- ssize_t ret = 0;
- unsigned long val;
-
- val = logdump_ecntr_enable;
- ret = scnprintf(buf, PAGE_SIZE - 1, "%lu \n", val);
- return ret;
-}
-
-static ssize_t
-logdump_ecntr_onoff(struct dhd_info *dev, const char *buf, size_t count)
-{
- unsigned long val;
-
- val = bcm_strtoul(buf, NULL, 10);
-
- sscanf(buf, "%lu", &val);
- if (val != 0 && val != 1) {
- return -EINVAL;
- }
- logdump_ecntr_enable = val;
- return count;
-}
-
-#endif /* DHD_LOG_DUMP */
-
-extern uint enable_ecounter;
-static ssize_t
-show_enable_ecounter(struct dhd_info *dev, char *buf)
-{
- ssize_t ret = 0;
- unsigned long onoff;
-
- onoff = enable_ecounter;
- ret = scnprintf(buf, PAGE_SIZE - 1, "%lu \n",
- onoff);
- return ret;
-}
-
-static ssize_t
-ecounter_onoff(struct dhd_info *dev, const char *buf, size_t count)
-{
- unsigned long onoff;
- dhd_info_t *dhd = (dhd_info_t *)dev;
- dhd_pub_t *dhdp;
-
- if (!dhd) {
- DHD_ERROR(("%s: dhd is NULL\n", __FUNCTION__));
- return count;
- }
- dhdp = &dhd->pub;
- if (!FW_SUPPORTED(dhdp, ecounters)) {
- DHD_ERROR(("%s: ecounters not supported by FW\n", __FUNCTION__));
- return count;
- }
-
- onoff = bcm_strtoul(buf, NULL, 10);
-
- sscanf(buf, "%lu", &onoff);
- if (onoff != 0 && onoff != 1) {
- return -EINVAL;
- }
-
- if (enable_ecounter == onoff) {
- DHD_ERROR(("%s: ecounters already %d\n", __FUNCTION__, enable_ecounter));
- return count;
- }
-
- enable_ecounter = onoff;
- dhd_ecounter_configure(dhdp, enable_ecounter);
-
- return count;
-}
-
-/*
- * Generic Attribute Structure for DHD.
- * If we have to add a new sysfs entry under /sys/bcm-dhd/, we have
- * to instantiate an object of type dhd_attr, populate it with
- * the required show/store functions (ex:- dhd_attr_cpumask_primary)
- * and add the object to default_attrs[] array, that gets registered
- * to the kobject of dhd (named bcm-dhd).
- */
-
-struct dhd_attr {
- struct attribute attr;
- ssize_t(*show)(struct dhd_info *, char *);
- ssize_t(*store)(struct dhd_info *, const char *, size_t count);
-};
-
-#if defined(DHD_TRACE_WAKE_LOCK)
-static struct dhd_attr dhd_attr_wklock =
- __ATTR(wklock_trace, 0660, show_wklock_trace, wklock_trace_onoff);
-#endif /* defined(DHD_TRACE_WAKE_LOCK */
-
-#if defined(DHD_LB_TXP)
-static struct dhd_attr dhd_attr_lbtxp =
- __ATTR(lbtxp, 0660, show_lbtxp, lbtxp_onoff);
-#endif /* DHD_LB_TXP */
-
-#if defined(DHD_LB_RXP)
-static struct dhd_attr dhd_attr_lbrxp =
- __ATTR(lbrxp, 0660, show_lbrxp, lbrxp_onoff);
-#endif /* DHD_LB_RXP */
-
-#ifdef DHD_LOG_DUMP
-static struct dhd_attr dhd_attr_logdump_periodic_flush =
- __ATTR(logdump_periodic_flush, 0660, show_logdump_periodic_flush,
- logdump_periodic_flush_onoff);
-static struct dhd_attr dhd_attr_logdump_ecntr =
- __ATTR(logdump_ecntr_enable, 0660, show_logdump_ecntr,
- logdump_ecntr_onoff);
-#endif /* DHD_LOG_DUMP */
-
-static struct dhd_attr dhd_attr_ecounters =
- __ATTR(ecounters, 0660, show_enable_ecounter, ecounter_onoff);
-
-/* Attribute object that gets registered with "bcm-dhd" kobject tree */
-static struct attribute *default_attrs[] = {
-#if defined(DHD_TRACE_WAKE_LOCK)
- &dhd_attr_wklock.attr,
-#endif // endif
-#if defined(DHD_LB_TXP)
- &dhd_attr_lbtxp.attr,
-#endif /* DHD_LB_TXP */
-#if defined(DHD_LB_RXP)
- &dhd_attr_lbrxp.attr,
-#endif /* DHD_LB_RXP */
-#ifdef DHD_LOG_DUMP
- &dhd_attr_logdump_periodic_flush.attr,
- &dhd_attr_logdump_ecntr.attr,
-#endif // endif
- &dhd_attr_ecounters.attr,
- NULL
-};
-
-#define to_dhd(k) container_of(k, struct dhd_info, dhd_kobj)
-#define to_attr(a) container_of(a, struct dhd_attr, attr)
-
-/*
- * bcm-dhd kobject show function, the "attr" attribute specifices to which
- * node under "bcm-dhd" the show function is called.
- */
-static ssize_t dhd_show(struct kobject *kobj, struct attribute *attr, char *buf)
-{
-#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
-#pragma GCC diagnostic push
-#pragma GCC diagnostic ignored "-Wcast-qual"
-#endif // endif
- dhd_info_t *dhd = to_dhd(kobj);
- struct dhd_attr *d_attr = to_attr(attr);
-#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
-#pragma GCC diagnostic pop
-#endif // endif
- int ret;
-
- if (d_attr->show)
- ret = d_attr->show(dhd, buf);
- else
- ret = -EIO;
-
- return ret;
-}
-
-/*
- * bcm-dhd kobject show function, the "attr" attribute specifices to which
- * node under "bcm-dhd" the store function is called.
- */
-static ssize_t dhd_store(struct kobject *kobj, struct attribute *attr,
- const char *buf, size_t count)
-{
-#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
-#pragma GCC diagnostic push
-#pragma GCC diagnostic ignored "-Wcast-qual"
-#endif // endif
- dhd_info_t *dhd = to_dhd(kobj);
- struct dhd_attr *d_attr = to_attr(attr);
-#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
-#pragma GCC diagnostic pop
-#endif // endif
- int ret;
-
- if (d_attr->store)
- ret = d_attr->store(dhd, buf, count);
- else
- ret = -EIO;
-
- return ret;
-
-}
-
-static struct sysfs_ops dhd_sysfs_ops = {
- .show = dhd_show,
- .store = dhd_store,
-};
-
-static struct kobj_type dhd_ktype = {
- .sysfs_ops = &dhd_sysfs_ops,
- .default_attrs = default_attrs,
-};
-
-#ifdef DHD_MAC_ADDR_EXPORT
-struct ether_addr sysfs_mac_addr;
-static ssize_t
-show_mac_addr(struct dhd_info *dev, char *buf)
-{
- ssize_t ret = 0;
-
- ret = scnprintf(buf, PAGE_SIZE - 1, MACF,
- (uint32)sysfs_mac_addr.octet[0], (uint32)sysfs_mac_addr.octet[1],
- (uint32)sysfs_mac_addr.octet[2], (uint32)sysfs_mac_addr.octet[3],
- (uint32)sysfs_mac_addr.octet[4], (uint32)sysfs_mac_addr.octet[5]);
-
- return ret;
-}
-
-static ssize_t
-set_mac_addr(struct dhd_info *dev, const char *buf, size_t count)
-{
- if (!bcm_ether_atoe(buf, &sysfs_mac_addr)) {
- DHD_ERROR(("Invalid Mac Address \n"));
- return -EINVAL;
- }
-
- DHD_ERROR(("Mac Address set with "MACDBG"\n", MAC2STRDBG(&sysfs_mac_addr)));
-
- return count;
-}
-
-static struct dhd_attr dhd_attr_cntl_macaddr =
- __ATTR(mac_addr, 0660, show_mac_addr, set_mac_addr);
-#endif /* DHD_MAC_ADDR_EXPORT */
-
-#ifdef DHD_FW_COREDUMP
-
-#define MEMDUMPINFO "/data/vendor/misc/wifi/.memdump.info"
-
-uint32
-get_mem_val_from_file(void)
-{
- struct file *fp = NULL;
- uint32 mem_val = DUMP_MEMFILE_MAX;
- char *p_mem_val = NULL;
- char *filepath = MEMDUMPINFO;
- int ret = 0;
-
- /* Read memdump info from the file */
- fp = filp_open(filepath, O_RDONLY, 0);
- if (IS_ERR(fp)) {
- DHD_ERROR(("%s: File [%s] doesn't exist\n", __FUNCTION__, filepath));
-#if defined(CONFIG_X86)
- /* Check if it is Live Brix Image */
- if (strcmp(filepath, MEMDUMPINFO_LIVE) != 0) {
- goto done;
- }
- /* Try if it is Installed Brix Image */
- filepath = MEMDUMPINFO_INST;
- DHD_ERROR(("%s: Try File [%s]\n", __FUNCTION__, filepath));
- fp = filp_open(filepath, O_RDONLY, 0);
- if (IS_ERR(fp)) {
- DHD_ERROR(("%s: File [%s] doesn't exist\n", __FUNCTION__, filepath));
- goto done;
- }
-#else /* Non Brix Android platform */
- goto done;
-#endif /* CONFIG_X86 && OEM_ANDROID */
- }
-
- /* Handle success case */
- ret = compat_kernel_read(fp, 0, (char *)&mem_val, sizeof(uint32));
- if (ret < 0) {
- DHD_ERROR(("%s: File read error, ret=%d\n", __FUNCTION__, ret));
- filp_close(fp, NULL);
- goto done;
- }
-
- p_mem_val = (char*)&mem_val;
- p_mem_val[sizeof(uint32) - 1] = '\0';
- mem_val = bcm_atoi(p_mem_val);
-
- filp_close(fp, NULL);
-
-done:
- return mem_val;
-}
-
-void dhd_get_memdump_info(dhd_pub_t *dhd)
-{
-#ifndef DHD_EXPORT_CNTL_FILE
- uint32 mem_val = DUMP_MEMFILE_MAX;
-
- mem_val = get_mem_val_from_file();
- if (mem_val != DUMP_MEMFILE_MAX)
- dhd->memdump_enabled = mem_val;
-#ifdef DHD_INIT_DEFAULT_MEMDUMP
- if (mem_val == 0 || mem_val == DUMP_MEMFILE_MAX)
- mem_val = DUMP_MEMFILE_BUGON;
-#endif /* DHD_INIT_DEFAULT_MEMDUMP */
-#else
-#ifdef DHD_INIT_DEFAULT_MEMDUMP
- if (dhd->memdump_enabled == 0 || dhd->memdump_enabled == DUMP_MEMFILE_MAX)
- dhd->memdump_enabled = DUMP_MEMFILE_BUGON;
-#endif /* DHD_INIT_DEFAULT_MEMDUMP */
-#endif /* !DHD_EXPORT_CNTL_FILE */
- DHD_ERROR(("%s: MEMDUMP ENABLED = %d\n", __FUNCTION__, dhd->memdump_enabled));
-}
-
-#ifdef DHD_EXPORT_CNTL_FILE
-static ssize_t
-show_memdump_info(struct dhd_info *dev, char *buf)
-{
- ssize_t ret = 0;
- dhd_pub_t *dhdp;
-
- if (!dev) {
- DHD_ERROR(("%s: dhd is NULL\n", __FUNCTION__));
- return ret;
- }
-
- dhdp = &dev->pub;
- ret = scnprintf(buf, PAGE_SIZE -1, "%u\n", dhdp->memdump_enabled);
- return ret;
-}
-
-static ssize_t
-set_memdump_info(struct dhd_info *dev, const char *buf, size_t count)
-{
- unsigned long memval;
- dhd_pub_t *dhdp;
-
- if (!dev) {
- DHD_ERROR(("%s: dhd is NULL\n", __FUNCTION__));
- return count;
- }
- dhdp = &dev->pub;
-
- memval = bcm_strtoul(buf, NULL, 10);
- sscanf(buf, "%lu", &memval);
-
- dhdp->memdump_enabled = (uint32)memval;
-
- DHD_ERROR(("%s: MEMDUMP ENABLED = %iu\n", __FUNCTION__, dhdp->memdump_enabled));
- return count;
-}
-
-static struct dhd_attr dhd_attr_cntl_memdump =
- __ATTR(memdump, 0660, show_memdump_info, set_memdump_info);
-#endif /* DHD_EXPORT_CNTL_FILE */
-#endif /* DHD_FW_COREDUMP */
-
-#ifdef BCMASSERT_LOG
-#define ASSERTINFO "/data/vendor/misc/wifi/.assert.info"
-int
-get_assert_val_from_file(void)
-{
- struct file *fp = NULL;
- char *filepath = ASSERTINFO;
- char *p_mem_val = NULL;
- int mem_val = -1;
-
- /*
- * Read assert info from the file
- * 0: Trigger Kernel crash by panic()
- * 1: Print out the logs and don't trigger Kernel panic. (default)
- * 2: Trigger Kernel crash by BUG()
- * File doesn't exist: Keep default value (1).
- */
- fp = filp_open(filepath, O_RDONLY, 0);
- if (IS_ERR(fp)) {
- DHD_ERROR(("%s: File [%s] doesn't exist\n", __FUNCTION__, filepath));
- } else {
- int ret = compat_kernel_read(fp, 0, (char *)&mem_val, sizeof(uint32));
- if (ret < 0) {
- DHD_ERROR(("%s: File read error, ret=%d\n", __FUNCTION__, ret));
- } else {
- p_mem_val = (char *)&mem_val;
- p_mem_val[sizeof(uint32) - 1] = '\0';
- mem_val = bcm_atoi(p_mem_val);
- DHD_ERROR(("%s: ASSERT ENABLED = %d\n", __FUNCTION__, mem_val));
- }
- filp_close(fp, NULL);
- }
-
- mem_val = (mem_val >= 0) ? mem_val : 0;
- return mem_val;
-}
-
-void dhd_get_assert_info(dhd_pub_t *dhd)
-{
-#ifndef DHD_EXPORT_CNTL_FILE
- int mem_val = -1;
-
- mem_val = get_assert_val_from_file();
-
- g_assert_type = mem_val;
-#endif /* !DHD_EXPORT_CNTL_FILE */
-}
-
-#ifdef DHD_EXPORT_CNTL_FILE
-static ssize_t
-show_assert_info(struct dhd_info *dev, char *buf)
-{
- ssize_t ret = 0;
-
- if (!dev) {
- DHD_ERROR(("%s: dhd is NULL\n", __FUNCTION__));
- return ret;
- }
-
- ret = scnprintf(buf, PAGE_SIZE -1, "%d\n", g_assert_type);
- return ret;
-
-}
-
-static ssize_t
-set_assert_info(struct dhd_info *dev, const char *buf, size_t count)
-{
- unsigned long assert_val;
-
- assert_val = bcm_strtoul(buf, NULL, 10);
- sscanf(buf, "%lu", &assert_val);
-
- g_assert_type = (uint32)assert_val;
-
- DHD_ERROR(("%s: ASSERT ENABLED = %lu\n", __FUNCTION__, assert_val));
- return count;
-
-}
-
-static struct dhd_attr dhd_attr_cntl_assert =
- __ATTR(assert, 0660, show_assert_info, set_assert_info);
-#endif /* DHD_EXPORT_CNTL_FILE */
-#endif /* BCMASSERT_LOG */
-
-#ifdef DHD_EXPORT_CNTL_FILE
-#if defined(WRITE_WLANINFO)
-static ssize_t
-show_wifiver_info(struct dhd_info *dev, char *buf)
-{
- ssize_t ret = 0;
-
- ret = scnprintf(buf, PAGE_SIZE -1, "%s", version_info);
- return ret;
-}
-
-static ssize_t
-set_wifiver_info(struct dhd_info *dev, const char *buf, size_t count)
-{
- DHD_ERROR(("Do not set version info\n"));
- return -EINVAL;
-}
-
-static struct dhd_attr dhd_attr_cntl_wifiver =
- __ATTR(wifiver, 0660, show_wifiver_info, set_wifiver_info);
-#endif /* WRITE_WLANINFO */
-
-#if defined(USE_CID_CHECK)
-char cidinfostr[MAX_VNAME_LEN];
-
-static ssize_t
-show_cid_info(struct dhd_info *dev, char *buf)
-{
- ssize_t ret = 0;
-
- ret = scnprintf(buf, PAGE_SIZE -1, "%s", cidinfostr);
- return ret;
-}
-
-static ssize_t
-set_cid_info(struct dhd_info *dev, const char *buf, size_t count)
-{
- int len = strlen(buf) + 1;
- int maxstrsz;
- maxstrsz = MAX_VNAME_LEN;
-
- scnprintf(cidinfostr, ((len > maxstrsz) ? maxstrsz : len), "%s", buf);
- DHD_INFO(("%s : CID info string\n", cidinfostr));
- return count;
-}
-
-static struct dhd_attr dhd_attr_cntl_cidinfo =
- __ATTR(cid, 0660, show_cid_info, set_cid_info);
-#endif /* USE_CID_CHECK */
-
-#if defined(GEN_SOFTAP_INFO_FILE)
-char softapinfostr[SOFTAP_INFO_BUF_SZ];
-static ssize_t
-show_softap_info(struct dhd_info *dev, char *buf)
-{
- ssize_t ret = 0;
-
- ret = scnprintf(buf, PAGE_SIZE -1, "%s", softapinfostr);
- return ret;
-}
-
-static ssize_t
-set_softap_info(struct dhd_info *dev, const char *buf, size_t count)
-{
- DHD_ERROR(("Do not set sofap related info\n"));
- return -EINVAL;
-}
-
-static struct dhd_attr dhd_attr_cntl_softapinfo =
- __ATTR(softap, 0660, show_softap_info, set_softap_info);
-#endif /* GEN_SOFTAP_INFO_FILE */
-
-#if defined(MIMO_ANT_SETTING)
-unsigned long antsel;
-
-static ssize_t
-show_ant_info(struct dhd_info *dev, char *buf)
-{
- ssize_t ret = 0;
-
- ret = scnprintf(buf, PAGE_SIZE -1, "%lu\n", antsel);
- return ret;
-}
-
-static ssize_t
-set_ant_info(struct dhd_info *dev, const char *buf, size_t count)
-{
- unsigned long ant_val;
-
- ant_val = bcm_strtoul(buf, NULL, 10);
- sscanf(buf, "%lu", &ant_val);
-
- /*
- * Check value
- * 0 - Not set, handle same as file not exist
- */
- if (ant_val > 3) {
- DHD_ERROR(("[WIFI_SEC] %s: Set Invalid value %lu \n",
- __FUNCTION__, ant_val));
- return -EINVAL;
- }
-
- antsel = ant_val;
- DHD_ERROR(("[WIFI_SEC] %s: Set Antinfo val = %lu \n", __FUNCTION__, antsel));
- return count;
-}
-
-static struct dhd_attr dhd_attr_cntl_antinfo =
- __ATTR(ant, 0660, show_ant_info, set_ant_info);
-#endif /* MIMO_ANT_SETTING */
-
-#ifdef DHD_PM_CONTROL_FROM_FILE
-extern bool g_pm_control;
-extern uint32 pmmode_val;
-static ssize_t
-show_pm_info(struct dhd_info *dev, char *buf)
-{
- ssize_t ret = 0;
-
- if (!g_pm_control) {
- ret = scnprintf(buf, PAGE_SIZE -1, "PM mode is not set\n");
- } else {
- ret = scnprintf(buf, PAGE_SIZE -1, "%u\n", pmmode_val);
- }
- return ret;
-}
-
-static ssize_t
-set_pm_info(struct dhd_info *dev, const char *buf, size_t count)
-{
- unsigned long pm_val;
-
- pm_val = bcm_strtoul(buf, NULL, 10);
- sscanf(buf, "%lu", &pm_val);
-
- if (pm_val > 2) {
- DHD_ERROR(("[WIFI_SEC] %s: Set Invalid value %lu \n",
- __FUNCTION__, pm_val));
- return -EINVAL;
- }
-
- if (!pm_val) {
- g_pm_control = TRUE;
- } else {
- g_pm_control = FALSE;
- }
-
- pmmode_val = (uint32)pm_val;
- DHD_ERROR(("[WIFI_SEC] %s: Set pminfo val = %u\n", __FUNCTION__, pmmode_val));
- return count;
-}
-
-static struct dhd_attr dhd_attr_cntl_pminfo =
- __ATTR(pm, 0660, show_pm_info, set_pm_info);
-#endif /* DHD_PM_CONTROL_FROM_FILE */
-
-#ifdef LOGTRACE_FROM_FILE
-unsigned long logtrace_val = 1;
-
-static ssize_t
-show_logtrace_info(struct dhd_info *dev, char *buf)
-{
- ssize_t ret = 0;
-
- ret = scnprintf(buf, PAGE_SIZE -1, "%lu\n", logtrace_val);
- return ret;
-}
-
-static ssize_t
-set_logtrace_info(struct dhd_info *dev, const char *buf, size_t count)
-{
- unsigned long onoff;
-
- onoff = bcm_strtoul(buf, NULL, 10);
- sscanf(buf, "%lu", &onoff);
-
- if (onoff > 2) {
- DHD_ERROR(("[WIFI_SEC] %s: Set Invalid value %lu \n",
- __FUNCTION__, onoff));
- return -EINVAL;
- }
-
- logtrace_val = onoff;
- DHD_ERROR(("[WIFI_SEC] %s: LOGTRACE On/Off from sysfs = %lu\n",
- __FUNCTION__, logtrace_val));
- return count;
-}
-
-static struct dhd_attr dhd_attr_cntl_logtraceinfo =
- __ATTR(logtrace, 0660, show_logtrace_info, set_logtrace_info);
-#endif /* LOGTRACE_FROM_FILE */
-
-#ifdef USE_WFA_CERT_CONF
-#ifdef BCMSDIO
-uint32 bus_txglom = VALUENOTSET;
-
-static ssize_t
-show_bustxglom(struct dhd_info *dev, char *buf)
-{
- ssize_t ret = 0;
-
- if (bus_txglom == VALUENOTSET) {
- ret = scnprintf(buf, PAGE_SIZE - 1, "%s\n", "bustxglom not set from sysfs");
- } else {
- ret = scnprintf(buf, PAGE_SIZE -1, "%u\n", bus_txglom);
- }
- return ret;
-}
-
-static ssize_t
-set_bustxglom(struct dhd_info *dev, const char *buf, size_t count)
-{
- uint32 onoff;
-
- onoff = (uint32)bcm_atoi(buf);
- sscanf(buf, "%u", &onoff);
-
- if (onoff > 2) {
- DHD_ERROR(("[WIFI_SEC] %s: Set Invalid value %u \n",
- __FUNCTION__, onoff));
- return -EINVAL;
- }
-
- bus_txglom = onoff;
- DHD_ERROR(("[WIFI_SEC] %s: BUS TXGLOM On/Off from sysfs = %u\n",
- __FUNCTION__, bus_txglom));
- return count;
-}
-
-static struct dhd_attr dhd_attr_cntl_bustxglom =
- __ATTR(bustxglom, 0660, show_bustxglom, set_bustxglom);
-#endif /* BCMSDIO */
-
-#if defined(ROAM_ENABLE) || defined(DISABLE_BUILTIN_ROAM)
-uint32 roam_off = VALUENOTSET;
-
-static ssize_t
-show_roamoff(struct dhd_info *dev, char *buf)
-{
- ssize_t ret = 0;
-
- if (roam_off == VALUENOTSET) {
- ret = scnprintf(buf, PAGE_SIZE -1, "%s\n", "roam_off not set from sysfs");
- } else {
- ret = scnprintf(buf, PAGE_SIZE -1, "%u\n", roam_off);
- }
- return ret;
-}
-
-static ssize_t
-set_roamoff(struct dhd_info *dev, const char *buf, size_t count)
-{
- uint32 onoff;
-
- onoff = bcm_atoi(buf);
- sscanf(buf, "%u", &onoff);
-
- if (onoff > 2) {
- DHD_ERROR(("[WIFI_SEC] %s: Set Invalid value %u \n",
- __FUNCTION__, onoff));
- return -EINVAL;
- }
-
- roam_off = onoff;
- DHD_ERROR(("[WIFI_SEC] %s: ROAM On/Off from sysfs = %u\n",
- __FUNCTION__, roam_off));
- return count;
-}
-
-static struct dhd_attr dhd_attr_cntl_roamoff =
- __ATTR(roamoff, 0660, show_roamoff, set_roamoff);
-#endif /* ROAM_ENABLE || DISABLE_BUILTIN_ROAM */
-
-#ifdef USE_WL_FRAMEBURST
-uint32 frameburst = VALUENOTSET;
-
-static ssize_t
-show_frameburst(struct dhd_info *dev, char *buf)
-{
- ssize_t ret = 0;
-
- if (frameburst == VALUENOTSET) {
- ret = scnprintf(buf, PAGE_SIZE -1, "%s\n", "frameburst not set from sysfs");
- } else {
- ret = scnprintf(buf, PAGE_SIZE -1, "%u\n", frameburst);
- }
- return ret;
-}
-
-static ssize_t
-set_frameburst(struct dhd_info *dev, const char *buf, size_t count)
-{
- uint32 onoff;
-
- onoff = bcm_atoi(buf);
- sscanf(buf, "%u", &onoff);
-
- if (onoff > 2) {
- DHD_ERROR(("[WIFI_SEC] %s: Set Invalid value %u \n",
- __FUNCTION__, onoff));
- return -EINVAL;
- }
-
- frameburst = onoff;
- DHD_ERROR(("[WIFI_SEC] %s: FRAMEBURST On/Off from sysfs = %u\n",
- __FUNCTION__, frameburst));
- return count;
-}
-
-static struct dhd_attr dhd_attr_cntl_frameburst =
- __ATTR(frameburst, 0660, show_frameburst, set_frameburst);
-#endif /* USE_WL_FRAMEBURST */
-
-#ifdef USE_WL_TXBF
-uint32 txbf = VALUENOTSET;
-
-static ssize_t
-show_txbf(struct dhd_info *dev, char *buf)
-{
- ssize_t ret = 0;
-
- if (txbf == VALUENOTSET) {
- ret = scnprintf(buf, PAGE_SIZE -1, "%s\n", "txbf not set from sysfs");
- } else {
- ret = scnprintf(buf, PAGE_SIZE -1, "%u\n", txbf);
- }
- return ret;
-}
-
-static ssize_t
-set_txbf(struct dhd_info *dev, const char *buf, size_t count)
-{
- uint32 onoff;
-
- onoff = bcm_atoi(buf);
- sscanf(buf, "%u", &onoff);
-
- if (onoff > 2) {
- DHD_ERROR(("[WIFI_SEC] %s: Set Invalid value %u \n",
- __FUNCTION__, onoff));
- return -EINVAL;
- }
-
- txbf = onoff;
- DHD_ERROR(("[WIFI_SEC] %s: FRAMEBURST On/Off from sysfs = %u\n",
- __FUNCTION__, txbf));
- return count;
-}
-
-static struct dhd_attr dhd_attr_cntl_txbf =
- __ATTR(txbf, 0660, show_txbf, set_txbf);
-#endif /* USE_WL_TXBF */
-
-#ifdef PROP_TXSTATUS
-uint32 proptx = VALUENOTSET;
-
-static ssize_t
-show_proptx(struct dhd_info *dev, char *buf)
-{
- ssize_t ret = 0;
-
- if (proptx == VALUENOTSET) {
- ret = scnprintf(buf, PAGE_SIZE -1, "%s\n", "proptx not set from sysfs");
- } else {
- ret = scnprintf(buf, PAGE_SIZE -1, "%u\n", proptx);
- }
- return ret;
-}
-
-static ssize_t
-set_proptx(struct dhd_info *dev, const char *buf, size_t count)
-{
- uint32 onoff;
-
- onoff = bcm_strtoul(buf, NULL, 10);
- sscanf(buf, "%u", &onoff);
-
- if (onoff > 2) {
- DHD_ERROR(("[WIFI_SEC] %s: Set Invalid value %u \n",
- __FUNCTION__, onoff));
- return -EINVAL;
- }
-
- proptx = onoff;
- DHD_ERROR(("[WIFI_SEC] %s: FRAMEBURST On/Off from sysfs = %u\n",
- __FUNCTION__, txbf));
- return count;
-}
-
-static struct dhd_attr dhd_attr_cntl_proptx =
- __ATTR(proptx, 0660, show_proptx, set_proptx);
-
-#endif /* PROP_TXSTATUS */
-#endif /* USE_WFA_CERT_CONF */
-#endif /* DHD_EXPORT_CNTL_FILE */
-
-#ifdef DHD_SEND_HANG_PRIVCMD_ERRORS
-uint32 report_hang_privcmd_err = 1;
-
-static ssize_t
-show_hang_privcmd_err(struct dhd_info *dev, char *buf)
-{
- ssize_t ret = 0;
-
- ret = scnprintf(buf, PAGE_SIZE - 1, "%u\n", report_hang_privcmd_err);
- return ret;
-}
-
-static ssize_t
-set_hang_privcmd_err(struct dhd_info *dev, const char *buf, size_t count)
-{
- uint32 val;
-
- val = bcm_atoi(buf);
- sscanf(buf, "%u", &val);
-
- report_hang_privcmd_err = val ? 1 : 0;
- DHD_INFO(("%s: Set report HANG for private cmd error: %d\n",
- __FUNCTION__, report_hang_privcmd_err));
- return count;
-}
-
-static struct dhd_attr dhd_attr_hang_privcmd_err =
- __ATTR(hang_privcmd_err, 0660, show_hang_privcmd_err, set_hang_privcmd_err);
-#endif /* DHD_SEND_HANG_PRIVCMD_ERRORS */
-
-#if defined(DISABLE_HE_ENAB) || defined(CUSTOM_CONTROL_HE_ENAB)
-uint8 control_he_enab = 1;
-#endif /* DISABLE_HE_ENAB || CUSTOM_CONTROL_HE_ENAB */
-
-#if defined(CUSTOM_CONTROL_HE_ENAB)
-static ssize_t
-show_control_he_enab(struct dhd_info *dev, char *buf)
-{
- ssize_t ret = 0;
-
- ret = scnprintf(buf, PAGE_SIZE - 1, "%d\n", control_he_enab);
- return ret;
-}
-
-static ssize_t
-set_control_he_enab(struct dhd_info *dev, const char *buf, size_t count)
-{
- uint32 val;
-
- val = bcm_atoi(buf);
-
- control_he_enab = val ? 1 : 0;
- DHD_ERROR(("%s: Set control he enab: %d\n", __FUNCTION__, control_he_enab));
- return count;
-}
-
-static struct dhd_attr dhd_attr_control_he_enab =
- __ATTR(control_he_enab, 0660, show_control_he_enab, set_control_he_enab);
-#endif /* CUSTOM_CONTROL_HE_ENAB */
-/* Attribute object that gets registered with "wifi" kobject tree */
-static struct attribute *control_file_attrs[] = {
-#ifdef DHD_MAC_ADDR_EXPORT
- &dhd_attr_cntl_macaddr.attr,
-#endif /* DHD_MAC_ADDR_EXPORT */
-#ifdef DHD_EXPORT_CNTL_FILE
-#ifdef DHD_FW_COREDUMP
- &dhd_attr_cntl_memdump.attr,
-#endif /* DHD_FW_COREDUMP */
-#ifdef BCMASSERT_LOG
- &dhd_attr_cntl_assert.attr,
-#endif /* BCMASSERT_LOG */
-#ifdef WRITE_WLANINFO
- &dhd_attr_cntl_wifiver.attr,
-#endif /* WRITE_WLANINFO */
-#ifdef USE_CID_CHECK
- &dhd_attr_cntl_cidinfo.attr,
-#endif /* USE_CID_CHECK */
-#ifdef GEN_SOFTAP_INFO_FILE
- &dhd_attr_cntl_softapinfo.attr,
-#endif /* GEN_SOFTAP_INFO_FILE */
-#ifdef MIMO_ANT_SETTING
- &dhd_attr_cntl_antinfo.attr,
-#endif /* MIMO_ANT_SETTING */
-#ifdef DHD_PM_CONTROL_FROM_FILE
- &dhd_attr_cntl_pminfo.attr,
-#endif /* DHD_PM_CONTROL_FROM_FILE */
-#ifdef LOGTRACE_FROM_FILE
- &dhd_attr_cntl_logtraceinfo.attr,
-#endif /* LOGTRACE_FROM_FILE */
-#ifdef USE_WFA_CERT_CONF
-#ifdef BCMSDIO
- &dhd_attr_cntl_bustxglom.attr,
-#endif /* BCMSDIO */
-#if defined(ROAM_ENABLE) || defined(DISABLE_BUILTIN_ROAM)
- &dhd_attr_cntl_roamoff.attr,
-#endif /* ROAM_ENABLE || DISABLE_BUILTIN_ROAM */
-#ifdef USE_WL_FRAMEBURST
- &dhd_attr_cntl_frameburst.attr,
-#endif /* USE_WL_FRAMEBURST */
-#ifdef USE_WL_TXBF
- &dhd_attr_cntl_txbf.attr,
-#endif /* USE_WL_TXBF */
-#ifdef PROP_TXSTATUS
- &dhd_attr_cntl_proptx.attr,
-#endif /* PROP_TXSTATUS */
-#endif /* USE_WFA_CERT_CONF */
-#endif /* DHD_EXPORT_CNTL_FILE */
-#ifdef DHD_ADPS_BAM_EXPORT
- &dhd_attr_adps_bam.attr,
-#endif /* DHD_ADPS_BAM_EXPORT */
-#ifdef DHD_SEND_HANG_PRIVCMD_ERRORS
- &dhd_attr_hang_privcmd_err.attr,
-#endif /* DHD_SEND_HANG_PRIVCMD_ERRORS */
-#if defined(CUSTOM_CONTROL_HE_ENAB)
- &dhd_attr_control_he_enab.attr,
-#endif /* CUSTOM_CONTROL_HE_ENAB */
- NULL
-};
-
-#define to_cntl_dhd(k) container_of(k, struct dhd_info, dhd_conf_file_kobj)
-
-/*
- * wifi kobject show function, the "attr" attribute specifices to which
- * node under "sys/wifi" the show function is called.
- */
-static ssize_t dhd_cntl_show(struct kobject *kobj, struct attribute *attr, char *buf)
-{
-#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
-#pragma GCC diagnostic push
-#pragma GCC diagnostic ignored "-Wcast-qual"
-#endif // endif
- dhd_info_t *dhd = to_cntl_dhd(kobj);
- struct dhd_attr *d_attr = to_attr(attr);
-#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
-#pragma GCC diagnostic pop
-#endif // endif
- int ret;
-
- if (d_attr->show)
- ret = d_attr->show(dhd, buf);
- else
- ret = -EIO;
-
- return ret;
-}
-
-/*
- * wifi kobject show function, the "attr" attribute specifices to which
- * node under "sys/wifi" the store function is called.
- */
-static ssize_t dhd_cntl_store(struct kobject *kobj, struct attribute *attr,
- const char *buf, size_t count)
-{
-#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
-#pragma GCC diagnostic push
-#pragma GCC diagnostic ignored "-Wcast-qual"
-#endif // endif
- dhd_info_t *dhd = to_cntl_dhd(kobj);
- struct dhd_attr *d_attr = to_attr(attr);
-#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
-#pragma GCC diagnostic pop
-#endif // endif
- int ret;
-
- if (d_attr->store)
- ret = d_attr->store(dhd, buf, count);
- else
- ret = -EIO;
-
- return ret;
-
-}
-
-static struct sysfs_ops dhd_sysfs_cntl_ops = {
- .show = dhd_cntl_show,
- .store = dhd_cntl_store,
-};
-
-static struct kobj_type dhd_cntl_file_ktype = {
- .sysfs_ops = &dhd_sysfs_cntl_ops,
- .default_attrs = control_file_attrs,
-};
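Every control node above follows the same three-part recipe: a show/store pair, a struct dhd_attr built with __ATTR(), and an entry in control_file_attrs[] so that dhd_cntl_show()/dhd_cntl_store() can dispatch to it. A minimal sketch of that pattern, for orientation only: the node name "example_ctrl" and its backing variable are made up and are not part of the deleted file, and the snippet compiles only in the context of this source file.

/* Hypothetical extra control node under /sys/wifi (illustration only). */
static uint32 example_ctrl_val = 0;

static ssize_t
show_example_ctrl(struct dhd_info *dev, char *buf)
{
	/* Report the current value, same PAGE_SIZE convention as the nodes above */
	return scnprintf(buf, PAGE_SIZE - 1, "%u\n", example_ctrl_val);
}

static ssize_t
set_example_ctrl(struct dhd_info *dev, const char *buf, size_t count)
{
	/* Parse the written string and remember it */
	example_ctrl_val = (uint32)bcm_atoi(buf);
	return count;
}

static struct dhd_attr dhd_attr_cntl_example =
	__ATTR(example_ctrl, 0660, show_example_ctrl, set_example_ctrl);

/* &dhd_attr_cntl_example.attr would then be appended to control_file_attrs[]
 * so the generic show/store dispatchers above can reach it.
 */

From user space the node would then appear as /sys/wifi/example_ctrl and respond to plain reads and writes.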
-
-#ifdef CSI_SUPPORT
-/* Read callback for the "csi" binary sysfs attribute: dump and clear the accumulated CSI records */
-static ssize_t read_csi_data(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr, char *buf, loff_t off, size_t count)
-{
- dhd_info_t *dhd = to_dhd(kobj);
- int n = 0;
-
- n = dhd_csi_dump_list(&dhd->pub, buf);
- DHD_INFO(("Dump data to file, size %d\n", n));
- dhd_csi_clean_list(&dhd->pub);
-
- return n;
-}
-
-static struct bin_attribute dhd_attr_csi = {
- .attr = { .name = "csi",
- .mode = 0660, },
- .size = MAX_CSI_FILESZ,
- .read = read_csi_data,
-};
-#endif /* CSI_SUPPORT */
-
-/* Create a kobject and attach to sysfs interface */
-int dhd_sysfs_init(dhd_info_t *dhd)
-{
- int ret = -1;
-
- if (dhd == NULL) {
- DHD_ERROR(("%s(): dhd is NULL \r\n", __FUNCTION__));
- return ret;
- }
-
- /* Initialize the kobject */
- ret = kobject_init_and_add(&dhd->dhd_kobj, &dhd_ktype, NULL, "bcm-dhd");
- if (ret) {
- kobject_put(&dhd->dhd_kobj);
- DHD_ERROR(("%s(): Unable to allocate kobject \r\n", __FUNCTION__));
- return ret;
- }
- ret = kobject_init_and_add(&dhd->dhd_conf_file_kobj, &dhd_cntl_file_ktype, NULL, "wifi");
- if (ret) {
- kobject_put(&dhd->dhd_conf_file_kobj);
- DHD_ERROR(("%s(): Unable to allocate kobject \r\n", __FUNCTION__));
- return ret;
- }
-
-#ifdef CSI_SUPPORT
- ret = sysfs_create_bin_file(&dhd->dhd_kobj, &dhd_attr_csi);
- if (ret) {
- DHD_ERROR(("%s: can't create %s\n", __FUNCTION__, dhd_attr_csi.attr.name));
- kobject_put(&dhd->dhd_kobj);
- return ret;
- }
-#endif /* CSI_SUPPORT */
-
- /*
- * We are always responsible for sending the uevent that the kobject
- * was added to the system.
- */
- kobject_uevent(&dhd->dhd_kobj, KOBJ_ADD);
- kobject_uevent(&dhd->dhd_conf_file_kobj, KOBJ_ADD);
-
- return ret;
-}
-
-/* Done with the kobject and detach the sysfs interface */
-void dhd_sysfs_exit(dhd_info_t *dhd)
-{
- if (dhd == NULL) {
- DHD_ERROR(("%s(): dhd is NULL \r\n", __FUNCTION__));
- return;
- }
-
- /* Release the kobjects */
- if (dhd->dhd_kobj.state_initialized)
- kobject_put(&dhd->dhd_kobj);
- if (dhd->dhd_conf_file_kobj.state_initialized)
- kobject_put(&dhd->dhd_conf_file_kobj);
-}
+++ /dev/null
-/*
- * Broadcom Dongle Host Driver (DHD), Linux-specific network interface
- * Basically selected code segments from usb-cdc.c and usb-rndis.c
- *
- * Copyright (C) 1999-2019, Broadcom.
- *
- * Unless you and Broadcom execute a separate written software license
- * agreement governing use of this software, this software is licensed to you
- * under the terms of the GNU General Public License version 2 (the "GPL"),
- * available at http://www.broadcom.com/licenses/GPLv2.php, with the
- * following added to such license:
- *
- * As a special exception, the copyright holders of this software give you
- * permission to link this software with independent modules, and to copy and
- * distribute the resulting executable under terms of your choice, provided that
- * you also meet, for each linked independent module, the terms and conditions of
- * the license of that module. An independent module is a module which is not
- * derived from this software. The special exception does not apply to any
- * modifications of the software.
- *
- * Notwithstanding the above, under no circumstances may you combine this
- * software in any way with any other Broadcom software provided under a license
- * other than the GPL, without Broadcom's express prior written consent.
- *
- *
- * <<Broadcom-WL-IPTag/Open:>>
- *
- * $Id: dhd_linux_lb.c 805819 2019-02-20 10:49:35Z $
- */
-
-#include <dhd_linux_priv.h>
-
-extern dhd_pub_t* g_dhd_pub;
-
-#if defined(DHD_LB)
-
-void
-dhd_lb_set_default_cpus(dhd_info_t *dhd)
-{
- /* Default CPU allocation for the jobs */
- atomic_set(&dhd->rx_napi_cpu, 1);
- atomic_set(&dhd->rx_compl_cpu, 2);
- atomic_set(&dhd->tx_compl_cpu, 2);
- atomic_set(&dhd->tx_cpu, 2);
- atomic_set(&dhd->net_tx_cpu, 0);
-}
-
-void
-dhd_cpumasks_deinit(dhd_info_t *dhd)
-{
- free_cpumask_var(dhd->cpumask_curr_avail);
- free_cpumask_var(dhd->cpumask_primary);
- free_cpumask_var(dhd->cpumask_primary_new);
- free_cpumask_var(dhd->cpumask_secondary);
- free_cpumask_var(dhd->cpumask_secondary_new);
-}
-
-int
-dhd_cpumasks_init(dhd_info_t *dhd)
-{
- int id;
- uint32 cpus, num_cpus = num_possible_cpus();
- int ret = 0;
-
- DHD_ERROR(("%s CPU masks primary(big)=0x%x secondary(little)=0x%x\n", __FUNCTION__,
- DHD_LB_PRIMARY_CPUS, DHD_LB_SECONDARY_CPUS));
-
- if (!alloc_cpumask_var(&dhd->cpumask_curr_avail, GFP_KERNEL) ||
- !alloc_cpumask_var(&dhd->cpumask_primary, GFP_KERNEL) ||
- !alloc_cpumask_var(&dhd->cpumask_primary_new, GFP_KERNEL) ||
- !alloc_cpumask_var(&dhd->cpumask_secondary, GFP_KERNEL) ||
- !alloc_cpumask_var(&dhd->cpumask_secondary_new, GFP_KERNEL)) {
- DHD_ERROR(("%s Failed to init cpumasks\n", __FUNCTION__));
- ret = -ENOMEM;
- goto fail;
- }
-
- cpumask_copy(dhd->cpumask_curr_avail, cpu_online_mask);
- cpumask_clear(dhd->cpumask_primary);
- cpumask_clear(dhd->cpumask_secondary);
-
- if (num_cpus > 32) {
- DHD_ERROR(("%s max cpus must be 32, %d too big\n", __FUNCTION__, num_cpus));
- ASSERT(0);
- }
-
- cpus = DHD_LB_PRIMARY_CPUS;
- for (id = 0; id < num_cpus; id++) {
- if (isset(&cpus, id))
- cpumask_set_cpu(id, dhd->cpumask_primary);
- }
-
- cpus = DHD_LB_SECONDARY_CPUS;
- for (id = 0; id < num_cpus; id++) {
- if (isset(&cpus, id))
- cpumask_set_cpu(id, dhd->cpumask_secondary);
- }
-
- return ret;
-fail:
- dhd_cpumasks_deinit(dhd);
- return ret;
-}
-
-/*
- * The CPU Candidacy Algorithm
- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~
- * The available CPUs for selection are divided into two groups
- * Primary Set - A CPU mask that carries the First Choice CPUs
- * Secondary Set - A CPU mask that carries the Second Choice CPUs.
- *
- * There are two types of jobs that need to be assigned to
- * the CPUs, from one of the above mentioned CPU groups. The jobs are
- * 1) Rx Packet Processing - napi_cpu
- * 2) Completion Processing (Tx, Rx) - compl_cpu
- *
- * To begin with, both napi_cpu and compl_cpu are on CPU0. Whenever a CPU goes
- * on-line/off-line the CPU candidacy algorithm is triggered. The candidacy
- * algo tries to pick the first available non-boot CPU (i.e. not CPU0) for napi_cpu.
- * If there are more processors free, it assigns one to compl_cpu.
- * It also tries to ensure that napi_cpu and compl_cpu are not on the same
- * CPU, as much as possible.
- *
- * By design, both Tx and Rx completion jobs run on the same CPU core, as this
- * allows Tx completion skbs to be released into a local free pool from
- * which the rx buffer posts could have been serviced. It is important to note
- * that a Tx packet may not have a large enough buffer for rx posting.
- */
-void dhd_select_cpu_candidacy(dhd_info_t *dhd)
-{
- uint32 primary_available_cpus; /* count of primary available cpus */
- uint32 secondary_available_cpus; /* count of secondary available cpus */
- uint32 napi_cpu = 0; /* cpu selected for napi rx processing */
- uint32 compl_cpu = 0; /* cpu selected for completion jobs */
- uint32 tx_cpu = 0; /* cpu selected for tx processing job */
-
- cpumask_clear(dhd->cpumask_primary_new);
- cpumask_clear(dhd->cpumask_secondary_new);
-
- /*
- * Now select from the primary mask. Even if a Job is
- * already running on a CPU in secondary group, we still move
- * to primary CPU. So no conditional checks.
- */
- cpumask_and(dhd->cpumask_primary_new, dhd->cpumask_primary,
- dhd->cpumask_curr_avail);
-
- cpumask_and(dhd->cpumask_secondary_new, dhd->cpumask_secondary,
- dhd->cpumask_curr_avail);
-
- primary_available_cpus = cpumask_weight(dhd->cpumask_primary_new);
-
- if (primary_available_cpus > 0) {
- napi_cpu = cpumask_first(dhd->cpumask_primary_new);
-
- /* If no further CPU is available,
- * cpumask_next returns >= nr_cpu_ids
- */
- tx_cpu = cpumask_next(napi_cpu, dhd->cpumask_primary_new);
- if (tx_cpu >= nr_cpu_ids)
- tx_cpu = 0;
-
- /* In case there are no more CPUs, do completions & Tx in same CPU */
- compl_cpu = cpumask_next(tx_cpu, dhd->cpumask_primary_new);
- if (compl_cpu >= nr_cpu_ids)
- compl_cpu = tx_cpu;
- }
-
- DHD_INFO(("%s After primary CPU check napi_cpu %d compl_cpu %d tx_cpu %d\n",
- __FUNCTION__, napi_cpu, compl_cpu, tx_cpu));
-
- /* -- Now check for the CPUs from the secondary mask -- */
- secondary_available_cpus = cpumask_weight(dhd->cpumask_secondary_new);
-
- DHD_INFO(("%s Available secondary cpus %d nr_cpu_ids %d\n",
- __FUNCTION__, secondary_available_cpus, nr_cpu_ids));
-
- if (secondary_available_cpus > 0) {
- /* At this point if napi_cpu is unassigned it means no CPU
- * is online from Primary Group
- */
- if (napi_cpu == 0) {
- napi_cpu = cpumask_first(dhd->cpumask_secondary_new);
- tx_cpu = cpumask_next(napi_cpu, dhd->cpumask_secondary_new);
- compl_cpu = cpumask_next(tx_cpu, dhd->cpumask_secondary_new);
- } else if (tx_cpu == 0) {
- tx_cpu = cpumask_first(dhd->cpumask_secondary_new);
- compl_cpu = cpumask_next(tx_cpu, dhd->cpumask_secondary_new);
- } else if (compl_cpu == 0) {
- compl_cpu = cpumask_first(dhd->cpumask_secondary_new);
- }
-
- /* If no CPU was available for tx processing, choose CPU 0 */
- if (tx_cpu >= nr_cpu_ids)
- tx_cpu = 0;
-
- /* If no CPU was available for completion, choose CPU 0 */
- if (compl_cpu >= nr_cpu_ids)
- compl_cpu = 0;
- }
- if ((primary_available_cpus == 0) &&
- (secondary_available_cpus == 0)) {
- /* No CPUs available from primary or secondary mask */
- napi_cpu = 1;
- compl_cpu = 0;
- tx_cpu = 2;
- }
-
- DHD_INFO(("%s After secondary CPU check napi_cpu %d compl_cpu %d tx_cpu %d\n",
- __FUNCTION__, napi_cpu, compl_cpu, tx_cpu));
-
- ASSERT(napi_cpu < nr_cpu_ids);
- ASSERT(compl_cpu < nr_cpu_ids);
- ASSERT(tx_cpu < nr_cpu_ids);
-
- atomic_set(&dhd->rx_napi_cpu, napi_cpu);
- atomic_set(&dhd->tx_compl_cpu, compl_cpu);
- atomic_set(&dhd->rx_compl_cpu, compl_cpu);
- atomic_set(&dhd->tx_cpu, tx_cpu);
-
- return;
-}
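To make the selection order concrete, here is a small self-contained user-space sketch (plain C with uint32_t bitmasks instead of cpumask_t; not driver code) that mimics the primary-mask pass above: napi takes the first available primary CPU, tx the next one, and compl falls back to tx when the mask runs out.

#include <stdio.h>
#include <stdint.h>

/* First set bit in the mask, or -1 if the mask is empty. */
static int first_cpu(uint32_t mask)
{
	int id;
	for (id = 0; id < 32; id++)
		if (mask & (1u << id))
			return id;
	return -1;
}

int main(void)
{
	uint32_t primary = 0xF0;              /* big cores: CPUs 4-7 */
	uint32_t online  = 0x3F;              /* CPUs 0-5 are online */
	uint32_t avail   = primary & online;  /* {4, 5} */

	int napi = first_cpu(avail);                          /* 4 */
	int tx   = first_cpu(avail & ~(1u << napi));          /* 5 */
	uint32_t rest = avail & ~((1u << napi) | (1u << tx));
	int compl_cpu = rest ? first_cpu(rest) : tx;          /* no CPU left: reuse tx -> 5 */

	printf("napi=%d tx=%d compl=%d\n", napi, tx, compl_cpu);
	return 0;
}

The real function additionally walks the secondary mask and applies the hard-coded fallback when both masks come up empty.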
-
-/*
- * Function to handle CPU Hotplug notifications.
- * One of the task it does is to trigger the CPU Candidacy algorithm
- * for load balancing.
- */
-
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0))
-
-int dhd_cpu_startup_callback(unsigned int cpu)
-{
- dhd_info_t *dhd = g_dhd_pub->info;
-
- DHD_INFO(("%s(): \r\n cpu:%d", __FUNCTION__, cpu));
- DHD_LB_STATS_INCR(dhd->cpu_online_cnt[cpu]);
- cpumask_set_cpu(cpu, dhd->cpumask_curr_avail);
- dhd_select_cpu_candidacy(dhd);
-
- return 0;
-}
-
-int dhd_cpu_teardown_callback(unsigned int cpu)
-{
- dhd_info_t *dhd = g_dhd_pub->info;
-
- DHD_INFO(("%s(): \r\n cpu:%d", __FUNCTION__, cpu));
- DHD_LB_STATS_INCR(dhd->cpu_offline_cnt[cpu]);
- cpumask_clear_cpu(cpu, dhd->cpumask_curr_avail);
- dhd_select_cpu_candidacy(dhd);
-
- return 0;
-}
-#else
-int
-dhd_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
-{
- unsigned long int cpu = (unsigned long int)hcpu;
-
-#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
-#pragma GCC diagnostic push
-#pragma GCC diagnostic ignored "-Wcast-qual"
-#endif // endif
- dhd_info_t *dhd = container_of(nfb, dhd_info_t, cpu_notifier);
-#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
-#pragma GCC diagnostic pop
-#endif // endif
-
- if (!dhd || !(dhd->dhd_state & DHD_ATTACH_STATE_LB_ATTACH_DONE)) {
- DHD_INFO(("%s(): LB data is not initialized yet.\n",
- __FUNCTION__));
- return NOTIFY_BAD;
- }
-
- switch (action)
- {
- case CPU_ONLINE:
- case CPU_ONLINE_FROZEN:
- DHD_LB_STATS_INCR(dhd->cpu_online_cnt[cpu]);
- cpumask_set_cpu(cpu, dhd->cpumask_curr_avail);
- dhd_select_cpu_candidacy(dhd);
- break;
-
- case CPU_DOWN_PREPARE:
- case CPU_DOWN_PREPARE_FROZEN:
- DHD_LB_STATS_INCR(dhd->cpu_offline_cnt[cpu]);
- cpumask_clear_cpu(cpu, dhd->cpumask_curr_avail);
- dhd_select_cpu_candidacy(dhd);
- break;
- default:
- break;
- }
-
- return NOTIFY_OK;
-}
-#endif /* LINUX_VERSION_CODE < 4.10.0 */
-
-int dhd_register_cpuhp_callback(dhd_info_t *dhd)
-{
- int cpuhp_ret = 0;
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0))
- cpuhp_ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "dhd",
- dhd_cpu_startup_callback, dhd_cpu_teardown_callback);
-
- if (cpuhp_ret < 0) {
- DHD_ERROR(("%s(): cpuhp_setup_state failed %d RX LB won't happen \r\n",
- __FUNCTION__, cpuhp_ret));
- }
-#else
- /*
- * If we are able to initialize CPU masks, lets register to the
- * CPU Hotplug framework to change the CPU for each job dynamically
- * using candidacy algorithm.
- */
- dhd->cpu_notifier.notifier_call = dhd_cpu_callback;
- register_hotcpu_notifier(&dhd->cpu_notifier); /* Register a callback */
-#endif /* LINUX_VERSION_CODE < 4.10.0 */
- return cpuhp_ret;
-}
-
-int dhd_unregister_cpuhp_callback(dhd_info_t *dhd)
-{
- int ret = 0;
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0))
- /* Don't want to call tear down while unregistering */
- cpuhp_remove_state_nocalls(CPUHP_AP_ONLINE_DYN);
-#else
- if (dhd->cpu_notifier.notifier_call != NULL) {
- unregister_cpu_notifier(&dhd->cpu_notifier);
- }
-#endif // endif
- return ret;
-}
-
-#if defined(DHD_LB_STATS)
-void dhd_lb_stats_init(dhd_pub_t *dhdp)
-{
- dhd_info_t *dhd;
- int i, j, num_cpus = num_possible_cpus();
- int alloc_size = sizeof(uint32) * num_cpus;
-
- if (dhdp == NULL) {
- DHD_ERROR(("%s(): Invalid argument dhd pubb pointer is NULL \n",
- __FUNCTION__));
- return;
- }
-
- dhd = dhdp->info;
- if (dhd == NULL) {
- DHD_ERROR(("%s(): DHD pointer is NULL \n", __FUNCTION__));
- return;
- }
-
- DHD_LB_STATS_CLR(dhd->dhd_dpc_cnt);
- DHD_LB_STATS_CLR(dhd->napi_sched_cnt);
-
- dhd->napi_percpu_run_cnt = (uint32 *)MALLOC(dhdp->osh, alloc_size);
- if (!dhd->napi_percpu_run_cnt) {
- DHD_ERROR(("%s(): napi_percpu_run_cnt malloc failed \n",
- __FUNCTION__));
- return;
- }
- for (i = 0; i < num_cpus; i++)
- DHD_LB_STATS_CLR(dhd->napi_percpu_run_cnt[i]);
-
- DHD_LB_STATS_CLR(dhd->rxc_sched_cnt);
-
- dhd->rxc_percpu_run_cnt = (uint32 *)MALLOC(dhdp->osh, alloc_size);
- if (!dhd->rxc_percpu_run_cnt) {
- DHD_ERROR(("%s(): rxc_percpu_run_cnt malloc failed \n",
- __FUNCTION__));
- return;
- }
- for (i = 0; i < num_cpus; i++)
- DHD_LB_STATS_CLR(dhd->rxc_percpu_run_cnt[i]);
-
- DHD_LB_STATS_CLR(dhd->txc_sched_cnt);
-
- dhd->txc_percpu_run_cnt = (uint32 *)MALLOC(dhdp->osh, alloc_size);
- if (!dhd->txc_percpu_run_cnt) {
- DHD_ERROR(("%s(): txc_percpu_run_cnt malloc failed \n",
- __FUNCTION__));
- return;
- }
- for (i = 0; i < num_cpus; i++)
- DHD_LB_STATS_CLR(dhd->txc_percpu_run_cnt[i]);
-
- dhd->cpu_online_cnt = (uint32 *)MALLOC(dhdp->osh, alloc_size);
- if (!dhd->cpu_online_cnt) {
- DHD_ERROR(("%s(): cpu_online_cnt malloc failed \n",
- __FUNCTION__));
- return;
- }
- for (i = 0; i < num_cpus; i++)
- DHD_LB_STATS_CLR(dhd->cpu_online_cnt[i]);
-
- dhd->cpu_offline_cnt = (uint32 *)MALLOC(dhdp->osh, alloc_size);
- if (!dhd->cpu_offline_cnt) {
- DHD_ERROR(("%s(): cpu_offline_cnt malloc failed \n",
- __FUNCTION__));
- return;
- }
- for (i = 0; i < num_cpus; i++)
- DHD_LB_STATS_CLR(dhd->cpu_offline_cnt[i]);
-
- dhd->txp_percpu_run_cnt = (uint32 *)MALLOC(dhdp->osh, alloc_size);
- if (!dhd->txp_percpu_run_cnt) {
- DHD_ERROR(("%s(): txp_percpu_run_cnt malloc failed \n",
- __FUNCTION__));
- return;
- }
- for (i = 0; i < num_cpus; i++)
- DHD_LB_STATS_CLR(dhd->txp_percpu_run_cnt[i]);
-
- dhd->tx_start_percpu_run_cnt = (uint32 *)MALLOC(dhdp->osh, alloc_size);
- if (!dhd->tx_start_percpu_run_cnt) {
- DHD_ERROR(("%s(): tx_start_percpu_run_cnt malloc failed \n",
- __FUNCTION__));
- return;
- }
- for (i = 0; i < num_cpus; i++)
- DHD_LB_STATS_CLR(dhd->tx_start_percpu_run_cnt[i]);
-
- for (j = 0; j < HIST_BIN_SIZE; j++) {
- dhd->napi_rx_hist[j] = (uint32 *)MALLOC(dhdp->osh, alloc_size);
- if (!dhd->napi_rx_hist[j]) {
- DHD_ERROR(("%s(): dhd->napi_rx_hist[%d] malloc failed \n",
- __FUNCTION__, j));
- return;
- }
- for (i = 0; i < num_cpus; i++) {
- DHD_LB_STATS_CLR(dhd->napi_rx_hist[j][i]);
- }
- }
-#ifdef DHD_LB_TXC
- for (j = 0; j < HIST_BIN_SIZE; j++) {
- dhd->txc_hist[j] = (uint32 *)MALLOC(dhdp->osh, alloc_size);
- if (!dhd->txc_hist[j]) {
- DHD_ERROR(("%s(): dhd->txc_hist[%d] malloc failed \n",
- __FUNCTION__, j));
- return;
- }
- for (i = 0; i < num_cpus; i++) {
- DHD_LB_STATS_CLR(dhd->txc_hist[j][i]);
- }
- }
-#endif /* DHD_LB_TXC */
-#ifdef DHD_LB_RXC
- for (j = 0; j < HIST_BIN_SIZE; j++) {
- dhd->rxc_hist[j] = (uint32 *)MALLOC(dhdp->osh, alloc_size);
- if (!dhd->rxc_hist[j]) {
- DHD_ERROR(("%s(): dhd->rxc_hist[%d] malloc failed \n",
- __FUNCTION__, j));
- return;
- }
- for (i = 0; i < num_cpus; i++) {
- DHD_LB_STATS_CLR(dhd->rxc_hist[j][i]);
- }
- }
-#endif /* DHD_LB_RXC */
- return;
-}
-
-void dhd_lb_stats_deinit(dhd_pub_t *dhdp)
-{
- dhd_info_t *dhd;
- int j, num_cpus = num_possible_cpus();
- int alloc_size = sizeof(uint32) * num_cpus;
-
- if (dhdp == NULL) {
- DHD_ERROR(("%s(): Invalid argument dhd pubb pointer is NULL \n",
- __FUNCTION__));
- return;
- }
-
- dhd = dhdp->info;
- if (dhd == NULL) {
- DHD_ERROR(("%s(): DHD pointer is NULL \n", __FUNCTION__));
- return;
- }
-
- if (dhd->napi_percpu_run_cnt) {
- MFREE(dhdp->osh, dhd->napi_percpu_run_cnt, alloc_size);
- dhd->napi_percpu_run_cnt = NULL;
- }
- if (dhd->rxc_percpu_run_cnt) {
- MFREE(dhdp->osh, dhd->rxc_percpu_run_cnt, alloc_size);
- dhd->rxc_percpu_run_cnt = NULL;
- }
- if (dhd->txc_percpu_run_cnt) {
- MFREE(dhdp->osh, dhd->txc_percpu_run_cnt, alloc_size);
- dhd->txc_percpu_run_cnt = NULL;
- }
- if (dhd->cpu_online_cnt) {
- MFREE(dhdp->osh, dhd->cpu_online_cnt, alloc_size);
- dhd->cpu_online_cnt = NULL;
- }
- if (dhd->cpu_offline_cnt) {
- MFREE(dhdp->osh, dhd->cpu_offline_cnt, alloc_size);
- dhd->cpu_offline_cnt = NULL;
- }
-
- if (dhd->txp_percpu_run_cnt) {
- MFREE(dhdp->osh, dhd->txp_percpu_run_cnt, alloc_size);
- dhd->txp_percpu_run_cnt = NULL;
- }
- if (dhd->tx_start_percpu_run_cnt) {
- MFREE(dhdp->osh, dhd->tx_start_percpu_run_cnt, alloc_size);
- dhd->tx_start_percpu_run_cnt = NULL;
- }
-
- for (j = 0; j < HIST_BIN_SIZE; j++) {
- if (dhd->napi_rx_hist[j]) {
- MFREE(dhdp->osh, dhd->napi_rx_hist[j], alloc_size);
- dhd->napi_rx_hist[j] = NULL;
- }
-#ifdef DHD_LB_TXC
- if (dhd->txc_hist[j]) {
- MFREE(dhdp->osh, dhd->txc_hist[j], alloc_size);
- dhd->txc_hist[j] = NULL;
- }
-#endif /* DHD_LB_TXC */
-#ifdef DHD_LB_RXC
- if (dhd->rxc_hist[j]) {
- MFREE(dhdp->osh, dhd->rxc_hist[j], alloc_size);
- dhd->rxc_hist[j] = NULL;
- }
-#endif /* DHD_LB_RXC */
- }
-
- return;
-}
-
-void dhd_lb_stats_dump_histo(dhd_pub_t *dhdp,
- struct bcmstrbuf *strbuf, uint32 **hist)
-{
- int i, j;
- uint32 *per_cpu_total;
- uint32 total = 0;
- uint32 num_cpus = num_possible_cpus();
-
- per_cpu_total = (uint32 *)MALLOC(dhdp->osh, sizeof(uint32) * num_cpus);
- if (!per_cpu_total) {
- DHD_ERROR(("%s(): dhd->per_cpu_total malloc failed \n", __FUNCTION__));
- return;
- }
- bzero(per_cpu_total, sizeof(uint32) * num_cpus);
-
- bcm_bprintf(strbuf, "CPU: \t\t");
- for (i = 0; i < num_cpus; i++)
- bcm_bprintf(strbuf, "%d\t", i);
- bcm_bprintf(strbuf, "\nBin\n");
-
- for (i = 0; i < HIST_BIN_SIZE; i++) {
- bcm_bprintf(strbuf, "%d:\t\t", 1<<i);
- for (j = 0; j < num_cpus; j++) {
- bcm_bprintf(strbuf, "%d\t", hist[i][j]);
- }
- bcm_bprintf(strbuf, "\n");
- }
- bcm_bprintf(strbuf, "Per CPU Total \t");
- total = 0;
- for (i = 0; i < num_cpus; i++) {
- for (j = 0; j < HIST_BIN_SIZE; j++) {
- per_cpu_total[i] += (hist[j][i] * (1<<j));
- }
- bcm_bprintf(strbuf, "%d\t", per_cpu_total[i]);
- total += per_cpu_total[i];
- }
- bcm_bprintf(strbuf, "\nTotal\t\t%d \n", total);
-
- if (per_cpu_total) {
- MFREE(dhdp->osh, per_cpu_total, sizeof(uint32) * num_cpus);
- per_cpu_total = NULL;
- }
- return;
-}
-
-void dhd_lb_stats_dump_cpu_array(struct bcmstrbuf *strbuf, uint32 *p)
-{
- int i, num_cpus = num_possible_cpus();
-
- bcm_bprintf(strbuf, "CPU: \t");
- for (i = 0; i < num_cpus; i++)
- bcm_bprintf(strbuf, "%d\t", i);
- bcm_bprintf(strbuf, "\n");
-
- bcm_bprintf(strbuf, "Val: \t");
- for (i = 0; i < num_cpus; i++)
- bcm_bprintf(strbuf, "%u\t", *(p+i));
- bcm_bprintf(strbuf, "\n");
- return;
-}
-
-void dhd_lb_stats_dump(dhd_pub_t *dhdp, struct bcmstrbuf *strbuf)
-{
- dhd_info_t *dhd;
-
- if (dhdp == NULL || strbuf == NULL) {
- DHD_ERROR(("%s(): Invalid argument dhdp %p strbuf %p \n",
- __FUNCTION__, dhdp, strbuf));
- return;
- }
-
- dhd = dhdp->info;
- if (dhd == NULL) {
- DHD_ERROR(("%s(): DHD pointer is NULL \n", __FUNCTION__));
- return;
- }
-
- bcm_bprintf(strbuf, "\ncpu_online_cnt:\n");
- dhd_lb_stats_dump_cpu_array(strbuf, dhd->cpu_online_cnt);
-
- bcm_bprintf(strbuf, "\ncpu_offline_cnt:\n");
- dhd_lb_stats_dump_cpu_array(strbuf, dhd->cpu_offline_cnt);
-
- bcm_bprintf(strbuf, "\nsched_cnt: dhd_dpc %u napi %u rxc %u txc %u\n",
- dhd->dhd_dpc_cnt, dhd->napi_sched_cnt, dhd->rxc_sched_cnt,
- dhd->txc_sched_cnt);
-
-#ifdef DHD_LB_RXP
- bcm_bprintf(strbuf, "\nnapi_percpu_run_cnt:\n");
- dhd_lb_stats_dump_cpu_array(strbuf, dhd->napi_percpu_run_cnt);
- bcm_bprintf(strbuf, "\nNAPI Packets Received Histogram:\n");
- dhd_lb_stats_dump_histo(dhdp, strbuf, dhd->napi_rx_hist);
-#endif /* DHD_LB_RXP */
-
-#ifdef DHD_LB_RXC
- bcm_bprintf(strbuf, "\nrxc_percpu_run_cnt:\n");
- dhd_lb_stats_dump_cpu_array(strbuf, dhd->rxc_percpu_run_cnt);
- bcm_bprintf(strbuf, "\nRX Completions (Buffer Post) Histogram:\n");
- dhd_lb_stats_dump_histo(dhdp, strbuf, dhd->rxc_hist);
-#endif /* DHD_LB_RXC */
-
-#ifdef DHD_LB_TXC
- bcm_bprintf(strbuf, "\ntxc_percpu_run_cnt:\n");
- dhd_lb_stats_dump_cpu_array(strbuf, dhd->txc_percpu_run_cnt);
- bcm_bprintf(strbuf, "\nTX Completions (Buffer Free) Histogram:\n");
- dhd_lb_stats_dump_histo(dhdp, strbuf, dhd->txc_hist);
-#endif /* DHD_LB_TXC */
-
-#ifdef DHD_LB_TXP
- bcm_bprintf(strbuf, "\ntxp_percpu_run_cnt:\n");
- dhd_lb_stats_dump_cpu_array(strbuf, dhd->txp_percpu_run_cnt);
-
- bcm_bprintf(strbuf, "\ntx_start_percpu_run_cnt:\n");
- dhd_lb_stats_dump_cpu_array(strbuf, dhd->tx_start_percpu_run_cnt);
-#endif /* DHD_LB_TXP */
-}
-
-/* Round 'num' up to the next power of 2; a value that is already a power of 2 is returned unchanged */
-static inline uint32 next_larger_power2(uint32 num)
-{
- num--;
- num |= (num >> 1);
- num |= (num >> 2);
- num |= (num >> 4);
- num |= (num >> 8);
- num |= (num >> 16);
-
- return (num + 1);
-}
-
-void dhd_lb_stats_update_histo(uint32 **bin, uint32 count, uint32 cpu)
-{
- uint32 bin_power;
- uint32 *p;
- bin_power = next_larger_power2(count);
-
- switch (bin_power) {
- case 1: p = bin[0] + cpu; break;
- case 2: p = bin[1] + cpu; break;
- case 4: p = bin[2] + cpu; break;
- case 8: p = bin[3] + cpu; break;
- case 16: p = bin[4] + cpu; break;
- case 32: p = bin[5] + cpu; break;
- case 64: p = bin[6] + cpu; break;
- case 128: p = bin[7] + cpu; break;
- default : p = bin[8] + cpu; break;
- }
-
- *p = *p + 1;
- return;
-}
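In other words, a packet count is rounded up to a power of two and the bin index is that power's exponent, clamped to the last bin. A self-contained user-space sketch (not driver code) that reproduces the count-to-bin mapping:

#include <stdio.h>
#include <stdint.h>

/* Same rounding helper as in the driver. */
static uint32_t next_larger_power2(uint32_t num)
{
	num--;
	num |= num >> 1;  num |= num >> 2;  num |= num >> 4;
	num |= num >> 8;  num |= num >> 16;
	return num + 1;
}

/* Mirror of the switch in dhd_lb_stats_update_histo(): powers of two
 * 1, 2, 4, ..., 128 map to bins 0..7, anything larger lands in bin 8.
 */
static int count_to_bin(uint32_t count)
{
	uint32_t p = next_larger_power2(count);
	int bin = 0;

	while (bin < 8 && (1u << bin) < p)
		bin++;
	return bin;
}

int main(void)
{
	printf("%d %d %d %d\n",
	       count_to_bin(1),    /* stays 1        -> bin 0 */
	       count_to_bin(3),    /* rounds to 4    -> bin 2 */
	       count_to_bin(20),   /* rounds to 32   -> bin 5 */
	       count_to_bin(200)); /* rounds to 256  -> bin 8 */
	return 0;
}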
-
-void dhd_lb_stats_update_napi_histo(dhd_pub_t *dhdp, uint32 count)
-{
- int cpu;
- dhd_info_t *dhd = dhdp->info;
-
- cpu = get_cpu();
- put_cpu();
- dhd_lb_stats_update_histo(dhd->napi_rx_hist, count, cpu);
-
- return;
-}
-
-void dhd_lb_stats_update_txc_histo(dhd_pub_t *dhdp, uint32 count)
-{
- int cpu;
- dhd_info_t *dhd = dhdp->info;
-
- cpu = get_cpu();
- put_cpu();
- dhd_lb_stats_update_histo(dhd->txc_hist, count, cpu);
-
- return;
-}
-
-void dhd_lb_stats_update_rxc_histo(dhd_pub_t *dhdp, uint32 count)
-{
- int cpu;
- dhd_info_t *dhd = dhdp->info;
-
- cpu = get_cpu();
- put_cpu();
- dhd_lb_stats_update_histo(dhd->rxc_hist, count, cpu);
-
- return;
-}
-
-void dhd_lb_stats_txc_percpu_cnt_incr(dhd_pub_t *dhdp)
-{
- dhd_info_t *dhd = dhdp->info;
- DHD_LB_STATS_PERCPU_ARR_INCR(dhd->txc_percpu_run_cnt);
-}
-
-void dhd_lb_stats_rxc_percpu_cnt_incr(dhd_pub_t *dhdp)
-{
- dhd_info_t *dhd = dhdp->info;
- DHD_LB_STATS_PERCPU_ARR_INCR(dhd->rxc_percpu_run_cnt);
-}
-#endif /* DHD_LB_STATS */
-
-#endif /* DHD_LB */
-#if defined(DHD_LB)
-/**
- * dhd_tasklet_schedule - Function that runs in IPI context of the destination
- * CPU and schedules a tasklet.
- * @tasklet: opaque pointer to the tasklet
- */
-INLINE void
-dhd_tasklet_schedule(void *tasklet)
-{
- tasklet_schedule((struct tasklet_struct *)tasklet);
-}
-/**
- * dhd_tasklet_schedule_on - Executes the passed tasklet on a given CPU
- * @tasklet: tasklet to be scheduled
- * @on_cpu: cpu core id
- *
- * If the requested cpu is online, then an IPI is sent to this cpu via the
- * smp_call_function_single with no wait and the tasklet_schedule function
- * will be invoked to schedule the specified tasklet on the requested CPU.
- */
-INLINE void
-dhd_tasklet_schedule_on(struct tasklet_struct *tasklet, int on_cpu)
-{
- const int wait = 0;
- smp_call_function_single(on_cpu,
- dhd_tasklet_schedule, (void *)tasklet, wait);
-}
-
-/**
- * dhd_work_schedule_on - Executes the passed work on a given CPU
- * @work: work to be scheduled
- * @on_cpu: cpu core id
- *
- * If the requested cpu is online, the work is queued on that cpu's
- * workqueue via schedule_work_on and the work function will be
- * invoked on the requested CPU.
- */
-
-INLINE void
-dhd_work_schedule_on(struct work_struct *work, int on_cpu)
-{
- schedule_work_on(on_cpu, work);
-}
-
-#if defined(DHD_LB_TXC)
-/**
- * dhd_lb_tx_compl_dispatch - load balance by dispatching the tx_compl_tasklet
- * on another cpu. The tx_compl_tasklet will take care of DMA unmapping and
- * freeing the packets placed in the tx_compl workq
- */
-void
-dhd_lb_tx_compl_dispatch(dhd_pub_t *dhdp)
-{
- dhd_info_t *dhd = dhdp->info;
- int curr_cpu, on_cpu;
-
- if (dhd->rx_napi_netdev == NULL) {
- DHD_ERROR(("%s: dhd->rx_napi_netdev is NULL\n", __FUNCTION__));
- return;
- }
-
- DHD_LB_STATS_INCR(dhd->txc_sched_cnt);
- /*
- * If the destination CPU is NOT online or is same as current CPU
- * no need to schedule the work
- */
- curr_cpu = get_cpu();
- put_cpu();
-
- on_cpu = atomic_read(&dhd->tx_compl_cpu);
-
- if ((on_cpu == curr_cpu) || (!cpu_online(on_cpu))) {
- dhd_tasklet_schedule(&dhd->tx_compl_tasklet);
- } else {
- schedule_work(&dhd->tx_compl_dispatcher_work);
- }
-}
-
-static void dhd_tx_compl_dispatcher_fn(struct work_struct * work)
-{
- struct dhd_info *dhd =
- container_of(work, struct dhd_info, tx_compl_dispatcher_work);
- int cpu;
-
- get_online_cpus();
- cpu = atomic_read(&dhd->tx_compl_cpu);
- if (!cpu_online(cpu))
- dhd_tasklet_schedule(&dhd->tx_compl_tasklet);
- else
- dhd_tasklet_schedule_on(&dhd->tx_compl_tasklet, cpu);
- put_online_cpus();
-}
-#endif /* DHD_LB_TXC */
-
-#if defined(DHD_LB_RXC)
-/**
- * dhd_lb_rx_compl_dispatch - load balance by dispatching the rx_compl_tasklet
- * on another cpu. The rx_compl_tasklet will take care of reposting rx buffers
- * in the H2D RxBuffer Post common ring, by using the recycled pktids that were
- * placed in the rx_compl workq.
- *
- * @dhdp: pointer to dhd_pub object
- */
-void
-dhd_lb_rx_compl_dispatch(dhd_pub_t *dhdp)
-{
- dhd_info_t *dhd = dhdp->info;
- int curr_cpu, on_cpu;
-
- if (dhd->rx_napi_netdev == NULL) {
- DHD_ERROR(("%s: dhd->rx_napi_netdev is NULL\n", __FUNCTION__));
- return;
- }
-
- DHD_LB_STATS_INCR(dhd->rxc_sched_cnt);
- /*
- * If the destination CPU is NOT online or is same as current CPU
- * no need to schedule the work
- */
- curr_cpu = get_cpu();
- put_cpu();
- on_cpu = atomic_read(&dhd->rx_compl_cpu);
-
- if ((on_cpu == curr_cpu) || (!cpu_online(on_cpu))) {
- dhd_tasklet_schedule(&dhd->rx_compl_tasklet);
- } else {
- schedule_work(&dhd->rx_compl_dispatcher_work);
- }
-}
-
-void dhd_rx_compl_dispatcher_fn(struct work_struct * work)
-{
- struct dhd_info *dhd =
- container_of(work, struct dhd_info, rx_compl_dispatcher_work);
- int cpu;
-
- get_online_cpus();
- cpu = atomic_read(&dhd->rx_compl_cpu);
- if (!cpu_online(cpu))
- dhd_tasklet_schedule(&dhd->rx_compl_tasklet);
- else {
- dhd_tasklet_schedule_on(&dhd->rx_compl_tasklet, cpu);
- }
- put_online_cpus();
-}
-#endif /* DHD_LB_RXC */
-
-#if defined(DHD_LB_TXP)
-void dhd_tx_dispatcher_work(struct work_struct * work)
-{
-#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
-#pragma GCC diagnostic push
-#pragma GCC diagnostic ignored "-Wcast-qual"
-#endif // endif
- struct dhd_info *dhd =
- container_of(work, struct dhd_info, tx_dispatcher_work);
-#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
-#pragma GCC diagnostic pop
-#endif // endif
- dhd_tasklet_schedule(&dhd->tx_tasklet);
-}
-
-void dhd_tx_dispatcher_fn(dhd_pub_t *dhdp)
-{
- int cpu;
- int net_tx_cpu;
- dhd_info_t *dhd = dhdp->info;
-
- preempt_disable();
- cpu = atomic_read(&dhd->tx_cpu);
- net_tx_cpu = atomic_read(&dhd->net_tx_cpu);
-
- /*
- * Now if the NET_TX has pushed the packet on the same
- * CPU that is chosen for Tx processing, separate the two,
- * i.e. run the TX processing tasklet on compl_cpu.
- */
- if (net_tx_cpu == cpu)
- cpu = atomic_read(&dhd->tx_compl_cpu);
-
- if (!cpu_online(cpu)) {
- /*
- * The chosen CPU is not online,
- * so do the job on the current CPU itself.
- */
- dhd_tasklet_schedule(&dhd->tx_tasklet);
- } else {
- /*
- * Schedule tx_dispatcher_work on that cpu, which
- * in turn will schedule tx_tasklet.
- */
- dhd_work_schedule_on(&dhd->tx_dispatcher_work, cpu);
- }
- preempt_enable();
-}
-
-/**
- * dhd_lb_tx_dispatch - load balance by dispatching the tx_tasklet
- * on another cpu. The tx_tasklet will take care of actually putting
- * the skbs into appropriate flow ring and ringing H2D interrupt
- *
- * @dhdp: pointer to dhd_pub object
- */
-void
-dhd_lb_tx_dispatch(dhd_pub_t *dhdp)
-{
- dhd_info_t *dhd = dhdp->info;
- int curr_cpu;
-
- curr_cpu = get_cpu();
- put_cpu();
-
- /* Record the CPU in which the TX request from Network stack came */
- atomic_set(&dhd->net_tx_cpu, curr_cpu);
-
- /* Schedule the work to dispatch ... */
- dhd_tx_dispatcher_fn(dhdp);
-}
-#endif /* DHD_LB_TXP */
-
-#if defined(DHD_LB_RXP)
-/**
- * dhd_napi_poll - Load balance napi poll function to process received
- * packets and send up the network stack using netif_receive_skb()
- *
- * @napi: napi object in which context this poll function is invoked
- * @budget: number of packets to be processed.
- *
- * Fetch the dhd_info given the rx_napi_struct. Move all packets from the
- * rx_napi_queue into a local rx_process_queue (lock and queue move and unlock).
- * Dequeue each packet from head of rx_process_queue, fetch the ifid from the
- * packet tag and sendup.
- */
-int
-dhd_napi_poll(struct napi_struct *napi, int budget)
-{
- int ifid;
- const int pkt_count = 1;
- const int chan = 0;
- struct sk_buff * skb;
- unsigned long flags;
- struct dhd_info *dhd;
- int processed = 0;
- struct sk_buff_head rx_process_queue;
-
-#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
-#pragma GCC diagnostic push
-#pragma GCC diagnostic ignored "-Wcast-qual"
-#endif // endif
- dhd = container_of(napi, struct dhd_info, rx_napi_struct);
-#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
-#pragma GCC diagnostic pop
-#endif // endif
-
- DHD_INFO(("%s napi_queue<%d> budget<%d>\n",
- __FUNCTION__, skb_queue_len(&dhd->rx_napi_queue), budget));
- __skb_queue_head_init(&rx_process_queue);
-
- /* extract the entire rx_napi_queue into local rx_process_queue */
- spin_lock_irqsave(&dhd->rx_napi_queue.lock, flags);
- skb_queue_splice_tail_init(&dhd->rx_napi_queue, &rx_process_queue);
- spin_unlock_irqrestore(&dhd->rx_napi_queue.lock, flags);
-
- while ((skb = __skb_dequeue(&rx_process_queue)) != NULL) {
- OSL_PREFETCH(skb->data);
-
- ifid = DHD_PKTTAG_IFID((dhd_pkttag_fr_t *)PKTTAG(skb));
-
- DHD_INFO(("%s dhd_rx_frame pkt<%p> ifid<%d>\n",
- __FUNCTION__, skb, ifid));
-
- dhd_rx_frame(&dhd->pub, ifid, skb, pkt_count, chan);
- processed++;
- }
-
- DHD_LB_STATS_UPDATE_NAPI_HISTO(&dhd->pub, processed);
-
- DHD_INFO(("%s processed %d\n", __FUNCTION__, processed));
- napi_complete(napi);
-
- return budget - 1;
-}
-
-/**
- * dhd_napi_schedule - Place the napi struct into the current CPU's softnet napi
- * poll list. This function may be invoked via smp_call_function_single
- * from a remote CPU.
- *
- * This function will essentially invoke __raise_softirq_irqoff(NET_RX_SOFTIRQ)
- * after the napi_struct is added to the softnet data's poll_list
- *
- * @info: pointer to a dhd_info struct
- */
-static void
-dhd_napi_schedule(void *info)
-{
- dhd_info_t *dhd = (dhd_info_t *)info;
-
- DHD_INFO(("%s rx_napi_struct<%p> on cpu<%d>\n",
- __FUNCTION__, &dhd->rx_napi_struct, atomic_read(&dhd->rx_napi_cpu)));
-
- /* add napi_struct to softnet data poll list and raise NET_RX_SOFTIRQ */
- if (napi_schedule_prep(&dhd->rx_napi_struct)) {
- __napi_schedule(&dhd->rx_napi_struct);
-#ifdef WAKEUP_KSOFTIRQD_POST_NAPI_SCHEDULE
- raise_softirq(NET_RX_SOFTIRQ);
-#endif /* WAKEUP_KSOFTIRQD_POST_NAPI_SCHEDULE */
- }
-
- /*
- * If the rx_napi_struct was already running, then we let it complete
- * processing all its packets. The rx_napi_struct may only run on one
- * core at a time, to avoid out-of-order handling.
- */
-}
-
-/**
- * dhd_napi_schedule_on - API to schedule on a desired CPU core a NET_RX_SOFTIRQ
- * action after placing the dhd's rx_process napi object in the remote CPU's
- * softnet data's poll_list.
- *
- * @dhd: dhd_info which has the rx_process napi object
- * @on_cpu: desired remote CPU id
- */
-static INLINE int
-dhd_napi_schedule_on(dhd_info_t *dhd, int on_cpu)
-{
- int wait = 0; /* asynchronous IPI */
- DHD_INFO(("%s dhd<%p> napi<%p> on_cpu<%d>\n",
- __FUNCTION__, dhd, &dhd->rx_napi_struct, on_cpu));
-
- if (smp_call_function_single(on_cpu, dhd_napi_schedule, dhd, wait)) {
- DHD_ERROR(("%s smp_call_function_single on_cpu<%d> failed\n",
- __FUNCTION__, on_cpu));
- }
-
- DHD_LB_STATS_INCR(dhd->napi_sched_cnt);
-
- return 0;
-}
-
-/*
- * Call get_online_cpus/put_online_cpus around dhd_napi_schedule_on
- * Why should we do this?
- * The candidacy algorithm is run from the call back function
- * registered to CPU hotplug notifier. This call back happens from Worker
- * context. The dhd_napi_schedule_on is also from worker context.
- * Note that both of this can run on two different CPUs at the same time.
- * So we can possibly have a window where a given CPUn is being brought
- * down from CPUm while we try to run a function on CPUn.
- * To prevent this it is better to have the code that executes an SMP
- * function run under get_online_cpus.
- * This call ensures that the hotplug mechanism does not kick in
- * until we are done dealing with online CPUs.
- * If the hotplug worker is already running, no worries because the
- * candidacy algo would then reflect the same in dhd->rx_napi_cpu.
- *
- * The below mentioned code structure is proposed in
- * https://www.kernel.org/doc/Documentation/cpu-hotplug.txt
- * for the question
- * Q: I need to ensure that a particular cpu is not removed when some
- * work specific to this cpu is in progress.
- *
- * According to the documentation calling get_online_cpus is NOT required, if
- * we are running from tasklet context. Since dhd_rx_napi_dispatcher_fn can
- * run from Work Queue context we have to call these functions
- */
-void dhd_rx_napi_dispatcher_fn(struct work_struct * work)
-{
-#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
-#pragma GCC diagnostic push
-#pragma GCC diagnostic ignored "-Wcast-qual"
-#endif // endif
- struct dhd_info *dhd =
- container_of(work, struct dhd_info, rx_napi_dispatcher_work);
-#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
-#pragma GCC diagnostic pop
-#endif // endif
-
- dhd_napi_schedule(dhd);
-}
-
-/**
- * dhd_lb_rx_napi_dispatch - load balance by dispatching the rx_napi_struct
- * to run on another CPU. The rx_napi_struct's poll function will retrieve all
- * the packets enqueued into the rx_napi_queue and sendup.
- * The producer's rx packet queue is appended to the rx_napi_queue before
- * dispatching the rx_napi_struct.
- */
-void
-dhd_lb_rx_napi_dispatch(dhd_pub_t *dhdp)
-{
- unsigned long flags;
- dhd_info_t *dhd = dhdp->info;
- int curr_cpu;
- int on_cpu;
-#ifdef DHD_LB_IRQSET
- cpumask_t cpus;
-#endif /* DHD_LB_IRQSET */
-
- if (dhd->rx_napi_netdev == NULL) {
- DHD_ERROR(("%s: dhd->rx_napi_netdev is NULL\n", __FUNCTION__));
- return;
- }
-
- DHD_INFO(("%s append napi_queue<%d> pend_queue<%d>\n", __FUNCTION__,
- skb_queue_len(&dhd->rx_napi_queue), skb_queue_len(&dhd->rx_pend_queue)));
-
- /* append the producer's queue of packets to the napi's rx process queue */
- spin_lock_irqsave(&dhd->rx_napi_queue.lock, flags);
- skb_queue_splice_tail_init(&dhd->rx_pend_queue, &dhd->rx_napi_queue);
- spin_unlock_irqrestore(&dhd->rx_napi_queue.lock, flags);
-
- DHD_LB_STATS_PERCPU_ARR_INCR(dhd->napi_percpu_run_cnt);
-
- /* if LB RXP is disabled directly schedule NAPI */
- if (atomic_read(&dhd->lb_rxp_active) == 0) {
- dhd_napi_schedule(dhd);
- return;
- }
-
- /*
- * If the destination CPU is NOT online or is same as current CPU
- * no need to schedule the work
- */
- curr_cpu = get_cpu();
- put_cpu();
-
- preempt_disable();
- on_cpu = atomic_read(&dhd->rx_napi_cpu);
-#ifdef DHD_LB_IRQSET
- if (cpumask_and(&cpus, cpumask_of(curr_cpu), dhd->cpumask_primary) ||
- (!cpu_online(on_cpu)))
-#else
- if ((on_cpu == curr_cpu) || (!cpu_online(on_cpu)))
-#endif /* DHD_LB_IRQSET */
- {
- DHD_INFO(("%s : curr_cpu : %d, cpumask : 0x%lx\n", __FUNCTION__,
- curr_cpu, *cpumask_bits(dhd->cpumask_primary)));
- dhd_napi_schedule(dhd);
- } else {
- DHD_INFO(("%s : schedule to curr_cpu : %d, cpumask : 0x%lx\n",
- __FUNCTION__, curr_cpu, *cpumask_bits(dhd->cpumask_primary)));
- dhd_work_schedule_on(&dhd->rx_napi_dispatcher_work, on_cpu);
- DHD_LB_STATS_INCR(dhd->napi_sched_cnt);
- }
- preempt_enable();
-}
-
-/**
- * dhd_lb_rx_pkt_enqueue - Enqueue the packet into the producer's queue
- */
-void
-dhd_lb_rx_pkt_enqueue(dhd_pub_t *dhdp, void *pkt, int ifidx)
-{
- dhd_info_t *dhd = dhdp->info;
-
- DHD_INFO(("%s enqueue pkt<%p> ifidx<%d> pend_queue<%d>\n", __FUNCTION__,
- pkt, ifidx, skb_queue_len(&dhd->rx_pend_queue)));
- DHD_PKTTAG_SET_IFID((dhd_pkttag_fr_t *)PKTTAG(pkt), ifidx);
- __skb_queue_tail(&dhd->rx_pend_queue, pkt);
-}
-#endif /* DHD_LB_RXP */
-#endif /* DHD_LB */
-
-#if defined(DHD_LB_IRQSET) || defined(DHD_CONTROL_PCIE_CPUCORE_WIFI_TURNON)
-void
-dhd_irq_set_affinity(dhd_pub_t *dhdp, const struct cpumask *cpumask)
-{
- unsigned int irq = (unsigned int)-1;
- int err = BCME_OK;
-
- if (!dhdp) {
- DHD_ERROR(("%s : dhdp is NULL\n", __FUNCTION__));
- return;
- }
-
- if (!dhdp->bus) {
- DHD_ERROR(("%s : bus is NULL\n", __FUNCTION__));
- return;
- }
-
- DHD_ERROR(("%s : irq set affinity cpu:0x%lx\n",
- __FUNCTION__, *cpumask_bits(cpumask)));
-
- dhdpcie_get_pcieirq(dhdp->bus, &irq);
- err = irq_set_affinity(irq, cpumask);
- if (err)
- DHD_ERROR(("%s : irq set affinity is failed cpu:0x%lx\n",
- __FUNCTION__, *cpumask_bits(cpumask)));
-}
-#endif /* DHD_LB_IRQSET || DHD_CONTROL_PCIE_CPUCORE_WIFI_TURNON */
-
-#if defined(DHD_LB_TXP)
-
-int BCMFASTPATH
-dhd_lb_sendpkt(dhd_info_t *dhd, struct net_device *net,
- int ifidx, void *skb)
-{
- DHD_LB_STATS_PERCPU_ARR_INCR(dhd->tx_start_percpu_run_cnt);
-
- /* If the feature is disabled at run-time, do TX from here */
- if (atomic_read(&dhd->lb_txp_active) == 0) {
- DHD_LB_STATS_PERCPU_ARR_INCR(dhd->txp_percpu_run_cnt);
- return __dhd_sendpkt(&dhd->pub, ifidx, skb);
- }
-
- /* Store the address of net device and interface index in the Packet tag */
- DHD_LB_TX_PKTTAG_SET_NETDEV((dhd_tx_lb_pkttag_fr_t *)PKTTAG(skb), net);
- DHD_LB_TX_PKTTAG_SET_IFIDX((dhd_tx_lb_pkttag_fr_t *)PKTTAG(skb), ifidx);
-
- /* Enqueue the skb into tx_pend_queue */
- skb_queue_tail(&dhd->tx_pend_queue, skb);
-
- DHD_TRACE(("%s(): Added skb %p for netdev %p \r\n", __FUNCTION__, skb, net));
-
- /* Dispatch the Tx job to be processed by the tx_tasklet */
- dhd_lb_tx_dispatch(&dhd->pub);
-
- return NETDEV_TX_OK;
-}
-#endif /* DHD_LB_TXP */
-
-#ifdef DHD_LB_TXP
-#define DHD_LB_TXBOUND 64
-/*
- * Function that performs the TX processing on a given CPU
- */
-bool
-dhd_lb_tx_process(dhd_info_t *dhd)
-{
- struct sk_buff *skb;
- int cnt = 0;
- struct net_device *net;
- int ifidx;
- bool resched = FALSE;
-
- DHD_TRACE(("%s(): TX Processing \r\n", __FUNCTION__));
- if (dhd == NULL) {
- DHD_ERROR((" Null pointer DHD \r\n"));
- return resched;
- }
-
- BCM_REFERENCE(net);
-
- DHD_LB_STATS_PERCPU_ARR_INCR(dhd->txp_percpu_run_cnt);
-
- /* Base Loop to perform the actual Tx */
- do {
- skb = skb_dequeue(&dhd->tx_pend_queue);
- if (skb == NULL) {
- DHD_TRACE(("Dequeued a Null Packet \r\n"));
- break;
- }
- cnt++;
-
- net = DHD_LB_TX_PKTTAG_NETDEV((dhd_tx_lb_pkttag_fr_t *)PKTTAG(skb));
- ifidx = DHD_LB_TX_PKTTAG_IFIDX((dhd_tx_lb_pkttag_fr_t *)PKTTAG(skb));
-
- DHD_TRACE(("Processing skb %p for net %p index %d \r\n", skb,
- net, ifidx));
-
- __dhd_sendpkt(&dhd->pub, ifidx, skb);
-
- if (cnt >= DHD_LB_TXBOUND) {
- resched = TRUE;
- break;
- }
-
- } while (1);
-
- DHD_INFO(("%s(): Processed %d packets \r\n", __FUNCTION__, cnt));
-
- return resched;
-}
-
-void
-dhd_lb_tx_handler(unsigned long data)
-{
- dhd_info_t *dhd = (dhd_info_t *)data;
-
- if (dhd_lb_tx_process(dhd)) {
- dhd_tasklet_schedule(&dhd->tx_tasklet);
- }
-}
-
-#endif /* DHD_LB_TXP */
+++ /dev/null
-/*
- * Packet dump helper functions
- *
- * Copyright (C) 1999-2019, Broadcom.
- *
- * Unless you and Broadcom execute a separate written software license
- * agreement governing use of this software, this software is licensed to you
- * under the terms of the GNU General Public License version 2 (the "GPL"),
- * available at http://www.broadcom.com/licenses/GPLv2.php, with the
- * following added to such license:
- *
- * As a special exception, the copyright holders of this software give you
- * permission to link this software with independent modules, and to copy and
- * distribute the resulting executable under terms of your choice, provided that
- * you also meet, for each linked independent module, the terms and conditions of
- * the license of that module. An independent module is a module which is not
- * derived from this software. The special exception does not apply to any
- * modifications of the software.
- *
- * Notwithstanding the above, under no circumstances may you combine this
- * software in any way with any other Broadcom software provided under a license
- * other than the GPL, without Broadcom's express prior written consent.
- *
- *
- * <<Broadcom-WL-IPTag/Open:>>
- *
- * $Id: dhd_linux_pktdump.c 820929 2019-05-21 14:09:11Z $
- */
-
-#include <typedefs.h>
-#include <ethernet.h>
-#include <bcmutils.h>
-#include <bcmevent.h>
-#include <bcmendian.h>
-#include <bcmtlv.h>
-#include <dngl_stats.h>
-#include <dhd.h>
-#include <dhd_dbg.h>
-#include <bcmip.h>
-#include <bcmudp.h>
-#include <bcmdhcp.h>
-#include <bcmarp.h>
-#include <bcmicmp.h>
-#include <dhd_linux_pktdump.h>
-#include <dhd_config.h>
-
-#define DHD_PKTDUMP(arg) printk arg
-#define DHD_PKTDUMP_MEM(arg) printk arg
-#define PACKED_STRUCT __attribute__ ((packed))
-
-#define EAPOL_HDR_LEN 4
-
-/* EAPOL types */
-#define EAP_PACKET 0
-#define EAPOL_START 1
-#define EAPOL_LOGOFF 2
-#define EAPOL_KEY 3
-#define EAPOL_ASF 4
-
-/* EAPOL-Key types */
-#define EAPOL_RC4_KEY 1
-#define EAPOL_WPA2_KEY 2 /* 802.11i/WPA2 */
-#define EAPOL_WPA_KEY 254 /* WPA */
-
-/* EAPOL-Key header field size */
-#define AKW_BLOCK_LEN 8
-#define WPA_KEY_REPLAY_LEN 8
-#define WPA_KEY_NONCE_LEN 32
-#define WPA_KEY_IV_LEN 16
-#define WPA_KEY_RSC_LEN 8
-#define WPA_KEY_ID_LEN 8
-#define WPA_KEY_MIC_LEN 16
-#define WPA_MAX_KEY_SIZE 32
-#define WPA_KEY_DATA_LEN (WPA_MAX_KEY_SIZE + AKW_BLOCK_LEN)
-
-/* Key information bit */
-#define KEYINFO_TYPE_MASK (1 << 3)
-#define KEYINFO_INSTALL_MASK (1 << 6)
-#define KEYINFO_KEYACK_MASK (1 << 7)
-#define KEYINFO_KEYMIC_MASK (1 << 8)
-#define KEYINFO_SECURE_MASK (1 << 9)
-#define KEYINFO_ERROR_MASK (1 << 10)
-#define KEYINFO_REQ_MASK (1 << 11)
-
-/* EAP Code */
-#define EAP_CODE_REQUEST 1 /* Request */
-#define EAP_CODE_RESPONSE 2 /* Response */
-#define EAP_CODE_SUCCESS 3 /* Success */
-#define EAP_CODE_FAILURE 4 /* Failure */
-
-/* EAP Type */
-#define EAP_TYPE_RSVD 0 /* Reserved */
-#define EAP_TYPE_IDENT 1 /* Identity */
-#define EAP_TYPE_NOTI 2 /* Notification */
-#define EAP_TYPE_TLS 13 /* EAP-TLS */
-#define EAP_TYPE_LEAP 17 /* Cisco-LEAP */
-#define EAP_TYPE_TTLS 21 /* EAP-TTLS */
-#define EAP_TYPE_AKA 23 /* EAP-AKA */
-#define EAP_TYPE_PEAP 25 /* EAP-PEAP */
-#define EAP_TYPE_FAST 43 /* EAP-FAST */
-#define EAP_TYPE_PSK 47 /* EAP-PSK */
-#define EAP_TYPE_AKAP 50 /* EAP-AKA' */
-#define EAP_TYPE_EXP 254 /* Reserved for Expanded Type */
-
-/* WSC */
-#define EAP_HDR_LEN 5
-#define EAP_WSC_NONCE_OFFSET 10
-#define EAP_WSC_DATA_OFFSET (OFFSETOF(eap_wsc_fmt_t, data))
-#define EAP_WSC_MIN_DATA_LEN ((EAP_HDR_LEN) + (EAP_WSC_DATA_OFFSET))
-#define WFA_VID "\x00\x37\x2A" /* WFA SMI code */
-#define WFA_VID_LEN 3 /* WFA VID length */
-#define WFA_VTYPE 1u /* WFA Vendor type */
-
-/* WSC opcode */
-#define WSC_OPCODE_UPNP 0
-#define WSC_OPCODE_START 1
-#define WSC_OPCODE_ACK 2
-#define WSC_OPCODE_NACK 3
-#define WSC_OPCODE_MSG 4
-#define WSC_OPCODE_DONE 5
-#define WSC_OPCODE_FRAG_ACK 6
-
-/* WSC flag */
-#define WSC_FLAG_MF 1 /* more fragments */
-#define WSC_FLAG_LF 2 /* length field */
-
-/* WSC message code */
-#define WSC_ATTR_MSG 0x1022
-#define WSC_MSG_M1 0x04
-#define WSC_MSG_M2 0x05
-#define WSC_MSG_M3 0x07
-#define WSC_MSG_M4 0x08
-#define WSC_MSG_M5 0x09
-#define WSC_MSG_M6 0x0A
-#define WSC_MSG_M7 0x0B
-#define WSC_MSG_M8 0x0C
-
-/* Debug prints */
-typedef enum pkt_cnt_type {
- PKT_CNT_TYPE_INVALID = 0,
- PKT_CNT_TYPE_ARP = 1,
- PKT_CNT_TYPE_DNS = 2,
- PKT_CNT_TYPE_MAX = 3
-} pkt_cnt_type_t;
-
-typedef struct pkt_cnt {
- uint32 tx_cnt;
- uint32 tx_err_cnt;
- uint32 rx_cnt;
-} pkt_cnt_t;
-
-typedef struct pkt_cnt_log {
- bool enabled;
- uint16 reason;
- timer_list_compat_t pktcnt_timer;
- pkt_cnt_t arp_cnt;
- pkt_cnt_t dns_cnt;
-} pkt_cnts_log_t;
-
-#define PKT_CNT_TIMER_INTERNVAL_MS 5000 /* packet count timeout(ms) */
-#define PKT_CNT_RSN_VALID(rsn) \
- (((rsn) > (PKT_CNT_RSN_INVALID)) && ((rsn) < (PKT_CNT_RSN_MAX)))
-
-#ifdef DHD_PKTDUMP_ROAM
-static const char pkt_cnt_msg[][20] = {
- "INVALID",
- "ROAM_SUCCESS",
- "GROUP_KEY_UPDATE",
- "CONNECT_SUCCESS",
- "INVALID"
-};
-#endif
-
-static const char tx_pktfate[][30] = {
- "TX_PKT_FATE_ACKED", /* 0: WLFC_CTL_PKTFLAG_DISCARD */
- "TX_PKT_FATE_FW_QUEUED", /* 1: WLFC_CTL_PKTFLAG_D11SUPPRESS */
- "TX_PKT_FATE_FW_QUEUED", /* 2: WLFC_CTL_PKTFLAG_WLSUPPRESS */
- "TX_PKT_FATE_FW_DROP_INVALID", /* 3: WLFC_CTL_PKTFLAG_TOSSED_BYWLC */
- "TX_PKT_FATE_SENT", /* 4: WLFC_CTL_PKTFLAG_DISCARD_NOACK */
- "TX_PKT_FATE_FW_DROP_OTHER", /* 5: WLFC_CTL_PKTFLAG_SUPPRESS_ACKED */
- "TX_PKT_FATE_FW_DROP_EXPTIME", /* 6: WLFC_CTL_PKTFLAG_EXPIRED */
- "TX_PKT_FATE_FW_DROP_OTHER", /* 7: WLFC_CTL_PKTFLAG_DROPPED */
- "TX_PKT_FATE_FW_PKT_FREE", /* 8: WLFC_CTL_PKTFLAG_MKTFREE */
-};
-
-#define DBGREPLAY " Replay Counter: %02x%02x%02x%02x%02x%02x%02x%02x"
-#define REPLAY_FMT(key) ((const eapol_key_hdr_t *)(key))->replay[0], \
- ((const eapol_key_hdr_t *)(key))->replay[1], \
- ((const eapol_key_hdr_t *)(key))->replay[2], \
- ((const eapol_key_hdr_t *)(key))->replay[3], \
- ((const eapol_key_hdr_t *)(key))->replay[4], \
- ((const eapol_key_hdr_t *)(key))->replay[5], \
- ((const eapol_key_hdr_t *)(key))->replay[6], \
- ((const eapol_key_hdr_t *)(key))->replay[7]
-#define TXFATE_FMT " TX_PKTHASH:0x%X TX_PKT_FATE:%s"
-#define TX_PKTHASH(pkthash) ((pkthash) ? (*pkthash) : (0))
-#define TX_FATE_STR(fate) (((*fate) <= (WLFC_CTL_PKTFLAG_MKTFREE)) ? \
- (tx_pktfate[(*fate)]) : "TX_PKT_FATE_FW_DROP_OTHER")
-#define TX_FATE(fate) ((fate) ? (TX_FATE_STR(fate)) : "N/A")
-#define TX_FATE_ACKED(fate) ((fate) ? ((*fate) == (WLFC_CTL_PKTFLAG_DISCARD)) : (0))
-
-#define EAP_PRINT(x, args...) \
- do { \
- if (dump_msg_level & DUMP_EAPOL_VAL) { \
- if (tx) { \
- DHD_PKTDUMP(("[dhd-%s] 802_1X " x " [TX] : (%s) %s (%s)"TXFATE_FMT"\n", \
- ifname, ## args, \
- tx?seabuf:deabuf, tx?"->":"<-", tx?deabuf:seabuf, \
- TX_PKTHASH(pkthash), TX_FATE(pktfate))); \
- } else { \
- DHD_PKTDUMP(("[dhd-%s] 802_1X " x " [RX] : (%s) %s (%s)\n", \
- ifname, ## args, \
- tx?seabuf:deabuf, tx?"->":"<-", tx?deabuf:seabuf)); \
- } \
- } \
- } while (0)
-
-#define EAP_PRINT_REPLAY(x, args...) \
- do { \
- if (dump_msg_level & DUMP_EAPOL_VAL) { \
- if (tx) { \
- DHD_PKTDUMP(("[dhd-%s] 802_1X " x " [TX] : (%s) %s (%s)"DBGREPLAY TXFATE_FMT"\n", \
- ifname, ## args, \
- tx?seabuf:deabuf, tx?"->":"<-", tx?deabuf:seabuf, \
- REPLAY_FMT(eap_key), TX_PKTHASH(pkthash), TX_FATE(pktfate))); \
- } else { \
- DHD_PKTDUMP(("[dhd-%s] 802_1X " x " [RX] : (%s) %s (%s)"DBGREPLAY"\n", \
- ifname, ## args, \
- tx?seabuf:deabuf, tx?"->":"<-", tx?deabuf:seabuf, \
- REPLAY_FMT(eap_key))); \
- } \
- } \
- } while (0)
-
-#define EAP_PRINT_OTHER(x, args...) \
- do { \
- if (dump_msg_level & DUMP_EAPOL_VAL) { \
- if (tx) { \
- DHD_PKTDUMP(("[dhd-%s] 802_1X " x " [TX] : (%s) %s (%s) " \
- "ver %d, type %d"TXFATE_FMT"\n", \
- ifname, ## args, \
- tx?seabuf:deabuf, tx?"->":"<-", tx?deabuf:seabuf, \
- eapol_hdr->version, eapol_hdr->type, \
- TX_PKTHASH(pkthash), TX_FATE(pktfate))); \
- } else { \
- DHD_PKTDUMP(("[dhd-%s] 802_1X " x " [RX] : (%s) %s (%s) " \
- "ver %d, type %d\n", \
- ifname, ## args, \
- tx?seabuf:deabuf, tx?"->":"<-", tx?deabuf:seabuf, \
- eapol_hdr->version, eapol_hdr->type)); \
- } \
- } \
- } while (0)
-
-#define EAP_PRINT_OTHER_4WAY(x, args...) \
- do { \
- if (dump_msg_level & DUMP_EAPOL_VAL) { \
- if (tx) { \
- DHD_PKTDUMP(("[dhd-%s] 802_1X " x " [TX] : (%s) %s (%s) " \
- "ver %d type %d keytype %d keyinfo 0x%02X"TXFATE_FMT"\n", \
- ifname, ## args, \
- tx?seabuf:deabuf, tx?"->":"<-", tx?deabuf:seabuf, \
- eapol_hdr->version, eapol_hdr->type, eap_key->type, \
- (uint32)hton16(eap_key->key_info), \
- TX_PKTHASH(pkthash), TX_FATE(pktfate))); \
- } else { \
- DHD_PKTDUMP(("[dhd-%s] 802_1X " x " [RX] : (%s) %s (%s) " \
- "ver %d type %d keytype %d keyinfo 0x%02X\n", \
- ifname, ## args, \
- tx?seabuf:deabuf, tx?"->":"<-", tx?deabuf:seabuf, \
- eapol_hdr->version, eapol_hdr->type, eap_key->type, \
- (uint32)hton16(eap_key->key_info))); \
- } \
- } \
- } while (0)
-
-/* EAPOL header */
-typedef struct eapol_header {
- struct ether_header eth; /* 802.3/Ethernet header */
- uint8 version; /* EAPOL protocol version */
- uint8 type; /* EAPOL type */
- uint16 length; /* Length of body */
- uint8 body[1]; /* Body (optional) */
-} PACKED_STRUCT eapol_header_t;
-
-/* EAP header */
-typedef struct eap_header_fmt {
- uint8 code;
- uint8 id;
- uint16 len;
- uint8 type;
- uint8 data[1];
-} PACKED_STRUCT eap_header_fmt_t;
-
-/* WSC EAP format */
-typedef struct eap_wsc_fmt {
- uint8 oui[3];
- uint32 ouitype;
- uint8 opcode;
- uint8 flags;
- uint8 data[1];
-} PACKED_STRUCT eap_wsc_fmt_t;
-
-/* EAPOL-Key */
-typedef struct eapol_key_hdr {
- uint8 type; /* Key Descriptor Type */
- uint16 key_info; /* Key Information (unaligned) */
- uint16 key_len; /* Key Length (unaligned) */
- uint8 replay[WPA_KEY_REPLAY_LEN]; /* Replay Counter */
- uint8 nonce[WPA_KEY_NONCE_LEN]; /* Nonce */
- uint8 iv[WPA_KEY_IV_LEN]; /* Key IV */
- uint8 rsc[WPA_KEY_RSC_LEN]; /* Key RSC */
- uint8 id[WPA_KEY_ID_LEN]; /* WPA:Key ID, 802.11i/WPA2: Reserved */
- uint8 mic[WPA_KEY_MIC_LEN]; /* Key MIC */
- uint16 data_len; /* Key Data Length */
- uint8 data[WPA_KEY_DATA_LEN]; /* Key data */
-} PACKED_STRUCT eapol_key_hdr_t;
-
-msg_eapol_t
-dhd_is_4way_msg(uint8 *pktdata)
-{
- eapol_header_t *eapol_hdr;
- eapol_key_hdr_t *eap_key;
- msg_eapol_t type = EAPOL_OTHER;
- bool pair, ack, mic, kerr, req, sec, install;
- uint16 key_info;
-
- if (!pktdata) {
- DHD_PKTDUMP(("%s: pktdata is NULL\n", __FUNCTION__));
- return type;
- }
-
- eapol_hdr = (eapol_header_t *)pktdata;
- eap_key = (eapol_key_hdr_t *)(eapol_hdr->body);
- if (eap_key->type != EAPOL_WPA2_KEY) {
- return type;
- }
-
- key_info = hton16(eap_key->key_info);
- pair = !!(key_info & KEYINFO_TYPE_MASK);
- ack = !!(key_info & KEYINFO_KEYACK_MASK);
- mic = !!(key_info & KEYINFO_KEYMIC_MASK);
- kerr = !!(key_info & KEYINFO_ERROR_MASK);
- req = !!(key_info & KEYINFO_REQ_MASK);
- sec = !!(key_info & KEYINFO_SECURE_MASK);
- install = !!(key_info & KEYINFO_INSTALL_MASK);
-
- if (pair && !install && ack && !mic && !sec && !kerr && !req) {
- type = EAPOL_4WAY_M1;
- } else if (pair && !install && !ack && mic && !sec && !kerr && !req) {
- type = EAPOL_4WAY_M2;
- } else if (pair && ack && mic && sec && !kerr && !req) {
- type = EAPOL_4WAY_M3;
- } else if (pair && !install && !ack && mic && sec && !req && !kerr) {
- type = EAPOL_4WAY_M4;
- } else if (!pair && !install && ack && mic && sec && !req && !kerr) {
- type = EAPOL_GROUPKEY_M1;
- } else if (!pair && !install && !ack && mic && sec && !req && !kerr) {
- type = EAPOL_GROUPKEY_M2;
- } else {
- type = EAPOL_OTHER;
- }
-
- return type;
-}
-
-void
-dhd_dump_pkt(dhd_pub_t *dhdp, int ifidx, uint8 *pktdata, uint32 pktlen,
- bool tx, uint32 *pkthash, uint16 *pktfate)
-{
- struct ether_header *eh;
- uint16 ether_type;
-
- if (!pktdata || pktlen < ETHER_HDR_LEN) {
- return;
- }
-
- eh = (struct ether_header *)pktdata;
- ether_type = ntoh16(eh->ether_type);
- if (ether_type == ETHER_TYPE_802_1X) {
- dhd_dump_eapol_message(dhdp, ifidx, pktdata, pktlen,
- tx, pkthash, pktfate);
- }
- if (ntoh16(eh->ether_type) == ETHER_TYPE_IP) {
- dhd_dhcp_dump(dhdp, ifidx, pktdata, tx, pkthash, pktfate);
- dhd_icmp_dump(dhdp, ifidx, pktdata, tx, pkthash, pktfate);
- dhd_dns_dump(dhdp, ifidx, pktdata, tx, pkthash, pktfate);
- }
- if (ntoh16(eh->ether_type) == ETHER_TYPE_ARP) {
- dhd_arp_dump(dhdp, ifidx, pktdata, tx, pkthash, pktfate);
- }
- dhd_trx_pkt_dump(dhdp, ifidx, pktdata, pktlen, tx);
-}
-
-#ifdef DHD_PKTDUMP_ROAM
-static void
-dhd_dump_pkt_cnts_inc(dhd_pub_t *dhdp, bool tx, uint16 *pktfate, uint16 pkttype)
-{
- pkt_cnts_log_t *pktcnts;
- pkt_cnt_t *cnt;
-
- if (!dhdp) {
- DHD_ERROR(("%s: dhdp is NULL\n", __FUNCTION__));
- return;
- }
-
- pktcnts = (pkt_cnts_log_t *)(dhdp->pktcnts);
- if (!pktcnts) {
- DHD_ERROR(("%s: pktcnts is NULL\n", __FUNCTION__));
- return;
- }
-
- if (!pktcnts->enabled || (tx && !pktfate)) {
- return;
- }
-
- if (pkttype == PKT_CNT_TYPE_ARP) {
- cnt = (pkt_cnt_t *)&pktcnts->arp_cnt;
- } else if (pkttype == PKT_CNT_TYPE_DNS) {
- cnt = (pkt_cnt_t *)&pktcnts->dns_cnt;
- } else {
- /* invalid packet type */
- return;
- }
-
- if (tx) {
- TX_FATE_ACKED(pktfate) ? cnt->tx_cnt++ : cnt->tx_err_cnt++;
- } else {
- cnt->rx_cnt++;
- }
-}
-
-static void
-dhd_dump_pkt_timer(unsigned long data)
-{
- dhd_pub_t *dhdp = (dhd_pub_t *)data;
- pkt_cnts_log_t *pktcnts = (pkt_cnts_log_t *)(dhdp->pktcnts);
-
- pktcnts->enabled = FALSE;
-
- /* print out the packet counter value */
- DHD_PKTDUMP(("============= PACKET COUNT SUMMARY ============\n"));
- DHD_PKTDUMP(("- Reason: %s\n", pkt_cnt_msg[pktcnts->reason]));
- DHD_PKTDUMP(("- Duration: %d msec(s)\n", PKT_CNT_TIMER_INTERNVAL_MS));
- DHD_PKTDUMP(("- ARP PACKETS: tx_success:%d tx_fail:%d rx_cnt:%d\n",
- pktcnts->arp_cnt.tx_cnt, pktcnts->arp_cnt.tx_err_cnt,
- pktcnts->arp_cnt.rx_cnt));
- DHD_PKTDUMP(("- DNS PACKETS: tx_success:%d tx_fail:%d rx_cnt:%d\n",
- pktcnts->dns_cnt.tx_cnt, pktcnts->dns_cnt.tx_err_cnt,
- pktcnts->dns_cnt.rx_cnt));
- DHD_PKTDUMP(("============= END OF COUNT SUMMARY ============\n"));
-}
-
-void
-dhd_dump_mod_pkt_timer(dhd_pub_t *dhdp, uint16 rsn)
-{
- pkt_cnts_log_t *pktcnts;
-
- if (!dhdp || !dhdp->pktcnts) {
- DHD_ERROR(("%s: dhdp or dhdp->pktcnts is NULL\n",
- __FUNCTION__));
- return;
- }
-
- if (!PKT_CNT_RSN_VALID(rsn)) {
- DHD_ERROR(("%s: invalid reason code %d\n",
- __FUNCTION__, rsn));
- return;
- }
-
- pktcnts = (pkt_cnts_log_t *)(dhdp->pktcnts);
- if (timer_pending(&pktcnts->pktcnt_timer)) {
- del_timer_sync(&pktcnts->pktcnt_timer);
- }
-
- bzero(&pktcnts->arp_cnt, sizeof(pkt_cnt_t));
- bzero(&pktcnts->dns_cnt, sizeof(pkt_cnt_t));
- pktcnts->reason = rsn;
- pktcnts->enabled = TRUE;
- mod_timer(&pktcnts->pktcnt_timer,
- jiffies + msecs_to_jiffies(PKT_CNT_TIMER_INTERNVAL_MS));
- DHD_PKTDUMP(("%s: Arm the pktcnt timer. reason=%d\n",
- __FUNCTION__, rsn));
-}
-
-void
-dhd_dump_pkt_init(dhd_pub_t *dhdp)
-{
- pkt_cnts_log_t *pktcnts;
-
- if (!dhdp) {
- DHD_ERROR(("%s: dhdp is NULL\n", __FUNCTION__));
- return;
- }
-
- pktcnts = (pkt_cnts_log_t *)MALLOCZ(dhdp->osh, sizeof(pkt_cnts_log_t));
- if (!pktcnts) {
- DHD_ERROR(("%s: failed to allocate memory for pktcnts\n",
- __FUNCTION__));
- return;
- }
-
- /* init timers */
- init_timer_compat(&pktcnts->pktcnt_timer, dhd_dump_pkt_timer, dhdp);
- dhdp->pktcnts = pktcnts;
-}
-
-void
-dhd_dump_pkt_deinit(dhd_pub_t *dhdp)
-{
- pkt_cnts_log_t *pktcnts;
-
- if (!dhdp || !dhdp->pktcnts) {
- DHD_ERROR(("%s: dhdp or pktcnts is NULL\n", __FUNCTION__));
- return;
- }
-
- pktcnts = (pkt_cnts_log_t *)(dhdp->pktcnts);
- pktcnts->enabled = FALSE;
- del_timer_sync(&pktcnts->pktcnt_timer);
- MFREE(dhdp->osh, dhdp->pktcnts, sizeof(pkt_cnts_log_t));
- dhdp->pktcnts = NULL;
-}
-
-void
-dhd_dump_pkt_clear(dhd_pub_t *dhdp)
-{
- pkt_cnts_log_t *pktcnts;
-
- if (!dhdp || !dhdp->pktcnts) {
- DHD_ERROR(("%s: dhdp or pktcnts is NULL\n", __FUNCTION__));
- return;
- }
-
- pktcnts = (pkt_cnts_log_t *)(dhdp->pktcnts);
- pktcnts->enabled = FALSE;
- del_timer_sync(&pktcnts->pktcnt_timer);
- pktcnts->reason = 0;
- bzero(&pktcnts->arp_cnt, sizeof(pkt_cnt_t));
- bzero(&pktcnts->dns_cnt, sizeof(pkt_cnt_t));
-}
-
-bool
-dhd_dump_pkt_enabled(dhd_pub_t *dhdp)
-{
- pkt_cnts_log_t *pktcnts;
-
- if (!dhdp || !dhdp->pktcnts) {
- return FALSE;
- }
-
- pktcnts = (pkt_cnts_log_t *)(dhdp->pktcnts);
-
- return pktcnts->enabled;
-}
-#else
-static INLINE void
-dhd_dump_pkt_cnts_inc(dhd_pub_t *dhdp, bool tx, uint16 *pktfate, uint16 pkttype) { }
-static INLINE bool
-dhd_dump_pkt_enabled(dhd_pub_t *dhdp) { return FALSE; }
-#endif /* DHD_PKTDUMP_ROAM */
-
-#ifdef DHD_8021X_DUMP
-static void
-dhd_dump_wsc_message(dhd_pub_t *dhd, int ifidx, uint8 *pktdata,
- uint32 pktlen, bool tx, uint32 *pkthash, uint16 *pktfate)
-{
- eapol_header_t *eapol_hdr;
- eap_header_fmt_t *eap_hdr;
- eap_wsc_fmt_t *eap_wsc;
- char *ifname;
- uint16 eap_len;
- bool cond;
- char seabuf[ETHER_ADDR_STR_LEN]="";
- char deabuf[ETHER_ADDR_STR_LEN]="";
-
- if (!pktdata) {
- DHD_ERROR(("%s: pktdata is NULL\n", __FUNCTION__));
- return;
- }
-
- if (pktlen < (ETHER_HDR_LEN + EAPOL_HDR_LEN)) {
- DHD_ERROR(("%s: invalid pkt length\n", __FUNCTION__));
- return;
- }
-
- bcm_ether_ntoa((struct ether_addr *)pktdata, deabuf);
- bcm_ether_ntoa((struct ether_addr *)(pktdata+6), seabuf);
-
- eapol_hdr = (eapol_header_t *)pktdata;
- eap_hdr = (eap_header_fmt_t *)(eapol_hdr->body);
- if (eap_hdr->type != EAP_TYPE_EXP) {
- return;
- }
-
- eap_len = ntoh16(eap_hdr->len);
- if (eap_len < EAP_WSC_MIN_DATA_LEN) {
- return;
- }
-
- eap_wsc = (eap_wsc_fmt_t *)(eap_hdr->data);
- if (bcmp(eap_wsc->oui, (const uint8 *)WFA_VID, WFA_VID_LEN) ||
- (ntoh32(eap_wsc->ouitype) != WFA_VTYPE)) {
- return;
- }
-
- if (eap_wsc->flags) {
- return;
- }
-
- ifname = dhd_ifname(dhd, ifidx);
- cond = (tx && pktfate) ? FALSE : TRUE;
-
- if (eap_wsc->opcode == WSC_OPCODE_MSG) {
- const uint8 *tlv_buf = (const uint8 *)(eap_wsc->data);
- const uint8 *msg;
- uint16 msglen;
- uint16 wsc_data_len = (uint16)(eap_len - EAP_HDR_LEN - EAP_WSC_DATA_OFFSET);
- bcm_xtlv_opts_t opt = BCM_XTLV_OPTION_IDBE | BCM_XTLV_OPTION_LENBE;
-
- msg = bcm_get_data_from_xtlv_buf(tlv_buf, wsc_data_len,
- WSC_ATTR_MSG, &msglen, opt);
- if (msg && msglen) {
- switch (*msg) {
- case WSC_MSG_M1:
- dhd->conf->eapol_status = EAPOL_STATUS_WPS_M1;
- DHD_STATLOG_DATA(dhd, ST(WPS_M1), ifidx, tx, cond);
- EAP_PRINT("EAP Packet, WPS M1");
- break;
- case WSC_MSG_M2:
- dhd->conf->eapol_status = EAPOL_STATUS_WPS_M2;
- DHD_STATLOG_DATA(dhd, ST(WPS_M2), ifidx, tx, cond);
- EAP_PRINT("EAP Packet, WPS M2");
- break;
- case WSC_MSG_M3:
- dhd->conf->eapol_status = EAPOL_STATUS_WPS_M3;
- DHD_STATLOG_DATA(dhd, ST(WPS_M3), ifidx, tx, cond);
- EAP_PRINT("EAP Packet, WPS M3");
- break;
- case WSC_MSG_M4:
- dhd->conf->eapol_status = EAPOL_STATUS_WPS_M4;
- DHD_STATLOG_DATA(dhd, ST(WPS_M4), ifidx, tx, cond);
- EAP_PRINT("EAP Packet, WPS M4");
- break;
- case WSC_MSG_M5:
- dhd->conf->eapol_status = EAPOL_STATUS_WPS_M5;
- DHD_STATLOG_DATA(dhd, ST(WPS_M5), ifidx, tx, cond);
- EAP_PRINT("EAP Packet, WPS M5");
- break;
- case WSC_MSG_M6:
- dhd->conf->eapol_status = EAPOL_STATUS_WPS_M6;
- DHD_STATLOG_DATA(dhd, ST(WPS_M6), ifidx, tx, cond);
- EAP_PRINT("EAP Packet, WPS M6");
- break;
- case WSC_MSG_M7:
- dhd->conf->eapol_status = EAPOL_STATUS_WPS_M7;
- DHD_STATLOG_DATA(dhd, ST(WPS_M7), ifidx, tx, cond);
- EAP_PRINT("EAP Packet, WPS M7");
- break;
- case WSC_MSG_M8:
- dhd->conf->eapol_status = EAPOL_STATUS_WPS_M8;
- DHD_STATLOG_DATA(dhd, ST(WPS_M8), ifidx, tx, cond);
- EAP_PRINT("EAP Packet, WPS M8");
- break;
- default:
- break;
- }
- }
- } else if (eap_wsc->opcode == WSC_OPCODE_START) {
- dhd->conf->eapol_status = EAPOL_STATUS_WSC_START;
- DHD_STATLOG_DATA(dhd, ST(WSC_START), ifidx, tx, cond);
- EAP_PRINT("EAP Packet, WSC Start");
- } else if (eap_wsc->opcode == WSC_OPCODE_DONE) {
- dhd->conf->eapol_status = EAPOL_STATUS_WSC_DONE;
- DHD_STATLOG_DATA(dhd, ST(WSC_DONE), ifidx, tx, cond);
- EAP_PRINT("EAP Packet, WSC Done");
- }
-}
-
-static void
-dhd_dump_eap_packet(dhd_pub_t *dhd, int ifidx, uint8 *pktdata,
- uint32 pktlen, bool tx, uint32 *pkthash, uint16 *pktfate)
-{
- eapol_header_t *eapol_hdr;
- eap_header_fmt_t *eap_hdr;
- char *ifname;
- bool cond;
- char seabuf[ETHER_ADDR_STR_LEN]="";
- char deabuf[ETHER_ADDR_STR_LEN]="";
-
- if (!pktdata) {
- DHD_PKTDUMP(("%s: pktdata is NULL\n", __FUNCTION__));
- return;
- }
-
- bcm_ether_ntoa((struct ether_addr *)pktdata, deabuf);
- bcm_ether_ntoa((struct ether_addr *)(pktdata+6), seabuf);
-
- eapol_hdr = (eapol_header_t *)pktdata;
- eap_hdr = (eap_header_fmt_t *)(eapol_hdr->body);
- ifname = dhd_ifname(dhd, ifidx);
- cond = (tx && pktfate) ? FALSE : TRUE;
-
- if (eap_hdr->code == EAP_CODE_REQUEST ||
- eap_hdr->code == EAP_CODE_RESPONSE) {
- bool isreq = (eap_hdr->code == EAP_CODE_REQUEST);
- switch (eap_hdr->type) {
- case EAP_TYPE_IDENT:
- if (isreq) {
- dhd->conf->eapol_status = EAPOL_STATUS_REQID;
- DHD_STATLOG_DATA(dhd, ST(EAP_REQ_IDENTITY), ifidx, tx, cond);
- EAP_PRINT("EAP Packet, Request, Identity");
- } else {
- dhd->conf->eapol_status = EAPOL_STATUS_RSPID;
- DHD_STATLOG_DATA(dhd, ST(EAP_RESP_IDENTITY), ifidx, tx, cond);
- EAP_PRINT("EAP Packet, Response, Identity");
- }
- break;
- case EAP_TYPE_TLS:
- if (isreq) {
- DHD_STATLOG_DATA(dhd, ST(EAP_REQ_TLS), ifidx, tx, cond);
- EAP_PRINT("EAP Packet, Request, TLS");
- } else {
- DHD_STATLOG_DATA(dhd, ST(EAP_RESP_TLS), ifidx, tx, cond);
- EAP_PRINT("EAP Packet, Response, TLS");
- }
- break;
- case EAP_TYPE_LEAP:
- if (isreq) {
- DHD_STATLOG_DATA(dhd, ST(EAP_REQ_LEAP), ifidx, tx, cond);
- EAP_PRINT("EAP Packet, Request, LEAP");
- } else {
- DHD_STATLOG_DATA(dhd, ST(EAP_RESP_LEAP), ifidx, tx, cond);
- EAP_PRINT("EAP Packet, Response, LEAP");
- }
- break;
- case EAP_TYPE_TTLS:
- if (isreq) {
- DHD_STATLOG_DATA(dhd, ST(EAP_REQ_TTLS), ifidx, tx, cond);
- EAP_PRINT("EAP Packet, Request, TTLS");
- } else {
- DHD_STATLOG_DATA(dhd, ST(EAP_RESP_TTLS), ifidx, tx, cond);
- EAP_PRINT("EAP Packet, Response, TTLS");
- }
- break;
- case EAP_TYPE_AKA:
- if (isreq) {
- DHD_STATLOG_DATA(dhd, ST(EAP_REQ_AKA), ifidx, tx, cond);
- EAP_PRINT("EAP Packet, Request, AKA");
- } else {
- DHD_STATLOG_DATA(dhd, ST(EAP_RESP_AKA), ifidx, tx, cond);
- EAP_PRINT("EAP Packet, Response, AKA");
- }
- break;
- case EAP_TYPE_PEAP:
- if (isreq) {
- DHD_STATLOG_DATA(dhd, ST(EAP_REQ_PEAP), ifidx, tx, cond);
- EAP_PRINT("EAP Packet, Request, PEAP");
- } else {
- DHD_STATLOG_DATA(dhd, ST(EAP_RESP_PEAP), ifidx, tx, cond);
- EAP_PRINT("EAP Packet, Response, PEAP");
- }
- break;
- case EAP_TYPE_FAST:
- if (isreq) {
- DHD_STATLOG_DATA(dhd, ST(EAP_REQ_FAST), ifidx, tx, cond);
- EAP_PRINT("EAP Packet, Request, FAST");
- } else {
- DHD_STATLOG_DATA(dhd, ST(EAP_RESP_FAST), ifidx, tx, cond);
- EAP_PRINT("EAP Packet, Response, FAST");
- }
- break;
- case EAP_TYPE_PSK:
- if (isreq) {
- DHD_STATLOG_DATA(dhd, ST(EAP_REQ_PSK), ifidx, tx, cond);
- EAP_PRINT("EAP Packet, Request, PSK");
- } else {
- DHD_STATLOG_DATA(dhd, ST(EAP_RESP_PSK), ifidx, tx, cond);
- EAP_PRINT("EAP Packet, Response, PSK");
- }
- break;
- case EAP_TYPE_AKAP:
- if (isreq) {
- DHD_STATLOG_DATA(dhd, ST(EAP_REQ_AKAP), ifidx, tx, cond);
- EAP_PRINT("EAP Packet, Request, AKAP");
- } else {
- DHD_STATLOG_DATA(dhd, ST(EAP_RESP_AKAP), ifidx, tx, cond);
- EAP_PRINT("EAP Packet, Response, AKAP");
- }
- break;
- case EAP_TYPE_EXP:
- dhd_dump_wsc_message(dhd, ifidx, pktdata, pktlen, tx,
- pkthash, pktfate);
- break;
- default:
- break;
- }
- } else if (eap_hdr->code == EAP_CODE_SUCCESS) {
- DHD_STATLOG_DATA(dhd, ST(EAP_SUCCESS), ifidx, tx, cond);
- EAP_PRINT("EAP Packet, Success");
- } else if (eap_hdr->code == EAP_CODE_FAILURE) {
- DHD_STATLOG_DATA(dhd, ST(EAP_FAILURE), ifidx, tx, cond);
- EAP_PRINT("EAP Packet, Failure");
- }
-}
-
-static void
-dhd_dump_eapol_4way_message(dhd_pub_t *dhd, int ifidx, uint8 *pktdata, bool tx,
- uint32 *pkthash, uint16 *pktfate)
-{
- eapol_header_t *eapol_hdr;
- eapol_key_hdr_t *eap_key;
- msg_eapol_t type;
- char *ifname;
- bool cond;
- char seabuf[ETHER_ADDR_STR_LEN]="";
- char deabuf[ETHER_ADDR_STR_LEN]="";
-
- if (!pktdata) {
- DHD_PKTDUMP(("%s: pktdata is NULL\n", __FUNCTION__));
- return;
- }
-
- bcm_ether_ntoa((struct ether_addr *)pktdata, deabuf);
- bcm_ether_ntoa((struct ether_addr *)(pktdata+6), seabuf);
-
- type = dhd_is_4way_msg(pktdata);
- ifname = dhd_ifname(dhd, ifidx);
- eapol_hdr = (eapol_header_t *)pktdata;
- eap_key = (eapol_key_hdr_t *)(eapol_hdr->body);
- cond = (tx && pktfate) ? FALSE : TRUE;
-
- if (eap_key->type != EAPOL_WPA2_KEY) {
- EAP_PRINT_OTHER("NON EAPOL_WPA2_KEY");
- return;
- }
-
- switch (type) {
- case EAPOL_4WAY_M1:
- dhd->conf->eapol_status = EAPOL_STATUS_4WAY_M1;
- DHD_STATLOG_DATA(dhd, ST(EAPOL_M1), ifidx, tx, cond);
- EAP_PRINT("EAPOL Packet, 4-way handshake, M1");
- break;
- case EAPOL_4WAY_M2:
- dhd->conf->eapol_status = EAPOL_STATUS_4WAY_M2;
- DHD_STATLOG_DATA(dhd, ST(EAPOL_M2), ifidx, tx, cond);
- EAP_PRINT("EAPOL Packet, 4-way handshake, M2");
- break;
- case EAPOL_4WAY_M3:
- dhd->conf->eapol_status = EAPOL_STATUS_4WAY_M3;
- DHD_STATLOG_DATA(dhd, ST(EAPOL_M3), ifidx, tx, cond);
- EAP_PRINT("EAPOL Packet, 4-way handshake, M3");
- break;
- case EAPOL_4WAY_M4:
- dhd->conf->eapol_status = EAPOL_STATUS_4WAY_M4;
- DHD_STATLOG_DATA(dhd, ST(EAPOL_M4), ifidx, tx, cond);
- EAP_PRINT("EAPOL Packet, 4-way handshake, M4");
- break;
- case EAPOL_GROUPKEY_M1:
- DHD_STATLOG_DATA(dhd, ST(EAPOL_GROUPKEY_M1), ifidx, tx, cond);
- EAP_PRINT_REPLAY("EAPOL Packet, GROUP Key handshake, M1");
- break;
- case EAPOL_GROUPKEY_M2:
- DHD_STATLOG_DATA(dhd, ST(EAPOL_GROUPKEY_M2), ifidx, tx, cond);
- EAP_PRINT_REPLAY("EAPOL Packet, GROUP Key handshake, M2");
- if (ifidx == 0 && tx && pktfate) {
- dhd_dump_mod_pkt_timer(dhd, PKT_CNT_RSN_GRPKEY_UP);
- }
- break;
- default:
- DHD_STATLOG_DATA(dhd, ST(8021X_OTHER), ifidx, tx, cond);
- EAP_PRINT_OTHER("OTHER 4WAY");
- break;
- }
-}
-
-void
-dhd_dump_eapol_message(dhd_pub_t *dhd, int ifidx, uint8 *pktdata,
- uint32 pktlen, bool tx, uint32 *pkthash, uint16 *pktfate)
-{
- char *ifname;
- eapol_header_t *eapol_hdr = (eapol_header_t *)pktdata;
- bool cond;
- char seabuf[ETHER_ADDR_STR_LEN]="";
- char deabuf[ETHER_ADDR_STR_LEN]="";
-
- if (!pktdata) {
- DHD_ERROR(("%s: pktdata is NULL\n", __FUNCTION__));
- return;
- }
-
- bcm_ether_ntoa((struct ether_addr *)pktdata, deabuf);
- bcm_ether_ntoa((struct ether_addr *)(pktdata+6), seabuf);
-
- eapol_hdr = (eapol_header_t *)pktdata;
- ifname = dhd_ifname(dhd, ifidx);
- cond = (tx && pktfate) ? FALSE : TRUE;
-
- if (eapol_hdr->type == EAP_PACKET) {
- dhd_dump_eap_packet(dhd, ifidx, pktdata, pktlen, tx,
- pkthash, pktfate);
- } else if (eapol_hdr->type == EAPOL_START) {
- DHD_STATLOG_DATA(dhd, ST(EAPOL_START), ifidx, tx, cond);
- EAP_PRINT("EAP Packet, EAPOL-Start");
- } else if (eapol_hdr->type == EAPOL_KEY) {
- dhd_dump_eapol_4way_message(dhd, ifidx, pktdata, tx,
- pkthash, pktfate);
- } else {
- DHD_STATLOG_DATA(dhd, ST(8021X_OTHER), ifidx, tx, cond);
- EAP_PRINT_OTHER("OTHER 8021X");
- }
-}
-#endif /* DHD_8021X_DUMP */
-
-#ifdef DHD_DHCP_DUMP
-#define BOOTP_CHADDR_LEN 16
-#define BOOTP_SNAME_LEN 64
-#define BOOTP_FILE_LEN 128
-#define BOOTP_MIN_DHCP_OPT_LEN 312
-#define BOOTP_MAGIC_COOKIE_LEN 4
-
-#define DHCP_MSGTYPE_DISCOVER 1
-#define DHCP_MSGTYPE_OFFER 2
-#define DHCP_MSGTYPE_REQUEST 3
-#define DHCP_MSGTYPE_DECLINE 4
-#define DHCP_MSGTYPE_ACK 5
-#define DHCP_MSGTYPE_NAK 6
-#define DHCP_MSGTYPE_RELEASE 7
-#define DHCP_MSGTYPE_INFORM 8
-
-#define DHCP_PRINT(str) \
- do { \
- if (tx) { \
- DHD_PKTDUMP(("[dhd-%s] " str " %8s[%8s] [TX] : %s(%s) %s %s(%s)"TXFATE_FMT"\n", \
- ifname, typestr, opstr, tx?sabuf:dabuf, tx?seabuf:deabuf, \
- tx?"->":"<-", tx?dabuf:sabuf, tx?deabuf:seabuf, \
- TX_PKTHASH(pkthash), TX_FATE(pktfate))); \
- } else { \
- DHD_PKTDUMP(("[dhd-%s] " str " %8s[%8s] [RX] : %s(%s) %s %s(%s)\n", \
- ifname, typestr, opstr, tx?sabuf:dabuf, tx?seabuf:deabuf, \
- tx?"->":"<-", tx?dabuf:sabuf, tx?deabuf:seabuf)); \
- } \
- } while (0)
-
-typedef struct bootp_fmt {
- struct ipv4_hdr iph;
- struct bcmudp_hdr udph;
- uint8 op;
- uint8 htype;
- uint8 hlen;
- uint8 hops;
- uint32 transaction_id;
- uint16 secs;
- uint16 flags;
- uint32 client_ip;
- uint32 assigned_ip;
- uint32 server_ip;
- uint32 relay_ip;
- uint8 hw_address[BOOTP_CHADDR_LEN];
- uint8 server_name[BOOTP_SNAME_LEN];
- uint8 file_name[BOOTP_FILE_LEN];
- uint8 options[BOOTP_MIN_DHCP_OPT_LEN];
-} PACKED_STRUCT bootp_fmt_t;
-
-static const uint8 bootp_magic_cookie[4] = { 99, 130, 83, 99 };
-static char dhcp_ops[][10] = {
- "NA", "REQUEST", "REPLY"
-};
-static char dhcp_types[][10] = {
- "NA", "DISCOVER", "OFFER", "REQUEST", "DECLINE", "ACK", "NAK", "RELEASE", "INFORM"
-};
-
-static const int dhcp_types_stat[9] = {
- ST(INVALID), ST(DHCP_DISCOVER), ST(DHCP_OFFER), ST(DHCP_REQUEST),
- ST(DHCP_DECLINE), ST(DHCP_ACK), ST(DHCP_NAK), ST(DHCP_RELEASE),
- ST(DHCP_INFORM)
-};
-
-void
-dhd_dhcp_dump(dhd_pub_t *dhdp, int ifidx, uint8 *pktdata, bool tx,
- uint32 *pkthash, uint16 *pktfate)
-{
- bootp_fmt_t *b = (bootp_fmt_t *)&pktdata[ETHER_HDR_LEN];
- struct ipv4_hdr *iph = &b->iph;
- uint8 *ptr, *opt, *end = (uint8 *) b + ntohs(b->iph.tot_len);
- int dhcp_type = 0, len, opt_len;
- char *ifname = NULL, *typestr = NULL, *opstr = NULL;
- bool cond;
- char sabuf[20]="", dabuf[20]="";
- char seabuf[ETHER_ADDR_STR_LEN]="";
- char deabuf[ETHER_ADDR_STR_LEN]="";
-
- if (!(dump_msg_level & DUMP_DHCP_VAL))
- return;
-
- /* check IP header */
- if ((IPV4_HLEN(iph) < IPV4_HLEN_MIN) ||
- IP_VER(iph) != IP_VER_4 ||
- IPV4_PROT(iph) != IP_PROT_UDP) {
- return;
- }
-
- /* check UDP port for bootp (67, 68) */
- if (b->udph.src_port != htons(DHCP_PORT_SERVER) &&
- b->udph.src_port != htons(DHCP_PORT_CLIENT) &&
- b->udph.dst_port != htons(DHCP_PORT_SERVER) &&
- b->udph.dst_port != htons(DHCP_PORT_CLIENT)) {
- return;
- }
-
- /* check header length */
- if (ntohs(iph->tot_len) < ntohs(b->udph.len) + sizeof(struct bcmudp_hdr)) {
- return;
- }
- bcm_ip_ntoa((struct ipv4_addr *)iph->src_ip, sabuf);
- bcm_ip_ntoa((struct ipv4_addr *)iph->dst_ip, dabuf);
- bcm_ether_ntoa((struct ether_addr *)pktdata, deabuf);
- bcm_ether_ntoa((struct ether_addr *)(pktdata+6), seabuf);
-
- ifname = dhd_ifname(dhdp, ifidx);
- cond = (tx && pktfate) ? FALSE : TRUE;
- len = ntohs(b->udph.len) - sizeof(struct bcmudp_hdr);
- opt_len = len - (sizeof(*b) - sizeof(struct ipv4_hdr) -
- sizeof(struct bcmudp_hdr) - sizeof(b->options));
-
- /* parse bootp options */
- if (opt_len >= BOOTP_MAGIC_COOKIE_LEN &&
- !memcmp(b->options, bootp_magic_cookie, BOOTP_MAGIC_COOKIE_LEN)) {
- ptr = &b->options[BOOTP_MAGIC_COOKIE_LEN];
- while (ptr < end && *ptr != 0xff) {
- opt = ptr++;
- if (*opt == 0) {
- continue;
- }
- ptr += *ptr + 1;
- if (ptr >= end) {
- break;
- }
- if (*opt == DHCP_OPT_MSGTYPE) {
- if (opt[1]) {
- dhcp_type = opt[2];
- typestr = dhcp_types[dhcp_type];
- opstr = dhcp_ops[b->op];
- DHD_STATLOG_DATA(dhdp, dhcp_types_stat[dhcp_type],
- ifidx, tx, cond);
- DHCP_PRINT("DHCP");
- break;
- }
- }
- }
- }
-}
-#endif /* DHD_DHCP_DUMP */
-
-#ifdef DHD_ICMP_DUMP
-#define ICMP_TYPE_DEST_UNREACH 3
-#define ICMP_ECHO_SEQ_OFFSET 6
-#define ICMP_ECHO_SEQ(h) (*(uint16 *)((uint8 *)(h) + (ICMP_ECHO_SEQ_OFFSET)))
-#define ICMP_PING_PRINT(str) \
- do { \
- if (tx) { \
- DHD_PKTDUMP_MEM(("[dhd-%s] "str " [TX] : %s(%s) %s %s(%s) SEQNUM=%d" \
- TXFATE_FMT"\n", ifname, tx?sabuf:dabuf, tx?seabuf:deabuf, \
- tx?"->":"<-", tx?dabuf:sabuf, tx?deabuf:seabuf, seqnum, \
- TX_PKTHASH(pkthash), TX_FATE(pktfate))); \
- } else { \
- DHD_PKTDUMP_MEM(("[dhd-%s] "str " [RX] : %s(%s) %s %s(%s) SEQNUM=%d\n", \
- ifname, tx?sabuf:dabuf, tx?seabuf:deabuf, \
- tx?"->":"<-", tx?dabuf:sabuf, tx?deabuf:seabuf, seqnum)); \
- } \
- } while (0)
-
-#define ICMP_PRINT(str) \
- do { \
- if (tx) { \
- DHD_PKTDUMP_MEM(("[dhd-%s] "str " [TX] : %s(%s) %s %s(%s) TYPE=%d, CODE=%d" \
- TXFATE_FMT "\n", ifname, tx?sabuf:dabuf, tx?seabuf:deabuf, \
- tx?"->":"<-", tx?dabuf:sabuf, tx?deabuf:seabuf, type, code, \
- TX_PKTHASH(pkthash), TX_FATE(pktfate))); \
- } else { \
- DHD_PKTDUMP_MEM(("[dhd-%s] "str " [RX] : %s(%s) %s %s(%s) TYPE=%d," \
- " CODE=%d\n", ifname, tx?sabuf:dabuf, tx?seabuf:deabuf, \
- tx?"->":"<-", tx?dabuf:sabuf, tx?deabuf:seabuf, type, code)); \
- } \
- } while (0)
-
-void
-dhd_icmp_dump(dhd_pub_t *dhdp, int ifidx, uint8 *pktdata, bool tx,
- uint32 *pkthash, uint16 *pktfate)
-{
- uint8 *pkt = (uint8 *)&pktdata[ETHER_HDR_LEN];
- struct ipv4_hdr *iph = (struct ipv4_hdr *)pkt;
- struct bcmicmp_hdr *icmph;
- char *ifname;
- bool cond;
- uint16 seqnum, type, code;
- char sabuf[20]="", dabuf[20]="";
- char seabuf[ETHER_ADDR_STR_LEN]="";
- char deabuf[ETHER_ADDR_STR_LEN]="";
-
- if (!(dump_msg_level & DUMP_ICMP_VAL))
- return;
-
- /* check IP header */
- if ((IPV4_HLEN(iph) < IPV4_HLEN_MIN) ||
- IP_VER(iph) != IP_VER_4 ||
- IPV4_PROT(iph) != IP_PROT_ICMP) {
- return;
- }
-
- /* check header length */
- if (ntohs(iph->tot_len) - IPV4_HLEN(iph) < sizeof(struct bcmicmp_hdr)) {
- return;
- }
-
- ifname = dhd_ifname(dhdp, ifidx);
- cond = (tx && pktfate) ? FALSE : TRUE;
- icmph = (struct bcmicmp_hdr *)((uint8 *)pkt + sizeof(struct ipv4_hdr));
- seqnum = 0;
- type = icmph->type;
- code = icmph->code;
- bcm_ip_ntoa((struct ipv4_addr *)iph->src_ip, sabuf);
- bcm_ip_ntoa((struct ipv4_addr *)iph->dst_ip, dabuf);
- bcm_ether_ntoa((struct ether_addr *)pktdata, deabuf);
- bcm_ether_ntoa((struct ether_addr *)(pktdata+6), seabuf);
- if (type == ICMP_TYPE_ECHO_REQUEST) {
- seqnum = ntoh16(ICMP_ECHO_SEQ(icmph));
- DHD_STATLOG_DATA(dhdp, ST(ICMP_PING_REQ), ifidx, tx, cond);
- ICMP_PING_PRINT("PING REQUEST");
- } else if (type == ICMP_TYPE_ECHO_REPLY) {
- seqnum = ntoh16(ICMP_ECHO_SEQ(icmph));
- DHD_STATLOG_DATA(dhdp, ST(ICMP_PING_RESP), ifidx, tx, cond);
- ICMP_PING_PRINT("PING REPLY ");
- } else if (type == ICMP_TYPE_DEST_UNREACH) {
- DHD_STATLOG_DATA(dhdp, ST(ICMP_DEST_UNREACH), ifidx, tx, cond);
- ICMP_PRINT("ICMP DEST UNREACH");
- } else {
- DHD_STATLOG_DATA(dhdp, ST(ICMP_OTHER), ifidx, tx, cond);
- ICMP_PRINT("ICMP OTHER");
- }
-}
-#endif /* DHD_ICMP_DUMP */
-
-#ifdef DHD_ARP_DUMP
-#define ARP_PRINT(str) \
- do { \
- if (tx) { \
- if (dump_enabled && pktfate && !TX_FATE_ACKED(pktfate)) { \
- DHD_PKTDUMP(("[dhd-%s] "str " [TX] : %s(%s) %s %s(%s)"TXFATE_FMT"\n", \
- ifname, tx?sabuf:dabuf, tx?seabuf:deabuf, \
- tx?"->":"<-", tx?dabuf:sabuf, tx?deabuf:seabuf, \
- TX_PKTHASH(pkthash), TX_FATE(pktfate))); \
- } else { \
- DHD_PKTDUMP_MEM(("[dhd-%s] "str " [TX] : %s(%s) %s %s(%s)"TXFATE_FMT"\n", \
- ifname, tx?sabuf:dabuf, tx?seabuf:deabuf, \
- tx?"->":"<-", tx?dabuf:sabuf, tx?deabuf:seabuf, \
- TX_PKTHASH(pkthash), TX_FATE(pktfate))); \
- } \
- } else { \
- DHD_PKTDUMP_MEM(("[dhd-%s] "str " [RX] : %s(%s) %s %s(%s)\n", \
- ifname, tx?sabuf:dabuf, tx?seabuf:deabuf, \
- tx?"->":"<-", tx?dabuf:sabuf, tx?deabuf:seabuf)); \
- } \
- } while (0) \
-
-#define ARP_PRINT_OTHER(str) \
- do { \
- if (tx) { \
- if (dump_enabled && pktfate && !TX_FATE_ACKED(pktfate)) { \
- DHD_PKTDUMP(("[dhd-%s] "str " [TX] : %s(%s) %s %s(%s) op_code=%d" \
- TXFATE_FMT "\n", ifname, tx?sabuf:dabuf, tx?seabuf:deabuf, \
- tx?"->":"<-", tx?dabuf:sabuf, tx?deabuf:seabuf, opcode, \
- TX_PKTHASH(pkthash), TX_FATE(pktfate))); \
- } else { \
- DHD_PKTDUMP_MEM(("[dhd-%s] "str " [TX] : %s(%s) %s %s(%s) op_code=%d" \
- TXFATE_FMT "\n", ifname, tx?sabuf:dabuf, tx?seabuf:deabuf, \
- tx?"->":"<-", tx?dabuf:sabuf, tx?deabuf:seabuf, opcode, \
- TX_PKTHASH(pkthash), TX_FATE(pktfate))); \
- } \
- } else { \
- DHD_PKTDUMP_MEM(("[dhd-%s] "str " [RX] : %s(%s) %s %s(%s) op_code=%d\n", \
- ifname, tx?sabuf:dabuf, tx?seabuf:deabuf, \
- tx?"->":"<-", tx?dabuf:sabuf, tx?deabuf:seabuf, opcode)); \
- } \
- } while (0)
-
-void
-dhd_arp_dump(dhd_pub_t *dhdp, int ifidx, uint8 *pktdata, bool tx,
- uint32 *pkthash, uint16 *pktfate)
-{
- uint8 *pkt = (uint8 *)&pktdata[ETHER_HDR_LEN];
- struct bcmarp *arph = (struct bcmarp *)pkt;
- char *ifname;
- uint16 opcode;
- bool cond, dump_enabled;
- char sabuf[20]="", dabuf[20]="";
- char seabuf[ETHER_ADDR_STR_LEN]="";
- char deabuf[ETHER_ADDR_STR_LEN]="";
-
- if (!(dump_msg_level & DUMP_ARP_VAL))
- return;
-
- /* validation check */
- if (arph->htype != hton16(HTYPE_ETHERNET) ||
- arph->hlen != ETHER_ADDR_LEN ||
- arph->plen != 4) {
- return;
- }
-
- ifname = dhd_ifname(dhdp, ifidx);
- opcode = ntoh16(arph->oper);
- cond = (tx && pktfate) ? FALSE : TRUE;
- dump_enabled = dhd_dump_pkt_enabled(dhdp);
- bcm_ip_ntoa((struct ipv4_addr *)arph->src_ip, sabuf);
- bcm_ip_ntoa((struct ipv4_addr *)arph->dst_ip, dabuf);
- bcm_ether_ntoa((struct ether_addr *)arph->dst_eth, deabuf);
- bcm_ether_ntoa((struct ether_addr *)arph->src_eth, seabuf);
- if (opcode == ARP_OPC_REQUEST) {
- DHD_STATLOG_DATA(dhdp, ST(ARP_REQ), ifidx, tx, cond);
- ARP_PRINT("ARP REQUEST ");
- } else if (opcode == ARP_OPC_REPLY) {
- DHD_STATLOG_DATA(dhdp, ST(ARP_RESP), ifidx, tx, cond);
- ARP_PRINT("ARP RESPONSE");
- } else {
- ARP_PRINT_OTHER("ARP OTHER");
- }
-
- if (ifidx == 0) {
- dhd_dump_pkt_cnts_inc(dhdp, tx, pktfate, PKT_CNT_TYPE_ARP);
- }
-}
-#endif /* DHD_ARP_DUMP */
-
-#ifdef DHD_DNS_DUMP
-typedef struct dns_fmt {
- struct ipv4_hdr iph;
- struct bcmudp_hdr udph;
- uint16 id;
- uint16 flags;
- uint16 qdcount;
- uint16 ancount;
- uint16 nscount;
- uint16 arcount;
-} PACKED_STRUCT dns_fmt_t;
-
-#define UDP_PORT_DNS 53
-#define DNS_QR_LOC 15
-#define DNS_OPCODE_LOC 11
-#define DNS_RCODE_LOC 0
-#define DNS_QR_MASK ((0x1) << (DNS_QR_LOC))
-#define DNS_OPCODE_MASK ((0xF) << (DNS_OPCODE_LOC))
-#define DNS_RCODE_MASK ((0xF) << (DNS_RCODE_LOC))
-#define GET_DNS_QR(flags) (((flags) & (DNS_QR_MASK)) >> (DNS_QR_LOC))
-#define GET_DNS_OPCODE(flags) (((flags) & (DNS_OPCODE_MASK)) >> (DNS_OPCODE_LOC))
-#define GET_DNS_RCODE(flags) (((flags) & (DNS_RCODE_MASK)) >> (DNS_RCODE_LOC))
-#define DNS_UNASSIGNED_OPCODE(flags) ((GET_DNS_OPCODE(flags) >= (6)))
-
-static const char dns_opcode_types[][11] = {
- "QUERY", "IQUERY", "STATUS", "UNASSIGNED", "NOTIFY", "UPDATE"
-};
-
-#define DNSOPCODE(op) \
- (DNS_UNASSIGNED_OPCODE(flags) ? "UNASSIGNED" : dns_opcode_types[op])
-
-#define DNS_REQ_PRINT(str) \
- do { \
- if (tx) { \
- if (dump_enabled && pktfate && !TX_FATE_ACKED(pktfate)) { \
- DHD_PKTDUMP(("[dhd-%s] " str " [TX] : %s(%s) %s %s(%s) ID:0x%04X OPCODE:%s" \
- TXFATE_FMT "\n", ifname, tx?sabuf:dabuf, tx?seabuf:deabuf, \
- tx?"->":"<-", tx?dabuf:sabuf, tx?deabuf:seabuf, \
- id, DNSOPCODE(opcode), TX_PKTHASH(pkthash), TX_FATE(pktfate))); \
- } else { \
- DHD_PKTDUMP_MEM(("[dhd-%s] " str " [TX] : %s(%s) %s %s(%s) ID:0x%04X OPCODE:%s" \
- TXFATE_FMT "\n", ifname, tx?sabuf:dabuf, tx?seabuf:deabuf, \
- tx?"->":"<-", tx?dabuf:sabuf, tx?deabuf:seabuf, \
- id, DNSOPCODE(opcode), TX_PKTHASH(pkthash), TX_FATE(pktfate))); \
- } \
- } else { \
- DHD_PKTDUMP_MEM(("[dhd-%s] " str " [RX] : %s(%s) %s %s(%s) ID:0x%04X OPCODE:%s\n", \
- ifname, tx?sabuf:dabuf, tx?seabuf:deabuf, tx?"->":"<-", \
- tx?dabuf:sabuf, tx?deabuf:seabuf, id, DNSOPCODE(opcode))); \
- } \
- } while (0)
-
-#define DNS_RESP_PRINT(str) \
- do { \
- if (tx) { \
- if (dump_enabled && pktfate && !TX_FATE_ACKED(pktfate)) { \
- DHD_PKTDUMP(("[dhd-%s] " str " [TX] : %s(%s) %s %s(%s) ID:0x%04X OPCODE:%s RCODE:%d" \
- TXFATE_FMT "\n", ifname, tx?sabuf:dabuf, tx?seabuf:deabuf, \
- tx?"->":"<-", tx?dabuf:sabuf, tx?deabuf:seabuf, id, DNSOPCODE(opcode), \
- GET_DNS_RCODE(flags), TX_PKTHASH(pkthash), TX_FATE(pktfate))); \
- } else { \
- DHD_PKTDUMP_MEM(("[dhd-%s] " str " [TX] : %s(%s) %s %s(%s) ID:0x%04X OPCODE:%s RCODE:%d" \
- TXFATE_FMT "\n", ifname, tx?sabuf:dabuf, tx?seabuf:deabuf, \
- tx?"->":"<-", tx?dabuf:sabuf, tx?deabuf:seabuf, id, DNSOPCODE(opcode), \
- GET_DNS_RCODE(flags), TX_PKTHASH(pkthash), TX_FATE(pktfate))); \
- } \
- } else { \
- DHD_PKTDUMP_MEM(("[dhd-%s] " str " [RX] : %s(%s) %s %s(%s) ID:0x%04X OPCODE:%s RCODE:%d\n", \
- ifname, tx?sabuf:dabuf, tx?seabuf:deabuf, \
- tx?"->":"<-", tx?dabuf:sabuf, tx?deabuf:seabuf, \
- id, DNSOPCODE(opcode), GET_DNS_RCODE(flags))); \
- } \
- } while (0)
-
-void
-dhd_dns_dump(dhd_pub_t *dhdp, int ifidx, uint8 *pktdata, bool tx,
- uint32 *pkthash, uint16 *pktfate)
-{
- dns_fmt_t *dnsh = (dns_fmt_t *)&pktdata[ETHER_HDR_LEN];
- struct ipv4_hdr *iph = &dnsh->iph;
- uint16 flags, opcode, id;
- char *ifname;
- bool cond, dump_enabled;
- char sabuf[20]="", dabuf[20]="";
- char seabuf[ETHER_ADDR_STR_LEN]="";
- char deabuf[ETHER_ADDR_STR_LEN]="";
-
- if (!(dump_msg_level & DUMP_DNS_VAL))
- return;
-
- /* check IP header */
- if ((IPV4_HLEN(iph) < IPV4_HLEN_MIN) ||
- IP_VER(iph) != IP_VER_4 ||
- IPV4_PROT(iph) != IP_PROT_UDP) {
- return;
- }
-
- /* check UDP port for DNS */
- if (dnsh->udph.src_port != hton16(UDP_PORT_DNS) &&
- dnsh->udph.dst_port != hton16(UDP_PORT_DNS)) {
- return;
- }
-
- /* check header length */
- if (ntoh16(iph->tot_len) < (ntoh16(dnsh->udph.len) +
- sizeof(struct bcmudp_hdr))) {
- return;
- }
-
- ifname = dhd_ifname(dhdp, ifidx);
- cond = (tx && pktfate) ? FALSE : TRUE;
- dump_enabled = dhd_dump_pkt_enabled(dhdp);
- flags = hton16(dnsh->flags);
- opcode = GET_DNS_OPCODE(flags);
- id = hton16(dnsh->id);
- bcm_ip_ntoa((struct ipv4_addr *)iph->src_ip, sabuf);
- bcm_ip_ntoa((struct ipv4_addr *)iph->dst_ip, dabuf);
- bcm_ether_ntoa((struct ether_addr *)pktdata, deabuf);
- bcm_ether_ntoa((struct ether_addr *)(pktdata+6), seabuf);
- if (GET_DNS_QR(flags)) {
- /* Response */
- DHD_STATLOG_DATA(dhdp, ST(DNS_RESP), ifidx, tx, cond);
- DNS_RESP_PRINT("DNS RESPONSE");
- } else {
- /* Request */
- DHD_STATLOG_DATA(dhdp, ST(DNS_QUERY), ifidx, tx, cond);
- DNS_REQ_PRINT("DNS REQUEST");
- }
-
- if (ifidx == 0) {
- dhd_dump_pkt_cnts_inc(dhdp, tx, pktfate, PKT_CNT_TYPE_DNS);
- }
-}
-#endif /* DHD_DNS_DUMP */
-
-#ifdef DHD_TRX_DUMP
-void
-dhd_trx_pkt_dump(dhd_pub_t *dhdp, int ifidx, uint8 *pktdata, uint32 pktlen, bool tx)
-{
- struct ether_header *eh;
- uint16 protocol;
- char *pkttype = "UNKNOWN";
-
- if (!(dump_msg_level & DUMP_TRX_VAL))
- return;
-
- if (!dhdp) {
- DHD_ERROR(("%s: dhdp is NULL\n", __FUNCTION__));
- return;
- }
-
- if (!pktdata) {
- DHD_ERROR(("%s: pktdata is NULL\n", __FUNCTION__));
- return;
- }
-
- eh = (struct ether_header *)pktdata;
- protocol = hton16(eh->ether_type);
- BCM_REFERENCE(pktlen);
-
- switch (protocol) {
- case ETHER_TYPE_IP:
- pkttype = "IP";
- break;
- case ETHER_TYPE_ARP:
- pkttype = "ARP";
- break;
- case ETHER_TYPE_BRCM:
- pkttype = "BRCM";
- break;
- case ETHER_TYPE_802_1X:
- pkttype = "802.1X";
- break;
- case ETHER_TYPE_WAI:
- pkttype = "WAPI";
- break;
- default:
- break;
- }
-
- if (protocol != ETHER_TYPE_BRCM) {
- if (pktdata[0] == 0xFF) {
- DHD_PKTDUMP(("[dhd-%s] %s BROADCAST DUMP - %s\n",
- dhd_ifname(dhdp, ifidx), tx?"TX":"RX", pkttype));
- } else if (pktdata[0] & 1) {
- DHD_PKTDUMP(("[dhd-%s] %s MULTICAST DUMP " MACDBG " - %s\n",
- dhd_ifname(dhdp, ifidx), tx?"TX":"RX", MAC2STRDBG(pktdata), pkttype));
- } else {
- DHD_PKTDUMP(("[dhd-%s] %s DUMP - %s\n",
- dhd_ifname(dhdp, ifidx), tx?"TX":"RX", pkttype));
- }
-#ifdef DHD_RX_FULL_DUMP
- prhex("Data", pktdata, pktlen);
-#endif /* DHD_RX_FULL_DUMP */
- }
- else {
- DHD_PKTDUMP(("[dhd-%s] %s DUMP - %s\n",
- dhd_ifname(dhdp, ifidx), tx?"TX":"RX", pkttype));
- }
-}
-#endif /* DHD_RX_DUMP */
+++ /dev/null
-/*
- * Header file for the Packet dump helper functions
- *
- * Copyright (C) 1999-2019, Broadcom.
- *
- * Unless you and Broadcom execute a separate written software license
- * agreement governing use of this software, this software is licensed to you
- * under the terms of the GNU General Public License version 2 (the "GPL"),
- * available at http://www.broadcom.com/licenses/GPLv2.php, with the
- * following added to such license:
- *
- * As a special exception, the copyright holders of this software give you
- * permission to link this software with independent modules, and to copy and
- * distribute the resulting executable under terms of your choice, provided that
- * you also meet, for each linked independent module, the terms and conditions of
- * the license of that module. An independent module is a module which is not
- * derived from this software. The special exception does not apply to any
- * modifications of the software.
- *
- * Notwithstanding the above, under no circumstances may you combine this
- * software in any way with any other Broadcom software provided under a license
- * other than the GPL, without Broadcom's express prior written consent.
- *
- *
- * <<Broadcom-WL-IPTag/Open:>>
- *
- * $Id: dhd_linux_pktdump.h 820929 2019-05-21 14:09:11Z $
- */
-
-#ifndef __DHD_LINUX_PKTDUMP_H_
-#define __DHD_LINUX_PKTDUMP_H_
-
-#include <typedefs.h>
-#include <dhd.h>
-
-typedef enum {
- EAPOL_OTHER = 0,
- EAPOL_4WAY_M1,
- EAPOL_4WAY_M2,
- EAPOL_4WAY_M3,
- EAPOL_4WAY_M4,
- EAPOL_GROUPKEY_M1,
- EAPOL_GROUPKEY_M2
-} msg_eapol_t;
-
-typedef enum pkt_cnt_rsn {
- PKT_CNT_RSN_INVALID = 0,
- PKT_CNT_RSN_ROAM = 1,
- PKT_CNT_RSN_GRPKEY_UP = 2,
- PKT_CNT_RSN_CONNECT = 3,
- PKT_CNT_RSN_MAX = 4
-} pkt_cnt_rsn_t;
-
-extern msg_eapol_t dhd_is_4way_msg(uint8 *pktdata);
-extern void dhd_dump_pkt(dhd_pub_t *dhd, int ifidx, uint8 *pktdata,
- uint32 pktlen, bool tx, uint32 *pkthash, uint16 *pktfate);
-
-#ifdef DHD_PKTDUMP_ROAM
-extern void dhd_dump_mod_pkt_timer(dhd_pub_t *dhdp, uint16 rsn);
-extern void dhd_dump_pkt_init(dhd_pub_t *dhdp);
-extern void dhd_dump_pkt_deinit(dhd_pub_t *dhdp);
-extern void dhd_dump_pkt_clear(dhd_pub_t *dhdp);
-#else
-static INLINE void dhd_dump_mod_pkt_timer(dhd_pub_t *dhdp, uint16 rsn) { }
-static INLINE void dhd_dump_pkt_init(dhd_pub_t *dhdp) { }
-static INLINE void dhd_dump_pkt_deinit(dhd_pub_t *dhdp) { }
-static INLINE void dhd_dump_pkt_clear(dhd_pub_t *dhdp) { }
-#endif /* DHD_PKTDUMP_ROAM */
-
-/* Rx packet dump */
-#ifdef DHD_TRX_DUMP
-extern void dhd_trx_pkt_dump(dhd_pub_t *dhdp, int ifidx,
- uint8 *pktdata, uint32 pktlen, bool tx);
-#else
-static INLINE void dhd_trx_pkt_dump(dhd_pub_t *dhdp, int ifidx,
- uint8 *pktdata, uint32 pktlen, bool tx) { }
-#endif /* DHD_TRX_DUMP */
-
-/* DHCP packet dump */
-#ifdef DHD_DHCP_DUMP
-extern void dhd_dhcp_dump(dhd_pub_t *dhdp, int ifidx, uint8 *pktdata, bool tx,
- uint32 *pkthash, uint16 *pktfate);
-#else
-static INLINE void dhd_dhcp_dump(dhd_pub_t *dhdp, int ifidx,
- uint8 *pktdata, bool tx, uint32 *pkthash, uint16 *pktfate) { }
-#endif /* DHD_DHCP_DUMP */
-
-/* DNS packet dump */
-#ifdef DHD_DNS_DUMP
-extern void dhd_dns_dump(dhd_pub_t *dhdp, int ifidx, uint8 *pktdata, bool tx,
- uint32 *pkthash, uint16 *pktfate);
-#else
-static INLINE void dhd_dns_dump(dhd_pub_t *dhdp, int ifidx,
- uint8 *pktdata, bool tx, uint32 *pkthash, uint16 *pktfate) { }
-#endif /* DHD_DNS_DUMP */
-
-/* ICMP packet dump */
-#ifdef DHD_ICMP_DUMP
-extern void dhd_icmp_dump(dhd_pub_t *dhdp, int ifidx, uint8 *pktdata, bool tx,
- uint32 *pkthash, uint16 *pktfate);
-#else
-static INLINE void dhd_icmp_dump(dhd_pub_t *dhdp, int ifidx,
- uint8 *pktdata, bool tx, uint32 *pkthash, uint16 *pktfate) { }
-#endif /* DHD_ICMP_DUMP */
-
-/* ARP packet dump */
-#ifdef DHD_ARP_DUMP
-extern void dhd_arp_dump(dhd_pub_t *dhdp, int ifidx, uint8 *pktdata, bool tx,
- uint32 *pkthash, uint16 *pktfate);
-#else
-static INLINE void dhd_arp_dump(dhd_pub_t *dhdp, int ifidx,
- uint8 *pktdata, bool tx, uint32 *pkthash, uint16 *pktfate) { }
-#endif /* DHD_ARP_DUMP */
-
-/* 802.1X packet dump */
-#ifdef DHD_8021X_DUMP
-extern void dhd_dump_eapol_message(dhd_pub_t *dhd, int ifidx,
- uint8 *pktdata, uint32 pktlen, bool tx, uint32 *pkthash, uint16 *pktfate);
-#else
-static INLINE void dhd_dump_eapol_message(dhd_pub_t *dhd, int ifidx,
- uint8 *pktdata, uint32 pktlen, bool tx, uint32 *pkthash, uint16 *pktfate) { }
-#endif /* DHD_8021X_DUMP */
-
-#endif /* __DHD_LINUX_PKTDUMP_H_ */
/*
* Linux platform device for DHD WLAN adapter
*
- * Copyright (C) 1999-2019, Broadcom.
- *
+ * Copyright (C) 1999-2017, Broadcom Corporation
+ *
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
* under the terms of the GNU General Public License version 2 (the "GPL"),
* available at http://www.broadcom.com/licenses/GPLv2.php, with the
* following added to such license:
- *
+ *
* As a special exception, the copyright holders of this software give you
* permission to link this software with independent modules, and to copy and
* distribute the resulting executable under terms of your choice, provided that
* the license of that module. An independent module is a module which is not
* derived from this software. The special exception does not apply to any
* modifications of the software.
- *
+ *
* Notwithstanding the above, under no circumstances may you combine this
* software in any way with any other Broadcom software provided under a license
* other than the GPL, without Broadcom's express prior written consent.
*
* <<Broadcom-WL-IPTag/Open:>>
*
- * $Id: dhd_linux_platdev.c 805835 2019-02-20 12:35:44Z $
+ * $Id: dhd_linux_platdev.c 662397 2016-09-29 10:15:08Z $
*/
#include <typedefs.h>
#include <linux/kernel.h>
#include <wl_android.h>
#if defined(CONFIG_WIFI_CONTROL_FUNC)
#include <linux/wlan_plat.h>
-#endif // endif
+#endif
#ifdef CONFIG_DTS
#include<linux/regulator/consumer.h>
#include<linux/of_gpio.h>
#define WIFI_PLAT_NAME2 "bcm4329_wlan"
#define WIFI_PLAT_EXT "bcmdhd_wifi_platform"
-#ifdef DHD_WIFI_SHUTDOWN
-extern void wifi_plat_dev_drv_shutdown(struct platform_device *pdev);
-#endif // endif
-
#ifdef CONFIG_DTS
struct regulator *wifi_regulator = NULL;
+extern struct wifi_platform_data dhd_wlan_control;
#endif /* CONFIG_DTS */
bool cfg_multichip = FALSE;
#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wmissing-field-initializers"
-#endif // endif
+#endif
struct resource dhd_wlan_resources = {0};
struct wifi_platform_data dhd_wlan_control = {0};
#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
#pragma GCC diagnostic pop
-#endif // endif
+#endif
#endif /* CONFIG_OF && !defined(CONFIG_ARCH_MSM) */
#endif /* !defined(CONFIG_DTS) */
extern void* wl_cfg80211_get_dhdp(struct net_device *dev);
-extern int dhd_wlan_init(void);
-extern int dhd_wlan_deinit(void);
-
#ifdef ENABLE_4335BT_WAR
extern int bcm_bt_lock(int cookie);
extern void bcm_bt_unlock(int cookie);
}
plat_data = adapter->wifi_plat_data;
- DHD_ERROR(("%s = %d, delay: %lu msec\n", __FUNCTION__, on, msec));
+ DHD_ERROR(("%s = %d\n", __FUNCTION__, on));
if (plat_data->set_power) {
#ifdef ENABLE_4335BT_WAR
if (on) {
}
-int wifi_platform_get_mac_addr(wifi_adapter_info_t *adapter, unsigned char *buf,
- char *name)
+int wifi_platform_get_mac_addr(wifi_adapter_info_t *adapter, unsigned char *buf)
{
struct wifi_platform_data *plat_data;
return -EINVAL;
plat_data = adapter->wifi_plat_data;
if (plat_data->get_mac_addr) {
-#ifdef CUSTOM_MULTI_MAC
- return plat_data->get_mac_addr(buf, name);
-#else
return plat_data->get_mac_addr(buf);
-#endif
}
return -EOPNOTSUPP;
}
DHD_TRACE(("%s\n", __FUNCTION__));
if (plat_data->get_country_code) {
-#ifdef CUSTOM_FORCE_NODFS_FLAG
+#ifdef CUSTOM_COUNTRY_CODE
return plat_data->get_country_code(ccode, flags);
#else
return plat_data->get_country_code(ccode);
ASSERT(dhd_wifi_platdata != NULL);
ASSERT(dhd_wifi_platdata->num_adapters == 1);
adapter = &dhd_wifi_platdata->adapters[0];
-#if defined(CONFIG_WIFI_CONTROL_FUNC)
- adapter->wifi_plat_data = (struct wifi_platform_data *)(pdev->dev.platform_data);
-#else
adapter->wifi_plat_data = (void *)&dhd_wlan_control;
-#endif
+// adapter->wifi_plat_data = (struct wifi_platform_data *)(pdev->dev.platform_data);
resource = platform_get_resource_byname(pdev, IORESOURCE_IRQ, "bcmdhd_wlan_irq");
if (resource == NULL)
adapter->intr_flags = resource->flags & IRQF_TRIGGER_MASK;
#ifdef DHD_ISR_NO_SUSPEND
adapter->intr_flags |= IRQF_NO_SUSPEND;
-#endif // endif
+#endif
}
#ifdef CONFIG_DTS
.remove = wifi_plat_dev_drv_remove,
.suspend = wifi_plat_dev_drv_suspend,
.resume = wifi_plat_dev_drv_resume,
-#ifdef DHD_WIFI_SHUTDOWN
- .shutdown = wifi_plat_dev_drv_shutdown,
-#endif // endif
.driver = {
.name = WIFI_PLAT_NAME,
#ifdef CONFIG_DTS
.remove = wifi_plat_dev_drv_remove,
.suspend = wifi_plat_dev_drv_suspend,
.resume = wifi_plat_dev_drv_resume,
-#ifdef DHD_WIFI_SHUTDOWN
- .shutdown = wifi_plat_dev_drv_shutdown,
-#endif // endif
.driver = {
.name = WIFI_PLAT_NAME2,
}
#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wcast-qual"
-#endif // endif
+#endif
const struct platform_device *pdev = to_platform_device(dev);
#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
#pragma GCC diagnostic pop
-#endif // endif
+#endif
if (strcmp(pdev->name, name) == 0) {
DHD_ERROR(("found wifi platform device %s\n", name));
dev2 = bus_find_device(&platform_bus_type, NULL, WIFI_PLAT_NAME2, wifi_platdev_match);
#endif
-#ifdef BOARD_HIKEY_MODULAR
- dhd_wlan_init();
-#endif /* BOARD_HIKEY_MODULAR */
-
#if !defined(CONFIG_DTS) && !defined(CUSTOMER_HW)
if (!dts_enabled) {
if (dev1 == NULL && dev2 == NULL) {
adapter->intr_flags = resource->flags & IRQF_TRIGGER_MASK;
#ifdef DHD_ISR_NO_SUSPEND
adapter->intr_flags |= IRQF_NO_SUSPEND;
-#endif // endif
+#endif
wifi_plat_dev_probe_ret = dhd_wifi_platform_load();
}
#endif /* !defined(CONFIG_DTS) */
+
#if defined(CONFIG_DTS) && !defined(CUSTOMER_HW)
wifi_plat_dev_probe_ret = platform_driver_register(&wifi_platform_dev_driver);
#endif /* CONFIG_DTS */
}
wifi_platform_bus_enumerate(adapter, FALSE);
}
-#ifdef BOARD_HIKEY_MODULAR
- dhd_wlan_deinit();
-#endif /* BOARD_HIKEY_MODULAR */
#endif /* !defined(CONFIG_DTS) */
#if defined(CUSTOMER_HW)
}
#endif /* BCMPCIE */
+
void dhd_wifi_platform_unregister_drv(void)
{
#ifndef CUSTOMER_HW
extern uint dhd_deferred_tx;
#if defined(BCMLXSDMMC) || defined(BCMDBUS)
extern struct semaphore dhd_registration_sem;
-#endif // endif
+#endif
#ifdef BCMSDIO
static int dhd_wifi_platform_load_sdio(void)
BCM_REFERENCE(i);
BCM_REFERENCE(adapter);
-
/* Sanity check on the module parameters
* - Both watchdog and DPC as tasklets are ok
* - If both watchdog and DPC are threads, TX must be deferred
#if defined(BCMLXSDMMC) && !defined(DHD_PRELOAD)
sema_init(&dhd_registration_sem, 0);
-#endif // endif
+#endif
if (dhd_wifi_platdata == NULL) {
DHD_ERROR(("DHD wifi platform data is required for Android build\n"));
- DHD_ERROR(("DHD registering bus directly\n"));
+ DHD_ERROR(("DHD registeing bus directly\n"));
/* x86 bring-up PC needs no power-up operations */
err = dhd_bus_register();
return err;
#endif
err = wifi_platform_set_power(adapter, TRUE, WIFI_TURNON_DELAY);
if (err) {
- DHD_ERROR(("%s: wifi pwr on error ! \n", __FUNCTION__));
dhd_bus_unreg_sdio_notify();
/* WL_REG_ON state unknown, Power off forcely */
wifi_platform_set_power(adapter, FALSE, WIFI_TURNOFF_DELAY);
wifi_platform_set_power(adapter, FALSE, WIFI_TURNOFF_DELAY);
wifi_platform_bus_enumerate(adapter, FALSE);
}
-#else
- /* x86 bring-up PC needs no power-up operations */
- err = dhd_bus_register();
-#endif // endif
+#endif
return err;
}
#ifdef BCMDBUS
static int dhd_wifi_platform_load_usb(void)
{
- int err = 0;
-#if !defined(DHD_PRELOAD)
wifi_adapter_info_t *adapter;
s32 timeout = -1;
int i;
+ int err = 0;
enum wifi_adapter_status wait_status;
-#endif
err = dhd_bus_register();
if (err) {
goto exit;
}
-#if !defined(DHD_PRELOAD)
/* power up all adapters */
for (i = 0; i < dhd_wifi_platdata->num_adapters; i++) {
adapter = &dhd_wifi_platdata->adapters[i];
goto fail;
}
}
-#endif
exit:
return err;
-#if !defined(DHD_PRELOAD)
fail:
dhd_bus_unregister();
/* power down all adapters */
}
return err;
-#endif
}
#else /* BCMDBUS */
static int dhd_wifi_platform_load_usb(void)
+++ /dev/null
-/*
- * DHD Linux header file - contains private structure definition of the Linux specific layer
- *
- * Copyright (C) 1999-2019, Broadcom.
- *
- * Unless you and Broadcom execute a separate written software license
- * agreement governing use of this software, this software is licensed to you
- * under the terms of the GNU General Public License version 2 (the "GPL"),
- * available at http://www.broadcom.com/licenses/GPLv2.php, with the
- * following added to such license:
- *
- * As a special exception, the copyright holders of this software give you
- * permission to link this software with independent modules, and to copy and
- * distribute the resulting executable under terms of your choice, provided that
- * you also meet, for each linked independent module, the terms and conditions of
- * the license of that module. An independent module is a module which is not
- * derived from this software. The special exception does not apply to any
- * modifications of the software.
- *
- * Notwithstanding the above, under no circumstances may you combine this
- * software in any way with any other Broadcom software provided under a license
- * other than the GPL, without Broadcom's express prior written consent.
- *
- *
- * <<Broadcom-WL-IPTag/Open:>>
- *
- * $Id: dhd_linux_priv.h 815919 2019-04-22 09:06:50Z $
- */
-
-#ifndef __DHD_LINUX_PRIV_H__
-#define __DHD_LINUX_PRIV_H__
-
-#include <osl.h>
-
-#ifdef SHOW_LOGTRACE
-#include <linux/syscalls.h>
-#include <event_log.h>
-#endif /* SHOW_LOGTRACE */
-#include <linux/skbuff.h>
-#include <linux/spinlock.h>
-#ifdef CONFIG_COMPAT
-#include <linux/compat.h>
-#endif /* CONFIG COMPAT */
-#include <dngl_stats.h>
-#include <dhd.h>
-#include <dhd_dbg.h>
-#include <dhd_debug.h>
-#include <dhd_linux.h>
-#include <dhd_bus.h>
-
-#ifdef PCIE_FULL_DONGLE
-#include <bcmmsgbuf.h>
-#include <dhd_flowring.h>
-#endif /* PCIE_FULL_DONGLE */
-
-/*
- * Do not include this header except for the dhd_linux.c dhd_linux_sysfs.c
- * Local private structure (extension of pub)
- */
-typedef struct dhd_info {
-#if defined(WL_WIRELESS_EXT)
- wl_iw_t iw; /* wireless extensions state (must be first) */
-#endif /* defined(WL_WIRELESS_EXT) */
- dhd_pub_t pub;
- /* for supporting multiple interfaces.
- * static_ifs hold the net ifaces without valid FW IF
- */
- dhd_if_t *iflist[DHD_MAX_IFS + DHD_MAX_STATIC_IFS];
-
- wifi_adapter_info_t *adapter; /* adapter information, interrupt, fw path etc. */
- char fw_path[PATH_MAX]; /* path to firmware image */
- char nv_path[PATH_MAX]; /* path to nvram vars file */
- char clm_path[PATH_MAX]; /* path to clm vars file */
- char conf_path[PATH_MAX]; /* path to config vars file */
-#ifdef DHD_UCODE_DOWNLOAD
- char uc_path[PATH_MAX]; /* path to ucode image */
-#endif /* DHD_UCODE_DOWNLOAD */
-
- /* serialize dhd iovars */
- struct mutex dhd_iovar_mutex;
-
- struct semaphore proto_sem;
-#ifdef PROP_TXSTATUS
- spinlock_t wlfc_spinlock;
-
-#ifdef BCMDBUS
- ulong wlfc_lock_flags;
- ulong wlfc_pub_lock_flags;
-#endif /* BCMDBUS */
-#endif /* PROP_TXSTATUS */
- wait_queue_head_t ioctl_resp_wait;
- wait_queue_head_t d3ack_wait;
- wait_queue_head_t dhd_bus_busy_state_wait;
- wait_queue_head_t dmaxfer_wait;
- uint32 default_wd_interval;
-
- timer_list_compat_t timer;
- bool wd_timer_valid;
- struct tasklet_struct tasklet;
- spinlock_t sdlock;
- spinlock_t txqlock;
- spinlock_t dhd_lock;
-#ifdef BCMDBUS
- ulong txqlock_flags;
-#else
-
- struct semaphore sdsem;
- tsk_ctl_t thr_dpc_ctl;
- tsk_ctl_t thr_wdt_ctl;
-#endif /* BCMDBUS */
-
- tsk_ctl_t thr_rxf_ctl;
- spinlock_t rxf_lock;
- bool rxthread_enabled;
-
- /* Wakelocks */
-#if defined(CONFIG_HAS_WAKELOCK)
- struct wake_lock wl_wifi; /* Wifi wakelock */
- struct wake_lock wl_rxwake; /* Wifi rx wakelock */
- struct wake_lock wl_ctrlwake; /* Wifi ctrl wakelock */
- struct wake_lock wl_wdwake; /* Wifi wd wakelock */
- struct wake_lock wl_evtwake; /* Wifi event wakelock */
- struct wake_lock wl_pmwake; /* Wifi pm handler wakelock */
- struct wake_lock wl_txflwake; /* Wifi tx flow wakelock */
-#ifdef BCMPCIE_OOB_HOST_WAKE
- struct wake_lock wl_intrwake; /* Host wakeup wakelock */
-#endif /* BCMPCIE_OOB_HOST_WAKE */
-#ifdef DHD_USE_SCAN_WAKELOCK
- struct wake_lock wl_scanwake; /* Wifi scan wakelock */
-#endif /* DHD_USE_SCAN_WAKELOCK */
-#endif /* CONFIG_HAS_WAKELOCK */
-
- /* net_device interface lock, prevent race conditions among net_dev interface
- * calls and wifi_on or wifi_off
- */
- struct mutex dhd_net_if_mutex;
- struct mutex dhd_suspend_mutex;
-#if defined(PKT_FILTER_SUPPORT) && defined(APF)
- struct mutex dhd_apf_mutex;
-#endif /* PKT_FILTER_SUPPORT && APF */
- spinlock_t wakelock_spinlock;
- spinlock_t wakelock_evt_spinlock;
- uint32 wakelock_counter;
- int wakelock_wd_counter;
- int wakelock_rx_timeout_enable;
- int wakelock_ctrl_timeout_enable;
- bool waive_wakelock;
- uint32 wakelock_before_waive;
-
- /* Thread to issue ioctl for multicast */
- wait_queue_head_t ctrl_wait;
- atomic_t pend_8021x_cnt;
- dhd_attach_states_t dhd_state;
-#ifdef SHOW_LOGTRACE
- dhd_event_log_t event_data;
-#endif /* SHOW_LOGTRACE */
-
-#if defined(CONFIG_HAS_EARLYSUSPEND) && defined(DHD_USE_EARLYSUSPEND)
- struct early_suspend early_suspend;
-#endif /* CONFIG_HAS_EARLYSUSPEND && DHD_USE_EARLYSUSPEND */
-
-#ifdef ARP_OFFLOAD_SUPPORT
- u32 pend_ipaddr;
-#endif /* ARP_OFFLOAD_SUPPORT */
-#ifdef DHDTCPACK_SUPPRESS
- spinlock_t tcpack_lock;
-#endif /* DHDTCPACK_SUPPRESS */
-#ifdef FIX_CPU_MIN_CLOCK
- bool cpufreq_fix_status;
- struct mutex cpufreq_fix;
- struct pm_qos_request dhd_cpu_qos;
-#ifdef FIX_BUS_MIN_CLOCK
- struct pm_qos_request dhd_bus_qos;
-#endif /* FIX_BUS_MIN_CLOCK */
-#endif /* FIX_CPU_MIN_CLOCK */
- void *dhd_deferred_wq;
-#ifdef DEBUG_CPU_FREQ
- struct notifier_block freq_trans;
- int __percpu *new_freq;
-#endif // endif
- unsigned int unit;
- struct notifier_block pm_notifier;
-#ifdef DHD_PSTA
- uint32 psta_mode; /* PSTA or PSR */
-#endif /* DHD_PSTA */
-#ifdef DHD_WET
- uint32 wet_mode;
-#endif /* DHD_WET */
-#ifdef DHD_DEBUG
- dhd_dump_t *dump;
- struct timer_list join_timer;
- u32 join_timeout_val;
- bool join_timer_active;
- uint scan_time_count;
- struct timer_list scan_timer;
- bool scan_timer_active;
-#endif // endif
-#if defined(DHD_LB)
- /* CPU Load Balance dynamic CPU selection */
-
- /* Variable that tracks the currect CPUs available for candidacy */
- cpumask_var_t cpumask_curr_avail;
-
- /* Primary and secondary CPU mask */
- cpumask_var_t cpumask_primary, cpumask_secondary; /* configuration */
- cpumask_var_t cpumask_primary_new, cpumask_secondary_new; /* temp */
-
- struct notifier_block cpu_notifier;
-
- /* Tasklet to handle Tx Completion packet freeing */
- struct tasklet_struct tx_compl_tasklet;
- atomic_t tx_compl_cpu;
-
- /* Tasklet to handle RxBuf Post during Rx completion */
- struct tasklet_struct rx_compl_tasklet;
- atomic_t rx_compl_cpu;
-
- /* Napi struct for handling rx packet sendup. Packets are removed from
- * H2D RxCompl ring and placed into rx_pend_queue. rx_pend_queue is then
- * appended to rx_napi_queue (w/ lock) and the rx_napi_struct is scheduled
- * to run to rx_napi_cpu.
- */
- struct sk_buff_head rx_pend_queue ____cacheline_aligned;
- struct sk_buff_head rx_napi_queue ____cacheline_aligned;
- struct napi_struct rx_napi_struct ____cacheline_aligned;
- atomic_t rx_napi_cpu; /* cpu on which the napi is dispatched */
- struct net_device *rx_napi_netdev; /* netdev of primary interface */
-
- struct work_struct rx_napi_dispatcher_work;
- struct work_struct tx_compl_dispatcher_work;
- struct work_struct tx_dispatcher_work;
- struct work_struct rx_compl_dispatcher_work;
-
- /* Number of times DPC Tasklet ran */
- uint32 dhd_dpc_cnt;
- /* Number of times NAPI processing got scheduled */
- uint32 napi_sched_cnt;
- /* Number of times NAPI processing ran on each available core */
- uint32 *napi_percpu_run_cnt;
- /* Number of times RX Completions got scheduled */
- uint32 rxc_sched_cnt;
- /* Number of times RX Completion ran on each available core */
- uint32 *rxc_percpu_run_cnt;
- /* Number of times TX Completions got scheduled */
- uint32 txc_sched_cnt;
- /* Number of times TX Completions ran on each available core */
- uint32 *txc_percpu_run_cnt;
- /* CPU status */
- /* Number of times each CPU came online */
- uint32 *cpu_online_cnt;
- /* Number of times each CPU went offline */
- uint32 *cpu_offline_cnt;
-
- /* Number of times TX processing run on each core */
- uint32 *txp_percpu_run_cnt;
- /* Number of times TX start run on each core */
- uint32 *tx_start_percpu_run_cnt;
-
- /* Tx load balancing */
-
- /* TODO: Need to see if batch processing is really required in case of TX
- * processing. In case of RX the Dongle can send a bunch of rx completions,
- * hence we took a 3 queue approach
- * enque - adds the skbs to rx_pend_queue
- * dispatch - uses a lock and adds the list of skbs from pend queue to
- * napi queue
- * napi processing - copies the pend_queue into a local queue and works
- * on it.
- * But for TX its going to be 1 skb at a time, so we are just thinking
- * of using only one queue and use the lock supported skb queue functions
- * to add and process it. If its in-efficient we'll re-visit the queue
- * design.
- */
-
- /* When the NET_TX tries to send a TX packet put it into tx_pend_queue */
- /* struct sk_buff_head tx_pend_queue ____cacheline_aligned; */
- /*
- * From the Tasklet that actually sends out data
- * copy the list tx_pend_queue into tx_active_queue. There by we need
- * to spinlock to only perform the copy the rest of the code ie to
- * construct the tx_pend_queue and the code to process tx_active_queue
- * can be lockless. The concept is borrowed as is from RX processing
- */
- /* struct sk_buff_head tx_active_queue ____cacheline_aligned; */
-
- /* Control TXP in runtime, enable by default */
- atomic_t lb_txp_active;
-
- /* Control RXP in runtime, enable by default */
- atomic_t lb_rxp_active;
-
- /*
- * When the NET_TX tries to send a TX packet put it into tx_pend_queue
- * For now, the processing tasklet will also direcly operate on this
- * queue
- */
- struct sk_buff_head tx_pend_queue ____cacheline_aligned;
-
- /* Control RXP in runtime, enable by default */
- /* cpu on which the DHD Tx is happenning */
- atomic_t tx_cpu;
-
- /* CPU on which the Network stack is calling the DHD's xmit function */
- atomic_t net_tx_cpu;
-
- /* Tasklet context from which the DHD's TX processing happens */
- struct tasklet_struct tx_tasklet;
-
- /*
- * Consumer Histogram - NAPI RX Packet processing
- * -----------------------------------------------
- * On Each CPU, when the NAPI RX Packet processing call back was invoked
- * how many packets were processed is captured in this data structure.
- * Now it's difficult to capture the "exact" number of packets processed.
- * So considering the packet counter to be a 32 bit one, we have a
- * bucket with 8 bins (2^1, 2^2 ... 2^8). The "number" of packets
- * processed is rounded off to the next power of 2 and put in the
- * appropriate "bin"; the value in the bin gets incremented.
- * For example, assume that on CPU 1 NAPI Rx runs 3 times
- * and the packet count processed is as follows (assume the bin counters are 0)
- * iteration 1 - 10 (the bin counter 2^4 increments to 1)
- * iteration 2 - 30 (the bin counter 2^5 increments to 1)
- * iteration 3 - 15 (the bin counter 2^4 increments by 1 to become 2)
- * (a short sketch of this binning follows the struct definition below)
- */
- uint32 *napi_rx_hist[HIST_BIN_SIZE];
- uint32 *txc_hist[HIST_BIN_SIZE];
- uint32 *rxc_hist[HIST_BIN_SIZE];
-#endif /* DHD_LB */
-#if defined(DNGL_AXI_ERROR_LOGGING) && defined(DHD_USE_WQ_FOR_DNGL_AXI_ERROR)
- struct work_struct axi_error_dispatcher_work;
-#endif /* DNGL_AXI_ERROR_LOGGING && DHD_USE_WQ_FOR_DNGL_AXI_ERROR */
-#ifdef SHOW_LOGTRACE
-#ifdef DHD_USE_KTHREAD_FOR_LOGTRACE
- tsk_ctl_t thr_logtrace_ctl;
-#else
- struct delayed_work event_log_dispatcher_work;
-#endif /* DHD_USE_KTHREAD_FOR_LOGTRACE */
-#endif /* SHOW_LOGTRACE */
-
-#if defined(BCM_DNGL_EMBEDIMAGE) || defined(BCM_REQUEST_FW)
-#endif /* defined(BCM_DNGL_EMBEDIMAGE) || defined(BCM_REQUEST_FW) */
- struct kobject dhd_kobj;
- struct kobject dhd_conf_file_kobj;
- struct timer_list timesync_timer;
-#if defined(BT_OVER_SDIO)
- char btfw_path[PATH_MAX];
-#endif /* defined (BT_OVER_SDIO) */
-#ifdef WL_MONITOR
- struct net_device *monitor_dev; /* monitor pseudo device */
- struct sk_buff *monitor_skb;
- uint monitor_len;
- uint monitor_type; /* monitor pseudo device */
-#endif /* WL_MONITOR */
-#if defined(BT_OVER_SDIO)
- struct mutex bus_user_lock; /* lock for sdio bus apis shared between WLAN & BT */
- int bus_user_count; /* User counts of sdio bus shared between WLAN & BT */
-#endif /* BT_OVER_SDIO */
-#ifdef SHOW_LOGTRACE
- struct sk_buff_head evt_trace_queue ____cacheline_aligned;
-#endif // endif
-#ifdef DHD_PCIE_NATIVE_RUNTIMEPM
- struct workqueue_struct *tx_wq;
- struct workqueue_struct *rx_wq;
-#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
-#ifdef DHD_DEBUG_UART
- bool duart_execute;
-#endif /* DHD_DEBUG_UART */
- struct mutex logdump_lock;
- /* indicates mem_dump was scheduled as work queue or called directly */
- bool scheduled_memdump;
- struct work_struct dhd_hang_process_work;
-#ifdef DHD_HP2P
- spinlock_t hp2p_lock;
-#endif /* DHD_HP2P */
-} dhd_info_t;
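The binning described in the histogram comment above rounds each per-iteration packet count up to the next power of two and bumps the matching counter. A minimal sketch of that lookup, assuming bin index i stands for the 2^(i+1) bucket (2^1 .. 2^HIST_BIN_SIZE), exactly as in the comment's example; the helper name dhd_hist_bin_idx is illustrative, not a driver symbol.

/* Illustrative only: map a per-iteration packet count to its histogram bin.
 * Bin index i is taken to represent the 2^(i+1) bucket.
 */
static inline int dhd_hist_bin_idx(uint32 npkts)
{
	int bin = 0;

	/* round up to the next power of two: 10 -> 2^4, 30 -> 2^5, 15 -> 2^4 */
	while (((uint32)1 << (bin + 1)) < npkts && bin < (HIST_BIN_SIZE - 1))
		bin++;
	return bin;
}

With the three iterations from the comment (10, 30 and 15 packets) this leaves the 2^4 bin at 2 and the 2^5 bin at 1.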
-
-extern int dhd_sysfs_init(dhd_info_t *dhd);
-extern void dhd_sysfs_exit(dhd_info_t *dhd);
-extern void dhd_dbg_ring_proc_create(dhd_pub_t *dhdp);
-extern void dhd_dbg_ring_proc_destroy(dhd_pub_t *dhdp);
-
-int __dhd_sendpkt(dhd_pub_t *dhdp, int ifidx, void *pktbuf);
-
-#if defined(DHD_LB)
-#if defined(DHD_LB_TXP)
-int dhd_lb_sendpkt(dhd_info_t *dhd, struct net_device *net, int ifidx, void *skb);
-void dhd_tx_dispatcher_work(struct work_struct * work);
-void dhd_tx_dispatcher_fn(dhd_pub_t *dhdp);
-void dhd_lb_tx_dispatch(dhd_pub_t *dhdp);
-void dhd_lb_tx_handler(unsigned long data);
-#endif /* DHD_LB_TXP */
-
-#if defined(DHD_LB_RXP)
-int dhd_napi_poll(struct napi_struct *napi, int budget);
-void dhd_rx_napi_dispatcher_fn(struct work_struct * work);
-void dhd_lb_rx_napi_dispatch(dhd_pub_t *dhdp);
-void dhd_lb_rx_pkt_enqueue(dhd_pub_t *dhdp, void *pkt, int ifidx);
-#endif /* DHD_LB_RXP */
-
-void dhd_lb_set_default_cpus(dhd_info_t *dhd);
-void dhd_cpumasks_deinit(dhd_info_t *dhd);
-int dhd_cpumasks_init(dhd_info_t *dhd);
-
-void dhd_select_cpu_candidacy(dhd_info_t *dhd);
-
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0))
-int dhd_cpu_startup_callback(unsigned int cpu);
-int dhd_cpu_teardown_callback(unsigned int cpu);
-#else
-int dhd_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu);
-#endif /* LINUX_VERSION_CODE < 4.10.0 */
-
-int dhd_register_cpuhp_callback(dhd_info_t *dhd);
-int dhd_unregister_cpuhp_callback(dhd_info_t *dhd);
-
-#if defined(DHD_LB_TXC)
-void dhd_lb_tx_compl_dispatch(dhd_pub_t *dhdp);
-#endif /* DHD_LB_TXC */
-
-#if defined(DHD_LB_RXC)
-void dhd_lb_rx_compl_dispatch(dhd_pub_t *dhdp);
-void dhd_rx_compl_dispatcher_fn(struct work_struct * work);
-#endif /* DHD_LB_RXC */
-
-#endif /* DHD_LB */
-
-#if defined(DHD_LB_IRQSET) || defined(DHD_CONTROL_PCIE_CPUCORE_WIFI_TURNON)
-void dhd_irq_set_affinity(dhd_pub_t *dhdp, const struct cpumask *cpumask);
-#endif /* DHD_LB_IRQSET || DHD_CONTROL_PCIE_CPUCORE_WIFI_TURNON */
-
-#endif /* __DHD_LINUX_PRIV_H__ */
/*
* Expose some of the kernel scheduler routines
*
- * Copyright (C) 1999-2019, Broadcom.
- *
+ * Copyright (C) 1999-2017, Broadcom Corporation
+ *
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
* under the terms of the GNU General Public License version 2 (the "GPL"),
* available at http://www.broadcom.com/licenses/GPLv2.php, with the
* following added to such license:
- *
+ *
* As a special exception, the copyright holders of this software give you
* permission to link this software with independent modules, and to copy and
* distribute the resulting executable under terms of your choice, provided that
* the license of that module. An independent module is a module which is not
* derived from this software. The special exception does not apply to any
* modifications of the software.
- *
+ *
* Notwithstanding the above, under no circumstances may you combine this
* software in any way with any other Broadcom software provided under a license
* other than the GPL, without Broadcom's express prior written consent.
*
* <<Broadcom-WL-IPTag/Open:>>
*
- * $Id: dhd_linux_sched.c 815919 2019-04-22 09:06:50Z $
+ * $Id: dhd_linux_sched.c 514727 2014-11-12 03:02:48Z $
*/
#include <linux/kernel.h>
#include <linux/module.h>
int setScheduler(struct task_struct *p, int policy, struct sched_param *param)
{
int rc = 0;
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0))
rc = sched_setscheduler(p, policy, param);
+#endif /* LinuxVer */
return rc;
}
int get_scheduler_policy(struct task_struct *p)
{
int rc = SCHED_NORMAL;
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0))
rc = p->policy;
+#endif /* LinuxVer */
return rc;
}
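setScheduler() above is a thin, version-guarded pass-through to the kernel's sched_setscheduler(); a hedged sketch of a caller that promotes a worker task to SCHED_FIFO. The wrapper name and the priority value here are illustrative, not taken from the driver.

/* Illustrative caller only: switch a task to SCHED_FIFO via the wrapper above. */
static void example_set_fifo(struct task_struct *tsk)
{
	struct sched_param param = { .sched_priority = 1 };	/* example priority */

	if (setScheduler(tsk, SCHED_FIFO, &param) != 0)
		printk(KERN_WARNING "example_set_fifo: sched_setscheduler failed\n");
}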
* Broadcom Dongle Host Driver (DHD), Generic work queue framework
* Generic interface to handle dhd deferred work events
*
- * Copyright (C) 1999-2019, Broadcom.
- *
+ * Copyright (C) 1999-2017, Broadcom Corporation
+ *
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
* under the terms of the GNU General Public License version 2 (the "GPL"),
* available at http://www.broadcom.com/licenses/GPLv2.php, with the
* following added to such license:
- *
+ *
* As a special exception, the copyright holders of this software give you
* permission to link this software with independent modules, and to copy and
* distribute the resulting executable under terms of your choice, provided that
* the license of that module. An independent module is a module which is not
* derived from this software. The special exception does not apply to any
* modifications of the software.
- *
+ *
* Notwithstanding the above, under no circumstances may you combine this
* software in any way with any other Broadcom software provided under a license
* other than the GPL, without Broadcom's express prior written consent.
*
* <<Broadcom-WL-IPTag/Open:>>
*
- * $Id: dhd_linux_wq.c 815919 2019-04-22 09:06:50Z $
+ * $Id: dhd_linux_wq.c 641330 2016-06-02 06:55:00Z $
*/
#include <linux/init.h>
struct work_struct deferred_work; /* should be the first member */
struct kfifo *prio_fifo;
- struct kfifo *work_fifo;
- u8 *prio_fifo_buf;
- u8 *work_fifo_buf;
- spinlock_t work_lock;
- void *dhd_info; /* review: does it require */
- u32 event_skip_mask;
+ struct kfifo *work_fifo;
+ u8 *prio_fifo_buf;
+ u8 *work_fifo_buf;
+ spinlock_t work_lock;
+ void *dhd_info; /* review: is this really required? */
};
static inline struct kfifo*
dhd_kfifo_init(u8 *buf, int size, spinlock_t *lock)
{
struct kfifo *fifo;
- gfp_t flags = CAN_SLEEP()? GFP_KERNEL : GFP_ATOMIC;
+ gfp_t flags = CAN_SLEEP() ? GFP_KERNEL : GFP_ATOMIC;
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 33))
+ fifo = kfifo_init(buf, size, flags, lock);
+#else
fifo = (struct kfifo *)kzalloc(sizeof(struct kfifo), flags);
if (!fifo) {
return NULL;
}
kfifo_init(fifo, buf, size);
+#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 33)) */
return fifo;
}
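Once dhd_kfifo_init() has returned a fifo (element size left at the default of one byte), records move through it with the locked kfifo helpers, serialized by the same spinlock that was passed in. A minimal sketch under that assumption and a post-2.6.33 kfifo API; the record type and function names are illustrative only.

/* Illustrative only: byte-wise push/pop of fixed-size records on the fifo. */
struct example_work_rec {
	u8 event;
	void *event_data;
};

static bool example_fifo_push(struct kfifo *fifo, spinlock_t *lock,
	struct example_work_rec *rec)
{
	return kfifo_in_locked(fifo, rec, sizeof(*rec), lock) == sizeof(*rec);
}

static bool example_fifo_pop(struct kfifo *fifo, spinlock_t *lock,
	struct example_work_rec *rec)
{
	return kfifo_out_locked(fifo, rec, sizeof(*rec), lock) == sizeof(*rec);
}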
dhd_kfifo_free(struct kfifo *fifo)
{
kfifo_free(fifo);
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 31))
+ /* FC11 releases the fifo memory */
+ kfree(fifo);
+#endif
}
/* deferred work functions */
void*
dhd_deferred_work_init(void *dhd_info)
{
- struct dhd_deferred_wq *work = NULL;
- u8* buf;
- unsigned long fifo_size = 0;
- gfp_t flags = CAN_SLEEP()? GFP_KERNEL : GFP_ATOMIC;
+ struct dhd_deferred_wq *work = NULL;
+ u8* buf;
+ unsigned long fifo_size = 0;
+ gfp_t flags = CAN_SLEEP() ? GFP_KERNEL : GFP_ATOMIC;
if (!dhd_info) {
DHD_ERROR(("%s: dhd info not initialized\n", __FUNCTION__));
}
work->dhd_info = dhd_info;
- work->event_skip_mask = 0;
DHD_ERROR(("%s: work queue initialized\n", __FUNCTION__));
return work;
{
struct dhd_deferred_wq *deferred_work = work;
+
if (!deferred_work) {
DHD_ERROR(("%s: deferred work has been freed already\n",
__FUNCTION__));
return DHD_WQ_STS_UNKNOWN_PRIORITY;
}
- if ((deferred_wq->event_skip_mask & (1 << event))) {
- DHD_ERROR(("%s: Skip event requested. Mask = 0x%x\n",
- __FUNCTION__, deferred_wq->event_skip_mask));
- return DHD_WQ_STS_EVENT_SKIPPED;
- }
-
/*
* default element size is 1, which can be changed
* using kfifo_esize(). Older kernel(FC11) doesn't support
continue;
}
+
if (work_event.event_handler) {
work_event.event_handler(deferred_work->dhd_info,
work_event.event_data, work_event.event);
return;
}
-
-void
-dhd_deferred_work_set_skip(void *work, u8 event, bool set)
-{
- struct dhd_deferred_wq *deferred_wq = (struct dhd_deferred_wq *)work;
-
- if (!deferred_wq || !event || (event >= DHD_MAX_WQ_EVENTS)) {
- DHD_ERROR(("%s: Invalid!!\n", __FUNCTION__));
- return;
- }
-
- if (set) {
- /* Set */
- deferred_wq->event_skip_mask |= (1 << event);
- } else {
- /* Clear */
- deferred_wq->event_skip_mask &= ~(1 << event);
- }
-}
* Broadcom Dongle Host Driver (DHD), Generic work queue framework
* Generic interface to handle dhd deferred work events
*
- * Copyright (C) 1999-2019, Broadcom.
- *
+ * Copyright (C) 1999-2017, Broadcom Corporation
+ *
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
* under the terms of the GNU General Public License version 2 (the "GPL"),
* available at http://www.broadcom.com/licenses/GPLv2.php, with the
* following added to such license:
- *
+ *
* As a special exception, the copyright holders of this software give you
* permission to link this software with independent modules, and to copy and
* distribute the resulting executable under terms of your choice, provided that
* the license of that module. An independent module is a module which is not
* derived from this software. The special exception does not apply to any
* modifications of the software.
- *
+ *
* Notwithstanding the above, under no circumstances may you combine this
* software in any way with any other Broadcom software provided under a license
* other than the GPL, without Broadcom's express prior written consent.
*
* <<Broadcom-WL-IPTag/Open:>>
*
- * $Id: dhd_linux_wq.h 814378 2019-04-11 02:21:31Z $
+ * $Id: dhd_linux_wq.h 704361 2017-06-13 08:50:38Z $
*/
#ifndef _dhd_linux_wq_h_
#define _dhd_linux_wq_h_
DHD_WQ_WORK_SET_MCAST_LIST,
DHD_WQ_WORK_IPV6_NDO,
DHD_WQ_WORK_HANG_MSG,
+ DHD_WQ_WORK_SOC_RAM_DUMP,
DHD_WQ_WORK_DHD_LOG_DUMP,
- DHD_WQ_WORK_PKTLOG_DUMP,
DHD_WQ_WORK_INFORM_DHD_MON,
DHD_WQ_WORK_EVENT_LOGTRACE,
DHD_WQ_WORK_DMA_LB_MEM_REL,
- DHD_WQ_WORK_NATOE_EVENT,
- DHD_WQ_WORK_NATOE_IOCTL,
- DHD_WQ_WORK_MACDBG,
DHD_WQ_WORK_DEBUG_UART_DUMP,
- DHD_WQ_WORK_GET_BIGDATA_AP,
- DHD_WQ_WORK_SOC_RAM_DUMP,
-#ifdef DHD_ERPOM
- DHD_WQ_WORK_ERROR_RECOVERY,
-#endif /* DHD_ERPOM */
- DHD_WQ_WORK_H2D_CONSOLE_TIME_STAMP_MATCH,
- DHD_WQ_WORK_AXI_ERROR_DUMP,
- DHD_WQ_WORK_CTO_RECOVERY,
+ DHD_WQ_WORK_SSSR_DUMP,
+ DHD_WQ_WORK_PKTLOG_DUMP,
#ifdef DHD_UPDATE_INTF_MAC
DHD_WQ_WORK_IF_UPDATE,
#endif /* DHD_UPDATE_INTF_MAC */
#define DHD_WQ_STS_SCHED_FAILED -3
#define DHD_WQ_STS_UNKNOWN_EVENT -4
#define DHD_WQ_STS_UNKNOWN_PRIORITY -5
-#define DHD_WQ_STS_EVENT_SKIPPED -6
typedef void (*event_handler_t)(void *handle, void *event_data, u8 event);
void dhd_deferred_work_deinit(void *workq);
int dhd_deferred_schedule_work(void *workq, void *event_data, u8 event,
event_handler_t evt_handler, u8 priority);
-void dhd_deferred_work_set_skip(void *work, u8 event, bool set);
#endif /* _dhd_linux_wq_h_ */
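A hedged sketch of how a caller uses the interface declared in this header: provide a handler matching event_handler_t and queue it with dhd_deferred_schedule_work() against one of the DHD_WQ_WORK_* events. The handler body, the wrapper function and the DHD_WQ_WORK_PRIORITY_HIGH constant name are assumptions for illustration, not code from the driver.

/* Illustrative only: defer hang-message handling to the shared work queue. */
static void example_hang_handler(void *handle, void *event_data, u8 event)
{
	/* runs later from the deferred work queue, outside the caller's context */
}

static void example_schedule_hang(void *workq, void *event_data)
{
	/* workq is the handle returned by dhd_deferred_work_init() */
	if (dhd_deferred_schedule_work(workq, event_data, DHD_WQ_WORK_HANG_MSG,
		example_hang_handler, DHD_WQ_WORK_PRIORITY_HIGH) < 0) {
		/* not queued: unknown event, bad priority or full fifo */
	}
}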
*
* <<Broadcom-WL-IPTag/Open:>>
*
- * Copyright (C) 1999-2019, Broadcom.
- *
+ * Copyright (C) 1999-2017, Broadcom Corporation
+ *
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
* under the terms of the GNU General Public License version 2 (the "GPL"),
* available at http://www.broadcom.com/licenses/GPLv2.php, with the
* following added to such license:
- *
+ *
* As a special exception, the copyright holders of this software give you
* permission to link this software with independent modules, and to copy and
* distribute the resulting executable under terms of your choice, provided that
* the license of that module. An independent module is a module which is not
* derived from this software. The special exception does not apply to any
* modifications of the software.
- *
+ *
* Notwithstanding the above, under no circumstances may you combine this
* software in any way with any other Broadcom software provided under a license
* other than the GPL, without Broadcom's express prior written consent.
static bool lastMessages = FALSE;
#define US_PRE_SEC 1000000
-#define DATA_UNIT_FOR_LOG_CNT 4
static void dhd_mschdbg_us_to_sec(uint32 time_h, uint32 time_l, uint32 *sec, uint32 *remain)
{
case WL_MSCH_PROFILER_EVENT_LOG:
{
- while (len >= (int)WL_MSCH_EVENT_LOG_HEAD_SIZE) {
+ while (len > 0) {
msch_event_log_profiler_event_data_t *p =
(msch_event_log_profiler_event_data_t *)data;
- /* TODO: How to parse MSCH if extended event tag is present ??? */
- prcd_event_log_hdr_t hdr;
int size = WL_MSCH_EVENT_LOG_HEAD_SIZE + p->hdr.count * sizeof(uint32);
- if (len < size || size > sizeof(msch_event_log_profiler_event_data_t)) {
- break;
- }
data += size;
len -= size;
dhd_mschdbg_us_to_sec(p->time_hi, p->time_lo, &s, &ss);
MSCH_EVENT_HEAD(0);
MSCH_EVENT(("%06d.%06d [wl%d]: ", s, ss, p->hdr.tag));
- bzero(&hdr, sizeof(hdr));
- hdr.tag = EVENT_LOG_TAG_MSCHPROFILE;
- hdr.count = p->hdr.count + 1;
- /* exclude LSB 2 bits which indicate binary/non-binary data */
- hdr.fmt_num = ntoh16(p->hdr.fmt_num) >> 2;
- hdr.fmt_num_raw = ntoh16(p->hdr.fmt_num);
- if (ntoh16(p->hdr.fmt_num) == DHD_OW_BI_RAW_EVENT_LOG_FMT) {
- hdr.binary_payload = TRUE;
- }
- dhd_dbg_verboselog_printf(dhdp, &hdr, raw_event_ptr, p->data, 0, 0);
+ p->hdr.tag = EVENT_LOG_TAG_MSCHPROFILE;
+ p->hdr.fmt_num = ntoh16(p->hdr.fmt_num);
+ dhd_dbg_verboselog_printf(dhdp, &p->hdr, raw_event_ptr, p->data);
}
lastMessages = TRUE;
break;
}
void
-wl_mschdbg_verboselog_handler(dhd_pub_t *dhdp, void *raw_event_ptr, prcd_event_log_hdr_t *plog_hdr,
- uint32 *log_ptr)
+wl_mschdbg_verboselog_handler(dhd_pub_t *dhdp, void *raw_event_ptr, int tag, uint32 *log_ptr)
{
- uint32 log_pyld_len;
head_log = "CONSOLE";
-
- if (plog_hdr->count == 0) {
- return;
- }
- log_pyld_len = (plog_hdr->count - 1) * DATA_UNIT_FOR_LOG_CNT;
-
- if (plog_hdr->tag == EVENT_LOG_TAG_MSCHPROFILE) {
+ if (tag == EVENT_LOG_TAG_MSCHPROFILE) {
msch_event_log_profiler_event_data_t *p =
(msch_event_log_profiler_event_data_t *)log_ptr;
- /* TODO: How to parse MSCH if extended event tag is present ??? */
- prcd_event_log_hdr_t hdr;
uint32 s, ss;
-
- if (log_pyld_len < OFFSETOF(msch_event_log_profiler_event_data_t, data) ||
- log_pyld_len > sizeof(msch_event_log_profiler_event_data_t)) {
- return;
- }
-
dhd_mschdbg_us_to_sec(p->time_hi, p->time_lo, &s, &ss);
MSCH_EVENT_HEAD(0);
MSCH_EVENT(("%06d.%06d [wl%d]: ", s, ss, p->hdr.tag));
- bzero(&hdr, sizeof(hdr));
- hdr.tag = EVENT_LOG_TAG_MSCHPROFILE;
- hdr.count = p->hdr.count + 1;
- /* exclude LSB 2 bits which indicate binary/non-binary data */
- hdr.fmt_num = ntoh16(p->hdr.fmt_num) >> 2;
- hdr.fmt_num_raw = ntoh16(p->hdr.fmt_num);
- if (ntoh16(p->hdr.fmt_num) == DHD_OW_BI_RAW_EVENT_LOG_FMT) {
- hdr.binary_payload = TRUE;
- }
- dhd_dbg_verboselog_printf(dhdp, &hdr, raw_event_ptr, p->data, 0, 0);
+ p->hdr.tag = EVENT_LOG_TAG_MSCHPROFILE;
+ p->hdr.fmt_num = ntoh16(p->hdr.fmt_num);
+ dhd_dbg_verboselog_printf(dhdp, &p->hdr, raw_event_ptr, p->data);
} else {
msch_collect_tlv_t *p = (msch_collect_tlv_t *)log_ptr;
int type = ntoh16(p->type);
int len = ntoh16(p->size);
-
- if (log_pyld_len < OFFSETOF(msch_collect_tlv_t, value) + len) {
- return;
- }
-
dhd_mschdbg_dump_data(dhdp, raw_event_ptr, type, p->value, len);
}
}
*
* <<Broadcom-WL-IPTag/Open:>>
*
- * Copyright (C) 1999-2019, Broadcom.
- *
+ * Copyright (C) 1999-2017, Broadcom Corporation
+ *
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
* under the terms of the GNU General Public License version 2 (the "GPL"),
* available at http://www.broadcom.com/licenses/GPLv2.php, with the
* following added to such license:
- *
+ *
* As a special exception, the copyright holders of this software give you
* permission to link this software with independent modules, and to copy and
* distribute the resulting executable under terms of your choice, provided that
* the license of that module. An independent module is a module which is not
* derived from this software. The special exception does not apply to any
* modifications of the software.
- *
+ *
* Notwithstanding the above, under no circumstances may you combine this
* software in any way with any other Broadcom software provided under a license
* other than the GPL, without Broadcom's express prior written consent.
#ifdef SHOW_LOGTRACE
extern void wl_mschdbg_event_handler(dhd_pub_t *dhdp, void *raw_event_ptr, int type,
void *data, int len);
-extern void wl_mschdbg_verboselog_handler(dhd_pub_t *dhdp, void *raw_event_ptr,
- prcd_event_log_hdr_t *plog_hdr, uint32 *log_ptr);
+extern void wl_mschdbg_verboselog_handler(dhd_pub_t *dhdp, void *raw_event_ptr, int tag,
+ uint32 *log_ptr);
#endif /* SHOW_LOGTRACE */
#endif /* _dhd_mschdbg_h_ */
* Provides type definitions and function prototypes used to link the
* DHD OS, bus, and protocol modules.
*
- * Copyright (C) 1999-2019, Broadcom.
- *
+ * Copyright (C) 1999-2017, Broadcom Corporation
+ *
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
* under the terms of the GNU General Public License version 2 (the "GPL"),
* available at http://www.broadcom.com/licenses/GPLv2.php, with the
* following added to such license:
- *
+ *
* As a special exception, the copyright holders of this software give you
* permission to link this software with independent modules, and to copy and
* distribute the resulting executable under terms of your choice, provided that
* the license of that module. An independent module is a module which is not
* derived from this software. The special exception does not apply to any
* modifications of the software.
- *
+ *
* Notwithstanding the above, under no circumstances may you combine this
* software in any way with any other Broadcom software provided under a license
* other than the GPL, without Broadcom's express prior written consent.
*
* <<Broadcom-WL-IPTag/Open:>>
*
- * $Id: dhd_msgbuf.c 825801 2019-06-17 10:51:10Z $
+ * $Id: dhd_msgbuf.c 704361 2017-06-13 08:50:38Z $
*/
+
#include <typedefs.h>
#include <osl.h>
#include <bcmutils.h>
#include <bcmmsgbuf.h>
#include <bcmendian.h>
-#include <bcmstdlib_s.h>
#include <dngl_stats.h>
#include <dhd.h>
#include <bcmpcie.h>
#include <dhd_pcie.h>
#include <dhd_config.h>
+#ifdef DHD_TIMESYNC
+#include <dhd_timesync.h>
+#endif /* DHD_TIMESYNC */
#if defined(DHD_LB)
#include <linux/cpu.h>
#define DHD_LB_WORK_SCHED (DHD_LB_WORKQ_SYNC * 2)
#endif /* DHD_LB */
-#include <etd.h>
#include <hnd_debug.h>
-#include <bcmtlv.h>
#include <hnd_armtrap.h>
-#include <dnglevent.h>
-#ifdef DHD_EWPR_VER2
-#include <dhd_bitpack.h>
-#endif /* DHD_EWPR_VER2 */
+#ifdef DHD_PKT_LOGGING
+#include <dhd_pktlog.h>
+#endif /* DHD_PKT_LOGGING */
extern char dhd_version[];
extern char fw_version[];
#define MSGBUF_IOCTL_ACK_PENDING (1<<0)
#define MSGBUF_IOCTL_RESP_PENDING (1<<1)
-#define DHD_IOCTL_REQ_PKTBUFSZ 2048
-#define MSGBUF_IOCTL_MAX_RQSTLEN (DHD_IOCTL_REQ_PKTBUFSZ - H2DRING_CTRL_SUB_ITEMSIZE)
-
#define DMA_ALIGN_LEN 4
#define DMA_D2H_SCRATCH_BUF_LEN 8
#ifdef BCM_HOST_BUF
#ifndef DMA_HOST_BUFFER_LEN
#define DMA_HOST_BUFFER_LEN 0x200000
-#endif // endif
+#endif
#endif /* BCM_HOST_BUF */
#define DHD_FLOWRING_IOCTL_BUFPOST_PKTSZ 8192
#define DHD_H2D_INFORING_MAX_BUF_POST 32
#define DHD_MAX_TSBUF_POST 8
-#define DHD_PROT_FUNCS 43
+#define DHD_PROT_FUNCS 41
/* Length of buffer in host for bus throughput measurement */
#define DHD_BUS_TPUT_BUF_LEN 2048
/* Giving room before ioctl_trans_id rollsover. */
#define BUFFER_BEFORE_ROLLOVER 300
-/* 512K memory + 32K registers */
-#define SNAPSHOT_UPLOAD_BUF_SIZE ((512 + 32) * 1024)
-
struct msgbuf_ring; /* ring context for common and flow rings */
/**
* Dongle advertizes host side sync mechanism requirements.
*/
-#define PCIE_D2H_SYNC_WAIT_TRIES (512U)
-#define PCIE_D2H_SYNC_NUM_OF_STEPS (5U)
+#define PCIE_D2H_SYNC_WAIT_TRIES (512UL)
+#define PCIE_D2H_SYNC_NUM_OF_STEPS (5UL)
#define PCIE_D2H_SYNC_DELAY (100UL) /* in terms of usecs */
-#define HWA_DB_TYPE_RXPOST (0x0050)
-#define HWA_DB_TYPE_TXCPLT (0x0060)
-#define HWA_DB_TYPE_RXCPLT (0x0170)
-#define HWA_DB_INDEX_VALUE(val) ((uint32)(val) << 16)
-
-#define HWA_ENAB_BITMAP_RXPOST (1U << 0) /* 1A */
-#define HWA_ENAB_BITMAP_RXCPLT (1U << 1) /* 2B */
-#define HWA_ENAB_BITMAP_TXCPLT (1U << 2) /* 4B */
-
/**
* Custom callback attached based upon D2H DMA Sync mode advertized by dongle.
*
typedef uint8 (* d2h_sync_cb_t)(dhd_pub_t *dhd, struct msgbuf_ring *ring,
volatile cmn_msg_hdr_t *msg, int msglen);
-/**
- * Custom callback attached based upon D2H DMA Sync mode advertized by dongle.
- * For EDL messages.
- *
- * On success: return cmn_msg_hdr_t::msg_type
- * On failure: return 0 (invalid msg_type)
- */
-#ifdef EWP_EDL
-typedef int (* d2h_edl_sync_cb_t)(dhd_pub_t *dhd, struct msgbuf_ring *ring,
- volatile cmn_msg_hdr_t *msg);
-#endif /* EWP_EDL */
-
/*
* +----------------------------------------------------------------------------
*
#define DHD_D2H_RINGID(offset) \
((offset) + BCMPCIE_H2D_COMMON_MSGRINGS)
+
#define DHD_DMAH_NULL ((void*)NULL)
/*
#define DHD_DMA_PAD (L1_CACHE_BYTES)
#else
#define DHD_DMA_PAD (128)
-#endif // endif
-
-/*
- * +----------------------------------------------------------------------------
- * Flowring Pool
- *
- * Unlike common rings, which are attached very early on (dhd_prot_attach),
- * flowrings are dynamically instantiated. Moreover, flowrings may require a
- * larger DMA-able buffer. To avoid issues with fragmented cache coherent
- * DMA-able memory, a pre-allocated pool of msgbuf_ring_t is allocated once.
- * The DMA-able buffers are attached to these pre-allocated msgbuf_ring.
- *
- * Each DMA-able buffer may be allocated independently, or may be carved out
- * of a single large contiguous region that is registered with the protocol
- * layer into flowrings_dma_buf. On a 64bit platform, this contiguous region
- * may not span 0x00000000FFFFFFFF (avoid dongle side 64bit ptr arithmetic).
- *
- * No flowring pool action is performed in dhd_prot_attach(), as the number
- * of h2d rings is not yet known.
- *
- * In dhd_prot_init(), the dongle advertized number of h2d rings is used to
- * determine the number of flowrings required, and a pool of msgbuf_rings are
- * allocated and a DMA-able buffer (carved or allocated) is attached.
- * See: dhd_prot_flowrings_pool_attach()
- *
- * A flowring msgbuf_ring object may be fetched from this pool during flowring
- * creation, using the flowid. Likewise, flowrings may be freed back into the
- * pool on flowring deletion.
- * See: dhd_prot_flowrings_pool_fetch(), dhd_prot_flowrings_pool_release()
- *
- * In dhd_prot_detach(), the flowring pool is detached. The DMA-able buffers
- * are detached (returned back to the carved region or freed), and the pool of
- * msgbuf_ring and any objects allocated against it are freed.
- * See: dhd_prot_flowrings_pool_detach()
- *
- * In dhd_prot_reset(), the flowring pool is simply reset by returning it to a
- * state as-if upon an attach. All DMA-able buffers are retained.
- * Following a dhd_prot_reset(), in a subsequent dhd_prot_init(), the flowring
- * pool attach will notice that the pool persists and continue to use it. This
- * will avoid the case of a fragmented DMA-able region.
- *
- * +----------------------------------------------------------------------------
- */
-
-/* Conversion of a flowid to a flowring pool index */
-#define DHD_FLOWRINGS_POOL_OFFSET(flowid) \
- ((flowid) - BCMPCIE_H2D_COMMON_MSGRINGS)
-
-/* Fetch the msgbuf_ring_t from the flowring pool given a flowid */
-#define DHD_RING_IN_FLOWRINGS_POOL(prot, flowid) \
- (msgbuf_ring_t*)((prot)->h2d_flowrings_pool) + \
- DHD_FLOWRINGS_POOL_OFFSET(flowid)
-
-/* Traverse each flowring in the flowring pool, assigning ring and flowid */
-#define FOREACH_RING_IN_FLOWRINGS_POOL(prot, ring, flowid, total_flowrings) \
- for ((flowid) = DHD_FLOWRING_START_FLOWID, \
- (ring) = DHD_RING_IN_FLOWRINGS_POOL(prot, flowid); \
- (flowid) < ((total_flowrings) + DHD_FLOWRING_START_FLOWID); \
- (ring)++, (flowid)++)
+#endif
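The flowring-pool comment above reduces to simple offset arithmetic between a flowid and a slot in the preallocated array; a worked illustration, taking BCMPCIE_H2D_COMMON_MSGRINGS (and the matching DHD_FLOWRING_START_FLOWID) as 2, i.e. the control and rx-post submission rings — treat that value as an assumption here.

/* Worked example of the pool indexing described above, assuming
 * BCMPCIE_H2D_COMMON_MSGRINGS == 2:
 *   flowid 2 -> DHD_FLOWRINGS_POOL_OFFSET(2) == 0, the first pool slot
 *   flowid 7 -> pool slot 5
 * FOREACH_RING_IN_FLOWRINGS_POOL() then walks slots 0..N-1 while handing
 * flowids 2..N+1 back to the loop body.
 */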
/* Used in loopback tests */
typedef struct dhd_dmaxfer {
uint32 len;
bool in_progress;
uint64 start_usec;
- uint64 time_taken;
- uint32 d11_lpbk;
- int status;
+ uint32 d11_lpbk;
} dhd_dmaxfer_t;
/**
#endif /* TXP_FLUSH_NITEMS */
uint8 ring_type;
- uint16 hwa_db_type; /* hwa type non-zero for Data path rings */
uint8 n_completion_ids;
bool create_pending;
uint16 create_req_id;
uint16 compeltion_ring_ids[MAX_COMPLETION_RING_IDS_ASSOCIATED];
uchar name[RING_NAME_MAX_LENGTH];
uint32 ring_mem_allocated;
- void *ring_lock;
} msgbuf_ring_t;
#define DHD_RING_BGN_VA(ring) ((ring)->dma_buf.va)
((uint8 *)(DHD_RING_BGN_VA((ring))) + \
(((ring)->max_items - 1) * (ring)->item_len))
+
+
/* This can be overwritten by module parameter defined in dhd_linux.c
* or by dhd iovar h2d_max_txpost.
*/
/** DHD protocol handle. Is an opaque type to other DHD software layers. */
typedef struct dhd_prot {
osl_t *osh; /* OSL handle */
- uint16 rxbufpost_sz;
uint16 rxbufpost;
uint16 max_rxbufpost;
uint16 max_eventbufpost;
uint16 cur_ts_bufs_posted;
/* Flow control mechanism based on active transmits pending */
- osl_atomic_t active_tx_count; /* increments/decrements on every packet tx/tx_status */
+ uint16 active_tx_count; /* increments on every packet tx, and decrements on tx_status */
uint16 h2d_max_txpost;
uint16 txp_threshold; /* optimization to write "n" tx items at a time to ring */
msgbuf_ring_t d2hring_rx_cpln; /* D2H Rx complete message ring */
msgbuf_ring_t *h2dring_info_subn; /* H2D info submission ring */
msgbuf_ring_t *d2hring_info_cpln; /* D2H info completion ring */
- msgbuf_ring_t *d2hring_edl; /* D2H Enhanced Debug Lane (EDL) ring */
msgbuf_ring_t *h2d_flowrings_pool; /* Pool of preallocated flowings */
dhd_dma_buf_t flowrings_dma_buf; /* Contiguous DMA buffer for flowrings */
uint32 flowring_num;
d2h_sync_cb_t d2h_sync_cb; /* Sync on D2H DMA done: SEQNUM or XORCSUM */
-#ifdef EWP_EDL
- d2h_edl_sync_cb_t d2h_edl_sync_cb; /* Sync on EDL D2H DMA done: SEQNUM or XORCSUM */
-#endif /* EWP_EDL */
ulong d2h_sync_wait_max; /* max number of wait loops to receive one msg */
ulong d2h_sync_wait_tot; /* total wait loops */
void *pktid_ctrl_map; /* a pktid maps to a packet and its metadata */
void *pktid_rx_map; /* pktid map for rx path */
void *pktid_tx_map; /* pktid map for tx path */
+ void *rx_lock; /* rx pktid map and rings access protection */
bool metadata_dbg;
void *pktid_map_handle_ioctl;
-#ifdef DHD_MAP_PKTID_LOGGING
- void *pktid_dma_map; /* pktid map for DMA MAP */
- void *pktid_dma_unmap; /* pktid map for DMA UNMAP */
-#endif /* DHD_MAP_PKTID_LOGGING */
- uint32 pktid_depleted_cnt; /* pktid depleted count */
- /* netif tx queue stop count */
- uint8 pktid_txq_stop_cnt;
- /* netif tx queue start count */
- uint8 pktid_txq_start_cnt;
- uint64 ioctl_fillup_time; /* timestamp for ioctl fillup */
- uint64 ioctl_ack_time; /* timestamp for ioctl ack */
- uint64 ioctl_cmplt_time; /* timestamp for ioctl completion */
/* Applications/utilities can read tx and rx metadata using IOVARs */
uint16 rx_metadata_offset;
uint16 tx_metadata_offset;
+
#if defined(DHD_D2H_SOFT_DOORBELL_SUPPORT)
/* Host's soft doorbell configuration */
bcmpcie_soft_doorbell_t soft_doorbell[BCMPCIE_D2H_COMMON_MSGRINGS];
dhd_dma_buf_t fw_trap_buf; /* firmware trap buffer */
- uint32 host_ipc_version; /* Host sypported IPC rev */
+ uint32 host_ipc_version; /* Host supported IPC rev */
uint32 device_ipc_version; /* FW supported IPC rev */
uint32 active_ipc_version; /* Host advertised IPC rev */
dhd_dma_buf_t hostts_req_buf; /* For holding host timestamp request buf */
bool hostts_req_buf_inuse;
bool rx_ts_log_enabled;
bool tx_ts_log_enabled;
- bool no_retry;
- bool no_aggr;
- bool fixed_rate;
- dhd_dma_buf_t host_scb_buf; /* scb host offload buffer */
-#ifdef DHD_HP2P
- msgbuf_ring_t *d2hring_hp2p_txcpl; /* D2H HPP Tx completion ring */
- msgbuf_ring_t *d2hring_hp2p_rxcpl; /* D2H HPP Rx completion ring */
-#endif /* DHD_HP2P */
- bool no_tx_resource;
} dhd_prot_t;
-#ifdef DHD_EWPR_VER2
-#define HANG_INFO_BASE64_BUFFER_SIZE 640
-#endif // endif
-
-#ifdef DHD_DUMP_PCIE_RINGS
-static
-int dhd_ring_write(dhd_pub_t *dhd, msgbuf_ring_t *ring, void *file,
- const void *user_buf, unsigned long *file_posn);
-#ifdef EWP_EDL
-static
-int dhd_edl_ring_hdr_write(dhd_pub_t *dhd, msgbuf_ring_t *ring, void *file, const void *user_buf,
- unsigned long *file_posn);
-#endif /* EWP_EDL */
-#endif /* DHD_DUMP_PCIE_RINGS */
-
-extern bool dhd_timesync_delay_post_bufs(dhd_pub_t *dhdp);
extern void dhd_schedule_dmaxfer_free(dhd_pub_t* dhdp, dmaxref_mem_map_t *dmmap);
+
+static atomic_t dhd_msgbuf_rxbuf_post_event_bufs_running = ATOMIC_INIT(0);
+
/* Convert a dmaaddr_t to a base_addr with htol operations */
static INLINE void dhd_base_addr_htolpa(sh_addr_t *base_addr, dmaaddr_t pa);
/* APIs for managing a DMA-able buffer */
static int dhd_dma_buf_audit(dhd_pub_t *dhd, dhd_dma_buf_t *dma_buf);
+static int dhd_dma_buf_alloc(dhd_pub_t *dhd, dhd_dma_buf_t *dma_buf, uint32 buf_len);
static void dhd_dma_buf_reset(dhd_pub_t *dhd, dhd_dma_buf_t *dma_buf);
+static void dhd_dma_buf_free(dhd_pub_t *dhd, dhd_dma_buf_t *dma_buf);
/* msgbuf ring management */
static int dhd_prot_ring_attach(dhd_pub_t *dhd, msgbuf_ring_t *ring,
static void dhd_prot_return_rxbuf(dhd_pub_t *dhd, uint32 pktid, uint32 rxcnt);
+
/* D2H Message handling */
static int dhd_prot_process_msgtype(dhd_pub_t *dhd, msgbuf_ring_t *ring, uint8 *buf, uint32 len);
static void dhd_prot_process_d2h_ring_config_complete(dhd_pub_t *dhd, void *msg);
static void dhd_prot_process_d2h_ring_create_complete(dhd_pub_t *dhd, void *buf);
static void dhd_prot_process_h2d_ring_create_complete(dhd_pub_t *dhd, void *buf);
-static void dhd_prot_process_infobuf_complete(dhd_pub_t *dhd, void* buf);
static void dhd_prot_process_d2h_mb_data(dhd_pub_t *dhd, void* buf);
+static void dhd_prot_process_infobuf_complete(dhd_pub_t *dhd, void* buf);
static void dhd_prot_detach_info_rings(dhd_pub_t *dhd);
-#ifdef DHD_HP2P
-static void dhd_prot_detach_hp2p_rings(dhd_pub_t *dhd);
-#endif /* DHD_HP2P */
-#ifdef EWP_EDL
-static void dhd_prot_detach_edl_rings(dhd_pub_t *dhd);
-#endif // endif
static void dhd_prot_process_d2h_host_ts_complete(dhd_pub_t *dhd, void* buf);
-static void dhd_prot_process_snapshot_complete(dhd_pub_t *dhd, void *buf);
-
-#ifdef DHD_HP2P
-static void dhd_update_hp2p_rxstats(dhd_pub_t *dhd, host_rxbuf_cmpl_t *rxstatus);
-static void dhd_update_hp2p_txstats(dhd_pub_t *dhd, host_txbuf_cmpl_t *txstatus);
-static void dhd_calc_hp2p_burst(dhd_pub_t *dhd, msgbuf_ring_t *ring, uint16 flowid);
-static void dhd_update_hp2p_txdesc(dhd_pub_t *dhd, host_txbuf_post_t *txdesc);
-#endif // endif
+
typedef void (*dhd_msgbuf_func_t)(dhd_pub_t *dhd, void *msg);
/** callback functions for messages generated by the dongle */
NULL, /* MSG_TYPE_HOSTTIMSTAMP */
dhd_prot_process_d2h_host_ts_complete, /* MSG_TYPE_HOSTTIMSTAMP_CMPLT */
dhd_prot_process_fw_timestamp, /* MSG_TYPE_FIRMWARE_TIMESTAMP */
- NULL, /* MSG_TYPE_SNAPSHOT_UPLOAD */
- dhd_prot_process_snapshot_complete, /* MSG_TYPE_SNAPSHOT_CMPLT */
};
+
#ifdef DHD_RX_CHAINING
#define PKT_CTF_CHAINABLE(dhd, ifidx, evh, prio, h_sa, h_da, h_prio) \
#endif /* DHD_RX_CHAINING */
-#define DHD_LPBKDTDUMP_ON() (dhd_msg_level & DHD_LPBKDTDUMP_VAL)
-
static void dhd_prot_h2d_sync_init(dhd_pub_t *dhd);
+/**
+ * D2H DMA to completion callback handlers. Based on the mode advertised by the
+ * dongle through the PCIE shared region, the appropriate callback will be
+ * registered in the proto layer to be invoked prior to processing any message
+ * from a D2H DMA ring. If the dongle uses a read barrier or another mode that
+ * does not require host participation, then a noop callback handler will be
+ * bound that simply returns the msg_type.
+ */
+static void dhd_prot_d2h_sync_livelock(dhd_pub_t *dhd, uint32 msg_seqnum, msgbuf_ring_t *ring,
+ uint32 tries, volatile uchar *msg, int msglen);
+static uint8 dhd_prot_d2h_sync_seqnum(dhd_pub_t *dhd, msgbuf_ring_t *ring,
+ volatile cmn_msg_hdr_t *msg, int msglen);
+static uint8 dhd_prot_d2h_sync_xorcsum(dhd_pub_t *dhd, msgbuf_ring_t *ring,
+ volatile cmn_msg_hdr_t *msg, int msglen);
+static uint8 dhd_prot_d2h_sync_none(dhd_pub_t *dhd, msgbuf_ring_t *ring,
+ volatile cmn_msg_hdr_t *msg, int msglen);
+static void dhd_prot_d2h_sync_init(dhd_pub_t *dhd);
+static int dhd_send_d2h_ringcreate(dhd_pub_t *dhd, msgbuf_ring_t *ring_to_create);
+static int dhd_send_h2d_ringcreate(dhd_pub_t *dhd, msgbuf_ring_t *ring_to_create);
+static uint16 dhd_get_max_flow_rings(dhd_pub_t *dhd);
+
bool
dhd_prot_is_cmpl_ring_empty(dhd_pub_t *dhd, void *prot_info)
{
ret = (wr == rd) ? TRUE : FALSE;
return ret;
}
-
-void
-dhd_prot_dump_ring_ptrs(void *prot_info)
-{
- msgbuf_ring_t *ring = (msgbuf_ring_t *)prot_info;
- DHD_ERROR(("%s curr_rd: %d rd: %d wr: %d \n", __FUNCTION__,
- ring->curr_rd, ring->rd, ring->wr));
-}
-
uint16
dhd_prot_get_h2d_max_txpost(dhd_pub_t *dhd)
{
{
h2d_max_txpost = max_txpost;
}
-/**
- * D2H DMA to completion callback handlers. Based on the mode advertised by the
- * dongle through the PCIE shared region, the appropriate callback will be
- * registered in the proto layer to be invoked prior to precessing any message
- * from a D2H DMA ring. If the dongle uses a read barrier or another mode that
- * does not require host participation, then a noop callback handler will be
- * bound that simply returns the msg_type.
- */
-static void dhd_prot_d2h_sync_livelock(dhd_pub_t *dhd, uint32 msg_seqnum, msgbuf_ring_t *ring,
- uint32 tries, volatile uchar *msg, int msglen);
-static uint8 dhd_prot_d2h_sync_seqnum(dhd_pub_t *dhd, msgbuf_ring_t *ring,
- volatile cmn_msg_hdr_t *msg, int msglen);
-static uint8 dhd_prot_d2h_sync_xorcsum(dhd_pub_t *dhd, msgbuf_ring_t *ring,
- volatile cmn_msg_hdr_t *msg, int msglen);
-static uint8 dhd_prot_d2h_sync_none(dhd_pub_t *dhd, msgbuf_ring_t *ring,
- volatile cmn_msg_hdr_t *msg, int msglen);
-static void dhd_prot_d2h_sync_init(dhd_pub_t *dhd);
-static int dhd_send_d2h_ringcreate(dhd_pub_t *dhd, msgbuf_ring_t *ring_to_create,
- uint16 ring_type, uint32 id);
-static int dhd_send_h2d_ringcreate(dhd_pub_t *dhd, msgbuf_ring_t *ring_to_create,
- uint8 type, uint32 id);
-
/**
* dhd_prot_d2h_sync_livelock - when the host determines that a DMA transfer has
* not completed, a livelock condition occurs. Host will avert this livelock by
volatile uchar *msg, int msglen)
{
uint32 ring_seqnum = ring->seqnum;
-
- if (dhd_query_bus_erros(dhd)) {
- return;
- }
-
DHD_ERROR((
"LIVELOCK DHD<%p> ring<%s> msg_seqnum<%u> ring_seqnum<%u:%u> tries<%u> max<%lu>"
- " tot<%lu> dma_buf va<%p> msg<%p> curr_rd<%d> rd<%d> wr<%d>\n",
+ " tot<%lu> dma_buf va<%p> msg<%p> curr_rd<%d>\n",
dhd, ring->name, msg_seqnum, ring_seqnum, ring_seqnum% D2H_EPOCH_MODULO, tries,
dhd->prot->d2h_sync_wait_max, dhd->prot->d2h_sync_wait_tot,
- ring->dma_buf.va, msg, ring->curr_rd, ring->rd, ring->wr));
-
- dhd_prhex("D2H MsgBuf Failure", msg, msglen, DHD_ERROR_VAL);
-
- /* Try to resume if already suspended or suspend in progress */
-
- /* Skip if still in suspended or suspend in progress */
- if (DHD_BUS_CHECK_SUSPEND_OR_ANY_SUSPEND_IN_PROGRESS(dhd)) {
- DHD_ERROR(("%s: bus is in suspend(%d) or suspending(0x%x) state, so skip\n",
- __FUNCTION__, dhd->busstate, dhd->dhd_bus_busy_state));
- goto exit;
- }
+ ring->dma_buf.va, msg, ring->curr_rd));
+ prhex("D2H MsgBuf Failure", (volatile uchar *)msg, msglen);
dhd_bus_dump_console_buffer(dhd->bus);
dhd_prot_debug_info_print(dhd);
}
#endif /* DHD_FW_COREDUMP */
-exit:
dhd_schedule_reset(dhd);
- dhd->livelock_occured = TRUE;
+#ifdef SUPPORT_LINKDOWN_RECOVERY
+#ifdef CONFIG_ARCH_MSM
+ dhd->bus->no_cfg_restore = 1;
+#endif /* CONFIG_ARCH_MSM */
+ dhd->hang_reason = HANG_REASON_MSGBUF_LIVELOCK;
+ dhd_os_send_hang_message(dhd);
+#endif /* SUPPORT_LINKDOWN_RECOVERY */
}
/**
msg_seqnum = *marker;
if (ltoh32(msg_seqnum) == ring_seqnum) { /* dma upto last word done */
ring->seqnum++; /* next expected sequence number */
- /* Check for LIVELOCK induce flag, which is set by firing
- * dhd iovar to induce LIVELOCK error. If flag is set,
- * MSG_TYPE_INVALID is returned, which results in to LIVELOCK error.
- */
- if (dhd->dhd_induce_error != DHD_INDUCE_LIVELOCK) {
- goto dma_completed;
- }
+ goto dma_completed;
}
- total_tries = (uint32)(((step-1) * PCIE_D2H_SYNC_WAIT_TRIES) + tries);
+ total_tries = ((step-1) * PCIE_D2H_SYNC_WAIT_TRIES) + tries;
if (total_tries > prot->d2h_sync_wait_max)
prot->d2h_sync_wait_max = total_tries;
*/
for (step = 1; step <= PCIE_D2H_SYNC_NUM_OF_STEPS; step++) {
for (tries = 0; tries < PCIE_D2H_SYNC_WAIT_TRIES; tries++) {
- /* First verify if the seqnumber has been updated,
- * if yes, then only check xorcsum.
- * Once seqnum and xorcsum is proper that means
- * complete message has arrived.
- */
- if (msg->epoch == ring_seqnum) {
- prot_checksum = bcm_compute_xor32((volatile uint32 *)msg,
- num_words);
- if (prot_checksum == 0U) { /* checksum is OK */
+ prot_checksum = bcm_compute_xor32((volatile uint32 *)msg, num_words);
+ if (prot_checksum == 0U) { /* checksum is OK */
+ if (msg->epoch == ring_seqnum) {
ring->seqnum++; /* next expected sequence number */
- /* Check for LIVELOCK induce flag, which is set by firing
- * dhd iovar to induce LIVELOCK error. If flag is set,
- * MSG_TYPE_INVALID is returned, which results in to
- * LIVELOCK error.
- */
- if (dhd->dhd_induce_error != DHD_INDUCE_LIVELOCK) {
- goto dma_completed;
- }
+ goto dma_completed;
}
}
dhd_prot_d2h_sync_none(dhd_pub_t *dhd, msgbuf_ring_t *ring,
volatile cmn_msg_hdr_t *msg, int msglen)
{
- /* Check for LIVELOCK induce flag, which is set by firing
- * dhd iovar to induce LIVELOCK error. If flag is set,
- * MSG_TYPE_INVALID is returned, which results in to LIVELOCK error.
- */
- if (dhd->dhd_induce_error == DHD_INDUCE_LIVELOCK) {
- DHD_ERROR(("%s: Inducing livelock\n", __FUNCTION__));
- return MSG_TYPE_INVALID;
- } else {
- return msg->msg_type;
- }
-}
-
-#ifdef EWP_EDL
-/**
- * dhd_prot_d2h_sync_edl - Sync on a D2H DMA completion by validating the cmn_msg_hdr_t
- * header values at both the beginning and end of the payload.
- * The cmn_msg_hdr_t is placed at the start and end of the payload
- * in each work item in the EDL ring.
- * Dongle will place a seqnum inside the cmn_msg_hdr_t 'epoch' field
- * and the length of the payload in the 'request_id' field.
- * Structure of each work item in the EDL ring:
- * | cmn_msg_hdr_t | payload (var len) | cmn_msg_hdr_t |
- * NOTE: - it was felt that calculating xorcsum for the entire payload (max length of 1648 bytes) is
- * too costly on the dongle side and might take up too many ARM cycles,
- * hence the xorcsum sync method is not being used for EDL ring.
- */
-static int
-BCMFASTPATH(dhd_prot_d2h_sync_edl)(dhd_pub_t *dhd, msgbuf_ring_t *ring,
- volatile cmn_msg_hdr_t *msg)
-{
- uint32 tries;
- int msglen = 0, len = 0;
- uint32 ring_seqnum = ring->seqnum % D2H_EPOCH_MODULO;
- dhd_prot_t *prot = dhd->prot;
- uint32 step = 0;
- uint32 delay = PCIE_D2H_SYNC_DELAY;
- uint32 total_tries = 0;
- volatile cmn_msg_hdr_t *trailer = NULL;
- volatile uint8 *buf = NULL;
- bool valid_msg = FALSE;
-
- BCM_REFERENCE(delay);
- /*
- * For retries we have to make some sort of stepper algorithm.
- * We see that every time when the Dongle comes out of the D3
- * Cold state, the first D2H mem2mem DMA takes more time to
- * complete, leading to livelock issues.
- *
- * Case 1 - Apart from Host CPU some other bus master is
- * accessing the DDR port, probably a page close to the ring,
- * so PCIE does not get a chance to update the memory.
- * Solution - Increase the number of tries.
- *
- * Case 2 - The 50usec delay given by the Host CPU is not
- * sufficient for the PCIe RC to start its work.
- * In this case the breathing time of 50usec given by
- * the Host CPU is not sufficient.
- * Solution: Increase the delay in a stepper fashion.
- * This is done to ensure that no
- * unwanted extra delay is introduced in normal conditions.
- */
- for (step = 1; step <= PCIE_D2H_SYNC_NUM_OF_STEPS; step++) {
- for (tries = 0; tries < PCIE_D2H_SYNC_WAIT_TRIES; tries++) {
- /* First verify if the seqnumber has been updated,
- * if yes, only then validate the header and trailer.
- * Once seqnum, header and trailer have been validated, it means
- * that the complete message has arrived.
- */
- valid_msg = FALSE;
- if (msg->epoch == ring_seqnum &&
- msg->msg_type == MSG_TYPE_INFO_PYLD &&
- msg->request_id > 0 &&
- msg->request_id <= ring->item_len) {
- /* proceed to check trailer only if header is valid */
- buf = (volatile uint8 *)msg;
- msglen = sizeof(cmn_msg_hdr_t) + msg->request_id;
- buf += msglen;
- if (msglen + sizeof(cmn_msg_hdr_t) <= ring->item_len) {
- trailer = (volatile cmn_msg_hdr_t *)buf;
- valid_msg = (trailer->epoch == ring_seqnum) &&
- (trailer->msg_type == msg->msg_type) &&
- (trailer->request_id == msg->request_id);
- if (!valid_msg) {
- DHD_TRACE(("%s:invalid trailer! seqnum=%u;reqid=%u"
- " expected, seqnum=%u; reqid=%u. Retrying... \n",
- __FUNCTION__, trailer->epoch, trailer->request_id,
- msg->epoch, msg->request_id));
- }
- } else {
- DHD_TRACE(("%s: invalid payload length (%u)! Retrying.. \n",
- __FUNCTION__, msg->request_id));
- }
-
- if (valid_msg) {
- /* data is OK */
- ring->seqnum++; /* next expected sequence number */
- if (dhd->dhd_induce_error != DHD_INDUCE_LIVELOCK) {
- goto dma_completed;
- }
- }
- } else {
- DHD_TRACE(("%s: wrong hdr, seqnum expected %u, got %u."
- " msg_type=0x%x, request_id=%u."
- " Retrying...\n",
- __FUNCTION__, ring_seqnum, msg->epoch,
- msg->msg_type, msg->request_id));
- }
-
- total_tries = ((step-1) * PCIE_D2H_SYNC_WAIT_TRIES) + tries;
-
- if (total_tries > prot->d2h_sync_wait_max)
- prot->d2h_sync_wait_max = total_tries;
-
- OSL_CACHE_INV(msg, msglen); /* invalidate and try again */
- OSL_CPU_RELAX(); /* CPU relax for msg_seqnum value to update */
- OSL_DELAY(delay * step); /* Add stepper delay */
-
- } /* for PCIE_D2H_SYNC_WAIT_TRIES */
- } /* for PCIE_D2H_SYNC_NUM_OF_STEPS */
-
- DHD_ERROR(("%s: EDL header check fails !\n", __FUNCTION__));
- DHD_ERROR(("%s: header: seqnum=%u; expected-seqnum=%u"
- " msgtype=0x%x; expected-msgtype=0x%x"
- " length=%u; expected-max-length=%u", __FUNCTION__,
- msg->epoch, ring_seqnum, msg->msg_type, MSG_TYPE_INFO_PYLD,
- msg->request_id, ring->item_len));
- dhd_prhex("msg header bytes: ", (volatile uchar *)msg, sizeof(*msg), DHD_ERROR_VAL);
- if (trailer && msglen > 0 &&
- (msglen + sizeof(cmn_msg_hdr_t)) <= ring->item_len) {
- DHD_ERROR(("%s: trailer: seqnum=%u; expected-seqnum=%u"
- " msgtype=0x%x; expected-msgtype=0x%x"
- " length=%u; expected-length=%u", __FUNCTION__,
- trailer->epoch, ring_seqnum, trailer->msg_type, MSG_TYPE_INFO_PYLD,
- trailer->request_id, msg->request_id));
- dhd_prhex("msg trailer bytes: ", (volatile uchar *)trailer,
- sizeof(*trailer), DHD_ERROR_VAL);
- }
-
- if ((msglen + sizeof(cmn_msg_hdr_t)) <= ring->item_len)
- len = msglen + sizeof(cmn_msg_hdr_t);
- else
- len = ring->item_len;
-
- dhd_prot_d2h_sync_livelock(dhd, msg->epoch, ring, total_tries,
- (volatile uchar *) msg, len);
-
- ring->seqnum++; /* skip this message */
- return BCME_ERROR; /* invalid msg_type 0 -> noop callback */
-
-dma_completed:
- DHD_TRACE(("%s: EDL header check pass, seqnum=%u; reqid=%u\n", __FUNCTION__,
- msg->epoch, msg->request_id));
-
- prot->d2h_sync_wait_tot += tries;
- return BCME_OK;
-}
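The stepper loop above bounds how long the host polls a work item before declaring a livelock; a rough worst-case figure from the constants defined earlier in this file, ignoring the per-try cache invalidate and OSL_CPU_RELAX() overhead.

/* Rough worst-case wait before dhd_prot_d2h_sync_livelock() is invoked,
 * with PCIE_D2H_SYNC_WAIT_TRIES = 512, PCIE_D2H_SYNC_NUM_OF_STEPS = 5,
 * PCIE_D2H_SYNC_DELAY = 100 usec and a per-try delay of (delay * step):
 *   sum over steps 1..5 of (512 * 100 usec * step)
 *   = 512 * 100 usec * (1 + 2 + 3 + 4 + 5) = 768,000 usec, roughly 0.77 s.
 */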
-
-/**
- * dhd_prot_d2h_sync_edl_none - Dongle ensure that the DMA will complete and host
- * need to try to sync. This noop sync handler will be bound when the dongle
- * advertises that neither the SEQNUM nor XORCSUM mode of DMA sync is required.
- */
-static int BCMFASTPATH
-dhd_prot_d2h_sync_edl_none(dhd_pub_t *dhd, msgbuf_ring_t *ring,
- volatile cmn_msg_hdr_t *msg)
-{
- /* Check for LIVELOCK induce flag, which is set by firing
- * dhd iovar to induce LIVELOCK error. If flag is set,
- * MSG_TYPE_INVALID is returned, which results in to LIVELOCK error.
- */
- if (dhd->dhd_induce_error == DHD_INDUCE_LIVELOCK) {
- DHD_ERROR(("%s: Inducing livelock\n", __FUNCTION__));
- return BCME_ERROR;
- } else {
- if (msg->msg_type == MSG_TYPE_INFO_PYLD)
- return BCME_OK;
- else
- return msg->msg_type;
- }
+ return msg->msg_type;
}
-#endif /* EWP_EDL */
INLINE void
dhd_wakeup_ioctl_event(dhd_pub_t *dhd, dhd_ioctl_recieved_status_t reason)
prot->d2hring_rx_cpln.seqnum = D2H_EPOCH_INIT_VAL;
prot->d2hring_rx_cpln.current_phase = BCMPCIE_CMNHDR_PHASE_BIT_INIT;
- if (HWA_ACTIVE(dhd)) {
- prot->d2hring_tx_cpln.hwa_db_type =
- (dhd->bus->hwa_enab_bmap & HWA_ENAB_BITMAP_TXCPLT) ? HWA_DB_TYPE_TXCPLT : 0;
- prot->d2hring_rx_cpln.hwa_db_type =
- (dhd->bus->hwa_enab_bmap & HWA_ENAB_BITMAP_RXCPLT) ? HWA_DB_TYPE_RXCPLT : 0;
- DHD_ERROR(("%s: TXCPLT hwa_db_type:0x%x RXCPLT hwa_db_type:0x%x\n",
- __FUNCTION__, prot->d2hring_tx_cpln.hwa_db_type,
- prot->d2hring_rx_cpln.hwa_db_type));
- }
-
if (dhd->d2h_sync_mode & PCIE_SHARED_D2H_SYNC_SEQNUM) {
prot->d2h_sync_cb = dhd_prot_d2h_sync_seqnum;
-#ifdef EWP_EDL
- prot->d2h_edl_sync_cb = dhd_prot_d2h_sync_edl;
-#endif /* EWP_EDL */
DHD_ERROR(("%s(): D2H sync mechanism is SEQNUM \r\n", __FUNCTION__));
} else if (dhd->d2h_sync_mode & PCIE_SHARED_D2H_SYNC_XORCSUM) {
prot->d2h_sync_cb = dhd_prot_d2h_sync_xorcsum;
-#ifdef EWP_EDL
- prot->d2h_edl_sync_cb = dhd_prot_d2h_sync_edl;
-#endif /* EWP_EDL */
DHD_ERROR(("%s(): D2H sync mechanism is XORCSUM \r\n", __FUNCTION__));
} else {
prot->d2h_sync_cb = dhd_prot_d2h_sync_none;
-#ifdef EWP_EDL
- prot->d2h_edl_sync_cb = dhd_prot_d2h_sync_edl_none;
-#endif /* EWP_EDL */
DHD_ERROR(("%s(): D2H sync mechanism is NONE \r\n", __FUNCTION__));
}
}
{
dhd_prot_t *prot = dhd->prot;
prot->h2dring_rxp_subn.seqnum = H2D_EPOCH_INIT_VAL;
-
- if (HWA_ACTIVE(dhd)) {
- prot->h2dring_rxp_subn.hwa_db_type =
- (dhd->bus->hwa_enab_bmap & HWA_ENAB_BITMAP_RXPOST) ? HWA_DB_TYPE_RXPOST : 0;
- DHD_ERROR(("%s: RXPOST hwa_db_type:0x%x\n",
- __FUNCTION__, prot->d2hring_tx_cpln.hwa_db_type));
- }
-
prot->h2dring_rxp_subn.current_phase = 0;
prot->h2dring_ctrl_subn.seqnum = H2D_EPOCH_INIT_VAL;
/* +----------------- End of PCIE DHD H2D DMA SYNC ------------------------+ */
+
/*
* +---------------------------------------------------------------------------+
* PCIE DMA-able buffer. Sets up a dhd_dma_buf_t object, which includes the
base_addr->high_addr = htol32(PHYSADDRHI(pa));
}
+
/**
* dhd_dma_buf_audit - Any audits on a DHD DMA Buffer.
*/
* returns BCME_OK=0 on success
* returns non-zero negative error value on failure.
*/
-int
+static int
dhd_dma_buf_alloc(dhd_pub_t *dhd, dhd_dma_buf_t *dma_buf, uint32 buf_len)
{
uint32 dma_pad = 0;
osl_t *osh = dhd->osh;
uint16 dma_align = DMA_ALIGN_LEN;
- uint32 rem = 0;
+
ASSERT(dma_buf != NULL);
ASSERT(dma_buf->va == NULL);
ASSERT(dma_buf->len == 0);
- /* Pad the buffer length to align to cacheline size. */
- rem = (buf_len % DHD_DMA_PAD);
- dma_pad = rem ? (DHD_DMA_PAD - rem) : 0;
-
+ /* Pad the buffer length by one extra cacheline size.
+ * Required for D2H direction.
+ */
+ dma_pad = (buf_len % DHD_DMA_PAD) ? DHD_DMA_PAD : 0;
dma_buf->va = DMA_ALLOC_CONSISTENT(osh, buf_len + dma_pad,
dma_align, &dma_buf->_alloced, &dma_buf->pa, &dma_buf->dmah);
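/* Hedged worked example of the padding above, taking DHD_DMA_PAD as 128
 * (the non-64-bit value; on 64-bit builds it is L1_CACHE_BYTES):
 *   buf_len = 100 -> unaligned, dma_pad = 128, allocation = 228 bytes
 *   buf_len = 256 -> already aligned, dma_pad = 0, allocation = 256 bytes
 * The variant removed above padded only up to the next boundary
 * (100 -> 28 extra bytes); in both cases the tail bytes are unused by the
 * ring and exist for the D2H direction noted in the comment.
 */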
* dhd_dma_buf_free - Free a DMA-able buffer that was previously allocated using
* dhd_dma_buf_alloc().
*/
-void
+static void
dhd_dma_buf_free(dhd_pub_t *dhd, dhd_dma_buf_t *dma_buf)
{
osl_t *osh = dhd->osh;
/*
* +---------------------------------------------------------------------------+
- * DHD_MAP_PKTID_LOGGING
- * Logging the PKTID and DMA map/unmap information for the SMMU fault issue
- * debugging in customer platform.
+ * PktId Map: Provides a native packet pointer to unique 32bit PktId mapping.
+ * Main purpose is to save memory on the dongle; it has other purposes as well.
+ * The packet id map also includes storage for some packet parameters that
+ * may be saved. A native packet pointer along with the parameters may be saved
+ * and a unique 32bit pkt id will be returned. Later, the saved packet pointer
+ * and the metadata may be retrieved using the previously allocated packet id.
* +---------------------------------------------------------------------------+
*/
+#define DHD_PCIE_PKTID
+#define MAX_CTRL_PKTID (1024) /* Maximum number of pktids supported */
+#define MAX_RX_PKTID (1024)
+#define MAX_TX_PKTID (3072 * 2)
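The mapping described in the comment above is exercised through dhd_pktid_map_alloc() and dhd_pktid_map_free(), declared further down; a hedged sketch of the save half of the round trip. The wrapper function is illustrative, and its arguments are simply whatever the caller already holds after DMA-mapping the packet.

/* Illustrative only: stash a DMA-mapped tx packet and obtain its 32-bit id. */
static uint32 example_save_tx_pkt(dhd_pub_t *dhd, void *pkt, dmaaddr_t pa,
	uint32 len, uint8 dma_dir, void *dmah, void *secdma, dhd_pkttype_t pkttype)
{
	uint32 pktid = dhd_pktid_map_alloc(dhd, dhd->prot->pktid_tx_map,
		pkt, pa, len, dma_dir, dmah, secdma, pkttype);

	if (pktid == DHD_PKTID_INVALID) {
		/* map exhausted: unmap the packet and back-pressure the queue */
	}
	/* the id typically rides in the work item's request_id field; the
	 * matching completion hands it to dhd_pktid_map_free() to recover
	 * the packet pointer and metadata.
	 */
	return pktid;
}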
-#ifdef DHD_MAP_PKTID_LOGGING
-typedef struct dhd_pktid_log_item {
- dmaaddr_t pa; /* DMA bus address */
- uint64 ts_nsec; /* Timestamp: nsec */
- uint32 size; /* DMA map/unmap size */
- uint32 pktid; /* Packet ID */
- uint8 pkttype; /* Packet Type */
- uint8 rsvd[7]; /* Reserved for future use */
-} dhd_pktid_log_item_t;
-
-typedef struct dhd_pktid_log {
- uint32 items; /* number of total items */
- uint32 index; /* index of pktid_log_item */
- dhd_pktid_log_item_t map[0]; /* metadata storage */
-} dhd_pktid_log_t;
-
-typedef void * dhd_pktid_log_handle_t; /* opaque handle to pktid log */
-
-#define MAX_PKTID_LOG (2048)
-#define DHD_PKTID_LOG_ITEM_SZ (sizeof(dhd_pktid_log_item_t))
-#define DHD_PKTID_LOG_SZ(items) (uint32)((sizeof(dhd_pktid_log_t)) + \
- ((DHD_PKTID_LOG_ITEM_SZ) * (items)))
-
-#define DHD_PKTID_LOG_INIT(dhd, hdl) dhd_pktid_logging_init((dhd), (hdl))
-#define DHD_PKTID_LOG_FINI(dhd, hdl) dhd_pktid_logging_fini((dhd), (hdl))
-#define DHD_PKTID_LOG(dhd, hdl, pa, pktid, len, pkttype) \
- dhd_pktid_logging((dhd), (hdl), (pa), (pktid), (len), (pkttype))
-#define DHD_PKTID_LOG_DUMP(dhd) dhd_pktid_logging_dump((dhd))
-
-static dhd_pktid_log_handle_t *
-dhd_pktid_logging_init(dhd_pub_t *dhd, uint32 num_items)
-{
- dhd_pktid_log_t *log;
- uint32 log_size;
-
- log_size = DHD_PKTID_LOG_SZ(num_items);
- log = (dhd_pktid_log_t *)MALLOCZ(dhd->osh, log_size);
- if (log == NULL) {
- DHD_ERROR(("%s: MALLOC failed for size %d\n",
- __FUNCTION__, log_size));
- return (dhd_pktid_log_handle_t *)NULL;
- }
+/* On Router, the pktptr serves as a pktid. */
- log->items = num_items;
- log->index = 0;
- return (dhd_pktid_log_handle_t *)log; /* opaque handle */
-}
-
-static void
-dhd_pktid_logging_fini(dhd_pub_t *dhd, dhd_pktid_log_handle_t *handle)
-{
- dhd_pktid_log_t *log;
- uint32 log_size;
-
- if (handle == NULL) {
- DHD_ERROR(("%s: handle is NULL\n", __FUNCTION__));
- return;
- }
-
- log = (dhd_pktid_log_t *)handle;
- log_size = DHD_PKTID_LOG_SZ(log->items);
- MFREE(dhd->osh, handle, log_size);
-}
-
-static void
-dhd_pktid_logging(dhd_pub_t *dhd, dhd_pktid_log_handle_t *handle, dmaaddr_t pa,
- uint32 pktid, uint32 len, uint8 pkttype)
-{
- dhd_pktid_log_t *log;
- uint32 idx;
-
- if (handle == NULL) {
- DHD_ERROR(("%s: handle is NULL\n", __FUNCTION__));
- return;
- }
-
- log = (dhd_pktid_log_t *)handle;
- idx = log->index;
- log->map[idx].ts_nsec = OSL_LOCALTIME_NS();
- log->map[idx].pa = pa;
- log->map[idx].pktid = pktid;
- log->map[idx].size = len;
- log->map[idx].pkttype = pkttype;
- log->index = (idx + 1) % (log->items); /* update index */
-}
-
-void
-dhd_pktid_logging_dump(dhd_pub_t *dhd)
-{
- dhd_prot_t *prot = dhd->prot;
- dhd_pktid_log_t *map_log, *unmap_log;
- uint64 ts_sec, ts_usec;
-
- if (prot == NULL) {
- DHD_ERROR(("%s: prot is NULL\n", __FUNCTION__));
- return;
- }
-
- map_log = (dhd_pktid_log_t *)(prot->pktid_dma_map);
- unmap_log = (dhd_pktid_log_t *)(prot->pktid_dma_unmap);
- OSL_GET_LOCALTIME(&ts_sec, &ts_usec);
- if (map_log && unmap_log) {
- DHD_ERROR(("%s: map_idx=%d unmap_idx=%d "
- "current time=[%5lu.%06lu]\n", __FUNCTION__,
- map_log->index, unmap_log->index,
- (unsigned long)ts_sec, (unsigned long)ts_usec));
- DHD_ERROR(("%s: pktid_map_log(pa)=0x%llx size=%d, "
- "pktid_unmap_log(pa)=0x%llx size=%d\n", __FUNCTION__,
- (uint64)__virt_to_phys((ulong)(map_log->map)),
- (uint32)(DHD_PKTID_LOG_ITEM_SZ * map_log->items),
- (uint64)__virt_to_phys((ulong)(unmap_log->map)),
- (uint32)(DHD_PKTID_LOG_ITEM_SZ * unmap_log->items)));
- }
-}
-#endif /* DHD_MAP_PKTID_LOGGING */
-
-/* +----------------- End of DHD_MAP_PKTID_LOGGING -----------------------+ */
-
-/*
- * +---------------------------------------------------------------------------+
- * PktId Map: Provides a native packet pointer to unique 32bit PktId mapping.
- * Main purpose is to save memory on the dongle, has other purposes as well.
- * The packet id map, also includes storage for some packet parameters that
- * may be saved. A native packet pointer along with the parameters may be saved
- * and a unique 32bit pkt id will be returned. Later, the saved packet pointer
- * and the metadata may be retrieved using the previously allocated packet id.
- * +---------------------------------------------------------------------------+
- */
-#define DHD_PCIE_PKTID
-#define MAX_CTRL_PKTID (1024) /* Maximum number of pktids supported */
-#define MAX_RX_PKTID (1024)
-#define MAX_TX_PKTID (3072 * 12)
-
-/* On Router, the pktptr serves as a pktid. */
-
-#if defined(PROP_TXSTATUS) && !defined(DHD_PCIE_PKTID)
-#error "PKTIDMAP must be supported with PROP_TXSTATUS/WLFC"
-#endif // endif
+#if defined(PROP_TXSTATUS) && !defined(DHD_PCIE_PKTID)
+#error "PKTIDMAP must be supported with PROP_TXSTATUS/WLFC"
+#endif
/* Enum for marking the buffer color based on usage */
typedef enum dhd_pkttype {
PKTTYPE_TSBUF_RX
} dhd_pkttype_t;
-#define DHD_PKTID_MIN_AVAIL_COUNT 512U
-#define DHD_PKTID_DEPLETED_MAX_COUNT (DHD_PKTID_MIN_AVAIL_COUNT * 2U)
-#define DHD_PKTID_INVALID (0U)
-#define DHD_IOCTL_REQ_PKTID (0xFFFE)
-#define DHD_FAKE_PKTID (0xFACE)
-#define DHD_H2D_DBGRING_REQ_PKTID 0xFFFD
-#define DHD_D2H_DBGRING_REQ_PKTID 0xFFFC
-#define DHD_H2D_HOSTTS_REQ_PKTID 0xFFFB
-#define DHD_H2D_BTLOGRING_REQ_PKTID 0xFFFA
-#define DHD_D2H_BTLOGRING_REQ_PKTID 0xFFF9
-#define DHD_H2D_SNAPSHOT_UPLOAD_REQ_PKTID 0xFFF8
-#ifdef DHD_HP2P
-#define DHD_D2H_HPPRING_TXREQ_PKTID 0xFFF7
-#define DHD_D2H_HPPRING_RXREQ_PKTID 0xFFF6
-#endif /* DHD_HP2P */
+#define DHD_PKTID_INVALID (0U)
+#define DHD_IOCTL_REQ_PKTID (0xFFFE)
+#define DHD_FAKE_PKTID (0xFACE)
+#define DHD_H2D_DBGRING_REQ_PKTID 0xFFFD
+#define DHD_D2H_DBGRING_REQ_PKTID 0xFFFC
+#define DHD_H2D_HOSTTS_REQ_PKTID 0xFFFB
#define IS_FLOWRING(ring) \
((strncmp(ring->name, "h2dflr", sizeof("h2dflr"))) == (0))
#endif /* MACOSX_DHD */
#if defined(DHD_PCIE_PKTID)
-#if defined(MACOSX_DHD)
+#if defined(MACOSX_DHD) || defined(DHD_EFI)
#define IOCTLRESP_USE_CONSTMEM
static void free_ioctl_return_buffer(dhd_pub_t *dhd, dhd_dma_buf_t *retbuf);
static int alloc_ioctl_return_buffer(dhd_pub_t *dhd, dhd_dma_buf_t *retbuf);
-#endif // endif
+#endif
/* Determine number of pktids that are available */
static INLINE uint32 dhd_pktid_map_avail_cnt(dhd_pktid_map_handle_t *handle);
static uint32 dhd_pktid_map_alloc(dhd_pub_t *dhd, dhd_pktid_map_handle_t *map,
void *pkt, dmaaddr_t pa, uint32 len, uint8 dma,
void *dmah, void *secdma, dhd_pkttype_t pkttype);
+
/* Return an allocated pktid, retrieving previously saved pkt and metadata */
static void *dhd_pktid_map_free(dhd_pub_t *dhd, dhd_pktid_map_handle_t *map,
uint32 id, dmaaddr_t *pa, uint32 *len, void **dmah,
#define DHD_TEST_IS_ALLOC 3
#define DHD_TEST_IS_FREE 4
-typedef enum dhd_pktid_map_type {
- DHD_PKTID_MAP_TYPE_CTRL = 1,
- DHD_PKTID_MAP_TYPE_TX,
- DHD_PKTID_MAP_TYPE_RX,
- DHD_PKTID_MAP_TYPE_UNKNOWN
-} dhd_pktid_map_type_t;
-
#ifdef USE_DHD_PKTID_AUDIT_LOCK
#define DHD_PKTID_AUDIT_LOCK_INIT(osh) dhd_os_spin_lock_init(osh)
#define DHD_PKTID_AUDIT_LOCK_DEINIT(osh, lock) dhd_os_spin_lock_deinit(osh, lock)
#endif /* DHD_PKTID_AUDIT_ENABLED */
-#define USE_DHD_PKTID_LOCK 1
+/* #define USE_DHD_PKTID_LOCK 1 */
#ifdef USE_DHD_PKTID_LOCK
#define DHD_PKTID_LOCK_INIT(osh) dhd_os_spin_lock_init(osh)
#define DHD_PKTID_LOCK_DEINIT(osh, lock) dhd_os_spin_lock_deinit(osh, lock)
-#define DHD_PKTID_LOCK(lock, flags) (flags) = dhd_os_spin_lock(lock)
+#define DHD_PKTID_LOCK(lock) dhd_os_spin_lock(lock)
#define DHD_PKTID_UNLOCK(lock, flags) dhd_os_spin_unlock(lock, flags)
#else
#define DHD_PKTID_LOCK_INIT(osh) (void *)(1)
uint32 items; /* total items in map */
uint32 avail; /* total available items */
int failures; /* lockers unavailable count */
- /* Spinlock to protect dhd_pktid_map in process/tasklet context */
- void *pktid_lock; /* Used when USE_DHD_PKTID_LOCK is defined */
-
#if defined(DHD_PKTID_AUDIT_ENABLED)
void *pktid_audit_lock;
struct bcm_mwbmap *pktid_audit; /* multi word bitmap based audit */
#define DHD_PKTID_AVAIL(map) dhd_pktid_map_avail_cnt(map)
#if defined(DHD_PKTID_AUDIT_ENABLED)
-
-static int
-dhd_get_pktid_map_type(dhd_pub_t *dhd, dhd_pktid_map_t *pktid_map)
-{
- dhd_prot_t *prot = dhd->prot;
- int pktid_map_type;
-
- if (pktid_map == prot->pktid_ctrl_map) {
- pktid_map_type = DHD_PKTID_MAP_TYPE_CTRL;
- } else if (pktid_map == prot->pktid_tx_map) {
- pktid_map_type = DHD_PKTID_MAP_TYPE_TX;
- } else if (pktid_map == prot->pktid_rx_map) {
- pktid_map_type = DHD_PKTID_MAP_TYPE_RX;
- } else {
- pktid_map_type = DHD_PKTID_MAP_TYPE_UNKNOWN;
- }
-
- return pktid_map_type;
-}
-
/**
-* __dhd_pktid_audit - Use the mwbmap to audit validity of a pktid.
+* dhd_pktid_audit - Use the mwbmap to audit validity of a pktid.
*/
static int
-__dhd_pktid_audit(dhd_pub_t *dhd, dhd_pktid_map_t *pktid_map, uint32 pktid,
+dhd_pktid_audit(dhd_pub_t *dhd, dhd_pktid_map_t *pktid_map, uint32 pktid,
const int test_for, const char *errmsg)
{
#define DHD_PKT_AUDIT_STR "ERROR: %16s Host PktId Audit: "
struct bcm_mwbmap *handle;
uint32 flags;
bool ignore_audit;
- int error = BCME_OK;
if (pktid_map == (dhd_pktid_map_t *)NULL) {
DHD_ERROR((DHD_PKT_AUDIT_STR "Pkt id map NULL\n", errmsg));
handle = pktid_map->pktid_audit;
if (handle == (struct bcm_mwbmap *)NULL) {
DHD_ERROR((DHD_PKT_AUDIT_STR "Handle NULL\n", errmsg));
- goto out;
+ DHD_PKTID_AUDIT_UNLOCK(pktid_map->pktid_audit_lock, flags);
+ return BCME_OK;
}
/* Exclude special pktids from audit */
ignore_audit = (pktid == DHD_IOCTL_REQ_PKTID) | (pktid == DHD_FAKE_PKTID);
if (ignore_audit) {
- goto out;
+ DHD_PKTID_AUDIT_UNLOCK(pktid_map->pktid_audit_lock, flags);
+ return BCME_OK;
}
if ((pktid == DHD_PKTID_INVALID) || (pktid > pktid_map->items)) {
DHD_ERROR((DHD_PKT_AUDIT_STR "PktId<%d> invalid\n", errmsg, pktid));
- error = BCME_ERROR;
- goto out;
+ /* lock is released in "error" */
+ goto error;
}
/* Perform audit */
if (!bcm_mwbmap_isfree(handle, pktid)) {
DHD_ERROR((DHD_PKT_AUDIT_STR "PktId<%d> alloc duplicate\n",
errmsg, pktid));
- error = BCME_ERROR;
- } else {
- bcm_mwbmap_force(handle, pktid);
+ goto error;
}
+ bcm_mwbmap_force(handle, pktid);
break;
case DHD_DUPLICATE_FREE:
if (bcm_mwbmap_isfree(handle, pktid)) {
DHD_ERROR((DHD_PKT_AUDIT_STR "PktId<%d> free duplicate\n",
errmsg, pktid));
- error = BCME_ERROR;
- } else {
- bcm_mwbmap_free(handle, pktid);
+ goto error;
}
+ bcm_mwbmap_free(handle, pktid);
break;
case DHD_TEST_IS_ALLOC:
if (bcm_mwbmap_isfree(handle, pktid)) {
DHD_ERROR((DHD_PKT_AUDIT_STR "PktId<%d> is not allocated\n",
errmsg, pktid));
- error = BCME_ERROR;
+ goto error;
}
break;
if (!bcm_mwbmap_isfree(handle, pktid)) {
DHD_ERROR((DHD_PKT_AUDIT_STR "PktId<%d> is not free",
errmsg, pktid));
- error = BCME_ERROR;
+ goto error;
}
break;
default:
- DHD_ERROR(("%s: Invalid test case: %d\n", __FUNCTION__, test_for));
- error = BCME_ERROR;
- break;
+ goto error;
}
-out:
DHD_PKTID_AUDIT_UNLOCK(pktid_map->pktid_audit_lock, flags);
+ return BCME_OK;
- if (error != BCME_OK) {
- dhd->pktid_audit_failed = TRUE;
- }
-
- return error;
-}
+error:
-static int
-dhd_pktid_audit(dhd_pub_t *dhd, dhd_pktid_map_t *pktid_map, uint32 pktid,
- const int test_for, const char *errmsg)
-{
- int ret = BCME_OK;
- ret = __dhd_pktid_audit(dhd, pktid_map, pktid, test_for, errmsg);
- if (ret == BCME_ERROR) {
- DHD_ERROR(("%s: Got Pkt Id Audit failure: PKTID<%d> PKTID MAP TYPE<%d>\n",
- __FUNCTION__, pktid, dhd_get_pktid_map_type(dhd, pktid_map)));
- dhd_pktid_error_handler(dhd);
- }
+ DHD_PKTID_AUDIT_UNLOCK(pktid_map->pktid_audit_lock, flags);
+ /* May insert any trap mechanism here ! */
+ dhd_pktid_error_handler(dhd);
- return ret;
+ return BCME_ERROR;
}
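/*
 * Illustrative sketch only (standalone, not part of bcmdhd; all toy_* names
 * are made up): the audit above uses a multi-word bitmap (bcm_mwbmap) to
 * catch duplicate allocs/frees of pktids. The same idea, reduced to a plain
 * uint32 bitmap where a set bit means "currently allocated":
 */
#include <stdint.h>
#include <stdbool.h>

#define TOY_MAX_PKTID 1024 /* ids 0 .. TOY_MAX_PKTID-1 */

static uint32_t toy_audit_bmp[TOY_MAX_PKTID / 32];

/* Returns false on a duplicate allocation of 'id' (cf. DHD_DUPLICATE_ALLOC). */
static bool toy_audit_alloc(uint32_t id)
{
    uint32_t word = id / 32, bit = 1u << (id % 32);
    if (toy_audit_bmp[word] & bit)
        return false;
    toy_audit_bmp[word] |= bit;
    return true;
}

/* Returns false on a duplicate free of 'id' (cf. DHD_DUPLICATE_FREE). */
static bool toy_audit_free(uint32_t id)
{
    uint32_t word = id / 32, bit = 1u << (id % 32);
    if (!(toy_audit_bmp[word] & bit))
        return false;
    toy_audit_bmp[word] &= ~bit;
    return true;
}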
#define DHD_PKTID_AUDIT(dhdp, map, pktid, test_for) \
static int
dhd_pktid_audit_ring_debug(dhd_pub_t *dhdp, dhd_pktid_map_t *map, uint32 pktid,
- const int test_for, void *msg, uint32 msg_len, const char *func)
+ const int test_for, void *msg, uint32 msg_len, const char *func)
{
- int ret = BCME_OK;
-
- if (dhd_query_bus_erros(dhdp)) {
- return BCME_ERROR;
- }
-
- ret = __dhd_pktid_audit(dhdp, map, pktid, test_for, func);
+ int ret = 0;
+ ret = DHD_PKTID_AUDIT(dhdp, map, pktid, test_for);
if (ret == BCME_ERROR) {
- DHD_ERROR(("%s: Got Pkt Id Audit failure: PKTID<%d> PKTID MAP TYPE<%d>\n",
- __FUNCTION__, pktid, dhd_get_pktid_map_type(dhdp, map)));
prhex(func, (uchar *)msg, msg_len);
- dhd_pktid_error_handler(dhdp);
}
return ret;
}
#endif /* DHD_PKTID_AUDIT_ENABLED */
+
/**
* +---------------------------------------------------------------------------+
* Packet to Packet Id mapper using a <numbered_key, locker> paradigm.
return (dhd_pktid_map_handle_t *)NULL;
}
+ /* Initialize the lock that protects this structure */
map->items = num_items;
map->avail = num_items;
map_items = DHD_PKIDMAP_ITEMS(map->items);
map_keys_sz = DHD_PKTIDMAP_KEYS_SZ(map->items);
-
- /* Initialize the lock that protects this structure */
- map->pktid_lock = DHD_PKTID_LOCK_INIT(osh);
- if (map->pktid_lock == NULL) {
- DHD_ERROR(("%s:%d: Lock init failed \r\n", __FUNCTION__, __LINE__));
- goto error;
- }
-
map->keys = (dhd_pktid_key_t *)MALLOC(osh, map_keys_sz);
if (map->keys == NULL) {
DHD_ERROR(("%s:%d: MALLOC failed for map->keys size %d\n",
DHD_PKTID_AUDIT_LOCK_DEINIT(osh, map->pktid_audit_lock);
}
#endif /* DHD_PKTID_AUDIT_ENABLED */
-
if (map->keys) {
MFREE(osh, map->keys, map_keys_sz);
}
-
- if (map->pktid_lock) {
- DHD_PKTID_LOCK_DEINIT(osh, map->pktid_lock);
- }
-
VMFREE(osh, map, dhd_pktid_map_sz);
}
return (dhd_pktid_map_handle_t *)NULL;
dhd_pktid_map_t *map;
dhd_pktid_item_t *locker;
uint32 map_items;
- unsigned long flags;
+ uint32 flags;
bool data_tx = FALSE;
map = (dhd_pktid_map_t *)handle;
- DHD_PKTID_LOCK(map->pktid_lock, flags);
+ DHD_GENERAL_LOCK(dhd, flags);
osh = dhd->osh;
map_items = DHD_PKIDMAP_ITEMS(map->items);
locker->state = LOCKER_IS_FREE;
data_tx = (locker->pkttype == PKTTYPE_DATA_TX);
if (data_tx) {
- OSL_ATOMIC_DEC(dhd->osh, &dhd->prot->active_tx_count);
+ dhd->prot->active_tx_count--;
}
#ifdef DHD_PKTID_AUDIT_RING
DHD_PKTID_AUDIT(dhd, map, nkey, DHD_DUPLICATE_FREE); /* duplicate frees */
#endif /* DHD_PKTID_AUDIT_RING */
-#ifdef DHD_MAP_PKTID_LOGGING
- DHD_PKTID_LOG(dhd, dhd->prot->pktid_dma_unmap,
- locker->pa, nkey, locker->len,
- locker->pkttype);
-#endif /* DHD_MAP_PKTID_LOGGING */
{
if (SECURE_DMA_ENAB(dhd->osh))
map->avail = map_items;
memset(&map->lockers[1], 0, sizeof(dhd_pktid_item_t) * map_items);
- DHD_PKTID_UNLOCK(map->pktid_lock, flags);
+ DHD_GENERAL_UNLOCK(dhd, flags);
}
#ifdef IOCTLRESP_USE_CONSTMEM
dhd_pktid_map_t *map;
dhd_pktid_item_t *locker;
uint32 map_items;
- unsigned long flags;
+ uint32 flags;
map = (dhd_pktid_map_t *)handle;
- DHD_PKTID_LOCK(map->pktid_lock, flags);
+ DHD_GENERAL_LOCK(dhd, flags);
map_items = DHD_PKIDMAP_ITEMS(map->items);
/* skip reserved KEY #0, and start from 1 */
retbuf.dmah = locker->dmah;
retbuf.secdma = locker->secdma;
+ /* This could be a callback registered with dhd_pktid_map */
+ DHD_GENERAL_UNLOCK(dhd, flags);
free_ioctl_return_buffer(dhd, &retbuf);
+ DHD_GENERAL_LOCK(dhd, flags);
}
else {
#ifdef DHD_PKTID_AUDIT_RING
map->avail = map_items;
memset(&map->lockers[1], 0, sizeof(dhd_pktid_item_t) * map_items);
- DHD_PKTID_UNLOCK(map->pktid_lock, flags);
+ DHD_GENERAL_UNLOCK(dhd, flags);
}
#endif /* IOCTLRESP_USE_CONSTMEM */
+
/**
* Free the pktid map.
*/
uint32 dhd_pktid_map_sz;
uint32 map_keys_sz;
- if (handle == NULL)
- return;
-
/* Free any pending packets */
dhd_pktid_map_reset(dhd, handle);
dhd_pktid_map_sz = DHD_PKTID_MAP_SZ(map->items);
map_keys_sz = DHD_PKTIDMAP_KEYS_SZ(map->items);
- DHD_PKTID_LOCK_DEINIT(dhd->osh, map->pktid_lock);
-
#if defined(DHD_PKTID_AUDIT_ENABLED)
if (map->pktid_audit != (struct bcm_mwbmap *)NULL) {
bcm_mwbmap_fini(dhd->osh, map->pktid_audit); /* Destruct pktid_audit */
MFREE(dhd->osh, map->keys, map_keys_sz);
VMFREE(dhd->osh, handle, dhd_pktid_map_sz);
}
-
#ifdef IOCTLRESP_USE_CONSTMEM
static void
dhd_pktid_map_fini_ioctl(dhd_pub_t *dhd, dhd_pktid_map_handle_t *handle)
uint32 dhd_pktid_map_sz;
uint32 map_keys_sz;
- if (handle == NULL)
- return;
-
/* Free any pending packets */
dhd_pktid_map_reset_ioctl(dhd, handle);
dhd_pktid_map_sz = DHD_PKTID_MAP_SZ(map->items);
map_keys_sz = DHD_PKTIDMAP_KEYS_SZ(map->items);
- DHD_PKTID_LOCK_DEINIT(dhd->osh, map->pktid_lock);
-
#if defined(DHD_PKTID_AUDIT_ENABLED)
if (map->pktid_audit != (struct bcm_mwbmap *)NULL) {
bcm_mwbmap_fini(dhd->osh, map->pktid_audit); /* Destruct pktid_audit */
{
dhd_pktid_map_t *map;
uint32 avail;
- unsigned long flags;
ASSERT(handle != NULL);
map = (dhd_pktid_map_t *)handle;
- DHD_PKTID_LOCK(map->pktid_lock, flags);
avail = map->avail;
- DHD_PKTID_UNLOCK(map->pktid_lock, flags);
return avail;
}
uint32 nkey;
dhd_pktid_map_t *map;
dhd_pktid_item_t *locker;
- unsigned long flags;
ASSERT(handle != NULL);
map = (dhd_pktid_map_t *)handle;
- DHD_PKTID_LOCK(map->pktid_lock, flags);
-
if ((int)(map->avail) <= 0) { /* no more pktids to allocate */
map->failures++;
DHD_INFO(("%s:%d: failed, no free keys\n", __FUNCTION__, __LINE__));
- DHD_PKTID_UNLOCK(map->pktid_lock, flags);
return DHD_PKTID_INVALID; /* failed alloc request */
}
" map->avail<%u>, nkey<%u>, pkttype<%u>\n",
__FUNCTION__, __LINE__, map->avail, nkey,
pkttype));
- DHD_PKTID_UNLOCK(map->pktid_lock, flags);
return DHD_PKTID_INVALID; /* failed alloc request */
}
locker->len = 0;
locker->state = LOCKER_IS_BUSY; /* reserve this locker */
- DHD_PKTID_UNLOCK(map->pktid_lock, flags);
-
ASSERT(nkey != DHD_PKTID_INVALID);
-
return nkey; /* return locker's numbered key */
}
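/*
 * Illustrative sketch only (standalone, not part of bcmdhd; all toy_* names
 * are made up): the <numbered_key, locker> map reserved above is essentially
 * an array of lockers indexed by key plus a LIFO stack of free keys, with key
 * 0 held back as the invalid id. No locking or validation here.
 */
#include <stdint.h>
#include <stddef.h>

#define TOY_ITEMS 8

struct toy_locker { void *pkt; uint32_t len; };

struct toy_pktid_map {
    uint32_t avail;                   /* number of free keys on the stack */
    uint32_t keys[TOY_ITEMS + 1];     /* stack of free keys, slots 1..TOY_ITEMS */
    struct toy_locker lockers[TOY_ITEMS + 1];
};

static void toy_map_init(struct toy_pktid_map *map)
{
    uint32_t key;
    map->avail = TOY_ITEMS;
    for (key = 1; key <= TOY_ITEMS; key++)
        map->keys[key] = key;         /* key 0 stays reserved (invalid) */
}

/* Reserve a locker: returns a key in 1..TOY_ITEMS, or 0 when exhausted. */
static uint32_t toy_map_alloc(struct toy_pktid_map *map, void *pkt, uint32_t len)
{
    uint32_t key;
    if (map->avail == 0)
        return 0;
    key = map->keys[map->avail--];
    map->lockers[key].pkt = pkt;
    map->lockers[key].len = len;
    return key;
}

/* Release a locker: returns the saved packet pointer and recycles the key. */
static void *toy_map_free(struct toy_pktid_map *map, uint32_t key, uint32_t *len)
{
    void *pkt = map->lockers[key].pkt;
    *len = map->lockers[key].len;
    map->lockers[key].pkt = NULL;
    map->keys[++map->avail] = key;
    return pkt;
}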
{
dhd_pktid_map_t *map;
dhd_pktid_item_t *locker;
- unsigned long flags;
ASSERT(handle != NULL);
map = (dhd_pktid_map_t *)handle;
- DHD_PKTID_LOCK(map->pktid_lock, flags);
-
if ((nkey == DHD_PKTID_INVALID) || (nkey > DHD_PKIDMAP_ITEMS(map->items))) {
DHD_ERROR(("%s:%d: Error! saving invalid pktid<%u> pkttype<%u>\n",
__FUNCTION__, __LINE__, nkey, pkttype));
- DHD_PKTID_UNLOCK(map->pktid_lock, flags);
#ifdef DHD_FW_COREDUMP
if (dhd->memdump_enabled) {
/* collect core dump */
locker->pkttype = pkttype;
locker->pkt = pkt;
locker->state = LOCKER_IS_BUSY; /* make this locker busy */
-#ifdef DHD_MAP_PKTID_LOGGING
- DHD_PKTID_LOG(dhd, dhd->prot->pktid_dma_map, pa, nkey, len, pkttype);
-#endif /* DHD_MAP_PKTID_LOGGING */
- DHD_PKTID_UNLOCK(map->pktid_lock, flags);
}
/**
dhd_pktid_item_t *locker;
void * pkt;
unsigned long long locker_addr;
- unsigned long flags;
ASSERT(handle != NULL);
map = (dhd_pktid_map_t *)handle;
- DHD_PKTID_LOCK(map->pktid_lock, flags);
-
if ((nkey == DHD_PKTID_INVALID) || (nkey > DHD_PKIDMAP_ITEMS(map->items))) {
DHD_ERROR(("%s:%d: Error! Try to free invalid pktid<%u>, pkttype<%d>\n",
__FUNCTION__, __LINE__, nkey, pkttype));
- DHD_PKTID_UNLOCK(map->pktid_lock, flags);
#ifdef DHD_FW_COREDUMP
if (dhd->memdump_enabled) {
/* collect core dump */
if (locker->state == LOCKER_IS_FREE) {
DHD_ERROR(("%s:%d: Error! freeing already freed invalid pktid<%u>\n",
__FUNCTION__, __LINE__, nkey));
- DHD_PKTID_UNLOCK(map->pktid_lock, flags);
#ifdef DHD_FW_COREDUMP
if (dhd->memdump_enabled) {
/* collect core dump */
"pkttype <%d> locker->pa <0x%llx> \n",
__FUNCTION__, __LINE__, locker->state, locker->pkttype,
pkttype, locker_addr));
- DHD_PKTID_UNLOCK(map->pktid_lock, flags);
#ifdef DHD_FW_COREDUMP
if (dhd->memdump_enabled) {
/* collect core dump */
#if defined(DHD_PKTID_AUDIT_MAP)
DHD_PKTID_AUDIT(dhd, map, nkey, DHD_TEST_IS_FREE);
#endif /* DHD_PKTID_AUDIT_MAP */
-#ifdef DHD_MAP_PKTID_LOGGING
- DHD_PKTID_LOG(dhd, dhd->prot->pktid_dma_unmap, locker->pa, nkey,
- (uint32)locker->len, pkttype);
-#endif /* DHD_MAP_PKTID_LOGGING */
*pa = locker->pa; /* return contents of locker */
*len = (uint32)locker->len;
locker->pkt = NULL; /* Clear pkt */
locker->len = 0;
- DHD_PKTID_UNLOCK(map->pktid_lock, flags);
-
return pkt;
}
#else /* ! DHD_PCIE_PKTID */
+
typedef struct pktlist {
PKT_LIST *tx_pkt_list; /* list for tx packets */
PKT_LIST *rx_pkt_list; /* list for rx packets */
#define DHD_PKTID32(pktptr32) ((uint32)(pktptr32))
#define DHD_PKTPTR32(pktid32) ((void *)(pktid32))
+
static INLINE uint32 dhd_native_to_pktid(dhd_pktid_map_handle_t *map, void *pktptr32,
dmaaddr_t pa, uint32 dma_len, void *dmah, void *secdma,
dhd_pkttype_t pkttype);
}
static void
-dhd_pktid_map_reset(dhd_pub_t *dhd, pktlists_t *handle)
+dhd_pktid_map_fini(dhd_pub_t *dhd, dhd_pktid_map_handle_t *map)
{
osl_t *osh = dhd->osh;
+ pktlists_t *handle = (pktlists_t *) map;
+
+ ASSERT(handle != NULL);
+ if (handle == (pktlists_t *)NULL)
+ return;
if (handle->ctrl_pkt_list) {
PKTLIST_FINI(handle->ctrl_pkt_list);
PKTLIST_FINI(handle->tx_pkt_list);
MFREE(osh, handle->tx_pkt_list, sizeof(PKT_LIST));
}
-}
-
-static void
-dhd_pktid_map_fini(dhd_pub_t *dhd, dhd_pktid_map_handle_t *map)
-{
- osl_t *osh = dhd->osh;
- pktlists_t *handle = (pktlists_t *) map;
-
- ASSERT(handle != NULL);
- if (handle == (pktlists_t *)NULL) {
- return;
- }
-
- dhd_pktid_map_reset(dhd, handle);
if (handle) {
MFREE(osh, handle, sizeof(pktlists_t));
/* +------------------ End of PCIE DHD PKTID MAPPER -----------------------+ */
+
/**
* The PCIE FD protocol layer is constructed in two phases:
* Phase 1. dhd_prot_attach()
osl_t *osh = dhd->osh;
dhd_prot_t *prot;
- /* FW going to DMA extended trap data,
- * allocate buffer for the maximum extended trap data.
- */
- uint32 trap_buf_len = BCMPCIE_EXT_TRAP_DATA_MAXLEN;
-
/* Allocate prot structure */
if (!(prot = (dhd_prot_t *)DHD_OS_PREALLOC(dhd, DHD_PREALLOC_PROT,
sizeof(dhd_prot_t)))) {
dhd->dma_h2d_ring_upd_support = FALSE;
dhd->dma_ring_upd_overwrite = FALSE;
- dhd->hwa_inited = 0;
dhd->idma_inited = 0;
dhd->ifrm_inited = 0;
- dhd->dar_inited = 0;
/* Common Ring Allocations */
ROUNDUP(DMA_D2H_SCRATCH_BUF_LEN, 16) + DMA_HOST_BUFFER_LEN))
#else
if (dhd_dma_buf_alloc(dhd, &prot->d2h_dma_scratch_buf, DMA_D2H_SCRATCH_BUF_LEN))
-
#endif /* BCM_HOST_BUF */
{
goto fail;
#ifdef DHD_RX_CHAINING
dhd_rxchain_reset(&prot->rxchain);
-#endif // endif
+#endif
+
+ prot->rx_lock = dhd_os_spin_lock_init(dhd->osh);
prot->pktid_ctrl_map = DHD_NATIVE_TO_PKTID_INIT(dhd, MAX_CTRL_PKTID);
if (prot->pktid_ctrl_map == NULL) {
goto fail;
prot->pktid_tx_map = DHD_NATIVE_TO_PKTID_INIT(dhd, MAX_TX_PKTID);
- if (prot->pktid_tx_map == NULL)
+ if (prot->pktid_tx_map == NULL)
goto fail;
#ifdef IOCTLRESP_USE_CONSTMEM
}
#endif /* IOCTLRESP_USE_CONSTMEM */
-#ifdef DHD_MAP_PKTID_LOGGING
- prot->pktid_dma_map = DHD_PKTID_LOG_INIT(dhd, MAX_PKTID_LOG);
- if (prot->pktid_dma_map == NULL) {
- DHD_ERROR(("%s: failed to allocate pktid_dma_map\n",
- __FUNCTION__));
- }
-
- prot->pktid_dma_unmap = DHD_PKTID_LOG_INIT(dhd, MAX_PKTID_LOG);
- if (prot->pktid_dma_unmap == NULL) {
- DHD_ERROR(("%s: failed to allocate pktid_dma_unmap\n",
- __FUNCTION__));
- }
-#endif /* DHD_MAP_PKTID_LOGGING */
-
/* Initialize the work queues to be used by the Load Balancing logic */
#if defined(DHD_LB_TXC)
{
void *buffer;
buffer = MALLOC(dhd->osh, sizeof(void*) * DHD_LB_WORKQ_SZ);
- if (buffer == NULL) {
- DHD_ERROR(("%s: failed to allocate RXC work buffer\n", __FUNCTION__));
- goto fail;
- }
bcm_workq_init(&prot->tx_compl_prod, &prot->tx_compl_cons,
buffer, DHD_LB_WORKQ_SZ);
prot->tx_compl_prod_sync = 0;
DHD_INFO(("%s: created tx_compl_workq <%p,%d>\n",
__FUNCTION__, buffer, DHD_LB_WORKQ_SZ));
- }
+ }
#endif /* DHD_LB_TXC */
#if defined(DHD_LB_RXC)
- {
+ {
void *buffer;
buffer = MALLOC(dhd->osh, sizeof(void*) * DHD_LB_WORKQ_SZ);
- if (buffer == NULL) {
- DHD_ERROR(("%s: failed to allocate RXC work buffer\n", __FUNCTION__));
- goto fail;
- }
bcm_workq_init(&prot->rx_compl_prod, &prot->rx_compl_cons,
buffer, DHD_LB_WORKQ_SZ);
prot->rx_compl_prod_sync = 0;
DHD_INFO(("%s: created rx_compl_workq <%p,%d>\n",
__FUNCTION__, buffer, DHD_LB_WORKQ_SZ));
- }
+ }
#endif /* DHD_LB_RXC */
-
/* Initialize trap buffer */
- if (dhd_dma_buf_alloc(dhd, &dhd->prot->fw_trap_buf, trap_buf_len)) {
+ if (dhd_dma_buf_alloc(dhd, &dhd->prot->fw_trap_buf, BCMPCIE_EXT_TRAP_DATA_MAXLEN)) {
DHD_ERROR(("%s: dhd_init_trap_buffer falied\n", __FUNCTION__));
goto fail;
}
fail:
- if (prot) {
- /* Free up all allocated memories */
+#ifndef CONFIG_DHD_USE_STATIC_BUF
+ if (prot != NULL) {
dhd_prot_detach(dhd);
}
+#endif /* CONFIG_DHD_USE_STATIC_BUF */
return BCME_NOMEM;
} /* dhd_prot_attach */
-static int
-dhd_alloc_host_scbs(dhd_pub_t *dhd)
-{
- int ret = BCME_OK;
- sh_addr_t base_addr;
- dhd_prot_t *prot = dhd->prot;
- uint32 host_scb_size = 0;
-
- if (dhd->hscb_enable) {
- /* read number of bytes to allocate from F/W */
- dhd_bus_cmn_readshared(dhd->bus, &host_scb_size, HOST_SCB_ADDR, 0);
- if (host_scb_size) {
- /* alloc array of host scbs */
- ret = dhd_dma_buf_alloc(dhd, &prot->host_scb_buf, host_scb_size);
- /* write host scb address to F/W */
- if (ret == BCME_OK) {
- dhd_base_addr_htolpa(&base_addr, prot->host_scb_buf.pa);
- dhd_bus_cmn_writeshared(dhd->bus, &base_addr, sizeof(base_addr),
- HOST_SCB_ADDR, 0);
- } else {
- DHD_TRACE(("dhd_alloc_host_scbs: dhd_dma_buf_alloc error\n"));
- }
- } else {
- DHD_TRACE(("dhd_alloc_host_scbs: host_scb_size is 0.\n"));
- }
- } else {
- DHD_TRACE(("dhd_alloc_host_scbs: Host scb not supported in F/W.\n"));
- }
-
- return ret;
-}
-
void
dhd_set_host_cap(dhd_pub_t *dhd)
{
if (dhd->bus->api.fw_rev >= PCIE_SHARED_VERSION_6) {
if (dhd->h2d_phase_supported) {
+
data |= HOSTCAP_H2D_VALID_PHASE;
- if (dhd->force_dongletrap_on_bad_h2d_phase)
+
+ if (dhd->force_dongletrap_on_bad_h2d_phase) {
data |= HOSTCAP_H2D_ENABLE_TRAP_ON_BADPHASE;
+ }
}
- if (prot->host_ipc_version > prot->device_ipc_version)
+ if (prot->host_ipc_version > prot->device_ipc_version) {
prot->active_ipc_version = prot->device_ipc_version;
- else
+ } else {
prot->active_ipc_version = prot->host_ipc_version;
+ }
data |= prot->active_ipc_version;
if (dhdpcie_bus_get_pcie_hostready_supported(dhd->bus)) {
+
DHD_INFO(("Advertise Hostready Capability\n"));
+
data |= HOSTCAP_H2D_ENABLE_HOSTRDY;
}
+#ifdef PCIE_INB_DW
+ if (dhdpcie_bus_get_pcie_inband_dw_supported(dhd->bus)) {
+ DHD_INFO(("Advertise Inband-DW Capability\n"));
+ data |= HOSTCAP_DS_INBAND_DW;
+ data |= HOSTCAP_DS_NO_OOB_DW;
+ dhdpcie_bus_enab_pcie_dw(dhd->bus, DEVICE_WAKE_INB);
+ } else
+#endif /* PCIE_INB_DW */
+#ifdef PCIE_OOB
+ if (dhdpcie_bus_get_pcie_oob_dw_supported(dhd->bus)) {
+ dhdpcie_bus_enab_pcie_dw(dhd->bus, DEVICE_WAKE_OOB);
+ } else
+#endif /* PCIE_OOB */
{
/* Disable DS altogether */
data |= HOSTCAP_DS_NO_OOB_DW;
dhdpcie_bus_enab_pcie_dw(dhd->bus, DEVICE_WAKE_NONE);
}
- /* Indicate support for extended trap data */
- data |= HOSTCAP_EXTENDED_TRAP_DATA;
-
- /* Indicate support for TX status metadata */
- if (dhd->pcie_txs_metadata_enable != 0)
- data |= HOSTCAP_TXSTATUS_METADATA;
-
- /* Enable fast delete ring in firmware if supported */
- if (dhd->fast_delete_ring_support) {
- data |= HOSTCAP_FAST_DELETE_RING;
- }
-
- if (dhdpcie_bus_get_pcie_hwa_supported(dhd->bus)) {
- DHD_ERROR(("HWA inited\n"));
- /* TODO: Is hostcap needed? */
- dhd->hwa_inited = TRUE;
- }
-
if (dhdpcie_bus_get_pcie_idma_supported(dhd->bus)) {
+
DHD_ERROR(("IDMA inited\n"));
data |= HOSTCAP_H2D_IDMA;
dhd->idma_inited = TRUE;
dhd_prot_dma_indx_free(dhd);
}
- if (dhdpcie_bus_get_pcie_dar_supported(dhd->bus)) {
- DHD_ERROR(("DAR doorbell Use\n"));
- data |= HOSTCAP_H2D_DAR;
- dhd->dar_inited = TRUE;
- }
-
- data |= HOSTCAP_UR_FW_NO_TRAP;
-
- if (dhd->hscb_enable) {
- data |= HOSTCAP_HSCB;
- }
-
-#ifdef EWP_EDL
- if (dhd->dongle_edl_support) {
- data |= HOSTCAP_EDL_RING;
- DHD_ERROR(("Enable EDL host cap\n"));
- } else {
- DHD_ERROR(("DO NOT SET EDL host cap\n"));
- }
-#endif /* EWP_EDL */
-
-#ifdef DHD_HP2P
- if (dhd->hp2p_capable) {
- data |= HOSTCAP_PKT_TIMESTAMP;
- data |= HOSTCAP_PKT_HP2P;
- DHD_ERROR(("Enable HP2P in host cap\n"));
- } else {
- DHD_ERROR(("HP2P not enabled in host cap\n"));
- }
-#endif // endif
+ /* Indicate support for TX status metadata */
+ data |= HOSTCAP_TXSTATUS_METADATA;
-#ifdef DHD_DB0TS
- if (dhd->db0ts_capable) {
- data |= HOSTCAP_DB0_TIMESTAMP;
- DHD_ERROR(("Enable DB0 TS in host cap\n"));
- } else {
- DHD_ERROR(("DB0 TS not enabled in host cap\n"));
- }
-#endif /* DHD_DB0TS */
- if (dhd->extdtxs_in_txcpl) {
- DHD_ERROR(("Enable hostcap: EXTD TXS in txcpl\n"));
- data |= HOSTCAP_PKT_TXSTATUS;
- }
- else {
- DHD_ERROR(("Enable hostcap: EXTD TXS in txcpl\n"));
- }
+ /* Indicate support for extended trap data */
+ data |= HOSTCAP_EXTENDED_TRAP_DATA;
DHD_INFO(("%s:Active Ver:%d, Host Ver:%d, FW Ver:%d\n",
__FUNCTION__,
dhd_bus_cmn_writeshared(dhd->bus, &prot->fw_trap_buf.pa,
sizeof(prot->fw_trap_buf.pa), DNGL_TO_HOST_TRAP_ADDR, 0);
}
+#ifdef HOFFLOAD_MODULES
+ dhd_bus_cmn_writeshared(dhd->bus, &dhd->hmem.data_addr,
+ sizeof(dhd->hmem.data_addr), WRT_HOST_MODULE_ADDR, 0);
+#endif
+#ifdef DHD_TIMESYNC
+ dhd_timesync_notify_ipc_rev(dhd->ts, prot->active_ipc_version);
+#endif /* DHD_TIMESYNC */
}
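/*
 * Illustrative sketch only (standalone, not part of bcmdhd; the TOY_* values
 * are placeholders, not the real HOSTCAP_* bit definitions): the negotiation
 * above reduces to advertising min(host, device) IPC revision in the low bits
 * of the shared word and OR-ing capability flags on top of it.
 */
#include <stdint.h>

#define TOY_IPC_VER_MASK 0xFFu
#define TOY_CAP_HOSTRDY  (1u << 8)
#define TOY_CAP_IDMA     (1u << 9)

static uint32_t toy_build_hostcap(uint32_t host_ver, uint32_t dev_ver,
    int hostrdy_ok, int idma_ok)
{
    uint32_t data = (host_ver > dev_ver ? dev_ver : host_ver) & TOY_IPC_VER_MASK;
    if (hostrdy_ok)
        data |= TOY_CAP_HOSTRDY;
    if (idma_ok)
        data |= TOY_CAP_IDMA;
    return data;
}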
/**
sh_addr_t base_addr;
dhd_prot_t *prot = dhd->prot;
int ret = 0;
- uint32 idmacontrol;
- uint32 waitcount = 0;
-
-#ifdef WL_MONITOR
- dhd->monitor_enable = FALSE;
-#endif /* WL_MONITOR */
/**
* A user defined value can be assigned to global variable h2d_max_txpost via
/* using the latest shared structure template */
prot->max_rxbufpost = DEFAULT_RX_BUFFERS_TO_POST;
}
- DHD_ERROR(("%s:%d: MAX_RXBUFPOST = %d\n", __FUNCTION__, __LINE__, prot->max_rxbufpost));
+ DHD_INFO(("%s:%d: MAX_RXBUFPOST = %d\n", __FUNCTION__, __LINE__, prot->max_rxbufpost));
/* Initialize. bzero() would blow away the dma pointers. */
prot->max_eventbufpost = DHD_FLOWRING_MAX_EVENTBUF_POST;
prot->max_tsbufpost = DHD_MAX_TSBUF_POST;
prot->cur_ioctlresp_bufs_posted = 0;
- OSL_ATOMIC_INIT(dhd->osh, &prot->active_tx_count);
+ prot->active_tx_count = 0;
prot->data_seq_no = 0;
prot->ioctl_seq_no = 0;
prot->rxbufpost = 0;
prot->ioctl_resplen = 0;
prot->ioctl_received = IOCTL_WAIT;
+ /* Register the interrupt function upfront */
+ /* remove corerev checks in data path */
+ prot->mb_ring_fn = dhd_bus_get_mbintr_fn(dhd->bus);
+
+ prot->mb_2_ring_fn = dhd_bus_get_mbintr_2_fn(dhd->bus);
+
/* Initialize Common MsgBuf Rings */
prot->device_ipc_version = dhd->bus->api.fw_rev;
prot->host_ipc_version = PCIE_SHARED_VERSION;
- prot->no_tx_resource = FALSE;
/* Init the host API version */
dhd_set_host_cap(dhd);
- /* alloc and configure scb host address for dongle */
- if ((ret = dhd_alloc_host_scbs(dhd))) {
- return ret;
- }
-
- /* Register the interrupt function upfront */
- /* remove corerev checks in data path */
- /* do this after host/fw negotiation for DAR */
- prot->mb_ring_fn = dhd_bus_get_mbintr_fn(dhd->bus);
- prot->mb_2_ring_fn = dhd_bus_get_mbintr_2_fn(dhd->bus);
-
- dhd->bus->_dar_war = (dhd->bus->sih->buscorerev < 64) ? TRUE : FALSE;
-
dhd_prot_ring_init(dhd, &prot->h2dring_ctrl_subn);
dhd_prot_ring_init(dhd, &prot->h2dring_rxp_subn);
dhd_prot_ring_init(dhd, &prot->d2hring_ctrl_cpln);
dhd_prot_h2d_sync_init(dhd);
+#ifdef PCIE_INB_DW
+ /* Set the initial DS state */
+ if (INBAND_DW_ENAB(dhd->bus)) {
+ dhdpcie_bus_set_pcie_inband_dw_state(dhd->bus,
+ DW_DEVICE_DS_ACTIVE);
+ }
+#endif /* PCIE_INB_DW */
+
/* init the scratch buffer */
dhd_base_addr_htolpa(&base_addr, prot->d2h_dma_scratch_buf.pa);
dhd_bus_cmn_writeshared(dhd->bus, &base_addr, sizeof(base_addr),
dhd_base_addr_htolpa(&base_addr, prot->d2h_dma_indx_rd_buf.pa);
dhd_bus_cmn_writeshared(dhd->bus, &base_addr, sizeof(base_addr),
D2H_DMA_INDX_RD_BUF, 0);
+
}
+
/* Signal to the dongle that common ring init is complete */
- if (dhd->hostrdy_after_init)
- dhd_bus_hostready(dhd->bus);
+ dhd_bus_hostready(dhd->bus);
/*
* If the DMA-able buffers for flowring needs to come from a specific
H2D_IFRM_INDX_WR_BUF, 0);
}
- /* If IDMA is enabled and initied, wait for FW to setup the IDMA descriptors
- * Waiting just before configuring doorbell
- */
-#define IDMA_ENABLE_WAIT 10
- if (IDMA_ACTIVE(dhd)) {
- /* wait for idma_en bit in IDMAcontrol register to be set */
- /* Loop till idma_en is not set */
- uint buscorerev = dhd->bus->sih->buscorerev;
- idmacontrol = si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
- IDMAControl(buscorerev), 0, 0);
- while (!(idmacontrol & PCIE_IDMA_MODE_EN(buscorerev)) &&
- (waitcount++ < IDMA_ENABLE_WAIT)) {
-
- DHD_ERROR(("iDMA not enabled yet,waiting 1 ms c=%d IDMAControl = %08x\n",
- waitcount, idmacontrol));
- OSL_DELAY(1000); /* 1ms as its onetime only */
- idmacontrol = si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
- IDMAControl(buscorerev), 0, 0);
- }
-
- if (waitcount < IDMA_ENABLE_WAIT) {
- DHD_ERROR(("iDMA enabled PCIEControl = %08x\n", idmacontrol));
- } else {
- DHD_ERROR(("Error: wait for iDMA timed out wait=%d IDMAControl = %08x\n",
- waitcount, idmacontrol));
- return BCME_ERROR;
+ /* See if info rings could be created */
+ if (dhd->bus->api.fw_rev >= PCIE_SHARED_VERSION_6) {
+ if ((ret = dhd_prot_init_info_rings(dhd)) != BCME_OK) {
+ /* For now log and proceed; further cleanup action may be necessary
+ * when we have more clarity.
+ */
+ DHD_ERROR(("%s Info rings couldn't be created: Err Code %d\n",
+ __FUNCTION__, ret));
}
}
/* Post to dongle host configured soft doorbells */
dhd_msgbuf_ring_config_d2h_soft_doorbell(dhd);
+ /* Post buffers for packet reception and ioctl/event responses */
+ dhd_msgbuf_rxbuf_post(dhd, FALSE); /* alloc pkt ids */
dhd_msgbuf_rxbuf_post_ioctlresp_bufs(dhd);
+ /* Fix re-entry problem without general lock */
+ atomic_set(&dhd_msgbuf_rxbuf_post_event_bufs_running, 0);
dhd_msgbuf_rxbuf_post_event_bufs(dhd);
- prot->no_retry = FALSE;
- prot->no_aggr = FALSE;
- prot->fixed_rate = FALSE;
-
- /*
- * Note that any communication with the Dongle should be added
- * below this point. Any other host data structure initialiation that
- * needs to be done prior to the DPC starts executing should be done
- * befor this point.
- * Because once we start sending H2D requests to Dongle, the Dongle
- * respond immediately. So the DPC context to handle this
- * D2H response could preempt the context in which dhd_prot_init is running.
- * We want to ensure that all the Host part of dhd_prot_init is
- * done before that.
- */
-
- /* See if info rings could be created, info rings should be created
- * only if dongle does not support EDL
- */
-#ifdef EWP_EDL
- if (dhd->bus->api.fw_rev >= PCIE_SHARED_VERSION_6 && !dhd->dongle_edl_support)
-#else
- if (dhd->bus->api.fw_rev >= PCIE_SHARED_VERSION_6)
-#endif /* EWP_EDL */
- {
- if ((ret = dhd_prot_init_info_rings(dhd)) != BCME_OK) {
- /* For now log and proceed, further clean up action maybe necessary
- * when we have more clarity.
- */
- DHD_ERROR(("%s Info rings couldn't be created: Err Code%d",
- __FUNCTION__, ret));
- }
- }
-
-#ifdef EWP_EDL
- /* Create Enhanced Debug Lane rings (EDL) if dongle supports it */
- if (dhd->dongle_edl_support) {
- if ((ret = dhd_prot_init_edl_rings(dhd)) != BCME_OK) {
- DHD_ERROR(("%s EDL rings couldn't be created: Err Code%d",
- __FUNCTION__, ret));
- }
- }
-#endif /* EWP_EDL */
-
-#ifdef DHD_HP2P
- /* create HPP txcmpl/rxcmpl rings */
- if (dhd->bus->api.fw_rev >= PCIE_SHARED_VERSION_7 && dhd->hp2p_capable) {
- if ((ret = dhd_prot_init_hp2p_rings(dhd)) != BCME_OK) {
- /* For now log and proceed, further clean up action maybe necessary
- * when we have more clarity.
- */
- DHD_ERROR(("%s HP2P rings couldn't be created: Err Code%d",
- __FUNCTION__, ret));
- }
- }
-#endif /* DHD_HP2P */
-
return BCME_OK;
} /* dhd_prot_init */
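/*
 * Note on the init sequence above (as patched): IPC/capability negotiation in
 * dhd_set_host_cap runs first, then the common rings, scratch buffer and DMA
 * index buffers are initialized and their addresses written to shared memory,
 * the host-ready doorbell is rung, info completion rings are attempted on IPC
 * rev 6+, and only then are rx, ioctl-response and event buffers posted to
 * the dongle.
 */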
+
/**
* dhd_prot_detach - PCIE FD protocol layer destructor.
* Unlink, frees allocated protocol memory (including dhd_prot)
dhd_dma_buf_free(dhd, &prot->host_bus_throughput_buf);
dhd_dma_buf_free(dhd, &prot->hostts_req_buf);
dhd_dma_buf_free(dhd, &prot->fw_trap_buf);
- dhd_dma_buf_free(dhd, &prot->host_scb_buf);
/* DMA-able buffers for DMAing H2D/D2H WR/RD indices */
dhd_dma_buf_free(dhd, &prot->h2d_dma_indx_wr_buf);
/* detach info rings */
dhd_prot_detach_info_rings(dhd);
-#ifdef EWP_EDL
- dhd_prot_detach_edl_rings(dhd);
-#endif // endif
-#ifdef DHD_HP2P
- /* detach HPP rings */
- dhd_prot_detach_hp2p_rings(dhd);
-#endif /* DHD_HP2P */
-
/* if IOCTLRESP_USE_CONSTMEM is defined IOCTL PKTs use pktid_map_handle_ioctl
* handler and PKT memory is allocated using alloc_ioctl_return_buffer(), Otherwise
* they will be part of pktid_ctrl_map handler and PKT memory is allocated using
DHD_NATIVE_TO_PKTID_FINI(dhd, prot->pktid_tx_map);
#ifdef IOCTLRESP_USE_CONSTMEM
DHD_NATIVE_TO_PKTID_FINI_IOCTL(dhd, prot->pktid_map_handle_ioctl);
-#endif // endif
-#ifdef DHD_MAP_PKTID_LOGGING
- DHD_PKTID_LOG_FINI(dhd, prot->pktid_dma_map);
- DHD_PKTID_LOG_FINI(dhd, prot->pktid_dma_unmap);
-#endif /* DHD_MAP_PKTID_LOGGING */
+#endif
+
+ dhd_os_spin_lock_deinit(dhd->osh, prot->rx_lock);
+
+#ifndef CONFIG_DHD_USE_STATIC_BUF
+ MFREE(dhd->osh, dhd->prot, sizeof(dhd_prot_t));
+#endif /* CONFIG_DHD_USE_STATIC_BUF */
#if defined(DHD_LB_TXC)
if (prot->tx_compl_prod.buffer)
sizeof(void*) * DHD_LB_WORKQ_SZ);
#endif /* DHD_LB_RXC */
- DHD_OS_PREFREE(dhd, dhd->prot, sizeof(dhd_prot_t));
-
dhd->prot = NULL;
}
} /* dhd_prot_detach */
+
/**
* dhd_prot_reset - Reset the protocol layer without freeing any objects.
* This may be invoked to soft reboot the dongle, without having to
if (prot->d2hring_info_cpln) {
dhd_prot_ring_reset(dhd, prot->d2hring_info_cpln);
}
-#ifdef EWP_EDL
- if (prot->d2hring_edl) {
- dhd_prot_ring_reset(dhd, prot->d2hring_edl);
- }
-#endif /* EWP_EDL */
/* Reset all DMA-able buffers allocated during prot attach */
dhd_dma_buf_reset(dhd, &prot->d2h_dma_scratch_buf);
dhd_dma_buf_reset(dhd, &prot->host_bus_throughput_buf);
dhd_dma_buf_reset(dhd, &prot->hostts_req_buf);
dhd_dma_buf_reset(dhd, &prot->fw_trap_buf);
- dhd_dma_buf_reset(dhd, &prot->host_scb_buf);
dhd_dma_buf_reset(dhd, &prot->h2d_ifrm_indx_wr_buf);
- /* Rest all DMA-able buffers for DMAing H2D/D2H WR/RD indices */
+ /* Reset all DMA-able buffers for DMAing H2D/D2H WR/RD indices */
dhd_dma_buf_reset(dhd, &prot->h2d_dma_indx_rd_buf);
dhd_dma_buf_reset(dhd, &prot->h2d_dma_indx_wr_buf);
dhd_dma_buf_reset(dhd, &prot->d2h_dma_indx_rd_buf);
dhd_dma_buf_reset(dhd, &prot->d2h_dma_indx_wr_buf);
+
prot->rx_metadata_offset = 0;
prot->tx_metadata_offset = 0;
prot->cur_event_bufs_posted = 0;
prot->cur_ioctlresp_bufs_posted = 0;
- OSL_ATOMIC_INIT(dhd->osh, &prot->active_tx_count);
+ prot->active_tx_count = 0;
prot->data_seq_no = 0;
prot->ioctl_seq_no = 0;
prot->ioctl_state = 0;
dhd_flow_rings_deinit(dhd);
}
-#ifdef DHD_HP2P
- if (prot->d2hring_hp2p_txcpl) {
- dhd_prot_ring_reset(dhd, prot->d2hring_hp2p_txcpl);
- }
- if (prot->d2hring_hp2p_rxcpl) {
- dhd_prot_ring_reset(dhd, prot->d2hring_hp2p_rxcpl);
- }
-#endif /* DHD_HP2P */
-
/* Reset PKTID map */
DHD_NATIVE_TO_PKTID_RESET(dhd, prot->pktid_ctrl_map);
DHD_NATIVE_TO_PKTID_RESET(dhd, prot->pktid_rx_map);
#define DHD_LB_DISPATCH_TX_COMPL(dhdp) do { /* noop */ } while (0)
#endif /* !DHD_LB_TXC */
+
#if defined(DHD_LB)
/* DHD load balancing: deferral of work to another online CPU */
/* DHD_LB_TXC DHD_LB_RXC DHD_LB_RXP dispatchers, in dhd_linux.c */
DMA_UNMAP(dhd->osh, pa, pa_len, DMA_RX, 0, 0);
#if defined(BCMPCIE)
dhd_txcomplete(dhd, pkt, true);
-#ifdef DHD_4WAYM4_FAIL_DISCONNECT
- dhd_eap_txcomplete(dhd, pkt, TRUE, txstatus->cmn_hdr.if_id);
-#endif /* DHD_4WAYM4_FAIL_DISCONNECT */
-#endif // endif
+#endif
PKTFREE(dhd->osh, pkt, TRUE);
count++;
{
dhd_prot_t *prot = dhd->prot;
int ret = BCME_ERROR;
- uint16 ringid;
-
- {
- /* dongle may increase max_submission_rings so keep
- * ringid at end of dynamic rings
- */
- ringid = dhd->bus->max_tx_flowrings +
- (dhd->bus->max_submission_rings - dhd->bus->max_tx_flowrings) +
- BCMPCIE_H2D_COMMON_MSGRINGS;
- }
-
- if (prot->d2hring_info_cpln) {
- /* for d2hring re-entry case, clear inited flag */
- prot->d2hring_info_cpln->inited = FALSE;
- }
+ uint16 ringid = dhd->bus->max_tx_flowrings + BCMPCIE_COMMON_MSGRINGS;
if (prot->h2dring_info_subn && prot->d2hring_info_cpln) {
return BCME_OK; /* dhd_prot_init re-entry after a dhd_prot_reset */
}
DHD_TRACE(("trying to send create d2h info ring: id %d\n", prot->d2hring_info_cpln->idx));
- ret = dhd_send_d2h_ringcreate(dhd, prot->d2hring_info_cpln,
- BCMPCIE_D2H_RING_TYPE_DBGBUF_CPL, DHD_D2H_DBGRING_REQ_PKTID);
+ ret = dhd_send_d2h_ringcreate(dhd, prot->d2hring_info_cpln);
if (ret != BCME_OK)
return ret;
- prot->h2dring_info_subn->seqnum = H2D_EPOCH_INIT_VAL;
- prot->h2dring_info_subn->current_phase = 0;
prot->d2hring_info_cpln->seqnum = D2H_EPOCH_INIT_VAL;
- prot->d2hring_info_cpln->current_phase = BCMPCIE_CMNHDR_PHASE_BIT_INIT;
DHD_TRACE(("trying to send create h2d info ring id %d\n", prot->h2dring_info_subn->idx));
prot->h2dring_info_subn->n_completion_ids = 1;
prot->h2dring_info_subn->compeltion_ring_ids[0] = prot->d2hring_info_cpln->idx;
- ret = dhd_send_h2d_ringcreate(dhd, prot->h2dring_info_subn,
- BCMPCIE_H2D_RING_TYPE_DBGBUF_SUBMIT, DHD_H2D_DBGRING_REQ_PKTID);
+ ret = dhd_send_h2d_ringcreate(dhd, prot->h2dring_info_subn);
/* Note that there is no way to delete a d2h or h2d ring in case either fails,
* so we cannot clean up if one ring was created while the other failed
}
}
-#ifdef DHD_HP2P
-static int
-dhd_check_create_hp2p_rings(dhd_pub_t *dhd)
-{
- dhd_prot_t *prot = dhd->prot;
- int ret = BCME_ERROR;
- uint16 ringid;
-
- /* Last 2 dynamic ring indices are used by hp2p rings */
- ringid = dhd->bus->max_submission_rings + dhd->bus->max_completion_rings - 2;
-
- if (prot->d2hring_hp2p_txcpl == NULL) {
- prot->d2hring_hp2p_txcpl = MALLOCZ(prot->osh, sizeof(msgbuf_ring_t));
-
- if (prot->d2hring_hp2p_txcpl == NULL) {
- DHD_ERROR(("%s: couldn't alloc memory for d2hring_hp2p_txcpl\n",
- __FUNCTION__));
- return BCME_NOMEM;
- }
-
- DHD_INFO(("%s: about to create hp2p txcpl ring\n", __FUNCTION__));
- ret = dhd_prot_ring_attach(dhd, prot->d2hring_hp2p_txcpl, "d2hhp2p_txcpl",
- dhd_bus_get_hp2p_ring_max_size(dhd->bus, TRUE), D2HRING_TXCMPLT_ITEMSIZE,
- ringid);
- if (ret != BCME_OK) {
- DHD_ERROR(("%s: couldn't alloc resources for hp2p txcpl ring\n",
- __FUNCTION__));
- goto err2;
- }
- } else {
- /* for re-entry case, clear inited flag */
- prot->d2hring_hp2p_txcpl->inited = FALSE;
- }
- if (prot->d2hring_hp2p_rxcpl == NULL) {
- prot->d2hring_hp2p_rxcpl = MALLOCZ(prot->osh, sizeof(msgbuf_ring_t));
-
- if (prot->d2hring_hp2p_rxcpl == NULL) {
- DHD_ERROR(("%s: couldn't alloc memory for d2hring_hp2p_rxcpl\n",
- __FUNCTION__));
- return BCME_NOMEM;
- }
-
- /* create the hp2p rx completion ring next to hp2p tx compl ring
- * ringid = id next to hp2p tx compl ring
- */
- ringid = ringid + 1;
-
- DHD_INFO(("%s: about to create hp2p rxcpl ring\n", __FUNCTION__));
- ret = dhd_prot_ring_attach(dhd, prot->d2hring_hp2p_rxcpl, "d2hhp2p_rxcpl",
- dhd_bus_get_hp2p_ring_max_size(dhd->bus, FALSE), D2HRING_RXCMPLT_ITEMSIZE,
- ringid);
- if (ret != BCME_OK) {
- DHD_ERROR(("%s: couldn't alloc resources for hp2p rxcpl ring\n",
- __FUNCTION__));
- goto err1;
- }
- } else {
- /* for re-entry case, clear inited flag */
- prot->d2hring_hp2p_rxcpl->inited = FALSE;
- }
-
- return ret;
-err1:
- MFREE(prot->osh, prot->d2hring_hp2p_rxcpl, sizeof(msgbuf_ring_t));
- prot->d2hring_hp2p_rxcpl = NULL;
-
-err2:
- MFREE(prot->osh, prot->d2hring_hp2p_txcpl, sizeof(msgbuf_ring_t));
- prot->d2hring_hp2p_txcpl = NULL;
- return ret;
-} /* dhd_check_create_hp2p_rings */
-
-int
-dhd_prot_init_hp2p_rings(dhd_pub_t *dhd)
-{
- dhd_prot_t *prot = dhd->prot;
- int ret = BCME_OK;
-
- dhd->hp2p_ring_active = FALSE;
-
- if ((ret = dhd_check_create_hp2p_rings(dhd)) != BCME_OK) {
- DHD_ERROR(("%s: hp2p rings aren't created! \n",
- __FUNCTION__));
- return ret;
- }
-
- if ((prot->d2hring_hp2p_txcpl->inited) || (prot->d2hring_hp2p_txcpl->create_pending)) {
- DHD_INFO(("hp2p tx completion ring was created!\n"));
- return ret;
- }
-
- DHD_TRACE(("trying to send create d2h hp2p txcpl ring: id %d\n",
- prot->d2hring_hp2p_txcpl->idx));
- ret = dhd_send_d2h_ringcreate(dhd, prot->d2hring_hp2p_txcpl,
- BCMPCIE_D2H_RING_TYPE_HPP_TX_CPL, DHD_D2H_HPPRING_TXREQ_PKTID);
- if (ret != BCME_OK)
- return ret;
-
- prot->d2hring_hp2p_txcpl->seqnum = D2H_EPOCH_INIT_VAL;
- prot->d2hring_hp2p_txcpl->current_phase = BCMPCIE_CMNHDR_PHASE_BIT_INIT;
-
- if ((prot->d2hring_hp2p_rxcpl->inited) || (prot->d2hring_hp2p_rxcpl->create_pending)) {
- DHD_INFO(("hp2p rx completion ring was created!\n"));
- return ret;
- }
-
- DHD_TRACE(("trying to send create d2h hp2p rxcpl ring: id %d\n",
- prot->d2hring_hp2p_rxcpl->idx));
- ret = dhd_send_d2h_ringcreate(dhd, prot->d2hring_hp2p_rxcpl,
- BCMPCIE_D2H_RING_TYPE_HPP_RX_CPL, DHD_D2H_HPPRING_RXREQ_PKTID);
- if (ret != BCME_OK)
- return ret;
-
- prot->d2hring_hp2p_rxcpl->seqnum = D2H_EPOCH_INIT_VAL;
- prot->d2hring_hp2p_rxcpl->current_phase = BCMPCIE_CMNHDR_PHASE_BIT_INIT;
-
- /* Note that there is no way to delete d2h or h2d ring deletion incase either fails,
- * so can not cleanup if one ring was created while the other failed
- */
- return BCME_OK;
-} /* dhd_prot_init_hp2p_rings */
-
-static void
-dhd_prot_detach_hp2p_rings(dhd_pub_t *dhd)
-{
- if (dhd->prot->d2hring_hp2p_txcpl) {
- dhd_prot_ring_detach(dhd, dhd->prot->d2hring_hp2p_txcpl);
- MFREE(dhd->prot->osh, dhd->prot->d2hring_hp2p_txcpl, sizeof(msgbuf_ring_t));
- dhd->prot->d2hring_hp2p_txcpl = NULL;
- }
- if (dhd->prot->d2hring_hp2p_rxcpl) {
- dhd_prot_ring_detach(dhd, dhd->prot->d2hring_hp2p_rxcpl);
- MFREE(dhd->prot->osh, dhd->prot->d2hring_hp2p_rxcpl, sizeof(msgbuf_ring_t));
- dhd->prot->d2hring_hp2p_rxcpl = NULL;
- }
-}
-#endif /* DHD_HP2P */
-
-#ifdef EWP_EDL
-static int
-dhd_check_create_edl_rings(dhd_pub_t *dhd)
-{
- dhd_prot_t *prot = dhd->prot;
- int ret = BCME_ERROR;
- uint16 ringid;
-
- {
- /* dongle may increase max_submission_rings so keep
- * ringid at end of dynamic rings (re-use info ring cpl ring id)
- */
- ringid = dhd->bus->max_tx_flowrings +
- (dhd->bus->max_submission_rings - dhd->bus->max_tx_flowrings) +
- BCMPCIE_H2D_COMMON_MSGRINGS + 1;
- }
-
- if (prot->d2hring_edl) {
- prot->d2hring_edl->inited = FALSE;
- return BCME_OK; /* dhd_prot_init rentry after a dhd_prot_reset */
- }
-
- if (prot->d2hring_edl == NULL) {
- prot->d2hring_edl = MALLOCZ(prot->osh, sizeof(msgbuf_ring_t));
-
- if (prot->d2hring_edl == NULL) {
- DHD_ERROR(("%s: couldn't alloc memory for d2hring_edl\n",
- __FUNCTION__));
- return BCME_NOMEM;
- }
-
- DHD_ERROR(("%s: about to create EDL ring, ringid: %u \n", __FUNCTION__,
- ringid));
- ret = dhd_prot_ring_attach(dhd, prot->d2hring_edl, "d2hring_edl",
- D2HRING_EDL_MAX_ITEM, D2HRING_EDL_ITEMSIZE,
- ringid);
- if (ret != BCME_OK) {
- DHD_ERROR(("%s: couldn't alloc resources for EDL ring\n",
- __FUNCTION__));
- goto err;
- }
- }
-
- return ret;
-err:
- MFREE(prot->osh, prot->d2hring_edl, sizeof(msgbuf_ring_t));
- prot->d2hring_edl = NULL;
-
- return ret;
-} /* dhd_check_create_btlog_rings */
-
-int
-dhd_prot_init_edl_rings(dhd_pub_t *dhd)
-{
- dhd_prot_t *prot = dhd->prot;
- int ret = BCME_ERROR;
-
- if ((ret = dhd_check_create_edl_rings(dhd)) != BCME_OK) {
- DHD_ERROR(("%s: EDL rings aren't created! \n",
- __FUNCTION__));
- return ret;
- }
-
- if ((prot->d2hring_edl->inited) || (prot->d2hring_edl->create_pending)) {
- DHD_INFO(("EDL completion ring was created!\n"));
- return ret;
- }
-
- DHD_ERROR(("trying to send create d2h edl ring: idx %d\n", prot->d2hring_edl->idx));
- ret = dhd_send_d2h_ringcreate(dhd, prot->d2hring_edl,
- BCMPCIE_D2H_RING_TYPE_EDL, DHD_D2H_DBGRING_REQ_PKTID);
- if (ret != BCME_OK)
- return ret;
-
- prot->d2hring_edl->seqnum = D2H_EPOCH_INIT_VAL;
- prot->d2hring_edl->current_phase = BCMPCIE_CMNHDR_PHASE_BIT_INIT;
-
- return BCME_OK;
-} /* dhd_prot_init_btlog_rings */
-
-static void
-dhd_prot_detach_edl_rings(dhd_pub_t *dhd)
-{
- if (dhd->prot->d2hring_edl) {
- dhd_prot_ring_detach(dhd, dhd->prot->d2hring_edl);
- MFREE(dhd->prot->osh, dhd->prot->d2hring_edl, sizeof(msgbuf_ring_t));
- dhd->prot->d2hring_edl = NULL;
- }
-}
-#endif /* EWP_EDL */
-
/**
* Initialize protocol: sync w/dongle state.
* Sets dongle media info (iswl, drv_version, mac address).
{
int ret = 0;
wlc_rev_info_t revinfo;
- char buf[128];
- dhd_prot_t *prot = dhd->prot;
+
DHD_TRACE(("%s: Enter\n", __FUNCTION__));
/* Post ts buffer after shim layer is attached */
ret = dhd_msgbuf_rxbuf_post_ts_bufs(dhd);
+
#ifdef DHD_FW_COREDUMP
/* Check the memdump capability */
dhd_get_memdump_info(dhd);
DHD_ERROR(("%s: GET_REVINFO device 0x%x, vendor 0x%x, chipnum 0x%x\n", __FUNCTION__,
revinfo.deviceid, revinfo.vendorid, revinfo.chipnum));
- /* Get the RxBuf post size */
- memset(buf, 0, sizeof(buf));
- bcm_mkiovar("rxbufpost_sz", NULL, 0, buf, sizeof(buf));
- ret = dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, buf, sizeof(buf), FALSE, 0);
- if (ret < 0) {
- DHD_ERROR(("%s: GET RxBuf post FAILED, default to %d\n",
- __FUNCTION__, DHD_FLOWRING_RX_BUFPOST_PKTSZ));
- prot->rxbufpost_sz = DHD_FLOWRING_RX_BUFPOST_PKTSZ;
- } else {
- memcpy_s(&(prot->rxbufpost_sz), sizeof(prot->rxbufpost_sz), buf, sizeof(uint16));
- if (prot->rxbufpost_sz > DHD_FLOWRING_RX_BUFPOST_PKTSZ_MAX) {
- DHD_ERROR(("%s: Invalid RxBuf post size : %d, default to %d\n",
- __FUNCTION__, prot->rxbufpost_sz, DHD_FLOWRING_RX_BUFPOST_PKTSZ));
- prot->rxbufpost_sz = DHD_FLOWRING_RX_BUFPOST_PKTSZ;
- } else {
- DHD_ERROR(("%s: RxBuf Post : %d\n", __FUNCTION__, prot->rxbufpost_sz));
- }
- }
-
- /* Post buffers for packet reception */
- dhd_msgbuf_rxbuf_post(dhd, FALSE); /* alloc pkt ids */
-
DHD_SSSR_DUMP_INIT(dhd);
dhd_process_cid_mac(dhd, TRUE);
ret = dhd_preinit_ioctls(dhd);
dhd_process_cid_mac(dhd, FALSE);
-#if defined(DHD_H2D_LOG_TIME_SYNC)
-#ifdef DHD_HP2P
- if (FW_SUPPORTED(dhd, h2dlogts) || dhd->hp2p_capable) {
- if (dhd->hp2p_enable) {
- dhd->dhd_rte_time_sync_ms = DHD_H2D_LOG_TIME_STAMP_MATCH / 40;
- } else {
- dhd->dhd_rte_time_sync_ms = DHD_H2D_LOG_TIME_STAMP_MATCH;
- }
-#else
- if (FW_SUPPORTED(dhd, h2dlogts)) {
- dhd->dhd_rte_time_sync_ms = DHD_H2D_LOG_TIME_STAMP_MATCH;
-#endif // endif
- dhd->bus->dhd_rte_time_sync_count = OSL_SYSUPTIME_US();
- /* This is during initialization. */
- dhd_h2d_log_time_sync(dhd);
- } else {
- dhd->dhd_rte_time_sync_ms = 0;
- }
-#endif /* DHD_H2D_LOG_TIME_SYNC || DHD_HP2P */
/* Always assumes wl for now */
dhd->iswl = TRUE;
done:
return ret;
} /* dhd_sync_with_dongle */
+
#define DHD_DBG_SHOW_METADATA 0
#if DHD_DBG_SHOW_METADATA
}
}
-/**
- * dhd_prot_packet_get should be called only for items having pktid_ctrl_map handle
- * and all the bottom most functions like dhd_pktid_map_free hold separate DHD_PKTID_LOCK
- * to ensure thread safety, so no need to hold any locks for this function
- */
+/* dhd_prot_packet_get should be called only for items having pktid_ctrl_map handle */
static INLINE void * BCMFASTPATH
dhd_prot_packet_get(dhd_pub_t *dhd, uint32 pktid, uint8 pkttype, bool free_pktid)
{
return;
}
-#endif // endif
+#endif
+
+#ifdef PCIE_INB_DW
+static int
+dhd_prot_inc_hostactive_devwake_assert(dhd_bus_t *bus)
+{
+ unsigned long flags = 0;
+
+ if (INBAND_DW_ENAB(bus)) {
+ DHD_BUS_INB_DW_LOCK(bus->inb_lock, flags);
+ bus->host_active_cnt++;
+ DHD_BUS_INB_DW_UNLOCK(bus->inb_lock, flags);
+ if (dhd_bus_set_device_wake(bus, TRUE) != BCME_OK) {
+ DHD_BUS_INB_DW_LOCK(bus->inb_lock, flags);
+ bus->host_active_cnt--;
+ dhd_bus_inb_ack_pending_ds_req(bus);
+ DHD_BUS_INB_DW_UNLOCK(bus->inb_lock, flags);
+ return BCME_ERROR;
+ }
+ }
+
+ return BCME_OK;
+}
+
+static void
+dhd_prot_dec_hostactive_ack_pending_dsreq(dhd_bus_t *bus)
+{
+ unsigned long flags = 0;
+ if (INBAND_DW_ENAB(bus)) {
+ DHD_BUS_INB_DW_LOCK(bus->inb_lock, flags);
+ bus->host_active_cnt--;
+ dhd_bus_inb_ack_pending_ds_req(bus);
+ DHD_BUS_INB_DW_UNLOCK(bus->inb_lock, flags);
+ }
+}
+#endif /* PCIE_INB_DW */
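/*
 * Note on the two helpers above: callers that are about to post to a ring
 * bracket the access with the "inc" helper (bump host_active_cnt and assert
 * DEVICE_WAKE) and the "dec" helper (drop the count and ack any deep-sleep
 * request the dongle raised while the host was busy). The rollback branch in
 * the "inc" helper keeps host_active_cnt balanced when the device-wake
 * assertion itself fails.
 */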
static void BCMFASTPATH
dhd_msgbuf_rxbuf_post(dhd_pub_t *dhd, bool use_rsv_pktid)
dhd_prot_rxbuf_post(dhd_pub_t *dhd, uint16 count, bool use_rsv_pktid)
{
void *p, **pktbuf;
+ uint16 pktsz = DHD_FLOWRING_RX_BUFPOST_PKTSZ;
uint8 *rxbuf_post_tmp;
host_rxbuf_post_t *rxbuf_post;
void *msg_start;
msgbuf_ring_t *ring = &prot->h2dring_rxp_subn;
void *lcl_buf;
uint16 lcl_buf_size;
- uint16 pktsz = prot->rxbufpost_sz;
+
+#ifdef PCIE_INB_DW
+ if (dhd_prot_inc_hostactive_devwake_assert(dhd->bus) != BCME_OK)
+ return BCME_ERROR;
+#endif /* PCIE_INB_DW */
/* allocate a local buffer to store pkt buffer va, pa and length */
lcl_buf_size = (sizeof(void *) + sizeof(dmaaddr_t) + sizeof(uint32)) *
lcl_buf = MALLOC(dhd->osh, lcl_buf_size);
if (!lcl_buf) {
DHD_ERROR(("%s: local scratch buffer allocation failed\n", __FUNCTION__));
+#ifdef PCIE_INB_DW
+ dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus);
+#endif
return 0;
}
pktbuf = lcl_buf;
/* only post what we have */
count = i;
- /* grab the ring lock to allocate pktid and post on ring */
- DHD_RING_LOCK(ring->ring_lock, flags);
+ /* grab the rx lock to allocate pktid and post on ring */
+ DHD_SPIN_LOCK(prot->rx_lock, flags);
/* Claim space for exactly 'count' no of messages, for mitigation purpose */
msg_start = (void *)
dhd_prot_alloc_ring_space(dhd, ring, count, &alloced, TRUE);
if (msg_start == NULL) {
DHD_INFO(("%s:%d: Rxbufpost Msgbuf Not available\n", __FUNCTION__, __LINE__));
- DHD_RING_UNLOCK(ring->ring_lock, flags);
goto cleanup;
}
/* if msg_start != NULL, we should have alloced space for atleast 1 item */
/* Move rxbuf_post_tmp to next item */
rxbuf_post_tmp = rxbuf_post_tmp + ring->item_len;
-
-#ifdef DHD_LBUF_AUDIT
- PKTAUDIT(dhd->osh, p);
-#endif // endif
}
if (i < alloced) {
/* update ring's WR index and ring doorbell to dongle */
if (alloced > 0) {
+ unsigned long flags1;
+ DHD_GENERAL_LOCK(dhd, flags1);
dhd_prot_ring_write_complete(dhd, ring, msg_start, alloced);
+ DHD_GENERAL_UNLOCK(dhd, flags1);
}
- DHD_RING_UNLOCK(ring->ring_lock, flags);
+ DHD_SPIN_UNLOCK(prot->rx_lock, flags);
cleanup:
for (i = alloced; i < count; i++) {
}
MFREE(dhd->osh, lcl_buf, lcl_buf_size);
-
+#ifdef PCIE_INB_DW
+ dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus);
+#endif
return alloced;
} /* dhd_prot_rxbufpost */
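/*
 * Shape of the post path above: packets are allocated and DMA-mapped into a
 * local scratch array first, then ring space is claimed for exactly that many
 * work items under the rx lock, and anything that could not be placed on the
 * ring is unmapped and freed again in the cleanup tail before the scratch
 * array itself is released.
 */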
static int
-dhd_prot_infobufpost(dhd_pub_t *dhd, msgbuf_ring_t *ring)
+dhd_prot_infobufpost(dhd_pub_t *dhd)
{
unsigned long flags;
uint32 pktid;
dhd_prot_t *prot = dhd->prot;
+ msgbuf_ring_t *ring = prot->h2dring_info_subn;
uint16 alloced = 0;
- uint16 pktsz = DHD_INFOBUF_RX_BUFPOST_PKTSZ;
+ uint16 pktsz = DHD_FLOWRING_RX_BUFPOST_PKTSZ;
uint32 pktlen;
info_buf_post_msg_t *infobuf_post;
uint8 *infobuf_post_tmp;
void* msg_start;
uint8 i = 0;
dmaaddr_t pa;
- int16 count = 0;
+ int16 count;
if (ring == NULL)
return 0;
if (ring->inited != TRUE)
return 0;
- if (ring == dhd->prot->h2dring_info_subn) {
- if (prot->max_infobufpost == 0)
- return 0;
-
- count = prot->max_infobufpost - prot->infobufpost;
- }
- else {
- DHD_ERROR(("Unknown ring\n"));
+ if (prot->max_infobufpost == 0)
return 0;
- }
+
+ count = prot->max_infobufpost - prot->infobufpost;
if (count <= 0) {
DHD_INFO(("%s: Cannot post more than max info resp buffers\n",
return 0;
}
- /* grab the ring lock to allocate pktid and post on ring */
- DHD_RING_LOCK(ring->ring_lock, flags);
+#ifdef PCIE_INB_DW
+ if (dhd_prot_inc_hostactive_devwake_assert(dhd->bus) != BCME_OK)
+ return BCME_ERROR;
+#endif /* PCIE_INB_DW */
+ DHD_GENERAL_LOCK(dhd, flags);
/* Claim space for exactly 'count' no of messages, for mitigation purpose */
msg_start = (void *) dhd_prot_alloc_ring_space(dhd, ring, count, &alloced, FALSE);
+ DHD_GENERAL_UNLOCK(dhd, flags);
if (msg_start == NULL) {
DHD_INFO(("%s:%d: infobufpost Msgbuf Not available\n", __FUNCTION__, __LINE__));
- DHD_RING_UNLOCK(ring->ring_lock, flags);
+#ifdef PCIE_INB_DW
+ dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus);
+#endif
return -1;
}
infobuf_post->cmn_hdr.flags = ring->current_phase;
ring->seqnum++;
+#if defined(DHD_PCIE_PKTID)
+ /* get the lock before calling DHD_NATIVE_TO_PKTID */
+ DHD_GENERAL_LOCK(dhd, flags);
+#endif /* DHD_PCIE_PKTID */
+
pktid = DHD_NATIVE_TO_PKTID(dhd, dhd->prot->pktid_ctrl_map, p, pa,
pktlen, DMA_RX, NULL, ring->dma_buf.secdma, PKTTYPE_INFO_RX);
+
#if defined(DHD_PCIE_PKTID)
+ /* free lock */
+ DHD_GENERAL_UNLOCK(dhd, flags);
+
if (pktid == DHD_PKTID_INVALID) {
if (SECURE_DMA_ENAB(dhd->osh)) {
+ DHD_GENERAL_LOCK(dhd, flags);
SECURE_DMA_UNMAP(dhd->osh, pa, pktlen, DMA_RX, 0, 0,
ring->dma_buf.secdma, 0);
+ DHD_GENERAL_UNLOCK(dhd, flags);
} else
DMA_UNMAP(dhd->osh, pa, pktlen, DMA_RX, 0, 0);
#else
PKTFREE(dhd->osh, p, FALSE);
#endif /* DHD_USE_STATIC_CTRLBUF */
- DHD_ERROR_RLMT(("%s: Pktid pool depleted.\n", __FUNCTION__));
+ DHD_ERROR(("%s: Pktid pool depleted.\n", __FUNCTION__));
break;
}
#endif /* DHD_PCIE_PKTID */
infobuf_post->cmn_hdr.request_id = htol32(pktid);
/* Move rxbuf_post_tmp to next item */
infobuf_post_tmp = infobuf_post_tmp + ring->item_len;
-#ifdef DHD_LBUF_AUDIT
- PKTAUDIT(dhd->osh, p);
-#endif // endif
}
if (i < alloced) {
/* Update the write pointer in TCM & ring bell */
if (alloced > 0) {
- if (ring == dhd->prot->h2dring_info_subn) {
- prot->infobufpost += alloced;
- }
+ prot->infobufpost += alloced;
+ DHD_INFO(("allocated %d buffers for info ring\n", alloced));
+ DHD_GENERAL_LOCK(dhd, flags);
dhd_prot_ring_write_complete(dhd, ring, msg_start, alloced);
+ DHD_GENERAL_UNLOCK(dhd, flags);
}
-
- DHD_RING_UNLOCK(ring->ring_lock, flags);
-
+#ifdef PCIE_INB_DW
+ dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus);
+#endif
return alloced;
} /* dhd_prot_infobufpost */
return -1;
}
+
if ((msg_type == MSG_TYPE_EVENT_BUF_POST) || (msg_type == MSG_TYPE_TIMSTAMP_BUFPOST))
non_ioctl_resp_buf = TRUE;
else
pktlen = PKTLEN(dhd->osh, p);
if (SECURE_DMA_ENAB(dhd->osh)) {
+ DHD_GENERAL_LOCK(dhd, flags);
pa = SECURE_DMA_MAP(dhd->osh, PKTDATA(dhd->osh, p), pktlen,
DMA_RX, p, 0, ring->dma_buf.secdma, 0);
+ DHD_GENERAL_UNLOCK(dhd, flags);
}
#ifndef BCM_SECURE_DMA
else
#endif /* DMAMAP_STATS */
}
+#ifdef PCIE_INB_DW
+ if (dhd_prot_inc_hostactive_devwake_assert(dhd->bus) != BCME_OK)
+ return BCME_ERROR;
+#endif /* PCIE_INB_DW */
- /* grab the ring lock to allocate pktid and post on ring */
- DHD_RING_LOCK(ring->ring_lock, flags);
+ DHD_GENERAL_LOCK(dhd, flags);
rxbuf_post = (ioctl_resp_evt_buf_post_msg_t *)
dhd_prot_alloc_ring_space(dhd, ring, 1, &alloced, FALSE);
if (rxbuf_post == NULL) {
- DHD_RING_UNLOCK(ring->ring_lock, flags);
+ DHD_GENERAL_UNLOCK(dhd, flags);
DHD_ERROR(("%s:%d: Ctrl submit Msgbuf Not available to post buffer \n",
__FUNCTION__, __LINE__));
#endif /* IOCTLRESP_USE_CONSTMEM */
{
if (SECURE_DMA_ENAB(dhd->osh)) {
+ DHD_GENERAL_LOCK(dhd, flags);
SECURE_DMA_UNMAP(dhd->osh, pa, pktlen, DMA_RX, 0, DHD_DMAH_NULL,
ring->dma_buf.secdma, 0);
+ DHD_GENERAL_UNLOCK(dhd, flags);
} else {
DMA_UNMAP(dhd->osh, pa, pktlen, DMA_RX, 0, DHD_DMAH_NULL);
}
BCMPCIE_CMNHDR_PHASE_BIT_INIT;
}
}
- DHD_RING_UNLOCK(ring->ring_lock, flags);
+ DHD_GENERAL_UNLOCK(dhd, flags);
DMA_UNMAP(dhd->osh, pa, pktlen, DMA_RX, 0, DHD_DMAH_NULL);
- DHD_ERROR_RLMT(("%s: Pktid pool depleted.\n", __FUNCTION__));
+ DHD_ERROR(("%s: Pktid pool depleted.\n", __FUNCTION__));
goto free_pkt_return;
}
BCMPCIE_CMNHDR_PHASE_BIT_INIT;
}
}
- DHD_RING_UNLOCK(ring->ring_lock, flags);
+ DHD_GENERAL_UNLOCK(dhd, flags);
#ifdef IOCTLRESP_USE_CONSTMEM
if (non_ioctl_resp_buf)
#endif /* IOCTLRESP_USE_CONSTMEM */
{
if (SECURE_DMA_ENAB(dhd->osh)) {
+ DHD_GENERAL_LOCK(dhd, flags);
SECURE_DMA_UNMAP(dhd->osh, pa, pktlen, DMA_RX, 0, DHD_DMAH_NULL,
ring->dma_buf.secdma, 0);
+ DHD_GENERAL_UNLOCK(dhd, flags);
} else
DMA_UNMAP(dhd->osh, pa, pktlen, DMA_RX, 0, DHD_DMAH_NULL);
}
rxbuf_post->host_buf_addr.high_addr = htol32(PHYSADDRHI(pa));
rxbuf_post->host_buf_addr.low_addr = htol32(PHYSADDRLO(pa));
-#ifdef DHD_LBUF_AUDIT
- if (non_ioctl_resp_buf)
- PKTAUDIT(dhd->osh, p);
-#endif // endif
-
/* update ring's WR index and ring doorbell to dongle */
dhd_prot_ring_write_complete(dhd, ring, rxbuf_post, 1);
+ DHD_GENERAL_UNLOCK(dhd, flags);
- DHD_RING_UNLOCK(ring->ring_lock, flags);
+#ifdef PCIE_INB_DW
+ dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus);
+#endif
return 1;
free_pkt_return:
- if (!non_ioctl_resp_buf) {
#ifdef IOCTLRESP_USE_CONSTMEM
+ if (!non_ioctl_resp_buf) {
free_ioctl_return_buffer(dhd, &retbuf);
-#else
- dhd_prot_packet_free(dhd, p, buf_type, FALSE);
-#endif /* IOCTLRESP_USE_CONSTMEM */
- } else {
+ } else
+#endif
+ {
dhd_prot_packet_free(dhd, p, buf_type, FALSE);
}
+#ifdef PCIE_INB_DW
+ dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus);
+#endif
+
return -1;
} /* dhd_prot_rxbufpost_ctrl */
dhd_prot_t *prot = dhd->prot;
int max_to_post;
+ /* Use atomic variable to avoid re-entry */
+ if (atomic_read(&dhd_msgbuf_rxbuf_post_event_bufs_running) > 0) {
+ return;
+ }
+ atomic_inc(&dhd_msgbuf_rxbuf_post_event_bufs_running);
+
max_to_post = prot->max_eventbufpost - prot->cur_event_bufs_posted;
if (max_to_post <= 0) {
DHD_ERROR(("%s: Cannot post more than max event buffers\n",
}
prot->cur_event_bufs_posted += dhd_msgbuf_rxbuf_post_ctrlpath(dhd,
MSG_TYPE_EVENT_BUF_POST, max_to_post);
+
+ atomic_dec(&dhd_msgbuf_rxbuf_post_event_bufs_running);
}
static int
dhd_msgbuf_rxbuf_post_ts_bufs(dhd_pub_t *dhd)
{
- return 0;
-}
-
-bool BCMFASTPATH
-dhd_prot_process_msgbuf_infocpl(dhd_pub_t *dhd, uint bound)
-{
+#ifdef DHD_TIMESYNC
dhd_prot_t *prot = dhd->prot;
- bool more = TRUE;
- uint n = 0;
+ int max_to_post;
+
+ if (prot->active_ipc_version < 7) {
+ DHD_ERROR(("no ts buffers to device ipc rev is %d, needs to be atleast 7\n",
+ prot->active_ipc_version));
+ return 0;
+ }
+
+ max_to_post = prot->max_tsbufpost - prot->cur_ts_bufs_posted;
+ if (max_to_post <= 0) {
+ DHD_INFO(("%s: Cannot post more than max ts buffers\n",
+ __FUNCTION__));
+ return 0;
+ }
+
+ prot->cur_ts_bufs_posted += dhd_msgbuf_rxbuf_post_ctrlpath(dhd,
+ MSG_TYPE_TIMSTAMP_BUFPOST, max_to_post);
+#endif /* DHD_TIMESYNC */
+ return 0;
+}
+
+bool BCMFASTPATH
+dhd_prot_process_msgbuf_infocpl(dhd_pub_t *dhd, uint bound)
+{
+ dhd_prot_t *prot = dhd->prot;
+ bool more = TRUE;
+ uint n = 0;
msgbuf_ring_t *ring = prot->d2hring_info_cpln;
- unsigned long flags;
if (ring == NULL)
return FALSE;
uint8 *msg_addr;
uint32 msg_len;
- if (dhd_query_bus_erros(dhd)) {
- more = FALSE;
- break;
- }
-
if (dhd->hang_was_sent) {
more = FALSE;
break;
}
- if (dhd->smmu_fault_occurred) {
- more = FALSE;
- break;
- }
-
- DHD_RING_LOCK(ring->ring_lock, flags);
/* Get the message from ring */
msg_addr = dhd_prot_get_read_addr(dhd, ring, &msg_len);
- DHD_RING_UNLOCK(ring->ring_lock, flags);
if (msg_addr == NULL) {
more = FALSE;
break;
return more;
}
-#ifdef EWP_EDL
-bool
-dhd_prot_process_msgbuf_edl(dhd_pub_t *dhd)
-{
- dhd_prot_t *prot = dhd->prot;
- msgbuf_ring_t *ring = prot->d2hring_edl;
- unsigned long flags = 0;
- uint32 items = 0;
- uint16 rd = 0;
- uint16 depth = 0;
-
- if (ring == NULL)
- return FALSE;
- if (ring->inited != TRUE)
- return FALSE;
- if (ring->item_len == 0) {
- DHD_ERROR(("%s: Bad ring ! ringidx %d, item_len %d \n",
- __FUNCTION__, ring->idx, ring->item_len));
- return FALSE;
- }
-
- if (dhd_query_bus_erros(dhd)) {
- return FALSE;
- }
-
- if (dhd->hang_was_sent) {
- return FALSE;
- }
-
- /* in this DPC context just check if wr index has moved
- * and schedule deferred context to actually process the
- * work items.
- */
- /* update the write index */
- DHD_RING_LOCK(ring->ring_lock, flags);
- if (dhd->dma_d2h_ring_upd_support) {
- /* DMAing write/read indices supported */
- ring->wr = dhd_prot_dma_indx_get(dhd, D2H_DMA_INDX_WR_UPD, ring->idx);
- } else {
- dhd_bus_cmn_readshared(dhd->bus, &ring->wr, RING_WR_UPD, ring->idx);
- }
- rd = ring->rd;
- DHD_RING_UNLOCK(ring->ring_lock, flags);
-
- depth = ring->max_items;
- /* check for avail space, in number of ring items */
- items = READ_AVAIL_SPACE(ring->wr, rd, depth);
- if (items == 0) {
- /* no work items in edl ring */
- return FALSE;
- }
- if (items > ring->max_items) {
- DHD_ERROR(("\r\n======================= \r\n"));
- DHD_ERROR(("%s(): ring %p, ring->name %s, ring->max_items %d, items %d \r\n",
- __FUNCTION__, ring, ring->name, ring->max_items, items));
- DHD_ERROR(("wr: %d, rd: %d, depth: %d \r\n",
- ring->wr, ring->rd, depth));
- DHD_ERROR(("dhd->busstate %d bus->wait_for_d3_ack %d \r\n",
- dhd->busstate, dhd->bus->wait_for_d3_ack));
- DHD_ERROR(("\r\n======================= \r\n"));
-#ifdef DHD_FW_COREDUMP
- if (dhd->memdump_enabled) {
- /* collect core dump */
- dhd->memdump_type = DUMP_TYPE_RESUMED_ON_INVALID_RING_RDWR;
- dhd_bus_mem_dump(dhd);
-
- }
-#endif /* DHD_FW_COREDUMP */
- dhd_schedule_reset(dhd);
-
- return FALSE;
- }
-
- if (items > D2HRING_EDL_WATERMARK) {
- DHD_ERROR_RLMT(("%s: WARNING! EDL watermark hit, num items=%u;"
- " rd=%u; wr=%u; depth=%u;\n", __FUNCTION__, items,
- ring->rd, ring->wr, depth));
- }
-
- dhd_schedule_logtrace(dhd->info);
-
- return FALSE;
-}
-
-/* This is called either from work queue context of 'event_log_dispatcher_work' or
-* from the kthread context of dhd_logtrace_thread
-*/
-int
-dhd_prot_process_edl_complete(dhd_pub_t *dhd, void *evt_decode_data)
-{
- dhd_prot_t *prot = NULL;
- msgbuf_ring_t *ring = NULL;
- int err = 0;
- unsigned long flags = 0;
- cmn_msg_hdr_t *msg = NULL;
- uint8 *msg_addr = NULL;
- uint32 max_items_to_process = 0, n = 0;
- uint32 num_items = 0, new_items = 0;
- uint16 depth = 0;
- volatile uint16 wr = 0;
-
- if (!dhd || !dhd->prot)
- return 0;
-
- prot = dhd->prot;
- ring = prot->d2hring_edl;
- if (!ring || !evt_decode_data) {
- return 0;
- }
-
- if (dhd->hang_was_sent) {
- return FALSE;
- }
-
- DHD_RING_LOCK(ring->ring_lock, flags);
- ring->curr_rd = ring->rd;
- wr = ring->wr;
- depth = ring->max_items;
- /* check for avail space, in number of ring items
- * Note, that this will only give the # of items
- * from rd to wr if wr>=rd, or from rd to ring end
- * if wr < rd. So in the latter case strictly speaking
- * not all the items are read. But this is OK, because
- * these will be processed in the next doorbell as rd
- * would have wrapped around. Processing in the next
- * doorbell is acceptable since EDL only contains debug data
- */
- num_items = READ_AVAIL_SPACE(wr, ring->rd, depth);
-
- if (num_items == 0) {
- /* no work items in edl ring */
- DHD_RING_UNLOCK(ring->ring_lock, flags);
- return 0;
- }
-
- DHD_INFO(("%s: EDL work items [%u] available \n",
- __FUNCTION__, num_items));
-
- /* if space is available, calculate address to be read */
- msg_addr = (char*)ring->dma_buf.va + (ring->rd * ring->item_len);
-
- max_items_to_process = MIN(num_items, DHD_EVENT_LOGTRACE_BOUND);
-
- DHD_RING_UNLOCK(ring->ring_lock, flags);
-
- /* Prefetch data to populate the cache */
- OSL_PREFETCH(msg_addr);
-
- n = max_items_to_process;
- while (n > 0) {
- msg = (cmn_msg_hdr_t *)msg_addr;
- /* wait for DMA of work item to complete */
- if ((err = prot->d2h_edl_sync_cb(dhd, ring, msg)) != BCME_OK) {
- DHD_ERROR(("%s: Error waiting for DMA to cmpl in EDL "
- "ring; err = %d\n", __FUNCTION__, err));
- }
-
- /*
- * Update the curr_rd to the current index in the ring, from where
- * the work item is fetched. This way if the fetched work item
- * fails in LIVELOCK, we can print the exact read index in the ring
- * that shows up the corrupted work item.
- */
- if ((ring->curr_rd + 1) >= ring->max_items) {
- ring->curr_rd = 0;
- } else {
- ring->curr_rd += 1;
- }
-
- if (err != BCME_OK) {
- return 0;
- }
-
- /* process the edl work item, i.e, the event log */
- err = dhd_event_logtrace_process_edl(dhd, msg_addr, evt_decode_data);
-
- /* Dummy sleep so that scheduler kicks in after processing any logprints */
- OSL_SLEEP(0);
-
- /* Prefetch data to populate the cache */
- OSL_PREFETCH(msg_addr + ring->item_len);
-
- msg_addr += ring->item_len;
- --n;
- }
-
- DHD_RING_LOCK(ring->ring_lock, flags);
- /* update host ring read pointer */
- if ((ring->rd + max_items_to_process) >= ring->max_items)
- ring->rd = 0;
- else
- ring->rd += max_items_to_process;
- DHD_RING_UNLOCK(ring->ring_lock, flags);
-
- /* Now after processing max_items_to_process update dongle rd index.
- * The TCM rd index is updated only if bus is not
- * in D3. Else, the rd index is updated from resume
- * context in - 'dhdpcie_bus_suspend'
- */
- DHD_GENERAL_LOCK(dhd, flags);
- if (DHD_BUS_CHECK_SUSPEND_OR_ANY_SUSPEND_IN_PROGRESS(dhd)) {
- DHD_INFO(("%s: bus is in suspend(%d) or suspending(0x%x) state!!\n",
- __FUNCTION__, dhd->busstate, dhd->dhd_bus_busy_state));
- DHD_GENERAL_UNLOCK(dhd, flags);
- } else {
- DHD_GENERAL_UNLOCK(dhd, flags);
- DHD_EDL_RING_TCM_RD_UPDATE(dhd);
- }
-
- /* if num_items > bound, then anyway we will reschedule and
- * this function runs again, so that if in between the DPC has
- * updated the wr index, then the updated wr is read. But if
- * num_items <= bound, and if DPC executes and updates the wr index
- * when the above while loop is running, then the updated 'wr' index
- * needs to be re-read from here, If we don't do so, then till
- * the next time this function is scheduled
- * the event logs will not be processed.
- */
- if (num_items <= DHD_EVENT_LOGTRACE_BOUND) {
- /* read the updated wr index if reqd. and update num_items */
- DHD_RING_LOCK(ring->ring_lock, flags);
- if (wr != (volatile uint16)ring->wr) {
- wr = (volatile uint16)ring->wr;
- new_items = READ_AVAIL_SPACE(wr, ring->rd, depth);
- DHD_INFO(("%s: new items [%u] avail in edl\n",
- __FUNCTION__, new_items));
- num_items += new_items;
- }
- DHD_RING_UNLOCK(ring->ring_lock, flags);
- }
-
- /* if # of items processed is less than num_items, need to re-schedule
- * the deferred ctx
- */
- if (max_items_to_process < num_items) {
- DHD_INFO(("%s: EDL bound hit / new items found, "
- "items processed=%u; remaining=%u, "
- "resched deferred ctx...\n",
- __FUNCTION__, max_items_to_process,
- num_items - max_items_to_process));
- return (num_items - max_items_to_process);
- }
-
- return 0;
-
-}
-
-void
-dhd_prot_edl_ring_tcm_rd_update(dhd_pub_t *dhd)
-{
- dhd_prot_t *prot = NULL;
- unsigned long flags = 0;
- msgbuf_ring_t *ring = NULL;
-
- if (!dhd)
- return;
-
- prot = dhd->prot;
- if (!prot || !prot->d2hring_edl)
- return;
-
- ring = prot->d2hring_edl;
- DHD_RING_LOCK(ring->ring_lock, flags);
- dhd_prot_upd_read_idx(dhd, ring);
- DHD_RING_UNLOCK(ring->ring_lock, flags);
-}
-#endif /* EWP_EDL */
-
-/* called when DHD needs to check for 'receive complete' messages from the dongle */
+/** called when DHD needs to check for 'receive complete' messages from the dongle */
bool BCMFASTPATH
-dhd_prot_process_msgbuf_rxcpl(dhd_pub_t *dhd, uint bound, int ringtype)
+dhd_prot_process_msgbuf_rxcpl(dhd_pub_t *dhd, uint bound)
{
bool more = FALSE;
uint n = 0;
dhd_prot_t *prot = dhd->prot;
- msgbuf_ring_t *ring;
- uint16 item_len;
+ msgbuf_ring_t *ring = &prot->d2hring_rx_cpln;
+ uint16 item_len = ring->item_len;
host_rxbuf_cmpl_t *msg = NULL;
uint8 *msg_addr;
uint32 msg_len;
uint32 pktid;
int i;
uint8 sync;
- ts_timestamp_t *ts;
- BCM_REFERENCE(ts);
-#ifdef DHD_HP2P
- if (ringtype == DHD_HP2P_RING && prot->d2hring_hp2p_rxcpl)
- ring = prot->d2hring_hp2p_rxcpl;
- else
-#endif /* DHD_HP2P */
- ring = &prot->d2hring_rx_cpln;
- item_len = ring->item_len;
while (1) {
if (dhd_is_device_removed(dhd))
break;
- if (dhd_query_bus_erros(dhd))
- break;
-
if (dhd->hang_was_sent)
break;
- if (dhd->smmu_fault_occurred) {
- break;
- }
-
pkt_cnt = 0;
pktqhead = pkt_newidx = NULL;
pkt_cnt_newidx = 0;
- DHD_RING_LOCK(ring->ring_lock, flags);
+ DHD_SPIN_LOCK(prot->rx_lock, flags);
/* Get the address of the next message to be read from ring */
msg_addr = dhd_prot_get_read_addr(dhd, ring, &msg_len);
if (msg_addr == NULL) {
- DHD_RING_UNLOCK(ring->ring_lock, flags);
+ DHD_SPIN_UNLOCK(prot->rx_lock, flags);
break;
}
}
/* Actual length of the packet */
PKTSETLEN(dhd->osh, pkt, ltoh16(msg->data_len));
-
#if defined(WL_MONITOR)
- if (dhd_monitor_enabled(dhd, ifidx)) {
- if (msg->flags & BCMPCIE_PKT_FLAGS_FRAME_802_11) {
- dhd_rx_mon_pkt(dhd, msg, pkt, ifidx);
- continue;
- } else {
- DHD_ERROR(("Received non 802.11 packet, "
- "when monitor mode is enabled\n"));
- }
+ if (dhd_monitor_enabled(dhd, ifidx) &&
+ (msg->flags & BCMPCIE_PKT_FLAGS_FRAME_802_11)) {
+ dhd_rx_mon_pkt(dhd, msg, pkt, ifidx);
+ continue;
}
-#endif /* WL_MONITOR */
+#endif
if (!pktqhead) {
pktqhead = prevpkt = pkt;
}
}
-#ifdef DHD_HP2P
- if (dhd->hp2p_capable && ring == prot->d2hring_hp2p_rxcpl) {
-#ifdef DHD_HP2P_DEBUG
- bcm_print_bytes("Rxcpl", (uchar *)msg, sizeof(host_rxbuf_cmpl_t));
-#endif /* DHD_HP2P_DEBUG */
- dhd_update_hp2p_rxstats(dhd, msg);
+#ifdef DHD_TIMESYNC
+ if (dhd->prot->rx_ts_log_enabled) {
+ ts_timestamp_t *ts = (ts_timestamp_t *)&msg->ts;
+ dhd_timesync_log_rx_timestamp(dhd->ts, ifidx, ts->low, ts->high);
}
-#endif /* DHD_HP2P */
-
-#ifdef DHD_LBUF_AUDIT
- PKTAUDIT(dhd->osh, pkt);
-#endif // endif
+#endif /* DHD_TIMESYNC */
}
/* roll back read pointer for unprocessed message */
/* Update read pointer */
dhd_prot_upd_read_idx(dhd, ring);
- DHD_RING_UNLOCK(ring->ring_lock, flags);
+ DHD_SPIN_UNLOCK(prot->rx_lock, flags);
pkt = pktqhead;
for (i = 0; pkt && i < pkt_cnt; i++, pkt = nextpkt) {
/* Post another set of rxbufs to the device */
dhd_prot_return_rxbuf(dhd, 0, pkt_cnt);
-#ifdef DHD_RX_CHAINING
- dhd_rxchain_commit(dhd);
-#endif // endif
-
/* After batch processing, check RX bound */
n += pkt_cnt;
if (n >= bound) {
}
/* Call lb_dispatch only if packets are queued */
- if (n &&
-#ifdef WL_MONITOR
- !(dhd_monitor_enabled(dhd, ifidx)) &&
-#endif /* WL_MONITOR */
- TRUE) {
+ if (n) {
DHD_LB_DISPATCH_RX_COMPL(dhd);
DHD_LB_DISPATCH_RX_PROCESS(dhd);
}
return more;
-
}
/**
/** called when DHD needs to check for 'transmit complete' messages from the dongle */
bool BCMFASTPATH
-dhd_prot_process_msgbuf_txcpl(dhd_pub_t *dhd, uint bound, int ringtype)
+dhd_prot_process_msgbuf_txcpl(dhd_pub_t *dhd, uint bound)
{
bool more = TRUE;
uint n = 0;
- msgbuf_ring_t *ring;
- unsigned long flags;
-
-#ifdef DHD_HP2P
- if (ringtype == DHD_HP2P_RING && dhd->prot->d2hring_hp2p_txcpl)
- ring = dhd->prot->d2hring_hp2p_txcpl;
- else
-#endif /* DHD_HP2P */
- ring = &dhd->prot->d2hring_tx_cpln;
+ msgbuf_ring_t *ring = &dhd->prot->d2hring_tx_cpln;
/* Process all the messages - DTOH direction */
while (!dhd_is_device_removed(dhd)) {
uint8 *msg_addr;
uint32 msg_len;
- if (dhd_query_bus_erros(dhd)) {
- more = FALSE;
- break;
- }
-
if (dhd->hang_was_sent) {
more = FALSE;
break;
}
- if (dhd->smmu_fault_occurred) {
- more = FALSE;
- break;
- }
-
- DHD_RING_LOCK(ring->ring_lock, flags);
/* Get the address of the next message to be read from ring */
msg_addr = dhd_prot_get_read_addr(dhd, ring, &msg_len);
- DHD_RING_UNLOCK(ring->ring_lock, flags);
-
if (msg_addr == NULL) {
more = FALSE;
break;
if (data & D2H_DEV_FWHALT) {
DHD_ERROR(("Firmware trapped and trap_data is 0x%04x\n", data));
-
if (data & D2H_DEV_EXT_TRAP_DATA)
{
if (dhd->extended_trap_data) {
{
dhd_prot_t *prot = dhd->prot;
msgbuf_ring_t *ring = &prot->d2hring_ctrl_cpln;
- unsigned long flags;
/* Process all the messages - DTOH direction */
while (!dhd_is_device_removed(dhd)) {
uint8 *msg_addr;
uint32 msg_len;
- if (dhd_query_bus_erros(dhd)) {
- break;
- }
-
if (dhd->hang_was_sent) {
break;
}
- if (dhd->smmu_fault_occurred) {
- break;
- }
-
- DHD_RING_LOCK(ring->ring_lock, flags);
/* Get the address of the next message to be read from ring */
msg_addr = dhd_prot_get_read_addr(dhd, ring, &msg_len);
- DHD_RING_UNLOCK(ring->ring_lock, flags);
-
if (msg_addr == NULL) {
break;
}
goto done;
}
- if (dhd->smmu_fault_occurred) {
- ret = BCME_ERROR;
- goto done;
- }
-
msg = (cmn_msg_hdr_t *)buf;
/* Wait until DMA completes, then fetch msg_type */
goto done;
}
- if (msg_type == MSG_TYPE_INFO_BUF_CMPLT) {
- if (ring == dhd->prot->d2hring_info_cpln) {
- if (!dhd->prot->infobufpost) {
- DHD_ERROR(("infobuf posted are zero,"
- "but there is a completion\n"));
- goto done;
- }
- dhd->prot->infobufpost--;
- dhd_prot_infobufpost(dhd, dhd->prot->h2dring_info_subn);
- dhd_prot_process_infobuf_complete(dhd, buf);
- }
- } else
if (table_lookup[msg_type]) {
table_lookup[msg_type](dhd, buf);
}
#ifdef DHD_RX_CHAINING
dhd_rxchain_commit(dhd);
-#endif // endif
+#endif
return ret;
} /* dhd_prot_process_msgtype */
DHD_ERROR(("%s info cpl ring doesn't exist\n", __FUNCTION__));
}
}
-#ifdef DHD_HP2P
- else if (request_id == DHD_D2H_HPPRING_TXREQ_PKTID) {
- if (dhd->prot->d2hring_hp2p_txcpl != NULL) {
- if (dhd->prot->d2hring_hp2p_txcpl->create_pending == TRUE) {
- DHD_ERROR(("H2D ring create failed for hp2p ring\n"));
- dhd->prot->d2hring_hp2p_txcpl->create_pending = FALSE;
- }
- else
- DHD_ERROR(("ring create ID for a ring, create not pending\n"));
- } else {
- DHD_ERROR(("%s hp2p txcmpl ring doesn't exist\n", __FUNCTION__));
- }
- }
- else if (request_id == DHD_D2H_HPPRING_RXREQ_PKTID) {
- if (dhd->prot->d2hring_hp2p_rxcpl != NULL) {
- if (dhd->prot->d2hring_hp2p_rxcpl->create_pending == TRUE) {
- DHD_ERROR(("D2H ring create failed for hp2p rxcmpl ring\n"));
- dhd->prot->d2hring_hp2p_rxcpl->create_pending = FALSE;
- }
- else
- DHD_ERROR(("ring create ID for hp2p rxcmpl ring, not pending\n"));
- } else {
- DHD_ERROR(("%s hp2p rxcpl ring doesn't exist\n", __FUNCTION__));
- }
- }
-#endif /* DHD_HP2P */
else {
DHD_ERROR(("don;t know how to pair with original request\n"));
}
{
ioctl_req_ack_msg_t *ioct_ack = (ioctl_req_ack_msg_t *)msg;
unsigned long flags;
-#if defined(DHD_PKTID_AUDIT_RING)
+#ifdef DHD_PKTID_AUDIT_RING
uint32 pktid = ltoh32(ioct_ack->cmn_hdr.request_id);
-#endif // endif
-#if defined(DHD_PKTID_AUDIT_RING)
/* Skip audit for DHD_IOCTL_REQ_PKTID = 0xFFFE */
if (pktid != DHD_IOCTL_REQ_PKTID) {
#ifndef IOCTLRESP_USE_CONSTMEM
DHD_TEST_IS_ALLOC, msg, D2HRING_CTRL_CMPLT_ITEMSIZE);
#endif /* !IOCTLRESP_USE_CONSTMEM */
}
-#endif // endif
-
- dhd->prot->ioctl_ack_time = OSL_LOCALTIME_NS();
+#endif /* DHD_PKTID_AUDIT_RING */
DHD_GENERAL_LOCK(dhd, flags);
if ((dhd->prot->ioctl_state & MSGBUF_IOCTL_ACK_PENDING) &&
if (ioct_ack->compl_hdr.status != 0) {
DHD_ERROR(("got an error status for the ioctl request...need to handle that\n"));
}
+#ifdef REPORT_FATAL_TIMEOUTS
+ else {
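+ /* A clean ack means the dongle is responsive; stop the bus watchdog here. The command timer keeps running until the matching IOCTL completion arrives. */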
+ dhd_stop_bus_timer(dhd);
+ }
+#endif /* REPORT_FATAL_TIMEOUTS */
}
/** called on MSG_TYPE_IOCTL_CMPLT message received from dongle */
void *pkt;
unsigned long flags;
dhd_dma_buf_t retbuf;
-
- /* Check for ioctl timeout induce flag, which is set by firing
- * dhd iovar to induce IOCTL timeout. If flag is set,
- * return from here, which results in to IOCTL timeout.
- */
- if (dhd->dhd_induce_error == DHD_INDUCE_IOCTL_TIMEOUT) {
- DHD_ERROR(("%s: Inducing resumed on timeout\n", __FUNCTION__));
- return;
- }
+#ifdef REPORT_FATAL_TIMEOUTS
+ uint16 dhd_xt_id;
+#endif
memset(&retbuf, 0, sizeof(dhd_dma_buf_t));
pkt_id = ltoh32(ioct_resp->cmn_hdr.request_id);
-#if defined(DHD_PKTID_AUDIT_RING)
+#ifdef DHD_PKTID_AUDIT_RING
#ifndef IOCTLRESP_USE_CONSTMEM
DHD_PKTID_AUDIT_RING_DEBUG(dhd, prot->pktid_ctrl_map, pkt_id,
DHD_DUPLICATE_FREE, msg, D2HRING_CTRL_CMPLT_ITEMSIZE);
DHD_PKTID_AUDIT_RING_DEBUG(dhd, prot->pktid_map_handle_ioctl, pkt_id,
DHD_DUPLICATE_FREE, msg, D2HRING_CTRL_CMPLT_ITEMSIZE);
#endif /* !IOCTLRESP_USE_CONSTMEM */
-#endif // endif
+#endif /* DHD_PKTID_AUDIT_RING */
DHD_GENERAL_LOCK(dhd, flags);
if ((prot->ioctl_state & MSGBUF_IOCTL_ACK_PENDING) ||
return;
}
- dhd->prot->ioctl_cmplt_time = OSL_LOCALTIME_NS();
-
/* Clear Response pending bit */
prot->ioctl_state &= ~MSGBUF_IOCTL_RESP_PENDING;
- DHD_GENERAL_UNLOCK(dhd, flags);
#ifndef IOCTLRESP_USE_CONSTMEM
pkt = dhd_prot_packet_get(dhd, pkt_id, PKTTYPE_IOCTL_RX, TRUE);
pkt = retbuf.va;
#endif /* !IOCTLRESP_USE_CONSTMEM */
if (!pkt) {
+ DHD_GENERAL_UNLOCK(dhd, flags);
DHD_ERROR(("%s: received ioctl response with NULL pkt\n", __FUNCTION__));
prhex("dhd_prot_ioctcmplt_process:",
(uchar *)msg, D2HRING_CTRL_CMPLT_ITEMSIZE);
return;
}
+ DHD_GENERAL_UNLOCK(dhd, flags);
prot->ioctl_resplen = ltoh16(ioct_resp->resp_len);
prot->ioctl_status = ltoh16(ioct_resp->compl_hdr.status);
DHD_ERROR(("%s: transaction id(%d %d) or cmd(%d %d) mismatch\n",
__FUNCTION__, xt_id, prot->ioctl_trans_id,
prot->curr_ioctl_cmd, ioct_resp->cmd));
+#ifdef REPORT_FATAL_TIMEOUTS
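+ /* Transaction id or command mismatch: stop the command timer before waking the ioctl waiter with an error. */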
+ dhd_stop_cmd_timer(dhd);
+#endif /* REPORT_FATAL_TIMEOUTS */
dhd_wakeup_ioctl_event(dhd, IOCTL_RETURN_ON_ERROR);
dhd_prot_debug_info_print(dhd);
#ifdef DHD_FW_COREDUMP
dhd_schedule_reset(dhd);
goto exit;
}
+#ifdef REPORT_FATAL_TIMEOUTS
+ dhd_xt_id = dhd_get_request_id(dhd);
+ if (xt_id == dhd_xt_id) {
+ dhd_stop_cmd_timer(dhd);
+ } else {
+ DHD_ERROR(("%s: Cmd timer not stopped received xt_id %d stored xt_id %d",
+ __FUNCTION__, xt_id, dhd_xt_id));
+ }
+#endif /* REPORT_FATAL_TIMEOUTS */
DHD_CTL(("IOCTL_COMPLETE: req_id %x transid %d status %x resplen %d\n",
pkt_id, xt_id, prot->ioctl_status, prot->ioctl_resplen));
dhd_msgbuf_rxbuf_post_ioctlresp_bufs(dhd);
}
-int
-dhd_prot_check_tx_resource(dhd_pub_t *dhd)
-{
- return dhd->prot->no_tx_resource;
-}
-
-void
-dhd_prot_update_pktid_txq_stop_cnt(dhd_pub_t *dhd)
-{
- dhd->prot->pktid_txq_stop_cnt++;
-}
-
-void
-dhd_prot_update_pktid_txq_start_cnt(dhd_pub_t *dhd)
-{
- dhd->prot->pktid_txq_start_cnt++;
-}
-
/** called on MSG_TYPE_TX_STATUS message received from dongle */
static void BCMFASTPATH
dhd_prot_txstatus_process(dhd_pub_t *dhd, void *msg)
void *dmah;
void *secdma;
bool pkt_fate;
- msgbuf_ring_t *ring = &dhd->prot->d2hring_tx_cpln;
-#if defined(TX_STATUS_LATENCY_STATS) || defined(DHD_HP2P)
- flow_info_t *flow_info;
- uint64 tx_status_latency;
-#endif /* TX_STATUS_LATENCY_STATS || DHD_HP2P */
-#if defined(TX_STATUS_LATENCY_STATS)
+#ifdef DEVICE_TX_STUCK_DETECT
flow_ring_node_t *flow_ring_node;
uint16 flowid;
-#endif // endif
- ts_timestamp_t *ts;
+#endif /* DEVICE_TX_STUCK_DETECT */
+
- BCM_REFERENCE(ts);
txstatus = (host_txbuf_cmpl_t *)msg;
-#if defined(TX_STATUS_LATENCY_STATS)
+#ifdef DEVICE_TX_STUCK_DETECT
flowid = txstatus->compl_hdr.flow_ring_id;
flow_ring_node = DHD_FLOW_RING(dhd, flowid);
-#endif // endif
+ /**
+ * Since we got a completion message on this flowid,
+ * update tx_cmpl time stamp
+ */
+ flow_ring_node->tx_cmpl = OSL_SYSUPTIME();
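+ /* A stuck-detection check (assumed to run elsewhere, e.g. in the bus watchdog) can compare this timestamp against the last queued time to flag a stalled flow ring. */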
+#endif /* DEVICE_TX_STUCK_DETECT */
/* locks required to protect circular buffer accesses */
- DHD_RING_LOCK(ring->ring_lock, flags);
+ DHD_GENERAL_LOCK(dhd, flags);
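+ /* The general lock also guards active_tx_count and the tx pktid map accessed below. */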
pktid = ltoh32(txstatus->cmn_hdr.request_id);
pkt_fate = TRUE;
-#if defined(DHD_PKTID_AUDIT_RING)
+#ifdef DHD_PKTID_AUDIT_RING
DHD_PKTID_AUDIT_RING_DEBUG(dhd, dhd->prot->pktid_tx_map, pktid,
DHD_DUPLICATE_FREE, msg, D2HRING_TXCMPLT_ITEMSIZE);
-#endif // endif
+#endif /* DHD_PKTID_AUDIT_RING */
DHD_INFO(("txstatus for pktid 0x%04x\n", pktid));
- if (OSL_ATOMIC_DEC_RETURN(dhd->osh, &prot->active_tx_count) < 0) {
- DHD_ERROR(("Extra packets are freed\n"));
- }
- ASSERT(pktid != 0);
-
- pkt = DHD_PKTID_TO_NATIVE(dhd, dhd->prot->pktid_tx_map, pktid,
- pa, len, dmah, secdma, PKTTYPE_DATA_TX);
- if (!pkt) {
- DHD_RING_UNLOCK(ring->ring_lock, flags);
- DHD_ERROR(("%s: received txstatus with NULL pkt\n", __FUNCTION__));
- prhex("dhd_prot_txstatus_process:", (uchar *)msg, D2HRING_TXCMPLT_ITEMSIZE);
-#ifdef DHD_FW_COREDUMP
- if (dhd->memdump_enabled) {
- /* collect core dump */
- dhd->memdump_type = DUMP_TYPE_PKTID_INVALID;
- dhd_bus_mem_dump(dhd);
- }
-#else
- ASSERT(0);
-#endif /* DHD_FW_COREDUMP */
- return;
- }
-
- if (DHD_PKTID_AVAIL(dhd->prot->pktid_tx_map) == DHD_PKTID_MIN_AVAIL_COUNT) {
- dhd->prot->no_tx_resource = FALSE;
- dhd_bus_start_queue(dhd->bus);
- }
-
- if (SECURE_DMA_ENAB(dhd->osh)) {
- int offset = 0;
- BCM_REFERENCE(offset);
+ if (prot->active_tx_count) {
+ prot->active_tx_count--;
- if (dhd->prot->tx_metadata_offset)
- offset = dhd->prot->tx_metadata_offset + ETHER_HDR_LEN;
- SECURE_DMA_UNMAP(dhd->osh, (uint) pa,
- (uint) dhd->prot->tx_metadata_offset, DMA_RX, 0, dmah,
- secdma, offset);
+ /* Release the Lock when no more tx packets are pending */
+ if (prot->active_tx_count == 0)
+ DHD_TXFL_WAKE_UNLOCK(dhd);
} else {
- DMA_UNMAP(dhd->osh, pa, (uint) len, DMA_RX, 0, dmah);
+ DHD_ERROR(("Extra packets are freed\n"));
}
-#ifdef TX_STATUS_LATENCY_STATS
- /* update the tx status latency for flowid */
- flow_info = &flow_ring_node->flow_info;
- tx_status_latency = OSL_SYSUPTIME_US() - DHD_PKT_GET_QTIME(pkt);
- flow_info->cum_tx_status_latency += tx_status_latency;
- flow_info->num_tx_status++;
-#endif /* TX_STATUS_LATENCY_STATS */
+ ASSERT(pktid != 0);
#if defined(DHD_LB_TXC) && !defined(BCM_SECURE_DMA)
{
int elem_ix;
void **elem;
bcm_workq_t *workq;
+ dmaaddr_t pa;
+ uint32 pa_len;
+
+ pkt = DHD_PKTID_TO_NATIVE(dhd, dhd->prot->pktid_tx_map,
+ pktid, pa, pa_len, dmah, secdma, PKTTYPE_DATA_TX);
workq = &prot->tx_compl_prod;
/*
elem_ix = bcm_ring_prod(WORKQ_RING(workq), DHD_LB_WORKQ_SZ);
DHD_PKTTAG_SET_PA((dhd_pkttag_fr_t *)PKTTAG(pkt), pa);
- DHD_PKTTAG_SET_PA_LEN((dhd_pkttag_fr_t *)PKTTAG(pkt), len);
+ DHD_PKTTAG_SET_PA_LEN((dhd_pkttag_fr_t *)PKTTAG(pkt), pa_len);
if (elem_ix == BCM_RING_FULL) {
DHD_ERROR(("tx_compl_prod BCM_RING_FULL\n"));
DHD_INFO(("%s: tx_compl_prod pkt<%p> sync<%d>\n",
__FUNCTION__, pkt, prot->tx_compl_prod_sync));
- DHD_RING_UNLOCK(ring->ring_lock, flags);
+ DHD_GENERAL_UNLOCK(dhd, flags);
+
return;
}
#endif /* !DHD_LB_TXC */
+ pkt = DHD_PKTID_TO_NATIVE(dhd, dhd->prot->pktid_tx_map, pktid,
+ pa, len, dmah, secdma, PKTTYPE_DATA_TX);
+
+ if (pkt) {
+ if (SECURE_DMA_ENAB(dhd->osh)) {
+ int offset = 0;
+ BCM_REFERENCE(offset);
+
+ if (dhd->prot->tx_metadata_offset)
+ offset = dhd->prot->tx_metadata_offset + ETHER_HDR_LEN;
+ SECURE_DMA_UNMAP(dhd->osh, (uint) pa,
+ (uint) dhd->prot->tx_metadata_offset, DMA_RX, 0, dmah,
+ secdma, offset);
+ } else
+ DMA_UNMAP(dhd->osh, pa, (uint) len, DMA_RX, 0, dmah);
#ifdef DMAMAP_STATS
- dhd->dma_stats.txdata--;
- dhd->dma_stats.txdata_sz -= len;
+ dhd->dma_stats.txdata--;
+ dhd->dma_stats.txdata_sz -= len;
#endif /* DMAMAP_STATS */
- pkt_fate = dhd_dbg_process_tx_status(dhd, pkt, pktid,
- ltoh16(txstatus->compl_hdr.status) & WLFC_CTL_PKTFLAG_MASK);
+#if defined(DBG_PKT_MON) || defined(DHD_PKT_LOGGING)
+ if (dhd->d11_tx_status) {
+ uint16 tx_status;
+
+ tx_status = ltoh16(txstatus->compl_hdr.status) &
+ WLFC_CTL_PKTFLAG_MASK;
+ pkt_fate = (tx_status == WLFC_CTL_PKTFLAG_DISCARD) ? TRUE : FALSE;
+ DHD_DBG_PKT_MON_TX_STATUS(dhd, pkt, pktid, tx_status);
+#ifdef DHD_PKT_LOGGING
+ DHD_PKTLOG_TXS(dhd, pkt, pktid, tx_status);
+#endif /* DHD_PKT_LOGGING */
+ }
+#endif /* DBG_PKT_MON || DHD_PKT_LOGGING */
#if defined(BCMPCIE)
- dhd_txcomplete(dhd, pkt, pkt_fate);
-#ifdef DHD_4WAYM4_FAIL_DISCONNECT
- dhd_eap_txcomplete(dhd, pkt, pkt_fate, txstatus->cmn_hdr.if_id);
-#endif /* DHD_4WAYM4_FAIL_DISCONNECT */
-#endif // endif
+ dhd_txcomplete(dhd, pkt, pkt_fate);
+#endif
#if DHD_DBG_SHOW_METADATA
- if (dhd->prot->metadata_dbg &&
- dhd->prot->tx_metadata_offset && txstatus->metadata_len) {
- uchar *ptr;
- /* The Ethernet header of TX frame was copied and removed.
- * Here, move the data pointer forward by Ethernet header size.
- */
- PKTPULL(dhd->osh, pkt, ETHER_HDR_LEN);
- ptr = PKTDATA(dhd->osh, pkt) - (dhd->prot->tx_metadata_offset);
- bcm_print_bytes("txmetadata", ptr, txstatus->metadata_len);
- dhd_prot_print_metadata(dhd, ptr, txstatus->metadata_len);
- }
+ if (dhd->prot->metadata_dbg &&
+ dhd->prot->tx_metadata_offset && txstatus->metadata_len) {
+ uchar *ptr;
+ /* The Ethernet header of TX frame was copied and removed.
+ * Here, move the data pointer forward by Ethernet header size.
+ */
+ PKTPULL(dhd->osh, pkt, ETHER_HDR_LEN);
+ ptr = PKTDATA(dhd->osh, pkt) - (dhd->prot->tx_metadata_offset);
+ bcm_print_bytes("txmetadata", ptr, txstatus->metadata_len);
+ dhd_prot_print_metadata(dhd, ptr, txstatus->metadata_len);
+ }
#endif /* DHD_DBG_SHOW_METADATA */
+ DHD_GENERAL_UNLOCK(dhd, flags);
+ PKTFREE(dhd->osh, pkt, TRUE);
+ DHD_GENERAL_LOCK(dhd, flags);
+ DHD_FLOWRING_TXSTATUS_CNT_UPDATE(dhd->bus, txstatus->compl_hdr.flow_ring_id,
+ txstatus->tx_status);
-#ifdef DHD_HP2P
- if (dhd->hp2p_capable && flow_ring_node->flow_info.tid == HP2P_PRIO) {
-#ifdef DHD_HP2P_DEBUG
- bcm_print_bytes("txcpl", (uint8 *)txstatus, sizeof(host_txbuf_cmpl_t));
-#endif /* DHD_HP2P_DEBUG */
- dhd_update_hp2p_txstats(dhd, txstatus);
+#ifdef DHD_TIMESYNC
+ if (dhd->prot->tx_ts_log_enabled) {
+ ts_timestamp_t *ts = (ts_timestamp_t *)&(txstatus->ts);
+ dhd_timesync_log_tx_timestamp(dhd->ts,
+ txstatus->compl_hdr.flow_ring_id,
+ txstatus->cmn_hdr.if_id,
+ ts->low, ts->high);
+ }
+#endif /* DHD_TIMESYNC */
}
-#endif /* DHD_HP2P */
-#ifdef DHD_LBUF_AUDIT
- PKTAUDIT(dhd->osh, pkt);
-#endif // endif
+ DHD_GENERAL_UNLOCK(dhd, flags);
- DHD_FLOWRING_TXSTATUS_CNT_UPDATE(dhd->bus, txstatus->compl_hdr.flow_ring_id,
- txstatus->tx_status);
- DHD_RING_UNLOCK(ring->ring_lock, flags);
- PKTFREE(dhd->osh, pkt, TRUE);
return;
} /* dhd_prot_txstatus_process */
uint16 buflen;
int ifidx = 0;
void* pkt;
+ unsigned long flags;
dhd_prot_t *prot = dhd->prot;
/* Event complete header */
evnt = (wlevent_req_msg_t *)msg;
bufid = ltoh32(evnt->cmn_hdr.request_id);
-#if defined(DHD_PKTID_AUDIT_RING)
+#ifdef DHD_PKTID_AUDIT_RING
DHD_PKTID_AUDIT_RING_DEBUG(dhd, dhd->prot->pktid_ctrl_map, bufid,
DHD_DUPLICATE_FREE, msg, D2HRING_CTRL_CMPLT_ITEMSIZE);
-#endif // endif
+#endif /* DHD_PKTID_AUDIT_RING */
buflen = ltoh16(evnt->event_data_len);
prot->cur_event_bufs_posted--;
dhd_msgbuf_rxbuf_post_event_bufs(dhd);
+ /* locks required to protect pktid_map */
+ DHD_GENERAL_LOCK(dhd, flags);
pkt = dhd_prot_packet_get(dhd, bufid, PKTTYPE_EVENT_RX, TRUE);
+ DHD_GENERAL_UNLOCK(dhd, flags);
if (!pkt) {
DHD_ERROR(("%s: pkt is NULL for pktid %d\n", __FUNCTION__, bufid));
PKTPULL(dhd->osh, pkt, dhd->prot->rx_dataoffset);
PKTSETLEN(dhd->osh, pkt, buflen);
-#ifdef DHD_LBUF_AUDIT
- PKTAUDIT(dhd->osh, pkt);
-#endif // endif
+
dhd_bus_rx_frame(dhd->bus, pkt, ifidx, 1);
}
uint32 pktid;
uint16 buflen;
void * pkt;
+ unsigned long flags;
resp = (info_buf_resp_t *)buf;
pktid = ltoh32(resp->cmn_hdr.request_id);
pktid, buflen, resp->cmn_hdr.flags, ltoh16(resp->seqnum),
dhd->prot->rx_dataoffset));
- if (dhd->debug_buf_dest_support) {
- if (resp->dest < DEBUG_BUF_DEST_MAX) {
- dhd->debug_buf_dest_stat[resp->dest]++;
- }
+ if (!dhd->prot->infobufpost) {
+ DHD_ERROR(("infobuf posted are zero, but there is a completion\n"));
+ return;
}
+ dhd->prot->infobufpost--;
+ dhd_prot_infobufpost(dhd);
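+ /* Repost a fresh info buffer to replace the one just completed, keeping the dongle's pool topped up. */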
+
+ DHD_GENERAL_LOCK(dhd, flags);
pkt = dhd_prot_packet_get(dhd, pktid, PKTTYPE_INFO_RX, TRUE);
+ DHD_GENERAL_UNLOCK(dhd, flags);
+
if (!pkt)
return;
PKTSETLEN(dhd->osh, pkt, buflen);
-#ifdef DHD_LBUF_AUDIT
- PKTAUDIT(dhd->osh, pkt);
-#endif // endif
-
/* info ring "debug" data, which is not a 802.3 frame, is sent/hacked with a
* special ifidx of -1. This is just internal to dhd to get the data to
* dhd_linux.c:dhd_rx_frame() from here (dhd_prot_infobuf_cmplt_process).
*/
- dhd_bus_rx_frame(dhd->bus, pkt, DHD_DUMMY_INFO_IF /* ifidx HACK */, 1);
-}
-
-/** called on MSG_TYPE_SNAPSHOT_CMPLT message received from dongle */
-static void BCMFASTPATH
-dhd_prot_process_snapshot_complete(dhd_pub_t *dhd, void *buf)
-{
+ dhd_bus_rx_frame(dhd->bus, pkt, DHD_EVENT_IF /* ifidx HACK */, 1);
}
/** Stop protocol: sync w/dongle state. */
return 0;
}
-#define MAX_MTU_SZ (1600u)
#define PKTBUF pktbuf
flow_ring_node_t *flow_ring_node;
if (dhd->flow_ring_table == NULL) {
- DHD_ERROR(("dhd flow_ring_table is NULL\n"));
return BCME_NORESOURCE;
}
-#ifdef DHD_PCIE_PKTID
- if (!DHD_PKTID_AVAIL(dhd->prot->pktid_tx_map)) {
- if (dhd->prot->pktid_depleted_cnt == DHD_PKTID_DEPLETED_MAX_COUNT) {
- dhd_bus_stop_queue(dhd->bus);
- dhd->prot->no_tx_resource = TRUE;
- }
- dhd->prot->pktid_depleted_cnt++;
- goto err_no_res;
- } else {
- dhd->prot->pktid_depleted_cnt = 0;
- }
-#endif /* DHD_PCIE_PKTID */
flowid = DHD_PKT_GET_FLOWID(PKTBUF);
flow_ring_table = (flow_ring_table_t *)dhd->flow_ring_table;
ring = (msgbuf_ring_t *)flow_ring_node->prot_info;
- DHD_RING_LOCK(ring->ring_lock, flags);
+#ifdef PCIE_INB_DW
+ if (dhd_prot_inc_hostactive_devwake_assert(dhd->bus) != BCME_OK)
+ return BCME_ERROR;
+#endif /* PCIE_INB_DW */
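+ /* The host-active assertion above must be balanced by dhd_prot_dec_hostactive_ack_pending_dsreq() on every exit path below. */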
+
+ DHD_GENERAL_LOCK(dhd, flags);
/* Create a unique 32-bit packet id */
pktid = DHD_NATIVE_TO_PKTID_RSV(dhd, dhd->prot->pktid_tx_map,
PKTBUF, PKTTYPE_DATA_TX);
#if defined(DHD_PCIE_PKTID)
if (pktid == DHD_PKTID_INVALID) {
- DHD_ERROR_RLMT(("%s: Pktid pool depleted.\n", __FUNCTION__));
+ DHD_ERROR(("%s: Pktid pool depleted.\n", __FUNCTION__));
/*
* If we return error here, the caller would queue the packet
* again. So we'll just free the skb allocated in DMA Zone.
dhd_prot_alloc_ring_space(dhd, ring, 1, &alloced, FALSE);
if (txdesc == NULL) {
DHD_INFO(("%s:%d: HTOD Msgbuf Not available TxCount = %d\n",
- __FUNCTION__, __LINE__, OSL_ATOMIC_READ(dhd->osh, &prot->active_tx_count)));
+ __FUNCTION__, __LINE__, prot->active_tx_count));
goto err_free_pktid;
}
+#ifdef DBG_PKT_MON
+ DHD_DBG_PKT_MON_TX(dhd, PKTBUF, pktid);
+#endif /* DBG_PKT_MON */
+#ifdef DHD_PKT_LOGGING
+ DHD_PKTLOG_TX(dhd, PKTBUF, pktid);
+#endif /* DHD_PKT_LOGGING */
+
+
/* Extract the data pointer and length information */
pktdata = PKTDATA(dhd->osh, PKTBUF);
pktlen = PKTLEN(dhd->osh, PKTBUF);
- DHD_DBG_PKT_MON_TX(dhd, PKTBUF, pktid);
-
/* Ethernet header: Copy before we cache flush packet using DMA_MAP */
bcopy(pktdata, txdesc->txhdr, ETHER_HDR_LEN);
if (ring->pend_items_count == 0)
ring->start_addr = (void *)txdesc;
ring->pend_items_count++;
-#endif // endif
+#endif
/* Form the Tx descriptor message buffer */
txdesc->flags = BCMPCIE_PKT_FLAGS_FRAME_802_3;
prio = (uint8)PKTPRIO(PKTBUF);
+
txdesc->flags |= (prio & 0x7) << BCMPCIE_PKT_FLAGS_PRIO_SHIFT;
txdesc->seg_cnt = 1;
if (PHYSADDRISZERO(meta_pa)) {
/* Unmap the data pointer to a DMA-able address */
if (SECURE_DMA_ENAB(dhd->osh)) {
+
int offset = 0;
BCM_REFERENCE(offset);
txdesc->metadata_buf_addr.high_addr = htol32(PHYSADDRHI(meta_pa));
txdesc->metadata_buf_addr.low_addr = htol32(PHYSADDRLO(meta_pa));
} else {
-#ifdef DHD_HP2P
- if (dhd->hp2p_capable && flow_ring_node->flow_info.tid == HP2P_PRIO) {
- dhd_update_hp2p_txdesc(dhd, txdesc);
- } else
-#endif /* DHD_HP2P */
- if (1)
- {
- txdesc->metadata_buf_len = htol16(0);
- txdesc->metadata_buf_addr.high_addr = 0;
- txdesc->metadata_buf_addr.low_addr = 0;
- }
+ txdesc->metadata_buf_len = htol16(0);
+ txdesc->metadata_buf_addr.high_addr = 0;
+ txdesc->metadata_buf_addr.low_addr = 0;
}
#ifdef DHD_PKTID_AUDIT_RING
DHD_TRACE(("txpost: data_len %d, pktid 0x%04x\n", txdesc->data_len,
txdesc->cmn_hdr.request_id));
-#ifdef DHD_LBUF_AUDIT
- PKTAUDIT(dhd->osh, PKTBUF);
-#endif // endif
-
- if (pktlen > MAX_MTU_SZ) {
- DHD_ERROR(("%s: ######## pktlen(%d) > MAX_MTU_SZ(%d) #######\n",
- __FUNCTION__, pktlen, MAX_MTU_SZ));
- dhd_prhex("txringitem", (volatile uchar*)txdesc,
- sizeof(host_txbuf_post_t), DHD_ERROR_VAL);
- }
-
/* Update the write pointer in TCM & ring bell */
-#if defined(DHD_HP2P) && defined(TXP_FLUSH_NITEMS)
- if (dhd->hp2p_capable && flow_ring_node->flow_info.tid == HP2P_PRIO) {
- dhd_calc_hp2p_burst(dhd, ring, flowid);
- } else {
- if ((ring->pend_items_count == prot->txp_threshold) ||
- ((uint8 *) txdesc == (uint8 *) DHD_RING_END_VA(ring))) {
- dhd_prot_txdata_write_flush(dhd, flowid);
- }
- }
-#elif defined(TXP_FLUSH_NITEMS)
+#ifdef TXP_FLUSH_NITEMS
/* Flush if we have either hit the txp_threshold or if this msg is */
/* occupying the last slot in the flow_ring - before wrap around. */
if ((ring->pend_items_count == prot->txp_threshold) ||
((uint8 *) txdesc == (uint8 *) DHD_RING_END_VA(ring))) {
- dhd_prot_txdata_write_flush(dhd, flowid);
+ dhd_prot_txdata_write_flush(dhd, flowid, TRUE);
}
#else
/* update ring's WR index and ring doorbell to dongle */
dhd_prot_ring_write_complete(dhd, ring, txdesc, 1);
-#endif /* DHD_HP2P && TXP_FLUSH_NITEMS */
+#endif
-#if defined(TX_STATUS_LATENCY_STATS)
- /* set the time when pkt is queued to flowring */
- DHD_PKT_SET_QTIME(PKTBUF, OSL_SYSUPTIME_US());
-#endif // endif
+ prot->active_tx_count++;
- OSL_ATOMIC_INC(dhd->osh, &prot->active_tx_count);
/*
* Take a wake lock, do not sleep if we have at least one packet
* to finish.
*/
- DHD_TXFL_WAKE_LOCK_TIMEOUT(dhd, MAX_TX_TIMEOUT);
+ if (prot->active_tx_count >= 1)
+ DHD_TXFL_WAKE_LOCK_TIMEOUT(dhd, MAX_TX_TIMEOUT);
- DHD_RING_UNLOCK(ring->ring_lock, flags);
+ DHD_GENERAL_UNLOCK(dhd, flags);
+
+#ifdef PCIE_INB_DW
+ dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus);
+#endif
-#ifdef TX_STATUS_LATENCY_STATS
- flow_ring_node->flow_info.num_tx_pkts++;
-#endif /* TX_STATUS_LATENCY_STATS */
return BCME_OK;
err_rollback_idx:
err_no_res_pktfree:
#endif /* DHD_PCIE_PKTID */
- DHD_RING_UNLOCK(ring->ring_lock, flags);
-err_no_res:
+
+
+ DHD_GENERAL_UNLOCK(dhd, flags);
+#ifdef PCIE_INB_DW
+ dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus);
+#endif
return BCME_NORESOURCE;
} /* dhd_prot_txdata */
-/* called with a ring_lock */
+/* called with a lock */
/** optimization to write "n" tx items at a time to ring */
void BCMFASTPATH
-dhd_prot_txdata_write_flush(dhd_pub_t *dhd, uint16 flowid)
+dhd_prot_txdata_write_flush(dhd_pub_t *dhd, uint16 flowid, bool in_lock)
{
#ifdef TXP_FLUSH_NITEMS
+ unsigned long flags = 0;
flow_ring_table_t *flow_ring_table;
flow_ring_node_t *flow_ring_node;
msgbuf_ring_t *ring;
return;
}
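+ /* in_lock is TRUE when the caller (e.g. the txdata path) already holds DHD_GENERAL_LOCK, so don't take it again here. */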
+ if (!in_lock) {
+ DHD_GENERAL_LOCK(dhd, flags);
+ }
+
flow_ring_table = (flow_ring_table_t *)dhd->flow_ring_table;
flow_ring_node = (flow_ring_node_t *)&flow_ring_table[flowid];
ring = (msgbuf_ring_t *)flow_ring_node->prot_info;
ring->pend_items_count = 0;
ring->start_addr = NULL;
}
+
+ if (!in_lock) {
+ DHD_GENERAL_UNLOCK(dhd, flags);
+ }
#endif /* TXP_FLUSH_NITEMS */
}
dhd_prot_wlioctl_intercept(dhd_pub_t *dhd, wl_ioctl_t * ioc, void * buf)
{
dhd_prot_t *prot = dhd->prot;
- int slen = 0;
if (ioc->cmd == WLC_SET_VAR && buf != NULL && !strcmp(buf, "pcie_bus_tput")) {
+ int slen = 0;
pcie_bus_tput_params_t *tput_params;
slen = strlen("pcie_bus_tput") + 1;
sizeof(tput_params->host_buf_addr));
tput_params->host_buf_len = DHD_BUS_TPUT_BUF_LEN;
}
-
-}
-
-/* called after an ioctl returns from dongle */
-static void
-dhd_prot_wl_ioctl_ret_intercept(dhd_pub_t *dhd, wl_ioctl_t * ioc, void * buf,
- int ifidx, int ret, int len)
-{
-
- if (!ret && ioc->cmd == WLC_SET_VAR && buf != NULL) {
- /* Intercept the wme_dp ioctl here */
- if (!strcmp(buf, "wme_dp")) {
- int slen, val = 0;
-
- slen = strlen("wme_dp") + 1;
- if (len >= (int)(slen + sizeof(int)))
- bcopy(((char *)buf + slen), &val, sizeof(int));
- dhd->wme_dp = (uint8) ltoh32(val);
- }
-
- }
-
}
-#ifdef DHD_PM_CONTROL_FROM_FILE
-extern bool g_pm_control;
-#endif /* DHD_PM_CONTROL_FROM_FILE */
/** Use protocol to issue ioctl to dongle. Only one ioctl may be in transit. */
int dhd_prot_ioctl(dhd_pub_t *dhd, int ifidx, wl_ioctl_t * ioc, void * buf, int len)
int ret = -1;
uint8 action;
- if (dhd->bus->is_linkdown) {
- DHD_ERROR_RLMT(("%s : PCIe link is down. we have nothing to do\n", __FUNCTION__));
- goto done;
- }
-
- if (dhd_query_bus_erros(dhd)) {
- DHD_ERROR_RLMT(("%s : some BUS error. we have nothing to do\n", __FUNCTION__));
- goto done;
- }
-
if ((dhd->busstate == DHD_BUS_DOWN) || dhd->hang_was_sent) {
- DHD_ERROR_RLMT(("%s : bus is down. we have nothing to do -"
- " bus state: %d, sent hang: %d\n", __FUNCTION__,
- dhd->busstate, dhd->hang_was_sent));
+ DHD_ERROR(("%s : bus is down. we have nothing to do - bs: %d, has: %d\n",
+ __FUNCTION__, dhd->busstate, dhd->hang_was_sent));
goto done;
}
DHD_TRACE(("%s: Enter\n", __FUNCTION__));
if (ioc->cmd == WLC_SET_PM) {
-#ifdef DHD_PM_CONTROL_FROM_FILE
- if (g_pm_control == TRUE) {
- DHD_ERROR(("%s: SET PM ignored!(Requested:%d)\n",
- __FUNCTION__, buf ? *(char *)buf : 0));
- goto done;
- }
-#endif /* DHD_PM_CONTROL_FROM_FILE */
DHD_TRACE_HW4(("%s: SET PM to %d\n", __FUNCTION__, buf ? *(char *)buf : 0));
}
dhd->dongle_error = ret;
}
- dhd_prot_wl_ioctl_ret_intercept(dhd, ioc, buf, ifidx, ret, len);
+ if (!ret && ioc->cmd == WLC_SET_VAR && buf != NULL) {
+ /* Intercept the wme_dp ioctl here */
+ if (!strcmp(buf, "wme_dp")) {
+ int slen, val = 0;
+
+ slen = strlen("wme_dp") + 1;
+ if (len >= (int)(slen + sizeof(int)))
+ bcopy(((char *)buf + slen), &val, sizeof(int));
+ dhd->wme_dp = (uint8) ltoh32(val);
+ }
+
+ }
done:
return ret;
msglen = ALIGN_SIZE(msglen, DMA_ALIGN_LEN);
msglen = LIMIT_TO_MAX(msglen, MSGBUF_MAX_MSG_SIZE);
- DHD_RING_LOCK(ring->ring_lock, flags);
+#ifdef PCIE_INB_DW
+ if (dhd_prot_inc_hostactive_devwake_assert(dhd->bus) != BCME_OK)
+ return BCME_ERROR;
+#endif /* PCIE_INB_DW */
+
+ DHD_GENERAL_LOCK(dhd, flags);
ioct_rqst = (ioct_reqst_hdr_t *)
dhd_prot_alloc_ring_space(dhd, ring, 1, &alloced, FALSE);
if (ioct_rqst == NULL) {
- DHD_RING_UNLOCK(ring->ring_lock, flags);
+ DHD_GENERAL_UNLOCK(dhd, flags);
+#ifdef PCIE_INB_DW
+ dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus);
+#endif
return 0;
}
/* update ring's WR index and ring doorbell to dongle */
dhd_prot_ring_write_complete(dhd, ring, ioct_rqst, 1);
-
- DHD_RING_UNLOCK(ring->ring_lock, flags);
+ DHD_GENERAL_UNLOCK(dhd, flags);
+#ifdef PCIE_INB_DW
+ dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus);
+#endif
return 0;
}
return BCME_NOMEM;
} /* dhd_prepare_schedule_dmaxfer_free */
+
/** test / loopback */
void
dmaxfer_free_prev_dmaaddr(dhd_pub_t *dhdp, dmaxref_mem_map_t *dmmap)
dhd_dma_buf_free(dhdp, dmmap->dstmem);
MFREE(dhdp->osh, dmmap, sizeof(dmaxref_mem_map_t));
-
- dhdp->bus->dmaxfer_complete = TRUE;
- dhd_os_dmaxfer_wake(dhdp);
-
dmmap = NULL;
} /* dmaxfer_free_prev_dmaaddr */
+
/** test / loopback */
int dmaxfer_prepare_dmaaddr(dhd_pub_t *dhd, uint len,
uint srcdelay, uint destdelay, dhd_dmaxfer_t *dmaxfer)
{
- uint i = 0, j = 0;
+ uint i;
if (!dmaxfer)
return BCME_ERROR;
dmaxfer->len = len;
- /* Populate source with a pattern like below
- * 0x00000000
- * 0x01010101
- * 0x02020202
- * 0x03030303
- * 0x04040404
- * 0x05050505
- * ...
- * 0xFFFFFFFF
- */
- while (i < dmaxfer->len) {
- ((uint8*)dmaxfer->srcmem.va)[i] = j % 256;
- i++;
- if (i % 4 == 0) {
- j++;
- }
+ /* Populate source with a pattern */
+ for (i = 0; i < dmaxfer->len; i++) {
+ ((uint8*)dmaxfer->srcmem.va)[i] = i % 256;
}
-
OSL_CACHE_FLUSH(dmaxfer->srcmem.va, dmaxfer->len);
dmaxfer->srcdelay = srcdelay;
dhd_prot_t *prot = dhd->prot;
uint64 end_usec;
pcie_dmaxfer_cmplt_t *cmplt = (pcie_dmaxfer_cmplt_t *)msg;
- int buf_free_scheduled;
BCM_REFERENCE(cmplt);
- end_usec = OSL_SYSUPTIME_US();
-
- DHD_ERROR(("DMA loopback status: %d\n", cmplt->compl_hdr.status));
- prot->dmaxfer.status = cmplt->compl_hdr.status;
+ DHD_INFO(("DMA status: %d\n", cmplt->compl_hdr.status));
OSL_CACHE_INV(prot->dmaxfer.dstmem.va, prot->dmaxfer.len);
if (prot->dmaxfer.srcmem.va && prot->dmaxfer.dstmem.va) {
if (memcmp(prot->dmaxfer.srcmem.va,
- prot->dmaxfer.dstmem.va, prot->dmaxfer.len) ||
- cmplt->compl_hdr.status != BCME_OK) {
- DHD_ERROR(("DMA loopback failed\n"));
- /* it is observed that some times the completion
- * header status is set as OK, but the memcmp fails
- * hence always explicitly set the dmaxfer status
- * as error if this happens.
- */
- prot->dmaxfer.status = BCME_ERROR;
+ prot->dmaxfer.dstmem.va, prot->dmaxfer.len)) {
prhex("XFER SRC: ",
prot->dmaxfer.srcmem.va, prot->dmaxfer.len);
prhex("XFER DST: ",
prot->dmaxfer.dstmem.va, prot->dmaxfer.len);
+ DHD_ERROR(("DMA failed\n"));
}
else {
- switch (prot->dmaxfer.d11_lpbk) {
- case M2M_DMA_LPBK: {
- DHD_ERROR(("DMA successful pcie m2m DMA loopback\n"));
- } break;
- case D11_LPBK: {
+ if (prot->dmaxfer.d11_lpbk) {
DHD_ERROR(("DMA successful with d11 loopback\n"));
- } break;
- case BMC_LPBK: {
- DHD_ERROR(("DMA successful with bmc loopback\n"));
- } break;
- case M2M_NON_DMA_LPBK: {
- DHD_ERROR(("DMA successful pcie m2m NON DMA loopback\n"));
- } break;
- case D11_HOST_MEM_LPBK: {
- DHD_ERROR(("DMA successful d11 host mem loopback\n"));
- } break;
- case BMC_HOST_MEM_LPBK: {
- DHD_ERROR(("DMA successful bmc host mem loopback\n"));
- } break;
- default: {
- DHD_ERROR(("Invalid loopback option\n"));
- } break;
- }
-
- if (DHD_LPBKDTDUMP_ON()) {
- /* debug info print of the Tx and Rx buffers */
- dhd_prhex("XFER SRC: ", prot->dmaxfer.srcmem.va,
- prot->dmaxfer.len, DHD_INFO_VAL);
- dhd_prhex("XFER DST: ", prot->dmaxfer.dstmem.va,
- prot->dmaxfer.len, DHD_INFO_VAL);
+ } else {
+ DHD_ERROR(("DMA successful without d11 loopback\n"));
}
}
}
-
- buf_free_scheduled = dhd_prepare_schedule_dmaxfer_free(dhd);
+ end_usec = OSL_SYSUPTIME_US();
+ dhd_prepare_schedule_dmaxfer_free(dhd);
end_usec -= prot->dmaxfer.start_usec;
- if (end_usec) {
- prot->dmaxfer.time_taken = end_usec;
- DHD_ERROR(("DMA loopback %d bytes in %lu usec, %u kBps\n",
- prot->dmaxfer.len, (unsigned long)end_usec,
- (prot->dmaxfer.len * (1000 * 1000 / 1024) / (uint32)end_usec)));
- }
+ DHD_ERROR(("DMA loopback %d bytes in %llu usec, %u kBps\n",
+ prot->dmaxfer.len, end_usec,
+ (prot->dmaxfer.len * (1000 * 1000 / 1024) / (uint32)(end_usec + 1))));
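+ /* The +1 in the divisor guards against a divide-by-zero when the elapsed time rounds to zero microseconds. */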
dhd->prot->dmaxfer.in_progress = FALSE;
-
- if (buf_free_scheduled != BCME_OK) {
- dhd->bus->dmaxfer_complete = TRUE;
- dhd_os_dmaxfer_wake(dhd);
- }
}
/** Test functionality.
* by a spinlock.
*/
int
-dhdmsgbuf_dmaxfer_req(dhd_pub_t *dhd, uint len, uint srcdelay, uint destdelay,
- uint d11_lpbk, uint core_num)
+dhdmsgbuf_dmaxfer_req(dhd_pub_t *dhd, uint len, uint srcdelay, uint destdelay, uint d11_lpbk)
{
unsigned long flags;
int ret = BCME_OK;
if (prot->dmaxfer.in_progress) {
DHD_ERROR(("DMA is in progress...\n"));
- return BCME_ERROR;
- }
-
- if (d11_lpbk >= MAX_LPBK) {
- DHD_ERROR(("loopback mode should be either"
- " 0-PCIE_M2M_DMA, 1-D11, 2-BMC or 3-PCIE_M2M_NonDMA\n"));
- return BCME_ERROR;
+ return ret;
}
- DHD_RING_LOCK(ring->ring_lock, flags);
-
prot->dmaxfer.in_progress = TRUE;
if ((ret = dmaxfer_prepare_dmaaddr(dhd, xferlen, srcdelay, destdelay,
&prot->dmaxfer)) != BCME_OK) {
prot->dmaxfer.in_progress = FALSE;
- DHD_RING_UNLOCK(ring->ring_lock, flags);
return ret;
}
+#ifdef PCIE_INB_DW
+ if (dhd_prot_inc_hostactive_devwake_assert(dhd->bus) != BCME_OK)
+ return BCME_ERROR;
+#endif /* PCIE_INB_DW */
+
+ DHD_GENERAL_LOCK(dhd, flags);
+
dmap = (pcie_dma_xfer_params_t *)
dhd_prot_alloc_ring_space(dhd, ring, 1, &alloced, FALSE);
if (dmap == NULL) {
dmaxfer_free_dmaaddr(dhd, &prot->dmaxfer);
prot->dmaxfer.in_progress = FALSE;
- DHD_RING_UNLOCK(ring->ring_lock, flags);
+ DHD_GENERAL_UNLOCK(dhd, flags);
+#ifdef PCIE_INB_DW
+ dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus);
+#endif
return BCME_NOMEM;
}
dmap->xfer_len = htol32(prot->dmaxfer.len);
dmap->srcdelay = htol32(prot->dmaxfer.srcdelay);
dmap->destdelay = htol32(prot->dmaxfer.destdelay);
- prot->dmaxfer.d11_lpbk = d11_lpbk;
- dmap->flags = (((core_num & PCIE_DMA_XFER_FLG_CORE_NUMBER_MASK)
- << PCIE_DMA_XFER_FLG_CORE_NUMBER_SHIFT) |
- ((prot->dmaxfer.d11_lpbk & PCIE_DMA_XFER_FLG_D11_LPBK_MASK)
- << PCIE_DMA_XFER_FLG_D11_LPBK_SHIFT));
- prot->dmaxfer.start_usec = OSL_SYSUPTIME_US();
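+ /* d11_lpbk is normalized to a single flag bit and packed into the transfer request's flags field. */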
+ prot->dmaxfer.d11_lpbk = d11_lpbk ? 1 : 0;
+ dmap->flags = (prot->dmaxfer.d11_lpbk << PCIE_DMA_XFER_FLG_D11_LPBK_SHIFT)
+ & PCIE_DMA_XFER_FLG_D11_LPBK_MASK;
/* update ring's WR index and ring doorbell to dongle */
+ prot->dmaxfer.start_usec = OSL_SYSUPTIME_US();
dhd_prot_ring_write_complete(dhd, ring, dmap, 1);
+ DHD_GENERAL_UNLOCK(dhd, flags);
+#ifdef PCIE_INB_DW
+ dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus);
+#endif
- DHD_RING_UNLOCK(ring->ring_lock, flags);
-
- DHD_ERROR(("DMA loopback Started...\n"));
+ DHD_INFO(("DMA Started...\n"));
return BCME_OK;
} /* dhdmsgbuf_dmaxfer_req */
-int
-dhdmsgbuf_dmaxfer_status(dhd_pub_t *dhd, dma_xfer_info_t *result)
-{
- dhd_prot_t *prot = dhd->prot;
-
- if (prot->dmaxfer.in_progress)
- result->status = DMA_XFER_IN_PROGRESS;
- else if (prot->dmaxfer.status == 0)
- result->status = DMA_XFER_SUCCESS;
- else
- result->status = DMA_XFER_FAILED;
-
- result->type = prot->dmaxfer.d11_lpbk;
- result->error_code = prot->dmaxfer.status;
- result->num_bytes = prot->dmaxfer.len;
- result->time_taken = prot->dmaxfer.time_taken;
- if (prot->dmaxfer.time_taken) {
- /* throughput in kBps */
- result->tput =
- (prot->dmaxfer.len * (1000 * 1000 / 1024)) /
- (uint32)prot->dmaxfer.time_taken;
- }
-
- return BCME_OK;
-}
-
/** Called in the process of submitting an ioctl to the dongle */
static int
dhd_msgbuf_query_ioctl(dhd_pub_t *dhd, int ifidx, uint cmd, void *buf, uint len, uint8 action)
DHD_TRACE(("%s: Enter\n", __FUNCTION__));
- if (dhd->bus->is_linkdown) {
- DHD_ERROR(("%s : PCIe link is down. we have nothing to do\n",
- __FUNCTION__));
- return -EIO;
- }
-
- if (dhd->busstate == DHD_BUS_DOWN) {
- DHD_ERROR(("%s : bus is down. we have nothing to do\n", __FUNCTION__));
- return -EIO;
- }
-
- /* don't talk to the dongle if fw is about to be reloaded */
- if (dhd->hang_was_sent) {
- DHD_ERROR(("%s: HANG was sent up earlier. Not talking to the chip\n",
- __FUNCTION__));
- return -EIO;
- }
-
if (cmd == WLC_GET_VAR && buf)
{
if (!len || !*(uint8 *)buf) {
if ((len >= strlen("bcmerrorstr")) &&
(!strcmp((char *)buf, "bcmerrorstr"))) {
+
strncpy((char *)buf, bcmerrorstr(dhd->dongle_error), copylen);
*(uint8 *)((uint8 *)buf + (copylen - 1)) = '\0';
+
goto done;
} else if ((len >= strlen("bcmerror")) &&
!strcmp((char *)buf, "bcmerror")) {
+
*(uint32 *)(uint32 *)buf = dhd->dongle_error;
+
goto done;
}
}
+
DHD_CTL(("query_ioctl: ACTION %d ifdix %d cmd %d len %d \n",
action, ifidx, cmd, len));
+#ifdef REPORT_FATAL_TIMEOUTS
+ /*
+ * These timers "should" be started before sending H2D interrupt.
+ * Think of the scenario where H2D interrupt is fired and the Dongle
+ * responds back immediately. From the DPC we would stop the cmd, bus
+ * timers. But the process context could have switched out leading to
+ * a situation where the timers are Not started yet, but are actually stopped.
+ *
+ * Disable preemption from the time we start the timer until we are done
+ * with seding H2D interrupts.
+ */
+ OSL_DISABLE_PREEMPTION(dhd->osh);
+ dhd_set_request_id(dhd, dhd->prot->ioctl_trans_id+1, cmd);
+ dhd_start_cmd_timer(dhd);
+ dhd_start_bus_timer(dhd);
+#endif /* REPORT_FATAL_TIMEOUTS */
ret = dhd_fillup_ioct_reqst(dhd, (uint16)len, cmd, buf, ifidx);
+#ifdef REPORT_FATAL_TIMEOUTS
+ /* For some reason if we fail to ring door bell, stop the timers */
+ if (ret < 0) {
+ DHD_ERROR(("%s(): dhd_fillup_ioct_reqst failed \r\n", __FUNCTION__));
+ dhd_stop_cmd_timer(dhd);
+ dhd_stop_bus_timer(dhd);
+ OSL_ENABLE_PREEMPTION(dhd->osh);
+ goto done;
+ }
+ OSL_ENABLE_PREEMPTION(dhd->osh);
+#else
if (ret < 0) {
DHD_ERROR(("%s(): dhd_fillup_ioct_reqst failed \r\n", __FUNCTION__));
goto done;
}
+#endif /* REPORT_FATAL_TIMEOUTS */
/* wait for IOCTL completion message from dongle and get first fragment */
ret = dhd_msgbuf_wait_ioctl_cmplt(dhd, len, buf);
return ret;
}
-void
-dhd_msgbuf_iovar_timeout_dump(dhd_pub_t *dhd)
-{
- uint32 intstatus;
- dhd_prot_t *prot = dhd->prot;
- dhd->rxcnt_timeout++;
- dhd->rx_ctlerrs++;
- dhd->iovar_timeout_occured = TRUE;
- DHD_ERROR(("%s: resumed on timeout rxcnt_timeout%s %d ioctl_cmd %d "
- "trans_id %d state %d busstate=%d ioctl_received=%d\n", __FUNCTION__,
- dhd->is_sched_error ? " due to scheduling problem" : "",
- dhd->rxcnt_timeout, prot->curr_ioctl_cmd, prot->ioctl_trans_id,
- prot->ioctl_state, dhd->busstate, prot->ioctl_received));
-#if defined(DHD_KERNEL_SCHED_DEBUG) && defined(DHD_FW_COREDUMP)
- if (dhd->is_sched_error && dhd->memdump_enabled == DUMP_MEMFILE_BUGON) {
- /* change g_assert_type to trigger Kernel panic */
- g_assert_type = 2;
- /* use ASSERT() to trigger panic */
- ASSERT(0);
- }
-#endif /* DHD_KERNEL_SCHED_DEBUG && DHD_FW_COREDUMP */
-
- if (prot->curr_ioctl_cmd == WLC_SET_VAR ||
- prot->curr_ioctl_cmd == WLC_GET_VAR) {
- char iovbuf[32];
- int dump_size = 128;
- uint8 *ioctl_buf = (uint8 *)prot->ioctbuf.va;
- memset(iovbuf, 0, sizeof(iovbuf));
- strncpy(iovbuf, ioctl_buf, sizeof(iovbuf) - 1);
- iovbuf[sizeof(iovbuf) - 1] = '\0';
- DHD_ERROR(("Current IOVAR (%s): %s\n",
- prot->curr_ioctl_cmd == WLC_SET_VAR ?
- "WLC_SET_VAR" : "WLC_GET_VAR", iovbuf));
- DHD_ERROR(("========== START IOCTL REQBUF DUMP ==========\n"));
- prhex("ioctl_buf", (const u8 *) ioctl_buf, dump_size);
- DHD_ERROR(("\n========== END IOCTL REQBUF DUMP ==========\n"));
- }
-
- /* Check the PCIe link status by reading intstatus register */
- intstatus = si_corereg(dhd->bus->sih,
- dhd->bus->sih->buscoreidx, dhd->bus->pcie_mailbox_int, 0, 0);
- if (intstatus == (uint32)-1) {
- DHD_ERROR(("%s : PCIe link might be down\n", __FUNCTION__));
- dhd->bus->is_linkdown = TRUE;
- }
-
- dhd_bus_dump_console_buffer(dhd->bus);
- dhd_prot_debug_info_print(dhd);
-}
-
/**
* Waits for IOCTL completion message from the dongle, copies this into caller
* provided parameter 'buf'.
goto out;
}
- timeleft = dhd_os_ioctl_resp_wait(dhd, (uint *)&prot->ioctl_received);
+ timeleft = dhd_os_ioctl_resp_wait(dhd, (uint *)&prot->ioctl_received, false);
#ifdef DHD_RECOVER_TIMEOUT
if (prot->ioctl_received == 0) {
- uint32 intstatus = si_corereg(dhd->bus->sih,
- dhd->bus->sih->buscoreidx, dhd->bus->pcie_mailbox_int, 0, 0);
- int host_irq_disbled = dhdpcie_irq_disabled(dhd->bus);
- if ((intstatus) && (intstatus != (uint32)-1) &&
- (timeleft == 0) && (!dhd_query_bus_erros(dhd))) {
- DHD_ERROR(("%s: iovar timeout trying again intstatus=%x"
- " host_irq_disabled=%d\n",
- __FUNCTION__, intstatus, host_irq_disbled));
- dhd_pcie_intr_count_dump(dhd);
- dhd_print_tasklet_status(dhd);
+ uint32 intstatus = 0;
+ uint32 intmask = 0;
+ intstatus = si_corereg(dhd->bus->sih,
+ dhd->bus->sih->buscoreidx, PCIMailBoxInt, 0, 0);
+ intmask = si_corereg(dhd->bus->sih,
+ dhd->bus->sih->buscoreidx, PCIMailBoxMask, 0, 0);
+ if ((intstatus) && (!intmask) && (timeleft == 0) && (!dhd_query_bus_erros(dhd)))
+ {
+ DHD_ERROR(("%s: iovar timeout trying again intstatus=%x intmask=%x\n",
+ __FUNCTION__, intstatus, intmask));
+ DHD_ERROR(("\n ------- DUMPING INTR enable/disable counters\r\n"));
+ DHD_ERROR(("resume_intr_enable_count=%lu dpc_intr_enable_count=%lu\n"
+ "isr_intr_disable_count=%lu suspend_intr_disable_count=%lu\n"
+ "dpc_return_busdown_count=%lu\n",
+ dhd->bus->resume_intr_enable_count, dhd->bus->dpc_intr_enable_count,
+ dhd->bus->isr_intr_disable_count,
+ dhd->bus->suspend_intr_disable_count,
+ dhd->bus->dpc_return_busdown_count));
+
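+ /* Interrupt status is pending but the mask was cleared: drain the control completion ring by hand, wait once more for the response, then re-arm the interrupt mask. */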
dhd_prot_process_ctrlbuf(dhd);
+
timeleft = dhd_os_ioctl_resp_wait(dhd, (uint *)&prot->ioctl_received);
- /* Clear Interrupts */
- dhdpcie_bus_clear_intstatus(dhd->bus);
+ /* Enable Back Interrupts using IntMask */
+ dhdpcie_bus_intr_enable(dhd->bus);
}
}
#endif /* DHD_RECOVER_TIMEOUT */
if (dhd->conf->ctrl_resched > 0 && timeleft == 0 && (!dhd_query_bus_erros(dhd))) {
cnt++;
if (cnt <= dhd->conf->ctrl_resched) {
- uint buscorerev = dhd->bus->sih->buscorerev;
uint32 intstatus = 0, intmask = 0;
- intstatus = si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, PCIMailBoxInt(buscorerev), 0, 0);
- intmask = si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, PCIMailBoxMask(buscorerev), 0, 0);
+ intstatus = si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, PCIMailBoxInt, 0, 0);
+ intmask = si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, PCIMailBoxMask, 0, 0);
if (intstatus) {
DHD_ERROR(("%s: reschedule dhd_dpc, cnt=%d, intstatus=0x%x, intmask=0x%x\n",
__FUNCTION__, cnt, intstatus, intmask));
- dhd->bus->intstatus = intstatus;
dhd->bus->ipend = TRUE;
dhd->bus->dpc_sched = TRUE;
dhd_sched_dpc(dhd);
- timeleft = dhd_os_ioctl_resp_wait(dhd, &prot->ioctl_received);
+ timeleft = dhd_os_ioctl_resp_wait(dhd, &prot->ioctl_received, true);
}
}
} else {
}
if (timeleft == 0 && (!dhd_query_bus_erros(dhd))) {
- /* check if resumed on time out related to scheduling issue */
- dhd->is_sched_error = FALSE;
- if (dhd->bus->isr_entry_time > prot->ioctl_fillup_time) {
- dhd->is_sched_error = dhd_bus_query_dpc_sched_errors(dhd);
+ uint32 intstatus;
+
+ dhd->rxcnt_timeout++;
+ dhd->rx_ctlerrs++;
+ dhd->iovar_timeout_occured = TRUE;
+ DHD_ERROR(("%s: resumed on timeout rxcnt_timeout %d ioctl_cmd %d "
+ "trans_id %d state %d busstate=%d ioctl_received=%d\n",
+ __FUNCTION__, dhd->rxcnt_timeout, prot->curr_ioctl_cmd,
+ prot->ioctl_trans_id, prot->ioctl_state,
+ dhd->busstate, prot->ioctl_received));
+ if (prot->curr_ioctl_cmd == WLC_SET_VAR ||
+ prot->curr_ioctl_cmd == WLC_GET_VAR) {
+ char iovbuf[32];
+ int i;
+ int dump_size = 128;
+ uint8 *ioctl_buf = (uint8 *)prot->ioctbuf.va;
+ memset(iovbuf, 0, sizeof(iovbuf));
+ strncpy(iovbuf, ioctl_buf, sizeof(iovbuf) - 1);
+ iovbuf[sizeof(iovbuf) - 1] = '\0';
+ DHD_ERROR(("Current IOVAR (%s): %s\n",
+ prot->curr_ioctl_cmd == WLC_SET_VAR ?
+ "WLC_SET_VAR" : "WLC_GET_VAR", iovbuf));
+ DHD_ERROR(("========== START IOCTL REQBUF DUMP ==========\n"));
+ for (i = 0; i < dump_size; i++) {
+ DHD_ERROR(("%02X ", ioctl_buf[i]));
+ if ((i % 32) == 31) {
+ DHD_ERROR(("\n"));
+ }
+ }
+ DHD_ERROR(("\n========== END IOCTL REQBUF DUMP ==========\n"));
}
- dhd_msgbuf_iovar_timeout_dump(dhd);
+ /* Check the PCIe link status by reading intstatus register */
+ intstatus = si_corereg(dhd->bus->sih,
+ dhd->bus->sih->buscoreidx, PCIMailBoxInt, 0, 0);
+ if (intstatus == (uint32)-1) {
+ DHD_ERROR(("%s : PCIe link might be down\n", __FUNCTION__));
+ dhd->bus->is_linkdown = TRUE;
+ }
+
+ dhd_bus_dump_console_buffer(dhd->bus);
+ dhd_prot_debug_info_print(dhd);
#ifdef DHD_FW_COREDUMP
/* Collect socram dump */
dhd_bus_mem_dump(dhd);
}
#endif /* DHD_FW_COREDUMP */
-
+#ifdef SUPPORT_LINKDOWN_RECOVERY
+#ifdef CONFIG_ARCH_MSM
+ dhd->bus->no_cfg_restore = 1;
+#endif /* CONFIG_ARCH_MSM */
+#endif /* SUPPORT_LINKDOWN_RECOVERY */
ret = -ETIMEDOUT;
goto out;
} else {
DHD_TRACE(("%s: Enter \n", __FUNCTION__));
- if (dhd->bus->is_linkdown) {
- DHD_ERROR(("%s : PCIe link is down. we have nothing to do\n",
- __FUNCTION__));
- return -EIO;
- }
-
if (dhd->busstate == DHD_BUS_DOWN) {
DHD_ERROR(("%s : bus is down. we have nothing to do\n", __FUNCTION__));
return -EIO;
DHD_CTL(("ACTION %d ifdix %d cmd %d len %d \n",
action, ifidx, cmd, len));
+#ifdef REPORT_FATAL_TIMEOUTS
+ /*
+ * These timers "should" be started before sending H2D interrupt.
+ * Think of the scenario where H2D interrupt is fired and the Dongle
+ * responds back immediately. From the DPC we would stop the cmd, bus
+ * timers. But the process context could have switched out leading to
+ * a situation where the timers are Not started yet, but are actually stopped.
+ *
+ * Disable preemption from the time we start the timer until we are done
+ * with sending H2D interrupts.
+ */
+ OSL_DISABLE_PREEMPTION(dhd->osh);
+ dhd_set_request_id(dhd, dhd->prot->ioctl_trans_id+1, cmd);
+ dhd_start_cmd_timer(dhd);
+ dhd_start_bus_timer(dhd);
+#endif /* REPORT_FATAL_TIMEOUTS */
+
/* Fill up msgbuf for ioctl req */
ret = dhd_fillup_ioct_reqst(dhd, (uint16)len, cmd, buf, ifidx);
+#ifdef REPORT_FATAL_TIMEOUTS
+ /* For some reason if we fail to ring door bell, stop the timers */
+ if (ret < 0) {
+ DHD_ERROR(("%s(): dhd_fillup_ioct_reqst failed \r\n", __FUNCTION__));
+ dhd_stop_cmd_timer(dhd);
+ dhd_stop_bus_timer(dhd);
+ OSL_ENABLE_PREEMPTION(dhd->osh);
+ goto done;
+ }
+
+ OSL_ENABLE_PREEMPTION(dhd->osh);
+#else
if (ret < 0) {
DHD_ERROR(("%s(): dhd_fillup_ioct_reqst failed \r\n", __FUNCTION__));
goto done;
}
+#endif /* REPORT_FATAL_TIMEOUTS */
ret = dhd_msgbuf_wait_ioctl_cmplt(dhd, len, buf);
return BCME_UNSUPPORTED;
}
-#ifdef DHD_DUMP_PCIE_RINGS
-int dhd_d2h_h2d_ring_dump(dhd_pub_t *dhd, void *file, const void *user_buf,
- unsigned long *file_posn, bool file_write)
-{
- dhd_prot_t *prot;
- msgbuf_ring_t *ring;
- int ret = 0;
- uint16 h2d_flowrings_total;
- uint16 flowid;
-
- if (!(dhd) || !(dhd->prot)) {
- goto exit;
- }
- prot = dhd->prot;
-
- /* Below is the same ring dump sequence followed in parser as well. */
- ring = &prot->h2dring_ctrl_subn;
- if ((ret = dhd_ring_write(dhd, ring, file, user_buf, file_posn)) < 0)
- goto exit;
-
- ring = &prot->h2dring_rxp_subn;
- if ((ret = dhd_ring_write(dhd, ring, file, user_buf, file_posn)) < 0)
- goto exit;
-
- ring = &prot->d2hring_ctrl_cpln;
- if ((ret = dhd_ring_write(dhd, ring, file, user_buf, file_posn)) < 0)
- goto exit;
-
- ring = &prot->d2hring_tx_cpln;
- if ((ret = dhd_ring_write(dhd, ring, file, user_buf, file_posn)) < 0)
- goto exit;
-
- ring = &prot->d2hring_rx_cpln;
- if ((ret = dhd_ring_write(dhd, ring, file, user_buf, file_posn)) < 0)
- goto exit;
-
- h2d_flowrings_total = dhd_get_max_flow_rings(dhd);
- FOREACH_RING_IN_FLOWRINGS_POOL(prot, ring, flowid, h2d_flowrings_total) {
- if ((ret = dhd_ring_write(dhd, ring, file, user_buf, file_posn)) < 0) {
- goto exit;
- }
- }
-
-#ifdef EWP_EDL
- if (dhd->dongle_edl_support) {
- ring = prot->d2hring_edl;
- if ((ret = dhd_edl_ring_hdr_write(dhd, ring, file, user_buf, file_posn)) < 0)
- goto exit;
- }
- else if (dhd->bus->api.fw_rev >= PCIE_SHARED_VERSION_6 && !dhd->dongle_edl_support)
-#else
- if (dhd->bus->api.fw_rev >= PCIE_SHARED_VERSION_6)
-#endif /* EWP_EDL */
- {
- ring = prot->h2dring_info_subn;
- if ((ret = dhd_ring_write(dhd, ring, file, user_buf, file_posn)) < 0)
- goto exit;
-
- ring = prot->d2hring_info_cpln;
- if ((ret = dhd_ring_write(dhd, ring, file, user_buf, file_posn)) < 0)
- goto exit;
- }
-
-exit :
- return ret;
-}
-
-/* Write to file */
-static
-int dhd_ring_write(dhd_pub_t *dhd, msgbuf_ring_t *ring, void *file,
- const void *user_buf, unsigned long *file_posn)
-{
- int ret = 0;
-
- if (ring == NULL) {
- DHD_ERROR(("%s: Ring not initialised, failed to dump ring contents\n",
- __FUNCTION__));
- return BCME_ERROR;
- }
- if (file) {
- ret = dhd_os_write_file_posn(file, file_posn, (char *)(ring->dma_buf.va),
- ((unsigned long)(ring->max_items) * (ring->item_len)));
- if (ret < 0) {
- DHD_ERROR(("%s: write file error !\n", __FUNCTION__));
- ret = BCME_ERROR;
- }
- } else if (user_buf) {
- ret = dhd_export_debug_data((char *)(ring->dma_buf.va), NULL, user_buf,
- ((unsigned long)(ring->max_items) * (ring->item_len)), (int *)file_posn);
- }
- return ret;
-}
-#endif /* DHD_DUMP_PCIE_RINGS */
-
-#ifdef EWP_EDL
-/* Write to file */
-static
-int dhd_edl_ring_hdr_write(dhd_pub_t *dhd, msgbuf_ring_t *ring, void *file, const void *user_buf,
- unsigned long *file_posn)
-{
- int ret = 0, nitems = 0;
- char *buf = NULL, *ptr = NULL;
- uint8 *msg_addr = NULL;
- uint16 rd = 0;
-
- if (ring == NULL) {
- DHD_ERROR(("%s: Ring not initialised, failed to dump ring contents\n",
- __FUNCTION__));
- ret = BCME_ERROR;
- goto done;
- }
-
- buf = MALLOCZ(dhd->osh, (D2HRING_EDL_MAX_ITEM * D2HRING_EDL_HDR_SIZE));
- if (buf == NULL) {
- DHD_ERROR(("%s: buffer allocation failed\n", __FUNCTION__));
- ret = BCME_ERROR;
- goto done;
- }
- ptr = buf;
-
- for (; nitems < D2HRING_EDL_MAX_ITEM; nitems++, rd++) {
- msg_addr = (uint8 *)ring->dma_buf.va + (rd * ring->item_len);
- memcpy(ptr, (char *)msg_addr, D2HRING_EDL_HDR_SIZE);
- ptr += D2HRING_EDL_HDR_SIZE;
- }
- if (file) {
- ret = dhd_os_write_file_posn(file, file_posn, buf,
- (D2HRING_EDL_HDR_SIZE * D2HRING_EDL_MAX_ITEM));
- if (ret < 0) {
- DHD_ERROR(("%s: write file error !\n", __FUNCTION__));
- goto done;
- }
- }
- else {
- ret = dhd_export_debug_data(buf, NULL, user_buf,
- (D2HRING_EDL_HDR_SIZE * D2HRING_EDL_MAX_ITEM), file_posn);
- }
-
-done:
- if (buf) {
- MFREE(dhd->osh, buf, (D2HRING_EDL_MAX_ITEM * D2HRING_EDL_HDR_SIZE));
- }
- return ret;
-}
-#endif /* EWP_EDL */
-
/** Add prot dump output to a buffer */
void dhd_prot_dump(dhd_pub_t *dhd, struct bcmstrbuf *b)
{
dhd->prot->rw_index_sz);
bcm_bprintf(b, "h2d_max_txpost: %d, prot->h2d_max_txpost: %d\n",
h2d_max_txpost, dhd->prot->h2d_max_txpost);
- bcm_bprintf(b, "pktid_txq_start_cnt: %d\n", dhd->prot->pktid_txq_start_cnt);
- bcm_bprintf(b, "pktid_txq_stop_cnt: %d\n", dhd->prot->pktid_txq_stop_cnt);
- bcm_bprintf(b, "pktid_depleted_cnt: %d\n", dhd->prot->pktid_depleted_cnt);
}
/* Update local copy of dongle statistics */
dhd_prot_t *prot = dhd->prot;
msgbuf_ring_t *ring = &prot->h2dring_ctrl_subn;
- DHD_RING_LOCK(ring->ring_lock, flags);
+#ifdef PCIE_INB_DW
+ if (dhd_prot_inc_hostactive_devwake_assert(dhd->bus) != BCME_OK)
+ return BCME_ERROR;
+#endif /* PCIE_INB_DW */
+
+ DHD_GENERAL_LOCK(dhd, flags);
hevent = (hostevent_hdr_t *)
dhd_prot_alloc_ring_space(dhd, ring, 1, &alloced, FALSE);
if (hevent == NULL) {
- DHD_RING_UNLOCK(ring->ring_lock, flags);
+ DHD_GENERAL_UNLOCK(dhd, flags);
+#ifdef PCIE_INB_DW
+ dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus);
+#endif
return -1;
}
* from the msgbuf, we can directly call the write_complete
*/
dhd_prot_ring_write_complete(dhd, ring, hevent, 1);
-
- DHD_RING_UNLOCK(ring->ring_lock, flags);
+ DHD_GENERAL_UNLOCK(dhd, flags);
+#ifdef PCIE_INB_DW
+ dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus);
+#endif
return 0;
}
ret_buf = dhd_prot_get_ring_space(ring, nitems, alloced, exactly_nitems);
if (ret_buf == NULL) {
- /* HWA TODO, need to get RD pointer from different array
- * which HWA will directly write into host memory
- */
/* if alloc failed , invalidate cached read ptr */
if (dhd->dma_d2h_ring_upd_support) {
ring->rd = dhd_prot_dma_indx_get(dhd, H2D_DMA_INDX_RD_UPD, ring->idx);
} else {
dhd_bus_cmn_readshared(dhd->bus, &(ring->rd), RING_RD_UPD, ring->idx);
+#ifdef SUPPORT_LINKDOWN_RECOVERY
+ /* Check if ring->rd is valid */
+ if (ring->rd >= ring->max_items) {
+ dhd->bus->read_shm_fail = TRUE;
+ DHD_ERROR(("%s: Invalid rd idx=%d\n", ring->name, ring->rd));
+ return NULL;
+ }
+#endif /* SUPPORT_LINKDOWN_RECOVERY */
}
/* Try allocating once more */
/* Limit ioct request to MSGBUF_MAX_MSG_SIZE bytes including hdrs */
/* 8K allocation of dongle buffer fails */
/* dhd doesnt give separate input & output buf lens */
- /* so making the assumption that input length can never be more than 2k */
- rqstlen = MIN(rqstlen, MSGBUF_IOCTL_MAX_RQSTLEN);
+ /* so making the assumption that input length can never be more than 1.5k */
+ rqstlen = MIN(rqstlen, MSGBUF_MAX_MSG_SIZE);
- DHD_RING_LOCK(ring->ring_lock, flags);
+#ifdef PCIE_INB_DW
+ if (dhd_prot_inc_hostactive_devwake_assert(dhd->bus) != BCME_OK)
+ return BCME_ERROR;
+#endif /* PCIE_INB_DW */
+
+ DHD_GENERAL_LOCK(dhd, flags);
if (prot->ioctl_state) {
DHD_ERROR(("%s: pending ioctl %02x\n", __FUNCTION__, prot->ioctl_state));
- DHD_RING_UNLOCK(ring->ring_lock, flags);
+ DHD_GENERAL_UNLOCK(dhd, flags);
+#ifdef PCIE_INB_DW
+ dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus);
+#endif
return BCME_BUSY;
} else {
prot->ioctl_state = MSGBUF_IOCTL_ACK_PENDING | MSGBUF_IOCTL_RESP_PENDING;
prot->ioctl_state = 0;
prot->curr_ioctl_cmd = 0;
prot->ioctl_received = IOCTL_WAIT;
- DHD_RING_UNLOCK(ring->ring_lock, flags);
+ DHD_GENERAL_UNLOCK(dhd, flags);
+#ifdef PCIE_INB_DW
+ dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus);
+#endif
return -1;
}
/* copy ioct payload */
ioct_buf = (void *) prot->ioctbuf.va;
- prot->ioctl_fillup_time = OSL_LOCALTIME_NS();
-
if (buf)
memcpy(ioct_buf, buf, len);
/* update ring's WR index and ring doorbell to dongle */
dhd_prot_ring_write_complete(dhd, ring, ioct_rqst, 1);
-
- DHD_RING_UNLOCK(ring->ring_lock, flags);
+ DHD_GENERAL_UNLOCK(dhd, flags);
+#ifdef PCIE_INB_DW
+ dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus);
+#endif
return 0;
} /* dhd_fillup_ioct_reqst */
+
/**
* dhd_prot_ring_attach - Initialize the msgbuf_ring object and attach a
* DMA-able buffer to it. The ring is NOT tagged as inited until all the ring
uint32 dma_buf_len = max_items * item_len;
dhd_prot_t *prot = dhd->prot;
uint16 max_flowrings = dhd->bus->max_tx_flowrings;
- dhd_dma_buf_t *dma_buf = NULL;
ASSERT(ring);
ASSERT(name);
uint16 flowid;
uint32 base_offset;
+ dhd_dma_buf_t *dma_buf = &ring->dma_buf;
dhd_dma_buf_t *rsv_buf = &prot->flowrings_dma_buf;
- dma_buf = &ring->dma_buf;
flowid = DHD_RINGID_TO_FLOWID(ringid);
base_offset = (flowid - BCMPCIE_H2D_COMMON_MSGRINGS) * dma_buf_len;
(void)dhd_dma_buf_audit(dhd, &ring->dma_buf);
} else {
-#ifdef EWP_EDL
- if (ring == dhd->prot->d2hring_edl) {
- /* For EDL ring, memory is alloced during attach,
- * so just need to copy the dma_buf to the ring's dma_buf
- */
- memcpy(&ring->dma_buf, &dhd->edl_ring_mem, sizeof(ring->dma_buf));
- dma_buf = &ring->dma_buf;
- if (dma_buf->va == NULL) {
- return BCME_NOMEM;
- }
- } else
-#endif /* EWP_EDL */
- {
- /* Allocate a dhd_dma_buf */
- dma_buf_alloced = dhd_dma_buf_alloc(dhd, &ring->dma_buf, dma_buf_len);
- if (dma_buf_alloced != BCME_OK) {
- return BCME_NOMEM;
- }
+ /* Allocate a dhd_dma_buf */
+ dma_buf_alloced = dhd_dma_buf_alloc(dhd, &ring->dma_buf, dma_buf_len);
+ if (dma_buf_alloced != BCME_OK) {
+ return BCME_NOMEM;
}
}
}
#endif /* BCM_SECURE_DMA */
- ring->ring_lock = dhd_os_spin_lock_init(dhd->osh);
-
DHD_INFO(("RING_ATTACH : %s Max item %d len item %d total size %d "
"ring start %p buf phys addr %x:%x \n",
ring->name, ring->max_items, ring->item_len,
} /* dhd_prot_ring_attach */
+
/**
* dhd_prot_ring_init - Post the common ring information to dongle.
*
ring->wr = 0;
ring->rd = 0;
ring->curr_rd = 0;
- /* Reset hwa_db_type for all rings,
- * for data path rings, it will be assigned separately post init
- * from dhd_prot_d2h_sync_init and dhd_prot_h2d_sync_init
- */
- ring->hwa_db_type = 0;
/* CAUTION: ring::base_addr already in Little Endian */
dhd_bus_cmn_writeshared(dhd->bus, &ring->base_addr,
} /* dhd_prot_ring_init */
+
/**
* dhd_prot_ring_reset - bzero a ring's DMA-ble buffer and cache flush
* Reset WR and RD indices to 0.
ring->create_pending = FALSE;
}
+
/**
* dhd_prot_ring_detach - Detach the DMA-able buffer and any other objects
* hanging off the msgbuf_ring.
dhd_dma_buf_free(dhd, &ring->dma_buf);
}
- dhd_os_spin_lock_deinit(dhd->osh, ring->ring_lock);
-
} /* dhd_prot_ring_detach */
+
+/*
+ * +----------------------------------------------------------------------------
+ * Flowring Pool
+ *
+ * Unlike common rings, which are attached very early on (dhd_prot_attach),
+ * flowrings are dynamically instantiated. Moreover, flowrings may require a
+ * larger DMA-able buffer. To avoid issues with fragmented cache coherent
+ * DMA-able memory, a pre-allocated pool of msgbuf_ring_t is allocated once.
+ * The DMA-able buffers are attached to these pre-allocated msgbuf_ring.
+ *
+ * Each DMA-able buffer may be allocated independently, or may be carved out
+ * of a single large contiguous region that is registered with the protocol
+ * layer into flowrings_dma_buf. On a 64bit platform, this contiguous region
+ * may not span 0x00000000FFFFFFFF (avoid dongle side 64bit ptr arithmetic).
+ *
+ * No flowring pool action is performed in dhd_prot_attach(), as the number
+ * of h2d rings is not yet known.
+ *
+ * In dhd_prot_init(), the dongle-advertised number of h2d rings is used to
+ * determine the number of flowrings required, and a pool of msgbuf_rings are
+ * allocated and a DMA-able buffer (carved or allocated) is attached.
+ * See: dhd_prot_flowrings_pool_attach()
+ *
+ * A flowring msgbuf_ring object may be fetched from this pool during flowring
+ * creation, using the flowid. Likewise, flowrings may be freed back into the
+ * pool on flowring deletion.
+ * See: dhd_prot_flowrings_pool_fetch(), dhd_prot_flowrings_pool_release()
+ *
+ * In dhd_prot_detach(), the flowring pool is detached. The DMA-able buffers
+ * are detached (returned back to the carved region or freed), and the pool of
+ * msgbuf_ring and any objects allocated against it are freed.
+ * See: dhd_prot_flowrings_pool_detach()
+ *
+ * In dhd_prot_reset(), the flowring pool is simply reset: it is returned to
+ * the state it had immediately after attach. All DMA-able buffers are retained.
+ * Following a dhd_prot_reset(), in a subsequent dhd_prot_init(), the flowring
+ * pool attach will notice that the pool persists and continue to use it. This
+ * will avoid the case of a fragmented DMA-able region.
+ *
+ * +----------------------------------------------------------------------------
+ */
+
+/* Conversion of a flowid to a flowring pool index */
+#define DHD_FLOWRINGS_POOL_OFFSET(flowid) \
+ ((flowid) - BCMPCIE_H2D_COMMON_MSGRINGS)
+
+/* Fetch the msgbuf_ring_t from the flowring pool given a flowid */
+#define DHD_RING_IN_FLOWRINGS_POOL(prot, flowid) \
+ (msgbuf_ring_t*)((prot)->h2d_flowrings_pool) + \
+ DHD_FLOWRINGS_POOL_OFFSET(flowid)
+
+/* Traverse each flowring in the flowring pool, assigning ring and flowid */
+#define FOREACH_RING_IN_FLOWRINGS_POOL(prot, ring, flowid, total_flowrings) \
+ for ((flowid) = DHD_FLOWRING_START_FLOWID, \
+ (ring) = DHD_RING_IN_FLOWRINGS_POOL(prot, flowid); \
+ (flowid) < ((total_flowrings) + DHD_FLOWRING_START_FLOWID); \
+ (ring)++, (flowid)++)
+
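The pool described in the block comment above is a single contiguous array of ring objects indexed by flowid minus the first flow ring's id, with memory attached once and then reused across fetch/release cycles. The standalone sketch below models only that bookkeeping; the structure fields, constants, and helper names are assumptions made for the example, not the driver's definitions.

/* Standalone illustration of the flowring pool bookkeeping described above.
 * Field names, constants, and helpers are invented for this sketch.
 */
#include <stdio.h>
#include <stdlib.h>

#define COMMON_MSGRINGS  2                     /* stand-in for BCMPCIE_H2D_COMMON_MSGRINGS */
#define POOL_OFFSET(fid) ((fid) - COMMON_MSGRINGS)

struct flowring {                              /* minimal stand-in for msgbuf_ring_t */
	unsigned short wr, rd;
	int inited;
};

/* One allocation for the whole pool, as the pool-attach step does; rings are
 * then handed out and returned by flowid without further allocation.
 */
static struct flowring *pool_attach(unsigned total_flowrings)
{
	return calloc(total_flowrings, sizeof(struct flowring));
}

static struct flowring *pool_fetch(struct flowring *pool, unsigned flowid)
{
	struct flowring *ring = &pool[POOL_OFFSET(flowid)];

	ring->wr = ring->rd = 0;                   /* fresh indices for the new flow */
	ring->inited = 1;
	return ring;
}

static void pool_release(struct flowring *pool, unsigned flowid)
{
	pool[POOL_OFFSET(flowid)].inited = 0;      /* ring (and its buffer) is retained */
}

int main(void)
{
	struct flowring *pool = pool_attach(8);
	unsigned flowid = COMMON_MSGRINGS + 3;

	if (!pool)
		return 1;
	pool_fetch(pool, flowid);
	printf("flowid %u lives at pool index %u\n", flowid, POOL_OFFSET(flowid));
	pool_release(pool, flowid);
	free(pool);
	return 0;
}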
/* Fetch number of H2D flowrings given the total number of h2d rings */
-uint16
+static uint16
dhd_get_max_flow_rings(dhd_pub_t *dhd)
{
if (dhd->bus->api.fw_rev >= PCIE_SHARED_VERSION_6)
DHD_FLOWID_TO_RINGID(flowid)) != BCME_OK) {
goto attach_fail;
}
- /*
- * TOD0 - Currently flowrings hwa is disabled and can be enabled like below
- * (dhd->bus->hwa_enab_bmap & HWA_ENAB_BITMAP_TXPOSTS) ? HWA_DB_TYPE_TXPOSTS : 0;
- */
- ring->hwa_db_type = 0;
}
return BCME_OK;
} /* dhd_prot_flowrings_pool_attach */
+
/**
* dhd_prot_flowrings_pool_reset - Reset all msgbuf_ring_t objects in the pool.
* Invokes dhd_prot_ring_reset to perform the actual reset.
/* Flowring pool state must be as-if dhd_prot_flowrings_pool_attach */
}
+
/**
* dhd_prot_flowrings_pool_detach - Free pool of msgbuf_ring along with
* DMA-able buffers for flowrings.
dhd_prot_ring_detach(dhd, ring);
}
+
MFREE(prot->osh, prot->h2d_flowrings_pool,
(h2d_flowrings_total * sizeof(msgbuf_ring_t)));
} /* dhd_prot_flowrings_pool_detach */
+
/**
* dhd_prot_flowrings_pool_fetch - Fetch a preallocated and initialized
* msgbuf_ring from the flowring pool, and assign it.
return ring;
}
+
/**
* dhd_prot_flowrings_pool_release - release a previously fetched flowring's
* msgbuf_ring back to the flow_ring pool.
ring->curr_rd = 0;
}
+
/* Assumes only one index is updated at a time */
/* If exactly_nitems is true, this function will allocate space for nitems or fail */
/* Exception: when wrap around is encountered, to prevent hangup (last nitems of ring buffer) */
return ret_ptr;
} /* dhd_prot_get_ring_space */
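The wrap-around exception noted in the comments above is the usual circular-buffer constraint: the producer can only hand out contiguous slots, so a request that straddles the end of the ring is either trimmed to what remains before the wrap or, when an exact count is required, refused. The standalone sketch below shows that calculation with a one-slot-empty convention; the field names and the accounting details are illustrative and may differ from the driver's own dhd_prot_get_ring_space().

/* Generic contiguous-space check for a circular ring (one slot kept empty
 * so that full and empty states can be told apart). Illustrative only.
 */
#include <stdio.h>

struct ring {
	unsigned short rd, wr, max_items;
};

/* Returns how many contiguous items may be written starting at wr.
 * With exactly_nitems set, a request that cannot be met in full
 * (including one that would straddle the wrap) returns 0.
 */
static unsigned ring_contig_space(const struct ring *r,
                                  unsigned nitems, int exactly_nitems)
{
	unsigned free_total, free_contig;

	if (r->wr >= r->rd)
		free_total = r->max_items - (r->wr - r->rd) - 1;
	else
		free_total = r->rd - r->wr - 1;

	/* contiguous run before the buffer wraps back to index 0 */
	free_contig = r->max_items - r->wr;
	if (free_contig > free_total)
		free_contig = free_total;

	if (exactly_nitems)
		return (free_contig >= nitems) ? nitems : 0;
	return (free_contig < nitems) ? free_contig : nitems;
}

int main(void)
{
	struct ring r = { 2, 60, 64 };

	/* Only 4 contiguous slots remain before the wrap, so a request for 6
	 * is trimmed to 4, or refused entirely when an exact count is required.
	 */
	printf("best effort: %u\n", ring_contig_space(&r, 6, 0));
	printf("exact:       %u\n", ring_contig_space(&r, 6, 1));
	return 0;
}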
+
/**
* dhd_prot_ring_write_complete - Host updates the new WR index on producing
* new messages in a H2D ring. The messages are flushed from cache prior to
* always hold appropriate locks.
*/
static void BCMFASTPATH
-__dhd_prot_ring_write_complete(dhd_pub_t *dhd, msgbuf_ring_t * ring, void* p,
+dhd_prot_ring_write_complete(dhd_pub_t *dhd, msgbuf_ring_t * ring, void* p,
uint16 nitems)
{
dhd_prot_t *prot = dhd->prot;
- uint32 db_index;
+ uint8 db_index;
uint16 max_flowrings = dhd->bus->max_tx_flowrings;
- uint corerev;
/* cache flush */
OSL_CACHE_FLUSH(p, ring->item_len * nitems);
- /* For HWA, update db_index and ring mb2 DB and return */
- if (HWA_ACTIVE(dhd) && ring->hwa_db_type) {
- db_index = HWA_DB_INDEX_VALUE(ring->wr) | ring->hwa_db_type;
- DHD_TRACE(("%s: ring(%s) wr(%d) hwa_db_type(0x%x) db_index(0x%x)\n",
- __FUNCTION__, ring->name, ring->wr, ring->hwa_db_type, db_index));
- prot->mb_2_ring_fn(dhd->bus, db_index, TRUE);
- return;
- }
-
- if (IDMA_ACTIVE(dhd) || dhd->dma_h2d_ring_upd_support) {
+ if (IDMA_DS_ACTIVE(dhd) && IDMA_ACTIVE(dhd)) {
+ dhd_bus_cmn_writeshared(dhd->bus, &(ring->wr),
+ sizeof(uint16), RING_WR_UPD, ring->idx);
+ } else if (IDMA_ACTIVE(dhd) || dhd->dma_h2d_ring_upd_support) {
dhd_prot_dma_indx_set(dhd, ring->wr,
H2D_DMA_INDX_WR_UPD, ring->idx);
} else if (IFRM_ACTIVE(dhd) && DHD_IS_FLOWRING(ring->idx, max_flowrings)) {
/* raise h2d interrupt */
if (IDMA_ACTIVE(dhd) ||
(IFRM_ACTIVE(dhd) && DHD_IS_FLOWRING(ring->idx, max_flowrings))) {
- db_index = IDMA_IDX0;
- /* this api is called in wl down path..in that case sih is freed already */
- if (dhd->bus->sih) {
- corerev = dhd->bus->sih->buscorerev;
- /* We need to explictly configure the type of DMA for core rev >= 24 */
- if (corerev >= 24) {
- db_index |= (DMA_TYPE_IDMA << DMA_TYPE_SHIFT);
- }
+ if (IDMA_DS_ACTIVE(dhd)) {
+ prot->mb_ring_fn(dhd->bus, ring->wr);
+ } else {
+ db_index = IDMA_IDX0;
+ prot->mb_2_ring_fn(dhd->bus, db_index, TRUE);
}
- prot->mb_2_ring_fn(dhd->bus, db_index, TRUE);
} else {
prot->mb_ring_fn(dhd->bus, ring->wr);
}
}
-static void BCMFASTPATH
-dhd_prot_ring_write_complete(dhd_pub_t *dhd, msgbuf_ring_t * ring, void* p,
- uint16 nitems)
-{
- unsigned long flags_bus;
- DHD_BUS_LOCK(dhd->bus->bus_lock, flags_bus);
- __dhd_prot_ring_write_complete(dhd, ring, p, nitems);
- DHD_BUS_UNLOCK(dhd->bus->bus_lock, flags_bus);
-}
-
-/**
- * dhd_prot_ring_write_complete_mbdata - will be called from dhd_prot_h2d_mbdata_send_ctrlmsg,
- * which will hold DHD_BUS_LOCK to update WR pointer, Ring DB and also update bus_low_power_state
- * to indicate D3_INFORM sent in the same BUS_LOCK.
- */
-static void BCMFASTPATH
-dhd_prot_ring_write_complete_mbdata(dhd_pub_t *dhd, msgbuf_ring_t * ring, void *p,
- uint16 nitems, uint32 mb_data)
-{
- unsigned long flags_bus;
-
- DHD_BUS_LOCK(dhd->bus->bus_lock, flags_bus);
-
- __dhd_prot_ring_write_complete(dhd, ring, p, nitems);
-
- /* Mark D3_INFORM in the same context to skip ringing H2D DB after D3_INFORM */
- if (mb_data == H2D_HOST_D3_INFORM) {
- dhd->bus->bus_low_power_state = DHD_BUS_D3_INFORM_SENT;
- }
-
- DHD_BUS_UNLOCK(dhd->bus->bus_lock, flags_bus);
-}
-
/**
* dhd_prot_upd_read_idx - Host updates the new RD index on consuming messages
* from a D2H ring. The new RD index will be updated in the DMA Index array or
dhd_prot_upd_read_idx(dhd_pub_t *dhd, msgbuf_ring_t * ring)
{
dhd_prot_t *prot = dhd->prot;
- uint32 db_index;
- uint corerev;
-
- /* For HWA, update db_index and ring mb2 DB and return */
- if (HWA_ACTIVE(dhd) && ring->hwa_db_type) {
- db_index = HWA_DB_INDEX_VALUE(ring->rd) | ring->hwa_db_type;
- DHD_TRACE(("%s: ring(%s) rd(0x%x) hwa_db_type(0x%x) db_index(0x%x)\n",
- __FUNCTION__, ring->name, ring->rd, ring->hwa_db_type, db_index));
- prot->mb_2_ring_fn(dhd->bus, db_index, FALSE);
- return;
- }
+ uint8 db_index;
/* update read index */
/* If dma'ing h2d indices supported
*/
if (IDMA_ACTIVE(dhd)) {
dhd_prot_dma_indx_set(dhd, ring->rd,
- D2H_DMA_INDX_RD_UPD, ring->idx);
- db_index = IDMA_IDX1;
- if (dhd->bus->sih) {
- corerev = dhd->bus->sih->buscorerev;
- /* We need to explictly configure the type of DMA for core rev >= 24 */
- if (corerev >= 24) {
- db_index |= (DMA_TYPE_IDMA << DMA_TYPE_SHIFT);
- }
+ D2H_DMA_INDX_RD_UPD, ring->idx);
+ if (IDMA_DS_ACTIVE(dhd)) {
+ dhd_bus_cmn_writeshared(dhd->bus, &(ring->rd),
+ sizeof(uint16), RING_RD_UPD, ring->idx);
+ } else {
+ db_index = IDMA_IDX1;
+ prot->mb_2_ring_fn(dhd->bus, db_index, FALSE);
}
- prot->mb_2_ring_fn(dhd->bus, db_index, FALSE);
} else if (dhd->dma_h2d_ring_upd_support) {
dhd_prot_dma_indx_set(dhd, ring->rd,
D2H_DMA_INDX_RD_UPD, ring->idx);
}
static int
-dhd_send_d2h_ringcreate(dhd_pub_t *dhd, msgbuf_ring_t *ring_to_create,
- uint16 ring_type, uint32 req_id)
+dhd_send_d2h_ringcreate(dhd_pub_t *dhd, msgbuf_ring_t *ring_to_create)
{
unsigned long flags;
d2h_ring_create_req_t *d2h_ring;
uint16 alloced = 0;
int ret = BCME_OK;
uint16 max_h2d_rings = dhd->bus->max_submission_rings;
- msgbuf_ring_t *ctrl_ring = &dhd->prot->h2dring_ctrl_subn;
- DHD_RING_LOCK(ctrl_ring->ring_lock, flags);
+#ifdef PCIE_INB_DW
+ if (dhd_prot_inc_hostactive_devwake_assert(dhd->bus) != BCME_OK)
+ return BCME_ERROR;
+#endif /* PCIE_INB_DW */
+ DHD_GENERAL_LOCK(dhd, flags);
DHD_TRACE(("%s trying to send D2H ring create Req\n", __FUNCTION__));
/* Request for ring buffer space */
d2h_ring = (d2h_ring_create_req_t *) dhd_prot_alloc_ring_space(dhd,
- ctrl_ring, DHD_FLOWRING_DEFAULT_NITEMS_POSTED_H2D,
+ &dhd->prot->h2dring_ctrl_subn, DHD_FLOWRING_DEFAULT_NITEMS_POSTED_H2D,
&alloced, FALSE);
if (d2h_ring == NULL) {
ret = BCME_NOMEM;
goto err;
}
- ring_to_create->create_req_id = (uint16)req_id;
+ ring_to_create->create_req_id = DHD_D2H_DBGRING_REQ_PKTID;
ring_to_create->create_pending = TRUE;
/* Common msg buf hdr */
d2h_ring->msg.msg_type = MSG_TYPE_D2H_RING_CREATE;
d2h_ring->msg.if_id = 0;
- d2h_ring->msg.flags = ctrl_ring->current_phase;
+ d2h_ring->msg.flags = dhd->prot->h2dring_ctrl_subn.current_phase;
d2h_ring->msg.request_id = htol32(ring_to_create->create_req_id);
d2h_ring->ring_id = htol16(DHD_D2H_RING_OFFSET(ring_to_create->idx, max_h2d_rings));
- DHD_ERROR(("%s ringid: %d idx: %d max_h2d: %d\n", __FUNCTION__, d2h_ring->ring_id,
- ring_to_create->idx, max_h2d_rings));
-
- d2h_ring->ring_type = ring_type;
- d2h_ring->max_items = htol16(ring_to_create->max_items);
- d2h_ring->len_item = htol16(ring_to_create->item_len);
+ d2h_ring->ring_type = BCMPCIE_D2H_RING_TYPE_DBGBUF_CPL;
+ d2h_ring->max_items = htol16(D2HRING_DYNAMIC_INFO_MAX_ITEM);
+ d2h_ring->len_item = htol16(D2HRING_INFO_BUFCMPLT_ITEMSIZE);
d2h_ring->ring_ptr.low_addr = ring_to_create->base_addr.low_addr;
d2h_ring->ring_ptr.high_addr = ring_to_create->base_addr.high_addr;
d2h_ring->flags = 0;
d2h_ring->msg.epoch =
- ctrl_ring->seqnum % H2D_EPOCH_MODULO;
- ctrl_ring->seqnum++;
-#ifdef EWP_EDL
- if (ring_type == BCMPCIE_D2H_RING_TYPE_EDL) {
- DHD_ERROR(("%s: sending d2h EDL ring create: "
- "\n max items=%u; len_item=%u; ring_id=%u; low_addr=0x%x; high_addr=0x%x\n",
- __FUNCTION__, ltoh16(d2h_ring->max_items),
- ltoh16(d2h_ring->len_item),
- ltoh16(d2h_ring->ring_id),
- d2h_ring->ring_ptr.low_addr,
- d2h_ring->ring_ptr.high_addr));
- }
-#endif /* EWP_EDL */
+ dhd->prot->h2dring_ctrl_subn.seqnum % H2D_EPOCH_MODULO;
+ dhd->prot->h2dring_ctrl_subn.seqnum++;
/* Update the flow_ring's WRITE index */
- dhd_prot_ring_write_complete(dhd, ctrl_ring, d2h_ring,
+ dhd_prot_ring_write_complete(dhd, &dhd->prot->h2dring_ctrl_subn, d2h_ring,
DHD_FLOWRING_DEFAULT_NITEMS_POSTED_H2D);
- DHD_RING_UNLOCK(ctrl_ring->ring_lock, flags);
-
- return ret;
err:
- DHD_RING_UNLOCK(ctrl_ring->ring_lock, flags);
-
+ DHD_GENERAL_UNLOCK(dhd, flags);
+#ifdef PCIE_INB_DW
+ dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus);
+#endif
return ret;
}
static int
-dhd_send_h2d_ringcreate(dhd_pub_t *dhd, msgbuf_ring_t *ring_to_create, uint8 ring_type, uint32 id)
+dhd_send_h2d_ringcreate(dhd_pub_t *dhd, msgbuf_ring_t *ring_to_create)
{
unsigned long flags;
h2d_ring_create_req_t *h2d_ring;
uint16 alloced = 0;
uint8 i = 0;
int ret = BCME_OK;
- msgbuf_ring_t *ctrl_ring = &dhd->prot->h2dring_ctrl_subn;
- DHD_RING_LOCK(ctrl_ring->ring_lock, flags);
+
+#ifdef PCIE_INB_DW
+ if (dhd_prot_inc_hostactive_devwake_assert(dhd->bus) != BCME_OK)
+ return BCME_ERROR;
+#endif /* PCIE_INB_DW */
+ DHD_GENERAL_LOCK(dhd, flags);
DHD_TRACE(("%s trying to send H2D ring create Req\n", __FUNCTION__));
/* Request for ring buffer space */
h2d_ring = (h2d_ring_create_req_t *)dhd_prot_alloc_ring_space(dhd,
- ctrl_ring, DHD_FLOWRING_DEFAULT_NITEMS_POSTED_H2D,
+ &dhd->prot->h2dring_ctrl_subn, DHD_FLOWRING_DEFAULT_NITEMS_POSTED_H2D,
&alloced, FALSE);
if (h2d_ring == NULL) {
ret = BCME_NOMEM;
goto err;
}
- ring_to_create->create_req_id = (uint16)id;
+ ring_to_create->create_req_id = DHD_H2D_DBGRING_REQ_PKTID;
ring_to_create->create_pending = TRUE;
/* Common msg buf hdr */
h2d_ring->msg.msg_type = MSG_TYPE_H2D_RING_CREATE;
h2d_ring->msg.if_id = 0;
h2d_ring->msg.request_id = htol32(ring_to_create->create_req_id);
- h2d_ring->msg.flags = ctrl_ring->current_phase;
+ h2d_ring->msg.flags = dhd->prot->h2dring_ctrl_subn.current_phase;
h2d_ring->ring_id = htol16(DHD_H2D_RING_OFFSET(ring_to_create->idx));
- h2d_ring->ring_type = ring_type;
+ h2d_ring->ring_type = BCMPCIE_H2D_RING_TYPE_DBGBUF_SUBMIT;
h2d_ring->max_items = htol16(H2DRING_DYNAMIC_INFO_MAX_ITEM);
h2d_ring->n_completion_ids = ring_to_create->n_completion_ids;
h2d_ring->len_item = htol16(H2DRING_INFO_BUFPOST_ITEMSIZE);
h2d_ring->flags = 0;
h2d_ring->msg.epoch =
- ctrl_ring->seqnum % H2D_EPOCH_MODULO;
- ctrl_ring->seqnum++;
+ dhd->prot->h2dring_ctrl_subn.seqnum % H2D_EPOCH_MODULO;
+ dhd->prot->h2dring_ctrl_subn.seqnum++;
/* Update the flow_ring's WRITE index */
- dhd_prot_ring_write_complete(dhd, ctrl_ring, h2d_ring,
+ dhd_prot_ring_write_complete(dhd, &dhd->prot->h2dring_ctrl_subn, h2d_ring,
DHD_FLOWRING_DEFAULT_NITEMS_POSTED_H2D);
- DHD_RING_UNLOCK(ctrl_ring->ring_lock, flags);
-
- return ret;
err:
- DHD_RING_UNLOCK(ctrl_ring->ring_lock, flags);
-
+ DHD_GENERAL_UNLOCK(dhd, flags);
+#ifdef PCIE_INB_DW
+ dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus);
+#endif
return ret;
}
} /* dhd_prot_dma_indx_set */
+
/**
* dhd_prot_dma_indx_get - Fetch a WR or RD index from the dongle DMA-ed index
* array.
} /* dhd_prot_dma_indx_init */
+
/**
* Called on checking for 'completion' messages from the dongle. Returns next host buffer to read
* from, or NULL if there are no more messages to read.
DHD_ERROR(("dhd->busstate %d bus->wait_for_d3_ack %d \r\n",
dhd->busstate, dhd->bus->wait_for_d3_ack));
DHD_ERROR(("\r\n======================= \r\n"));
+#ifdef SUPPORT_LINKDOWN_RECOVERY
+ if (wr >= ring->max_items) {
+ dhd->bus->read_shm_fail = TRUE;
+ }
+#else
#ifdef DHD_FW_COREDUMP
if (dhd->memdump_enabled) {
/* collect core dump */
}
#endif /* DHD_FW_COREDUMP */
+#endif /* SUPPORT_LINKDOWN_RECOVERY */
*available_len = 0;
dhd_schedule_reset(dhd);
{
h2d_mailbox_data_t *h2d_mb_data;
uint16 alloced = 0;
- msgbuf_ring_t *ctrl_ring = &dhd->prot->h2dring_ctrl_subn;
- unsigned long flags;
int num_post = 1;
int i;
DHD_INFO(("%s Sending H2D MB data Req data 0x%04x\n",
__FUNCTION__, mb_data));
- if (!ctrl_ring->inited) {
+ if (!dhd->prot->h2dring_ctrl_subn.inited) {
DHD_ERROR(("%s: Ctrl Submit Ring: not inited\n", __FUNCTION__));
return BCME_ERROR;
}
+#ifdef PCIE_INB_DW
+ if ((INBAND_DW_ENAB(dhd->bus)) &&
+ (dhdpcie_bus_get_pcie_inband_dw_state(dhd->bus) ==
+ DW_DEVICE_DS_DEV_SLEEP)) {
+ if (mb_data == H2D_HOST_CONS_INT) {
+ /* One additional device_wake post needed */
+ num_post = 2;
+ }
+ }
+#endif /* PCIE_INB_DW */
for (i = 0; i < num_post; i ++) {
- DHD_RING_LOCK(ctrl_ring->ring_lock, flags);
/* Request for ring buffer space */
h2d_mb_data = (h2d_mailbox_data_t *)dhd_prot_alloc_ring_space(dhd,
- ctrl_ring, DHD_FLOWRING_DEFAULT_NITEMS_POSTED_H2D,
+ &dhd->prot->h2dring_ctrl_subn, DHD_FLOWRING_DEFAULT_NITEMS_POSTED_H2D,
&alloced, FALSE);
if (h2d_mb_data == NULL) {
DHD_ERROR(("%s: FATAL: No space in control ring to send H2D Mb data\n",
__FUNCTION__));
- DHD_RING_UNLOCK(ctrl_ring->ring_lock, flags);
return BCME_NOMEM;
}
memset(h2d_mb_data, 0, sizeof(h2d_mailbox_data_t));
- /* Common msg buf hdr */
+ /* Common msg buf hdr */
h2d_mb_data->msg.msg_type = MSG_TYPE_H2D_MAILBOX_DATA;
- h2d_mb_data->msg.flags = ctrl_ring->current_phase;
+ h2d_mb_data->msg.flags = dhd->prot->h2dring_ctrl_subn.current_phase;
h2d_mb_data->msg.epoch =
- ctrl_ring->seqnum % H2D_EPOCH_MODULO;
- ctrl_ring->seqnum++;
+ dhd->prot->h2dring_ctrl_subn.seqnum % H2D_EPOCH_MODULO;
+ dhd->prot->h2dring_ctrl_subn.seqnum++;
- /* Update flow create message */
- h2d_mb_data->mail_box_data = htol32(mb_data);
+#ifdef PCIE_INB_DW
+ /* post device_wake first */
+ if ((num_post == 2) && (i == 0)) {
+ h2d_mb_data->mail_box_data = htol32(H2DMB_DS_DEVICE_WAKE);
+ } else
+#endif /* PCIE_INB_DW */
{
h2d_mb_data->mail_box_data = htol32(mb_data);
}
DHD_INFO(("%s Send H2D MB data Req data 0x%04x\n", __FUNCTION__, mb_data));
/* upd wrt ptr and raise interrupt */
- dhd_prot_ring_write_complete_mbdata(dhd, ctrl_ring, h2d_mb_data,
- DHD_FLOWRING_DEFAULT_NITEMS_POSTED_H2D, mb_data);
-
- DHD_RING_UNLOCK(ctrl_ring->ring_lock, flags);
-
+ /* the caller of dhd_prot_h2d_mbdata_send_ctrlmsg already holds the general lock */
+ dhd_prot_ring_write_complete(dhd, &dhd->prot->h2dring_ctrl_subn, h2d_mb_data,
+ DHD_FLOWRING_DEFAULT_NITEMS_POSTED_H2D);
+#ifdef PCIE_INB_DW
+ /* Add a delay if device_wake is posted */
+ if ((num_post == 2) && (i == 0)) {
+ OSL_DELAY(1000);
+ }
+#endif /* PCIE_INB_DW */
}
+
return 0;
}
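In the PCIE_INB_DW path added above, when the inband state is device-sleep (and, per the patch, only for the console-interrupt mailbox value), a DEVICE_WAKE mailbox item is posted first, a short delay follows, and only then is the requested mailbox data posted. The compact standalone model below generalizes that two-step submission; the enum, helper names, and wake code value are invented for illustration and are not the driver's identifiers.

/* Standalone model of the wake-then-send pattern above. Illustrative only. */
#include <stdio.h>

enum dev_state { DEV_ACTIVE, DEV_SLEEP };

#define MB_DEVICE_WAKE 0x0002u                 /* hypothetical wake code */

static void post_mb_item(unsigned data)
{
	printf("posted mailbox item 0x%04x\n", data);
}

static void delay_us(unsigned us)
{
	(void)us;                                  /* stand-in for the 1 ms delay */
}

static void send_mb_data(enum dev_state state, unsigned mb_data)
{
	/* Two posts when the device sleeps: wake it first, then send the request. */
	int num_post = (state == DEV_SLEEP) ? 2 : 1;
	int i;

	for (i = 0; i < num_post; i++) {
		if (num_post == 2 && i == 0) {
			post_mb_item(MB_DEVICE_WAKE);
			delay_us(1000);            /* give the dongle time to leave sleep */
		} else {
			post_mb_item(mb_data);
		}
	}
}

int main(void)
{
	send_mb_data(DEV_SLEEP, 0x0010u);
	send_mb_data(DEV_ACTIVE, 0x0010u);
	return 0;
}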
return BCME_NOMEM;
}
- DHD_RING_LOCK(ctrl_ring->ring_lock, flags);
+#ifdef PCIE_INB_DW
+ if (dhd_prot_inc_hostactive_devwake_assert(dhd->bus) != BCME_OK)
+ return BCME_ERROR;
+#endif /* PCIE_INB_DW */
+ DHD_GENERAL_LOCK(dhd, flags);
/* Request for ctrl_ring buffer space */
flow_create_rqst = (tx_flowring_create_request_t *)
dhd_prot_flowrings_pool_release(dhd, flow_ring_node->flowid, flow_ring);
DHD_ERROR(("%s: Flow Create Req flowid %d - failure ring space\n",
__FUNCTION__, flow_ring_node->flowid));
- DHD_RING_UNLOCK(ctrl_ring->ring_lock, flags);
+ DHD_GENERAL_UNLOCK(dhd, flags);
+#ifdef PCIE_INB_DW
+ dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus);
+#endif
return BCME_NOMEM;
}
flow_create_rqst->flow_ring_ptr.high_addr = flow_ring->base_addr.high_addr;
flow_create_rqst->max_items = htol16(prot->h2d_max_txpost);
flow_create_rqst->len_item = htol16(H2DRING_TXPOST_ITEMSIZE);
- flow_create_rqst->if_flags = 0;
-
-#ifdef DHD_HP2P
- /* Create HPP flow ring if HP2P is enabled and TID=7 and AWDL interface */
- /* and traffic is not multicast */
- /* Allow infra interface only if user enabled hp2p_infra_enable thru iovar */
- /* Allow only one HP2P Flow active at a time */
- if (dhd->hp2p_capable && !dhd->hp2p_ring_active &&
- flow_ring_node->flow_info.tid == HP2P_PRIO &&
- (dhd->hp2p_infra_enable || flow_create_rqst->msg.if_id) &&
- !ETHER_ISMULTI(flow_create_rqst->da)) {
- flow_create_rqst->if_flags |= BCMPCIE_FLOW_RING_INTF_HP2P;
- flow_ring_node->hp2p_ring = TRUE;
- dhd->hp2p_ring_active = TRUE;
-
- DHD_ERROR(("%s: flow ring for HP2P tid = %d flowid = %d\n",
- __FUNCTION__, flow_ring_node->flow_info.tid,
- flow_ring_node->flowid));
- }
-#endif /* DHD_HP2P */
/* definition for ifrm mask : bit0:d11ac core, bit1:d11ad core
* currently it is not used for priority. so uses solely for ifrm mask
/* update control subn ring's WR index and ring doorbell to dongle */
dhd_prot_ring_write_complete(dhd, ctrl_ring, flow_create_rqst, 1);
- DHD_RING_UNLOCK(ctrl_ring->ring_lock, flags);
+ DHD_GENERAL_UNLOCK(dhd, flags);
+#ifdef PCIE_INB_DW
+ dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus);
+#endif
return BCME_OK;
} /* dhd_prot_flow_ring_create */
ltoh16(resp->cmplt.status),
ltoh16(resp->cmplt.ring_id),
ltoh32(resp->cmn_hdr.request_id)));
- if ((ltoh32(resp->cmn_hdr.request_id) != DHD_H2D_DBGRING_REQ_PKTID) &&
- (ltoh32(resp->cmn_hdr.request_id) != DHD_H2D_BTLOGRING_REQ_PKTID)) {
+ if (ltoh32(resp->cmn_hdr.request_id) != DHD_H2D_DBGRING_REQ_PKTID) {
DHD_ERROR(("invalid request ID with h2d ring create complete\n"));
return;
}
- if (dhd->prot->h2dring_info_subn->create_req_id == ltoh32(resp->cmn_hdr.request_id) &&
- !dhd->prot->h2dring_info_subn->create_pending) {
+ if (!dhd->prot->h2dring_info_subn->create_pending) {
DHD_ERROR(("info ring create status for not pending submit ring\n"));
}
if (ltoh16(resp->cmplt.status) != BCMPCIE_SUCCESS) {
- DHD_ERROR(("info/btlog ring create failed with status %d\n",
+ DHD_ERROR(("info ring create failed with status %d\n",
ltoh16(resp->cmplt.status)));
return;
}
- if (dhd->prot->h2dring_info_subn->create_req_id == ltoh32(resp->cmn_hdr.request_id)) {
- dhd->prot->h2dring_info_subn->create_pending = FALSE;
- dhd->prot->h2dring_info_subn->inited = TRUE;
- DHD_ERROR(("info buffer post after ring create\n"));
- dhd_prot_infobufpost(dhd, dhd->prot->h2dring_info_subn);
- }
+ dhd->prot->h2dring_info_subn->create_pending = FALSE;
+ dhd->prot->h2dring_info_subn->inited = TRUE;
+ dhd_prot_infobufpost(dhd);
}
static void
ltoh16(resp->cmplt.status),
ltoh16(resp->cmplt.ring_id),
ltoh32(resp->cmn_hdr.request_id)));
- if ((ltoh32(resp->cmn_hdr.request_id) != DHD_D2H_DBGRING_REQ_PKTID) &&
- (ltoh32(resp->cmn_hdr.request_id) != DHD_D2H_BTLOGRING_REQ_PKTID) &&
-#ifdef DHD_HP2P
- (ltoh32(resp->cmn_hdr.request_id) != DHD_D2H_HPPRING_TXREQ_PKTID) &&
- (ltoh32(resp->cmn_hdr.request_id) != DHD_D2H_HPPRING_RXREQ_PKTID) &&
-#endif /* DHD_HP2P */
- TRUE) {
+ if (ltoh32(resp->cmn_hdr.request_id) != DHD_D2H_DBGRING_REQ_PKTID) {
DHD_ERROR(("invalid request ID with d2h ring create complete\n"));
return;
}
- if (ltoh32(resp->cmn_hdr.request_id) == DHD_D2H_DBGRING_REQ_PKTID) {
-#ifdef EWP_EDL
- if (!dhd->dongle_edl_support)
-#endif // endif
- {
- if (!dhd->prot->d2hring_info_cpln->create_pending) {
- DHD_ERROR(("info ring create status for not pending cpl ring\n"));
- return;
- }
+ if (!dhd->prot->d2hring_info_cpln->create_pending) {
+ DHD_ERROR(("info ring create status for not pending cpl ring\n"));
+ return;
+ }
- if (ltoh16(resp->cmplt.status) != BCMPCIE_SUCCESS) {
- DHD_ERROR(("info cpl ring create failed with status %d\n",
- ltoh16(resp->cmplt.status)));
- return;
- }
- dhd->prot->d2hring_info_cpln->create_pending = FALSE;
- dhd->prot->d2hring_info_cpln->inited = TRUE;
- }
-#ifdef EWP_EDL
- else {
- if (!dhd->prot->d2hring_edl->create_pending) {
- DHD_ERROR(("edl ring create status for not pending cpl ring\n"));
- return;
- }
-
- if (ltoh16(resp->cmplt.status) != BCMPCIE_SUCCESS) {
- DHD_ERROR(("edl cpl ring create failed with status %d\n",
- ltoh16(resp->cmplt.status)));
- return;
- }
- dhd->prot->d2hring_edl->create_pending = FALSE;
- dhd->prot->d2hring_edl->inited = TRUE;
- }
-#endif /* EWP_EDL */
- }
-
-#ifdef DHD_HP2P
- if (dhd->prot->d2hring_hp2p_txcpl &&
- ltoh32(resp->cmn_hdr.request_id) == DHD_D2H_HPPRING_TXREQ_PKTID) {
- if (!dhd->prot->d2hring_hp2p_txcpl->create_pending) {
- DHD_ERROR(("HPP tx ring create status for not pending cpl ring\n"));
- return;
- }
-
- if (ltoh16(resp->cmplt.status) != BCMPCIE_SUCCESS) {
- DHD_ERROR(("HPP tx cpl ring create failed with status %d\n",
- ltoh16(resp->cmplt.status)));
- return;
- }
- dhd->prot->d2hring_hp2p_txcpl->create_pending = FALSE;
- dhd->prot->d2hring_hp2p_txcpl->inited = TRUE;
- }
- if (dhd->prot->d2hring_hp2p_rxcpl &&
- ltoh32(resp->cmn_hdr.request_id) == DHD_D2H_HPPRING_RXREQ_PKTID) {
- if (!dhd->prot->d2hring_hp2p_rxcpl->create_pending) {
- DHD_ERROR(("HPP rx ring create status for not pending cpl ring\n"));
- return;
- }
-
- if (ltoh16(resp->cmplt.status) != BCMPCIE_SUCCESS) {
- DHD_ERROR(("HPP rx cpl ring create failed with status %d\n",
- ltoh16(resp->cmplt.status)));
- return;
- }
- dhd->prot->d2hring_hp2p_rxcpl->create_pending = FALSE;
- dhd->prot->d2hring_hp2p_rxcpl->inited = TRUE;
+ if (ltoh16(resp->cmplt.status) != BCMPCIE_SUCCESS) {
+ DHD_ERROR(("info cpl ring create failed with status %d\n",
+ ltoh16(resp->cmplt.status)));
+ return;
}
-#endif /* DHD_HP2P */
+ dhd->prot->d2hring_info_cpln->create_pending = FALSE;
+ dhd->prot->d2hring_info_cpln->inited = TRUE;
}
static void
static void
dhd_prot_process_d2h_host_ts_complete(dhd_pub_t *dhd, void* buf)
{
+#ifdef DHD_TIMESYNC
+ host_timestamp_msg_cpl_t *host_ts_cpl;
+ uint32 pktid;
+ dhd_prot_t *prot = dhd->prot;
+
+ host_ts_cpl = (host_timestamp_msg_cpl_t *)buf;
+ DHD_INFO(("%s host TS cpl: status %d, req_ID: 0x%04x, xt_id %d \n", __FUNCTION__,
+ host_ts_cpl->cmplt.status, host_ts_cpl->msg.request_id, host_ts_cpl->xt_id));
+
+ pktid = ltoh32(host_ts_cpl->msg.request_id);
+ if (prot->hostts_req_buf_inuse == FALSE) {
+ DHD_ERROR(("No Pending Host TS req, but completion\n"));
+ return;
+ }
+ prot->hostts_req_buf_inuse = FALSE;
+ if (pktid != DHD_H2D_HOSTTS_REQ_PKTID) {
+ DHD_ERROR(("Host TS req CPL, but req ID different 0x%04x, exp 0x%04x\n",
+ pktid, DHD_H2D_HOSTTS_REQ_PKTID));
+ return;
+ }
+ dhd_timesync_handle_host_ts_complete(dhd->ts, host_ts_cpl->xt_id,
+ host_ts_cpl->cmplt.status);
+#else /* DHD_TIMESYNC */
DHD_ERROR(("Timesunc feature not compiled in but GOT HOST_TS_COMPLETE\n"));
+#endif /* DHD_TIMESYNC */
}
void dhd_prot_print_flow_ring(dhd_pub_t *dhd, void *msgbuf_flow_info,
struct bcmstrbuf *strbuf, const char * fmt)
{
- const char *default_fmt =
- "RD %d WR %d BASE(VA) %p BASE(PA) %x:%x SIZE %d "
- "WORK_ITEM_SIZE %d MAX_WORK_ITEMS %d TOTAL_SIZE %d\n";
+ const char *default_fmt = "RD %d WR %d BASE(VA) %p BASE(PA) %x:%x"
+ " WORK ITEM SIZE %d MAX WORK ITEMS %d SIZE %d\n";
msgbuf_ring_t *flow_ring = (msgbuf_ring_t *)msgbuf_flow_info;
uint16 rd, wr;
uint32 dma_buf_len = flow_ring->max_items * flow_ring->item_len;
if (fmt == NULL) {
fmt = default_fmt;
}
-
- if (dhd->bus->is_linkdown) {
- DHD_ERROR(("%s: Skip dumping flowring due to Link down\n", __FUNCTION__));
- return;
- }
-
dhd_bus_cmn_readshared(dhd->bus, &rd, RING_RD_UPD, flow_ring->idx);
dhd_bus_cmn_readshared(dhd->bus, &wr, RING_WR_UPD, flow_ring->idx);
bcm_bprintf(strbuf, fmt, rd, wr, flow_ring->dma_buf.va,
ltoh32(flow_ring->base_addr.high_addr),
ltoh32(flow_ring->base_addr.low_addr),
- flow_ring->item_len, flow_ring->max_items,
- dma_buf_len);
+ flow_ring->item_len, flow_ring->max_items, dma_buf_len);
}
void dhd_prot_print_info(dhd_pub_t *dhd, struct bcmstrbuf *strbuf)
dhd_prot_print_flow_ring(dhd, prot->d2hring_info_cpln, strbuf,
" %5d %5d %17p %8x:%8x %14d %14d %10d\n");
}
- if (dhd->prot->d2hring_edl != NULL) {
- bcm_bprintf(strbuf, "%14s", "D2HRingEDL");
- dhd_prot_print_flow_ring(dhd, prot->d2hring_edl, strbuf,
- " %5d %5d %17p %8x:%8x %14d %14d %10d\n");
- }
bcm_bprintf(strbuf, "active_tx_count %d pktidmap_avail(ctrl/rx/tx) %d %d %d\n",
- OSL_ATOMIC_READ(dhd->osh, &dhd->prot->active_tx_count),
+ dhd->prot->active_tx_count,
DHD_PKTID_AVAIL(dhd->prot->pktid_ctrl_map),
DHD_PKTID_AVAIL(dhd->prot->pktid_rx_map),
DHD_PKTID_AVAIL(dhd->prot->pktid_tx_map));
uint16 alloced = 0;
msgbuf_ring_t *ring = &prot->h2dring_ctrl_subn;
- DHD_RING_LOCK(ring->ring_lock, flags);
+#ifdef PCIE_INB_DW
+ if (dhd_prot_inc_hostactive_devwake_assert(dhd->bus) != BCME_OK)
+ return BCME_ERROR;
+#endif /* PCIE_INB_DW */
+
+ DHD_GENERAL_LOCK(dhd, flags);
/* Request for ring buffer space */
flow_delete_rqst = (tx_flowring_delete_request_t *)
dhd_prot_alloc_ring_space(dhd, ring, 1, &alloced, FALSE);
if (flow_delete_rqst == NULL) {
- DHD_RING_UNLOCK(ring->ring_lock, flags);
+ DHD_GENERAL_UNLOCK(dhd, flags);
DHD_ERROR(("%s: Flow Delete Req - failure ring space\n", __FUNCTION__));
+#ifdef PCIE_INB_DW
+ dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus);
+#endif
return BCME_NOMEM;
}
/* update ring's WR index and ring doorbell to dongle */
dhd_prot_ring_write_complete(dhd, ring, flow_delete_rqst, 1);
-
- DHD_RING_UNLOCK(ring->ring_lock, flags);
+ DHD_GENERAL_UNLOCK(dhd, flags);
+#ifdef PCIE_INB_DW
+ dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus);
+#endif
return BCME_OK;
}
-static void BCMFASTPATH
-dhd_prot_flow_ring_fastdelete(dhd_pub_t *dhd, uint16 flowid, uint16 rd_idx)
-{
- flow_ring_node_t *flow_ring_node = DHD_FLOW_RING(dhd, flowid);
- msgbuf_ring_t *ring = (msgbuf_ring_t *)flow_ring_node->prot_info;
- host_txbuf_cmpl_t txstatus;
- host_txbuf_post_t *txdesc;
- uint16 wr_idx;
-
- DHD_INFO(("%s: FAST delete ring, flowid=%d, rd_idx=%d, wr_idx=%d\n",
- __FUNCTION__, flowid, rd_idx, ring->wr));
-
- memset(&txstatus, 0, sizeof(txstatus));
- txstatus.compl_hdr.flow_ring_id = flowid;
- txstatus.cmn_hdr.if_id = flow_ring_node->flow_info.ifindex;
- wr_idx = ring->wr;
-
- while (wr_idx != rd_idx) {
- if (wr_idx)
- wr_idx--;
- else
- wr_idx = ring->max_items - 1;
- txdesc = (host_txbuf_post_t *)((char *)DHD_RING_BGN_VA(ring) +
- (wr_idx * ring->item_len));
- txstatus.cmn_hdr.request_id = txdesc->cmn_hdr.request_id;
- dhd_prot_txstatus_process(dhd, &txstatus);
- }
-}
-
static void
dhd_prot_flow_ring_delete_response_process(dhd_pub_t *dhd, void *msg)
{
DHD_ERROR(("%s: Flow Delete Response status = %d Flow %d\n", __FUNCTION__,
flow_delete_resp->cmplt.status, flow_delete_resp->cmplt.flow_ring_id));
- if (dhd->fast_delete_ring_support) {
- dhd_prot_flow_ring_fastdelete(dhd, flow_delete_resp->cmplt.flow_ring_id,
- flow_delete_resp->read_idx);
- }
dhd_bus_flow_ring_delete_response(dhd->bus, flow_delete_resp->cmplt.flow_ring_id,
flow_delete_resp->cmplt.status);
}
uint16 alloced = 0;
msgbuf_ring_t *ring = &prot->h2dring_ctrl_subn;
- DHD_RING_LOCK(ring->ring_lock, flags);
+#ifdef PCIE_INB_DW
+ if (dhd_prot_inc_hostactive_devwake_assert(dhd->bus) != BCME_OK)
+ return BCME_ERROR;
+#endif /* PCIE_INB_DW */
+
+ DHD_GENERAL_LOCK(dhd, flags);
/* Request for ring buffer space */
flow_flush_rqst = (tx_flowring_flush_request_t *)
dhd_prot_alloc_ring_space(dhd, ring, 1, &alloced, FALSE);
if (flow_flush_rqst == NULL) {
- DHD_RING_UNLOCK(ring->ring_lock, flags);
+ DHD_GENERAL_UNLOCK(dhd, flags);
DHD_ERROR(("%s: Flow Flush Req - failure ring space\n", __FUNCTION__));
+#ifdef PCIE_INB_DW
+ dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus);
+#endif
return BCME_NOMEM;
}
/* update ring's WR index and ring doorbell to dongle */
dhd_prot_ring_write_complete(dhd, ring, flow_flush_rqst, 1);
-
- DHD_RING_UNLOCK(ring->ring_lock, flags);
+ DHD_GENERAL_UNLOCK(dhd, flags);
+#ifdef PCIE_INB_DW
+ dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus);
+#endif
return BCME_OK;
} /* dhd_prot_flow_ring_flush */
msgbuf_ring_t *ctrl_ring = &prot->h2dring_ctrl_subn;
const uint16 d2h_rings = BCMPCIE_D2H_COMMON_MSGRINGS;
+#ifdef PCIE_INB_DW
+ if (dhd_prot_inc_hostactive_devwake_assert(dhd->bus) != BCME_OK)
+ return BCME_ERROR;
+#endif /* PCIE_INB_DW */
/* Claim space for d2h_ring number of d2h_ring_config_req_t messages */
- DHD_RING_LOCK(ctrl_ring->ring_lock, flags);
+ DHD_GENERAL_LOCK(dhd, flags);
+
msg_start = dhd_prot_alloc_ring_space(dhd, ctrl_ring, d2h_rings, &alloced, TRUE);
if (msg_start == NULL) {
DHD_ERROR(("%s Msgbuf no space for %d D2H ring config soft doorbells\n",
__FUNCTION__, d2h_rings));
- DHD_RING_UNLOCK(ctrl_ring->ring_lock, flags);
+ DHD_GENERAL_UNLOCK(dhd, flags);
+#ifdef PCIE_INB_DW
+ dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus);
+#endif
return;
}
/* update control subn ring's WR index and ring doorbell to dongle */
dhd_prot_ring_write_complete(dhd, ctrl_ring, msg_start, d2h_rings);
-
- DHD_RING_UNLOCK(ctrl_ring->ring_lock, flags);
-
+ DHD_GENERAL_UNLOCK(dhd, flags);
+#ifdef PCIE_INB_DW
+ dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus);
+#endif
#endif /* DHD_D2H_SOFT_DOORBELL_SUPPORT */
}
ltoh16(((ring_config_resp_t *)msg)->compl_hdr.flow_ring_id)));
}
+int
+dhd_prot_debug_dma_info_print(dhd_pub_t *dhd)
+{
+ if (dhd->bus->is_linkdown) {
+ DHD_ERROR(("\n ------- SKIP DUMPING DMA Registers "
+ "due to PCIe link down ------- \r\n"));
+ return 0;
+ }
+
+ DHD_ERROR(("\n ------- DUMPING DMA Registers ------- \r\n"));
+
+ //HostToDev
+ DHD_ERROR(("HostToDev TX: XmtCtrl=0x%08x XmtPtr=0x%08x\n",
+ si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x200, 0, 0),
+ si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x204, 0, 0)));
+ DHD_ERROR((" : XmtAddrLow=0x%08x XmtAddrHigh=0x%08x\n",
+ si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x208, 0, 0),
+ si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x20C, 0, 0)));
+ DHD_ERROR((" : XmtStatus0=0x%08x XmtStatus1=0x%08x\n",
+ si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x210, 0, 0),
+ si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x214, 0, 0)));
+
+ DHD_ERROR(("HostToDev RX: RcvCtrl=0x%08x RcvPtr=0x%08x\n",
+ si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x220, 0, 0),
+ si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x224, 0, 0)));
+ DHD_ERROR((" : RcvAddrLow=0x%08x RcvAddrHigh=0x%08x\n",
+ si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x228, 0, 0),
+ si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x22C, 0, 0)));
+ DHD_ERROR((" : RcvStatus0=0x%08x RcvStatus1=0x%08x\n",
+ si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x230, 0, 0),
+ si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x234, 0, 0)));
+
+ //DevToHost
+ DHD_ERROR(("DevToHost TX: XmtCtrl=0x%08x XmtPtr=0x%08x\n",
+ si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x240, 0, 0),
+ si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x244, 0, 0)));
+ DHD_ERROR((" : XmtAddrLow=0x%08x XmtAddrHigh=0x%08x\n",
+ si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x248, 0, 0),
+ si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x24C, 0, 0)));
+ DHD_ERROR((" : XmtStatus0=0x%08x XmtStatus1=0x%08x\n",
+ si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x250, 0, 0),
+ si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x254, 0, 0)));
+
+ DHD_ERROR(("DevToHost RX: RcvCtrl=0x%08x RcvPtr=0x%08x\n",
+ si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x260, 0, 0),
+ si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x264, 0, 0)));
+ DHD_ERROR((" : RcvAddrLow=0x%08x RcvAddrHigh=0x%08x\n",
+ si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x268, 0, 0),
+ si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x26C, 0, 0)));
+ DHD_ERROR((" : RcvStatus0=0x%08x RcvStatus1=0x%08x\n",
+ si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x270, 0, 0),
+ si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x274, 0, 0)));
+
+ return 0;
+}
+
int
dhd_prot_debug_info_print(dhd_pub_t *dhd)
{
dhd_prot_t *prot = dhd->prot;
msgbuf_ring_t *ring;
uint16 rd, wr;
+ uint32 intstatus = 0;
+ uint32 intmask = 0;
+ uint32 mbintstatus = 0;
+ uint32 d2h_mb_data = 0;
uint32 dma_buf_len;
- uint64 current_time;
- ulong ring_tcm_rd_addr; /* dongle address */
- ulong ring_tcm_wr_addr; /* dongle address */
DHD_ERROR(("\n ------- DUMPING VERSION INFORMATION ------- \r\n"));
DHD_ERROR(("DHD: %s\n", dhd_version));
DHD_ERROR(("Firmware: %s\n", fw_version));
-#ifdef DHD_FW_COREDUMP
- DHD_ERROR(("\n ------- DUMPING CONFIGURATION INFORMATION ------ \r\n"));
- DHD_ERROR(("memdump mode: %d\n", dhd->memdump_enabled));
-#endif /* DHD_FW_COREDUMP */
-
DHD_ERROR(("\n ------- DUMPING PROTOCOL INFORMATION ------- \r\n"));
DHD_ERROR(("ICPrevs: Dev %d, Host %d, active %d\n",
prot->device_ipc_version,
prot->host_ipc_version,
prot->active_ipc_version));
- DHD_ERROR(("d2h_intr_method -> %s\n",
- dhd->bus->d2h_intr_method ? "PCIE_MSI" : "PCIE_INTX"));
DHD_ERROR(("max Host TS bufs to post: %d, posted %d\n",
prot->max_tsbufpost, prot->cur_ts_bufs_posted));
DHD_ERROR(("max INFO bufs to post: %d, posted %d\n",
DHD_ERROR(("h2d_max_txpost: %d, prot->h2d_max_txpost: %d\n",
h2d_max_txpost, prot->h2d_max_txpost));
- current_time = OSL_LOCALTIME_NS();
- DHD_ERROR(("current_time="SEC_USEC_FMT"\n", GET_SEC_USEC(current_time)));
- DHD_ERROR(("ioctl_fillup_time="SEC_USEC_FMT
- " ioctl_ack_time="SEC_USEC_FMT
- " ioctl_cmplt_time="SEC_USEC_FMT"\n",
- GET_SEC_USEC(prot->ioctl_fillup_time),
- GET_SEC_USEC(prot->ioctl_ack_time),
- GET_SEC_USEC(prot->ioctl_cmplt_time)));
-
- /* Check PCIe INT registers */
- if (!dhd_pcie_dump_int_regs(dhd)) {
- DHD_ERROR(("%s : PCIe link might be down\n", __FUNCTION__));
- dhd->bus->is_linkdown = TRUE;
- }
-
DHD_ERROR(("\n ------- DUMPING IOCTL RING RD WR Pointers ------- \r\n"));
ring = &prot->h2dring_ctrl_subn;
dma_buf_len = ring->max_items * ring->item_len;
- ring_tcm_rd_addr = dhd->bus->ring_sh[ring->idx].ring_state_r;
- ring_tcm_wr_addr = dhd->bus->ring_sh[ring->idx].ring_state_w;
- DHD_ERROR(("CtrlPost: Mem Info: BASE(VA) %p BASE(PA) %x:%x tcm_rd_wr 0x%lx:0x%lx "
- "SIZE %d \r\n",
+ DHD_ERROR(("CtrlPost: Mem Info: BASE(VA) %p BASE(PA) %x:%x SIZE %d \r\n",
ring->dma_buf.va, ltoh32(ring->base_addr.high_addr),
- ltoh32(ring->base_addr.low_addr), ring_tcm_rd_addr, ring_tcm_wr_addr, dma_buf_len));
+ ltoh32(ring->base_addr.low_addr), dma_buf_len));
DHD_ERROR(("CtrlPost: From Host mem: RD: %d WR %d \r\n", ring->rd, ring->wr));
- if (dhd->bus->is_linkdown) {
- DHD_ERROR(("CtrlPost: From Shared Mem: RD and WR are invalid"
- " due to PCIe link down\r\n"));
- } else {
- dhd_bus_cmn_readshared(dhd->bus, &rd, RING_RD_UPD, ring->idx);
- dhd_bus_cmn_readshared(dhd->bus, &wr, RING_WR_UPD, ring->idx);
- DHD_ERROR(("CtrlPost: From Shared Mem: RD: %d WR %d \r\n", rd, wr));
- }
+ dhd_bus_cmn_readshared(dhd->bus, &rd, RING_RD_UPD, ring->idx);
+ dhd_bus_cmn_readshared(dhd->bus, &wr, RING_WR_UPD, ring->idx);
+ DHD_ERROR(("CtrlPost: From Shared Mem: RD: %d WR %d \r\n", rd, wr));
DHD_ERROR(("CtrlPost: seq num: %d \r\n", ring->seqnum % H2D_EPOCH_MODULO));
ring = &prot->d2hring_ctrl_cpln;
dma_buf_len = ring->max_items * ring->item_len;
- ring_tcm_rd_addr = dhd->bus->ring_sh[ring->idx].ring_state_r;
- ring_tcm_wr_addr = dhd->bus->ring_sh[ring->idx].ring_state_w;
- DHD_ERROR(("CtrlCpl: Mem Info: BASE(VA) %p BASE(PA) %x:%x tcm_rd_wr 0x%lx:0x%lx "
- "SIZE %d \r\n",
+ DHD_ERROR(("CtrlCpl: Mem Info: BASE(VA) %p BASE(PA) %x:%x SIZE %d \r\n",
ring->dma_buf.va, ltoh32(ring->base_addr.high_addr),
- ltoh32(ring->base_addr.low_addr), ring_tcm_rd_addr, ring_tcm_wr_addr, dma_buf_len));
+ ltoh32(ring->base_addr.low_addr), dma_buf_len));
DHD_ERROR(("CtrlCpl: From Host mem: RD: %d WR %d \r\n", ring->rd, ring->wr));
- if (dhd->bus->is_linkdown) {
- DHD_ERROR(("CtrlCpl: From Shared Mem: RD and WR are invalid"
- " due to PCIe link down\r\n"));
- } else {
- dhd_bus_cmn_readshared(dhd->bus, &rd, RING_RD_UPD, ring->idx);
- dhd_bus_cmn_readshared(dhd->bus, &wr, RING_WR_UPD, ring->idx);
- DHD_ERROR(("CtrlCpl: From Shared Mem: RD: %d WR %d \r\n", rd, wr));
- }
+ dhd_bus_cmn_readshared(dhd->bus, &rd, RING_RD_UPD, ring->idx);
+ dhd_bus_cmn_readshared(dhd->bus, &wr, RING_WR_UPD, ring->idx);
+ DHD_ERROR(("CtrlCpl: From Shared Mem: RD: %d WR %d \r\n", rd, wr));
DHD_ERROR(("CtrlCpl: Expected seq num: %d \r\n", ring->seqnum % H2D_EPOCH_MODULO));
ring = prot->h2dring_info_subn;
if (ring) {
dma_buf_len = ring->max_items * ring->item_len;
- ring_tcm_rd_addr = dhd->bus->ring_sh[ring->idx].ring_state_r;
- ring_tcm_wr_addr = dhd->bus->ring_sh[ring->idx].ring_state_w;
- DHD_ERROR(("InfoSub: Mem Info: BASE(VA) %p BASE(PA) %x:%x tcm_rd_wr 0x%lx:0x%lx "
- "SIZE %d \r\n",
+ DHD_ERROR(("InfoSub: Mem Info: BASE(VA) %p BASE(PA) %x:%x SIZE %d \r\n",
ring->dma_buf.va, ltoh32(ring->base_addr.high_addr),
- ltoh32(ring->base_addr.low_addr), ring_tcm_rd_addr, ring_tcm_wr_addr,
- dma_buf_len));
+ ltoh32(ring->base_addr.low_addr), dma_buf_len));
DHD_ERROR(("InfoSub: From Host mem: RD: %d WR %d \r\n", ring->rd, ring->wr));
- if (dhd->bus->is_linkdown) {
- DHD_ERROR(("InfoSub: From Shared Mem: RD and WR are invalid"
- " due to PCIe link down\r\n"));
- } else {
- dhd_bus_cmn_readshared(dhd->bus, &rd, RING_RD_UPD, ring->idx);
- dhd_bus_cmn_readshared(dhd->bus, &wr, RING_WR_UPD, ring->idx);
- DHD_ERROR(("InfoSub: From Shared Mem: RD: %d WR %d \r\n", rd, wr));
- }
+ dhd_bus_cmn_readshared(dhd->bus, &rd, RING_RD_UPD, ring->idx);
+ dhd_bus_cmn_readshared(dhd->bus, &wr, RING_WR_UPD, ring->idx);
+ DHD_ERROR(("InfoSub: From Shared Mem: RD: %d WR %d \r\n", rd, wr));
DHD_ERROR(("InfoSub: seq num: %d \r\n", ring->seqnum % H2D_EPOCH_MODULO));
}
ring = prot->d2hring_info_cpln;
if (ring) {
dma_buf_len = ring->max_items * ring->item_len;
- ring_tcm_rd_addr = dhd->bus->ring_sh[ring->idx].ring_state_r;
- ring_tcm_wr_addr = dhd->bus->ring_sh[ring->idx].ring_state_w;
- DHD_ERROR(("InfoCpl: Mem Info: BASE(VA) %p BASE(PA) %x:%x tcm_rd_wr 0x%lx:0x%lx "
- "SIZE %d \r\n",
+ DHD_ERROR(("InfoCpl: Mem Info: BASE(VA) %p BASE(PA) %x:%x SIZE %d \r\n",
ring->dma_buf.va, ltoh32(ring->base_addr.high_addr),
- ltoh32(ring->base_addr.low_addr), ring_tcm_rd_addr, ring_tcm_wr_addr,
- dma_buf_len));
+ ltoh32(ring->base_addr.low_addr), dma_buf_len));
DHD_ERROR(("InfoCpl: From Host mem: RD: %d WR %d \r\n", ring->rd, ring->wr));
- if (dhd->bus->is_linkdown) {
- DHD_ERROR(("InfoCpl: From Shared Mem: RD and WR are invalid"
- " due to PCIe link down\r\n"));
- } else {
- dhd_bus_cmn_readshared(dhd->bus, &rd, RING_RD_UPD, ring->idx);
- dhd_bus_cmn_readshared(dhd->bus, &wr, RING_WR_UPD, ring->idx);
- DHD_ERROR(("InfoCpl: From Shared Mem: RD: %d WR %d \r\n", rd, wr));
- }
- DHD_ERROR(("InfoCpl: Expected seq num: %d \r\n", ring->seqnum % D2H_EPOCH_MODULO));
- }
-
- ring = &prot->d2hring_tx_cpln;
- if (ring) {
- ring_tcm_rd_addr = dhd->bus->ring_sh[ring->idx].ring_state_r;
- ring_tcm_wr_addr = dhd->bus->ring_sh[ring->idx].ring_state_w;
- dma_buf_len = ring->max_items * ring->item_len;
- DHD_ERROR(("TxCpl: Mem Info: BASE(VA) %p BASE(PA) %x:%x tcm_rd_wr 0x%lx:0x%lx "
- "SIZE %d \r\n",
- ring->dma_buf.va, ltoh32(ring->base_addr.high_addr),
- ltoh32(ring->base_addr.low_addr), ring_tcm_rd_addr, ring_tcm_wr_addr,
- dma_buf_len));
- DHD_ERROR(("TxCpl: From Host mem: RD: %d WR %d \r\n", ring->rd, ring->wr));
- if (dhd->bus->is_linkdown) {
- DHD_ERROR(("TxCpl: From Shared Mem: RD and WR are invalid"
- " due to PCIe link down\r\n"));
- } else {
- dhd_bus_cmn_readshared(dhd->bus, &rd, RING_RD_UPD, ring->idx);
- dhd_bus_cmn_readshared(dhd->bus, &wr, RING_WR_UPD, ring->idx);
- DHD_ERROR(("TxCpl: From Shared Mem: RD: %d WR %d \r\n", rd, wr));
- }
- DHD_ERROR(("TxCpl: Expected seq num: %d \r\n", ring->seqnum % D2H_EPOCH_MODULO));
- }
-
- ring = &prot->d2hring_rx_cpln;
- if (ring) {
- ring_tcm_rd_addr = dhd->bus->ring_sh[ring->idx].ring_state_r;
- ring_tcm_wr_addr = dhd->bus->ring_sh[ring->idx].ring_state_w;
- dma_buf_len = ring->max_items * ring->item_len;
- DHD_ERROR(("RxCpl: Mem Info: BASE(VA) %p BASE(PA) %x:%x tcm_rd_wr 0x%lx:0x%lx "
- "SIZE %d \r\n",
- ring->dma_buf.va, ltoh32(ring->base_addr.high_addr),
- ltoh32(ring->base_addr.low_addr), ring_tcm_rd_addr, ring_tcm_wr_addr,
- dma_buf_len));
- DHD_ERROR(("RxCpl: From Host mem: RD: %d WR %d \r\n", ring->rd, ring->wr));
- if (dhd->bus->is_linkdown) {
- DHD_ERROR(("RxCpl: From Shared Mem: RD and WR are invalid"
- " due to PCIe link down\r\n"));
- } else {
- dhd_bus_cmn_readshared(dhd->bus, &rd, RING_RD_UPD, ring->idx);
- dhd_bus_cmn_readshared(dhd->bus, &wr, RING_WR_UPD, ring->idx);
- DHD_ERROR(("RxCpl: From Shared Mem: RD: %d WR %d \r\n", rd, wr));
- }
- DHD_ERROR(("RxCpl: Expected seq num: %d \r\n", ring->seqnum % D2H_EPOCH_MODULO));
- }
-#ifdef EWP_EDL
- ring = prot->d2hring_edl;
- if (ring) {
- ring_tcm_rd_addr = dhd->bus->ring_sh[ring->idx].ring_state_r;
- ring_tcm_wr_addr = dhd->bus->ring_sh[ring->idx].ring_state_w;
- dma_buf_len = ring->max_items * ring->item_len;
- DHD_ERROR(("EdlRing: Mem Info: BASE(VA) %p BASE(PA) %x:%x tcm_rd_wr 0x%lx:0x%lx "
- "SIZE %d \r\n",
- ring->dma_buf.va, ltoh32(ring->base_addr.high_addr),
- ltoh32(ring->base_addr.low_addr), ring_tcm_rd_addr, ring_tcm_wr_addr,
- dma_buf_len));
- DHD_ERROR(("EdlRing: From Host mem: RD: %d WR %d \r\n", ring->rd, ring->wr));
- if (dhd->bus->is_linkdown) {
- DHD_ERROR(("EdlRing: From Shared Mem: RD and WR are invalid"
- " due to PCIe link down\r\n"));
- } else {
- dhd_bus_cmn_readshared(dhd->bus, &rd, RING_RD_UPD, ring->idx);
- dhd_bus_cmn_readshared(dhd->bus, &wr, RING_WR_UPD, ring->idx);
- DHD_ERROR(("EdlRing: From Shared Mem: RD: %d WR %d \r\n", rd, wr));
- }
- DHD_ERROR(("EdlRing: Expected seq num: %d \r\n",
- ring->seqnum % D2H_EPOCH_MODULO));
+ dhd_bus_cmn_readshared(dhd->bus, &rd, RING_RD_UPD, ring->idx);
+ dhd_bus_cmn_readshared(dhd->bus, &wr, RING_WR_UPD, ring->idx);
+ DHD_ERROR(("InfoCpl: From Shared Mem: RD: %d WR %d \r\n", rd, wr));
+ DHD_ERROR(("InfoCpl: Expected seq num: %d \r\n", ring->seqnum % H2D_EPOCH_MODULO));
}
-#endif /* EWP_EDL */
DHD_ERROR(("%s: cur_ioctlresp_bufs_posted %d cur_event_bufs_posted %d\n",
__FUNCTION__, prot->cur_ioctlresp_bufs_posted, prot->cur_event_bufs_posted));
-#ifdef DHD_LIMIT_MULTI_CLIENT_FLOWRINGS
- DHD_ERROR(("%s: multi_client_flow_rings:%d max_multi_client_flow_rings:%d\n",
- __FUNCTION__, dhd->multi_client_flow_rings, dhd->max_multi_client_flow_rings));
-#endif /* DHD_LIMIT_MULTI_CLIENT_FLOWRINGS */
-
- DHD_ERROR(("pktid_txq_start_cnt: %d\n", prot->pktid_txq_start_cnt));
- DHD_ERROR(("pktid_txq_stop_cnt: %d\n", prot->pktid_txq_stop_cnt));
- DHD_ERROR(("pktid_depleted_cnt: %d\n", prot->pktid_depleted_cnt));
-
- dhd_pcie_debug_info_dump(dhd);
+ if (!dhd->bus->is_linkdown && dhd->bus->intstatus != (uint32)-1) {
+ DHD_ERROR(("\n ------- DUMPING INTR Status and Masks ------- \r\n"));
+ intstatus = si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
+ PCIMailBoxInt, 0, 0);
+ intmask = si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
+ PCIMailBoxMask, 0, 0);
+ mbintstatus = si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
+ PCID2H_MailBox, 0, 0);
+ dhd_bus_cmn_readshared(dhd->bus, &d2h_mb_data, D2H_MB_DATA, 0);
+
+ DHD_ERROR(("intstatus=0x%x intmask=0x%x mbintstatus=0x%x\n",
+ intstatus, intmask, mbintstatus));
+ DHD_ERROR(("d2h_mb_data=0x%x def_intmask=0x%x \r\n", d2h_mb_data,
+ dhd->bus->def_intmask));
+
+ DHD_ERROR(("host pcie_irq enabled = %d\n", dhdpcie_irq_enabled(dhd->bus)));
+
+ DHD_ERROR(("\n ------- DUMPING PCIE Registers ------- \r\n"));
+ /* hwnbu-twiki.sj.broadcom.com/bin/view/Mwgroup/CurrentPcieGen2ProgramGuide */
+ DHD_ERROR(("Status Command(0x%x)=0x%x, BaseAddress0(0x%x)=0x%x\n",
+ PCIECFGREG_STATUS_CMD,
+ dhd_pcie_config_read(dhd->bus->osh, PCIECFGREG_STATUS_CMD, sizeof(uint32)),
+ PCIECFGREG_BASEADDR0,
+ dhd_pcie_config_read(dhd->bus->osh, PCIECFGREG_BASEADDR0, sizeof(uint32))));
+ DHD_ERROR(("LinkCtl(0x%x)=0x%x DeviceStatusControl2(0x%x)=0x%x "
+ "L1SSControl(0x%x)=0x%x\n", PCIECFGREG_LINK_STATUS_CTRL,
+ dhd_pcie_config_read(dhd->bus->osh, PCIECFGREG_LINK_STATUS_CTRL,
+ sizeof(uint32)), PCIECFGGEN_DEV_STATUS_CTRL2,
+ dhd_pcie_config_read(dhd->bus->osh, PCIECFGGEN_DEV_STATUS_CTRL2,
+ sizeof(uint32)), PCIECFGREG_PML1_SUB_CTRL1,
+ dhd_pcie_config_read(dhd->bus->osh, PCIECFGREG_PML1_SUB_CTRL1,
+ sizeof(uint32))));
+
+ /* hwnbu-twiki.sj.broadcom.com/twiki/pub/Mwgroup/
+ * CurrentPcieGen2ProgramGuide/pcie_ep.htm
+ */
+ DHD_ERROR(("ClkReq0(0x%x)=0x%x ClkReq1(0x%x)=0x%x ClkReq2(0x%x)=0x%x "
+ "ClkReq3(0x%x)=0x%x\n", PCIECFGREG_PHY_DBG_CLKREQ0,
+ dhd_pcie_corereg_read(dhd->bus->sih, PCIECFGREG_PHY_DBG_CLKREQ0),
+ PCIECFGREG_PHY_DBG_CLKREQ1,
+ dhd_pcie_corereg_read(dhd->bus->sih, PCIECFGREG_PHY_DBG_CLKREQ1),
+ PCIECFGREG_PHY_DBG_CLKREQ2,
+ dhd_pcie_corereg_read(dhd->bus->sih, PCIECFGREG_PHY_DBG_CLKREQ2),
+ PCIECFGREG_PHY_DBG_CLKREQ3,
+ dhd_pcie_corereg_read(dhd->bus->sih, PCIECFGREG_PHY_DBG_CLKREQ3)));
+
+#if defined(PCIE_RC_VENDOR_ID) && defined(PCIE_RC_DEVICE_ID)
+ DHD_ERROR(("Pcie RC Error Status Val=0x%x\n",
+ dhdpcie_rc_access_cap(dhd->bus, PCIE_EXTCAP_ID_ERR,
+ PCIE_EXTCAP_AER_UCERR_OFFSET, TRUE, FALSE, 0)));
+
+ DHD_ERROR(("RootPort PCIe linkcap=0x%08x\n",
+ dhd_debug_get_rc_linkcap(dhd->bus)));
+#endif
+
+ DHD_ERROR(("\n ------- DUMPING INTR enable/disable counters ------- \r\n"));
+ DHD_ERROR(("resume_intr_enable_count=%lu dpc_intr_enable_count=%lu\n"
+ "isr_intr_disable_count=%lu suspend_intr_disable_count=%lu\n"
+ "dpc_return_busdown_count=%lu\n",
+ dhd->bus->resume_intr_enable_count, dhd->bus->dpc_intr_enable_count,
+ dhd->bus->isr_intr_disable_count, dhd->bus->suspend_intr_disable_count,
+ dhd->bus->dpc_return_busdown_count));
+
+ }
+ dhd_prot_debug_dma_info_print(dhd);
+#ifdef DHD_FW_COREDUMP
+ if (dhd->memdump_enabled) {
+#ifdef DHD_SSSR_DUMP
+ if (dhd->sssr_inited) {
+ dhdpcie_sssr_dump(dhd);
+ }
+#endif /* DHD_SSSR_DUMP */
+ }
+#endif /* DHD_FW_COREDUMP */
return 0;
}
{
uint32 *ptr;
uint32 value;
+ uint32 i;
+ uint32 max_h2d_queues = dhd_bus_max_h2d_queues(dhd->bus);
- if (dhd->prot->d2h_dma_indx_wr_buf.va) {
- uint32 i;
- uint32 max_h2d_queues = dhd_bus_max_h2d_queues(dhd->bus);
+ OSL_CACHE_INV((void *)dhd->prot->d2h_dma_indx_wr_buf.va,
+ dhd->prot->d2h_dma_indx_wr_buf.len);
- OSL_CACHE_INV((void *)dhd->prot->d2h_dma_indx_wr_buf.va,
- dhd->prot->d2h_dma_indx_wr_buf.len);
+ ptr = (uint32 *)(dhd->prot->d2h_dma_indx_wr_buf.va);
- ptr = (uint32 *)(dhd->prot->d2h_dma_indx_wr_buf.va);
+ bcm_bprintf(b, "\n max_tx_queues %d\n", max_h2d_queues);
- bcm_bprintf(b, "\n max_tx_queues %d\n", max_h2d_queues);
+ bcm_bprintf(b, "\nRPTR block H2D common rings, 0x%04x\n", ptr);
+ value = ltoh32(*ptr);
+ bcm_bprintf(b, "\tH2D CTRL: value 0x%04x\n", value);
+ ptr++;
+ value = ltoh32(*ptr);
+ bcm_bprintf(b, "\tH2D RXPOST: value 0x%04x\n", value);
- bcm_bprintf(b, "\nRPTR block H2D common rings, 0x%04x\n", ptr);
- value = ltoh32(*ptr);
- bcm_bprintf(b, "\tH2D CTRL: value 0x%04x\n", value);
- ptr++;
+ ptr++;
+ bcm_bprintf(b, "RPTR block Flow rings , 0x%04x\n", ptr);
+ for (i = BCMPCIE_H2D_COMMON_MSGRINGS; i < max_h2d_queues; i++) {
value = ltoh32(*ptr);
- bcm_bprintf(b, "\tH2D RXPOST: value 0x%04x\n", value);
-
+ bcm_bprintf(b, "\tflowring ID %d: value 0x%04x\n", i, value);
ptr++;
- bcm_bprintf(b, "RPTR block Flow rings , 0x%04x\n", ptr);
- for (i = BCMPCIE_H2D_COMMON_MSGRINGS; i < max_h2d_queues; i++) {
- value = ltoh32(*ptr);
- bcm_bprintf(b, "\tflowring ID %d: value 0x%04x\n", i, value);
- ptr++;
- }
}
- if (dhd->prot->h2d_dma_indx_rd_buf.va) {
- OSL_CACHE_INV((void *)dhd->prot->h2d_dma_indx_rd_buf.va,
- dhd->prot->h2d_dma_indx_rd_buf.len);
+ OSL_CACHE_INV((void *)dhd->prot->h2d_dma_indx_rd_buf.va,
+ dhd->prot->h2d_dma_indx_rd_buf.len);
- ptr = (uint32 *)(dhd->prot->h2d_dma_indx_rd_buf.va);
+ ptr = (uint32 *)(dhd->prot->h2d_dma_indx_rd_buf.va);
- bcm_bprintf(b, "\nWPTR block D2H common rings, 0x%04x\n", ptr);
- value = ltoh32(*ptr);
- bcm_bprintf(b, "\tD2H CTRLCPLT: value 0x%04x\n", value);
- ptr++;
- value = ltoh32(*ptr);
- bcm_bprintf(b, "\tD2H TXCPLT: value 0x%04x\n", value);
- ptr++;
- value = ltoh32(*ptr);
- bcm_bprintf(b, "\tD2H RXCPLT: value 0x%04x\n", value);
- }
+ bcm_bprintf(b, "\nWPTR block D2H common rings, 0x%04x\n", ptr);
+ value = ltoh32(*ptr);
+ bcm_bprintf(b, "\tD2H CTRLCPLT: value 0x%04x\n", value);
+ ptr++;
+ value = ltoh32(*ptr);
+ bcm_bprintf(b, "\tD2H TXCPLT: value 0x%04x\n", value);
+ ptr++;
+ value = ltoh32(*ptr);
+ bcm_bprintf(b, "\tD2H RXCPLT: value 0x%04x\n", value);
return 0;
}
dhd_prot_t *prot = dhd->prot;
#if DHD_DBG_SHOW_METADATA
prot->metadata_dbg = val;
-#endif // endif
+#endif
return (uint32)prot->metadata_dbg;
}
#endif /* DHD_RX_CHAINING */
+
#ifdef IDLE_TX_FLOW_MGMT
int
dhd_prot_flow_ring_resume(dhd_pub_t *dhd, flow_ring_node_t *flow_ring_node)
__FUNCTION__, flow_ring_node->flowid));
return BCME_NOMEM;
}
+#ifdef PCIE_INB_DW
+ if (dhd_prot_inc_hostactive_devwake_assert(dhd->bus) != BCME_OK)
+ return BCME_ERROR;
+#endif /* PCIE_INB_DW */
- DHD_RING_LOCK(ctrl_ring->ring_lock, flags);
+ DHD_GENERAL_LOCK(dhd, flags);
/* Request for ctrl_ring buffer space */
flow_resume_rqst = (tx_idle_flowring_resume_request_t *)
dhd_prot_flowrings_pool_release(dhd, flow_ring_node->flowid, flow_ring);
DHD_ERROR(("%s: Flow resume Req flowid %d - failure ring space\n",
__FUNCTION__, flow_ring_node->flowid));
- DHD_RING_UNLOCK(ctrl_ring->ring_lock, flags);
+ DHD_GENERAL_UNLOCK(dhd, flags);
+#ifdef PCIE_INB_DW
+ dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus);
+#endif
return BCME_NOMEM;
}
/* update control subn ring's WR index and ring doorbell to dongle */
dhd_prot_ring_write_complete(dhd, ctrl_ring, flow_resume_rqst, 1);
- DHD_RING_UNLOCK(ctrl_ring->ring_lock, flags);
+ DHD_GENERAL_UNLOCK(dhd, flags);
+#ifdef PCIE_INB_DW
+ dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus);
+#endif
return BCME_OK;
} /* dhd_prot_flow_ring_create */
uint16 alloced = 0;
msgbuf_ring_t *ring = &prot->h2dring_ctrl_subn;
- DHD_RING_LOCK(ring->ring_lock, flags);
+#ifdef PCIE_INB_DW
+ if (dhd_prot_inc_hostactive_devwake_assert(dhd->bus) != BCME_OK)
+ return BCME_ERROR;
+#endif /* PCIE_INB_DW */
+
+ DHD_GENERAL_LOCK(dhd, flags);
/* Request for ring buffer space */
flow_suspend_rqst = (tx_idle_flowring_suspend_request_t *)
dhd_prot_alloc_ring_space(dhd, ring, 1, &alloced, FALSE);
if (flow_suspend_rqst == NULL) {
- DHD_RING_UNLOCK(ring->ring_lock, flags);
+ DHD_GENERAL_UNLOCK(dhd, flags);
DHD_ERROR(("%s: Flow suspend Req - failure ring space\n", __FUNCTION__));
+#ifdef PCIE_INB_DW
+ dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus);
+#endif
return BCME_NOMEM;
}
/* update ring's WR index and ring doorbell to dongle */
dhd_prot_ring_write_complete(dhd, ring, flow_suspend_rqst, 1);
-
- DHD_RING_UNLOCK(ring->ring_lock, flags);
+ DHD_GENERAL_UNLOCK(dhd, flags);
+#ifdef PCIE_INB_DW
+ dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus);
+#endif
return BCME_OK;
}
#endif /* IDLE_TX_FLOW_MGMT */
-static const char* etd_trap_name(hnd_ext_tag_trap_t tag)
-{
- switch (tag)
- {
- case TAG_TRAP_SIGNATURE: return "TAG_TRAP_SIGNATURE";
- case TAG_TRAP_STACK: return "TAG_TRAP_STACK";
- case TAG_TRAP_MEMORY: return "TAG_TRAP_MEMORY";
- case TAG_TRAP_DEEPSLEEP: return "TAG_TRAP_DEEPSLEEP";
- case TAG_TRAP_PSM_WD: return "TAG_TRAP_PSM_WD";
- case TAG_TRAP_PHY: return "TAG_TRAP_PHY";
- case TAG_TRAP_BUS: return "TAG_TRAP_BUS";
- case TAG_TRAP_MAC_SUSP: return "TAG_TRAP_MAC_SUSP";
- case TAG_TRAP_BACKPLANE: return "TAG_TRAP_BACKPLANE";
- case TAG_TRAP_PCIE_Q: return "TAG_TRAP_PCIE_Q";
- case TAG_TRAP_WLC_STATE: return "TAG_TRAP_WLC_STATE";
- case TAG_TRAP_MAC_WAKE: return "TAG_TRAP_MAC_WAKE";
- case TAG_TRAP_HMAP: return "TAG_TRAP_HMAP";
- case TAG_TRAP_PHYTXERR_THRESH: return "TAG_TRAP_PHYTXERR_THRESH";
- case TAG_TRAP_HC_DATA: return "TAG_TRAP_HC_DATA";
- case TAG_TRAP_LOG_DATA: return "TAG_TRAP_LOG_DATA";
- case TAG_TRAP_CODE: return "TAG_TRAP_CODE";
- case TAG_TRAP_LAST:
- default:
- return "Unknown";
- }
- return "Unknown";
-}
int dhd_prot_dump_extended_trap(dhd_pub_t *dhdp, struct bcmstrbuf *b, bool raw)
{
uint32 i;
uint32 *ext_data;
hnd_ext_trap_hdr_t *hdr;
- const bcm_tlv_t *tlv;
- const trap_t *tr;
- const uint32 *stack;
- const hnd_ext_trap_bp_err_t *bpe;
+ bcm_tlv_t *tlv;
+ trap_t *tr;
+ uint32 *stack;
+ hnd_ext_trap_bp_err_t *bpe;
uint32 raw_len;
ext_data = dhdp->extended_trap_data;
hdr = (hnd_ext_trap_hdr_t *)ext_data;
bcm_bprintf(b, "version: %d, len: %d\n", hdr->version, hdr->len);
- /* Dump a list of all tags found before parsing data */
- bcm_bprintf(b, "\nTags Found:\n");
- for (i = 0; i < TAG_TRAP_LAST; i++) {
- tlv = bcm_parse_tlvs(hdr->data, hdr->len, i);
- if (tlv)
- bcm_bprintf(b, "Tag: %d (%s), Length: %d\n", i, etd_trap_name(i), tlv->len);
- }
-
if (raw)
{
raw_len = sizeof(hnd_ext_trap_hdr_t) + (hdr->len / 4) + (hdr->len % 4 ? 1 : 0);
}
/* Extract the various supported TLVs from the extended trap data */
- tlv = bcm_parse_tlvs(hdr->data, hdr->len, TAG_TRAP_CODE);
- if (tlv)
- {
- bcm_bprintf(b, "\n%s len: %d\n", etd_trap_name(TAG_TRAP_CODE), tlv->len);
- bcm_bprintf(b, "ETD TYPE: %d\n", tlv->data[0]);
- }
-
tlv = bcm_parse_tlvs(hdr->data, hdr->len, TAG_TRAP_SIGNATURE);
if (tlv)
{
- bcm_bprintf(b, "\n%s len: %d\n", etd_trap_name(TAG_TRAP_SIGNATURE), tlv->len);
- tr = (const trap_t *)tlv->data;
+ bcm_bprintf(b, "\nTAG_TRAP_SIGNATURE len: %d\n", tlv->len);
+ tr = (trap_t *)tlv->data;
bcm_bprintf(b, "TRAP %x: pc %x, lr %x, sp %x, cpsr %x, spsr %x\n",
tr->type, tr->pc, tr->r14, tr->r13, tr->cpsr, tr->spsr);
tlv = bcm_parse_tlvs(hdr->data, hdr->len, TAG_TRAP_STACK);
if (tlv)
{
- bcm_bprintf(b, "\n%s len: %d\n", etd_trap_name(TAG_TRAP_STACK), tlv->len);
- stack = (const uint32 *)tlv->data;
+ bcm_bprintf(b, "\nTAG_TRAP_STACK len: %d\n", tlv->len);
+ stack = (uint32 *)tlv->data;
for (i = 0; i < (uint32)(tlv->len / 4); i++)
{
bcm_bprintf(b, " 0x%08x\n", *stack);
tlv = bcm_parse_tlvs(hdr->data, hdr->len, TAG_TRAP_BACKPLANE);
if (tlv)
{
- bcm_bprintf(b, "\n%s len: %d\n", etd_trap_name(TAG_TRAP_BACKPLANE), tlv->len);
- bpe = (const hnd_ext_trap_bp_err_t *)tlv->data;
+ bcm_bprintf(b, "\nTAG_TRAP_BACKPLANE len: %d\n", tlv->len);
+ bpe = (hnd_ext_trap_bp_err_t *)tlv->data;
bcm_bprintf(b, " error: %x\n", bpe->error);
bcm_bprintf(b, " coreid: %x\n", bpe->coreid);
bcm_bprintf(b, " baseaddr: %x\n", bpe->baseaddr);
bcm_bprintf(b, " errlogflags: %x\n", bpe->errlogflags);
}
- tlv = bcm_parse_tlvs(hdr->data, hdr->len, TAG_TRAP_MEMORY);
- if (tlv)
- {
- const hnd_ext_trap_heap_err_t* hme;
-
- bcm_bprintf(b, "\n%s len: %d\n", etd_trap_name(TAG_TRAP_MEMORY), tlv->len);
- hme = (const hnd_ext_trap_heap_err_t *)tlv->data;
- bcm_bprintf(b, " arena total: %d\n", hme->arena_total);
- bcm_bprintf(b, " heap free: %d\n", hme->heap_free);
- bcm_bprintf(b, " heap in use: %d\n", hme->heap_inuse);
- bcm_bprintf(b, " mf count: %d\n", hme->mf_count);
- bcm_bprintf(b, " stack LWM: %x\n", hme->stack_lwm);
-
- bcm_bprintf(b, " Histogram:\n");
- for (i = 0; i < (HEAP_HISTOGRAM_DUMP_LEN * 2); i += 2) {
- if (hme->heap_histogm[i] == 0xfffe)
- bcm_bprintf(b, " Others\t%d\t?\n", hme->heap_histogm[i + 1]);
- else if (hme->heap_histogm[i] == 0xffff)
- bcm_bprintf(b, " >= 256K\t%d\t?\n", hme->heap_histogm[i + 1]);
- else
- bcm_bprintf(b, " %d\t%d\t%d\n", hme->heap_histogm[i] << 2,
- hme->heap_histogm[i + 1], (hme->heap_histogm[i] << 2)
- * hme->heap_histogm[i + 1]);
- }
-
- bcm_bprintf(b, " Max free block: %d\n", hme->max_sz_free_blk[0] << 2);
- for (i = 1; i < HEAP_MAX_SZ_BLKS_LEN; i++) {
- bcm_bprintf(b, " Next lgst free block: %d\n", hme->max_sz_free_blk[i] << 2);
- }
- }
-
- tlv = bcm_parse_tlvs(hdr->data, hdr->len, TAG_TRAP_PCIE_Q);
- if (tlv)
- {
- const hnd_ext_trap_pcie_mem_err_t* pqme;
-
- bcm_bprintf(b, "\n%s len: %d\n", etd_trap_name(TAG_TRAP_PCIE_Q), tlv->len);
- pqme = (const hnd_ext_trap_pcie_mem_err_t *)tlv->data;
- bcm_bprintf(b, " d2h queue len: %x\n", pqme->d2h_queue_len);
- bcm_bprintf(b, " d2h req queue len: %x\n", pqme->d2h_req_queue_len);
- }
-
- tlv = bcm_parse_tlvs(hdr->data, hdr->len, TAG_TRAP_WLC_STATE);
- if (tlv)
- {
- const hnd_ext_trap_wlc_mem_err_t* wsme;
-
- bcm_bprintf(b, "\n%s len: %d\n", etd_trap_name(TAG_TRAP_WLC_STATE), tlv->len);
- wsme = (const hnd_ext_trap_wlc_mem_err_t *)tlv->data;
- bcm_bprintf(b, " instance: %d\n", wsme->instance);
- bcm_bprintf(b, " associated: %d\n", wsme->associated);
- bcm_bprintf(b, " peer count: %d\n", wsme->peer_cnt);
- bcm_bprintf(b, " client count: %d\n", wsme->soft_ap_client_cnt);
- bcm_bprintf(b, " TX_AC_BK_FIFO: %d\n", wsme->txqueue_len[0]);
- bcm_bprintf(b, " TX_AC_BE_FIFO: %d\n", wsme->txqueue_len[1]);
- bcm_bprintf(b, " TX_AC_VI_FIFO: %d\n", wsme->txqueue_len[2]);
- bcm_bprintf(b, " TX_AC_VO_FIFO: %d\n", wsme->txqueue_len[3]);
-
- if (tlv->len >= (sizeof(*wsme) * 2)) {
- wsme++;
- bcm_bprintf(b, "\n instance: %d\n", wsme->instance);
- bcm_bprintf(b, " associated: %d\n", wsme->associated);
- bcm_bprintf(b, " peer count: %d\n", wsme->peer_cnt);
- bcm_bprintf(b, " client count: %d\n", wsme->soft_ap_client_cnt);
- bcm_bprintf(b, " TX_AC_BK_FIFO: %d\n", wsme->txqueue_len[0]);
- bcm_bprintf(b, " TX_AC_BE_FIFO: %d\n", wsme->txqueue_len[1]);
- bcm_bprintf(b, " TX_AC_VI_FIFO: %d\n", wsme->txqueue_len[2]);
- bcm_bprintf(b, " TX_AC_VO_FIFO: %d\n", wsme->txqueue_len[3]);
- }
- }
-
- tlv = bcm_parse_tlvs(hdr->data, hdr->len, TAG_TRAP_PHY);
- if (tlv)
- {
- const hnd_ext_trap_phydbg_t* phydbg;
- bcm_bprintf(b, "\n%s len: %d\n", etd_trap_name(TAG_TRAP_PHY), tlv->len);
- phydbg = (const hnd_ext_trap_phydbg_t *)tlv->data;
- bcm_bprintf(b, " err: 0x%x\n", phydbg->err);
- bcm_bprintf(b, " RxFeStatus: 0x%x\n", phydbg->RxFeStatus);
- bcm_bprintf(b, " TxFIFOStatus0: 0x%x\n", phydbg->TxFIFOStatus0);
- bcm_bprintf(b, " TxFIFOStatus1: 0x%x\n", phydbg->TxFIFOStatus1);
- bcm_bprintf(b, " RfseqMode: 0x%x\n", phydbg->RfseqMode);
- bcm_bprintf(b, " RfseqStatus0: 0x%x\n", phydbg->RfseqStatus0);
- bcm_bprintf(b, " RfseqStatus1: 0x%x\n", phydbg->RfseqStatus1);
- bcm_bprintf(b, " RfseqStatus_Ocl: 0x%x\n", phydbg->RfseqStatus_Ocl);
- bcm_bprintf(b, " RfseqStatus_Ocl1: 0x%x\n", phydbg->RfseqStatus_Ocl1);
- bcm_bprintf(b, " OCLControl1: 0x%x\n", phydbg->OCLControl1);
- bcm_bprintf(b, " TxError: 0x%x\n", phydbg->TxError);
- bcm_bprintf(b, " bphyTxError: 0x%x\n", phydbg->bphyTxError);
- bcm_bprintf(b, " TxCCKError: 0x%x\n", phydbg->TxCCKError);
- bcm_bprintf(b, " TxCtrlWrd0: 0x%x\n", phydbg->TxCtrlWrd0);
- bcm_bprintf(b, " TxCtrlWrd1: 0x%x\n", phydbg->TxCtrlWrd1);
- bcm_bprintf(b, " TxCtrlWrd2: 0x%x\n", phydbg->TxCtrlWrd2);
- bcm_bprintf(b, " TxLsig0: 0x%x\n", phydbg->TxLsig0);
- bcm_bprintf(b, " TxLsig1: 0x%x\n", phydbg->TxLsig1);
- bcm_bprintf(b, " TxVhtSigA10: 0x%x\n", phydbg->TxVhtSigA10);
- bcm_bprintf(b, " TxVhtSigA11: 0x%x\n", phydbg->TxVhtSigA11);
- bcm_bprintf(b, " TxVhtSigA20: 0x%x\n", phydbg->TxVhtSigA20);
- bcm_bprintf(b, " TxVhtSigA21: 0x%x\n", phydbg->TxVhtSigA21);
- bcm_bprintf(b, " txPktLength: 0x%x\n", phydbg->txPktLength);
- bcm_bprintf(b, " txPsdulengthCtr: 0x%x\n", phydbg->txPsdulengthCtr);
- bcm_bprintf(b, " gpioClkControl: 0x%x\n", phydbg->gpioClkControl);
- bcm_bprintf(b, " gpioSel: 0x%x\n", phydbg->gpioSel);
- bcm_bprintf(b, " pktprocdebug: 0x%x\n", phydbg->pktprocdebug);
- for (i = 0; i < 3; i++)
- bcm_bprintf(b, " gpioOut[%d]: 0x%x\n", i, phydbg->gpioOut[i]);
- }
-
- tlv = bcm_parse_tlvs(hdr->data, hdr->len, TAG_TRAP_PSM_WD);
- if (tlv)
- {
- const hnd_ext_trap_psmwd_t* psmwd;
- bcm_bprintf(b, "\n%s len: %d\n", etd_trap_name(TAG_TRAP_PSM_WD), tlv->len);
- psmwd = (const hnd_ext_trap_psmwd_t *)tlv;
- bcm_bprintf(b, " version: 0x%x\n", psmwd->version);
- bcm_bprintf(b, " maccontrol: 0x%x\n", psmwd->i32_maccontrol);
- bcm_bprintf(b, " maccommand: 0x%x\n", psmwd->i32_maccommand);
- bcm_bprintf(b, " macintstatus: 0x%x\n", psmwd->i32_macintstatus);
- bcm_bprintf(b, " phydebug: 0x%x\n", psmwd->i32_phydebug);
- bcm_bprintf(b, " clk_ctl_st: 0x%x\n", psmwd->i32_clk_ctl_st);
- for (i = 0; i < 3; i++)
- bcm_bprintf(b, " psmdebug[%d]: 0x%x\n", i, psmwd->i32_psmdebug[i]);
- bcm_bprintf(b, " gated clock en: 0x%x\n", psmwd->i16_0x1a8);
- bcm_bprintf(b, " Rcv Fifo Ctrl: 0x%x\n", psmwd->i16_0x406);
- bcm_bprintf(b, " Rx ctrl 1: 0x%x\n", psmwd->i16_0x408);
- bcm_bprintf(b, " Rxe Status 1: 0x%x\n", psmwd->i16_0x41a);
- bcm_bprintf(b, " Rxe Status 2: 0x%x\n", psmwd->i16_0x41c);
- bcm_bprintf(b, " rcv wrd count 0: 0x%x\n", psmwd->i16_0x424);
- bcm_bprintf(b, " rcv wrd count 1: 0x%x\n", psmwd->i16_0x426);
- bcm_bprintf(b, " RCV_LFIFO_STS: 0x%x\n", psmwd->i16_0x456);
- bcm_bprintf(b, " PSM_SLP_TMR: 0x%x\n", psmwd->i16_0x480);
- bcm_bprintf(b, " PSM BRC: 0x%x\n", psmwd->i16_0x490);
- bcm_bprintf(b, " TXE CTRL: 0x%x\n", psmwd->i16_0x500);
- bcm_bprintf(b, " TXE Status: 0x%x\n", psmwd->i16_0x50e);
- bcm_bprintf(b, " TXE_xmtdmabusy: 0x%x\n", psmwd->i16_0x55e);
- bcm_bprintf(b, " TXE_XMTfifosuspflush: 0x%x\n", psmwd->i16_0x566);
- bcm_bprintf(b, " IFS Stat: 0x%x\n", psmwd->i16_0x690);
- bcm_bprintf(b, " IFS_MEDBUSY_CTR: 0x%x\n", psmwd->i16_0x692);
- bcm_bprintf(b, " IFS_TX_DUR: 0x%x\n", psmwd->i16_0x694);
- bcm_bprintf(b, " SLow_CTL: 0x%x\n", psmwd->i16_0x6a0);
- bcm_bprintf(b, " TXE_AQM fifo Ready: 0x%x\n", psmwd->i16_0x838);
- bcm_bprintf(b, " Dagg ctrl: 0x%x\n", psmwd->i16_0x8c0);
- bcm_bprintf(b, " shm_prewds_cnt: 0x%x\n", psmwd->shm_prewds_cnt);
- bcm_bprintf(b, " shm_txtplufl_cnt: 0x%x\n", psmwd->shm_txtplufl_cnt);
- bcm_bprintf(b, " shm_txphyerr_cnt: 0x%x\n", psmwd->shm_txphyerr_cnt);
- }
-
- tlv = bcm_parse_tlvs(hdr->data, hdr->len, TAG_TRAP_MAC_SUSP);
- if (tlv)
- {
- const hnd_ext_trap_macsusp_t* macsusp;
- bcm_bprintf(b, "\n%s len: %d\n", etd_trap_name(TAG_TRAP_MAC_SUSP), tlv->len);
- macsusp = (const hnd_ext_trap_macsusp_t *)tlv;
- bcm_bprintf(b, " version: %d\n", macsusp->version);
- bcm_bprintf(b, " trap_reason: %d\n", macsusp->trap_reason);
- bcm_bprintf(b, " maccontrol: 0x%x\n", macsusp->i32_maccontrol);
- bcm_bprintf(b, " maccommand: 0x%x\n", macsusp->i32_maccommand);
- bcm_bprintf(b, " macintstatus: 0x%x\n", macsusp->i32_macintstatus);
- for (i = 0; i < 4; i++)
- bcm_bprintf(b, " phydebug[%d]: 0x%x\n", i, macsusp->i32_phydebug[i]);
- for (i = 0; i < 8; i++)
- bcm_bprintf(b, " psmdebug[%d]: 0x%x\n", i, macsusp->i32_psmdebug[i]);
- bcm_bprintf(b, " Rxe Status_1: 0x%x\n", macsusp->i16_0x41a);
- bcm_bprintf(b, " Rxe Status_2: 0x%x\n", macsusp->i16_0x41c);
- bcm_bprintf(b, " PSM BRC: 0x%x\n", macsusp->i16_0x490);
- bcm_bprintf(b, " TXE Status: 0x%x\n", macsusp->i16_0x50e);
- bcm_bprintf(b, " TXE xmtdmabusy: 0x%x\n", macsusp->i16_0x55e);
- bcm_bprintf(b, " TXE XMTfifosuspflush: 0x%x\n", macsusp->i16_0x566);
- bcm_bprintf(b, " IFS Stat: 0x%x\n", macsusp->i16_0x690);
- bcm_bprintf(b, " IFS MEDBUSY CTR: 0x%x\n", macsusp->i16_0x692);
- bcm_bprintf(b, " IFS TX DUR: 0x%x\n", macsusp->i16_0x694);
- bcm_bprintf(b, " WEP CTL: 0x%x\n", macsusp->i16_0x7c0);
- bcm_bprintf(b, " TXE AQM fifo Ready: 0x%x\n", macsusp->i16_0x838);
- bcm_bprintf(b, " MHP status: 0x%x\n", macsusp->i16_0x880);
- bcm_bprintf(b, " shm_prewds_cnt: 0x%x\n", macsusp->shm_prewds_cnt);
- bcm_bprintf(b, " shm_ucode_dbgst: 0x%x\n", macsusp->shm_ucode_dbgst);
- }
-
- tlv = bcm_parse_tlvs(hdr->data, hdr->len, TAG_TRAP_MAC_WAKE);
- if (tlv)
- {
- const hnd_ext_trap_macenab_t* macwake;
- bcm_bprintf(b, "\n%s len: %d\n", etd_trap_name(TAG_TRAP_MAC_WAKE), tlv->len);
- macwake = (const hnd_ext_trap_macenab_t *)tlv;
- bcm_bprintf(b, " version: 0x%x\n", macwake->version);
- bcm_bprintf(b, " trap_reason: 0x%x\n", macwake->trap_reason);
- bcm_bprintf(b, " maccontrol: 0x%x\n", macwake->i32_maccontrol);
- bcm_bprintf(b, " maccommand: 0x%x\n", macwake->i32_maccommand);
- bcm_bprintf(b, " macintstatus: 0x%x\n", macwake->i32_macintstatus);
- for (i = 0; i < 8; i++)
- bcm_bprintf(b, " psmdebug[%d]: 0x%x\n", i, macwake->i32_psmdebug[i]);
- bcm_bprintf(b, " clk_ctl_st: 0x%x\n", macwake->i32_clk_ctl_st);
- bcm_bprintf(b, " powerctl: 0x%x\n", macwake->i32_powerctl);
- bcm_bprintf(b, " gated clock en: 0x%x\n", macwake->i16_0x1a8);
- bcm_bprintf(b, " PSM_SLP_TMR: 0x%x\n", macwake->i16_0x480);
- bcm_bprintf(b, " PSM BRC: 0x%x\n", macwake->i16_0x490);
- bcm_bprintf(b, " TSF CTL: 0x%x\n", macwake->i16_0x600);
- bcm_bprintf(b, " IFS Stat: 0x%x\n", macwake->i16_0x690);
- bcm_bprintf(b, " IFS_MEDBUSY_CTR: 0x%x\n", macwake->i16_0x692);
- bcm_bprintf(b, " Slow_CTL: 0x%x\n", macwake->i16_0x6a0);
- bcm_bprintf(b, " Slow_FRAC: 0x%x\n", macwake->i16_0x6a6);
- bcm_bprintf(b, " fast power up delay: 0x%x\n", macwake->i16_0x6a8);
- bcm_bprintf(b, " Slow_PER: 0x%x\n", macwake->i16_0x6aa);
- bcm_bprintf(b, " shm_ucode_dbgst: 0x%x\n", macwake->shm_ucode_dbgst);
- }
-
- tlv = bcm_parse_tlvs(hdr->data, hdr->len, TAG_TRAP_BUS);
- if (tlv)
- {
- const bcm_dngl_pcie_hc_t* hc;
- bcm_bprintf(b, "\n%s len: %d\n", etd_trap_name(TAG_TRAP_BUS), tlv->len);
- hc = (const bcm_dngl_pcie_hc_t *)tlv->data;
- bcm_bprintf(b, " version: 0x%x\n", hc->version);
- bcm_bprintf(b, " reserved: 0x%x\n", hc->reserved);
- bcm_bprintf(b, " pcie_err_ind_type: 0x%x\n", hc->pcie_err_ind_type);
- bcm_bprintf(b, " pcie_flag: 0x%x\n", hc->pcie_flag);
- bcm_bprintf(b, " pcie_control_reg: 0x%x\n", hc->pcie_control_reg);
- for (i = 0; i < HC_PCIEDEV_CONFIG_REGLIST_MAX; i++)
- bcm_bprintf(b, " pcie_config_regs[%d]: 0x%x\n", i, hc->pcie_config_regs[i]);
- }
-
- tlv = bcm_parse_tlvs(hdr->data, hdr->len, TAG_TRAP_HMAP);
- if (tlv)
- {
- const pcie_hmapviolation_t* hmap;
- hmap = (const pcie_hmapviolation_t *)tlv->data;
- bcm_bprintf(b, "\n%s len: %d\n", etd_trap_name(TAG_TRAP_HMAP), tlv->len);
- bcm_bprintf(b, " HMAP Vio Addr Low: 0x%x\n", hmap->hmap_violationaddr_lo);
- bcm_bprintf(b, " HMAP Vio Addr Hi: 0x%x\n", hmap->hmap_violationaddr_hi);
- bcm_bprintf(b, " HMAP Vio Info: 0x%x\n", hmap->hmap_violation_info);
- }
-
return BCME_OK;
}
+
#ifdef BCMPCIE
int
dhd_prot_send_host_timestamp(dhd_pub_t *dhdp, uchar *tlvs, uint16 tlv_len,
unsigned long flags;
uint16 alloced = 0;
uchar *ts_tlv_buf;
- msgbuf_ring_t *ctrl_ring = &prot->h2dring_ctrl_subn;
if ((tlvs == NULL) || (tlv_len == 0)) {
DHD_ERROR(("%s: argument error tlv: %p, tlv_len %d\n",
__FUNCTION__, tlvs, tlv_len));
return -1;
}
+#ifdef PCIE_INB_DW
+ if (dhd_prot_inc_hostactive_devwake_assert(dhdp->bus) != BCME_OK)
+ return BCME_ERROR;
+#endif /* PCIE_INB_DW */
- DHD_RING_LOCK(ctrl_ring->ring_lock, flags);
+ DHD_GENERAL_LOCK(dhdp, flags);
/* if Host TS req already pending go away */
if (prot->hostts_req_buf_inuse == TRUE) {
DHD_ERROR(("one host TS request already pending at device\n"));
- DHD_RING_UNLOCK(ctrl_ring->ring_lock, flags);
+ DHD_GENERAL_UNLOCK(dhdp, flags);
+#ifdef PCIE_INB_DW
+ dhd_prot_dec_hostactive_ack_pending_dsreq(dhdp->bus);
+#endif
return -1;
}
/* Request for cbuf space */
- ts_req = (host_timestamp_msg_t*)dhd_prot_alloc_ring_space(dhdp, ctrl_ring,
+ ts_req = (host_timestamp_msg_t*)dhd_prot_alloc_ring_space(dhdp, &prot->h2dring_ctrl_subn,
DHD_FLOWRING_DEFAULT_NITEMS_POSTED_H2D, &alloced, FALSE);
if (ts_req == NULL) {
DHD_ERROR(("couldn't allocate space on msgring to send host TS request\n"));
- DHD_RING_UNLOCK(ctrl_ring->ring_lock, flags);
+ DHD_GENERAL_UNLOCK(dhdp, flags);
+#ifdef PCIE_INB_DW
+ dhd_prot_dec_hostactive_ack_pending_dsreq(dhdp->bus);
+#endif
return -1;
}
/* Common msg buf hdr */
ts_req->msg.msg_type = MSG_TYPE_HOSTTIMSTAMP;
ts_req->msg.if_id = 0;
- ts_req->msg.flags = ctrl_ring->current_phase;
+ ts_req->msg.flags = prot->h2dring_ctrl_subn.current_phase;
ts_req->msg.request_id = DHD_H2D_HOSTTS_REQ_PKTID;
- ts_req->msg.epoch = ctrl_ring->seqnum % H2D_EPOCH_MODULO;
- ctrl_ring->seqnum++;
+ ts_req->msg.epoch = prot->h2dring_ctrl_subn.seqnum % H2D_EPOCH_MODULO;
+ prot->h2dring_ctrl_subn.seqnum++;
ts_req->xt_id = xt_id;
ts_req->seqnum = seqnum;
ts_req->msg.request_id, ts_req->input_data_len,
ts_req->xt_id, ts_req->seqnum));
+
/* upd wrt ptr and raise interrupt */
- dhd_prot_ring_write_complete(dhdp, ctrl_ring, ts_req,
+ dhd_prot_ring_write_complete(dhdp, &prot->h2dring_ctrl_subn, ts_req,
DHD_FLOWRING_DEFAULT_NITEMS_POSTED_H2D);
-
- DHD_RING_UNLOCK(ctrl_ring->ring_lock, flags);
+ DHD_GENERAL_UNLOCK(dhdp, flags);
+#ifdef PCIE_INB_DW
+ dhd_prot_dec_hostactive_ack_pending_dsreq(dhdp->bus);
+#endif
return 0;
} /* dhd_prot_send_host_timestamp */
+
bool
dhd_prot_data_path_tx_timestamp_logging(dhd_pub_t *dhd, bool enable, bool set)
{
return dhd->prot->rx_ts_log_enabled;
}
-
-bool
-dhd_prot_pkt_noretry(dhd_pub_t *dhd, bool enable, bool set)
-{
- if (set)
- dhd->prot->no_retry = enable;
-
- return dhd->prot->no_retry;
-}
-
-bool
-dhd_prot_pkt_noaggr(dhd_pub_t *dhd, bool enable, bool set)
-{
- if (set)
- dhd->prot->no_aggr = enable;
-
- return dhd->prot->no_aggr;
-}
-
-bool
-dhd_prot_pkt_fixed_rate(dhd_pub_t *dhd, bool enable, bool set)
-{
- if (set)
- dhd->prot->fixed_rate = enable;
-
- return dhd->prot->fixed_rate;
-}
#endif /* BCMPCIE */
void
dhd_dma_buf_free(dhd, &prot->d2h_dma_indx_rd_buf);
}
-void
-dhd_msgbuf_delay_post_ts_bufs(dhd_pub_t *dhd)
-{
- if (dhd->prot->max_tsbufpost > 0)
- dhd_msgbuf_rxbuf_post_ts_bufs(dhd);
-}
-
static void BCMFASTPATH
dhd_prot_process_fw_timestamp(dhd_pub_t *dhd, void* buf)
{
-	DHD_ERROR(("Timesync feature not compiled in but GOT FW TS message\n"));
-
-}
-
-uint16
-dhd_prot_get_ioctl_trans_id(dhd_pub_t *dhdp)
-{
- return dhdp->prot->ioctl_trans_id;
-}
-
-int dhd_get_hscb_info(dhd_pub_t *dhd, void ** va, uint32 *len)
-{
- if (!dhd->hscb_enable) {
- if (len) {
- /* prevent "Operation not supported" dhd message */
- *len = 0;
- return BCME_OK;
- }
- return BCME_UNSUPPORTED;
- }
-
- if (va) {
- *va = dhd->prot->host_scb_buf.va;
- }
- if (len) {
- *len = dhd->prot->host_scb_buf.len;
- }
-
- return BCME_OK;
-}
-
-#ifdef DHD_HP2P
-uint32
-dhd_prot_pkt_threshold(dhd_pub_t *dhd, bool set, uint32 val)
-{
- if (set)
- dhd->pkt_thresh = (uint16)val;
-
- val = dhd->pkt_thresh;
-
- return val;
-}
-
-uint32
-dhd_prot_time_threshold(dhd_pub_t *dhd, bool set, uint32 val)
-{
- if (set)
- dhd->time_thresh = (uint16)val;
-
- val = dhd->time_thresh;
-
- return val;
-}
-
-uint32
-dhd_prot_pkt_expiry(dhd_pub_t *dhd, bool set, uint32 val)
-{
- if (set)
- dhd->pkt_expiry = (uint16)val;
-
- val = dhd->pkt_expiry;
-
- return val;
-}
-
-uint8
-dhd_prot_hp2p_enable(dhd_pub_t *dhd, bool set, int enable)
-{
- uint8 ret = 0;
- if (set) {
- dhd->hp2p_enable = (enable & 0xf) ? TRUE : FALSE;
- dhd->hp2p_infra_enable = ((enable >> 4) & 0xf) ? TRUE : FALSE;
-
- if (enable) {
- dhd_update_flow_prio_map(dhd, DHD_FLOW_PRIO_TID_MAP);
- } else {
- dhd_update_flow_prio_map(dhd, DHD_FLOW_PRIO_AC_MAP);
- }
- }
- ret = dhd->hp2p_infra_enable ? 0x1:0x0;
- ret <<= 4;
- ret |= dhd->hp2p_enable ? 0x1:0x0;
-
- return ret;
-}
-
-static void
-dhd_update_hp2p_rxstats(dhd_pub_t *dhd, host_rxbuf_cmpl_t *rxstatus)
-{
- ts_timestamp_t *ts = (ts_timestamp_t *)&rxstatus->ts;
- hp2p_info_t *hp2p_info;
- uint32 dur1;
-
- hp2p_info = &dhd->hp2p_info[0];
- dur1 = ((ts->high & 0x3FF) * HP2P_TIME_SCALE) / 100;
-
- if (dur1 > (MAX_RX_HIST_BIN - 1)) {
- dur1 = MAX_RX_HIST_BIN - 1;
- DHD_ERROR(("%s: 0x%x 0x%x\n",
- __FUNCTION__, ts->low, ts->high));
- }
-
- hp2p_info->rx_t0[dur1 % MAX_RX_HIST_BIN]++;
- return;
-}
-
-static void
-dhd_update_hp2p_txstats(dhd_pub_t *dhd, host_txbuf_cmpl_t *txstatus)
-{
- ts_timestamp_t *ts = (ts_timestamp_t *)&txstatus->ts;
- uint16 flowid = txstatus->compl_hdr.flow_ring_id;
- uint32 hp2p_flowid, dur1, dur2;
- hp2p_info_t *hp2p_info;
-
- hp2p_flowid = dhd->bus->max_submission_rings -
- dhd->bus->max_cmn_rings - flowid + 1;
- hp2p_info = &dhd->hp2p_info[hp2p_flowid];
- ts = (ts_timestamp_t *)&(txstatus->ts);
-
- dur1 = ((ts->high & 0x3FF) * HP2P_TIME_SCALE) / 1000;
- if (dur1 > (MAX_TX_HIST_BIN - 1)) {
- dur1 = MAX_TX_HIST_BIN - 1;
- DHD_ERROR(("%s: 0x%x 0x%x\n", __FUNCTION__, ts->low, ts->high));
- }
- hp2p_info->tx_t0[dur1 % MAX_TX_HIST_BIN]++;
-
- dur2 = (((ts->high >> 10) & 0x3FF) * HP2P_TIME_SCALE) / 1000;
- if (dur2 > (MAX_TX_HIST_BIN - 1)) {
- dur2 = MAX_TX_HIST_BIN - 1;
- DHD_ERROR(("%s: 0x%x 0x%x\n", __FUNCTION__, ts->low, ts->high));
- }
-
- hp2p_info->tx_t1[dur2 % MAX_TX_HIST_BIN]++;
- return;
-}
-
-enum hrtimer_restart dhd_hp2p_write(struct hrtimer *timer)
-{
- hp2p_info_t *hp2p_info;
+#ifdef DHD_TIMESYNC
+ fw_timestamp_event_msg_t *resp;
+ uint32 pktid;
+ uint16 buflen, seqnum;
+ void * pkt;
unsigned long flags;
- dhd_pub_t *dhdp;
-
-#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
-#pragma GCC diagnostic push
-#pragma GCC diagnostic ignored "-Wcast-qual"
-#endif // endif
- hp2p_info = container_of(timer, hp2p_info_t, timer.timer);
-#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
-#pragma GCC diagnostic pop
-#endif // endif
- dhdp = hp2p_info->dhd_pub;
- if (!dhdp) {
- goto done;
- }
-
- DHD_INFO(("%s: pend_item = %d flowid = %d\n",
- __FUNCTION__, ((msgbuf_ring_t *)hp2p_info->ring)->pend_items_count,
- hp2p_info->flowid));
-
- flags = dhd_os_hp2plock(dhdp);
-
- dhd_prot_txdata_write_flush(dhdp, hp2p_info->flowid);
- hp2p_info->hrtimer_init = FALSE;
- hp2p_info->num_timer_limit++;
- dhd_os_hp2punlock(dhdp, flags);
-done:
- return HRTIMER_NORESTART;
-}
-
-static void
-dhd_calc_hp2p_burst(dhd_pub_t *dhd, msgbuf_ring_t *ring, uint16 flowid)
-{
- hp2p_info_t *hp2p_info;
- uint16 hp2p_flowid;
+ resp = (fw_timestamp_event_msg_t *)buf;
+ pktid = ltoh32(resp->msg.request_id);
+ buflen = ltoh16(resp->buf_len);
+ seqnum = ltoh16(resp->seqnum);
- hp2p_flowid = dhd->bus->max_submission_rings -
- dhd->bus->max_cmn_rings - flowid + 1;
- hp2p_info = &dhd->hp2p_info[hp2p_flowid];
-
- if (ring->pend_items_count == dhd->pkt_thresh) {
- dhd_prot_txdata_write_flush(dhd, flowid);
+#if defined(DHD_PKTID_AUDIT_RING)
+ DHD_PKTID_AUDIT(dhd, dhd->prot->pktid_ctrl_map, pktid,
+ DHD_DUPLICATE_FREE);
+#endif /* DHD_PKTID_AUDIT_RING */
- hp2p_info->hrtimer_init = FALSE;
- hp2p_info->ring = NULL;
- hp2p_info->num_pkt_limit++;
- hrtimer_cancel(&hp2p_info->timer.timer);
+ DHD_INFO(("id 0x%04x, len %d, phase 0x%02x, seqnum %d\n",
+ pktid, buflen, resp->msg.flags, ltoh16(resp->seqnum)));
- DHD_INFO(("%s: cancel hrtimer for flowid = %d \n"
- "hp2p_flowid = %d pkt_thresh = %d\n",
- __FUNCTION__, flowid, hp2p_flowid, dhd->pkt_thresh));
- } else {
- if (hp2p_info->hrtimer_init == FALSE) {
- hp2p_info->hrtimer_init = TRUE;
- hp2p_info->flowid = flowid;
- hp2p_info->dhd_pub = dhd;
- hp2p_info->ring = ring;
- hp2p_info->num_timer_start++;
-
- tasklet_hrtimer_start(&hp2p_info->timer,
- ktime_set(0, dhd->time_thresh * 1000), HRTIMER_MODE_REL);
-
- DHD_INFO(("%s: start hrtimer for flowid = %d hp2_flowid = %d\n",
- __FUNCTION__, flowid, hp2p_flowid));
- }
+ if (!dhd->prot->cur_ts_bufs_posted) {
+ DHD_ERROR(("tsbuf posted are zero, but there is a completion\n"));
+ return;
}
- return;
-}
-
-static void
-dhd_update_hp2p_txdesc(dhd_pub_t *dhd, host_txbuf_post_t *txdesc)
-{
- uint64 ts;
-
- ts = local_clock();
- do_div(ts, 1000);
- txdesc->metadata_buf_len = 0;
- txdesc->metadata_buf_addr.high_addr = htol32((ts >> 32) & 0xFFFFFFFF);
- txdesc->metadata_buf_addr.low_addr = htol32(ts & 0xFFFFFFFF);
- txdesc->exp_time = dhd->pkt_expiry;
+ dhd->prot->cur_ts_bufs_posted--;
+ if (dhd->prot->max_tsbufpost > 0)
+ dhd_msgbuf_rxbuf_post_ts_bufs(dhd);
- DHD_INFO(("%s: metadata_high = 0x%x metadata_low = 0x%x exp_time = %x\n",
- __FUNCTION__, txdesc->metadata_buf_addr.high_addr,
- txdesc->metadata_buf_addr.low_addr,
- txdesc->exp_time));
+ DHD_GENERAL_LOCK(dhd, flags);
+ pkt = dhd_prot_packet_get(dhd, pktid, PKTTYPE_TSBUF_RX, TRUE);
+ DHD_GENERAL_UNLOCK(dhd, flags);
- return;
-}
-#endif /* DHD_HP2P */
+ if (!pkt) {
+ DHD_ERROR(("no ts buffer associated with pktid 0x%04x\n", pktid));
+ return;
+ }
-#ifdef DHD_MAP_LOGGING
-void
-dhd_prot_smmu_fault_dump(dhd_pub_t *dhdp)
-{
- dhd_prot_debug_info_print(dhdp);
- OSL_DMA_MAP_DUMP(dhdp->osh);
-#ifdef DHD_MAP_PKTID_LOGGING
- dhd_pktid_logging_dump(dhdp);
-#endif /* DHD_MAP_PKTID_LOGGING */
-#ifdef DHD_FW_COREDUMP
- dhdp->memdump_type = DUMP_TYPE_SMMU_FAULT;
-#ifdef DNGL_AXI_ERROR_LOGGING
- dhdp->memdump_enabled = DUMP_MEMFILE;
- dhd_bus_get_mem_dump(dhdp);
+ PKTSETLEN(dhd->osh, pkt, buflen);
+ dhd_timesync_handle_fw_timestamp(dhd->ts, PKTDATA(dhd->osh, pkt), buflen, seqnum);
+#ifdef DHD_USE_STATIC_CTRLBUF
+ PKTFREE_STATIC(dhd->osh, pkt, TRUE);
#else
- dhdp->memdump_enabled = DUMP_MEMONLY;
- dhd_bus_mem_dump(dhdp);
-#endif /* DNGL_AXI_ERROR_LOGGING */
-#endif /* DHD_FW_COREDUMP */
+ PKTFREE(dhd->osh, pkt, TRUE);
+#endif /* DHD_USE_STATIC_CTRLBUF */
+#else /* DHD_TIMESYNC */
+	DHD_ERROR(("Timesync feature not compiled in but GOT FW TS message\n"));
+#endif /* DHD_TIMESYNC */
+
}
-#endif /* DHD_MAP_LOGGING */
/*
* DHD Bus Module for PCIE
*
- * Copyright (C) 1999-2019, Broadcom.
- *
+ * Copyright (C) 1999-2017, Broadcom Corporation
+ *
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
* under the terms of the GNU General Public License version 2 (the "GPL"),
* available at http://www.broadcom.com/licenses/GPLv2.php, with the
* following added to such license:
- *
+ *
* As a special exception, the copyright holders of this software give you
* permission to link this software with independent modules, and to copy and
* distribute the resulting executable under terms of your choice, provided that
* the license of that module. An independent module is a module which is not
* derived from this software. The special exception does not apply to any
* modifications of the software.
- *
+ *
* Notwithstanding the above, under no circumstances may you combine this
* software in any way with any other Broadcom software provided under a license
* other than the GPL, without Broadcom's express prior written consent.
*
* <<Broadcom-WL-IPTag/Open:>>
*
- * $Id: dhd_pcie.c 825481 2019-06-14 10:06:03Z $
+ * $Id: dhd_pcie.c 710862 2017-07-14 07:43:59Z $
*/
+
/* include files */
#include <typedefs.h>
#include <bcmutils.h>
#include <bcmdevs.h>
#include <siutils.h>
-#include <hndoobr.h>
#include <hndsoc.h>
#include <hndpmu.h>
-#include <etd.h>
#include <hnd_debug.h>
#include <sbchipc.h>
-#include <sbhndarm.h>
#include <hnd_armtrap.h>
#if defined(DHD_DEBUG)
#include <hnd_cons.h>
#include <dhd_flowring.h>
#include <dhd_proto.h>
#include <dhd_dbg.h>
-#include <dhd_debug.h>
#include <dhd_daemon.h>
#include <dhdioctl.h>
#include <sdiovar.h>
#include <dhd_pcie.h>
#include <bcmpcie.h>
#include <bcmendian.h>
-#include <bcmstdlib_s.h>
#ifdef DHDTCPACK_SUPPRESS
#include <dhd_ip.h>
#endif /* DHDTCPACK_SUPPRESS */
#include <bcmevent.h>
#include <dhd_config.h>
-#ifdef DHD_PCIE_NATIVE_RUNTIMEPM
-#include <linux/pm_runtime.h>
-#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
+#ifdef DHD_TIMESYNC
+#include <dhd_timesync.h>
+#endif /* DHD_TIMESYNC */
-#if defined(DEBUGGER) || defined(DHD_DSCOPE)
-#include <debugger.h>
-#endif /* DEBUGGER || DHD_DSCOPE */
-
-#ifdef DNGL_AXI_ERROR_LOGGING
-#include <dhd_linux_wq.h>
-#include <dhd_linux.h>
-#endif /* DNGL_AXI_ERROR_LOGGING */
-
-#if defined(DHD_CONTROL_PCIE_CPUCORE_WIFI_TURNON)
-#include <dhd_linux_priv.h>
-#endif /* DHD_CONTROL_PCIE_CPUCORE_WIFI_TURNON */
-
-#include <otpdefs.h>
-#define EXTENDED_PCIE_DEBUG_DUMP 1 /* Enable Extended pcie registers dump */
+#if defined(BCMEMBEDIMAGE)
+#ifndef DHD_EFI
+#include BCMEMBEDIMAGE
+#else
+#include <rtecdc_4364.h>
+#endif /* !DHD_EFI */
+#endif /* BCMEMBEDIMAGE */
#define MEMBLOCK 2048 /* Block size used for downloading of dongle image */
#define MAX_WKLK_IDLE_CHECK 3 /* times wake_lock checked before deciding not to suspend */
-#define DHD_MAX_ITEMS_HPP_TXCPL_RING 512
-#define DHD_MAX_ITEMS_HPP_RXCPL_RING 512
-
-#define ARMCR4REG_CORECAP (0x4/sizeof(uint32))
-#define ARMCR4REG_MPUCTRL (0x90/sizeof(uint32))
-#define ACC_MPU_SHIFT 25
-#define ACC_MPU_MASK (0x1u << ACC_MPU_SHIFT)
-
-#define REG_WORK_AROUND (0x1e4/sizeof(uint32))
-
#define ARMCR4REG_BANKIDX (0x40/sizeof(uint32))
#define ARMCR4REG_BANKPDA (0x4C/sizeof(uint32))
/* Temporary war to fix precommit till sync issue between trunk & precommit branch is resolved */
/* CTO Prevention Recovery */
-#ifdef BCMQT_HW
-#define CTO_TO_CLEAR_WAIT_MS 10000
-#define CTO_TO_CLEAR_WAIT_MAX_CNT 100
-#else
#define CTO_TO_CLEAR_WAIT_MS 1000
#define CTO_TO_CLEAR_WAIT_MAX_CNT 10
-#endif // endif
-
-/* Fetch address of a member in the pciedev_shared structure in dongle memory */
-#define DHD_PCIE_SHARED_MEMBER_ADDR(bus, member) \
- (bus)->shared_addr + OFFSETOF(pciedev_shared_t, member)
-
-/* Fetch address of a member in rings_info_ptr structure in dongle memory */
-#define DHD_RING_INFO_MEMBER_ADDR(bus, member) \
- (bus)->pcie_sh->rings_info_ptr + OFFSETOF(ring_info_t, member)
-
-/* Fetch address of a member in the ring_mem structure in dongle memory */
-#define DHD_RING_MEM_MEMBER_ADDR(bus, ringid, member) \
- (bus)->ring_sh[ringid].ring_mem_addr + OFFSETOF(ring_mem_t, member)
#if defined(SUPPORT_MULTIPLE_BOARD_REV)
extern unsigned int system_rev;
#endif /* SUPPORT_MULTIPLE_BOARD_REV */
-#ifdef EWP_EDL
-extern int host_edl_support;
-#endif // endif
-
-/* This can be overwritten by module parameter(dma_ring_indices) defined in dhd_linux.c */
-uint dma_ring_indices = 0;
-/* This can be overwritten by module parameter(h2d_phase) defined in dhd_linux.c */
-bool h2d_phase = 0;
-/* This can be overwritten by module parameter(force_trap_bad_h2d_phase)
- * defined in dhd_linux.c
- */
-bool force_trap_bad_h2d_phase = 0;
-
int dhd_dongle_memsize;
int dhd_dongle_ramsize;
-struct dhd_bus *g_dhd_bus = NULL;
-#ifdef DNGL_AXI_ERROR_LOGGING
-static void dhd_log_dump_axi_error(uint8 *axi_err);
-#endif /* DNGL_AXI_ERROR_LOGGING */
-
static int dhdpcie_checkdied(dhd_bus_t *bus, char *data, uint size);
static int dhdpcie_bus_readconsole(dhd_bus_t *bus);
#if defined(DHD_FW_COREDUMP)
+struct dhd_bus *g_dhd_bus = NULL;
static int dhdpcie_mem_dump(dhd_bus_t *bus);
-static int dhdpcie_get_mem_dump(dhd_bus_t *bus);
#endif /* DHD_FW_COREDUMP */
static int dhdpcie_bus_membytes(dhd_bus_t *bus, bool write, ulong address, uint8 *data, uint size);
int plen, void *arg, int len, int val_size);
static int dhdpcie_bus_lpback_req(struct dhd_bus *bus, uint32 intval);
static int dhdpcie_bus_dmaxfer_req(struct dhd_bus *bus,
- uint32 len, uint32 srcdelay, uint32 destdelay,
- uint32 d11_lpbk, uint32 core_num, uint32 wait);
+ uint32 len, uint32 srcdelay, uint32 destdelay, uint32 d11_lpbk);
static int dhdpcie_bus_download_state(dhd_bus_t *bus, bool enter);
static int _dhdpcie_download_firmware(struct dhd_bus *bus);
static int dhdpcie_download_firmware(dhd_bus_t *bus, osl_t *osh);
bool dongle_isolation, bool reset_flag);
static void dhdpcie_bus_release_malloc(dhd_bus_t *bus, osl_t *osh);
static int dhdpcie_downloadvars(dhd_bus_t *bus, void *arg, int len);
-static void dhdpcie_setbar1win(dhd_bus_t *bus, uint32 addr);
static uint8 dhdpcie_bus_rtcm8(dhd_bus_t *bus, ulong offset);
static void dhdpcie_bus_wtcm8(dhd_bus_t *bus, ulong offset, uint8 data);
static void dhdpcie_bus_wtcm16(dhd_bus_t *bus, ulong offset, uint16 data);
static void dhdpcie_bus_wtcm32(dhd_bus_t *bus, ulong offset, uint32 data);
static uint32 dhdpcie_bus_rtcm32(dhd_bus_t *bus, ulong offset);
#ifdef DHD_SUPPORT_64BIT
-static void dhdpcie_bus_wtcm64(dhd_bus_t *bus, ulong offset, uint64 data) __attribute__ ((used));
-static uint64 dhdpcie_bus_rtcm64(dhd_bus_t *bus, ulong offset) __attribute__ ((used));
+static void dhdpcie_bus_wtcm64(dhd_bus_t *bus, ulong offset, uint64 data);
+static uint64 dhdpcie_bus_rtcm64(dhd_bus_t *bus, ulong offset);
#endif /* DHD_SUPPORT_64BIT */
static void dhdpcie_bus_cfg_set_bar0_win(dhd_bus_t *bus, uint32 data);
static void dhdpcie_bus_reg_unmap(osl_t *osh, volatile char *addr, int size);
static int dhdpcie_cc_nvmshadow(dhd_bus_t *bus, struct bcmstrbuf *b);
static void dhdpcie_fw_trap(dhd_bus_t *bus);
static void dhd_fillup_ring_sharedptr_info(dhd_bus_t *bus, ring_info_t *ring_info);
-static void dhdpcie_handle_mb_data(dhd_bus_t *bus);
extern void dhd_dpc_enable(dhd_pub_t *dhdp);
extern void dhd_dpc_kill(dhd_pub_t *dhdp);
static void dhd_bus_idle_scan(dhd_bus_t *bus);
#endif /* IDLE_TX_FLOW_MGMT */
+#ifdef BCMEMBEDIMAGE
+static int dhdpcie_download_code_array(dhd_bus_t *bus);
+#endif /* BCMEMBEDIMAGE */
+
+
#ifdef EXYNOS_PCIE_DEBUG
extern void exynos_pcie_register_dump(int ch_num);
#endif /* EXYNOS_PCIE_DEBUG */
-#if defined(DHD_H2D_LOG_TIME_SYNC)
-static void dhdpci_bus_rte_log_time_sync_poll(dhd_bus_t *bus);
-#endif /* DHD_H2D_LOG_TIME_SYNC */
-
#define PCI_VENDOR_ID_BROADCOM 0x14e4
-#ifdef DHD_PCIE_NATIVE_RUNTIMEPM
-#define MAX_D3_ACK_TIMEOUT 100
-#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
-
#define DHD_DEFAULT_DOORBELL_TIMEOUT 200 /* ms */
+#if defined(PCIE_OOB) || defined(PCIE_INB_DW)
+static uint dhd_doorbell_timeout = DHD_DEFAULT_DOORBELL_TIMEOUT;
+#endif /* defined(PCIE_OOB) || defined(PCIE_INB_DW) */
static bool dhdpcie_check_firmware_compatible(uint32 f_api_version, uint32 h_api_version);
-static int dhdpcie_cto_error_recovery(struct dhd_bus *bus);
-
-static int dhdpcie_init_d11status(struct dhd_bus *bus);
+static void dhdpcie_cto_error_recovery(struct dhd_bus *bus);
-static int dhdpcie_wrt_rnd(struct dhd_bus *bus);
+#ifdef BCM_ASLR_HEAP
+static void dhdpcie_wrt_rnd(struct dhd_bus *bus);
+#endif /* BCM_ASLR_HEAP */
extern uint16 dhd_prot_get_h2d_max_txpost(dhd_pub_t *dhd);
extern void dhd_prot_set_h2d_max_txpost(dhd_pub_t *dhd, uint16 max_txpost);
-#ifdef DHD_HP2P
-extern enum hrtimer_restart dhd_hp2p_write(struct hrtimer *timer);
-static uint16 dhd_bus_set_hp2p_ring_max_size(struct dhd_bus *bus, bool tx, uint16 val);
-#endif // endif
-#define NUM_PATTERNS 2
-static bool dhd_bus_tcm_test(struct dhd_bus *bus);
-
/* IOVar table */
enum {
IOV_INTR = 1,
IOV_FORCE_FW_TRAP,
IOV_DB1_FOR_MB,
IOV_FLOW_PRIO_MAP,
+#ifdef DHD_PCIE_RUNTIMEPM
+ IOV_IDLETIME,
+#endif /* DHD_PCIE_RUNTIMEPM */
IOV_RXBOUND,
IOV_TXBOUND,
IOV_HANGREPORT,
IOV_TRAPDATA,
IOV_TRAPDATA_RAW,
IOV_CTO_PREVENTION,
+#ifdef PCIE_OOB
+ IOV_OOB_BT_REG_ON,
+ IOV_OOB_ENABLE,
+#endif /* PCIE_OOB */
IOV_PCIE_WD_RESET,
- IOV_DUMP_DONGLE,
- IOV_HWA_ENAB_BMAP,
+ IOV_CTO_THRESHOLD,
+#ifdef DHD_EFI
+ IOV_CONTROL_SIGNAL,
+#if defined(PCIE_OOB) || defined(PCIE_INB_DW)
+ IOV_DEEP_SLEEP,
+#endif /* PCIE_OOB || PCIE_INB_DW */
+#endif /* DHD_EFI */
+#ifdef DEVICE_TX_STUCK_DETECT
+ IOV_DEVICE_TX_STUCK_DETECT,
+#endif /* DEVICE_TX_STUCK_DETECT */
+ IOV_INB_DW_ENABLE,
IOV_IDMA_ENABLE,
IOV_IFRM_ENABLE,
IOV_CLEAR_RING,
- IOV_DAR_ENABLE,
- IOV_DNGL_CAPS, /**< returns string with dongle capabilities */
-#if defined(DEBUGGER) || defined(DHD_DSCOPE)
- IOV_GDB_SERVER, /**< starts gdb server on given interface */
-#endif /* DEBUGGER || DHD_DSCOPE */
- IOV_INB_DW_ENABLE,
- IOV_CTO_THRESHOLD,
- IOV_HSCBSIZE, /* get HSCB buffer size */
- IOV_HP2P_ENABLE,
- IOV_HP2P_PKT_THRESHOLD,
- IOV_HP2P_TIME_THRESHOLD,
- IOV_HP2P_PKT_EXPIRY,
- IOV_HP2P_TXCPL_MAXITEMS,
- IOV_HP2P_RXCPL_MAXITEMS,
- IOV_EXTDTXS_IN_TXCPL,
- IOV_HOSTRDY_AFTER_INIT,
- IOV_PCIE_LAST /**< unused IOVAR */
+#ifdef DHD_EFI
+ IOV_WIFI_PROPERTIES,
+ IOV_OTP_DUMP
+#endif
};
+
const bcm_iovar_t dhdpcie_iovars[] = {
- {"intr", IOV_INTR, 0, 0, IOVT_BOOL, 0 },
- {"memsize", IOV_MEMSIZE, 0, 0, IOVT_UINT32, 0 },
- {"dwnldstate", IOV_SET_DOWNLOAD_STATE, 0, 0, IOVT_BOOL, 0 },
- {"vars", IOV_VARS, 0, 0, IOVT_BUFFER, 0 },
- {"devreset", IOV_DEVRESET, 0, 0, IOVT_UINT8, 0 },
- {"pcie_device_trap", IOV_FORCE_FW_TRAP, 0, 0, 0, 0 },
+ {"intr", IOV_INTR, 0, 0, IOVT_BOOL, 0 },
+ {"memsize", IOV_MEMSIZE, 0, 0, IOVT_UINT32, 0 },
+ {"dwnldstate", IOV_SET_DOWNLOAD_STATE, 0, 0, IOVT_BOOL, 0 },
+ {"vars", IOV_VARS, 0, 0, IOVT_BUFFER, 0 },
+ {"devreset", IOV_DEVRESET, 0, 0, IOVT_BOOL, 0 },
+ {"pcie_device_trap", IOV_FORCE_FW_TRAP, 0, 0, 0, 0 },
{"pcie_lpbk", IOV_PCIE_LPBK, 0, 0, IOVT_UINT32, 0 },
- {"cc_nvmshadow", IOV_CC_NVMSHADOW, 0, 0, IOVT_BUFFER, 0 },
- {"ramsize", IOV_RAMSIZE, 0, 0, IOVT_UINT32, 0 },
- {"ramstart", IOV_RAMSTART, 0, 0, IOVT_UINT32, 0 },
- {"pcie_dmaxfer", IOV_PCIE_DMAXFER, 0, 0, IOVT_BUFFER, sizeof(dma_xfer_info_t)},
- {"pcie_suspend", IOV_PCIE_SUSPEND, DHD_IOVF_PWRREQ_BYPASS, 0, IOVT_UINT32, 0 },
+ {"cc_nvmshadow", IOV_CC_NVMSHADOW, 0, 0, IOVT_BUFFER, 0 },
+ {"ramsize", IOV_RAMSIZE, 0, 0, IOVT_UINT32, 0 },
+ {"ramstart", IOV_RAMSTART, 0, 0, IOVT_UINT32, 0 },
+ {"pcie_dmaxfer", IOV_PCIE_DMAXFER, 0, 0, IOVT_BUFFER, 3 * sizeof(int32) },
+ {"pcie_suspend", IOV_PCIE_SUSPEND, 0, 0, IOVT_UINT32, 0 },
+#ifdef PCIE_OOB
+ {"oob_bt_reg_on", IOV_OOB_BT_REG_ON, 0, 0, IOVT_UINT32, 0 },
+ {"oob_enable", IOV_OOB_ENABLE, 0, 0, IOVT_UINT32, 0 },
+#endif /* PCIE_OOB */
{"sleep_allowed", IOV_SLEEP_ALLOWED, 0, 0, IOVT_BOOL, 0 },
- {"dngl_isolation", IOV_DONGLEISOLATION, 0, 0, IOVT_UINT32, 0 },
+ {"dngl_isolation", IOV_DONGLEISOLATION, 0, 0, IOVT_UINT32, 0 },
{"ltrsleep_on_unload", IOV_LTRSLEEPON_UNLOOAD, 0, 0, IOVT_UINT32, 0 },
- {"dump_ringupdblk", IOV_DUMP_RINGUPD_BLOCK, 0, 0, IOVT_BUFFER, 0 },
- {"dma_ring_indices", IOV_DMA_RINGINDICES, 0, 0, IOVT_UINT32, 0},
+ {"dump_ringupdblk", IOV_DUMP_RINGUPD_BLOCK, 0, 0, IOVT_BUFFER, 0 },
+ {"dma_ring_indices", IOV_DMA_RINGINDICES, 0, 0, IOVT_UINT32, 0},
{"metadata_dbg", IOV_METADATA_DBG, 0, 0, IOVT_BOOL, 0 },
- {"rx_metadata_len", IOV_RX_METADATALEN, 0, 0, IOVT_UINT32, 0 },
- {"tx_metadata_len", IOV_TX_METADATALEN, 0, 0, IOVT_UINT32, 0 },
- {"db1_for_mb", IOV_DB1_FOR_MB, 0, 0, IOVT_UINT32, 0 },
+ {"rx_metadata_len", IOV_RX_METADATALEN, 0, 0, IOVT_UINT32, 0 },
+ {"tx_metadata_len", IOV_TX_METADATALEN, 0, 0, IOVT_UINT32, 0 },
+ {"db1_for_mb", IOV_DB1_FOR_MB, 0, 0, IOVT_UINT32, 0 },
{"txp_thresh", IOV_TXP_THRESHOLD, 0, 0, IOVT_UINT32, 0 },
- {"buzzz_dump", IOV_BUZZZ_DUMP, 0, 0, IOVT_UINT32, 0 },
- {"flow_prio_map", IOV_FLOW_PRIO_MAP, 0, 0, IOVT_UINT32, 0 },
- {"rxbound", IOV_RXBOUND, 0, 0, IOVT_UINT32, 0 },
- {"txbound", IOV_TXBOUND, 0, 0, IOVT_UINT32, 0 },
- {"fw_hang_report", IOV_HANGREPORT, 0, 0, IOVT_BOOL, 0 },
+ {"buzzz_dump", IOV_BUZZZ_DUMP, 0, 0, IOVT_UINT32, 0 },
+ {"flow_prio_map", IOV_FLOW_PRIO_MAP, 0, 0, IOVT_UINT32, 0 },
+#ifdef DHD_PCIE_RUNTIMEPM
+ {"idletime", IOV_IDLETIME, 0, 0, IOVT_INT32, 0 },
+#endif /* DHD_PCIE_RUNTIMEPM */
+ {"rxbound", IOV_RXBOUND, 0, 0, IOVT_UINT32, 0 },
+ {"txbound", IOV_TXBOUND, 0, 0, IOVT_UINT32, 0 },
+ {"fw_hang_report", IOV_HANGREPORT, 0, 0, IOVT_BOOL, 0 },
{"h2d_mb_data", IOV_H2D_MAILBOXDATA, 0, 0, IOVT_UINT32, 0 },
{"inforings", IOV_INFORINGS, 0, 0, IOVT_UINT32, 0 },
{"h2d_phase", IOV_H2D_PHASE, 0, 0, IOVT_UINT32, 0 },
{"force_trap_bad_h2d_phase", IOV_H2D_ENABLE_TRAP_BADPHASE, 0, 0,
IOVT_UINT32, 0 },
{"h2d_max_txpost", IOV_H2D_TXPOST_MAX_ITEM, 0, 0, IOVT_UINT32, 0 },
- {"trap_data", IOV_TRAPDATA, 0, 0, IOVT_BUFFER, 0 },
+ {"trap_data", IOV_TRAPDATA, 0, 0, IOVT_BUFFER, 0 },
{"trap_data_raw", IOV_TRAPDATA_RAW, 0, 0, IOVT_BUFFER, 0 },
- {"cto_prevention", IOV_CTO_PREVENTION, 0, 0, IOVT_UINT32, 0 },
+ {"cto_prevention", IOV_CTO_PREVENTION, 0, 0, IOVT_UINT32, 0 },
{"pcie_wd_reset", IOV_PCIE_WD_RESET, 0, 0, IOVT_BOOL, 0 },
- {"dump_dongle", IOV_DUMP_DONGLE, 0, 0, IOVT_BUFFER,
- MAX(sizeof(dump_dongle_in_t), sizeof(dump_dongle_out_t))},
- {"clear_ring", IOV_CLEAR_RING, 0, 0, IOVT_UINT32, 0 },
- {"hwa_enab_bmap", IOV_HWA_ENAB_BMAP, 0, 0, IOVT_UINT32, 0 },
+ {"cto_threshold", IOV_CTO_THRESHOLD, 0, 0, IOVT_UINT32, 0 },
+#ifdef DHD_EFI
+ {"control_signal", IOV_CONTROL_SIGNAL, 0, 0, IOVT_UINT32, 0},
+#if defined(PCIE_OOB) || defined(PCIE_INB_DW)
+ {"deep_sleep", IOV_DEEP_SLEEP, 0, 0, IOVT_UINT32, 0},
+#endif /* PCIE_OOB || PCIE_INB_DW */
+#endif /* DHD_EFI */
+ {"inb_dw_enable", IOV_INB_DW_ENABLE, 0, 0, IOVT_UINT32, 0 },
+#ifdef DEVICE_TX_STUCK_DETECT
+ {"dev_tx_stuck_monitor", IOV_DEVICE_TX_STUCK_DETECT, 0, 0, IOVT_UINT32, 0 },
+#endif /* DEVICE_TX_STUCK_DETECT */
{"idma_enable", IOV_IDMA_ENABLE, 0, 0, IOVT_UINT32, 0 },
{"ifrm_enable", IOV_IFRM_ENABLE, 0, 0, IOVT_UINT32, 0 },
- {"dar_enable", IOV_DAR_ENABLE, 0, 0, IOVT_UINT32, 0 },
- {"cap", IOV_DNGL_CAPS, 0, 0, IOVT_BUFFER, 0},
-#if defined(DEBUGGER) || defined(DHD_DSCOPE)
- {"gdb_server", IOV_GDB_SERVER, 0, 0, IOVT_UINT32, 0 },
-#endif /* DEBUGGER || DHD_DSCOPE */
- {"inb_dw_enable", IOV_INB_DW_ENABLE, 0, 0, IOVT_UINT32, 0 },
- {"cto_threshold", IOV_CTO_THRESHOLD, 0, 0, IOVT_UINT32, 0 },
- {"hscbsize", IOV_HSCBSIZE, 0, 0, IOVT_UINT32, 0 },
-#ifdef DHD_HP2P
- {"hp2p_enable", IOV_HP2P_ENABLE, 0, 0, IOVT_UINT32, 0 },
- {"hp2p_pkt_thresh", IOV_HP2P_PKT_THRESHOLD, 0, 0, IOVT_UINT32, 0 },
- {"hp2p_time_thresh", IOV_HP2P_TIME_THRESHOLD, 0, 0, IOVT_UINT32, 0 },
- {"hp2p_pkt_expiry", IOV_HP2P_PKT_EXPIRY, 0, 0, IOVT_UINT32, 0 },
- {"hp2p_txcpl_maxitems", IOV_HP2P_TXCPL_MAXITEMS, 0, 0, IOVT_UINT32, 0 },
- {"hp2p_rxcpl_maxitems", IOV_HP2P_RXCPL_MAXITEMS, 0, 0, IOVT_UINT32, 0 },
-#endif // endif
- {"extdtxs_in_txcpl", IOV_EXTDTXS_IN_TXCPL, 0, 0, IOVT_UINT32, 0 },
- {"hostrdy_after_init", IOV_HOSTRDY_AFTER_INIT, 0, 0, IOVT_UINT32, 0 },
+ {"clear_ring", IOV_CLEAR_RING, 0, 0, IOVT_UINT32, 0 },
+#ifdef DHD_EFI
+ {"properties", IOV_WIFI_PROPERTIES, 0, 0, IOVT_BUFFER, 0},
+ {"otp_dump", IOV_OTP_DUMP, 0, 0, IOVT_BUFFER, 0},
+#endif
{NULL, 0, 0, 0, 0, 0 }
};
-#define MAX_READ_TIMEOUT 2 * 1000 * 1000
+
+#define MAX_READ_TIMEOUT 5 * 1000 * 1000
#ifndef DHD_RXBOUND
#define DHD_RXBOUND 64
-#endif // endif
+#endif
#ifndef DHD_TXBOUND
#define DHD_TXBOUND 64
-#endif // endif
+#endif
#define DHD_INFORING_BOUND 32
-#define DHD_BTLOGRING_BOUND 32
uint dhd_rxbound = DHD_RXBOUND;
uint dhd_txbound = DHD_TXBOUND;
-#if defined(DEBUGGER) || defined(DHD_DSCOPE)
-/** the GDB debugger layer will call back into this (bus) layer to read/write dongle memory */
-static struct dhd_gdb_bus_ops_s bus_ops = {
- .read_u16 = dhdpcie_bus_rtcm16,
- .read_u32 = dhdpcie_bus_rtcm32,
- .write_u32 = dhdpcie_bus_wtcm32,
-};
-#endif /* DEBUGGER || DHD_DSCOPE */
-
-bool
-dhd_bus_get_flr_force_fail(struct dhd_bus *bus)
-{
- return bus->flr_force_fail;
-}
-
/**
* Register/Unregister functions are called by the main DHD entry point (eg module insertion) to
* link with the bus driver, in order to look for or await the device.
return;
}
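/*
 * Minimal usage sketch for the register/unregister pairing described above,
 * assuming the usual Linux DHD entry points dhd_bus_register() and
 * dhd_bus_unregister(); the exact names and return conventions are
 * assumptions, not part of this patch.
 */
static int __init example_wlan_module_init(void)
{
	/* Links with the PCIe bus driver so it can look for, or await, the device. */
	return dhd_bus_register();
}

static void __exit example_wlan_module_exit(void)
{
	/* Drops the bus driver linkage registered above. */
	dhd_bus_unregister();
}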
+
/** returns a host virtual address */
uint32 *
dhdpcie_bus_reg_map(osl_t *osh, ulong addr, int size)
}
/**
- * return H2D Doorbell registers address
- * use DAR registers instead of enum register for corerev >= 23 (4347B0)
- */
-static INLINE uint
-dhd_bus_db0_addr_get(struct dhd_bus *bus)
-{
- uint addr = PCIH2D_MailBox;
- uint dar_addr = DAR_PCIH2D_DB0_0(bus->sih->buscorerev);
-
- return ((DAR_ACTIVE(bus->dhd)) ? dar_addr : addr);
-}
-
-static INLINE uint
-dhd_bus_db0_addr_2_get(struct dhd_bus *bus)
-{
- return ((DAR_ACTIVE(bus->dhd)) ? DAR_PCIH2D_DB2_0(bus->sih->buscorerev) : PCIH2D_MailBox_2);
-}
-
-static INLINE uint
-dhd_bus_db1_addr_get(struct dhd_bus *bus)
-{
- return ((DAR_ACTIVE(bus->dhd)) ? DAR_PCIH2D_DB0_1(bus->sih->buscorerev) : PCIH2D_DB1);
-}
-
-static INLINE uint
-dhd_bus_db1_addr_1_get(struct dhd_bus *bus)
-{
- return ((DAR_ACTIVE(bus->dhd)) ? DAR_PCIH2D_DB1_1(bus->sih->buscorerev) : PCIH2D_DB1_1);
-}
-
-/*
- * WAR for SWWLAN-215055 - [4378B0] ARM fails to boot without DAR WL domain request
- */
-static INLINE void
-dhd_bus_pcie_pwr_req_wl_domain(struct dhd_bus *bus, uint offset, bool enable)
-{
- if (enable) {
- si_corereg(bus->sih, bus->sih->buscoreidx, offset,
- SRPWR_DMN1_ARMBPSD_MASK << SRPWR_REQON_SHIFT,
- SRPWR_DMN1_ARMBPSD_MASK << SRPWR_REQON_SHIFT);
- } else {
- si_corereg(bus->sih, bus->sih->buscoreidx, offset,
- SRPWR_DMN1_ARMBPSD_MASK << SRPWR_REQON_SHIFT, 0);
- }
-}
-
-static INLINE void
-_dhd_bus_pcie_pwr_req_clear_cmn(struct dhd_bus *bus)
-{
- uint mask;
-
- /*
- * If multiple de-asserts, decrement ref and return
- * Clear power request when only one pending
- * so initial request is not removed unexpectedly
- */
- if (bus->pwr_req_ref > 1) {
- bus->pwr_req_ref--;
- return;
- }
-
- ASSERT(bus->pwr_req_ref == 1);
-
- if (MULTIBP_ENAB(bus->sih)) {
- /* Common BP controlled by HW so only need to toggle WL/ARM backplane */
- mask = SRPWR_DMN1_ARMBPSD_MASK;
- } else {
- mask = SRPWR_DMN0_PCIE_MASK | SRPWR_DMN1_ARMBPSD_MASK;
- }
-
- si_srpwr_request(bus->sih, mask, 0);
- bus->pwr_req_ref = 0;
-}
-
-static INLINE void
-dhd_bus_pcie_pwr_req_clear(struct dhd_bus *bus)
-{
- unsigned long flags = 0;
-
- DHD_GENERAL_LOCK(bus->dhd, flags);
- _dhd_bus_pcie_pwr_req_clear_cmn(bus);
- DHD_GENERAL_UNLOCK(bus->dhd, flags);
-}
-
-static INLINE void
-dhd_bus_pcie_pwr_req_clear_nolock(struct dhd_bus *bus)
-{
- _dhd_bus_pcie_pwr_req_clear_cmn(bus);
-}
-
-static INLINE void
-_dhd_bus_pcie_pwr_req_cmn(struct dhd_bus *bus)
-{
- uint mask, val;
-
- /* If multiple request entries, increment reference and return */
- if (bus->pwr_req_ref > 0) {
- bus->pwr_req_ref++;
- return;
- }
-
- ASSERT(bus->pwr_req_ref == 0);
-
- if (MULTIBP_ENAB(bus->sih)) {
- /* Common BP controlled by HW so only need to toggle WL/ARM backplane */
- mask = SRPWR_DMN1_ARMBPSD_MASK;
- val = SRPWR_DMN1_ARMBPSD_MASK;
- } else {
- mask = SRPWR_DMN0_PCIE_MASK | SRPWR_DMN1_ARMBPSD_MASK;
- val = SRPWR_DMN0_PCIE_MASK | SRPWR_DMN1_ARMBPSD_MASK;
- }
-
- si_srpwr_request(bus->sih, mask, val);
-
- bus->pwr_req_ref = 1;
-}
-
-static INLINE void
-dhd_bus_pcie_pwr_req(struct dhd_bus *bus)
-{
- unsigned long flags = 0;
-
- DHD_GENERAL_LOCK(bus->dhd, flags);
- _dhd_bus_pcie_pwr_req_cmn(bus);
- DHD_GENERAL_UNLOCK(bus->dhd, flags);
-}
-
-static INLINE void
-_dhd_bus_pcie_pwr_req_pd0123_cmn(struct dhd_bus *bus)
-{
- uint mask, val;
-
- mask = SRPWR_DMN_ALL_MASK(bus->sih);
- val = SRPWR_DMN_ALL_MASK(bus->sih);
-
- si_srpwr_request(bus->sih, mask, val);
-}
-
-static INLINE void
-dhd_bus_pcie_pwr_req_reload_war(struct dhd_bus *bus)
-{
- unsigned long flags = 0;
-
- DHD_GENERAL_LOCK(bus->dhd, flags);
- _dhd_bus_pcie_pwr_req_pd0123_cmn(bus);
- DHD_GENERAL_UNLOCK(bus->dhd, flags);
-}
-
-static INLINE void
-_dhd_bus_pcie_pwr_req_clear_pd0123_cmn(struct dhd_bus *bus)
-{
- uint mask;
-
- mask = SRPWR_DMN_ALL_MASK(bus->sih);
-
- si_srpwr_request(bus->sih, mask, 0);
-}
-
-static INLINE void
-dhd_bus_pcie_pwr_req_clear_reload_war(struct dhd_bus *bus)
-{
- unsigned long flags = 0;
-
- DHD_GENERAL_LOCK(bus->dhd, flags);
- _dhd_bus_pcie_pwr_req_clear_pd0123_cmn(bus);
- DHD_GENERAL_UNLOCK(bus->dhd, flags);
-}
-
-static INLINE void
-dhd_bus_pcie_pwr_req_nolock(struct dhd_bus *bus)
-{
- _dhd_bus_pcie_pwr_req_cmn(bus);
-}
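/*
 * Usage sketch for the power-request helpers above (assumed call pattern,
 * not code from this patch): callers bracket backplane register access with
 * a request/clear pair, and the reference count keeps nested requests from
 * dropping the initial request early.
 */
static void example_backplane_access(struct dhd_bus *bus)
{
	dhd_bus_pcie_pwr_req(bus);		/* ref 0 -> 1: asserts the WL/ARM domain request */
	/* ... read or write ARM/WL backplane registers here ... */
	dhd_bus_pcie_pwr_req_clear(bus);	/* ref 1 -> 0: releases the request */
}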
-
-bool
-dhdpcie_chip_support_msi(dhd_bus_t *bus)
-{
- DHD_INFO(("%s: buscorerev=%d chipid=0x%x\n",
- __FUNCTION__, bus->sih->buscorerev, si_chipid(bus->sih)));
- if (bus->sih->buscorerev <= 14 ||
- si_chipid(bus->sih) == BCM4375_CHIP_ID ||
- si_chipid(bus->sih) == BCM4362_CHIP_ID ||
- si_chipid(bus->sih) == BCM43751_CHIP_ID ||
- si_chipid(bus->sih) == BCM4361_CHIP_ID ||
- si_chipid(bus->sih) == BCM4359_CHIP_ID) {
- return FALSE;
- } else {
- return TRUE;
- }
-}
-
-/**
- * Called once for each hardware (dongle) instance that this DHD manages.
- *
* 'regs' is the host virtual address that maps to the start of the PCIe BAR0 window. The first 4096
* bytes in this window are mapped to the backplane address in the PCIEBAR0Window register. The
* precondition is that the PCIEBAR0Window register 'points' at the PCIe core.
*
* 'tcm' is the *host* virtual address at which tcm is mapped.
*/
-int dhdpcie_bus_attach(osl_t *osh, dhd_bus_t **bus_ptr,
+dhd_bus_t* dhdpcie_bus_attach(osl_t *osh,
volatile char *regs, volatile char *tcm, void *pci_dev)
{
- dhd_bus_t *bus = NULL;
- int ret = BCME_OK;
+ dhd_bus_t *bus;
DHD_TRACE(("%s: ENTER\n", __FUNCTION__));
do {
if (!(bus = MALLOCZ(osh, sizeof(dhd_bus_t)))) {
DHD_ERROR(("%s: MALLOC of dhd_bus_t failed\n", __FUNCTION__));
- ret = BCME_NORESOURCE;
break;
}
/* Save pci_dev into dhd_bus, as it may be needed in dhd_attach */
bus->dev = (struct pci_dev *)pci_dev;
+
dll_init(&bus->flowring_active_list);
#ifdef IDLE_TX_FLOW_MGMT
bus->active_list_last_process_ts = OSL_SYSUPTIME();
#endif /* IDLE_TX_FLOW_MGMT */
+#ifdef DEVICE_TX_STUCK_DETECT
+ /* Enable the Device stuck detection feature by default */
+ bus->dev_tx_stuck_monitor = TRUE;
+ bus->device_tx_stuck_check = OSL_SYSUPTIME();
+#endif /* DEVICE_TX_STUCK_DETECT */
+
/* Attach pcie shared structure */
if (!(bus->pcie_sh = MALLOCZ(osh, sizeof(pciedev_shared_t)))) {
DHD_ERROR(("%s: MALLOC of bus->pcie_sh failed\n", __FUNCTION__));
- ret = BCME_NORESOURCE;
break;
}
if (dhdpcie_dongle_attach(bus)) {
DHD_ERROR(("%s: dhdpcie_probe_attach failed\n", __FUNCTION__));
- ret = BCME_NOTREADY;
break;
}
/* software resources */
if (!(bus->dhd = dhd_attach(osh, bus, PCMSGBUF_HDRLEN))) {
DHD_ERROR(("%s: dhd_attach failed\n", __FUNCTION__));
- ret = BCME_NORESOURCE;
+
break;
}
- DHD_ERROR(("%s: making DHD_BUS_DOWN\n", __FUNCTION__));
bus->dhd->busstate = DHD_BUS_DOWN;
- bus->dhd->hostrdy_after_init = TRUE;
bus->db1_for_mb = TRUE;
bus->dhd->hang_report = TRUE;
bus->use_mailbox = FALSE;
bus->use_d0_inform = FALSE;
- bus->intr_enabled = FALSE;
- bus->flr_force_fail = FALSE;
- /* By default disable HWA and enable it via iovar */
- bus->hwa_enab_bmap = 0;
- /* update the dma indices if set through module parameter. */
- if (dma_ring_indices != 0) {
- dhdpcie_set_dma_ring_indices(bus->dhd, dma_ring_indices);
- }
- /* update h2d phase support if set through module parameter */
- bus->dhd->h2d_phase_supported = h2d_phase ? TRUE : FALSE;
- /* update force trap on bad phase if set through module parameter */
- bus->dhd->force_dongletrap_on_bad_h2d_phase =
- force_trap_bad_h2d_phase ? TRUE : FALSE;
#ifdef IDLE_TX_FLOW_MGMT
bus->enable_idle_flowring_mgmt = FALSE;
#endif /* IDLE_TX_FLOW_MGMT */
bus->irq_registered = FALSE;
-#ifdef DHD_MSI_SUPPORT
- bus->d2h_intr_method = enable_msi && dhdpcie_chip_support_msi(bus) ?
- PCIE_MSI : PCIE_INTX;
-#else
- bus->d2h_intr_method = PCIE_INTX;
-#endif /* DHD_MSI_SUPPORT */
-
-#ifdef DHD_HP2P
- bus->hp2p_txcpl_max_items = DHD_MAX_ITEMS_HPP_TXCPL_RING;
- bus->hp2p_rxcpl_max_items = DHD_MAX_ITEMS_HPP_RXCPL_RING;
-#endif /* DHD_HP2P */
-
DHD_TRACE(("%s: EXIT SUCCESS\n",
__FUNCTION__));
+#ifdef DHD_FW_COREDUMP
g_dhd_bus = bus;
- *bus_ptr = bus;
- return ret;
+#endif
+ return bus;
} while (0);
DHD_TRACE(("%s: EXIT FAILURE\n", __FUNCTION__));
if (bus) {
MFREE(osh, bus, sizeof(dhd_bus_t));
}
-
- return ret;
-}
-
-bool
-dhd_bus_skip_clm(dhd_pub_t *dhdp)
-{
- switch (dhd_bus_chip_id(dhdp)) {
- case BCM4369_CHIP_ID:
- return TRUE;
- default:
- return FALSE;
- }
+ return NULL;
}
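/*
 * Caller sketch for dhdpcie_bus_attach(), using the dhd_bus_t*-returning
 * variant on the '+' side above together with the stock Linux pci_iomap()
 * helpers. The BAR indices, error handling and NULL 'osh' are simplifying
 * assumptions; a real probe obtains 'osh' from osl_attach() and picks the
 * TCM BAR per chip.
 */
static int example_pcie_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	volatile char *regs, *tcm;
	dhd_bus_t *bus;

	if (pci_enable_device(pdev))
		return -ENODEV;
	pci_set_master(pdev);

	/* BAR0 window must point at the PCIe core (PCIEBAR0Window precondition) */
	regs = (volatile char *)pci_iomap(pdev, 0, 0);
	/* host mapping of the dongle TCM (BAR index assumed here) */
	tcm = (volatile char *)pci_iomap(pdev, 2, 0);
	if (!regs || !tcm)
		return -ENOMEM;

	bus = dhdpcie_bus_attach(NULL /* osh */, regs, tcm, pdev);
	return bus ? 0 : -ENODEV;
}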
uint
return bus->dhd;
}
-void *
+const void *
dhd_bus_sih(struct dhd_bus *bus)
{
- return (void *)bus->sih;
+ return (const void *)bus->sih;
}
void *
return bus->sih->chippkg;
}
-/** Conduct Loopback test */
-int
-dhd_bus_dmaxfer_lpbk(dhd_pub_t *dhdp, uint32 type)
-{
- dma_xfer_info_t dmaxfer_lpbk;
- int ret = BCME_OK;
-
-#define PCIE_DMAXFER_LPBK_LENGTH 4096
- memset(&dmaxfer_lpbk, 0, sizeof(dma_xfer_info_t));
- dmaxfer_lpbk.version = DHD_DMAXFER_VERSION;
- dmaxfer_lpbk.length = (uint16)sizeof(dma_xfer_info_t);
- dmaxfer_lpbk.num_bytes = PCIE_DMAXFER_LPBK_LENGTH;
- dmaxfer_lpbk.type = type;
- dmaxfer_lpbk.should_wait = TRUE;
-
- ret = dhd_bus_iovar_op(dhdp, "pcie_dmaxfer", NULL, 0,
- (char *)&dmaxfer_lpbk, sizeof(dma_xfer_info_t), IOV_SET);
- if (ret < 0) {
- DHD_ERROR(("failed to start PCIe Loopback Test!!! "
- "Type:%d Reason:%d\n", type, ret));
- return ret;
- }
-
- if (dmaxfer_lpbk.status != DMA_XFER_SUCCESS) {
- DHD_ERROR(("failed to check PCIe Loopback Test!!! "
- "Type:%d Status:%d Error code:%d\n", type,
- dmaxfer_lpbk.status, dmaxfer_lpbk.error_code));
- ret = BCME_ERROR;
- } else {
- DHD_ERROR(("successful to check PCIe Loopback Test"
- " Type:%d\n", type));
- }
-#undef PCIE_DMAXFER_LPBK_LENGTH
-
- return ret;
-}
-
-/* Log the lastest DPC schedule time */
-void
-dhd_bus_set_dpc_sched_time(dhd_pub_t *dhdp)
-{
- dhdp->bus->dpc_sched_time = OSL_LOCALTIME_NS();
-}
-
-/* Check if there is DPC scheduling errors */
-bool
-dhd_bus_query_dpc_sched_errors(dhd_pub_t *dhdp)
-{
- dhd_bus_t *bus = dhdp->bus;
- bool sched_err;
-
- if (bus->dpc_entry_time < bus->isr_exit_time) {
- /* Kernel doesn't schedule the DPC after processing PCIe IRQ */
- sched_err = TRUE;
- } else if (bus->dpc_entry_time < bus->resched_dpc_time) {
- /* Kernel doesn't schedule the DPC after DHD tries to reschedule
- * the DPC due to pending work items to be processed.
- */
- sched_err = TRUE;
- } else {
- sched_err = FALSE;
- }
-
- if (sched_err) {
- /* print out minimum timestamp info */
- DHD_ERROR(("isr_entry_time="SEC_USEC_FMT
- " isr_exit_time="SEC_USEC_FMT
- " dpc_entry_time="SEC_USEC_FMT
- "\ndpc_exit_time="SEC_USEC_FMT
- " dpc_sched_time="SEC_USEC_FMT
- " resched_dpc_time="SEC_USEC_FMT"\n",
- GET_SEC_USEC(bus->isr_entry_time),
- GET_SEC_USEC(bus->isr_exit_time),
- GET_SEC_USEC(bus->dpc_entry_time),
- GET_SEC_USEC(bus->dpc_exit_time),
- GET_SEC_USEC(bus->dpc_sched_time),
- GET_SEC_USEC(bus->resched_dpc_time)));
- }
-
- return sched_err;
-}
-
/** Read and clear intstatus. This should be called with interrupts disabled or inside isr */
uint32
dhdpcie_bus_intstatus(dhd_bus_t *bus)
{
uint32 intstatus = 0;
+#ifndef DHD_READ_INTSTATUS_IN_DPC
uint32 intmask = 0;
+#endif /* DHD_READ_INTSTATUS_IN_DPC */
- if (bus->bus_low_power_state == DHD_BUS_D3_ACK_RECIEVED) {
- DHD_ERROR(("%s: trying to clear intstatus after D3 Ack\n", __FUNCTION__));
+ if ((bus->dhd->busstate == DHD_BUS_SUSPEND || bus->d3_suspend_pending) &&
+ bus->wait_for_d3_ack) {
+#ifdef DHD_EFI
+ DHD_INFO(("%s: trying to clear intstatus during suspend (%d)"
+ " or suspend in progress %d\n",
+ __FUNCTION__, bus->dhd->busstate, bus->d3_suspend_pending));
+#else
+ DHD_ERROR(("%s: trying to clear intstatus during suspend (%d)"
+ " or suspend in progress %d\n",
+ __FUNCTION__, bus->dhd->busstate, bus->d3_suspend_pending));
+#endif /* !DHD_EFI */
return intstatus;
}
if ((bus->sih->buscorerev == 6) || (bus->sih->buscorerev == 4) ||
intstatus &= I_MB;
} else {
/* this is a PCIE core register..not a config register... */
- intstatus = si_corereg(bus->sih, bus->sih->buscoreidx, bus->pcie_mailbox_int, 0, 0);
+ intstatus = si_corereg(bus->sih, bus->sih->buscoreidx, PCIMailBoxInt, 0, 0);
+#ifndef DHD_READ_INTSTATUS_IN_DPC
/* this is a PCIE core register..not a config register... */
- intmask = si_corereg(bus->sih, bus->sih->buscoreidx, bus->pcie_mailbox_mask, 0, 0);
+ intmask = si_corereg(bus->sih, bus->sih->buscoreidx, PCIMailBoxMask, 0, 0);
+
+ intstatus &= intmask;
+#endif /* DHD_READ_INTSTATUS_IN_DPC */
/* Is device removed. intstatus & intmask read 0xffffffff */
- if (intstatus == (uint32)-1 || intmask == (uint32)-1) {
+ if (intstatus == (uint32)-1) {
DHD_ERROR(("%s: Device is removed or Link is down.\n", __FUNCTION__));
- DHD_ERROR(("%s: INTSTAT : 0x%x INTMASK : 0x%x.\n",
- __FUNCTION__, intstatus, intmask));
- bus->is_linkdown = TRUE;
- dhd_pcie_debug_info_dump(bus->dhd);
+#ifdef CUSTOMER_HW4_DEBUG
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
+ bus->dhd->hang_reason = HANG_REASON_PCIE_LINK_DOWN;
+ dhd_os_send_hang_message(bus->dhd);
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27) && OEM_ANDROID */
+#endif /* CUSTOMER_HW4_DEBUG */
return intstatus;
}
-#ifndef DHD_READ_INTSTATUS_IN_DPC
- intstatus &= intmask;
-#endif /* DHD_READ_INTSTATUS_IN_DPC */
/*
* The fourth argument to si_corereg is the "mask" of register fields to update.
* Since we update only a few fields of the "mask" bit map, we should not write
* back everything we read; doing so might clear/ack interrupts that have not
* been handled yet.
*/
- si_corereg(bus->sih, bus->sih->buscoreidx, bus->pcie_mailbox_int, bus->def_intmask,
+ si_corereg(bus->sih, bus->sih->buscoreidx, PCIMailBoxInt, bus->def_intmask,
intstatus);
intstatus &= bus->def_intmask;
return intstatus;
}
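
The masked write-back above is the usual pattern for write-one-to-clear (W1C) interrupt status registers: only the bits the driver actually handles are acked, so interrupts that are pending but not yet processed stay asserted. A tiny standalone sketch of the idea, with a hypothetical HANDLED_INT_MASK standing in for the driver's def_intmask:

#include <stdint.h>

/* Bits of the (hypothetical) W1C status register that this driver handles;
 * everything else belongs to some other consumer.
 */
#define HANDLED_INT_MASK 0x0000000Fu

/* Given the raw status just read, return the value to write back as the ack.
 * Writing only the handled bits leaves unhandled-but-pending interrupts
 * asserted; writing the full raw status would silently ack them too.
 */
static uint32_t intstatus_ack_value(uint32_t raw_status)
{
	return raw_status & HANDLED_INT_MASK;
}
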
-void
-dhdpcie_cto_recovery_handler(dhd_pub_t *dhd)
-{
- dhd_bus_t *bus = dhd->bus;
- int ret;
-
- /* Disable PCIe Runtime PM to avoid D3_ACK timeout.
- */
- DHD_DISABLE_RUNTIME_PM(dhd);
-
- /* Sleep for 1 seconds so that any AXI timeout
- * if running on ALP clock also will be captured
- */
- OSL_SLEEP(1000);
-
- /* reset backplane and cto,
- * then access through pcie is recovered.
- */
- ret = dhdpcie_cto_error_recovery(bus);
- if (!ret) {
- /* Waiting for backplane reset */
- OSL_SLEEP(10);
- /* Dump debug Info */
- dhd_prot_debug_info_print(bus->dhd);
- /* Dump console buffer */
- dhd_bus_dump_console_buffer(bus);
-#if defined(DHD_FW_COREDUMP)
- /* save core dump or write to a file */
- if (!bus->is_linkdown && bus->dhd->memdump_enabled) {
-#ifdef DHD_SSSR_DUMP
- bus->dhd->collect_sssr = TRUE;
-#endif /* DHD_SSSR_DUMP */
- bus->dhd->memdump_type = DUMP_TYPE_CTO_RECOVERY;
- dhdpcie_mem_dump(bus);
- }
-#endif /* DHD_FW_COREDUMP */
- }
- bus->is_linkdown = TRUE;
- bus->dhd->hang_reason = HANG_REASON_PCIE_CTO_DETECT;
- /* Send HANG event */
- dhd_os_send_hang_message(bus->dhd);
-}
-
/**
* Name: dhdpcie_bus_isr
* Parameters:
DHD_TRACE(("%s: Enter\n", __FUNCTION__));
/* verify argument */
if (!bus) {
- DHD_LOG_MEM(("%s : bus is null pointer, exit \n", __FUNCTION__));
+ DHD_ERROR(("%s : bus is null pointer, exit \n", __FUNCTION__));
break;
}
if (bus->dhd->dongle_reset) {
- DHD_LOG_MEM(("%s : dongle is reset\n", __FUNCTION__));
break;
}
if (bus->dhd->busstate == DHD_BUS_DOWN) {
- DHD_LOG_MEM(("%s : bus is down \n", __FUNCTION__));
break;
}
- /* avoid processing of interrupts until msgbuf prot is inited */
- if (!bus->intr_enabled) {
- DHD_INFO(("%s, not ready to receive interrupts\n", __FUNCTION__));
- break;
- }
- if (PCIECTO_ENAB(bus)) {
+ if (PCIECTO_ENAB(bus->dhd)) {
/* read pci_intstatus */
intstatus = dhdpcie_bus_cfg_read_dword(bus, PCI_INT_STATUS, 4);
- if (intstatus == (uint32)-1) {
- DHD_ERROR(("%s : Invalid intstatus for cto recovery\n",
- __FUNCTION__));
- dhdpcie_disable_irq_nosync(bus);
- break;
- }
-
if (intstatus & PCI_CTO_INT_MASK) {
- DHD_ERROR(("%s: ##### CTO RECOVERY REPORTED BY DONGLE "
- "intstat=0x%x enab=%d\n", __FUNCTION__,
- intstatus, bus->cto_enable));
- bus->cto_triggered = 1;
- /*
- * DAR still accessible
+ /* reset backplane and cto,
+ * so that access through PCIe is recovered.
*/
- dhd_bus_dump_dar_registers(bus);
-
- /* Disable further PCIe interrupts */
- dhdpcie_disable_irq_nosync(bus); /* Disable interrupt!! */
- /* Stop Tx flow */
- dhd_bus_stop_queue(bus);
-
- /* Schedule CTO recovery */
- dhd_schedule_cto_recovery(bus->dhd);
-
+ dhdpcie_cto_error_recovery(bus);
return TRUE;
}
}
- if (bus->d2h_intr_method == PCIE_MSI) {
- /* For MSI, as intstatus is cleared by firmware, no need to read */
- goto skip_intstatus_read;
- }
-
#ifndef DHD_READ_INTSTATUS_IN_DPC
intstatus = dhdpcie_bus_intstatus(bus);
/* Check if the interrupt is ours or not */
if (intstatus == 0) {
- /* in EFI since we poll for interrupt, this message will flood the logs
- * so disable this for EFI
- */
- DHD_LOG_MEM(("%s : this interrupt is not ours\n", __FUNCTION__));
- bus->non_ours_irq_count++;
- bus->last_non_ours_irq_time = OSL_LOCALTIME_NS();
break;
}
/* return error for 0xFFFFFFFF */
if (intstatus == (uint32)-1) {
- DHD_LOG_MEM(("%s : wrong interrupt status val : 0x%x\n",
- __FUNCTION__, intstatus));
dhdpcie_disable_irq_nosync(bus);
- break;
- }
+ bus->is_linkdown = TRUE;
+ return BCME_ERROR;
+ }
-skip_intstatus_read:
/* Overall operation:
* - Mask further interrupts
* - Read/ack intstatus
bus->ipend = TRUE;
bus->isr_intr_disable_count++;
-
- /* For Linux, Macos etc (otherthan NDIS) instead of disabling
- * dongle interrupt by clearing the IntMask, disable directly
- * interrupt from the host side, so that host will not recieve
- * any interrupts at all, even though dongle raises interrupts
- */
- dhdpcie_disable_irq_nosync(bus); /* Disable interrupt!! */
+ dhdpcie_bus_intr_disable(bus); /* Disable interrupt using IntMask!! */
bus->intdis = TRUE;
}
return BCME_OK;
+
}
int
}
int
-dhdpcie_config_restore(dhd_bus_t *bus, bool restore_pmcsr)
+dhdpcie_config_restore(dhd_bus_t *bus, bool restore_pmcsr)
{
uint32 i;
osl_t *osh = bus->osh;
if (restore_pmcsr)
OSL_PCI_WRITE_CONFIG(osh, PCIECFGREG_PM_CSR,
- sizeof(uint32), bus->saved_config.pmcsr);
+ sizeof(uint32), bus->saved_config.pmcsr);
OSL_PCI_WRITE_CONFIG(osh, PCIECFGREG_MSI_CAP, sizeof(uint32), bus->saved_config.msi_cap);
OSL_PCI_WRITE_CONFIG(osh, PCIECFGREG_MSI_ADDR_L, sizeof(uint32),
OSL_PCI_WRITE_CONFIG(osh, PCIECFGREG_PML1_SUB_CTRL2,
sizeof(uint32), bus->saved_config.l1pm1);
- OSL_PCI_WRITE_CONFIG(bus->osh, PCI_BAR0_WIN, sizeof(uint32),
- bus->saved_config.bar0_win);
- dhdpcie_setbar1win(bus, bus->saved_config.bar1_win);
+ OSL_PCI_WRITE_CONFIG(bus->osh, PCI_BAR0_WIN,
+ sizeof(uint32), bus->saved_config.bar0_win);
+ OSL_PCI_WRITE_CONFIG(bus->osh, PCI_BAR1_WIN,
+ sizeof(uint32), bus->saved_config.bar1_win);
return BCME_OK;
}
sizeof(uint32));
bus->saved_config.bar1_win = OSL_PCI_READ_CONFIG(osh, PCI_BAR1_WIN,
sizeof(uint32));
-
return BCME_OK;
}
#ifdef EXYNOS_PCIE_LINKDOWN_RECOVERY
dhd_pub_t *link_recovery = NULL;
#endif /* EXYNOS_PCIE_LINKDOWN_RECOVERY */
-
-static void
-dhdpcie_bus_intr_init(dhd_bus_t *bus)
-{
- uint buscorerev = bus->sih->buscorerev;
- bus->pcie_mailbox_int = PCIMailBoxInt(buscorerev);
- bus->pcie_mailbox_mask = PCIMailBoxMask(buscorerev);
- bus->d2h_mb_mask = PCIE_MB_D2H_MB_MASK(buscorerev);
- bus->def_intmask = PCIE_MB_D2H_MB_MASK(buscorerev);
- if (buscorerev < 64) {
- bus->def_intmask |= PCIE_MB_TOPCIE_FN0_0 | PCIE_MB_TOPCIE_FN0_1;
- }
-}
-
-static void
-dhdpcie_cc_watchdog_reset(dhd_bus_t *bus)
-{
- uint32 wd_en = (bus->sih->buscorerev >= 66) ? WD_SSRESET_PCIE_F0_EN :
- (WD_SSRESET_PCIE_F0_EN | WD_SSRESET_PCIE_ALL_FN_EN);
- pcie_watchdog_reset(bus->osh, bus->sih, WD_ENABLE_MASK, wd_en);
-}
-
-void
-dhdpcie_dongle_reset(dhd_bus_t *bus)
-{
- /* if the pcie link is down, watchdog reset
- * should not be done, as it may hang
- */
- if (bus->is_linkdown) {
- return;
- }
-
- /* dhd_bus_perform_flr will return BCME_UNSUPPORTED if chip is not FLR capable */
- if (dhd_bus_perform_flr(bus, FALSE) == BCME_UNSUPPORTED) {
-#ifdef DHD_USE_BP_RESET
- /* Backplane reset using SPROM cfg register(0x88) for buscorerev <= 24 */
- dhd_bus_perform_bp_reset(bus);
-#else
- /* Legacy chipcommon watchdog reset */
- dhdpcie_cc_watchdog_reset(bus);
-#endif /* DHD_USE_BP_RESET */
- }
-}
-
static bool
dhdpcie_dongle_attach(dhd_bus_t *bus)
{
+
osl_t *osh = bus->osh;
volatile void *regsva = (volatile void*)bus->regs;
- uint16 devid;
+ uint16 devid = bus->cl_devid;
uint32 val;
sbpcieregs_t *sbpcieregs;
- bool dongle_isolation;
DHD_TRACE(("%s: ENTER\n", __FUNCTION__));
bus->alp_only = TRUE;
bus->sih = NULL;
+ /* Set bar0 window to si_enum_base */
+ dhdpcie_bus_cfg_set_bar0_win(bus, SI_ENUM_BASE);
+
/* Checking PCIe bus status with reading configuration space */
val = OSL_PCI_READ_CONFIG(osh, PCI_CFG_VID, sizeof(uint32));
if ((val & 0xFFFF) != VENDOR_BROADCOM) {
DHD_ERROR(("%s : failed to read PCI configuration space!\n", __FUNCTION__));
goto fail;
}
- devid = (val >> 16) & 0xFFFF;
- bus->cl_devid = devid;
-
- /* Set bar0 window to si_enum_base */
- dhdpcie_bus_cfg_set_bar0_win(bus, si_enum_base(devid));
/*
* Checking PCI_SPROM_CONTROL register for preventing invalid address access
goto fail;
}
+#ifdef DHD_EFI
+ /* Save good copy of PCIe config space */
+ if (BCME_OK != dhdpcie_config_save(bus)) {
+ DHD_ERROR(("%s : failed to save PCI configuration space!\n", __FUNCTION__));
+ goto fail;
+ }
+#endif /* DHD_EFI */
+
/* si_attach() will provide an SI handle and scan the backplane */
if (!(bus->sih = si_attach((uint)devid, osh, regsva, PCI_BUS, bus,
&bus->vars, &bus->varsz))) {
goto fail;
}
- /* Configure CTO Prevention functionality */
-#if defined(BCMFPGA_HW)
- DHD_ERROR(("Disable CTO\n"));
- bus->cto_enable = FALSE;
-#else
-#if defined(BCMPCIE_CTO_PREVENTION)
- if (bus->sih->buscorerev >= 24) {
- DHD_ERROR(("Enable CTO\n"));
- bus->cto_enable = TRUE;
- } else
-#endif /* BCMPCIE_CTO_PREVENTION */
- {
- DHD_ERROR(("Disable CTO\n"));
- bus->cto_enable = FALSE;
- }
-#endif /* BCMFPGA_HW */
-
- if (PCIECTO_ENAB(bus)) {
- dhdpcie_cto_init(bus, TRUE);
- }
-
- if (MULTIBP_ENAB(bus->sih) && (bus->sih->buscorerev >= 66)) {
- /*
- * HW JIRA - CRWLPCIEGEN2-672
- * Producer Index Feature which is used by F1 gets reset on F0 FLR
- * fixed in REV68
- */
- if (PCIE_ENUM_RESET_WAR_ENAB(bus->sih->buscorerev)) {
- dhdpcie_ssreset_dis_enum_rst(bus);
- }
-
- /* IOV_DEVRESET could exercise si_detach()/si_attach() again so reset
- * dhdpcie_bus_release_dongle() --> si_detach()
- * dhdpcie_dongle_attach() --> si_attach()
- */
- bus->pwr_req_ref = 0;
- }
-
- if (MULTIBP_ENAB(bus->sih)) {
- dhd_bus_pcie_pwr_req_nolock(bus);
- }
-
- /* Get info on the ARM and SOCRAM cores... */
- /* Should really be qualified by device id */
- if ((si_setcore(bus->sih, ARM7S_CORE_ID, 0)) ||
- (si_setcore(bus->sih, ARMCM3_CORE_ID, 0)) ||
- (si_setcore(bus->sih, ARMCR4_CORE_ID, 0)) ||
- (si_setcore(bus->sih, ARMCA7_CORE_ID, 0))) {
- bus->armrev = si_corerev(bus->sih);
- bus->coreid = si_coreid(bus->sih);
- } else {
- DHD_ERROR(("%s: failed to find ARM core!\n", __FUNCTION__));
- goto fail;
- }
-
- /* CA7 requires coherent bits on */
- if (bus->coreid == ARMCA7_CORE_ID) {
- val = dhdpcie_bus_cfg_read_dword(bus, PCIE_CFG_SUBSYSTEM_CONTROL, 4);
- dhdpcie_bus_cfg_write_dword(bus, PCIE_CFG_SUBSYSTEM_CONTROL, 4,
- (val | PCIE_BARCOHERENTACCEN_MASK));
- }
-
/* Olympic EFI requirement - stop driver load if FW is already running
* need to do this here before pcie_watchdog_reset, because
* pcie_watchdog_reset will put the ARM back into halt state
goto fail;
}
- BCM_REFERENCE(dongle_isolation);
-
- /* For inbuilt drivers pcie clk req will be done by RC,
- * so do not do clkreq from dhd
- */
- if (dhd_download_fw_on_driverload)
- {
- /* Enable CLKREQ# */
- dhdpcie_clkreq(bus->osh, 1, 1);
- }
-
- /*
- * bus->dhd will be NULL if it is called from dhd_bus_attach, so need to reset
- * without checking dongle_isolation flag, but if it is called via some other path
- * like quiesce FLR, then based on dongle_isolation flag, watchdog_reset should
- * be called.
- */
- if (bus->dhd == NULL) {
- /* dhd_attach not yet happened, do watchdog reset */
- dongle_isolation = FALSE;
- } else {
- dongle_isolation = bus->dhd->dongle_isolation;
- }
+ /* Enable CLKREQ# */
+ dhdpcie_clkreq(bus->osh, 1, 1);
-#ifndef DHD_SKIP_DONGLE_RESET_IN_ATTACH
+#ifndef DONGLE_ENABLE_ISOLATION
/*
* Issue CC watchdog to reset all the cores on the chip - similar to rmmod dhd
* This is required to avoid spurious interrupts to the Host and bring back
* dongle to a sane state (on host soft-reboot / watchdog-reboot).
*/
- if (dongle_isolation == FALSE) {
- dhdpcie_dongle_reset(bus);
- }
-#endif /* !DHD_SKIP_DONGLE_RESET_IN_ATTACH */
-
- /* need to set the force_bt_quiesce flag here
- * before calling dhdpcie_dongle_flr_or_pwr_toggle
- */
- bus->force_bt_quiesce = TRUE;
- /*
- * For buscorerev = 66 and after, F0 FLR should be done independent from F1.
- * So don't need BT quiesce.
- */
- if (bus->sih->buscorerev >= 66) {
- bus->force_bt_quiesce = FALSE;
- }
+ pcie_watchdog_reset(bus->osh, bus->sih, (sbpcieregs_t *) bus->regs);
+#endif /* !DONGLE_ENABLE_ISOLATION */
- dhdpcie_dongle_flr_or_pwr_toggle(bus);
+#ifdef DHD_EFI
+ dhdpcie_dongle_pwr_toggle(bus);
+#endif
si_setcore(bus->sih, PCIE2_CORE_ID, 0);
sbpcieregs = (sbpcieregs_t*)(bus->regs);
val = R_REG(osh, &sbpcieregs->configdata);
W_REG(osh, &sbpcieregs->configdata, val);
+ /* Get info on the ARM and SOCRAM cores... */
+ /* Should really be qualified by device id */
+ if ((si_setcore(bus->sih, ARM7S_CORE_ID, 0)) ||
+ (si_setcore(bus->sih, ARMCM3_CORE_ID, 0)) ||
+ (si_setcore(bus->sih, ARMCR4_CORE_ID, 0)) ||
+ (si_setcore(bus->sih, ARMCA7_CORE_ID, 0))) {
+ bus->armrev = si_corerev(bus->sih);
+ } else {
+ DHD_ERROR(("%s: failed to find ARM core!\n", __FUNCTION__));
+ goto fail;
+ }
+
if (si_setcore(bus->sih, SYSMEM_CORE_ID, 0)) {
- /* Only set dongle RAMSIZE to default value when BMC vs ARM usage of SYSMEM is not
- * adjusted.
- */
+ /* Only set dongle RAMSIZE to default value when ramsize is not adjusted */
if (!bus->ramsize_adjusted) {
if (!(bus->orig_ramsize = si_sysmem_size(bus->sih))) {
DHD_ERROR(("%s: failed to find SYSMEM memory!\n", __FUNCTION__));
goto fail;
}
- switch ((uint16)bus->sih->chip) {
- default:
- /* also populate base address */
- bus->dongle_ram_base = CA7_4365_RAM_BASE;
- bus->orig_ramsize = 0x1c0000; /* Reserve 1.75MB for CA7 */
- break;
- }
+ /* also populate base address */
+ bus->dongle_ram_base = CA7_4365_RAM_BASE;
+ /* Default reserve 1.75MB for CA7 */
+ bus->orig_ramsize = 0x1c0000;
}
} else if (!si_setcore(bus->sih, ARMCR4_CORE_ID, 0)) {
if (!(bus->orig_ramsize = si_socram_size(bus->sih))) {
bus->dongle_ram_base = ((bus->sih->chiprev < 9) ?
CR4_4349_RAM_BASE : CR4_4349_RAM_BASE_FROM_REV_9);
break;
- case BCM4347_CHIP_ID:
- case BCM4357_CHIP_ID:
- case BCM4361_CHIP_ID:
+ case BCM4347_CHIP_GRPID:
bus->dongle_ram_base = CR4_4347_RAM_BASE;
break;
case BCM4362_CHIP_ID:
bus->dongle_ram_base = CR4_4362_RAM_BASE;
break;
- case BCM43751_CHIP_ID:
- bus->dongle_ram_base = CR4_43751_RAM_BASE;
- break;
- case BCM43752_CHIP_ID:
- bus->dongle_ram_base = CR4_43752_RAM_BASE;
- break;
- case BCM4375_CHIP_ID:
- case BCM4369_CHIP_ID:
- bus->dongle_ram_base = CR4_4369_RAM_BASE;
- break;
default:
bus->dongle_ram_base = 0;
DHD_ERROR(("%s: WARNING: Using default ram base at 0x%x\n",
if (dhd_dongle_memsize)
dhdpcie_bus_dongle_setmemsize(bus, dhd_dongle_memsize);
- if (bus->ramsize > DONGLE_TCM_MAP_SIZE) {
- DHD_ERROR(("%s : invalid ramsize %d(0x%x) is returned from dongle\n",
- __FUNCTION__, bus->ramsize, bus->ramsize));
- goto fail;
- }
-
DHD_ERROR(("DHD: dongle ram size is set to %d(orig %d) at 0x%x\n",
bus->ramsize, bus->orig_ramsize, bus->dongle_ram_base));
bus->srmemsize = si_socram_srmem_size(bus->sih);
- dhdpcie_bus_intr_init(bus);
+
+ bus->def_intmask = PCIE_MB_D2H_MB_MASK | PCIE_MB_TOPCIE_FN0_0 | PCIE_MB_TOPCIE_FN0_1;
/* Set the poll and/or interrupt flags */
bus->intr = (bool)dhd_intr;
if ((bus->poll = (bool)dhd_poll))
bus->pollrate = 1;
-#ifdef DHD_DISABLE_ASPM
- dhd_bus_aspm_enable_rc_ep(bus, FALSE);
-#endif /* DHD_DISABLE_ASPM */
+ bus->wait_for_d3_ack = 1;
+#ifdef PCIE_OOB
+ dhdpcie_oob_init(bus);
+#endif /* PCIE_OOB */
+#ifdef PCIE_INB_DW
+ bus->inb_enabled = TRUE;
+#endif /* PCIE_INB_DW */
+ bus->dongle_in_ds = FALSE;
bus->idma_enabled = TRUE;
bus->ifrm_enabled = TRUE;
+#if defined(PCIE_OOB) || defined(PCIE_INB_DW)
+ bus->ds_enabled = TRUE;
+#endif
DHD_TRACE(("%s: EXIT: SUCCESS\n", __FUNCTION__));
-
- if (MULTIBP_ENAB(bus->sih)) {
- dhd_bus_pcie_pwr_req_clear_nolock(bus);
-
- /*
- * One time clearing of Common Power Domain since HW default is set
- * Needs to be after FLR because FLR resets PCIe enum back to HW defaults
- * for 4378B0 (rev 68).
- * On 4378A0 (rev 66), PCIe enum reset is disabled due to CRWLPCIEGEN2-672
- */
- si_srpwr_request(bus->sih, SRPWR_DMN0_PCIE_MASK, 0);
-
- /*
- * WAR to fix ARM cold boot;
- * Assert WL domain in DAR helps but not enum
- */
- if (bus->sih->buscorerev >= 68) {
- dhd_bus_pcie_pwr_req_wl_domain(bus,
- DAR_PCIE_PWR_CTRL((bus->sih)->buscorerev), TRUE);
- }
- }
-
return 0;
fail:
if (bus->sih != NULL) {
- if (MULTIBP_ENAB(bus->sih)) {
- dhd_bus_pcie_pwr_req_clear_nolock(bus);
- }
- /* for EFI even if there is an error, load still succeeds
- * so si_detach should not be called here, it is called during unload
- */
si_detach(bus->sih);
bus->sih = NULL;
}
return 0;
}
-/* Non atomic function, caller should hold appropriate lock */
void
dhdpcie_bus_intr_enable(dhd_bus_t *bus)
{
DHD_TRACE(("%s Enter\n", __FUNCTION__));
- if (bus) {
- if (bus->sih && !bus->is_linkdown) {
+ if (bus && bus->sih && !bus->is_linkdown) {
+ if ((bus->sih->buscorerev == 2) || (bus->sih->buscorerev == 6) ||
+ (bus->sih->buscorerev == 4)) {
+ dhpcie_bus_unmask_interrupt(bus);
+ } else {
/* Skip after receiving D3 ACK */
- if (bus->bus_low_power_state == DHD_BUS_D3_ACK_RECIEVED) {
+ if ((bus->dhd->busstate == DHD_BUS_SUSPEND || bus->d3_suspend_pending) &&
+ bus->wait_for_d3_ack) {
return;
}
- if ((bus->sih->buscorerev == 2) || (bus->sih->buscorerev == 6) ||
- (bus->sih->buscorerev == 4)) {
- dhpcie_bus_unmask_interrupt(bus);
- } else {
- #if defined(BCMINTERNAL) && defined(DHD_DBG_DUMP)
- dhd_bus_mmio_trace(bus, bus->pcie_mailbox_mask,
- bus->def_intmask, TRUE);
- #endif
- si_corereg(bus->sih, bus->sih->buscoreidx, bus->pcie_mailbox_mask,
- bus->def_intmask, bus->def_intmask);
- }
+ si_corereg(bus->sih, bus->sih->buscoreidx, PCIMailBoxMask,
+ bus->def_intmask, bus->def_intmask);
}
-
}
-
DHD_TRACE(("%s Exit\n", __FUNCTION__));
}
-/* Non atomic function, caller should hold appropriate lock */
void
dhdpcie_bus_intr_disable(dhd_bus_t *bus)
{
DHD_TRACE(("%s Enter\n", __FUNCTION__));
if (bus && bus->sih && !bus->is_linkdown) {
- /* Skip after recieving D3 ACK */
- if (bus->bus_low_power_state == DHD_BUS_D3_ACK_RECIEVED) {
- return;
- }
if ((bus->sih->buscorerev == 2) || (bus->sih->buscorerev == 6) ||
(bus->sih->buscorerev == 4)) {
dhpcie_bus_mask_interrupt(bus);
} else {
- si_corereg(bus->sih, bus->sih->buscoreidx, bus->pcie_mailbox_mask,
+ /* Skip after receiving D3 ACK */
+ if ((bus->dhd->busstate == DHD_BUS_SUSPEND || bus->d3_suspend_pending) &&
+ bus->wait_for_d3_ack) {
+ return;
+ }
+ si_corereg(bus->sih, bus->sih->buscoreidx, PCIMailBoxMask,
bus->def_intmask, 0);
}
}
-
DHD_TRACE(("%s Exit\n", __FUNCTION__));
}
* whether the busstate is DHD_BUS_DOWN or DHD_BUS_DOWN_IN_PROGRESS, if so
* they will exit from there itself without marking dhd_bus_busy_state as BUSY.
*/
-void
+static void
dhdpcie_advertise_bus_cleanup(dhd_pub_t *dhdp)
{
unsigned long flags;
int timeleft;
- dhdp->dhd_watchdog_ms_backup = dhd_watchdog_ms;
- if (dhdp->dhd_watchdog_ms_backup) {
- DHD_ERROR(("%s: Disabling wdtick before dhd deinit\n",
- __FUNCTION__));
- dhd_os_wd_timer(dhdp, 0);
- }
- if (dhdp->busstate != DHD_BUS_DOWN) {
- DHD_GENERAL_LOCK(dhdp, flags);
- dhdp->busstate = DHD_BUS_DOWN_IN_PROGRESS;
- DHD_GENERAL_UNLOCK(dhdp, flags);
- }
+ DHD_GENERAL_LOCK(dhdp, flags);
+ dhdp->busstate = DHD_BUS_DOWN_IN_PROGRESS;
+ DHD_GENERAL_UNLOCK(dhdp, flags);
timeleft = dhd_os_busbusy_wait_negation(dhdp, &dhdp->dhd_bus_busy_state);
if ((timeleft == 0) || (timeleft == 1)) {
return;
}
+
static void
dhdpcie_bus_remove_prep(dhd_bus_t *bus)
{
DHD_TRACE(("%s Enter\n", __FUNCTION__));
DHD_GENERAL_LOCK(bus->dhd, flags);
- DHD_ERROR(("%s: making DHD_BUS_DOWN\n", __FUNCTION__));
bus->dhd->busstate = DHD_BUS_DOWN;
DHD_GENERAL_UNLOCK(bus->dhd, flags);
+#ifdef PCIE_INB_DW
+ /* De-Initialize the lock to serialize Device Wake Inband activities */
+ if (bus->inb_lock) {
+ dhd_os_spin_lock_deinit(bus->dhd->osh, bus->inb_lock);
+ bus->inb_lock = NULL;
+ }
+#endif
+
+
dhd_os_sdlock(bus->dhd);
if (bus->sih && !bus->dhd->dongle_isolation) {
- if (PCIE_RELOAD_WAR_ENAB(bus->sih->buscorerev)) {
- dhd_bus_pcie_pwr_req_reload_war(bus);
- }
-
/* Handles the insmod-fails-after-rmmod issue on Brix Android */
-
- /* if the pcie link is down, watchdog reset
- * should not be done, as it may hang
- */
-
- if (!bus->is_linkdown) {
-#ifndef DHD_SKIP_DONGLE_RESET_IN_ATTACH
- /* for efi, depending on bt over pcie mode
- * we either power toggle or do F0 FLR
- * from dhdpcie_bus_release dongle. So no need to
- * do dongle reset from here
- */
- dhdpcie_dongle_reset(bus);
-#endif /* !DHD_SKIP_DONGLE_RESET_IN_ATTACH */
- }
+ /* if the pcie link is down, watchdog reset should not be done, as it may hang */
+ if (!bus->is_linkdown)
+ pcie_watchdog_reset(bus->osh, bus->sih, (sbpcieregs_t *) bus->regs);
+ else
+ DHD_ERROR(("%s: skipping watchdog reset, due to pcie link down ! \n",
+ __FUNCTION__));
bus->dhd->is_pcie_watchdog_reset = TRUE;
}
DHD_TRACE(("%s Exit\n", __FUNCTION__));
}
-void
-dhd_init_bus_lock(dhd_bus_t *bus)
-{
- if (!bus->bus_lock) {
- bus->bus_lock = dhd_os_spin_lock_init(bus->dhd->osh);
- }
-}
-
-void
-dhd_deinit_bus_lock(dhd_bus_t *bus)
-{
- if (bus->bus_lock) {
- dhd_os_spin_lock_deinit(bus->dhd->osh, bus->bus_lock);
- bus->bus_lock = NULL;
- }
-}
-
-void
-dhd_init_backplane_access_lock(dhd_bus_t *bus)
-{
- if (!bus->backplane_access_lock) {
- bus->backplane_access_lock = dhd_os_spin_lock_init(bus->dhd->osh);
- }
-}
-
-void
-dhd_deinit_backplane_access_lock(dhd_bus_t *bus)
-{
- if (bus->backplane_access_lock) {
- dhd_os_spin_lock_deinit(bus->dhd->osh, bus->backplane_access_lock);
- bus->backplane_access_lock = NULL;
- }
-}
-
/** Detach and free everything */
void
dhdpcie_bus_release(dhd_bus_t *bus)
{
bool dongle_isolation = FALSE;
osl_t *osh = NULL;
- unsigned long flags_bus;
DHD_TRACE(("%s: Enter\n", __FUNCTION__));
ASSERT(osh);
if (bus->dhd) {
-#if defined(DEBUGGER) || defined(DHD_DSCOPE)
- debugger_close();
-#endif /* DEBUGGER || DHD_DSCOPE */
dhdpcie_advertise_bus_remove(bus->dhd);
dongle_isolation = bus->dhd->dongle_isolation;
bus->dhd->is_pcie_watchdog_reset = FALSE;
dhdpcie_bus_remove_prep(bus);
if (bus->intr) {
- DHD_BUS_LOCK(bus->bus_lock, flags_bus);
dhdpcie_bus_intr_disable(bus);
- DHD_BUS_UNLOCK(bus->bus_lock, flags_bus);
dhdpcie_free_irq(bus);
}
- dhd_deinit_bus_lock(bus);
- dhd_deinit_backplane_access_lock(bus);
- /**
- * dhdpcie_bus_release_dongle free bus->sih handle, which is needed to
- * access Dongle registers.
- * dhd_detach will communicate with dongle to delete flowring ..etc.
- * So dhdpcie_bus_release_dongle should be called only after the dhd_detach.
- */
- dhd_detach(bus->dhd);
dhdpcie_bus_release_dongle(bus, osh, dongle_isolation, TRUE);
+ dhd_detach(bus->dhd);
dhd_free(bus->dhd);
bus->dhd = NULL;
}
+
/* unmap the regs and tcm here!! */
if (bus->regs) {
dhdpcie_bus_reg_unmap(osh, bus->regs, DONGLE_REG_MAP_SIZE);
MFREE(osh, bus->console.buf, bus->console.bufsize);
}
+
/* Finally free bus info */
MFREE(osh, bus, sizeof(dhd_bus_t));
- g_dhd_bus = NULL;
}
DHD_TRACE(("%s: Exit\n", __FUNCTION__));
} /* dhdpcie_bus_release */
+
void
dhdpcie_bus_release_dongle(dhd_bus_t *bus, osl_t *osh, bool dongle_isolation, bool reset_flag)
{
return;
}
- if (bus->is_linkdown) {
- DHD_ERROR(("%s : Skip release dongle due to linkdown \n", __FUNCTION__));
- return;
- }
-
if (bus->sih) {
if (!dongle_isolation &&
- (bus->dhd && !bus->dhd->is_pcie_watchdog_reset)) {
- dhdpcie_dongle_reset(bus);
- }
-
- dhdpcie_dongle_flr_or_pwr_toggle(bus);
-
+ (bus->dhd && !bus->dhd->is_pcie_watchdog_reset))
+ pcie_watchdog_reset(bus->osh, bus->sih,
+ (sbpcieregs_t *) bus->regs);
+#ifdef DHD_EFI
+ dhdpcie_dongle_pwr_toggle(bus);
+#endif
if (bus->ltrsleep_on_unload) {
si_corereg(bus->sih, bus->sih->buscoreidx,
OFFSETOF(sbpcieregs_t, u.pcie2.ltr_state), ~0, 0);
pcie_serdes_iddqdisable(bus->osh, bus->sih,
(sbpcieregs_t *) bus->regs);
- /* For inbuilt drivers pcie clk req will be done by RC,
- * so do not do clkreq from dhd
- */
- if (dhd_download_fw_on_driverload)
- {
- /* Disable CLKREQ# */
- dhdpcie_clkreq(bus->osh, 1, 0);
- }
+ /* Disable CLKREQ# */
+ dhdpcie_clkreq(bus->osh, 1, 0);
if (bus->sih != NULL) {
si_detach(bus->sih);
/** Stop bus module: clear pending frames, disable data flow */
void dhd_bus_stop(struct dhd_bus *bus, bool enforce_mutex)
{
- unsigned long flags, flags_bus;
+ uint32 status;
+ unsigned long flags;
DHD_TRACE(("%s: Enter\n", __FUNCTION__));
DHD_DISABLE_RUNTIME_PM(bus->dhd);
DHD_GENERAL_LOCK(bus->dhd, flags);
- DHD_ERROR(("%s: making DHD_BUS_DOWN\n", __FUNCTION__));
bus->dhd->busstate = DHD_BUS_DOWN;
DHD_GENERAL_UNLOCK(bus->dhd, flags);
-#ifdef DHD_PCIE_NATIVE_RUNTIMEPM
- atomic_set(&bus->dhd->block_bus, TRUE);
-#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
-
- DHD_BUS_LOCK(bus->bus_lock, flags_bus);
dhdpcie_bus_intr_disable(bus);
- DHD_BUS_UNLOCK(bus->bus_lock, flags_bus);
-
- if (!bus->is_linkdown) {
- uint32 status;
- status = dhdpcie_bus_cfg_read_dword(bus, PCIIntstatus, 4);
- dhdpcie_bus_cfg_write_dword(bus, PCIIntstatus, 4, status);
- }
+ status = dhdpcie_bus_cfg_read_dword(bus, PCIIntstatus, 4);
+ dhdpcie_bus_cfg_write_dword(bus, PCIIntstatus, 4, status);
if (!dhd_download_fw_on_driverload) {
dhd_dpc_kill(bus->dhd);
}
-#ifdef DHD_PCIE_NATIVE_RUNTIMEPM
- pm_runtime_disable(dhd_bus_to_dev(bus));
- pm_runtime_set_suspended(dhd_bus_to_dev(bus));
- pm_runtime_enable(dhd_bus_to_dev(bus));
-#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
-
/* Clear rx control and wake any waiters */
dhd_os_set_ioctl_resp_timeout(IOCTL_DISABLE_TIMEOUT);
dhd_wakeup_ioctl_event(bus->dhd, IOCTL_RETURN_ON_BUS_STOP);
return;
}
+#ifdef DEVICE_TX_STUCK_DETECT
+void
+dhd_bus_send_msg_to_daemon(int reason)
+{
+ bcm_to_info_t to_info;
+
+ to_info.magic = BCM_TO_MAGIC;
+ to_info.reason = reason;
+
+ dhd_send_msg_to_daemon(NULL, (void *)&to_info, sizeof(bcm_to_info_t));
+ return;
+}
+
+/**
+ * Scan the flow rings in the active list to check whether any is stuck and notify the application.
+ * The conditions for warn/stuck detection are:
+ * 1. Flow ring is active
+ * 2. There are packets to be consumed by the consumer (wr != rd)
+ * If 1 and 2 are true, then
+ * 3. Warn, if Tx completion is not received for a duration of DEVICE_TX_STUCK_WARN_DURATION
+ * 4. Trap FW, if Tx completion is not received for a duration of DEVICE_TX_STUCK_DURATION
+ */
+static void
+dhd_bus_device_tx_stuck_scan(dhd_bus_t *bus)
+{
+ uint32 tx_cmpl;
+ unsigned long list_lock_flags;
+ unsigned long ring_lock_flags;
+ dll_t *item, *prev;
+ flow_ring_node_t *flow_ring_node;
+ bool ring_empty;
+ bool active;
+
+ DHD_FLOWRING_LIST_LOCK(bus->dhd->flowring_list_lock, list_lock_flags);
+
+ for (item = dll_tail_p(&bus->flowring_active_list);
+ !dll_end(&bus->flowring_active_list, item); item = prev) {
+
+ prev = dll_prev_p(item);
+
+ flow_ring_node = dhd_constlist_to_flowring(item);
+ DHD_FLOWRING_LOCK(flow_ring_node->lock, ring_lock_flags);
+ tx_cmpl = flow_ring_node->tx_cmpl;
+ active = flow_ring_node->active;
+ ring_empty = dhd_prot_is_cmpl_ring_empty(bus->dhd, flow_ring_node->prot_info);
+ DHD_FLOWRING_UNLOCK(flow_ring_node->lock, ring_lock_flags);
+
+ if (ring_empty) {
+ /* reset counters, etc. */
+ flow_ring_node->stuck_count = 0;
+ flow_ring_node->tx_cmpl_prev = tx_cmpl;
+ continue;
+ }
+ /**
+ * DEVICE_TX_STUCK_WARN_DURATION and DEVICE_TX_STUCK_DURATION are integer
+ * representations of time, used to decide whether a flow is in the warn
+ * state or stuck.
+ *
+ * flow_ring_node->stuck_count is an integer counter representing how long
+ * tx_cmpl has not been received even though there are pending packets in
+ * the ring to be consumed by the dongle for that particular flow.
+ *
+ * This method of determining elapsed time is helpful in sleep/wake
+ * scenarios: if the host sleeps and wakes up, that sleep time is not
+ * counted toward the stuck duration.
+ */
+ if ((tx_cmpl == flow_ring_node->tx_cmpl_prev) && active) {
+
+ flow_ring_node->stuck_count++;
+
+ DHD_ERROR(("%s: flowid: %d tx_cmpl: %u tx_cmpl_prev: %u stuck_count: %d\n",
+ __func__, flow_ring_node->flowid, tx_cmpl,
+ flow_ring_node->tx_cmpl_prev, flow_ring_node->stuck_count));
+
+ switch (flow_ring_node->stuck_count) {
+ case DEVICE_TX_STUCK_WARN_DURATION:
+ /**
+ * Notify Device Tx Stuck Notification App about the
+ * device Tx stuck warning for this flowid.
+ * App will collect the logs required.
+ */
+ DHD_ERROR(("stuck warning for flowid: %d sent to app\n",
+ flow_ring_node->flowid));
+ dhd_bus_send_msg_to_daemon(REASON_DEVICE_TX_STUCK_WARNING);
+ break;
+ case DEVICE_TX_STUCK_DURATION:
+ /**
+ * Notify Device Tx Stuck Notification App about the
+ * device Tx stuck info for this flowid.
+ * App will collect the logs required.
+ */
+ DHD_ERROR(("stuck information for flowid: %d sent to app\n",
+ flow_ring_node->flowid));
+ dhd_bus_send_msg_to_daemon(REASON_DEVICE_TX_STUCK);
+ break;
+ default:
+ break;
+ }
+ } else {
+ flow_ring_node->tx_cmpl_prev = tx_cmpl;
+ flow_ring_node->stuck_count = 0;
+ }
+ }
+ DHD_FLOWRING_LIST_UNLOCK(bus->dhd->flowring_list_lock, list_lock_flags);
+}
/**
- * Watchdog timer function.
- * @param dhd Represents a specific hardware (dongle) instance that this DHD manages
+ * Schedules dhd_bus_device_tx_stuck_scan after DEVICE_TX_STUCK_CKECK_TIMEOUT
+ * has elapsed, to determine whether any flowid is stuck.
*/
+static void
+dhd_bus_device_stuck_scan(dhd_bus_t *bus)
+{
+ uint32 time_stamp; /* in millisec */
+ uint32 diff;
+
+ /* No need to run the algorithm if the dongle has trapped */
+ if (bus->dhd->dongle_trap_occured) {
+ return;
+ }
+ time_stamp = OSL_SYSUPTIME();
+ diff = time_stamp - bus->device_tx_stuck_check;
+ if (diff > DEVICE_TX_STUCK_CKECK_TIMEOUT) {
+ dhd_bus_device_tx_stuck_scan(bus);
+ bus->device_tx_stuck_check = OSL_SYSUPTIME();
+ }
+ return;
+}
+#endif /* DEVICE_TX_STUCK_DETECT */
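
Because the scan above runs from the watchdog and increments stuck_count at most once per DEVICE_TX_STUCK_CKECK_TIMEOUT window, the counter behaves as a coarse clock that stops while the host sleeps. A minimal standalone sketch of how such a counter maps onto warn/trap thresholds, with made-up threshold values and helper names rather than the driver's:

#include <stdio.h>

/* Hypothetical thresholds, in units of "scan passes" (one pass per check window). */
#define WARN_PASSES	3
#define TRAP_PASSES	6

/* Advance the per-flow stuck counter for one scan pass and report its state. */
static const char *stuck_state(unsigned int *stuck_count, int made_progress)
{
	if (made_progress) {
		*stuck_count = 0;	/* tx completions arrived: reset the clock */
		return "ok";
	}
	(*stuck_count)++;
	if (*stuck_count == TRAP_PASSES)
		return "trap";		/* e.g. ask FW to trap / collect dumps */
	if (*stuck_count == WARN_PASSES)
		return "warn";		/* e.g. notify the logging daemon */
	return "pending";
}

int main(void)
{
	unsigned int count = 0;
	int pass;

	for (pass = 1; pass <= 7; pass++)
		printf("pass %d: %s\n", pass, stuck_state(&count, 0));
	return 0;
}
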
+
+/** Watchdog timer function */
bool dhd_bus_watchdog(dhd_pub_t *dhd)
{
unsigned long flags;
- dhd_bus_t *bus = dhd->bus;
+ dhd_bus_t *bus;
+ bus = dhd->bus;
DHD_GENERAL_LOCK(dhd, flags);
if (DHD_BUS_CHECK_DOWN_OR_DOWN_IN_PROGRESS(dhd) ||
DHD_BUS_BUSY_SET_IN_WD(dhd);
DHD_GENERAL_UNLOCK(dhd, flags);
+#ifdef DHD_PCIE_RUNTIMEPM
+ dhdpcie_runtime_bus_wake(dhd, TRUE, __builtin_return_address(0));
+#endif /* DHD_PCIE_RUNTIMEPM */
+
+
+
/* Poll for console output periodically */
if (dhd->busstate == DHD_BUS_DATA &&
- dhd->dhd_console_ms != 0 &&
- bus->bus_low_power_state == DHD_BUS_NO_LOW_POWER_STATE) {
+ dhd_console_ms != 0 && !bus->d3_suspend_pending) {
bus->console.count += dhd_watchdog_ms;
- if (bus->console.count >= dhd->dhd_console_ms) {
- bus->console.count -= dhd->dhd_console_ms;
-
- if (MULTIBP_ENAB(bus->sih)) {
- dhd_bus_pcie_pwr_req(bus);
- }
-
+ if (bus->console.count >= dhd_console_ms) {
+ bus->console.count -= dhd_console_ms;
/* Make sure backplane clock is on */
- if (dhdpcie_bus_readconsole(bus) < 0) {
- dhd->dhd_console_ms = 0; /* On error, stop trying */
- }
-
- if (MULTIBP_ENAB(bus->sih)) {
- dhd_bus_pcie_pwr_req_clear(bus);
- }
+ if (dhdpcie_bus_readconsole(bus) < 0)
+ dhd_console_ms = 0; /* On error, stop trying */
}
}
}
#endif /* DHD_READ_INTSTATUS_IN_DPC */
+#if defined(PCIE_OOB) || defined(PCIE_INB_DW)
+ /* If we haven't communicated with the device for a while, deassert the Device_Wake GPIO */
+ if (dhd_doorbell_timeout != 0 && dhd->busstate == DHD_BUS_DATA &&
+ dhd->up && dhd_timeout_expired(&bus->doorbell_timer)) {
+ dhd_bus_set_device_wake(bus, FALSE);
+ }
+#endif /* defined(PCIE_OOB) || defined(PCIE_INB_DW) */
+#ifdef PCIE_INB_DW
+ if (INBAND_DW_ENAB(bus)) {
+ if (bus->ds_exit_timeout) {
+ bus->ds_exit_timeout --;
+ if (bus->ds_exit_timeout == 1) {
+ DHD_ERROR(("DS-EXIT TIMEOUT\n"));
+ bus->ds_exit_timeout = 0;
+ bus->inband_ds_exit_to_cnt++;
+ }
+ }
+ if (bus->host_sleep_exit_timeout) {
+ bus->host_sleep_exit_timeout --;
+ if (bus->host_sleep_exit_timeout == 1) {
+ DHD_ERROR(("HOST_SLEEP-EXIT TIMEOUT\n"));
+ bus->host_sleep_exit_timeout = 0;
+ bus->inband_host_sleep_exit_to_cnt++;
+ }
+ }
+ }
+#endif /* PCIE_INB_DW */
+
+#ifdef DEVICE_TX_STUCK_DETECT
+ if (dhd->bus->dev_tx_stuck_monitor == TRUE) {
+ dhd_bus_device_stuck_scan(dhd->bus);
+ }
+#endif /* DEVICE_TX_STUCK_DETECT */
+
DHD_GENERAL_LOCK(dhd, flags);
DHD_BUS_BUSY_CLEAR_IN_WD(dhd);
dhd_os_busbusy_wake(dhd);
DHD_GENERAL_UNLOCK(dhd, flags);
-
return TRUE;
} /* dhd_bus_watchdog */
-#if defined(SUPPORT_MULTIPLE_REVISION)
-static int concate_revision_bcm4358(dhd_bus_t *bus, char *fw_path, char *nv_path)
-{
- uint32 chiprev;
-#if defined(SUPPORT_MULTIPLE_CHIPS)
- char chipver_tag[20] = "_4358";
-#else
- char chipver_tag[10] = {0, };
-#endif /* SUPPORT_MULTIPLE_CHIPS */
-
- chiprev = dhd_bus_chiprev(bus);
- if (chiprev == 0) {
- DHD_ERROR(("----- CHIP 4358 A0 -----\n"));
- strcat(chipver_tag, "_a0");
- } else if (chiprev == 1) {
- DHD_ERROR(("----- CHIP 4358 A1 -----\n"));
-#if defined(SUPPORT_MULTIPLE_CHIPS) || defined(SUPPORT_MULTIPLE_MODULE_CIS)
- strcat(chipver_tag, "_a1");
-#endif /* defined(SUPPORT_MULTIPLE_CHIPS) || defined(SUPPORT_MULTIPLE_MODULE_CIS) */
- } else if (chiprev == 3) {
- DHD_ERROR(("----- CHIP 4358 A3 -----\n"));
-#if defined(SUPPORT_MULTIPLE_CHIPS)
- strcat(chipver_tag, "_a3");
-#endif /* SUPPORT_MULTIPLE_CHIPS */
- } else {
- DHD_ERROR(("----- Unknown chip version, ver=%x -----\n", chiprev));
- }
-
- strcat(fw_path, chipver_tag);
-
-#if defined(SUPPORT_MULTIPLE_MODULE_CIS) && defined(USE_CID_CHECK)
- if (chiprev == 1 || chiprev == 3) {
- int ret = dhd_check_module_b85a();
- if ((chiprev == 1) && (ret < 0)) {
- memset(chipver_tag, 0x00, sizeof(chipver_tag));
- strcat(chipver_tag, "_b85");
- strcat(chipver_tag, "_a1");
- }
- }
-
- DHD_ERROR(("%s: chipver_tag %s \n", __FUNCTION__, chipver_tag));
-#endif /* defined(SUPPORT_MULTIPLE_MODULE_CIS) && defined(USE_CID_CHECK) */
-
-#if defined(SUPPORT_MULTIPLE_BOARD_REV)
- if (system_rev >= 10) {
- DHD_ERROR(("----- Board Rev [%d]-----\n", system_rev));
- strcat(chipver_tag, "_r10");
- }
-#endif /* SUPPORT_MULTIPLE_BOARD_REV */
- strcat(nv_path, chipver_tag);
-
- return 0;
-}
-
-static int concate_revision_bcm4359(dhd_bus_t *bus, char *fw_path, char *nv_path)
-{
- uint32 chip_ver;
- char chipver_tag[10] = {0, };
-#if defined(SUPPORT_MULTIPLE_MODULE_CIS) && defined(USE_CID_CHECK) && \
- defined(SUPPORT_BCM4359_MIXED_MODULES)
- int module_type = -1;
-#endif /* SUPPORT_MULTIPLE_MODULE_CIS && USE_CID_CHECK && SUPPORT_BCM4359_MIXED_MODULES */
-
- chip_ver = bus->sih->chiprev;
- if (chip_ver == 4) {
- DHD_ERROR(("----- CHIP 4359 B0 -----\n"));
- strncat(chipver_tag, "_b0", strlen("_b0"));
- } else if (chip_ver == 5) {
- DHD_ERROR(("----- CHIP 4359 B1 -----\n"));
- strncat(chipver_tag, "_b1", strlen("_b1"));
- } else if (chip_ver == 9) {
- DHD_ERROR(("----- CHIP 4359 C0 -----\n"));
- strncat(chipver_tag, "_c0", strlen("_c0"));
- } else {
- DHD_ERROR(("----- Unknown chip version, ver=%x -----\n", chip_ver));
- return -1;
- }
-
-#if defined(SUPPORT_MULTIPLE_MODULE_CIS) && defined(USE_CID_CHECK) && \
- defined(SUPPORT_BCM4359_MIXED_MODULES)
- module_type = dhd_check_module_b90();
-
- switch (module_type) {
- case BCM4359_MODULE_TYPE_B90B:
- strcat(fw_path, chipver_tag);
- break;
- case BCM4359_MODULE_TYPE_B90S:
- default:
- /*
- * .cid.info file not exist case,
- * loading B90S FW force for initial MFG boot up.
- */
- if (chip_ver == 5) {
- strncat(fw_path, "_b90s", strlen("_b90s"));
- }
- strcat(fw_path, chipver_tag);
- strcat(nv_path, chipver_tag);
- break;
- }
-#else /* SUPPORT_MULTIPLE_MODULE_CIS && USE_CID_CHECK && SUPPORT_BCM4359_MIXED_MODULES */
- strcat(fw_path, chipver_tag);
- strcat(nv_path, chipver_tag);
-#endif /* SUPPORT_MULTIPLE_MODULE_CIS && USE_CID_CHECK && SUPPORT_BCM4359_MIXED_MODULES */
-
- return 0;
-}
-
-#if defined(USE_CID_CHECK)
-
-#define MAX_EXTENSION 20
-#define MODULE_BCM4361_INDEX 3
-#define CHIP_REV_A0 1
-#define CHIP_REV_A1 2
-#define CHIP_REV_B0 3
-#define CHIP_REV_B1 4
-#define CHIP_REV_B2 5
-#define CHIP_REV_C0 6
-#define BOARD_TYPE_EPA 0x080f
-#define BOARD_TYPE_IPA 0x0827
-#define BOARD_TYPE_IPA_OLD 0x081a
-#define DEFAULT_CIDINFO_FOR_EPA "r00a_e000_a0_ePA"
-#define DEFAULT_CIDINFO_FOR_IPA "r00a_e000_a0_iPA"
-#define DEFAULT_CIDINFO_FOR_A1 "r01a_e30a_a1"
-#define DEFAULT_CIDINFO_FOR_B0 "r01i_e32_b0"
-#define MAX_VID_LEN 8
-#define CIS_TUPLE_HDR_LEN 2
-#if defined(BCM4361_CHIP)
-#define CIS_TUPLE_START_ADDRESS 0x18011110
-#define CIS_TUPLE_END_ADDRESS 0x18011167
-#elif defined(BCM4375_CHIP)
-#define CIS_TUPLE_START_ADDRESS 0x18011120
-#define CIS_TUPLE_END_ADDRESS 0x18011177
-#endif /* defined(BCM4361_CHIP) */
-#define CIS_TUPLE_MAX_COUNT (uint32)((CIS_TUPLE_END_ADDRESS - CIS_TUPLE_START_ADDRESS\
- + 1) / sizeof(uint32))
-#define CIS_TUPLE_TAG_START 0x80
-#define CIS_TUPLE_TAG_VENDOR 0x81
-#define CIS_TUPLE_TAG_BOARDTYPE 0x1b
-#define CIS_TUPLE_TAG_LENGTH 1
-#define NVRAM_FEM_MURATA "_murata"
-#define CID_FEM_MURATA "_mur_"
-
-typedef struct cis_tuple_format {
- uint8 id;
- uint8 len; /* total length of tag and data */
- uint8 tag;
- uint8 data[1];
-} cis_tuple_format_t;
-
-typedef struct {
- char cid_ext[MAX_EXTENSION];
- char nvram_ext[MAX_EXTENSION];
- char fw_ext[MAX_EXTENSION];
-} naming_info_t;
-
-naming_info_t bcm4361_naming_table[] = {
- { {""}, {""}, {""} },
- { {"r00a_e000_a0_ePA"}, {"_a0_ePA"}, {"_a0_ePA"} },
- { {"r00a_e000_a0_iPA"}, {"_a0"}, {"_a1"} },
- { {"r01a_e30a_a1"}, {"_r01a_a1"}, {"_a1"} },
- { {"r02a_e30a_a1"}, {"_r02a_a1"}, {"_a1"} },
- { {"r02c_e30a_a1"}, {"_r02c_a1"}, {"_a1"} },
- { {"r01d_e31_b0"}, {"_r01d_b0"}, {"_b0"} },
- { {"r01f_e31_b0"}, {"_r01f_b0"}, {"_b0"} },
- { {"r02g_e31_b0"}, {"_r02g_b0"}, {"_b0"} },
- { {"r01h_e32_b0"}, {"_r01h_b0"}, {"_b0"} },
- { {"r01i_e32_b0"}, {"_r01i_b0"}, {"_b0"} },
- { {"r02j_e32_b0"}, {"_r02j_b0"}, {"_b0"} },
- { {"r012_1kl_a1"}, {"_r012_a1"}, {"_a1"} },
- { {"r013_1kl_b0"}, {"_r013_b0"}, {"_b0"} },
- { {"r013_1kl_b0"}, {"_r013_b0"}, {"_b0"} },
- { {"r014_1kl_b0"}, {"_r014_b0"}, {"_b0"} },
- { {"r015_1kl_b0"}, {"_r015_b0"}, {"_b0"} },
- { {"r020_1kl_b0"}, {"_r020_b0"}, {"_b0"} },
- { {"r021_1kl_b0"}, {"_r021_b0"}, {"_b0"} },
- { {"r022_1kl_b0"}, {"_r022_b0"}, {"_b0"} },
- { {"r023_1kl_b0"}, {"_r023_b0"}, {"_b0"} },
- { {"r024_1kl_b0"}, {"_r024_b0"}, {"_b0"} },
- { {"r030_1kl_b0"}, {"_r030_b0"}, {"_b0"} },
- { {"r031_1kl_b0"}, {"_r030_b0"}, {"_b0"} }, /* exceptional case : r31 -> r30 */
- { {"r032_1kl_b0"}, {"_r032_b0"}, {"_b0"} },
- { {"r033_1kl_b0"}, {"_r033_b0"}, {"_b0"} },
- { {"r034_1kl_b0"}, {"_r034_b0"}, {"_b0"} },
- { {"r02a_e32a_b2"}, {"_r02a_b2"}, {"_b2"} },
- { {"r02b_e32a_b2"}, {"_r02b_b2"}, {"_b2"} },
- { {"r020_1qw_b2"}, {"_r020_b2"}, {"_b2"} },
- { {"r021_1qw_b2"}, {"_r021_b2"}, {"_b2"} },
- { {"r022_1qw_b2"}, {"_r022_b2"}, {"_b2"} },
- { {"r031_1qw_b2"}, {"_r031_b2"}, {"_b2"} },
- { {"r032_1qw_b2"}, {"_r032_b2"}, {"_b2"} },
- { {"r041_1qw_b2"}, {"_r041_b2"}, {"_b2"} }
-};
-
-#define MODULE_BCM4375_INDEX 3
-
-naming_info_t bcm4375_naming_table[] = {
- { {""}, {""}, {""} },
- { {"e41_es11"}, {"_ES00_semco_b0"}, {"_b0"} },
- { {"e43_es33"}, {"_ES01_semco_b0"}, {"_b0"} },
- { {"e43_es34"}, {"_ES02_semco_b0"}, {"_b0"} },
- { {"e43_es35"}, {"_ES02_semco_b0"}, {"_b0"} },
- { {"e43_es36"}, {"_ES03_semco_b0"}, {"_b0"} },
- { {"e43_cs41"}, {"_CS00_semco_b1"}, {"_b1"} },
- { {"e43_cs51"}, {"_CS01_semco_b1"}, {"_b1"} },
- { {"e43_cs53"}, {"_CS01_semco_b1"}, {"_b1"} },
- { {"e43_cs61"}, {"_CS00_skyworks_b1"}, {"_b1"} },
- { {"1rh_es10"}, {"_1rh_es10_b0"}, {"_b0"} },
- { {"1rh_es11"}, {"_1rh_es11_b0"}, {"_b0"} },
- { {"1rh_es12"}, {"_1rh_es12_b0"}, {"_b0"} },
- { {"1rh_es13"}, {"_1rh_es13_b0"}, {"_b0"} },
- { {"1rh_es20"}, {"_1rh_es20_b0"}, {"_b0"} },
- { {"1rh_es32"}, {"_1rh_es32_b0"}, {"_b0"} },
- { {"1rh_es41"}, {"_1rh_es41_b1"}, {"_b1"} },
- { {"1rh_es42"}, {"_1rh_es42_b1"}, {"_b1"} },
- { {"1rh_es43"}, {"_1rh_es43_b1"}, {"_b1"} },
- { {"1rh_es44"}, {"_1rh_es44_b1"}, {"_b1"} }
-};
-
-static naming_info_t *
-dhd_find_naming_info(naming_info_t table[], int table_size, char *module_type)
-{
- int index_found = 0, i = 0;
-
- if (module_type && strlen(module_type) > 0) {
- for (i = 1; i < table_size; i++) {
- if (!strncmp(table[i].cid_ext, module_type, strlen(table[i].cid_ext))) {
- index_found = i;
- break;
- }
- }
- }
-
- DHD_INFO(("%s: index_found=%d\n", __FUNCTION__, index_found));
-
- return &table[index_found];
-}
-
-static naming_info_t *
-dhd_find_naming_info_by_cid(naming_info_t table[], int table_size,
- char *cid_info)
-{
- int index_found = 0, i = 0;
- char *ptr;
-
- /* truncate extension */
- for (i = 1, ptr = cid_info; i < MODULE_BCM4361_INDEX && ptr; i++) {
- ptr = bcmstrstr(ptr, "_");
- if (ptr) {
- ptr++;
- }
- }
-
- for (i = 1; i < table_size && ptr; i++) {
- if (!strncmp(table[i].cid_ext, ptr, strlen(table[i].cid_ext))) {
- index_found = i;
- break;
- }
- }
-
- DHD_INFO(("%s: index_found=%d\n", __FUNCTION__, index_found));
-
- return &table[index_found];
-}
-
-static int
-dhd_parse_board_information_bcm(dhd_bus_t *bus, int *boardtype,
- unsigned char *vid, int *vid_length)
-{
- int boardtype_backplane_addr[] = {
- 0x18010324, /* OTP Control 1 */
- 0x18012618, /* PMU min resource mask */
- };
- int boardtype_backplane_data[] = {
- 0x00fa0000,
- 0x0e4fffff /* Keep on ARMHTAVAIL */
- };
- int int_val = 0, i = 0;
- cis_tuple_format_t *tuple;
- int totlen, len;
- uint32 raw_data[CIS_TUPLE_MAX_COUNT];
-
- for (i = 0; i < ARRAYSIZE(boardtype_backplane_addr); i++) {
- /* Write new OTP and PMU configuration */
- if (si_backplane_access(bus->sih, boardtype_backplane_addr[i], sizeof(int),
- &boardtype_backplane_data[i], FALSE) != BCME_OK) {
- DHD_ERROR(("invalid size/addr combination\n"));
- return BCME_ERROR;
- }
-
- if (si_backplane_access(bus->sih, boardtype_backplane_addr[i], sizeof(int),
- &int_val, TRUE) != BCME_OK) {
- DHD_ERROR(("invalid size/addr combination\n"));
- return BCME_ERROR;
- }
-
- DHD_INFO(("%s: boardtype_backplane_addr 0x%08x rdata 0x%04x\n",
- __FUNCTION__, boardtype_backplane_addr[i], int_val));
- }
-
- /* read tuple raw data */
- for (i = 0; i < CIS_TUPLE_MAX_COUNT; i++) {
- if (si_backplane_access(bus->sih, CIS_TUPLE_START_ADDRESS + i * sizeof(uint32),
- sizeof(uint32), &raw_data[i], TRUE) != BCME_OK) {
- break;
- }
- }
-
- totlen = i * sizeof(uint32);
- tuple = (cis_tuple_format_t *)raw_data;
-
- /* check the first tuple has tag 'start' */
- if (tuple->id != CIS_TUPLE_TAG_START) {
- return BCME_ERROR;
- }
-
- *vid_length = *boardtype = 0;
-
- /* find tagged parameter */
- while ((totlen >= (tuple->len + CIS_TUPLE_HDR_LEN)) &&
- (*vid_length == 0 || *boardtype == 0)) {
- len = tuple->len;
-
- if ((tuple->tag == CIS_TUPLE_TAG_VENDOR) &&
- (totlen >= (int)(len + CIS_TUPLE_HDR_LEN))) {
- /* found VID */
- memcpy(vid, tuple->data, tuple->len - CIS_TUPLE_TAG_LENGTH);
- *vid_length = tuple->len - CIS_TUPLE_TAG_LENGTH;
- prhex("OTP VID", tuple->data, tuple->len - CIS_TUPLE_TAG_LENGTH);
- }
- else if ((tuple->tag == CIS_TUPLE_TAG_BOARDTYPE) &&
- (totlen >= (int)(len + CIS_TUPLE_HDR_LEN))) {
- /* found boardtype */
- *boardtype = (int)tuple->data[0];
- prhex("OTP boardtype", tuple->data, tuple->len - CIS_TUPLE_TAG_LENGTH);
- }
-
- tuple = (cis_tuple_format_t*)((uint8*)tuple + (len + CIS_TUPLE_HDR_LEN));
- totlen -= (len + CIS_TUPLE_HDR_LEN);
- }
-
- if (*vid_length <= 0 || *boardtype <= 0) {
- DHD_ERROR(("failed to parse information (vid=%d, boardtype=%d)\n",
- *vid_length, *boardtype));
- return BCME_ERROR;
- }
-
- return BCME_OK;
-
-}
-
-static naming_info_t *
-dhd_find_naming_info_by_chip_rev(naming_info_t table[], int table_size,
- dhd_bus_t *bus, bool *is_murata_fem)
-{
- int board_type = 0, chip_rev = 0, vid_length = 0;
- unsigned char vid[MAX_VID_LEN];
- naming_info_t *info = &table[0];
- char *cid_info = NULL;
-
- if (!bus || !bus->sih) {
- DHD_ERROR(("%s:bus(%p) or bus->sih is NULL\n", __FUNCTION__, bus));
- return NULL;
- }
- chip_rev = bus->sih->chiprev;
-
- if (dhd_parse_board_information_bcm(bus, &board_type, vid, &vid_length)
- != BCME_OK) {
- DHD_ERROR(("%s:failed to parse board information\n", __FUNCTION__));
- return NULL;
- }
-
- DHD_INFO(("%s:chip version %d\n", __FUNCTION__, chip_rev));
-
-#if defined(BCM4361_CHIP)
- /* A0 chipset has exception only */
- if (chip_rev == CHIP_REV_A0) {
- if (board_type == BOARD_TYPE_EPA) {
- info = dhd_find_naming_info(table, table_size,
- DEFAULT_CIDINFO_FOR_EPA);
- } else if ((board_type == BOARD_TYPE_IPA) ||
- (board_type == BOARD_TYPE_IPA_OLD)) {
- info = dhd_find_naming_info(table, table_size,
- DEFAULT_CIDINFO_FOR_IPA);
- }
- } else {
- cid_info = dhd_get_cid_info(vid, vid_length);
- if (cid_info) {
- info = dhd_find_naming_info_by_cid(table, table_size, cid_info);
- if (strstr(cid_info, CID_FEM_MURATA)) {
- *is_murata_fem = TRUE;
- }
- }
- }
-#else
- cid_info = dhd_get_cid_info(vid, vid_length);
- if (cid_info) {
- info = dhd_find_naming_info_by_cid(table, table_size, cid_info);
- if (strstr(cid_info, CID_FEM_MURATA)) {
- *is_murata_fem = TRUE;
- }
- }
-#endif /* BCM4361_CHIP */
-
- return info;
-}
-#endif /* USE_CID_CHECK */
-
-static int
-concate_revision_bcm4361(dhd_bus_t *bus, char *fw_path, char *nv_path)
-{
- int ret = BCME_OK;
-#if defined(SUPPORT_BCM4361_MIXED_MODULES) && defined(USE_CID_CHECK)
- char module_type[MAX_VNAME_LEN];
- naming_info_t *info = NULL;
- bool is_murata_fem = FALSE;
-
- memset(module_type, 0, sizeof(module_type));
-
- if (dhd_check_module_bcm(module_type,
- MODULE_BCM4361_INDEX, &is_murata_fem) == BCME_OK) {
- info = dhd_find_naming_info(bcm4361_naming_table,
- ARRAYSIZE(bcm4361_naming_table), module_type);
- } else {
- /* in case of .cid.info doesn't exists */
- info = dhd_find_naming_info_by_chip_rev(bcm4361_naming_table,
- ARRAYSIZE(bcm4361_naming_table), bus, &is_murata_fem);
- }
-
- if (bcmstrnstr(nv_path, PATH_MAX, "_murata", 7)) {
- is_murata_fem = FALSE;
- }
-
- if (info) {
- if (is_murata_fem) {
- strncat(nv_path, NVRAM_FEM_MURATA, strlen(NVRAM_FEM_MURATA));
- }
- strncat(nv_path, info->nvram_ext, strlen(info->nvram_ext));
- strncat(fw_path, info->fw_ext, strlen(info->fw_ext));
- } else {
- DHD_ERROR(("%s:failed to find extension for nvram and firmware\n", __FUNCTION__));
- ret = BCME_ERROR;
- }
-#else /* SUPPORT_MULTIPLE_MODULE_CIS && USE_CID_CHECK */
- char chipver_tag[10] = {0, };
-
- strcat(fw_path, chipver_tag);
- strcat(nv_path, chipver_tag);
-#endif /* SUPPORT_MULTIPLE_MODULE_CIS && USE_CID_CHECK */
-
- return ret;
-}
-
-static int
-concate_revision_bcm4375(dhd_bus_t *bus, char *fw_path, char *nv_path)
-{
- int ret = BCME_OK;
-#if defined(SUPPORT_BCM4375_MIXED_MODULES) && defined(USE_CID_CHECK)
- char module_type[MAX_VNAME_LEN];
- naming_info_t *info = NULL;
- bool is_murata_fem = FALSE;
-
- memset(module_type, 0, sizeof(module_type));
-
- if (dhd_check_module_bcm(module_type,
- MODULE_BCM4375_INDEX, &is_murata_fem) == BCME_OK) {
- info = dhd_find_naming_info(bcm4375_naming_table,
- ARRAYSIZE(bcm4375_naming_table), module_type);
- } else {
- /* in case of .cid.info doesn't exists */
- info = dhd_find_naming_info_by_chip_rev(bcm4375_naming_table,
- ARRAYSIZE(bcm4375_naming_table), bus, &is_murata_fem);
- }
-
- if (info) {
- strncat(nv_path, info->nvram_ext, strlen(info->nvram_ext));
- strncat(fw_path, info->fw_ext, strlen(info->fw_ext));
- } else {
- DHD_ERROR(("%s:failed to find extension for nvram and firmware\n", __FUNCTION__));
- ret = BCME_ERROR;
- }
-#else /* SUPPORT_BCM4375_MIXED_MODULES && USE_CID_CHECK */
- char chipver_tag[10] = {0, };
-
- strcat(fw_path, chipver_tag);
- strcat(nv_path, chipver_tag);
-#endif /* SUPPORT_BCM4375_MIXED_MODULES && USE_CID_CHECK */
-
- return ret;
-}
-
-int
-concate_revision(dhd_bus_t *bus, char *fw_path, char *nv_path)
-{
- int res = 0;
-
- if (!bus || !bus->sih) {
- DHD_ERROR(("%s:Bus is Invalid\n", __FUNCTION__));
- return -1;
- }
-
- if (!fw_path || !nv_path) {
- DHD_ERROR(("fw_path or nv_path is null.\n"));
- return res;
- }
-
- switch (si_chipid(bus->sih)) {
-
- case BCM43569_CHIP_ID:
- case BCM4358_CHIP_ID:
- res = concate_revision_bcm4358(bus, fw_path, nv_path);
- break;
- case BCM4355_CHIP_ID:
- case BCM4359_CHIP_ID:
- res = concate_revision_bcm4359(bus, fw_path, nv_path);
- break;
- case BCM4361_CHIP_ID:
- case BCM4347_CHIP_ID:
- res = concate_revision_bcm4361(bus, fw_path, nv_path);
- break;
- case BCM4375_CHIP_ID:
- res = concate_revision_bcm4375(bus, fw_path, nv_path);
- break;
- default:
- DHD_ERROR(("REVISION SPECIFIC feature is not required\n"));
- return res;
- }
-
- return res;
-}
-#endif /* SUPPORT_MULTIPLE_REVISION */
uint16
dhd_get_chipid(dhd_pub_t *dhd)
return 0;
}
-/**
- * Loads firmware given by caller supplied path and nvram image into PCIe dongle.
- *
- * BCM_REQUEST_FW specific :
- * Given the chip type, determines the to be used file paths within /lib/firmware/brcm/ containing
- * firmware and nvm for that chip. If the download fails, retries download with a different nvm file
- *
- * BCMEMBEDIMAGE specific:
- * If bus->fw_path is empty, or if the download of bus->fw_path failed, firmware contained in header
- * file will be used instead.
- *
- * @return BCME_OK on success
- */
+/* Download firmware image and nvram image */
int
dhd_bus_download_firmware(struct dhd_bus *bus, osl_t *osh,
char *pfw_path, char *pnv_path,
bus->dhd->clm_path = pclm_path;
bus->dhd->conf_path = pconf_path;
-#if defined(SUPPORT_MULTIPLE_REVISION)
- if (concate_revision(bus, bus->fw_path, bus->nv_path) != 0) {
- DHD_ERROR(("%s: fail to concatnate revison \n",
- __FUNCTION__));
- return BCME_BADARG;
- }
-#endif /* SUPPORT_MULTIPLE_REVISION */
#if defined(DHD_BLOB_EXISTENCE_CHECK)
dhd_set_blob_support(bus->dhd, bus->fw_path);
DHD_ERROR(("%s: firmware path=%s, nvram path=%s\n",
__FUNCTION__, bus->fw_path, bus->nv_path));
- dhdpcie_dump_resource(bus);
ret = dhdpcie_download_firmware(bus, osh);
return ret;
}
+void
+dhd_set_path_params(struct dhd_bus *bus)
+{
+ /* External conf takes precedence if specified */
+ dhd_conf_preinit(bus->dhd);
+
+ if (bus->dhd->clm_path[0] == '\0') {
+ dhd_conf_set_path(bus->dhd, "clm.blob", bus->dhd->clm_path, bus->fw_path);
+ }
+ dhd_conf_set_clm_name_by_chip(bus->dhd, bus->dhd->clm_path);
+ if (bus->dhd->conf_path[0] == '\0') {
+ dhd_conf_set_path(bus->dhd, "config.txt", bus->dhd->conf_path, bus->nv_path);
+ }
+#ifdef CONFIG_PATH_AUTO_SELECT
+ dhd_conf_set_conf_name_by_chip(bus->dhd, bus->dhd->conf_path);
+#endif
+
+ dhd_conf_read_config(bus->dhd, bus->dhd->conf_path);
+
+ dhd_conf_set_fw_name_by_chip(bus->dhd, bus->fw_path);
+ dhd_conf_set_nv_name_by_chip(bus->dhd, bus->nv_path);
+ dhd_conf_set_clm_name_by_chip(bus->dhd, bus->dhd->clm_path);
+
+ printf("Final fw_path=%s\n", bus->fw_path);
+ printf("Final nv_path=%s\n", bus->nv_path);
+ printf("Final clm_path=%s\n", bus->dhd->clm_path);
+ printf("Final conf_path=%s\n", bus->dhd->conf_path);
+
+}
+
void
dhd_set_bus_params(struct dhd_bus *bus)
{
}
}
-/**
- * Loads firmware given by 'bus->fw_path' into PCIe dongle.
- *
- * BCM_REQUEST_FW specific :
- * Given the chip type, determines the to be used file paths within /lib/firmware/brcm/ containing
- * firmware and nvm for that chip. If the download fails, retries download with a different nvm file
- *
- * BCMEMBEDIMAGE specific:
- * If bus->fw_path is empty, or if the download of bus->fw_path failed, firmware contained in header
- * file will be used instead.
- *
- * @return BCME_OK on success
- */
static int
dhdpcie_download_firmware(struct dhd_bus *bus, osl_t *osh)
{
DHD_OS_WAKE_LOCK(bus->dhd);
- dhd_conf_set_path_params(bus->dhd, bus->fw_path, bus->nv_path);
+ dhd_set_path_params(bus);
dhd_set_bus_params(bus);
ret = _dhdpcie_download_firmware(bus);
DHD_OS_WAKE_UNLOCK(bus->dhd);
return ret;
-} /* dhdpcie_download_firmware */
-
-#define DHD_MEMORY_SET_PATTERN 0xAA
+}
-/**
- * Downloads a file containing firmware into dongle memory. In case of a .bea file, the DHD
- * is updated with the event logging partitions within that file as well.
- *
- * @param pfw_path Path to .bin or .bea file
- */
static int
dhdpcie_download_code_file(struct dhd_bus *bus, char *pfw_path)
{
int len = 0;
bool store_reset;
char *imgbuf = NULL;
- uint8 *memblock = NULL, *memptr = NULL;
+ uint8 *memblock = NULL, *memptr;
uint8 *memptr_tmp = NULL; // terence: check downloaded firmware is correct
+
int offset_end = bus->ramsize;
- uint32 file_size = 0, read_len = 0;
-#if defined(DHD_FW_MEM_CORRUPTION)
- if (dhd_bus_get_fw_mode(bus->dhd) == DHD_FLAG_MFG_MODE) {
- dhd_tcm_test_enable = TRUE;
- } else {
- dhd_tcm_test_enable = FALSE;
- }
-#endif /* DHD_FW_MEM_CORRUPTION */
- DHD_ERROR(("%s: dhd_tcm_test_enable %u\n", __FUNCTION__, dhd_tcm_test_enable));
- /* TCM check */
- if (dhd_tcm_test_enable && !dhd_bus_tcm_test(bus)) {
- DHD_ERROR(("dhd_bus_tcm_test failed\n"));
- bcmerror = BCME_ERROR;
- goto err;
- }
+#ifndef DHD_EFI
DHD_ERROR(("%s: download firmware %s\n", __FUNCTION__, pfw_path));
+#endif /* DHD_EFI */
/* Should succeed in opening image if it is actually given through registry
* entry or in module param.
*/
- imgbuf = dhd_os_open_image1(bus->dhd, pfw_path);
+ imgbuf = dhd_os_open_image(pfw_path);
if (imgbuf == NULL) {
printf("%s: Open firmware file failed %s\n", __FUNCTION__, pfw_path);
goto err;
}
- file_size = dhd_os_get_image_size(imgbuf);
- if (!file_size) {
- DHD_ERROR(("%s: get file size fails ! \n", __FUNCTION__));
- goto err;
- }
-
memptr = memblock = MALLOC(bus->dhd->osh, MEMBLOCK + DHD_SDALIGN);
if (memblock == NULL) {
DHD_ERROR(("%s: Failed to allocate memory %d bytes\n", __FUNCTION__, MEMBLOCK));
- bcmerror = BCME_NOMEM;
goto err;
}
if (dhd_msg_level & DHD_TRACE_VAL) {
memptr += (DHD_SDALIGN - ((uint32)(uintptr)memblock % DHD_SDALIGN));
}
+
/* check if CR4/CA7 */
store_reset = (si_setcore(bus->sih, ARMCR4_CORE_ID, 0) ||
si_setcore(bus->sih, ARMCA7_CORE_ID, 0));
+
/* Download image with MEMBLOCK size */
while ((len = dhd_os_get_image_block((char*)memptr, MEMBLOCK, imgbuf))) {
if (len < 0) {
bcmerror = BCME_ERROR;
goto err;
}
- read_len += len;
- if (read_len > file_size) {
- DHD_ERROR(("%s: WARNING! reading beyond EOF, len=%d; read_len=%u;"
- " file_size=%u truncating len to %d \n", __FUNCTION__,
- len, read_len, file_size, (len - (read_len - file_size))));
- len -= (read_len - file_size);
- }
-
/* if address is 0, store the reset instruction to be written in 0 */
if (store_reset) {
ASSERT(offset == 0);
bcmerror = BCME_ERROR;
goto err;
}
-
- if (read_len >= file_size) {
- break;
- }
}
+
err:
if (memblock) {
MFREE(bus->dhd->osh, memblock, MEMBLOCK + DHD_SDALIGN);
}
if (imgbuf) {
- dhd_os_close_image1(bus->dhd, imgbuf);
+ dhd_os_close_image(imgbuf);
}
return bcmerror;
} /* dhdpcie_download_code_file */
+#ifdef CUSTOMER_HW4_DEBUG
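+/* Lower bound on the processed NVRAM variables size; anything smaller is rejected */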
+#define MIN_NVRAMVARS_SIZE 128
+#endif /* CUSTOMER_HW4_DEBUG */
+
static int
dhdpcie_download_nvram(struct dhd_bus *bus)
{
bool local_alloc = FALSE;
pnv_path = bus->nv_path;
+#ifdef BCMEMBEDIMAGE
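+ /* With a compiled-in firmware image, treat NVRAM as always available */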
+ nvram_file_exists = TRUE;
+#else
nvram_file_exists = ((pnv_path != NULL) && (pnv_path[0] != '\0'));
+#endif
/* First try UEFI */
len = MAX_NVRAMBUF_SIZE;
if (len > 0 && len <= MAX_NVRAMBUF_SIZE && memblock != NULL) {
bufp = (char *) memblock;
+#ifdef CACHE_FW_IMAGES
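+ /* Reuse the NVRAM variables length cached by a previous download, if any */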
+ if (bus->processed_nvram_params_len) {
+ len = bus->processed_nvram_params_len;
+ }
+
+ if (!bus->processed_nvram_params_len) {
+ bufp[len] = 0;
+ if (nvram_uefi_exists || nvram_file_exists) {
+ len = process_nvram_vars(bufp, len);
+ bus->processed_nvram_params_len = len;
+ }
+ } else
+#else
{
bufp[len] = 0;
if (nvram_uefi_exists || nvram_file_exists) {
len = process_nvram_vars(bufp, len);
}
}
+#endif /* CACHE_FW_IMAGES */
DHD_ERROR(("%s: process_nvram_vars len %d\n", __FUNCTION__, len));
+#ifdef CUSTOMER_HW4_DEBUG
+ if (len < MIN_NVRAMVARS_SIZE) {
+ DHD_ERROR(("%s: invalid nvram size in process_nvram_vars \n",
+ __FUNCTION__));
+ bcmerror = BCME_ERROR;
+ goto err;
+ }
+#endif /* CUSTOMER_HW4_DEBUG */
if (len % 4) {
len += 4 - (len % 4);
}
}
+
err:
if (memblock) {
if (local_alloc) {
return bcmerror;
}
+
+#ifdef BCMEMBEDIMAGE
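+/* Download the firmware image compiled into the driver (dlarray, or
+ * rtecdc_fw_arr for DHD_EFI) into dongle RAM in MEMBLOCK-sized chunks;
+ * under DHD_DEBUG the image is read back and compared to detect corruption.
+ */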
+int
+dhdpcie_download_code_array(struct dhd_bus *bus)
+{
+ int bcmerror = -1;
+ int offset = 0;
+ unsigned char *p_dlarray = NULL;
+ unsigned int dlarray_size = 0;
+ unsigned int downloaded_len, remaining_len, len;
+ char *p_dlimagename, *p_dlimagever, *p_dlimagedate;
+ uint8 *memblock = NULL, *memptr;
+
+ downloaded_len = 0;
+ remaining_len = 0;
+ len = 0;
+
+#ifdef DHD_EFI
+ p_dlarray = rtecdc_fw_arr;
+ dlarray_size = sizeof(rtecdc_fw_arr);
+#else
+ p_dlarray = dlarray;
+ dlarray_size = sizeof(dlarray);
+ p_dlimagename = dlimagename;
+ p_dlimagever = dlimagever;
+ p_dlimagedate = dlimagedate;
+#endif /* DHD_EFI */
+
+#ifndef DHD_EFI
+ if ((p_dlarray == 0) || (dlarray_size == 0) || (dlarray_size > bus->ramsize) ||
+ (p_dlimagename == 0) || (p_dlimagever == 0) || (p_dlimagedate == 0))
+ goto err;
+#endif /* DHD_EFI */
+
+ memptr = memblock = MALLOC(bus->dhd->osh, MEMBLOCK + DHD_SDALIGN);
+ if (memblock == NULL) {
+ DHD_ERROR(("%s: Failed to allocate memory %d bytes\n", __FUNCTION__, MEMBLOCK));
+ goto err;
+ }
+ if ((uint32)(uintptr)memblock % DHD_SDALIGN)
+ memptr += (DHD_SDALIGN - ((uint32)(uintptr)memblock % DHD_SDALIGN));
+
+ while (downloaded_len < dlarray_size) {
+ remaining_len = dlarray_size - downloaded_len;
+ if (remaining_len >= MEMBLOCK)
+ len = MEMBLOCK;
+ else
+ len = remaining_len;
+
+ memcpy(memptr, (p_dlarray + downloaded_len), len);
+ /* check if CR4/CA7 */
+ if (si_setcore(bus->sih, ARMCR4_CORE_ID, 0) ||
+ si_setcore(bus->sih, SYSMEM_CORE_ID, 0)) {
+ /* if address is 0, store the reset instruction to be written in 0 */
+ if (offset == 0) {
+ bus->resetinstr = *(((uint32*)memptr));
+ /* Add start of RAM address to the address given by user */
+ offset += bus->dongle_ram_base;
+ }
+ }
+ bcmerror = dhdpcie_bus_membytes(bus, TRUE, offset, (uint8 *)memptr, len);
+ downloaded_len += len;
+ if (bcmerror) {
+ DHD_ERROR(("%s: error %d on writing %d membytes at 0x%08x\n",
+ __FUNCTION__, bcmerror, MEMBLOCK, offset));
+ goto err;
+ }
+ offset += MEMBLOCK;
+ }
+
+#ifdef DHD_DEBUG
+ /* Upload and compare the downloaded code */
+ {
+ unsigned char *ularray = NULL;
+ unsigned int uploaded_len;
+ uploaded_len = 0;
+ bcmerror = -1;
+ ularray = MALLOC(bus->dhd->osh, dlarray_size);
+ if (ularray == NULL)
+ goto upload_err;
+ /* Upload image to verify downloaded contents. */
+ offset = bus->dongle_ram_base;
+ memset(ularray, 0xaa, dlarray_size);
+ while (uploaded_len < dlarray_size) {
+ remaining_len = dlarray_size - uploaded_len;
+ if (remaining_len >= MEMBLOCK)
+ len = MEMBLOCK;
+ else
+ len = remaining_len;
+ bcmerror = dhdpcie_bus_membytes(bus, FALSE, offset,
+ (uint8 *)(ularray + uploaded_len), len);
+ if (bcmerror) {
+ DHD_ERROR(("%s: error %d on reading %d membytes at 0x%08x\n",
+ __FUNCTION__, bcmerror, MEMBLOCK, offset));
+ goto upload_err;
+ }
+
+ uploaded_len += len;
+ offset += MEMBLOCK;
+ }
+#ifdef DHD_EFI
+ if (memcmp(p_dlarray, ularray, dlarray_size)) {
+ DHD_ERROR(("%s: Downloaded image is corrupted ! \n", __FUNCTION__));
+ goto upload_err;
+ } else
+ DHD_ERROR(("%s: Download, Upload and compare succeeded .\n", __FUNCTION__));
+#else
+ if (memcmp(p_dlarray, ularray, dlarray_size)) {
+ DHD_ERROR(("%s: Downloaded image is corrupted (%s, %s, %s).\n",
+ __FUNCTION__, p_dlimagename, p_dlimagever, p_dlimagedate));
+ goto upload_err;
+
+ } else
+ DHD_ERROR(("%s: Download, Upload and compare succeeded (%s, %s, %s).\n",
+ __FUNCTION__, p_dlimagename, p_dlimagever, p_dlimagedate));
+#endif /* DHD_EFI */
+
+upload_err:
+ if (ularray)
+ MFREE(bus->dhd->osh, ularray, dlarray_size);
+ }
+#endif /* DHD_DEBUG */
+err:
+
+ if (memblock)
+ MFREE(bus->dhd->osh, memblock, MEMBLOCK + DHD_SDALIGN);
+
+ return bcmerror;
+} /* dhdpcie_download_code_array */
+#endif /* BCMEMBEDIMAGE */
+
+
static int
dhdpcie_ramsize_read_image(struct dhd_bus *bus, char *buf, int len)
{
/* External image takes precedence if specified */
if ((bus->fw_path != NULL) && (bus->fw_path[0] != '\0')) {
- // opens and seeks to correct file offset:
- imgbuf = dhd_os_open_image1(bus->dhd, bus->fw_path);
+ imgbuf = dhd_os_open_image(bus->fw_path);
if (imgbuf == NULL) {
DHD_ERROR(("%s: Failed to open firmware file\n", __FUNCTION__));
goto err;
err:
if (imgbuf)
- dhd_os_close_image1(bus->dhd, imgbuf);
+ dhd_os_close_image(imgbuf);
return bcmerror;
}
+
/* The ramsize can be changed in the dongle image, for example 4365 chip share the sysmem
* with BMC and we can adjust how many sysmem belong to CA7 during dongle compilation.
* So in DHD we need to detect this case and update the correct dongle RAMSIZE as well.
return;
}
+#ifndef BCMEMBEDIMAGE
/* Out immediately if no image to download */
if ((bus->fw_path == NULL) || (bus->fw_path[0] == '\0')) {
DHD_ERROR(("%s: no fimrware file\n", __FUNCTION__));
return;
}
+#endif /* !BCMEMBEDIMAGE */
/* Get maximum RAMSIZE info search length */
for (i = 0; ; i++) {
/* External image takes precedence if specified */
if (dhdpcie_ramsize_read_image(bus, (char *)memptr, search_len) != BCME_OK) {
+#if defined(BCMEMBEDIMAGE) && !defined(DHD_EFI)
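+ /* No readable external image: fall back to scanning the embedded
+ * image for the RAMSIZE info.
+ */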
+ unsigned char *p_dlarray = NULL;
+ unsigned int dlarray_size = 0;
+ char *p_dlimagename, *p_dlimagever, *p_dlimagedate;
+
+ p_dlarray = dlarray;
+ dlarray_size = sizeof(dlarray);
+ p_dlimagename = dlimagename;
+ p_dlimagever = dlimagever;
+ p_dlimagedate = dlimagedate;
+
+ if ((p_dlarray == 0) || (dlarray_size == 0) || (p_dlimagename == 0) ||
+ (p_dlimagever == 0) || (p_dlimagedate == 0))
+ goto err;
+
+ ramsizeptr = p_dlarray;
+ ramsizelen = dlarray_size;
+#else
goto err;
+#endif /* BCMEMBEDIMAGE && !DHD_EFI */
}
else {
ramsizeptr = memptr;
MFREE(bus->dhd->osh, memptr, search_len);
return;
-} /* dhdpcie_ramsize_adj */
+} /* _dhdpcie_download_firmware */
-/**
- * Downloads firmware file given by 'bus->fw_path' into PCIe dongle
- *
- * BCMEMBEDIMAGE specific:
- * If bus->fw_path is empty, or if the download of bus->fw_path failed, firmware contained in header
- * file will be used instead.
- *
- */
static int
_dhdpcie_download_firmware(struct dhd_bus *bus)
{
/* Out immediately if no image to download */
if ((bus->fw_path == NULL) || (bus->fw_path[0] == '\0')) {
+#ifdef BCMEMBEDIMAGE
+ embed = TRUE;
+#else
DHD_ERROR(("%s: no fimrware file\n", __FUNCTION__));
return 0;
+#endif
}
/* Adjust ram size */
dhdpcie_ramsize_adj(bus);
/* External image takes precedence if specified */
if ((bus->fw_path != NULL) && (bus->fw_path[0] != '\0')) {
if (dhdpcie_download_code_file(bus, bus->fw_path)) {
- DHD_ERROR(("%s:%d dongle image file download failed\n", __FUNCTION__,
- __LINE__));
+ DHD_ERROR(("%s: dongle image file download failed\n", __FUNCTION__));
+#ifdef BCMEMBEDIMAGE
+ embed = TRUE;
+#else
goto err;
+#endif
} else {
embed = FALSE;
dlok = TRUE;
}
}
+#ifdef BCMEMBEDIMAGE
+ if (embed) {
+ if (dhdpcie_download_code_array(bus)) {
+ DHD_ERROR(("%s: dongle image array download failed\n", __FUNCTION__));
+ goto err;
+ } else {
+ dlok = TRUE;
+ }
+ }
+#else
BCM_REFERENCE(embed);
+#endif
if (!dlok) {
- DHD_ERROR(("%s:%d dongle image download failed\n", __FUNCTION__, __LINE__));
+ DHD_ERROR(("%s: dongle image download failed\n", __FUNCTION__));
goto err;
}
/* If a valid nvram_arry is specified as above, it can be passed down to dongle */
/* dhd_bus_set_nvram_params(bus, (char *)&nvram_array); */
+
/* External nvram takes precedence if specified */
if (dhdpcie_download_nvram(bus)) {
- DHD_ERROR(("%s:%d dongle nvram file download failed\n", __FUNCTION__, __LINE__));
+ DHD_ERROR(("%s: dongle nvram file download failed\n", __FUNCTION__));
goto err;
}
return bcmerror;
} /* _dhdpcie_download_firmware */
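+/* Maximum length of a single console line read back from the dongle log buffer */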
+#define CONSOLE_LINE_MAX 192
+
static int
dhdpcie_bus_readconsole(dhd_bus_t *bus)
{
uint8 line[CONSOLE_LINE_MAX], ch;
uint32 n, idx, addr;
int rv;
- uint readlen = 0;
- uint i = 0;
/* Don't do anything until FWREADY updates console address */
if (bus->console_addr == 0)
c->bufsize = ltoh32(c->log.buf_size);
if ((c->buf = MALLOC(bus->dhd->osh, c->bufsize)) == NULL)
return BCME_NOMEM;
- DHD_INFO(("conlog: bufsize=0x%x\n", c->bufsize));
}
idx = ltoh32(c->log.idx);
if (idx == c->last)
return BCME_OK;
- DHD_INFO(("conlog: addr=0x%x, idx=0x%x, last=0x%x \n", c->log.buf,
- idx, c->last));
-
- /* Read the console buffer data to a local buffer */
- /* optimize and read only the portion of the buffer needed, but
- * important to handle wrap-around.
- */
+ /* Read the console buffer */
addr = ltoh32(c->log.buf);
+ if ((rv = dhdpcie_bus_membytes(bus, FALSE, addr, c->buf, c->bufsize)) < 0)
+ return rv;
- /* wrap around case - write ptr < read ptr */
- if (idx < c->last) {
- /* from read ptr to end of buffer */
- readlen = c->bufsize - c->last;
- if ((rv = dhdpcie_bus_membytes(bus, FALSE,
- addr + c->last, c->buf, readlen)) < 0) {
- DHD_ERROR(("conlog: read error[1] ! \n"));
- return rv;
- }
- /* from beginning of buffer to write ptr */
- if ((rv = dhdpcie_bus_membytes(bus, FALSE,
- addr, c->buf + readlen,
- idx)) < 0) {
- DHD_ERROR(("conlog: read error[2] ! \n"));
- return rv;
- }
- readlen += idx;
- } else {
- /* non-wraparound case, write ptr > read ptr */
- readlen = (uint)idx - c->last;
- if ((rv = dhdpcie_bus_membytes(bus, FALSE,
- addr + c->last, c->buf, readlen)) < 0) {
- DHD_ERROR(("conlog: read error[3] ! \n"));
- return rv;
- }
- }
- /* update read ptr */
- c->last = idx;
-
- /* now output the read data from the local buffer to the host console */
- while (i < readlen) {
- for (n = 0; n < CONSOLE_LINE_MAX - 2 && i < readlen; n++) {
- ch = c->buf[i];
- ++i;
+ while (c->last != idx) {
+ for (n = 0; n < CONSOLE_LINE_MAX - 2; n++) {
+ if (c->last == idx) {
+ /* This would output a partial line. Instead, back up
+ * the buffer pointer and output this line next time around.
+ */
+ if (c->last >= n)
+ c->last -= n;
+ else
+ c->last = c->bufsize - n;
+ goto break2;
+ }
+ ch = c->buf[c->last];
+ c->last = (c->last + 1) % c->bufsize;
if (ch == '\n')
break;
line[n] = ch;
DHD_FWLOG(("CONSOLE: %s\n", line));
}
}
+break2:
return BCME_OK;
-
} /* dhdpcie_bus_readconsole */
void
line[n] = ch;
}
+
if (n > 0) {
if (line[n - 1] == '\r')
n--;
return;
}
-/**
- * Opens the file given by bus->fw_path, reads part of the file into a buffer and closes the file.
- *
- * @return BCME_OK on success
- */
static int
dhdpcie_checkdied(dhd_bus_t *bus, char *data, uint size)
{
pciedev_shared_t *local_pciedev_shared = bus->pcie_sh;
struct bcmstrbuf strbuf;
unsigned long flags;
- bool dongle_trap_occured = FALSE;
DHD_TRACE(("%s: Enter\n", __FUNCTION__));
if (mbuffer == NULL) {
DHD_ERROR(("%s: MALLOC(%d) failed \n", __FUNCTION__, msize));
bcmerror = BCME_NOMEM;
- goto done2;
+ goto done;
}
}
if ((str = MALLOC(bus->dhd->osh, maxstrlen)) == NULL) {
DHD_ERROR(("%s: MALLOC(%d) failed \n", __FUNCTION__, maxstrlen));
bcmerror = BCME_NOMEM;
- goto done2;
+ goto done;
}
DHD_GENERAL_LOCK(bus->dhd, flags);
DHD_BUS_BUSY_SET_IN_CHECKDIED(bus->dhd);
DHD_GENERAL_UNLOCK(bus->dhd, flags);
- if (MULTIBP_ENAB(bus->sih)) {
- dhd_bus_pcie_pwr_req(bus);
- }
if ((bcmerror = dhdpcie_readshared(bus)) < 0) {
- goto done1;
+ goto done;
}
bcm_binit(&strbuf, data, size);
if ((bcmerror = dhdpcie_bus_membytes(bus, FALSE,
bus->pcie_sh->assert_exp_addr,
(uint8 *)str, maxstrlen)) < 0) {
- goto done1;
+ goto done;
}
str[maxstrlen - 1] = '\0';
if ((bcmerror = dhdpcie_bus_membytes(bus, FALSE,
bus->pcie_sh->assert_file_addr,
(uint8 *)str, maxstrlen)) < 0) {
- goto done1;
+ goto done;
}
str[maxstrlen - 1] = '\0';
if (bus->pcie_sh->flags & PCIE_SHARED_TRAP) {
trap_t *tr = &bus->dhd->last_trap_info;
- dongle_trap_occured = TRUE;
+ bus->dhd->dongle_trap_occured = TRUE;
if ((bcmerror = dhdpcie_bus_membytes(bus, FALSE,
bus->pcie_sh->trap_addr, (uint8*)tr, sizeof(trap_t))) < 0) {
- bus->dhd->dongle_trap_occured = TRUE;
- goto done1;
+ goto done;
}
dhd_bus_dump_trap_info(bus, &strbuf);
+
+ dhd_bus_dump_console_buffer(bus);
}
}
if (bus->pcie_sh->flags & (PCIE_SHARED_ASSERT | PCIE_SHARED_TRAP)) {
- DHD_FWLOG(("%s: %s\n", __FUNCTION__, strbuf.origbuf));
+ printf("%s: %s\n", __FUNCTION__, strbuf.origbuf);
+#ifdef REPORT_FATAL_TIMEOUTS
+ /* Stop the scan/bus/cmd/join timers since the FW has trapped */
+ if (dhd_stop_scan_timer(bus->dhd)) {
+ DHD_ERROR(("dhd_stop_scan_timer failed\n"));
+ ASSERT(0);
+ }
+ if (dhd_stop_bus_timer(bus->dhd)) {
+ DHD_ERROR(("dhd_stop_bus_timer failed\n"));
+ ASSERT(0);
+ }
+ if (dhd_stop_cmd_timer(bus->dhd)) {
+ DHD_ERROR(("dhd_stop_cmd_timer failed\n"));
+ ASSERT(0);
+ }
+ if (dhd_stop_join_timer(bus->dhd)) {
+ DHD_ERROR(("dhd_stop_join_timer failed\n"));
+ ASSERT(0);
+ }
+#endif /* REPORT_FATAL_TIMEOUTS */
- dhd_bus_dump_console_buffer(bus);
dhd_prot_debug_info_print(bus->dhd);
#if defined(DHD_FW_COREDUMP)
/* save core dump or write to a file */
if (bus->dhd->memdump_enabled) {
-#ifdef DHD_SSSR_DUMP
- bus->dhd->collect_sssr = TRUE;
-#endif /* DHD_SSSR_DUMP */
bus->dhd->memdump_type = DUMP_TYPE_DONGLE_TRAP;
dhdpcie_mem_dump(bus);
}
#endif /* DHD_FW_COREDUMP */
- /* set the trap occured flag only after all the memdump,
- * logdump and sssr dump collection has been scheduled
- */
- if (dongle_trap_occured) {
- bus->dhd->dongle_trap_occured = TRUE;
- }
-
/* wake up IOCTL wait event */
dhd_wakeup_ioctl_event(bus->dhd, IOCTL_RETURN_ON_TRAP);
dhd_schedule_reset(bus->dhd);
- }
-done1:
- if (MULTIBP_ENAB(bus->sih)) {
- dhd_bus_pcie_pwr_req_clear(bus);
}
DHD_GENERAL_LOCK(bus->dhd, flags);
DHD_BUS_BUSY_CLEAR_IN_CHECKDIED(bus->dhd);
dhd_os_busbusy_wake(bus->dhd);
DHD_GENERAL_UNLOCK(bus->dhd, flags);
-done2:
+
+done:
if (mbuffer)
MFREE(bus->dhd->osh, mbuffer, msize);
if (str)
return bcmerror;
} /* dhdpcie_checkdied */
+
/* Custom copy of dhdpcie_mem_dump() that can be called at interrupt level */
void dhdpcie_mem_dump_bugcheck(dhd_bus_t *bus, uint8 *buf)
{
return;
}
-#if defined(DHD_FW_COREDUMP)
-static int
-dhdpcie_get_mem_dump(dhd_bus_t *bus)
-{
- int ret = BCME_OK;
- int size = 0;
- int start = 0;
- int read_size = 0; /* Read size of each iteration */
- uint8 *p_buf = NULL, *databuf = NULL;
-
- if (!bus) {
- DHD_ERROR(("%s: bus is NULL\n", __FUNCTION__));
- return BCME_ERROR;
- }
-
- if (!bus->dhd) {
- DHD_ERROR(("%s: dhd is NULL\n", __FUNCTION__));
- return BCME_ERROR;
- }
-
- size = bus->ramsize; /* Full mem size */
- start = bus->dongle_ram_base; /* Start address */
-
- /* Get full mem size */
- p_buf = dhd_get_fwdump_buf(bus->dhd, size);
- if (!p_buf) {
- DHD_ERROR(("%s: Out of memory (%d bytes)\n",
- __FUNCTION__, size));
- return BCME_ERROR;
- }
-
- /* Read mem content */
- DHD_TRACE_HW4(("Dump dongle memory\n"));
- databuf = p_buf;
- while (size > 0) {
- read_size = MIN(MEMBLOCK, size);
- ret = dhdpcie_bus_membytes(bus, FALSE, start, databuf, read_size);
- if (ret) {
- DHD_ERROR(("%s: Error membytes %d\n", __FUNCTION__, ret));
-#ifdef DHD_DEBUG_UART
- bus->dhd->memdump_success = FALSE;
-#endif /* DHD_DEBUG_UART */
- break;
- }
- DHD_TRACE(("."));
-
- /* Decrement size and increment start address */
- size -= read_size;
- start += read_size;
- databuf += read_size;
- }
-
- return ret;
-}
+#if defined(DHD_FW_COREDUMP)
static int
dhdpcie_mem_dump(dhd_bus_t *bus)
{
- dhd_pub_t *dhdp;
- int ret;
+ int ret = 0;
+ int size; /* Full mem size */
+ int start = bus->dongle_ram_base; /* Start address */
+ int read_size = 0; /* Read size of each iteration */
+ uint8 *buf = NULL, *databuf = NULL;
#ifdef EXYNOS_PCIE_DEBUG
exynos_pcie_register_dump(1);
#endif /* EXYNOS_PCIE_DEBUG */
- dhdp = bus->dhd;
- if (!dhdp) {
- DHD_ERROR(("%s: dhdp is NULL\n", __FUNCTION__));
+#ifdef SUPPORT_LINKDOWN_RECOVERY
+ if (bus->is_linkdown) {
+ DHD_ERROR(("%s: PCIe link is down so skip\n", __FUNCTION__));
return BCME_ERROR;
}
+#endif /* SUPPORT_LINKDOWN_RECOVERY */
- if (DHD_BUS_CHECK_DOWN_OR_DOWN_IN_PROGRESS(dhdp)) {
- DHD_ERROR(("%s: bus is down! can't collect mem dump. \n", __FUNCTION__));
+ /* Get full mem size */
+ size = bus->ramsize;
+ buf = dhd_get_fwdump_buf(bus->dhd, size);
+ if (!buf) {
+ DHD_ERROR(("%s: Out of memory (%d bytes)\n", __FUNCTION__, size));
return BCME_ERROR;
}
-#ifdef DHD_PCIE_NATIVE_RUNTIMEPM
- if (pm_runtime_get_sync(dhd_bus_to_dev(bus)) < 0)
- return BCME_ERROR;
-#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
+ /* Read mem content */
+ DHD_TRACE_HW4(("Dump dongle memory\n"));
+ databuf = buf;
+ while (size)
+ {
+ read_size = MIN(MEMBLOCK, size);
+ if ((ret = dhdpcie_bus_membytes(bus, FALSE, start, databuf, read_size)))
+ {
+ DHD_ERROR(("%s: Error membytes %d\n", __FUNCTION__, ret));
+ bus->dhd->memdump_success = FALSE;
+ return BCME_ERROR;
+ }
+ DHD_TRACE(("."));
- ret = dhdpcie_get_mem_dump(bus);
- if (ret) {
- DHD_ERROR(("%s: failed to get mem dump, err=%d\n",
- __FUNCTION__, ret));
- return ret;
+ /* Decrement size and increment start address */
+ size -= read_size;
+ start += read_size;
+ databuf += read_size;
}
+ bus->dhd->memdump_success = TRUE;
- dhd_schedule_memdump(dhdp, dhdp->soc_ram, dhdp->soc_ram_length);
+ dhd_schedule_memdump(bus->dhd, buf, bus->ramsize);
/* buf, actually soc_ram free handled in dhd_{free,clear} */
-#ifdef DHD_PCIE_NATIVE_RUNTIMEPM
- pm_runtime_mark_last_busy(dhd_bus_to_dev(bus));
- pm_runtime_put_autosuspend(dhd_bus_to_dev(bus));
-#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
-
return ret;
}
-int
-dhd_bus_get_mem_dump(dhd_pub_t *dhdp)
-{
- if (!dhdp) {
- DHD_ERROR(("%s: dhdp is NULL\n", __FUNCTION__));
- return BCME_ERROR;
- }
-
- return dhdpcie_get_mem_dump(dhdp->bus);
-}
-
int
dhd_bus_mem_dump(dhd_pub_t *dhdp)
{
dhd_bus_t *bus = dhdp->bus;
- int ret = BCME_ERROR;
if (dhdp->busstate == DHD_BUS_DOWN) {
DHD_ERROR(("%s bus is down\n", __FUNCTION__));
return BCME_ERROR;
}
+#ifdef DHD_PCIE_RUNTIMEPM
+ if (dhdp->memdump_type == DUMP_TYPE_BY_SYSDUMP) {
+ DHD_ERROR(("%s : bus wakeup by SYSDUMP\n", __FUNCTION__));
+ dhdpcie_runtime_bus_wake(dhdp, TRUE, __builtin_return_address(0));
+ }
+#endif /* DHD_PCIE_RUNTIMEPM */
- /* Try to resume if already suspended or suspend in progress */
-
- /* Skip if still in suspended or suspend in progress */
- if (DHD_BUS_CHECK_SUSPEND_OR_ANY_SUSPEND_IN_PROGRESS(dhdp)) {
+ if (DHD_BUS_CHECK_SUSPEND_OR_SUSPEND_IN_PROGRESS(dhdp)) {
DHD_ERROR(("%s: bus is in suspend(%d) or suspending(0x%x) state, so skip\n",
__FUNCTION__, dhdp->busstate, dhdp->dhd_bus_busy_state));
return BCME_ERROR;
}
- DHD_OS_WAKE_LOCK(dhdp);
- ret = dhdpcie_mem_dump(bus);
- DHD_OS_WAKE_UNLOCK(dhdp);
- return ret;
+ return dhdpcie_mem_dump(bus);
+}
+
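+/* Exported entry point that lets external callers force a dongle SoC RAM dump
+ * (e.g. on abnormal AP access); it reuses dhd_bus_mem_dump() under a wake lock.
+ */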
+int
+dhd_dongle_mem_dump(void)
+{
+ if (!g_dhd_bus) {
+ DHD_ERROR(("%s: Bus is NULL\n", __FUNCTION__));
+ return -ENODEV;
+ }
+
+ dhd_bus_dump_console_buffer(g_dhd_bus);
+ dhd_prot_debug_info_print(g_dhd_bus->dhd);
+
+ g_dhd_bus->dhd->memdump_enabled = DUMP_MEMFILE_BUGON;
+ g_dhd_bus->dhd->memdump_type = DUMP_TYPE_AP_ABNORMAL_ACCESS;
+
+#ifdef DHD_PCIE_RUNTIMEPM
+ dhdpcie_runtime_bus_wake(g_dhd_bus->dhd, TRUE, __builtin_return_address(0));
+#endif /* DHD_PCIE_RUNTIMEPM */
+
+ DHD_OS_WAKE_LOCK(g_dhd_bus->dhd);
+ dhd_bus_mem_dump(g_dhd_bus->dhd);
+ DHD_OS_WAKE_UNLOCK(g_dhd_bus->dhd);
+ return 0;
}
+EXPORT_SYMBOL(dhd_dongle_mem_dump);
#endif /* DHD_FW_COREDUMP */
int
dhd_socram_dump(dhd_bus_t *bus)
{
+#ifdef DHD_PCIE_RUNTIMEPM
+ dhdpcie_runtime_bus_wake(bus->dhd, TRUE, __builtin_return_address(0));
+#endif /* DHD_PCIE_RUNTIMEPM */
+
#if defined(DHD_FW_COREDUMP)
DHD_OS_WAKE_LOCK(bus->dhd);
dhd_bus_mem_dump(bus->dhd);
return 0;
#else
return -1;
-#endif // endif
+#endif
}
/**
return BCME_ERROR;
}
- if (MULTIBP_ENAB(bus->sih)) {
- dhd_bus_pcie_pwr_req(bus);
- }
+
/* Detect endianness. */
little_endian = *(char *)&detect_endian_flag;
}
}
}
- if (MULTIBP_ENAB(bus->sih)) {
- dhd_bus_pcie_pwr_req_clear(bus);
- }
return BCME_OK;
} /* dhdpcie_bus_membytes */
int ret = BCME_OK;
#ifdef DHD_LOSSLESS_ROAMING
dhd_pub_t *dhdp = bus->dhd;
-#endif // endif
+#endif
DHD_INFO(("%s: flow_id is %d\n", __FUNCTION__, flow_id));
/* ASSERT on flow_id */
flow_ring_node = DHD_FLOW_RING(bus->dhd, flow_id);
- if (flow_ring_node->prot_info == NULL) {
- DHD_ERROR((" %s : invalid flow_ring_node \n", __FUNCTION__));
- return BCME_NOTREADY;
- }
-
#ifdef DHD_LOSSLESS_ROAMING
if ((dhdp->dequeue_prec_map & (1 << flow_ring_node->flow_info.tid)) == 0) {
DHD_INFO(("%s: tid %d is not in precedence map. block scheduling\n",
}
while ((txp = dhd_flow_queue_dequeue(bus->dhd, queue)) != NULL) {
- if (bus->dhd->conf->orphan_move <= 1)
- PKTORPHAN(txp, bus->dhd->conf->tsq);
+ PKTORPHAN(txp, bus->dhd->conf->tsq);
/*
* Modifying the packet length caused P2P cert failures.
eh = (struct ether_header *) pktdata;
if (eh->ether_type == hton16(ETHER_TYPE_802_1X)) {
uint8 prio = (uint8)PKTPRIO(txp);
+
/* Restore to original priority for 802.1X packet */
if (prio == PRIO_8021D_NC) {
PKTSETPRIO(txp, dhdp->prio_8021x);
}
}
#endif /* DHD_LOSSLESS_ROAMING */
+
/* Attempt to transfer packet over flow ring */
ret = dhd_prot_txdata(bus->dhd, txp, flow_ring_node->flow_info.ifindex);
if (ret != BCME_OK) { /* may not have resources in flow ring */
DHD_INFO(("%s: Reinserrt %d\n", __FUNCTION__, ret));
- dhd_prot_txdata_write_flush(bus->dhd, flow_id);
+ dhd_prot_txdata_write_flush(bus->dhd, flow_id, FALSE);
/* reinsert at head */
dhd_flow_queue_reinsert(bus->dhd, queue, txp);
DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags);
}
}
-#ifdef DHD_HP2P
- if (!flow_ring_node->hp2p_ring) {
- dhd_prot_txdata_write_flush(bus->dhd, flow_id);
- }
-#else
- dhd_prot_txdata_write_flush(bus->dhd, flow_id);
-#endif // endif
+ dhd_prot_txdata_write_flush(bus->dhd, flow_id, FALSE);
+
DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags);
}
toss:
DHD_INFO(("%s: Toss %d\n", __FUNCTION__, ret));
+/* For EFI, pass the 'send' flag as FALSE to avoid enqueuing the failed tx pkt
+ * into the Tx done queue.
+ */
+#ifdef DHD_EFI
+ PKTCFREE(bus->dhd->osh, txp, FALSE);
+#else
PKTCFREE(bus->dhd->osh, txp, TRUE);
+#endif
return ret;
} /* dhd_bus_txdata */
+
void
dhd_bus_stop_queue(struct dhd_bus *bus)
{
dhd_txflowcontrol(bus->dhd, ALL_INTERFACES, ON);
+ bus->bus_flowctrl = TRUE;
}
void
dhd_bus_start_queue(struct dhd_bus *bus)
{
- /*
- * Tx queue has been stopped due to resource shortage (or)
- * bus is not in a state to turn on.
- *
- * Note that we try to re-start network interface only
- * when we have enough resources, one has to first change the
- * flag indicating we have all the resources.
- */
- if (dhd_prot_check_tx_resource(bus->dhd)) {
- DHD_ERROR(("%s: Interface NOT started, previously stopped "
- "due to resource shortage\n", __FUNCTION__));
- return;
- }
dhd_txflowcontrol(bus->dhd, ALL_INTERFACES, OFF);
+ bus->bus_flowctrl = TRUE;
}
/* Device console input function */
dhd_rx_frame(bus->dhd, ifidx, pkt, pkt_count, 0);
}
-void
-dhdpcie_setbar1win(dhd_bus_t *bus, uint32 addr)
-{
- dhdpcie_os_setbar1win(bus, addr);
-}
-
/** 'offset' is a backplane address */
void
dhdpcie_bus_wtcm8(dhd_bus_t *bus, ulong offset, uint8 data)
{
- if (bus->is_linkdown) {
- DHD_LOG_MEM(("%s: PCIe link was down\n", __FUNCTION__));
- return;
- } else {
- dhdpcie_os_wtcm8(bus, offset, data);
- }
+ W_REG(bus->dhd->osh, (volatile uint8 *)(bus->tcm + offset), data);
}
uint8
dhdpcie_bus_rtcm8(dhd_bus_t *bus, ulong offset)
{
volatile uint8 data;
- if (bus->is_linkdown) {
- DHD_LOG_MEM(("%s: PCIe link was down\n", __FUNCTION__));
- data = (uint8)-1;
- } else {
- data = dhdpcie_os_rtcm8(bus, offset);
- }
+ data = R_REG(bus->dhd->osh, (volatile uint8 *)(bus->tcm + offset));
return data;
}
void
dhdpcie_bus_wtcm32(dhd_bus_t *bus, ulong offset, uint32 data)
{
- if (bus->is_linkdown) {
- DHD_LOG_MEM(("%s: PCIe link was down\n", __FUNCTION__));
- return;
- } else {
- dhdpcie_os_wtcm32(bus, offset, data);
- }
+ W_REG(bus->dhd->osh, (volatile uint32 *)(bus->tcm + offset), data);
}
void
dhdpcie_bus_wtcm16(dhd_bus_t *bus, ulong offset, uint16 data)
{
- if (bus->is_linkdown) {
- DHD_LOG_MEM(("%s: PCIe link was down\n", __FUNCTION__));
- return;
- } else {
- dhdpcie_os_wtcm16(bus, offset, data);
- }
+ W_REG(bus->dhd->osh, (volatile uint16 *)(bus->tcm + offset), data);
}
#ifdef DHD_SUPPORT_64BIT
void
dhdpcie_bus_wtcm64(dhd_bus_t *bus, ulong offset, uint64 data)
{
- if (bus->is_linkdown) {
- DHD_LOG_MEM(("%s: PCIe link was down\n", __FUNCTION__));
- return;
- } else {
- dhdpcie_os_wtcm64(bus, offset, data);
- }
+ W_REG(bus->dhd->osh, (volatile uint64 *)(bus->tcm + offset), data);
}
#endif /* DHD_SUPPORT_64BIT */
dhdpcie_bus_rtcm16(dhd_bus_t *bus, ulong offset)
{
volatile uint16 data;
- if (bus->is_linkdown) {
- DHD_LOG_MEM(("%s: PCIe link was down\n", __FUNCTION__));
- data = (uint16)-1;
- } else {
- data = dhdpcie_os_rtcm16(bus, offset);
- }
+ data = R_REG(bus->dhd->osh, (volatile uint16 *)(bus->tcm + offset));
return data;
}
dhdpcie_bus_rtcm32(dhd_bus_t *bus, ulong offset)
{
volatile uint32 data;
- if (bus->is_linkdown) {
- DHD_LOG_MEM(("%s: PCIe link was down\n", __FUNCTION__));
- data = (uint32)-1;
- } else {
- data = dhdpcie_os_rtcm32(bus, offset);
- }
+ data = R_REG(bus->dhd->osh, (volatile uint32 *)(bus->tcm + offset));
return data;
}
dhdpcie_bus_rtcm64(dhd_bus_t *bus, ulong offset)
{
volatile uint64 data;
- if (bus->is_linkdown) {
- DHD_LOG_MEM(("%s: PCIe link was down\n", __FUNCTION__));
- data = (uint64)-1;
- } else {
- data = dhdpcie_os_rtcm64(bus, offset);
- }
+ data = R_REG(bus->dhd->osh, (volatile uint64 *)(bus->tcm + offset));
return data;
}
#endif /* DHD_SUPPORT_64BIT */
dhd_bus_cmn_writeshared(dhd_bus_t *bus, void *data, uint32 len, uint8 type, uint16 ringid)
{
uint64 long_data;
- ulong addr; /* dongle address */
+ uintptr tcm_offset;
DHD_INFO(("%s: writing to dongle type %d len %d\n", __FUNCTION__, type, len));
return;
}
- if (MULTIBP_ENAB(bus->sih)) {
- dhd_bus_pcie_pwr_req(bus);
- }
switch (type) {
case D2H_DMA_SCRATCH_BUF:
- addr = DHD_PCIE_SHARED_MEMBER_ADDR(bus, host_dma_scratch_buffer);
+ {
+ pciedev_shared_t *sh = (pciedev_shared_t*)bus->shared_addr;
long_data = HTOL64(*(uint64 *)data);
- dhdpcie_bus_membytes(bus, TRUE, addr, (uint8*) &long_data, len);
+ tcm_offset = (uintptr)&(sh->host_dma_scratch_buffer);
+ dhdpcie_bus_membytes(bus, TRUE,
+ (ulong)tcm_offset, (uint8*) &long_data, len);
if (dhd_msg_level & DHD_INFO_VAL) {
prhex(__FUNCTION__, data, len);
}
break;
+ }
case D2H_DMA_SCRATCH_BUF_LEN :
- addr = DHD_PCIE_SHARED_MEMBER_ADDR(bus, host_dma_scratch_buffer_len);
- dhdpcie_bus_wtcm32(bus, addr, (uint32) HTOL32(*(uint32 *)data));
+ {
+ pciedev_shared_t *sh = (pciedev_shared_t*)bus->shared_addr;
+ tcm_offset = (uintptr)&(sh->host_dma_scratch_buffer_len);
+ dhdpcie_bus_wtcm32(bus,
+ (ulong)tcm_offset, (uint32) HTOL32(*(uint32 *)data));
if (dhd_msg_level & DHD_INFO_VAL) {
prhex(__FUNCTION__, data, len);
}
break;
+ }
case H2D_DMA_INDX_WR_BUF:
+ {
+ pciedev_shared_t *shmem = (pciedev_shared_t *)bus->pcie_sh;
+
long_data = HTOL64(*(uint64 *)data);
- addr = DHD_RING_INFO_MEMBER_ADDR(bus, h2d_w_idx_hostaddr);
- dhdpcie_bus_membytes(bus, TRUE, addr, (uint8*) &long_data, len);
+ tcm_offset = (uintptr)shmem->rings_info_ptr;
+ tcm_offset += OFFSETOF(ring_info_t, h2d_w_idx_hostaddr);
+ dhdpcie_bus_membytes(bus, TRUE,
+ (ulong)tcm_offset, (uint8*) &long_data, len);
if (dhd_msg_level & DHD_INFO_VAL) {
prhex(__FUNCTION__, data, len);
}
break;
+ }
case H2D_DMA_INDX_RD_BUF:
+ {
+ pciedev_shared_t *shmem = (pciedev_shared_t *)bus->pcie_sh;
long_data = HTOL64(*(uint64 *)data);
- addr = DHD_RING_INFO_MEMBER_ADDR(bus, h2d_r_idx_hostaddr);
- dhdpcie_bus_membytes(bus, TRUE, addr, (uint8*) &long_data, len);
+ tcm_offset = (uintptr)shmem->rings_info_ptr;
+ tcm_offset += OFFSETOF(ring_info_t, h2d_r_idx_hostaddr);
+ dhdpcie_bus_membytes(bus, TRUE,
+ (ulong)tcm_offset, (uint8*) &long_data, len);
if (dhd_msg_level & DHD_INFO_VAL) {
prhex(__FUNCTION__, data, len);
}
break;
+ }
case D2H_DMA_INDX_WR_BUF:
+ {
+ pciedev_shared_t *shmem = (pciedev_shared_t *)bus->pcie_sh;
long_data = HTOL64(*(uint64 *)data);
- addr = DHD_RING_INFO_MEMBER_ADDR(bus, d2h_w_idx_hostaddr);
- dhdpcie_bus_membytes(bus, TRUE, addr, (uint8*) &long_data, len);
+ tcm_offset = (uintptr)shmem->rings_info_ptr;
+ tcm_offset += OFFSETOF(ring_info_t, d2h_w_idx_hostaddr);
+ dhdpcie_bus_membytes(bus, TRUE,
+ (ulong)tcm_offset, (uint8*) &long_data, len);
if (dhd_msg_level & DHD_INFO_VAL) {
prhex(__FUNCTION__, data, len);
}
break;
+ }
case D2H_DMA_INDX_RD_BUF:
+ {
+ pciedev_shared_t *shmem = (pciedev_shared_t *)bus->pcie_sh;
long_data = HTOL64(*(uint64 *)data);
- addr = DHD_RING_INFO_MEMBER_ADDR(bus, d2h_r_idx_hostaddr);
- dhdpcie_bus_membytes(bus, TRUE, addr, (uint8*) &long_data, len);
+ tcm_offset = (uintptr)shmem->rings_info_ptr;
+ tcm_offset += OFFSETOF(ring_info_t, d2h_r_idx_hostaddr);
+ dhdpcie_bus_membytes(bus, TRUE,
+ (ulong)tcm_offset, (uint8*) &long_data, len);
if (dhd_msg_level & DHD_INFO_VAL) {
prhex(__FUNCTION__, data, len);
}
break;
+ }
case H2D_IFRM_INDX_WR_BUF:
+ {
+ pciedev_shared_t *shmem = (pciedev_shared_t *)bus->pcie_sh;
+
long_data = HTOL64(*(uint64 *)data);
- addr = DHD_RING_INFO_MEMBER_ADDR(bus, ifrm_w_idx_hostaddr);
- dhdpcie_bus_membytes(bus, TRUE, addr, (uint8*) &long_data, len);
+ tcm_offset = (uintptr)shmem->rings_info_ptr;
+ tcm_offset += OFFSETOF(ring_info_t, ifrm_w_idx_hostaddr);
+ dhdpcie_bus_membytes(bus, TRUE,
+ (ulong)tcm_offset, (uint8*) &long_data, len);
if (dhd_msg_level & DHD_INFO_VAL) {
prhex(__FUNCTION__, data, len);
}
break;
+ }
case RING_ITEM_LEN :
- addr = DHD_RING_MEM_MEMBER_ADDR(bus, ringid, len_items);
- dhdpcie_bus_wtcm16(bus, addr, (uint16) HTOL16(*(uint16 *)data));
+ tcm_offset = bus->ring_sh[ringid].ring_mem_addr;
+ tcm_offset += OFFSETOF(ring_mem_t, len_items);
+ dhdpcie_bus_wtcm16(bus,
+ (ulong)tcm_offset, (uint16) HTOL16(*(uint16 *)data));
break;
case RING_MAX_ITEMS :
- addr = DHD_RING_MEM_MEMBER_ADDR(bus, ringid, max_item);
- dhdpcie_bus_wtcm16(bus, addr, (uint16) HTOL16(*(uint16 *)data));
+ tcm_offset = bus->ring_sh[ringid].ring_mem_addr;
+ tcm_offset += OFFSETOF(ring_mem_t, max_item);
+ dhdpcie_bus_wtcm16(bus,
+ (ulong)tcm_offset, (uint16) HTOL16(*(uint16 *)data));
break;
case RING_BUF_ADDR :
long_data = HTOL64(*(uint64 *)data);
- addr = DHD_RING_MEM_MEMBER_ADDR(bus, ringid, base_addr);
- dhdpcie_bus_membytes(bus, TRUE, addr, (uint8 *) &long_data, len);
+ tcm_offset = bus->ring_sh[ringid].ring_mem_addr;
+ tcm_offset += OFFSETOF(ring_mem_t, base_addr);
+ dhdpcie_bus_membytes(bus, TRUE,
+ (ulong)tcm_offset, (uint8 *) &long_data, len);
if (dhd_msg_level & DHD_INFO_VAL) {
prhex(__FUNCTION__, data, len);
}
break;
case RING_WR_UPD :
- addr = bus->ring_sh[ringid].ring_state_w;
- dhdpcie_bus_wtcm16(bus, addr, (uint16) HTOL16(*(uint16 *)data));
+ tcm_offset = bus->ring_sh[ringid].ring_state_w;
+ dhdpcie_bus_wtcm16(bus,
+ (ulong)tcm_offset, (uint16) HTOL16(*(uint16 *)data));
break;
case RING_RD_UPD :
- addr = bus->ring_sh[ringid].ring_state_r;
- dhdpcie_bus_wtcm16(bus, addr, (uint16) HTOL16(*(uint16 *)data));
+ tcm_offset = bus->ring_sh[ringid].ring_state_r;
+ dhdpcie_bus_wtcm16(bus,
+ (ulong)tcm_offset, (uint16) HTOL16(*(uint16 *)data));
break;
case D2H_MB_DATA:
- addr = bus->d2h_mb_data_ptr_addr;
- dhdpcie_bus_wtcm32(bus, addr, (uint32) HTOL32(*(uint32 *)data));
+ dhdpcie_bus_wtcm32(bus, bus->d2h_mb_data_ptr_addr,
+ (uint32) HTOL32(*(uint32 *)data));
break;
case H2D_MB_DATA:
- addr = bus->h2d_mb_data_ptr_addr;
- dhdpcie_bus_wtcm32(bus, addr, (uint32) HTOL32(*(uint32 *)data));
+ dhdpcie_bus_wtcm32(bus, bus->h2d_mb_data_ptr_addr,
+ (uint32) HTOL32(*(uint32 *)data));
break;
case HOST_API_VERSION:
- addr = DHD_PCIE_SHARED_MEMBER_ADDR(bus, host_cap);
- dhdpcie_bus_wtcm32(bus, addr, (uint32) HTOL32(*(uint32 *)data));
+ {
+ pciedev_shared_t *sh = (pciedev_shared_t*) bus->shared_addr;
+ tcm_offset = (uintptr)sh + OFFSETOF(pciedev_shared_t, host_cap);
+ dhdpcie_bus_wtcm32(bus,
+ (ulong)tcm_offset, (uint32) HTOL32(*(uint32 *)data));
break;
+ }
case DNGL_TO_HOST_TRAP_ADDR:
+ {
+ pciedev_shared_t *sh = (pciedev_shared_t*) bus->shared_addr;
long_data = HTOL64(*(uint64 *)data);
- addr = DHD_PCIE_SHARED_MEMBER_ADDR(bus, host_trap_addr);
- dhdpcie_bus_membytes(bus, TRUE, addr, (uint8 *) &long_data, len);
- DHD_INFO(("Wrote trap addr:0x%x\n", (uint32) HTOL32(*(uint32 *)data)));
+ tcm_offset = (uintptr)&(sh->host_trap_addr);
+ dhdpcie_bus_membytes(bus, TRUE,
+ (ulong)tcm_offset, (uint8*) &long_data, len);
break;
+ }
- case HOST_SCB_ADDR:
- addr = DHD_PCIE_SHARED_MEMBER_ADDR(bus, host_scb_addr);
-#ifdef DHD_SUPPORT_64BIT
- dhdpcie_bus_wtcm64(bus, addr, (uint64) HTOL64(*(uint64 *)data));
-#else /* !DHD_SUPPORT_64BIT */
- dhdpcie_bus_wtcm32(bus, addr, *((uint32*)data));
-#endif /* DHD_SUPPORT_64BIT */
- DHD_INFO(("Wrote host_scb_addr:0x%x\n",
- (uint32) HTOL32(*(uint32 *)data)));
+#ifdef HOFFLOAD_MODULES
+ case WRT_HOST_MODULE_ADDR:
+ {
+ pciedev_shared_t *sh = (pciedev_shared_t*) bus->shared_addr;
+ long_data = HTOL64(*(uint64 *)data);
+ tcm_offset = (uintptr)&(sh->hoffload_addr);
+ dhdpcie_bus_membytes(bus, TRUE,
+ (ulong)tcm_offset, (uint8*) &long_data, len);
break;
-
+ }
+#endif
default:
break;
}
- if (MULTIBP_ENAB(bus->sih)) {
- dhd_bus_pcie_pwr_req_clear(bus);
- }
} /* dhd_bus_cmn_writeshared */
/** A snippet of dongle memory is shared between host and dongle */
void
dhd_bus_cmn_readshared(dhd_bus_t *bus, void* data, uint8 type, uint16 ringid)
{
- ulong addr; /* dongle address */
+ ulong tcm_offset;
- if (MULTIBP_ENAB(bus->sih)) {
- dhd_bus_pcie_pwr_req(bus);
- }
switch (type) {
case RING_WR_UPD :
- addr = bus->ring_sh[ringid].ring_state_w;
- *(uint16*)data = LTOH16(dhdpcie_bus_rtcm16(bus, addr));
+ tcm_offset = bus->ring_sh[ringid].ring_state_w;
+ *(uint16*)data = LTOH16(dhdpcie_bus_rtcm16(bus, tcm_offset));
break;
-
case RING_RD_UPD :
- addr = bus->ring_sh[ringid].ring_state_r;
- *(uint16*)data = LTOH16(dhdpcie_bus_rtcm16(bus, addr));
+ tcm_offset = bus->ring_sh[ringid].ring_state_r;
+ *(uint16*)data = LTOH16(dhdpcie_bus_rtcm16(bus, tcm_offset));
break;
-
case TOTAL_LFRAG_PACKET_CNT :
- addr = DHD_PCIE_SHARED_MEMBER_ADDR(bus, total_lfrag_pkt_cnt);
- *(uint16*)data = LTOH16(dhdpcie_bus_rtcm16(bus, addr));
+ {
+ pciedev_shared_t *sh = (pciedev_shared_t*)bus->shared_addr;
+ *(uint16*)data = LTOH16(dhdpcie_bus_rtcm16(bus,
+ (ulong)(uintptr) &sh->total_lfrag_pkt_cnt));
break;
-
+ }
case H2D_MB_DATA:
- addr = bus->h2d_mb_data_ptr_addr;
- *(uint32*)data = LTOH32(dhdpcie_bus_rtcm32(bus, addr));
+ *(uint32*)data = LTOH32(dhdpcie_bus_rtcm32(bus, bus->h2d_mb_data_ptr_addr));
break;
-
case D2H_MB_DATA:
- addr = bus->d2h_mb_data_ptr_addr;
- *(uint32*)data = LTOH32(dhdpcie_bus_rtcm32(bus, addr));
+ *(uint32*)data = LTOH32(dhdpcie_bus_rtcm32(bus, bus->d2h_mb_data_ptr_addr));
break;
-
case MAX_HOST_RXBUFS :
- addr = DHD_PCIE_SHARED_MEMBER_ADDR(bus, max_host_rxbufs);
- *(uint16*)data = LTOH16(dhdpcie_bus_rtcm16(bus, addr));
- break;
-
- case HOST_SCB_ADDR:
- addr = DHD_PCIE_SHARED_MEMBER_ADDR(bus, host_scb_size);
- *(uint32*)data = LTOH32(dhdpcie_bus_rtcm32(bus, addr));
+ {
+ pciedev_shared_t *sh = (pciedev_shared_t*)bus->shared_addr;
+ *(uint16*)data = LTOH16(dhdpcie_bus_rtcm16(bus,
+ (ulong)(uintptr) &sh->max_host_rxbufs));
break;
-
+ }
default :
break;
}
- if (MULTIBP_ENAB(bus->sih)) {
- dhd_bus_pcie_pwr_req_clear(bus);
- }
}
uint32 dhd_bus_get_sharedflags(dhd_bus_t *bus)
{
}
-/**
- * @param params input buffer, NULL for 'set' operation.
- * @param plen length of 'params' buffer, 0 for 'set' operation.
- * @param arg output buffer
- */
int
dhd_bus_iovar_op(dhd_pub_t *dhdp, const char *name,
void *params, int plen, void *arg, int len, bool set)
ASSERT(name);
ASSERT(len >= 0);
- if (!name || len < 0)
- return BCME_BADARG;
/* Get MUST have return space */
ASSERT(set || (arg && len));
- if (!(set || (arg && len)))
- return BCME_BADARG;
/* Set does NOT take qualifiers */
ASSERT(!set || (!params && !plen));
- if (!(!set || (!params && !plen)))
- return BCME_BADARG;
DHD_INFO(("%s: %s %s, len %d plen %d\n", __FUNCTION__,
name, (set ? "set" : "get"), len, plen));
goto exit;
}
- if (MULTIBP_ENAB(bus->sih)) {
- if (vi->flags & DHD_IOVF_PWRREQ_BYPASS) {
- DHD_ERROR(("%s: Bypass pwr request\n", __FUNCTION__));
- } else {
- dhd_bus_pcie_pwr_req(bus);
- }
- }
/* set up 'params' pointer in case this is a set command so that
* the convenience int and bool code can be common to set and get
bcmerror = dhdpcie_bus_doiovar(bus, vi, actionid, name, params, plen, arg, len, val_size);
exit:
- /* In DEVRESET_QUIESCE/DEVRESET_ON,
- * this includes dongle re-attach which initialize pwr_req_ref count to 0 and
- * causes pwr_req_ref count miss-match in pwr req clear function and hang.
- * In this case, bypass pwr req clear.
- */
- if (bcmerror == BCME_DNGL_DEVRESET) {
- bcmerror = BCME_OK;
- } else {
- if (MULTIBP_ENAB(bus->sih)) {
- if (vi && (vi->flags & DHD_IOVF_PWRREQ_BYPASS)) {
- DHD_ERROR(("%s: Bypass pwr request clear\n", __FUNCTION__));
- } else {
- dhd_bus_pcie_pwr_req_clear(bus);
- }
- }
- }
return bcmerror;
} /* dhd_bus_iovar_op */
#define PCIE_GEN2(sih) ((BUSTYPE((sih)->bustype) == PCI_BUS) && \
((sih)->buscoretype == PCIE2_CORE_ID))
-#define PCIE_FLR_CAPAB_BIT 28
-#define PCIE_FUNCTION_LEVEL_RESET_BIT 15
-
-/* Change delays for only QT HW, FPGA and silicon uses same delay */
-#ifdef BCMQT_HW
-#define DHD_FUNCTION_LEVEL_RESET_DELAY 300000u
-#define DHD_SSRESET_STATUS_RETRY_DELAY 10000u
-#else
-#define DHD_FUNCTION_LEVEL_RESET_DELAY 70u /* 70 msec delay */
-#define DHD_SSRESET_STATUS_RETRY_DELAY 40u
-#endif // endif
-/*
- * Increase SSReset de-assert time to 8ms.
- * since it takes longer time if re-scan time on 4378B0.
- */
-#define DHD_SSRESET_STATUS_RETRIES 200u
-
-static void
-dhdpcie_enum_reg_init(dhd_bus_t *bus)
-{
- /* initialize Function control register (clear bit 4) to HW init value */
- si_corereg(bus->sih, bus->sih->buscoreidx,
- OFFSETOF(sbpcieregs_t, ftn_ctrl.control), ~0,
- PCIE_CPLCA_ENABLE | PCIE_DLY_PERST_TO_COE);
-
- /* clear IntMask */
- si_corereg(bus->sih, bus->sih->buscoreidx,
- OFFSETOF(sbpcieregs_t, ftn_ctrl.intmask), ~0, 0);
- /* clear IntStatus */
- si_corereg(bus->sih, bus->sih->buscoreidx,
- OFFSETOF(sbpcieregs_t, ftn_ctrl.intstatus), ~0,
- si_corereg(bus->sih, bus->sih->buscoreidx,
- OFFSETOF(sbpcieregs_t, ftn_ctrl.intstatus), 0, 0));
-
- /* clear MSIVector */
- si_corereg(bus->sih, bus->sih->buscoreidx,
- OFFSETOF(sbpcieregs_t, ftn_ctrl.msi_vector), ~0, 0);
- /* clear MSIIntMask */
- si_corereg(bus->sih, bus->sih->buscoreidx,
- OFFSETOF(sbpcieregs_t, ftn_ctrl.msi_intmask), ~0, 0);
- /* clear MSIIntStatus */
- si_corereg(bus->sih, bus->sih->buscoreidx,
- OFFSETOF(sbpcieregs_t, ftn_ctrl.msi_intstatus), ~0,
- si_corereg(bus->sih, bus->sih->buscoreidx,
- OFFSETOF(sbpcieregs_t, ftn_ctrl.msi_intstatus), 0, 0));
-
- /* clear PowerIntMask */
- si_corereg(bus->sih, bus->sih->buscoreidx,
- OFFSETOF(sbpcieregs_t, ftn_ctrl.pwr_intmask), ~0, 0);
- /* clear PowerIntStatus */
- si_corereg(bus->sih, bus->sih->buscoreidx,
- OFFSETOF(sbpcieregs_t, ftn_ctrl.pwr_intstatus), ~0,
- si_corereg(bus->sih, bus->sih->buscoreidx,
- OFFSETOF(sbpcieregs_t, ftn_ctrl.pwr_intstatus), 0, 0));
-
- /* clear MailboxIntMask */
- si_corereg(bus->sih, bus->sih->buscoreidx,
- OFFSETOF(sbpcieregs_t, ftn_ctrl.mbox_intmask), ~0, 0);
- /* clear MailboxInt */
- si_corereg(bus->sih, bus->sih->buscoreidx,
- OFFSETOF(sbpcieregs_t, ftn_ctrl.mbox_intstatus), ~0,
- si_corereg(bus->sih, bus->sih->buscoreidx,
- OFFSETOF(sbpcieregs_t, ftn_ctrl.mbox_intstatus), 0, 0));
-}
-
-int
-dhd_bus_perform_flr(dhd_bus_t *bus, bool force_fail)
-{
- uint flr_capab;
- uint val;
- int retry = 0;
-
- DHD_ERROR(("******** Perform FLR ********\n"));
-
- if (PCIE_ENUM_RESET_WAR_ENAB(bus->sih->buscorerev)) {
- if (bus->pcie_mailbox_mask != 0) {
- dhdpcie_bus_intr_disable(bus);
- }
- /* initialize F0 enum registers before FLR for rev66/67 */
- dhdpcie_enum_reg_init(bus);
- }
-
- /* Read PCIE_CFG_DEVICE_CAPABILITY bit 28 to check FLR capability */
- val = OSL_PCI_READ_CONFIG(bus->osh, PCIE_CFG_DEVICE_CAPABILITY, sizeof(val));
- flr_capab = val & (1 << PCIE_FLR_CAPAB_BIT);
- DHD_INFO(("Read Device Capability: reg=0x%x read val=0x%x flr_capab=0x%x\n",
- PCIE_CFG_DEVICE_CAPABILITY, val, flr_capab));
- if (!flr_capab) {
- DHD_ERROR(("Chip does not support FLR\n"));
- return BCME_UNSUPPORTED;
- }
-
- /* Save pcie config space */
- DHD_INFO(("Save Pcie Config Space\n"));
- DHD_PCIE_CONFIG_SAVE(bus);
-
- /* Set bit 15 of PCIE_CFG_DEVICE_CONTROL */
- DHD_INFO(("Set PCIE_FUNCTION_LEVEL_RESET_BIT(%d) of PCIE_CFG_DEVICE_CONTROL(0x%x)\n",
- PCIE_FUNCTION_LEVEL_RESET_BIT, PCIE_CFG_DEVICE_CONTROL));
- val = OSL_PCI_READ_CONFIG(bus->osh, PCIE_CFG_DEVICE_CONTROL, sizeof(val));
- DHD_INFO(("read_config: reg=0x%x read val=0x%x\n", PCIE_CFG_DEVICE_CONTROL, val));
- val = val | (1 << PCIE_FUNCTION_LEVEL_RESET_BIT);
- DHD_INFO(("write_config: reg=0x%x write val=0x%x\n", PCIE_CFG_DEVICE_CONTROL, val));
- OSL_PCI_WRITE_CONFIG(bus->osh, PCIE_CFG_DEVICE_CONTROL, sizeof(val), val);
-
- /* wait for DHD_FUNCTION_LEVEL_RESET_DELAY msec */
- DHD_INFO(("Delay of %d msec\n", DHD_FUNCTION_LEVEL_RESET_DELAY));
- OSL_DELAY(DHD_FUNCTION_LEVEL_RESET_DELAY * 1000u);
-
- if (force_fail) {
- DHD_ERROR(("Set PCIE_SSRESET_DISABLE_BIT(%d) of PCIE_CFG_SUBSYSTEM_CONTROL(0x%x)\n",
- PCIE_SSRESET_DISABLE_BIT, PCIE_CFG_SUBSYSTEM_CONTROL));
- val = OSL_PCI_READ_CONFIG(bus->osh, PCIE_CFG_SUBSYSTEM_CONTROL, sizeof(val));
- DHD_ERROR(("read_config: reg=0x%x read val=0x%x\n", PCIE_CFG_SUBSYSTEM_CONTROL,
- val));
- val = val | (1 << PCIE_SSRESET_DISABLE_BIT);
- DHD_ERROR(("write_config: reg=0x%x write val=0x%x\n", PCIE_CFG_SUBSYSTEM_CONTROL,
- val));
- OSL_PCI_WRITE_CONFIG(bus->osh, PCIE_CFG_SUBSYSTEM_CONTROL, sizeof(val), val);
-
- val = OSL_PCI_READ_CONFIG(bus->osh, PCIE_CFG_SUBSYSTEM_CONTROL, sizeof(val));
- DHD_ERROR(("read_config: reg=0x%x read val=0x%x\n", PCIE_CFG_SUBSYSTEM_CONTROL,
- val));
- }
-
- /* Clear bit 15 of PCIE_CFG_DEVICE_CONTROL */
- DHD_INFO(("Clear PCIE_FUNCTION_LEVEL_RESET_BIT(%d) of PCIE_CFG_DEVICE_CONTROL(0x%x)\n",
- PCIE_FUNCTION_LEVEL_RESET_BIT, PCIE_CFG_DEVICE_CONTROL));
- val = OSL_PCI_READ_CONFIG(bus->osh, PCIE_CFG_DEVICE_CONTROL, sizeof(val));
- DHD_INFO(("read_config: reg=0x%x read val=0x%x\n", PCIE_CFG_DEVICE_CONTROL, val));
- val = val & ~(1 << PCIE_FUNCTION_LEVEL_RESET_BIT);
- DHD_INFO(("write_config: reg=0x%x write val=0x%x\n", PCIE_CFG_DEVICE_CONTROL, val));
- OSL_PCI_WRITE_CONFIG(bus->osh, PCIE_CFG_DEVICE_CONTROL, sizeof(val), val);
-
- /* Wait till bit 13 of PCIE_CFG_SUBSYSTEM_CONTROL is cleared */
- DHD_INFO(("Wait till PCIE_SSRESET_STATUS_BIT(%d) of PCIE_CFG_SUBSYSTEM_CONTROL(0x%x)"
- "is cleared\n", PCIE_SSRESET_STATUS_BIT, PCIE_CFG_SUBSYSTEM_CONTROL));
- do {
- val = OSL_PCI_READ_CONFIG(bus->osh, PCIE_CFG_SUBSYSTEM_CONTROL, sizeof(val));
- DHD_ERROR(("read_config: reg=0x%x read val=0x%x\n",
- PCIE_CFG_SUBSYSTEM_CONTROL, val));
- val = val & (1 << PCIE_SSRESET_STATUS_BIT);
- OSL_DELAY(DHD_SSRESET_STATUS_RETRY_DELAY);
- } while (val && (retry++ < DHD_SSRESET_STATUS_RETRIES));
-
- if (val) {
- DHD_ERROR(("ERROR: reg=0x%x bit %d is not cleared\n",
- PCIE_CFG_SUBSYSTEM_CONTROL, PCIE_SSRESET_STATUS_BIT));
- /* User has to fire the IOVAR again, if force_fail is needed */
- if (force_fail) {
- bus->flr_force_fail = FALSE;
- DHD_ERROR(("%s cleared flr_force_fail flag\n", __FUNCTION__));
- }
- return BCME_DONGLE_DOWN;
- }
-
- /* Restore pcie config space */
- DHD_INFO(("Restore Pcie Config Space\n"));
- DHD_PCIE_CONFIG_RESTORE(bus);
-
- DHD_ERROR(("******** FLR Succedeed ********\n"));
-
- return BCME_OK;
-}
-
-#ifdef DHD_USE_BP_RESET
-#define DHD_BP_RESET_ASPM_DISABLE_DELAY 500u /* usec */
-
-#define DHD_BP_RESET_STATUS_RETRY_DELAY 40u /* usec */
-#define DHD_BP_RESET_STATUS_RETRIES 50u
-
-#define PCIE_CFG_SPROM_CTRL_SB_RESET_BIT 10
-#define PCIE_CFG_CLOCK_CTRL_STATUS_BP_RESET_BIT 21
-int
-dhd_bus_perform_bp_reset(struct dhd_bus *bus)
-{
- uint val;
- int retry = 0;
- uint dar_clk_ctrl_status_reg = DAR_CLK_CTRL(bus->sih->buscorerev);
- int ret = BCME_OK;
- bool cond;
-
- DHD_ERROR(("******** Perform BP reset ********\n"));
-
- /* Disable ASPM */
- DHD_INFO(("Disable ASPM: Clear bits(1-0) of PCIECFGREG_LINK_STATUS_CTRL(0x%x)\n",
- PCIECFGREG_LINK_STATUS_CTRL));
- val = OSL_PCI_READ_CONFIG(bus->osh, PCIECFGREG_LINK_STATUS_CTRL, sizeof(val));
- DHD_INFO(("read_config: reg=0x%x read val=0x%x\n", PCIECFGREG_LINK_STATUS_CTRL, val));
- val = val & (~PCIE_ASPM_ENAB);
- DHD_INFO(("write_config: reg=0x%x write val=0x%x\n", PCIECFGREG_LINK_STATUS_CTRL, val));
- OSL_PCI_WRITE_CONFIG(bus->osh, PCIECFGREG_LINK_STATUS_CTRL, sizeof(val), val);
-
- /* wait for delay usec */
- DHD_INFO(("Delay of %d usec\n", DHD_BP_RESET_ASPM_DISABLE_DELAY));
- OSL_DELAY(DHD_BP_RESET_ASPM_DISABLE_DELAY);
-
- /* Set bit 10 of PCIECFGREG_SPROM_CTRL */
- DHD_INFO(("Set PCIE_CFG_SPROM_CTRL_SB_RESET_BIT(%d) of PCIECFGREG_SPROM_CTRL(0x%x)\n",
- PCIE_CFG_SPROM_CTRL_SB_RESET_BIT, PCIECFGREG_SPROM_CTRL));
- val = OSL_PCI_READ_CONFIG(bus->osh, PCIECFGREG_SPROM_CTRL, sizeof(val));
- DHD_INFO(("read_config: reg=0x%x read val=0x%x\n", PCIECFGREG_SPROM_CTRL, val));
- val = val | (1 << PCIE_CFG_SPROM_CTRL_SB_RESET_BIT);
- DHD_INFO(("write_config: reg=0x%x write val=0x%x\n", PCIECFGREG_SPROM_CTRL, val));
- OSL_PCI_WRITE_CONFIG(bus->osh, PCIECFGREG_SPROM_CTRL, sizeof(val), val);
-
- /* Wait till bit backplane reset is ASSERTED i,e
- * bit 10 of PCIECFGREG_SPROM_CTRL is cleared.
- * Only after this, poll for 21st bit of DAR reg 0xAE0 is valid
- * else DAR register will read previous old value
- */
- DHD_INFO(("Wait till PCIE_CFG_SPROM_CTRL_SB_RESET_BIT(%d) of "
- "PCIECFGREG_SPROM_CTRL(0x%x) is cleared\n",
- PCIE_CFG_SPROM_CTRL_SB_RESET_BIT, PCIECFGREG_SPROM_CTRL));
- do {
- val = OSL_PCI_READ_CONFIG(bus->osh, PCIECFGREG_SPROM_CTRL, sizeof(val));
- DHD_INFO(("read_config: reg=0x%x read val=0x%x\n", PCIECFGREG_SPROM_CTRL, val));
- cond = val & (1 << PCIE_CFG_SPROM_CTRL_SB_RESET_BIT);
- OSL_DELAY(DHD_BP_RESET_STATUS_RETRY_DELAY);
- } while (cond && (retry++ < DHD_BP_RESET_STATUS_RETRIES));
-
- if (cond) {
- DHD_ERROR(("ERROR: reg=0x%x bit %d is not cleared\n",
- PCIECFGREG_SPROM_CTRL, PCIE_CFG_SPROM_CTRL_SB_RESET_BIT));
- ret = BCME_ERROR;
- goto aspm_enab;
- }
-
- /* Wait till bit 21 of dar_clk_ctrl_status_reg is cleared */
- DHD_INFO(("Wait till PCIE_CFG_CLOCK_CTRL_STATUS_BP_RESET_BIT(%d) of "
- "dar_clk_ctrl_status_reg(0x%x) is cleared\n",
- PCIE_CFG_CLOCK_CTRL_STATUS_BP_RESET_BIT, dar_clk_ctrl_status_reg));
- do {
- val = si_corereg(bus->sih, bus->sih->buscoreidx,
- dar_clk_ctrl_status_reg, 0, 0);
- DHD_INFO(("read_dar si_corereg: reg=0x%x read val=0x%x\n",
- dar_clk_ctrl_status_reg, val));
- cond = val & (1 << PCIE_CFG_CLOCK_CTRL_STATUS_BP_RESET_BIT);
- OSL_DELAY(DHD_BP_RESET_STATUS_RETRY_DELAY);
- } while (cond && (retry++ < DHD_BP_RESET_STATUS_RETRIES));
-
- if (cond) {
- DHD_ERROR(("ERROR: reg=0x%x bit %d is not cleared\n",
- dar_clk_ctrl_status_reg, PCIE_CFG_CLOCK_CTRL_STATUS_BP_RESET_BIT));
- ret = BCME_ERROR;
- }
-
-aspm_enab:
- /* Enable ASPM */
- DHD_INFO(("Enable ASPM: set bit 1 of PCIECFGREG_LINK_STATUS_CTRL(0x%x)\n",
- PCIECFGREG_LINK_STATUS_CTRL));
- val = OSL_PCI_READ_CONFIG(bus->osh, PCIECFGREG_LINK_STATUS_CTRL, sizeof(val));
- DHD_INFO(("read_config: reg=0x%x read val=0x%x\n", PCIECFGREG_LINK_STATUS_CTRL, val));
- val = val | (PCIE_ASPM_L1_ENAB);
- DHD_INFO(("write_config: reg=0x%x write val=0x%x\n", PCIECFGREG_LINK_STATUS_CTRL, val));
- OSL_PCI_WRITE_CONFIG(bus->osh, PCIECFGREG_LINK_STATUS_CTRL, sizeof(val), val);
-
- DHD_ERROR(("******** BP reset Succedeed ********\n"));
-
- return ret;
-}
-#endif /* DHD_USE_BP_RESET */
-
int
dhd_bus_devreset(dhd_pub_t *dhdp, uint8 flag)
{
dhd_bus_t *bus = dhdp->bus;
int bcmerror = 0;
unsigned long flags;
- unsigned long flags_bus;
#ifdef CONFIG_ARCH_MSM
int retry = POWERUP_MAX_RETRY;
#endif /* CONFIG_ARCH_MSM */
- if (flag == TRUE) { /* Turn off WLAN */
- /* Removing Power */
- DHD_ERROR(("%s: == Power OFF ==\n", __FUNCTION__));
- DHD_ERROR(("%s: making dhdpub up FALSE\n", __FUNCTION__));
- bus->dhd->up = FALSE;
-
- /* wait for other contexts to finish -- if required a call
- * to OSL_DELAY for 1s can be added to give other contexts
- * a chance to finish
- */
- dhdpcie_advertise_bus_cleanup(bus->dhd);
+ if (dhd_download_fw_on_driverload) {
+ bcmerror = dhd_bus_start(dhdp);
+ } else {
+ if (flag == TRUE) { /* Turn off WLAN */
+ /* Removing Power */
+ DHD_ERROR(("%s: == Power OFF ==\n", __FUNCTION__));
- if (bus->dhd->busstate != DHD_BUS_DOWN) {
-#ifdef DHD_PCIE_NATIVE_RUNTIMEPM
- atomic_set(&bus->dhd->block_bus, TRUE);
- dhd_flush_rx_tx_wq(bus->dhd);
-#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
+ bus->dhd->up = FALSE;
+ if (bus->dhd->busstate != DHD_BUS_DOWN) {
+ dhdpcie_advertise_bus_cleanup(bus->dhd);
+ if (bus->intr) {
+ dhdpcie_bus_intr_disable(bus);
+ dhdpcie_free_irq(bus);
+ }
#ifdef BCMPCIE_OOB_HOST_WAKE
- /* Clean up any pending host wake IRQ */
- dhd_bus_oob_intr_set(bus->dhd, FALSE);
- dhd_bus_oob_intr_unregister(bus->dhd);
+ /* Clean up any pending host wake IRQ */
+ dhd_bus_oob_intr_set(bus->dhd, FALSE);
+ dhd_bus_oob_intr_unregister(bus->dhd);
#endif /* BCMPCIE_OOB_HOST_WAKE */
- dhd_os_wd_timer(dhdp, 0);
- dhd_bus_stop(bus, TRUE);
- if (bus->intr) {
- DHD_BUS_LOCK(bus->bus_lock, flags_bus);
- dhdpcie_bus_intr_disable(bus);
- DHD_BUS_UNLOCK(bus->bus_lock, flags_bus);
- dhdpcie_free_irq(bus);
- }
- dhd_deinit_bus_lock(bus);
- dhd_deinit_backplane_access_lock(bus);
- dhd_bus_release_dongle(bus);
- dhdpcie_bus_free_resource(bus);
- bcmerror = dhdpcie_bus_disable_device(bus);
- if (bcmerror) {
- DHD_ERROR(("%s: dhdpcie_bus_disable_device: %d\n",
- __FUNCTION__, bcmerror));
-#ifdef DHD_PCIE_NATIVE_RUNTIMEPM
- atomic_set(&bus->dhd->block_bus, FALSE);
-#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
- }
- /* Clean up protocol data after Bus Master Enable bit clear
- * so that host can safely unmap DMA and remove the allocated buffers
- * from the PKTID MAP. Some Applicantion Processors supported
- * System MMU triggers Kernel panic when they detect to attempt to
- * DMA-unmapped memory access from the devices which use the
- * System MMU. Therefore, Kernel panic can be happened since it is
- * possible that dongle can access to DMA-unmapped memory after
- * calling the dhd_prot_reset().
- * For this reason, the dhd_prot_reset() and dhd_clear() functions
- * should be located after the dhdpcie_bus_disable_device().
- */
- dhd_prot_reset(dhdp);
- dhd_clear(dhdp);
+ dhd_os_wd_timer(dhdp, 0);
+ dhd_bus_stop(bus, TRUE);
+ dhd_prot_reset(dhdp);
+ dhd_clear(dhdp);
+ dhd_bus_release_dongle(bus);
+ dhdpcie_bus_free_resource(bus);
+ bcmerror = dhdpcie_bus_disable_device(bus);
+ if (bcmerror) {
+ DHD_ERROR(("%s: dhdpcie_bus_disable_device: %d\n",
+ __FUNCTION__, bcmerror));
+ goto done;
+ }
#ifdef CONFIG_ARCH_MSM
- bcmerror = dhdpcie_bus_clock_stop(bus);
- if (bcmerror) {
- DHD_ERROR(("%s: host clock stop failed: %d\n",
- __FUNCTION__, bcmerror));
-#ifdef DHD_PCIE_NATIVE_RUNTIMEPM
- atomic_set(&bus->dhd->block_bus, FALSE);
-#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
- goto done;
- }
+ bcmerror = dhdpcie_bus_clock_stop(bus);
+ if (bcmerror) {
+ DHD_ERROR(("%s: host clock stop failed: %d\n",
+ __FUNCTION__, bcmerror));
+ goto done;
+ }
#endif /* CONFIG_ARCH_MSM */
- DHD_GENERAL_LOCK(bus->dhd, flags);
- DHD_ERROR(("%s: making DHD_BUS_DOWN\n", __FUNCTION__));
- bus->dhd->busstate = DHD_BUS_DOWN;
- DHD_GENERAL_UNLOCK(bus->dhd, flags);
-#ifdef DHD_PCIE_NATIVE_RUNTIMEPM
- atomic_set(&bus->dhd->block_bus, FALSE);
-#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
- } else {
- if (bus->intr) {
- dhdpcie_free_irq(bus);
- }
+ DHD_GENERAL_LOCK(bus->dhd, flags);
+ bus->dhd->busstate = DHD_BUS_DOWN;
+ DHD_GENERAL_UNLOCK(bus->dhd, flags);
+ } else {
+ if (bus->intr) {
+ dhdpcie_free_irq(bus);
+ }
#ifdef BCMPCIE_OOB_HOST_WAKE
- /* Clean up any pending host wake IRQ */
- dhd_bus_oob_intr_set(bus->dhd, FALSE);
- dhd_bus_oob_intr_unregister(bus->dhd);
+ /* Clean up any pending host wake IRQ */
+ dhd_bus_oob_intr_set(bus->dhd, FALSE);
+ dhd_bus_oob_intr_unregister(bus->dhd);
#endif /* BCMPCIE_OOB_HOST_WAKE */
- dhd_dpc_kill(bus->dhd);
- if (!bus->no_bus_init) {
+ dhd_dpc_kill(bus->dhd);
+ dhd_prot_reset(dhdp);
+ dhd_clear(dhdp);
dhd_bus_release_dongle(bus);
dhdpcie_bus_free_resource(bus);
bcmerror = dhdpcie_bus_disable_device(bus);
if (bcmerror) {
DHD_ERROR(("%s: dhdpcie_bus_disable_device: %d\n",
__FUNCTION__, bcmerror));
+ goto done;
}
-
- /* Clean up protocol data after Bus Master Enable bit clear
- * so that host can safely unmap DMA and remove the allocated
- * buffers from the PKTID MAP. Some Applicantion Processors
- * supported System MMU triggers Kernel panic when they detect
- * to attempt to DMA-unmapped memory access from the devices
- * which use the System MMU.
- * Therefore, Kernel panic can be happened since it is possible
- * that dongle can access to DMA-unmapped memory after calling
- * the dhd_prot_reset().
- * For this reason, the dhd_prot_reset() and dhd_clear() functions
- * should be located after the dhdpcie_bus_disable_device().
- */
- dhd_prot_reset(dhdp);
- dhd_clear(dhdp);
- } else {
- bus->no_bus_init = FALSE;
- }
+
#ifdef CONFIG_ARCH_MSM
- bcmerror = dhdpcie_bus_clock_stop(bus);
- if (bcmerror) {
- DHD_ERROR(("%s: host clock stop failed: %d\n",
- __FUNCTION__, bcmerror));
- goto done;
- }
+ bcmerror = dhdpcie_bus_clock_stop(bus);
+ if (bcmerror) {
+ DHD_ERROR(("%s: host clock stop failed: %d\n",
+ __FUNCTION__, bcmerror));
+ goto done;
+ }
#endif /* CONFIG_ARCH_MSM */
- }
+ }
- bus->dhd->dongle_reset = TRUE;
- DHD_ERROR(("%s: WLAN OFF Done\n", __FUNCTION__));
+ bus->dhd->dongle_reset = TRUE;
+ DHD_ERROR(("%s: WLAN OFF Done\n", __FUNCTION__));
- } else { /* Turn on WLAN */
- if (bus->dhd->busstate == DHD_BUS_DOWN) {
- /* Powering On */
- DHD_ERROR(("%s: == Power ON ==\n", __FUNCTION__));
+ } else { /* Turn on WLAN */
+ if (bus->dhd->busstate == DHD_BUS_DOWN) {
+ /* Powering On */
+ DHD_ERROR(("%s: == Power ON ==\n", __FUNCTION__));
#ifdef CONFIG_ARCH_MSM
- while (--retry) {
- bcmerror = dhdpcie_bus_clock_start(bus);
- if (!bcmerror) {
- DHD_ERROR(("%s: dhdpcie_bus_clock_start OK\n",
- __FUNCTION__));
- break;
- } else {
- OSL_SLEEP(10);
+ while (--retry) {
+ bcmerror = dhdpcie_bus_clock_start(bus);
+ if (!bcmerror) {
+ DHD_ERROR(("%s: dhdpcie_bus_clock_start OK\n",
+ __FUNCTION__));
+ break;
+ } else {
+ OSL_SLEEP(10);
+ }
}
- }
- if (bcmerror && !retry) {
- DHD_ERROR(("%s: host pcie clock enable failed: %d\n",
- __FUNCTION__, bcmerror));
- goto done;
- }
-#if defined(DHD_CONTROL_PCIE_ASPM_WIFI_TURNON)
- dhd_bus_aspm_enable_rc_ep(bus, FALSE);
-#endif /* DHD_CONTROL_PCIE_ASPM_WIFI_TURNON */
+ if (bcmerror && !retry) {
+ DHD_ERROR(("%s: host pcie clock enable failed: %d\n",
+ __FUNCTION__, bcmerror));
+ goto done;
+ }
#endif /* CONFIG_ARCH_MSM */
- bus->is_linkdown = 0;
- bus->cto_triggered = 0;
- bcmerror = dhdpcie_bus_enable_device(bus);
- if (bcmerror) {
- DHD_ERROR(("%s: host configuration restore failed: %d\n",
- __FUNCTION__, bcmerror));
- goto done;
- }
+ bus->is_linkdown = 0;
+#ifdef SUPPORT_LINKDOWN_RECOVERY
+ bus->read_shm_fail = FALSE;
+#endif /* SUPPORT_LINKDOWN_RECOVERY */
+ bcmerror = dhdpcie_bus_enable_device(bus);
+ if (bcmerror) {
+ DHD_ERROR(("%s: host configuration restore failed: %d\n",
+ __FUNCTION__, bcmerror));
+ goto done;
+ }
- bcmerror = dhdpcie_bus_alloc_resource(bus);
- if (bcmerror) {
- DHD_ERROR(("%s: dhdpcie_bus_resource_alloc failed: %d\n",
- __FUNCTION__, bcmerror));
- goto done;
- }
+ bcmerror = dhdpcie_bus_alloc_resource(bus);
+ if (bcmerror) {
+ DHD_ERROR(("%s: dhdpcie_bus_resource_alloc failed: %d\n",
+ __FUNCTION__, bcmerror));
+ goto done;
+ }
- bcmerror = dhdpcie_bus_dongle_attach(bus);
- if (bcmerror) {
- DHD_ERROR(("%s: dhdpcie_bus_dongle_attach failed: %d\n",
- __FUNCTION__, bcmerror));
- goto done;
- }
+ bcmerror = dhdpcie_bus_dongle_attach(bus);
+ if (bcmerror) {
+ DHD_ERROR(("%s: dhdpcie_bus_dongle_attach failed: %d\n",
+ __FUNCTION__, bcmerror));
+ goto done;
+ }
- bcmerror = dhd_bus_request_irq(bus);
- if (bcmerror) {
- DHD_ERROR(("%s: dhd_bus_request_irq failed: %d\n",
- __FUNCTION__, bcmerror));
- goto done;
- }
+ bcmerror = dhd_bus_request_irq(bus);
+ if (bcmerror) {
+ DHD_ERROR(("%s: dhd_bus_request_irq failed: %d\n",
+ __FUNCTION__, bcmerror));
+ goto done;
+ }
- bus->dhd->dongle_reset = FALSE;
+ bus->dhd->dongle_reset = FALSE;
-#if defined(DHD_CONTROL_PCIE_CPUCORE_WIFI_TURNON)
- dhd_irq_set_affinity(bus->dhd, cpumask_of(1));
-#endif /* DHD_CONTROL_PCIE_CPUCORE_WIFI_TURNON */
+ bcmerror = dhd_bus_start(dhdp);
+ if (bcmerror) {
+ DHD_ERROR(("%s: dhd_bus_start: %d\n",
+ __FUNCTION__, bcmerror));
+ goto done;
+ }
- bcmerror = dhd_bus_start(dhdp);
- if (bcmerror) {
- DHD_ERROR(("%s: dhd_bus_start: %d\n",
- __FUNCTION__, bcmerror));
+ bus->dhd->up = TRUE;
+ DHD_ERROR(("%s: WLAN Power On Done\n", __FUNCTION__));
+ } else {
+ DHD_ERROR(("%s: what should we do here\n", __FUNCTION__));
goto done;
}
-
- bus->dhd->up = TRUE;
- /* Renabling watchdog which is disabled in dhdpcie_advertise_bus_cleanup */
- if (bus->dhd->dhd_watchdog_ms_backup) {
- DHD_ERROR(("%s: Enabling wdtick after dhd init\n",
- __FUNCTION__));
- dhd_os_wd_timer(bus->dhd, bus->dhd->dhd_watchdog_ms_backup);
- }
- DHD_ERROR(("%s: WLAN Power On Done\n", __FUNCTION__));
- } else {
- DHD_ERROR(("%s: what should we do here\n", __FUNCTION__));
- goto done;
}
}
done:
if (bcmerror) {
DHD_GENERAL_LOCK(bus->dhd, flags);
- DHD_ERROR(("%s: making DHD_BUS_DOWN\n", __FUNCTION__));
bus->dhd->busstate = DHD_BUS_DOWN;
DHD_GENERAL_UNLOCK(bus->dhd, flags);
}
- return bcmerror;
-}
-
-/* si_backplane_access() manages a shared resource - BAR0 mapping, hence its
- * calls shall be serialized. This wrapper function provides such serialization
- * and shall be used everywjer einstead of direct call of si_backplane_access()
- *
- * Linux DHD driver calls si_backplane_access() from 3 three contexts: tasklet
- * (that may call dhdpcie_sssr_dump() from dhdpcie_sssr_dump()), iovar
- * ("sbreg", "membyres", etc.) and procfs (used by GDB proxy). To avoid race
- * conditions calls of si_backplane_access() shall be serialized. Presence of
- * tasklet context implies that serialization shall b ebased on spinlock. Hence
- * Linux implementation of dhd_pcie_backplane_access_[un]lock() is
- * spinlock-based.
- *
- * Other platforms may add their own implementations of
- * dhd_pcie_backplane_access_[un]lock() as needed (e.g. if serialization is not
- * needed implementation might be empty)
- */
-static uint
-serialized_backplane_access(dhd_bus_t *bus, uint addr, uint size, uint *val, bool read)
-{
- uint ret;
- unsigned long flags;
- DHD_BACKPLANE_ACCESS_LOCK(bus->backplane_access_lock, flags);
- ret = si_backplane_access(bus->sih, addr, size, val, read);
- DHD_BACKPLANE_ACCESS_UNLOCK(bus->backplane_access_lock, flags);
- return ret;
-}
-
-static int
-dhdpcie_get_dma_ring_indices(dhd_pub_t *dhd)
-{
- int h2d_support, d2h_support;
-
- d2h_support = dhd->dma_d2h_ring_upd_support ? 1 : 0;
- h2d_support = dhd->dma_h2d_ring_upd_support ? 1 : 0;
- return (d2h_support | (h2d_support << 1));
-
-}
-int
-dhdpcie_set_dma_ring_indices(dhd_pub_t *dhd, int32 int_val)
-{
- int bcmerror = 0;
- /* Can change it only during initialization/FW download */
- if (dhd->busstate == DHD_BUS_DOWN) {
- if ((int_val > 3) || (int_val < 0)) {
- DHD_ERROR(("Bad argument. Possible values: 0, 1, 2 & 3\n"));
- bcmerror = BCME_BADARG;
- } else {
- dhd->dma_d2h_ring_upd_support = (int_val & 1) ? TRUE : FALSE;
- dhd->dma_h2d_ring_upd_support = (int_val & 2) ? TRUE : FALSE;
- dhd->dma_ring_upd_overwrite = TRUE;
- }
- } else {
- DHD_ERROR(("%s: Can change only when bus down (before FW download)\n",
- __FUNCTION__));
- bcmerror = BCME_NOTDOWN;
- }
return bcmerror;
-
}
-/**
- * IOVAR handler of the DHD bus layer (in this case, the PCIe bus).
- *
- * @param actionid e.g. IOV_SVAL(IOV_PCIEREG)
- * @param params input buffer
- * @param plen length in [bytes] of input buffer 'params'
- * @param arg output buffer
- * @param len length in [bytes] of output buffer 'arg'
- */
static int
dhdpcie_bus_doiovar(dhd_bus_t *bus, const bcm_iovar_t *vi, uint32 actionid, const char *name,
void *params, int plen, void *arg, int len, int val_size)
switch (actionid) {
+
case IOV_SVAL(IOV_VARS):
bcmerror = dhdpcie_downloadvars(bus, arg, len);
break;
break;
case IOV_SVAL(IOV_PCIE_DMAXFER): {
- dma_xfer_info_t *dmaxfer = (dma_xfer_info_t *)arg;
-
- if (!dmaxfer)
- return BCME_BADARG;
- if (dmaxfer->version != DHD_DMAXFER_VERSION)
- return BCME_VERSION;
- if (dmaxfer->length != sizeof(dma_xfer_info_t)) {
- return BCME_BADLEN;
- }
-
- bcmerror = dhdpcie_bus_dmaxfer_req(bus, dmaxfer->num_bytes,
- dmaxfer->src_delay, dmaxfer->dest_delay,
- dmaxfer->type, dmaxfer->core_num,
- dmaxfer->should_wait);
-
- if (dmaxfer->should_wait && bcmerror >= 0) {
- bcmerror = dhdmsgbuf_dmaxfer_status(bus->dhd, dmaxfer);
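+ /* Legacy parameter layout: params carries up to four int32 values
+ * (length, source delay, destination delay, d11 loopback type).
+ * Only the optional fourth value needs to be fetched here.
+ */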
+ int int_val4 = 0;
+ if (plen >= (int)sizeof(int_val) * 4) {
+ bcopy((void*)((uintptr)params + 3 * sizeof(int_val)),
+ &int_val4, sizeof(int_val4));
}
+ bcmerror = dhdpcie_bus_dmaxfer_req(bus, int_val, int_val2, int_val3, int_val4);
break;
}
- case IOV_GVAL(IOV_PCIE_DMAXFER): {
- dma_xfer_info_t *dmaxfer = (dma_xfer_info_t *)params;
- if (!dmaxfer)
- return BCME_BADARG;
- if (dmaxfer->version != DHD_DMAXFER_VERSION)
- return BCME_VERSION;
- if (dmaxfer->length != sizeof(dma_xfer_info_t)) {
- return BCME_BADLEN;
- }
- bcmerror = dhdmsgbuf_dmaxfer_status(bus->dhd, dmaxfer);
+#ifdef DEVICE_TX_STUCK_DETECT
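+ /* Runtime get/set of the device-TX-stuck monitor flag */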
+ case IOV_GVAL(IOV_DEVICE_TX_STUCK_DETECT):
+ int_val = bus->dev_tx_stuck_monitor;
+ bcopy(&int_val, arg, val_size);
break;
- }
-
+ case IOV_SVAL(IOV_DEVICE_TX_STUCK_DETECT):
+ bus->dev_tx_stuck_monitor = (bool)int_val;
+ break;
+#endif /* DEVICE_TX_STUCK_DETECT */
case IOV_GVAL(IOV_PCIE_SUSPEND):
int_val = (bus->dhd->busstate == DHD_BUS_SUSPEND) ? 1 : 0;
bcopy(&int_val, arg, val_size);
DHD_GENERAL_LOCK(bus->dhd, flags);
DHD_BUS_BUSY_SET_SUSPEND_IN_PROGRESS(bus->dhd);
DHD_GENERAL_UNLOCK(bus->dhd, flags);
-#ifdef DHD_PCIE_NATIVE_RUNTIMEPM
- dhdpcie_bus_suspend(bus, TRUE, TRUE);
-#else
+
dhdpcie_bus_suspend(bus, TRUE);
-#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
DHD_GENERAL_LOCK(bus->dhd, flags);
DHD_BUS_BUSY_CLEAR_SUSPEND_IN_PROGRESS(bus->dhd);
bcopy(&int_val, arg, val_size);
break;
- /* Debug related. Dumps core registers or one of the dongle memory */
- case IOV_GVAL(IOV_DUMP_DONGLE):
- {
- dump_dongle_in_t ddi = *(dump_dongle_in_t*)params;
- dump_dongle_out_t *ddo = (dump_dongle_out_t*)arg;
- uint32 *p = ddo->val;
- const uint max_offset = 4096 - 1; /* one core contains max 4096/4 registers */
-
- if (plen < sizeof(ddi) || len < sizeof(ddo)) {
- bcmerror = BCME_BADARG;
- break;
- }
-
- switch (ddi.type) {
- case DUMP_DONGLE_COREREG:
- ddo->n_bytes = 0;
-
- if (si_setcoreidx(bus->sih, ddi.index) == NULL) {
- break; // beyond last core: core enumeration ended
- }
-
- ddo->address = si_addrspace(bus->sih, CORE_SLAVE_PORT_0, CORE_BASE_ADDR_0);
- ddo->address += ddi.offset; // BP address at which this dump starts
-
- ddo->id = si_coreid(bus->sih);
- ddo->rev = si_corerev(bus->sih);
-
- while (ddi.offset < max_offset &&
- sizeof(dump_dongle_out_t) + ddo->n_bytes < (uint)len) {
- *p++ = si_corereg(bus->sih, ddi.index, ddi.offset, 0, 0);
- ddi.offset += sizeof(uint32);
- ddo->n_bytes += sizeof(uint32);
- }
- break;
- default:
- // TODO: implement d11 SHM/TPL dumping
- bcmerror = BCME_BADARG;
- break;
- }
- break;
- }
-
- /* Debug related. Returns a string with dongle capabilities */
- case IOV_GVAL(IOV_DNGL_CAPS):
- {
- strncpy(arg, bus->dhd->fw_capabilities,
- MIN(strlen(bus->dhd->fw_capabilities), (size_t)len));
- ((char*)arg)[len - 1] = '\0';
- break;
- }
-
-#if defined(DEBUGGER) || defined(DHD_DSCOPE)
- case IOV_SVAL(IOV_GDB_SERVER):
- /* debugger_*() functions may sleep, so cannot hold spinlock */
- DHD_PERIM_UNLOCK(bus->dhd);
- if (int_val > 0) {
- debugger_init((void *) bus, &bus_ops, int_val, SI_ENUM_BASE(bus->sih));
- } else {
- debugger_close();
- }
- DHD_PERIM_LOCK(bus->dhd);
- break;
-#endif /* DEBUGGER || DHD_DSCOPE */
-
#ifdef BCM_BUZZZ
/* Dump dongle side buzzz trace to console */
case IOV_GVAL(IOV_BUZZZ_DUMP):
break;
}
case IOV_GVAL(IOV_DMA_RINGINDICES):
- {
- int_val = dhdpcie_get_dma_ring_indices(bus->dhd);
+ {
+ int h2d_support, d2h_support;
+
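+ /* Report ring index DMA support as a bitmap:
+ * bit 0 = D2H ring update support, bit 1 = H2D ring update support.
+ */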
+ d2h_support = bus->dhd->dma_d2h_ring_upd_support ? 1 : 0;
+ h2d_support = bus->dhd->dma_h2d_ring_upd_support ? 1 : 0;
+ int_val = d2h_support | (h2d_support << 1);
bcopy(&int_val, arg, sizeof(int_val));
break;
}
case IOV_SVAL(IOV_DMA_RINGINDICES):
- bcmerror = dhdpcie_set_dma_ring_indices(bus->dhd, int_val);
+ /* Can change it only during initialization/FW download */
+ if (bus->dhd->busstate == DHD_BUS_DOWN) {
+ if ((int_val > 3) || (int_val < 0)) {
+ DHD_ERROR(("%s: Bad argument. Possible values: 0, 1, 2 & 3\n",
+ __FUNCTION__));
+ bcmerror = BCME_BADARG;
+ } else {
+ bus->dhd->dma_d2h_ring_upd_support = (int_val & 1) ? TRUE : FALSE;
+ bus->dhd->dma_h2d_ring_upd_support = (int_val & 2) ? TRUE : FALSE;
+ bus->dhd->dma_ring_upd_overwrite = TRUE;
+ }
+ } else {
+ DHD_ERROR(("%s: Can change only when bus down (before FW download)\n",
+ __FUNCTION__));
+ bcmerror = BCME_NOTDOWN;
+ }
break;
case IOV_GVAL(IOV_METADATA_DBG):
break;
case IOV_SVAL(IOV_DEVRESET):
- switch (int_val) {
- case DHD_BUS_DEVRESET_ON:
- bcmerror = dhd_bus_devreset(bus->dhd, (uint8)int_val);
- break;
- case DHD_BUS_DEVRESET_OFF:
- bcmerror = dhd_bus_devreset(bus->dhd, (uint8)int_val);
- break;
- case DHD_BUS_DEVRESET_FLR:
- bcmerror = dhd_bus_perform_flr(bus, bus->flr_force_fail);
- break;
- case DHD_BUS_DEVRESET_FLR_FORCE_FAIL:
- bus->flr_force_fail = TRUE;
- break;
- default:
- DHD_ERROR(("%s: invalid argument for devreset\n", __FUNCTION__));
- break;
- }
+ dhd_bus_devreset(bus->dhd, (uint8)bool_val);
break;
case IOV_SVAL(IOV_FORCE_FW_TRAP):
if (bus->dhd->busstate == DHD_BUS_DATA)
bcopy(&int_val, arg, val_size);
break;
+#ifdef DHD_PCIE_RUNTIMEPM
+ case IOV_GVAL(IOV_IDLETIME):
+ int_val = bus->idletime;
+ bcopy(&int_val, arg, val_size);
+ break;
+
+ case IOV_SVAL(IOV_IDLETIME):
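+ /* idletime == 0 disables runtime PM; any positive value (re)enables it. */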
+ if (int_val < 0) {
+ bcmerror = BCME_BADARG;
+ } else {
+ bus->idletime = int_val;
+ if (bus->idletime) {
+ DHD_ENABLE_RUNTIME_PM(bus->dhd);
+ } else {
+ DHD_DISABLE_RUNTIME_PM(bus->dhd);
+ }
+ }
+ break;
+#endif /* DHD_PCIE_RUNTIMEPM */
+
case IOV_GVAL(IOV_TXBOUND):
int_val = (int32)dhd_txbound;
bcopy(&int_val, arg, val_size);
break;
case IOV_SVAL(IOV_CTO_PREVENTION):
- bcmerror = dhdpcie_cto_init(bus, bool_val);
+ {
+ uint32 pcie_lnkst;
+
+ if (bus->sih->buscorerev < 19) {
+ bcmerror = BCME_UNSUPPORTED;
+ break;
+ }
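+ /* Read the PCIe link status word through the core's indirect config
+ * window (configaddr/configdata) to get the negotiated link speed.
+ */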
+ si_corereg(bus->sih, bus->sih->buscoreidx,
+ OFFSETOF(sbpcieregs_t, configaddr), ~0, PCI_LINK_STATUS);
+
+ pcie_lnkst = si_corereg(bus->sih, bus->sih->buscoreidx,
+ OFFSETOF(sbpcieregs_t, configdata), 0, 0);
+
+ /* 4347A0 in PCIEGEN1 doesn't support CTO prevention due to
+ * 4347A0 DAR Issue : JIRA:CRWLPCIEGEN2-443: Issue in DAR write
+ */
+ if ((bus->sih->buscorerev == 19) &&
+ (((pcie_lnkst >> PCI_LINK_SPEED_SHIFT) &
+ PCI_LINK_SPEED_MASK) == PCIE_LNK_SPEED_GEN1)) {
+ bcmerror = BCME_UNSUPPORTED;
+ break;
+ }
+ bus->dhd->cto_enable = bool_val;
+ dhdpcie_cto_init(bus, bus->dhd->cto_enable);
+ DHD_ERROR(("%s: set CTO prevention and recovery enable/disable %d\n",
+ __FUNCTION__, bus->dhd->cto_enable));
+ }
break;
case IOV_GVAL(IOV_CTO_PREVENTION):
bcmerror = BCME_UNSUPPORTED;
break;
}
- int_val = (int32)bus->cto_enable;
+ int_val = (int32)bus->dhd->cto_enable;
bcopy(&int_val, arg, val_size);
break;
bcmerror = BCME_UNSUPPORTED;
break;
}
- bus->cto_threshold = (uint32)int_val;
+ bus->dhd->cto_threshold = (uint32)int_val;
}
break;
bcmerror = BCME_UNSUPPORTED;
break;
}
- if (bus->cto_threshold)
- int_val = (int32)bus->cto_threshold;
+ if (bus->dhd->cto_threshold)
+ int_val = (int32)bus->dhd->cto_threshold;
else
int_val = (int32)PCIE_CTO_TO_THRESH_DEFAULT;
case IOV_SVAL(IOV_PCIE_WD_RESET):
if (bool_val) {
- /* Legacy chipcommon watchdog reset */
- dhdpcie_cc_watchdog_reset(bus);
+ pcie_watchdog_reset(bus->osh, bus->sih, (sbpcieregs_t *) bus->regs);
}
break;
+#ifdef DHD_EFI
+ case IOV_SVAL(IOV_CONTROL_SIGNAL):
+ {
+ bcmerror = dhd_control_signal(bus, arg, TRUE);
+ break;
+ }
- case IOV_GVAL(IOV_HWA_ENAB_BMAP):
- int_val = bus->hwa_enab_bmap;
+ case IOV_GVAL(IOV_CONTROL_SIGNAL):
+ {
+ bcmerror = dhd_control_signal(bus, params, FALSE);
+ break;
+ }
+#if defined(PCIE_OOB) || defined(PCIE_INB_DW)
+ case IOV_GVAL(IOV_DEEP_SLEEP):
+ int_val = bus->ds_enabled;
bcopy(&int_val, arg, val_size);
break;
- case IOV_SVAL(IOV_HWA_ENAB_BMAP):
- bus->hwa_enab_bmap = (uint8)int_val;
+
+ case IOV_SVAL(IOV_DEEP_SLEEP):
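+ /* 1 = allow deep sleep (deassert device_wake),
+ * 0 = block deep sleep (assert device_wake).
+ */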
+ if (int_val == 1) {
+ bus->ds_enabled = TRUE;
+ /* Deassert */
+ if (dhd_bus_set_device_wake(bus, FALSE) == BCME_OK) {
+#ifdef PCIE_INB_DW
+ int timeleft;
+ timeleft = dhd_os_ds_enter_wait(bus->dhd, NULL);
+ if (timeleft == 0) {
+ DHD_ERROR(("DS-ENTER timeout\n"));
+ bus->ds_enabled = FALSE;
+ break;
+ }
+#endif /* PCIE_INB_DW */
+ }
+ else {
+ DHD_ERROR(("%s: Enable Deep Sleep failed !\n", __FUNCTION__));
+ bus->ds_enabled = FALSE;
+ }
+ }
+ else if (int_val == 0) {
+ /* Assert */
+ if (dhd_bus_set_device_wake(bus, TRUE) == BCME_OK)
+ bus->ds_enabled = FALSE;
+ else
+ DHD_ERROR(("%s: Disable Deep Sleep failed !\n", __FUNCTION__));
+ }
+ else
+ DHD_ERROR(("%s: Invalid number, allowed only 0|1\n", __FUNCTION__));
+
+ break;
+#endif /* PCIE_OOB || PCIE_INB_DW */
+
+ case IOV_GVAL(IOV_WIFI_PROPERTIES):
+ bcmerror = dhd_wifi_properties(bus, params);
+ break;
+
+ case IOV_GVAL(IOV_OTP_DUMP):
+ bcmerror = dhd_otp_dump(bus, params);
break;
+#endif /* DHD_EFI */
+
case IOV_GVAL(IOV_IDMA_ENABLE):
int_val = bus->idma_enabled;
bcopy(&int_val, arg, val_size);
bcopy(&int_val, arg, val_size);
dhd_flow_rings_flush(bus->dhd, 0);
break;
- case IOV_GVAL(IOV_DAR_ENABLE):
- int_val = bus->dar_enabled;
- bcopy(&int_val, arg, val_size);
- break;
- case IOV_SVAL(IOV_DAR_ENABLE):
- bus->dar_enabled = (bool)int_val;
- break;
- case IOV_GVAL(IOV_HSCBSIZE):
- bcmerror = dhd_get_hscb_info(bus->dhd, NULL, (uint32 *)arg);
- break;
-
-#ifdef DHD_HP2P
- case IOV_SVAL(IOV_HP2P_ENABLE):
- dhd_prot_hp2p_enable(bus->dhd, TRUE, int_val);
- break;
-
- case IOV_GVAL(IOV_HP2P_ENABLE):
- int_val = dhd_prot_hp2p_enable(bus->dhd, FALSE, int_val);
- bcopy(&int_val, arg, val_size);
- break;
-
- case IOV_SVAL(IOV_HP2P_PKT_THRESHOLD):
- dhd_prot_pkt_threshold(bus->dhd, TRUE, int_val);
- break;
-
- case IOV_GVAL(IOV_HP2P_PKT_THRESHOLD):
- int_val = dhd_prot_pkt_threshold(bus->dhd, FALSE, int_val);
- bcopy(&int_val, arg, val_size);
- break;
-
- case IOV_SVAL(IOV_HP2P_TIME_THRESHOLD):
- dhd_prot_time_threshold(bus->dhd, TRUE, int_val);
- break;
-
- case IOV_GVAL(IOV_HP2P_TIME_THRESHOLD):
- int_val = dhd_prot_time_threshold(bus->dhd, FALSE, int_val);
- bcopy(&int_val, arg, val_size);
- break;
-
- case IOV_SVAL(IOV_HP2P_PKT_EXPIRY):
- dhd_prot_pkt_expiry(bus->dhd, TRUE, int_val);
- break;
-
- case IOV_GVAL(IOV_HP2P_PKT_EXPIRY):
- int_val = dhd_prot_pkt_expiry(bus->dhd, FALSE, int_val);
- bcopy(&int_val, arg, val_size);
- break;
- case IOV_SVAL(IOV_HP2P_TXCPL_MAXITEMS):
- if (bus->dhd->busstate != DHD_BUS_DOWN) {
- return BCME_NOTDOWN;
- }
- dhd_bus_set_hp2p_ring_max_size(bus, TRUE, int_val);
- break;
-
- case IOV_GVAL(IOV_HP2P_TXCPL_MAXITEMS):
- int_val = dhd_bus_get_hp2p_ring_max_size(bus, TRUE);
- bcopy(&int_val, arg, val_size);
- break;
- case IOV_SVAL(IOV_HP2P_RXCPL_MAXITEMS):
- if (bus->dhd->busstate != DHD_BUS_DOWN) {
- return BCME_NOTDOWN;
- }
- dhd_bus_set_hp2p_ring_max_size(bus, FALSE, int_val);
- break;
-
- case IOV_GVAL(IOV_HP2P_RXCPL_MAXITEMS):
- int_val = dhd_bus_get_hp2p_ring_max_size(bus, FALSE);
- bcopy(&int_val, arg, val_size);
- break;
-#endif /* DHD_HP2P */
- case IOV_SVAL(IOV_EXTDTXS_IN_TXCPL):
- if (bus->dhd->busstate != DHD_BUS_DOWN) {
- return BCME_NOTDOWN;
- }
- if (int_val)
- bus->dhd->extdtxs_in_txcpl = TRUE;
- else
- bus->dhd->extdtxs_in_txcpl = FALSE;
- break;
-
- case IOV_GVAL(IOV_EXTDTXS_IN_TXCPL):
- int_val = bus->dhd->extdtxs_in_txcpl;
- bcopy(&int_val, arg, val_size);
- break;
-
- case IOV_SVAL(IOV_HOSTRDY_AFTER_INIT):
- if (bus->dhd->busstate != DHD_BUS_DOWN) {
- return BCME_NOTDOWN;
- }
- if (int_val)
- bus->dhd->hostrdy_after_init = TRUE;
- else
- bus->dhd->hostrdy_after_init = FALSE;
- break;
-
- case IOV_GVAL(IOV_HOSTRDY_AFTER_INIT):
- int_val = bus->dhd->hostrdy_after_init;
- bcopy(&int_val, arg, val_size);
- break;
-
default:
bcmerror = BCME_UNSUPPORTED;
break;
return 0;
}
-void
-dhd_bus_dump_dar_registers(struct dhd_bus *bus)
-{
- uint32 dar_clk_ctrl_val, dar_pwr_ctrl_val, dar_intstat_val,
- dar_errlog_val, dar_erraddr_val, dar_pcie_mbint_val;
- uint32 dar_clk_ctrl_reg, dar_pwr_ctrl_reg, dar_intstat_reg,
- dar_errlog_reg, dar_erraddr_reg, dar_pcie_mbint_reg;
-
- if (bus->is_linkdown && !bus->cto_triggered) {
- DHD_ERROR(("%s: link is down\n", __FUNCTION__));
- return;
- }
-
- dar_clk_ctrl_reg = (uint32)DAR_CLK_CTRL(bus->sih->buscorerev);
- dar_pwr_ctrl_reg = (uint32)DAR_PCIE_PWR_CTRL(bus->sih->buscorerev);
- dar_intstat_reg = (uint32)DAR_INTSTAT(bus->sih->buscorerev);
- dar_errlog_reg = (uint32)DAR_ERRLOG(bus->sih->buscorerev);
- dar_erraddr_reg = (uint32)DAR_ERRADDR(bus->sih->buscorerev);
- dar_pcie_mbint_reg = (uint32)DAR_PCIMailBoxInt(bus->sih->buscorerev);
-
- if (bus->sih->buscorerev < 24) {
- DHD_ERROR(("%s: DAR not supported for corerev(%d) < 24\n",
- __FUNCTION__, bus->sih->buscorerev));
- return;
- }
-
- dar_clk_ctrl_val = si_corereg(bus->sih, bus->sih->buscoreidx, dar_clk_ctrl_reg, 0, 0);
- dar_pwr_ctrl_val = si_corereg(bus->sih, bus->sih->buscoreidx, dar_pwr_ctrl_reg, 0, 0);
- dar_intstat_val = si_corereg(bus->sih, bus->sih->buscoreidx, dar_intstat_reg, 0, 0);
- dar_errlog_val = si_corereg(bus->sih, bus->sih->buscoreidx, dar_errlog_reg, 0, 0);
- dar_erraddr_val = si_corereg(bus->sih, bus->sih->buscoreidx, dar_erraddr_reg, 0, 0);
- dar_pcie_mbint_val = si_corereg(bus->sih, bus->sih->buscoreidx, dar_pcie_mbint_reg, 0, 0);
-
- DHD_ERROR(("%s: dar_clk_ctrl(0x%x:0x%x) dar_pwr_ctrl(0x%x:0x%x) dar_intstat(0x%x:0x%x)\n",
- __FUNCTION__, dar_clk_ctrl_reg, dar_clk_ctrl_val,
- dar_pwr_ctrl_reg, dar_pwr_ctrl_val, dar_intstat_reg, dar_intstat_val));
-
- DHD_ERROR(("%s: dar_errlog(0x%x:0x%x) dar_erraddr(0x%x:0x%x) dar_pcie_mbint(0x%x:0x%x)\n",
- __FUNCTION__, dar_errlog_reg, dar_errlog_val,
- dar_erraddr_reg, dar_erraddr_val, dar_pcie_mbint_reg, dar_pcie_mbint_val));
-}
-
/* Ring DoorBell1 to indicate Hostready i.e. D3 Exit */
void
dhd_bus_hostready(struct dhd_bus *bus)
return;
}
- DHD_ERROR(("%s : Read PCICMD Reg: 0x%08X\n", __FUNCTION__,
+ DHD_INFO_HW4(("%s : Read PCICMD Reg: 0x%08X\n", __FUNCTION__,
dhd_pcie_config_read(bus->osh, PCI_CFG_CMD, sizeof(uint32))));
-
- if (DAR_PWRREQ(bus)) {
- dhd_bus_pcie_pwr_req(bus);
- }
-
- dhd_bus_dump_dar_registers(bus);
-
- si_corereg(bus->sih, bus->sih->buscoreidx, dhd_bus_db1_addr_get(bus), ~0, 0x12345678);
+ si_corereg(bus->sih, bus->sih->buscoreidx, PCIH2D_DB1, ~0, 0x12345678);
bus->hostready_count ++;
- DHD_ERROR(("%s: Ring Hostready:%d\n", __FUNCTION__, bus->hostready_count));
+ DHD_INFO_HW4(("%s: Ring Hostready:%d\n", __FUNCTION__, bus->hostready_count));
}
/* Clear INTSTATUS */
dhdpcie_bus_cfg_write_dword(bus, PCIIntstatus, 4, intstatus);
} else {
/* this is a PCIE core register..not a config register... */
- intstatus = si_corereg(bus->sih, bus->sih->buscoreidx, bus->pcie_mailbox_int, 0, 0);
- si_corereg(bus->sih, bus->sih->buscoreidx, bus->pcie_mailbox_int, bus->def_intmask,
+ intstatus = si_corereg(bus->sih, bus->sih->buscoreidx, PCIMailBoxInt, 0, 0);
+ si_corereg(bus->sih, bus->sih->buscoreidx, PCIMailBoxInt, bus->def_intmask,
intstatus);
}
}
int
-#ifdef DHD_PCIE_NATIVE_RUNTIMEPM
-dhdpcie_bus_suspend(struct dhd_bus *bus, bool state, bool byint)
-#else
dhdpcie_bus_suspend(struct dhd_bus *bus, bool state)
-#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
{
int timeleft;
int rc = 0;
- unsigned long flags, flags_bus;
-#ifdef DHD_PCIE_NATIVE_RUNTIMEPM
- int d3_read_retry = 0;
- uint32 d2h_mb_data = 0;
- uint32 zero = 0;
-#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
+ unsigned long flags;
printf("%s: state=%d\n", __FUNCTION__, state);
if (bus->dhd == NULL) {
return BCME_OK;
}
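+ /* A D3_INFORM handshake is already in flight (d3_suspend_pending is set
+ * when D3_INFORM is sent and cleared when the suspend attempt completes
+ * or is backed out), so do not start another transition here.
+ */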
+ if (bus->d3_suspend_pending) {
+ DHD_ERROR(("Suspend pending ...\n"));
+ return BCME_ERROR;
+ }
+
+
if (state) {
int idle_retry = 0;
int active;
/* Suspend */
DHD_ERROR(("%s: Entering suspend state\n", __FUNCTION__));
- bus->dhd->dhd_watchdog_ms_backup = dhd_watchdog_ms;
- if (bus->dhd->dhd_watchdog_ms_backup) {
- DHD_ERROR(("%s: Disabling wdtick before going to suspend\n",
- __FUNCTION__));
- dhd_os_wd_timer(bus->dhd, 0);
- }
-
DHD_GENERAL_LOCK(bus->dhd, flags);
if (DHD_BUS_BUSY_CHECK_IN_TX(bus->dhd)) {
DHD_ERROR(("Tx Request is not ended\n"));
bus->dhd->busstate = DHD_BUS_DATA;
DHD_GENERAL_UNLOCK(bus->dhd, flags);
+#ifndef DHD_EFI
return -EBUSY;
+#else
+ return BCME_ERROR;
+#endif
}
- bus->last_suspend_start_time = OSL_LOCALTIME_NS();
-
/* stop all interface network queue. */
dhd_bus_stop_queue(bus);
DHD_GENERAL_UNLOCK(bus->dhd, flags);
-#ifdef DHD_PCIE_NATIVE_RUNTIMEPM
- if (byint) {
- DHD_OS_WAKE_LOCK_WAIVE(bus->dhd);
- /* Clear wait_for_d3_ack before sending D3_INFORM */
- bus->wait_for_d3_ack = 0;
- dhdpcie_send_mb_data(bus, H2D_HOST_D3_INFORM);
-
- timeleft = dhd_os_d3ack_wait(bus->dhd, &bus->wait_for_d3_ack);
- DHD_OS_WAKE_LOCK_RESTORE(bus->dhd);
- } else {
- /* Clear wait_for_d3_ack before sending D3_INFORM */
- bus->wait_for_d3_ack = 0;
- dhdpcie_send_mb_data(bus, H2D_HOST_D3_INFORM | H2D_HOST_ACK_NOINT);
- while (!bus->wait_for_d3_ack && d3_read_retry < MAX_D3_ACK_TIMEOUT) {
- dhdpcie_handle_mb_data(bus);
- usleep_range(1000, 1500);
- d3_read_retry++;
- }
- }
-#else
DHD_OS_WAKE_LOCK_WAIVE(bus->dhd);
- /* Clear wait_for_d3_ack before sending D3_INFORM */
+#ifdef DHD_TIMESYNC
+ /* disable time sync mechanism, if configed */
+ dhd_timesync_control(bus->dhd, TRUE);
+#endif /* DHD_TIMESYNC */
+#if defined(PCIE_OOB) || defined(PCIE_INB_DW)
+ dhd_bus_set_device_wake(bus, TRUE);
+#endif /* defined(PCIE_OOB) || defined(PCIE_INB_DW) */
+#ifdef PCIE_OOB
+ bus->oob_presuspend = TRUE;
+#endif
+#ifdef PCIE_INB_DW
+ /* De-assert at this point for In-band device_wake */
+ if (INBAND_DW_ENAB(bus)) {
+ dhd_bus_set_device_wake(bus, FALSE);
+ dhdpcie_bus_set_pcie_inband_dw_state(bus, DW_DEVICE_HOST_SLEEP_WAIT);
+ }
+#endif /* PCIE_INB_DW */
+
+ /* Clear wait_for_d3_ack */
bus->wait_for_d3_ack = 0;
/*
- * Send H2D_HOST_D3_INFORM to dongle and mark bus->bus_low_power_state
- * to DHD_BUS_D3_INFORM_SENT in dhd_prot_ring_write_complete_mbdata
+ * Send H2D_HOST_D3_INFORM to dongle and mark
+ * bus->d3_suspend_pending to TRUE in dhdpcie_send_mb_data
* inside atomic context, so that no more DBs will be
* rung after sending D3_INFORM
*/
dhdpcie_send_mb_data(bus, H2D_HOST_D3_INFORM);
/* Wait for D3 ACK for D3_ACK_RESP_TIMEOUT seconds */
-
+ dhd_os_set_ioctl_resp_timeout(D3_ACK_RESP_TIMEOUT);
timeleft = dhd_os_d3ack_wait(bus->dhd, &bus->wait_for_d3_ack);
#ifdef DHD_RECOVER_TIMEOUT
if (bus->wait_for_d3_ack == 0) {
/* If wait_for_d3_ack was not updated because D2H MB was not received */
- uint32 intstatus = si_corereg(bus->sih, bus->sih->buscoreidx,
- bus->pcie_mailbox_int, 0, 0);
- int host_irq_disabled = dhdpcie_irq_disabled(bus);
- if ((intstatus) && (intstatus != (uint32)-1) &&
- (timeleft == 0) && (!dhd_query_bus_erros(bus->dhd))) {
- DHD_ERROR(("%s: D3 ACK trying again intstatus=%x"
- " host_irq_disabled=%d\n",
- __FUNCTION__, intstatus, host_irq_disabled));
- dhd_pcie_intr_count_dump(bus->dhd);
- dhd_print_tasklet_status(bus->dhd);
- if (bus->api.fw_rev >= PCIE_SHARED_VERSION_6 &&
- !bus->use_mailbox) {
- dhd_prot_process_ctrlbuf(bus->dhd);
- } else {
- dhdpcie_handle_mb_data(bus);
- }
+ uint32 intstatus = 0;
+ uint32 intmask = 0;
+ intstatus = si_corereg(bus->sih, bus->sih->buscoreidx, PCIMailBoxInt, 0, 0);
+ intmask = si_corereg(bus->sih, bus->sih->buscoreidx, PCIMailBoxMask, 0, 0);
+ if ((intstatus) && (!intmask) && (timeleft == 0) &&
+ (!dhd_query_bus_erros(bus->dhd))) {
+
+ DHD_ERROR(("%s: D3 ACK trying again intstatus=%x intmask=%x\n",
+ __FUNCTION__, intstatus, intmask));
+ DHD_ERROR(("\n ------- DUMPING INTR enable/disable counters\r\n"));
+ DHD_ERROR(("resume_intr_enable_count=%lu dpc_intr_en_count=%lu\n"
+ "isr_intr_disable_count=%lu suspend_intr_dis_count=%lu\n"
+ "dpc_return_busdown_count=%lu\n",
+ bus->resume_intr_enable_count, bus->dpc_intr_enable_count,
+ bus->isr_intr_disable_count,
+ bus->suspend_intr_disable_count,
+ bus->dpc_return_busdown_count));
+
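+ /* The D3 ACK may be sitting unprocessed in the D2H control ring
+ * (interrupt pending but masked), so drain the control ring
+ * directly and then wait for the ACK once more.
+ */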
+ dhd_prot_process_ctrlbuf(bus->dhd);
+
timeleft = dhd_os_d3ack_wait(bus->dhd, &bus->wait_for_d3_ack);
- /* Clear Interrupts */
- dhdpcie_bus_clear_intstatus(bus);
+
+ /* Enable Back Interrupts using IntMask */
+ dhdpcie_bus_intr_enable(bus);
}
+
+
} /* bus->wait_for_d3_ack was 0 */
#endif /* DHD_RECOVER_TIMEOUT */
DHD_OS_WAKE_LOCK_RESTORE(bus->dhd);
-#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
/* To allow threads that got pre-empted to complete.
*/
if (bus->wait_for_d3_ack) {
DHD_ERROR(("%s: Got D3 Ack \n", __FUNCTION__));
+
/* Got D3 Ack. Suspend the bus */
if (active) {
DHD_ERROR(("%s():Suspend failed because of wakelock"
"restoring Dongle to D0\n", __FUNCTION__));
- if (bus->dhd->dhd_watchdog_ms_backup) {
- DHD_ERROR(("%s: Enabling wdtick due to wakelock active\n",
- __FUNCTION__));
- dhd_os_wd_timer(bus->dhd,
- bus->dhd->dhd_watchdog_ms_backup);
- }
-
/*
* Dongle still thinks that it has to be in D3 state until
* it gets a D0 Inform, but we are backing off from suspend.
/* Clear wait_for_d3_ack to send D0_INFORM or host_ready */
bus->wait_for_d3_ack = 0;
- DHD_BUS_LOCK(bus->bus_lock, flags_bus);
- bus->bus_low_power_state = DHD_BUS_NO_LOW_POWER_STATE;
/* Enable back the intmask which was cleared in DPC
* after getting D3_ACK.
*/
bus->resume_intr_enable_count++;
-
- /* For Linux, Macos etc (otherthan NDIS) enable back the dongle
- * interrupts using intmask and host interrupts
- * which were disabled in the dhdpcie_bus_isr()->
- * dhd_bus_handle_d3_ack().
- */
- /* Enable back interrupt using Intmask!! */
dhdpcie_bus_intr_enable(bus);
- /* Enable back interrupt from Host side!! */
- dhdpcie_enable_irq(bus);
-
- DHD_BUS_UNLOCK(bus->bus_lock, flags_bus);
if (bus->use_d0_inform) {
DHD_OS_WAKE_LOCK_WAIVE(bus->dhd);
dhd_bus_hostready(bus);
DHD_GENERAL_LOCK(bus->dhd, flags);
+ bus->d3_suspend_pending = FALSE;
bus->dhd->busstate = DHD_BUS_DATA;
/* resume all interface network queue. */
dhd_bus_start_queue(bus);
DHD_GENERAL_UNLOCK(bus->dhd, flags);
rc = BCME_ERROR;
} else {
- /* Actual Suspend after no wakelock */
- /* At this time bus->bus_low_power_state will be
- * made to DHD_BUS_D3_ACK_RECIEVED after recieving D3_ACK
- * in dhd_bus_handle_d3_ack()
- */
+#ifdef PCIE_OOB
+ bus->oob_presuspend = FALSE;
+ if (OOB_DW_ENAB(bus)) {
+ dhd_bus_set_device_wake(bus, FALSE);
+ }
+#endif /* PCIE_OOB */
+#if defined(PCIE_OOB) || defined(BCMPCIE_OOB_HOST_WAKE)
+ bus->oob_presuspend = TRUE;
+#endif /* PCIE_OOB || BCMPCIE_OOB_HOST_WAKE */
+#ifdef PCIE_INB_DW
+ if (INBAND_DW_ENAB(bus)) {
+ if (dhdpcie_bus_get_pcie_inband_dw_state(bus) ==
+ DW_DEVICE_HOST_SLEEP_WAIT) {
+ dhdpcie_bus_set_pcie_inband_dw_state(bus,
+ DW_DEVICE_HOST_SLEEP);
+ }
+ }
+#endif /* PCIE_INB_DW */
if (bus->use_d0_inform &&
(bus->api.fw_rev < PCIE_SHARED_VERSION_6)) {
DHD_OS_WAKE_LOCK_WAIVE(bus->dhd);
dhdpcie_send_mb_data(bus, (H2D_HOST_D0_INFORM_IN_USE));
DHD_OS_WAKE_LOCK_RESTORE(bus->dhd);
}
-
#if defined(BCMPCIE_OOB_HOST_WAKE)
- if (bus->dhd->dhd_induce_error == DHD_INDUCE_DROP_OOB_IRQ) {
- DHD_ERROR(("%s: Inducing DROP OOB IRQ\n", __FUNCTION__));
- } else {
- dhdpcie_oob_intr_set(bus, TRUE);
- }
+ dhdpcie_oob_intr_set(bus, TRUE);
#endif /* BCMPCIE_OOB_HOST_WAKE */
DHD_GENERAL_LOCK(bus->dhd, flags);
/* The Host cannot process interrupts now so disable the same.
* No need to disable the dongle INTR using intmask, as we are
- * already calling disabling INTRs from DPC context after
- * getting D3_ACK in dhd_bus_handle_d3_ack.
- * Code may not look symmetric between Suspend and
+ * already calling dhdpcie_bus_intr_disable from DPC context after
+ * getting D3_ACK. Code may not look symmetric between Suspend and
* Resume paths but this is done to close down the timing window
- * between DPC and suspend context and bus->bus_low_power_state
- * will be set to DHD_BUS_D3_ACK_RECIEVED in DPC.
+ * between DPC and suspend context.
*/
+ /* Disable interrupt from host side!! */
+ dhdpcie_disable_irq_nosync(bus);
+
bus->dhd->d3ackcnt_timeout = 0;
+ bus->d3_suspend_pending = FALSE;
bus->dhd->busstate = DHD_BUS_SUSPEND;
DHD_GENERAL_UNLOCK(bus->dhd, flags);
- dhdpcie_dump_resource(bus);
/* Handle Host Suspend */
rc = dhdpcie_pci_suspend_resume(bus, state);
- if (!rc) {
- bus->last_suspend_end_time = OSL_LOCALTIME_NS();
- }
}
- } else if (timeleft == 0) { /* D3 ACK Timeout */
-#ifdef DHD_FW_COREDUMP
- uint32 cur_memdump_mode = bus->dhd->memdump_enabled;
-#endif /* DHD_FW_COREDUMP */
-
- /* check if the D3 ACK timeout due to scheduling issue */
- bus->dhd->is_sched_error = !dhd_query_bus_erros(bus->dhd) &&
- bus->isr_entry_time > bus->last_d3_inform_time &&
- dhd_bus_query_dpc_sched_errors(bus->dhd);
+ } else if (timeleft == 0) {
bus->dhd->d3ack_timeout_occured = TRUE;
/* If the D3 Ack has timeout */
bus->dhd->d3ackcnt_timeout++;
- DHD_ERROR(("%s: resumed on timeout for D3 ACK%s d3_inform_cnt %d\n",
- __FUNCTION__, bus->dhd->is_sched_error ?
- " due to scheduling problem" : "", bus->dhd->d3ackcnt_timeout));
-#if defined(DHD_KERNEL_SCHED_DEBUG) && defined(DHD_FW_COREDUMP)
- if (bus->dhd->is_sched_error && cur_memdump_mode == DUMP_MEMFILE_BUGON) {
- /* change g_assert_type to trigger Kernel panic */
- g_assert_type = 2;
- /* use ASSERT() to trigger panic */
- ASSERT(0);
- }
-#endif /* DHD_KERNEL_SCHED_DEBUG && DHD_FW_COREDUMP */
- DHD_BUS_LOCK(bus->bus_lock, flags_bus);
- bus->bus_low_power_state = DHD_BUS_NO_LOW_POWER_STATE;
- DHD_BUS_UNLOCK(bus->bus_lock, flags_bus);
+ DHD_ERROR(("%s: resumed on timeout for D3 ACK d3_inform_cnt %d \n",
+ __FUNCTION__, bus->dhd->d3ackcnt_timeout));
DHD_GENERAL_LOCK(bus->dhd, flags);
+ bus->d3_suspend_pending = FALSE;
bus->dhd->busstate = DHD_BUS_DATA;
/* resume all interface network queue. */
dhd_bus_start_queue(bus);
DHD_GENERAL_UNLOCK(bus->dhd, flags);
- if (!bus->dhd->dongle_trap_occured &&
- !bus->is_linkdown &&
- !bus->cto_triggered) {
+ if (!bus->dhd->dongle_trap_occured) {
uint32 intstatus = 0;
/* Check if PCIe bus status is valid */
- intstatus = si_corereg(bus->sih, bus->sih->buscoreidx,
- bus->pcie_mailbox_int, 0, 0);
+ intstatus = si_corereg(bus->sih,
+ bus->sih->buscoreidx, PCIMailBoxInt, 0, 0);
if (intstatus == (uint32)-1) {
/* Invalidate PCIe bus status */
bus->is_linkdown = 1;
dhd_bus_dump_console_buffer(bus);
dhd_prot_debug_info_print(bus->dhd);
#ifdef DHD_FW_COREDUMP
- if (cur_memdump_mode) {
+ if (bus->dhd->memdump_enabled) {
/* write core dump to file */
bus->dhd->memdump_type = DUMP_TYPE_D3_ACK_TIMEOUT;
dhdpcie_mem_dump(bus);
}
#endif /* DHD_FW_COREDUMP */
-
DHD_ERROR(("%s: Event HANG send up due to D3_ACK timeout\n",
__FUNCTION__));
+#ifdef SUPPORT_LINKDOWN_RECOVERY
+#ifdef CONFIG_ARCH_MSM
+ bus->no_cfg_restore = 1;
+#endif /* CONFIG_ARCH_MSM */
+#endif /* SUPPORT_LINKDOWN_RECOVERY */
dhd_os_check_hang(bus->dhd, 0, -ETIMEDOUT);
}
-#if defined(DHD_ERPOM)
- dhd_schedule_reset(bus->dhd);
-#endif // endif
rc = -ETIMEDOUT;
}
+ bus->wait_for_d3_ack = 1;
+
+#ifdef PCIE_OOB
+ bus->oob_presuspend = FALSE;
+#endif /* PCIE_OOB */
} else {
/* Resume */
- DHD_ERROR(("%s: Entering resume state\n", __FUNCTION__));
- bus->last_resume_start_time = OSL_LOCALTIME_NS();
-
/**
* PCIE2_BAR0_CORE2_WIN gets reset after D3 cold.
* si_backplane_access(function to read/write backplane)
#if defined(BCMPCIE_OOB_HOST_WAKE)
DHD_OS_OOB_IRQ_WAKE_UNLOCK(bus->dhd);
#endif /* BCMPCIE_OOB_HOST_WAKE */
+#ifdef PCIE_INB_DW
+ if (INBAND_DW_ENAB(bus)) {
+ if (dhdpcie_bus_get_pcie_inband_dw_state(bus) == DW_DEVICE_HOST_SLEEP) {
+ dhdpcie_bus_set_pcie_inband_dw_state(bus, DW_DEVICE_HOST_WAKE_WAIT);
+ }
+ }
+#endif /* PCIE_INB_DW */
rc = dhdpcie_pci_suspend_resume(bus, state);
- dhdpcie_dump_resource(bus);
- DHD_BUS_LOCK(bus->bus_lock, flags_bus);
- /* set bus_low_power_state to DHD_BUS_NO_LOW_POWER_STATE */
- bus->bus_low_power_state = DHD_BUS_NO_LOW_POWER_STATE;
- DHD_BUS_UNLOCK(bus->bus_lock, flags_bus);
+#ifdef BCMPCIE_OOB_HOST_WAKE
+ bus->oob_presuspend = FALSE;
+#endif /* BCMPCIE_OOB_HOST_WAKE */
- if (!rc && bus->dhd->busstate == DHD_BUS_SUSPEND) {
+ if (bus->dhd->busstate == DHD_BUS_SUSPEND) {
if (bus->use_d0_inform) {
DHD_OS_WAKE_LOCK_WAIVE(bus->dhd);
dhdpcie_send_mb_data(bus, (H2D_HOST_D0_INFORM));
/* ring doorbell 1 (hostready) */
dhd_bus_hostready(bus);
}
+
DHD_GENERAL_LOCK(bus->dhd, flags);
bus->dhd->busstate = DHD_BUS_DATA;
+#ifdef DHD_PCIE_RUNTIMEPM
+ if (DHD_BUS_BUSY_CHECK_RPM_SUSPEND_DONE(bus->dhd)) {
+ bus->bus_wake = 1;
+ OSL_SMP_WMB();
+ wake_up_interruptible(&bus->rpm_queue);
+ }
+#endif /* DHD_PCIE_RUNTIMEPM */
+#ifdef PCIE_OOB
+ /*
+ * Assert & deassert Device Wake. The reasoning is as follows:
+ * 1) At this point the Host is in suspend, the Link is in L2/L3 and the
+ * Dongle is in D3 Cold, with Device Wake enabled.
+ * 2) When the Host comes out of suspend it first sends PERST# on the Link.
+ * Seeing this, the Dongle moves from D3 Cold to the NO DS state.
+ * 3) The Host OS then calls DHD's "resume" function, which first asserts
+ * Device Wake. By definition, an asserted Device Wake ensures the Dongle
+ * is out of deep sleep IF it is already in deep sleep. But the Dongle is
+ * now in the NO DS state, not deep sleep, so merely driving Device Wake
+ * high triggers no state transition. The Host must instead "toggle"
+ * Device Wake so that the Dongle synchronizes with the Host and starts
+ * the transition to D0.
+ * 4) The above applies only when the Host comes out of suspend and the
+ * Dongle comes out of D3 Cold.
+ */
+ /* This logic is not required when hostready is enabled */
+
+ if (!bus->dhd->d2h_hostrdy_supported) {
+ if (OOB_DW_ENAB(bus)) {
+ dhd_bus_set_device_wake(bus, TRUE);
+ OSL_DELAY(1000);
+ dhd_bus_set_device_wake(bus, FALSE);
+ }
+ }
+#endif /* PCIE_OOB */
/* resume all interface network queue. */
dhd_bus_start_queue(bus);
+ /* The Host is ready to process interrupts now so enable the same. */
/* TODO: for NDIS also we need to use enable_irq in future */
bus->resume_intr_enable_count++;
-
- /* For Linux, Macos etc (otherthan NDIS) enable back the dongle interrupts
- * using intmask and host interrupts
- * which were disabled in the dhdpcie_bus_isr()->dhd_bus_handle_d3_ack().
- */
dhdpcie_bus_intr_enable(bus); /* Enable back interrupt using Intmask!! */
dhdpcie_enable_irq(bus); /* Enable back interrupt from Host side!! */
-
DHD_GENERAL_UNLOCK(bus->dhd, flags);
-
- if (bus->dhd->dhd_watchdog_ms_backup) {
- DHD_ERROR(("%s: Enabling wdtick after resume\n",
- __FUNCTION__));
- dhd_os_wd_timer(bus->dhd, bus->dhd->dhd_watchdog_ms_backup);
- }
-
- bus->last_resume_end_time = OSL_LOCALTIME_NS();
- /* Update TCM rd index for EDL ring */
- DHD_EDL_RING_TCM_RD_UPDATE(bus->dhd);
+#ifdef DHD_TIMESYNC
+ DHD_OS_WAKE_LOCK_WAIVE(bus->dhd);
+ /* enable time sync mechanism, if configed */
+ dhd_timesync_control(bus->dhd, FALSE);
+ DHD_OS_WAKE_LOCK_RESTORE(bus->dhd);
+#endif /* DHD_TIMESYNC */
}
return rc;
}
return 0;
}
-static uint32
-dhd_apply_d11_war_length(struct dhd_bus *bus, uint32 len, uint32 d11_lpbk)
-{
- uint16 chipid = si_chipid(bus->sih);
- if ((chipid == BCM4375_CHIP_ID ||
- chipid == BCM4362_CHIP_ID ||
- chipid == BCM43751_CHIP_ID ||
- chipid == BCM43752_CHIP_ID ||
- chipid == BCM4377_CHIP_ID) &&
- (d11_lpbk != M2M_DMA_LPBK && d11_lpbk != M2M_NON_DMA_LPBK)) {
- len += 8;
- }
- DHD_ERROR(("%s: len %d\n", __FUNCTION__, len));
- return len;
-}
-
/** Transfers bytes from host to dongle and to host again using DMA */
static int
-dhdpcie_bus_dmaxfer_req(struct dhd_bus *bus,
- uint32 len, uint32 srcdelay, uint32 destdelay,
- uint32 d11_lpbk, uint32 core_num, uint32 wait)
+dhdpcie_bus_dmaxfer_req(
+ struct dhd_bus *bus, uint32 len, uint32 srcdelay, uint32 destdelay, uint32 d11_lpbk)
{
- int ret = 0;
-
if (bus->dhd == NULL) {
DHD_ERROR(("%s: bus not inited\n", __FUNCTION__));
return BCME_ERROR;
DHD_ERROR(("%s: len is too small or too large\n", __FUNCTION__));
return BCME_ERROR;
}
-
- len = dhd_apply_d11_war_length(bus, len, d11_lpbk);
-
- bus->dmaxfer_complete = FALSE;
- ret = dhdmsgbuf_dmaxfer_req(bus->dhd, len, srcdelay, destdelay,
- d11_lpbk, core_num);
- if (ret != BCME_OK || !wait) {
- DHD_INFO(("%s: dmaxfer req returns status %u; wait = %u\n", __FUNCTION__,
- ret, wait));
- } else {
- ret = dhd_os_dmaxfer_wait(bus->dhd, &bus->dmaxfer_complete);
- if (ret < 0)
- ret = BCME_NOTREADY;
- }
-
- return ret;
-
+ return dhdmsgbuf_dmaxfer_req(bus->dhd, len, srcdelay, destdelay, d11_lpbk);
}
-bool
-dhd_bus_is_multibp_capable(struct dhd_bus *bus)
-{
- return MULTIBP_CAP(bus->sih);
-}
-#define PCIE_REV_FOR_4378A0 66 /* dhd_bus_perform_flr_with_quiesce() causes problems */
-#define PCIE_REV_FOR_4378B0 68
static int
dhdpcie_bus_download_state(dhd_bus_t *bus, bool enter)
{
int bcmerror = 0;
volatile uint32 *cr4_regs;
- bool do_flr;
if (!bus->sih) {
DHD_ERROR(("%s: NULL sih!!\n", __FUNCTION__));
return BCME_ERROR;
}
-
- do_flr = ((bus->sih->buscorerev != PCIE_REV_FOR_4378A0) &&
- (bus->sih->buscorerev != PCIE_REV_FOR_4378B0));
-
- if (MULTIBP_ENAB(bus->sih) && !do_flr) {
- dhd_bus_pcie_pwr_req(bus);
- }
-
/* To enter download state, disable ARM and reset SOCRAM.
* To exit download state, simply reset ARM (default is RAM boot).
*/
if (enter) {
-
/* Make sure BAR1 maps to backplane address 0 */
- dhdpcie_setbar1win(bus, 0x00000000);
+ dhdpcie_bus_cfg_write_dword(bus, PCI_BAR1_WIN, 4, 0x00000000);
bus->alp_only = TRUE;
/* some chips (e.g. 43602) have two ARM cores, the CR4 is receives the firmware. */
DHD_ERROR(("%s: could not write vars to RAM\n", __FUNCTION__));
goto fail;
}
- /* write random numbers to sysmem for the purpose of
- * randomizing heap address space.
- */
- if ((bcmerror = dhdpcie_wrt_rnd(bus)) != BCME_OK) {
- DHD_ERROR(("%s: Failed to get random seed to write to TCM !\n",
- __FUNCTION__));
- goto fail;
- }
/* switch back to arm core again */
if (!(si_setcore(bus->sih, ARMCA7_CORE_ID, 0))) {
DHD_ERROR(("%s: Failed to find ARM CA7 core!\n", __FUNCTION__));
goto fail;
}
+
if (!(si_setcore(bus->sih, ARM7S_CORE_ID, 0)) &&
!(si_setcore(bus->sih, ARMCM3_CORE_ID, 0))) {
DHD_ERROR(("%s: Failed to find ARM core!\n", __FUNCTION__));
goto fail;
}
+#ifdef BCM_ASLR_HEAP
/* write a random number to TCM for the purpose of
* randomizing heap address space.
*/
- if ((bcmerror = dhdpcie_wrt_rnd(bus)) != BCME_OK) {
- DHD_ERROR(("%s: Failed to get random seed to write to TCM !\n",
- __FUNCTION__));
- goto fail;
- }
+ dhdpcie_wrt_rnd(bus);
+#endif /* BCM_ASLR_HEAP */
/* switch back to arm core again */
if (!(si_setcore(bus->sih, ARMCR4_CORE_ID, 0))) {
/* Always return to PCIE core */
si_setcore(bus->sih, PCIE2_CORE_ID, 0);
- if (MULTIBP_ENAB(bus->sih) && !do_flr) {
- dhd_bus_pcie_pwr_req_clear(bus);
- }
-
return bcmerror;
} /* dhdpcie_bus_download_state */
/* Verify NVRAM bytes */
DHD_INFO(("%s: Compare NVRAM dl & ul; varsize=%d\n", __FUNCTION__, varsize));
nvram_ularray = (uint8*)MALLOC(bus->dhd->osh, varsize);
- if (!nvram_ularray) {
- MFREE(bus->dhd->osh, vbuffer, varsize);
+ if (!nvram_ularray)
return BCME_NOMEM;
- }
/* Upload image to verify downloaded contents. */
memset(nvram_ularray, 0xaa, varsize);
/* We assumed that string length of both ccode and
* regrev values should not exceed WLC_CNTRY_BUF_SZ
*/
- if (ep && ((ep - sp) <= WLC_CNTRY_BUF_SZ)) {
+ if (sp && ep && ((ep - sp) <= WLC_CNTRY_BUF_SZ)) {
sp++;
while (*sp != '\0') {
DHD_INFO(("%s: parse '%s', current sp = '%c'\n",
}
#endif /* DHD_USE_SINGLE_NVRAM_FILE */
+
err:
return bcmerror;
}
uint8 lcreg_offset; /* PCIE capability LCreg offset in the config space */
uint32 reg_val;
+
pcie_cap = dhdpcie_find_pci_capability(osh, PCI_CAP_PCIECAP_ID);
if (!pcie_cap) {
return OSL_PCI_READ_CONFIG(osh, lcreg_offset, sizeof(uint32));
}
+
+
uint8
dhdpcie_clkreq(osl_t *osh, uint32 mask, uint32 val)
{
return 0;
}
-void dhd_dump_intr_counters(dhd_pub_t *dhd, struct bcmstrbuf *strbuf)
-{
- dhd_bus_t *bus;
- uint64 current_time = OSL_LOCALTIME_NS();
-
- if (!dhd) {
- DHD_ERROR(("%s: dhd is NULL\n", __FUNCTION__));
- return;
- }
-
- bus = dhd->bus;
- if (!bus) {
- DHD_ERROR(("%s: bus is NULL\n", __FUNCTION__));
- return;
- }
-
- bcm_bprintf(strbuf, "\n ------- DUMPING INTR enable/disable counters-------\n");
- bcm_bprintf(strbuf, "resume_intr_enable_count=%lu dpc_intr_enable_count=%lu\n"
- "isr_intr_disable_count=%lu suspend_intr_disable_count=%lu\n"
- "dpc_return_busdown_count=%lu non_ours_irq_count=%lu\n",
- bus->resume_intr_enable_count, bus->dpc_intr_enable_count,
- bus->isr_intr_disable_count, bus->suspend_intr_disable_count,
- bus->dpc_return_busdown_count, bus->non_ours_irq_count);
-#ifdef BCMPCIE_OOB_HOST_WAKE
- bcm_bprintf(strbuf, "oob_intr_count=%lu oob_intr_enable_count=%lu"
- " oob_intr_disable_count=%lu\noob_irq_num=%d last_oob_irq_time="SEC_USEC_FMT
- " last_oob_irq_enable_time="SEC_USEC_FMT"\nlast_oob_irq_disable_time="SEC_USEC_FMT
- " oob_irq_enabled=%d oob_gpio_level=%d\n",
- bus->oob_intr_count, bus->oob_intr_enable_count,
- bus->oob_intr_disable_count, dhdpcie_get_oob_irq_num(bus),
- GET_SEC_USEC(bus->last_oob_irq_time), GET_SEC_USEC(bus->last_oob_irq_enable_time),
- GET_SEC_USEC(bus->last_oob_irq_disable_time), dhdpcie_get_oob_irq_status(bus),
- dhdpcie_get_oob_irq_level());
-#endif /* BCMPCIE_OOB_HOST_WAKE */
- bcm_bprintf(strbuf, "\ncurrent_time="SEC_USEC_FMT" isr_entry_time="SEC_USEC_FMT
- " isr_exit_time="SEC_USEC_FMT"\ndpc_sched_time="SEC_USEC_FMT
- " last_non_ours_irq_time="SEC_USEC_FMT" dpc_entry_time="SEC_USEC_FMT"\n"
- "last_process_ctrlbuf_time="SEC_USEC_FMT " last_process_flowring_time="SEC_USEC_FMT
- " last_process_txcpl_time="SEC_USEC_FMT"\nlast_process_rxcpl_time="SEC_USEC_FMT
- " last_process_infocpl_time="SEC_USEC_FMT" last_process_edl_time="SEC_USEC_FMT
- "\ndpc_exit_time="SEC_USEC_FMT" resched_dpc_time="SEC_USEC_FMT"\n"
- "last_d3_inform_time="SEC_USEC_FMT"\n",
- GET_SEC_USEC(current_time), GET_SEC_USEC(bus->isr_entry_time),
- GET_SEC_USEC(bus->isr_exit_time), GET_SEC_USEC(bus->dpc_sched_time),
- GET_SEC_USEC(bus->last_non_ours_irq_time), GET_SEC_USEC(bus->dpc_entry_time),
- GET_SEC_USEC(bus->last_process_ctrlbuf_time),
- GET_SEC_USEC(bus->last_process_flowring_time),
- GET_SEC_USEC(bus->last_process_txcpl_time),
- GET_SEC_USEC(bus->last_process_rxcpl_time),
- GET_SEC_USEC(bus->last_process_infocpl_time),
- GET_SEC_USEC(bus->last_process_edl_time),
- GET_SEC_USEC(bus->dpc_exit_time), GET_SEC_USEC(bus->resched_dpc_time),
- GET_SEC_USEC(bus->last_d3_inform_time));
-
- bcm_bprintf(strbuf, "\nlast_suspend_start_time="SEC_USEC_FMT" last_suspend_end_time="
- SEC_USEC_FMT" last_resume_start_time="SEC_USEC_FMT" last_resume_end_time="
- SEC_USEC_FMT"\n", GET_SEC_USEC(bus->last_suspend_start_time),
- GET_SEC_USEC(bus->last_suspend_end_time),
- GET_SEC_USEC(bus->last_resume_start_time),
- GET_SEC_USEC(bus->last_resume_end_time));
-
-#if defined(SHOW_LOGTRACE) && defined(DHD_USE_KTHREAD_FOR_LOGTRACE)
- bcm_bprintf(strbuf, "logtrace_thread_entry_time="SEC_USEC_FMT
- " logtrace_thread_sem_down_time="SEC_USEC_FMT
- "\nlogtrace_thread_flush_time="SEC_USEC_FMT
- " logtrace_thread_unexpected_break_time="SEC_USEC_FMT
- "\nlogtrace_thread_complete_time="SEC_USEC_FMT"\n",
- GET_SEC_USEC(dhd->logtrace_thr_ts.entry_time),
- GET_SEC_USEC(dhd->logtrace_thr_ts.sem_down_time),
- GET_SEC_USEC(dhd->logtrace_thr_ts.flush_time),
- GET_SEC_USEC(dhd->logtrace_thr_ts.unexpected_break_time),
- GET_SEC_USEC(dhd->logtrace_thr_ts.complete_time));
-#endif /* SHOW_LOGTRACE && DHD_USE_KTHREAD_FOR_LOGTRACE */
-}
-
void dhd_dump_intr_registers(dhd_pub_t *dhd, struct bcmstrbuf *strbuf)
{
uint32 intstatus = 0;
uint32 intmask = 0;
- uint32 d2h_db0 = 0;
+ uint32 mbintstatus = 0;
uint32 d2h_mb_data = 0;
- intstatus = si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
- dhd->bus->pcie_mailbox_int, 0, 0);
- intmask = si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
- dhd->bus->pcie_mailbox_mask, 0, 0);
- d2h_db0 = si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, PCID2H_MailBox, 0, 0);
+ intstatus = si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, PCIMailBoxInt, 0, 0);
+ intmask = si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, PCIMailBoxMask, 0, 0);
+ mbintstatus = si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, PCID2H_MailBox, 0, 0);
dhd_bus_cmn_readshared(dhd->bus, &d2h_mb_data, D2H_MB_DATA, 0);
- bcm_bprintf(strbuf, "intstatus=0x%x intmask=0x%x d2h_db0=0x%x\n",
- intstatus, intmask, d2h_db0);
+ bcm_bprintf(strbuf, "intstatus=0x%x intmask=0x%x mbintstatus=0x%x\n",
+ intstatus, intmask, mbintstatus);
bcm_bprintf(strbuf, "d2h_mb_data=0x%x def_intmask=0x%x\n",
d2h_mb_data, dhd->bus->def_intmask);
+ bcm_bprintf(strbuf, "\n ------- DUMPING INTR enable/disable counters-------\n");
+ bcm_bprintf(strbuf, "resume_intr_enable_count=%lu dpc_intr_enable_count=%lu\n"
+ "isr_intr_disable_count=%lu suspend_intr_disable_count=%lu\n"
+ "dpc_return_busdown_count=%lu\n",
+ dhd->bus->resume_intr_enable_count, dhd->bus->dpc_intr_enable_count,
+ dhd->bus->isr_intr_disable_count, dhd->bus->suspend_intr_disable_count,
+ dhd->bus->dpc_return_busdown_count);
}
+
/** Add bus dump output to a buffer */
void dhd_bus_dump(dhd_pub_t *dhdp, struct bcmstrbuf *strbuf)
{
int ix = 0;
flow_ring_node_t *flow_ring_node;
flow_info_t *flow_info;
-#ifdef TX_STATUS_LATENCY_STATS
- uint8 ifindex;
- if_flow_lkup_t *if_flow_lkup;
- dhd_if_tx_status_latency_t if_tx_status_latency[DHD_MAX_IFS];
-#endif /* TX_STATUS_LATENCY_STATS */
+ char eabuf[ETHER_ADDR_STR_LEN];
if (dhdp->busstate != DHD_BUS_DATA)
return;
-#ifdef TX_STATUS_LATENCY_STATS
- memset(if_tx_status_latency, 0, sizeof(if_tx_status_latency));
-#endif /* TX_STATUS_LATENCY_STATS */
#ifdef DHD_WAKE_STATUS
bcm_bprintf(strbuf, "wake %u rxwake %u readctrlwake %u\n",
bcmpcie_get_total_wake(dhdp->bus), dhdp->bus->wake_counts.rxwake,
dhd_prot_print_info(dhdp, strbuf);
dhd_dump_intr_registers(dhdp, strbuf);
- dhd_dump_intr_counters(dhdp, strbuf);
bcm_bprintf(strbuf, "h2d_mb_data_ptr_addr 0x%x, d2h_mb_data_ptr_addr 0x%x\n",
dhdp->bus->h2d_mb_data_ptr_addr, dhdp->bus->d2h_mb_data_ptr_addr);
bcm_bprintf(strbuf, "dhd cumm_ctr %d\n", DHD_CUMM_CTR_READ(&dhdp->cumm_ctr));
-#ifdef DHD_LIMIT_MULTI_CLIENT_FLOWRINGS
- bcm_bprintf(strbuf, "multi_client_flow_rings:%d max_multi_client_flow_rings:%d\n",
- dhdp->multi_client_flow_rings, dhdp->max_multi_client_flow_rings);
-#endif /* DHD_LIMIT_MULTI_CLIENT_FLOWRINGS */
bcm_bprintf(strbuf,
- "%4s %4s %2s %4s %17s %4s %4s %6s %10s %4s %4s ",
+ "%s %4s %2s %4s %17s %4s %4s %6s %10s %4s %4s ",
"Num:", "Flow", "If", "Prio", ":Dest_MacAddress:", "Qlen", "CLen", "L2CLen",
- " Overflows", " RD", " WR");
-
-#ifdef TX_STATUS_LATENCY_STATS
- /* Average Tx status/Completion Latency in micro secs */
- bcm_bprintf(strbuf, "%16s %16s ", " NumTxPkts", " AvgTxCmpL_Us");
-#endif /* TX_STATUS_LATENCY_STATS */
-
- bcm_bprintf(strbuf, "\n");
+ "Overflows", "RD", "WR");
+ bcm_bprintf(strbuf, "%5s %6s %5s \n", "Acked", "tossed", "noack");
for (flowid = 0; flowid < dhdp->num_flow_rings; flowid++) {
flow_ring_node = DHD_FLOW_RING(dhdp, flowid);
flow_info = &flow_ring_node->flow_info;
bcm_bprintf(strbuf,
- "%4d %4d %2d %4d "MACDBG" %4d %4d %6d %10u ", ix++,
+ "%3d. %4d %2d %4d %17s %4d %4d %6d %10u ", ix++,
flow_ring_node->flowid, flow_info->ifindex, flow_info->tid,
- MAC2STRDBG(flow_info->da),
+ bcm_ether_ntoa((struct ether_addr *)&flow_info->da, eabuf),
DHD_FLOW_QUEUE_LEN(&flow_ring_node->queue),
DHD_CUMM_CTR_READ(DHD_FLOW_QUEUE_CLEN_PTR(&flow_ring_node->queue)),
DHD_CUMM_CTR_READ(DHD_FLOW_QUEUE_L2CLEN_PTR(&flow_ring_node->queue)),
DHD_FLOW_QUEUE_FAILURES(&flow_ring_node->queue));
dhd_prot_print_flow_ring(dhdp, flow_ring_node->prot_info, strbuf,
"%4d %4d ");
-
-#ifdef TX_STATUS_LATENCY_STATS
- bcm_bprintf(strbuf, "%16d %16d ",
- flow_info->num_tx_pkts,
- flow_info->num_tx_status ?
- DIV_U64_BY_U64(flow_info->cum_tx_status_latency,
- flow_info->num_tx_status) : 0);
-
- ifindex = flow_info->ifindex;
- ASSERT(ifindex < DHD_MAX_IFS);
- if (ifindex < DHD_MAX_IFS) {
- if_tx_status_latency[ifindex].num_tx_status += flow_info->num_tx_status;
- if_tx_status_latency[ifindex].cum_tx_status_latency +=
- flow_info->cum_tx_status_latency;
- } else {
- DHD_ERROR(("%s: Bad IF index: %d associated with flowid: %d\n",
- __FUNCTION__, ifindex, flowid));
- }
-#endif /* TX_STATUS_LATENCY_STATS */
- bcm_bprintf(strbuf, "\n");
- }
-
-#ifdef TX_STATUS_LATENCY_STATS
- bcm_bprintf(strbuf, "\n%s %16s %16s\n", "If", "AvgTxCmpL_Us", "NumTxStatus");
- if_flow_lkup = (if_flow_lkup_t *)dhdp->if_flow_lkup;
- for (ix = 0; ix < DHD_MAX_IFS; ix++) {
- if (!if_flow_lkup[ix].status) {
- continue;
- }
- bcm_bprintf(strbuf, "%2d %16d %16d\n",
- ix,
- if_tx_status_latency[ix].num_tx_status ?
- DIV_U64_BY_U64(if_tx_status_latency[ix].cum_tx_status_latency,
- if_tx_status_latency[ix].num_tx_status): 0,
- if_tx_status_latency[ix].num_tx_status);
- }
-#endif /* TX_STATUS_LATENCY_STATS */
-
-#ifdef DHD_HP2P
- if (dhdp->hp2p_capable) {
- bcm_bprintf(strbuf, "\n%s %16s %16s", "Flowid", "Tx_t0", "Tx_t1");
-
- for (flowid = 0; flowid < MAX_HP2P_FLOWS; flowid++) {
- hp2p_info_t *hp2p_info;
- int bin;
-
- hp2p_info = &dhdp->hp2p_info[flowid];
- if (hp2p_info->num_timer_start == 0)
- continue;
-
- bcm_bprintf(strbuf, "\n%d", hp2p_info->flowid);
- bcm_bprintf(strbuf, "\n%s", "Bin");
-
- for (bin = 0; bin < MAX_TX_HIST_BIN; bin++) {
- bcm_bprintf(strbuf, "\n%2d %20d %16d", bin,
- hp2p_info->tx_t0[bin], hp2p_info->tx_t1[bin]);
- }
-
- bcm_bprintf(strbuf, "\n%s %16s", "Flowid", "Rx_t0");
- bcm_bprintf(strbuf, "\n%d", hp2p_info->flowid);
- bcm_bprintf(strbuf, "\n%s", "Bin");
-
- for (bin = 0; bin < MAX_RX_HIST_BIN; bin++) {
- bcm_bprintf(strbuf, "\n%d %20d", bin,
- hp2p_info->rx_t0[bin]);
- }
-
- bcm_bprintf(strbuf, "\n%s %16s %16s",
- "Packet limit", "Timer limit", "Timer start");
- bcm_bprintf(strbuf, "\n%d %24d %16d", hp2p_info->num_pkt_limit,
- hp2p_info->num_timer_limit, hp2p_info->num_timer_start);
- }
-
- bcm_bprintf(strbuf, "\n");
+ bcm_bprintf(strbuf,
+ "%5s %6s %5s\n", "NA", "NA", "NA");
}
-#endif /* DHD_HP2P */
-
bcm_bprintf(strbuf, "D3 inform cnt %d\n", dhdp->bus->d3_inform_cnt);
bcm_bprintf(strbuf, "D0 inform cnt %d\n", dhdp->bus->d0_inform_cnt);
bcm_bprintf(strbuf, "D0 inform in use cnt %d\n", dhdp->bus->d0_inform_in_use_cnt);
if (dhdp->d2h_hostrdy_supported) {
bcm_bprintf(strbuf, "hostready count:%d\n", dhdp->bus->hostready_count);
}
- bcm_bprintf(strbuf, "d2h_intr_method -> %s\n",
- dhdp->bus->d2h_intr_method ? "PCIE_MSI" : "PCIE_INTX");
+#ifdef PCIE_INB_DW
+ /* Inband device wake counters */
+ if (INBAND_DW_ENAB(dhdp->bus)) {
+ bcm_bprintf(strbuf, "Inband device_wake assert count: %d\n",
+ dhdp->bus->inband_dw_assert_cnt);
+ bcm_bprintf(strbuf, "Inband device_wake deassert count: %d\n",
+ dhdp->bus->inband_dw_deassert_cnt);
+ bcm_bprintf(strbuf, "Inband DS-EXIT <host initiated> count: %d\n",
+ dhdp->bus->inband_ds_exit_host_cnt);
+ bcm_bprintf(strbuf, "Inband DS-EXIT <device initiated> count: %d\n",
+ dhdp->bus->inband_ds_exit_device_cnt);
+ bcm_bprintf(strbuf, "Inband DS-EXIT Timeout count: %d\n",
+ dhdp->bus->inband_ds_exit_to_cnt);
+ bcm_bprintf(strbuf, "Inband HOST_SLEEP-EXIT Timeout count: %d\n",
+ dhdp->bus->inband_host_sleep_exit_to_cnt);
+ }
+#endif /* PCIE_INB_DW */
}
-#ifdef DNGL_AXI_ERROR_LOGGING
-bool
-dhd_axi_sig_match(dhd_pub_t *dhdp)
+/**
+ * Brings transmit packets on all flow rings closer to the dongle, by moving (a subset) from their
+ * flow queue to their flow ring.
+ */
+static void
+dhd_update_txflowrings(dhd_pub_t *dhd)
{
- uint32 axi_tcm_addr = dhdpcie_bus_rtcm32(dhdp->bus, dhdp->axierror_logbuf_addr);
-
- if (dhdp->dhd_induce_error == DHD_INDUCE_DROP_AXI_SIG) {
- DHD_ERROR(("%s: Induce AXI signature drop\n", __FUNCTION__));
- return FALSE;
- }
+ unsigned long flags;
+ dll_t *item, *next;
+ flow_ring_node_t *flow_ring_node;
+ struct dhd_bus *bus = dhd->bus;
- DHD_ERROR(("%s: axi_tcm_addr: 0x%x, tcm range: 0x%x ~ 0x%x\n",
- __FUNCTION__, axi_tcm_addr, dhdp->bus->dongle_ram_base,
- dhdp->bus->dongle_ram_base + dhdp->bus->ramsize));
- if (axi_tcm_addr >= dhdp->bus->dongle_ram_base &&
- axi_tcm_addr < dhdp->bus->dongle_ram_base + dhdp->bus->ramsize) {
- uint32 axi_signature = dhdpcie_bus_rtcm32(dhdp->bus, (axi_tcm_addr +
- OFFSETOF(hnd_ext_trap_axi_error_v1_t, signature)));
- if (axi_signature == HND_EXT_TRAP_AXIERROR_SIGNATURE) {
- return TRUE;
- } else {
- DHD_ERROR(("%s: No AXI signature: 0x%x\n",
- __FUNCTION__, axi_signature));
- return FALSE;
+ /* Hold flowring_list_lock to ensure no race condition while accessing the List */
+ DHD_FLOWRING_LIST_LOCK(bus->dhd->flowring_list_lock, flags);
+ for (item = dll_head_p(&bus->flowring_active_list);
+ (!dhd_is_device_removed(dhd) && !dll_end(&bus->flowring_active_list, item));
+ item = next) {
+ if (dhd->hang_was_sent) {
+ break;
}
- } else {
- DHD_ERROR(("%s: No AXI shared tcm address debug info.\n", __FUNCTION__));
- return FALSE;
- }
-}
-
-void
-dhd_axi_error(dhd_pub_t *dhdp)
-{
- dhd_axi_error_dump_t *axi_err_dump;
- uint8 *axi_err_buf = NULL;
- uint8 *p_axi_err = NULL;
- uint32 axi_logbuf_addr;
- uint32 axi_tcm_addr;
- int err, size;
- OSL_DELAY(75000);
+ next = dll_next_p(item);
+ flow_ring_node = dhd_constlist_to_flowring(item);
- axi_logbuf_addr = dhdp->axierror_logbuf_addr;
- if (!axi_logbuf_addr) {
- DHD_ERROR(("%s: No AXI TCM address debug info.\n", __FUNCTION__));
- goto sched_axi;
- }
+ /* Ensure that flow_ring_node in the list is Not Null */
+ ASSERT(flow_ring_node != NULL);
- axi_err_dump = dhdp->axi_err_dump;
- if (!axi_err_dump) {
- goto sched_axi;
- }
+ /* Ensure that the flowring node has valid contents */
+ ASSERT(flow_ring_node->prot_info != NULL);
- if (!dhd_axi_sig_match(dhdp)) {
- goto sched_axi;
+ dhd_prot_update_txflowring(dhd, flow_ring_node->flowid, flow_ring_node->prot_info);
}
+ DHD_FLOWRING_LIST_UNLOCK(bus->dhd->flowring_list_lock, flags);
+}
- /* Reading AXI error data for SMMU fault */
- DHD_ERROR(("%s: Read AXI data from TCM address\n", __FUNCTION__));
- axi_tcm_addr = dhdpcie_bus_rtcm32(dhdp->bus, axi_logbuf_addr);
- size = sizeof(hnd_ext_trap_axi_error_v1_t);
- axi_err_buf = MALLOCZ(dhdp->osh, size);
- if (axi_err_buf == NULL) {
- DHD_ERROR(("%s: out of memory !\n", __FUNCTION__));
- goto sched_axi;
+/** Mailbox ringbell Function */
+static void
+dhd_bus_gen_devmb_intr(struct dhd_bus *bus)
+{
+ if ((bus->sih->buscorerev == 2) || (bus->sih->buscorerev == 6) ||
+ (bus->sih->buscorerev == 4)) {
+ DHD_ERROR(("%s: mailbox communication not supported\n", __FUNCTION__));
+ return;
}
-
- p_axi_err = axi_err_buf;
- err = dhdpcie_bus_membytes(dhdp->bus, FALSE, axi_tcm_addr, p_axi_err, size);
- if (err) {
- DHD_ERROR(("%s: error %d on reading %d membytes at 0x%08x\n",
- __FUNCTION__, err, size, axi_tcm_addr));
- goto sched_axi;
+ if (bus->db1_for_mb) {
+ /* this is a pcie core register, not the config register */
+ DHD_INFO(("%s: writing a mail box interrupt to the device, through doorbell 1\n",
+ __FUNCTION__));
+ si_corereg(bus->sih, bus->sih->buscoreidx, PCIH2D_DB1, ~0, 0x12345678);
+ } else {
+ DHD_INFO(("%s: writing a mail box interrupt to the device, through config space\n",
+ __FUNCTION__));
+ dhdpcie_bus_cfg_write_dword(bus, PCISBMbx, 4, (1 << 0));
+ dhdpcie_bus_cfg_write_dword(bus, PCISBMbx, 4, (1 << 0));
}
+}
- /* Dump data to Dmesg */
- dhd_log_dump_axi_error(axi_err_buf);
- err = memcpy_s(&axi_err_dump->etd_axi_error_v1, size, axi_err_buf, size);
- if (err) {
- DHD_ERROR(("%s: failed to copy etd axi error info, err=%d\n",
- __FUNCTION__, err));
- }
+/* Upon receiving a mailbox interrupt with the H2D_FW_TRAP bit set in the
+ * mailbox location, the device traps.
+ */
+static void
+dhdpcie_fw_trap(dhd_bus_t *bus)
+{
+ /* Send the mailbox data and generate mailbox intr. */
+ dhdpcie_send_mb_data(bus, H2D_FW_TRAP);
+}
-sched_axi:
- if (axi_err_buf) {
- MFREE(dhdp->osh, axi_err_buf, size);
+#if defined(PCIE_OOB) || defined(PCIE_INB_DW)
+void
+dhd_bus_doorbell_timeout_reset(struct dhd_bus *bus)
+{
+ if (dhd_doorbell_timeout)
+ dhd_timeout_start(&bus->doorbell_timer,
+ (dhd_doorbell_timeout * 1000) / dhd_watchdog_ms);
+ else if (!(bus->dhd->busstate == DHD_BUS_SUSPEND)) {
+ dhd_bus_set_device_wake(bus, FALSE);
}
- dhd_schedule_axi_error_dump(dhdp, NULL);
}
+#endif /* defined(PCIE_OOB) || defined(PCIE_INB_DW) */
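
dhd_bus_doorbell_timeout_reset() either pushes the doorbell deadline out again or, when no timeout is configured and the bus is not suspended, drops Device Wake right away. A small sketch of that control flow, with hypothetical timer and bus types standing in for the driver's:

#include <stdbool.h>
#include <stdint.h>

/* Hypothetical stand-ins for the driver's timer and bus state. */
struct simple_timer { uint32_t ticks_left; };
enum bus_state { BUS_DATA, BUS_SUSPEND };

static void timer_restart(struct simple_timer *t, uint32_t ticks)
{
	t->ticks_left = ticks;
}

/* Mirrors the shape of dhd_bus_doorbell_timeout_reset(): when a doorbell
 * timeout is configured, push the deadline out again; when it is not, and
 * the bus is not suspended, drop Device Wake immediately. */
static void doorbell_timeout_reset(struct simple_timer *t,
                                   uint32_t timeout_ticks,
                                   enum bus_state state,
                                   void (*set_device_wake)(bool assert))
{
	if (timeout_ticks)
		timer_restart(t, timeout_ticks);
	else if (state != BUS_SUSPEND)
		set_device_wake(false);
}
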
-static void
-dhd_log_dump_axi_error(uint8 *axi_err)
-{
- dma_dentry_v1_t dma_dentry;
- dma_fifo_v1_t dma_fifo;
- int i = 0, j = 0;
-
- if (*(uint8 *)axi_err == HND_EXT_TRAP_AXIERROR_VERSION_1) {
- hnd_ext_trap_axi_error_v1_t *axi_err_v1 = (hnd_ext_trap_axi_error_v1_t *)axi_err;
- DHD_ERROR(("%s: signature : 0x%x\n", __FUNCTION__, axi_err_v1->signature));
- DHD_ERROR(("%s: version : 0x%x\n", __FUNCTION__, axi_err_v1->version));
- DHD_ERROR(("%s: length : 0x%x\n", __FUNCTION__, axi_err_v1->length));
- DHD_ERROR(("%s: dma_fifo_valid_count : 0x%x\n",
- __FUNCTION__, axi_err_v1->dma_fifo_valid_count));
- DHD_ERROR(("%s: axi_errorlog_status : 0x%x\n",
- __FUNCTION__, axi_err_v1->axi_errorlog_status));
- DHD_ERROR(("%s: axi_errorlog_core : 0x%x\n",
- __FUNCTION__, axi_err_v1->axi_errorlog_core));
- DHD_ERROR(("%s: axi_errorlog_hi : 0x%x\n",
- __FUNCTION__, axi_err_v1->axi_errorlog_hi));
- DHD_ERROR(("%s: axi_errorlog_lo : 0x%x\n",
- __FUNCTION__, axi_err_v1->axi_errorlog_lo));
- DHD_ERROR(("%s: axi_errorlog_id : 0x%x\n",
- __FUNCTION__, axi_err_v1->axi_errorlog_id));
-
- for (i = 0; i < MAX_DMAFIFO_ENTRIES_V1; i++) {
- dma_fifo = axi_err_v1->dma_fifo[i];
- DHD_ERROR(("%s: valid:%d : 0x%x\n", __FUNCTION__, i, dma_fifo.valid));
- DHD_ERROR(("%s: direction:%d : 0x%x\n",
- __FUNCTION__, i, dma_fifo.direction));
- DHD_ERROR(("%s: index:%d : 0x%x\n",
- __FUNCTION__, i, dma_fifo.index));
- DHD_ERROR(("%s: dpa:%d : 0x%x\n",
- __FUNCTION__, i, dma_fifo.dpa));
- DHD_ERROR(("%s: desc_lo:%d : 0x%x\n",
- __FUNCTION__, i, dma_fifo.desc_lo));
- DHD_ERROR(("%s: desc_hi:%d : 0x%x\n",
- __FUNCTION__, i, dma_fifo.desc_hi));
- DHD_ERROR(("%s: din:%d : 0x%x\n",
- __FUNCTION__, i, dma_fifo.din));
- DHD_ERROR(("%s: dout:%d : 0x%x\n",
- __FUNCTION__, i, dma_fifo.dout));
- for (j = 0; j < MAX_DMAFIFO_DESC_ENTRIES_V1; j++) {
- dma_dentry = axi_err_v1->dma_fifo[i].dentry[j];
- DHD_ERROR(("%s: ctrl1:%d : 0x%x\n",
- __FUNCTION__, i, dma_dentry.ctrl1));
- DHD_ERROR(("%s: ctrl2:%d : 0x%x\n",
- __FUNCTION__, i, dma_dentry.ctrl2));
- DHD_ERROR(("%s: addrlo:%d : 0x%x\n",
- __FUNCTION__, i, dma_dentry.addrlo));
- DHD_ERROR(("%s: addrhi:%d : 0x%x\n",
- __FUNCTION__, i, dma_dentry.addrhi));
- }
- }
- }
- else {
- DHD_ERROR(("%s: Invalid AXI version: 0x%x\n", __FUNCTION__, (*(uint8 *)axi_err)));
+#ifdef PCIE_INB_DW
+
+void
+dhd_bus_inb_ack_pending_ds_req(dhd_bus_t *bus)
+{
+ /* The DHD_BUS_INB_DW_LOCK must be held before
+ * calling this function !!
+ */
+ if ((dhdpcie_bus_get_pcie_inband_dw_state(bus) ==
+ DW_DEVICE_DS_DEV_SLEEP_PEND) &&
+ (bus->host_active_cnt == 0)) {
+ dhdpcie_bus_set_pcie_inband_dw_state(bus, DW_DEVICE_DS_DEV_SLEEP);
+ dhdpcie_send_mb_data(bus, H2D_HOST_DS_ACK);
}
}
-#endif /* DNGL_AXI_ERROR_LOGGING */
-/**
- * Brings transmit packets on all flow rings closer to the dongle, by moving (a subset) from their
- * flow queue to their flow ring.
- */
-static void
-dhd_update_txflowrings(dhd_pub_t *dhd)
+int
+dhd_bus_inb_set_device_wake(struct dhd_bus *bus, bool val)
{
+ int timeleft;
unsigned long flags;
- dll_t *item, *next;
- flow_ring_node_t *flow_ring_node;
- struct dhd_bus *bus = dhd->bus;
+ int ret;
- if (dhd_query_bus_erros(dhd)) {
- return;
+ if (!INBAND_DW_ENAB(bus)) {
+ return BCME_ERROR;
}
- /* Hold flowring_list_lock to ensure no race condition while accessing the List */
- DHD_FLOWRING_LIST_LOCK(bus->dhd->flowring_list_lock, flags);
- for (item = dll_head_p(&bus->flowring_active_list);
- (!dhd_is_device_removed(dhd) && !dll_end(&bus->flowring_active_list, item));
- item = next) {
- if (dhd->hang_was_sent) {
- break;
+ if (val) {
+ DHD_BUS_INB_DW_LOCK(bus->inb_lock, flags);
+
+		/*
+		 * Reset the Door Bell Timeout value so that the Watchdog
+		 * doesn't try to de-assert Device Wake while we are still
+		 * in the process of asserting it.
+		 */
+ if (dhd_doorbell_timeout) {
+ dhd_timeout_start(&bus->doorbell_timer,
+ (dhd_doorbell_timeout * 1000) / dhd_watchdog_ms);
+ }
+
+ if (dhdpcie_bus_get_pcie_inband_dw_state(bus) ==
+ DW_DEVICE_DS_DEV_SLEEP) {
+ /* Clear wait_for_ds_exit */
+ bus->wait_for_ds_exit = 0;
+ ret = dhdpcie_send_mb_data(bus, H2DMB_DS_DEVICE_WAKE_ASSERT);
+ if (ret != BCME_OK) {
+ DHD_ERROR(("Failed: assert Inband device_wake\n"));
+ bus->wait_for_ds_exit = 1;
+ DHD_BUS_INB_DW_UNLOCK(bus->inb_lock, flags);
+ ret = BCME_ERROR;
+ goto exit;
+ }
+ dhdpcie_bus_set_pcie_inband_dw_state(bus,
+ DW_DEVICE_DS_DISABLED_WAIT);
+ bus->inband_dw_assert_cnt++;
+ } else {
+ DHD_INFO(("Not in DS SLEEP state \n"));
+ DHD_BUS_INB_DW_UNLOCK(bus->inb_lock, flags);
+ ret = BCME_OK;
+ goto exit;
}
- next = dll_next_p(item);
- flow_ring_node = dhd_constlist_to_flowring(item);
+ /*
+ * Since we are going to wait/sleep .. release the lock.
+ * The Device Wake sanity is still valid, because
+		 * a) If there is another context that comes in and tries
+		 *    to assert DS again and it gets the lock, then since
+		 *    ds_state would now be != DW_DEVICE_DS_DEV_SLEEP, that
+		 *    context would return saying Not in DS Sleep.
+		 * b) If there is another context that comes in and tries
+		 *    to de-assert DS and gets the lock, then since the
+		 *    ds_state is != DW_DEVICE_DS_DEV_WAKE, that context
+		 *    would return too. This cannot happen
+ * since the watchdog is the only context that can
+ * De-Assert Device Wake and as the first step of
+ * Asserting the Device Wake, we have pushed out the
+ * Door Bell Timeout.
+ *
+ */
+ DHD_BUS_INB_DW_UNLOCK(bus->inb_lock, flags);
- /* Ensure that flow_ring_node in the list is Not Null */
- ASSERT(flow_ring_node != NULL);
+ if (!CAN_SLEEP()) {
+ /* Called from context that cannot sleep */
+ OSL_DELAY(1000);
+ bus->wait_for_ds_exit = 1;
+ } else {
+ /* Wait for DS EXIT for DS_EXIT_TIMEOUT seconds */
+ timeleft = dhd_os_ds_exit_wait(bus->dhd, &bus->wait_for_ds_exit);
+ if (!bus->wait_for_ds_exit && timeleft == 0) {
+ DHD_ERROR(("DS-EXIT timeout\n"));
+ bus->inband_ds_exit_to_cnt++;
+ bus->ds_exit_timeout = 0;
+ ret = BCME_ERROR;
+ goto exit;
+ }
+ }
- /* Ensure that the flowring node has valid contents */
- ASSERT(flow_ring_node->prot_info != NULL);
+ DHD_BUS_INB_DW_LOCK(bus->inb_lock, flags);
+ dhdpcie_bus_set_pcie_inband_dw_state(bus,
+ DW_DEVICE_DS_DEV_WAKE);
+ DHD_BUS_INB_DW_UNLOCK(bus->inb_lock, flags);
- dhd_prot_update_txflowring(dhd, flow_ring_node->flowid, flow_ring_node->prot_info);
+ ret = BCME_OK;
+ } else {
+ DHD_BUS_INB_DW_LOCK(bus->inb_lock, flags);
+ if ((dhdpcie_bus_get_pcie_inband_dw_state(bus) ==
+ DW_DEVICE_DS_DEV_WAKE)) {
+ ret = dhdpcie_send_mb_data(bus, H2DMB_DS_DEVICE_WAKE_DEASSERT);
+ if (ret != BCME_OK) {
+ DHD_ERROR(("Failed: deassert Inband device_wake\n"));
+ DHD_BUS_INB_DW_UNLOCK(bus->inb_lock, flags);
+ goto exit;
+ }
+ dhdpcie_bus_set_pcie_inband_dw_state(bus,
+ DW_DEVICE_DS_ACTIVE);
+ bus->inband_dw_deassert_cnt++;
+ } else if ((dhdpcie_bus_get_pcie_inband_dw_state(bus) ==
+ DW_DEVICE_DS_DEV_SLEEP_PEND) &&
+ (bus->host_active_cnt == 0)) {
+ dhdpcie_bus_set_pcie_inband_dw_state(bus, DW_DEVICE_DS_DEV_SLEEP);
+ dhdpcie_send_mb_data(bus, H2D_HOST_DS_ACK);
+ }
+
+ ret = BCME_OK;
+ DHD_BUS_INB_DW_UNLOCK(bus->inb_lock, flags);
}
- DHD_FLOWRING_LIST_UNLOCK(bus->dhd->flowring_list_lock, flags);
+
+exit:
+ return ret;
}
+#endif /* PCIE_INB_DW */
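
dhd_bus_inb_set_device_wake() drives the in-band device-wake handshake: wake is asserted only from DS_DEV_SLEEP, the host then parks in DS_DISABLED_WAIT until the dongle's DS-EXIT note moves it to DS_DEV_WAKE, and de-assert returns the link to DS_ACTIVE. A simplified, standalone model of those transitions (the state names follow the driver's DW_DEVICE_* enum, everything else is a sketch):

#include <stdbool.h>

/* Simplified model of the in-band device-wake states used above. */
enum ds_state {
	DS_ACTIVE,          /* dongle awake, doorbells allowed           */
	DS_DEV_SLEEP_PEND,  /* dongle asked to sleep, host not idle yet  */
	DS_DEV_SLEEP,       /* deep-sleep acked, device wake de-asserted */
	DS_DISABLED_WAIT,   /* wake asserted, waiting for DS-EXIT        */
	DS_DEV_WAKE         /* DS-EXIT seen, dongle fully awake          */
};

/* Host asserts device wake: only legal from DS_DEV_SLEEP.  Returns the
 * new state, or the current one if nothing had to be done. */
static enum ds_state assert_device_wake(enum ds_state cur, bool *sent_wake)
{
	*sent_wake = false;
	if (cur == DS_DEV_SLEEP) {
		*sent_wake = true;          /* H2D "device wake assert" mailbox */
		return DS_DISABLED_WAIT;    /* now wait for the DS-EXIT note    */
	}
	return cur;                         /* already awake or on its way      */
}

/* Dongle's DS-EXIT note completes the handshake. */
static enum ds_state handle_ds_exit(enum ds_state cur)
{
	return (cur == DS_DISABLED_WAIT || cur == DS_DEV_SLEEP) ? DS_DEV_WAKE : cur;
}

/* Host de-asserts device wake: only meaningful from DS_DEV_WAKE. */
static enum ds_state deassert_device_wake(enum ds_state cur)
{
	return (cur == DS_DEV_WAKE) ? DS_ACTIVE : cur;
}
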
-/** Mailbox ringbell Function */
-static void
-dhd_bus_gen_devmb_intr(struct dhd_bus *bus)
+
+#if defined(PCIE_OOB) || defined(PCIE_INB_DW)
+int
+dhd_bus_set_device_wake(struct dhd_bus *bus, bool val)
{
- if ((bus->sih->buscorerev == 2) || (bus->sih->buscorerev == 6) ||
- (bus->sih->buscorerev == 4)) {
- DHD_ERROR(("%s: mailbox communication not supported\n", __FUNCTION__));
- return;
- }
- if (bus->db1_for_mb) {
- /* this is a pcie core register, not the config register */
- DHD_INFO(("%s: writing a mail box interrupt to the device, through doorbell 1\n", __FUNCTION__));
- if (DAR_PWRREQ(bus)) {
- dhd_bus_pcie_pwr_req(bus);
+ if (bus->ds_enabled) {
+#ifdef PCIE_INB_DW
+ if (INBAND_DW_ENAB(bus)) {
+ return dhd_bus_inb_set_device_wake(bus, val);
}
- si_corereg(bus->sih, bus->sih->buscoreidx, dhd_bus_db1_addr_get(bus),
- ~0, 0x12345678);
- } else {
- DHD_INFO(("%s: writing a mail box interrupt to the device, through config space\n", __FUNCTION__));
- dhdpcie_bus_cfg_write_dword(bus, PCISBMbx, 4, (1 << 0));
- dhdpcie_bus_cfg_write_dword(bus, PCISBMbx, 4, (1 << 0));
+#endif /* PCIE_INB_DW */
+#ifdef PCIE_OOB
+ if (OOB_DW_ENAB(bus)) {
+ return dhd_os_oob_set_device_wake(bus, val);
+ }
+#endif /* PCIE_OOB */
}
+ return BCME_OK;
}
-
-/* Upon receiving a mailbox interrupt,
- * if H2D_FW_TRAP bit is set in mailbox location
- * device traps
- */
-static void
-dhdpcie_fw_trap(dhd_bus_t *bus)
-{
- /* Send the mailbox data and generate mailbox intr. */
- dhdpcie_send_mb_data(bus, H2D_FW_TRAP);
- /* For FWs that cannot interprete H2D_FW_TRAP */
- (void)dhd_wl_ioctl_set_intiovar(bus->dhd, "bus:disconnect", 99, WLC_SET_VAR, TRUE, 0);
-}
+#endif /* defined(PCIE_OOB) || defined(PCIE_INB_DW) */
/** mailbox doorbell ring function */
void
dhd_bus_ringbell(struct dhd_bus *bus, uint32 value)
{
/* Skip after sending D3_INFORM */
- if (bus->bus_low_power_state != DHD_BUS_NO_LOW_POWER_STATE) {
- DHD_ERROR(("%s: trying to ring the doorbell after D3 inform %d\n",
- __FUNCTION__, bus->bus_low_power_state));
- return;
- }
-
- /* Skip in the case of link down */
- if (bus->is_linkdown) {
- DHD_ERROR(("%s: PCIe link was down\n", __FUNCTION__));
+ if (bus->dhd->busstate == DHD_BUS_SUSPEND || bus->d3_suspend_pending) {
+ DHD_ERROR(("%s: trying to ring the doorbell when in suspend state :"
+ "busstate=%d, d3_suspend_pending=%d\n",
+ __FUNCTION__, bus->dhd->busstate, bus->d3_suspend_pending));
return;
}
-
if ((bus->sih->buscorerev == 2) || (bus->sih->buscorerev == 6) ||
(bus->sih->buscorerev == 4)) {
- si_corereg(bus->sih, bus->sih->buscoreidx, bus->pcie_mailbox_int,
- PCIE_INTB, PCIE_INTB);
+ si_corereg(bus->sih, bus->sih->buscoreidx, PCIMailBoxInt, PCIE_INTB, PCIE_INTB);
} else {
		/* this is a pcie core register, not the config register */
DHD_INFO(("%s: writing a door bell to the device\n", __FUNCTION__));
if (IDMA_ACTIVE(bus->dhd)) {
- if (DAR_PWRREQ(bus)) {
- dhd_bus_pcie_pwr_req(bus);
- }
- si_corereg(bus->sih, bus->sih->buscoreidx, dhd_bus_db0_addr_2_get(bus),
+ si_corereg(bus->sih, bus->sih->buscoreidx, PCIH2D_MailBox_2,
~0, value);
} else {
- if (DAR_PWRREQ(bus)) {
- dhd_bus_pcie_pwr_req(bus);
- }
si_corereg(bus->sih, bus->sih->buscoreidx,
- dhd_bus_db0_addr_get(bus), ~0, 0x12345678);
+ PCIH2D_MailBox, ~0, 0x12345678);
}
}
}
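
Every ringbell variant above begins with the same check: once H2D_HOST_D3_INFORM has been sent (d3_suspend_pending) or the bus is already in DHD_BUS_SUSPEND, the doorbell write is skipped. The guard in isolation, with a stand-in struct and a placeholder value for DHD_BUS_SUSPEND:

#include <stdbool.h>
#include <stdio.h>

#define BUS_SUSPEND 3   /* placeholder value, not the real DHD_BUS_SUSPEND */

/* Only the field names follow the driver; the struct itself is a stand-in. */
struct bus_model {
	int  busstate;            /* e.g. BUS_SUSPEND                  */
	bool d3_suspend_pending;  /* set when H2D_HOST_D3_INFORM sent  */
};

static bool doorbell_allowed(const struct bus_model *bus)
{
	if (bus->busstate == BUS_SUSPEND || bus->d3_suspend_pending) {
		fprintf(stderr, "skip doorbell: busstate=%d pending=%d\n",
		        bus->busstate, bus->d3_suspend_pending);
		return false;
	}
	return true;
}
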
{
	/* this is a pcie core register, not the config register */
/* Skip after sending D3_INFORM */
- if (bus->bus_low_power_state != DHD_BUS_NO_LOW_POWER_STATE) {
- DHD_ERROR(("%s: trying to ring the doorbell after D3 inform %d\n",
- __FUNCTION__, bus->bus_low_power_state));
- return;
- }
-
- /* Skip in the case of link down */
- if (bus->is_linkdown) {
- DHD_ERROR(("%s: PCIe link was down\n", __FUNCTION__));
+ if (bus->dhd->busstate == DHD_BUS_SUSPEND || bus->d3_suspend_pending) {
+ DHD_ERROR(("%s: trying to ring the doorbell when in suspend state :"
+ "busstate=%d, d3_suspend_pending=%d\n",
+ __FUNCTION__, bus->dhd->busstate, bus->d3_suspend_pending));
return;
}
-
DHD_INFO(("writing a door bell 2 to the device\n"));
- if (DAR_PWRREQ(bus)) {
- dhd_bus_pcie_pwr_req(bus);
- }
- si_corereg(bus->sih, bus->sih->buscoreidx, dhd_bus_db0_addr_2_get(bus),
+ si_corereg(bus->sih, bus->sih->buscoreidx, PCIH2D_MailBox_2,
~0, value);
}
dhdpcie_bus_ringbell_fast(struct dhd_bus *bus, uint32 value)
{
/* Skip after sending D3_INFORM */
- if (bus->bus_low_power_state != DHD_BUS_NO_LOW_POWER_STATE) {
- DHD_ERROR(("%s: trying to ring the doorbell after D3 inform %d\n",
- __FUNCTION__, bus->bus_low_power_state));
+ if (bus->dhd->busstate == DHD_BUS_SUSPEND || bus->d3_suspend_pending) {
+ DHD_ERROR(("%s: trying to ring the doorbell when in suspend state :"
+ "busstate=%d, d3_suspend_pending=%d\n",
+ __FUNCTION__, bus->dhd->busstate, bus->d3_suspend_pending));
return;
}
-
- /* Skip in the case of link down */
- if (bus->is_linkdown) {
- DHD_ERROR(("%s: PCIe link was down\n", __FUNCTION__));
- return;
- }
-
- if (DAR_PWRREQ(bus)) {
- dhd_bus_pcie_pwr_req(bus);
- }
-
-#ifdef DHD_DB0TS
- if (bus->dhd->db0ts_capable) {
- uint64 ts;
-
- ts = local_clock();
- do_div(ts, 1000);
-
- value = htol32(ts & 0xFFFFFFFF);
- DHD_INFO(("%s: usec timer = 0x%x\n", __FUNCTION__, value));
+#if defined(PCIE_OOB) || defined(PCIE_INB_DW)
+ if (OOB_DW_ENAB(bus)) {
+ dhd_bus_set_device_wake(bus, TRUE);
}
-#endif /* DHD_DB0TS */
+ dhd_bus_doorbell_timeout_reset(bus);
+#endif /* defined(PCIE_OOB) || defined(PCIE_INB_DW) */
W_REG(bus->pcie_mb_intr_osh, bus->pcie_mb_intr_addr, value);
}
dhdpcie_bus_ringbell_2_fast(struct dhd_bus *bus, uint32 value, bool devwake)
{
/* Skip after sending D3_INFORM */
- if (bus->bus_low_power_state != DHD_BUS_NO_LOW_POWER_STATE) {
- DHD_ERROR(("%s: trying to ring the doorbell after D3 inform %d\n",
- __FUNCTION__, bus->bus_low_power_state));
+ if (bus->dhd->busstate == DHD_BUS_SUSPEND || bus->d3_suspend_pending) {
+ DHD_ERROR(("%s: trying to ring the doorbell when in suspend state :"
+ "busstate=%d, d3_suspend_pending=%d\n",
+ __FUNCTION__, bus->dhd->busstate, bus->d3_suspend_pending));
return;
}
-
- /* Skip in the case of link down */
- if (bus->is_linkdown) {
- DHD_ERROR(("%s: PCIe link was down\n", __FUNCTION__));
- return;
+#if defined(PCIE_OOB) || defined(PCIE_INB_DW)
+ if (devwake) {
+ if (OOB_DW_ENAB(bus)) {
+ dhd_bus_set_device_wake(bus, TRUE);
+ }
}
+ dhd_bus_doorbell_timeout_reset(bus);
+#endif /* defined(PCIE_OOB) || defined(PCIE_INB_DW) */
- if (DAR_PWRREQ(bus)) {
- dhd_bus_pcie_pwr_req(bus);
- }
W_REG(bus->pcie_mb_intr_osh, bus->pcie_mb_intr_2_addr, value);
}
{
uint32 w;
/* Skip after sending D3_INFORM */
- if (bus->bus_low_power_state != DHD_BUS_NO_LOW_POWER_STATE) {
- DHD_ERROR(("%s: trying to ring the doorbell after D3 inform %d\n",
- __FUNCTION__, bus->bus_low_power_state));
+ if (bus->dhd->busstate == DHD_BUS_SUSPEND || bus->d3_suspend_pending) {
+ DHD_ERROR(("%s: trying to ring the doorbell when in suspend state :"
+ "busstate=%d, d3_suspend_pending=%d\n",
+ __FUNCTION__, bus->dhd->busstate, bus->d3_suspend_pending));
return;
}
-
- /* Skip in the case of link down */
- if (bus->is_linkdown) {
- DHD_ERROR(("%s: PCIe link was down\n", __FUNCTION__));
- return;
- }
-
w = (R_REG(bus->pcie_mb_intr_osh, bus->pcie_mb_intr_addr) & ~PCIE_INTB) | PCIE_INTB;
W_REG(bus->pcie_mb_intr_osh, bus->pcie_mb_intr_addr, w);
}
if ((bus->sih->buscorerev == 2) || (bus->sih->buscorerev == 6) ||
(bus->sih->buscorerev == 4)) {
bus->pcie_mb_intr_addr = si_corereg_addr(bus->sih, bus->sih->buscoreidx,
- bus->pcie_mailbox_int);
+ PCIMailBoxInt);
if (bus->pcie_mb_intr_addr) {
bus->pcie_mb_intr_osh = si_osh(bus->sih);
return dhd_bus_ringbell_oldpcie;
}
} else {
bus->pcie_mb_intr_addr = si_corereg_addr(bus->sih, bus->sih->buscoreidx,
- dhd_bus_db0_addr_get(bus));
+ PCIH2D_MailBox);
if (bus->pcie_mb_intr_addr) {
bus->pcie_mb_intr_osh = si_osh(bus->sih);
return dhdpcie_bus_ringbell_fast;
dhd_bus_get_mbintr_2_fn(struct dhd_bus *bus)
{
bus->pcie_mb_intr_2_addr = si_corereg_addr(bus->sih, bus->sih->buscoreidx,
- dhd_bus_db0_addr_2_get(bus));
+ PCIH2D_MailBox_2);
if (bus->pcie_mb_intr_2_addr) {
bus->pcie_mb_intr_osh = si_osh(bus->sih);
return dhdpcie_bus_ringbell_2_fast;
DHD_TRACE(("%s: Enter\n", __FUNCTION__));
- bus->dpc_entry_time = OSL_LOCALTIME_NS();
-
DHD_GENERAL_LOCK(bus->dhd, flags);
/* Check for only DHD_BUS_DOWN and not for DHD_BUS_DOWN_IN_PROGRESS
* to avoid IOCTL Resumed On timeout when ioctl is waiting for response
bus->dpc_return_busdown_count++;
return 0;
}
+#ifdef DHD_PCIE_RUNTIMEPM
+ bus->idlecount = 0;
+#endif /* DHD_PCIE_RUNTIMEPM */
DHD_BUS_BUSY_SET_IN_DPC(bus->dhd);
DHD_GENERAL_UNLOCK(bus->dhd, flags);
INTR_ON:
#endif /* DHD_READ_INTSTATUS_IN_DPC */
bus->dpc_intr_enable_count++;
- /* For Linux, Macos etc (otherthan NDIS) enable back the host interrupts
- * which has been disabled in the dhdpcie_bus_isr()
- */
- dhdpcie_enable_irq(bus); /* Enable back interrupt!! */
- bus->dpc_exit_time = OSL_LOCALTIME_NS();
- } else {
- bus->resched_dpc_time = OSL_LOCALTIME_NS();
+ dhdpcie_bus_intr_enable(bus); /* Enable back interrupt using Intmask!! */
}
- bus->dpc_sched = resched;
-
DHD_GENERAL_LOCK(bus->dhd, flags);
DHD_BUS_BUSY_CLEAR_IN_DPC(bus->dhd);
dhd_os_busbusy_wake(bus->dhd);
}
+
int
dhdpcie_send_mb_data(dhd_bus_t *bus, uint32 h2d_mb_data)
{
uint32 cur_h2d_mb_data = 0;
+ unsigned long flags;
DHD_INFO_HW4(("%s: H2D_MB_DATA: 0x%08X\n", __FUNCTION__, h2d_mb_data));
return BCME_ERROR;
}
+ DHD_GENERAL_LOCK(bus->dhd, flags);
+
if (bus->api.fw_rev >= PCIE_SHARED_VERSION_6 && !bus->use_mailbox) {
DHD_INFO(("API rev is 6, sending mb data as H2D Ctrl message to dongle, 0x%04x\n",
h2d_mb_data));
/* Prevent asserting device_wake during doorbell ring for mb data to avoid loop. */
- {
- if (dhd_prot_h2d_mbdata_send_ctrlmsg(bus->dhd, h2d_mb_data)) {
- DHD_ERROR(("failure sending the H2D Mailbox message "
- "to firmware\n"));
- goto fail;
- }
+#ifdef PCIE_OOB
+ bus->oob_enabled = FALSE;
+#endif /* PCIE_OOB */
+ if (dhd_prot_h2d_mbdata_send_ctrlmsg(bus->dhd, h2d_mb_data)) {
+ DHD_ERROR(("failure sending the H2D Mailbox message to firmware\n"));
+ goto fail;
}
+#ifdef PCIE_OOB
+ bus->oob_enabled = TRUE;
+#endif /* PCIE_OOB */
goto done;
}
done:
if (h2d_mb_data == H2D_HOST_D3_INFORM) {
DHD_INFO_HW4(("%s: send H2D_HOST_D3_INFORM to dongle\n", __FUNCTION__));
- bus->last_d3_inform_time = OSL_LOCALTIME_NS();
+		/* Mark D3_INFORM inside the lock so that the doorbell
+		 * paths skip ringing the H2D DB after D3_INFORM
+		 */
+ bus->d3_suspend_pending = TRUE;
bus->d3_inform_cnt++;
}
if (h2d_mb_data == H2D_HOST_D0_INFORM_IN_USE) {
DHD_INFO_HW4(("%s: send H2D_HOST_D0_INFORM to dongle\n", __FUNCTION__));
bus->d0_inform_cnt++;
}
+ DHD_GENERAL_UNLOCK(bus->dhd, flags);
return BCME_OK;
+
fail:
+ DHD_GENERAL_UNLOCK(bus->dhd, flags);
return BCME_ERROR;
}
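
dhdpcie_send_mb_data() now wraps the send in DHD_GENERAL_LOCK so that marking d3_suspend_pending and pushing the mailbox word are one atomic step with respect to the doorbell paths that test the flag. A compact sketch of that idea, using a user-space pthread mutex as a stand-in for the general lock and a placeholder H2D_HOST_D3_INFORM value:

#include <pthread.h>
#include <stdbool.h>
#include <stdint.h>

#define H2D_D3_INFORM 0x1u  /* placeholder, not the real H2D_HOST_D3_INFORM */

struct mb_ctx {
	pthread_mutex_t lock;       /* initialise with PTHREAD_MUTEX_INITIALIZER */
	bool d3_suspend_pending;
	uint32_t d3_inform_cnt;
};

/* Send the word and, if it is the D3 inform, flag the pending suspend in
 * the same critical section the doorbell paths read the flag under. */
static void send_mb_data(struct mb_ctx *ctx, uint32_t h2d_mb_data,
                         void (*ring_mailbox)(uint32_t))
{
	pthread_mutex_lock(&ctx->lock);
	ring_mailbox(h2d_mb_data);
	if (h2d_mb_data == H2D_D3_INFORM) {
		ctx->d3_suspend_pending = true;   /* doorbells skipped from now on */
		ctx->d3_inform_cnt++;
	}
	pthread_mutex_unlock(&ctx->lock);
}
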
-static void
-dhd_bus_handle_d3_ack(dhd_bus_t *bus)
-{
- unsigned long flags_bus;
- DHD_BUS_LOCK(bus->bus_lock, flags_bus);
- bus->suspend_intr_disable_count++;
- /* Disable dongle Interrupts Immediately after D3 */
-
- /* For Linux, Macos etc (otherthan NDIS) along with disabling
- * dongle interrupt by clearing the IntMask, disable directly
- * interrupt from the host side as well. Also clear the intstatus
- * if it is set to avoid unnecessary intrrupts after D3 ACK.
- */
- dhdpcie_bus_intr_disable(bus); /* Disable interrupt using IntMask!! */
- dhdpcie_bus_clear_intstatus(bus);
- dhdpcie_disable_irq_nosync(bus); /* Disable host interrupt!! */
-
- if (bus->dhd->dhd_induce_error != DHD_INDUCE_D3_ACK_TIMEOUT) {
- /* Set bus_low_power_state to DHD_BUS_D3_ACK_RECIEVED */
- bus->bus_low_power_state = DHD_BUS_D3_ACK_RECIEVED;
- DHD_ERROR(("%s: D3_ACK Recieved\n", __FUNCTION__));
- }
- DHD_BUS_UNLOCK(bus->bus_lock, flags_bus);
- /* Check for D3 ACK induce flag, which is set by firing dhd iovar to induce D3 Ack timeout.
- * If flag is set, D3 wake is skipped, which results in to D3 Ack timeout.
- */
- if (bus->dhd->dhd_induce_error != DHD_INDUCE_D3_ACK_TIMEOUT) {
- bus->wait_for_d3_ack = 1;
- dhd_os_d3ack_wake(bus->dhd);
- } else {
- DHD_ERROR(("%s: Inducing D3 ACK timeout\n", __FUNCTION__));
- }
-}
void
dhd_bus_handle_mb_data(dhd_bus_t *bus, uint32 d2h_mb_data)
{
- if (MULTIBP_ENAB(bus->sih)) {
- dhd_bus_pcie_pwr_req(bus);
- }
-
+#ifdef PCIE_INB_DW
+ unsigned long flags = 0;
+#endif
DHD_INFO(("D2H_MB_DATA: 0x%04x\n", d2h_mb_data));
- if (d2h_mb_data & D2H_DEV_FWHALT) {
+ if (d2h_mb_data & D2H_DEV_FWHALT) {
DHD_ERROR(("FW trap has happened\n"));
dhdpcie_checkdied(bus, NULL, 0);
+#ifdef SUPPORT_LINKDOWN_RECOVERY
+#ifdef CONFIG_ARCH_MSM
+ bus->no_cfg_restore = 1;
+#endif /* CONFIG_ARCH_MSM */
+#endif /* SUPPORT_LINKDOWN_RECOVERY */
dhd_os_check_hang(bus->dhd, 0, -EREMOTEIO);
- goto exit;
+ return;
}
if (d2h_mb_data & D2H_DEV_DS_ENTER_REQ) {
- bool ds_acked = FALSE;
- BCM_REFERENCE(ds_acked);
- if (bus->bus_low_power_state == DHD_BUS_D3_ACK_RECIEVED) {
+ if ((bus->dhd->busstate == DHD_BUS_SUSPEND || bus->d3_suspend_pending) &&
+ bus->wait_for_d3_ack) {
DHD_ERROR(("DS-ENTRY AFTER D3-ACK!!!!! QUITING\n"));
- DHD_ERROR(("%s: making DHD_BUS_DOWN\n", __FUNCTION__));
bus->dhd->busstate = DHD_BUS_DOWN;
- goto exit;
+ return;
}
/* what should we do */
DHD_INFO(("D2H_MB_DATA: DEEP SLEEP REQ\n"));
+#ifdef PCIE_INB_DW
+ if (INBAND_DW_ENAB(bus)) {
+ DHD_BUS_INB_DW_LOCK(bus->inb_lock, flags);
+ if (dhdpcie_bus_get_pcie_inband_dw_state(bus) == DW_DEVICE_DS_ACTIVE) {
+ dhdpcie_bus_set_pcie_inband_dw_state(bus,
+ DW_DEVICE_DS_DEV_SLEEP_PEND);
+ if (bus->host_active_cnt == 0) {
+ dhdpcie_bus_set_pcie_inband_dw_state(bus,
+ DW_DEVICE_DS_DEV_SLEEP);
+ dhdpcie_send_mb_data(bus, H2D_HOST_DS_ACK);
+ }
+ }
+ DHD_BUS_INB_DW_UNLOCK(bus->inb_lock, flags);
+ dhd_os_ds_enter_wake(bus->dhd);
+ } else
+#endif /* PCIE_INB_DW */
{
dhdpcie_send_mb_data(bus, H2D_HOST_DS_ACK);
- DHD_INFO(("D2H_MB_DATA: sent DEEP SLEEP ACK\n"));
}
+ if (IDMA_DS_ENAB(bus->dhd)) {
+ bus->dongle_in_ds = TRUE;
+ }
+ DHD_INFO(("D2H_MB_DATA: sent DEEP SLEEP ACK\n"));
}
if (d2h_mb_data & D2H_DEV_DS_EXIT_NOTE) {
/* what should we do */
+ bus->dongle_in_ds = FALSE;
DHD_INFO(("D2H_MB_DATA: DEEP SLEEP EXIT\n"));
+#ifdef PCIE_INB_DW
+ if (INBAND_DW_ENAB(bus)) {
+ bus->inband_ds_exit_device_cnt++;
+ DHD_BUS_INB_DW_LOCK(bus->inb_lock, flags);
+ if (dhdpcie_bus_get_pcie_inband_dw_state(bus) ==
+ DW_DEVICE_DS_DISABLED_WAIT) {
+				/* Wake up only if someone is waiting in the
+				 * DW_DEVICE_DS_DISABLED_WAIT state; in that case
+				 * the waiter will change the state to
+				 * DW_DEVICE_DS_DEV_WAKE.
+				 */
+ bus->wait_for_ds_exit = 1;
+ DHD_BUS_INB_DW_UNLOCK(bus->inb_lock, flags);
+ dhd_os_ds_exit_wake(bus->dhd);
+ } else {
+ DHD_INFO(("D2H_MB_DATA: not in DW_DEVICE_DS_DISABLED_WAIT!\n"));
+ /*
+ * If there is no one waiting, then update the state from here
+ */
+ bus->wait_for_ds_exit = 1;
+ dhdpcie_bus_set_pcie_inband_dw_state(bus,
+ DW_DEVICE_DS_DEV_WAKE);
+ DHD_BUS_INB_DW_UNLOCK(bus->inb_lock, flags);
+ }
+ }
+#endif /* PCIE_INB_DW */
}
if (d2h_mb_data & D2HMB_DS_HOST_SLEEP_EXIT_ACK) {
/* what should we do */
DHD_INFO(("D2H_MB_DATA: D0 ACK\n"));
+#ifdef PCIE_INB_DW
+ if (INBAND_DW_ENAB(bus)) {
+ DHD_BUS_INB_DW_LOCK(bus->inb_lock, flags);
+ if (dhdpcie_bus_get_pcie_inband_dw_state(bus) ==
+ DW_DEVICE_HOST_WAKE_WAIT) {
+ dhdpcie_bus_set_pcie_inband_dw_state(bus, DW_DEVICE_DS_ACTIVE);
+ }
+ DHD_BUS_INB_DW_UNLOCK(bus->inb_lock, flags);
+ }
+#endif /* PCIE_INB_DW */
}
if (d2h_mb_data & D2H_DEV_D3_ACK) {
/* what should we do */
DHD_INFO_HW4(("D2H_MB_DATA: D3 ACK\n"));
if (!bus->wait_for_d3_ack) {
- dhd_bus_handle_d3_ack(bus);
+ /* Disable dongle Interrupts Immediately after D3 */
+ bus->suspend_intr_disable_count++;
+ dhdpcie_bus_intr_disable(bus);
+#if defined(DHD_HANG_SEND_UP_TEST)
+ if (bus->dhd->req_hang_type == HANG_REASON_D3_ACK_TIMEOUT) {
+ DHD_ERROR(("TEST HANG: Skip to process D3 ACK\n"));
+ } else {
+ bus->wait_for_d3_ack = 1;
+ dhd_os_d3ack_wake(bus->dhd);
+ }
+#else /* DHD_HANG_SEND_UP_TEST */
+ bus->wait_for_d3_ack = 1;
+ dhd_os_d3ack_wake(bus->dhd);
+#endif /* DHD_HANG_SEND_UP_TEST */
}
}
-
-exit:
- if (MULTIBP_ENAB(bus->sih)) {
- dhd_bus_pcie_pwr_req_clear(bus);
- }
}
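
dhd_bus_handle_mb_data() treats the D2H word as a bit set rather than a single code, since the dongle can pack several notifications into one mailbox write. The decode in isolation, with placeholder bit values instead of the real D2H_DEV_* masks:

#include <stdint.h>
#include <stdio.h>

/* Placeholder bit assignments; the real D2H_DEV_* masks live in the
 * Broadcom message-buffer headers. */
#define DEV_FWHALT        (1u << 0)
#define DEV_DS_ENTER_REQ  (1u << 1)
#define DEV_DS_EXIT_NOTE  (1u << 2)
#define DEV_D3_ACK        (1u << 3)

/* Test every bit independently; more than one may be set at once. */
static void handle_d2h_word(uint32_t d2h)
{
	if (d2h & DEV_FWHALT)
		printf("firmware trap: collect dump, report hang\n");
	if (d2h & DEV_DS_ENTER_REQ)
		printf("deep-sleep request: ack with H2D_HOST_DS_ACK\n");
	if (d2h & DEV_DS_EXIT_NOTE)
		printf("deep-sleep exit: dongle awake again\n");
	if (d2h & DEV_D3_ACK)
		printf("D3 ack: disable dongle interrupts, wake D3 waiter\n");
}
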
static void
{
uint32 d2h_mb_data = 0;
uint32 zero = 0;
-
- if (MULTIBP_ENAB(bus->sih)) {
- dhd_bus_pcie_pwr_req(bus);
- }
-
dhd_bus_cmn_readshared(bus, &d2h_mb_data, D2H_MB_DATA, 0);
if (D2H_DEV_MB_INVALIDATED(d2h_mb_data)) {
DHD_ERROR(("%s: Invalid D2H_MB_DATA: 0x%08x\n",
__FUNCTION__, d2h_mb_data));
- goto exit;
+ return;
}
dhd_bus_cmn_writeshared(bus, &zero, sizeof(uint32), D2H_MB_DATA, 0);
DHD_ERROR(("FW trap has happened\n"));
dhdpcie_checkdied(bus, NULL, 0);
/* not ready yet dhd_os_ind_firmware_stall(bus->dhd); */
- goto exit;
+ return;
}
if (d2h_mb_data & D2H_DEV_DS_ENTER_REQ) {
/* what should we do */
DHD_INFO(("%s: D2H_MB_DATA: DEEP SLEEP REQ\n", __FUNCTION__));
dhdpcie_send_mb_data(bus, H2D_HOST_DS_ACK);
+ if (IDMA_DS_ENAB(bus->dhd)) {
+ bus->dongle_in_ds = TRUE;
+ }
DHD_INFO(("%s: D2H_MB_DATA: sent DEEP SLEEP ACK\n", __FUNCTION__));
}
if (d2h_mb_data & D2H_DEV_DS_EXIT_NOTE) {
/* what should we do */
DHD_INFO(("%s: D2H_MB_DATA: DEEP SLEEP EXIT\n", __FUNCTION__));
+ bus->dongle_in_ds = FALSE;
}
if (d2h_mb_data & D2H_DEV_D3_ACK) {
/* what should we do */
DHD_INFO_HW4(("%s: D2H_MB_DATA: D3 ACK\n", __FUNCTION__));
if (!bus->wait_for_d3_ack) {
- dhd_bus_handle_d3_ack(bus);
+#if defined(DHD_HANG_SEND_UP_TEST)
+ if (bus->dhd->req_hang_type == HANG_REASON_D3_ACK_TIMEOUT) {
+ DHD_ERROR(("TEST HANG: Skip to process D3 ACK\n"));
+ } else {
+ bus->wait_for_d3_ack = 1;
+ dhd_os_d3ack_wake(bus->dhd);
+ }
+#else /* DHD_HANG_SEND_UP_TEST */
+ bus->wait_for_d3_ack = 1;
+ dhd_os_d3ack_wake(bus->dhd);
+#endif /* DHD_HANG_SEND_UP_TEST */
}
}
-
-exit:
- if (MULTIBP_ENAB(bus->sih)) {
- dhd_bus_pcie_pwr_req_clear(bus);
- }
}
static void
uint32 d2h_mb_data = 0;
uint32 zero = 0;
- if (bus->is_linkdown) {
- DHD_ERROR(("%s: PCIe link is down\n", __FUNCTION__));
- return;
- }
-
- if (MULTIBP_ENAB(bus->sih)) {
- dhd_bus_pcie_pwr_req(bus);
- }
-
dhd_bus_cmn_readshared(bus, &d2h_mb_data, D2H_MB_DATA, 0);
- if (!d2h_mb_data) {
- goto exit;
- }
+ if (!d2h_mb_data)
+ return;
dhd_bus_cmn_writeshared(bus, &zero, sizeof(uint32), D2H_MB_DATA, 0);
dhd_bus_handle_mb_data(bus, d2h_mb_data);
-
-exit:
- if (MULTIBP_ENAB(bus->sih)) {
- dhd_bus_pcie_pwr_req_clear(bus);
- }
}
static bool
dhdpcie_bus_process_mailbox_intr(dhd_bus_t *bus, uint32 intstatus)
{
bool resched = FALSE;
- unsigned long flags_bus;
- if (MULTIBP_ENAB(bus->sih)) {
- dhd_bus_pcie_pwr_req(bus);
- }
if ((bus->sih->buscorerev == 2) || (bus->sih->buscorerev == 6) ||
(bus->sih->buscorerev == 4)) {
/* Msg stream interrupt */
if (intstatus & (PCIE_MB_TOPCIE_FN0_0 | PCIE_MB_TOPCIE_FN0_1))
bus->api.handle_mb_data(bus);
- /* Do no process any rings after recieving D3_ACK */
- DHD_BUS_LOCK(bus->bus_lock, flags_bus);
- if (bus->bus_low_power_state == DHD_BUS_D3_ACK_RECIEVED) {
- DHD_ERROR(("%s: D3 Ack Recieved. "
- "Skip processing rest of ring buffers.\n", __FUNCTION__));
- DHD_BUS_UNLOCK(bus->bus_lock, flags_bus);
+ if (bus->dhd->busstate == DHD_BUS_SUSPEND) {
goto exit;
}
- DHD_BUS_UNLOCK(bus->bus_lock, flags_bus);
- /* Validate intstatus only for INTX case */
- if ((bus->d2h_intr_method == PCIE_MSI) ||
- ((bus->d2h_intr_method == PCIE_INTX) && (intstatus & bus->d2h_mb_mask))) {
-#ifdef DHD_PCIE_NATIVE_RUNTIMEPM
- if (pm_runtime_get(dhd_bus_to_dev(bus)) >= 0) {
- resched = dhdpci_bus_read_frames(bus);
- pm_runtime_mark_last_busy(dhd_bus_to_dev(bus));
- pm_runtime_put_autosuspend(dhd_bus_to_dev(bus));
- }
-#else
+ if (intstatus & PCIE_MB_D2H_MB_MASK) {
resched = dhdpci_bus_read_frames(bus);
-#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
}
}
exit:
- if (MULTIBP_ENAB(bus->sih)) {
- dhd_bus_pcie_pwr_req_clear(bus);
- }
return resched;
}
-#if defined(DHD_H2D_LOG_TIME_SYNC)
-static void
-dhdpci_bus_rte_log_time_sync_poll(dhd_bus_t *bus)
-{
- unsigned long time_elapsed;
-
- /* Poll for timeout value periodically */
- if ((bus->dhd->busstate == DHD_BUS_DATA) &&
- (bus->dhd->dhd_rte_time_sync_ms != 0) &&
- (bus->bus_low_power_state == DHD_BUS_NO_LOW_POWER_STATE)) {
- time_elapsed = OSL_SYSUPTIME_US() - bus->dhd_rte_time_sync_count;
- /* Compare time is milli seconds */
- if ((time_elapsed / 1000) >= bus->dhd->dhd_rte_time_sync_ms) {
- /*
- * Its fine, if it has crossed the timeout value. No need to adjust the
- * elapsed time
- */
- bus->dhd_rte_time_sync_count += time_elapsed;
-
- /* Schedule deffered work. Work function will send IOVAR. */
- dhd_h2d_log_time_sync_deferred_wq_schedule(bus->dhd);
- }
- }
-}
-#endif /* DHD_H2D_LOG_TIME_SYNC */
-
static bool
dhdpci_bus_read_frames(dhd_bus_t *bus)
{
bool more = FALSE;
- unsigned long flags_bus;
/* First check if there a FW trap */
if ((bus->api.fw_rev >= PCIE_SHARED_VERSION_6) &&
(bus->dhd->dongle_trap_data = dhd_prot_process_trapbuf(bus->dhd))) {
-#ifdef DNGL_AXI_ERROR_LOGGING
- if (bus->dhd->axi_error) {
- DHD_ERROR(("AXI Error happened\n"));
- return FALSE;
- }
-#endif /* DNGL_AXI_ERROR_LOGGING */
dhd_bus_handle_mb_data(bus, D2H_DEV_FWHALT);
return FALSE;
}
DHD_PERIM_LOCK_ALL((bus->dhd->fwder_unit % FWDER_MAX_UNIT));
dhd_prot_process_ctrlbuf(bus->dhd);
- bus->last_process_ctrlbuf_time = OSL_LOCALTIME_NS();
/* Unlock to give chance for resp to be handled */
DHD_PERIM_UNLOCK_ALL((bus->dhd->fwder_unit % FWDER_MAX_UNIT));
- /* Do not process rest of ring buf once bus enters low power state (D3_INFORM/D3_ACK) */
- DHD_BUS_LOCK(bus->bus_lock, flags_bus);
- if (bus->bus_low_power_state != DHD_BUS_NO_LOW_POWER_STATE) {
- DHD_ERROR(("%s: Bus is in power save state (%d). "
- "Skip processing rest of ring buffers.\n",
- __FUNCTION__, bus->bus_low_power_state));
- DHD_BUS_UNLOCK(bus->bus_lock, flags_bus);
- return FALSE;
- }
- DHD_BUS_UNLOCK(bus->bus_lock, flags_bus);
-
DHD_PERIM_LOCK_ALL((bus->dhd->fwder_unit % FWDER_MAX_UNIT));
/* update the flow ring cpls */
dhd_update_txflowrings(bus->dhd);
- bus->last_process_flowring_time = OSL_LOCALTIME_NS();
/* With heavy TX traffic, we could get a lot of TxStatus
* so add bound
*/
-#ifdef DHD_HP2P
- more |= dhd_prot_process_msgbuf_txcpl(bus->dhd, dhd_txbound, DHD_HP2P_RING);
-#endif /* DHD_HP2P */
- more |= dhd_prot_process_msgbuf_txcpl(bus->dhd, dhd_txbound, DHD_REGULAR_RING);
- bus->last_process_txcpl_time = OSL_LOCALTIME_NS();
+ more |= dhd_prot_process_msgbuf_txcpl(bus->dhd, dhd_txbound);
/* With heavy RX traffic, this routine potentially could spend some time
* processing RX frames without RX bound
*/
-#ifdef DHD_HP2P
- more |= dhd_prot_process_msgbuf_rxcpl(bus->dhd, dhd_rxbound, DHD_HP2P_RING);
-#endif /* DHD_HP2P */
- more |= dhd_prot_process_msgbuf_rxcpl(bus->dhd, dhd_rxbound, DHD_REGULAR_RING);
- bus->last_process_rxcpl_time = OSL_LOCALTIME_NS();
+ more |= dhd_prot_process_msgbuf_rxcpl(bus->dhd, dhd_rxbound);
/* Process info ring completion messages */
-#ifdef EWP_EDL
- if (!bus->dhd->dongle_edl_support)
-#endif // endif
- {
- more |= dhd_prot_process_msgbuf_infocpl(bus->dhd, DHD_INFORING_BOUND);
- bus->last_process_infocpl_time = OSL_LOCALTIME_NS();
- }
-#ifdef EWP_EDL
- else {
- more |= dhd_prot_process_msgbuf_edl(bus->dhd);
- bus->last_process_edl_time = OSL_LOCALTIME_NS();
- }
-#endif /* EWP_EDL */
+ more |= dhd_prot_process_msgbuf_infocpl(bus->dhd, DHD_INFORING_BOUND);
#ifdef IDLE_TX_FLOW_MGMT
if (bus->enable_idle_flowring_mgmt) {
}
DHD_PERIM_UNLOCK_ALL((bus->dhd->fwder_unit % FWDER_MAX_UNIT));
-#if defined(DHD_H2D_LOG_TIME_SYNC)
- dhdpci_bus_rte_log_time_sync_poll(bus);
-#endif /* DHD_H2D_LOG_TIME_SYNC */
+#ifdef SUPPORT_LINKDOWN_RECOVERY
+ if (bus->read_shm_fail) {
+ /* Read interrupt state once again to confirm linkdown */
+ int intstatus = si_corereg(bus->sih, bus->sih->buscoreidx, PCIMailBoxInt, 0, 0);
+ if (intstatus != (uint32)-1) {
+ DHD_ERROR(("%s: read SHM failed but intstatus is valid\n", __FUNCTION__));
+#ifdef DHD_FW_COREDUMP
+ if (bus->dhd->memdump_enabled) {
+ DHD_OS_WAKE_LOCK(bus->dhd);
+ bus->dhd->memdump_type = DUMP_TYPE_READ_SHM_FAIL;
+ dhd_bus_mem_dump(bus->dhd);
+ DHD_OS_WAKE_UNLOCK(bus->dhd);
+ }
+#endif /* DHD_FW_COREDUMP */
+ bus->dhd->hang_reason = HANG_REASON_PCIE_LINK_DOWN;
+ dhd_os_send_hang_message(bus->dhd);
+ } else {
+ DHD_ERROR(("%s: Link is Down.\n", __FUNCTION__));
+#ifdef CONFIG_ARCH_MSM
+ bus->no_cfg_restore = 1;
+#endif /* CONFIG_ARCH_MSM */
+ bus->is_linkdown = 1;
+ bus->dhd->hang_reason = HANG_REASON_PCIE_LINK_DOWN;
+ dhd_os_send_hang_message(bus->dhd);
+ }
+ }
+#endif /* SUPPORT_LINKDOWN_RECOVERY */
return more;
}
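
dhdpci_bus_read_frames() drains each completion ring for at most dhd_txbound/dhd_rxbound messages per DPC pass and reports whether anything was left, which is what drives the resched decision in the caller. The bound-and-resched shape on its own, with a hypothetical ring type:

#include <stdbool.h>

struct ring { int pending; };	/* stand-in for a completion ring */

/* Drain at most 'bound' completions; report whether work remains. */
static bool drain_ring(struct ring *r, int bound)
{
	int done = 0;

	while (r->pending > 0 && done < bound) {
		r->pending--;		/* process one completion */
		done++;
	}
	return r->pending > 0;		/* true -> caller should resched */
}

static bool read_frames(struct ring *txcpl, struct ring *rxcpl,
                        int txbound, int rxbound)
{
	bool more = false;

	more |= drain_ring(txcpl, txbound);
	more |= drain_ring(rxcpl, rxbound);
	return more;
}
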
uint32 shaddr = 0;
pciedev_shared_t *sh = bus->pcie_sh;
dhd_timeout_t tmo;
- bool idma_en = FALSE;
-
- if (MULTIBP_ENAB(bus->sih)) {
- dhd_bus_pcie_pwr_req(bus);
- }
shaddr = bus->dongle_ram_base + bus->ramsize - 4;
/* start a timer for 5 seconds */
addr = LTOH32(dhdpcie_bus_rtcm32(bus, shaddr));
}
- if (addr == (uint32)-1) {
- DHD_ERROR(("%s: PCIe link might be down\n", __FUNCTION__));
- bus->is_linkdown = 1;
- return BCME_ERROR;
- }
-
if ((addr == 0) || (addr == bus->nvram_csm) || (addr < bus->dongle_ram_base) ||
(addr > shaddr)) {
DHD_ERROR(("%s: address (0x%08x) of pciedev_shared invalid\n",
__FUNCTION__, addr));
DHD_ERROR(("%s: Waited %u usec, dongle is not ready\n", __FUNCTION__, tmo.elapsed));
-#ifdef DEBUG_DNGL_INIT_FAIL
- if (addr != (uint32)-1) { /* skip further PCIE reads if read this addr */
- if (bus->dhd->memdump_enabled) {
- bus->dhd->memdump_type = DUMP_TYPE_DONGLE_INIT_FAILURE;
- dhdpcie_mem_dump(bus);
- }
- }
-#endif /* DEBUG_DNGL_INIT_FAIL */
return BCME_ERROR;
} else {
bus->shared_addr = (ulong)addr;
}
#endif /* IDLE_TX_FLOW_MGMT */
- if (IDMA_CAPABLE(bus)) {
- if (bus->sih->buscorerev == 23) {
- } else {
- idma_en = TRUE;
- }
- }
-
- /* TODO: This need to be selected based on IPC instead of compile time */
- bus->dhd->hwa_enable = TRUE;
+ bus->dhd->idma_enable = (sh->flags & PCIE_SHARED_IDMA) ? TRUE : FALSE;
+ bus->dhd->ifrm_enable = (sh->flags & PCIE_SHARED_IFRM) ? TRUE : FALSE;
- if (idma_en) {
- bus->dhd->idma_enable = (sh->flags & PCIE_SHARED_IDMA) ? TRUE : FALSE;
- bus->dhd->ifrm_enable = (sh->flags & PCIE_SHARED_IFRM) ? TRUE : FALSE;
- }
+ bus->dhd->idma_retention_ds = (sh->flags & PCIE_SHARED_IDMA_RETENTION_DS) ? TRUE : FALSE;
bus->dhd->d2h_sync_mode = sh->flags & PCIE_SHARED_D2H_SYNC_MODE_MASK;
- bus->dhd->dar_enable = (sh->flags & PCIE_SHARED_DAR) ? TRUE : FALSE;
-
/* Does the FW support DMA'ing r/w indices */
if (sh->flags & PCIE_SHARED_DMA_INDEX) {
if (!bus->dhd->dma_ring_upd_overwrite) {
bus->dhd->dma_d2h_ring_upd_support = FALSE;
}
- /* Does the firmware support fast delete ring? */
- if (sh->flags2 & PCIE_SHARED2_FAST_DELETE_RING) {
- DHD_INFO(("%s: Firmware supports fast delete ring\n",
- __FUNCTION__));
- bus->dhd->fast_delete_ring_support = TRUE;
- } else {
- DHD_INFO(("%s: Firmware does not support fast delete ring\n",
- __FUNCTION__));
- bus->dhd->fast_delete_ring_support = FALSE;
- }
-
/* get ring_info, ring_state and mb data ptrs and store the addresses in bus structure */
{
ring_info_t ring_info;
- /* boundary check */
- if (sh->rings_info_ptr > shaddr) {
- DHD_ERROR(("%s: rings_info_ptr is invalid 0x%x, skip reading ring info\n",
- __FUNCTION__, sh->rings_info_ptr));
- return BCME_ERROR;
- }
-
if ((rv = dhdpcie_bus_membytes(bus, FALSE, sh->rings_info_ptr,
(uint8 *)&ring_info, sizeof(ring_info_t))) < 0)
return rv;
bus->h2d_mb_data_ptr_addr = ltoh32(sh->h2d_mb_data_ptr);
bus->d2h_mb_data_ptr_addr = ltoh32(sh->d2h_mb_data_ptr);
+
if (bus->api.fw_rev >= PCIE_SHARED_VERSION_6) {
bus->max_tx_flowrings = ltoh16(ring_info.max_tx_flowrings);
bus->max_submission_rings = ltoh16(ring_info.max_submission_queues);
bus->max_completion_rings = BCMPCIE_D2H_COMMON_MSGRINGS;
bus->max_cmn_rings = BCMPCIE_H2D_COMMON_MSGRINGS;
bus->api.handle_mb_data = dhdpcie_handle_mb_data;
- bus->use_mailbox = TRUE;
}
if (bus->max_completion_rings == 0) {
DHD_ERROR(("dongle completion rings are invalid %d\n",
bus->dhd->d2h_hostrdy_supported =
((sh->flags & PCIE_SHARED_HOSTRDY_SUPPORT) == PCIE_SHARED_HOSTRDY_SUPPORT);
- bus->dhd->ext_trap_data_supported =
- ((sh->flags2 & PCIE_SHARED2_EXTENDED_TRAP_DATA) == PCIE_SHARED2_EXTENDED_TRAP_DATA);
+#ifdef PCIE_OOB
+ bus->dhd->d2h_no_oob_dw = (sh->flags & PCIE_SHARED_NO_OOB_DW) ? TRUE : FALSE;
+#endif /* PCIE_OOB */
- if ((sh->flags2 & PCIE_SHARED2_TXSTATUS_METADATA) == 0)
- bus->dhd->pcie_txs_metadata_enable = 0;
-
- bus->dhd->hscb_enable =
- (sh->flags2 & PCIE_SHARED2_HSCB) == PCIE_SHARED2_HSCB;
-
-#ifdef EWP_EDL
- if (host_edl_support) {
- bus->dhd->dongle_edl_support = (sh->flags2 & PCIE_SHARED2_EDL_RING) ? TRUE : FALSE;
- DHD_ERROR(("Dongle EDL support: %u\n", bus->dhd->dongle_edl_support));
- }
-#endif /* EWP_EDL */
-
- bus->dhd->debug_buf_dest_support =
- (sh->flags2 & PCIE_SHARED2_DEBUG_BUF_DEST) ? TRUE : FALSE;
- DHD_ERROR(("FW supports debug buf dest ? %s \n",
- bus->dhd->debug_buf_dest_support ? "Y" : "N"));
-
-#ifdef DHD_HP2P
- if (bus->dhd->hp2p_enable) {
- bus->dhd->hp2p_ts_capable =
- (sh->flags2 & PCIE_SHARED2_PKT_TIMESTAMP) == PCIE_SHARED2_PKT_TIMESTAMP;
- bus->dhd->hp2p_capable =
- (sh->flags2 & PCIE_SHARED2_HP2P) == PCIE_SHARED2_HP2P;
- bus->dhd->hp2p_capable &= bus->dhd->hp2p_ts_capable;
-
- DHD_ERROR(("FW supports HP2P ? %s \n",
- bus->dhd->hp2p_capable ? "Y" : "N"));
-
- if (bus->dhd->hp2p_capable) {
- bus->dhd->pkt_thresh = HP2P_PKT_THRESH;
- bus->dhd->pkt_expiry = HP2P_PKT_EXPIRY;
- bus->dhd->time_thresh = HP2P_TIME_THRESH;
- for (addr = 0; addr < MAX_HP2P_FLOWS; addr++) {
- hp2p_info_t *hp2p_info = &bus->dhd->hp2p_info[addr];
-
- hp2p_info->hrtimer_init = FALSE;
- tasklet_hrtimer_init(&hp2p_info->timer,
- dhd_hp2p_write, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- }
- }
- }
-#endif /* DHD_HP2P */
+#ifdef PCIE_INB_DW
+ bus->dhd->d2h_inband_dw = (sh->flags & PCIE_SHARED_INBAND_DS) ? TRUE : FALSE;
+#endif /* PCIE_INB_DW */
-#ifdef DHD_DB0TS
- bus->dhd->db0ts_capable =
- (sh->flags & PCIE_SHARED_TIMESTAMP_DB0) == PCIE_SHARED_TIMESTAMP_DB0;
-#endif /* DHD_DB0TS */
+#if defined(PCIE_OOB) && defined(PCIE_INB_DW)
+ DHD_ERROR(("FW supports Inband dw ? %s oob dw ? %s\n",
+ bus->dhd->d2h_inband_dw ? "Y":"N",
+ bus->dhd->d2h_no_oob_dw ? "N":"Y"));
+#endif /* defined(PCIE_OOB) && defined(PCIE_INB_DW) */
- if (MULTIBP_ENAB(bus->sih)) {
- dhd_bus_pcie_pwr_req_clear(bus);
+ bus->dhd->ext_trap_data_supported =
+ ((sh->flags2 & PCIE_SHARED2_EXTENDED_TRAP_DATA) == PCIE_SHARED2_EXTENDED_TRAP_DATA);
- /*
- * WAR to fix ARM cold boot;
- * De-assert WL domain in DAR
- */
- if (bus->sih->buscorerev >= 68) {
- dhd_bus_pcie_pwr_req_wl_domain(bus,
- DAR_PCIE_PWR_CTRL((bus->sih)->buscorerev), FALSE);
- }
- }
return BCME_OK;
} /* dhdpcie_readshared */
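
dhdpcie_readshared() turns the flag word the firmware publishes in the shared area into per-feature booleans (IDMA, IFRM, DMA indices, host-ready and so on). The same decode shown standalone, with placeholder bit positions instead of the real PCIE_SHARED_* masks:

#include <stdbool.h>
#include <stdint.h>

/* Placeholder bit positions; the real PCIE_SHARED_* masks come from the
 * shared-structure header the dongle and host agree on. */
#define SHARED_IDMA               (1u << 0)
#define SHARED_IFRM               (1u << 1)
#define SHARED_IDMA_RETENTION_DS  (1u << 2)
#define SHARED_DMA_INDEX          (1u << 3)
#define SHARED_HOSTRDY_SUPPORT    (1u << 4)

struct shared_caps {
	bool idma, ifrm, idma_retention_ds, dma_index, hostrdy;
};

/* One flag word in, one boolean per advertised capability out. */
static struct shared_caps decode_shared_flags(uint32_t flags)
{
	struct shared_caps c = {
		.idma              = !!(flags & SHARED_IDMA),
		.ifrm              = !!(flags & SHARED_IFRM),
		.idma_retention_ds = !!(flags & SHARED_IDMA_RETENTION_DS),
		.dma_index         = !!(flags & SHARED_DMA_INDEX),
		.hostrdy           = !!(flags & SHARED_HOSTRDY_SUPPORT),
	};
	return c;
}
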
if (!bus->dhd)
return 0;
- if (PCIE_RELOAD_WAR_ENAB(bus->sih->buscorerev)) {
- dhd_bus_pcie_pwr_req_clear_reload_war(bus);
- }
-
- if (MULTIBP_ENAB(bus->sih)) {
- dhd_bus_pcie_pwr_req(bus);
- }
-
- /* Configure AER registers to log the TLP header */
- dhd_bus_aer_config(bus);
-
/* Make sure we're talking to the core. */
bus->reg = si_setcore(bus->sih, PCIE2_CORE_ID, 0);
ASSERT(bus->reg != NULL);
ret = dhdpcie_readshared(bus);
if (ret < 0) {
DHD_ERROR(("%s :Shared area read failed \n", __FUNCTION__));
- goto exit;
+ return ret;
}
/* Make sure we're talking to the core. */
bus->reg = si_setcore(bus->sih, PCIE2_CORE_ID, 0);
ASSERT(bus->reg != NULL);
- dhd_init_bus_lock(bus);
-
- dhd_init_backplane_access_lock(bus);
-
/* Set bus state according to enable result */
dhdp->busstate = DHD_BUS_DATA;
- bus->bus_low_power_state = DHD_BUS_NO_LOW_POWER_STATE;
- dhdp->dhd_bus_busy_state = 0;
+ bus->d3_suspend_pending = FALSE;
- /* D11 status via PCIe completion header */
- if ((ret = dhdpcie_init_d11status(bus)) < 0) {
- goto exit;
+#if defined(DBG_PKT_MON) || defined(DHD_PKT_LOGGING)
+ if (bus->pcie_sh->flags2 & PCIE_SHARED_D2H_D11_TX_STATUS) {
+ uint32 flags2 = bus->pcie_sh->flags2;
+ uint32 addr;
+
+ addr = bus->shared_addr + OFFSETOF(pciedev_shared_t, flags2);
+ flags2 |= PCIE_SHARED_H2D_D11_TX_STATUS;
+ ret = dhdpcie_bus_membytes(bus, TRUE, addr,
+ (uint8 *)&flags2, sizeof(flags2));
+ if (ret < 0) {
+ DHD_ERROR(("%s: update flag bit (H2D_D11_TX_STATUS) failed\n",
+ __FUNCTION__));
+ return ret;
+ }
+ bus->pcie_sh->flags2 = flags2;
+ bus->dhd->d11_tx_status = TRUE;
}
+#endif /* DBG_PKT_MON || DHD_PKT_LOGGING */
if (!dhd_download_fw_on_driverload)
dhd_dpc_enable(bus->dhd);
/* Enable the interrupt after device is up */
dhdpcie_bus_intr_enable(bus);
- bus->intr_enabled = TRUE;
-
/* bcmsdh_intr_unmask(bus->sdh); */
+#ifdef DHD_PCIE_RUNTIMEPM
+ bus->idlecount = 0;
+ bus->idletime = (int32)MAX_IDLE_COUNT;
+ init_waitqueue_head(&bus->rpm_queue);
+ mutex_init(&bus->pm_lock);
+#else
bus->idletime = 0;
+#endif /* DHD_PCIE_RUNTIMEPM */
+
+#ifdef PCIE_INB_DW
+ /* Initialize the lock to serialize Device Wake Inband activities */
+ if (!bus->inb_lock) {
+ bus->inb_lock = dhd_os_spin_lock_init(bus->dhd->osh);
+ }
+#endif
+
/* Make use_d0_inform TRUE for Rev 5 for backward compatibility */
if (bus->api.fw_rev < PCIE_SHARED_VERSION_6) {
bus->use_d0_inform = FALSE;
}
-exit:
- if (MULTIBP_ENAB(bus->sih)) {
- dhd_bus_pcie_pwr_req_clear(bus);
- }
return ret;
}
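
The DBG_PKT_MON/DHD_PKT_LOGGING hunk above is a read-modify-write of the shared flags2 word: only when the dongle advertises D2H D11 TX status does the host set the matching H2D bit back into dongle memory, and the cached copy is updated only after the write succeeds. A sketch of that pattern, with write_shared() standing in for dhdpcie_bus_membytes() and placeholder bit values:

#include <stdint.h>

/* Placeholder bits; the real D2H/H2D D11 TX status flags are defined in
 * the shared-structure header. */
#define D2H_D11_TX_STATUS (1u << 30)
#define H2D_D11_TX_STATUS (1u << 31)

/* Returns 1 if the feature was enabled, 0 if not offered, -1 on a failed
 * write back to dongle memory. */
static int enable_d11_tx_status(uint32_t *flags2,
                                int (*write_shared)(uint32_t new_flags2))
{
	uint32_t v = *flags2;

	if (!(v & D2H_D11_TX_STATUS))
		return 0;			/* feature not offered */

	v |= H2D_D11_TX_STATUS;
	if (write_shared(v) < 0)
		return -1;			/* leave cached copy untouched */

	*flags2 = v;				/* keep host cache in sync */
	return 1;
}
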
{
uint32 addr = 0;
uint32 val = 0;
-
addr = bus->dongle_ram_base + bus->ramsize - 4;
+#ifdef DHD_PCIE_RUNTIMEPM
+ dhdpcie_runtime_bus_wake(bus->dhd, TRUE, __builtin_return_address(0));
+#endif /* DHD_PCIE_RUNTIMEPM */
dhdpcie_bus_membytes(bus, TRUE, addr, (uint8 *)&val, sizeof(val));
}
+
bool
dhdpcie_chipmatch(uint16 vendor, uint16 device)
{
if (vendor != PCI_VENDOR_ID_BROADCOM) {
+#ifndef DHD_EFI
DHD_ERROR(("%s: Unsupported vendor %x device %x\n", __FUNCTION__,
vendor, device));
+#endif /* DHD_EFI */
return (-ENODEV);
}
if ((device == BCM4350_D11AC_ID) || (device == BCM4350_D11AC2G_ID) ||
(device == BCM4350_D11AC5G_ID) || (device == BCM4350_CHIP_ID) ||
- (device == BCM43569_CHIP_ID)) {
+ (device == BCM43569_CHIP_ID))
return 0;
- }
if ((device == BCM4354_D11AC_ID) || (device == BCM4354_D11AC2G_ID) ||
- (device == BCM4354_D11AC5G_ID) || (device == BCM4354_CHIP_ID)) {
+ (device == BCM4354_D11AC5G_ID) || (device == BCM4354_CHIP_ID))
return 0;
- }
if ((device == BCM4356_D11AC_ID) || (device == BCM4356_D11AC2G_ID) ||
- (device == BCM4356_D11AC5G_ID) || (device == BCM4356_CHIP_ID)) {
+ (device == BCM4356_D11AC5G_ID) || (device == BCM4356_CHIP_ID))
return 0;
- }
if ((device == BCM4371_D11AC_ID) || (device == BCM4371_D11AC2G_ID) ||
- (device == BCM4371_D11AC5G_ID) || (device == BCM4371_CHIP_ID)) {
+ (device == BCM4371_D11AC5G_ID) || (device == BCM4371_CHIP_ID))
return 0;
- }
if ((device == BCM4345_D11AC_ID) || (device == BCM4345_D11AC2G_ID) ||
- (device == BCM4345_D11AC5G_ID) || BCM4345_CHIP(device)) {
+ (device == BCM4345_D11AC5G_ID) || BCM4345_CHIP(device))
return 0;
- }
if ((device == BCM43452_D11AC_ID) || (device == BCM43452_D11AC2G_ID) ||
- (device == BCM43452_D11AC5G_ID)) {
+ (device == BCM43452_D11AC5G_ID))
return 0;
- }
if ((device == BCM4335_D11AC_ID) || (device == BCM4335_D11AC2G_ID) ||
- (device == BCM4335_D11AC5G_ID) || (device == BCM4335_CHIP_ID)) {
+ (device == BCM4335_D11AC5G_ID) || (device == BCM4335_CHIP_ID))
return 0;
- }
if ((device == BCM43602_D11AC_ID) || (device == BCM43602_D11AC2G_ID) ||
- (device == BCM43602_D11AC5G_ID) || (device == BCM43602_CHIP_ID)) {
+ (device == BCM43602_D11AC5G_ID) || (device == BCM43602_CHIP_ID))
return 0;
- }
if ((device == BCM43569_D11AC_ID) || (device == BCM43569_D11AC2G_ID) ||
- (device == BCM43569_D11AC5G_ID) || (device == BCM43569_CHIP_ID)) {
+ (device == BCM43569_D11AC5G_ID) || (device == BCM43569_CHIP_ID))
return 0;
- }
if ((device == BCM4358_D11AC_ID) || (device == BCM4358_D11AC2G_ID) ||
- (device == BCM4358_D11AC5G_ID)) {
+ (device == BCM4358_D11AC5G_ID))
return 0;
- }
if ((device == BCM4349_D11AC_ID) || (device == BCM4349_D11AC2G_ID) ||
- (device == BCM4349_D11AC5G_ID) || (device == BCM4349_CHIP_ID)) {
+ (device == BCM4349_D11AC5G_ID) || (device == BCM4349_CHIP_ID))
return 0;
- }
if ((device == BCM4355_D11AC_ID) || (device == BCM4355_D11AC2G_ID) ||
- (device == BCM4355_D11AC5G_ID) || (device == BCM4355_CHIP_ID)) {
+ (device == BCM4355_D11AC5G_ID) || (device == BCM4355_CHIP_ID))
return 0;
- }
if ((device == BCM4359_D11AC_ID) || (device == BCM4359_D11AC2G_ID) ||
- (device == BCM4359_D11AC5G_ID)) {
+ (device == BCM4359_D11AC5G_ID))
return 0;
- }
if ((device == BCM43596_D11AC_ID) || (device == BCM43596_D11AC2G_ID) ||
- (device == BCM43596_D11AC5G_ID)) {
+ (device == BCM43596_D11AC5G_ID))
return 0;
- }
if ((device == BCM43597_D11AC_ID) || (device == BCM43597_D11AC2G_ID) ||
- (device == BCM43597_D11AC5G_ID)) {
+ (device == BCM43597_D11AC5G_ID))
return 0;
- }
if ((device == BCM4364_D11AC_ID) || (device == BCM4364_D11AC2G_ID) ||
- (device == BCM4364_D11AC5G_ID) || (device == BCM4364_CHIP_ID)) {
+ (device == BCM4364_D11AC5G_ID) || (device == BCM4364_CHIP_ID))
+ return 0;
+
+ if ((device == BCM4347_D11AC_ID) || (device == BCM4347_D11AC2G_ID) ||
+ (device == BCM4347_D11AC5G_ID) || (device == BCM4347_CHIP_ID))
return 0;
- }
if ((device == BCM4361_D11AC_ID) || (device == BCM4361_D11AC2G_ID) ||
- (device == BCM4361_D11AC5G_ID) || (device == BCM4361_CHIP_ID)) {
+ (device == BCM4361_D11AC5G_ID) || (device == BCM4361_CHIP_ID))
return 0;
- }
+
if ((device == BCM4362_D11AX_ID) || (device == BCM4362_D11AX2G_ID) ||
(device == BCM4362_D11AX5G_ID) || (device == BCM4362_CHIP_ID)) {
return 0;
}
- if ((device == BCM43751_D11AX_ID) || (device == BCM43751_D11AX2G_ID) ||
- (device == BCM43751_D11AX5G_ID) || (device == BCM43751_CHIP_ID)) {
- return 0;
- }
- if ((device == BCM43752_D11AX_ID) || (device == BCM43752_D11AX2G_ID) ||
- (device == BCM43752_D11AX5G_ID) || (device == BCM43752_CHIP_ID)) {
- return 0;
- }
- if ((device == BCM4347_D11AC_ID) || (device == BCM4347_D11AC2G_ID) ||
- (device == BCM4347_D11AC5G_ID) || (device == BCM4347_CHIP_ID)) {
- return 0;
- }
if ((device == BCM4365_D11AC_ID) || (device == BCM4365_D11AC2G_ID) ||
- (device == BCM4365_D11AC5G_ID) || (device == BCM4365_CHIP_ID)) {
+ (device == BCM4365_D11AC5G_ID) || (device == BCM4365_CHIP_ID))
return 0;
- }
if ((device == BCM4366_D11AC_ID) || (device == BCM4366_D11AC2G_ID) ||
- (device == BCM4366_D11AC5G_ID) || (device == BCM4366_CHIP_ID) ||
- (device == BCM43664_CHIP_ID) || (device == BCM43666_CHIP_ID)) {
- return 0;
- }
-
- if ((device == BCM4369_D11AX_ID) || (device == BCM4369_D11AX2G_ID) ||
- (device == BCM4369_D11AX5G_ID) || (device == BCM4369_CHIP_ID)) {
- return 0;
- }
-
- if ((device == BCM4375_D11AX_ID) || (device == BCM4375_D11AX2G_ID) ||
- (device == BCM4375_D11AX5G_ID) || (device == BCM4375_CHIP_ID)) {
+ (device == BCM4366_D11AC5G_ID) || (device == BCM4366_CHIP_ID))
return 0;
- }
-
+#ifndef DHD_EFI
DHD_ERROR(("%s: Unsupported vendor %x device %x\n", __FUNCTION__, vendor, device));
+#endif
return (-ENODEV);
} /* dhdpcie_chipmatch */
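
dhdpcie_chipmatch() stays an explicit if-chain over device IDs; the same lookup could be expressed as a table walk, which keeps each new device to a single table entry. A sketch of that alternative with obviously fake placeholder IDs, not the real BCM*_ID constants:

#include <stddef.h>
#include <stdint.h>

/* Placeholder device IDs only. */
static const uint16_t supported_ids[] = {
	0x0001, 0x0002, 0x0003,
	0x0010, 0x0011,
};

/* Table-driven equivalent of the if-chain: vendor check first, then a
 * linear scan of the supported-device table. */
static int chipmatch(uint16_t vendor, uint16_t device, uint16_t bcm_vendor)
{
	size_t i;

	if (vendor != bcm_vendor)
		return -1;			/* maps to -ENODEV above */

	for (i = 0; i < sizeof(supported_ids) / sizeof(supported_ids[0]); i++) {
		if (device == supported_ids[i])
			return 0;
	}
	return -1;
}
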
dhd_tcpack_info_tbl_clean(bus->dhd);
#endif /* DHDTCPACK_SUPPRESS */
-#ifdef DHD_HP2P
- if (flow_ring_node->hp2p_ring) {
- bus->dhd->hp2p_ring_active = FALSE;
- flow_ring_node->hp2p_ring = FALSE;
- }
-#endif /* DHD_HP2P */
-
/* clean up BUS level info */
DHD_FLOWRING_LOCK(flow_ring_node->lock, flags);
DHD_INFO(("%s :Flow Response %d \n", __FUNCTION__, flowid));
- /* Boundary check of the flowid */
- if (flowid >= bus->dhd->num_flow_rings) {
- DHD_ERROR(("%s: flowid is invalid %d, max %d\n", __FUNCTION__,
- flowid, bus->dhd->num_flow_rings));
- return;
- }
-
flow_ring_node = DHD_FLOW_RING(bus->dhd, flowid);
- if (!flow_ring_node) {
- DHD_ERROR(("%s: flow_ring_node is NULL\n", __FUNCTION__));
- return;
- }
-
ASSERT(flow_ring_node->flowid == flowid);
- if (flow_ring_node->flowid != flowid) {
- DHD_ERROR(("%s: flowid %d is different from the flowid "
- "of the flow_ring_node %d\n", __FUNCTION__, flowid,
- flow_ring_node->flowid));
- return;
- }
if (status != BCME_OK) {
DHD_ERROR(("%s Flow create Response failure error status = %d \n",
flow_ring_node = (flow_ring_node_t *)arg;
-#ifdef DHDTCPACK_SUPPRESS
- /* Clean tcp_ack_info_tbl in order to prevent access to flushed pkt,
- * when there is a newly coming packet from network stack.
- */
- dhd_tcpack_info_tbl_clean(bus->dhd);
-#endif /* DHDTCPACK_SUPPRESS */
DHD_FLOWRING_LOCK(flow_ring_node->lock, flags);
if (flow_ring_node->status == FLOW_RING_STATUS_DELETE_PENDING) {
DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags);
queue = &flow_ring_node->queue; /* queue associated with flow ring */
+#ifdef DHDTCPACK_SUPPRESS
+ /* Clean tcp_ack_info_tbl in order to prevent access to flushed pkt,
+ * when there is a newly coming packet from network stack.
+ */
+ dhd_tcpack_info_tbl_clean(bus->dhd);
+#endif /* DHDTCPACK_SUPPRESS */
/* Flush all pending packets in the queue, if any */
while ((pkt = dhd_flow_queue_dequeue(bus->dhd, queue)) != NULL) {
PKTFREE(bus->dhd->osh, pkt, TRUE);
DHD_INFO(("%s :Flow Delete Response %d \n", __FUNCTION__, flowid));
- /* Boundary check of the flowid */
- if (flowid >= bus->dhd->num_flow_rings) {
- DHD_ERROR(("%s: flowid is invalid %d, max %d\n", __FUNCTION__,
- flowid, bus->dhd->num_flow_rings));
- return;
- }
-
flow_ring_node = DHD_FLOW_RING(bus->dhd, flowid);
- if (!flow_ring_node) {
- DHD_ERROR(("%s: flow_ring_node is NULL\n", __FUNCTION__));
- return;
- }
-
ASSERT(flow_ring_node->flowid == flowid);
- if (flow_ring_node->flowid != flowid) {
- DHD_ERROR(("%s: flowid %d is different from the flowid "
- "of the flow_ring_node %d\n", __FUNCTION__, flowid,
- flow_ring_node->flowid));
- return;
- }
if (status != BCME_OK) {
DHD_ERROR(("%s Flow Delete Response failure error status = %d \n",
return;
}
- /* Boundary check of the flowid */
- if (flowid >= bus->dhd->num_flow_rings) {
- DHD_ERROR(("%s: flowid is invalid %d, max %d\n", __FUNCTION__,
- flowid, bus->dhd->num_flow_rings));
- return;
- }
-
flow_ring_node = DHD_FLOW_RING(bus->dhd, flowid);
- if (!flow_ring_node) {
- DHD_ERROR(("%s: flow_ring_node is NULL\n", __FUNCTION__));
- return;
- }
-
ASSERT(flow_ring_node->flowid == flowid);
- if (flow_ring_node->flowid != flowid) {
- DHD_ERROR(("%s: flowid %d is different from the flowid "
- "of the flow_ring_node %d\n", __FUNCTION__, flowid,
- flow_ring_node->flowid));
- return;
- }
flow_ring_node->status = FLOW_RING_STATUS_OPEN;
return;
dhdp->bus->is_linkdown = val;
}
-int
-dhd_bus_get_linkdown(dhd_pub_t *dhdp)
-{
- return dhdp->bus->is_linkdown;
-}
-
-int
-dhd_bus_get_cto(dhd_pub_t *dhdp)
-{
- return dhdp->bus->cto_triggered;
-}
-
#ifdef IDLE_TX_FLOW_MGMT
/* resume request */
int
return;
}
+
/* scan the nodes in active list till it finds a non idle node */
void
dhd_bus_idle_scan(dhd_bus_t *bus)
ASSERT(osh);
if (bus->dhd) {
-#if defined(DEBUGGER) || defined(DHD_DSCOPE)
- debugger_close();
-#endif /* DEBUGGER || DHD_DSCOPE */
-
dongle_isolation = bus->dhd->dongle_isolation;
dhdpcie_bus_release_dongle(bus, osh, dongle_isolation, TRUE);
}
return 0;
}
-int
-dhdpcie_cto_cfg_init(struct dhd_bus *bus, bool enable)
+void
+dhdpcie_cto_init(struct dhd_bus *bus, bool enable)
{
- uint32 val;
if (enable) {
dhdpcie_bus_cfg_write_dword(bus, PCI_INT_MASK, 4,
PCI_CTO_INT_MASK | PCI_SBIM_MASK_SERR);
- val = dhdpcie_bus_cfg_read_dword(bus, PCI_SPROM_CONTROL, 4);
- dhdpcie_bus_cfg_write_dword(bus, PCI_SPROM_CONTROL, 4, val | SPROM_BACKPLANE_EN);
- } else {
- dhdpcie_bus_cfg_write_dword(bus, PCI_INT_MASK, 4, 0);
- val = dhdpcie_bus_cfg_read_dword(bus, PCI_SPROM_CONTROL, 4);
- dhdpcie_bus_cfg_write_dword(bus, PCI_SPROM_CONTROL, 4, val & ~SPROM_BACKPLANE_EN);
- }
- return 0;
-}
-
-int
-dhdpcie_cto_init(struct dhd_bus *bus, bool enable)
-{
- if (bus->sih->buscorerev < 19) {
- DHD_INFO(("%s: Unsupported CTO, buscorerev=%d\n",
- __FUNCTION__, bus->sih->buscorerev));
- return BCME_UNSUPPORTED;
- }
-
- if (bus->sih->buscorerev == 19) {
- uint32 pcie_lnkst;
- si_corereg(bus->sih, bus->sih->buscoreidx,
- OFFSETOF(sbpcieregs_t, configaddr), ~0, PCI_LINK_STATUS);
+ dhdpcie_bus_cfg_write_dword(bus, PCI_SPROM_CONTROL, 4, SPROM_BACKPLANE_EN);
- pcie_lnkst = si_corereg(bus->sih, bus->sih->buscoreidx,
- OFFSETOF(sbpcieregs_t, configdata), 0, 0);
-
- if (((pcie_lnkst >> PCI_LINK_SPEED_SHIFT) &
- PCI_LINK_SPEED_MASK) == PCIE_LNK_SPEED_GEN1) {
- return BCME_UNSUPPORTED;
+ if (bus->dhd->cto_threshold == 0) {
+ bus->dhd->cto_threshold = PCIE_CTO_TO_THRESH_DEFAULT;
}
- }
-
- bus->cto_enable = enable;
-
- dhdpcie_cto_cfg_init(bus, enable);
- if (enable) {
- if (bus->cto_threshold == 0) {
- bus->cto_threshold = PCIE_CTO_TO_THRESH_DEFAULT;
- }
si_corereg(bus->sih, bus->sih->buscoreidx,
- OFFSETOF(sbpcieregs_t, ctoctrl), ~0,
- ((bus->cto_threshold << PCIE_CTO_TO_THRESHOLD_SHIFT) &
- PCIE_CTO_TO_THRESHHOLD_MASK) |
- ((PCIE_CTO_CLKCHKCNT_VAL << PCIE_CTO_CLKCHKCNT_SHIFT) &
- PCIE_CTO_CLKCHKCNT_MASK) |
- PCIE_CTO_ENAB_MASK);
+ OFFSETOF(sbpcieregs_t, ctoctrl), ~0,
+ ((bus->dhd->cto_threshold << PCIE_CTO_TO_THRESHOLD_SHIFT) &
+ PCIE_CTO_TO_THRESHHOLD_MASK) |
+ ((PCIE_CTO_CLKCHKCNT_VAL << PCIE_CTO_CLKCHKCNT_SHIFT) &
+ PCIE_CTO_CLKCHKCNT_MASK) |
+ PCIE_CTO_ENAB_MASK);
} else {
+ dhdpcie_bus_cfg_write_dword(bus, PCI_INT_MASK, 4, 0);
+ dhdpcie_bus_cfg_write_dword(bus, PCI_SPROM_CONTROL, 4, 0);
+
si_corereg(bus->sih, bus->sih->buscoreidx,
- OFFSETOF(sbpcieregs_t, ctoctrl), ~0, 0);
+ OFFSETOF(sbpcieregs_t, ctoctrl), ~0, 0);
}
-
- DHD_ERROR(("%s: set CTO prevention and recovery enable/disable %d\n",
- __FUNCTION__, bus->cto_enable));
-
- return 0;
}
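
The enable path of dhdpcie_cto_init() packs the completion-timeout threshold and the clock-check count into ctoctrl with shifts and masks and then sets the enable bit. The packing on its own, using placeholder shift/mask values rather than the real PCIE_CTO_* definitions:

#include <stdint.h>

/* Placeholder layout; the real shifts and masks come from the PCIe core
 * register header. */
#define CTO_THRESH_SHIFT   0
#define CTO_THRESH_MASK    0x0000ffffu
#define CTO_CLKCHK_SHIFT   16
#define CTO_CLKCHK_MASK    0x00ff0000u
#define CTO_ENAB           (1u << 31)

/* Build the value written to ctoctrl when CTO detection is enabled. */
static uint32_t cto_ctrl_value(uint32_t threshold, uint32_t clkchkcnt)
{
	return ((threshold << CTO_THRESH_SHIFT) & CTO_THRESH_MASK) |
	       ((clkchkcnt << CTO_CLKCHK_SHIFT) & CTO_CLKCHK_MASK) |
	       CTO_ENAB;
}
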
-static int
+static void
dhdpcie_cto_error_recovery(struct dhd_bus *bus)
{
uint32 pci_intmask, err_status;
uint8 i = 0;
- uint32 val;
pci_intmask = dhdpcie_bus_cfg_read_dword(bus, PCI_INT_MASK, 4);
dhdpcie_bus_cfg_write_dword(bus, PCI_INT_MASK, 4, pci_intmask & ~PCI_CTO_INT_MASK);
DHD_OS_WAKE_LOCK(bus->dhd);
- DHD_ERROR(("--- CTO Triggered --- %d\n", bus->pwr_req_ref));
-
- /*
- * DAR still accessible
- */
- dhd_bus_dump_dar_registers(bus);
-
/* reset backplane */
- val = dhdpcie_bus_cfg_read_dword(bus, PCI_SPROM_CONTROL, 4);
- dhdpcie_bus_cfg_write_dword(bus, PCI_SPROM_CONTROL, 4, val | SPROM_CFG_TO_SB_RST);
+ dhdpcie_bus_cfg_write_dword(bus, PCI_SPROM_CONTROL, 4, SPROM_CFG_TO_SB_RST);
/* clear timeout error */
while (1) {
err_status = si_corereg(bus->sih, bus->sih->buscoreidx,
- DAR_ERRLOG(bus->sih->buscorerev),
+ OFFSETOF(sbpcieregs_t, dm_errlog),
0, 0);
if (err_status & PCIE_CTO_ERR_MASK) {
si_corereg(bus->sih, bus->sih->buscoreidx,
- DAR_ERRLOG(bus->sih->buscorerev),
+ OFFSETOF(sbpcieregs_t, dm_errlog),
~0, PCIE_CTO_ERR_MASK);
} else {
break;
DHD_ERROR(("cto recovery fail\n"));
DHD_OS_WAKE_UNLOCK(bus->dhd);
- return BCME_ERROR;
+ return;
}
}
/* Halt ARM & remove reset */
/* TBD : we can add ARM Halt here in case */
- /* reset SPROM_CFG_TO_SB_RST */
- val = dhdpcie_bus_cfg_read_dword(bus, PCI_SPROM_CONTROL, 4);
-
- DHD_ERROR(("cto recovery reset 0x%x:SPROM_CFG_TO_SB_RST(0x%x) 0x%x\n",
- PCI_SPROM_CONTROL, SPROM_CFG_TO_SB_RST, val));
- dhdpcie_bus_cfg_write_dword(bus, PCI_SPROM_CONTROL, 4, val & ~SPROM_CFG_TO_SB_RST);
-
- val = dhdpcie_bus_cfg_read_dword(bus, PCI_SPROM_CONTROL, 4);
- DHD_ERROR(("cto recovery success 0x%x:SPROM_CFG_TO_SB_RST(0x%x) 0x%x\n",
- PCI_SPROM_CONTROL, SPROM_CFG_TO_SB_RST, val));
+ DHD_ERROR(("cto recovery success\n"));
DHD_OS_WAKE_UNLOCK(bus->dhd);
-
- return BCME_OK;
-}
-
-void
-dhdpcie_ssreset_dis_enum_rst(struct dhd_bus *bus)
-{
- uint32 val;
-
- val = dhdpcie_bus_cfg_read_dword(bus, PCIE_CFG_SUBSYSTEM_CONTROL, 4);
- dhdpcie_bus_cfg_write_dword(bus, PCIE_CFG_SUBSYSTEM_CONTROL, 4,
- val | (0x1 << PCIE_SSRESET_DIS_ENUM_RST_BIT));
-}
-
-#if defined(DBG_PKT_MON)
-static int
-dhdpcie_init_d11status(struct dhd_bus *bus)
-{
- uint32 addr;
- uint32 flags2;
- int ret = 0;
-
- if (bus->pcie_sh->flags2 & PCIE_SHARED2_D2H_D11_TX_STATUS) {
- flags2 = bus->pcie_sh->flags2;
- addr = bus->shared_addr + OFFSETOF(pciedev_shared_t, flags2);
- flags2 |= PCIE_SHARED2_H2D_D11_TX_STATUS;
- ret = dhdpcie_bus_membytes(bus, TRUE, addr,
- (uint8 *)&flags2, sizeof(flags2));
- if (ret < 0) {
- DHD_ERROR(("%s: update flag bit (H2D_D11_TX_STATUS) failed\n",
- __FUNCTION__));
- return ret;
- }
- bus->pcie_sh->flags2 = flags2;
- bus->dhd->d11_tx_status = TRUE;
- }
- return ret;
-}
-
-#else
-static int
-dhdpcie_init_d11status(struct dhd_bus *bus)
-{
- return 0;
}
-#endif // endif
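
dhdpcie_cto_error_recovery() clears the logged completion-timeout error with a write-one-to-clear loop and gives up after a bounded number of attempts (the "cto recovery fail" path above). The shape of that loop, with read_reg()/write_reg() standing in for the si_corereg() accesses and placeholder mask/bound values:

#include <stdbool.h>
#include <stdint.h>

#define CTO_ERR_MASK   (1u << 0)	/* placeholder for PCIE_CTO_ERR_MASK */
#define CTO_CLEAR_MAX  50		/* placeholder retry bound           */

/* Read the error log, write-one-to-clear the CTO bit, and stop once the
 * log reads clean or the retry budget is exhausted. */
static bool clear_cto_error(uint32_t (*read_reg)(void),
                            void (*write_reg)(uint32_t))
{
	int i;

	for (i = 0; i < CTO_CLEAR_MAX; i++) {
		if (!(read_reg() & CTO_ERR_MASK))
			return true;		/* error log is clean */
		write_reg(CTO_ERR_MASK);	/* W1C the timeout bit */
	}
	return false;				/* "cto recovery fail" path */
}
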
#ifdef BCMPCIE_OOB_HOST_WAKE
int
}
#endif /* BCMPCIE_OOB_HOST_WAKE */
+
+
bool
dhdpcie_bus_get_pcie_hostready_supported(dhd_bus_t *bus)
{
{
dhd_bus_t *bus = pub->bus;
uint32 coreoffset = index << 12;
- uint32 core_addr = SI_ENUM_BASE(bus->sih) + coreoffset;
+ uint32 core_addr = SI_ENUM_BASE + coreoffset;
uint32 value;
+
while (first_addr <= last_addr) {
- core_addr = SI_ENUM_BASE(bus->sih) + coreoffset + first_addr;
- if (serialized_backplane_access(bus, core_addr, 4, &value, TRUE) != BCME_OK) {
+ core_addr = SI_ENUM_BASE + coreoffset + first_addr;
+ if (si_backplane_access(bus->sih, core_addr, 4, &value, TRUE) != BCME_OK) {
DHD_ERROR(("Invalid size/addr combination \n"));
}
DHD_ERROR(("[0x%08x]: 0x%08x\n", core_addr, value));
}
}
+#ifdef PCIE_OOB
bool
-dhdpcie_bus_get_pcie_hwa_supported(dhd_bus_t *bus)
+dhdpcie_bus_get_pcie_oob_dw_supported(dhd_bus_t *bus)
{
if (!bus->dhd)
return FALSE;
- else if (bus->hwa_enab_bmap) {
- return bus->dhd->hwa_enable;
+ if (bus->oob_enabled) {
+ return !bus->dhd->d2h_no_oob_dw;
} else {
return FALSE;
}
}
+#endif /* PCIE_OOB */
+
+void
+dhdpcie_bus_enab_pcie_dw(dhd_bus_t *bus, uint8 dw_option)
+{
+ DHD_ERROR(("ENABLING DW:%d\n", dw_option));
+ bus->dw_option = dw_option;
+}
+#ifdef PCIE_INB_DW
bool
-dhdpcie_bus_get_pcie_idma_supported(dhd_bus_t *bus)
+dhdpcie_bus_get_pcie_inband_dw_supported(dhd_bus_t *bus)
{
if (!bus->dhd)
return FALSE;
- else if (bus->idma_enabled) {
- return bus->dhd->idma_enable;
+ if (bus->inb_enabled) {
+ return bus->dhd->d2h_inband_dw;
} else {
return FALSE;
}
}
+void
+dhdpcie_bus_set_pcie_inband_dw_state(dhd_bus_t *bus, enum dhd_bus_ds_state state)
+{
+ if (!INBAND_DW_ENAB(bus))
+ return;
+
+ DHD_INFO(("%s:%d\n", __FUNCTION__, state));
+ bus->dhd->ds_state = state;
+ if (state == DW_DEVICE_DS_DISABLED_WAIT || state == DW_DEVICE_DS_D3_INFORM_WAIT) {
+ bus->ds_exit_timeout = 100;
+ }
+ if (state == DW_DEVICE_HOST_WAKE_WAIT) {
+ bus->host_sleep_exit_timeout = 100;
+ }
+ if (state == DW_DEVICE_DS_DEV_WAKE) {
+ bus->ds_exit_timeout = 0;
+ }
+ if (state == DW_DEVICE_DS_ACTIVE) {
+ bus->host_sleep_exit_timeout = 0;
+ }
+}
+
+enum dhd_bus_ds_state
+dhdpcie_bus_get_pcie_inband_dw_state(dhd_bus_t *bus)
+{
+ if (!INBAND_DW_ENAB(bus))
+ return DW_DEVICE_DS_INVALID;
+ return bus->dhd->ds_state;
+}
+#endif /* PCIE_INB_DW */
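#ifdef PCIE_INB_DW
/*
 * A minimal usage sketch for the inband device-wake accessors added above;
 * the helper name dhd_example_bus_ds_active is hypothetical and only
 * illustrates how the getter is meant to be read.
 */
static bool
dhd_example_bus_ds_active(dhd_bus_t *bus)
{
	/* The getter returns DW_DEVICE_DS_INVALID when INBAND_DW_ENAB() is false,
	 * so a single comparison also covers the "inband device-wake disabled" case.
	 */
	return (dhdpcie_bus_get_pcie_inband_dw_state(bus) == DW_DEVICE_DS_ACTIVE);
}
#endif /* PCIE_INB_DW */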
+
bool
-dhdpcie_bus_get_pcie_ifrm_supported(dhd_bus_t *bus)
+dhdpcie_bus_get_pcie_idma_supported(dhd_bus_t *bus)
{
if (!bus->dhd)
return FALSE;
- else if (bus->ifrm_enabled) {
- return bus->dhd->ifrm_enable;
+ else if (bus->idma_enabled) {
+ return bus->dhd->idma_enable;
} else {
return FALSE;
}
}
bool
-dhdpcie_bus_get_pcie_dar_supported(dhd_bus_t *bus)
+dhdpcie_bus_get_pcie_ifrm_supported(dhd_bus_t *bus)
{
- if (!bus->dhd) {
+ if (!bus->dhd)
return FALSE;
- } else if (bus->dar_enabled) {
- return bus->dhd->dar_enable;
+ else if (bus->ifrm_enabled) {
+ return bus->dhd->ifrm_enable;
} else {
return FALSE;
}
}
-void
-dhdpcie_bus_enab_pcie_dw(dhd_bus_t *bus, uint8 dw_option)
-{
- DHD_ERROR(("ENABLING DW:%d\n", dw_option));
- bus->dw_option = dw_option;
-}
void
dhd_bus_dump_trap_info(dhd_bus_t *bus, struct bcmstrbuf *strbuf)
"\nTRAP type 0x%x @ epc 0x%x, cpsr 0x%x, spsr 0x%x, sp 0x%x,"
" lp 0x%x, rpc 0x%x"
"\nTrap offset 0x%x, r0 0x%x, r1 0x%x, r2 0x%x, r3 0x%x, "
- "r4 0x%x, r5 0x%x, r6 0x%x, r7 0x%x, r8 0x%x, r9 0x%x, "
- "r10 0x%x, r11 0x%x, r12 0x%x\n\n",
+ "r4 0x%x, r5 0x%x, r6 0x%x, r7 0x%x\n\n",
ltoh32(tr->type), ltoh32(tr->epc), ltoh32(tr->cpsr), ltoh32(tr->spsr),
ltoh32(tr->r13), ltoh32(tr->r14), ltoh32(tr->pc),
ltoh32(bus->pcie_sh->trap_addr),
ltoh32(tr->r0), ltoh32(tr->r1), ltoh32(tr->r2), ltoh32(tr->r3),
- ltoh32(tr->r4), ltoh32(tr->r5), ltoh32(tr->r6), ltoh32(tr->r7),
- ltoh32(tr->r8), ltoh32(tr->r9), ltoh32(tr->r10),
- ltoh32(tr->r11), ltoh32(tr->r12));
+ ltoh32(tr->r4), ltoh32(tr->r5), ltoh32(tr->r6), ltoh32(tr->r7));
}
int
int bcmerror = 0;
struct dhd_bus *bus = dhdp->bus;
- if (serialized_backplane_access(bus, addr, size, data, read) != BCME_OK) {
+ if (si_backplane_access(bus->sih, addr, size, data, read) != BCME_OK) {
DHD_ERROR(("Invalid size/addr combination \n"));
bcmerror = BCME_ERROR;
}
return dhd->bus->idletime;
}
+#ifdef DHD_SSSR_DUMP
+
static INLINE void
dhd_sbreg_op(dhd_pub_t *dhd, uint addr, uint *val, bool read)
{
OSL_DELAY(1);
- if (serialized_backplane_access(dhd->bus, addr, sizeof(uint), val, read) != BCME_OK) {
- DHD_ERROR(("sbreg: Invalid uint addr: 0x%x \n", addr));
- } else {
- DHD_ERROR(("sbreg: addr:0x%x val:0x%x read:%d\n", addr, *val, read));
- }
+ si_backplane_access(dhd->bus->sih, addr, sizeof(uint), val, read);
+ DHD_ERROR(("%s: addr:0x%x val:0x%x read:%d\n", __FUNCTION__, addr, *val, read));
return;
}
-#ifdef DHD_SSSR_DUMP
static int
dhdpcie_get_sssr_fifo_dump(dhd_pub_t *dhd, uint *buf, uint fifo_size,
uint addr_reg, uint data_reg)
addr = data_reg;
/* Read 4 bytes at once and loop for fifo_size / 4 */
for (i = 0; i < fifo_size / 4; i++) {
- if (serialized_backplane_access(dhd->bus, addr,
- sizeof(uint), &val, TRUE) != BCME_OK) {
- DHD_ERROR(("%s: error in serialized_backplane_access\n", __FUNCTION__));
- return BCME_ERROR;
- }
+ si_backplane_access(dhd->bus->sih, addr, sizeof(uint), &val, TRUE);
buf[i] = val;
OSL_DELAY(1);
}
}
static int
-dhdpcie_get_sssr_dig_dump(dhd_pub_t *dhd, uint *buf, uint fifo_size,
+dhdpcie_get_sssr_vasip_dump(dhd_pub_t *dhd, uint *buf, uint fifo_size,
uint addr_reg)
{
uint addr;
uint val = 0;
int i;
- si_t *sih = dhd->bus->sih;
DHD_ERROR(("%s\n", __FUNCTION__));
return BCME_ERROR;
}
- if (addr_reg) {
-
- if ((!dhd->sssr_reg_info.vasip_regs.vasip_sr_size) &&
- dhd->sssr_reg_info.length > OFFSETOF(sssr_reg_info_v1_t, dig_mem_info)) {
- int err = dhdpcie_bus_membytes(dhd->bus, FALSE, addr_reg, (uint8 *)buf,
- fifo_size);
- if (err != BCME_OK) {
- DHD_ERROR(("%s: Error reading dig dump from dongle !\n",
- __FUNCTION__));
- }
- } else {
- /* Check if vasip clk is disabled, if yes enable it */
- addr = dhd->sssr_reg_info.vasip_regs.wrapper_regs.ioctrl;
- dhd_sbreg_op(dhd, addr, &val, TRUE);
- if (!val) {
- val = 1;
- dhd_sbreg_op(dhd, addr, &val, FALSE);
- }
-
- addr = addr_reg;
- /* Read 4 bytes at once and loop for fifo_size / 4 */
- for (i = 0; i < fifo_size / 4; i++, addr += 4) {
- if (serialized_backplane_access(dhd->bus, addr, sizeof(uint),
- &val, TRUE) != BCME_OK) {
- DHD_ERROR(("%s: Invalid uint addr: 0x%x \n", __FUNCTION__,
- addr));
- return BCME_ERROR;
- }
- buf[i] = val;
- OSL_DELAY(1);
- }
- }
- } else {
- uint cur_coreid;
- uint chipc_corerev;
- chipcregs_t *chipcregs;
-
- /* Save the current core */
- cur_coreid = si_coreid(sih);
-
- /* Switch to ChipC */
- chipcregs = (chipcregs_t *)si_setcore(sih, CC_CORE_ID, 0);
-
- chipc_corerev = si_corerev(sih);
-
- if ((chipc_corerev == 64) || (chipc_corerev == 65)) {
- W_REG(si_osh(sih), &chipcregs->sr_memrw_addr, 0);
-
- /* Read 4 bytes at once and loop for fifo_size / 4 */
- for (i = 0; i < fifo_size / 4; i++) {
- buf[i] = R_REG(si_osh(sih), &chipcregs->sr_memrw_data);
- OSL_DELAY(1);
- }
- }
-
- /* Switch back to the original core */
- si_setcore(sih, cur_coreid, 0);
- }
-
- return BCME_OK;
-}
-
-#if defined(EWP_ETD_PRSRV_LOGS)
-void
-dhdpcie_get_etd_preserve_logs(dhd_pub_t *dhd,
- uint8 *ext_trap_data, void *event_decode_data)
-{
- hnd_ext_trap_hdr_t *hdr = NULL;
- bcm_tlv_t *tlv;
- eventlog_trapdata_info_t *etd_evtlog = NULL;
- eventlog_trap_buf_info_t *evtlog_buf_arr = NULL;
- uint arr_size = 0;
- int i = 0;
- int err = 0;
- uint32 seqnum = 0;
-
- if (!ext_trap_data || !event_decode_data || !dhd)
- return;
-
- if (!dhd->concise_dbg_buf)
- return;
-
- /* First word is original trap_data, skip */
- ext_trap_data += sizeof(uint32);
-
- hdr = (hnd_ext_trap_hdr_t *)ext_trap_data;
- tlv = bcm_parse_tlvs(hdr->data, hdr->len, TAG_TRAP_LOG_DATA);
- if (tlv) {
- uint32 baseaddr = 0;
- uint32 endaddr = dhd->bus->dongle_ram_base + dhd->bus->ramsize - 4;
-
- etd_evtlog = (eventlog_trapdata_info_t *)tlv->data;
- DHD_ERROR(("%s: etd_evtlog tlv found, num_elements=%x; "
- "seq_num=%x; log_arr_addr=%x\n", __FUNCTION__,
- (etd_evtlog->num_elements),
- ntoh32(etd_evtlog->seq_num), (etd_evtlog->log_arr_addr)));
- arr_size = (uint32)sizeof(*evtlog_buf_arr) * (etd_evtlog->num_elements);
- if (!arr_size) {
- DHD_ERROR(("%s: num event logs is zero! \n", __FUNCTION__));
- return;
- }
- evtlog_buf_arr = MALLOCZ(dhd->osh, arr_size);
- if (!evtlog_buf_arr) {
- DHD_ERROR(("%s: out of memory !\n", __FUNCTION__));
- return;
- }
-
- /* boundary check */
- baseaddr = etd_evtlog->log_arr_addr;
- if ((baseaddr < dhd->bus->dongle_ram_base) ||
- ((baseaddr + arr_size) > endaddr)) {
- DHD_ERROR(("%s: Error reading invalid address\n",
- __FUNCTION__));
- goto err;
- }
+ /* Check if vasip clk is disabled, if yes enable it */
+ addr = dhd->sssr_reg_info.vasip_regs.wrapper_regs.ioctrl;
+ dhd_sbreg_op(dhd, addr, &val, TRUE);
+ if (!val) {
+ val = 1;
+ dhd_sbreg_op(dhd, addr, &val, FALSE);
+ }
- /* read the eventlog_trap_buf_info_t array from dongle memory */
- err = dhdpcie_bus_membytes(dhd->bus, FALSE,
- (ulong)(etd_evtlog->log_arr_addr),
- (uint8 *)evtlog_buf_arr, arr_size);
- if (err != BCME_OK) {
- DHD_ERROR(("%s: Error reading event log array from dongle !\n",
- __FUNCTION__));
- goto err;
- }
- /* ntoh is required only for seq_num, because in the original
- * case of event logs from info ring, it is sent from dongle in that way
- * so for ETD also dongle follows same convention
- */
- seqnum = ntoh32(etd_evtlog->seq_num);
- memset(dhd->concise_dbg_buf, 0, CONCISE_DUMP_BUFLEN);
- for (i = 0; i < (etd_evtlog->num_elements); ++i) {
- /* boundary check */
- baseaddr = evtlog_buf_arr[i].buf_addr;
- if ((baseaddr < dhd->bus->dongle_ram_base) ||
- ((baseaddr + evtlog_buf_arr[i].len) > endaddr)) {
- DHD_ERROR(("%s: Error reading invalid address\n",
- __FUNCTION__));
- goto err;
- }
- /* read each individual event log buf from dongle memory */
- err = dhdpcie_bus_membytes(dhd->bus, FALSE,
- ((ulong)evtlog_buf_arr[i].buf_addr),
- dhd->concise_dbg_buf, (evtlog_buf_arr[i].len));
- if (err != BCME_OK) {
- DHD_ERROR(("%s: Error reading event log buffer from dongle !\n",
- __FUNCTION__));
- goto err;
- }
- dhd_dbg_msgtrace_log_parser(dhd, dhd->concise_dbg_buf,
- event_decode_data, (evtlog_buf_arr[i].len),
- FALSE, hton32(seqnum));
- ++seqnum;
- }
-err:
- MFREE(dhd->osh, evtlog_buf_arr, arr_size);
- } else {
- DHD_ERROR(("%s: Error getting trap log data in ETD !\n", __FUNCTION__));
+ addr = addr_reg;
+ /* Read 4 bytes at once and loop for fifo_size / 4 */
+ for (i = 0; i < fifo_size / 4; i++, addr += 4) {
+ si_backplane_access(dhd->bus->sih, addr, sizeof(uint), &val, TRUE);
+ buf[i] = val;
+ OSL_DELAY(1);
}
+ return BCME_OK;
}
-#endif /* BCMPCIE && DHD_LOG_DUMP */
-static uint32
-dhdpcie_resume_chipcommon_powerctrl(dhd_pub_t *dhd, uint32 reg_val)
+static int
+dhdpcie_resume_chipcommon_powerctrl(dhd_pub_t *dhd)
{
uint addr;
- uint val = 0;
+ uint val;
DHD_ERROR(("%s\n", __FUNCTION__));
dhd_sbreg_op(dhd, addr, &val, TRUE);
if (!(val & dhd->sssr_reg_info.chipcommon_regs.base_regs.powerctrl_mask)) {
addr = dhd->sssr_reg_info.chipcommon_regs.base_regs.powerctrl;
- dhd_sbreg_op(dhd, addr, &reg_val, FALSE);
+ val = dhd->sssr_reg_info.chipcommon_regs.base_regs.powerctrl_mask;
+ dhd_sbreg_op(dhd, addr, &val, FALSE);
}
return BCME_OK;
}
-static uint32
+static int
dhdpcie_suspend_chipcommon_powerctrl(dhd_pub_t *dhd)
{
uint addr;
- uint val = 0, reg_val = 0;
+ uint val;
DHD_ERROR(("%s\n", __FUNCTION__));
/* conditionally clear bits [11:8] of PowerCtrl */
addr = dhd->sssr_reg_info.chipcommon_regs.base_regs.powerctrl;
- dhd_sbreg_op(dhd, addr, &reg_val, TRUE);
- if (reg_val & dhd->sssr_reg_info.chipcommon_regs.base_regs.powerctrl_mask) {
+ dhd_sbreg_op(dhd, addr, &val, TRUE);
+ if (val & dhd->sssr_reg_info.chipcommon_regs.base_regs.powerctrl_mask) {
addr = dhd->sssr_reg_info.chipcommon_regs.base_regs.powerctrl;
val = 0;
dhd_sbreg_op(dhd, addr, &val, FALSE);
}
- return reg_val;
+ return BCME_OK;
}
static int
return BCME_OK;
}
-static void
-dhdpcie_update_d11_status_from_trapdata(dhd_pub_t *dhd)
-{
-#define TRAP_DATA_MAIN_CORE_BIT_MASK (1 << 1)
-#define TRAP_DATA_AUX_CORE_BIT_MASK (1 << 4)
- uint trap_data_mask[MAX_NUM_D11CORES] =
- {TRAP_DATA_MAIN_CORE_BIT_MASK, TRAP_DATA_AUX_CORE_BIT_MASK};
- int i;
- /* Apply only for 4375 chip */
- if (dhd_bus_chip_id(dhd) == BCM4375_CHIP_ID) {
- for (i = 0; i < MAX_NUM_D11CORES; i++) {
- if (dhd->sssr_d11_outofreset[i] &&
- (dhd->dongle_trap_data & trap_data_mask[i])) {
- dhd->sssr_d11_outofreset[i] = TRUE;
- } else {
- dhd->sssr_d11_outofreset[i] = FALSE;
- }
- DHD_ERROR(("%s: sssr_d11_outofreset[%d] : %d after AND with "
- "trap_data:0x%x-0x%x\n",
- __FUNCTION__, i, dhd->sssr_d11_outofreset[i],
- dhd->dongle_trap_data, trap_data_mask[i]));
- }
- }
-}
-
static int
dhdpcie_d11_check_outofreset(dhd_pub_t *dhd)
{
for (i = 0; i < MAX_NUM_D11CORES; i++) {
/* Check if bit 0 of resetctrl is cleared */
addr = dhd->sssr_reg_info.mac_regs[i].wrapper_regs.resetctrl;
- if (!addr) {
- DHD_ERROR(("%s: skipping for core[%d] as 'addr' is NULL\n",
- __FUNCTION__, i));
- continue;
- }
dhd_sbreg_op(dhd, addr, &val, TRUE);
if (!(val & 1)) {
dhd->sssr_d11_outofreset[i] = TRUE;
DHD_ERROR(("%s: sssr_d11_outofreset[%d] : %d\n",
__FUNCTION__, i, dhd->sssr_d11_outofreset[i]));
}
- dhdpcie_update_d11_status_from_trapdata(dhd);
-
return BCME_OK;
}
val = dhd->sssr_reg_info.arm_regs.base_regs.clockcontrolstatus_val;
dhd_sbreg_op(dhd, addr, &val, FALSE);
}
-
- if (MULTIBP_ENAB(dhd->bus->sih)) {
- uint32 resetctrl = dhd->sssr_reg_info.arm_regs.wrapper_regs.resetctrl;
-
- /* Just halt ARM but do not reset the core */
- resetctrl &= ~(SI_CORE_SIZE - 1);
- resetctrl += OFFSETOF(aidmp_t, ioctrl);
-
- dhd_sbreg_op(dhd, resetctrl, &val, TRUE);
- val |= SICF_CPUHALT;
- dhd_sbreg_op(dhd, resetctrl, &val, FALSE);
- }
- }
- return BCME_OK;
-}
-
-static int
-dhdpcie_arm_resume_clk_req(dhd_pub_t *dhd)
-{
- uint addr;
- uint val = 0;
-
- DHD_ERROR(("%s\n", __FUNCTION__));
-
- /* Check if bit 0 of resetctrl is cleared */
- addr = dhd->sssr_reg_info.arm_regs.wrapper_regs.resetctrl;
- dhd_sbreg_op(dhd, addr, &val, TRUE);
- if (!(val & 1)) {
- if (MULTIBP_ENAB(dhd->bus->sih)) {
- uint32 resetctrl = dhd->sssr_reg_info.arm_regs.wrapper_regs.resetctrl;
-
- /* Take ARM out of halt but do not reset core */
- resetctrl &= ~(SI_CORE_SIZE - 1);
- resetctrl += OFFSETOF(aidmp_t, ioctrl);
-
- dhd_sbreg_op(dhd, resetctrl, &val, TRUE);
- val &= ~SICF_CPUHALT;
- dhd_sbreg_op(dhd, resetctrl, &val, FALSE);
- dhd_sbreg_op(dhd, resetctrl, &val, TRUE);
- }
}
-
return BCME_OK;
}
}
if (dhd->sssr_reg_info.vasip_regs.vasip_sr_size) {
- dhdpcie_get_sssr_dig_dump(dhd, dhd->sssr_dig_buf_before,
+ dhdpcie_get_sssr_vasip_dump(dhd, dhd->sssr_vasip_buf_before,
dhd->sssr_reg_info.vasip_regs.vasip_sr_size,
dhd->sssr_reg_info.vasip_regs.vasip_sr_addr);
- } else if ((dhd->sssr_reg_info.length > OFFSETOF(sssr_reg_info_v1_t, dig_mem_info)) &&
- dhd->sssr_reg_info.dig_mem_info.dig_sr_addr) {
- dhdpcie_get_sssr_dig_dump(dhd, dhd->sssr_dig_buf_before,
- dhd->sssr_reg_info.dig_mem_info.dig_sr_size,
- dhd->sssr_reg_info.dig_mem_info.dig_sr_addr);
}
return BCME_OK;
}
if (dhd->sssr_reg_info.vasip_regs.vasip_sr_size) {
- dhdpcie_get_sssr_dig_dump(dhd, dhd->sssr_dig_buf_after,
+ dhdpcie_get_sssr_vasip_dump(dhd, dhd->sssr_vasip_buf_after,
dhd->sssr_reg_info.vasip_regs.vasip_sr_size,
dhd->sssr_reg_info.vasip_regs.vasip_sr_addr);
- } else if ((dhd->sssr_reg_info.length > OFFSETOF(sssr_reg_info_v1_t, dig_mem_info)) &&
- dhd->sssr_reg_info.dig_mem_info.dig_sr_addr) {
- dhdpcie_get_sssr_dig_dump(dhd, dhd->sssr_dig_buf_after,
- dhd->sssr_reg_info.dig_mem_info.dig_sr_size,
- dhd->sssr_reg_info.dig_mem_info.dig_sr_addr);
}
return BCME_OK;
int
dhdpcie_sssr_dump(dhd_pub_t *dhd)
{
- uint32 powerctrl_val;
-
if (!dhd->sssr_inited) {
DHD_ERROR(("%s: SSSR not inited\n", __FUNCTION__));
return BCME_ERROR;
return BCME_ERROR;
}
- DHD_ERROR(("%s: Before WL down (powerctl: pcie:0x%x chipc:0x%x) "
- "PMU rctl:0x%x res_state:0x%x\n", __FUNCTION__,
- si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
- OFFSETOF(chipcregs_t, powerctl), 0, 0),
- si_corereg(dhd->bus->sih, 0, OFFSETOF(chipcregs_t, powerctl), 0, 0),
- PMU_REG(dhd->bus->sih, retention_ctl, 0, 0),
- PMU_REG(dhd->bus->sih, res_state, 0, 0)));
-
dhdpcie_d11_check_outofreset(dhd);
DHD_ERROR(("%s: Collecting Dump before SR\n", __FUNCTION__));
}
dhdpcie_clear_intmask_and_timer(dhd);
- powerctrl_val = dhdpcie_suspend_chipcommon_powerctrl(dhd);
+ dhdpcie_suspend_chipcommon_powerctrl(dhd);
dhdpcie_clear_clk_req(dhd);
dhdpcie_pcie_send_ltrsleep(dhd);
- if (MULTIBP_ENAB(dhd->bus->sih)) {
- dhd_bus_pcie_pwr_req_wl_domain(dhd->bus, OFFSETOF(chipcregs_t, powerctl), FALSE);
- }
-
/* Wait for some time before Restore */
OSL_DELAY(6000);
- DHD_ERROR(("%s: After WL down (powerctl: pcie:0x%x chipc:0x%x) "
- "PMU rctl:0x%x res_state:0x%x\n", __FUNCTION__,
- si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
- OFFSETOF(chipcregs_t, powerctl), 0, 0),
- si_corereg(dhd->bus->sih, 0, OFFSETOF(chipcregs_t, powerctl), 0, 0),
- PMU_REG(dhd->bus->sih, retention_ctl, 0, 0),
- PMU_REG(dhd->bus->sih, res_state, 0, 0)));
-
- if (MULTIBP_ENAB(dhd->bus->sih)) {
- dhd_bus_pcie_pwr_req_wl_domain(dhd->bus, OFFSETOF(chipcregs_t, powerctl), TRUE);
- /* Add delay for WL domain to power up */
- OSL_DELAY(15000);
-
- DHD_ERROR(("%s: After WL up again (powerctl: pcie:0x%x chipc:0x%x) "
- "PMU rctl:0x%x res_state:0x%x\n", __FUNCTION__,
- si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
- OFFSETOF(chipcregs_t, powerctl), 0, 0),
- si_corereg(dhd->bus->sih, 0, OFFSETOF(chipcregs_t, powerctl), 0, 0),
- PMU_REG(dhd->bus->sih, retention_ctl, 0, 0),
- PMU_REG(dhd->bus->sih, res_state, 0, 0)));
- }
-
- dhdpcie_arm_resume_clk_req(dhd);
- dhdpcie_resume_chipcommon_powerctrl(dhd, powerctrl_val);
- dhdpcie_bring_d11_outofreset(dhd);
-
- DHD_ERROR(("%s: Collecting Dump after SR\n", __FUNCTION__));
- if (dhdpcie_sssr_dump_get_after_sr(dhd) != BCME_OK) {
- DHD_ERROR(("%s: dhdpcie_sssr_dump_get_after_sr failed\n", __FUNCTION__));
- return BCME_ERROR;
- }
- dhd->sssr_dump_collected = TRUE;
- dhd_write_sssr_dump(dhd, SSSR_DUMP_MODE_SSSR);
-
- return BCME_OK;
-}
-
-static int
-dhdpcie_fis_trigger(dhd_pub_t *dhd)
-{
- if (!dhd->sssr_inited) {
- DHD_ERROR(("%s: SSSR not inited\n", __FUNCTION__));
- return BCME_ERROR;
- }
-
- if (dhd->bus->is_linkdown) {
- DHD_ERROR(("%s: PCIe link is down\n", __FUNCTION__));
- return BCME_ERROR;
- }
-
- /* Trigger FIS */
- si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
- DAR_FIS_CTRL(dhd->bus->sih->buscorerev), ~0, DAR_FIS_START_MASK);
- OSL_DELAY(100 * 1000);
-
- return BCME_OK;
-}
-
-int
-dhd_bus_fis_trigger(dhd_pub_t *dhd)
-{
- return dhdpcie_fis_trigger(dhd);
-}
-
-static int
-dhdpcie_fis_dump(dhd_pub_t *dhd)
-{
- int i;
-
- if (!dhd->sssr_inited) {
- DHD_ERROR(("%s: SSSR not inited\n", __FUNCTION__));
- return BCME_ERROR;
- }
-
- if (dhd->bus->is_linkdown) {
- DHD_ERROR(("%s: PCIe link is down\n", __FUNCTION__));
- return BCME_ERROR;
- }
-
- /* bring up all pmu resources */
- PMU_REG(dhd->bus->sih, min_res_mask, ~0,
- PMU_REG(dhd->bus->sih, max_res_mask, 0, 0));
- OSL_DELAY(10 * 1000);
-
- for (i = 0; i < MAX_NUM_D11CORES; i++) {
- dhd->sssr_d11_outofreset[i] = TRUE;
- }
-
+ dhdpcie_resume_chipcommon_powerctrl(dhd);
dhdpcie_bring_d11_outofreset(dhd);
- OSL_DELAY(6000);
-
- /* clear FIS Done */
- PMU_REG(dhd->bus->sih, fis_ctrl_status, PMU_CLEAR_FIS_DONE_MASK, PMU_CLEAR_FIS_DONE_MASK);
-
- dhdpcie_d11_check_outofreset(dhd);
DHD_ERROR(("%s: Collecting Dump after SR\n", __FUNCTION__));
if (dhdpcie_sssr_dump_get_after_sr(dhd) != BCME_OK) {
return BCME_ERROR;
}
- dhd_write_sssr_dump(dhd, SSSR_DUMP_MODE_FIS);
+ dhd_schedule_sssr_dump(dhd);
return BCME_OK;
}
-
-int
-dhd_bus_fis_dump(dhd_pub_t *dhd)
-{
- return dhdpcie_fis_dump(dhd);
-}
#endif /* DHD_SSSR_DUMP */
#ifdef DHD_WAKE_STATUS
wake_counts_t*
dhd_bus_get_wakecount(dhd_pub_t *dhd)
{
+ if (!dhd->bus) {
+ return NULL;
+ }
return &dhd->bus->wake_counts;
}
int
}
#endif /* DHD_WAKE_STATUS */
-/* Writes random number(s) to the TCM. FW upon initialization reads this register
- * to fetch the random number, and uses it to randomize heap address space layout.
+#ifdef BCM_ASLR_HEAP
+/* Writes random number(s) to the TCM. FW upon initialization reads the metadata
+ * of the random number and then based on metadata, reads the random number from the TCM.
*/
-static int
+static void
dhdpcie_wrt_rnd(struct dhd_bus *bus)
{
bcm_rand_metadata_t rnd_data;
- uint8 rand_buf[BCM_ENTROPY_HOST_NBYTES];
- uint32 count = BCM_ENTROPY_HOST_NBYTES;
- int ret = 0;
+ uint32 rand_no;
+ uint32 count = 1; /* start with 1 random number */
+
uint32 addr = bus->dongle_ram_base + (bus->ramsize - BCM_NVRAM_OFFSET_TCM) -
((bus->nvram_csm & 0xffff)* BCM_NVRAM_IMG_COMPRS_FACTOR + sizeof(rnd_data));
-
- memset(rand_buf, 0, BCM_ENTROPY_HOST_NBYTES);
- rnd_data.signature = htol32(BCM_NVRAM_RNG_SIGNATURE);
+ rnd_data.signature = htol32(BCM_RNG_SIGNATURE);
rnd_data.count = htol32(count);
/* write the metadata about random number */
dhdpcie_bus_membytes(bus, TRUE, addr, (uint8 *)&rnd_data, sizeof(rnd_data));
/* scale back by number of random number counts */
- addr -= count;
-
- /* Now get & write the random number(s) */
- ret = dhd_get_random_bytes(rand_buf, count);
- if (ret != BCME_OK) {
- return ret;
- }
- dhdpcie_bus_membytes(bus, TRUE, addr, rand_buf, count);
-
- return BCME_OK;
-}
-
-void
-dhd_pcie_intr_count_dump(dhd_pub_t *dhd)
-{
- struct dhd_bus *bus = dhd->bus;
- uint64 current_time;
-
- DHD_ERROR(("\n ------- DUMPING INTR enable/disable counters ------- \r\n"));
- DHD_ERROR(("resume_intr_enable_count=%lu dpc_intr_enable_count=%lu\n",
- bus->resume_intr_enable_count, bus->dpc_intr_enable_count));
- DHD_ERROR(("isr_intr_disable_count=%lu suspend_intr_disable_count=%lu\n",
- bus->isr_intr_disable_count, bus->suspend_intr_disable_count));
-#ifdef BCMPCIE_OOB_HOST_WAKE
- DHD_ERROR(("oob_intr_count=%lu oob_intr_enable_count=%lu oob_intr_disable_count=%lu\n",
- bus->oob_intr_count, bus->oob_intr_enable_count,
- bus->oob_intr_disable_count));
- DHD_ERROR(("oob_irq_num=%d last_oob_irq_time="SEC_USEC_FMT"\n",
- dhdpcie_get_oob_irq_num(bus),
- GET_SEC_USEC(bus->last_oob_irq_time)));
- DHD_ERROR(("last_oob_irq_enable_time="SEC_USEC_FMT
- " last_oob_irq_disable_time="SEC_USEC_FMT"\n",
- GET_SEC_USEC(bus->last_oob_irq_enable_time),
- GET_SEC_USEC(bus->last_oob_irq_disable_time)));
- DHD_ERROR(("oob_irq_enabled=%d oob_gpio_level=%d\n",
- dhdpcie_get_oob_irq_status(bus),
- dhdpcie_get_oob_irq_level()));
-#endif /* BCMPCIE_OOB_HOST_WAKE */
- DHD_ERROR(("dpc_return_busdown_count=%lu non_ours_irq_count=%lu\n",
- bus->dpc_return_busdown_count, bus->non_ours_irq_count));
-
- current_time = OSL_LOCALTIME_NS();
- DHD_ERROR(("\ncurrent_time="SEC_USEC_FMT"\n",
- GET_SEC_USEC(current_time)));
- DHD_ERROR(("isr_entry_time="SEC_USEC_FMT
- " isr_exit_time="SEC_USEC_FMT"\n",
- GET_SEC_USEC(bus->isr_entry_time),
- GET_SEC_USEC(bus->isr_exit_time)));
- DHD_ERROR(("dpc_sched_time="SEC_USEC_FMT
- " last_non_ours_irq_time="SEC_USEC_FMT"\n",
- GET_SEC_USEC(bus->dpc_sched_time),
- GET_SEC_USEC(bus->last_non_ours_irq_time)));
- DHD_ERROR(("dpc_entry_time="SEC_USEC_FMT
- " last_process_ctrlbuf_time="SEC_USEC_FMT"\n",
- GET_SEC_USEC(bus->dpc_entry_time),
- GET_SEC_USEC(bus->last_process_ctrlbuf_time)));
- DHD_ERROR(("last_process_flowring_time="SEC_USEC_FMT
- " last_process_txcpl_time="SEC_USEC_FMT"\n",
- GET_SEC_USEC(bus->last_process_flowring_time),
- GET_SEC_USEC(bus->last_process_txcpl_time)));
- DHD_ERROR(("last_process_rxcpl_time="SEC_USEC_FMT
- " last_process_infocpl_time="SEC_USEC_FMT
- " last_process_edl_time="SEC_USEC_FMT"\n",
- GET_SEC_USEC(bus->last_process_rxcpl_time),
- GET_SEC_USEC(bus->last_process_infocpl_time),
- GET_SEC_USEC(bus->last_process_edl_time)));
- DHD_ERROR(("dpc_exit_time="SEC_USEC_FMT
- " resched_dpc_time="SEC_USEC_FMT"\n",
- GET_SEC_USEC(bus->dpc_exit_time),
- GET_SEC_USEC(bus->resched_dpc_time)));
- DHD_ERROR(("last_d3_inform_time="SEC_USEC_FMT"\n",
- GET_SEC_USEC(bus->last_d3_inform_time)));
-
- DHD_ERROR(("\nlast_suspend_start_time="SEC_USEC_FMT
- " last_suspend_end_time="SEC_USEC_FMT"\n",
- GET_SEC_USEC(bus->last_suspend_start_time),
- GET_SEC_USEC(bus->last_suspend_end_time)));
- DHD_ERROR(("last_resume_start_time="SEC_USEC_FMT
- " last_resume_end_time="SEC_USEC_FMT"\n",
- GET_SEC_USEC(bus->last_resume_start_time),
- GET_SEC_USEC(bus->last_resume_end_time)));
-
-#if defined(SHOW_LOGTRACE) && defined(DHD_USE_KTHREAD_FOR_LOGTRACE)
- DHD_ERROR(("logtrace_thread_entry_time="SEC_USEC_FMT
- " logtrace_thread_sem_down_time="SEC_USEC_FMT
- "\nlogtrace_thread_flush_time="SEC_USEC_FMT
- " logtrace_thread_unexpected_break_time="SEC_USEC_FMT
- "\nlogtrace_thread_complete_time="SEC_USEC_FMT"\n",
- GET_SEC_USEC(dhd->logtrace_thr_ts.entry_time),
- GET_SEC_USEC(dhd->logtrace_thr_ts.sem_down_time),
- GET_SEC_USEC(dhd->logtrace_thr_ts.flush_time),
- GET_SEC_USEC(dhd->logtrace_thr_ts.unexpected_break_time),
- GET_SEC_USEC(dhd->logtrace_thr_ts.complete_time)));
-#endif /* SHOW_LOGTRACE && DHD_USE_KTHREAD_FOR_LOGTRACE */
-}
-
-void
-dhd_bus_intr_count_dump(dhd_pub_t *dhd)
-{
- dhd_pcie_intr_count_dump(dhd);
-}
-
-int
-dhd_pcie_dump_wrapper_regs(dhd_pub_t *dhd)
-{
- uint32 save_idx, val;
- si_t *sih = dhd->bus->sih;
- uint32 oob_base, oob_base1;
- uint32 wrapper_dump_list[] = {
- AI_OOBSELOUTA30, AI_OOBSELOUTA74, AI_OOBSELOUTB30, AI_OOBSELOUTB74,
- AI_OOBSELOUTC30, AI_OOBSELOUTC74, AI_OOBSELOUTD30, AI_OOBSELOUTD74,
- AI_RESETSTATUS, AI_RESETCTRL,
- AI_ITIPOOBA, AI_ITIPOOBB, AI_ITIPOOBC, AI_ITIPOOBD,
- AI_ITIPOOBAOUT, AI_ITIPOOBBOUT, AI_ITIPOOBCOUT, AI_ITIPOOBDOUT
- };
- uint32 i;
- hndoobr_reg_t *reg;
- cr4regs_t *cr4regs;
- ca7regs_t *ca7regs;
-
- save_idx = si_coreidx(sih);
-
- DHD_ERROR(("%s: Master wrapper Reg\n", __FUNCTION__));
-
- if (si_setcore(sih, PCIE2_CORE_ID, 0) != NULL) {
- for (i = 0; i < sizeof(wrapper_dump_list) / 4; i++) {
- val = si_wrapperreg(sih, wrapper_dump_list[i], 0, 0);
- DHD_ERROR(("sbreg: addr:0x%x val:0x%x\n", wrapper_dump_list[i], val));
- }
- }
-
- if ((cr4regs = si_setcore(sih, ARMCR4_CORE_ID, 0)) != NULL) {
- DHD_ERROR(("%s: ARM CR4 wrapper Reg\n", __FUNCTION__));
- for (i = 0; i < sizeof(wrapper_dump_list) / 4; i++) {
- val = si_wrapperreg(sih, wrapper_dump_list[i], 0, 0);
- DHD_ERROR(("sbreg: addr:0x%x val:0x%x\n", wrapper_dump_list[i], val));
- }
- DHD_ERROR(("%s: ARM CR4 core Reg\n", __FUNCTION__));
- val = R_REG(dhd->osh, ARM_CR4_REG(cr4regs, corecontrol));
- DHD_ERROR(("reg:0x%x val:0x%x\n", (uint)OFFSETOF(cr4regs_t, corecontrol), val));
- val = R_REG(dhd->osh, ARM_CR4_REG(cr4regs, corecapabilities));
- DHD_ERROR(("reg:0x%x val:0x%x\n",
- (uint)OFFSETOF(cr4regs_t, corecapabilities), val));
- val = R_REG(dhd->osh, ARM_CR4_REG(cr4regs, corestatus));
- DHD_ERROR(("reg:0x%x val:0x%x\n", (uint)OFFSETOF(cr4regs_t, corestatus), val));
- val = R_REG(dhd->osh, ARM_CR4_REG(cr4regs, nmiisrst));
- DHD_ERROR(("reg:0x%x val:0x%x\n", (uint)OFFSETOF(cr4regs_t, nmiisrst), val));
- val = R_REG(dhd->osh, ARM_CR4_REG(cr4regs, nmimask));
- DHD_ERROR(("reg:0x%x val:0x%x\n", (uint)OFFSETOF(cr4regs_t, nmimask), val));
- val = R_REG(dhd->osh, ARM_CR4_REG(cr4regs, isrmask));
- DHD_ERROR(("reg:0x%x val:0x%x\n", (uint)OFFSETOF(cr4regs_t, isrmask), val));
- val = R_REG(dhd->osh, ARM_CR4_REG(cr4regs, swintreg));
- DHD_ERROR(("reg:0x%x val:0x%x\n", (uint)OFFSETOF(cr4regs_t, swintreg), val));
- val = R_REG(dhd->osh, ARM_CR4_REG(cr4regs, intstatus));
- DHD_ERROR(("reg:0x%x val:0x%x\n", (uint)OFFSETOF(cr4regs_t, intstatus), val));
- val = R_REG(dhd->osh, ARM_CR4_REG(cr4regs, cyclecnt));
- DHD_ERROR(("reg:0x%x val:0x%x\n", (uint)OFFSETOF(cr4regs_t, cyclecnt), val));
- val = R_REG(dhd->osh, ARM_CR4_REG(cr4regs, inttimer));
- DHD_ERROR(("reg:0x%x val:0x%x\n", (uint)OFFSETOF(cr4regs_t, inttimer), val));
- val = R_REG(dhd->osh, ARM_CR4_REG(cr4regs, clk_ctl_st));
- DHD_ERROR(("reg:0x%x val:0x%x\n", (uint)OFFSETOF(cr4regs_t, clk_ctl_st), val));
- val = R_REG(dhd->osh, ARM_CR4_REG(cr4regs, powerctl));
- DHD_ERROR(("reg:0x%x val:0x%x\n", (uint)OFFSETOF(cr4regs_t, powerctl), val));
- }
-
- if ((ca7regs = si_setcore(sih, ARMCA7_CORE_ID, 0)) != NULL) {
- DHD_ERROR(("%s: ARM CA7 core Reg\n", __FUNCTION__));
- val = R_REG(dhd->osh, ARM_CA7_REG(ca7regs, corecontrol));
- DHD_ERROR(("reg:0x%x val:0x%x\n", (uint)OFFSETOF(ca7regs_t, corecontrol), val));
- val = R_REG(dhd->osh, ARM_CA7_REG(ca7regs, corecapabilities));
- DHD_ERROR(("reg:0x%x val:0x%x\n",
- (uint)OFFSETOF(ca7regs_t, corecapabilities), val));
- val = R_REG(dhd->osh, ARM_CA7_REG(ca7regs, corestatus));
- DHD_ERROR(("reg:0x%x val:0x%x\n", (uint)OFFSETOF(ca7regs_t, corestatus), val));
- val = R_REG(dhd->osh, ARM_CA7_REG(ca7regs, tracecontrol));
- DHD_ERROR(("reg:0x%x val:0x%x\n", (uint)OFFSETOF(ca7regs_t, tracecontrol), val));
- val = R_REG(dhd->osh, ARM_CA7_REG(ca7regs, clk_ctl_st));
- DHD_ERROR(("reg:0x%x val:0x%x\n", (uint)OFFSETOF(ca7regs_t, clk_ctl_st), val));
- val = R_REG(dhd->osh, ARM_CA7_REG(ca7regs, powerctl));
- DHD_ERROR(("reg:0x%x val:0x%x\n", (uint)OFFSETOF(ca7regs_t, powerctl), val));
- }
-
- DHD_ERROR(("%s: OOBR Reg\n", __FUNCTION__));
-
- oob_base = si_oobr_baseaddr(sih, FALSE);
- oob_base1 = si_oobr_baseaddr(sih, TRUE);
- if (oob_base) {
- dhd_sbreg_op(dhd, oob_base + OOB_STATUSA, &val, TRUE);
- dhd_sbreg_op(dhd, oob_base + OOB_STATUSB, &val, TRUE);
- dhd_sbreg_op(dhd, oob_base + OOB_STATUSC, &val, TRUE);
- dhd_sbreg_op(dhd, oob_base + OOB_STATUSD, &val, TRUE);
- } else if ((reg = si_setcore(sih, HND_OOBR_CORE_ID, 0)) != NULL) {
- val = R_REG(dhd->osh, &reg->intstatus[0]);
- DHD_ERROR(("reg: addr:%p val:0x%x\n", reg, val));
- val = R_REG(dhd->osh, &reg->intstatus[1]);
- DHD_ERROR(("reg: addr:%p val:0x%x\n", reg, val));
- val = R_REG(dhd->osh, &reg->intstatus[2]);
- DHD_ERROR(("reg: addr:%p val:0x%x\n", reg, val));
- val = R_REG(dhd->osh, &reg->intstatus[3]);
- DHD_ERROR(("reg: addr:%p val:0x%x\n", reg, val));
- }
-
- if (oob_base1) {
- DHD_ERROR(("%s: Second OOBR Reg\n", __FUNCTION__));
-
- dhd_sbreg_op(dhd, oob_base1 + OOB_STATUSA, &val, TRUE);
- dhd_sbreg_op(dhd, oob_base1 + OOB_STATUSB, &val, TRUE);
- dhd_sbreg_op(dhd, oob_base1 + OOB_STATUSC, &val, TRUE);
- dhd_sbreg_op(dhd, oob_base1 + OOB_STATUSD, &val, TRUE);
- }
-
- si_setcoreidx(dhd->bus->sih, save_idx);
-
- return 0;
-}
-
-static void
-dhdpcie_hw_war_regdump(dhd_bus_t *bus)
-{
- uint32 save_idx, val;
- volatile uint32 *reg;
-
- save_idx = si_coreidx(bus->sih);
- if ((reg = si_setcore(bus->sih, CC_CORE_ID, 0)) != NULL) {
- val = R_REG(bus->osh, reg + REG_WORK_AROUND);
- DHD_ERROR(("CC HW_WAR :0x%x\n", val));
- }
-
- if ((reg = si_setcore(bus->sih, ARMCR4_CORE_ID, 0)) != NULL) {
- val = R_REG(bus->osh, reg + REG_WORK_AROUND);
- DHD_ERROR(("ARM HW_WAR:0x%x\n", val));
- }
-
- if ((reg = si_setcore(bus->sih, PCIE2_CORE_ID, 0)) != NULL) {
- val = R_REG(bus->osh, reg + REG_WORK_AROUND);
- DHD_ERROR(("PCIE HW_WAR :0x%x\n", val));
- }
- si_setcoreidx(bus->sih, save_idx);
-
- val = PMU_REG_NEW(bus->sih, min_res_mask, 0, 0);
- DHD_ERROR(("MINRESMASK :0x%x\n", val));
-}
-
-int
-dhd_pcie_dma_info_dump(dhd_pub_t *dhd)
-{
- if (dhd->bus->is_linkdown) {
- DHD_ERROR(("\n ------- SKIP DUMPING DMA Registers "
- "due to PCIe link down ------- \r\n"));
- return 0;
- }
-
- DHD_ERROR(("\n ------- DUMPING DMA Registers ------- \r\n"));
-
- //HostToDev
- DHD_ERROR(("HostToDev TX: XmtCtrl=0x%08x XmtPtr=0x%08x\n",
- si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x200, 0, 0),
- si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x204, 0, 0)));
- DHD_ERROR((" : XmtAddrLow=0x%08x XmtAddrHigh=0x%08x\n",
- si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x208, 0, 0),
- si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x20C, 0, 0)));
- DHD_ERROR((" : XmtStatus0=0x%08x XmtStatus1=0x%08x\n",
- si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x210, 0, 0),
- si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x214, 0, 0)));
-
- DHD_ERROR(("HostToDev RX: RcvCtrl=0x%08x RcvPtr=0x%08x\n",
- si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x220, 0, 0),
- si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x224, 0, 0)));
- DHD_ERROR((" : RcvAddrLow=0x%08x RcvAddrHigh=0x%08x\n",
- si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x228, 0, 0),
- si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x22C, 0, 0)));
- DHD_ERROR((" : RcvStatus0=0x%08x RcvStatus1=0x%08x\n",
- si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x230, 0, 0),
- si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x234, 0, 0)));
-
- //DevToHost
- DHD_ERROR(("DevToHost TX: XmtCtrl=0x%08x XmtPtr=0x%08x\n",
- si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x240, 0, 0),
- si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x244, 0, 0)));
- DHD_ERROR((" : XmtAddrLow=0x%08x XmtAddrHigh=0x%08x\n",
- si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x248, 0, 0),
- si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x24C, 0, 0)));
- DHD_ERROR((" : XmtStatus0=0x%08x XmtStatus1=0x%08x\n",
- si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x250, 0, 0),
- si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x254, 0, 0)));
-
- DHD_ERROR(("DevToHost RX: RcvCtrl=0x%08x RcvPtr=0x%08x\n",
- si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x260, 0, 0),
- si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x264, 0, 0)));
- DHD_ERROR((" : RcvAddrLow=0x%08x RcvAddrHigh=0x%08x\n",
- si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x268, 0, 0),
- si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x26C, 0, 0)));
- DHD_ERROR((" : RcvStatus0=0x%08x RcvStatus1=0x%08x\n",
- si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x270, 0, 0),
- si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x274, 0, 0)));
-
- return 0;
-}
-
-bool
-dhd_pcie_dump_int_regs(dhd_pub_t *dhd)
-{
- uint32 intstatus = 0;
- uint32 intmask = 0;
- uint32 d2h_db0 = 0;
- uint32 d2h_mb_data = 0;
-
- DHD_ERROR(("\n ------- DUMPING INTR Status and Masks ------- \r\n"));
- intstatus = si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
- dhd->bus->pcie_mailbox_int, 0, 0);
- if (intstatus == (uint32)-1) {
- DHD_ERROR(("intstatus=0x%x \n", intstatus));
- return FALSE;
- }
-
- intmask = si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
- dhd->bus->pcie_mailbox_mask, 0, 0);
- if (intmask == (uint32) -1) {
- DHD_ERROR(("intstatus=0x%x intmask=0x%x \n", intstatus, intmask));
- return FALSE;
- }
-
- d2h_db0 = si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
- PCID2H_MailBox, 0, 0);
- if (d2h_db0 == (uint32)-1) {
- DHD_ERROR(("intstatus=0x%x intmask=0x%x d2h_db0=0x%x\n",
- intstatus, intmask, d2h_db0));
- return FALSE;
- }
-
- DHD_ERROR(("intstatus=0x%x intmask=0x%x d2h_db0=0x%x\n",
- intstatus, intmask, d2h_db0));
- dhd_bus_cmn_readshared(dhd->bus, &d2h_mb_data, D2H_MB_DATA, 0);
- DHD_ERROR(("d2h_mb_data=0x%x def_intmask=0x%x \r\n", d2h_mb_data,
- dhd->bus->def_intmask));
-
- return TRUE;
-}
-
-void
-dhd_pcie_dump_rc_conf_space_cap(dhd_pub_t *dhd)
-{
- DHD_ERROR(("\n ------- DUMPING PCIE RC config space Registers ------- \r\n"));
- DHD_ERROR(("Pcie RC Uncorrectable Error Status Val=0x%x\n",
- dhdpcie_rc_access_cap(dhd->bus, PCIE_EXTCAP_ID_ERR,
- PCIE_EXTCAP_AER_UCERR_OFFSET, TRUE, FALSE, 0)));
-#ifdef EXTENDED_PCIE_DEBUG_DUMP
- DHD_ERROR(("hdrlog0 =0x%08x hdrlog1 =0x%08x hdrlog2 =0x%08x hdrlog3 =0x%08x\n",
- dhdpcie_rc_access_cap(dhd->bus, PCIE_EXTCAP_ID_ERR,
- PCIE_EXTCAP_ERR_HEADER_LOG_0, TRUE, FALSE, 0),
- dhdpcie_rc_access_cap(dhd->bus, PCIE_EXTCAP_ID_ERR,
- PCIE_EXTCAP_ERR_HEADER_LOG_1, TRUE, FALSE, 0),
- dhdpcie_rc_access_cap(dhd->bus, PCIE_EXTCAP_ID_ERR,
- PCIE_EXTCAP_ERR_HEADER_LOG_2, TRUE, FALSE, 0),
- dhdpcie_rc_access_cap(dhd->bus, PCIE_EXTCAP_ID_ERR,
- PCIE_EXTCAP_ERR_HEADER_LOG_3, TRUE, FALSE, 0)));
-#endif /* EXTENDED_PCIE_DEBUG_DUMP */
-}
-
-int
-dhd_pcie_debug_info_dump(dhd_pub_t *dhd)
-{
- int host_irq_disabled;
-
- DHD_ERROR(("bus->bus_low_power_state = %d\n", dhd->bus->bus_low_power_state));
- host_irq_disabled = dhdpcie_irq_disabled(dhd->bus);
- DHD_ERROR(("host pcie_irq disabled = %d\n", host_irq_disabled));
- dhd_print_tasklet_status(dhd);
- dhd_pcie_intr_count_dump(dhd);
-
- DHD_ERROR(("\n ------- DUMPING PCIE EP Resouce Info ------- \r\n"));
- dhdpcie_dump_resource(dhd->bus);
-
- dhd_pcie_dump_rc_conf_space_cap(dhd);
-
- DHD_ERROR(("RootPort PCIe linkcap=0x%08x\n",
- dhd_debug_get_rc_linkcap(dhd->bus)));
- DHD_ERROR(("\n ------- DUMPING PCIE EP config space Registers ------- \r\n"));
- DHD_ERROR(("Status Command(0x%x)=0x%x, BaseAddress0(0x%x)=0x%x BaseAddress1(0x%x)=0x%x "
- "PCIE_CFG_PMCSR(0x%x)=0x%x\n",
- PCIECFGREG_STATUS_CMD,
- dhd_pcie_config_read(dhd->bus->osh, PCIECFGREG_STATUS_CMD, sizeof(uint32)),
- PCIECFGREG_BASEADDR0,
- dhd_pcie_config_read(dhd->bus->osh, PCIECFGREG_BASEADDR0, sizeof(uint32)),
- PCIECFGREG_BASEADDR1,
- dhd_pcie_config_read(dhd->bus->osh, PCIECFGREG_BASEADDR1, sizeof(uint32)),
- PCIE_CFG_PMCSR,
- dhd_pcie_config_read(dhd->bus->osh, PCIE_CFG_PMCSR, sizeof(uint32))));
- DHD_ERROR(("LinkCtl(0x%x)=0x%x DeviceStatusControl2(0x%x)=0x%x "
- "L1SSControl(0x%x)=0x%x\n", PCIECFGREG_LINK_STATUS_CTRL,
- dhd_pcie_config_read(dhd->bus->osh, PCIECFGREG_LINK_STATUS_CTRL,
- sizeof(uint32)), PCIECFGGEN_DEV_STATUS_CTRL2,
- dhd_pcie_config_read(dhd->bus->osh, PCIECFGGEN_DEV_STATUS_CTRL2,
- sizeof(uint32)), PCIECFGREG_PML1_SUB_CTRL1,
- dhd_pcie_config_read(dhd->bus->osh, PCIECFGREG_PML1_SUB_CTRL1,
- sizeof(uint32))));
-#ifdef EXTENDED_PCIE_DEBUG_DUMP
- DHD_ERROR(("Pcie EP Uncorrectable Error Status Val=0x%x\n",
- dhdpcie_ep_access_cap(dhd->bus, PCIE_EXTCAP_ID_ERR,
- PCIE_EXTCAP_AER_UCERR_OFFSET, TRUE, FALSE, 0)));
- DHD_ERROR(("hdrlog0(0x%x)=0x%08x hdrlog1(0x%x)=0x%08x hdrlog2(0x%x)=0x%08x "
- "hdrlog3(0x%x)=0x%08x\n", PCI_TLP_HDR_LOG1,
- dhd_pcie_config_read(dhd->bus->osh, PCI_TLP_HDR_LOG1, sizeof(uint32)),
- PCI_TLP_HDR_LOG2,
- dhd_pcie_config_read(dhd->bus->osh, PCI_TLP_HDR_LOG2, sizeof(uint32)),
- PCI_TLP_HDR_LOG3,
- dhd_pcie_config_read(dhd->bus->osh, PCI_TLP_HDR_LOG3, sizeof(uint32)),
- PCI_TLP_HDR_LOG4,
- dhd_pcie_config_read(dhd->bus->osh, PCI_TLP_HDR_LOG4, sizeof(uint32))));
- if (dhd->bus->sih->buscorerev >= 24) {
- DHD_ERROR(("DeviceStatusControl(0x%x)=0x%x SubsystemControl(0x%x)=0x%x "
- "L1SSControl2(0x%x)=0x%x\n", PCIECFGREG_DEV_STATUS_CTRL,
- dhd_pcie_config_read(dhd->bus->osh, PCIECFGREG_DEV_STATUS_CTRL,
- sizeof(uint32)), PCIE_CFG_SUBSYSTEM_CONTROL,
- dhd_pcie_config_read(dhd->bus->osh, PCIE_CFG_SUBSYSTEM_CONTROL,
- sizeof(uint32)), PCIECFGREG_PML1_SUB_CTRL2,
- dhd_pcie_config_read(dhd->bus->osh, PCIECFGREG_PML1_SUB_CTRL2,
- sizeof(uint32))));
- dhd_bus_dump_dar_registers(dhd->bus);
- }
-#endif /* EXTENDED_PCIE_DEBUG_DUMP */
-
- if (dhd->bus->is_linkdown) {
- DHD_ERROR(("Skip dumping the PCIe Core registers. link may be DOWN\n"));
- return 0;
- }
-
- DHD_ERROR(("\n ------- DUMPING PCIE core Registers ------- \r\n"));
-
- DHD_ERROR(("ClkReq0(0x%x)=0x%x ClkReq1(0x%x)=0x%x ClkReq2(0x%x)=0x%x "
- "ClkReq3(0x%x)=0x%x\n", PCIECFGREG_PHY_DBG_CLKREQ0,
- dhd_pcie_corereg_read(dhd->bus->sih, PCIECFGREG_PHY_DBG_CLKREQ0),
- PCIECFGREG_PHY_DBG_CLKREQ1,
- dhd_pcie_corereg_read(dhd->bus->sih, PCIECFGREG_PHY_DBG_CLKREQ1),
- PCIECFGREG_PHY_DBG_CLKREQ2,
- dhd_pcie_corereg_read(dhd->bus->sih, PCIECFGREG_PHY_DBG_CLKREQ2),
- PCIECFGREG_PHY_DBG_CLKREQ3,
- dhd_pcie_corereg_read(dhd->bus->sih, PCIECFGREG_PHY_DBG_CLKREQ3)));
-
-#ifdef EXTENDED_PCIE_DEBUG_DUMP
- if (dhd->bus->sih->buscorerev >= 24) {
-
- DHD_ERROR(("ltssm_hist_0(0x%x)=0x%x ltssm_hist_1(0x%x)=0x%x "
- "ltssm_hist_2(0x%x)=0x%x "
- "ltssm_hist_3(0x%x)=0x%x\n", PCIECFGREG_PHY_LTSSM_HIST_0,
- dhd_pcie_corereg_read(dhd->bus->sih, PCIECFGREG_PHY_LTSSM_HIST_0),
- PCIECFGREG_PHY_LTSSM_HIST_1,
- dhd_pcie_corereg_read(dhd->bus->sih, PCIECFGREG_PHY_LTSSM_HIST_1),
- PCIECFGREG_PHY_LTSSM_HIST_2,
- dhd_pcie_corereg_read(dhd->bus->sih, PCIECFGREG_PHY_LTSSM_HIST_2),
- PCIECFGREG_PHY_LTSSM_HIST_3,
- dhd_pcie_corereg_read(dhd->bus->sih, PCIECFGREG_PHY_LTSSM_HIST_3)));
-
- DHD_ERROR(("trefup(0x%x)=0x%x trefup_ext(0x%x)=0x%x\n",
- PCIECFGREG_TREFUP,
- dhd_pcie_corereg_read(dhd->bus->sih, PCIECFGREG_TREFUP),
- PCIECFGREG_TREFUP_EXT,
- dhd_pcie_corereg_read(dhd->bus->sih, PCIECFGREG_TREFUP_EXT)));
- DHD_ERROR(("errlog(0x%x)=0x%x errlog_addr(0x%x)=0x%x "
- "Function_Intstatus(0x%x)=0x%x "
- "Function_Intmask(0x%x)=0x%x Power_Intstatus(0x%x)=0x%x "
- "Power_Intmask(0x%x)=0x%x\n",
- PCIE_CORE_REG_ERRLOG,
- si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
- PCIE_CORE_REG_ERRLOG, 0, 0),
- PCIE_CORE_REG_ERR_ADDR,
- si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
- PCIE_CORE_REG_ERR_ADDR, 0, 0),
- PCIFunctionIntstatus(dhd->bus->sih->buscorerev),
- si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
- PCIFunctionIntstatus(dhd->bus->sih->buscorerev), 0, 0),
- PCIFunctionIntmask(dhd->bus->sih->buscorerev),
- si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
- PCIFunctionIntmask(dhd->bus->sih->buscorerev), 0, 0),
- PCIPowerIntstatus(dhd->bus->sih->buscorerev),
- si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
- PCIPowerIntstatus(dhd->bus->sih->buscorerev), 0, 0),
- PCIPowerIntmask(dhd->bus->sih->buscorerev),
- si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
- PCIPowerIntmask(dhd->bus->sih->buscorerev), 0, 0)));
- DHD_ERROR(("err_hdrlog1(0x%x)=0x%x err_hdrlog2(0x%x)=0x%x "
- "err_hdrlog3(0x%x)=0x%x err_hdrlog4(0x%x)=0x%x\n",
- (uint)OFFSETOF(sbpcieregs_t, u.pcie2.err_hdr_logreg1),
- si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
- OFFSETOF(sbpcieregs_t, u.pcie2.err_hdr_logreg1), 0, 0),
- (uint)OFFSETOF(sbpcieregs_t, u.pcie2.err_hdr_logreg2),
- si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
- OFFSETOF(sbpcieregs_t, u.pcie2.err_hdr_logreg2), 0, 0),
- (uint)OFFSETOF(sbpcieregs_t, u.pcie2.err_hdr_logreg3),
- si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
- OFFSETOF(sbpcieregs_t, u.pcie2.err_hdr_logreg3), 0, 0),
- (uint)OFFSETOF(sbpcieregs_t, u.pcie2.err_hdr_logreg4),
- si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
- OFFSETOF(sbpcieregs_t, u.pcie2.err_hdr_logreg4), 0, 0)));
- DHD_ERROR(("err_code(0x%x)=0x%x\n",
- (uint)OFFSETOF(sbpcieregs_t, u.pcie2.err_code_logreg),
- si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
- OFFSETOF(sbpcieregs_t, u.pcie2.err_code_logreg), 0, 0)));
-
- dhd_pcie_dump_wrapper_regs(dhd);
- dhdpcie_hw_war_regdump(dhd->bus);
- }
-#endif /* EXTENDED_PCIE_DEBUG_DUMP */
-
- dhd_pcie_dma_info_dump(dhd);
-
- return 0;
-}
-
-bool
-dhd_bus_force_bt_quiesce_enabled(struct dhd_bus *bus)
-{
- return bus->force_bt_quiesce;
-}
-
-#ifdef DHD_HP2P
-uint16
-dhd_bus_get_hp2p_ring_max_size(struct dhd_bus *bus, bool tx)
-{
- if (tx)
- return bus->hp2p_txcpl_max_items;
- else
- return bus->hp2p_rxcpl_max_items;
-}
-
-static uint16
-dhd_bus_set_hp2p_ring_max_size(struct dhd_bus *bus, bool tx, uint16 val)
-{
- if (tx)
- bus->hp2p_txcpl_max_items = val;
- else
- bus->hp2p_rxcpl_max_items = val;
- return val;
-}
-#endif /* DHD_HP2P */
-
-static bool
-dhd_bus_tcm_test(struct dhd_bus *bus)
-{
- int ret = 0;
- int size; /* Full mem size */
- int start; /* Start address */
- int read_size = 0; /* Read size of each iteration */
- int num = 0;
- uint8 *read_buf, *write_buf;
- uint8 init_val[NUM_PATTERNS] = {
- 0xFFu, /* 11111111 */
- 0x00u, /* 00000000 */
- };
-
- if (!bus) {
- DHD_ERROR(("%s: bus is NULL !\n", __FUNCTION__));
- return FALSE;
- }
-
- read_buf = MALLOCZ(bus->dhd->osh, MEMBLOCK);
-
- if (!read_buf) {
- DHD_ERROR(("%s: MALLOC of read_buf failed\n", __FUNCTION__));
- return FALSE;
- }
-
- write_buf = MALLOCZ(bus->dhd->osh, MEMBLOCK);
-
- if (!write_buf) {
- MFREE(bus->dhd->osh, read_buf, MEMBLOCK);
- DHD_ERROR(("%s: MALLOC of write_buf failed\n", __FUNCTION__));
- return FALSE;
- }
-
- DHD_ERROR(("%s: start %x, size: %x\n", __FUNCTION__, bus->dongle_ram_base, bus->ramsize));
- DHD_ERROR(("%s: memblock size %d, #pattern %d\n", __FUNCTION__, MEMBLOCK, NUM_PATTERNS));
-
- while (num < NUM_PATTERNS) {
- start = bus->dongle_ram_base;
- /* Get full mem size */
- size = bus->ramsize;
-
- memset(write_buf, init_val[num], MEMBLOCK);
- while (size > 0) {
- read_size = MIN(MEMBLOCK, size);
- memset(read_buf, 0, read_size);
-
- /* Write */
- if ((ret = dhdpcie_bus_membytes(bus, TRUE, start, write_buf, read_size))) {
- DHD_ERROR(("%s: Write Error membytes %d\n", __FUNCTION__, ret));
- MFREE(bus->dhd->osh, read_buf, MEMBLOCK);
- MFREE(bus->dhd->osh, write_buf, MEMBLOCK);
- return FALSE;
- }
-
- /* Read */
- if ((ret = dhdpcie_bus_membytes(bus, FALSE, start, read_buf, read_size))) {
- DHD_ERROR(("%s: Read Error membytes %d\n", __FUNCTION__, ret));
- MFREE(bus->dhd->osh, read_buf, MEMBLOCK);
- MFREE(bus->dhd->osh, write_buf, MEMBLOCK);
- return FALSE;
- }
-
- /* Compare */
- if (memcmp(read_buf, write_buf, read_size)) {
- DHD_ERROR(("%s: Mismatch at %x, iter : %d\n",
- __FUNCTION__, start, num));
- prhex("Readbuf", read_buf, read_size);
- prhex("Writebuf", write_buf, read_size);
- MFREE(bus->dhd->osh, read_buf, MEMBLOCK);
- MFREE(bus->dhd->osh, write_buf, MEMBLOCK);
- return FALSE;
- }
-
- /* Decrement size and increment start address */
- size -= read_size;
- start += read_size;
- }
- num++;
- }
-
- MFREE(bus->dhd->osh, read_buf, MEMBLOCK);
- MFREE(bus->dhd->osh, write_buf, MEMBLOCK);
-
- DHD_ERROR(("%s: Success iter : %d\n", __FUNCTION__, num));
- return TRUE;
+ addr -= sizeof(count) * count;
+ /* Now write the random number(s) */
+ rand_no = htol32(dhd_get_random_number());
+ dhdpcie_bus_membytes(bus, TRUE, addr, (uint8 *)&rand_no, sizeof(rand_no));
}
+#endif /* BCM_ASLR_HEAP */
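#ifdef BCM_ASLR_HEAP
/*
 * A minimal sketch of the TCM address arithmetic used by dhdpcie_wrt_rnd()
 * above; the helper name is hypothetical and only restates the computation
 * for readability.
 */
static uint32
dhd_example_rnd_metadata_addr(struct dhd_bus *bus)
{
	/* Top of dongle RAM, below the area reserved by BCM_NVRAM_OFFSET_TCM */
	uint32 ram_top = bus->dongle_ram_base + (bus->ramsize - BCM_NVRAM_OFFSET_TCM);
	/* Size of the compressed NVRAM image packed at the top of RAM */
	uint32 nvram_sz = (bus->nvram_csm & 0xffff) * BCM_NVRAM_IMG_COMPRS_FACTOR;

	/* The bcm_rand_metadata_t header sits just below the NVRAM image; the
	 * random word(s) are then written count * sizeof(uint32) bytes below it
	 * (count is 1 in the code above).
	 */
	return ram_top - (nvram_sz + (uint32)sizeof(bcm_rand_metadata_t));
}
#endif /* BCM_ASLR_HEAP */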
/*
* Linux DHD Bus Module for PCIE
*
- * Copyright (C) 1999-2019, Broadcom.
- *
+ * Copyright (C) 1999-2017, Broadcom Corporation
+ *
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
* under the terms of the GNU General Public License version 2 (the "GPL"),
* available at http://www.broadcom.com/licenses/GPLv2.php, with the
* following added to such license:
- *
+ *
* As a special exception, the copyright holders of this software give you
* permission to link this software with independent modules, and to copy and
* distribute the resulting executable under terms of your choice, provided that
* the license of that module. An independent module is a module which is not
* derived from this software. The special exception does not apply to any
* modifications of the software.
- *
+ *
* Notwithstanding the above, under no circumstances may you combine this
* software in any way with any other Broadcom software provided under a license
* other than the GPL, without Broadcom's express prior written consent.
*
* <<Broadcom-WL-IPTag/Open:>>
*
- * $Id: dhd_pcie.h 816392 2019-04-24 14:39:02Z $
+ * $Id: dhd_pcie.h 707536 2017-06-28 04:23:48Z $
*/
+
#ifndef dhd_pcie_h
#define dhd_pcie_h
#include <bcmpcie.h>
#include <hnd_cons.h>
+#ifdef SUPPORT_LINKDOWN_RECOVERY
+#ifdef CONFIG_ARCH_MSM
+#ifdef CONFIG_PCI_MSM
+#include <linux/msm_pcie.h>
+#else
+#include <mach/msm_pcie.h>
+#endif /* CONFIG_PCI_MSM */
+#endif /* CONFIG_ARCH_MSM */
+#ifdef EXYNOS_PCIE_LINKDOWN_RECOVERY
+#if defined(CONFIG_SOC_EXYNOS8890) || defined(CONFIG_SOC_EXYNOS8895)
+#include <linux/exynos-pci-noti.h>
+extern int exynos_pcie_register_event(struct exynos_pcie_register_event *reg);
+extern int exynos_pcie_deregister_event(struct exynos_pcie_register_event *reg);
+#endif /* CONFIG_SOC_EXYNOS8890 || CONFIG_SOC_EXYNOS8895 */
+#endif /* EXYNOS_PCIE_LINKDOWN_RECOVERY */
+#endif /* SUPPORT_LINKDOWN_RECOVERY */
+
+#ifdef DHD_PCIE_RUNTIMEPM
+#include <linux/mutex.h>
+#include <linux/wait.h>
+
+#define DEFAULT_DHD_RUNTIME_MS 100
+#ifndef CUSTOM_DHD_RUNTIME_MS
+#define CUSTOM_DHD_RUNTIME_MS DEFAULT_DHD_RUNTIME_MS
+#endif /* CUSTOM_DHD_RUNTIME_MS */
+
+
+#ifndef MAX_IDLE_COUNT
+#define MAX_IDLE_COUNT 16
+#endif /* MAX_IDLE_COUNT */
+
+#ifndef MAX_RESUME_WAIT
+#define MAX_RESUME_WAIT 100
+#endif /* MAX_RESUME_WAIT */
+#endif /* DHD_PCIE_RUNTIMEPM */
/* defines */
-#define PCIE_SHARED_VERSION PCIE_SHARED_VERSION_7
#define PCMSGBUF_HDRLEN 0
#define DONGLE_REG_MAP_SIZE (32 * 1024)
#define REMAP_ENAB(bus) ((bus)->remap)
#define REMAP_ISADDR(bus, a) (((a) >= ((bus)->orig_ramsize)) && ((a) < ((bus)->ramsize)))
+#ifdef SUPPORT_LINKDOWN_RECOVERY
+#ifdef CONFIG_ARCH_MSM
+#define struct_pcie_notify struct msm_pcie_notify
+#define struct_pcie_register_event struct msm_pcie_register_event
+#endif /* CONFIG_ARCH_MSM */
+#ifdef EXYNOS_PCIE_LINKDOWN_RECOVERY
+#if defined(CONFIG_SOC_EXYNOS8890) || defined(CONFIG_SOC_EXYNOS8895)
+#define struct_pcie_notify struct exynos_pcie_notify
+#define struct_pcie_register_event struct exynos_pcie_register_event
+#endif /* CONFIG_SOC_EXYNOS8890 || CONFIG_SOC_EXYNOS8895 */
+#endif /* EXYNOS_PCIE_LINKDOWN_RECOVERY */
+#endif /* SUPPORT_LINKDOWN_RECOVERY */
+
#define MAX_DHD_TX_FLOWS 320
/* user defined data structures */
/* Device console log buffer state */
-#define CONSOLE_LINE_MAX 192u
+#define CONSOLE_LINE_MAX 192
#define CONSOLE_BUFFER_MAX (8 * 1024)
#ifdef IDLE_TX_FLOW_MGMT
#define IDLE_FLOW_RING_TIMEOUT 5000
#endif /* IDLE_TX_FLOW_MGMT */
-/* HWA enabled and inited */
-#define HWA_ACTIVE(dhd) (((dhd)->hwa_enable) && ((dhd)->hwa_inited))
+#ifdef DEVICE_TX_STUCK_DETECT
+#define DEVICE_TX_STUCK_CKECK_TIMEOUT 1000 /* 1 sec */
+#define DEVICE_TX_STUCK_TIMEOUT 10000 /* 10 secs */
+#define DEVICE_TX_STUCK_WARN_DURATION (DEVICE_TX_STUCK_TIMEOUT / DEVICE_TX_STUCK_CKECK_TIMEOUT)
+#define DEVICE_TX_STUCK_DURATION (DEVICE_TX_STUCK_WARN_DURATION * 2)
+#endif /* DEVICE_TX_STUCK_DETECT */
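#ifdef DEVICE_TX_STUCK_DETECT
/*
 * A quick reading of the thresholds above, assuming the stuck check runs once
 * per DEVICE_TX_STUCK_CKECK_TIMEOUT (1 s): a warning is due after 10
 * consecutive stuck checks and the full stuck handling after 20. The enum
 * below only restates that arithmetic as an illustration.
 */
enum {
	DHD_EXAMPLE_TX_STUCK_WARN_CHECKS = DEVICE_TX_STUCK_WARN_DURATION, /* 10000 / 1000 = 10 */
	DHD_EXAMPLE_TX_STUCK_CHECKS = DEVICE_TX_STUCK_DURATION            /* 10 * 2 = 20 */
};
#endif /* DEVICE_TX_STUCK_DETECT */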
/* implicit DMA for h2d wr and d2h rd indice from Host memory to TCM */
#define IDMA_ENAB(dhd) ((dhd)->idma_enable)
#define IDMA_ACTIVE(dhd) (((dhd)->idma_enable) && ((dhd)->idma_inited))
-#define IDMA_CAPABLE(bus) (((bus)->sih->buscorerev == 19) || ((bus)->sih->buscorerev >= 23))
+#define IDMA_DS_ENAB(dhd) ((dhd)->idma_retention_ds)
+#define IDMA_DS_ACTIVE(dhd) ((dhd)->bus->dongle_in_ds)
/* IFRM (Implicit Flow Ring Manager enable and inited */
#define IFRM_ENAB(dhd) ((dhd)->ifrm_enable)
#define IFRM_ACTIVE(dhd) (((dhd)->ifrm_enable) && ((dhd)->ifrm_inited))
-/* DAR registers use for h2d doorbell */
-#define DAR_ENAB(dhd) ((dhd)->dar_enable)
-#define DAR_ACTIVE(dhd) (((dhd)->dar_enable) && ((dhd)->dar_inited))
-
-/* DAR WAR for revs < 64 */
-#define DAR_PWRREQ(bus) (((bus)->_dar_war) && DAR_ACTIVE((bus)->dhd))
-
/* PCIE CTO Prevention and Recovery */
-#define PCIECTO_ENAB(bus) ((bus)->cto_enable)
+#define PCIECTO_ENAB(dhd) ((dhd)->cto_enable)
/* Implicit DMA index usage :
* Index 0 for h2d write index transfer
#define IDMA_IDX1 1
#define IDMA_IDX2 2
#define IDMA_IDX3 3
-#define DMA_TYPE_SHIFT 4
-#define DMA_TYPE_IDMA 1
#define DHDPCIE_CONFIG_HDR_SIZE 16
#define DHDPCIE_CONFIG_CHECK_DELAY_MS 10 /* 10ms */
uint32 ring_state_r;
} ring_sh_info_t;
+
#define DEVICE_WAKE_NONE 0
#define DEVICE_WAKE_OOB 1
#define DEVICE_WAKE_INB 2
#define OOB_DW_ENAB(bus) ((bus)->dw_option == DEVICE_WAKE_OOB)
#define NO_DW_ENAB(bus) ((bus)->dw_option == DEVICE_WAKE_NONE)
-#define PCIE_RELOAD_WAR_ENAB(buscorerev) \
- ((buscorerev == 66) || (buscorerev == 67) || (buscorerev == 68) || (buscorerev == 70))
-
-/*
- * HW JIRA - CRWLPCIEGEN2-672
- * Producer Index Feature which is used by F1 gets reset on F0 FLR
- * fixed in REV68
- */
-#define PCIE_ENUM_RESET_WAR_ENAB(buscorerev) \
- ((buscorerev == 66) || (buscorerev == 67))
-
struct dhd_bus;
struct dhd_pcie_rev {
uint32 bar1_win;
} dhdpcie_config_save_t;
-/* The level of bus communication with the dongle */
-enum dhd_bus_low_power_state {
- DHD_BUS_NO_LOW_POWER_STATE, /* Not in low power state */
- DHD_BUS_D3_INFORM_SENT, /* D3 INFORM sent */
- DHD_BUS_D3_ACK_RECIEVED, /* D3 ACK recieved */
-};
-
-/** Instantiated once for each hardware (dongle) instance that this DHD manages */
typedef struct dhd_bus {
- dhd_pub_t *dhd; /**< pointer to per hardware (dongle) unique instance */
+ dhd_pub_t *dhd;
struct pci_dev *rc_dev; /* pci RC device handle */
struct pci_dev *dev; /* pci device handle */
+#ifdef DHD_EFI
+ void *pcie_dev;
+#endif
dll_t flowring_active_list; /* constructed list of tx flowring queues */
#ifdef IDLE_TX_FLOW_MGMT
/* stores the timestamp of active list processing */
#endif /* IDLE_TX_FLOW_MGMT */
+#ifdef DEVICE_TX_STUCK_DETECT
+ /* Flag to enable/disable device tx stuck monitor by DHD IOVAR dev_tx_stuck_monitor */
+ uint32 dev_tx_stuck_monitor;
+ /* Stores the timestamp (msec) of the last device Tx stuck check */
+ uint32 device_tx_stuck_check;
+#endif /* DEVICE_TX_STUCK_DETECT */
+
si_t *sih; /* Handle for SI calls */
char *vars; /* Variables (from CIS and/or other) */
uint varsz; /* Size of variables buffer */
sbpcieregs_t *reg; /* Registers for PCIE core */
uint armrev; /* CPU core revision */
- uint coreid; /* CPU core id */
uint ramrev; /* SOCRAM core revision */
uint32 ramsize; /* Size of RAM in SOCRAM (bytes) */
uint32 orig_ramsize; /* Size of RAM in SOCRAM (bytes) */
uint16 cl_devid; /* cached devid for dhdsdio_probe_attach() */
char *fw_path; /* module_param: path to firmware image */
char *nv_path; /* module_param: path to nvram vars file */
+#ifdef CACHE_FW_IMAGES
+ int processed_nvram_params_len; /* Modified len of NVRAM info */
+#endif
+
struct pktq txq; /* Queue length used for flow-control */
ulong shared_addr;
pciedev_shared_t *pcie_sh;
+ bool bus_flowctrl;
uint32 dma_rxoffset;
volatile char *regs; /* pci device memory va */
volatile char *tcm; /* pci device memory va */
/* version 3 shared struct related info end */
uint32 def_intmask;
- uint32 d2h_mb_mask;
- uint32 pcie_mailbox_mask;
- uint32 pcie_mailbox_int;
bool ltrsleep_on_unload;
uint wait_for_d3_ack;
uint16 max_tx_flowrings;
dhd_timeout_t doorbell_timer;
bool device_wake_state;
+#ifdef PCIE_OOB
+ bool oob_enabled;
+#endif /* PCIE_OOB */
bool irq_registered;
- bool d2h_intr_method;
+#ifdef SUPPORT_LINKDOWN_RECOVERY
+#if defined(CONFIG_ARCH_MSM) || (defined(EXYNOS_PCIE_LINKDOWN_RECOVERY) && \
+ defined(CONFIG_SOC_EXYNOS8890) || defined(CONFIG_SOC_EXYNOS8895))
+#ifdef CONFIG_ARCH_MSM
+ uint8 no_cfg_restore;
+#endif /* CONFIG_ARCH_MSM */
+ struct_pcie_register_event pcie_event;
+#endif /* CONFIG_ARCH_MSM || (EXYNOS_PCIE_LINKDOWN_RECOVERY &&
+ * (CONFIG_SOC_EXYNOS8890 || CONFIG_SOC_EXYNOS8895))
+ */
+ bool read_shm_fail;
+#endif /* SUPPORT_LINKDOWN_RECOVERY */
int32 idletime; /* Control for activity timeout */
+#ifdef DHD_PCIE_RUNTIMEPM
+ int32 idlecount; /* Activity timeout counter */
+ int32 bus_wake; /* For wake up the bus */
+ bool runtime_resume_done; /* For check runtime suspend end */
+ struct mutex pm_lock; /* Synchronize for system PM & runtime PM */
+ wait_queue_head_t rpm_queue; /* wait-queue for bus wake up */
+#endif /* DHD_PCIE_RUNTIMEPM */
uint32 d3_inform_cnt;
uint32 d0_inform_cnt;
uint32 d0_inform_in_use_cnt;
uint8 force_suspend;
uint8 is_linkdown;
- uint8 no_bus_init;
#ifdef IDLE_TX_FLOW_MGMT
bool enable_idle_flowring_mgmt;
#endif /* IDLE_TX_FLOW_MGMT */
struct dhd_pcie_rev api;
bool use_mailbox;
+ bool d3_suspend_pending;
bool use_d0_inform;
- void *bus_lock;
- void *backplane_access_lock;
- enum dhd_bus_low_power_state bus_low_power_state;
uint32 hostready_count; /* Number of hostready issued */
-#if defined(BCMPCIE_OOB_HOST_WAKE)
+#if defined(PCIE_OOB) || defined(BCMPCIE_OOB_HOST_WAKE)
bool oob_presuspend;
-#endif // endif
+#endif /* PCIE_OOB || BCMPCIE_OOB_HOST_WAKE */
+ bool dongle_in_ds;
+ uint8 dw_option;
+#ifdef PCIE_INB_DW
+ bool inb_enabled;
+ uint32 ds_exit_timeout;
+ uint32 host_sleep_exit_timeout;
+ uint wait_for_ds_exit;
+ uint32 inband_dw_assert_cnt; /* # of inband device_wake assert */
+ uint32 inband_dw_deassert_cnt; /* # of inband device_wake deassert */
+ uint32 inband_ds_exit_host_cnt; /* # of DS-EXIT , host initiated */
+ uint32 inband_ds_exit_device_cnt; /* # of DS-EXIT , device initiated */
+ uint32 inband_ds_exit_to_cnt; /* # of DS-EXIT timeout */
+ uint32 inband_host_sleep_exit_to_cnt; /* # of Host_Sleep exit timeout */
+ void *inb_lock; /* Lock to serialize in band device wake activity */
+ /* # of contexts in the host which currently want a FW transaction */
+ uint32 host_active_cnt;
+#endif /* PCIE_INB_DW */
dhdpcie_config_save_t saved_config;
ulong resume_intr_enable_count;
ulong dpc_intr_enable_count;
ulong isr_intr_disable_count;
ulong suspend_intr_disable_count;
ulong dpc_return_busdown_count;
- ulong non_ours_irq_count;
-#ifdef BCMPCIE_OOB_HOST_WAKE
- ulong oob_intr_count;
- ulong oob_intr_enable_count;
- ulong oob_intr_disable_count;
- uint64 last_oob_irq_time;
- uint64 last_oob_irq_enable_time;
- uint64 last_oob_irq_disable_time;
-#endif /* BCMPCIE_OOB_HOST_WAKE */
- uint64 isr_entry_time;
- uint64 isr_exit_time;
- uint64 dpc_sched_time;
- uint64 dpc_entry_time;
- uint64 dpc_exit_time;
- uint64 resched_dpc_time;
- uint64 last_d3_inform_time;
- uint64 last_process_ctrlbuf_time;
- uint64 last_process_flowring_time;
- uint64 last_process_txcpl_time;
- uint64 last_process_rxcpl_time;
- uint64 last_process_infocpl_time;
- uint64 last_process_edl_time;
- uint64 last_suspend_start_time;
- uint64 last_suspend_end_time;
- uint64 last_resume_start_time;
- uint64 last_resume_end_time;
- uint64 last_non_ours_irq_time;
- uint8 hwa_enab_bmap;
bool idma_enabled;
bool ifrm_enabled;
- bool dar_enabled;
- uint32 dmaxfer_complete;
- uint8 dw_option;
- bool _dar_war;
- uint8 dma_chan;
- bool cto_enable; /* enable PCIE CTO Prevention and recovery */
- uint32 cto_threshold; /* PCIE CTO timeout threshold */
- bool cto_triggered; /* CTO is triggered */
- int pwr_req_ref;
- bool flr_force_fail; /* user intends to simulate flr force fail */
- bool intr_enabled; /* ready to receive interrupts from dongle */
- bool force_bt_quiesce; /* send bt_quiesce command to BT driver. */
-#if defined(DHD_H2D_LOG_TIME_SYNC)
- ulong dhd_rte_time_sync_count; /* OSL_SYSUPTIME_US() */
-#endif /* DHD_H2D_LOG_TIME_SYNC */
- bool rc_ep_aspm_cap; /* RC and EP ASPM capable */
- bool rc_ep_l1ss_cap; /* EC and EP L1SS capable */
- uint16 hp2p_txcpl_max_items;
- uint16 hp2p_rxcpl_max_items;
- /* PCIE coherent status */
- uint32 coherent_state;
+#if defined(PCIE_OOB) || defined(PCIE_INB_DW)
+ bool ds_enabled;
+#endif
+#ifdef DHD_PCIE_RUNTIMEPM
+ bool chk_pm; /* To avoid counting of wake up from Runtime PM */
+#endif /* DHD_PCIE_RUNTIMEPM */
} dhd_bus_t;
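/*
 * A minimal sketch of how the dw_option field in dhd_bus_t above is
 * interpreted through the DEVICE_WAKE_* values and the OOB_DW_ENAB()/
 * NO_DW_ENAB() macros defined earlier in this header; the helper name is
 * hypothetical.
 */
static INLINE const char *
dhd_example_dw_mode_str(dhd_bus_t *bus)
{
	if (OOB_DW_ENAB(bus))
		return "oob";		/* dw_option == DEVICE_WAKE_OOB */
	if (bus->dw_option == DEVICE_WAKE_INB)
		return "inband";
	/* dw_option is set via dhdpcie_bus_enab_pcie_dw() */
	return NO_DW_ENAB(bus) ? "none" : "unknown";
}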
-#ifdef DHD_MSI_SUPPORT
-extern uint enable_msi;
-#endif /* DHD_MSI_SUPPORT */
-
-enum {
- PCIE_INTX = 0,
- PCIE_MSI = 1
-};
-
/* function declarations */
extern uint32* dhdpcie_bus_reg_map(osl_t *osh, ulong addr, int size);
extern void dhdpcie_bus_unregister(void);
extern bool dhdpcie_chipmatch(uint16 vendor, uint16 device);
-extern int dhdpcie_bus_attach(osl_t *osh, dhd_bus_t **bus_ptr,
+extern struct dhd_bus* dhdpcie_bus_attach(osl_t *osh,
volatile char *regs, volatile char *tcm, void *pci_dev);
extern uint32 dhdpcie_bus_cfg_read_dword(struct dhd_bus *bus, uint32 addr, uint32 size);
extern void dhdpcie_bus_cfg_write_dword(struct dhd_bus *bus, uint32 addr, uint32 size, uint32 data);
extern void dhdpcie_free_irq(dhd_bus_t *bus);
extern void dhdpcie_bus_ringbell_fast(struct dhd_bus *bus, uint32 value);
extern void dhdpcie_bus_ringbell_2_fast(struct dhd_bus *bus, uint32 value, bool devwake);
-extern void dhdpcie_dongle_reset(dhd_bus_t *bus);
-#ifdef DHD_PCIE_NATIVE_RUNTIMEPM
-extern int dhdpcie_bus_suspend(struct dhd_bus *bus, bool state, bool byint);
-#else
extern int dhdpcie_bus_suspend(struct dhd_bus *bus, bool state);
-#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
extern int dhdpcie_pci_suspend_resume(struct dhd_bus *bus, bool state);
extern uint32 dhdpcie_force_alp(struct dhd_bus *bus, bool enable);
extern uint32 dhdpcie_set_l1_entry_time(struct dhd_bus *bus, int force_l1_entry_time);
extern int dhdpcie_disable_irq(dhd_bus_t *bus);
extern int dhdpcie_disable_irq_nosync(dhd_bus_t *bus);
extern int dhdpcie_enable_irq(dhd_bus_t *bus);
-
-extern void dhd_bus_dump_dar_registers(struct dhd_bus *bus);
-
extern uint32 dhdpcie_rc_config_read(dhd_bus_t *bus, uint offset);
extern uint32 dhdpcie_rc_access_cap(dhd_bus_t *bus, int cap, uint offset, bool is_ext,
bool is_write, uint32 writeval);
-extern uint32 dhdpcie_ep_access_cap(dhd_bus_t *bus, int cap, uint offset, bool is_ext,
- bool is_write, uint32 writeval);
extern uint32 dhd_debug_get_rc_linkcap(dhd_bus_t *bus);
extern int dhdpcie_start_host_pcieclock(dhd_bus_t *bus);
extern int dhdpcie_stop_host_pcieclock(dhd_bus_t *bus);
extern int dhdpcie_disable_device(dhd_bus_t *bus);
extern int dhdpcie_alloc_resource(dhd_bus_t *bus);
extern void dhdpcie_free_resource(dhd_bus_t *bus);
-extern void dhdpcie_dump_resource(dhd_bus_t *bus);
extern int dhdpcie_bus_request_irq(struct dhd_bus *bus);
-void dhdpcie_os_setbar1win(dhd_bus_t *bus, uint32 addr);
-void dhdpcie_os_wtcm8(dhd_bus_t *bus, ulong offset, uint8 data);
-uint8 dhdpcie_os_rtcm8(dhd_bus_t *bus, ulong offset);
-void dhdpcie_os_wtcm16(dhd_bus_t *bus, ulong offset, uint16 data);
-uint16 dhdpcie_os_rtcm16(dhd_bus_t *bus, ulong offset);
-void dhdpcie_os_wtcm32(dhd_bus_t *bus, ulong offset, uint32 data);
-uint32 dhdpcie_os_rtcm32(dhd_bus_t *bus, ulong offset);
-#ifdef DHD_SUPPORT_64BIT
-void dhdpcie_os_wtcm64(dhd_bus_t *bus, ulong offset, uint64 data);
-uint64 dhdpcie_os_rtcm64(dhd_bus_t *bus, ulong offset);
-#endif // endif
-
extern int dhdpcie_enable_device(dhd_bus_t *bus);
-
#ifdef BCMPCIE_OOB_HOST_WAKE
extern int dhdpcie_oob_intr_register(dhd_bus_t *bus);
extern void dhdpcie_oob_intr_unregister(dhd_bus_t *bus);
extern void dhdpcie_oob_intr_set(dhd_bus_t *bus, bool enable);
-extern int dhdpcie_get_oob_irq_num(struct dhd_bus *bus);
-extern int dhdpcie_get_oob_irq_status(struct dhd_bus *bus);
-extern int dhdpcie_get_oob_irq_level(void);
#endif /* BCMPCIE_OOB_HOST_WAKE */
+#ifdef PCIE_OOB
+extern void dhd_oob_set_bt_reg_on(struct dhd_bus *bus, bool val);
+extern int dhd_oob_get_bt_reg_on(struct dhd_bus *bus);
+extern void dhdpcie_oob_init(dhd_bus_t *bus);
+extern void dhd_bus_doorbell_timeout_reset(struct dhd_bus *bus);
+extern int dhd_os_oob_set_device_wake(struct dhd_bus *bus, bool val);
+extern void dhd_os_ib_set_device_wake(struct dhd_bus *bus, bool val);
+#endif /* PCIE_OOB */
#if defined(CONFIG_ARCH_EXYNOS)
#define SAMSUNG_PCIE_VENDOR_ID 0x144d
-#if defined(CONFIG_MACH_UNIVERSAL7420) || defined(CONFIG_SOC_EXYNOS7420)
+#if defined(CONFIG_MACH_UNIVERSAL5433)
+#define SAMSUNG_PCIE_DEVICE_ID 0xa5e3
+#define SAMSUNG_PCIE_CH_NUM
+#elif defined(CONFIG_MACH_UNIVERSAL7420)
#define SAMSUNG_PCIE_DEVICE_ID 0xa575
#define SAMSUNG_PCIE_CH_NUM 1
#elif defined(CONFIG_SOC_EXYNOS8890)
#define SAMSUNG_PCIE_DEVICE_ID 0xa544
#define SAMSUNG_PCIE_CH_NUM 0
-#elif defined(CONFIG_SOC_EXYNOS8895) || defined(CONFIG_SOC_EXYNOS9810) || \
- defined(CONFIG_SOC_EXYNOS9820)
+#elif defined(CONFIG_SOC_EXYNOS7420)
+#define SAMSUNG_PCIE_DEVICE_ID 0xa575
+#define SAMSUNG_PCIE_CH_NUM 1
+#elif defined(CONFIG_SOC_EXYNOS8895)
#define SAMSUNG_PCIE_DEVICE_ID 0xecec
#define SAMSUNG_PCIE_CH_NUM 0
#else
#define MSM_PCIE_DEVICE_ID 0x0104
#elif defined(CONFIG_ARCH_MSM8998)
#define MSM_PCIE_DEVICE_ID 0x0105
-#elif defined(CONFIG_ARCH_SDM845) || defined(CONFIG_ARCH_SM8150)
-#define MSM_PCIE_DEVICE_ID 0x0106
#else
#error "Not supported platform"
-#endif // endif
+#endif
#endif /* CONFIG_ARCH_MSM */
#if defined(CONFIG_X86)
#define TEGRA_PCIE_DEVICE_ID 0x4347
#endif /* CONFIG_ARCH_TEGRA */
-#define HIKEY_PCIE_VENDOR_ID 0x19e5
-#define HIKEY_PCIE_DEVICE_ID 0x3660
-
-#define DUMMY_PCIE_VENDOR_ID 0xffff
-#define DUMMY_PCIE_DEVICE_ID 0xffff
-
#if defined(CONFIG_ARCH_EXYNOS)
#define PCIE_RC_VENDOR_ID SAMSUNG_PCIE_VENDOR_ID
#define PCIE_RC_DEVICE_ID SAMSUNG_PCIE_DEVICE_ID
#elif defined(CONFIG_ARCH_TEGRA)
#define PCIE_RC_VENDOR_ID TEGRA_PCIE_VENDOR_ID
#define PCIE_RC_DEVICE_ID TEGRA_PCIE_DEVICE_ID
-#else
-#define PCIE_RC_VENDOR_ID HIKEY_PCIE_VENDOR_ID
-#define PCIE_RC_DEVICE_ID HIKEY_PCIE_DEVICE_ID
#endif /* CONFIG_ARCH_EXYNOS */
-#define DHD_REGULAR_RING 0
-#define DHD_HP2P_RING 1
-
#ifdef USE_EXYNOS_PCIE_RC_PMPATCH
+#ifdef CONFIG_MACH_UNIVERSAL5433
+extern int exynos_pcie_pm_suspend(void);
+extern int exynos_pcie_pm_resume(void);
+#else
extern int exynos_pcie_pm_suspend(int ch_num);
extern int exynos_pcie_pm_resume(int ch_num);
+#endif /* CONFIG_MACH_UNIVERSAL5433 */
#endif /* USE_EXYNOS_PCIE_RC_PMPATCH */
#ifdef CONFIG_ARCH_TEGRA
#endif /* DHD_WAKE_STATUS */
extern bool dhdpcie_bus_get_pcie_hostready_supported(dhd_bus_t *bus);
extern void dhd_bus_hostready(struct dhd_bus *bus);
+#ifdef PCIE_OOB
+extern bool dhdpcie_bus_get_pcie_oob_dw_supported(dhd_bus_t *bus);
+#endif /* PCIE_OOB */
+#ifdef PCIE_INB_DW
+extern bool dhdpcie_bus_get_pcie_inband_dw_supported(dhd_bus_t *bus);
+extern void dhdpcie_bus_set_pcie_inband_dw_state(dhd_bus_t *bus,
+ enum dhd_bus_ds_state state);
+extern enum dhd_bus_ds_state dhdpcie_bus_get_pcie_inband_dw_state(dhd_bus_t *bus);
+extern const char * dhd_convert_inb_state_names(enum dhd_bus_ds_state inbstate);
+extern const char * dhd_convert_dsval(uint32 val, bool d2h);
+extern int dhd_bus_inb_set_device_wake(struct dhd_bus *bus, bool val);
+extern void dhd_bus_inb_ack_pending_ds_req(dhd_bus_t *bus);
+#endif /* PCIE_INB_DW */
extern void dhdpcie_bus_enab_pcie_dw(dhd_bus_t *bus, uint8 dw_option);
-extern int dhdpcie_irq_disabled(struct dhd_bus *bus);
-
-static INLINE bool dhdpcie_is_arm_halted(struct dhd_bus *bus) {return TRUE;}
-static INLINE int dhd_os_wifi_platform_set_power(uint32 value) {return BCME_OK; }
-static INLINE void
-dhdpcie_dongle_flr_or_pwr_toggle(dhd_bus_t *bus)
-{ return; }
-
-int dhdpcie_config_check(dhd_bus_t *bus);
-int dhdpcie_config_restore(dhd_bus_t *bus, bool restore_pmcsr);
-int dhdpcie_config_save(dhd_bus_t *bus);
-int dhdpcie_set_pwr_state(dhd_bus_t *bus, uint state);
-
-extern bool dhdpcie_bus_get_pcie_hwa_supported(dhd_bus_t *bus);
+extern bool dhdpcie_irq_enabled(struct dhd_bus *bus);
extern bool dhdpcie_bus_get_pcie_idma_supported(dhd_bus_t *bus);
extern bool dhdpcie_bus_get_pcie_ifrm_supported(dhd_bus_t *bus);
-extern bool dhdpcie_bus_get_pcie_dar_supported(dhd_bus_t *bus);
static INLINE uint32
dhd_pcie_config_read(osl_t *osh, uint offset, uint size)
return si_corereg(sih, sih->buscoreidx, OFFSETOF(sbpcieregs_t, configdata), 0, 0);
}
-extern int dhdpcie_get_fwpath_otp(dhd_bus_t *bus, char *fw_path, char *nv_path,
- char *clm_path, char *txcap_path);
-
-extern int dhd_pcie_debug_info_dump(dhd_pub_t *dhd);
-extern void dhd_pcie_intr_count_dump(dhd_pub_t *dhd);
-extern void dhdpcie_bus_clear_intstatus(dhd_bus_t *bus);
-#ifdef DHD_HP2P
-extern uint16 dhd_bus_get_hp2p_ring_max_size(dhd_bus_t *bus, bool tx);
-#endif // endif
+#ifdef DHD_SSSR_DUMP
+extern int dhdpcie_sssr_dump(dhd_pub_t *dhd);
+#endif /* DHD_SSSR_DUMP */
+
+#ifdef DHD_EFI
+extern int dhd_os_wifi_platform_set_power(uint32 value);
+int dhd_control_signal(dhd_bus_t *bus, char *arg, int set);
+extern int dhd_wifi_properties(struct dhd_bus *bus, char *arg);
+extern bool dhdpcie_is_arm_halted(struct dhd_bus *bus);
+extern void dhdpcie_dongle_pwr_toggle(dhd_bus_t *bus);
+extern int dhd_otp_dump(dhd_bus_t *bus, char *arg);
+#else
+static INLINE int dhd_os_wifi_platform_set_power(uint32 value) {return BCME_OK; }
+static INLINE bool dhdpcie_is_arm_halted(struct dhd_bus *bus) {return TRUE;}
+#endif /* DHD_EFI */
+int dhdpcie_config_check(dhd_bus_t *bus);
+int dhdpcie_config_restore(dhd_bus_t *bus, bool restore_pmcsr);
+int dhdpcie_config_save(dhd_bus_t *bus);
+int dhdpcie_set_pwr_state(dhd_bus_t *bus, uint state);
#endif /* dhd_pcie_h */
/*
* Linux DHD Bus Module for PCIE
*
- * Copyright (C) 1999-2019, Broadcom.
- *
+ * Copyright (C) 1999-2017, Broadcom Corporation
+ *
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
* under the terms of the GNU General Public License version 2 (the "GPL"),
* available at http://www.broadcom.com/licenses/GPLv2.php, with the
* following added to such license:
- *
+ *
* As a special exception, the copyright holders of this software give you
* permission to link this software with independent modules, and to copy and
* distribute the resulting executable under terms of your choice, provided that
* the license of that module. An independent module is a module which is not
* derived from this software. The special exception does not apply to any
* modifications of the software.
- *
+ *
* Notwithstanding the above, under no circumstances may you combine this
* software in any way with any other Broadcom software provided under a license
* other than the GPL, without Broadcom's express prior written consent.
*
* <<Broadcom-WL-IPTag/Open:>>
*
- * $Id: dhd_pcie_linux.c 821650 2019-05-24 10:41:54Z $
+ * $Id: dhd_pcie_linux.c 707536 2017-06-28 04:23:48Z $
*/
+
/* include files */
#include <typedefs.h>
#include <bcmutils.h>
#include <mach/msm_pcie.h>
#endif /* CONFIG_PCI_MSM */
#endif /* CONFIG_ARCH_MSM */
-
-#ifdef DHD_PCIE_NATIVE_RUNTIMEPM
-#include <linux/pm_runtime.h>
-#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
-
-#ifdef DHD_PCIE_NATIVE_RUNTIMEPM
-#ifndef AUTO_SUSPEND_TIMEOUT
-#define AUTO_SUSPEND_TIMEOUT 1000
-#endif /* AUTO_SUSPEND_TIMEOUT */
-#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
-
+#ifdef PCIE_OOB
+#include "ftdi_sio_external.h"
+#endif /* PCIE_OOB */
#include <linux/irq.h>
#ifdef USE_SMMU_ARCH_MSM
#include <asm/dma-iommu.h>
#define OS_HANDLE_MAGIC 0x1234abcd /* Magic # to recognize osh */
#define BCM_MEM_FILENAME_LEN 24 /* Mem. filename length */
-#ifdef FORCE_TPOWERON
-extern uint32 tpoweron_scale;
-#endif /* FORCE_TPOWERON */
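+/*
+ * OSL_PKTTAG_CLEAR() zeroes the 32-byte skb->cb scratch area ("packet tag")
+ * with eight aligned 32-bit stores; the ASSERT documents the assumption that
+ * OSL_PKTTAG_SZ stays at 32 bytes so the unrolled stores cover it exactly.
+ */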
+#define OSL_PKTTAG_CLEAR(p) \
+do { \
+ struct sk_buff *s = (struct sk_buff *)(p); \
+ ASSERT(OSL_PKTTAG_SZ == 32); \
+ *(uint32 *)(&s->cb[0]) = 0; *(uint32 *)(&s->cb[4]) = 0; \
+ *(uint32 *)(&s->cb[8]) = 0; *(uint32 *)(&s->cb[12]) = 0; \
+ *(uint32 *)(&s->cb[16]) = 0; *(uint32 *)(&s->cb[20]) = 0; \
+ *(uint32 *)(&s->cb[24]) = 0; *(uint32 *)(&s->cb[28]) = 0; \
+} while (0)
+
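+/*
+ * PCIE_OOB: the bit positions below refer to an external GPIO device driven
+ * through ftdi_sio_external.h over OOB_PORT, used for the out-of-band
+ * DEVICE_WAKE/HOST_WAKE handshake and the WL/BT REG_ON lines; the gpio_*
+ * globals cache the last programmed handle, port and direction. The exact
+ * wiring is board-specific.
+ */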
+#ifdef PCIE_OOB
+#define HOST_WAKE 4 /* GPIO_0 (HOST_WAKE) - Output from WLAN */
+#define DEVICE_WAKE 5 /* GPIO_1 (DEVICE_WAKE) - Input to WLAN */
+#define BIT_WL_REG_ON 6
+#define BIT_BT_REG_ON 7
+
+int gpio_handle_val = 0;
+unsigned char gpio_port = 0;
+unsigned char gpio_direction = 0;
+#define OOB_PORT "ttyUSB0"
+#endif /* PCIE_OOB */
+
/* user defined data structures */
+typedef struct dhd_pc_res {
+ uint32 bar0_size;
+ void* bar0_addr;
+ uint32 bar1_size;
+ void* bar1_addr;
+} pci_config_res, *pPci_config_res;
+
typedef bool (*dhdpcie_cb_fn_t)(void *);
typedef struct dhdpcie_info
{
dhd_bus_t *bus;
- osl_t *osh;
+ osl_t *osh;
struct pci_dev *dev; /* pci device handle */
- volatile char *regs; /* pci device memory va */
- volatile char *tcm; /* pci device memory va */
- uint32 bar1_size; /* pci device memory size */
- uint32 curr_bar1_win; /* current PCIEBar1Window setting */
+ volatile char *regs; /* pci device memory va */
+ volatile char *tcm; /* pci device memory va */
+ uint32 tcm_size; /* pci device memory size */
struct pcos_info *pcos_info;
uint16 last_intrstatus; /* to cache intrstatus */
int irq;
#endif /* USE_SMMU_ARCH_MSM */
} dhdpcie_info_t;
+
struct pcos_info {
dhdpcie_info_t *pc;
spinlock_t lock;
void *dev; /* handle to the underlying device */
} dhdpcie_os_info_t;
static irqreturn_t wlan_oob_irq(int irq, void *data);
-#ifdef CUSTOMER_HW2
+#if defined(CUSTOMER_HW2) && defined(CONFIG_ARCH_APQ8084)
extern struct brcm_pcie_wake brcm_pcie_wake;
-#endif /* CUSTOMER_HW2 */
+#endif /* CUSTOMER_HW2 && CONFIG_ARCH_APQ8084 */
#endif /* BCMPCIE_OOB_HOST_WAKE */
#ifdef USE_SMMU_ARCH_MSM
static irqreturn_t dhdpcie_isr(int irq, void *arg);
/* OS Routine functions for PCI suspend/resume */
-#ifdef DHD_PCIE_NATIVE_RUNTIMEPM
-static int dhdpcie_set_suspend_resume(struct pci_dev *dev, bool state, bool byint);
-#else
static int dhdpcie_set_suspend_resume(dhd_bus_t *bus, bool state);
-#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
static int dhdpcie_resume_host_dev(dhd_bus_t *bus);
static int dhdpcie_suspend_host_dev(dhd_bus_t *bus);
static int dhdpcie_resume_dev(struct pci_dev *dev);
static int dhdpcie_suspend_dev(struct pci_dev *dev);
-#ifdef DHD_PCIE_NATIVE_RUNTIMEPM
-static int dhdpcie_pm_system_suspend_noirq(struct device * dev);
-static int dhdpcie_pm_system_resume_noirq(struct device * dev);
+#ifdef DHD_PCIE_RUNTIMEPM
+static int dhdpcie_pm_suspend(struct device *dev);
+static int dhdpcie_pm_prepare(struct device *dev);
+static int dhdpcie_pm_resume(struct device *dev);
+static void dhdpcie_pm_complete(struct device *dev);
#else
static int dhdpcie_pci_suspend(struct pci_dev *dev, pm_message_t state);
static int dhdpcie_pci_resume(struct pci_dev *dev);
-#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
-
-#ifdef DHD_PCIE_NATIVE_RUNTIMEPM
-static int dhdpcie_pm_runtime_suspend(struct device * dev);
-static int dhdpcie_pm_runtime_resume(struct device * dev);
-static int dhdpcie_pm_system_suspend_noirq(struct device * dev);
-static int dhdpcie_pm_system_resume_noirq(struct device * dev);
-#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
-
-static void dhdpcie_config_save_restore_coherent(dhd_bus_t *bus, bool state);
-
-uint32
-dhdpcie_access_cap(struct pci_dev *pdev, int cap, uint offset, bool is_ext, bool is_write,
- uint32 writeval);
+#endif /* DHD_PCIE_RUNTIMEPM */
static struct pci_device_id dhdpcie_pci_devid[] __devinitdata = {
{ vendor: 0x14e4,
MODULE_DEVICE_TABLE(pci, dhdpcie_pci_devid);
/* Power Management Hooks */
-#ifdef DHD_PCIE_NATIVE_RUNTIMEPM
-static const struct dev_pm_ops dhdpcie_pm_ops = {
- SET_RUNTIME_PM_OPS(dhdpcie_pm_runtime_suspend, dhdpcie_pm_runtime_resume, NULL)
- .suspend_noirq = dhdpcie_pm_system_suspend_noirq,
- .resume_noirq = dhdpcie_pm_system_resume_noirq
+#ifdef DHD_PCIE_RUNTIMEPM
+static const struct dev_pm_ops dhd_pcie_pm_ops = {
+ .prepare = dhdpcie_pm_prepare,
+ .suspend = dhdpcie_pm_suspend,
+ .resume = dhdpcie_pm_resume,
+ .complete = dhdpcie_pm_complete,
};
-#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
+#endif /* DHD_PCIE_RUNTIMEPM */
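+/*
+ * With DHD_PCIE_RUNTIMEPM defined, system sleep goes through the dev_pm_ops
+ * above (prepare/suspend/resume/complete); otherwise the legacy pci_driver
+ * suspend/resume hooks below are used.
+ */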
static struct pci_driver dhdpcie_driver = {
node: {&dhdpcie_driver.node, &dhdpcie_driver.node},
id_table: dhdpcie_pci_devid,
probe: dhdpcie_pci_probe,
remove: dhdpcie_pci_remove,
-#if defined(DHD_PCIE_NATIVE_RUNTIMEPM)
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0))
+ save_state: NULL,
+#endif
+#ifdef DHD_PCIE_RUNTIMEPM
.driver.pm = &dhd_pcie_pm_ops,
#else
suspend: dhdpcie_pci_suspend,
resume: dhdpcie_pci_resume,
-#endif // endif
+#endif /* DHD_PCIE_RUNTIMEPM */
};
int dhdpcie_init_succeeded = FALSE;
}
DHD_ERROR(("%s : SMMU init start\n", __FUNCTION__));
-
- if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) ||
- pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) {
- DHD_ERROR(("%s: DMA set 64bit mask failed.\n", __FUNCTION__));
- return -EINVAL;
- }
-
mapping = arm_iommu_create_mapping(&platform_bus_type,
smmu_info->smmu_iova_start, smmu_info->smmu_iova_len);
if (IS_ERR(mapping)) {
}
#endif /* USE_SMMU_ARCH_MSM */
-#ifdef FORCE_TPOWERON
-static void
-dhd_bus_get_tpoweron(dhd_bus_t *bus)
-{
-
- uint32 tpoweron_rc;
- uint32 tpoweron_ep;
-
- tpoweron_rc = dhdpcie_rc_access_cap(bus, PCIE_EXTCAP_ID_L1SS,
- PCIE_EXTCAP_L1SS_CONTROL2_OFFSET, TRUE, FALSE, 0);
- tpoweron_ep = dhdpcie_ep_access_cap(bus, PCIE_EXTCAP_ID_L1SS,
- PCIE_EXTCAP_L1SS_CONTROL2_OFFSET, TRUE, FALSE, 0);
- DHD_ERROR(("%s: tpoweron_rc:0x%x tpoweron_ep:0x%x\n",
- __FUNCTION__, tpoweron_rc, tpoweron_ep));
-}
-
-static void
-dhd_bus_set_tpoweron(dhd_bus_t *bus, uint16 tpoweron)
+#ifdef DHD_PCIE_RUNTIMEPM
+static int dhdpcie_pm_suspend(struct device *dev)
{
+ int ret = 0;
+ struct pci_dev *pdev = to_pci_dev(dev);
+ dhdpcie_info_t *pch = pci_get_drvdata(pdev);
+ dhd_bus_t *bus = NULL;
+ unsigned long flags;
- dhd_bus_get_tpoweron(bus);
- /* Set the tpoweron */
- DHD_ERROR(("%s tpoweron: 0x%x\n", __FUNCTION__, tpoweron));
- dhdpcie_rc_access_cap(bus, PCIE_EXTCAP_ID_L1SS,
- PCIE_EXTCAP_L1SS_CONTROL2_OFFSET, TRUE, TRUE, tpoweron);
- dhdpcie_ep_access_cap(bus, PCIE_EXTCAP_ID_L1SS,
- PCIE_EXTCAP_L1SS_CONTROL2_OFFSET, TRUE, TRUE, tpoweron);
-
- dhd_bus_get_tpoweron(bus);
-
-}
-
-static bool
-dhdpcie_chip_req_forced_tpoweron(dhd_bus_t *bus)
-{
- /*
- * On Fire's reference platform, coming out of L1.2,
- * there is a constant delay of 45us between CLKREQ# and stable REFCLK
- * Due to this delay, with tPowerOn < 50
- * there is a chance of the refclk sense to trigger on noise.
- *
- * Which ever chip needs forced tPowerOn of 50us should be listed below.
- */
- if (si_chipid(bus->sih) == BCM4377_CHIP_ID) {
- return TRUE;
- }
- return FALSE;
-}
-#endif /* FORCE_TPOWERON */
-
-static bool
-dhd_bus_aspm_enable_dev(dhd_bus_t *bus, struct pci_dev *dev, bool enable)
-{
- uint32 linkctrl_before;
- uint32 linkctrl_after = 0;
- uint8 linkctrl_asm;
- char *device;
-
- device = (dev == bus->dev) ? "EP" : "RC";
-
- linkctrl_before = dhdpcie_access_cap(dev, PCIE_CAP_ID_EXP, PCIE_CAP_LINKCTRL_OFFSET,
- FALSE, FALSE, 0);
- linkctrl_asm = (linkctrl_before & PCIE_ASPM_CTRL_MASK);
-
- if (enable) {
- if (linkctrl_asm == PCIE_ASPM_L1_ENAB) {
- DHD_ERROR(("%s: %s already enabled linkctrl: 0x%x\n",
- __FUNCTION__, device, linkctrl_before));
- return FALSE;
- }
- /* Enable only L1 ASPM (bit 1) */
- dhdpcie_access_cap(dev, PCIE_CAP_ID_EXP, PCIE_CAP_LINKCTRL_OFFSET, FALSE,
- TRUE, (linkctrl_before | PCIE_ASPM_L1_ENAB));
- } else {
- if (linkctrl_asm == 0) {
- DHD_ERROR(("%s: %s already disabled linkctrl: 0x%x\n",
- __FUNCTION__, device, linkctrl_before));
- return FALSE;
- }
- /* Disable complete ASPM (bit 1 and bit 0) */
- dhdpcie_access_cap(dev, PCIE_CAP_ID_EXP, PCIE_CAP_LINKCTRL_OFFSET, FALSE,
- TRUE, (linkctrl_before & (~PCIE_ASPM_ENAB)));
+ if (pch) {
+ bus = pch->bus;
}
-
- linkctrl_after = dhdpcie_access_cap(dev, PCIE_CAP_ID_EXP, PCIE_CAP_LINKCTRL_OFFSET,
- FALSE, FALSE, 0);
- DHD_ERROR(("%s: %s %s, linkctrl_before: 0x%x linkctrl_after: 0x%x\n",
- __FUNCTION__, device, (enable ? "ENABLE " : "DISABLE"),
- linkctrl_before, linkctrl_after));
-
- return TRUE;
-}
-
-static bool
-dhd_bus_is_rc_ep_aspm_capable(dhd_bus_t *bus)
-{
- uint32 rc_aspm_cap;
- uint32 ep_aspm_cap;
-
- /* RC ASPM capability */
- rc_aspm_cap = dhdpcie_access_cap(bus->rc_dev, PCIE_CAP_ID_EXP, PCIE_CAP_LINKCTRL_OFFSET,
- FALSE, FALSE, 0);
- if (rc_aspm_cap == BCME_ERROR) {
- DHD_ERROR(("%s RC is not ASPM capable\n", __FUNCTION__));
- return FALSE;
+ if (!bus) {
+ return ret;
}
- /* EP ASPM capability */
- ep_aspm_cap = dhdpcie_access_cap(bus->dev, PCIE_CAP_ID_EXP, PCIE_CAP_LINKCTRL_OFFSET,
- FALSE, FALSE, 0);
- if (ep_aspm_cap == BCME_ERROR) {
- DHD_ERROR(("%s EP is not ASPM capable\n", __FUNCTION__));
- return FALSE;
+ DHD_GENERAL_LOCK(bus->dhd, flags);
+ if (!DHD_BUS_BUSY_CHECK_IDLE(bus->dhd)) {
+ DHD_ERROR(("%s: Bus not IDLE!! dhd_bus_busy_state = 0x%x\n",
+ __FUNCTION__, bus->dhd->dhd_bus_busy_state));
+ DHD_GENERAL_UNLOCK(bus->dhd, flags);
+ return -EBUSY;
}
+ DHD_BUS_BUSY_SET_SUSPEND_IN_PROGRESS(bus->dhd);
+ DHD_GENERAL_UNLOCK(bus->dhd, flags);
- return TRUE;
-}
-
-bool
-dhd_bus_aspm_enable_rc_ep(dhd_bus_t *bus, bool enable)
-{
- bool ret;
-
- if (!bus->rc_ep_aspm_cap) {
- DHD_ERROR(("%s: NOT ASPM CAPABLE rc_ep_aspm_cap: %d\n",
- __FUNCTION__, bus->rc_ep_aspm_cap));
- return FALSE;
- }
+ if (!bus->dhd->dongle_reset)
+ ret = dhdpcie_set_suspend_resume(bus, TRUE);
- if (enable) {
- /* Enable only L1 ASPM first RC then EP */
- ret = dhd_bus_aspm_enable_dev(bus, bus->rc_dev, enable);
- ret = dhd_bus_aspm_enable_dev(bus, bus->dev, enable);
- } else {
- /* Disable complete ASPM first EP then RC */
- ret = dhd_bus_aspm_enable_dev(bus, bus->dev, enable);
- ret = dhd_bus_aspm_enable_dev(bus, bus->rc_dev, enable);
- }
+ DHD_GENERAL_LOCK(bus->dhd, flags);
+ DHD_BUS_BUSY_CLEAR_SUSPEND_IN_PROGRESS(bus->dhd);
+ dhd_os_busbusy_wake(bus->dhd);
+ DHD_GENERAL_UNLOCK(bus->dhd, flags);
return ret;
-}
-
-static void
-dhd_bus_l1ss_enable_dev(dhd_bus_t *bus, struct pci_dev *dev, bool enable)
-{
- uint32 l1ssctrl_before;
- uint32 l1ssctrl_after = 0;
- uint8 l1ss_ep;
- char *device;
-
- device = (dev == bus->dev) ? "EP" : "RC";
-
- /* Extendend Capacility Reg */
- l1ssctrl_before = dhdpcie_access_cap(dev, PCIE_EXTCAP_ID_L1SS,
- PCIE_EXTCAP_L1SS_CONTROL_OFFSET, TRUE, FALSE, 0);
- l1ss_ep = (l1ssctrl_before & PCIE_EXT_L1SS_MASK);
-
- if (enable) {
- if (l1ss_ep == PCIE_EXT_L1SS_ENAB) {
- DHD_ERROR(("%s: %s already enabled, l1ssctrl: 0x%x\n",
- __FUNCTION__, device, l1ssctrl_before));
- return;
- }
- dhdpcie_access_cap(dev, PCIE_EXTCAP_ID_L1SS, PCIE_EXTCAP_L1SS_CONTROL_OFFSET,
- TRUE, TRUE, (l1ssctrl_before | PCIE_EXT_L1SS_ENAB));
- } else {
- if (l1ss_ep == 0) {
- DHD_ERROR(("%s: %s already disabled, l1ssctrl: 0x%x\n",
- __FUNCTION__, device, l1ssctrl_before));
- return;
- }
- dhdpcie_access_cap(dev, PCIE_EXTCAP_ID_L1SS, PCIE_EXTCAP_L1SS_CONTROL_OFFSET,
- TRUE, TRUE, (l1ssctrl_before & (~PCIE_EXT_L1SS_ENAB)));
- }
- l1ssctrl_after = dhdpcie_access_cap(dev, PCIE_EXTCAP_ID_L1SS,
- PCIE_EXTCAP_L1SS_CONTROL_OFFSET, TRUE, FALSE, 0);
- DHD_ERROR(("%s: %s %s, l1ssctrl_before: 0x%x l1ssctrl_after: 0x%x\n",
- __FUNCTION__, device, (enable ? "ENABLE " : "DISABLE"),
- l1ssctrl_before, l1ssctrl_after));
}
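+/*
+ * dhdpcie_pm_suspend() above refuses to suspend (-EBUSY) unless the DHD bus
+ * is idle, marks SUSPEND_IN_PROGRESS around dhdpcie_set_suspend_resume(bus,
+ * TRUE), then wakes any waiters on the busy state. The prepare/resume/
+ * complete callbacks below mirror this and track Runtime PM via chk_pm.
+ */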
-static bool
-dhd_bus_is_rc_ep_l1ss_capable(dhd_bus_t *bus)
+static int dhdpcie_pm_prepare(struct device *dev)
{
- uint32 rc_l1ss_cap;
- uint32 ep_l1ss_cap;
-
- /* RC Extendend Capacility */
- rc_l1ss_cap = dhdpcie_access_cap(bus->rc_dev, PCIE_EXTCAP_ID_L1SS,
- PCIE_EXTCAP_L1SS_CONTROL_OFFSET, TRUE, FALSE, 0);
- if (rc_l1ss_cap == BCME_ERROR) {
- DHD_ERROR(("%s RC is not l1ss capable\n", __FUNCTION__));
- return FALSE;
- }
+ struct pci_dev *pdev = to_pci_dev(dev);
+ dhdpcie_info_t *pch = pci_get_drvdata(pdev);
+ dhd_bus_t *bus = NULL;
- /* EP Extendend Capacility */
- ep_l1ss_cap = dhdpcie_access_cap(bus->dev, PCIE_EXTCAP_ID_L1SS,
- PCIE_EXTCAP_L1SS_CONTROL_OFFSET, TRUE, FALSE, 0);
- if (ep_l1ss_cap == BCME_ERROR) {
- DHD_ERROR(("%s EP is not l1ss capable\n", __FUNCTION__));
- return FALSE;
+ if (pch) {
+ bus = pch->bus;
+ DHD_DISABLE_RUNTIME_PM(bus->dhd);
}
- return TRUE;
+ bus->chk_pm = TRUE;
+ return 0;
}
-void
-dhd_bus_l1ss_enable_rc_ep(dhd_bus_t *bus, bool enable)
+static int dhdpcie_pm_resume(struct device *dev)
{
- bool ret;
+ int ret = 0;
+ struct pci_dev *pdev = to_pci_dev(dev);
+ dhdpcie_info_t *pch = pci_get_drvdata(pdev);
+ dhd_bus_t *bus = NULL;
+ unsigned long flags;
- if ((!bus->rc_ep_aspm_cap) || (!bus->rc_ep_l1ss_cap)) {
- DHD_ERROR(("%s: NOT L1SS CAPABLE rc_ep_aspm_cap: %d rc_ep_l1ss_cap: %d\n",
- __FUNCTION__, bus->rc_ep_aspm_cap, bus->rc_ep_l1ss_cap));
- return;
+ if (pch) {
+ bus = pch->bus;
+ }
+ if (!bus) {
+ return ret;
}
- /* Disable ASPM of RC and EP */
- ret = dhd_bus_aspm_enable_rc_ep(bus, FALSE);
+ DHD_GENERAL_LOCK(bus->dhd, flags);
+ DHD_BUS_BUSY_SET_RESUME_IN_PROGRESS(bus->dhd);
+ DHD_GENERAL_UNLOCK(bus->dhd, flags);
- if (enable) {
- /* Enable RC then EP */
- dhd_bus_l1ss_enable_dev(bus, bus->rc_dev, enable);
- dhd_bus_l1ss_enable_dev(bus, bus->dev, enable);
- } else {
- /* Disable EP then RC */
- dhd_bus_l1ss_enable_dev(bus, bus->dev, enable);
- dhd_bus_l1ss_enable_dev(bus, bus->rc_dev, enable);
+ if (!bus->dhd->dongle_reset) {
+ ret = dhdpcie_set_suspend_resume(bus, FALSE);
+ bus->chk_pm = FALSE;
}
- /* Enable ASPM of RC and EP only if this API disabled */
- if (ret == TRUE) {
- dhd_bus_aspm_enable_rc_ep(bus, TRUE);
- }
+ DHD_GENERAL_LOCK(bus->dhd, flags);
+ DHD_BUS_BUSY_CLEAR_RESUME_IN_PROGRESS(bus->dhd);
+ dhd_os_busbusy_wake(bus->dhd);
+ DHD_GENERAL_UNLOCK(bus->dhd, flags);
+
+ return ret;
}
-void
-dhd_bus_aer_config(dhd_bus_t *bus)
+static void dhdpcie_pm_complete(struct device *dev)
{
- uint32 val;
-
- DHD_ERROR(("%s: Configure AER registers for EP\n", __FUNCTION__));
- val = dhdpcie_ep_access_cap(bus, PCIE_ADVERRREP_CAPID,
- PCIE_ADV_CORR_ERR_MASK_OFFSET, TRUE, FALSE, 0);
- if (val != (uint32)-1) {
- val &= ~CORR_ERR_AE;
- dhdpcie_ep_access_cap(bus, PCIE_ADVERRREP_CAPID,
- PCIE_ADV_CORR_ERR_MASK_OFFSET, TRUE, TRUE, val);
- } else {
- DHD_ERROR(("%s: Invalid EP's PCIE_ADV_CORR_ERR_MASK: 0x%x\n",
- __FUNCTION__, val));
- }
+ struct pci_dev *pdev = to_pci_dev(dev);
+ dhdpcie_info_t *pch = pci_get_drvdata(pdev);
+ dhd_bus_t *bus = NULL;
- DHD_ERROR(("%s: Configure AER registers for RC\n", __FUNCTION__));
- val = dhdpcie_rc_access_cap(bus, PCIE_ADVERRREP_CAPID,
- PCIE_ADV_CORR_ERR_MASK_OFFSET, TRUE, FALSE, 0);
- if (val != (uint32)-1) {
- val &= ~CORR_ERR_AE;
- dhdpcie_rc_access_cap(bus, PCIE_ADVERRREP_CAPID,
- PCIE_ADV_CORR_ERR_MASK_OFFSET, TRUE, TRUE, val);
- } else {
- DHD_ERROR(("%s: Invalid RC's PCIE_ADV_CORR_ERR_MASK: 0x%x\n",
- __FUNCTION__, val));
+ if (pch) {
+ bus = pch->bus;
+ DHD_ENABLE_RUNTIME_PM(bus->dhd);
}
-}
+ return;
+}
+#else
static int dhdpcie_pci_suspend(struct pci_dev * pdev, pm_message_t state)
{
int ret = 0;
dhdpcie_info_t *pch = pci_get_drvdata(pdev);
dhd_bus_t *bus = NULL;
unsigned long flags;
- uint32 i = 0;
if (pch) {
bus = pch->bus;
BCM_REFERENCE(state);
+ DHD_GENERAL_LOCK(bus->dhd, flags);
if (!DHD_BUS_BUSY_CHECK_IDLE(bus->dhd)) {
DHD_ERROR(("%s: Bus not IDLE!! dhd_bus_busy_state = 0x%x\n",
__FUNCTION__, bus->dhd->dhd_bus_busy_state));
-
- OSL_DELAY(1000);
- /* retry till the transaction is complete */
- while (i < 100) {
- OSL_DELAY(1000);
- i++;
- if (DHD_BUS_BUSY_CHECK_IDLE(bus->dhd)) {
- DHD_ERROR(("%s: Bus enter IDLE!! after %d ms\n",
- __FUNCTION__, i));
- break;
- }
- }
- if (!DHD_BUS_BUSY_CHECK_IDLE(bus->dhd)) {
- DHD_ERROR(("%s: Bus not IDLE!! Failed after %d ms, "
- "dhd_bus_busy_state = 0x%x\n",
- __FUNCTION__, i, bus->dhd->dhd_bus_busy_state));
- return -EBUSY;
- }
+ DHD_GENERAL_UNLOCK(bus->dhd, flags);
+ return -EBUSY;
}
- DHD_GENERAL_LOCK(bus->dhd, flags);
DHD_BUS_BUSY_SET_SUSPEND_IN_PROGRESS(bus->dhd);
DHD_GENERAL_UNLOCK(bus->dhd, flags);
return ret;
}
-static int
-#ifdef DHD_PCIE_NATIVE_RUNTIMEPM
-dhdpcie_set_suspend_resume(dhd_bus_t *bus, bool state, bool byint)
-#else
-dhdpcie_set_suspend_resume(dhd_bus_t *bus, bool state)
-#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
+#endif /* DHD_PCIE_RUNTIMEPM */
+
+static int dhdpcie_set_suspend_resume(dhd_bus_t *bus, bool state)
{
int ret = 0;
ASSERT(bus && !bus->dhd->dongle_reset);
+#ifdef DHD_PCIE_RUNTIMEPM
+	/* If a wakelock is held during suspend, fail the suspend */
+ if (state == TRUE && dhd_os_check_wakelock_all(bus->dhd)) {
+ return -EBUSY;
+ }
+ mutex_lock(&bus->pm_lock);
+#endif /* DHD_PCIE_RUNTIMEPM */
+
	/* When firmware is not loaded, do the PCI bus suspend/resume only */
if (bus->dhd->busstate == DHD_BUS_DOWN) {
ret = dhdpcie_pci_suspend_resume(bus, state);
+#ifdef DHD_PCIE_RUNTIMEPM
+ mutex_unlock(&bus->pm_lock);
+#endif /* DHD_PCIE_RUNTIMEPM */
return ret;
}
-#ifdef DHD_PCIE_NATIVE_RUNTIMEPM
- ret = dhdpcie_bus_suspend(bus, state, byint);
-#else
- ret = dhdpcie_bus_suspend(bus, state);
-#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
-
- return ret;
-}
-
-#ifdef DHD_PCIE_NATIVE_RUNTIMEPM
-static int dhdpcie_pm_runtime_suspend(struct device * dev)
-{
- struct pci_dev *pdev = to_pci_dev(dev);
- dhdpcie_info_t *pch = pci_get_drvdata(pdev);
- dhd_bus_t *bus = NULL;
- int ret = 0;
-
- if (!pch)
- return -EBUSY;
-
- bus = pch->bus;
-
- DHD_RPM(("%s Enter\n", __FUNCTION__));
-
- if (atomic_read(&bus->dhd->block_bus))
- return -EHOSTDOWN;
-
- dhd_netif_stop_queue(bus);
- atomic_set(&bus->dhd->block_bus, TRUE);
-
- if (dhdpcie_set_suspend_resume(pdev, TRUE, TRUE)) {
- pm_runtime_mark_last_busy(dev);
- ret = -EAGAIN;
- }
-
- atomic_set(&bus->dhd->block_bus, FALSE);
- dhd_bus_start_queue(bus);
-
- return ret;
-}
-
-static int dhdpcie_pm_runtime_resume(struct device * dev)
-{
- struct pci_dev *pdev = to_pci_dev(dev);
- dhdpcie_info_t *pch = pci_get_drvdata(pdev);
- dhd_bus_t *bus = pch->bus;
-
- DHD_RPM(("%s Enter\n", __FUNCTION__));
-
- if (atomic_read(&bus->dhd->block_bus))
- return -EHOSTDOWN;
-
- if (dhdpcie_set_suspend_resume(pdev, FALSE, TRUE))
- return -EAGAIN;
-
- return 0;
-}
-
-static int dhdpcie_pm_system_suspend_noirq(struct device * dev)
-{
- struct pci_dev *pdev = to_pci_dev(dev);
- dhdpcie_info_t *pch = pci_get_drvdata(pdev);
- dhd_bus_t *bus = NULL;
- int ret;
-
- DHD_RPM(("%s Enter\n", __FUNCTION__));
-
- if (!pch)
- return -EBUSY;
-
- bus = pch->bus;
-
- if (atomic_read(&bus->dhd->block_bus))
- return -EHOSTDOWN;
-
- dhd_netif_stop_queue(bus);
- atomic_set(&bus->dhd->block_bus, TRUE);
-
- ret = dhdpcie_set_suspend_resume(pdev, TRUE, FALSE);
-
- if (ret) {
- dhd_bus_start_queue(bus);
- atomic_set(&bus->dhd->block_bus, FALSE);
- }
-
- return ret;
-}
-static int dhdpcie_pm_system_resume_noirq(struct device * dev)
-{
- struct pci_dev *pdev = to_pci_dev(dev);
- dhdpcie_info_t *pch = pci_get_drvdata(pdev);
- dhd_bus_t *bus = NULL;
- int ret;
-
- if (!pch)
- return -EBUSY;
-
- bus = pch->bus;
-
- DHD_RPM(("%s Enter\n", __FUNCTION__));
-
- ret = dhdpcie_set_suspend_resume(pdev, FALSE, FALSE);
+ ret = dhdpcie_bus_suspend(bus, state);
- atomic_set(&bus->dhd->block_bus, FALSE);
- dhd_bus_start_queue(bus);
- pm_runtime_mark_last_busy(dhd_bus_to_dev(bus));
+#ifdef DHD_PCIE_RUNTIMEPM
+ mutex_unlock(&bus->pm_lock);
+#endif /* DHD_PCIE_RUNTIMEPM */
return ret;
}
-#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0))
extern void dhd_dpc_tasklet_kill(dhd_pub_t *dhdp);
#endif /* OEM_ANDROID && LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0) */
-static void
-dhdpcie_suspend_dump_cfgregs(struct dhd_bus *bus, char *suspend_state)
-{
- DHD_ERROR(("%s: BaseAddress0(0x%x)=0x%x, "
- "BaseAddress1(0x%x)=0x%x PCIE_CFG_PMCSR(0x%x)=0x%x\n",
- suspend_state,
- PCIECFGREG_BASEADDR0,
- dhd_pcie_config_read(bus->osh,
- PCIECFGREG_BASEADDR0, sizeof(uint32)),
- PCIECFGREG_BASEADDR1,
- dhd_pcie_config_read(bus->osh,
- PCIECFGREG_BASEADDR1, sizeof(uint32)),
- PCIE_CFG_PMCSR,
- dhd_pcie_config_read(bus->osh,
- PCIE_CFG_PMCSR, sizeof(uint32))));
-}
-
static int dhdpcie_suspend_dev(struct pci_dev *dev)
{
int ret;
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0))
dhdpcie_info_t *pch = pci_get_drvdata(dev);
dhd_bus_t *bus = pch->bus;
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0))
if (bus->is_linkdown) {
DHD_ERROR(("%s: PCIe link is down\n", __FUNCTION__));
return BCME_ERROR;
}
#endif /* OEM_ANDROID && LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0) */
- DHD_ERROR(("%s: Enter\n", __FUNCTION__));
- dhdpcie_suspend_dump_cfgregs(bus, "BEFORE_EP_SUSPEND");
+ DHD_TRACE_HW4(("%s: Enter\n", __FUNCTION__));
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0))
dhd_dpc_tasklet_kill(bus->dhd);
#endif /* OEM_ANDROID && LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0) */
pch->state = pci_store_saved_state(dev);
#endif /* OEM_ANDROID && LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0) */
pci_enable_wake(dev, PCI_D0, TRUE);
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 31))
if (pci_is_enabled(dev))
+#endif
pci_disable_device(dev);
ret = pci_set_power_state(dev, PCI_D3hot);
DHD_ERROR(("%s: pci_set_power_state error %d\n",
__FUNCTION__, ret));
}
-// dev->state_saved = FALSE;
- dhdpcie_suspend_dump_cfgregs(bus, "AFTER_EP_SUSPEND");
+ dev->state_saved = FALSE;
return ret;
}
static int dhdpcie_resume_dev(struct pci_dev *dev)
{
int err = 0;
- dhdpcie_info_t *pch = pci_get_drvdata(dev);
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0))
+ dhdpcie_info_t *pch = pci_get_drvdata(dev);
pci_load_and_free_saved_state(dev, &pch->state);
#endif /* OEM_ANDROID && LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0) */
- DHD_ERROR(("%s: Enter\n", __FUNCTION__));
-// dev->state_saved = TRUE;
+ DHD_TRACE_HW4(("%s: Enter\n", __FUNCTION__));
+ dev->state_saved = TRUE;
pci_restore_state(dev);
-#ifdef FORCE_TPOWERON
- if (dhdpcie_chip_req_forced_tpoweron(pch->bus)) {
- dhd_bus_set_tpoweron(pch->bus, tpoweron_scale);
- }
-#endif /* FORCE_TPOWERON */
err = pci_enable_device(dev);
if (err) {
printf("%s:pci_enable_device error %d \n", __FUNCTION__, err);
printf("%s:pci_set_power_state error %d \n", __FUNCTION__, err);
goto out;
}
- BCM_REFERENCE(pch);
- dhdpcie_suspend_dump_cfgregs(pch->bus, "AFTER_EP_RESUME");
+
out:
return err;
}
DHD_ERROR(("%s: PCIe RC resume failed!!! (%d)\n",
__FUNCTION__, bcmerror));
bus->is_linkdown = 1;
+#ifdef SUPPORT_LINKDOWN_RECOVERY
+#ifdef CONFIG_ARCH_MSM
+ bus->no_cfg_restore = 1;
+#endif /* CONFIG_ARCH_MSM */
+#endif /* SUPPORT_LINKDOWN_RECOVERY */
}
return bcmerror;
return bcmerror;
}
-/**
- * dhdpcie_os_setbar1win
- *
- * Interface function for setting bar1 window in order to allow
- * os layer to be aware of current window positon.
- *
- * @bus: dhd bus context
- * @addr: new backplane windows address for BAR1
- */
-void
-dhdpcie_os_setbar1win(dhd_bus_t *bus, uint32 addr)
-{
- dhdpcie_info_t *pch = pci_get_drvdata(bus->dev);
-
- osl_pci_write_config(bus->osh, PCI_BAR1_WIN, 4, addr);
- pch->curr_bar1_win = addr;
-}
-
-/**
- * dhdpcie_os_chkbpoffset
- *
- * Check the provided address is within the current BAR1 window,
- * if not, shift the window
- *
- * @bus: dhd bus context
- * @offset: back plane address that the caller wants to access
- *
- * Return: new offset for access
- */
-static ulong
-dhdpcie_os_chkbpoffset(dhdpcie_info_t *pch, ulong offset)
-{
- /* Determine BAR1 backplane window using window size
- * Window address mask should be ~(size - 1)
- */
- uint32 bpwin = (uint32)(offset & ~(pch->bar1_size - 1));
-
- if (bpwin != pch->curr_bar1_win) {
- /* Move BAR1 window */
- dhdpcie_os_setbar1win(pch->bus, bpwin);
- }
-
- return offset - bpwin;
-}
-
-/**
- * dhdpcie os layer tcm read/write interface
- */
-void
-dhdpcie_os_wtcm8(dhd_bus_t *bus, ulong offset, uint8 data)
-{
- dhdpcie_info_t *pch = pci_get_drvdata(bus->dev);
-
- offset = dhdpcie_os_chkbpoffset(pch, offset);
- W_REG(bus->dhd->osh, (volatile uint8 *)(bus->tcm + offset), data);
-}
-
-uint8
-dhdpcie_os_rtcm8(dhd_bus_t *bus, ulong offset)
-{
- volatile uint8 data;
- dhdpcie_info_t *pch = pci_get_drvdata(bus->dev);
-
- offset = dhdpcie_os_chkbpoffset(pch, offset);
- data = R_REG(bus->dhd->osh, (volatile uint8 *)(bus->tcm + offset));
- return data;
-}
-
-void
-dhdpcie_os_wtcm16(dhd_bus_t *bus, ulong offset, uint16 data)
-{
- dhdpcie_info_t *pch = pci_get_drvdata(bus->dev);
-
- offset = dhdpcie_os_chkbpoffset(pch, offset);
- W_REG(bus->dhd->osh, (volatile uint16 *)(bus->tcm + offset), data);
-}
-
-uint16
-dhdpcie_os_rtcm16(dhd_bus_t *bus, ulong offset)
-{
- volatile uint16 data;
- dhdpcie_info_t *pch = pci_get_drvdata(bus->dev);
-
- offset = dhdpcie_os_chkbpoffset(pch, offset);
- data = R_REG(bus->dhd->osh, (volatile uint16 *)(bus->tcm + offset));
- return data;
-}
-
-void
-dhdpcie_os_wtcm32(dhd_bus_t *bus, ulong offset, uint32 data)
-{
- dhdpcie_info_t *pch = pci_get_drvdata(bus->dev);
-
- offset = dhdpcie_os_chkbpoffset(pch, offset);
- W_REG(bus->dhd->osh, (volatile uint32 *)(bus->tcm + offset), data);
-}
-
-uint32
-dhdpcie_os_rtcm32(dhd_bus_t *bus, ulong offset)
-{
- volatile uint32 data;
- dhdpcie_info_t *pch = pci_get_drvdata(bus->dev);
-
- offset = dhdpcie_os_chkbpoffset(pch, offset);
- data = R_REG(bus->dhd->osh, (volatile uint32 *)(bus->tcm + offset));
- return data;
-}
-
-#ifdef DHD_SUPPORT_64BIT
-void
-dhdpcie_os_wtcm64(dhd_bus_t *bus, ulong offset, uint64 data)
-{
- dhdpcie_info_t *pch = pci_get_drvdata(bus->dev);
-
- offset = dhdpcie_os_chkbpoffset(pch, offset);
- W_REG(bus->dhd->osh, (volatile uint64 *)(bus->tcm + offset), data);
-}
-
-uint64
-dhdpcie_os_rtcm64(dhd_bus_t *bus, ulong offset)
-{
- volatile uint64 data;
- dhdpcie_info_t *pch = pci_get_drvdata(bus->dev);
-
- offset = dhdpcie_os_chkbpoffset(pch, offset);
- data = R_REG(bus->dhd->osh, (volatile uint64 *)(bus->tcm + offset));
- return data;
-}
-#endif /* DHD_SUPPORT_64BIT */
-
+#if defined(PCIE_RC_VENDOR_ID) && defined(PCIE_RC_DEVICE_ID)
uint32
dhdpcie_rc_config_read(dhd_bus_t *bus, uint offset)
{
*/
uint32
-dhdpcie_access_cap(struct pci_dev *pdev, int cap, uint offset, bool is_ext, bool is_write,
+dhdpcie_rc_access_cap(dhd_bus_t *bus, int cap, uint offset, bool is_ext, bool is_write,
uint32 writeval)
{
int cap_ptr = 0;
uint32 ret = -1;
uint32 readval;
- if (!(pdev)) {
- DHD_ERROR(("%s: pdev is NULL\n", __FUNCTION__));
+ if (!(bus->rc_dev)) {
+ DHD_ERROR(("%s: RC %x:%x handle is NULL\n",
+ __FUNCTION__, PCIE_RC_VENDOR_ID, PCIE_RC_DEVICE_ID));
return ret;
}
/* removing max EXT_CAP_ID check as
	 * linux kernel definition's max value is not updated yet as per spec
*/
- cap_ptr = pci_find_ext_capability(pdev, cap);
+ cap_ptr = pci_find_ext_capability(bus->rc_dev, cap);
} else {
/* removing max PCI_CAP_ID_MAX check as
		 * previous kernel versions don't have this definition
*/
- cap_ptr = pci_find_capability(pdev, cap);
+ cap_ptr = pci_find_capability(bus->rc_dev, cap);
}
/* Return if capability with given ID not found */
if (cap_ptr == 0) {
- DHD_ERROR(("%s: PCI Cap(0x%02x) not supported.\n",
- __FUNCTION__, cap));
+ DHD_ERROR(("%s: RC %x:%x PCI Cap(0x%02x) not supported.\n",
+ __FUNCTION__, PCIE_RC_VENDOR_ID, PCIE_RC_DEVICE_ID, cap));
return BCME_ERROR;
}
if (is_write) {
- pci_write_config_dword(pdev, (cap_ptr + offset), writeval);
+ ret = pci_write_config_dword(bus->rc_dev, (cap_ptr + offset), writeval);
+ if (ret) {
+ DHD_ERROR(("%s: pci_write_config_dword failed. cap=%d offset=%d\n",
+ __FUNCTION__, cap, offset));
+ return BCME_ERROR;
+ }
ret = BCME_OK;
} else {
- pci_read_config_dword(pdev, (cap_ptr + offset), &readval);
+ ret = pci_read_config_dword(bus->rc_dev, (cap_ptr + offset), &readval);
+
+ if (ret) {
+ DHD_ERROR(("%s: pci_read_config_dword failed. cap=%d offset=%d\n",
+ __FUNCTION__, cap, offset));
+ return BCME_ERROR;
+ }
ret = readval;
}
return ret;
}
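+/*
+ * Note on the return value: a successful read returns the raw config-space
+ * dword, so a register that legitimately reads as 0xffffffff cannot be told
+ * apart from the -1/BCME_ERROR failure value by the caller.
+ */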
-uint32
-dhdpcie_rc_access_cap(dhd_bus_t *bus, int cap, uint offset, bool is_ext, bool is_write,
- uint32 writeval)
-{
- if (!(bus->rc_dev)) {
- DHD_ERROR(("%s: RC %x:%x handle is NULL\n",
- __FUNCTION__, PCIE_RC_VENDOR_ID, PCIE_RC_DEVICE_ID));
- return BCME_ERROR;
- }
-
- return dhdpcie_access_cap(bus->rc_dev, cap, offset, is_ext, is_write, writeval);
-}
-
-uint32
-dhdpcie_ep_access_cap(dhd_bus_t *bus, int cap, uint offset, bool is_ext, bool is_write,
- uint32 writeval)
-{
- if (!(bus->dev)) {
- DHD_ERROR(("%s: EP handle is NULL\n", __FUNCTION__));
- return BCME_ERROR;
- }
-
- return dhdpcie_access_cap(bus->dev, cap, offset, is_ext, is_write, writeval);
-}
-
/* API wrapper to read Root Port link capability
* Returns 2 = GEN2 1 = GEN1 BCME_ERR on linkcap not found
*/
linkcap &= PCIE_CAP_LINKCAP_LNKSPEED_MASK;
return linkcap;
}
-
-static void dhdpcie_config_save_restore_coherent(dhd_bus_t *bus, bool state)
-{
- if (bus->coreid == ARMCA7_CORE_ID) {
- if (state) {
- /* Sleep */
- bus->coherent_state = dhdpcie_bus_cfg_read_dword(bus,
- PCIE_CFG_SUBSYSTEM_CONTROL, 4) & PCIE_BARCOHERENTACCEN_MASK;
- } else {
- uint32 val = (dhdpcie_bus_cfg_read_dword(bus, PCIE_CFG_SUBSYSTEM_CONTROL,
- 4) & ~PCIE_BARCOHERENTACCEN_MASK) | bus->coherent_state;
- dhdpcie_bus_cfg_write_dword(bus, PCIE_CFG_SUBSYSTEM_CONTROL, 4, val);
- }
- }
-}
+#endif
int dhdpcie_pci_suspend_resume(dhd_bus_t *bus, bool state)
{
struct pci_dev *dev = bus->dev;
if (state) {
- dhdpcie_config_save_restore_coherent(bus, state);
-#if !defined(BCMPCIE_OOB_HOST_WAKE)
+#ifndef BCMPCIE_OOB_HOST_WAKE
dhdpcie_pme_active(bus->osh, state);
-#endif // endif
+#endif /* !BCMPCIE_OOB_HOST_WAKE */
rc = dhdpcie_suspend_dev(dev);
if (!rc) {
dhdpcie_suspend_host_dev(bus);
}
} else {
- rc = dhdpcie_resume_host_dev(bus);
- if (!rc) {
- rc = dhdpcie_resume_dev(dev);
- if (PCIECTO_ENAB(bus)) {
- /* reinit CTO configuration
- * because cfg space got reset at D3 (PERST)
- */
- dhdpcie_cto_cfg_init(bus, TRUE);
- }
- if (PCIE_ENUM_RESET_WAR_ENAB(bus->sih->buscorerev)) {
- dhdpcie_ssreset_dis_enum_rst(bus);
- }
-#if !defined(BCMPCIE_OOB_HOST_WAKE)
- dhdpcie_pme_active(bus->osh, state);
-#endif // endif
- }
- dhdpcie_config_save_restore_coherent(bus, state);
- if (bus->is_linkdown) {
+ dhdpcie_resume_host_dev(bus);
+ rc = dhdpcie_resume_dev(dev);
+#ifndef BCMPCIE_OOB_HOST_WAKE
+ dhdpcie_pme_active(bus->osh, state);
+#endif /* !BCMPCIE_OOB_HOST_WAKE */
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
+#if defined(DHD_HANG_SEND_UP_TEST)
+ if (bus->is_linkdown ||
+ bus->dhd->req_hang_type == HANG_REASON_PCIE_RC_LINK_UP_FAIL)
+#else /* DHD_HANG_SEND_UP_TEST */
+ if (bus->is_linkdown)
+#endif /* DHD_HANG_SEND_UP_TEST */
+ {
bus->dhd->hang_reason = HANG_REASON_PCIE_RC_LINK_UP_FAIL;
dhd_os_send_hang_message(bus->dhd);
}
+#endif
}
return rc;
}
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0))
static int dhdpcie_device_scan(struct device *dev, void *data)
{
struct pci_dev *pcidev;
#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wcast-qual"
-#endif // endif
+#endif
pcidev = container_of(dev, struct pci_dev, dev);
#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
#pragma GCC diagnostic pop
-#endif // endif
+#endif
if (pcidev->vendor != 0x14e4)
return 0;
return 0;
}
+#endif /* LINUX_VERSION >= 2.6.0 */
int
dhdpcie_bus_register(void)
{
int error = 0;
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0))
+ if (!(error = pci_module_init(&dhdpcie_driver)))
+ return 0;
+
+ DHD_ERROR(("%s: pci_module_init failed 0x%x\n", __FUNCTION__, error));
+#else
if (!(error = pci_register_driver(&dhdpcie_driver))) {
bus_for_each_dev(dhdpcie_driver.driver.bus, NULL, &error, dhdpcie_device_scan);
if (!error) {
pci_unregister_driver(&dhdpcie_driver);
error = BCME_ERROR;
}
+#endif /* LINUX_VERSION < 2.6.0 */
return error;
}
+
void
dhdpcie_bus_unregister(void)
{
int __devinit
dhdpcie_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
- int err = 0;
DHD_MUTEX_LOCK();
if (dhdpcie_chipmatch (pdev->vendor, pdev->device)) {
DHD_ERROR(("%s: chipmatch failed!!\n", __FUNCTION__));
- err = -ENODEV;
- goto exit;
+ return -ENODEV;
}
-
printf("PCI_PROBE: bus %X, slot %X,vendor %X, device %X"
"(good PCI location)\n", pdev->bus->number,
PCI_SLOT(pdev->devfn), pdev->vendor, pdev->device);
- if (dhdpcie_init_succeeded == TRUE) {
- DHD_ERROR(("%s(): === Driver Already attached to a BRCM device === \r\n",
- __FUNCTION__));
- err = -ENODEV;
- goto exit;
- }
-
if (dhdpcie_init (pdev)) {
DHD_ERROR(("%s: PCIe Enumeration failed\n", __FUNCTION__));
- err = -ENODEV;
- goto exit;
+ return -ENODEV;
}
-#ifdef DHD_PCIE_NATIVE_RUNTIMEPM
- /*
- Since MSM PCIe RC dev usage conunt already incremented +2 even
- before dhdpcie_pci_probe() called, then we inevitably to call
- pm_runtime_put_noidle() two times to make the count start with zero.
- */
-
- pm_runtime_put_noidle(&pdev->dev);
- pm_runtime_put_noidle(&pdev->dev);
- pm_runtime_set_suspended(&pdev->dev);
-#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
-
#ifdef BCMPCIE_DISABLE_ASYNC_SUSPEND
/* disable async suspend */
device_disable_async_suspend(&pdev->dev);
#endif /* BCMPCIE_DISABLE_ASYNC_SUSPEND */
DHD_TRACE(("%s: PCIe Enumeration done!!\n", __FUNCTION__));
-exit:
DHD_MUTEX_UNLOCK();
- return err;
+ return 0;
}
int
return 0;
}
+
void __devexit
dhdpcie_pci_remove(struct pci_dev *pdev)
{
bus = pch->bus;
osh = pch->osh;
-#ifdef DHD_PCIE_NATIVE_RUNTIMEPM
- pm_runtime_get_noresume(&pdev->dev);
- pm_runtime_get_noresume(&pdev->dev);
-#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
-
+#ifdef SUPPORT_LINKDOWN_RECOVERY
if (bus) {
-
- bus->rc_dev = NULL;
-
- dhdpcie_bus_release(bus);
+#ifdef CONFIG_ARCH_MSM
+ msm_pcie_deregister_event(&bus->pcie_event);
+#endif /* CONFIG_ARCH_MSM */
+#ifdef EXYNOS_PCIE_LINKDOWN_RECOVERY
+#ifdef CONFIG_SOC_EXYNOS8890
+ exynos_pcie_deregister_event(&bus->pcie_event);
+#endif /* CONFIG_SOC_EXYNOS8890 */
+#endif /* EXYNOS_PCIE_LINKDOWN_RECOVERY */
}
+#endif /* SUPPORT_LINKDOWN_RECOVERY */
+ bus->rc_dev = NULL;
+
+ dhdpcie_bus_release(bus);
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 31))
if (pci_is_enabled(pdev))
+#endif
pci_disable_device(pdev);
#ifdef BCMPCIE_OOB_HOST_WAKE
/* pcie os info detach */
return;
}
-/* Enable Linux Msi */
-int
-dhdpcie_enable_msi(struct pci_dev *pdev, unsigned int min_vecs, unsigned int max_vecs)
-{
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 8, 0))
- return pci_alloc_irq_vectors(pdev, min_vecs, max_vecs, PCI_IRQ_MSI);
-#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 16, 0))
- return pci_enable_msi_range(pdev, min_vecs, max_vecs);
-#else
- return pci_enable_msi_block(pdev, max_vecs);
-#endif // endif
-}
-
-/* Disable Linux Msi */
-void
-dhdpcie_disable_msi(struct pci_dev *pdev)
-{
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 8, 0))
- pci_free_irq_vectors(pdev);
-#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 0, 0))
- pci_disable_msi(pdev);
-#else
- pci_disable_msi(pdev);
-#endif // endif
- return;
-}
-
-/* Request Linux irq */
+/* Free Linux irq */
int
dhdpcie_request_irq(dhdpcie_info_t *dhdpcie_info)
{
dhd_bus_t *bus = dhdpcie_info->bus;
struct pci_dev *pdev = dhdpcie_info->bus->dev;
- int host_irq_disabled;
+ int err = 0;
if (!bus->irq_registered) {
snprintf(dhdpcie_info->pciname, sizeof(dhdpcie_info->pciname),
- "dhdpcie:%s", pci_name(pdev));
-
- if (bus->d2h_intr_method == PCIE_MSI) {
- if (dhdpcie_enable_msi(pdev, 1, 1) < 0) {
- DHD_ERROR(("%s: dhdpcie_enable_msi() failed\n", __FUNCTION__));
- dhdpcie_disable_msi(pdev);
- bus->d2h_intr_method = PCIE_INTX;
- }
+ "dhdpcie:%s", pci_name(pdev));
+#ifdef DHD_USE_MSI
+ printf("%s: MSI enabled\n", __FUNCTION__);
+ err = pci_enable_msi(pdev);
+ if (err < 0) {
+ DHD_ERROR(("%s: pci_enable_msi() failed, %d, fall back to INTx\n", __FUNCTION__, err));
}
-
- if (bus->d2h_intr_method == PCIE_MSI)
- printf("%s: MSI enabled\n", __FUNCTION__);
- else
- printf("%s: INTx enabled\n", __FUNCTION__);
-
- if (request_irq(pdev->irq, dhdpcie_isr, IRQF_SHARED,
- dhdpcie_info->pciname, bus) < 0) {
+#else
+ printf("%s: MSI not enabled\n", __FUNCTION__);
+#endif /* DHD_USE_MSI */
+ err = request_irq(pdev->irq, dhdpcie_isr, IRQF_SHARED,
+ dhdpcie_info->pciname, bus);
+ if (err) {
DHD_ERROR(("%s: request_irq() failed\n", __FUNCTION__));
- if (bus->d2h_intr_method == PCIE_MSI) {
- dhdpcie_disable_msi(pdev);
- }
+#ifdef DHD_USE_MSI
+ pci_disable_msi(pdev);
+#endif /* DHD_USE_MSI */
return -1;
- }
- else {
+ } else {
bus->irq_registered = TRUE;
}
} else {
DHD_ERROR(("%s: PCI IRQ is already registered\n", __FUNCTION__));
}
- host_irq_disabled = dhdpcie_irq_disabled(bus);
- if (host_irq_disabled) {
- DHD_ERROR(("%s: PCIe IRQ was disabled(%d), so, enabled it again\n",
- __FUNCTION__, host_irq_disabled));
+ if (!dhdpcie_irq_enabled(bus)) {
+ DHD_ERROR(("%s: PCIe IRQ was disabled, so, enabled it again\n", __FUNCTION__));
dhdpcie_enable_irq(bus);
}
DHD_TRACE(("%s %s\n", __FUNCTION__, dhdpcie_info->pciname));
+
return 0; /* SUCCESS */
}
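+/*
+ * dhdpcie_request_irq() above: with DHD_USE_MSI the driver tries
+ * pci_enable_msi() and, on failure, logs and falls back to legacy INTx; it
+ * then registers a shared handler (IRQF_SHARED) with the bus as dev_id and
+ * re-enables the IRQ line if it was left disabled.
+ */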
#define PRINTF_RESOURCE "0x%016llx"
#else
#define PRINTF_RESOURCE "0x%08x"
-#endif // endif
-
-#ifdef EXYNOS_PCIE_MODULE_PATCH
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0))
-extern struct pci_saved_state *bcm_pcie_default_state;
-#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0) */
-#endif /* EXYNOS_MODULE_PATCH */
+#endif
/*
1: struct pci_dev *pdev -- pci device structure
2: pci_res -- structure containing pci configuration space values
+
Return value:
int - Status (TRUE or FALSE)
struct pci_dev *pdev = NULL;
pdev = dhdpcie_info->dev;
#ifdef EXYNOS_PCIE_MODULE_PATCH
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0))
- if (bcm_pcie_default_state) {
- pci_load_saved_state(pdev, bcm_pcie_default_state);
- pci_restore_state(pdev);
- }
-#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0) */
+ pci_restore_state(pdev);
#endif /* EXYNOS_MODULE_PATCH */
do {
if (pci_enable_device(pdev)) {
}
dhdpcie_info->regs = (volatile char *) REG_MAP(bar0_addr, DONGLE_REG_MAP_SIZE);
- dhdpcie_info->bar1_size =
+ dhdpcie_info->tcm_size =
(bar1_size > DONGLE_TCM_MAP_SIZE) ? bar1_size : DONGLE_TCM_MAP_SIZE;
- dhdpcie_info->tcm = (volatile char *) REG_MAP(bar1_addr, dhdpcie_info->bar1_size);
+ dhdpcie_info->tcm = (volatile char *) REG_MAP(bar1_addr, dhdpcie_info->tcm_size);
if (!dhdpcie_info->regs || !dhdpcie_info->tcm) {
DHD_ERROR(("%s:ioremap() failed\n", __FUNCTION__));
break;
}
-#ifdef EXYNOS_PCIE_MODULE_PATCH
+
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0))
- if (bcm_pcie_default_state == NULL) {
+ if (!dhd_download_fw_on_driverload) {
+			/* Back up the PCIe configuration so the Wi-Fi on/off
+			 * sequence can restore it for a built-in driver
+			 */
pci_save_state(pdev);
- bcm_pcie_default_state = pci_store_saved_state(pdev);
+ dhdpcie_info->default_state = pci_store_saved_state(pdev);
+
+ if (dhdpcie_info->default_state == NULL) {
+ DHD_ERROR(("%s pci_store_saved_state returns NULL\n",
+ __FUNCTION__));
+ REG_UNMAP(dhdpcie_info->regs);
+ REG_UNMAP(dhdpcie_info->tcm);
+ pci_disable_device(pdev);
+ break;
+ }
}
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0) */
-#endif /* EXYNOS_MODULE_PATCH */
-
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0))
- /* Backup PCIe configuration so as to use Wi-Fi on/off process
- * in case of built in driver
- */
- pci_save_state(pdev);
- dhdpcie_info->default_state = pci_store_saved_state(pdev);
- if (dhdpcie_info->default_state == NULL) {
- DHD_ERROR(("%s pci_store_saved_state returns NULL\n",
- __FUNCTION__));
- REG_UNMAP(dhdpcie_info->regs);
- REG_UNMAP(dhdpcie_info->tcm);
- pci_disable_device(pdev);
- break;
- }
-#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0) */
+#ifdef EXYNOS_PCIE_MODULE_PATCH
+ pci_save_state(pdev);
+#endif /* EXYNOS_MODULE_PATCH */
DHD_TRACE(("%s:Phys addr : reg space = %p base addr 0x"PRINTF_RESOURCE" \n",
__FUNCTION__, dhdpcie_info->regs, bar0_addr));
}
-void dhdpcie_dump_resource(dhd_bus_t *bus)
+#ifdef SUPPORT_LINKDOWN_RECOVERY
+#if defined(CONFIG_ARCH_MSM) || (defined(EXYNOS_PCIE_LINKDOWN_RECOVERY) && \
+ (defined(CONFIG_SOC_EXYNOS8890) || defined(CONFIG_SOC_EXYNOS8895)))
+void dhdpcie_linkdown_cb(struct_pcie_notify *noti)
{
- dhdpcie_info_t *pch;
-
- if (bus == NULL) {
- DHD_ERROR(("%s: bus is NULL\n", __FUNCTION__));
- return;
- }
-
- if (bus->dev == NULL) {
- DHD_ERROR(("%s: bus->dev is NULL\n", __FUNCTION__));
- return;
- }
+ struct pci_dev *pdev = (struct pci_dev *)noti->user;
+ dhdpcie_info_t *pch = NULL;
- pch = pci_get_drvdata(bus->dev);
- if (pch == NULL) {
- DHD_ERROR(("%s: pch is NULL\n", __FUNCTION__));
- return;
+ if (pdev) {
+ pch = pci_get_drvdata(pdev);
+ if (pch) {
+ dhd_bus_t *bus = pch->bus;
+ if (bus) {
+ dhd_pub_t *dhd = bus->dhd;
+ if (dhd) {
+ DHD_ERROR(("%s: Event HANG send up "
+ "due to PCIe linkdown\n",
+ __FUNCTION__));
+#ifdef CONFIG_ARCH_MSM
+ bus->no_cfg_restore = 1;
+#endif /* CONFIG_ARCH_MSM */
+ bus->is_linkdown = 1;
+ DHD_OS_WAKE_LOCK(dhd);
+ dhd->hang_reason = HANG_REASON_PCIE_LINK_DOWN;
+ dhd_os_send_hang_message(dhd);
+ }
+ }
+ }
}
- /* BAR0 */
- DHD_ERROR(("%s: BAR0(VA): 0x%pK, BAR0(PA): "PRINTF_RESOURCE", SIZE: %d\n",
- __FUNCTION__, pch->regs, pci_resource_start(bus->dev, 0),
- DONGLE_REG_MAP_SIZE));
-
- /* BAR1 */
- DHD_ERROR(("%s: BAR1(VA): 0x%pK, BAR1(PA): "PRINTF_RESOURCE", SIZE: %d\n",
- __FUNCTION__, pch->tcm, pci_resource_start(bus->dev, 2),
- pch->bar1_size));
}
+#endif /* CONFIG_ARCH_MSM || (EXYNOS_PCIE_LINKDOWN_RECOVERY &&
+ * (CONFIG_SOC_EXYNOS8890 || CONFIG_SOC_EXYNOS8895))
+ */
+#endif /* SUPPORT_LINKDOWN_RECOVERY */
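+/*
+ * dhdpcie_linkdown_cb() above is registered with the MSM/Exynos root-complex
+ * drivers in dhdpcie_init() below; on a link-down event it flags
+ * bus->is_linkdown (plus no_cfg_restore on MSM) and sends a HANG message up
+ * so the upper layers can recover.
+ */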
int dhdpcie_init(struct pci_dev *pdev)
{
#ifdef USE_SMMU_ARCH_MSM
dhdpcie_smmu_info_t *dhdpcie_smmu_info = NULL;
#endif /* USE_SMMU_ARCH_MSM */
- int ret = 0;
do {
/* osl attach */
}
/* Bus initialization */
- ret = dhdpcie_bus_attach(osh, &bus, dhdpcie_info->regs, dhdpcie_info->tcm, pdev);
- if (ret != BCME_OK) {
+ bus = dhdpcie_bus_attach(osh, dhdpcie_info->regs, dhdpcie_info->tcm, pdev);
+ if (!bus) {
DHD_ERROR(("%s:dhdpcie_bus_attach() failed\n", __FUNCTION__));
break;
}
dhdpcie_info->bus = bus;
bus->is_linkdown = 0;
- bus->no_bus_init = FALSE;
- bus->cto_triggered = 0;
-
- bus->rc_dev = NULL;
/* Get RC Device Handle */
- if (bus->dev->bus) {
- /* self member of structure pci_bus is bridge device as seen by parent */
- bus->rc_dev = bus->dev->bus->self;
- if (bus->rc_dev)
- DHD_ERROR(("%s: rc_dev from dev->bus->self (%x:%x) is %pK\n", __FUNCTION__,
- bus->rc_dev->vendor, bus->rc_dev->device, bus->rc_dev));
- else
- DHD_ERROR(("%s: bus->dev->bus->self is NULL\n", __FUNCTION__));
- } else {
- DHD_ERROR(("%s: unable to get rc_dev as dev->bus is NULL\n", __FUNCTION__));
- }
-
- /* if rc_dev is still NULL, try to get from vendor/device IDs */
- if (bus->rc_dev == NULL) {
- bus->rc_dev = pci_get_device(PCIE_RC_VENDOR_ID, PCIE_RC_DEVICE_ID, NULL);
- DHD_ERROR(("%s: rc_dev from pci_get_device (%x:%x) is %p\n", __FUNCTION__,
- PCIE_RC_VENDOR_ID, PCIE_RC_DEVICE_ID, bus->rc_dev));
- }
-
- bus->rc_ep_aspm_cap = dhd_bus_is_rc_ep_aspm_capable(bus);
- bus->rc_ep_l1ss_cap = dhd_bus_is_rc_ep_l1ss_capable(bus);
- DHD_ERROR(("%s: rc_ep_aspm_cap: %d rc_ep_l1ss_cap: %d\n",
- __FUNCTION__, bus->rc_ep_aspm_cap, bus->rc_ep_l1ss_cap));
-
-#ifdef FORCE_TPOWERON
- if (dhdpcie_chip_req_forced_tpoweron(bus)) {
- dhd_bus_set_tpoweron(bus, tpoweron_scale);
- }
-#endif /* FORCE_TPOWERON */
+#if defined(PCIE_RC_VENDOR_ID) && defined(PCIE_RC_DEVICE_ID)
+ bus->rc_dev = pci_get_device(PCIE_RC_VENDOR_ID, PCIE_RC_DEVICE_ID, NULL);
+#else
+ bus->rc_dev = NULL;
+#endif
#if defined(BCMPCIE_OOB_HOST_WAKE) && defined(CUSTOMER_HW2) && \
defined(CONFIG_ARCH_APQ8084)
#ifdef DONGLE_ENABLE_ISOLATION
bus->dhd->dongle_isolation = TRUE;
#endif /* DONGLE_ENABLE_ISOLATION */
+#ifdef SUPPORT_LINKDOWN_RECOVERY
+#ifdef CONFIG_ARCH_MSM
+ bus->pcie_event.events = MSM_PCIE_EVENT_LINKDOWN;
+ bus->pcie_event.user = pdev;
+ bus->pcie_event.mode = MSM_PCIE_TRIGGER_CALLBACK;
+ bus->pcie_event.callback = dhdpcie_linkdown_cb;
+ bus->pcie_event.options = MSM_PCIE_CONFIG_NO_RECOVERY;
+ msm_pcie_register_event(&bus->pcie_event);
+ bus->no_cfg_restore = 0;
+#endif /* CONFIG_ARCH_MSM */
+#ifdef EXYNOS_PCIE_LINKDOWN_RECOVERY
+#if defined(CONFIG_SOC_EXYNOS8890) || defined(CONFIG_SOC_EXYNOS8895)
+ bus->pcie_event.events = EXYNOS_PCIE_EVENT_LINKDOWN;
+ bus->pcie_event.user = pdev;
+ bus->pcie_event.mode = EXYNOS_PCIE_TRIGGER_CALLBACK;
+ bus->pcie_event.callback = dhdpcie_linkdown_cb;
+ exynos_pcie_register_event(&bus->pcie_event);
+#endif /* CONFIG_SOC_EXYNOS8890 || CONFIG_SOC_EXYNOS8895 */
+#endif /* EXYNOS_PCIE_LINKDOWN_RECOVERY */
+ bus->read_shm_fail = FALSE;
+#endif /* SUPPORT_LINKDOWN_RECOVERY */
if (bus->intr) {
/* Register interrupt callback, but mask it (not operational yet). */
/* Attach to the OS network interface */
DHD_TRACE(("%s(): Calling dhd_register_if() \n", __FUNCTION__));
- if (dhd_attach_net(bus->dhd, TRUE)) {
+ if (dhd_register_if(bus->dhd, 0, TRUE)) {
DHD_ERROR(("%s(): ERROR.. dhd_register_if() failed\n", __FUNCTION__));
break;
}
dhdpcie_init_succeeded = TRUE;
-#ifdef DHD_PCIE_NATIVE_RUNTIMEPM
- pm_runtime_set_autosuspend_delay(&pdev->dev, AUTO_SUSPEND_TIMEOUT);
- pm_runtime_use_autosuspend(&pdev->dev);
- atomic_set(&bus->dhd->block_bus, FALSE);
-#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
-
#if defined(MULTIPLE_SUPPLICANT)
wl_android_post_init(); // terence 20120530: fix critical section in dhd_open and dhdsdio_probe
#endif /* MULTIPLE_SUPPLICANT */
if (bus) {
pdev = bus->dev;
if (bus->irq_registered) {
-#if defined(SET_PCIE_IRQ_CPU_CORE) && defined(CONFIG_ARCH_SM8150)
- /* clean up the affinity_hint before
- * the unregistration of PCIe irq
- */
- (void)irq_set_affinity_hint(pdev->irq, NULL);
-#endif /* SET_PCIE_IRQ_CPU_CORE && CONFIG_ARCH_SM8150 */
free_irq(pdev->irq, bus);
bus->irq_registered = FALSE;
- if (bus->d2h_intr_method == PCIE_MSI) {
- dhdpcie_disable_msi(pdev);
- }
+#ifdef DHD_USE_MSI
+ pci_disable_msi(pdev);
+#endif /* DHD_USE_MSI */
} else {
DHD_ERROR(("%s: PCIe IRQ is not registered\n", __FUNCTION__));
}
 disable interrupt and queue DPC if mailbox interrupts are raised.
*/
+
irqreturn_t
dhdpcie_isr(int irq, void *arg)
{
dhd_bus_t *bus = (dhd_bus_t*)arg;
- bus->isr_entry_time = OSL_LOCALTIME_NS();
- if (!dhdpcie_bus_isr(bus)) {
- DHD_LOG_MEM(("%s: dhdpcie_bus_isr returns with FALSE\n", __FUNCTION__));
- }
- bus->isr_exit_time = OSL_LOCALTIME_NS();
- return IRQ_HANDLED;
+	if (dhdpcie_bus_isr(bus))
+		return IRQ_HANDLED;
+	else
+		return IRQ_NONE;
}
int
return BCME_OK;
}
-int
-dhdpcie_irq_disabled(dhd_bus_t *bus)
+bool
+dhdpcie_irq_enabled(dhd_bus_t *bus)
{
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 4, 0))
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0))
struct irq_desc *desc = irq_to_desc(bus->dev->irq);
/* depth will be zero, if enabled */
- return desc->depth;
+ if (!desc->depth) {
+ DHD_ERROR(("%s: depth:%d\n", __FUNCTION__, desc->depth));
+ }
+ return desc->depth ? FALSE : TRUE;
#else
- /* return ERROR by default as there is no support for lower versions */
- return BCME_ERROR;
+ /* return TRUE by default as there is no support for lower versions */
+ return TRUE;
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0) */
}
{
int ret = 0;
#ifdef CONFIG_ARCH_MSM
+#ifdef SUPPORT_LINKDOWN_RECOVERY
+ int options = 0;
+#endif /* SUPPORT_LINKDOWN_RECOVERY */
#endif /* CONFIG_ARCH_MSM */
DHD_TRACE(("%s Enter:\n", __FUNCTION__));
}
#ifdef CONFIG_ARCH_MSM
+#ifdef SUPPORT_LINKDOWN_RECOVERY
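+	/* After a link down, resume without restoring config space and recover it explicitly below */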
+ if (bus->no_cfg_restore) {
+ options = MSM_PCIE_CONFIG_NO_CFG_RESTORE;
+ }
+ ret = msm_pcie_pm_control(MSM_PCIE_RESUME, bus->dev->bus->number,
+ bus->dev, NULL, options);
+ if (bus->no_cfg_restore && !ret) {
+ msm_pcie_recover_config(bus->dev);
+ bus->no_cfg_restore = 0;
+ }
+#else
ret = msm_pcie_pm_control(MSM_PCIE_RESUME, bus->dev->bus->number,
bus->dev, NULL, 0);
+#endif /* SUPPORT_LINKDOWN_RECOVERY */
if (ret) {
DHD_ERROR(("%s Failed to bring up PCIe link\n", __FUNCTION__));
goto done;
{
int ret = 0;
#ifdef CONFIG_ARCH_MSM
+#ifdef SUPPORT_LINKDOWN_RECOVERY
+ int options = 0;
+#endif /* SUPPORT_LINKDOWN_RECOVERY */
#endif /* CONFIG_ARCH_MSM */
DHD_TRACE(("%s Enter:\n", __FUNCTION__));
}
#ifdef CONFIG_ARCH_MSM
+#ifdef SUPPORT_LINKDOWN_RECOVERY
+	/* Always reset the PCIe host when Wi-Fi is turned off */
+ bus->no_cfg_restore = 1;
+
+ if (bus->no_cfg_restore) {
+ options = MSM_PCIE_CONFIG_NO_CFG_RESTORE | MSM_PCIE_CONFIG_LINKDOWN;
+ }
+
+ ret = msm_pcie_pm_control(MSM_PCIE_SUSPEND, bus->dev->bus->number,
+ bus->dev, NULL, options);
+#else
ret = msm_pcie_pm_control(MSM_PCIE_SUSPEND, bus->dev->bus->number,
bus->dev, NULL, 0);
+#endif /* SUPPORT_LINKDOWN_RECOVERY */
if (ret) {
DHD_ERROR(("Failed to stop PCIe link\n"));
goto done;
return BCME_ERROR;
}
- if (pci_is_enabled(bus->dev))
- pci_disable_device(bus->dev);
+ pci_disable_device(bus->dev);
return 0;
}
pci_load_saved_state(bus->dev, pch->default_state);
#endif /* LINUX_VERSION >= 3.14.0 && LINUX_VERSION < 3.19.0 && !CONFIG_SOC_EXYNOS8890 */
- /* Check if Device ID is valid */
- if (bus->dev->state_saved) {
- uint32 vid, saved_vid;
- pci_read_config_dword(bus->dev, PCI_CFG_VID, &vid);
- saved_vid = bus->dev->saved_config_space[PCI_CFG_VID];
- if (vid != saved_vid) {
- DHD_ERROR(("%s: VID(0x%x) is different from saved VID(0x%x) "
- "Skip the bus init\n", __FUNCTION__, vid, saved_vid));
- bus->no_bus_init = TRUE;
- /* Check if the PCIe link is down */
- if (vid == (uint32)-1) {
- bus->is_linkdown = 1;
- }
- return BCME_ERROR;
- }
- }
-
pci_restore_state(bus->dev);
#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0)) */
}
bus->regs = dhdpcie_info->regs;
- dhdpcie_info->bar1_size =
+ dhdpcie_info->tcm_size =
(bar1_size > DONGLE_TCM_MAP_SIZE) ? bar1_size : DONGLE_TCM_MAP_SIZE;
- dhdpcie_info->tcm = (volatile char *) REG_MAP(bar1_addr, dhdpcie_info->bar1_size);
+ dhdpcie_info->tcm = (volatile char *) REG_MAP(bar1_addr, dhdpcie_info->tcm_size);
if (!dhdpcie_info->tcm) {
DHD_ERROR(("%s: ioremap() for regs is failed\n", __FUNCTION__));
REG_UNMAP(dhdpcie_info->regs);
}
#ifdef BCMPCIE_OOB_HOST_WAKE
-#ifdef CONFIG_BCMDHD_GET_OOB_STATE
-extern int dhd_get_wlan_oob_gpio(void);
-#endif /* CONFIG_BCMDHD_GET_OOB_STATE */
-
-int dhdpcie_get_oob_irq_level(void)
-{
- int gpio_level;
-
-#ifdef CONFIG_BCMDHD_GET_OOB_STATE
- gpio_level = dhd_get_wlan_oob_gpio();
-#else
- gpio_level = BCME_UNSUPPORTED;
-#endif /* CONFIG_BCMDHD_GET_OOB_STATE */
- return gpio_level;
-}
-
-int dhdpcie_get_oob_irq_status(struct dhd_bus *bus)
-{
- dhdpcie_info_t *pch;
- dhdpcie_os_info_t *dhdpcie_osinfo;
-
- if (bus == NULL) {
- DHD_ERROR(("%s: bus is NULL\n", __FUNCTION__));
- return 0;
- }
-
- if (bus->dev == NULL) {
- DHD_ERROR(("%s: bus->dev is NULL\n", __FUNCTION__));
- return 0;
- }
-
- pch = pci_get_drvdata(bus->dev);
- if (pch == NULL) {
- DHD_ERROR(("%s: pch is NULL\n", __FUNCTION__));
- return 0;
- }
-
- dhdpcie_osinfo = (dhdpcie_os_info_t *)pch->os_cxt;
-
- return dhdpcie_osinfo ? dhdpcie_osinfo->oob_irq_enabled : 0;
-}
-
-int dhdpcie_get_oob_irq_num(struct dhd_bus *bus)
-{
- dhdpcie_info_t *pch;
- dhdpcie_os_info_t *dhdpcie_osinfo;
-
- if (bus == NULL) {
- DHD_ERROR(("%s: bus is NULL\n", __FUNCTION__));
- return 0;
- }
-
- if (bus->dev == NULL) {
- DHD_ERROR(("%s: bus->dev is NULL\n", __FUNCTION__));
- return 0;
- }
-
- pch = pci_get_drvdata(bus->dev);
- if (pch == NULL) {
- DHD_ERROR(("%s: pch is NULL\n", __FUNCTION__));
- return 0;
- }
-
- dhdpcie_osinfo = (dhdpcie_os_info_t *)pch->os_cxt;
-
- return dhdpcie_osinfo ? dhdpcie_osinfo->oob_irq_num : 0;
-}
-
void dhdpcie_oob_intr_set(dhd_bus_t *bus, bool enable)
{
unsigned long flags;
(dhdpcie_osinfo->oob_irq_num > 0)) {
if (enable) {
enable_irq(dhdpcie_osinfo->oob_irq_num);
- bus->oob_intr_enable_count++;
- bus->last_oob_irq_enable_time = OSL_LOCALTIME_NS();
} else {
disable_irq_nosync(dhdpcie_osinfo->oob_irq_num);
- bus->oob_intr_disable_count++;
- bus->last_oob_irq_disable_time = OSL_LOCALTIME_NS();
}
dhdpcie_osinfo->oob_irq_enabled = enable;
}
static irqreturn_t wlan_oob_irq(int irq, void *data)
{
dhd_bus_t *bus;
- unsigned long flags_bus;
DHD_TRACE(("%s: IRQ Triggered\n", __FUNCTION__));
bus = (dhd_bus_t *)data;
dhdpcie_oob_intr_set(bus, FALSE);
- bus->last_oob_irq_time = OSL_LOCALTIME_NS();
- bus->oob_intr_count++;
#ifdef DHD_WAKE_STATUS
+#ifdef DHD_PCIE_RUNTIMEPM
+	/* Avoid counting wake-ups that are caused by runtime PM itself */
+ if (bus->chk_pm)
+#endif /* DHD_PCIE_RUNTIMEPM */
{
bcmpcie_set_get_wake(bus, 1);
}
#endif /* DHD_WAKE_STATUS */
-#ifdef DHD_PCIE_NATIVE_RUNTIMEPM
- dhd_bus_wakeup_work(bus->dhd);
-#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
- DHD_BUS_LOCK(bus->bus_lock, flags_bus);
- /* Hold wakelock if bus_low_power_state is
- * DHD_BUS_D3_INFORM_SENT OR DHD_BUS_D3_ACK_RECIEVED
- */
- if (bus->dhd->up && bus->bus_low_power_state != DHD_BUS_NO_LOW_POWER_STATE) {
+#ifdef DHD_PCIE_RUNTIMEPM
+ dhdpcie_runtime_bus_wake(bus->dhd, FALSE, wlan_oob_irq);
+#endif /* DHD_PCIE_RUNTIMEPM */
+ if (bus->dhd->up && bus->oob_presuspend) {
DHD_OS_OOB_IRQ_WAKE_LOCK_TIMEOUT(bus->dhd, OOB_WAKE_LOCK_TIMEOUT);
}
- DHD_BUS_UNLOCK(bus->bus_lock, flags_bus);
return IRQ_HANDLED;
}
dhdpcie_osinfo->oob_irq_registered = TRUE;
- return 0;
+ return err;
}
void dhdpcie_oob_intr_unregister(dhd_bus_t *bus)
}
#endif /* BCMPCIE_OOB_HOST_WAKE */
-struct device * dhd_bus_to_dev(dhd_bus_t *bus)
+#ifdef PCIE_OOB
+void dhdpcie_oob_init(dhd_bus_t *bus)
{
- struct pci_dev *pdev;
- pdev = bus->dev;
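+	/* Set up the bit-bang GPIO port that drives WL_REG_ON, BT_REG_ON and DEVICE_WAKE for OOB control */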
+ gpio_handle_val = get_handle(OOB_PORT);
+ if (gpio_handle_val < 0)
+ {
+ DHD_ERROR(("%s: Could not get GPIO handle.\n", __FUNCTION__));
+ ASSERT(FALSE);
+ }
- if (pdev)
- return &pdev->dev;
- else
- return NULL;
-}
+ gpio_direction = 0;
+ ftdi_set_bitmode(gpio_handle_val, 0, BITMODE_BITBANG);
-#define KIRQ_PRINT_BUF_LEN 256
+ /* Note BT core is also enabled here */
+ gpio_port = 1 << BIT_WL_REG_ON | 1 << BIT_BT_REG_ON | 1 << DEVICE_WAKE;
+ gpio_write_port(gpio_handle_val, gpio_port);
-void
-dhd_print_kirqstats(dhd_pub_t *dhd, unsigned int irq_num)
-{
- unsigned long flags = 0;
- struct irq_desc *desc;
- int i; /* cpu iterator */
- struct bcmstrbuf strbuf;
- char tmp_buf[KIRQ_PRINT_BUF_LEN];
-
- desc = irq_to_desc(irq_num);
- if (!desc) {
- DHD_ERROR(("%s : irqdesc is not found \n", __FUNCTION__));
- return;
- }
- bcm_binit(&strbuf, tmp_buf, KIRQ_PRINT_BUF_LEN);
- raw_spin_lock_irqsave(&desc->lock, flags);
- bcm_bprintf(&strbuf, "dhd irq %u:", irq_num);
- for_each_online_cpu(i)
- bcm_bprintf(&strbuf, "%10u ",
- desc->kstat_irqs ? *per_cpu_ptr(desc->kstat_irqs, i) : 0);
- if (desc->irq_data.chip) {
- if (desc->irq_data.chip->name)
- bcm_bprintf(&strbuf, " %8s", desc->irq_data.chip->name);
- else
- bcm_bprintf(&strbuf, " %8s", "-");
- } else {
- bcm_bprintf(&strbuf, " %8s", "None");
- }
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 1, 0))
- if (desc->irq_data.domain)
- bcm_bprintf(&strbuf, " %d", (int)desc->irq_data.hwirq);
-#ifdef CONFIG_GENERIC_IRQ_SHOW_LEVEL
- bcm_bprintf(&strbuf, " %-8s", irqd_is_level_type(&desc->irq_data) ? "Level" : "Edge");
-#endif // endif
-#endif /* LINUX VERSION > 3.1.0 */
+ gpio_direction = 1 << BIT_WL_REG_ON | 1 << BIT_BT_REG_ON | 1 << DEVICE_WAKE;
+ ftdi_set_bitmode(gpio_handle_val, gpio_direction, BITMODE_BITBANG);
+
+ bus->oob_enabled = TRUE;
+ bus->oob_presuspend = FALSE;
- if (desc->name)
- bcm_bprintf(&strbuf, "-%-8s", desc->name);
+ /* drive the Device_Wake GPIO low on startup */
+ bus->device_wake_state = TRUE;
+ dhd_bus_set_device_wake(bus, FALSE);
+ dhd_bus_doorbell_timeout_reset(bus);
- DHD_ERROR(("%s\n", strbuf.origbuf));
- raw_spin_unlock_irqrestore(&desc->lock, flags);
}
void
-dhd_show_kirqstats(dhd_pub_t *dhd)
+dhd_oob_set_bt_reg_on(struct dhd_bus *bus, bool val)
{
- unsigned int irq = -1;
-#ifdef BCMPCIE
- dhdpcie_get_pcieirq(dhd->bus, &irq);
-#endif /* BCMPCIE */
-#ifdef BCMSDIO
- irq = ((wifi_adapter_info_t *)dhd->info->adapter)->irq_num;
-#endif /* BCMSDIO */
- if (irq != -1) {
-#ifdef BCMPCIE
- DHD_ERROR(("DUMP data kernel irq stats : \n"));
-#endif /* BCMPCIE */
-#ifdef BCMSDIO
- DHD_ERROR(("DUMP data/host wakeup kernel irq stats : \n"));
-#endif /* BCMSDIO */
- dhd_print_kirqstats(dhd, irq);
- }
-#ifdef BCMPCIE_OOB_HOST_WAKE
- irq = dhdpcie_get_oob_irq_num(dhd->bus);
- if (irq) {
- DHD_ERROR(("DUMP PCIE host wakeup kernel irq stats : \n"));
- dhd_print_kirqstats(dhd, irq);
+ DHD_INFO(("Set Device_Wake to %d\n", val));
+ if (val)
+ {
+ gpio_port = gpio_port | (1 << BIT_BT_REG_ON);
+ gpio_write_port(gpio_handle_val, gpio_port);
+ } else {
+ gpio_port = gpio_port & (0xff ^ (1 << BIT_BT_REG_ON));
+ gpio_write_port(gpio_handle_val, gpio_port);
}
-#endif /* BCMPCIE_OOB_HOST_WAKE */
}
-#ifdef DHD_FW_COREDUMP
int
-dhd_dongle_mem_dump(void)
+dhd_oob_get_bt_reg_on(struct dhd_bus *bus)
{
- if (!g_dhd_bus) {
- DHD_ERROR(("%s: Bus is NULL\n", __FUNCTION__));
- return -ENODEV;
+ int ret;
+ uint8 val;
+ ret = gpio_read_port(gpio_handle_val, &val);
+
+ if (ret < 0) {
+ DHD_ERROR(("gpio_read_port returns %d\n", ret));
+ return ret;
+ }
+
+ if (val & (1 << BIT_BT_REG_ON))
+ {
+ ret = 1;
+ } else {
+ ret = 0;
}
- dhd_bus_dump_console_buffer(g_dhd_bus);
- dhd_prot_debug_info_print(g_dhd_bus->dhd);
+ return ret;
+}
- g_dhd_bus->dhd->memdump_enabled = DUMP_MEMFILE_BUGON;
- g_dhd_bus->dhd->memdump_type = DUMP_TYPE_AP_ABNORMAL_ACCESS;
+int
+dhd_os_oob_set_device_wake(struct dhd_bus *bus, bool val)
+{
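+	/* Only toggle the DEVICE_WAKE GPIO when the requested state differs from the cached one */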
+ if (bus->device_wake_state != val)
+ {
+ DHD_INFO(("Set Device_Wake to %d\n", val));
+
+ if (bus->oob_enabled && !bus->oob_presuspend)
+ {
+ if (val)
+ {
+ gpio_port = gpio_port | (1 << DEVICE_WAKE);
+ gpio_write_port_non_block(gpio_handle_val, gpio_port);
+ } else {
+ gpio_port = gpio_port & (0xff ^ (1 << DEVICE_WAKE));
+ gpio_write_port_non_block(gpio_handle_val, gpio_port);
+ }
+ }
- dhd_bus_mem_dump(g_dhd_bus->dhd);
- return 0;
+ bus->device_wake_state = val;
+ }
+ return BCME_OK;
}
-EXPORT_SYMBOL(dhd_dongle_mem_dump);
-#endif /* DHD_FW_COREDUMP */
-bool
-dhd_bus_check_driver_up(void)
+INLINE void
+dhd_os_ib_set_device_wake(struct dhd_bus *bus, bool val)
+{
+	/* TODO: The in-band implementation of Device_Wake is currently not supported,
+	 * so this function is left empty; it can be extended later to support it.
+ */
+}
+#endif /* PCIE_OOB */
+
+#ifdef DHD_PCIE_RUNTIMEPM
+bool dhd_runtimepm_state(dhd_pub_t *dhd)
{
dhd_bus_t *bus;
- dhd_pub_t *dhdp;
- bool isup = FALSE;
+ unsigned long flags;
+ bus = dhd->bus;
+
+ DHD_GENERAL_LOCK(dhd, flags);
+
+ bus->idlecount++;
+
+ DHD_TRACE(("%s : Enter \n", __FUNCTION__));
+ if ((bus->idletime > 0) && (bus->idlecount >= bus->idletime)) {
+ bus->idlecount = 0;
+ if (DHD_BUS_BUSY_CHECK_IDLE(dhd) && !DHD_BUS_CHECK_DOWN_OR_DOWN_IN_PROGRESS(dhd)) {
+ bus->bus_wake = 0;
+ DHD_BUS_BUSY_SET_RPM_SUSPEND_IN_PROGRESS(dhd);
+ bus->runtime_resume_done = FALSE;
+ /* stop all interface network queue. */
+ dhd_bus_stop_queue(bus);
+ DHD_GENERAL_UNLOCK(dhd, flags);
+ DHD_ERROR(("%s: DHD Idle state!! - idletime :%d, wdtick :%d \n",
+ __FUNCTION__, bus->idletime, dhd_runtimepm_ms));
+			/* If the RPM suspend fails, return FALSE so it is retried later */
+ if (dhdpcie_set_suspend_resume(bus, TRUE)) {
+ DHD_ERROR(("%s: exit with wakelock \n", __FUNCTION__));
+ DHD_GENERAL_LOCK(dhd, flags);
+ DHD_BUS_BUSY_CLEAR_RPM_SUSPEND_IN_PROGRESS(dhd);
+ dhd_os_busbusy_wake(bus->dhd);
+ bus->runtime_resume_done = TRUE;
+				/* Without the call below, the NET TX queue can get stuck */
+ dhd_bus_start_queue(bus);
+ DHD_GENERAL_UNLOCK(dhd, flags);
+ smp_wmb();
+ wake_up_interruptible(&bus->rpm_queue);
+ return FALSE;
+ }
- bus = (dhd_bus_t *)g_dhd_bus;
- if (!bus) {
- DHD_ERROR(("%s: bus is NULL\n", __FUNCTION__));
- return isup;
+ DHD_GENERAL_LOCK(dhd, flags);
+ DHD_BUS_BUSY_CLEAR_RPM_SUSPEND_IN_PROGRESS(dhd);
+ DHD_BUS_BUSY_SET_RPM_SUSPEND_DONE(dhd);
+			/* Make sure the NET TX queue is active */
+ dhd_bus_start_queue(bus);
+ DHD_GENERAL_UNLOCK(dhd, flags);
+
+ wait_event_interruptible(bus->rpm_queue, bus->bus_wake);
+
+ DHD_GENERAL_LOCK(dhd, flags);
+ DHD_BUS_BUSY_CLEAR_RPM_SUSPEND_DONE(dhd);
+ DHD_BUS_BUSY_SET_RPM_RESUME_IN_PROGRESS(dhd);
+ DHD_GENERAL_UNLOCK(dhd, flags);
+
+ dhdpcie_set_suspend_resume(bus, FALSE);
+
+ DHD_GENERAL_LOCK(dhd, flags);
+ DHD_BUS_BUSY_CLEAR_RPM_RESUME_IN_PROGRESS(dhd);
+ dhd_os_busbusy_wake(bus->dhd);
+			/* Inform the wake-up context that resume is complete */
+ bus->runtime_resume_done = TRUE;
+			/* Make sure the NET TX queue is active */
+ dhd_bus_start_queue(bus);
+ DHD_GENERAL_UNLOCK(dhd, flags);
+
+ smp_wmb();
+ wake_up_interruptible(&bus->rpm_queue);
+ DHD_ERROR(("%s : runtime resume ended \n", __FUNCTION__));
+ return TRUE;
+ } else {
+ DHD_GENERAL_UNLOCK(dhd, flags);
+			/* One of the contexts (TX, IOVAR or RX) is busy,
+			 * so we should not suspend
+ */
+ DHD_ERROR(("%s : bus is active with dhd_bus_busy_state = 0x%x\n",
+ __FUNCTION__, dhd->dhd_bus_busy_state));
+ return FALSE;
+ }
+ }
+
+ DHD_GENERAL_UNLOCK(dhd, flags);
+ return FALSE;
+} /* dhd_runtimepm_state */
+
+/*
+ * dhd_runtime_bus_wake
+ * TRUE - the wake-up was handled in the runtime PM context
+ * FALSE - the bus is not involved in a runtime PM transition
+ */
+bool dhd_runtime_bus_wake(dhd_bus_t *bus, bool wait, void *func_addr)
+{
+ unsigned long flags;
+ bus->idlecount = 0;
+ DHD_TRACE(("%s : enter\n", __FUNCTION__));
+ if (bus->dhd->up == FALSE) {
+ DHD_INFO(("%s : dhd is not up\n", __FUNCTION__));
+ return FALSE;
+ }
+
+ DHD_GENERAL_LOCK(bus->dhd, flags);
+ if (DHD_BUS_BUSY_CHECK_RPM_ALL(bus->dhd)) {
+		/* Wake up the RPM state thread if a suspend is in progress or already done */
+ if (DHD_BUS_BUSY_CHECK_RPM_SUSPEND_IN_PROGRESS(bus->dhd) ||
+ DHD_BUS_BUSY_CHECK_RPM_SUSPEND_DONE(bus->dhd)) {
+ bus->bus_wake = 1;
+
+ DHD_GENERAL_UNLOCK(bus->dhd, flags);
+
+ DHD_ERROR(("Runtime Resume is called in %pf\n", func_addr));
+ smp_wmb();
+ wake_up_interruptible(&bus->rpm_queue);
+ /* No need to wake up the RPM state thread */
+ } else if (DHD_BUS_BUSY_CHECK_RPM_RESUME_IN_PROGRESS(bus->dhd)) {
+ DHD_GENERAL_UNLOCK(bus->dhd, flags);
+ }
+
+		/* If wait is TRUE, block here until the runtime resume is done */
+ if (wait) {
+ wait_event_interruptible(bus->rpm_queue, bus->runtime_resume_done);
+ } else {
+ DHD_INFO(("%s: bus wakeup but no wait until resume done\n", __FUNCTION__));
+ }
+ /* If it is called from RPM context, it returns TRUE */
+ return TRUE;
}
- dhdp = bus->dhd;
- if (dhdp) {
- isup = dhdp->up;
+ DHD_GENERAL_UNLOCK(bus->dhd, flags);
+
+ return FALSE;
+}
+
+bool dhdpcie_runtime_bus_wake(dhd_pub_t *dhdp, bool wait, void* func_addr)
+{
+ dhd_bus_t *bus = dhdp->bus;
+ return dhd_runtime_bus_wake(bus, wait, func_addr);
+}
+
+void dhdpcie_block_runtime_pm(dhd_pub_t *dhdp)
+{
+ dhd_bus_t *bus = dhdp->bus;
+ bus->idletime = 0;
+}
+
+bool dhdpcie_is_resume_done(dhd_pub_t *dhdp)
+{
+ dhd_bus_t *bus = dhdp->bus;
+ return bus->runtime_resume_done;
+}
+#endif /* DHD_PCIE_RUNTIMEPM */
+
+struct device * dhd_bus_to_dev(dhd_bus_t *bus)
+{
+ struct pci_dev *pdev;
+ pdev = bus->dev;
+
+ if (pdev)
+ return &pdev->dev;
+ else
+ return NULL;
+}
+
+#ifdef HOFFLOAD_MODULES
+void
+dhd_free_module_memory(struct dhd_bus *bus, struct module_metadata *hmem)
+{
+ struct device *dev = &bus->dev->dev;
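+	/* Unmap and free the host-offload module buffer, if one was allocated */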
+ if (hmem) {
+ dma_unmap_single(dev, (dma_addr_t) hmem->data_addr, hmem->size, DMA_TO_DEVICE);
+ kfree(hmem->data);
+ hmem->data = NULL;
+ hmem->size = 0;
+ } else {
+ DHD_ERROR(("dev:%p pci unmapping error\n", dev));
}
+}
- return isup;
+void *
+dhd_alloc_module_memory(struct dhd_bus *bus, uint32_t size, struct module_metadata *hmem)
+{
+ struct device *dev = &bus->dev->dev;
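+	/* Allocate the host-offload module buffer once and DMA-map it for transfers to the dongle */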
+ if (!hmem->data) {
+ hmem->data = kzalloc(size, GFP_KERNEL);
+ if (!hmem->data) {
+ DHD_ERROR(("dev:%p mem alloc failure\n", dev));
+ return NULL;
+ }
+ }
+ hmem->size = size;
+ DHD_INFO(("module size: 0x%x \n", hmem->size));
+ hmem->data_addr = (u64) dma_map_single(dev, hmem->data, hmem->size, DMA_TO_DEVICE);
+ if (dma_mapping_error(dev, hmem->data_addr)) {
+ DHD_ERROR(("dev:%p dma mapping error\n", dev));
+ kfree(hmem->data);
+ hmem->data = NULL;
+ return hmem->data;
+ }
+ return hmem->data;
}
-EXPORT_SYMBOL(dhd_bus_check_driver_up);
+#endif /* HOFFLOAD_MODULES */
* Broadcom Dongle Host Driver (DHD)
 * Preferred Network Offload and Wi-Fi Location Service (WLS) code.
*
- * Copyright (C) 1999-2019, Broadcom.
- *
+ * Copyright (C) 1999-2017, Broadcom Corporation
+ *
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
* under the terms of the GNU General Public License version 2 (the "GPL"),
* available at http://www.broadcom.com/licenses/GPLv2.php, with the
* following added to such license:
- *
+ *
* As a special exception, the copyright holders of this software give you
* permission to link this software with independent modules, and to copy and
* distribute the resulting executable under terms of your choice, provided that
* the license of that module. An independent module is a module which is not
* derived from this software. The special exception does not apply to any
* modifications of the software.
- *
+ *
* Notwithstanding the above, under no circumstances may you combine this
* software in any way with any other Broadcom software provided under a license
* other than the GPL, without Broadcom's express prior written consent.
*
* <<Broadcom-WL-IPTag/Open:>>
*
- * $Id: dhd_pno.c 812762 2019-04-02 09:36:26Z $
+ * $Id: dhd_pno.c 707287 2017-06-27 06:44:29Z $
*/
#if defined(GSCAN_SUPPORT) && !defined(PNO_SUPPORT)
#error "GSCAN needs PNO to be enabled!"
-#endif // endif
+#endif
#ifdef PNO_SUPPORT
#include <typedefs.h>
} \
} while (0)
#define PNO_GET_PNOSTATE(dhd) ((dhd_pno_status_info_t *)dhd->pno_state)
-
-#define PNO_BESTNET_LEN WLC_IOCTL_MEDLEN
-
+#define PNO_BESTNET_LEN 1024
#define PNO_ON 1
#define PNO_OFF 0
-#define CHANNEL_2G_MIN 1
#define CHANNEL_2G_MAX 14
-#define CHANNEL_5G_MIN 34
#define CHANNEL_5G_MAX 165
-#define IS_2G_CHANNEL(ch) ((ch >= CHANNEL_2G_MIN) && \
- (ch <= CHANNEL_2G_MAX))
-#define IS_5G_CHANNEL(ch) ((ch >= CHANNEL_5G_MIN) && \
- (ch <= CHANNEL_5G_MAX))
#define MAX_NODE_CNT 5
#define WLS_SUPPORTED(pno_state) (pno_state->wls_supported == TRUE)
#define TIME_DIFF(timestamp1, timestamp2) (abs((uint32)(timestamp1/1000) \
static int _dhd_pno_flush_ssid(dhd_pub_t *dhd);
static wl_pfn_gscan_ch_bucket_cfg_t *
dhd_pno_gscan_create_channel_list(dhd_pub_t *dhd, dhd_pno_status_info_t *pno_state,
- uint16 *chan_list, uint32 *num_buckets, uint32 *num_buckets_to_fw);
+ uint16 *chan_list, uint32 *num_buckets, uint32 *num_buckets_to_fw);
#endif /* GSCAN_SUPPORT */
-
static int dhd_pno_set_legacy_pno(dhd_pub_t *dhd, uint16 scan_fr, int pno_repeat,
- int pno_freq_expo_max, uint16 *channel_list, int nchan);
+ int pno_freq_expo_max, uint16 *channel_list, int nchan);
static inline bool
-is_dfs(dhd_pub_t *dhd, uint16 channel)
+is_dfs(uint16 channel)
{
- u32 ch;
- s32 err;
- u8 buf[32];
-
- ch = wl_ch_host_to_driver(channel);
- err = dhd_iovar(dhd, 0, "per_chan_info", (char *)&ch,
- sizeof(u32), buf, sizeof(buf), FALSE);
- if (unlikely(err)) {
- DHD_ERROR(("get per chan info failed:%d\n", err));
- return FALSE;
- }
- /* Check the channel flags returned by fw */
- if (*((u32 *)buf) & WL_CHAN_PASSIVE) {
+ if (channel >= 52 && channel <= 64) /* class 2 */
return TRUE;
- }
- return FALSE;
+ else if (channel >= 100 && channel <= 140) /* class 4 */
+ return TRUE;
+ else
+ return FALSE;
}
-
int
dhd_pno_clean(dhd_pub_t *dhd)
{
#ifdef GSCAN_SUPPORT
static uint64
-convert_fw_rel_time_to_systime(struct osl_timespec *ts, uint32 fw_ts_ms)
+convert_fw_rel_time_to_systime(struct timespec *ts, uint32 fw_ts_ms)
{
return ((uint64)(TIMESPEC_TO_US(*ts)) - (uint64)(fw_ts_ms * 1000));
}
static void
dhd_pno_idx_to_ssid(struct dhd_pno_gscan_params *gscan_params,
- dhd_epno_results_t *res, uint32 idx)
+ dhd_epno_results_t *res, uint32 idx)
{
dhd_pno_ssid_t *iter, *next;
int i;
if (gscan_params->epno_cfg.num_epno_ssid > 0) {
i = 0;
-
- GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wcast-qual"
+#endif
list_for_each_entry_safe(iter, next,
&gscan_params->epno_cfg.epno_ssid_list, list) {
- GCC_DIAGNOSTIC_POP();
if (i++ == idx) {
memcpy(res->ssid, iter->SSID, iter->SSID_len);
res->ssid_len = iter->SSID_len;
}
/* Translate HAL flag bitmask to BRCM FW flag bitmask */
-void
-dhd_pno_translate_epno_fw_flags(uint32 *flags)
+void dhd_pno_translate_epno_fw_flags(uint32 *flags)
{
uint32 in_flags, fw_flags = 0;
in_flags = *flags;
}
if (!(in_flags & DHD_EPNO_STRICT_MATCH) &&
- !(in_flags & DHD_EPNO_HIDDEN_SSID)) {
+ !(in_flags & DHD_EPNO_HIDDEN_SSID)) {
fw_flags |= WL_PFN_SSID_IMPRECISE_MATCH;
}
}
/* Translate HAL auth bitmask to BRCM FW bitmask */
-void
-dhd_pno_set_epno_auth_flag(uint32 *wpa_auth)
+void dhd_pno_set_epno_auth_flag(uint32 *wpa_auth)
{
switch (*wpa_auth) {
case DHD_PNO_AUTH_CODE_OPEN:
return err;
}
+#ifdef GSCAN_SUPPORT
static int
_dhd_pno_flush_ssid(dhd_pub_t *dhd)
{
wl_pfn_t pfn_elem;
memset(&pfn_elem, 0, sizeof(wl_pfn_t));
pfn_elem.flags = htod32(WL_PFN_FLUSH_ALL_SSIDS);
-
err = dhd_iovar(dhd, 0, "pfn_add", (char *)&pfn_elem, sizeof(wl_pfn_t), NULL, 0, TRUE);
if (err < 0) {
DHD_ERROR(("%s : failed to execute pfn_add\n", __FUNCTION__));
}
return err;
}
+#endif /* GSCAN_SUPPORT */
static bool
is_batch_retrieval_complete(struct dhd_pno_gscan_params *gscan_params)
#ifdef GSCAN_SUPPORT
if (mode == DHD_PNO_BATCH_MODE ||
- ((mode & DHD_PNO_GSCAN_MODE) && pno_params->params_gscan.mscan))
+ ((mode & DHD_PNO_GSCAN_MODE) && pno_params->params_gscan.mscan))
#else
if (mode == DHD_PNO_BATCH_MODE)
#endif /* GSCAN_SUPPORT */
}
static int
-_dhd_pno_add_ssid(dhd_pub_t *dhd, struct list_head* ssid_list, int nssid)
+_dhd_pno_add_ssid(dhd_pub_t *dhd, struct list_head *ssid_list, int nssid)
{
int err = BCME_OK;
int i = 0, mem_needed;
return BCME_ERROR;
}
mem_needed = (sizeof(wl_pfn_t) * nssid);
- pfn_elem_buf = (wl_pfn_t *) MALLOCZ(dhd->osh, mem_needed);
+ pfn_elem_buf = (wl_pfn_t *) kzalloc(mem_needed, GFP_KERNEL);
if (!pfn_elem_buf) {
DHD_ERROR(("%s: Can't malloc %d bytes!\n", __FUNCTION__, mem_needed));
return BCME_NOMEM;
}
- GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wcast-qual"
+#endif
list_for_each_entry_safe(iter, next, ssid_list, list) {
- GCC_DIAGNOSTIC_POP();
pfn_elem_buf[i].infra = htod32(1);
pfn_elem_buf[i].auth = htod32(DOT11_OPEN_SYSTEM);
pfn_elem_buf[i].wpa_auth = htod32(iter->wpa_auth);
break;
}
}
-
err = dhd_iovar(dhd, 0, "pfn_add", (char *)pfn_elem_buf, mem_needed, NULL, 0, TRUE);
if (err < 0) {
DHD_ERROR(("%s : failed to execute pfn_add\n", __FUNCTION__));
}
- MFREE(dhd->osh, pfn_elem_buf, mem_needed);
+ kfree(pfn_elem_buf);
return err;
}
err = dhd_wl_ioctl_cmd(dhd, WLC_GET_VALID_CHANNELS, chan_buf, sizeof(chan_buf), FALSE, 0);
if (err < 0) {
DHD_ERROR(("failed to get channel list (err: %d)\n", err));
- return err;
+ goto exit;
}
for (i = 0, j = 0; i < dtoh32(list->count) && i < *nchan; i++) {
- if (IS_2G_CHANNEL(dtoh32(list->element[i]))) {
- if (!(band & WLC_BAND_2G)) {
- /* Skip, if not 2g */
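+		/* Filter the firmware channel list by the requested band and optionally skip DFS channels */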
+ if (band == WLC_BAND_2G) {
+ if (dtoh32(list->element[i]) > CHANNEL_2G_MAX)
continue;
- }
- /* fall through to include the channel */
- } else if (IS_5G_CHANNEL(dtoh32(list->element[i]))) {
- bool dfs_channel = is_dfs(dhd, dtoh32(list->element[i]));
- if ((skip_dfs && dfs_channel) ||
- (!(band & WLC_BAND_5G) && !dfs_channel)) {
- /* Skip the channel if:
- * the DFS bit is NOT set & the channel is a dfs channel
- * the band 5G is not set & the channel is a non DFS 5G channel
- */
+ } else if (band == WLC_BAND_5G) {
+ if (dtoh32(list->element[i]) <= CHANNEL_2G_MAX)
continue;
- }
- /* fall through to include the channel */
+ if (skip_dfs && is_dfs(dtoh32(list->element[i])))
+ continue;
+
+ } else if (band == WLC_BAND_AUTO) {
+ if (skip_dfs || !is_dfs(dtoh32(list->element[i])))
+ continue;
+
+ } else { /* All channels */
+ if (skip_dfs && is_dfs(dtoh32(list->element[i])))
+ continue;
+ }
+ if (dtoh32(list->element[i]) <= CHANNEL_5G_MAX) {
+ d_chan_list[j++] = (uint16) dtoh32(list->element[i]);
} else {
- /* Not in range. Bad channel */
- DHD_ERROR(("Not in range. bad channel\n"));
- *nchan = 0;
- return BCME_BADCHAN;
+ err = BCME_BADCHAN;
+ goto exit;
}
-
- /* Include the channel */
- d_chan_list[j++] = (uint16) dtoh32(list->element[i]);
}
*nchan = j;
+exit:
return err;
}
#ifdef PNO_DEBUG
char *_base_bp;
char msg[150];
-#endif // endif
+#endif
dhd_pno_bestnet_entry_t *iter, *next;
dhd_pno_scan_results_t *siter, *snext;
dhd_pno_best_header_t *phead, *pprev;
}
DHD_PNO(("%s scancount %d\n", __FUNCTION__, params_batch->get_batch.expired_tot_scan_cnt));
/* preestimate scan count until which scan result this report is going to end */
- GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wcast-qual"
+#endif
list_for_each_entry_safe(siter, snext,
¶ms_batch->get_batch.expired_scan_results_list, list) {
- GCC_DIAGNOSTIC_POP();
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic pop
+#endif
phead = siter->bestnetheader;
while (phead != NULL) {
/* if left_size is less than bestheader total size , stop this */
bp += nreadsize = snprintf(bp, nleftsize, "trunc\n");
nleftsize -= nreadsize;
}
- GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wcast-qual"
+#endif
list_for_each_entry_safe(iter, next,
&phead->entry_list, list) {
- GCC_DIAGNOSTIC_POP();
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic pop
+#endif
t_delta = jiffies_to_msecs(jiffies - iter->recorded_time);
#ifdef PNO_DEBUG
_base_bp = bp;
memset(msg, 0, sizeof(msg));
-#endif // endif
+#endif
/* BSSID info */
bp += nreadsize = snprintf(bp, nleftsize, "bssid=%s\n",
bcm_ether_ntoa((const struct ether_addr *)&iter->BSSID, eabuf));
#ifdef PNO_DEBUG
memcpy(msg, _base_bp, bp - _base_bp);
DHD_PNO(("Entry : \n%s", msg));
-#endif // endif
+#endif
}
bp += nreadsize = snprintf(bp, nleftsize, "%s", SCAN_END_MARKER);
DHD_PNO(("%s", SCAN_END_MARKER));
}
params_batch->get_batch.expired_tot_scan_cnt -= cnt;
/* set FALSE only if the link list is empty after returning the data */
- GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wcast-qual"
+#endif
if (list_empty(¶ms_batch->get_batch.expired_scan_results_list)) {
- GCC_DIAGNOSTIC_POP();
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic pop
+#endif
params_batch->get_batch.batch_started = FALSE;
bp += snprintf(bp, nleftsize, "%s", RESULTS_END_MARKER);
DHD_PNO(("%s", RESULTS_END_MARKER));
NULL_CHECK(head, "head is NULL", err);
NULL_CHECK(head->next, "head->next is NULL", err);
DHD_PNO(("%s enter\n", __FUNCTION__));
-
- GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wcast-qual"
+#endif
list_for_each_entry_safe(siter, snext,
head, list) {
if (only_last) {
MFREE(dhd->osh, siter, SCAN_RESULTS_SIZE);
}
}
- GCC_DIAGNOSTIC_POP();
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic pop
+#endif
return removed_scan_cnt;
}
case DHD_PNO_LEGACY_MODE: {
struct dhd_pno_ssid *iter, *next;
if (params->params_legacy.nssid > 0) {
- GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wcast-qual"
+#endif
list_for_each_entry_safe(iter, next,
¶ms->params_legacy.ssid_list, list) {
- GCC_DIAGNOSTIC_POP();
list_del(&iter->list);
- MFREE(dhd->osh, iter, sizeof(struct dhd_pno_ssid));
+ kfree(iter);
}
}
-
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic pop
+#endif
params->params_legacy.nssid = 0;
params->params_legacy.scan_fr = 0;
params->params_legacy.pno_freq_expo_max = 0;
case DHD_PNO_HOTLIST_MODE: {
struct dhd_pno_bssid *iter, *next;
if (params->params_hotlist.nbssid > 0) {
- GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wcast-qual"
+#endif
list_for_each_entry_safe(iter, next,
¶ms->params_hotlist.bssid_list, list) {
- GCC_DIAGNOSTIC_POP();
list_del(&iter->list);
- MFREE(dhd->osh, iter, sizeof(struct dhd_pno_ssid));
+ kfree(iter);
}
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic pop
+#endif
}
params->params_hotlist.scan_fr = 0;
params->params_hotlist.nbssid = 0;
dhd_pno_stop_for_ssid(dhd_pub_t *dhd)
{
int err = BCME_OK;
- uint32 mode = 0, cnt = 0;
+ uint32 mode = 0;
dhd_pno_status_info_t *_pno_state;
- dhd_pno_params_t *_params = NULL;
- wl_pfn_bssid_t *p_pfn_bssid = NULL, *tmp_bssid;
-
+ dhd_pno_params_t *_params;
+ wl_pfn_bssid_t *p_pfn_bssid = NULL;
NULL_CHECK(dhd, "dev is NULL", err);
NULL_CHECK(dhd->pno_state, "pno_state is NULL", err);
_pno_state = PNO_GET_PNOSTATE(dhd);
goto exit;
}
DHD_PNO(("%s enter\n", __FUNCTION__));
- /* If pno mode is PNO_LEGACY_MODE clear the pno values and unset the DHD_PNO_LEGACY_MODE */
- _params = &_pno_state->pno_params_arr[INDEX_OF_LEGACY_PARAMS];
- _dhd_pno_reinitialize_prof(dhd, _params, DHD_PNO_LEGACY_MODE);
_pno_state->pno_mode &= ~DHD_PNO_LEGACY_MODE;
-
#ifdef GSCAN_SUPPORT
if (_pno_state->pno_mode & DHD_PNO_GSCAN_MODE) {
struct dhd_pno_gscan_params *gscan_params;
/* restart HOTLIST SCAN */
struct dhd_pno_bssid *iter, *next;
_params = &(_pno_state->pno_params_arr[INDEX_OF_HOTLIST_PARAMS]);
- p_pfn_bssid = MALLOCZ(dhd->osh, sizeof(wl_pfn_bssid_t) *
- _params->params_hotlist.nbssid);
+ p_pfn_bssid = kzalloc(sizeof(wl_pfn_bssid_t) *
+ _params->params_hotlist.nbssid, GFP_KERNEL);
if (p_pfn_bssid == NULL) {
DHD_ERROR(("%s : failed to allocate wl_pfn_bssid_t array"
" (count: %d)",
goto exit;
}
/* convert dhd_pno_bssid to wl_pfn_bssid */
- GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
- cnt = 0;
- tmp_bssid = p_pfn_bssid;
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wcast-qual"
+#endif
list_for_each_entry_safe(iter, next,
&_params->params_hotlist.bssid_list, list) {
- GCC_DIAGNOSTIC_POP();
- memcpy(&tmp_bssid->macaddr,
+ memcpy(&p_pfn_bssid->macaddr,
&iter->macaddr, ETHER_ADDR_LEN);
- tmp_bssid->flags = iter->flags;
- if (cnt < _params->params_hotlist.nbssid) {
- tmp_bssid++;
- cnt++;
- } else {
- DHD_ERROR(("%s: Allocated insufficient memory\n",
- __FUNCTION__));
- break;
- }
+ p_pfn_bssid->flags = iter->flags;
+ p_pfn_bssid++;
}
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic pop
+#endif
err = dhd_pno_set_for_hotlist(dhd, p_pfn_bssid, &_params->params_hotlist);
if (err < 0) {
_pno_state->pno_mode &= ~DHD_PNO_HOTLIST_MODE;
}
}
exit:
- if (p_pfn_bssid) {
- MFREE(dhd->osh, p_pfn_bssid, sizeof(wl_pfn_bssid_t) *
- _params->params_hotlist.nbssid);
- }
+ kfree(p_pfn_bssid);
return err;
}
}
static int
-dhd_pno_add_to_ssid_list(dhd_pub_t *dhd, struct list_head *ptr, wlc_ssid_ext_t *ssid_list,
+dhd_pno_add_to_ssid_list(struct list_head *ptr, wlc_ssid_ext_t *ssid_list,
int nssid, int *num_ssid_added)
{
int ret = BCME_OK;
ret = BCME_ERROR;
goto exit;
}
- _pno_ssid = (struct dhd_pno_ssid *)MALLOCZ(dhd->osh,
- sizeof(struct dhd_pno_ssid));
+
+ _pno_ssid = kzalloc(sizeof(struct dhd_pno_ssid), GFP_KERNEL);
if (_pno_ssid == NULL) {
DHD_ERROR(("%s : failed to allocate struct dhd_pno_ssid\n",
__FUNCTION__));
dhd_pno_set_for_ssid(dhd_pub_t *dhd, wlc_ssid_ext_t* ssid_list, int nssid,
uint16 scan_fr, int pno_repeat, int pno_freq_expo_max, uint16 *channel_list, int nchan)
{
+
dhd_pno_status_info_t *_pno_state;
dhd_pno_params_t *_params;
struct dhd_pno_legacy_params *params_legacy;
INIT_LIST_HEAD(¶ms_legacy->ssid_list);
- if (dhd_pno_add_to_ssid_list(dhd, ¶ms_legacy->ssid_list, ssid_list,
- nssid, ¶ms_legacy->nssid) < 0) {
+ if (dhd_pno_add_to_ssid_list(¶ms_legacy->ssid_list, ssid_list,
+ nssid, ¶ms_legacy->nssid) < 0) {
_dhd_pno_reinitialize_prof(dhd, _params, DHD_PNO_LEGACY_MODE);
return BCME_ERROR;
}
DHD_PNO(("%s enter : nssid %d, scan_fr :%d, pno_repeat :%d,"
- "pno_freq_expo_max: %d, nchan :%d\n", __FUNCTION__,
- params_legacy->nssid, scan_fr, pno_repeat, pno_freq_expo_max, nchan));
+ "pno_freq_expo_max: %d, nchan :%d\n", __FUNCTION__,
+ params_legacy->nssid, scan_fr, pno_repeat, pno_freq_expo_max, nchan));
return dhd_pno_set_legacy_pno(dhd, scan_fr, pno_repeat,
- pno_freq_expo_max, channel_list, nchan);
+ pno_freq_expo_max, channel_list, nchan);
}
struct list_head *ssid_list;
_pno_state = PNO_GET_PNOSTATE(dhd);
-
_params = &(_pno_state->pno_params_arr[INDEX_OF_LEGACY_PARAMS]);
/* If GSCAN is also ON will handle this down below */
#ifdef GSCAN_SUPPORT
else {
tot_nchan = WL_NUMCHANNELS;
err = _dhd_pno_get_channels(dhd, _chan_list, &tot_nchan,
- (WLC_BAND_2G | WLC_BAND_5G), FALSE);
+ (WLC_BAND_2G | WLC_BAND_5G), FALSE);
if (err < 0) {
tot_nchan = 0;
DHD_PNO(("Could not get channel list for PNO SSID\n"));
} else {
for (i = 0; i < tot_nchan; i++)
- _params->params_legacy.chan_list[i] = _chan_list[i];
+ _params->params_legacy.chan_list[i] = _chan_list[i];
}
}
#endif /* GSCAN_SUPPORT */
}
DHD_PNO(("\n"));
}
-#endif // endif
+#endif
if (_params->params_batch.nchan) {
/* copy the channel list into local array */
memcpy(_chan_list, _params->params_batch.chan_list, sizeof(_chan_list));
return err;
}
+
#ifdef GSCAN_SUPPORT
static int
-dhd_set_epno_params(dhd_pub_t *dhd, wl_ssid_ext_params_t *params, bool set)
+dhd_set_epno_params(dhd_pub_t *dhd, wl_pfn_ssid_params_t *params, bool set)
{
wl_pfn_ssid_cfg_t cfg;
int err;
if (!set)
cfg.flags |= WL_PFN_SSID_CFG_CLEAR;
else if (params)
- memcpy(&cfg.params, params, sizeof(wl_ssid_ext_params_t));
+ memcpy(&cfg.params, params, sizeof(wl_pfn_ssid_params_t));
err = dhd_iovar(dhd, 0, "pfn_ssid_cfg", (char *)&cfg,
sizeof(wl_pfn_ssid_cfg_t), NULL, 0, TRUE);
if (err != BCME_OK) {
if (gscan_params->epno_cfg.num_epno_ssid) {
DHD_PNO(("num_epno_ssid %d\n", gscan_params->epno_cfg.num_epno_ssid));
if ((err = _dhd_pno_add_ssid(dhd, &gscan_params->epno_cfg.epno_ssid_list,
- gscan_params->epno_cfg.num_epno_ssid)) < 0) {
+ gscan_params->epno_cfg.num_epno_ssid)) < 0) {
DHD_ERROR(("failed to add ssid list (err %d) to firmware\n", err));
return err;
}
return err;
}
+
static void
-dhd_pno_reset_cfg_gscan(dhd_pub_t *dhd, dhd_pno_params_t *_params,
+dhd_pno_reset_cfg_gscan(dhd_pno_params_t *_params,
dhd_pno_status_info_t *_pno_state, uint8 flags)
{
DHD_PNO(("%s enter\n", __FUNCTION__));
if (flags & GSCAN_FLUSH_HOTLIST_CFG) {
struct dhd_pno_bssid *iter, *next;
if (_params->params_gscan.nbssid_hotlist > 0) {
- GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wcast-qual"
+#endif
list_for_each_entry_safe(iter, next,
&_params->params_gscan.hotlist_bssid_list, list) {
- GCC_DIAGNOSTIC_POP();
list_del(&iter->list);
- MFREE(dhd->osh, iter, sizeof(struct dhd_pno_bssid));
+ kfree(iter);
}
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic pop
+#endif
}
_params->params_gscan.nbssid_hotlist = 0;
DHD_PNO(("Flush Hotlist Config\n"));
dhd_epno_ssid_cfg_t *epno_cfg = &_params->params_gscan.epno_cfg;
if (epno_cfg->num_epno_ssid > 0) {
- GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wcast-qual"
+#endif
list_for_each_entry_safe(iter, next,
&epno_cfg->epno_ssid_list, list) {
- GCC_DIAGNOSTIC_POP();
list_del(&iter->list);
- MFREE(dhd->osh, iter, sizeof(struct dhd_pno_bssid));
+ kfree(iter);
}
- epno_cfg->num_epno_ssid = 0;
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic pop
+#endif
+ epno_cfg->num_epno_ssid = 0;
}
- memset(&epno_cfg->params, 0, sizeof(wl_ssid_ext_params_t));
+ memset(&epno_cfg->params, 0, sizeof(wl_pfn_ssid_params_t));
DHD_PNO(("Flushed ePNO Config\n"));
}
return;
}
-int
-dhd_wait_batch_results_complete(dhd_pub_t *dhd)
+int dhd_wait_batch_results_complete(dhd_pub_t *dhd)
{
dhd_pno_status_info_t *_pno_state;
dhd_pno_params_t *_params;
return err;
}
+static void *
+dhd_get_gscan_batch_results(dhd_pub_t *dhd, uint32 *len)
+{
+ gscan_results_cache_t *iter, *results;
+ dhd_pno_status_info_t *_pno_state;
+ dhd_pno_params_t *_params;
+ uint16 num_scan_ids = 0, num_results = 0;
+
+ _pno_state = PNO_GET_PNOSTATE(dhd);
+ _params = &_pno_state->pno_params_arr[INDEX_OF_GSCAN_PARAMS];
+
+ iter = results = _params->params_gscan.gscan_batch_cache;
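+	/* Walk the batch cache to count unconsumed results and the number of distinct scan ids */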
+ while (iter) {
+ num_results += iter->tot_count - iter->tot_consumed;
+ num_scan_ids++;
+ iter = iter->next;
+ }
+
+ *len = ((num_results << 16) | (num_scan_ids));
+ return results;
+}
+
int
dhd_pno_set_cfg_gscan(dhd_pub_t *dhd, dhd_pno_gscan_cmd_cfg_t type,
void *buf, bool flush)
int8 flags;
if (flush) {
- dhd_pno_reset_cfg_gscan(dhd, _params, _pno_state,
+ dhd_pno_reset_cfg_gscan(_params, _pno_state,
GSCAN_FLUSH_HOTLIST_CFG);
}
INIT_LIST_HEAD(&_params->params_gscan.hotlist_bssid_list);
}
- if ((_params->params_gscan.nbssid_hotlist +
- ptr->nbssid) > PFN_SWC_MAX_NUM_APS) {
- DHD_ERROR(("Excessive number of hotlist APs programmed %d\n",
- (_params->params_gscan.nbssid_hotlist +
- ptr->nbssid)));
- err = BCME_RANGE;
- goto exit;
- }
-
for (i = 0, bssid_ptr = ptr->bssid; i < ptr->nbssid; i++, bssid_ptr++) {
- _pno_bssid = (struct dhd_pno_bssid *)MALLOCZ(dhd->osh,
- sizeof(struct dhd_pno_bssid));
+ _pno_bssid = kzalloc(sizeof(struct dhd_pno_bssid), GFP_KERNEL);
+
if (!_pno_bssid) {
DHD_ERROR(("_pno_bssid is NULL, cannot kalloc %zd bytes",
sizeof(struct dhd_pno_bssid)));
break;
case DHD_PNO_EPNO_CFG_ID:
if (flush) {
- dhd_pno_reset_cfg_gscan(dhd, _params, _pno_state,
- GSCAN_FLUSH_EPNO_CFG);
+ dhd_pno_reset_cfg_gscan(_params, _pno_state,
+ GSCAN_FLUSH_EPNO_CFG);
}
break;
case DHD_PNO_EPNO_PARAMS_ID:
if (flush) {
memset(&_params->params_gscan.epno_cfg.params, 0,
- sizeof(wl_ssid_ext_params_t));
+ sizeof(wl_pfn_ssid_params_t));
}
if (buf) {
memcpy(&_params->params_gscan.epno_cfg.params, buf,
- sizeof(wl_ssid_ext_params_t));
+ sizeof(wl_pfn_ssid_params_t));
}
break;
default:
- err = BCME_BADARG;
- DHD_ERROR(("%s: Unrecognized cmd type - %d\n", __FUNCTION__, type));
- break;
+ err = BCME_BADARG;
+ DHD_ERROR(("%s: Unrecognized cmd type - %d\n", __FUNCTION__, type));
+ break;
}
exit:
mutex_unlock(&_pno_state->pno_mutex);
}
+
static bool
validate_gscan_params(struct dhd_pno_gscan_params *gscan_params)
{
dhd_pno_status_info_t *_pno_state = PNO_GET_PNOSTATE(dhd);
wl_pfn_gscan_ch_bucket_cfg_t *ch_bucket = NULL;
wl_pfn_gscan_cfg_t *pfn_gscan_cfg_t = NULL;
+ wl_pfn_significant_bssid_t *p_pfn_significant_bssid = NULL;
wl_pfn_bssid_t *p_pfn_bssid = NULL;
dhd_pno_params_t *_params;
bool fw_flushed = FALSE;
if ((_pno_state->pno_mode & DHD_PNO_LEGACY_MODE) &&
!gscan_params->epno_cfg.num_epno_ssid) {
struct dhd_pno_legacy_params *params_legacy;
- params_legacy =
- &(_pno_state->pno_params_arr[INDEX_OF_LEGACY_PARAMS].params_legacy);
+ params_legacy = &(_pno_state->pno_params_arr[INDEX_OF_LEGACY_PARAMS].params_legacy);
if ((err = _dhd_pno_add_ssid(dhd, ¶ms_legacy->ssid_list,
params_legacy->nssid)) < 0) {
if (gscan_params->nbssid_hotlist) {
struct dhd_pno_bssid *iter, *next;
wl_pfn_bssid_t *ptr;
- p_pfn_bssid = (wl_pfn_bssid_t *)MALLOCZ(dhd->osh,
- sizeof(wl_pfn_bssid_t) * gscan_params->nbssid_hotlist);
+ p_pfn_bssid = (wl_pfn_bssid_t *)kzalloc(sizeof(wl_pfn_bssid_t) *
+ gscan_params->nbssid_hotlist, GFP_KERNEL);
if (p_pfn_bssid == NULL) {
DHD_ERROR(("%s : failed to allocate wl_pfn_bssid_t array"
" (count: %d)",
ptr = p_pfn_bssid;
/* convert dhd_pno_bssid to wl_pfn_bssid */
DHD_PNO(("nhotlist %d\n", gscan_params->nbssid_hotlist));
- GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wcast-qual"
+#endif
list_for_each_entry_safe(iter, next,
&gscan_params->hotlist_bssid_list, list) {
char buffer_hotlist[64];
- GCC_DIAGNOSTIC_POP();
memcpy(&ptr->macaddr,
&iter->macaddr, ETHER_ADDR_LEN);
- BCM_REFERENCE(buffer_hotlist);
DHD_PNO(("%s\n", bcm_ether_ntoa(&ptr->macaddr, buffer_hotlist)));
+ BCM_REFERENCE(buffer_hotlist);
ptr->flags = iter->flags;
ptr++;
}
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic pop
+#endif
err = _dhd_pno_add_bssid(dhd, p_pfn_bssid, gscan_params->nbssid_hotlist);
if (err < 0) {
goto exit;
}
}
-
if ((err = _dhd_pno_enable(dhd, PNO_ON)) < 0) {
DHD_ERROR(("%s : failed to enable PNO err %d\n", __FUNCTION__, err));
}
_pno_state->pno_mode &= ~DHD_PNO_GSCAN_MODE;
}
}
- MFREE(dhd->osh, p_pfn_bssid,
- sizeof(wl_pfn_bssid_t) * gscan_params->nbssid_hotlist);
+ kfree(p_pfn_significant_bssid);
+ kfree(p_pfn_bssid);
if (pfn_gscan_cfg_t) {
MFREE(dhd->osh, pfn_gscan_cfg_t, gscan_param_size);
}
params_legacy = &(_pno_state->pno_params_arr[INDEX_OF_LEGACY_PARAMS].params_legacy);
_pno_state->pno_mode &= ~DHD_PNO_LEGACY_MODE;
-
DHD_PNO(("Restarting Legacy PNO SSID scan...\n"));
memcpy(chan_list, params_legacy->chan_list,
- (params_legacy->nchan * sizeof(uint16)));
+ (params_legacy->nchan * sizeof(uint16)));
err = dhd_pno_set_legacy_pno(dhd, params_legacy->scan_fr,
params_legacy->pno_repeat, params_legacy->pno_freq_expo_max,
chan_list, params_legacy->nchan);
} else {
if (flush) {
mutex_lock(&_pno_state->pno_mutex);
- dhd_pno_reset_cfg_gscan(dhd, params, _pno_state, GSCAN_FLUSH_ALL_CFG);
+ dhd_pno_reset_cfg_gscan(params, _pno_state, GSCAN_FLUSH_ALL_CFG);
mutex_unlock(&_pno_state->pno_mutex);
}
/* Need to stop all gscan */
}
/* Cleanup any consumed results
- * Return TRUE if all results consumed else FALSE
+ * Return TRUE if all results consumed, else FALSE
*/
int dhd_gscan_batch_cache_cleanup(dhd_pub_t *dhd)
{
while (iter) {
if (iter->tot_consumed == iter->tot_count) {
tmp = iter->next;
- MFREE(dhd->osh, iter,
- ((iter->tot_count - 1) * sizeof(wifi_gscan_result_t))
- + sizeof(gscan_results_cache_t));
+ kfree(iter);
iter = tmp;
} else
break;
uint32 timestamp = 0, ts = 0, i, j, timediff;
dhd_pno_params_t *params;
dhd_pno_status_info_t *_pno_state;
- wl_pfn_lnet_info_v1_t *plnetinfo;
- wl_pfn_lnet_info_v2_t *plnetinfo_v2;
+ wl_pfn_lnet_info_v2_t *plnetinfo;
struct dhd_pno_gscan_params *gscan_params;
- wl_pfn_lscanresults_v1_t *plbestnet_v1 = NULL;
- wl_pfn_lscanresults_v2_t *plbestnet_v2 = NULL;
+ wl_pfn_lscanresults_v2_t *plbestnet = NULL;
gscan_results_cache_t *iter, *tail;
wifi_gscan_result_t *result;
uint8 *nAPs_per_scan = NULL;
uint8 num_scans_in_cur_iter;
uint16 count;
- uint16 fwcount;
- uint16 fwstatus = PFN_INCOMPLETE;
- struct osl_timespec tm_spec;
-
- /* Static asserts in _dhd_pno_get_for_batch() below guarantee the v1 and v2
- * net_info and subnet_info structures are compatible in size and SSID offset,
- * allowing v1 to be safely used in the code below except for lscanresults
- * fields themselves (status, count, offset to netinfo).
- */
+ struct timespec tm_spec;
NULL_CHECK(dhd, "dhd is NULL\n", err);
NULL_CHECK(dhd->pno_state, "pno_state is NULL", err);
goto exit;
}
- plbestnet_v1 = (wl_pfn_lscanresults_v1_t *)MALLOC(dhd->osh, PNO_BESTNET_LEN);
- if (!plbestnet_v1) {
+ plbestnet = (wl_pfn_lscanresults_v2_t *)MALLOC(dhd->osh, PNO_BESTNET_LEN);
+ if (!plbestnet) {
DHD_ERROR(("%s :Out of memory!! Cant malloc %d bytes\n", __FUNCTION__,
- (int)PNO_BESTNET_LEN));
+ PNO_BESTNET_LEN));
err = BCME_NOMEM;
goto exit;
}
- plbestnet_v2 = (wl_pfn_lscanresults_v2_t *)plbestnet_v1;
mutex_lock(&_pno_state->pno_mutex);
timediff = timediff >> 1;
/* Ok, now lets start getting results from the FW */
+ plbestnet->status = PFN_INCOMPLETE;
tail = gscan_params->gscan_batch_cache;
- do {
- err = dhd_iovar(dhd, 0, "pfnlbest", NULL, 0, (char *)plbestnet_v1, PNO_BESTNET_LEN,
+ while (plbestnet->status != PFN_COMPLETE) {
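+		/* Keep pulling "pfnlbest" batches until the firmware reports PFN_COMPLETE */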
+ memset(plbestnet, 0, PNO_BESTNET_LEN);
+ err = dhd_iovar(dhd, 0, "pfnlbest", NULL, 0, (char *)plbestnet, PNO_BESTNET_LEN,
FALSE);
if (err < 0) {
DHD_ERROR(("%s : Cannot get all the batch results, err :%d\n",
__FUNCTION__, err));
goto exit_mutex_unlock;
}
- osl_get_monotonic_boottime(&tm_spec);
-
- if (plbestnet_v1->version == PFN_LBEST_SCAN_RESULT_VERSION_V1) {
- fwstatus = plbestnet_v1->status;
- fwcount = plbestnet_v1->count;
- plnetinfo = &plbestnet_v1->netinfo[0];
-
- DHD_PNO(("ver %d, status : %d, count %d\n",
- plbestnet_v1->version, fwstatus, fwcount));
-
- if (fwcount == 0) {
- DHD_PNO(("No more batch results\n"));
- goto exit_mutex_unlock;
- }
- if (fwcount > BESTN_MAX) {
- DHD_ERROR(("%s :fwcount %d is greater than BESTN_MAX %d \n",
- __FUNCTION__, fwcount, (int)BESTN_MAX));
- /* Process only BESTN_MAX number of results per batch */
- fwcount = BESTN_MAX;
- }
- num_scans_in_cur_iter = 0;
-
- timestamp = plnetinfo->timestamp;
- /* find out how many scans' results did we get in
- * this batch of FW results
+ get_monotonic_boottime(&tm_spec);
+ DHD_PNO(("ver %d, status : %d, count %d\n", plbestnet->version,
+ plbestnet->status, plbestnet->count));
+ if (plbestnet->version != PFN_SCANRESULT_VERSION) {
+ err = BCME_VERSION;
+ DHD_ERROR(("bestnet version(%d) is mismatch with Driver version(%d)\n",
+ plbestnet->version, PFN_SCANRESULT_VERSION));
+ goto exit_mutex_unlock;
+ }
+ if (plbestnet->count == 0) {
+ DHD_PNO(("No more batch results\n"));
+ goto exit_mutex_unlock;
+ }
+ num_scans_in_cur_iter = 0;
+ timestamp = plbestnet->netinfo[0].timestamp;
+		/* find out how many scans' results we got in this batch of FW results */
+ for (i = 0, count = 0; i < plbestnet->count; i++, count++) {
+ plnetinfo = &plbestnet->netinfo[i];
+ /* Unlikely to happen, but just in case the results from
+			 * FW don't make sense, assume they are part of one single scan
*/
- for (i = 0, count = 0; i < fwcount; i++, count++, plnetinfo++) {
- /* Unlikely to happen, but just in case the results from
- * FW doesnt make sense..... Assume its part of one single scan
- */
- if (num_scans_in_cur_iter >= gscan_params->mscan) {
- num_scans_in_cur_iter = 0;
- count = fwcount;
- break;
- }
- if (TIME_DIFF_MS(timestamp, plnetinfo->timestamp) > timediff) {
- nAPs_per_scan[num_scans_in_cur_iter] = count;
- count = 0;
- num_scans_in_cur_iter++;
- }
- timestamp = plnetinfo->timestamp;
+ if (num_scans_in_cur_iter >= gscan_params->mscan) {
+ num_scans_in_cur_iter = 0;
+ count = plbestnet->count;
+ break;
}
- if (num_scans_in_cur_iter < gscan_params->mscan) {
+ if (TIME_DIFF_MS(timestamp, plnetinfo->timestamp) > timediff) {
nAPs_per_scan[num_scans_in_cur_iter] = count;
+ count = 0;
num_scans_in_cur_iter++;
}
+ timestamp = plnetinfo->timestamp;
+ }
+ if (num_scans_in_cur_iter < gscan_params->mscan) {
+ nAPs_per_scan[num_scans_in_cur_iter] = count;
+ num_scans_in_cur_iter++;
+ }
- DHD_PNO(("num_scans_in_cur_iter %d\n", num_scans_in_cur_iter));
- /* reset plnetinfo to the first item for the next loop */
- plnetinfo -= i;
-
- for (i = 0; i < num_scans_in_cur_iter; i++) {
- iter = (gscan_results_cache_t *)
- MALLOCZ(dhd->osh, ((nAPs_per_scan[i] - 1) *
- sizeof(wifi_gscan_result_t)) +
- sizeof(gscan_results_cache_t));
- if (!iter) {
- DHD_ERROR(("%s :Out of memory!! Cant malloc %d bytes\n",
- __FUNCTION__, gscan_params->mscan));
- err = BCME_NOMEM;
- goto exit_mutex_unlock;
- }
- /* Need this check because the new set of results from FW
- * maybe a continuation of previous sets' scan results
- */
- if (TIME_DIFF_MS(ts, plnetinfo->timestamp) > timediff) {
- iter->scan_id = ++gscan_params->scan_id;
- } else {
- iter->scan_id = gscan_params->scan_id;
- }
- DHD_PNO(("scan_id %d tot_count %d \n",
- gscan_params->scan_id, nAPs_per_scan[i]));
- iter->tot_count = nAPs_per_scan[i];
- iter->tot_consumed = 0;
- iter->flag = 0;
- if (plnetinfo->flags & PFN_PARTIAL_SCAN_MASK) {
- DHD_PNO(("This scan is aborted\n"));
- iter->flag = (ENABLE << PNO_STATUS_ABORT);
- } else if (gscan_params->reason) {
- iter->flag = (ENABLE << gscan_params->reason);
- }
-
- if (!tail) {
- gscan_params->gscan_batch_cache = iter;
- } else {
- tail->next = iter;
- }
- tail = iter;
- iter->next = NULL;
- for (j = 0; j < nAPs_per_scan[i]; j++, plnetinfo++) {
- result = &iter->results[j];
-
- result->channel =
- wf_channel2mhz(plnetinfo->pfnsubnet.channel,
- (plnetinfo->pfnsubnet.channel <= CH_MAX_2G_CHANNEL?
- WF_CHAN_FACTOR_2_4_G : WF_CHAN_FACTOR_5_G));
- result->rssi = (int32) plnetinfo->RSSI;
- result->beacon_period = 0;
- result->capability = 0;
- result->rtt = (uint64) plnetinfo->rtt0;
- result->rtt_sd = (uint64) plnetinfo->rtt1;
- result->ts = convert_fw_rel_time_to_systime(&tm_spec,
- plnetinfo->timestamp);
- ts = plnetinfo->timestamp;
- if (plnetinfo->pfnsubnet.SSID_len > DOT11_MAX_SSID_LEN) {
- DHD_ERROR(("%s: Invalid SSID length %d\n",
- __FUNCTION__,
- plnetinfo->pfnsubnet.SSID_len));
- plnetinfo->pfnsubnet.SSID_len = DOT11_MAX_SSID_LEN;
- }
- memcpy(result->ssid, plnetinfo->pfnsubnet.SSID,
- plnetinfo->pfnsubnet.SSID_len);
- result->ssid[plnetinfo->pfnsubnet.SSID_len] = '\0';
- memcpy(&result->macaddr, &plnetinfo->pfnsubnet.BSSID,
- ETHER_ADDR_LEN);
-
- DHD_PNO(("\tSSID : "));
- DHD_PNO(("\n"));
- DHD_PNO(("\tBSSID: "MACDBG"\n",
- MAC2STRDBG(result->macaddr.octet)));
- DHD_PNO(("\tchannel: %d, RSSI: %d, timestamp: %d ms\n",
- plnetinfo->pfnsubnet.channel,
- plnetinfo->RSSI, plnetinfo->timestamp));
- DHD_PNO(("\tRTT0 : %d, RTT1: %d\n",
- plnetinfo->rtt0, plnetinfo->rtt1));
-
- }
- }
-
- } else if (plbestnet_v2->version == PFN_LBEST_SCAN_RESULT_VERSION_V2) {
- fwstatus = plbestnet_v2->status;
- fwcount = plbestnet_v2->count;
- plnetinfo_v2 = (wl_pfn_lnet_info_v2_t*)&plbestnet_v2->netinfo[0];
-
- DHD_PNO(("ver %d, status : %d, count %d\n",
- plbestnet_v2->version, fwstatus, fwcount));
+ DHD_PNO(("num_scans_in_cur_iter %d\n", num_scans_in_cur_iter));
+ plnetinfo = &plbestnet->netinfo[0];
- if (fwcount == 0) {
- DHD_PNO(("No more batch results\n"));
+ for (i = 0; i < num_scans_in_cur_iter; i++) {
+ iter = (gscan_results_cache_t *)
+ kmalloc(((nAPs_per_scan[i] - 1) * sizeof(wifi_gscan_result_t)) +
+ sizeof(gscan_results_cache_t), GFP_KERNEL);
+ if (!iter) {
+ DHD_ERROR(("%s :Out of memory!! Cant malloc %d bytes\n",
+ __FUNCTION__, gscan_params->mscan));
+ err = BCME_NOMEM;
goto exit_mutex_unlock;
}
- if (fwcount > BESTN_MAX) {
- DHD_ERROR(("%s :fwcount %d is greater than BESTN_MAX %d \n",
- __FUNCTION__, fwcount, (int)BESTN_MAX));
- /* Process only BESTN_MAX number of results per batch */
- fwcount = BESTN_MAX;
- }
- num_scans_in_cur_iter = 0;
-
- timestamp = plnetinfo_v2->timestamp;
- /* find out how many scans' results did we get
- * in this batch of FW results
+ /* Need this check because the new set of results from FW
+			 * may be a continuation of previous sets' scan results
*/
- for (i = 0, count = 0; i < fwcount; i++, count++, plnetinfo_v2++) {
- /* Unlikely to happen, but just in case the results from
- * FW doesnt make sense..... Assume its part of one single scan
- */
- if (num_scans_in_cur_iter >= gscan_params->mscan) {
- num_scans_in_cur_iter = 0;
- count = fwcount;
- break;
- }
- if (TIME_DIFF_MS(timestamp, plnetinfo_v2->timestamp) > timediff) {
- nAPs_per_scan[num_scans_in_cur_iter] = count;
- count = 0;
- num_scans_in_cur_iter++;
- }
- timestamp = plnetinfo_v2->timestamp;
- }
- if (num_scans_in_cur_iter < gscan_params->mscan) {
- nAPs_per_scan[num_scans_in_cur_iter] = count;
- num_scans_in_cur_iter++;
+ if (TIME_DIFF_MS(ts, plnetinfo->timestamp) > timediff) {
+ iter->scan_id = ++gscan_params->scan_id;
+ } else {
+ iter->scan_id = gscan_params->scan_id;
+ }
+ DHD_PNO(("scan_id %d tot_count %d ch_bucket %x\n",
+ gscan_params->scan_id, nAPs_per_scan[i],
+ plbestnet->scan_ch_buckets[i]));
+ iter->tot_count = nAPs_per_scan[i];
+ iter->scan_ch_bucket = plbestnet->scan_ch_buckets[i];
+ iter->tot_consumed = 0;
+ iter->flag = 0;
+ if (plnetinfo->flags & PFN_PARTIAL_SCAN_MASK) {
+ DHD_PNO(("This scan is aborted\n"));
+ iter->flag = (ENABLE << PNO_STATUS_ABORT);
+ } else if (gscan_params->reason) {
+ iter->flag = (ENABLE << gscan_params->reason);
+ }
+
+ if (!tail) {
+ gscan_params->gscan_batch_cache = iter;
+ } else {
+ tail->next = iter;
}
+ tail = iter;
+ iter->next = NULL;
+ for (j = 0; j < nAPs_per_scan[i]; j++, plnetinfo++) {
+ result = &iter->results[j];
- DHD_PNO(("num_scans_in_cur_iter %d\n", num_scans_in_cur_iter));
- /* reset plnetinfo to the first item for the next loop */
- plnetinfo_v2 -= i;
-
- for (i = 0; i < num_scans_in_cur_iter; i++) {
- iter = (gscan_results_cache_t *)
- MALLOCZ(dhd->osh, ((nAPs_per_scan[i] - 1) *
- sizeof(wifi_gscan_result_t)) +
- sizeof(gscan_results_cache_t));
- if (!iter) {
- DHD_ERROR(("%s :Out of memory!! Cant malloc %d bytes\n",
- __FUNCTION__, gscan_params->mscan));
- err = BCME_NOMEM;
- goto exit_mutex_unlock;
- }
- /* Need this check because the new set of results from FW
- * maybe a continuation of previous sets' scan results
- */
- if (TIME_DIFF_MS(ts, plnetinfo_v2->timestamp) > timediff) {
- iter->scan_id = ++gscan_params->scan_id;
- } else {
- iter->scan_id = gscan_params->scan_id;
- }
- DHD_PNO(("scan_id %d tot_count %d ch_bucket %x\n",
- gscan_params->scan_id, nAPs_per_scan[i],
- plbestnet_v2->scan_ch_buckets[i]));
- iter->tot_count = nAPs_per_scan[i];
- iter->scan_ch_bucket = plbestnet_v2->scan_ch_buckets[i];
- iter->tot_consumed = 0;
- iter->flag = 0;
- if (plnetinfo_v2->flags & PFN_PARTIAL_SCAN_MASK) {
- DHD_PNO(("This scan is aborted\n"));
- iter->flag = (ENABLE << PNO_STATUS_ABORT);
- } else if (gscan_params->reason) {
- iter->flag = (ENABLE << gscan_params->reason);
+ result->channel = wf_channel2mhz(plnetinfo->pfnsubnet.channel,
+ (plnetinfo->pfnsubnet.channel <= CH_MAX_2G_CHANNEL?
+ WF_CHAN_FACTOR_2_4_G : WF_CHAN_FACTOR_5_G));
+ result->rssi = (int32) plnetinfo->RSSI;
+ /* Info not available & not expected */
+ result->beacon_period = 0;
+ result->capability = 0;
+ result->rtt = (uint64) plnetinfo->rtt0;
+ result->rtt_sd = (uint64) plnetinfo->rtt1;
+ result->ts = convert_fw_rel_time_to_systime(&tm_spec,
+ plnetinfo->timestamp);
+ ts = plnetinfo->timestamp;
+ if (plnetinfo->pfnsubnet.SSID_len > DOT11_MAX_SSID_LEN) {
+ DHD_ERROR(("%s: Invalid SSID length %d\n",
+ __FUNCTION__, plnetinfo->pfnsubnet.SSID_len));
+ plnetinfo->pfnsubnet.SSID_len = DOT11_MAX_SSID_LEN;
}
+ memcpy(result->ssid, plnetinfo->pfnsubnet.u.SSID,
+ plnetinfo->pfnsubnet.SSID_len);
+ result->ssid[plnetinfo->pfnsubnet.SSID_len] = '\0';
+ memcpy(&result->macaddr, &plnetinfo->pfnsubnet.BSSID,
+ ETHER_ADDR_LEN);
- if (!tail) {
- gscan_params->gscan_batch_cache = iter;
- } else {
- tail->next = iter;
- }
- tail = iter;
- iter->next = NULL;
- for (j = 0; j < nAPs_per_scan[i]; j++, plnetinfo_v2++) {
- result = &iter->results[j];
-
- result->channel =
- wf_channel2mhz(plnetinfo_v2->pfnsubnet.channel,
- (plnetinfo_v2->pfnsubnet.channel <=
- CH_MAX_2G_CHANNEL?
- WF_CHAN_FACTOR_2_4_G : WF_CHAN_FACTOR_5_G));
- result->rssi = (int32) plnetinfo_v2->RSSI;
- /* Info not available & not expected */
- result->beacon_period = 0;
- result->capability = 0;
- result->rtt = (uint64) plnetinfo_v2->rtt0;
- result->rtt_sd = (uint64) plnetinfo_v2->rtt1;
- result->ts = convert_fw_rel_time_to_systime(&tm_spec,
- plnetinfo_v2->timestamp);
- ts = plnetinfo_v2->timestamp;
- if (plnetinfo_v2->pfnsubnet.SSID_len > DOT11_MAX_SSID_LEN) {
- DHD_ERROR(("%s: Invalid SSID length %d\n",
- __FUNCTION__,
- plnetinfo_v2->pfnsubnet.SSID_len));
- plnetinfo_v2->pfnsubnet.SSID_len =
- DOT11_MAX_SSID_LEN;
- }
- memcpy(result->ssid, plnetinfo_v2->pfnsubnet.u.SSID,
- plnetinfo_v2->pfnsubnet.SSID_len);
- result->ssid[plnetinfo_v2->pfnsubnet.SSID_len] = '\0';
- memcpy(&result->macaddr, &plnetinfo_v2->pfnsubnet.BSSID,
- ETHER_ADDR_LEN);
-
- DHD_PNO(("\tSSID : "));
- DHD_PNO(("\n"));
- DHD_PNO(("\tBSSID: "MACDBG"\n",
- MAC2STRDBG(result->macaddr.octet)));
- DHD_PNO(("\tchannel: %d, RSSI: %d, timestamp: %d ms\n",
- plnetinfo_v2->pfnsubnet.channel,
- plnetinfo_v2->RSSI, plnetinfo_v2->timestamp));
- DHD_PNO(("\tRTT0 : %d, RTT1: %d\n",
- plnetinfo_v2->rtt0, plnetinfo_v2->rtt1));
+ DHD_PNO(("\tSSID : "));
+ DHD_PNO(("\n"));
+ DHD_PNO(("\tBSSID: %02x:%02x:%02x:%02x:%02x:%02x\n",
+ result->macaddr.octet[0],
+ result->macaddr.octet[1],
+ result->macaddr.octet[2],
+ result->macaddr.octet[3],
+ result->macaddr.octet[4],
+ result->macaddr.octet[5]));
+ DHD_PNO(("\tchannel: %d, RSSI: %d, timestamp: %d ms\n",
+ plnetinfo->pfnsubnet.channel,
+ plnetinfo->RSSI, plnetinfo->timestamp));
+ DHD_PNO(("\tRTT0 : %d, RTT1: %d\n",
+ plnetinfo->rtt0, plnetinfo->rtt1));
- }
}
-
- } else {
- err = BCME_VERSION;
- DHD_ERROR(("bestnet fw version %d not supported\n",
- plbestnet_v1->version));
- goto exit_mutex_unlock;
}
- } while (fwstatus == PFN_INCOMPLETE);
-
+ }
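
The loop above starts a new scan_id whenever the gap between the previous entry's timestamp and the current one exceeds timediff, so consecutive firmware entries from the same sweep share one id. A minimal userspace sketch of that grouping rule (names and the fixed threshold are illustrative stand-ins, not driver symbols):

#include <stdio.h>

#define GAP_THRESHOLD_MS 2000u	/* stand-in for the driver's timediff */

int main(void)
{
	unsigned int ts[] = { 100, 150, 180, 4000, 4050, 9000 };
	unsigned int prev = 0;
	int scan_id = 0;
	size_t i;

	for (i = 0; i < sizeof(ts) / sizeof(ts[0]); i++) {
		/* a large jump in timestamps marks the start of a new scan */
		if (i == 0 || (ts[i] - prev) > GAP_THRESHOLD_MS)
			scan_id++;
		prev = ts[i];
		printf("entry %zu (t=%u ms) -> scan_id %d\n", i, ts[i], scan_id);
	}
	return 0;
}
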
exit_mutex_unlock:
mutex_unlock(&_pno_state->pno_mutex);
exit:
if (nAPs_per_scan) {
MFREE(dhd->osh, nAPs_per_scan, gscan_params->mscan * sizeof(uint8));
}
- if (plbestnet_v1) {
- MFREE(dhd->osh, plbestnet_v1, PNO_BESTNET_LEN);
+ if (plbestnet) {
+ MFREE(dhd->osh, plbestnet, PNO_BESTNET_LEN);
}
DHD_PNO(("Batch retrieval done!\n"));
return err;
#endif /* GSCAN_SUPPORT */
#if defined(GSCAN_SUPPORT) || defined(DHD_GET_VALID_CHANNELS)
-static void *
-dhd_get_gscan_batch_results(dhd_pub_t *dhd, uint32 *len)
-{
- gscan_results_cache_t *iter, *results;
- dhd_pno_status_info_t *_pno_state;
- dhd_pno_params_t *_params;
- uint16 num_scan_ids = 0, num_results = 0;
-
- _pno_state = PNO_GET_PNOSTATE(dhd);
- _params = &_pno_state->pno_params_arr[INDEX_OF_GSCAN_PARAMS];
-
- iter = results = _params->params_gscan.gscan_batch_cache;
- while (iter) {
- num_results += iter->tot_count - iter->tot_consumed;
- num_scan_ids++;
- iter = iter->next;
- }
-
- *len = ((num_results << 16) | (num_scan_ids));
- return results;
-}
-
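
The helper removed here reports two counters through one 32-bit out-parameter: cached result count in the high 16 bits and scan-id count in the low 16 bits (*len = (num_results << 16) | num_scan_ids). A small self-contained sketch of that packing and the matching unpack a caller would do (function names are illustrative, not from the driver):

#include <stdint.h>
#include <stdio.h>

/* Pack two 16-bit counters into one 32-bit value, high half = results. */
static uint32_t pack_batch_len(uint16_t num_results, uint16_t num_scan_ids)
{
	return ((uint32_t)num_results << 16) | num_scan_ids;
}

static void unpack_batch_len(uint32_t len, uint16_t *num_results, uint16_t *num_scan_ids)
{
	*num_results = (uint16_t)(len >> 16);
	*num_scan_ids = (uint16_t)(len & 0xffff);
}

int main(void)
{
	uint16_t results, scans;
	uint32_t len = pack_batch_len(37, 4);

	unpack_batch_len(len, &results, &scans);
	printf("len=0x%08x -> %u results across %u scan ids\n", len, results, scans);
	return 0;
}
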
void *
dhd_pno_get_gscan(dhd_pub_t *dhd, dhd_pno_gscan_cmd_cfg_t type,
void *info, uint32 *len)
dhd_epno_ssid_cfg_t *epno_cfg;
dhd_pno_status_info_t *_pno_state;
+
if (!dhd || !dhd->pno_state) {
DHD_ERROR(("NULL POINTER : %s\n", __FUNCTION__));
return NULL;
}
-
_pno_state = PNO_GET_PNOSTATE(dhd);
_params = &_pno_state->pno_params_arr[INDEX_OF_GSCAN_PARAMS];
-
if (!len) {
DHD_ERROR(("%s: len is NULL\n", __FUNCTION__));
return NULL;
switch (type) {
case DHD_PNO_GET_CAPABILITIES:
ptr = (dhd_pno_gscan_capabilities_t *)
- MALLOCZ(dhd->osh, sizeof(dhd_pno_gscan_capabilities_t));
+ kmalloc(sizeof(dhd_pno_gscan_capabilities_t), GFP_KERNEL);
if (!ptr)
break;
/* Hardcoding these values for now, need to get
ptr->max_scan_cache_size = GSCAN_MAX_AP_CACHE;
ptr->max_scan_buckets = GSCAN_MAX_CH_BUCKETS;
ptr->max_ap_cache_per_scan = GSCAN_MAX_AP_CACHE_PER_SCAN;
- ptr->max_rssi_sample_size = PFN_SWC_RSSI_WINDOW_MAX;
ptr->max_scan_reporting_threshold = 100;
- ptr->max_hotlist_bssids = PFN_HOTLIST_MAX_NUM_APS;
- ptr->max_hotlist_ssids = 0;
- ptr->max_significant_wifi_change_aps = 0;
- ptr->max_bssid_history_entries = 0;
+ ptr->max_hotlist_aps = PFN_HOTLIST_MAX_NUM_APS;
ptr->max_epno_ssid_crc32 = MAX_EPNO_SSID_NUM;
ptr->max_epno_hidden_ssid = MAX_EPNO_HIDDEN_SSID;
ptr->max_white_list_ssid = MAX_WHITELIST_SSID;
ret = (void *)ptr;
*len = sizeof(dhd_pno_gscan_capabilities_t);
break;
-
+#ifdef GSCAN_SUPPORT
case DHD_PNO_GET_BATCH_RESULTS:
ret = dhd_get_gscan_batch_results(dhd, len);
break;
+#endif /* GSCAN_SUPPORT */
case DHD_PNO_GET_CHANNEL_LIST:
if (info) {
uint16 ch_list[WL_NUMCHANNELS];
*len = 0;
} else {
mem_needed = sizeof(uint32) * nchan;
- p = (uint32 *)MALLOC(dhd->osh, mem_needed);
+ p = (uint32 *) kmalloc(mem_needed, GFP_KERNEL);
if (!p) {
DHD_ERROR(("%s: Unable to malloc %d bytes\n",
__FUNCTION__, mem_needed));
case DHD_PNO_GET_NEW_EPNO_SSID_ELEM:
epno_cfg = &_params->params_gscan.epno_cfg;
if (epno_cfg->num_epno_ssid >=
- MAX_EPNO_SSID_NUM) {
+ MAX_EPNO_SSID_NUM) {
DHD_ERROR(("Excessive number of ePNO SSIDs programmed %d\n",
- epno_cfg->num_epno_ssid));
+ epno_cfg->num_epno_ssid));
return NULL;
}
if (!epno_cfg->num_epno_ssid) {
INIT_LIST_HEAD(&epno_cfg->epno_ssid_list);
}
- ssid_elem = MALLOCZ(dhd->osh, sizeof(dhd_pno_ssid_t));
+ ssid_elem = kzalloc(sizeof(dhd_pno_ssid_t), GFP_KERNEL);
if (!ssid_elem) {
DHD_ERROR(("EPNO ssid: cannot alloc %zd bytes",
- sizeof(dhd_pno_ssid_t)));
+ sizeof(dhd_pno_ssid_t)));
return NULL;
}
epno_cfg->num_epno_ssid++;
list_add_tail(&ssid_elem->list, &epno_cfg->epno_ssid_list);
ret = ssid_elem;
break;
+
default:
DHD_ERROR(("%s: Unrecognized cmd type - %d\n", __FUNCTION__, type));
break;
uint32 timestamp = 0;
dhd_pno_params_t *_params = NULL;
dhd_pno_status_info_t *_pno_state = NULL;
- wl_pfn_lscanresults_v1_t *plbestnet_v1 = NULL;
- wl_pfn_lscanresults_v2_t *plbestnet_v2 = NULL;
- wl_pfn_lnet_info_v1_t *plnetinfo;
- wl_pfn_lnet_info_v2_t *plnetinfo_v2;
+ wl_pfn_lscanresults_v2_t *plbestnet = NULL;
+ wl_pfn_lnet_info_v2_t *plnetinfo;
dhd_pno_bestnet_entry_t *pbestnet_entry;
dhd_pno_best_header_t *pbestnetheader = NULL;
dhd_pno_scan_results_t *pscan_results = NULL, *siter, *snext;
bool allocate_header = FALSE;
- uint16 fwstatus = PFN_INCOMPLETE;
- uint16 fwcount;
-
NULL_CHECK(dhd, "dhd is NULL", err);
NULL_CHECK(dhd->pno_state, "pno_state is NULL", err);
- /* The static asserts below guarantee the v1 and v2 net_info and subnet_info
- * structures are compatible in size and SSID offset, allowing v1 to be safely
- * used in the code below except for lscanresults fields themselves
- * (status, count, offset to netinfo).
- */
- STATIC_ASSERT(sizeof(wl_pfn_net_info_v1_t) == sizeof(wl_pfn_net_info_v2_t));
- STATIC_ASSERT(sizeof(wl_pfn_lnet_info_v1_t) == sizeof(wl_pfn_lnet_info_v2_t));
- STATIC_ASSERT(sizeof(wl_pfn_subnet_info_v1_t) == sizeof(wl_pfn_subnet_info_v2_t));
- ASSERT(OFFSETOF(wl_pfn_subnet_info_v1_t, SSID) ==
- OFFSETOF(wl_pfn_subnet_info_v2_t, u.SSID));
-
DHD_PNO(("%s enter\n", __FUNCTION__));
_pno_state = PNO_GET_PNOSTATE(dhd);
/* this is a first try to get batching results */
if (!list_empty(&_params->params_batch.get_batch.scan_results_list)) {
/* move the scan_results_list to expired_scan_results_lists */
- GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wcast-qual"
+#endif
list_for_each_entry_safe(siter, snext,
&_params->params_batch.get_batch.scan_results_list, list) {
- GCC_DIAGNOSTIC_POP();
list_move_tail(&siter->list,
&_params->params_batch.get_batch.expired_scan_results_list);
}
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic pop
+#endif
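
Throughout this patch the GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST()/GCC_DIAGNOSTIC_POP() wrappers are replaced with explicit pragmas that silence -Wcast-qual only around the offending statement. A standalone illustration of the same pattern (compile with -Wcast-qual to see the effect; the variables are made up):

#include <stdio.h>

int main(void)
{
	const char *ro = "read-only string";
	char *rw;

	/* Casting away const normally triggers -Wcast-qual; the
	 * push/ignored/pop sequence limits the suppression to this
	 * single statement.
	 */
#if defined(__GNUC__)
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wcast-qual"
#endif
	rw = (char *)ro;
#if defined(__GNUC__)
#pragma GCC diagnostic pop
#endif
	printf("%s\n", rw);
	return 0;
}
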
_params->params_batch.get_batch.top_node_cnt = 0;
_params->params_batch.get_batch.expired_tot_scan_cnt =
_params->params_batch.get_batch.tot_scan_cnt;
list_add(&pscan_results->list, &_params->params_batch.get_batch.scan_results_list);
}
-
- plbestnet_v1 = (wl_pfn_lscanresults_v1_t *)MALLOC(dhd->osh, PNO_BESTNET_LEN);
- NULL_CHECK(plbestnet_v1, "failed to allocate buffer for bestnet", err);
- plbestnet_v2 = (wl_pfn_lscanresults_v2_t*)plbestnet_v1;
-
+ plbestnet = (wl_pfn_lscanresults_v2_t *)MALLOC(dhd->osh, PNO_BESTNET_LEN);

+ if (!plbestnet) {
+ err = BCME_NOMEM;
+ DHD_ERROR(("%s : failed to allocate plbestnet\n", __FUNCTION__));
+ goto exit;
+ }
DHD_PNO(("%s enter\n", __FUNCTION__));
- do {
- err = dhd_iovar(dhd, 0, "pfnlbest", NULL, 0, (char *)plbestnet_v1, PNO_BESTNET_LEN,
- FALSE);
+ memset(plbestnet, 0, PNO_BESTNET_LEN);
+ while (plbestnet->status != PFN_COMPLETE) {
+ memset(plbestnet, 0, PNO_BESTNET_LEN);
+ err = dhd_iovar(dhd, 0, "pfnlbest", NULL, 0, (char *)plbestnet, PNO_BESTNET_LEN, 0);
if (err < 0) {
if (err == BCME_EPERM) {
DHD_ERROR(("we cannot get the batching data "
goto exit;
}
}
-
- if (plbestnet_v1->version == PFN_LBEST_SCAN_RESULT_VERSION_V1) {
- fwstatus = plbestnet_v1->status;
- fwcount = plbestnet_v1->count;
- plnetinfo = &plbestnet_v1->netinfo[0];
- if (fwcount == 0) {
- DHD_PNO(("No more batch results\n"));
+ DHD_PNO(("ver %d, status : %d, count %d\n", plbestnet->version,
+ plbestnet->status, plbestnet->count));
+ if (plbestnet->version != PFN_SCANRESULT_VERSION) {
+ err = BCME_VERSION;
+ DHD_ERROR(("bestnet version(%d) is mismatch with Driver version(%d)\n",
+ plbestnet->version, PFN_SCANRESULT_VERSION));
+ goto exit;
+ }
+ plnetinfo = plbestnet->netinfo;
+ for (i = 0; i < plbestnet->count; i++) {
+ pbestnet_entry = (dhd_pno_bestnet_entry_t *)
+ MALLOC(dhd->osh, BESTNET_ENTRY_SIZE);
+ if (pbestnet_entry == NULL) {
+ err = BCME_NOMEM;
+ DHD_ERROR(("failed to allocate dhd_pno_bestnet_entry\n"));
goto exit;
}
- if (fwcount > BESTN_MAX) {
- DHD_ERROR(("%s :fwcount %d is greater than BESTN_MAX %d \n",
- __FUNCTION__, fwcount, (int)BESTN_MAX));
- /* Process only BESTN_MAX number of results per batch */
- fwcount = BESTN_MAX;
- }
- for (i = 0; i < fwcount; i++) {
- pbestnet_entry = (dhd_pno_bestnet_entry_t *)
- MALLOC(dhd->osh, BESTNET_ENTRY_SIZE);
- if (pbestnet_entry == NULL) {
+ memset(pbestnet_entry, 0, BESTNET_ENTRY_SIZE);
+ pbestnet_entry->recorded_time = jiffies; /* record the current time */
+ /* create header for the first entry */
+ allocate_header = (i == 0)? TRUE : FALSE;
+ /* check whether the new generation is started or not */
+ if (timestamp && (TIME_DIFF(timestamp, plnetinfo->timestamp)
+ > TIME_MIN_DIFF))
+ allocate_header = TRUE;
+ timestamp = plnetinfo->timestamp;
+ if (allocate_header) {
+ pbestnetheader = (dhd_pno_best_header_t *)
+ MALLOC(dhd->osh, BEST_HEADER_SIZE);
+ if (pbestnetheader == NULL) {
err = BCME_NOMEM;
+ if (pbestnet_entry)
+ MFREE(dhd->osh, pbestnet_entry,
+ BESTNET_ENTRY_SIZE);
DHD_ERROR(("failed to allocate dhd_pno_bestnet_entry\n"));
goto exit;
}
- memset(pbestnet_entry, 0, BESTNET_ENTRY_SIZE);
- /* record the current time */
- pbestnet_entry->recorded_time = jiffies;
- /* create header for the first entry */
- allocate_header = (i == 0)? TRUE : FALSE;
- /* check whether the new generation is started or not */
- if (timestamp && (TIME_DIFF(timestamp, plnetinfo->timestamp)
- > TIME_MIN_DIFF))
- allocate_header = TRUE;
- timestamp = plnetinfo->timestamp;
- if (allocate_header) {
- pbestnetheader = (dhd_pno_best_header_t *)
- MALLOC(dhd->osh, BEST_HEADER_SIZE);
- if (pbestnetheader == NULL) {
- err = BCME_NOMEM;
- if (pbestnet_entry)
- MFREE(dhd->osh, pbestnet_entry,
- BESTNET_ENTRY_SIZE);
- DHD_ERROR(("failed to allocate"
- " dhd_pno_bestnet_entry\n"));
- goto exit;
- }
- /* increase total cnt of bestnet header */
- pscan_results->cnt_header++;
- /* need to record the reason to call dhd_pno_get_for_bach */
- if (reason)
- pbestnetheader->reason = (ENABLE << reason);
- memset(pbestnetheader, 0, BEST_HEADER_SIZE);
- /* initialize the head of linked list */
- INIT_LIST_HEAD(&(pbestnetheader->entry_list));
- /* link the pbestnet heaer into existed list */
- if (pscan_results->bestnetheader == NULL)
- /* In case of header */
- pscan_results->bestnetheader = pbestnetheader;
- else {
- dhd_pno_best_header_t *head =
- pscan_results->bestnetheader;
- pscan_results->bestnetheader = pbestnetheader;
- pbestnetheader->next = head;
- }
- }
- pbestnet_entry->channel = plnetinfo->pfnsubnet.channel;
- pbestnet_entry->RSSI = plnetinfo->RSSI;
- if (plnetinfo->flags & PFN_PARTIAL_SCAN_MASK) {
- /* if RSSI is positive value, we assume that
- * this scan is aborted by other scan
- */
- DHD_PNO(("This scan is aborted\n"));
- pbestnetheader->reason = (ENABLE << PNO_STATUS_ABORT);
+ /* increase total cnt of bestnet header */
+ pscan_results->cnt_header++;
+ /* need to record the reason to call dhd_pno_get_for_batch */
+ if (reason)
+ pbestnetheader->reason = (ENABLE << reason);
+ memset(pbestnetheader, 0, BEST_HEADER_SIZE);
+ /* initialize the head of linked list */
+ INIT_LIST_HEAD(&(pbestnetheader->entry_list));
+ /* link the pbestnet header into the existing list */
+ if (pscan_results->bestnetheader == NULL)
+ /* In case of header */
+ pscan_results->bestnetheader = pbestnetheader;
+ else {
+ dhd_pno_best_header_t *head = pscan_results->bestnetheader;
+ pscan_results->bestnetheader = pbestnetheader;
+ pbestnetheader->next = head;
}
- pbestnet_entry->rtt0 = plnetinfo->rtt0;
- pbestnet_entry->rtt1 = plnetinfo->rtt1;
- pbestnet_entry->timestamp = plnetinfo->timestamp;
- if (plnetinfo->pfnsubnet.SSID_len > DOT11_MAX_SSID_LEN) {
- DHD_ERROR(("%s: Invalid SSID length"
- " %d: trimming it to max\n",
- __FUNCTION__, plnetinfo->pfnsubnet.SSID_len));
- plnetinfo->pfnsubnet.SSID_len = DOT11_MAX_SSID_LEN;
- }
- pbestnet_entry->SSID_len = plnetinfo->pfnsubnet.SSID_len;
- memcpy(pbestnet_entry->SSID, plnetinfo->pfnsubnet.SSID,
- pbestnet_entry->SSID_len);
- memcpy(&pbestnet_entry->BSSID, &plnetinfo->pfnsubnet.BSSID,
- ETHER_ADDR_LEN);
- /* add the element into list */
- list_add_tail(&pbestnet_entry->list, &pbestnetheader->entry_list);
- /* increase best entry count */
- pbestnetheader->tot_cnt++;
- pbestnetheader->tot_size += BESTNET_ENTRY_SIZE;
- DHD_PNO(("Header %d\n", pscan_results->cnt_header - 1));
- DHD_PNO(("\tSSID : "));
- for (j = 0; j < plnetinfo->pfnsubnet.SSID_len; j++)
- DHD_PNO(("%c", plnetinfo->pfnsubnet.SSID[j]));
- DHD_PNO(("\n"));
- DHD_PNO(("\tBSSID: "MACDBG"\n",
- MAC2STRDBG(plnetinfo->pfnsubnet.BSSID.octet)));
- DHD_PNO(("\tchannel: %d, RSSI: %d, timestamp: %d ms\n",
- plnetinfo->pfnsubnet.channel,
- plnetinfo->RSSI, plnetinfo->timestamp));
- DHD_PNO(("\tRTT0 : %d, RTT1: %d\n", plnetinfo->rtt0,
- plnetinfo->rtt1));
- plnetinfo++;
}
- } else if (plbestnet_v2->version == PFN_LBEST_SCAN_RESULT_VERSION_V2) {
- fwstatus = plbestnet_v2->status;
- fwcount = plbestnet_v2->count;
- plnetinfo_v2 = (wl_pfn_lnet_info_v2_t*)&plbestnet_v2->netinfo[0];
- if (fwcount == 0) {
- DHD_PNO(("No more batch results\n"));
- goto exit;
- }
- if (fwcount > BESTN_MAX) {
- DHD_ERROR(("%s :fwcount %d is greater than BESTN_MAX %d \n",
- __FUNCTION__, fwcount, (int)BESTN_MAX));
- /* Process only BESTN_MAX number of results per batch */
- fwcount = BESTN_MAX;
- }
- DHD_PNO(("ver %d, status : %d, count %d\n",
- plbestnet_v2->version, fwstatus, fwcount));
-
- for (i = 0; i < fwcount; i++) {
- pbestnet_entry = (dhd_pno_bestnet_entry_t *)
- MALLOC(dhd->osh, BESTNET_ENTRY_SIZE);
- if (pbestnet_entry == NULL) {
- err = BCME_NOMEM;
- DHD_ERROR(("failed to allocate dhd_pno_bestnet_entry\n"));
- goto exit;
- }
- memset(pbestnet_entry, 0, BESTNET_ENTRY_SIZE);
- /* record the current time */
- pbestnet_entry->recorded_time = jiffies;
- /* create header for the first entry */
- allocate_header = (i == 0)? TRUE : FALSE;
- /* check whether the new generation is started or not */
- if (timestamp && (TIME_DIFF(timestamp, plnetinfo_v2->timestamp)
- > TIME_MIN_DIFF))
- allocate_header = TRUE;
- timestamp = plnetinfo_v2->timestamp;
- if (allocate_header) {
- pbestnetheader = (dhd_pno_best_header_t *)
- MALLOC(dhd->osh, BEST_HEADER_SIZE);
- if (pbestnetheader == NULL) {
- err = BCME_NOMEM;
- if (pbestnet_entry)
- MFREE(dhd->osh, pbestnet_entry,
- BESTNET_ENTRY_SIZE);
- DHD_ERROR(("failed to allocate"
- " dhd_pno_bestnet_entry\n"));
- goto exit;
- }
- /* increase total cnt of bestnet header */
- pscan_results->cnt_header++;
- /* need to record the reason to call dhd_pno_get_for_bach */
- if (reason)
- pbestnetheader->reason = (ENABLE << reason);
- memset(pbestnetheader, 0, BEST_HEADER_SIZE);
- /* initialize the head of linked list */
- INIT_LIST_HEAD(&(pbestnetheader->entry_list));
- /* link the pbestnet heaer into existed list */
- if (pscan_results->bestnetheader == NULL)
- /* In case of header */
- pscan_results->bestnetheader = pbestnetheader;
- else {
- dhd_pno_best_header_t *head =
- pscan_results->bestnetheader;
- pscan_results->bestnetheader = pbestnetheader;
- pbestnetheader->next = head;
- }
- }
- /* fills the best network info */
- pbestnet_entry->channel = plnetinfo_v2->pfnsubnet.channel;
- pbestnet_entry->RSSI = plnetinfo_v2->RSSI;
- if (plnetinfo_v2->flags & PFN_PARTIAL_SCAN_MASK) {
- /* if RSSI is positive value, we assume that
- * this scan is aborted by other scan
- */
- DHD_PNO(("This scan is aborted\n"));
- pbestnetheader->reason = (ENABLE << PNO_STATUS_ABORT);
- }
- pbestnet_entry->rtt0 = plnetinfo_v2->rtt0;
- pbestnet_entry->rtt1 = plnetinfo_v2->rtt1;
- pbestnet_entry->timestamp = plnetinfo_v2->timestamp;
- if (plnetinfo_v2->pfnsubnet.SSID_len > DOT11_MAX_SSID_LEN) {
- DHD_ERROR(("%s: Invalid SSID length"
- " %d: trimming it to max\n",
- __FUNCTION__, plnetinfo_v2->pfnsubnet.SSID_len));
- plnetinfo_v2->pfnsubnet.SSID_len = DOT11_MAX_SSID_LEN;
- }
- pbestnet_entry->SSID_len = plnetinfo_v2->pfnsubnet.SSID_len;
- memcpy(pbestnet_entry->SSID, plnetinfo_v2->pfnsubnet.u.SSID,
- pbestnet_entry->SSID_len);
- memcpy(&pbestnet_entry->BSSID, &plnetinfo_v2->pfnsubnet.BSSID,
- ETHER_ADDR_LEN);
- /* add the element into list */
- list_add_tail(&pbestnet_entry->list, &pbestnetheader->entry_list);
- /* increase best entry count */
- pbestnetheader->tot_cnt++;
- pbestnetheader->tot_size += BESTNET_ENTRY_SIZE;
- DHD_PNO(("Header %d\n", pscan_results->cnt_header - 1));
- DHD_PNO(("\tSSID : "));
- for (j = 0; j < plnetinfo_v2->pfnsubnet.SSID_len; j++)
- DHD_PNO(("%c", plnetinfo_v2->pfnsubnet.u.SSID[j]));
- DHD_PNO(("\n"));
- DHD_PNO(("\tBSSID: "MACDBG"\n",
- MAC2STRDBG(plnetinfo_v2->pfnsubnet.BSSID.octet)));
- DHD_PNO(("\tchannel: %d, RSSI: %d, timestamp: %d ms\n",
- plnetinfo_v2->pfnsubnet.channel,
- plnetinfo_v2->RSSI, plnetinfo_v2->timestamp));
- DHD_PNO(("\tRTT0 : %d, RTT1: %d\n", plnetinfo_v2->rtt0,
- plnetinfo_v2->rtt1));
- plnetinfo_v2++;
- }
- } else {
- err = BCME_VERSION;
- DHD_ERROR(("bestnet fw version %d not supported\n",
- plbestnet_v1->version));
- goto exit;
+ /* fills the best network info */
+ pbestnet_entry->channel = plnetinfo->pfnsubnet.channel;
+ pbestnet_entry->RSSI = plnetinfo->RSSI;
+ if (plnetinfo->flags & PFN_PARTIAL_SCAN_MASK) {
+ /* if RSSI is positive value, we assume that
+ * this scan is aborted by other scan
+ */
+ DHD_PNO(("This scan is aborted\n"));
+ pbestnetheader->reason = (ENABLE << PNO_STATUS_ABORT);
+ }
+ pbestnet_entry->rtt0 = plnetinfo->rtt0;
+ pbestnet_entry->rtt1 = plnetinfo->rtt1;
+ pbestnet_entry->timestamp = plnetinfo->timestamp;
+ if (plnetinfo->pfnsubnet.SSID_len > DOT11_MAX_SSID_LEN) {
+ DHD_ERROR(("%s: Invalid SSID length %d: trimming it to max\n",
+ __FUNCTION__, plnetinfo->pfnsubnet.SSID_len));
+ plnetinfo->pfnsubnet.SSID_len = DOT11_MAX_SSID_LEN;
+ }
+ pbestnet_entry->SSID_len = plnetinfo->pfnsubnet.SSID_len;
+ memcpy(pbestnet_entry->SSID, plnetinfo->pfnsubnet.u.SSID,
+ pbestnet_entry->SSID_len);
+ memcpy(&pbestnet_entry->BSSID, &plnetinfo->pfnsubnet.BSSID, ETHER_ADDR_LEN);
+ /* add the element into list */
+ list_add_tail(&pbestnet_entry->list, &pbestnetheader->entry_list);
+ /* increase best entry count */
+ pbestnetheader->tot_cnt++;
+ pbestnetheader->tot_size += BESTNET_ENTRY_SIZE;
+ DHD_PNO(("Header %d\n", pscan_results->cnt_header - 1));
+ DHD_PNO(("\tSSID : "));
+ for (j = 0; j < plnetinfo->pfnsubnet.SSID_len; j++)
+ DHD_PNO(("%c", plnetinfo->pfnsubnet.u.SSID[j]));
+ DHD_PNO(("\n"));
+ DHD_PNO(("\tBSSID: %02x:%02x:%02x:%02x:%02x:%02x\n",
+ plnetinfo->pfnsubnet.BSSID.octet[0],
+ plnetinfo->pfnsubnet.BSSID.octet[1],
+ plnetinfo->pfnsubnet.BSSID.octet[2],
+ plnetinfo->pfnsubnet.BSSID.octet[3],
+ plnetinfo->pfnsubnet.BSSID.octet[4],
+ plnetinfo->pfnsubnet.BSSID.octet[5]));
+ DHD_PNO(("\tchannel: %d, RSSI: %d, timestamp: %d ms\n",
+ plnetinfo->pfnsubnet.channel,
+ plnetinfo->RSSI, plnetinfo->timestamp));
+ DHD_PNO(("\tRTT0 : %d, RTT1: %d\n", plnetinfo->rtt0, plnetinfo->rtt1));
+ plnetinfo++;
}
- } while (fwstatus != PFN_COMPLETE);
-
+ }
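
The rewritten retrieval loop keeps re-issuing the "pfnlbest" iovar and consuming entries until the firmware sets the status field to PFN_COMPLETE, instead of the previous do/while on a locally tracked fwstatus. A compact sketch of that poll-until-complete shape, with a stubbed fetch function standing in for the iovar call:

#include <stdio.h>

enum { PFN_INCOMPLETE = 0, PFN_COMPLETE = 1 };

struct batch_chunk {
	int status;	/* PFN_INCOMPLETE until the firmware buffer is drained */
	int count;	/* entries carried in this chunk */
};

/* Stub for the real "pfnlbest" iovar; pretends three chunks are pending. */
static int fetch_chunk(struct batch_chunk *out)
{
	static int remaining = 3;

	out->count = 8;
	out->status = (--remaining > 0) ? PFN_INCOMPLETE : PFN_COMPLETE;
	return 0;
}

int main(void)
{
	struct batch_chunk chunk = { PFN_INCOMPLETE, 0 };
	int total = 0;

	while (chunk.status != PFN_COMPLETE) {
		if (fetch_chunk(&chunk) < 0)
			break;			/* bail out on transport errors */
		total += chunk.count;		/* process this chunk's entries */
	}
	printf("consumed %d entries\n", total);
	return 0;
}
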
if (pscan_results->cnt_header == 0) {
/* In case that we didn't get any data from the firmware
* Remove the current scan_result list from get_bach.scan_results_list.
/* This is a first try to get batching results */
if (!list_empty(&_params->params_batch.get_batch.scan_results_list)) {
/* move the scan_results_list to expired_scan_results_lists */
- GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wcast-qual"
+#endif
list_for_each_entry_safe(siter, snext,
&_params->params_batch.get_batch.scan_results_list, list) {
- GCC_DIAGNOSTIC_POP();
list_move_tail(&siter->list,
&_params->params_batch.get_batch.expired_scan_results_list);
}
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic pop
+#endif
/* reset gloval values after moving to expired list */
_params->params_batch.get_batch.top_node_cnt = 0;
_params->params_batch.get_batch.expired_tot_scan_cnt =
}
}
exit:
- if (plbestnet_v1)
- MFREE(dhd->osh, plbestnet_v1, PNO_BESTNET_LEN);
+ if (plbestnet)
+ MFREE(dhd->osh, plbestnet, PNO_BESTNET_LEN);
if (_params) {
_params->params_batch.get_batch.buf = NULL;
_params->params_batch.get_batch.bufsize = 0;
dhd_pub_t *dhd;
struct dhd_pno_batch_params *params_batch;
DHD_PNO(("%s enter\n", __FUNCTION__));
- GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wcast-qual"
+#endif
_pno_state = container_of(work, struct dhd_pno_status_info, work);
- GCC_DIAGNOSTIC_POP();
-
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic pop
+#endif
dhd = _pno_state->dhd;
if (dhd == NULL) {
DHD_ERROR(("%s : dhd is NULL\n", __FUNCTION__));
msecs_to_jiffies(GSCAN_BATCH_GET_MAX_WAIT));
}
} else
-#endif // endif
+#endif
{
if (!(_pno_state->pno_mode & DHD_PNO_BATCH_MODE)) {
DHD_ERROR(("%s: Batching SCAN mode is not enabled\n", __FUNCTION__));
#ifdef GSCAN_SUPPORT
if (!(_pno_state->pno_mode & DHD_PNO_GSCAN_MODE))
-#endif // endif
+#endif
err = params_batch->get_batch.bytes_written;
exit:
return err;
DHD_PNO(("Gscan is ongoing, nothing to stop here\n"));
return err;
}
-#endif // endif
+#endif
if (!(_pno_state->pno_mode & DHD_PNO_BATCH_MODE)) {
DHD_ERROR(("%s : PNO BATCH MODE is not enabled\n", __FUNCTION__));
_params_legacy =
&(_pno_state->pno_params_arr[INDEX_OF_LEGACY_PARAMS].params_legacy);
err = dhd_pno_set_legacy_pno(dhd, _params_legacy->scan_fr,
- _params_legacy->pno_repeat,
- _params_legacy->pno_freq_expo_max,
- _params_legacy->chan_list, _params_legacy->nchan);
+ _params_legacy->pno_repeat,
+ _params_legacy->pno_freq_expo_max,
+ _params_legacy->chan_list, _params_legacy->nchan);
if (err < 0) {
DHD_ERROR(("%s : failed to restart legacy PNO scan(err: %d)\n",
__FUNCTION__, err));
} else if (_pno_state->pno_mode & DHD_PNO_HOTLIST_MODE) {
struct dhd_pno_bssid *iter, *next;
_params = &(_pno_state->pno_params_arr[INDEX_OF_HOTLIST_PARAMS]);
- p_pfn_bssid = (wl_pfn_bssid_t *)MALLOCZ(dhd->osh,
- sizeof(wl_pfn_bssid_t) * _params->params_hotlist.nbssid);
+ p_pfn_bssid = kzalloc(sizeof(wl_pfn_bssid_t) *
+ _params->params_hotlist.nbssid, GFP_KERNEL);
if (p_pfn_bssid == NULL) {
DHD_ERROR(("%s : failed to allocate wl_pfn_bssid_t array"
" (count: %d)",
}
i = 0;
/* convert dhd_pno_bssid to wl_pfn_bssid */
- GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wcast-qual"
+#endif
list_for_each_entry_safe(iter, next,
&_params->params_hotlist.bssid_list, list) {
- GCC_DIAGNOSTIC_POP();
memcpy(&p_pfn_bssid[i].macaddr, &iter->macaddr, ETHER_ADDR_LEN);
p_pfn_bssid[i].flags = iter->flags;
i++;
}
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic pop
+#endif
err = dhd_pno_set_for_hotlist(dhd, p_pfn_bssid, &_params->params_hotlist);
if (err < 0) {
_pno_state->pno_mode &= ~DHD_PNO_HOTLIST_MODE;
exit:
_params = &_pno_state->pno_params_arr[INDEX_OF_BATCH_PARAMS];
_dhd_pno_reinitialize_prof(dhd, _params, DHD_PNO_BATCH_MODE);
- MFREE(dhd->osh, p_pfn_bssid,
- sizeof(wl_pfn_bssid_t) * _params->params_hotlist.nbssid);
+ kfree(p_pfn_bssid);
return err;
}
}
DHD_PNO(("\n"));
}
-#endif // endif
+#endif
if (_params->params_hotlist.nchan) {
/* copy the channel list into local array */
memcpy(_chan_list, _params->params_hotlist.chan_list,
}
}
for (i = 0; i < hotlist_params->nbssid; i++) {
- _pno_bssid = (struct dhd_pno_bssid *)MALLOCZ(dhd->osh,
- sizeof(struct dhd_pno_bssid));
+ _pno_bssid = kzalloc(sizeof(struct dhd_pno_bssid), GFP_KERNEL);
NULL_CHECK(_pno_bssid, "_pfn_bssid is NULL", err);
memcpy(&_pno_bssid->macaddr, &p_pfn_bssid[i].macaddr, ETHER_ADDR_LEN);
_pno_bssid->flags = p_pfn_bssid[i].flags;
struct dhd_pno_legacy_params *_params_legacy;
_params_legacy =
&(_pno_state->pno_params_arr[INDEX_OF_LEGACY_PARAMS].params_legacy);
+
err = dhd_pno_set_legacy_pno(dhd, _params_legacy->scan_fr,
_params_legacy->pno_repeat, _params_legacy->pno_freq_expo_max,
_params_legacy->chan_list, _params_legacy->nchan);
while (iter) {
tmp = iter->next;
- MFREE(dhd->osh, iter,
- ((iter->tot_count - 1) * sizeof(wifi_gscan_result_t))
- + sizeof(gscan_results_cache_t));
+ kfree(iter);
iter = tmp;
}
u32 bi_length = 0;
uint8 channel;
uint32 mem_needed;
- struct osl_timespec ts;
+ struct timespec ts;
u32 bi_ie_length = 0;
u32 bi_ie_offset = 0;
*size = 0;
- GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wcast-qual"
+#endif
gscan_result = (wl_gscan_result_t *)data;
- GCC_DIAGNOSTIC_POP();
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic pop
+#endif
if (!gscan_result) {
DHD_ERROR(("Invalid gscan result (NULL pointer)\n"));
goto exit;
DHD_ERROR(("%s: Invalid SSID length:%u\n", __FUNCTION__, bi->SSID_len));
goto exit;
}
-
- mem_needed = OFFSETOF(wifi_gscan_full_result_t, ie_data) + bi->ie_length;
- result = (wifi_gscan_full_result_t *)MALLOC(dhd->osh, mem_needed);
+ mem_needed = OFFSETOF(wifi_gscan_full_result_t, ie_data) + bi_ie_length;
+ result = (wifi_gscan_full_result_t *) kmalloc(mem_needed, GFP_KERNEL);
if (!result) {
DHD_ERROR(("%s Cannot malloc scan result buffer %d bytes\n",
__FUNCTION__, mem_needed));
goto exit;
}
-
result->scan_ch_bucket = gscan_result->scan_ch_bucket;
memcpy(result->fixed.ssid, bi->SSID, bi->SSID_len);
result->fixed.ssid[bi->SSID_len] = '\0';
result->fixed.rssi = (int32) bi->RSSI;
result->fixed.rtt = 0;
result->fixed.rtt_sd = 0;
- osl_get_monotonic_boottime(&ts);
+ get_monotonic_boottime(&ts);
result->fixed.ts = (uint64) TIMESPEC_TO_US(ts);
result->fixed.beacon_period = dtoh16(bi->beacon_period);
result->fixed.capability = dtoh16(bi->capability);
return NULL;
gscan_params = &(_pno_state->pno_params_arr[INDEX_OF_GSCAN_PARAMS].params_gscan);
- if (event == WLC_E_PFN_NET_FOUND || event == WLC_E_PFN_NET_LOST) {
- wl_pfn_scanresults_v1_t *pfn_result = (wl_pfn_scanresults_v1_t *)data;
- wl_pfn_scanresults_v2_t *pfn_result_v2 = (wl_pfn_scanresults_v2_t *)data;
- wl_pfn_net_info_v1_t *net;
- wl_pfn_net_info_v2_t *net_v2;
+ if (event == WLC_E_PFN_NET_FOUND || event == WLC_E_PFN_NET_LOST) {
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wcast-qual"
+#endif
+ wl_pfn_scanresults_v2_t *pfn_result = (wl_pfn_scanresults_v2_t *)data;
+ wl_pfn_net_info_v2_t *net;
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic pop
+#endif
+ if (pfn_result->version != PFN_SCANRESULT_VERSION) {
+ DHD_ERROR(("%s event %d: Incorrect version %d %d\n", __FUNCTION__, event,
+ pfn_result->version, PFN_SCANRESULT_VERSION));
+ return NULL;
+ }
+ /* Check if count of pfn results is corrupted */
+ if (pfn_result->count > EVENT_MAX_NETCNT_V2) {
+ DHD_ERROR(("%s event %d: pfn results count %d"
+ "exceeds the max limit\n", __FUNCTION__, event,
+ pfn_result->count));
+ return NULL;
+ }
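
The added checks reject a result block whose version or count field does not match expectations before count is used to size an allocation, which keeps a corrupted event from driving an oversized buffer. A minimal sketch of that validate-then-allocate pattern (an arbitrary local limit stands in for EVENT_MAX_NETCNT_V2, and the sketch also rejects a zero count, which the older code guarded against):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define MAX_NET_CNT 64u		/* stand-in for the driver's EVENT_MAX_NETCNT_V2 */

struct net_result { uint8_t bssid[6]; int32_t rssi; };

static struct net_result *alloc_results(uint32_t fw_count)
{
	/* refuse counts the firmware should never report before sizing the buffer */
	if (fw_count == 0 || fw_count > MAX_NET_CNT) {
		fprintf(stderr, "rejecting bogus count %u\n", fw_count);
		return NULL;
	}
	return calloc(fw_count, sizeof(struct net_result));
}

int main(void)
{
	struct net_result *ok = alloc_results(12);
	struct net_result *bad = alloc_results(5000);

	printf("count 12 -> %s, count 5000 -> %s\n",
	       ok ? "allocated" : "rejected", bad ? "allocated" : "rejected");
	free(ok);
	free(bad);
	return 0;
}
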
- if (pfn_result->version == PFN_SCANRESULT_VERSION_V1) {
- if ((pfn_result->count == 0) || (pfn_result->count > EVENT_MAX_NETCNT_V1)) {
- DHD_ERROR(("%s event %d: wrong pfn v1 results count %d\n",
- __FUNCTION__, event, pfn_result->count));
- return NULL;
- }
- count = pfn_result->count;
- mem_needed = sizeof(dhd_epno_results_t) * count;
- results = (dhd_epno_results_t *)MALLOC(dhd->osh, mem_needed);
- if (!results) {
- DHD_ERROR(("%s: Can't malloc %d bytes for results\n", __FUNCTION__,
- mem_needed));
- return NULL;
- }
- for (i = 0; i < count; i++) {
- net = &pfn_result->netinfo[i];
- results[i].rssi = net->RSSI;
- results[i].channel = wf_channel2mhz(net->pfnsubnet.channel,
- (net->pfnsubnet.channel <= CH_MAX_2G_CHANNEL ?
- WF_CHAN_FACTOR_2_4_G : WF_CHAN_FACTOR_5_G));
- results[i].flags = (event == WLC_E_PFN_NET_FOUND) ?
- WL_PFN_SSID_EXT_FOUND: WL_PFN_SSID_EXT_LOST;
- results[i].ssid_len = min(net->pfnsubnet.SSID_len,
- (uint8)DOT11_MAX_SSID_LEN);
- bssid = &results[i].bssid;
- memcpy(bssid, &net->pfnsubnet.BSSID, ETHER_ADDR_LEN);
- if (!net->pfnsubnet.SSID_len) {
- DHD_ERROR(("%s: Gscan results indexing is not"
- " supported in version 1 \n", __FUNCTION__));
- MFREE(dhd->osh, results, mem_needed);
- return NULL;
- } else {
- memcpy(results[i].ssid, net->pfnsubnet.SSID,
- results[i].ssid_len);
- }
- memcpy(ssid, results[i].ssid, results[i].ssid_len);
- ssid[results[i].ssid_len] = '\0';
- DHD_PNO(("ssid - %s bssid "MACDBG" ch %d rssi %d flags %d\n",
- ssid, MAC2STRDBG(bssid->octet), results[i].channel,
- results[i].rssi, results[i].flags));
- }
- } else if (pfn_result_v2->version == PFN_SCANRESULT_VERSION_V2) {
- if ((pfn_result->count == 0) || (pfn_result->count > EVENT_MAX_NETCNT_V2)) {
- DHD_ERROR(("%s event %d: wrong pfn v2 results count %d\n",
- __FUNCTION__, event, pfn_result->count));
- return NULL;
- }
- count = pfn_result_v2->count;
- mem_needed = sizeof(dhd_epno_results_t) * count;
- results = (dhd_epno_results_t *)MALLOC(dhd->osh, mem_needed);
- if (!results) {
- DHD_ERROR(("%s: Can't malloc %d bytes for results\n", __FUNCTION__,
- mem_needed));
- return NULL;
- }
- for (i = 0; i < count; i++) {
- net_v2 = &pfn_result_v2->netinfo[i];
- results[i].rssi = net_v2->RSSI;
- results[i].channel = wf_channel2mhz(net_v2->pfnsubnet.channel,
- (net_v2->pfnsubnet.channel <= CH_MAX_2G_CHANNEL ?
- WF_CHAN_FACTOR_2_4_G : WF_CHAN_FACTOR_5_G));
- results[i].flags = (event == WLC_E_PFN_NET_FOUND) ?
- WL_PFN_SSID_EXT_FOUND: WL_PFN_SSID_EXT_LOST;
- results[i].ssid_len = min(net_v2->pfnsubnet.SSID_len,
- (uint8)DOT11_MAX_SSID_LEN);
- bssid = &results[i].bssid;
- memcpy(bssid, &net_v2->pfnsubnet.BSSID, ETHER_ADDR_LEN);
- if (!net_v2->pfnsubnet.SSID_len) {
- dhd_pno_idx_to_ssid(gscan_params, &results[i],
- net_v2->pfnsubnet.u.index);
- } else {
- memcpy(results[i].ssid, net_v2->pfnsubnet.u.SSID,
- results[i].ssid_len);
- }
- memcpy(ssid, results[i].ssid, results[i].ssid_len);
- ssid[results[i].ssid_len] = '\0';
- DHD_PNO(("ssid - %s bssid "MACDBG" ch %d rssi %d flags %d\n",
- ssid, MAC2STRDBG(bssid->octet), results[i].channel,
- results[i].rssi, results[i].flags));
- }
- } else {
- DHD_ERROR(("%s event %d: Incorrect version %d , not supported\n",
- __FUNCTION__, event, pfn_result->version));
+ count = pfn_result->count;
+ mem_needed = sizeof(dhd_epno_results_t) * count;
+ results = (dhd_epno_results_t *) kmalloc(mem_needed, GFP_KERNEL);
+ if (!results) {
+ DHD_ERROR(("%s: Can't malloc %d bytes for results\n", __FUNCTION__,
+ mem_needed));
return NULL;
}
+ for (i = 0; i < count; i++) {
+ net = &pfn_result->netinfo[i];
+ results[i].rssi = net->RSSI;
+ results[i].channel = wf_channel2mhz(net->pfnsubnet.channel,
+ (net->pfnsubnet.channel <= CH_MAX_2G_CHANNEL ?
+ WF_CHAN_FACTOR_2_4_G : WF_CHAN_FACTOR_5_G));
+ results[i].flags = (event == WLC_E_PFN_NET_FOUND) ?
+ WL_PFN_SSID_EXT_FOUND: WL_PFN_SSID_EXT_LOST;
+ results[i].ssid_len = min(net->pfnsubnet.SSID_len,
+ (uint8)DOT11_MAX_SSID_LEN);
+ bssid = &results[i].bssid;
+ memcpy(bssid, &net->pfnsubnet.BSSID, ETHER_ADDR_LEN);
+ if (!net->pfnsubnet.SSID_len) {
+ dhd_pno_idx_to_ssid(gscan_params, &results[i],
+ net->pfnsubnet.u.index);
+ } else {
+ memcpy(results[i].ssid, net->pfnsubnet.u.SSID, results[i].ssid_len);
+ }
+ memcpy(ssid, results[i].ssid, results[i].ssid_len);
+ ssid[results[i].ssid_len] = '\0';
+ DHD_PNO(("ssid - %s bssid %02x:%02x:%02x:%02x:%02x:%02x "
+ "ch %d rssi %d flags %d\n", ssid,
+ bssid->octet[0], bssid->octet[1],
+ bssid->octet[2], bssid->octet[3],
+ bssid->octet[4], bssid->octet[5],
+ results[i].channel, results[i].rssi, results[i].flags));
+ }
}
*size = mem_needed;
return results;
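
Several of these paths convert a firmware channel number to a center frequency with wf_channel2mhz(), selecting the 2.4 GHz or 5 GHz start factor by comparing against CH_MAX_2G_CHANNEL. A rough userspace approximation of that mapping under the usual 802.11 numbering (a sketch of the idea, not the exact Broadcom helper):

#include <stdio.h>

#define CH_MAX_2G_CHANNEL 14	/* highest 2.4 GHz channel number */

/* Approximate channel-number to centre-frequency (MHz) mapping. */
static int channel_to_mhz(int channel)
{
	if (channel <= 0)
		return 0;
	if (channel <= CH_MAX_2G_CHANNEL)
		return (channel == 14) ? 2484 : 2407 + 5 * channel;
	return 5000 + 5 * channel;	/* 5 GHz band, e.g. 36 -> 5180 */
}

int main(void)
{
	int chans[] = { 1, 6, 14, 36, 149 };
	size_t i;

	for (i = 0; i < sizeof(chans) / sizeof(chans[0]); i++)
		printf("channel %3d -> %d MHz\n", chans[i], channel_to_mhz(chans[i]));
	return 0;
}
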
void *
dhd_handle_hotlist_scan_evt(dhd_pub_t *dhd, const void *event_data,
- int *send_evt_bytes, hotlist_type_t type, u32 *buf_len)
+ int *send_evt_bytes, hotlist_type_t type)
{
void *ptr = NULL;
dhd_pno_status_info_t *_pno_state = PNO_GET_PNOSTATE(dhd);
struct dhd_pno_gscan_params *gscan_params;
- wl_pfn_scanresults_v1_t *results_v1 = (wl_pfn_scanresults_v1_t *)event_data;
- wl_pfn_scanresults_v2_t *results_v2 = (wl_pfn_scanresults_v2_t *)event_data;
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wcast-qual"
+#endif
+ wl_pfn_scanresults_v2_t *results = (wl_pfn_scanresults_v2_t *)event_data;
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic pop
+#endif
wifi_gscan_result_t *hotlist_found_array;
- wl_pfn_net_info_v1_t *pnetinfo;
- wl_pfn_net_info_v2_t *pnetinfo_v2;
+ wl_pfn_net_info_v2_t *plnetinfo;
gscan_results_cache_t *gscan_hotlist_cache;
- u32 malloc_size = 0, i, total = 0;
- struct osl_timespec tm_spec;
- uint16 fwstatus;
- uint16 fwcount;
-
- /* Static asserts in _dhd_pno_get_for_batch() above guarantee the v1 and v2
- * net_info and subnet_info structures are compatible in size and SSID offset,
- * allowing v1 to be safely used in the code below except for lscanresults
- * fields themselves (status, count, offset to netinfo).
- */
-
- *buf_len = 0;
- if (results_v1->version == PFN_SCANRESULTS_VERSION_V1) {
- fwstatus = results_v1->status;
- fwcount = results_v1->count;
- pnetinfo = &results_v1->netinfo[0];
-
- gscan_params = &(_pno_state->pno_params_arr[INDEX_OF_GSCAN_PARAMS].params_gscan);
-
- if (!fwcount || (fwcount > EVENT_MAX_NETCNT_V1)) {
- DHD_ERROR(("%s: wrong v1 fwcount:%d\n", __FUNCTION__, fwcount));
- *send_evt_bytes = 0;
- return ptr;
- }
+ int malloc_size = 0, i, total = 0;
+ struct timespec tm_spec;
- osl_get_monotonic_boottime(&tm_spec);
- malloc_size = sizeof(gscan_results_cache_t) +
- ((fwcount - 1) * sizeof(wifi_gscan_result_t));
- gscan_hotlist_cache = (gscan_results_cache_t *)MALLOC(dhd->osh, malloc_size);
- if (!gscan_hotlist_cache) {
- DHD_ERROR(("%s Cannot Malloc %d bytes!!\n", __FUNCTION__, malloc_size));
- *send_evt_bytes = 0;
- return ptr;
- }
-
- *buf_len = malloc_size;
- if (type == HOTLIST_FOUND) {
- gscan_hotlist_cache->next = gscan_params->gscan_hotlist_found;
- gscan_params->gscan_hotlist_found = gscan_hotlist_cache;
- DHD_PNO(("%s enter, FOUND results count %d\n", __FUNCTION__, fwcount));
- } else {
- gscan_hotlist_cache->next = gscan_params->gscan_hotlist_lost;
- gscan_params->gscan_hotlist_lost = gscan_hotlist_cache;
- DHD_PNO(("%s enter, LOST results count %d\n", __FUNCTION__, fwcount));
- }
-
- gscan_hotlist_cache->tot_count = fwcount;
- gscan_hotlist_cache->tot_consumed = 0;
-
- for (i = 0; i < fwcount; i++, pnetinfo++) {
- hotlist_found_array = &gscan_hotlist_cache->results[i];
- memset(hotlist_found_array, 0, sizeof(wifi_gscan_result_t));
- hotlist_found_array->channel = wf_channel2mhz(pnetinfo->pfnsubnet.channel,
- (pnetinfo->pfnsubnet.channel <= CH_MAX_2G_CHANNEL?
- WF_CHAN_FACTOR_2_4_G : WF_CHAN_FACTOR_5_G));
- hotlist_found_array->rssi = (int32) pnetinfo->RSSI;
-
- hotlist_found_array->ts =
- convert_fw_rel_time_to_systime(&tm_spec,
- (pnetinfo->timestamp * 1000));
- if (pnetinfo->pfnsubnet.SSID_len > DOT11_MAX_SSID_LEN) {
- DHD_ERROR(("Invalid SSID length %d: trimming it to max\n",
- pnetinfo->pfnsubnet.SSID_len));
- pnetinfo->pfnsubnet.SSID_len = DOT11_MAX_SSID_LEN;
- }
- memcpy(hotlist_found_array->ssid, pnetinfo->pfnsubnet.SSID,
- pnetinfo->pfnsubnet.SSID_len);
- hotlist_found_array->ssid[pnetinfo->pfnsubnet.SSID_len] = '\0';
-
- memcpy(&hotlist_found_array->macaddr, &pnetinfo->pfnsubnet.BSSID,
- ETHER_ADDR_LEN);
- DHD_PNO(("\t%s "MACDBG" rssi %d\n",
- hotlist_found_array->ssid,
- MAC2STRDBG(hotlist_found_array->macaddr.octet),
- hotlist_found_array->rssi));
- }
- } else if (results_v2->version == PFN_SCANRESULTS_VERSION_V2) {
- fwstatus = results_v2->status;
- fwcount = results_v2->count;
- pnetinfo_v2 = (wl_pfn_net_info_v2_t*)&results_v2->netinfo[0];
-
- gscan_params = &(_pno_state->pno_params_arr[INDEX_OF_GSCAN_PARAMS].params_gscan);
-
- if (!fwcount || (fwcount > EVENT_MAX_NETCNT_V2)) {
- DHD_ERROR(("%s: wrong v2 fwcount:%d\n", __FUNCTION__, fwcount));
- *send_evt_bytes = 0;
- return ptr;
- }
+ gscan_params = &(_pno_state->pno_params_arr[INDEX_OF_GSCAN_PARAMS].params_gscan);
- osl_get_monotonic_boottime(&tm_spec);
- malloc_size = sizeof(gscan_results_cache_t) +
- ((fwcount - 1) * sizeof(wifi_gscan_result_t));
- gscan_hotlist_cache =
- (gscan_results_cache_t *)MALLOC(dhd->osh, malloc_size);
- if (!gscan_hotlist_cache) {
- DHD_ERROR(("%s Cannot Malloc %d bytes!!\n", __FUNCTION__, malloc_size));
- *send_evt_bytes = 0;
- return ptr;
- }
- *buf_len = malloc_size;
- if (type == HOTLIST_FOUND) {
- gscan_hotlist_cache->next = gscan_params->gscan_hotlist_found;
- gscan_params->gscan_hotlist_found = gscan_hotlist_cache;
- DHD_PNO(("%s enter, FOUND results count %d\n", __FUNCTION__, fwcount));
- } else {
- gscan_hotlist_cache->next = gscan_params->gscan_hotlist_lost;
- gscan_params->gscan_hotlist_lost = gscan_hotlist_cache;
- DHD_PNO(("%s enter, LOST results count %d\n", __FUNCTION__, fwcount));
- }
+ if ((results->count == 0) || (results->count > EVENT_MAX_NETCNT_V2)) {
+ DHD_ERROR(("%s: wrong result count:%d\n", __FUNCTION__, results->count));
+ *send_evt_bytes = 0;
+ return ptr;
+ }
- gscan_hotlist_cache->tot_count = fwcount;
- gscan_hotlist_cache->tot_consumed = 0;
- gscan_hotlist_cache->scan_ch_bucket = results_v2->scan_ch_bucket;
+ get_monotonic_boottime(&tm_spec);
+ malloc_size = sizeof(gscan_results_cache_t) +
+ ((results->count - 1) * sizeof(wifi_gscan_result_t));
+ gscan_hotlist_cache = (gscan_results_cache_t *) kmalloc(malloc_size, GFP_KERNEL);
- for (i = 0; i < fwcount; i++, pnetinfo_v2++) {
- hotlist_found_array = &gscan_hotlist_cache->results[i];
- memset(hotlist_found_array, 0, sizeof(wifi_gscan_result_t));
- hotlist_found_array->channel =
- wf_channel2mhz(pnetinfo_v2->pfnsubnet.channel,
- (pnetinfo_v2->pfnsubnet.channel <= CH_MAX_2G_CHANNEL?
- WF_CHAN_FACTOR_2_4_G : WF_CHAN_FACTOR_5_G));
- hotlist_found_array->rssi = (int32) pnetinfo_v2->RSSI;
-
- hotlist_found_array->ts =
- convert_fw_rel_time_to_systime(&tm_spec,
- (pnetinfo_v2->timestamp * 1000));
- if (pnetinfo_v2->pfnsubnet.SSID_len > DOT11_MAX_SSID_LEN) {
- DHD_ERROR(("Invalid SSID length %d: trimming it to max\n",
- pnetinfo_v2->pfnsubnet.SSID_len));
- pnetinfo_v2->pfnsubnet.SSID_len = DOT11_MAX_SSID_LEN;
- }
- memcpy(hotlist_found_array->ssid, pnetinfo_v2->pfnsubnet.u.SSID,
- pnetinfo_v2->pfnsubnet.SSID_len);
- hotlist_found_array->ssid[pnetinfo_v2->pfnsubnet.SSID_len] = '\0';
-
- memcpy(&hotlist_found_array->macaddr, &pnetinfo_v2->pfnsubnet.BSSID,
- ETHER_ADDR_LEN);
- DHD_PNO(("\t%s "MACDBG" rssi %d\n",
- hotlist_found_array->ssid,
- MAC2STRDBG(hotlist_found_array->macaddr.octet),
- hotlist_found_array->rssi));
- }
- } else {
- DHD_ERROR(("%s: event version %d not supported\n",
- __FUNCTION__, results_v1->version));
+ if (!gscan_hotlist_cache) {
+ DHD_ERROR(("%s Cannot Malloc %d bytes!!\n", __FUNCTION__, malloc_size));
*send_evt_bytes = 0;
return ptr;
}
- if (fwstatus == PFN_COMPLETE) {
+
+ if (type == HOTLIST_FOUND) {
+ gscan_hotlist_cache->next = gscan_params->gscan_hotlist_found;
+ gscan_params->gscan_hotlist_found = gscan_hotlist_cache;
+ DHD_PNO(("%s enter, FOUND results count %d\n", __FUNCTION__, results->count));
+ } else {
+ gscan_hotlist_cache->next = gscan_params->gscan_hotlist_lost;
+ gscan_params->gscan_hotlist_lost = gscan_hotlist_cache;
+ DHD_PNO(("%s enter, LOST results count %d\n", __FUNCTION__, results->count));
+ }
+
+ gscan_hotlist_cache->tot_count = results->count;
+ gscan_hotlist_cache->tot_consumed = 0;
+ gscan_hotlist_cache->scan_ch_bucket = results->scan_ch_bucket;
+ plnetinfo = results->netinfo;
+
+ for (i = 0; i < results->count; i++, plnetinfo++) {
+ hotlist_found_array = &gscan_hotlist_cache->results[i];
+ memset(hotlist_found_array, 0, sizeof(wifi_gscan_result_t));
+ hotlist_found_array->channel = wf_channel2mhz(plnetinfo->pfnsubnet.channel,
+ (plnetinfo->pfnsubnet.channel <= CH_MAX_2G_CHANNEL?
+ WF_CHAN_FACTOR_2_4_G : WF_CHAN_FACTOR_5_G));
+ hotlist_found_array->rssi = (int32) plnetinfo->RSSI;
+
+ hotlist_found_array->ts =
+ convert_fw_rel_time_to_systime(&tm_spec, (plnetinfo->timestamp * 1000));
+ if (plnetinfo->pfnsubnet.SSID_len > DOT11_MAX_SSID_LEN) {
+ DHD_ERROR(("Invalid SSID length %d: trimming it to max\n",
+ plnetinfo->pfnsubnet.SSID_len));
+ plnetinfo->pfnsubnet.SSID_len = DOT11_MAX_SSID_LEN;
+ }
+ memcpy(hotlist_found_array->ssid, plnetinfo->pfnsubnet.u.SSID,
+ plnetinfo->pfnsubnet.SSID_len);
+ hotlist_found_array->ssid[plnetinfo->pfnsubnet.SSID_len] = '\0';
+
+ memcpy(&hotlist_found_array->macaddr, &plnetinfo->pfnsubnet.BSSID, ETHER_ADDR_LEN);
+ DHD_PNO(("\t%s %02x:%02x:%02x:%02x:%02x:%02x rssi %d\n", hotlist_found_array->ssid,
+ hotlist_found_array->macaddr.octet[0],
+ hotlist_found_array->macaddr.octet[1],
+ hotlist_found_array->macaddr.octet[2],
+ hotlist_found_array->macaddr.octet[3],
+ hotlist_found_array->macaddr.octet[4],
+ hotlist_found_array->macaddr.octet[5],
+ hotlist_found_array->rssi));
+ }
+
+
+ if (results->status == PFN_COMPLETE) {
ptr = (void *) gscan_hotlist_cache;
while (gscan_hotlist_cache) {
total += gscan_hotlist_cache->tot_count;
return ptr;
}
#endif /* GSCAN_SUPPORT */
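
The hotlist cache above is sized with the trailing-array idiom: the struct declares results[1] and the allocation adds (count - 1) extra wifi_gscan_result_t slots, so the header and all entries live in one block and can be released with a single kfree, which is why the cleanup path elsewhere in this patch drops the recomputed size argument. A self-contained sketch of that layout (plain malloc/free, hypothetical field names):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct scan_result { int rssi; int channel; };

struct results_cache {
	struct results_cache *next;
	int tot_count;
	struct scan_result results[1];	/* grows with the oversized allocation below */
};

static struct results_cache *cache_alloc(int count)
{
	size_t sz = sizeof(struct results_cache) +
	            (size_t)(count - 1) * sizeof(struct scan_result);
	struct results_cache *c = malloc(sz);

	if (!c)
		return NULL;
	memset(c, 0, sz);
	c->tot_count = count;
	return c;
}

int main(void)
{
	struct results_cache *c = cache_alloc(5);

	if (!c)
		return 1;
	c->results[4].rssi = -42;	/* last slot is valid thanks to the oversize alloc */
	printf("cache holds %d results, last rssi %d\n", c->tot_count, c->results[4].rssi);
	free(c);			/* one free releases header and entries together */
	return 0;
}
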
-
int
dhd_pno_event_handler(dhd_pub_t *dhd, wl_event_msg_t *event, void *event_data)
{
#ifdef GSCAN_SUPPORT
init_waitqueue_head(&_pno_state->batch_get_wait);
#endif /* GSCAN_SUPPORT */
- buf = MALLOC(dhd->osh, WLC_IOCTL_SMLEN);
+ buf = kmalloc(WLC_IOCTL_SMLEN, GFP_KERNEL);
if (!buf) {
DHD_ERROR((":%s buf alloc err.\n", __FUNCTION__));
return BCME_NOMEM;
__FUNCTION__));
}
exit:
- MFREE(dhd->osh, buf, WLC_IOCTL_SMLEN);
+ kfree(buf);
return err;
}
if (_pno_state->pno_mode & DHD_PNO_GSCAN_MODE) {
_params = &_pno_state->pno_params_arr[INDEX_OF_GSCAN_PARAMS];
mutex_lock(&_pno_state->pno_mutex);
- dhd_pno_reset_cfg_gscan(dhd, _params, _pno_state, GSCAN_FLUSH_ALL_CFG);
+ dhd_pno_reset_cfg_gscan(_params, _pno_state, GSCAN_FLUSH_ALL_CFG);
mutex_unlock(&_pno_state->pno_mutex);
}
#endif /* GSCAN_SUPPORT */
* Header file of Broadcom Dongle Host Driver (DHD)
* Prefered Network Offload code and Wi-Fi Location Service(WLS) code.
*
- * Copyright (C) 1999-2019, Broadcom.
- *
+ * Copyright (C) 1999-2017, Broadcom Corporation
+ *
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
* under the terms of the GNU General Public License version 2 (the "GPL"),
* available at http://www.broadcom.com/licenses/GPLv2.php, with the
* following added to such license:
- *
+ *
* As a special exception, the copyright holders of this software give you
* permission to link this software with independent modules, and to copy and
* distribute the resulting executable under terms of your choice, provided that
* the license of that module. An independent module is a module which is not
* derived from this software. The special exception does not apply to any
* modifications of the software.
- *
+ *
* Notwithstanding the above, under no circumstances may you combine this
* software in any way with any other Broadcom software provided under a license
* other than the GPL, without Broadcom's express prior written consent.
*
* <<Broadcom-WL-IPTag/Open:>>
*
- * $Id: dhd_pno.h 805174 2019-02-15 17:26:01Z $
+ * $Id: dhd_pno.h 707287 2017-06-27 06:44:29Z $
*/
#ifndef __DHD_PNO_H__
uint16 chan_list[GSCAN_MAX_CHANNELS_IN_BUCKET];
} dhd_pno_gscan_channel_bucket_t;
+
#define DHD_PNO_AUTH_CODE_OPEN 1 /* Open */
#define DHD_PNO_AUTH_CODE_PSK 2 /* WPA_PSK or WPA2PSK */
#define DHD_PNO_AUTH_CODE_EAPOL 4 /* any EAPOL */
struct ether_addr bssid;
} dhd_epno_results_t;
-typedef struct dhd_pno_swc_evt_param {
- uint16 results_rxed_so_far;
- wl_pfn_significant_net_t *change_array;
-} dhd_pno_swc_evt_param_t;
-
typedef struct wifi_gscan_result {
- uint64 ts; /* Time of discovery */
- char ssid[DOT11_MAX_SSID_LEN+1]; /* null terminated */
- struct ether_addr macaddr; /* BSSID */
- uint32 channel; /* channel frequency in MHz */
- int32 rssi; /* in db */
- uint64 rtt; /* in nanoseconds */
- uint64 rtt_sd; /* standard deviation in rtt */
- uint16 beacon_period; /* units are Kusec */
- uint16 capability; /* Capability information */
+ uint64 ts; /* Time of discovery */
+ char ssid[DOT11_MAX_SSID_LEN+1]; /* null terminated */
+ struct ether_addr macaddr; /* BSSID */
+ uint32 channel; /* channel frequency in MHz */
+ int32 rssi; /* in db */
+ uint64 rtt; /* in nanoseconds */
+ uint64 rtt_sd; /* standard deviation in rtt */
+ uint16 beacon_period; /* units are Kusec */
+ uint16 capability; /* Capability information */
uint32 pad;
} wifi_gscan_result_t;
int max_ap_cache_per_scan;
int max_rssi_sample_size;
int max_scan_reporting_threshold;
- int max_hotlist_bssids;
- int max_hotlist_ssids;
+ int max_hotlist_aps;
int max_significant_wifi_change_aps;
- int max_bssid_history_entries;
int max_epno_ssid_crc32;
int max_epno_hidden_ssid;
int max_white_list_ssid;
} dhd_pno_gscan_capabilities_t;
typedef struct dhd_epno_ssid_cfg {
- wl_ssid_ext_params_t params;
+ wl_pfn_ssid_params_t params;
uint32 num_epno_ssid;
struct list_head epno_ssid_list;
} dhd_epno_ssid_cfg_t;
uint8 bestn;
uint8 mscan;
uint8 buffer_threshold;
- uint8 swc_nbssid_threshold;
- uint8 swc_rssi_window_size;
uint8 lost_ap_window;
uint8 nchannel_buckets;
uint8 reason;
uint16 max_ch_bucket_freq;
gscan_results_cache_t *gscan_batch_cache;
gscan_results_cache_t *gscan_hotlist_found;
- gscan_results_cache_t*gscan_hotlist_lost;
- uint16 nbssid_significant_change;
+ gscan_results_cache_t *gscan_hotlist_lost;
uint16 nbssid_hotlist;
- struct dhd_pno_swc_evt_param param_significant;
struct dhd_pno_gscan_channel_bucket channel_bucket[GSCAN_MAX_CH_BUCKETS];
struct list_head hotlist_bssid_list;
- struct list_head significant_bssid_list;
dhd_epno_ssid_cfg_t epno_cfg;
uint32 scan_id;
};
} gscan_hotlist_scan_params_t;
#endif /* GSCAN_SUPPORT || DHD_GET_VALID_CHANNELS */
-
typedef union dhd_pno_params {
struct dhd_pno_legacy_params params_legacy;
struct dhd_pno_batch_params params_batch;
struct dhd_pno_gscan_params params_gscan;
#endif /* GSCAN_SUPPORT || DHD_GET_VALID_CHANNELS */
} dhd_pno_params_t;
-
typedef struct dhd_pno_status_info {
dhd_pub_t *dhd;
struct work_struct work;
extern int dhd_dev_pno_enable_full_scan_result(struct net_device *dev, bool real_time);
int dhd_retreive_batch_scan_results(dhd_pub_t *dhd);
extern void * dhd_dev_hotlist_scan_event(struct net_device *dev,
- const void *data, int *send_evt_bytes, hotlist_type_t type, u32 *buf_len);
+ const void *data, int *send_evt_bytes, hotlist_type_t type);
void * dhd_dev_process_full_gscan_result(struct net_device *dev,
const void *data, uint32 len, int *send_evt_bytes);
extern int dhd_dev_gscan_batch_cache_cleanup(struct net_device *dev);
extern int dhd_pno_get_for_batch(dhd_pub_t *dhd, char *buf, int bufsize, int reason);
+
extern int dhd_pno_stop_for_batch(dhd_pub_t *dhd);
extern int dhd_pno_set_for_hotlist(dhd_pub_t *dhd, wl_pfn_bssid_t *p_pfn_bssid,
extern int dhd_pno_cfg_gscan(dhd_pub_t *dhd, dhd_pno_gscan_cmd_cfg_t type, void *buf);
extern int dhd_dev_retrieve_batch_scan(struct net_device *dev);
extern void *dhd_handle_hotlist_scan_evt(dhd_pub_t *dhd, const void *event_data,
- int *send_evt_bytes, hotlist_type_t type, u32 *buf_len);
+ int *send_evt_bytes, hotlist_type_t type);
extern void *dhd_process_full_gscan_result(dhd_pub_t *dhd, const void *event_data,
uint32 len, int *send_evt_bytes);
extern int dhd_gscan_batch_cache_cleanup(dhd_pub_t *dhd);
extern int dhd_pno_flush_fw_epno(dhd_pub_t *dhd);
extern void dhd_pno_set_epno_auth_flag(uint32 *wpa_auth);
#endif /* GSCAN_SUPPORT */
-#endif // endif
+#endif
#endif /* __DHD_PNO_H__ */
* Provides type definitions and function prototypes used to link the
* DHD OS, bus, and protocol modules.
*
- * Copyright (C) 1999-2019, Broadcom.
- *
+ * Copyright (C) 1999-2017, Broadcom Corporation
+ *
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
* under the terms of the GNU General Public License version 2 (the "GPL"),
* available at http://www.broadcom.com/licenses/GPLv2.php, with the
* following added to such license:
- *
+ *
* As a special exception, the copyright holders of this software give you
* permission to link this software with independent modules, and to copy and
* distribute the resulting executable under terms of your choice, provided that
* the license of that module. An independent module is a module which is not
* derived from this software. The special exception does not apply to any
* modifications of the software.
- *
+ *
* Notwithstanding the above, under no circumstances may you combine this
* software in any way with any other Broadcom software provided under a license
* other than the GPL, without Broadcom's express prior written consent.
*
* <<Broadcom-WL-IPTag/Open:>>
*
- * $Id: dhd_proto.h 814912 2019-04-15 10:38:59Z $
+ * $Id: dhd_proto.h 678890 2017-01-11 11:48:36Z $
*/
#ifndef _dhd_proto_h_
#include <wlioctl.h>
#ifdef BCMPCIE
#include <dhd_flowring.h>
-#endif // endif
+#endif
-#define DEFAULT_IOCTL_RESP_TIMEOUT 5000
+#define DEFAULT_IOCTL_RESP_TIMEOUT 4000
#ifndef IOCTL_RESP_TIMEOUT
/* In milli second default value for Production FW */
#define IOCTL_RESP_TIMEOUT DEFAULT_IOCTL_RESP_TIMEOUT
#endif /* IOCTL_RESP_TIMEOUT */
-/* In milli second default value for Production FW */
-#define IOCTL_DMAXFER_TIMEOUT 10000
-
#ifndef MFG_IOCTL_RESP_TIMEOUT
#define MFG_IOCTL_RESP_TIMEOUT 20000 /* In milli second default value for MFG FW */
#endif /* MFG_IOCTL_RESP_TIMEOUT */
-#define DEFAULT_D3_ACK_RESP_TIMEOUT 2000
+#define DEFAULT_D3_ACK_RESP_TIMEOUT 1000
#ifndef D3_ACK_RESP_TIMEOUT
#define D3_ACK_RESP_TIMEOUT DEFAULT_D3_ACK_RESP_TIMEOUT
#endif /* D3_ACK_RESP_TIMEOUT */
uint reorder_info_len, void **pkt, uint32 *free_buf_count);
#ifdef BCMPCIE
-extern bool dhd_prot_process_msgbuf_txcpl(dhd_pub_t *dhd, uint bound, int ringtype);
-extern bool dhd_prot_process_msgbuf_rxcpl(dhd_pub_t *dhd, uint bound, int ringtype);
+extern bool dhd_prot_process_msgbuf_txcpl(dhd_pub_t *dhd, uint bound);
+extern bool dhd_prot_process_msgbuf_rxcpl(dhd_pub_t *dhd, uint bound);
extern bool dhd_prot_process_msgbuf_infocpl(dhd_pub_t *dhd, uint bound);
extern int dhd_prot_process_ctrlbuf(dhd_pub_t * dhd);
extern int dhd_prot_process_trapbuf(dhd_pub_t * dhd);
extern void dhd_prot_rx_dataoffset(dhd_pub_t *dhd, uint32 offset);
extern int dhd_prot_txdata(dhd_pub_t *dhd, void *p, uint8 ifidx);
extern int dhdmsgbuf_dmaxfer_req(dhd_pub_t *dhd,
- uint len, uint srcdelay, uint destdelay, uint d11_lpbk, uint core_num);
-extern int dhdmsgbuf_dmaxfer_status(dhd_pub_t *dhd, dma_xfer_info_t *result);
+ uint len, uint srcdelay, uint destdelay, uint d11_lpbk);
extern void dhd_dma_buf_init(dhd_pub_t *dhd, void *dma_buf,
void *va, uint32 len, dmaaddr_t pa, void *dmah, void *secdma);
struct bcmstrbuf *strbuf, const char * fmt);
extern void dhd_prot_print_info(dhd_pub_t *dhd, struct bcmstrbuf *strbuf);
extern void dhd_prot_update_txflowring(dhd_pub_t *dhdp, uint16 flow_id, void *msgring_info);
-extern void dhd_prot_txdata_write_flush(dhd_pub_t *dhd, uint16 flow_id);
+extern void dhd_prot_txdata_write_flush(dhd_pub_t *dhd, uint16 flow_id, bool in_lock);
extern uint32 dhd_prot_txp_threshold(dhd_pub_t *dhd, bool set, uint32 val);
extern void dhd_prot_reset(dhd_pub_t *dhd);
-extern uint16 dhd_get_max_flow_rings(dhd_pub_t *dhd);
#ifdef IDLE_TX_FLOW_MGMT
extern int dhd_prot_flow_ring_batch_suspend_request(dhd_pub_t *dhd, uint16 *ringid, uint16 count);
extern int dhd_prot_flow_ring_resume(dhd_pub_t *dhd, flow_ring_node_t *flow_ring_node);
#endif /* IDLE_TX_FLOW_MGMT */
extern int dhd_prot_init_info_rings(dhd_pub_t *dhd);
-#ifdef DHD_HP2P
-extern int dhd_prot_init_hp2p_rings(dhd_pub_t *dhd);
-#endif /* DHD_HP2P */
-
-extern int dhd_prot_check_tx_resource(dhd_pub_t *dhd);
-extern void dhd_prot_update_pktid_txq_stop_cnt(dhd_pub_t *dhd);
-extern void dhd_prot_update_pktid_txq_start_cnt(dhd_pub_t *dhd);
-#else
-static INLINE void dhd_prot_update_pktid_txq_stop_cnt(dhd_pub_t *dhd) { return; }
-static INLINE void dhd_prot_update_pktid_txq_start_cnt(dhd_pub_t *dhd) { return; }
#endif /* BCMPCIE */
#ifdef DHD_LB
uint16 seq, uint16 xt_id);
extern bool dhd_prot_data_path_tx_timestamp_logging(dhd_pub_t *dhd, bool enable, bool set);
extern bool dhd_prot_data_path_rx_timestamp_logging(dhd_pub_t *dhd, bool enable, bool set);
-extern bool dhd_prot_pkt_noretry(dhd_pub_t *dhd, bool enable, bool set);
-extern bool dhd_prot_pkt_noaggr(dhd_pub_t *dhd, bool enable, bool set);
-extern bool dhd_prot_pkt_fixed_rate(dhd_pub_t *dhd, bool enable, bool set);
#else /* BCMPCIE */
#define dhd_prot_send_host_timestamp(a, b, c, d, e) 0
#define dhd_prot_data_path_tx_timestamp_logging(a, b, c) 0
extern void dhd_prot_dma_indx_free(dhd_pub_t *dhd);
-#ifdef EWP_EDL
-int dhd_prot_init_edl_rings(dhd_pub_t *dhd);
-bool dhd_prot_process_msgbuf_edl(dhd_pub_t *dhd);
-int dhd_prot_process_edl_complete(dhd_pub_t *dhd, void *evt_decode_data);
-#endif /* EWP_EDL */
-
-/* APIs for managing a DMA-able buffer */
-int dhd_dma_buf_alloc(dhd_pub_t *dhd, dhd_dma_buf_t *dma_buf, uint32 buf_len);
-void dhd_dma_buf_free(dhd_pub_t *dhd, dhd_dma_buf_t *dma_buf);
-
/********************************
* For version-string expansion *
*/
#define DHD_PROTOCOL "unknown"
#endif /* proto */
-int dhd_get_hscb_info(dhd_pub_t *dhd, void ** va, uint32 *len);
-int dhd_get_hscb_buff(dhd_pub_t *dhd, uint32 offset, uint32 length, void * buff);
-
-#ifdef DHD_HP2P
-extern uint8 dhd_prot_hp2p_enable(dhd_pub_t *dhd, bool set, int enable);
-extern uint32 dhd_prot_pkt_threshold(dhd_pub_t *dhd, bool set, uint32 val);
-extern uint32 dhd_prot_time_threshold(dhd_pub_t *dhd, bool set, uint32 val);
-extern uint32 dhd_prot_pkt_expiry(dhd_pub_t *dhd, bool set, uint32 val);
-#endif // endif
-
-#ifdef DHD_MAP_LOGGING
-extern void dhd_prot_smmu_fault_dump(dhd_pub_t *dhdp);
-#endif /* DHD_MAP_LOGGING */
#endif /* _dhd_proto_h_ */
/*
* Broadcom Dongle Host Driver (DHD), RTT
*
- * Copyright (C) 1999-2019, Broadcom.
- *
+ * Copyright (C) 1999-2017, Broadcom Corporation
+ *
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
* under the terms of the GNU General Public License version 2 (the "GPL"),
* available at http://www.broadcom.com/licenses/GPLv2.php, with the
* following added to such license:
- *
+ *
* As a special exception, the copyright holders of this software give you
* permission to link this software with independent modules, and to copy and
* distribute the resulting executable under terms of your choice, provided that
* the license of that module. An independent module is a module which is not
* derived from this software. The special exception does not apply to any
* modifications of the software.
- *
+ *
* Notwithstanding the above, under no circumstances may you combine this
* software in any way with any other Broadcom software provided under a license
* other than the GPL, without Broadcom's express prior written consent.
#include <linux/sort.h>
#include <dngl_stats.h>
#include <wlioctl.h>
-#include <bcmwifi_rspec.h>
#include <bcmevent.h>
#include <dhd.h>
#include <dhd_rtt.h>
#include <dhd_dbg.h>
-#include <dhd_bus.h>
#include <wldev_common.h>
#ifdef WL_CFG80211
#include <wl_cfg80211.h>
#endif /* WL_CFG80211 */
-#ifdef WL_NAN
-#include <wl_cfgnan.h>
-#endif /* WL_NAN */
-
static DEFINE_SPINLOCK(noti_list_lock);
#define NULL_CHECK(p, s, err) \
do { \
} \
} while (0)
+#define RTT_IS_ENABLED(rtt_status) (rtt_status->status == RTT_ENABLED)
+#define RTT_IS_STOPPED(rtt_status) (rtt_status->status == RTT_STOPPED)
#define TIMESPEC_TO_US(ts) (((uint64)(ts).tv_sec * USEC_PER_SEC) + \
(ts).tv_nsec / NSEC_PER_USEC)
-#undef DHD_RTT_MEM
-#undef DHD_RTT_ERR
-#define DHD_RTT_MEM DHD_LOG_MEM
-#define DHD_RTT_ERR DHD_ERROR
-
#define FTM_IOC_BUFSZ 2048 /* ioc buffsize for our module (> BCM_XTLV_HDR_SIZE) */
#define FTM_AVAIL_MAX_SLOTS 32
#define FTM_MAX_CONFIGS 10
#define FTM_DEFAULT_SESSION 1
#define FTM_BURST_TIMEOUT_UNIT 250 /* 250 ns */
#define FTM_INVALID -1
-#define FTM_DEFAULT_CNT_20M 24u
-#define FTM_DEFAULT_CNT_40M 16u
-#define FTM_DEFAULT_CNT_80M 11u
-/* To handle congestion env, set max dur/timeout */
-#define FTM_MAX_BURST_DUR_TMO_MS 128u
+#define FTM_DEFAULT_CNT_20M 12
+#define FTM_DEFAULT_CNT_40M 10
+#define FTM_DEFAULT_CNT_80M 5
/* convenience macros */
#define FTM_TU2MICRO(_tu) ((uint64)(_tu) << 10)
/* broadcom specific set to have more accurate data */
#define ENABLE_VHT_ACK
#define CH_MIN_5G_CHANNEL 34
-
-/* CUR ETH became obsolete with this major version onwards */
-#define RTT_IOV_CUR_ETH_OBSOLETE 12
-
-/* PROXD TIMEOUT */
-#define DHD_RTT_TIMER_INTERVAL_MS 5000u
-#define DHD_NAN_RTT_TIMER_INTERVAL_MS 20000u
+#define CH_MIN_2G_CHANNEL 1
struct rtt_noti_callback {
struct list_head list;
dhd_rtt_compl_noti_fn noti_fn;
};
+
/* bitmask indicating which command groups; */
typedef enum {
FTM_SUBCMD_FLAG_METHOD = 0x01, /* FTM method command */
FTM_CONFIG_CAT_AVAIL = 3, /* 'config avail' */
} ftm_config_category_t;
+
typedef struct ftm_subcmd_info {
int16 version; /* FTM version (optional) */
char *name; /* cmd-name string as cmdline input */
ftm_subcmd_flag_t cmdflag; /* CMD flag (optional) */
} ftm_subcmd_info_t;
+
typedef struct ftm_config_options_info {
uint32 flags; /* wl_proxd_flags_t/wl_proxd_session_flags_t */
bool enable;
uint32 data32;
uint16 data16;
uint8 data8;
- uint32 event_mask;
};
} ftm_config_param_info_t;
char *text;
} ftm_strmap_entry_t;
+
typedef struct ftm_status_map_host_entry {
wl_proxd_status_t proxd_status;
rtt_reason_t rtt_reason;
} ftm_status_map_host_entry_t;
-static uint16
-rtt_result_ver(uint16 tlvid, const uint8 *p_data);
-
static int
-dhd_rtt_convert_results_to_host_v1(rtt_result_t *rtt_result, const uint8 *p_data,
- uint16 tlvid, uint16 len);
-
-static int
-dhd_rtt_convert_results_to_host_v2(rtt_result_t *rtt_result, const uint8 *p_data,
- uint16 tlvid, uint16 len);
+dhd_rtt_convert_results_to_host(rtt_report_t *rtt_report, uint8 *p_data, uint16 tlvid, uint16 len);
static wifi_rate_t
dhd_rtt_convert_rate_to_host(uint32 ratespec);
-#if defined(WL_CFG80211) && defined(RTT_DEBUG)
-const char *
-ftm_cmdid_to_str(uint16 cmdid);
-#endif /* WL_CFG80211 && RTT_DEBUG */
-
#ifdef WL_CFG80211
static int
dhd_rtt_start(dhd_pub_t *dhd);
-static int dhd_rtt_create_failure_result(rtt_status_info_t *rtt_status,
- struct ether_addr *addr);
-static void dhd_rtt_handle_rtt_session_end(dhd_pub_t *dhd);
-static void dhd_rtt_timeout_work(struct work_struct *work);
#endif /* WL_CFG80211 */
static const int burst_duration_idx[] = {0, 0, 1, 2, 4, 8, 16, 32, 64, 128, 0, 0};
/* ftm status mapping to host status */
static const ftm_status_map_host_entry_t ftm_status_map_info[] = {
- {WL_PROXD_E_INCOMPLETE, RTT_STATUS_FAILURE},
- {WL_PROXD_E_OVERRIDDEN, RTT_STATUS_FAILURE},
- {WL_PROXD_E_ASAP_FAILED, RTT_STATUS_FAILURE},
- {WL_PROXD_E_NOTSTARTED, RTT_STATUS_FAIL_NOT_SCHEDULED_YET},
- {WL_PROXD_E_INVALIDMEAS, RTT_STATUS_FAIL_INVALID_TS},
- {WL_PROXD_E_INCAPABLE, RTT_STATUS_FAIL_NO_CAPABILITY},
- {WL_PROXD_E_MISMATCH, RTT_STATUS_FAILURE},
- {WL_PROXD_E_DUP_SESSION, RTT_STATUS_FAILURE},
- {WL_PROXD_E_REMOTE_FAIL, RTT_STATUS_FAILURE},
- {WL_PROXD_E_REMOTE_INCAPABLE, RTT_STATUS_FAILURE},
- {WL_PROXD_E_SCHED_FAIL, RTT_STATUS_FAIL_SCHEDULE},
- {WL_PROXD_E_PROTO, RTT_STATUS_FAIL_PROTOCOL},
- {WL_PROXD_E_EXPIRED, RTT_STATUS_FAILURE},
- {WL_PROXD_E_TIMEOUT, RTT_STATUS_FAIL_TM_TIMEOUT},
- {WL_PROXD_E_NOACK, RTT_STATUS_FAIL_NO_RSP},
- {WL_PROXD_E_DEFERRED, RTT_STATUS_FAILURE},
- {WL_PROXD_E_INVALID_SID, RTT_STATUS_FAILURE},
- {WL_PROXD_E_REMOTE_CANCEL, RTT_STATUS_FAILURE},
- {WL_PROXD_E_CANCELED, RTT_STATUS_ABORTED},
- {WL_PROXD_E_INVALID_SESSION, RTT_STATUS_FAILURE},
- {WL_PROXD_E_BAD_STATE, RTT_STATUS_FAILURE},
- {WL_PROXD_E_ERROR, RTT_STATUS_FAILURE},
- {WL_PROXD_E_OK, RTT_STATUS_SUCCESS}
+ {WL_PROXD_E_INCOMPLETE, RTT_REASON_FAILURE},
+ {WL_PROXD_E_OVERRIDDEN, RTT_REASON_FAILURE},
+ {WL_PROXD_E_ASAP_FAILED, RTT_REASON_FAILURE},
+ {WL_PROXD_E_NOTSTARTED, RTT_REASON_FAIL_NOT_SCHEDULED_YET},
+ {WL_PROXD_E_INVALIDMEAS, RTT_REASON_FAIL_INVALID_TS},
+ {WL_PROXD_E_INCAPABLE, RTT_REASON_FAIL_NO_CAPABILITY},
+ {WL_PROXD_E_MISMATCH, RTT_REASON_FAILURE},
+ {WL_PROXD_E_DUP_SESSION, RTT_REASON_FAILURE},
+ {WL_PROXD_E_REMOTE_FAIL, RTT_REASON_FAILURE},
+ {WL_PROXD_E_REMOTE_INCAPABLE, RTT_REASON_FAILURE},
+ {WL_PROXD_E_SCHED_FAIL, RTT_REASON_FAIL_SCHEDULE},
+ {WL_PROXD_E_PROTO, RTT_REASON_FAIL_PROTOCOL},
+ {WL_PROXD_E_EXPIRED, RTT_REASON_FAILURE},
+ {WL_PROXD_E_TIMEOUT, RTT_REASON_FAIL_TM_TIMEOUT},
+ {WL_PROXD_E_NOACK, RTT_REASON_FAIL_NO_RSP},
+ {WL_PROXD_E_DEFERRED, RTT_REASON_FAILURE},
+ {WL_PROXD_E_INVALID_SID, RTT_REASON_FAILURE},
+ {WL_PROXD_E_REMOTE_CANCEL, RTT_REASON_FAILURE},
+ {WL_PROXD_E_CANCELED, RTT_REASON_ABORTED},
+ {WL_PROXD_E_INVALID_SESSION, RTT_REASON_FAILURE},
+ {WL_PROXD_E_BAD_STATE, RTT_REASON_FAILURE},
+ {WL_PROXD_E_ERROR, RTT_REASON_FAILURE},
+ {WL_PROXD_E_OK, RTT_REASON_SUCCESS}
+};
+
+/* ftm tlv-id mapping */
+static const ftm_strmap_entry_t ftm_tlvid_loginfo[] = {
+ /* { WL_PROXD_TLV_ID_xxx, "text for WL_PROXD_TLV_ID_xxx" }, */
+ { WL_PROXD_TLV_ID_NONE, "none" },
+ { WL_PROXD_TLV_ID_METHOD, "method" },
+ { WL_PROXD_TLV_ID_FLAGS, "flags" },
+ { WL_PROXD_TLV_ID_CHANSPEC, "chanspec" },
+ { WL_PROXD_TLV_ID_TX_POWER, "tx power" },
+ { WL_PROXD_TLV_ID_RATESPEC, "ratespec" },
+ { WL_PROXD_TLV_ID_BURST_DURATION, "burst duration" },
+ { WL_PROXD_TLV_ID_BURST_PERIOD, "burst period" },
+ { WL_PROXD_TLV_ID_BURST_FTM_SEP, "burst ftm sep" },
+ { WL_PROXD_TLV_ID_BURST_NUM_FTM, "burst num ftm" },
+ { WL_PROXD_TLV_ID_NUM_BURST, "num burst" },
+ { WL_PROXD_TLV_ID_FTM_RETRIES, "ftm retries" },
+ { WL_PROXD_TLV_ID_BSS_INDEX, "BSS index" },
+ { WL_PROXD_TLV_ID_BSSID, "bssid" },
+ { WL_PROXD_TLV_ID_INIT_DELAY, "burst init delay" },
+ { WL_PROXD_TLV_ID_BURST_TIMEOUT, "burst timeout" },
+ { WL_PROXD_TLV_ID_EVENT_MASK, "event mask" },
+ { WL_PROXD_TLV_ID_FLAGS_MASK, "flags mask" },
+ { WL_PROXD_TLV_ID_PEER_MAC, "peer addr" },
+ { WL_PROXD_TLV_ID_FTM_REQ, "ftm req" },
+ { WL_PROXD_TLV_ID_LCI_REQ, "lci req" },
+ { WL_PROXD_TLV_ID_LCI, "lci" },
+ { WL_PROXD_TLV_ID_CIVIC_REQ, "civic req" },
+ { WL_PROXD_TLV_ID_CIVIC, "civic" },
+ { WL_PROXD_TLV_ID_AVAIL, "availability" },
+ { WL_PROXD_TLV_ID_SESSION_FLAGS, "session flags" },
+ { WL_PROXD_TLV_ID_SESSION_FLAGS_MASK, "session flags mask" },
+ { WL_PROXD_TLV_ID_RX_MAX_BURST, "rx max bursts" },
+ { WL_PROXD_TLV_ID_RANGING_INFO, "ranging info" },
+ { WL_PROXD_TLV_ID_RANGING_FLAGS, "ranging flags" },
+ { WL_PROXD_TLV_ID_RANGING_FLAGS_MASK, "ranging flags mask" },
+ /* output - 512 + x */
+ { WL_PROXD_TLV_ID_STATUS, "status" },
+ { WL_PROXD_TLV_ID_COUNTERS, "counters" },
+ { WL_PROXD_TLV_ID_INFO, "info" },
+ { WL_PROXD_TLV_ID_RTT_RESULT, "rtt result" },
+ { WL_PROXD_TLV_ID_AOA_RESULT, "aoa result" },
+ { WL_PROXD_TLV_ID_SESSION_INFO, "session info" },
+ { WL_PROXD_TLV_ID_SESSION_STATUS, "session status" },
+ { WL_PROXD_TLV_ID_SESSION_ID_LIST, "session ids" },
+ /* debug tlvs can be added starting 1024 */
+ { WL_PROXD_TLV_ID_DEBUG_MASK, "debug mask" },
+ { WL_PROXD_TLV_ID_COLLECT, "collect" },
+ { WL_PROXD_TLV_ID_STRBUF, "result" },
+ { WL_PROXD_TLV_ID_COLLECT_DATA, "collect-data" },
+ { WL_PROXD_TLV_ID_RI_RR, "ri_rr" },
+ { WL_PROXD_TLV_ID_COLLECT_CHAN_DATA, "chan est"}
};
static const ftm_strmap_entry_t ftm_event_type_loginfo[] = {
- /* wl_proxd_event_type_t, text-string */
- { WL_PROXD_EVENT_NONE, "none" },
- { WL_PROXD_EVENT_SESSION_CREATE, "session create" },
- { WL_PROXD_EVENT_SESSION_START, "session start" },
- { WL_PROXD_EVENT_FTM_REQ, "FTM req" },
- { WL_PROXD_EVENT_BURST_START, "burst start" },
- { WL_PROXD_EVENT_BURST_END, "burst end" },
- { WL_PROXD_EVENT_SESSION_END, "session end" },
- { WL_PROXD_EVENT_SESSION_RESTART, "session restart" },
- { WL_PROXD_EVENT_BURST_RESCHED, "burst rescheduled" },
- { WL_PROXD_EVENT_SESSION_DESTROY, "session destroy" },
- { WL_PROXD_EVENT_RANGE_REQ, "range request" },
- { WL_PROXD_EVENT_FTM_FRAME, "FTM frame" },
- { WL_PROXD_EVENT_DELAY, "delay" },
- { WL_PROXD_EVENT_VS_INITIATOR_RPT, "initiator-report " }, /* rx initiator-rpt */
- { WL_PROXD_EVENT_RANGING, "ranging " },
- { WL_PROXD_EVENT_COLLECT, "collect" },
- { WL_PROXD_EVENT_MF_STATS, "mf_stats" },
+ /* wl_proxd_event_type_t, text-string */
+ { WL_PROXD_EVENT_NONE, "none" },
+ { WL_PROXD_EVENT_SESSION_CREATE, "session create" },
+ { WL_PROXD_EVENT_SESSION_START, "session start" },
+ { WL_PROXD_EVENT_FTM_REQ, "FTM req" },
+ { WL_PROXD_EVENT_BURST_START, "burst start" },
+ { WL_PROXD_EVENT_BURST_END, "burst end" },
+ { WL_PROXD_EVENT_SESSION_END, "session end" },
+ { WL_PROXD_EVENT_SESSION_RESTART, "session restart" },
+ { WL_PROXD_EVENT_BURST_RESCHED, "burst rescheduled" },
+ { WL_PROXD_EVENT_SESSION_DESTROY, "session destroy" },
+ { WL_PROXD_EVENT_RANGE_REQ, "range request" },
+ { WL_PROXD_EVENT_FTM_FRAME, "FTM frame" },
+ { WL_PROXD_EVENT_DELAY, "delay" },
+ { WL_PROXD_EVENT_VS_INITIATOR_RPT, "initiator-report " }, /* rx */
+ { WL_PROXD_EVENT_RANGING, "ranging " },
+ { WL_PROXD_EVENT_COLLECT, "collect" },
};
/*
* session-state --> text string mapping
*/
static const ftm_strmap_entry_t ftm_session_state_value_loginfo[] = {
- /* wl_proxd_session_state_t, text string */
- { WL_PROXD_SESSION_STATE_CREATED, "created" },
- { WL_PROXD_SESSION_STATE_CONFIGURED, "configured" },
- { WL_PROXD_SESSION_STATE_STARTED, "started" },
- { WL_PROXD_SESSION_STATE_DELAY, "delay" },
- { WL_PROXD_SESSION_STATE_USER_WAIT, "user-wait" },
- { WL_PROXD_SESSION_STATE_SCHED_WAIT, "sched-wait" },
- { WL_PROXD_SESSION_STATE_BURST, "burst" },
- { WL_PROXD_SESSION_STATE_STOPPING, "stopping" },
- { WL_PROXD_SESSION_STATE_ENDED, "ended" },
- { WL_PROXD_SESSION_STATE_DESTROYING, "destroying" },
- { WL_PROXD_SESSION_STATE_NONE, "none" }
+ /* wl_proxd_session_state_t, text string */
+ { WL_PROXD_SESSION_STATE_CREATED, "created" },
+ { WL_PROXD_SESSION_STATE_CONFIGURED, "configured" },
+ { WL_PROXD_SESSION_STATE_STARTED, "started" },
+ { WL_PROXD_SESSION_STATE_DELAY, "delay" },
+ { WL_PROXD_SESSION_STATE_USER_WAIT, "user-wait" },
+ { WL_PROXD_SESSION_STATE_SCHED_WAIT, "sched-wait" },
+ { WL_PROXD_SESSION_STATE_BURST, "burst" },
+ { WL_PROXD_SESSION_STATE_STOPPING, "stopping" },
+ { WL_PROXD_SESSION_STATE_ENDED, "ended" },
+ { WL_PROXD_SESSION_STATE_DESTROYING, "destroying" },
+ { WL_PROXD_SESSION_STATE_NONE, "none" }
+};
+
+/*
+* ranging-state --> text string mapping
+*/
+static const ftm_strmap_entry_t ftm_ranging_state_value_loginfo [] = {
+ /* wl_proxd_ranging_state_t, text string */
+ { WL_PROXD_RANGING_STATE_NONE, "none" },
+ { WL_PROXD_RANGING_STATE_NOTSTARTED, "nonstarted" },
+ { WL_PROXD_RANGING_STATE_INPROGRESS, "inprogress" },
+ { WL_PROXD_RANGING_STATE_DONE, "done" },
};
/*
* time interval unit --> text string mapping
*/
static const ftm_strmap_entry_t ftm_tmu_value_loginfo[] = {
- /* wl_proxd_tmu_t, text-string */
- { WL_PROXD_TMU_TU, "TU" },
- { WL_PROXD_TMU_SEC, "sec" },
- { WL_PROXD_TMU_MILLI_SEC, "ms" },
- { WL_PROXD_TMU_MICRO_SEC, "us" },
- { WL_PROXD_TMU_NANO_SEC, "ns" },
- { WL_PROXD_TMU_PICO_SEC, "ps" }
+ /* wl_proxd_tmu_t, text-string */
+ { WL_PROXD_TMU_TU, "TU" },
+ { WL_PROXD_TMU_SEC, "sec" },
+ { WL_PROXD_TMU_MILLI_SEC, "ms" },
+ { WL_PROXD_TMU_MICRO_SEC, "us" },
+ { WL_PROXD_TMU_NANO_SEC, "ns" },
+ { WL_PROXD_TMU_PICO_SEC, "ps" }
};
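+/* Ratespec decode helpers: bandwidth, encoding type and per-flag tests
+ * (SGI/LDPC/STBC/TXBF) used by the rate conversion code below.
+ */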
+#define RSPEC_BW(rspec) ((rspec) & WL_RSPEC_BW_MASK)
+#define RSPEC_IS20MHZ(rspec) (RSPEC_BW(rspec) == WL_RSPEC_BW_20MHZ)
+#define RSPEC_IS40MHZ(rspec) (RSPEC_BW(rspec) == WL_RSPEC_BW_40MHZ)
+#define RSPEC_IS80MHZ(rspec) (RSPEC_BW(rspec) == WL_RSPEC_BW_80MHZ)
+#define RSPEC_IS160MHZ(rspec) (RSPEC_BW(rspec) == WL_RSPEC_BW_160MHZ)
+
+#define IS_MCS(rspec) (((rspec) & WL_RSPEC_ENCODING_MASK) != WL_RSPEC_ENCODE_RATE)
+#define IS_STBC(rspec) (((((rspec) & WL_RSPEC_ENCODING_MASK) == WL_RSPEC_ENCODE_HT) || \
+ (((rspec) & WL_RSPEC_ENCODING_MASK) == WL_RSPEC_ENCODE_VHT)) && \
+ (((rspec) & WL_RSPEC_STBC) == WL_RSPEC_STBC))
+#define RSPEC_ISSGI(rspec) (((rspec) & WL_RSPEC_SGI) != 0)
+#define RSPEC_ISLDPC(rspec) (((rspec) & WL_RSPEC_LDPC) != 0)
+#define RSPEC_ISSTBC(rspec) (((rspec) & WL_RSPEC_STBC) != 0)
+#define RSPEC_ISTXBF(rspec) (((rspec) & WL_RSPEC_TXBF) != 0)
+#define RSPEC_ISVHT(rspec) (((rspec) & WL_RSPEC_ENCODING_MASK) == WL_RSPEC_ENCODE_VHT)
+#define RSPEC_ISHT(rspec) (((rspec) & WL_RSPEC_ENCODING_MASK) == WL_RSPEC_ENCODE_HT)
+#define RSPEC_ISLEGACY(rspec) (((rspec) & WL_RSPEC_ENCODING_MASK) == WL_RSPEC_ENCODE_RATE)
+#define RSPEC2RATE(rspec) (RSPEC_ISLEGACY(rspec) ? \
+ ((rspec) & RSPEC_RATE_MASK) : rate_rspec2rate(rspec))
+/* return rate in unit of 500Kbps -- for internal use in wlc_rate_sel.c */
+#define RSPEC2KBPS(rspec) rate_rspec2rate(rspec)
+
struct ieee_80211_mcs_rate_info {
uint8 constellation_bits;
uint8 coding_q;
} /* wlc_rate_mcs2rate */
/** take a well formed ratespec_t arg and return phy rate in [Kbps] units */
-static uint32
+int
rate_rspec2rate(uint32 rspec)
{
- int rate = 0;
+ int rate = -1;
if (RSPEC_ISLEGACY(rspec)) {
rate = 500 * (rspec & WL_RSPEC_RATE_MASK);
} else if (RSPEC_ISVHT(rspec)) {
uint mcs = (rspec & WL_RSPEC_VHT_MCS_MASK);
uint nss = (rspec & WL_RSPEC_VHT_NSS_MASK) >> WL_RSPEC_VHT_NSS_SHIFT;
- if (mcs > 9 || nss > 8) {
- DHD_RTT(("%s: Invalid mcs %d or nss %d\n", __FUNCTION__, mcs, nss));
- goto exit;
- }
+
+ ASSERT(mcs <= 9);
+ ASSERT(nss <= 8);
rate = rate_mcs2rate(mcs, nss, RSPEC_BW(rspec), RSPEC_ISSGI(rspec));
} else {
- DHD_RTT(("%s: wrong rspec:%d\n", __FUNCTION__, rspec));
+ ASSERT(0);
}
-exit:
- return rate;
+
+ return (rate == 0) ? -1 : rate;
}
char resp_buf[WLC_IOCTL_SMLEN];
}
p_entry++; /* next entry */
}
- return RTT_STATUS_FAILURE; /* not found */
+ return RTT_REASON_FAILURE; /* not found */
}
/*
* lookup 'id' (as a key) from a table
return "invalid";
}
-#if defined(WL_CFG80211) && defined(RTT_DEBUG)
+
+#ifdef RTT_DEBUG
+
/* define entry, e.g. { WL_PROXD_CMD_xxx, "WL_PROXD_CMD_xxx" } */
#define DEF_STRMAP_ENTRY(id) { (id), #id }
/*
* map a ftm cmd-id to a text-string for display
*/
-const char *
+static const char *
ftm_cmdid_to_str(uint16 cmdid)
{
return ftm_map_id_to_str((int32) cmdid, &ftm_cmdid_map[0], ARRAYSIZE(ftm_cmdid_map));
}
-#endif /* WL_CFG80211 && RTT_DEBUG */
+#endif /* RTT_DEBUG */
+
/*
* convert BCME_xxx error codes into related error strings
ARRAYSIZE(ftm_session_state_value_loginfo));
}
+
#ifdef WL_CFG80211
/*
* send 'proxd' iovar for all ftm get-related commands
status = dhd_getiovar(dhd, 0, "proxd", (char *)p_proxd_iov,
proxd_iovsize, (char **)&p_iovresp, WLC_IOCTL_SMLEN);
if (status != BCME_OK) {
- DHD_RTT_ERR(("%s: failed to send getbuf proxd iovar (CMD ID : %d), status=%d\n",
+ DHD_ERROR(("%s: failed to send getbuf proxd iovar (CMD ID : %d), status=%d\n",
__FUNCTION__, p_subcmd_info->cmdid, status));
return status;
}
tlvs_len = ltoh16(p_iovresp->len) - WL_PROXD_IOV_HDR_SIZE;
if (tlvs_len < 0) {
- DHD_RTT_ERR(("%s: alert, p_iovresp->len(%d) should not be smaller than %d\n",
+ DHD_ERROR(("%s: alert, p_iovresp->len(%d) should not be smaller than %d\n",
__FUNCTION__, ltoh16(p_iovresp->len), (int) WL_PROXD_IOV_HDR_SIZE));
tlvs_len = 0;
}
return status;
}
+
static wl_proxd_iov_t *
rtt_alloc_getset_buf(wl_proxd_method_t method, wl_proxd_session_id_t session_id,
wl_proxd_cmd_t cmdid, uint16 tlvs_bufsize, uint16 *p_out_bufsize)
{
uint16 proxd_iovsize;
- uint32 kflags;
+ uint16 kflags;
wl_proxd_tlv_t *p_tlv;
wl_proxd_iov_t *p_proxd_iov = (wl_proxd_iov_t *) NULL;
p_proxd_iov = kzalloc(proxd_iovsize, kflags);
if (p_proxd_iov == NULL) {
- DHD_RTT_ERR(("error: failed to allocate %d bytes of memory\n", proxd_iovsize));
+ DHD_ERROR(("error: failed to allocate %d bytes of memory\n", proxd_iovsize));
return NULL;
}
return p_proxd_iov;
}
+
static int
dhd_rtt_common_get_handler(dhd_pub_t *dhd, ftm_subcmd_info_t *p_subcmd_info,
wl_proxd_method_t method,
DHD_RTT(("enter %s: method=%d, session_id=%d, cmdid=%d(%s)\n",
__FUNCTION__, method, session_id, p_subcmd_info->cmdid,
ftm_cmdid_to_str(p_subcmd_info->cmdid)));
-#endif // endif
+#endif
/* alloc mem for ioctl headr + reserved 0 bufsize for tlvs (initialize to zero) */
p_proxd_iov = rtt_alloc_getset_buf(method, session_id, p_subcmd_info->cmdid,
0, &proxd_iovsize);
DHD_RTT(("enter %s: method=%d, session_id=%d, cmdid=%d(%s)\n",
__FUNCTION__, method, session_id, p_subcmd_info->cmdid,
ftm_cmdid_to_str(p_subcmd_info->cmdid)));
-#endif // endif
+#endif
/* allocate and initialize a temp buffer for 'set proxd' iovar */
proxd_iovsize = 0;
if (ret != BCME_OK) {
DHD_RTT(("error: IOVAR failed, status=%d\n", ret));
}
-#endif // endif
+#endif
/* clean up */
kfree(p_proxd_iov);
}
#endif /* WL_CFG80211 */
-/* gets the length and returns the version
- * of the wl_proxd_collect_event_t version
- */
-static uint
-rtt_collect_data_event_ver(uint16 len)
-{
- if (len > sizeof(wl_proxd_collect_event_data_v3_t)) {
- return WL_PROXD_COLLECT_EVENT_DATA_VERSION_MAX;
- } else if (len == sizeof(wl_proxd_collect_event_data_v3_t)) {
- return WL_PROXD_COLLECT_EVENT_DATA_VERSION_3;
- } else if (len == sizeof(wl_proxd_collect_event_data_v2_t)) {
- return WL_PROXD_COLLECT_EVENT_DATA_VERSION_2;
- } else {
- return WL_PROXD_COLLECT_EVENT_DATA_VERSION_1;
- }
-}
-
-static void
-rtt_collect_event_data_display(uint8 ver, void *ctx, const uint8 *p_data, uint16 len)
-{
- int i;
- wl_proxd_collect_event_data_v1_t *p_collect_data_v1 = NULL;
- wl_proxd_collect_event_data_v2_t *p_collect_data_v2 = NULL;
- wl_proxd_collect_event_data_v3_t *p_collect_data_v3 = NULL;
-
- if (!ctx || !p_data) {
- return;
- }
-
- switch (ver) {
- case WL_PROXD_COLLECT_EVENT_DATA_VERSION_1:
- DHD_RTT(("\tVERSION_1\n"));
- memcpy(ctx, p_data, sizeof(wl_proxd_collect_event_data_v1_t));
- p_collect_data_v1 = (wl_proxd_collect_event_data_v1_t *)ctx;
- DHD_RTT(("\tH_RX\n"));
- for (i = 0; i < K_TOF_COLLECT_H_SIZE_20MHZ; i++) {
- p_collect_data_v1->H_RX[i] = ltoh32_ua(&p_collect_data_v1->H_RX[i]);
- DHD_RTT(("\t%u\n", p_collect_data_v1->H_RX[i]));
- }
- DHD_RTT(("\n"));
- DHD_RTT(("\tH_LB\n"));
- for (i = 0; i < K_TOF_COLLECT_H_SIZE_20MHZ; i++) {
- p_collect_data_v1->H_LB[i] = ltoh32_ua(&p_collect_data_v1->H_LB[i]);
- DHD_RTT(("\t%u\n", p_collect_data_v1->H_LB[i]));
- }
- DHD_RTT(("\n"));
- DHD_RTT(("\tri_rr\n"));
- for (i = 0; i < FTM_TPK_RI_RR_LEN; i++) {
- DHD_RTT(("\t%u\n", p_collect_data_v1->ri_rr[i]));
- }
- p_collect_data_v1->phy_err_mask = ltoh32_ua(&p_collect_data_v1->phy_err_mask);
- DHD_RTT(("\tphy_err_mask=0x%x\n", p_collect_data_v1->phy_err_mask));
- break;
- case WL_PROXD_COLLECT_EVENT_DATA_VERSION_2:
- memcpy(ctx, p_data, sizeof(wl_proxd_collect_event_data_v2_t));
- p_collect_data_v2 = (wl_proxd_collect_event_data_v2_t *)ctx;
- DHD_RTT(("\tH_RX\n"));
- for (i = 0; i < K_TOF_COLLECT_H_SIZE_20MHZ; i++) {
- p_collect_data_v2->H_RX[i] = ltoh32_ua(&p_collect_data_v2->H_RX[i]);
- DHD_RTT(("\t%u\n", p_collect_data_v2->H_RX[i]));
- }
- DHD_RTT(("\n"));
- DHD_RTT(("\tH_LB\n"));
- for (i = 0; i < K_TOF_COLLECT_H_SIZE_20MHZ; i++) {
- p_collect_data_v2->H_LB[i] = ltoh32_ua(&p_collect_data_v2->H_LB[i]);
- DHD_RTT(("\t%u\n", p_collect_data_v2->H_LB[i]));
- }
- DHD_RTT(("\n"));
- DHD_RTT(("\tri_rr\n"));
- for (i = 0; i < FTM_TPK_RI_RR_LEN_SECURE_2_0; i++) {
- DHD_RTT(("\t%u\n", p_collect_data_v2->ri_rr[i]));
- }
- p_collect_data_v2->phy_err_mask = ltoh32_ua(&p_collect_data_v2->phy_err_mask);
- DHD_RTT(("\tphy_err_mask=0x%x\n", p_collect_data_v2->phy_err_mask));
- break;
- case WL_PROXD_COLLECT_EVENT_DATA_VERSION_3:
- memcpy(ctx, p_data, sizeof(wl_proxd_collect_event_data_v3_t));
- p_collect_data_v3 = (wl_proxd_collect_event_data_v3_t *)ctx;
- switch (p_collect_data_v3->version) {
- case WL_PROXD_COLLECT_EVENT_DATA_VERSION_3:
- if (p_collect_data_v3->length !=
- (len - OFFSETOF(wl_proxd_collect_event_data_v3_t, H_LB))) {
- DHD_RTT(("\tversion/length mismatch\n"));
- break;
- }
- DHD_RTT(("\tH_RX\n"));
- for (i = 0; i < K_TOF_COLLECT_H_SIZE_20MHZ; i++) {
- p_collect_data_v3->H_RX[i] =
- ltoh32_ua(&p_collect_data_v3->H_RX[i]);
- DHD_RTT(("\t%u\n", p_collect_data_v3->H_RX[i]));
- }
- DHD_RTT(("\n"));
- DHD_RTT(("\tH_LB\n"));
- for (i = 0; i < K_TOF_COLLECT_H_SIZE_20MHZ; i++) {
- p_collect_data_v3->H_LB[i] =
- ltoh32_ua(&p_collect_data_v3->H_LB[i]);
- DHD_RTT(("\t%u\n", p_collect_data_v3->H_LB[i]));
- }
- DHD_RTT(("\n"));
- DHD_RTT(("\tri_rr\n"));
- for (i = 0; i < FTM_TPK_RI_RR_LEN_SECURE_2_0; i++) {
- DHD_RTT(("\t%u\n", p_collect_data_v3->ri_rr[i]));
- }
- p_collect_data_v3->phy_err_mask =
- ltoh32_ua(&p_collect_data_v3->phy_err_mask);
- DHD_RTT(("\tphy_err_mask=0x%x\n", p_collect_data_v3->phy_err_mask));
- break;
- /* future case */
- }
- break;
- }
-}
-
-static uint16
-rtt_result_ver(uint16 tlvid, const uint8 *p_data)
-{
- uint16 ret = BCME_OK;
- const wl_proxd_rtt_result_v2_t *r_v2 = NULL;
-
- switch (tlvid) {
- case WL_PROXD_TLV_ID_RTT_RESULT:
- BCM_REFERENCE(p_data);
- ret = WL_PROXD_RTT_RESULT_VERSION_1;
- break;
- case WL_PROXD_TLV_ID_RTT_RESULT_V2:
- if (p_data) {
- r_v2 = (const wl_proxd_rtt_result_v2_t *)p_data;
- if (r_v2->version == WL_PROXD_RTT_RESULT_VERSION_2) {
- ret = WL_PROXD_RTT_RESULT_VERSION_2;
- }
- }
- break;
- default:
- DHD_RTT_ERR(("%s: > Unsupported TLV ID %d\n",
- __FUNCTION__, tlvid));
- break;
- }
- return ret;
-}
-
-/* pretty hex print a contiguous buffer */
-static void
-rtt_prhex(const char *msg, const uint8 *buf, uint nbytes)
-{
- char line[128], *p;
- int len = sizeof(line);
- int nchar;
- uint i;
-
- if (msg && (msg[0] != '\0'))
- DHD_RTT(("%s:\n", msg));
-
- p = line;
- for (i = 0; i < nbytes; i++) {
- if (i % 16 == 0) {
- nchar = snprintf(p, len, " %04d: ", i); /* line prefix */
- p += nchar;
- len -= nchar;
- }
- if (len > 0) {
- nchar = snprintf(p, len, "%02x ", buf[i]);
- p += nchar;
- len -= nchar;
- }
-
- if (i % 16 == 15) {
- DHD_RTT(("%s\n", line)); /* flush line */
- p = line;
- len = sizeof(line);
- }
- }
-
- /* flush last partial line */
- if (p != line)
- DHD_RTT(("%s\n", line));
-}
-
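+/* xTLV unpack callback for proxd responses: converts RTT result TLVs to the
+ * host rtt_report format and logs session-status/collect-data TLVs.
+ */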
static int
-rtt_unpack_xtlv_cbfn(void *ctx, const uint8 *p_data, uint16 tlvid, uint16 len)
+rtt_unpack_xtlv_cbfn(void *ctx, uint8 *p_data, uint16 tlvid, uint16 len)
{
int ret = BCME_OK;
int i;
wl_proxd_ftm_session_status_t *p_data_info = NULL;
+ wl_proxd_collect_event_data_t *p_collect_data = NULL;
uint32 chan_data_entry = 0;
- uint16 expected_rtt_result_ver = 0;
-
- BCM_REFERENCE(p_data_info);
switch (tlvid) {
case WL_PROXD_TLV_ID_RTT_RESULT:
- case WL_PROXD_TLV_ID_RTT_RESULT_V2:
- DHD_RTT(("WL_PROXD_TLV_ID_RTT_RESULT\n"));
- expected_rtt_result_ver = rtt_result_ver(tlvid, p_data);
- switch (expected_rtt_result_ver) {
- case WL_PROXD_RTT_RESULT_VERSION_1:
- ret = dhd_rtt_convert_results_to_host_v1((rtt_result_t *)ctx,
- p_data, tlvid, len);
- break;
- case WL_PROXD_RTT_RESULT_VERSION_2:
- ret = dhd_rtt_convert_results_to_host_v2((rtt_result_t *)ctx,
- p_data, tlvid, len);
- break;
- default:
- DHD_RTT_ERR((" > Unsupported RTT_RESULT version\n"));
- ret = BCME_UNSUPPORTED;
- break;
- }
+ ret = dhd_rtt_convert_results_to_host((rtt_report_t *)ctx,
+ p_data, tlvid, len);
break;
case WL_PROXD_TLV_ID_SESSION_STATUS:
DHD_RTT(("WL_PROXD_TLV_ID_SESSION_STATUS\n"));
break;
case WL_PROXD_TLV_ID_COLLECT_DATA:
DHD_RTT(("WL_PROXD_TLV_ID_COLLECT_DATA\n"));
- rtt_collect_event_data_display(
- rtt_collect_data_event_ver(len),
- ctx, p_data, len);
+ memcpy(ctx, p_data, sizeof(wl_proxd_collect_event_data_t));
+ p_collect_data = (wl_proxd_collect_event_data_t *)ctx;
+ DHD_RTT(("\tH_RX\n"));
+ for (i = 0; i < K_TOF_COLLECT_H_SIZE_20MHZ; i++) {
+ p_collect_data->H_RX[i] = ltoh32_ua(&p_collect_data->H_RX[i]);
+ DHD_RTT(("\t%u\n", p_collect_data->H_RX[i]));
+ }
+ DHD_RTT(("\n"));
+ DHD_RTT(("\tH_LB\n"));
+ for (i = 0; i < K_TOF_COLLECT_H_SIZE_20MHZ; i++) {
+ p_collect_data->H_LB[i] = ltoh32_ua(&p_collect_data->H_LB[i]);
+ DHD_RTT(("\t%u\n", p_collect_data->H_LB[i]));
+ }
+ DHD_RTT(("\n"));
+ DHD_RTT(("\tri_rr\n"));
+ for (i = 0; i < FTM_TPK_RI_RR_LEN; i++) {
+ DHD_RTT(("\t%u\n", p_collect_data->ri_rr[i]));
+ }
+ p_collect_data->phy_err_mask = ltoh32_ua(&p_collect_data->phy_err_mask);
+ DHD_RTT(("\tphy_err_mask=0x%x\n", p_collect_data->phy_err_mask));
break;
case WL_PROXD_TLV_ID_COLLECT_CHAN_DATA:
- GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
DHD_RTT(("WL_PROXD_TLV_ID_COLLECT_CHAN_DATA\n"));
DHD_RTT(("\tchan est %u\n", (uint32) (len / sizeof(uint32))));
- for (i = 0; (uint16)i < (len/sizeof(chan_data_entry)); i++) {
+ for (i = 0; i < (len/sizeof(chan_data_entry)); i++) {
uint32 *p = (uint32*)p_data;
chan_data_entry = ltoh32_ua(p + i);
DHD_RTT(("\t%u\n", chan_data_entry));
}
- GCC_DIAGNOSTIC_POP();
- break;
- case WL_PROXD_TLV_ID_MF_STATS_DATA:
- DHD_RTT(("WL_PROXD_TLV_ID_MF_STATS_DATA\n"));
- DHD_RTT(("\tmf stats len=%u\n", len));
- rtt_prhex("", p_data, len);
break;
default:
- DHD_RTT_ERR(("> Unsupported TLV ID %d\n", tlvid));
+ DHD_ERROR(("> Unsupported TLV ID %d\n", tlvid));
ret = BCME_ERROR;
break;
}
flags_mask = htol32(flags_mask);
/* setup flags_mask TLV */
ret = bcm_pack_xtlv_entry((uint8 **)p_tlv, p_buf_space_left,
- type, sizeof(uint32), (uint8 *)&flags_mask, BCM_XTLV_OPTION_ALIGN32);
+ type, sizeof(uint32), &flags_mask, BCM_XTLV_OPTION_ALIGN32);
if (ret != BCME_OK) {
- DHD_RTT_ERR(("%s : bcm_pack_xltv_entry() for mask flags failed, status=%d\n",
+ DHD_ERROR(("%s : bcm_pack_xltv_entry() for mask flags failed, status=%d\n",
__FUNCTION__, ret));
goto exit;
}
WL_PROXD_TLV_ID_FLAGS : WL_PROXD_TLV_ID_SESSION_FLAGS;
/* setup flags TLV */
ret = bcm_pack_xtlv_entry((uint8 **)p_tlv, p_buf_space_left,
- type, sizeof(uint32), (uint8 *)&flags, BCM_XTLV_OPTION_ALIGN32);
+ type, sizeof(uint32), &flags, BCM_XTLV_OPTION_ALIGN32);
if (ret != BCME_OK) {
#ifdef RTT_DEBUG
DHD_RTT(("%s: bcm_pack_xltv_entry() for flags failed, status=%d\n",
__FUNCTION__, ret));
-#endif // endif
+#endif
}
exit:
return ret;
break;
case WL_PROXD_TLV_ID_BSSID: /* mac address */
case WL_PROXD_TLV_ID_PEER_MAC:
- case WL_PROXD_TLV_ID_CUR_ETHER_ADDR:
p_src_data = &p_config_param_info->mac_addr;
src_data_size = sizeof(struct ether_addr);
break;
break;
}
if (ret != BCME_OK) {
- DHD_RTT_ERR(("%s bad TLV ID : %d\n",
+ DHD_ERROR(("%s bad TLV ID : %d\n",
__FUNCTION__, p_config_param_info->tlvid));
break;
}
ret = bcm_pack_xtlv_entry((uint8 **) p_tlv, p_buf_space_left,
- p_config_param_info->tlvid, src_data_size, (uint8 *)p_src_data,
+ p_config_param_info->tlvid, src_data_size, p_src_data,
BCM_XTLV_OPTION_ALIGN32);
if (ret != BCME_OK) {
- DHD_RTT_ERR(("%s: bcm_pack_xltv_entry() failed,"
+ DHD_ERROR(("%s: bcm_pack_xltv_entry() failed,"
" status=%d\n", __FUNCTION__, ret));
break;
}
return dhd_rtt_common_set_handler(dhd, &subcmd_info,
WL_PROXD_METHOD_FTM, session_id);
}
-#ifdef WL_NAN
-int
-dhd_rtt_delete_nan_session(dhd_pub_t *dhd)
-{
- struct net_device *dev = dhd_linux_get_primary_netdev(dhd);
- struct wireless_dev *wdev = ndev_to_wdev(dev);
- struct wiphy *wiphy = wdev->wiphy;
- struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
- wl_cfgnan_terminate_directed_rtt_sessions(dev, cfg);
- return BCME_OK;
-}
-#endif /* WL_NAN */
-/* API to find out if the given Peer Mac from FTM events
-* is nan-peer. Based on this we will handle the SESSION_END
-* event. For nan-peer FTM_SESSION_END event is ignored and handled in
-* nan-ranging-cancel or nan-ranging-end event.
-*/
-static bool
-dhd_rtt_is_nan_peer(dhd_pub_t *dhd, struct ether_addr *peer_mac)
-{
-#ifdef WL_NAN
- struct net_device *dev = dhd_linux_get_primary_netdev(dhd);
- struct wireless_dev *wdev = ndev_to_wdev(dev);
- struct wiphy *wiphy = wdev->wiphy;
- struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
- nan_ranging_inst_t *ranging_inst = NULL;
- bool ret = FALSE;
-
- if (cfg->nan_enable == FALSE || ETHER_ISNULLADDR(peer_mac)) {
- goto exit;
- }
-
- ranging_inst = wl_cfgnan_check_for_ranging(cfg, peer_mac);
- if (ranging_inst) {
- DHD_RTT((" RTT peer is of type NAN\n"));
- ret = TRUE;
- goto exit;
- }
-exit:
- return ret;
-#else
- return FALSE;
-#endif /* WL_NAN */
-}
-
-#ifdef WL_NAN
-static int
-dhd_rtt_nan_start_session(dhd_pub_t *dhd, rtt_target_info_t *rtt_target)
-{
- s32 err = BCME_OK;
- struct net_device *dev = dhd_linux_get_primary_netdev(dhd);
- struct wireless_dev *wdev = ndev_to_wdev(dev);
- struct wiphy *wiphy = wdev->wiphy;
- struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
- wl_nan_ev_rng_rpt_ind_t range_res;
- nan_ranging_inst_t *ranging_inst = NULL;
- rtt_status_info_t *rtt_status = GET_RTTSTATE(dhd);
-
- NAN_MUTEX_LOCK();
-
- bzero(&range_res, sizeof(range_res));
-
- if (!rtt_status) {
- err = BCME_NOTENABLED;
- goto done;
- }
-
- if (!cfg->nan_enable) { /* If nan is not enabled report error */
- err = BCME_NOTENABLED;
- goto done;
- }
-
- /* check if new ranging session allowed */
- if (!wl_cfgnan_ranging_allowed(cfg)) {
- /* responder should be in progress because initiator requests are
- * queued in DHD. Since initiator has more proef cancel responder
- * sessions
- */
- wl_cfgnan_cancel_rng_responders(dev, cfg);
- }
-
- ranging_inst = wl_cfgnan_get_ranging_inst(cfg,
- &rtt_target->addr, NAN_RANGING_ROLE_INITIATOR);
- if (!ranging_inst) {
- err = BCME_NORESOURCE;
- goto done;
- }
-
- DHD_RTT(("Trigger nan based range request\n"));
- err = wl_cfgnan_trigger_ranging(bcmcfg_to_prmry_ndev(cfg),
- cfg, ranging_inst, NULL, NAN_RANGE_REQ_CMD, TRUE);
- if (unlikely(err)) {
- goto done;
- }
- ranging_inst->range_type = RTT_TYPE_NAN_DIRECTED;
- ranging_inst->range_role = NAN_RANGING_ROLE_INITIATOR;
- /* schedule proxd timeout */
- schedule_delayed_work(&rtt_status->proxd_timeout,
- msecs_to_jiffies(DHD_NAN_RTT_TIMER_INTERVAL_MS));
-done:
- if (err) { /* notify failure RTT event to host */
- DHD_RTT_ERR(("Failed to issue Nan Ranging Request err %d\n", err));
- dhd_rtt_handle_nan_rtt_session_end(dhd, &rtt_target->addr);
- /* try to reset geofence */
- if (ranging_inst) {
- wl_cfgnan_reset_geofence_ranging(cfg, ranging_inst,
- RTT_SCHED_DIR_TRIGGER_FAIL);
- }
- }
- NAN_MUTEX_UNLOCK();
- return err;
-}
-#endif /* WL_NAN */
static int
dhd_rtt_ftm_config(dhd_pub_t *dhd, wl_proxd_session_id_t session_id,
FTM_IOC_BUFSZ, &proxd_iovsize);
if (p_proxd_iov == NULL) {
- DHD_RTT_ERR(("%s : failed to allocate the iovar (size :%d)\n",
+ DHD_ERROR(("%s : failed to allocate the iovar (size :%d)\n",
__FUNCTION__, FTM_IOC_BUFSZ));
return BCME_NOMEM;
}
ret = dhd_iovar(dhd, 0, "proxd", (char *)p_proxd_iov,
all_tlvsize + WL_PROXD_IOV_HDR_SIZE, NULL, 0, TRUE);
if (ret != BCME_OK) {
- DHD_RTT_ERR(("%s : failed to set config\n", __FUNCTION__));
+ DHD_ERROR(("%s : failed to set config\n", __FUNCTION__));
}
}
/* clean up */
chanspec = wf_chspec_80(center_chan, primary_chan);
break;
default:
- DHD_RTT_ERR(("doesn't support this bandwith : %d", channel.width));
+ DHD_ERROR(("doesn't support this bandwith : %d", channel.width));
bw = -1;
break;
}
{
int err = BCME_OK;
int idx;
- rtt_status_info_t *rtt_status = NULL;
- struct net_device *dev = NULL;
-
+ rtt_status_info_t *rtt_status;
NULL_CHECK(params, "params is NULL", err);
- NULL_CHECK(dhd, "dhd is NULL", err);
- dev = dhd_linux_get_primary_netdev(dhd);
+ NULL_CHECK(dhd, "dhd is NULL", err);
rtt_status = GET_RTTSTATE(dhd);
NULL_CHECK(rtt_status, "rtt_status is NULL", err);
- NULL_CHECK(dev, "dev is NULL", err);
-
- mutex_lock(&rtt_status->rtt_work_mutex);
if (!HAS_11MC_CAP(rtt_status->rtt_capa.proto)) {
- DHD_RTT_ERR(("doesn't support RTT \n"));
- err = BCME_ERROR;
- goto exit;
- }
-
- DHD_RTT(("%s enter\n", __FUNCTION__));
-
- if (params->rtt_target_cnt > 0) {
-#ifdef WL_NAN
- /* cancel ongoing geofence RTT if there */
- if ((err = wl_cfgnan_suspend_geofence_rng_session(dev,
- NULL, RTT_GEO_SUSPN_HOST_DIR_RTT_TRIG, 0)) != BCME_OK) {
- goto exit;
- }
-#endif /* WL_NAN */
- } else {
- err = BCME_BADARG;
- goto exit;
+ DHD_ERROR(("doesn't support RTT \n"));
+ return BCME_ERROR;
}
-
- mutex_lock(&rtt_status->rtt_mutex);
if (rtt_status->status != RTT_STOPPED) {
- DHD_RTT_ERR(("rtt is already started\n"));
- err = BCME_BUSY;
- goto exit;
+ DHD_ERROR(("rtt is already started\n"));
+ return BCME_BUSY;
}
+ DHD_RTT(("%s enter\n", __FUNCTION__));
+
memset(rtt_status->rtt_config.target_info, 0, TARGET_INFO_SIZE(RTT_MAX_TARGET_CNT));
rtt_status->rtt_config.rtt_target_cnt = params->rtt_target_cnt;
memcpy(rtt_status->rtt_config.target_info,
params->target_info, TARGET_INFO_SIZE(params->rtt_target_cnt));
rtt_status->status = RTT_STARTED;
- DHD_RTT_MEM(("dhd_rtt_set_cfg: RTT Started, target_cnt = %d\n", params->rtt_target_cnt));
/* start to measure RTT from first device */
/* find next target to trigger RTT */
for (idx = rtt_status->cur_idx; idx < rtt_status->rtt_config.rtt_target_cnt; idx++) {
}
if (idx < rtt_status->rtt_config.rtt_target_cnt) {
DHD_RTT(("rtt_status->cur_idx : %d\n", rtt_status->cur_idx));
- rtt_status->rtt_sched_reason = RTT_SCHED_HOST_TRIGGER;
- /* Cancel pending retry timer if any */
- if (delayed_work_pending(&rtt_status->rtt_retry_timer)) {
- cancel_delayed_work(&rtt_status->rtt_retry_timer);
- }
schedule_work(&rtt_status->work);
}
-exit:
- mutex_unlock(&rtt_status->rtt_mutex);
- mutex_unlock(&rtt_status->rtt_work_mutex);
return err;
}
-#ifdef WL_NAN
-void
-dhd_rtt_initialize_geofence_cfg(dhd_pub_t *dhd)
-{
- rtt_status_info_t *rtt_status = GET_RTTSTATE(dhd);
- if (!rtt_status) {
- return;
- }
-
- GEOFENCE_RTT_LOCK(rtt_status);
- memset_s(&rtt_status->geofence_cfg, sizeof(rtt_status->geofence_cfg),
- 0, sizeof(rtt_status->geofence_cfg));
-
- /* initialize non zero params of geofence cfg */
- rtt_status->geofence_cfg.cur_target_idx = DHD_RTT_INVALID_TARGET_INDEX;
- rtt_status->geofence_cfg.geofence_rtt_interval = DHD_RTT_RETRY_TIMER_INTERVAL_MS;
- GEOFENCE_RTT_UNLOCK(rtt_status);
- return;
-}
-
-#ifdef RTT_GEOFENCE_CONT
-void
-dhd_rtt_get_geofence_cont_ind(dhd_pub_t *dhd, bool* geofence_cont)
-{
- rtt_status_info_t *rtt_status = GET_RTTSTATE(dhd);
- if (!rtt_status) {
- return;
- }
- GEOFENCE_RTT_LOCK(rtt_status);
- *geofence_cont = rtt_status->geofence_cfg.geofence_cont;
- GEOFENCE_RTT_UNLOCK(rtt_status);
-}
-
-void
-dhd_rtt_set_geofence_cont_ind(dhd_pub_t *dhd, bool geofence_cont)
-{
- rtt_status_info_t *rtt_status = GET_RTTSTATE(dhd);
- if (!rtt_status) {
- return;
- }
- GEOFENCE_RTT_LOCK(rtt_status);
- rtt_status->geofence_cfg.geofence_cont = geofence_cont;
- DHD_RTT(("dhd_rtt_set_geofence_cont_override, geofence_cont = %d\n",
- rtt_status->geofence_cfg.geofence_cont));
- GEOFENCE_RTT_UNLOCK(rtt_status);
-}
-#endif /* RTT_GEOFENCE_CONT */
-
-#ifdef RTT_GEOFENCE_INTERVAL
-void
-dhd_rtt_set_geofence_rtt_interval(dhd_pub_t *dhd, int interval)
-{
- rtt_status_info_t *rtt_status = GET_RTTSTATE(dhd);
- if (!rtt_status) {
- return;
- }
- GEOFENCE_RTT_LOCK(rtt_status);
- rtt_status->geofence_cfg.geofence_rtt_interval = interval;
- DHD_RTT(("dhd_rtt_set_geofence_rtt_interval: geofence interval = %d\n",
- rtt_status->geofence_cfg.geofence_rtt_interval));
- GEOFENCE_RTT_UNLOCK(rtt_status);
-}
-#endif /* RTT_GEOFENCE_INTERVAL */
-
-/* sets geofence role concurrency state TRUE/FALSE */
-void
-dhd_rtt_set_role_concurrency_state(dhd_pub_t *dhd, bool state)
-{
- rtt_status_info_t *rtt_status = GET_RTTSTATE(dhd);
- if (!rtt_status) {
- return;
- }
- GEOFENCE_RTT_LOCK(rtt_status);
- rtt_status->geofence_cfg.role_concurr_state = state;
- GEOFENCE_RTT_UNLOCK(rtt_status);
-}
-
-/* returns TRUE if geofence role concurrency constraint exists */
-bool
-dhd_rtt_get_role_concurrency_state(dhd_pub_t *dhd)
-{
- rtt_status_info_t *rtt_status = GET_RTTSTATE(dhd);
- if (!rtt_status) {
- return FALSE;
- }
- return rtt_status->geofence_cfg.role_concurr_state;
-}
-
-int8
-dhd_rtt_get_geofence_target_cnt(dhd_pub_t *dhd)
-{
- rtt_status_info_t *rtt_status = GET_RTTSTATE(dhd);
- if (!rtt_status) {
- return 0;
- }
- return rtt_status->geofence_cfg.geofence_target_cnt;
-}
-
-/* sets geofence rtt state TRUE/FALSE */
-void
-dhd_rtt_set_geofence_rtt_state(dhd_pub_t *dhd, bool state)
-{
- rtt_status_info_t *rtt_status = GET_RTTSTATE(dhd);
- if (!rtt_status) {
- return;
- }
- GEOFENCE_RTT_LOCK(rtt_status);
- rtt_status->geofence_cfg.rtt_in_progress = state;
- GEOFENCE_RTT_UNLOCK(rtt_status);
-}
-
-/* returns TRUE if geofence rtt is in progress */
-bool
-dhd_rtt_get_geofence_rtt_state(dhd_pub_t *dhd)
-{
- rtt_status_info_t *rtt_status = GET_RTTSTATE(dhd);
-
- if (!rtt_status) {
- return FALSE;
- }
-
- return rtt_status->geofence_cfg.rtt_in_progress;
-}
-
-/* returns geofence RTT target list Head */
-rtt_geofence_target_info_t*
-dhd_rtt_get_geofence_target_head(dhd_pub_t *dhd)
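+/* Disable RTT for the MACs in mac_list; on an all-cancel request, flush the
+ * cached results, notify registered completion callbacks and turn FTM off.
+ */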
+int
+dhd_rtt_stop(dhd_pub_t *dhd, struct ether_addr *mac_list, int mac_cnt)
{
- rtt_status_info_t *rtt_status = GET_RTTSTATE(dhd);
- rtt_geofence_target_info_t* head = NULL;
+ int err = BCME_OK;
+#ifdef WL_CFG80211
+ int i = 0, j = 0;
+ rtt_status_info_t *rtt_status;
+ rtt_results_header_t *entry, *next;
+ rtt_result_t *rtt_result, *next2;
+ struct rtt_noti_callback *iter;
- if (!rtt_status) {
- return NULL;
+ NULL_CHECK(dhd, "dhd is NULL", err);
+ rtt_status = GET_RTTSTATE(dhd);
+ NULL_CHECK(rtt_status, "rtt_status is NULL", err);
+ if (rtt_status->status == RTT_STOPPED) {
+ DHD_ERROR(("rtt is not started\n"));
+ return BCME_OK;
}
-
- if (rtt_status->geofence_cfg.geofence_target_cnt) {
- head = &rtt_status->geofence_cfg.geofence_target_info[0];
- }
-
- return head;
-}
-
-int8
-dhd_rtt_get_geofence_cur_target_idx(dhd_pub_t *dhd)
-{
- int8 target_cnt = 0, cur_idx = DHD_RTT_INVALID_TARGET_INDEX;
- rtt_status_info_t *rtt_status = GET_RTTSTATE(dhd);
-
- if (!rtt_status) {
- goto exit;
- }
-
- target_cnt = rtt_status->geofence_cfg.geofence_target_cnt;
- if (target_cnt == 0) {
- goto exit;
- }
-
- cur_idx = rtt_status->geofence_cfg.cur_target_idx;
- ASSERT(cur_idx <= target_cnt);
-
-exit:
- return cur_idx;
-}
-
-void
-dhd_rtt_move_geofence_cur_target_idx_to_next(dhd_pub_t *dhd)
-{
- rtt_status_info_t *rtt_status = GET_RTTSTATE(dhd);
-
- if (!rtt_status) {
- return;
- }
-
- if (rtt_status->geofence_cfg.geofence_target_cnt == 0) {
- /* Invalidate current idx if no targets */
- rtt_status->geofence_cfg.cur_target_idx =
- DHD_RTT_INVALID_TARGET_INDEX;
- /* Cancel pending retry timer if any */
- if (delayed_work_pending(&rtt_status->rtt_retry_timer)) {
- cancel_delayed_work(&rtt_status->rtt_retry_timer);
- }
- return;
- }
- rtt_status->geofence_cfg.cur_target_idx++;
-
- if (rtt_status->geofence_cfg.cur_target_idx >=
- rtt_status->geofence_cfg.geofence_target_cnt) {
- /* Reset once all targets done */
- rtt_status->geofence_cfg.cur_target_idx = 0;
- }
-}
-
-/* returns geofence current RTT target */
-rtt_geofence_target_info_t*
-dhd_rtt_get_geofence_current_target(dhd_pub_t *dhd)
-{
- rtt_status_info_t *rtt_status = GET_RTTSTATE(dhd);
- rtt_geofence_target_info_t* cur_target = NULL;
- int cur_idx = 0;
-
- if (!rtt_status) {
- return NULL;
- }
-
- cur_idx = dhd_rtt_get_geofence_cur_target_idx(dhd);
- if (cur_idx >= 0) {
- cur_target = &rtt_status->geofence_cfg.geofence_target_info[cur_idx];
- }
-
- return cur_target;
-}
-
-/* returns geofence target from list for the peer */
-rtt_geofence_target_info_t*
-dhd_rtt_get_geofence_target(dhd_pub_t *dhd, struct ether_addr* peer_addr, int8 *index)
-{
- int8 i;
- rtt_status_info_t *rtt_status;
- int target_cnt;
- rtt_geofence_target_info_t *geofence_target_info, *tgt = NULL;
-
- rtt_status = GET_RTTSTATE(dhd);
-
- if (!rtt_status) {
- return NULL;
- }
-
- target_cnt = rtt_status->geofence_cfg.geofence_target_cnt;
- geofence_target_info = rtt_status->geofence_cfg.geofence_target_info;
-
- /* Loop through to find target */
- for (i = 0; i < target_cnt; i++) {
- if (geofence_target_info[i].valid == FALSE) {
- break;
- }
- if (!memcmp(peer_addr, &geofence_target_info[i].peer_addr,
- ETHER_ADDR_LEN)) {
- *index = i;
- tgt = &geofence_target_info[i];
- }
- }
- if (!tgt) {
- DHD_RTT(("dhd_rtt_get_geofence_target: Target not found in list,"
- " MAC ADDR: "MACDBG" \n", MAC2STRDBG(peer_addr)));
- }
- return tgt;
-}
-
-/* add geofence target to the target list */
-int
-dhd_rtt_add_geofence_target(dhd_pub_t *dhd, rtt_geofence_target_info_t *target)
-{
- int err = BCME_OK;
- rtt_status_info_t *rtt_status;
- rtt_geofence_target_info_t *geofence_target_info;
- int8 geofence_target_cnt, index;
-
- NULL_CHECK(dhd, "dhd is NULL", err);
- rtt_status = GET_RTTSTATE(dhd);
- NULL_CHECK(rtt_status, "rtt_status is NULL", err);
-
- GEOFENCE_RTT_LOCK(rtt_status);
-
- /* Get the geofence_target via peer addr, index param is dumm here */
- geofence_target_info = dhd_rtt_get_geofence_target(dhd, &target->peer_addr, &index);
- if (geofence_target_info) {
- DHD_RTT(("Duplicate geofencing RTT add request dropped\n"));
- err = BCME_OK;
- goto exit;
- }
-
- geofence_target_cnt = rtt_status->geofence_cfg.geofence_target_cnt;
- if (geofence_target_cnt >= RTT_MAX_GEOFENCE_TARGET_CNT) {
- DHD_RTT(("Queue full, Geofencing RTT add request dropped\n"));
- err = BCME_NORESOURCE;
- goto exit;
- }
-
- /* Add Geofence RTT request and increment target count */
- geofence_target_info = rtt_status->geofence_cfg.geofence_target_info;
- /* src and dest buffer len same, pointers of same DS statically allocated */
- (void)memcpy_s(&geofence_target_info[geofence_target_cnt],
- sizeof(geofence_target_info[geofence_target_cnt]), target,
- sizeof(*target));
- geofence_target_info[geofence_target_cnt].valid = TRUE;
- rtt_status->geofence_cfg.geofence_target_cnt++;
- if (rtt_status->geofence_cfg.geofence_target_cnt == 1) {
- /* Adding first target */
- rtt_status->geofence_cfg.cur_target_idx = 0;
- }
-
-exit:
- GEOFENCE_RTT_UNLOCK(rtt_status);
- return err;
-}
-
-/* removes geofence target from the target list */
-int
-dhd_rtt_remove_geofence_target(dhd_pub_t *dhd, struct ether_addr *peer_addr)
-{
- int err = BCME_OK;
- rtt_status_info_t *rtt_status;
- rtt_geofence_target_info_t *geofence_target_info;
- int8 geofence_target_cnt, j, index = 0;
-
- NULL_CHECK(dhd, "dhd is NULL", err);
- rtt_status = GET_RTTSTATE(dhd);
- NULL_CHECK(rtt_status, "rtt_status is NULL", err);
-
- GEOFENCE_RTT_LOCK(rtt_status);
-
- geofence_target_cnt = dhd_rtt_get_geofence_target_cnt(dhd);
- if (geofence_target_cnt == 0) {
- DHD_RTT(("Queue Empty, Geofencing RTT remove request dropped\n"));
- ASSERT(0);
- goto exit;
- }
-
- /* Get the geofence_target via peer addr */
- geofence_target_info = dhd_rtt_get_geofence_target(dhd, peer_addr, &index);
- if (geofence_target_info == NULL) {
- DHD_RTT(("Geofencing RTT target not found, remove request dropped\n"));
- err = BCME_NOTFOUND;
- goto exit;
- }
-
- /* left shift all the valid entries, as we dont keep holes in list */
- for (j = index; (j+1) < geofence_target_cnt; j++) {
- if (geofence_target_info[j].valid == TRUE) {
- /*
- * src and dest buffer len same, pointers of same DS
- * statically allocated
- */
- (void)memcpy_s(&geofence_target_info[j], sizeof(geofence_target_info[j]),
- &geofence_target_info[j + 1],
- sizeof(geofence_target_info[j + 1]));
- } else {
- break;
- }
- }
- rtt_status->geofence_cfg.geofence_target_cnt--;
- if ((rtt_status->geofence_cfg.geofence_target_cnt == 0) ||
- (index == rtt_status->geofence_cfg.cur_target_idx)) {
- /* Move cur_idx to next target */
- dhd_rtt_move_geofence_cur_target_idx_to_next(dhd);
- } else if (index < rtt_status->geofence_cfg.cur_target_idx) {
- /* Decrement cur index if cur target position changed */
- rtt_status->geofence_cfg.cur_target_idx--;
- }
-
-exit:
- GEOFENCE_RTT_UNLOCK(rtt_status);
- return err;
-}
-
-/* deletes/empty geofence target list */
-int
-dhd_rtt_delete_geofence_target_list(dhd_pub_t *dhd)
-{
- rtt_status_info_t *rtt_status;
-
- int err = BCME_OK;
-
- NULL_CHECK(dhd, "dhd is NULL", err);
- rtt_status = GET_RTTSTATE(dhd);
- NULL_CHECK(rtt_status, "rtt_status is NULL", err);
- GEOFENCE_RTT_LOCK(rtt_status);
- memset_s(&rtt_status->geofence_cfg, sizeof(rtt_geofence_cfg_t),
- 0, sizeof(rtt_geofence_cfg_t));
- GEOFENCE_RTT_UNLOCK(rtt_status);
- return err;
-}
-
-int
-dhd_rtt_sched_geofencing_target(dhd_pub_t *dhd)
-{
- rtt_geofence_target_info_t *geofence_target_info;
- struct net_device *dev = dhd_linux_get_primary_netdev(dhd);
- int ret = BCME_OK;
- bool geofence_state;
- bool role_concurrency_state;
- u8 rtt_invalid_reason = RTT_STATE_VALID;
- struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
- rtt_status_info_t *rtt_status = GET_RTTSTATE(dhd);
-
- NAN_MUTEX_LOCK();
-
- if ((cfg->nan_init_state == FALSE) ||
- (cfg->nan_enable == FALSE)) {
- ret = BCME_NOTENABLED;
- goto done;
- }
- geofence_state = dhd_rtt_get_geofence_rtt_state(dhd);
- role_concurrency_state = dhd_rtt_get_role_concurrency_state(dhd);
-
- DHD_RTT_ERR(("dhd_rtt_sched_geofencing_target: sched_reason = %d\n",
- rtt_status->rtt_sched_reason));
-
- if (geofence_state == TRUE || role_concurrency_state == TRUE) {
- ret = BCME_ERROR;
- DHD_RTT_ERR(("geofencing constraint , sched request dropped,"
- " geofence_state = %d, role_concurrency_state = %d\n",
- geofence_state, role_concurrency_state));
- goto done;
- }
-
- /* Get current geofencing target */
- geofence_target_info = dhd_rtt_get_geofence_current_target(dhd);
-
- /* call cfg API for trigerring geofencing RTT */
- if (geofence_target_info) {
- /* check for dp/others concurrency */
- rtt_invalid_reason = dhd_rtt_invalid_states(dev,
- &geofence_target_info->peer_addr);
- if (rtt_invalid_reason != RTT_STATE_VALID) {
- ret = BCME_BUSY;
- DHD_RTT_ERR(("DRV State is not valid for RTT, "
- "invalid_state = %d\n", rtt_invalid_reason));
- goto done;
- }
-
- ret = wl_cfgnan_trigger_geofencing_ranging(dev,
- &geofence_target_info->peer_addr);
- if (ret == BCME_OK) {
- dhd_rtt_set_geofence_rtt_state(dhd, TRUE);
- }
- } else {
- DHD_RTT(("No RTT target to schedule\n"));
- ret = BCME_NOTFOUND;
- }
-
-done:
- NAN_MUTEX_UNLOCK();
- return ret;
-}
-#endif /* WL_NAN */
-
-#ifdef WL_CFG80211
-#ifdef WL_NAN
-static void
-dhd_rtt_retry(dhd_pub_t *dhd)
-{
- struct net_device *dev = dhd_linux_get_primary_netdev(dhd);
- struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
- rtt_geofence_target_info_t *geofence_target = NULL;
- nan_ranging_inst_t *ranging_inst = NULL;
-
- geofence_target = dhd_rtt_get_geofence_current_target(dhd);
- if (!geofence_target) {
- DHD_RTT(("dhd_rtt_retry: geofence target null\n"));
- goto exit;
- }
- ranging_inst = wl_cfgnan_get_ranging_inst(cfg,
- &geofence_target->peer_addr, NAN_RANGING_ROLE_INITIATOR);
- if (!ranging_inst) {
- DHD_RTT(("dhd_rtt_retry: ranging instance null\n"));
- goto exit;
- }
- wl_cfgnan_reset_geofence_ranging(cfg,
- ranging_inst, RTT_SCHED_RTT_RETRY_GEOFENCE);
-
-exit:
- return;
-}
-
-static void
-dhd_rtt_retry_work(struct work_struct *work)
-{
- rtt_status_info_t *rtt_status = NULL;
- dhd_pub_t *dhd = NULL;
- struct net_device *dev = NULL;
- struct bcm_cfg80211 *cfg = NULL;
-
- if (!work) {
- goto exit;
- }
-#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
-#pragma GCC diagnostic push
-#pragma GCC diagnostic ignored "-Wcast-qual"
-#endif // endif
- rtt_status = container_of(work, rtt_status_info_t, rtt_retry_timer.work);
-#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
-#pragma GCC diagnostic pop
-#endif // endif
-
- dhd = rtt_status->dhd;
- if (dhd == NULL) {
- DHD_RTT_ERR(("%s : dhd is NULL\n", __FUNCTION__));
- goto exit;
- }
- dev = dhd_linux_get_primary_netdev(dhd);
- cfg = wl_get_cfg(dev);
-
- NAN_MUTEX_LOCK();
- mutex_lock(&rtt_status->rtt_mutex);
- (void) dhd_rtt_retry(dhd);
- mutex_unlock(&rtt_status->rtt_mutex);
- NAN_MUTEX_UNLOCK();
-
-exit:
- return;
-}
-#endif /* WL_NAN */
-
-/*
- * Return zero (0)
- * for valid RTT state
- * means if RTT is applicable
- */
-uint8
-dhd_rtt_invalid_states(struct net_device *ndev, struct ether_addr *peer_addr)
-{
- uint8 invalid_reason = RTT_STATE_VALID;
- struct bcm_cfg80211 *cfg = wl_get_cfg(ndev);
-
- UNUSED_PARAMETER(cfg);
- UNUSED_PARAMETER(invalid_reason);
-
- /* Make sure peer addr is not NULL in caller */
- ASSERT(peer_addr);
- /*
- * Keep adding prohibited drv states here
- * Only generic conditions which block
- * All RTTs like NDP connection
- */
-
-#ifdef WL_NAN
- if (wl_cfgnan_data_dp_exists_with_peer(cfg, peer_addr)) {
- invalid_reason = RTT_STATE_INV_REASON_NDP_EXIST;
- DHD_RTT(("NDP in progress/connected, RTT prohibited\n"));
- goto exit;
- }
-#endif /* WL_NAN */
-
- /* Remove below #defines once more exit calls come */
-#ifdef WL_NAN
-exit:
-#endif /* WL_NAN */
- return invalid_reason;
-}
-#endif /* WL_CFG80211 */
-
-void
-dhd_rtt_schedule_rtt_work_thread(dhd_pub_t *dhd, int sched_reason)
-{
- rtt_status_info_t *rtt_status = GET_RTTSTATE(dhd);
- if (rtt_status == NULL) {
- ASSERT(0);
- } else {
- /* Cancel pending retry timer if any */
- if (delayed_work_pending(&rtt_status->rtt_retry_timer)) {
- cancel_delayed_work(&rtt_status->rtt_retry_timer);
- }
- rtt_status->rtt_sched_reason = sched_reason;
- schedule_work(&rtt_status->work);
- }
- return;
-}
-
-int
-dhd_rtt_stop(dhd_pub_t *dhd, struct ether_addr *mac_list, int mac_cnt)
-{
- int err = BCME_OK;
-#ifdef WL_CFG80211
- int i = 0, j = 0;
- rtt_status_info_t *rtt_status;
- rtt_results_header_t *entry, *next;
- rtt_result_t *rtt_result, *next2;
- struct rtt_noti_callback *iter;
-
- NULL_CHECK(dhd, "dhd is NULL", err);
- rtt_status = GET_RTTSTATE(dhd);
- NULL_CHECK(rtt_status, "rtt_status is NULL", err);
- if (rtt_status->status == RTT_STOPPED) {
- DHD_RTT_ERR(("rtt is not started\n"));
- return BCME_OK;
- }
- DHD_RTT(("%s enter\n", __FUNCTION__));
- mutex_lock(&rtt_status->rtt_mutex);
- for (i = 0; i < mac_cnt; i++) {
- for (j = 0; j < rtt_status->rtt_config.rtt_target_cnt; j++) {
- if (!bcmp(&mac_list[i], &rtt_status->rtt_config.target_info[j].addr,
- ETHER_ADDR_LEN)) {
- rtt_status->rtt_config.target_info[j].disable = TRUE;
- }
- }
+ DHD_RTT(("%s enter\n", __FUNCTION__));
+ mutex_lock(&rtt_status->rtt_mutex);
+ for (i = 0; i < mac_cnt; i++) {
+ for (j = 0; j < rtt_status->rtt_config.rtt_target_cnt; j++) {
+ if (!bcmp(&mac_list[i], &rtt_status->rtt_config.target_info[j].addr,
+ ETHER_ADDR_LEN)) {
+ rtt_status->rtt_config.target_info[j].disable = TRUE;
+ }
+ }
}
if (rtt_status->all_cancel) {
/* cancel all of request */
/* remove the rtt results in cache */
if (!list_empty(&rtt_status->rtt_results_cache)) {
/* Iterate rtt_results_header list */
- GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wcast-qual"
+#endif
list_for_each_entry_safe(entry, next,
&rtt_status->rtt_results_cache, list) {
list_del(&entry->list);
}
kfree(entry);
}
- GCC_DIAGNOSTIC_POP();
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic pop
+#endif
}
/* send the rtt complete event to wake up the user process */
- GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wcast-qual"
+#endif
list_for_each_entry(iter, &rtt_status->noti_fn_list, list) {
- GCC_DIAGNOSTIC_POP();
iter->noti_fn(iter->ctx, &rtt_status->rtt_results_cache);
}
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic pop
+#endif
/* reinitialize the HEAD */
INIT_LIST_HEAD(&rtt_status->rtt_results_cache);
/* clear information for rtt_config */
memset(rtt_status->rtt_config.target_info, 0,
TARGET_INFO_SIZE(RTT_MAX_TARGET_CNT));
rtt_status->cur_idx = 0;
- /* Cancel pending proxd timeout work if any */
- if (delayed_work_pending(&rtt_status->proxd_timeout)) {
- cancel_delayed_work(&rtt_status->proxd_timeout);
- }
dhd_rtt_delete_session(dhd, FTM_DEFAULT_SESSION);
-#ifdef WL_NAN
- dhd_rtt_delete_nan_session(dhd);
-#endif /* WL_NAN */
dhd_rtt_ftm_enable(dhd, FALSE);
}
mutex_unlock(&rtt_status->rtt_mutex);
return err;
}
+
#ifdef WL_CFG80211
-static void
-dhd_rtt_timeout(dhd_pub_t *dhd)
+static int
+dhd_rtt_start(dhd_pub_t *dhd)
{
+ int err = BCME_OK;
+ char eabuf[ETHER_ADDR_STR_LEN];
+ char chanbuf[CHANSPEC_STR_LEN];
+ int ftm_cfg_cnt = 0;
+ int ftm_param_cnt = 0;
+ uint32 rspec = 0;
+ ftm_config_options_info_t ftm_configs[FTM_MAX_CONFIGS];
+ ftm_config_param_info_t ftm_params[FTM_MAX_PARAMS];
+ rtt_target_info_t *rtt_target;
rtt_status_info_t *rtt_status;
-#ifndef DHD_DUMP_ON_RTT_TIMEOUT
- rtt_target_info_t *rtt_target = NULL;
- rtt_target_info_t *rtt_target_info = NULL;
-#ifdef WL_NAN
- nan_ranging_inst_t *ranging_inst = NULL;
- int ret = BCME_OK;
- uint32 status;
- struct net_device *ndev = dhd_linux_get_primary_netdev(dhd);
- struct bcm_cfg80211 *cfg = wiphy_priv(ndev->ieee80211_ptr->wiphy);
-#endif /* WL_NAN */
-#endif /* !DHD_DUMP_ON_RTT_TIMEOUT */
+ int pm = PM_OFF;
+ struct net_device *dev = dhd_linux_get_primary_netdev(dhd);
+ NULL_CHECK(dhd, "dhd is NULL", err);
rtt_status = GET_RTTSTATE(dhd);
- if (!rtt_status) {
- DHD_RTT_ERR(("Proxd timer expired but no RTT status\n"));
- goto exit;
- }
-
- if (RTT_IS_STOPPED(rtt_status)) {
- DHD_RTT_ERR(("Proxd timer expired but no RTT Request\n"));
- goto exit;
- }
+ NULL_CHECK(rtt_status, "rtt_status is NULL", err);
-#ifdef DHD_DUMP_ON_RTT_TIMEOUT
- /* Dump, and Panic depending on memdump.info */
- if (dhd_query_bus_erros(dhd)) {
+ DHD_RTT(("Enter %s\n", __FUNCTION__));
+ if (rtt_status->cur_idx >= rtt_status->rtt_config.rtt_target_cnt) {
+ err = BCME_RANGE;
+ DHD_RTT(("%s : idx %d is out of range\n", __FUNCTION__, rtt_status->cur_idx));
+ if (rtt_status->flags == WL_PROXD_SESSION_FLAG_TARGET) {
+ DHD_ERROR(("STA is set as Target/Responder \n"));
+ return BCME_ERROR;
+ }
goto exit;
}
-#ifdef DHD_FW_COREDUMP
- if (dhd->memdump_enabled) {
- /* Behave based on user memdump info */
- dhd->memdump_type = DUMP_TYPE_PROXD_TIMEOUT;
- dhd_bus_mem_dump(dhd);
- }
-#endif /* DHD_FW_COREDUMP */
-#else /* DHD_DUMP_ON_RTT_TIMEOUT */
- /* Cancel RTT for target and proceed to next target */
- rtt_target_info = rtt_status->rtt_config.target_info;
- if ((!rtt_target_info) ||
- (rtt_status->cur_idx >= rtt_status->rtt_config.rtt_target_cnt)) {
+ if (RTT_IS_STOPPED(rtt_status)) {
+ DHD_RTT(("RTT is stopped\n"));
goto exit;
}
- rtt_target = &rtt_target_info[rtt_status->cur_idx];
- WL_ERR(("Proxd timer expired for Target: "MACDBG" \n", MAC2STRDBG(&rtt_target->addr)));
-#ifdef WL_NAN
- if (rtt_target->peer == RTT_PEER_NAN) {
- ranging_inst = wl_cfgnan_check_for_ranging(cfg, &rtt_target->addr);
- if (!ranging_inst) {
- goto exit;
- }
- ret = wl_cfgnan_cancel_ranging(ndev, cfg, ranging_inst->range_id,
- NAN_RNG_TERM_FLAG_IMMEDIATE, &status);
- if (unlikely(ret) || unlikely(status)) {
- WL_ERR(("%s:nan range cancel failed ret = %d status = %d\n",
- __FUNCTION__, ret, status));
- }
- } else
-#endif /* WL_NAN */
- {
- /* For Legacy RTT */
- dhd_rtt_delete_session(dhd, FTM_DEFAULT_SESSION);
- }
- dhd_rtt_create_failure_result(rtt_status, &rtt_target->addr);
- dhd_rtt_handle_rtt_session_end(dhd);
-#endif /* DHD_DUMP_ON_RTT_TIMEOUT */
-exit:
- return;
-}
-
-static void
-dhd_rtt_timeout_work(struct work_struct *work)
-{
- rtt_status_info_t *rtt_status = NULL;
- dhd_pub_t *dhd = NULL;
-
-#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
-#pragma GCC diagnostic push
-#pragma GCC diagnostic ignored "-Wcast-qual"
-#endif // endif
- rtt_status = container_of(work, rtt_status_info_t, proxd_timeout.work);
-#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
-#pragma GCC diagnostic pop
-#endif // endif
-
- dhd = rtt_status->dhd;
- if (dhd == NULL) {
- DHD_RTT_ERR(("%s : dhd is NULL\n", __FUNCTION__));
- return;
- }
- mutex_lock(&rtt_status->rtt_mutex);
- (void) dhd_rtt_timeout(dhd);
- mutex_unlock(&rtt_status->rtt_mutex);
-}
-
-static int
-dhd_rtt_start(dhd_pub_t *dhd)
-{
- int err = BCME_OK;
- int err_at = 0;
- char eabuf[ETHER_ADDR_STR_LEN];
- char chanbuf[CHANSPEC_STR_LEN];
- int pm = PM_OFF;
- int ftm_cfg_cnt = 0;
- int ftm_param_cnt = 0;
- uint32 rspec = 0;
- ftm_config_options_info_t ftm_configs[FTM_MAX_CONFIGS];
- ftm_config_param_info_t ftm_params[FTM_MAX_PARAMS];
- rtt_target_info_t *rtt_target;
- rtt_status_info_t *rtt_status;
- struct net_device *dev = dhd_linux_get_primary_netdev(dhd);
- u8 ioctl_buf[WLC_IOCTL_SMLEN];
- u8 rtt_invalid_reason = RTT_STATE_VALID;
- int rtt_sched_type = RTT_TYPE_INVALID;
-
- NULL_CHECK(dhd, "dhd is NULL", err);
-
- rtt_status = GET_RTTSTATE(dhd);
- NULL_CHECK(rtt_status, "rtt_status is NULL", err);
-
- mutex_lock(&rtt_status->rtt_work_mutex);
-
- DHD_RTT(("Enter %s\n", __FUNCTION__));
-
- if (RTT_IS_STOPPED(rtt_status)) {
- DHD_RTT(("No Directed RTT target to process, check for geofence\n"));
- goto geofence;
- }
-
- if (rtt_status->cur_idx >= rtt_status->rtt_config.rtt_target_cnt) {
- err = BCME_RANGE;
- err_at = 1;
- DHD_RTT(("%s : idx %d is out of range\n", __FUNCTION__, rtt_status->cur_idx));
- if (rtt_status->flags == WL_PROXD_SESSION_FLAG_TARGET) {
- DHD_RTT_ERR(("STA is set as Target/Responder \n"));
- err = BCME_ERROR;
- err_at = 1;
- }
- goto exit;
- }
-
- rtt_status->pm = PM_OFF;
err = wldev_ioctl_get(dev, WLC_GET_PM, &rtt_status->pm, sizeof(rtt_status->pm));
if (err) {
- DHD_RTT_ERR(("Failed to get the PM value\n"));
+ DHD_ERROR(("Failed to get the PM value\n"));
} else {
err = wldev_ioctl_set(dev, WLC_SET_PM, &pm, sizeof(pm));
if (err) {
- DHD_RTT_ERR(("Failed to set the PM\n"));
+ DHD_ERROR(("Failed to set the PM\n"));
rtt_status->pm_restore = FALSE;
} else {
rtt_status->pm_restore = TRUE;
rtt_target = &rtt_status->rtt_config.target_info[rtt_status->cur_idx];
mutex_unlock(&rtt_status->rtt_mutex);
DHD_RTT(("%s enter\n", __FUNCTION__));
-
- if (ETHER_ISNULLADDR(rtt_target->addr.octet)) {
- err = BCME_BADADDR;
- err_at = 2;
- DHD_RTT(("RTT Target addr is NULL\n"));
- goto exit;
- }
-
- /* check for dp/others concurrency */
- rtt_invalid_reason = dhd_rtt_invalid_states(dev, &rtt_target->addr);
- if (rtt_invalid_reason != RTT_STATE_VALID) {
- err = BCME_BUSY;
- err_at = 3;
- DHD_RTT(("DRV State is not valid for RTT\n"));
- goto exit;
- }
-
-#ifdef WL_NAN
- if (rtt_target->peer == RTT_PEER_NAN) {
- rtt_sched_type = RTT_TYPE_NAN_DIRECTED;
- rtt_status->status = RTT_ENABLED;
- /* Ignore return value..failure taken care inside the API */
- dhd_rtt_nan_start_session(dhd, rtt_target);
- goto exit;
- }
-#endif /* WL_NAN */
if (!RTT_IS_ENABLED(rtt_status)) {
/* enable ftm */
err = dhd_rtt_ftm_enable(dhd, TRUE);
if (err) {
- DHD_RTT_ERR(("failed to enable FTM (%d)\n", err));
- err_at = 5;
+ DHD_ERROR(("failed to enable FTM (%d)\n", err));
goto exit;
}
}
/* delete the default FTM session, if one exists */
err = dhd_rtt_delete_session(dhd, FTM_DEFAULT_SESSION);
if (err < 0 && err != BCME_NOTFOUND) {
- DHD_RTT_ERR(("failed to delete session of FTM (%d)\n", err));
- err_at = 6;
+ DHD_ERROR(("failed to delete session of FTM (%d)\n", err));
goto exit;
}
rtt_status->status = RTT_ENABLED;
/* configure the session 1 as initiator */
ftm_configs[ftm_cfg_cnt].enable = TRUE;
- ftm_configs[ftm_cfg_cnt++].flags =
- WL_PROXD_SESSION_FLAG_INITIATOR | WL_PROXD_SESSION_FLAG_RANDMAC;
+ ftm_configs[ftm_cfg_cnt++].flags = WL_PROXD_SESSION_FLAG_INITIATOR;
dhd_rtt_ftm_config(dhd, FTM_DEFAULT_SESSION, FTM_CONFIG_CAT_OPTIONS,
ftm_configs, ftm_cfg_cnt);
-
- memset(ioctl_buf, 0, WLC_IOCTL_SMLEN);
-
- /* Rand Mac for newer version in place of cur_eth */
- if (dhd->wlc_ver_major < RTT_IOV_CUR_ETH_OBSOLETE) {
- err = wldev_iovar_getbuf(dev, "cur_etheraddr", NULL, 0,
- ioctl_buf, WLC_IOCTL_SMLEN, NULL);
- if (err) {
- DHD_RTT_ERR(("WLC_GET_CUR_ETHERADDR failed, error %d\n", err));
- err_at = 7;
- goto exit;
- }
- memcpy(rtt_target->local_addr.octet, ioctl_buf, ETHER_ADDR_LEN);
-
- /* local mac address */
- if (!ETHER_ISNULLADDR(rtt_target->local_addr.octet)) {
- ftm_params[ftm_param_cnt].mac_addr = rtt_target->local_addr;
- ftm_params[ftm_param_cnt++].tlvid = WL_PROXD_TLV_ID_CUR_ETHER_ADDR;
- bcm_ether_ntoa(&rtt_target->local_addr, eabuf);
- DHD_RTT((">\t local %s\n", eabuf));
- }
- }
/* target's mac address */
if (!ETHER_ISNULLADDR(rtt_target->addr.octet)) {
ftm_params[ftm_param_cnt].mac_addr = rtt_target->addr;
DHD_RTT((">\t num of burst : %d\n", rtt_target->num_burst));
}
/* number of frame per burst */
- rtt_target->num_frames_per_burst = FTM_DEFAULT_CNT_80M;
- if (CHSPEC_IS80(rtt_target->chanspec)) {
- rtt_target->num_frames_per_burst = FTM_DEFAULT_CNT_80M;
- } else if (CHSPEC_IS40(rtt_target->chanspec)) {
- rtt_target->num_frames_per_burst = FTM_DEFAULT_CNT_40M;
- } else if (CHSPEC_IS20(rtt_target->chanspec)) {
- rtt_target->num_frames_per_burst = FTM_DEFAULT_CNT_20M;
+ if (rtt_target->num_frames_per_burst == 0) {
+ rtt_target->num_frames_per_burst =
+ CHSPEC_IS20(rtt_target->chanspec) ? FTM_DEFAULT_CNT_20M :
+ CHSPEC_IS40(rtt_target->chanspec) ? FTM_DEFAULT_CNT_40M :
+ FTM_DEFAULT_CNT_80M;
}
ftm_params[ftm_param_cnt].data16 = htol16(rtt_target->num_frames_per_burst);
ftm_params[ftm_param_cnt++].tlvid = WL_PROXD_TLV_ID_BURST_NUM_FTM;
DHD_RTT((">\t number of frame per burst : %d\n", rtt_target->num_frames_per_burst));
-
/* FTM retry count */
if (rtt_target->num_retries_per_ftm) {
ftm_params[ftm_param_cnt].data8 = rtt_target->num_retries_per_ftm;
ftm_params[ftm_param_cnt++].tlvid = WL_PROXD_TLV_ID_BURST_PERIOD;
DHD_RTT((">\t burst period : %d ms\n", rtt_target->burst_period));
}
- /* Setting both duration and timeout to MAX duration
- * to handle the congestion environments.
- * Hence ignoring the user config.
- */
/* burst-duration */
- rtt_target->burst_duration = FTM_MAX_BURST_DUR_TMO_MS;
if (rtt_target->burst_duration) {
ftm_params[ftm_param_cnt].data_intvl.intvl =
htol32(rtt_target->burst_duration); /* ms */
DHD_RTT((">\t burst duration : %d ms\n",
rtt_target->burst_duration));
}
- /* burst-timeout */
- rtt_target->burst_timeout = FTM_MAX_BURST_DUR_TMO_MS;
- if (rtt_target->burst_timeout) {
- ftm_params[ftm_param_cnt].data_intvl.intvl =
- htol32(rtt_target->burst_timeout); /* ms */
- ftm_params[ftm_param_cnt].data_intvl.tmu = WL_PROXD_TMU_MILLI_SEC;
- ftm_params[ftm_param_cnt++].tlvid = WL_PROXD_TLV_ID_BURST_TIMEOUT;
- DHD_RTT((">\t burst timeout : %d ms\n",
- rtt_target->burst_timeout));
- }
- /* event_mask..applicable for only Legacy RTT.
- * For nan-rtt config happens from firmware
- */
- ftm_params[ftm_param_cnt].event_mask = ((1 << WL_PROXD_EVENT_BURST_END) |
- (1 << WL_PROXD_EVENT_SESSION_END));
- ftm_params[ftm_param_cnt++].tlvid = WL_PROXD_TLV_ID_EVENT_MASK;
-
if (rtt_target->bw && rtt_target->preamble) {
bool use_default = FALSE;
int nss;
int mcs;
switch (rtt_target->preamble) {
- case RTT_PREAMBLE_LEGACY:
- rspec |= WL_RSPEC_ENCODE_RATE; /* 11abg */
- rspec |= WL_RATE_6M;
- break;
- case RTT_PREAMBLE_HT:
- rspec |= WL_RSPEC_ENCODE_HT; /* 11n HT */
- mcs = 0; /* default MCS 0 */
- rspec |= mcs;
- break;
- case RTT_PREAMBLE_VHT:
- rspec |= WL_RSPEC_ENCODE_VHT; /* 11ac VHT */
- mcs = 0; /* default MCS 0 */
- nss = 1; /* default Nss = 1 */
- rspec |= (nss << WL_RSPEC_VHT_NSS_SHIFT) | mcs;
- break;
- default:
- DHD_RTT(("doesn't support this preamble : %d\n",
- rtt_target->preamble));
- use_default = TRUE;
- break;
+ case RTT_PREAMBLE_LEGACY:
+ rspec |= WL_RSPEC_ENCODE_RATE; /* 11abg */
+ rspec |= WL_RATE_6M;
+ break;
+ case RTT_PREAMBLE_HT:
+ rspec |= WL_RSPEC_ENCODE_HT; /* 11n HT */
+ mcs = 0; /* default MCS 0 */
+ rspec |= mcs;
+ break;
+ case RTT_PREAMBLE_VHT:
+ rspec |= WL_RSPEC_ENCODE_VHT; /* 11ac VHT */
+ mcs = 0; /* default MCS 0 */
+ nss = 1; /* default Nss = 1 */
+ rspec |= (nss << WL_RSPEC_VHT_NSS_SHIFT) | mcs;
+ break;
+ default:
+ DHD_RTT(("doesn't support this preamble : %d\n", rtt_target->preamble));
+ use_default = TRUE;
+ break;
}
switch (rtt_target->bw) {
- case RTT_BW_20:
- rspec |= WL_RSPEC_BW_20MHZ;
- break;
- case RTT_BW_40:
- rspec |= WL_RSPEC_BW_40MHZ;
- break;
- case RTT_BW_80:
- rspec |= WL_RSPEC_BW_80MHZ;
- break;
- default:
- DHD_RTT(("doesn't support this BW : %d\n", rtt_target->bw));
- use_default = TRUE;
- break;
+ case RTT_BW_20:
+ rspec |= WL_RSPEC_BW_20MHZ;
+ break;
+ case RTT_BW_40:
+ rspec |= WL_RSPEC_BW_40MHZ;
+ break;
+ case RTT_BW_80:
+ rspec |= WL_RSPEC_BW_80MHZ;
+ break;
+ default:
+ DHD_RTT(("doesn't support this BW : %d\n", rtt_target->bw));
+ use_default = TRUE;
+ break;
}
if (!use_default) {
ftm_params[ftm_param_cnt].data32 = htol32(rspec);
}
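
The two switch statements above compose a single 32-bit rate-spec word from an encoding field, an MCS/NSS part, and a bandwidth field. The sketch below shows the same composition with made-up shifts and masks; the real WL_RSPEC_* layout lives in the Broadcom rate-spec header and is not reproduced here.

/* Sketch of composing a rate-spec word; field positions are illustrative
 * assumptions, not the actual WL_RSPEC_* definitions. */
#include <stdint.h>

#define EX_ENCODE_VHT   (0x2u << 24)  /* assumed encoding field  */
#define EX_BW_80MHZ     (0x3u << 16)  /* assumed bandwidth field */
#define EX_NSS_SHIFT    4             /* assumed NSS position    */

static uint32_t example_vht_rspec(uint32_t mcs, uint32_t nss)
{
	/* encoding | (nss << shift) | mcs, then OR in the bandwidth */
	return EX_ENCODE_VHT | (nss << EX_NSS_SHIFT) | mcs | EX_BW_80MHZ;
}
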
dhd_set_rand_mac_oui(dhd);
dhd_rtt_ftm_config(dhd, FTM_DEFAULT_SESSION, FTM_CONFIG_CAT_GENERAL,
- ftm_params, ftm_param_cnt);
+ ftm_params, ftm_param_cnt);
- rtt_sched_type = RTT_TYPE_LEGACY;
err = dhd_rtt_start_session(dhd, FTM_DEFAULT_SESSION, TRUE);
if (err) {
- DHD_RTT_ERR(("failed to start session of FTM : error %d\n", err));
- err_at = 8;
- } else {
- /* schedule proxd timeout */
- schedule_delayed_work(&rtt_status->proxd_timeout,
- msecs_to_jiffies(DHD_NAN_RTT_TIMER_INTERVAL_MS));
-
- }
-
- goto exit;
-geofence:
-#ifdef WL_NAN
- /* sched geofencing rtt */
- rtt_sched_type = RTT_TYPE_NAN_GEOFENCE;
- if ((err = dhd_rtt_sched_geofencing_target(dhd)) != BCME_OK) {
- DHD_RTT_ERR(("geofencing sched failed, err = %d\n", err));
- err_at = 9;
+ DHD_ERROR(("failed to start session of FTM : error %d\n", err));
}
-#endif /* WL_NAN */
-
exit:
if (err) {
- /* RTT Failed */
- DHD_RTT_ERR(("dhd_rtt_start: Failed & RTT_STOPPED, err = %d,"
- " err_at = %d, rtt_sched_type = %d, rtt_invalid_reason = %d\n"
- " sched_reason = %d",
- err, err_at, rtt_sched_type, rtt_invalid_reason,
- rtt_status->rtt_sched_reason));
+ DHD_ERROR(("rtt is stopped %s \n", __FUNCTION__));
rtt_status->status = RTT_STOPPED;
/* disable FTM */
dhd_rtt_ftm_enable(dhd, FALSE);
if (rtt_status->pm_restore) {
- pm = PM_FAST;
- DHD_RTT_ERR(("pm_restore =%d func =%s \n",
+ DHD_ERROR(("pm_restore =%d func =%s \n",
rtt_status->pm_restore, __FUNCTION__));
+ pm = PM_FAST;
err = wldev_ioctl_set(dev, WLC_SET_PM, &pm, sizeof(pm));
if (err) {
- DHD_RTT_ERR(("Failed to set PM \n"));
+ DHD_ERROR(("Failed to set PM \n"));
} else {
rtt_status->pm_restore = FALSE;
}
}
}
- mutex_unlock(&rtt_status->rtt_work_mutex);
return err;
}
#endif /* WL_CFG80211 */
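
When dhd_rtt_start() fails, the rewritten exit path above does three things in order: it marks the state machine stopped, disables FTM in firmware, and restores the saved power-management mode if it had been overridden. A compact model of that cleanup ordering, with hypothetical helper names, is sketched here.

/* Hypothetical sketch of the failure cleanup order used above. */
struct rtt_state_sketch {
	int status;          /* RTT_STOPPED / RTT_ENABLED            */
	int pm_restore;      /* was PM overridden for ranging?       */
	int saved_pm;        /* mode to put back (e.g. PM_FAST)      */
};

static void rtt_fail_cleanup(struct rtt_state_sketch *st,
                             int (*ftm_enable)(int on),
                             int (*set_pm)(int pm))
{
	st->status = 0;                    /* 1. mark RTT stopped        */
	(void)ftm_enable(0);               /* 2. disable FTM in firmware */
	if (st->pm_restore && set_pm(st->saved_pm) == 0)
		st->pm_restore = 0;        /* 3. restore PM on success   */
}
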
rtt_status = GET_RTTSTATE(dhd);
NULL_CHECK(rtt_status, "rtt_status is NULL", err);
spin_lock_bh(&noti_list_lock);
- GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wcast-qual"
+#endif
list_for_each_entry(iter, &rtt_status->noti_fn_list, list) {
- GCC_DIAGNOSTIC_POP();
if (iter->noti_fn == noti_fn) {
goto exit;
}
}
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic pop
+#endif
cb = kmalloc(sizeof(struct rtt_noti_callback), GFP_ATOMIC);
if (!cb) {
err = -ENOMEM;
rtt_status = GET_RTTSTATE(dhd);
NULL_CHECK(rtt_status, "rtt_status is NULL", err);
spin_lock_bh(&noti_list_lock);
- GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wcast-qual"
+#endif
list_for_each_entry(iter, &rtt_status->noti_fn_list, list) {
- GCC_DIAGNOSTIC_POP();
if (iter->noti_fn == noti_fn) {
cb = iter;
list_del(&cb->list);
break;
}
}
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic pop
+#endif
spin_unlock_bh(&noti_list_lock);
if (cb) {
dhd_rtt_convert_rate_to_host(uint32 rspec)
{
wifi_rate_t host_rate;
- uint32 bandwidth;
memset(&host_rate, 0, sizeof(wifi_rate_t));
- if (RSPEC_ISLEGACY(rspec)) {
+ if ((rspec & WL_RSPEC_ENCODING_MASK) == WL_RSPEC_ENCODE_RATE) {
host_rate.preamble = 0;
- } else if (RSPEC_ISHT(rspec)) {
+ } else if ((rspec & WL_RSPEC_ENCODING_MASK) == WL_RSPEC_ENCODE_HT) {
host_rate.preamble = 2;
host_rate.rateMcsIdx = rspec & WL_RSPEC_RATE_MASK;
- } else if (RSPEC_ISVHT(rspec)) {
+ } else if ((rspec & WL_RSPEC_ENCODING_MASK) == WL_RSPEC_ENCODE_VHT) {
host_rate.preamble = 3;
host_rate.rateMcsIdx = rspec & WL_RSPEC_VHT_MCS_MASK;
host_rate.nss = (rspec & WL_RSPEC_VHT_NSS_MASK) >> WL_RSPEC_VHT_NSS_SHIFT;
}
-
- bandwidth = RSPEC_BW(rspec);
- switch (bandwidth) {
- case WL_RSPEC_BW_20MHZ:
- host_rate.bw = RTT_RATE_20M;
- break;
- case WL_RSPEC_BW_40MHZ:
- host_rate.bw = RTT_RATE_40M;
- break;
- case WL_RSPEC_BW_80MHZ:
- host_rate.bw = RTT_RATE_80M;
- break;
- case WL_RSPEC_BW_160MHZ:
- host_rate.bw = RTT_RATE_160M;
- break;
- default:
- host_rate.bw = RTT_RATE_20M;
- break;
- }
-
+ host_rate.bw = (rspec & WL_RSPEC_BW_MASK) - 1;
host_rate.bitrate = rate_rspec2rate(rspec) / 100; /* 100kbps */
DHD_RTT(("bit rate : %d\n", host_rate.bitrate));
return host_rate;
}
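
The removed hunk above translated the rate-spec bandwidth field into the host-facing RTT_RATE_* values with an explicit switch, while the restored line derives the value arithmetically from the masked field. For comparison, a standalone sketch of the explicit mapping is shown below; the enum values are illustrative, not the driver's definitions.

/* Explicit bandwidth mapping, mirroring the removed switch. */
enum ex_rspec_bw { EX_BW_20 = 1, EX_BW_40, EX_BW_80, EX_BW_160 };
enum ex_rtt_rate { RATE_20M, RATE_40M, RATE_80M, RATE_160M };

static enum ex_rtt_rate bw_to_rtt_rate(enum ex_rspec_bw bw)
{
	switch (bw) {
	case EX_BW_40:  return RATE_40M;
	case EX_BW_80:  return RATE_80M;
	case EX_BW_160: return RATE_160M;
	default:        return RATE_20M;   /* 20 MHz is the fallback */
	}
}
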
-#define FTM_FRAME_TYPES {"SETUP", "TRIGGER", "TIMESTAMP"}
+
static int
-dhd_rtt_convert_results_to_host_v1(rtt_result_t *rtt_result, const uint8 *p_data,
- uint16 tlvid, uint16 len)
+dhd_rtt_convert_results_to_host(rtt_report_t *rtt_report, uint8 *p_data, uint16 tlvid, uint16 len)
{
- int i;
int err = BCME_OK;
char eabuf[ETHER_ADDR_STR_LEN];
+ wl_proxd_rtt_result_t *p_data_info;
wl_proxd_result_flags_t flags;
wl_proxd_session_state_t session_state;
wl_proxd_status_t proxd_status;
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 39))
- struct osl_timespec ts;
+ struct timespec ts;
#endif /* LINUX_VER >= 2.6.39 */
uint32 ratespec;
uint32 avg_dist;
- const wl_proxd_rtt_result_v1_t *p_data_info = NULL;
- const wl_proxd_rtt_sample_v1_t *p_sample_avg = NULL;
- const wl_proxd_rtt_sample_v1_t *p_sample = NULL;
+ wl_proxd_rtt_sample_t *p_sample;
wl_proxd_intvl_t rtt;
wl_proxd_intvl_t p_time;
- uint16 num_rtt = 0, snr = 0, bitflips = 0;
- wl_proxd_phy_error_t tof_phy_error = 0;
- wl_proxd_phy_error_t tof_phy_tgt_error = 0;
- wl_proxd_snr_t tof_target_snr = 0;
- wl_proxd_bitflips_t tof_target_bitflips = 0;
- int16 rssi = 0;
- int32 dist = 0;
- uint8 num_ftm = 0;
- char *ftm_frame_types[] = FTM_FRAME_TYPES;
- rtt_report_t *rtt_report = &(rtt_result->report);
-
- BCM_REFERENCE(ftm_frame_types);
- BCM_REFERENCE(dist);
- BCM_REFERENCE(rssi);
- BCM_REFERENCE(tof_target_bitflips);
- BCM_REFERENCE(tof_target_snr);
- BCM_REFERENCE(tof_phy_tgt_error);
- BCM_REFERENCE(tof_phy_error);
- BCM_REFERENCE(bitflips);
- BCM_REFERENCE(snr);
- BCM_REFERENCE(session_state);
- BCM_REFERENCE(ftm_session_state_value_to_logstr);
NULL_CHECK(rtt_report, "rtt_report is NULL", err);
NULL_CHECK(p_data, "p_data is NULL", err);
DHD_RTT(("%s enter\n", __FUNCTION__));
- p_data_info = (const wl_proxd_rtt_result_v1_t *) p_data;
+ p_data_info = (wl_proxd_rtt_result_t *) p_data;
/* unpack and format 'flags' for display */
flags = ltoh16_ua(&p_data_info->flags);
session_state = ltoh16_ua(&p_data_info->state);
proxd_status = ltoh32_ua(&p_data_info->status);
bcm_ether_ntoa((&(p_data_info->peer)), eabuf);
+ ftm_session_state_value_to_logstr(session_state);
ftm_status_value_to_logstr(proxd_status);
DHD_RTT((">\tTarget(%s) session state=%d(%s), status=%d(%s)\n",
eabuf,
- session_state,
- ftm_session_state_value_to_logstr(session_state),
- proxd_status,
- ftm_status_value_to_logstr(proxd_status)));
-
- /* show avg_dist (1/256m units), burst_num */
- avg_dist = ltoh32_ua(&p_data_info->avg_dist);
- if (avg_dist == 0xffffffff) { /* report 'failure' case */
- DHD_RTT((">\tavg_dist=-1m, burst_num=%d, valid_measure_cnt=%d\n",
- ltoh16_ua(&p_data_info->burst_num),
- p_data_info->num_valid_rtt)); /* in a session */
- avg_dist = FTM_INVALID;
- }
- else {
- DHD_RTT((">\tavg_dist=%d.%04dm, burst_num=%d, valid_measure_cnt=%d num_ftm=%d\n",
- avg_dist >> 8, /* 1/256m units */
- ((avg_dist & 0xff) * 625) >> 4,
- ltoh16_ua(&p_data_info->burst_num),
- p_data_info->num_valid_rtt,
- p_data_info->num_ftm)); /* in a session */
- }
- /* show 'avg_rtt' sample */
- p_sample_avg = &p_data_info->avg_rtt;
- ftm_tmu_value_to_logstr(ltoh16_ua(&p_sample_avg->rtt.tmu));
- DHD_RTT((">\tavg_rtt sample: rssi=%d rtt=%d%s std_deviation =%d.%d ratespec=0x%08x\n",
- (int16) ltoh16_ua(&p_sample_avg->rssi),
- ltoh32_ua(&p_sample_avg->rtt.intvl),
- ftm_tmu_value_to_logstr(ltoh16_ua(&p_sample_avg->rtt.tmu)),
- ltoh16_ua(&p_data_info->sd_rtt)/10, ltoh16_ua(&p_data_info->sd_rtt)%10,
- ltoh32_ua(&p_sample_avg->ratespec)));
-
- /* set peer address */
- rtt_report->addr = p_data_info->peer;
- /* burst num */
- rtt_report->burst_num = ltoh16_ua(&p_data_info->burst_num);
- /* success num */
- rtt_report->success_num = p_data_info->num_valid_rtt;
- /* actual number of FTM supported by peer */
- rtt_report->num_per_burst_peer = p_data_info->num_ftm;
- rtt_report->negotiated_burst_num = p_data_info->num_ftm;
- /* status */
- rtt_report->status = ftm_get_statusmap_info(proxd_status,
- &ftm_status_map_info[0], ARRAYSIZE(ftm_status_map_info));
-
- /* rssi (0.5db) */
- rtt_report->rssi = ABS((wl_proxd_rssi_t)ltoh16_ua(&p_data_info->avg_rtt.rssi)) * 2;
-
- /* rx rate */
- ratespec = ltoh32_ua(&p_data_info->avg_rtt.ratespec);
- rtt_report->rx_rate = dhd_rtt_convert_rate_to_host(ratespec);
- /* tx rate */
- if (flags & WL_PROXD_RESULT_FLAG_VHTACK) {
- rtt_report->tx_rate = dhd_rtt_convert_rate_to_host(0x2010010);
- } else {
- rtt_report->tx_rate = dhd_rtt_convert_rate_to_host(0xc);
- }
- /* rtt_sd */
- rtt.tmu = ltoh16_ua(&p_data_info->avg_rtt.rtt.tmu);
- rtt.intvl = ltoh32_ua(&p_data_info->avg_rtt.rtt.intvl);
- rtt_report->rtt = (wifi_timespan)FTM_INTVL2NSEC(&rtt) * 1000; /* nano -> pico seconds */
- rtt_report->rtt_sd = ltoh16_ua(&p_data_info->sd_rtt); /* nano -> 0.1 nano */
- DHD_RTT(("rtt_report->rtt : %llu\n", rtt_report->rtt));
- DHD_RTT(("rtt_report->rssi : %d (0.5db)\n", rtt_report->rssi));
-
- /* average distance */
- if (avg_dist != FTM_INVALID) {
- rtt_report->distance = (avg_dist >> 8) * 1000; /* meter -> mm */
- rtt_report->distance += (avg_dist & 0xff) * 1000 / 256;
- } else {
- rtt_report->distance = FTM_INVALID;
- }
- /* time stamp */
- /* get the time elapsed from boot time */
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 39))
- osl_get_monotonic_boottime(&ts);
- rtt_report->ts = (uint64)TIMESPEC_TO_US(ts);
-#endif /* LINUX_VER >= 2.6.39 */
-
- if (proxd_status == WL_PROXD_E_REMOTE_FAIL) {
- /* retry time after failure */
- p_time.intvl = ltoh32_ua(&p_data_info->u.retry_after.intvl);
- p_time.tmu = ltoh16_ua(&p_data_info->u.retry_after.tmu);
- rtt_report->retry_after_duration = FTM_INTVL2SEC(&p_time); /* s -> s */
- DHD_RTT((">\tretry_after: %d%s\n",
- ltoh32_ua(&p_data_info->u.retry_after.intvl),
- ftm_tmu_value_to_logstr(ltoh16_ua(&p_data_info->u.retry_after.tmu))));
- } else {
- /* burst duration */
- p_time.intvl = ltoh32_ua(&p_data_info->u.retry_after.intvl);
- p_time.tmu = ltoh16_ua(&p_data_info->u.retry_after.tmu);
- rtt_report->burst_duration = FTM_INTVL2MSEC(&p_time); /* s -> ms */
- DHD_RTT((">\tburst_duration: %d%s\n",
- ltoh32_ua(&p_data_info->u.burst_duration.intvl),
- ftm_tmu_value_to_logstr(ltoh16_ua(&p_data_info->u.burst_duration.tmu))));
- DHD_RTT(("rtt_report->burst_duration : %d\n", rtt_report->burst_duration));
- }
-
- /* display detail if available */
- num_rtt = ltoh16_ua(&p_data_info->num_rtt);
- if (num_rtt > 0) {
- DHD_RTT((">\tnum rtt: %d samples\n", num_rtt));
- p_sample = &p_data_info->rtt[0];
- for (i = 0; i < num_rtt; i++) {
- snr = 0;
- bitflips = 0;
- tof_phy_error = 0;
- tof_phy_tgt_error = 0;
- tof_target_snr = 0;
- tof_target_bitflips = 0;
- rssi = 0;
- dist = 0;
- num_ftm = p_data_info->num_ftm;
- /* FTM frames 1,4,7,11 have valid snr, rssi and bitflips */
- if ((i % num_ftm) == 1) {
- rssi = (wl_proxd_rssi_t) ltoh16_ua(&p_sample->rssi);
- snr = (wl_proxd_snr_t) ltoh16_ua(&p_sample->snr);
- bitflips = (wl_proxd_bitflips_t) ltoh16_ua(&p_sample->bitflips);
- tof_phy_error =
- (wl_proxd_phy_error_t)
- ltoh32_ua(&p_sample->tof_phy_error);
- tof_phy_tgt_error =
- (wl_proxd_phy_error_t)
- ltoh32_ua(&p_sample->tof_tgt_phy_error);
- tof_target_snr =
- (wl_proxd_snr_t)
- ltoh16_ua(&p_sample->tof_tgt_snr);
- tof_target_bitflips =
- (wl_proxd_bitflips_t)
- ltoh16_ua(&p_sample->tof_tgt_bitflips);
- dist = ltoh32_ua(&p_sample->distance);
- } else {
- rssi = -1;
- snr = 0;
- bitflips = 0;
- dist = 0;
- tof_target_bitflips = 0;
- tof_target_snr = 0;
- tof_phy_tgt_error = 0;
- }
- DHD_RTT((">\t sample[%d]: id=%d rssi=%d snr=0x%x bitflips=%d"
- " tof_phy_error %x tof_phy_tgt_error %x target_snr=0x%x"
- " target_bitflips=%d dist=%d rtt=%d%s status %s"
- " Type %s coreid=%d\n",
- i, p_sample->id, rssi, snr,
- bitflips, tof_phy_error, tof_phy_tgt_error,
- tof_target_snr,
- tof_target_bitflips, dist,
- ltoh32_ua(&p_sample->rtt.intvl),
- ftm_tmu_value_to_logstr(ltoh16_ua(&p_sample->rtt.tmu)),
- ftm_status_value_to_logstr(ltoh32_ua(&p_sample->status)),
- ftm_frame_types[i % num_ftm], p_sample->coreid));
- p_sample++;
- }
- }
- return err;
-}
-
-static int
-dhd_rtt_convert_results_to_host_v2(rtt_result_t *rtt_result, const uint8 *p_data,
- uint16 tlvid, uint16 len)
-{
- int i;
- int err = BCME_OK;
- char eabuf[ETHER_ADDR_STR_LEN];
- wl_proxd_result_flags_t flags;
- wl_proxd_session_state_t session_state;
- wl_proxd_status_t proxd_status;
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 39))
- struct osl_timespec ts;
-#endif /* LINUX_VER >= 2.6.39 */
- uint32 ratespec;
- uint32 avg_dist;
- const wl_proxd_rtt_result_v2_t *p_data_info = NULL;
- const wl_proxd_rtt_sample_v2_t *p_sample_avg = NULL;
- const wl_proxd_rtt_sample_v2_t *p_sample = NULL;
- uint16 num_rtt = 0;
- wl_proxd_intvl_t rtt;
- wl_proxd_intvl_t p_time;
- uint16 snr = 0, bitflips = 0;
- wl_proxd_phy_error_t tof_phy_error = 0;
- wl_proxd_phy_error_t tof_phy_tgt_error = 0;
- wl_proxd_snr_t tof_target_snr = 0;
- wl_proxd_bitflips_t tof_target_bitflips = 0;
- int16 rssi = 0;
- int32 dist = 0;
- uint32 chanspec = 0;
- uint8 num_ftm = 0;
- char *ftm_frame_types[] = FTM_FRAME_TYPES;
- rtt_report_t *rtt_report = &(rtt_result->report);
-
- BCM_REFERENCE(ftm_frame_types);
- BCM_REFERENCE(dist);
- BCM_REFERENCE(rssi);
- BCM_REFERENCE(tof_target_bitflips);
- BCM_REFERENCE(tof_target_snr);
- BCM_REFERENCE(tof_phy_tgt_error);
- BCM_REFERENCE(tof_phy_error);
- BCM_REFERENCE(bitflips);
- BCM_REFERENCE(snr);
- BCM_REFERENCE(chanspec);
- BCM_REFERENCE(session_state);
- BCM_REFERENCE(ftm_session_state_value_to_logstr);
-
- NULL_CHECK(rtt_report, "rtt_report is NULL", err);
- NULL_CHECK(p_data, "p_data is NULL", err);
- DHD_RTT(("%s enter\n", __FUNCTION__));
- p_data_info = (const wl_proxd_rtt_result_v2_t *) p_data;
- /* unpack and format 'flags' for display */
- flags = ltoh16_ua(&p_data_info->flags);
- /* session state and status */
- session_state = ltoh16_ua(&p_data_info->state);
- proxd_status = ltoh32_ua(&p_data_info->status);
- bcm_ether_ntoa((&(p_data_info->peer)), eabuf);
-
- if (proxd_status != BCME_OK) {
- DHD_RTT_ERR((">\tTarget(%s) session state=%d(%s), status=%d(%s) "
- "num_meas_ota %d num_valid_rtt %d result_flags %x\n",
- eabuf, session_state,
- ftm_session_state_value_to_logstr(session_state),
- proxd_status, ftm_status_value_to_logstr(proxd_status),
- p_data_info->num_meas, p_data_info->num_valid_rtt,
- p_data_info->flags));
- } else {
- DHD_RTT((">\tTarget(%s) session state=%d(%s), status=%d(%s)\n",
- eabuf, session_state,
- ftm_session_state_value_to_logstr(session_state),
- proxd_status, ftm_status_value_to_logstr(proxd_status)));
- }
- /* show avg_dist (1/256m units), burst_num */
- avg_dist = ltoh32_ua(&p_data_info->avg_dist);
- if (avg_dist == 0xffffffff) { /* report 'failure' case */
- DHD_RTT((">\tavg_dist=-1m, burst_num=%d, valid_measure_cnt=%d\n",
- ltoh16_ua(&p_data_info->burst_num),
- p_data_info->num_valid_rtt)); /* in a session */
- avg_dist = FTM_INVALID;
- } else {
- DHD_RTT((">\tavg_dist=%d.%04dm, burst_num=%d, valid_measure_cnt=%d num_ftm=%d "
- "num_meas_ota=%d, result_flags=%x\n", avg_dist >> 8, /* 1/256m units */
- ((avg_dist & 0xff) * 625) >> 4,
- ltoh16_ua(&p_data_info->burst_num),
- p_data_info->num_valid_rtt,
- p_data_info->num_ftm, p_data_info->num_meas,
- p_data_info->flags)); /* in a session */
- }
- rtt_result->rtt_detail.num_ota_meas = p_data_info->num_meas;
- rtt_result->rtt_detail.result_flags = p_data_info->flags;
- /* show 'avg_rtt' sample */
- /* in v2, avg_rtt is the first element of the variable rtt[] */
- p_sample_avg = &p_data_info->rtt[0];
- ftm_tmu_value_to_logstr(ltoh16_ua(&p_sample_avg->rtt.tmu));
- DHD_RTT((">\tavg_rtt sample: rssi=%d rtt=%d%s std_deviation =%d.%d"
- "ratespec=0x%08x chanspec=0x%08x\n",
- (int16) ltoh16_ua(&p_sample_avg->rssi),
- ltoh32_ua(&p_sample_avg->rtt.intvl),
- ftm_tmu_value_to_logstr(ltoh16_ua(&p_sample_avg->rtt.tmu)),
- ltoh16_ua(&p_data_info->sd_rtt)/10, ltoh16_ua(&p_data_info->sd_rtt)%10,
- ltoh32_ua(&p_sample_avg->ratespec),
- ltoh32_ua(&p_sample_avg->chanspec)));
-
- /* set peer address */
- rtt_report->addr = p_data_info->peer;
-
- /* burst num */
- rtt_report->burst_num = ltoh16_ua(&p_data_info->burst_num);
-
- /* success num */
- rtt_report->success_num = p_data_info->num_valid_rtt;
-
- /* num-ftm configured */
- rtt_report->ftm_num = p_data_info->num_ftm;
-
- /* actual number of FTM supported by peer */
- rtt_report->num_per_burst_peer = p_data_info->num_ftm;
- rtt_report->negotiated_burst_num = p_data_info->num_ftm;
-
- /* status */
- rtt_report->status = ftm_get_statusmap_info(proxd_status,
- &ftm_status_map_info[0], ARRAYSIZE(ftm_status_map_info));
-
- /* Framework expects status as SUCCESS else all results will be
- * set to zero even if we have partial valid result.
- * So setting status as SUCCESS if we have a valid_rtt
- * On burst timeout we stop burst with "timeout" reason and
- * on msch end we set status as "cancel"
- */
- if ((proxd_status == WL_PROXD_E_TIMEOUT ||
- proxd_status == WL_PROXD_E_CANCELED) &&
- rtt_report->success_num) {
- rtt_report->status = RTT_STATUS_SUCCESS;
- }
-
- /* rssi (0.5db) */
- rtt_report->rssi = ABS((wl_proxd_rssi_t)ltoh16_ua(&p_sample_avg->rssi)) * 2;
-
- /* rx rate */
- ratespec = ltoh32_ua(&p_sample_avg->ratespec);
- rtt_report->rx_rate = dhd_rtt_convert_rate_to_host(ratespec);
-
- /* tx rate */
- if (flags & WL_PROXD_RESULT_FLAG_VHTACK) {
- rtt_report->tx_rate = dhd_rtt_convert_rate_to_host(0x2010010);
- } else {
- rtt_report->tx_rate = dhd_rtt_convert_rate_to_host(0xc);
- }
-
- /* rtt_sd */
- rtt.tmu = ltoh16_ua(&p_sample_avg->rtt.tmu);
- rtt.intvl = ltoh32_ua(&p_sample_avg->rtt.intvl);
- rtt_report->rtt = (wifi_timespan)FTM_INTVL2NSEC(&rtt) * 1000; /* nano -> pico seconds */
- rtt_report->rtt_sd = ltoh16_ua(&p_data_info->sd_rtt); /* nano -> 0.1 nano */
- DHD_RTT(("rtt_report->rtt : %llu\n", rtt_report->rtt));
- DHD_RTT(("rtt_report->rssi : %d (0.5db)\n", rtt_report->rssi));
-
- /* average distance */
- if (avg_dist != FTM_INVALID) {
- rtt_report->distance = (avg_dist >> 8) * 1000; /* meter -> mm */
- rtt_report->distance += (avg_dist & 0xff) * 1000 / 256;
- /* rtt_sd is in 0.1 ns.
- * host needs distance_sd in milli mtrs
- * (0.1 * rtt_sd/2 * 10^-9) * C * 1000
- */
- rtt_report->distance_sd = rtt_report->rtt_sd * 15; /* mm */
- } else {
- rtt_report->distance = FTM_INVALID;
- }
- /* time stamp */
- /* get the time elapsed from boot time */
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 39))
- osl_get_monotonic_boottime(&ts);
- rtt_report->ts = (uint64)TIMESPEC_TO_US(ts);
-#endif /* LINUX_VER >= 2.6.39 */
-
- if (proxd_status == WL_PROXD_E_REMOTE_FAIL) {
- /* retry time after failure */
- p_time.intvl = ltoh32_ua(&p_data_info->u.retry_after.intvl);
- p_time.tmu = ltoh16_ua(&p_data_info->u.retry_after.tmu);
- rtt_report->retry_after_duration = FTM_INTVL2SEC(&p_time); /* s -> s */
- DHD_RTT((">\tretry_after: %d%s\n",
- ltoh32_ua(&p_data_info->u.retry_after.intvl),
- ftm_tmu_value_to_logstr(ltoh16_ua(&p_data_info->u.retry_after.tmu))));
- } else {
- /* burst duration */
- p_time.intvl = ltoh32_ua(&p_data_info->u.retry_after.intvl);
- p_time.tmu = ltoh16_ua(&p_data_info->u.retry_after.tmu);
- rtt_report->burst_duration = FTM_INTVL2MSEC(&p_time); /* s -> ms */
- DHD_RTT((">\tburst_duration: %d%s\n",
- ltoh32_ua(&p_data_info->u.burst_duration.intvl),
- ftm_tmu_value_to_logstr(ltoh16_ua(&p_data_info->u.burst_duration.tmu))));
- DHD_RTT(("rtt_report->burst_duration : %d\n", rtt_report->burst_duration));
- }
- /* display detail if available */
- num_rtt = ltoh16_ua(&p_data_info->num_rtt);
- if (num_rtt > 0) {
- DHD_RTT((">\tnum rtt: %d samples\n", num_rtt));
- p_sample = &p_data_info->rtt[1];
- for (i = 0; i < num_rtt; i++) {
- snr = 0;
- bitflips = 0;
- tof_phy_error = 0;
- tof_phy_tgt_error = 0;
- tof_target_snr = 0;
- tof_target_bitflips = 0;
- rssi = 0;
- dist = 0;
- num_ftm = p_data_info->num_ftm;
- /* FTM frames 1,4,7,11 have valid snr, rssi and bitflips */
- if ((i % num_ftm) == 1) {
- rssi = (wl_proxd_rssi_t) ltoh16_ua(&p_sample->rssi);
- snr = (wl_proxd_snr_t) ltoh16_ua(&p_sample->snr);
- bitflips = (wl_proxd_bitflips_t) ltoh16_ua(&p_sample->bitflips);
- tof_phy_error =
- (wl_proxd_phy_error_t)
- ltoh32_ua(&p_sample->tof_phy_error);
- tof_phy_tgt_error =
- (wl_proxd_phy_error_t)
- ltoh32_ua(&p_sample->tof_tgt_phy_error);
- tof_target_snr =
- (wl_proxd_snr_t)
- ltoh16_ua(&p_sample->tof_tgt_snr);
- tof_target_bitflips =
- (wl_proxd_bitflips_t)
- ltoh16_ua(&p_sample->tof_tgt_bitflips);
- dist = ltoh32_ua(&p_sample->distance);
- chanspec = ltoh32_ua(&p_sample->chanspec);
- } else {
- rssi = -1;
- snr = 0;
- bitflips = 0;
- dist = 0;
- tof_target_bitflips = 0;
- tof_target_snr = 0;
- tof_phy_tgt_error = 0;
- }
- DHD_RTT((">\t sample[%d]: id=%d rssi=%d snr=0x%x bitflips=%d"
- " tof_phy_error %x tof_phy_tgt_error %x target_snr=0x%x"
- " target_bitflips=%d dist=%d rtt=%d%s status %s Type %s"
- " coreid=%d chanspec=0x%08x\n",
- i, p_sample->id, rssi, snr,
- bitflips, tof_phy_error, tof_phy_tgt_error,
- tof_target_snr,
- tof_target_bitflips, dist,
- ltoh32_ua(&p_sample->rtt.intvl),
- ftm_tmu_value_to_logstr(ltoh16_ua(&p_sample->rtt.tmu)),
- ftm_status_value_to_logstr(ltoh32_ua(&p_sample->status)),
- ftm_frame_types[i % num_ftm], p_sample->coreid,
- chanspec));
- p_sample++;
- }
- }
- return err;
-}
-#ifdef WL_CFG80211
-/* Common API for handling Session End.
-* This API will flush out the results for a peer MAC.
-*
-* @For legacy FTM session, this API will be called
-* when legacy FTM_SESSION_END event is received.
-* @For legacy Nan-RTT , this API will be called when
-* we are cancelling the nan-ranging session or on
-* nan-ranging-end event.
-*/
-static void
-dhd_rtt_handle_rtt_session_end(dhd_pub_t *dhd)
-{
-
- int idx;
- struct rtt_noti_callback *iter;
- rtt_results_header_t *entry, *next;
- rtt_result_t *next2;
- rtt_result_t *rtt_result;
- rtt_status_info_t *rtt_status = GET_RTTSTATE(dhd);
-
- /* Cancel pending proxd timeout work if any */
- if (delayed_work_pending(&rtt_status->proxd_timeout)) {
- cancel_delayed_work(&rtt_status->proxd_timeout);
- }
-
- /* find next target to trigger RTT */
- for (idx = (rtt_status->cur_idx + 1);
- idx < rtt_status->rtt_config.rtt_target_cnt; idx++) {
- /* skip the disabled device */
- if (rtt_status->rtt_config.target_info[idx].disable) {
- continue;
- } else {
- /* set the idx to cur_idx */
- rtt_status->cur_idx = idx;
- break;
- }
- }
- if (idx < rtt_status->rtt_config.rtt_target_cnt) {
- /* restart to measure RTT from next device */
- DHD_INFO(("restart to measure rtt\n"));
- schedule_work(&rtt_status->work);
- } else {
- DHD_RTT(("RTT_STOPPED\n"));
- rtt_status->status = RTT_STOPPED;
- /* notify the completed information to others */
- GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
- list_for_each_entry(iter, &rtt_status->noti_fn_list, list) {
- iter->noti_fn(iter->ctx, &rtt_status->rtt_results_cache);
- }
- /* remove the rtt results in cache */
- if (!list_empty(&rtt_status->rtt_results_cache)) {
- /* Iterate rtt_results_header list */
- list_for_each_entry_safe(entry, next,
- &rtt_status->rtt_results_cache, list) {
- list_del(&entry->list);
- /* Iterate rtt_result list */
- list_for_each_entry_safe(rtt_result, next2,
- &entry->result_list, list) {
- list_del(&rtt_result->list);
- kfree(rtt_result);
- }
- kfree(entry);
- }
- }
- GCC_DIAGNOSTIC_POP();
- /* reinitialize the HEAD */
- INIT_LIST_HEAD(&rtt_status->rtt_results_cache);
- /* clear information for rtt_config */
- rtt_status->rtt_config.rtt_target_cnt = 0;
- memset_s(rtt_status->rtt_config.target_info, TARGET_INFO_SIZE(RTT_MAX_TARGET_CNT),
- 0, TARGET_INFO_SIZE(RTT_MAX_TARGET_CNT));
- rtt_status->cur_idx = 0;
- }
-}
-#endif /* WL_CFG80211 */
-
-#ifdef WL_CFG80211
-static int
-dhd_rtt_create_failure_result(rtt_status_info_t *rtt_status,
- struct ether_addr *addr)
-{
- rtt_results_header_t *rtt_results_header = NULL;
- rtt_target_info_t *rtt_target_info;
- int ret = BCME_OK;
- rtt_result_t *rtt_result;
-
- /* allocate new header for rtt_results */
- rtt_results_header = (rtt_results_header_t *)MALLOCZ(rtt_status->dhd->osh,
- sizeof(rtt_results_header_t));
- if (!rtt_results_header) {
- ret = -ENOMEM;
- goto exit;
- }
- rtt_target_info = &rtt_status->rtt_config.target_info[rtt_status->cur_idx];
- /* Initialize the head of list for rtt result */
- INIT_LIST_HEAD(&rtt_results_header->result_list);
- /* same src and dest len */
- (void)memcpy_s(&rtt_results_header->peer_mac,
- ETHER_ADDR_LEN, addr, ETHER_ADDR_LEN);
- list_add_tail(&rtt_results_header->list, &rtt_status->rtt_results_cache);
-
- /* allocate rtt_results for new results */
- rtt_result = (rtt_result_t *)MALLOCZ(rtt_status->dhd->osh,
- sizeof(rtt_result_t));
- if (!rtt_result) {
- ret = -ENOMEM;
- kfree(rtt_results_header);
- goto exit;
- }
- /* fill out the results from the configuration param */
- rtt_result->report.ftm_num = rtt_target_info->num_frames_per_burst;
- rtt_result->report.type = RTT_TWO_WAY;
- DHD_RTT(("report->ftm_num : %d\n", rtt_result->report.ftm_num));
- rtt_result->report_len = RTT_REPORT_SIZE;
- rtt_result->report.status = RTT_STATUS_FAIL_NO_RSP;
- /* same src and dest len */
- (void)memcpy_s(&rtt_result->report.addr, ETHER_ADDR_LEN,
- &rtt_target_info->addr, ETHER_ADDR_LEN);
- rtt_result->report.distance = FTM_INVALID;
- list_add_tail(&rtt_result->list, &rtt_results_header->result_list);
- rtt_results_header->result_cnt++;
- rtt_results_header->result_tot_len += rtt_result->report_len;
-exit:
- return ret;
-}
-
-static bool
-dhd_rtt_get_report_header(rtt_status_info_t *rtt_status,
- rtt_results_header_t **rtt_results_header, struct ether_addr *addr)
-{
- rtt_results_header_t *entry;
- GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
- /* find a rtt_report_header for this mac address */
- list_for_each_entry(entry, &rtt_status->rtt_results_cache, list) {
- GCC_DIAGNOSTIC_POP();
- if (!memcmp(&entry->peer_mac, addr, ETHER_ADDR_LEN)) {
- /* found a rtt_report_header for peer_mac in the list */
- if (rtt_results_header) {
- *rtt_results_header = entry;
- }
- return TRUE;
- }
- }
- return FALSE;
-}
-
-int
-dhd_rtt_handle_nan_rtt_session_end(dhd_pub_t *dhd, struct ether_addr *peer)
-{
- bool is_new = TRUE;
- rtt_status_info_t *rtt_status = GET_RTTSTATE(dhd);
- mutex_lock(&rtt_status->rtt_mutex);
- is_new = !dhd_rtt_get_report_header(rtt_status, NULL, peer);
-
- if (is_new) { /* no FTM result..create failure result */
- dhd_rtt_create_failure_result(rtt_status, peer);
- }
- dhd_rtt_handle_rtt_session_end(dhd);
- mutex_unlock(&rtt_status->rtt_mutex);
- return BCME_OK;
-}
-#endif /* WL_CFG80211 */
-
-static bool
-dhd_rtt_is_valid_measurement(rtt_result_t *rtt_result)
-{
- bool ret = FALSE;
-
- if (rtt_result && (rtt_result->report.success_num != 0)) {
- ret = TRUE;
- }
- return ret;
-}
-
-static int
-dhd_rtt_parse_result_event(wl_proxd_event_t *proxd_ev_data,
- int tlvs_len, rtt_result_t *rtt_result)
-{
- int ret = BCME_OK;
-
- /* unpack TLVs and invokes the cbfn to print the event content TLVs */
- ret = bcm_unpack_xtlv_buf((void *) rtt_result,
- (uint8 *)&proxd_ev_data->tlvs[0], tlvs_len,
- BCM_XTLV_OPTION_ALIGN32, rtt_unpack_xtlv_cbfn);
- if (ret != BCME_OK) {
- DHD_RTT_ERR(("%s : Failed to unpack xtlv for an event\n",
- __FUNCTION__));
- goto exit;
- }
- /* fill out the results from the configuration param */
- rtt_result->report.type = RTT_TWO_WAY;
- DHD_RTT(("report->ftm_num : %d\n", rtt_result->report.ftm_num));
- rtt_result->report_len = RTT_REPORT_SIZE;
- rtt_result->detail_len = sizeof(rtt_result->rtt_detail);
-
-exit:
- return ret;
-
-}
-
-static int
-dhd_rtt_handle_directed_rtt_burst_end(dhd_pub_t *dhd, struct ether_addr *peer_addr,
- wl_proxd_event_t *proxd_ev_data, int tlvs_len, rtt_result_t *rtt_result, bool is_nan)
-{
- rtt_status_info_t *rtt_status;
- rtt_results_header_t *rtt_results_header = NULL;
- bool is_new = TRUE;
- int ret = BCME_OK;
- int err_at = 0;
-
- rtt_status = GET_RTTSTATE(dhd);
- is_new = !dhd_rtt_get_report_header(rtt_status,
- &rtt_results_header, peer_addr);
-
- if (tlvs_len > 0) {
- if (is_new) {
- /* allocate new header for rtt_results */
- rtt_results_header = (rtt_results_header_t *)MALLOCZ(rtt_status->dhd->osh,
- sizeof(rtt_results_header_t));
- if (!rtt_results_header) {
- ret = BCME_NORESOURCE;
- err_at = 1;
- goto exit;
- }
- /* Initialize the head of list for rtt result */
- INIT_LIST_HEAD(&rtt_results_header->result_list);
- /* same src and header len */
- (void)memcpy_s(&rtt_results_header->peer_mac, ETHER_ADDR_LEN,
- peer_addr, ETHER_ADDR_LEN);
- list_add_tail(&rtt_results_header->list, &rtt_status->rtt_results_cache);
- }
-
- ret = dhd_rtt_parse_result_event(proxd_ev_data, tlvs_len, rtt_result);
- if ((ret == BCME_OK) && ((!is_nan) ||
- dhd_rtt_is_valid_measurement(rtt_result))) {
- /*
- * Add to list, if non-nan RTT (legacy) or
- * valid measurement in nan rtt case
- */
- list_add_tail(&rtt_result->list, &rtt_results_header->result_list);
- rtt_results_header->result_cnt++;
- rtt_results_header->result_tot_len += rtt_result->report_len +
- rtt_result->detail_len;
- } else {
- err_at = 2;
- if (ret == BCME_OK) {
- /* Case for nan rtt invalid measurement */
- ret = BCME_ERROR;
- err_at = 3;
- }
- goto exit;
- }
- } else {
- ret = BCME_ERROR;
- err_at = 4;
- goto exit;
- }
-
-exit:
- if (ret != BCME_OK) {
- DHD_RTT_ERR(("dhd_rtt_handle_directed_rtt_burst_end: failed, "
- " ret = %d, err_at = %d\n", ret, err_at));
- if (rtt_results_header) {
- list_del(&rtt_results_header->list);
- kfree(rtt_results_header);
- rtt_results_header = NULL;
- }
- }
- return ret;
-}
-
-#ifdef WL_NAN
-static void
-dhd_rtt_nan_range_report(struct bcm_cfg80211 *cfg,
- rtt_result_t *rtt_result)
-{
- wl_nan_ev_rng_rpt_ind_t range_res;
-
- UNUSED_PARAMETER(range_res);
- if (!dhd_rtt_is_valid_measurement(rtt_result)) {
- /* Drop Invalid Measurements for NAN RTT report */
- DHD_RTT(("dhd_rtt_nan_range_report: Drop Invalid Measurements\n"));
- goto exit;
- }
- bzero(&range_res, sizeof(range_res));
- range_res.indication = 0;
- range_res.dist_mm = rtt_result->report.distance;
- /* same src and header len, ignoring ret val here */
- (void)memcpy_s(&range_res.peer_m_addr, ETHER_ADDR_LEN,
- &rtt_result->report.addr, ETHER_ADDR_LEN);
- wl_cfgnan_process_range_report(cfg, &range_res);
-
-exit:
- return;
-}
-
-static int
-dhd_rtt_handle_nan_burst_end(dhd_pub_t *dhd, struct ether_addr *peer_addr,
- wl_proxd_event_t *proxd_ev_data, int tlvs_len)
-{
- struct net_device *ndev = NULL;
- struct bcm_cfg80211 *cfg = NULL;
- nan_ranging_inst_t *rng_inst = NULL;
- rtt_status_info_t *rtt_status = NULL;
- rtt_result_t *rtt_result = NULL;
- bool is_geofence = FALSE;
- int ret = BCME_OK;
+ session_state,
+ ftm_session_state_value_to_logstr(session_state),
+ proxd_status,
+ ftm_status_value_to_logstr(proxd_status)));
- ndev = dhd_linux_get_primary_netdev(dhd);
- cfg = wiphy_priv(ndev->ieee80211_ptr->wiphy);
+ /* show avg_dist (1/256m units), burst_num */
+ avg_dist = ltoh32_ua(&p_data_info->avg_dist);
+ if (avg_dist == 0xffffffff) { /* report 'failure' case */
+ DHD_RTT((">\tavg_dist=-1m, burst_num=%d, valid_measure_cnt=%d\n",
+ ltoh16_ua(&p_data_info->burst_num),
+ p_data_info->num_valid_rtt)); /* in a session */
+ avg_dist = FTM_INVALID;
+ }
+ else {
+ DHD_RTT((">\tavg_dist=%d.%04dm, burst_num=%d, valid_measure_cnt=%d num_ftm=%d\n",
+ avg_dist >> 8, /* 1/256m units */
+ ((avg_dist & 0xff) * 625) >> 4,
+ ltoh16_ua(&p_data_info->burst_num),
+ p_data_info->num_valid_rtt,
+ p_data_info->num_ftm)); /* in a session */
+ }
+ /* show 'avg_rtt' sample */
+ p_sample = &p_data_info->avg_rtt;
+ ftm_tmu_value_to_logstr(ltoh16_ua(&p_sample->rtt.tmu));
+ DHD_RTT((">\tavg_rtt sample: rssi=%d rtt=%d%s std_deviation =%d.%d ratespec=0x%08x\n",
+ (int16) ltoh16_ua(&p_sample->rssi),
+ ltoh32_ua(&p_sample->rtt.intvl),
+ ftm_tmu_value_to_logstr(ltoh16_ua(&p_sample->rtt.tmu)),
+ ltoh16_ua(&p_data_info->sd_rtt)/10, ltoh16_ua(&p_data_info->sd_rtt)%10,
+ ltoh32_ua(&p_sample->ratespec)));
- rtt_status = GET_RTTSTATE(dhd);
- NULL_CHECK(rtt_status, "rtt_status is NULL", ret);
- NAN_MUTEX_LOCK();
- mutex_lock(&rtt_status->rtt_mutex);
+ /* set peer address */
+ rtt_report->addr = p_data_info->peer;
+ /* burst num */
+ rtt_report->burst_num = ltoh16_ua(&p_data_info->burst_num);
+ /* success num */
+ rtt_report->success_num = p_data_info->num_valid_rtt;
+ /* actual number of FTM supported by peer */
+ rtt_report->num_per_burst_peer = p_data_info->num_ftm;
+ rtt_report->negotiated_burst_num = p_data_info->num_ftm;
+ /* status */
+ rtt_report->status = ftm_get_statusmap_info(proxd_status,
+ &ftm_status_map_info[0], ARRAYSIZE(ftm_status_map_info));
- if ((cfg->nan_enable == FALSE) ||
- ETHER_ISNULLADDR(peer_addr)) {
- DHD_RTT_ERR(("Received Burst End with NULL ether addr, "
- "or nan disable, nan_enable = %d\n", cfg->nan_enable));
- ret = BCME_UNSUPPORTED;
- goto exit;
- }
+ /* rssi (0.5db) */
+ rtt_report->rssi = ABS((wl_proxd_rssi_t)ltoh16_ua(&p_data_info->avg_rtt.rssi)) * 2;
- rng_inst = wl_cfgnan_check_for_ranging(cfg, peer_addr);
- if (rng_inst) {
- is_geofence = (rng_inst->range_type
- == RTT_TYPE_NAN_GEOFENCE);
+ /* rx rate */
+ ratespec = ltoh32_ua(&p_data_info->avg_rtt.ratespec);
+ rtt_report->rx_rate = dhd_rtt_convert_rate_to_host(ratespec);
+ /* tx rate */
+ if (flags & WL_PROXD_RESULT_FLAG_VHTACK) {
+ rtt_report->tx_rate = dhd_rtt_convert_rate_to_host(0x2010010);
} else {
- DHD_RTT_ERR(("Received Burst End without Ranging Instance\n"));
- ret = BCME_ERROR;
- goto exit;
- }
-
- /* allocate rtt_results for new results */
- rtt_result = (rtt_result_t *)MALLOCZ(dhd->osh, sizeof(rtt_result_t));
- if (!rtt_result) {
- ret = BCME_NORESOURCE;
- goto exit;
+ rtt_report->tx_rate = dhd_rtt_convert_rate_to_host(0xc);
}
+ /* rtt_sd */
+ rtt.tmu = ltoh16_ua(&p_data_info->avg_rtt.rtt.tmu);
+ rtt.intvl = ltoh32_ua(&p_data_info->avg_rtt.rtt.intvl);
+ rtt_report->rtt = (wifi_timespan)FTM_INTVL2NSEC(&rtt) * 1000; /* nano -> pico seconds */
+ rtt_report->rtt_sd = ltoh16_ua(&p_data_info->sd_rtt); /* nano -> 0.1 nano */
+ DHD_RTT(("rtt_report->rtt : %llu\n", rtt_report->rtt));
+ DHD_RTT(("rtt_report->rssi : %d (0.5db)\n", rtt_report->rssi));
- if (is_geofence) {
- ret = dhd_rtt_parse_result_event(proxd_ev_data, tlvs_len, rtt_result);
- if (ret != BCME_OK) {
- DHD_RTT_ERR(("dhd_rtt_handle_nan_burst_end: "
- "dhd_rtt_parse_result_event failed\n"));
- goto exit;
- }
+ /* average distance */
+ if (avg_dist != FTM_INVALID) {
+ rtt_report->distance = (avg_dist >> 8) * 1000; /* meter -> mm */
+ rtt_report->distance += (avg_dist & 0xff) * 1000 / 256;
} else {
- if (RTT_IS_STOPPED(rtt_status)) {
- /* Ignore the Proxd event */
- DHD_RTT((" event handler rtt is stopped \n"));
- if (rtt_status->flags == WL_PROXD_SESSION_FLAG_TARGET) {
- DHD_RTT(("Device is target/Responder. Recv the event. \n"));
- } else {
- ret = BCME_UNSUPPORTED;
- goto exit;
- }
- }
- ret = dhd_rtt_handle_directed_rtt_burst_end(dhd, peer_addr,
- proxd_ev_data, tlvs_len, rtt_result, TRUE);
- if (ret != BCME_OK) {
- goto exit;
- }
-
+ rtt_report->distance = FTM_INVALID;
}
+ /* time stamp */
+ /* get the time elapsed from boot time */
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 39))
+ get_monotonic_boottime(&ts);
+ rtt_report->ts = (uint64)TIMESPEC_TO_US(ts);
+#endif /* LINUX_VER >= 2.6.39 */
-exit:
- mutex_unlock(&rtt_status->rtt_mutex);
- if (ret == BCME_OK) {
- dhd_rtt_nan_range_report(cfg, rtt_result);
- }
- if (rtt_result &&
- ((ret != BCME_OK) || is_geofence)) {
- kfree(rtt_result);
- rtt_result = NULL;
+ if (proxd_status == WL_PROXD_E_REMOTE_FAIL) {
+ /* retry time after failure */
+ p_time.intvl = ltoh32_ua(&p_data_info->u.retry_after.intvl);
+ p_time.tmu = ltoh16_ua(&p_data_info->u.retry_after.tmu);
+ rtt_report->retry_after_duration = FTM_INTVL2SEC(&p_time); /* s -> s */
+ DHD_RTT((">\tretry_after: %d%s\n",
+ ltoh32_ua(&p_data_info->u.retry_after.intvl),
+ ftm_tmu_value_to_logstr(ltoh16_ua(&p_data_info->u.retry_after.tmu))));
+ } else {
+ /* burst duration */
+ p_time.intvl = ltoh32_ua(&p_data_info->u.retry_after.intvl);
+ p_time.tmu = ltoh16_ua(&p_data_info->u.retry_after.tmu);
+ rtt_report->burst_duration = FTM_INTVL2MSEC(&p_time); /* s -> ms */
+ DHD_RTT((">\tburst_duration: %d%s\n",
+ ltoh32_ua(&p_data_info->u.burst_duration.intvl),
+ ftm_tmu_value_to_logstr(ltoh16_ua(&p_data_info->u.burst_duration.tmu))));
+ DHD_RTT(("rtt_report->burst_duration : %d\n", rtt_report->burst_duration));
}
- NAN_MUTEX_UNLOCK();
- return ret;
+ return err;
}
-#endif /* WL_NAN */
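
Both the removed v1/v2 converters and the restored converter report avg_dist in 1/256-metre units and hand the framework millimetres. The arithmetic is easy to lose in the diff, so here is a self-contained check of the conversion used above.

#include <stdio.h>
#include <stdint.h>

/* avg_dist is in 1/256 m; convert to millimetres as the driver does. */
static uint32_t dist_256m_to_mm(uint32_t avg_dist)
{
	return (avg_dist >> 8) * 1000            /* whole metres -> mm     */
	     + (avg_dist & 0xff) * 1000 / 256;   /* fractional part -> mm  */
}

int main(void)
{
	uint32_t d = (3u << 8) | 128;            /* 3.5 m in 1/256 m units */
	printf("%u mm\n", dist_256m_to_mm(d));   /* prints 3500            */
	return 0;
}
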
int
dhd_rtt_event_handler(dhd_pub_t *dhd, wl_event_msg_t *event, void *event_data)
wl_proxd_event_t *p_event;
wl_proxd_event_type_t event_type;
wl_proxd_ftm_session_status_t session_status;
+ wl_proxd_collect_event_data_t *collect_event_data;
const ftm_strmap_entry_t *p_loginfo;
rtt_result_t *rtt_result;
+ gfp_t kflags;
#ifdef WL_CFG80211
- rtt_status_info_t *rtt_status;
- rtt_results_header_t *rtt_results_header = NULL;
+ int idx;
+ struct rtt_noti_callback *iter;
bool is_new = TRUE;
+ rtt_status_info_t *rtt_status;
+ rtt_result_t *next2;
+ rtt_results_header_t *next = NULL;
+ rtt_target_info_t *rtt_target_info;
+ rtt_results_header_t *entry, *rtt_results_header = NULL;
#endif /* WL_CFG80211 */
DHD_RTT(("Enter %s \n", __FUNCTION__));
NULL_CHECK(dhd, "dhd is NULL", ret);
+#ifdef WL_CFG80211
+ rtt_status = GET_RTTSTATE(dhd);
+ NULL_CHECK(rtt_status, "rtt_status is NULL", ret);
+
+ if (RTT_IS_STOPPED(rtt_status)) {
+ /* Ignore the Proxd event */
+ DHD_RTT((" event handler rtt is stopped \n"));
+ if (rtt_status->flags == WL_PROXD_SESSION_FLAG_TARGET) {
+ DHD_RTT(("Device is target/Responder. Recv the event. \n"));
+ } else {
+ return ret;
+ }
+ }
+#endif /* WL_CFG80211 */
if (ntoh32_ua((void *)&event->datalen) < OFFSETOF(wl_proxd_event_t, tlvs)) {
DHD_RTT(("%s: wrong datalen:%d\n", __FUNCTION__,
ntoh32_ua((void *)&event->datalen)));
}
event_type = ntoh32_ua((void *)&event->event_type);
if (event_type != WLC_E_PROXD) {
- DHD_RTT_ERR((" failed event \n"));
+ DHD_ERROR((" failed event \n"));
return -EINVAL;
}
if (!event_data) {
- DHD_RTT_ERR(("%s: event_data:NULL\n", __FUNCTION__));
+ DHD_ERROR(("%s: event_data:NULL\n", __FUNCTION__));
return -EINVAL;
}
p_event = (wl_proxd_event_t *) event_data;
version = ltoh16(p_event->version);
if (version < WL_PROXD_API_VERSION) {
- DHD_RTT_ERR(("ignore non-ftm event version = 0x%0x < WL_PROXD_API_VERSION (0x%x)\n",
+ DHD_ERROR(("ignore non-ftm event version = 0x%0x < WL_PROXD_API_VERSION (0x%x)\n",
version, WL_PROXD_API_VERSION));
return ret;
}
-
+#ifdef WL_CFG80211
+ if (!in_atomic()) {
+ mutex_lock(&rtt_status->rtt_mutex);
+ }
+#endif /* WL_CFG80211 */
event_type = (wl_proxd_event_type_t) ltoh16(p_event->type);
+ kflags = in_softirq()? GFP_ATOMIC : GFP_KERNEL;
+
DHD_RTT(("event_type=0x%x, ntoh16()=0x%x, ltoh16()=0x%x\n",
p_event->type, ntoh16(p_event->type), ltoh16(p_event->type)));
p_loginfo = ftm_get_event_type_loginfo(event_type);
if (p_loginfo == NULL) {
- DHD_RTT_ERR(("receive an invalid FTM event %d\n", event_type));
+ DHD_ERROR(("receive an invalid FTM event %d\n", event_type));
ret = -EINVAL;
- return ret; /* ignore this event */
+ goto exit; /* ignore this event */
}
/* get TLVs len, skip over event header */
if (ltoh16(p_event->len) < OFFSETOF(wl_proxd_event_t, tlvs)) {
- DHD_RTT_ERR(("invalid FTM event length:%d\n", ltoh16(p_event->len)));
+ DHD_ERROR(("invalid FTM event length:%d\n", ltoh16(p_event->len)));
ret = -EINVAL;
- return ret;
+ goto exit;
}
tlvs_len = ltoh16(p_event->len) - OFFSETOF(wl_proxd_event_t, tlvs);
DHD_RTT(("receive '%s' event: version=0x%x len=%d method=%d sid=%d tlvs_len=%d\n",
ltoh16(p_event->sid),
tlvs_len));
#ifdef WL_CFG80211
-#ifdef WL_NAN
- if ((event_type == WL_PROXD_EVENT_BURST_END) &&
- dhd_rtt_is_nan_peer(dhd, &event->addr)) {
- DHD_RTT(("WL_PROXD_EVENT_BURST_END for NAN RTT\n"));
- ret = dhd_rtt_handle_nan_burst_end(dhd, &event->addr, p_event, tlvs_len);
- return ret;
- }
-#endif /* WL_NAN */
-
- rtt_status = GET_RTTSTATE(dhd);
- NULL_CHECK(rtt_status, "rtt_status is NULL", ret);
- mutex_lock(&rtt_status->rtt_mutex);
-
- if (RTT_IS_STOPPED(rtt_status)) {
- /* Ignore the Proxd event */
- DHD_RTT((" event handler rtt is stopped \n"));
- if (rtt_status->flags == WL_PROXD_SESSION_FLAG_TARGET) {
- DHD_RTT(("Device is target/Responder. Recv the event. \n"));
- } else {
- ret = BCME_NOTREADY;
- goto exit;
- }
+ rtt_target_info = &rtt_status->rtt_config.target_info[rtt_status->cur_idx];
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wcast-qual"
+#endif
+ /* find a rtt_report_header for this mac address */
+ list_for_each_entry(entry, &rtt_status->rtt_results_cache, list) {
+ if (!memcmp(&entry->peer_mac, &event->addr, ETHER_ADDR_LEN)) {
+ /* found a rtt_report_header for peer_mac in the list */
+ is_new = FALSE;
+ rtt_results_header = entry;
+ break;
+ }
}
-#endif /* WL_CFG80211 */
-
-#ifdef WL_CFG80211
- GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
- is_new = !dhd_rtt_get_report_header(rtt_status,
- &rtt_results_header, &event->addr);
- GCC_DIAGNOSTIC_POP();
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic pop
+#endif
#endif /* WL_CFG80211 */
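
The block above replaces the dhd_rtt_get_report_header() helper with an inline walk of rtt_results_cache, matching on the peer MAC and clearing is_new on a hit. A simplified user-space model of that lookup over a singly linked list is given below; the structure names are invented for the sketch.

#include <string.h>
#include <stddef.h>

/* Simplified model of the per-peer results-header lookup above. */
struct results_header_sketch {
	unsigned char peer_mac[6];
	struct results_header_sketch *next;
};

static struct results_header_sketch *
find_header(struct results_header_sketch *cache, const unsigned char mac[6])
{
	for (; cache != NULL; cache = cache->next)
		if (memcmp(cache->peer_mac, mac, 6) == 0)
			return cache;   /* existing header for this peer */
	return NULL;                    /* caller treats this as "is_new" */
}
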
switch (event_type) {
case WL_PROXD_EVENT_SESSION_CREATE:
DHD_RTT(("WL_PROXD_EVENT_BURST_START\n"));
break;
case WL_PROXD_EVENT_BURST_END:
- DHD_RTT(("WL_PROXD_EVENT_BURST_END for Legacy RTT\n"));
- /* allocate rtt_results for new legacy rtt results */
- rtt_result = (rtt_result_t *)MALLOCZ(dhd->osh, sizeof(rtt_result_t));
- if (!rtt_result) {
- ret = -ENOMEM;
- goto exit;
+ DHD_RTT(("WL_PROXD_EVENT_BURST_END\n"));
+#ifdef WL_CFG80211
+ if (is_new) {
+ /* allocate new header for rtt_results */
+ rtt_results_header = kzalloc(sizeof(rtt_results_header_t), kflags);
+ if (!rtt_results_header) {
+ ret = -ENOMEM;
+ goto exit;
+ }
+ /* Initialize the head of list for rtt result */
+ INIT_LIST_HEAD(&rtt_results_header->result_list);
+ rtt_results_header->peer_mac = event->addr;
+ list_add_tail(&rtt_results_header->list, &rtt_status->rtt_results_cache);
}
- ret = dhd_rtt_handle_directed_rtt_burst_end(dhd, &event->addr,
- p_event, tlvs_len, rtt_result, FALSE);
- if (rtt_result && (ret != BCME_OK)) {
- kfree(rtt_result);
- rtt_result = NULL;
- goto exit;
+#endif /* WL_CFG80211 */
+ if (tlvs_len > 0) {
+ /* allocate rtt_results for new results */
+ rtt_result = kzalloc(sizeof(rtt_result_t), kflags);
+ if (!rtt_result) {
+ ret = -ENOMEM;
+ goto exit;
+ }
+ /* unpack TLVs and invokes the cbfn to print the event content TLVs */
+ ret = bcm_unpack_xtlv_buf((void *) &(rtt_result->report),
+ (uint8 *)&p_event->tlvs[0], tlvs_len,
+ BCM_XTLV_OPTION_ALIGN32, rtt_unpack_xtlv_cbfn);
+ if (ret != BCME_OK) {
+ DHD_ERROR(("%s : Failed to unpack xtlv for an event\n",
+ __FUNCTION__));
+ goto exit;
+ }
+#ifdef WL_CFG80211
+ /* fill out the results from the configuration param */
+ rtt_result->report.ftm_num = rtt_target_info->num_frames_per_burst;
+ rtt_result->report.type = RTT_TWO_WAY;
+ DHD_RTT(("report->ftm_num : %d\n", rtt_result->report.ftm_num));
+ rtt_result->report_len = RTT_REPORT_SIZE;
+
+ list_add_tail(&rtt_result->list, &rtt_results_header->result_list);
+ rtt_results_header->result_cnt++;
+ rtt_results_header->result_tot_len += rtt_result->report_len;
+#endif /* WL_CFG80211 */
}
break;
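
On WL_PROXD_EVENT_BURST_END the restored code allocates a result, unpacks the TLVs into it, and then appends it to the per-peer header while bumping the count and total length. The append-and-account step is modeled below in plain C; the names are placeholders for the kernel list types used above.

#include <stddef.h>

/* Placeholder model of appending one result to a per-peer header. */
struct result_sketch {
	size_t report_len;
	struct result_sketch *next;
};

struct header_sketch {
	struct result_sketch *results;
	int result_cnt;
	size_t result_tot_len;
};

static void append_result(struct header_sketch *hdr, struct result_sketch *res)
{
	res->next = hdr->results;   /* prepend for brevity; the driver    */
	hdr->results = res;         /* uses list_add_tail() instead       */
	hdr->result_cnt++;
	hdr->result_tot_len += res->report_len;
}
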
case WL_PROXD_EVENT_SESSION_END:
- DHD_RTT(("WL_PROXD_EVENT_SESSION_END\n"));
- if (dhd_rtt_is_nan_peer(dhd, &event->addr)) {
- /*
- * Nothing to do for session end for nan peer
- * All taken care in burst end and nan rng rep
- */
- break;
- }
+ DHD_RTT(("WL_PROXD_EVENT_SESSION_END\n"));
#ifdef WL_CFG80211
if (!RTT_IS_ENABLED(rtt_status)) {
DHD_RTT(("Ignore the session end evt\n"));
(uint8 *)&p_event->tlvs[0], tlvs_len,
BCM_XTLV_OPTION_ALIGN32, rtt_unpack_xtlv_cbfn);
if (ret != BCME_OK) {
- DHD_RTT_ERR(("%s : Failed to unpack xtlv for an event\n",
+ DHD_ERROR(("%s : Failed to unpack xtlv for an event\n",
__FUNCTION__));
goto exit;
}
#ifdef WL_CFG80211
/* In case of no result for the peer device, make fake result for error case */
if (is_new) {
- dhd_rtt_create_failure_result(rtt_status, &event->addr);
+ /* allocate new header for rtt_results */
+ rtt_results_header = kzalloc(sizeof(rtt_results_header_t), GFP_KERNEL);
+ if (!rtt_results_header) {
+ ret = -ENOMEM;
+ goto exit;
+ }
+ /* Initialize the head of list for rtt result */
+ INIT_LIST_HEAD(&rtt_results_header->result_list);
+ rtt_results_header->peer_mac = event->addr;
+ list_add_tail(&rtt_results_header->list, &rtt_status->rtt_results_cache);
+
+ /* allocate rtt_results for new results */
+ rtt_result = kzalloc(sizeof(rtt_result_t), kflags);
+ if (!rtt_result) {
+ ret = -ENOMEM;
+ kfree(rtt_results_header);
+ goto exit;
+ }
+ /* fill out the results from the configuration param */
+ rtt_result->report.ftm_num = rtt_target_info->num_frames_per_burst;
+ rtt_result->report.type = RTT_TWO_WAY;
+ DHD_RTT(("report->ftm_num : %d\n", rtt_result->report.ftm_num));
+ rtt_result->report_len = RTT_REPORT_SIZE;
+ rtt_result->report.status = RTT_REASON_FAIL_NO_RSP;
+ rtt_result->report.addr = rtt_target_info->addr;
+ rtt_result->report.distance = FTM_INVALID;
+ list_add_tail(&rtt_result->list, &rtt_results_header->result_list);
+ rtt_results_header->result_cnt++;
+ rtt_results_header->result_tot_len += rtt_result->report_len;
+ }
+ /* find next target to trigger RTT */
+ for (idx = (rtt_status->cur_idx + 1);
+ idx < rtt_status->rtt_config.rtt_target_cnt; idx++) {
+ /* skip the disabled device */
+ if (rtt_status->rtt_config.target_info[idx].disable) {
+ continue;
+ } else {
+ /* set the idx to cur_idx */
+ rtt_status->cur_idx = idx;
+ break;
+ }
+ }
+ if (idx < rtt_status->rtt_config.rtt_target_cnt) {
+ /* restart to measure RTT from next device */
+ DHD_ERROR(("restart to measure rtt\n"));
+ schedule_work(&rtt_status->work);
+ } else {
+ DHD_RTT(("RTT_STOPPED\n"));
+ rtt_status->status = RTT_STOPPED;
+ /* to turn on mpc mode */
+ schedule_work(&rtt_status->work);
+ /* notify the completed information to others */
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wcast-qual"
+#endif
+ list_for_each_entry(iter, &rtt_status->noti_fn_list, list) {
+ iter->noti_fn(iter->ctx, &rtt_status->rtt_results_cache);
+ }
+ /* remove the rtt results in cache */
+ if (!list_empty(&rtt_status->rtt_results_cache)) {
+ /* Iterate rtt_results_header list */
+ list_for_each_entry_safe(entry, next,
+ &rtt_status->rtt_results_cache, list) {
+ list_del(&entry->list);
+ /* Iterate rtt_result list */
+ list_for_each_entry_safe(rtt_result, next2,
+ &entry->result_list, list) {
+ list_del(&rtt_result->list);
+ kfree(rtt_result);
+ }
+ kfree(entry);
+ }
+ }
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic pop
+#endif
+ /* reinitialize the HEAD */
+ INIT_LIST_HEAD(&rtt_status->rtt_results_cache);
+ /* clear information for rtt_config */
+ rtt_status->rtt_config.rtt_target_cnt = 0;
+ memset(rtt_status->rtt_config.target_info, 0,
+ TARGET_INFO_SIZE(RTT_MAX_TARGET_CNT));
+ rtt_status->cur_idx = 0;
}
- DHD_RTT(("\n Not Nan peer..proceed to notify result and restart\n"));
- dhd_rtt_handle_rtt_session_end(dhd);
#endif /* WL_CFG80211 */
break;
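
At session end the code above scans forward from cur_idx for the next target that is not disabled, reschedules the work item if one is found, and otherwise tears the whole run down and notifies the registered callbacks. The index-advance part is small enough to model directly:

/* Sketch of picking the next enabled target index, or -1 if none. */
static int next_enabled_target(const int *disabled, int cur_idx, int target_cnt)
{
	int idx;

	for (idx = cur_idx + 1; idx < target_cnt; idx++) {
		if (!disabled[idx])
			return idx;   /* resume measurement from this target       */
	}
	return -1;                    /* no target left: stop and notify listeners */
}
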
case WL_PROXD_EVENT_SESSION_RESTART:
case WL_PROXD_EVENT_COLLECT:
DHD_RTT(("WL_PROXD_EVENT_COLLECT\n"));
if (tlvs_len > 0) {
- void *buffer = NULL;
- if (!(buffer = (void *)MALLOCZ(dhd->osh, tlvs_len))) {
- ret = -ENOMEM;
- goto exit;
- }
- /* unpack TLVs and invokes the cbfn to print the event content TLVs */
- ret = bcm_unpack_xtlv_buf(buffer,
- (uint8 *)&p_event->tlvs[0], tlvs_len,
- BCM_XTLV_OPTION_NONE, rtt_unpack_xtlv_cbfn);
- kfree(buffer);
- if (ret != BCME_OK) {
- DHD_RTT_ERR(("%s : Failed to unpack xtlv for event %d\n",
- __FUNCTION__, event_type));
- goto exit;
- }
- }
- break;
- case WL_PROXD_EVENT_MF_STATS:
- DHD_RTT(("WL_PROXD_EVENT_MF_STATS\n"));
- if (tlvs_len > 0) {
- void *buffer = NULL;
- if (!(buffer = (void *)MALLOCZ(dhd->osh, tlvs_len))) {
+ collect_event_data = kzalloc(sizeof(wl_proxd_collect_event_data_t), kflags);
+ if (!collect_event_data) {
ret = -ENOMEM;
goto exit;
}
/* unpack TLVs and invokes the cbfn to print the event content TLVs */
- ret = bcm_unpack_xtlv_buf(buffer,
+ ret = bcm_unpack_xtlv_buf((void *) collect_event_data,
(uint8 *)&p_event->tlvs[0], tlvs_len,
BCM_XTLV_OPTION_NONE, rtt_unpack_xtlv_cbfn);
- kfree(buffer);
+ kfree(collect_event_data);
if (ret != BCME_OK) {
- DHD_RTT_ERR(("%s : Failed to unpack xtlv for event %d\n",
- __FUNCTION__, event_type));
+ DHD_ERROR(("%s : Failed to unpack xtlv for an event\n",
+ __FUNCTION__));
goto exit;
}
}
break;
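
The COLLECT handler above feeds the event TLVs through bcm_unpack_xtlv_buf() using a scratch structure and a callback. As a rough mental model only (this is not the Broadcom XTLV wire format or API), a type-length-value walk with a dispatch callback looks like this:

#include <stdint.h>
#include <stddef.h>

typedef int (*tlv_cb_t)(void *ctx, uint16_t type, uint16_t len, const uint8_t *data);

/* Rough illustration of a TLV walk; record layout is assumed, not Broadcom's. */
static int walk_tlvs(void *ctx, const uint8_t *buf, size_t buf_len, tlv_cb_t cb)
{
	size_t off = 0;

	while (off + 4 <= buf_len) {                 /* 2-byte type + 2-byte len */
		uint16_t type = (uint16_t)(buf[off] | (buf[off + 1] << 8));
		uint16_t len  = (uint16_t)(buf[off + 2] | (buf[off + 3] << 8));

		if (off + 4 + len > buf_len)
			return -1;                   /* truncated record */
		if (cb(ctx, type, len, buf + off + 4) != 0)
			return -1;                   /* callback aborted */
		off += 4 + (size_t)len;
	}
	return 0;
}
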
+
default:
- DHD_RTT_ERR(("WLC_E_PROXD: not supported EVENT Type:%d\n", event_type));
+ DHD_ERROR(("WLC_E_PROXD: not supported EVENT Type:%d\n", event_type));
break;
}
exit:
#ifdef WL_CFG80211
- mutex_unlock(&rtt_status->rtt_mutex);
+ if (!in_atomic()) {
+ mutex_unlock(&rtt_status->rtt_mutex);
+ }
#endif /* WL_CFG80211 */
return ret;
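
The event handler above takes rtt_mutex only when !in_atomic() and mirrors the same test at the exit label, so lock and unlock always agree. That symmetry is the whole point of the pattern; a reduced user-space sketch, with a stand-in for the context check, is below.

#include <pthread.h>
#include <stdbool.h>

/* Reduced sketch: conditional lock and unlock must use the same predicate. */
static pthread_mutex_t state_lock = PTHREAD_MUTEX_INITIALIZER;

static int handle_event(bool may_sleep /* stand-in for !in_atomic() */)
{
	int ret = 0;

	if (may_sleep)
		pthread_mutex_lock(&state_lock);

	/* ... process the event ... */

	if (may_sleep)                       /* same predicate on the way out */
		pthread_mutex_unlock(&state_lock);
	return ret;
}
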
{
rtt_status_info_t *rtt_status;
dhd_pub_t *dhd;
-
- GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wcast-qual"
+#endif
rtt_status = container_of(work, rtt_status_info_t, work);
- GCC_DIAGNOSTIC_POP();
-
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic pop
+#endif
+ if (rtt_status == NULL) {
+ DHD_ERROR(("%s : rtt_status is NULL\n", __FUNCTION__));
+ return;
+ }
dhd = rtt_status->dhd;
if (dhd == NULL) {
- DHD_RTT_ERR(("%s : dhd is NULL\n", __FUNCTION__));
+ DHD_ERROR(("%s : dhd is NULL\n", __FUNCTION__));
return;
}
(void) dhd_rtt_start(dhd);
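As background for the container_of() call in the work handler above, the standard Linux workqueue idiom looks roughly like this (a generic kernel API sketch with hypothetical names, not DHD code):

	#include <linux/kernel.h>
	#include <linux/workqueue.h>
	#include <linux/printk.h>

	struct my_state {
		int value;                /* example payload */
		struct work_struct work;  /* embedded work item */
	};

	static void my_work_fn(struct work_struct *work)
	{
		/* recover the containing structure from the embedded work_struct */
		struct my_state *st = container_of(work, struct my_state, work);

		pr_info("work ran, value=%d\n", st->value);
	}

	/* setup, e.g. in an init path:
	 *	INIT_WORK(&st->work, my_work_fn);
	 *	schedule_work(&st->work);
	 */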
ieee80211_channel_to_frequency(channel, IEEE80211_BAND_5GHZ);
}
} else {
- DHD_RTT_ERR(("Failed to get the chanspec \n"));
+ DHD_ERROR(("Failed to get the chanspec \n"));
}
return err;
}
wf_chspec_ntoa(chanspec, chanbuf)));
err = wldev_iovar_setint(dev, "chanspec", chanspec);
if (err) {
- DHD_RTT_ERR(("Failed to set the chanspec \n"));
+ DHD_ERROR(("Failed to set the chanspec \n"));
}
}
- rtt_status->pm = PM_OFF;
err = wldev_ioctl_get(dev, WLC_GET_PM, &rtt_status->pm, sizeof(rtt_status->pm));
DHD_RTT(("Current PM value read %d\n", rtt_status->pm));
if (err) {
- DHD_RTT_ERR(("Failed to get the PM value \n"));
+ DHD_ERROR(("Failed to get the PM value \n"));
} else {
err = wldev_ioctl_set(dev, WLC_SET_PM, &pm, sizeof(pm));
if (err) {
- DHD_RTT_ERR(("Failed to set the PM \n"));
+ DHD_ERROR(("Failed to set the PM \n"));
rtt_status->pm_restore = FALSE;
} else {
rtt_status->pm_restore = TRUE;
if (!RTT_IS_ENABLED(rtt_status)) {
err = dhd_rtt_ftm_enable(dhd, TRUE);
if (err) {
- DHD_RTT_ERR(("Failed to enable FTM (%d)\n", err));
+ DHD_ERROR(("Failed to enable FTM (%d)\n", err));
goto exit;
}
DHD_RTT(("FTM enabled \n"));
exit:
if (err) {
rtt_status->status = RTT_STOPPED;
- DHD_RTT_ERR(("rtt is stopped %s \n", __FUNCTION__));
+ DHD_ERROR(("rtt is stopped %s \n", __FUNCTION__));
dhd_rtt_ftm_enable(dhd, FALSE);
DHD_RTT(("restoring the PM value \n"));
if (rtt_status->pm_restore) {
pm = PM_FAST;
err = wldev_ioctl_set(dev, WLC_SET_PM, &pm, sizeof(pm));
if (err) {
- DHD_RTT_ERR(("Failed to restore PM \n"));
+ DHD_ERROR(("Failed to restore PM \n"));
} else {
rtt_status->pm_restore = FALSE;
}
rtt_status_info_t *rtt_status;
int pm = 0;
struct net_device *dev = dhd_linux_get_primary_netdev(dhd);
-
NULL_CHECK(dhd, "dhd is NULL", err);
rtt_status = GET_RTTSTATE(dhd);
NULL_CHECK(rtt_status, "rtt_status is NULL", err);
DHD_RTT(("Enter %s \n", __FUNCTION__));
err = dhd_rtt_ftm_enable(dhd, FALSE);
if (err) {
- DHD_RTT_ERR(("failed to disable FTM (%d)\n", err));
+ DHD_ERROR(("failed to disable FTM (%d)\n", err));
}
rtt_status->status = RTT_STOPPED;
if (rtt_status->pm_restore) {
DHD_RTT(("pm_restore =%d \n", rtt_status->pm_restore));
err = wldev_ioctl_set(dev, WLC_SET_PM, &pm, sizeof(pm));
if (err) {
- DHD_RTT_ERR(("Failed to restore PM \n"));
+ DHD_ERROR(("Failed to restore PM \n"));
} else {
rtt_status->pm_restore = FALSE;
}
int32 drv_up = 1;
int32 version;
rtt_status_info_t *rtt_status;
- ftm_config_param_info_t ftm_params[FTM_MAX_PARAMS];
- int ftm_param_cnt = 0;
-
NULL_CHECK(dhd, "dhd is NULL", err);
dhd->rtt_supported = FALSE;
if (dhd->rtt_state) {
return err;
}
- dhd->rtt_state = (rtt_status_info_t *)MALLOCZ(dhd->osh,
- sizeof(rtt_status_info_t));
+ dhd->rtt_state = kzalloc(sizeof(rtt_status_info_t), GFP_KERNEL);
if (dhd->rtt_state == NULL) {
err = BCME_NOMEM;
- DHD_RTT_ERR(("%s : failed to create rtt_state\n", __FUNCTION__));
+ DHD_ERROR(("%s : failed to create rtt_state\n", __FUNCTION__));
return err;
}
bzero(dhd->rtt_state, sizeof(rtt_status_info_t));
rtt_status = GET_RTTSTATE(dhd);
rtt_status->rtt_config.target_info =
- (rtt_target_info_t *)MALLOCZ(dhd->osh,
- TARGET_INFO_SIZE(RTT_MAX_TARGET_CNT));
+ kzalloc(TARGET_INFO_SIZE(RTT_MAX_TARGET_CNT), GFP_KERNEL);
if (rtt_status->rtt_config.target_info == NULL) {
- DHD_RTT_ERR(("%s failed to allocate the target info for %d\n",
+ DHD_ERROR(("%s failed to allocate the target info for %d\n",
__FUNCTION__, RTT_MAX_TARGET_CNT));
err = BCME_NOMEM;
goto exit;
ret = dhd_rtt_get_version(dhd, &version);
if (ret == BCME_OK && (version == WL_PROXD_API_VERSION)) {
- DHD_RTT_ERR(("%s : FTM is supported\n", __FUNCTION__));
+ DHD_ERROR(("%s : FTM is supported\n", __FUNCTION__));
dhd->rtt_supported = TRUE;
/* rtt_status->rtt_capa.proto |= RTT_CAP_ONE_WAY; */
rtt_status->rtt_capa.proto |= RTT_CAP_FTM_WAY;
rtt_status->rtt_capa.bw |= RTT_BW_80;
} else {
if ((ret != BCME_OK) || (version == 0)) {
- DHD_RTT_ERR(("%s : FTM is not supported\n", __FUNCTION__));
+ DHD_ERROR(("%s : FTM is not supported\n", __FUNCTION__));
} else {
- DHD_RTT_ERR(("%s : FTM version mismatch between HOST (%d) and FW (%d)\n",
+ DHD_ERROR(("%s : FTM version mismatch between HOST (%d) and FW (%d)\n",
__FUNCTION__, WL_PROXD_API_VERSION, version));
}
}
/* cancel all of RTT request once we got the cancel request */
rtt_status->all_cancel = TRUE;
mutex_init(&rtt_status->rtt_mutex);
- mutex_init(&rtt_status->rtt_work_mutex);
- mutex_init(&rtt_status->geofence_mutex);
INIT_LIST_HEAD(&rtt_status->noti_fn_list);
INIT_LIST_HEAD(&rtt_status->rtt_results_cache);
INIT_WORK(&rtt_status->work, dhd_rtt_work);
- /* initialize proxd timer */
- INIT_DELAYED_WORK(&rtt_status->proxd_timeout, dhd_rtt_timeout_work);
-#ifdef WL_NAN
- /* initialize proxd retry timer */
- INIT_DELAYED_WORK(&rtt_status->rtt_retry_timer, dhd_rtt_retry_work);
-#endif /* WL_NAN */
- /* Global proxd config */
- ftm_params[ftm_param_cnt].event_mask = ((1 << WL_PROXD_EVENT_BURST_END) |
- (1 << WL_PROXD_EVENT_SESSION_END));
- ftm_params[ftm_param_cnt++].tlvid = WL_PROXD_TLV_ID_EVENT_MASK;
- dhd_rtt_ftm_config(dhd, 0, FTM_CONFIG_CAT_GENERAL,
- ftm_params, ftm_param_cnt);
exit:
if (err < 0) {
kfree(rtt_status->rtt_config.target_info);
rtt_status->status = RTT_STOPPED;
DHD_RTT(("rtt is stopped %s \n", __FUNCTION__));
/* clear evt callback list */
- GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wcast-qual"
+#endif
+
if (!list_empty(&rtt_status->noti_fn_list)) {
list_for_each_entry_safe(iter, iter2, &rtt_status->noti_fn_list, list) {
list_del(&iter->list);
kfree(rtt_header);
}
}
- GCC_DIAGNOSTIC_POP();
-
- if (delayed_work_pending(&rtt_status->rtt_retry_timer)) {
- cancel_delayed_work(&rtt_status->rtt_retry_timer);
- }
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic pop
+#endif
kfree(rtt_status->rtt_config.target_info);
kfree(dhd->rtt_state);
dhd->rtt_state = NULL;
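The cleanup paths above rely on the kernel's deletion-safe list walk; a self-contained sketch of that idiom follows (hypothetical struct, not a DHD type):

	#include <linux/list.h>
	#include <linux/slab.h>

	struct item {
		struct list_head list;
		int data;
	};

	/* list_for_each_entry_safe() caches the next node, so the current one
	 * can be unlinked and freed inside the loop without breaking the walk. */
	static void drain_items(struct list_head *head)
	{
		struct item *cur, *next;

		list_for_each_entry_safe(cur, next, head, list) {
			list_del(&cur->list);
			kfree(cur);
		}
	}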
/*
* Broadcom Dongle Host Driver (DHD), RTT
*
- * Copyright (C) 1999-2019, Broadcom.
- *
+ * Copyright (C) 1999-2017, Broadcom Corporation
+ *
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
* under the terms of the GNU General Public License version 2 (the "GPL"),
* available at http://www.broadcom.com/licenses/GPLv2.php, with the
* following added to such license:
- *
+ *
* As a special exception, the copyright holders of this software give you
* permission to link this software with independent modules, and to copy and
* distribute the resulting executable under terms of your choice, provided that
* the license of that module. An independent module is a module which is not
* derived from this software. The special exception does not apply to any
* modifications of the software.
- *
+ *
* Notwithstanding the above, under no circumstances may you combine this
* software in any way with any other Broadcom software provided under a license
* other than the GPL, without Broadcom's express prior written consent.
#define DEFAULT_RETRY_CNT 6
#define DEFAULT_FTM_FREQ 5180
#define DEFAULT_FTM_CNTR_FREQ0 5210
-#define RTT_MAX_GEOFENCE_TARGET_CNT 8
#define TARGET_INFO_SIZE(count) (sizeof(rtt_target_info_t) * count)
#define TARGET_TYPE(target) (target->type)
-#define RTT_IS_ENABLED(rtt_status) (rtt_status->status == RTT_ENABLED)
-#define RTT_IS_STOPPED(rtt_status) (rtt_status->status == RTT_STOPPED)
-
-#define GEOFENCE_RTT_LOCK(rtt_status) mutex_lock(&(rtt_status)->geofence_mutex)
-#define GEOFENCE_RTT_UNLOCK(rtt_status) mutex_unlock(&(rtt_status)->geofence_mutex)
-
#ifndef BIT
#define BIT(x) (1 << (x))
-#endif // endif
+#endif
/* DSSS, CCK and 802.11n rates in [500kbps] units */
#define WL_MAXRATE 108 /* in 500kbps units */
#define WL_RATE_54M 108 /* in 500kbps units */
#define GET_RTTSTATE(dhd) ((rtt_status_info_t *)dhd->rtt_state)
-#ifdef WL_NAN
-/* RTT Retry Timer Interval */
-#define DHD_RTT_RETRY_TIMER_INTERVAL_MS 3000u
-#endif /* WL_NAN */
-
-#define DHD_RTT_INVALID_TARGET_INDEX -1
-
enum rtt_role {
RTT_INITIATOR = 0,
RTT_TARGET = 1
RTT_AUTO
} rtt_type_t;
-/* RTT peer type */
typedef enum {
- RTT_PEER_AP = 0x1,
- RTT_PEER_STA = 0x2,
- RTT_PEER_P2P_GO = 0x3,
- RTT_PEER_P2P_CLIENT = 0x4,
- RTT_PEER_NAN = 0x5,
- RTT_PEER_INVALID = 0x6
+ RTT_PEER_STA,
+ RTT_PEER_AP,
+ RTT_PEER_P2P,
+ RTT_PEER_NAN,
+ RTT_PEER_INVALID
} rtt_peer_type_t;
-/* Ranging status */
typedef enum rtt_reason {
- RTT_STATUS_SUCCESS = 0,
- RTT_STATUS_FAILURE = 1, // general failure status
- RTT_STATUS_FAIL_NO_RSP = 2, // target STA does not respond to request
- RTT_STATUS_FAIL_REJECTED = 3, // request rejected. Applies to 2-sided RTT only
- RTT_STATUS_FAIL_NOT_SCHEDULED_YET = 4,
- RTT_STATUS_FAIL_TM_TIMEOUT = 5, // timing measurement times out
- RTT_STATUS_FAIL_AP_ON_DIFF_CHANNEL = 6, // Target on different channel, cannot range
- RTT_STATUS_FAIL_NO_CAPABILITY = 7, // ranging not supported
- RTT_STATUS_ABORTED = 8, // request aborted for unknown reason
- RTT_STATUS_FAIL_INVALID_TS = 9, // Invalid T1-T4 timestamp
- RTT_STATUS_FAIL_PROTOCOL = 10, // 11mc protocol failed
- RTT_STATUS_FAIL_SCHEDULE = 11, // request could not be scheduled
- RTT_STATUS_FAIL_BUSY_TRY_LATER = 12, // responder cannot collaborate at time of request
- RTT_STATUS_INVALID_REQ = 13, // bad request args
- RTT_STATUS_NO_WIFI = 14, // WiFi not enabled Responder overrides param info
- // cannot range with new params
- RTT_STATUS_FAIL_FTM_PARAM_OVERRIDE = 15
+ RTT_REASON_SUCCESS,
+ RTT_REASON_FAILURE,
+ RTT_REASON_FAIL_NO_RSP,
+ RTT_REASON_FAIL_INVALID_TS, /* Invalid timestamp */
+ RTT_REASON_FAIL_PROTOCOL, /* 11mc protocol failed */
+ RTT_REASON_FAIL_REJECTED,
+ RTT_REASON_FAIL_NOT_SCHEDULED_YET,
+ RTT_REASON_FAIL_SCHEDULE, /* schedule failed */
+ RTT_REASON_FAIL_TM_TIMEOUT,
+ RTT_REASON_FAIL_AP_ON_DIFF_CHANNEL,
+ RTT_REASON_FAIL_NO_CAPABILITY,
+ RTT_REASON_FAIL_BUSY_TRY_LATER,
+ RTT_REASON_ABORTED
} rtt_reason_t;
enum {
RTT_PREAMBLE_VHT = BIT(2)
};
+
enum {
RTT_BW_5 = BIT(0),
RTT_BW_10 = BIT(1),
RTT_BW_80 = BIT(4),
RTT_BW_160 = BIT(5)
};
-
-enum rtt_rate_bw {
- RTT_RATE_20M,
- RTT_RATE_40M,
- RTT_RATE_80M,
- RTT_RATE_160M
-};
-
-typedef enum ranging_type {
- RTT_TYPE_INVALID = 0,
- RTT_TYPE_LEGACY = 1,
- RTT_TYPE_NAN_DIRECTED = 2,
- RTT_TYPE_NAN_GEOFENCE = 3
-} ranging_type_t;
-
#define FTM_MAX_NUM_BURST_EXP 14
#define HAS_11MC_CAP(cap) (cap & RTT_CAP_FTM_WAY)
#define HAS_ONEWAY_CAP(cap) (cap & RTT_CAP_ONE_WAY)
typedef struct rtt_target_info {
struct ether_addr addr;
- struct ether_addr local_addr;
rtt_type_t type; /* rtt_type */
rtt_peer_type_t peer; /* peer type */
wifi_channel_info_t channel; /* channel information */
* initiator will request that the responder send
* in a single frame
*/
- uint32 num_frames_per_burst;
- /*
- * num of frames in each RTT burst
+ uint32 num_frames_per_burst;
+ /* num of frames in each RTT burst
* for single side, measurement result num = frame number
* for 2 side RTT, measurement result num = frame number - 1
*/
* at the end of the burst_duration it requested.
*/
uint32 burst_duration;
- uint32 burst_timeout;
uint8 preamble; /* 1 - Legacy, 2 - HT, 4 - VHT */
uint8 bw; /* 5, 10, 20, 40, 80, 160 */
} rtt_target_info_t;
-typedef struct rtt_goefence_target_info {
- bool valid;
- struct ether_addr peer_addr;
-} rtt_geofence_target_info_t;
-
typedef struct rtt_config_params {
int8 rtt_target_cnt;
rtt_target_info_t *target_info;
} rtt_config_params_t;
-typedef struct rtt_geofence_cfg {
- int8 geofence_target_cnt;
- bool rtt_in_progress;
- bool role_concurr_state;
- int8 cur_target_idx;
- rtt_geofence_target_info_t geofence_target_info[RTT_MAX_GEOFENCE_TARGET_CNT];
- int geofence_rtt_interval;
-#ifdef RTT_GEOFENCE_CONT
- bool geofence_cont;
-#endif /* RTT_GEOFENCE_CONT */
-} rtt_geofence_cfg_t;
-
-/*
- * Keep Adding more reasons
- * going forward if needed
- */
-enum rtt_schedule_reason {
- RTT_SCHED_HOST_TRIGGER = 1, /* On host command for directed RTT */
- RTT_SCHED_SUB_MATCH = 2, /* on Sub Match for svc with range req */
- RTT_SCHED_DIR_TRIGGER_FAIL = 3, /* On failure of Directed RTT Trigger */
- RTT_SCHED_DP_END = 4, /* ON NDP End event from fw */
- RTT_SCHED_DP_REJECTED = 5, /* On receving reject dp event from fw */
- RTT_SCHED_RNG_RPT_DIRECTED = 6, /* On Ranging report for directed RTT */
- RTT_SCHED_RNG_TERM = 7, /* On Range Term Indicator */
- RTT_SHCED_HOST_DIRECTED_TERM = 8, /* On host terminating directed RTT sessions */
- RTT_SCHED_RNG_RPT_GEOFENCE = 9, /* On Ranging report for geofence RTT */
- RTT_SCHED_RTT_RETRY_GEOFENCE = 10, /* On Geofence Retry */
- RTT_SCHED_RNG_TERM_PEND_ROLE_CHANGE = 11 /* On Rng Term, while pending role change */
-};
-
-/*
- * Keep Adding more invalid RTT states
- * going forward if needed
- */
-enum rtt_invalid_state {
- RTT_STATE_VALID = 0, /* RTT state is valid */
- RTT_STATE_INV_REASON_NDP_EXIST = 1 /* RTT state invalid as ndp exists */
-};
-
typedef struct rtt_status_info {
- dhd_pub_t *dhd;
- int8 status; /* current status for the current entry */
- int8 txchain; /* current device tx chain */
- int pm; /* to save current value of pm */
- int8 pm_restore; /* flag to reset the old value of pm */
- int8 cur_idx; /* current entry to do RTT */
- bool all_cancel; /* cancel all request once we got the cancel requet */
- uint32 flags; /* indicate whether device is configured as initiator or target */
- struct capability {
+ dhd_pub_t *dhd;
+ int8 status; /* current status for the current entry */
+ int8 txchain; /* current device tx chain */
+ int8 mpc; /* indicate we change mpc mode */
+ int pm; /* to save current value of pm */
+ int8 pm_restore; /* flag to reset the old value of pm */
+ int8 cur_idx; /* current entry to do RTT */
+ bool all_cancel; /* cancel all requests once we get the cancel request */
+ uint32 flags; /* indicate whether device is configured as initiator or target */
+ struct capability {
int32 proto :8;
int32 feature :8;
int32 preamble :8;
int32 bw :8;
} rtt_capa; /* rtt capability */
- struct mutex rtt_mutex;
- struct mutex rtt_work_mutex;
- struct mutex geofence_mutex;
- rtt_config_params_t rtt_config;
- rtt_geofence_cfg_t geofence_cfg;
- struct work_struct work;
- struct list_head noti_fn_list;
- struct list_head rtt_results_cache; /* store results for RTT */
- int rtt_sched_reason; /* rtt_schedule_reason: what scheduled RTT */
- struct delayed_work proxd_timeout; /* Proxd Timeout work */
- struct delayed_work rtt_retry_timer; /* Timer for retry RTT after all targets done */
+ struct mutex rtt_mutex;
+ rtt_config_params_t rtt_config;
+ struct work_struct work;
+ struct list_head noti_fn_list;
+ struct list_head rtt_results_cache; /* store results for RTT */
} rtt_status_info_t;
typedef struct rtt_report {
struct list_head list;
struct list_head result_list;
} rtt_results_header_t;
-struct rtt_result_detail {
- uint8 num_ota_meas;
- uint32 result_flags;
-};
+
/* rtt_result to link all of rtt_report */
typedef struct rtt_result {
struct list_head list;
struct rtt_report report;
int32 report_len; /* total length of rtt_report */
- struct rtt_result_detail rtt_detail;
- int32 detail_len;
} rtt_result_t;
/* RTT Capabilities */
uint8 bw_support; /* bit mask indicate what BW is supported */
} rtt_capabilities_t;
+
/* RTT responder information */
typedef struct wifi_rtt_responder {
wifi_channel_info channel; /* channel of responder */
int
dhd_dev_rtt_capability(struct net_device *dev, rtt_capabilities_t *capa);
+#ifdef WL_CFG80211
int
dhd_dev_rtt_avail_channel(struct net_device *dev, wifi_channel_info *channel_info);
+#endif /* WL_CFG80211 */
int
dhd_dev_rtt_enable_responder(struct net_device *dev, wifi_channel_info *channel_info);
int
dhd_rtt_set_cfg(dhd_pub_t *dhd, rtt_config_params_t *params);
-#ifdef WL_NAN
-void dhd_rtt_initialize_geofence_cfg(dhd_pub_t *dhd);
-#ifdef RTT_GEOFENCE_CONT
-void dhd_rtt_set_geofence_cont_ind(dhd_pub_t *dhd, bool geofence_cont);
-
-void dhd_rtt_get_geofence_cont_ind(dhd_pub_t *dhd, bool* geofence_cont);
-#endif /* RTT_GEOFENCE_CONT */
-
-#ifdef RTT_GEOFENCE_INTERVAL
-void dhd_rtt_set_geofence_rtt_interval(dhd_pub_t *dhd, int interval);
-#endif /* RTT_GEOFENCE_INTERVAL */
-
-void dhd_rtt_set_role_concurrency_state(dhd_pub_t *dhd, bool state);
-
-bool dhd_rtt_get_role_concurrency_state(dhd_pub_t *dhd);
-
-int8 dhd_rtt_get_geofence_target_cnt(dhd_pub_t *dhd);
-
-void dhd_rtt_set_geofence_rtt_state(dhd_pub_t *dhd, bool state);
-
-bool dhd_rtt_get_geofence_rtt_state(dhd_pub_t *dhd);
-
-rtt_geofence_target_info_t*
-dhd_rtt_get_geofence_target_head(dhd_pub_t *dhd);
-
-rtt_geofence_target_info_t*
-dhd_rtt_get_geofence_current_target(dhd_pub_t *dhd);
-
-rtt_geofence_target_info_t*
-dhd_rtt_get_geofence_target(dhd_pub_t *dhd, struct ether_addr* peer_addr,
- int8 *index);
-
-int
-dhd_rtt_add_geofence_target(dhd_pub_t *dhd, rtt_geofence_target_info_t *target);
-
-int
-dhd_rtt_remove_geofence_target(dhd_pub_t *dhd, struct ether_addr *peer_addr);
-
-int
-dhd_rtt_delete_geofence_target_list(dhd_pub_t *dhd);
-
-int
-dhd_rtt_delete_nan_session(dhd_pub_t *dhd);
-#endif /* WL_NAN */
-
-uint8
-dhd_rtt_invalid_states(struct net_device *ndev, struct ether_addr *peer_addr);
-
-void
-dhd_rtt_schedule_rtt_work_thread(dhd_pub_t *dhd, int sched_reason);
-
int
dhd_rtt_stop(dhd_pub_t *dhd, struct ether_addr *mac_list, int mac_cnt);
+
int
dhd_rtt_register_noti_callback(dhd_pub_t *dhd, void *ctx, dhd_rtt_compl_noti_fn noti_fn);
int
dhd_rtt_deinit(dhd_pub_t *dhd);
-
-#ifdef WL_CFG80211
-int dhd_rtt_handle_nan_rtt_session_end(dhd_pub_t *dhd,
- struct ether_addr *peer);
-
-void dhd_rtt_move_geofence_cur_target_idx_to_next(dhd_pub_t *dhd);
-
-int8 dhd_rtt_get_geofence_cur_target_idx(dhd_pub_t *dhd);
-#endif /* WL_CFG80211 */
-
#endif /* __DHD_RTT_H__ */
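A small illustration of how the capability bit masks declared in this header combine with the rtt_capa.proto field that dhd_rtt_init() populates; the helper itself is hypothetical:

	/* Hypothetical helper: true when two-sided (802.11mc FTM) ranging was
	 * advertised, i.e. RTT_CAP_FTM_WAY was OR'd into rtt_capa.proto. */
	static bool rtt_has_ftm(const rtt_status_info_t *rtt_status)
	{
		return (rtt_status->rtt_capa.proto & RTT_CAP_FTM_WAY) != 0;
	}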
/*
* DHD Bus Module for SDIO
*
- * Copyright (C) 1999-2019, Broadcom.
- *
+ * Copyright (C) 1999-2017, Broadcom Corporation
+ *
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
* under the terms of the GNU General Public License version 2 (the "GPL"),
* available at http://www.broadcom.com/licenses/GPLv2.php, with the
* following added to such license:
- *
+ *
* As a special exception, the copyright holders of this software give you
* permission to link this software with independent modules, and to copy and
* distribute the resulting executable under terms of your choice, provided that
* the license of that module. An independent module is a module which is not
* derived from this software. The special exception does not apply to any
* modifications of the software.
- *
+ *
* Notwithstanding the above, under no circumstances may you combine this
* software in any way with any other Broadcom software provided under a license
* other than the GPL, without Broadcom's express prior written consent.
*
* <<Broadcom-WL-IPTag/Open:>>
*
- * $Id: dhd_sdio.c 825481 2019-06-14 10:06:03Z $
+ * $Id: dhd_sdio.c 705650 2017-06-19 03:00:50Z $
*/
#include <typedefs.h>
#include <osl.h>
#include <bcmsdh.h>
+#ifdef BCMEMBEDIMAGE
+#include BCMEMBEDIMAGE
+#endif /* BCMEMBEDIMAGE */
+
#include <bcmdefs.h>
#include <bcmutils.h>
#include <bcmendian.h>
#include <sbhnddma.h>
#include <sdio.h>
-#ifdef BCMSPI
-#include <spid.h>
-#endif /* BCMSPI */
#include <sbsdio.h>
#include <sbsdpcmdev.h>
#include <bcmsdpcm.h>
#ifdef PROP_TXSTATUS
#include <dhd_wlfc.h>
-#endif // endif
+#endif
#ifdef DHDTCPACK_SUPPRESS
#include <dhd_ip.h>
#endif /* DHDTCPACK_SUPPRESS */
#include <dhd_bt_interface.h>
#endif /* BT_OVER_SDIO */
-#if defined(DEBUGGER) || defined(DHD_DSCOPE)
-#include <debugger.h>
-#endif /* DEBUGGER || DHD_DSCOPE */
-
bool dhd_mp_halting(dhd_pub_t *dhdp);
extern void bcmsdh_waitfor_iodrain(void *sdh);
extern void bcmsdh_reject_ioreqs(void *sdh, bool reject);
static int dhdsdio_suspend(void *context);
static int dhdsdio_resume(void *context);
+
#ifndef DHDSDIO_MEM_DUMP_FNAME
#define DHDSDIO_MEM_DUMP_FNAME "mem_dump"
-#endif // endif
+#endif
#define QLEN (1024) /* bulk rx and tx queue lengths */
#define FCHI (QLEN - 10)
#define READ_FRM_CNT_RETRIES 3
#ifndef DHD_RXBOUND
#define DHD_RXBOUND 50 /* Default for max rx frames in one scheduling */
-#endif // endif
+#endif
#ifndef DHD_TXBOUND
#define DHD_TXBOUND 20 /* Default for max tx frames in one scheduling */
-#endif // endif
+#endif
#define DHD_TXMINMAX 1 /* Max tx frames if rx still pending */
#ifndef DHD_FIRSTREAD
#define DHD_FIRSTREAD 32
-#endif // endif
+#endif
#if !ISPOWEROF2(DHD_FIRSTREAD)
#error DHD_FIRSTREAD is not a power of 2!
-#endif // endif
+#endif
/* Total length of frame header for dongle protocol */
#define SDPCM_HDRLEN (SDPCM_FRAMETAG_LEN + SDPCM_SWHEADER_LEN)
#define SDPCM_RESERVE (SDPCM_HDRLEN + SDPCM_TEST_HDRLEN + DHD_SDALIGN)
#else
#define SDPCM_RESERVE (SDPCM_HDRLEN + DHD_SDALIGN)
-#endif // endif
+#endif
/* Space for header read, limit for data packets */
#ifndef MAX_HDR_READ
#define MAX_HDR_READ 32
-#endif // endif
+#endif
#if !ISPOWEROF2(MAX_HDR_READ)
#error MAX_HDR_READ is not a power of 2!
-#endif // endif
+#endif
#define MAX_RX_DATASZ 2048
#if (PMU_MAX_TRANSITION_DLY <= 1000000)
#undef PMU_MAX_TRANSITION_DLY
#define PMU_MAX_TRANSITION_DLY 1000000
-#endif // endif
+#endif
/* hooks for limiting threshold custom tx num in rx processing */
#define DEFAULT_TXINRX_THRES 0
#ifndef CUSTOM_TXINRX_THRES
#define CUSTOM_TXINRX_THRES DEFAULT_TXINRX_THRES
-#endif // endif
+#endif
/* Value for ChipClockCSR during initial setup */
#define DHD_INIT_CLKCTL1 (SBSDIO_FORCE_HW_CLKREQ_OFF | SBSDIO_ALP_AVAIL_REQ)
/* Device console log buffer state */
#define CONSOLE_LINE_MAX 192
-#define CONSOLE_BUFFER_MAX 8192
+#define CONSOLE_BUFFER_MAX 2024
typedef struct dhd_console {
uint count; /* Poll interval msec counter */
uint log_addr; /* Log struct address (fixed) */
bool usebufpool;
int32 txinrx_thres; /* num of in-queued pkts */
int32 dotxinrx; /* tx first in dhdsdio_readframes */
-#ifdef BCMSDIO_RXLIM_POST
- bool rxlim_en;
- uint32 rxlim_addr;
-#endif /* BCMSDIO_RXLIM_POST */
#ifdef SDTEST
/* external loopback */
bool ext_loop;
uint f2txdata; /* Number of f2 frame writes */
uint f1regdata; /* Number of f1 register accesses */
wake_counts_t wake_counts; /* Wake up counter */
-#ifdef BCMSPI
- bool dwordmode;
-#endif /* BCMSPI */
#ifdef DHDENABLE_TAILPAD
uint tx_tailpad_chain; /* Number of tail padding by chaining pad_pkt */
uint tx_tailpad_pktget; /* Number of tail padding by new PKTGET */
uint8 *ctrl_frame_buf;
uint32 ctrl_frame_len;
bool ctrl_frame_stat;
-#ifndef BCMSPI
uint32 rxint_mode; /* rx interrupt mode */
-#endif /* BCMSPI */
bool remap; /* Contiguous 1MB RAM: 512K socram + 512K devram
* Available with socram rev 16
* Remap region not DMA-able
uint txglomframes; /* Number of tx glom frames (superframes) */
uint txglompkts; /* Number of packets from tx glom frames */
uint8 *membuf; /* Buffer for dhdsdio_membytes */
-#ifdef CONSOLE_DPC
- char cons_cmd[16];
-#endif
} dhd_bus_t;
+
/*
* Whenever DHD_IDLE_IMMEDIATE condition is handled, we have to now check if
* BT is active too. Instead of adding #ifdef code in all the places, we thought
extern uint dhd_watchdog_ms;
extern uint sd_f1_blocksize;
-#ifdef BCMSPI_ANDROID
-extern uint *dhd_spi_lockcount;
-#endif /* BCMSPI_ANDROID */
+#if defined(BT_OVER_SDIO)
+extern dhd_pub_t *g_dhd_pub;
+#endif /* (BT_OVER_SDIO) */
extern void dhd_os_wd_timer(void *bus, uint wdtick);
int dhd_enableOOB(dhd_pub_t *dhd, bool sleep);
-#ifdef DHD_PM_CONTROL_FROM_FILE
-extern bool g_pm_control;
-#endif /* DHD_PM_CONTROL_FROM_FILE */
/* Tx/Rx bounds */
uint dhd_txbound;
static bool retrydata;
#define RETRYCHAN(chan) (((chan) == SDPCM_EVENT_CHANNEL) || retrydata)
-#ifdef BCMSPI
-/* At a watermark around 8 the spid hits underflow error. */
-static uint watermark = 32;
-static uint mesbusyctrl = 0;
-#else
static uint watermark = 8;
static uint mesbusyctrl = 0;
-#endif /* BCMSPI */
static const uint firstread = DHD_FIRSTREAD;
/* Retry count for register access failures */
/* Force even SD lengths (some host controllers mess up on odd bytes) */
static bool forcealign;
-#if defined(DEBUGGER)
-static uint32 dhd_sdio_reg_read(struct dhd_bus *bus, ulong addr);
-static void dhd_sdio_reg_write(struct dhd_bus *bus, ulong addr, uint32 val);
-
-/** the debugger layer will call back into this (bus) layer to read/write dongle memory */
-static struct dhd_dbg_bus_ops_s bus_ops = {
- .read_u16 = NULL,
- .read_u32 = dhd_sdio_reg_read,
- .write_u32 = dhd_sdio_reg_write,
-};
-#endif /* DEBUGGER */
-
#define ALIGNMENT 4
#if (defined(OOB_INTR_ONLY) && defined(HW_OOB)) || defined(FORCE_WOWLAN)
extern void bcmsdh_enable_hw_oob_intr(void *sdh, bool enable);
-#endif // endif
+#endif
#if defined(OOB_INTR_ONLY) && defined(SDIO_ISR_THREAD)
#error OOB_INTR_ONLY is NOT working with SDIO_ISR_THREAD
#define SDIO_DEVICE_RXDATAINT_MODE_0 1 /* from sdiod rev 4 */
#define SDIO_DEVICE_RXDATAINT_MODE_1 2 /* from sdiod rev 4 */
-#ifdef BCMSPI
-
-#define FRAME_AVAIL_MASK(bus) I_HMB_FRAME_IND
-
-#define DHD_BUS SPI_BUS
-
-/* check packet-available-interrupt in piggybacked dstatus */
-#define PKT_AVAILABLE(bus, intstatus) (bcmsdh_get_dstatus(bus->sdh) & STATUS_F2_PKT_AVAILABLE)
-
-#define HOSTINTMASK (I_HMB_FC_CHANGE | I_HMB_HOST_INT)
-
-#define GSPI_PR55150_BAILOUT \
-do { \
- uint32 dstatussw = bcmsdh_get_dstatus((void *)bus->sdh); \
- uint32 dstatushw = bcmsdh_cfg_read_word(bus->sdh, SDIO_FUNC_0, SPID_STATUS_REG, NULL); \
- uint32 intstatuserr = 0; \
- uint retries = 0; \
- \
- R_SDREG(intstatuserr, &bus->regs->intstatus, retries); \
- printf("dstatussw = 0x%x, dstatushw = 0x%x, intstatus = 0x%x\n", \
- dstatussw, dstatushw, intstatuserr); \
- \
- bus->nextlen = 0; \
- *finished = TRUE; \
-} while (0)
-
-#else /* BCMSDIO */
#define FRAME_AVAIL_MASK(bus) \
((bus->rxint_mode == SDIO_DEVICE_HMB_RXINT) ? I_HMB_FRAME_IND : I_XMTDATA_AVAIL)
#define GSPI_PR55150_BAILOUT
-#endif /* BCMSPI */
-
#ifdef SDTEST
static void dhdsdio_testrcv(dhd_bus_t *bus, void *pkt, uint seq);
static void dhdsdio_sdtest_set(dhd_bus_t *bus, uint count);
-#endif // endif
+#endif
static int dhdsdio_checkdied(dhd_bus_t *bus, char *data, uint size);
#ifdef DHD_DEBUG
#if defined(DHD_FW_COREDUMP)
static int dhdsdio_mem_dump(dhd_bus_t *bus);
-static int dhdsdio_get_mem_dump(dhd_bus_t *bus);
#endif /* DHD_FW_COREDUMP */
static int dhdsdio_devcap_set(dhd_bus_t *bus, uint8 cap);
static int dhdsdio_download_state(dhd_bus_t *bus, bool enter);
#endif /* DHD_UCODE_DOWNLOAD */
static int dhdsdio_download_code_file(dhd_bus_t *bus, char *image_path);
static int dhdsdio_download_nvram(dhd_bus_t *bus);
+#ifdef BCMEMBEDIMAGE
+static int dhdsdio_download_code_array(dhd_bus_t *bus);
+#endif
static int dhdsdio_bussleep(dhd_bus_t *bus, bool sleep);
static int dhdsdio_clkctl(dhd_bus_t *bus, uint target, bool pendok);
static uint8 dhdsdio_sleepcsr_get(dhd_bus_t *bus);
static int dhdsdio_sdclk(dhd_bus_t *bus, bool on);
static void dhdsdio_advertise_bus_cleanup(dhd_pub_t *dhdp);
static void dhdsdio_advertise_bus_remove(dhd_pub_t *dhdp);
+#ifdef SUPPORT_MULTIPLE_BOARD_REV_FROM_DT
+int dhd_get_system_rev(void);
+#endif /* SUPPORT_MULTIPLE_BOARD_REV_FROM_DT */
+
+#ifdef WLMEDIA_HTSF
+#include <htsf.h>
+extern uint32 dhd_get_htsf(void *dhd, int ifidx);
+#endif /* WLMEDIA_HTSF */
#if defined(BT_OVER_SDIO)
static int extract_hex_field(char * line, uint16 start_pos, uint16 num_chars, uint16 * value);
return err;
}
-#ifdef BCMSPI
-static void
-dhdsdio_wkwlan(dhd_bus_t *bus, bool on)
-{
- int err;
- uint32 regdata;
- bcmsdh_info_t *sdh = bus->sdh;
-
- if (bus->sih->buscoretype == SDIOD_CORE_ID) {
- /* wake up wlan function :WAKE_UP goes as ht_avail_request and alp_avail_request */
- regdata = bcmsdh_cfg_read_word(sdh, SDIO_FUNC_0, SPID_CONFIG, NULL);
- DHD_INFO(("F0 REG0 rd = 0x%x\n", regdata));
-
- if (on == TRUE)
- regdata |= WAKE_UP;
- else
- regdata &= ~WAKE_UP;
-
- bcmsdh_cfg_write_word(sdh, SDIO_FUNC_0, SPID_CONFIG, regdata, &err);
- }
-}
-#endif /* BCMSPI */
#ifdef USE_OOB_GPIO1
static int
bcmsdh_gpioouten(bus->sdh, GPIO_DEV_WAKEUP);
- addr = SI_ENUM_BASE(bus->sih) + OFFSETOF(chipcregs_t, chipcontrol_addr);
- data = SI_ENUM_BASE(bus->sih) + OFFSETOF(chipcregs_t, chipcontrol_data);
+ addr = SI_ENUM_BASE + OFFSETOF(chipcregs_t, chipcontrol_addr);
+ data = SI_ENUM_BASE + OFFSETOF(chipcregs_t, chipcontrol_data);
/* Set device for gpio1 wakeup */
bcmsdh_reg_write(bus->sdh, addr, 4, 2);
}
#endif /* USE_OOB_GPIO1 */
-#ifndef BCMSPI
/*
* Query if FW is in SR mode
*/
if (bus->sih->chip == BCM43430_CHIP_ID ||
bus->sih->chip == BCM43018_CHIP_ID) {
/* check if fw initialized sr engine */
- addr = SI_ENUM_BASE(bus->sih) + OFFSETOF(chipcregs_t, sr_control1);
+ addr = SI_ENUM_BASE + OFFSETOF(chipcregs_t, sr_control1);
if (bcmsdh_reg_read(bus->sdh, addr, 4) != 0)
cap = TRUE;
return cap;
}
- if (
- 0) {
+ if (bus->sih->chip == BCM4324_CHIP_ID) {
+ addr = SI_ENUM_BASE + OFFSETOF(chipcregs_t, chipcontrol_addr);
+ data = SI_ENUM_BASE + OFFSETOF(chipcregs_t, chipcontrol_data);
+ bcmsdh_reg_write(bus->sdh, addr, 4, 3);
+ core_capext = bcmsdh_reg_read(bus->sdh, data, 4);
+ } else if ((bus->sih->chip == BCM4330_CHIP_ID) ||
+ (bus->sih->chip == BCM43362_CHIP_ID) ||
+ (BCM4347_CHIP(bus->sih->chip))) {
core_capext = FALSE;
} else if ((bus->sih->chip == BCM4335_CHIP_ID) ||
(bus->sih->chip == BCM4339_CHIP_ID) ||
+ (bus->sih->chip == BCM43349_CHIP_ID) ||
BCM4345_CHIP(bus->sih->chip) ||
(bus->sih->chip == BCM4354_CHIP_ID) ||
(bus->sih->chip == BCM4358_CHIP_ID) ||
(bus->sih->chip == BCM4371_CHIP_ID) ||
(BCM4349_CHIP(bus->sih->chip)) ||
(bus->sih->chip == BCM4350_CHIP_ID) ||
- (bus->sih->chip == BCM4362_CHIP_ID) ||
(bus->sih->chip == BCM43012_CHIP_ID) ||
- (bus->sih->chip == BCM43014_CHIP_ID) ||
- (bus->sih->chip == BCM43751_CHIP_ID) ||
- (bus->sih->chip == BCM43752_CHIP_ID)) {
+ (bus->sih->chip == BCM4362_CHIP_ID)) {
core_capext = TRUE;
} else {
core_capext = bcmsdh_reg_read(bus->sdh,
if (!(core_capext))
return FALSE;
- if ((bus->sih->chip == BCM4335_CHIP_ID) ||
+ if (bus->sih->chip == BCM4324_CHIP_ID) {
+ /* FIX: Should change to query SR control register instead */
+ cap = TRUE;
+ } else if ((bus->sih->chip == BCM4335_CHIP_ID) ||
(bus->sih->chip == BCM4339_CHIP_ID) ||
+ (bus->sih->chip == BCM43349_CHIP_ID) ||
BCM4345_CHIP(bus->sih->chip) ||
(bus->sih->chip == BCM4354_CHIP_ID) ||
(bus->sih->chip == BCM4358_CHIP_ID) ||
(bus->sih->chip == BCM4371_CHIP_ID) ||
(bus->sih->chip == BCM4350_CHIP_ID)) {
uint32 enabval = 0;
- addr = SI_ENUM_BASE(bus->sih) + OFFSETOF(chipcregs_t, chipcontrol_addr);
- data = SI_ENUM_BASE(bus->sih) + OFFSETOF(chipcregs_t, chipcontrol_data);
+ addr = SI_ENUM_BASE + OFFSETOF(chipcregs_t, chipcontrol_addr);
+ data = SI_ENUM_BASE + OFFSETOF(chipcregs_t, chipcontrol_data);
bcmsdh_reg_write(bus->sdh, addr, 4, CC_PMUCC3);
enabval = bcmsdh_reg_read(bus->sdh, data, 4);
return cap;
}
+static int
+dhdsdio_srwar_init(dhd_bus_t *bus)
+{
+ bcmsdh_gpio_init(bus->sdh);
+
+#ifdef USE_OOB_GPIO1
+ dhdsdio_oobwakeup_init(bus);
+#endif
+
+
+ return 0;
+}
+
static int
dhdsdio_sr_init(dhd_bus_t *bus)
{
uint8 val;
int err = 0;
+ if ((bus->sih->chip == BCM4334_CHIP_ID) && (bus->sih->chiprev == 2))
+ dhdsdio_srwar_init(bus);
+
+
if (bus->sih->chip == BCM43012_CHIP_ID) {
val = bcmsdh_cfg_read(bus->sdh, SDIO_FUNC_1, SBSDIO_FUNC1_WAKEUPCTRL, NULL);
val |= 1 << SBSDIO_FUNC1_WCTRL_ALPWAIT_SHIFT;
CHIPID(bus->sih->chip) == BCM43018_CHIP_ID ||
CHIPID(bus->sih->chip) == BCM4339_CHIP_ID ||
CHIPID(bus->sih->chip) == BCM43012_CHIP_ID ||
- CHIPID(bus->sih->chip) == BCM4362_CHIP_ID ||
- CHIPID(bus->sih->chip) == BCM43014_CHIP_ID ||
- CHIPID(bus->sih->chip) == BCM43751_CHIP_ID ||
- CHIPID(bus->sih->chip) == BCM43752_CHIP_ID)
+ CHIPID(bus->sih->chip) == BCM4362_CHIP_ID)
dhdsdio_devcap_set(bus, SDIOD_CCCR_BRCM_CARDCAP_CMD_NODEC);
if (bus->sih->chip == BCM43012_CHIP_ID) {
return 0;
}
-#endif /* BCMSPI */
/*
* FIX: Be sure KSO bit is enabled
#define DEFAULT_MAX_KSO_ATTEMPTS (PMU_MAX_TRANSITION_DLY/KSO_WAIT_US)
#ifndef CUSTOM_MAX_KSO_ATTEMPTS
#define CUSTOM_MAX_KSO_ATTEMPTS DEFAULT_MAX_KSO_ATTEMPTS
-#endif // endif
+#endif
static int
dhdsdio_clk_kso_enab(dhd_bus_t *bus, bool on)
bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_1, SBSDIO_FUNC1_SLEEPCSR, wr_val, &err);
+
/* In case of 43012 chip, the chip could go down immediately after KSO bit is cleared.
* So the further reads of KSO register could fail. Thereby just bailing out immediately
* after clearing KSO bit, to avoid polling of KSO bit.
bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_1, SBSDIO_FUNC1_SLEEPCSR, wr_val, &err);
} while (try_cnt++ < CUSTOM_MAX_KSO_ATTEMPTS);
+
if (try_cnt > 2)
KSO_DBG(("%s> op:%s, try_cnt:%d, rd_val:%x, ERR:%x \n",
__FUNCTION__, (on ? "KSO_SET" : "KSO_CLR"), try_cnt, rd_val, err));
DHD_TRACE(("%s: Request SD clk\n", __FUNCTION__));
dhdsdio_clkctl(bus, CLK_SDONLY, FALSE);
}
+
+ if ((bus->sih->chip == BCM4334_CHIP_ID) && (bus->sih->chiprev == 2)) {
+ SPINWAIT_SLEEP(sdioh_spinwait_sleep,
+ (bcmsdh_gpioin(bus->sdh, GPIO_DEV_SRSTATE) != TRUE),
+ GPIO_DEV_SRSTATE_TIMEOUT);
+
+ if (bcmsdh_gpioin(bus->sdh, GPIO_DEV_SRSTATE) == FALSE) {
+ DHD_ERROR(("ERROR: GPIO_DEV_SRSTATE still low!\n"));
+ }
+ }
#ifdef USE_CMD14
err = bcmsdh_sleep(bus->sdh, FALSE);
if (SLPAUTO_ENAB(bus) && (err != 0)) {
clkctl = 0;
sdh = bus->sdh;
+
if (!KSO_ENAB(bus))
return BCME_OK;
/* Request HT Avail */
clkreq = bus->alp_only ? SBSDIO_ALP_AVAIL_REQ : SBSDIO_HT_AVAIL_REQ;
-#ifdef BCMSPI
- dhdsdio_wkwlan(bus, TRUE);
-#endif /* BCMSPI */
+
bcmsdh_cfg_write(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR, clkreq, &err);
if (err) {
DHD_ERROR(("%s: HT Avail request error: %d\n", __FUNCTION__, err));
}
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
else if (ht_avail_error == HT_AVAIL_ERROR_MAX) {
bus->dhd->hang_reason = HANG_REASON_HT_AVAIL_ERROR;
dhd_os_send_hang_message(bus->dhd);
}
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27) */
return BCME_ERROR;
} else {
ht_avail_error = 0;
}
+
/* Check current status */
clkctl = bcmsdh_cfg_read(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR, &err);
if (err) {
bcmsdh_cfg_write(sdh, SDIO_FUNC_1, SBSDIO_DEVICE_CTL, devctl, &err);
}
}
-#ifndef BCMSDIOLITE
+
/* Otherwise, wait here (polling) for HT Avail */
if (!SBSDIO_CLKAV(clkctl, bus->alp_only)) {
SPINWAIT_SLEEP(sdioh_spinwait_sleep,
__FUNCTION__, PMU_MAX_TRANSITION_DLY, clkctl));
return BCME_ERROR;
}
-#endif /* BCMSDIOLITE */
+
/* Mark clock available */
bus->clkstate = CLK_AVAIL;
DHD_INFO(("CLKCTL: turned ON\n"));
return BCME_ERROR;
}
}
-#ifdef BCMSPI
- dhdsdio_wkwlan(bus, FALSE);
-#endif /* BCMSPI */
}
return BCME_OK;
}
static int
dhdsdio_sdclk(dhd_bus_t *bus, bool on)
{
-#ifndef BCMSPI
int err;
int32 iovalue;
}
bus->clkstate = CLK_NONE;
}
-#endif /* BCMSPI */
return BCME_OK;
}
/* Now remove the SD clock */
ret = dhdsdio_sdclk(bus, FALSE);
#ifdef DHD_DEBUG
- if (bus->dhd->dhd_console_ms == 0)
+ if (dhd_console_ms == 0)
#endif /* DHD_DEBUG */
if (bus->poll == 0)
dhd_os_wd_timer(bus->dhd, 0);
bcmsdh_info_t *sdh = bus->sdh;
sdpcmd_regs_t *regs = bus->regs;
uint retries = 0;
-#if defined(BCMSDIOH_STD)
- uint32 sd3_tuning_disable = FALSE;
-#endif /* BCMSDIOH_STD */
DHD_INFO(("dhdsdio_bussleep: request %s (currently %s)\n",
(sleep ? "SLEEP" : "WAKE"),
if (sleep) {
/* Don't sleep if something is pending */
#ifdef DHD_USE_IDLECOUNT
- if (bus->dpc_sched || bus->rxskip || pktq_n_pkts_tot(&bus->txq) ||
- bus->readframes || bus->ctrl_frame_stat)
+ if (bus->dpc_sched || bus->rxskip || pktq_len(&bus->txq) || bus->readframes ||
+ bus->ctrl_frame_stat)
#else
- if (bus->dpc_sched || bus->rxskip || pktq_n_pkts_tot(&bus->txq))
+ if (bus->dpc_sched || bus->rxskip || pktq_len(&bus->txq))
#endif /* DHD_USE_IDLECOUNT */
return BCME_BUSY;
}
#endif /* !BT_OVER_SDIO */
+
if (!SLPAUTO_ENAB(bus)) {
/* Disable SDIO interrupts (no longer interested) */
bcmsdh_intr_disable(bus->sdh);
/* Change state */
bus->sleeping = TRUE;
-#if defined(BCMSDIOH_STD)
- sd3_tuning_disable = TRUE;
- err = bcmsdh_iovar_op(bus->sdh, "sd3_tuning_disable", NULL, 0,
- &sd3_tuning_disable, sizeof(sd3_tuning_disable), TRUE);
-#endif /* BCMSDIOH_STD */
#if defined(SUPPORT_P2P_GO_PS)
wake_up(&bus->bus_sleep);
#endif /* LINUX && SUPPORT_P2P_GO_PS */
/* Force pad isolation off if possible (in case power never toggled) */
bcmsdh_cfg_write(sdh, SDIO_FUNC_1, SBSDIO_DEVICE_CTL, 0, NULL);
+
/* Make sure the controller has the bus up */
dhdsdio_clkctl(bus, CLK_AVAIL, FALSE);
dhd_pub_t *dhd = bus->dhd;
net = dhd_idx2net(dhd, 0);
if (net != NULL) {
- DHD_ERROR(("<< WIFI HANG by KSO Enabled failure\n"));
+ DHD_ERROR(("<<<<<< WIFI HANG by KSO Enabled failure\n"));
dhd_os_sdunlock(dhd);
net_os_send_hang_message(net);
dhd_os_sdlock(dhd);
} else {
- DHD_ERROR(("<< WIFI HANG Fail because net is NULL\n"));
+ DHD_ERROR(("<<<<< WIFI HANG Fail because net is NULL\n"));
}
}
#endif /* BT_OVER_SDIO */
if (err == 0) {
/* Change state */
bus->sleeping = FALSE;
-#if defined(BCMSDIOH_STD)
- sd3_tuning_disable = FALSE;
- err = bcmsdh_iovar_op(bus->sdh, "sd3_tuning_disable", NULL, 0,
- &sd3_tuning_disable, sizeof(sd3_tuning_disable), TRUE);
-#endif /* BCMSDIOH_STD */
}
}
}
#endif /* BT_OVER_SDIO */
-#ifdef USE_DYNAMIC_F2_BLKSIZE
int dhdsdio_func_blocksize(dhd_pub_t *dhd, int function_num, int block_size)
{
int func_blk_size = function_num;
bcmerr = dhd_bus_iovar_op(dhd, "sd_blocksize", NULL,
0, &func_blk_size, sizeof(int32), IOV_SET);
if (bcmerr != BCME_OK) {
- DHD_ERROR(("%s: Set F%d Block size error\n", __FUNCTION__, function_num));
+ DHD_ERROR(("%s: Set F2 Block size error\n", __FUNCTION__));
return BCME_ERROR;
}
}
return BCME_OK;
}
-#endif /* USE_DYNAMIC_F2_BLKSIZE */
-#if defined(OOB_INTR_ONLY) || defined(BCMSPI_ANDROID) || defined(FORCE_WOWLAN)
+#if defined(OOB_INTR_ONLY) || defined(FORCE_WOWLAN)
void
dhd_enable_oob_intr(struct dhd_bus *bus, bool enable)
{
-#if defined(BCMSPI_ANDROID)
- bcmsdh_intr_enable(bus->sdh);
-#elif defined(HW_OOB) || defined(FORCE_WOWLAN)
+#if defined(HW_OOB) || defined(FORCE_WOWLAN)
bcmsdh_enable_hw_oob_intr(bus->sdh, enable);
#else
sdpcmd_regs_t *regs = bus->regs;
dhdsdio_clkctl(bus, CLK_SDONLY, FALSE);
#endif /* !defined(HW_OOB) */
}
-#endif /* defined(OOB_INTR_ONLY) || defined(BCMSPI_ANDROID) */
+#endif
int
dhd_bus_txdata(struct dhd_bus *bus, void *pkt)
BCM_REFERENCE(datalen);
#endif /* SDTEST */
-#ifdef DHD_ULP
- dhd_ulp_set_path(bus->dhd, DHD_ULP_TX_DATA);
-#endif /* DHD_ULP */
-
prec = PRIO2PREC((PKTPRIO(pkt) & PRIOMASK));
/* move from dhdsdio_sendfromq(), try to orphan skb early */
- if (bus->dhd->conf->orphan_move == 1)
+ if (bus->dhd->conf->orphan_move)
PKTORPHAN(pkt, bus->dhd->conf->tsq);
/* Check for existing queue, current flow-control, pending event, or pending clock */
- if (dhd_deferred_tx || bus->fcstate || pktq_n_pkts_tot(&bus->txq) || bus->dpc_sched ||
+ if (dhd_deferred_tx || bus->fcstate || pktq_len(&bus->txq) || bus->dpc_sched ||
(!DATAOK(bus)) || (bus->flowcontrol & NBITVAL(prec)) ||
(bus->clkstate != CLK_AVAIL)) {
bool deq_ret;
int pkq_len = 0;
- DHD_TRACE(("%s: deferring pktq len %d\n", __FUNCTION__,
- pktq_n_pkts_tot(&bus->txq)));
+ DHD_TRACE(("%s: deferring pktq len %d\n", __FUNCTION__, pktq_len(&bus->txq)));
bus->fcqueued++;
/* Priority based enq */
if (dhd_doflow) {
dhd_os_sdlock_txq(bus->dhd);
- pkq_len = pktq_n_pkts_tot(&bus->txq);
+ pkq_len = pktq_len(&bus->txq);
dhd_os_sdunlock_txq(bus->dhd);
}
if (dhd_doflow && pkq_len >= FCHI) {
#ifdef PROP_TXSTATUS
wlfc_enabled = (dhd_wlfc_flowcontrol(bus->dhd, ON, FALSE) !=
WLFC_UNSUPPORTED);
-#endif // endif
+#endif
if (!wlfc_enabled && dhd_doflow) {
dhd_txflowcontrol(bus->dhd, ALL_INTERFACES, ON);
}
#ifdef DHD_DEBUG
dhd_os_sdlock_txq(bus->dhd);
- if (pktqprec_n_pkts(&bus->txq, prec) > qcount[prec])
- qcount[prec] = pktqprec_n_pkts(&bus->txq, prec);
+ if (pktq_plen(&bus->txq, prec) > qcount[prec])
+ qcount[prec] = pktq_plen(&bus->txq, prec);
dhd_os_sdunlock_txq(bus->dhd);
-#endif // endif
+#endif
/* Schedule DPC if needed to send queued packet(s) */
if (dhd_deferred_tx && !bus->dpc_sched) {
bus->dpc_sched = TRUE;
dhd_sched_dpc(bus->dhd);
}
- if(pktq_n_pkts_tot(&bus->txq) >= bus->dhd->conf->deferred_tx_len &&
+ if(pktq_len(&bus->txq) >= bus->dhd->conf->deferred_tx_len &&
dhd_os_wd_timer_enabled(bus->dhd) == FALSE) {
bus->dpc_sched = TRUE;
dhd_sched_dpc(bus->dhd);
#ifdef SDTEST
chan = (bus->ext_loop ? SDPCM_TEST_CHANNEL : SDPCM_DATA_CHANNEL);
-#endif // endif
+#endif
/* Lock: we're about to use shared data/code (and SDIO) */
dhd_os_sdlock(bus->dhd);
frame = (uint8*)PKTDATA(osh, pkt);
pkt_len = (uint16)PKTLEN(osh, pkt);
+#ifdef WLMEDIA_HTSF
+ frame = (uint8*)PKTDATA(osh, pkt);
+ if (PKTLEN(osh, pkt) >= 100) {
+ htsf_ts = (htsfts_t*) (frame + HTSF_HOSTOFFSET + 12);
+ if (htsf_ts->magic == HTSFMAGIC) {
+ htsf_ts->c20 = get_cycles();
+ htsf_ts->t20 = dhd_get_htsf(bus->dhd->info, 0);
+ }
+ }
+#endif /* WLMEDIA_HTSF */
#ifdef PKT_STATICS
len = (uint16)PKTLEN(osh, pkt);
switch(chan) {
#ifdef DHD_LOSSLESS_ROAMING
uint8 *pktdata;
struct ether_header *eh;
-#ifdef BDC
- struct bdc_header *bdc_header;
- uint8 data_offset;
-#endif // endif
#endif /* DHD_LOSSLESS_ROAMING */
DHD_TRACE(("%s: Enter\n", __FUNCTION__));
pktdata = (uint8 *)PKTDATA(osh, pkts[i]);
#ifdef BDC
/* Skip BDC header */
- bdc_header = (struct bdc_header *)pktdata;
- data_offset = bdc_header->dataOffset;
- pktdata += BDC_HEADER_LEN + (data_offset << 2);
-#endif // endif
+ pktdata += BDC_HEADER_LEN + ((struct bdc_header *)pktdata)->dataOffset;
+#endif
eh = (struct ether_header *)pktdata;
if (eh->ether_type == hton16(ETHER_TYPE_802_1X)) {
uint8 prio = (uint8)PKTPRIO(pkts[i]);
/* Restore to original priority for 802.1X packet */
if (prio == PRIO_8021D_NC) {
PKTSETPRIO(pkts[i], dhd->prio_8021x);
-#ifdef BDC
- /* Restore to original priority in BDC header */
- bdc_header->priority =
- (dhd->prio_8021x & BDC_PRIORITY_MASK);
-#endif // endif
}
}
#endif /* DHD_LOSSLESS_ROAMING */
if (dhd_doflow) {
dhd_os_sdlock_txq(bus->dhd);
- txpktqlen = pktq_n_pkts_tot(&bus->txq);
+ txpktqlen = pktq_len(&bus->txq);
dhd_os_sdunlock_txq(bus->dhd);
}
bool wlfc_enabled = FALSE;
#ifdef PROP_TXSTATUS
wlfc_enabled = (dhd_wlfc_flowcontrol(dhd, OFF, TRUE) != WLFC_UNSUPPORTED);
-#endif // endif
+#endif
if (!wlfc_enabled && dhd_doflow && dhd->txoff) {
dhd_txflowcontrol(dhd, ALL_INTERFACES, OFF);
}
}
doff += sdpcm_hdrlen;
-#ifndef BCMSPI
/* Round send length to next SDIO block */
if (bus->roundup && bus->blocksize && (len > bus->blocksize)) {
uint16 pad = bus->blocksize - (len % bus->blocksize);
} else if (len % DHD_SDALIGN) {
len += DHD_SDALIGN - (len % DHD_SDALIGN);
}
-#endif /* BCMSPI */
/* Satisfy length-alignment requirements */
if (forcealign && (len & (ALIGNMENT - 1)))
ASSERT(ISALIGNED((uintptr)frame, 2));
+
/* Need to lock here to protect txseq and SDIO tx calls */
dhd_os_sdlock(bus->dhd);
if (bus->dhd->conf->txctl_tmo_fix > 0 && !TXCTLOK(bus)) {
if (!TXCTLOK(bus) || !dhd_ulp_f2_ready(bus->dhd, bus->sdh))
#else
if (!TXCTLOK(bus))
-#endif // endif
+#endif
{
DHD_INFO(("%s: No bus credit bus->tx_max %d, bus->tx_seq %d\n",
__FUNCTION__, bus->tx_max, bus->tx_seq));
} else {
bus->dhd->txcnt_timeout++;
if (!bus->dhd->hang_was_sent) {
- DHD_ERROR(("%s: ctrl_frame_stat == TRUE txcnt_timeout=%d\n",
- __FUNCTION__, bus->dhd->txcnt_timeout));
-#ifdef BCMSDIO_RXLIM_POST
- DHD_ERROR(("%s: rxlim_en=%d, rxlim enable=%d, rxlim_addr=%d\n",
- __FUNCTION__,
- bus->dhd->conf->rxlim_en, bus->rxlim_en, bus->rxlim_addr));
-#endif /* BCMSDIO_RXLIM_POST */
+#ifdef CUSTOMER_HW4_DEBUG
+ uint32 status, retry = 0;
+ R_SDREG(status, &bus->regs->intstatus, retry);
+ DHD_TRACE_HW4(("%s: txcnt_timeout, INT status=0x%08X\n",
+ __FUNCTION__, status));
+ DHD_TRACE_HW4(("%s : tx_max : %d, tx_seq : %d, clkstate : %d \n",
+ __FUNCTION__, bus->tx_max, bus->tx_seq, bus->clkstate));
+#endif /* CUSTOMER_HW4_DEBUG */
+ DHD_ERROR(("%s: ctrl_frame_stat == TRUE txcnt_timeout=%d, bus->tx_max %d, bus->tx_seq %d\n",
+ __FUNCTION__, bus->dhd->txcnt_timeout, bus->tx_max, bus->tx_seq));
}
#ifdef DHD_FW_COREDUMP
/* Collect socram dump */
- if ((bus->dhd->memdump_enabled) &&
- (bus->dhd->txcnt_timeout >= MAX_CNTL_TX_TIMEOUT)) {
+ if (bus->dhd->memdump_enabled) {
/* collect core dump */
bus->dhd->memdump_type = DUMP_TYPE_RESUMED_ON_TIMEOUT_TX;
dhd_os_sdunlock(bus->dhd);
} else if (DHD_HDRS_ON()) {
prhex("TxHdr", frame, MIN(len, 16));
}
-#endif // endif
+#endif
#ifdef PKT_STATICS
tx_statics.ctrl_count++;
tx_statics.ctrl_size += len;
bus->dhd->tx_ctlpkts++;
if (bus->dhd->txcnt_timeout >= MAX_CNTL_TX_TIMEOUT) {
-#ifdef DHD_PM_CONTROL_FROM_FILE
- if (g_pm_control == TRUE) {
- return -BCME_ERROR;
- } else {
- return -ETIMEDOUT;
- }
-#else
return -ETIMEDOUT;
-#endif /* DHD_PM_CONTROL_FROM_FILE */
}
+
if (ret == BCME_NODEVICE)
err_nodevice++;
else
return -EIO;
/* Wait until control frame is available */
- timeleft = dhd_os_ioctl_resp_wait(bus->dhd, &bus->rxlen);
+ timeleft = dhd_os_ioctl_resp_wait(bus->dhd, &bus->rxlen, false);
dhd_os_sdlock(bus->dhd);
rxlen = bus->rxlen;
dhd_sched_dpc(bus->dhd);
/* Wait until control frame is available */
- timeleft = dhd_os_ioctl_resp_wait(bus->dhd, &bus->rxlen);
+ timeleft = dhd_os_ioctl_resp_wait(bus->dhd, &bus->rxlen, true);
dhd_os_sdlock(bus->dhd);
rxlen = bus->rxlen;
bus->dhd->rx_ctlerrs++;
if (bus->dhd->rxcnt_timeout >= MAX_CNTL_RX_TIMEOUT) {
-#ifdef DHD_PM_CONTROL_FROM_FILE
- if (g_pm_control == TRUE) {
- return -BCME_ERROR;
- } else {
- return -ETIMEDOUT;
- }
-#else
return -ETIMEDOUT;
-#endif /* DHD_PM_CONTROL_FROM_FILE */
}
+
+
if (bus->dhd->dongle_trap_occured)
return -EREMOTEIO;
IOV_VARS,
#ifdef SOFTAP
IOV_FWPATH,
-#endif // endif
+#endif
IOV_TXGLOMSIZE,
IOV_TXGLOMMODE,
IOV_HANGREPORT,
IOV_TXINRX_THRES,
IOV_SDIO_SUSPEND
-#if defined(DEBUGGER) || defined(DHD_DSCOPE)
- IOV_GDB_SERVER, /**< starts gdb server on given interface */
-#endif /* DEBUGGER || DHD_DSCOPE */
};
const bcm_iovar_t dhdsdio_iovars[] = {
{"devsleep", IOV_DEVSLEEP, 0, 0, IOVT_UINT32, 0 },
#ifdef SOFTAP
{"fwpath", IOV_FWPATH, 0, 0, IOVT_BUFFER, 0 },
-#endif // endif
+#endif
{"txglomsize", IOV_TXGLOMSIZE, 0, 0, IOVT_UINT32, 0 },
{"fw_hang_report", IOV_HANGREPORT, 0, 0, IOVT_BOOL, 0 },
{"txinrx_thres", IOV_TXINRX_THRES, 0, 0, IOVT_INT32, 0 },
{"sdio_suspend", IOV_SDIO_SUSPEND, 0, 0, IOVT_UINT32, 0 },
-#if defined(DEBUGGER) || defined(DHD_DSCOPE)
- {"gdb_server", IOV_GDB_SERVER, 0, 0, IOVT_UINT32, 0 },
-#endif /* DEBUGGER || DHD_DSCOPE */
{NULL, 0, 0, 0, 0, 0 }
};
dhd_bus_t *bus = dhdp->bus;
#if defined(DHD_WAKE_STATUS) && defined(DHD_WAKE_EVENT_STATUS)
int i;
-#endif // endif
+#endif
bcm_bprintf(strbuf, "Bus SDIO structure:\n");
bcm_bprintf(strbuf, "hostintmask 0x%08x intstatus 0x%08x sdpcm_ver %d\n",
bus->hostintmask, bus->intstatus, bus->sdpcm_ver);
bcm_bprintf(strbuf, "fcstate %d qlen %u tx_seq %d, max %d, rxskip %d rxlen %u rx_seq %d\n",
- bus->fcstate, pktq_n_pkts_tot(&bus->txq), bus->tx_seq, bus->tx_max, bus->rxskip,
+ bus->fcstate, pktq_len(&bus->txq), bus->tx_seq, bus->tx_max, bus->rxskip,
bus->rxlen, bus->rx_seq);
bcm_bprintf(strbuf, "intr %d intrcount %u lastintrs %u spurious %u\n",
bus->intr, bus->intrcount, bus->lastintrs, bus->spurious);
DHD_ERROR(("%s: membytes transfer failed\n", __FUNCTION__));
break;
}
- if (dsize <= MAX_MEM_BUF && !write)
+ if (dsize <= MAX_MEM_BUF && !write) {
memcpy(data, bus->membuf, dsize);
+ }
/* Adjust for next transfer (if any) */
if ((size -= dsize)) {
sh->console_addr = ltoh32(sh->console_addr);
sh->msgtrace_addr = ltoh32(sh->msgtrace_addr);
-#ifdef BCMSDIO_RXLIM_POST
- if (sh->flags & SDPCM_SHARED_RXLIM_POST) {
- if (bus->dhd->conf->rxlim_en)
- bus->rxlim_en = !!sh->msgtrace_addr;
- bus->rxlim_addr = sh->msgtrace_addr;
- DHD_INFO(("%s: rxlim_en=%d, rxlim enable=%d, rxlim_addr=%d\n",
- __FUNCTION__,
- bus->dhd->conf->rxlim_en, bus->rxlim_en, bus->rxlim_addr));
- sh->flags &= ~SDPCM_SHARED_RXLIM_POST;
- } else {
- bus->rxlim_en = 0;
- DHD_INFO(("%s: FW has no rx limit post support\n", __FUNCTION__));
- }
-#endif /* BCMSDIO_RXLIM_POST */
-
if ((sh->flags & SDPCM_SHARED_VERSION_MASK) == 3 && SDPCM_SHARED_VERSION == 1)
return BCME_OK;
line[n] = ch;
}
+
if (n > 0) {
if (line[n - 1] == '\r')
n--;
return dhdsdio_mem_dump(bus);
}
-int
-dhd_bus_get_mem_dump(dhd_pub_t *dhdp)
-{
- if (!dhdp) {
- DHD_ERROR(("%s: dhdp is NULL\n", __FUNCTION__));
- return BCME_ERROR;
- }
-
- return dhdsdio_get_mem_dump(dhdp->bus);
-}
-
static int
-dhdsdio_get_mem_dump(dhd_bus_t *bus)
+dhdsdio_mem_dump(dhd_bus_t *bus)
{
- int ret = BCME_ERROR;
- int size = bus->ramsize; /* Full mem size */
+ int ret = 0;
+ int size; /* Full mem size */
uint32 start = bus->dongle_ram_base; /* Start address */
uint read_size = 0; /* Read size of each iteration */
- uint8 *p_buf = NULL, *databuf = NULL;
+ uint8 *buf = NULL, *databuf = NULL;
/* Get full mem size */
- p_buf = dhd_get_fwdump_buf(bus->dhd, size);
- if (!p_buf) {
- DHD_ERROR(("%s: Out of memory (%d bytes)\n",
- __FUNCTION__, size));
- return BCME_ERROR;
+ size = bus->ramsize;
+ buf = dhd_get_fwdump_buf(bus->dhd, size);
+ if (!buf) {
+ DHD_ERROR(("%s: Out of memory (%d bytes)\n", __FUNCTION__, size));
+ return -1;
}
dhd_os_sdlock(bus->dhd);
/* Read mem content */
DHD_ERROR(("Dump dongle memory\n"));
- databuf = p_buf;
- while (size) {
+ databuf = buf;
+ while (size)
+ {
read_size = MIN(MEMBLOCK, size);
- ret = dhdsdio_membytes(bus, FALSE, start, databuf, read_size);
- if (ret) {
+ if ((ret = dhdsdio_membytes(bus, FALSE, start, databuf, read_size)))
+ {
DHD_ERROR(("%s: Error membytes %d\n", __FUNCTION__, ret));
ret = BCME_ERROR;
break;
dhd_os_sdunlock(bus->dhd);
- return ret;
-}
-
-static int
-dhdsdio_mem_dump(dhd_bus_t *bus)
-{
- dhd_pub_t *dhdp;
- int ret = BCME_ERROR;
-
- dhdp = bus->dhd;
- if (!dhdp) {
- DHD_ERROR(("%s: dhdp is NULL\n", __FUNCTION__));
- return ret;
- }
-
- ret = dhdsdio_get_mem_dump(bus);
- if (ret) {
- DHD_ERROR(("%s: failed to get mem dump, err=%d\n",
- __FUNCTION__, ret));
- } else {
- /* schedule a work queue to perform actual memdump.
- * dhd_mem_dump() performs the job
- */
- dhd_schedule_memdump(dhdp, dhdp->soc_ram, dhdp->soc_ram_length);
- /* soc_ram free handled in dhd_{free,clear} */
+ /* schedule a work queue to perform actual memdump. dhd_mem_dump() performs the job */
+ if (!ret) {
+ /* buf, actually soc_ram free handled in dhd_{free,clear} */
+ dhd_schedule_memdump(bus->dhd, buf, bus->ramsize);
}
return ret;
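For reference, the dump loop above follows a generic chunked-copy shape; a sketch under the assumption of a hypothetical read_block() callback standing in for dhdsdio_membytes():

	#include <linux/types.h>

	#define CHUNK_SZ 2048	/* analogous to MEMBLOCK; value is illustrative */

	static int dump_region(u32 start, u8 *dst, size_t size,
			       int (*read_block)(u32 addr, u8 *buf, size_t len))
	{
		while (size) {
			size_t n = size < CHUNK_SZ ? size : CHUNK_SZ;
			int ret = read_block(start, dst, n);

			if (ret)
				return ret;	/* propagate the bus error */
			start += n;
			dst += n;
			size -= n;
		}
		return 0;
	}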
return (dhdsdio_mem_dump(bus));
#else
return -1;
-#endif // endif
+#endif
}
int
}
#ifdef DHD_DEBUG
+
+#define CC_PLL_CHIPCTRL_SERIAL_ENAB (1 << 24)
+#define CC_CHIPCTRL_JTAG_SEL (1 << 3)
+#define CC_CHIPCTRL_GPIO_SEL (0x3)
+#define CC_PLL_CHIPCTRL_SERIAL_ENAB_4334 (1 << 28)
+
static int
dhd_serialconsole(dhd_bus_t *bus, bool set, bool enable, int *bcmerror)
{
int int_val;
uint32 addr, data, uart_enab = 0;
+ uint32 jtag_sel = CC_CHIPCTRL_JTAG_SEL;
+ uint32 gpio_sel = CC_CHIPCTRL_GPIO_SEL;
- addr = SI_ENUM_BASE(bus->sih) + OFFSETOF(chipcregs_t, chipcontrol_addr);
- data = SI_ENUM_BASE(bus->sih) + OFFSETOF(chipcregs_t, chipcontrol_data);
+ addr = SI_ENUM_BASE + OFFSETOF(chipcregs_t, chipcontrol_addr);
+ data = SI_ENUM_BASE + OFFSETOF(chipcregs_t, chipcontrol_data);
*bcmerror = 0;
bcmsdh_reg_write(bus->sdh, addr, 4, 1);
return -1;
}
+ if (bus->sih->chip == BCM4330_CHIP_ID) {
+ uart_enab = CC_PLL_CHIPCTRL_SERIAL_ENAB;
+ } else if (bus->sih->chip == BCM4334_CHIP_ID ||
+ bus->sih->chip == BCM43340_CHIP_ID ||
+ bus->sih->chip == BCM43341_CHIP_ID ||
+ bus->sih->chip == BCM43342_CHIP_ID ||
+ 0) {
+ if (enable) {
+ /* Moved to PMU chipcontrol 1 from 4330 */
+ int_val &= ~gpio_sel;
+ int_val |= jtag_sel;
+ } else {
+ int_val |= gpio_sel;
+ int_val &= ~jtag_sel;
+ }
+ uart_enab = CC_PLL_CHIPCTRL_SERIAL_ENAB_4334;
+ }
+
if (!set)
return (int_val & uart_enab);
if (enable)
*bcmerror = BCME_SDIO_ERROR;
return -1;
}
+ if (bus->sih->chip == BCM4330_CHIP_ID) {
+ uint32 chipcontrol;
+ addr = SI_ENUM_BASE + OFFSETOF(chipcregs_t, chipcontrol);
+ chipcontrol = bcmsdh_reg_read(bus->sdh, addr, 4);
+ chipcontrol &= ~jtag_sel;
+ if (enable) {
+ chipcontrol |= jtag_sel;
+ chipcontrol &= ~gpio_sel;
+ }
+ bcmsdh_reg_write(bus->sdh, addr, 4, chipcontrol);
+ }
return (int_val & uart_enab);
}
-#endif // endif
+#endif
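The serial-console code above uses chipcommon's indirect chipcontrol access: write an index to chipcontrol_addr, then read-modify-write chipcontrol_data. A generic sketch, where the register offsets and the accessor callbacks are assumptions rather than values pulled from the headers:

	/* Illustrative indirect register update; reg_read32/reg_write32 stand in
	 * for bcmsdh_reg_read()/bcmsdh_reg_write() on the SDIO backplane. */
	static u32 chipcontrol_update(u32 cc_base, u32 idx,
				      u32 clear_bits, u32 set_bits,
				      u32 (*reg_read32)(u32 addr),
				      void (*reg_write32)(u32 addr, u32 val))
	{
		u32 addr_reg = cc_base + 0x650;	/* chipcontrol_addr (assumed offset) */
		u32 data_reg = cc_base + 0x654;	/* chipcontrol_data (assumed offset) */
		u32 val;

		reg_write32(addr_reg, idx);	/* select chipcontrol register <idx> */
		val = reg_read32(data_reg);
		val = (val & ~clear_bits) | set_bits;
		reg_write32(data_reg, val);
		return val;
	}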
static int
dhdsdio_doiovar(dhd_bus_t *bus, const bcm_iovar_t *vi, uint32 actionid, const char *name,
bool_val = (int_val != 0) ? TRUE : FALSE;
+
/* Some ioctls use the bus */
dhd_os_sdlock(bus->dhd);
sd1idle = bool_val;
break;
-#ifdef DHD_DEBUG
- case IOV_GVAL(IOV_CHECKDIED):
- bcmerror = dhdsdio_checkdied(bus, arg, len);
- break;
-#endif /* DHD_DEBUG */
+
case IOV_GVAL(IOV_RAMSIZE):
int_val = (int32)bus->ramsize;
else
bus->use_rxchain = bool_val;
break;
-#ifndef BCMSPI
case IOV_GVAL(IOV_ALIGNCTL):
int_val = (int32)dhd_alignctl;
bcopy(&int_val, arg, val_size);
case IOV_SVAL(IOV_ALIGNCTL):
dhd_alignctl = bool_val;
break;
-#endif /* BCMSPI */
case IOV_GVAL(IOV_SDALIGN):
int_val = DHD_SDALIGN;
bcopy(params, &sdreg, sizeof(sdreg));
- addr = SI_ENUM_BASE(bus->sih) + sdreg.offset;
+ addr = SI_ENUM_BASE + sdreg.offset;
size = sdreg.func;
int_val = (int32)bcmsdh_reg_read(bus->sdh, addr, size);
if (bcmsdh_regfail(bus->sdh))
bcopy(params, &sdreg, sizeof(sdreg));
- addr = SI_ENUM_BASE(bus->sih) + sdreg.offset;
+ addr = SI_ENUM_BASE + sdreg.offset;
size = sdreg.func;
bcmsdh_reg_write(bus->sdh, addr, size, sdreg.value);
if (bcmsdh_regfail(bus->sdh))
dhd_txminmax = (uint)int_val;
break;
-#ifdef DHD_DEBUG
case IOV_GVAL(IOV_SERIALCONS):
int_val = dhd_serialconsole(bus, FALSE, 0, &bcmerror);
if (bcmerror != 0)
case IOV_SVAL(IOV_SERIALCONS):
dhd_serialconsole(bus, TRUE, bool_val, &bcmerror);
break;
-#endif /* DHD_DEBUG */
+
#endif /* DHD_DEBUG */
+
#ifdef SDTEST
case IOV_GVAL(IOV_EXTLOOP):
int_val = (int32)bus->ext_loop;
bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_1, SBSDIO_FUNC1_MESBUSYCTRL,
((uint8)mesbusyctrl | 0x80), NULL);
break;
-#endif // endif
+#endif
+
case IOV_GVAL(IOV_DONGLEISOLATION):
int_val = bus->dhd->dongle_isolation;
ASSERT(bus->dhd->osh);
/* ASSERT(bus->cl_devid); */
- /* must release sdlock, since devreset also acquires it */
- dhd_os_sdunlock(bus->dhd);
dhd_bus_devreset(bus->dhd, (uint8)bool_val);
- dhd_os_sdlock(bus->dhd);
+
break;
/*
* softap firmware is updated through module parameter or android private command
}
break;
-#if defined(DEBUGGER) || defined(DHD_DSCOPE)
- case IOV_SVAL(IOV_GDB_SERVER):
- if (bool_val == TRUE) {
- debugger_init((void *) bus, &bus_ops, int_val, SI_ENUM_BASE(bus->sih));
- } else {
- debugger_close();
- }
- break;
-#endif /* DEBUGGER || DHD_DSCOPE */
-
default:
bcmerror = BCME_UNSUPPORTED;
break;
* Determine the length token:
* Varsize, converted to words, in lower 16-bits, checksum in upper 16-bits.
*/
-#ifdef DHD_DEBUG
if (bcmerror) {
varsizew = 0;
- } else
-#endif /* DHD_DEBUG */
- {
+ } else {
varsizew = varsize / 4;
varsizew = (~varsizew << 16) | (varsizew & 0x0000FFFF);
varsizew = htol32(varsizew);
return bcmerror;
}
-bool
-dhd_bus_is_multibp_capable(struct dhd_bus *bus)
-{
- return MULTIBP_CAP(bus->sih);
-}
-
static int
dhdsdio_download_state(dhd_bus_t *bus, bool enter)
{
*/
if (REMAP_ENAB(bus) && !si_socdevram_remap_isenb(bus->sih))
dhdsdio_devram_remap(bus, TRUE);
-#ifdef BCMSDIOLITE
- if (!si_setcore(bus->sih, CC_CORE_ID, 0)) {
- DHD_ERROR(("%s: Can't set to Chip Common core?\n", __FUNCTION__));
- bcmerror = BCME_ERROR;
- goto fail;
- }
-#else
+
if (!si_setcore(bus->sih, PCMCIA_CORE_ID, 0) &&
!si_setcore(bus->sih, SDIOD_CORE_ID, 0)) {
DHD_ERROR(("%s: Can't change back to SDIO core?\n", __FUNCTION__));
bcmerror = BCME_ERROR;
goto fail;
}
-#endif // endif
W_SDREG(0xFFFFFFFF, &bus->regs->intstatus, retries);
+
if (!(si_setcore(bus->sih, ARM7S_CORE_ID, 0)) &&
!(si_setcore(bus->sih, ARMCM3_CORE_ID, 0))) {
DHD_ERROR(("%s: Failed to find ARM core!\n", __FUNCTION__));
DHD_ERROR(("%s: could not write vars to RAM\n", __FUNCTION__));
goto fail;
}
-#ifdef BCMSDIOLITE
- if (!si_setcore(bus->sih, CC_CORE_ID, 0)) {
- DHD_ERROR(("%s: Can't set to Chip Common core?\n", __FUNCTION__));
- bcmerror = BCME_ERROR;
- goto fail;
- }
-#else
+
if (!si_setcore(bus->sih, PCMCIA_CORE_ID, 0) &&
!si_setcore(bus->sih, SDIOD_CORE_ID, 0)) {
DHD_ERROR(("%s: Can't change back to SDIO core?\n", __FUNCTION__));
bcmerror = BCME_ERROR;
goto fail;
}
-#endif // endif
W_SDREG(0xFFFFFFFF, &bus->regs->intstatus, retries);
/* switch back to arm core again */
if (bcmsdh_iovar_op(bus->sdh, "sd_blocksize", &fnum, sizeof(int32),
&bus->blocksize, sizeof(int32), FALSE) != BCME_OK) {
bus->blocksize = 0;
- DHD_ERROR(("%s: fail on fn %d %s get\n",
- __FUNCTION__, fnum, "sd_blocksize"));
+ DHD_ERROR(("%s: fail on %s get\n", __FUNCTION__, "sd_blocksize"));
} else {
- DHD_INFO(("%s: noted fn %d %s update, value now %d\n",
- __FUNCTION__, fnum, "sd_blocksize", bus->blocksize));
+ DHD_INFO(("%s: noted %s update, value now %d\n",
+ __FUNCTION__, "sd_blocksize", bus->blocksize));
dhdsdio_tune_fifoparam(bus);
}
if ((bus->dhd->busstate == DHD_BUS_DOWN) || bus->dhd->hang_was_sent) {
/* if Firmware already hangs disbale any interrupt */
- DHD_ERROR(("%s: making DHD_BUS_DOWN\n", __FUNCTION__));
bus->dhd->busstate = DHD_BUS_DOWN;
bus->hostintmask = 0;
bcmsdh_intr_disable(bus->sdh);
/* Turn off the bus (F2), free any pending packets */
DHD_INTR(("%s: disable SDIO interrupts\n", __FUNCTION__));
bcmsdh_intr_disable(bus->sdh);
-#ifndef BCMSPI
bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_0, SDIOD_CCCR_IOEN, SDIO_FUNC_ENABLE_1, NULL);
-#endif /* !BCMSPI */
/* Clear any pending interrupts now that F2 is disabled */
W_SDREG(local_hostintmask, &bus->regs->intstatus, retries);
/* Change our idea of bus state */
DHD_LINUX_GENERAL_LOCK(bus->dhd, flags);
- DHD_ERROR(("%s: making DHD_BUS_DOWN\n", __FUNCTION__));
bus->dhd->busstate = DHD_BUS_DOWN;
DHD_LINUX_GENERAL_UNLOCK(bus->dhd, flags);
}
#ifdef PROP_TXSTATUS
wlfc_enabled = (dhd_wlfc_cleanup_txq(bus->dhd, NULL, 0) != WLFC_UNSUPPORTED);
-#endif // endif
+#endif
if (!wlfc_enabled) {
#ifdef DHDTCPACK_SUPPRESS
/* Clean tcp_ack_info_tbl in order to prevent access to flushed pkt,
#if defined(BCMSDIOH_TXGLOM) && defined(BCMSDIOH_STD)
extern uint sd_txglom;
-#endif // endif
+#endif
void
dhd_txglom_enable(dhd_pub_t *dhdp, bool enable)
{
uint retries = 0;
uint8 ready, enable;
int err, ret = 0;
-#ifdef BCMSPI
- uint32 dstatus = 0; /* gSPI device-status bits */
-#else /* BCMSPI */
uint8 saveclk;
-#endif /* BCMSPI */
-#if defined(SDIO_ISR_THREAD)
- int intr_extn;
-#endif
DHD_TRACE(("%s: Enter\n", __FUNCTION__));
goto exit;
}
-#ifdef BCMSPI
- /* fake "ready" for spi, wake-wlan would have already enabled F1 and F2 */
- ready = (SDIO_FUNC_ENABLE_1 | SDIO_FUNC_ENABLE_2);
- enable = 0;
-
- /* Give the dongle some time to do its thing and set IOR2 */
- dhd_timeout_start(&tmo, WAIT_F2RXFIFORDY * WAIT_F2RXFIFORDY_DELAY * 1000);
- while (!enable && !dhd_timeout_expired(&tmo)) {
- dstatus = bcmsdh_cfg_read_word(bus->sdh, SDIO_FUNC_0, SPID_STATUS_REG, NULL);
- if (dstatus & STATUS_F2_RX_READY)
- enable = TRUE;
- }
-
- if (enable) {
- DHD_ERROR(("Took %u usec before dongle is ready\n", tmo.elapsed));
- enable = ready;
- } else {
- DHD_ERROR(("dstatus when timed out on f2-fifo not ready = 0x%x\n", dstatus));
- DHD_ERROR(("Waited %u usec, dongle is not ready\n", tmo.elapsed));
- ret = -1;
- goto exit;
- }
-
-#else /* !BCMSPI */
/* Force clocks on backplane to be sure F2 interrupt propagates */
saveclk = bcmsdh_cfg_read(bus->sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR, &err);
while (ready != enable && !dhd_timeout_expired(&tmo))
ready = bcmsdh_cfg_read(bus->sdh, SDIO_FUNC_0, SDIOD_CCCR_IORDY, NULL);
-#endif /* !BCMSPI */
-
DHD_ERROR(("%s: enable 0x%02x, ready 0x%02x (waited %uus)\n",
__FUNCTION__, enable, ready, tmo.elapsed));
-#if defined(SDIO_ISR_THREAD)
- if (dhdp->conf->intr_extn) {
- intr_extn = bcmsdh_cfg_read(bus->sdh, SDIO_FUNC_0, SDIOD_CCCR_INTR_EXTN, NULL);
- if (intr_extn & 0x1) {
- intr_extn |= 0x2;
- bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_0, SDIOD_CCCR_INTR_EXTN, intr_extn, NULL);
- }
- }
-#endif
/* If F2 successfully enabled, set core and enable interrupts */
if (ready == enable) {
/* Make sure we're talking to the core. */
-#ifdef BCMSDIOLITE
- bus->regs = si_setcore(bus->sih, CC_CORE_ID, 0);
- ASSERT(bus->regs != NULL);
-#else
if (!(bus->regs = si_setcore(bus->sih, PCMCIA_CORE_ID, 0)))
bus->regs = si_setcore(bus->sih, SDIOD_CORE_ID, 0);
ASSERT(bus->regs != NULL);
-#endif // endif
+
/* Set up the interrupt mask and enable interrupts */
bus->hostintmask = HOSTINTMASK;
/* corerev 4 could use the newer interrupt logic to detect the frames */
-#ifndef BCMSPI
if ((bus->sih->buscoretype == SDIOD_CORE_ID) && (bus->sdpcmrev == 4) &&
(bus->rxint_mode != SDIO_DEVICE_HMB_RXINT)) {
bus->hostintmask &= ~I_HMB_FRAME_IND;
bus->hostintmask |= I_XMTDATA_AVAIL;
}
-#endif /* BCMSPI */
W_SDREG(bus->hostintmask, &bus->regs->hostintmask, retries);
if (bus->sih->buscorerev < 15) {
bus->intdis = FALSE;
if (bus->intr) {
DHD_INTR(("%s: enable SDIO device interrupts\n", __FUNCTION__));
-#ifndef BCMSPI_ANDROID
bcmsdh_intr_enable(bus->sdh);
-#endif /* !BCMSPI_ANDROID */
} else {
DHD_INTR(("%s: disable SDIO interrupts\n", __FUNCTION__));
bcmsdh_intr_disable(bus->sdh);
}
-#ifndef BCMSPI
else {
/* Disable F2 again */
bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_1,
SBSDIO_FUNC1_CHIPCLKCSR, saveclk, &err);
}
-#endif /* !BCMSPI */
/* If we didn't come up, turn off backplane clock */
if (dhdp->busstate != DHD_BUS_DATA)
fail:
/* If we can't reach the device, signal failure */
- if (err || bcmsdh_regfail(sdh)) {
- DHD_ERROR(("%s: making DHD_BUS_DOWN\n", __FUNCTION__));
+ if (err || bcmsdh_regfail(sdh))
bus->dhd->busstate = DHD_BUS_DOWN;
- }
}
static void
goto done;
}
+
/* Read remainder of frame body into the rxctl buffer */
sdret = dhd_bcmsdh_recv_buf(bus, bcmsdh_cur_sbwad(sdh), SDIO_FUNC_2, F2SYNC,
(bus->rxctl + firstread), rdlen, NULL, NULL, NULL);
if (DHD_BYTES_ON() && DHD_CTL_ON()) {
prhex("RxCtrl", bus->rxctl, len);
}
-#endif // endif
+#endif
/* Point to valid data and indicate its length */
bus->rxctl += doff;
prhex("SUPERFRAME", PKTDATA(osh, pfirst),
MIN(PKTLEN(osh, pfirst), 48));
}
-#endif // endif
+#endif
+
/* Validate the superframe header */
dptr = (uint8 *)PKTDATA(osh, pfirst);
/* Check window for sanity */
if ((uint8)(txmax - bus->tx_seq) > 0x70) {
- DHD_INFO(("%s: got unlikely tx max %d with tx_seq %d\n",
+ DHD_ERROR(("%s: got unlikely tx max %d with tx_seq %d\n",
__FUNCTION__, txmax, bus->tx_seq));
txmax = bus->tx_max;
}
if (DHD_GLOM_ON()) {
prhex("subframe", dptr, 32);
}
-#endif // endif
+#endif
if ((uint16)~(sublen^check)) {
DHD_ERROR(("%s (subframe %d): HW hdr error: "
if (DHD_BYTES_ON() && DHD_DATA_ON()) {
prhex("Rx Subframe Data", dptr, dlen);
}
-#endif // endif
+#endif
PKTSETLEN(osh, pfirst, sublen);
PKTPULL(osh, pfirst, doff);
return num;
}
+
/* Return TRUE if there may be more frames to read */
static uint
dhdsdio_readframes(dhd_bus_t *bus, uint maxframes, bool *finished)
uint rxleft = 0; /* Remaining number of frames allowed */
int sdret; /* Return code from bcmsdh calls */
uint8 txmax; /* Maximum tx sequence offered */
-#ifdef BCMSPI
- uint32 dstatus = 0; /* gSPI device status bits of */
-#endif /* BCMSPI */
bool len_consistent; /* Result of comparing readahead len and len from hw-hdr */
uint8 *rxbuf;
int ifidx = 0;
#if defined(DHD_DEBUG) || defined(SDTEST)
bool sdtest = FALSE; /* To limit message spew from test mode */
-#endif // endif
+#endif
DHD_TRACE(("%s: Enter\n", __FUNCTION__));
bus->readframes = TRUE;
maxframes = bus->pktgen_count;
sdtest = TRUE;
}
-#endif // endif
+#endif
/* Not finished unless we encounter no more frames indication */
*finished = FALSE;
-#ifdef BCMSPI
- /* Get pktlen from gSPI device F0 reg. */
- if (bus->bus == SPI_BUS) {
- /* Peek in dstatus bits and find out size to do rx-read. */
- dstatus = bcmsdh_get_dstatus(bus->sdh);
- if (dstatus == 0)
- DHD_ERROR(("%s:ZERO spi dstatus, a case observed in PR61352 hit !!!\n",
- __FUNCTION__));
-
- DHD_TRACE(("Device status from regread = 0x%x\n", dstatus));
- DHD_TRACE(("Device status from bit-reconstruction = 0x%x\n",
- bcmsdh_get_dstatus((void *)bus->sdh)));
-
- if ((dstatus & STATUS_F2_PKT_AVAILABLE) && (((dstatus & STATUS_UNDERFLOW)) == 0)) {
- bus->nextlen = ((dstatus & STATUS_F2_PKT_LEN_MASK) >>
- STATUS_F2_PKT_LEN_SHIFT);
- /* '0' size with pkt-available interrupt is eqvt to 2048 bytes */
- bus->nextlen = (bus->nextlen == 0) ? SPI_MAX_PKT_LEN : bus->nextlen;
- if (bus->dwordmode)
- bus->nextlen = bus->nextlen << 2;
- DHD_TRACE(("Entering %s: length to be read from gSPI = %d\n",
- __FUNCTION__, bus->nextlen));
- } else {
- if (dstatus & STATUS_F2_PKT_AVAILABLE)
- DHD_ERROR(("Underflow during %s.\n", __FUNCTION__));
- else
- DHD_ERROR(("False pkt-available intr.\n"));
- *finished = TRUE;
- return (maxframes - rxleft);
- }
- }
-#endif /* BCMSPI */
for (rxseq = bus->rx_seq, rxleft = maxframes;
!bus->rxskip && rxleft && bus->dhd->busstate != DHD_BUS_DOWN;
*/
bus->dotxinrx = (bus->dhd->tcpack_sup_mode == TCPACK_SUP_DELAYTX) ?
FALSE : TRUE;
-#endif // endif
+#endif
}
/* Handle glomming separately */
bus->f2rxdata++;
ASSERT(sdret != BCME_PENDING);
-#ifdef BCMSPI
- if (bcmsdh_get_dstatus((void *)bus->sdh) &
- STATUS_UNDERFLOW) {
- bus->nextlen = 0;
- *finished = TRUE;
- DHD_ERROR(("%s: read %d control bytes failed "
- "due to spi underflow\n",
- __FUNCTION__, rdlen));
- /* dhd.rx_ctlerrs is higher level */
- bus->rxc_errors++;
- dhd_os_sdunlock_rxq(bus->dhd);
- continue;
- }
-#endif /* BCMSPI */
/* Control frame failures need retransmission */
if (sdret < 0) {
pkt, NULL, NULL);
bus->f2rxdata++;
ASSERT(sdret != BCME_PENDING);
-#ifdef BCMSPI
- if (bcmsdh_get_dstatus((void *)bus->sdh) & STATUS_UNDERFLOW) {
- bus->nextlen = 0;
- *finished = TRUE;
- DHD_ERROR(("%s (nextlen): read %d bytes failed due "
- "to spi underflow\n",
- __FUNCTION__, rdlen));
- PKTFREE(bus->dhd->osh, pkt, FALSE);
- bus->dhd->rx_errors++;
- dhd_os_sdunlock_rxq(bus->dhd);
- continue;
- }
-#endif /* BCMSPI */
if (sdret < 0) {
DHD_ERROR(("%s (nextlen): read %d bytes failed: %d\n",
}
/* Check for consistency with readahead info */
-#ifdef BCMSPI
- if (bus->bus == SPI_BUS) {
- if (bus->dwordmode) {
- uint16 spilen;
- spilen = ROUNDUP(len, 4);
- len_consistent = (nextlen != spilen);
- } else
- len_consistent = (nextlen != len);
- } else
-#endif /* BCMSPI */
len_consistent = (nextlen != (ROUNDUP(len, 16) >> 4));
if (len_consistent) {
/* Mismatch, force retry w/normal header (may be >4K) */
continue;
}
+
/* Extract software header fields */
chan = SDPCM_PACKET_CHANNEL(&bus->rxhdr[SDPCM_FRAMETAG_LEN]);
seq = SDPCM_PACKET_SEQUENCE(&bus->rxhdr[SDPCM_FRAMETAG_LEN]);
doff = SDPCM_DOFFSET_VALUE(&bus->rxhdr[SDPCM_FRAMETAG_LEN]);
txmax = SDPCM_WINDOW_VALUE(&bus->rxhdr[SDPCM_FRAMETAG_LEN]);
-#ifdef BCMSPI
- /* Save the readahead length if there is one */
- if (bus->bus == SPI_BUS) {
- /* Use reconstructed dstatus bits and find out readahead size */
- dstatus = bcmsdh_get_dstatus((void *)bus->sdh);
- DHD_INFO(("Device status from bit-reconstruction = 0x%x\n",
- bcmsdh_get_dstatus((void *)bus->sdh)));
- if (dstatus & STATUS_F2_PKT_AVAILABLE) {
- bus->nextlen = ((dstatus & STATUS_F2_PKT_LEN_MASK) >>
- STATUS_F2_PKT_LEN_SHIFT);
- bus->nextlen = (bus->nextlen == 0) ?
- SPI_MAX_PKT_LEN : bus->nextlen;
- if (bus->dwordmode)
- bus->nextlen = bus->nextlen << 2;
- DHD_INFO(("readahead len from gSPI = %d \n",
- bus->nextlen));
- bus->dhd->rx_readahead_cnt ++;
- } else {
- bus->nextlen = 0;
- *finished = TRUE;
- }
- } else {
-#endif /* BCMSPI */
bus->nextlen =
bus->rxhdr[SDPCM_FRAMETAG_LEN + SDPCM_NEXTLEN_OFFSET];
if ((bus->nextlen << 4) > MAX_RX_DATASZ) {
}
bus->dhd->rx_readahead_cnt ++;
-#ifdef BCMSPI
- }
-#endif /* BCMSPI */
/* Handle Flow Control */
fcbits = SDPCM_FCMASK_VALUE(&bus->rxhdr[SDPCM_FRAMETAG_LEN]);
/* Check window for sanity */
if ((uint8)(txmax - bus->tx_seq) > 0x70) {
-#ifdef BCMSPI
- if ((bus->bus == SPI_BUS) && !(dstatus & STATUS_F2_RX_READY)) {
- DHD_INFO(("%s: got unlikely tx max %d with tx_seq %d\n",
- __FUNCTION__, txmax, bus->tx_seq));
- txmax = bus->tx_seq + 2;
- } else {
-#endif /* BCMSPI */
- DHD_INFO(("%s: got unlikely tx max %d with tx_seq %d\n",
+ DHD_ERROR(("%s: got unlikely tx max %d with tx_seq %d\n",
__FUNCTION__, txmax, bus->tx_seq));
txmax = bus->tx_max;
-#ifdef BCMSPI
- }
-#endif /* BCMSPI */
}
bus->tx_max = txmax;
} else if (DHD_HDRS_ON()) {
prhex("RxHdr", bus->rxhdr, SDPCM_HDRLEN);
}
-#endif // endif
+#endif
if (chan == SDPCM_CONTROL_CHANNEL) {
if (bus->bus == SPI_BUS) {
if (DHD_BYTES_ON() || DHD_HDRS_ON()) {
prhex("RxHdr", bus->rxhdr, SDPCM_HDRLEN);
}
-#endif // endif
+#endif
/* Extract hardware header fields */
len = ltoh16_ua(bus->rxhdr);
/* Check window for sanity */
if ((uint8)(txmax - bus->tx_seq) > 0x70) {
- DHD_INFO(("%s: got unlikely tx max %d with tx_seq %d\n",
+ DHD_ERROR(("%s: got unlikely tx max %d with tx_seq %d\n",
__FUNCTION__, txmax, bus->tx_seq));
txmax = bus->tx_max;
}
if (DHD_BYTES_ON() && DHD_DATA_ON()) {
prhex("Rx Data", PKTDATA(osh, pkt), len);
}
-#endif // endif
+#endif
deliver:
/* Save superframe descriptor and allocate packet frame */
if (DHD_GLOM_ON()) {
prhex("Glom Data", PKTDATA(osh, pkt), len);
}
-#endif // endif
+#endif
PKTSETLEN(osh, pkt, len);
ASSERT(doff == SDPCM_HDRLEN);
PKTPULL(osh, pkt, SDPCM_HDRLEN);
bus->sdpcm_ver, SDPCM_PROT_VERSION));
else
DHD_INFO(("Dongle ready, protocol version %d\n", bus->sdpcm_ver));
-#ifndef BCMSPI
/* make sure for the SDIO_DEVICE_RXDATAINT_MODE_1 corecontrol is proper */
if ((bus->sih->buscoretype == SDIOD_CORE_ID) && (bus->sdpcmrev >= 4) &&
(bus->rxint_mode == SDIO_DEVICE_RXDATAINT_MODE_1)) {
val = R_REG(bus->dhd->osh, &bus->regs->corecontrol);
}
-#endif /* BCMSPI */
#ifdef DHD_DEBUG
/* Retrieve console state address now that firmware should have updated it */
if (hmb_data & HMB_DATA_FWHALT) {
DHD_ERROR(("INTERNAL ERROR: FIRMWARE HALTED : set BUS DOWN\n"));
dhdsdio_checkdied(bus, NULL, 0);
- DHD_ERROR(("Not doing bus down untill memdump done \n"));
+ bus->dhd->busstate = DHD_BUS_DOWN;
}
/* Shouldn't be any others */
devctl = bcmsdh_cfg_read(sdh, SDIO_FUNC_1, SBSDIO_DEVICE_CTL, &err);
if (err) {
DHD_ERROR(("%s: error reading DEVCTL: %d\n", __FUNCTION__, err));
- DHD_ERROR(("%s: making DHD_BUS_DOWN\n", __FUNCTION__));
bus->dhd->busstate = DHD_BUS_DOWN;
} else {
ASSERT(devctl & SBSDIO_DEVCTL_CA_INT_ONLY);
clkctl = bcmsdh_cfg_read(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR, &err);
if (err) {
DHD_ERROR(("%s: error reading CSR: %d\n", __FUNCTION__, err));
- DHD_ERROR(("%s: making DHD_BUS_DOWN\n", __FUNCTION__));
bus->dhd->busstate = DHD_BUS_DOWN;
}
if (err) {
DHD_ERROR(("%s: error reading DEVCTL: %d\n",
__FUNCTION__, err));
- DHD_ERROR(("%s: making DHD_BUS_DOWN\n", __FUNCTION__));
bus->dhd->busstate = DHD_BUS_DOWN;
}
devctl &= ~SBSDIO_DEVCTL_CA_INT_ONLY;
if (err) {
DHD_ERROR(("%s: error writing DEVCTL: %d\n",
__FUNCTION__, err));
- DHD_ERROR(("%s: making DHD_BUS_DOWN\n", __FUNCTION__));
bus->dhd->busstate = DHD_BUS_DOWN;
}
bus->clkstate = CLK_AVAIL;
bus->fcstate = !!(newstatus & I_HMB_FC_STATE);
if (newstatus) {
bus->f1regdata++;
-#ifndef BCMSPI
if ((bus->rxint_mode == SDIO_DEVICE_RXDATAINT_MODE_0) &&
(newstatus == I_XMTDATA_AVAIL)) {
} else
-#endif /* BCMSPI */
W_SDREG(newstatus, &regs->intstatus, retries);
}
}
goto exit;
}
}
-#endif // endif
+#endif
}
-#ifdef DHD_UCODE_DOWNLOAD
-exit_ucode:
-#endif /* DHD_UCODE_DOWNLOAD */
-
/* Just being here means nothing more to do for chipactive */
if (intstatus & I_CHIPACTIVE) {
/* ASSERT(bus->clkstate == CLK_AVAIL); */
bcmsdh_oob_intr_set(bus->sdh, TRUE);
#endif /* defined(OOB_INTR_ONLY) */
bcmsdh_intr_enable(sdh);
-#ifdef BCMSPI_ANDROID
- if (*dhd_spi_lockcount == 0)
- bcmsdh_oob_intr_set(bus->sdh, TRUE);
-#endif /* BCMSPI_ANDROID */
}
#if defined(OOB_INTR_ONLY) && !defined(HW_OOB)
}
#endif /* defined(OOB_INTR_ONLY) && !defined(HW_OOB) */
-#ifdef BCMSDIO_RXLIM_POST
- if (!DATAOK(bus) && bus->rxlim_en) {
- uint8 rxlim = 0;
- if (0 == dhdsdio_membytes(bus, FALSE, bus->rxlim_addr, (uint8 *)&rxlim, 1)) {
- if (bus->tx_max != rxlim) {
- DHD_INFO(("%s: bus->tx_max/rxlim=%d/%d\n", __FUNCTION__,
- bus->tx_max, rxlim));
- bus->tx_max = rxlim;
- }
- }
- }
-#endif /* BCMSDIO_RXLIM_POST */
-
#ifdef PROP_TXSTATUS
dhd_wlfc_commit_packets(bus->dhd, (f_commitpkt_t)dhd_bus_txdata, (void *)bus, NULL, FALSE);
-#endif // endif
+#endif
if (TXCTLOK(bus) && bus->ctrl_frame_stat && (bus->clkstate == CLK_AVAIL))
dhdsdio_sendpendctl(bus);
-#ifdef CONSOLE_DPC
- else if (DATAOK(bus) && strlen(bus->cons_cmd) && (bus->clkstate == CLK_AVAIL) &&
- !bus->fcstate) {
- dhd_bus_console_in(bus->dhd, bus->cons_cmd, strlen(bus->cons_cmd));
- }
-#endif
/* Send queued frames (limit 1 if rx may still be pending) */
else if ((bus->clkstate == CLK_AVAIL) && !bus->fcstate &&
} else {
DHD_ERROR(("%s: failed backplane access over SDIO, halting operation\n",
__FUNCTION__));
- DHD_ERROR(("%s: making DHD_BUS_DOWN\n", __FUNCTION__));
bus->dhd->busstate = DHD_BUS_DOWN;
bus->intstatus = 0;
}
} else if (bus->clkstate == CLK_PENDING) {
/* Awaiting I_CHIPACTIVE; don't resched */
} else if (bus->intstatus || bus->ipend ||
- (!bus->fcstate && pktq_mlen(&bus->txq, ~bus->flowcontrol) && DATAOK(bus)) ||
+ (!bus->fcstate && pktq_mlen(&bus->txq, ~bus->flowcontrol) && DATAOK(bus)) ||
PKT_AVAILABLE(bus, bus->intstatus)) { /* Read multiple frames */
resched = TRUE;
}
dhd_bus_t *bus = (dhd_bus_t*)arg;
bcmsdh_info_t *sdh;
+ DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
if (!bus) {
DHD_ERROR(("%s : bus is null pointer , exit \n", __FUNCTION__));
return;
DHD_ERROR(("dhdsdio_isr() w/o interrupt configured!\n"));
}
-#ifdef BCMSPI_ANDROID
- bcmsdh_oob_intr_set(bus->sdh, FALSE);
-#endif /* BCMSPI_ANDROID */
bcmsdh_intr_disable(sdh);
bus->intdis = TRUE;
data = (uint8*)PKTDATA(osh, pkt) + SDPCM_HDRLEN;
prhex("dhdsdio_pktgen: Tx Data", data, PKTLEN(osh, pkt) - SDPCM_HDRLEN);
}
-#endif // endif
+#endif
/* Send it */
if (dhdsdio_txpkt(bus, SDPCM_TEST_CHANNEL, &pkt, 1, TRUE) != BCME_OK) {
bus->pktgen_fail++;
}
+
static void
dhdsdio_testrcv(dhd_bus_t *bus, void *pkt, uint seq)
{
{
int err = 0;
-#if defined(OOB_INTR_ONLY) || defined(BCMSPI_ANDROID)
+#if defined(OOB_INTR_ONLY)
err = bcmsdh_oob_intr_register(dhdp->bus->sdh, dhdsdio_isr, dhdp->bus);
-#endif // endif
+#endif
return err;
}
void dhd_bus_oob_intr_unregister(dhd_pub_t *dhdp)
{
-#if defined(OOB_INTR_ONLY) || defined(BCMSPI_ANDROID)
+#if defined(OOB_INTR_ONLY)
bcmsdh_oob_intr_unregister(dhdp->bus->sdh);
-#endif // endif
+#endif
}
void dhd_bus_oob_intr_set(dhd_pub_t *dhdp, bool enable)
{
-#if defined(OOB_INTR_ONLY) || defined(BCMSPI_ANDROID)
+#if defined(OOB_INTR_ONLY)
bcmsdh_oob_intr_set(dhdp->bus->sdh, enable);
-#endif // endif
+#endif
}
void dhd_bus_dev_pm_stay_awake(dhd_pub_t *dhdpub)
/* Check device if no interrupts */
if (!bus->intr || (bus->intrcount == bus->lastintrs)) {
-#ifndef BCMSPI
if (!bus->dpc_sched) {
uint8 devpend;
devpend = bcmsdh_cfg_read(bus->sdh, SDIO_FUNC_0,
SDIOD_CCCR_INTPEND, NULL);
intstatus = devpend & (INTR_STATUS_FUNC1 | INTR_STATUS_FUNC2);
}
-#else
- if (!bus->dpc_sched) {
- uint32 devpend;
- devpend = bcmsdh_cfg_read_word(bus->sdh, SDIO_FUNC_0,
- SPID_STATUS_REG, NULL);
- intstatus = devpend & STATUS_F2_PKT_AVAILABLE;
- }
-#endif /* !BCMSPI */
/* If there is something, make like the ISR and schedule the DPC */
if (intstatus) {
bus->lastintrs = bus->intrcount;
}
- if ((!bus->dpc_sched) && pktq_n_pkts_tot(&bus->txq)) {
+ if ((!bus->dpc_sched) && pktq_len(&bus->txq)) {
bus->dpc_sched = TRUE;
dhd_sched_dpc(bus->dhd);
}
#ifdef DHD_DEBUG
/* Poll for console output periodically */
- if (dhdp->busstate == DHD_BUS_DATA && dhdp->dhd_console_ms != 0) {
+ if (dhdp->busstate == DHD_BUS_DATA && dhd_console_ms != 0) {
bus->console.count += dhd_watchdog_ms;
- if (bus->console.count >= dhdp->dhd_console_ms) {
- bus->console.count -= dhdp->dhd_console_ms;
+ if (bus->console.count >= dhd_console_ms) {
+ bus->console.count -= dhd_console_ms;
/* Make sure backplane clock is on */
if (SLPAUTO_ENAB(bus))
dhdsdio_bussleep(bus, FALSE);
else
dhdsdio_clkctl(bus, CLK_AVAIL, FALSE);
if (dhdsdio_readconsole(bus) < 0)
- dhdp->dhd_console_ms = 0; /* On error, stop trying */
+ dhd_console_ms = 0; /* On error, stop trying */
}
}
#endif /* DHD_DEBUG */
bus->pktgen_tick = 0;
dhdsdio_pktgen(bus);
}
-#endif // endif
+#endif
/* On idle timeout clear activity flag and/or turn off clock */
#ifdef DHD_USE_IDLECOUNT
int rv;
void *pkt;
-#ifndef CONSOLE_DPC
+ /* Address could be zero if CONSOLE := 0 in dongle Makefile */
+ if (bus->console_addr == 0)
+ return BCME_UNSUPPORTED;
+
/* Exclusive bus access */
dhd_os_sdlock(bus->dhd);
-#endif
-
- /* Address could be zero if CONSOLE := 0 in dongle Makefile */
- if (bus->console_addr == 0) {
- rv = BCME_UNSUPPORTED;
- goto exit;
- }
/* Don't allow input if dongle is in reset */
if (bus->dhd->dongle_reset) {
- rv = BCME_NOTREADY;
- goto exit;
- }
-
-#ifndef CONSOLE_DPC
- if (!DATAOK(bus)) {
- DHD_CTL(("%s: No bus credit bus->tx_max %d, bus->tx_seq %d, pktq_len %d\n",
- __FUNCTION__, bus->tx_max, bus->tx_seq, pktq_n_pkts_tot(&bus->txq)));
- rv = BCME_NOTREADY;
- goto exit;
+ dhd_os_sdunlock(bus->dhd);
+ return BCME_NOTREADY;
}
/* Request clock to allow SDIO accesses */
BUS_WAKE(bus);
/* No pend allowed since txpkt is called later, ht clk has to be on */
dhdsdio_clkctl(bus, CLK_AVAIL, FALSE);
-#endif
/* Zero cbuf_index */
addr = bus->console_addr + OFFSETOF(hnd_cons_t, cbuf_idx);
if ((rv = dhdsdio_membytes(bus, TRUE, addr, (uint8 *)&val, sizeof(val))) < 0)
goto done;
+ if (!DATAOK(bus)) {
+ rv = BCME_NOTREADY;
+ goto done;
+ }
+
/* Bump dongle by sending an empty packet on the event channel.
* sdpcm_sendup (RX) checks for virtual console input.
*/
rv = dhdsdio_txpkt(bus, SDPCM_EVENT_CHANNEL, &pkt, 1, TRUE);
done:
-#ifndef CONSOLE_DPC
if ((bus->idletime == DHD_IDLE_IMMEDIATE) && !bus->dpc_sched &&
NO_OTHER_ACTIVE_BUS_USER(bus)) {
bus->activity = FALSE;
dhdsdio_bussleep(bus, TRUE);
dhdsdio_clkctl(bus, CLK_NONE, FALSE);
}
-#endif
-exit:
-#ifdef CONSOLE_DPC
- memset(bus->cons_cmd, 0, sizeof(bus->cons_cmd));
-#else
dhd_os_sdunlock(bus->dhd);
-#endif
- return rv;
-}
-
-#ifdef CONSOLE_DPC
-extern int
-dhd_bus_txcons(dhd_pub_t *dhdp, uchar *msg, uint msglen)
-{
- dhd_bus_t *bus = dhdp->bus;
- int ret = BCME_OK;
-
- dhd_os_sdlock(bus->dhd);
- /* Address could be zero if CONSOLE := 0 in dongle Makefile */
- if (bus->console_addr == 0) {
- ret = BCME_UNSUPPORTED;
- goto exit;
- }
-
- /* Don't allow input if dongle is in reset */
- if (bus->dhd->dongle_reset) {
- ret = BCME_NOTREADY;
- goto exit;
- }
-
- if (msglen >= sizeof(bus->cons_cmd)) {
- DHD_ERROR(("%s: \"%s\"(%d) too long\n", __FUNCTION__, msg, msglen));
- ret = BCME_BADARG;
- goto exit;
- }
-
- if (!strlen(bus->cons_cmd)) {
- strncpy(bus->cons_cmd, msg, sizeof(bus->cons_cmd));
- DHD_CTL(("%s: \"%s\" delay send, tx_max %d, tx_seq %d, pktq_len %d\n",
- __FUNCTION__, bus->cons_cmd, bus->tx_max, bus->tx_seq, pktq_n_pkts_tot(&bus->txq)));
- if (!bus->dpc_sched) {
- bus->dpc_sched = TRUE;
- dhd_sched_dpc(bus->dhd);
- }
- } else {
- DHD_CTL(("%s: \"%s\" is pending, tx_max %d, tx_seq %d, pktq_len %d\n",
- __FUNCTION__, bus->cons_cmd, bus->tx_max, bus->tx_seq, pktq_n_pkts_tot(&bus->txq)));
- ret = BCME_NOTREADY;
- }
-
-exit:
- dhd_os_sdunlock(bus->dhd);
-
- return ret;
+ return rv;
}
-#endif
-#if defined(DHD_DEBUG) && !defined(BCMSDIOLITE)
+#ifdef DHD_DEBUG
static void
dhd_dump_cis(uint fn, uint8 *cis)
{
static bool
dhdsdio_chipmatch(uint16 chipid)
{
+ if (chipid == BCM4336_CHIP_ID)
+ return TRUE;
if (chipid == BCM4330_CHIP_ID)
return TRUE;
+ if (chipid == BCM43237_CHIP_ID)
+ return TRUE;
if (chipid == BCM43362_CHIP_ID)
return TRUE;
+ if (chipid == BCM4314_CHIP_ID)
+ return TRUE;
+ if (chipid == BCM43242_CHIP_ID)
+ return TRUE;
if (chipid == BCM43340_CHIP_ID)
return TRUE;
if (chipid == BCM43341_CHIP_ID)
return TRUE;
+ if (chipid == BCM43143_CHIP_ID)
+ return TRUE;
+ if (chipid == BCM43342_CHIP_ID)
+ return TRUE;
if (chipid == BCM4334_CHIP_ID)
return TRUE;
+ if (chipid == BCM43239_CHIP_ID)
+ return TRUE;
if (chipid == BCM4324_CHIP_ID)
return TRUE;
if (chipid == BCM4335_CHIP_ID)
return TRUE;
if (chipid == BCM4339_CHIP_ID)
return TRUE;
+ if (chipid == BCM43349_CHIP_ID)
+ return TRUE;
if (BCM4345_CHIP(chipid))
return TRUE;
if (chipid == BCM4350_CHIP_ID)
return TRUE;
if (BCM4349_CHIP(chipid))
return TRUE;
+ if (BCM4347_CHIP(chipid))
+ return TRUE;
if (chipid == BCM4364_CHIP_ID)
return TRUE;
if (chipid == BCM43012_CHIP_ID)
return TRUE;
-
- if (chipid == BCM43014_CHIP_ID)
- return TRUE;
-
- if (chipid == BCM4369_CHIP_ID)
- return TRUE;
if (chipid == BCM4362_CHIP_ID)
return TRUE;
- if (chipid == BCM43751_CHIP_ID)
- return TRUE;
- if (chipid == BCM43752_CHIP_ID)
- return TRUE;
return FALSE;
}
{
int ret;
dhd_bus_t *bus;
+#ifdef GET_OTP_MAC_ENABLE
+ struct ether_addr ea_addr;
+#endif
DHD_MUTEX_LOCK();
*/
dhd_txbound = DHD_TXBOUND;
dhd_rxbound = DHD_RXBOUND;
-#ifdef BCMSPI
- dhd_alignctl = FALSE;
-#else
dhd_alignctl = TRUE;
-#endif /* BCMSPI */
sd1idle = TRUE;
dhd_readahead = TRUE;
retrydata = FALSE;
dhd_dongle_ramsize = 0;
dhd_txminmax = DHD_TXMINMAX;
-#ifdef BCMSPI
- forcealign = FALSE;
-#else
forcealign = TRUE;
-#endif /* !BCMSPI */
DHD_TRACE(("%s: Enter\n", __FUNCTION__));
DHD_INFO(("%s: venid 0x%04x devid 0x%04x\n", __FUNCTION__, venid, devid));
/* We make assumptions about address window mappings */
- ASSERT((uintptr)regsva == si_enum_base(devid));
+ ASSERT((uintptr)regsva == SI_ENUM_BASE);
/* BCMSDH passes venid and devid based on CIS parsing -- but low-power start
* means early parse could fail, so here we should get either an ID
bus->usebufpool = FALSE; /* Use bufpool if allocated, else use locally malloced rxbuf */
#ifdef BT_OVER_SDIO
bus->bt_use_count = 0;
-#endif // endif
+#endif
#if defined(SUPPORT_P2P_GO_PS)
init_waitqueue_head(&bus->bus_sleep);
DHD_ERROR(("%s: dhd_attach failed\n", __FUNCTION__));
goto fail;
}
-
-#if defined(GET_OTP_MAC_ENABLE) || defined(GET_OTP_MODULE_NAME)
- dhd_conf_get_otp(bus->dhd, sdh, bus->sih);
-#endif
+#if defined(BT_OVER_SDIO)
+ g_dhd_pub = bus->dhd;
+ DHD_ERROR(("%s: g_dhd_pub %p\n", __FUNCTION__, g_dhd_pub));
+#endif /* defined (BT_OVER_SDIO) */
/* Allocate buffers */
if (!(dhdsdio_probe_malloc(bus, osh, sdh))) {
#endif /* BT_OVER_SDIO */
#ifdef GET_OTP_MAC_ENABLE
- if (memcmp(ðer_null, &bus->dhd->conf->otp_mac, ETHER_ADDR_LEN))
- memcpy(bus->dhd->mac.octet, (void *)&bus->dhd->conf->otp_mac, ETHER_ADDR_LEN);
+ if (dhd_conf_get_mac(bus->dhd, sdh, ea_addr.octet)) {
+ DHD_TRACE(("%s: Can not read MAC address\n", __FUNCTION__));
+ } else
+ memcpy(bus->dhd->mac.octet, (void *)&ea_addr, ETHER_ADDR_LEN);
#endif /* GET_CUSTOM_MAC_ENABLE */
/* Ok, have the per-port tell the stack we're open for business */
- if (dhd_attach_net(bus->dhd, TRUE) != 0)
- {
+ if (dhd_register_if(bus->dhd, 0, TRUE) != 0) {
DHD_ERROR(("%s: Net attach failed!!\n", __FUNCTION__));
goto fail;
}
#ifdef BCMHOST_XTAL_PU_TIME_MOD
bcmsdh_reg_write(bus->sdh, 0x18000620, 2, 11);
+#ifdef BCM4330_CHIP
+ bcmsdh_reg_write(bus->sdh, 0x18000628, 4, 0x0000F801);
+#else
bcmsdh_reg_write(bus->sdh, 0x18000628, 4, 0x00F80001);
+#endif /* BCM4330_CHIP */
#endif /* BCMHOST_XTAL_PU_TIME_MOD */
#if defined(MULTIPLE_SUPPLICANT)
dhdsdio_probe_attach(struct dhd_bus *bus, osl_t *osh, void *sdh, void *regsva,
uint16 devid)
{
-#ifndef BCMSPI
uint8 clkctl = 0;
-#endif /* !BCMSPI */
uint fn, numfn;
uint8 *cis[SDIOD_MAX_IOFUNCS];
- int32 value;
int err = 0;
- BCM_REFERENCE(value);
+
bus->alp_only = TRUE;
bus->sih = NULL;
/* Return the window to backplane enumeration space for core access */
- if (dhdsdio_set_siaddr_window(bus, si_enum_base(devid))) {
+ if (dhdsdio_set_siaddr_window(bus, SI_ENUM_BASE)) {
DHD_ERROR(("%s: FAILED to return to SI_ENUM_BASE\n", __FUNCTION__));
}
-#if defined(DHD_DEBUG)
+#if defined(DHD_DEBUG) && !defined(CUSTOMER_HW4_DEBUG)
DHD_ERROR(("F1 signature read @0x18000000=0x%4x\n",
- bcmsdh_reg_read(bus->sdh, si_enum_base(devid), 4)));
-#endif // endif
+ bcmsdh_reg_read(bus->sdh, SI_ENUM_BASE, 4)));
+#endif /* DHD_DEBUG && !CUSTOMER_HW4_DEBUG */
-#ifndef BCMSPI /* wake-wlan in gSPI will bring up the htavail/alpavail clocks. */
/* Force PLL off until si_attach() programs PLL control regs */
+
+
bcmsdh_cfg_write(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR, DHD_INIT_CLKCTL1, &err);
if (!err)
clkctl = bcmsdh_cfg_read(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR, &err);
err, DHD_INIT_CLKCTL1, clkctl));
goto fail;
}
-
-#endif /* !BCMSPI */
-#ifndef BCMSPI
numfn = bcmsdh_query_iofnum(sdh);
ASSERT(numfn <= SDIOD_MAX_IOFUNCS);
/* Now request ALP be put on the bus */
bcmsdh_cfg_write(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR,
DHD_INIT_CLKCTL2, &err);
- OSL_DELAY(65);
-#else
- numfn = 0; /* internally func is hardcoded to 1 as gSPI has cis on F1 only */
-#endif /* !BCMSPI */
-#ifndef BCMSDIOLITE
+ OSL_DELAY(200);
+
if (DHD_INFO_ON()) {
for (fn = 0; fn <= numfn; fn++) {
if (!(cis[fn] = MALLOC(osh, SBSDIO_CIS_SIZE_LIMIT))) {
value = F0_BLOCK_SIZE;
else
value = (cis[fn][25]<<8) | cis[fn][24] | (fn<<16);
- /* Get block size from sd */
- if (bcmsdh_iovar_op(sdh, "sd_blocksize", &fn, sizeof(int32),
- &size, sizeof(int32), FALSE) != BCME_OK) {
- size = 0;
- DHD_ERROR(("%s: fail on fn %d %s get\n",
- __FUNCTION__, fn, "sd_blocksize"));
- } else {
- DHD_INFO(("%s: Initial value for fn %d %s is %d\n",
- __FUNCTION__, fn, "sd_blocksize", size));
- }
- if (size != 0 && size < value) {
- value = size;
- }
- value = fn << 16 | value;
+ printf("%s: fn=%d, value=%d\n", __FUNCTION__, fn, value);
if (bcmsdh_iovar_op(sdh, "sd_blocksize", NULL, 0, &value,
sizeof(value), TRUE) != BCME_OK) {
bus->blocksize = 0;
- DHD_ERROR(("%s: fail on fn %d %s set\n", __FUNCTION__,
- fn, "sd_blocksize"));
+ DHD_ERROR(("%s: fail on %s get\n", __FUNCTION__,
+ "sd_blocksize"));
}
#endif
#ifdef DHD_DEBUG
MFREE(osh, cis[fn], SBSDIO_CIS_SIZE_LIMIT);
}
}
-#else
- BCM_REFERENCE(cis);
- BCM_REFERENCE(fn);
-#endif /* DHD_DEBUG */
-
+#if 0
+ if (dhd_conf_set_blksize(sdh)) {
+ bus->blocksize = 0;
+ }
+#endif
if (err) {
DHD_ERROR(("dhdsdio_probe: failure reading or parsing CIS\n"));
goto fail;
bus->sih->socitype, bus->sih->chip, bus->sih->chiprev, bus->sih->chippkg));
#endif /* DHD_DEBUG */
+
bcmsdh_chipinfo(sdh, bus->sih->chip, bus->sih->chiprev);
if (!dhdsdio_chipmatch((uint16)bus->sih->chip)) {
else
bus->kso = TRUE;
+ if (CST4330_CHIPMODE_SDIOD(bus->sih->chipst)) {
+ }
+
si_sdiod_drive_strength_init(bus->sih, osh, dhd_sdiod_drive_strength);
+
/* Get info on the ARM and SOCRAM cores... */
if (!DHD_NOPMU(bus)) {
if ((si_setcore(bus->sih, ARM7S_CORE_ID, 0)) ||
switch ((uint16)bus->sih->chip) {
case BCM4335_CHIP_ID:
case BCM4339_CHIP_ID:
+ case BCM43349_CHIP_ID:
bus->dongle_ram_base = CR4_4335_RAM_BASE;
break;
case BCM4350_CHIP_ID:
case BCM4364_CHIP_ID:
bus->dongle_ram_base = CR4_4364_RAM_BASE;
break;
+ case BCM4347_CHIP_GRPID:
+ bus->dongle_ram_base = CR4_4347_RAM_BASE;
+ break;
case BCM4362_CHIP_ID:
bus->dongle_ram_base = CR4_4362_RAM_BASE;
break;
- case BCM43751_CHIP_ID:
- bus->dongle_ram_base = CR4_43751_RAM_BASE;
- break;
- case BCM43752_CHIP_ID:
- bus->dongle_ram_base = CR4_43752_RAM_BASE;
- break;
- case BCM4369_CHIP_ID:
- bus->dongle_ram_base = CR4_4369_RAM_BASE;
- break;
default:
bus->dongle_ram_base = 0;
DHD_ERROR(("%s: WARNING: Using default ram base at 0x%x\n",
}
/* ...but normally deal with the SDPCMDEV core */
-#ifdef BCMSDIOLITE
- if (!(bus->regs = si_setcore(bus->sih, CC_CORE_ID, 0))) {
- DHD_ERROR(("%s: failed to find Chip Common core!\n", __FUNCTION__));
- goto fail;
- }
-#else
if (!(bus->regs = si_setcore(bus->sih, PCMCIA_CORE_ID, 0)) &&
!(bus->regs = si_setcore(bus->sih, SDIOD_CORE_ID, 0))) {
DHD_ERROR(("%s: failed to find SDIODEV core!\n", __FUNCTION__));
goto fail;
}
-#endif // endif
bus->sdpcmrev = si_corerev(bus->sih);
/* Set core control so an SDIO reset does a backplane reset */
OR_REG(osh, &bus->regs->corecontrol, CC_BPRESEN);
-#ifndef BCMSPI
bus->rxint_mode = SDIO_DEVICE_HMB_RXINT;
if ((bus->sih->buscoretype == SDIOD_CORE_ID) && (bus->sdpcmrev >= 4) &&
val |= CC_XMTDATAAVAIL_CTRL;
W_REG(osh, &bus->regs->corecontrol, val);
}
-#endif /* BCMSPI */
+
pktq_init(&bus->txq, (PRIOMASK + 1), QLEN);
dhdsdio_pktgen_init(bus);
#endif /* SDTEST */
-#ifndef BCMSPI
+ /* set PMU minimum resource mask to default */
+ dhd_bus_set_default_min_res_mask(bus);
/* Disable F2 to clear any intermediate frame state on the dongle */
bcmsdh_cfg_write(sdh, SDIO_FUNC_0, SDIOD_CCCR_IOEN, SDIO_FUNC_ENABLE_1, NULL);
-#endif /* !BCMSPI */
- DHD_ERROR(("%s: making DHD_BUS_DOWN\n", __FUNCTION__));
bus->dhd->busstate = DHD_BUS_DOWN;
bus->sleeping = FALSE;
bus->rxflow = FALSE;
bus->prev_rxlim_hit = 0;
-#ifndef BCMSPI
/* Done with backplane-dependent accesses, can drop clock... */
bcmsdh_cfg_write(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR, 0, NULL);
-#endif /* !BCMSPI */
/* ...and initialize clock/power states */
bus->clkstate = CLK_SDONLY;
if (bcmsdh_iovar_op(sdh, "sd_blocksize", &fnum, sizeof(int32),
&bus->blocksize, sizeof(int32), FALSE) != BCME_OK) {
bus->blocksize = 0;
- DHD_ERROR(("%s: fail on fn %d %s get\n", __FUNCTION__, fnum, "sd_blocksize"));
+ DHD_ERROR(("%s: fail on %s get\n", __FUNCTION__, "sd_blocksize"));
} else {
- DHD_INFO(("%s: Initial value for fn %d %s is %d\n",
- __FUNCTION__, fnum, "sd_blocksize", bus->blocksize));
+ DHD_INFO(("%s: Initial value for %s is %d\n",
+ __FUNCTION__, "sd_blocksize", bus->blocksize));
dhdsdio_tune_fifoparam(bus);
}
return ret;
}
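+/*
+ * Resolve the firmware/NVRAM/CLM/config file paths before download: apply
+ * the driver defaults, let config.txt and the chip/MAC specific helpers
+ * override them, then log the final paths that will be used.
+ */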
+void
+dhd_set_path_params(struct dhd_bus *bus)
+{
+ /* External conf takes precedence if specified */
+ dhd_conf_preinit(bus->dhd);
+
+ if (bus->dhd->conf_path[0] == '\0') {
+ dhd_conf_set_path(bus->dhd, "config.txt", bus->dhd->conf_path, bus->nv_path);
+ }
+ if (bus->dhd->clm_path[0] == '\0') {
+ dhd_conf_set_path(bus->dhd, "clm.blob", bus->dhd->clm_path, bus->fw_path);
+ }
+#ifdef CONFIG_PATH_AUTO_SELECT
+ dhd_conf_set_conf_name_by_chip(bus->dhd, bus->dhd->conf_path);
+#endif
+
+ dhd_conf_read_config(bus->dhd, bus->dhd->conf_path);
+
+ dhd_conf_set_fw_name_by_chip(bus->dhd, bus->fw_path);
+ dhd_conf_set_nv_name_by_chip(bus->dhd, bus->nv_path);
+ dhd_conf_set_clm_name_by_chip(bus->dhd, bus->dhd->clm_path);
+
+ dhd_conf_set_fw_name_by_mac(bus->dhd, bus->sdh, bus->fw_path);
+ dhd_conf_set_nv_name_by_mac(bus->dhd, bus->sdh, bus->nv_path);
+
+ printf("Final fw_path=%s\n", bus->fw_path);
+ printf("Final nv_path=%s\n", bus->nv_path);
+ printf("Final clm_path=%s\n", bus->dhd->clm_path);
+ printf("Final conf_path=%s\n", bus->dhd->conf_path);
+
+}
+
void
dhd_set_bus_params(struct dhd_bus *bus)
{
{
int ret;
-#if defined(SUPPORT_MULTIPLE_REVISION)
- if (concate_revision(bus, bus->fw_path, bus->nv_path) != 0) {
- DHD_ERROR(("%s: fail to concatnate revison \n",
- __FUNCTION__));
- return BCME_BADARG;
- }
-#endif /* SUPPORT_MULTIPLE_REVISION */
#if defined(DHD_BLOB_EXISTENCE_CHECK)
dhd_set_blob_support(bus->dhd, bus->fw_path);
__FUNCTION__, bus->fw_path, bus->nv_path));
DHD_OS_WAKE_LOCK(bus->dhd);
- dhd_conf_set_path_params(bus->dhd, bus->fw_path, bus->nv_path);
- dhd_set_bus_params(bus);
-
/* Download the firmware */
dhdsdio_clkctl(bus, CLK_AVAIL, FALSE);
+ dhd_set_path_params(bus);
+ dhd_set_bus_params(bus);
+
ret = _dhdsdio_download_firmware(bus);
dhdsdio_clkctl(bus, CLK_SDONLY, FALSE);
ASSERT(osh);
if (bus->dhd) {
-#if defined(DEBUGGER) || defined(DHD_DSCOPE)
- debugger_close();
-#endif /* DEBUGGER || DHD_DSCOPE */
dongle_isolation = bus->dhd->dongle_isolation;
dhd_detach(bus->dhd);
}
#ifdef DHD_DEBUG
if (bus->console.buf != NULL)
MFREE(osh, bus->console.buf, bus->console.bufsize);
-#endif // endif
+#endif
#ifdef DHDENABLE_TAILPAD
if (bus->pad_pkt)
if (bus->rxbuf) {
#ifndef CONFIG_DHD_USE_STATIC_BUF
MFREE(osh, bus->rxbuf, bus->rxblen);
-#endif // endif
+#endif
bus->rxctl = bus->rxbuf = NULL;
bus->rxlen = 0;
}
if (bus->databuf) {
#ifndef CONFIG_DHD_USE_STATIC_BUF
MFREE(osh, bus->databuf, MAX_DATA_BUF);
-#endif // endif
+#endif
bus->databuf = NULL;
}
}
+
static void
dhdsdio_release_dongle(dhd_bus_t *bus, osl_t *osh, bool dongle_isolation, bool reset_flag)
{
}
DHD_LINUX_GENERAL_LOCK(bus->dhd, flags);
- /* stop all interface network queue. */
- dhd_txflowcontrol(bus->dhd, ALL_INTERFACES, ON);
bus->dhd->busstate = DHD_BUS_SUSPEND;
if (DHD_BUS_BUSY_CHECK_IN_TX(bus->dhd)) {
DHD_ERROR(("Tx Request is not ended\n"));
bus->dhd->busstate = DHD_BUS_DATA;
- /* resume all interface network queue. */
- dhd_txflowcontrol(bus->dhd, ALL_INTERFACES, OFF);
DHD_LINUX_GENERAL_UNLOCK(bus->dhd, flags);
return -EBUSY;
}
DHD_LINUX_GENERAL_LOCK(bus->dhd, flags);
if (ret) {
bus->dhd->busstate = DHD_BUS_DATA;
- /* resume all interface network queue. */
- dhd_txflowcontrol(bus->dhd, ALL_INTERFACES, OFF);
}
DHD_BUS_BUSY_CLEAR_SUSPEND_IN_PROGRESS(bus->dhd);
dhd_os_busbusy_wake(bus->dhd);
DHD_BUS_BUSY_SET_RESUME_IN_PROGRESS(bus->dhd);
DHD_LINUX_GENERAL_UNLOCK(bus->dhd, flags);
-#if defined(OOB_INTR_ONLY) || defined(BCMSPI_ANDROID)
+#if defined(OOB_INTR_ONLY)
if (dhd_os_check_if_up(bus->dhd))
bcmsdh_oob_intr_set(bus->sdh, TRUE);
-#endif /* defined(OOB_INTR_ONLY) || defined(BCMSPI_ANDROID) */
+#endif
DHD_LINUX_GENERAL_LOCK(bus->dhd, flags);
DHD_BUS_BUSY_CLEAR_RESUME_IN_PROGRESS(bus->dhd);
bus->dhd->busstate = DHD_BUS_DATA;
dhd_os_busbusy_wake(bus->dhd);
- /* resume all interface network queue. */
- dhd_txflowcontrol(bus->dhd, ALL_INTERFACES, OFF);
DHD_LINUX_GENERAL_UNLOCK(bus->dhd, flags);
return 0;
}
+
/* Register/Unregister functions are called by the main DHD entry
* point (e.g. module insertion) to link with the bus driver, in
* order to look for or await the device.
}
#endif /* defined(BCMLXSDMMC) */
+#ifdef BCMEMBEDIMAGE
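+/*
+ * Fallback download path used when no firmware file is available: write the
+ * image compiled into the driver (dlarray[]) into dongle RAM in
+ * MEMBLOCK-sized chunks and, in DHD_DEBUG builds, read it back to verify
+ * the downloaded contents.
+ */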
+static int
+dhdsdio_download_code_array(struct dhd_bus *bus)
+{
+ int bcmerror = -1;
+ int offset = 0;
+ unsigned char *ularray = NULL;
+
+ DHD_INFO(("%s: download embedded firmware...\n", __FUNCTION__));
+
+ /* Download image */
+ while ((offset + MEMBLOCK) < sizeof(dlarray)) {
+ /* check if CR4 */
+ if (si_setcore(bus->sih, ARMCR4_CORE_ID, 0)) {
+ /* if address is 0, store the reset instruction to be written in 0 */
+
+ if (offset == 0) {
+ bus->resetinstr = *(((uint32*)dlarray));
+ /* Add start of RAM address to the address given by user */
+ offset += bus->dongle_ram_base;
+ }
+ }
+
+ bcmerror = dhdsdio_membytes(bus, TRUE, offset,
+ (uint8 *) (dlarray + offset), MEMBLOCK);
+ if (bcmerror) {
+ DHD_ERROR(("%s: error %d on writing %d membytes at 0x%08x\n",
+ __FUNCTION__, bcmerror, MEMBLOCK, offset));
+ goto err;
+ }
+
+ offset += MEMBLOCK;
+ }
+
+ if (offset < sizeof(dlarray)) {
+ bcmerror = dhdsdio_membytes(bus, TRUE, offset,
+ (uint8 *) (dlarray + offset), sizeof(dlarray) - offset);
+ if (bcmerror) {
+ DHD_ERROR(("%s: error %d on writing %d membytes at 0x%08x\n",
+ __FUNCTION__, bcmerror, (int)(sizeof(dlarray) - offset), offset));
+ goto err;
+ }
+ }
+
+#ifdef DHD_DEBUG
+ /* Upload and compare the downloaded code */
+ {
+ ularray = MALLOC(bus->dhd->osh, bus->ramsize);
+ if (ularray == NULL) {
+ bcmerror = BCME_NOMEM;
+ goto err;
+ }
+ /* Upload image to verify downloaded contents. */
+ offset = 0;
+ memset(ularray, 0xaa, bus->ramsize);
+ while ((offset + MEMBLOCK) < sizeof(dlarray)) {
+ bcmerror = dhdsdio_membytes(bus, FALSE, offset, ularray + offset, MEMBLOCK);
+ if (bcmerror) {
+ DHD_ERROR(("%s: error %d on reading %d membytes at 0x%08x\n",
+ __FUNCTION__, bcmerror, MEMBLOCK, offset));
+ goto err;
+ }
+
+ offset += MEMBLOCK;
+ }
+
+ if (offset < sizeof(dlarray)) {
+ bcmerror = dhdsdio_membytes(bus, FALSE, offset,
+ ularray + offset, sizeof(dlarray) - offset);
+ if (bcmerror) {
+ DHD_ERROR(("%s: error %d on reading %d membytes at 0x%08x\n",
+ __FUNCTION__, bcmerror, (int)(sizeof(dlarray) - offset), offset));
+ goto err;
+ }
+ }
+
+ if (memcmp(dlarray, ularray, sizeof(dlarray))) {
+ DHD_ERROR(("%s: Downloaded image is corrupted (%s, %s, %s).\n",
+ __FUNCTION__, dlimagename, dlimagever, dlimagedate));
+ goto err;
+ } else
+ DHD_ERROR(("%s: Download, Upload and compare succeeded (%s, %s, %s).\n",
+ __FUNCTION__, dlimagename, dlimagever, dlimagedate));
+
+ }
+#endif /* DHD_DEBUG */
+
+err:
+ if (ularray)
+ MFREE(bus->dhd->osh, ularray, bus->ramsize);
+ return bcmerror;
+}
+#endif /* BCMEMBEDIMAGE */
+
static int
dhdsdio_download_code_file(struct dhd_bus *bus, char *pfw_path)
{
#ifdef DHD_DEBUG_DOWNLOADTIME
unsigned long initial_jiffies = 0;
uint firmware_sz = 0;
-#endif // endif
+#endif
DHD_INFO(("%s: download firmware %s\n", __FUNCTION__, pfw_path));
- image = dhd_os_open_image1(bus->dhd, pfw_path);
+ image = dhd_os_open_image(pfw_path);
if (image == NULL) {
printf("%s: Open firmware file failed %s\n", __FUNCTION__, pfw_path);
goto err;
/* Update the dongle image download block size depending on the F1 block size */
if (sd_f1_blocksize == 512)
memblock_size = MAX_MEMBLOCK;
+
memptr = memblock = MALLOC(bus->dhd->osh, memblock_size + DHD_SDALIGN);
if (memblock == NULL) {
DHD_ERROR(("%s: Failed to allocate memory %d bytes\n", __FUNCTION__,
#ifdef DHD_DEBUG_DOWNLOADTIME
initial_jiffies = jiffies;
-#endif // endif
+#endif
/* Download image */
while ((len = dhd_os_get_image_block((char*)memptr, memblock_size, image))) {
offset += memblock_size;
#ifdef DHD_DEBUG_DOWNLOADTIME
firmware_sz += len;
-#endif // endif
+#endif
}
#ifdef DHD_DEBUG_DOWNLOADTIME
DHD_ERROR(("Firmware download time for %u bytes: %u ms\n",
firmware_sz, jiffies_to_msecs(jiffies - initial_jiffies)));
-#endif // endif
+#endif
err:
if (memblock)
}
if (image)
- dhd_os_close_image1(bus->dhd, image);
+ dhd_os_close_image(image);
return bcmerror;
}
#ifdef DHD_DEBUG_DOWNLOADTIME
unsigned long initial_jiffies = 0;
uint firmware_sz = 0;
-#endif // endif
+#endif
DHD_INFO(("%s: download firmware %s\n", __FUNCTION__, ucode_path));
ucode_base = dhdsdio_ucode_base(bus);
- image = dhd_os_open_image1(bus->dhd, ucode_path);
+ image = dhd_os_open_image(ucode_path);
if (image == NULL)
goto err;
#ifdef DHD_DEBUG_DOWNLOADTIME
initial_jiffies = jiffies;
-#endif // endif
+#endif
/* Download image */
while ((len = dhd_os_get_image_block((char*)memptr, memblock_size, image))) {
offset += memblock_size;
#ifdef DHD_DEBUG_DOWNLOADTIME
firmware_sz += len;
-#endif // endif
+#endif
}
#ifdef DHD_DEBUG_DOWNLOADTIME
DHD_ERROR(("ucode download time for %u bytes: %u ms\n",
firmware_sz, jiffies_to_msecs(jiffies - initial_jiffies)));
-#endif // endif
+#endif
err:
if (memblock)
MFREE(bus->dhd->osh, memblock, memblock_size + DHD_SDALIGN);
if (image)
- dhd_os_close_image1(bus->dhd, image);
+ dhd_os_close_image(image);
return bcmerror;
-} /* dhdsdio_download_ucode_file */
+}
void
dhd_bus_ucode_download(struct dhd_bus *bus)
/* For Get nvram from UEFI */
if (nvram_file_exists) {
- image = dhd_os_open_image1(bus->dhd, pnv_path);
+ image = dhd_os_open_image(pnv_path);
if (image == NULL) {
printf("%s: Open nvram file failed %s\n", __FUNCTION__, pnv_path);
goto err;
MFREE(bus->dhd->osh, memblock, MAX_NVRAMBUF_SIZE);
if (image)
- dhd_os_close_image1(bus->dhd, image);
+ dhd_os_close_image(image);
return bcmerror;
}
/* Out immediately if no image to download */
if ((bus->fw_path == NULL) || (bus->fw_path[0] == '\0')) {
- return bcmerror;
+#ifdef BCMEMBEDIMAGE
+ embed = TRUE;
+#else
+ return 0;
+#endif
}
/* Keep arm in reset */
if ((bus->fw_path != NULL) && (bus->fw_path[0] != '\0')) {
if (dhdsdio_download_code_file(bus, bus->fw_path)) {
DHD_ERROR(("%s: dongle image file download failed\n", __FUNCTION__));
+#ifdef BCMEMBEDIMAGE
+ embed = TRUE;
+#else
goto err;
+#endif
} else {
embed = FALSE;
dlok = TRUE;
}
}
+#ifdef BCMEMBEDIMAGE
+ if (embed) {
+ if (dhdsdio_download_code_array(bus)) {
+ DHD_ERROR(("%s: dongle image array download failed\n", __FUNCTION__));
+ goto err;
+ } else {
+ dlok = TRUE;
+ }
+ }
+#else
BCM_REFERENCE(embed);
+#endif
if (!dlok) {
DHD_ERROR(("%s: dongle image download failed\n", __FUNCTION__));
goto err;
return bus->dhd;
}
-void *
+const void *
dhd_bus_sih(struct dhd_bus *bus)
{
- return (void *)bus->sih;
+ return (const void *)bus->sih;
}
void *
return;
}
+
int
dhd_bus_devreset(dhd_pub_t *dhdp, uint8 flag)
{
/* Expect app to have torn down any connection before calling */
/* Stop the bus, disable F2 */
dhd_bus_stop(bus, FALSE);
-#if defined(OOB_INTR_ONLY) || defined(BCMSPI_ANDROID)
+
+#if defined(OOB_INTR_ONLY)
/* Clean up any pending IRQ */
dhd_enable_oob_intr(bus, FALSE);
bcmsdh_oob_intr_set(bus->sdh, FALSE);
bcmsdh_oob_intr_unregister(bus->sdh);
-#endif /* defined(OOB_INTR_ONLY) || defined(BCMSPI_ANDROID) */
+#endif
/* Clean tx/rx buffer pointers, detach from the dongle */
dhdsdio_release_dongle(bus, bus->dhd->osh, TRUE, TRUE);
bus->dhd->dongle_reset = TRUE;
- DHD_ERROR(("%s: making dhdpub up FALSE\n", __FUNCTION__));
bus->dhd->up = FALSE;
dhd_txglom_enable(dhdp, FALSE);
dhd_os_sdunlock(dhdp);
DHD_LINUX_GENERAL_LOCK(bus->dhd, flags);
- DHD_ERROR(("%s: making DHD_BUS_DOWN\n", __FUNCTION__));
bus->dhd->busstate = DHD_BUS_DOWN;
DHD_LINUX_GENERAL_UNLOCK(bus->dhd, flags);
} else {
/* App must have restored power to device before calling */
- printf("\n\n%s: == Power ON ==\n", __FUNCTION__);
+ printf("\n\n%s: == WLAN ON ==\n", __FUNCTION__);
if (bus->dhd->dongle_reset) {
/* Turn on WLAN */
dhd_os_sdlock(dhdp);
- /* Reset SD client -- required if devreset is called
- * via 'dhd devreset' iovar
- */
+ /* Reset SD client */
bcmsdh_reset(bus->sdh);
+
/* Attempt to re-attach & download */
if (dhdsdio_probe_attach(bus, bus->dhd->osh, bus->sdh,
- (uint32 *)(uintptr)si_enum_base(bus->cl_devid),
+ (uint32 *)SI_ENUM_BASE,
bus->cl_devid)) {
DHD_LINUX_GENERAL_LOCK(bus->dhd, flags);
- DHD_ERROR(("%s: making DHD_BUS_DOWN\n", __FUNCTION__));
bus->dhd->busstate = DHD_BUS_DOWN;
DHD_LINUX_GENERAL_UNLOCK(bus->dhd, flags);
-
/* Attempt to download binary to the dongle */
if (dhdsdio_probe_init(bus, bus->dhd->osh, bus->sdh) &&
dhdsdio_download_firmware(bus, bus->dhd->osh, bus->sdh) >= 0) {
/* Re-init bus, enable F2 transfer */
bcmerror = dhd_bus_init((dhd_pub_t *) bus->dhd, FALSE);
if (bcmerror == BCME_OK) {
-#if defined(OOB_INTR_ONLY) || defined(BCMSPI_ANDROID)
+#if defined(OOB_INTR_ONLY)
dhd_enable_oob_intr(bus, TRUE);
bcmsdh_oob_intr_register(bus->sdh,
dhdsdio_isr, bus);
bcmsdh_oob_intr_set(bus->sdh, TRUE);
#elif defined(FORCE_WOWLAN)
dhd_enable_oob_intr(bus, TRUE);
-#endif /* defined(OOB_INTR_ONLY) || defined(BCMSPI_ANDROID) */
+#endif
bus->dhd->dongle_reset = FALSE;
bus->dhd->up = TRUE;
#if !defined(IGNORE_ETH0_DOWN)
/* Restore flow control */
dhd_txflowcontrol(bus->dhd, ALL_INTERFACES, OFF);
-#endif // endif
+#endif
dhd_os_wd_timer(dhdp, dhd_watchdog_ms);
DHD_TRACE(("%s: WLAN ON DONE\n", __FUNCTION__));
printf("Will call dhd_bus_start instead\n");
dhd_bus_resume(dhdp, 1);
#if defined(HW_OOB) || defined(FORCE_WOWLAN)
- dhd_conf_set_hw_oob_intr(bus->sdh, bus->sih); // terence 20120615: fix for OOB initial issue
+ dhd_conf_set_hw_oob_intr(bus->sdh, bus->sih->chip); // terence 20120615: fix for OOB initial issue
#endif
if ((bcmerror = dhd_bus_start(dhdp)) != 0)
DHD_ERROR(("%s: dhd_bus_start fail with %d\n",
return dhdsdio_membytes(bus, set, address, data, size);
}
-#if defined(SUPPORT_MULTIPLE_REVISION)
-static int
-concate_revision_bcm4335(dhd_bus_t *bus, char *fw_path, char *nv_path)
-{
-
- uint chipver;
-#if defined(SUPPORT_MULTIPLE_CHIPS)
- char chipver_tag[10] = "_4335";
-#else
- char chipver_tag[4] = {0, };
-#endif /* defined(SUPPORT_MULTIPLE_CHIPS) */
-
- DHD_TRACE(("%s: BCM4335 Multiple Revision Check\n", __FUNCTION__));
- if (bus->sih->chip != BCM4335_CHIP_ID) {
- DHD_ERROR(("%s:Chip is not BCM4335\n", __FUNCTION__));
- return -1;
- }
- chipver = bus->sih->chiprev;
- DHD_ERROR(("CHIP VER = [0x%x]\n", chipver));
- if (chipver == 0x0) {
- DHD_ERROR(("----- CHIP bcm4335_A0 -----\n"));
- strcat(chipver_tag, "_a0");
- } else if (chipver == 0x1) {
- DHD_ERROR(("----- CHIP bcm4335_B0 -----\n"));
-#if defined(SUPPORT_MULTIPLE_CHIPS)
- strcat(chipver_tag, "_b0");
-#endif /* defined(SUPPORT_MULTIPLE_CHIPS) */
- }
-
- strcat(fw_path, chipver_tag);
- strcat(nv_path, chipver_tag);
- return 0;
-}
-
-static int
-concate_revision_bcm4339(dhd_bus_t *bus, char *fw_path, char *nv_path)
-{
-
- uint chipver;
-#if defined(SUPPORT_MULTIPLE_CHIPS)
- char chipver_tag[10] = "_4339";
-#else
- char chipver_tag[4] = {0, };
-#endif /* defined(SUPPORT_MULTIPLE_CHIPS) */
-
- DHD_TRACE(("%s: BCM4339 Multiple Revision Check\n", __FUNCTION__));
- if (bus->sih->chip != BCM4339_CHIP_ID) {
- DHD_ERROR(("%s:Chip is not BCM4339\n", __FUNCTION__));
- return -1;
- }
- chipver = bus->sih->chiprev;
- DHD_ERROR(("CHIP VER = [0x%x]\n", chipver));
- if (chipver == 0x1) {
- DHD_ERROR(("----- CHIP bcm4339_A0 -----\n"));
- strcat(chipver_tag, "_a0");
- } else {
- DHD_ERROR(("----- CHIP bcm4339 unknown revision %d -----\n",
- chipver));
- }
-
- strcat(fw_path, chipver_tag);
- strcat(nv_path, chipver_tag);
- return 0;
-}
-
-static int concate_revision_bcm4350(dhd_bus_t *bus, char *fw_path, char *nv_path)
-{
- uint32 chip_ver;
-#if defined(SUPPORT_MULTIPLE_CHIPS)
- char chipver_tag[10] = {0, };
-#else
- char chipver_tag[4] = {0, };
-#endif /* defined(SUPPORT_MULTIPLE_CHIPS) */
- chip_ver = bus->sih->chiprev;
-
-#if defined(SUPPORT_MULTIPLE_CHIPS)
- if (chip_ver == 3)
- strcat(chipver_tag, "_4354");
- else
- strcat(chipver_tag, "_4350");
-#endif // endif
-
- if (chip_ver == 3) {
- DHD_ERROR(("----- CHIP 4354 A0 -----\n"));
- strcat(chipver_tag, "_a0");
- } else {
- DHD_ERROR(("----- Unknown chip version, ver=%x -----\n", chip_ver));
- }
-
- strcat(fw_path, chipver_tag);
- strcat(nv_path, chipver_tag);
- return 0;
-}
-
-static int concate_revision_bcm4354(dhd_bus_t *bus, char *fw_path, char *nv_path)
-{
- uint32 chip_ver;
-#if defined(SUPPORT_MULTIPLE_CHIPS)
- char chipver_tag[10] = "_4354";
-#else
- char chipver_tag[4] = {0, };
-#endif /* SUPPORT_MULTIPLE_CHIPS */
-
- chip_ver = bus->sih->chiprev;
- if (chip_ver == 1) {
- DHD_ERROR(("----- CHIP 4354 A1 -----\n"));
- strcat(chipver_tag, "_a1");
- } else {
- DHD_ERROR(("----- Unknown chip version, ver=%x -----\n", chip_ver));
- }
-
- strcat(fw_path, chipver_tag);
- strcat(nv_path, chipver_tag);
-
- return 0;
-}
-
-static int
-concate_revision_bcm43454(dhd_bus_t *bus, char *fw_path, char *nv_path)
-{
- char chipver_tag[10] = {0, };
-#ifdef SUPPORT_MULTIPLE_BOARD_REV_FROM_DT
- int base_system_rev_for_nv = 0;
-#endif /* SUPPORT_MULTIPLE_BOARD_REV_FROM_DT */
-
- DHD_TRACE(("%s: BCM43454 Multiple Revision Check\n", __FUNCTION__));
- if (bus->sih->chip != BCM43454_CHIP_ID) {
- DHD_ERROR(("%s:Chip is not BCM43454!\n", __FUNCTION__));
- return -1;
- }
-#ifdef SUPPORT_MULTIPLE_BOARD_REV_FROM_DT
- base_system_rev_for_nv = dhd_get_system_rev();
- if (base_system_rev_for_nv > 0) {
- DHD_ERROR(("----- Board Rev [%d] -----\n", base_system_rev_for_nv));
- sprintf(chipver_tag, "_r%02d", base_system_rev_for_nv);
- }
-#endif /* SUPPORT_MULTIPLE_BOARD_REV_FROM_DT */
-#ifdef SUPPORT_MULTIPLE_BOARD_REV_FROM_HW
- DHD_ERROR(("----- Rev [%d] Fot MULTIPLE Board. -----\n", system_hw_rev));
- if ((system_hw_rev >= 8) && (system_hw_rev <= 11)) {
- DHD_ERROR(("This HW is Rev 08 ~ 11. this is For FD-HW\n"));
- strcat(chipver_tag, "_FD");
- }
-#endif /* SUPPORT_MULTIPLE_BOARD_REV_FROM_HW */
-
- strcat(nv_path, chipver_tag);
- return 0;
-}
-
-int
-concate_revision(dhd_bus_t *bus, char *fw_path, char *nv_path)
-{
- int res = 0;
-
- if (!bus || !bus->sih) {
- DHD_ERROR(("%s:Bus is Invalid\n", __FUNCTION__));
- return -1;
- }
-
- switch (bus->sih->chip) {
- case BCM4335_CHIP_ID:
- res = concate_revision_bcm4335(bus, fw_path, nv_path);
-
- break;
- case BCM4339_CHIP_ID:
- res = concate_revision_bcm4339(bus, fw_path, nv_path);
- break;
- case BCM4350_CHIP_ID:
- res = concate_revision_bcm4350(bus, fw_path, nv_path);
- break;
- case BCM4354_CHIP_ID:
- res = concate_revision_bcm4354(bus, fw_path, nv_path);
- break;
- case BCM43454_CHIP_ID:
- res = concate_revision_bcm43454(bus, fw_path, nv_path);
- break;
-
- default:
- DHD_ERROR(("REVISION SPECIFIC feature is not required\n"));
- return res;
- }
-
- if (res == 0) {
- }
- return res;
-}
-#endif /* SUPPORT_MULTIPLE_REVISION */
void
dhd_bus_update_fw_nv_path(struct dhd_bus *bus, char *pfw_path, char *pnv_path,
#ifdef PROP_TXSTATUS
wlfc_enabled = (dhd_wlfc_cleanup_txq(dhdp, NULL, 0) != WLFC_UNSUPPORTED);
-#endif // endif
+#endif
if (!wlfc_enabled) {
#ifdef DHDTCPACK_SUPPRESS
/* Clean tcp_ack_info_tbl in order to prevent access to flushed pkt,
#endif /* BCMSDIO */
#ifdef DEBUGGER
-static uint32
-dhd_sdio_reg_read(struct dhd_bus *bus, ulong addr)
+uint32 dhd_sdio_reg_read(void *h, uint32 addr)
{
uint32 rval;
+ struct dhd_bus *bus = (struct dhd_bus *) h;
dhd_os_sdlock(bus->dhd);
return rval;
}
-static void
-dhd_sdio_reg_write(struct dhd_bus *bus, ulong addr, uint32 val)
+void dhd_sdio_reg_write(void *h, uint32 addr, uint32 val)
{
+ struct dhd_bus *bus = (struct dhd_bus *) h;
+
dhd_os_sdlock(bus->dhd);
BUS_WAKE(bus);
#endif /* DEBUGGER */
+
#if defined(BT_OVER_SDIO)
uint8 dhd_bus_cfg_read(void *h, uint fun_num, uint32 addr, int *err)
{
void *image = NULL;
uint8 *mem_blk = NULL, *mem_ptr = NULL, *data_ptr = NULL;
+
uint32 offset_addr = 0, offset_len = 0, bytes_to_write = 0;
char *line = NULL;
uint32 dest_addr = 0, num_bytes;
uint16 hiAddress = 0;
- uint32 start_addr, start_data, end_addr, end_data, i, index, pad,
- bt2wlan_pwrup_adr;
+ uint32 start_addr, start_data, end_addr, end_data, i, index, pad;
+ uint32 bt2wlan_pwrup_adr;
int addr_mode = BTFW_ADDR_MODE_EXTENDED;
return 0;
}
- image = dhd_os_open_image1(bus->dhd, bus->btfw_path);
+ image = dhd_os_open_image(bus->btfw_path);
if (image == NULL)
goto err;
MFREE(bus->dhd->osh, line, BTFW_MAX_STR_LEN);
if (image)
- dhd_os_close_image1(bus->dhd, image);
+ dhd_os_close_image(image);
return bcm_error;
}
switch (bus->sih->chip) {
case BCM4339_CHIP_ID:
- bcmsdh_reg_write(bus->sdh, SI_ENUM_BASE(bus->sih) + 0x618, 4, 0x3fcaf377);
+ bcmsdh_reg_write(bus->sdh, SI_ENUM_BASE + 0x618, 4, 0x3fcaf377);
if (bcmsdh_regfail(bus->sdh)) {
DHD_ERROR(("%s:%d Setting min_res_mask failed\n", __FUNCTION__, __LINE__));
return FALSE;
}
}
-#ifdef DHD_ULP
+#ifdef DHD_ULP
/* Function to disable console messages on entering ULP mode */
void
dhd_bus_ulp_disable_console(dhd_pub_t *dhdp)
/* Flush the console buffer before disabling */
dhdsdio_readconsole(dhdp->bus);
- dhdp->dhd_console_ms = 0;
+ dhd_console_ms = 0;
#endif /* DHD_DEBUG */
}
dhd_os_wd_timer(bus->dhd, dhd_watchdog_ms);
dhd_ulp_set_ulp_state(bus->dhd, DHD_ULP_READY);
-#if defined(OOB_INTR_ONLY) || defined(BCMSPI_ANDROID)
+#if defined(OOB_INTR_ONLY)
dhd_enable_oob_intr(bus, TRUE);
bcmsdh_oob_intr_set(bus->sdh, TRUE);
-#endif /* defined(OOB_INTR_ONLY) || defined(BCMSPI_ANDROID) */
+#endif
#ifdef DHD_DEBUG
/* Re-enable the console messages on FW redownload to default value */
dhd_ulp_restore_console_interval(bus->dhd);
return bcmsdh_set_get_wake(dhd->bus->sdh, 0);
}
#endif /* DHD_WAKE_STATUS */
-
-int
-dhd_bus_sleep(dhd_pub_t *dhdp, bool sleep, uint32 *intstatus)
-{
- dhd_bus_t *bus = dhdp->bus;
- uint32 retry = 0;
- int ret = 0;
-
- if (bus) {
- dhd_os_sdlock(dhdp);
- BUS_WAKE(bus);
- R_SDREG(*intstatus, &bus->regs->intstatus, retry);
- if (sleep) {
- if (SLPAUTO_ENAB(bus)) {
- ret = dhdsdio_bussleep(bus, sleep);
- if (ret != BCME_BUSY)
- dhd_os_wd_timer(bus->dhd, 0);
- } else
- dhdsdio_clkctl(bus, CLK_NONE, FALSE);
- }
- dhd_os_sdunlock(dhdp);
- } else {
- DHD_ERROR(("bus is NULL\n"));
- ret = -1;
- }
-
- return ret;
-}
\ No newline at end of file
--- /dev/null
+++ b/drivers/amlogic/wifi/dhd_static_buf.c
+/*
+ * drivers/amlogic/wifi/dhd_static_buf.c
+ *
+ * Copyright (C) 2017 Amlogic, Inc. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ */
+
+#define pr_fmt(fmt) "Wifi: %s: " fmt, __func__
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/skbuff.h>
+#include <linux/wlan_plat.h>
+#include <linux/amlogic/dhd_buf.h>
+
+#define DHD_STATIC_VERSION_STR "1.579.77.41.9"
+
+#define BCMDHD_SDIO
+#define BCMDHD_PCIE
+
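+/*
+ * Section IDs requested through bcmdhd_mem_prealloc(); the numbering mirrors
+ * the preallocation indices used on the DHD driver side.
+ */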
+enum dhd_prealloc_index {
+ DHD_PREALLOC_PROT = 0,
+#if defined(BCMDHD_SDIO)
+ DHD_PREALLOC_RXBUF = 1,
+ DHD_PREALLOC_DATABUF = 2,
+#endif
+ DHD_PREALLOC_OSL_BUF = 3,
+ DHD_PREALLOC_SKB_BUF = 4,
+ DHD_PREALLOC_WIPHY_ESCAN0 = 5,
+ DHD_PREALLOC_WIPHY_ESCAN1 = 6,
+ DHD_PREALLOC_DHD_INFO = 7,
+ DHD_PREALLOC_DHD_WLFC_INFO = 8,
+#ifdef BCMDHD_PCIE
+ DHD_PREALLOC_IF_FLOW_LKUP = 9,
+#endif
+ DHD_PREALLOC_MEMDUMP_BUF = 10,
+ DHD_PREALLOC_MEMDUMP_RAM = 11,
+ DHD_PREALLOC_DHD_WLFC_HANGER = 12,
+ DHD_PREALLOC_PKTID_MAP = 13,
+ DHD_PREALLOC_PKTID_MAP_IOCTL = 14,
+ DHD_PREALLOC_DHD_LOG_DUMP_BUF = 15,
+ DHD_PREALLOC_DHD_LOG_DUMP_BUF_EX = 16,
+ DHD_PREALLOC_DHD_PKTLOG_DUMP_BUF = 17,
+ DHD_PREALLOC_STAT_REPORT_BUF = 18,
+ DHD_PREALLOC_WL_ESCAN_INFO = 19,
+ DHD_PREALLOC_FW_VERBOSE_RING = 20,
+ DHD_PREALLOC_FW_EVENT_RING = 21,
+ DHD_PREALLOC_DHD_EVENT_RING = 22,
+ DHD_PREALLOC_NAN_EVENT_RING = 23,
+ DHD_PREALLOC_MAX
+};
+
+#define STATIC_BUF_MAX_NUM 20
+#define STATIC_BUF_SIZE (PAGE_SIZE*2)
+
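+/* Reserved size for each preallocated section; larger requests are refused. */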
+#define DHD_PREALLOC_PROT_SIZE (16 * 1024)
+#define DHD_PREALLOC_RXBUF_SIZE (24 * 1024)
+#define DHD_PREALLOC_DATABUF_SIZE (64 * 1024)
+#define DHD_PREALLOC_OSL_BUF_SIZE (STATIC_BUF_MAX_NUM * STATIC_BUF_SIZE)
+#define DHD_PREALLOC_WIPHY_ESCAN0_SIZE (64 * 1024)
+#define DHD_PREALLOC_DHD_INFO_SIZE (32 * 1024)
+#define DHD_PREALLOC_MEMDUMP_RAM_SIZE (810 * 1024)
+#define DHD_PREALLOC_DHD_WLFC_HANGER_SIZE (73 * 1024)
+#define DHD_PREALLOC_WL_ESCAN_INFO_SIZE (66 * 1024)
+#ifdef CONFIG_64BIT
+#define DHD_PREALLOC_IF_FLOW_LKUP_SIZE (20 * 1024 * 2)
+#else
+#define DHD_PREALLOC_IF_FLOW_LKUP_SIZE (20 * 1024)
+#endif
+#define FW_VERBOSE_RING_SIZE (64 * 1024)
+#define FW_EVENT_RING_SIZE (64 * 1024)
+#define DHD_EVENT_RING_SIZE (64 * 1024)
+#define NAN_EVENT_RING_SIZE (64 * 1024)
+
+#if defined(CONFIG_64BIT)
+#define WLAN_DHD_INFO_BUF_SIZE (24 * 1024)
+#define WLAN_DHD_WLFC_BUF_SIZE (64 * 1024)
+#define WLAN_DHD_IF_FLOW_LKUP_SIZE (64 * 1024)
+#else
+#define WLAN_DHD_INFO_BUF_SIZE (16 * 1024)
+#define WLAN_DHD_WLFC_BUF_SIZE (24 * 1024)
+#define WLAN_DHD_IF_FLOW_LKUP_SIZE (20 * 1024)
+#endif /* CONFIG_64BIT */
+#define WLAN_DHD_MEMDUMP_SIZE (800 * 1024)
+
+#define DHD_SKB_1PAGE_BUFSIZE (PAGE_SIZE*1)
+#define DHD_SKB_2PAGE_BUFSIZE (PAGE_SIZE*2)
+#define DHD_SKB_4PAGE_BUFSIZE (PAGE_SIZE*4)
+
+#define DHD_SKB_1PAGE_BUF_NUM 8
+#ifdef BCMDHD_PCIE
+#define DHD_SKB_2PAGE_BUF_NUM 64
+#elif defined(BCMDHD_SDIO)
+#define DHD_SKB_2PAGE_BUF_NUM 8
+#endif
+#define DHD_SKB_4PAGE_BUF_NUM 1
+
+/* These counts must match the definitions in linux_osl.c:
+ * WLAN_SKB_1_2PAGE_BUF_NUM => STATIC_PKT_1_2PAGE_NUM
+ * WLAN_SKB_BUF_NUM => STATIC_PKT_MAX_NUM
+ */
+#define WLAN_SKB_1_2PAGE_BUF_NUM ((DHD_SKB_1PAGE_BUF_NUM) + \
+ (DHD_SKB_2PAGE_BUF_NUM))
+#define WLAN_SKB_BUF_NUM ((WLAN_SKB_1_2PAGE_BUF_NUM) + (DHD_SKB_4PAGE_BUF_NUM))
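Taken together these constants imply roughly the following static footprint; this is an editor's estimate, not part of the patch, and assumes a 4 KB PAGE_SIZE, CONFIG_64BIT, and both BCMDHD_SDIO and BCMDHD_PCIE left defined as above:

/*
 * skb pool:     8 x 4 KB + 64 x 8 KB + 1 x 16 KB                 ~  560 KB
 * kmalloc bufs: 16 + 24 + 64 + 160 + 64 + 32 + 64 + 40 + 810
 *               + 73 + 66 + (4 x 64) KB                          ~ 1669 KB
 * total                                                          ~  2.2 MB
 */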
+
+void *wlan_static_prot;
+void *wlan_static_rxbuf;
+void *wlan_static_databuf;
+void *wlan_static_osl_buf;
+void *wlan_static_scan_buf0;
+void *wlan_static_scan_buf1;
+void *wlan_static_dhd_info_buf;
+void *wlan_static_dhd_wlfc_info_buf;
+void *wlan_static_if_flow_lkup;
+void *wlan_static_dhd_memdump_ram_buf;
+void *wlan_static_dhd_wlfc_hanger_buf;
+void *wlan_static_wl_escan_info_buf;
+void *wlan_static_fw_verbose_ring_buf;
+void *wlan_static_fw_event_ring_buf;
+void *wlan_static_dhd_event_ring_buf;
+void *wlan_static_nan_event_ring_buf;
+
+static struct sk_buff *wlan_static_skb[WLAN_SKB_BUF_NUM];
+
+void *bcmdhd_mem_prealloc(int section, unsigned long size)
+{
+ pr_err("%s: sectoin %d, %ld\n", __func__, section, size);
+ if (section == DHD_PREALLOC_PROT)
+ return wlan_static_prot;
+
+#if defined(BCMDHD_SDIO)
+ if (section == DHD_PREALLOC_RXBUF)
+ return wlan_static_rxbuf;
+
+ if (section == DHD_PREALLOC_DATABUF)
+ return wlan_static_databuf;
+#endif /* BCMDHD_SDIO */
+
+ if (section == DHD_PREALLOC_SKB_BUF)
+ return wlan_static_skb;
+
+ if (section == DHD_PREALLOC_WIPHY_ESCAN0)
+ return wlan_static_scan_buf0;
+
+ if (section == DHD_PREALLOC_WIPHY_ESCAN1)
+ return wlan_static_scan_buf1;
+
+ if (section == DHD_PREALLOC_OSL_BUF) {
+ if (size > DHD_PREALLOC_OSL_BUF_SIZE) {
+ pr_err("request OSL_BUF(%lu) > %ld\n",
+ size, DHD_PREALLOC_OSL_BUF_SIZE);
+ return NULL;
+ }
+ return wlan_static_osl_buf;
+ }
+
+ if (section == DHD_PREALLOC_DHD_INFO) {
+ if (size > DHD_PREALLOC_DHD_INFO_SIZE) {
+ pr_err("request DHD_INFO size(%lu) > %d\n",
+ size, DHD_PREALLOC_DHD_INFO_SIZE);
+ return NULL;
+ }
+ return wlan_static_dhd_info_buf;
+ }
+ if (section == DHD_PREALLOC_DHD_WLFC_INFO) {
+ if (size > WLAN_DHD_WLFC_BUF_SIZE) {
+ pr_err("request DHD_WLFC_INFO size(%lu) > %d\n",
+ size, WLAN_DHD_WLFC_BUF_SIZE);
+ return NULL;
+ }
+ return wlan_static_dhd_wlfc_info_buf;
+ }
+#ifdef BCMDHD_PCIE
+ if (section == DHD_PREALLOC_IF_FLOW_LKUP) {
+ if (size > DHD_PREALLOC_IF_FLOW_LKUP_SIZE) {
+ pr_err("request DHD_IF_FLOW_LKUP size(%lu) > %d\n",
+ size, DHD_PREALLOC_IF_FLOW_LKUP_SIZE);
+ return NULL;
+ }
+
+ return wlan_static_if_flow_lkup;
+ }
+#endif /* BCMDHD_PCIE */
+ if (section == DHD_PREALLOC_MEMDUMP_RAM) {
+ if (size > DHD_PREALLOC_MEMDUMP_RAM_SIZE) {
+ pr_err("request DHD_PREALLOC_MEMDUMP_RAM_SIZE(%lu) > %d\n",
+ size, DHD_PREALLOC_MEMDUMP_RAM_SIZE);
+ return NULL;
+ }
+
+ return wlan_static_dhd_memdump_ram_buf;
+ }
+ if (section == DHD_PREALLOC_DHD_WLFC_HANGER) {
+ if (size > DHD_PREALLOC_DHD_WLFC_HANGER_SIZE) {
+ pr_err("request DHD_WLFC_HANGER size(%lu) > %d\n",
+ size, DHD_PREALLOC_DHD_WLFC_HANGER_SIZE);
+ return NULL;
+ }
+ return wlan_static_dhd_wlfc_hanger_buf;
+ }
+ if (section == DHD_PREALLOC_WL_ESCAN_INFO) {
+ if (size > DHD_PREALLOC_WL_ESCAN_INFO_SIZE) {
+ pr_err("request DHD_PREALLOC_WL_ESCAN_INFO_SIZE(%lu) > %d\n",
+ size, DHD_PREALLOC_WL_ESCAN_INFO_SIZE);
+ return NULL;
+ }
+
+ return wlan_static_wl_escan_info_buf;
+ }
+ if (section == DHD_PREALLOC_FW_VERBOSE_RING) {
+ if (size > FW_VERBOSE_RING_SIZE) {
+ pr_err("request DHD_PREALLOC_FW_VERBOSE_RING(%lu) > %d\n",
+ size, FW_VERBOSE_RING_SIZE);
+ return NULL;
+ }
+
+ return wlan_static_fw_verbose_ring_buf;
+ }
+ if (section == DHD_PREALLOC_FW_EVENT_RING) {
+ if (size > FW_EVENT_RING_SIZE) {
+ pr_err("request DHD_PREALLOC_FW_EVENT_RING(%lu) > %d\n",
+ size, FW_EVENT_RING_SIZE);
+ return NULL;
+ }
+
+ return wlan_static_fw_event_ring_buf;
+ }
+ if (section == DHD_PREALLOC_DHD_EVENT_RING) {
+ if (size > DHD_EVENT_RING_SIZE) {
+ pr_err("request DHD_PREALLOC_DHD_EVENT_RING(%lu) > %d\n",
+ size, DHD_EVENT_RING_SIZE);
+ return NULL;
+ }
+
+ return wlan_static_dhd_event_ring_buf;
+ }
+ if (section == DHD_PREALLOC_NAN_EVENT_RING) {
+ if (size > NAN_EVENT_RING_SIZE) {
+ pr_err("request DHD_PREALLOC_NAN_EVENT_RING(%lu) > %d\n",
+ size, NAN_EVENT_RING_SIZE);
+ return NULL;
+ }
+
+ return wlan_static_nan_event_ring_buf;
+ }
+ if ((section < 0) || (section >= DHD_PREALLOC_MAX))
+ pr_err("request section id(%d) is out of max index %d\n",
+ section, DHD_PREALLOC_MAX);
+
+ pr_err("%s: failed to alloc section %d, size=%ld\n",
+ __func__, section, size);
+
+ return NULL;
+}
+EXPORT_SYMBOL(bcmdhd_mem_prealloc);
+
+int bcmdhd_init_wlan_mem(void)
+{
+ int i;
+ int j;
+
+ pr_info("%s\n", DHD_STATIC_VERSION_STR);
+
+ for (i = 0; i < DHD_SKB_1PAGE_BUF_NUM; i++) {
+ wlan_static_skb[i] = dev_alloc_skb(DHD_SKB_1PAGE_BUFSIZE);
+ if (!wlan_static_skb[i])
+ goto err_skb_alloc;
+ pr_err("%s: section %d skb[%d], size=%ld\n", __func__,
+ DHD_PREALLOC_SKB_BUF, i, DHD_SKB_1PAGE_BUFSIZE);
+ }
+
+ for (i = DHD_SKB_1PAGE_BUF_NUM; i < WLAN_SKB_1_2PAGE_BUF_NUM; i++) {
+ wlan_static_skb[i] = dev_alloc_skb(DHD_SKB_2PAGE_BUFSIZE);
+ if (!wlan_static_skb[i])
+ goto err_skb_alloc;
+ pr_err("%s: section %d skb[%d], size=%ld\n", __func__,
+ DHD_PREALLOC_SKB_BUF, i, DHD_SKB_2PAGE_BUFSIZE);
+ }
+
+#if defined(BCMDHD_SDIO)
+ wlan_static_skb[i] = dev_alloc_skb(DHD_SKB_4PAGE_BUFSIZE);
+ if (!wlan_static_skb[i])
+ goto err_skb_alloc;
+ pr_err("%s: sectoin %d skb[%d], size=%ld\n", __func__,
+ DHD_PREALLOC_SKB_BUF, i, DHD_SKB_4PAGE_BUFSIZE);
+#endif /* BCMDHD_SDIO */
+
+ wlan_static_prot = kmalloc(DHD_PREALLOC_PROT_SIZE, GFP_KERNEL);
+ if (!wlan_static_prot)
+ goto err_mem_alloc;
+ pr_err("%s: sectoin %d, size=%d\n", __func__,
+ DHD_PREALLOC_PROT, DHD_PREALLOC_PROT_SIZE);
+
+#if defined(BCMDHD_SDIO)
+ wlan_static_rxbuf = kmalloc(DHD_PREALLOC_RXBUF_SIZE, GFP_KERNEL);
+ if (!wlan_static_rxbuf)
+ goto err_mem_alloc;
+ pr_err("%s: sectoin %d, size=%d\n", __func__,
+ DHD_PREALLOC_RXBUF, DHD_PREALLOC_RXBUF_SIZE);
+
+ wlan_static_databuf = kmalloc(DHD_PREALLOC_DATABUF_SIZE, GFP_KERNEL);
+ if (!wlan_static_databuf)
+ goto err_mem_alloc;
+ pr_err("%s: sectoin %d, size=%d\n", __func__,
+ DHD_PREALLOC_DATABUF, DHD_PREALLOC_DATABUF_SIZE);
+#endif /* BCMDHD_SDIO */
+
+ wlan_static_osl_buf = kmalloc(DHD_PREALLOC_OSL_BUF_SIZE, GFP_KERNEL);
+ if (!wlan_static_osl_buf)
+ goto err_mem_alloc;
+ pr_err("%s: sectoin %d, size=%ld\n", __func__,
+ DHD_PREALLOC_OSL_BUF, DHD_PREALLOC_OSL_BUF_SIZE);
+
+ wlan_static_scan_buf0 = kmalloc(DHD_PREALLOC_WIPHY_ESCAN0_SIZE, GFP_KERNEL);
+ if (!wlan_static_scan_buf0)
+ goto err_mem_alloc;
+ pr_err("%s: sectoin %d, size=%d\n", __func__,
+ DHD_PREALLOC_WIPHY_ESCAN0, DHD_PREALLOC_WIPHY_ESCAN0_SIZE);
+
+ wlan_static_dhd_info_buf = kmalloc(DHD_PREALLOC_DHD_INFO_SIZE, GFP_KERNEL);
+ if (!wlan_static_dhd_info_buf)
+ goto err_mem_alloc;
+ pr_err("%s: sectoin %d, size=%d\n", __func__,
+ DHD_PREALLOC_DHD_INFO, DHD_PREALLOC_DHD_INFO_SIZE);
+
+ wlan_static_dhd_wlfc_info_buf = kmalloc(WLAN_DHD_WLFC_BUF_SIZE, GFP_KERNEL);
+ if (!wlan_static_dhd_wlfc_info_buf)
+ goto err_mem_alloc;
+ pr_err("%s: sectoin %d, size=%d\n", __func__,
+ DHD_PREALLOC_DHD_WLFC_INFO, WLAN_DHD_WLFC_BUF_SIZE);
+
+#ifdef BCMDHD_PCIE
+ wlan_static_if_flow_lkup = kmalloc(DHD_PREALLOC_IF_FLOW_LKUP_SIZE, GFP_KERNEL);
+ if (!wlan_static_if_flow_lkup)
+ goto err_mem_alloc;
+ pr_err("%s: sectoin %d, size=%d\n", __func__,
+ DHD_PREALLOC_IF_FLOW_LKUP, DHD_PREALLOC_IF_FLOW_LKUP_SIZE);
+#endif /* BCMDHD_PCIE */
+
+ wlan_static_dhd_memdump_ram_buf = kmalloc(DHD_PREALLOC_MEMDUMP_RAM_SIZE, GFP_KERNEL);
+ if (!wlan_static_dhd_memdump_ram_buf)
+ goto err_mem_alloc;
+ pr_err("%s: sectoin %d, size=%d\n", __func__,
+ DHD_PREALLOC_MEMDUMP_RAM, DHD_PREALLOC_MEMDUMP_RAM_SIZE);
+
+ wlan_static_dhd_wlfc_hanger_buf = kmalloc(DHD_PREALLOC_DHD_WLFC_HANGER_SIZE, GFP_KERNEL);
+ if (!wlan_static_dhd_wlfc_hanger_buf)
+ goto err_mem_alloc;
+ pr_err("%s: sectoin %d, size=%d\n", __func__,
+ DHD_PREALLOC_DHD_WLFC_HANGER, DHD_PREALLOC_DHD_WLFC_HANGER_SIZE);
+
+ wlan_static_wl_escan_info_buf = kmalloc(DHD_PREALLOC_WL_ESCAN_INFO_SIZE, GFP_KERNEL);
+ if (!wlan_static_wl_escan_info_buf)
+ goto err_mem_alloc;
+ pr_err("%s: sectoin %d, size=%d\n", __func__,
+ DHD_PREALLOC_WL_ESCAN_INFO, DHD_PREALLOC_WL_ESCAN_INFO_SIZE);
+
+ wlan_static_fw_verbose_ring_buf = kmalloc(FW_VERBOSE_RING_SIZE, GFP_KERNEL);
+ if (!wlan_static_fw_verbose_ring_buf)
+ goto err_mem_alloc;
+ pr_err("%s: sectoin %d, size=%d\n", __func__,
+ DHD_PREALLOC_FW_VERBOSE_RING, FW_VERBOSE_RING_SIZE);
+
+ wlan_static_fw_event_ring_buf = kmalloc(FW_EVENT_RING_SIZE, GFP_KERNEL);
+ if (!wlan_static_fw_event_ring_buf)
+ goto err_mem_alloc;
+ pr_err("%s: sectoin %d, size=%d\n", __func__,
+ DHD_PREALLOC_FW_EVENT_RING, FW_EVENT_RING_SIZE);
+
+ wlan_static_dhd_event_ring_buf = kmalloc(DHD_EVENT_RING_SIZE, GFP_KERNEL);
+ if (!wlan_static_dhd_event_ring_buf)
+ goto err_mem_alloc;
+ pr_err("%s: sectoin %d, size=%d\n", __func__,
+ DHD_PREALLOC_DHD_EVENT_RING, DHD_EVENT_RING_SIZE);
+
+ wlan_static_nan_event_ring_buf = kmalloc(NAN_EVENT_RING_SIZE, GFP_KERNEL);
+ if (!wlan_static_nan_event_ring_buf)
+ goto err_mem_alloc;
+ pr_err("%s: sectoin %d, size=%d\n", __func__,
+ DHD_PREALLOC_NAN_EVENT_RING, NAN_EVENT_RING_SIZE);
+
+ return 0;
+
+err_mem_alloc:
+
+ kfree(wlan_static_prot);
+ kfree(wlan_static_rxbuf);
+ kfree(wlan_static_databuf);
+ kfree(wlan_static_osl_buf);
+ kfree(wlan_static_scan_buf0);
+ kfree(wlan_static_scan_buf1);
+ kfree(wlan_static_dhd_info_buf);
+ kfree(wlan_static_dhd_wlfc_info_buf);
+ kfree(wlan_static_if_flow_lkup);
+ kfree(wlan_static_dhd_memdump_ram_buf);
+ kfree(wlan_static_dhd_wlfc_hanger_buf);
+ kfree(wlan_static_wl_escan_info_buf);
+ kfree(wlan_static_fw_verbose_ring_buf);
+ kfree(wlan_static_fw_event_ring_buf);
+ kfree(wlan_static_dhd_event_ring_buf);
+ kfree(wlan_static_nan_event_ring_buf);
+ pr_err("%s: Failed to mem_alloc for WLAN\n", __func__);
+
+ i = WLAN_SKB_BUF_NUM;
+
+err_skb_alloc:
+ pr_err("%s: Failed to skb_alloc for WLAN\n", __func__);
+ for (j = 0; j < i; j++)
+ dev_kfree_skb(wlan_static_skb[j]);
+
+ return -ENOMEM;
+}
+EXPORT_SYMBOL(bcmdhd_init_wlan_mem);
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("AMLOGIC");
+MODULE_DESCRIPTION("wifi device tree driver");
/*
* DHD PROP_TXSTATUS Module.
*
- * Copyright (C) 1999-2019, Broadcom.
- *
+ * Copyright (C) 1999-2017, Broadcom Corporation
+ *
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
* under the terms of the GNU General Public License version 2 (the "GPL"),
* available at http://www.broadcom.com/licenses/GPLv2.php, with the
* following added to such license:
- *
+ *
* As a special exception, the copyright holders of this software give you
* permission to link this software with independent modules, and to copy and
* distribute the resulting executable under terms of your choice, provided that
* the license of that module. An independent module is a module which is not
* derived from this software. The special exception does not apply to any
* modifications of the software.
- *
+ *
* Notwithstanding the above, under no circumstances may you combine this
* software in any way with any other Broadcom software provided under a license
* other than the GPL, without Broadcom's express prior written consent.
*
* <<Broadcom-WL-IPTag/Open:>>
*
- * $Id: dhd_wlfc.c 743239 2018-01-25 08:33:18Z $
+ * $Id: dhd_wlfc.c 679733 2017-01-17 06:40:39Z $
*
*/
+
#include <typedefs.h>
#include <osl.h>
#include <dhd_dbg.h>
#include <dhd_config.h>
-#include <wl_android.h>
#ifdef PROP_TXSTATUS /* a form of flow control between host and dongle */
#include <wlfc_proto.h>
#include <dhd_wlfc.h>
-#endif // endif
+#endif
#ifdef DHDTCPACK_SUPPRESS
#include <dhd_ip.h>
#endif /* DHDTCPACK_SUPPRESS */
+
/*
* wlfc naming and lock rules:
*
#define WLFC_THREAD_RETRY_WAIT_MS 10000 /* 10 sec */
#endif /* defined (DHD_WLFC_THREAD) */
+
#ifdef PROP_TXSTATUS
#define DHD_WLFC_QMON_COMPLETE(entry)
+
/** reordering related */
#if defined(DHD_WLFC_THREAD)
ASSERT(PKTLINK(p) == NULL); /* queueing chains not allowed */
ASSERT(!pktq_full(pq));
- ASSERT(!pktqprec_full(pq, prec));
+ ASSERT(!pktq_pfull(pq, prec));
q = &pq->q[prec];
+ PKTSETLINK(p, NULL);
if (q->head == NULL) {
/* empty queue */
q->head = p;
exit:
- q->n_pkts++;
- pq->n_pkts_tot++;
+ q->len++;
+ pq->len++;
if (pq->hi_prec < prec)
pq->hi_prec = (uint8)prec;
}
}
- q->n_pkts--;
- pq->n_pkts_tot--;
-
-#ifdef WL_TXQ_STALL
- q->dequeue_count++;
-#endif // endif
+ q->len--;
+ pq->len--;
PKTSETLINK(p, NULL);
if (PKTSUMNEEDED(p))
h->flags |= BDC_FLAG_SUM_NEEDED;
+
h->priority = (PKTPRIO(p) & BDC_PRIORITY_MASK);
h->flags2 = 0;
h->dataOffset = dataOffset >> 2;
void *pout = NULL;
ASSERT(dhdp && p);
- if (prec < 0 || prec >= WLFC_PSQ_PREC_COUNT) {
- ASSERT(0);
- return BCME_BADARG;
- }
+ ASSERT(prec >= 0 && prec <= WLFC_PSQ_PREC_COUNT);
ctx = (athost_wl_status_info_t*)dhdp->wlfc_state;
/* Fast case, precedence queue is not full and we are also not
* exceeding total queue length
*/
- if (!pktqprec_full(pq, prec) && !pktq_full(pq)) {
+ if (!pktq_pfull(pq, prec) && !pktq_full(pq)) {
goto exit;
}
/* Determine precedence from which to evict packet, if any */
- if (pktqprec_full(pq, prec)) {
+ if (pktq_pfull(pq, prec)) {
eprec = prec;
} else if (pktq_full(pq)) {
p = pktq_peek_tail(pq, &eprec);
return FALSE;
}
if ((eprec > prec) || (eprec < 0)) {
- if (!pktqprec_empty(pq, prec)) {
+ if (!pktq_pempty(pq, prec)) {
eprec = prec;
} else {
return FALSE;
/* Evict if needed */
if (eprec >= 0) {
/* Detect queueing to unconfigured precedence */
- ASSERT(!pktqprec_empty(pq, eprec));
+ ASSERT(!pktq_pempty(pq, eprec));
/* Evict all fragmented frames */
dhd_prec_drop_pkts(dhdp, pq, eprec, _dhd_wlfc_prec_drop);
}
if ((ctx->hostif_flow_state[if_id] == OFF) && !_dhd_wlfc_allow_fc(ctx, if_id))
return;
- if ((pq->n_pkts_tot <= WLFC_FLOWCONTROL_LOWATER) && (ctx->hostif_flow_state[if_id] == ON)) {
+ if ((pq->len <= WLFC_FLOWCONTROL_LOWATER) && (ctx->hostif_flow_state[if_id] == ON)) {
/* start traffic */
ctx->hostif_flow_state[if_id] = OFF;
/*
WLFC_DBGMESG(("qlen:%02d, if:%02d, ->OFF, start traffic %s()\n",
- pq->n_pkts_tot, if_id, __FUNCTION__));
+ pq->len, if_id, __FUNCTION__));
*/
WLFC_DBGMESG(("F"));
ctx->toggle_host_if = 0;
}
- if (pq->n_pkts_tot >= WLFC_FLOWCONTROL_HIWATER && ctx->hostif_flow_state[if_id] == OFF) {
+ if ((pq->len >= WLFC_FLOWCONTROL_HIWATER) && (ctx->hostif_flow_state[if_id] == OFF)) {
/* stop traffic */
ctx->hostif_flow_state[if_id] = ON;
/*
WLFC_DBGMESG(("qlen:%02d, if:%02d, ->ON, stop traffic %s()\n",
- pq->n_pkts_tot, if_id, __FUNCTION__));
+ pq->len, if_id, __FUNCTION__));
*/
WLFC_DBGMESG(("N"));
DHD_PKTTAG_WLFCPKT_SET(PKTTAG(p), 1);
#ifdef PROP_TXSTATUS_DEBUG
ctx->stats.signal_only_pkts_sent++;
-#endif // endif
+#endif
#if defined(BCMPCIE)
rc = dhd_bus_txdata(dhdp->bus, p, ctx->host_ifidx);
#else
rc = dhd_bus_txdata(dhdp->bus, p);
-#endif // endif
+#endif
if (rc != BCME_OK) {
_dhd_wlfc_pullheader(ctx, p);
PKTFREE(ctx->osh, p, TRUE);
bool rc = FALSE;
if (entry->state == WLFC_STATE_CLOSE) {
- if ((pktqprec_n_pkts(&entry->psq, (prec << 1)) == 0) &&
- (pktqprec_n_pkts(&entry->psq, ((prec << 1) + 1)) == 0)) {
+ if ((pktq_plen(&entry->psq, (prec << 1)) == 0) &&
+ (pktq_plen(&entry->psq, ((prec << 1) + 1)) == 0)) {
/* no packets in both 'normal' and 'suspended' queues */
if (entry->traffic_pending_bmp & NBITVAL(prec)) {
rc = TRUE;
#ifdef PROP_TXSTATUS_DEBUG
h->items[hslot].push_time =
OSL_SYSUPTIME();
-#endif // endif
+#endif
} else {
DHD_ERROR(("%s() hanger_pushpkt() failed, rc: %d\n",
__FUNCTION__, rc));
ASSERT(entry);
if (entry->occupied && _dhd_wlfc_is_destination_open(ctx, entry, prec) &&
-#ifdef PROPTX_MAXCOUNT
- (entry->transit_count < entry->transit_maxcount) &&
-#endif /* PROPTX_MAXCOUNT */
(entry->transit_count < WL_TXSTATUS_FREERUNCTR_MASK) &&
(!entry->suppressed)) {
*ac_credit_spent = credit_spent;
entry->requested_credit--;
#ifdef PROP_TXSTATUS_DEBUG
entry->dstncredit_sent_packets++;
-#endif // endif
+#endif
} else if (entry->requested_packet > 0) {
entry->requested_packet--;
DHD_PKTTAG_SETONETIMEPKTRQST(PKTTAG(p));
return BCME_ERROR;
}
+
/* A packet has been pushed, update traffic availability bitmap, if applicable */
_dhd_wlfc_traffic_pending_check(ctx, entry, prec);
}
/* Optimize flush, if pktq len = 0, just return.
* pktq len of 0 means pktq's prec q's are all empty.
*/
- if (pq->n_pkts_tot == 0) {
+ if (pq->len == 0) {
return;
}
}
PKTFREE(ctx->osh, p, dir);
- q->n_pkts--;
- pq->n_pkts_tot--;
-#ifdef WL_TXQ_STALL
- q->dequeue_count++;
-#endif // endif
-
+ q->len--;
+ pq->len--;
p = (head ? q->head : PKTLINK(prev));
} else {
prev = p;
}
if (q->head == NULL) {
- ASSERT(q->n_pkts == 0);
+ ASSERT(q->len == 0);
q->tail = NULL;
}
}
if (fn == NULL)
- ASSERT(pq->n_pkts_tot == 0);
+ ASSERT(pq->len == 0);
} /* _dhd_wlfc_pktq_flush */
#ifndef BCMDBUS
}
}
- q->n_pkts--;
-
- pq->n_pkts_tot--;
+ q->len--;
-#ifdef WL_TXQ_STALL
- q->dequeue_count++;
-#endif // endif
+ pq->len--;
PKTSETLINK(p, NULL);
}
dhd_os_sdunlock_txq(dhd);
+
while ((pkt = head)) {
head = PKTLINK(pkt);
PKTSETLINK(pkt, NULL);
for (i = 0; i < total_entries; i++) {
if (table[i].occupied) {
/* release packets held in PSQ (both delayed and suppressed) */
- if (table[i].psq.n_pkts_tot) {
+ if (table[i].psq.len) {
WLFC_DBGMESG(("%s(): PSQ[%d].len = %d\n",
- __FUNCTION__, i, table[i].psq.n_pkts_tot));
+ __FUNCTION__, i, table[i].psq.len));
_dhd_wlfc_pktq_flush(wlfc, &table[i].psq, TRUE,
fn, arg, Q_TYPE_PSQ);
}
/* free packets held in AFQ */
- if (WLFC_GET_AFQ(dhd->wlfc_mode) && (table[i].afq.n_pkts_tot)) {
+ if (WLFC_GET_AFQ(dhd->wlfc_mode) && (table[i].afq.len)) {
_dhd_wlfc_pktq_flush(wlfc, &table[i].afq, TRUE,
fn, arg, Q_TYPE_AFQ);
}
{
int rc = BCME_OK;
+
if ((action == eWLFC_MAC_ENTRY_ACTION_ADD) || (action == eWLFC_MAC_ENTRY_ACTION_UPDATE)) {
entry->occupied = 1;
entry->state = WLFC_STATE_OPEN;
if (action == eWLFC_MAC_ENTRY_ACTION_ADD) {
entry->suppressed = FALSE;
entry->transit_count = 0;
-#ifdef PROPTX_MAXCOUNT
- entry->transit_maxcount = wl_ext_get_wlfc_maxcount(ctx->dhdp, ifid);
-#endif /* PROPTX_MAXCOUNT */
entry->suppr_transit_count = 0;
entry->onbus_pkts_count = 0;
}
return rc;
} /* _dhd_wlfc_mac_entry_update */
+
#ifdef LIMIT_BORROW
/** LIMIT_BORROW specific function */
}
}
- q->n_pkts--;
- pq->n_pkts_tot--;
-
-#ifdef WL_TXQ_STALL
- q->dequeue_count++;
-#endif // endif
-
+ q->len--;
+ pq->len--;
ctx->pkt_cnt_in_q[DHD_PKTTAG_IF(PKTTAG(p))][prec]--;
ctx->pkt_cnt_per_ac[prec]--;
remove_from_hanger = 0;
} else if (status_flag == WLFC_CTL_PKTFLAG_TOSSED_BYWLC) {
wlfc->stats.wlc_tossed_pkts += len;
- } else if (status_flag == WLFC_CTL_PKTFLAG_SUPPRESS_ACKED) {
+ }
+
+ else if (status_flag == WLFC_CTL_PKTFLAG_SUPPRESS_ACKED) {
wlfc->stats.pkt_freed += len;
- } else if (status_flag == WLFC_CTL_PKTFLAG_EXPIRED) {
- wlfc->stats.pkt_exptime += len;
- } else if (status_flag == WLFC_CTL_PKTFLAG_DROPPED) {
- wlfc->stats.pkt_dropped += len;
}
if (dhd->proptxstatus_txstatus_ignore) {
uint32 delta;
old_t = ((wlfc_hanger_t*)(wlfc->hanger))->items[hslot].push_time;
+
wlfc->stats.latency_sample_count++;
if (new_t > old_t)
delta = new_t - old_t;
}
#ifdef PROP_TXSTATUS_DEBUG
entry->dstncredit_acks++;
-#endif // endif
+#endif
}
if ((status_flag == WLFC_CTL_PKTFLAG_D11SUPPRESS) ||
for (i = 0; i < WLFC_CTL_VALUE_LEN_FIFO_CREDITBACK; i++) {
#ifdef PROP_TXSTATUS_DEBUG
wlfc->stats.fifo_credits_back[i] += credits[i];
-#endif // endif
+#endif
/* update FIFO credits */
if (dhd->proptxstatus_mode == WLFC_FCMODE_EXPLICIT_CREDIT)
uint8 ifid;
uint8* ea;
- WLFC_DBGMESG(("%s(), mac ["MACDBG"],%s,idx:%d,id:0x%02x\n",
- __FUNCTION__, MAC2STRDBG(&value[2]),
+ WLFC_DBGMESG(("%s(), mac [%02x:%02x:%02x:%02x:%02x:%02x],%s,idx:%d,id:0x%02x\n",
+ __FUNCTION__, value[2], value[3], value[4], value[5], value[6], value[7],
((type == WLFC_CTL_TYPE_MACDESC_ADD) ? "ADD":"DEL"),
WLFC_MAC_DESC_GET_LOOKUP_INDEX(value[0]), value[0]));
wlfc->single_ac = 0;
wlfc->single_ac_timestamp = 0;
+
exit:
DHD_ERROR(("%s: ret=%d\n", __FUNCTION__, rc));
dhd_os_wlfc_unblock(dhd);
no_credit = FALSE;
}
}
-#endif // endif
+#endif
commit_info.needs_hdr = 1;
commit_info.mac_entry = NULL;
commit_info.p = _dhd_wlfc_deque_delayedq(ctx, ac,
if (lender != -1 && dhdp->wlfc_borrow_allowed) {
_dhd_wlfc_return_credit(ctx, lender, ac);
}
-#endif // endif
+#endif
break;
}
dhdp->wlfc_borrow_allowed) {
_dhd_wlfc_return_credit(ctx, lender, ac);
}
-#endif // endif
+#endif
} else {
#ifdef LIMIT_BORROW
if (lender != -1 && dhdp->wlfc_borrow_allowed) {
_dhd_wlfc_return_credit(ctx, lender, ac);
}
-#endif // endif
+#endif
bus_retry_count++;
if (bus_retry_count >= BUS_RETRIES) {
DHD_ERROR(("%s: bus error %d\n", __FUNCTION__, rc));
}
else
break;
-#endif // endif
+#endif
commit_info.p = _dhd_wlfc_deque_delayedq(ctx, ac,
&(commit_info.ac_fifo_credit_spent),
&(commit_info.needs_hdr),
/* before borrow only one ac exists and now this only ac is empty */
#ifdef LIMIT_BORROW
_dhd_wlfc_return_credit(ctx, lender, ac);
-#endif // endif
+#endif
break;
}
if (commit_info.ac_fifo_credit_spent) {
#ifndef LIMIT_BORROW
ctx->FIFO_credit[ac]--;
-#endif // endif
+#endif
} else {
#ifdef LIMIT_BORROW
_dhd_wlfc_return_credit(ctx, lender, ac);
-#endif // endif
+#endif
}
} else {
#ifdef LIMIT_BORROW
_dhd_wlfc_return_credit(ctx, lender, ac);
-#endif // endif
+#endif
bus_retry_count++;
if (bus_retry_count >= BUS_RETRIES) {
DHD_ERROR(("%s: bus error %d\n", __FUNCTION__, rc));
if (DHD_PKTTAG_SIGNALONLY(PKTTAG(txp))) {
#ifdef PROP_TXSTATUS_DEBUG
wlfc->stats.signal_only_pkts_freed++;
-#endif // endif
+#endif
/* is this a signal-only packet? */
_dhd_wlfc_pullheader(wlfc, txp);
PKTFREE(wlfc->osh, txp, TRUE);
dhd_os_wlfc_block(dhd);
if (dhd->wlfc_enabled) {
- DHD_INFO(("%s():%d, Already enabled!\n", __FUNCTION__, __LINE__));
+ DHD_ERROR(("%s():%d, Already enabled!\n", __FUNCTION__, __LINE__));
dhd_os_wlfc_unblock(dhd);
return BCME_OK;
}
WLFC_FLAGS_HOST_PROPTXSTATUS_ACTIVE |
WLFC_FLAGS_HOST_RXRERODER_ACTIVE;
+
/*
try to enable/disable signaling by sending "tlv" iovar. if that fails,
fallback to no flow control? Print a message for now.
Leaving the message for now, it should be removed after a while; once
the tlv situation is stable.
*/
- DHD_INFO(("dhd_wlfc_init(): successfully %s bdcv2 tlv signaling, %d\n",
+ DHD_ERROR(("dhd_wlfc_init(): successfully %s bdcv2 tlv signaling, %d\n",
dhd->wlfc_enabled?"enabled":"disabled", tlv));
}
ret = dhd_wl_ioctl_get_intiovar(dhd, "wlfc_mode", &fw_caps, WLC_GET_VAR, FALSE, 0);
if (!ret) {
- DHD_INFO(("%s: query wlfc_mode succeed, fw_caps=0x%x\n", __FUNCTION__, fw_caps));
+ DHD_ERROR(("%s: query wlfc_mode succeed, fw_caps=0x%x\n", __FUNCTION__, fw_caps));
if (WLFC_IS_OLD_DEF(fw_caps)) {
#ifdef BCMDBUS
}
}
- DHD_INFO(("dhd_wlfc_init(): wlfc_mode=0x%x, ret=%d\n", dhd->wlfc_mode, ret));
+ DHD_ERROR(("dhd_wlfc_init(): wlfc_mode=0x%x, ret=%d\n", dhd->wlfc_mode, ret));
#ifdef LIMIT_BORROW
dhd->wlfc_borrow_allowed = TRUE;
-#endif // endif
+#endif
dhd_os_wlfc_unblock(dhd);
if (dhd->plat_init)
_dhd_wlfc_hanger_delete(dhd, h);
}
+
/* free top structure */
DHD_OS_PREFREE(dhd, dhd->wlfc_state,
sizeof(athost_wl_status_info_t));
dhd->proptxstatus_mode = hostreorder ?
WLFC_ONLY_AMPDU_HOSTREORDER : WLFC_FCMODE_NONE;
+ DHD_ERROR(("%s: wlfc_mode=0x%x, tlv=%d\n", __FUNCTION__, dhd->wlfc_mode, tlv));
+
dhd_os_wlfc_unblock(dhd);
if (dhd->plat_deinit)
ea = interfaces[i].ea;
bcm_bprintf(strbuf, "INTERFACE[%d].ea = "
- "["MACDBG"], if:%d, type: %s "
+ "[%02x:%02x:%02x:%02x:%02x:%02x], if:%d, type: %s "
"netif_flow_control:%s\n", i,
- MAC2STRDBG(ea), interfaces[i].interface_id,
+ ea[0], ea[1], ea[2], ea[3], ea[4], ea[5],
+ interfaces[i].interface_id,
iftype_desc, ((wlfc->hostif_flow_state[i] == OFF)
? " OFF":" ON"));
"(trans,supp_trans,onbus)"
"= (%d,%s,%d),(%d,%d,%d)\n",
i,
- interfaces[i].psq.n_pkts_tot,
+ interfaces[i].psq.len,
((interfaces[i].state ==
WLFC_STATE_OPEN) ? "OPEN":"CLOSE"),
interfaces[i].requested_credit,
"(delay3,sup3,afq3),(delay4,sup4,afq4) = (%d,%d,%d),"
"(%d,%d,%d),(%d,%d,%d),(%d,%d,%d),(%d,%d,%d)\n",
i,
- interfaces[i].psq.q[0].n_pkts,
- interfaces[i].psq.q[1].n_pkts,
- interfaces[i].afq.q[0].n_pkts,
- interfaces[i].psq.q[2].n_pkts,
- interfaces[i].psq.q[3].n_pkts,
- interfaces[i].afq.q[1].n_pkts,
- interfaces[i].psq.q[4].n_pkts,
- interfaces[i].psq.q[5].n_pkts,
- interfaces[i].afq.q[2].n_pkts,
- interfaces[i].psq.q[6].n_pkts,
- interfaces[i].psq.q[7].n_pkts,
- interfaces[i].afq.q[3].n_pkts,
- interfaces[i].psq.q[8].n_pkts,
- interfaces[i].psq.q[9].n_pkts,
- interfaces[i].afq.q[4].n_pkts);
+ interfaces[i].psq.q[0].len,
+ interfaces[i].psq.q[1].len,
+ interfaces[i].afq.q[0].len,
+ interfaces[i].psq.q[2].len,
+ interfaces[i].psq.q[3].len,
+ interfaces[i].afq.q[1].len,
+ interfaces[i].psq.q[4].len,
+ interfaces[i].psq.q[5].len,
+ interfaces[i].afq.q[2].len,
+ interfaces[i].psq.q[6].len,
+ interfaces[i].psq.q[7].len,
+ interfaces[i].afq.q[3].len,
+ interfaces[i].psq.q[8].len,
+ interfaces[i].psq.q[9].len,
+ interfaces[i].afq.q[4].len);
}
}
if (mac_table[i].occupied) {
ea = mac_table[i].ea;
bcm_bprintf(strbuf, "MAC_table[%d].ea = "
- "["MACDBG"], if:%d \n", i,
- MAC2STRDBG(ea), mac_table[i].interface_id);
+ "[%02x:%02x:%02x:%02x:%02x:%02x], if:%d \n", i,
+ ea[0], ea[1], ea[2], ea[3], ea[4], ea[5],
+ mac_table[i].interface_id);
bcm_bprintf(strbuf, "MAC_table[%d].PSQ(len,state,credit),"
"(trans,supp_trans,onbus)"
"= (%d,%s,%d),(%d,%d,%d)\n",
i,
- mac_table[i].psq.n_pkts_tot,
+ mac_table[i].psq.len,
((mac_table[i].state ==
WLFC_STATE_OPEN) ? " OPEN":"CLOSE"),
mac_table[i].requested_credit,
#ifdef PROP_TXSTATUS_DEBUG
bcm_bprintf(strbuf, "MAC_table[%d]: (opened, closed) = (%d, %d)\n",
i, mac_table[i].opened_ct, mac_table[i].closed_ct);
-#endif // endif
+#endif
bcm_bprintf(strbuf, "MAC_table[%d].PSQ"
"(delay0,sup0,afq0),(delay1,sup1,afq1),(delay2,sup2,afq2),"
"(delay3,sup3,afq3),(delay4,sup4,afq4) =(%d,%d,%d),"
"(%d,%d,%d),(%d,%d,%d),(%d,%d,%d),(%d,%d,%d)\n",
i,
- mac_table[i].psq.q[0].n_pkts,
- mac_table[i].psq.q[1].n_pkts,
- mac_table[i].afq.q[0].n_pkts,
- mac_table[i].psq.q[2].n_pkts,
- mac_table[i].psq.q[3].n_pkts,
- mac_table[i].afq.q[1].n_pkts,
- mac_table[i].psq.q[4].n_pkts,
- mac_table[i].psq.q[5].n_pkts,
- mac_table[i].afq.q[2].n_pkts,
- mac_table[i].psq.q[6].n_pkts,
- mac_table[i].psq.q[7].n_pkts,
- mac_table[i].afq.q[3].n_pkts,
- mac_table[i].psq.q[8].n_pkts,
- mac_table[i].psq.q[9].n_pkts,
- mac_table[i].afq.q[4].n_pkts);
+ mac_table[i].psq.q[0].len,
+ mac_table[i].psq.q[1].len,
+ mac_table[i].afq.q[0].len,
+ mac_table[i].psq.q[2].len,
+ mac_table[i].psq.q[3].len,
+ mac_table[i].afq.q[1].len,
+ mac_table[i].psq.q[4].len,
+ mac_table[i].psq.q[5].len,
+ mac_table[i].afq.q[2].len,
+ mac_table[i].psq.q[6].len,
+ mac_table[i].psq.q[7].len,
+ mac_table[i].afq.q[3].len,
+ mac_table[i].psq.q[8].len,
+ mac_table[i].psq.q[9].len,
+ mac_table[i].afq.q[4].len);
}
}
}
#endif /* PROP_TXSTATUS_DEBUG */
bcm_bprintf(strbuf, "\n");
- bcm_bprintf(strbuf, "wlfc- pkt((in,2bus,txstats,hdrpull,out),"
- "(dropped,hdr_only,wlc_tossed,wlc_dropped,wlc_exptime)"
+ bcm_bprintf(strbuf, "wlfc- pkt((in,2bus,txstats,hdrpull,out),(dropped,hdr_only,wlc_tossed)"
"(freed,free_err,rollback)) = "
- "((%d,%d,%d,%d,%d),(%d,%d,%d,%d,%d),(%d,%d,%d))\n",
+ "((%d,%d,%d,%d,%d),(%d,%d,%d),(%d,%d,%d))\n",
wlfc->stats.pktin,
wlfc->stats.pkt2bus,
wlfc->stats.txstatus_in,
wlfc->stats.pktdropped,
wlfc->stats.wlfc_header_only_pkt,
wlfc->stats.wlc_tossed_pkts,
- wlfc->stats.pkt_dropped,
- wlfc->stats.pkt_exptime,
wlfc->stats.pkt_freed,
wlfc->stats.pkt_free_err, wlfc->stats.rollback);
return BCME_OK;
}
-#ifdef PROPTX_MAXCOUNT
-int dhd_wlfc_update_maxcount(dhd_pub_t *dhdp, uint8 ifid, int maxcount)
-{
- athost_wl_status_info_t* ctx;
- int rc = 0;
-
- if (dhdp == NULL) {
- DHD_ERROR(("%s: dhdp is NULL\n", __FUNCTION__));
- return BCME_BADARG;
- }
-
- dhd_os_wlfc_block(dhdp);
-
- if (!dhdp->wlfc_state || (dhdp->proptxstatus_mode == WLFC_FCMODE_NONE)) {
- rc = WLFC_UNSUPPORTED;
- goto exit;
- }
-
- if (ifid >= WLFC_MAX_IFNUM) {
- DHD_ERROR(("%s: bad ifid\n", __FUNCTION__));
- rc = BCME_BADARG;
- goto exit;
- }
-
- ctx = (athost_wl_status_info_t*)dhdp->wlfc_state;
- ctx->destination_entries.interfaces[ifid].transit_maxcount = maxcount;
-exit:
- dhd_os_wlfc_unblock(dhdp);
- return rc;
-}
-#endif /* PROPTX_MAXCOUNT */
-
#endif /* PROP_TXSTATUS */
/*
- * Copyright (C) 1999-2019, Broadcom.
- *
+ * Copyright (C) 1999-2017, Broadcom Corporation
+ *
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
* under the terms of the GNU General Public License version 2 (the "GPL"),
* available at http://www.broadcom.com/licenses/GPLv2.php, with the
* following added to such license:
- *
+ *
* As a special exception, the copyright holders of this software give you
* permission to link this software with independent modules, and to copy and
* distribute the resulting executable under terms of your choice, provided that
* the license of that module. An independent module is a module which is not
* derived from this software. The special exception does not apply to any
* modifications of the software.
- *
+ *
* Notwithstanding the above, under no circumstances may you combine this
* software in any way with any other Broadcom software provided under a license
* other than the GPL, without Broadcom's express prior written consent.
*
* <<Broadcom-WL-IPTag/Open:>>
*
- * $Id: dhd_wlfc.h 690477 2017-03-16 10:17:17Z $
+ * $Id: dhd_wlfc.h 671530 2016-11-22 08:43:33Z $
*
*/
#ifndef __wlfc_host_driver_definitions_h__
#define __wlfc_host_driver_definitions_h__
+
/* #define OOO_DEBUG */
#define KERNEL_THREAD_RETURN_TYPE int
void* pkt;
#ifdef PROP_TXSTATUS_DEBUG
uint32 push_time;
-#endif // endif
+#endif
struct wlfc_hanger_item *next;
} wlfc_hanger_item_t;
#define WLFC_FLOWCONTROL_HIWATER (WLFC_PSQ_LEN - 256)
#undef WLFC_FLOWCONTROL_LOWATER
#define WLFC_FLOWCONTROL_LOWATER (WLFC_FLOWCONTROL_HIWATER / 4)
-#endif // endif
+#endif
#define WLFC_LOG_BUF_SIZE (1024*1024)
/** flag. TRUE when remote MAC is in suppressed state */
uint8 suppressed;
+
#ifdef PROP_TXSTATUS_DEBUG
uint32 dstncredit_sent_packets;
uint32 dstncredit_acks;
uint32 opened_ct;
uint32 closed_ct;
-#endif // endif
-#ifdef PROPTX_MAXCOUNT
- /** Max Number of packets at dongle for this entry. */
- int transit_maxcount;
-#endif /* PROPTX_MAXCOUNT */
+#endif
struct wlfc_mac_descriptor* prev;
struct wlfc_mac_descriptor* next;
} wlfc_mac_descriptor_t;
uint32 d11_suppress;
uint32 wl_suppress;
uint32 bad_suppress;
- uint32 pkt_dropped;
- uint32 pkt_exptime;
uint32 pkt_freed;
uint32 pkt_free_err;
uint32 psq_wlsup_retx;
uint32 dropped_qfull[6];
uint32 signal_only_pkts_sent;
uint32 signal_only_pkts_freed;
-#endif // endif
+#endif
uint32 cleanup_txq_cnt;
uint32 cleanup_psq_cnt;
uint32 cleanup_fw_cnt;
#define WLFC_HOST_FIFO_CREDIT_INC_SENTCTRS(ctx, ac) do {} while (0)
#define WLFC_HOST_FIFO_CREDIT_INC_BACKCTRS(ctx, ac) do {} while (0)
#define WLFC_HOST_FIFO_DROPPEDCTR_INC(ctx, ac) do {} while (0)
-#endif // endif
+#endif
#define WLFC_PACKET_BOUND 10
#define WLFC_FCMODE_NONE 0
#define WLFC_FCMODE_IMPLIED_CREDIT 1
#else
#define DHD_WLFC_CTRINC_MAC_CLOSE(entry) do {} while (0)
#define DHD_WLFC_CTRINC_MAC_OPEN(entry) do {} while (0)
-#endif // endif
+#endif
#ifdef BCM_OBJECT_TRACE
#define DHD_PKTTAG_SET_SN(tag, val) ((dhd_pkttag_t*)(tag))->sn = (val)
int dhd_wlfc_get_rxpkt_chk(dhd_pub_t *dhd, int *val);
int dhd_wlfc_set_rxpkt_chk(dhd_pub_t *dhd, int val);
-#ifdef PROPTX_MAXCOUNT
-int dhd_wlfc_update_maxcount(dhd_pub_t *dhdp, uint8 ifid, int maxcount);
-#endif /* PROPTX_MAXCOUNT */
#endif /* __wlfc_host_driver_definitions_h__ */
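For reference, the flow-control checks reverted in dhd_wlfc.c above are a two-threshold hysteresis on the per-interface PSQ depth. A minimal standalone sketch of the same idea follows; the helper name is invented, and only the two watermark macros from this header are assumed:

/* Editor's sketch, not driver code: stop traffic once the queue depth
 * reaches the high watermark and restart it only after it drains below
 * the low watermark, so the on/off decision does not flap around a
 * single threshold.
 */
static bool example_flow_stopped(int qlen, bool stopped)
{
	if (!stopped && qlen >= WLFC_FLOWCONTROL_HIWATER)
		return TRUE;	/* assert flow control towards the stack */
	if (stopped && qlen <= WLFC_FLOWCONTROL_LOWATER)
		return FALSE;	/* queue drained, resume traffic */
	return stopped;		/* keep the current state */
}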
* Common stats definitions for clients of dongle
* ports
*
- * Copyright (C) 1999-2019, Broadcom.
- *
+ * Copyright (C) 1999-2017, Broadcom Corporation
+ *
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
* under the terms of the GNU General Public License version 2 (the "GPL"),
* available at http://www.broadcom.com/licenses/GPLv2.php, with the
* following added to such license:
- *
+ *
* As a special exception, the copyright holders of this software give you
* permission to link this software with independent modules, and to copy and
* distribute the resulting executable under terms of your choice, provided that
* the license of that module. An independent module is a module which is not
* derived from this software. The special exception does not apply to any
* modifications of the software.
- *
+ *
* Notwithstanding the above, under no circumstances may you combine this
* software in any way with any other Broadcom software provided under a license
* other than the GPL, without Broadcom's express prior written consent.
*
* <<Broadcom-WL-IPTag/Open:>>
*
- * $Id: dngl_stats.h 716269 2017-08-17 09:22:46Z $
+ * $Id: dngl_stats.h 681171 2017-01-25 05:27:08Z $
*/
#ifndef _dngl_stats_h_
typedef struct {
wifi_interface_mode mode; /* interface mode */
uint8 mac_addr[6]; /* interface mac address (self) */
- uint8 PAD[2];
wifi_connection_state state; /* connection state (valid for STA, CLI only) */
wifi_roam_state roaming; /* roaming state */
uint32 capabilities; /* WIFI_CAPABILITY_XXX (self) */
uint8 ssid[DOT11_MAX_SSID_LEN+1]; /* null terminated SSID */
uint8 bssid[ETHER_ADDR_LEN]; /* bssid */
- uint8 PAD[1];
uint8 ap_country_str[3]; /* country string advertised by AP */
uint8 country_str[3]; /* country string for this association */
- uint8 PAD[2];
} wifi_interface_info;
typedef wifi_interface_info *wifi_interface_handle;
/* channel statistics */
typedef struct {
wifi_channel_info channel; /* channel */
- uint32 on_time; /* msecs the radio is awake (32 bits number
+ uint32 on_time; /* msecs the radio is awake (32 bits number
* accruing over time)
*/
uint32 cca_busy_time; /* msecs the CCA register is busy (32 bits number
/*
* Dongle WL Header definitions
*
- * Copyright (C) 1999-2019, Broadcom.
- *
+ * Copyright (C) 1999-2017, Broadcom Corporation
+ *
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
* under the terms of the GNU General Public License version 2 (the "GPL"),
* available at http://www.broadcom.com/licenses/GPLv2.php, with the
* following added to such license:
- *
+ *
* As a special exception, the copyright holders of this software give you
* permission to link this software with independent modules, and to copy and
* distribute the resulting executable under terms of your choice, provided that
* the license of that module. An independent module is a module which is not
* derived from this software. The special exception does not apply to any
* modifications of the software.
- *
+ *
* Notwithstanding the above, under no circumstances may you combine this
* software in any way with any other Broadcom software provided under a license
* other than the GPL, without Broadcom's express prior written consent.
+++ /dev/null
-/*
- * IE/TLV fragmentation/defragmentation support for
- * Broadcom 802.11bang Networking Device Driver
- *
- * Copyright (C) 1999-2019, Broadcom.
- *
- * Unless you and Broadcom execute a separate written software license
- * agreement governing use of this software, this software is licensed to you
- * under the terms of the GNU General Public License version 2 (the "GPL"),
- * available at http://www.broadcom.com/licenses/GPLv2.php, with the
- * following added to such license:
- *
- * As a special exception, the copyright holders of this software give you
- * permission to link this software with independent modules, and to copy and
- * distribute the resulting executable under terms of your choice, provided that
- * you also meet, for each linked independent module, the terms and conditions of
- * the license of that module. An independent module is a module which is not
- * derived from this software. The special exception does not apply to any
- * modifications of the software.
- *
- * Notwithstanding the above, under no circumstances may you combine this
- * software in any way with any other Broadcom software provided under a license
- * other than the GPL, without Broadcom's express prior written consent.
- * $Id$
- *
- * <<Broadcom-WL-IPTag/Open:>>
- */
-
-#include <bcmutils.h>
-#include <frag.h>
-#include <802.11.h>
-
-/* defrag a fragmented dot11 ie/tlv. if space does not permit, return the needed
- * ie length to contain all the fragments with status BCME_BUFTOOSHORT.
- * out_len is in/out parameter, max length on input, used/required length on output
- */
-int
-bcm_tlv_dot11_defrag(const void *buf, uint buf_len, uint8 id, bool id_ext,
- uint8 *out, uint *out_len)
-{
- int err = BCME_OK;
- const bcm_tlv_t *ie;
- uint tot_len = 0;
- uint out_left;
-
- /* find the ie; includes validation */
- ie = bcm_parse_tlvs_dot11(buf, buf_len, id, id_ext);
- if (!ie) {
- err = BCME_IE_NOTFOUND;
- goto done;
- }
-
- out_left = (out && out_len) ? *out_len : 0;
-
- /* first fragment */
- tot_len = id_ext ? ie->len - 1 : ie->len;
-
- /* copy out if output space permits */
- if (out_left < tot_len) {
- err = BCME_BUFTOOSHORT;
- out_left = 0; /* prevent further copy */
- } else {
- memcpy(out, &ie->data[id_ext ? 1 : 0], tot_len);
- out += tot_len;
- out_left -= tot_len;
- }
-
- /* if not fragmened or not fragmentable per 802.11 table 9-77 11md0.1 bail
- * we can introduce the latter check later
- */
- if (ie->len != BCM_TLV_MAX_DATA_SIZE) {
- goto done;
- }
-
- /* adjust buf_len to length after ie including it */
- buf_len -= (uint)(((const uint8 *)ie - (const uint8 *)buf));
-
- /* update length from fragments, okay if no next ie */
- while ((ie = bcm_next_tlv(ie, &buf_len)) &&
- (ie->id == DOT11_MNG_FRAGMENT_ID)) {
- /* note: buf_len starts at next ie and last frag may be partial */
- if (out_left < ie->len) {
- err = BCME_BUFTOOSHORT;
- out_left = 0;
- } else {
- memcpy(out, &ie->data[0], ie->len);
- out += ie->len;
- out_left -= ie->len;
- }
-
- tot_len += ie->len + BCM_TLV_HDR_SIZE;
-
- /* all but last should be of max size */
- if (ie->len < BCM_TLV_MAX_DATA_SIZE) {
- break;
- }
- }
-
-done:
- if (out_len) {
- *out_len = tot_len;
- }
-
- return err;
-}
-
-int
-bcm_tlv_dot11_frag_tot_len(const void *buf, uint buf_len,
- uint8 id, bool id_ext, uint *ie_len)
-{
- return bcm_tlv_dot11_defrag(buf, buf_len, id, id_ext, NULL, ie_len);
-}
+++ /dev/null
-/*
- * IE/TLV (de)fragmentation declarations/definitions for
- * Broadcom 802.11abgn Networking Device Driver
- *
- * Copyright (C) 1999-2019, Broadcom.
- *
- * Unless you and Broadcom execute a separate written software license
- * agreement governing use of this software, this software is licensed to you
- * under the terms of the GNU General Public License version 2 (the "GPL"),
- * available at http://www.broadcom.com/licenses/GPLv2.php, with the
- * following added to such license:
- *
- * As a special exception, the copyright holders of this software give you
- * permission to link this software with independent modules, and to copy and
- * distribute the resulting executable under terms of your choice, provided that
- * you also meet, for each linked independent module, the terms and conditions of
- * the license of that module. An independent module is a module which is not
- * derived from this software. The special exception does not apply to any
- * modifications of the software.
- *
- * Notwithstanding the above, under no circumstances may you combine this
- * software in any way with any other Broadcom software provided under a license
- * other than the GPL, without Broadcom's express prior written consent.
- *
- *
- * <<Broadcom-WL-IPTag/Open:>>
- *
- * $Id$
- *
- */
-
-#ifndef __FRAG_H__
-#define __FRAG_H__
-
-int bcm_tlv_dot11_frag_tot_len(const void *buf, uint buf_len,
- uint8 id, bool id_ext, uint *ie_len);
-
-#endif /* __FRAG_H__ */
/*
* HND generic packet pool operation primitives
*
- * Copyright (C) 1999-2019, Broadcom.
- *
+ * Copyright (C) 1999-2017, Broadcom Corporation
+ *
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
* under the terms of the GNU General Public License version 2 (the "GPL"),
* available at http://www.broadcom.com/licenses/GPLv2.php, with the
* following added to such license:
- *
+ *
* As a special exception, the copyright holders of this software give you
* permission to link this software with independent modules, and to copy and
* distribute the resulting executable under terms of your choice, provided that
* the license of that module. An independent module is a module which is not
* derived from this software. The special exception does not apply to any
* modifications of the software.
- *
+ *
* Notwithstanding the above, under no circumstances may you combine this
* software in any way with any other Broadcom software provided under a license
* other than the GPL, without Broadcom's express prior written consent.
*
* <<Broadcom-WL-IPTag/Open:>>
*
- * $Id: hnd_pktpool.c 677681 2017-01-04 09:10:30Z $
+ * $Id: hnd_pktpool.c 613891 2016-01-20 10:05:44Z $
*/
#include <typedefs.h>
#include <osl_ext.h>
#include <bcmutils.h>
#include <hnd_pktpool.h>
-#ifdef BCMRESVFRAGPOOL
-#include <hnd_resvpool.h>
-#endif /* BCMRESVFRAGPOOL */
-#ifdef BCMFRWDPOOLREORG
-#include <hnd_poolreorg.h>
-#endif /* BCMFRWDPOOLREORG */
+
/* mutex macros for thread safe */
#ifdef HND_PKTPOOL_THREAD_SAFE
#define HND_PKTPOOL_MUTEX_DELETE(mutex) OSL_EXT_SUCCESS
#define HND_PKTPOOL_MUTEX_ACQUIRE(mutex, msec) OSL_EXT_SUCCESS
#define HND_PKTPOOL_MUTEX_RELEASE(mutex) OSL_EXT_SUCCESS
-#endif // endif
+#endif
/* Registry size is one larger than max pools, as slot #0 is reserved */
#define PKTPOOLREG_RSVD_ID (0U)
/** accessor functions required when ROMming this file, forced into RAM */
+
pktpool_t *
BCMRAMFN(get_pktpools_registry)(int id)
{
/**
* pktpool_init:
* User provides a pktpool_t structure and specifies the number of packets to
- * be pre-filled into the pool (n_pkts).
+ * be pre-filled into the pool (pplen).
* pktpool_init first attempts to register the pool and fetch a unique poolid.
* If registration fails, it is considered an BCME_ERR, caused by either the
* registry was not pre-created (pktpool_attach) or the registry is full.
* of packets to be allocated during pktpool_init and fill the pool up after
* reclaim stage.
*
- * @param n_pkts Number of packets to be pre-filled into the pool
- * @param max_pkt_bytes The size of all packets in a pool must be the same. E.g. PKTBUFSZ.
- * @param type e.g. 'lbuf_frag'
+ * @param pplen Number of packets to be pre-filled into the pool
+ * @param plen The size of all packets in a pool must be the same, [bytes] units. E.g. PKTBUFSZ.
+ * @param type e.g. 'lbuf_frag'
*/
int
-pktpool_init(osl_t *osh, pktpool_t *pktp, int *n_pkts, int max_pkt_bytes, bool istx,
- uint8 type)
+pktpool_init(osl_t *osh, pktpool_t *pktp, int *pplen, int plen, bool istx, uint8 type)
{
int i, err = BCME_OK;
int pktplen;
ASSERT(pktp != NULL);
ASSERT(osh != NULL);
- ASSERT(n_pkts != NULL);
+ ASSERT(pplen != NULL);
- pktplen = *n_pkts;
+ pktplen = *pplen;
bzero(pktp, sizeof(pktpool_t));
pktp->inited = TRUE;
pktp->istx = istx ? TRUE : FALSE;
- pktp->max_pkt_bytes = (uint16)max_pkt_bytes;
+ pktp->plen = (uint16)plen;
pktp->type = type;
if (HND_PKTPOOL_MUTEX_CREATE("pktpool", &pktp->mutex) != OSL_EXT_SUCCESS) {
for (i = 0; i < pktplen; i++) {
void *p;
- p = PKTGET(osh, max_pkt_bytes, TRUE);
+ p = PKTGET(osh, plen, TRUE);
if (p == NULL) {
/* Not able to allocate all requested pkts
#ifdef BCMDBG_POOL
pktp->dbg_q[pktp->dbg_qlen++].p = p;
-#endif // endif
+#endif
}
exit:
- pktp->n_pkts = pktp->avail;
+ pktp->len = pktp->avail;
- *n_pkts = pktp->n_pkts; /* number of packets managed by pool */
+ *pplen = pktp->len; /* number of packets managed by pool */
return err;
} /* pktpool_init */
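As a hedged illustration of the reverted signature (pplen/plen), the sequence below shows one way a caller could build a pool using only helpers visible in this file; the osh handle, the pool ceiling of 64 and the error handling are placeholders chosen by the editor:

/* Editor's sketch only; not part of this patch. */
static pktpool_t example_pool;

static int example_pool_setup(osl_t *osh)
{
	int n = 1;	/* pre-fill just one pkt; pktpool_fill() adds the rest */
	int err;

	err = pktpool_init(osh, &example_pool, &n, PKTBUFSZ, TRUE, lbuf_basic);
	if (err != BCME_OK)
		return err;

	pktpool_setmaxlen(&example_pool, 64);	/* cap the pool at 64 pkts */

	/* Grow toward maxlen now (or later, e.g. after reclaim). */
	return pktpool_fill(osh, &example_pool, FALSE);
}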
#ifdef BCMDBG_POOL
{
int i;
- for (i = 0; i <= pktp->n_pkts; i++) {
+ for (i = 0; i <= pktp->len; i++) {
pktp->dbg_q[i].p = NULL;
}
}
-#endif // endif
+#endif
while (pktp->freelist != NULL) {
void * p = pktp->freelist;
PKTFREE(osh, p, pktp->istx); /* free the packet */
freed++;
- ASSERT(freed <= pktp->n_pkts);
+ ASSERT(freed <= pktp->len);
}
pktp->avail -= freed;
ASSERT(pktp->avail == 0);
- pktp->n_pkts -= freed;
+ pktp->len -= freed;
pktpool_deregister(pktp); /* release previously acquired unique pool id */
POOLSETID(pktp, PKTPOOL_INVALID_ID);
pktp->inited = FALSE;
/* Are there still pending pkts? */
- ASSERT(pktp->n_pkts == 0);
+ ASSERT(pktp->len == 0);
return 0;
}
{
void *p;
int err = 0;
- int n_pkts, psize, maxlen;
+ int len, psize, maxlen;
/* protect shared resource */
if (HND_PKTPOOL_MUTEX_ACQUIRE(&pktp->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS)
return BCME_ERROR;
- ASSERT(pktp->max_pkt_bytes != 0);
+ ASSERT(pktp->plen != 0);
maxlen = pktp->maxlen;
psize = minimal ? (maxlen >> 2) : maxlen;
- for (n_pkts = (int)pktp->n_pkts; n_pkts < psize; n_pkts++) {
+ for (len = (int)pktp->len; len < psize; len++) {
- p = PKTGET(osh, pktp->n_pkts, TRUE);
+ p = PKTGET(osh, pktp->len, TRUE);
if (p == NULL) {
err = BCME_NOMEM;
return err;
}
-#ifdef BCMPOOLRECLAIM
-/* New API to decrease the pkts from pool, but not deinit
-*/
-uint16
-pktpool_reclaim(osl_t *osh, pktpool_t *pktp, uint16 free_cnt)
-{
- uint16 freed = 0;
-
- pktpool_cb_extn_t cb = NULL;
- void *arg = NULL;
-
- ASSERT(osh != NULL);
- ASSERT(pktp != NULL);
-
- /* protect shared resource */
- if (HND_PKTPOOL_MUTEX_ACQUIRE(&pktp->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS) {
- return freed;
- }
-
- if (pktp->avail < free_cnt) {
- free_cnt = pktp->avail;
- }
-
- if (BCMSPLITRX_ENAB() && (pktp->type == lbuf_rxfrag)) {
- /* If pool is shared rx frag pool, use call back fn to reclaim host address
- * and Rx cpl ID associated with the pkt.
- */
- ASSERT(pktp->cbext.cb != NULL);
-
- cb = pktp->cbext.cb;
- arg = pktp->cbext.arg;
-
- } else if ((pktp->type == lbuf_basic) && (pktp->rxcplidfn.cb != NULL)) {
- /* If pool is shared rx pool, use call back fn to freeup Rx cpl ID
- * associated with the pkt.
- */
- cb = pktp->rxcplidfn.cb;
- arg = pktp->rxcplidfn.arg;
- }
-
- while ((pktp->freelist != NULL) && (free_cnt)) {
- void * p = pktp->freelist;
-
- pktp->freelist = PKTFREELIST(p); /* unlink head packet from free list */
- PKTSETFREELIST(p, NULL);
-
- if (cb != NULL) {
- if (cb(pktp, arg, p, REMOVE_RXCPLID)) {
- PKTSETFREELIST(p, pktp->freelist);
- pktp->freelist = p;
- break;
- }
- }
-
- PKTSETPOOL(osh, p, FALSE, NULL); /* clear pool ID tag in pkt */
-
- PKTFREE(osh, p, pktp->istx); /* free the packet */
-
- freed++;
- free_cnt--;
- }
-
- pktp->avail -= freed;
-
- pktp->n_pkts -= freed;
-
- /* protect shared resource */
- if (HND_PKTPOOL_MUTEX_RELEASE(&pktp->mutex) != OSL_EXT_SUCCESS) {
- return freed;
- }
-
- return freed;
-}
-#endif /* #ifdef BCMPOOLRECLAIM */
-
-/* New API to empty the pkts from pool, but not deinit
-* NOTE: caller is responsible to ensure,
-* all pkts are available in pool for free; else LEAK !
-*/
-int
-pktpool_empty(osl_t *osh, pktpool_t *pktp)
-{
- uint16 freed = 0;
-
- ASSERT(osh != NULL);
- ASSERT(pktp != NULL);
-
- /* protect shared resource */
- if (HND_PKTPOOL_MUTEX_ACQUIRE(&pktp->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS)
- return BCME_ERROR;
-
-#ifdef BCMDBG_POOL
- {
- int i;
- for (i = 0; i <= pktp->n_pkts; i++) {
- pktp->dbg_q[i].p = NULL;
- }
- }
-#endif // endif
-
- while (pktp->freelist != NULL) {
- void * p = pktp->freelist;
-
- pktp->freelist = PKTFREELIST(p); /* unlink head packet from free list */
- PKTSETFREELIST(p, NULL);
-
- PKTSETPOOL(osh, p, FALSE, NULL); /* clear pool ID tag in pkt */
-
- PKTFREE(osh, p, pktp->istx); /* free the packet */
-
- freed++;
- ASSERT(freed <= pktp->n_pkts);
- }
-
- pktp->avail -= freed;
- ASSERT(pktp->avail == 0);
-
- pktp->n_pkts -= freed;
-
- ASSERT(pktp->n_pkts == 0);
-
- /* protect shared resource */
- if (HND_PKTPOOL_MUTEX_RELEASE(&pktp->mutex) != OSL_EXT_SUCCESS)
- return BCME_ERROR;
-
- return 0;
-}
-
static void *
pktpool_deq(pktpool_t *pktp)
{
p = pktp->freelist; /* dequeue packet from head of pktpool free list */
pktp->freelist = PKTFREELIST(p); /* free list points to next packet */
+
PKTSETFREELIST(p, NULL);
pktp->avail--;
PKTSETFREELIST(p, pktp->freelist); /* insert at head of pktpool free list */
pktp->freelist = p; /* free list points to newly inserted packet */
+
pktp->avail++;
- ASSERT(pktp->avail <= pktp->n_pkts);
+ ASSERT(pktp->avail <= pktp->len);
}
/** utility for registering host addr fill function called from pciedev */
ASSERT(cb != NULL);
- for (i = 0; i < pktp->cbcnt; i++) {
- ASSERT(pktp->cbs[i].cb != NULL);
- if ((cb == pktp->cbs[i].cb) && (arg == pktp->cbs[i].arg)) {
- pktp->cbs[i].refcnt++;
- goto done;
- }
- }
-
i = pktp->cbcnt;
if (i == PKTPOOL_CB_MAX_AVL) {
err = BCME_ERROR;
ASSERT(pktp->cbs[i].cb == NULL);
pktp->cbs[i].cb = cb;
pktp->cbs[i].arg = arg;
- pktp->cbs[i].refcnt++;
pktp->cbcnt++;
done:
return err;
}
-/* No BCMATTACHFN as it is used in a non-attach function */
-int
-pktpool_avail_deregister(pktpool_t *pktp, pktpool_cb_t cb, void *arg)
-{
- int err = 0;
- int i, k;
-
- /* protect shared resource */
- if (HND_PKTPOOL_MUTEX_ACQUIRE(&pktp->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS) {
- return BCME_ERROR;
- }
-
- ASSERT(cb != NULL);
-
- for (i = 0; i < pktp->cbcnt; i++) {
- ASSERT(pktp->cbs[i].cb != NULL);
- if ((cb == pktp->cbs[i].cb) && (arg == pktp->cbs[i].arg)) {
- pktp->cbs[i].refcnt--;
- if (pktp->cbs[i].refcnt) {
- /* Still there are references to this callback */
- goto done;
- }
- /* Moving any more callbacks to fill the hole */
- for (k = i+1; k < pktp->cbcnt; i++, k++) {
- pktp->cbs[i].cb = pktp->cbs[k].cb;
- pktp->cbs[i].arg = pktp->cbs[k].arg;
- pktp->cbs[i].refcnt = pktp->cbs[k].refcnt;
- }
-
- /* reset the last callback */
- pktp->cbs[i].cb = NULL;
- pktp->cbs[i].arg = NULL;
- pktp->cbs[i].refcnt = 0;
-
- pktp->cbcnt--;
- goto done;
- }
- }
-
-done:
- /* protect shared resource */
- if (HND_PKTPOOL_MUTEX_RELEASE(&pktp->mutex) != OSL_EXT_SUCCESS) {
- return BCME_ERROR;
- }
-
- return err;
-}
-
/** Registers callback functions */
int
pktpool_empty_register(pktpool_t *pktp, pktpool_cb_t cb, void *arg)
if (HND_PKTPOOL_MUTEX_ACQUIRE(&pktp->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS)
return NULL;
+
p = pktpool_deq(pktp);
if (p == NULL) {
goto done;
}
+
done:
/* protect shared resource */
if (HND_PKTPOOL_MUTEX_RELEASE(&pktp->mutex) != OSL_EXT_SUCCESS)
ASSERT(p != NULL);
#ifdef BCMDBG_POOL
/* pktpool_stop_trigger(pktp, p); */
-#endif // endif
+#endif
pktpool_enq(pktp, p);
ASSERT(p != NULL);
- if (pktp->n_pkts == pktp->maxlen) {
+ if (pktp->len == pktp->maxlen) {
err = BCME_RANGE;
goto done;
}
/* pkts in pool have same length */
- ASSERT(pktp->max_pkt_bytes == PKTLEN(OSH_NULL, p));
+ ASSERT(pktp->plen == PKTLEN(OSH_NULL, p));
PKTSETPOOL(OSH_NULL, p, TRUE, pktp);
- pktp->n_pkts++;
+ pktp->len++;
pktpool_enq(pktp, p);
#ifdef BCMDBG_POOL
pktp->dbg_q[pktp->dbg_qlen++].p = p;
-#endif // endif
+#endif
done:
/* protect shared resource */
* since we currently do not reduce the pool len
* already allocated
*/
- pktp->maxlen = (pktp->n_pkts > maxlen) ? pktp->n_pkts : maxlen;
+ pktp->maxlen = (pktp->len > maxlen) ? pktp->len : maxlen;
/* protect shared resource */
if (HND_PKTPOOL_MUTEX_RELEASE(&pktp->mutex) != OSL_EXT_SUCCESS)
#ifdef BCMFRAGPOOL
pktpool_t *pktpool_shared_lfrag = NULL;
-#ifdef BCMRESVFRAGPOOL
-pktpool_t *pktpool_resv_lfrag = NULL;
-struct resv_info *resv_pool_info = NULL;
-#endif /* BCMRESVFRAGPOOL */
#endif /* BCMFRAGPOOL */
pktpool_t *pktpool_shared_rxlfrag = NULL;
int
hnd_pktpool_init(osl_t *osh)
{
- int err = BCME_OK;
+ int err;
int n;
/* Construct a packet pool registry before initializing packet pools */
err = BCME_NOMEM;
goto error2;
}
-#if defined(BCMRESVFRAGPOOL) && !defined(BCMRESVFRAGPOOL_DISABLED)
- resv_pool_info = hnd_resv_pool_alloc(osh);
- if (resv_pool_info == NULL) {
- ASSERT(0);
- goto error2;
- }
- pktpool_resv_lfrag = resv_pool_info->pktp;
- if (pktpool_resv_lfrag == NULL) {
- ASSERT(0);
- goto error2;
- }
-#endif /* RESVFRAGPOOL */
-#endif /* FRAGPOOL */
+#endif
#if defined(BCMRXFRAGPOOL) && !defined(BCMRXFRAGPOOL_DISABLED)
pktpool_shared_rxlfrag = MALLOCZ(osh, sizeof(pktpool_t));
err = BCME_NOMEM;
goto error3;
}
-#endif // endif
+#endif
+
/*
* At this early stage, there's not enough memory to allocate all
* were not filled into the pool.
*/
n = 1;
- MALLOC_SET_NOPERSIST(osh); /* Ensure subsequent allocations are non-persist */
if ((err = pktpool_init(osh, pktpool_shared,
&n, PKTBUFSZ, FALSE, lbuf_basic)) != BCME_OK) {
ASSERT(0);
goto error5;
}
pktpool_setmaxlen(pktpool_shared_lfrag, SHARED_FRAG_POOL_LEN);
-#if defined(BCMRESVFRAGPOOL) && !defined(BCMRESVFRAGPOOL_DISABLED)
- n = 0; /* IMPORTANT: DO NOT allocate any packets in resv pool */
- if (pktpool_init(osh, pktpool_resv_lfrag,
- &n, PKTFRAGSZ, TRUE, lbuf_frag) == BCME_ERROR) {
- ASSERT(0);
- goto error5;
- }
- pktpool_setmaxlen(pktpool_resv_lfrag, RESV_FRAG_POOL_LEN);
-#endif /* RESVFRAGPOOL */
-#endif /* BCMFRAGPOOL */
+#endif
#if defined(BCMRXFRAGPOOL) && !defined(BCMRXFRAGPOOL_DISABLED)
n = 1;
if ((err = pktpool_init(osh, pktpool_shared_rxlfrag,
goto error6;
}
pktpool_setmaxlen(pktpool_shared_rxlfrag, SHARED_RXFRAG_POOL_LEN);
-#endif // endif
-
-#if defined(BCMFRWDPOOLREORG) && !defined(BCMFRWDPOOLREORG_DISABLED)
- /* Attach poolreorg module */
- if ((frwd_poolreorg_info = poolreorg_attach(osh,
-#if defined(BCMFRAGPOOL) && !defined(BCMFRAGPOOL_DISABLED)
- pktpool_shared_lfrag,
-#else
- NULL,
-#endif // endif
-#if defined(BCMRXFRAGPOOL) && !defined(BCMRXFRAGPOOL_DISABLED)
- pktpool_shared_rxlfrag,
-#else
- NULL,
-#endif // endif
- pktpool_shared)) == NULL) {
- ASSERT(0);
- goto error7;
- }
-#endif /* defined(BCMFRWDPOOLREORG) && !defined(BCMFRWDPOOLREORG_DISABLED) */
+#endif
pktpool_osh = osh;
- MALLOC_CLEAR_NOPERSIST(osh);
return BCME_OK;
-#if defined(BCMFRWDPOOLREORG) && !defined(BCMFRWDPOOLREORG_DISABLED)
- /* detach poolreorg module */
- poolreorg_detach(frwd_poolreorg_info);
-error7:
-#endif /* defined(BCMFRWDPOOLREORG) && !defined(BCMFRWDPOOLREORG_DISABLED) */
-
#if defined(BCMRXFRAGPOOL) && !defined(BCMRXFRAGPOOL_DISABLED)
- pktpool_deinit(osh, pktpool_shared_rxlfrag);
error6:
-#endif // endif
+#endif
#if defined(BCMFRAGPOOL) && !defined(BCMFRAGPOOL_DISABLED)
pktpool_deinit(osh, pktpool_shared_lfrag);
error5:
-#endif // endif
+#endif
#if (defined(BCMRXFRAGPOOL) && !defined(BCMRXFRAGPOOL_DISABLED)) || \
(defined(BCMFRAGPOOL) && !defined(BCMFRAGPOOL_DISABLED))
pktpool_deinit(osh, pktpool_shared);
-#endif // endif
+#endif
error4:
#if defined(BCMRXFRAGPOOL) && !defined(BCMRXFRAGPOOL_DISABLED)
error1:
pktpool_dettach(osh);
error0:
- MALLOC_CLEAR_NOPERSIST(osh);
return err;
} /* hnd_pktpool_init */
-/** is called at each 'wl up' */
int
hnd_pktpool_fill(pktpool_t *pktpool, bool minimal)
{
return (pktpool_fill(pktpool_osh, pktpool, minimal));
}
-/** refills pktpools after reclaim, is called once */
+/** refills pktpools after reclaim */
void
hnd_pktpool_refill(bool minimal)
{
if (POOL_ENAB(pktpool_shared)) {
+ pktpool_fill(pktpool_osh, pktpool_shared, minimal);
+ }
+/* fragpool reclaim */
+#ifdef BCMFRAGPOOL
+ if (POOL_ENAB(pktpool_shared_lfrag)) {
#if defined(SRMEM)
if (SRMEM_ENAB()) {
- int maxlen = pktpool_max_pkts(pktpool_shared);
- int n_pkts = pktpool_tot_pkts(pktpool_shared);
+ int maxlen = pktpool_maxlen(pktpool_shared);
+ int len = pktpool_len(pktpool_shared);
- for (; n_pkts < maxlen; n_pkts++) {
+ for (; len < maxlen; len++) {
void *p;
- if ((p = PKTSRGET(pktpool_max_pkt_bytes(pktpool_shared))) == NULL)
+ if ((p = PKTSRGET(pktpool_plen(pktpool_shared))) == NULL)
break;
pktpool_add(pktpool_shared, p);
}
}
#endif /* SRMEM */
- pktpool_fill(pktpool_osh, pktpool_shared, minimal);
- }
-/* fragpool reclaim */
-#ifdef BCMFRAGPOOL
- if (POOL_ENAB(pktpool_shared_lfrag)) {
pktpool_fill(pktpool_osh, pktpool_shared_lfrag, minimal);
}
#endif /* BCMFRAGPOOL */
if (POOL_ENAB(pktpool_shared_rxlfrag)) {
pktpool_fill(pktpool_osh, pktpool_shared_rxlfrag, minimal);
}
-#endif // endif
-#if defined(BCMFRAGPOOL) && defined(BCMRESVFRAGPOOL)
- if (POOL_ENAB(pktpool_resv_lfrag)) {
- int resv_size = (PKTFRAGSZ + LBUFFRAGSZ)*RESV_FRAG_POOL_LEN;
- hnd_resv_pool_init(resv_pool_info, resv_size);
- hnd_resv_pool_enable(resv_pool_info);
- }
-#endif /* BCMRESVFRAGPOOL */
+#endif
}
#endif /* BCMPKTPOOL */
/*
* HND generic pktq operation primitives
*
- * Copyright (C) 1999-2019, Broadcom.
- *
+ * Copyright (C) 1999-2017, Broadcom Corporation
+ *
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
* under the terms of the GNU General Public License version 2 (the "GPL"),
* available at http://www.broadcom.com/licenses/GPLv2.php, with the
* following added to such license:
- *
+ *
* As a special exception, the copyright holders of this software give you
* permission to link this software with independent modules, and to copy and
* distribute the resulting executable under terms of your choice, provided that
* the license of that module. An independent module is a module which is not
* derived from this software. The special exception does not apply to any
* modifications of the software.
- *
+ *
* Notwithstanding the above, under no circumstances may you combine this
* software in any way with any other Broadcom software provided under a license
* other than the GPL, without Broadcom's express prior written consent.
*
* <<Broadcom-WL-IPTag/Open:>>
*
- * $Id: hnd_pktq.c 698847 2017-05-11 00:10:48Z $
+ * $Id: hnd_pktq.c 644628 2016-06-21 06:25:58Z $
*/
#include <typedefs.h>
#define HND_PKTQ_MUTEX_DELETE(mutex) OSL_EXT_SUCCESS
#define HND_PKTQ_MUTEX_ACQUIRE(mutex, msec) OSL_EXT_SUCCESS
#define HND_PKTQ_MUTEX_RELEASE(mutex) OSL_EXT_SUCCESS
-#endif /* HND_PKTQ_THREAD_SAFE */
+#endif /* */
-/* status during txfifo sync */
-#if defined(WLAMPDU_MAC) && defined(PROP_TXSTATUS)
-#define TXQ_PKT_DEL 0x01
-#define HEAD_PKT_FLUSHED 0xFF
-#endif /* defined(WLAMPDU_MAC) && defined(PROP_TXSTATUS) */
/*
* osl multiple-precedence packet queue
* hi_prec is always >= the number of the highest non-empty precedence
ASSERT(PKTLINK(p) == NULL); /* queueing chains not allowed */
ASSERT(!pktq_full(pq));
- ASSERT(!pktqprec_full(pq, prec));
+ ASSERT(!pktq_pfull(pq, prec));
q = &pq->q[prec];
q->head = p;
q->tail = p;
- q->n_pkts++;
+ q->len++;
- pq->n_pkts_tot++;
+ pq->len++;
if (pq->hi_prec < prec)
pq->hi_prec = (uint8)prec;
return p;
}
-/*
- * osl simple, non-priority packet queue
- */
-void * BCMFASTPATH
-spktq_enq(struct spktq *spq, void *p)
-{
- struct pktq_prec *q;
-
- /* protect shared resource */
- if (HND_PKTQ_MUTEX_ACQUIRE(&spq->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS)
- return NULL;
-
- ASSERT(!spktq_full(spq));
-
- PKTSETLINK(p, NULL);
-
- q = &spq->q;
-
- if (q->head)
- PKTSETLINK(q->tail, p);
- else
- q->head = p;
-
- q->tail = p;
- q->n_pkts++;
-
- /* protect shared resource */
- if (HND_PKTQ_MUTEX_RELEASE(&spq->mutex) != OSL_EXT_SUCCESS)
- return NULL;
-
- return p;
-}
-
void * BCMFASTPATH
pktq_penq_head(struct pktq *pq, int prec, void *p)
{
ASSERT(PKTLINK(p) == NULL); /* queueing chains not allowed */
ASSERT(!pktq_full(pq));
- ASSERT(!pktqprec_full(pq, prec));
+ ASSERT(!pktq_pfull(pq, prec));
q = &pq->q[prec];
PKTSETLINK(p, q->head);
q->head = p;
- q->n_pkts++;
+ q->len++;
- pq->n_pkts_tot++;
+ pq->len++;
if (pq->hi_prec < prec)
pq->hi_prec = (uint8)prec;
return p;
}
-void * BCMFASTPATH
-spktq_enq_head(struct spktq *spq, void *p)
-{
- struct pktq_prec *q;
-
- /* protect shared resource */
- if (HND_PKTQ_MUTEX_ACQUIRE(&spq->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS)
- return NULL;
-
- ASSERT(!spktq_full(spq));
-
- PKTSETLINK(p, NULL);
-
- q = &spq->q;
-
- if (q->head == NULL)
- q->tail = p;
-
- PKTSETLINK(p, q->head);
- q->head = p;
- q->n_pkts++;
-
- /* protect shared resource */
- if (HND_PKTQ_MUTEX_RELEASE(&spq->mutex) != OSL_EXT_SUCCESS)
- return NULL;
-
- return p;
-}
-
-void * BCMFASTPATH
-pktq_pdeq(struct pktq *pq, int prec)
-{
- struct pktq_prec *q;
- void *p;
-
- /* protect shared resource */
- if (HND_PKTQ_MUTEX_ACQUIRE(&pq->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS)
- return NULL;
-
- ASSERT(prec >= 0 && prec < pq->num_prec);
-
- q = &pq->q[prec];
-
- if ((p = q->head) == NULL)
- goto done;
-
- if ((q->head = PKTLINK(p)) == NULL)
- q->tail = NULL;
-
- q->n_pkts--;
-
- pq->n_pkts_tot--;
-
-#ifdef WL_TXQ_STALL
- q->dequeue_count++;
-#endif // endif
-
- PKTSETLINK(p, NULL);
-
-done:
- /* protect shared resource */
- if (HND_PKTQ_MUTEX_RELEASE(&pq->mutex) != OSL_EXT_SUCCESS)
- return NULL;
-
- return p;
-}
-
-void * BCMFASTPATH
-spktq_deq(struct spktq *spq)
-{
- struct pktq_prec *q;
- void *p;
-
- /* protect shared resource */
- if (HND_PKTQ_MUTEX_ACQUIRE(&spq->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS)
- return NULL;
-
- q = &spq->q;
-
- if ((p = q->head) == NULL)
- goto done;
-
- if ((q->head = PKTLINK(p)) == NULL)
- q->tail = NULL;
-
- q->n_pkts--;
-
-#ifdef WL_TXQ_STALL
- q->dequeue_count++;
-#endif // endif
-
- PKTSETLINK(p, NULL);
-
-done:
- /* protect shared resource */
- if (HND_PKTQ_MUTEX_RELEASE(&spq->mutex) != OSL_EXT_SUCCESS)
- return NULL;
-
- return p;
-}
-
-void * BCMFASTPATH
-pktq_pdeq_tail(struct pktq *pq, int prec)
-{
- struct pktq_prec *q;
- void *p, *prev;
-
- /* protect shared resource */
- if (HND_PKTQ_MUTEX_ACQUIRE(&pq->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS)
- return NULL;
-
- ASSERT(prec >= 0 && prec < pq->num_prec);
-
- q = &pq->q[prec];
-
- if ((p = q->head) == NULL)
- goto done;
-
- for (prev = NULL; p != q->tail; p = PKTLINK(p))
- prev = p;
-
- if (prev)
- PKTSETLINK(prev, NULL);
- else
- q->head = NULL;
-
- q->tail = prev;
- q->n_pkts--;
-
- pq->n_pkts_tot--;
-
-#ifdef WL_TXQ_STALL
- q->dequeue_count++;
-#endif // endif
-done:
- /* protect shared resource */
- if (HND_PKTQ_MUTEX_RELEASE(&pq->mutex) != OSL_EXT_SUCCESS)
- return NULL;
-
- return p;
-}
-
-void * BCMFASTPATH
-spktq_deq_tail(struct spktq *spq)
-{
- struct pktq_prec *q;
- void *p, *prev;
-
- /* protect shared resource */
- if (HND_PKTQ_MUTEX_ACQUIRE(&spq->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS)
- return NULL;
-
- q = &spq->q;
-
- if ((p = q->head) == NULL)
- goto done;
-
- for (prev = NULL; p != q->tail; p = PKTLINK(p))
- prev = p;
-
- if (prev)
- PKTSETLINK(prev, NULL);
- else
- q->head = NULL;
-
- q->tail = prev;
- q->n_pkts--;
-
-#ifdef WL_TXQ_STALL
- q->dequeue_count++;
-#endif // endif
-done:
- /* protect shared resource */
- if (HND_PKTQ_MUTEX_RELEASE(&spq->mutex) != OSL_EXT_SUCCESS)
- return NULL;
-
- return p;
-}
-
-void *
-pktq_peek_tail(struct pktq *pq, int *prec_out)
-{
- int prec;
- void *p = NULL;
-
- /* protect shared resource */
- if (HND_PKTQ_MUTEX_ACQUIRE(&pq->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS)
- return NULL;
-
- if (pq->n_pkts_tot == 0)
- goto done;
-
- for (prec = 0; prec < pq->hi_prec; prec++)
- if (pq->q[prec].head)
- break;
-
- if (prec_out)
- *prec_out = prec;
-
- p = pq->q[prec].tail;
-
-done:
- /* protect shared resource */
- if (HND_PKTQ_MUTEX_RELEASE(&pq->mutex) != OSL_EXT_SUCCESS)
- return NULL;
-
- return p;
-}
-
/*
* Append spktq 'list' to the tail of pktq 'pq'
*/
if (HND_PKTQ_MUTEX_ACQUIRE(&pq->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS)
return;
- list_q = &list->q;
+ list_q = &list->q[0];
/* empty list check */
if (list_q->head == NULL)
ASSERT(PKTLINK(list_q->tail) == NULL); /* terminated list */
ASSERT(!pktq_full(pq));
- ASSERT(!pktqprec_full(pq, prec));
+ ASSERT(!pktq_pfull(pq, prec));
q = &pq->q[prec];
q->head = list_q->head;
q->tail = list_q->tail;
- q->n_pkts += list_q->n_pkts;
- pq->n_pkts_tot += list_q->n_pkts;
+ q->len += list_q->len;
+ pq->len += list_q->len;
if (pq->hi_prec < prec)
pq->hi_prec = (uint8)prec;
-#ifdef WL_TXQ_STALL
- list_q->dequeue_count += list_q->n_pkts;
-#endif // endif
-
list_q->head = NULL;
list_q->tail = NULL;
- list_q->n_pkts = 0;
+ list_q->len = 0;
+ list->len = 0;
done:
/* protect shared resource */
return;
}
-/*
- * Append spktq 'list' to the tail of spktq 'spq'
- */
-void BCMFASTPATH
-spktq_append(struct spktq *spq, struct spktq *list)
-{
- struct pktq_prec *q;
- struct pktq_prec *list_q;
-
- /* protect shared resource */
- if (HND_PKTQ_MUTEX_ACQUIRE(&spq->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS)
- return;
-
- list_q = &list->q;
-
- /* empty list check */
- if (list_q->head == NULL)
- goto done;
-
- ASSERT(PKTLINK(list_q->tail) == NULL); /* terminated list */
-
- ASSERT(!spktq_full(spq));
-
- q = &spq->q;
-
- if (q->head)
- PKTSETLINK(q->tail, list_q->head);
- else
- q->head = list_q->head;
-
- q->tail = list_q->tail;
- q->n_pkts += list_q->n_pkts;
-
-#ifdef WL_TXQ_STALL
- list_q->dequeue_count += list_q->n_pkts;
-#endif // endif
-
- list_q->head = NULL;
- list_q->tail = NULL;
- list_q->n_pkts = 0;
-
-done:
- /* protect shared resource */
- if (HND_PKTQ_MUTEX_RELEASE(&spq->mutex) != OSL_EXT_SUCCESS)
- return;
-}
-
/*
* Prepend spktq 'list' to the head of pktq 'pq'
*/
if (HND_PKTQ_MUTEX_ACQUIRE(&pq->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS)
return;
- list_q = &list->q;
+ list_q = &list->q[0];
/* empty list check */
if (list_q->head == NULL)
ASSERT(PKTLINK(list_q->tail) == NULL); /* terminated list */
ASSERT(!pktq_full(pq));
- ASSERT(!pktqprec_full(pq, prec));
+ ASSERT(!pktq_pfull(pq, prec));
q = &pq->q[prec];
q->tail = list_q->tail;
}
- q->n_pkts += list_q->n_pkts;
- pq->n_pkts_tot += list_q->n_pkts;
+ q->len += list_q->len;
+ pq->len += list_q->len;
if (pq->hi_prec < prec)
pq->hi_prec = (uint8)prec;
-#ifdef WL_TXQ_STALL
- list_q->dequeue_count += list_q->n_pkts;
-#endif // endif
-
list_q->head = NULL;
list_q->tail = NULL;
- list_q->n_pkts = 0;
+ list_q->len = 0;
+ list->len = 0;
done:
/* protect shared resource */
return;
}
-/*
- * Prepend spktq 'list' to the head of spktq 'spq'
- */
-void BCMFASTPATH
-spktq_prepend(struct spktq *spq, struct spktq *list)
+void * BCMFASTPATH
+pktq_pdeq(struct pktq *pq, int prec)
{
struct pktq_prec *q;
- struct pktq_prec *list_q;
+ void *p;
/* protect shared resource */
- if (HND_PKTQ_MUTEX_ACQUIRE(&spq->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS)
- return;
-
- list_q = &list->q;
-
- /* empty list check */
- if (list_q->head == NULL)
- goto done;
-
- ASSERT(PKTLINK(list_q->tail) == NULL); /* terminated list */
+ if (HND_PKTQ_MUTEX_ACQUIRE(&pq->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS)
+ return NULL;
- ASSERT(!spktq_full(spq));
+ ASSERT(prec >= 0 && prec < pq->num_prec);
- q = &spq->q;
+ q = &pq->q[prec];
- /* set the tail packet of list to point at the former pq head */
- PKTSETLINK(list_q->tail, q->head);
- /* the new q head is the head of list */
- q->head = list_q->head;
+ if ((p = q->head) == NULL)
+ goto done;
- /* If the q tail was non-null, then it stays as is.
- * If the q tail was null, it is now the tail of list
- */
- if (q->tail == NULL) {
- q->tail = list_q->tail;
- }
+ if ((q->head = PKTLINK(p)) == NULL)
+ q->tail = NULL;
- q->n_pkts += list_q->n_pkts;
+ q->len--;
-#ifdef WL_TXQ_STALL
- list_q->dequeue_count += list_q->n_pkts;
-#endif // endif
+ pq->len--;
- list_q->head = NULL;
- list_q->tail = NULL;
- list_q->n_pkts = 0;
+ PKTSETLINK(p, NULL);
done:
/* protect shared resource */
- if (HND_PKTQ_MUTEX_RELEASE(&spq->mutex) != OSL_EXT_SUCCESS)
- return;
+ if (HND_PKTQ_MUTEX_RELEASE(&pq->mutex) != OSL_EXT_SUCCESS)
+ return NULL;
+
+ return p;
}
void * BCMFASTPATH
if ((p = PKTLINK(prev_p)) == NULL)
goto done;
- q->n_pkts--;
+ q->len--;
- pq->n_pkts_tot--;
+ pq->len--;
-#ifdef WL_TXQ_STALL
- q->dequeue_count++;
-#endif // endif
PKTSETLINK(prev_p, PKTLINK(p));
PKTSETLINK(p, NULL);
}
}
- q->n_pkts--;
+ q->len--;
- pq->n_pkts_tot--;
+ pq->len--;
-#ifdef WL_TXQ_STALL
- q->dequeue_count++;
-#endif // endif
PKTSETLINK(p, NULL);
done:
return p;
}
+void * BCMFASTPATH
+pktq_pdeq_tail(struct pktq *pq, int prec)
+{
+ struct pktq_prec *q;
+ void *p, *prev;
+
+ /* protect shared resource */
+ if (HND_PKTQ_MUTEX_ACQUIRE(&pq->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS)
+ return NULL;
+
+ ASSERT(prec >= 0 && prec < pq->num_prec);
+
+ q = &pq->q[prec];
+
+ if ((p = q->head) == NULL)
+ goto done;
+
+ for (prev = NULL; p != q->tail; p = PKTLINK(p))
+ prev = p;
+
+ if (prev)
+ PKTSETLINK(prev, NULL);
+ else
+ q->head = NULL;
+
+ q->tail = prev;
+ q->len--;
+
+ pq->len--;
+
+done:
+ /* protect shared resource */
+ if (HND_PKTQ_MUTEX_RELEASE(&pq->mutex) != OSL_EXT_SUCCESS)
+ return NULL;
+
+ return p;
+}
+
bool BCMFASTPATH
pktq_pdel(struct pktq *pq, void *pktbuf, int prec)
{
q->tail = p;
}
- q->n_pkts--;
- pq->n_pkts_tot--;
-
-#ifdef WL_TXQ_STALL
- q->dequeue_count++;
-#endif // endif
-
+ q->len--;
+ pq->len--;
PKTSETLINK(pktbuf, NULL);
ret = TRUE;
q->head = NULL;
q->tail = NULL;
- q->n_pkts = 0;
-
-#ifdef WL_TXQ_STALL
- q->dequeue_count += wq.n_pkts;
-#endif // endif
+ q->len = 0;
- pq->n_pkts_tot -= wq.n_pkts;
+ pq->len -= wq.len;
/* protect shared resource */
if (HND_PKTQ_MUTEX_RELEASE(&pq->mutex) != OSL_EXT_SUCCESS)
/* unlink the current packet from the list */
wq.head = PKTLINK(p);
PKTSETLINK(p, NULL);
- wq.n_pkts--;
-
-#ifdef WL_TXQ_STALL
- wq.dequeue_count++;
-#endif // endif
+ wq.len--;
/* call the filter function on current packet */
ASSERT(fltr != NULL);
}
}
- ASSERT(wq.n_pkts == 0);
+ ASSERT(wq.len == 0);
}
void
if (HND_PKTQ_MUTEX_ACQUIRE(&pq->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS)
return;
- /* Optimize if pktq n_pkts = 0, just return.
+ /* Optimize if pktq len = 0, just return.
* pktq len of 0 means pktq's prec q's are all empty.
*/
- if (pq->n_pkts_tot > 0) {
+ if (pq->len > 0) {
filter = TRUE;
}
}
}
-void
-spktq_filter(struct spktq *spq, pktq_filter_t fltr, void* fltr_ctx,
- defer_free_pkt_fn_t defer, void *defer_ctx, flush_free_pkt_fn_t flush, void *flush_ctx)
-{
- struct pktq_prec wq;
- struct pktq_prec *q;
- void *p = NULL;
-
- /* protect shared resource */
- if (HND_PKTQ_MUTEX_ACQUIRE(&spq->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS)
- return;
-
- q = &spq->q;
-
- /* Optimize if pktq_prec n_pkts = 0, just return. */
- if (q->n_pkts == 0) {
- (void)HND_PKTQ_MUTEX_RELEASE(&spq->mutex);
- return;
- }
-
- wq = *q;
-
- q->head = NULL;
- q->tail = NULL;
- q->n_pkts = 0;
-
-#ifdef WL_TXQ_STALL
- q->dequeue_count += wq.n_pkts;
-#endif // endif
-
- /* protect shared resource */
- if (HND_PKTQ_MUTEX_RELEASE(&spq->mutex) != OSL_EXT_SUCCESS)
- return;
-
- /* start with the head of the work queue */
-
- while ((p = wq.head) != NULL) {
- /* unlink the current packet from the list */
- wq.head = PKTLINK(p);
- PKTSETLINK(p, NULL);
- wq.n_pkts--;
-
-#ifdef WL_TXQ_STALL
- wq.dequeue_count++;
-#endif // endif
-
- /* call the filter function on current packet */
- ASSERT(fltr != NULL);
- switch ((*fltr)(fltr_ctx, p)) {
- case PKT_FILTER_NOACTION:
- /* put this packet back */
- spktq_enq(spq, p);
- break;
-
- case PKT_FILTER_DELETE:
- /* delete this packet */
- ASSERT(defer != NULL);
- (*defer)(defer_ctx, p);
- break;
-
- case PKT_FILTER_REMOVE:
- /* pkt already removed from list */
- break;
-
- default:
- ASSERT(0);
- break;
- }
- }
-
- ASSERT(wq.n_pkts == 0);
-
- ASSERT(flush != NULL);
- (*flush)(flush_ctx);
-}
-
bool
-pktq_init(struct pktq *pq, int num_prec, int max_pkts)
+pktq_init(struct pktq *pq, int num_prec, int max_len)
{
int prec;
+ if (HND_PKTQ_MUTEX_CREATE("pktq", &pq->mutex) != OSL_EXT_SUCCESS)
+ return FALSE;
+
ASSERT(num_prec > 0 && num_prec <= PKTQ_MAX_PREC);
/* pq is variable size; only zero out what's requested */
bzero(pq, OFFSETOF(struct pktq, q) + (sizeof(struct pktq_prec) * num_prec));
- if (HND_PKTQ_MUTEX_CREATE("pktq", &pq->mutex) != OSL_EXT_SUCCESS)
- return FALSE;
-
pq->num_prec = (uint16)num_prec;
- pq->max_pkts = (uint16)max_pkts;
+ pq->max = (uint16)max_len;
for (prec = 0; prec < num_prec; prec++)
- pq->q[prec].max_pkts = pq->max_pkts;
-
- return TRUE;
-}
-
-bool
-spktq_init(struct spktq *spq, int max_pkts)
-{
- bzero(spq, sizeof(struct spktq));
-
- if (HND_PKTQ_MUTEX_CREATE("spktq", &spq->mutex) != OSL_EXT_SUCCESS)
- return FALSE;
-
- spq->q.max_pkts = (uint16)max_pkts;
+ pq->q[prec].max = pq->max;
return TRUE;
}
return TRUE;
}
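/*
 * Illustrative sketch (not part of the driver): the pktq above keeps one singly
 * linked list per precedence plus a hi_prec hint that is only raised on enqueue and
 * lowered lazily on dequeue. The mini_* types and functions below are invented for
 * this standalone miniature of the same idea.
 */
#include <stddef.h>

#define MINI_NUM_PREC 4

struct mini_pkt { struct mini_pkt *link; };
struct mini_prec { struct mini_pkt *head, *tail; int len; };
struct mini_pktq { struct mini_prec q[MINI_NUM_PREC]; int len; int hi_prec; };

/* Enqueue at the tail of one precedence; hi_prec only ever grows here. */
static void mini_penq(struct mini_pktq *pq, int prec, struct mini_pkt *p)
{
	struct mini_prec *q = &pq->q[prec];

	p->link = NULL;
	if (q->tail)
		q->tail->link = p;
	else
		q->head = p;
	q->tail = p;
	q->len++;
	pq->len++;
	if (pq->hi_prec < prec)
		pq->hi_prec = prec;
}

/* Dequeue from the highest non-empty precedence, walking hi_prec down. */
static struct mini_pkt *mini_deq(struct mini_pktq *pq)
{
	struct mini_prec *q;
	struct mini_pkt *p;

	while (pq->hi_prec > 0 && pq->q[pq->hi_prec].head == NULL)
		pq->hi_prec--;
	q = &pq->q[pq->hi_prec];
	if ((p = q->head) == NULL)
		return NULL;
	if ((q->head = p->link) == NULL)
		q->tail = NULL;
	q->len--;
	pq->len--;
	p->link = NULL;
	return p;
}

int main(void)
{
	struct mini_pktq pq = {0};
	struct mini_pkt a = {0}, b = {0};

	mini_penq(&pq, 1, &a);
	mini_penq(&pq, 3, &b);
	return mini_deq(&pq) == &b ? 0 : 1;	/* higher precedence comes out first */
}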
-bool
-spktq_deinit(struct spktq *spq)
-{
- BCM_REFERENCE(spq);
- if (HND_PKTQ_MUTEX_DELETE(&spq->mutex) != OSL_EXT_SUCCESS)
- return FALSE;
-
- return TRUE;
-}
-
void
-pktq_set_max_plen(struct pktq *pq, int prec, int max_pkts)
+pktq_set_max_plen(struct pktq *pq, int prec, int max_len)
{
ASSERT(prec >= 0 && prec < pq->num_prec);
return;
if (prec < pq->num_prec)
- pq->q[prec].max_pkts = (uint16)max_pkts;
+ pq->q[prec].max = (uint16)max_len;
/* protect shared resource */
if (HND_PKTQ_MUTEX_RELEASE(&pq->mutex) != OSL_EXT_SUCCESS)
if (HND_PKTQ_MUTEX_ACQUIRE(&pq->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS)
return NULL;
- if (pq->n_pkts_tot == 0)
+ if (pq->len == 0)
goto done;
while ((prec = pq->hi_prec) > 0 && pq->q[prec].head == NULL)
if ((q->head = PKTLINK(p)) == NULL)
q->tail = NULL;
- q->n_pkts--;
+ q->len--;
- pq->n_pkts_tot--;
-
-#ifdef WL_TXQ_STALL
- q->dequeue_count++;
-#endif // endif
+ pq->len--;
if (prec_out)
*prec_out = prec;
if (HND_PKTQ_MUTEX_ACQUIRE(&pq->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS)
return NULL;
- if (pq->n_pkts_tot == 0)
+ if (pq->len == 0)
goto done;
for (prec = 0; prec < pq->hi_prec; prec++)
q->head = NULL;
q->tail = prev;
- q->n_pkts--;
-
- pq->n_pkts_tot--;
+ q->len--;
-#ifdef WL_TXQ_STALL
- q->dequeue_count++;
-#endif // endif
+ pq->len--;
if (prec_out)
*prec_out = prec;
if (HND_PKTQ_MUTEX_ACQUIRE(&pq->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS)
return NULL;
- if (pq->n_pkts_tot == 0)
+ if (pq->len == 0)
goto done;
while ((prec = pq->hi_prec) > 0 && pq->q[prec].head == NULL)
}
void *
-spktq_peek(struct spktq *spq)
+pktq_peek_tail(struct pktq *pq, int *prec_out)
{
+ int prec;
void *p = NULL;
/* protect shared resource */
- if (HND_PKTQ_MUTEX_ACQUIRE(&spq->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS)
+ if (HND_PKTQ_MUTEX_ACQUIRE(&pq->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS)
return NULL;
- if (spq->q.n_pkts == 0)
+ if (pq->len == 0)
goto done;
- p = spq->q.head;
+ for (prec = 0; prec < pq->hi_prec; prec++)
+ if (pq->q[prec].head)
+ break;
+
+ if (prec_out)
+ *prec_out = prec;
+
+ p = pq->q[prec].tail;
done:
/* protect shared resource */
- if (HND_PKTQ_MUTEX_RELEASE(&spq->mutex) != OSL_EXT_SUCCESS)
+ if (HND_PKTQ_MUTEX_RELEASE(&pq->mutex) != OSL_EXT_SUCCESS)
return NULL;
return p;
}
}
-void
-spktq_flush(osl_t *osh, struct spktq *spq, bool dir)
-{
- void *p;
-
- /* no need for a mutex protection! */
-
- /* start with the head of the list */
- while ((p = spktq_deq(spq)) != NULL) {
-
- /* delete this packet */
- PKTFREE(osh, p, dir);
- }
-}
-
void
pktq_flush(osl_t *osh, struct pktq *pq, bool dir)
{
if (HND_PKTQ_MUTEX_ACQUIRE(&pq->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS)
return;
- /* Optimize flush, if pktq n_pkts_tot = 0, just return.
+ /* Optimize flush, if pktq len = 0, just return.
* pktq len of 0 means pktq's prec q's are all empty.
*/
- if (pq->n_pkts_tot > 0) {
+ if (pq->len > 0) {
flush = TRUE;
}
for (prec = 0; prec <= pq->hi_prec; prec++)
if (prec_bmp & (1 << prec))
- len += pq->q[prec].n_pkts;
+ len += pq->q[prec].len;
/* protect shared resource */
if (HND_PKTQ_MUTEX_RELEASE(&pq->mutex) != OSL_EXT_SUCCESS)
if (HND_PKTQ_MUTEX_ACQUIRE(&pq->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS)
return NULL;
- if (pq->n_pkts_tot == 0)
+ if (pq->len == 0)
goto done;
while ((prec = pq->hi_prec) > 0 && pq->q[prec].head == NULL)
if (HND_PKTQ_MUTEX_ACQUIRE(&pq->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS)
return NULL;
- if (pq->n_pkts_tot == 0)
+ if (pq->len == 0)
goto done;
while ((prec = pq->hi_prec) > 0 && pq->q[prec].head == NULL)
if ((q->head = PKTLINK(p)) == NULL)
q->tail = NULL;
- q->n_pkts--;
+ q->len--;
// terence 20150308: fix for non-null pointer of skb->prev sent from ndo_start_xmit
- if (q->n_pkts == 0) {
+ if (q->len == 0) {
q->head = NULL;
q->tail = NULL;
}
-#ifdef WL_TXQ_STALL
- q->dequeue_count++;
-#endif // endif
-
if (prec_out)
*prec_out = prec;
- pq->n_pkts_tot--;
+ pq->len--;
PKTSETLINK(p, NULL);
#ifdef HND_PKTQ_THREAD_SAFE
int
-pktqprec_avail_pkts(struct pktq *pq, int prec)
+pktq_pavail(struct pktq *pq, int prec)
{
int ret;
ASSERT(prec >= 0 && prec < pq->num_prec);
- ret = pq->q[prec].max_pkts - pq->q[prec].n_pkts;
+ ret = pq->q[prec].max - pq->q[prec].len;
/* protect shared resource */
if (HND_PKTQ_MUTEX_RELEASE(&pq->mutex) != OSL_EXT_SUCCESS)
}
bool
-pktqprec_full(struct pktq *pq, int prec)
+pktq_pfull(struct pktq *pq, int prec)
{
bool ret;
ASSERT(prec >= 0 && prec < pq->num_prec);
- ret = pq->q[prec].n_pkts >= pq->q[prec].max_pkts;
+ ret = pq->q[prec].len >= pq->q[prec].max;
/* protect shared resource */
if (HND_PKTQ_MUTEX_RELEASE(&pq->mutex) != OSL_EXT_SUCCESS)
if (HND_PKTQ_MUTEX_ACQUIRE(&pq->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS)
return 0;
- ret = pq->max_pkts - pq->n_pkts_tot;
+ ret = pq->max - pq->len;
/* protect shared resource */
if (HND_PKTQ_MUTEX_RELEASE(&pq->mutex) != OSL_EXT_SUCCESS)
return ret;
}
-int
-spktq_avail(struct spktq *spq)
-{
- int ret;
-
- /* protect shared resource */
- if (HND_PKTQ_MUTEX_ACQUIRE(&spq->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS)
- return 0;
-
- ret = spq->q.max_pkts - spq->q.n_pkts;
-
- /* protect shared resource */
- if (HND_PKTQ_MUTEX_RELEASE(&spq->mutex) != OSL_EXT_SUCCESS)
- return 0;
-
- return ret;
-}
-
bool
pktq_full(struct pktq *pq)
{
if (HND_PKTQ_MUTEX_ACQUIRE(&pq->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS)
return FALSE;
- ret = pq->n_pkts_tot >= pq->max_pkts;
+ ret = pq->len >= pq->max;
/* protect shared resource */
if (HND_PKTQ_MUTEX_RELEASE(&pq->mutex) != OSL_EXT_SUCCESS)
return ret;
}
-
-bool
-spktq_full(struct spktq *spq)
-{
- bool ret;
-
- /* protect shared resource */
- if (HND_PKTQ_MUTEX_ACQUIRE(&spq->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS)
- return FALSE;
-
- ret = spq->q.n_pkts >= spq->q.max_pkts;
-
- /* protect shared resource */
- if (HND_PKTQ_MUTEX_RELEASE(&spq->mutex) != OSL_EXT_SUCCESS)
- return FALSE;
-
- return ret;
-}
-
#endif /* HND_PKTQ_THREAD_SAFE */
+++ /dev/null
-/*
- * Misc utility routines for accessing lhl specific features
- * of the SiliconBackplane-based Broadcom chips.
- *
- * Copyright (C) 1999-2019, Broadcom.
- *
- * Unless you and Broadcom execute a separate written software license
- * agreement governing use of this software, this software is licensed to you
- * under the terms of the GNU General Public License version 2 (the "GPL"),
- * available at http://www.broadcom.com/licenses/GPLv2.php, with the
- * following added to such license:
- *
- * As a special exception, the copyright holders of this software give you
- * permission to link this software with independent modules, and to copy and
- * distribute the resulting executable under terms of your choice, provided that
- * you also meet, for each linked independent module, the terms and conditions of
- * the license of that module. An independent module is a module which is not
- * derived from this software. The special exception does not apply to any
- * modifications of the software.
- *
- * Notwithstanding the above, under no circumstances may you combine this
- * software in any way with any other Broadcom software provided under a license
- * other than the GPL, without Broadcom's express prior written consent.
- *
- *
- * <<Broadcom-WL-IPTag/Open:>>
- *
- * $Id: hndpmu.c 547757 2015-04-13 10:18:04Z $
- */
-
-#include <hndpmu.h>
-#include <hndlhl.h>
-#include <sbchipc.h>
-#include <hndsoc.h>
-#include <bcmdevs.h>
-#include <osl.h>
-#include <sbgci.h>
-#include <siutils.h>
-#include <bcmutils.h>
-#ifdef BCMULP
-#include <ulp.h>
-#endif // endif
-
-#define SI_LHL_EXT_WAKE_REQ_MASK_MAGIC 0x7FBBF7FF /* magic number for LHL EXT */
-
-/* PmuRev1 has a 24-bit PMU RsrcReq timer. However it pushes all other bits
- * upward. To make the code run for all revs we use a variable to tell how
- * many bits we need to shift.
- */
-#define FLAGS_SHIFT 14
-#define LHL_ERROR(args) printf args
-
-void
-si_lhl_setup(si_t *sih, osl_t *osh)
-{
- if (CHIPID(sih->chip) == BCM43012_CHIP_ID) {
- /* Enable PMU sleep mode0 */
- LHL_REG(sih, lhl_top_pwrseq_ctl_adr, LHL_PWRSEQ_CTL, PMU_SLEEP_MODE_2);
- /* Modify as per the
- BCM43012/LHL#LHL-RecommendedsettingforvariousPMUSleepModes:
- */
- LHL_REG(sih, lhl_top_pwrup_ctl_adr, LHL_PWRUP_CTL_MASK, LHL_PWRUP_CTL);
- LHL_REG(sih, lhl_top_pwrup2_ctl_adr, LHL_PWRUP2_CTL_MASK, LHL_PWRUP2_CTL);
- LHL_REG(sih, lhl_top_pwrdn_ctl_adr, LHL_PWRDN_CTL_MASK, LHL_PWRDN_SLEEP_CNT);
- LHL_REG(sih, lhl_top_pwrdn2_ctl_adr, LHL_PWRDN2_CTL_MASK, LHL_PWRDN2_CTL);
- } else if (BCM4347_CHIP(sih->chip)) {
- if (LHL_IS_PSMODE_1(sih)) {
- LHL_REG(sih, lhl_top_pwrseq_ctl_adr, LHL_PWRSEQ_CTL, PMU_SLEEP_MODE_1);
- } else {
- LHL_REG(sih, lhl_top_pwrseq_ctl_adr, LHL_PWRSEQ_CTL, PMU_SLEEP_MODE_0);
- }
-
- LHL_REG(sih, lhl_top_pwrup_ctl_adr, LHL_PWRUP_CTL_MASK, LHL_PWRUP_CTL_4347);
- LHL_REG(sih, lhl_top_pwrup2_ctl_adr, LHL_PWRUP2_CTL_MASK, LHL_PWRUP2_CTL);
- LHL_REG(sih, lhl_top_pwrdn_ctl_adr,
- LHL_PWRDN_CTL_MASK, LHL_PWRDN_SLEEP_CNT);
- LHL_REG(sih, lhl_top_pwrdn2_ctl_adr, LHL_PWRDN2_CTL_MASK, LHL_PWRDN2_CTL);
-
- /*
- * Enable wakeup on GPIO1, PCIE clkreq and perst signal,
- * GPIO[0] is mapped to GPIO1
- * GPIO[1] is mapped to PCIE perst
- * GPIO[2] is mapped to PCIE clkreq
- */
-
- /* GPIO1 */
- /* Clear any old interrupt status */
- LHL_REG(sih, gpio_int_st_port_adr[0],
- 1 << PCIE_GPIO1_GPIO_PIN, 1 << PCIE_GPIO1_GPIO_PIN);
- /* active high level trigger */
- LHL_REG(sih, gpio_ctrl_iocfg_p_adr[PCIE_GPIO1_GPIO_PIN], ~0,
- 1 << GCI_GPIO_STS_WL_DIN_SELECT);
- LHL_REG(sih, gpio_int_en_port_adr[0],
- 1 << PCIE_GPIO1_GPIO_PIN, 1 << PCIE_GPIO1_GPIO_PIN);
- LHL_REG(sih, gpio_int_st_port_adr[0],
- 1 << PCIE_GPIO1_GPIO_PIN, 1 << PCIE_GPIO1_GPIO_PIN);
-#if !defined(_CFEZ_)
- si_gci_set_functionsel(sih, 1, CC4347_FNSEL_SAMEASPIN);
-#endif // endif
-
- /* PCIE perst */
- LHL_REG(sih, gpio_int_st_port_adr[0],
- 1 << PCIE_PERST_GPIO_PIN, 1 << PCIE_PERST_GPIO_PIN);
- LHL_REG(sih, gpio_ctrl_iocfg_p_adr[PCIE_PERST_GPIO_PIN], ~0,
- (1 << GCI_GPIO_STS_EDGE_TRIG_BIT |
- 1 << GCI_GPIO_STS_WL_DIN_SELECT));
- LHL_REG(sih, gpio_int_en_port_adr[0],
- 1 << PCIE_PERST_GPIO_PIN, 1 << PCIE_PERST_GPIO_PIN);
- LHL_REG(sih, gpio_int_st_port_adr[0],
- 1 << PCIE_PERST_GPIO_PIN, 1 << PCIE_PERST_GPIO_PIN);
-
- /* PCIE clkreq */
- LHL_REG(sih, gpio_int_st_port_adr[0],
- 1 << PCIE_CLKREQ_GPIO_PIN, 1 << PCIE_CLKREQ_GPIO_PIN);
- LHL_REG(sih, gpio_ctrl_iocfg_p_adr[PCIE_CLKREQ_GPIO_PIN], ~0,
- (1 << GCI_GPIO_STS_EDGE_TRIG_BIT |
- 1 << GCI_GPIO_STS_NEG_EDGE_TRIG_BIT |
- 1 << GCI_GPIO_STS_WL_DIN_SELECT));
- LHL_REG(sih, gpio_int_en_port_adr[0],
- 1 << PCIE_CLKREQ_GPIO_PIN, 1 << PCIE_CLKREQ_GPIO_PIN);
- LHL_REG(sih, gpio_int_st_port_adr[0],
- 1 << PCIE_CLKREQ_GPIO_PIN, 1 << PCIE_CLKREQ_GPIO_PIN);
- }
-}
-
-/* To skip this function, specify an invalid "lpo_select" value in nvram */
-int
-si_lhl_set_lpoclk(si_t *sih, osl_t *osh, uint32 lpo_force)
-{
- gciregs_t *gciregs;
- uint clk_det_cnt, status;
- int lhl_wlclk_sel;
- uint32 lpo = 0;
- int timeout = 0;
- gciregs = si_setcore(sih, GCI_CORE_ID, 0);
-
- ASSERT(gciregs != NULL);
-
- /* Apply nvram override to lpo */
- if ((lpo_force == LHL_LPO_AUTO) && ((lpo = (uint32)getintvar(NULL, "lpo_select")) == 0)) {
- lpo = LHL_OSC_32k_ENAB;
- } else {
- lpo = lpo_force;
- }
-
- /* Power up the desired LPO */
- switch (lpo) {
- case LHL_EXT_LPO_ENAB:
- LHL_REG(sih, lhl_main_ctl_adr, EXTLPO_BUF_PD, 0);
- lhl_wlclk_sel = LHL_EXT_SEL;
- break;
-
- case LHL_LPO1_ENAB:
- LHL_REG(sih, lhl_main_ctl_adr, LPO1_PD_EN, 0);
- lhl_wlclk_sel = LHL_LPO1_SEL;
- break;
-
- case LHL_LPO2_ENAB:
- LHL_REG(sih, lhl_main_ctl_adr, LPO2_PD_EN, 0);
- lhl_wlclk_sel = LHL_LPO2_SEL;
- break;
-
- case LHL_OSC_32k_ENAB:
- LHL_REG(sih, lhl_main_ctl_adr, OSC_32k_PD, 0);
- lhl_wlclk_sel = LHL_32k_SEL;
- break;
-
- default:
- goto done;
- }
-
- LHL_REG(sih, lhl_clk_det_ctl_adr,
- LHL_CLK_DET_CTL_AD_CNTR_CLK_SEL, lhl_wlclk_sel);
-
- /* Detect the desired LPO */
-
- LHL_REG(sih, lhl_clk_det_ctl_adr, LHL_CLK_DET_CTL_ADR_LHL_CNTR_EN, 0);
- LHL_REG(sih, lhl_clk_det_ctl_adr,
- LHL_CLK_DET_CTL_ADR_LHL_CNTR_CLR, LHL_CLK_DET_CTL_ADR_LHL_CNTR_CLR);
- timeout = 0;
- clk_det_cnt =
- ((R_REG(osh, &gciregs->lhl_clk_det_ctl_adr) & LHL_CLK_DET_CNT) >>
- LHL_CLK_DET_CNT_SHIFT);
- while (clk_det_cnt != 0 && timeout <= LPO_SEL_TIMEOUT) {
- OSL_DELAY(10);
- clk_det_cnt =
- ((R_REG(osh, &gciregs->lhl_clk_det_ctl_adr) & LHL_CLK_DET_CNT) >>
- LHL_CLK_DET_CNT_SHIFT);
- timeout++;
- }
-
- if (clk_det_cnt != 0) {
- LHL_ERROR(("Clock not present as clear did not work timeout = %d\n", timeout));
- goto error;
- }
- LHL_REG(sih, lhl_clk_det_ctl_adr, LHL_CLK_DET_CTL_ADR_LHL_CNTR_CLR, 0);
- LHL_REG(sih, lhl_clk_det_ctl_adr, LHL_CLK_DET_CTL_ADR_LHL_CNTR_EN,
- LHL_CLK_DET_CTL_ADR_LHL_CNTR_EN);
- clk_det_cnt =
- ((R_REG(osh, &gciregs->lhl_clk_det_ctl_adr) & LHL_CLK_DET_CNT) >>
- LHL_CLK_DET_CNT_SHIFT);
- timeout = 0;
-
- while (clk_det_cnt <= CLK_DET_CNT_THRESH && timeout <= LPO_SEL_TIMEOUT) {
- OSL_DELAY(10);
- clk_det_cnt =
- ((R_REG(osh, &gciregs->lhl_clk_det_ctl_adr) & LHL_CLK_DET_CNT) >>
- LHL_CLK_DET_CNT_SHIFT);
- timeout++;
- }
-
- if (timeout >= LPO_SEL_TIMEOUT) {
- LHL_ERROR(("LPO is not available timeout = %u\n, timeout", timeout));
- goto error;
- }
-
- /* Select the desired LPO */
-
- LHL_REG(sih, lhl_main_ctl_adr,
- LHL_MAIN_CTL_ADR_LHL_WLCLK_SEL, (lhl_wlclk_sel) << LPO_SEL_SHIFT);
-
- status = ((R_REG(osh, &gciregs->lhl_clk_status_adr) & LHL_MAIN_CTL_ADR_FINAL_CLK_SEL) ==
- (unsigned)(((1 << lhl_wlclk_sel) << LPO_FINAL_SEL_SHIFT))) ? 1 : 0;
- timeout = 0;
- while (!status && timeout <= LPO_SEL_TIMEOUT) {
- OSL_DELAY(10);
- status =
- ((R_REG(osh, &gciregs->lhl_clk_status_adr) & LHL_MAIN_CTL_ADR_FINAL_CLK_SEL) ==
- (unsigned)(((1 << lhl_wlclk_sel) << LPO_FINAL_SEL_SHIFT))) ? 1 : 0;
- timeout++;
- }
-
- if (timeout >= LPO_SEL_TIMEOUT) {
- LHL_ERROR(("LPO is not available timeout = %u\n, timeout", timeout));
- goto error;
- }
- /* Power down the rest of the LPOs */
-
- if (lpo != LHL_EXT_LPO_ENAB) {
- LHL_REG(sih, lhl_main_ctl_adr, EXTLPO_BUF_PD, EXTLPO_BUF_PD);
- }
-
- if (lpo != LHL_LPO1_ENAB) {
- LHL_REG(sih, lhl_main_ctl_adr, LPO1_PD_EN, LPO1_PD_EN);
- LHL_REG(sih, lhl_main_ctl_adr, LPO1_PD_SEL, LPO1_PD_SEL_VAL);
- }
- if (lpo != LHL_LPO2_ENAB) {
- LHL_REG(sih, lhl_main_ctl_adr, LPO2_PD_EN, LPO2_PD_EN);
- LHL_REG(sih, lhl_main_ctl_adr, LPO2_PD_SEL, LPO2_PD_SEL_VAL);
- }
- if (lpo != LHL_OSC_32k_ENAB) {
- LHL_REG(sih, lhl_main_ctl_adr, OSC_32k_PD, OSC_32k_PD);
- }
- if (lpo != RADIO_LPO_ENAB) {
- si_gci_chipcontrol(sih, CC_GCI_CHIPCTRL_06, LPO_SEL, 0);
- }
-done:
- return BCME_OK;
-error:
- ROMMABLE_ASSERT(0);
- return BCME_ERROR;
-}
-
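-/*
- * Illustrative sketch (not part of the driver): si_lhl_set_lpoclk() above repeatedly
- * reads the clock-detect counter with a small delay and a bounded retry count. The
- * read_status()/delay_us() helpers below are invented stand-ins for R_REG()/OSL_DELAY()
- * in this standalone example of the same poll-with-timeout pattern.
- */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* A counter simulates a status bit that becomes ready after a few reads. */
static uint32_t fake_reads;
static uint32_t read_status(void) { return (++fake_reads > 5) ? 0x1u : 0x0u; }
static void delay_us(unsigned us) { (void)us; /* no-op in this illustration */ }

#define POLL_INTERVAL_US 10
#define POLL_MAX_TRIES   1000

/* Poll until (status & mask) == want, or give up after a bounded number of tries,
 * mirroring the clk_det_cnt / LPO_SEL_TIMEOUT loops above. */
static bool poll_until(uint32_t mask, uint32_t want)
{
	int tries;

	for (tries = 0; tries < POLL_MAX_TRIES; tries++) {
		if ((read_status() & mask) == want)
			return true;
		delay_us(POLL_INTERVAL_US);
	}
	return false;	/* timed out; the caller decides how to recover */
}

int main(void)
{
	printf("ready: %d\n", poll_until(0x1u, 0x1u));
	return 0;
}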
-void
-si_lhl_timer_config(si_t *sih, osl_t *osh, int timer_type)
-{
- uint origidx;
- pmuregs_t *pmu = NULL;
-
- /* Remember original core before switch to chipc/pmu */
- origidx = si_coreidx(sih);
- if (AOB_ENAB(sih)) {
- pmu = si_setcore(sih, PMU_CORE_ID, 0);
- } else {
- pmu = si_setcoreidx(sih, SI_CC_IDX);
- }
-
- ASSERT(pmu != NULL);
-
- switch (timer_type) {
- case LHL_MAC_TIMER:
- /* Enable MAC Timer interrupt */
- LHL_REG(sih, lhl_wl_mactim0_intrp_adr,
- (LHL_WL_MACTIM0_INTRP_EN | LHL_WL_MACTIM0_INTRP_EDGE_TRIGGER),
- (LHL_WL_MACTIM0_INTRP_EN | LHL_WL_MACTIM0_INTRP_EDGE_TRIGGER));
-
- /* Programs bits for MACPHY_CLK_AVAIL and all its dependent bits in
- * MacResourceReqMask0.
- */
- PMU_REG(sih, mac_res_req_mask, ~0, si_pmu_rsrc_macphy_clk_deps(sih, osh, 0));
-
- /* One time init of mac_res_req_timer to enable interrupt and clock request */
- HND_PMU_SYNC_WR(sih, pmu, pmu, osh,
- PMUREGADDR(sih, pmu, pmu, mac_res_req_timer),
- ((PRRT_ALP_REQ | PRRT_HQ_REQ | PRRT_INTEN) << FLAGS_SHIFT));
-
- if (si_numd11coreunits(sih) > 1) {
- LHL_REG(sih, lhl_wl_mactim1_intrp_adr,
- (LHL_WL_MACTIM0_INTRP_EN | LHL_WL_MACTIM0_INTRP_EDGE_TRIGGER),
- (LHL_WL_MACTIM0_INTRP_EN | LHL_WL_MACTIM0_INTRP_EDGE_TRIGGER));
-
- PMU_REG(sih, mac_res_req_mask1, ~0,
- si_pmu_rsrc_macphy_clk_deps(sih, osh, 1));
-
- HND_PMU_SYNC_WR(sih, pmu, pmu, osh,
- PMUREGADDR(sih, pmu, pmu, mac_res_req_timer1),
- ((PRRT_ALP_REQ | PRRT_HQ_REQ | PRRT_INTEN) << FLAGS_SHIFT));
- }
-
- break;
-
- case LHL_ARM_TIMER:
- /* Enable ARM Timer interrupt */
- LHL_REG(sih, lhl_wl_armtim0_intrp_adr,
- (LHL_WL_ARMTIM0_INTRP_EN | LHL_WL_ARMTIM0_INTRP_EDGE_TRIGGER),
- (LHL_WL_ARMTIM0_INTRP_EN | LHL_WL_ARMTIM0_INTRP_EDGE_TRIGGER));
-
- /* Programs bits for HT_AVAIL and all its dependent bits in ResourceReqMask0 */
- PMU_REG(sih, res_req_mask, ~0, si_pmu_rsrc_ht_avail_clk_deps(sih, osh));
-
- /* One time init of res_req_timer to enable interrupt and clock request
- * For low power request only ALP (HT_AVAIL is anyway requested by res_req_mask)
- */
- HND_PMU_SYNC_WR(sih, pmu, pmu, osh,
- PMUREGADDR(sih, pmu, pmu, res_req_timer),
- ((PRRT_ALP_REQ | PRRT_HQ_REQ | PRRT_INTEN) << FLAGS_SHIFT));
- break;
- }
-
- /* Return to original core */
- si_setcoreidx(sih, origidx);
-}
-
-void
-si_lhl_timer_enable(si_t *sih)
-{
- /* Enable clks for pmu int propagation */
- PMU_REG(sih, pmuintctrl0, PMU_INTC_ALP_REQ, PMU_INTC_ALP_REQ);
-
- PMU_REG(sih, pmuintmask0, RSRC_INTR_MASK_TIMER_INT_0, RSRC_INTR_MASK_TIMER_INT_0);
- LHL_REG(sih, lhl_main_ctl_adr, LHL_FAST_WRITE_EN, LHL_FAST_WRITE_EN);
- PMU_REG(sih, pmucontrol_ext, PCTL_EXT_USE_LHL_TIMER, PCTL_EXT_USE_LHL_TIMER);
-}
-
-void
-si_lhl_ilp_config(si_t *sih, osl_t *osh, uint32 ilp_period)
-{
- gciregs_t *gciregs;
- if (CHIPID(sih->chip) == BCM43012_CHIP_ID) {
- gciregs = si_setcore(sih, GCI_CORE_ID, 0);
- ASSERT(gciregs != NULL);
- W_REG(osh, &gciregs->lhl_wl_ilp_val_adr, ilp_period);
- }
-}
-
-#ifdef BCMULP
-void
-si_lhl_disable_sdio_wakeup(si_t *sih)
-{
- /* Disable the interrupt */
- LHL_REG(sih, gpio_int_en_port_adr[0], (1 << ULP_SDIO_CMD_PIN), 0);
-
- /* Clear the pending interrupt status */
- LHL_REG(sih, gpio_int_st_port_adr[0], (1 << ULP_SDIO_CMD_PIN), (1 << ULP_SDIO_CMD_PIN));
-}
-
-void
-si_lhl_enable_sdio_wakeup(si_t *sih, osl_t *osh)
-{
-
- gciregs_t *gciregs;
- pmuregs_t *pmu;
- gciregs = si_setcore(sih, GCI_CORE_ID, 0);
- ASSERT(gciregs != NULL);
- if (CHIPID(sih->chip) == BCM43012_CHIP_ID) {
- /* For SDIO_CMD configure P8 for wake on negedge
- * LHL 0 -> edge trigger intr mode,
-		 * 1 -> neg edge trigger intr mode,
- * 6 -> din from wl side enable
- */
- OR_REG(osh, &gciregs->gpio_ctrl_iocfg_p_adr[ULP_SDIO_CMD_PIN],
- (1 << GCI_GPIO_STS_EDGE_TRIG_BIT |
- 1 << GCI_GPIO_STS_NEG_EDGE_TRIG_BIT |
- 1 << GCI_GPIO_STS_WL_DIN_SELECT));
- /* Clear any old interrupt status */
- OR_REG(osh, &gciregs->gpio_int_st_port_adr[0], 1 << ULP_SDIO_CMD_PIN);
-
- /* LHL GPIO[8] intr en , GPIO[8] is mapped to SDIO_CMD */
- /* Enable P8 to generate interrupt */
- OR_REG(osh, &gciregs->gpio_int_en_port_adr[0], 1 << ULP_SDIO_CMD_PIN);
-
- /* Clear LHL GPIO status to trigger GCI Interrupt */
- OR_REG(osh, &gciregs->gci_intstat, GCI_INTSTATUS_LHLWLWAKE);
- /* Enable LHL GPIO Interrupt to trigger GCI Interrupt */
- OR_REG(osh, &gciregs->gci_intmask, GCI_INTMASK_LHLWLWAKE);
- OR_REG(osh, &gciregs->gci_wakemask, GCI_WAKEMASK_LHLWLWAKE);
- /* Note ->Enable GCI interrupt to trigger Chipcommon interrupt
- * Set EciGciIntEn in IntMask and will be done from FCBS saved tuple
- */
-		/* Enable LHL to trigger extWake up to HT_AVAIL */
- /* LHL GPIO Interrupt is mapped to extWake[7] */
- pmu = si_setcore(sih, PMU_CORE_ID, 0);
- ASSERT(pmu != NULL);
- /* Set bit 4 and 7 in ExtWakeMask */
- W_REG(osh, &pmu->extwakemask[0], CI_ECI | CI_WECI);
- /* Program bits for MACPHY_CLK_AVAIL rsrc in ExtWakeReqMaskN */
- W_REG(osh, &pmu->extwakereqmask[0], SI_LHL_EXT_WAKE_REQ_MASK_MAGIC);
- /* Program 0 (no need to request explicitly for any backplane clk) */
- W_REG(osh, &pmu->extwakectrl, 0x0);
- /* Note: Configure MAC/Ucode to receive interrupt
- * it will be done from saved tuple using FCBS code
- */
- }
-}
-#endif /* BCMULP */
-
-lhl_reg_set_t lv_sleep_mode_4369_lhl_reg_set[] =
-{
- /* set wl_sleep_en */
- {LHL_REG_OFF(lhl_top_pwrseq_ctl_adr), (1 << 0), (1 << 0)},
-
- /* set top_pwrsw_en, top_slb_en, top_iso_en */
- {LHL_REG_OFF(lhl_top_pwrseq_ctl_adr), BCM_MASK32(5, 3), (0x0 << 3)},
-
- /* set VMUX_asr_sel_en */
- {LHL_REG_OFF(lhl_top_pwrseq_ctl_adr), (1 << 8), (1 << 8)},
-
- /* lhl_lp_main_ctl_adr, disable lp_mode_en, set CSR and ASR field enables for LV mode */
- {LHL_REG_OFF(lhl_lp_main_ctl_adr), BCM_MASK32(21, 0), 0x3F89FF},
-
- /* lhl_lp_main_ctl1_adr, set CSR field values - CSR_adj - 0.64V and trim_adj -5mV */
- {LHL_REG_OFF(lhl_lp_main_ctl1_adr), BCM_MASK32(23, 0), 0x9E8F97},
-
- /* lhl_lp_main_ctl2_adr, set ASR field values - ASR_adj - 0.76V and trim_adj +5mV */
- {LHL_REG_OFF(lhl_lp_main_ctl2_adr), BCM_MASK32(13, 0), 0x07EE},
-
- /* lhl_lp_dn_ctl_adr, set down count for CSR fields- adj, mode, overi_dis */
- {LHL_REG_OFF(lhl_lp_dn_ctl_adr), ~0, ((LHL4369_CSR_OVERI_DIS_DWN_CNT << 16) |
- (LHL4369_CSR_MODE_DWN_CNT << 8) | (LHL4369_CSR_ADJ_DWN_CNT << 0))},
-
- /* lhl_lp_up_ctl_adr, set up count for CSR fields- adj, mode, overi_dis */
- {LHL_REG_OFF(lhl_lp_up_ctl_adr), ~0, ((LHL4369_CSR_OVERI_DIS_UP_CNT << 16) |
- (LHL4369_CSR_MODE_UP_CNT << 8) | (LHL4369_CSR_ADJ_UP_CNT << 0))},
-
- /* lhl_lp_dn_ctl1_adr, set down count for hpbg_chop_dis, ASR_adj, vddc_sw_dis */
- {LHL_REG_OFF(lhl_lp_dn_ctl1_adr), ~0, ((LHL4369_VDDC_SW_DIS_DWN_CNT << 24) |
- (LHL4369_ASR_ADJ_DWN_CNT << 16) | (LHL4369_HPBG_CHOP_DIS_DWN_CNT << 0))},
-
- /* lhl_lp_up_ctl1_adr, set up count for hpbg_chop_dis, ASR_adj, vddc_sw_dis */
- {LHL_REG_OFF(lhl_lp_up_ctl1_adr), ~0, ((LHL4369_VDDC_SW_DIS_UP_CNT << 24) |
- (LHL4369_ASR_ADJ_UP_CNT << 16) | (LHL4369_HPBG_CHOP_DIS_UP_CNT << 0))},
-
- /* lhl_lp_dn_ctl4_adr, set down count for ASR fields -
- * clk4m_dis, lppfm_mode, mode_sel, manual_mode
- */
- {LHL_REG_OFF(lhl_lp_dn_ctl4_adr), ~0, ((LHL4369_ASR_MANUAL_MODE_DWN_CNT << 24) |
- (LHL4369_ASR_MODE_SEL_DWN_CNT << 16) | (LHL4369_ASR_LPPFM_MODE_DWN_CNT << 8) |
- (LHL4369_ASR_CLK4M_DIS_DWN_CNT << 0))},
-
- /* lhl_lp_up_ctl4_adr, set up count for ASR fields -
- * clk4m_dis, lppfm_mode, mode_sel, manual_mode
- */
- {LHL_REG_OFF(lhl_lp_up_ctl4_adr), ~0, ((LHL4369_ASR_MANUAL_MODE_UP_CNT << 24) |
- (LHL4369_ASR_MODE_SEL_UP_CNT << 16)| (LHL4369_ASR_LPPFM_MODE_UP_CNT << 8) |
- (LHL4369_ASR_CLK4M_DIS_UP_CNT << 0))},
-
- /* lhl_lp_dn_ctl3_adr, set down count for hpbg_pu, srbg_ref, ASR_overi_dis,
- * CSR_pfm_pwr_slice_en
- */
- {LHL_REG_OFF(lhl_lp_dn_ctl3_adr), ~0, ((LHL4369_PFM_PWR_SLICE_DWN_CNT << 24) |
- (LHL4369_ASR_OVERI_DIS_DWN_CNT << 16) | (LHL4369_SRBG_REF_SEL_DWN_CNT << 8) |
- (LHL4369_HPBG_PU_EN_DWN_CNT << 0))},
-
- /* lhl_lp_up_ctl3_adr, set up count for hpbg_pu, srbg_ref, ASR_overi_dis,
- * CSR_pfm_pwr_slice_en
- */
- {LHL_REG_OFF(lhl_lp_up_ctl3_adr), ~0, ((LHL4369_PFM_PWR_SLICE_UP_CNT << 24) |
- (LHL4369_ASR_OVERI_DIS_UP_CNT << 16) | (LHL4369_SRBG_REF_SEL_UP_CNT << 8) |
- (LHL4369_HPBG_PU_EN_UP_CNT << 0))},
-
- /* lhl_lp_dn_ctl2_adr, set down count for CSR_trim_adj */
- {LHL_REG_OFF(lhl_lp_dn_ctl2_adr), ~0, (LHL4369_CSR_TRIM_ADJ_DWN_CNT << 16)},
-
- /* lhl_lp_up_ctl2_adr, set up count for CSR_trim_adj */
- {LHL_REG_OFF(lhl_lp_up_ctl2_adr), ~0, (LHL4369_CSR_TRIM_ADJ_UP_CNT << 16)},
-
- /* lhl_lp_dn_ctl5_adr, set down count for ASR_trim_adj */
- {LHL_REG_OFF(lhl_lp_dn_ctl5_adr), ~0, (LHL4369_ASR_TRIM_ADJ_DWN_CNT << 0)},
-
- /* lhl_lp_up_ctl5_adr, set down count for ASR_trim_adj */
- {LHL_REG_OFF(lhl_lp_up_ctl5_adr), ~0, (LHL4369_ASR_TRIM_ADJ_UP_CNT << 0)},
-
- /* Change the default down count values for the resources */
- /* lhl_top_pwrdn_ctl_adr, set down count for top_level_sleep, iso, slb and pwrsw */
- {LHL_REG_OFF(lhl_top_pwrdn_ctl_adr), ~0, ((LHL4369_PWRSW_EN_DWN_CNT << 24) |
- (LHL4369_SLB_EN_DWN_CNT << 16) | (LHL4369_ISO_EN_DWN_CNT << 8))},
-
- /* lhl_top_pwrdn2_ctl_adr, set down count for VMUX_asr_sel */
- {LHL_REG_OFF(lhl_top_pwrdn2_ctl_adr), ~0, (LHL4369_VMUX_ASR_SEL_DWN_CNT << 16)},
-
- /* Change the default up count values for the resources */
- /* lhl_top_pwrup_ctl_adr, set up count for top_level_sleep, iso, slb and pwrsw */
- {LHL_REG_OFF(lhl_top_pwrup_ctl_adr), ~0, ((LHL4369_PWRSW_EN_UP_CNT << 24) |
- (LHL4369_SLB_EN_UP_CNT << 16) | (LHL4369_ISO_EN_UP_CNT << 8))},
-
- /* lhl_top_pwrdn2_ctl_adr, set down count for VMUX_asr_sel */
- {LHL_REG_OFF(lhl_top_pwrup2_ctl_adr), ~0, ((LHL4369_VMUX_ASR_SEL_UP_CNT << 16))},
-
- /* Enable lhl interrupt */
- {LHL_REG_OFF(gci_intmask), (1 << 30), (1 << 30)},
-
- /* Enable LHL Wake up */
- {LHL_REG_OFF(gci_wakemask), (1 << 30), (1 << 30)},
-
- /* Making forceOTPpwrOn 0 */
- {LHL_REG_OFF(otpcontrol), (1 << 16), 0}
-};
-
-/* LV sleep mode summary:
- * LV mode is where both ABUCK and CBUCK are programmed to low voltages during
- * sleep, and VMUX selects ABUCK as VDDOUT_AON. LPLDO needs to power off.
- * With ASR ON, LPLDO OFF
- */
-void
-si_set_lv_sleep_mode_lhl_config_4369(si_t *sih)
-{
- uint i;
- uint coreidx = si_findcoreidx(sih, GCI_CORE_ID, 0);
- lhl_reg_set_t *regs = lv_sleep_mode_4369_lhl_reg_set;
-
- /* Enable LHL LV mode:
- * lhl_top_pwrseq_ctl_adr, set wl_sleep_en, iso_en, slb_en, pwrsw_en,VMUX_asr_sel_en
- */
- for (i = 0; i < ARRAYSIZE(lv_sleep_mode_4369_lhl_reg_set); i++) {
- si_corereg(sih, coreidx, regs[i].offset, regs[i].mask, regs[i].val);
- }
-}
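/*
 * Illustrative sketch (not part of the driver): the lv_sleep_mode_4369_lhl_reg_set
 * table above drives si_corereg() with {offset, mask, value} triples. The reg_set_t,
 * reg_write_masked() and fake_regs names below are invented for this standalone
 * example of the same table-driven masked-register-write pattern.
 */
#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

typedef struct {
	uint32_t offset;	/* register offset */
	uint32_t mask;		/* bits to change */
	uint32_t val;		/* new value for those bits */
} reg_set_t;

/* Fake register space standing in for the real core registers. */
static uint32_t fake_regs[16];

/* Read-modify-write, as si_corereg(sih, coreidx, offset, mask, val) does. */
static void reg_write_masked(uint32_t offset, uint32_t mask, uint32_t val)
{
	uint32_t idx = offset / 4;
	fake_regs[idx] = (fake_regs[idx] & ~mask) | (val & mask);
}

static const reg_set_t sleep_mode_regs[] = {
	{ 0x00, 1u << 0, 1u << 0 },	/* e.g. set a sleep-enable bit */
	{ 0x04, 0x38u,   0x00u   },	/* e.g. clear a 3-bit control field */
};

static void apply_reg_table(const reg_set_t *regs, size_t n)
{
	size_t i;
	for (i = 0; i < n; i++)
		reg_write_masked(regs[i].offset, regs[i].mask, regs[i].val);
}

int main(void)
{
	apply_reg_table(sleep_mode_regs, sizeof(sleep_mode_regs) / sizeof(sleep_mode_regs[0]));
	printf("reg0 = 0x%x\n", fake_regs[0]);
	return 0;
}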
+++ /dev/null
-/*
- * Utility routines for configuring different memories in Broadcom chips.
- *
- * Copyright (C) 1999-2019, Broadcom.
- *
- * Unless you and Broadcom execute a separate written software license
- * agreement governing use of this software, this software is licensed to you
- * under the terms of the GNU General Public License version 2 (the "GPL"),
- * available at http://www.broadcom.com/licenses/GPLv2.php, with the
- * following added to such license:
- *
- * As a special exception, the copyright holders of this software give you
- * permission to link this software with independent modules, and to copy and
- * distribute the resulting executable under terms of your choice, provided that
- * you also meet, for each linked independent module, the terms and conditions of
- * the license of that module. An independent module is a module which is not
- * derived from this software. The special exception does not apply to any
- * modifications of the software.
- *
- * Notwithstanding the above, under no circumstances may you combine this
- * software in any way with any other Broadcom software provided under a license
- * other than the GPL, without Broadcom's express prior written consent.
- *
- *
- * <<Broadcom-WL-IPTag/Open:>>
- *
- * $Id: $
- */
-
-#include <typedefs.h>
-#include <sbchipc.h>
-#include <hndsoc.h>
-#include <bcmdevs.h>
-#include <osl.h>
-#include <sbgci.h>
-#include <siutils.h>
-#include <bcmutils.h>
-#include <hndmem.h>
-
-#define IS_MEMTYPE_VALID(mem) ((mem >= MEM_SOCRAM) && (mem < MEM_MAX))
-#define IS_MEMCONFIG_VALID(cfg) ((cfg >= PDA_CONFIG_CLEAR) && (cfg < PDA_CONFIG_MAX))
-
-/* Returns the number of banks in a given memory */
-int
-hndmem_num_banks(si_t *sih, int mem)
-{
- uint32 savecore, mem_info;
- int num_banks = 0;
- gciregs_t *gciregs;
- osl_t *osh = si_osh(sih);
-
- if (!IS_MEMTYPE_VALID(mem)) {
- goto exit;
- }
-
- savecore = si_coreidx(sih);
-
- /* TODO: Check whether SOCRAM core is present or not. If not, bail out */
- /* In future we need to add code for TCM based chips as well */
- if (!si_setcore(sih, SOCRAM_CORE_ID, 0)) {
- goto exit;
- }
-
- if (sih->gcirev >= 9) {
- gciregs = si_setcore(sih, GCI_CORE_ID, 0);
-
- mem_info = R_REG(osh, &gciregs->wlan_mem_info);
-
- switch (mem) {
- case MEM_SOCRAM:
- num_banks = (mem_info & WLAN_MEM_INFO_REG_NUMSOCRAMBANKS_MASK) >>
- WLAN_MEM_INFO_REG_NUMSOCRAMBANKS_SHIFT;
- break;
- case MEM_BM:
- num_banks = (mem_info & WLAN_MEM_INFO_REG_NUMD11MACBM_MASK) >>
- WLAN_MEM_INFO_REG_NUMD11MACBM_SHIFT;
- break;
- case MEM_UCM:
- num_banks = (mem_info & WLAN_MEM_INFO_REG_NUMD11MACUCM_MASK) >>
- WLAN_MEM_INFO_REG_NUMD11MACUCM_SHIFT;
- break;
- case MEM_SHM:
- num_banks = (mem_info & WLAN_MEM_INFO_REG_NUMD11MACSHM_MASK) >>
- WLAN_MEM_INFO_REG_NUMD11MACSHM_SHIFT;
- break;
- default:
- ASSERT(0);
- break;
- }
- } else {
- /* TODO: Figure out bank information using SOCRAM registers */
- }
-
- si_setcoreidx(sih, savecore);
-exit:
- return num_banks;
-}
-
-/* Returns the size of a given bank in a given memory */
-int
-hndmem_bank_size(si_t *sih, hndmem_type_t mem, int bank_num)
-{
- uint32 savecore, bank_info, reg_data;
- int bank_sz = 0;
- gciregs_t *gciregs;
- osl_t *osh = si_osh(sih);
-
- if (!IS_MEMTYPE_VALID(mem)) {
- goto exit;
- }
-
- savecore = si_coreidx(sih);
-
- /* TODO: Check whether SOCRAM core is present or not. If not, bail out */
- /* In future we need to add code for TCM based chips as well */
- if (!si_setcore(sih, SOCRAM_CORE_ID, 0)) {
- goto exit;
- }
-
- if (sih->gcirev >= 9) {
- gciregs = si_setcore(sih, GCI_CORE_ID, 0);
-
- reg_data = ((mem &
- GCI_INDIRECT_ADDRESS_REG_GPIOINDEX_MASK) <<
- GCI_INDIRECT_ADDRESS_REG_GPIOINDEX_SHIFT) |
- ((bank_num & GCI_INDIRECT_ADDRESS_REG_REGINDEX_MASK)
- << GCI_INDIRECT_ADDRESS_REG_REGINDEX_SHIFT);
- W_REG(osh, &gciregs->gci_indirect_addr, reg_data);
-
- bank_info = R_REG(osh, &gciregs->wlan_bankxinfo);
- bank_sz = (bank_info & WLAN_BANKXINFO_BANK_SIZE_MASK) >>
- WLAN_BANKXINFO_BANK_SIZE_SHIFT;
- } else {
- /* TODO: Figure out bank size using SOCRAM registers */
- }
-
- si_setcoreidx(sih, savecore);
-exit:
- return bank_sz;
-}
-
-/* Returns the start address of given memory */
-uint32
-hndmem_mem_base(si_t *sih, hndmem_type_t mem)
-{
- uint32 savecore, base_addr = 0;
-
- /* Currently only support of SOCRAM is available in hardware */
- if (mem != MEM_SOCRAM) {
- goto exit;
- }
-
- savecore = si_coreidx(sih);
-
- if (si_setcore(sih, SOCRAM_CORE_ID, 0))
- {
- base_addr = si_get_slaveport_addr(sih, CORE_SLAVE_PORT_1,
- CORE_BASE_ADDR_0, SOCRAM_CORE_ID, 0);
- } else {
- /* TODO: Add code to get the base address of TCM */
- base_addr = 0;
- }
-
- si_setcoreidx(sih, savecore);
-
-exit:
- return base_addr;
-}
-
-#ifdef BCMDEBUG
-char *hndmem_type_str[] =
- {
- "SOCRAM", /* 0 */
- "BM", /* 1 */
- "UCM", /* 2 */
- "SHM", /* 3 */
- };
-
-/* Dumps the complete memory information */
-void
-hndmem_dump_meminfo_all(si_t *sih)
-{
- int mem, bank, bank_cnt, bank_sz;
-
- for (mem = MEM_SOCRAM; mem < MEM_MAX; mem++) {
- bank_cnt = hndmem_num_banks(sih, mem);
-
- printf("\nMemtype: %s\n", hndmem_type_str[mem]);
- for (bank = 0; bank < bank_cnt; bank++) {
- bank_sz = hndmem_bank_size(sih, mem, bank);
- printf("Bank-%d: %d KB\n", bank, bank_sz);
- }
- }
-}
-#endif /* BCMDEBUG */
-
-/* Configures the Sleep PDA for a particular bank for a given memory type */
-int
-hndmem_sleeppda_bank_config(si_t *sih, hndmem_type_t mem, int bank_num,
- hndmem_config_t config, uint32 pda)
-{
- uint32 savecore, reg_data;
- gciregs_t *gciregs;
- int err = BCME_OK;
- osl_t *osh = si_osh(sih);
-
- /* TODO: Check whether SOCRAM core is present or not. If not, bail out */
- /* In future we need to add code for TCM based chips as well */
- if (!si_setcore(sih, SOCRAM_CORE_ID, 0)) {
- err = BCME_UNSUPPORTED;
- goto exit;
- }
-
- /* Sleep PDA is supported only by GCI rev >= 9 */
- if (sih->gcirev < 9) {
- err = BCME_UNSUPPORTED;
- goto exit;
- }
-
- if (!IS_MEMTYPE_VALID(mem)) {
- err = BCME_BADOPTION;
- goto exit;
- }
-
- if (!IS_MEMCONFIG_VALID(config)) {
- err = BCME_BADOPTION;
- goto exit;
- }
-
- savecore = si_coreidx(sih);
- gciregs = si_setcore(sih, GCI_CORE_ID, 0);
-
- reg_data = ((mem &
- GCI_INDIRECT_ADDRESS_REG_GPIOINDEX_MASK) <<
- GCI_INDIRECT_ADDRESS_REG_GPIOINDEX_SHIFT) |
- ((bank_num & GCI_INDIRECT_ADDRESS_REG_REGINDEX_MASK)
- << GCI_INDIRECT_ADDRESS_REG_REGINDEX_SHIFT);
-
- W_REG(osh, &gciregs->gci_indirect_addr, reg_data);
-
- if (config == PDA_CONFIG_SET_PARTIAL) {
- W_REG(osh, &gciregs->wlan_bankxsleeppda, pda);
- W_REG(osh, &gciregs->wlan_bankxkill, 0);
- }
- else if (config == PDA_CONFIG_SET_FULL) {
- W_REG(osh, &gciregs->wlan_bankxsleeppda, WLAN_BANKX_SLEEPPDA_REG_SLEEPPDA_MASK);
- W_REG(osh, &gciregs->wlan_bankxkill, WLAN_BANKX_PKILL_REG_SLEEPPDA_MASK);
- } else {
- W_REG(osh, &gciregs->wlan_bankxsleeppda, 0);
- W_REG(osh, &gciregs->wlan_bankxkill, 0);
- }
-
- si_setcoreidx(sih, savecore);
-
-exit:
- return err;
-}
-
-/* Configures the Active PDA for a particular bank for a given memory type */
-int
-hndmem_activepda_bank_config(si_t *sih, hndmem_type_t mem,
- int bank_num, hndmem_config_t config, uint32 pda)
-{
- uint32 savecore, reg_data;
- gciregs_t *gciregs;
- int err = BCME_OK;
- osl_t *osh = si_osh(sih);
-
- if (!IS_MEMTYPE_VALID(mem)) {
- err = BCME_BADOPTION;
- goto exit;
- }
-
- if (!IS_MEMCONFIG_VALID(config)) {
- err = BCME_BADOPTION;
- goto exit;
- }
-
- savecore = si_coreidx(sih);
-
- /* TODO: Check whether SOCRAM core is present or not. If not, bail out */
- /* In future we need to add code for TCM based chips as well */
- if (!si_setcore(sih, SOCRAM_CORE_ID, 0)) {
- err = BCME_UNSUPPORTED;
- goto exit;
- }
-
- if (sih->gcirev >= 9) {
- gciregs = si_setcore(sih, GCI_CORE_ID, 0);
-
- reg_data = ((mem &
- GCI_INDIRECT_ADDRESS_REG_GPIOINDEX_MASK) <<
- GCI_INDIRECT_ADDRESS_REG_GPIOINDEX_SHIFT) |
- ((bank_num & GCI_INDIRECT_ADDRESS_REG_REGINDEX_MASK)
- << GCI_INDIRECT_ADDRESS_REG_REGINDEX_SHIFT);
-
- W_REG(osh, &gciregs->gci_indirect_addr, reg_data);
-
- if (config == PDA_CONFIG_SET_PARTIAL) {
- W_REG(osh, &gciregs->wlan_bankxactivepda, pda);
- }
- else if (config == PDA_CONFIG_SET_FULL) {
- W_REG(osh, &gciregs->wlan_bankxactivepda,
- WLAN_BANKX_SLEEPPDA_REG_SLEEPPDA_MASK);
- } else {
- W_REG(osh, &gciregs->wlan_bankxactivepda, 0);
- }
- } else {
- /* TODO: Configure SOCRAM PDA using SOCRAM registers */
- err = BCME_UNSUPPORTED;
- }
-
- si_setcoreidx(sih, savecore);
-
-exit:
- return err;
-}
-
-/* Configures the Sleep PDA for all the banks for a given memory type */
-int
-hndmem_sleeppda_config(si_t *sih, hndmem_type_t mem, hndmem_config_t config)
-{
- int bank;
- int num_banks = hndmem_num_banks(sih, mem);
- int err = BCME_OK;
-
- /* Sleep PDA is supported only by GCI rev >= 9 */
- if (sih->gcirev < 9) {
- err = BCME_UNSUPPORTED;
- goto exit;
- }
-
- if (!IS_MEMTYPE_VALID(mem)) {
- err = BCME_BADOPTION;
- goto exit;
- }
-
- if (!IS_MEMCONFIG_VALID(config)) {
- err = BCME_BADOPTION;
- goto exit;
- }
-
- for (bank = 0; bank < num_banks; bank++)
- {
- err = hndmem_sleeppda_bank_config(sih, mem, bank, config, 0);
- }
-
-exit:
- return err;
-}
-
-/* Configures the Active PDA for all the banks for a given memory type */
-int
-hndmem_activepda_config(si_t *sih, hndmem_type_t mem, hndmem_config_t config)
-{
- int bank;
- int num_banks = hndmem_num_banks(sih, mem);
- int err = BCME_OK;
-
- if (!IS_MEMTYPE_VALID(mem)) {
- err = BCME_BADOPTION;
- goto exit;
- }
-
- if (!IS_MEMCONFIG_VALID(config)) {
- err = BCME_BADOPTION;
- goto exit;
- }
-
- for (bank = 0; bank < num_banks; bank++)
- {
- err = hndmem_activepda_bank_config(sih, mem, bank, config, 0);
- }
-
-exit:
- return err;
-}
-
-/* Turn off/on all the possible banks in a given memory range.
- * Currently this works only for SOCRAM as this is restricted by HW.
- */
-int
-hndmem_activepda_mem_config(si_t *sih, hndmem_type_t mem, uint32 mem_start,
- uint32 size, hndmem_config_t config)
-{
- int bank, bank_sz, num_banks;
- int mem_end;
- int bank_start_addr, bank_end_addr;
- int err = BCME_OK;
-
-	/* We can get the bank size only for SOCRAM/TCM. Support is not available
- * for other memories (BM, UCM and SHM)
- */
- if (mem != MEM_SOCRAM) {
- err = BCME_UNSUPPORTED;
- goto exit;
- }
-
- num_banks = hndmem_num_banks(sih, mem);
- bank_start_addr = hndmem_mem_base(sih, mem);
- mem_end = mem_start + size - 1;
-
- for (bank = 0; bank < num_banks; bank++)
- {
-		/* Bank size is specified in the bankXinfo register in terms of KB */
- bank_sz = 1024 * hndmem_bank_size(sih, mem, bank);
-
- bank_end_addr = bank_start_addr + bank_sz - 1;
-
- if (config == PDA_CONFIG_SET_FULL) {
- /* Check if the bank is completely overlapping with the given mem range */
- if ((mem_start <= bank_start_addr) && (mem_end >= bank_end_addr)) {
- err = hndmem_activepda_bank_config(sih, mem, bank, config, 0);
- }
- } else {
-			/* Check if the bank is completely overlapped with the given mem range */
- if (((mem_start <= bank_start_addr) && (mem_end >= bank_end_addr)) ||
-			/* Check if the bank is partially overlapped with the given range */
- ((mem_start <= bank_end_addr) && (mem_end >= bank_start_addr))) {
- err = hndmem_activepda_bank_config(sih, mem, bank, config, 0);
- }
- }
-
- bank_start_addr += bank_sz;
- }
-
-exit:
- return err;
-}
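/*
 * Illustrative sketch (not part of the driver): hndmem_activepda_mem_config() above
 * decides per bank whether the requested [mem_start, mem_end] range fully contains or
 * merely overlaps the bank's [bank_start, bank_end] range. The range_* helpers below
 * are invented for this standalone example of those two interval checks.
 */
#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

/* True when [b_start, b_end] lies entirely inside [a_start, a_end]
 * (the PDA_CONFIG_SET_FULL case above). */
static bool range_contains(uint32_t a_start, uint32_t a_end,
                           uint32_t b_start, uint32_t b_end)
{
	return a_start <= b_start && a_end >= b_end;
}

/* True when the two closed ranges share at least one address
 * (the partial-overlap case above). */
static bool range_overlaps(uint32_t a_start, uint32_t a_end,
                           uint32_t b_start, uint32_t b_end)
{
	return a_start <= b_end && a_end >= b_start;
}

int main(void)
{
	/* bank [0x1000,0x1fff]: fully inside request [0x0000,0x3fff],
	 * only partially covered by request [0x1800,0x3fff]. */
	assert(range_contains(0x0000, 0x3fff, 0x1000, 0x1fff));
	assert(!range_contains(0x1800, 0x3fff, 0x1000, 0x1fff));
	assert(range_overlaps(0x1800, 0x3fff, 0x1000, 0x1fff));
	return 0;
}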
* Misc utility routines for accessing PMU corerev specific features
* of the SiliconBackplane-based Broadcom chips.
*
- * Copyright (C) 1999-2019, Broadcom.
- *
+ * Copyright (C) 1999-2017, Broadcom Corporation
+ *
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
* under the terms of the GNU General Public License version 2 (the "GPL"),
* available at http://www.broadcom.com/licenses/GPLv2.php, with the
* following added to such license:
- *
+ *
* As a special exception, the copyright holders of this software give you
* permission to link this software with independent modules, and to copy and
* distribute the resulting executable under terms of your choice, provided that
* the license of that module. An independent module is a module which is not
* derived from this software. The special exception does not apply to any
* modifications of the software.
- *
+ *
* Notwithstanding the above, under no circumstances may you combine this
* software in any way with any other Broadcom software provided under a license
* other than the GPL, without Broadcom's express prior written consent.
*
* <<Broadcom-WL-IPTag/Open:>>
*
- * $Id: hndpmu.c 783841 2018-10-09 06:24:16Z $
+ * $Id: hndpmu.c 657872 2016-09-02 22:17:34Z $
*/
-/**
- * @file
+
+/*
* Note: this file contains PLL/FLL related functions. A chip can contain multiple PLLs/FLLs.
* However, in the context of this file the baseband ('BB') PLL/FLL is referred to.
*
- * Throughout this code, the prefixes 'pmu1_' and 'pmu2_' are used.
+ * Throughout this code, the prefixes 'pmu0_', 'pmu1_' and 'pmu2_' are used.
* They refer to different revisions of the PMU (which is at revision 18 @ Apr 25, 2012)
* pmu1_ marks the transition from PLL to ADFLL (Digital Frequency Locked Loop). It supports
* fractional frequency generation. pmu2_ does not support fractional frequency generation.
#include <bcmdevs.h>
#include <hndsoc.h>
#include <sbchipc.h>
-#include <hndchipc.h>
#include <hndpmu.h>
-#include <hndlhl.h>
#if defined(BCMULP)
#include <ulp.h>
#endif /* defined(BCMULP) */
#include <sbgci.h>
#ifdef EVENT_LOG_COMPILE
#include <event_log.h>
-#endif // endif
+#endif
#include <sbgci.h>
-#include <lpflags.h>
#define PMU_ERROR(args)
* to be on except on private builds.
*/
#define PMU_NONE(args)
-#define flags_shift 14
/** contains resource bit positions for a specific chip */
struct rsc_per_chip_s {
uint8 macphy_clkavail;
uint8 ht_start;
uint8 otp_pu;
- uint8 macphy_aux_clkavail;
};
typedef struct rsc_per_chip_s rsc_per_chip_t;
-#if defined(BCMPMU_STATS) && !defined(BCMPMU_STATS_DISABLED)
-bool _pmustatsenab = TRUE;
+
+/* SDIO Pad drive strength to select value mappings.
+ * The last strength value in each table must be 0 (the tri-state value).
+ */
+typedef struct {
+ uint8 strength; /* Pad Drive Strength in mA */
+ uint8 sel; /* Chip-specific select value */
+} sdiod_drive_str_t;
+
+/* SDIO Drive Strength to sel value table for PMU Rev 1 */
+static const sdiod_drive_str_t sdiod_drive_strength_tab1[] = {
+ {4, 0x2},
+ {2, 0x3},
+ {1, 0x0},
+ {0, 0x0} };
+
+/* SDIO Drive Strength to sel value table for PMU Rev 2, 3 */
+static const sdiod_drive_str_t sdiod_drive_strength_tab2[] = {
+ {12, 0x7},
+ {10, 0x6},
+ {8, 0x5},
+ {6, 0x4},
+ {4, 0x2},
+ {2, 0x1},
+ {0, 0x0} };
+
+
+/* SDIO Drive Strength to sel value table for PMU Rev 8 (1.8V) */
+static const sdiod_drive_str_t sdiod_drive_strength_tab3[] = {
+ {32, 0x7},
+ {26, 0x6},
+ {22, 0x5},
+ {16, 0x4},
+ {12, 0x3},
+ {8, 0x2},
+ {4, 0x1},
+ {0, 0x0} };
+
+/* SDIO Drive Strength to sel value table for PMU Rev 11 (1.8v) */
+static const sdiod_drive_str_t sdiod_drive_strength_tab4_1v8[] = {
+ {32, 0x6},
+ {26, 0x7},
+ {22, 0x4},
+ {16, 0x5},
+ {12, 0x2},
+ {8, 0x3},
+ {4, 0x0},
+ {0, 0x1} };
+
+/* SDIO Drive Strength to sel value table for PMU Rev 11 (1.2v) */
+
+/* SDIO Drive Strength to sel value table for PMU Rev 11 (2.5v) */
+
+/* SDIO Drive Strength to sel value table for PMU Rev 13 (1.8v) */
+static const sdiod_drive_str_t sdiod_drive_strength_tab5_1v8[] = {
+ {6, 0x7},
+ {5, 0x6},
+ {4, 0x5},
+ {3, 0x4},
+ {2, 0x2},
+ {1, 0x1},
+ {0, 0x0} };
+
+/* SDIO Drive Strength to sel value table for PMU Rev 13 (3.3v) */
+
+/** SDIO Drive Strength to sel value table for PMU Rev 17 (1.8v) */
+static const sdiod_drive_str_t sdiod_drive_strength_tab6_1v8[] = {
+ {3, 0x3},
+ {2, 0x2},
+ {1, 0x1},
+ {0, 0x0} };
+
+
+/**
+ * SDIO Drive Strength to sel value table for 43143 PMU Rev 17, see Confluence 43143 Toplevel
+ * architecture page, section 'PMU Chip Control 1 Register definition', click link to picture
+ * BCM43143_sel_sdio_signals.jpg. Valid after PMU Chip Control 0 Register, bit31 (override) has
+ * been written '1'.
+ */
+#if !defined(BCM_SDIO_VDDIO) || BCM_SDIO_VDDIO == 33
+
+static const sdiod_drive_str_t sdiod_drive_strength_tab7_3v3[] = {
+ /* note: for 14, 10, 6 and 2mA hw timing is not met according to rtl team */
+ {16, 0x7},
+ {12, 0x5},
+ {8, 0x3},
+ {4, 0x1} }; /* note: 43143 does not support tristate */
+
#else
-bool _pmustatsenab = FALSE;
-#endif /* BCMPMU_STATS */
+
+static const sdiod_drive_str_t sdiod_drive_strength_tab7_1v8[] = {
+ /* note: for 7, 5, 3 and 1mA hw timing is not met according to rtl team */
+ {8, 0x7},
+ {6, 0x5},
+ {4, 0x3},
+ {2, 0x1} }; /* note: 43143 does not support tristate */
+
+#endif /* BCM_SDIO_VDDIO */
+
+#define SDIOD_DRVSTR_KEY(chip, pmu) (((chip) << 16) | (pmu))
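SDIOD_DRVSTR_KEY simply packs the chip id into the upper 16 bits and the PMU revision into the lower bits, so a single switch statement can dispatch on the (chip, pmurev) pair. A minimal sketch of the same idea, assuming an equivalent ex_* macro and made-up chip/revision numbers:

/* Sketch only: ex_* names and the chip/revision numbers are invented. */
#include <stdio.h>
#include <stdint.h>

#define EX_DRVSTR_KEY(chip, pmu) (((uint32_t)(chip) << 16) | (pmu))

int main(void)
{
	uint32_t key = EX_DRVSTR_KEY(0x4336, 11);

	switch (key) {
	case EX_DRVSTR_KEY(0x4336, 8):
	case EX_DRVSTR_KEY(0x4336, 11):
		/* same chip, two supported PMU revisions share one case body */
		printf("key 0x%08x: use the 4336 drive strength tables\n", (unsigned)key);
		break;
	default:
		printf("key 0x%08x: no drive strength init for this chip/pmurev\n", (unsigned)key);
		break;
	}
	return 0;
}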
/**
* Balance between stable SDIO operation and power consumption is achieved using this function.
void
si_sdiod_drive_strength_init(si_t *sih, osl_t *osh, uint32 drivestrength)
{
- /*
- * Note:
- * This function used to set the SDIO drive strength via PMU_CHIPCTL1 for the
- * 43143, 4330, 4334, 4336, 43362 chips. These chips are now no longer supported, so
- * the code has been deleted.
- * Newer chips have the SDIO drive strength setting via a GCI Chip Control register,
- * but the bit definitions are chip-specific. We are keeping this function available
- * (accessed via DHD 'sdiod_drive' IOVar) in case these newer chips need to provide access.
- */
- UNUSED_PARAMETER(sih);
- UNUSED_PARAMETER(osh);
- UNUSED_PARAMETER(drivestrength);
-}
-
-void
-si_switch_pmu_dependency(si_t *sih, uint mode)
-{
-#ifdef DUAL_PMU_SEQUENCE
- osl_t *osh = si_osh(sih);
- uint32 current_res_state;
- uint32 min_mask, max_mask;
- const pmu_res_depend_t *pmu_res_depend_table = NULL;
- uint pmu_res_depend_table_sz = 0;
- uint origidx;
+ sdiod_drive_str_t *str_tab = NULL;
+ uint32 str_mask = 0; /* only alter desired bits in PMU chipcontrol 1 register */
+ uint32 str_shift = 0;
+ uint32 str_ovr_pmuctl = PMU_CHIPCTL0; /* PMU chipcontrol register containing override bit */
+ uint32 str_ovr_pmuval = 0; /* position of bit within this register */
pmuregs_t *pmu;
- chipcregs_t *cc;
- BCM_REFERENCE(cc);
+ uint origidx;
+ if (!(sih->cccaps & CC_CAP_PMU)) {
+ return;
+ }
+ BCM_REFERENCE(sdiod_drive_strength_tab1);
+ BCM_REFERENCE(sdiod_drive_strength_tab2);
+ /* Remember original core before switch to chipc/pmu */
origidx = si_coreidx(sih);
if (AOB_ENAB(sih)) {
pmu = si_setcore(sih, PMU_CORE_ID, 0);
- cc = si_setcore(sih, CC_CORE_ID, 0);
} else {
pmu = si_setcoreidx(sih, SI_CC_IDX);
- cc = si_setcoreidx(sih, SI_CC_IDX);
}
ASSERT(pmu != NULL);
- current_res_state = R_REG(osh, &pmu->res_state);
- min_mask = R_REG(osh, &pmu->min_res_mask);
- max_mask = R_REG(osh, &pmu->max_res_mask);
- W_REG(osh, &pmu->min_res_mask, (min_mask | current_res_state));
- switch (mode) {
- case PMU_4364_1x1_MODE:
- {
- if (CHIPID(sih->chip) == BCM4364_CHIP_ID) {
- pmu_res_depend_table = bcm4364a0_res_depend_1x1;
- pmu_res_depend_table_sz =
- ARRAYSIZE(bcm4364a0_res_depend_1x1);
- max_mask = PMU_4364_MAX_MASK_1x1;
- W_REG(osh, &pmu->res_table_sel, RES4364_SR_SAVE_RESTORE);
- W_REG(osh, &pmu->res_updn_timer, PMU_4364_SAVE_RESTORE_UPDNTIME_1x1);
-#if defined(SAVERESTORE)
- if (SR_ENAB()) {
- /* Disable 3x3 SR engine */
- W_REG(osh, &cc->sr1_control0,
- CC_SR0_4364_SR_ENG_CLK_EN |
- CC_SR0_4364_SR_RSRC_TRIGGER |
- CC_SR0_4364_SR_WD_MEM_MIN_DIV |
- CC_SR0_4364_SR_INVERT_CLK |
- CC_SR0_4364_SR_ENABLE_HT |
- CC_SR0_4364_SR_ALLOW_PIC |
- CC_SR0_4364_SR_PMU_MEM_DISABLE);
- }
-#endif /* SAVERESTORE */
- }
- break;
+ switch (SDIOD_DRVSTR_KEY(CHIPID(sih->chip), PMUREV(sih->pmurev))) {
+ case SDIOD_DRVSTR_KEY(BCM4336_CHIP_ID, 8):
+ case SDIOD_DRVSTR_KEY(BCM4336_CHIP_ID, 11):
+ if (PMUREV(sih->pmurev) == 8) {
+ str_tab = (sdiod_drive_str_t *)&sdiod_drive_strength_tab3;
+ } else if (PMUREV(sih->pmurev) == 11) {
+ str_tab = (sdiod_drive_str_t *)&sdiod_drive_strength_tab4_1v8;
+ }
+ str_mask = 0x00003800;
+ str_shift = 11;
+ break;
+ case SDIOD_DRVSTR_KEY(BCM4330_CHIP_ID, 12):
+ str_tab = (sdiod_drive_str_t *)&sdiod_drive_strength_tab4_1v8;
+ str_mask = 0x00003800;
+ str_shift = 11;
+ break;
+ case SDIOD_DRVSTR_KEY(BCM43362_CHIP_ID, 13):
+ str_tab = (sdiod_drive_str_t *)&sdiod_drive_strength_tab5_1v8;
+ str_mask = 0x00003800;
+ str_shift = 11;
+ break;
+ case SDIOD_DRVSTR_KEY(BCM4334_CHIP_ID, 17):
+ str_tab = (sdiod_drive_str_t *)&sdiod_drive_strength_tab6_1v8;
+ str_mask = 0x00001800;
+ str_shift = 11;
+ break;
+ case SDIOD_DRVSTR_KEY(BCM43143_CHIP_ID, 17):
+#if !defined(BCM_SDIO_VDDIO) || BCM_SDIO_VDDIO == 33
+ if (drivestrength >= ARRAYLAST(sdiod_drive_strength_tab7_3v3)->strength) {
+ str_tab = (sdiod_drive_str_t *)&sdiod_drive_strength_tab7_3v3;
}
- case PMU_4364_3x3_MODE:
- {
- if (CHIPID(sih->chip) == BCM4364_CHIP_ID) {
- W_REG(osh, &pmu->res_table_sel, RES4364_SR_SAVE_RESTORE);
- W_REG(osh, &pmu->res_updn_timer,
- PMU_4364_SAVE_RESTORE_UPDNTIME_3x3);
- /* Change the dependency table only if required */
- if ((max_mask != PMU_4364_MAX_MASK_3x3) ||
- (max_mask != PMU_4364_MAX_MASK_RSDB)) {
- pmu_res_depend_table = bcm4364a0_res_depend_rsdb;
- pmu_res_depend_table_sz =
- ARRAYSIZE(bcm4364a0_res_depend_rsdb);
- max_mask = PMU_4364_MAX_MASK_3x3;
- }
-#if defined(SAVERESTORE)
- if (SR_ENAB()) {
- /* Enable 3x3 SR engine */
- W_REG(osh, &cc->sr1_control0,
- CC_SR0_4364_SR_ENG_CLK_EN |
- CC_SR0_4364_SR_RSRC_TRIGGER |
- CC_SR0_4364_SR_WD_MEM_MIN_DIV |
- CC_SR0_4364_SR_INVERT_CLK |
- CC_SR0_4364_SR_ENABLE_HT |
- CC_SR0_4364_SR_ALLOW_PIC |
- CC_SR0_4364_SR_PMU_MEM_DISABLE |
- CC_SR0_4364_SR_ENG_EN_MASK);
- }
-#endif /* SAVERESTORE */
- }
- break;
+#else
+ if (drivestrength >= ARRAYLAST(sdiod_drive_strength_tab7_1v8)->strength) {
+ str_tab = (sdiod_drive_str_t *)&sdiod_drive_strength_tab7_1v8;
}
- case PMU_4364_RSDB_MODE:
- default:
- {
- if (CHIPID(sih->chip) == BCM4364_CHIP_ID) {
- W_REG(osh, &pmu->res_table_sel, RES4364_SR_SAVE_RESTORE);
- W_REG(osh, &pmu->res_updn_timer,
- PMU_4364_SAVE_RESTORE_UPDNTIME_3x3);
- /* Change the dependency table only if required */
- if ((max_mask != PMU_4364_MAX_MASK_3x3) ||
- (max_mask != PMU_4364_MAX_MASK_RSDB)) {
- pmu_res_depend_table =
- bcm4364a0_res_depend_rsdb;
- pmu_res_depend_table_sz =
- ARRAYSIZE(bcm4364a0_res_depend_rsdb);
- max_mask = PMU_4364_MAX_MASK_RSDB;
- }
-#if defined(SAVERESTORE)
- if (SR_ENAB()) {
- /* Enable 3x3 SR engine */
- W_REG(osh, &cc->sr1_control0,
- CC_SR0_4364_SR_ENG_CLK_EN |
- CC_SR0_4364_SR_RSRC_TRIGGER |
- CC_SR0_4364_SR_WD_MEM_MIN_DIV |
- CC_SR0_4364_SR_INVERT_CLK |
- CC_SR0_4364_SR_ENABLE_HT |
- CC_SR0_4364_SR_ALLOW_PIC |
- CC_SR0_4364_SR_PMU_MEM_DISABLE |
- CC_SR0_4364_SR_ENG_EN_MASK);
- }
-#endif /* SAVERESTORE */
- }
- break;
+#endif /* BCM_SDIO_VDDIO */
+ str_mask = 0x00000007;
+ str_ovr_pmuval = PMU43143_CC0_SDIO_DRSTR_OVR;
+ break;
+ default:
+ PMU_MSG(("No SDIO Drive strength init done for chip %s rev %d pmurev %d\n",
+ bcm_chipname(CHIPID(sih->chip), chn, 8),
+ CHIPREV(sih->chiprev), PMUREV(sih->pmurev)));
+ break;
+ }
+
+ if (str_tab != NULL) {
+ uint32 cc_data_temp;
+ int i;
+
+ /* Pick the lowest available drive strength equal or greater than the
+ * requested strength. Drive strength of 0 requests tri-state.
+ */
+ for (i = 0; drivestrength < str_tab[i].strength; i++)
+ ;
+
+ if (i > 0 && drivestrength > str_tab[i].strength)
+ i--;
+
+ W_REG(osh, &pmu->chipcontrol_addr, PMU_CHIPCTL1);
+ cc_data_temp = R_REG(osh, &pmu->chipcontrol_data);
+ cc_data_temp &= ~str_mask;
+ cc_data_temp |= str_tab[i].sel << str_shift;
+ W_REG(osh, &pmu->chipcontrol_data, cc_data_temp);
+ if (str_ovr_pmuval) { /* enables the selected drive strength */
+ W_REG(osh, &pmu->chipcontrol_addr, str_ovr_pmuctl);
+ OR_REG(osh, &pmu->chipcontrol_data, str_ovr_pmuval);
}
+ PMU_MSG(("SDIO: %dmA drive strength requested; set to %dmA\n",
+ drivestrength, str_tab[i].strength));
}
- si_pmu_resdeptbl_upd(sih, osh, pmu, pmu_res_depend_table, pmu_res_depend_table_sz);
- W_REG(osh, &pmu->max_res_mask, max_mask);
- W_REG(osh, &pmu->min_res_mask, min_mask);
- si_pmu_wait_for_steady_state(sih, osh, pmu);
- /* Add some delay; allow resources to come up and settle. */
- OSL_DELAY(200);
+
+ /* Return to original core */
si_setcoreidx(sih, origidx);
-#endif /* DUAL_PMU_SEQUENCE */
-}
+} /* si_sdiod_drive_strength_init */
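The selection loop above picks, from a table sorted by descending strength, the lowest entry whose strength is greater than or equal to the requested value (0 requests tri-state). A minimal standalone sketch of that lookup, using an invented example table and ex_* names that are not part of the driver:

/* Sketch only: the table values and ex_* names are examples, not driver data. */
#include <stdio.h>

typedef struct {
	unsigned char strength;	/* pad drive strength in mA */
	unsigned char sel;	/* register select value */
} ex_drive_str_t;

/* sorted by descending strength; the last entry (0 mA) is the tri-state value */
static const ex_drive_str_t ex_tab[] = {
	{32, 0x7}, {26, 0x6}, {22, 0x5}, {16, 0x4},
	{12, 0x3}, {8, 0x2}, {4, 0x1}, {0, 0x0}
};

static const ex_drive_str_t *ex_pick(const ex_drive_str_t *tab, unsigned int requested)
{
	int i;

	/* walk past every entry that is stronger than the request */
	for (i = 0; requested < tab[i].strength; i++)
		;
	/* no exact match: step back to the next stronger entry */
	if (i > 0 && requested > tab[i].strength)
		i--;
	return &tab[i];
}

int main(void)
{
	const ex_drive_str_t *e = ex_pick(ex_tab, 20);

	/* a 20 mA request selects the 22 mA entry (sel 0x5) */
	printf("requested 20 mA -> %u mA, sel 0x%x\n",
	       (unsigned)e->strength, (unsigned)e->sel);
	return 0;
}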
-#if defined(BCMULP)
+#if defined(BCMULP)
int
si_pmu_ulp_register(si_t *sih)
{
return BCME_OK;
}
-void
-si_pmu_ulp_chipconfig(si_t *sih, osl_t *osh)
-{
- uint32 reg_val;
-
- BCM_REFERENCE(reg_val);
-
- if (CHIPID(sih->chip) == BCM43012_CHIP_ID) {
- /* DS1 reset and clk enable init value config */
- si_pmu_chipcontrol(sih, PMU_CHIPCTL14, ~0x0,
- (PMUCCTL14_43012_ARMCM3_RESET_INITVAL |
- PMUCCTL14_43012_DOT11MAC_CLKEN_INITVAL |
- PMUCCTL14_43012_SDIOD_RESET_INIVAL |
- PMUCCTL14_43012_SDIO_CLK_DMN_RESET_INITVAL |
- PMUCCTL14_43012_SOCRAM_CLKEN_INITVAL |
- PMUCCTL14_43012_M2MDMA_RESET_INITVAL |
- PMUCCTL14_43012_DOT11MAC_PHY_CLK_EN_INITVAL |
- PMUCCTL14_43012_DOT11MAC_PHY_CNTL_EN_INITVAL));
-
- /* Clear SFlash clock request and enable High Quality clock */
- CHIPC_REG(sih, clk_ctl_st, CCS_SFLASH_CLKREQ | CCS_HQCLKREQ, CCS_HQCLKREQ);
-
- reg_val = PMU_REG(sih, min_res_mask, ~0x0, ULP_MIN_RES_MASK);
- ULP_DBG(("si_pmu_ulp_chipconfig: min_res_mask: 0x%08x\n", reg_val));
-
- /* Force power switch off */
- si_pmu_chipcontrol(sih, PMU_CHIPCTL2,
- (PMUCCTL02_43012_SUBCORE_PWRSW_FORCE_ON |
- PMUCCTL02_43012_PHY_PWRSW_FORCE_ON), 0);
-
- }
-}
-
void
si_pmu_ulp_ilp_config(si_t *sih, osl_t *osh, uint32 ilp_period)
{
pmuregs_t *pmu;
pmu = si_setcoreidx(sih, si_findcoreidx(sih, PMU_CORE_ID, 0));
W_REG(osh, &pmu->ILPPeriod, ilp_period);
- si_lhl_ilp_config(sih, osh, ilp_period);
}
-
-/** Initialize DS1 PMU hardware resources */
-void
-si_pmu_ds1_res_init(si_t *sih, osl_t *osh)
-{
- pmuregs_t *pmu;
- uint origidx;
- const pmu_res_updown_t *pmu_res_updown_table = NULL;
- uint pmu_res_updown_table_sz = 0;
-
- /* Remember original core before switch to chipc/pmu */
- origidx = si_coreidx(sih);
- if (AOB_ENAB(sih)) {
- pmu = si_setcore(sih, PMU_CORE_ID, 0);
- } else {
- pmu = si_setcoreidx(sih, SI_CC_IDX);
- }
- ASSERT(pmu != NULL);
-
- switch (CHIPID(sih->chip)) {
- case BCM43012_CHIP_ID:
- pmu_res_updown_table = bcm43012a0_res_updown_ds1;
- pmu_res_updown_table_sz = ARRAYSIZE(bcm43012a0_res_updown_ds1);
- break;
-
- default:
- break;
- }
-
- /* Program up/down timers */
- while (pmu_res_updown_table_sz--) {
- ASSERT(pmu_res_updown_table != NULL);
- PMU_MSG(("DS1: Changing rsrc %d res_updn_timer to 0x%x\n",
- pmu_res_updown_table[pmu_res_updown_table_sz].resnum,
- pmu_res_updown_table[pmu_res_updown_table_sz].updown));
- W_REG(osh, &pmu->res_table_sel,
- pmu_res_updown_table[pmu_res_updown_table_sz].resnum);
- W_REG(osh, &pmu->res_updn_timer,
- pmu_res_updown_table[pmu_res_updown_table_sz].updown);
- }
-
- /* Return to original core */
- si_setcoreidx(sih, origidx);
-}
-
#endif /* defined(BCMULP) */
-uint32
-si_pmu_wake_bit_offset(si_t *sih)
-{
- uint32 wakebit;
-
- switch (CHIPID(sih->chip)) {
- case BCM4347_CHIP_GRPID:
- wakebit = CC2_4347_GCI2WAKE_MASK;
- break;
- default:
- wakebit = 0;
- ASSERT(0);
- break;
- }
- return wakebit;
-}
void si_pmu_set_min_res_mask(si_t *sih, osl_t *osh, uint min_res_mask)
{
OSL_DELAY(1000);
return BCME_OK;
}
-
-#ifdef BCMPMU_STATS
-/*
- * 8 pmu statistics timer default map
- *
- * for CORE_RDY_AUX measure, set as below for timer 6 and 7 instead of CORE_RDY_MAIN.
- * //core-n active duration : pmu_rsrc_state(CORE_RDY_AUX)
- * { SRC_CORE_RDY_AUX, FALSE, TRUE, LEVEL_HIGH},
- * //core-n active duration : pmu_rsrc_state(CORE_RDY_AUX)
- * { SRC_CORE_RDY_AUX, FALSE, TRUE, EDGE_RISE}
- */
-static pmu_stats_timer_t pmustatstimer[] = {
- { SRC_LINK_IN_L12, FALSE, TRUE, PMU_STATS_LEVEL_HIGH}, //link_in_l12
- { SRC_LINK_IN_L23, FALSE, TRUE, PMU_STATS_LEVEL_HIGH}, //link_in_l23
- { SRC_PM_ST_IN_D0, FALSE, TRUE, PMU_STATS_LEVEL_HIGH}, //pm_st_in_d0
- { SRC_PM_ST_IN_D3, FALSE, TRUE, PMU_STATS_LEVEL_HIGH}, //pm_st_in_d3
- //deep-sleep duration : pmu_rsrc_state(XTAL_PU)
- { SRC_XTAL_PU, FALSE, TRUE, PMU_STATS_LEVEL_LOW},
- //deep-sleep entry count : pmu_rsrc_state(XTAL_PU)
- { SRC_XTAL_PU, FALSE, TRUE, PMU_STATS_EDGE_FALL},
- //core-n active duration : pmu_rsrc_state(CORE_RDY_MAIN)
- { SRC_CORE_RDY_MAIN, FALSE, TRUE, PMU_STATS_LEVEL_HIGH},
- //core-n active duration : pmu_rsrc_state(CORE_RDY_MAIN)
- { SRC_CORE_RDY_MAIN, FALSE, TRUE, PMU_STATS_EDGE_RISE}
-};
-
-static void
-si_pmustatstimer_update(osl_t *osh, pmuregs_t *pmu, uint8 timerid)
-{
- uint32 stats_timer_ctrl;
-
- W_REG(osh, &pmu->pmu_statstimer_addr, timerid);
- stats_timer_ctrl =
- ((pmustatstimer[timerid].src_num << PMU_ST_SRC_SHIFT) &
- PMU_ST_SRC_MASK) |
- ((pmustatstimer[timerid].cnt_mode << PMU_ST_CNT_MODE_SHIFT) &
- PMU_ST_CNT_MODE_MASK) |
- ((pmustatstimer[timerid].enable << PMU_ST_EN_SHIFT) & PMU_ST_EN_MASK) |
- ((pmustatstimer[timerid].int_enable << PMU_ST_INT_EN_SHIFT) & PMU_ST_INT_EN_MASK);
- W_REG(osh, &pmu->pmu_statstimer_ctrl, stats_timer_ctrl);
- W_REG(osh, &pmu->pmu_statstimer_N, 0);
-}
-
-void
-si_pmustatstimer_int_enable(si_t *sih)
-{
- pmuregs_t *pmu;
- uint origidx;
- osl_t *osh = si_osh(sih);
-
- /* Remember original core before switch to chipc/pmu */
- origidx = si_coreidx(sih);
- if (AOB_ENAB(sih)) {
- pmu = si_setcore(sih, PMU_CORE_ID, 0);
- } else {
- pmu = si_setcoreidx(sih, SI_CC_IDX);
- }
- ASSERT(pmu != NULL);
-
- OR_REG(osh, &pmu->pmuintmask0, PMU_INT_STAT_TIMER_INT_MASK);
-
- /* Return to original core */
- si_setcoreidx(sih, origidx);
-}
-
-void
-si_pmustatstimer_int_disable(si_t *sih)
-{
- pmuregs_t *pmu;
- uint origidx;
- osl_t *osh = si_osh(sih);
-
- /* Remember original core before switch to chipc/pmu */
- origidx = si_coreidx(sih);
- if (AOB_ENAB(sih)) {
- pmu = si_setcore(sih, PMU_CORE_ID, 0);
- } else {
- pmu = si_setcoreidx(sih, SI_CC_IDX);
- }
- ASSERT(pmu != NULL);
-
- AND_REG(osh, &pmu->pmuintmask0, ~PMU_INT_STAT_TIMER_INT_MASK);
-
- /* Return to original core */
- si_setcoreidx(sih, origidx);
-}
-
-void
-si_pmustatstimer_init(si_t *sih)
-{
- pmuregs_t *pmu;
- uint origidx;
- osl_t *osh = si_osh(sih);
- uint32 core_cap_ext;
- uint8 max_stats_timer_num;
- int8 i;
-
- /* Remember original core before switch to chipc/pmu */
- origidx = si_coreidx(sih);
- if (AOB_ENAB(sih)) {
- pmu = si_setcore(sih, PMU_CORE_ID, 0);
- } else {
- pmu = si_setcoreidx(sih, SI_CC_IDX);
- }
- ASSERT(pmu != NULL);
-
- core_cap_ext = R_REG(osh, &pmu->core_cap_ext);
-
- max_stats_timer_num = ((core_cap_ext & PCAP_EXT_ST_NUM_MASK) >> PCAP_EXT_ST_NUM_SHIFT) + 1;
-
- for (i = 0; i < max_stats_timer_num; i++) {
- si_pmustatstimer_update(osh, pmu, i);
- }
-
- OR_REG(osh, &pmu->pmuintmask0, PMU_INT_STAT_TIMER_INT_MASK);
-
- /* Return to original core */
- si_setcoreidx(sih, origidx);
-}
-
-void
-si_pmustatstimer_dump(si_t *sih)
-{
- pmuregs_t *pmu;
- uint origidx;
- osl_t *osh = si_osh(sih);
- uint32 core_cap_ext, pmucapabilities, AlpPeriod, ILPPeriod, pmuintmask0, pmuintstatus;
- uint8 max_stats_timer_num, max_stats_timer_src_num;
- uint32 stat_timer_ctrl, stat_timer_N;
- uint8 i;
- uint32 current_time_ms = OSL_SYSUPTIME();
-
- /* Remember original core before switch to chipc/pmu */
- origidx = si_coreidx(sih);
- if (AOB_ENAB(sih)) {
- pmu = si_setcore(sih, PMU_CORE_ID, 0);
- } else {
- pmu = si_setcoreidx(sih, SI_CC_IDX);
- }
- ASSERT(pmu != NULL);
-
- pmucapabilities = R_REG(osh, &pmu->pmucapabilities);
- core_cap_ext = R_REG(osh, &pmu->core_cap_ext);
- AlpPeriod = R_REG(osh, &pmu->slowclkperiod);
- ILPPeriod = R_REG(osh, &pmu->ILPPeriod);
-
- max_stats_timer_num = ((core_cap_ext & PCAP_EXT_ST_NUM_MASK) >>
- PCAP_EXT_ST_NUM_SHIFT) + 1;
- max_stats_timer_src_num = ((core_cap_ext & PCAP_EXT_ST_SRC_NUM_MASK) >>
- PCAP_EXT_ST_SRC_NUM_SHIFT) + 1;
-
- pmuintstatus = R_REG(osh, &pmu->pmuintstatus);
- pmuintmask0 = R_REG(osh, &pmu->pmuintmask0);
-
- PMU_ERROR(("%s : TIME %d\n", __FUNCTION__, current_time_ms));
-
- PMU_ERROR(("\tMAX Timer Num %d, MAX Source Num %d\n",
- max_stats_timer_num, max_stats_timer_src_num));
- PMU_ERROR(("\tpmucapabilities 0x%8x, core_cap_ext 0x%8x, AlpPeriod 0x%8x, ILPPeriod 0x%8x, "
- "pmuintmask0 0x%8x, pmuintstatus 0x%8x, pmurev %d\n",
- pmucapabilities, core_cap_ext, AlpPeriod, ILPPeriod,
- pmuintmask0, pmuintstatus, PMUREV(sih->pmurev)));
-
- for (i = 0; i < max_stats_timer_num; i++) {
- W_REG(osh, &pmu->pmu_statstimer_addr, i);
- stat_timer_ctrl = R_REG(osh, &pmu->pmu_statstimer_ctrl);
- stat_timer_N = R_REG(osh, &pmu->pmu_statstimer_N);
- PMU_ERROR(("\t Timer %d : control 0x%8x, %d\n",
- i, stat_timer_ctrl, stat_timer_N));
- }
-
- /* Return to original core */
- si_setcoreidx(sih, origidx);
-}
-
-void
-si_pmustatstimer_start(si_t *sih, uint8 timerid)
-{
- pmuregs_t *pmu;
- uint origidx;
- osl_t *osh = si_osh(sih);
-
- /* Remember original core before switch to chipc/pmu */
- origidx = si_coreidx(sih);
- if (AOB_ENAB(sih)) {
- pmu = si_setcore(sih, PMU_CORE_ID, 0);
- } else {
- pmu = si_setcoreidx(sih, SI_CC_IDX);
- }
- ASSERT(pmu != NULL);
-
- pmustatstimer[timerid].enable = TRUE;
-
- W_REG(osh, &pmu->pmu_statstimer_addr, timerid);
- OR_REG(osh, &pmu->pmu_statstimer_ctrl, PMU_ST_ENAB << PMU_ST_EN_SHIFT);
-
- /* Return to original core */
- si_setcoreidx(sih, origidx);
-}
-
-void
-si_pmustatstimer_stop(si_t *sih, uint8 timerid)
-{
- pmuregs_t *pmu;
- uint origidx;
- osl_t *osh = si_osh(sih);
-
- /* Remember original core before switch to chipc/pmu */
- origidx = si_coreidx(sih);
- if (AOB_ENAB(sih)) {
- pmu = si_setcore(sih, PMU_CORE_ID, 0);
- } else {
- pmu = si_setcoreidx(sih, SI_CC_IDX);
- }
- ASSERT(pmu != NULL);
-
- pmustatstimer[timerid].enable = FALSE;
-
- W_REG(osh, &pmu->pmu_statstimer_addr, timerid);
- AND_REG(osh, &pmu->pmu_statstimer_ctrl, ~(PMU_ST_ENAB << PMU_ST_EN_SHIFT));
-
- /* Return to original core */
- si_setcoreidx(sih, origidx);
-}
-
-void
-si_pmustatstimer_clear(si_t *sih, uint8 timerid)
-{
- pmuregs_t *pmu;
- uint origidx;
- osl_t *osh = si_osh(sih);
-
- /* Remember original core before switch to chipc/pmu */
- origidx = si_coreidx(sih);
- if (AOB_ENAB(sih)) {
- pmu = si_setcore(sih, PMU_CORE_ID, 0);
- } else {
- pmu = si_setcoreidx(sih, SI_CC_IDX);
- }
- ASSERT(pmu != NULL);
-
- W_REG(osh, &pmu->pmu_statstimer_addr, timerid);
- W_REG(osh, &pmu->pmu_statstimer_N, 0);
-
- /* Return to original core */
- si_setcoreidx(sih, origidx);
-}
-
-void
-si_pmustatstimer_clear_overflow(si_t *sih)
-{
- uint8 i;
- uint32 core_cap_ext;
- uint8 max_stats_timer_num;
- uint32 timerN;
- pmuregs_t *pmu;
- uint origidx;
- osl_t *osh = si_osh(sih);
-
- /* Remember original core before switch to chipc/pmu */
- origidx = si_coreidx(sih);
- if (AOB_ENAB(sih)) {
- pmu = si_setcore(sih, PMU_CORE_ID, 0);
- } else {
- pmu = si_setcoreidx(sih, SI_CC_IDX);
- }
- ASSERT(pmu != NULL);
-
- core_cap_ext = R_REG(osh, &pmu->core_cap_ext);
- max_stats_timer_num = ((core_cap_ext & PCAP_EXT_ST_NUM_MASK) >> PCAP_EXT_ST_NUM_SHIFT) + 1;
-
- for (i = 0; i < max_stats_timer_num; i++) {
- W_REG(osh, &pmu->pmu_statstimer_addr, i);
- timerN = R_REG(osh, &pmu->pmu_statstimer_N);
- if (timerN == 0xFFFFFFFF) {
- PMU_ERROR(("pmustatstimer overflow clear - timerid : %d\n", i));
- si_pmustatstimer_clear(sih, i);
- }
- }
-
- /* Return to original core */
- si_setcoreidx(sih, origidx);
-}
-
-uint32
-si_pmustatstimer_read(si_t *sih, uint8 timerid)
-{
- pmuregs_t *pmu;
- uint origidx;
- osl_t *osh = si_osh(sih);
- uint32 stats_timer_N;
-
- /* Remember original core before switch to chipc/pmu */
- origidx = si_coreidx(sih);
- if (AOB_ENAB(sih)) {
- pmu = si_setcore(sih, PMU_CORE_ID, 0);
- } else {
- pmu = si_setcoreidx(sih, SI_CC_IDX);
- }
- ASSERT(pmu != NULL);
-
- W_REG(osh, &pmu->pmu_statstimer_addr, timerid);
- stats_timer_N = R_REG(osh, &pmu->pmu_statstimer_N);
-
- /* Return to original core */
- si_setcoreidx(sih, origidx);
-
- return stats_timer_N;
-}
-
-void
-si_pmustatstimer_cfg_src_num(si_t *sih, uint8 src_num, uint8 timerid)
-{
- pmuregs_t *pmu;
- uint origidx;
- osl_t *osh = si_osh(sih);
-
- /* Remember original core before switch to chipc/pmu */
- origidx = si_coreidx(sih);
- if (AOB_ENAB(sih)) {
- pmu = si_setcore(sih, PMU_CORE_ID, 0);
- } else {
- pmu = si_setcoreidx(sih, SI_CC_IDX);
- }
- ASSERT(pmu != NULL);
-
- pmustatstimer[timerid].src_num = src_num;
- si_pmustatstimer_update(osh, pmu, timerid);
-
- /* Return to original core */
- si_setcoreidx(sih, origidx);
-}
-
-void
-si_pmustatstimer_cfg_cnt_mode(si_t *sih, uint8 cnt_mode, uint8 timerid)
-{
- pmuregs_t *pmu;
- uint origidx;
- osl_t *osh = si_osh(sih);
-
- /* Remember original core before switch to chipc/pmu */
- origidx = si_coreidx(sih);
- if (AOB_ENAB(sih)) {
- pmu = si_setcore(sih, PMU_CORE_ID, 0);
- } else {
- pmu = si_setcoreidx(sih, SI_CC_IDX);
- }
- ASSERT(pmu != NULL);
-
- pmustatstimer[timerid].cnt_mode = cnt_mode;
- si_pmustatstimer_update(osh, pmu, timerid);
-
- /* Return to original core */
- si_setcoreidx(sih, origidx);
-}
-#endif /* BCMPMU_STATS */
/*
* Fundamental types and constants relating to 802.11
*
- * Copyright (C) 1999-2019, Broadcom.
- *
+ * Copyright (C) 1999-2017, Broadcom Corporation
+ *
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
* under the terms of the GNU General Public License version 2 (the "GPL"),
* available at http://www.broadcom.com/licenses/GPLv2.php, with the
* following added to such license:
- *
+ *
* As a special exception, the copyright holders of this software give you
* permission to link this software with independent modules, and to copy and
* distribute the resulting executable under terms of your choice, provided that
* the license of that module. An independent module is a module which is not
* derived from this software. The special exception does not apply to any
* modifications of the software.
- *
+ *
* Notwithstanding the above, under no circumstances may you combine this
* software in any way with any other Broadcom software provided under a license
* other than the GPL, without Broadcom's express prior written consent.
*
* <<Broadcom-WL-IPTag/Open:>>
*
- * $Id: 802.11.h 814166 2019-04-10 06:14:49Z $
+ * $Id: 802.11.h 700693 2017-05-20 20:29:07Z $
*/
#ifndef _802_11_H_
#ifndef _TYPEDEFS_H_
#include <typedefs.h>
-#endif // endif
+#endif
#ifndef _NET_ETHERNET_H_
#include <ethernet.h>
-#endif // endif
+#endif
#include <wpa.h>
/* This marks the start of a packed structure section. */
#include <packed_section_start.h>
+
#define DOT11_TU_TO_US 1024 /* 802.11 Time Unit is 1024 microseconds */
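Since a Time Unit is 1024 microseconds, intervals given in TU are slightly longer than the same number of milliseconds. A minimal sketch of the conversion, assuming the common 100 TU beacon interval as the example value:

/* Sketch only: the 100 TU beacon interval is just the common default. */
#include <stdio.h>

#define EX_TU_TO_US 1024

int main(void)
{
	unsigned int beacon_interval_tu = 100;
	unsigned int us = beacon_interval_tu * EX_TU_TO_US;

	/* 100 TU = 102400 us = 102.4 ms, not an even 100 ms */
	printf("%u TU = %u us = %.1f ms\n", beacon_interval_tu, us, us / 1000.0);
	return 0;
}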
/* Generic 802.11 frame constants */
#define DOT11_A3_HDR_LEN 24 /* d11 header length with A3 */
#define DOT11_A4_HDR_LEN 30 /* d11 header length with A4 */
#define DOT11_MAC_HDR_LEN DOT11_A3_HDR_LEN /* MAC header length */
-#define DOT11_FCS_LEN 4u /* d11 FCS length */
+#define DOT11_FCS_LEN 4 /* d11 FCS length */
#define DOT11_ICV_LEN 4 /* d11 ICV length */
#define DOT11_ICV_AES_LEN 8 /* d11 ICV/AES length */
-#define DOT11_MAX_ICV_AES_LEN 16 /* d11 MAX ICV/AES length */
#define DOT11_QOS_LEN 2 /* d11 QoS length */
#define DOT11_HTC_LEN 4 /* d11 HT Control field length */
uint16 durid; /* duration/ID */
struct ether_addr ra; /* receiver address */
} BWL_POST_PACKED_STRUCT;
-#define DOT11_CTS_LEN 10u /* d11 CTS frame length */
+#define DOT11_CTS_LEN 10 /* d11 CTS frame length */
BWL_PRE_PACKED_STRUCT struct dot11_ack_frame {
uint16 fc; /* frame control */
uint16 capability;
} BWL_POST_PACKED_STRUCT;
#define DOT11_BCN_PRB_LEN 12 /* 802.11 beacon/probe frame fixed length */
-#define DOT11_BCN_PRB_FIXED_LEN 12u /* 802.11 beacon/probe frame fixed length */
+#define DOT11_BCN_PRB_FIXED_LEN 12 /* 802.11 beacon/probe frame fixed length */
BWL_PRE_PACKED_STRUCT struct dot11_auth {
uint16 alg; /* algorithm */
uint8 margin;
} BWL_POST_PACKED_STRUCT;
typedef struct dot11_tpc_rep dot11_tpc_rep_t;
-#define DOT11_MNG_IE_TPC_REPORT_SIZE (sizeof(dot11_tpc_rep_t))
#define DOT11_MNG_IE_TPC_REPORT_LEN 2 /* length of IE data, not including 2 byte header */
BWL_PRE_PACKED_STRUCT struct dot11_supp_channels {
typedef struct dot11_action_frmhdr dot11_action_frmhdr_t;
/* Action Field length */
-#define DOT11_ACTION_CATEGORY_LEN 1u
-#define DOT11_ACTION_ACTION_LEN 1u
-#define DOT11_ACTION_DIALOG_TOKEN_LEN 1u
-#define DOT11_ACTION_CAPABILITY_LEN 2u
-#define DOT11_ACTION_STATUS_CODE_LEN 2u
-#define DOT11_ACTION_REASON_CODE_LEN 2u
-#define DOT11_ACTION_TARGET_CH_LEN 1u
-#define DOT11_ACTION_OPER_CLASS_LEN 1u
+#define DOT11_ACTION_CATEGORY_LEN 1
+#define DOT11_ACTION_ACTION_LEN 1
+#define DOT11_ACTION_DIALOG_TOKEN_LEN 1
+#define DOT11_ACTION_CAPABILITY_LEN 2
+#define DOT11_ACTION_STATUS_CODE_LEN 2
+#define DOT11_ACTION_REASON_CODE_LEN 2
+#define DOT11_ACTION_TARGET_CH_LEN 1
+#define DOT11_ACTION_OPER_CLASS_LEN 1
#define DOT11_ACTION_FRMHDR_LEN 2
#define DOT11_EXTCAP_LEN_WNM_NOTIFICATION 6
#define DOT11_EXTCAP_LEN_TDLS_WBW 8
#define DOT11_EXTCAP_LEN_OPMODE_NOTIFICATION 8
-#define DOT11_EXTCAP_LEN_TWT 10u
/* TDLS Capabilities */
#define DOT11_TDLS_CAP_TDLS 37 /* TDLS support */
typedef struct qos_cap_ie qos_cap_ie_t;
BWL_PRE_PACKED_STRUCT struct dot11_qbss_load_ie {
- uint8 id; /* 11, DOT11_MNG_QBSS_LOAD_ID */
+ uint8 id; /* 11, DOT11_MNG_QBSS_LOAD_ID */
uint8 length;
- uint16 station_count; /* total number of STAs associated */
+ uint16 station_count; /* total number of STAs associated */
uint8 channel_utilization; /* % of time, normalized to 255, QAP sensed medium busy */
- uint16 aac; /* available admission capacity */
+ uint16 aac; /* available admission capacity */
} BWL_POST_PACKED_STRUCT;
typedef struct dot11_qbss_load_ie dot11_qbss_load_ie_t;
-#define BSS_LOAD_IE_SIZE 7 /* BSS load IE size */
+#define BSS_LOAD_IE_SIZE 7 /* BSS load IE size */
#define WLC_QBSS_LOAD_CHAN_FREE_MAX 0xff /* max for channel free score */
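channel_utilization in the QBSS Load IE is the fraction of time the medium was sensed busy, linearly scaled so that 255 represents 100%. A minimal sketch of converting a reported value to a percentage (the value 102 is an arbitrary example):

/* Sketch only: 102 is an arbitrary reported value. */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint8_t channel_utilization = 102;

	/* 255 = 100% busy, so 102 is about 40% */
	printf("channel utilization: %.1f%%\n", channel_utilization * 100.0 / 255.0);
	return 0;
}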
-/* Estimated Service Parameters (ESP) IE - 802.11-2016 9.4.2.174 */
-typedef BWL_PRE_PACKED_STRUCT struct dot11_esp_ie {
- uint8 id;
- uint8 length;
- uint8 id_ext;
- /* variable len info */
- uint8 esp_info_lists[];
-} BWL_POST_PACKED_STRUCT dot11_esp_ie_t;
-
-#define DOT11_ESP_IE_HDR_SIZE (OFFSETOF(dot11_esp_ie_t, esp_info_lists))
-
-/* ESP Information list - 802.11-2016 9.4.2.174 */
-typedef BWL_PRE_PACKED_STRUCT struct dot11_esp_ie_info_list {
- /* acess category, data format, ba win size */
- uint8 ac_df_baws;
- /* estimated air time fraction */
- uint8 eat_frac;
- /* data PPDU duration target (50us units) */
- uint8 ppdu_dur;
-} BWL_POST_PACKED_STRUCT dot11_esp_ie_info_list_t;
-
-#define DOT11_ESP_IE_INFO_LIST_SIZE (sizeof(dot11_esp_ie_info_list_t))
-
-#define DOT11_ESP_NBR_INFO_LISTS 4u /* max nbr of esp information lists */
-#define DOT11_ESP_INFO_LIST_AC_BK 0u /* access category of esp information list AC_BK */
-#define DOT11_ESP_INFO_LIST_AC_BE 1u /* access category of esp information list AC_BE */
-#define DOT11_ESP_INFO_LIST_AC_VI 2u /* access category of esp information list AC_VI */
-#define DOT11_ESP_INFO_LIST_AC_VO 3u /* access category of esp information list AC_VO */
-
-#define DOT11_ESP_INFO_LIST_DF_MASK 0x18 /* Data Format Mask */
-#define DOT11_ESP_INFO_LIST_BAWS_MASK 0xE0 /* BA window size mask */
-
/* nom_msdu_size */
#define FIXED_MSDU_SIZE 0x8000 /* MSDU size is fixed */
#define MSDU_SIZE_MASK 0x7fff /* (Nominal or fixed) MSDU size */
#ifndef CISCO_AIRONET_OUI
#define CISCO_AIRONET_OUI "\x00\x40\x96" /* Cisco AIRONET OUI */
-#endif // endif
+#endif
/* QoS FastLane IE. */
BWL_PRE_PACKED_STRUCT struct ccx_qfl_ie {
uint8 id; /* 221, DOT11_MNG_VS_ID */
#define DOT11_SHARED_KEY 1 /* d11 shared authentication */
#define DOT11_FAST_BSS 2 /* d11 fast bss authentication */
#define DOT11_SAE 3 /* d11 simultaneous authentication of equals */
-#define DOT11_FILS_SKEY 4 /* d11 fils shared key authentication w/o pfs */
-#define DOT11_FILS_SKEY_PFS 5 /* d11 fils shared key authentication w/ pfs */
+#define DOT11_FILS_SKEY_PFS 4 /* d11 fils shared key authentication w/o pfs */
+#define DOT11_FILS_SKEY 5 /* d11 fils shared key authentication w/ pfs */
#define DOT11_FILS_PKEY 6 /* d11 fils public key authentication */
#define DOT11_CHALLENGE_LEN 128 /* d11 challenge text length */
#define FC_SUBTYPE_ACTION_NOACK 14 /* action no-ack */
/* Control Subtypes */
-#define FC_SUBTYPE_TRIGGER 2 /* Trigger frame */
#define FC_SUBTYPE_CTL_WRAPPER 7 /* Control Wrapper */
#define FC_SUBTYPE_BLOCKACK_REQ 8 /* Block Ack Req */
#define FC_SUBTYPE_BLOCKACK 9 /* Block Ack */
#define FC_ACTION FC_KIND(FC_TYPE_MNG, FC_SUBTYPE_ACTION) /* action */
#define FC_ACTION_NOACK FC_KIND(FC_TYPE_MNG, FC_SUBTYPE_ACTION_NOACK) /* action no-ack */
-#define FC_CTL_TRIGGER FC_KIND(FC_TYPE_CTL, FC_SUBTYPE_TRIGGER) /* Trigger frame */
#define FC_CTL_WRAPPER FC_KIND(FC_TYPE_CTL, FC_SUBTYPE_CTL_WRAPPER) /* Control Wrapper */
#define FC_BLOCKACK_REQ FC_KIND(FC_TYPE_CTL, FC_SUBTYPE_BLOCKACK_REQ) /* Block Ack Req */
#define FC_BLOCKACK FC_KIND(FC_TYPE_CTL, FC_SUBTYPE_BLOCKACK) /* Block Ack */
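FC_KIND composes a frame-control "kind" from the type and subtype fields, which in the 802.11 Frame Control field occupy bits 2-3 and 4-7 respectively. A minimal sketch under that assumption, using ex_* names rather than the driver's own macros:

/* Sketch only: ex_* shifts follow the standard Frame Control layout. */
#include <stdio.h>
#include <stdint.h>

#define EX_FC_TYPE_SHIFT    2
#define EX_FC_SUBTYPE_SHIFT 4
#define EX_FC_KIND(t, s)    (((t) << EX_FC_TYPE_SHIFT) | ((s) << EX_FC_SUBTYPE_SHIFT))

int main(void)
{
	uint16_t fc = EX_FC_KIND(1, 8);	/* control frame (type 1), Block Ack Req (subtype 8) */

	printf("fc=0x%04x type=%u subtype=%u\n", (unsigned)fc,
	       (unsigned)((fc >> EX_FC_TYPE_SHIFT) & 0x3),
	       (unsigned)((fc >> EX_FC_SUBTYPE_SHIFT) & 0xF));
	return 0;
}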
/* 12 is unused by STA but could be used by AP/GO */
#define DOT11_RC_DISASSOC_BTM 12 /* Disassociated due to BSS Transition Mgmt */
+
/* 32-39 are QSTA specific reasons added in 11e */
#define DOT11_RC_UNSPECIFIED_QOS 32 /* unspecified QoS-related reason */
#define DOT11_RC_INSUFFCIENT_BW 33 /* QAP lacks sufficient bandwidth */
#define DOT11_MNG_HT_BSS_CHANNEL_REPORT_ID 73 /* d11 mgmt OBSS Intolerant Channel list */
#define DOT11_MNG_HT_OBSS_ID 74 /* d11 mgmt OBSS HT info */
#define DOT11_MNG_MMIE_ID 76 /* d11 mgmt MIC IE */
-#define DOT11_MNG_NONTRANS_BSSID_CAP_ID 83 /* 11k nontransmitted BSSID capability */
-#define DOT11_MNG_MULTIPLE_BSSIDINDEX_ID 85 /* 11k multiple BSSID index */
#define DOT11_MNG_FMS_DESCR_ID 86 /* 11v FMS descriptor */
#define DOT11_MNG_FMS_REQ_ID 87 /* 11v FMS request id */
#define DOT11_MNG_FMS_RESP_ID 88 /* 11v FMS response id */
#define DOT11_MNG_AID_ID 197 /* Association ID IE */
#define DOT11_MNG_OPER_MODE_NOTIF_ID 199 /* d11 mgmt VHT oper mode notif */
#define DOT11_MNG_RNR_ID 201
+#define DOT11_MNG_HE_CAP_ID 202
+#define DOT11_MNG_HE_OP_ID 203
#define DOT11_MNG_FTM_PARAMS_ID 206
#define DOT11_MNG_TWT_ID 216 /* 11ah D5.0 */
#define DOT11_MNG_WPA_ID 221 /* d11 management WPA id */
#define DOT11_MNG_VS_ID 221 /* d11 management Vendor Specific IE */
#define DOT11_MNG_MESH_CSP_ID 222 /* d11 Mesh Channel Switch Parameter */
#define DOT11_MNG_FILS_IND_ID 240 /* 11ai FILS Indication element */
-#define DOT11_MNG_FRAGMENT_ID 242 /* IE's fragment ID */
/* The following ID extensions should be defined >= 255
* i.e. the values should include 255 (DOT11_MNG_ID_EXT_ID + ID Extension).
*/
#define DOT11_MNG_ID_EXT_ID 255 /* Element ID Extension 11mc D4.3 */
-#define EXT_MNG_OWE_DH_PARAM_ID 32u /* OWE DH Param ID - RFC 8110 */
-#define DOT11_MNG_OWE_DH_PARAM_ID (DOT11_MNG_ID_EXT_ID + EXT_MNG_OWE_DH_PARAM_ID)
-#define EXT_MNG_HE_CAP_ID 35u /* HE Capabilities, 11ax */
-#define DOT11_MNG_HE_CAP_ID (DOT11_MNG_ID_EXT_ID + EXT_MNG_HE_CAP_ID)
-#define EXT_MNG_HE_OP_ID 36u /* HE Operation IE, 11ax */
-#define DOT11_MNG_HE_OP_ID (DOT11_MNG_ID_EXT_ID + EXT_MNG_HE_OP_ID)
-#define EXT_MNG_UORA_ID 37u /* UORA Parameter Set */
-#define DOT11_MNG_UORA_ID (DOT11_MNG_ID_EXT_ID + EXT_MNG_UORA_ID)
-#define EXT_MNG_MU_EDCA_ID 38u /* MU EDCA Parameter Set */
-#define DOT11_MNG_MU_EDCA_ID (DOT11_MNG_ID_EXT_ID + EXT_MNG_MU_EDCA_ID)
-#define EXT_MNG_SRPS_ID 39u /* Spatial Reuse Parameter Set */
-#define DOT11_MNG_SRPS_ID (DOT11_MNG_ID_EXT_ID + EXT_MNG_SRPS_ID)
-#define EXT_MNG_BSSCOLOR_CHANGE_ID 42u /* BSS Color Change Announcement */
-#define DOT11_MNG_BSSCOLOR_CHANGE_ID (DOT11_MNG_ID_EXT_ID + EXT_MNG_BSSCOLOR_CHANGE_ID)
-
-/* FILS and OCE ext ids */
-#define FILS_EXTID_MNG_REQ_PARAMS 2u /* FILS Request Parameters element */
-#define DOT11_MNG_FILS_REQ_PARAMS (DOT11_MNG_ID_EXT_ID + FILS_EXTID_MNG_REQ_PARAMS)
-#define FILS_EXTID_MNG_KEY_CONFIRMATION_ID 3u /* FILS Key Confirmation element */
-#define DOT11_MNG_FILS_KEY_CONFIRMATION (DOT11_MNG_ID_EXT_ID +\
- FILS_EXTID_MNG_KEY_CONFIRMATION_ID)
-#define FILS_EXTID_MNG_SESSION_ID 4u /* FILS Session element */
-#define DOT11_MNG_FILS_SESSION (DOT11_MNG_ID_EXT_ID + FILS_EXTID_MNG_SESSION_ID)
-#define FILS_EXTID_MNG_HLP_CONTAINER_ID 5u /* FILS HLP Container element */
-#define DOT11_MNG_FILS_HLP_CONTAINER (DOT11_MNG_ID_EXT_ID +\
- FILS_EXTID_MNG_HLP_CONTAINER_ID)
-#define FILS_EXTID_MNG_KEY_DELIVERY_ID 7u /* FILS Key Delivery element */
-#define DOT11_MNG_FILS_KEY_DELIVERY (DOT11_MNG_ID_EXT_ID +\
- FILS_EXTID_MNG_KEY_DELIVERY_ID)
-#define FILS_EXTID_MNG_WRAPPED_DATA_ID 8u /* FILS Wrapped Data element */
-#define DOT11_MNG_FILS_WRAPPED_DATA (DOT11_MNG_ID_EXT_ID +\
- FILS_EXTID_MNG_WRAPPED_DATA_ID)
-#define OCE_EXTID_MNG_ESP_ID 11u /* Estimated Service Parameters element */
-#define DOT11_MNG_ESP (DOT11_MNG_ID_EXT_ID + OCE_EXTID_MNG_ESP_ID)
-#define FILS_EXTID_MNG_NONCE_ID 13u /* FILS Nonce element */
-#define DOT11_MNG_FILS_NONCE (DOT11_MNG_ID_EXT_ID + FILS_EXTID_MNG_NONCE_ID)
-
-/* deprecated definitions, do not use, to be deleted later */
-#define FILS_HLP_CONTAINER_EXT_ID FILS_EXTID_MNG_HLP_CONTAINER_ID
-#define DOT11_ESP_EXT_ID OCE_EXTID_MNG_ESP_ID
-#define FILS_REQ_PARAMS_EXT_ID FILS_EXTID_MNG_REQ_PARAMS
-#define EXT_MNG_RAPS_ID 37u /* OFDMA Random Access Parameter Set */
-#define DOT11_MNG_RAPS_ID (DOT11_MNG_ID_EXT_ID + EXT_MNG_RAPS_ID)
-/* End of deprecated definitions */
+#define DOT11_MNG_RAPS_ID (DOT11_MNG_ID_EXT_ID+11) /* OFDMA Random Access Parameter Set */
+
+/* FILS ext ids */
+#define FILS_REQ_PARAMS_EXT_ID 2
+#define DOT11_MNG_FILS_REQ_PARAMS (DOT11_MNG_ID_EXT_ID + FILS_REQ_PARAMS_EXT_ID)
+#define FILS_SESSION_EXT_ID 4
+#define DOT11_MNG_FILS_SESSION (DOT11_MNG_ID_EXT_ID + FILS_SESSION_EXT_ID)
+#define FILS_HLP_CONTAINER_EXT_ID 5
+#define DOT11_MNG_FILS_HLP_CONTAINER (DOT11_MNG_ID_EXT_ID + FILS_HLP_CONTAINER_EXT_ID)
+#define FILS_WRAPPED_DATA_EXT_ID 8
+#define DOT11_MNG_FILS_WRAPPED_DATA (DOT11_MNG_ID_EXT_ID + FILS_WRAPPED_DATA_EXT_ID)
+#define FILS_NONCE_EXT_ID 13
+#define DOT11_MNG_FILS_NONCE (DOT11_MNG_ID_EXT_ID + FILS_NONCE_EXT_ID)
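Elements that use the Element ID Extension carry id 255 and put the real identifier in the first octet of the body, which is why the DOT11_MNG_* values above are formed as DOT11_MNG_ID_EXT_ID plus the extension id. A minimal sketch of matching such an element, with ex_* names that are assumptions rather than driver definitions:

/* Sketch only: ex_* names are invented; values mirror the defines above. */
#include <stdio.h>
#include <stdint.h>

#define EX_MNG_ID_EXT_ID	255
#define EX_FILS_SESSION_EXT	4

struct ex_ie {
	uint8_t id;
	uint8_t len;
	uint8_t data[255];
};

static int ex_ie_ext_match(const struct ex_ie *ie, uint8_t ext_id)
{
	/* id must be the Element ID Extension value and the first body octet the ext id */
	return ie->id == EX_MNG_ID_EXT_ID && ie->len >= 1 && ie->data[0] == ext_id;
}

int main(void)
{
	struct ex_ie ie = { EX_MNG_ID_EXT_ID, 1, { EX_FILS_SESSION_EXT } };

	printf("FILS Session element? %s\n",
	       ex_ie_ext_match(&ie, EX_FILS_SESSION_EXT) ? "yes" : "no");
	return 0;
}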
#define DOT11_MNG_IE_ID_EXT_MATCH(_ie, _id) (\
((_ie)->id == DOT11_MNG_ID_EXT_ID) && \
#define DOT11_EXT_CAP_TIMBC 18
/* BSS Transition Management support bit position */
#define DOT11_EXT_CAP_BSSTRANS_MGMT 19
-/* Multiple BSSID support position */
-#define DOT11_EXT_CAP_MULTIBSSID 22
/* Direct Multicast Service */
#define DOT11_EXT_CAP_DMS 26
/* Interworking support bit position */
/* Fine timing measurement - D3.0 */
#define DOT11_EXT_CAP_FTM_RESPONDER 70
#define DOT11_EXT_CAP_FTM_INITIATOR 71 /* tentative 11mcd3.0 */
-#define DOT11_EXT_CAP_FILS 72 /* FILS Capability */
/* TWT support */
-#define DOT11_EXT_CAP_TWT_REQUESTER 77
-#define DOT11_EXT_CAP_TWT_RESPONDER 78
-#define DOT11_EXT_CAP_OBSS_NB_RU_OFDMA 79
-#define DOT11_EXT_CAP_EMBSS_ADVERTISE 80
+#define DOT11_EXT_CAP_TWT_REQUESTER 75
+#define DOT11_EXT_CAP_TWT_RESPONDER 76
/* TODO: Update DOT11_EXT_CAP_MAX_IDX to reflect the highest offset.
* Note: DOT11_EXT_CAP_MAX_IDX must only be used in attach path.
* It will cause ROM invalidation otherwise.
*/
-#define DOT11_EXT_CAP_MAX_IDX 80
+#define DOT11_EXT_CAP_MAX_IDX 76
+#ifdef WL_FTM
#define DOT11_EXT_CAP_MAX_BIT_IDX 95 /* !!!update this please!!! */
-
+#else
+#define DOT11_EXT_CAP_MAX_BIT_IDX 62 /* !!!update this please!!! */
+#endif
/* extended capability */
#ifndef DOT11_EXTCAP_LEN_MAX
#define DOT11_EXTCAP_LEN_MAX ((DOT11_EXT_CAP_MAX_BIT_IDX + 8) >> 3)
-#endif // endif
+#endif
BWL_PRE_PACKED_STRUCT struct dot11_extcap {
uint8 extcap[DOT11_EXTCAP_LEN_MAX];
} BWL_POST_PACKED_STRUCT;
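DOT11_EXTCAP_LEN_MAX turns the highest capability bit index into a byte count by rounding up to whole octets, and an individual capability is then addressed as byte idx/8, bit idx%8. A minimal sketch of sizing the bitmap and setting one bit (the index 19 used below, BSS Transition Management, is only an example):

/* Sketch only: ex_* names are invented; the sizing formula mirrors the define above. */
#include <stdio.h>
#include <string.h>
#include <stdint.h>

#define EX_MAX_BIT_IDX 95
#define EX_EXTCAP_LEN  ((EX_MAX_BIT_IDX + 8) >> 3)	/* 12 bytes covers bits 0..95 */

static void ex_set_cap(uint8_t *extcap, unsigned int bit)
{
	extcap[bit >> 3] |= (uint8_t)(1u << (bit & 7));
}

int main(void)
{
	uint8_t extcap[EX_EXTCAP_LEN];

	memset(extcap, 0, sizeof(extcap));
	ex_set_cap(extcap, 19);	/* e.g. BSS Transition Management support */
	printf("bitmap is %d bytes, byte 2 = 0x%02x\n", EX_EXTCAP_LEN, (unsigned)extcap[2]);
	return 0;
}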
#define DOT11_PUB_ACTION_GAS_CB_REQ 12 /* GAS Comeback Request */
#define DOT11_PUB_ACTION_FTM_REQ 32 /* FTM request */
#define DOT11_PUB_ACTION_FTM 33 /* FTM measurement */
-#define DOT11_PUB_ACTION_FTM_REQ_TRIGGER_START 1u /* FTM request start trigger */
-#define DOT11_PUB_ACTION_FTM_REQ_TRIGGER_STOP 0u /* FTM request stop trigger */
/* Block Ack action types */
#define DOT11_BA_ACTION_ADDBA_REQ 0 /* ADDBA Req action frame type */
typedef struct dot11_dls_resp dot11_dls_resp_t;
#define DOT11_DLS_RESP_LEN 16 /* Fixed length */
+
/* ************* 802.11v related definitions. ************* */
/** BSS Management Transition Query frame header */
} BWL_POST_PACKED_STRUCT;
typedef struct dot11_bsstrans_req dot11_bsstrans_req_t;
#define DOT11_BSSTRANS_REQ_LEN 7 /* Fixed length */
-#define DOT11_BSSTRANS_REQ_FIXED_LEN 7u /* Fixed length */
/* BSS Mgmt Transition Request Mode Field - 802.11v */
#define DOT11_BSSTRANS_REQMODE_PREF_LIST_INCL 0x01
#define DOT11_BSSTRANS_RESP_STATUS_REJ_NO_SUITABLE_BSS 7
#define DOT11_BSSTRANS_RESP_STATUS_REJ_LEAVING_ESS 8
+
/** BSS Max Idle Period element */
BWL_PRE_PACKED_STRUCT struct dot11_bss_max_idle_period_ie {
uint8 id; /* 90, DOT11_MNG_BSS_MAX_IDLE_PERIOD_ID */
dot11_tclas_fc_t fc;
} BWL_POST_PACKED_STRUCT;
typedef struct dot11_tclas_ie dot11_tclas_ie_t;
-#define DOT11_TCLAS_IE_LEN 3u /* Fixed length, include id and len */
+#define DOT11_TCLAS_IE_LEN 3 /* Fixed length, include id and len */
/** TCLAS processing element */
BWL_PRE_PACKED_STRUCT struct dot11_tclas_proc_ie {
typedef struct dot11_tclas_proc_ie dot11_tclas_proc_ie_t;
#define DOT11_TCLAS_PROC_IE_LEN 3 /* Fixed length, include id and len */
-#define DOT11_TCLAS_PROC_LEN 1u /* Proc ie length is always 1 byte */
-
#define DOT11_TCLAS_PROC_MATCHALL 0 /* All high level elements need to match */
#define DOT11_TCLAS_PROC_MATCHONE 1 /* One high level element needs to match */
#define DOT11_TCLAS_PROC_NONMATCH 2 /* No match to any high level element */
+
/* TSPEC element defined in 802.11 std section 8.4.2.32 - Not supported */
#define DOT11_TSPEC_IE_LEN 57 /* Fixed length */
} BWL_POST_PACKED_STRUCT;
typedef struct dot11_tfs_se dot11_tfs_se_t;
+
/** TFS response element */
BWL_PRE_PACKED_STRUCT struct dot11_tfs_resp_ie {
uint8 id; /* 92, DOT11_MNG_TFS_RESPONSE_ID */
uint8 data[1];
} BWL_POST_PACKED_STRUCT;
typedef struct dot11_tfs_resp_ie dot11_tfs_resp_ie_t;
-#define DOT11_TFS_RESP_IE_LEN 1u /* Fixed length, without id and len */
+#define DOT11_TFS_RESP_IE_LEN 1 /* Fixed length, without id and len */
/** TFS response subelement IDs (same subelements, but different IDs than in TFS request) */
#define DOT11_TFS_RESP_TFS_STATUS_SE_ID 1
typedef struct dot11_tfs_notify_resp dot11_tfs_notify_resp_t;
#define DOT11_TFS_NOTIFY_RESP_LEN 3 /* Fixed length */
+
/** WNM-Sleep Management Request frame header */
BWL_PRE_PACKED_STRUCT struct dot11_wnm_sleep_req {
uint8 category; /* category of action frame (10) */
/* 11r - Size of the RDE (RIC Data Element) IE, including TLV header. */
#define DOT11_MNG_RDE_IE_LEN sizeof(dot11_rde_ie_t)
+
/* ************* 802.11k related definitions. ************* */
/* Radio measurements enabled capability ie */
struct ether_addr bssid;
} BWL_POST_PACKED_STRUCT;
typedef struct dot11_rmreq_bcn dot11_rmreq_bcn_t;
-#define DOT11_RMREQ_BCN_LEN 18u
+#define DOT11_RMREQ_BCN_LEN 18
BWL_PRE_PACKED_STRUCT struct dot11_rmrep_bcn {
uint8 reg;
#define DOT11_RMREQ_BCN_REPDET_ID 2
#define DOT11_RMREQ_BCN_REQUEST_ID 10
#define DOT11_RMREQ_BCN_APCHREP_ID DOT11_MNG_AP_CHREP_ID
-#define DOT11_RMREQ_BCN_LAST_RPT_IND_REQ_ID 164
/* Reporting Detail element definition */
#define DOT11_RMREQ_BCN_REPDET_FIXED 0 /* Fixed length fields only */
#define DOT11_RMREQ_BCN_REPINFO_LEN 2 /* Beacon Reporting Information length */
#define DOT11_RMREQ_BCN_REPCOND_DEFAULT 0 /* Report to be issued after each measurement */
-/* Last Beacon Report Indication Request definition */
-#define DOT11_RMREQ_BCN_LAST_RPT_IND_REQ_ENAB 1
-
-BWL_PRE_PACKED_STRUCT struct dot11_rmrep_last_bcn_rpt_ind_req {
- uint8 id; /* DOT11_RMREQ_BCN_LAST_RPT_IND_REQ_ID */
- uint8 len; /* length of remaining fields */
- uint8 data; /* data = 1 means last bcn rpt ind requested */
-} BWL_POST_PACKED_STRUCT;
-typedef struct dot11_rmrep_last_bcn_rpt_ind_req dot11_rmrep_last_bcn_rpt_ind_req_t;
-
/* Sub-element IDs for Beacon Report */
#define DOT11_RMREP_BCN_FRM_BODY 1
-#define DOT11_RMREP_BCN_FRM_BODY_FRAG_ID 2
-#define DOT11_RMREP_BCN_LAST_RPT_IND 164
#define DOT11_RMREP_BCN_FRM_BODY_LEN_MAX 224 /* 802.11k-2008 7.3.2.22.6 */
-/* Refer IEEE P802.11-REVmd/D1.0 9.4.2.21.7 Beacon report */
-BWL_PRE_PACKED_STRUCT struct dot11_rmrep_bcn_frm_body_fragmt_id {
- uint8 id; /* DOT11_RMREP_BCN_FRM_BODY_FRAG_ID */
- uint8 len; /* length of remaining fields */
- /* More fragments(B15), fragment Id(B8-B14), Bcn rpt instance ID (B0 - B7) */
- uint16 frag_info_rpt_id;
-} BWL_POST_PACKED_STRUCT;
-
-typedef struct dot11_rmrep_bcn_frm_body_fragmt_id dot11_rmrep_bcn_frm_body_fragmt_id_t;
-
-BWL_PRE_PACKED_STRUCT struct dot11_rmrep_bcn_frm_body_frag_id {
- uint8 id; /* DOT11_RMREP_BCN_FRM_BODY_FRAG_ID */
- uint8 len; /* length of remaining fields */
- uint8 bcn_rpt_id; /* Bcn rpt instance ID */
- uint8 frag_info; /* fragment Id(7 bits) | More fragments(1 bit) */
-} BWL_POST_PACKED_STRUCT;
-
-typedef struct dot11_rmrep_bcn_frm_body_frag_id dot11_rmrep_bcn_frm_body_frag_id_t;
-#define DOT11_RMREP_BCNRPT_FRAG_ID_DATA_LEN 2u
-#define DOT11_RMREP_BCNRPT_FRAG_ID_SE_LEN sizeof(dot11_rmrep_bcn_frm_body_frag_id_t)
-#define DOT11_RMREP_BCNRPT_FRAG_ID_NUM_SHIFT 1u
-#define DOT11_RMREP_BCNRPT_FRAGMT_ID_SE_LEN sizeof(dot11_rmrep_bcn_frm_body_fragmt_id_t)
-#define DOT11_RMREP_BCNRPT_BCN_RPT_ID_MASK 0x00FFu
-#define DOT11_RMREP_BCNRPT_FRAGMT_ID_NUM_SHIFT 8u
-#define DOT11_RMREP_BCNRPT_FRAGMT_ID_NUM_MASK 0x7F00u
-#define DOT11_RMREP_BCNRPT_MORE_FRAG_SHIFT 15u
-#define DOT11_RMREP_BCNRPT_MORE_FRAG_MASK 0x8000u
-
-BWL_PRE_PACKED_STRUCT struct dot11_rmrep_last_bcn_rpt_ind {
- uint8 id; /* DOT11_RMREP_BCN_LAST_RPT_IND */
- uint8 len; /* length of remaining fields */
- uint8 data; /* data = 1 is last bcn rpt */
-} BWL_POST_PACKED_STRUCT;
-
-typedef struct dot11_rmrep_last_bcn_rpt_ind dot11_rmrep_last_bcn_rpt_ind_t;
-#define DOT11_RMREP_LAST_BCN_RPT_IND_DATA_LEN 1
-#define DOT11_RMREP_LAST_BCN_RPT_IND_SE_LEN sizeof(dot11_rmrep_last_bcn_rpt_ind_t)
-
/* Sub-element IDs for Frame Report */
#define DOT11_RMREP_FRAME_COUNT_REPORT 1
typedef struct dot11_rmreq_pause_time dot11_rmreq_pause_time_t;
#define DOT11_RMREQ_PAUSE_LEN 7
+
/* Neighbor Report subelements ID (11k & 11v) */
#define DOT11_NGBR_TSF_INFO_SE_ID 1
#define DOT11_NGBR_CCS_SE_ID 2
uint8 data[1]; /* Variable size subelements */
} BWL_POST_PACKED_STRUCT;
typedef struct dot11_neighbor_rep_ie dot11_neighbor_rep_ie_t;
-#define DOT11_NEIGHBOR_REP_IE_FIXED_LEN 13u
+#define DOT11_NEIGHBOR_REP_IE_FIXED_LEN 13
+
/* MLME Enumerations */
#define DOT11_BSSTYPE_INFRASTRUCTURE 0 /* d11 infrastructure */
#define BRCM_PROP_OUI "\x00\x90\x4C"
+
#define BRCM_FTM_IE_TYPE 14
/* #define HT_CAP_IE_TYPE 51
BRCM_FTM_VS_COLLECT_SUBTYPE = 2, /* FTM Collect debug protocol */
};
-/*
- * This BRCM_PROP_OUI types is intended for use in events to embed additional
- * data, and would not be expected to appear on the air -- but having an IE
- * format allows IE frame data with extra data in events in that allows for
- * more flexible parsing.
- */
-#define BRCM_EVT_WL_BSS_INFO 64
-
-/**
- * Following is the generic structure for brcm_prop_ie (uses BRCM_PROP_OUI).
- * DPT uses this format with type set to DPT_IE_TYPE
- */
-BWL_PRE_PACKED_STRUCT struct brcm_prop_ie_s {
- uint8 id; /* IE ID, 221, DOT11_MNG_PROPR_ID */
- uint8 len; /* IE length */
- uint8 oui[3];
- uint8 type; /* type of this IE */
- uint16 cap; /* DPT capabilities */
-} BWL_POST_PACKED_STRUCT;
-typedef struct brcm_prop_ie_s brcm_prop_ie_t;
-#define BRCM_PROP_IE_LEN 6 /* len of fixed part of brcm_prop ie */
-#define DPT_IE_TYPE 2
-
-#define BRCM_SYSCAP_IE_TYPE 3
-#define WET_TUNNEL_IE_TYPE 3
/* brcm syscap_ie cap */
#define BRCM_SYSCAP_WET_TUNNEL 0x0100 /* Device with WET_TUNNEL support */
uchar id;
uchar len;
uchar oui [3];
- uchar data [1]; /* Variable size data */
+ uchar data [1]; /* Variable size data */
} BWL_POST_PACKED_STRUCT;
typedef struct vndr_ie vndr_ie_t;
-#define VNDR_IE_HDR_LEN 2u /* id + len field */
-#define VNDR_IE_MIN_LEN 3u /* size of the oui field */
+#define VNDR_IE_HDR_LEN 2 /* id + len field */
+#define VNDR_IE_MIN_LEN 3 /* size of the oui field */
#define VNDR_IE_FIXED_LEN (VNDR_IE_HDR_LEN + VNDR_IE_MIN_LEN)
-#define VNDR_IE_MAX_LEN 255u /* vendor IE max length, without ID and len */
+#define VNDR_IE_MAX_LEN 255 /* vendor IE max length, without ID and len */
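A vendor-specific IE is the two-byte id/len header followed by a three-byte OUI and the vendor payload, so the length octet covers VNDR_IE_MIN_LEN plus the data. A minimal sketch of filling one in, using an invented OUI and payload:

/* Sketch only: the OUI and payload bytes are made up for the example. */
#include <stdio.h>
#include <string.h>
#include <stdint.h>

int main(void)
{
	const uint8_t oui[3] = { 0x00, 0x11, 0x22 };		/* example OUI */
	const uint8_t payload[4] = { 0x01, 0x02, 0x03, 0x04 };	/* example vendor data */
	uint8_t ie[2 + 3 + sizeof(payload)];

	ie[0] = 221;						/* vendor-specific element id */
	ie[1] = (uint8_t)(sizeof(oui) + sizeof(payload));	/* len covers OUI + data = 7 */
	memcpy(&ie[2], oui, sizeof(oui));
	memcpy(&ie[5], payload, sizeof(payload));

	printf("vendor IE: id %u, len %u, total %zu bytes\n",
	       (unsigned)ie[0], (unsigned)ie[1], sizeof(ie));
	return 0;
}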
/** BRCM PROP DEVICE PRIMARY MAC ADDRESS IE */
BWL_PRE_PACKED_STRUCT struct member_of_brcm_prop_ie {
/* ************* HT definitions. ************* */
#define MCSSET_LEN 16 /* 16-bits per 8-bit set to give 128-bits bitmap of MCS Index */
#define MAX_MCS_NUM (128) /* max mcs number = 128 */
-#define BASIC_HT_MCS 0xFFu /* HT MCS supported rates */
BWL_PRE_PACKED_STRUCT struct ht_cap_ie {
uint16 cap;
#define HT_CAP_RX_STBC_TWO_STREAM 0x2 /* rx STBC support of 1-2 spatial streams */
#define HT_CAP_RX_STBC_THREE_STREAM 0x3 /* rx STBC support of 1-3 spatial streams */
+
#define HT_CAP_TXBF_CAP_IMPLICIT_TXBF_RX 0x1
#define HT_CAP_TXBF_CAP_NDP_RX 0x8
#define HT_CAP_TXBF_CAP_NDP_TX 0x10
#define AMPDU_RX_FACTOR_BASE 8*1024 /* ampdu factor base for rx len */
#define AMPDU_RX_FACTOR_BASE_PWR 13 /* ampdu factor base for rx len in power of 2 */
-#define AMPDU_DELIMITER_LEN 4u /* length of ampdu delimiter */
+#define AMPDU_DELIMITER_LEN 4 /* length of ampdu delimiter */
#define AMPDU_DELIMITER_LEN_MAX 63 /* max length of ampdu delimiter(enforced in HW) */
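The A-MPDU factor encodes the maximum receive length as 2^(AMPDU_RX_FACTOR_BASE_PWR + exponent) - 1 octets, which is why the base above is 8 KB. A minimal sketch expanding the four possible exponent values:

/* Sketch only: prints the HT maximum A-MPDU length for exponents 0..3. */
#include <stdio.h>

int main(void)
{
	unsigned int exponent;

	for (exponent = 0; exponent <= 3; exponent++)
		printf("exponent %u -> max A-MPDU %u octets\n",
		       exponent, (1u << (13 + exponent)) - 1);
	return 0;
}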
#define HT_CAP_EXT_PCO 0x0001
#define VHT_MPDU_LIMIT_8K 7991
#define VHT_MPDU_LIMIT_11K 11454
+
/**
* VHT Operation IE (sec 8.4.2.161)
*/
VHT_OP_CHAN_WIDTH_80_80 = 3 /* deprecated - IEEE 802.11 REVmc D8.0 Table 11-25 */
} vht_op_chan_width_t;
-#define VHT_OP_INFO_LEN 3
-
/* AID length */
#define AID_IE_LEN 2
/**
VHT_MCS_SS_SUPPORTED(2, mcsMap) ? 2 : \
VHT_MCS_SS_SUPPORTED(1, mcsMap) ? 1 : 0
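The VHT supported MCS map gives each spatial stream a two-bit field, with the value 3 marking an unsupported stream, so the cascade above walks down from the highest stream to find the maximum NSS. A minimal sketch of that decode, assuming ex_* helpers and an example map value:

/* Sketch only: ex_* helpers are invented; field value 3 means "not supported". */
#include <stdio.h>
#include <stdint.h>

static int ex_vht_ss_supported(unsigned int nss, uint16_t mcs_map)
{
	return ((mcs_map >> ((nss - 1) * 2)) & 0x3) != 0x3;
}

static unsigned int ex_vht_max_ss(uint16_t mcs_map)
{
	unsigned int nss;

	for (nss = 8; nss >= 1; nss--)
		if (ex_vht_ss_supported(nss, mcs_map))
			return nss;
	return 0;
}

int main(void)
{
	uint16_t mcs_map = 0xFFFA;	/* SS1-SS2 supported (field value 2), SS3-SS8 = 3 */

	printf("max supported spatial streams: %u\n", ex_vht_max_ss(mcs_map));
	return 0;
}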
-#ifdef IBSS_RMC
-/* customer's OUI */
-#define RMC_PROP_OUI "\x00\x16\x32"
-#endif // endif
-
/* ************* WPA definitions. ************* */
#define WPA_OUI "\x00\x50\xF2" /* WPA OUI */
#define WPA_OUI_LEN 3 /* WPA OUI length */
#define WPA_OUI_TYPE 1
#define WPA_VERSION 1 /* WPA version */
-#define WPA_VERSION_LEN 2 /* WPA version length */
-
-/* ************* WPA2 definitions. ************* */
#define WPA2_OUI "\x00\x0F\xAC" /* WPA2 OUI */
#define WPA2_OUI_LEN 3 /* WPA2 OUI length */
#define WPA2_VERSION 1 /* WPA2 version */
#define WPA2_VERSION_LEN 2 /* WPA2 version length */
-#define MAX_RSNE_SUPPORTED_VERSION WPA2_VERSION /* Max supported version */
/* ************* WPS definitions. ************* */
#define WPS_OUI "\x00\x50\xF2" /* WPS OUI */
#define WPS_OUI_LEN 3 /* WPS OUI length */
#define WPS_OUI_TYPE 4
-/* ************* TPC definitions. ************* */
-#define TPC_OUI "\x00\x50\xF2" /* TPC OUI */
-#define TPC_OUI_LEN 3 /* TPC OUI length */
-#define TPC_OUI_TYPE 8
-#define WFA_OUI_TYPE_TPC 8 /* deprecated */
-
/* ************* WFA definitions. ************* */
-#define WFA_OUI "\x50\x6F\x9A" /* WFA OUI */
-#define WFA_OUI_LEN 3 /* WFA OUI length */
-#define WFA_OUI_TYPE_P2P 9
+#if defined(WL_LEGACY_P2P)
+#define MAC_OUI "\x00\x17\xF2" /* MACOSX OUI */
+#define MAC_OUI_TYPE_P2P 5
+#endif
-#ifdef WL_LEGACY_P2P
-#define APPLE_OUI "\x00\x17\xF2" /* MACOSX OUI */
-#define APPLE_OUI_LEN 3
-#define APPLE_OUI_TYPE_P2P 5
-#endif /* WL_LEGACY_P2P */
-
-#ifndef WL_LEGACY_P2P
-#define P2P_OUI WFA_OUI
-#define P2P_OUI_LEN WFA_OUI_LEN
-#define P2P_OUI_TYPE WFA_OUI_TYPE_P2P
+#ifdef P2P_IE_OVRD
+#define WFA_OUI MAC_OUI
+#else
+#define WFA_OUI "\x50\x6F\x9A" /* WFA OUI */
+#endif /* P2P_IE_OVRD */
+#define WFA_OUI_LEN 3 /* WFA OUI length */
+#ifdef P2P_IE_OVRD
+#define WFA_OUI_TYPE_P2P MAC_OUI_TYPE_P2P
#else
-#define P2P_OUI APPLE_OUI
-#define P2P_OUI_LEN APPLE_OUI_LEN
-#define P2P_OUI_TYPE APPLE_OUI_TYPE_P2P
-#endif /* !WL_LEGACY_P2P */
+#define WFA_OUI_TYPE_TPC 8
+#define WFA_OUI_TYPE_P2P 9
+#endif
+#define WFA_OUI_TYPE_TPC 8
#ifdef WLTDLS
#define WFA_OUI_TYPE_TPQ 4 /* WFD Tunneled Probe ReQuest */
#define WFA_OUI_TYPE_TPS 5 /* WFD Tunneled Probe ReSponse */
#define WFA_OUI_TYPE_MBO_OCE 0x16
/* RSN authenticated key managment suite */
-#define RSN_AKM_NONE 0 /* None (IBSS) */
-#define RSN_AKM_UNSPECIFIED 1 /* Over 802.1x */
-#define RSN_AKM_PSK 2 /* Pre-shared Key */
-#define RSN_AKM_FBT_1X 3 /* Fast Bss transition using 802.1X */
-#define RSN_AKM_FBT_PSK 4 /* Fast Bss transition using Pre-shared Key */
+#define RSN_AKM_NONE 0 /* None (IBSS) */
+#define RSN_AKM_UNSPECIFIED 1 /* Over 802.1x */
+#define RSN_AKM_PSK 2 /* Pre-shared Key */
+#define RSN_AKM_FBT_1X 3 /* Fast Bss transition using 802.1X */
+#define RSN_AKM_FBT_PSK 4 /* Fast Bss transition using Pre-shared Key */
/* RSN_AKM_MFP_1X and RSN_AKM_MFP_PSK are not used any more
* Just kept here to avoid build issue in BISON/CARIBOU branch
*/
-#define RSN_AKM_MFP_1X 5 /* SHA256 key derivation, using 802.1X */
-#define RSN_AKM_MFP_PSK 6 /* SHA256 key derivation, using Pre-shared Key */
-#define RSN_AKM_SHA256_1X 5 /* SHA256 key derivation, using 802.1X */
-#define RSN_AKM_SHA256_PSK 6 /* SHA256 key derivation, using Pre-shared Key */
-#define RSN_AKM_TPK 7 /* TPK(TDLS Peer Key) handshake */
-#define RSN_AKM_SAE_PSK 8 /* AKM for SAE with 4-way handshake */
-#define RSN_AKM_SAE_FBT 9 /* AKM for SAE with FBT */
-#define RSN_AKM_SUITEB_SHA256_1X 11 /* Suite B SHA256 */
-#define RSN_AKM_SUITEB_SHA384_1X 12 /* Suite B-192 SHA384 */
-#define RSN_AKM_FBT_SHA384_1X 13 /* FBT SHA384 */
-#define RSN_AKM_FILS_SHA256 14 /* SHA256 key derivation, using FILS */
-#define RSN_AKM_FILS_SHA384 15 /* SHA384 key derivation, using FILS */
-#define RSN_AKM_FBT_SHA256_FILS 16
-#define RSN_AKM_FBT_SHA384_FILS 17
-#define RSN_AKM_OWE 18 /* RFC 8110 OWE */
-#define RSN_AKM_FBT_SHA384_PSK 19
-#define RSN_AKM_PSK_SHA384 20
+#define RSN_AKM_MFP_1X 5 /* SHA256 key derivation, using 802.1X */
+#define RSN_AKM_MFP_PSK 6 /* SHA256 key derivation, using Pre-shared Key */
+#define RSN_AKM_SHA256_1X 5 /* SHA256 key derivation, using 802.1X */
+#define RSN_AKM_SHA256_PSK 6 /* SHA256 key derivation, using Pre-shared Key */
+#define RSN_AKM_TPK 7 /* TPK(TDLS Peer Key) handshake */
+#define RSN_AKM_FILS_SHA256 14 /* SHA256 key derivation, using FILS */
+#define RSN_AKM_FILS_SHA384 15 /* SHA384 key derivation, using FILS */
+
/* OSEN authenticated key managment suite */
#define OSEN_AKM_UNSPECIFIED RSN_AKM_UNSPECIFIED /* Over 802.1x */
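Each AKM advertised in an RSN IE is a four-byte suite selector: the three-byte OUI 00-0F-AC followed by one of the RSN_AKM_* numbers above. A minimal sketch composing the selector for pre-shared key:

/* Sketch only: builds the 00-0F-AC:2 (PSK) suite selector as raw bytes. */
#include <stdio.h>
#include <string.h>
#include <stdint.h>

int main(void)
{
	const uint8_t rsn_oui[3] = { 0x00, 0x0F, 0xAC };
	uint8_t akm_psk[4];

	memcpy(akm_psk, rsn_oui, 3);
	akm_psk[3] = 2;	/* pre-shared key AKM type */
	printf("AKM suite: %02X-%02X-%02X:%u\n",
	       (unsigned)akm_psk[0], (unsigned)akm_psk[1],
	       (unsigned)akm_psk[2], (unsigned)akm_psk[3]);
	return 0;
}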
#define WCN_OUI "\x00\x50\xf2" /* WCN OUI */
#define WCN_TYPE 4 /* WCN type */
-#ifdef BCMWAPI_WPI
-#define SMS4_KEY_LEN 16
-#define SMS4_WPI_CBC_MAC_LEN 16
-#endif // endif
/* 802.11r protocol definitions */
/** Mobility Domain IE */
BWL_PRE_PACKED_STRUCT struct dot11_mdid_ie {
uint8 id;
- uint8 len; /* DOT11_MDID_IE_DATA_LEN (3) */
+ uint8 len;
uint16 mdid; /* Mobility Domain Id */
uint8 cap;
} BWL_POST_PACKED_STRUCT;
typedef struct dot11_mdid_ie dot11_mdid_ie_t;
-/* length of data portion of Mobility Domain IE */
-#define DOT11_MDID_IE_DATA_LEN 3
-#define DOT11_MDID_LEN 2
#define FBT_MDID_CAP_OVERDS 0x01 /* Fast Bss transition over the DS support */
#define FBT_MDID_CAP_RRP 0x02 /* Resource request protocol support */
-/* Fast Bss Transition IE */
-#ifdef FT_IE_VER_V2
-typedef BWL_PRE_PACKED_STRUCT struct dot11_ft_ie_v2 {
- uint8 id;
- uint8 len;
- uint16 mic_control;
- /* dynamic offset to following mic[], anonce[], snonce[] */
-} BWL_POST_PACKED_STRUCT dot11_ft_ie_v2;
-typedef struct dot11_ft_ie_v2 dot11_ft_ie_t;
-#else
+/** Fast Bss Transition IE */
BWL_PRE_PACKED_STRUCT struct dot11_ft_ie {
uint8 id;
- uint8 len; /* At least equal to DOT11_FT_IE_FIXED_LEN (82) */
+ uint8 len;
uint16 mic_control; /* Mic Control */
uint8 mic[16];
uint8 anonce[32];
uint8 snonce[32];
- /* Optional sub-elements follow */
} BWL_POST_PACKED_STRUCT;
typedef struct dot11_ft_ie dot11_ft_ie_t;
-/* Fixed length of data portion of Fast BSS Transition IE. There could be
- * optional parameters, which if present, could raise the FT IE length to 255.
- */
-#define DOT11_FT_IE_FIXED_LEN 82
-#endif /* FT_IE_VER_V2 */
-
-#ifdef FT_IE_VER_V2
-#define DOT11_FT_IE_LEN(mic_len) (sizeof(dot11_ft_ie_v2) + mic_len + EAPOL_WPA_KEY_NONCE_LEN *2)
-#define FT_IE_MIC(pos) ((uint8 *)pos + sizeof(dot11_ft_ie_v2))
-#define FT_IE_ANONCE(pos, mic_len) ((uint8 *)pos + sizeof(dot11_ft_ie_v2) + mic_len)
-#define FT_IE_SNONCE(pos, mic_len) ((uint8 *)pos + sizeof(dot11_ft_ie_v2) + mic_len + \
- EAPOL_WPA_KEY_NONCE_LEN)
-#else
-#define DOT11_FT_IE_LEN(mic_len) sizeof(dot11_ft_ie)
-#define FT_IE_MIC(pos) ((uint8 *)&pos->mic)
-#define FT_IE_ANONCE(pos, mic_len) ((uint8 *)&pos->anonce)
-#define FT_IE_SNONCE(pos, mic_len) ((uint8 *)&pos->snonce)
-#endif /* FT_IE_VER_V2 */
#define TIE_TYPE_RESERVED 0
#define TIE_TYPE_REASSOC_DEADLINE 1
#define TIE_TYPE_KEY_LIEFTIME 2
#define BSSID_INVALID "\x00\x00\x00\x00\x00\x00"
#define BSSID_BROADCAST "\xFF\xFF\xFF\xFF\xFF\xFF"
-#ifdef BCMWAPI_WAI
-#define WAPI_IE_MIN_LEN 20 /* WAPI IE min length */
-#define WAPI_VERSION 1 /* WAPI version */
-#define WAPI_VERSION_LEN 2 /* WAPI version length */
-#define WAPI_OUI "\x00\x14\x72" /* WAPI OUI */
-#define WAPI_OUI_LEN DOT11_OUI_LEN /* WAPI OUI length */
-#endif /* BCMWAPI_WAI */
/* ************* WMM Parameter definitions. ************* */
#define WMM_OUI "\x00\x50\xF2" /* WMM OUI */
struct ether_addr tdls_resp_mac;
} BWL_POST_PACKED_STRUCT;
typedef struct link_id_ie link_id_ie_t;
-#define TDLS_LINK_ID_IE_LEN 18u
+#define TDLS_LINK_ID_IE_LEN 18
/** Link Wakeup Schedule Element */
BWL_PRE_PACKED_STRUCT struct wakeup_sch_ie {
} BWL_POST_PACKED_STRUCT;
typedef struct dot11_ftm dot11_ftm_t;
+
#define DOT11_FTM_ERR_NOT_CONT_OFFSET 1
#define DOT11_FTM_ERR_NOT_CONT_MASK 0x80
#define DOT11_FTM_ERR_NOT_CONT_SHIFT 7
} BWL_POST_PACKED_STRUCT;
typedef struct dot11_ftm_vs_ie dot11_ftm_vs_ie_t;
-/* same as payload of dot11_ftm_vs_ie.
-* This definition helps in having struct access
-* of pay load while building FTM VS IE from other modules(NAN)
-*/
-BWL_PRE_PACKED_STRUCT struct dot11_ftm_vs_ie_pyld {
- uint8 sub_type; /* BRCM_FTM_IE_TYPE (or Customer) */
- uint8 version;
- ftm_vs_tlv_t tlvs[1];
-} BWL_POST_PACKED_STRUCT;
-typedef struct dot11_ftm_vs_ie_pyld dot11_ftm_vs_ie_pyld_t;
-
/* ftm vs api version */
#define BCM_FTM_VS_PARAMS_VERSION 0x01
FTM_VS_TLV_SEC_PARAMS = 3, /* security parameters (in either) */
FTM_VS_TLV_SEQ_PARAMS = 4, /* toast parameters (FTM_REQ, BRCM proprietary) */
FTM_VS_TLV_MF_BUF = 5, /* multi frame buffer - may span ftm vs ie's */
- FTM_VS_TLV_TIMING_PARAMS = 6, /* timing adjustments */
- FTM_VS_TLV_MF_STATS_BUF = 7 /* multi frame statistics buffer */
+ FTM_VS_TLV_TIMING_PARAMS = 6 /* timing adjustments */
/* add additional types above */
};
#define FTM_TPK_RI_PHY_LEN_SECURE_2_0 14
#define FTM_TPK_RR_PHY_LEN_SECURE_2_0 14
+
BWL_PRE_PACKED_STRUCT struct dot11_ftm_vs_params {
uint8 id; /* DOT11_MNG_VS_ID */
uint8 len;
DOT11_MNG_IE_ID_EXT_MATCH(_ie, DOT11_MNG_FTM_SYNC_INFO) && \
(_ie)->len == DOT11_FTM_SYNC_INFO_IE_LEN)
-BWL_PRE_PACKED_STRUCT struct dot11_dh_param_ie {
- uint8 id; /* OWE */
- uint8 len;
- uint8 ext_id; /* EXT_MNG_OWE_DH_PARAM_ID */
- uint16 group;
- uint8 pub_key[0];
-} BWL_POST_PACKED_STRUCT;
-typedef struct dot11_dh_param_ie dot11_dh_param_ie_t;
-
-#define DOT11_DH_EXTID_OFFSET (OFFSETOF(dot11_dh_param_ie_t, ext_id))
-
-#define DOT11_OWE_DH_PARAM_IE(_ie) (\
- DOT11_MNG_IE_ID_EXT_MATCH(_ie, EXT_MNG_OWE_DH_PARAM_ID))
-
-#define DOT11_MNG_OWE_IE_ID_EXT_INIT(_ie, _id, _len) do {\
- (_ie)->id = DOT11_MNG_ID_EXT_ID; \
- (_ie)->len = _len; \
- (_ie)->ext_id = _id; \
-} while (0)
-
/* 802.11u interworking access network options */
#define IW_ANT_MASK 0x0f
#define IW_INTERNET_MASK 0x10
#define ANQP_ID_DOMAIN_NAME_LIST 268
#define ANQP_ID_EMERGENCY_ALERT_ID_URI 269
#define ANQP_ID_EMERGENCY_NAI 271
-#define ANQP_ID_NEIGHBOR_REPORT 272
#define ANQP_ID_VENDOR_SPECIFIC_LIST 56797
-/* 802.11u ANQP ID len */
-#define ANQP_INFORMATION_ID_LEN 2
-
/* 802.11u ANQP OUI */
#define ANQP_OUI_SUBTYPE 9
#define BCM_AIBSS_IE_TYPE 56
-#define SSE_OUI "\x00\x00\xF0"
-#define VENDOR_ENTERPRISE_STA_OUI_TYPE 0x22
-#define MAX_VSIE_DISASSOC (1)
-#define DISCO_VSIE_LEN 0x09u
-
/* This marks the end of a packed structure section. */
#include <packed_section_end.h>
/*
* 802.11e protocol header file
*
- * Copyright (C) 1999-2019, Broadcom.
- *
+ * Copyright (C) 1999-2017, Broadcom Corporation
+ *
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
* under the terms of the GNU General Public License version 2 (the "GPL"),
* available at http://www.broadcom.com/licenses/GPLv2.php, with the
* following added to such license:
- *
+ *
* As a special exception, the copyright holders of this software give you
* permission to link this software with independent modules, and to copy and
* distribute the resulting executable under terms of your choice, provided that
* the license of that module. An independent module is a module which is not
* derived from this software. The special exception does not apply to any
* modifications of the software.
- *
+ *
* Notwithstanding the above, under no circumstances may you combine this
* software in any way with any other Broadcom software provided under a license
* other than the GPL, without Broadcom's express prior written consent.
*
* <<Broadcom-WL-IPTag/Open:>>
*
- * $Id: 802.11e.h 785355 2018-10-18 05:32:56Z $
+ * $Id: 802.11e.h 700076 2017-05-17 14:42:22Z $
*/
#ifndef _802_11e_H_
#ifndef _TYPEDEFS_H_
#include <typedefs.h>
-#endif // endif
+#endif
/* This marks the start of a packed structure section. */
#include <packed_section_start.h>
#define DOT11E_STATUS_ADDTS_INVALID_PARAM 1 /* TSPEC invalid parameter status */
#define DOT11E_STATUS_ADDTS_REFUSED_NSBW 3 /* ADDTS refused (non-sufficient BW) */
#define DOT11E_STATUS_ADDTS_REFUSED_AWHILE 47 /* ADDTS refused but could retry later */
-#ifdef BCMCCX
-#define CCX_STATUS_ASSOC_DENIED_UNKNOWN 0xc8 /* unspecified QoS related failure */
-#define CCX_STATUS_ASSOC_DENIED_AP_POLICY 0xc9 /* TSPEC refused due to AP policy */
-#define CCX_STATUS_ASSOC_DENIED_NO_BW 0xca /* Assoc denied due to AP insufficient BW */
-#define CCX_STATUS_ASSOC_DENIED_BAD_PARAM 0xcb /* one or more TSPEC with invalid parameter */
-#endif /* BCMCCX */
/* 802.11e DELTS status code */
#define DOT11E_STATUS_QSTA_LEAVE_QBSS 36 /* STA leave QBSS */
#define DOT11E_STATUS_UNKNOWN_TS 38 /* UNKNOWN TS */
#define DOT11E_STATUS_QSTA_REQ_TIMEOUT 39 /* STA ADDTS request timeout */
+
/* This marks the end of a packed structure section. */
#include <packed_section_end.h>
/*
* Fundamental types and constants relating to 802.11s Mesh
*
- * Copyright (C) 1999-2019, Broadcom.
- *
+ * Copyright (C) 1999-2017, Broadcom Corporation
+ *
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
* under the terms of the GNU General Public License version 2 (the "GPL"),
* available at http://www.broadcom.com/licenses/GPLv2.php, with the
* following added to such license:
- *
+ *
* As a special exception, the copyright holders of this software give you
* permission to link this software with independent modules, and to copy and
* distribute the resulting executable under terms of your choice, provided that
* the license of that module. An independent module is a module which is not
* derived from this software. The special exception does not apply to any
* modifications of the software.
- *
+ *
* Notwithstanding the above, under no circumstances may you combine this
* software in any way with any other Broadcom software provided under a license
* other than the GPL, without Broadcom's express prior written consent.
} BWL_POST_PACKED_STRUCT;
typedef struct mesh_targetinfo mesh_targetinfo_t;
+
/* Mesh PREP IE */
BWL_PRE_PACKED_STRUCT struct mesh_prep_ie {
uint8 id;
} BWL_POST_PACKED_STRUCT;
typedef struct mesh_prep_ie mesh_prep_ie_t;
+
/* Mesh PERR IE */
struct mesh_perr_ie {
uint8 id;
#define MESH_PEERING_STATE_STRINGS \
{"IDLE ", "OPNSNT", "CNFRCV", "OPNRCV", "ESTAB ", "HOLDNG"}
-#ifdef WLMESH
typedef BWL_PRE_PACKED_STRUCT struct mesh_peer_info {
/* mesh_peer_instance as given in the spec. Note that, peer address
* is stored in scb
*/
} BWL_POST_PACKED_STRUCT mesh_peer_info_t;
-typedef BWL_PRE_PACKED_STRUCT struct mesh_peer_info_ext {
- mesh_peer_info_t peer_info;
- uint16 local_aid; /* AID generated by *local* to peer */
- struct ether_addr ea; /* peer ea */
- uint32 entry_state; /* see MESH_PEER_ENTRY_STATE_ACTIVE etc; valid
- * ONLY for internal peering requests
- */
- int rssi;
-} BWL_POST_PACKED_STRUCT mesh_peer_info_ext_t;
-
-/* #ifdef WLMESH */
-typedef BWL_PRE_PACKED_STRUCT struct mesh_peer_info_dump {
- uint32 buflen;
- uint32 version;
- uint32 count; /* number of results */
- mesh_peer_info_ext_t mpi_ext[1];
-} BWL_POST_PACKED_STRUCT mesh_peer_info_dump_t;
-#define WL_MESH_PEER_RES_FIXED_SIZE (sizeof(mesh_peer_info_dump_t) - sizeof(mesh_peer_info_ext_t))
-
-#endif /* WLMESH */
-
/* once an entry is added into mesh_peer_list, if peering is lost, it will
* get retried for peering, MAX_MESH_PEER_ENTRY_RETRIES times. Afterwards, it
* won't get retried and will be moved to MESH_PEER_ENTRY_STATE_TIMEDOUT state,
/*
* Fundamental types and constants relating to 802.1D
*
- * Copyright (C) 1999-2019, Broadcom.
- *
+ * Copyright (C) 1999-2017, Broadcom Corporation
+ *
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
* under the terms of the GNU General Public License version 2 (the "GPL"),
* available at http://www.broadcom.com/licenses/GPLv2.php, with the
* following added to such license:
- *
+ *
* As a special exception, the copyright holders of this software give you
* permission to link this software with independent modules, and to copy and
* distribute the resulting executable under terms of your choice, provided that
* the license of that module. An independent module is a module which is not
* derived from this software. The special exception does not apply to any
* modifications of the software.
- *
+ *
* Notwithstanding the above, under no circumstances may you combine this
* software in any way with any other Broadcom software provided under a license
* other than the GPL, without Broadcom's express prior written consent.
/*
* Fundamental constants relating to 802.3
*
- * Copyright (C) 1999-2019, Broadcom.
- *
+ * Copyright (C) 1999-2017, Broadcom Corporation
+ *
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
* under the terms of the GNU General Public License version 2 (the "GPL"),
* available at http://www.broadcom.com/licenses/GPLv2.php, with the
* following added to such license:
- *
+ *
* As a special exception, the copyright holders of this software give you
* permission to link this software with independent modules, and to copy and
* distribute the resulting executable under terms of your choice, provided that
* the license of that module. An independent module is a module which is not
* derived from this software. The special exception does not apply to any
* modifications of the software.
- *
+ *
* Notwithstanding the above, under no circumstances may you combine this
* software in any way with any other Broadcom software provided under a license
* other than the GPL, without Broadcom's express prior written consent.
/*
* Broadcom AMBA Interconnect definitions.
*
- * Copyright (C) 1999-2019, Broadcom.
- *
+ * Copyright (C) 1999-2017, Broadcom Corporation
+ *
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
* under the terms of the GNU General Public License version 2 (the "GPL"),
* available at http://www.broadcom.com/licenses/GPLv2.php, with the
* following added to such license:
- *
+ *
* As a special exception, the copyright holders of this software give you
* permission to link this software with independent modules, and to copy and
* distribute the resulting executable under terms of your choice, provided that
* the license of that module. An independent module is a module which is not
* derived from this software. The special exception does not apply to any
* modifications of the software.
- *
+ *
* Notwithstanding the above, under no circumstances may you combine this
* software in any way with any other Broadcom software provided under a license
* other than the GPL, without Broadcom's express prior written consent.
*
* <<Broadcom-WL-IPTag/Open:>>
*
- * $Id: aidmp.h 617751 2016-02-08 09:04:22Z $
+ * $Id: aidmp.h 614820 2016-01-23 17:16:17Z $
*/
#ifndef _AIDMP_H
#define SD_SG32 0x00000008
#define SD_SZ_ALIGN 0x00000fff
+
#if !defined(_LANGUAGE_ASSEMBLY) && !defined(__ASSEMBLY__)
typedef volatile struct _aidmp {
#define AI_OOBDINWIDTH 0x364
#define AI_OOBDOUTWIDTH 0x368
+
#define AI_IOCTRLSET 0x400
#define AI_IOCTRLCLEAR 0x404
#define AI_IOCTRL 0x408
#define AI_OOBSEL_7_SHIFT 24
#define AI_IOCTRL_ENABLE_D11_PME (1 << 14)
-/* bit Specific for AI_OOBSELOUTB30 */
-#define OOB_B_ALP_REQUEST 0
-#define OOB_B_HT_REQUEST 1
-#define OOB_B_ILP_REQUEST 2
-#define OOB_B_ALP_AVAIL_REQUEST 3
-#define OOB_B_HT_AVAIL_REQUEST 4
-
/* mask for interrupts from each core to wrapper */
#define AI_OOBSELINA74_CORE_MASK 0x80808080
#define AI_OOBSELINA30_CORE_MASK 0x80808080
/*
* BCM common config options
*
- * Copyright (C) 1999-2019, Broadcom.
- *
+ * Copyright (C) 1999-2017, Broadcom Corporation
+ *
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
* under the terms of the GNU General Public License version 2 (the "GPL"),
* available at http://www.broadcom.com/licenses/GPLv2.php, with the
* following added to such license:
- *
+ *
* As a special exception, the copyright holders of this software give you
* permission to link this software with independent modules, and to copy and
* distribute the resulting executable under terms of your choice, provided that
* the license of that module. An independent module is a module which is not
* derived from this software. The special exception does not apply to any
* modifications of the software.
- *
+ *
* Notwithstanding the above, under no circumstances may you combine this
* software in any way with any other Broadcom software provided under a license
* other than the GPL, without Broadcom's express prior written consent.
*
* <<Broadcom-WL-IPTag/Open:>>
*
- * $Id: bcm_cfg.h 672943 2016-11-30 08:54:06Z $
+ * $Id: bcm_cfg.h 514727 2014-11-12 03:02:48Z $
*/
#ifndef _bcm_cfg_h_
* and instrumentation on top of the heap, without modifying the heap
* allocation implementation.
*
- * Copyright (C) 1999-2019, Broadcom.
- *
+ * Copyright (C) 1999-2017, Broadcom Corporation
+ *
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
* under the terms of the GNU General Public License version 2 (the "GPL"),
* available at http://www.broadcom.com/licenses/GPLv2.php, with the
* following added to such license:
- *
+ *
* As a special exception, the copyright holders of this software give you
* permission to link this software with independent modules, and to copy and
* distribute the resulting executable under terms of your choice, provided that
* the license of that module. An independent module is a module which is not
* derived from this software. The special exception does not apply to any
* modifications of the software.
- *
+ *
* Notwithstanding the above, under no circumstances may you combine this
* software in any way with any other Broadcom software provided under a license
* other than the GPL, without Broadcom's express prior written consent.
#include <typedefs.h> /* needed for uint16 */
+
/*
**************************************************************************
*
struct bcm_mp_pool;
typedef struct bcm_mp_pool *bcm_mp_pool_h;
+
/*
* To make instrumentation more readable, every memory
* pool must have a readable name. Pool names are up to
*/
#define BCM_MP_NAMELEN 8
+
/*
* Type definition for pool statistics.
*/
uint16 failed_alloc; /* Failed allocations. */
} bcm_mp_stats_t;
+
/*
**************************************************************************
*
*/
int bcm_mpm_init(struct osl_info *osh, int max_pools, bcm_mpm_mgr_h *mgrp);
+
/*
* bcm_mpm_deinit() - de-initialize the whole memory pool system.
*
const char poolname[BCM_MP_NAMELEN],
bcm_mp_pool_h *newp);
+
/*
* bcm_mpm_delete_prealloc_pool() - Delete a memory pool. This should only be called after
* all memory objects have been freed back to the pool.
const char poolname[BCM_MP_NAMELEN],
bcm_mp_pool_h *newp);
+
/*
* bcm_mpm_delete_heap_pool() - Delete a memory pool. This should only be called after
* all memory objects have been freed back to the pool.
*/
int bcm_mpm_delete_heap_pool(bcm_mpm_mgr_h mgr, bcm_mp_pool_h *poolp);
+
/*
* bcm_mpm_stats() - Return stats for all pools
*
*/
int bcm_mpm_stats(bcm_mpm_mgr_h mgr, bcm_mp_stats_t *stats, int *nentries);
+
/*
* bcm_mpm_dump() - Display statistics on all pools
*
*/
int bcm_mpm_dump(bcm_mpm_mgr_h mgr, struct bcmstrbuf *b);
+
/*
* bcm_mpm_get_obj_size() - The size of memory objects may need to be padded to
* compensate for alignment requirements of the objects.
*/
int bcm_mpm_get_obj_size(bcm_mpm_mgr_h mgr, unsigned int obj_sz, unsigned int *padded_obj_sz);
+
/*
***************************************************************************
*
***************************************************************************
*/
+
/*
* bcm_mp_alloc() - Allocate a memory pool object.
*
*/
void bcm_mp_stats(bcm_mp_pool_h pool, bcm_mp_stats_t *stats);
+
/*
* bcm_mp_dump() - Dump a pool
*
*/
int bcm_mp_dump(bcm_mp_pool_h pool, struct bcmstrbuf *b);
+
#endif /* _BCM_MPOOL_PUB_H */
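A minimal usage sketch of the pool-manager API above, for orientation while reading the prototypes. It is illustrative only: bcm_mpm_init(), bcm_mpm_get_obj_size(), bcm_mpm_stats(), bcm_mpm_delete_heap_pool() and the bcm_mp_* dump/stats calls follow the declarations shown in this hunk, while the leading parameters of bcm_mpm_create_heap_pool(), bcm_mp_alloc(), bcm_mp_free() and bcm_mpm_deinit() sit outside it and are assumed here.

/* Illustrative sketch of the bcm_mpool manager flow (not part of the patch). */
static int mpool_demo(struct osl_info *osh, struct bcmstrbuf *b)
{
	bcm_mpm_mgr_h mgr = NULL;
	bcm_mp_pool_h pool = NULL;
	bcm_mp_stats_t stats;
	unsigned int padded_sz;
	void *obj;

	if (bcm_mpm_init(osh, 4, &mgr) != BCME_OK)	/* manager for up to 4 pools */
		return BCME_ERROR;

	/* Padded object size accounts for the pool's alignment requirements. */
	bcm_mpm_get_obj_size(mgr, 24, &padded_sz);

	/* Assumed call shape: (mgr, obj_sz, "name", &pool); the real leading
	 * parameters are declared above this hunk.
	 */
	if (bcm_mpm_create_heap_pool(mgr, padded_sz, "demo", &pool) != BCME_OK)
		goto done;

	obj = bcm_mp_alloc(pool);	/* assumed: returns NULL when the pool is exhausted */
	if (obj != NULL)
		bcm_mp_free(pool, obj);	/* assumed: returns the object to the pool */

	bcm_mp_stats(pool, &stats);	/* per-pool counters, incl. failed_alloc */
	bcm_mp_dump(pool, b);
	bcm_mpm_delete_heap_pool(mgr, &pool);
done:
	bcm_mpm_deinit(&mgr);		/* assumed: mirrors bcm_mpm_init() */
	return BCME_OK;
}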
*
* NOTE: A ring of size N, may only hold N-1 elements.
*
- * Copyright (C) 1999-2019, Broadcom.
- *
+ * Copyright (C) 1999-2017, Broadcom Corporation
+ *
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
* under the terms of the GNU General Public License version 2 (the "GPL"),
* available at http://www.broadcom.com/licenses/GPLv2.php, with the
* following added to such license:
- *
+ *
* As a special exception, the copyright holders of this software give you
* permission to link this software with independent modules, and to copy and
* distribute the resulting executable under terms of your choice, provided that
* the license of that module. An independent module is a module which is not
* derived from this software. The special exception does not apply to any
* modifications of the software.
- *
+ *
* Notwithstanding the above, under no circumstances may you combine this
* software in any way with any other Broadcom software provided under a license
* other than the GPL, without Broadcom's express prior written consent.
*
* <<Broadcom-WL-IPTag/Open:>>
*
- * $Id: bcm_ring.h 700321 2017-05-18 16:09:07Z $
+ * $Id: bcm_ring.h 596126 2015-10-29 19:53:48Z $
*/
#ifndef __bcm_ring_included__
#define __bcm_ring_included__
* private L1 data cache.
* +----------------------------------------------------------------------------
*
- * Copyright (C) 1999-2019, Broadcom.
- *
+ * Copyright (C) 1999-2017, Broadcom Corporation
+ *
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
* under the terms of the GNU General Public License version 2 (the "GPL"),
* available at http://www.broadcom.com/licenses/GPLv2.php, with the
* following added to such license:
- *
+ *
* As a special exception, the copyright holders of this software give you
* permission to link this software with independent modules, and to copy and
* distribute the resulting executable under terms of your choice, provided that
* the license of that module. An independent module is a module which is not
* derived from this software. The special exception does not apply to any
* modifications of the software.
- *
+ *
* Notwithstanding the above, under no circumstances may you combine this
* software in any way with any other Broadcom software provided under a license
* other than the GPL, without Broadcom's express prior written consent.
*
- * $Id: bcm_ring.h 700321 2017-05-18 16:09:07Z $
+ * $Id: bcm_ring.h 596126 2015-10-29 19:53:48Z $
*
* -*- Mode: C; tab-width: 4; indent-tabs-mode: t; c-basic-offset: 4 -*-
* vim: set ts=4 noet sw=4 tw=80:
#define __ring_aligned ____cacheline_aligned
#else
#define __ring_aligned
-#endif // endif
+#endif
/* Conditional compile for debug */
/* #define BCM_RING_DEBUG */
int read __ring_aligned; /* READ index in a circular ring */
} bcm_ring_t;
+
static INLINE void bcm_ring_init(bcm_ring_t *ring);
static INLINE void bcm_ring_copy(bcm_ring_t *to, bcm_ring_t *from);
static INLINE bool bcm_ring_is_empty(bcm_ring_t *ring);
const int ring_size);
static INLINE void bcm_ring_cons_all(bcm_ring_t *ring);
+
/**
* bcm_ring_init - initialize a ring context.
* @ring: pointer to a ring context
return (ring->read == ring->write);
}
+
/**
* __bcm_ring_next_write - determine the index where the next write may occur
* (with wrap-around).
return ((ring->write + 1) % ring_size);
}
+
/**
* __bcm_ring_full - support function for ring full test.
* @ring: pointer to a ring context
return (next_write == ring->read);
}
+
/**
* bcm_ring_is_full - "Boolean" test whether a ring is full.
* @ring: pointer to a ring context
return __bcm_ring_full(ring, next_write);
}
+
/**
* bcm_ring_prod_done - commit a previously pending index where production
* was requested.
ring->write = write;
}
+
/**
* bcm_ring_prod_pend - Fetch in "pend" mode, the index where an element may be
* produced.
return rtn;
}
+
/**
* bcm_ring_prod - Fetch and "commit" the next index where a ring element may
* be produced.
return prod_write;
}
+
/**
* bcm_ring_cons_done - commit a previously pending read
* @ring: pointer to a ring context
ring->read = read;
}
+
/**
* bcm_ring_cons_pend - fetch in "pend" mode, the next index where a ring
* element may be consumed.
return rtn;
}
+
/**
* bcm_ring_cons - fetch and "commit" the next index where a ring element may
* be consumed.
return cons_read;
}
+
/**
* bcm_ring_sync_read - on consumption, update peer's read index.
* @peer: pointer to peer's producer ring context
peer->read = self->read; /* flush read update to peer producer */
}
+
/**
* bcm_ring_sync_write - on consumption, update peer's write index.
* @peer: pointer to peer's consumer ring context
peer->write = self->write; /* flush write update to peer consumer */
}
+
/**
* bcm_ring_prod_avail - fetch total number of available empty slots in the
* ring for production.
return prod_avail;
}
+
/**
* bcm_ring_cons_avail - fetch total number of available elements for consumption.
* @ring: pointer to a ring context
return cons_avail;
}
+
/**
* bcm_ring_cons_all - set ring in state where all elements are consumed.
* @ring: pointer to a ring context
ring->read = ring->write;
}
+
/**
* Work Queue
* A work Queue is composed of a ring of work items, of a specified depth.
typedef struct bcm_workq bcm_workq_t;
+
/* #define BCM_WORKQ_DEBUG */
#if defined(BCM_WORKQ_DEBUG)
#define WORKQ_ASSERT(exp) ASSERT(exp)
((__elem_type *)((__workq)->buffer)) + (__index); \
})
+
static INLINE void bcm_workq_init(bcm_workq_t *workq, bcm_workq_t *workq_peer,
void *buffer, int ring_size);
bcm_ring_sync_read(WORKQ_PEER_RING(workq_cons), WORKQ_RING(workq_cons));
}
+
/**
* bcm_workq_prod_refresh - Fetch the updated consumer's read index
* @workq_prod: producer's workq whose read index must be refreshed from peer
bcm_ring_sync_write(WORKQ_RING(workq_cons), WORKQ_PEER_RING(workq_cons));
}
+
#endif /* ! __bcm_ring_h_included__ */
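A short producer/consumer sketch of the ring API documented above. Illustrative only: the ring tracks read/write indices and the element storage is caller-owned (as the workq accessor macro shows); the (ring, ring_size) signatures and the negative-on-full/empty return convention for bcm_ring_prod()/bcm_ring_cons() are assumed from the surrounding comments, since those exact prototypes fall outside this hunk.

/* Illustrative sketch of the index-only bcm_ring (not part of the patch). */
#define DEMO_RING_SIZE 4		/* a ring of size N holds at most N-1 elements */

static int demo_slots[DEMO_RING_SIZE];	/* caller-owned element storage */
static bcm_ring_t demo_ring;

static void demo_ring_usage(void)
{
	int idx;

	bcm_ring_init(&demo_ring);

	/* Producer: fetch-and-commit the next write index.  Assumed to take
	 * (ring, ring_size) and to return a negative sentinel when full.
	 */
	idx = bcm_ring_prod(&demo_ring, DEMO_RING_SIZE);
	if (idx >= 0)
		demo_slots[idx] = 42;

	/* Consumer: fetch-and-commit the next read index (same assumptions). */
	idx = bcm_ring_cons(&demo_ring, DEMO_RING_SIZE);
	if (idx >= 0)
		(void)demo_slots[idx];	/* element at idx is now safe to use */
}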
+++ /dev/null
-/*
- * Fundamental constants relating to ARP Protocol
- *
- * Copyright (C) 1999-2019, Broadcom.
- *
- * Unless you and Broadcom execute a separate written software license
- * agreement governing use of this software, this software is licensed to you
- * under the terms of the GNU General Public License version 2 (the "GPL"),
- * available at http://www.broadcom.com/licenses/GPLv2.php, with the
- * following added to such license:
- *
- * As a special exception, the copyright holders of this software give you
- * permission to link this software with independent modules, and to copy and
- * distribute the resulting executable under terms of your choice, provided that
- * you also meet, for each linked independent module, the terms and conditions of
- * the license of that module. An independent module is a module which is not
- * derived from this software. The special exception does not apply to any
- * modifications of the software.
- *
- * Notwithstanding the above, under no circumstances may you combine this
- * software in any way with any other Broadcom software provided under a license
- * other than the GPL, without Broadcom's express prior written consent.
- *
- *
- * <<Broadcom-WL-IPTag/Open:>>
- *
- * $Id: bcmarp.h 701633 2017-05-25 23:07:17Z $
- */
-
-#ifndef _bcmarp_h_
-#define _bcmarp_h_
-
-#ifndef _TYPEDEFS_H_
-#include <typedefs.h>
-#endif // endif
-#include <bcmip.h>
-
-/* This marks the start of a packed structure section. */
-#include <packed_section_start.h>
-
-#define ARP_OPC_OFFSET 6 /* option code offset */
-#define ARP_SRC_ETH_OFFSET 8 /* src h/w address offset */
-#define ARP_SRC_IP_OFFSET 14 /* src IP address offset */
-#define ARP_TGT_ETH_OFFSET 18 /* target h/w address offset */
-#define ARP_TGT_IP_OFFSET 24 /* target IP address offset */
-
-#define ARP_OPC_REQUEST 1 /* ARP request */
-#define ARP_OPC_REPLY 2 /* ARP reply */
-
-#define ARP_DATA_LEN 28 /* ARP data length */
-
-#define HTYPE_ETHERNET 1 /* htype for ethernet */
-BWL_PRE_PACKED_STRUCT struct bcmarp {
- uint16 htype; /* Header type (1 = ethernet) */
- uint16 ptype; /* Protocol type (0x800 = IP) */
- uint8 hlen; /* Hardware address length (Eth = 6) */
- uint8 plen; /* Protocol address length (IP = 4) */
- uint16 oper; /* ARP_OPC_... */
- uint8 src_eth[ETHER_ADDR_LEN]; /* Source hardware address */
- uint8 src_ip[IPV4_ADDR_LEN]; /* Source protocol address (not aligned) */
- uint8 dst_eth[ETHER_ADDR_LEN]; /* Destination hardware address */
- uint8 dst_ip[IPV4_ADDR_LEN]; /* Destination protocol address */
-} BWL_POST_PACKED_STRUCT;
-
-/* Ethernet header + Arp message */
-BWL_PRE_PACKED_STRUCT struct bcmetharp {
- struct ether_header eh;
- struct bcmarp arp;
-} BWL_POST_PACKED_STRUCT;
-
-/* IPv6 Neighbor Advertisement */
-#define NEIGHBOR_ADVERTISE_SRC_IPV6_OFFSET 8 /* src IPv6 address offset */
-#define NEIGHBOR_ADVERTISE_TYPE_OFFSET 40 /* type offset */
-#define NEIGHBOR_ADVERTISE_CHECKSUM_OFFSET 42 /* check sum offset */
-#define NEIGHBOR_ADVERTISE_FLAGS_OFFSET 44 /* R,S and O flags offset */
-#define NEIGHBOR_ADVERTISE_TGT_IPV6_OFFSET 48 /* target IPv6 address offset */
-#define NEIGHBOR_ADVERTISE_OPTION_OFFSET 64 /* options offset */
-#define NEIGHBOR_ADVERTISE_TYPE 136
-#define NEIGHBOR_SOLICITATION_TYPE 135
-
-#define OPT_TYPE_SRC_LINK_ADDR 1
-#define OPT_TYPE_TGT_LINK_ADDR 2
-
-#define NEIGHBOR_ADVERTISE_DATA_LEN 72 /* neighbor advertisement data length */
-#define NEIGHBOR_ADVERTISE_FLAGS_VALUE 0x60 /* R=0, S=1 and O=1 */
-
-/* This marks the end of a packed structure section. */
-#include <packed_section_end.h>
-
-#endif /* !defined(_bcmarp_h_) */
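For reference while reviewing the removal above, this is how the struct bcmarp fields map onto the ARP wire format. Sketch only: hton16() stands in for whatever byte-order helper the build provides, and ETHER_ADDR_LEN / IPV4_ADDR_LEN come from headers outside this hunk.

/* Illustrative only: filling the (removed) struct bcmarp as an ARP request. */
static void demo_fill_arp_request(struct bcmarp *arp,
	const uint8 *my_mac, const uint8 *my_ip, const uint8 *target_ip)
{
	arp->htype = hton16(HTYPE_ETHERNET);		/* 1 = Ethernet */
	arp->ptype = hton16(0x0800);			/* IPv4 */
	arp->hlen  = ETHER_ADDR_LEN;			/* 6 */
	arp->plen  = IPV4_ADDR_LEN;			/* 4 */
	arp->oper  = hton16(ARP_OPC_REQUEST);
	memcpy(arp->src_eth, my_mac, ETHER_ADDR_LEN);
	memcpy(arp->src_ip,  my_ip,  IPV4_ADDR_LEN);
	memset(arp->dst_eth, 0, ETHER_ADDR_LEN);	/* target MAC unknown in a request */
	memcpy(arp->dst_ip,  target_ip, IPV4_ADDR_LEN);
}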
+++ /dev/null
-/*
- * Bloom filter support
- *
- * Copyright (C) 1999-2019, Broadcom.
- *
- * Unless you and Broadcom execute a separate written software license
- * agreement governing use of this software, this software is licensed to you
- * under the terms of the GNU General Public License version 2 (the "GPL"),
- * available at http://www.broadcom.com/licenses/GPLv2.php, with the
- * following added to such license:
- *
- * As a special exception, the copyright holders of this software give you
- * permission to link this software with independent modules, and to copy and
- * distribute the resulting executable under terms of your choice, provided that
- * you also meet, for each linked independent module, the terms and conditions of
- * the license of that module. An independent module is a module which is not
- * derived from this software. The special exception does not apply to any
- * modifications of the software.
- *
- * Notwithstanding the above, under no circumstances may you combine this
- * software in any way with any other Broadcom software provided under a license
- * other than the GPL, without Broadcom's express prior written consent.
- *
- *
- * <<Broadcom-WL-IPTag/Open:>>
- *
- * $Id: bcmbloom.h 714397 2017-08-04 08:24:38Z $
- */
-
-#ifndef _bcmbloom_h_
-#define _bcmbloom_h_
-
-#include <typedefs.h>
-#ifdef BCMDRIVER
-#include <osl.h>
-#else
-#include <stddef.h> /* For size_t */
-#endif // endif
-
-struct bcm_bloom_filter;
-typedef struct bcm_bloom_filter bcm_bloom_filter_t;
-
-typedef void* (*bcm_bloom_alloc_t)(void *ctx, uint size);
-typedef void (*bcm_bloom_free_t)(void *ctx, void *buf, uint size);
-typedef uint (*bcm_bloom_hash_t)(void* ctx, uint idx, const uint8 *tag, uint len);
-
-/* create/allocate a bloom filter. filter size can be 0 for validate only filters */
-int bcm_bloom_create(bcm_bloom_alloc_t alloc_cb,
- bcm_bloom_free_t free_cb, void *callback_ctx, uint max_hash,
- uint filter_size /* bytes */, bcm_bloom_filter_t **bloom);
-
-/* destroy bloom filter */
-int bcm_bloom_destroy(bcm_bloom_filter_t **bloom, bcm_bloom_free_t free_cb);
-
-/* add a hash function to filter, return an index */
-int bcm_bloom_add_hash(bcm_bloom_filter_t *filter, bcm_bloom_hash_t hash, uint *idx);
-
-/* remove the hash function at index from filter */
-int bcm_bloom_remove_hash(bcm_bloom_filter_t *filter, uint idx);
-
-/* check if given tag is member of the filter. If buf is NULL and/or buf_len is 0
- * then use the internal state. BCME_OK if member, BCME_NOTFOUND if not,
- * or other error (e.g. BADARG)
- */
-bool bcm_bloom_is_member(bcm_bloom_filter_t *filter,
- const uint8 *tag, uint tag_len, const uint8 *buf, uint buf_len);
-
-/* add a member to the filter. invalid for validate_only filters */
-int bcm_bloom_add_member(bcm_bloom_filter_t *filter, const uint8 *tag, uint tag_len);
-
-/* no support for remove member */
-
-/* get the filter data from state. BCME_BUFTOOSHORT w/ required length in buf_len
- * if supplied size is insufficient
- */
-int bcm_bloom_get_filter_data(bcm_bloom_filter_t *filter,
- uint buf_size, uint8 *buf, uint *buf_len);
-
-#endif /* _bcmbloom_h_ */
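The prototypes being deleted above are easiest to follow with a short usage sketch. Illustrative only: the allocator/free callbacks simply forward to the OSL MALLOC/MFREE macros, the hash callback is a trivial placeholder, and the hash-count/filter-size values are arbitrary.

/* Illustrative only: typical flow for the (removed) bcm_bloom API. */
static void *demo_alloc(void *ctx, uint size) { return MALLOC((osl_t *)ctx, size); }
static void demo_free(void *ctx, void *buf, uint size) { MFREE((osl_t *)ctx, buf, size); }
static uint demo_hash(void *ctx, uint idx, const uint8 *tag, uint len)
{
	uint h = idx;			/* placeholder hash, salted by hash index */
	while (len--)
		h = (h * 33) ^ *tag++;
	return h;
}

static int demo_bloom_usage(osl_t *osh, const uint8 *tag, uint tag_len)
{
	bcm_bloom_filter_t *bloom = NULL;
	uint idx;
	int err;

	/* 2 hash functions, 64-byte filter (size 0 would create a validate-only filter) */
	err = bcm_bloom_create(demo_alloc, demo_free, osh, 2, 64, &bloom);
	if (err != BCME_OK)
		return err;

	bcm_bloom_add_hash(bloom, demo_hash, &idx);
	bcm_bloom_add_hash(bloom, demo_hash, &idx);

	bcm_bloom_add_member(bloom, tag, tag_len);

	/* NULL/0 buf means "test against the filter's internal state".  The
	 * declaration above returns bool while its comment describes BCME_*
	 * codes; interpret the result accordingly in real code.
	 */
	(void)bcm_bloom_is_member(bloom, tag, tag_len, NULL, 0);

	bcm_bloom_destroy(&bloom, demo_free);
	return BCME_OK;
}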
*
* Definitions subject to change without notice.
*
- * Copyright (C) 1999-2019, Broadcom.
- *
+ * Copyright (C) 1999-2017, Broadcom Corporation
+ *
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
* under the terms of the GNU General Public License version 2 (the "GPL"),
* available at http://www.broadcom.com/licenses/GPLv2.php, with the
* following added to such license:
- *
+ *
* As a special exception, the copyright holders of this software give you
* permission to link this software with independent modules, and to copy and
* distribute the resulting executable under terms of your choice, provided that
* the license of that module. An independent module is a module which is not
* derived from this software. The special exception does not apply to any
* modifications of the software.
- *
+ *
* Notwithstanding the above, under no circumstances may you combine this
* software in any way with any other Broadcom software provided under a license
* other than the GPL, without Broadcom's express prior written consent.
*
* <<Broadcom-WL-IPTag/Open:>>
*
- * $Id: bcmcdc.h 700076 2017-05-17 14:42:22Z $
+ * $Id: bcmcdc.h 676811 2016-12-24 20:48:46Z $
*/
#ifndef _bcmcdc_h_
#define _bcmcdc_h_
#define BDC_HEADER_LEN 4
/* flags field bitmap */
-#define BDC_FLAG_EXEMPT 0x03 /* EXT_STA: encryption exemption (host -> dongle?) */
#define BDC_FLAG_80211_PKT 0x01 /* Packet is in 802.11 format (dongle -> host) */
#define BDC_FLAG_SUM_GOOD 0x04 /* Dongle has verified good RX checksums */
#define BDC_FLAG_SUM_NEEDED 0x08 /* Dongle needs to do TX checksums: host->device */
#define BDC_SET_IF_IDX(hdr, idx) \
((hdr)->flags2 = (((hdr)->flags2 & ~BDC_FLAG2_IF_MASK) | ((idx) << BDC_FLAG2_IF_SHIFT)))
+#define BDC_FLAG2_PAD_MASK 0xf0
+#define BDC_FLAG_PAD_MASK 0x03
+#define BDC_FLAG2_PAD_SHIFT 2
+#define BDC_FLAG_PAD_SHIFT 0
+#define BDC_FLAG2_PAD_IDX 0x3c
+#define BDC_FLAG_PAD_IDX 0x03
+#define BDC_GET_PAD_LEN(hdr) \
+ ((int)(((((hdr)->flags2) & BDC_FLAG2_PAD_MASK) >> BDC_FLAG2_PAD_SHIFT) | \
+ ((((hdr)->flags) & BDC_FLAG_PAD_MASK) >> BDC_FLAG_PAD_SHIFT)))
+#define BDC_SET_PAD_LEN(hdr, idx) \
+ ((hdr)->flags2 = (((hdr)->flags2 & ~BDC_FLAG2_PAD_MASK) | \
+ (((idx) & BDC_FLAG2_PAD_IDX) << BDC_FLAG2_PAD_SHIFT))); \
+ ((hdr)->flags = (((hdr)->flags & ~BDC_FLAG_PAD_MASK) | \
+ (((idx) & BDC_FLAG_PAD_IDX) << BDC_FLAG_PAD_SHIFT)))
+
#endif /* _bcmcdc_h_ */
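The new pad-length macros split the value across the two flag bytes, which is easiest to see with a round trip. Sketch only: the bdc_header layout itself is not part of this hunk, so only the flags/flags2 fields the macros touch are assumed.

/* Illustrative round trip for the new BDC pad-length accessors. */
static int demo_bdc_pad_roundtrip(struct bdc_header *h)
{
	memset(h, 0, sizeof(*h));	/* assumed: h->flags and h->flags2 exist */

	/* 6 == 0b000110: bits [5:2] land in flags2[7:4], bits [1:0] in flags[1:0] */
	BDC_SET_PAD_LEN(h, 6);

	/* GET recombines them: ((flags2 & 0xf0) >> 2) | (flags & 0x03) == 6 */
	return BDC_GET_PAD_LEN(h);
}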
/*
* Misc system wide definitions
*
- * Copyright (C) 1999-2019, Broadcom.
- *
+ * Copyright (C) 1999-2017, Broadcom Corporation
+ *
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
* under the terms of the GNU General Public License version 2 (the "GPL"),
* available at http://www.broadcom.com/licenses/GPLv2.php, with the
* following added to such license:
- *
+ *
* As a special exception, the copyright holders of this software give you
* permission to link this software with independent modules, and to copy and
* distribute the resulting executable under terms of your choice, provided that
* the license of that module. An independent module is a module which is not
* derived from this software. The special exception does not apply to any
* modifications of the software.
- *
+ *
* Notwithstanding the above, under no circumstances may you combine this
* software in any way with any other Broadcom software provided under a license
* other than the GPL, without Broadcom's express prior written consent.
*
* <<Broadcom-WL-IPTag/Open:>>
*
- * $Id: bcmdefs.h 788740 2018-11-13 21:45:01Z $
+ * $Id: bcmdefs.h 657791 2016-09-02 15:14:42Z $
*/
#ifndef _bcmdefs_h_
#define UNUSED_VAR __attribute__ ((unused))
#else
#define UNUSED_VAR
-#endif // endif
+#endif
-/* GNU GCC 4.6+ supports selectively turning off a warning.
- * Define these diagnostic macros to help suppress cast-qual warning
- * until all the work can be done to fix the casting issues.
- */
-#if (defined(__GNUC__) && defined(STRICT_GCC_WARNINGS) && (__GNUC__ > 4 || (__GNUC__ == \
- 4 && __GNUC_MINOR__ >= 6)))
-#define GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST() \
- _Pragma("GCC diagnostic push") \
- _Pragma("GCC diagnostic ignored \"-Wcast-qual\"")
-#define GCC_DIAGNOSTIC_POP() \
- _Pragma("GCC diagnostic pop")
-#else
-#define GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST()
-#define GCC_DIAGNOSTIC_POP()
-#endif /* Diagnostic macros not defined */
-
-/* Support clang for MACOSX compiler */
-#ifdef __clang__
-#define CLANG_DIAGNOSTIC_PUSH_SUPPRESS_CAST() \
- _Pragma("clang diagnostic push") \
- _Pragma("clang diagnostic ignored \"-Wcast-qual\"")
-#define CLANG_DIAGNOSTIC_PUSH_SUPPRESS_FORMAT() \
- _Pragma("clang diagnostic push") \
- _Pragma("clang diagnostic ignored \"-Wformat-nonliteral\"")
-#define CLANG_DIAGNOSTIC_POP() \
- _Pragma("clang diagnostic pop")
-#else
-#define CLANG_DIAGNOSTIC_PUSH_SUPPRESS_CAST()
-#define CLANG_DIAGNOSTIC_PUSH_SUPPRESS_FORMAT()
-#define CLANG_DIAGNOSTIC_POP()
-#endif // endif
/* Compile-time assert can be used in place of ASSERT if the expression evaluates
* to a constant at compile time.
*/
extern bool bcm_reclaimed;
extern bool bcm_attach_part_reclaimed;
extern bool bcm_preattach_part_reclaimed;
-extern bool bcm_postattach_part_reclaimed;
-
-#define RECLAIMED() (bcm_reclaimed)
-#define ATTACH_PART_RECLAIMED() (bcm_attach_part_reclaimed)
-#define PREATTACH_PART_RECLAIMED() (bcm_preattach_part_reclaimed)
-#define POSTATTACH_PART_RECLAIMED() (bcm_postattach_part_reclaimed)
#if defined(BCM_RECLAIM_ATTACH_FN_DATA)
#define _data __attribute__ ((__section__ (".dataini2." #_data))) _data
#define BCMPREATTACHDATA(_data) __attribute__ ((__section__ (".dataini2." #_data))) _data
#define BCMPREATTACHFN(_fn) __attribute__ ((__section__ (".textini2." #_fn), noinline)) _fn
#endif /* PREATTACH_NORECLAIM */
-#define BCMPOSTATTACHDATA(_data) __attribute__ ((__section__ (".dataini5." #_data))) _data
-#define BCMPOSTATTACHFN(_fn) __attribute__ ((__section__ (".textini5." #_fn), noinline)) _fn
#else /* BCM_RECLAIM_ATTACH_FN_DATA */
#define _data _data
#define _fn _fn
#define BCMPREATTACHDATA(_data) _data
#define BCMPREATTACHFN(_fn) _fn
-#define BCMPOSTATTACHDATA(_data) _data
-#define BCMPOSTATTACHFN(_fn) _fn
#endif /* BCM_RECLAIM_ATTACH_FN_DATA */
-#ifdef BCMDBG_SR
-/*
- * Don't reclaim so we can compare SR ASM
- */
-#define BCMPREATTACHDATASR(_data) _data
-#define BCMPREATTACHFNSR(_fn) _fn
-#define BCMATTACHDATASR(_data) _data
-#define BCMATTACHFNSR(_fn) _fn
-#else
-#define BCMPREATTACHDATASR(_data) BCMPREATTACHDATA(_data)
-#define BCMPREATTACHFNSR(_fn) BCMPREATTACHFN(_fn)
-#define BCMATTACHDATASR(_data) _data
-#define BCMATTACHFNSR(_fn) _fn
-#endif // endif
-
#if defined(BCM_RECLAIM_INIT_FN_DATA)
#define _data __attribute__ ((__section__ (".dataini1." #_data))) _data
#define _fn __attribute__ ((__section__ (".textini1." #_fn), noinline)) _fn
#define _fn _fn
#ifndef CONST
#define CONST const
-#endif // endif
+#endif
#endif /* BCM_RECLAIM_INIT_FN_DATA */
/* Non-manufacture or internal attach function/dat */
#define BCMNMIATTACHFN(_fn) _fn
#define BCMNMIATTACHDATA(_data) _data
-#if defined(BCM_CISDUMP_NO_RECLAIM)
-#define BCMCISDUMPATTACHFN(_fn) _fn
-#define BCMCISDUMPATTACHDATA(_data) _data
-#else
-#define BCMCISDUMPATTACHFN(_fn) BCMNMIATTACHFN(_fn)
-#define BCMCISDUMPATTACHDATA(_data) BCMNMIATTACHDATA(_data)
-#endif // endif
-
-/* SROM with OTP support */
-#if defined(BCMOTPSROM)
-#define BCMSROMATTACHFN(_fn) _fn
-#define BCMSROMATTACHDATA(_data) _data
-#else
-#define BCMSROMATTACHFN(_fn) BCMNMIATTACHFN(_fn)
-#define BCMSROMATTACHDATA(_data) BCMNMIATTACHFN(_data)
-#endif /* BCMOTPSROM */
-
-#if defined(BCM_CISDUMP_NO_RECLAIM)
-#define BCMSROMCISDUMPATTACHFN(_fn) _fn
-#define BCMSROMCISDUMPATTACHDATA(_data) _data
-#else
-#define BCMSROMCISDUMPATTACHFN(_fn) BCMSROMATTACHFN(_fn)
-#define BCMSROMCISDUMPATTACHDATA(_data) BCMSROMATTACHDATA(_data)
-#endif /* BCM_CISDUMP_NO_RECLAIM */
-
#ifdef BCMNODOWN
#define _fn _fn
#else
#define _fn _fn
-#endif // endif
+#endif
#else /* BCM_RECLAIM */
-#define bcm_reclaimed (1)
-#define bcm_attach_part_reclaimed (1)
-#define bcm_preattach_part_reclaimed (1)
-#define bcm_postattach_part_reclaimed (1)
+#define bcm_reclaimed 0
#define _data _data
#define _fn _fn
#define BCM_SRM_ATTACH_DATA(_data) _data
#define BCM_SRM_ATTACH_FN(_fn) _fn
#define BCMPREATTACHDATA(_data) _data
#define BCMPREATTACHFN(_fn) _fn
-#define BCMPOSTATTACHDATA(_data) _data
-#define BCMPOSTATTACHFN(_fn) _fn
#define _data _data
#define _fn _fn
#define _fn _fn
#define BCMNMIATTACHFN(_fn) _fn
#define BCMNMIATTACHDATA(_data) _data
-#define BCMSROMATTACHFN(_fn) _fn
-#define BCMSROMATTACHDATA(_data) _data
-#define BCMPREATTACHFNSR(_fn) _fn
-#define BCMPREATTACHDATASR(_data) _data
-#define BCMATTACHFNSR(_fn) _fn
-#define BCMATTACHDATASR(_data) _data
-#define BCMSROMATTACHFN(_fn) _fn
-#define BCMSROMATTACHDATA(_data) _data
-#define BCMCISDUMPATTACHFN(_fn) _fn
-#define BCMCISDUMPATTACHDATA(_data) _data
-#define BCMSROMCISDUMPATTACHFN(_fn) _fn
-#define BCMSROMCISDUMPATTACHDATA(_data) _data
#define CONST const
-#define RECLAIMED() (bcm_reclaimed)
-#define ATTACH_PART_RECLAIMED() (bcm_attach_part_reclaimed)
-#define PREATTACH_PART_RECLAIMED() (bcm_preattach_part_reclaimed)
-#define POSTATTACH_PART_RECLAIMED() (bcm_postattach_part_reclaimed)
-
#endif /* BCM_RECLAIM */
-#define BCMUCODEDATA(_data) _data
-
-#if defined(BCM_DMA_CT) && !defined(BCM_DMA_CT_DISABLED)
-#define BCMUCODEFN(_fn) _fn
-#else
-#define BCMUCODEFN(_fn) _fn
-#endif /* BCM_DMA_CT */
-
#if !defined STB
#undef BCM47XX_CA9
#endif /* STB */
#if defined(STB)
#define BCMFASTPATH __attribute__ ((__section__ (".text.fastpath")))
#define BCMFASTPATH_HOST __attribute__ ((__section__ (".text.fastpath_host")))
-#else /* mips || BCM47XX_CA9 || STB */
+#else
#define BCMFASTPATH
#define BCMFASTPATH_HOST
-#endif // endif
+#endif
#endif /* BCMFASTPATH */
+
/* Use the BCMRAMFN() macro to tag functions in source that must be included in RAM (excluded from
* ROM). This should eliminate the need to manually specify these functions in the ROM config file.
* It should only be used in special cases where the function must be in RAM for *all* ROM-based
*/
#define BCMRAMFN(_fn) _fn
-/* Use BCMSPECSYM() macro to tag symbols going to a special output section in the binary. */
-#define BCMSPECSYM(_sym) __attribute__ ((__section__ (".special." #_sym))) _sym
-
#define STATIC static
/* Bus types */
#define BUSTYPE(bus) (BCMBUSTYPE)
#else
#define BUSTYPE(bus) (bus)
-#endif // endif
+#endif
#ifdef BCMBUSCORETYPE
#define BUSCORETYPE(ct) (BCMBUSCORETYPE)
#else
#define BUSCORETYPE(ct) (ct)
-#endif // endif
+#endif
/* Allows size optimization for single-backplane image */
#ifdef BCMCHIPTYPE
#define CHIPTYPE(bus) (BCMCHIPTYPE)
#else
#define CHIPTYPE(bus) (bus)
-#endif // endif
+#endif
+
/* Allows size optimization for SPROM support */
#if defined(BCMSPROMBUS)
#define SPROMBUS (PCMCIA_BUS)
#else
#define SPROMBUS (PCI_BUS)
-#endif // endif
+#endif
/* Allows size optimization for single-chip image */
#ifdef BCMCHIPID
#define CHIPID(chip) (BCMCHIPID)
#else
#define CHIPID(chip) (chip)
-#endif // endif
+#endif
#ifdef BCMCHIPREV
#define CHIPREV(rev) (BCMCHIPREV)
#else
#define CHIPREV(rev) (rev)
-#endif // endif
+#endif
#ifdef BCMPCIEREV
#define PCIECOREREV(rev) (BCMPCIEREV)
#else
#define PCIECOREREV(rev) (rev)
-#endif // endif
+#endif
#ifdef BCMPMUREV
#define PMUREV(rev) (BCMPMUREV)
#else
#define PMUREV(rev) (rev)
-#endif // endif
+#endif
#ifdef BCMCCREV
#define CCREV(rev) (BCMCCREV)
#else
#define CCREV(rev) (rev)
-#endif // endif
+#endif
#ifdef BCMGCIREV
#define GCIREV(rev) (BCMGCIREV)
#else
#define GCIREV(rev) (rev)
-#endif // endif
-
-#ifdef BCMCR4REV
-#define CR4REV (BCMCR4REV)
-#endif // endif
+#endif
/* Defines for DMA Address Width - Shared between OSL and HNDDMA */
#define DMADDR_MASK_32 0x0 /* Address mask for 32-bits */
#define MAX_DMA_SEGS 8
+
typedef struct {
void *oshdmah; /* Opaque handle for OSL to store its information */
uint origsize; /* Size of the virtual packet */
hnddma_seg_t segs[MAX_DMA_SEGS];
} hnddma_seg_map_t;
+
/* packet headroom necessary to accommodate the largest header in the system (i.e. TXOFF).
* By doing so, we avoid the need to allocate an extra buffer for the header when bridging to WL.
* There is a compile time check in wlc.c which ensures that this value is at least as big

#define BCMEXTRAHDROOM 260
#else /* BCM_RPC_NOCOPY || BCM_RPC_TXNOCOPY */
#if defined(STB)
+#if defined(BCM_GMAC3)
+#define BCMEXTRAHDROOM 32 /* For FullDongle, no D11 headroom space required. */
+#else
#define BCMEXTRAHDROOM 224
+#endif /* ! BCM_GMAC3 */
#else
#define BCMEXTRAHDROOM 204
-#endif // endif
+#endif
#endif /* BCM_RPC_NOCOPY || BCM_RPC_TXNOCOPY */
/* Packet alignment for most efficient SDIO (can change based on platform) */
#ifndef SDALIGN
#define SDALIGN 32
-#endif // endif
+#endif
/* Headroom required for dongle-to-host communication. Packets allocated
* locally in the dongle (e.g. for CDC ioctls or RNDIS messages) should
#define BCMDONGLEOVERHEAD (BCMDONGLEHDRSZ + BCMDONGLEPADSZ)
+
#if defined(NO_BCMDBG_ASSERT)
# undef BCMDBG_ASSERT
# undef BCMASSERT_LOG
-#endif // endif
+#endif
#if defined(BCMASSERT_LOG)
#define BCMASSERT_SUPPORT
-#endif // endif
+#endif
/* Macros for doing definition and get/set of bitfields
* Usage example, e.g. a three-bit field (bits 4-6):
#else
#define BCMSPACE
#define bcmspace TRUE /* if (bcmspace) code is retained */
-#endif // endif
+#endif
/* Max. nvram variable table size */
#ifndef MAXSZ_NVRAM_VARS
#ifdef LARGE_NVRAM_MAXSZ
-#define MAXSZ_NVRAM_VARS (LARGE_NVRAM_MAXSZ * 2)
+#define MAXSZ_NVRAM_VARS LARGE_NVRAM_MAXSZ
#else
-#define LARGE_NVRAM_MAXSZ 8192
-#define MAXSZ_NVRAM_VARS (LARGE_NVRAM_MAXSZ * 2)
+/* SROM12 changes */
+#define MAXSZ_NVRAM_VARS 6144
#endif /* LARGE_NVRAM_MAXSZ */
#endif /* !MAXSZ_NVRAM_VARS */
-/* ROM_ENAB_RUNTIME_CHECK may be set based upon the #define below (for ROM builds). It may also
+
+
+/* WL_ENAB_RUNTIME_CHECK may be set based upon the #define below (for ROM builds). It may also
* be defined via makefiles (e.g. ROM auto abandon unoptimized compiles).
*/
+
#ifdef BCMLFRAG /* BCMLFRAG support enab macros */
extern bool _bcmlfrag;
- #if defined(ROM_ENAB_RUNTIME_CHECK) || !defined(DONGLEBUILD)
+ #if defined(WL_ENAB_RUNTIME_CHECK) || !defined(DONGLEBUILD)
#define BCMLFRAG_ENAB() (_bcmlfrag)
#elif defined(BCMLFRAG_DISABLED)
#define BCMLFRAG_ENAB() (0)
#ifdef BCMPCIEDEV /* BCMPCIEDEV support enab macros */
extern bool _pciedevenab;
- #if defined(ROM_ENAB_RUNTIME_CHECK)
+ #if defined(WL_ENAB_RUNTIME_CHECK)
#define BCMPCIEDEV_ENAB() (_pciedevenab)
#elif defined(BCMPCIEDEV_ENABLED)
#define BCMPCIEDEV_ENAB() 1
#define BCMPCIEDEV_ENAB() 0
#endif /* BCMPCIEDEV */
-#ifdef BCMRESVFRAGPOOL /* BCMRESVFRAGPOOL support enab macros */
-extern bool _resvfragpool_enab;
- #if defined(ROM_ENAB_RUNTIME_CHECK) || !defined(DONGLEBUILD)
- #define BCMRESVFRAGPOOL_ENAB() (_resvfragpool_enab)
- #elif defined(BCMRESVFRAGPOOL_ENABLED)
- #define BCMRESVFRAGPOOL_ENAB() 1
- #else
- #define BCMRESVFRAGPOOL_ENAB() 0
- #endif
-#else
- #define BCMRESVFRAGPOOL_ENAB() 0
-#endif /* BCMPCIEDEV */
-
#define BCMSDIODEV_ENAB() 0
/* Max size for reclaimable NVRAM array */
extern uint32 gFWID;
-#ifdef BCMFRWDPOOLREORG /* BCMFRWDPOOLREORG support enab macros */
- extern bool _bcmfrwdpoolreorg;
- #if defined(ROM_ENAB_RUNTIME_CHECK) || !defined(DONGLEBUILD)
- #define BCMFRWDPOOLREORG_ENAB() (_bcmfrwdpoolreorg)
- #elif defined(BCMFRWDPOOLREORG_DISABLED)
- #define BCMFRWDPOOLREORG_ENAB() (0)
- #else
- #define BCMFRWDPOOLREORG_ENAB() (1)
- #endif
-#else
- #define BCMFRWDPOOLREORG_ENAB() (0)
-#endif /* BCMFRWDPOOLREORG */
-
-#ifdef BCMPOOLRECLAIM /* BCMPOOLRECLAIM support enab macros */
- extern bool _bcmpoolreclaim;
- #if defined(ROM_ENAB_RUNTIME_CHECK) || !defined(DONGLEBUILD)
- #define BCMPOOLRECLAIM_ENAB() (_bcmpoolreclaim)
- #elif defined(BCMPOOLRECLAIM_DISABLED)
- #define BCMPOOLRECLAIM_ENAB() (0)
- #else
- #define BCMPOOLRECLAIM_ENAB() (1)
- #endif
-#else
- #define BCMPOOLRECLAIM_ENAB() (0)
-#endif /* BCMPOOLRECLAIM */
-
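The feature-enable macros above all follow the same pattern: under WL_ENAB_RUNTIME_CHECK they test a runtime bool, otherwise they collapse to a constant so the compiler can drop the dead branch. A hedged sketch of a call site follows; tx_submit() is hypothetical.

/* Illustration only: how a *_ENAB() macro gates a code path. */
static void tx_submit(void *pkt)
{
	(void)pkt;
	if (BCMLFRAG_ENAB()) {
		/* fragment-aware TX path: compiled out entirely when
		 * BCMLFRAG_DISABLED folds the macro to (0)
		 */
	} else {
		/* legacy single-buffer TX path */
	}
}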
/* Chip related low power flags (lpflags) */
+#define LPFLAGS_SI_GLOBAL_DISABLE (1 << 0)
+#define LPFLAGS_SI_MEM_STDBY_DISABLE (1 << 1)
+#define LPFLAGS_SI_SFLASH_DISABLE (1 << 2)
+#define LPFLAGS_SI_BTLDO3P3_DISABLE (1 << 3)
+#define LPFLAGS_SI_GCI_FORCE_REGCLK_DISABLE (1 << 4)
+#define LPFLAGS_SI_FORCE_PWM_WHEN_RADIO_ON (1 << 5)
+#define LPFLAGS_PHY_GLOBAL_DISABLE (1 << 16)
+#define LPFLAGS_PHY_LP_DISABLE (1 << 17)
+#define LPFLAGS_PSM_PHY_CTL (1 << 18)
+
+/* Chip related Cbuck modes */
+#define PMU_43012_VREG8_DYNAMIC_CBUCK_MODE0 0x00001c03
+#define PMU_43012_VREG9_DYNAMIC_CBUCK_MODE0 0x00492490
+#define PMU_43012_VREG8_DYNAMIC_CBUCK_MODE1 0x00001c03
+#define PMU_43012_VREG9_DYNAMIC_CBUCK_MODE1 0x00490410
+
+/* Chip related dynamic cbuck mode mask */
+
+#define PMU_43012_VREG8_DYNAMIC_CBUCK_MODE_MASK 0xFFFFFC00
+#define PMU_43012_VREG9_DYNAMIC_CBUCK_MODE_MASK 0xFFFFFFFF
#ifndef PAD
#define _PADLINE(line) pad ## line
#define _XSTR(line) _PADLINE(line)
#define PAD _XSTR(__LINE__)
-#endif // endif
-
-#ifndef FRAG_HEADROOM
-#define FRAG_HEADROOM 224 /* In absence of SFD, use default headroom of 224 */
-#endif // endif
-
-#define MODULE_DETACH(var, detach_func)\
- if (var) { \
- detach_func(var); \
- (var) = NULL; \
- }
-#define MODULE_DETACH_2(var1, var2, detach_func) detach_func(var1, var2)
-#define MODULE_DETACH_TYPECASTED(var, detach_func) detach_func(var)
-
-/* When building ROML image use runtime conditional to cause the compiler
- * to compile everything but not to complain "defined but not used"
- * as #ifdef would cause at the callsites.
- * In the end functions called under if (0) {} will not be linked
- * into the final binary if they're not called from other places either.
- */
-#define BCM_ATTACH_REF_DECL()
-#define BCM_ATTACH_REF() (1)
-
-/* Const in ROM else normal data in RAM */
-#if defined(ROM_ENAB_RUNTIME_CHECK)
- #define ROMCONST CONST
-#else
- #define ROMCONST
-#endif // endif
+#endif
#endif /* _bcmdefs_h_ */
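The CHIPID()/CHIPREV()/BUSTYPE() wrappers above implement the "single-chip image" size optimization: with BCMCHIPID defined at build time they expand to a constant, so chip-specific branches vanish at compile time, while multi-chip builds keep a normal runtime check. Sketch only; si_t and si_chipid() stand in for the usual silicon-interface handle declared elsewhere.

/* Illustration only: the same source serves single-chip and multi-chip builds. */
static void demo_chip_quirk(si_t *sih)
{
	if (CHIPID(si_chipid(sih)) == BCM4336_CHIP_ID) {
		/* 4336-specific quirk: with -DBCMCHIPID=BCM4336_CHIP_ID this
		 * branch is always taken, with any other BCMCHIPID it is
		 * removed by the compiler, and without BCMCHIPID it is an
		 * ordinary runtime comparison.
		 */
	}
}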
/*
* Broadcom device-specific manifest constants.
*
- * Copyright (C) 1999-2019, Broadcom.
- *
+ * Copyright (C) 1999-2017, Broadcom Corporation
+ *
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
* under the terms of the GNU General Public License version 2 (the "GPL"),
* available at http://www.broadcom.com/licenses/GPLv2.php, with the
* following added to such license:
- *
+ *
* As a special exception, the copyright holders of this software give you
* permission to link this software with independent modules, and to copy and
* distribute the resulting executable under terms of your choice, provided that
* the license of that module. An independent module is a module which is not
* derived from this software. The special exception does not apply to any
* modifications of the software.
- *
+ *
* Notwithstanding the above, under no circumstances may you combine this
* software in any way with any other Broadcom software provided under a license
* other than the GPL, without Broadcom's express prior written consent.
*
* <<Broadcom-WL-IPTag/Open:>>
*
- * $Id: bcmdevs.h 825481 2019-06-14 10:06:03Z $
+ * $Id: bcmdevs.h 625027 2016-03-15 08:20:18Z $
*/
#ifndef _BCMDEVS_H
#define VENDOR_RICOH 0x1180 /* Ricoh */
#define VENDOR_JMICRON 0x197b
+
/* PCMCIA vendor IDs */
#define VENDOR_BROADCOM_PCMCIA 0x02d0
#define BCM_DNGL_BL_PID_4319 0xbd16
#define BCM_DNGL_BL_PID_43236 0xbd17
#define BCM_DNGL_BL_PID_4332 0xbd18
+#define BCM_DNGL_BL_PID_4330 0xbd19
+#define BCM_DNGL_BL_PID_4334 0xbd1a
+#define BCM_DNGL_BL_PID_43239 0xbd1b
+#define BCM_DNGL_BL_PID_4324 0xbd1c
#define BCM_DNGL_BL_PID_4360 0xbd1d
#define BCM_DNGL_BL_PID_43143 0xbd1e
#define BCM_DNGL_BL_PID_43242 0xbd1f
+#define BCM_DNGL_BL_PID_43342 0xbd21
#define BCM_DNGL_BL_PID_4335 0xbd20
+#define BCM_DNGL_BL_PID_43341 0xbd22
#define BCM_DNGL_BL_PID_4350 0xbd23
#define BCM_DNGL_BL_PID_4345 0xbd24
#define BCM_DNGL_BL_PID_4349 0xbd25
#define BCM_DNGL_BL_PID_4354 0xbd26
#define BCM_DNGL_BL_PID_43569 0xbd27
+#define BCM_DNGL_BL_PID_43909 0xbd28
#define BCM_DNGL_BL_PID_4373 0xbd29
#define BCM_DNGL_BDC_PID 0x0bdc
#define BCM_DNGL_JTAG_PID 0x4a44
-#ifdef DEPRECATED
-#define BCM_DNGL_BL_PID_43239 0xbd1b
-#define BCM_DNGL_BL_PID_4324 0xbd1c
-#define BCM_DNGL_BL_PID_43242 0xbd1f
-#define BCM_DNGL_BL_PID_43909 0xbd28
-#endif // endif
+/* HW USB BLOCK [CPULESS USB] PIDs */
+#define BCM_HWUSB_PID_43239 43239
/* PCI Device IDs */
#ifdef DEPRECATED /* These products have been deprecated */
#define BCM4329_D11N_ID 0x432e /* 4329 802.11n dualband device */
#define BCM4329_D11N2G_ID 0x432f /* 4329 802.11n 2.4G device */
#define BCM4329_D11N5G_ID 0x4330 /* 4329 802.11n 5G device */
-#define BCM4314_D11N2G_ID 0x4364 /* 4314 802.11n 2.4G device */
-#define BCM43143_D11N2G_ID 0x4366 /* 43143 802.11n 2.4G device */
#define BCM4315_D11DUAL_ID 0x4334 /* 4315 802.11a/g id */
#define BCM4315_D11G_ID 0x4335 /* 4315 802.11g id */
#define BCM4315_D11A_ID 0x4336 /* 4315 802.11a id */
#define BCM4319_D11N_ID 0x4337 /* 4319 802.11n dualband device */
#define BCM4319_D11N2G_ID 0x4338 /* 4319 802.11n 2.4G device */
#define BCM4319_D11N5G_ID 0x4339 /* 4319 802.11n 5G device */
+#define BCM43231_D11N2G_ID 0x4340 /* 43231 802.11n 2.4GHz device */
#define BCM43221_D11N2G_ID 0x4341 /* 43221 802.11n 2.4GHz device */
#define BCM43222_D11N_ID 0x4350 /* 43222 802.11n dualband device */
#define BCM43222_D11N2G_ID 0x4351 /* 43222 802.11n 2.4GHz device */
#define BCM43222_D11N5G_ID 0x4352 /* 43222 802.11n 5GHz device */
-#define BCM43225_D11N2G_ID 0x4357 /* 43225 802.11n 2.4GHz device */
#define BCM43226_D11N_ID 0x4354 /* 43226 802.11n dualband device */
-#define BCM43228_D11N5G_ID 0x435a /* 43228 802.11n 5GHz device */
-#define BCM43231_D11N2G_ID 0x4340 /* 43231 802.11n 2.4GHz device */
-#define BCM43237_D11N_ID 0x4355 /* 43237 802.11n dualband device */
-#define BCM43237_D11N5G_ID 0x4356 /* 43237 802.11n 5GHz device */
-#define BCM43239_D11N_ID 0x4370 /* 43239 802.11n dualband device */
-#define BCM4324_D11N_ID 0x4374 /* 4324 802.11n dualband device */
-#define BCM43242_D11N_ID 0x4367 /* 43242 802.11n dualband device */
-#define BCM43242_D11N2G_ID 0x4368 /* 43242 802.11n 2.4G device */
-#define BCM43242_D11N5G_ID 0x4369 /* 43242 802.11n 5G device */
-#define BCM4330_D11N_ID 0x4360 /* 4330 802.11n dualband device */
-#define BCM4330_D11N2G_ID 0x4361 /* 4330 802.11n 2.4G device */
-#define BCM4330_D11N5G_ID 0x4362 /* 4330 802.11n 5G device */
-#define BCM4334_D11N_ID 0x4380 /* 4334 802.11n dualband device */
-#define BCM4334_D11N2G_ID 0x4381 /* 4334 802.11n 2.4G device */
-#define BCM4334_D11N5G_ID 0x4382 /* 4334 802.11n 5G device */
-#define BCM43342_D11N_ID 0x4383 /* 43342 802.11n dualband device */
-#define BCM43342_D11N2G_ID 0x4384 /* 43342 802.11n 2.4G device */
-#define BCM43342_D11N5G_ID 0x4385 /* 43342 802.11n 5G device */
-#define BCM43341_D11N_ID 0x4386 /* 43341 802.11n dualband device */
-#define BCM43341_D11N2G_ID 0x4387 /* 43341 802.11n 2.4G device */
-#define BCM43341_D11N5G_ID 0x4388 /* 43341 802.11n 5G device */
-#define BCM4336_D11N_ID 0x4343 /* 4336 802.11n 2.4GHz device */
-#define BCM43362_D11N_ID 0x4363 /* 43362 802.11n 2.4GHz device */
-#define BCM43421_D11N_ID 0xA99D /* 43421 802.11n dualband device */
-#define BCM43909_D11AC_ID 0x43d0 /* 43909 802.11ac dualband device */
-#define BCM43909_D11AC2G_ID 0x43d1 /* 43909 802.11ac 2.4G device */
-#define BCM43909_D11AC5G_ID 0x43d2 /* 43909 802.11ac 5G device */
#endif /* DEPRECATED */
/* DEPRECATED but used */
#define BCM4306_D11G_ID 0x4320 /* 4306 802.11g */
#define BCM4306_D11A_ID 0x4321 /* 4306 802.11a */
#define BCM4306_D11DUAL_ID 0x4324 /* 4306 dual A+B */
-#define BCM43142_D11N2G_ID 0x4365 /* 43142 802.11n 2.4G device */
-#define BCM4313_D11N2G_ID 0x4727 /* 4313 802.11n 2.4G device */
#define BCM4318_D11G_ID 0x4318 /* 4318 802.11b/g id */
#define BCM4318_D11DUAL_ID 0x4319 /* 4318 802.11a/b/g id */
-#define BCM43224_D11N_ID 0x4353 /* 43224 802.11n dualband device */
-#define BCM43224_D11N_ID_VEN1 0x0576 /* Vendor specific 43224 802.11n db device */
-#define BCM43227_D11N2G_ID 0x4358 /* 43228 802.11n 2.4GHz device */
-#define BCM43228_D11N_ID 0x4359 /* 43228 802.11n DualBand device */
-#define BCM4331_D11N_ID 0x4331 /* 4331 802.11n dualband id */
-#define BCM4331_D11N2G_ID 0x4332 /* 4331 802.11n 2.4Ghz band id */
-#define BCM4331_D11N5G_ID 0x4333 /* 4331 802.11n 5Ghz band id */
/* DEPRECATED */
+#define BCM53572_D11N2G_ID 0x4329 /* 53572 802.11n 2.4Ghz band id (same as BCM4321) */
+#define BCM43224_D11N_ID 0x4353 /* 43224 802.11n dualband device */
+#define BCM43224_D11N_ID_VEN1 0x0576 /* Vendor specific 43224 802.11n db device */
#define BCM43236_D11N_ID 0x4346 /* 43236 802.11n dualband device */
#define BCM43236_D11N2G_ID 0x4347 /* 43236 802.11n 2.4GHz device */
#define BCM43236_D11N5G_ID 0x4348 /* 43236 802.11n 5GHz device */
+#define BCM43225_D11N2G_ID 0x4357 /* 43225 802.11n 2.4GHz device */
+#define BCM43421_D11N_ID 0xA99D /* 43421 802.11n dualband device */
+#define BCM4313_D11N2G_ID 0x4727 /* 4313 802.11n 2.4G device */
+#define BCM4330_D11N_ID 0x4360 /* 4330 802.11n dualband device */
+#define BCM4330_D11N2G_ID 0x4361 /* 4330 802.11n 2.4G device */
+#define BCM4330_D11N5G_ID 0x4362 /* 4330 802.11n 5G device */
+#define BCM4336_D11N_ID 0x4343 /* 4336 802.11n 2.4GHz device */
#define BCM6362_D11N_ID 0x435f /* 6362 802.11n dualband device */
#define BCM6362_D11N2G_ID 0x433f /* 6362 802.11n 2.4Ghz band id */
#define BCM6362_D11N5G_ID 0x434f /* 6362 802.11n 5Ghz band id */
+#define BCM4331_D11N_ID 0x4331 /* 4331 802.11n dualband id */
+#define BCM4331_D11N2G_ID 0x4332 /* 4331 802.11n 2.4Ghz band id */
+#define BCM4331_D11N5G_ID 0x4333 /* 4331 802.11n 5Ghz band id */
+#define BCM43237_D11N_ID 0x4355 /* 43237 802.11n dualband device */
+#define BCM43237_D11N5G_ID 0x4356 /* 43237 802.11n 5GHz device */
+#define BCM43227_D11N2G_ID 0x4358 /* 43228 802.11n 2.4GHz device */
+#define BCM43228_D11N_ID 0x4359 /* 43228 802.11n DualBand device */
+#define BCM43228_D11N5G_ID 0x435a /* 43228 802.11n 5GHz device */
+#define BCM43362_D11N_ID 0x4363 /* 43362 802.11n 2.4GHz device */
+#define BCM43239_D11N_ID 0x4370 /* 43239 802.11n dualband device */
+#define BCM4324_D11N_ID 0x4374 /* 4324 802.11n dualband device */
#define BCM43217_D11N2G_ID 0x43a9 /* 43217 802.11n 2.4GHz device */
#define BCM43131_D11N2G_ID 0x43aa /* 43131 802.11n 2.4GHz device */
+#define BCM4314_D11N2G_ID 0x4364 /* 4314 802.11n 2.4G device */
+#define BCM43142_D11N2G_ID 0x4365 /* 43142 802.11n 2.4G device */
+#define BCM43143_D11N2G_ID 0x4366 /* 43143 802.11n 2.4G device */
+#define BCM4334_D11N_ID 0x4380 /* 4334 802.11n dualband device */
+#define BCM4334_D11N2G_ID 0x4381 /* 4334 802.11n 2.4G device */
+#define BCM4334_D11N5G_ID 0x4382 /* 4334 802.11n 5G device */
+#define BCM43342_D11N_ID 0x4383 /* 43342 802.11n dualband device */
+#define BCM43342_D11N2G_ID 0x4384 /* 43342 802.11n 2.4G device */
+#define BCM43342_D11N5G_ID 0x4385 /* 43342 802.11n 5G device */
+#define BCM43341_D11N_ID 0x4386 /* 43341 802.11n dualband device */
+#define BCM43341_D11N2G_ID 0x4387 /* 43341 802.11n 2.4G device */
+#define BCM43341_D11N5G_ID 0x4388 /* 43341 802.11n 5G device */
#define BCM4360_D11AC_ID 0x43a0
#define BCM4360_D11AC2G_ID 0x43a1
#define BCM4360_D11AC5G_ID 0x43a2
#define BCM43597_D11AC_ID 0x441c /* 43597 802.11ac dualband device */
#define BCM43597_D11AC2G_ID 0x441d /* 43597 802.11ac 2.4G device */
#define BCM43597_D11AC5G_ID 0x441e /* 43597 802.11ac 5G device */
+#define BCM43909_D11AC_ID 0x43d0 /* 43909 802.11ac dualband device */
+#define BCM43909_D11AC2G_ID 0x43d1 /* 43909 802.11ac 2.4G device */
+#define BCM43909_D11AC5G_ID 0x43d2 /* 43909 802.11ac 5G device */
#define BCM43012_D11N_ID 0xA804 /* 43012 802.11n dualband device */
#define BCM43012_D11N2G_ID 0xA805 /* 43012 802.11n 2.4G device */
#define BCM43012_D11N5G_ID 0xA806 /* 43012 802.11n 5G device */
-#define BCM43014_D11N_ID 0x4495 /* 43014 802.11n dualband device */
-#define BCM43014_D11N2G_ID 0x4496 /* 43014 802.11n 2.4G device */
-#define BCM43014_D11N5G_ID 0x4497 /* 43014 802.11n 5G device */
/* PCI Subsystem ID */
+#define BCM943228HMB_SSID_VEN1 0x0607
#define BCM94313HMGBL_SSID_VEN1 0x0608
#define BCM94313HMG_SSID_VEN1 0x0609
#define BCM943142HM_SSID_VEN1 0x0611
+#define BCM43143_D11N2G_ID 0x4366 /* 43143 802.11n 2.4G device */
+
+#define BCM43242_D11N_ID 0x4367 /* 43242 802.11n dualband device */
+#define BCM43242_D11N2G_ID 0x4368 /* 43242 802.11n 2.4G device */
+#define BCM43242_D11N5G_ID 0x4369 /* 43242 802.11n 5G device */
+
#define BCM4350_D11AC_ID 0x43a3
#define BCM4350_D11AC2G_ID 0x43a4
#define BCM4350_D11AC5G_ID 0x43a5
#define BCM43430_D11N2G_ID 0x43e2 /* 43430 802.11n 2.4G device */
#define BCM43018_D11N2G_ID 0x441b /* 43018 802.11n 2.4G device */
+
#define BCM4347_D11AC_ID 0x440a /* 4347 802.11ac dualband device */
#define BCM4347_D11AC2G_ID 0x440b /* 4347 802.11ac 2.4G device */
#define BCM4347_D11AC5G_ID 0x440c /* 4347 802.11ac 5G device */
#define BCM4362_D11AX_ID 0x4490 /* 4362 802.11ax dualband device */
#define BCM4362_D11AX2G_ID 0x4491 /* 4362 802.11ax 2.4G device */
#define BCM4362_D11AX5G_ID 0x4492 /* 4362 802.11ax 5G device */
-#define BCM43751_D11AX_ID 0x449a /* 43751 802.11ax dualband device */
-#define BCM43751_D11AX2G_ID 0x449b /* 43751 802.11ax 2.4G device */
-#define BCM43751_D11AX5G_ID 0x449c /* 43751 802.11ax 5G device */
-#define BCM43752_D11AX_ID 0x449d /* 43752 802.11ax dualband device */
-#define BCM43752_D11AX2G_ID 0x449e /* 43752 802.11ax 2.4G device */
-#define BCM43752_D11AX5G_ID 0x449f /* 43752 802.11ax 5G device */
#define BCM4364_D11AC_ID 0x4464 /* 4364 802.11ac dualband device */
#define BCM4364_D11AC2G_ID 0x446a /* 4364 802.11ac 2.4G device */
#define BCM4366_D11AC2G_ID 0x43c4
#define BCM4366_D11AC5G_ID 0x43c5
-/* TBD change below values */
-#define BCM4369_D11AX_ID 0x4470 /* 4369 802.11ax dualband device */
-#define BCM4369_D11AX2G_ID 0x4471 /* 4369 802.11ax 2.4G device */
-#define BCM4369_D11AX5G_ID 0x4472 /* 4369 802.11ax 5G device */
-
-#define BCM4375_D11AX_ID 0x4475 /* 4375 802.11ax dualband device */
-#define BCM4375_D11AX2G_ID 0x4476 /* 4375 802.11ax 2.4G device */
-#define BCM4375_D11AX5G_ID 0x4477 /* 4375 802.11ax 5G device */
-
#define BCM43349_D11N_ID 0x43e6 /* 43349 802.11n dualband id */
#define BCM43349_D11N2G_ID 0x43e7 /* 43349 802.11n 2.4Ghz band id */
#define BCM43349_D11N5G_ID 0x43e8 /* 43349 802.11n 5Ghz band id */
#define BCM43111_CHIP_ID 43111 /* 43111 chipcommon chipid (OTP chipid) */
#define BCM43112_CHIP_ID 43112 /* 43112 chipcommon chipid (OTP chipid) */
#define BCM4312_CHIP_ID 0x4312 /* 4312 chipcommon chipid */
-#define BCM4314_CHIP_ID 0x4314 /* 4314 chipcommon chipid */
-#define BCM43142_CHIP_ID 43142 /* 43142 chipcommon chipid */
-#define BCM43143_CHIP_ID 43143 /* 43143 chipcommon chipid */
-#define BCM4313_CHIP_ID 0x4313 /* 4313 chip id */
#define BCM4315_CHIP_ID 0x4315 /* 4315 chip id */
#define BCM4318_CHIP_ID 0x4318 /* 4318 chipcommon chipid */
#define BCM4319_CHIP_ID 0x4319 /* 4319 chip id */
#define BCM4322_CHIP_ID 0x4322 /* 4322 chipcommon chipid */
#define BCM43221_CHIP_ID 43221 /* 43221 chipcommon chipid (OTP chipid) */
#define BCM43222_CHIP_ID 43222 /* 43222 chipcommon chipid */
-#define BCM43224_CHIP_ID 43224 /* 43224 chipcommon chipid */
-#define BCM43225_CHIP_ID 43225 /* 43225 chipcommon chipid */
#define BCM43226_CHIP_ID 43226 /* 43226 chipcommon chipid */
-#define BCM43227_CHIP_ID 43227 /* 43227 chipcommon chipid */
-#define BCM43228_CHIP_ID 43228 /* 43228 chipcommon chipid */
#define BCM43231_CHIP_ID 43231 /* 43231 chipcommon chipid (OTP chipid) */
-#define BCM43237_CHIP_ID 43237 /* 43237 chipcommon chipid */
-#define BCM43239_CHIP_ID 43239 /* 43239 chipcommon chipid */
-#define BCM4324_CHIP_ID 0x4324 /* 4324 chipcommon chipid */
-#define BCM43242_CHIP_ID 43242 /* 43242 chipcommon chipid */
-#define BCM43243_CHIP_ID 43243 /* 43243 chipcommon chipid */
+#define BCM4342_CHIP_ID 4342 /* 4342 chipcommon chipid (OTP, RBBU) */
#define BCM4325_CHIP_ID 0x4325 /* 4325 chip id */
#define BCM4328_CHIP_ID 0x4328 /* 4328 chip id */
#define BCM4329_CHIP_ID 0x4329 /* 4329 chipcommon chipid */
-#define BCM4331_CHIP_ID 0x4331 /* 4331 chipcommon chipid */
-#define BCM4334_CHIP_ID 0x4334 /* 4334 chipcommon chipid */
-#define BCM43349_CHIP_ID 43349 /* 43349(0xA955) chipcommon chipid */
-#define BCM43340_CHIP_ID 43340 /* 43340 chipcommon chipid */
-#define BCM43341_CHIP_ID 43341 /* 43341 chipcommon chipid */
-#define BCM43342_CHIP_ID 43342 /* 43342 chipcommon chipid */
-#define BCM4342_CHIP_ID 4342 /* 4342 chipcommon chipid (OTP, RBBU) */
-#define BCM43420_CHIP_ID 43420 /* 43420 chipcommon chipid (OTP, RBBU) */
-#define BCM43421_CHIP_ID 43421 /* 43224 chipcommon chipid (OTP, RBBU) */
-#define BCM43431_CHIP_ID 43431 /* 4331 chipcommon chipid (OTP, RBBU) */
-#define BCM43909_CHIP_ID 0xab85 /* 43909 chipcommon chipid */
#define BCM4712_CHIP_ID 0x4712 /* 4712 chipcommon chipid */
-#define BCM4716_CHIP_ID 0x4716 /* 4716 chipcommon chipid */
-#define BCM4748_CHIP_ID 0x4748 /* 4716 chipcommon chipid (OTP, RBBU) */
#endif /* DEPRECATED */
/* DEPRECATED but still referenced in components - start */
+#define BCM4716_CHIP_ID 0x4716 /* 4716 chipcommon chipid */
+#define BCM4748_CHIP_ID 0x4748 /* 4716 chipcommon chipid (OTP, RBBU) */
#define BCM47162_CHIP_ID 47162 /* 47162 chipcommon chipid */
#define BCM5354_CHIP_ID 0x5354 /* 5354 chipcommon chipid */
/* DEPRECATED but still referenced in components - end */
+#define BCM43224_CHIP_ID 43224 /* 43224 chipcommon chipid */
+#define BCM43225_CHIP_ID 43225 /* 43225 chipcommon chipid */
+#define BCM43227_CHIP_ID 43227 /* 43227 chipcommon chipid */
+#define BCM43228_CHIP_ID 43228 /* 43228 chipcommon chipid */
#define BCM43217_CHIP_ID 43217 /* 43217 chip id (OTP chipid) */
+#define BCM4313_CHIP_ID 0x4313 /* 4313 chip id */
#define BCM43131_CHIP_ID 43131 /* 43131 chip id (OTP chipid) */
#define BCM43234_CHIP_ID 43234 /* 43234 chipcommon chipid */
#define BCM43235_CHIP_ID 43235 /* 43235 chipcommon chipid */
#define BCM43236_CHIP_ID 43236 /* 43236 chipcommon chipid */
+#define BCM43237_CHIP_ID 43237 /* 43237 chipcommon chipid */
#define BCM43238_CHIP_ID 43238 /* 43238 chipcommon chipid */
+#define BCM43239_CHIP_ID 43239 /* 43239 chipcommon chipid */
+#define BCM43420_CHIP_ID 43420 /* 43222 chipcommon chipid (OTP, RBBU) */
+#define BCM43421_CHIP_ID 43421 /* 43224 chipcommon chipid (OTP, RBBU) */
#define BCM43428_CHIP_ID 43428 /* 43228 chipcommon chipid (OTP, RBBU) */
+#define BCM43431_CHIP_ID 43431 /* 4331 chipcommon chipid (OTP, RBBU) */
#define BCM43460_CHIP_ID 43460 /* 4360 chipcommon chipid (OTP, RBBU) */
+#define BCM4331_CHIP_ID 0x4331 /* 4331 chipcommon chipid */
+#define BCM4336_CHIP_ID 0x4336 /* 4336 chipcommon chipid */
#define BCM43362_CHIP_ID 43362 /* 43362 chipcommon chipid */
#define BCM4330_CHIP_ID 0x4330 /* 4330 chipcommon chipid */
-#define BCM43465_CHIP_ID 43465 /* 4366 chipcommon chipid (OTP, RBBU) */
-#define BCM43525_CHIP_ID 43525 /* 4365 chipcommon chipid (OTP, RBBU) */
-#define BCM47452_CHIP_ID 47452 /* 53573 chipcommon chipid (OTP, RBBU) */
#define BCM6362_CHIP_ID 0x6362 /* 6362 chipcommon chipid */
+#define BCM4314_CHIP_ID 0x4314 /* 4314 chipcommon chipid */
+#define BCM43142_CHIP_ID 43142 /* 43142 chipcommon chipid */
#define BCM43143_CHIP_ID 43143 /* 43143 chipcommon chipid */
#define BCM4324_CHIP_ID 0x4324 /* 4324 chipcommon chipid */
#define BCM43242_CHIP_ID 43242 /* 43242 chipcommon chipid */
+#define BCM43243_CHIP_ID 43243 /* 43243 chipcommon chipid */
#define BCM4334_CHIP_ID 0x4334 /* 4334 chipcommon chipid */
#define BCM4335_CHIP_ID 0x4335 /* 4335 chipcommon chipid */
#define BCM4339_CHIP_ID 0x4339 /* 4339 chipcommon chipid */
+#define BCM43349_CHIP_ID 43349 /* 43349(0xA955) chipcommon chipid */
#define BCM4360_CHIP_ID 0x4360 /* 4360 chipcommon chipid */
#define BCM4364_CHIP_ID 0x4364 /* 4364 chipcommon chipid */
#define BCM4352_CHIP_ID 0x4352 /* 4352 chipcommon chipid */
#define BCM43526_CHIP_ID 0xAA06
#define BCM43340_CHIP_ID 43340 /* 43340 chipcommon chipid */
#define BCM43341_CHIP_ID 43341 /* 43341 chipcommon chipid */
+#define BCM43342_CHIP_ID 43342 /* 43342 chipcommon chipid */
#define BCM4350_CHIP_ID 0x4350 /* 4350 chipcommon chipid */
#define BCM4354_CHIP_ID 0x4354 /* 4354 chipcommon chipid */
#define BCM4356_CHIP_ID 0x4356 /* 4356 chipcommon chipid */
#define BCM43569_CHIP_ID 0xAA31 /* 43569 chipcommon chipid */
#define BCM43570_CHIP_ID 0xAA32 /* 43570 chipcommon chipid */
#define BCM4358_CHIP_ID 0x4358 /* 4358 chipcommon chipid */
+#define BCM4371_CHIP_ID 0x4371 /* 4371 chipcommon chipid */
#define BCM43012_CHIP_ID 0xA804 /* 43012 chipcommon chipid */
-#define BCM43014_CHIP_ID 0xA806 /* 43014 chipcommon chipid */
-#define BCM4369_CHIP_ID 0x4369 /* 4369 chipcommon chipid */
-
#define BCM4350_CHIP(chipid) ((CHIPID(chipid) == BCM4350_CHIP_ID) || \
(CHIPID(chipid) == BCM4354_CHIP_ID) || \
(CHIPID(chipid) == BCM43556_CHIP_ID) || \
#define BCM4349_CHIP(chipid) ((CHIPID(chipid) == BCM4349_CHIP_ID) || \
(CHIPID(chipid) == BCM4355_CHIP_ID) || \
(CHIPID(chipid) == BCM4359_CHIP_ID))
-
-#define BCM4355_CHIP(chipid) (CHIPID(chipid) == BCM4355_CHIP_ID)
-
#define BCM4349_CHIP_GRPID BCM4349_CHIP_ID: \
case BCM4355_CHIP_ID: \
case BCM4359_CHIP_ID
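/*
 * Usage sketch (illustrative only): the *_CHIP_GRPID macros expand to a
 * chain of case labels, so they are meant to be dropped straight into a
 * switch on the chip id. The function name below is hypothetical, and
 * CHIPID()/bool/TRUE/FALSE are assumed to come from the usual Broadcom
 * typedef headers.
 */
static bool
example_is_4349_family(uint32 chipid)
{
	switch (CHIPID(chipid)) {
	case BCM4349_CHIP_GRPID:	/* 4349, 4355 and 4359 case labels */
		return TRUE;
	default:
		return FALSE;
	}
}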
#define BCM43596_CHIP_ID 43596 /* 43596 chipcommon chipid */
-
#define BCM4347_CHIP_ID 0x4347 /* 4347 chipcommon chipid */
#define BCM4357_CHIP_ID 0x4357 /* 4357 chipcommon chipid */
#define BCM4361_CHIP_ID 0x4361 /* 4361 chipcommon chipid */
-#define BCM4369_CHIP_ID 0x4369 /* 4369/ chipcommon chipid */
-#define BCM4375_CHIP_ID 0x4375 /* 4375/ chipcommon chipid */
-#define BCM4377_CHIP_ID 0x4377 /* 4377/ chipcommon chipid */
#define BCM4362_CHIP_ID 0x4362 /* 4362 chipcommon chipid */
-#define BCM43751_CHIP_ID 0xAAE7 /* 43751 chipcommon chipid */
-#define BCM43752_CHIP_ID 0xAAE8 /* 43752 chipcommon chipid */
-
#define BCM4347_CHIP(chipid) ((CHIPID(chipid) == BCM4347_CHIP_ID) || \
(CHIPID(chipid) == BCM4357_CHIP_ID) || \
(CHIPID(chipid) == BCM4361_CHIP_ID))
case BCM4357_CHIP_ID: \
case BCM4361_CHIP_ID
-#define BCM4369_CHIP(chipid) ((CHIPID(chipid) == BCM4369_CHIP_ID) || \
- (CHIPID(chipid) == BCM4377_CHIP_ID))
-#define BCM4369_CHIP_GRPID BCM4369_CHIP_ID: \
- case BCM4377_CHIP_ID
-
-#define BCM4362_CHIP(chipid) ((CHIPID(chipid) == BCM4362_CHIP_ID) || \
- (CHIPID(chipid) == BCM43751_CHIP_ID) || \
- (CHIPID(chipid) == BCM43752_CHIP_ID))
-#define BCM4362_CHIP_GRPID BCM4362_CHIP_ID: \
- case BCM43751_CHIP_ID: \
- case BCM43752_CHIP_ID
-
#define BCM4365_CHIP_ID 0x4365 /* 4365 chipcommon chipid */
#define BCM4366_CHIP_ID 0x4366 /* 4366 chipcommon chipid */
-#define BCM43664_CHIP_ID 43664 /* 4366E chipcommon chipid */
-#define BCM43666_CHIP_ID 43666 /* 4365E chipcommon chipid */
#define BCM4365_CHIP(chipid) ((CHIPID(chipid) == BCM4365_CHIP_ID) || \
- (CHIPID(chipid) == BCM4366_CHIP_ID) || \
- (CHIPID(chipid) == BCM43664_CHIP_ID) || \
- (CHIPID(chipid) == BCM43666_CHIP_ID))
-#define CASE_BCM4365_CHIP case BCM4365_CHIP_ID: /* fallthrough */ \
- case BCM4366_CHIP_ID: /* fallthrough */ \
- case BCM43664_CHIP_ID: /* fallthrough */ \
- case BCM43666_CHIP_ID
+ (CHIPID(chipid) == BCM4366_CHIP_ID))
+
+
+#define BCM43909_CHIP_ID 0xab85 /* 43909 chipcommon chipid */
#define BCM43602_CHIP_ID 0xaa52 /* 43602 chipcommon chipid */
#define BCM43462_CHIP_ID 0xa9c6 /* 43462 chipcommon chipid */
#define BCM4402_CHIP_ID 0x4402 /* 4402 chipid */
#define BCM4704_CHIP_ID 0x4704 /* 4704 chipcommon chipid */
+#define BCM4706_CHIP_ID 0x5300 /* 4706 chipcommon chipid */
#define BCM4707_CHIP_ID 53010 /* 4707 chipcommon chipid */
#define BCM47094_CHIP_ID 53030 /* 47094 chipcommon chipid */
#define BCM53018_CHIP_ID 53018 /* 53018 chipcommon chipid */
((chipid) == BCM53018_CHIP_ID) || \
((chipid) == BCM47094_CHIP_ID))
#define BCM4710_CHIP_ID 0x4710 /* 4710 chipid */
+#define BCM4749_CHIP_ID 0x4749 /* 5357 chipcommon chipid (OTP, RBBU) */
#define BCM4785_CHIP_ID 0x4785 /* 4785 chipcommon chipid */
#define BCM5350_CHIP_ID 0x5350 /* 5350 chipcommon chipid */
#define BCM5352_CHIP_ID 0x5352 /* 5352 chipcommon chipid */
#define BCM5365_CHIP_ID 0x5365 /* 5365 chipcommon chipid */
+#define BCM5356_CHIP_ID 0x5356 /* 5356 chipcommon chipid */
+#define BCM5357_CHIP_ID 0x5357 /* 5357 chipcommon chipid */
+#define BCM53572_CHIP_ID 53572 /* 53572 chipcommon chipid */
#define BCM53573_CHIP_ID 53573 /* 53573 chipcommon chipid */
#define BCM53574_CHIP_ID 53574 /* 53574 chipcommon chipid */
#define BCM53573_CHIP(chipid) ((CHIPID(chipid) == BCM53573_CHIP_ID) || \
- (CHIPID(chipid) == BCM53574_CHIP_ID) || \
- (CHIPID(chipid) == BCM47452_CHIP_ID))
+ (CHIPID(chipid) == BCM53574_CHIP_ID))
#define BCM53573_CHIP_GRPID BCM53573_CHIP_ID : \
- case BCM53574_CHIP_ID : \
- case BCM47452_CHIP_ID
+ case BCM53574_CHIP_ID
#define BCM53573_DEVICE(devid) (((devid) == BCM53573_D11AC_ID) || \
((devid) == BCM53573_D11AC2G_ID) || \
((devid) == BCM53573_D11AC5G_ID) || \
((devid) == BCM47189_D11AC5G_ID))
#define BCM7271_CHIP_ID 0x05c9 /* 7271 chipcommon chipid */
-#define BCM7271_CHIP(chipid) ((CHIPID(chipid) == BCM7271_CHIP_ID))
-
#define BCM4373_CHIP_ID 0x4373 /* 4373 chipcommon chipid */
/* Package IDs */
#define BCM4716_PKG_ID 8 /* 4716 package id */
#define BCM4717_PKG_ID 9 /* 4717 package id */
#define BCM4718_PKG_ID 10 /* 4718 package id */
+#endif /* DEPRECATED */
+#define BCM5356_PKG_NONMODE 1 /* 5356 package without nmode support */
+#define BCM5358U_PKG_ID 8 /* 5358U package id */
+#define BCM5358_PKG_ID 9 /* 5358 package id */
+#define BCM47186_PKG_ID 10 /* 47186 package id */
+#define BCM5357_PKG_ID 11 /* 5357 package id */
+#define BCM5356U_PKG_ID 12 /* 5356U package id */
+#define BCM53572_PKG_ID 8 /* 53572 package id */
+#define BCM5357C0_PKG_ID 8 /* 5357c0 package id (the same as 53572) */
+#define BCM47188_PKG_ID 9 /* 47188 package id */
+#define BCM5358C0_PKG_ID 0xa /* 5358c0 package id */
+#define BCM5356C0_PKG_ID 0xb /* 5356c0 package id */
#define BCM4331TT_PKG_ID 8 /* 4331 12x12 package id */
#define BCM4331TN_PKG_ID 9 /* 4331 12x9 package id */
#define BCM4331TNA0_PKG_ID 0xb /* 4331 12x9 package id */
-#endif /* DEPRECATED */
#define BCM47189_PKG_ID 1 /* 47189 package id */
#define BCM53573_PKG_ID 0 /* 53573 package id */
+#define BCM4706L_PKG_ID 1 /* 4706L package id */
#define HDLSIM5350_PKG_ID 1 /* HDL simulator package id for a 5350 */
#define HDLSIM_PKG_ID 14 /* HDL simulator package id */
#define HWSIM_PKG_ID 15 /* Hardware simulator package id */
+#define BCM43224_FAB_CSM 0x8 /* the chip is manufactured by CSM */
+#define BCM43224_FAB_SMIC 0xa /* the chip is manufactured by SMIC */
+#define BCM4336_WLBGA_PKG_ID 0x8
+#define BCM4330_WLBGA_PKG_ID 0x0
+#define BCM4314PCIE_ARM_PKG_ID (8 | 0) /* 4314 QFN PCI package id, bit 3 tie high */
+#define BCM4314SDIO_PKG_ID (8 | 1) /* 4314 QFN SDIO package id */
+#define BCM4314PCIE_PKG_ID (8 | 2) /* 4314 QFN PCI (ARM-less) package id */
+#define BCM4314SDIO_ARM_PKG_ID (8 | 3) /* 4314 QFN SDIO (ARM-less) package id */
+#define BCM4314SDIO_FPBGA_PKG_ID (8 | 4) /* 4314 FpBGA SDIO package id */
+#define BCM4314DEV_PKG_ID (8 | 6) /* 4314 Development package id */
#define BCM4707_PKG_ID 1 /* 4707 package id */
#define BCM4708_PKG_ID 2 /* 4708 package id */
#define BFL2_5G_PWRGAIN 0x00000010 /* Board supports 5G band power gain */
#define BFL2_PCIEWAR_OVR 0x00000020 /* Board overrides ASPM and Clkreq settings */
#define BFL2_CAESERS_BRD 0x00000040 /* Board is Caesers brd (unused by sw) */
-#define BFL2_WLCX_ATLAS 0x00000040 /* Board flag to initialize ECI for WLCX on FL-ATLAS */
#define BFL2_BTC3WIRE 0x00000080 /* Board support legacy 3 wire or 4 wire */
#define BFL2_BTCLEGACY 0x00000080 /* Board support legacy 3/4 wire, to replace
* BFL2_BTC3WIRE
#define BFL2_PWR_NOMINAL 0x04000000 /* 0: power reduction on, 1: no power reduction */
#define BFL2_EXTLNA_PWRSAVE 0x08000000 /* boardflag to enable ucode to apply power save */
/* ucode control of eLNA during Tx */
+#define BFL2_4313_RADIOREG 0x10000000 /* board rework */
+#define BFL2_DYNAMIC_VMID 0x10000000 /* enable dynamic Vmid in idle TSSI CAL for 4331 */
+
#define BFL2_SDR_EN 0x20000000 /* SDR enabled or disabled */
#define BFL2_DYNAMIC_VMID 0x10000000 /* boardflag to enable dynamic Vmid idle TSSI CAL */
#define BFL2_LNA1BYPFORTR2G 0x40000000 /* acphy, enable lna1 bypass for clip gain, 2g */
#define BFL3_AVVMID_FROM_NVRAM_SHIFT 30 /* Read Av Vmid from NVRAM */
#define BFL3_VLIN_EN_FROM_NVRAM_SHIFT 31 /* Enable Vlin from NVRAM */
-/* boardflags4 for SROM12/SROM13 */
+/* boardflags4 for SROM12 */
#define BFL4_SROM12_4dBPAD (1 << 0) /* To distinguish between normal and 4dB pad board */
#define BFL4_SROM12_2G_DETTYPE (1 << 1) /* Determine power detector type for 2G */
#define BFL4_SROM12_5G_DETTYPE (1 << 2) /* Determine power detector type for 5G */
-#define BFL4_SROM13_DETTYPE_EN (1 << 3) /* using pa_dettype from SROM13 flags */
-#define BFL4_SROM13_CCK_SPUR_EN (1 << 4) /* using cck spur reduction setting in 4366 */
-#define BFL4_SROM13_1P5V_CBUCK (1 << 7) /* using 1.5V cbuck board in 4366 */
-#define BFL4_SROM13_EN_SW_TXRXCHAIN_MASK (1 << 8) /* Enable/disable bit for sw chain mask */
-
#define BFL4_4364_HARPOON 0x0100 /* Harpoon module 4364 */
#define BFL4_4364_GODZILLA 0x0200 /* Godzilla module 4364 */
-#define BFL4_BTCOEX_OVER_SECI 0x00000400 /* Enable btcoex over gci seci */
+
/* papd params */
#define PAPD_TX_ATTN_2G 0xFF
#define PAPD_CALREF_DB_5G 0xFF00
#define PAPD_CALREF_DB_5G_SHIFT 8
+
/* board specific GPIO assignment, gpio 0-3 are also customer-configurable led */
#define BOARD_GPIO_BTC3W_IN 0x850 /* bit 4 is RF_ACTIVE, bit 6 is STATUS, bit 11 is PRI */
#define BOARD_GPIO_BTC3W_OUT 0x020 /* bit 5 is TX_CONF */
#define BOARD_GPIO_13_WLAN_PWR 0x2000 /* throttle WLAN power on X14 board */
#define GPIO_BTC4W_OUT_4312 0x010 /* bit 4 is BT_IODISABLE */
+#define GPIO_BTC4W_OUT_43224 0x020 /* bit 5 is BT_IODISABLE */
+#define GPIO_BTC4W_OUT_43224_SHARED 0x0e0 /* bit 5 is BT_IODISABLE */
+#define GPIO_BTC4W_OUT_43225 0x0e0 /* bit 5 BT_IODISABLE, bit 6 SW_BT, bit 7 SW_WL */
+#define GPIO_BTC4W_OUT_43421 0x020 /* bit 5 is BT_IODISABLE */
+#define GPIO_BTC4W_OUT_4313 0x060 /* bit 5 SW_BT, bit 6 SW_WL */
+#define GPIO_BTC4W_OUT_4331_SHARED 0x010 /* GPIO 4 */
#define PCI_CFG_GPIO_SCS 0x10 /* PCI config space bit 4 for 4306c0 slow clock source */
#define PCI_CFG_GPIO_HWRAD 0x20 /* PCI config space GPIO 13 for hw radio disable */
#define MIN_SLOW_CLK 32 /* us Slow clock period */
#define XTAL_ON_DELAY 1000 /* us crystal power-on delay */
+
+/* 43341 Boards */
+#define BCM943341WLABGS_SSID 0x062d
+
+/* 43342 Boards */
+#define BCM943342FCAGBI_SSID 0x0641
+
/* 43012 wlbga Board */
#define BCM943012WLREF_SSID 0x07d7
/*
* Fundamental constants relating to DHCP Protocol
*
- * Copyright (C) 1999-2019, Broadcom.
- *
+ * Copyright (C) 1999-2017, Broadcom Corporation
+ *
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
* under the terms of the GNU General Public License version 2 (the "GPL"),
* available at http://www.broadcom.com/licenses/GPLv2.php, with the
* following added to such license:
- *
+ *
* As a special exception, the copyright holders of this software give you
* permission to link this software with independent modules, and to copy and
* distribute the resulting executable under terms of your choice, provided that
* the license of that module. An independent module is a module which is not
* derived from this software. The special exception does not apply to any
* modifications of the software.
- *
+ *
* Notwithstanding the above, under no circumstances may you combine this
* software in any way with any other Broadcom software provided under a license
* other than the GPL, without Broadcom's express prior written consent.
/*
* Byte order utilities
*
- * Copyright (C) 1999-2019, Broadcom.
- *
+ * Copyright (C) 1999-2017, Broadcom Corporation
+ *
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
* under the terms of the GNU General Public License version 2 (the "GPL"),
* available at http://www.broadcom.com/licenses/GPLv2.php, with the
* following added to such license:
- *
+ *
* As a special exception, the copyright holders of this software give you
* permission to link this software with independent modules, and to copy and
* distribute the resulting executable under terms of your choice, provided that
* the license of that module. An independent module is a module which is not
* derived from this software. The special exception does not apply to any
* modifications of the software.
- *
+ *
* Notwithstanding the above, under no circumstances may you combine this
* software in any way with any other Broadcom software provided under a license
* other than the GPL, without Broadcom's express prior written consent.
*
* <<Broadcom-WL-IPTag/Open:>>
*
- * $Id: bcmendian.h 788572 2018-11-13 03:52:19Z $
+ * $Id: bcmendian.h 514727 2014-11-12 03:02:48Z $
*
* This file by default provides proper behavior on little-endian architectures.
* On big-endian architectures, IL_BIGENDIAN should be defined.
((uint64)((((uint64)(val) & 0x00000000ffffffffULL) << 32) | \
(((uint64)(val) & 0xffffffff00000000ULL) >> 32)))
+
/* Byte swapping macros
* Host <=> Network (Big Endian) for 16- and 32-bit values
* Host <=> Little-Endian for 16- and 32-bit values
#define ltoh16_buf(buf, i)
#define htol16_buf(buf, i)
-#define ltoh32_buf(buf, i)
-#define htol32_buf(buf, i)
-#define ltoh64_buf(buf, i)
-#define htol64_buf(buf, i)
/* Unaligned loads and stores in host byte order */
#define load32_ua(a) ltoh32_ua(a)
#define store32_ua(a, v) htol32_ua_store(v, a)
#define load16_ua(a) ltoh16_ua(a)
#define store16_ua(a, v) htol16_ua_store(v, a)
-#define load64_ua(a) ltoh64_ua(a)
-#define store64_ua(a, v) htol64_ua_store(v, a)
-
-#define _LTOH16_UA(cp) (uint16)((cp)[0] | ((cp)[1] << 8))
-#define _LTOH32_UA(cp) (uint32)((cp)[0] | ((cp)[1] << 8) | ((cp)[2] << 16) | ((cp)[3] << 24))
-#define _NTOH16_UA(cp) (uint16)(((cp)[0] << 8) | (cp)[1])
-#define _NTOH32_UA(cp) (uint32)(((cp)[0] << 24) | ((cp)[1] << 16) | ((cp)[2] << 8) | (cp)[3])
-
-#define _LTOH64_UA(cp) ((uint64)(cp)[0] | ((uint64)(cp)[1] << 8) | \
- ((uint64)(cp)[2] << 16) | ((uint64)(cp)[3] << 24) | \
- ((uint64)(cp)[4] << 32) | ((uint64)(cp)[5] << 40) | \
- ((uint64)(cp)[6] << 48) | ((uint64)(cp)[7] << 56))
-#define _NTOH64_UA(cp) ((uint64)(cp)[7] | ((uint64)(cp)[6] << 8) | \
- ((uint64)(cp)[5] << 16) | ((uint64)(cp)[4] << 24) | \
- ((uint64)(cp)[3] << 32) | ((uint64)(cp)[2] << 40) | \
- ((uint64)(cp)[1] << 48) | ((uint64)(cp)[0] << 56))
+#define _LTOH16_UA(cp) ((cp)[0] | ((cp)[1] << 8))
+#define _LTOH32_UA(cp) ((cp)[0] | ((cp)[1] << 8) | ((cp)[2] << 16) | ((cp)[3] << 24))
+#define _NTOH16_UA(cp) (((cp)[0] << 8) | (cp)[1])
+#define _NTOH32_UA(cp) (((cp)[0] << 24) | ((cp)[1] << 16) | ((cp)[2] << 8) | (cp)[3])
#define ltoh_ua(ptr) \
(sizeof(*(ptr)) == sizeof(uint8) ? *(const uint8 *)(ptr) : \
} \
})
-#define bcmswap32_buf(buf, len) ({ \
- uint32 *_buf = (uint32 *)(buf); \
- uint _wds = (len) / 4; \
- while (_wds--) { \
- *_buf = bcmswap32(*_buf); \
- _buf++; \
- } \
-})
-
-#define bcmswap64_buf(buf, len) ({ \
- uint64 *_buf = (uint64 *)(buf); \
- uint _wds = (len) / 8; \
- while (_wds--) { \
- *_buf = bcmswap64(*_buf); \
- _buf++; \
- } \
-})
-
#define htol16_ua_store(val, bytes) ({ \
uint16 _val = (val); \
uint8 *_bytes = (uint8 *)(bytes); \
_bytes[3] = _val >> 24; \
})
-#define htol64_ua_store(val, bytes) ({ \
- uint64 _val = (val); \
- uint8 *_bytes = (uint8 *)(bytes); \
- int i; \
- for (i = 0; i < (int)sizeof(_val); ++i) { \
- *_bytes++ = _val & 0xff; \
- _val >>= 8; \
- } \
-})
-
#define hton16_ua_store(val, bytes) ({ \
uint16 _val = (val); \
uint8 *_bytes = (uint8 *)(bytes); \
_LTOH32_UA(_bytes); \
})
-#define ltoh64_ua(bytes) ({ \
- const uint8 *_bytes = (const uint8 *)(bytes); \
- _LTOH64_UA(_bytes); \
-})
-
#define ntoh16_ua(bytes) ({ \
const uint8 *_bytes = (const uint8 *)(bytes); \
_NTOH16_UA(_bytes); \
_NTOH32_UA(_bytes); \
})
-#define ntoh64_ua(bytes) ({ \
- const uint8 *_bytes = (const uint8 *)(bytes); \
- _NTOH64_UA(_bytes); \
-})
-
#else /* !__GNUC__ */
/* Inline versions avoid referencing the argument multiple times */
bytes[3] = val >> 24;
}
-/*
- * Store 64-bit value to unaligned little-endian byte array.
- */
-static INLINE void
-htol64_ua_store(uint64 val, uint8 *bytes)
-{
- int i;
- for (i = 0; i < sizeof(val); ++i) {
- *bytes++ = (uint8)(val & 0xff);
- val >>= 8;
- }
-}
-
/*
* Store 16-bit value to unaligned network-(big-)endian byte array.
*/
return _LTOH32_UA((const uint8 *)bytes);
}
-/*
- * Load 64-bit value from unaligned little-endian byte array.
- */
-static INLINE uint64
-ltoh64_ua(const void *bytes)
-{
- return _LTOH64_UA((const uint8 *)bytes);
-}
-
/*
* Load 16-bit value from unaligned big-(network-)endian byte array.
*/
return _NTOH32_UA((const uint8 *)bytes);
}
-/*
- * Load 64-bit value from unaligned big-(network-)endian byte array.
- */
-static INLINE uint64
-ntoh64_ua(const void *bytes)
-{
- return _NTOH64_UA((const uint8 *)bytes);
-}
-
#endif /* !__GNUC__ */
#endif /* !_BCMENDIAN_H_ */
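/*
 * Usage sketch (illustrative only): reading and writing a little-endian
 * 16-bit field at an unaligned offset with the helpers defined above. The
 * buffer layout and the offset used here are hypothetical.
 */
static void
example_bump_le16_field(uint8 *pkt)
{
	uint16 val;

	val = ltoh16_ua(pkt + 3);		/* unaligned little-endian load */
	val += 1;
	htol16_ua_store(val, pkt + 3);		/* unaligned little-endian store */
}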
/*
* Broadcom Ethernettype protocol definitions
*
- * Copyright (C) 1999-2019, Broadcom.
- *
+ * Copyright (C) 1999-2017, Broadcom Corporation
+ *
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
* under the terms of the GNU General Public License version 2 (the "GPL"),
* available at http://www.broadcom.com/licenses/GPLv2.php, with the
* following added to such license:
- *
+ *
* As a special exception, the copyright holders of this software give you
* permission to link this software with independent modules, and to copy and
* distribute the resulting executable under terms of your choice, provided that
* the license of that module. An independent module is a module which is not
* derived from this software. The special exception does not apply to any
* modifications of the software.
- *
+ *
* Notwithstanding the above, under no circumstances may you combine this
* software in any way with any other Broadcom software provided under a license
* other than the GPL, without Broadcom's express prior written consent.
#ifndef _TYPEDEFS_H_
#include <typedefs.h>
-#endif // endif
+#endif
/* This marks the start of a packed structure section. */
#include <packed_section_start.h>
#define BCMILCP_SUBTYPE_CERT 32770
#define BCMILCP_SUBTYPE_SES 32771
+
#define BCMILCP_BCM_SUBTYPE_RESERVED 0
#define BCMILCP_BCM_SUBTYPE_EVENT 1
#define BCMILCP_BCM_SUBTYPE_SES 2
uint16 usr_subtype;
} BWL_POST_PACKED_STRUCT bcmeth_hdr_t;
+
/* This marks the end of a packed structure section. */
#include <packed_section_end.h>
*
* Dependencies: bcmeth.h
*
- * Copyright (C) 1999-2019, Broadcom.
- *
+ * Copyright (C) 1999-2017, Broadcom Corporation
+ *
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
* under the terms of the GNU General Public License version 2 (the "GPL"),
* available at http://www.broadcom.com/licenses/GPLv2.php, with the
* following added to such license:
- *
+ *
* As a special exception, the copyright holders of this software give you
* permission to link this software with independent modules, and to copy and
* distribute the resulting executable under terms of your choice, provided that
* the license of that module. An independent module is a module which is not
* derived from this software. The special exception does not apply to any
* modifications of the software.
- *
+ *
* Notwithstanding the above, under no circumstances may you combine this
* software in any way with any other Broadcom software provided under a license
* other than the GPL, without Broadcom's express prior written consent.
*
* <<Broadcom-WL-IPTag/Open:>>
*
- * $Id: bcmevent.h 822151 2019-05-28 18:37:23Z $
+ * $Id: bcmevent.h 700076 2017-05-17 14:42:22Z $
*
*/
#include <bcmeth.h>
#if defined(DNGL_EVENT_SUPPORT)
#include <dnglevent.h>
-#endif // endif
+#endif
/* This marks the start of a packed structure section. */
#include <packed_section_start.h>
wl_event_msg_t event;
#if defined(DNGL_EVENT_SUPPORT)
bcm_dngl_event_msg_t dngl_event;
-#endif // endif
+#endif
/* add new event here */
} bcm_event_msg_u_t;
#define WLC_E_ACTION_FRAME_COMPLETE 60 /* Action frame Tx complete */
#define WLC_E_PRE_ASSOC_IND 61 /* assoc request received */
#define WLC_E_PRE_REASSOC_IND 62 /* re-assoc request received */
-#ifdef CSI_SUPPORT
-#define WLC_E_CSI 63
-#else
#define WLC_E_CHANNEL_ADOPTED 63
-#endif /* CSI_SUPPORT */
#define WLC_E_AP_STARTED 64 /* AP started */
#define WLC_E_DFS_AP_STOP 65 /* AP stopped due to DFS */
#define WLC_E_DFS_AP_RESUME 66 /* AP resumed due to DFS */
#define WLC_E_INVALID_IE 162 /* Received invalid IE */
#define WLC_E_MODE_SWITCH 163 /* Mode switch event */
#define WLC_E_PKT_FILTER 164 /* Packet filter event */
-#define WLC_E_DMA_TXFLUSH_COMPLETE 165 /* TxFlush done before changing tx/rxchain */
+#define WLC_E_DMA_TXFLUSH_COMPLETE 165 /* TxFlush done before changing
+ * tx/rxchain
+ */
#define WLC_E_FBT 166 /* FBT event */
-#define WLC_E_PFN_SCAN_BACKOFF 167 /* PFN SCAN Backoff event */
+#define WLC_E_PFN_SCAN_BACKOFF 167 /* PFN SCAN Backoff event */
#define WLC_E_PFN_BSSID_SCAN_BACKOFF 168 /* PFN BSSID SCAN Backoff event */
#define WLC_E_AGGR_EVENT 169 /* Aggregated event */
#define WLC_E_TVPM_MITIGATION 171 /* Change in mitigation applied by TVPM */
-#define WLC_E_SCAN_START 172 /* Deprecated */
-#define WLC_E_SCAN 172 /* Scan event */
-#define WLC_E_MBO 173 /* MBO event */
-#define WLC_E_PHY_CAL 174 /* Phy calibration start indication to host */
-#define WLC_E_RPSNOA 175 /* Radio power save start/end indication to host */
-#define WLC_E_ADPS 176 /* ADPS event */
-#define WLC_E_SLOTTED_BSS_PEER_OP 177 /* Per peer SCB delete */
-#define WLC_E_HWA 178 /* HWA events */
-#define WLC_E_GTK_KEYROT_NO_CHANSW 179 /* Avoid Chanswitch while GTK key rotation */
-#define WLC_E_ONBODY_STATUS_CHANGE 180 /* Indication of onbody status change */
-#define WLC_E_BCNRECV_ABORTED 181 /* Fake AP bcnrecv aborted roam event */
-#define WLC_E_PMK_INFO 182 /* PMK,PMKID information event */
-#define WLC_E_BSSTRANS 183 /* BSS Transition request / Response */
-#define WLC_E_WA_LQM 184 /* link quality monitoring */
-#define WLC_E_ACTION_FRAME_OFF_CHAN_DWELL_COMPLETE 185 /* action frame off channel
- * dwell time complete
- */
-#define WLC_E_WSEC 186 /* wsec keymgmt event */
-#define WLC_E_LAST 187 /* highest val + 1 for range checking */
-#if (WLC_E_LAST > 187)
-#error "WLC_E_LAST: Invalid value for last event; must be <= 187."
+#define WLC_E_LAST 172 /* highest val + 1 for range checking */
+#if (WLC_E_LAST > 172)
+#error "WLC_E_LAST: Invalid value for last event; must be <= 172."
#endif /* WLC_E_LAST */
/* define an API for getting the string name of an event */
void wl_event_to_host_order(wl_event_msg_t * evt);
void wl_event_to_network_order(wl_event_msg_t * evt);
+
/* Event status codes */
#define WLC_E_STATUS_SUCCESS 0 /* operation was successful */
#define WLC_E_STATUS_FAIL 1 /* operation failed */
#define WLC_E_STATUS_11HQUIET 11 /* 802.11h quiet period started */
#define WLC_E_STATUS_SUPPRESS 12 /* user disabled scanning (WLC_SET_SCANSUPPRESS) */
#define WLC_E_STATUS_NOCHANS 13 /* no allowable channels to scan */
-#ifdef BCMCCX
-#define WLC_E_STATUS_CCXFASTRM 14 /* scan aborted due to CCX fast roam */
-#endif /* BCMCCX */
#define WLC_E_STATUS_CS_ABORT 15 /* abort channel select */
#define WLC_E_STATUS_ERROR 16 /* request failed due to error */
-#define WLC_E_STATUS_SLOTTED_PEER_ADD 17 /* Slotted scb for peer addition status */
-#define WLC_E_STATUS_SLOTTED_PEER_DEL 18 /* Slotted scb for peer deletion status */
-#define WLC_E_STATUS_RXBCN 19 /* Rx Beacon event for FAKEAP feature */
-#define WLC_E_STATUS_RXBCN_ABORT 20 /* Rx Beacon abort event for FAKEAP feature */
-#define WLC_E_STATUS_LOWPOWER_ON_LOWSPAN 21 /* LOWPOWER scan request during LOWSPAN */
#define WLC_E_STATUS_INVALID 0xff /* Invalid status code to init variables. */
/* 4-way handshake event type */
#define WLC_E_STATUS_SLICE_SWAP_START 3
#define WLC_E_STATUS_SLICE_SWAP_COMPLETE 4
+
/* SDB transition reason code */
#define WLC_E_REASON_HOST_DIRECT 0
#define WLC_E_REASON_INFRA_ASSOC 1
#define WLC_E_REASON_INFRA_ROAM 2
#define WLC_E_REASON_INFRA_DISASSOC 3
#define WLC_E_REASON_NO_MODE_CHANGE_NEEDED 4
+#define WLC_E_REASON_AWDL_ENABLE 5
+#define WLC_E_REASON_AWDL_DISABLE 6
/* WLC_E_SDB_TRANSITION event data */
#define WL_MAX_BSSCFG 4
struct wl_event_sdb_data values[WL_MAX_BSSCFG];
} wl_event_sdb_trans_t;
-/* reason codes for WLC_E_GTK_KEYROT_NO_CHANSW event */
-#define WLC_E_GTKKEYROT_SCANDELAY 0 /* Delay scan while gtk in progress */
-#define WLC_E_GTKKEYROT_SKIPCHANSW_P2P 2 /* Avoid chansw by p2p while gtk in progress */
-
/* roam reason codes */
#define WLC_E_REASON_INITIAL_ASSOC 0 /* initial assoc */
#define WLC_E_REASON_LOW_RSSI 1 /* roamed due to low RSSI */
/* retained for precommit auto-merging errors; remove once all branches are synced */
#define WLC_E_REASON_REQUESTED_ROAM 11
#define WLC_E_REASON_BSSTRANS_REQ 11 /* roamed due to BSS Transition request by AP */
-#define WLC_E_REASON_LOW_RSSI_CU 12 /* roamed due to low RSSI and Channel Usage */
+#define WLC_E_REASON_LOW_RSSI_CU 12 /* roamed due to low RSSI and Channel Usage */
#define WLC_E_REASON_RADAR_DETECTED 13 /* roamed due to radar detection by STA */
-#define WLC_E_REASON_CSA 14 /* roamed due to CSA from AP */
-#define WLC_E_REASON_ESTM_LOW 15 /* roamed due to ESTM low tput */
-#define WLC_E_REASON_SILENT_ROAM 16 /* roamed due to Silent roam */
-#define WLC_E_REASON_INACTIVITY 17 /* full roam scan due to inactivity */
-#define WLC_E_REASON_ROAM_SCAN_TIMEOUT 18 /* roam scan timer timeout */
-#define WLC_E_REASON_LAST 19 /* NOTE: increment this as you add reasons above */
/* prune reason codes */
#define WLC_E_PRUNE_ENCR_MISMATCH 1 /* encryption mismatch */
#define WLC_E_RSN_MISMATCH 8 /* STA does not support AP's RSN */
#define WLC_E_PRUNE_NO_COMMON_RATES 9 /* No rates in common with AP */
#define WLC_E_PRUNE_BASIC_RATES 10 /* STA does not support all basic rates of BSS */
-#ifdef BCMCCX
-#define WLC_E_PRUNE_CCXFAST_PREVAP 11 /* CCX FAST ROAM: prune previous AP */
-#endif /* def BCMCCX */
#define WLC_E_PRUNE_CIPHER_NA 12 /* BSS's cipher not supported */
#define WLC_E_PRUNE_KNOWN_STA 13 /* AP is already known to us as a STA */
-#ifdef BCMCCX
-#define WLC_E_PRUNE_CCXFAST_DROAM 14 /* CCX FAST ROAM: prune unqualified AP */
-#endif /* def BCMCCX */
#define WLC_E_PRUNE_WDS_PEER 15 /* AP is already known to us as a WDS peer */
#define WLC_E_PRUNE_QBSS_LOAD 16 /* QBSS LOAD - AAC is too low */
#define WLC_E_PRUNE_HOME_AP 17 /* prune home AP */
-#ifdef BCMCCX
-#define WLC_E_PRUNE_AP_BLOCKED 18 /* prune blocked AP */
-#define WLC_E_PRUNE_NO_DIAG_SUPPORT 19 /* prune due to diagnostic mode not supported */
-#endif /* BCMCCX */
#define WLC_E_PRUNE_AUTH_RESP_MAC 20 /* suppress auth resp by MAC filter */
-#define WLC_E_PRUNE_ASSOC_RETRY_DELAY 21 /* MBO assoc retry delay */
-#define WLC_E_PRUNE_RSSI_ASSOC_REJ 22 /* OCE RSSI-based assoc rejection */
/* WPA failure reason codes carried in the WLC_E_PSK_SUP event */
#define WLC_E_SUP_OTHER 0 /* Other reason */
#define WLC_E_SUP_WPA_PSK_TMO 15 /* WPA PSK 4-way handshake timeout */
#define WLC_E_SUP_WPA_PSK_M1_TMO 16 /* WPA PSK 4-way handshake M1 timeout */
#define WLC_E_SUP_WPA_PSK_M3_TMO 17 /* WPA PSK 4-way handshake M3 timeout */
-#define WLC_E_SUP_GTK_UPDATE_FAIL 18 /* GTK update failure */
-#define WLC_E_SUP_TK_UPDATE_FAIL 19 /* TK update failure */
-#define WLC_E_SUP_KEY_INSTALL_FAIL 20 /* Buffered key install failure */
-#define WLC_E_SUP_PTK_UPDATE 21 /* PTK update */
-#define WLC_E_SUP_MSG1_PMKID_MISMATCH 22 /* MSG1 PMKID not matched to PMKSA cache list */
-
-/* event msg for WLC_E_SUP_PTK_UPDATE */
-typedef struct wlc_sup_ptk_update {
- uint16 version; /* 0x0001 */
- uint16 length; /* length of data that follows */
- uint32 tsf_low; /* tsf at which ptk updated by internal supplicant */
- uint32 tsf_high;
- uint8 key_id; /* always 0 for PTK update */
- uint8 tid; /* tid for the PN below - PTK refresh is per key */
- uint16 pn_low;
- uint32 pn_high; /* local highest PN of any tid of the key when M4 was sent */
-} wlc_sup_ptk_update_t;
-
-/* sub event of WLC_E_WSEC */
-typedef enum {
- WLC_WSEC_EVENT_PTK_PN_SYNC_ERROR = 0x01
-} wl_wsec_event_type_t;
-
-/* sub event msg - WLC_WSEC_EVENT_PTK_PN_SYNC_ERROR */
-struct wlc_wsec_ptk_pn_sync_error_v1 {
- uint32 tsf_low; /* tsf at which PN sync error happened */
- uint32 tsf_high;
- uint8 key_id; /* always 0 for PTK update */
- uint8 tid; /* tid for the PN below - PTK refresh is per key */
- uint16 PAD1;
- uint16 rx_seqn; /* d11 seq number */
- uint16 pn_low;
- uint32 pn_high; /* local PN window start for the tid */
- uint16 key_idx; /* key idx in the keymgmt */
- uint16 rx_pn_low;
- uint32 rx_pn_high; /* Rx PN window start for the tid */
- uint32 span_time; /* time elapsed since replay */
- uint32 span_pkts; /* pkt count since replay */
-};
-
-typedef struct wlc_wsec_ptk_pn_sync_error_v1 wlc_wsec_ptk_pn_sync_error_t;
-
-/* WLC_E_WSEC event msg */
-typedef struct wlc_wsec_event {
- uint16 version; /* 0x0001 */
- uint16 length; /* length of data that follows */
- uint16 type; /* wsec_event_type_t */
- uint16 PAD1;
- union {
- wlc_wsec_ptk_pn_sync_error_t pn_sync_err;
- } data;
-} wlc_wsec_event_t;
+
/* Ucode reason codes carried in the WLC_E_MACDBG event */
#define WLC_E_MACDBG_LIST_PSM 0 /* Dump list update for PSM registers */
* WLC_E_P2P_PROBREQ_MSG
* WLC_E_ACTION_FRAME_RX
*/
-
-#define MAX_PHY_CORE_NUM 4u
-
-#define BCM_RX_FRAME_DATA_VERSION_2 2u
-
-typedef BWL_PRE_PACKED_STRUCT struct wl_event_rx_frame_data_v2 {
+typedef BWL_PRE_PACKED_STRUCT struct wl_event_rx_frame_data {
uint16 version;
- uint16 len;
uint16 channel; /* Matches chanspec_t format from bcmwifi_channels.h */
- uint16 pad;
int32 rssi;
uint32 mactime;
uint32 rate;
- int8 per_core_rssi[MAX_PHY_CORE_NUM];
-} BWL_POST_PACKED_STRUCT wl_event_rx_frame_data_v2_t;
+} BWL_POST_PACKED_STRUCT wl_event_rx_frame_data_t;
-typedef BWL_PRE_PACKED_STRUCT struct wl_event_rx_frame_data_v1 {
- uint16 version;
- uint16 channel; /* Matches chanspec_t format from bcmwifi_channels.h */
- int32 rssi;
- uint32 mactime;
- uint32 rate;
-} BWL_POST_PACKED_STRUCT wl_event_rx_frame_data_v1_t;
-
-#define BCM_RX_FRAME_DATA_VERSION_1 1u
-
-#ifndef WL_EVENT_RX_FRAME_DATA_ALIAS
-#define BCM_RX_FRAME_DATA_VERSION BCM_RX_FRAME_DATA_VERSION_1
-typedef wl_event_rx_frame_data_v1_t wl_event_rx_frame_data_t;
-#endif // endif
+#define BCM_RX_FRAME_DATA_VERSION 1
/* WLC_E_IF event data */
typedef struct wl_event_data_if {
#define WLC_E_IF_ROLE_WDS 2 /* WDS link */
#define WLC_E_IF_ROLE_P2P_GO 3 /* P2P Group Owner */
#define WLC_E_IF_ROLE_P2P_CLIENT 4 /* P2P Client */
-#ifdef WLMESH_CFG80211
-#define WLC_E_IF_ROLE_MESH 5 /* MESH */
-#endif /* WLMESH_CFG80211 */
-#define WLC_E_IF_ROLE_IBSS 8 /* IBSS */
-#define WLC_E_IF_ROLE_NAN 9 /* NAN */
+#define WLC_E_IF_ROLE_IBSS 8 /* IBSS */
+#define WLC_E_IF_ROLE_NAN 9 /* NAN */
/* WLC_E_RSSI event data */
typedef struct wl_event_data_rssi {
int32 noise;
} wl_event_data_rssi_t;
-#define WL_EVENT_WA_LQM_VER 0 /* initial version */
-
-#define WL_EVENT_WA_LQM_BASIC 0 /* event sub-types */
-typedef struct { /* payload of subevent in xtlv */
- int32 rssi;
- int32 snr;
- uint32 tx_rate;
- uint32 rx_rate;
-} wl_event_wa_lqm_basic_t;
-
-typedef struct wl_event_wa_lqm {
- uint16 ver; /* version */
- uint16 len; /* total length structure */
- uint8 subevent[]; /* sub-event data in bcm_xtlv_t format */
-} wl_event_wa_lqm_t;
-
/* WLC_E_IF flag */
#define WLC_E_IF_FLAGS_BSSCFG_NOIF 0x1 /* no host I/F creation needed */
#define WLC_E_LINK_DISASSOC 2 /* Link down because of disassoc */
#define WLC_E_LINK_ASSOC_REC 3 /* Link down because assoc recreate failed */
#define WLC_E_LINK_BSSCFG_DIS 4 /* Link down due to bsscfg down */
-#define WLC_E_LINK_ASSOC_FAIL 5 /* Link down because assoc to new AP during roaming failed */
+
/* WLC_E_NDIS_LINK event data */
typedef BWL_PRE_PACKED_STRUCT struct ndis_link_parms {
#define WLAN_TDLS_SET_SETUP_WFD_IE 12
#define WLAN_TDLS_SET_WFD_ENABLED 13
#define WLAN_TDLS_SET_WFD_DISABLED 14
-#endif // endif
+#endif
/* WLC_E_RANGING_EVENT subtypes */
#define WLC_E_RANGING_RESULTS 0
-#define PHY_CAL_EVT_VERSION 1
-typedef struct wlc_phy_cal_info {
- uint16 version; /* structure version */
- uint16 length; /* length of the rest of the structure - pad */
- uint16 chanspec;
- uint8 start;
- uint8 phase;
- int16 temp;
- uint8 reason;
- uint8 pad;
-} wlc_phy_cal_info_t;
/* GAS event data */
typedef BWL_PRE_PACKED_STRUCT struct wl_event_gas {
ts_sample_t ts_buff[1]; /* Timestamps */
} BWL_POST_PACKED_STRUCT wl_proxd_event_ts_results_t;
+
/* Video Traffic Interference Monitor Event */
#define INTFER_EVENT_VERSION 1
#define INTFER_STREAM_TYPE_NONTCP 1
char payload[1]; /* Measurement payload */
} wl_rrm_event_t;
+
/* WLC_E_PSTA_PRIMARY_INTF_IND event data */
typedef struct wl_psta_primary_intf_event {
struct ether_addr prim_ea; /* primary intf ether addr */
} wl_dpsta_intf_event_t;
/* ********** NAN protocol events/subevents ********** */
-#ifndef NAN_EVENT_BUFFER_SIZE
#define NAN_EVENT_BUFFER_SIZE 512 /* max size */
-#endif /* NAN_EVENT_BUFFER_SIZE */
/* NAN Events sent by firmware */
/*
WL_NAN_EVENT_DATA_IF_ADD = 18, /* Unused */
WL_NAN_EVENT_DATA_PEER_ADD = 19, /* Event for peer add */
/* nan 2.0 */
+ /* Will be removed after source code is committed. */
+ WL_NAN_EVENT_DATA_IND = 20,
WL_NAN_EVENT_PEER_DATAPATH_IND = 20, /* Incoming DP req */
+ /* Will be removed after source code is committed. */
+ WL_NAN_EVENT_DATA_CONF = 21,
WL_NAN_EVENT_DATAPATH_ESTB = 21, /* DP Established */
WL_NAN_EVENT_SDF_RX = 22, /* SDF payload */
WL_NAN_EVENT_DATAPATH_END = 23, /* DP Terminate recvd */
+ /* Below event needs to be removed after source code is committed. */
+ WL_NAN_EVENT_DATA_END = 23,
WL_NAN_EVENT_BCN_RX = 24, /* received beacon payload */
WL_NAN_EVENT_PEER_DATAPATH_RESP = 25, /* Peer's DP response */
WL_NAN_EVENT_PEER_DATAPATH_CONF = 26, /* Peer's DP confirm */
WL_NAN_EVENT_RNG_TERM_IND = 29, /* Range Termination */
WL_NAN_EVENT_PEER_DATAPATH_SEC_INST = 30, /* Peer's DP sec install */
WL_NAN_EVENT_TXS = 31, /* for tx status of follow-up and SDFs */
- WL_NAN_EVENT_DW_START = 32, /* dw start */
- WL_NAN_EVENT_DW_END = 33, /* dw end */
- WL_NAN_EVENT_CHAN_BOUNDARY = 34, /* channel switch event */
- WL_NAN_EVENT_MR_CHANGED = 35, /* AMR or IMR changed event during DW */
- WL_NAN_EVENT_RNG_RESP_IND = 36, /* Range Response Rx */
- WL_NAN_EVENT_PEER_SCHED_UPD_NOTIF = 37, /* Peer's schedule update notification */
- WL_NAN_EVENT_PEER_SCHED_REQ = 38, /* Peer's schedule request */
- WL_NAN_EVENT_PEER_SCHED_RESP = 39, /* Peer's schedule response */
- WL_NAN_EVENT_PEER_SCHED_CONF = 40, /* Peer's schedule confirm */
- WL_NAN_EVENT_SENT_DATAPATH_END = 41, /* Sent DP terminate frame */
- WL_NAN_EVENT_SLOT_START = 42, /* SLOT_START event */
- WL_NAN_EVENT_SLOT_END = 43, /* SLOT_END event */
- WL_NAN_EVENT_HOST_ASSIST_REQ = 44, /* Requesting host assist */
- WL_NAN_EVENT_RX_MGMT_FRM = 45, /* NAN management frame received */
- WL_NAN_EVENT_DISC_CACHE_TIMEOUT = 46, /* Disc cache timeout */
-
WL_NAN_EVENT_INVALID /* delimiter for max value */
} nan_app_events_e;
-#define NAN_EV_MASK(ev) (1 << (ev - 1))
+#define NAN_EV_MASK(ev) \
+ (1 << (ev - 1))
#define IS_NAN_EVT_ON(var, evt) ((var & (1 << (evt-1))) != 0)
-
-#define NAN_EV_MASK_SET(var, evt) \
- ((evt < WL_NAN_EVMASK_EXTN_LEN * 8) ? \
- ((*((uint8 *)var + ((evt - 1)/8))) |= (1 << ((evt - 1) %8))) : 0)
-#define IS_NAN_EVENT_ON(var, evt) \
- ((evt < WL_NAN_EVMASK_EXTN_LEN * 8) && \
- (((*((uint8 *)var + ((evt - 1)/8))) & (1 << ((evt - 1) %8))) != 0))
-
/* ******************* end of NAN section *************** */
-typedef enum wl_scan_events {
- WL_SCAN_START = 1,
- WL_SCAN_END = 2
-} wl_scan_events;
-
/* WLC_E_ULP event data */
#define WL_ULP_EVENT_VERSION 1
#define WL_ULP_DISABLE_CONSOLE 1 /* Disable console message on ULP entry */
radar_detected_event_info_t radar_info[2];
} wl_event_radar_detect_data_t;
+
#define WL_EVENT_MODESW_VER_1 1
#define WL_EVENT_MODESW_VER_CURRENT WL_EVENT_MODESW_VER_1
CHANSW_NAN = 18, /* channel switch due to NAN */
CHANSW_NAN_DISC = 19, /* channel switch due to NAN Disc */
CHANSW_NAN_SCHED = 20, /* channel switch due to NAN Sched */
+ CHANSW_AWDL_AW = 21, /* channel switch due to AWDL aw */
+ CHANSW_AWDL_SYNC = 22, /* channel switch due to AWDL sync */
+ CHANSW_AWDL_CAL = 23, /* channel switch due to AWDL Cal */
+ CHANSW_AWDL_PSF = 24, /* channel switch due to AWDL PSF */
+ CHANSW_AWDL_OOB_AF = 25, /* channel switch due to AWDL OOB action frame */
CHANSW_TDLS = 26, /* channel switch due to TDLS */
CHANSW_PROXD = 27, /* channel switch due to PROXD */
- CHANSW_SLOTTED_BSS = 28, /* channel switch due to slotted bss */
- CHANSW_SLOTTED_CMN_SYNC = 29, /* channel switch due to Common Sync Layer */
- CHANSW_SLOTTED_BSS_CAL = 30, /* channel switch due to Cal request from slotted bss */
- CHANSW_MAX_NUMBER = 31 /* max channel switch reason */
+ CHANSW_MAX_NUMBER = 28 /* max channel switch reason */
} wl_chansw_reason_t;
#define CHANSW_REASON(reason) (1 << reason)
uint8 data[]; /* Aggregate buffer containing Events */
} event_aggr_data_t;
+
/* WLC_E_TVPM_MITIGATION event structure version */
#define WL_TVPM_MITIGATION_VERSION 1
uint16 on_off; /* mitigation status bits */
} wl_event_tvpm_mitigation_t;
-/* Event structures for sub health checks of PHY */
-
-#define WL_PHY_HC_DESENSE_STATS_VER (1)
-typedef struct wl_hc_desense_stats {
- uint16 version;
- uint16 chanspec;
- int8 allowed_weakest_rssi; /* based on weakest link RSSI */
- uint8 ofdm_desense; /* Desense requested for OFDM */
- uint8 bphy_desense; /* Desense requested for bphy */
- int8 glitch_upd_wait; /* wait post ACI mitigation */
-} wl_hc_desense_stats_v1_t;
-
-#define WL_PHY_HC_TEMP_STATS_VER (1)
-typedef struct wl_hc_temp_stats {
- uint16 version;
- uint16 chanspec;
- int16 curtemp; /* Temperature */
- uint8 temp_disthresh; /* Threshold to reduce tx chain */
- uint8 temp_enthresh; /* Threshold to increase tx chains */
- uint tempsense_period; /* Temperature check period */
- bool heatedup; /* 1: temp throttling on */
- uint8 bitmap; /* Indicating rx and tx chains */
- uint8 pad[2];
-} wl_hc_temp_stats_v1_t;
-
-#define WL_PHY_HC_TEMP_STATS_VER_2 (2)
-typedef struct {
- uint16 version;
- uint16 chanspec;
- int16 curtemp; /* Temperature */
- uint8 pad[2];
-} wl_hc_temp_stats_v2_t;
-
-#define WL_PHY_HC_VCOCAL_STATS_VER (1)
-typedef struct wl_hc_vcocal_stats {
- uint16 version;
- uint16 chanspec;
- int16 curtemp; /* Temperature */
- /* Ring buffer - Maintains history of previous 16 wake/sleep cycles */
- uint16 vcocal_status_wake;
- uint16 vcocal_status_sleep;
- uint16 plllock_status_wake;
- uint16 plllock_status_sleep;
- /* Cal Codes */
- uint16 cc_maincap;
- uint16 cc_secondcap;
- uint16 cc_auxcap;
-} wl_hc_vcocal_stats_v1_t;
-
-#define WL_PHY_HC_TXPWR_STATS_VER (1)
-typedef struct wl_hc_tx_stats {
- uint16 version;
- uint16 chanspec;
- int8 tgt_pwr[MAX_PHY_CORE_NUM]; /* Target pwr (qdBm) */
- int8 estPwr[MAX_PHY_CORE_NUM]; /* Rate corrected (qdBm) */
- int8 estPwr_adj[MAX_PHY_CORE_NUM]; /* Max power (qdBm) */
- uint8 baseindex[MAX_PHY_CORE_NUM]; /* Tx base index */
- int16 temp; /* Temperature */
- uint16 TxCtrlWrd[3]; /* 6 PHY ctrl bytes */
- int8 min_txpower; /* min tx power per ant */
- uint8 pad[3];
-} wl_hc_txpwr_stats_v1_t;
-
-#define WL_PHY_HC_TXPWR_STATS_VER_2 (2)
-typedef struct {
- uint16 version;
- uint16 chanspec;
- int8 tgt_pwr[MAX_PHY_CORE_NUM]; /* Target pwr (qdBm) */
- uint8 estPwr[MAX_PHY_CORE_NUM]; /* Rate corrected (qdBm) */
- uint8 estPwr_adj[MAX_PHY_CORE_NUM]; /* Max power (qdBm) */
- uint8 baseindex[MAX_PHY_CORE_NUM]; /* Tx base index */
- int16 temp; /* Temperature */
- uint16 TxCtrlWrd[3]; /* 6 PHY ctrl bytes */
- int8 min_txpower; /* min tx power per ant */
- uint8 pad[3];
-} wl_hc_txpwr_stats_v2_t;
-
-typedef enum wl_mbo_event_type {
- WL_MBO_E_CELLULAR_NW_SWITCH = 1,
- WL_MBO_E_BTM_RCVD = 2,
- /* ADD before this */
- WL_MBO_E_LAST = 3 /* highest val + 1 for range checking */
-} wl_mbo_event_type_t;
-
-/* WLC_E_MBO event structure version */
-#define WL_MBO_EVT_VER 1
-
-struct wl_event_mbo {
- uint16 version; /* structure version */
- uint16 length; /* length of the rest of the structure from type */
- wl_mbo_event_type_t type; /* Event type */
- uint8 data[]; /* Variable length data */
-};
-
-/* WLC_E_MBO_CELLULAR_NW_SWITCH event structure version */
-#define WL_MBO_CELLULAR_NW_SWITCH_VER 1
-
-/* WLC_E_MBO_CELLULAR_NW_SWITCH event data */
-struct wl_event_mbo_cell_nw_switch {
- uint16 version; /* structure version */
- uint16 length; /* length of the rest of the structure from reason */
- /* Reason of switch as per MBO Tech spec */
- uint8 reason;
- /* pad */
- uint8 pad;
- /* delay after which re-association can be tried to current BSS (seconds) */
- uint16 reassoc_delay;
- /* How long current association will be there (milli seconds).
- * This is zero if not known or value is overflowing.
- */
- uint32 assoc_time_remain;
-};
-
-/* WLC_E_MBO_BTM_RCVD event structure version */
-#define WL_BTM_EVENT_DATA_VER_1 1
-/* Specific btm event type data */
-struct wl_btm_event_type_data {
- uint16 version;
- uint16 len;
- uint8 transition_reason; /* transition reason code */
- uint8 pad[3]; /* pad */
-};
-
-/* WLC_E_PRUNE event structure version */
-#define WL_BSSID_PRUNE_EVT_VER_1 1
-/* MBO-OCE params */
-struct wl_bssid_prune_evt_info {
- uint16 version;
- uint16 len;
- uint8 SSID[32];
- uint32 time_remaining; /* Time remaining */
- struct ether_addr BSSID;
- uint8 SSID_len;
- uint8 reason; /* Reason code */
- int8 rssi_threshold; /* RSSI threshold */
- uint8 pad[3]; /* pad */
-};
-
-/* WLC_E_HWA Event structure */
-typedef struct wl_event_hwa {
- uint16 version; /* structure version */
- uint16 length; /* length of structure */
- uint32 sub_type; /* Sub event type */
- uint8 data[0]; /* variable length data */
-} wl_event_hwa_t;
-
-#define WL_HWA_EVENT_VER 1
-
-typedef enum wl_event_hwa_subtype {
- WL_HWA_EVENT_SUBTYPE_ERROR = 1,
- WL_HWA_EVENT_SUBTYPE_LAST = 2
-} wl_event_hwa_subtype_t;
-
-/* WLC_E_ADPS status */
-enum {
- WL_E_STATUS_ADPS_DEAUTH = 0,
- WL_E_STATUS_ADPS_MAX
-};
-
-/* WLC_E_ADPS event data */
-#define WL_EVENT_ADPS_VER_1 1
-
-/* WLC_E_ADPS event type */
-#define WL_E_TYPE_ADPS_BAD_AP 1
-
-typedef struct wl_event_adps_bad_ap {
- uint32 status;
- uint32 reason;
- struct ether_addr ea; /* bssid */
-} wl_event_adps_bad_ap_t;
-
-typedef struct wl_event_adps {
- uint16 version; /* structure version */
- uint16 length; /* length of structure */
- uint32 type; /* event type */
- uint8 data[]; /* variable length data */
-} wl_event_adps_v1_t;
-
-typedef wl_event_adps_v1_t wl_event_adps_t;
-
-#define WLC_USER_E_KEY_UPDATE 1 /* Key add/remove */
-
#endif /* _BCMEVENT_H_ */
+++ /dev/null
-/*
- * Fundamental constants relating to ICMP Protocol
- *
- * Copyright (C) 1999-2019, Broadcom.
- *
- * Unless you and Broadcom execute a separate written software license
- * agreement governing use of this software, this software is licensed to you
- * under the terms of the GNU General Public License version 2 (the "GPL"),
- * available at http://www.broadcom.com/licenses/GPLv2.php, with the
- * following added to such license:
- *
- * As a special exception, the copyright holders of this software give you
- * permission to link this software with independent modules, and to copy and
- * distribute the resulting executable under terms of your choice, provided that
- * you also meet, for each linked independent module, the terms and conditions of
- * the license of that module. An independent module is a module which is not
- * derived from this software. The special exception does not apply to any
- * modifications of the software.
- *
- * Notwithstanding the above, under no circumstances may you combine this
- * software in any way with any other Broadcom software provided under a license
- * other than the GPL, without Broadcom's express prior written consent.
- *
- *
- * <<Broadcom-WL-IPTag/Open:>>
- *
- * $Id: bcmicmp.h 700076 2017-05-17 14:42:22Z $
- */
-
-#ifndef _bcmicmp_h_
-#define _bcmicmp_h_
-
-#ifndef _TYPEDEFS_H_
-#include <typedefs.h>
-#endif // endif
-
-/* This marks the start of a packed structure section. */
-#include <packed_section_start.h>
-
-#define ICMP_TYPE_ECHO_REQUEST 8 /* ICMP type echo request */
-#define ICMP_TYPE_ECHO_REPLY 0 /* ICMP type echo reply */
-
-#define ICMP_CHKSUM_OFFSET 2 /* ICMP body checksum offset */
-
-/* ICMP6 error and control message types */
-#define ICMP6_DEST_UNREACHABLE 1
-#define ICMP6_PKT_TOO_BIG 2
-#define ICMP6_TIME_EXCEEDED 3
-#define ICMP6_PARAM_PROBLEM 4
-#define ICMP6_ECHO_REQUEST 128
-#define ICMP6_ECHO_REPLY 129
-#define ICMP_MCAST_LISTENER_QUERY 130
-#define ICMP_MCAST_LISTENER_REPORT 131
-#define ICMP_MCAST_LISTENER_DONE 132
-#define ICMP6_RTR_SOLICITATION 133
-#define ICMP6_RTR_ADVERTISEMENT 134
-#define ICMP6_NEIGH_SOLICITATION 135
-#define ICMP6_NEIGH_ADVERTISEMENT 136
-#define ICMP6_REDIRECT 137
-
-#define ICMP6_RTRSOL_OPT_OFFSET 8
-#define ICMP6_RTRADV_OPT_OFFSET 16
-#define ICMP6_NEIGHSOL_OPT_OFFSET 24
-#define ICMP6_NEIGHADV_OPT_OFFSET 24
-#define ICMP6_REDIRECT_OPT_OFFSET 40
-
-BWL_PRE_PACKED_STRUCT struct icmp6_opt {
- uint8 type; /* Option identifier */
- uint8 length; /* Length including type and length */
- uint8 data[0]; /* Variable length data */
-} BWL_POST_PACKED_STRUCT;
-
-#define ICMP6_OPT_TYPE_SRC_LINK_LAYER 1
-#define ICMP6_OPT_TYPE_TGT_LINK_LAYER 2
-#define ICMP6_OPT_TYPE_PREFIX_INFO 3
-#define ICMP6_OPT_TYPE_REDIR_HDR 4
-#define ICMP6_OPT_TYPE_MTU 5
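/*
 * Usage sketch (illustrative only): scanning the options that follow an
 * ICMPv6 neighbour solicitation for a given option type, using the offsets
 * and struct icmp6_opt above. The function name and caller-supplied buffer
 * are hypothetical; per RFC 4861 the option length field counts 8-octet
 * units.
 */
static const struct icmp6_opt *
example_find_ns_opt(const uint8 *icmp6_body, uint16 body_len, uint8 want_type)
{
	uint16 off = ICMP6_NEIGHSOL_OPT_OFFSET;

	while (off + sizeof(struct icmp6_opt) <= body_len) {
		const struct icmp6_opt *opt =
			(const struct icmp6_opt *)(icmp6_body + off);
		uint16 opt_len = (uint16)(opt->length * 8);

		if (opt_len == 0)
			break;			/* malformed option, stop */
		if (opt->type == want_type)
			return opt;
		off += opt_len;
	}
	return NULL;
}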
-
-/* These fields are stored in network order */
-BWL_PRE_PACKED_STRUCT struct bcmicmp_hdr {
- uint8 type; /* Echo or Echo-reply */
- uint8 code; /* Always 0 */
- uint16 chksum; /* Icmp packet checksum */
-} BWL_POST_PACKED_STRUCT;
-
-/* This marks the end of a packed structure section. */
-#include <packed_section_end.h>
-
-#endif /* #ifndef _bcmicmp_h_ */
+++ /dev/null
-/*
- * bcmiov.h
- * Common iovar handling/parsing support - batching, parsing, sub-cmd dispatch etc.
- * To be used in firmware and host apps or dhd - reducing code size,
- * duplication, and maintenance overhead.
- *
- * Copyright (C) 1999-2019, Broadcom.
- *
- * Unless you and Broadcom execute a separate written software license
- * agreement governing use of this software, this software is licensed to you
- * under the terms of the GNU General Public License version 2 (the "GPL"),
- * available at http://www.broadcom.com/licenses/GPLv2.php, with the
- * following added to such license:
- *
- * As a special exception, the copyright holders of this software give you
- * permission to link this software with independent modules, and to copy and
- * distribute the resulting executable under terms of your choice, provided that
- * you also meet, for each linked independent module, the terms and conditions of
- * the license of that module. An independent module is a module which is not
- * derived from this software. The special exception does not apply to any
- * modifications of the software.
- *
- * Notwithstanding the above, under no circumstances may you combine this
- * software in any way with any other Broadcom software provided under a license
- * other than the GPL, without Broadcom's express prior written consent.
- *
- *
- * <<Broadcom-WL-IPTag/Open:>>
- *
- * $Id$
- */
-
-#ifndef _bcmiov_h_
-#define _bcmiov_h_
-
-#include <typedefs.h>
-#include <bcmutils.h>
-#include <wlioctl.h>
-#ifdef BCMDRIVER
-#include <osl.h>
-#else
-#include <stddef.h> /* For size_t */
-#endif /* BCMDRIVER */
-
-/* Forward declarations */
-typedef uint16 bcm_iov_cmd_id_t;
-typedef uint16 bcm_iov_cmd_flags_t;
-typedef uint16 bcm_iov_cmd_mflags_t;
-typedef struct bcm_iov_cmd_info bcm_iov_cmd_info_t;
-typedef struct bcm_iov_cmd_digest bcm_iov_cmd_digest_t;
-typedef struct bcm_iov_cmd_tlv_info bcm_iov_cmd_tlv_info_t;
-typedef struct bcm_iov_buf bcm_iov_buf_t;
-typedef struct bcm_iov_batch_buf bcm_iov_batch_buf_t;
-typedef struct bcm_iov_parse_context bcm_iov_parse_context_t;
-typedef struct bcm_iov_sub_cmd_context bcm_iov_sub_cmd_context_t;
-
-typedef void* (*bcm_iov_malloc_t)(void* alloc_ctx, size_t len);
-typedef void (*bcm_iov_free_t)(void* alloc_ctx, void *buf, size_t len);
-
-typedef uint8 bcm_iov_tlp_data_type_t;
-typedef struct bcm_iov_tlp bcm_iov_tlp_t;
-typedef struct bcm_iov_tlp_node bcm_iov_tlp_node_t;
-typedef struct bcm_iov_batch_subcmd bcm_iov_batch_subcmd_t;
-
-/*
- * iov validation handler - All the common checks that are required
- * for processing of iovars for any given command.
- */
-typedef int (*bcm_iov_cmd_validate_t)(const bcm_iov_cmd_digest_t *dig,
- uint32 actionid, const uint8* ibuf, size_t ilen, uint8 *obuf, size_t *olen);
-
-/* iov get handler - process subcommand specific input and return output.
- * input and output may overlap, so the callee needs to check if
- * that is supported. For xtlv data a tlv digest is provided to make
- * parsing simpler. Output tlvs may be packed into output buffer using
- * bcm xtlv support. olen is input/output parameter. On input contains
- * max available obuf length and callee must fill the correct length
- * to represent the length of output returned.
- */
-typedef int (*bcm_iov_cmd_get_t)(const bcm_iov_cmd_digest_t *dig,
- const uint8* ibuf, size_t ilen, uint8 *obuf, size_t *olen);
-
-/* iov set handler - process subcommand specific input and return output
- * input and output may overlap, so the callee needs to check if
- * that is supported. olen is input/output parameter. On input contains
- * max available obuf length and callee must fill the correct length
- * to represent the length of output returned.
- */
-typedef int (*bcm_iov_cmd_set_t)(const bcm_iov_cmd_digest_t *dig,
- const uint8* ibuf, size_t ilen, uint8 *obuf, size_t *olen);
-
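/*
 * Handler sketch (illustrative only): a minimal get handler matching the
 * bcm_iov_cmd_get_t signature above. It returns a single hypothetical
 * uint32; a real handler would usually pack XTLVs into obuf instead. The
 * BCME_* codes are assumed to come from bcmutils.h and memcpy from the
 * platform C library.
 */
static int
example_subcmd_get(const bcm_iov_cmd_digest_t *dig,
	const uint8 *ibuf, size_t ilen, uint8 *obuf, size_t *olen)
{
	uint32 value = 0;	/* hypothetical module state */

	(void)dig;
	(void)ibuf;
	(void)ilen;

	if (*olen < sizeof(value))
		return BCME_BUFTOOSHORT;

	memcpy(obuf, &value, sizeof(value));
	*olen = sizeof(value);	/* report the number of bytes written */
	return BCME_OK;
}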
-/* iov (sub-cmd) batch - a vector of commands. count can be zero
- * to support a version query. Each command is a tlv - whose data
- * portion may have an optional return status, followed by a fixed
- * length data header, optionally followed by tlvs.
- * cmd = type|length|<status|options>[header][tlvs]
- */
-
-/*
- * Batch sub-commands have status length included in the
- * response length packed in TLV.
- */
-#define BCM_IOV_STATUS_LEN sizeof(uint32)
-
-/* batch version is indicated by setting high bit. */
-#define BCM_IOV_BATCH_MASK 0x8000
-
-/*
- * Batched commands will have the following memory layout
- * +--------+---------+--------+-------+
- * |version |count | is_set |sub-cmd|
- * +--------+---------+--------+-------+
- * version >= 0x8000
- * count = number of sub-commands encoded in the iov buf
- * sub-cmd one or more sub-commands for processing
- * Where sub-cmd is padded byte buffer with memory layout as follows
- * +--------+---------+-----------------------+-------------+------
- * |cmd-id |length |IN(options) OUT(status)|command data |......
- * +--------+---------+-----------------------+-------------+------
- * cmd-id =sub-command ID
- * length = length of this sub-command
- * IN(options) = On input processing options/flags for this command
- * OUT(status) on output processing status for this command
- * command data = encapsulated IOVAR data as a single structure or packed TLVs for each
- * individual sub-command.
- */
-struct bcm_iov_batch_subcmd {
- uint16 id;
- uint16 len;
- union {
- uint32 options;
- uint32 status;
- } u;
- uint8 data[1];
-};
-
-struct bcm_iov_batch_buf {
- uint16 version;
- uint8 count;
- uint8 is_set; /* to differentiate set or get */
- struct bcm_iov_batch_subcmd cmds[0];
-};
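/*
 * Packing sketch (illustrative only): hand-building a batch buffer with one
 * sub-command, following the memory layout documented above. The sub-command
 * id (0x1) and 4-byte payload are hypothetical; OFFSETOF() and htol16() are
 * assumed to come from the usual Broadcom utility headers, and the
 * sub-command length is assumed to cover the options word plus the payload.
 */
static int
example_pack_one_batch_cmd(uint8 *buf, size_t buflen)
{
	bcm_iov_batch_buf_t *b = (bcm_iov_batch_buf_t *)buf;
	bcm_iov_batch_subcmd_t *sub;
	uint32 payload = 0;
	size_t need = OFFSETOF(bcm_iov_batch_buf_t, cmds) +
		OFFSETOF(bcm_iov_batch_subcmd_t, data) + sizeof(payload);

	if (buflen < need)
		return BCME_BUFTOOSHORT;

	b->version = htol16(BCM_IOV_BATCH_MASK);	/* high bit marks a batch */
	b->count = 1;
	b->is_set = 0;					/* this batch carries a get */

	sub = &b->cmds[0];
	sub->id = htol16(0x1);				/* hypothetical sub-command id */
	sub->len = htol16((uint16)(sizeof(sub->u.options) + sizeof(payload)));
	sub->u.options = 0;
	memcpy(sub->data, &payload, sizeof(payload));

	return (int)need;
}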
-
-/* non-batched command version = major|minor w/ major <= 127 */
-struct bcm_iov_buf {
- uint16 version;
- uint16 len;
- bcm_iov_cmd_id_t id;
- uint16 data[1]; /* 32 bit alignment may be repurposed by the command */
- /* command specific data follows */
-};
-
-/* iov options flags */
-enum {
- BCM_IOV_CMD_OPT_ALIGN_NONE = 0x0000,
- BCM_IOV_CMD_OPT_ALIGN32 = 0x0001,
- BCM_IOV_CMD_OPT_TERMINATE_SUB_CMDS = 0x0002
-};
-
-/* iov command flags */
-enum {
- BCM_IOV_CMD_FLAG_NONE = 0,
- BCM_IOV_CMD_FLAG_STATUS_PRESENT = (1 << 0), /* status present at data start - output only */
- BCM_IOV_CMD_FLAG_XTLV_DATA = (1 << 1), /* data is a set of xtlvs */
- BCM_IOV_CMD_FLAG_HDR_IN_LEN = (1 << 2), /* length starts at version - non-bacthed only */
- BCM_IOV_CMD_FLAG_NOPAD = (1 << 3) /* No padding needed after iov_buf */
-};
-
-/* information about the command, xtlv options and xtlvs_off are meaningful
- * only if XTLV_DATA cmd flag is selected
- */
-struct bcm_iov_cmd_info {
- bcm_iov_cmd_id_t cmd; /* the (sub)command - module specific */
- bcm_iov_cmd_flags_t flags; /* checked by bcmiov but set by module */
- bcm_iov_cmd_mflags_t mflags; /* owned and checked by module */
- bcm_xtlv_opts_t xtlv_opts;
- bcm_iov_cmd_validate_t validate_h; /* command validation handler */
- bcm_iov_cmd_get_t get_h;
- bcm_iov_cmd_set_t set_h;
- uint16 xtlvs_off; /* offset to beginning of xtlvs in cmd data */
- uint16 min_len_set;
- uint16 max_len_set;
- uint16 min_len_get;
- uint16 max_len_get;
-};
-
-/* tlv digest to support parsing of xtlvs for commands w/ tlv data; the tlv
- * digest is available in the handler for the command. The count and order in
- * which tlvs appear in the digest are exactly the same as the order of tlvs
- * passed in the registration for the command. Unknown tlvs are ignored.
- * If registered tlvs are missing datap will be NULL. common iov rocessing
- * acquires an input digest to process input buffer. The handler is responsible
- * for constructing an output digest and use packing functions to generate
- * the output buffer. The handler may use the input digest as output digest once
- * the tlv data is extracted and used. Multiple tlv support involves allocation of
- * tlp nodes, except the first, as required,
- */
-
-/* tlp data type indicates if the data is not used/invalid, input or output */
-enum {
- BCM_IOV_TLP_NODE_INVALID = 0,
- BCM_IOV_TLP_NODE_IN = 1,
- BCM_IOV_TLP_NODE_OUT = 2
-};
-
-struct bcm_iov_tlp {
- uint16 type;
- uint16 len;
- uint16 nodeix; /* node index */
-};
-
-/* tlp data for a given tlv - multiple tlvs of same type chained */
-struct bcm_iov_tlp_node {
- uint8 *next; /* multiple tlv support */
- bcm_iov_tlp_data_type_t type;
- uint8 *data; /* pointer to data in buffer or state */
-};
-
-struct bcm_iov_cmd_digest {
- uint32 version; /* Version */
- void *cmd_ctx;
- struct wlc_bsscfg *bsscfg;
- const bcm_iov_cmd_info_t *cmd_info;
- uint16 max_tlps; /* number of tlps allocated */
- uint16 max_nodes; /* number of nods allocated */
- uint16 num_tlps; /* number of tlps valid */
- uint16 num_nodes; /* number of nods valid */
- uint16 tlps_off; /* offset to tlps */
- uint16 nodes_off; /* offset to nodes */
- /*
- * bcm_iov_tlp_t tlps[max_tlps];
- * bcm_iov_tlp_node_t nodes[max_nodes]
- */
-};
-
-/* get length callback - default length is min_len taken from digest */
-typedef size_t (*bcm_iov_xtlv_get_len_t)(const bcm_iov_cmd_digest_t *dig,
- const bcm_iov_cmd_tlv_info_t *tlv_info);
-
-/* pack to buffer data callback. under some conditions it might
- * not be a straight copy and can refer to context(ual) information and
- * endian conversions...
- */
-typedef void (*bcm_iov_xtlv_pack_t)(const bcm_iov_cmd_digest_t *dig,
- const bcm_iov_cmd_tlv_info_t *tlv_info,
- uint8 *out_buf, const uint8 *in_data, size_t len);
-
-struct bcm_iov_cmd_tlv_info {
- uint16 id;
- uint16 min_len; /* inclusive */
- uint16 max_len; /* inclusive */
- bcm_iov_xtlv_get_len_t get_len;
- bcm_iov_xtlv_pack_t pack;
-};
-
-/*
- * module private parse context. Default version type len is uint16
- */
-enum {
- BCM_IOV_PARSE_CMD_NONE = 0
-};
-typedef uint32 parse_context_opts_t;
-
-/* get digest callback */
-typedef int (*bcm_iov_get_digest_t)(void *cmd_ctx, bcm_iov_cmd_digest_t **dig);
-
-typedef struct bcm_iov_parse_config {
- parse_context_opts_t options; /* to handle different ver lengths */
- bcm_iov_malloc_t alloc_fn;
- bcm_iov_free_t free_fn;
- bcm_iov_get_digest_t dig_fn;
- int max_regs;
- void *alloc_ctx;
-} bcm_iov_parse_config_t;
-
-/* API */
-
-/* All calls return an integer status code BCME_* unless otherwise indicated */
-
-/* return length of allocation for 'num_cmds' commands. data_len
- * includes length of data for all the commands excluding the headers
- */
-size_t bcm_iov_get_alloc_len(int num_cmds, size_t data_len);
-
-/* create parsing context using allocator provided; max_regs provides
- * the number of allowed registrations for commands using the context
- * sub-components of a module may register their own commands indepdently
- * using the parsing context. If digest callback is NULL or returns NULL,
- * the (input) digest is allocated using the provided allocators and released on
- * completion of processing.
- */
-int bcm_iov_create_parse_context(const bcm_iov_parse_config_t *parse_cfg,
- bcm_iov_parse_context_t **parse_ctx);
-
-/* free the parsing context; ctx is set to NULL on exit */
-int bcm_iov_free_parse_context(bcm_iov_parse_context_t **ctx, bcm_iov_free_t free_fn);
-
-/* Return the command context for the module */
-void *bcm_iov_get_cmd_ctx_info(bcm_iov_parse_context_t *parse_ctx);
-
-/* register a command info vector along with supported tlvs. Each command
- * may support a subset of tlvs
- */
-int bcm_iov_register_commands(bcm_iov_parse_context_t *parse_ctx, void *cmd_ctx,
- const bcm_iov_cmd_info_t *info, size_t num_cmds,
- const bcm_iov_cmd_tlv_info_t *tlv_info, size_t num_tlvs);
-
-/* pack the xtlvs provided in the digest. may returns BCME_BUFTOOSHORT, but the
- * out_len is set to required length in that case.
- */
-int bcm_iov_pack_xtlvs(const bcm_iov_cmd_digest_t *dig, bcm_xtlv_opts_t xtlv_opts,
- uint8 *out_buf, size_t out_size, size_t *out_len);
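/*
 * Illustrative sketch, not from the original sources: a minimal registration
 * flow using the declarations above. Assumptions: the header is available as
 * "bcmiov.h", plain malloc/free are acceptable backing allocators, the opaque
 * bcm_iov_cmd_id_t and flag typedefs accept small integer constants, a return
 * value of 0 corresponds to BCME_OK, and passing no xtlv descriptors is
 * acceptable for a command without tlv data.
 */
#include <stdlib.h>
#include <string.h>
#include "bcmiov.h"	/* assumed header name for the declarations above */

static void *ex_iov_malloc(void *alloc_ctx, size_t len)
{
	(void)alloc_ctx;
	return malloc(len);
}

static void ex_iov_free(void *alloc_ctx, void *buf, size_t len)
{
	(void)alloc_ctx;
	(void)len;
	free(buf);
}

/* get handler matching bcm_iov_cmd_get_t */
static int ex_cmd_get(const bcm_iov_cmd_digest_t *dig,
	const uint8 *ibuf, size_t ilen, uint8 *obuf, size_t *olen)
{
	(void)dig; (void)ibuf; (void)ilen; (void)obuf;
	*olen = 0;	/* no output payload in this sketch */
	return 0;	/* BCME_OK */
}

static int ex_register_one_command(void *cmd_ctx,
	bcm_iov_parse_context_t **parse_ctx_out)
{
	bcm_iov_parse_config_t cfg;
	bcm_iov_cmd_info_t info;
	int err;

	memset(&cfg, 0, sizeof(cfg));
	cfg.options = BCM_IOV_PARSE_CMD_NONE;
	cfg.alloc_fn = ex_iov_malloc;
	cfg.free_fn = ex_iov_free;
	cfg.dig_fn = NULL;	/* digest comes from alloc_fn, per the comment above */
	cfg.max_regs = 1;
	cfg.alloc_ctx = NULL;

	err = bcm_iov_create_parse_context(&cfg, parse_ctx_out);
	if (err != 0)
		return err;

	memset(&info, 0, sizeof(info));
	info.cmd = 1;				/* hypothetical sub-command id */
	info.flags = BCM_IOV_CMD_FLAG_NONE;
	info.get_h = ex_cmd_get;

	return bcm_iov_register_commands(*parse_ctx_out, cmd_ctx,
		&info, 1, NULL, 0);
}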
-
-#ifdef BCMDRIVER
-/* wlc modules register their iovar(s) using the parsing context w/ wlc layer
- * during attach.
- */
-struct wlc_if;
-struct wlc_info;
-extern struct wlc_bsscfg *bcm_iov_bsscfg_find_from_wlcif(struct wlc_info *wlc,
- struct wlc_if *wlcif);
-int bcm_iov_doiovar(void *parse_ctx, uint32 id, void *params, uint params_len,
- void *arg, uint arg_len, uint vsize, struct wlc_if *intf);
-#endif /* BCMDRIVER */
-
-/* parsing context helpers */
-
-/* get the maximum number of tlvs - can be used to allocate digest for all
- * commands. the digest can be shared. Negative values are BCM_*, >=0, the
- * number of tlvs
- */
-int bcm_iov_parse_get_max_tlvs(const bcm_iov_parse_context_t *ctx);
-
-/* common packing support */
-
-/* pack a buffer of uint8s - memcpy wrapper */
-int bcm_iov_pack_buf(const bcm_iov_cmd_digest_t *dig, uint8 *buf,
- const uint8 *data, size_t len);
-
-#define bcm_iov_packv_u8 bcm_iov_pack_buf
-
-/*
- * pack a buffer with uint16s - serialized in LE order, data points to uint16
- * length is not checked.
- */
-int bcm_iov_packv_u16(const bcm_iov_cmd_digest_t *dig, uint8 *buf,
- const uint16 *data, int n);
-
-/*
- * pack a buffer with uint32s - serialized in LE order - data points to uint32
- * length is not checked.
- */
-int bcm_iov_packv_u32(const bcm_iov_cmd_digest_t *dig, uint8 *buf,
- const uint32 *data, int n);
-
-#endif /* _bcmiov_h_ */
/*
* Fundamental constants relating to IP Protocol
*
- * Copyright (C) 1999-2019, Broadcom.
- *
+ * Copyright (C) 1999-2017, Broadcom Corporation
+ *
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
* under the terms of the GNU General Public License version 2 (the "GPL"),
* available at http://www.broadcom.com/licenses/GPLv2.php, with the
* following added to such license:
- *
+ *
* As a special exception, the copyright holders of this software give you
* permission to link this software with independent modules, and to copy and
* distribute the resulting executable under terms of your choice, provided that
* the license of that module. An independent module is a module which is not
* derived from this software. The special exception does not apply to any
* modifications of the software.
- *
+ *
* Notwithstanding the above, under no circumstances may you combine this
* software in any way with any other Broadcom software provided under a license
* other than the GPL, without Broadcom's express prior written consent.
*
* <<Broadcom-WL-IPTag/Open:>>
*
- * $Id: bcmip.h 785436 2018-10-18 17:54:25Z $
+ * $Id: bcmip.h 700076 2017-05-17 14:42:22Z $
*/
#ifndef _bcmip_h_
#ifndef _TYPEDEFS_H_
#include <typedefs.h>
-#endif // endif
+#endif
/* This marks the start of a packed structure section. */
#include <packed_section_start.h>
+
/* IPV4 and IPV6 common */
#define IP_VER_OFFSET 0x0 /* offset to version field */
#define IP_VER_MASK 0xf0 /* version mask */
#define IPV4_HLEN_MASK 0x0f /* IPV4 header length mask */
#define IPV4_HLEN(ipv4_body) (4 * (((uint8 *)(ipv4_body))[IPV4_VER_HL_OFFSET] & IPV4_HLEN_MASK))
-#define IPV4_HLEN_MIN (4 * 5) /* IPV4 header minimum length */
-
#define IPV4_ADDR_LEN 4 /* IPV4 address length */
#define IPV4_ADDR_NULL(a) ((((uint8 *)(a))[0] | ((uint8 *)(a))[1] | \
if (eh->nexthdr == IPV6_EXTHDR_NONE)
return -1;
else if (eh->nexthdr == IPV6_EXTHDR_FRAGMENT)
- hlen = 8U;
+ hlen = 8;
else if (eh->nexthdr == IPV6_EXTHDR_AUTH)
- hlen = (uint16)((eh->hdrlen + 2U) << 2U);
+ hlen = (eh->hdrlen + 2) << 2;
else
- hlen = (uint16)IPV6_EXTHDR_LEN(eh);
+ hlen = IPV6_EXTHDR_LEN(eh);
len += hlen;
eh = (struct ipv6_exthdr *)(h + len);
/*
* Fundamental constants relating to Neighbor Discovery Protocol
*
- * Copyright (C) 1999-2019, Broadcom.
- *
+ * Copyright (C) 1999-2017, Broadcom Corporation
+ *
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
* under the terms of the GNU General Public License version 2 (the "GPL"),
* available at http://www.broadcom.com/licenses/GPLv2.php, with the
* following added to such license:
- *
+ *
* As a special exception, the copyright holders of this software give you
* permission to link this software with independent modules, and to copy and
* distribute the resulting executable under terms of your choice, provided that
* the license of that module. An independent module is a module which is not
* derived from this software. The special exception does not apply to any
* modifications of the software.
- *
+ *
* Notwithstanding the above, under no circumstances may you combine this
* software in any way with any other Broadcom software provided under a license
* other than the GPL, without Broadcom's express prior written consent.
#ifndef _TYPEDEFS_H_
#include <typedefs.h>
-#endif // endif
+#endif
/* This marks the start of a packed structure section. */
#include <packed_section_start.h>
uint8 addr[16];
} BWL_POST_PACKED_STRUCT;
+
/* ICMPV6 Header */
BWL_PRE_PACKED_STRUCT struct icmp6_hdr {
uint8 icmp6_type;
struct ipv6_addr target;
} BWL_POST_PACKED_STRUCT;
+
/* Neighbor Solicitation/Advertisement Optional Structure */
BWL_PRE_PACKED_STRUCT struct nd_msg_opt {
uint8 type;
*
* Definitions subject to change without notice.
*
- * Copyright (C) 1999-2019, Broadcom.
- *
+ * Copyright (C) 1999-2017, Broadcom Corporation
+ *
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
* under the terms of the GNU General Public License version 2 (the "GPL"),
* available at http://www.broadcom.com/licenses/GPLv2.php, with the
* following added to such license:
- *
+ *
* As a special exception, the copyright holders of this software give you
* permission to link this software with independent modules, and to copy and
* distribute the resulting executable under terms of your choice, provided that
* the license of that module. An independent module is a module which is not
* derived from this software. The special exception does not apply to any
* modifications of the software.
- *
+ *
* Notwithstanding the above, under no circumstances may you combine this
* software in any way with any other Broadcom software provided under a license
* other than the GPL, without Broadcom's express prior written consent.
*
* <<Broadcom-WL-IPTag/Open:>>
*
- * $Id: bcmmsgbuf.h 814986 2019-04-15 21:18:21Z $
+ * $Id: bcmmsgbuf.h 676811 2016-12-24 20:48:46Z $
*/
#ifndef _bcmmsgbuf_h_
#define _bcmmsgbuf_h_
#define H2DRING_INFO_BUFPOST_ITEMSIZE H2DRING_CTRL_SUB_ITEMSIZE
#define D2HRING_INFO_BUFCMPLT_ITEMSIZE D2HRING_CTRL_CMPLT_ITEMSIZE
-#define D2HRING_SNAPSHOT_CMPLT_ITEMSIZE 20
-
#define H2DRING_TXPOST_MAX_ITEM 512
#define H2DRING_RXPOST_MAX_ITEM 512
#define H2DRING_CTRL_SUB_MAX_ITEM 64
#define H2DRING_DYNAMIC_INFO_MAX_ITEM 32
#define D2HRING_DYNAMIC_INFO_MAX_ITEM 32
-#define D2HRING_EDL_HDR_SIZE 48u
-#define D2HRING_EDL_ITEMSIZE 2048u
-#define D2HRING_EDL_MAX_ITEM 256u
-#define D2HRING_EDL_WATERMARK (D2HRING_EDL_MAX_ITEM >> 5u)
-
#define D2HRING_CTRL_CMPLT_MAX_ITEM 64
enum {
#define MESSAGE_PAYLOAD(a) (a & MSG_TYPE_INTERNAL_USE_START) ? TRUE : FALSE
#define PCIEDEV_FIRMWARE_TSINFO 0x1
-#define PCIEDEV_FIRMWARE_TSINFO_FIRST 0x1
-#define PCIEDEV_FIRMWARE_TSINFO_MIDDLE 0x2
-#define PCIEDEV_BTLOG_POST 0x3
-#define PCIEDEV_BT_SNAPSHOT_POST 0x4
#ifdef PCIE_API_REV1
MSG_TYPE_TX_STATUS = 0x10,
MSG_TYPE_RXBUF_POST = 0x11,
MSG_TYPE_RX_CMPLT = 0x12,
- MSG_TYPE_LPBK_DMAXFER = 0x13,
+ MSG_TYPE_LPBK_DMAXFER = 0x13,
MSG_TYPE_LPBK_DMAXFER_CMPLT = 0x14,
MSG_TYPE_FLOW_RING_RESUME = 0x15,
MSG_TYPE_FLOW_RING_RESUME_CMPLT = 0x16,
MSG_TYPE_HOSTTIMSTAMP = 0x26,
MSG_TYPE_HOSTTIMSTAMP_CMPLT = 0x27,
MSG_TYPE_FIRMWARE_TIMESTAMP = 0x28,
- MSG_TYPE_SNAPSHOT_UPLOAD = 0x29,
- MSG_TYPE_SNAPSHOT_CMPLT = 0x2A,
- MSG_TYPE_H2D_RING_DELETE = 0x2B,
- MSG_TYPE_D2H_RING_DELETE = 0x2C,
- MSG_TYPE_H2D_RING_DELETE_CMPLT = 0x2D,
- MSG_TYPE_D2H_RING_DELETE_CMPLT = 0x2E,
MSG_TYPE_API_MAX_RSVD = 0x3F
} bcmpcie_msg_type_t;
MSG_TYPE_TXMETADATA_PYLD = 0x46,
MSG_TYPE_INDX_UPDATE = 0x47,
MSG_TYPE_INFO_PYLD = 0x48,
- MSG_TYPE_TS_EVENT_PYLD = 0x49,
- MSG_TYPE_PVT_BTLOG_CMPLT = 0x4A,
- MSG_TYPE_BTLOG_PYLD = 0x4B,
- MSG_TYPE_HMAPTEST_PYLD = 0x4C,
- MSG_TYPE_PVT_BT_SNAPSHOT_CMPLT = 0x4D,
- MSG_TYPE_BT_SNAPSHOT_PYLD = 0x4E
+ MSG_TYPE_TS_EVENT_PYLD = 0x49
} bcmpcie_msgtype_int_t;
typedef enum bcmpcie_msgtype_u {
* D2H_RING_CONFIG_SUBTYPE_MSI_DOORBELL
*/
typedef enum bcmpcie_msi_intr_idx {
- MSI_INTR_IDX_CTRL_CMPL_RING = 0,
- MSI_INTR_IDX_TXP_CMPL_RING = 1,
- MSI_INTR_IDX_RXP_CMPL_RING = 2,
- MSI_INTR_IDX_INFO_CMPL_RING = 3,
- MSI_INTR_IDX_MAILBOX = 4,
- MSI_INTR_IDX_MAX = 5
+ MSI_INTR_IDX_CTRL_CMPL_RING,
+ MSI_INTR_IDX_TXP_CMPL_RING,
+ MSI_INTR_IDX_RXP_CMPL_RING,
+ MSI_INTR_IDX_MAILBOX,
+ MSI_INTR_IDX_MAX
} bcmpcie_msi_intr_idx_t;
-#define BCMPCIE_D2H_MSI_OFFSET_SINGLE 0
typedef enum bcmpcie_msi_offset_type {
- BCMPCIE_D2H_MSI_OFFSET_MB0 = 2,
- BCMPCIE_D2H_MSI_OFFSET_MB1 = 3,
- BCMPCIE_D2H_MSI_OFFSET_DB0 = 4,
- BCMPCIE_D2H_MSI_OFFSET_DB1 = 5,
- BCMPCIE_D2H_MSI_OFFSET_H1_DB0 = 6,
- BCMPCIE_D2H_MSI_OFFSET_MAX = 7
+ BCMPCIE_D2H_MSI_OFFSET_MB0 = 2,
+ BCMPCIE_D2H_MSI_OFFSET_MB1,
+ BCMPCIE_D2H_MSI_OFFSET_DB0,
+ BCMPCIE_D2H_MSI_OFFSET_DB1,
+ BCMPCIE_D2H_MSI_OFFSET_MAX
} bcmpcie_msi_offset_type_t;
typedef struct bcmpcie_msi_offset {
#define BCMPCIE_D2H_MSI_OFFSET_DEFAULT BCMPCIE_D2H_MSI_OFFSET_DB1
-#define BCMPCIE_D2H_MSI_SINGLE 0xFFFE
/* if_id */
#define BCMPCIE_CMNHDR_IFIDX_PHYINTF_SHFT 5
/* flags */
#define BCMPCIE_CMNHDR_FLAGS_DMA_R_IDX 0x1
#define BCMPCIE_CMNHDR_FLAGS_DMA_R_IDX_INTR 0x2
-#define BCMPCIE_CMNHDR_FLAGS_TS_SEQNUM_INIT 0x4
#define BCMPCIE_CMNHDR_FLAGS_PHASE_BIT 0x80
#define BCMPCIE_CMNHDR_PHASE_BIT_INIT 0x80
/* IOCTL request message */
typedef struct ioctl_req_msg {
/** common message header */
- cmn_msg_hdr_t cmn_hdr;
+ cmn_msg_hdr_t cmn_hdr;
/** ioctl command type */
uint32 cmd;
/** ioctl transaction ID, to pair with an ioctl response */
/* buffer post messages for device to use to return dbg buffers */
typedef ioctl_resp_evt_buf_post_msg_t info_buf_post_msg_t;
-#define DHD_INFOBUF_RX_BUFPOST_PKTSZ (2 * 1024)
-
-#define DHD_BTLOG_RX_BUFPOST_PKTSZ (2 * 1024)
/* An infobuf host buffer starts with a 32 bit (LE) version. */
#define PCIE_INFOBUF_V1 1
uint16 length;
} info_buf_payload_hdr_t;
-/* BT logs/memory to DMA directly from BT memory to host */
-typedef struct info_buf_btlog_s {
- void (*status_cb)(void *ctx, void *p, int error); /* obsolete - to be removed */
- void *ctx;
- dma64addr_t src_addr;
- uint32 length;
- bool (*pcie_status_cb)(osl_t *osh, void *p, int error);
- uint32 bt_intstatus;
- int error;
-} info_buf_btlog_t;
-
-/** snapshot upload request message */
-typedef struct snapshot_upload_request_msg {
- /** common message header */
- cmn_msg_hdr_t cmn_hdr;
- /** length of the snaphost buffer supplied */
- uint32 snapshot_buf_len;
- /** type of snapshot */
- uint8 snapshot_type;
- /** snapshot param */
- uint8 snapshot_param;
- /** to align the host address on 8 byte boundary */
- uint8 reserved[2];
- /** always align on 8 byte boundary */
- bcm_addr64_t host_buf_addr;
- uint32 rsvd[4];
-} snapshot_upload_request_msg_t;
-
-/** snapshot types */
-typedef enum bcmpcie_snapshot_type {
- SNAPSHOT_TYPE_BT = 0, /* Bluetooth SRAM and patch RAM */
- SNAPSHOT_TYPE_WLAN_SOCRAM = 1, /* WLAN SOCRAM */
- SNAPSHOT_TYPE_WLAN_HEAP = 2, /* WLAN HEAP */
- SNAPSHOT_TYPE_WLAN_REGISTER = 3 /* WLAN registers */
-} bcmpcie_snapshot_type_t;
-
-#define PCIE_DMA_XFER_FLG_D11_LPBK_MASK 0xF
-#define PCIE_DMA_XFER_FLG_D11_LPBK_SHIFT 2
-#define PCIE_DMA_XFER_FLG_CORE_NUMBER_MASK 3
-#define PCIE_DMA_XFER_FLG_CORE_NUMBER_SHIFT 0
+#define PCIE_DMA_XFER_FLG_D11_LPBK_MASK 0x00000001
+#define PCIE_DMA_XFER_FLG_D11_LPBK_SHIFT 0
typedef struct pcie_dma_xfer_params {
/** common message header */
/** delay before doing the dest txfer */
uint32 destdelay;
uint8 rsvd[3];
- /* bit0: D11 DMA loopback flag */
uint8 flags;
} pcie_dma_xfer_params_t;
-#define BCMPCIE_FLOW_RING_INTF_HP2P 0x1
/** Complete msgbuf hdr for flow ring update from host to dongle */
typedef struct tx_flowring_create_request {
cmn_msg_hdr_t msg;
uint8 da[ETHER_ADDR_LEN];
uint8 sa[ETHER_ADDR_LEN];
uint8 tid;
- uint8 if_flags;
+ uint8 if_flags;
uint16 flow_ring_id;
- uint8 tc;
+ uint8 tc;
/* priority_ifrmmask is to define core mask in ifrm mode.
* currently it is not used for priority, so it is used solely as the ifrm mask
*/
uint8 priority_ifrmmask;
- uint16 int_vector;
+ uint16 int_vector;
uint16 max_items;
uint16 len_item;
bcm_addr64_t flow_ring_ptr;
typedef struct tx_flowring_delete_request {
cmn_msg_hdr_t msg;
uint16 flow_ring_id;
- uint16 reason;
+ uint16 reason;
uint32 rsvd[7];
} tx_flowring_delete_request_t;
-typedef tx_flowring_delete_request_t d2h_ring_delete_req_t;
-typedef tx_flowring_delete_request_t h2d_ring_delete_req_t;
-
typedef struct tx_flowring_flush_request {
cmn_msg_hdr_t msg;
uint16 flow_ring_id;
- uint16 reason;
+ uint16 reason;
uint32 rsvd[7];
} tx_flowring_flush_request_t;
h2d_mailbox_data_t h2d_mailbox_data;
host_timestamp_msg_t host_ts;
ts_buf_post_msg_t ts_buf_post;
- d2h_ring_delete_req_t d2h_delete;
- h2d_ring_delete_req_t h2d_delete;
unsigned char check[H2DRING_CTRL_SUB_ITEMSIZE];
} ctrl_submit_item_t;
/** Control Completion messages (20 bytes) */
typedef struct compl_msg_hdr {
- union {
- /** status for the completion */
- int16 status;
-
- /* mutually exclusive with pkt fate debug feature */
- struct pktts_compl_hdr {
- uint16 d_t4; /* Delta TimeStamp 3: T4-tref */
- } tx_pktts;
- };
+ /** status for the completion */
+ int16 status;
/** submission flow ring id which generated this status */
union {
uint16 ring_id;
typedef uint32 dma_done_t;
#define MAX_CLKSRC_ID 0xF
-#define TX_PKT_RETRY_CNT_0_MASK 0x000000FF
-#define TX_PKT_RETRY_CNT_0_SHIFT 0
-#define TX_PKT_RETRY_CNT_1_MASK 0x0000FF00
-#define TX_PKT_RETRY_CNT_1_SHIFT 8
-#define TX_PKT_RETRY_CNT_2_MASK 0x00FF0000
-#define TX_PKT_RETRY_CNT_2_SHIFT 16
-#define TX_PKT_BAND_INFO 0x0F000000
-#define TX_PKT_BAND_INFO_SHIFT 24
-#define TX_PKT_VALID_INFO 0xF0000000
-#define TX_PKT_VALID_INFO_SHIFT 28
typedef struct ts_timestamp_srcid {
union {
uint32 ts_low; /* time stamp low 32 bits */
- uint32 rate_spec; /* use ratespec */
+ uint32 reserved; /* If timestamp not used */
};
union {
uint32 ts_high; /* time stamp high 28 bits */
uint32 phase :1; /* Phase bit */
dma_done_t marker_ext;
};
- uint32 tx_pkt_band_retry_info;
};
} ts_timestamp_srcid_t;
typedef ts_timestamp_t ts_correction_m_t;
typedef ts_timestamp_t ts_correction_b_t;
-typedef struct _pktts {
- uint32 tref; /* Ref Clk in uSec (currently, tsf) */
- uint16 d_t2; /* Delta TimeStamp 1: T2-tref */
- uint16 d_t3; /* Delta TimeStamp 2: T3-tref */
-} pktts_t;
-
/* completion header status codes */
#define BCMPCIE_SUCCESS 0
#define BCMPCIE_NOTFOUND 1
#define BCMPCIE_RING_TYPE_INVALID 14
#define BCMPCIE_NO_TS_EVENT_BUF 15
#define BCMPCIE_MAX_TS_EVENT_BUF 16
-#define BCMPCIE_PCIE_NO_BTLOG_BUF 17
-#define BCMPCIE_BT_DMA_ERR 18
-#define BCMPCIE_BT_DMA_DESCR_FETCH_ERR 19
-#define BCMPCIE_SNAPSHOT_ERR 20
-#define BCMPCIE_NOT_READY 21
-#define BCMPCIE_INVALID_DATA 22
-#define BCMPCIE_NO_RESPONSE 23
-#define BCMPCIE_NO_CLOCK 24
/** IOCTL completion response */
typedef struct ioctl_compl_resp_msg {
/** common message header */
cmn_msg_hdr_t cmn_hdr;
/** completion message header */
- compl_msg_hdr_t compl_hdr;
+ compl_msg_hdr_t compl_hdr;
/** cmd id */
uint32 cmd;
uint32 rsvd;
typedef struct ring_create_response {
cmn_msg_hdr_t cmn_hdr;
- compl_msg_hdr_t cmplt;
+ compl_msg_hdr_t cmplt;
uint32 rsvd[2];
/** XOR checksum or a magic number to audit DMA done */
dma_done_t marker;
typedef struct tx_flowring_delete_response {
cmn_msg_hdr_t msg;
- compl_msg_hdr_t cmplt;
- uint16 read_idx;
- uint16 rsvd[3];
+ compl_msg_hdr_t cmplt;
+ uint32 rsvd[2];
/** XOR checksum or a magic number to audit DMA done */
dma_done_t marker;
} tx_flowring_delete_response_t;
-typedef tx_flowring_delete_response_t h2d_ring_delete_response_t;
-typedef tx_flowring_delete_response_t d2h_ring_delete_response_t;
-
typedef struct tx_flowring_flush_response {
cmn_msg_hdr_t msg;
- compl_msg_hdr_t cmplt;
+ compl_msg_hdr_t cmplt;
uint32 rsvd[2];
/** XOR checksum or a magic number to audit DMA done */
dma_done_t marker;
cmn_msg_hdr_t cmn_hdr;
/** completion message header */
compl_msg_hdr_t compl_hdr;
- uint16 subtype;
- uint16 rsvd[3];
+ uint32 rsvd[2];
/** XOR checksum or a magic number to audit DMA done */
dma_done_t marker;
} ring_config_resp_t;
uint16 info_data_len;
/* sequence number */
uint16 seqnum;
- /* destination */
- uint8 dest;
/* rsvd */
- uint8 rsvd[3];
+ uint32 rsvd;
/* XOR checksum or a magic number to audit DMA done */
dma_done_t marker;
} info_buf_resp_t;
-/* snapshot completion msg: send from device to host */
-typedef struct snapshot_resp {
- /* common message header */
- cmn_msg_hdr_t cmn_hdr;
- /* completion message header */
- compl_msg_hdr_t compl_hdr;
- /* snapshot length uploaded */
- uint32 resp_len;
- /* snapshot type */
- uint8 type;
- /* rsvd */
- uint8 rsvd[3];
- /* XOR checksum or a magic number to audit DMA done */
- dma_done_t marker;
-} snapshot_resp_t;
-
typedef struct info_ring_cpl_item {
info_buf_resp_t info_buf_post;
unsigned char check[D2HRING_INFO_BUFCMPLT_ITEMSIZE];
d2h_ring_create_response_t d2h_ring_create_resp;
host_timestamp_msg_cpl_t host_ts_cpl;
fw_timestamp_event_msg_t fw_ts_event;
- h2d_ring_delete_response_t h2d_ring_delete_resp;
- d2h_ring_delete_response_t d2h_ring_delete_resp;
unsigned char ctrl_response[D2HRING_CTRL_CMPLT_ITEMSIZE];
} ctrl_completion_item_t;
/** rx status */
uint32 rx_status_0;
uint32 rx_status_1;
-
- union { /* size per IPC = (3 x uint32) bytes */
- struct {
- /* used by Monitor mode */
- uint32 marker;
- /* timestamp */
- ipc_timestamp_t ts;
- };
-
- /* LatTS_With_XORCSUM */
- struct {
- /* latency timestamp */
- pktts_t rx_pktts;
- /* XOR checksum or a magic number to audit DMA done */
- dma_done_t marker_ext;
- };
- };
+ /** XOR checksum or a magic number to audit DMA done */
+ /* This is for rev6 only. For IPC rev7, this is a reserved field */
+ dma_done_t marker;
+ /* timestamp */
+ ipc_timestamp_t ts;
} host_rxbuf_cmpl_t;
typedef union rxbuf_complete_item {
unsigned char check[D2HRING_RXCMPLT_ITEMSIZE];
} rxbuf_complete_item_t;
+
typedef struct host_txbuf_post {
/** common message header */
cmn_msg_hdr_t cmn_hdr;
uint16 metadata_buf_len;
/** provided data buffer len to receive data */
uint16 data_len;
- union {
- struct {
- /** extended transmit flags */
- uint8 ext_flags;
- uint8 scale_factor;
-
- /** user defined rate */
- uint8 rate;
- uint8 exp_time;
- };
- /** XOR checksum or a magic number to audit DMA done */
- dma_done_t marker;
- };
+ /** XOR checksum or a magic number to audit DMA done */
+ dma_done_t marker;
} host_txbuf_post_t;
#define BCMPCIE_PKT_FLAGS_FRAME_802_3 0x01
#define BCMPCIE_PKT_FLAGS_FRAME_802_11 0x02
-#define BCMPCIE_PKT_FLAGS_FRAME_NORETRY 0x01 /* Disable retry on this frame */
-#define BCMPCIE_PKT_FLAGS_FRAME_NOAGGR 0x02 /* Disable aggregation for this frame */
-#define BCMPCIE_PKT_FLAGS_FRAME_UDR 0x04 /* User defined rate for this frame */
-#define BCMPCIE_PKT_FLAGS_FRAME_ATTR_MASK 0x07 /* Attribute mask */
-
#define BCMPCIE_PKT_FLAGS_FRAME_EXEMPT_MASK 0x03 /* Exempt uses 2 bits */
#define BCMPCIE_PKT_FLAGS_FRAME_EXEMPT_SHIFT 0x02 /* needs to be shifted past other bits */
-#define BCMPCIE_PKT_FLAGS_EPOCH_SHIFT 3u
-#define BCMPCIE_PKT_FLAGS_EPOCH_MASK (1u << BCMPCIE_PKT_FLAGS_EPOCH_SHIFT)
#define BCMPCIE_PKT_FLAGS_PRIO_SHIFT 5
#define BCMPCIE_PKT_FLAGS_PRIO_MASK (7 << BCMPCIE_PKT_FLAGS_PRIO_SHIFT)
#define BCMPCIE_TXPOST_FLAGS_PRIO_SHIFT BCMPCIE_PKT_FLAGS_PRIO_SHIFT
#define BCMPCIE_TXPOST_FLAGS_PRIO_MASK BCMPCIE_PKT_FLAGS_PRIO_MASK
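/*
 * Illustrative sketch, not from the original sources: packing the frame type,
 * exemption and 802.1d priority bits defined above into a txpost flags byte.
 * Only the bit layout comes from this header; the field name carrying these
 * bits in the txpost work item is assumed.
 */
#include <stdint.h>

static uint8_t example_txpost_flags(uint8_t prio, uint8_t exempt)
{
	uint8_t flags = 0x01;				/* BCMPCIE_PKT_FLAGS_FRAME_802_3 */

	/* BCMPCIE_PKT_FLAGS_FRAME_EXEMPT_*: 2-bit field shifted past the frame bits */
	flags |= (uint8_t)((exempt & 0x03) << 0x02);
	/* BCMPCIE_PKT_FLAGS_PRIO_*: 3-bit priority in bits 5..7 */
	flags |= (uint8_t)((prio << 5) & (7 << 5));
	return flags;
}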
+
/* H2D Txpost ring work items */
typedef union txbuf_submit_item {
host_txbuf_post_t txpost;
cmn_msg_hdr_t cmn_hdr;
/** completion message header */
compl_msg_hdr_t compl_hdr;
-
- union { /* size per IPC = (3 x uint32) bytes */
- /* Usage 1: TxS_With_TimeSync */
- struct {
- struct {
- union {
- /** provided meta data len */
- uint16 metadata_len;
- /** provided extended TX status */
- uint16 tx_status_ext;
- }; /*Ext_TxStatus */
-
- /** WLAN side txstatus */
- uint16 tx_status;
- }; /* TxS */
- /* timestamp */
- ipc_timestamp_t ts;
- }; /* TxS_with_TS */
-
- /* Usage 2: LatTS_With_XORCSUM */
+ union {
struct {
- /* latency timestamp */
- pktts_t tx_pktts;
- /* XOR checksum or a magic number to audit DMA done */
- dma_done_t marker_ext;
+ /** provided meta data len */
+ uint16 metadata_len;
+ /** WLAN side txstatus */
+ uint16 tx_status;
};
+ /** XOR checksum or a magic number to audit DMA done */
+ /* This is for rev6 only. For IPC rev7, this is not used */
+ dma_done_t marker;
};
+ /* timestamp */
+ ipc_timestamp_t ts;
} host_txbuf_cmpl_t;
unsigned char check[D2HRING_TXCMPLT_ITEMSIZE];
} txbuf_complete_item_t;
-#define PCIE_METADATA_VER 1u
-
-/* version and length are not part of this structure.
- * dhd queries version and length through bus iovar "bus:metadata_info".
- */
-struct metadata_txcmpl_v1 {
- uint32 tref; /* TSF or Ref Clock in uSecs */
- uint16 d_t2; /* T2-fwt1 delta */
- uint16 d_t3; /* T3-fwt1 delta */
- uint16 d_t4; /* T4-fwt1 delta */
- uint16 rsvd; /* reserved */
-};
-typedef struct metadata_txcmpl_v1 metadata_txcmpl_t;
-
#define BCMPCIE_D2H_METADATA_HDRLEN 4
#define BCMPCIE_D2H_METADATA_MINLEN (BCMPCIE_D2H_METADATA_HDRLEN + 4)
uint32 high_addr;
} ret_buf_t;
+
#ifdef PCIE_API_REV1
/* ioctl specific hdr */
typedef struct ioctl_hdr {
- uint16 cmd;
+ uint16 cmd;
uint16 retbuf_len;
uint32 cmd_id;
} ioctl_hdr_t;
typedef struct ioctlptr_hdr {
- uint16 cmd;
+ uint16 cmd;
uint16 retbuf_len;
- uint16 buflen;
+ uint16 buflen;
uint16 rsvd;
uint32 cmd_id;
} ioctlptr_hdr_t;
typedef struct ioctl_req_hdr {
uint32 pkt_id; /**< Packet ID */
- uint32 cmd; /**< IOCTL ID */
+ uint32 cmd; /**< IOCTL ID */
uint16 retbuf_len;
- uint16 buflen;
+ uint16 buflen;
uint16 xt_id; /**< transaction ID */
uint16 rsvd[1];
} ioctl_req_hdr_t;
#endif /* PCIE_API_REV1 */
+
/** Complete msgbuf hdr for ioctl from host to dongle */
typedef struct ioct_reqst_hdr {
cmn_msg_hdr_t msg;
ioctl_hdr_t ioct_hdr;
#else
ioctl_req_hdr_t ioct_hdr;
-#endif // endif
+#endif
ret_buf_t ret_buf;
} ioct_reqst_hdr_t;
ioctlptr_hdr_t ioct_hdr;
#else
ioctl_req_hdr_t ioct_hdr;
-#endif // endif
+#endif
ret_buf_t ret_buf;
ret_buf_t ioct_buf;
} ioctptr_reqst_hdr_t;
uint32 cmd_id;
#else
uint32 pkt_id;
-#endif // endif
+#endif
uint32 status;
uint32 ret_len;
uint32 inline_data;
#else
uint16 xt_id; /**< transaction ID */
uint16 rsvd[1];
-#endif // endif
+#endif
} ioct_resp_hdr_t;
/* ioct resp header used in dongle */
/* time period to capture the device time stamp and toggle WLAN_TIME_SYNC_GPIO */
uint16 period_ms;
uint8 flags;
- uint8 post_delay;
+ uint8 rsvd;
uint32 reset_cnt;
} ts_host_timestamping_config_t;
/* Flags in host timestamping config TLV */
#define FLAG_HOST_RESET (1 << 0)
-#define IS_HOST_RESET(x) ((x) & FLAG_HOST_RESET)
-#define CLEAR_HOST_RESET(x) ((x) & ~FLAG_HOST_RESET)
-
-#define FLAG_CONFIG_NODROP (1 << 1)
-#define IS_CONFIG_NODROP(x) ((x) & FLAG_CONFIG_NODROP)
-#define CLEAR_CONFIG_NODROP(x) ((x) & ~FLAG_CONFIG_NODROP)
#endif /* _bcmmsgbuf_h_ */
/*
* NVRAM variable manipulation
*
- * Copyright (C) 1999-2019, Broadcom.
- *
+ * Copyright (C) 1999-2017, Broadcom Corporation
+ *
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
* under the terms of the GNU General Public License version 2 (the "GPL"),
* available at http://www.broadcom.com/licenses/GPLv2.php, with the
* following added to such license:
- *
+ *
* As a special exception, the copyright holders of this software give you
* permission to link this software with independent modules, and to copy and
* distribute the resulting executable under terms of your choice, provided that
* the license of that module. An independent module is a module which is not
* derived from this software. The special exception does not apply to any
* modifications of the software.
- *
+ *
* Notwithstanding the above, under no circumstances may you combine this
* software in any way with any other Broadcom software provided under a license
* other than the GPL, without Broadcom's express prior written consent.
*
* <<Broadcom-WL-IPTag/Open:>>
*
- * $Id: bcmnvram.h 655606 2016-08-22 17:16:11Z $
+ * $Id: bcmnvram.h 613043 2016-01-16 00:24:13Z $
*/
#ifndef _bcmnvram_h_
extern int nvram_init(void *sih);
extern int nvram_deinit(void *sih);
-extern int nvram_file_read(char **nvramp, int *nvraml);
/*
* Append a chunk of nvram variables to the global list
extern void nvram_get_global_vars(char **varlst, uint *varsz);
+
/*
* Check for reset button press for restoring factory defaults.
*/
/* For CFE builds this gets passed in thru the makefile */
#ifndef MAX_NVRAM_SPACE
#define MAX_NVRAM_SPACE 0x10000
-#endif // endif
+#endif
#define DEF_NVRAM_SPACE 0x8000
#define ROM_ENVRAM_SPACE 0x1000
#define NVRAM_LZMA_MAGIC 0x4c5a4d41 /* 'LZMA' */
#define BCM_JUMBO_NVRAM_DELIMIT '\n'
#define BCM_JUMBO_START "Broadcom Jumbo Nvram file"
+
#if (defined(FAILSAFE_UPGRADE) || defined(CONFIG_FAILSAFE_UPGRADE) || \
defined(__CONFIG_FAILSAFE_UPGRADE_SUPPORT__))
#define IMAGE_SIZE "image_size"
#define IMAGE_SECOND_OFFSET "image_second_offset"
#define LINUX_FIRST "linux"
#define LINUX_SECOND "linux2"
-#endif // endif
+#endif
#if (defined(DUAL_IMAGE) || defined(CONFIG_DUAL_IMAGE) || \
defined(__CONFIG_DUAL_IMAGE_FLASH_SUPPORT__))
* Software-specific definitions shared between device and host side
* Explains the shared area between host and dongle
*
- * Copyright (C) 1999-2019, Broadcom.
- *
+ * Copyright (C) 1999-2017, Broadcom Corporation
+ *
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
* under the terms of the GNU General Public License version 2 (the "GPL"),
* available at http://www.broadcom.com/licenses/GPLv2.php, with the
* following added to such license:
- *
+ *
* As a special exception, the copyright holders of this software give you
* permission to link this software with independent modules, and to copy and
* distribute the resulting executable under terms of your choice, provided that
* the license of that module. An independent module is a module which is not
* derived from this software. The special exception does not apply to any
* modifications of the software.
- *
+ *
* Notwithstanding the above, under no circumstances may you combine this
* software in any way with any other Broadcom software provided under a license
* other than the GPL, without Broadcom's express prior written consent.
*
* <<Broadcom-WL-IPTag/Open:>>
*
- * $Id: bcmpcie.h 821465 2019-05-23 19:50:00Z $
+ * $Id: bcmpcie.h 678914 2017-01-11 15:34:26Z $
*/
+
#ifndef _bcmpcie_h_
#define _bcmpcie_h_
-#include <typedefs.h>
+#include <bcmutils.h>
#define ADDR_64(x) (x.addr)
#define HIGH_ADDR_32(x) ((uint32) (((sh_addr_t) x).high_addr))
uint32 high_addr;
} sh_addr_t;
+
/* May be overridden by 43xxxxx-roml.mk */
#if !defined(BCMPCIE_MAX_TX_FLOWS)
#define BCMPCIE_MAX_TX_FLOWS 40
* Feature flags enabled in dongle. Advertised by dongle to DHD via the PCIe Shared structure that
* is located in device memory.
*/
+#define PCIE_SHARED_VERSION PCIE_SHARED_VERSION_7
#define PCIE_SHARED_VERSION_MASK 0x000FF
#define PCIE_SHARED_ASSERT_BUILT 0x00100
#define PCIE_SHARED_ASSERT 0x00200
#define PCIE_SHARED_EVT_SEQNUM 0x08000
#define PCIE_SHARED_DMA_INDEX 0x10000
+/* WAR: D11 txstatus through unused status field of PCIe completion header */
+#define PCIE_SHARED_D2H_D11_TX_STATUS 0x40000000 /* using flags2 in shared area */
+#define PCIE_SHARED_H2D_D11_TX_STATUS 0x80000000 /* using flags2 in shared area */
+
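/*
 * Illustrative sketch, not from the original sources: decoding the capability
 * word the dongle advertises through the shared structure. Reading 'flags'
 * and 'flags2' out of that structure is assumed; the literal masks mirror
 * PCIE_SHARED_VERSION_MASK, PCIE_SHARED_ASSERT_BUILT, PCIE_SHARED_DMA_INDEX
 * and PCIE_SHARED_D2H_D11_TX_STATUS (a flags2 bit) from this header.
 */
#include <stdint.h>
#include <stdbool.h>
#include <stdio.h>

static void example_decode_pcie_shared(uint32_t flags, uint32_t flags2)
{
	uint32_t ipc_version = flags & 0x000FFu;		/* PCIE_SHARED_VERSION_MASK */
	bool assert_built    = (flags & 0x00100u) != 0;		/* PCIE_SHARED_ASSERT_BUILT */
	bool dma_index       = (flags & 0x10000u) != 0;		/* PCIE_SHARED_DMA_INDEX */
	bool d2h_d11_txs     = (flags2 & 0x40000000u) != 0;	/* D11 txstatus WAR */

	printf("IPC rev %u, assert-built=%d, dma-index=%d, d2h-d11-txstatus=%d\n",
	       (unsigned)ipc_version, (int)assert_built, (int)dma_index, (int)d2h_d11_txs);
}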
/**
* There are host types where a device interrupt can 'race ahead' of data written by the device into
* host memory. The dongle can avoid this condition using a variety of techniques (read barrier,
#define PCIE_SHARED_IDLE_FLOW_RING 0x80000
#define PCIE_SHARED_2BYTE_INDICES 0x100000
-#define PCIE_SHARED_FAST_DELETE_RING 0x00000020 /* Fast Delete Ring */
-#define PCIE_SHARED_EVENT_BUF_POOL_MAX 0x000000c0 /* event buffer pool max bits */
-#define PCIE_SHARED_EVENT_BUF_POOL_MAX_POS 6 /* event buffer pool max bit position */
+#define PCIE_SHARED2_EXTENDED_TRAP_DATA 0x00000001 /* using flags2 in shared area */
/* dongle supports fatal buf log collection */
-#define PCIE_SHARED_FATAL_LOGBUG_VALID 0x200000
+#define PCIE_SHARED_FATAL_LOGBUG_VALID 0x200000
/* Implicit DMA with corerev 19 and after */
#define PCIE_SHARED_IDMA 0x400000
#define PCIE_SHARED_HOSTRDY_SUPPORT 0x10000000
/* When set, Firmware does not support OOB Device Wake based DS protocol */
-#define PCIE_SHARED_NO_OOB_DW 0x20000000
+#define PCIE_SHARED_NO_OOB_DW 0x20000000
/* When set, Firmware supports Inband DS protocol */
-#define PCIE_SHARED_INBAND_DS 0x40000000
+#define PCIE_SHARED_INBAND_DS 0x40000000
-/* use DAR registers */
-#define PCIE_SHARED_DAR 0x80000000
-
-/**
- * Following are the shared2 flags. All bits in flags have been used. A flags2
- * field got added and the definition for these flags come here:
- */
-/* WAR: D11 txstatus through unused status field of PCIe completion header */
-#define PCIE_SHARED2_EXTENDED_TRAP_DATA 0x00000001 /* using flags2 in shared area */
-#define PCIE_SHARED2_TXSTATUS_METADATA 0x00000002
-#define PCIE_SHARED2_BT_LOGGING 0x00000004 /* BT logging support */
-#define PCIE_SHARED2_SNAPSHOT_UPLOAD 0x00000008 /* BT/WLAN snapshot upload support */
-#define PCIE_SHARED2_SUBMIT_COUNT_WAR 0x00000010 /* submission count WAR */
-#define PCIE_SHARED2_FAST_DELETE_RING 0x00000020 /* Fast Delete ring support */
-#define PCIE_SHARED2_EVTBUF_MAX_MASK 0x000000C0 /* 0:32, 1:64, 2:128, 3: 256 */
-
-/* using flags2 to indicate firmware support added to reuse timesync to update PKT txstatus */
-#define PCIE_SHARED2_PKT_TX_STATUS 0x00000100
-#define PCIE_SHARED2_FW_SMALL_MEMDUMP 0x00000200 /* FW small memdump */
-#define PCIE_SHARED2_FW_HC_ON_TRAP 0x00000400
-#define PCIE_SHARED2_HSCB 0x00000800 /* Host SCB support */
-
-#define PCIE_SHARED2_EDL_RING 0x00001000 /* Support Enhanced Debug Lane */
-#define PCIE_SHARED2_DEBUG_BUF_DEST 0x00002000 /* debug buf dest support */
-#define PCIE_SHARED2_PCIE_ENUM_RESET_FLR 0x00004000 /* BT producer index reset WAR */
-#define PCIE_SHARED2_PKT_TIMESTAMP 0x00008000 /* Timestamp in packet */
-
-#define PCIE_SHARED2_HP2P 0x00010000u /* HP2P feature */
-#define PCIE_SHARED2_HWA 0x00020000u /* HWA feature */
-#define PCIE_SHARED2_TRAP_ON_HOST_DB7 0x00040000u /* can take a trap on DB7 from host */
-
-#define PCIE_SHARED2_DURATION_SCALE 0x00100000u
-
-#define PCIE_SHARED2_D2H_D11_TX_STATUS 0x40000000
-#define PCIE_SHARED2_H2D_D11_TX_STATUS 0x80000000
+/* Implicit DMA WAR for 4347B0 PCIe memory retention */
+#define PCIE_SHARED_IDMA_RETENTION_DS 0x80000000
#define PCIE_SHARED_D2H_MAGIC 0xFEDCBA09
#define PCIE_SHARED_H2D_MAGIC 0x12345678
-typedef uint16 pcie_hwa_db_index_t; /* 16 bit HWA index (IPC Rev 7) */
-#define PCIE_HWA_DB_INDEX_SZ (2u) /* 2 bytes sizeof(pcie_hwa_db_index_t) */
-
/**
* Message rings convey messages between host and device. They are unidirectional, and are located
* in host memory.
#define BCMPCIE_H2D_RING_TYPE_RXBUFPOST 0x3
#define BCMPCIE_H2D_RING_TYPE_TXSUBMIT 0x4
#define BCMPCIE_H2D_RING_TYPE_DBGBUF_SUBMIT 0x5
-#define BCMPCIE_H2D_RING_TYPE_BTLOG_SUBMIT 0x6
#define BCMPCIE_D2H_RING_TYPE_CTRL_CPL 0x1
#define BCMPCIE_D2H_RING_TYPE_TX_CPL 0x2
#define BCMPCIE_D2H_RING_TYPE_RX_CPL 0x3
#define BCMPCIE_D2H_RING_TYPE_DBGBUF_CPL 0x4
#define BCMPCIE_D2H_RING_TYPE_AC_RX_COMPLETE 0x5
-#define BCMPCIE_D2H_RING_TYPE_BTLOG_CPL 0x6
-#define BCMPCIE_D2H_RING_TYPE_EDL 0x7
-#define BCMPCIE_D2H_RING_TYPE_HPP_TX_CPL 0x8
-#define BCMPCIE_D2H_RING_TYPE_HPP_RX_CPL 0x9
/**
* H2D and D2H, WR and RD index, are maintained in the following arrays:
sh_addr_t base_addr; /* 64 bits address, either in host or device memory */
} ring_mem_t;
+
/**
* Per flow ring, information is maintained in device memory, eg at what address the ringmem and
* ringstate are located. The flow ring itself can be instantiated in either host or device memory.
uint16 max_vdevs; /* max number of virtual interfaces supported */
sh_addr_t ifrm_w_idx_hostaddr; /* Array of all H2D ring's WR indices for IFRM */
-
- /* 32bit ptr to arrays of HWA DB indices for all rings in dongle memory */
- uint32 h2d_hwa_db_idx_ptr; /* Array of all H2D ring's HWA DB indices */
- uint32 d2h_hwa_db_idx_ptr; /* Array of all D2H ring's HWA DB indices */
-
} ring_info_t;
/**
uint32 host_dma_scratch_buffer_len;
sh_addr_t host_dma_scratch_buffer;
- /* location in host memory for scb host offload structures */
- sh_addr_t host_scb_addr;
- uint32 host_scb_size;
+ /** block of host memory for the dongle to push the status into */
+ uint32 device_rings_stsblk_len;
+ sh_addr_t device_rings_stsblk;
- /* anonymous union for overloading fields in structure */
- union {
- uint32 buzz_dbg_ptr; /* BUZZZ state format strings and trace buffer */
- struct {
- /* Host provided trap buffer length in words */
- uint16 device_trap_debug_buffer_len;
- uint16 rsvd2;
- };
- };
+ uint32 buzz_dbg_ptr; /* BUZZZ state format strings and trace buffer */
/* rev6 compatible changes */
uint32 flags2;
/* location in the host address space to write trap indication.
* At this point for the current rev of the spec, firmware will
* support only indications to 32 bit host addresses.
- * This essentially is device_trap_debug_buffer_addr
*/
sh_addr_t host_trap_addr;
uint32 device_fatal_logbuf_start;
/* location in host memory for offloaded modules */
- sh_addr_t hoffload_addr;
- uint32 flags3;
- uint32 host_cap2;
- uint32 host_cap3;
+ sh_addr_t hoffload_addr;
} pciedev_shared_t;
-/* Device F/W provides the following access function:
- * pciedev_shared_t *hnd_get_pciedev_shared(void);
- */
+extern pciedev_shared_t pciedev_shared;
/* host capabilities */
#define HOSTCAP_PCIEAPI_VERSION_MASK 0x000000FF
#define HOSTCAP_H2D_DAR 0x00010000
#define HOSTCAP_EXTENDED_TRAP_DATA 0x00020000
#define HOSTCAP_TXSTATUS_METADATA 0x00040000
-#define HOSTCAP_BT_LOGGING 0x00080000
-#define HOSTCAP_SNAPSHOT_UPLOAD 0x00100000
-#define HOSTCAP_FAST_DELETE_RING 0x00200000
-#define HOSTCAP_PKT_TXSTATUS 0x00400000
-#define HOSTCAP_UR_FW_NO_TRAP 0x00800000 /* Don't trap on UR */
-#define HOSTCAP_HSCB 0x02000000
-/* Host support for extended device trap debug buffer */
-#define HOSTCAP_EXT_TRAP_DBGBUF 0x04000000
-/* Host support for enhanced debug lane */
-#define HOSTCAP_EDL_RING 0x10000000
-#define HOSTCAP_PKT_TIMESTAMP 0x20000000
-#define HOSTCAP_PKT_HP2P 0x40000000
-#define HOSTCAP_HWA 0x80000000
-#define HOSTCAP2_DURATION_SCALE_MASK 0x0000003Fu
-
-/* extended trap debug buffer allocation sizes. Note that this buffer can be used for
- * other trap related purposes also.
- */
-#define BCMPCIE_HOST_EXT_TRAP_DBGBUF_LEN_MIN (64u * 1024u)
-#define BCMPCIE_HOST_EXT_TRAP_DBGBUF_LEN_MAX (256u * 1024u)
/**
* Mailboxes notify a remote party that an event took place, using interrupts. They use hardware
*/
/* H2D mail box Data */
-#define H2D_HOST_D3_INFORM 0x00000001
+#define H2D_HOST_D3_INFORM 0x00000001
#define H2D_HOST_DS_ACK 0x00000002
#define H2D_HOST_DS_NAK 0x00000004
-#define H2D_HOST_D0_INFORM_IN_USE 0x00000008
-#define H2D_HOST_D0_INFORM 0x00000010
-#define H2DMB_DS_ACTIVE 0x00000020
-#define H2DMB_DS_DEVICE_WAKE 0x00000040
-#define H2D_HOST_IDMA_INITED 0x00000080
-#define H2D_HOST_ACK_NOINT 0x00010000 /* d2h_ack interrupt ignore */
#define H2D_HOST_CONS_INT 0x80000000 /**< h2d int for console cmds */
#define H2D_FW_TRAP 0x20000000 /**< h2d force TRAP */
+#define H2D_HOST_D0_INFORM_IN_USE 0x00000008
+#define H2D_HOST_D0_INFORM 0x00000010
+#define H2D_HOST_IDMA_INITED 0x00000080
#define H2DMB_DS_HOST_SLEEP_INFORM H2D_HOST_D3_INFORM
#define H2DMB_DS_DEVICE_SLEEP_ACK H2D_HOST_DS_ACK
#define H2DMB_DS_DEVICE_SLEEP_NAK H2D_HOST_DS_NAK
#define H2DMB_D0_INFORM_IN_USE H2D_HOST_D0_INFORM_IN_USE
#define H2DMB_D0_INFORM H2D_HOST_D0_INFORM
+#define H2DMB_DS_ACTIVE 0x00000020
+#define H2DMB_DS_DEVICE_WAKE 0x00000040
#define H2DMB_FW_TRAP H2D_FW_TRAP
#define H2DMB_HOST_CONS_INT H2D_HOST_CONS_INT
#define H2DMB_DS_DEVICE_WAKE_ASSERT H2DMB_DS_DEVICE_WAKE
#define H2DMB_DS_DEVICE_WAKE_DEASSERT H2DMB_DS_ACTIVE
/* D2H mail box Data */
-#define D2H_DEV_D3_ACK 0x00000001
-#define D2H_DEV_DS_ENTER_REQ 0x00000002
-#define D2H_DEV_DS_EXIT_NOTE 0x00000004
-#define D2HMB_DS_HOST_SLEEP_EXIT_ACK 0x00000008
-#define D2H_DEV_IDMA_INITED 0x00000010
+#define D2H_DEV_D3_ACK 0x00000001
+#define D2H_DEV_DS_ENTER_REQ 0x00000002
+#define D2H_DEV_DS_EXIT_NOTE 0x00000004
+#define D2H_DEV_FWHALT 0x10000000
+#define D2H_DEV_EXT_TRAP_DATA 0x20000000
+#define D2H_DEV_IDMA_INITED 0x00000010
+#define D2H_FWTRAP_MASK 0x0000001F /* Adding maskbits for TRAP information */
#define D2HMB_DS_HOST_SLEEP_ACK D2H_DEV_D3_ACK
#define D2HMB_DS_DEVICE_SLEEP_ENTER_REQ D2H_DEV_DS_ENTER_REQ
#define D2HMB_DS_DEVICE_SLEEP_EXIT D2H_DEV_DS_EXIT_NOTE
-
+#define D2HMB_DS_HOST_SLEEP_EXIT_ACK 0x00000008
+#define D2HMB_FWHALT D2H_DEV_FWHALT
#define D2H_DEV_MB_MASK (D2H_DEV_D3_ACK | D2H_DEV_DS_ENTER_REQ | \
- D2H_DEV_DS_EXIT_NOTE | D2H_DEV_IDMA_INITED)
+ D2H_DEV_DS_EXIT_NOTE | D2H_DEV_IDMA_INITED | D2H_DEV_FWHALT | \
+ D2H_FWTRAP_MASK | D2H_DEV_EXT_TRAP_DATA)
#define D2H_DEV_MB_INVALIDATED(x) ((!x) || (x & ~D2H_DEV_MB_MASK))
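/*
 * Illustrative sketch, not from the original sources: decoding a D2H mailbox
 * word with the bit definitions above. The literals mirror the D2H_DEV_* and
 * D2H_FWTRAP_MASK values so the snippet stays self-contained; a real caller
 * would use the macros and the D2H_DEV_MB_INVALIDATED() check directly.
 */
#include <stdint.h>
#include <stdio.h>

static void example_decode_d2h_mb(uint32_t mb)
{
	if (mb == 0) {
		printf("spurious/empty mailbox read\n");
		return;
	}
	if (mb & 0x00000001u)	/* D2H_DEV_D3_ACK */
		printf("device acked D3 (host sleep)\n");
	if (mb & 0x00000002u)	/* D2H_DEV_DS_ENTER_REQ */
		printf("device requests deep sleep entry\n");
	if (mb & 0x00000004u)	/* D2H_DEV_DS_EXIT_NOTE */
		printf("device exited deep sleep\n");
	if (mb & 0x00000010u)	/* D2H_DEV_IDMA_INITED */
		printf("implicit DMA initialized\n");
	if (mb & 0x10000000u)	/* D2H_DEV_FWHALT */
		printf("firmware halted, trap info in low bits: 0x%x\n",
		       (unsigned)(mb & 0x0000001Fu));	/* D2H_FWTRAP_MASK */
	if (mb & 0x20000000u)	/* D2H_DEV_EXT_TRAP_DATA */
		printf("extended trap data available\n");
}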
-/* trap data codes */
-#define D2H_DEV_FWHALT 0x10000000
-#define D2H_DEV_EXT_TRAP_DATA 0x20000000
-#define D2H_DEV_TRAP_IN_TRAP 0x40000000
-#define D2H_DEV_TRAP_HOSTDB 0x80000000 /* trap as set by host DB */
-#define D2H_DEV_TRAP_DUE_TO_BT 0x01000000
-/* Indicates trap due to HMAP violation */
-#define D2H_DEV_TRAP_DUE_TO_HMAP 0x02000000
-/* Indicates whether HMAP violation was Write */
-#define D2H_DEV_TRAP_HMAP_WRITE 0x04000000
-#define D2H_DEV_TRAP_PING_HOST_FAILURE 0x08000000
-#define D2H_FWTRAP_MASK 0x0000001F /* Adding maskbits for TRAP information */
-
-#define D2HMB_FWHALT D2H_DEV_FWHALT
-#define D2HMB_TRAP_IN_TRAP D2H_DEV_TRAP_IN_TRAP
-#define D2HMB_EXT_TRAP_DATA D2H_DEV_EXT_TRAP_DATA
-
-/* Size of Extended Trap data Buffer */
-#define BCMPCIE_EXT_TRAP_DATA_MAXLEN 4096
/** These macro's operate on type 'inuse_lclbuf_pool_t' and are used by firmware only */
-#define PREVTXP(i, d) (((i) == 0) ? ((d) - 1) : ((i) - 1))
#define NEXTTXP(i, d) ((((i)+1) >= (d)) ? 0 : ((i)+1))
-#define NEXTNTXP(i, n, d) ((((i)+(n)) >= (d)) ? 0 : ((i)+(n)))
#define NTXPACTIVE(r, w, d) (((r) <= (w)) ? ((w)-(r)) : ((d)-(r)+(w)))
#define NTXPAVAIL(r, w, d) (((d) - NTXPACTIVE((r), (w), (d))) > 1)
/* Function can be used to notify host of FW halt */
-#define READ_AVAIL_SPACE(w, r, d) ((w >= r) ? (uint32)(w - r) : (uint32)(d - r))
-#define WRITE_SPACE_AVAIL_CONTINUOUS(r, w, d) ((w >= r) ? (d - w) : (r - w))
-#define WRITE_SPACE_AVAIL(r, w, d) (d - (NTXPACTIVE(r, w, d)) - 1)
-#define CHECK_WRITE_SPACE(r, w, d) ((r) > (w)) ? \
- (uint32)((r) - (w) - 1) : ((r) == 0 || (w) == 0) ? \
- (uint32)((d) - (w) - 1) : (uint32)((d) - (w))
+#define READ_AVAIL_SPACE(w, r, d) \
+ ((w >= r) ? (w - r) : (d - r))
+
+#define WRITE_SPACE_AVAIL_CONTINUOUS(r, w, d) ((w >= r) ? (d - w) : (r - w))
+#define WRITE_SPACE_AVAIL(r, w, d) (d - (NTXPACTIVE(r, w, d)) - 1)
+#define CHECK_WRITE_SPACE(r, w, d) \
+ ((r) > (w)) ? ((r) - (w) - 1) : ((r) == 0 || (w) == 0) ? ((d) - (w) - 1) : ((d) - (w))
+#define CHECK_NOWRITE_SPACE(r, w, d) \
+ (((r) == (w) + 1) || (((r) == 0) && ((w) == ((d) - 1))))
-#define CHECK_NOWRITE_SPACE(r, w, d) \
- (((uint32)(r) == (uint32)((w) + 1)) || (((r) == 0) && ((w) == ((d) - 1))))
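/*
 * Illustrative sketch, not from the original sources: the circular-ring
 * arithmetic above applied to a small ring, including the wrapped case. The
 * macro bodies are repeated locally so the snippet compiles on its own; 'd'
 * is the ring depth, 'r'/'w' are the read/write indices, and one slot is
 * always left empty so a full ring can be told apart from an empty one.
 */
#include <assert.h>

#define EX_NTXPACTIVE(r, w, d)		(((r) <= (w)) ? ((w)-(r)) : ((d)-(r)+(w)))
#define EX_WRITE_SPACE_AVAIL(r, w, d)	((d) - (EX_NTXPACTIVE((r), (w), (d))) - 1)
#define EX_CHECK_NOWRITE_SPACE(r, w, d) \
	(((r) == (w) + 1) || (((r) == 0) && ((w) == ((d) - 1))))

int main(void)
{
	/* depth 8, writer has wrapped: items live in slots 6, 7, 0, 1 */
	assert(EX_NTXPACTIVE(6, 2, 8) == 4);		/* four items pending */
	assert(EX_WRITE_SPACE_AVAIL(6, 2, 8) == 3);	/* 8 - 4 - 1 free slots */

	/* empty ring: read index has caught up with write index */
	assert(EX_NTXPACTIVE(3, 3, 8) == 0);
	assert(EX_WRITE_SPACE_AVAIL(3, 3, 8) == 7);

	/* full ring: writer is one slot behind the reader */
	assert(EX_CHECK_NOWRITE_SPACE(3, 2, 8));
	return 0;
}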
#define WRT_PEND(x) ((x)->wr_pending)
#define DNGL_RING_WPTR(msgbuf) (*((msgbuf)->tcm_rs_w_ptr)) /**< advanced by producer */
#define HOST_RING_BASE(x) ((x)->dma_buf.va)
#define HOST_RING_END(x) ((uint8 *)HOST_RING_BASE((x)) + \
((RING_MAX_ITEM((x))-1)*RING_LEN_ITEMS((x))))
-
-/* Trap types copied in the pciedev_shared.trap_addr */
-#define FW_INITIATED_TRAP_TYPE (0x1 << 7)
-#define HEALTHCHECK_NODS_TRAP_TYPE (0x1 << 6)
-
#endif /* _bcmpcie_h_ */
/*
* Broadcom PCI-SPI Host Controller Register Definitions
*
- * Copyright (C) 1999-2019, Broadcom.
- *
+ * Copyright (C) 1999-2017, Broadcom Corporation
+ *
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
* under the terms of the GNU General Public License version 2 (the "GPL"),
* available at http://www.broadcom.com/licenses/GPLv2.php, with the
* following added to such license:
- *
+ *
* As a special exception, the copyright holders of this software give you
* permission to link this software with independent modules, and to copy and
* distribute the resulting executable under terms of your choice, provided that
* the license of that module. An independent module is a module which is not
* derived from this software. The special exception does not apply to any
* modifications of the software.
- *
+ *
* Notwithstanding the above, under no circumstances may you combine this
* software in any way with any other Broadcom software provided under a license
* other than the GPL, without Broadcom's express prior written consent.
#define PAD _XSTR(__LINE__)
#endif /* PAD */
+
typedef volatile struct {
uint32 spih_ctrl; /* 0x00 SPI Control Register */
uint32 spih_stat; /* 0x04 SPI Status Register */
#define PCI_SYS_ERR_INT_EN (1 << 4) /* System Error Interrupt Enable */
#define PCI_SOFTWARE_RESET (1U << 31) /* Software reset of the PCI Core. */
+
/* PCI Core ISR Register bit definitions */
#define PCI_INT_PROP_ST (1 << 0) /* Interrupt Propagation Status */
#define PCI_WB_ERR_INT_ST (1 << 1) /* Wishbone Error Interrupt Status */
#define PCI_PAR_ERR_INT_ST (1 << 3) /* Parity Error Interrupt Status */
#define PCI_SYS_ERR_INT_ST (1 << 4) /* System Error Interrupt Status */
+
/* Registers on the Wishbone bus */
#define SPIH_CTLR_INTR (1 << 0) /* SPI Host Controller Core Interrupt */
#define SPIH_DEV_INTR (1 << 1) /* SPI Device Interrupt */
/*
* Performance counters software interface.
*
- * Copyright (C) 1999-2019, Broadcom.
- *
+ * Copyright (C) 1999-2017, Broadcom Corporation
+ *
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
* under the terms of the GNU General Public License version 2 (the "GPL"),
* available at http://www.broadcom.com/licenses/GPLv2.php, with the
* following added to such license:
- *
+ *
* As a special exception, the copyright holders of this software give you
* permission to link this software with independent modules, and to copy and
* distribute the resulting executable under terms of your choice, provided that
* the license of that module. An independent module is a module which is not
* derived from this software. The special exception does not apply to any
* modifications of the software.
- *
+ *
* Notwithstanding the above, under no circumstances may you combine this
* software in any way with any other Broadcom software provided under a license
* other than the GPL, without Broadcom's express prior written consent.
* Definitions for API from sdio common code (bcmsdh) to individual
* host controller drivers.
*
- * Copyright (C) 1999-2019, Broadcom.
- *
+ * Copyright (C) 1999-2017, Broadcom Corporation
+ *
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
* under the terms of the GNU General Public License version 2 (the "GPL"),
* available at http://www.broadcom.com/licenses/GPLv2.php, with the
* following added to such license:
- *
+ *
* As a special exception, the copyright holders of this software give you
* permission to link this software with independent modules, and to copy and
* distribute the resulting executable under terms of your choice, provided that
* the license of that module. An independent module is a module which is not
* derived from this software. The special exception does not apply to any
* modifications of the software.
- *
+ *
* Notwithstanding the above, under no circumstances may you combine this
* software in any way with any other Broadcom software provided under a license
* other than the GPL, without Broadcom's express prior written consent.
*
* <<Broadcom-WL-IPTag/Open:>>
*
- * $Id: bcmsdbus.h 689948 2017-03-14 05:21:03Z $
+ * $Id: bcmsdbus.h 644725 2016-06-21 12:26:04Z $
*/
#ifndef _sdio_api_h_
#include <linux/mmc/sdio_func.h>
#endif /* defined (BT_OVER_SDIO) */
+
#define SDIOH_API_RC_SUCCESS (0x00)
#define SDIOH_API_RC_FAIL (0x01)
#define SDIOH_API_SUCCESS(status) (status == 0)
#warning "SDPCM_DEFGLOM_SIZE cannot be higher than SDPCM_MAXGLOM_SIZE!!"
#undef SDPCM_DEFGLOM_SIZE
#define SDPCM_DEFGLOM_SIZE SDPCM_MAXGLOM_SIZE
-#endif // endif
+#endif
#ifdef PKT_STATICS
typedef struct pkt_statics {
#if defined(DHD_DEBUG)
extern bool sdioh_interrupt_pending(sdioh_info_t *si);
-#endif // endif
+#endif
/* read or write one byte using cmd52 */
extern SDIOH_API_RC sdioh_request_byte(sdioh_info_t *si, uint rw, uint fnc, uint addr, uint8 *byte);
/* Reset and re-initialize the device */
extern int sdioh_sdio_reset(sdioh_info_t *si);
-#ifdef BCMSPI
-/* Function to pass gSPI specific device-status bits to dhd. */
-extern uint32 sdioh_get_dstatus(sdioh_info_t *si);
-/* chipid and chiprev info for lower layers to control sw WAR's for hw bugs. */
-extern void sdioh_chipinfo(sdioh_info_t *si, uint32 chip, uint32 chiprev);
-extern void sdioh_dwordmode(sdioh_info_t *si, bool set);
-#endif /* BCMSPI */
#if defined(BCMSDIOH_STD)
#define SDIOH_SLEEP_ENABLED
-#endif // endif
+#endif
extern SDIOH_API_RC sdioh_sleep(sdioh_info_t *si, bool enab);
/* GPIO support */
* export functions to client drivers
* abstract OS and BUS specific details of SDIO
*
- * Copyright (C) 1999-2019, Broadcom.
- *
+ * Copyright (C) 1999-2017, Broadcom Corporation
+ *
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
* under the terms of the GNU General Public License version 2 (the "GPL"),
* available at http://www.broadcom.com/licenses/GPLv2.php, with the
* following added to such license:
- *
+ *
* As a special exception, the copyright holders of this software give you
* permission to link this software with independent modules, and to copy and
* distribute the resulting executable under terms of your choice, provided that
* the license of that module. An independent module is a module which is not
* derived from this software. The special exception does not apply to any
* modifications of the software.
- *
+ *
* Notwithstanding the above, under no circumstances may you combine this
* software in any way with any other Broadcom software provided under a license
* other than the GPL, without Broadcom's express prior written consent.
*
* <<Broadcom-WL-IPTag/Open:>>
*
- * $Id: bcmsdh.h 727623 2017-10-21 01:00:32Z $
+ * $Id: bcmsdh.h 698895 2017-05-11 02:55:17Z $
*/
/**
typedef struct bcmsdh_info bcmsdh_info_t;
typedef void (*bcmsdh_cb_fn_t)(void *);
+
#if defined(BT_OVER_SDIO)
typedef enum {
NO_HANG_STATE = 0,
HANG_START_STATE = 1,
HANG_RECOVERY_STATE = 2
} dhd_hang_state_t;
-#endif // endif
+#endif
extern bcmsdh_info_t *bcmsdh_attach(osl_t *osh, void *sdioh, ulong *regsva);
/**
#if defined(DHD_DEBUG)
/* Query pending interrupt status from the host controller */
extern bool bcmsdh_intr_pending(void *sdh);
-#endif // endif
+#endif
/* Register a callback to be called if and when bcmsdh detects
* device removal. No-op in the case of non-removable/hardwired devices.
/* Miscellaneous knob tweaker. */
extern int bcmsdh_iovar_op(void *sdh, const char *name,
- void *params, uint plen, void *arg, uint len, bool set);
+ void *params, int plen, void *arg, int len, bool set);
/* Reset and reinitialize the device */
extern int bcmsdh_reset(bcmsdh_info_t *sdh);
extern int bcmsdh_reg_sdio_notify(void* semaphore);
extern void bcmsdh_unreg_sdio_notify(void);
-#if defined(OOB_INTR_ONLY) || defined(BCMSPI_ANDROID)
+#if defined(OOB_INTR_ONLY)
extern int bcmsdh_oob_intr_register(bcmsdh_info_t *bcmsdh, bcmsdh_cb_fn_t oob_irq_handler,
void* oob_irq_handler_context);
extern void bcmsdh_oob_intr_unregister(bcmsdh_info_t *sdh);
extern void bcmsdh_oob_intr_set(bcmsdh_info_t *sdh, bool enable);
-#endif /* defined(OOB_INTR_ONLY) || defined(BCMSPI_ANDROID) */
+#endif
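/*
 * Illustrative sketch, not from the original sources: registering and arming
 * an out-of-band wake interrupt with the three calls declared above. Only the
 * bcmsdh_oob_intr_* signatures come from this header; the handler body, the
 * 'dhd_ctx' context and the include path are assumptions, and the code would
 * sit under the same OOB_INTR_ONLY conditional.
 */
#include <bcmsdh.h>	/* assumed include for bcmsdh_info_t and the declarations above */

static void example_oob_irq_handler(void *ctx)
{
	/* typically: mark the bus awake and schedule the DPC for 'ctx' */
	(void)ctx;
}

static int example_setup_oob(bcmsdh_info_t *sdh, void *dhd_ctx)
{
	int err;

	err = bcmsdh_oob_intr_register(sdh, example_oob_irq_handler, dhd_ctx);
	if (err)
		return err;

	bcmsdh_oob_intr_set(sdh, TRUE);	/* enable the OOB line */
	return 0;
}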
extern void bcmsdh_dev_pm_stay_awake(bcmsdh_info_t *sdh);
extern void bcmsdh_dev_relax(bcmsdh_info_t *sdh);
extern bool bcmsdh_dev_pm_enabled(bcmsdh_info_t *sdh);
/* Function to pass chipid and rev to lower layers for controlling pr's */
extern void bcmsdh_chipinfo(void *sdh, uint32 chip, uint32 chiprev);
-#ifdef BCMSPI
-extern void bcmsdh_dwordmode(void *sdh, bool set);
-#endif /* BCMSPI */
extern int bcmsdh_sleep(void *sdh, bool enab);
/*
* BCMSDH Function Driver for the native SDIO/MMC driver in the Linux Kernel
*
- * Copyright (C) 1999-2019, Broadcom.
- *
+ * Copyright (C) 1999-2017, Broadcom Corporation
+ *
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
* under the terms of the GNU General Public License version 2 (the "GPL"),
* available at http://www.broadcom.com/licenses/GPLv2.php, with the
* following added to such license:
- *
+ *
* As a special exception, the copyright holders of this software give you
* permission to link this software with independent modules, and to copy and
* distribute the resulting executable under terms of your choice, provided that
* the license of that module. An independent module is a module which is not
* derived from this software. The special exception does not apply to any
* modifications of the software.
- *
+ *
* Notwithstanding the above, under no circumstances may you combine this
* software in any way with any other Broadcom software provided under a license
* other than the GPL, without Broadcom's express prior written consent.
*
* <<Broadcom-WL-IPTag/Proprietary,Open:>>
*
- * $Id: bcmsdh_sdmmc.h 753315 2018-03-21 04:10:12Z $
+ * $Id: bcmsdh_sdmmc.h 687253 2017-02-28 09:33:36Z $
*/
#ifndef __BCMSDH_SDMMC_H__
/* private bus modes */
#define SDIOH_MODE_SD4 2
#define CLIENT_INTR 0x100 /* Get rid of this! */
-#define SDIOH_SDMMC_MAX_SG_ENTRIES 64
+#define SDIOH_SDMMC_MAX_SG_ENTRIES (SDPCM_MAXGLOM_SIZE + 2)
struct sdioh_info {
osl_t *osh; /* osh handler */
extern void sdioh_sdmmc_devintr_on(sdioh_info_t *sd);
extern void sdioh_sdmmc_devintr_off(sdioh_info_t *sd);
+
/**************************************************************
* Internal interfaces: bcmsdh_sdmmc.c references to per-port code
*/
* Broadcom SDIO/PCMCIA
* Software-specific definitions shared between device and host side
*
- * Copyright (C) 1999-2019, Broadcom.
- *
+ * Copyright (C) 1999-2017, Broadcom Corporation
+ *
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
* under the terms of the GNU General Public License version 2 (the "GPL"),
* available at http://www.broadcom.com/licenses/GPLv2.php, with the
* following added to such license:
- *
+ *
* As a special exception, the copyright holders of this software give you
* permission to link this software with independent modules, and to copy and
* distribute the resulting executable under terms of your choice, provided that
* the license of that module. An independent module is a module which is not
* derived from this software. The special exception does not apply to any
* modifications of the software.
- *
+ *
* Notwithstanding the above, under no circumstances may you combine this
* software in any way with any other Broadcom software provided under a license
* other than the GPL, without Broadcom's express prior written consent.
*
* <<Broadcom-WL-IPTag/Open:>>
*
- * $Id: bcmsdpcm.h 700076 2017-05-17 14:42:22Z $
+ * $Id: bcmsdpcm.h 614070 2016-01-21 00:55:57Z $
*/
#ifndef _bcmsdpcm_h_
#define SMB_DATA_DSACK 0x200 /* host acking a deepsleep request */
#define SMB_DATA_DSNACK 0x400 /* host nacking a deepsleep request */
#endif /* DS_PROT */
-/* force a trap */
-#define SMB_DATA_TRAP 0x800 /* host forcing trap */
#define SMB_DATA_VERSION_MASK 0x00ff0000 /* host protocol version (sent with F2 enable) */
#define SMB_DATA_VERSION_SHIFT 16 /* host protocol version (sent with F2 enable) */
#define HMB_DATA_DSPROT_MASK 0xf00
#endif /* DS_PROT */
+
#define HMB_DATA_FCDATA_MASK 0xff000000 /* per prio flowcontrol data */
#define HMB_DATA_FCDATA_SHIFT 24 /* per prio flowcontrol data */
/* Data Offset from SOF (HW Tag, SW Tag, Pad) */
#define SDPCM_DOFFSET_OFFSET 3 /* Data Offset */
-#define SDPCM_DOFFSET_VALUE(p) (((uint8 *)p)[SDPCM_DOFFSET_OFFSET] & 0xff)
+#define SDPCM_DOFFSET_VALUE(p) (((uint8 *)p)[SDPCM_DOFFSET_OFFSET] & 0xff)
#define SDPCM_DOFFSET_MASK 0xff000000
#define SDPCM_DOFFSET_SHIFT 24
#define SDPCM_SHARED_SET_BRPT 0x1000
#define SDPCM_SHARED_PENDING_BRPT 0x2000
#define SDPCM_SHARED_FATAL_LOGBUF_VALID 0x100000
-#define SDPCM_SHARED_RXLIM_POST 0x4000
typedef struct {
uint32 flags;
uint32 device_fatal_logbuf_start;
} sdpcm_shared_t;
-/* Device F/W provides the following access function:
- * sdpcm_shared_t *hnd_get_sdpcm_shared(void);
- */
+extern sdpcm_shared_t sdpcm_shared;
#endif /* _bcmsdpcm_h_ */
/*
* SD-SPI Protocol Conversion - BCMSDH->SPI Translation Layer
*
- * Copyright (C) 1999-2019, Broadcom.
- *
+ * Copyright (C) 1999-2017, Broadcom Corporation
+ *
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
* under the terms of the GNU General Public License version 2 (the "GPL"),
* available at http://www.broadcom.com/licenses/GPLv2.php, with the
* following added to such license:
- *
+ *
* As a special exception, the copyright holders of this software give you
* permission to link this software with independent modules, and to copy and
* distribute the resulting executable under terms of your choice, provided that
* the license of that module. An independent module is a module which is not
* derived from this software. The special exception does not apply to any
* modifications of the software.
- *
+ *
* Notwithstanding the above, under no circumstances may you combine this
* software in any way with any other Broadcom software provided under a license
* other than the GPL, without Broadcom's express prior written consent.
/*
* 'Standard' SDIO HOST CONTROLLER driver
*
- * Copyright (C) 1999-2019, Broadcom.
- *
+ * Copyright (C) 1999-2017, Broadcom Corporation
+ *
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
* under the terms of the GNU General Public License version 2 (the "GPL"),
* available at http://www.broadcom.com/licenses/GPLv2.php, with the
* following added to such license:
- *
+ *
* As a special exception, the copyright holders of this software give you
* permission to link this software with independent modules, and to copy and
* distribute the resulting executable under terms of your choice, provided that
* the license of that module. An independent module is a module which is not
* derived from this software. The special exception does not apply to any
* modifications of the software.
- *
+ *
* Notwithstanding the above, under no circumstances may you combine this
* software in any way with any other Broadcom software provided under a license
* other than the GPL, without Broadcom's express prior written consent.
*
* <<Broadcom-WL-IPTag/Open:>>
*
- * $Id: bcmsdstd.h 768214 2018-06-19 03:53:58Z $
+ * $Id: bcmsdstd.h 514727 2014-11-12 03:02:48Z $
*/
#ifndef _BCM_SD_STD_H
#define _BCM_SD_STD_H
#define sdstd_os_yield(sd) do {} while (0)
#define RETRIES_SMALL 100
+
#define USE_BLOCKMODE 0x2 /* Block mode can be single block or multi */
#define USE_MULTIBLOCK 0x4
#define HC_INTR_RETUNING 0x1000
+
#ifdef BCMSDIOH_TXGLOM
/* Total glom pkt can not exceed 64K
* need one more slot for glom padding packet
ulong dma_phys_arr[SDIOH_MAXGLOM_SIZE]; /* DMA_MAPed address of frames */
uint16 nbytes[SDIOH_MAXGLOM_SIZE]; /* Size of each frame */
} glom_buf_t;
-#endif // endif
+#endif
struct sdioh_info {
- uint cfg_bar; /* pci cfg address for bar */
- uint32 caps; /* cached value of capabilities reg */
- uint32 curr_caps; /* max current capabilities reg */
+ uint cfg_bar; /* pci cfg address for bar */
+ uint32 caps; /* cached value of capabilities reg */
+ uint32 curr_caps; /* max current capabilities reg */
- osl_t *osh; /* osh handler */
- volatile char *mem_space; /* pci device memory va */
- uint lockcount; /* nest count of sdstd_lock() calls */
+ osl_t *osh; /* osh handler */
+ volatile char *mem_space; /* pci device memory va */
+ uint lockcount; /* nest count of sdstd_lock() calls */
bool client_intr_enabled; /* interrupt connected flag */
bool intr_handler_valid; /* client driver interrupt handler valid */
sdioh_cb_fn_t intr_handler; /* registered interrupt handler */
ulong adma2_dscr_start_phys;
uint alloced_adma2_dscr_size;
- int r_cnt; /* rx count */
- int t_cnt; /* tx_count */
+ int r_cnt; /* rx count */
+ int t_cnt; /* tx_count */
bool got_hcint; /* local interrupt flag */
uint16 last_intrstatus; /* to cache intrstatus */
- int host_UHSISupported; /* whether UHSI is supported for HC. */
- int card_UHSI_voltage_Supported; /* whether UHSI is supported for
+ int host_UHSISupported; /* whether UHSI is supported for HC. */
+ int card_UHSI_voltage_Supported; /* whether UHSI is supported for
* Card in terms of Voltage [1.8 or 3.3].
*/
int global_UHSI_Supp; /* type of UHSI support in both host and card.
* HOST_SDR_12_25: SDR12 and SDR25 supported
* HOST_SDR_50_104_DDR: one of SDR50/SDR104 or DDR50 supptd
*/
- volatile int sd3_dat_state; /* data transfer state used for retuning check */
- volatile int sd3_tun_state; /* tuning state used for retuning check */
- bool sd3_tuning_reqd; /* tuning requirement parameter */
- bool sd3_tuning_disable; /* tuning disable due to bus sleeping */
+ volatile int sd3_dat_state; /* data transfer state used for retuning check */
+ volatile int sd3_tun_state; /* tuning state used for retuning check */
+ bool sd3_tuning_reqd; /* tuning requirement parameter */
uint32 caps3; /* cached value of 32 MSbits capabilities reg (SDIO 3.0) */
#ifdef BCMSDIOH_TXGLOM
glom_buf_t glom_info; /* pkt information used for glomming */
uint txglom_mode; /* Txglom mode: 0 - copy, 1 - multi-descriptor */
-#endif // endif
+#endif
};
#define DMA_MODE_NONE 0
#define CHECK_TUNING_PRE_DATA 1
#define CHECK_TUNING_POST_DATA 2
+
#ifdef DHD_DEBUG
#define SD_DHD_DISABLE_PERIODIC_TUNING 0x01
#define SD_DHD_ENABLE_PERIODIC_TUNING 0x00
-#endif // endif
+#endif
+
/************************************************************
* Internal interfaces: per-port references into bcmsdstd.c
/* Wait for specified interrupt and error bits to be set */
extern void sdstd_spinbits(sdioh_info_t *sd, uint16 norm, uint16 err);
+
/**************************************************************
* Internal interfaces: bcmsdstd.c references to per-port code
*/
/*
* Broadcom SPI Low-Level Hardware Driver API
*
- * Copyright (C) 1999-2019, Broadcom.
- *
+ * Copyright (C) 1999-2017, Broadcom Corporation
+ *
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
* under the terms of the GNU General Public License version 2 (the "GPL"),
* available at http://www.broadcom.com/licenses/GPLv2.php, with the
* following added to such license:
- *
+ *
* As a special exception, the copyright holders of this software give you
* permission to link this software with independent modules, and to copy and
* distribute the resulting executable under terms of your choice, provided that
* the license of that module. An independent module is a module which is not
* derived from this software. The special exception does not apply to any
* modifications of the software.
- *
+ *
* Notwithstanding the above, under no circumstances may you combine this
* software in any way with any other Broadcom software provided under a license
* other than the GPL, without Broadcom's express prior written consent.
/*
* SD-SPI Protocol Conversion - BCMSDH->gSPI Translation Layer
*
- * Copyright (C) 1999-2019, Broadcom.
- *
+ * Copyright (C) 1999-2017, Broadcom Corporation
+ *
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
* under the terms of the GNU General Public License version 2 (the "GPL"),
* available at http://www.broadcom.com/licenses/GPLv2.php, with the
* following added to such license:
- *
+ *
* As a special exception, the copyright holders of this software give you
* permission to link this software with independent modules, and to copy and
* distribute the resulting executable under terms of your choice, provided that
* the license of that module. An independent module is a module which is not
* derived from this software. The special exception does not apply to any
* modifications of the software.
- *
+ *
* Notwithstanding the above, under no circumstances may you combine this
* software in any way with any other Broadcom software provided under a license
* other than the GPL, without Broadcom's express prior written consent.
#ifndef SPI_MAX_IOFUNCS
/* Maximum number of I/O funcs */
#define SPI_MAX_IOFUNCS 4
-#endif // endif
+#endif
/* global msglevel for debug messages - bitvals come from sdiovar.h */
#if defined(DHD_DEBUG)
#define sd_debug(x)
#define sd_data(x)
#define sd_ctrl(x)
-#endif // endif
+#endif
#define sd_log(x)
struct sdioh_info {
uint cfg_bar; /* pci cfg address for bar */
uint32 caps; /* cached value of capabilities reg */
-#ifndef BCMSPI_ANDROID
void *bar0; /* BAR0 for PCI Device */
-#endif /* !BCMSPI_ANDROID */
osl_t *osh; /* osh handler */
void *controller; /* Pointer to SPI Controller's private data struct */
uint lockcount; /* nest count of spi_lock() calls */
/*
* SROM format definition.
*
- * Copyright (C) 1999-2019, Broadcom.
- *
+ * Copyright (C) 1999-2017, Broadcom Corporation
+ *
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
* under the terms of the GNU General Public License version 2 (the "GPL"),
* available at http://www.broadcom.com/licenses/GPLv2.php, with the
* following added to such license:
- *
+ *
* As a special exception, the copyright holders of this software give you
* permission to link this software with independent modules, and to copy and
* distribute the resulting executable under terms of your choice, provided that
* the license of that module. An independent module is a module which is not
* derived from this software. The special exception does not apply to any
* modifications of the software.
- *
+ *
* Notwithstanding the above, under no circumstances may you combine this
* software in any way with any other Broadcom software provided under a license
* other than the GPL, without Broadcom's express prior written consent.
*
* <<Broadcom-WL-IPTag/Open:>>
*
- * $Id: bcmsrom_fmt.h 688657 2017-03-07 10:12:56Z $
+ * $Id: bcmsrom_fmt.h 646789 2016-06-30 19:43:02Z $
*/
#ifndef _bcmsrom_fmt_h_
#define _bcmsrom_fmt_h_
-#define SROM_MAXREV 16 /* max revision supported by driver */
+#define SROM_MAXREV 13 /* max revision supported by driver */
-/* Maximum srom: 16 Kilobits == 2048 bytes */
+/* Maximum srom: 12 Kilobits == 1536 bytes */
-#define SROM_MAX 2048
-#define SROM_MAXW 1024
+#define SROM_MAX 1536
+#define SROM_MAXW 594
#ifdef LARGE_NVRAM_MAXSZ
-#define VARS_MAX LARGE_NVRAM_MAXSZ
+#define VARS_MAX LARGE_NVRAM_MAXSZ
#else
-#define LARGE_NVRAM_MAXSZ 8192
-#define VARS_MAX LARGE_NVRAM_MAXSZ
+#define VARS_MAX 4096
#endif /* LARGE_NVRAM_MAXSZ */
/* PCI fields */
#define PCI_F0DEVID 48
+
#define SROM_WORDS 64
-#define SROM_SIGN_MINWORDS 128
+
#define SROM3_SWRGN_OFF 28 /* s/w region offset in words */
#define SROM_SSID 2
#define SROM4_SWITCH_MASK 0xff00
#define SROM4_SWITCH_SHIFT 8
+
/* Per-path fields */
#define MAX_PATH_SROM 4
#define SROM4_PATH0 64
#define SROM4_CRCREV 219
+
/* SROM Rev 8: Make space for a 48word hardware header for PCIe rev >= 6.
 * This is a combined srom for both MIMO and SISO boards, usable in
* the .130 4Kilobit OTP with hardware redundancy.
/* Measured power 1 & 2, 0-13 bits at offset 95, MSB 2 bits are unused for now. */
#define SROM8_MPWR_1_AND_2 95
+
/* Per-path offsets & fields */
#define SROM8_PATH0 96
#define SROM8_PATH1 112
#define SROM10_WORDS 230
#define SROM10_SIGNATURE SROM4_SIGNATURE
+
/* SROM REV 11 */
#define SROM11_BREV 65
#define SROM11_WORDS 234
#define SROM11_SIGNATURE 0x0634
+
/* SROM REV 12 */
#define SROM12_SIGN 64
#define SROM12_WORDS 512
#define SROM12_PDOFF_20in80M_5G_B3 491
#define SROM12_PDOFF_20in80M_5G_B4 492
+#define SROM13_PDOFFSET20IN40M5GCORE3 98
+#define SROM13_PDOFFSET20IN40M5GCORE3_1 99
+#define SROM13_PDOFFSET20IN80M5GCORE3 510
+#define SROM13_PDOFFSET20IN80M5GCORE3_1 511
+#define SROM13_PDOFFSET40IN80M5GCORE3 105
+#define SROM13_PDOFFSET40IN80M5GCORE3_1 106
+
+#define SROM13_PDOFFSET20IN40M2G 94
+#define SROM13_PDOFFSET20IN40M2GCORE3 95
+
#define SROM12_GPDN_L 91 /* GPIO pull down bits [15:0] */
#define SROM12_GPDN_H 233 /* GPIO pull down bits [31:16] */
#define SROM13_SIGNATURE 0x4d55
#define SROM13_CRCREV 589
+
/* Per-path fields and offset */
#define MAX_PATH_SROM_13 4
#define SROM13_PATH0 256
#define SROM13_ANTGAIN_BANDBGA 100
+#define SROM13_RXGAINS2CORE0 101
+#define SROM13_RXGAINS2CORE1 102
+#define SROM13_RXGAINS2CORE2 103
+#define SROM13_RXGAINS2CORE3 104
+
#define SROM13_PDOFFSET40IN80M5GCORE3 105
#define SROM13_PDOFFSET40IN80M5GCORE3_1 106
#define SROM13_RPCAL5GB01CORE3 102
#define SROM13_RPCAL5GB23CORE3 103
-#define SROM13_SW_TXRX_MASK 104
-
#define SROM13_EU_EDCRSTH 232
#define SROM13_SWCTRLMAP4_CFG 493
#define SROM13_RXGAINERRCORE3 586
#define SROM13_RXGAINERRCORE3_1 587
-#define SROM13_PDOFF_2G_CCK_20M 167
-
-#define SROM15_CALDATA_WORDS 943
-#define SROM15_CAL_OFFSET_LOC 68
-#define MAX_IOCTL_TXCHUNK_SIZE 1500
-#define SROM15_MAX_CAL_SIZE 1886
-#define SROM15_SIGNATURE 0x110c
-#define SROM15_WORDS 1024
-#define SROM15_MACHI 65
-#define SROM15_CRCREV 1023
-#define SROM15_BRDREV 69
-#define SROM15_CCODE 70
-#define SROM15_REGREV 71
-#define SROM15_SIGN 64
-
-#define SROM16_SIGN 128
-#define SROM16_WORDS 1024
-#define SROM16_SIGNATURE 0x4357
-#define SROM16_CRCREV 1023
-#define SROM16_MACHI 129
-#define SROM16_CALDATA_OFFSET_LOC 132
-#define SROM16_BOARDREV 133
-#define SROM16_CCODE 134
-#define SROM16_REGREV 135
-
-#define SROM_CALDATA_WORDS 832
-
-#define SROM17_SIGN 64
-#define SROM17_BRDREV 65
-#define SROM17_MACADDR 66
-#define SROM17_CCODE 69
-#define SROM17_CALDATA 70
-#define SROM17_GCALTMP 71
-
-#define SROM17_C0SRD202G 72
-#define SROM17_C0SRD202G_1 73
-#define SROM17_C0SRD205GL 74
-#define SROM17_C0SRD205GL_1 75
-#define SROM17_C0SRD205GML 76
-#define SROM17_C0SRD205GML_1 77
-#define SROM17_C0SRD205GMU 78
-#define SROM17_C0SRD205GMU_1 79
-#define SROM17_C0SRD205GH 80
-#define SROM17_C0SRD205GH_1 81
-
-#define SROM17_C1SRD202G 82
-#define SROM17_C1SRD202G_1 83
-#define SROM17_C1SRD205GL 84
-#define SROM17_C1SRD205GL_1 85
-#define SROM17_C1SRD205GML 86
-#define SROM17_C1SRD205GML_1 87
-#define SROM17_C1SRD205GMU 88
-#define SROM17_C1SRD205GMU_1 89
-#define SROM17_C1SRD205GH 90
-#define SROM17_C1SRD205GH_1 91
-
-#define SROM17_TRAMMAGIC 92
-#define SROM17_TRAMMAGIC_1 93
-#define SROM17_TRAMDATA 94
-
-#define SROM17_WORDS 256
-#define SROM17_CRCREV 255
-#define SROM17_CALDATA_WORDS 161
-#define SROM17_SIGNATURE 0x1103 /* 4355 in hex format */
+
+#define SROM16_SIGN 104
+#define SROM16_WORDS 512
+#define SROM16_SIGNATURE 0x4347
+#define SROM16_CRCREV 511
typedef struct {
uint8 tssipos; /* TSSI positive slope, 1: positive, 0: negative */
/*
* Table that encodes the srom formats for PCI/PCIe NICs.
*
- * Copyright (C) 1999-2019, Broadcom.
- *
+ * Copyright (C) 1999-2017, Broadcom Corporation
+ *
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
* under the terms of the GNU General Public License version 2 (the "GPL"),
* available at http://www.broadcom.com/licenses/GPLv2.php, with the
* following added to such license:
- *
+ *
* As a special exception, the copyright holders of this software give you
* permission to link this software with independent modules, and to copy and
* distribute the resulting executable under terms of your choice, provided that
* the license of that module. An independent module is a module which is not
* derived from this software. The special exception does not apply to any
* modifications of the software.
- *
+ *
* Notwithstanding the above, under no circumstances may you combine this
* software in any way with any other Broadcom software provided under a license
* other than the GPL, without Broadcom's express prior written consent.
*
* <<Broadcom-WL-IPTag/Open:>>
*
- * $Id: bcmsrom_tbl.h 700323 2017-05-18 16:12:11Z $
+ * $Id: bcmsrom_tbl.h 616054 2016-01-29 13:22:24Z $
*/
#ifndef _bcmsrom_tbl_h_
#define SRFL_ARRAY 0x100 /* value is in an array. All elements EXCEPT FOR THE LAST
* ONE in the array should have this flag set.
*/
-#define PRHEX_N_MORE (SRFL_PRHEX | SRFL_MORE)
+
#define SROM_DEVID_PCIE 48
* - The last entry's name field must be NULL to indicate the end of the table. Other
* entries must have non-NULL name.
*/
-#if !defined(SROM15_MEMOPT)
static const sromvar_t pci_sromvars[] = {
/* name revmask flags off mask */
#if defined(CABLECPE)
{"devid", 0xffffff00, SRFL_PRHEX, SROM_DEVID_PCIE, 0xffff},
#else
{"devid", 0xffffff00, SRFL_PRHEX|SRFL_NOVAR, PCI_F0DEVID, 0xffff},
-#endif // endif
+#endif
{"boardrev", 0x0000000e, SRFL_PRHEX, SROM_AABREV, SROM_BR_MASK},
{"boardrev", 0x000000f0, SRFL_PRHEX, SROM4_BREV, 0xffff},
{"boardrev", 0xffffff00, SRFL_PRHEX, SROM8_BREV, 0xffff},
{"pdoffset20in80m5gb3", 0xfffff000, 0, SROM12_PDOFF_20in80M_5G_B3, 0xffff},
{"pdoffset20in80m5gb4", 0xfffff000, 0, SROM12_PDOFF_20in80M_5G_B4, 0xffff},
+ {"pdoffset20in40m5gcore3", 0xffffe000, 0, SROM13_PDOFFSET20IN40M5GCORE3, 0xffff},
+ {"pdoffset20in40m5gcore3_1", 0xffffe000, 0, SROM13_PDOFFSET20IN40M5GCORE3_1, 0xffff},
+ {"pdoffset20in80m5gcore3", 0xffffe000, 0, SROM13_PDOFFSET20IN80M5GCORE3, 0xffff},
+ {"pdoffset20in80m5gcore3_1", 0xffffe000, 0, SROM13_PDOFFSET20IN80M5GCORE3_1, 0xffff},
+ {"pdoffset40in80m5gcore3", 0xffffe000, 0, SROM13_PDOFFSET40IN80M5GCORE3, 0xffff},
+ {"pdoffset40in80m5gcore3_1", 0xffffe000, 0, SROM13_PDOFFSET40IN80M5GCORE3_1, 0xffff},
+
+ {"pdoffset20in40m2g", 0xffffe000, 0, SROM13_PDOFFSET20IN40M2G, 0xffff},
+ {"pdoffset20in40m2gcore3", 0xffffe000, 0, SROM13_PDOFFSET20IN40M2GCORE3, 0xffff},
+
/* power per rate */
{"mcsbw205gx1po", 0xfffff000, SRFL_MORE, SROM12_MCSBW205GX1PO, 0xffff},
{"", 0xfffff000, 0, SROM12_MCSBW205GX1PO_1, 0xffff},
{"rpcal5gb2core3", 0xffffe000, 0, SROM13_RPCAL5GB23CORE3, 0x00ff},
{"rpcal5gb3core3", 0xffffe000, 0, SROM13_RPCAL5GB23CORE3, 0xff00},
- {"sw_txchain_mask", 0xffffe000, 0, SROM13_SW_TXRX_MASK, 0x000f},
- {"sw_rxchain_mask", 0xffffe000, 0, SROM13_SW_TXRX_MASK, 0x00f0},
-
{"eu_edthresh2g", 0x00002000, 0, SROM13_EU_EDCRSTH, 0x00ff},
{"eu_edthresh5g", 0x00002000, 0, SROM13_EU_EDCRSTH, 0xff00},
{"rxgains5ghtrisoa3", 0xffffe000, 0, SROM13_RXGAINS1CORE3, 0x7800},
{"rxgains5ghtrelnabypa3", 0xffffe000, 0, SROM13_RXGAINS1CORE3, 0x8000},
- /* pdoffset */
- {"pdoffset20in40m5gcore3", 0xffffe000, 0, SROM13_PDOFFSET20IN40M5GCORE3, 0xffff},
- {"pdoffset20in40m5gcore3_1", 0xffffe000, 0, SROM13_PDOFFSET20IN40M5GCORE3_1, 0xffff},
- {"pdoffset20in80m5gcore3", 0xffffe000, 0, SROM13_PDOFFSET20IN80M5GCORE3, 0xffff},
- {"pdoffset20in80m5gcore3_1", 0xffffe000, 0, SROM13_PDOFFSET20IN80M5GCORE3_1, 0xffff},
- {"pdoffset40in80m5gcore3", 0xffffe000, 0, SROM13_PDOFFSET40IN80M5GCORE3, 0xffff},
- {"pdoffset40in80m5gcore3_1", 0xffffe000, 0, SROM13_PDOFFSET40IN80M5GCORE3_1, 0xffff},
-
- {"pdoffset20in40m2g", 0xffffe000, 0, SROM13_PDOFFSET20IN40M2G, 0xffff},
- {"pdoffset20in40m2gcore3", 0xffffe000, 0, SROM13_PDOFFSET20IN40M2GCORE3, 0xffff},
- {"pdoffsetcck20m", 0xffffe000, 0, SROM13_PDOFF_2G_CCK_20M, 0xffff},
-
/* power per rate */
{"mcs1024qam2gpo", 0xffffe000, 0, SROM13_MCS1024QAM2GPO, 0xffff},
{"mcs1024qam5glpo", 0xffffe000, SRFL_MORE, SROM13_MCS1024QAM5GLPO, 0xffff},
{"sb20in40hrlrpox", 0xffffe000, 0, SROM13_SB20IN40HRLRPOX, 0xffff},
+ {"pdoffset20in40m2g", 0xffffe000, 0, SROM13_PDOFFSET20IN40M2G, 0xffff},
+ {"pdoffset20in40m2gcore3", 0xffffe000, 0, SROM13_PDOFFSET20IN40M2GCORE3, 0xffff},
+
+ {"pdoffset20in40m5gcore3", 0xffffe000, SRFL_MORE, SROM13_PDOFFSET20IN40M5GCORE3, 0xffff},
+ {"", 0xffffe000, 0, SROM13_PDOFFSET20IN40M5GCORE3_1, 0xffff},
+ {"pdoffset40in80m5gcore3", 0xffffe000, SRFL_MORE, SROM13_PDOFFSET40IN80M5GCORE3, 0xffff},
+ {"", 0xffffe000, 0, SROM13_PDOFFSET40IN80M5GCORE3_1, 0xffff},
+ {"pdoffset20in80m5gcore3", 0xffffe000, SRFL_MORE, SROM13_PDOFFSET20IN80M5GCORE3, 0xffff},
+ {"", 0xffffe000, 0, SROM13_PDOFFSET20IN80M5GCORE3_1, 0xffff},
+
{"swctrlmap4_cfg", 0xffffe000, 0, SROM13_SWCTRLMAP4_CFG, 0xffff},
{"swctrlmap4_TX2g_fem3to0", 0xffffe000, 0, SROM13_SWCTRLMAP4_TX2G_FEM3TO0, 0xffff},
{"swctrlmap4_RX2g_fem3to0", 0xffffe000, 0, SROM13_SWCTRLMAP4_RX2G_FEM3TO0, 0xffff},
{"swctrlmap4_misc5g_fem7to4", 0xffffe000, 0, SROM13_SWCTRLMAP4_MISC5G_FEM7TO4, 0xffff},
{NULL, 0, 0, 0, 0}
};
-#endif /* !defined(SROM15_MEMOPT) */
-
-static const sromvar_t pci_srom15vars[] = {
- {"macaddr", 0x00008000, SRFL_ETHADDR, SROM15_MACHI, 0xffff},
- {"caldata_offset", 0x00008000, 0, SROM15_CAL_OFFSET_LOC, 0xffff},
- {"boardrev", 0x00008000, SRFL_PRHEX, SROM15_BRDREV, 0xffff},
- {"ccode", 0x00008000, SRFL_CCODE, SROM15_CCODE, 0xffff},
- {"regrev", 0x00008000, 0, SROM15_REGREV, 0xffff},
- {NULL, 0, 0, 0, 0}
-};
-
-static const sromvar_t pci_srom16vars[] = {
- {"macaddr", 0x00010000, SRFL_ETHADDR, SROM16_MACHI, 0xffff},
- {"caldata_offset", 0x00010000, 0, SROM16_CALDATA_OFFSET_LOC, 0xffff},
- {"boardrev", 0x00010000, 0, SROM16_BOARDREV, 0xffff},
- {"ccode", 0x00010000, 0, SROM16_CCODE, 0xffff},
- {"regrev", 0x00010000, 0, SROM16_REGREV, 0xffff},
- {NULL, 0, 0, 0, 0}
-};
-
-static const sromvar_t pci_srom17vars[] = {
- {"boardrev", 0x00020000, SRFL_PRHEX, SROM17_BRDREV, 0xffff},
- {"macaddr", 0x00020000, SRFL_ETHADDR, SROM17_MACADDR, 0xffff},
- {"ccode", 0x00020000, SRFL_CCODE, SROM17_CCODE, 0xffff},
- {"caldata_offset", 0x00020000, 0, SROM17_CALDATA, 0xffff},
- {"gain_cal_temp", 0x00020000, SRFL_PRHEX, SROM17_GCALTMP, 0xffff},
- {"rssi_delta_2gb0_c0", 0x00020000, PRHEX_N_MORE, SROM17_C0SRD202G, 0xffff},
- {"", 0x00020000, 0, SROM17_C0SRD202G_1, 0xffff},
- {"rssi_delta_5gl_c0", 0x00020000, PRHEX_N_MORE, SROM17_C0SRD205GL, 0xffff},
- {"", 0x00020000, 0, SROM17_C0SRD205GL_1, 0xffff},
- {"rssi_delta_5gml_c0", 0x00020000, PRHEX_N_MORE, SROM17_C0SRD205GML, 0xffff},
- {"", 0x00020000, 0, SROM17_C0SRD205GML_1, 0xffff},
- {"rssi_delta_5gmu_c0", 0x00020000, PRHEX_N_MORE, SROM17_C0SRD205GMU, 0xffff},
- {"", 0x00020000, 0, SROM17_C0SRD205GMU_1, 0xffff},
- {"rssi_delta_5gh_c0", 0x00020000, PRHEX_N_MORE, SROM17_C0SRD205GH, 0xffff},
- {"", 0x00020000, 0, SROM17_C0SRD205GH_1, 0xffff},
- {"rssi_delta_2gb0_c1", 0x00020000, PRHEX_N_MORE, SROM17_C1SRD202G, 0xffff},
- {"", 0x00020000, 0, SROM17_C1SRD202G_1, 0xffff},
- {"rssi_delta_5gl_c1", 0x00020000, PRHEX_N_MORE, SROM17_C1SRD205GL, 0xffff},
- {"", 0x00020000, 0, SROM17_C1SRD205GL_1, 0xffff},
- {"rssi_delta_5gml_c1", 0x00020000, PRHEX_N_MORE, SROM17_C1SRD205GML, 0xffff},
- {"", 0x00020000, 0, SROM17_C1SRD205GML_1, 0xffff},
- {"rssi_delta_5gmu_c1", 0x00020000, PRHEX_N_MORE, SROM17_C1SRD205GMU, 0xffff},
- {"", 0x00020000, 0, SROM17_C1SRD205GMU_1, 0xffff},
- {"rssi_delta_5gh_c1", 0x00020000, PRHEX_N_MORE, SROM17_C1SRD205GH, 0xffff},
- {"", 0x00020000, 0, SROM17_C1SRD205GH_1, 0xffff},
- {"txpa_trim_magic", 0x00020000, PRHEX_N_MORE, SROM17_TRAMMAGIC, 0xffff},
- {"", 0x00020000, 0, SROM17_TRAMMAGIC_1, 0xffff},
- {"txpa_trim_data", 0x00020000, SRFL_PRHEX, SROM17_TRAMDATA, 0xffff},
- {NULL, 0, 0, 0, 0x00}
-};
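The tables above and below share the {name, revmask, flags, off, mask} layout noted in the header comment; the sketch below shows roughly how a reader walks such a table. It is illustrative only: the real parser also shifts each value down by its mask and honours the SRFL_* flags, which are omitted here.

/* Walk a sromvar table: the NULL name is the end sentinel, revmask selects
 * which SROM revisions an entry applies to, and off/mask locate the value
 * in the 16-bit SROM image.
 */
static void example_dump_vars(const sromvar_t *tab, const uint16 *srom, uint sromrev)
{
	const sromvar_t *v;

	for (v = tab; v->name != NULL; v++) {
		if ((v->revmask & (1u << sromrev)) == 0)
			continue;
		printf("%s=0x%x\n", v->name, srom[v->off] & v->mask);
	}
}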
static const sromvar_t perpath_pci_sromvars[] = {
{"maxp2ga", 0x000000f0, 0, SROM4_2G_ITT_MAXP, 0x00ff},
{NULL, 0, 0, 0, 0}
};
-#if !defined(PHY_TYPE_N)
+#if !(defined(PHY_TYPE_HT) && defined(PHY_TYPE_N))
+#define PHY_TYPE_HT 7 /* HT-Phy value */
#define PHY_TYPE_N 4 /* N-Phy value */
#endif /* !(defined(PHY_TYPE_HT) && defined(PHY_TYPE_N)) */
#if !defined(PHY_TYPE_AC)
} pavars_t;
static const pavars_t pavars[] = {
+ /* HTPHY */
+ {PHY_TYPE_HT, WL_CHAN_FREQ_RANGE_2G, 0, "pa2gw0a0 pa2gw1a0 pa2gw2a0"},
+ {PHY_TYPE_HT, WL_CHAN_FREQ_RANGE_2G, 1, "pa2gw0a1 pa2gw1a1 pa2gw2a1"},
+ {PHY_TYPE_HT, WL_CHAN_FREQ_RANGE_2G, 2, "pa2gw0a2 pa2gw1a2 pa2gw2a2"},
+ {PHY_TYPE_HT, WL_CHAN_FREQ_RANGE_5G_BAND0, 0, "pa5glw0a0 pa5glw1a0 pa5glw2a0"},
+ {PHY_TYPE_HT, WL_CHAN_FREQ_RANGE_5G_BAND0, 1, "pa5glw0a1 pa5glw1a1 pa5glw2a1"},
+ {PHY_TYPE_HT, WL_CHAN_FREQ_RANGE_5G_BAND0, 2, "pa5glw0a2 pa5glw1a2 pa5glw2a2"},
+ {PHY_TYPE_HT, WL_CHAN_FREQ_RANGE_5G_BAND1, 0, "pa5gw0a0 pa5gw1a0 pa5gw2a0"},
+ {PHY_TYPE_HT, WL_CHAN_FREQ_RANGE_5G_BAND1, 1, "pa5gw0a1 pa5gw1a1 pa5gw2a1"},
+ {PHY_TYPE_HT, WL_CHAN_FREQ_RANGE_5G_BAND1, 2, "pa5gw0a2 pa5gw1a2 pa5gw2a2"},
+ {PHY_TYPE_HT, WL_CHAN_FREQ_RANGE_5G_BAND2, 0, "pa5ghw0a0 pa5ghw1a0 pa5ghw2a0"},
+ {PHY_TYPE_HT, WL_CHAN_FREQ_RANGE_5G_BAND2, 1, "pa5ghw0a1 pa5ghw1a1 pa5ghw2a1"},
+ {PHY_TYPE_HT, WL_CHAN_FREQ_RANGE_5G_BAND2, 2, "pa5ghw0a2 pa5ghw1a2 pa5ghw2a2"},
+ {PHY_TYPE_HT, WL_CHAN_FREQ_RANGE_5G_BAND3, 0, "pa5gw0a3 pa5gw1a3 pa5gw2a3"},
+ {PHY_TYPE_HT, WL_CHAN_FREQ_RANGE_5G_BAND3, 1, "pa5glw0a3 pa5glw1a3 pa5glw2a3"},
+ {PHY_TYPE_HT, WL_CHAN_FREQ_RANGE_5G_BAND3, 2, "pa5ghw0a3 pa5ghw1a3 pa5ghw2a3"},
/* NPHY */
{PHY_TYPE_N, WL_CHAN_FREQ_RANGE_2G, 0, "pa2gw0a0 pa2gw1a0 pa2gw2a0"},
{PHY_TYPE_N, WL_CHAN_FREQ_RANGE_2G, 1, "pa2gw0a1 pa2gw1a1 pa2gw2a1"},
{PHY_TYPE_NULL, 0, 0, ""}
};
+
static const pavars_t pavars_SROM12[] = {
/* ACPHY */
{PHY_TYPE_AC, WL_CHAN_FREQ_RANGE_2G, 0, "pa2ga0"},
{HNBU_FEM_CFG, 0xfffff800, 5, "0femctrl 0papdcap2g 0tworangetssi2g 0pdgain2g "
"0epagain2g 0tssiposslope2g 0gainctrlsph 0papdcap5g 0tworangetssi5g 0pdgain5g 0epagain5g "
"0tssiposslope5g"}, /* special case */
- {HNBU_ACPA_C0, 0x00001800, 39, "2subband5gver 2maxp2ga0 2*3pa2ga0 "
+ {HNBU_ACPA_C0, 0xfffff800, 39, "2subband5gver 2maxp2ga0 2*3pa2ga0 "
"1*4maxp5ga0 2*12pa5ga0"},
- {HNBU_ACPA_C1, 0x00001800, 37, "2maxp2ga1 2*3pa2ga1 1*4maxp5ga1 2*12pa5ga1"},
- {HNBU_ACPA_C2, 0x00001800, 37, "2maxp2ga2 2*3pa2ga2 1*4maxp5ga2 2*12pa5ga2"},
+ {HNBU_ACPA_C1, 0xfffff800, 37, "2maxp2ga1 2*3pa2ga1 1*4maxp5ga1 2*12pa5ga1"},
+ {HNBU_ACPA_C2, 0xfffff800, 37, "2maxp2ga2 2*3pa2ga2 1*4maxp5ga2 2*12pa5ga2"},
{HNBU_MEAS_PWR, 0xfffff800, 5, "1measpower 1measpower1 1measpower2 2rawtempsense"},
{HNBU_PDOFF, 0xfffff800, 13, "2pdoffset40ma0 2pdoffset40ma1 2pdoffset40ma2 "
"2pdoffset80ma0 2pdoffset80ma1 2pdoffset80ma2"},
"2tx_duty_cycle_thresh_40_5g 2tx_duty_cycle_ofdm_80_5g 2tx_duty_cycle_thresh_80_5g"},
{HNBU_PDOFF_2G, 0xfffff800, 3, "0pdoffset2g40ma0 0pdoffset2g40ma1 "
"0pdoffset2g40ma2 0pdoffset2g40mvalid"},
- {HNBU_ACPA_CCK_C0, 0xfffff800, 7, "2*3pa2gccka0"},
- {HNBU_ACPA_CCK_C1, 0xfffff800, 7, "2*3pa2gccka1"},
+ {HNBU_ACPA_CCK, 0xfffff800, 7, "2*3pa2gccka0"},
{HNBU_ACPA_40, 0xfffff800, 25, "2*12pa5gbw40a0"},
{HNBU_ACPA_80, 0xfffff800, 25, "2*12pa5gbw80a0"},
{HNBU_ACPA_4080, 0xfffff800, 49, "2*12pa5gbw4080a0 2*12pa5gbw4080a1"},
- {HNBU_ACPA_4X4C0, 0xffffe000, 23, "1maxp2ga0 2*4pa2ga0 2*4pa2g40a0 "
- "1maxp5gb0a0 1maxp5gb1a0 1maxp5gb2a0 1maxp5gb3a0 1maxp5gb4a0"},
- {HNBU_ACPA_4X4C1, 0xffffe000, 23, "1maxp2ga1 2*4pa2ga1 2*4pa2g40a1 "
- "1maxp5gb0a1 1maxp5gb1a1 1maxp5gb2a1 1maxp5gb3a1 1maxp5gb4a1"},
- {HNBU_ACPA_4X4C2, 0xffffe000, 23, "1maxp2ga2 2*4pa2ga2 2*4pa2g40a2 "
- "1maxp5gb0a2 1maxp5gb1a2 1maxp5gb2a2 1maxp5gb3a2 1maxp5gb4a2"},
- {HNBU_ACPA_4X4C3, 0xffffe000, 23, "1maxp2ga3 2*4pa2ga3 2*4pa2g40a3 "
- "1maxp5gb0a3 1maxp5gb1a3 1maxp5gb2a3 1maxp5gb3a3 1maxp5gb4a3"},
- {HNBU_ACPA_BW20_4X4C0, 0xffffe000, 41, "2*20pa5ga0"},
- {HNBU_ACPA_BW40_4X4C0, 0xffffe000, 41, "2*20pa5g40a0"},
- {HNBU_ACPA_BW80_4X4C0, 0xffffe000, 41, "2*20pa5g80a0"},
- {HNBU_ACPA_BW20_4X4C1, 0xffffe000, 41, "2*20pa5ga1"},
- {HNBU_ACPA_BW40_4X4C1, 0xffffe000, 41, "2*20pa5g40a1"},
- {HNBU_ACPA_BW80_4X4C1, 0xffffe000, 41, "2*20pa5g80a1"},
- {HNBU_ACPA_BW20_4X4C2, 0xffffe000, 41, "2*20pa5ga2"},
- {HNBU_ACPA_BW40_4X4C2, 0xffffe000, 41, "2*20pa5g40a2"},
- {HNBU_ACPA_BW80_4X4C2, 0xffffe000, 41, "2*20pa5g80a2"},
- {HNBU_ACPA_BW20_4X4C3, 0xffffe000, 41, "2*20pa5ga3"},
- {HNBU_ACPA_BW40_4X4C3, 0xffffe000, 41, "2*20pa5g40a3"},
- {HNBU_ACPA_BW80_4X4C3, 0xffffe000, 41, "2*20pa5g80a3"},
{HNBU_SUBBAND5GVER, 0xfffff800, 3, "2subband5gver"},
{HNBU_PAPARAMBWVER, 0xfffff800, 2, "1paparambwver"},
{HNBU_TXBFRPCALS, 0xfffff800, 11,
+++ /dev/null
-/*
- * Broadcom Secure Standard Library.
- *
- * Copyright (C) 1999-2019, Broadcom.
- *
- * Unless you and Broadcom execute a separate written software license
- * agreement governing use of this software, this software is licensed to you
- * under the terms of the GNU General Public License version 2 (the "GPL"),
- * available at http://www.broadcom.com/licenses/GPLv2.php, with the
- * following added to such license:
- *
- * As a special exception, the copyright holders of this software give you
- * permission to link this software with independent modules, and to copy and
- * distribute the resulting executable under terms of your choice, provided that
- * you also meet, for each linked independent module, the terms and conditions of
- * the license of that module. An independent module is a module which is not
- * derived from this software. The special exception does not apply to any
- * modifications of the software.
- *
- * Notwithstanding the above, under no circumstances may you combine this
- * software in any way with any other Broadcom software provided under a license
- * other than the GPL, without Broadcom's express prior written consent.
- *
- *
- * <<Broadcom-WL-IPTag/Open:>>
- *
- * * $Id $
- */
-
-#ifndef _bcmstdlib_s_h_
-#define _bcmstdlib_s_h_
-
-#ifndef BWL_NO_INTERNAL_STDLIB_SUPPORT
-#if !defined(__STDC_WANT_SECURE_LIB__) && !(defined(__STDC_LIB_EXT1__) && \
- defined(__STDC_WANT_LIB_EXT1__))
-extern int memmove_s(void *dest, size_t destsz, const void *src, size_t n);
-extern int memcpy_s(void *dest, size_t destsz, const void *src, size_t n);
-extern int memset_s(void *dest, size_t destsz, int c, size_t n);
-#endif /* !__STDC_WANT_SECURE_LIB__ && !(__STDC_LIB_EXT1__ && __STDC_WANT_LIB_EXT1__) */
-#if !defined(FREEBSD) && !defined(BCM_USE_PLATFORM_STRLCPY)
-extern size_t strlcpy(char *dest, const char *src, size_t size);
-#endif // endif
-extern size_t strlcat_s(char *dest, const char *src, size_t size);
-#endif /* !BWL_NO_INTERNAL_STDLIB_SUPPORT */
-#endif /* _bcmstdlib_s_h_ */
/*
* Fundamental constants relating to TCP Protocol
*
- * Copyright (C) 1999-2019, Broadcom.
- *
+ * Copyright (C) 1999-2017, Broadcom Corporation
+ *
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
* under the terms of the GNU General Public License version 2 (the "GPL"),
* available at http://www.broadcom.com/licenses/GPLv2.php, with the
* following added to such license:
- *
+ *
* As a special exception, the copyright holders of this software give you
* permission to link this software with independent modules, and to copy and
* distribute the resulting executable under terms of your choice, provided that
* the license of that module. An independent module is a module which is not
* derived from this software. The special exception does not apply to any
* modifications of the software.
- *
+ *
* Notwithstanding the above, under no circumstances may you combine this
* software in any way with any other Broadcom software provided under a license
* other than the GPL, without Broadcom's express prior written consent.
#ifndef _TYPEDEFS_H_
#include <typedefs.h>
-#endif // endif
+#endif
/* This marks the start of a packed structure section. */
#include <packed_section_start.h>
+
#define TCP_SRC_PORT_OFFSET 0 /* TCP source port offset */
#define TCP_DEST_PORT_OFFSET 2 /* TCP dest port offset */
#define TCP_SEQ_NUM_OFFSET 4 /* TCP sequence number offset */
+++ /dev/null
-/*
- * TLV and XTLV support
- *
- * Copyright (C) 1999-2019, Broadcom.
- *
- * Unless you and Broadcom execute a separate written software license
- * agreement governing use of this software, this software is licensed to you
- * under the terms of the GNU General Public License version 2 (the "GPL"),
- * available at http://www.broadcom.com/licenses/GPLv2.php, with the
- * following added to such license:
- *
- * As a special exception, the copyright holders of this software give you
- * permission to link this software with independent modules, and to copy and
- * distribute the resulting executable under terms of your choice, provided that
- * you also meet, for each linked independent module, the terms and conditions of
- * the license of that module. An independent module is a module which is not
- * derived from this software. The special exception does not apply to any
- * modifications of the software.
- *
- * Notwithstanding the above, under no circumstances may you combine this
- * software in any way with any other Broadcom software provided under a license
- * other than the GPL, without Broadcom's express prior written consent.
- *
- * <<Broadcom-WL-IPTag/Open:>>
- *
- * $Id: $
- */
-
-#ifndef _bcmtlv_h_
-#define _bcmtlv_h_
-
-#include <typedefs.h>
-
-#ifdef __cplusplus
-extern "C" {
-#endif /* __cplusplus */
-
-/* begin tlvs - used in 802.11 IEs etc. */
-
-/* type(aka id)/length/value buffer triple */
-typedef struct bcm_tlv {
- uint8 id;
- uint8 len;
- uint8 data[1];
-} bcm_tlv_t;
-
-/* size of tlv including data */
-#define BCM_TLV_SIZE(_tlv) ((_tlv) ? (OFFSETOF(bcm_tlv_t, data) + (_tlv)->len) : 0)
-
-/* get next tlv - no length checks */
-#define BCM_TLV_NEXT(_tlv) (bcm_tlv_t *)((uint8 *)(_tlv)+ BCM_TLV_SIZE(_tlv))
-
-/* tlv length is restricted to 1 byte */
-#define BCM_TLV_MAX_DATA_SIZE (255)
-
-/* tlv header - two bytes */
-#define BCM_TLV_HDR_SIZE (OFFSETOF(bcm_tlv_t, data))
-
-/* Check that bcm_tlv_t fits into the given buffer len */
-#define bcm_valid_tlv(elt, buflen) (\
- ((int)(buflen) >= (int)BCM_TLV_HDR_SIZE) && \
- ((int)(buflen) >= (int)(BCM_TLV_HDR_SIZE + (elt)->len)))
-
-/* type(aka id)/length/ext/value buffer */
-typedef struct bcm_tlv_ext {
- uint8 id;
- uint8 len;
- uint8 ext;
- uint8 data[1];
-} bcm_tlv_ext_t;
-
-/* get next tlv_ext - no length checks */
-#define BCM_TLV_EXT_NEXT(_tlv_ext) \
- (bcm_tlv_ext_t *)((uint8 *)(_tlv_ext)+ BCM_TLV_EXT_SIZE(_tlv_ext))
-
-/* tlv_ext length is restricted to 1 byte */
-#define BCM_TLV_EXT_MAX_DATA_SIZE (254)
-
-/* tlv_ext header - three bytes */
-#define BCM_TLV_EXT_HDR_SIZE (OFFSETOF(bcm_tlv_ext_t, data))
-
-/* size of tlv_ext including data */
-#define BCM_TLV_EXT_SIZE(_tlv_ext) (BCM_TLV_EXT_HDR_SIZE + (_tlv_ext)->len)
-
-/* find the next tlv */
-bcm_tlv_t *bcm_next_tlv(const bcm_tlv_t *elt, uint *buflen);
-
-/* move buffer/buflen up to the given tlv, or set to NULL/0 on error */
-void bcm_tlv_buffer_advance_to(const bcm_tlv_t *elt, const uint8 **buffer, uint *buflen);
-
-/* move buffer/buflen past the given tlv, or set to NULL/0 on error */
-void bcm_tlv_buffer_advance_past(const bcm_tlv_t *elt, const uint8 **buffer, uint *buflen);
-
-/* find the tlv for a given id */
-bcm_tlv_t *bcm_parse_tlvs(const void *buf, uint buflen, uint key);
-
-/*
- * Traverse tlvs and return pointer to the first tlv that
- * matches the key. Return NULL if not found or tlv len < min_bodylen
- */
-bcm_tlv_t *bcm_parse_tlvs_min_bodylen(const void *buf, int buflen, uint key, int min_bodylen);
-
-/* parse tlvs for dot11 - same as parse_tlvs but supports 802.11 id extension */
-bcm_tlv_t *bcm_parse_tlvs_dot11(const void *buf, int buflen, uint key, bool id_ext);
-
-/* same as parse_tlvs, but stops when found id > key */
-const bcm_tlv_t *bcm_parse_ordered_tlvs(const void *buf, int buflen, uint key);
-
-/* find a tlv with DOT11_MNG_PROPR_ID as id, and the given oui and type */
- bcm_tlv_t *bcm_find_vendor_ie(const void *tlvs, uint tlvs_len, const char *voui,
- uint8 *type, uint type_len);
-
-/* write tlv at dst and return next tlv ptr */
-uint8 *bcm_write_tlv(int type, const void *data, int datalen, uint8 *dst);
-
-/* write tlv_ext at dst and return next tlv ptr */
-uint8 *bcm_write_tlv_ext(uint8 type, uint8 ext, const void *data, uint8 datalen, uint8 *dst);
-
-/* write tlv at dst if space permits and return next tlv ptr */
-uint8 *bcm_write_tlv_safe(int type, const void *data, int datalen, uint8 *dst,
- int dst_maxlen);
-
-/* copy a tlv and return next tlv ptr */
-uint8 *bcm_copy_tlv(const void *src, uint8 *dst);
-
-/* copy a tlv if space permits and return next tlv ptr */
-uint8 *bcm_copy_tlv_safe(const void *src, uint8 *dst, int dst_maxlen);
-
-/* end tlvs */
-
-/* begin xtlv - used for iovars, nan attributes etc. */
-
-/* bcm type(id), length, value with w/16 bit id/len. The structure below
- * is nominal, and is used to support variable length id and type. See
- * xtlv options below.
- */
-typedef struct bcm_xtlv {
- uint16 id;
- uint16 len;
- uint8 data[1];
-} bcm_xtlv_t;
-
-/* xtlv options */
-#define BCM_XTLV_OPTION_NONE 0x0000
-#define BCM_XTLV_OPTION_ALIGN32 0x0001 /* 32bit alignment of type.len.data */
-#define BCM_XTLV_OPTION_IDU8 0x0002 /* shorter id */
-#define BCM_XTLV_OPTION_LENU8 0x0004 /* shorted length */
-#define BCM_XTLV_OPTION_IDBE 0x0008 /* big endian format id */
-#define BCM_XTLV_OPTION_LENBE 0x0010 /* big endian format length */
-typedef uint16 bcm_xtlv_opts_t;
-
-/* header size. depends on options. Macros names ending w/ _EX are where
- * options are explicitly specified that may be less common. The ones
- * without use default values that correspond to ...OPTION_NONE
- */
-
-/* xtlv header size depends on options */
-#define BCM_XTLV_HDR_SIZE 4
-#define BCM_XTLV_HDR_SIZE_EX(_opts) bcm_xtlv_hdr_size(_opts)
-
-/* note: xtlv len only stores the value's length without padding */
-#define BCM_XTLV_LEN(_elt) ltoh16_ua(&(_elt)->len)
-#define BCM_XTLV_LEN_EX(_elt, _opts) bcm_xtlv_len(_elt, _opts)
-
-#define BCM_XTLV_ID(_elt) ltoh16_ua(&(_elt)->id)
-#define BCM_XTLV_ID_EX(_elt, _opts) bcm_xtlv_id(_elt, _opts)
-
-/* entire size of the XTLV including header, data, and optional padding */
-#define BCM_XTLV_SIZE(elt, opts) bcm_xtlv_size(elt, opts)
-#define BCM_XTLV_SIZE_EX(_elt, _opts) bcm_xtlv_size(_elt, _opts)
-
-/* max xtlv data size */
-#define BCM_XTLV_MAX_DATA_SIZE 65535
-#define BCM_XTLV_MAX_DATA_SIZE_EX(_opts) ((_opts & BCM_XTLV_OPTION_LENU8) ? \
- 255 : 65535)
-
-/* descriptor of xtlv data, packing(src) and unpacking(dst) support */
-typedef struct {
- uint16 type;
- uint16 len;
- void *ptr; /* ptr to memory location */
-} xtlv_desc_t;
-
-/* xtlv buffer - packing/unpacking support */
-struct bcm_xtlvbuf {
- bcm_xtlv_opts_t opts;
- uint16 size;
- uint8 *head; /* point to head of buffer */
- uint8 *buf; /* current position of buffer */
- /* allocated buffer may follow, but not necessarily */
-};
-typedef struct bcm_xtlvbuf bcm_xtlvbuf_t;
-
-/* valid xtlv ? */
-bool bcm_valid_xtlv(const bcm_xtlv_t *elt, int buf_len, bcm_xtlv_opts_t opts);
-
-/* return the next xtlv element, and update buffer len (remaining). Buffer length
- * updated includes padding as specified by options
- */
-bcm_xtlv_t *bcm_next_xtlv(const bcm_xtlv_t *elt, int *buf_len, bcm_xtlv_opts_t opts);
-
-/* initialize an xtlv buffer. Use options specified for packing/unpacking using
- * the buffer. Caller is responsible for allocating both buffers.
- */
-int bcm_xtlv_buf_init(bcm_xtlvbuf_t *tlv_buf, uint8 *buf, uint16 len,
- bcm_xtlv_opts_t opts);
-
-/* length of data in the xtlv buffer */
-uint16 bcm_xtlv_buf_len(struct bcm_xtlvbuf *tbuf);
-
-/* remaining space in the xtlv buffer */
-uint16 bcm_xtlv_buf_rlen(struct bcm_xtlvbuf *tbuf);
-
-/* write ptr */
-uint8 *bcm_xtlv_buf(struct bcm_xtlvbuf *tbuf);
-
-/* head */
-uint8 *bcm_xtlv_head(struct bcm_xtlvbuf *tbuf);
-
-/* put a data buffer into xtlv */
-int bcm_xtlv_put_data(bcm_xtlvbuf_t *tbuf, uint16 type, const uint8 *data, int n);
-
-/* put one or more u16 elts into xtlv */
-int bcm_xtlv_put16(bcm_xtlvbuf_t *tbuf, uint16 type, const uint16 *data, int n);
-
-/* put one or more u32 elts into xtlv */
-int bcm_xtlv_put32(bcm_xtlvbuf_t *tbuf, uint16 type, const uint32 *data, int n);
-
-/* put one or more u64 elts into xtlv */
-int bcm_xtlv_put64(bcm_xtlvbuf_t *tbuf, uint16 type, const uint64 *data, int n);
-
-/* note: there is no get equivalent of integer unpacking, because bcmendian.h
- * can be used directly using pointers returned in the buffer being processed.
- */
-
-/* unpack a single xtlv entry, advances buffer and copies data to dst_data on match
- * type and length match must be exact
- */
-int bcm_unpack_xtlv_entry(const uint8 **buf, uint16 expected_type, uint16 expected_len,
- uint8 *dst_data, bcm_xtlv_opts_t opts);
-
-/* packs an xtlv into buffer, and advances buffer, decrements buffer length.
- * buffer length is checked and must be >= size of xtlv - otherwise BCME_BADLEN
- */
-int bcm_pack_xtlv_entry(uint8 **buf, uint16 *buflen, uint16 type, uint16 len,
- const uint8 *src_data, bcm_xtlv_opts_t opts);
-
-/* accessors and lengths for element given options */
-int bcm_xtlv_size(const bcm_xtlv_t *elt, bcm_xtlv_opts_t opts);
-int bcm_xtlv_hdr_size(bcm_xtlv_opts_t opts);
-int bcm_xtlv_len(const bcm_xtlv_t *elt, bcm_xtlv_opts_t opts);
-int bcm_xtlv_id(const bcm_xtlv_t *elt, bcm_xtlv_opts_t opts);
-int bcm_xtlv_size_for_data(int dlen, bcm_xtlv_opts_t opts);
-
-/* compute size needed for number of tlvs whose total data len is given */
-#define BCM_XTLV_SIZE_FOR_TLVS(_data_len, _num_tlvs, _opts) (\
- bcm_xtlv_size_for_data(_data_len, _opts) + (\
- (_num_tlvs) * BCM_XTLV_HDR_SIZE_EX(_opts)))
-
-/* unsafe copy xtlv */
-#define BCM_XTLV_BCOPY(_src, _dst, _opts) \
- bcm_xtlv_bcopy(_src, _dst, BCM_XTLV_MAX_DATA_SIZE_EX(_opts), \
- BCM_XTLV_MAX_DATA_SIZE_EX(_opts), _opts)
-
-/* copy xtlv - note: src->dst bcopy order - to be compatible w/ tlv version */
-bcm_xtlv_t* bcm_xtlv_bcopy(const bcm_xtlv_t *src, bcm_xtlv_t *dst,
- int src_buf_len, int dst_buf_len, bcm_xtlv_opts_t opts);
-
-/* callback for unpacking xtlv from a buffer into context. */
-typedef int (bcm_xtlv_unpack_cbfn_t)(void *ctx, const uint8 *buf,
- uint16 type, uint16 len);
-
-/* unpack a tlv buffer using buffer, options, and callback */
-int bcm_unpack_xtlv_buf(void *ctx, const uint8 *buf, uint16 buflen,
- bcm_xtlv_opts_t opts, bcm_xtlv_unpack_cbfn_t *cbfn);
-
-/* unpack a set of tlvs from the buffer using provided xtlv descriptors */
-int bcm_unpack_xtlv_buf_to_mem(uint8 *buf, int *buflen, xtlv_desc_t *items,
- bcm_xtlv_opts_t opts);
-
-/* pack a set of tlvs into buffer using provided xtlv descriptors */
-int bcm_pack_xtlv_buf_from_mem(uint8 **buf, uint16 *buflen,
- const xtlv_desc_t *items, bcm_xtlv_opts_t opts);
-
-/* return data pointer and data length of a given id from xtlv buffer
- * data_len may be NULL
- */
-const uint8* bcm_get_data_from_xtlv_buf(const uint8 *tlv_buf, uint16 buflen,
- uint16 id, uint16 *datalen, bcm_xtlv_opts_t opts);
-
-/* callback to return next tlv id and len to pack, if there is more tlvs to come and
- * options e.g. alignment
- */
-typedef bool (*bcm_pack_xtlv_next_info_cbfn_t)(void *ctx, uint16 *tlv_id, uint16 *tlv_len);
-
-/* callback to pack the tlv into length validated buffer */
-typedef void (*bcm_pack_xtlv_pack_next_cbfn_t)(void *ctx,
- uint16 tlv_id, uint16 tlv_len, uint8* buf);
-
-/* pack a set of tlvs into buffer using get_next to iterate */
-int bcm_pack_xtlv_buf(void *ctx, uint8 *tlv_buf, uint16 buflen,
- bcm_xtlv_opts_t opts, bcm_pack_xtlv_next_info_cbfn_t get_next,
- bcm_pack_xtlv_pack_next_cbfn_t pack_next, int *outlen);
-
-/* pack an xtlv. does not do any error checking. if data is not NULL
- * data of given length is copied to buffer (xtlv)
- */
-void bcm_xtlv_pack_xtlv(bcm_xtlv_t *xtlv, uint16 type, uint16 len,
- const uint8 *data, bcm_xtlv_opts_t opts);
-
-/* unpack an xtlv and return ptr to data, and data length */
-void bcm_xtlv_unpack_xtlv(const bcm_xtlv_t *xtlv, uint16 *type, uint16 *len,
- const uint8 **data, bcm_xtlv_opts_t opts);
-
-/* end xtlvs */
-
-/* length value pairs */
-struct bcm_xlv {
- uint16 len;
- uint8 data[1];
-};
-typedef struct bcm_xlv bcm_xlv_t;
-
-struct bcm_xlvp {
- uint16 len;
- uint8 *data;
-};
-typedef struct bcm_xlvp bcm_xlvp_t;
-
-struct bcm_const_xlvp {
- uint16 len;
- const uint8 *data;
-};
-typedef struct bcm_const_xlvp bcm_const_xlvp_t;
-
-/* end length value pairs */
-
-#ifdef __cplusplus
-}
-#endif /* __cplusplus */
-
-#endif /* _bcmtlv_h_ */
/*
* Fundamental constants relating to UDP Protocol
*
- * Copyright (C) 1999-2019, Broadcom.
- *
+ * Copyright (C) 1999-2017, Broadcom Corporation
+ *
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
* under the terms of the GNU General Public License version 2 (the "GPL"),
* available at http://www.broadcom.com/licenses/GPLv2.php, with the
* following added to such license:
- *
+ *
* As a special exception, the copyright holders of this software give you
* permission to link this software with independent modules, and to copy and
* distribute the resulting executable under terms of your choice, provided that
* the license of that module. An independent module is a module which is not
* derived from this software. The special exception does not apply to any
* modifications of the software.
- *
+ *
* Notwithstanding the above, under no circumstances may you combine this
* software in any way with any other Broadcom software provided under a license
* other than the GPL, without Broadcom's express prior written consent.
#ifndef _TYPEDEFS_H_
#include <typedefs.h>
-#endif // endif
+#endif
/* This marks the start of a packed structure section. */
#include <packed_section_start.h>
+
/* UDP header */
#define UDP_DEST_PORT_OFFSET 2 /* UDP dest port offset */
#define UDP_LEN_OFFSET 4 /* UDP length offset */
/*
* Misc useful os-independent macros and functions.
*
- * Copyright (C) 1999-2019, Broadcom.
- *
+ * Copyright (C) 1999-2017, Broadcom Corporation
+ *
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
* under the terms of the GNU General Public License version 2 (the "GPL"),
* available at http://www.broadcom.com/licenses/GPLv2.php, with the
* following added to such license:
- *
+ *
* As a special exception, the copyright holders of this software give you
* permission to link this software with independent modules, and to copy and
* distribute the resulting executable under terms of your choice, provided that
* the license of that module. An independent module is a module which is not
* derived from this software. The special exception does not apply to any
* modifications of the software.
- *
+ *
* Notwithstanding the above, under no circumstances may you combine this
* software in any way with any other Broadcom software provided under a license
* other than the GPL, without Broadcom's express prior written consent.
*
* <<Broadcom-WL-IPTag/Open:>>
*
- * $Id: bcmutils.h 813798 2019-04-08 10:20:21Z $
+ * $Id: bcmutils.h 701785 2017-05-26 11:08:50Z $
*/
#ifndef _bcmutils_h_
#define _bcmutils_h_
-#include <bcmtlv.h>
#ifdef __cplusplus
extern "C" {
-#endif // endif
+#endif
+
#define bcm_strncpy_s(dst, noOfElements, src, count) strncpy((dst), (src), (count))
-#ifdef FREEBSD
-#define bcm_strncat_s(dst, noOfElements, src, count) strcat((dst), (src))
-#else
#define bcm_strncat_s(dst, noOfElements, src, count) strncat((dst), (src), (count))
-#endif /* FREEBSD */
#define bcm_snprintf_s snprintf
#define bcm_sprintf_s snprintf
* and take appropriate error action if 'exp' is still true.
*/
#ifndef SPINWAIT_POLL_PERIOD
-#define SPINWAIT_POLL_PERIOD 10U
-#endif // endif
+#define SPINWAIT_POLL_PERIOD 10
+#endif
#define SPINWAIT(exp, us) { \
- uint countdown = (us) + (SPINWAIT_POLL_PERIOD - 1U); \
- while (((exp) != 0) && (uint)(countdown >= SPINWAIT_POLL_PERIOD)) { \
+ uint countdown = (us) + (SPINWAIT_POLL_PERIOD - 1); \
+ while ((exp) && (countdown >= SPINWAIT_POLL_PERIOD)) { \
OSL_DELAY(SPINWAIT_POLL_PERIOD); \
countdown -= SPINWAIT_POLL_PERIOD; \
} \
extern int ether_isnulladdr(const void *ea);
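A usage sketch for the SPINWAIT() macro above; regs, STATUS_READY and err are placeholders invented for the example.

/* Poll, in SPINWAIT_POLL_PERIOD steps, for up to 2000 us for a (hypothetical)
 * ready bit; if the condition still holds afterwards, treat it as a timeout.
 */
SPINWAIT(((R_REG(osh, &regs->status) & STATUS_READY) == 0), 2000);
if ((R_REG(osh, &regs->status) & STATUS_READY) == 0)
	err = BCME_NOTREADY;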
#define UP_TABLE_MAX ((IPV4_TOS_DSCP_MASK >> IPV4_TOS_DSCP_SHIFT) + 1) /* 64 max */
-#define CORE_SLAVE_PORT_0 0
-#define CORE_SLAVE_PORT_1 1
-#define CORE_BASE_ADDR_0 0
-#define CORE_BASE_ADDR_1 1
/* externs */
/* packet */
extern uint pkttotlen(osl_t *osh, void *p);
extern void *pktlast(osl_t *osh, void *p);
extern uint pktsegcnt(osl_t *osh, void *p);
+extern uint pktsegcnt_war(osl_t *osh, void *p);
extern uint8 *pktdataoffset(osl_t *osh, void *p, uint offset);
extern void *pktoffset(osl_t *osh, void *p, uint offset);
/* Add to adjust 802.1x priority */
extern void pktset8021xprio(void *pkt, int prio);
/* Get priority from a packet and pass it back in scb (or equiv) */
-#define PKTPRIO_VDSCP 0x100 /* DSCP prio found after VLAN tag */
+#define PKTPRIO_VDSCP 0x100 /* DSCP prio found after VLAN tag */
#define PKTPRIO_VLAN 0x200 /* VLAN prio found */
#define PKTPRIO_UPD 0x400 /* DSCP used to update VLAN prio */
#define PKTPRIO_DSCP 0x800 /* DSCP prio found */
#define DSCP_AF21 0x12
#define DSCP_AF22 0x14
#define DSCP_AF23 0x16
-/* CS2: OAM (RFC2474) */
-#define DSCP_CS2 0x10
/* AF3x: Multimedia Streaming (RFC2597) */
#define DSCP_AF31 0x1A
#define DSCP_AF32 0x1C
#define DSCP_AF33 0x1E
-/* CS3: Broadcast Video (RFC2474) */
-#define DSCP_CS3 0x18
-/* VA: VOCIE-ADMIT (RFC5865) */
-#define DSCP_VA 0x2C
/* EF: Telephony (RFC3246) */
#define DSCP_EF 0x2E
-/* CS6: Network Control (RFC2474) */
-#define DSCP_CS6 0x30
-/* CS7: Network Control (RFC2474) */
-#define DSCP_CS7 0x38
extern uint pktsetprio(void *pkt, bool update_vtag);
extern uint pktsetprio_qms(void *pkt, uint8* up_table, bool update_vtag);
extern void bcm_mdelay(uint ms);
/* variable access */
#if defined(BCM_RECLAIM)
-extern bool _nvram_reclaim_enb;
-#define NVRAM_RECLAIM_ENAB() (_nvram_reclaim_enb)
#define NVRAM_RECLAIM_CHECK(name) \
- if (NVRAM_RECLAIM_ENAB() && (bcm_attach_part_reclaimed == TRUE)) { \
+ if (bcm_attach_part_reclaimed == TRUE) { \
*(char*) 0 = 0; /* TRAP */ \
return NULL; \
}
extern int getintvar(char *vars, const char *name);
extern int getintvararray(char *vars, const char *name, int index);
extern int getintvararraysize(char *vars, const char *name);
-
-/* Read an array of values from a possibly slice-specific nvram string */
-extern int get_uint8_vararray_slicespecific(osl_t *osh, char *vars, char *vars_table_accessor,
- const char* name, uint8* dest_array, uint dest_size);
-extern int get_int16_vararray_slicespecific(osl_t *osh, char *vars, char *vars_table_accessor,
- const char* name, int16* dest_array, uint dest_size);
-/* Prepend a slice-specific accessor to an nvram string name */
-extern int get_slicespecific_var_name(osl_t *osh, char *vars_table_accessor,
- const char *name, char **name_out);
-
extern uint getgpiopin(char *vars, char *pin_name, uint def_pin);
#define bcm_perf_enable()
#define bcmstats(fmt)
#if defined(WLTINYDUMP) || defined(WLMSG_INFORM) || defined(WLMSG_ASSOC) || \
defined(WLMSG_PRPKT) || defined(WLMSG_WSEC)
extern int bcm_format_ssid(char* buf, const uchar ssid[], uint ssid_len);
-#endif // endif
+#endif
#endif /* BCMDRIVER */
/* string */
extern int bcm_atoi(const char *s);
extern ulong bcm_strtoul(const char *cp, char **endp, uint base);
-extern uint64 bcm_strtoull(const char *cp, char **endp, uint base);
extern char *bcmstrstr(const char *haystack, const char *needle);
extern char *bcmstrnstr(const char *s, uint s_len, const char *substr, uint substr_len);
extern char *bcmstrcat(char *dest, const char *src);
/* ** driver/apps-shared section ** */
-#define BCME_STRLEN 64 /* Max string length for BCM errors */
-#define VALID_BCMERROR(e) valid_bcmerror(e)
+#define BCME_STRLEN 64 /* Max string length for BCM errors */
+#define VALID_BCMERROR(e) ((e <= 0) && (e >= BCME_LAST))
-#ifdef DBG_BUS
-/** tracks non typical execution paths, use gdb with arm sim + firmware dump to read counters */
-#define DBG_BUS_INC(s, cnt) ((s)->dbg_bus->cnt++)
-#else
-#define DBG_BUS_INC(s, cnt)
-#endif /* DBG_BUS */
/*
* error codes could be added but the defined ones shouldn't be changed/deleted
#define BCME_RXFAIL -39 /* RX failure */
#define BCME_NODEVICE -40 /* Device not present */
#define BCME_NMODE_DISABLED -41 /* NMODE disabled */
-#define BCME_HOFFLOAD_RESIDENT -42 /* offload resident */
+#define BCME_NONRESIDENT -42 /* access to nonresident overlay */
#define BCME_SCANREJECT -43 /* reject scan request */
#define BCME_USAGE_ERROR -44 /* WLCMD usage error */
#define BCME_IOCTL_ERROR -45 /* WLCMD ioctl error */
#define BCME_FRAG_Q_FAILED -58 /* queueing 80211 frag failed */
#define BCME_GET_AF_FAILED -59 /* Get p2p AF pkt failed */
#define BCME_MSCH_NOTREADY -60 /* scheduler not ready */
-#define BCME_IOV_LAST_CMD -61 /* last batched iov sub-command */
-#define BCME_MINIPMU_CAL_FAIL -62 /* MiniPMU cal failed */
-#define BCME_RCAL_FAIL -63 /* Rcal failed */
-#define BCME_LPF_RCCAL_FAIL -64 /* RCCAL failed */
-#define BCME_DACBUF_RCCAL_FAIL -65 /* RCCAL failed */
-#define BCME_VCOCAL_FAIL -66 /* VCOCAL failed */
-#define BCME_BANDLOCKED -67 /* interface is restricted to a band */
-#define BCME_DNGL_DEVRESET -68 /* dongle re-attach during DEVRESET */
-#define BCME_LAST BCME_DNGL_DEVRESET
+#define BCME_LAST BCME_MSCH_NOTREADY
#define BCME_NOTENABLED BCME_DISABLED
#define BCME_IOCTL_PATCH_UNSUPPORTED -9999
#if (BCME_LAST <= BCME_IOCTL_PATCH_UNSUPPORTED)
#error "BCME_LAST <= BCME_IOCTL_PATCH_UNSUPPORTED"
-#endif // endif
+#endif
/* These are collection of BCME Error strings */
#define BCMERRSTRINGTABLE { \
"RX Failure", \
"Device Not Present", \
"NMODE Disabled", \
- "Host Offload in device", \
+ "Nonresident overlay access", \
"Scan Rejected", \
"WLCMD usage error", \
"WLCMD ioctl error", \
"FRAG Q FAILED", \
"GET ActionFrame failed", \
"scheduler not ready", \
- "Last IOV batched sub-cmd", \
- "Mini PMU Cal failed", \
- "R-cal failed", \
- "LPF RC Cal failed", \
- "DAC buf RC Cal failed", \
- "VCO Cal failed", \
- "band locked", \
- "Dongle Devreset", \
}
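The string table above is indexed by the negated BCME_* code (BCME_OK is 0, the rest are negative), which is how the driver's bcmerrorstr() helper uses it; the wrapper below restates that idea and is not copied code.

/* Map a BCME_* code to its descriptive string. */
static const char *example_errstr_table[] = BCMERRSTRINGTABLE;

static const char *example_errstr(int bcmerror)
{
	if (!VALID_BCMERROR(bcmerror))
		return "Unknown Error";
	return example_errstr_table[-bcmerror];	/* -BCME_ERROR == 1, -BCME_BADARG == 2, ... */
}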
#ifndef ABS
#define CONTAINEROF(ptr, type, member) ((type *)((char *)(ptr) - OFFSETOF(type, member)))
#endif /* CONTAINEROF */
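An illustrative use of CONTAINEROF(); the wrapper struct and member are made up for the example.

/* Recover the enclosing struct from a pointer to one of its members. */
struct example_wrap {
	int unit;
	int member;
};

static struct example_wrap *example_wrap_of(int *m)
{
	return CONTAINEROF(m, struct example_wrap, member);
}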
-/* substruct size up to and including a member of the struct */
-#ifndef STRUCT_SIZE_THROUGH
-#define STRUCT_SIZE_THROUGH(sptr, fname) \
- (((uint8*)&((sptr)->fname) - (uint8*)(sptr)) + sizeof((sptr)->fname))
-#endif // endif
-
-/* Extracting the size of element in a structure */
-#define SIZE_OF(type, field) sizeof(((type *)0)->field)
-
#ifndef ARRAYSIZE
-#define ARRAYSIZE(a) (uint32)(sizeof(a) / sizeof(a[0]))
-#endif // endif
+#define ARRAYSIZE(a) (sizeof(a) / sizeof(a[0]))
+#endif
#ifndef ARRAYLAST /* returns pointer to last array element */
#define ARRAYLAST(a) (&a[ARRAYSIZE(a)-1])
-#endif // endif
-
-/* Calculates the required pad size. This is mainly used in register structures */
-#define PADSZ(start, end) ((((end) - (start)) / 4) + 1)
+#endif
/* Reference a function; used to prevent a static function from being optimized out */
extern void *_bcmutils_dummy_fn;
#define clrbit(a, i) (((uint8 *)a)[(i) / NBBY] &= ~(1 << ((i) % NBBY)))
#define isset(a, i) (((const uint8 *)a)[(i) / NBBY] & (1 << ((i) % NBBY)))
#define isclr(a, i) ((((const uint8 *)a)[(i) / NBBY] & (1 << ((i) % NBBY))) == 0)
-#endif // endif
+#endif
#endif /* setbit */
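/* Usage sketch for the byte-array bit macros above (NBBY bits per byte):
 * test bit 'bit' in a caller-owned bitmap and clear it if it was set.
 */
static bool
example_test_and_clear(uint8 *bitmap, uint bit)
{
	bool was_set = (isset(bitmap, bit) != 0);

	if (was_set)
		clrbit(bitmap, bit);
	return was_set;
}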
-
-/* read/write/clear field in a consecutive bits in an octet array.
- * 'addr' is the octet array's start byte address
- * 'size' is the octet array's byte size
- * 'stbit' is the value's start bit offset
- * 'nbits' is the value's bit size
- * This set of utilities are for convenience. Don't use them
- * in time critical/data path as there's a great overhead in them.
- */
-void setbits(uint8 *addr, uint size, uint stbit, uint nbits, uint32 val);
-uint32 getbits(const uint8 *addr, uint size, uint stbit, uint nbits);
-#define clrbits(addr, size, stbit, nbits) setbits(addr, size, stbit, nbits, 0)
-
extern void set_bitrange(void *array, uint start, uint end, uint maxbit);
-extern int bcm_find_fsb(uint32 num);
#define isbitset(a, i) (((a) & (1 << (i))) != 0)
return ((*a >> pos) & MSK); \
}
-DECLARE_MAP_API(2, 4, 1, 15U, 0x0003U) /* setbit2() and getbit2() */
-DECLARE_MAP_API(4, 3, 2, 7U, 0x000FU) /* setbit4() and getbit4() */
-DECLARE_MAP_API(8, 2, 3, 3U, 0x00FFU) /* setbit8() and getbit8() */
+DECLARE_MAP_API(2, 4, 1, 15U, 0x0003) /* setbit2() and getbit2() */
+DECLARE_MAP_API(4, 3, 2, 7U, 0x000F) /* setbit4() and getbit4() */
+DECLARE_MAP_API(8, 2, 3, 3U, 0x00FF) /* setbit8() and getbit8() */
/* basic mux operation - can be optimized on several architectures */
#define MUX(pred, true, false) ((pred) ? (true) : (false))
(ea).octet[5]
#if !defined(SIMPLE_MAC_PRINT)
#define MACDBG "%02x:%02x:%02x:%02x:%02x:%02x"
-#define MAC2STRDBG(ea) CONST_ETHERP_TO_MACF(ea)
+#define MAC2STRDBG(ea) (ea)[0], (ea)[1], (ea)[2], (ea)[3], (ea)[4], (ea)[5]
#else
-#define MACDBG "%02x:xx:xx:xx:x%x:%02x"
-#define MAC2STRDBG(ea) ((uint8*)(ea))[0], (((uint8*)(ea))[4] & 0xf), ((uint8*)(ea))[5]
+#define MACDBG "%02x:%02x:%02x"
+#define MAC2STRDBG(ea) (ea)[0], (ea)[4], (ea)[5]
#endif /* SIMPLE_MAC_PRINT */
-#define MACOUIDBG "%02x:%x:%02x"
-#define MACOUI2STRDBG(ea) ((uint8*)(ea))[0], ((uint8*)(ea))[1] & 0xf, ((uint8*)(ea))[2]
-
-#define MACOUI "%02x:%02x:%02x"
-#define MACOUI2STR(ea) ((uint8*)(ea))[0], ((uint8*)(ea))[1], ((uint8*)(ea))[2]
-
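/* Logging sketch for MACDBG/MAC2STRDBG; 'ea' is assumed to point at a 6-byte
 * MAC address, and printf() stands in for whatever logging macro the caller
 * already uses.
 */
static void
example_log_peer(const uint8 *ea)
{
	printf("peer " MACDBG "\n", MAC2STRDBG(ea));
}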
/* bcm_format_flags() bit description structure */
typedef struct bcm_bit_desc {
uint32 bit;
if (
#ifdef __i386__
1 ||
-#endif // endif
+#endif
(((uintptr)src1 | (uintptr)src2 | (uintptr)dst) & 3) == 0) {
/* ARM CM3 rel time: 1229 (727 if alignment check could be omitted) */
/* x86 supports unaligned. This version runs 6x-9x faster on x86. */
/* externs */
/* crc */
-uint8 hndcrc8(const uint8 *p, uint nbytes, uint8 crc);
-uint16 hndcrc16(const uint8 *p, uint nbytes, uint16 crc);
-uint32 hndcrc32(const uint8 *p, uint nbytes, uint32 crc);
+extern uint8 hndcrc8(uint8 *p, uint nbytes, uint8 crc);
+extern uint16 hndcrc16(uint8 *p, uint nbytes, uint16 crc);
+extern uint32 hndcrc32(uint8 *p, uint nbytes, uint32 crc);
/* format/print */
#if defined(DHD_DEBUG) || defined(WLMSG_PRHDRS) || defined(WLMSG_PRPKT) || \
extern int bcm_format_field(const bcm_bit_desc_ex_t *bd, uint32 field, char* buf, int len);
/* print out which bits in flags are set */
extern int bcm_format_flags(const bcm_bit_desc_t *bd, uint32 flags, char* buf, int len);
-/* print out whcih bits in octet array 'addr' are set. bcm_bit_desc_t:bit is a bit offset. */
-int bcm_format_octets(const bcm_bit_desc_t *bd, uint bdsz,
- const uint8 *addr, uint size, char *buf, int len);
-#endif // endif
+#endif
extern int bcm_format_hex(char *str, const void *bytes, int len);
extern char *bcm_chipname(uint chipid, char *buf, uint len);
extern char *bcm_brev_str(uint32 brev, char *buf);
extern void printbig(char *buf);
-extern void prhex(const char *msg, const uchar *buf, uint len);
+extern void prhex(const char *msg, volatile uchar *buf, uint len);
+
+/* IE parsing */
+
+/* packing is required if struct is passed across the bus */
+#include <packed_section_start.h>
+/* tag_ID/length/value_buffer tuple */
+typedef struct bcm_tlv {
+ uint8 id;
+ uint8 len;
+ uint8 data[1];
+} bcm_tlv_t;
+
+#define BCM_TLV_SIZE(_tlv) ((_tlv) ? (OFFSETOF(bcm_tlv_t, data) + (_tlv)->len) : 0)
+
+#define BCM_XTLV_TAG_LEN_SIZE 4
+
+/* bcm tlv w/ 16 bit id/len */
+typedef BWL_PRE_PACKED_STRUCT struct bcm_xtlv {
+ uint16 id;
+ uint16 len;
+ uint8 data[1];
+} BWL_POST_PACKED_STRUCT bcm_xtlv_t;
+#include <packed_section_end.h>
+
+
+/* descriptor of xtlv data src or dst */
+typedef struct {
+ uint16 type;
+ uint16 len;
+ void *ptr; /* ptr to memory location */
+} xtlv_desc_t;
+
+/* xtlv options */
+#define BCM_XTLV_OPTION_NONE 0x0000
+#define BCM_XTLV_OPTION_ALIGN32 0x0001
+
+typedef uint16 bcm_xtlv_opts_t;
+struct bcm_xtlvbuf {
+ bcm_xtlv_opts_t opts;
+ uint16 size;
+ uint8 *head; /* point to head of buffer */
+ uint8 *buf; /* current position of buffer */
+ /* allocated buffer may follow, but not necessarily */
+};
+typedef struct bcm_xtlvbuf bcm_xtlvbuf_t;
+
+#define BCM_TLV_MAX_DATA_SIZE (255)
+#define BCM_XTLV_MAX_DATA_SIZE (65535)
+#define BCM_TLV_HDR_SIZE (OFFSETOF(bcm_tlv_t, data))
+
+#define BCM_XTLV_HDR_SIZE (OFFSETOF(bcm_xtlv_t, data))
+/* LEN only stores the value's length without padding */
+#define BCM_XTLV_LEN(elt) ltoh16_ua(&(elt->len))
+#define BCM_XTLV_ID(elt) ltoh16_ua(&(elt->id))
+/* entire size of the XTLV including header, data, and optional padding */
+#define BCM_XTLV_SIZE(elt, opts) bcm_xtlv_size(elt, opts)
+#define bcm_valid_xtlv(elt, buflen, opts) (elt && ((int)(buflen) >= (int)BCM_XTLV_SIZE(elt, opts)))
+
+/* Check that bcm_tlv_t fits into the given buflen */
+#define bcm_valid_tlv(elt, buflen) (\
+ ((int)(buflen) >= (int)BCM_TLV_HDR_SIZE) && \
+ ((int)(buflen) >= (int)(BCM_TLV_HDR_SIZE + (elt)->len)))
+
+
+extern bcm_tlv_t *bcm_next_tlv(bcm_tlv_t *elt, int *buflen);
+extern bcm_tlv_t *bcm_parse_tlvs(void *buf, int buflen, uint key);
+extern bcm_tlv_t *bcm_parse_tlvs_min_bodylen(void *buf, int buflen, uint key, int min_bodylen);
+extern bcm_tlv_t *bcm_parse_tlvs_dot11(void *buf, int buflen, uint key, bool id_ext);
+
+extern bcm_tlv_t *bcm_parse_ordered_tlvs(void *buf, int buflen, uint key);
+
+extern bcm_tlv_t *bcm_find_vendor_ie(void *tlvs, int tlvs_len, const char *voui, uint8 *type,
+ int type_len);
+
+extern uint8 *bcm_write_tlv(int type, const void *data, int datalen, uint8 *dst);
+extern uint8 *bcm_write_tlv_safe(int type, const void *data, int datalen, uint8 *dst,
+ int dst_maxlen);
+
+extern uint8 *bcm_copy_tlv(const void *src, uint8 *dst);
+extern uint8 *bcm_copy_tlv_safe(const void *src, uint8 *dst, int dst_maxlen);
+
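/* Walk sketch over a plain TLV buffer using bcm_valid_tlv()/bcm_next_tlv();
 * this is roughly what bcm_parse_tlvs() provides, spelled out for clarity.
 */
static bcm_tlv_t *
example_find_tlv(void *tlvs, int buflen, uint8 want_id)
{
	bcm_tlv_t *elt = (bcm_tlv_t *)tlvs;

	while (elt != NULL && bcm_valid_tlv(elt, buflen)) {
		if (elt->id == want_id)
			return elt;
		elt = bcm_next_tlv(elt, &buflen);
	}
	return NULL;
}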
+/* xtlv */
+
+/* return the next xtlv element, and update buffer len (remaining). Buffer length
+ * updated includes padding as specified by options
+ */
+extern bcm_xtlv_t *bcm_next_xtlv(bcm_xtlv_t *elt, int *buflen, bcm_xtlv_opts_t opts);
+
+/* initialize an xtlv buffer. Use options specified for packing/unpacking using
+ * the buffer. Caller is responsible for allocating both buffers.
+ */
+extern int bcm_xtlv_buf_init(bcm_xtlvbuf_t *tlv_buf, uint8 *buf, uint16 len,
+ bcm_xtlv_opts_t opts);
+
+extern uint16 bcm_xtlv_buf_len(struct bcm_xtlvbuf *tbuf);
+extern uint16 bcm_xtlv_buf_rlen(struct bcm_xtlvbuf *tbuf);
+extern uint8 *bcm_xtlv_buf(struct bcm_xtlvbuf *tbuf);
+extern uint8 *bcm_xtlv_head(struct bcm_xtlvbuf *tbuf);
+extern int bcm_xtlv_put_data(bcm_xtlvbuf_t *tbuf, uint16 type, const void *data, uint16 dlen);
+extern int bcm_xtlv_put_8(bcm_xtlvbuf_t *tbuf, uint16 type, const int8 data);
+extern int bcm_xtlv_put_16(bcm_xtlvbuf_t *tbuf, uint16 type, const int16 data);
+extern int bcm_xtlv_put_32(bcm_xtlvbuf_t *tbuf, uint16 type, const int32 data);
+extern int bcm_unpack_xtlv_entry(uint8 **buf, uint16 xpct_type, uint16 xpct_len,
+ void *dst, bcm_xtlv_opts_t opts);
+extern int bcm_pack_xtlv_entry(uint8 **buf, uint16 *buflen, uint16 type, uint16 len,
+ void *src, bcm_xtlv_opts_t opts);
+extern int bcm_xtlv_size(const bcm_xtlv_t *elt, bcm_xtlv_opts_t opts);
+
+/* callback for unpacking xtlv from a buffer into context. */
+typedef int (bcm_xtlv_unpack_cbfn_t)(void *ctx, uint8 *buf, uint16 type, uint16 len);
+
+/* unpack a tlv buffer using buffer, options, and callback */
+extern int bcm_unpack_xtlv_buf(void *ctx, uint8 *buf, uint16 buflen,
+ bcm_xtlv_opts_t opts, bcm_xtlv_unpack_cbfn_t *cbfn);
+
+/* unpack a set of tlvs from the buffer using provided xtlv desc */
+extern int bcm_unpack_xtlv_buf_to_mem(void *buf, int *buflen, xtlv_desc_t *items,
+ bcm_xtlv_opts_t opts);
+
+/* pack a set of tlvs into buffer using provided xtlv desc */
+extern int bcm_pack_xtlv_buf_from_mem(void **buf, uint16 *buflen, xtlv_desc_t *items,
+ bcm_xtlv_opts_t opts);
+
+/* return data pointer of a given ID from xtlv buffer
+ * xtlv data length is given to *datalen_out, if the pointer is valid
+ */
+extern void *bcm_get_data_from_xtlv_buf(uint8 *tlv_buf, uint16 buflen, uint16 id,
+ uint16 *datalen_out, bcm_xtlv_opts_t opts);
+
+/* callback to return next tlv id and len to pack, if there are more tlvs to come and
+ * options e.g. alignment
+ */
+typedef bool (*bcm_pack_xtlv_next_info_cbfn_t)(void *ctx, uint16 *tlv_id, uint16 *tlv_len);
+
+/* callback to pack the tlv into length validated buffer */
+typedef void (*bcm_pack_xtlv_pack_next_cbfn_t)(void *ctx,
+ uint16 tlv_id, uint16 tlv_len, uint8* buf);
+
+/* pack a set of tlvs into buffer using get_next to iterate */
+int bcm_pack_xtlv_buf(void *ctx, void *tlv_buf, uint16 buflen,
+ bcm_xtlv_opts_t opts, bcm_pack_xtlv_next_info_cbfn_t get_next,
+ bcm_pack_xtlv_pack_next_cbfn_t pack_next, int *outlen);
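/* Packing sketch using the xtlv buffer API declared above: wrap a caller-owned
 * buffer, append a 16-bit value and an opaque blob, and report the bytes used.
 * The type IDs (0x10, 0x11) and the values are placeholders; BCME_OK is the
 * usual zero success code from this header family.
 */
static int
example_pack_two_xtlvs(uint8 *buf, uint16 buflen, uint16 *used_len)
{
	bcm_xtlvbuf_t xb;
	int err;

	err = bcm_xtlv_buf_init(&xb, buf, buflen, BCM_XTLV_OPTION_ALIGN32);
	if (err != BCME_OK)
		return err;

	err = bcm_xtlv_put_16(&xb, 0x10, 42);
	if (err == BCME_OK)
		err = bcm_xtlv_put_data(&xb, 0x11, "abc", 3);
	if (err == BCME_OK)
		*used_len = bcm_xtlv_buf_len(&xb);
	return err;
}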
/* bcmerror */
extern const char *bcmerrorstr(int bcmerror);
char *buf, uint32 bufsize);
extern uint bcm_bitcount(uint8 *bitmap, uint bytelength);
-extern int bcm_bprintf(struct bcmstrbuf *b, const char *fmt, ...)
- __attribute__ ((format (__printf__, 2, 0)));
+extern int bcm_bprintf(struct bcmstrbuf *b, const char *fmt, ...);
/* power conversion */
extern uint16 bcm_qdbm_to_mw(uint8 qdbm);
extern uint bcm_mkiovar(const char *name, const char *data, uint datalen, char *buf, uint len);
unsigned int process_nvram_vars(char *varbuf, unsigned int len);
-extern bool replace_nvram_variable(char *varbuf, unsigned int buflen, const char *variable,
- unsigned int *datalen);
/* trace any object allocation / free, with / without features (flags) set to the object */
#define bcm_object_trace_deinit()
#endif /* BCM_OBJECT_TRACE */
+/* calculate a * b + c */
+extern void bcm_uint64_multiple_add(uint32* r_high, uint32* r_low, uint32 a, uint32 b, uint32 c);
+/* calculate a / b */
+extern void bcm_uint64_divide(uint32* r, uint32 a_high, uint32 a_low, uint32 b);
+
+
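/* Sketch of the split 64-bit helpers above: the intermediate lives in a
 * (hi, lo) pair of uint32, so e.g. a bits-per-second rate can be formed
 * without native 64-bit arithmetic. Values are placeholders.
 */
static uint32
example_rate_bps(uint32 nbytes, uint32 elapsed_ms)
{
	uint32 hi, lo, rate;

	if (elapsed_ms == 0)
		return 0;
	/* hi:lo = nbytes * 8000 + 0 (bits transferred, scaled to per-second) */
	bcm_uint64_multiple_add(&hi, &lo, nbytes, 8000, 0);
	/* rate = hi:lo / elapsed_ms */
	bcm_uint64_divide(&rate, hi, lo, elapsed_ms);
	return rate;
}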
/* Public domain bit twiddling hacks/utilities: Sean Eron Anderson */
/* Table driven count set bits. */
return (_CSBTBL[p[0]] + _CSBTBL[p[1]] + _CSBTBL[p[2]] + _CSBTBL[p[3]]);
}
+
static INLINE int /* C equivalent count of leading 0's in a u32 */
C_bcm_count_leading_zeros(uint32 u32arg)
{
while (u32arg) {
shifts++; u32arg >>= 1;
}
- return (32 - shifts);
+ return (32U - shifts);
}
-/* the format of current TCM layout during boot
- *
- * Code Unused memory Random numbers Random number Magic number NVRAM NVRAM
- * byte Count 0xFEEDC0DE Size
- * |<-----Variable---->|<---Variable--->|<-----4 bytes-->|<---4 bytes---->|<---V--->|<--4B--->|
- * |<------------- BCM_ENTROPY_HOST_MAXSIZE --------->|
- */
-
-/* The HOST need to provided 64 bytes (512 bits) entropy for the bcm SW RNG */
-#define BCM_ENTROPY_MAGIC_SIZE 4u
-#define BCM_ENTROPY_COUNT_SIZE 4u
-#define BCM_ENTROPY_MIN_NBYTES 64u
-#define BCM_ENTROPY_MAX_NBYTES 512u
-#define BCM_ENTROPY_HOST_NBYTES 128u
-#define BCM_ENTROPY_HOST_MAXSIZE \
- (BCM_ENTROPY_MAGIC_SIZE + BCM_ENTROPY_COUNT_SIZE + BCM_ENTROPY_MAX_NBYTES)
-
-/* Keep BCM MAX_RAND NUMBERS definition for the current dongle image. It will be
- * removed after the dongle image is updated to use the bcm RNG.
- */
-#define BCM_MAX_RAND_NUMBERS 2u
+#ifdef BCM_ASLR_HEAP
-/* Constant for calculate the location of host entropy input */
-#define BCM_NVRAM_OFFSET_TCM 4u
-#define BCM_NVRAM_IMG_COMPRS_FACTOR 4u
-#define BCM_NVRAM_RNG_SIGNATURE 0xFEEDC0DEu
+#define BCM_NVRAM_OFFSET_TCM 4
+#define BCM_NVRAM_IMG_COMPRS_FACTOR 4
+#define BCM_RNG_SIGNATURE 0xFEEDC0DE
typedef struct bcm_rand_metadata {
- uint32 count; /* number of random numbers in bytes */
uint32 signature; /* host fills it in, FW verifies before reading rand */
+ uint32 count; /* number of 4byte wide random numbers */
} bcm_rand_metadata_t;
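/* Host-side sketch, assuming the host writes this metadata next to the block
 * of random words it donates to the dongle (the exact TCM layout is
 * firmware-specific and not defined here).
 */
static void
example_fill_rand_metadata(bcm_rand_metadata_t *md, uint32 nwords)
{
	md->signature = BCM_RNG_SIGNATURE;	/* checked by FW before use */
	md->count = nwords;			/* number of 4-byte random words */
}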
+#endif /* BCM_ASLR_HEAP */
#ifdef BCMDRIVER
/*
extern void bcm_mwbmap_audit(struct bcm_mwbmap * mwbmap_hdl);
/* End - Multiword bitmap based small Id allocator. */
+
/* INTERFACE: Simple unique 16bit Id Allocator using a stack implementation. */
#define ID8_INVALID 0xFFu
/* End - Simple 16bit Id Allocator. */
#endif /* BCMDRIVER */
+extern void bcm_uint64_right_shift(uint32* r, uint32 a_high, uint32 a_low, uint32 b);
+
+void bcm_add_64(uint32* r_hi, uint32* r_lo, uint32 offset);
+void bcm_sub_64(uint32* r_hi, uint32* r_lo, uint32 offset);
+
+uint64 fp_mult_64(uint64 val1, uint64 val2, uint8 nf1, uint8 nf2, uint8 nf_res);
+uint8 fp_div_64(uint64 num, uint32 den, uint8 nf_num, uint8 nf_den, uint32 *div_out);
+uint8 fp_calc_head_room_64(uint64 num);
+uint8 fp_calc_head_room_32(uint32 num);
+uint32 fp_round_64(uint64 num, uint8 rnd_pos);
+uint32 fp_round_32(uint32 num, uint8 rnd_pos);
+uint32 fp_floor_64(uint64 num, uint8 floor_pos);
+uint32 fp_floor_32(uint32 num, uint8 floor_pos);
+uint32 fp_ceil_64(uint64 num, uint8 ceil_pos);
+uint64 bcm_shl_64(uint64 input, uint8 shift_amt);
+uint64 bcm_shr_64(uint64 input, uint8 shift_amt);
+
#define MASK_32_BITS (~0)
#define MASK_8_BITS ((1 << 8) - 1)
-#define EXTRACT_LOW32(num) (uint32)(num & MASK_32_BITS)
-#define EXTRACT_HIGH32(num) (uint32)(((uint64)num >> 32) & MASK_32_BITS)
+#define EXTRACT_LOW32(num) (uint32)(num & MASK_32_BITS)
+#define EXTRACT_HIGH32(num) (uint32)(((uint64)num >> 32) & MASK_32_BITS)
#define MAXIMUM(a, b) ((a > b) ? a : b)
#define MINIMUM(a, b) ((a < b) ? a : b)
return list_p->next_p;
}
+
static INLINE dll_t *
dll_tail_p(dll_t *list_p)
{
return (list_p)->prev_p;
}
+
static INLINE dll_t *
dll_next_p(dll_t *node_p)
{
return (node_p)->next_p;
}
+
static INLINE dll_t *
dll_prev_p(dll_t *node_p)
{
return (node_p)->prev_p;
}
+
static INLINE bool
dll_empty(dll_t *list_p)
{
return ((list_p)->next_p == (list_p));
}
+
static INLINE bool
dll_end(dll_t *list_p, dll_t * node_p)
{
return (list_p == node_p);
}
+
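/* Iteration sketch over a dll_t list using the accessors above; 'list' is the
 * sentinel head node and is assumed to be initialised already.
 */
static uint
example_count_nodes(dll_t *list)
{
	dll_t *node;
	uint n = 0;

	for (node = dll_next_p(list); !dll_end(list, node); node = dll_next_p(node))
		n++;
	return n;
}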
/* inserts the node new_p "after" the node at_p */
static INLINE void
dll_insert(dll_t *new_p, dll_t * at_p)
dll_insert(node_p, list_p);
}
+
/* deletes a node from any list that it "may" be in, if at all. */
static INLINE void
dll_delete(dll_t *node_p)
typedef void (* dll_elem_dump)(void * elem_p);
void dll_pool_detach(void * osh, dll_pool_t * pool, uint16 elems_max, uint16 elem_size);
-int valid_bcmerror(int e);
-
/* calculate IPv4 header checksum
* - input ip points to IP header in network order
* - output cksum is in network order
#ifdef __cplusplus
}
-#endif // endif
+#endif
/* #define DEBUG_COUNTER */
#ifdef DEBUG_COUNTER
bool enabled; /* Whether to enable printing log */
} counter_tbl_t;
+
void counter_printlog(counter_tbl_t *ctr_tbl);
#endif /* DEBUG_COUNTER */
#define CALL_SITE __builtin_return_address(0)
#else
#define CALL_SITE ((void*) 0)
-#endif // endif
+#endif
#ifdef SHOW_LOGTRACE
-#define TRACE_LOG_BUF_MAX_SIZE 1700
-#define RTT_LOG_BUF_MAX_SIZE 1700
+#define TRACE_LOG_BUF_MAX_SIZE 1500
#define BUF_NOT_AVAILABLE 0
#define NEXT_BUF_NOT_AVAIL 1
#define NEXT_BUF_AVAIL 2
} trace_buf_info_t;
#endif /* SHOW_LOGTRACE */
-enum dump_dongle_e {
- DUMP_DONGLE_COREREG = 0,
- DUMP_DONGLE_D11MEM
-};
-
-typedef struct {
- uint32 type; /**< specifies e.g dump of d11 memory, use enum dump_dongle_e */
- uint32 index; /**< iterator1, specifies core index or d11 memory index */
- uint32 offset; /**< iterator2, byte offset within register set or memory */
-} dump_dongle_in_t;
-
-typedef struct {
- uint32 address; /**< e.g. backplane address of register */
- uint32 id; /**< id, e.g. core id */
- uint32 rev; /**< rev, e.g. core rev */
- uint32 n_bytes; /**< nbytes in array val[] */
- uint32 val[1]; /**< out: values that were read out of registers or memory */
-} dump_dongle_out_t;
-
-extern uint32 sqrt_int(uint32 value);
-
-#ifdef BCMDRIVER
-/* structures and routines to process variable sized data */
-typedef struct var_len_data {
- uint32 vlen;
- uint8 *vdata;
-} var_len_data_t;
-
-int bcm_vdata_alloc(osl_t *osh, var_len_data_t *vld, uint32 size);
-int bcm_vdata_free(osl_t *osh, var_len_data_t *vld);
-#endif /* BCMDRIVER */
-
-/* Count the number of elements in an array that do not match the given value */
-extern int array_value_mismatch_count(uint8 value, uint8 *array, int array_size);
-/* Count the number of non-zero elements in an uint8 array */
-extern int array_nonzero_count(uint8 *array, int array_size);
-/* Count the number of non-zero elements in an int16 array */
-extern int array_nonzero_count_int16(int16 *array, int array_size);
-/* Count the number of zero elements in an uint8 array */
-extern int array_zero_count(uint8 *array, int array_size);
-/* Validate a uint8 ordered array. Assert if invalid. */
-extern int verify_ordered_array_uint8(uint8 *array, int array_size, uint8 range_lo, uint8 range_hi);
-/* Validate a int16 configuration array that need not be zero-terminated. Assert if invalid. */
-extern int verify_ordered_array_int16(int16 *array, int array_size, int16 range_lo, int16 range_hi);
-/* Validate all values in an array are in range */
-extern int verify_array_values(uint8 *array, int array_size,
- int range_lo, int range_hi, bool zero_terminated);
-
#endif /* _bcmutils_h_ */
/*
* Definitions for nl80211 vendor command/event access to host driver
*
- * Copyright (C) 1999-2019, Broadcom.
- *
+ * Copyright (C) 1999-2017, Broadcom Corporation
+ *
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
* under the terms of the GNU General Public License version 2 (the "GPL"),
* available at http://www.broadcom.com/licenses/GPLv2.php, with the
* following added to such license:
- *
+ *
* As a special exception, the copyright holders of this software give you
* permission to link this software with independent modules, and to copy and
* distribute the resulting executable under terms of your choice, provided that
* the license of that module. An independent module is a module which is not
* derived from this software. The special exception does not apply to any
* modifications of the software.
- *
+ *
* Notwithstanding the above, under no circumstances may you combine this
* software in any way with any other Broadcom software provided under a license
* other than the GPL, without Broadcom's express prior written consent.
*
* <<Broadcom-WL-IPTag/Open:>>
*
- * $Id: brcm_nl80211.h 787269 2018-11-01 11:46:31Z $
+ * $Id: brcm_nl80211.h 601873 2015-11-24 11:04:28Z $
*
*/
#define OUI_GOOGLE 0x001A11
enum wl_vendor_subcmd {
- BRCM_VENDOR_SCMD_UNSPEC = 0,
- BRCM_VENDOR_SCMD_PRIV_STR = 1,
- BRCM_VENDOR_SCMD_BCM_STR = 2,
- BRCM_VENDOR_SCMD_BCM_PSK = 3,
- BRCM_VENDOR_SCMD_SET_PMK = 4,
- BRCM_VENDOR_SCMD_GET_FEATURES = 5,
- BRCM_VENDOR_SCMD_MAX = 6
+ BRCM_VENDOR_SCMD_UNSPEC,
+ BRCM_VENDOR_SCMD_PRIV_STR,
+ BRCM_VENDOR_SCMD_BCM_STR
};
+
struct bcm_nlmsg_hdr {
uint cmd; /* common ioctl definition */
int len; /* expected return buffer length */
* Dongle BUS interface Abstraction layer
* target serial buses like USB, SDIO, SPI, etc.
*
- * Copyright (C) 1999-2019, Broadcom.
- *
+ * Copyright (C) 1999-2017, Broadcom Corporation
+ *
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
* under the terms of the GNU General Public License version 2 (the "GPL"),
* available at http://www.broadcom.com/licenses/GPLv2.php, with the
* following added to such license:
- *
+ *
* As a special exception, the copyright holders of this software give you
* permission to link this software with independent modules, and to copy and
* distribute the resulting executable under terms of your choice, provided that
* the license of that module. An independent module is a module which is not
* derived from this software. The special exception does not apply to any
* modifications of the software.
- *
+ *
* Notwithstanding the above, under no circumstances may you combine this
* software in any way with any other Broadcom software provided under a license
* other than the GPL, without Broadcom's express prior written consent.
*
* <<Broadcom-WL-IPTag/Open:>>
*
- * $Id: dbus.h 686618 2017-02-23 07:20:43Z $
+ * $Id: dbus.h 596371 2015-10-30 22:43:47Z $
*/
#ifndef __DBUS_H__
extern void dbus_release_fw_nvfile(void *firmware);
#endif /* #if defined(BCM_REQUEST_FW) */
+
#if defined(EHCI_FASTPATH_TX) || defined(EHCI_FASTPATH_RX)
+
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0))
/* Backward compatibility */
typedef unsigned int gfp_t;
typedef unsigned int __hc32;
#else
#error Two-argument functions needed
-#endif // endif
+#endif
/* Private USB opcode base */
#define EHCI_FASTPATH 0x31
/*
* Header file for DHD daemon to handle timeouts
*
- * Copyright (C) 1999-2019, Broadcom.
- *
+ * Copyright (C) 1999-2017, Broadcom Corporation
+ *
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
* under the terms of the GNU General Public License version 2 (the "GPL"),
* available at http://www.broadcom.com/licenses/GPLv2.php, with the
* following added to such license:
- *
+ *
* As a special exception, the copyright holders of this software give you
* permission to link this software with independent modules, and to copy and
* distribute the resulting executable under terms of your choice, provided that
* the license of that module. An independent module is a module which is not
* derived from this software. The special exception does not apply to any
* modifications of the software.
- *
+ *
* Notwithstanding the above, under no circumstances may you combine this
* software in any way with any other Broadcom software provided under a license
* other than the GPL, without Broadcom's express prior written consent.
*
* <<Broadcom-WL-IPTag/Open:>>
*
- * $Id: dhd_daemon.h 802947 2019-02-05 14:53:08Z $
+ * $Id: dhd_daemon.h 671464 2016-11-22 06:15:32Z $
*/
#ifndef __BCM_DHDD_H__
#define NO_TRAP 0
#define DO_TRAP 1
-/* Keep common BCM netlink macros here */
-#define BCM_NL_USER 31
-#define BCM_NL_OXYGEN 30
-#define BCM_NL_TS 29
-/* ====== !! ADD NEW NL socket related defines here !! ====== */
+#define BCM_NL_USER 31
typedef enum notify_dhd_daemon_reason {
REASON_COMMAND_TO,
*
* Definitions subject to change without notice.
*
- * Copyright (C) 1999-2019, Broadcom.
- *
+ * Copyright (C) 1999-2017, Broadcom Corporation
+ *
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
* under the terms of the GNU General Public License version 2 (the "GPL"),
* available at http://www.broadcom.com/licenses/GPLv2.php, with the
* following added to such license:
- *
+ *
* As a special exception, the copyright holders of this software give you
* permission to link this software with independent modules, and to copy and
* distribute the resulting executable under terms of your choice, provided that
* the license of that module. An independent module is a module which is not
* derived from this software. The special exception does not apply to any
* modifications of the software.
- *
+ *
* Notwithstanding the above, under no circumstances may you combine this
* software in any way with any other Broadcom software provided under a license
* other than the GPL, without Broadcom's express prior written consent.
*
* <<Broadcom-WL-IPTag/Open:>>
*
- * $Id: dhdioctl.h 800512 2019-01-22 09:31:01Z $
+ * $Id: dhdioctl.h 675190 2016-12-14 15:27:52Z $
*/
#ifndef _dhdioctl_h_
#include <typedefs.h>
+
/* Linux network driver ioctl encoding */
typedef struct dhd_ioctl {
uint32 cmd; /* common ioctl definition */
BUS_TYPE_PCIE /* for PCIE dongles */
};
-typedef enum {
- DMA_XFER_SUCCESS = 0,
- DMA_XFER_IN_PROGRESS,
- DMA_XFER_FAILED
-} dma_xfer_status_t;
-
-typedef enum d11_lpbk_type {
- M2M_DMA_LPBK = 0,
- D11_LPBK = 1,
- BMC_LPBK = 2,
- M2M_NON_DMA_LPBK = 3,
- D11_HOST_MEM_LPBK = 4,
- BMC_HOST_MEM_LPBK = 5,
- MAX_LPBK = 6
-} dma_xfer_type_t;
-
-typedef struct dmaxfer_info {
- uint16 version;
- uint16 length;
- dma_xfer_status_t status;
- dma_xfer_type_t type;
- uint src_delay;
- uint dest_delay;
- uint should_wait;
- uint core_num;
- int error_code;
- uint32 num_bytes;
- uint64 time_taken;
- uint64 tput;
-} dma_xfer_info_t;
-
-#define DHD_DMAXFER_VERSION 0x1
-
-typedef struct tput_test {
- uint16 version;
- uint16 length;
- uint8 direction;
- uint8 tput_test_running;
- uint8 mac_sta[6];
- uint8 mac_ap[6];
- uint8 PAD[2];
- uint32 payload_size;
- uint32 num_pkts;
- uint32 timeout_ms;
- uint32 flags;
-
- uint32 pkts_good;
- uint32 pkts_bad;
- uint32 pkts_cmpl;
- uint64 time_ms;
- uint64 tput_bps;
-} tput_test_t;
-
-typedef enum {
- TPUT_DIR_TX = 0,
- TPUT_DIR_RX
-} tput_dir_t;
-
-#define TPUT_TEST_T_VER 1
-#define TPUT_TEST_T_LEN 68
-#define TPUT_TEST_MIN_PAYLOAD_SIZE 16
-#define TPUT_TEST_USE_ETHERNET_HDR 0x1
-#define TPUT_TEST_USE_802_11_HDR 0x2
/* per-driver magic numbers */
#define DHD_IOCTL_MAGIC 0x00444944
#define DHD_PKT_MON_VAL 0x2000000
#define DHD_PKT_MON_DUMP_VAL 0x4000000
#define DHD_ERROR_MEM_VAL 0x8000000
-#define DHD_DNGL_IOVAR_SET_VAL 0x10000000 /**< logs the setting of dongle iovars */
-#define DHD_LPBKDTDUMP_VAL 0x20000000
-#define DHD_PRSRV_MEM_VAL 0x40000000
-#define DHD_IOVAR_MEM_VAL 0x80000000
#define DHD_ANDROID_VAL 0x10000
#define DHD_IW_VAL 0x20000
#define DHD_CFG_VAL 0x40000
#define DHD_CONFIG_VAL 0x80000
-#define DUMP_EAPOL_VAL 0x0001
-#define DUMP_ARP_VAL 0x0002
-#define DUMP_DHCP_VAL 0x0004
-#define DUMP_ICMP_VAL 0x0008
-#define DUMP_DNS_VAL 0x0010
-#define DUMP_TRX_VAL 0x0080
#ifdef SDTEST
/* For pktgen iovar */
#define DHD_IDLE_ACTIVE 0 /* Do not request any SD clock change when idle */
#define DHD_IDLE_STOP (-1) /* Request SD clock be stopped (and use SD1 mode) */
-enum dhd_maclist_xtlv_type {
- DHD_MACLIST_XTLV_R = 0x1,
- DHD_MACLIST_XTLV_X = 0x2,
- DHD_SVMPLIST_XTLV = 0x3
-};
-
-typedef struct _dhd_maclist_t {
- uint16 version; /* Version */
- uint16 bytes_len; /* Total bytes length of lists, XTLV headers and paddings */
- uint8 plist[1]; /* Pointer to the first list */
-} dhd_maclist_t;
-
-typedef struct _dhd_pd11regs_param {
- uint16 start_idx;
- uint8 verbose;
- uint8 pad;
- uint8 plist[1];
-} dhd_pd11regs_param;
-
-typedef struct _dhd_pd11regs_buf {
- uint16 idx;
- uint8 pad[2];
- uint8 pbuf[1];
-} dhd_pd11regs_buf;
-
-/* BT logging and memory dump */
-
-#define BT_LOG_BUF_MAX_SIZE (DHD_IOCTL_MAXLEN - (2 * sizeof(int)))
-#define BT_LOG_BUF_NOT_AVAILABLE 0
-#define BT_LOG_NEXT_BUF_NOT_AVAIL 1
-#define BT_LOG_NEXT_BUF_AVAIL 2
-#define BT_LOG_NOT_READY 3
-
-typedef struct bt_log_buf_info {
- int availability;
- int size;
- char buf[BT_LOG_BUF_MAX_SIZE];
-} bt_log_buf_info_t;
-
-/* request BT memory in chunks */
-typedef struct bt_mem_req {
- int offset; /* offset from BT memory start */
- int buf_size; /* buffer size per chunk */
-} bt_mem_req_t;
-
-/* max dest supported */
-#define DEBUG_BUF_DEST_MAX 4
-/* debug buf dest stat */
-typedef struct debug_buf_dest_stat {
- uint32 stat[DEBUG_BUF_DEST_MAX];
-} debug_buf_dest_stat_t;
#endif /* _dhdioctl_h_ */
/*
- * Broadcom Event protocol definitions
- *
- * Dependencies: bcmeth.h
- *
- * Copyright (C) 1999-2019, Broadcom.
+ * Broadcom Event protocol definitions
*
+ * Copyright (C) 1999-2017, Broadcom Corporation
+ *
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
* under the terms of the GNU General Public License version 2 (the "GPL"),
* available at http://www.broadcom.com/licenses/GPLv2.php, with the
* following added to such license:
- *
+ *
* As a special exception, the copyright holders of this software give you
* permission to link this software with independent modules, and to copy and
* distribute the resulting executable under terms of your choice, provided that
* the license of that module. An independent module is a module which is not
* derived from this software. The special exception does not apply to any
* modifications of the software.
- *
+ *
* Notwithstanding the above, under no circumstances may you combine this
* software in any way with any other Broadcom software provided under a license
* other than the GPL, without Broadcom's express prior written consent.
*
- *
- * <<Broadcom-WL-IPTag/Open:>>
+ * Dependencies: bcmeth.h
*
* $Id: dnglevent.h $
*
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
* -----------------------------------------------------------------------------
*
*/
#ifndef _TYPEDEFS_H_
#include <typedefs.h>
-#endif // endif
+#endif
#include <bcmeth.h>
#include <ethernet.h>
} BWL_POST_PACKED_STRUCT bcm_dngl_socramind_t;
/* SOCRAM_IND type tags */
-typedef enum socram_ind_tag {
- SOCRAM_IND_ASSERT_TAG = 1,
- SOCRAM_IND_TAG_HEALTH_CHECK = 2
-} socram_ind_tag_t;
-
+#define SOCRAM_IND_ASSERT_TAG 0x1
+#define SOCRAM_IND_TAG_HEALTH_CHECK 0x2
/* Health check top level module tags */
typedef BWL_PRE_PACKED_STRUCT struct bcm_dngl_healthcheck {
uint16 top_module_tag; /* top level module tag */
#define HEALTH_CHECK_PCIEDEV_FLAG_LINKDOWN_SHIFT 2
#define HEALTH_CHECK_PCIEDEV_FLAG_MSI_INT_SHIFT 3
#define HEALTH_CHECK_PCIEDEV_FLAG_NODS_SHIFT 4
-#define HEALTH_CHECK_PCIEDEV_FLAG_NO_HOST_WAKE_SHIFT 5
#define HEALTH_CHECK_PCIEDEV_FLAG_IN_D3 1 << HEALTH_CHECK_PCIEDEV_FLAG_IN_D3_SHIFT
#define HEALTH_CHECK_PCIEDEV_FLAG_AER 1 << HEALTH_CHECK_PCIEDEV_FLAG_AER_SHIFT
#define HEALTH_CHECK_PCIEDEV_FLAG_LINKDOWN 1 << HEALTH_CHECK_PCIEDEV_FLAG_LINKDOWN_SHIFT
#define HEALTH_CHECK_PCIEDEV_FLAG_MSI_INT 1 << HEALTH_CHECK_PCIEDEV_FLAG_MSI_INT_SHIFT
#define HEALTH_CHECK_PCIEDEV_FLAG_NODS 1 << HEALTH_CHECK_PCIEDEV_FLAG_NODS_SHIFT
-#define HEALTH_CHECK_PCIEDEV_FLAG_NO_HOST_WAKE 1 << HEALTH_CHECK_PCIEDEV_FLAG_NO_HOST_WAKE_SHIFT
/* PCIE Module TAGs */
#define HEALTH_CHECK_PCIEDEV_INDUCED_IND 0x1
#define HEALTH_CHECK_PCIEDEV_H2D_DMA_IND 0x2
#define HEALTH_CHECK_PCIEDEV_D3ACK_STALL_IND 0x5
#define HEALTH_CHECK_PCIEDEV_NODS_IND 0x6
#define HEALTH_CHECK_PCIEDEV_LINKSPEED_FALLBACK_IND 0x7
-#define HEALTH_CHECK_PCIEDEV_DSACK_STALL_IND 0x8
-#define HC_PCIEDEV_CONFIG_REGLIST_MAX 25
+#define HC_PCIEDEV_CONFIG_REGLIST_MAX 20
typedef BWL_PRE_PACKED_STRUCT struct bcm_dngl_pcie_hc {
uint16 version; /* HEALTH_CHECK_PCIEDEV_VERSION_1 */
uint16 reserved;
uint32 pcie_config_regs[HC_PCIEDEV_CONFIG_REGLIST_MAX];
} BWL_POST_PACKED_STRUCT bcm_dngl_pcie_hc_t;
-#ifdef HCHK_COMMON_SW_EVENT
-/* Enumerating top level SW entities for use by health check */
-typedef enum {
- HCHK_SW_ENTITY_UNDEFINED = 0,
- HCHK_SW_ENTITY_PCIE = 1,
- HCHK_SW_ENTITY_SDIO = 2,
- HCHK_SW_ENTITY_USB = 3,
- HCHK_SW_ENTITY_RTE = 4,
- HCHK_SW_ENTITY_WL_PRIMARY = 5, /* WL instance 0 */
- HCHK_SW_ENTITY_WL_SECONDARY = 6, /* WL instance 1 */
- HCHK_SW_ENTITY_MAX
-} hchk_sw_entity_t;
-#endif /* HCHK_COMMON_SW_EVENT */
-
/* This marks the end of a packed structure section. */
#include <packed_section_end.h>
* IEEE Std 802.1X-2001
* IEEE 802.1X RADIUS Usage Guidelines
*
- * Copyright (C) 1999-2019, Broadcom.
- *
+ * Copyright (C) 1999-2017, Broadcom Corporation
+ *
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
* under the terms of the GNU General Public License version 2 (the "GPL"),
* available at http://www.broadcom.com/licenses/GPLv2.php, with the
* following added to such license:
- *
+ *
* As a special exception, the copyright holders of this software give you
* permission to link this software with independent modules, and to copy and
* distribute the resulting executable under terms of your choice, provided that
* the license of that module. An independent module is a module which is not
* derived from this software. The special exception does not apply to any
* modifications of the software.
- *
+ *
* Notwithstanding the above, under no circumstances may you combine this
* software in any way with any other Broadcom software provided under a license
* other than the GPL, without Broadcom's express prior written consent.
*
* <<Broadcom-WL-IPTag/Open:>>
*
- * $Id: eapol.h 809460 2019-03-14 00:35:24Z $
+ * $Id: eapol.h 700076 2017-05-17 14:42:22Z $
*/
#ifndef _eapol_h_
#ifndef _TYPEDEFS_H_
#include <typedefs.h>
-#endif // endif
+#endif
/* This marks the start of a packed structure section. */
#include <packed_section_start.h>
unsigned short length; /* Length of body */
} eapol_hdr_t;
-#define EAPOL_HDR_LEN 4u
+#define EAPOL_HDR_LEN 4
/* EAPOL version */
-#define WPA2_EAPOL_VERSION 2u
-#define WPA_EAPOL_VERSION 1u
-#define LEAP_EAPOL_VERSION 1u
-#define SES_EAPOL_VERSION 1u
+#define WPA2_EAPOL_VERSION 2
+#define WPA_EAPOL_VERSION 1
+#define LEAP_EAPOL_VERSION 1
+#define SES_EAPOL_VERSION 1
/* EAPOL types */
#define EAP_PACKET 0
-#define EAPOL_START 1u
-#define EAPOL_LOGOFF 2u
-#define EAPOL_KEY 3u
-#define EAPOL_ASF 4u
+#define EAPOL_START 1
+#define EAPOL_LOGOFF 2
+#define EAPOL_KEY 3
+#define EAPOL_ASF 4
/* EAPOL-Key types */
-#define EAPOL_RC4_KEY 1u
-#define EAPOL_WPA2_KEY 2u /* 802.11i/WPA2 */
-#define EAPOL_WPA_KEY 254u /* WPA */
+#define EAPOL_RC4_KEY 1
+#define EAPOL_WPA2_KEY 2 /* 802.11i/WPA2 */
+#define EAPOL_WPA_KEY 254 /* WPA */
/* RC4 EAPOL-Key header field sizes */
-#define EAPOL_KEY_REPLAY_LEN 8u
-#define EAPOL_KEY_IV_LEN 16u
-#define EAPOL_KEY_SIG_LEN 16u
+#define EAPOL_KEY_REPLAY_LEN 8
+#define EAPOL_KEY_IV_LEN 16
+#define EAPOL_KEY_SIG_LEN 16
/* RC4 EAPOL-Key */
typedef BWL_PRE_PACKED_STRUCT struct {
unsigned char key[1]; /* Key (optional) */
} BWL_POST_PACKED_STRUCT eapol_key_header_t;
-#define EAPOL_KEY_HEADER_LEN 44u
+#define EAPOL_KEY_HEADER_LEN 44
/* RC4 EAPOL-Key flags */
-#define EAPOL_KEY_FLAGS_MASK 0x80u
-#define EAPOL_KEY_BROADCAST 0u
-#define EAPOL_KEY_UNICAST 0x80u
+#define EAPOL_KEY_FLAGS_MASK 0x80
+#define EAPOL_KEY_BROADCAST 0
+#define EAPOL_KEY_UNICAST 0x80
/* RC4 EAPOL-Key index */
-#define EAPOL_KEY_INDEX_MASK 0x7fu
+#define EAPOL_KEY_INDEX_MASK 0x7f
/* WPA/802.11i/WPA2 EAPOL-Key header field sizes */
#define EAPOL_AKW_BLOCK_LEN 8
-#define EAPOL_WPA_KEY_REPLAY_LEN 8u
-#define EAPOL_WPA_KEY_NONCE_LEN 32u
-#define EAPOL_WPA_KEY_IV_LEN 16u
-#define EAPOL_WPA_KEY_RSC_LEN 8u
-#define EAPOL_WPA_KEY_ID_LEN 8u
+#define EAPOL_WPA_KEY_REPLAY_LEN 8
+#define EAPOL_WPA_KEY_NONCE_LEN 32
+#define EAPOL_WPA_KEY_IV_LEN 16
+#define EAPOL_WPA_KEY_RSC_LEN 8
+#define EAPOL_WPA_KEY_ID_LEN 8
+#define EAPOL_WPA_KEY_MIC_LEN 16
#define EAPOL_WPA_KEY_DATA_LEN (EAPOL_WPA_MAX_KEY_SIZE + EAPOL_AKW_BLOCK_LEN)
-#define EAPOL_WPA_MAX_KEY_SIZE 32u
-#define EAPOL_WPA_KEY_MAX_MIC_LEN 32u
-#define EAPOL_WPA_ENCR_KEY_MAX_LEN 64u
-#define EAPOL_WPA_TEMP_ENCR_KEY_MAX_LEN 32u
-
-#define EAPOL_WPA_PMK_MAX_LEN 64u
-#define EAPOL_WPA_PMK_SHA384_LEN 48u
-#define EAPOL_WPA_PMK_DEFAULT_LEN 32u
-#define EAPOL_WPA_KCK_DEFAULT_LEN 16u
-#define EAPOL_WPA_KCK_SHA384_LEN 24u
-#define EAPOL_WPA_KCK_MIC_DEFAULT_LEN 16u
-#define EAPOL_WPA_KCK_MIC_SHA384_LEN 24u
-#define EAPOL_WPA_ENCR_KEY_DEFAULT_LEN 16u
-
-#define EAPOL_WPA_KEK2_SHA256_LEN 16u
-#define EAPOL_WPA_KEK2_SHA384_LEN 32u
-#define EAPOL_WPA_KCK2_SHA256_LEN 16u
-#define EAPOL_WPA_KCK2_SHA384_LEN 24u
-
-#ifndef EAPOL_KEY_HDR_VER_V2
-#define EAPOL_WPA_KEY_MIC_LEN 16u /* deprecated */
-#define EAPOL_WPA_KEY_LEN 95u /* deprecated */
-#endif // endif
-
-#define EAPOL_PTK_KEY_MAX_LEN (EAPOL_WPA_KEY_MAX_MIC_LEN +\
- EAPOL_WPA_ENCR_KEY_MAX_LEN +\
- EAPOL_WPA_TEMP_ENCR_KEY_MAX_LEN +\
- EAPOL_WPA_KCK2_SHA384_LEN +\
- EAPOL_WPA_KEK2_SHA384_LEN)
-
-#ifndef EAPOL_KEY_HDR_VER_V2
-
-/* WPA EAPOL-Key : deprecated */
+#define EAPOL_WPA_MAX_KEY_SIZE 32
+
+/* WPA EAPOL-Key */
typedef BWL_PRE_PACKED_STRUCT struct {
unsigned char type; /* Key Descriptor Type */
unsigned short key_info; /* Key Information (unaligned) */
unsigned short data_len; /* Key Data Length */
unsigned char data[EAPOL_WPA_KEY_DATA_LEN]; /* Key data */
} BWL_POST_PACKED_STRUCT eapol_wpa_key_header_t;
-#else
-/* WPA EAPOL-Key : new structure to consider dynamic MIC length */
-typedef BWL_PRE_PACKED_STRUCT struct {
- unsigned char type; /* Key Descriptor Type */
- unsigned short key_info; /* Key Information (unaligned) */
- unsigned short key_len; /* Key Length (unaligned) */
- unsigned char replay[EAPOL_WPA_KEY_REPLAY_LEN]; /* Replay Counter */
- unsigned char nonce[EAPOL_WPA_KEY_NONCE_LEN]; /* Nonce */
- unsigned char iv[EAPOL_WPA_KEY_IV_LEN]; /* Key IV */
- unsigned char rsc[EAPOL_WPA_KEY_RSC_LEN]; /* Key RSC */
- unsigned char id[EAPOL_WPA_KEY_ID_LEN]; /* WPA:Key ID, 802.11i/WPA2: Reserved */
-} BWL_POST_PACKED_STRUCT eapol_wpa_key_header_v2_t;
-
-typedef eapol_wpa_key_header_v2_t eapol_wpa_key_header_t;
-#endif /* EAPOL_KEY_HDR_VER_V2 */
-
-#define EAPOL_WPA_KEY_DATA_LEN_SIZE 2u
-
-#ifdef EAPOL_KEY_HDR_VER_V2
-#define EAPOL_WPA_KEY_HDR_SIZE(mic_len) (sizeof(eapol_wpa_key_header_v2_t) \
- + mic_len + EAPOL_WPA_KEY_DATA_LEN_SIZE)
-
-/* WPA EAPOL-Key header macros to reach out mic/data_len/data field */
-#define EAPOL_WPA_KEY_HDR_MIC_PTR(pos) ((uint8 *)pos + sizeof(eapol_wpa_key_header_v2_t))
-#define EAPOL_WPA_KEY_HDR_DATA_LEN_PTR(pos, mic_len) \
- ((uint8 *)pos + sizeof(eapol_wpa_key_header_v2_t) + mic_len)
-#define EAPOL_WPA_KEY_HDR_DATA_PTR(pos, mic_len) \
- ((uint8 *)pos + EAPOL_WPA_KEY_HDR_SIZE(mic_len))
-#else
-#define EAPOL_WPA_KEY_HDR_SIZE(mic_len) EAPOL_WPA_KEY_LEN
-#define EAPOL_WPA_KEY_HDR_MIC_PTR(pos) ((uint8 *)&pos->mic)
-#define EAPOL_WPA_KEY_HDR_DATA_LEN_PTR(pos, mic_len) ((uint8 *)&pos->data_len)
-#define EAPOL_WPA_KEY_HDR_DATA_PTR(pos, mic_len) ((uint8 *)&pos->data)
-#endif /* EAPOL_KEY_HDR_VER_V2 */
+
+#define EAPOL_WPA_KEY_LEN 95
/* WPA/802.11i/WPA2 KEY KEY_INFO bits */
#define WPA_KEY_DESC_OSEN 0x0
-#define WPA_KEY_DESC_V0 0x0
#define WPA_KEY_DESC_V1 0x01
#define WPA_KEY_DESC_V2 0x02
#define WPA_KEY_DESC_V3 0x03
#define WPA_KEY_SECURE 0x200
#define WPA_KEY_ERROR 0x400
#define WPA_KEY_REQ 0x800
-#define WPA_KEY_ENC_KEY_DATA 0x01000 /* Encrypted Key Data */
-#define WPA_KEY_SMK_MESSAGE 0x02000 /* SMK Message */
-#define WPA_KEY_DESC_VER(_ki) ((_ki) & 0x03u)
#define WPA_KEY_DESC_V2_OR_V3 WPA_KEY_DESC_V2
#define WPA2_KEY_DATA_PAD 0xdd
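/* Sketch of reading the (unaligned) key_info field of a received EAPOL-Key
 * frame; ntoh16_ua() is assumed to come from bcmendian.h.
 */
static bool
example_is_key_request(const eapol_wpa_key_header_t *key)
{
	uint16 ki = ntoh16_ua(&key->key_info);

	return (ki & WPA_KEY_REQ) != 0;
}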
+
/* This marks the end of a packed structure section. */
#include <packed_section_end.h>
/*
- * Copyright (C) 1999-2019, Broadcom.
- *
+ * Copyright (C) 1999-2017, Broadcom Corporation
+ *
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
* under the terms of the GNU General Public License version 2 (the "GPL"),
* available at http://www.broadcom.com/licenses/GPLv2.php, with the
* following added to such license:
- *
+ *
* As a special exception, the copyright holders of this software give you
* permission to link this software with independent modules, and to copy and
* distribute the resulting executable under terms of your choice, provided that
* the license of that module. An independent module is a module which is not
* derived from this software. The special exception does not apply to any
* modifications of the software.
- *
+ *
* Notwithstanding the above, under no circumstances may you combine this
* software in any way with any other Broadcom software provided under a license
* other than the GPL, without Broadcom's express prior written consent.
#ifndef _epivers_h_
#define _epivers_h_
-#define EPI_MAJOR_VERSION 100
+#define EPI_MAJOR_VERSION 1
-#define EPI_MINOR_VERSION 10
+#define EPI_MINOR_VERSION 579
-#define EPI_RC_NUMBER 545
+#define EPI_RC_NUMBER 77
-#define EPI_INCREMENTAL_NUMBER 0
+#define EPI_INCREMENTAL_NUMBER 41
#define EPI_BUILD_NUMBER 0
-#define EPI_VERSION 100, 10, 545, 0
+#define EPI_VERSION 1, 579, 77, 41
-#define EPI_VERSION_NUM 0x640a2210
+#define EPI_VERSION_NUM 0x012434d29
-#define EPI_VERSION_DEV 100.10.545
+#define EPI_VERSION_DEV 1.579.77.41
/* Driver Version String, ASCII, 32 chars max */
-#define EPI_VERSION_STR "100.10.545.11 (r826445-20200610-1)"
+#define EPI_VERSION_STR "1.579.77.41.10 (r)"
#endif /* _epivers_h_ */
+++ /dev/null
-/*
- * Extended Trap data component interface file.
- *
- * Copyright (C) 1999-2019, Broadcom.
- *
- * Unless you and Broadcom execute a separate written software license
- * agreement governing use of this software, this software is licensed to you
- * under the terms of the GNU General Public License version 2 (the "GPL"),
- * available at http://www.broadcom.com/licenses/GPLv2.php, with the
- * following added to such license:
- *
- * As a special exception, the copyright holders of this software give you
- * permission to link this software with independent modules, and to copy and
- * distribute the resulting executable under terms of your choice, provided that
- * you also meet, for each linked independent module, the terms and conditions of
- * the license of that module. An independent module is a module which is not
- * derived from this software. The special exception does not apply to any
- * modifications of the software.
- *
- * Notwithstanding the above, under no circumstances may you combine this
- * software in any way with any other Broadcom software provided under a license
- * other than the GPL, without Broadcom's express prior written consent.
- *
- *
- * <<Broadcom-WL-IPTag/Open:>>
- *
- * $Id: etd.h 813064 2019-04-03 11:29:38Z $
- */
-
-#ifndef _ETD_H_
-#define _ETD_H_
-
-#if defined(ETD) && !defined(WLETD)
-#include <hnd_trap.h>
-#endif // endif
-#include <bcmutils.h>
-/* Tags for structures being used by etd info iovar.
- * Related structures are defined in wlioctl.h.
- */
-#define ETD_TAG_JOIN_CLASSIFICATION_INFO 10 /* general information about join request */
-#define ETD_TAG_JOIN_TARGET_CLASSIFICATION_INFO 11 /* per target (AP) join information */
-#define ETD_TAG_ASSOC_STATE 12 /* current state of the Device association state machine */
-#define ETD_TAG_CHANNEL 13 /* current channel on which the association was performed */
-#define ETD_TAG_TOTAL_NUM_OF_JOIN_ATTEMPTS 14 /* number of join attempts (bss_retries) */
-
-#define PSMDBG_REG_READ_CNT_FOR_PSMWDTRAP_V1 3
-#define PSMDBG_REG_READ_CNT_FOR_PSMWDTRAP_V2 6
-
-#ifndef _LANGUAGE_ASSEMBLY
-
-#define HND_EXTENDED_TRAP_VERSION 1
-#define HND_EXTENDED_TRAP_BUFLEN 512
-
-typedef struct hnd_ext_trap_hdr {
- uint8 version; /* Extended trap version info */
- uint8 reserved; /* currently unused */
- uint16 len; /* Length of data excluding this header */
- uint8 data[]; /* TLV data */
-} hnd_ext_trap_hdr_t;
-
-typedef enum {
- TAG_TRAP_NONE = 0, /* None trap type */
- TAG_TRAP_SIGNATURE = 1, /* Processor register dumps */
- TAG_TRAP_STACK = 2, /* Processor stack dump (possible code locations) */
- TAG_TRAP_MEMORY = 3, /* Memory subsystem dump */
- TAG_TRAP_DEEPSLEEP = 4, /* Deep sleep health check failures */
- TAG_TRAP_PSM_WD = 5, /* PSM watchdog information */
- TAG_TRAP_PHY = 6, /* Phy related issues */
- TAG_TRAP_BUS = 7, /* Bus level issues */
- TAG_TRAP_MAC_SUSP = 8, /* Mac level suspend issues */
- TAG_TRAP_BACKPLANE = 9, /* Backplane related errors */
- /* Values 10 through 14 are in use by etd_data info iovar */
- TAG_TRAP_PCIE_Q = 15, /* PCIE Queue state during memory trap */
- TAG_TRAP_WLC_STATE = 16, /* WLAN state during memory trap */
- TAG_TRAP_MAC_WAKE = 17, /* Mac level wake issues */
- TAG_TRAP_PHYTXERR_THRESH = 18, /* Phy Tx Err */
- TAG_TRAP_HC_DATA = 19, /* Data collected by HC module */
- TAG_TRAP_LOG_DATA = 20,
- TAG_TRAP_CODE = 21, /* The trap type */
- TAG_TRAP_HMAP = 22, /* HMAP violation Address and Info */
- TAG_TRAP_PCIE_ERR_ATTN = 23, /* PCIE error attn log */
- TAG_TRAP_AXI_ERROR = 24, /* AXI Error */
- TAG_TRAP_AXI_HOST_INFO = 25, /* AXI Host log */
- TAG_TRAP_AXI_SR_ERROR = 26, /* AXI SR error log */
- TAG_TRAP_LAST /* This must be the last entry */
-} hnd_ext_tag_trap_t;
-
-typedef struct hnd_ext_trap_bp_err
-{
- uint32 error;
- uint32 coreid;
- uint32 baseaddr;
- uint32 ioctrl;
- uint32 iostatus;
- uint32 resetctrl;
- uint32 resetstatus;
- uint32 resetreadid;
- uint32 resetwriteid;
- uint32 errlogctrl;
- uint32 errlogdone;
- uint32 errlogstatus;
- uint32 errlogaddrlo;
- uint32 errlogaddrhi;
- uint32 errlogid;
- uint32 errloguser;
- uint32 errlogflags;
- uint32 itipoobaout;
- uint32 itipoobbout;
- uint32 itipoobcout;
- uint32 itipoobdout;
-} hnd_ext_trap_bp_err_t;
-
-#define HND_EXT_TRAP_AXISR_INFO_VER_1 1
-typedef struct hnd_ext_trap_axi_sr_err_v1
-{
- uint8 version;
- uint8 pad[3];
- uint32 error;
- uint32 coreid;
- uint32 baseaddr;
- uint32 ioctrl;
- uint32 iostatus;
- uint32 resetctrl;
- uint32 resetstatus;
- uint32 resetreadid;
- uint32 resetwriteid;
- uint32 errlogctrl;
- uint32 errlogdone;
- uint32 errlogstatus;
- uint32 errlogaddrlo;
- uint32 errlogaddrhi;
- uint32 errlogid;
- uint32 errloguser;
- uint32 errlogflags;
- uint32 itipoobaout;
- uint32 itipoobbout;
- uint32 itipoobcout;
- uint32 itipoobdout;
-
- /* axi_sr_issue_debug */
- uint32 sr_pwr_control;
- uint32 sr_corereset_wrapper_main;
- uint32 sr_corereset_wrapper_aux;
- uint32 sr_main_gci_status_0;
- uint32 sr_aux_gci_status_0;
- uint32 sr_dig_gci_status_0;
-} hnd_ext_trap_axi_sr_err_v1_t;
-
-#define HND_EXT_TRAP_PSMWD_INFO_VER 1
-typedef struct hnd_ext_trap_psmwd_v1 {
- uint16 xtag;
- uint16 version; /* version of the information following this */
- uint32 i32_maccontrol;
- uint32 i32_maccommand;
- uint32 i32_macintstatus;
- uint32 i32_phydebug;
- uint32 i32_clk_ctl_st;
- uint32 i32_psmdebug[PSMDBG_REG_READ_CNT_FOR_PSMWDTRAP_V1];
- uint16 i16_0x1a8; /* gated clock en */
- uint16 i16_0x406; /* Rcv Fifo Ctrl */
- uint16 i16_0x408; /* Rx ctrl 1 */
- uint16 i16_0x41a; /* Rxe Status 1 */
- uint16 i16_0x41c; /* Rxe Status 2 */
- uint16 i16_0x424; /* rcv wrd count 0 */
- uint16 i16_0x426; /* rcv wrd count 1 */
- uint16 i16_0x456; /* RCV_LFIFO_STS */
- uint16 i16_0x480; /* PSM_SLP_TMR */
- uint16 i16_0x490; /* PSM BRC */
- uint16 i16_0x500; /* TXE CTRL */
- uint16 i16_0x50e; /* TXE Status */
- uint16 i16_0x55e; /* TXE_xmtdmabusy */
- uint16 i16_0x566; /* TXE_XMTfifosuspflush */
- uint16 i16_0x690; /* IFS Stat */
- uint16 i16_0x692; /* IFS_MEDBUSY_CTR */
- uint16 i16_0x694; /* IFS_TX_DUR */
- uint16 i16_0x6a0; /* SLow_CTL */
- uint16 i16_0x838; /* TXE_AQM fifo Ready */
- uint16 i16_0x8c0; /* Dagg ctrl */
- uint16 shm_prewds_cnt;
- uint16 shm_txtplufl_cnt;
- uint16 shm_txphyerr_cnt;
- uint16 pad;
-} hnd_ext_trap_psmwd_v1_t;
-
-typedef struct hnd_ext_trap_psmwd {
- uint16 xtag;
- uint16 version; /* version of the information following this */
- uint32 i32_maccontrol;
- uint32 i32_maccommand;
- uint32 i32_macintstatus;
- uint32 i32_phydebug;
- uint32 i32_clk_ctl_st;
- uint32 i32_psmdebug[PSMDBG_REG_READ_CNT_FOR_PSMWDTRAP_V2];
- uint16 i16_0x4b8; /* psm_brwk_0 */
- uint16 i16_0x4ba; /* psm_brwk_1 */
- uint16 i16_0x4bc; /* psm_brwk_2 */
- uint16 i16_0x4be; /* psm_brwk_2 */
- uint16 i16_0x1a8; /* gated clock en */
- uint16 i16_0x406; /* Rcv Fifo Ctrl */
- uint16 i16_0x408; /* Rx ctrl 1 */
- uint16 i16_0x41a; /* Rxe Status 1 */
- uint16 i16_0x41c; /* Rxe Status 2 */
- uint16 i16_0x424; /* rcv wrd count 0 */
- uint16 i16_0x426; /* rcv wrd count 1 */
- uint16 i16_0x456; /* RCV_LFIFO_STS */
- uint16 i16_0x480; /* PSM_SLP_TMR */
- uint16 i16_0x500; /* TXE CTRL */
- uint16 i16_0x50e; /* TXE Status */
- uint16 i16_0x55e; /* TXE_xmtdmabusy */
- uint16 i16_0x566; /* TXE_XMTfifosuspflush */
- uint16 i16_0x690; /* IFS Stat */
- uint16 i16_0x692; /* IFS_MEDBUSY_CTR */
- uint16 i16_0x694; /* IFS_TX_DUR */
- uint16 i16_0x6a0; /* SLow_CTL */
- uint16 i16_0x490; /* psm_brc */
- uint16 i16_0x4da; /* psm_brc_1 */
- uint16 i16_0x838; /* TXE_AQM fifo Ready */
- uint16 i16_0x8c0; /* Dagg ctrl */
- uint16 shm_prewds_cnt;
- uint16 shm_txtplufl_cnt;
- uint16 shm_txphyerr_cnt;
-} hnd_ext_trap_psmwd_t;
-
-#define HEAP_HISTOGRAM_DUMP_LEN 6
-#define HEAP_MAX_SZ_BLKS_LEN 2
-
-/* Ignore chunks for which there are fewer than this many instances, irrespective of size */
-#define HEAP_HISTOGRAM_INSTANCE_MIN 4
-
-/*
- * Use the last two length values for chunks larger than this, or when we run out of
- * histogram entries (because we have too many different sized chunks) to store "other"
- */
-#define HEAP_HISTOGRAM_SPECIAL 0xfffeu
-
-#define HEAP_HISTOGRAM_GRTR256K 0xffffu
-
-typedef struct hnd_ext_trap_heap_err {
- uint32 arena_total;
- uint32 heap_free;
- uint32 heap_inuse;
- uint32 mf_count;
- uint32 stack_lwm;
- uint16 heap_histogm[HEAP_HISTOGRAM_DUMP_LEN * 2]; /* size/number */
- uint16 max_sz_free_blk[HEAP_MAX_SZ_BLKS_LEN];
-} hnd_ext_trap_heap_err_t;
-
-#define MEM_TRAP_NUM_WLC_TX_QUEUES 6
-#define HND_EXT_TRAP_WLC_MEM_ERR_VER_V2 2
-
-typedef struct hnd_ext_trap_wlc_mem_err {
- uint8 instance;
- uint8 associated;
- uint8 soft_ap_client_cnt;
- uint8 peer_cnt;
- uint16 txqueue_len[MEM_TRAP_NUM_WLC_TX_QUEUES];
-} hnd_ext_trap_wlc_mem_err_t;
-
-typedef struct hnd_ext_trap_wlc_mem_err_v2 {
- uint16 version;
- uint16 pad;
- uint8 instance;
- uint8 stas_associated;
- uint8 aps_associated;
- uint8 soft_ap_client_cnt;
- uint16 txqueue_len[MEM_TRAP_NUM_WLC_TX_QUEUES];
-} hnd_ext_trap_wlc_mem_err_v2_t;
-
-#define HND_EXT_TRAP_WLC_MEM_ERR_VER_V3 3
-
-typedef struct hnd_ext_trap_wlc_mem_err_v3 {
- uint8 version;
- uint8 instance;
- uint8 stas_associated;
- uint8 aps_associated;
- uint8 soft_ap_client_cnt;
- uint8 peer_cnt;
- uint16 txqueue_len[MEM_TRAP_NUM_WLC_TX_QUEUES];
-} hnd_ext_trap_wlc_mem_err_v3_t;
-
-typedef struct hnd_ext_trap_pcie_mem_err {
- uint16 d2h_queue_len;
- uint16 d2h_req_queue_len;
-} hnd_ext_trap_pcie_mem_err_t;
-
-#define MAX_DMAFIFO_ENTRIES_V1 1
-#define MAX_DMAFIFO_DESC_ENTRIES_V1 2
-#define HND_EXT_TRAP_AXIERROR_SIGNATURE 0xbabebabe
-#define HND_EXT_TRAP_AXIERROR_VERSION_1 1
-
-/* Structure to collect debug info of descriptor entry for dma channel on encountering AXI Error */
-/* Below three structures are dependant, any change will bump version of all the three */
-
-typedef struct hnd_ext_trap_desc_entry_v1 {
- uint32 ctrl1; /* descriptor entry at din < misc control bits > */
- uint32 ctrl2; /* descriptor entry at din <buffer count and address extension> */
- uint32 addrlo; /* descriptor entry at din <address of data buffer, bits 31:0> */
- uint32 addrhi; /* descriptor entry at din <address of data buffer, bits 63:32> */
-} dma_dentry_v1_t;
-
-/* Structure to collect debug info about a dma channel on encountering AXI Error */
-typedef struct hnd_ext_trap_dma_fifo_v1 {
- uint8 valid; /* no of valid desc entries filled, non zero = fifo entry valid */
- uint8 direction; /* TX=1, RX=2, currently only using TX */
- uint16 index; /* Index of the DMA channel in system */
- uint32 dpa; /* Expected Address of Descriptor table from software state */
- uint32 desc_lo; /* Low Address of Descriptor table programmed in DMA register */
- uint32 desc_hi; /* High Address of Descriptor table programmed in DMA register */
- uint16 din; /* rxin / txin */
- uint16 dout; /* rxout / txout */
- dma_dentry_v1_t dentry[MAX_DMAFIFO_DESC_ENTRIES_V1]; /* Descriptor Entires */
-} dma_fifo_v1_t;
-
-typedef struct hnd_ext_trap_axi_error_v1 {
- uint8 version; /* version = 1 */
- uint8 dma_fifo_valid_count; /* Number of valid dma_fifo entries */
- uint16 length; /* length of whole structure */
- uint32 signature; /* indicate that its filled with AXI Error data */
- uint32 axi_errorlog_status; /* errlog_status from slave wrapper */
- uint32 axi_errorlog_core; /* errlog_core from slave wrapper */
- uint32 axi_errorlog_lo; /* errlog_lo from slave wrapper */
- uint32 axi_errorlog_hi; /* errlog_hi from slave wrapper */
- uint32 axi_errorlog_id; /* errlog_id from slave wrapper */
- dma_fifo_v1_t dma_fifo[MAX_DMAFIFO_ENTRIES_V1];
-} hnd_ext_trap_axi_error_v1_t;
-
-#define HND_EXT_TRAP_MACSUSP_INFO_VER 1
-typedef struct hnd_ext_trap_macsusp {
- uint16 xtag;
- uint8 version; /* version of the information following this */
- uint8 trap_reason;
- uint32 i32_maccontrol;
- uint32 i32_maccommand;
- uint32 i32_macintstatus;
- uint32 i32_phydebug[4];
- uint32 i32_psmdebug[8];
- uint16 i16_0x41a; /* Rxe Status 1 */
- uint16 i16_0x41c; /* Rxe Status 2 */
- uint16 i16_0x490; /* PSM BRC */
- uint16 i16_0x50e; /* TXE Status */
- uint16 i16_0x55e; /* TXE_xmtdmabusy */
- uint16 i16_0x566; /* TXE_XMTfifosuspflush */
- uint16 i16_0x690; /* IFS Stat */
- uint16 i16_0x692; /* IFS_MEDBUSY_CTR */
- uint16 i16_0x694; /* IFS_TX_DUR */
- uint16 i16_0x7c0; /* WEP CTL */
- uint16 i16_0x838; /* TXE_AQM fifo Ready */
- uint16 i16_0x880; /* MHP_status */
- uint16 shm_prewds_cnt;
- uint16 shm_ucode_dbgst;
-} hnd_ext_trap_macsusp_t;
-
-#define HND_EXT_TRAP_MACENAB_INFO_VER 1
-typedef struct hnd_ext_trap_macenab {
- uint16 xtag;
- uint8 version; /* version of the information following this */
- uint8 trap_reason;
- uint32 i32_maccontrol;
- uint32 i32_maccommand;
- uint32 i32_macintstatus;
- uint32 i32_psmdebug[8];
- uint32 i32_clk_ctl_st;
- uint32 i32_powerctl;
- uint16 i16_0x1a8; /* gated clock en */
- uint16 i16_0x480; /* PSM_SLP_TMR */
- uint16 i16_0x490; /* PSM BRC */
- uint16 i16_0x600; /* TSF CTL */
- uint16 i16_0x690; /* IFS Stat */
- uint16 i16_0x692; /* IFS_MEDBUSY_CTR */
- uint16 i16_0x6a0; /* SLow_CTL */
- uint16 i16_0x6a6; /* SLow_FRAC */
- uint16 i16_0x6a8; /* fast power up delay */
- uint16 i16_0x6aa; /* SLow_PER */
- uint16 shm_ucode_dbgst;
- uint16 PAD;
-} hnd_ext_trap_macenab_t;
-
-#define HND_EXT_TRAP_PHY_INFO_VER_1 (1)
-typedef struct hnd_ext_trap_phydbg {
- uint16 err;
- uint16 RxFeStatus;
- uint16 TxFIFOStatus0;
- uint16 TxFIFOStatus1;
- uint16 RfseqMode;
- uint16 RfseqStatus0;
- uint16 RfseqStatus1;
- uint16 RfseqStatus_Ocl;
- uint16 RfseqStatus_Ocl1;
- uint16 OCLControl1;
- uint16 TxError;
- uint16 bphyTxError;
- uint16 TxCCKError;
- uint16 TxCtrlWrd0;
- uint16 TxCtrlWrd1;
- uint16 TxCtrlWrd2;
- uint16 TxLsig0;
- uint16 TxLsig1;
- uint16 TxVhtSigA10;
- uint16 TxVhtSigA11;
- uint16 TxVhtSigA20;
- uint16 TxVhtSigA21;
- uint16 txPktLength;
- uint16 txPsdulengthCtr;
- uint16 gpioClkControl;
- uint16 gpioSel;
- uint16 pktprocdebug;
- uint16 PAD;
- uint32 gpioOut[3];
-} hnd_ext_trap_phydbg_t;
-
-/* unique IDs for separate cores in SI */
-#define REGDUMP_MASK_MAC0 BCM_BIT(1)
-#define REGDUMP_MASK_ARM BCM_BIT(2)
-#define REGDUMP_MASK_PCIE BCM_BIT(3)
-#define REGDUMP_MASK_MAC1 BCM_BIT(4)
-#define REGDUMP_MASK_PMU BCM_BIT(5)
-
-typedef struct {
- uint16 reg_offset;
- uint16 core_mask;
-} reg_dump_config_t;
-
-#define HND_EXT_TRAP_PHY_INFO_VER 2
-typedef struct hnd_ext_trap_phydbg_v2 {
- uint8 version;
- uint8 len;
- uint16 err;
- uint16 RxFeStatus;
- uint16 TxFIFOStatus0;
- uint16 TxFIFOStatus1;
- uint16 RfseqMode;
- uint16 RfseqStatus0;
- uint16 RfseqStatus1;
- uint16 RfseqStatus_Ocl;
- uint16 RfseqStatus_Ocl1;
- uint16 OCLControl1;
- uint16 TxError;
- uint16 bphyTxError;
- uint16 TxCCKError;
- uint16 TxCtrlWrd0;
- uint16 TxCtrlWrd1;
- uint16 TxCtrlWrd2;
- uint16 TxLsig0;
- uint16 TxLsig1;
- uint16 TxVhtSigA10;
- uint16 TxVhtSigA11;
- uint16 TxVhtSigA20;
- uint16 TxVhtSigA21;
- uint16 txPktLength;
- uint16 txPsdulengthCtr;
- uint16 gpioClkControl;
- uint16 gpioSel;
- uint16 pktprocdebug;
- uint32 gpioOut[3];
- uint32 additional_regs[1];
-} hnd_ext_trap_phydbg_v2_t;
-
-#define HND_EXT_TRAP_PHY_INFO_VER_3 (3)
-typedef struct hnd_ext_trap_phydbg_v3 {
- uint8 version;
- uint8 len;
- uint16 err;
- uint16 RxFeStatus;
- uint16 TxFIFOStatus0;
- uint16 TxFIFOStatus1;
- uint16 RfseqMode;
- uint16 RfseqStatus0;
- uint16 RfseqStatus1;
- uint16 RfseqStatus_Ocl;
- uint16 RfseqStatus_Ocl1;
- uint16 OCLControl1;
- uint16 TxError;
- uint16 bphyTxError;
- uint16 TxCCKError;
- uint16 TxCtrlWrd0;
- uint16 TxCtrlWrd1;
- uint16 TxCtrlWrd2;
- uint16 TxLsig0;
- uint16 TxLsig1;
- uint16 TxVhtSigA10;
- uint16 TxVhtSigA11;
- uint16 TxVhtSigA20;
- uint16 TxVhtSigA21;
- uint16 txPktLength;
- uint16 txPsdulengthCtr;
- uint16 gpioClkControl;
- uint16 gpioSel;
- uint16 pktprocdebug;
- uint32 gpioOut[3];
- uint16 HESigURateFlagStatus;
- uint16 HESigUsRateFlagStatus;
- uint32 additional_regs[1];
-} hnd_ext_trap_phydbg_v3_t;
-
-/* Phy TxErr Dump Structure */
-#define HND_EXT_TRAP_PHYTXERR_INFO_VER 1
-#define HND_EXT_TRAP_PHYTXERR_INFO_VER_V2 2
-typedef struct hnd_ext_trap_macphytxerr {
- uint8 version; /* version of the information following this */
- uint8 trap_reason;
- uint16 i16_0x63E; /* tsf_tmr_rx_ts */
- uint16 i16_0x640; /* tsf_tmr_tx_ts */
- uint16 i16_0x642; /* tsf_tmr_rx_end_ts */
- uint16 i16_0x846; /* TDC_FrmLen0 */
- uint16 i16_0x848; /* TDC_FrmLen1 */
- uint16 i16_0x84a; /* TDC_Txtime */
- uint16 i16_0xa5a; /* TXE_BytCntInTxFrmLo */
- uint16 i16_0xa5c; /* TXE_BytCntInTxFrmHi */
- uint16 i16_0x856; /* TDC_VhtPsduLen0 */
- uint16 i16_0x858; /* TDC_VhtPsduLen1 */
- uint16 i16_0x490; /* psm_brc */
- uint16 i16_0x4d8; /* psm_brc_1 */
- uint16 shm_txerr_reason;
- uint16 shm_pctl0;
- uint16 shm_pctl1;
- uint16 shm_pctl2;
- uint16 shm_lsig0;
- uint16 shm_lsig1;
- uint16 shm_plcp0;
- uint16 shm_plcp1;
- uint16 shm_plcp2;
- uint16 shm_vht_sigb0;
- uint16 shm_vht_sigb1;
- uint16 shm_tx_tst;
- uint16 shm_txerr_tm;
- uint16 shm_curchannel;
- uint16 shm_crx_rxtsf_pos;
- uint16 shm_lasttx_tsf;
- uint16 shm_s_rxtsftmrval;
- uint16 i16_0x29; /* Phy indirect address */
- uint16 i16_0x2a; /* Phy indirect address */
-} hnd_ext_trap_macphytxerr_t;
-
-typedef struct hnd_ext_trap_macphytxerr_v2 {
- uint8 version; /* version of the information following this */
- uint8 trap_reason;
- uint16 i16_0x63E; /* tsf_tmr_rx_ts */
- uint16 i16_0x640; /* tsf_tmr_tx_ts */
- uint16 i16_0x642; /* tsf_tmr_rx_end_ts */
- uint16 i16_0x846; /* TDC_FrmLen0 */
- uint16 i16_0x848; /* TDC_FrmLen1 */
- uint16 i16_0x84a; /* TDC_Txtime */
- uint16 i16_0xa5a; /* TXE_BytCntInTxFrmLo */
- uint16 i16_0xa5c; /* TXE_BytCntInTxFrmHi */
- uint16 i16_0x856; /* TDC_VhtPsduLen0 */
- uint16 i16_0x858; /* TDC_VhtPsduLen1 */
- uint16 i16_0x490; /* psm_brc */
- uint16 i16_0x4d8; /* psm_brc_1 */
- uint16 shm_txerr_reason;
- uint16 shm_pctl0;
- uint16 shm_pctl1;
- uint16 shm_pctl2;
- uint16 shm_lsig0;
- uint16 shm_lsig1;
- uint16 shm_plcp0;
- uint16 shm_plcp1;
- uint16 shm_plcp2;
- uint16 shm_vht_sigb0;
- uint16 shm_vht_sigb1;
- uint16 shm_tx_tst;
- uint16 shm_txerr_tm;
- uint16 shm_curchannel;
- uint16 shm_crx_rxtsf_pos;
- uint16 shm_lasttx_tsf;
- uint16 shm_s_rxtsftmrval;
- uint16 i16_0x29; /* Phy indirect address */
- uint16 i16_0x2a; /* Phy indirect address */
- uint8 phyerr_bmac_cnt; /* number of times bmac raised phy tx err */
- uint8 phyerr_bmac_rsn; /* bmac reason for phy tx error */
- uint16 pad;
- uint32 recv_fifo_status[3][2]; /* Rcv Status0 & Rcv Status1 for 3 Rx fifos */
-} hnd_ext_trap_macphytxerr_v2_t;
-
-#define HND_EXT_TRAP_PCIE_ERR_ATTN_VER_1 (1u)
-#define MAX_AER_HDR_LOG_REGS (4u)
-typedef struct hnd_ext_trap_pcie_err_attn_v1 {
- uint8 version;
- uint8 pad[3];
- uint32 err_hdr_logreg1;
- uint32 err_hdr_logreg2;
- uint32 err_hdr_logreg3;
- uint32 err_hdr_logreg4;
- uint32 err_code_logreg;
- uint32 err_type;
- uint32 err_code_state;
- uint32 last_err_attn_ts;
- uint32 cfg_tlp_hdr[MAX_AER_HDR_LOG_REGS];
-} hnd_ext_trap_pcie_err_attn_v1_t;
-
-#define MAX_EVENTLOG_BUFFERS 48
-typedef struct eventlog_trapdata_info {
- uint32 num_elements;
- uint32 seq_num;
- uint32 log_arr_addr;
-} eventlog_trapdata_info_t;
-
-typedef struct eventlog_trap_buf_info {
- uint32 len;
- uint32 buf_addr;
-} eventlog_trap_buf_info_t;
-
-#if defined(ETD) && !defined(WLETD)
-#define ETD_SW_FLAG_MEM 0x00000001
-
-int etd_init(osl_t *osh);
-int etd_register_trap_ext_callback(void *cb, void *arg);
-int (etd_register_trap_ext_callback_late)(void *cb, void *arg);
-uint32 *etd_get_trap_ext_data(void);
-uint32 etd_get_trap_ext_swflags(void);
-void etd_set_trap_ext_swflag(uint32 flag);
-void etd_notify_trap_ext_callback(trap_t *tr);
-reg_dump_config_t *etd_get_reg_dump_config_tbl(void);
-uint etd_get_reg_dump_config_len(void);
-
-extern bool _etd_enab;
-
- #define ETD_ENAB(pub) (_etd_enab)
-
-#else
-#define ETD_ENAB(pub) (0)
-#endif /* WLETD */
-
-#endif /* !LANGUAGE_ASSEMBLY */
-
-#endif /* _ETD_H_ */
/*
* From FreeBSD 2.2.7: Fundamental constants relating to ethernet.
*
- * Copyright (C) 1999-2019, Broadcom.
- *
+ * Copyright (C) 1999-2017, Broadcom Corporation
+ *
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
* under the terms of the GNU General Public License version 2 (the "GPL"),
* available at http://www.broadcom.com/licenses/GPLv2.php, with the
* following added to such license:
- *
+ *
* As a special exception, the copyright holders of this software give you
* permission to link this software with independent modules, and to copy and
* distribute the resulting executable under terms of your choice, provided that
* the license of that module. An independent module is a module which is not
* derived from this software. The special exception does not apply to any
* modifications of the software.
- *
+ *
* Notwithstanding the above, under no circumstances may you combine this
* software in any way with any other Broadcom software provided under a license
* other than the GPL, without Broadcom's express prior written consent.
#ifndef _TYPEDEFS_H_
#include "typedefs.h"
-#endif // endif
+#endif
/* This marks the start of a packed structure section. */
#include <packed_section_start.h>
+
/*
* The number of bytes in an ethernet (MAC) address.
*/
*/
#define ETHER_ISMULTI(ea) (((const uint8 *)(ea))[0] & 1)
+
/* compare two ethernet addresses - assumes the pointers can be referenced as shorts */
#define eacmp(a, b) ((((const uint16 *)(a))[0] ^ ((const uint16 *)(b))[0]) | \
(((const uint16 *)(a))[1] ^ ((const uint16 *)(b))[1]) | \
((uint16 *)(d))[6] = ((const uint16 *)(s))[6]; \
} while (0)
+
static const struct ether_addr ether_bcast = {{255, 255, 255, 255, 255, 255}};
static const struct ether_addr ether_null = {{0, 0, 0, 0, 0, 0}};
static const struct ether_addr ether_ipv6_mcast = {{0x33, 0x33, 0x00, 0x00, 0x00, 0x01}};
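/*
 * Reviewer's illustrative sketch (not part of the patch): the helpers above
 * are typically combined when classifying a received frame.  ETHER_ISMULTI()
 * tests the group bit, eacmp() yields 0 only when the two addresses match
 * word-for-word (hence the "referenced as shorts" assumption), and
 * ether_bcast is one of the convenience constants declared here.  'my_addr'
 * is a placeholder argument, not an identifier from this header.
 */
static int ether_frame_is_for_us(const struct ether_addr *da,
    const struct ether_addr *my_addr)
{
    if (ETHER_ISMULTI(da))
        return 1;                       /* group-addressed frame */
    if (eacmp(da, &ether_bcast) == 0)
        return 1;                       /* all-ones broadcast */
    return (eacmp(da, my_addr) == 0);   /* unicast to our MAC */
}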
/*
* EVENT_LOG system definitions
*
- * Copyright (C) 1999-2019, Broadcom.
- *
+ * Copyright (C) 1999-2017, Broadcom Corporation
+ *
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
* under the terms of the GNU General Public License version 2 (the "GPL"),
* available at http://www.broadcom.com/licenses/GPLv2.php, with the
* following added to such license:
- *
+ *
* As a special exception, the copyright holders of this software give you
* permission to link this software with independent modules, and to copy and
* distribute the resulting executable under terms of your choice, provided that
* the license of that module. An independent module is a module which is not
* derived from this software. The special exception does not apply to any
* modifications of the software.
- *
+ *
* Notwithstanding the above, under no circumstances may you combine this
* software in any way with any other Broadcom software provided under a license
* other than the GPL, without Broadcom's express prior written consent.
*
* <<Broadcom-WL-IPTag/Open:>>
*
- * $Id: event_log.h 717896 2017-08-28 21:56:11Z $
+ * $Id: event_log.h 711908 2017-07-20 10:37:34Z $
*/
#ifndef _EVENT_LOG_H_
/* We make sure that the block size will fit in a single packet
* (allowing for a bit of overhead on each packet
*/
-#if defined(BCMPCIEDEV)
-#define EVENT_LOG_MAX_BLOCK_SIZE 1648
-#else
#define EVENT_LOG_MAX_BLOCK_SIZE 1400
-#endif // endif
#define EVENT_LOG_WL_BLOCK_SIZE 0x200
#define EVENT_LOG_PSM_BLOCK_SIZE 0x200
#define EVENT_LOG_BUS_BLOCK_SIZE 0x200
#define EVENT_LOG_ERROR_BLOCK_SIZE 0x200
-/* Maximum event log record payload size = 1016 bytes or 254 words. */
-#define EVENT_LOG_MAX_RECORD_PAYLOAD_SIZE 254
+/* Maximum event log record payload size = 1024 bytes or 256 words. */
+#define EVENT_LOG_MAX_RECORD_PAYLOAD_SIZE 256
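/*
 * Reviewer's note (sketch, not part of the patch): the payload limit above is
 * expressed in 32-bit words, so 256 words == 1024 bytes, which still fits in
 * the 1400-byte EVENT_LOG_MAX_BLOCK_SIZE with room for per-record headers.
 * If that relationship is meant to be an invariant, a compile-time check of
 * this shape would make it explicit ('el_payload_fits_block' is a
 * hypothetical name, not an existing symbol):
 */
typedef char el_payload_fits_block[
    ((EVENT_LOG_MAX_RECORD_PAYLOAD_SIZE * 4) < EVENT_LOG_MAX_BLOCK_SIZE) ? 1 : -1];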
/*
 * There are multiple levels of objects defined here:
uint32 event_logs;
} event_log_block_t;
#define EVENT_LOG_BLOCK_HDRLEN 8 /* pktlen 2 + count 2 + extra_hdr_info 4 */
-
-#define EVENT_LOG_BLOCK_LEN 12
+#define NAN_EVENT_LOG_MIN_LENGTH 2 /* Minimum length of Nan event */
typedef enum {
SET_DESTINATION_INVALID = -1,
uint16 size; /* same size for all buffers in one set */
} event_log_set_t;
-/* logstr_hdr_flags */
-#define LOGSTRS_ENCRYPTED 0x1
-
/* Top data structure for access to everything else */
typedef struct event_log_top {
uint32 magic;
uint32 logstrs_size; /* Size of lognums + logstrs area */
uint32 timestamp; /* Last timestamp event */
uint32 cyclecount; /* Cycles at last timestamp event */
- _EL_SET_PTR sets; /* Ptr to array of <num_sets> set ptrs */
+ _EL_SET_PTR sets; /* Ptr to array of <num_sets> set ptrs */
} event_log_top_t;
-/* structure of the trailing 3 words in logstrs.bin */
-typedef struct {
- uint32 fw_id; /* FWID will be written by tool later */
- uint32 flags; /* 0th bit indicates whether encrypted or not */
- /* Keep version and magic last since "header" is appended to the end of logstrs file. */
- uint32 version; /* Header version */
- uint32 log_magic; /* MAGIC number for verification 'LOGS' */
-} logstr_trailer_t;
-
/* Data structure of Keeping the Header from logstrs.bin */
-typedef struct {
- uint32 logstrs_size; /* Size of the file */
- uint32 rom_lognums_offset; /* Offset to the ROM lognum */
- uint32 ram_lognums_offset; /* Offset to the RAM lognum */
- uint32 rom_logstrs_offset; /* Offset to the ROM logstr */
- uint32 ram_logstrs_offset; /* Offset to the RAM logstr */
- logstr_trailer_t trailer;
-} logstr_header_t;
-
-/* Ver 1 Header from logstrs.bin */
typedef struct {
uint32 logstrs_size; /* Size of the file */
uint32 rom_lognums_offset; /* Offset to the ROM lognum */
/* Keep version and magic last since "header" is appended to the end of logstrs file. */
uint32 version; /* Header version */
uint32 log_magic; /* MAGIC number for verification 'LOGS' */
-} logstr_header_v1_t;
+} logstr_header_t;
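/*
 * Reviewer's sketch (not part of the patch): because this header is appended
 * to the end of logstrs.bin, a loader normally copies the trailing
 * sizeof(logstr_header_t) bytes of the file image and then checks
 * log_magic/version before trusting the rom/ram offsets.  'buf' and
 * 'file_size' are placeholders for however the image is obtained, and memcpy
 * is assumed to be available in the build environment.
 */
static int logstrs_copy_trailing_hdr(const uint8 *buf, uint32 file_size,
    logstr_header_t *hdr)
{
    if (file_size < sizeof(*hdr))
        return -1;      /* image too small to contain the header */
    memcpy(hdr, buf + (file_size - sizeof(*hdr)), sizeof(*hdr));
    return 0;
}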
/*
* Use the following macros for generating log events.
*
*/
-#if !defined(EVENT_LOG_DUMPER)
+#if !defined(EVENT_LOG_DUMPER) && !defined(DHD_EFI)
#ifndef EVENT_LOG_COMPILE
#define _EVENT_LOGE(tag, fmt_num, ...) event_logn(14, tag, fmt_num, __VA_ARGS__)
#define _EVENT_LOGF(tag, fmt_num, ...) event_logn(15, tag, fmt_num, __VA_ARGS__)
+
/* Casting low level macros */
#define _EVENT_LOG_CAST0(tag, fmt_num) \
event_log0(tag, fmt_num)
7, 6, 5, 4, 3, 2, 1, 0) \
(tag, (int) &fmtnum , ## __VA_ARGS__)
+
#define EVENT_LOG_FAST(tag, fmt, ...) \
do { \
if (event_log_tag_sets != NULL) { \
_EVENT_LOG(_EVENT_LOG_CAST, tag, fmt , ## __VA_ARGS__); \
} while (0)
+
#define EVENT_LOG(tag, fmt, ...) EVENT_LOG_COMPACT(tag, fmt , ## __VA_ARGS__)
#define EVENT_LOG_CAST(tag, fmt, ...) EVENT_LOG_COMPACT_CAST(tag, fmt , ## __VA_ARGS__)
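/*
 * Usage sketch (reviewer illustration, not part of the patch): callers pass a
 * tag from event_log_tag.h plus a printf-style format, and the macro
 * machinery above resolves the call to one of the fixed-arity event_logN()
 * entry points at compile time.  'status' and 'reason' are placeholder
 * variables.
 */
static void example_log_assoc_failure(int status, int reason)
{
    EVENT_LOG(EVENT_LOG_TAG_WL_ERROR,
        "assoc failed: status %d reason %d", status, reason);
}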
event_log_caller_return_address(tag); \
} while (0)
-#define EVENT_LOG_IS_ON(tag) (*(event_log_tag_sets + (tag)) & ~EVENT_LOG_TAG_FLAG_SET_MASK)
#define EVENT_LOG_IS_LOG_ON(tag) (*(event_log_tag_sets + (tag)) & EVENT_LOG_TAG_FLAG_LOG)
-#define EVENT_LOG_BUFFER(tag, buf, size) event_log_buffer(tag, buf, size)
#define EVENT_DUMP event_log_buffer
extern uint8 *event_log_tag_sets;
extern int event_log_set_shrink(osl_t *osh, int set_num, int size);
extern int event_log_tag_start(int tag, int set_num, int flags);
-extern int event_log_tag_set_retrieve(int tag);
extern int event_log_tag_stop(int tag);
typedef void (*event_log_logtrace_trigger_fn_t)(void *ctx);
extern void event_log_caller_return_address(int tag);
extern int event_log_set_destination_set(int set, event_log_set_destination_t dest);
extern event_log_set_destination_t event_log_set_destination_get(int set);
-extern int event_log_flush_log_buffer(int set);
-extern uint16 event_log_get_available_space(int set);
-extern bool event_log_is_set_configured(int set_num);
-extern bool event_log_is_tag_valid(int tag);
-/* returns number of blocks available for writing */
-extern int event_log_free_blocks_get(int set);
-extern bool event_log_is_ready(void);
#endif /* EVENT_LOG_DUMPER */
* This file describes the payloads of event log entries that are data buffers
* rather than formatted string entries. The contents are generally XTLVs.
*
- * Copyright (C) 1999-2019, Broadcom.
- *
+ * Copyright (C) 1999-2017, Broadcom Corporation
+ *
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
* under the terms of the GNU General Public License version 2 (the "GPL"),
* available at http://www.broadcom.com/licenses/GPLv2.php, with the
* following added to such license:
- *
+ *
* As a special exception, the copyright holders of this software give you
* permission to link this software with independent modules, and to copy and
* distribute the resulting executable under terms of your choice, provided that
* the license of that module. An independent module is a module which is not
* derived from this software. The special exception does not apply to any
* modifications of the software.
- *
+ *
* Notwithstanding the above, under no circumstances may you combine this
* software in any way with any other Broadcom software provided under a license
* other than the GPL, without Broadcom's express prior written consent.
*
* <<Broadcom-WL-IPTag/Open:>>
*
- * $Id: event_log_payload.h 825102 2019-06-12 22:26:41Z $
+ * $Id: event_log_payload.h 700076 2017-05-17 14:42:22Z $
*/
#ifndef _EVENT_LOG_PAYLOAD_H_
#include <ethernet.h>
#include <event_log_tag.h>
-/**
- * A (legacy) timestamp message
- */
-typedef struct ts_message {
- uint32 timestamp;
- uint32 cyclecount;
-} ts_msg_t;
-
-/**
- * Enhanced timestamp message
- */
-typedef struct enhanced_ts_message {
- uint32 version;
- /* More data, depending on version */
- uint8 data[];
-} ets_msg_t;
-
-#define ENHANCED_TS_MSG_VERSION_1 (1u)
-
-/**
- * Enhanced timestamp message, version 1
- */
-typedef struct enhanced_ts_message_v1 {
- uint32 version;
- uint32 timestamp; /* PMU time, in milliseconds */
- uint32 cyclecount;
- uint32 cpu_freq;
-} ets_msg_v1_t;
-
#define EVENT_LOG_XTLV_ID_STR 0 /**< XTLV ID for a string */
#define EVENT_LOG_XTLV_ID_TXQ_SUM 1 /**< XTLV ID for txq_summary_t */
#define EVENT_LOG_XTLV_ID_SCBDATA_SUM 2 /**< XTLV ID for cb_subq_summary_t */
#define SCBDATA_APPS_F_OFF_BLOCKED 0x00000200
#define SCBDATA_APPS_F_OFF_IN_PROG 0x00000400
+
/**
* Summary for tx datapath AMPDU SCB cubby
* This is a specific data structure to describe the AMPDU datapath state for an SCB
/* Scan flags */
#define SCAN_SUM_CHAN_INFO 0x1
/* Scan_sum flags */
-#define BAND5G_SIB_ENAB 0x2
-#define BAND2G_SIB_ENAB 0x4
-#define PARALLEL_SCAN 0x8
-#define SCAN_ABORT 0x10
-#define SC_LOWSPAN_SCAN 0x20
-#define SC_SCAN 0x40
+#define BAND5G_SIB_ENAB 0x2
+#define BAND2G_SIB_ENAB 0x4
+#define PARALLEL_SCAN 0x8
+#define SCAN_ABORT 0x10
/* scan_channel_info flags */
#define ACTIVE_SCAN_SCN_SUM 0x2
#define SCAN_SUM_WLC_CORE0 0x4
#define SCAN_SUM_WLC_CORE1 0x8
-#define HOME_CHAN 0x10
-#define SCAN_SUM_SCAN_CORE 0x20
+#define HOME_CHAN 0x10
typedef struct wl_scan_ssid_info
{
int32 dwell_time;
} wl_chansw_event_log_record_t;
-typedef struct wl_chansw_event_log_record_v2 {
- uint32 time; /* Time in us */
- uint32 old_chanspec; /* Old channel spec */
- uint32 new_chanspec; /* New channel spec */
- uint32 chansw_reason; /* Reason for channel change */
- int32 dwell_time;
- uint32 core;
- int32 phychanswtime; /* channel switch time */
-} wl_chansw_event_log_record_v2_t;
-
/* Sub-block type for EVENT_LOG_TAG_AMPDU_DUMP */
-typedef enum {
- WL_AMPDU_STATS_TYPE_RXMCSx1 = 0, /* RX MCS rate (Nss = 1) */
- WL_AMPDU_STATS_TYPE_RXMCSx2 = 1,
- WL_AMPDU_STATS_TYPE_RXMCSx3 = 2,
- WL_AMPDU_STATS_TYPE_RXMCSx4 = 3,
- WL_AMPDU_STATS_TYPE_RXVHTx1 = 4, /* RX VHT rate (Nss = 1) */
- WL_AMPDU_STATS_TYPE_RXVHTx2 = 5,
- WL_AMPDU_STATS_TYPE_RXVHTx3 = 6,
- WL_AMPDU_STATS_TYPE_RXVHTx4 = 7,
- WL_AMPDU_STATS_TYPE_TXMCSx1 = 8, /* TX MCS rate (Nss = 1) */
- WL_AMPDU_STATS_TYPE_TXMCSx2 = 9,
- WL_AMPDU_STATS_TYPE_TXMCSx3 = 10,
- WL_AMPDU_STATS_TYPE_TXMCSx4 = 11,
- WL_AMPDU_STATS_TYPE_TXVHTx1 = 12, /* TX VHT rate (Nss = 1) */
- WL_AMPDU_STATS_TYPE_TXVHTx2 = 13,
- WL_AMPDU_STATS_TYPE_TXVHTx3 = 14,
- WL_AMPDU_STATS_TYPE_TXVHTx4 = 15,
- WL_AMPDU_STATS_TYPE_RXMCSSGI = 16, /* RX SGI usage (for all MCS rates) */
- WL_AMPDU_STATS_TYPE_TXMCSSGI = 17, /* TX SGI usage (for all MCS rates) */
- WL_AMPDU_STATS_TYPE_RXVHTSGI = 18, /* RX SGI usage (for all VHT rates) */
- WL_AMPDU_STATS_TYPE_TXVHTSGI = 19, /* TX SGI usage (for all VHT rates) */
- WL_AMPDU_STATS_TYPE_RXMCSPER = 20, /* RX PER (for all MCS rates) */
- WL_AMPDU_STATS_TYPE_TXMCSPER = 21, /* TX PER (for all MCS rates) */
- WL_AMPDU_STATS_TYPE_RXVHTPER = 22, /* RX PER (for all VHT rates) */
- WL_AMPDU_STATS_TYPE_TXVHTPER = 23, /* TX PER (for all VHT rates) */
- WL_AMPDU_STATS_TYPE_RXDENS = 24, /* RX AMPDU density */
- WL_AMPDU_STATS_TYPE_TXDENS = 25, /* TX AMPDU density */
- WL_AMPDU_STATS_TYPE_RXMCSOK = 26, /* RX all MCS rates */
- WL_AMPDU_STATS_TYPE_RXVHTOK = 27, /* RX all VHT rates */
- WL_AMPDU_STATS_TYPE_TXMCSALL = 28, /* TX all MCS rates */
- WL_AMPDU_STATS_TYPE_TXVHTALL = 29, /* TX all VHT rates */
- WL_AMPDU_STATS_TYPE_TXMCSOK = 30, /* TX all MCS rates */
- WL_AMPDU_STATS_TYPE_TXVHTOK = 31, /* TX all VHT rates */
- WL_AMPDU_STATS_TYPE_RX_HE_SUOK = 32, /* DL SU MPDU frame per MCS */
- WL_AMPDU_STATS_TYPE_RX_HE_SU_DENS = 33, /* DL SU AMPDU DENSITY */
- WL_AMPDU_STATS_TYPE_RX_HE_MUMIMOOK = 34, /* DL MUMIMO Frame per MCS */
- WL_AMPDU_STATS_TYPE_RX_HE_MUMIMO_DENS = 35, /* DL MUMIMO AMPDU Density */
- WL_AMPDU_STATS_TYPE_RX_HE_DLOFDMAOK = 36, /* DL OFDMA Frame per MCS */
- WL_AMPDU_STATS_TYPE_RX_HE_DLOFDMA_DENS = 37, /* DL OFDMA AMPDU Density */
- WL_AMPDU_STATS_TYPE_RX_HE_DLOFDMA_HIST = 38, /* DL OFDMA frame RU histogram */
- WL_AMPDU_STATS_TYPE_TX_HE_MCSALL = 39, /* TX HE (SU+MU) frames, all rates */
- WL_AMPDU_STATS_TYPE_TX_HE_MCSOK = 40, /* TX HE (SU+MU) frames succeeded */
- WL_AMPDU_STATS_TYPE_TX_HE_MUALL = 41, /* TX MU (UL OFDMA) frames all rates */
- WL_AMPDU_STATS_TYPE_TX_HE_MUOK = 42, /* TX MU (UL OFDMA) frames succeeded */
- WL_AMPDU_STATS_TYPE_TX_HE_RUBW = 43, /* TX UL RU by BW histogram */
- WL_AMPDU_STATS_TYPE_TX_HE_PADDING = 44, /* TX padding total (single value) */
- WL_AMPDU_STATS_MAX_CNTS = 64
-} wl_ampdu_stat_enum_t;
+#define WL_AMPDU_STATS_TYPE_RXMCSx1 0 /* RX MCS rate (Nss = 1) */
+#define WL_AMPDU_STATS_TYPE_RXMCSx2 1
+#define WL_AMPDU_STATS_TYPE_RXMCSx3 2
+#define WL_AMPDU_STATS_TYPE_RXMCSx4 3
+#define WL_AMPDU_STATS_TYPE_RXVHTx1 4 /* RX VHT rate (Nss = 1) */
+#define WL_AMPDU_STATS_TYPE_RXVHTx2 5
+#define WL_AMPDU_STATS_TYPE_RXVHTx3 6
+#define WL_AMPDU_STATS_TYPE_RXVHTx4 7
+#define WL_AMPDU_STATS_TYPE_TXMCSx1 8 /* TX MCS rate (Nss = 1) */
+#define WL_AMPDU_STATS_TYPE_TXMCSx2 9
+#define WL_AMPDU_STATS_TYPE_TXMCSx3 10
+#define WL_AMPDU_STATS_TYPE_TXMCSx4 11
+#define WL_AMPDU_STATS_TYPE_TXVHTx1 12 /* TX VHT rate (Nss = 1) */
+#define WL_AMPDU_STATS_TYPE_TXVHTx2 13
+#define WL_AMPDU_STATS_TYPE_TXVHTx3 14
+#define WL_AMPDU_STATS_TYPE_TXVHTx4 15
+#define WL_AMPDU_STATS_TYPE_RXMCSSGI 16 /* RX SGI usage (for all MCS rates) */
+#define WL_AMPDU_STATS_TYPE_TXMCSSGI 17 /* TX SGI usage (for all MCS rates) */
+#define WL_AMPDU_STATS_TYPE_RXVHTSGI 18 /* RX SGI usage (for all VHT rates) */
+#define WL_AMPDU_STATS_TYPE_TXVHTSGI 19 /* TX SGI usage (for all VHT rates) */
+#define WL_AMPDU_STATS_TYPE_RXMCSPER 20 /* RX PER (for all MCS rates) */
+#define WL_AMPDU_STATS_TYPE_TXMCSPER 21 /* TX PER (for all MCS rates) */
+#define WL_AMPDU_STATS_TYPE_RXVHTPER 22 /* RX PER (for all VHT rates) */
+#define WL_AMPDU_STATS_TYPE_TXVHTPER 23 /* TX PER (for all VHT rates) */
+#define WL_AMPDU_STATS_TYPE_RXDENS 24 /* RX AMPDU density */
+#define WL_AMPDU_STATS_TYPE_TXDENS 25 /* TX AMPDU density */
+#define WL_AMPDU_STATS_TYPE_RXMCSOK 26 /* RX all MCS rates */
+#define WL_AMPDU_STATS_TYPE_RXVHTOK 27 /* RX all VHT rates */
+#define WL_AMPDU_STATS_TYPE_TXMCSALL 28 /* TX all MCS rates */
+#define WL_AMPDU_STATS_TYPE_TXVHTALL 29 /* TX all VHT rates */
+#define WL_AMPDU_STATS_TYPE_TXMCSOK 30 /* TX all MCS rates */
+#define WL_AMPDU_STATS_TYPE_TXVHTOK 31 /* TX all VHT rates */
+
+#define WL_AMPDU_STATS_MAX_CNTS 64
+
typedef struct {
uint16 type; /* AMPDU statistics sub-type */
uint16 len; /* Number of 32-bit counters */
uint32 counters[WL_AMPDU_STATS_MAX_CNTS];
} wl_ampdu_stats_generic_t;
-typedef wl_ampdu_stats_generic_t wl_ampdu_stats_rx_t;
-typedef wl_ampdu_stats_generic_t wl_ampdu_stats_tx_t;
-
typedef struct {
uint16 type; /* AMPDU statistics sub-type */
uint16 len; /* Number of 32-bit counters + 2 */
uint32 aggr_dist[WL_AMPDU_STATS_MAX_CNTS + 1];
} wl_ampdu_stats_aggrsz_t;
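/*
 * Decode sketch (reviewer illustration, not part of the patch): every AMPDU
 * stats sub-block carries its sub-type plus the number of 32-bit counters
 * that follow, so a log parser can walk the counters generically and only
 * interpret them per WL_AMPDU_STATS_TYPE_*.  'emit_counter' is a hypothetical
 * output callback.
 */
static void dump_ampdu_stats(const wl_ampdu_stats_generic_t *st,
    void (*emit_counter)(uint16 type, uint16 idx, uint32 val))
{
    uint16 i;
    uint16 n = (st->len < WL_AMPDU_STATS_MAX_CNTS) ?
        st->len : WL_AMPDU_STATS_MAX_CNTS;

    for (i = 0; i < n; i++)
        emit_counter(st->type, i, st->counters[i]);
}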
-/* Sub-block type for WL_IFSTATS_XTLV_HE_TXMU_STATS */
-typedef enum {
- /* Reserve 0 to avoid potential concerns */
- WL_HE_TXMU_STATS_TYPE_TIME = 1, /* per-dBm, total usecs transmitted */
- WL_HE_TXMU_STATS_TYPE_PAD_TIME = 2, /* per-dBm, padding usecs transmitted */
-} wl_he_txmu_stat_enum_t;
-#define WL_IFSTATS_HE_TXMU_MAX 32u
-
/* Sub-block type for EVENT_LOG_TAG_MSCHPROFILE */
#define WL_MSCH_PROFILER_START 0 /* start event check */
#define WL_MSCH_PROFILER_EXIT 1 /* exit event check */
uint16 chanspec_list[WL_MSCH_NUMCHANNELS];
} msch_register_params_t;
-typedef struct {
- uint32 txallfrm; /**< total number of frames sent, incl. Data, ACK, RTS, CTS,
- * Control Management (includes retransmissions)
- */
- uint32 rxrsptmout; /**< number of response timeouts for transmitted frames
- * expecting a response
- */
- uint32 rxstrt; /**< number of received frames with a good PLCP */
- uint32 rxbadplcp; /**< number of parity check of the PLCP header failed */
- uint32 rxcrsglitch; /**< PHY was able to correlate the preamble but not the header */
- uint32 rxnodelim; /**< number of no valid delimiter detected by ampdu parser */
- uint32 bphy_badplcp; /**< number of bad PLCP reception on BPHY rate */
- uint32 bphy_rxcrsglitch; /**< PHY count of bphy glitches */
- uint32 rxbadfcs; /**< number of frames for which the CRC check failed in the MAC */
- uint32 rxanyerr; /**< Any RX error that is not counted by other counters. */
- uint32 rxbeaconmbss; /**< beacons received from member of BSS */
- uint32 rxdtucastmbss; /**< number of received DATA frames with good FCS and matching RA */
- uint32 rxdtocast; /**< number of received DATA frames (good FCS and no matching RA) */
- uint32 rxtoolate; /**< receive too late */
- uint32 goodfcs; /**< Good fcs counters */
- uint32 rxf0ovfl; /** < Rx FIFO0 overflow counters information */
- uint32 rxf1ovfl; /** < Rx FIFO1 overflow counters information */
-} phy_periodic_counters_v1_t;
-
-typedef struct phycal_log_cmn {
- uint16 chanspec; /* Current phy chanspec */
- uint8 last_cal_reason; /* Last Cal Reason */
- uint8 pad1; /* Padding byte to align with word */
- uint last_cal_time; /* Last cal time in sec */
-} phycal_log_cmn_t;
-
-typedef struct phycal_log_core {
- uint16 ofdm_txa; /* OFDM Tx IQ Cal a coeff */
- uint16 ofdm_txb; /* OFDM Tx IQ Cal b coeff */
- uint16 ofdm_txd; /* contain di & dq */
- uint16 bphy_txa; /* BPHY Tx IQ Cal a coeff */
- uint16 bphy_txb; /* BPHY Tx IQ Cal b coeff */
- uint16 bphy_txd; /* contain di & dq */
-
- uint16 rxa; /* Rx IQ Cal A coefficient */
- uint16 rxb; /* Rx IQ Cal B coefficient */
- int32 rxs; /* FDIQ Slope coefficient */
-
- uint8 baseidx; /* TPC Base index */
- uint8 adc_coeff_cap0_adcI; /* ADC CAP Cal Cap0 I */
- uint8 adc_coeff_cap1_adcI; /* ADC CAP Cal Cap1 I */
- uint8 adc_coeff_cap2_adcI; /* ADC CAP Cal Cap2 I */
- uint8 adc_coeff_cap0_adcQ; /* ADC CAP Cal Cap0 Q */
- uint8 adc_coeff_cap1_adcQ; /* ADC CAP Cal Cap1 Q */
- uint8 adc_coeff_cap2_adcQ; /* ADC CAP Cal Cap2 Q */
- uint8 pad; /* Padding byte to align with word */
-} phycal_log_core_t;
-
-#define PHYCAL_LOG_VER1 (1u)
-
-typedef struct phycal_log_v1 {
- uint8 version; /* Logging structure version */
- uint8 numcores; /* Number of cores for which core-specific data is present */
- uint16 length; /* Length of the entire structure */
- phycal_log_cmn_t phycal_log_cmn; /* Logging common structure */
- /* This will be a variable length based on the numcores field defined above */
- phycal_log_core_t phycal_log_core[1];
-} phycal_log_v1_t;
-
-typedef struct phy_periodic_log_cmn {
- uint16 chanspec; /* Current phy chanspec */
- uint16 vbatmeas; /* Measured VBAT sense value */
- uint16 featureflag; /* Currently active feature flags */
- int8 chiptemp; /* Chip temperature */
- int8 femtemp; /* Fem temperature */
-
- uint32 nrate; /* Current Tx nrate */
-
- uint8 cal_phase_id; /* Current Multi phase cal ID */
- uint8 rxchain; /* Rx Chain */
- uint8 txchain; /* Tx Chain */
- uint8 ofdm_desense; /* OFDM desense */
-
- uint8 bphy_desense; /* BPHY desense */
- uint8 pll_lockstatus; /* PLL Lock status */
- uint8 pad1; /* Padding byte to align with word */
- uint8 pad2; /* Padding byte to align with word */
-
- uint32 duration; /**< millisecs spent sampling this channel */
- uint32 congest_ibss; /**< millisecs in our bss (presumably this traffic will */
- /**< move if cur bss moves channels) */
- uint32 congest_obss; /**< traffic not in our bss */
- uint32 interference; /**< millisecs detecting a non 802.11 interferer. */
-
-} phy_periodic_log_cmn_t;
-
-typedef struct phy_periodic_log_core {
- uint8 baseindxval; /* TPC Base index */
- int8 tgt_pwr; /* Programmed Target power */
- int8 estpwradj; /* Current Est Power Adjust value */
- int8 crsmin_pwr; /* CRS Min/Noise power */
- int8 rssi_per_ant; /* RSSI Per antenna */
- int8 snr_per_ant; /* SNR Per antenna */
- int8 pad1; /* Padding byte to align with word */
- int8 pad2; /* Padding byte to align with word */
-} phy_periodic_log_core_t;
-
-#define PHY_PERIODIC_LOG_VER1 (1u)
-
-typedef struct phy_periodic_log_v1 {
- uint8 version; /* Logging structure version */
- uint8 numcores; /* Number of cores for which core-specific data is present */
- uint16 length; /* Length of the entire structure */
- phy_periodic_log_cmn_t phy_perilog_cmn;
- phy_periodic_counters_v1_t counters_peri_log;
- /* This will be a variable length based on the numcores field defined above */
- phy_periodic_log_core_t phy_perilog_core[1];
-} phy_periodic_log_v1_t;
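/*
 * Sizing sketch (reviewer illustration, not part of the patch): the trailing
 * phy_perilog_core[1] member uses the usual "variable length via a
 * one-element array" idiom, so the space needed for 'numcores' cores is
 * commonly computed as below.  This is an inference from the comments above,
 * not a formula stated in the header.
 */
static uint32 phy_periodic_log_v1_len(uint8 numcores)
{
    uint32 extra = (numcores > 0) ? (uint32)(numcores - 1) : 0;
    return (uint32)sizeof(phy_periodic_log_v1_t) +
        extra * (uint32)sizeof(phy_periodic_log_core_t);
}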
-
-/* Note: The version 2 is reserved for 4357 only. Future chips must not use this version. */
-
-#define MAX_CORE_4357 (2u)
-#define PHYCAL_LOG_VER2 (2u)
-#define PHY_PERIODIC_LOG_VER2 (2u)
-
-typedef struct {
- uint32 txallfrm; /**< total number of frames sent, incl. Data, ACK, RTS, CTS,
- * Control Management (includes retransmissions)
- */
- uint32 rxrsptmout; /**< number of response timeouts for transmitted frames
- * expecting a response
- */
- uint32 rxstrt; /**< number of received frames with a good PLCP */
- uint32 rxbadplcp; /**< number of parity check of the PLCP header failed */
- uint32 rxcrsglitch; /**< PHY was able to correlate the preamble but not the header */
- uint32 bphy_badplcp; /**< number of bad PLCP reception on BPHY rate */
- uint32 bphy_rxcrsglitch; /**< PHY count of bphy glitches */
- uint32 rxbeaconmbss; /**< beacons received from member of BSS */
- uint32 rxdtucastmbss; /**< number of received DATA frames with good FCS and matching RA */
- uint32 rxf0ovfl; /** < Rx FIFO0 overflow counters information */
- uint32 rxf1ovfl; /** < Rx FIFO1 overflow counters information */
- uint32 rxdtocast; /**< number of received DATA frames (good FCS and no matching RA) */
- uint32 rxtoolate; /**< receive too late */
- uint32 rxbadfcs; /**< number of frames for which the CRC check failed in the MAC */
-} phy_periodic_counters_v2_t;
-
-/* Note: The version 2 is reserved for 4357 only. All future chips must not use this version. */
-
-typedef struct phycal_log_core_v2 {
- uint16 ofdm_txa; /* OFDM Tx IQ Cal a coeff */
- uint16 ofdm_txb; /* OFDM Tx IQ Cal b coeff */
- uint16 ofdm_txd; /* contain di & dq */
- uint16 rxa; /* Rx IQ Cal A coefficient */
- uint16 rxb; /* Rx IQ Cal B coefficient */
- uint8 baseidx; /* TPC Base index */
- uint8 pad;
- int32 rxs; /* FDIQ Slope coefficient */
-} phycal_log_core_v2_t;
-
-/* Note: The version 2 is reserved for 4357 only. All future chips must not use this version. */
-
-typedef struct phycal_log_v2 {
- uint8 version; /* Logging structure version */
- uint16 length; /* Length of the entire structure */
- uint8 pad;
- phycal_log_cmn_t phycal_log_cmn; /* Logging common structure */
- phycal_log_core_v2_t phycal_log_core[MAX_CORE_4357];
-} phycal_log_v2_t;
-
-/* Note: The version 2 is reserved for 4357 only. All future chips must not use this version. */
-
-typedef struct phy_periodic_log_v2 {
- uint8 version; /* Logging structure version */
- uint16 length; /* Length of the entire structure */
- uint8 pad;
- phy_periodic_log_cmn_t phy_perilog_cmn;
- phy_periodic_counters_v2_t counters_peri_log;
- phy_periodic_log_core_t phy_perilog_core[MAX_CORE_4357];
-} phy_periodic_log_v2_t;
-
-/* Event log payload for enhanced roam log */
-typedef enum {
- ROAM_LOG_SCANSTART = 1, /* EVT log for roam scan start */
- ROAM_LOG_SCAN_CMPLT = 2, /* EVT log for roam scan completed */
- ROAM_LOG_ROAM_CMPLT = 3, /* EVT log for roam done */
- ROAM_LOG_NBR_REQ = 4, /* EVT log for Neighbor REQ */
- ROAM_LOG_NBR_REP = 5, /* EVT log for Neighbor REP */
- ROAM_LOG_BCN_REQ = 6, /* EVT log for BCNRPT REQ */
- ROAM_LOG_BCN_REP = 7, /* EVT log for BCNRPT REP */
- PRSV_PERIODIC_ID_MAX
-} prsv_periodic_id_enum_t;
-
-typedef struct prsv_periodic_log_hdr {
- uint8 version;
- uint8 id;
- uint16 length;
-} prsv_periodic_log_hdr_t;
-
-#define ROAM_LOG_VER_1 (1u)
-#define ROAM_LOG_TRIG_VER (1u)
-#define ROAM_SSID_LEN (32u)
-typedef struct roam_log_trig_v1 {
- prsv_periodic_log_hdr_t hdr;
- int8 rssi;
- uint8 current_cu;
- uint8 pad[2];
- uint reason;
- int result;
- union {
- struct {
- uint rcvd_reason;
- } prt_roam;
- struct {
- uint8 req_mode;
- uint8 token;
- uint16 nbrlist_size;
- uint32 disassoc_dur;
- uint32 validity_dur;
- uint32 bss_term_dur;
- } bss_trans;
- };
-} roam_log_trig_v1_t;
-
-#define ROAM_LOG_RPT_SCAN_LIST_SIZE 3
-#define ROAM_LOG_INVALID_TPUT 0xFFFFFFFFu
-typedef struct roam_scan_ap_info {
- int8 rssi;
- uint8 pad[3];
- uint32 score;
- uint16 chanspec;
- struct ether_addr addr;
- uint32 estm_tput;
-} roam_scan_ap_info_t;
-
-typedef struct roam_log_scan_cmplt_v1 {
- prsv_periodic_log_hdr_t hdr;
- uint8 full_scan;
- uint8 scan_count;
- uint8 scan_list_size;
- uint8 pad;
- int32 score_delta;
- roam_scan_ap_info_t cur_info;
- roam_scan_ap_info_t scan_list[ROAM_LOG_RPT_SCAN_LIST_SIZE];
-} roam_log_scan_cmplt_v1_t;
-
-typedef struct roam_log_cmplt_v1 {
- prsv_periodic_log_hdr_t hdr;
- uint status;
- uint reason;
- uint16 chanspec;
- struct ether_addr addr;
- uint8 pad[3];
- uint8 retry;
-} roam_log_cmplt_v1_t;
-
-typedef struct roam_log_nbrrep {
- prsv_periodic_log_hdr_t hdr;
- uint channel_num;
-} roam_log_nbrrep_v1_t;
-
-typedef struct roam_log_nbrreq {
- prsv_periodic_log_hdr_t hdr;
- uint token;
-} roam_log_nbrreq_v1_t;
-
-typedef struct roam_log_bcnrptreq {
- prsv_periodic_log_hdr_t hdr;
- int32 result;
- uint8 reg;
- uint8 channel;
- uint8 mode;
- uint8 bssid_wild;
- uint8 ssid_len;
- uint8 pad;
- uint16 duration;
- uint8 ssid[ROAM_SSID_LEN];
-} roam_log_bcnrpt_req_v1_t;
-
-typedef struct roam_log_bcnrptrep {
- prsv_periodic_log_hdr_t hdr;
- uint32 count;
-} roam_log_bcnrpt_rep_v1_t;
-
#endif /* _EVENT_LOG_PAYLOAD_H_ */
/*
* EVENT_LOG system definitions
*
- * Copyright (C) 1999-2019, Broadcom.
- *
+ * Copyright (C) 1999-2017, Broadcom Corporation
+ *
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
* under the terms of the GNU General Public License version 2 (the "GPL"),
* available at http://www.broadcom.com/licenses/GPLv2.php, with the
* following added to such license:
- *
+ *
* As a special exception, the copyright holders of this software give you
* permission to link this software with independent modules, and to copy and
* distribute the resulting executable under terms of your choice, provided that
* the license of that module. An independent module is a module which is not
* derived from this software. The special exception does not apply to any
* modifications of the software.
- *
+ *
* Notwithstanding the above, under no circumstances may you combine this
* software in any way with any other Broadcom software provided under a license
* other than the GPL, without Broadcom's express prior written consent.
*
* <<Broadcom-WL-IPTag/Open:>>
*
- * $Id: event_log_set.h 818566 2019-05-08 04:01:45Z $
+ * $Id: event_log_set.h 700076 2017-05-17 14:42:22Z $
*/
#ifndef _EVENT_LOG_SET_H_
#define _EVENT_LOG_SET_H_
-/* Set assignments */
-#define EVENT_LOG_SET_BUS (0u)
-#define EVENT_LOG_SET_WL (1u)
-#define EVENT_LOG_SET_PSM (2u)
-#define EVENT_LOG_SET_ERROR (3u)
-
-/* MSCH logging */
-#define EVENT_LOG_SET_MSCH_PROFILER (4u)
-
-#define EVENT_LOG_SET_5 (5u)
-#define EVENT_LOG_SET_ECOUNTERS (EVENT_LOG_SET_5)
-#define EVENT_LOG_SET_6 (6u)
-#define EVENT_LOG_SET_7 (7u)
-
-#define EVENT_LOG_SET_8 (8u)
-#define EVENT_LOG_SET_PRSRV (EVENT_LOG_SET_8)
-
-#define EVENT_LOG_SET_9 (9u)
-/* General purpose preserve chatty.
- * EVENT_LOG_SET_PRSRV_CHATTY log set should not be used by FW as it is
- * used by customer host. FW should use EVENT_LOG_SET_GP_PRSRV_CHATTY
- * for general purpose preserve chatty logs.
- */
-#define EVENT_LOG_SET_GP_PRSRV_CHATTY (EVENT_LOG_SET_9)
-#define EVENT_LOG_SET_PRSRV_CHATTY (EVENT_LOG_SET_6)
-
-/* BUS preserve */
-#define EVENT_LOG_SET_PRSRV_BUS (10u)
-
-/* WL preserve */
-#define EVENT_LOG_SET_PRSRV_WL (11u)
-
-/* Slotted BSS set */
-#define EVENT_LOG_SET_WL_SLOTTED_BSS (12u)
-
-/* PHY entity logging */
-#define EVENT_LOG_SET_PHY (13u)
-
-/* PHY preserve */
-#define EVENT_LOG_SET_PRSRV_PHY (14u)
-
-/* RTE entity */
-#define EVENT_LOG_SET_RTE (15u)
-
-/* Malloc and free logging */
-#define EVENT_LOG_SET_MEM_API (16u)
-
-/* Console buffer */
-#define EVENT_LOG_SET_RTE_CONS_BUF (17u)
-
-/* three log sets for general debug purposes */
-#define EVENT_LOG_SET_GENERAL_DBG_1 (18u)
-#define EVENT_LOG_SET_GENERAL_DBG_2 (19u)
-#define EVENT_LOG_SET_GENERAL_DBG_3 (20u)
-
-/* Log sets for capturing power related logs. Note that these sets
- * are to be used across entire system and not just WL.
- */
-#define EVENT_LOG_SET_POWER_1 (21u)
-#define EVENT_LOG_SET_POWER_2 (22u)
-
-/* Used for timestamp plotting, TS_LOG() */
-#define EVENT_LOG_SET_TS_LOG (23u)
-
-/* BUS preserve chatty */
-#define EVENT_LOG_SET_PRSRV_BUS_CHATTY (24u)
-
-/* PRESERVE_PREIODIC_LOG_SET */
-/* flush if host is in D0 at every period */
-#define EVENT_LOG_SET_PRSV_PERIODIC (25u)
-
-#ifndef NUM_EVENT_LOG_SETS
/* Set a maximum number of sets here. It is not dynamic for
- * efficiency of the EVENT_LOG calls. Old branches could define
- * this to an appropriate number in their makefiles to reduce
- * ROM invalidation
+ * efficiency of the EVENT_LOG calls.
*/
-#ifdef NUM_EVENT_LOG_SETS_V2
-/* for v2, everything has became unsigned */
-#define NUM_EVENT_LOG_SETS (26u)
-#else /* NUM_EVENT_LOG_SETS_V2 */
-#define NUM_EVENT_LOG_SETS (26)
-#endif /* NUM_EVENT_LOG_SETS_V2 */
-#endif /* NUM_EVENT_LOG_SETS */
-
-/* send delayed logs when >= 50% of buffer is full */
-#ifndef ECOUNTERS_DELAYED_FLUSH_PERCENTAGE
-#define ECOUNTERS_DELAYED_FLUSH_PERCENTAGE (50)
-#endif // endif
+#define NUM_EVENT_LOG_SETS 8
+
+/* Define new event log sets here */
+#define EVENT_LOG_SET_BUS 0
+#define EVENT_LOG_SET_WL 1
+#define EVENT_LOG_SET_PSM 2
+#define EVENT_LOG_SET_ERROR 3
+#define EVENT_LOG_SET_MEM_API 4
+/* Share the set with MEM_API for now to limit ROM invalidation.
+ * The above set is used in dingo only
+ * On trunk, MSCH should move to a different set.
+ */
+#define EVENT_LOG_SET_MSCH_PROFILER 4
+#define EVENT_LOG_SET_ECOUNTERS 5 /* Host to instantiate this for ecounters. */
+#define EVENT_LOG_SET_6 6 /* Instantiated by host for channel switch logs */
+#define EVENT_LOG_SET_7 7 /* Instantiated by host for AMPDU stats */
#endif /* _EVENT_LOG_SET_H_ */
/*
* EVENT_LOG system definitions
*
- * Copyright (C) 1999-2019, Broadcom.
- *
+ * Copyright (C) 1999-2017, Broadcom Corporation
+ *
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
* under the terms of the GNU General Public License version 2 (the "GPL"),
* available at http://www.broadcom.com/licenses/GPLv2.php, with the
* following added to such license:
- *
+ *
* As a special exception, the copyright holders of this software give you
* permission to link this software with independent modules, and to copy and
* distribute the resulting executable under terms of your choice, provided that
* the license of that module. An independent module is a module which is not
* derived from this software. The special exception does not apply to any
* modifications of the software.
- *
+ *
* Notwithstanding the above, under no circumstances may you combine this
* software in any way with any other Broadcom software provided under a license
* other than the GPL, without Broadcom's express prior written consent.
*
* <<Broadcom-WL-IPTag/Open:>>
*
- * $Id: event_log_tag.h 820429 2019-05-17 22:30:04Z $
+ * $Id: event_log_tag.h 700681 2017-05-20 16:37:38Z $
*/
#ifndef _EVENT_LOG_TAG_H_
/* Define new event log tags here */
#define EVENT_LOG_TAG_NULL 0 /* Special null tag */
#define EVENT_LOG_TAG_TS 1 /* Special timestamp tag */
-
-/* HSIC Legacy support */
-/* Possible candidates for reuse */
#define EVENT_LOG_TAG_BUS_OOB 2
#define EVENT_LOG_TAG_BUS_STATE 3
#define EVENT_LOG_TAG_BUS_PROTO 4
#define EVENT_LOG_TAG_BUS_DESC 9
#define EVENT_LOG_TAG_BUS_SETUP 10
#define EVENT_LOG_TAG_BUS_MISC 11
-
#define EVENT_LOG_TAG_SRSCAN 22
#define EVENT_LOG_TAG_PWRSTATS_INFO 23
-
-/* Timestamp logging for plotting. */
-#define EVENT_LOG_TAG_TSLOG 26
-
-/* Possible candidates for reuse */
-#define EVENT_LOG_TAG_UCODE_FIFO 27
-
+#define EVENT_LOG_TAG_UCODE_WATCHDOG 26
+#define EVENT_LOG_TAG_UCODE_FIFO 27
#define EVENT_LOG_TAG_SCAN_TRACE_LOW 28
#define EVENT_LOG_TAG_SCAN_TRACE_HIGH 29
#define EVENT_LOG_TAG_SCAN_ERROR 30
#define EVENT_LOG_TAG_PCI_DBG 52
#define EVENT_LOG_TAG_PCI_DATA 53
#define EVENT_LOG_TAG_PCI_RING 54
+/* EVENT_LOG_TAG_AWDL_TRACE_RANGING will be removed after wlc_ranging merge from IGUANA
+ * keeping it here to avoid compilation error on trunk
+ */
+#define EVENT_LOG_TAG_AWDL_TRACE_RANGING 55
#define EVENT_LOG_TAG_RANGING_TRACE 55
#define EVENT_LOG_TAG_WL_ERROR 56
#define EVENT_LOG_TAG_PHY_ERROR 57
#define EVENT_LOG_TAG_MIMO_PS_INFO 94
#define EVENT_LOG_TAG_BTCX_STATS 95
#define EVENT_LOG_TAG_LEAKY_AP_STATS 96
+#define EVENT_LOG_TAG_AWDL_TRACE_ELECTION 97
#define EVENT_LOG_TAG_MIMO_PS_STATS 98
#define EVENT_LOG_TAG_PWRSTATS_PHY 99
#define EVENT_LOG_TAG_PWRSTATS_SCAN 100
+#define EVENT_LOG_TAG_PWRSTATS_AWDL 101
#define EVENT_LOG_TAG_PWRSTATS_WAKE_V2 102
#define EVENT_LOG_TAG_LQM 103
#define EVENT_LOG_TAG_TRACE_WL_INFO 104
#define EVENT_LOG_TAG_STATS 153
#define EVENT_LOG_TAG_BAM 154
#define EVENT_LOG_TAG_TXFAIL 155
+#define EVENT_LOG_TAG_AWDL_CONFIG_DBG 156
+#define EVENT_LOG_TAG_AWDL_SYNC_DBG 157
+#define EVENT_LOG_TAG_AWDL_PEER_DBG 158
#define EVENT_LOG_TAG_RANDMAC_INFO 159
#define EVENT_LOG_TAG_RANDMAC_DBG 160
#define EVENT_LOG_TAG_RANDMAC_ERR 161
-#define EVENT_LOG_TAG_MSCH_CAL 163
-#define EVENT_LOG_TAG_MSCH_OPP_CAL 164
-#define EVENT_LOG_TAG_MSCH 165
-#define EVENT_LOG_TAG_NAN_SYNC 166
-#define EVENT_LOG_TAG_NAN_DPE 167
-#define EVENT_LOG_TAG_NAN_SCHED 168
-#define EVENT_LOG_TAG_NAN_RNG 169
-#define EVENT_LOG_TAG_NAN_DAM 170
-#define EVENT_LOG_TAG_NAN_NA 171
-#define EVENT_LOG_TAG_NAN_NDL 172
-#define EVENT_LOG_TAG_NAN_NDP 173
-#define EVENT_LOG_TAG_NAN_SEC 174
-#define EVENT_LOG_TAG_NAN_MAC 175
-#define EVENT_LOG_TAG_NAN_FSM 176
-
-#define EVENT_LOG_TAG_TPA_ERR 192
-#define EVENT_LOG_TAG_TPA_INFO 193
-#define EVENT_LOG_TAG_OCE_DBG 194
-#define EVENT_LOG_TAG_OCE_INFO 195
-#define EVENT_LOG_TAG_OCE_ERR 196
-#define EVENT_LOG_TAG_WL_WARN 197
-#define EVENT_LOG_TAG_SB_ERR 198
-#define EVENT_LOG_TAG_SB_INFO 199
-#define EVENT_LOG_TAG_SB_SCHED 200
-#define EVENT_LOG_TAG_ADPS_INFO 201
-#define EVENT_LOG_TAG_SB_CMN_SYNC_INFO 202
-#define EVENT_LOG_TAG_PHY_CAL_INFO 203 /* PHY CALs scheduler info */
-#define EVENT_LOG_TAG_EVT_NOTIF_INFO 204
-#define EVENT_LOG_TAG_PHY_HC_ERROR 205
-#define EVENT_LOG_TAG_PHY_TXPWR_WARN 206
-#define EVENT_LOG_TAG_PHY_TXPWR_INFO 207
-#define EVENT_LOG_TAG_PHY_ACI_INFO 208
-#define EVENT_LOG_TAG_WL_COUNTERS_AUX 209
-#define EVENT_LOG_TAG_AMPDU_DUMP_AUX 210
-#define EVENT_LOG_TAG_PWRSTATS_PHY_AUX 212
-#define EVENT_LOG_TAG_PWRSTATS_SCAN_AUX 213
-#define EVENT_LOG_TAG_PWRSTATS_WAKE_V2_AUX 214
-#define EVENT_LOG_TAG_SVT_TESTING 215 /* SVT testing/verification */
-#define EVENT_LOG_TAG_HND_SMD_ERROR 216
-#define EVENT_LOG_TAG_PSBW_INFO 217
-#define EVENT_LOG_TAG_PHY_CAL_DBG 218
-#define EVENT_LOG_TAG_FILS_DBG 219
-#define EVENT_LOG_TAG_FILS_INFO 220
-#define EVENT_LOG_TAG_FILS_ERROR 221
-#define EVENT_LOG_TAG_HWA_TXPOST 222
-#define EVENT_LOG_TAG_HWA_TXDMA 223
-#define EVENT_LOG_TAG_PPR_ERROR 224
-
-/* Arbitrator callback log tags */
-#define EVENT_LOG_TAG_STF_ARB_CB_TRACE 224
-#define EVENT_LOG_TAG_STF_ARB_CB_ERROR 225
-#define EVENT_LOG_TAG_PHY_PERIODIC_SEC 226
-#define EVENT_LOG_TAG_RTE_ERROR 227
-#define EVENT_LOG_TAG_CPLT_ERROR 228
-#define EVENT_LOG_TAG_DNGL_ERROR 229
-#define EVENT_LOG_TAG_NVRAM_ERROR 230
-#define EVENT_LOG_TAG_NAC 231
-#define EVENT_LOG_TAG_HP2P_ERR 232
-#define EVENT_LOG_TAG_SB_SCHED_DBG_SYNC 233
-#define EVENT_LOG_TAG_ENHANCED_TS 234
-
-/* Available space for new tags for Dingo, Iguana and branches
- * prior to Koala only. From Koala onwards, new tags must be greater
- * than 255. If a tag is required for Koala and legacy productization branches,
- * add that tag here. Tags > 255 will generate extended header. Legacy code
- * does not understand extended header.
- */
-
-/* Debug tags for making debug builds */
-#define EVENT_LOG_TAG_DBG1 251
-#define EVENT_LOG_TAG_DBG2 252
-#define EVENT_LOG_TAG_DBG3 253
-#define EVENT_LOG_TAG_DBG4 254
-#define EVENT_LOG_TAG_DBG5 255
-
-/* Insert new tags here for Koala onwards */
-
-/* NAN INFO/ERR event tags */
-#define EVENT_LOG_TAG_NAN_SYNC_INFO 256
-#define EVENT_LOG_TAG_NAN_DPE_INFO 257
-#define EVENT_LOG_TAG_NAN_SCHED_INFO 258
-#define EVENT_LOG_TAG_NAN_RNG_INFO 259
-#define EVENT_LOG_TAG_NAN_DAM_INFO 260
-#define EVENT_LOG_TAG_NAN_NA_INFO 261
-#define EVENT_LOG_TAG_NAN_NDL_INFO 262
-#define EVENT_LOG_TAG_NAN_NDP_INFO 263
-#define EVENT_LOG_TAG_NAN_SEC_INFO 264
-#define EVENT_LOG_TAG_NAN_MAC_INFO 265
-#define EVENT_LOG_TAG_NAN_FSM_INFO 266
-#define EVENT_LOG_TAG_NAN_PEER_INFO 267
-#define EVENT_LOG_TAG_NAN_AVAIL_INFO 268
-#define EVENT_LOG_TAG_NAN_CMN_INFO 269
-#define EVENT_LOG_TAG_NAN_SYNC_ERR 270
-#define EVENT_LOG_TAG_NAN_DPE_ERR 271
-#define EVENT_LOG_TAG_NAN_SCHED_ERR 272
-#define EVENT_LOG_TAG_NAN_RNG_ERR 273
-#define EVENT_LOG_TAG_NAN_DAM_ERR 274
-#define EVENT_LOG_TAG_NAN_NA_ERR 275
-#define EVENT_LOG_TAG_NAN_NDL_ERR 276
-#define EVENT_LOG_TAG_NAN_NDP_ERR 277
-#define EVENT_LOG_TAG_NAN_SEC_ERR 278
-#define EVENT_LOG_TAG_NAN_MAC_ERR 279
-#define EVENT_LOG_TAG_NAN_FSM_ERR 280
-#define EVENT_LOG_TAG_NAN_PEER_ERR 281
-#define EVENT_LOG_TAG_NAN_AVAIL_ERR 282
-#define EVENT_LOG_TAG_NAN_CMN_ERR 283
-
-/* More NAN DBG evt Tags */
-#define EVENT_LOG_TAG_NAN_PEER 284
-#define EVENT_LOG_TAG_NAN_AVAIL 285
-#define EVENT_LOG_TAG_NAN_CMN 286
-
-#define EVENT_LOG_TAG_SAE_ERROR 287
-#define EVENT_LOG_TAG_SAE_INFO 288
-
-/* rxsig module logging */
-#define EVENT_LOG_TAG_RXSIG_ERROR 289
-#define EVENT_LOG_TAG_RXSIG_DEBUG 290
-#define EVENT_LOG_TAG_RXSIG_INFO 291
-
-/* HE TWT HEB EVENT_LOG_TAG */
-#define EVENT_LOG_TAG_WL_HE_INFO 292
-#define EVENT_LOG_TAG_WL_HE_TRACE 293
-#define EVENT_LOG_TAG_WL_HE_WARN 294
-#define EVENT_LOG_TAG_WL_HE_ERROR 295
-#define EVENT_LOG_TAG_WL_TWT_INFO 296
-#define EVENT_LOG_TAG_WL_TWT_TRACE 297
-#define EVENT_LOG_TAG_WL_TWT_WARN 298
-#define EVENT_LOG_TAG_WL_TWT_ERROR 299
-#define EVENT_LOG_TAG_WL_HEB_ERROR 300
-#define EVENT_LOG_TAG_WL_HEB_TRACE 301
-
-/* RRM EVENT_LOG_TAG */
-#define EVENT_LOG_TAG_RRM_DBG 302
-#define EVENT_LOG_TAG_RRM_INFO 303
-#define EVENT_LOG_TAG_RRM_ERR 304
-
-/* scan core */
-#define EVENT_LOG_TAG_SC 305
-
-#define EVENT_LOG_TAG_ESP_DBG 306
-#define EVENT_LOG_TAG_ESP_INFO 307
-#define EVENT_LOG_TAG_ESP_ERR 308
-
-/* SDC */
-#define EVENT_LOG_TAG_SDC_DBG 309
-#define EVENT_LOG_TAG_SDC_INFO 310
-#define EVENT_LOG_TAG_SDC_ERR 311
-
-/* RTE */
-#define EVENT_LOG_TAG_RTE_ERR 312
-
-/* TX FIFO */
-#define EVENT_LOG_TAG_FIFO_INFO 313
-
-/* PKTTS */
-#define EVENT_LOG_TAG_LATENCY_INFO 314
-
-/* TDLS */
-#define EVENT_LOG_TAG_WL_TDLS_INFO 315
-#define EVENT_LOG_TAG_WL_TDLS_DBG 316
-#define EVENT_LOG_TAG_WL_TDLS_ERR 317
-
-/* MSCH messages */
-#define EVENT_LOG_TAG_MSCH_DATASTRUCT 319
-#define EVENT_LOG_TAG_MSCH_REGISTER 320
-#define EVENT_LOG_TAG_MSCH_CALLBACK 321
-#define EVENT_LOG_TAG_MSCH_ERROR 322
-#define EVENT_LOG_TAG_MSCH_DEBUG 323
-#define EVENT_LOG_TAG_MSCH_INFORM 324
-#define EVENT_LOG_TAG_MSCH_TRACE 325
-
-/* bus low power related info messages */
-#define EVENT_LOG_TAG_WL_BUS_LP_INFO 326
-#define EVENT_LOG_TAG_PCI_LP_INFO 327
-
-/* SBSS BT-Coex */
-#define EVENT_LOG_TAG_SB_BTCX_INFO 328
-
-/* wbus */
-#define EVENT_LOG_TAG_WBUS_ERR 329
-#define EVENT_LOG_TAG_WBUS_INFO 330
-#define EVENT_LOG_TAG_WBUS_SCHED 331
-
-/* MODESW */
-#define EVENT_LOG_TAG_MODESW_ERR 332
-
-/* LPHS */
-#define EVENT_LOG_TAG_LPHS_ERR 333
-
-/* CPU statistics */
-#define EVENT_LOG_TAG_ARM_STAT 334
-
-/* Event log tags for SOE */
-#define EVENT_LOG_TAG_SOE_ERROR 335
-#define EVENT_LOG_TAG_SOE_INFO 336
-
-/* Event log tags for GCI Shared Memory */
-#define EVENT_LOG_TAG_GCISHM_ERR 337
-#define EVENT_LOG_TAG_GCISHM_INFO 338
-
-/* Event log tags for Enhanced Roam Log */
-#define EVENT_LOG_TAG_ROAM_ENHANCED_LOG 339
-
-/* WL BTCEC */
-#define EVENT_LOG_TAG_BTCEC_ERR 340
-#define EVENT_LOG_TAG_BTCEC_INFO 341
-#define EVENT_LOG_TAG_BTCEC_SCHED 342
+#define EVENT_LOG_TAG_AWDL_DFSP_DBG 162
+#define EVENT_LOG_TAG_TPA_ERR 163
+#define EVENT_LOG_TAG_TPA_INFO 164
/* EVENT_LOG_TAG_MAX = Set to the same value of last tag, not last tag + 1 */
-#define EVENT_LOG_TAG_MAX 342
-
-typedef enum wl_el_set_type_def {
- EVENT_LOG_SET_TYPE_DEFAULT = 0, /* flush the log buffer when it is full - Default option */
- EVENT_LOG_SET_TYPE_PRSRV = 1, /* flush the log buffer based on fw or host trigger */
- EVENT_LOG_SET_TYPE_DFLUSH = 2 /* flush the log buffer once the watermark is reached */
-} wl_el_set_type_def_t;
-
-#define EVENT_LOG_TAG_FLUSH_NONE 0x00 /* No flush */
-#define EVENT_LOG_TAG_FLUSH_ALL 0x40 /* Flush all preserved sets */
-#define EVENT_LOG_TAG_FLUSH_SETNUM 0x80 /* Flush preserved set */
-#define EVENT_LOG_TAG_FLUSH_MASK 0x3f /* SetNum Mask */
+#define EVENT_LOG_TAG_MAX 164
+/* Note: New event should be added/reserved in trunk before adding it to branches */
-typedef enum wl_el_flush_type {
- EL_TAG_PRSRV_FLUSH_NONE = 0, /* No flush of preserve buf on this tag */
- EL_TAG_PRSRV_FLUSH_SETNUM, /* Flush the buffer set specified on this tag */
- EL_TAG_PRSRV_FLUSH_ALL /* Flush all preserved buffer set on this tag */
-} wl_el_flush_type_t;
-
-#define EVENT_LOG_FLUSH_CURRENT_VERSION 0
-typedef struct wl_el_set_flush_prsrv_s {
- uint16 version;
- uint16 len;
- uint16 tag; /* Tag for which preserve flush should be done */
- uint8 flush_type; /* Check wl_el_flush_type_t */
- uint8 set_num; /* Log set num to flush. Max is NUM_EVENT_LOG_SETS. Valid only when
- * action is EVENT_LOG_TAG_FLUSH_SETNUM
- */
-} wl_el_set_flush_prsrv_t;
#define SD_PRHDRS(i, s, h, p, n, l)
#define SD_PRPKT(m, b, n)
uint32 t; /* Type cheat */
} event_log_hdr_t;
-/* for internal use - legacy max. tag */
-#define EVENT_LOG_TAG_MAX_LEGACY_FORMAT 255
-
-/*
- * The position of the extended header in the event log stream will be as follows:
- * <event log payload><ARM cycle count timestamp><extended header><regular header>
- * Extended header could be due to count > 255 or tag > 255.
- *
- * Extended count: 6 bits long. 8 bits (existing) + 6 bits =>
- * 2^14 words = 65536 bytes payload max
- * Extended count field is currently reserved
- * Extended tag: 8 (existing) + 4 bits = 12 bits =>2^12 = 4096 tags
- * bits[7..4] of extended tags are reserved.
- * MSB 16 bits of the extended header are reserved for future use.
- */
-
-typedef union event_log_extended_hdr {
- struct {
- uint8 extended_tag; /* Extended tag, bits[7..4] are reserved */
- uint8 extended_count; /* Extended count. Reserved for now. */
- uint16 rsvd; /* Reserved */
- };
-
- uint32 t; /* Type cheat */
-} event_log_extended_hdr_t;
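/*
 * Parsing sketch (reviewer illustration, not part of the patch): per the
 * layout comment above, the extended header sits between the ARM cycle-count
 * timestamp and the regular header, and only bits[3..0] of extended_tag carry
 * information (bits[7..4] are reserved).  How those four bits combine with
 * the regular 8-bit tag is not spelled out here, so this helper only extracts
 * the meaningful nibble.
 */
static uint8 event_log_ext_tag_bits(const event_log_extended_hdr_t *ext)
{
    return (uint8)(ext->extended_tag & 0x0Fu);
}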
#endif /* _EVENT_LOG_TAG_H_ */
/*
* Trace log blocks sent over HBUS
*
- * Copyright (C) 1999-2019, Broadcom.
- *
+ * Copyright (C) 1999-2017, Broadcom Corporation
+ *
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
* under the terms of the GNU General Public License version 2 (the "GPL"),
* available at http://www.broadcom.com/licenses/GPLv2.php, with the
* following added to such license:
- *
+ *
* As a special exception, the copyright holders of this software give you
* permission to link this software with independent modules, and to copy and
* distribute the resulting executable under terms of your choice, provided that
* the license of that module. An independent module is a module which is not
* derived from this software. The special exception does not apply to any
* modifications of the software.
- *
+ *
* Notwithstanding the above, under no circumstances may you combine this
* software in any way with any other Broadcom software provided under a license
* other than the GPL, without Broadcom's express prior written consent.
*
* <<Broadcom-WL-IPTag/Open:>>
*
- * $Id: event_trace.h 693870 2017-04-05 09:03:17Z $
+ * $Id: event_trace.h 645268 2016-06-23 08:39:17Z $
*/
/**
+++ /dev/null
-/*
- * Fundamental types and constants relating to FILS AUTHENTICATION
- * Copyright (C) 1999-2019, Broadcom.
- *
- * Unless you and Broadcom execute a separate written software license
- * agreement governing use of this software, this software is licensed to you
- * under the terms of the GNU General Public License version 2 (the "GPL"),
- * available at http://www.broadcom.com/licenses/GPLv2.php, with the
- * following added to such license:
- *
- * As a special exception, the copyright holders of this software give you
- * permission to link this software with independent modules, and to copy and
- * distribute the resulting executable under terms of your choice, provided that
- * you also meet, for each linked independent module, the terms and conditions of
- * the license of that module. An independent module is a module which is not
- * derived from this software. The special exception does not apply to any
- * modifications of the software.
- *
- * Notwithstanding the above, under no circumstances may you combine this
- * software in any way with any other Broadcom software provided under a license
- * other than the GPL, without Broadcom's express prior written consent.
- *
- *
- * <<Broadcom-WL-IPTag/Open:>>
- *
- * $Id$
- */
-
-#ifndef _FILSAUTH_H_
-#define _FILSAUTH_H_
-
-/* This marks the start of a packed structure section. */
-#include <packed_section_start.h>
-
-/* 11ai D6.0 8.6.8.36 FILS Discovery frame format
- category
- action
- fils_discovery_info_field_t
- fils_rnr_element_t
- fils_indication_element_t
- fils_vendor_specific_element_t
-*/
-
-/* 11revmc D4.0 8.4.2.25 Vendor Specific element */
-typedef BWL_PRE_PACKED_STRUCT struct fils_vendor_specific_element {
- uint8 elementid;
- uint8 length;
- /* variable len info */
- uint8 orgid_vendorspecific_content[];
-} BWL_POST_PACKED_STRUCT fils_vendor_specific_element_t;
-
-#define FILS_VS_ELEM_HDR_LEN (sizeof(fils_vendor_specific_element_t))
-
-/* 11ai D6.0 8.4.2.178 FILS Indication element */
-typedef BWL_PRE_PACKED_STRUCT struct fils_indication_element {
- uint8 elementid;
- uint8 length;
- uint16 fils_info;
- /* variable len info */
- uint8 cache_domain_publickey_id[];
-} BWL_POST_PACKED_STRUCT fils_indication_element_t;
-
-#define FILS_INDICATION_ELEM_HDR_LEN (sizeof(fils_indication_element_t))
-
-#define FILS_INDICATION_IE_TAG_FIXED_LEN 2
-
-#define FI_INFO_CACHE_IND_SUBFIELD_SIZE 2
-
-/* FILS Indication Information field */
-#define FI_INFO_PUB_KEY_IDENTS_MASK (0x0007)
-#define FI_INFO_REALM_IDENTS_MASK (0x0038)
-#define FI_INFO_IP_ADDR_CFG_MASK (0x0040)
-#define FI_INFO_CACHE_IDENT_MASK (0x0080)
-#define FI_INFO_HESSID_MASK (0x0100)
-#define FI_INFO_SHRKEY_AUTH_WOPFS_MASK (0x0200)
-#define FI_INFO_SHRKEY_AUTH_WPFS_MASK (0x0400)
-#define FI_INFO_PUBKEY_AUTH_MASK (0x0800)
-
-#define FI_INFO_CACHE_IDENT(fc) ((fc & FI_INFO_CACHE_IDENT_MASK))
-#define FI_INFO_HESSID(fc) ((fc & FI_INFO_HESSID_MASK))
-#define FI_INFO_SHRKEY_AUTH_WOPFS(fc) ((fc & FI_INFO_SHRKEY_AUTH_WOPFS_MASK))
-#define FI_INFO_SHRKEY_AUTH_WPFS(fc) ((fc & FI_INFO_SHRKEY_AUTH_WPFS_MASK))
-
-/* 11ai D11.0 9.4.2.171.1 TBTT Information field */
-typedef BWL_PRE_PACKED_STRUCT struct tbtt_info_field {
- uint8 tbtt_offset;
- uint8 bssid[ETHER_ADDR_LEN];
- uint32 short_ssid;
-} BWL_POST_PACKED_STRUCT tbtt_info_field_t;
-
-#define TBTT_INFO_FIELD_HDR_LEN (sizeof(tbtt_info_field_t))
-
-/* 11ai D11.0 9.4.2.171.1 Neighbor AP Information field */
-typedef BWL_PRE_PACKED_STRUCT struct neighbor_ap_info_field {
- uint16 tbtt_info_header;
- uint8 op_class;
- uint8 channel;
- /* variable len info */
- uint8 tbtt_info_field[];
-} BWL_POST_PACKED_STRUCT neighbor_ap_info_field_t;
-
-#define NEIGHBOR_AP_INFO_FIELD_HDR_LEN (sizeof(neighbor_ap_info_field_t))
-
-/* 11ai D11.0 9.4.2.171 Reduced Neighbor Report element */
-typedef BWL_PRE_PACKED_STRUCT struct fils_rnr_element {
- uint8 elementid;
- uint8 length;
- /* variable len info */
- uint8 neighbor_ap_info[];
-} BWL_POST_PACKED_STRUCT fils_rnr_element_t;
-
-#define FILS_RNR_ELEM_HDR_LEN (sizeof(fils_rnr_element_t))
-
-/* TBTT Info Header macros */
-#define TBTT_INFO_HDR_FIELD_TYPE_MASK (0x001f)
-#define TBTT_INFO_HDR_FN_AP_MASK (0x0004)
-#define TBTT_INFO_HDR_COUNT_MASK (0x00f0)
-#define TBTT_INFO_HDR_LENGTH_MASK (0xff00)
-
-#define TBTT_INFO_HDR_FIELD_TYPE(hdr)\
- ((hdr) & TBTT_INFO_HDR_FIELD_TYPE_MASK)
-#define TBTT_INFO_HDR_FN_AP(hdr)\
- (((hdr) & TBTT_INFO_HDR_FN_AP_MASK) >> 2)
-#define TBTT_INFO_HDR_COUNT(hdr)\
- (((hdr) & TBTT_INFO_HDR_COUNT_MASK) >> 4)
-#define TBTT_INFO_HDR_LENGTH(hdr)\
- (((hdr) & TBTT_INFO_HDR_LENGTH_MASK) >> 8)
-
-/* FILS Nonce element */
-#define FILS_NONCE_LENGTH 16u
-
-typedef BWL_PRE_PACKED_STRUCT struct fils_nonce_element {
- uint8 elementid;
- uint8 length;
- uint8 element_id_ext;
- uint8 fils_nonce[FILS_NONCE_LENGTH];
-} BWL_POST_PACKED_STRUCT fils_nonce_element_t;
-
-/* 11ai 9.4.2.186 FILS Key Delivery element */
-#define FILS_KEY_RSC_LENGTH 8u
-
-typedef BWL_PRE_PACKED_STRUCT struct fils_key_delivery_element {
- uint8 elementid;
- uint8 length;
- uint8 element_id_ext;
- uint8 key_rsc[FILS_KEY_RSC_LENGTH];
- uint8 kde_list[]; /* Key Data Elements */
-} BWL_POST_PACKED_STRUCT fils_key_delivery_element_t;
-
-/* 8.4.2.175 FILS Session element */
-#define FILS_SESSION_LENGTH 8u
-
-typedef BWL_PRE_PACKED_STRUCT struct fils_session_element {
- uint8 elementid;
- uint8 length;
- uint8 element_id_ext;
- uint8 fils_session[FILS_SESSION_LENGTH];
-} BWL_POST_PACKED_STRUCT fils_session_element_t;
-
-/* 9.4.2.179 FILS key confirmation element */
-#define FILS_KEY_CONFIRMATION_HEADER_LEN 3u
-
-typedef BWL_PRE_PACKED_STRUCT struct fils_key_conf_element {
- uint8 elementid;
- uint8 length;
- uint8 element_id_ext;
- /* variable len info */
- uint8 key_auth[];
-} BWL_POST_PACKED_STRUCT fils_key_conf_element_t;
-
-#define FILS_SESSION_ELEM_LEN (sizeof(fils_session_element_t))
-
-/* 8.4.2.174 FILS Key Confirmation element */
-typedef BWL_PRE_PACKED_STRUCT struct fils_key_confirm_element {
- uint8 elementid;
- uint8 length;
- uint8 element_id_ext;
- /* variable len info */
- uint8 keyauth[];
-} BWL_POST_PACKED_STRUCT fils_key_confirm_element_t;
-
-#define FILS_CONFIRM_ELEM_HDR_LEN (sizeof(fils_key_confirm_element_t))
-
-/* 11ai D6.0 8.6.8.36 FILS Discovery frame format */
-typedef BWL_PRE_PACKED_STRUCT struct fils_discovery_info_field {
- uint16 framecontrol;
- uint32 timestamp[2];
- uint16 bcninterval;
- /* variable len info */
- uint8 disc_info[];
-} BWL_POST_PACKED_STRUCT fils_discovery_info_field_t;
-
-#define FD_INFO_FIELD_HDR_LEN (sizeof(fils_discovery_info_field_t))
-
-#define FD_INFO_CAP_SUBFIELD_SIZE 2
-#define FD_INFO_LENGTH_FIELD_SIZE 2
-
-/* FILS Discovery Information field */
-#define FD_INFO_SSID_LENGTH_MASK (0x001f)
-#define FD_INFO_CAP_IND_MASK (0x0020)
-#define FD_INFO_SHORT_SSID_IND_MASK (0x0040)
-#define FD_INFO_APCSN_IND_MASK (0x0080)
-#define FD_INFO_ANO_IND_MASK (0x0100)
-#define FD_INFO_CH_CENTER_FR_IND_MASK (0x0200)
-#define FD_INFO_PRIMARY_CH_IND_MASK (0x0400)
-#define FD_INFO_RSN_IND_MASK (0x0800)
-#define FD_INFO_LENGTH_IND_MASK (0x1000)
-#define FD_INFO_MD_IND_MASK (0x2000)
-
-#define FD_INFO_SET_SSID_LENGTH(fc, len) (fc |= ((uint16)(len) & FD_INFO_SSID_LENGTH_MASK))
-#define FD_INFO_SET_CAP_PRESENT(fc) (fc |= FD_INFO_CAP_IND_MASK)
-#define FD_INFO_SET_SHORT_SSID_PRESENT(fc) (fc |= FD_INFO_SHORT_SSID_IND_MASK)
-#define FD_INFO_SET_APCSN_PRESENT(fc) ((fc |= FD_INFO_APCSN_IND_MASK)
-#define FD_INFO_SET_ANO_PRESENT(fc) (fc |= FD_INFO_ANO_IND_MASK)
-#define FD_INFO_SET_CH_CENTER_FR_PRESENT(fc) (fc |= FD_INFO_CH_CENTER_FR_IND_MASK)
-#define FD_INFO_SET_PRIMARY_CH_PRESENT(fc) (fc |= FD_INFO_PRIMARY_CH_IND_MASK)
-#define FD_INFO_SET_RSN_PRESENT(fc) (fc |= FD_INFO_RSN_IND_MASK)
-#define FD_INFO_SET_LENGTH_PRESENT(fc) (fc |= FD_INFO_LENGTH_IND_MASK)
-#define FD_INFO_SET_MD_PRESENT(fc) (fc |= FD_INFO_MD_IND_MASK)
-
-#define FD_INFO_SSID_LENGTH(fc) ((fc & FD_INFO_SSID_LENGTH_MASK))
-#define FD_INFO_IS_CAP_PRESENT(fc) ((fc & FD_INFO_CAP_IND_MASK) >> 5)
-#define FD_INFO_IS_SHORT_SSID_PRESENT(fc) ((fc & FD_INFO_SHORT_SSID_IND_MASK) >> 6)
-#define FD_INFO_IS_APCSN_PRESENT(fc) ((fc & FD_INFO_APCSN_IND_MASK) >> 7)
-#define FD_INFO_IS_ANO_PRESENT(fc) ((fc & FD_INFO_ANO_IND_MASK) >> 8)
-#define FD_INFO_IS_CH_CENTER_FR_PRESENT(fc) ((fc & FD_INFO_CH_CENTER_FR_IND_MASK) >> 9)
-#define FD_INFO_IS_PRIMARY_CH_PRESENT(fc) ((fc & FD_INFO_PRIMARY_CH_IND_MASK) >> 10)
-#define FD_INFO_IS_RSN_PRESENT(fc) ((fc & FD_INFO_RSN_IND_MASK) >> 11)
-#define FD_INFO_IS_LENGTH_PRESENT(fc) ((fc & FD_INFO_LENGTH_IND_MASK) >> 12)
-#define FD_INFO_IS_MD_PRESENT(fc) ((fc & FD_INFO_MD_IND_MASK) >> 13)
-
-/* FILS Discovery Capability subfield */
-#define FD_CAP_ESS_MASK (0x0001)
-#define FD_CAP_PRIVACY_MASK (0x0002)
-#define FD_CAP_BSS_CH_WIDTH_MASK (0x001c)
-#define FD_CAP_MAX_NSS_MASK (0x00e0)
-#define FD_CAP_MULTI_BSS_MASK (0x0200)
-#define FD_CAP_PHY_INDEX_MASK (0x1c00)
-#define FD_CAP_FILS_MIN_RATE_MASK (0xe000)
-
-#define FD_CAP_ESS(cap) ((cap & FD_CAP_ESS_MASK))
-#define FD_CAP_PRIVACY(cap) ((cap & FD_CAP_PRIVACY_MASK) >> 1)
-#define FD_CAP_BSS_CH_WIDTH(cap) ((cap & FD_CAP_BSS_CH_WIDTH_MASK) >> 2)
-#define FD_CAP_MAX_NSS(cap) ((cap & FD_CAP_MAX_NSS_MASK) >> 5)
-#define FD_CAP_MULTI_BSS(cap) ((cap & FD_CAP_MULTI_BSS_MASK) >> 9)
-#define FD_CAP_PHY_INDEX(cap) ((cap & FD_CAP_PHY_INDEX_MASK) >> 10)
-#define FD_CAP_FILS_MIN_RATE(cap) ((cap & FD_CAP_FILS_MIN_RATE_MASK) >> 13)
-
-#define FD_CAP_SET_ESS(cap) ((cap |= FD_CAP_ESS_MASK))
-#define FD_CAP_SET_PRIVACY(cap) ((cap & FD_CAP_PRIVACY_MASK) >> 1)
-#define FD_CAP_SET_BSS_CH_WIDTH(cap) ((cap & FD_CAP_BSS_CH_WIDTH_MASK) >> 2)
-#define FD_CAP_SET_MAX_NSS(cap) ((cap & FD_CAP_MAX_NSS_MASK) >> 5)
-#define FD_CAP_SET_MULTI_BSS(cap) ((cap & FD_CAP_MULTI_BSS_MASK) >> 9)
-#define FD_CAP_SET_PHY_INDEX(cap) ((cap & FD_CAP_PHY_INDEX_MASK) >> 10)
-#define FD_CAP_SET_FILS_MIN_RATE(cap) ((cap & FD_CAP_FILS_MIN_RATE_MASK) >> 13)
-
-/* 11ai D6.0 8.4.2.173 FILS Request Parameters element */
-typedef BWL_PRE_PACKED_STRUCT struct fils_request_parameters_element {
- uint8 elementid;
- uint8 length;
- uint8 element_id_ext;
- uint8 params_bitmap;
- /* variable len info */
- uint8 params_fields[];
-} BWL_POST_PACKED_STRUCT fils_request_parameters_element_t;
-
-#define FILS_PARAM_MAX_CHANNEL_TIME (1 << 2)
-
-/* 11ai 9.4.2.184 FILS HLP Container element */
-typedef BWL_PRE_PACKED_STRUCT struct fils_hlp_container_element {
- uint8 elementid;
- uint8 length;
- uint8 element_id_ext;
- uint8 dest_addr[ETHER_ADDR_LEN];
- uint8 src_addr[ETHER_ADDR_LEN];
- /* variable len hlp packet */
- uint8 hlp[];
-} BWL_POST_PACKED_STRUCT fils_hlp_container_element_t;
-
-/* 11ai 9.4.2.184 FILS Wrapped Data element */
-typedef BWL_PRE_PACKED_STRUCT struct fils_wrapped_data_element {
- uint8 elementid;
- uint8 length;
- uint8 element_id_ext;
- /* variable len wrapped data packet */
- uint8 wrapped_data[];
-} BWL_POST_PACKED_STRUCT fils_wrapped_data_element_t;
-
-#define FILS_HLP_CONTAINER_ELEM_LEN (sizeof(fils_hlp_container_element_t))
-
-/* This marks the end of a packed structure section. */
-#include <packed_section_end.h>
-
-#endif /* __FILSAUTH_H__ */
/*
* HND arm trap handling.
*
- * Copyright (C) 1999-2019, Broadcom.
- *
+ * Copyright (C) 1999-2017, Broadcom Corporation
+ *
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
* under the terms of the GNU General Public License version 2 (the "GPL"),
* available at http://www.broadcom.com/licenses/GPLv2.php, with the
* following added to such license:
- *
+ *
* As a special exception, the copyright holders of this software give you
* permission to link this software with independent modules, and to copy and
* distribute the resulting executable under terms of your choice, provided that
* the license of that module. An independent module is a module which is not
* derived from this software. The special exception does not apply to any
* modifications of the software.
- *
+ *
* Notwithstanding the above, under no circumstances may you combine this
* software in any way with any other Broadcom software provided under a license
* other than the GPL, without Broadcom's express prior written consent.
#ifndef _hnd_armtrap_h_
#define _hnd_armtrap_h_
+
/* ARM trap handling */
/* Trap types defined by ARM (see arminc.h) */
/*
* Console support for RTE - for host use only.
*
- * Copyright (C) 1999-2019, Broadcom.
- *
+ * Copyright (C) 1999-2017, Broadcom Corporation
+ *
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
* under the terms of the GNU General Public License version 2 (the "GPL"),
* available at http://www.broadcom.com/licenses/GPLv2.php, with the
* following added to such license:
- *
+ *
* As a special exception, the copyright holders of this software give you
* permission to link this software with independent modules, and to copy and
* distribute the resulting executable under terms of your choice, provided that
* the license of that module. An independent module is a module which is not
* derived from this software. The special exception does not apply to any
* modifications of the software.
- *
+ *
* Notwithstanding the above, under no circumstances may you combine this
* software in any way with any other Broadcom software provided under a license
* other than the GPL, without Broadcom's express prior written consent.
*
* <<Broadcom-WL-IPTag/Open:>>
*
- * $Id: hnd_cons.h 624181 2016-03-10 18:58:22Z $
+ * $Id: hnd_cons.h 568961 2015-07-06 18:14:49Z $
*/
#ifndef _hnd_cons_h_
#define _hnd_cons_h_
#define CBUF_LEN (128)
-#ifndef LOG_BUF_LEN
#if defined(BCM_BIG_LOG)
#define LOG_BUF_LEN (16 * 1024)
#else
#define LOG_BUF_LEN 1024
-#endif // endif
-#endif /* LOG_BUF_LEN */
+#endif
#ifdef BOOTLOADER_CONSOLE_OUTPUT
#undef RWL_MAX_DATA_LEN
#define RWL_MAX_DATA_LEN (4 * 1024 + 8)
#define CBUF_LEN (RWL_MAX_DATA_LEN + 64)
#define LOG_BUF_LEN (16 * 1024)
-#endif // endif
+#endif
typedef struct {
uint32 buf; /* Can't be pointer on (64-bit) hosts */
/*
* HND Run Time Environment debug info area
*
- * Copyright (C) 1999-2019, Broadcom.
- *
+ * Copyright (C) 1999-2017, Broadcom Corporation
+ *
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
* under the terms of the GNU General Public License version 2 (the "GPL"),
* available at http://www.broadcom.com/licenses/GPLv2.php, with the
* following added to such license:
- *
+ *
* As a special exception, the copyright holders of this software give you
* permission to link this software with independent modules, and to copy and
* distribute the resulting executable under terms of your choice, provided that
* the license of that module. An independent module is a module which is not
* derived from this software. The special exception does not apply to any
* modifications of the software.
- *
+ *
* Notwithstanding the above, under no circumstances may you combine this
* software in any way with any other Broadcom software provided under a license
* other than the GPL, without Broadcom's express prior written consent.
*
* <<Broadcom-WL-IPTag/Open:>>
*
- * $Id: hnd_debug.h 726313 2017-10-12 06:07:22Z $
+ * $Id: hnd_debug.h 678890 2017-01-11 11:48:36Z $
*/
#ifndef _HND_DEBUG_H
/* Includes only when building dongle code */
+
/* We use explicit sizes here since this gets included from different
 * systems. The sizes must match those of the creating system
 * (currently 32-bit ARM), since this is gleaned from a dump.
#ifdef FWID
extern uint32 gFWID;
-#endif // endif
+#endif
/* Define pointers for use on other systems */
#define _HD_EVLOG_P uint32
uint32 tv_usec; /* Microseconds */
} timeval_t;
+
/* Linux/ARM 32 prstatus for notes section */
typedef struct prstatus {
int32 si_signo; /* Signal number */
#define RAMSIZE_PTR_PTR_LIST RAMSIZE_PTR_PTR_0, \
RAMSIZE_PTR_PTR_END
+typedef struct hnd_ext_trap_hdr {
+ uint8 version; /* Extended trap version info */
+ uint8 reserved; /* currently unused */
+ uint16 len; /* Length of data excluding this header */
+ uint8 data[]; /* TLV data */
+} hnd_ext_trap_hdr_t;
+
+#define TAG_TRAP_SIGNATURE 1 /* Processor register dumps */
+#define TAG_TRAP_STACK 2 /* Processor stack dump (possible code locations) */
+#define TAG_TRAP_MEMORY 3 /* Memory subsystem dump */
+#define TAG_TRAP_DEEPSLEEP 4 /* Deep sleep health check failures */
+#define TAG_TRAP_PSM_WD 5 /* PSM watchdog information */
+#define TAG_TRAP_PHY 6 /* Phy related issues */
+#define TAG_TRAP_BUS 7 /* Bus level issues */
+#define TAG_TRAP_MAC 8 /* Mac level issues */
+#define TAG_TRAP_BACKPLANE 9 /* Backplane related errors */
+
+typedef struct hnd_ext_trap_bp_err
+{
+ uint32 error;
+ uint32 coreid;
+ uint32 baseaddr;
+ uint32 ioctrl;
+ uint32 iostatus;
+ uint32 resetctrl;
+ uint32 resetstatus;
+ uint32 errlogctrl;
+ uint32 errlogdone;
+ uint32 errlogstatus;
+ uint32 errlogaddrlo;
+ uint32 errlogaddrhi;
+ uint32 errlogid;
+ uint32 errloguser;
+ uint32 errlogflags;
+} hnd_ext_trap_bp_err_t;
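/*
 * Standalone usage sketch for the extended trap TLVs added above. The
 * 1-byte tag / 1-byte length record framing inside hdr->data[] is an
 * assumption (the usual Broadcom TLV layout); only hnd_ext_trap_hdr_t and
 * the TAG_TRAP_* values come from this header.
 */
static const uint8 *
hnd_ext_trap_find_tag(const hnd_ext_trap_hdr_t *hdr, uint8 tag, uint8 *out_len)
{
	uint16 off = 0;

	while (off + 2 <= hdr->len) {
		uint8 t = hdr->data[off];
		uint8 l = hdr->data[off + 1];

		if (off + 2 + l > hdr->len)
			break;				/* truncated record; stop */
		if (t == tag) {				/* e.g. tag == TAG_TRAP_BACKPLANE */
			*out_len = l;
			return &hdr->data[off + 2];
		}
		off += 2 + l;				/* advance to the next record */
	}
	return NULL;
}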
+
#endif /* !LANGUAGE_ASSEMBLY */
#endif /* _HND_DEBUG_H */
/*
* HND generic packet pool operation primitives
*
- * Copyright (C) 1999-2019, Broadcom.
- *
+ * Copyright (C) 1999-2017, Broadcom Corporation
+ *
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
* under the terms of the GNU General Public License version 2 (the "GPL"),
* available at http://www.broadcom.com/licenses/GPLv2.php, with the
* following added to such license:
- *
+ *
* As a special exception, the copyright holders of this software give you
* permission to link this software with independent modules, and to copy and
* distribute the resulting executable under terms of your choice, provided that
* the license of that module. An independent module is a module which is not
* derived from this software. The special exception does not apply to any
* modifications of the software.
- *
+ *
* Notwithstanding the above, under no circumstances may you combine this
* software in any way with any other Broadcom software provided under a license
* other than the GPL, without Broadcom's express prior written consent.
*
* <<Broadcom-WL-IPTag/Open:>>
*
- * $Id: hnd_pktpool.h 633941 2016-04-26 07:04:26Z $
+ * $Id: hnd_pktpool.h 613891 2016-01-20 10:05:44Z $
*/
#ifndef _hnd_pktpool_h_
#ifdef __cplusplus
extern "C" {
-#endif // endif
+#endif
/* mutex macros for thread safe */
#ifdef HND_PKTPOOL_THREAD_SAFE
#define HND_PKTPOOL_MUTEX_DECL(mutex) OSL_EXT_MUTEX_DECL(mutex)
#else
#define HND_PKTPOOL_MUTEX_DECL(mutex)
-#endif // endif
+#endif
#ifdef BCMPKTPOOL
#define POOL_ENAB(pool) ((pool) && (pool)->inited)
#define PKTPOOL_CB_MAX 3
#define PKTPOOL_CB_MAX_AVL 4
-/* REMOVE_RXCPLID is an arg for pktpool callback function for removing rxcplID
- * and host addr associated with the rxfrag or shared pool buffer during pktpool_reclaim().
- */
-#define REMOVE_RXCPLID 2
/* forward declaration */
struct pktpool;
typedef struct {
pktpool_cb_t cb;
void *arg;
- uint8 refcnt;
} pktpool_cbinfo_t;
/** PCIe SPLITRX related: call back fn extension to populate host address in pool pkt */
-typedef int (*pktpool_cb_extn_t)(struct pktpool *pool, void *arg1, void* pkt, int arg2);
+typedef int (*pktpool_cb_extn_t)(struct pktpool *pool, void *arg1, void* pkt, bool arg2);
typedef struct {
pktpool_cb_extn_t cb;
void *arg;
} pktpool_cbextn_info_t;
+
#ifdef BCMDBG_POOL
/* pkt pool debug states */
#define POOL_IDLE 0
void * freelist; /**< free list: see PKTNEXTFREE(), PKTSETNEXTFREE() */
uint16 avail; /**< number of packets in pool's free list */
- uint16 n_pkts; /**< number of packets managed by pool */
+ uint16 len; /**< number of packets managed by pool */
uint16 maxlen; /**< maximum size of pool <= PKTPOOL_LEN_MAX */
- uint16 max_pkt_bytes; /**< size of pkt buffer in [bytes], excluding lbuf|lbuf_frag */
+ uint16 plen; /**< size of pkt buffer in [bytes], excluding lbuf|lbuf_frag */
bool empty;
uint8 cbtoggle;
pktpool_cbinfo_t dbg_cbs[PKTPOOL_CB_MAX];
uint16 dbg_qlen;
pktpool_dbg_t dbg_q[PKTPOOL_LEN_MAX + 1];
-#endif // endif
+#endif
pktpool_cbinfo_t dmarxfill;
} pktpool_t;
+
pktpool_t *get_pktpools_registry(int id);
/* Incarnate a pktpool registry. On success returns total_pools. */
extern int pktpool_attach(osl_t *osh, uint32 total_pools);
extern int pktpool_dettach(osl_t *osh); /* Relinquish registry */
-extern int pktpool_init(osl_t *osh, pktpool_t *pktp, int *n_pkts, int max_pkt_bytes, bool istx,
- uint8 type);
+extern int pktpool_init(osl_t *osh, pktpool_t *pktp, int *pktplen, int plen, bool istx, uint8 type);
extern int pktpool_deinit(osl_t *osh, pktpool_t *pktp);
extern int pktpool_fill(osl_t *osh, pktpool_t *pktp, bool minimal);
-extern int pktpool_empty(osl_t *osh, pktpool_t *pktp);
-extern uint16 pktpool_reclaim(osl_t *osh, pktpool_t *pktp, uint16 free_cnt);
extern void* pktpool_get(pktpool_t *pktp);
extern void pktpool_free(pktpool_t *pktp, void *p);
extern int pktpool_add(pktpool_t *pktp, void *p);
extern int pktpool_avail_notify_normal(osl_t *osh, pktpool_t *pktp);
extern int pktpool_avail_notify_exclusive(osl_t *osh, pktpool_t *pktp, pktpool_cb_t cb);
extern int pktpool_avail_register(pktpool_t *pktp, pktpool_cb_t cb, void *arg);
-extern int pktpool_avail_deregister(pktpool_t *pktp, pktpool_cb_t cb, void *arg);
extern int pktpool_empty_register(pktpool_t *pktp, pktpool_cb_t cb, void *arg);
-extern int pktpool_setmaxlen(pktpool_t *pktp, uint16 max_pkts);
-extern int pktpool_setmaxlen_strict(osl_t *osh, pktpool_t *pktp, uint16 max_pkts);
+extern int pktpool_setmaxlen(pktpool_t *pktp, uint16 maxlen);
+extern int pktpool_setmaxlen_strict(osl_t *osh, pktpool_t *pktp, uint16 maxlen);
extern void pktpool_emptycb_disable(pktpool_t *pktp, bool disable);
extern bool pktpool_emptycb_disabled(pktpool_t *pktp);
extern int pktpool_hostaddr_fill_register(pktpool_t *pktp, pktpool_cb_extn_t cb, void *arg1);
#define POOLSETID(pp, ppid) (POOLPTR(pp)->id = (ppid))
-#define pktpool_tot_pkts(pp) (POOLPTR(pp)->n_pkts) /**< n_pkts = avail + in_use <= max_pkts */
+#define pktpool_len(pp) (POOLPTR(pp)->len) /**< returns packet length in [bytes] */
#define pktpool_avail(pp) (POOLPTR(pp)->avail)
-#define pktpool_max_pkt_bytes(pp) (POOLPTR(pp)->max_pkt_bytes)
-#define pktpool_max_pkts(pp) (POOLPTR(pp)->maxlen)
+#define pktpool_plen(pp) (POOLPTR(pp)->plen)
+#define pktpool_maxlen(pp) (POOLPTR(pp)->maxlen)
+
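/*
 * Pool setup sketch against the declarations above. The pool type value,
 * buffer size and error handling are illustrative placeholders; only the
 * pktpool_init()/pktpool_get()/pktpool_free()/pktpool_deinit() signatures
 * and POOL_ENAB() come from this header.
 */
static pktpool_t example_pool;

static int
example_pool_setup(osl_t *osh)
{
	int n_pkts = 64;	/* requested length; updated with the actual count */
	int err;

	err = pktpool_init(osh, &example_pool, &n_pkts, 2048, TRUE, 0 /* lbuf type */);
	if (err != 0)
		return err;

	if (POOL_ENAB(&example_pool)) {
		void *p = pktpool_get(&example_pool);	/* borrow a buffer */
		if (p != NULL)
			pktpool_free(&example_pool, p);	/* and hand it back */
	}
	return pktpool_deinit(osh, &example_pool);
}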
/*
* ----------------------------------------------------------------------------
#ifdef BCMFRAGPOOL
#define SHARED_FRAG_POOL (pktpool_shared_lfrag)
extern pktpool_t *pktpool_shared_lfrag;
-#endif // endif
-
-#ifdef BCMRESVFRAGPOOL
-#define RESV_FRAG_POOL (pktpool_resv_lfrag)
-#define RESV_POOL_INFO (resv_pool_info)
-#else
-#define RESV_FRAG_POOL ((struct pktpool *)NULL)
-#define RESV_POOL_INFO (NULL)
-#endif /* BCMRESVFRAGPOOL */
+#endif
/** PCIe SPLITRX related */
#define SHARED_RXFRAG_POOL (pktpool_shared_rxlfrag)
int hnd_pktpool_init(osl_t *osh);
int hnd_pktpool_fill(pktpool_t *pktpool, bool minimal);
void hnd_pktpool_refill(bool minimal);
-#ifdef BCMRESVFRAGPOOL
-extern pktpool_t *pktpool_resv_lfrag;
-extern struct resv_info *resv_pool_info;
-#endif /* BCMRESVFRAGPOOL */
#else /* BCMPKTPOOL */
#define SHARED_POOL ((struct pktpool *)NULL)
#endif /* BCMPKTPOOL */
#ifdef __cplusplus
}
-#endif // endif
+#endif
#endif /* _hnd_pktpool_h_ */
/*
* HND generic pktq operation primitives
*
- * Copyright (C) 1999-2019, Broadcom.
- *
+ * Copyright (C) 1999-2017, Broadcom Corporation
+ *
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
* under the terms of the GNU General Public License version 2 (the "GPL"),
* available at http://www.broadcom.com/licenses/GPLv2.php, with the
* following added to such license:
- *
+ *
* As a special exception, the copyright holders of this software give you
* permission to link this software with independent modules, and to copy and
* distribute the resulting executable under terms of your choice, provided that
* the license of that module. An independent module is a module which is not
* derived from this software. The special exception does not apply to any
* modifications of the software.
- *
+ *
* Notwithstanding the above, under no circumstances may you combine this
* software in any way with any other Broadcom software provided under a license
* other than the GPL, without Broadcom's express prior written consent.
*
* <<Broadcom-WL-IPTag/Open:>>
*
- * $Id: hnd_pktq.h 698847 2017-05-11 00:10:48Z $
+ * $Id: hnd_pktq.h 641285 2016-06-02 02:33:55Z $
*/
#ifndef _hnd_pktq_h_
#ifdef __cplusplus
extern "C" {
-#endif // endif
+#endif
/* mutex macros for thread safe */
#ifdef HND_PKTQ_THREAD_SAFE
#define HND_PKTQ_MUTEX_DECL(mutex) OSL_EXT_MUTEX_DECL(mutex)
#else
#define HND_PKTQ_MUTEX_DECL(mutex)
-#endif // endif
+#endif
/* osl multi-precedence packet queue */
#define PKTQ_LEN_MAX 0xFFFF /* Max uint16 65535 packets */
#ifndef PKTQ_LEN_DEFAULT
#define PKTQ_LEN_DEFAULT 128 /* Max 128 packets */
-#endif // endif
+#endif
#ifndef PKTQ_MAX_PREC
#define PKTQ_MAX_PREC 16 /* Maximum precedence levels */
-#endif // endif
+#endif
-/** Queue for a single precedence level */
typedef struct pktq_prec {
void *head; /**< first packet to dequeue */
void *tail; /**< last packet to dequeue */
- uint16 n_pkts; /**< number of queued packets */
- uint16 max_pkts; /**< maximum number of queued packets */
- uint16 stall_count; /**< # seconds since no packets are dequeued */
- uint16 dequeue_count; /**< # of packets dequeued in last 1 second */
+ uint16 len; /**< number of queued packets */
+ uint16 max; /**< maximum number of queued packets */
} pktq_prec_t;
#ifdef PKTQ_LOG
typedef struct pktq_log pktq_log_t;
#endif /* PKTQ_LOG */
+
#define PKTQ_COMMON \
HND_PKTQ_MUTEX_DECL(mutex) \
pktq_log_t *pktqlog; \
uint16 num_prec; /**< number of precedences in use */ \
uint16 hi_prec; /**< rapid dequeue hint (>= highest non-empty prec) */ \
- uint16 max_pkts; /**< max packets */ \
- uint16 n_pkts_tot; /**< total (cummulative over all precedences) number of packets */
+ uint16 max; /**< total max packets */ \
+ uint16 len; /**< total number of packets */
-/** multi-priority packet queue */
+/* multi-priority pkt queue */
struct pktq {
PKTQ_COMMON
/* q array must be last since # of elements can be either PKTQ_MAX_PREC or 1 */
struct pktq_prec q[PKTQ_MAX_PREC];
};
-/** simple, non-priority packet queue */
+/* simple, non-priority pkt queue */
struct spktq {
- HND_PKTQ_MUTEX_DECL(mutex)
- struct pktq_prec q;
+ PKTQ_COMMON
+ /* q array must be last since # of elements can be either PKTQ_MAX_PREC or 1 */
+ struct pktq_prec q[1];
};
#define PKTQ_PREC_ITER(pq, prec) for (prec = (pq)->num_prec - 1; prec >= 0; prec--)
* pktq filter support
*/
-/** filter function return values */
+/* filter function return values */
typedef enum {
PKT_FILTER_NOACTION = 0, /**< restore the pkt to its position in the queue */
PKT_FILTER_DELETE = 1, /**< delete the pkt */
* WARNING: pkts inserted by the user (in pkt_filter and/or flush callbacks
* and chains) in the prec queue will not be seen by the filter, and the prec
* queue will temporarily be removed from the queue, hence there are side
- * effects including pktq_n_pkts_tot() on the queue won't reflect the correct number
+ * effects including pktq_len() on the queue won't reflect the correct number
* of packets in the queue.
*/
-
typedef pktq_filter_result_t (*pktq_filter_t)(void* ctx, void* pkt);
-/**
- * The defer_free_pkt callback is invoked when the the pktq_filter callback
+/* The defer_free_pkt callback is invoked when the pktq_filter callback
* returns a PKT_FILTER_DELETE decision, which allows the user to deposit
* the packet appropriately based on the situation (free the packet or
* save it in a temporary queue etc.).
*/
typedef void (*defer_free_pkt_fn_t)(void *ctx, void *pkt);
-/**
- * The flush_free_pkt callback is invoked when all packets in the pktq
+/* The flush_free_pkt callback is invoked when all packets in the pktq
* are processed.
*/
typedef void (*flush_free_pkt_fn_t)(void *ctx);
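/*
 * Standalone sketch of a filter/defer pair for the callbacks described
 * above: every packet is marked for deletion and parked on a caller-owned
 * temporary queue so it can be freed outside the walk (the flush callback,
 * not shown, would drain that queue). BCM_REFERENCE() is assumed to come
 * from the common headers; everything else is declared in this header.
 */
static pktq_filter_result_t
example_del_all_filter(void *ctx, void *pkt)
{
	BCM_REFERENCE(ctx);
	BCM_REFERENCE(pkt);
	return PKT_FILTER_DELETE;	/* each deleted pkt goes to the defer cb */
}

static void
example_defer_to_tmpq(void *ctx, void *pkt)
{
	struct spktq *tmpq = (struct spktq *)ctx;

	pktenq(tmpq, pkt);		/* park it for the flush callback */
}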
-#if defined(WLAMPDU_MAC) && defined(PROP_TXSTATUS)
-/* this callback will be invoked when in low_txq_scb flush()
- * two back-to-back pkts has same epoch value.
- */
-typedef void (*flip_epoch_t)(void *ctx, void *pkt, uint8 *flipEpoch, uint8 *lastEpoch);
-#endif /* defined(WLAMPDU_MAC) && defined(PROP_TXSTATUS) */
-
-/** filter a pktq, using the caller supplied filter/deposition/flush functions */
+/* filter a pktq, using the caller supplied filter/deposition/flush functions */
extern void pktq_filter(struct pktq *pq, pktq_filter_t fn, void* arg,
defer_free_pkt_fn_t defer, void *defer_ctx, flush_free_pkt_fn_t flush, void *flush_ctx);
-/** filter a particular precedence in pktq, using the caller supplied filter function */
+/* filter a particular precedence in pktq, using the caller supplied filter function */
extern void pktq_pfilter(struct pktq *pq, int prec, pktq_filter_t fn, void* arg,
defer_free_pkt_fn_t defer, void *defer_ctx, flush_free_pkt_fn_t flush, void *flush_ctx);
-/** filter a simple non-precedence in spktq, using the caller supplied filter function */
-extern void spktq_filter(struct spktq *spq, pktq_filter_t fltr, void* fltr_ctx,
- defer_free_pkt_fn_t defer, void *defer_ctx, flush_free_pkt_fn_t flush, void *flush_ctx);
/* operations on a specific precedence in packet queue */
-#define pktqprec_max_pkts(pq, prec) ((pq)->q[prec].max_pkts)
-#define pktqprec_n_pkts(pq, prec) ((pq)->q[prec].n_pkts)
-#define pktqprec_empty(pq, prec) ((pq)->q[prec].n_pkts == 0)
-#define pktqprec_peek(pq, prec) ((pq)->q[prec].head)
-#define pktqprec_peek_tail(pq, prec) ((pq)->q[prec].tail)
-#define spktq_peek_tail(pq) ((pq)->q.tail)
+
+#define pktq_psetmax(pq, prec, _max) ((pq)->q[prec].max = (_max))
+#define pktq_pmax(pq, prec) ((pq)->q[prec].max)
+#define pktq_plen(pq, prec) ((pq)->q[prec].len)
+#define pktq_pempty(pq, prec) ((pq)->q[prec].len == 0)
+#define pktq_ppeek(pq, prec) ((pq)->q[prec].head)
+#define pktq_ppeek_tail(pq, prec) ((pq)->q[prec].tail)
#ifdef HND_PKTQ_THREAD_SAFE
-extern int pktqprec_avail_pkts(struct pktq *pq, int prec);
-extern bool pktqprec_full(struct pktq *pq, int prec);
+extern int pktq_pavail(struct pktq *pq, int prec);
+extern bool pktq_pfull(struct pktq *pq, int prec);
#else
-#define pktqprec_avail_pkts(pq, prec) ((pq)->q[prec].max_pkts - (pq)->q[prec].n_pkts)
-#define pktqprec_full(pq, prec) ((pq)->q[prec].n_pkts >= (pq)->q[prec].max_pkts)
+#define pktq_pavail(pq, prec) ((pq)->q[prec].max - (pq)->q[prec].len)
+#define pktq_pfull(pq, prec) ((pq)->q[prec].len >= (pq)->q[prec].max)
#endif /* HND_PKTQ_THREAD_SAFE */
extern void pktq_append(struct pktq *pq, int prec, struct spktq *list);
-extern void spktq_append(struct spktq *spq, struct spktq *list);
extern void pktq_prepend(struct pktq *pq, int prec, struct spktq *list);
-extern void spktq_prepend(struct spktq *spq, struct spktq *list);
+
extern void *pktq_penq(struct pktq *pq, int prec, void *p);
extern void *pktq_penq_head(struct pktq *pq, int prec, void *p);
extern void *pktq_pdeq(struct pktq *pq, int prec);
extern void *pktq_pdeq_prev(struct pktq *pq, int prec, void *prev_p);
extern void *pktq_pdeq_with_fn(struct pktq *pq, int prec, ifpkt_cb_t fn, int arg);
extern void *pktq_pdeq_tail(struct pktq *pq, int prec);
-/** Remove a specified packet from its queue */
+/* Remove a specified packet from its queue */
extern bool pktq_pdel(struct pktq *pq, void *p, int prec);
-/* For single precedence queues */
-extern void *spktq_enq(struct spktq *spq, void *p);
-extern void *spktq_enq_head(struct spktq *spq, void *p);
-extern void *spktq_deq(struct spktq *spq);
-extern void *spktq_deq_tail(struct spktq *spq);
-
/* operations on a set of precedences in packet queue */
extern int pktq_mlen(struct pktq *pq, uint prec_bmp);
/* operations on packet queue as a whole */
-#define pktq_n_pkts_tot(pq) ((int)(pq)->n_pkts_tot)
-#define pktq_max(pq) ((int)(pq)->max_pkts)
-#define pktq_empty(pq) ((pq)->n_pkts_tot == 0)
-#define spktq_n_pkts(spq) ((int)(spq)->q.n_pkts)
-#define spktq_empty(spq) ((spq)->q.n_pkts == 0)
-
-#define spktq_max(spq) ((int)(spq)->q.max_pkts)
-#define spktq_empty(spq) ((spq)->q.n_pkts == 0)
+#define pktq_len(pq) ((int)(pq)->len)
+#define pktq_max(pq) ((int)(pq)->max)
+#define pktq_empty(pq) ((pq)->len == 0)
#ifdef HND_PKTQ_THREAD_SAFE
extern int pktq_avail(struct pktq *pq);
extern bool pktq_full(struct pktq *pq);
-extern int spktq_avail(struct spktq *spq);
-extern bool spktq_full(struct spktq *spq);
#else
-#define pktq_avail(pq) ((int)((pq)->max_pkts - (pq)->n_pkts_tot))
-#define pktq_full(pq) ((pq)->n_pkts_tot >= (pq)->max_pkts)
-#define spktq_avail(spq) ((int)((spq)->q.max_pkts - (spq)->q.n_pkts))
-#define spktq_full(spq) ((spq)->q.n_pkts >= (spq)->q.max_pkts)
+#define pktq_avail(pq) ((int)((pq)->max - (pq)->len))
+#define pktq_full(pq) ((pq)->len >= (pq)->max)
#endif /* HND_PKTQ_THREAD_SAFE */
/* operations for single precedence queues */
-#define pktenq(pq, p) pktq_penq((pq), 0, (p))
-#define pktenq_head(pq, p) pktq_penq_head((pq), 0, (p))
-#define pktdeq(pq) pktq_pdeq((pq), 0)
-#define pktdeq_tail(pq) pktq_pdeq_tail((pq), 0)
-#define pktqflush(osh, pq, dir) pktq_pflush(osh, (pq), 0, (dir))
-#define pktqinit(pq, max_pkts) pktq_init((pq), 1, (max_pkts))
-#define pktqdeinit(pq) pktq_deinit((pq))
-#define pktqavail(pq) pktq_avail((pq))
-#define pktqfull(pq) pktq_full((pq))
+#define pktenq(pq, p) pktq_penq(((struct pktq *)(void *)pq), 0, (p))
+#define pktenq_head(pq, p) pktq_penq_head(((struct pktq *)(void *)pq), 0, (p))
+#define pktdeq(pq) pktq_pdeq(((struct pktq *)(void *)pq), 0)
+#define pktdeq_tail(pq) pktq_pdeq_tail(((struct pktq *)(void *)pq), 0)
+#define pktqflush(osh, pq, dir) pktq_pflush(osh, ((struct pktq *)(void *)pq), 0, dir)
+#define pktqinit(pq, len) pktq_init(((struct pktq *)(void *)pq), 1, len)
+#define pktqdeinit(pq) pktq_deinit((struct pktq *)(void *)pq)
+#define pktqavail(pq) pktq_avail((struct pktq *)(void *)pq)
+#define pktqfull(pq) pktq_full((struct pktq *)(void *)pq)
#define pktqfilter(pq, fltr, fltr_ctx, defer, defer_ctx, flush, flush_ctx) \
- pktq_pfilter((pq), 0, (fltr), (fltr_ctx), (defer), (defer_ctx), (flush), (flush_ctx))
-
-/* operations for simple non-precedence queues */
-#define spktenq(spq, p) spktq_enq((spq), (p))
-#define spktenq_head(spq, p) spktq_enq_head((spq), (p))
-#define spktdeq(spq) spktq_deq((spq))
-#define spktdeq_tail(spq) spktq_deq_tail((spq))
-#define spktqflush(osh, spq, dir) spktq_flush((osh), (spq), (dir))
-#define spktqinit(spq, max_pkts) spktq_init((spq), (max_pkts))
-#define spktqdeinit(spq) spktq_deinit((spq))
-#define spktqavail(spq) spktq_avail((spq))
-#define spktqfull(spq) spktq_full((spq))
-
-#define spktqfilter(spq, fltr, fltr_ctx, defer, defer_ctx, flush, flush_ctx) \
- spktq_filter((spq), (fltr), (fltr_ctx), (defer), (defer_ctx), (flush), (flush_ctx))
-extern bool pktq_init(struct pktq *pq, int num_prec, int max_pkts);
+ pktq_pfilter((struct pktq *)pq, 0, fltr, fltr_ctx, defer, defer_ctx, flush, flush_ctx)
+
+/* wrap macros for modules in components use */
+#define spktqinit(pq, max_pkts) pktqinit(pq, max_pkts)
+#define spktenq(pq, p) pktenq(pq, p)
+#define spktdeq(pq) pktdeq(pq)
+
+extern bool pktq_init(struct pktq *pq, int num_prec, int max_len);
extern bool pktq_deinit(struct pktq *pq);
-extern bool spktq_init(struct spktq *spq, int max_pkts);
-extern bool spktq_deinit(struct spktq *spq);
-extern void pktq_set_max_plen(struct pktq *pq, int prec, int max_pkts);
+extern void pktq_set_max_plen(struct pktq *pq, int prec, int max_len);
/* prec_out may be NULL if caller is not interested in return value */
extern void *pktq_deq(struct pktq *pq, int *prec_out);
extern void *pktq_deq_tail(struct pktq *pq, int *prec_out);
extern void *pktq_peek(struct pktq *pq, int *prec_out);
-extern void *spktq_peek(struct spktq *spq);
extern void *pktq_peek_tail(struct pktq *pq, int *prec_out);
-/** flush pktq */
+/* flush pktq */
extern void pktq_flush(osl_t *osh, struct pktq *pq, bool dir);
-extern void spktq_flush(osl_t *osh, struct spktq *spq, bool dir);
-/** Empty the queue at particular precedence level */
+/* Empty the queue at particular precedence level */
extern void pktq_pflush(osl_t *osh, struct pktq *pq, int prec, bool dir);
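/*
 * End-to-end sketch of the multi-precedence queue API declared in this
 * header. The precedence values and the 32-packet cap are arbitrary; the
 * caller keeps ownership of the dequeued packet.
 */
static void *
example_pktq_roundtrip(osl_t *osh, void *pkt)
{
	struct pktq q;
	int prec_out = 0;

	if (!pktq_init(&q, 4, 32))		/* 4 precedences, 32 pkts total */
		return pkt;

	(void)pktq_penq(&q, 2, pkt);		/* enqueue at precedence 2 */
	pkt = pktq_deq(&q, &prec_out);		/* highest non-empty prec first */

	pktq_flush(osh, &q, FALSE);		/* dir flag is passed to PKTFREE */
	pktq_deinit(&q);
	return pkt;
}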
#ifdef __cplusplus
}
-#endif // endif
+#endif
#endif /* _hnd_pktq_h_ */
+++ /dev/null
-/*
- * HND Trap handling.
- *
- * Copyright (C) 1999-2019, Broadcom.
- *
- * Unless you and Broadcom execute a separate written software license
- * agreement governing use of this software, this software is licensed to you
- * under the terms of the GNU General Public License version 2 (the "GPL"),
- * available at http://www.broadcom.com/licenses/GPLv2.php, with the
- * following added to such license:
- *
- * As a special exception, the copyright holders of this software give you
- * permission to link this software with independent modules, and to copy and
- * distribute the resulting executable under terms of your choice, provided that
- * you also meet, for each linked independent module, the terms and conditions of
- * the license of that module. An independent module is a module which is not
- * derived from this software. The special exception does not apply to any
- * modifications of the software.
- *
- * Notwithstanding the above, under no circumstances may you combine this
- * software in any way with any other Broadcom software provided under a license
- * other than the GPL, without Broadcom's express prior written consent.
- *
- *
- * <<Broadcom-WL-IPTag/Open:>>
- *
- * $Id: hnd_trap.h 514727 2014-11-12 03:02:48Z $
- */
-
-#ifndef _hnd_trap_h_
-#define _hnd_trap_h_
-
-#if defined(__arm__) || defined(__thumb__) || defined(__thumb2__)
-#include <hnd_armtrap.h>
-#else
-#error "unsupported CPU architecture"
-#endif // endif
-
-#endif /* _hnd_trap_h_ */
+++ /dev/null
-/*
- * HND SiliconBackplane chipcommon support - OS independent.
- *
- * Copyright (C) 1999-2019, Broadcom.
- *
- * Unless you and Broadcom execute a separate written software license
- * agreement governing use of this software, this software is licensed to you
- * under the terms of the GNU General Public License version 2 (the "GPL"),
- * available at http://www.broadcom.com/licenses/GPLv2.php, with the
- * following added to such license:
- *
- * As a special exception, the copyright holders of this software give you
- * permission to link this software with independent modules, and to copy and
- * distribute the resulting executable under terms of your choice, provided that
- * you also meet, for each linked independent module, the terms and conditions of
- * the license of that module. An independent module is a module which is not
- * derived from this software. The special exception does not apply to any
- * modifications of the software.
- *
- * Notwithstanding the above, under no circumstances may you combine this
- * software in any way with any other Broadcom software provided under a license
- * other than the GPL, without Broadcom's express prior written consent.
- *
- *
- * <<Broadcom-WL-IPTag/Open:>>
- *
- * $Id: hndchipc.h 689775 2017-03-13 12:37:05Z $
- */
-
-#ifndef _hndchipc_h_
-#define _hndchipc_h_
-
-#include <typedefs.h>
-#include <siutils.h>
-
-#ifdef RTE_UART
-typedef void (*si_serial_init_fn)(si_t *sih, void *regs, uint irq, uint baud_base, uint reg_shift);
-#else
-typedef void (*si_serial_init_fn)(void *regs, uint irq, uint baud_base, uint reg_shift);
-#endif // endif
-extern void si_serial_init(si_t *sih, si_serial_init_fn add);
-
-extern volatile void *hnd_jtagm_init(si_t *sih, uint clkd, bool exttap);
-extern void hnd_jtagm_disable(si_t *sih, volatile void *h);
-extern uint32 jtag_scan(si_t *sih, volatile void *h, uint irsz, uint32 ir0, uint32 ir1,
- uint drsz, uint32 dr0, uint32 *dr1, bool rti);
-extern uint32 jtag_read_128(si_t *sih, volatile void *h, uint irsz, uint32 ir0, uint drsz,
- uint32 dr0, uint32 *dr1, uint32 *dr2, uint32 *dr3);
-extern uint32 jtag_write_128(si_t *sih, volatile void *h, uint irsz, uint32 ir0, uint drsz,
- uint32 dr0, uint32 *dr1, uint32 *dr2, uint32 *dr3);
-extern int jtag_setbit_128(si_t *sih, uint32 jtagureg_addr, uint8 bit_pos, uint8 bit_val);
-
-#endif /* _hndchipc_h_ */
+++ /dev/null
-/*
- * HND SiliconBackplane PMU support.
- *
- * Copyright (C) 1999-2019, Broadcom.
- *
- * Unless you and Broadcom execute a separate written software license
- * agreement governing use of this software, this software is licensed to you
- * under the terms of the GNU General Public License version 2 (the "GPL"),
- * available at http://www.broadcom.com/licenses/GPLv2.php, with the
- * following added to such license:
- *
- * As a special exception, the copyright holders of this software give you
- * permission to link this software with independent modules, and to copy and
- * distribute the resulting executable under terms of your choice, provided that
- * you also meet, for each linked independent module, the terms and conditions of
- * the license of that module. An independent module is a module which is not
- * derived from this software. The special exception does not apply to any
- * modifications of the software.
- *
- * Notwithstanding the above, under no circumstances may you combine this
- * software in any way with any other Broadcom software provided under a license
- * other than the GPL, without Broadcom's express prior written consent.
- *
- *
- * <<Broadcom-WL-IPTag/Open:>>
- *
- * $Id: hndpmu.h 546588 2015-04-13 09:24:52Z $
- */
-
-#ifndef _hndlhl_h_
-#define _hndlhl_h_
-
-enum {
- LHL_MAC_TIMER = 0,
- LHL_ARM_TIMER = 1
-};
-
-typedef struct {
- uint16 offset;
- uint32 mask;
- uint32 val;
-} lhl_reg_set_t;
-
-#define LHL_REG_OFF(reg) OFFSETOF(gciregs_t, reg)
-
-extern void si_lhl_timer_config(si_t *sih, osl_t *osh, int timer_type);
-extern void si_lhl_timer_enable(si_t *sih);
-
-extern void si_lhl_setup(si_t *sih, osl_t *osh);
-extern void si_lhl_enable(si_t *sih, osl_t *osh, bool enable);
-extern void si_lhl_ilp_config(si_t *sih, osl_t *osh, uint32 ilp_period);
-extern void si_lhl_enable_sdio_wakeup(si_t *sih, osl_t *osh);
-extern void si_lhl_disable_sdio_wakeup(si_t *sih);
-extern int si_lhl_set_lpoclk(si_t *sih, osl_t *osh, uint32 lpo_force);
-extern void si_set_lv_sleep_mode_lhl_config_4369(si_t *sih);
-
-#define HIB_EXT_WAKEUP_CAP(sih) (BCM4347_CHIP(sih->chip))
-
-#define LHL_IS_PSMODE_0(sih) (si_lhl_ps_mode(sih) == LHL_PS_MODE_0)
-#define LHL_IS_PSMODE_1(sih) (si_lhl_ps_mode(sih) == LHL_PS_MODE_1)
-#endif /* _hndlhl_h_ */
+++ /dev/null
-/*
- * Utility routines for configuring different memories in Broadcom chips.
- *
- * Copyright (C) 1999-2019, Broadcom.
- *
- * Unless you and Broadcom execute a separate written software license
- * agreement governing use of this software, this software is licensed to you
- * under the terms of the GNU General Public License version 2 (the "GPL"),
- * available at http://www.broadcom.com/licenses/GPLv2.php, with the
- * following added to such license:
- *
- * As a special exception, the copyright holders of this software give you
- * permission to link this software with independent modules, and to copy and
- * distribute the resulting executable under terms of your choice, provided that
- * you also meet, for each linked independent module, the terms and conditions of
- * the license of that module. An independent module is a module which is not
- * derived from this software. The special exception does not apply to any
- * modifications of the software.
- *
- * Notwithstanding the above, under no circumstances may you combine this
- * software in any way with any other Broadcom software provided under a license
- * other than the GPL, without Broadcom's express prior written consent.
- *
- *
- * <<Broadcom-WL-IPTag/Open:>>
- *
- * $Id: $
- */
-
-#ifndef _HNDMEM_H_
-#define _HNDMEM_H_
-
-typedef enum {
- MEM_SOCRAM = 0,
- MEM_BM = 1,
- MEM_UCM = 2,
- MEM_SHM = 3,
- MEM_MAX = 4
-} hndmem_type_t;
-
-/* PDA (Power Down Array) configuration */
-typedef enum {
- PDA_CONFIG_CLEAR = 0, /* Clear PDA, i.e. Turns on the memory bank */
- PDA_CONFIG_SET_FULL = 1, /* Set PDA, i.e. Truns off the memory bank */
- PDA_CONFIG_SET_PARTIAL = 2, /* Set PDA, i.e. Truns off the memory bank */
- PDA_CONFIG_MAX = 3
-} hndmem_config_t;
-
-/* Returns the number of banks in a given memory */
-extern int hndmem_num_banks(si_t *sih, int mem);
-
-/* Returns the size of a give bank in a given memory */
-extern int hndmem_bank_size(si_t *sih, hndmem_type_t mem, int bank_num);
-
-/* Returns the start address of given memory */
-extern uint32 hndmem_mem_base(si_t *sih, hndmem_type_t mem);
-
-#ifdef BCMDEBUG
-/* Dumps the complete memory information */
-extern void hndmem_dump_meminfo_all(si_t *sih);
-#endif /* BCMDEBUG */
-
-/* Configures the Sleep PDA for a particular bank for a given memory type */
-extern int hndmem_sleeppda_bank_config(si_t *sih, hndmem_type_t mem,
- int bank_num, hndmem_config_t config, uint32 pda);
-/* Configures the Active PDA for a particular bank for a given memory type */
-extern int hndmem_activepda_bank_config(si_t *sih, hndmem_type_t mem,
- int bank_num, hndmem_config_t config, uint32 pda);
-
-/* Configures the Sleep PDA for all the banks for a given memory type */
-extern int hndmem_sleeppda_config(si_t *sih, hndmem_type_t mem,
- hndmem_config_t config);
-/* Configures the Active PDA for all the banks for a given memory type */
-extern int hndmem_activepda_config(si_t *sih, hndmem_type_t mem,
- hndmem_config_t config);
-
-/* Turn off/on all the possible banks in a given memory range */
-extern int hndmem_activepda_mem_config(si_t *sih, hndmem_type_t mem,
- uint32 mem_start, uint32 size, hndmem_config_t config);
-#endif /* _HNDMEM_H_ */
+++ /dev/null
-/*
- * HND OOBR interface header
- *
- * Copyright (C) 1999-2019, Broadcom.
- *
- * Unless you and Broadcom execute a separate written software license
- * agreement governing use of this software, this software is licensed to you
- * under the terms of the GNU General Public License version 2 (the "GPL"),
- * available at http://www.broadcom.com/licenses/GPLv2.php, with the
- * following added to such license:
- *
- * As a special exception, the copyright holders of this software give you
- * permission to link this software with independent modules, and to copy and
- * distribute the resulting executable under terms of your choice, provided that
- * you also meet, for each linked independent module, the terms and conditions of
- * the license of that module. An independent module is a module which is not
- * derived from this software. The special exception does not apply to any
- * modifications of the software.
- *
- * Notwithstanding the above, under no circumstances may you combine this
- * software in any way with any other Broadcom software provided under a license
- * other than the GPL, without Broadcom's express prior written consent.
- *
- *
- * <<Broadcom-WL-IPTag/Open:>>
- *
- * $Id: hndoobr.h 772387 2018-07-17 00:58:05Z $
- */
-
-#ifndef _hndoobr_h_
-#define _hndoobr_h_
-
-#include <typedefs.h>
-#include <siutils.h>
-
-/* for 'srcpidx' of hnd_oobr_get_intr_config() */
-#define HND_CORE_MAIN_INTR 0
-#define HND_CORE_ALT_INTR 1
-
-uint32 hnd_oobr_get_intstatus(si_t *sih);
-int hnd_oobr_get_intr_config(si_t *sih, uint srccidx, uint srcpidx, uint dstcidx, uint *dstpidx);
-int hnd_oobr_set_intr_src(si_t *sih, uint dstcidx, uint dstpidx, uint intrnum);
-void hnd_oobr_init(si_t *sih);
-
-#define OOBR_INVALID_PORT 0xFFu
-
-/* per core source/dest sel reg */
-#define OOBR_INTR_PER_CONFREG 4u /* 4 interrupts per configure reg */
-#define OOBR_INTR_NUM_MASK 0x7Fu
-#define OOBR_INTR_EN 0x80u
-/* per core config reg */
-#define OOBR_CORECNF_OUTPUT_MASK 0x0000FF00u
-#define OOBR_CORECNF_OUTPUT_SHIFT 8u
-#define OOBR_CORECNF_INPUT_MASK 0x00FF0000u
-#define OOBR_CORECNF_INPUT_SHIFT 16u
-
-typedef volatile struct hndoobr_percore_reg {
- uint32 sourcesel[OOBR_INTR_PER_CONFREG]; /* 0x00 - 0x0c */
- uint32 destsel[OOBR_INTR_PER_CONFREG]; /* 0x10 - 0x1c */
- uint32 reserved[6];
- uint32 config; /* 0x38 */
- uint32 reserved1[17]; /* 0x3c to 0x7c */
-} hndoobr_percore_reg_t;
-
-/* capability reg */
-#define OOBR_CAP_CORECNT_MASK 0x1fu
-typedef volatile struct hndoobr_reg {
- uint32 capability; /* 0x00 */
- uint32 reserved[3];
- uint32 intstatus[4]; /* 0x10 - 0x1c */
- uint32 reserved1[56]; /* 0x20 - 0xfc */
- hndoobr_percore_reg_t percore_reg[1]; /* 0x100 */
-} hndoobr_reg_t;
-
-#endif /* _hndoobr_h_ */
/*
* HND SiliconBackplane PMU support.
*
- * Copyright (C) 1999-2019, Broadcom.
- *
+ * Copyright (C) 1999-2017, Broadcom Corporation
+ *
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
* under the terms of the GNU General Public License version 2 (the "GPL"),
* available at http://www.broadcom.com/licenses/GPLv2.php, with the
* following added to such license:
- *
+ *
* As a special exception, the copyright holders of this software give you
* permission to link this software with independent modules, and to copy and
* distribute the resulting executable under terms of your choice, provided that
* the license of that module. An independent module is a module which is not
* derived from this software. The special exception does not apply to any
* modifications of the software.
- *
+ *
* Notwithstanding the above, under no circumstances may you combine this
* software in any way with any other Broadcom software provided under a license
* other than the GPL, without Broadcom's express prior written consent.
*
* <<Broadcom-WL-IPTag/Open:>>
*
- * $Id: hndpmu.h 700376 2017-05-18 22:55:43Z $
+ * $Id: hndpmu.h 657872 2016-09-02 22:17:34Z $
*/
#ifndef _hndpmu_h_
#include <siutils.h>
#include <sbchipc.h>
-extern uint32 si_pmu_rsrc_macphy_clk_deps(si_t *sih, osl_t *osh, int maccore_index);
-extern uint32 si_pmu_rsrc_ht_avail_clk_deps(si_t *sih, osl_t *osh);
extern void si_pmu_otp_power(si_t *sih, osl_t *osh, bool on, uint32* min_res_mask);
extern void si_sdiod_drive_strength_init(si_t *sih, osl_t *osh, uint32 drivestrength);
+extern void si_pmu_minresmask_htavail_set(si_t *sih, osl_t *osh, bool set_clear);
extern void si_pmu_slow_clk_reinit(si_t *sih, osl_t *osh);
extern void si_pmu_avbtimer_enable(si_t *sih, osl_t *osh, bool set_flag);
extern uint32 si_pmu_dump_pmucap_binary(si_t *sih, uchar *p);
extern uint32 si_pmu_dump_buf_size_pmucap(si_t *sih);
extern int si_pmu_wait_for_steady_state(si_t *sih, osl_t *osh, pmuregs_t *pmu);
-extern uint32 si_pmu_wake_bit_offset(si_t *sih);
#if defined(BCMULP)
int si_pmu_ulp_register(si_t *sih);
-extern void si_pmu_ulp_chipconfig(si_t *sih, osl_t *osh);
extern void si_pmu_ulp_ilp_config(si_t *sih, osl_t *osh, uint32 ilp_period);
-extern void si_pmu_ds1_res_init(si_t *sih, osl_t *osh);
#endif /* BCMULP */
extern uint32 si_pmu_get_pmutimer(si_t *sih);
-extern void si_switch_pmu_dependency(si_t *sih, uint mode);
extern void si_pmu_set_min_res_mask(si_t *sih, osl_t *osh, uint min_res_mask);
-extern void si_pmu_set_mac_rsrc_req(si_t *sih, int macunit);
-extern bool si_pmu_fast_lpo_enable_pcie(si_t *sih);
-extern bool si_pmu_fast_lpo_enable_pmu(si_t *sih);
-extern void si_pmu_chipcontrol_xtal_settings_4369(si_t *sih);
-extern uint32 si_cur_pmu_time(si_t *sih);
extern bool si_pmu_cap_fast_lpo(si_t *sih);
extern int si_pmu_fast_lpo_disable(si_t *sih);
-#ifdef BCMPMU_STATS
-extern void si_pmustatstimer_init(si_t *sih);
-extern void si_pmustatstimer_dump(si_t *sih);
-extern void si_pmustatstimer_start(si_t *sih, uint8 timerid);
-extern void si_pmustatstimer_stop(si_t *sih, uint8 timerid);
-extern void si_pmustatstimer_clear(si_t *sih, uint8 timerid);
-extern void si_pmustatstimer_clear_overflow(si_t *sih);
-extern uint32 si_pmustatstimer_read(si_t *sih, uint8 timerid);
-extern void si_pmustatstimer_cfg_src_num(si_t *sih, uint8 src_num, uint8 timerid);
-extern void si_pmustatstimer_cfg_cnt_mode(si_t *sih, uint8 cnt_mode, uint8 timerid);
-extern void si_pmustatstimer_int_enable(si_t *sih);
-extern void si_pmustatstimer_int_disable(si_t *sih);
-#endif /* BCMPMU_STATS */
#endif /* _hndpmu_h_ */
/*
* Broadcom HND chip & on-chip-interconnect-related definitions.
*
- * Copyright (C) 1999-2019, Broadcom.
- *
+ * Copyright (C) 1999-2017, Broadcom Corporation
+ *
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
* under the terms of the GNU General Public License version 2 (the "GPL"),
* available at http://www.broadcom.com/licenses/GPLv2.php, with the
* following added to such license:
- *
+ *
* As a special exception, the copyright holders of this software give you
* permission to link this software with independent modules, and to copy and
* distribute the resulting executable under terms of your choice, provided that
* the license of that module. An independent module is a module which is not
* derived from this software. The special exception does not apply to any
* modifications of the software.
- *
+ *
* Notwithstanding the above, under no circumstances may you combine this
* software in any way with any other Broadcom software provided under a license
* other than the GPL, without Broadcom's express prior written consent.
*
* <<Broadcom-WL-IPTag/Open:>>
*
- * $Id: hndsoc.h 795345 2018-12-18 16:52:03Z $
+ * $Id: hndsoc.h 613129 2016-01-17 09:25:52Z $
*/
#ifndef _HNDSOC_H
#define SI_SDRAM_SWAPPED 0x10000000 /* Byteswapped Physical SDRAM */
#define SI_SDRAM_R2 0x80000000 /* Region 2 for sdram (512 MB) */
-#ifdef STB_SOC_WIFI
-#define SI_REG_BASE_SIZE 0xB000 /* size from 0xf1800000 to 0xf180AFFF (44KB) */
-#define SI_ENUM_BASE_DEFAULT 0xF1800000 /* Enumeration space base */
-#define SI_WRAP_BASE_DEFAULT 0xF1900000 /* Wrapper space base */
-#endif /* STB_SOC_WIFI */
-
-#ifndef SI_ENUM_BASE_DEFAULT
-#define SI_ENUM_BASE_DEFAULT 0x18000000 /* Enumeration space base */
-#endif // endif
-
-#ifndef SI_WRAP_BASE_DEFAULT
-#define SI_WRAP_BASE_DEFAULT 0x18100000 /* Wrapper space base */
-#endif // endif
-
-/** new(er) chips started locating their chipc core at a different BP address than 0x1800_0000 */
-// NIC and DHD driver binaries should support both old(er) and new(er) chips at the same time
-#define SI_ENUM_BASE(sih) ((sih)->enum_base)
-#define SI_WRAP_BASE(sih) (SI_ENUM_BASE(sih) + 0x00100000)
-
+#define SI_ENUM_BASE 0x18000000 /* Enumeration space base */
+#define SI_WRAP_BASE 0x18100000 /* Wrapper space base */
#define SI_CORE_SIZE 0x1000 /* each core gets 4Kbytes for registers */
-#define SI_NIC400_GPV_BASE 0x18200000 /* NIC-400 Global Programmers View (GPV) */
-#define SI_GPV_WR_CAP_ADDR 0x4008 /* WR-CAP offset */
-#define SI_GPV_RD_CAP_EN 0x1 /* issue read */
-#define SI_GPV_WR_CAP_EN 0x2 /* issue write */
-
#ifndef SI_MAXCORES
#define SI_MAXCORES 32 /* NorthStar has more cores */
#endif /* SI_MAXCORES */
#define SI_ARMCM3_SRAM2 0x60000000 /* ARM Cortex-M3 SRAM Region 2 */
#define SI_ARM7S_SRAM2 0x80000000 /* ARM7TDMI-S SRAM Region 2 */
#define SI_ARMCA7_ROM 0x00000000 /* ARM Cortex-A7 ROM */
-#ifndef SI_ARMCA7_RAM
#define SI_ARMCA7_RAM 0x00200000 /* ARM Cortex-A7 RAM */
-#endif // endif
#define SI_ARM_FLASH1 0xffff0000 /* ARM Flash Region 1 */
#define SI_ARM_FLASH1_SZ 0x00010000 /* ARM Size of Flash Region 1 */
#define USB30D_CORE_ID 0x83d /* usb 3.0 device core */
#define ARMCR4_CORE_ID 0x83e /* ARM CR4 CPU */
#define GCI_CORE_ID 0x840 /* GCI Core */
-#define SR_CORE_ID 0x841 /* SR_CORE ID */
#define M2MDMA_CORE_ID 0x844 /* memory to memory dma */
#define CMEM_CORE_ID 0x846 /* CNDS DDR2/3 memory controller */
#define ARMCA7_CORE_ID 0x847 /* ARM CA7 CPU */
#define SYSMEM_CORE_ID 0x849 /* System memory core */
-#define HUB_CORE_ID 0x84b /* Hub core ID */
-#define HND_OOBR_CORE_ID 0x85c /* Hnd oob router core ID */
#define APB_BRIDGE_CORE_ID 0x135 /* APB bridge core ID */
#define AXI_CORE_ID 0x301 /* AXI/GPV core ID */
#define EROM_CORE_ID 0x366 /* EROM core ID */
* unused address ranges
*/
+#define CC_4706_CORE_ID 0x500 /* chipcommon core */
#define NS_PCIEG2_CORE_ID 0x501 /* PCIE Gen 2 core */
#define NS_DMA_CORE_ID 0x502 /* DMA core */
#define NS_SDIO3_CORE_ID 0x503 /* SDIO3 core */
#define NS_NAND_CORE_ID 0x509 /* NAND flash controller core */
#define NS_QSPI_CORE_ID 0x50a /* SPI flash controller core */
#define NS_CCB_CORE_ID 0x50b /* ChipcommonB core */
-#define NS_SOCRAM_CORE_ID 0x50e /* internal memory core */
+#define SOCRAM_4706_CORE_ID 0x50e /* internal memory core */
+#define NS_SOCRAM_CORE_ID SOCRAM_4706_CORE_ID
#define ARMCA9_CORE_ID 0x510 /* ARM Cortex A9 core (ihost) */
#define NS_IHOST_CORE_ID ARMCA9_CORE_ID /* ARM Cortex A9 core (ihost) */
+#define GMAC_COMMON_4706_CORE_ID 0x5dc /* Gigabit MAC core */
+#define GMAC_4706_CORE_ID 0x52d /* Gigabit MAC core */
#define AMEMC_CORE_ID 0x52e /* DDR1/2 memory controller core */
#define ALTA_CORE_ID 0x534 /* I2S core */
#define DDR23_PHY_CORE_ID 0x5dd
#define SI_PCIE1_DMA_H32 0xc0000000 /* PCIE Client Mode sb2pcitranslation2
* (2 ZettaBytes), high 32 bits
*/
+#define CC_4706B0_CORE_REV 0x8000001f /* chipcommon core */
+#define SOCRAM_4706B0_CORE_REV 0x80000005 /* internal memory core */
+#define GMAC_4706B0_CORE_REV 0x80000000 /* Gigabit MAC core */
#define NS_PCIEG2_CORE_REV_B0 0x7 /* NS-B0 PCIE Gen 2 core rev */
-/* There are TWO constants on all HND chips: SI_ENUM_BASE_DEFAULT above,
+/* There are TWO constants on all HND chips: SI_ENUM_BASE above,
* and chipcommon being the first core:
*/
#define SI_CC_IDX 0
#define SOCI_AI 1
#define SOCI_UBUS 2
#define SOCI_NAI 3
-#define SOCI_DVTBUS 4 /* BCM7XXX Digital Video Tech bus */
/* Common core control flags */
#define SICF_BIST_EN 0x8000
#define SISF_NS_BOOTDEV_OFFLOAD 0x0003 /* ROM core */
#define SISF_NS_SKUVEC_MASK 0x000c /* ROM core */
-/* dot11 core-specific status flags */
-#define SISF_MINORREV_D11_SHIFT 16
-#define SISF_MINORREV_D11_MASK 0xF /**< minor corerev (corerev == 61) */
-
/* A register that is common to all cores to
* communicate w/PMU regarding clock control.
*/
#define CCS_USBCLKREQ 0x00000100 /* USB Clock Req */
#define CCS_SECICLKREQ 0x00000100 /* SECI Clock Req */
#define CCS_ARMFASTCLOCKREQ 0x00000100 /* ARM CR4/CA7 fast clock request */
-#define CCS_SFLASH_CLKREQ 0x00000200 /* Sflash clk request */
#define CCS_AVBCLKREQ 0x00000400 /* AVB Clock enable request */
#define CCS_ERSRC_REQ_MASK 0x00000700 /* external resource requests */
#define CCS_ERSRC_REQ_SHIFT 8
#define CCS_ERSRC_STS_SHIFT 24
#define CCS_SECI_AVAIL 0x01000000 /* RO: SECI is available */
+#define CCS0_HTAVAIL 0x00010000 /* HT avail in chipc and pcmcia on 4328a0 */
+#define CCS0_ALPAVAIL 0x00020000 /* ALP avail in chipc and pcmcia on 4328a0 */
+
/* Not really related to SOC Interconnect, but a couple of software
* conventions for the use the flash space:
*/
/*
* Linux OS Independent Layer
*
- * Copyright (C) 1999-2019, Broadcom.
- *
+ * Copyright (C) 1999-2017, Broadcom Corporation
+ *
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
* under the terms of the GNU General Public License version 2 (the "GPL"),
* available at http://www.broadcom.com/licenses/GPLv2.php, with the
* following added to such license:
- *
+ *
* As a special exception, the copyright holders of this software give you
* permission to link this software with independent modules, and to copy and
* distribute the resulting executable under terms of your choice, provided that
* the license of that module. An independent module is a module which is not
* derived from this software. The special exception does not apply to any
* modifications of the software.
- *
+ *
* Notwithstanding the above, under no circumstances may you combine this
* software in any way with any other Broadcom software provided under a license
* other than the GPL, without Broadcom's express prior written consent.
*
* <<Broadcom-WL-IPTag/Open:>>
*
- * $Id: linux_osl.h 815919 2019-04-22 09:06:50Z $
+ * $Id: linux_osl.h 672413 2016-11-28 11:13:23Z $
*/
#ifndef _linux_osl_h_
#ifdef BCMDRIVER
/* OSL initialization */
+#ifdef SHARED_OSL_CMN
+extern osl_t *osl_attach(void *pdev, uint bustype, bool pkttag, void **osh_cmn);
+#else
extern osl_t *osl_attach(void *pdev, uint bustype, bool pkttag);
+#endif /* SHARED_OSL_CMN */
extern void osl_detach(osl_t *osh);
extern int osl_static_mem_init(osl_t *osh, void *adapter);
extern int osl_static_mem_deinit(osl_t *osh, void *adapter);
extern void osl_set_bus_handle(osl_t *osh, void *bus_handle);
extern void* osl_get_bus_handle(osl_t *osh);
-#ifdef DHD_MAP_LOGGING
-extern void osl_dma_map_dump(osl_t *osh);
-#define OSL_DMA_MAP_DUMP(osh) osl_dma_map_dump(osh)
-#else
-#define OSL_DMA_MAP_DUMP(osh) do {} while (0)
-#endif /* DHD_MAP_LOGGING */
/* Global ASSERT type */
extern uint32 g_assert_type;
#define PRI_FMT_d "d"
#endif /* CONFIG_PHYS_ADDR_T_64BIT */
/* ASSERT */
-#ifndef ASSERT
#if defined(BCMASSERT_LOG)
#define ASSERT(exp) \
do { if (!(exp)) osl_assert(#exp, __FILE__, __LINE__); } while (0)
#define ASSERT(exp)
#endif /* GCC_VERSION > 30100 */
#endif /* __GNUC__ */
-#endif // endif
-#endif /* ASSERT */
+#endif
/* bcm_prefetch_32B */
static inline void bcm_prefetch_32B(const uint8 *addr, const int cachelines_32B)
case 2: __asm__ __volatile__("pld\t%a0" :: "p"(addr + 32) : "cc");
case 1: __asm__ __volatile__("pld\t%a0" :: "p"(addr + 0) : "cc");
}
-#endif // endif
+#endif
}
/* microsecond delay */
pktfree_cb_fn_t tx_fn; /**< Callback function for PKTFREE */
void *tx_ctx; /**< Context to the callback function */
void *unused[3];
- void (*rx_fn)(void *rx_ctx, void *p);
- void *rx_ctx;
} osl_pubinfo_t;
extern void osl_flag_set(osl_t *osh, uint32 mask);
((osl_pubinfo_t*)osh)->tx_ctx = _tx_ctx; \
} while (0)
-#define PKTFREESETRXCB(osh, _rx_fn, _rx_ctx) \
- do { \
- ((osl_pubinfo_t*)osh)->rx_fn = _rx_fn; \
- ((osl_pubinfo_t*)osh)->rx_ctx = _rx_ctx; \
- } while (0)
/* host/bus architecture-specific byte swap */
#define BUS_SWAP32(v) (v)
/* map/unmap shared (dma-able) memory */
#define DMA_UNMAP(osh, pa, size, direction, p, dmah) \
osl_dma_unmap((osh), (pa), (size), (direction))
-extern void osl_dma_flush(osl_t *osh, void *va, uint size, int direction, void *p,
- hnddma_seg_map_t *txp_dmah);
extern dmaaddr_t osl_dma_map(osl_t *osh, void *va, uint size, int direction, void *p,
hnddma_seg_map_t *txp_dmah);
extern void osl_dma_unmap(osl_t *osh, dmaaddr_t pa, uint size, int direction);
-#ifndef PHYS_TO_VIRT
-#define PHYS_TO_VIRT(pa) osl_phys_to_virt(pa)
-#endif // endif
-#ifndef VIRT_TO_PHYS
-#define VIRT_TO_PHYS(va) osl_virt_to_phys(va)
-#endif // endif
-extern void * osl_phys_to_virt(void * pa);
-extern void * osl_virt_to_phys(void * va);
-
/* API for DMA addressing capability */
#define OSL_DMADDRWIDTH(osh, addrwidth) ({BCM_REFERENCE(osh); BCM_REFERENCE(addrwidth);})
#define OSL_ENABLE_PREEMPTION(osh) osl_preempt_enable(osh)
#if (!defined(DHD_USE_COHERENT_MEM_FOR_RING) && defined(__ARM_ARCH_7A__)) || \
- defined(STB_SOC_WIFI)
+ (defined(STBLINUX) && defined(__ARM_ARCH_7A__))
extern void osl_cache_flush(void *va, uint size);
extern void osl_cache_inv(void *va, uint size);
extern void osl_prefetch(const void *ptr);
#define OSL_CACHE_FLUSH(va, len) osl_cache_flush((void *)(va), len)
#define OSL_CACHE_INV(va, len) osl_cache_inv((void *)(va), len)
#define OSL_PREFETCH(ptr) osl_prefetch(ptr)
-#if defined(__ARM_ARCH_7A__) || defined(STB_SOC_WIFI)
+#if defined(__ARM_ARCH_7A__)
extern int osl_arch_is_coherent(void);
#define OSL_ARCH_IS_COHERENT() osl_arch_is_coherent()
extern int osl_acp_war_enab(void);
#define OSL_ARCH_IS_COHERENT() NULL
#define OSL_ACP_WAR_ENAB() NULL
-#endif // endif
-
-#ifdef BCM_BACKPLANE_TIMEOUT
-extern void osl_set_bpt_cb(osl_t *osh, void *bpt_cb, void *bpt_ctx);
-extern void osl_bpt_rreg(osl_t *osh, ulong addr, volatile void *v, uint size);
-#endif /* BCM_BACKPLANE_TIMEOUT */
-
-#if (defined(STB) && defined(__arm__))
-extern void osl_pcie_rreg(osl_t *osh, ulong addr, volatile void *v, uint size);
-#endif // endif
+#endif
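/*
 * Illustrative sketch (annotation, not part of this patch), assuming hypothetical
 * buffer names: on the non-coherent ARMv7 builds selected above, a typical DMA
 * pattern pairs OSL_CACHE_FLUSH before the device reads a buffer the CPU wrote
 * with OSL_CACHE_INV before the CPU reads data the device wrote.
 *
 *   OSL_CACHE_FLUSH(ring_va, ring_len);    // push CPU-written descriptors to memory
 *   ...                                    // device DMA runs
 *   OSL_CACHE_INV(rxbuf_va, rxbuf_len);    // drop stale cache lines before parsing
 */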
/* register access macros */
#if defined(BCMSDIO)
(uintptr)(r), sizeof(*(r)), (v)))
#define OSL_READ_REG(osh, r) (bcmsdh_reg_read(osl_get_bus_handle(osh), \
(uintptr)(r), sizeof(*(r))))
-#elif defined(BCM_BACKPLANE_TIMEOUT)
-#define OSL_READ_REG(osh, r) \
- ({\
- __typeof(*(r)) __osl_v; \
- osl_bpt_rreg(osh, (uintptr)(r), &__osl_v, sizeof(*(r))); \
- __osl_v; \
- })
#elif (defined(STB) && defined(__arm__))
+extern void osl_pcie_rreg(osl_t *osh, ulong addr, void *v, uint size);
+
#define OSL_READ_REG(osh, r) \
({\
__typeof(*(r)) __osl_v; \
- osl_pcie_rreg(osh, (uintptr)(r), &__osl_v, sizeof(*(r))); \
+ osl_pcie_rreg(osh, (uintptr)(r), (void *)&__osl_v, sizeof(*(r))); \
__osl_v; \
})
-#endif // endif
+#endif
-#if defined(BCM_BACKPLANE_TIMEOUT) || (defined(STB) && defined(__arm__))
+#if (defined(STB) && defined(__arm__))
#define SELECT_BUS_WRITE(osh, mmap_op, bus_op) ({BCM_REFERENCE(osh); mmap_op;})
#define SELECT_BUS_READ(osh, mmap_op, bus_op) ({BCM_REFERENCE(osh); bus_op;})
-#else /* !BCM47XX_CA9 && !BCM_BACKPLANE_TIMEOUT && !(STB && __arm__) */
+#else /* !(STB && __arm__) */
#if defined(BCMSDIO)
#define SELECT_BUS_WRITE(osh, mmap_op, bus_op) if (((osl_pubinfo_t*)(osh))->mmbus) \
mmap_op else bus_op
#else
#define SELECT_BUS_WRITE(osh, mmap_op, bus_op) ({BCM_REFERENCE(osh); mmap_op;})
#define SELECT_BUS_READ(osh, mmap_op, bus_op) ({BCM_REFERENCE(osh); mmap_op;})
-#endif // endif
-#endif // endif
+#endif
+#endif
#define OSL_ERROR(bcmerror) osl_error(bcmerror)
extern int osl_error(int bcmerror);
#include <linuxver.h> /* use current 2.4.x calling conventions */
#include <linux/kernel.h> /* for vsn/printf's */
#include <linux/string.h> /* for mem*, str* */
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 4, 29)
extern uint64 osl_sysuptime_us(void);
#define OSL_SYSUPTIME() ((uint32)jiffies_to_msecs(jiffies))
#define OSL_SYSUPTIME_US() osl_sysuptime_us()
-extern uint64 osl_localtime_ns(void);
-extern void osl_get_localtime(uint64 *sec, uint64 *usec);
-extern uint64 osl_systztime_us(void);
-#define OSL_LOCALTIME_NS() osl_localtime_ns()
-#define OSL_GET_LOCALTIME(sec, usec) osl_get_localtime((sec), (usec))
-#define OSL_SYSTZTIME_US() osl_systztime_us()
-#define printf(fmt, args...) printk("[dhd] " fmt , ## args)
+#else
+#define OSL_SYSUPTIME() ((uint32)jiffies * (1000 / HZ))
+#error "OSL_SYSUPTIME_US() may need to be defined"
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 4, 29) */
+#define printf(fmt, args...) printk(fmt , ## args)
#include <linux/kernel.h> /* for vsn/printf's */
#include <linux/string.h> /* for mem*, str* */
/* bcopy's: Linux kernel doesn't provide these (anymore) */
-#define bcopy_hw(src, dst, len) memcpy((dst), (src), (len))
-#define bcopy_hw_async(src, dst, len) memcpy((dst), (src), (len))
-#define bcopy_hw_poll_for_completion()
#define bcopy(src, dst, len) memcpy((dst), (src), (len))
#define bcmp(b1, b2, len) memcmp((b1), (b2), (len))
#define bzero(b, len) memset((b), '\0', (len))
SELECT_BUS_READ(osh, \
({ \
__typeof(*(r)) __osl_v = 0; \
- BCM_REFERENCE(osh); \
switch (sizeof(*(r))) { \
case sizeof(uint8): __osl_v = \
readb((volatile uint8*)(r)); break; \
#define OSL_GETCYCLES(x) rdtscl((x))
#else
#define OSL_GETCYCLES(x) ((x) = 0)
-#endif // endif
+#endif
/* dereference an address that may cause a bus exception */
#define BUSPROBE(val, addr) ({ (val) = R_REG(NULL, (addr)); 0; })
/* map/unmap physical to virtual I/O */
#if !defined(CONFIG_MMC_MSM7X00A)
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 6, 0))
-#define REG_MAP(pa, size) ioremap((unsigned long)(pa), (unsigned long)(size))
-#else
#define REG_MAP(pa, size) ioremap_nocache((unsigned long)(pa), (unsigned long)(size))
-#endif
#else
#define REG_MAP(pa, size) (void *)(0)
#endif /* !defined(CONFIG_MMC_MSM7X00A) */
*/
#include <linuxver.h> /* use current 2.4.x calling conventions */
+/* packet primitives */
+#ifdef BCMDBG_CTRACE
+#define PKTGET(osh, len, send) osl_pktget((osh), (len), __LINE__, __FILE__)
+#define PKTDUP(osh, skb) osl_pktdup((osh), (skb), __LINE__, __FILE__)
+#else
+#ifdef BCM_OBJECT_TRACE
+#define PKTGET(osh, len, send) osl_pktget((osh), (len), __LINE__, __FUNCTION__)
+#define PKTDUP(osh, skb) osl_pktdup((osh), (skb), __LINE__, __FUNCTION__)
+#else
+#define PKTGET(osh, len, send) osl_pktget((osh), (len))
+#define PKTDUP(osh, skb) osl_pktdup((osh), (skb))
+#endif /* BCM_OBJECT_TRACE */
+#endif /* BCMDBG_CTRACE */
+#define PKTLIST_DUMP(osh, buf) BCM_REFERENCE(osh)
+#define PKTDBG_TRACE(osh, pkt, bit) BCM_REFERENCE(osh)
+#if defined(BCM_OBJECT_TRACE)
+#define PKTFREE(osh, skb, send) osl_pktfree((osh), (skb), (send), __LINE__, __FUNCTION__)
+#else
+#define PKTFREE(osh, skb, send) osl_pktfree((osh), (skb), (send))
+#endif /* BCM_OBJECT_TRACE */
+#ifdef CONFIG_DHD_USE_STATIC_BUF
+#define PKTGET_STATIC(osh, len, send) osl_pktget_static((osh), (len))
+#define PKTFREE_STATIC(osh, skb, send) osl_pktfree_static((osh), (skb), (send))
+#else
+#define PKTGET_STATIC PKTGET
+#define PKTFREE_STATIC PKTFREE
+#endif /* CONFIG_DHD_USE_STATIC_BUF */
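/*
 * Illustrative sketch (annotation, not part of this patch): the PKT* allocation
 * macros above expand to the osl_pkt*() calls, adding __LINE__/caller arguments
 * only when a trace option is enabled, so driver code stays unconditional. The
 * length below is hypothetical.
 *
 *   void *p = PKTGET(osh, 2048, TRUE);     // maps to osl_pktget(); TRUE = tx path
 *   if (p != NULL) {
 *       uint8 *buf = PKTDATA(osh, p);      // underlying skb->data
 *       uint len = PKTLEN(osh, p);         // underlying skb->len
 *       ...
 *       PKTFREE(osh, p, TRUE);             // maps to osl_pktfree()
 *   }
 */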
+#define PKTDATA(osh, skb) ({BCM_REFERENCE(osh); (((struct sk_buff*)(skb))->data);})
+#define PKTLEN(osh, skb) ({BCM_REFERENCE(osh); (((struct sk_buff*)(skb))->len);})
+#define PKTHEADROOM(osh, skb) (PKTDATA(osh, skb)-(((struct sk_buff*)(skb))->head))
+#define PKTEXPHEADROOM(osh, skb, b) \
+ ({ \
+ BCM_REFERENCE(osh); \
+ skb_realloc_headroom((struct sk_buff*)(skb), (b)); \
+ })
+#define PKTTAILROOM(osh, skb) \
+ ({ \
+ BCM_REFERENCE(osh); \
+ skb_tailroom((struct sk_buff*)(skb)); \
+ })
+#define PKTPADTAILROOM(osh, skb, padlen) \
+ ({ \
+ BCM_REFERENCE(osh); \
+ skb_pad((struct sk_buff*)(skb), (padlen)); \
+ })
+#define PKTNEXT(osh, skb) ({BCM_REFERENCE(osh); (((struct sk_buff*)(skb))->next);})
+#define PKTSETNEXT(osh, skb, x) \
+ ({ \
+ BCM_REFERENCE(osh); \
+ (((struct sk_buff*)(skb))->next = (struct sk_buff*)(x)); \
+ })
+#define PKTSETLEN(osh, skb, len) \
+ ({ \
+ BCM_REFERENCE(osh); \
+ __skb_trim((struct sk_buff*)(skb), (len)); \
+ })
+#define PKTPUSH(osh, skb, bytes) \
+ ({ \
+ BCM_REFERENCE(osh); \
+ skb_push((struct sk_buff*)(skb), (bytes)); \
+ })
+#define PKTPULL(osh, skb, bytes) \
+ ({ \
+ BCM_REFERENCE(osh); \
+ skb_pull((struct sk_buff*)(skb), (bytes)); \
+ })
+#define PKTTAG(skb) ((void*)(((struct sk_buff*)(skb))->cb))
+#define PKTSETPOOL(osh, skb, x, y) BCM_REFERENCE(osh)
+#define PKTPOOL(osh, skb) ({BCM_REFERENCE(osh); BCM_REFERENCE(skb); FALSE;})
+#define PKTFREELIST(skb) PKTLINK(skb)
+#define PKTSETFREELIST(skb, x) PKTSETLINK((skb), (x))
+#define PKTPTR(skb) (skb)
+#define PKTID(skb) ({BCM_REFERENCE(skb); 0;})
+#define PKTSETID(skb, id) ({BCM_REFERENCE(skb); BCM_REFERENCE(id);})
+#define PKTSHRINK(osh, m) ({BCM_REFERENCE(osh); m;})
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 6, 0)) && defined(TSQ_MULTIPLIER)
+#define PKTORPHAN(skb, tsq) osl_pkt_orphan_partial(skb, tsq)
+extern void osl_pkt_orphan_partial(struct sk_buff *skb, int tsq);
+#else
+#define PKTORPHAN(skb, tsq) ({BCM_REFERENCE(skb); 0;})
+#endif /* LINUX VERSION >= 3.6 */
+
+
+#ifdef BCMDBG_CTRACE
+#define DEL_CTRACE(zosh, zskb) { \
+ unsigned long zflags; \
+ spin_lock_irqsave(&(zosh)->ctrace_lock, zflags); \
+ list_del(&(zskb)->ctrace_list); \
+ (zosh)->ctrace_num--; \
+ (zskb)->ctrace_start = 0; \
+ (zskb)->ctrace_count = 0; \
+ spin_unlock_irqrestore(&(zosh)->ctrace_lock, zflags); \
+}
+
+#define UPDATE_CTRACE(zskb, zfile, zline) { \
+ struct sk_buff *_zskb = (struct sk_buff *)(zskb); \
+ if (_zskb->ctrace_count < CTRACE_NUM) { \
+ _zskb->func[_zskb->ctrace_count] = zfile; \
+ _zskb->line[_zskb->ctrace_count] = zline; \
+ _zskb->ctrace_count++; \
+ } \
+ else { \
+ _zskb->func[_zskb->ctrace_start] = zfile; \
+ _zskb->line[_zskb->ctrace_start] = zline; \
+ _zskb->ctrace_start++; \
+ if (_zskb->ctrace_start >= CTRACE_NUM) \
+ _zskb->ctrace_start = 0; \
+ } \
+}
+
+#define ADD_CTRACE(zosh, zskb, zfile, zline) { \
+ unsigned long zflags; \
+ spin_lock_irqsave(&(zosh)->ctrace_lock, zflags); \
+ list_add(&(zskb)->ctrace_list, &(zosh)->ctrace_list); \
+ (zosh)->ctrace_num++; \
+ UPDATE_CTRACE(zskb, zfile, zline); \
+ spin_unlock_irqrestore(&(zosh)->ctrace_lock, zflags); \
+}
+
+#define PKTCALLER(zskb) UPDATE_CTRACE((struct sk_buff *)zskb, (char *)__FUNCTION__, __LINE__)
+#endif /* BCMDBG_CTRACE */
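/*
 * Annotation (not part of this patch): UPDATE_CTRACE keeps a small per-skb ring of
 * caller records. The first CTRACE_NUM calls fill func[]/line[] in order; after
 * that, ctrace_start points at the oldest slot, which is overwritten and then
 * advanced, wrapping back to 0. A code path can record itself with, for example:
 *
 *   PKTCALLER(skb);   // logs __FUNCTION__/__LINE__ into the skb's ctrace ring
 */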
+
+#ifdef CTFPOOL
+#define CTFPOOL_REFILL_THRESH 3
+typedef struct ctfpool {
+ void *head;
+ spinlock_t lock;
+ osl_t *osh;
+ uint max_obj;
+ uint curr_obj;
+ uint obj_size;
+ uint refills;
+ uint fast_allocs;
+ uint fast_frees;
+ uint slow_allocs;
+} ctfpool_t;
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 36)
+#define FASTBUF (1 << 0)
+#define PKTSETFAST(osh, skb) \
+ ({ \
+ BCM_REFERENCE(osh); \
+ ((((struct sk_buff*)(skb))->pktc_flags) |= FASTBUF); \
+ })
+#define PKTCLRFAST(osh, skb) \
+ ({ \
+ BCM_REFERENCE(osh); \
+ ((((struct sk_buff*)(skb))->pktc_flags) &= (~FASTBUF)); \
+ })
+#define PKTISFAST(osh, skb) \
+ ({ \
+ BCM_REFERENCE(osh); \
+ ((((struct sk_buff*)(skb))->pktc_flags) & FASTBUF); \
+ })
+#define PKTFAST(osh, skb) (((struct sk_buff*)(skb))->pktc_flags)
+#elif LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 22)
+#define FASTBUF (1 << 16)
+#define PKTSETFAST(osh, skb) \
+ ({ \
+ BCM_REFERENCE(osh); \
+ ((((struct sk_buff*)(skb))->mac_len) |= FASTBUF); \
+ })
+#define PKTCLRFAST(osh, skb) \
+ ({ \
+ BCM_REFERENCE(osh); \
+ ((((struct sk_buff*)(skb))->mac_len) &= (~FASTBUF)); \
+ })
+#define PKTISFAST(osh, skb) \
+ ({ \
+ BCM_REFERENCE(osh); \
+ ((((struct sk_buff*)(skb))->mac_len) & FASTBUF); \
+ })
+#define PKTFAST(osh, skb) (((struct sk_buff*)(skb))->mac_len)
+#else
+#define FASTBUF (1 << 0)
+#define PKTSETFAST(osh, skb) \
+ ({ \
+ BCM_REFERENCE(osh); \
+ ((((struct sk_buff*)(skb))->__unused) |= FASTBUF); \
+ })
+#define PKTCLRFAST(osh, skb) \
+ ({ \
+ BCM_REFERENCE(osh); \
+ ((((struct sk_buff*)(skb))->__unused) &= (~FASTBUF)); \
+ })
+#define PKTISFAST(osh, skb) \
+ ({ \
+ BCM_REFERENCE(osh); \
+ ((((struct sk_buff*)(skb))->__unused) & FASTBUF); \
+ })
+#define PKTFAST(osh, skb) (((struct sk_buff*)(skb))->__unused)
+#endif /* 2.6.22 */
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 22)
+#define CTFPOOLPTR(osh, skb) (((struct sk_buff*)(skb))->ctfpool)
+#define CTFPOOLHEAD(osh, skb) (((ctfpool_t *)((struct sk_buff*)(skb))->ctfpool)->head)
+#else
+#define CTFPOOLPTR(osh, skb) (((struct sk_buff*)(skb))->sk)
+#define CTFPOOLHEAD(osh, skb) (((ctfpool_t *)((struct sk_buff*)(skb))->sk)->head)
+#endif
+
+extern void *osl_ctfpool_add(osl_t *osh);
+extern void osl_ctfpool_replenish(osl_t *osh, uint thresh);
+extern int32 osl_ctfpool_init(osl_t *osh, uint numobj, uint size);
+extern void osl_ctfpool_cleanup(osl_t *osh);
+extern void osl_ctfpool_stats(osl_t *osh, void *b);
+#else /* CTFPOOL */
+#define PKTSETFAST(osh, skb) ({BCM_REFERENCE(osh); BCM_REFERENCE(skb);})
+#define PKTCLRFAST(osh, skb) ({BCM_REFERENCE(osh); BCM_REFERENCE(skb);})
+#define PKTISFAST(osh, skb) ({BCM_REFERENCE(osh); BCM_REFERENCE(skb); FALSE;})
+#endif /* CTFPOOL */
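/*
 * Annotation (not part of this patch): FASTBUF, like the SKIPCT/CHAINED/FWDERBUF/TOBR
 * bits defined further below, is parked in whichever sk_buff field the target kernel
 * provides: pktc_flags (>= 2.6.36, a field typically added by Broadcom router kernel
 * patches rather than stock Linux), mac_len (>= 2.6.22), or __unused (older). Callers
 * only ever use the accessors, e.g.:
 *
 *   PKTSETFAST(osh, skb);
 *   if (PKTISFAST(osh, skb)) { ... }   // skb came from the CTF pool fast path
 */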
+
+#define PKTSETCTF(osh, skb) ({BCM_REFERENCE(osh); BCM_REFERENCE(skb);})
+#define PKTCLRCTF(osh, skb) ({BCM_REFERENCE(osh); BCM_REFERENCE(skb);})
+#define PKTISCTF(osh, skb) ({BCM_REFERENCE(osh); BCM_REFERENCE(skb); FALSE;})
+
+#ifdef HNDCTF
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 36)
+#define SKIPCT (1 << 2)
+#define CHAINED (1 << 3)
+#define PKTSETSKIPCT(osh, skb) \
+ ({ \
+ BCM_REFERENCE(osh); \
+ (((struct sk_buff*)(skb))->pktc_flags |= SKIPCT); \
+ })
+#define PKTCLRSKIPCT(osh, skb) \
+ ({ \
+ BCM_REFERENCE(osh); \
+ (((struct sk_buff*)(skb))->pktc_flags &= (~SKIPCT)); \
+ })
+#define PKTSKIPCT(osh, skb) \
+ ({ \
+ BCM_REFERENCE(osh); \
+ (((struct sk_buff*)(skb))->pktc_flags & SKIPCT); \
+ })
+#define PKTSETCHAINED(osh, skb) \
+ ({ \
+ BCM_REFERENCE(osh); \
+ (((struct sk_buff*)(skb))->pktc_flags |= CHAINED); \
+ })
+#define PKTCLRCHAINED(osh, skb) \
+ ({ \
+ BCM_REFERENCE(osh); \
+ (((struct sk_buff*)(skb))->pktc_flags &= (~CHAINED)); \
+ })
+#define PKTISCHAINED(skb) (((struct sk_buff*)(skb))->pktc_flags & CHAINED)
+#elif LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 22)
+#define SKIPCT (1 << 18)
+#define CHAINED (1 << 19)
+#define PKTSETSKIPCT(osh, skb) \
+ ({ \
+ BCM_REFERENCE(osh); \
+ (((struct sk_buff*)(skb))->mac_len |= SKIPCT); \
+ })
+#define PKTCLRSKIPCT(osh, skb) \
+ ({ \
+ BCM_REFERENCE(osh); \
+ (((struct sk_buff*)(skb))->mac_len &= (~SKIPCT)); \
+ })
+#define PKTSKIPCT(osh, skb) \
+ ({ \
+ BCM_REFERENCE(osh); \
+ (((struct sk_buff*)(skb))->mac_len & SKIPCT); \
+ })
+#define PKTSETCHAINED(osh, skb) \
+ ({ \
+ BCM_REFERENCE(osh); \
+ (((struct sk_buff*)(skb))->mac_len |= CHAINED); \
+ })
+#define PKTCLRCHAINED(osh, skb) \
+ ({ \
+ BCM_REFERENCE(osh); \
+ (((struct sk_buff*)(skb))->mac_len &= (~CHAINED)); \
+ })
+#define PKTISCHAINED(skb) (((struct sk_buff*)(skb))->mac_len & CHAINED)
+#else /* 2.6.22 */
+#define SKIPCT (1 << 2)
+#define CHAINED (1 << 3)
+#define PKTSETSKIPCT(osh, skb) \
+ ({ \
+ BCM_REFERENCE(osh); \
+ (((struct sk_buff*)(skb))->__unused |= SKIPCT); \
+ })
+#define PKTCLRSKIPCT(osh, skb) \
+ ({ \
+ BCM_REFERENCE(osh); \
+ (((struct sk_buff*)(skb))->__unused &= (~SKIPCT)); \
+ })
+#define PKTSKIPCT(osh, skb) \
+ ({ \
+ BCM_REFERENCE(osh); \
+ (((struct sk_buff*)(skb))->__unused & SKIPCT); \
+ })
+#define PKTSETCHAINED(osh, skb) \
+ ({ \
+ BCM_REFERENCE(osh); \
+ (((struct sk_buff*)(skb))->__unused |= CHAINED); \
+ })
+#define PKTCLRCHAINED(osh, skb) \
+ ({ \
+ BCM_REFERENCE(osh); \
+ (((struct sk_buff*)(skb))->__unused &= (~CHAINED)); \
+ })
+#define PKTISCHAINED(skb) (((struct sk_buff*)(skb))->__unused & CHAINED)
+#endif /* 2.6.22 */
+typedef struct ctf_mark {
+ uint32 value;
+} ctf_mark_t;
+#define CTF_MARK(m) (m.value)
+#else /* HNDCTF */
+#define PKTSETSKIPCT(osh, skb) ({BCM_REFERENCE(osh); BCM_REFERENCE(skb);})
+#define PKTCLRSKIPCT(osh, skb) ({BCM_REFERENCE(osh); BCM_REFERENCE(skb);})
+#define PKTSKIPCT(osh, skb) ({BCM_REFERENCE(osh); BCM_REFERENCE(skb);})
+#define CTF_MARK(m) ({BCM_REFERENCE(m); 0;})
+#endif /* HNDCTF */
+
+#if defined(BCM_GMAC3)
+
+/** pktalloced accounting in devices using GMAC Bulk Forwarding to DHD */
+
+/* Account for packets delivered to downstream forwarder by GMAC interface. */
+extern void osl_pkt_tofwder(osl_t *osh, void *skbs, int skb_cnt);
+#define PKTTOFWDER(osh, skbs, skb_cnt) \
+ osl_pkt_tofwder(((osl_t *)osh), (void *)(skbs), (skb_cnt))
+
+/* Account for packets received from downstream forwarder. */
+#if defined(BCMDBG_CTRACE) /* pkt logging */
+extern void osl_pkt_frmfwder(osl_t *osh, void *skbs, int skb_cnt,
+ int line, char *file);
+#define PKTFRMFWDER(osh, skbs, skb_cnt) \
+ osl_pkt_frmfwder(((osl_t *)osh), (void *)(skbs), (skb_cnt), \
+ __LINE__, __FILE__)
+#else /* !BCMDBG_CTRACE */
+extern void osl_pkt_frmfwder(osl_t *osh, void *skbs, int skb_cnt);
+#define PKTFRMFWDER(osh, skbs, skb_cnt) \
+ osl_pkt_frmfwder(((osl_t *)osh), (void *)(skbs), (skb_cnt))
+#endif
+
+
+/** GMAC Forwarded packet tagging for reduced cache flush/invalidate.
+ * In a FWDERBUF-tagged packet, only FWDER_PKTMAPSZ bytes of data will have
+ * been accessed by the GMAC forwarder. This may be used to limit the number of
+ * cachelines that need to be flushed or invalidated.
+ * Packets sent to the DHD from a GMAC forwarder will be tagged w/ FWDERBUF.
+ * DHD may clear the FWDERBUF tag if more than FWDER_PKTMAPSZ was accessed.
+ * Likewise, a debug print of a packet payload in, say, the ethernet driver
+ * must be accompanied by clearing the FWDERBUF tag.
+ */
+
+/** Forwarded packets have a GMAC_FWDER_HWRXOFF-sized rx header (etc.h) */
+#define FWDER_HWRXOFF (18)
+
+/** Maximum amount of packet data that a downstream forwarder (GMAC) may have
+ * read into the L1 cache (not dirty). This may be used in reduced cache ops.
+ *
+ * Max 44: ET HWRXOFF[18] + BRCMHdr[4] + EtherHdr[14] + VlanHdr[4] + IP[4]
+ * Min 32: GMAC_FWDER_HWRXOFF[18] + EtherHdr[14]
+ */
+#define FWDER_MINMAPSZ (FWDER_HWRXOFF + 14)
+#define FWDER_MAXMAPSZ (FWDER_HWRXOFF + 4 + 14 + 4 + 4)
+#define FWDER_PKTMAPSZ (FWDER_MINMAPSZ)
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 36)
+
+#define FWDERBUF (1 << 4)
+#define PKTSETFWDERBUF(osh, skb) \
+ ({ \
+ BCM_REFERENCE(osh); \
+ (((struct sk_buff*)(skb))->pktc_flags |= FWDERBUF); \
+ })
+#define PKTCLRFWDERBUF(osh, skb) \
+ ({ \
+ BCM_REFERENCE(osh); \
+ (((struct sk_buff*)(skb))->pktc_flags &= (~FWDERBUF)); \
+ })
+#define PKTISFWDERBUF(osh, skb) \
+ ({ \
+ BCM_REFERENCE(osh); \
+ (((struct sk_buff*)(skb))->pktc_flags & FWDERBUF); \
+ })
+
+#elif LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 22)
+
+#define FWDERBUF (1 << 20)
+#define PKTSETFWDERBUF(osh, skb) \
+ ({ \
+ BCM_REFERENCE(osh); \
+ (((struct sk_buff*)(skb))->mac_len |= FWDERBUF); \
+ })
+#define PKTCLRFWDERBUF(osh, skb) \
+ ({ \
+ BCM_REFERENCE(osh); \
+ (((struct sk_buff*)(skb))->mac_len &= (~FWDERBUF)); \
+ })
+#define PKTISFWDERBUF(osh, skb) \
+ ({ \
+ BCM_REFERENCE(osh); \
+ (((struct sk_buff*)(skb))->mac_len & FWDERBUF); \
+ })
+
+#else /* 2.6.22 */
+
+#define FWDERBUF (1 << 4)
+#define PKTSETFWDERBUF(osh, skb) \
+ ({ \
+ BCM_REFERENCE(osh); \
+ (((struct sk_buff*)(skb))->__unused |= FWDERBUF); \
+ })
+#define PKTCLRFWDERBUF(osh, skb) \
+ ({ \
+ BCM_REFERENCE(osh); \
+ (((struct sk_buff*)(skb))->__unused &= (~FWDERBUF)); \
+ })
+#define PKTISFWDERBUF(osh, skb) \
+ ({ \
+ BCM_REFERENCE(osh); \
+ (((struct sk_buff*)(skb))->__unused & FWDERBUF); \
+ })
+
+#endif /* 2.6.22 */
+
+#else /* ! BCM_GMAC3 */
+
+#define PKTSETFWDERBUF(osh, skb) ({ BCM_REFERENCE(osh); BCM_REFERENCE(skb); })
+#define PKTCLRFWDERBUF(osh, skb) ({ BCM_REFERENCE(osh); BCM_REFERENCE(skb); })
+#define PKTISFWDERBUF(osh, skb) ({ BCM_REFERENCE(osh); BCM_REFERENCE(skb); FALSE;})
+
+#endif /* ! BCM_GMAC3 */
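/*
 * Illustrative sketch (annotation, not part of this patch) of how the FWDERBUF tag
 * described above might be used to shrink a receive-path cache invalidate; the
 * buffer names are hypothetical.
 *
 *   uint inv_len = PKTISFWDERBUF(osh, skb) ? FWDER_PKTMAPSZ : rxbuf_len;
 *   OSL_CACHE_INV(rxbuf_va, inv_len);       // only the lines the GMAC may have cached
 *   if (need_full_payload) {
 *       PKTCLRFWDERBUF(osh, skb);           // tag is stale once DHD reads further
 *       OSL_CACHE_INV(rxbuf_va, rxbuf_len);
 *   }
 */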
+
+
+#ifdef HNDCTF
+/* For broadstream iqos */
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 36)
+#define TOBR (1 << 5)
+#define PKTSETTOBR(osh, skb) \
+ ({ \
+ BCM_REFERENCE(osh); \
+ (((struct sk_buff*)(skb))->pktc_flags |= TOBR); \
+ })
+#define PKTCLRTOBR(osh, skb) \
+ ({ \
+ BCM_REFERENCE(osh); \
+ (((struct sk_buff*)(skb))->pktc_flags &= (~TOBR)); \
+ })
+#define PKTISTOBR(skb) (((struct sk_buff*)(skb))->pktc_flags & TOBR)
+#define PKTSETCTFIPCTXIF(skb, ifp) (((struct sk_buff*)(skb))->ctf_ipc_txif = ifp)
+#elif LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 22)
+#define PKTSETTOBR(osh, skb) ({BCM_REFERENCE(osh); BCM_REFERENCE(skb);})
+#define PKTCLRTOBR(osh, skb) ({BCM_REFERENCE(osh); BCM_REFERENCE(skb);})
+#define PKTISTOBR(skb) ({BCM_REFERENCE(skb); FALSE;})
+#define PKTSETCTFIPCTXIF(skb, ifp) ({BCM_REFERENCE(skb); BCM_REFERENCE(ifp);})
+#else /* 2.6.22 */
+#define PKTSETTOBR(osh, skb) ({BCM_REFERENCE(osh); BCM_REFERENCE(skb);})
+#define PKTCLRTOBR(osh, skb) ({BCM_REFERENCE(osh); BCM_REFERENCE(skb);})
+#define PKTISTOBR(skb) ({BCM_REFERENCE(skb); FALSE;})
+#define PKTSETCTFIPCTXIF(skb, ifp) ({BCM_REFERENCE(skb); BCM_REFERENCE(ifp);})
+#endif /* 2.6.22 */
+#else /* HNDCTF */
+#define PKTSETTOBR(osh, skb) ({BCM_REFERENCE(osh); BCM_REFERENCE(skb);})
+#define PKTCLRTOBR(osh, skb) ({BCM_REFERENCE(osh); BCM_REFERENCE(skb);})
+#define PKTISTOBR(skb) ({BCM_REFERENCE(skb); FALSE;})
+#endif /* HNDCTF */
+
+
+#ifdef BCMFA
+#ifdef BCMFA_HW_HASH
+#define PKTSETFAHIDX(skb, idx) (((struct sk_buff*)(skb))->napt_idx = idx)
+#else
+#define PKTSETFAHIDX(skb, idx) ({BCM_REFERENCE(skb); BCM_REFERENCE(idx);})
+#endif /* BCMFA_HW_HASH */
+#define PKTGETFAHIDX(skb) (((struct sk_buff*)(skb))->napt_idx)
+#define PKTSETFADEV(skb, imp) (((struct sk_buff*)(skb))->dev = imp)
+#define PKTSETRXDEV(skb) (((struct sk_buff*)(skb))->rxdev = ((struct sk_buff*)(skb))->dev)
+
+#define AUX_TCP_FIN_RST (1 << 0)
+#define AUX_FREED (1 << 1)
+#define PKTSETFAAUX(skb) (((struct sk_buff*)(skb))->napt_flags |= AUX_TCP_FIN_RST)
+#define PKTCLRFAAUX(skb) (((struct sk_buff*)(skb))->napt_flags &= (~AUX_TCP_FIN_RST))
+#define PKTISFAAUX(skb) (((struct sk_buff*)(skb))->napt_flags & AUX_TCP_FIN_RST)
+#define PKTSETFAFREED(skb) (((struct sk_buff*)(skb))->napt_flags |= AUX_FREED)
+#define PKTCLRFAFREED(skb) (((struct sk_buff*)(skb))->napt_flags &= (~AUX_FREED))
+#define PKTISFAFREED(skb) (((struct sk_buff*)(skb))->napt_flags & AUX_FREED)
+#define PKTISFABRIDGED(skb) PKTISFAAUX(skb)
+#else
+#define PKTISFAAUX(skb) ({BCM_REFERENCE(skb); FALSE;})
+#define PKTISFABRIDGED(skb) ({BCM_REFERENCE(skb); FALSE;})
+#define PKTISFAFREED(skb) ({BCM_REFERENCE(skb); FALSE;})
+
+#define PKTCLRFAAUX(skb) BCM_REFERENCE(skb)
+#define PKTSETFAFREED(skb) BCM_REFERENCE(skb)
+#define PKTCLRFAFREED(skb) BCM_REFERENCE(skb)
+#endif /* BCMFA */
+
+#if defined(BCM_OBJECT_TRACE)
+extern void osl_pktfree(osl_t *osh, void *skb, bool send, int line, const char *caller);
+#else
+extern void osl_pktfree(osl_t *osh, void *skb, bool send);
+#endif /* BCM_OBJECT_TRACE */
+extern void *osl_pktget_static(osl_t *osh, uint len);
+extern void osl_pktfree_static(osl_t *osh, void *skb, bool send);
+extern void osl_pktclone(osl_t *osh, void **pkt);
+
+#ifdef BCMDBG_CTRACE
+#define PKT_CTRACE_DUMP(osh, b) osl_ctrace_dump((osh), (b))
+extern void *osl_pktget(osl_t *osh, uint len, int line, char *file);
+extern void *osl_pkt_frmnative(osl_t *osh, void *skb, int line, char *file);
+extern int osl_pkt_is_frmnative(osl_t *osh, struct sk_buff *pkt);
+extern void *osl_pktdup(osl_t *osh, void *skb, int line, char *file);
+struct bcmstrbuf;
+extern void osl_ctrace_dump(osl_t *osh, struct bcmstrbuf *b);
+#else
+#ifdef BCM_OBJECT_TRACE
+extern void *osl_pktget(osl_t *osh, uint len, int line, const char *caller);
+extern void *osl_pktdup(osl_t *osh, void *skb, int line, const char *caller);
+#else
+extern void *osl_pktget(osl_t *osh, uint len);
+extern void *osl_pktdup(osl_t *osh, void *skb);
+#endif /* BCM_OBJECT_TRACE */
+extern void *osl_pkt_frmnative(osl_t *osh, void *skb);
+#endif /* BCMDBG_CTRACE */
+extern struct sk_buff *osl_pkt_tonative(osl_t *osh, void *pkt);
+#ifdef BCMDBG_CTRACE
+#define PKTFRMNATIVE(osh, skb) osl_pkt_frmnative(((osl_t *)osh), \
+ (struct sk_buff*)(skb), __LINE__, __FILE__)
+#define PKTISFRMNATIVE(osh, skb) osl_pkt_is_frmnative((osl_t *)(osh), (struct sk_buff *)(skb))
+#else
+#define PKTFRMNATIVE(osh, skb) osl_pkt_frmnative(((osl_t *)osh), (struct sk_buff*)(skb))
+#endif /* BCMDBG_CTRACE */
+#define PKTTONATIVE(osh, pkt) osl_pkt_tonative((osl_t *)(osh), (pkt))
+
+#define PKTLINK(skb) (((struct sk_buff*)(skb))->prev)
+#define PKTSETLINK(skb, x) (((struct sk_buff*)(skb))->prev = (struct sk_buff*)(x))
+#define PKTPRIO(skb) (((struct sk_buff*)(skb))->priority)
+#define PKTSETPRIO(skb, x) (((struct sk_buff*)(skb))->priority = (x))
+#define PKTSUMNEEDED(skb) (((struct sk_buff*)(skb))->ip_summed == CHECKSUM_HW)
+#define PKTSETSUMGOOD(skb, x) (((struct sk_buff*)(skb))->ip_summed = \
+ ((x) ? CHECKSUM_UNNECESSARY : CHECKSUM_NONE))
+/* PKTSETSUMNEEDED and PKTSUMGOOD are not possible because skb->ip_summed is overloaded */
+#define PKTSHARED(skb) (((struct sk_buff*)(skb))->cloned)
+
+#ifdef CONFIG_NF_CONNTRACK_MARK
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0))
+#define PKTMARK(p) (((struct sk_buff *)(p))->mark)
+#define PKTSETMARK(p, m) ((struct sk_buff *)(p))->mark = (m)
+#else /* !2.6.0 */
+#define PKTMARK(p) (((struct sk_buff *)(p))->nfmark)
+#define PKTSETMARK(p, m) ((struct sk_buff *)(p))->nfmark = (m)
+#endif /* 2.6.0 */
+#else /* CONFIG_NF_CONNTRACK_MARK */
+#define PKTMARK(p) 0
+#define PKTSETMARK(p, m)
+#endif /* CONFIG_NF_CONNTRACK_MARK */
+
+#define PKTALLOCED(osh) osl_pktalloced(osh)
+extern uint osl_pktalloced(osl_t *osh);
+
#define OSL_RAND() osl_rand()
extern uint32 osl_rand(void);
-#define DMA_FLUSH(osh, va, size, direction, p, dmah) \
- osl_dma_flush((osh), (va), (size), (direction), (p), (dmah))
#if !defined(BCM_SECURE_DMA)
#define DMA_MAP(osh, va, size, direction, p, dmah) \
osl_dma_map((osh), (va), (size), (direction), (p), (dmah))
#endif /* !(defined(BCM_SECURE_DMA)) */
+#ifdef PKTC
+/* Use 8 bytes of the skb tstamp field to store the info below */
+struct chain_node {
+ struct sk_buff *link;
+ unsigned int flags:3, pkts:9, bytes:20;
+};
+
+#define CHAIN_NODE(skb) ((struct chain_node*)(((struct sk_buff*)skb)->pktc_cb))
+
+#define PKTCSETATTR(s, f, p, b) ({CHAIN_NODE(s)->flags = (f); CHAIN_NODE(s)->pkts = (p); \
+ CHAIN_NODE(s)->bytes = (b);})
+#define PKTCCLRATTR(s) ({CHAIN_NODE(s)->flags = CHAIN_NODE(s)->pkts = \
+ CHAIN_NODE(s)->bytes = 0;})
+#define PKTCGETATTR(s) (CHAIN_NODE(s)->flags << 29 | CHAIN_NODE(s)->pkts << 20 | \
+ CHAIN_NODE(s)->bytes)
+#define PKTCCNT(skb) (CHAIN_NODE(skb)->pkts)
+#define PKTCLEN(skb) (CHAIN_NODE(skb)->bytes)
+#define PKTCGETFLAGS(skb) (CHAIN_NODE(skb)->flags)
+#define PKTCSETFLAGS(skb, f) (CHAIN_NODE(skb)->flags = (f))
+#define PKTCCLRFLAGS(skb) (CHAIN_NODE(skb)->flags = 0)
+#define PKTCFLAGS(skb) (CHAIN_NODE(skb)->flags)
+#define PKTCSETCNT(skb, c) (CHAIN_NODE(skb)->pkts = (c))
+#define PKTCINCRCNT(skb) (CHAIN_NODE(skb)->pkts++)
+#define PKTCADDCNT(skb, c) (CHAIN_NODE(skb)->pkts += (c))
+#define PKTCSETLEN(skb, l) (CHAIN_NODE(skb)->bytes = (l))
+#define PKTCADDLEN(skb, l) (CHAIN_NODE(skb)->bytes += (l))
+#define PKTCSETFLAG(skb, fb) (CHAIN_NODE(skb)->flags |= (fb))
+#define PKTCCLRFLAG(skb, fb) (CHAIN_NODE(skb)->flags &= ~(fb))
+#define PKTCLINK(skb) (CHAIN_NODE(skb)->link)
+#define PKTSETCLINK(skb, x) (CHAIN_NODE(skb)->link = (struct sk_buff*)(x))
+#define FOREACH_CHAINED_PKT(skb, nskb) \
+ for (; (skb) != NULL; (skb) = (nskb)) \
+ if ((nskb) = (PKTISCHAINED(skb) ? PKTCLINK(skb) : NULL), \
+ PKTSETCLINK((skb), NULL), 1)
+#define PKTCFREE(osh, skb, send) \
+do { \
+ void *nskb; \
+ ASSERT((skb) != NULL); \
+ FOREACH_CHAINED_PKT((skb), nskb) { \
+ PKTCLRCHAINED((osh), (skb)); \
+ PKTCCLRFLAGS((skb)); \
+ PKTFREE((osh), (skb), (send)); \
+ } \
+} while (0)
+#define PKTCENQTAIL(h, t, p) \
+do { \
+ if ((t) == NULL) { \
+ (h) = (t) = (p); \
+ } else { \
+ PKTSETCLINK((t), (p)); \
+ (t) = (p); \
+ } \
+} while (0)
+#endif /* PKTC */
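/*
 * Illustrative sketch (annotation, not part of this patch): chain_node packs 3 flag
 * bits, a 9-bit packet count and a 20-bit byte count, which PKTCGETATTR returns as
 * (flags << 29 | pkts << 20 | bytes). A transmit path might chain packets and release
 * them as follows (names are hypothetical).
 *
 *   PKTCSETATTR(head, 0, 1, PKTLEN(osh, head)); // start accounting on the head
 *   PKTSETCLINK(prev, skb);                     // append skb after prev
 *   PKTSETCHAINED(osh, prev);                   // so FOREACH_CHAINED_PKT follows it
 *   PKTCINCRCNT(head);
 *   PKTCADDLEN(head, PKTLEN(osh, skb));
 *   ...
 *   PKTCFREE(osh, head, TRUE);                  // walks the chain, clears CHAINED and
 *                                               // frees every packet
 */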
+
#else /* ! BCMDRIVER */
+
/* ASSERT */
#define ASSERT(exp) do {} while (0)
* Adding these dummy values for build apss only
* These need to be changed when we revisit this.
*/
+#if defined(STBLINUX)
+
+#if defined(__ARM_ARCH_7A__)
+#define ACP_WAR_ENAB() 0
+#define ACP_WIN_LIMIT 1
+#define arch_is_coherent() 0
+#endif /* __ARM_ARCH_7A__ */
+
+#endif /* STBLINUX */
#ifdef BCM_SECURE_DMA
#define PKTLIST_UNLINK(x, y) skb_unlink((struct sk_buff *)(y), (struct sk_buff_head *)(x))
#define PKTLIST_FINI(x) skb_queue_purge((struct sk_buff_head *)(x))
-#ifndef _linuxver_h_
-typedef struct timer_list_compat timer_list_compat_t;
-#endif /* _linuxver_h_ */
+#ifdef REPORT_FATAL_TIMEOUTS
typedef struct osl_timer {
- timer_list_compat_t *timer;
+ struct timer_list *timer;
bool set;
} osl_timer_t;
extern void osl_timer_add(osl_t *osh, osl_timer_t *t, uint32 ms, bool periodic);
extern void osl_timer_update(osl_t *osh, osl_timer_t *t, uint32 ms, bool periodic);
extern bool osl_timer_del(osl_t *osh, osl_timer_t *t);
-
-typedef atomic_t osl_atomic_t;
-#define OSL_ATOMIC_SET(osh, v, x) atomic_set(v, x)
-#define OSL_ATOMIC_INIT(osh, v) atomic_set(v, 0)
-#define OSL_ATOMIC_INC(osh, v) atomic_inc(v)
-#define OSL_ATOMIC_INC_RETURN(osh, v) atomic_inc_return(v)
-#define OSL_ATOMIC_DEC(osh, v) atomic_dec(v)
-#define OSL_ATOMIC_DEC_RETURN(osh, v) atomic_dec_return(v)
-#define OSL_ATOMIC_READ(osh, v) atomic_read(v)
-#define OSL_ATOMIC_ADD(osh, v, x) atomic_add(v, x)
-
-#ifndef atomic_set_mask
-#define OSL_ATOMIC_OR(osh, v, x) atomic_or(x, v)
-#define OSL_ATOMIC_AND(osh, v, x) atomic_and(x, v)
-#else
-#define OSL_ATOMIC_OR(osh, v, x) atomic_set_mask(x, v)
-#define OSL_ATOMIC_AND(osh, v, x) atomic_clear_mask(~x, v)
-#endif // endif
-
-#include <linux/rbtree.h>
-
-typedef struct rb_node osl_rb_node_t;
-typedef struct rb_root osl_rb_root_t;
-
-#define OSL_RB_ENTRY(ptr, type, member) rb_entry(ptr, type, member)
-#define OSL_RB_INSERT_COLOR(root, node) rb_insert_color(root, node)
-#define OSL_RB_ERASE(node, root) rb_erase(node, root)
-#define OSL_RB_FIRST(root) rb_first(root)
-#define OSL_RB_LAST(root) rb_last(root)
-#define OSL_RB_LINK_NODE(node, parent, rb_link) \
- rb_link_node(node, parent, rb_link)
-
-extern void *osl_spin_lock_init(osl_t *osh);
-extern void osl_spin_lock_deinit(osl_t *osh, void *lock);
-extern unsigned long osl_spin_lock(void *lock);
-extern void osl_spin_unlock(void *lock, unsigned long flags);
-
-typedef struct osl_timespec {
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 6, 0))
- __kernel_old_time_t tv_sec; /* seconds */
-#else
- __kernel_time_t tv_sec; /* seconds */
#endif
- __kernel_suseconds_t tv_usec; /* microseconds */
- long tv_nsec; /* nanoseconds */
-} osl_timespec_t;
-extern void osl_do_gettimeofday(struct osl_timespec *ts);
-extern void osl_get_monotonic_boottime(struct osl_timespec *ts);
+
#endif /* _linux_osl_h_ */
+++ /dev/null
-/*
- * Linux Packet (skb) interface
- *
- * Copyright (C) 1999-2019, Broadcom.
- *
- * Unless you and Broadcom execute a separate written software license
- * agreement governing use of this software, this software is licensed to you
- * under the terms of the GNU General Public License version 2 (the "GPL"),
- * available at http://www.broadcom.com/licenses/GPLv2.php, with the
- * following added to such license:
- *
- * As a special exception, the copyright holders of this software give you
- * permission to link this software with independent modules, and to copy and
- * distribute the resulting executable under terms of your choice, provided that
- * you also meet, for each linked independent module, the terms and conditions of
- * the license of that module. An independent module is a module which is not
- * derived from this software. The special exception does not apply to any
- * modifications of the software.
- *
- * Notwithstanding the above, under no circumstances may you combine this
- * software in any way with any other Broadcom software provided under a license
- * other than the GPL, without Broadcom's express prior written consent.
- *
- *
- * <<Broadcom-WL-IPTag/Open:>>
- *
- * $Id: linux_pkt.h 701430 2017-05-25 00:03:02Z $
- */
-
-#ifndef _linux_pkt_h_
-#define _linux_pkt_h_
-
-#include <typedefs.h>
-
-#ifdef __ARM_ARCH_7A__
-#define PKT_HEADROOM_DEFAULT NET_SKB_PAD /**< NET_SKB_PAD is defined in a linux kernel header */
-#else
-#define PKT_HEADROOM_DEFAULT 16
-#endif /* __ARM_ARCH_7A__ */
-
-#ifdef BCMDRIVER
-/*
- * BINOSL selects the slightly slower function-call-based binary compatible osl.
- * Macros expand to calls to functions defined in linux_osl.c .
- */
-/* Because the non BINOSL implemenation of the PKT OSL routines are macros (for
- * performance reasons), we need the Linux headers.
- */
-#include <linuxver.h>
-
-/* packet primitives */
-#ifdef BCM_OBJECT_TRACE
-#define PKTGET(osh, len, send) linux_pktget((osh), (len), __LINE__, __FUNCTION__)
-#define PKTDUP(osh, skb) osl_pktdup((osh), (skb), __LINE__, __FUNCTION__)
-#else
-#define PKTGET(osh, len, send) linux_pktget((osh), (len))
-#define PKTDUP(osh, skb) osl_pktdup((osh), (skb))
-#endif /* BCM_OBJECT_TRACE */
-#define PKTLIST_DUMP(osh, buf) BCM_REFERENCE(osh)
-#define PKTDBG_TRACE(osh, pkt, bit) BCM_REFERENCE(osh)
-#if defined(BCM_OBJECT_TRACE)
-#define PKTFREE(osh, skb, send) linux_pktfree((osh), (skb), (send), __LINE__, __FUNCTION__)
-#else
-#define PKTFREE(osh, skb, send) linux_pktfree((osh), (skb), (send))
-#endif /* BCM_OBJECT_TRACE */
-#ifdef CONFIG_DHD_USE_STATIC_BUF
-#define PKTGET_STATIC(osh, len, send) osl_pktget_static((osh), (len))
-#define PKTFREE_STATIC(osh, skb, send) osl_pktfree_static((osh), (skb), (send))
-#else
-#define PKTGET_STATIC PKTGET
-#define PKTFREE_STATIC PKTFREE
-#endif /* CONFIG_DHD_USE_STATIC_BUF */
-#define PKTDATA(osh, skb) ({BCM_REFERENCE(osh); (((struct sk_buff*)(skb))->data);})
-#define PKTLEN(osh, skb) ({BCM_REFERENCE(osh); (((struct sk_buff*)(skb))->len);})
-#define PKTHEAD(osh, skb) ({BCM_REFERENCE(osh); (((struct sk_buff*)(skb))->head);})
-#define PKTSETHEAD(osh, skb, h) ({BCM_REFERENCE(osh); \
- (((struct sk_buff *)(skb))->head = (h));})
-#define PKTHEADROOM(osh, skb) (PKTDATA(osh, skb)-(((struct sk_buff*)(skb))->head))
-#define PKTEXPHEADROOM(osh, skb, b) \
- ({ \
- BCM_REFERENCE(osh); \
- skb_realloc_headroom((struct sk_buff*)(skb), (b)); \
- })
-#define PKTTAILROOM(osh, skb) \
- ({ \
- BCM_REFERENCE(osh); \
- skb_tailroom((struct sk_buff*)(skb)); \
- })
-#define PKTPADTAILROOM(osh, skb, padlen) \
- ({ \
- BCM_REFERENCE(osh); \
- skb_pad((struct sk_buff*)(skb), (padlen)); \
- })
-#define PKTNEXT(osh, skb) ({BCM_REFERENCE(osh); (((struct sk_buff*)(skb))->next);})
-#define PKTSETNEXT(osh, skb, x) \
- ({ \
- BCM_REFERENCE(osh); \
- (((struct sk_buff*)(skb))->next = (struct sk_buff*)(x)); \
- })
-#define PKTSETLEN(osh, skb, len) \
- ({ \
- BCM_REFERENCE(osh); \
- __skb_trim((struct sk_buff*)(skb), (len)); \
- })
-#define PKTPUSH(osh, skb, bytes) \
- ({ \
- BCM_REFERENCE(osh); \
- skb_push((struct sk_buff*)(skb), (bytes)); \
- })
-#define PKTPULL(osh, skb, bytes) \
- ({ \
- BCM_REFERENCE(osh); \
- skb_pull((struct sk_buff*)(skb), (bytes)); \
- })
-#define PKTTAG(skb) ((void*)(((struct sk_buff*)(skb))->cb))
-#define PKTSETPOOL(osh, skb, x, y) BCM_REFERENCE(osh)
-#define PKTPOOL(osh, skb) ({BCM_REFERENCE(osh); BCM_REFERENCE(skb); FALSE;})
-#define PKTFREELIST(skb) PKTLINK(skb)
-#define PKTSETFREELIST(skb, x) PKTSETLINK((skb), (x))
-#define PKTPTR(skb) (skb)
-#define PKTID(skb) ({BCM_REFERENCE(skb); 0;})
-#define PKTSETID(skb, id) ({BCM_REFERENCE(skb); BCM_REFERENCE(id);})
-#define PKTSHRINK(osh, m) ({BCM_REFERENCE(osh); m;})
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 6, 0)) && defined(TSQ_MULTIPLIER)
-#define PKTORPHAN(skb, tsq) osl_pkt_orphan_partial(skb, tsq)
-extern void osl_pkt_orphan_partial(struct sk_buff *skb, int tsq);
-#else
-#define PKTORPHAN(skb, tsq) ({BCM_REFERENCE(skb); 0;})
-#endif /* LINUX VERSION >= 3.6 */
-
-#define PKTSETFAST(osh, skb) ({BCM_REFERENCE(osh); BCM_REFERENCE(skb);})
-#define PKTCLRFAST(osh, skb) ({BCM_REFERENCE(osh); BCM_REFERENCE(skb);})
-#define PKTISFAST(osh, skb) ({BCM_REFERENCE(osh); BCM_REFERENCE(skb); FALSE;})
-
-#define PKTSETCTF(osh, skb) ({BCM_REFERENCE(osh); BCM_REFERENCE(skb);})
-#define PKTCLRCTF(osh, skb) ({BCM_REFERENCE(osh); BCM_REFERENCE(skb);})
-#define PKTISCTF(osh, skb) ({BCM_REFERENCE(osh); BCM_REFERENCE(skb); FALSE;})
-
-#define PKTSETSKIPCT(osh, skb) ({BCM_REFERENCE(osh); BCM_REFERENCE(skb);})
-#define PKTCLRSKIPCT(osh, skb) ({BCM_REFERENCE(osh); BCM_REFERENCE(skb);})
-#define PKTSKIPCT(osh, skb) ({BCM_REFERENCE(osh); BCM_REFERENCE(skb);})
-#define CTF_MARK(m) ({BCM_REFERENCE(m); 0;})
-
-#define PKTFRAGLEN(osh, lb, ix) (0)
-#define PKTSETFRAGLEN(osh, lb, ix, len) BCM_REFERENCE(osh)
-
-#define PKTSETFWDERBUF(osh, skb) ({ BCM_REFERENCE(osh); BCM_REFERENCE(skb); })
-#define PKTCLRFWDERBUF(osh, skb) ({ BCM_REFERENCE(osh); BCM_REFERENCE(skb); })
-#define PKTISFWDERBUF(osh, skb) ({ BCM_REFERENCE(osh); BCM_REFERENCE(skb); FALSE;})
-
-#define PKTSETTOBR(osh, skb) ({BCM_REFERENCE(osh); BCM_REFERENCE(skb);})
-#define PKTCLRTOBR(osh, skb) ({BCM_REFERENCE(osh); BCM_REFERENCE(skb);})
-#define PKTISTOBR(skb) ({BCM_REFERENCE(skb); FALSE;})
-
-#ifdef BCMFA
-#ifdef BCMFA_HW_HASH
-#define PKTSETFAHIDX(skb, idx) (((struct sk_buff*)(skb))->napt_idx = idx)
-#else
-#define PKTSETFAHIDX(skb, idx) ({BCM_REFERENCE(skb); BCM_REFERENCE(idx);})
-#endif /* BCMFA_SW_HASH */
-#define PKTGETFAHIDX(skb) (((struct sk_buff*)(skb))->napt_idx)
-#define PKTSETFADEV(skb, imp) (((struct sk_buff*)(skb))->dev = imp)
-#define PKTSETRXDEV(skb) (((struct sk_buff*)(skb))->rxdev = ((struct sk_buff*)(skb))->dev)
-
-#define AUX_TCP_FIN_RST (1 << 0)
-#define AUX_FREED (1 << 1)
-#define PKTSETFAAUX(skb) (((struct sk_buff*)(skb))->napt_flags |= AUX_TCP_FIN_RST)
-#define PKTCLRFAAUX(skb) (((struct sk_buff*)(skb))->napt_flags &= (~AUX_TCP_FIN_RST))
-#define PKTISFAAUX(skb) (((struct sk_buff*)(skb))->napt_flags & AUX_TCP_FIN_RST)
-#define PKTSETFAFREED(skb) (((struct sk_buff*)(skb))->napt_flags |= AUX_FREED)
-#define PKTCLRFAFREED(skb) (((struct sk_buff*)(skb))->napt_flags &= (~AUX_FREED))
-#define PKTISFAFREED(skb) (((struct sk_buff*)(skb))->napt_flags & AUX_FREED)
-#define PKTISFABRIDGED(skb) PKTISFAAUX(skb)
-#else
-#define PKTISFAAUX(skb) ({BCM_REFERENCE(skb); FALSE;})
-#define PKTISFABRIDGED(skb) ({BCM_REFERENCE(skb); FALSE;})
-#define PKTISFAFREED(skb) ({BCM_REFERENCE(skb); FALSE;})
-
-#define PKTCLRFAAUX(skb) BCM_REFERENCE(skb)
-#define PKTSETFAFREED(skb) BCM_REFERENCE(skb)
-#define PKTCLRFAFREED(skb) BCM_REFERENCE(skb)
-#endif /* BCMFA */
-
-#if defined(BCM_OBJECT_TRACE)
-extern void linux_pktfree(osl_t *osh, void *skb, bool send, int line, const char *caller);
-#else
-extern void linux_pktfree(osl_t *osh, void *skb, bool send);
-#endif /* BCM_OBJECT_TRACE */
-extern void *osl_pktget_static(osl_t *osh, uint len);
-extern void osl_pktfree_static(osl_t *osh, void *skb, bool send);
-extern void osl_pktclone(osl_t *osh, void **pkt);
-
-#ifdef BCM_OBJECT_TRACE
-extern void *linux_pktget(osl_t *osh, uint len, int line, const char *caller);
-extern void *osl_pktdup(osl_t *osh, void *skb, int line, const char *caller);
-#else
-extern void *linux_pktget(osl_t *osh, uint len);
-extern void *osl_pktdup(osl_t *osh, void *skb);
-#endif /* BCM_OBJECT_TRACE */
-extern void *osl_pkt_frmnative(osl_t *osh, void *skb);
-extern struct sk_buff *osl_pkt_tonative(osl_t *osh, void *pkt);
-#define PKTFRMNATIVE(osh, skb) osl_pkt_frmnative(((osl_t *)osh), (struct sk_buff*)(skb))
-#define PKTTONATIVE(osh, pkt) osl_pkt_tonative((osl_t *)(osh), (pkt))
-
-#define PKTLINK(skb) (((struct sk_buff*)(skb))->prev)
-#define PKTSETLINK(skb, x) (((struct sk_buff*)(skb))->prev = (struct sk_buff*)(x))
-#define PKTPRIO(skb) (((struct sk_buff*)(skb))->priority)
-#define PKTSETPRIO(skb, x) (((struct sk_buff*)(skb))->priority = (x))
-#define PKTSUMNEEDED(skb) (((struct sk_buff*)(skb))->ip_summed == CHECKSUM_HW)
-#define PKTSETSUMGOOD(skb, x) (((struct sk_buff*)(skb))->ip_summed = \
- ((x) ? CHECKSUM_UNNECESSARY : CHECKSUM_NONE))
-/* PKTSETSUMNEEDED and PKTSUMGOOD are not possible because skb->ip_summed is overloaded */
-#define PKTSHARED(skb) (((struct sk_buff*)(skb))->cloned)
-
-#ifdef CONFIG_NF_CONNTRACK_MARK
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0))
-#define PKTMARK(p) (((struct sk_buff *)(p))->mark)
-#define PKTSETMARK(p, m) ((struct sk_buff *)(p))->mark = (m)
-#else /* !2.6.0 */
-#define PKTMARK(p) (((struct sk_buff *)(p))->nfmark)
-#define PKTSETMARK(p, m) ((struct sk_buff *)(p))->nfmark = (m)
-#endif /* 2.6.0 */
-#else /* CONFIG_NF_CONNTRACK_MARK */
-#define PKTMARK(p) 0
-#define PKTSETMARK(p, m)
-#endif /* CONFIG_NF_CONNTRACK_MARK */
-
-#define PKTALLOCED(osh) osl_pktalloced(osh)
-extern uint osl_pktalloced(osl_t *osh);
-
-#endif /* BCMDRIVER */
-
-#endif /* _linux_pkt_h_ */
* Linux-specific abstractions to gain some independence from linux kernel versions.
* Pave over some 2.2 versus 2.4 versus 2.6 kernel differences.
*
- * Copyright (C) 1999-2019, Broadcom.
- *
+ * Copyright (C) 1999-2017, Broadcom Corporation
+ *
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
* under the terms of the GNU General Public License version 2 (the "GPL"),
* available at http://www.broadcom.com/licenses/GPLv2.php, with the
* following added to such license:
- *
+ *
* As a special exception, the copyright holders of this software give you
* permission to link this software with independent modules, and to copy and
* distribute the resulting executable under terms of your choice, provided that
* the license of that module. An independent module is a module which is not
* derived from this software. The special exception does not apply to any
* modifications of the software.
- *
+ *
* Notwithstanding the above, under no circumstances may you combine this
* software in any way with any other Broadcom software provided under a license
* other than the GPL, without Broadcom's express prior written consent.
*
* <<Broadcom-WL-IPTag/Open:>>
*
- * $Id: linuxver.h 806092 2019-02-21 08:19:13Z $
+ * $Id: linuxver.h 646721 2016-06-30 12:36:41Z $
*/
#ifndef _linuxver_h_
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wunused-but-set-variable"
#pragma GCC diagnostic ignored "-Wunused-but-set-parameter"
-#endif // endif
+#endif
#include <typedefs.h>
#include <linux/version.h>
#include <generated/autoconf.h>
#else
#include <linux/autoconf.h>
-#endif // endif
+#endif
#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0)) */
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 1, 0))
#include <linux/kconfig.h>
-#endif // endif
+#endif
#include <linux/module.h>
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 3, 0))
#undef __NO_VERSION__
#else
#define __NO_VERSION__
-#endif // endif
+#endif
#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 3, 0) */
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 5, 0)
#define module_param(_name_, _type_, _perm_) MODULE_PARM(_name_, "i")
#define module_param_string(_name_, _string_, _size_, _perm_) \
MODULE_PARM(_string_, "c" __MODULE_STRING(_size_))
-#endif // endif
+#endif
/* linux/malloc.h is deprecated, use linux/slab.h instead. */
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 9))
#include <linux/malloc.h>
#else
#include <linux/slab.h>
-#endif // endif
+#endif
#include <linux/types.h>
#include <linux/init.h>
#include <linux/tqueue.h>
#ifndef work_struct
#define work_struct tq_struct
-#endif // endif
+#endif
#ifndef INIT_WORK
#define INIT_WORK(_work, _func, _data) INIT_TQUEUE((_work), (_func), (_data))
-#endif // endif
+#endif
#ifndef schedule_work
#define schedule_work(_work) schedule_task((_work))
-#endif // endif
+#endif
#ifndef flush_scheduled_work
#define flush_scheduled_work() flush_scheduled_tasks()
-#endif // endif
+#endif
#endif /* LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 41) */
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0))
(RHEL_MAJOR == 5))
/* Exclude RHEL 5 */
typedef void (*work_func_t)(void *work);
-#endif // endif
+#endif
#endif /* >= 2.6.20 */
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0))
#define IRQ_NONE
#define IRQ_HANDLED
#define IRQ_RETVAL(x)
-#endif // endif
+#endif
#else
typedef irqreturn_t(*FN_ISR) (int irq, void *dev_id, struct pt_regs *ptregs);
#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0) */
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 17)
#ifdef CONFIG_NET_RADIO
#define CONFIG_WIRELESS_EXT
-#endif // endif
+#endif
#endif /* < 2.6.17 */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 5, 67)
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 29)
#include <net/lib80211.h>
-#endif // endif
+#endif
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 29)
#include <linux/ieee80211.h>
#else
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 14)
#include <net/ieee80211.h>
-#endif // endif
+#endif
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 30) */
+
#ifndef __exit
#define __exit
-#endif // endif
+#endif
#ifndef __devexit
#define __devexit
-#endif // endif
+#endif
#ifndef __devinit
# if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 8, 0))
# define __devinit __init
#endif /* !__devinit */
#ifndef __devinitdata
#define __devinitdata
-#endif // endif
+#endif
#ifndef __devexit_p
#define __devexit_p(x) x
-#endif // endif
+#endif
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 0))
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 18))
#define pci_module_init pci_register_driver
-#endif // endif
+#endif
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 2, 18))
#ifdef MODULE
#else
#define module_init(x) __initcall(x);
#define module_exit(x) __exitcall(x);
-#endif // endif
+#endif
#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 2, 18) */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 31)
#define WL_USE_NETDEV_OPS
#else
#undef WL_USE_NETDEV_OPS
-#endif // endif
+#endif
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 31)) && defined(CONFIG_RFKILL)
#define WL_CONFIG_RFKILL
#else
#undef WL_CONFIG_RFKILL
-#endif // endif
+#endif
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 3, 48))
#define list_for_each(pos, head) \
for (pos = (head)->next; pos != (head); pos = pos->next)
-#endif // endif
+#endif
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 3, 13))
#define pci_resource_start(dev, bar) ((dev)->base_address[(bar)])
#elif (LINUX_VERSION_CODE < KERNEL_VERSION(2, 3, 44))
#define pci_resource_start(dev, bar) ((dev)->resource[(bar)].start)
-#endif // endif
+#endif
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 3, 23))
#define pci_enable_device(dev) do { } while (0)
-#endif // endif
+#endif
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 3, 14))
#define net_device device
-#endif // endif
+#endif
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 3, 42))
#ifndef PCI_DMA_TODEVICE
#define PCI_DMA_TODEVICE 1
#define PCI_DMA_FROMDEVICE 2
-#endif // endif
+#endif
typedef u32 dma_addr_t;
#endif /* DMA mapping */
-#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 15, 0)
-
-typedef struct timer_list timer_list_compat_t;
-
-#define init_timer_compat(timer_compat, cb, priv) \
- init_timer(timer_compat); \
- (timer_compat)->data = (ulong)priv; \
- (timer_compat)->function = cb
-#define timer_set_private(timer_compat, priv) (timer_compat)->data = (ulong)priv
-#define timer_expires(timer_compat) (timer_compat)->expires
-
-#else /* LINUX_VERSION_CODE < KERNEL_VERSION(4, 15, 0) */
-
-typedef struct timer_list_compat {
- struct timer_list timer;
- void *arg;
- void (*callback)(ulong arg);
-} timer_list_compat_t;
-
-extern void timer_cb_compat(struct timer_list *tl);
-
-#define init_timer_compat(timer_compat, cb, priv) \
- (timer_compat)->arg = priv; \
- (timer_compat)->callback = cb; \
- timer_setup(&(timer_compat)->timer, timer_cb_compat, 0);
-#define timer_set_private(timer_compat, priv) (timer_compat)->arg = priv
-#define timer_expires(timer_compat) (timer_compat)->timer.expires
-
-#define del_timer(t) del_timer(&((t)->timer))
-#define del_timer_sync(t) del_timer_sync(&((t)->timer))
-#define timer_pending(t) timer_pending(&((t)->timer))
-#define add_timer(t) add_timer(&((t)->timer))
-#define mod_timer(t, j) mod_timer(&((t)->timer), j)
-
-#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(4, 15, 0) */
-
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 3, 43))
#define dev_kfree_skb_any(a) dev_kfree_skb(a)
#else
#define PCI_SAVE_STATE(a, b) pci_save_state(a, b)
#define PCI_RESTORE_STATE(a, b) pci_restore_state(a, b)
-#endif // endif
+#endif
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 6))
static inline int
/* Old cp0 access macros deprecated in 2.4.19 */
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 19))
#define read_c0_count() read_32bit_cp0_register(CP0_COUNT)
-#endif // endif
+#endif
/* Module refcount handled internally in 2.6.x */
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 24))
#else
#define OLD_MOD_INC_USE_COUNT do {} while (0)
#define OLD_MOD_DEC_USE_COUNT do {} while (0)
-#endif // endif
+#endif
#else /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 24) */
#ifndef SET_MODULE_OWNER
#define SET_MODULE_OWNER(dev) do {} while (0)
-#endif // endif
+#endif
#ifndef MOD_INC_USE_COUNT
#define MOD_INC_USE_COUNT do {} while (0)
-#endif // endif
+#endif
#ifndef MOD_DEC_USE_COUNT
#define MOD_DEC_USE_COUNT do {} while (0)
-#endif // endif
+#endif
#define OLD_MOD_INC_USE_COUNT MOD_INC_USE_COUNT
#define OLD_MOD_DEC_USE_COUNT MOD_DEC_USE_COUNT
#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 24) */
#ifndef SET_NETDEV_DEV
#define SET_NETDEV_DEV(net, pdev) do {} while (0)
-#endif // endif
+#endif
#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 1, 0))
#ifndef HAVE_FREE_NETDEV
#define free_netdev(dev) kfree(dev)
-#endif // endif
+#endif
#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(3, 1, 0) */
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0))
/* struct packet_type redefined in 2.6.x */
#define af_packet_priv data
-#endif // endif
+#endif
/* suspend args */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 11)
#define DRV_SUSPEND_STATE_TYPE pm_message_t
#else
#define DRV_SUSPEND_STATE_TYPE uint32
-#endif // endif
+#endif
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 19)
#define CHECKSUM_HW CHECKSUM_PARTIAL
-#endif // endif
+#endif
typedef struct {
void *parent; /* some external entity that the thread supposed to work for */
struct semaphore sema;
int terminated;
struct completion completed;
- int flush_ind;
- struct completion flushed;
spinlock_t spinlock;
int up_cnt;
} tsk_ctl_t;
+
/* requires a tsk_ctl_t tsk argument; the caller's priv data is passed in the owner ptr */
/* note: this macro assumes only one context may be waiting on the thread's completion */
#ifdef DHD_DEBUG
#define DBG_THR(x) printk x
#else
#define DBG_THR(x)
-#endif // endif
+#endif
static inline bool binary_sema_down(tsk_ctl_t *tsk)
{
#define SMP_RD_BARRIER_DEPENDS(x) smp_read_barrier_depends(x)
#else
#define SMP_RD_BARRIER_DEPENDS(x) smp_rmb(x)
-#endif // endif
+#endif
#define PROC_START(thread_func, owner, tsk_ctl, flags, name) \
{ \
sema_init(&((tsk_ctl)->sema), 0); \
init_completion(&((tsk_ctl)->completed)); \
- init_completion(&((tsk_ctl)->flushed)); \
(tsk_ctl)->parent = owner; \
(tsk_ctl)->proc_name = name; \
(tsk_ctl)->terminated = FALSE; \
- (tsk_ctl)->flush_ind = FALSE; \
- (tsk_ctl)->up_cnt = 0; \
(tsk_ctl)->p_task = kthread_run(thread_func, tsk_ctl, (char*)name); \
if (IS_ERR((tsk_ctl)->p_task)) { \
- (tsk_ctl)->thr_pid = -1; \
- DBG_THR(("%s(): thread:%s create failed\n", __FUNCTION__, \
- (tsk_ctl)->proc_name)); \
+ (tsk_ctl)->thr_pid = DHD_PID_KT_INVALID; \
+ DBG_THR(("%s(): thread:%s:%lx failed\n", __FUNCTION__, \
+ (tsk_ctl)->proc_name, (tsk_ctl)->thr_pid)); \
} else { \
(tsk_ctl)->thr_pid = (tsk_ctl)->p_task->pid; \
spin_lock_init(&((tsk_ctl)->spinlock)); \
}; \
}
-#define PROC_WAIT_TIMEOUT_MSEC 5000 /* 5 seconds */
-
#define PROC_STOP(tsk_ctl) \
{ \
- uint timeout = (uint)msecs_to_jiffies(PROC_WAIT_TIMEOUT_MSEC); \
(tsk_ctl)->terminated = TRUE; \
smp_wmb(); \
up(&((tsk_ctl)->sema)); \
- DBG_THR(("%s(): thread:%s:%lx wait for terminate\n", __FUNCTION__, \
- (tsk_ctl)->proc_name, (tsk_ctl)->thr_pid)); \
- timeout = (uint)wait_for_completion_timeout(&((tsk_ctl)->completed), timeout); \
- if (timeout == 0) \
- DBG_THR(("%s(): thread:%s:%lx terminate timeout\n", __FUNCTION__, \
- (tsk_ctl)->proc_name, (tsk_ctl)->thr_pid)); \
- else \
- DBG_THR(("%s(): thread:%s:%lx terminated OK\n", __FUNCTION__, \
- (tsk_ctl)->proc_name, (tsk_ctl)->thr_pid)); \
- (tsk_ctl)->parent = NULL; \
- (tsk_ctl)->proc_name = NULL; \
- (tsk_ctl)->thr_pid = -1; \
- (tsk_ctl)->up_cnt = 0; \
-}
-
-#define PROC_STOP_USING_BINARY_SEMA(tsk_ctl) \
-{ \
- uint timeout = (uint)msecs_to_jiffies(PROC_WAIT_TIMEOUT_MSEC); \
- (tsk_ctl)->terminated = TRUE; \
- smp_wmb(); \
- binary_sema_up(tsk_ctl); \
- DBG_THR(("%s(): thread:%s:%lx wait for terminate\n", __FUNCTION__, \
- (tsk_ctl)->proc_name, (tsk_ctl)->thr_pid)); \
- timeout = (uint)wait_for_completion_timeout(&((tsk_ctl)->completed), timeout); \
- if (timeout == 0) \
- DBG_THR(("%s(): thread:%s:%lx terminate timeout\n", __FUNCTION__, \
- (tsk_ctl)->proc_name, (tsk_ctl)->thr_pid)); \
- else \
- DBG_THR(("%s(): thread:%s:%lx terminated OK\n", __FUNCTION__, \
+ wait_for_completion(&((tsk_ctl)->completed)); \
+ DBG_THR(("%s(): thread:%s:%lx terminated OK\n", __FUNCTION__, \
(tsk_ctl)->proc_name, (tsk_ctl)->thr_pid)); \
- (tsk_ctl)->parent = NULL; \
- (tsk_ctl)->proc_name = NULL; \
(tsk_ctl)->thr_pid = -1; \
}
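/*
 * Illustrative sketch (annotation, not part of this patch): a thread function started
 * by PROC_START is expected to pair with these macros roughly as follows (the function
 * name is hypothetical).
 *
 *   static int example_thread(void *data)
 *   {
 *       tsk_ctl_t *tsk = (tsk_ctl_t *)data;
 *       while (down_interruptible(&tsk->sema) == 0) {
 *           if (tsk->terminated)
 *               break;              // PROC_STOP set terminated and up()'d the sema
 *           ...                     // one unit of work on behalf of tsk->parent
 *       }
 *       complete_and_exit(&tsk->completed, 0);  // releases PROC_STOP's wait
 *   }
 */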
-/*
-* Flush is non-rentrant, so callers must make sure
-* there is no race condition.
-* For safer exit, added wait_for_completion_timeout
-* with 1 sec timeout.
-*/
-#define PROC_FLUSH_USING_BINARY_SEMA(tsk_ctl) \
-{ \
- uint timeout = (uint)msecs_to_jiffies(PROC_WAIT_TIMEOUT_MSEC); \
- (tsk_ctl)->flush_ind = TRUE; \
- smp_wmb(); \
- binary_sema_up(tsk_ctl); \
- DBG_THR(("%s(): thread:%s:%lx wait for flush\n", __FUNCTION__, \
- (tsk_ctl)->proc_name, (tsk_ctl)->thr_pid)); \
- timeout = (uint)wait_for_completion_timeout(&((tsk_ctl)->flushed), timeout); \
- if (timeout == 0) \
- DBG_THR(("%s(): thread:%s:%lx flush timeout\n", __FUNCTION__, \
- (tsk_ctl)->proc_name, (tsk_ctl)->thr_pid)); \
- else \
- DBG_THR(("%s(): thread:%s:%lx flushed OK\n", __FUNCTION__, \
- (tsk_ctl)->proc_name, (tsk_ctl)->thr_pid)); \
-}
-
/* ----------------------- */
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 31))
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 18, 0)
-/* send_sig declaration moved */
-#include <linux/sched/signal.h>
-#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 18, 0) */
-
#define KILL_PROC(nr, sig) \
{ \
struct task_struct *tsk; \
{ \
kill_proc(pid, sig, 1); \
}
-#endif // endif
+#endif
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 31) */
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0))
#define DEV_PRIV(dev) (dev->priv)
#else
#define DEV_PRIV(dev) netdev_priv(dev)
-#endif // endif
+#endif
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 20)
#define WL_ISR(i, d, p) wl_isr((i), (d))
#define CAN_SLEEP() ((!in_atomic() && !irqs_disabled()))
#else
#define CAN_SLEEP() (FALSE)
-#endif // endif
+#endif
#define KMALLOC_FLAG (CAN_SLEEP() ? GFP_KERNEL: GFP_ATOMIC)
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0))
#define RANDOM32 prandom_u32
-#define RANDOM_BYTES prandom_bytes
#else
#define RANDOM32 random32
-#define RANDOM_BYTES get_random_bytes
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0) */
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0))
* Override latest kfifo functions with
* older versions to work on older kernels
*/
-#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 33)) && !defined(WL_COMPAT_WIRELESS)
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 33))
#define kfifo_in_spinlocked(a, b, c, d) kfifo_put(a, (u8 *)b, c)
#define kfifo_out_spinlocked(a, b, c, d) kfifo_get(a, (u8 *)b, c)
#define kfifo_esize(a) 1
#if __GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 6)
#pragma GCC diagnostic pop
-#endif // endif
+#endif
#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 9, 0))
-#include <linux/fs.h>
static inline struct inode *file_inode(const struct file *f)
{
return f->f_dentry->d_inode;
}
#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(3, 9, 0)) */
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 0)
-#define vfs_write(fp, buf, len, pos) kernel_write(fp, buf, len, pos)
-#define vfs_read(fp, buf, len, pos) kernel_read(fp, buf, len, pos)
-int kernel_read_compat(struct file *file, loff_t offset, char *addr, unsigned long count);
-#else /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 0) */
-#define kernel_read_compat(file, offset, addr, count) kernel_read(file, offset, addr, count)
-#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 0) */
-
#endif /* _linuxver_h_ */
+++ /dev/null
-/*
- * Chip related low power flags
- *
- * Copyright (C) 1999-2019, Broadcom.
- *
- * Unless you and Broadcom execute a separate written software license
- * agreement governing use of this software, this software is licensed to you
- * under the terms of the GNU General Public License version 2 (the "GPL"),
- * available at http://www.broadcom.com/licenses/GPLv2.php, with the
- * following added to such license:
- *
- * As a special exception, the copyright holders of this software give you
- * permission to link this software with independent modules, and to copy and
- * distribute the resulting executable under terms of your choice, provided that
- * you also meet, for each linked independent module, the terms and conditions of
- * the license of that module. An independent module is a module which is not
- * derived from this software. The special exception does not apply to any
- * modifications of the software.
- *
- * Notwithstanding the above, under no circumstances may you combine this
- * software in any way with any other Broadcom software provided under a license
- * other than the GPL, without Broadcom's express prior written consent.
- *
- *
- * <<Broadcom-WL-IPTag/Open:>>
- *
- * $Id: lpflags.h 592839 2015-10-14 14:19:09Z $
- */
-#ifndef _lpflags_h_
-#define _lpflags_h_
-
-/* Chip related low power flags (lpflags) */
-#define LPFLAGS_SI_GLOBAL_DISABLE (1 << 0)
-#define LPFLAGS_SI_MEM_STDBY_DISABLE (1 << 1)
-#define LPFLAGS_SI_SFLASH_DISABLE (1 << 2)
-#define LPFLAGS_SI_BTLDO3P3_DISABLE (1 << 3)
-#define LPFLAGS_SI_GCI_FORCE_REGCLK_DISABLE (1 << 4)
-#define LPFLAGS_SI_FORCE_PWM_WHEN_RADIO_ON (1 << 5)
-#define LPFLAGS_SI_DS0_SLEEP_PDA_DISABLE (1 << 6)
-#define LPFLAGS_SI_DS1_SLEEP_PDA_DISABLE (1 << 7)
-#define LPFLAGS_PHY_GLOBAL_DISABLE (1 << 16)
-#define LPFLAGS_PHY_LP_DISABLE (1 << 17)
-#define LPFLAGS_PSM_PHY_CTL (1 << 18)
-
-#endif /* _lpflags_h_ */
+++ /dev/null
-/*
- * Fundamental types and constants relating to WFA MBO
- * (Multiband Operation)
- * Copyright (C) 1999-2019, Broadcom.
- *
- * Unless you and Broadcom execute a separate written software license
- * agreement governing use of this software, this software is licensed to you
- * under the terms of the GNU General Public License version 2 (the "GPL"),
- * available at http://www.broadcom.com/licenses/GPLv2.php, with the
- * following added to such license:
- *
- * As a special exception, the copyright holders of this software give you
- * permission to link this software with independent modules, and to copy and
- * distribute the resulting executable under terms of your choice, provided that
- * you also meet, for each linked independent module, the terms and conditions of
- * the license of that module. An independent module is a module which is not
- * derived from this software. The special exception does not apply to any
- * modifications of the software.
- *
- * Notwithstanding the above, under no circumstances may you combine this
- * software in any way with any other Broadcom software provided under a license
- * other than the GPL, without Broadcom's express prior written consent.
- *
- *
- * <<Broadcom-WL-IPTag/Open:>>
- *
- * $Id$
- */
-
-#ifndef _MBO_H_
-#define _MBO_H_
-
-/* This marks the start of a packed structure section. */
-#include <packed_section_start.h>
-
-/* WiFi MBO OUI values */
-#define MBO_OUI WFA_OUI /* WiFi OUI 50:6F:9A */
-/* oui_type field identifying the type and version of the MBO IE. */
-#define MBO_OUI_TYPE WFA_OUI_TYPE_MBO /* OUI Type/Version */
-/* IEEE 802.11 vendor specific information element. */
-#define MBO_IE_ID 0xdd
-
-/* MBO ATTR related macros */
-#define MBO_ATTR_ID_OFF 0
-#define MBO_ATTR_LEN_OFF 1
-#define MBO_ATTR_DATA_OFF 2
-
-#define MBO_ATTR_ID_LEN 1 /* Attr ID field length */
-#define MBO_ATTR_LEN_LEN 1 /* Attr Length field length */
-#define MBO_ATTR_HDR_LEN 2 /* ID + 1-byte length field */
-
-/* MBO subelements related */
-#define MBO_SUBELEM_ID 0xdd
-#define MBO_SUBELEM_OUI WFA_OUI
-
-#define MBO_SUBELEM_ID_LEN 1 /* SubElement ID field length */
-#define MBO_SUBELEM_LEN_LEN 1 /* SubElement length field length */
-#define MBO_SUBELEM_HDR_LEN 6 /* ID + length + OUI + OUI type */
-
-#define MBO_NON_PREF_CHAN_SUBELEM_LEN_LEN(L) (7 + (L)) /* value of length field */
-#define MBO_NON_PREF_CHAN_SUBELEM_TOT_LEN(L) \
- (MBO_SUBELEM_ID_LEN + MBO_SUBELEM_LEN_LEN + MBO_NON_PREF_CHAN_SUBELEM_LEN_LEN(L))
-/* MBO attributes as defined in the mbo spec */
-enum {
- MBO_ATTR_MBO_AP_CAPABILITY = 1,
- MBO_ATTR_NON_PREF_CHAN_REPORT = 2,
- MBO_ATTR_CELL_DATA_CAP = 3,
- MBO_ATTR_ASSOC_DISALLOWED = 4,
- MBO_ATTR_CELL_DATA_CONN_PREF = 5,
- MBO_ATTR_TRANS_REASON_CODE = 6,
- MBO_ATTR_TRANS_REJ_REASON_CODE = 7,
- MBO_ATTR_ASSOC_RETRY_DELAY = 8
-};
-
-typedef BWL_PRE_PACKED_STRUCT struct wifi_mbo_ie_s {
- uint8 id; /* IE ID: MBO_IE_ID 0xDD */
- uint8 len; /* IE length */
- uint8 oui[WFA_OUI_LEN]; /* MBO_OUI 50:6F:9A */
- uint8 oui_type; /* MBO_OUI_TYPE 0x16 */
- uint8 attr[1]; /* var len attributes */
-} BWL_POST_PACKED_STRUCT wifi_mbo_ie_t;
-
-#define MBO_IE_HDR_SIZE (OFFSETOF(wifi_mbo_ie_t, attr))
-/* oui:3 bytes + oui type:1 byte */
-#define MBO_IE_NO_ATTR_LEN 4
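/*
 * Minimal parsing sketch (illustrative only): walking the variable-length
 * attribute list of a wifi_mbo_ie_t using the MBO_ATTR_* offsets above.
 * The helper name is not part of this header, and well-formed attribute
 * lengths (already bounds-checked against the containing frame) are assumed.
 */
static const uint8 *
mbo_ie_find_attr(const wifi_mbo_ie_t *ie, uint8 attr_id)
{
	const uint8 *attr = ie->attr;
	int remaining = (int)ie->len - MBO_IE_NO_ATTR_LEN;

	while (remaining >= MBO_ATTR_HDR_LEN) {
		uint8 alen = attr[MBO_ATTR_LEN_OFF];

		if (attr[MBO_ATTR_ID_OFF] == attr_id)
			return attr;
		attr += MBO_ATTR_HDR_LEN + alen;
		remaining -= MBO_ATTR_HDR_LEN + alen;
	}
	return NULL;
}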
-
-/* MBO AP Capability Attribute */
-typedef BWL_PRE_PACKED_STRUCT struct wifi_mbo_ap_cap_ind_attr_s {
- /* Attribute ID - 0x01. */
- uint8 id;
- /* Length of the following fields in the attribute */
- uint8 len;
- /* AP capability bitmap */
- uint8 cap_ind;
-} BWL_POST_PACKED_STRUCT wifi_mbo_ap_cap_ind_attr_t;
-
-/* MBO AP Capability Indication Field Values */
-#define MBO_AP_CAP_IND_CELLULAR_AWARE 0x40
-
-/* Non-preferred Channel Report Attribute */
-#define MBO_NON_PREF_CHAN_ATTR_OPCALSS_OFF 2
-#define MBO_NON_PREF_CHAN_ATTR_CHANLIST_OFF 3
-#define MBO_NON_PREF_CHAN_ATTR_PREF_OFF(L) \
- (MBO_NON_PREF_CHAN_ATTR_CHANLIST_OFF + (L))
-
-#define MBO_NON_PREF_CHAN_ATTR_OPCALSS_LEN 1
-#define MBO_NON_PREF_CHAN_ATTR_PREF_LEN 1
-#define MBO_NON_PREF_CHAN_ATTR_REASON_LEN 1
-
-#define MBO_NON_PREF_CHAN_ATTR_LEN(L) ((L) + 3)
-#define MBO_NON_PREF_CHAN_ATTR_TOT_LEN(L) (MBO_ATTR_HDR_LEN + (L) + 3)
-
-/* attribute len - (opclass + Pref + Reason) */
-#define MBO_NON_PREF_CHAN_ATTR_CHANLIST_LEN(L) ((L) - 3)
-
-/* MBO Non-preferred Channel Report: "Preference" field value */
-enum {
- MBO_STA_NON_OPERABLE_BAND_CHAN = 0,
- MBO_STA_NON_PREFERRED_BAND_CHAN = 1,
- MBO_STA_PREFERRED_BAND_CHAN = 255
-};
-
-/* MBO Non-preferred Channel Report: "Reason Code" field value */
-enum {
- MBO_NON_PREF_CHAN_RC_UNSPECIFIED = 0,
- MBO_NON_PREF_CHAN_RC_BCN_STRENGTH = 1,
- MBO_NON_PREF_CHAN_RC_CO_LOC_INTERFERENCE = 2,
- MBO_NON_PREF_CHAN_RC_IN_DEV_INTERFERENCE = 3
-};
-
-/* Cellular Data Capability Attribute */
-typedef BWL_PRE_PACKED_STRUCT struct wifi_mbo_cell_data_cap_attr_s {
- /* Attribute ID - 0x03. */
- uint8 id;
- /* Length of the following fields in the attribute */
- uint8 len;
- /* MBO STA's cellular capability */
- uint8 cell_conn;
-} BWL_POST_PACKED_STRUCT wifi_mbo_cell_data_cap_attr_t;
-
-/* MBO Cellular Data Capability: "Cellular Connectivity" field value */
-enum {
- MBO_CELL_DATA_CONN_AVAILABLE = 1,
- MBO_CELL_DATA_CONN_NOT_AVAILABLE = 2,
- MBO_CELL_DATA_CONN_NOT_CAPABLE = 3
-};
-
-/* Association Disallowed attribute */
-typedef BWL_PRE_PACKED_STRUCT struct wifi_mbo_assoc_disallowed_attr_s {
- /* Attribute ID - 0x04. */
- uint8 id;
- /* Length of the following fields in the attribute */
- uint8 len;
- /* Reason of not accepting new association */
- uint8 reason_code;
-} BWL_POST_PACKED_STRUCT wifi_mbo_assoc_disallowed_attr_t;
-
-/* Association Disallowed attr Reason code field values */
-enum {
- MBO_ASSOC_DISALLOWED_RC_UNSPECIFIED = 1,
- MBO_ASSOC_DISALLOWED_RC_MAX_STA_REACHED = 2,
- MBO_ASSOC_DISALLOWED_RC_AIR_IFACE_OVERLOADED = 3,
- MBO_ASSOC_DISALLOWED_RC_AUTH_SRVR_OVERLOADED = 4,
- MBO_ASSOC_DISALLOWED_RC_INSUFFIC_RSSI = 5
-};
-
-/* Cellular Data Conn Pref attribute */
-typedef BWL_PRE_PACKED_STRUCT struct wifi_mbo_cell_data_conn_pref_attr_s {
- /* Attribute ID - 0x05. */
- uint8 id;
- /* Length of the following fields in the attribute */
- uint8 len;
- /* Preference value of cellular connection */
- uint8 cell_pref;
-} BWL_POST_PACKED_STRUCT wifi_mbo_cell_data_conn_pref_attr_t;
-
-/* Cellular Data Conn Pref attr: Cellular Pref field values */
-enum {
- MBO_CELLULAR_DATA_CONN_EXCLUDED = 1,
- MBO_CELLULAR_DATA_CONN_NOT_PREFERRED = 2,
- MBO_CELLULAR_DATA_CONN_PREFERRED = 255
-};
-
-/* Transition Reason Code Attribute */
-typedef BWL_PRE_PACKED_STRUCT struct wifi_mbo_trans_reason_code_attr_s {
- /* Attribute ID - 0x06. */
- uint8 id;
- /* Length of the following fields in the attribute */
- uint8 len;
- /* Reason of transition recommendation */
- uint8 trans_reason_code;
-} BWL_POST_PACKED_STRUCT wifi_mbo_trans_reason_code_attr_t;
-
-/* Transition Reason Code Attr: trans reason code field values */
-enum {
- MBO_TRANS_REASON_UNSPECIFIED = 0,
- MBO_TRANS_REASON_EXCESSV_FRM_LOSS_RATE = 1,
- MBO_TRANS_REASON_EXCESSV_TRAFFIC_DELAY = 2,
- MBO_TRANS_REASON_INSUFF_BW = 3,
- MBO_TRANS_REASON_LOAD_BALANCING = 4,
- MBO_TRANS_REASON_LOW_RSSI = 5,
- MBO_TRANS_REASON_EXCESSV_RETRANS_RCVD = 6,
- MBO_TRANS_REASON_HIGH_INTERFERENCE = 7,
- MBO_TRANS_REASON_GRAY_ZONE = 8,
- MBO_TRANS_REASON_PREMIUM_AP_TRANS = 9
-};
-
-/* Transition Rejection Reason Code Attribute */
-typedef BWL_PRE_PACKED_STRUCT struct wifi_mbo_trans_rej_reason_code_attr_s {
- /* Attribute ID - 0x07. */
- uint8 id;
- /* Length of the following fields in the attribute */
- uint8 len;
- /* Reason of transition rejection */
- uint8 trans_rej_reason_code;
-} BWL_POST_PACKED_STRUCT wifi_mbo_trans_rej_reason_code_attr_t;
-
-/* Transition Rej Reason Code Attr: trans rej reason code field values */
-enum {
- MBO_TRANS_REJ_REASON_UNSPECIFIED = 0,
- MBO_TRANS_REJ_REASON_EXSSIV_FRM_LOSS_RATE = 1,
- MBO_TRANS_REJ_REASON_EXSSIV_TRAFFIC_DELAY = 2,
- MBO_TRANS_REJ_REASON_INSUFF_QOS_CAPACITY = 3,
- MBO_TRANS_REJ_REASON_LOW_RSSI = 4,
- MBO_TRANS_REJ_REASON_HIGH_INTERFERENCE = 5,
- MBO_TRANS_REJ_REASON_SERVICE_UNAVAIL = 6
-};
-
-/* Assoc Retry Delay Attribute */
-typedef BWL_PRE_PACKED_STRUCT struct wifi_mbo_assoc_retry_delay_attr_s {
- /* Attribute ID - 0x08. */
- uint8 id;
- /* Length of the following fields in the attribute */
- uint8 len;
- /* No of Seconds before next assoc attempt */
- uint16 reassoc_delay;
-} BWL_POST_PACKED_STRUCT wifi_mbo_assoc_retry_delay_attr_t;
-
-#define MBO_ANQP_OUI_TYPE 0x12 /* OUI Type/Version */
-
-/* MBO ANQP Element */
-typedef BWL_PRE_PACKED_STRUCT struct wifi_mbo_anqp_elem_s {
- /* ID - 56797 */
- uint16 info_id;
- /* Length of the OUI + Vendor Specific content */
- uint16 len;
- /* WFA_OUI 50:6F:9A */
- uint8 oui[WFA_OUI_LEN];
- /* MBO_ANQP_OUI_TYPE 0x12 */
- uint8 oui_type;
- /* MBO ANQP element type */
- uint8 sub_type;
- /* variable len payload */
- uint8 payload[1];
-} BWL_POST_PACKED_STRUCT wifi_mbo_anqp_elem_t;
-
-#define MBO_ANQP_ELEM_HDR_SIZE (OFFSETOF(wifi_mbo_anqp_elem_t, payload))
-
-/* oui:3 bytes + oui type:1 byte + sub type:1 byte */
-#define MBO_ANQP_ELEM_NO_PAYLOAD_LEN 5
-
-/* MBO ANQP Subtype Values */
-enum {
- MBO_ANQP_ELEM_MBO_QUERY_LIST = 1,
- MBO_ANQP_ELEM_CELL_DATA_CONN_PREF = 2
-};
-
-/* MBO sub-elements */
-typedef BWL_PRE_PACKED_STRUCT struct wifi_mbo_cell_cap_subelem_s {
- /* 0xDD */
- uint8 sub_elem_id;
- /* Length of the following fields in sub-element */
- uint8 len;
- /* WFA_OUI 50:6F:9A */
- uint8 oui[WFA_OUI_LEN];
- /* OUI_TYPE 0x03 */
- uint8 oui_type;
- /* STA cellular capability */
- uint8 cell_conn;
-} BWL_POST_PACKED_STRUCT wifi_mbo_cell_cap_subelem_t;
-
-/* This marks the end of a packed structure section. */
-#include <packed_section_end.h>
-
-#endif /* _MBO_H_ */
/*
* Command line options parser.
*
- * Copyright (C) 1999-2019, Broadcom.
- *
+ * Copyright (C) 1999-2017, Broadcom Corporation
+ *
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
* under the terms of the GNU General Public License version 2 (the "GPL"),
* available at http://www.broadcom.com/licenses/GPLv2.php, with the
* following added to such license:
- *
+ *
* As a special exception, the copyright holders of this software give you
* permission to link this software with independent modules, and to copy and
* distribute the resulting executable under terms of your choice, provided that
* the license of that module. An independent module is a module which is not
* derived from this software. The special exception does not apply to any
* modifications of the software.
- *
+ *
* Notwithstanding the above, under no circumstances may you combine this
* software in any way with any other Broadcom software provided under a license
* other than the GPL, without Broadcom's express prior written consent.
*
* <<Broadcom-WL-IPTag/Open:>>
*
- * $Id: miniopt.h 672943 2016-11-30 08:54:06Z $
+ * $Id: miniopt.h 514727 2014-11-12 03:02:48Z $
*/
+
#ifndef MINI_OPT_H
#define MINI_OPT_H
#ifdef __cplusplus
extern "C" {
-#endif // endif
+#endif
/* ---- Include Files ---------------------------------------------------- */
+
/* ---- Constants and Types ---------------------------------------------- */
#define MINIOPT_MAXKEY 128 /* Max options */
void miniopt_init(miniopt_t *t, const char* name, const char* flags, bool longflags);
int miniopt(miniopt_t *t, char **argv);
+
/* ---- Variable Externs ------------------------------------------------- */
/* ---- Function Prototypes ---------------------------------------------- */
+
#ifdef __cplusplus
}
-#endif // endif
+#endif
#endif /* MINI_OPT_H */
+++ /dev/null
-/*
- * Common interface to MSF (multi-segment format) definitions.
- *
- * Copyright (C) 1999-2019, Broadcom.
- *
- * Unless you and Broadcom execute a separate written software license
- * agreement governing use of this software, this software is licensed to you
- * under the terms of the GNU General Public License version 2 (the "GPL"),
- * available at http://www.broadcom.com/licenses/GPLv2.php, with the
- * following added to such license:
- *
- * As a special exception, the copyright holders of this software give you
- * permission to link this software with independent modules, and to copy and
- * distribute the resulting executable under terms of your choice, provided that
- * you also meet, for each linked independent module, the terms and conditions of
- * the license of that module. An independent module is a module which is not
- * derived from this software. The special exception does not apply to any
- * modifications of the software.
- *
- * Notwithstanding the above, under no circumstances may you combine this
- * software in any way with any other Broadcom software provided under a license
- * other than the GPL, without Broadcom's express prior written consent.
- *
- *
- * <<Broadcom-WL-IPTag/Open:>>
- *
- * $Id: msf.h 619634 2016-02-17 19:01:25Z $
- */
-
-#ifndef _WLC_MSF_H_
-#define _WLC_MSF_H_
-
-struct wl_segment {
- uint32 type;
- uint32 offset;
- uint32 length;
- uint32 crc32;
- uint32 flags;
-};
-typedef struct wl_segment wl_segment_t;
-
-struct wl_segment_info {
- uint8 magic[4];
- uint32 hdr_len;
- uint32 crc32;
- uint32 file_type;
- uint32 num_segments;
- wl_segment_t segments[1];
-};
-typedef struct wl_segment_info wl_segment_info_t;
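/*
 * Minimal lookup sketch (illustrative only): locating a segment of a given
 * type in a multi-segment blob. 'si' is assumed to point at a header whose
 * magic, hdr_len and crc32 have already been validated by the caller.
 */
static const wl_segment_t *
msf_find_segment(const wl_segment_info_t *si, uint32 seg_type)
{
	uint32 i;

	for (i = 0; i < si->num_segments; i++) {
		if (si->segments[i].type == seg_type)
			return &si->segments[i];
	}
	return NULL;
}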
-
-typedef struct wlc_blob_segment {
- uint32 type;
- uint8 *data;
- uint32 length;
-} wlc_blob_segment_t;
-
-/** Segment types in Binary Eventlog Archive file */
-enum bea_seg_type_e {
- MSF_SEG_TYP_RTECDC_BIN = 1,
- MSF_SEG_TYP_LOGSTRS_BIN = 2,
- MSF_SEG_TYP_FW_SYMBOLS = 3,
- MSF_SEG_TYP_ROML_BIN = 4
-};
-
-#endif /* _WLC_MSF_H */
/*
* Trace messages sent over HBUS
*
- * Copyright (C) 1999-2019, Broadcom.
- *
+ * Copyright (C) 1999-2017, Broadcom Corporation
+ *
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
* under the terms of the GNU General Public License version 2 (the "GPL"),
* available at http://www.broadcom.com/licenses/GPLv2.php, with the
* following added to such license:
- *
+ *
* As a special exception, the copyright holders of this software give you
* permission to link this software with independent modules, and to copy and
* distribute the resulting executable under terms of your choice, provided that
* the license of that module. An independent module is a module which is not
* derived from this software. The special exception does not apply to any
* modifications of the software.
- *
+ *
* Notwithstanding the above, under no circumstances may you combine this
* software in any way with any other Broadcom software provided under a license
* other than the GPL, without Broadcom's express prior written consent.
#ifndef _TYPEDEFS_H_
#include <typedefs.h>
-#endif // endif
+#endif
/* This marks the start of a packed structure section. */
#include <packed_section_start.h>
* Fundamental types and constants relating to WFA NAN
* (Neighbor Awareness Networking)
*
- * Copyright (C) 1999-2019, Broadcom.
- *
+ * Copyright (C) 1999-2017, Broadcom Corporation
+ *
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
* under the terms of the GNU General Public License version 2 (the "GPL"),
* available at http://www.broadcom.com/licenses/GPLv2.php, with the
* following added to such license:
- *
+ *
* As a special exception, the copyright holders of this software give you
* permission to link this software with independent modules, and to copy and
* distribute the resulting executable under terms of your choice, provided that
* the license of that module. An independent module is a module which is not
* derived from this software. The special exception does not apply to any
* modifications of the software.
- *
+ *
* Notwithstanding the above, under no circumstances may you combine this
* software in any way with any other Broadcom software provided under a license
* other than the GPL, without Broadcom's express prior written consent.
*
* <<Broadcom-WL-IPTag/Open:>>
*
- * $Id: nan.h 818571 2019-05-08 04:36:41Z $
+ * $Id: nan.h 700076 2017-05-17 14:42:22Z $
*/
#ifndef _NAN_H_
#define _NAN_H_
#include <typedefs.h>
#include <802.11.h>
+
/* This marks the start of a packed structure section. */
#include <packed_section_start.h>
/* WiFi NAN OUI values */
-#define NAN_OUI "\x50\x6F\x9A" /* WFA OUI. WiFi-Alliance OUI */
+#define NAN_OUI WFA_OUI /* WiFi OUI */
/* For oui_type field identifying the type and version of the NAN IE. */
#define NAN_OUI_TYPE 0x13 /* Type/Version */
#define NAN_AF_OUI_TYPE 0x18 /* Type/Version */
#define NAN_OPERATING_CLASS_LEN 1 /* operating class field length from NAN FAM */
#define NAN_CHANNEL_NUM_LEN 1 /* channel number field length 1 byte */
-/* generic nan attribute total length */
-#define NAN_ATTR_TOT_LEN(_nan_attr) (ltoh16_ua(((const uint8 *)(_nan_attr)) + \
- NAN_ATTR_ID_LEN) + NAN_ATTR_HDR_LEN)
-
/* NAN slot duration / period */
#define NAN_MIN_TU 16
#define NAN_TU_PER_DW 512
#define NAN_SLOT_DUR_4096TU 4096
#define NAN_SLOT_DUR_8192TU 8192
-#define NAN_SOC_CHAN_2G 6 /* NAN 2.4G discovery channel */
-#define NAN_SOC_CHAN_5G_CH149 149 /* NAN 5G discovery channel if upper band allowed */
-#define NAN_SOC_CHAN_5G_CH44 44 /* NAN 5G discovery channel if only lower band allowed */
+#define NAN_MAP_ID_2G 2 /* NAN Further Avail Map ID for band 2.4G */
+#define NAN_MAP_ID_5G 5 /* NAN Further Avail Map ID for band 5G */
+#define NAN_MAP_NUM_IDS 2 /* Max number of NAN Further Avail Map IDs supported */
/* size of ndc id */
#define NAN_DATA_NDC_ID_SIZE 6
#define NAN_AVAIL_ENTRY_LEN_RES1 5 /* Avail entry len in FAM attribute for resolution 32TU */
#define NAN_AVAIL_ENTRY_LEN_RES2 4 /* Avail entry len in FAM attribute for resolution 64TU */
-/* map id field */
-#define NAN_MAPID_SPECIFIC_MAP_MASK 0x01 /* apply to specific map */
-#define NAN_MAPID_MAPID_MASK 0x1E
-#define NAN_MAPID_MAPID_SHIFT 1
-#define NAN_MAPID_SPECIFIC_MAP(_mapid) ((_mapid) & NAN_MAPID_SPECIFIC_MAP_MASK)
-#define NAN_MAPID_ALL_MAPS(_mapid) (!NAN_MAPID_SPECIFIC_MAP(_mapid))
-#define NAN_MAPID_MAPID(_mapid) (((_mapid) & NAN_MAPID_MAPID_MASK) \
- >> NAN_MAPID_MAPID_SHIFT)
-#define NAN_MAPID_SET_SPECIFIC_MAPID(map_id) ((((map_id) << NAN_MAPID_MAPID_SHIFT) \
- & NAN_MAPID_MAPID_MASK) | NAN_MAPID_SPECIFIC_MAP_MASK)
-
/* Vendor-specific public action frame for NAN */
typedef BWL_PRE_PACKED_STRUCT struct nan_pub_act_frame_s {
/* NAN_PUB_AF_CATEGORY 0x04 */
NAN_ATTR_MCAST_SCHED_OWNER_CHANGE = 38,
NAN_ATTR_PUBLIC_AVAILABILITY = 39,
NAN_ATTR_SUB_SVC_ID_LIST = 40,
- NAN_ATTR_NDPE = 41,
/* change NAN_ATTR_MAX_ID to max ids + 1, excluding NAN_ATTR_VENDOR_SPECIFIC.
* This is used in nan_parse.c
*/
- NAN_ATTR_MAX_ID = NAN_ATTR_NDPE + 1,
+ NAN_ATTR_MAX_ID = NAN_ATTR_SUB_SVC_ID_LIST + 1,
NAN_ATTR_VENDOR_SPECIFIC = 221
};
uint8 avail_bmp[1];
} BWL_POST_PACKED_STRUCT wifi_nan_ibss_attr_t;
-/* Country code attribute */
-typedef BWL_PRE_PACKED_STRUCT struct wifi_nan_country_code_attr_s {
- /* Attribute ID - 0x0B. */
- uint8 id;
- /* Length of the following fields in the attribute */
- uint16 len;
- /* Condensed Country String first two octets */
- uint8 country_str[2];
-} BWL_POST_PACKED_STRUCT wifi_nan_country_code_attr_t;
-
/* Further Availability MAP attr */
typedef BWL_PRE_PACKED_STRUCT struct wifi_nan_favail_attr_s {
/* Attribute ID - 0x0A. */
#define NAN_VENDOR_TYPE_RTT 0
#define NAN_VENDOR_TYPE_P2P 1
-/* Vendor Specific Attribute - old definition */
-/* TODO remove */
+/* Vendor Specific Attribute */
typedef BWL_PRE_PACKED_STRUCT struct wifi_nan_vendor_attr_s {
uint8 id; /* 0xDD */
uint16 len; /* IE length */
#define NAN_VENDOR_HDR_SIZE (OFFSETOF(wifi_nan_vendor_attr_t, attr))
-/* vendor specific attribute */
-typedef BWL_PRE_PACKED_STRUCT struct wifi_nan_vndr_attr_s {
- uint8 id; /* 0xDD */
- uint16 len; /* length of following fields */
- uint8 oui[DOT11_OUI_LEN]; /* vendor specific OUI */
- uint8 body[];
-} BWL_POST_PACKED_STRUCT wifi_nan_vndr_attr_t;
-
/* p2p operation attribute */
typedef BWL_PRE_PACKED_STRUCT struct wifi_nan_p2p_op_attr_s {
/* Attribute ID - 0x06. */
#define NAN_AVAIL_ENTRY_CTRL_USAGE_SHIFT 3
#define NAN_AVAIL_ENTRY_CTRL_USAGE(_flags) (((_flags) & NAN_AVAIL_ENTRY_CTRL_USAGE_MASK) \
>> NAN_AVAIL_ENTRY_CTRL_USAGE_SHIFT)
-#define NAN_AVAIL_ENTRY_CTRL_UTIL_MASK 0xE0
+#define NAN_AVAIL_ENTRY_CTRL_UTIL_MASK 0x1E0
#define NAN_AVAIL_ENTRY_CTRL_UTIL_SHIFT 5
#define NAN_AVAIL_ENTRY_CTRL_UTIL(_flags) (((_flags) & NAN_AVAIL_ENTRY_CTRL_UTIL_MASK) \
>> NAN_AVAIL_ENTRY_CTRL_UTIL_SHIFT)
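/*
 * Minimal extraction sketch (illustrative only): decoding the usage
 * preference from an availability entry's control flags with the macro
 * above. Treating the control field as a 16-bit value already read by the
 * caller is an assumption here.
 */
static uint8
nan_avail_entry_usage(uint16 entry_ctrl)
{
	return (uint8)NAN_AVAIL_ENTRY_CTRL_USAGE(entry_ctrl);
}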
(*(uint8 *)(((wifi_nan_avail_entry_attr_t *)avail_entry)->var + 2))
#define NAN_AVAIL_CHAN_LIST_HDR_LEN 1
-#define NAN_AVAIL_CHAN_LIST_TYPE_BAND 0x00
#define NAN_AVAIL_CHAN_LIST_TYPE_CHANNEL 0x01
#define NAN_AVAIL_CHAN_LIST_NON_CONTIG_BW 0x02
#define NAN_AVAIL_CHAN_LIST_NUM_ENTRIES_MASK 0xF0
#define NAN_CHAN_NUM_ENTRIES_MASK 0xF0
typedef BWL_PRE_PACKED_STRUCT struct wifi_nan_band_entry_s {
- uint8 band[1];
+ uint8 band[0];
} BWL_POST_PACKED_STRUCT wifi_nan_band_entry_t;
/* Type of Availability: committed */
#define NAN_NDL_CONFIRM(_ndl) (((_ndl)->type_status & NAN_NDL_TYPE_MASK) == \
NDL_ATTR_TYPE_STATUS_CONFIRM)
+
#define NAN_NDL_STATUS_SHIFT 4
#define NAN_NDL_STATUS_MASK 0xF0
#define NAN_NDL_CONT(_ndl) (((_ndl)->type_status & NAN_NDL_STATUS_MASK) == \
NDL_ATTR_TYPE_STATUS_ACCEPTED)
#define NAN_NDL_REJECT(_ndl) (((_ndl)->type_status & NAN_NDL_STATUS_MASK) == \
NDL_ATTR_TYPE_STATUS_REJECTED)
-#define NAN_NDL_FRM_STATUS(_ndl) \
- (((_ndl)->type_status & NAN_NDL_STATUS_MASK) >> NAN_NDL_STATUS_SHIFT)
#define NDL_ATTR_CTRL_NONE 0
#define NDL_ATTR_CTRL_PEER_ID_PRESENT (1 << NDL_ATTR_CTRL_PEER_ID_PRESENT_SHIFT)
uint8 capabilities; /* DFS Master, Extended key id etc */
} BWL_POST_PACKED_STRUCT wifi_nan_dev_cap_t;
-/* map id related */
-
-/* all maps */
-#define NAN_DEV_CAP_ALL_MAPS_FLAG_MASK 0x1 /* nan default map control */
-#define NAN_DEV_CAP_ALL_MAPS_FLAG_SHIFT 0
-/* map id */
-#define NAN_DEV_CAP_MAPID_MASK 0x1E
-#define NAN_DEV_CAP_MAPID_SHIFT 1
-
/* Awake DW Info field format */
/* 2.4GHz DW */
#define NAN_DEV_CAP_OP_PAGING_NDL 0x08
#define NAN_DEV_CAP_OP_MODE_VHT_MASK 0x01
-#define NAN_DEV_CAP_OP_MODE_VHT_SHIFT 0
-#define NAN_DEV_CAP_OP_MODE_VHT8080_MASK 0x02
-#define NAN_DEV_CAP_OP_MODE_VHT8080_SHIFT 1
-#define NAN_DEV_CAP_OP_MODE_VHT160_MASK 0x04
-#define NAN_DEV_CAP_OP_MODE_VHT160_SHIFT 2
+#define NAN_DEV_CAP_OP_MODE_VHT8080_MASK 0x03
+#define NAN_DEV_CAP_OP_MODE_VHT160_MASK 0x05
#define NAN_DEV_CAP_OP_MODE_PAGING_NDL_MASK 0x08
-#define NAN_DEV_CAP_OP_MODE_PAGING_NDL_SHIFT 3
#define NAN_DEV_CAP_RX_ANT_SHIFT 4
#define NAN_DEV_CAP_TX_ANT_MASK 0x0F
#define NAN_DEV_CAP_RX_ANT_MASK 0xF0
-#define NAN_DEV_CAP_TX_ANT(_ant) ((_ant) & NAN_DEV_CAP_TX_ANT_MASK)
-#define NAN_DEV_CAP_RX_ANT(_ant) (((_ant) & NAN_DEV_CAP_RX_ANT_MASK) \
- >> NAN_DEV_CAP_RX_ANT_SHIFT)
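/*
 * Minimal extraction sketch (illustrative only): pulling the Tx/Rx antenna
 * counts out of the raw 8-bit antenna field using the masks above; which
 * wifi_nan_dev_cap_t member carries that field is not shown here.
 */
static void
nan_dev_cap_ant_counts(uint8 ant_field, uint8 *tx, uint8 *rx)
{
	*tx = (uint8)(ant_field & NAN_DEV_CAP_TX_ANT_MASK);
	*rx = (uint8)((ant_field & NAN_DEV_CAP_RX_ANT_MASK) >>
		NAN_DEV_CAP_RX_ANT_SHIFT);
}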
/* Device capabilities */
/* extended iv cap */
#define NAN_DEV_CAP_EXT_KEYID_MASK 0x02
#define NAN_DEV_CAP_EXT_KEYID_SHIFT 1
-/* NDPE attribute support */
-#define NAN_DEV_CAP_NDPE_ATTR_SUPPORT_MASK 0x08
-#define NAN_DEV_CAP_NDPE_ATTR_SUPPORT(_cap) ((_cap) & NAN_DEV_CAP_NDPE_ATTR_SUPPORT_MASK)
/* Band IDs */
enum {
};
typedef uint8 nan_band_id_t;
-/* NAN supported band in device capability */
-#define NAN_DEV_CAP_SUPPORTED_BANDS_2G (1 << NAN_BAND_ID_2G)
-#define NAN_DEV_CAP_SUPPORTED_BANDS_5G (1 << NAN_BAND_ID_5G)
-
/*
* Unaligned schedule attribute section 10.7.19.6 spec. ver r15
*/
/* NAN Action Frame Subtypes */
/* Subtype-0 is Reserved */
#define NAN_MGMT_FRM_SUBTYPE_RESERVED 0
-#define NAN_MGMT_FRM_SUBTYPE_INVALID 0
/* NAN Ranging Request */
#define NAN_MGMT_FRM_SUBTYPE_RANGING_REQ 1
/* NAN Ranging Response */
/* Schedule Update */
#define NAN_MGMT_FRM_SUBTYPE_SCHED_UPD 13
-#define NAN_SCHEDULE_AF(_naf_subtype) \
- ((_naf_subtype >= NAN_MGMT_FRM_SUBTYPE_SCHED_REQ) && \
- (_naf_subtype <= NAN_MGMT_FRM_SUBTYPE_SCHED_UPD))
-
/* Reason code defines */
#define NAN_REASON_RESERVED 0x0
#define NAN_REASON_UNSPECIFIED 0x1
#define NAN_NDP_CTRL_SPEC_INFO_PRESENT 0x20
#define NAN_NDP_CTRL_RESERVED 0xA0
-/* Used for both NDP Attribute and NDPE Attribute, since the structures are identical */
+/* NDP Attribute */
typedef BWL_PRE_PACKED_STRUCT struct wifi_nan_ndp_attr_s {
- uint8 id; /* NDP: 0x10, NDPE: 0x29 */
+ uint8 id; /* 0x10 */
uint16 len; /* length */
uint8 dialog_token; /* dialog token */
uint8 type_status; /* bits 0-3 type, 4-7 status */
NAN_NDP_STATUS_ACCEPT)
#define NAN_NDP_REJECT(_ndp) (((_ndp)->type_status & NAN_NDP_STATUS_MASK) == \
NAN_NDP_STATUS_REJECT)
-
-#define NAN_NDP_FRM_STATUS(_ndp) \
- (((_ndp)->type_status & NAN_NDP_STATUS_MASK) >> NAN_NDP_STATUS_SHIFT)
-
/* NDP Setup Status */
#define NAN_NDP_SETUP_STATUS_OK 1
#define NAN_NDP_SETUP_STATUS_FAIL 0
#define NAN_NDP_SETUP_STATUS_REJECT 2
-/* NDPE TLV list */
-#define NDPE_TLV_TYPE_IPV6 0x00
-#define NDPE_TLV_TYPE_SVC_INFO 0x01
-typedef BWL_PRE_PACKED_STRUCT struct wifi_nan_ndpe_tlv_s {
- uint8 type; /* NDPE TLV type */
- uint16 length; /* NDPE TLV length */
- uint8 data[];
-} BWL_POST_PACKED_STRUCT wifi_nan_ndpe_tlv_t;
-
/* Rng setup attribute type and status macros */
#define NAN_RNG_TYPE_MASK 0x0F
#define NAN_RNG_TYPE_REQUEST 0x0
uint8 tbmp[]; /* time bitmap - Optional */
} BWL_POST_PACKED_STRUCT wifi_nan_sched_entry_t;
-#define NAN_SCHED_ENTRY_MAPID_MASK 0x0F
#define NAN_SCHED_ENTRY_MIN_SIZE OFFSETOF(wifi_nan_sched_entry_t, tbmp)
#define NAN_SCHED_ENTRY_SIZE(_entry) (NAN_SCHED_ENTRY_MIN_SIZE + (_entry)->tbmp_len)
} BWL_POST_PACKED_STRUCT wifi_nan_svc_desc_ext_attr_t;
#define NAN_SDE_ATTR_MIN_LEN OFFSETOF(wifi_nan_svc_desc_ext_attr_t, var)
-#define NAN_SDE_ATTR_RANGE_LEN 4
-#define NAN_SDE_ATTR_SUI_LEN 1
-#define NAN_SDE_ATTR_INFO_LEN_PARAM_LEN 2
-#define NAN_SDE_ATTR_RANGE_INGRESS_LEN 2
-#define NAN_SDE_ATTR_RANGE_EGRESS_LEN 2
-#define NAN_SDE_ATTR_CTRL_LEN 2
-/* max length of variable length field (matching filter, service response filter,
- * or service info) in service descriptor attribute
- */
-#define NAN_DISC_SDA_FIELD_MAX_LEN 255
/* SDEA control field bit definitions and access macros */
#define NAN_SDE_CF_FSD_REQUIRED (1 << 0)
#define NAN_SDE_CF_RANGING_REQUIRED (1 << 7)
#define NAN_SDE_CF_RANGE_PRESENT (1 << 8)
#define NAN_SDE_CF_SVC_UPD_IND_PRESENT (1 << 9)
-/* Using Reserved Bits as per Spec */
-#define NAN_SDE_CF_LIFE_CNT_PUB_RX (1 << 15)
#define NAN_SDE_FSD_REQUIRED(_sde) ((_sde)->control & NAN_SDE_CF_FSD_REQUIRED)
#define NAN_SDE_FSD_GAS(_sde) ((_sde)->control & NAN_SDE_CF_FSD_GAS)
#define NAN_SDE_DP_REQUIRED(_sde) ((_sde)->control & NAN_SDE_CF_DP_REQUIRED)
#define NAN_SDE_RANGING_REQUIRED(_sde) ((_sde)->control & NAN_SDE_CF_RANGING_REQUIRED)
#define NAN_SDE_RANGE_PRESENT(_sde) ((_sde)->control & NAN_SDE_CF_RANGE_PRESENT)
#define NAN_SDE_SVC_UPD_IND_PRESENT(_sde) ((_sde)->control & NAN_SDE_CF_SVC_UPD_IND_PRESENT)
-#define NAN_SDE_LIFE_COUNT_FOR_PUB_RX(_sde) (_sde & NAN_SDE_CF_LIFE_CNT_PUB_RX)
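/*
 * Minimal usage sketch (illustrative only): testing SDEA control bits on a
 * parsed attribute with the accessor macros above; 'sde' is assumed to point
 * at a validated wifi_nan_svc_desc_ext_attr_t.
 */
static bool
nan_sde_ranging_required(const wifi_nan_svc_desc_ext_attr_t *sde)
{
	return (NAN_SDE_RANGING_REQUIRED(sde) != 0);
}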
/* nan2 security */
uint8 var[]; /* multicast sched entry list (schedule_entry_list) */
} BWL_POST_PACKED_STRUCT wifi_nan_mcast_sched_attr_t;
+
/* FAC Channel Entry (section 10.7.19.1.5) */
typedef BWL_PRE_PACKED_STRUCT struct wifi_nan_fac_chan_entry_s {
uint8 oper_class; /* Operating Class */
/*
* OS Abstraction Layer
*
- * Copyright (C) 1999-2019, Broadcom.
- *
+ * Copyright (C) 1999-2017, Broadcom Corporation
+ *
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
* under the terms of the GNU General Public License version 2 (the "GPL"),
* available at http://www.broadcom.com/licenses/GPLv2.php, with the
* following added to such license:
- *
+ *
* As a special exception, the copyright holders of this software give you
* permission to link this software with independent modules, and to copy and
* distribute the resulting executable under terms of your choice, provided that
* the license of that module. An independent module is a module which is not
* derived from this software. The special exception does not apply to any
* modifications of the software.
- *
+ *
* Notwithstanding the above, under no circumstances may you combine this
* software in any way with any other Broadcom software provided under a license
* other than the GPL, without Broadcom's express prior written consent.
*
* <<Broadcom-WL-IPTag/Open:>>
*
- * $Id: osl.h 813810 2019-04-08 12:25:30Z $
+ * $Id: osl.h 642189 2016-06-07 21:12:50Z $
*/
#ifndef _osl_h_
#include <osl_decl.h>
-enum {
- TAIL_BYTES_TYPE_FCS = 1,
- TAIL_BYTES_TYPE_ICV = 2,
- TAIL_BYTES_TYPE_MIC = 3
-};
-
-#define OSL_PKTTAG_SZ 48 /* standard linux pkttag size is 48 bytes */
+#define OSL_PKTTAG_SZ 32 /* Size of PktTag */
/* Drivers use PKTFREESETCB to register a callback function when a packet is freed by OSL */
typedef void (*pktfree_cb_fn_t)(void *ctx, void *pkt, unsigned int status);
typedef unsigned int (*osl_rreg_fn_t)(void *ctx, volatile void *reg, unsigned int size);
typedef void (*osl_wreg_fn_t)(void *ctx, volatile void *reg, unsigned int val, unsigned int size);
+
#if defined(WL_UNITTEST)
#include <utest_osl.h>
#else
#include <linux_osl.h>
-#include <linux_pkt.h>
-#endif // endif
+#endif
#ifndef PKTDBG_TRACE
#define PKTDBG_TRACE(osh, pkt, bit) BCM_REFERENCE(osh)
-#endif // endif
+#endif
#define PKTCTFMAP(osh, p) BCM_REFERENCE(osh)
#if !defined(OSL_SYSUPTIME)
#define OSL_SYSUPTIME() (0)
-#define OSL_SYSUPTIME_NOT_DEFINED 1
-#endif /* !defined(OSL_SYSUPTIME) */
-
-#if !defined(OSL_SYSUPTIME_US)
-#define OSL_SYSUPTIME_US() (0)
-#define OSL_SYSUPTIME_US_NOT_DEFINED 1
-#endif /* !defined(OSL_SYSUPTIME) */
-
-#if defined(OSL_SYSUPTIME_NOT_DEFINED) && defined(OSL_SYSUPTIME_US_NOT_DEFINED)
#define OSL_SYSUPTIME_SUPPORT FALSE
#else
#define OSL_SYSUPTIME_SUPPORT TRUE
#endif /* OSL_SYSUPTIME */
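/*
 * Note (illustrative, not from the original header): a platform OSL that
 * provides an uptime source defines the macro before this point, for example
 * something along the lines of
 *
 *	#define OSL_SYSUPTIME()	((uint32)jiffies_to_msecs(jiffies))
 *
 * in its own OSL header; the zero fallback above is then skipped and
 * OSL_SYSUPTIME_SUPPORT evaluates to TRUE. The jiffies-based definition is an
 * assumed example, not necessarily what linux_osl.h actually uses.
 */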
-#ifndef OSL_GET_LOCALTIME
-#define OSL_GET_LOCALTIME(sec, usec) \
- do { \
- BCM_REFERENCE(sec); \
- BCM_REFERENCE(usec); \
- } while (0)
-#endif /* OSL_GET_LOCALTIME */
-
-#ifndef OSL_LOCALTIME_NS
-#define OSL_LOCALTIME_NS() (OSL_SYSUPTIME_US() * NSEC_PER_USEC)
-#endif /* OSL_LOCALTIME_NS */
-
-#ifndef OSL_SYSTZTIME_US
-#define OSL_SYSTZTIME_US() OSL_SYSUPTIME_US()
-#endif /* OSL_GET_SYSTZTIME */
+#if !defined(OSL_SYSUPTIME_US)
+#define OSL_SYSUPTIME_US() (0)
+#endif /* OSL_SYSUPTIME */
#ifndef OSL_SYS_HALT
#define OSL_SYS_HALT() do {} while (0)
-#endif // endif
-
-#ifndef DMB
-#if defined(STB)
-#define DMB() mb();
-#else /* STB */
-#define DMB() do {} while (0)
-#endif /* STB */
-#endif /* DMB */
+#endif
#ifndef OSL_MEM_AVAIL
#define OSL_MEM_AVAIL() (0xffffffff)
-#endif // endif
+#endif
+
+#if !(defined(PKTC) || defined(PKTC_DONGLE))
#ifndef OSL_OBFUSCATE_BUF
/* For security reasons printing pointers is not allowed.
#define OSL_OBFUSCATE_BUF(x) (x)
#endif /* OSL_OBFUSCATE_BUF */
-#if !defined(PKTC_DONGLE)
-
#define PKTCGETATTR(skb) (0)
#define PKTCSETATTR(skb, f, p, b) BCM_REFERENCE(skb)
#define PKTCCLRATTR(skb) BCM_REFERENCE(skb)
(h) = (t) = (p); \
} \
} while (0)
-#endif // endif
+#endif /* !(PKTC || PKTC_DONGLE) */
-#ifndef PKTSETCHAINED
+#if !(defined(HNDCTF) || defined(PKTC_TX_DONGLE) || defined(PKTC))
#define PKTSETCHAINED(osh, skb) BCM_REFERENCE(osh)
-#endif // endif
-#ifndef PKTCLRCHAINED
#define PKTCLRCHAINED(osh, skb) BCM_REFERENCE(osh)
-#endif // endif
-#ifndef PKTISCHAINED
#define PKTISCHAINED(skb) FALSE
-#endif // endif
+#endif
/* Lbuf with fraglist */
#ifndef PKTFRAGPKTID
#define PKTFRAGPKTID(osh, lb) (0)
-#endif // endif
+#endif
#ifndef PKTSETFRAGPKTID
#define PKTSETFRAGPKTID(osh, lb, id) BCM_REFERENCE(osh)
-#endif // endif
+#endif
#ifndef PKTFRAGTOTNUM
#define PKTFRAGTOTNUM(osh, lb) (0)
-#endif // endif
+#endif
#ifndef PKTSETFRAGTOTNUM
#define PKTSETFRAGTOTNUM(osh, lb, tot) BCM_REFERENCE(osh)
-#endif // endif
+#endif
#ifndef PKTFRAGTOTLEN
#define PKTFRAGTOTLEN(osh, lb) (0)
-#endif // endif
+#endif
#ifndef PKTSETFRAGTOTLEN
#define PKTSETFRAGTOTLEN(osh, lb, len) BCM_REFERENCE(osh)
-#endif // endif
+#endif
#ifndef PKTIFINDEX
#define PKTIFINDEX(osh, lb) (0)
-#endif // endif
+#endif
#ifndef PKTSETIFINDEX
#define PKTSETIFINDEX(osh, lb, idx) BCM_REFERENCE(osh)
-#endif // endif
+#endif
#ifndef PKTGETLF
#define PKTGETLF(osh, len, send, lbuf_type) (0)
-#endif // endif
+#endif
/* in rx path, reuse totlen as used len */
#ifndef PKTFRAGUSEDLEN
#define PKTFRAGUSEDLEN(osh, lb) (0)
-#endif // endif
+#endif
#ifndef PKTSETFRAGUSEDLEN
#define PKTSETFRAGUSEDLEN(osh, lb, len) BCM_REFERENCE(osh)
-#endif // endif
+#endif
#ifndef PKTFRAGLEN
#define PKTFRAGLEN(osh, lb, ix) (0)
-#endif // endif
+#endif
#ifndef PKTSETFRAGLEN
#define PKTSETFRAGLEN(osh, lb, ix, len) BCM_REFERENCE(osh)
-#endif // endif
+#endif
#ifndef PKTFRAGDATA_LO
#define PKTFRAGDATA_LO(osh, lb, ix) (0)
-#endif // endif
+#endif
#ifndef PKTSETFRAGDATA_LO
#define PKTSETFRAGDATA_LO(osh, lb, ix, addr) BCM_REFERENCE(osh)
-#endif // endif
+#endif
#ifndef PKTFRAGDATA_HI
#define PKTFRAGDATA_HI(osh, lb, ix) (0)
-#endif // endif
+#endif
#ifndef PKTSETFRAGDATA_HI
#define PKTSETFRAGDATA_HI(osh, lb, ix, addr) BCM_REFERENCE(osh)
-#endif // endif
+#endif
/* RX FRAG */
#ifndef PKTISRXFRAG
#define PKTISRXFRAG(osh, lb) (0)
-#endif // endif
+#endif
#ifndef PKTSETRXFRAG
#define PKTSETRXFRAG(osh, lb) BCM_REFERENCE(osh)
-#endif // endif
+#endif
#ifndef PKTRESETRXFRAG
#define PKTRESETRXFRAG(osh, lb) BCM_REFERENCE(osh)
-#endif // endif
+#endif
/* TX FRAG */
#ifndef PKTISTXFRAG
#define PKTISTXFRAG(osh, lb) (0)
-#endif // endif
+#endif
#ifndef PKTSETTXFRAG
#define PKTSETTXFRAG(osh, lb) BCM_REFERENCE(osh)
-#endif // endif
+#endif
/* Need Rx completion used for AMPDU reordering */
#ifndef PKTNEEDRXCPL
#define PKTNEEDRXCPL(osh, lb) (TRUE)
-#endif // endif
+#endif
#ifndef PKTSETNORXCPL
#define PKTSETNORXCPL(osh, lb) BCM_REFERENCE(osh)
-#endif // endif
+#endif
#ifndef PKTRESETNORXCPL
#define PKTRESETNORXCPL(osh, lb) BCM_REFERENCE(osh)
-#endif // endif
+#endif
#ifndef PKTISFRAG
#define PKTISFRAG(osh, lb) (0)
-#endif // endif
+#endif
#ifndef PKTFRAGISCHAINED
#define PKTFRAGISCHAINED(osh, i) (0)
-#endif // endif
+#endif
/* TRIM Tail bytes from lfrag */
#ifndef PKTFRAG_TRIM_TAILBYTES
#define PKTFRAG_TRIM_TAILBYTES(osh, p, len, type) PKTSETLEN(osh, p, PKTLEN(osh, p) - len)
-#endif // endif
+#endif
#ifndef PKTISHDRCONVTD
#define PKTISHDRCONVTD(osh, lb) (0)
-#endif // endif
-
-/* Forwarded pkt indication */
-#ifndef PKTISFRWDPKT
-#define PKTISFRWDPKT(osh, lb) 0
-#endif // endif
-#ifndef PKTSETFRWDPKT
-#define PKTSETFRWDPKT(osh, lb) BCM_REFERENCE(osh)
-#endif // endif
-#ifndef PKTRESETFRWDPKT
-#define PKTRESETFRWDPKT(osh, lb) BCM_REFERENCE(osh)
-#endif // endif
-
-/* SFD Frame */
-#ifndef PKTISSFDFRAME
-#define PKTISSFDFRAME(osh, lb) (0)
-#endif // endif
-#ifndef PKTSETSFDFRAME
-#define PKTSETSFDFRAME(osh, lb) BCM_REFERENCE(osh)
-#endif // endif
-#ifndef PKTRESETSFDFRAME
-#define PKTRESETSFDFRAME(osh, lb) BCM_REFERENCE(osh)
-#endif // endif
-#ifndef PKTISSFDTXC
-#define PKTISSFDTXC(osh, lb) (0)
-#endif // endif
-#ifndef PKTSETSFDTXC
-#define PKTSETSFDTXC(osh, lb) BCM_REFERENCE(osh)
-#endif // endif
-#ifndef PKTRESETSFDTXC
-#define PKTRESETSFDTXC(osh, lb) BCM_REFERENCE(osh)
-#endif // endif
+#endif
#ifdef BCM_SECURE_DMA
#define SECURE_DMA_ENAB(osh) (1)
#ifndef BCMDMA64OSL
#define SECURE_DMA_MAP(osh, va, size, direction, p, dmah, pcma, offset) ((dmaaddr_t) ((0)))
#else
-#define SECURE_DMA_MAP(osh, va, size, direction, p, dmah, pcma, offset) \
- ((dmaaddr_t) {.hiaddr = 0, .loaddr = 0})
-#endif // endif
+#define SECURE_DMA_MAP(osh, va, size, direction, p, dmah, pcma, offset) ((dmaaddr_t) {(0)})
+#endif
#define SECURE_DMA_DD_MAP(osh, va, size, direction, p, dmah) 0
#ifndef BCMDMA64OSL
#define SECURE_DMA_MAP_TXMETA(osh, va, size, direction, p, dmah, pcma) ((dmaaddr_t) ((0)))
#else
-#define SECURE_DMA_MAP_TXMETA(osh, va, size, direction, p, dmah, pcma) \
- ((dmaaddr_t) {.hiaddr = 0, .loaddr = 0})
-#endif // endif
+#define SECURE_DMA_MAP_TXMETA(osh, va, size, direction, p, dmah, pcma) ((dmaaddr_t) {(0)})
+#endif
#define SECURE_DMA_UNMAP(osh, pa, size, direction, p, dmah, pcma, offset)
#define SECURE_DMA_UNMAP_ALL(osh, pcma)
#endif /* BCMDMA64OSL */
+
#ifndef ROMMABLE_ASSERT
#define ROMMABLE_ASSERT(exp) ASSERT(exp)
#endif /* ROMMABLE_ASSERT */
-#ifndef MALLOC_NOPERSIST
- #define MALLOC_NOPERSIST MALLOC
-#endif /* !MALLOC_NOPERSIST */
-
-#ifndef MALLOC_PERSIST
- #define MALLOC_PERSIST MALLOC
-#endif /* !MALLOC_PERSIST */
-
-#ifndef MALLOC_NOPERSIST
- #define MALLOC_NOPERSIST MALLOC
-#endif /* !MALLOC_NOPERSIST */
-
-#ifndef MALLOC_PERSIST_ATTACH
- #define MALLOC_PERSIST_ATTACH MALLOC
-#endif /* !MALLOC_PERSIST_ATTACH */
-
-#ifndef MALLOCZ_PERSIST_ATTACH
- #define MALLOCZ_PERSIST_ATTACH MALLOCZ
-#endif /* !MALLOCZ_PERSIST_ATTACH */
-
-#ifndef MALLOCZ_NOPERSIST
- #define MALLOCZ_NOPERSIST MALLOCZ
-#endif /* !MALLOCZ_NOPERSIST */
-
-#ifndef MALLOCZ_PERSIST
- #define MALLOCZ_PERSIST MALLOCZ
-#endif /* !MALLOCZ_PERSIST */
-
-#ifndef MFREE_PERSIST
- #define MFREE_PERSIST MFREE
-#endif /* !MFREE_PERSIST */
-
-#ifndef MALLOC_SET_NOPERSIST
- #define MALLOC_SET_NOPERSIST(osh) do { } while (0)
-#endif /* !MALLOC_SET_NOPERSIST */
-
-#ifndef MALLOC_CLEAR_NOPERSIST
- #define MALLOC_CLEAR_NOPERSIST(osh) do { } while (0)
-#endif /* !MALLOC_CLEAR_NOPERSIST */
-
-#if defined(OSL_MEMCHECK)
-#define MEMCHECK(f, l) osl_memcheck(f, l)
-#else
-#define MEMCHECK(f, l)
-#endif /* OSL_MEMCHECK */
-
#endif /* _osl_h_ */
/*
* osl forward declarations
*
- * Copyright (C) 1999-2019, Broadcom.
- *
+ * Copyright (C) 1999-2017, Broadcom Corporation
+ *
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
* under the terms of the GNU General Public License version 2 (the "GPL"),
* available at http://www.broadcom.com/licenses/GPLv2.php, with the
* following added to such license:
- *
+ *
* As a special exception, the copyright holders of this software give you
* permission to link this software with independent modules, and to copy and
* distribute the resulting executable under terms of your choice, provided that
* the license of that module. An independent module is a module which is not
* derived from this software. The special exception does not apply to any
* modifications of the software.
- *
+ *
* Notwithstanding the above, under no circumstances may you combine this
* software in any way with any other Broadcom software provided under a license
* other than the GPL, without Broadcom's express prior written consent.
typedef struct osl_info osl_t;
typedef struct osl_dmainfo osldma_t;
extern unsigned int lmtest; /* low memory test */
-#endif // endif
+#endif
* OS Abstraction Layer Extension - the APIs defined by the "extension" API
* are only supported by a subset of all operating systems.
*
- * Copyright (C) 1999-2019, Broadcom.
- *
+ * Copyright (C) 1999-2017, Broadcom Corporation
+ *
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
* under the terms of the GNU General Public License version 2 (the "GPL"),
* available at http://www.broadcom.com/licenses/GPLv2.php, with the
* following added to such license:
- *
+ *
* As a special exception, the copyright holders of this software give you
* permission to link this software with independent modules, and to copy and
* distribute the resulting executable under terms of your choice, provided that
* the license of that module. An independent module is a module which is not
* derived from this software. The special exception does not apply to any
* modifications of the software.
- *
+ *
* Notwithstanding the above, under no circumstances may you combine this
* software in any way with any other Broadcom software provided under a license
* other than the GPL, without Broadcom's express prior written consent.
*
* <<Broadcom-WL-IPTag/Open:>>
*
- * $Id: osl_ext.h 759145 2018-04-24 05:09:37Z $
+ * $Id: osl_ext.h 611959 2016-01-12 15:23:56Z $
*/
#ifndef _osl_ext_h_
#define _osl_ext_h_
+
/* ---- Include Files ---------------------------------------------------- */
#if defined(TARGETOS_symbian)
#include <threadx_osl_ext.h>
#else
#define OSL_EXT_DISABLED
-#endif // endif
+#endif
/* Include base operating system abstraction. */
#include <osl.h>
#ifdef __cplusplus
extern "C" {
-#endif // endif
+#endif
/* ---- Constants and Types ---------------------------------------------- */
#define OSL_EXT_TIME_FOREVER ((osl_ext_time_ms_t)(-1))
typedef unsigned int osl_ext_time_ms_t;
-typedef unsigned int osl_ext_time_us_t;
typedef unsigned int osl_ext_event_bits_t;
typedef void* osl_ext_timer_arg_t;
typedef void (*osl_ext_timer_callback)(osl_ext_timer_arg_t arg);
+
/* -----------------------------------------------------------------------
* Tasks.
*/
OSL_EXT_TASK_NUM_PRIORITES
} osl_ext_task_priority_t;
+
#ifndef OSL_EXT_DISABLED
/* ---- Variable Externs ------------------------------------------------- */
/* ---- Function Prototypes ---------------------------------------------- */
+
/* --------------------------------------------------------------------------
** Semaphore
*/
*/
osl_ext_status_t osl_ext_sem_take(osl_ext_sem_t *sem, osl_ext_time_ms_t timeout_msec);
+
/* --------------------------------------------------------------------------
** Mutex
*/
*/
osl_ext_status_t osl_ext_mutex_release(osl_ext_mutex_t *mutex);
+
/* --------------------------------------------------------------------------
** Timers
*/
osl_ext_timer_start(osl_ext_timer_t *timer,
osl_ext_time_ms_t timeout_msec, osl_ext_timer_mode_t mode);
-/****************************************************************************
-* Function: osl_ext_timer_start
-*
-* Purpose: Start a previously created timer object.
-*
-* Parameters: timer (in) Timer object.
-* timeout_usec (in) Invoke callback after this number of micro-seconds.
-* mode (in) One-shot or periodic timer.
-*
-* Returns: OSL_EXT_SUCCESS if the timer was created successfully, or an
-* error code if the timer could not be created.
-*****************************************************************************
-*/
-osl_ext_status_t
-osl_ext_timer_start_us(osl_ext_timer_t *timer,
- osl_ext_time_us_t timeout_usec, osl_ext_timer_mode_t mode);
-
/****************************************************************************
* Function: osl_ext_timer_stop
*
#define osl_ext_task_create(name, stack, stack_size, priority, func, arg, task) \
osl_ext_task_create_ex((name), (stack), (stack_size), (priority), 0, (func), \
- (arg), TRUE, (task))
-
-/****************************************************************************
-* Function: osl_ext_task_create_ex
-*
-* Purpose: Create a task with autostart option.
-*
-* Parameters: name (in) Pointer to task string descriptor.
-* stack (in) Pointer to stack. NULL to allocate.
-* stack_size (in) Stack size - in bytes.
-* priority (in) Abstract task priority.
-* func (in) A pointer to the task entry point function.
-* arg (in) Value passed into task entry point function.
-* autostart (in) TRUE to start task after creation.
-* task (out) Task to create.
-*
-* Returns: OSL_EXT_SUCCESS if the task was created successfully, or an
-* error code if the task could not be created.
-*****************************************************************************
-*/
+ (arg), (task))
osl_ext_status_t osl_ext_task_create_ex(char* name,
void *stack, unsigned int stack_size, osl_ext_task_priority_t priority,
osl_ext_time_ms_t timslice_msec, osl_ext_task_entry func, osl_ext_task_arg_t arg,
- bool autostart, osl_ext_task_t *task);
+ osl_ext_task_t *task);
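/*
 * Minimal usage sketch (illustrative only): creating a worker task through
 * the convenience macro above. The entry-point signature, the 4 KB stack
 * size and passing NULL so the OSL allocates the stack are assumptions made
 * for illustration; no specific osl_ext_task_priority_t constant is named.
 */
static void example_task_main(osl_ext_task_arg_t arg)
{
	(void)arg;
	/* task body */
}

static osl_ext_task_t example_task;

static osl_ext_status_t
example_task_start(osl_ext_task_priority_t priority)
{
	return osl_ext_task_create("example", NULL, 4096, priority,
		example_task_main, (osl_ext_task_arg_t)0, &example_task);
}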
/****************************************************************************
* Function: osl_ext_task_delete
*/
osl_ext_status_t osl_ext_task_delete(osl_ext_task_t *task);
+
/****************************************************************************
* Function: osl_ext_task_is_running
*
*/
osl_ext_task_t *osl_ext_task_current(void);
-/****************************************************************************
-* Function: osl_ext_task_yield
-*
-* Purpose: Yield the CPU to other tasks of the same priority that are
-* ready-to-run.
-*
-* Parameters: None.
-*
-* Returns: OSL_EXT_SUCCESS if successful, else error code.
-*****************************************************************************
-*/
-osl_ext_status_t osl_ext_task_yield(void);
/****************************************************************************
* Function: osl_ext_task_yield
*/
osl_ext_status_t osl_ext_task_yield(void);
-/****************************************************************************
-* Function: osl_ext_task_suspend
-*
-* Purpose: Suspend a task.
-*
-* Parameters: task (mod) Task to suspend.
-*
-* Returns: OSL_EXT_SUCCESS if the task was suspended successfully, or an
-* error code if the task could not be suspended.
-*****************************************************************************
-*/
-osl_ext_status_t osl_ext_task_suspend(osl_ext_task_t *task);
-
-/****************************************************************************
-* Function: osl_ext_task_resume
-*
-* Purpose: Resume a task.
-*
-* Parameters: task (mod) Task to resume.
-*
-* Returns: OSL_EXT_SUCCESS if the task was resumed successfully, or an
-* error code if the task could not be resumed.
-*****************************************************************************
-*/
-osl_ext_status_t osl_ext_task_resume(osl_ext_task_t *task);
/****************************************************************************
* Function: osl_ext_task_enable_stack_check
*/
osl_ext_status_t osl_ext_task_enable_stack_check(void);
+
/* --------------------------------------------------------------------------
** Queue
*/
*/
osl_ext_status_t osl_ext_queue_count(osl_ext_queue_t *queue, int *count);
+
/* --------------------------------------------------------------------------
** Event
*/
osl_ext_status_t osl_ext_event_set(osl_ext_event_t *event,
osl_ext_event_bits_t event_bits);
+
/* --------------------------------------------------------------------------
** Interrupt
*/
*/
osl_ext_interrupt_state_t osl_ext_interrupt_disable(void);
+
/****************************************************************************
* Function: osl_ext_interrupt_restore
*
/* ---- Constants and Types ---------------------------------------------- */
-/* Interrupt control */
-#define OSL_INTERRUPT_SAVE_AREA
-#define OSL_DISABLE
-#define OSL_RESTORE
-
/* Semaphore. */
#define osl_ext_sem_t
#define OSL_EXT_SEM_DECL(sem)
#ifdef __cplusplus
}
-#endif // endif
+#endif
#endif /* _osl_ext_h_ */
/*
* Fundamental types and constants relating to WFA P2P (aka WiFi Direct)
*
- * Copyright (C) 1999-2019, Broadcom.
- *
+ * Copyright (C) 1999-2017, Broadcom Corporation
+ *
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
* under the terms of the GNU General Public License version 2 (the "GPL"),
* available at http://www.broadcom.com/licenses/GPLv2.php, with the
* following added to such license:
- *
+ *
* As a special exception, the copyright holders of this software give you
* permission to link this software with independent modules, and to copy and
* distribute the resulting executable under terms of your choice, provided that
* the license of that module. An independent module is a module which is not
* derived from this software. The special exception does not apply to any
* modifications of the software.
- *
+ *
* Notwithstanding the above, under no circumstances may you combine this
* software in any way with any other Broadcom software provided under a license
* other than the GPL, without Broadcom's express prior written consent.
*
* <<Broadcom-WL-IPTag/Open:>>
*
- * $Id: p2p.h 757905 2018-04-16 23:16:27Z $
+ * $Id: p2p.h 700076 2017-05-17 14:42:22Z $
*/
#ifndef _P2P_H_
#ifndef _TYPEDEFS_H_
#include <typedefs.h>
-#endif // endif
+#endif
#include <wlioctl.h>
#include <802.11.h>
/* This marks the start of a packed structure section. */
#include <packed_section_start.h>
+
/* WiFi P2P OUI values */
+#define P2P_OUI WFA_OUI /* WiFi P2P OUI */
#define P2P_VER WFA_OUI_TYPE_P2P /* P2P version: 9=WiFi P2P v1.0 */
#define P2P_IE_ID 0xdd /* P2P IE element ID */
#define P2P_SE_VS_ID_SERVICES 0x1b
+
/* WiFi P2P IE subelement: P2P Capability (capabilities info) */
BWL_PRE_PACKED_STRUCT struct wifi_p2p_info_se_s {
uint8 eltId; /* SE ID: P2P_SEID_P2P_INFO */
#define P2P_CAPSE_GRP_PERSISTENT 0x20 /* Persistent Reconnect */
#define P2P_CAPSE_GRP_FORMATION 0x40 /* Group Formation */
+
/* WiFi P2P IE subelement: Group Owner Intent */
BWL_PRE_PACKED_STRUCT struct wifi_p2p_intent_se_s {
uint8 eltId; /* SE ID: P2P_SEID_INTENT */
} BWL_POST_PACKED_STRUCT;
typedef struct wifi_p2p_channel_se_s wifi_p2p_channel_se_t;
+
/* Channel Entry structure within the Channel List SE */
BWL_PRE_PACKED_STRUCT struct wifi_p2p_chanlist_entry_s {
uint8 band; /* Regulatory Class (band) */
} BWL_POST_PACKED_STRUCT;
typedef struct wifi_p2p_serv_inst_data_se_s wifi_p2p_serv_inst_data_se_t;
+
/* WiFi P2P IE subelement: Connection capability */
BWL_PRE_PACKED_STRUCT struct wifi_p2p_conn_cap_data_se_s {
uint8 eltId; /* SE ID: P2P_SEID_CONNECT_CAP */
} BWL_POST_PACKED_STRUCT;
typedef struct wifi_p2p_conn_cap_data_se_s wifi_p2p_conn_cap_data_se_t;
+
/* WiFi P2P IE subelement: Advertisement ID */
BWL_PRE_PACKED_STRUCT struct wifi_p2p_advt_id_se_s {
uint8 eltId; /* SE ID: P2P_SEID_ADVERTISE_ID */
} BWL_POST_PACKED_STRUCT;
typedef struct wifi_p2p_advt_id_se_s wifi_p2p_advt_id_se_t;
+
/* WiFi P2P IE subelement: Advertise Service Hash */
BWL_PRE_PACKED_STRUCT struct wifi_p2p_adv_serv_info_s {
uint8 advt_id[4]; /* SE Advertise ID for the service */
} BWL_POST_PACKED_STRUCT;
typedef struct wifi_p2p_adv_serv_info_s wifi_p2p_adv_serv_info_t;
+
/* WiFi P2P IE subelement: Advertise Service Hash */
BWL_PRE_PACKED_STRUCT struct wifi_p2p_advt_serv_se_s {
uint8 eltId; /* SE ID: P2P_SEID_ADVERTISE_SERVICE */
} BWL_POST_PACKED_STRUCT;
typedef struct wifi_p2p_advt_serv_se_s wifi_p2p_advt_serv_se_t;
+
/* WiFi P2P IE subelement: Session ID */
BWL_PRE_PACKED_STRUCT struct wifi_p2p_ssn_id_se_s {
uint8 eltId; /* SE ID: P2P_SEID_SESSION_ID */
} BWL_POST_PACKED_STRUCT;
typedef struct wifi_p2p_ssn_id_se_s wifi_p2p_ssn_id_se_t;
+
#define P2P_ADVT_SERV_SE_FIXED_LEN 3 /* Includes only the element ID and len */
#define P2P_ADVT_SERV_INFO_FIXED_LEN 7 /* Per ADV Service Instance advt_id +
* nw_config_method + serv_name_len
#define P2P_AF_PRESENCE_RSP 2 /* P2P Presence Response */
#define P2P_AF_GO_DISC_REQ 3 /* GO Discoverability Request */
+
/* WiFi P2P Public Action Frame */
BWL_PRE_PACKED_STRUCT struct wifi_p2p_pub_act_frame {
uint8 category; /* P2P_PUB_AF_CATEGORY */
* #include <packed_section_end.h>
*
*
- * Copyright (C) 1999-2019, Broadcom.
- *
+ * Copyright (C) 1999-2017, Broadcom Corporation
+ *
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
* under the terms of the GNU General Public License version 2 (the "GPL"),
* available at http://www.broadcom.com/licenses/GPLv2.php, with the
* following added to such license:
- *
+ *
* As a special exception, the copyright holders of this software give you
* permission to link this software with independent modules, and to copy and
* distribute the resulting executable under terms of your choice, provided that
* the license of that module. An independent module is a module which is not
* derived from this software. The special exception does not apply to any
* modifications of the software.
- *
+ *
* Notwithstanding the above, under no circumstances may you combine this
* software in any way with any other Broadcom software provided under a license
* other than the GPL, without Broadcom's express prior written consent.
*
* <<Broadcom-WL-IPTag/Open:>>
*
- * $Id: packed_section_end.h 776894 2018-08-16 05:50:57Z $
+ * $Id: packed_section_end.h 514727 2014-11-12 03:02:48Z $
*/
+
/* Error check - BWL_PACKED_SECTION is defined in packed_section_start.h
* and undefined in packed_section_end.h. If it is NOT defined at this
* point, then there is a missing include of packed_section_start.h.
#undef BWL_PACKED_SECTION
#else
#error "BWL_PACKED_SECTION is NOT defined!"
-#endif // endif
+#endif
+
+
+
/* Compiler-specific directives for structure packing are declared in
* packed_section_start.h. This marks the end of the structure packing section,
* #include <packed_section_end.h>
*
*
- * Copyright (C) 1999-2019, Broadcom.
- *
+ * Copyright (C) 1999-2017, Broadcom Corporation
+ *
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
* under the terms of the GNU General Public License version 2 (the "GPL"),
* available at http://www.broadcom.com/licenses/GPLv2.php, with the
* following added to such license:
- *
+ *
* As a special exception, the copyright holders of this software give you
* permission to link this software with independent modules, and to copy and
* distribute the resulting executable under terms of your choice, provided that
* the license of that module. An independent module is a module which is not
* derived from this software. The special exception does not apply to any
* modifications of the software.
- *
+ *
* Notwithstanding the above, under no circumstances may you combine this
* software in any way with any other Broadcom software provided under a license
* other than the GPL, without Broadcom's express prior written consent.
*
* <<Broadcom-WL-IPTag/Open:>>
*
- * $Id: packed_section_start.h 776894 2018-08-16 05:50:57Z $
+ * $Id: packed_section_start.h 514727 2014-11-12 03:02:48Z $
*/
-#ifndef _alignment_test_
-#define _alignment_test_
-
-/* ASSERT default packing */
-typedef struct T4 {
- uint8 a;
- uint32 b;
- uint16 c;
- uint8 d;
-} T4_t;
-
-/* 4 byte alignment support */
-/*
-* a . . .
-* b b b b
-* c c d .
-*/
-
-/*
- * Below function is meant to verify that this file is compiled with the default alignment of 4.
- * Function will fail to compile if the condition is not met.
- */
-#ifdef __GNUC__
-#define VARIABLE_IS_NOT_USED __attribute__ ((unused))
-#else
-#define VARIABLE_IS_NOT_USED
-#endif // endif
-static void alignment_test(void);
-static void
-VARIABLE_IS_NOT_USED alignment_test(void)
-{
- /* verify 4 byte alignment support */
- STATIC_ASSERT(sizeof(T4_t) == 12);
-}
-#endif /* _alignment_test_ */
/* Error check - BWL_PACKED_SECTION is defined in packed_section_start.h
* and undefined in packed_section_end.h. If it is already defined at this
#error "BWL_PACKED_SECTION is already defined!"
#else
#define BWL_PACKED_SECTION
-#endif // endif
+#endif
+
+
-#if defined(BWL_DEFAULT_PACKING)
- /* generate an error if BWL_DEFAULT_PACKING is defined */
- #error "BWL_DEFAULT_PACKING not supported any more."
-#endif /* BWL_PACKED_SECTION */
/* Declare compiler-specific directives for structure packing. */
#if defined(__GNUC__) || defined(__lint)
#define BWL_POST_PACKED_STRUCT
#else
#error "Unknown compiler!"
-#endif // endif
+#endif
/*
* pcicfg.h: PCI configuration constants and structures.
*
- * Copyright (C) 1999-2019, Broadcom.
- *
+ * Copyright (C) 1999-2017, Broadcom Corporation
+ *
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
* under the terms of the GNU General Public License version 2 (the "GPL"),
* available at http://www.broadcom.com/licenses/GPLv2.php, with the
* following added to such license:
- *
+ *
* As a special exception, the copyright holders of this software give you
* permission to link this software with independent modules, and to copy and
* distribute the resulting executable under terms of your choice, provided that
* the license of that module. An independent module is a module which is not
* derived from this software. The special exception does not apply to any
* modifications of the software.
- *
+ *
* Notwithstanding the above, under no circumstances may you combine this
* software in any way with any other Broadcom software provided under a license
* other than the GPL, without Broadcom's express prior written consent.
*
* <<Broadcom-WL-IPTag/Open:>>
*
- * $Id: pcicfg.h 795237 2018-12-18 03:26:49Z $
+ * $Id: pcicfg.h 621340 2016-02-25 12:26:40Z $
*/
#ifndef _h_pcicfg_
#define _h_pcicfg_
+
/* pci config status reg has a bit to indicate that capability ptr is present */
#define PCI_CAPPTR_PRESENT 0x0010
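/*
 * Minimal usage sketch (illustrative only): testing the capability-pointer
 * bit in a PCI status register value the caller has already read from
 * config space; how the register is read is outside this header.
 */
static bool
pci_has_capptr(uint16 pci_status)
{
	return ((pci_status & PCI_CAPPTR_PRESENT) != 0);
}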
* 0x18 as per the PCIe full dongle spec. Need to modify the values below
* correctly at a later point in time
*/
+#ifdef DHD_EFI
+#define PCI_CFG_BAR1 0x18
+#else
#define PCI_CFG_BAR1 0x14
+#endif /* DHD_EFI */
#define PCI_CFG_BAR2 0x18
#define PCI_CFG_BAR3 0x1c
#define PCI_CFG_BAR4 0x20
#define PCI_CFG_DEVCTRL 0xd8
#define PCI_CFG_TLCNTRL_5 0x814
+
/* PCI CAPABILITY DEFINES */
#define PCI_CAP_POWERMGMTCAP_ID 0x01
#define PCI_CAP_MSICAP_ID 0x05
#define PCI_CAP_VENDSPEC_ID 0x09
#define PCI_CAP_PCIECAP_ID 0x10
-#define PCI_CAP_MSIXCAP_ID 0x11
/* Data structure to define the Message Signalled Interrupt facility
* Valid for PCI and PCIE configurations
/* PCIE Extended configuration */
#define PCIE_ADV_CORR_ERR_MASK 0x114
-#define PCIE_ADV_CORR_ERR_MASK_OFFSET 0x14
#define CORR_ERR_RE (1 << 0) /* Receiver */
-#define CORR_ERR_BT (1 << 6) /* Bad TLP */
+#define CORR_ERR_BT (1 << 6) /* Bad TLP */
#define CORR_ERR_BD (1 << 7) /* Bad DLLP */
#define CORR_ERR_RR (1 << 8) /* REPLAY_NUM rollover */
#define CORR_ERR_RT (1 << 12) /* Reply timer timeout */
-#define CORR_ERR_AE (1 << 13) /* Adviosry Non-Fital Error Mask */
#define ALL_CORR_ERRORS (CORR_ERR_RE | CORR_ERR_BT | CORR_ERR_BD | \
CORR_ERR_RR | CORR_ERR_RT)
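/*
 * Hedged example (not in the original patch): ALL_CORR_ERRORS collects the
 * correctable-error bits listed above, so a driver wanting to mask reporting
 * of all of them could write it to the AER correctable-error mask register at
 * PCIE_ADV_CORR_ERR_MASK; 'pdev' is again assumed from the PCI probe path.
 *
 *     pci_write_config_dword(pdev, PCIE_ADV_CORR_ERR_MASK, ALL_CORR_ERRORS);
 */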
/* PCIE Root Capability Register bits (Host mode only) */
#define PCIE_RC_CRS_VISIBILITY 0x0001
-/* PCIe PMCSR Register bits */
-#define PCIE_PMCSR_PMESTAT 0x8000
-
/* Header to define the PCIE specific capabilities in the extended config space */
typedef struct _pcie_enhanced_caphdr {
uint16 capID;
uint16 next_ptr : 12;
} pcie_enhanced_caphdr;
-#define PCIE_CFG_PMCSR 0x4C
+
#define PCI_BAR0_WIN 0x80 /* backplane address space accessed by BAR0 */
#define PCI_BAR1_WIN 0x84 /* backplane address space accessed by BAR1 */
#define PCI_SPROM_CONTROL 0x88 /* sprom property control */
-#define PCIE_CFG_SUBSYSTEM_CONTROL 0x88 /* used as subsystem control in PCIE devices */
#define PCI_BAR1_CONTROL 0x8c /* BAR1 region burst control */
#define PCI_INT_STATUS 0x90 /* PCI and other cores interrupts */
#define PCI_INT_MASK 0x94 /* mask of PCI and other cores interrupts */
#define PCI_CLK_CTL_ST 0xa8 /* pci config space clock control/status (>=rev14) */
#define PCI_BAR0_WIN2 0xac /* backplane address space accessed by second 4KB of BAR0 */
#define PCI_GPIO_IN 0xb0 /* pci config space gpio input (>=rev3) */
-#define PCIE_CFG_DEVICE_CAPABILITY 0xb0 /* used as device capability in PCIE devices */
#define PCI_GPIO_OUT 0xb4 /* pci config space gpio output (>=rev3) */
#define PCIE_CFG_DEVICE_CONTROL 0xb4 /* 0xb4 is used as device control in PCIE devices */
#define PCIE_DC_AER_CORR_EN (1u << 0u)
#define PCI_CONFIG_EXT_CLK_MIN_TIME_MASK (1u << 31u)
#define PCI_CONFIG_EXT_CLK_MIN_TIME_SHIFT (31)
-#define PCI_ADV_ERR_CAP 0x100
-#define PCI_UC_ERR_STATUS 0x104
-#define PCI_UNCORR_ERR_MASK 0x108
-#define PCI_UCORR_ERR_SEVR 0x10c
-#define PCI_CORR_ERR_STATUS 0x110
-#define PCI_CORR_ERR_MASK 0x114
-#define PCI_ERR_CAP_CTRL 0x118
-#define PCI_TLP_HDR_LOG1 0x11c
-#define PCI_TLP_HDR_LOG2 0x120
-#define PCI_TLP_HDR_LOG3 0x124
-#define PCI_TLP_HDR_LOG4 0x128
-#define PCI_TL_CTRL_5 0x814
-#define PCI_TL_HDR_FC_ST 0x980
-#define PCI_TL_TGT_CRDT_ST 0x990
-#define PCI_TL_SMLOGIC_ST 0x998
-#define PCI_DL_ATTN_VEC 0x1040
-#define PCI_DL_STATUS 0x1048
-
-#define PCI_PHY_CTL_0 0x1800
-#define PCI_SLOW_PMCLK_EXT_RLOCK (1 << 7)
-
-#define PCI_LINK_STATE_DEBUG 0x1c24
-#define PCI_RECOVERY_HIST 0x1ce4
-#define PCI_PHY_LTSSM_HIST_0 0x1cec
-#define PCI_PHY_LTSSM_HIST_1 0x1cf0
-#define PCI_PHY_LTSSM_HIST_2 0x1cf4
-#define PCI_PHY_LTSSM_HIST_3 0x1cf8
-#define PCI_PHY_DBG_CLKREG_0 0x1e10
-#define PCI_PHY_DBG_CLKREG_1 0x1e14
-#define PCI_PHY_DBG_CLKREG_2 0x1e18
-#define PCI_PHY_DBG_CLKREG_3 0x1e1c
-
-/* Bit settings for PCIE_CFG_SUBSYSTEM_CONTROL register */
-#define PCIE_BAR1COHERENTACCEN_BIT 8
-#define PCIE_BAR2COHERENTACCEN_BIT 9
-#define PCIE_SSRESET_STATUS_BIT 13
-#define PCIE_SSRESET_DISABLE_BIT 14
-#define PCIE_SSRESET_DIS_ENUM_RST_BIT 15
-
-#define PCIE_BARCOHERENTACCEN_MASK 0x300
-
-/* Bit settings for PCI_UC_ERR_STATUS register */
-#define PCI_UC_ERR_URES (1 << 20) /* Unsupported Request Error Status */
-#define PCI_UC_ERR_ECRCS (1 << 19) /* ECRC Error Status */
-#define PCI_UC_ERR_MTLPS (1 << 18) /* Malformed TLP Status */
-#define PCI_UC_ERR_ROS (1 << 17) /* Receiver Overflow Status */
-#define PCI_UC_ERR_UCS (1 << 16) /* Unexpected Completion Status */
-#define PCI_UC_ERR_CAS (1 << 15) /* Completer Abort Status */
-#define PCI_UC_ERR_CTS (1 << 14) /* Completer Timeout Status */
-#define PCI_UC_ERR_FCPES (1 << 13) /* Flow Control Protocol Error Status */
-#define PCI_UC_ERR_PTLPS (1 << 12) /* Poisoned TLP Status */
-#define PCI_UC_ERR_DLPES (1 << 4) /* Data Link Protocol Error Status */
-
-#define PCI_DL_STATUS_PHY_LINKUP (1 << 13) /* Status of LINK */
-
#define PCI_PMCR_REFUP 0x1814 /* Trefup time */
-#define PCI_PMCR_TREFUP_LO_MASK 0x3f
-#define PCI_PMCR_TREFUP_LO_SHIFT 24
-#define PCI_PMCR_TREFUP_LO_BITS 6
-#define PCI_PMCR_TREFUP_HI_MASK 0xf
-#define PCI_PMCR_TREFUP_HI_SHIFT 5
-#define PCI_PMCR_TREFUP_HI_BITS 4
-#define PCI_PMCR_TREFUP_MAX 0x400
-#define PCI_PMCR_TREFUP_MAX_SCALE 0x2000
-
#define PCI_PMCR_REFUP_EXT 0x1818 /* Trefup extend Max */
-#define PCI_PMCR_TREFUP_EXT_SHIFT 22
-#define PCI_PMCR_TREFUP_EXT_SCALE 3
-#define PCI_PMCR_TREFUP_EXT_ON 1
-#define PCI_PMCR_TREFUP_EXT_OFF 0
-
#define PCI_TPOWER_SCALE_MASK 0x3
#define PCI_TPOWER_SCALE_SHIFT 3 /* 0:1 is scale and 2 is rsvd */
+
#define PCI_BAR0_SHADOW_OFFSET (2 * 1024) /* bar0 + 2K accesses sprom shadow (in pci core) */
#define PCI_BAR0_SPROM_OFFSET (4 * 1024) /* bar0 + 4K accesses external sprom */
#define PCI_BAR0_PCIREGS_OFFSET (6 * 1024) /* bar0 + 6K accesses pci core registers */
#define PCIE2_BAR0_WIN2 0x70 /* backplane address space accessed by second 4KB of BAR0 */
#define PCIE2_BAR0_CORE2_WIN 0x74 /* backplane address space accessed by second 4KB of BAR0 */
#define PCIE2_BAR0_CORE2_WIN2 0x78 /* backplane address space accessed by second 4KB of BAR0 */
-#define PCIE2_BAR0_WINSZ 0x8000
#define PCI_BAR0_WIN2_OFFSET 0x1000u
#define PCI_CORE_ENUM_OFFSET 0x2000u
/*
* BCM43XX PCIE core hardware definitions.
*
- * Copyright (C) 1999-2019, Broadcom.
- *
+ * Copyright (C) 1999-2017, Broadcom Corporation
+ *
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
* under the terms of the GNU General Public License version 2 (the "GPL"),
* available at http://www.broadcom.com/licenses/GPLv2.php, with the
* following added to such license:
- *
+ *
* As a special exception, the copyright holders of this software give you
* permission to link this software with independent modules, and to copy and
* distribute the resulting executable under terms of your choice, provided that
* the license of that module. An independent module is a module which is not
* derived from this software. The special exception does not apply to any
* modifications of the software.
- *
+ *
* Notwithstanding the above, under no circumstances may you combine this
* software in any way with any other Broadcom software provided under a license
* other than the GPL, without Broadcom's express prior written consent.
*
* <<Broadcom-WL-IPTag/Open:>>
*
- * $Id: pcie_core.h 792442 2018-12-05 00:20:53Z $
+ * $Id: pcie_core.h 673814 2016-12-05 06:10:24Z $
*/
#ifndef _PCIE_CORE_H
#define _PCIE_CORE_H
#include <sbhnddma.h>
#include <siutils.h>
-#define REV_GE_64(rev) (rev >= 64)
-
/* cpp contortions to concatenate w/arg prescan */
#ifndef PAD
#define _PADLINE(line) pad ## line
#define _XSTR(line) _PADLINE(line)
#define PAD _XSTR(__LINE__)
-#endif // endif
+#endif
/* PCIE Enumeration space offsets */
#define PCIE_CORE_CONFIG_OFFSET 0x0
#define PCIE_SPROM_SHADOW_OFFSET 0x800
#define PCIE_SBCONFIG_OFFSET 0xE00
+
#define PCIEDEV_MAX_DMAS 4
/* PCIE Bar0 Address Mapping. Each function maps 16KB config space */
#define IFRM_VECSTAT_MASK 0x3
#define IFRM_VEC_MASK 0xff
-/* HMAP Windows */
-#define HMAP_MAX_WINDOWS 8
-
/* idma frm array */
typedef struct pcie_ifrm_array {
uint32 addr;
uint32 intmask;
} pcie_ifrm_intr_t;
-/* HMAP window register set */
-typedef volatile struct pcie_hmapwindow {
- uint32 baseaddr_lo; /* BaseAddrLower */
- uint32 baseaddr_hi; /* BaseAddrUpper */
- uint32 windowlength; /* Window Length */
- uint32 PAD[1];
-} pcie_hmapwindow_t;
-
-typedef volatile struct pcie_hmapviolation {
- uint32 hmap_violationaddr_lo; /* violating address lo */
- uint32 hmap_violationaddr_hi; /* violating addr hi */
- uint32 hmap_violation_info; /* violation info */
- uint32 PAD[1];
-} pcie_hmapviolation_t;
/* SB side: PCIE core and host control registers */
typedef volatile struct sbpcieregs {
uint32 control; /* host mode only */
uint32 biststatus; /* bist Status: 0x00C */
uint32 gpiosel; /* PCIE gpio sel: 0x010 */
uint32 gpioouten; /* PCIE gpio outen: 0x14 */
- uint32 gpioout; /* PCIE gpio out: 0x18 */
- uint32 PAD;
+ uint32 PAD[2];
uint32 intstatus; /* Interrupt status: 0x20 */
uint32 intmask; /* Interrupt mask: 0x24 */
uint32 sbtopcimailbox; /* sb to pcie mailbox: 0x028 */
uint32 sbtopcie0; /* sb to pcie translation 0: 0x100 */
uint32 sbtopcie1; /* sb to pcie translation 1: 0x104 */
uint32 sbtopcie2; /* sb to pcie translation 2: 0x108 */
- uint32 sbtopcie0upper; /* sb to pcie translation 0: 0x10C */
- uint32 sbtopcie1upper; /* sb to pcie translation 1: 0x110 */
- uint32 PAD[3];
+ uint32 PAD[5];
 /* pcie core supports indirect access to config space */
uint32 configaddr; /* pcie config space access: Address field: 0x120 */
uint32 err_hdr_logreg3; /* 0x1B8 */
uint32 err_hdr_logreg4; /* 0x1BC */
uint32 err_code_logreg; /* 0x1C0 */
- uint32 axi_dbg_ctl; /* 0x1C4 */
- uint32 axi_dbg_data0; /* 0x1C8 */
- uint32 axi_dbg_data1; /* 0x1CC */
- uint32 PAD[4]; /* 0x1D0 - 0x1DF */
+ uint32 PAD[7]; /* 0x1C4 - 0x1DF */
uint32 clk_ctl_st; /* 0x1E0 */
uint32 PAD[1]; /* 0x1E4 */
uint32 powerctl; /* 0x1E8 */
/* 0x480 - 0x4FF */
pcie_ifrm_intr_t ifrm_intr[IFRM_FR_DEV_MAX];
/* 0x500 - 0x53F */
- /* HMAP regs for PCIE corerev >= 24 [0x540 - 0x5DF] */
- pcie_hmapwindow_t hmapwindow[HMAP_MAX_WINDOWS]; /* 0x540 - 0x5BF */
- pcie_hmapviolation_t hmapviolation; /* 0x5C0 - 0x5CF */
- uint32 hmap_window_config; /* 0x5D0 */
- uint32 PAD[3]; /* 0x5D4 - 0x5DF */
-
- uint32 PAD[8]; /* 0x5E0 - 0x5FF */
+ uint32 PAD[48]; /* 0x540 - 0x5FF */
uint32 PAD[2][64]; /* 0x600 - 0x7FF */
} pcie2;
} u;
uint16 sprom[64]; /* SPROM shadow Area : 0x800 - 0x880 */
uint32 PAD[96]; /* 0x880 - 0x9FF */
/* direct memory access (pcie2 rev19 and after) : 0xA00 - 0xAFF */
- union {
- /* corerev < 64 */
- struct {
- uint32 dar_ctrl; /* 0xA00 */
- uint32 PAD[7]; /* 0xA04-0xA1F */
- uint32 intstatus; /* 0xA20 */
- uint32 PAD[1]; /* 0xA24 */
- uint32 h2d_db_0_0; /* 0xA28 */
- uint32 h2d_db_0_1; /* 0xA2C */
- uint32 h2d_db_1_0; /* 0xA30 */
- uint32 h2d_db_1_1; /* 0xA34 */
- uint32 h2d_db_2_0; /* 0xA38 */
- uint32 h2d_db_2_1; /* 0xA3C */
- uint32 errlog; /* 0xA40 */
- uint32 erraddr; /* 0xA44 */
- uint32 mbox_int; /* 0xA48 */
- uint32 fis_ctrl; /* 0xA4C */
- uint32 PAD[36]; /* 0xA50 - 0xADC */
- uint32 clk_ctl_st; /* 0xAE0 */
- uint32 PAD[1]; /* 0xAE4 */
- uint32 powerctl; /* 0xAE8 */
- uint32 PAD[5]; /* 0xAEC-0xAFF */
- } dar;
- /* corerev > = 64 */
- struct {
- uint32 dar_ctrl; /* 0xA00 */
- uint32 dar_cap; /* 0xA04 */
- uint32 clk_ctl_st; /* 0xA08 */
- uint32 powerctl; /* 0xA0C */
- uint32 intstatus; /* 0xA10 */
- uint32 PAD[3]; /* 0xA14-0xA1F */
- uint32 h2d_db_0_0; /* 0xA20 */
- uint32 h2d_db_0_1; /* 0xA24 */
- uint32 h2d_db_1_0; /* 0xA28 */
- uint32 h2d_db_1_1; /* 0xA2C */
- uint32 h2d_db_2_0; /* 0xA30 */
- uint32 h2d_db_2_1; /* 0xA34 */
- uint32 h2d_db_3_0; /* 0xA38 */
- uint32 h2d_db_3_1; /* 0xA3C */
- uint32 h2d_db_4_0; /* 0xA40 */
- uint32 h2d_db_4_1; /* 0xA44 */
- uint32 h2d_db_5_0; /* 0xA48 */
- uint32 h2d_db_5_1; /* 0xA4C */
- uint32 h2d_db_6_0; /* 0xA50 */
- uint32 h2d_db_6_1; /* 0xA54 */
- uint32 h2d_db_7_0; /* 0xA58 */
- uint32 h2d_db_7_1; /* 0xA5C */
- uint32 errlog; /* 0xA60 */
- uint32 erraddr; /* 0xA64 */
- uint32 mbox_int; /* 0xA68 */
- uint32 fis_ctrl; /* 0xA6C */
- uint32 PAD[36]; /* 0xA70-0xAFF */
- } dar_64;
- } u1;
- uint32 PAD[64]; /* 0xB00-0xBFF */
- /* Function Control/Status Registers for corerev >= 64 */
- /* 0xC00 - 0xCFF */
- struct {
- uint32 control; /* 0xC00 */
- uint32 iostatus; /* 0xC04 */
- uint32 capability; /* 0xC08 */
- uint32 PAD[1]; /* 0xC0C */
- uint32 intstatus; /* 0xC10 */
- uint32 intmask; /* 0xC14 */
- uint32 pwr_intstatus; /* 0xC18 */
- uint32 pwr_intmask; /* 0xC1C */
- uint32 msi_vector; /* 0xC20 */
- uint32 msi_intmask; /* 0xC24 */
- uint32 msi_intstatus; /* 0xC28 */
- uint32 msi_pend_cnt; /* 0xC2C */
- uint32 mbox_intstatus; /* 0xC30 */
- uint32 mbox_intmask; /* 0xC34 */
- uint32 ltr_state; /* 0xC38 */
- uint32 PAD[1]; /* 0xC3C */
- uint32 intr_vector; /* 0xC40 */
- uint32 intr_addrlow; /* 0xC44 */
- uint32 intr_addrhigh; /* 0xC48 */
- uint32 PAD[45]; /* 0xC4C-0xCFF */
- } ftn_ctrl;
+ uint32 PAD[16]; /* 0xA00 - 0xA3F */
+ uint32 dm_errlog; /* 0xA40 */
+ uint32 dm_erraddr; /* 0xA44 */
+ uint32 PAD[37]; /* 0xA48 - 0xADC */
+ uint32 dm_clk_ctl_st; /* 0xAE0 */
+ uint32 PAD[1]; /* 0xAE4 */
+ uint32 dm_powerctl; /* 0xAE8 */
} sbpcieregs_t;
#define PCIE_CFG_DA_OFFSET 0x400 /* direct access register offset for configuration space */
#define PCIE_MULTIMSI_EN 0x2000 /* enable multi-vector MSI messages */
#define PCIE_PipeIddqDisable0 0x8000 /* Disable assertion of pcie_pipe_iddq during L1.2 and L2 */
#define PCIE_PipeIddqDisable1 0x10000 /* Disable assertion of pcie_pipe_iddq during L2 */
-#define PCIE_EN_MDIO_IN_PERST 0x20000 /* enable access to internal registers when PERST */
#define PCIE_MSI_B2B_EN 0x100000 /* enable back-to-back MSI messages */
#define PCIE_MSI_FIFO_CLEAR 0x200000 /* reset MSI FIFO */
-#define PCIE_IDMA_MODE_EN(rev) (REV_GE_64(rev) ? 0x1 : 0x800000) /* implicit M2M DMA mode */
-#define PCIE_TL_CLK_DETCT 0x4000000 /* enable TL clk detection */
-
-/* Function control (corerev > 64) */
-#define PCIE_CPLCA_ENABLE 0x01
-/* 1: send CPL with CA on BP error, 0: send CPLD with SC and data is FFFF */
-#define PCIE_DLY_PERST_TO_COE 0x02
-/* when set, PERST is holding asserted until sprom-related register updates has completed */
+#define PCIE_IDMA_MODE_EN 0x800000 /* implicit M2M DMA mode */
#define PCIE_CFGADDR 0x120 /* offsetof(configaddr) */
#define PCIE_CFGDATA 0x124 /* offsetof(configdata) */
#define MSIVEC_D2H0_DB1 (0x1 << 4) /* MSI Vector offset for interface0 door bell 1 is 5 */
/* PCIE MailboxInt/MailboxIntMask register */
-#define PCIE_MB_TOSB_FN0_0 0x0001 /* write to assert PCIEtoSB Mailbox interrupt */
-#define PCIE_MB_TOSB_FN0_1 0x0002
-#define PCIE_MB_TOSB_FN1_0 0x0004
-#define PCIE_MB_TOSB_FN1_1 0x0008
-#define PCIE_MB_TOSB_FN2_0 0x0010
-#define PCIE_MB_TOSB_FN2_1 0x0020
-#define PCIE_MB_TOSB_FN3_0 0x0040
-#define PCIE_MB_TOSB_FN3_1 0x0080
-#define PCIE_MB_TOPCIE_FN0_0 0x0100 /* int status/mask for SBtoPCIE Mailbox interrupts */
-#define PCIE_MB_TOPCIE_FN0_1 0x0200
-#define PCIE_MB_TOPCIE_FN1_0 0x0400
-#define PCIE_MB_TOPCIE_FN1_1 0x0800
-#define PCIE_MB_TOPCIE_FN2_0 0x1000
-#define PCIE_MB_TOPCIE_FN2_1 0x2000
-#define PCIE_MB_TOPCIE_FN3_0 0x4000
-#define PCIE_MB_TOPCIE_FN3_1 0x8000
-
-#define PCIE_MB_TOPCIE_DB0_D2H0(rev) (REV_GE_64(rev) ? 0x0001 : 0x010000)
-#define PCIE_MB_TOPCIE_DB0_D2H1(rev) (REV_GE_64(rev) ? 0x0002 : 0x020000)
-#define PCIE_MB_TOPCIE_DB1_D2H0(rev) (REV_GE_64(rev) ? 0x0004 : 0x040000)
-#define PCIE_MB_TOPCIE_DB1_D2H1(rev) (REV_GE_64(rev) ? 0x0008 : 0x080000)
-#define PCIE_MB_TOPCIE_DB2_D2H0(rev) (REV_GE_64(rev) ? 0x0010 : 0x100000)
-#define PCIE_MB_TOPCIE_DB2_D2H1(rev) (REV_GE_64(rev) ? 0x0020 : 0x200000)
-#define PCIE_MB_TOPCIE_DB3_D2H0(rev) (REV_GE_64(rev) ? 0x0040 : 0x400000)
-#define PCIE_MB_TOPCIE_DB3_D2H1(rev) (REV_GE_64(rev) ? 0x0080 : 0x800000)
-#define PCIE_MB_TOPCIE_DB4_D2H0(rev) (REV_GE_64(rev) ? 0x0100 : 0x0)
-#define PCIE_MB_TOPCIE_DB4_D2H1(rev) (REV_GE_64(rev) ? 0x0200 : 0x0)
-#define PCIE_MB_TOPCIE_DB5_D2H0(rev) (REV_GE_64(rev) ? 0x0400 : 0x0)
-#define PCIE_MB_TOPCIE_DB5_D2H1(rev) (REV_GE_64(rev) ? 0x0800 : 0x0)
-#define PCIE_MB_TOPCIE_DB6_D2H0(rev) (REV_GE_64(rev) ? 0x1000 : 0x0)
-#define PCIE_MB_TOPCIE_DB6_D2H1(rev) (REV_GE_64(rev) ? 0x2000 : 0x0)
-#define PCIE_MB_TOPCIE_DB7_D2H0(rev) (REV_GE_64(rev) ? 0x4000 : 0x0)
-#define PCIE_MB_TOPCIE_DB7_D2H1(rev) (REV_GE_64(rev) ? 0x8000 : 0x0)
-
-#define PCIE_MB_D2H_MB_MASK(rev) \
- (PCIE_MB_TOPCIE_DB0_D2H0(rev) | PCIE_MB_TOPCIE_DB0_D2H1(rev) | \
- PCIE_MB_TOPCIE_DB1_D2H0(rev) | PCIE_MB_TOPCIE_DB1_D2H1(rev) | \
- PCIE_MB_TOPCIE_DB2_D2H0(rev) | PCIE_MB_TOPCIE_DB2_D2H1(rev) | \
- PCIE_MB_TOPCIE_DB3_D2H0(rev) | PCIE_MB_TOPCIE_DB3_D2H1(rev) | \
- PCIE_MB_TOPCIE_DB4_D2H0(rev) | PCIE_MB_TOPCIE_DB4_D2H1(rev) | \
- PCIE_MB_TOPCIE_DB5_D2H0(rev) | PCIE_MB_TOPCIE_DB5_D2H1(rev) | \
- PCIE_MB_TOPCIE_DB6_D2H0(rev) | PCIE_MB_TOPCIE_DB6_D2H1(rev) | \
- PCIE_MB_TOPCIE_DB7_D2H0(rev) | PCIE_MB_TOPCIE_DB7_D2H1(rev))
+#define PCIE_MB_TOSB_FN0_0 0x0001 /* write to assert PCIEtoSB Mailbox interrupt */
+#define PCIE_MB_TOSB_FN0_1 0x0002
+#define PCIE_MB_TOSB_FN1_0 0x0004
+#define PCIE_MB_TOSB_FN1_1 0x0008
+#define PCIE_MB_TOSB_FN2_0 0x0010
+#define PCIE_MB_TOSB_FN2_1 0x0020
+#define PCIE_MB_TOSB_FN3_0 0x0040
+#define PCIE_MB_TOSB_FN3_1 0x0080
+#define PCIE_MB_TOPCIE_FN0_0 0x0100 /* int status/mask for SBtoPCIE Mailbox interrupts */
+#define PCIE_MB_TOPCIE_FN0_1 0x0200
+#define PCIE_MB_TOPCIE_FN1_0 0x0400
+#define PCIE_MB_TOPCIE_FN1_1 0x0800
+#define PCIE_MB_TOPCIE_FN2_0 0x1000
+#define PCIE_MB_TOPCIE_FN2_1 0x2000
+#define PCIE_MB_TOPCIE_FN3_0 0x4000
+#define PCIE_MB_TOPCIE_FN3_1 0x8000
+#define PCIE_MB_TOPCIE_D2H0_DB0 0x10000
+#define PCIE_MB_TOPCIE_D2H0_DB1 0x20000
+#define PCIE_MB_TOPCIE_D2H1_DB0 0x40000
+#define PCIE_MB_TOPCIE_D2H1_DB1 0x80000
+#define PCIE_MB_TOPCIE_D2H2_DB0 0x100000
+#define PCIE_MB_TOPCIE_D2H2_DB1 0x200000
+#define PCIE_MB_TOPCIE_D2H3_DB0 0x400000
+#define PCIE_MB_TOPCIE_D2H3_DB1 0x800000
+
+#define PCIE_MB_D2H_MB_MASK \
+ (PCIE_MB_TOPCIE_D2H0_DB0 | PCIE_MB_TOPCIE_D2H0_DB1 | \
+ PCIE_MB_TOPCIE_D2H1_DB0 | PCIE_MB_TOPCIE_D2H1_DB1 | \
+ PCIE_MB_TOPCIE_D2H2_DB0 | PCIE_MB_TOPCIE_D2H2_DB1 | \
+ PCIE_MB_TOPCIE_D2H3_DB0 | PCIE_MB_TOPCIE_D2H3_DB1)
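/*
 * Minimal sketch (added for illustration, not part of the patch): an ISR-side
 * helper that keeps only the device-to-host doorbell bits of a previously
 * read PCIE MailboxInt value.  The helper name is hypothetical.
 */
static inline uint32 pcie_d2h_doorbells(uint32 mailboxint_val)
{
	/* non-zero means at least one D2H doorbell interrupt is pending */
	return (mailboxint_val & PCIE_MB_D2H_MB_MASK);
}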
#define SBTOPCIE0_BASE 0x08000000
#define SBTOPCIE1_BASE 0x0c000000
#define PCIE2_MDIO_WR_DATA 0x12C
#define PCIE2_MDIO_RD_DATA 0x130
+
/* MDIO control */
#define MDIOCTL_DIVISOR_MASK 0x7fu /* clock to be used on MDIO */
#define MDIOCTL_DIVISOR_VAL 0x2u
#define MDIODATA2_MASK 0x7FFFFFFF /* rd/wr transaction data */
#define MDIODATA2_DEVADDR_SHF 4 /* Physmedia devaddr shift */
+
/* MDIO devices (SERDES modules)
 * unlike old pcie cores (rev < 10), the rev10 pcie serdes organizes registers into a few blocks.
 * a two-layer mapping (blockidx, register offset) is required
#define MDIO_DEV_SERDESID 0x831
#define MDIO_DEV_RXCTRL0 0x840
+
/* XgxsBlk1_A Register Offsets */
#define BLK1_PWR_MGMT0 0x16
#define BLK1_PWR_MGMT1 0x17
* #define PCIE_CAP_RTSTA 32 // Root Status
*/
+
/* Linkcapability reg offset in PCIE Cap */
#define PCIE_CAP_LINKCAP_OFFSET 12 /* linkcap offset in pcie cap */
#define PCIE_CAP_LINKCAP_LNKSPEED_MASK 0xf /* Supported Link Speeds */
/* Uc_Err reg offset in AER Cap */
#define PCIE_EXTCAP_ID_ERR 0x01 /* Advanced Error Reporting */
#define PCIE_EXTCAP_AER_UCERR_OFFSET 4 /* Uc_Err reg offset in AER Cap */
-#define PCIE_EXTCAP_ERR_HEADER_LOG_0 28
-#define PCIE_EXTCAP_ERR_HEADER_LOG_1 32
-#define PCIE_EXTCAP_ERR_HEADER_LOG_2 36
-#define PCIE_EXTCAP_ERR_HEADER_LOG_3 40
-
-/* L1SS reg offset in L1SS Ext Cap */
-#define PCIE_EXTCAP_ID_L1SS 0x1e /* PCI Express L1 PM Substates Capability */
-#define PCIE_EXTCAP_L1SS_CAP_OFFSET 4 /* L1SSCap reg offset in L1SS Cap */
-#define PCIE_EXTCAP_L1SS_CONTROL_OFFSET 8 /* L1SSControl reg offset in L1SS Cap */
-#define PCIE_EXTCAP_L1SS_CONTROL2_OFFSET 0xc /* L1SSControl reg offset in L1SS Cap */
/* Linkcontrol reg offset in PCIE Cap */
#define PCIE_CAP_LINKCTRL_OFFSET 16 /* linkctrl offset in pcie cap */
#define PCIE_CAP_DEVCTRL_MPS_512B 2 /* 512 Byte */
#define PCIE_CAP_DEVCTRL_MPS_1024B 3 /* 1024 Byte */
-#define PCIE_ASPM_CTRL_MASK 3 /* bit 0 and 1 */
#define PCIE_ASPM_ENAB 3 /* ASPM L0s & L1 in linkctrl */
#define PCIE_ASPM_L1_ENAB 2 /* ASPM L0s & L1 in linkctrl */
#define PCIE_ASPM_L0s_ENAB 1 /* ASPM L0s & L1 in linkctrl */
#define PCIE_ASPM_L11_ENAB 8 /* ASPM L1.1 in PML1_sub_control2 */
#define PCIE_ASPM_L12_ENAB 4 /* ASPM L1.2 in PML1_sub_control2 */
-#define PCIE_EXT_L1SS_MASK 0xf /* Bits [3:0] of L1SSControl 0x248 */
-#define PCIE_EXT_L1SS_ENAB 0xf /* Bits [3:0] of L1SSControl 0x248 */
-
/* NumMsg and NumMsgEn in PCIE MSI Cap */
#define MSICAP_NUM_MSG_SHF 17
#define MSICAP_NUM_MSG_MASK (0x7 << MSICAP_NUM_MSG_SHF)
#define PCIE_LTR0_REG_DEFAULT_150 0x88968896u /* active latency default to 150usec */
#define PCIE_LTR1_REG_DEFAULT 0x88648864u /* idle latency default to 100usec */
#define PCIE_LTR2_REG_DEFAULT 0x90039003u /* sleep latency default to 3msec */
-#define PCIE_LTR_LAT_VALUE_MASK 0x3FF /* LTR Latency mask */
-#define PCIE_LTR_LAT_SCALE_SHIFT 10 /* LTR Scale shift */
-#define PCIE_LTR_LAT_SCALE_MASK 0x1C00 /* LTR Scale mask */
-#define PCIE_LTR_SNOOP_REQ_SHIFT 15 /* LTR SNOOP REQ shift */
-#define PCIE_LTR_SNOOP_REQ_MASK 0x8000 /* LTR SNOOP REQ mask */
/* Status reg PCIE_PLP_STATUSREG */
#define PCIE_PLP_POLARITYINV_STAT 0x10
+
/* PCIE BRCM Vendor CAP REVID reg bits */
#define BRCMCAP_PCIEREV_CT_MASK 0xF00u
#define BRCMCAP_PCIEREV_CT_SHIFT 8u
#define PCIECFGREG_MSI_ADDR_L 0x5C
#define PCIECFGREG_MSI_ADDR_H 0x60
#define PCIECFGREG_MSI_DATA 0x64
-#define PCIECFGREG_SPROM_CTRL 0x88
#define PCIECFGREG_LINK_STATUS_CTRL 0xBCu
-#define PCIECFGREG_LINK_STATUS_CTRL2 0xDCu
#define PCIECFGREG_DEV_STATUS_CTRL 0xB4u
#define PCIECFGGEN_DEV_STATUS_CTRL2 0xD4
+#define PCIECFGREG_LINK_STATUS_CTRL2 0xDCu
#define PCIECFGREG_RBAR_CTRL 0x228
#define PCIECFGREG_PML1_SUB_CTRL1 0x248
#define PCIECFGREG_PML1_SUB_CTRL2 0x24C
#define PCIECFGREG_PHY_DBG_CLKREQ1 0x1E14
#define PCIECFGREG_PHY_DBG_CLKREQ2 0x1E18
#define PCIECFGREG_PHY_DBG_CLKREQ3 0x1E1C
-#define PCIECFGREG_PHY_LTSSM_HIST_0 0x1CEC
-#define PCIECFGREG_PHY_LTSSM_HIST_1 0x1CF0
-#define PCIECFGREG_PHY_LTSSM_HIST_2 0x1CF4
-#define PCIECFGREG_PHY_LTSSM_HIST_3 0x1CF8
-#define PCIECFGREG_TREFUP 0x1814
-#define PCIECFGREG_TREFUP_EXT 0x1818
/* PCIECFGREG_PML1_SUB_CTRL1 Bit Definition */
#define PCI_PM_L1_2_ENA_MASK 0x00000001 /* PCI-PM L1.2 Enabled */
#define PCIH2D_MailBox_2 0x160 /* for dma channel2 which will be used for Implicit DMA */
#define PCIH2D_DB1_2 0x164
#define PCID2H_MailBox_2 0x168
-#define PCIE_CLK_CTRL 0x1E0
-#define PCIE_PWR_CTRL 0x1E8
-
-#define PCIControl(rev) (REV_GE_64(rev) ? 0xC00 : 0x00)
-/* for corerev < 64 idma_en is in PCIControl regsiter */
-#define IDMAControl(rev) (REV_GE_64(rev) ? 0x480 : 0x00)
-#define PCIMailBoxInt(rev) (REV_GE_64(rev) ? 0xC30 : 0x48)
-#define PCIMailBoxMask(rev) (REV_GE_64(rev) ? 0xC34 : 0x4C)
-#define PCIFunctionIntstatus(rev) (REV_GE_64(rev) ? 0xC10 : 0x20)
-#define PCIFunctionIntmask(rev) (REV_GE_64(rev) ? 0xC14 : 0x24)
-#define PCIPowerIntstatus(rev) (REV_GE_64(rev) ? 0xC18 : 0x1A4)
-#define PCIPowerIntmask(rev) (REV_GE_64(rev) ? 0xC1C : 0x1A8)
-#define PCIDARClkCtl(rev) (REV_GE_64(rev) ? 0xA08 : 0xAE0)
-#define PCIDARPwrCtl(rev) (REV_GE_64(rev) ? 0xA0C : 0xAE8)
-#define PCIDARFunctionIntstatus(rev) (REV_GE_64(rev) ? 0xA10 : 0xA20)
-#define PCIDARH2D_DB0(rev) (REV_GE_64(rev) ? 0xA20 : 0xA28)
-#define PCIDARErrlog(rev) (REV_GE_64(rev) ? 0xA60 : 0xA40)
-#define PCIDARErrlog_Addr(rev) (REV_GE_64(rev) ? 0xA64 : 0xA44)
-#define PCIDARMailboxint(rev) (REV_GE_64(rev) ? 0xA68 : 0xA48)
+#define PCIMailBoxInt 0x48
+#define PCIMailBoxMask 0x4C
#define PCIMSIVecAssign 0x58
-/* HMAP Registers */
-/* base of all HMAP window registers */
-#define PCI_HMAP_WINDOW_BASE(rev) (REV_GE_64(rev) ? 0x580u : 0x540u)
-#define PCI_HMAP_VIOLATION_ADDR_L(rev) (REV_GE_64(rev) ? 0x600u : 0x5C0u)
-#define PCI_HMAP_VIOLATION_ADDR_U(rev) (REV_GE_64(rev) ? 0x604u : 0x5C4u)
-#define PCI_HMAP_VIOLATION_INFO(rev) (REV_GE_64(rev) ? 0x608u : 0x5C8u)
-#define PCI_HMAP_WINDOW_CONFIG(rev) (REV_GE_64(rev) ? 0x610u : 0x5D0u)
-#define PCI_HMAP_NWINDOWS_SHIFT 8
-#define PCI_HMAP_NWINDOWS_MASK 0x0000ff00 /* bits 8:15 */
-
#define I_F0_B0 (0x1 << 8) /* Mail box interrupt Function 0 interrupt, bit 0 */
#define I_F0_B1 (0x1 << 9) /* Mail box interrupt Function 0 interrupt, bit 1 */
#define PCIECFGREG_DEVCONTROL 0xB4
#define PCIECFGREG_BASEADDR0 0x10
-#define PCIECFGREG_BASEADDR1 0x18
#define PCIECFGREG_DEVCONTROL_MRRS_SHFT 12
#define PCIECFGREG_DEVCONTROL_MRRS_MASK (0x7 << PCIECFGREG_DEVCONTROL_MRRS_SHFT)
#define PCIECFGREG_DEVCTRL_MPS_SHFT 5
#define PCIECFGREG_PM_CSR_STATE_D3_HOT 3
#define PCIECFGREG_PM_CSR_STATE_D3_COLD 4
-/* Direct Access regs */
-#define DAR_ERRADDR(rev) (REV_GE_64(rev) ? \
- OFFSETOF(sbpcieregs_t, u1.dar_64.erraddr) : \
- OFFSETOF(sbpcieregs_t, u1.dar.erraddr))
-#define DAR_ERRLOG(rev) (REV_GE_64(rev) ? \
- OFFSETOF(sbpcieregs_t, u1.dar_64.errlog) : \
- OFFSETOF(sbpcieregs_t, u1.dar.errlog))
-#define DAR_PCIH2D_DB0_0(rev) (REV_GE_64(rev) ? \
- OFFSETOF(sbpcieregs_t, u1.dar_64.h2d_db_0_0) : \
- OFFSETOF(sbpcieregs_t, u1.dar.h2d_db_0_0))
-#define DAR_PCIH2D_DB0_1(rev) (REV_GE_64(rev) ? \
- OFFSETOF(sbpcieregs_t, u1.dar_64.h2d_db_0_1) : \
- OFFSETOF(sbpcieregs_t, u1.dar.h2d_db_0_1))
-#define DAR_PCIH2D_DB1_0(rev) (REV_GE_64(rev) ? \
- OFFSETOF(sbpcieregs_t, u1.dar_64.h2d_db_1_0) : \
- OFFSETOF(sbpcieregs_t, u1.dar.h2d_db_1_0))
-#define DAR_PCIH2D_DB1_1(rev) (REV_GE_64(rev) ? \
- OFFSETOF(sbpcieregs_t, u1.dar_64.h2d_db_1_1) : \
- OFFSETOF(sbpcieregs_t, u1.dar.h2d_db_1_1))
-#define DAR_PCIH2D_DB2_0(rev) (REV_GE_64(rev) ? \
- OFFSETOF(sbpcieregs_t, u1.dar_64.h2d_db_2_0) : \
- OFFSETOF(sbpcieregs_t, u1.dar.h2d_db_2_0))
-#define DAR_PCIH2D_DB2_1(rev) (REV_GE_64(rev) ? \
- OFFSETOF(sbpcieregs_t, u1.dar_64.h2d_db_2_1) : \
- OFFSETOF(sbpcieregs_t, u1.dar.h2d_db_2_1))
-#define DAR_PCIH2D_DB3_0(rev) OFFSETOF(sbpcieregs_t, u1.dar_64.h2d_db_3_0)
-#define DAR_PCIH2D_DB3_1(rev) OFFSETOF(sbpcieregs_t, u1.dar_64.h2d_db_3_1)
-#define DAR_PCIH2D_DB4_0(rev) OFFSETOF(sbpcieregs_t, u1.dar_64.h2d_db_4_0)
-#define DAR_PCIH2D_DB4_1(rev) OFFSETOF(sbpcieregs_t, u1.dar_64.h2d_db_4_1)
-#define DAR_PCIH2D_DB5_0(rev) OFFSETOF(sbpcieregs_t, u1.dar_64.h2d_db_5_0)
-#define DAR_PCIH2D_DB5_1(rev) OFFSETOF(sbpcieregs_t, u1.dar_64.h2d_db_5_1)
-#define DAR_PCIH2D_DB6_0(rev) OFFSETOF(sbpcieregs_t, u1.dar_64.h2d_db_6_0)
-#define DAR_PCIH2D_DB6_1(rev) OFFSETOF(sbpcieregs_t, u1.dar_64.h2d_db_6_1)
-#define DAR_PCIH2D_DB7_0(rev) OFFSETOF(sbpcieregs_t, u1.dar_64.h2d_db_7_0)
-#define DAR_PCIH2D_DB7_1(rev) OFFSETOF(sbpcieregs_t, u1.dar_64.h2d_db_7_1)
-
-#define DAR_PCIMailBoxInt(rev) (REV_GE_64(rev) ? \
- OFFSETOF(sbpcieregs_t, u1.dar_64.mbox_int) : \
- OFFSETOF(sbpcieregs_t, u1.dar.mbox_int))
-#define DAR_PCIE_PWR_CTRL(rev) (REV_GE_64(rev) ? \
- OFFSETOF(sbpcieregs_t, u1.dar_64.powerctl) : \
- OFFSETOF(sbpcieregs_t, u1.dar.powerctl))
-#define DAR_CLK_CTRL(rev) (REV_GE_64(rev) ? \
- OFFSETOF(sbpcieregs_t, u1.dar_64.clk_ctl_st) : \
- OFFSETOF(sbpcieregs_t, u1.dar.clk_ctl_st))
-#define DAR_INTSTAT(rev) (REV_GE_64(rev) ? \
- OFFSETOF(sbpcieregs_t, u1.dar_64.intstatus) : \
- OFFSETOF(sbpcieregs_t, u1.dar.intstatus))
-
-#define DAR_FIS_CTRL(rev) OFFSETOF(sbpcieregs_t, u1.dar_64.fis_ctrl)
-
-#define DAR_FIS_START_SHIFT 0u
-#define DAR_FIS_START_MASK (1u << DAR_FIS_START_SHIFT)
-
-#define PCIE_PWR_REQ_PCIE (0x1 << 8)
/* SROM hardware region */
#define SROM_OFFSET_BAR1_CTRL 52
#define PCIEGEN2_IOC_L1_LINK_SHIFT 13
#define PCIEGEN2_IOC_L1L2_LINK_SHIFT 14
#define PCIEGEN2_IOC_L2_L3_LINK_SHIFT 15
-#define PCIEGEN2_IOC_BME_SHIFT 20
#define PCIEGEN2_IOC_D0_STATE_MASK (1 << PCIEGEN2_IOC_D0_STATE_SHIFT)
#define PCIEGEN2_IOC_D1_STATE_MASK (1 << PCIEGEN2_IOC_D1_STATE_SHIFT)
#define PCIEGEN2_IOC_L1_LINK_MASK (1 << PCIEGEN2_IOC_L1_LINK_SHIFT)
#define PCIEGEN2_IOC_L1L2_LINK_MASK (1 << PCIEGEN2_IOC_L1L2_LINK_SHIFT)
#define PCIEGEN2_IOC_L2_L3_LINK_MASK (1 << PCIEGEN2_IOC_L2_L3_LINK_SHIFT)
-#define PCIEGEN2_IOC_BME_MASK (1 << PCIEGEN2_IOC_BME_SHIFT)
/* stat_ctrl */
#define PCIE_STAT_CTRL_RESET 0x1
#define PCIE_STAT_CTRL_INTENABLE 0x4
#define PCIE_STAT_CTRL_INTSTATUS 0x8
-/* SPROMControl */
-#define PCIE_BAR1COHERENTACCEN (1 << 8)
-#define PCIE_BAR2COHERENTACCEN (1 << 9)
-
/* cpl_timeout_ctrl_reg */
#define PCIE_CTO_TO_THRESHOLD_SHIFT 0
#define PCIE_CTO_TO_THRESHHOLD_MASK (0xfffff << PCIE_CTO_TO_THRESHOLD_SHIFT)
#define PCIE_BP_IN_RESET_ERR_MASK (0x1 << PCIE_BP_IN_RESET_ERR_SHIFT)
#ifdef BCMDRIVER
-void pcie_watchdog_reset(osl_t *osh, si_t *sih, uint32 wd_mask, uint32 wd_val);
+void pcie_watchdog_reset(osl_t *osh, si_t *sih, sbpcieregs_t *sbpcieregs);
void pcie_serdes_iddqdisable(osl_t *osh, si_t *sih, sbpcieregs_t *sbpcieregs);
void pcie_set_trefup_time_100us(si_t *sih);
#endif /* BCMDRIVER */
/*
* HND Run Time Environment ioctl.
*
- * Copyright (C) 1999-2019, Broadcom.
- *
+ * Copyright (C) 1999-2017, Broadcom Corporation
+ *
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
* under the terms of the GNU General Public License version 2 (the "GPL"),
* available at http://www.broadcom.com/licenses/GPLv2.php, with the
* following added to such license:
- *
+ *
* As a special exception, the copyright holders of this software give you
* permission to link this software with independent modules, and to copy and
* distribute the resulting executable under terms of your choice, provided that
* the license of that module. An independent module is a module which is not
* derived from this software. The special exception does not apply to any
* modifications of the software.
- *
+ *
* Notwithstanding the above, under no circumstances may you combine this
* software in any way with any other Broadcom software provided under a license
* other than the GPL, without Broadcom's express prior written consent.
*
* <<Broadcom-WL-IPTag/Open:>>
*
- * $Id: rte_ioctl.h 699094 2017-05-11 22:41:10Z $
+ * $Id: rte_ioctl.h 615249 2016-01-27 02:04:07Z $
*/
#ifndef _rte_ioctl_h_
#define RTEGPERMADDR 0x890B
#define RTEDEVPWRSTCHG 0x890C /* Device pwr state change for PCIedev */
#define RTEDEVPMETOGGLE 0x890D /* Toggle PME# to wake up the host */
-#define RTEDEVTIMESYNC 0x890E /* Device TimeSync */
-#define RTEDEVDSNOTIFY 0x890F /* Bus DS state notification */
-#define RTED11DMALPBK_INIT 0x8910 /* D11 DMA loopback init */
-#define RTED11DMALPBK_UNINIT 0x8911 /* D11 DMA loopback uninit */
-#define RTED11DMALPBK_RUN 0x8912 /* D11 DMA loopback run */
-#define RTEDEVTSBUFPOST 0x8913 /* Async interface for tsync buffer post */
#define RTE_IOCTL_QUERY 0x00
#define RTE_IOCTL_SET 0x01
BUS_FLUSH_CHAINED_PKTS = 6,
BUS_SET_COPY_COUNT = 7,
BUS_UPDATE_FLOW_PKTS_MAX = 8,
- BUS_UPDATE_EXTRA_TXLFRAGS = 9,
- BUS_UPDATE_FRWD_RESRV_BUFCNT = 10,
- BUS_PCIE_CONFIG_ACCESS = 11
+ BUS_UPDATE_EXTRA_TXLFRAGS = 9
};
#define SDPCMDEV_SET_MAXTXPKTGLOM 1
-#define RTE_MEMUSEINFO_VER 0x00
typedef struct memuse_info {
uint16 ver; /* version of this struct */
uint32 inuse_hwm; /* High watermark of memory - reclaimed memory */
uint32 inuse_overhead; /* tally of allocated mem_t blocks */
uint32 inuse_total; /* Heap in-use + Heap overhead memory */
- uint32 free_lwm; /* Least free size since reclaim */
- uint32 mf_count; /* Malloc failure count */
} memuse_info_t;
-/* For D11 DMA loopback test */
-typedef struct d11_dmalpbk_args {
- uint8 *buf;
- int32 len;
-} d11_dmalpbk_args_t;
#endif /* _rte_ioctl_h_ */
* JTAG, 0/1/2 UARTs, clock frequency control, a watchdog interrupt timer,
* GPIO interface, extbus, and support for serial and parallel flashes.
*
- * $Id: sbchipc.h 825481 2019-06-14 10:06:03Z $
- *
- * Copyright (C) 1999-2019, Broadcom.
+ * $Id: sbchipc.h 657872 2016-09-02 22:17:34Z $
*
+ * Copyright (C) 1999-2017, Broadcom Corporation
+ *
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
* under the terms of the GNU General Public License version 2 (the "GPL"),
* available at http://www.broadcom.com/licenses/GPLv2.php, with the
* following added to such license:
- *
+ *
* As a special exception, the copyright holders of this software give you
* permission to link this software with independent modules, and to copy and
* distribute the resulting executable under terms of your choice, provided that
* the license of that module. An independent module is a module which is not
* derived from this software. The special exception does not apply to any
* modifications of the software.
- *
+ *
* Notwithstanding the above, under no circumstances may you combine this
* software in any way with any other Broadcom software provided under a license
* other than the GPL, without Broadcom's express prior written consent.
#define PAD _XSTR(__LINE__)
#endif /* PAD */
-#define BCM_MASK32(msb, lsb) ((~0u >> (32u - (msb) - 1u)) & (~0u << (lsb)))
-
/**
* In chipcommon rev 49 the pmu registers have been moved from chipc to the pmu core if the
* 'AOBPresent' bit of 'CoreCapabilitiesExt' is set. If this field is set, the traditional chipc to
*/
typedef volatile struct {
uint32 PAD[384];
- uint32 pmucontrol; /* 0x600 */
- uint32 pmucapabilities; /* 0x604 */
- uint32 pmustatus; /* 0x608 */
- uint32 res_state; /* 0x60C */
- uint32 res_pending; /* 0x610 */
- uint32 pmutimer; /* 0x614 */
- uint32 min_res_mask; /* 0x618 */
- uint32 max_res_mask; /* 0x61C */
- uint32 res_table_sel; /* 0x620 */
- uint32 res_dep_mask;
- uint32 res_updn_timer;
- uint32 res_timer;
- uint32 clkstretch;
- uint32 pmuwatchdog;
- uint32 gpiosel; /* 0x638, rev >= 1 */
- uint32 gpioenable; /* 0x63c, rev >= 1 */
- uint32 res_req_timer_sel; /* 0x640 */
- uint32 res_req_timer; /* 0x644 */
- uint32 res_req_mask; /* 0x648 */
- uint32 core_cap_ext; /* 0x64C */
- uint32 chipcontrol_addr; /* 0x650 */
- uint32 chipcontrol_data; /* 0x654 */
- uint32 regcontrol_addr;
- uint32 regcontrol_data;
- uint32 pllcontrol_addr;
- uint32 pllcontrol_data;
- uint32 pmustrapopt; /* 0x668, corerev >= 28 */
- uint32 pmu_xtalfreq; /* 0x66C, pmurev >= 10 */
- uint32 retention_ctl; /* 0x670 */
- uint32 ILPPeriod; /* 0x674 */
+ uint32 pmucontrol; /* 0x600 */
+ uint32 pmucapabilities; /* 0x604 */
+ uint32 pmustatus; /* 0x608 */
+ uint32 res_state; /* 0x60C */
+ uint32 res_pending; /* 0x610 */
+ uint32 pmutimer; /* 0x614 */
+ uint32 min_res_mask; /* 0x618 */
+ uint32 max_res_mask; /* 0x61C */
+ uint32 res_table_sel; /* 0x620 */
+ uint32 res_dep_mask;
+ uint32 res_updn_timer;
+ uint32 res_timer;
+ uint32 clkstretch;
+ uint32 pmuwatchdog;
+ uint32 gpiosel; /* 0x638, rev >= 1 */
+ uint32 gpioenable; /* 0x63c, rev >= 1 */
+ uint32 res_req_timer_sel; /* 0x640 */
+ uint32 res_req_timer; /* 0x644 */
+ uint32 res_req_mask; /* 0x648 */
+ uint32 core_cap_ext; /* 0x64C */
+ uint32 chipcontrol_addr; /* 0x650 */
+ uint32 chipcontrol_data; /* 0x654 */
+ uint32 regcontrol_addr;
+ uint32 regcontrol_data;
+ uint32 pllcontrol_addr;
+ uint32 pllcontrol_data;
+ uint32 pmustrapopt; /* 0x668, corerev >= 28 */
+ uint32 pmu_xtalfreq; /* 0x66C, pmurev >= 10 */
+ uint32 retention_ctl; /* 0x670 */
+ uint32 ILPPeriod; /* 0x674 */
uint32 PAD[2];
- uint32 retention_grpidx; /* 0x680 */
- uint32 retention_grpctl; /* 0x684 */
- uint32 mac_res_req_timer; /* 0x688 */
- uint32 mac_res_req_mask; /* 0x68c */
+ uint32 retention_grpidx; /* 0x680 */
+ uint32 retention_grpctl; /* 0x684 */
+ uint32 mac_res_req_timer; /* 0x688 */
+ uint32 mac_res_req_mask; /* 0x68c */
uint32 PAD[18];
- uint32 pmucontrol_ext; /* 0x6d8 */
- uint32 slowclkperiod; /* 0x6dc */
- uint32 pmu_statstimer_addr; /* 0x6e0 */
- uint32 pmu_statstimer_ctrl; /* 0x6e4 */
- uint32 pmu_statstimer_N; /* 0x6e8 */
- uint32 PAD[1];
- uint32 mac_res_req_timer1; /* 0x6f0 */
- uint32 mac_res_req_mask1; /* 0x6f4 */
- uint32 PAD[2];
- uint32 pmuintmask0; /* 0x700 */
- uint32 pmuintmask1; /* 0x704 */
+ uint32 pmucontrol_ext; /* 0x6d8 */
+ uint32 slowclkperiod; /* 0x6dc */
+ uint32 PAD[8];
+ uint32 pmuintmask0; /* 0x700 */
+ uint32 pmuintmask1; /* 0x704 */
uint32 PAD[14];
- uint32 pmuintstatus; /* 0x740 */
- uint32 extwakeupstatus; /* 0x744 */
- uint32 watchdog_res_mask; /* 0x748 */
- uint32 PAD[1]; /* 0x74C */
- uint32 swscratch; /* 0x750 */
- uint32 PAD[3]; /* 0x754-0x75C */
- uint32 extwakemask0; /* 0x760 */
- uint32 extwakemask1; /* 0x764 */
- uint32 PAD[2]; /* 0x768-0x76C */
- uint32 extwakereqmask[2]; /* 0x770-0x774 */
- uint32 PAD[2]; /* 0x778-0x77C */
- uint32 pmuintctrl0; /* 0x780 */
- uint32 pmuintctrl1; /* 0x784 */
- uint32 PAD[2];
- uint32 extwakectrl[2]; /* 0x790 */
- uint32 PAD[7];
- uint32 fis_ctrl_status; /* 0x7b4 */
- uint32 fis_min_res_mask; /* 0x7b8 */
- uint32 PAD[1];
- uint32 PrecisionTmrCtrlStatus; /* 0x7c0 */
+ uint32 pmuintstatus; /* 0x740 */
+ uint32 extwakeupstatus; /* 0x744 */
+ uint32 watchdog_res_mask; /* 0x748 */
+ uint32 PAD[1]; /* 0x74C */
+ uint32 swscratch; /* 0x750 */
+ uint32 PAD[3]; /* 0x754-0x75C */
+ uint32 extwakemask[2]; /* 0x760-0x764 */
+ uint32 PAD[2]; /* 0x768-0x76C */
+ uint32 extwakereqmask[2]; /* 0x770-0x774 */
+ uint32 PAD[2]; /* 0x778-0x77C */
+ uint32 pmuintctrl0; /* 0x780 */
+ uint32 pmuintctrl1; /* 0x784 */
+ uint32 PAD[2];
+ uint32 extwakectrl[2]; /* 0x790 */
} pmuregs_t;
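/*
 * Hedged sketch of how the AOBPresent note above is typically acted on (not
 * part of the patch): read CoreCapabilitiesExt (core_cap_ext, 0x64C), test
 * its 'AOBPresent' bit (the bit position is not shown here, so it stays
 * symbolic), and address the PMU either through the legacy chipc window or
 * through the separate PMU core.  si_setcore()/PMU_CORE_ID are assumed to be
 * available elsewhere in this driver.
 *
 *     if (core_cap_ext & AOB_PRESENT_BIT)                    // symbolic mask
 *             pmu = (pmuregs_t *)si_setcore(sih, PMU_CORE_ID, 0);
 *     else
 *             pmu = (pmuregs_t *)chipc_regs;  // PAD[384] skips the chipc part
 */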
typedef struct eci_prerev35 {
uint8 uart1lsr;
uint8 uart1msr;
uint8 uart1scratch; /* 0x407 */
- uint32 PAD[50];
- uint32 sr_memrw_addr; /* 0x4d0 */
- uint32 sr_memrw_data; /* 0x4d4 */
- uint32 PAD[10];
+ uint32 PAD[62];
/* save/restore, corerev >= 48 */
uint32 sr_capability; /* 0x500 */
uint32 pmustrapopt; /* 0x668, corerev >= 28 */
uint32 pmu_xtalfreq; /* 0x66C, pmurev >= 10 */
uint32 retention_ctl; /* 0x670 */
- uint32 ILPPeriod; /* 0x674 */
- uint32 PAD[2];
+ uint32 PAD[3];
uint32 retention_grpidx; /* 0x680 */
uint32 retention_grpctl; /* 0x684 */
- uint32 mac_res_req_timer; /* 0x688 */
- uint32 mac_res_req_mask; /* 0x68c */
- uint32 PAD[18];
+ uint32 PAD[20];
uint32 pmucontrol_ext; /* 0x6d8 */
uint32 slowclkperiod; /* 0x6dc */
- uint32 pmu_statstimer_addr; /* 0x6e0 */
- uint32 pmu_statstimer_ctrl; /* 0x6e4 */
- uint32 pmu_statstimer_N; /* 0x6e8 */
- uint32 PAD[1];
- uint32 mac_res_req_timer1; /* 0x6f0 */
- uint32 mac_res_req_mask1; /* 0x6f4 */
- uint32 PAD[2];
+ uint32 PAD[8];
uint32 pmuintmask0; /* 0x700 */
uint32 pmuintmask1; /* 0x704 */
uint32 PAD[14];
uint32 pmuintstatus; /* 0x740 */
- uint32 extwakeupstatus; /* 0x744 */
- uint32 PAD[6];
- uint32 extwakemask0; /* 0x760 */
- uint32 extwakemask1; /* 0x764 */
- uint32 PAD[2]; /* 0x768-0x76C */
- uint32 extwakereqmask[2]; /* 0x770-0x774 */
- uint32 PAD[2]; /* 0x778-0x77C */
+ uint32 PAD[15];
uint32 pmuintctrl0; /* 0x780 */
- uint32 PAD[3]; /* 0x784 - 0x78c */
- uint32 extwakectrl[1]; /* 0x790 */
- uint32 PAD[8];
- uint32 fis_ctrl_status; /* 0x7b4 */
- uint32 fis_min_res_mask; /* 0x7b8 */
- uint32 PAD[17];
+ uint32 PAD[31];
uint16 sromotp[512]; /* 0x800 */
#ifdef CCNFLASH_SUPPORT
/* Nand flash MLC controller registers (corerev >= 38) */
uint32 gci_gpioctl; /* 0xC44 */
uint32 gci_gpiostatus;
uint32 gci_gpiomask; /* 0xC4C */
- uint32 gci_eventsummary; /* 0xC50 */
+ uint32 PAD;
uint32 gci_miscctl; /* 0xC54 */
uint32 gci_gpiointmask;
uint32 gci_gpiowakemask;
#endif /* !_LANGUAGE_ASSEMBLY && !__ASSEMBLY__ */
+
#define CC_CHIPID 0
#define CC_CAPABILITIES 4
#define CC_CHIPST 0x2c
#define PMU_PLL_CONTROL_DATA 0x664
#define CC_SROM_CTRL 0x190
-#define CC_SROM_ADDRESS 0x194u
-#define CC_SROM_DATA 0x198u
#ifdef SROM16K_4364_ADDRSPACE
#define CC_SROM_OTP 0xa000 /* SROM/OTP address space */
#else
#define CC_SROM_OTP 0x0800
-#endif // endif
+#endif
#define CC_GCI_INDIRECT_ADDR_REG 0xC40
#define CC_GCI_CHIP_CTRL_REG 0xE00
#define CC_GCI_CC_OFFSET_2 2
#define CC_SWD_CTRL 0x380
#define CC_SWD_REQACK 0x384
#define CC_SWD_DATA 0x388
-#define GPIO_SEL_0 0x00001111
-#define GPIO_SEL_1 0x11110000
-#define GPIO_SEL_8 0x00001111
-#define GPIO_SEL_9 0x11110000
#define CHIPCTRLREG0 0x0
#define CHIPCTRLREG1 0x1
#define CC_BP_IND_ACCESS_ERROR_SHIFT 10
#define CC_BP_IND_ACCESS_ERROR_MASK (1 << CC_BP_IND_ACCESS_ERROR_SHIFT)
-#define LPO_SEL_TIMEOUT 1000
-
-#define LPO_FINAL_SEL_SHIFT 18
-
-#define LHL_LPO1_SEL 0
-#define LHL_LPO2_SEL 0x1
-#define LHL_32k_SEL 0x2
-#define LHL_EXT_SEL 0x3
-
-#define EXTLPO_BUF_PD 0x40
-#define LPO1_PD_EN 0x1
-#define LPO1_PD_SEL 0x6
-#define LPO1_PD_SEL_VAL 0x4
-#define LPO2_PD_EN 0x8
-#define LPO2_PD_SEL 0x30
-#define LPO2_PD_SEL_VAL 0x20
-#define OSC_32k_PD 0x80
-
-#define LHL_CLK_DET_CTL_AD_CNTR_CLK_SEL 0x3
-
-#define LHL_LPO_AUTO 0x0
-#define LHL_LPO1_ENAB 0x1
-#define LHL_LPO2_ENAB 0x2
-#define LHL_OSC_32k_ENAB 0x3
-#define LHL_EXT_LPO_ENAB 0x4
-#define RADIO_LPO_ENAB 0x5
-
-#define LHL_CLK_DET_CTL_ADR_LHL_CNTR_EN 0x4
-#define LHL_CLK_DET_CTL_ADR_LHL_CNTR_CLR 0x8
-#define LHL_CLK_DET_CNT 0xF0
-#define LHL_CLK_DET_CNT_SHIFT 4
-#define LPO_SEL_SHIFT 9
-
-#define LHL_MAIN_CTL_ADR_FINAL_CLK_SEL 0x3C0000
-#define LHL_MAIN_CTL_ADR_LHL_WLCLK_SEL 0x600
-
-#define CLK_DET_CNT_THRESH 8
-
#ifdef SR_DEBUG
#define SUBCORE_POWER_ON 0x0001
#define PHY_POWER_ON 0x0010
#define GCI_WL_CHN_INFO_MASK (0xFF00)
/* WL indication of MCHAN enabled/disabled to BT in awdl mode- bit 36 */
#define GCI_WL_MCHAN_BIT_MASK (0x0010)
-
-#ifdef WLC_SW_DIVERSITY
-/* WL indication of SWDIV enabled/disabled to BT - bit 33 */
-#define GCI_WL_SWDIV_ANT_VALID_BIT_MASK (0x0002)
-#define GCI_SWDIV_ANT_VALID_SHIFT 0x1
-#define GCI_SWDIV_ANT_VALID_DISABLE 0x0
-#endif // endif
-
/* WL Strobe to BT */
#define GCI_WL_STROBE_BIT_MASK (0x0020)
/* bits [51:48] - reserved for wlan TX pwr index */
#define RCTL_MEM_RET_SLEEP_LOG_MASK (1 << RCTL_MEM_RET_SLEEP_LOG_SHIFT)
/* 4321 chipcontrol */
+#define CHIPCTRL_4321A0_DEFAULT 0x3a4
+#define CHIPCTRL_4321A1_DEFAULT 0x0a4
#define CHIPCTRL_4321_PLL_DOWN 0x800000 /**< serdes PLL down override */
/* Fields in the otpstatus register in rev >= 21 */
/* Fields in otplayoutextension */
#define OTPLAYOUTEXT_FUSE_MASK 0x3FF
+
/* Jtagm characteristics that appeared at a given corerev */
#define JTAGM_CREV_OLD 10 /**< Old command set, 16bit max IR */
#define JTAGM_CREV_IRP 22 /**< Able to do pause-ir */
#define CLKD_JTAG_SHIFT 8
#define CLKD_UART 0x000000ff
-#define CLKD2_SROM 0x00000007
-#define CLKD2_SROMDIV_32 0
-#define CLKD2_SROMDIV_64 1
-#define CLKD2_SROMDIV_96 2
-#define CLKD2_SROMDIV_128 3
-#define CLKD2_SROMDIV_192 4
-#define CLKD2_SROMDIV_256 5
-#define CLKD2_SROMDIV_384 6
-#define CLKD2_SROMDIV_512 7
+#define CLKD2_SROM 0x00000003
#define CLKD2_SWD 0xf8000000
#define CLKD2_SWD_SHIFT 27
#define CI_ECI 0x00000010 /**< eci intr (corerev >= 21) */
#define CI_PMU 0x00000020 /**< pmu intr (corerev >= 21) */
#define CI_UART 0x00000040 /**< uart intr (corerev >= 21) */
-#define CI_WECI 0x00000080 /* eci wakeup intr (corerev >= 21) */
#define CI_WDRESET 0x80000000 /**< watchdog reset occurred */
/* slow_clk_ctl */
#define SYCC_CD_MASK 0xffff0000 /**< ClkDiv (ILP = 1/(4 * (divisor + 1)) */
#define SYCC_CD_SHIFT 16
-/* watchdogcounter */
-/* WL sub-system reset */
-#define WD_SSRESET_PCIE_F0_EN 0x10000000
-/* BT sub-system reset */
-#define WD_SSRESET_PCIE_F1_EN 0x20000000
-#define WD_SSRESET_PCIE_F2_EN 0x40000000
-/* Both WL and BT sub-system reset */
-#define WD_SSRESET_PCIE_ALL_FN_EN 0x80000000
-#define WD_COUNTER_MASK 0x0fffffff
-#define WD_ENABLE_MASK \
- (WD_SSRESET_PCIE_F0_EN | WD_SSRESET_PCIE_F1_EN | \
- WD_SSRESET_PCIE_F2_EN | WD_SSRESET_PCIE_ALL_FN_EN)
-
/* Indirect backplane access */
#define BPIA_BYTEEN 0x0000000f
#define BPIA_SZ1 0x00000001
#define PCTL_LPO_SEL 0x00000001
/* Fields in pmucontrol_ext */
-#define PCTL_EXT_USE_LHL_TIMER 0x00000010
-#define PCTL_EXT_FASTLPO_ENAB 0x00000080
+#define PCTL_EXT_FASTLPO_ENAB 0x00000080
#define PCTL_EXT_FASTLPO_SWENAB 0x00000200
-#define PCTL_EXT_FASTSEQ_ENAB 0x00001000
#define PCTL_EXT_FASTLPO_PCIE_SWENAB 0x00004000 /**< rev33 for FLL1M */
#define DEFAULT_43012_MIN_RES_MASK 0x0f8bfe77
#define PMU_RCTLGRP_DFT_ENABLE_MASK (1 << 15)
#define PMU_RCTLGRP_NSRST_DISABLE_SHIFT 16
#define PMU_RCTLGRP_NSRST_DISABLE_MASK (1 << 16)
+/* Retention Group Control special for 4334 */
+#define PMU4334_RCTLGRP_CHAIN_LEN_GRP0 338
+#define PMU4334_RCTLGRP_CHAIN_LEN_GRP1 315
+/* Retention Group Control special for 43341 */
+#define PMU43341_RCTLGRP_CHAIN_LEN_GRP0 366
+#define PMU43341_RCTLGRP_CHAIN_LEN_GRP1 330
/* Fields in clkstretch */
#define CSTRETCH_HT 0xffff0000
#define CSTRETCH_ALP 0x0000ffff
-#define CSTRETCH_REDUCE_8 0x00080008
/* gpiotimerval */
#define GPIO_ONTIME_SHIFT 16
#define CC_EB_PCMCIA1_CFG 0x1ac00000 /**< PCMCIA 1 config base address */
#define CC_EB_PROGIF 0x1b000000 /**< ProgIF Async/Sync base address */
+
/* Start/busy bit in flashcontrol */
#define SFLASH_OPCODE 0x000000ff
#define SFLASH_ACTION 0x00000700
#define GSIO_START 0x80000000
#define GSIO_BUSY GSIO_START
-/* GCI UART Function sel related */
-#define MUXENAB_GCI_UART_MASK (0x00000f00)
-#define MUXENAB_GCI_UART_SHIFT 8
-#define MUXENAB_GCI_UART_FNSEL_MASK (0x00003000)
-#define MUXENAB_GCI_UART_FNSEL_SHIFT 12
-
/*
* These are the UART port assignments, expressed as offsets from the base
* register. These assignments should hold for any serial port based on
#define PCAP5_CC_MASK 0xf8000000
#define PCAP5_CC_SHIFT 27
-/* pmucapabilities ext */
-#define PCAP_EXT_ST_NUM_SHIFT (8) /* stat timer number */
-#define PCAP_EXT_ST_NUM_MASK (0xf << PCAP_EXT_ST_NUM_SHIFT)
-#define PCAP_EXT_ST_SRC_NUM_SHIFT (12) /* stat timer source number */
-#define PCAP_EXT_ST_SRC_NUM_MASK (0xf << PCAP_EXT_ST_SRC_NUM_SHIFT)
-
-/* pmustattimer ctrl */
-#define PMU_ST_SRC_SHIFT (0) /* stat timer source number */
-#define PMU_ST_SRC_MASK (0xff << PMU_ST_SRC_SHIFT)
-#define PMU_ST_CNT_MODE_SHIFT (10) /* stat timer count mode */
-#define PMU_ST_CNT_MODE_MASK (0x3 << PMU_ST_CNT_MODE_SHIFT)
-#define PMU_ST_EN_SHIFT (8) /* stat timer enable */
-#define PMU_ST_EN_MASK (0x1 << PMU_ST_EN_SHIFT)
-#define PMU_ST_ENAB 1
-#define PMU_ST_DISAB 0
-#define PMU_ST_INT_EN_SHIFT (9) /* stat timer enable */
-#define PMU_ST_INT_EN_MASK (0x1 << PMU_ST_INT_EN_SHIFT)
-#define PMU_ST_INT_ENAB 1
-#define PMU_ST_INT_DISAB 0
-
/* CoreCapabilitiesExtension */
#define PCAP_EXT_USE_MUXED_ILP_CLK_MASK 0x04000000
/* This is based on PmuRev0 */
#define PRRT_TIME_MASK 0x03ff
#define PRRT_INTEN 0x0400
-/* ReqActive 25
- * The hardware sets this field to 1 when the timer expires.
- * Software writes this field to 1 to make immediate resource requests.
- */
-#define PRRT_REQ_ACTIVE 0x0800 /* To check h/w status */
-#define PRRT_IMMEDIATE_RES_REQ 0x0800 /* macro for sw immediate res req */
+#define PRRT_REQ_ACTIVE 0x0800
#define PRRT_ALP_REQ 0x1000
#define PRRT_HT_REQ 0x2000
#define PRRT_HQ_REQ 0x4000
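/*
 * Illustrative helper (hypothetical name, not part of the patch): shows how
 * the res_req_timer fields above compose.  Whether a given chip should raise
 * an ALP or an HT request is chip-specific and outside this sketch.
 */
static inline uint32 prrt_pack_ht_req(uint32 time_units)
{
	/* timer value plus interrupt enable and an HT clock request */
	return ((time_units & PRRT_TIME_MASK) | PRRT_INTEN | PRRT_HT_REQ);
}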
/* bit 0 of the PMU interrupt vector is asserted if this mask is enabled */
#define RSRC_INTR_MASK_TIMER_INT_0 1
-#define PMU_INTR_MASK_EXTWAKE_REQ_ACTIVE_0 (1 << 20)
-
-/* bit 16 of the PMU interrupt vector - Stats Timer Interrupt */
-#define PMU_INT_STAT_TIMER_INT_SHIFT 16
-#define PMU_INT_STAT_TIMER_INT_MASK (1 << PMU_INT_STAT_TIMER_INT_SHIFT)
/* PMU resource bit position */
#define PMURES_BIT(bit) (1 << (bit))
/* PMU chip control0 register */
#define PMU_CHIPCTL0 0
-
-#define PMU_CC0_4369_XTALCORESIZE_BIAS_ADJ_START_VAL (0x20 << 0)
-#define PMU_CC0_4369_XTALCORESIZE_BIAS_ADJ_START_MASK (0x3F << 0)
-#define PMU_CC0_4369_XTALCORESIZE_BIAS_ADJ_NORMAL_VAL (0xF << 6)
-#define PMU_CC0_4369_XTALCORESIZE_BIAS_ADJ_NORMAL_MASK (0x3F << 6)
-#define PMU_CC0_4369_XTAL_RES_BYPASS_START_VAL (0 << 12)
-#define PMU_CC0_4369_XTAL_RES_BYPASS_START_MASK (0x7 << 12)
-#define PMU_CC0_4369_XTAL_RES_BYPASS_NORMAL_VAL (0x1 << 15)
-#define PMU_CC0_4369_XTAL_RES_BYPASS_NORMAL_MASK (0x7 << 15)
+#define PMU43143_CC0_SDIO_DRSTR_OVR (1 << 31) /* sdio drive strength override enable */
/* clock req types */
#define PMU_CC1_CLKREQ_TYPE_SHIFT 19
#define CLKREQ_TYPE_CONFIG_OPENDRAIN 0
#define CLKREQ_TYPE_CONFIG_PUSHPULL 1
-/* Power Control */
-#define PWRCTL_ENAB_MEM_CLK_GATE_SHIFT 5
-#define PWRCTL_AUTO_MEM_STBYRET 28
-
/* PMU chip control1 register */
#define PMU_CHIPCTL1 1
#define PMU_CC1_RXC_DLL_BYPASS 0x00010000
#define PMU_CC1_ENABLE_CLOSED_LOOP_MASK 0x00000080
#define PMU_CC1_ENABLE_CLOSED_LOOP 0x00000000
-#define PMU_CC1_PWRSW_CLKSTRSTP_DELAY_MASK 0x00003F00u
-#define PMU_CC1_PWRSW_CLKSTRSTP_DELAY 0x00000400u
-
/* PMU chip control2 register */
#define PMU_CC2_RFLDO3P3_PU_FORCE_ON (1 << 15)
#define PMU_CC2_RFLDO3P3_PU_CLEAR 0x00000000
#define PMU_CC2_INV_GPIO_POLARITY_PMU_WAKE (1 << 25)
#define PMU_CC2_GCI2_WAKE (1 << 31)
-#define PMU_CC2_4369_XTALCORESIZE_BIAS_ADJ_START_VAL (0x3 << 26)
-#define PMU_CC2_4369_XTALCORESIZE_BIAS_ADJ_START_MASK (0x3 << 26)
-#define PMU_CC2_4369_XTALCORESIZE_BIAS_ADJ_NORMAL_VAL (0x0 << 28)
-#define PMU_CC2_4369_XTALCORESIZE_BIAS_ADJ_NORMAL_MASK (0x3 << 28)
-
/* PMU chip control3 register */
#define PMU_CHIPCTL3 3
#define PMU_CC3_ENABLE_SDIO_WAKEUP_SHIFT 19
#define PMU_CC3_ENABLE_RF_SHIFT 22
#define PMU_CC3_RF_DISABLE_IVALUE_SHIFT 23
-#define PMU_CC3_4369_XTALCORESIZE_PMOS_START_VAL (0x3F << 0)
-#define PMU_CC3_4369_XTALCORESIZE_PMOS_START_MASK (0x3F << 0)
-#define PMU_CC3_4369_XTALCORESIZE_PMOS_NORMAL_VAL (0x3F << 15)
-#define PMU_CC3_4369_XTALCORESIZE_PMOS_NORMAL_MASK (0x3F << 15)
-#define PMU_CC3_4369_XTALCORESIZE_NMOS_START_VAL (0x3F << 6)
-#define PMU_CC3_4369_XTALCORESIZE_NMOS_START_MASK (0x3F << 6)
-#define PMU_CC3_4369_XTALCORESIZE_NMOS_NORMAL_VAL (0x3F << 21)
-#define PMU_CC3_4369_XTALCORESIZE_NMOS_NORMAL_MASK (0x3F << 21)
-#define PMU_CC3_4369_XTALSEL_BIAS_RES_START_VAL (0x2 << 12)
-#define PMU_CC3_4369_XTALSEL_BIAS_RES_START_MASK (0x7 << 12)
-#define PMU_CC3_4369_XTALSEL_BIAS_RES_NORMAL_VAL (0x6 << 27)
-#define PMU_CC3_4369_XTALSEL_BIAS_RES_NORMAL_MASK (0x7 << 27)
-
/* PMU chip control4 register */
#define PMU_CHIPCTL4 4
#define PMU_CC4_SW_TYPE_RGMII 0x0000c000
#define PMU_CC4_DISABLE_LQ_AVAIL (1<<27)
-#define PMU_CC4_4369_MAIN_PD_CBUCK2VDDB_ON (1u << 15u)
-#define PMU_CC4_4369_MAIN_PD_CBUCK2VDDRET_ON (1u << 16u)
-#define PMU_CC4_4369_MAIN_PD_MEMLPLDO2VDDB_ON (1u << 17u)
-#define PMU_CC4_4369_MAIN_PD_MEMLPDLO2VDDRET_ON (1u << 18u)
-
-#define PMU_CC4_4369_AUX_PD_CBUCK2VDDB_ON (1u << 21u)
-#define PMU_CC4_4369_AUX_PD_CBUCK2VDDRET_ON (1u << 22u)
-#define PMU_CC4_4369_AUX_PD_MEMLPLDO2VDDB_ON (1u << 23u)
-#define PMU_CC4_4369_AUX_PD_MEMLPLDO2VDDRET_ON (1u << 24u)
-
/* PMU chip control5 register */
#define PMU_CHIPCTL5 5
-#define PMU_CC5_4369_SUBCORE_CBUCK2VDDB_ON (1u << 9u)
-#define PMU_CC5_4369_SUBCORE_CBUCK2VDDRET_ON (1u << 10u)
-#define PMU_CC5_4369_SUBCORE_MEMLPLDO2VDDB_ON (1u << 11u)
-#define PMU_CC5_4369_SUBCORE_MEMLPLDO2VDDRET_ON (1u << 12u)
-
/* PMU chip control6 register */
#define PMU_CHIPCTL6 6
#define PMU_CC6_ENABLE_CLKREQ_WAKEUP (1 << 4)
#define PMU_CC6_ENABLE_PMU_WAKEUP_ALP (1 << 6)
-#define PMU_CC6_ENABLE_PCIE_RETENTION (1 << 12)
-#define PMU_CC6_ENABLE_PMU_EXT_PERST (1 << 13)
-#define PMU_CC6_ENABLE_PMU_WAKEUP_PERST (1 << 14)
/* PMU chip control7 register */
#define PMU_CHIPCTL7 7
#define PMU_CHIPCTL8 8
#define PMU_CHIPCTL9 9
-#define PMU_CHIPCTL10 10
-#define PMU_CC10_PCIE_PWRSW_RESET0_CNT_SHIFT 0
-#define PMU_CC10_PCIE_PWRSW_RESET0_CNT_MASK 0x000000ff
-#define PMU_CC10_PCIE_PWRSW_RESET1_CNT_SHIFT 8
-#define PMU_CC10_PCIE_PWRSW_RESET1_CNT_MASK 0x0000ff00
-#define PMU_CC10_PCIE_PWRSW_UP_DLY_SHIFT 16
-#define PMU_CC10_PCIE_PWRSW_UP_DLY_MASK 0x000f0000
-#define PMU_CC10_PCIE_PWRSW_FORCE_PWROK_DLY_SHIFT 20
-#define PMU_CC10_PCIE_PWRSW_FORCE_PWROK_DLY_MASK 0x00f00000
-#define PMU_CC10_FORCE_PCIE_ON (1 << 24)
-#define PMU_CC10_FORCE_PCIE_SW_ON (1 << 25)
-#define PMU_CC10_FORCE_PCIE_RETNT_ON (1 << 26)
-
-#define PMU_CC10_PCIE_PWRSW_RESET_CNT_4US 1
-#define PMU_CC10_PCIE_PWRSW_RESET_CNT_8US 2
-
-#define PMU_CC10_PCIE_PWRSW_UP_DLY_0US 0
-
-#define PMU_CC10_PCIE_PWRSW_FORCE_PWROK_DLY_4US 1
-
-#define PMU_CHIPCTL11 11
-#define PMU_CHIPCTL12 12
-
-/* PMU chip control13 register */
-#define PMU_CHIPCTL13 13
-
-#define PMU_CC13_SUBCORE_CBUCK2VDDB_OFF (1u << 0u)
-#define PMU_CC13_SUBCORE_CBUCK2VDDRET_OFF (1u << 1u)
-#define PMU_CC13_SUBCORE_MEMLPLDO2VDDB_OFF (1u << 2u)
-#define PMU_CC13_SUBCORE_MEMLPLDO2VDDRET_OFF (1u << 3u)
-
-#define PMU_CC13_MAIN_CBUCK2VDDB_OFF (1u << 4u)
-#define PMU_CC13_MAIN_CBUCK2VDDRET_OFF (1u << 5u)
-#define PMU_CC13_MAIN_MEMLPLDO2VDDB_OFF (1u << 6u)
-#define PMU_CC13_MAIN_MEMLPLDO2VDDRET_OFF (1u << 7u)
-
-#define PMU_CC13_AUX_CBUCK2VDDB_OFF (1u << 8u)
-#define PMU_CC13_AUX_MEMLPLDO2VDDB_OFF (1u << 10u)
-#define PMU_CC13_AUX_MEMLPLDO2VDDRET_OFF (1u << 11u)
-#define PMU_CC13_AUX_CBUCK2VDDRET_OFF (1u << 12u)
-
-#define PMU_CHIPCTL14 14
-#define PMU_CHIPCTL15 15
-#define PMU_CHIPCTL16 16
-#define PMU_CC16_CLK4M_DIS (1 << 4)
-#define PMU_CC16_FF_ZERO_ADJ (4 << 5)
-
-/* PMU chip control14 register */
-#define PMU_CC14_MAIN_VDDB2VDDRET_UP_DLY_MASK (0xF)
-#define PMU_CC14_MAIN_VDDB2VDD_UP_DLY_MASK (0xF << 4)
-#define PMU_CC14_AUX_VDDB2VDDRET_UP_DLY_MASK (0xF << 8)
-#define PMU_CC14_AUX_VDDB2VDD_UP_DLY_MASK (0xF << 12)
-#define PMU_CC14_PCIE_VDDB2VDDRET_UP_DLY_MASK (0xF << 16)
-#define PMU_CC14_PCIE_VDDB2VDD_UP_DLY_MASK (0xF << 20)
-
/* PMU corerev and chip specific PLL controls.
* PMU<rev>_PLL<num>_XX where <rev> is PMU corerev and <num> is an arbitrary number
* to differentiate different PLLs controlled by the same PMU rev.
#define PMU1_PLL0_PC1_M4DIV_BY_18 0x12
#define PMU1_PLL0_PC1_M4DIV_BY_36 0x24
#define PMU1_PLL0_PC1_M4DIV_BY_60 0x3C
-#define PMU1_PLL0_PC1_M2_M4DIV_MASK 0xff00ff00
-#define PMU1_PLL0_PC1_HOLD_LOAD_CH 0x28
+
#define DOT11MAC_880MHZ_CLK_DIVISOR_SHIFT 8
#define DOT11MAC_880MHZ_CLK_DIVISOR_MASK (0xFF << DOT11MAC_880MHZ_CLK_DIVISOR_SHIFT)
#define DOT11MAC_880MHZ_CLK_DIVISOR_VAL (0xE << DOT11MAC_880MHZ_CLK_DIVISOR_SHIFT)
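/*
 * Hedged illustration (hypothetical name, not part of the patch): the 880 MHz
 * dot11mac divisor is a plain read-modify-write of the field defined above.
 */
static inline uint32 dot11mac_880mhz_set_divisor(uint32 pllctl)
{
	/* clear the divisor field, then install the 880 MHz value (0xE) */
	return ((pllctl & ~DOT11MAC_880MHZ_CLK_DIVISOR_MASK) |
	        DOT11MAC_880MHZ_CLK_DIVISOR_VAL);
}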
#define PMU5_MAINPLL_MEM 2
#define PMU5_MAINPLL_SI 3
+/* 4706 PMU */
+#define PMU4706_MAINPLL_PLL0 0
+#define PMU6_4706_PROCPLL_OFF 4 /**< The CPU PLL */
+#define PMU6_4706_PROC_P2DIV_MASK 0x000f0000
+#define PMU6_4706_PROC_P2DIV_SHIFT 16
+#define PMU6_4706_PROC_P1DIV_MASK 0x0000f000
+#define PMU6_4706_PROC_P1DIV_SHIFT 12
+#define PMU6_4706_PROC_NDIV_INT_MASK 0x00000ff8
+#define PMU6_4706_PROC_NDIV_INT_SHIFT 3
+#define PMU6_4706_PROC_NDIV_MODE_MASK 0x00000007
+#define PMU6_4706_PROC_NDIV_MODE_SHIFT 0
+
#define PMU7_PLL_PLLCTL7 7
#define PMU7_PLL_CTL7_M4DIV_MASK 0xff000000
#define PMU7_PLL_CTL7_M4DIV_SHIFT 24
#define PMU15_ARM_98MHZ 98400000 /**< 98.4 Mhz */
#define PMU15_ARM_97MHZ 97000000 /**< 97 Mhz */
+
#define PMU17_PLLCTL2_NDIVTYPE_MASK 0x00000070
#define PMU17_PLLCTL2_NDIVTYPE_SHIFT 4
#define PMU4347_PLL1_PC6_NDIV_FRAC_MASK 0xfffff000
#define PMU4347_PLL1_PC6_NDIV_FRAC_SHIFT 12
-/* Even though the masks are same as 4347, separate macros are
-created for 4369
-*/
-/* PLL usage in 4369 */
-#define PMU4369_PLL0_PC2_PDIV_MASK 0x000f0000
-#define PMU4369_PLL0_PC2_PDIV_SHIFT 16
-#define PMU4369_PLL0_PC2_NDIV_INT_MASK 0x3ff00000
-#define PMU4369_PLL0_PC2_NDIV_INT_SHIFT 20
-#define PMU4369_PLL0_PC3_NDIV_FRAC_MASK 0x000fffff
-#define PMU4369_PLL0_PC3_NDIV_FRAC_SHIFT 0
-#define PMU4369_PLL1_PC5_P1DIV_MASK 0xc0000000
-#define PMU4369_PLL1_PC5_P1DIV_SHIFT 30
-#define PMU4369_PLL1_PC6_P1DIV_MASK 0x00000003
-#define PMU4369_PLL1_PC6_P1DIV_SHIFT 0
-#define PMU4369_PLL1_PC6_NDIV_INT_MASK 0x00000ffc
-#define PMU4369_PLL1_PC6_NDIV_INT_SHIFT 2
-#define PMU4369_PLL1_PC6_NDIV_FRAC_MASK 0xfffff000
-#define PMU4369_PLL1_PC6_NDIV_FRAC_SHIFT 12
+/* PLL usage in 5356/5357 */
+#define PMU5356_MAINPLL_PLL0 0
+#define PMU5357_MAINPLL_PLL0 0
+
+/* 4716/47162 resources */
+#define RES4716_PROC_PLL_ON 0x00000040
+#define RES4716_PROC_HT_AVAIL 0x00000080
+
+/* 4716/4717/4718 Chip specific ChipControl register bits */
+#define CCTRL_471X_I2S_PINS_ENABLE 0x0080 /* I2S pins off by default, shared w/ pflash */
+
+/* 5357 Chip specific ChipControl register bits */
+/* 2nd - 32-bit reg */
+#define CCTRL_5357_I2S_PINS_ENABLE 0x00040000 /* I2S pins enable */
+#define CCTRL_5357_I2CSPI_PINS_ENABLE 0x00080000 /* I2C/SPI pins enable */
+
+/* 5354 resources */
+#define RES5354_EXT_SWITCHER_PWM 0 /**< 0x00001 */
+#define RES5354_BB_SWITCHER_PWM 1 /**< 0x00002 */
+#define RES5354_BB_SWITCHER_BURST 2 /**< 0x00004 */
+#define RES5354_BB_EXT_SWITCHER_BURST 3 /**< 0x00008 */
+#define RES5354_ILP_REQUEST 4 /**< 0x00010 */
+#define RES5354_RADIO_SWITCHER_PWM 5 /**< 0x00020 */
+#define RES5354_RADIO_SWITCHER_BURST 6 /**< 0x00040 */
+#define RES5354_ROM_SWITCH 7 /**< 0x00080 */
+#define RES5354_PA_REF_LDO 8 /**< 0x00100 */
+#define RES5354_RADIO_LDO 9 /**< 0x00200 */
+#define RES5354_AFE_LDO 10 /**< 0x00400 */
+#define RES5354_PLL_LDO 11 /**< 0x00800 */
+#define RES5354_BG_FILTBYP 12 /**< 0x01000 */
+#define RES5354_TX_FILTBYP 13 /**< 0x02000 */
+#define RES5354_RX_FILTBYP 14 /**< 0x04000 */
+#define RES5354_XTAL_PU 15 /**< 0x08000 */
+#define RES5354_XTAL_EN 16 /**< 0x10000 */
+#define RES5354_BB_PLL_FILTBYP 17 /**< 0x20000 */
+#define RES5354_RF_PLL_FILTBYP 18 /**< 0x40000 */
+#define RES5354_BB_PLL_PU 19 /**< 0x80000 */
/* 5357 Chip specific ChipControl register bits */
#define CCTRL5357_EXTPA (1<<14) /* extPA in ChipControl 1, bit 14 */
#define CCTRL5357_ANT_MUX_2o3 (1<<15) /* 2o3 in ChipControl 1, bit 15 */
#define CCTRL5357_NFLASH (1<<16) /* Nandflash in ChipControl 1, bit 16 */
+
/* 43217 Chip specific ChipControl register bits */
#define CCTRL43217_EXTPA_C0 (1<<13) /* core0 extPA in ChipControl 1, bit 13 */
#define CCTRL43217_EXTPA_C1 (1<<8) /* core1 extPA in ChipControl 1, bit 8 */
+/* 43228 Chip specific ChipControl register bits */
+#define CCTRL43228_EXTPA_C0 (1<<14) /* core1 extPA in ChipControl 1, bit 14 */
+#define CCTRL43228_EXTPA_C1 (1<<9) /* core0 extPA in ChipControl 1, bit 9 */
+
+/* 4328 resources */
+#define RES4328_EXT_SWITCHER_PWM 0 /**< 0x00001 */
+#define RES4328_BB_SWITCHER_PWM 1 /**< 0x00002 */
+#define RES4328_BB_SWITCHER_BURST 2 /**< 0x00004 */
+#define RES4328_BB_EXT_SWITCHER_BURST 3 /**< 0x00008 */
+#define RES4328_ILP_REQUEST 4 /**< 0x00010 */
+#define RES4328_RADIO_SWITCHER_PWM 5 /**< 0x00020 */
+#define RES4328_RADIO_SWITCHER_BURST 6 /**< 0x00040 */
+#define RES4328_ROM_SWITCH 7 /**< 0x00080 */
+#define RES4328_PA_REF_LDO 8 /**< 0x00100 */
+#define RES4328_RADIO_LDO 9 /**< 0x00200 */
+#define RES4328_AFE_LDO 10 /**< 0x00400 */
+#define RES4328_PLL_LDO 11 /**< 0x00800 */
+#define RES4328_BG_FILTBYP 12 /**< 0x01000 */
+#define RES4328_TX_FILTBYP 13 /**< 0x02000 */
+#define RES4328_RX_FILTBYP 14 /**< 0x04000 */
+#define RES4328_XTAL_PU 15 /**< 0x08000 */
+#define RES4328_XTAL_EN 16 /**< 0x10000 */
+#define RES4328_BB_PLL_FILTBYP 17 /**< 0x20000 */
+#define RES4328_RF_PLL_FILTBYP 18 /**< 0x40000 */
+#define RES4328_BB_PLL_PU 19 /**< 0x80000 */
+
+/* 4325 A0/A1 resources */
+#define RES4325_BUCK_BOOST_BURST 0 /**< 0x00000001 */
+#define RES4325_CBUCK_BURST 1 /**< 0x00000002 */
+#define RES4325_CBUCK_PWM 2 /**< 0x00000004 */
+#define RES4325_CLDO_CBUCK_BURST 3 /**< 0x00000008 */
+#define RES4325_CLDO_CBUCK_PWM 4 /**< 0x00000010 */
+#define RES4325_BUCK_BOOST_PWM 5 /**< 0x00000020 */
+#define RES4325_ILP_REQUEST 6 /**< 0x00000040 */
+#define RES4325_ABUCK_BURST 7 /**< 0x00000080 */
+#define RES4325_ABUCK_PWM 8 /**< 0x00000100 */
+#define RES4325_LNLDO1_PU 9 /**< 0x00000200 */
+#define RES4325_OTP_PU 10 /**< 0x00000400 */
+#define RES4325_LNLDO3_PU 11 /**< 0x00000800 */
+#define RES4325_LNLDO4_PU 12 /**< 0x00001000 */
+#define RES4325_XTAL_PU 13 /**< 0x00002000 */
+#define RES4325_ALP_AVAIL 14 /**< 0x00004000 */
+#define RES4325_RX_PWRSW_PU 15 /**< 0x00008000 */
+#define RES4325_TX_PWRSW_PU 16 /**< 0x00010000 */
+#define RES4325_RFPLL_PWRSW_PU 17 /**< 0x00020000 */
+#define RES4325_LOGEN_PWRSW_PU 18 /**< 0x00040000 */
+#define RES4325_AFE_PWRSW_PU 19 /**< 0x00080000 */
+#define RES4325_BBPLL_PWRSW_PU 20 /**< 0x00100000 */
+#define RES4325_HT_AVAIL 21 /**< 0x00200000 */
+
+/* 4325 B0/C0 resources */
+#define RES4325B0_CBUCK_LPOM 1 /**< 0x00000002 */
+#define RES4325B0_CBUCK_BURST 2 /**< 0x00000004 */
+#define RES4325B0_CBUCK_PWM 3 /**< 0x00000008 */
+#define RES4325B0_CLDO_PU 4 /**< 0x00000010 */
+
+/* 4325 C1 resources */
+#define RES4325C1_LNLDO2_PU 12 /**< 0x00001000 */
+
+/* 4325 chip-specific ChipStatus register bits */
+#define CST4325_SPROM_OTP_SEL_MASK 0x00000003
+#define CST4325_DEFCIS_SEL 0 /**< OTP is powered up, use def. CIS, no SPROM */
+#define CST4325_SPROM_SEL 1 /**< OTP is powered up, SPROM is present */
+#define CST4325_OTP_SEL 2 /**< OTP is powered up, no SPROM */
+#define CST4325_OTP_PWRDN 3 /**< OTP is powered down, SPROM is present */
+#define CST4325_SDIO_USB_MODE_MASK 0x00000004
+#define CST4325_SDIO_USB_MODE_SHIFT 2
+#define CST4325_RCAL_VALID_MASK 0x00000008
+#define CST4325_RCAL_VALID_SHIFT 3
+#define CST4325_RCAL_VALUE_MASK 0x000001f0
+#define CST4325_RCAL_VALUE_SHIFT 4
+#define CST4325_PMUTOP_2B_MASK 0x00000200 /**< 1 for 2b, 0 for 2a */
+#define CST4325_PMUTOP_2B_SHIFT 9
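
/* Illustrative sketch, not part of the original header: the *_MASK / *_SHIFT
 * pairs above are applied to the raw ChipStatus word. For example, extracting
 * the 4325 RCAL value (the "chipstatus" parameter name is hypothetical):
 */
static inline unsigned int cst4325_rcal_value(unsigned int chipstatus)
{
	return (chipstatus & CST4325_RCAL_VALUE_MASK) >> CST4325_RCAL_VALUE_SHIFT;
}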
+
+#define RES4329_RESERVED0 0 /**< 0x00000001 */
+#define RES4329_CBUCK_LPOM 1 /**< 0x00000002 */
+#define RES4329_CBUCK_BURST 2 /**< 0x00000004 */
+#define RES4329_CBUCK_PWM 3 /**< 0x00000008 */
+#define RES4329_CLDO_PU 4 /**< 0x00000010 */
+#define RES4329_PALDO_PU 5 /**< 0x00000020 */
+#define RES4329_ILP_REQUEST 6 /**< 0x00000040 */
+#define RES4329_RESERVED7 7 /**< 0x00000080 */
+#define RES4329_RESERVED8 8 /**< 0x00000100 */
+#define RES4329_LNLDO1_PU 9 /**< 0x00000200 */
+#define RES4329_OTP_PU 10 /**< 0x00000400 */
+#define RES4329_RESERVED11 11 /**< 0x00000800 */
+#define RES4329_LNLDO2_PU 12 /**< 0x00001000 */
+#define RES4329_XTAL_PU 13 /**< 0x00002000 */
+#define RES4329_ALP_AVAIL 14 /**< 0x00004000 */
+#define RES4329_RX_PWRSW_PU 15 /**< 0x00008000 */
+#define RES4329_TX_PWRSW_PU 16 /**< 0x00010000 */
+#define RES4329_RFPLL_PWRSW_PU 17 /**< 0x00020000 */
+#define RES4329_LOGEN_PWRSW_PU 18 /**< 0x00040000 */
+#define RES4329_AFE_PWRSW_PU 19 /**< 0x00080000 */
+#define RES4329_BBPLL_PWRSW_PU 20 /**< 0x00100000 */
+#define RES4329_HT_AVAIL 21 /**< 0x00200000 */
+
+#define CST4329_SPROM_OTP_SEL_MASK 0x00000003
+#define CST4329_DEFCIS_SEL 0 /**< OTP is powered up, use def. CIS, no SPROM */
+#define CST4329_SPROM_SEL 1 /**< OTP is powered up, SPROM is present */
+#define CST4329_OTP_SEL 2 /**< OTP is powered up, no SPROM */
+#define CST4329_OTP_PWRDN 3 /**< OTP is powered down, SPROM is present */
+#define CST4329_SPI_SDIO_MODE_MASK 0x00000004
+#define CST4329_SPI_SDIO_MODE_SHIFT 2
+
+/* 4312 chip-specific ChipStatus register bits */
+#define CST4312_SPROM_OTP_SEL_MASK 0x00000003
+#define CST4312_DEFCIS_SEL 0 /**< OTP is powered up, use def. CIS, no SPROM */
+#define CST4312_SPROM_SEL 1 /**< OTP is powered up, SPROM is present */
+#define CST4312_OTP_SEL 2 /**< OTP is powered up, no SPROM */
+#define CST4312_OTP_BAD 3 /**< OTP is broken, SPROM is present */
+
+/* 4312 resources (all PMU chips with little memory constraint) */
+#define RES4312_SWITCHER_BURST 0 /**< 0x00000001 */
+#define RES4312_SWITCHER_PWM 1 /**< 0x00000002 */
+#define RES4312_PA_REF_LDO 2 /**< 0x00000004 */
+#define RES4312_CORE_LDO_BURST 3 /**< 0x00000008 */
+#define RES4312_CORE_LDO_PWM 4 /**< 0x00000010 */
+#define RES4312_RADIO_LDO 5 /**< 0x00000020 */
+#define RES4312_ILP_REQUEST 6 /**< 0x00000040 */
+#define RES4312_BG_FILTBYP 7 /**< 0x00000080 */
+#define RES4312_TX_FILTBYP 8 /**< 0x00000100 */
+#define RES4312_RX_FILTBYP 9 /**< 0x00000200 */
+#define RES4312_XTAL_PU 10 /**< 0x00000400 */
+#define RES4312_ALP_AVAIL 11 /**< 0x00000800 */
+#define RES4312_BB_PLL_FILTBYP 12 /**< 0x00001000 */
+#define RES4312_RF_PLL_FILTBYP 13 /**< 0x00002000 */
+#define RES4312_HT_AVAIL 14 /**< 0x00004000 */
+
+/* 4322 resources */
+#define RES4322_RF_LDO 0
+#define RES4322_ILP_REQUEST 1
+#define RES4322_XTAL_PU 2
+#define RES4322_ALP_AVAIL 3
+#define RES4322_SI_PLL_ON 4
+#define RES4322_HT_SI_AVAIL 5
+#define RES4322_PHY_PLL_ON 6
+#define RES4322_HT_PHY_AVAIL 7
+#define RES4322_OTP_PU 8
+
+/* 4322 chip-specific ChipStatus register bits */
+#define CST4322_XTAL_FREQ_20_40MHZ 0x00000020
+#define CST4322_SPROM_OTP_SEL_MASK 0x000000c0
+#define CST4322_SPROM_OTP_SEL_SHIFT 6
+#define CST4322_NO_SPROM_OTP 0 /**< no OTP, no SPROM */
+#define CST4322_SPROM_PRESENT 1 /**< SPROM is present */
+#define CST4322_OTP_PRESENT 2 /**< OTP is present */
+#define CST4322_PCI_OR_USB 0x00000100
+#define CST4322_BOOT_MASK 0x00000600
+#define CST4322_BOOT_SHIFT 9
+#define CST4322_BOOT_FROM_SRAM 0 /**< boot from SRAM, ARM in reset */
+#define CST4322_BOOT_FROM_ROM 1 /**< boot from ROM */
+#define CST4322_BOOT_FROM_FLASH 2 /**< boot from FLASH */
+#define CST4322_BOOT_FROM_INVALID 3
+#define CST4322_ILP_DIV_EN 0x00000800
+#define CST4322_FLASH_TYPE_MASK 0x00001000
+#define CST4322_FLASH_TYPE_SHIFT 12
+#define CST4322_FLASH_TYPE_SHIFT_ST 0 /**< ST serial FLASH */
+#define CST4322_FLASH_TYPE_SHIFT_ATMEL 1 /**< ATMEL flash */
+#define CST4322_ARM_TAP_SEL 0x00002000
+#define CST4322_RES_INIT_MODE_MASK 0x0000c000
+#define CST4322_RES_INIT_MODE_SHIFT 14
+#define CST4322_RES_INIT_MODE_ILPAVAIL 0 /**< resinitmode: ILP available */
+#define CST4322_RES_INIT_MODE_ILPREQ 1 /**< resinitmode: ILP request */
+#define CST4322_RES_INIT_MODE_ALPAVAIL 2 /**< resinitmode: ALP available */
+#define CST4322_RES_INIT_MODE_HTAVAIL 3 /**< resinitmode: HT available */
+#define CST4322_PCIPLLCLK_GATING 0x00010000
+#define CST4322_CLK_SWITCH_PCI_TO_ALP 0x00020000
+#define CST4322_PCI_CARDBUS_MODE 0x00040000
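
/* Illustrative sketch, not part of the original header: the CST4322_BOOT_FROM_*
 * values are field encodings, so they are compared against the boot field after
 * masking and shifting, not against the raw ChipStatus word:
 */
static inline int cst4322_boots_from_rom(unsigned int chipstatus)
{
	return ((chipstatus & CST4322_BOOT_MASK) >> CST4322_BOOT_SHIFT) ==
		CST4322_BOOT_FROM_ROM;
}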
+
+/* 43224 chip-specific ChipControl register bits */
+#define CCTRL43224_GPIO_TOGGLE 0x8000 /* gpio[3:0] pins as btcoex or s/w gpio */
+#define CCTRL_43224A0_12MA_LED_DRIVE 0x00F000F0 /* 12 mA drive strength */
+#define CCTRL_43224B0_12MA_LED_DRIVE 0xF0 /* 12 mA drive strength for later 43224s */
+
/* 43236 resources */
#define RES43236_REGULATOR 0
#define RES43236_ILP_REQUEST 1
#define CST43236_BOOT_FROM_FLASH 2 /**< boot from FLASH */
#define CST43236_BOOT_FROM_INVALID 3
+/* 43237 resources */
+#define RES43237_REGULATOR 0
+#define RES43237_ILP_REQUEST 1
+#define RES43237_XTAL_PU 2
+#define RES43237_ALP_AVAIL 3
+#define RES43237_SI_PLL_ON 4
+#define RES43237_HT_SI_AVAIL 5
+
+/* 43237 chip-specific ChipControl register bits */
+#define CCTRL43237_BT_COEXIST (1<<0) /**< 0 disable */
+#define CCTRL43237_SECI (1<<1) /**< 0 SECI is disabled (JTAG functional) */
+#define CCTRL43237_EXT_LNA (1<<2) /**< 0 disable */
+#define CCTRL43237_ANT_MUX_2o3 (1<<3) /**< 2o3 mux, chipcontrol bit 3 */
+#define CCTRL43237_GSIO (1<<4) /**< 0 disable */
+
+/* 43237 Chip specific ChipStatus register bits */
+#define CST43237_SFLASH_MASK 0x00000040
+#define CST43237_OTP_SEL_MASK 0x00000080
+#define CST43237_OTP_SEL_SHIFT 7
+#define CST43237_HSIC_MASK 0x00000100 /**< USB/HSIC */
+#define CST43237_BP_CLK 0x00000200 /**< 120/96MHz */
+#define CST43237_BOOT_MASK 0x00001800
+#define CST43237_BOOT_SHIFT 11
+#define CST43237_BOOT_FROM_SRAM 0 /**< boot from SRAM, ARM in reset */
+#define CST43237_BOOT_FROM_ROM 1 /**< boot from ROM */
+#define CST43237_BOOT_FROM_FLASH 2 /**< boot from FLASH */
+#define CST43237_BOOT_FROM_INVALID 3
+
+/* 43239 resources */
+#define RES43239_OTP_PU 9
+#define RES43239_MACPHY_CLKAVAIL 23
+#define RES43239_HT_AVAIL 24
+
+/* 43239 Chip specific ChipStatus register bits */
+#define CST43239_SPROM_MASK 0x00000002
+#define CST43239_SFLASH_MASK 0x00000004
+#define CST43239_RES_INIT_MODE_SHIFT 7
+#define CST43239_RES_INIT_MODE_MASK 0x000001f0
+#define CST43239_CHIPMODE_SDIOD(cs) ((cs) & (1 << 15)) /**< SDIO || gSPI */
+#define CST43239_CHIPMODE_USB20D(cs) (~(cs) & (1 << 15)) /**< USB || USBDA */
+#define CST43239_CHIPMODE_SDIO(cs) (((cs) & (1 << 0)) == 0) /**< SDIO */
+#define CST43239_CHIPMODE_GSPI(cs) (((cs) & (1 << 0)) == (1 << 0)) /**< gSPI */
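
/* Illustrative sketch, not part of the original header: the 43239 chip-mode
 * helpers take the raw ChipStatus word and evaluate non-zero when the chip is
 * strapped for that interface; SDIO proper requires both tests below
 * (the function and parameter names are hypothetical):
 */
static inline int cst43239_is_sdio(unsigned int chipstatus)
{
	return CST43239_CHIPMODE_SDIOD(chipstatus) &&
		CST43239_CHIPMODE_SDIO(chipstatus);
}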
+
+/* 4324 resources */
+/* 43242 use same PMU as 4324 */
+#define RES4324_LPLDO_PU 0
+#define RES4324_RESET_PULLDN_DIS 1
+#define RES4324_PMU_BG_PU 2
+#define RES4324_HSIC_LDO_PU 3
+#define RES4324_CBUCK_LPOM_PU 4
+#define RES4324_CBUCK_PFM_PU 5
+#define RES4324_CLDO_PU 6
+#define RES4324_LPLDO2_LVM 7
+#define RES4324_LNLDO1_PU 8
+#define RES4324_LNLDO2_PU 9
+#define RES4324_LDO3P3_PU 10
+#define RES4324_OTP_PU 11
+#define RES4324_XTAL_PU 12
+#define RES4324_BBPLL_PU 13
+#define RES4324_LQ_AVAIL 14
+#define RES4324_WL_CORE_READY 17
+#define RES4324_ILP_REQ 18
+#define RES4324_ALP_AVAIL 19
+#define RES4324_PALDO_PU 20
+#define RES4324_RADIO_PU 21
+#define RES4324_SR_CLK_STABLE 22
+#define RES4324_SR_SAVE_RESTORE 23
+#define RES4324_SR_PHY_PWRSW 24
+#define RES4324_SR_PHY_PIC 25
+#define RES4324_SR_SUBCORE_PWRSW 26
+#define RES4324_SR_SUBCORE_PIC 27
+#define RES4324_SR_MEM_PM0 28
+#define RES4324_HT_AVAIL 29
+#define RES4324_MACPHY_CLKAVAIL 30
+
+/* 4324 Chip specific ChipStatus register bits */
+#define CST4324_SPROM_MASK 0x00000080
+#define CST4324_SFLASH_MASK 0x00400000
+#define CST4324_RES_INIT_MODE_SHIFT 10
+#define CST4324_RES_INIT_MODE_MASK 0x00000c00
+#define CST4324_CHIPMODE_MASK 0x7
+#define CST4324_CHIPMODE_SDIOD(cs) ((~(cs)) & (1 << 2)) /**< SDIO || gSPI */
+#define CST4324_CHIPMODE_USB20D(cs) (((cs) & CST4324_CHIPMODE_MASK) == 0x6) /**< USB || USBDA */
+
+/* 43242 Chip specific ChipStatus register bits */
+#define CST43242_SFLASH_MASK 0x00000008
+#define CST43242_SR_HALT (1<<25)
+#define CST43242_SR_CHIP_STATUS_2 27 /* bit 27 */
+
+/* 4331 resources */
+#define RES4331_REGULATOR 0
+#define RES4331_ILP_REQUEST 1
+#define RES4331_XTAL_PU 2
+#define RES4331_ALP_AVAIL 3
+#define RES4331_SI_PLL_ON 4
+#define RES4331_HT_SI_AVAIL 5
+
+/* 4331 chip-specific ChipControl register bits */
+#define CCTRL4331_BT_COEXIST (1<<0) /**< 0 disable */
+#define CCTRL4331_SECI (1<<1) /**< 0 SECI is disabled (JTAG functional) */
+#define CCTRL4331_EXT_LNA_G (1<<2) /**< 0 disable */
+#define CCTRL4331_SPROM_GPIO13_15 (1<<3) /**< sprom/gpio13-15 mux */
+#define CCTRL4331_EXTPA_EN (1<<4) /**< 0 ext pa disable, 1 ext pa enabled */
+#define CCTRL4331_GPIOCLK_ON_SPROMCS (1<<5) /**< set drive out GPIO_CLK on sprom_cs pin */
+#define CCTRL4331_PCIE_MDIO_ON_SPROMCS (1<<6) /**< use sprom_cs pin as PCIE mdio interface */
+#define CCTRL4331_EXTPA_ON_GPIO2_5 (1<<7) /* aband extpa will be at gpio2/5 and sprom_dout */
+#define CCTRL4331_OVR_PIPEAUXCLKEN (1<<8) /**< override core control on pipe_AuxClkEnable */
+#define CCTRL4331_OVR_PIPEAUXPWRDOWN (1<<9) /**< override core control on pipe_AuxPowerDown */
+#define CCTRL4331_PCIE_AUXCLKEN (1<<10) /**< pcie_auxclkenable */
+#define CCTRL4331_PCIE_PIPE_PLLDOWN (1<<11) /**< pcie_pipe_pllpowerdown */
+#define CCTRL4331_EXTPA_EN2 (1<<12) /**< 0 ext pa disable, 1 ext pa enabled */
+#define CCTRL4331_EXT_LNA_A (1<<13) /**< 0 disable */
+#define CCTRL4331_BT_SHD0_ON_GPIO4 (1<<16) /**< enable bt_shd0 at gpio4 */
+#define CCTRL4331_BT_SHD1_ON_GPIO5 (1<<17) /**< enable bt_shd1 at gpio5 */
+#define CCTRL4331_EXTPA_ANA_EN (1<<24) /**< 0 ext pa disable, 1 ext pa enabled */
+
+/* 4331 Chip specific ChipStatus register bits */
+#define CST4331_XTAL_FREQ 0x00000001 /**< crystal frequency 20/40Mhz */
+#define CST4331_SPROM_OTP_SEL_MASK 0x00000006
+#define CST4331_SPROM_OTP_SEL_SHIFT 1
+#define CST4331_SPROM_PRESENT 0x00000002
+#define CST4331_OTP_PRESENT 0x00000004
+#define CST4331_LDO_RF 0x00000008
+#define CST4331_LDO_PAR 0x00000010
+
+/* 4315 resource */
+#define RES4315_CBUCK_LPOM 1 /**< 0x00000002 */
+#define RES4315_CBUCK_BURST 2 /**< 0x00000004 */
+#define RES4315_CBUCK_PWM 3 /**< 0x00000008 */
+#define RES4315_CLDO_PU 4 /**< 0x00000010 */
+#define RES4315_PALDO_PU 5 /**< 0x00000020 */
+#define RES4315_ILP_REQUEST 6 /**< 0x00000040 */
+#define RES4315_LNLDO1_PU 9 /**< 0x00000200 */
+#define RES4315_OTP_PU 10 /**< 0x00000400 */
+#define RES4315_LNLDO2_PU 12 /**< 0x00001000 */
+#define RES4315_XTAL_PU 13 /**< 0x00002000 */
+#define RES4315_ALP_AVAIL 14 /**< 0x00004000 */
+#define RES4315_RX_PWRSW_PU 15 /**< 0x00008000 */
+#define RES4315_TX_PWRSW_PU 16 /**< 0x00010000 */
+#define RES4315_RFPLL_PWRSW_PU 17 /**< 0x00020000 */
+#define RES4315_LOGEN_PWRSW_PU 18 /**< 0x00040000 */
+#define RES4315_AFE_PWRSW_PU 19 /**< 0x00080000 */
+#define RES4315_BBPLL_PWRSW_PU 20 /**< 0x00100000 */
+#define RES4315_HT_AVAIL 21 /**< 0x00200000 */
+
+/* 4315 chip-specific ChipStatus register bits */
+#define CST4315_SPROM_OTP_SEL_MASK 0x00000003 /**< gpio [7:6], SDIO CIS selection */
+#define CST4315_DEFCIS_SEL 0x00000000 /**< use default CIS, OTP is powered up */
+#define CST4315_SPROM_SEL 0x00000001 /**< use SPROM, OTP is powered up */
+#define CST4315_OTP_SEL 0x00000002 /**< use OTP, OTP is powered up */
+#define CST4315_OTP_PWRDN 0x00000003 /**< use SPROM, OTP is powered down */
+#define CST4315_SDIO_MODE 0x00000004 /**< gpio [8], sdio/usb mode */
+#define CST4315_RCAL_VALID 0x00000008
+#define CST4315_RCAL_VALUE_MASK 0x000001f0
+#define CST4315_RCAL_VALUE_SHIFT 4
+#define CST4315_PALDO_EXTPNP 0x00000200 /**< PALDO is configured with external PNP */
+#define CST4315_CBUCK_MODE_MASK 0x00000c00
+#define CST4315_CBUCK_MODE_BURST 0x00000400
+#define CST4315_CBUCK_MODE_LPBURST 0x00000c00
+
+/* 4319 resources */
+#define RES4319_CBUCK_LPOM 1 /**< 0x00000002 */
+#define RES4319_CBUCK_BURST 2 /**< 0x00000004 */
+#define RES4319_CBUCK_PWM 3 /**< 0x00000008 */
+#define RES4319_CLDO_PU 4 /**< 0x00000010 */
+#define RES4319_PALDO_PU 5 /**< 0x00000020 */
+#define RES4319_ILP_REQUEST 6 /**< 0x00000040 */
+#define RES4319_LNLDO1_PU 9 /**< 0x00000200 */
+#define RES4319_OTP_PU 10 /**< 0x00000400 */
+#define RES4319_LNLDO2_PU 12 /**< 0x00001000 */
+#define RES4319_XTAL_PU 13 /**< 0x00002000 */
+#define RES4319_ALP_AVAIL 14 /**< 0x00004000 */
+#define RES4319_RX_PWRSW_PU 15 /**< 0x00008000 */
+#define RES4319_TX_PWRSW_PU 16 /**< 0x00010000 */
+#define RES4319_RFPLL_PWRSW_PU 17 /**< 0x00020000 */
+#define RES4319_LOGEN_PWRSW_PU 18 /**< 0x00040000 */
+#define RES4319_AFE_PWRSW_PU 19 /**< 0x00080000 */
+#define RES4319_BBPLL_PWRSW_PU 20 /**< 0x00100000 */
+#define RES4319_HT_AVAIL 21 /**< 0x00200000 */
+
+/* 4319 chip-specific ChipStatus register bits */
+#define CST4319_SPI_CPULESSUSB 0x00000001
+#define CST4319_SPI_CLK_POL 0x00000002
+#define CST4319_SPI_CLK_PH 0x00000008
+#define CST4319_SPROM_OTP_SEL_MASK 0x000000c0 /**< gpio [7:6], SDIO CIS selection */
+#define CST4319_SPROM_OTP_SEL_SHIFT 6
+#define CST4319_DEFCIS_SEL 0x00000000 /**< use default CIS, OTP is powered up */
+#define CST4319_SPROM_SEL 0x00000040 /**< use SPROM, OTP is powered up */
+#define CST4319_OTP_SEL 0x00000080 /* use OTP, OTP is powered up */
+#define CST4319_OTP_PWRDN 0x000000c0 /* use SPROM, OTP is powered down */
+#define CST4319_SDIO_USB_MODE 0x00000100 /**< gpio [8], sdio/usb mode */
+#define CST4319_REMAP_SEL_MASK 0x00000600
+#define CST4319_ILPDIV_EN 0x00000800
+#define CST4319_XTAL_PD_POL 0x00001000
+#define CST4319_LPO_SEL 0x00002000
+#define CST4319_RES_INIT_MODE 0x0000c000
+#define CST4319_PALDO_EXTPNP 0x00010000 /**< PALDO is configured with external PNP */
+#define CST4319_CBUCK_MODE_MASK 0x00060000
+#define CST4319_CBUCK_MODE_BURST 0x00020000
+#define CST4319_CBUCK_MODE_LPBURST 0x00060000
+#define CST4319_RCAL_VALID 0x01000000
+#define CST4319_RCAL_VALUE_MASK 0x3e000000
+#define CST4319_RCAL_VALUE_SHIFT 25
+
#define PMU1_PLL0_CHIPCTL0 0
#define PMU1_PLL0_CHIPCTL1 1
#define PMU1_PLL0_CHIPCTL2 2
-
+#define CCTL_4319USB_XTAL_SEL_MASK 0x00180000
+#define CCTL_4319USB_XTAL_SEL_SHIFT 19
+#define CCTL_4319USB_48MHZ_PLL_SEL 1
+#define CCTL_4319USB_24MHZ_PLL_SEL 2
+
+/* PMU resources for 4336 */
+#define RES4336_CBUCK_LPOM 0
+#define RES4336_CBUCK_BURST 1
+#define RES4336_CBUCK_LP_PWM 2
+#define RES4336_CBUCK_PWM 3
+#define RES4336_CLDO_PU 4
+#define RES4336_DIS_INT_RESET_PD 5
+#define RES4336_ILP_REQUEST 6
+#define RES4336_LNLDO_PU 7
+#define RES4336_LDO3P3_PU 8
+#define RES4336_OTP_PU 9
+#define RES4336_XTAL_PU 10
+#define RES4336_ALP_AVAIL 11
+#define RES4336_RADIO_PU 12
+#define RES4336_BG_PU 13
+#define RES4336_VREG1p4_PU_PU 14
+#define RES4336_AFE_PWRSW_PU 15
+#define RES4336_RX_PWRSW_PU 16
+#define RES4336_TX_PWRSW_PU 17
+#define RES4336_BB_PWRSW_PU 18
+#define RES4336_SYNTH_PWRSW_PU 19
+#define RES4336_MISC_PWRSW_PU 20
+#define RES4336_LOGEN_PWRSW_PU 21
+#define RES4336_BBPLL_PWRSW_PU 22
+#define RES4336_MACPHY_CLKAVAIL 23
+#define RES4336_HT_AVAIL 24
+#define RES4336_RSVD 25
+
+/* 4336 chip-specific ChipStatus register bits */
+#define CST4336_SPI_MODE_MASK 0x00000001
+#define CST4336_SPROM_PRESENT 0x00000002
+#define CST4336_OTP_PRESENT 0x00000004
+#define CST4336_ARMREMAP_0 0x00000008
+#define CST4336_ILPDIV_EN_MASK 0x00000010
+#define CST4336_ILPDIV_EN_SHIFT 4
+#define CST4336_XTAL_PD_POL_MASK 0x00000020
+#define CST4336_XTAL_PD_POL_SHIFT 5
+#define CST4336_LPO_SEL_MASK 0x00000040
+#define CST4336_LPO_SEL_SHIFT 6
+#define CST4336_RES_INIT_MODE_MASK 0x00000180
+#define CST4336_RES_INIT_MODE_SHIFT 7
+#define CST4336_CBUCK_MODE_MASK 0x00000600
+#define CST4336_CBUCK_MODE_SHIFT 9
+
+/* 4336 Chip specific PMU ChipControl register bits */
+#define PCTL_4336_SERIAL_ENAB (1 << 24)
+
+/* 4330 resources */
+#define RES4330_CBUCK_LPOM 0
+#define RES4330_CBUCK_BURST 1
+#define RES4330_CBUCK_LP_PWM 2
+#define RES4330_CBUCK_PWM 3
+#define RES4330_CLDO_PU 4
+#define RES4330_DIS_INT_RESET_PD 5
+#define RES4330_ILP_REQUEST 6
+#define RES4330_LNLDO_PU 7
+#define RES4330_LDO3P3_PU 8
+#define RES4330_OTP_PU 9
+#define RES4330_XTAL_PU 10
+#define RES4330_ALP_AVAIL 11
+#define RES4330_RADIO_PU 12
+#define RES4330_BG_PU 13
+#define RES4330_VREG1p4_PU_PU 14
+#define RES4330_AFE_PWRSW_PU 15
+#define RES4330_RX_PWRSW_PU 16
+#define RES4330_TX_PWRSW_PU 17
+#define RES4330_BB_PWRSW_PU 18
+#define RES4330_SYNTH_PWRSW_PU 19
+#define RES4330_MISC_PWRSW_PU 20
+#define RES4330_LOGEN_PWRSW_PU 21
+#define RES4330_BBPLL_PWRSW_PU 22
+#define RES4330_MACPHY_CLKAVAIL 23
+#define RES4330_HT_AVAIL 24
+#define RES4330_5gRX_PWRSW_PU 25
+#define RES4330_5gTX_PWRSW_PU 26
+#define RES4330_5g_LOGEN_PWRSW_PU 27
+
+/* 4330 chip-specific ChipStatus register bits */
+#define CST4330_CHIPMODE_SDIOD(cs) (((cs) & 0x7) < 6) /**< SDIO || gSPI */
+#define CST4330_CHIPMODE_USB20D(cs) (((cs) & 0x7) >= 6) /**< USB || USBDA */
+#define CST4330_CHIPMODE_SDIO(cs) (((cs) & 0x4) == 0) /**< SDIO */
+#define CST4330_CHIPMODE_GSPI(cs) (((cs) & 0x6) == 4) /**< gSPI */
+#define CST4330_CHIPMODE_USB(cs) (((cs) & 0x7) == 6) /**< USB packet-oriented */
+#define CST4330_CHIPMODE_USBDA(cs) (((cs) & 0x7) == 7) /**< USB Direct Access */
+#define CST4330_OTP_PRESENT 0x00000010
+#define CST4330_LPO_AUTODET_EN 0x00000020
+#define CST4330_ARMREMAP_0 0x00000040
+#define CST4330_SPROM_PRESENT 0x00000080 /**< takes priority over OTP if both set */
+#define CST4330_ILPDIV_EN 0x00000100
+#define CST4330_LPO_SEL 0x00000200
+#define CST4330_RES_INIT_MODE_SHIFT 10
+#define CST4330_RES_INIT_MODE_MASK 0x00000c00
+#define CST4330_CBUCK_MODE_SHIFT 12
+#define CST4330_CBUCK_MODE_MASK 0x00003000
+#define CST4330_CBUCK_POWER_OK 0x00004000
+#define CST4330_BB_PLL_LOCKED 0x00008000
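
/* Illustrative sketch, not part of the original header: the low three bits of
 * the 4330 ChipStatus word select the host interface; the helpers above can be
 * combined to name the strapped mode (function and parameter are hypothetical):
 */
static inline const char *cst4330_mode_name(unsigned int chipstatus)
{
	if (CST4330_CHIPMODE_SDIO(chipstatus))
		return "SDIO";
	if (CST4330_CHIPMODE_GSPI(chipstatus))
		return "gSPI";
	if (CST4330_CHIPMODE_USBDA(chipstatus))
		return "USB-DA";
	return "USB";
}
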
#define SOCDEVRAM_BP_ADDR 0x1E000000
#define SOCDEVRAM_ARM_ADDR 0x00800000
+/* 4330 Chip specific PMU ChipControl register bits */
+#define PCTL_4330_SERIAL_ENAB (1 << 24)
+
+/* 4330 Chip specific ChipControl register bits */
+#define CCTRL_4330_GPIO_SEL 0x00000001 /* 1=select GPIOs to be muxed out */
+#define CCTRL_4330_ERCX_SEL 0x00000002 /* 1=select ERCX BT coex to be muxed out */
+#define CCTRL_4330_SDIO_HOST_WAKE 0x00000004 /* SDIO: 1=configure GPIO0 for host wake */
+#define CCTRL_4330_JTAG_DISABLE 0x00000008 /* 1=disable JTAG interface on mux'd pins */
+
+#define PMU_VREG0_ADDR 0
#define PMU_VREG0_I_SR_CNTL_EN_SHIFT 0
#define PMU_VREG0_DISABLE_PULLD_BT_SHIFT 2
#define PMU_VREG0_DISABLE_PULLD_WL_SHIFT 3
#define PMU_VREG5_HSICDVDD_PD_SHIFT 11
#define PMU_VREG5_HSICDVDD_PD_MASK 0x1
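
/* Illustrative sketch, not part of the original header: the VREG5 field above
 * pairs a bit position (*_SHIFT) with a field-width mask applied after
 * shifting, e.g. reading the HSIC DVDD power-down bit out of PMU VREG5:
 */
static inline unsigned int pmu_vreg5_hsicdvdd_pd(unsigned int vreg5)
{
	return (vreg5 >> PMU_VREG5_HSICDVDD_PD_SHIFT) & PMU_VREG5_HSICDVDD_PD_MASK;
}
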
+/* 4334 resources */
+#define RES4334_LPLDO_PU 0
+#define RES4334_RESET_PULLDN_DIS 1
+#define RES4334_PMU_BG_PU 2
+#define RES4334_HSIC_LDO_PU 3
+#define RES4334_CBUCK_LPOM_PU 4
+#define RES4334_CBUCK_PFM_PU 5
+#define RES4334_CLDO_PU 6
+#define RES4334_LPLDO2_LVM 7
+#define RES4334_LNLDO_PU 8
+#define RES4334_LDO3P3_PU 9
+#define RES4334_OTP_PU 10
+#define RES4334_XTAL_PU 11
+#define RES4334_WL_PWRSW_PU 12
+#define RES4334_LQ_AVAIL 13
+#define RES4334_LOGIC_RET 14
+#define RES4334_MEM_SLEEP 15
+#define RES4334_MACPHY_RET 16
+#define RES4334_WL_CORE_READY 17
+#define RES4334_ILP_REQ 18
+#define RES4334_ALP_AVAIL 19
+#define RES4334_MISC_PWRSW_PU 20
+#define RES4334_SYNTH_PWRSW_PU 21
+#define RES4334_RX_PWRSW_PU 22
+#define RES4334_RADIO_PU 23
+#define RES4334_WL_PMU_PU 24
+#define RES4334_VCO_LDO_PU 25
+#define RES4334_AFE_LDO_PU 26
+#define RES4334_RX_LDO_PU 27
+#define RES4334_TX_LDO_PU 28
+#define RES4334_HT_AVAIL 29
+#define RES4334_MACPHY_CLK_AVAIL 30
+
+/* 4334 chip-specific ChipStatus register bits */
+#define CST4334_CHIPMODE_MASK 7
+#define CST4334_SDIO_MODE 0x00000000
+#define CST4334_SPI_MODE 0x00000004
+#define CST4334_HSIC_MODE 0x00000006
+#define CST4334_BLUSB_MODE 0x00000007
+#define CST4334_CHIPMODE_HSIC(cs) (((cs) & CST4334_CHIPMODE_MASK) == CST4334_HSIC_MODE)
+#define CST4334_OTP_PRESENT 0x00000010
+#define CST4334_LPO_AUTODET_EN 0x00000020
+#define CST4334_ARMREMAP_0 0x00000040
+#define CST4334_SPROM_PRESENT 0x00000080
+#define CST4334_ILPDIV_EN_MASK 0x00000100
+#define CST4334_ILPDIV_EN_SHIFT 8
+#define CST4334_LPO_SEL_MASK 0x00000200
+#define CST4334_LPO_SEL_SHIFT 9
+#define CST4334_RES_INIT_MODE_MASK 0x00000C00
+#define CST4334_RES_INIT_MODE_SHIFT 10
+
+/* 4334 Chip specific PMU ChipControl register bits */
+#define PCTL_4334_GPIO3_ENAB (1 << 3)
+
+/* 4334 Chip control */
+#define CCTRL4334_PMU_WAKEUP_GPIO1 (1 << 0)
+#define CCTRL4334_PMU_WAKEUP_HSIC (1 << 1)
+#define CCTRL4334_PMU_WAKEUP_AOS (1 << 2)
+#define CCTRL4334_HSIC_WAKE_MODE (1 << 3)
+#define CCTRL4334_HSIC_INBAND_GPIO1 (1 << 4)
+#define CCTRL4334_HSIC_LDO_PU (1 << 23)
+
+/* 4334 Chip control 3 */
+#define CCTRL4334_BLOCK_EXTRNL_WAKE (1 << 4)
+#define CCTRL4334_SAVERESTORE_FIX (1 << 5)
+
+/* 43341 Chip control 3 */
+#define CCTRL43341_BLOCK_EXTRNL_WAKE (1 << 13)
+#define CCTRL43341_SAVERESTORE_FIX (1 << 14)
+#define CCTRL43341_BT_ISO_SEL (1 << 16)
+
+/* 4334 Chip specific ChipControl1 register bits */
+#define CCTRL1_4334_GPIO_SEL (1 << 0) /* 1=select GPIOs to be muxed out */
+#define CCTRL1_4334_ERCX_SEL (1 << 1) /* 1=select ERCX BT coex to be muxed out */
+#define CCTRL1_4334_SDIO_HOST_WAKE (1 << 2) /* SDIO: 1=configure GPIO0 for host wake */
+#define CCTRL1_4334_JTAG_DISABLE (1 << 3) /* 1=disable JTAG interface on mux'd pins */
+#define CCTRL1_4334_UART_ON_4_5 (1 << 28) /**< 1=UART_TX/UART_RX muxed on GPIO_4/5 (4334B0/1) */
+
+/* 4324 Chip specific ChipControl1 register bits */
+#define CCTRL1_4324_GPIO_SEL (1 << 0) /* 1=select GPIOs to be muxed out */
+#define CCTRL1_4324_SDIO_HOST_WAKE (1 << 2) /* SDIO: 1=configure GPIO0 for host wake */
+
+/* 43143 chip-specific ChipStatus register bits based on Confluence documentation */
+/* register contains strap values sampled during POR */
+#define CST43143_REMAP_TO_ROM (3 << 0) /* 00=Boot SRAM, 01=Boot ROM, 10=Boot SFLASH */
+#define CST43143_SDIO_EN (1 << 2) /* 0 = USB Enab, SDIO pins are GPIO or I2S */
+#define CST43143_SDIO_ISO (1 << 3) /* 1 = SDIO isolated */
+#define CST43143_USB_CPU_LESS (1 << 4) /* 1 = CPULess mode Enabled */
+#define CST43143_CBUCK_MODE (3 << 6) /* Indicates what controller mode CBUCK is in */
+#define CST43143_POK_CBUCK (1 << 8) /* 1 = 1.2V CBUCK voltage ready */
+#define CST43143_PMU_OVRSPIKE (1 << 9)
+#define CST43143_PMU_OVRTEMP (0xF << 10)
+#define CST43143_SR_FLL_CAL_DONE (1 << 14)
+#define CST43143_USB_PLL_LOCKDET (1 << 15)
+#define CST43143_PMU_PLL_LOCKDET (1 << 16)
+#define CST43143_CHIPMODE_SDIOD(cs) (((cs) & CST43143_SDIO_EN) != 0) /* SDIO */
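
/* Illustrative sketch, not part of the original header: CST43143_CBUCK_MODE is
 * a two-bit field at bits [7:6]; since no *_SHIFT macro is defined for it, the
 * shift is written out explicitly when decoding:
 */
static inline unsigned int cst43143_cbuck_mode(unsigned int chipstatus)
{
	return (chipstatus & CST43143_CBUCK_MODE) >> 6;
}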
+
+/* 43143 Chip specific ChipControl register bits */
+/* 00: SECI is disabled (JTAG functional), 01: 2 wire, 10: 4 wire */
+#define CCTRL_43143_SECI (1<<0)
+#define CCTRL_43143_BT_LEGACY (1<<1)
+#define CCTRL_43143_I2S_MODE (1<<2) /**< 0: SDIO enabled */
+#define CCTRL_43143_I2S_MASTER (1<<3) /**< 0: I2S MCLK input disabled */
+#define CCTRL_43143_I2S_FULL (1<<4) /**< 0: I2S SDIN and SPDIF_TX inputs disabled */
+#define CCTRL_43143_GSIO (1<<5) /**< 0: sFlash enabled */
+#define CCTRL_43143_RF_SWCTRL_MASK (7<<6) /**< 0: disabled */
+#define CCTRL_43143_RF_SWCTRL_0 (1<<6)
+#define CCTRL_43143_RF_SWCTRL_1 (2<<6)
+#define CCTRL_43143_RF_SWCTRL_2 (4<<6)
+#define CCTRL_43143_RF_XSWCTRL (1<<9) /**< 0: UART enabled */
+#define CCTRL_43143_HOST_WAKE0 (1<<11) /**< 1: SDIO separate interrupt output from GPIO4 */
+#define CCTRL_43143_HOST_WAKE1 (1<<12) /* 1: SDIO separate interrupt output from GPIO16 */
+
+/* 43143 resources, based on pmu_params.xls V1.19 */
+#define RES43143_EXT_SWITCHER_PWM 0 /**< 0x00001 */
+#define RES43143_XTAL_PU 1 /**< 0x00002 */
+#define RES43143_ILP_REQUEST 2 /**< 0x00004 */
+#define RES43143_ALP_AVAIL 3 /**< 0x00008 */
+#define RES43143_WL_CORE_READY 4 /**< 0x00010 */
+#define RES43143_BBPLL_PWRSW_PU 5 /**< 0x00020 */
+#define RES43143_HT_AVAIL 6 /**< 0x00040 */
+#define RES43143_RADIO_PU 7 /**< 0x00080 */
+#define RES43143_MACPHY_CLK_AVAIL 8 /**< 0x00100 */
+#define RES43143_OTP_PU 9 /**< 0x00200 */
+#define RES43143_LQ_AVAIL 10 /**< 0x00400 */
+
+#define PMU43143_XTAL_CORE_SIZE_MASK 0x3F
+
+/* 4313 resources */
+#define RES4313_BB_PU_RSRC 0
+#define RES4313_ILP_REQ_RSRC 1
+#define RES4313_XTAL_PU_RSRC 2
+#define RES4313_ALP_AVAIL_RSRC 3
+#define RES4313_RADIO_PU_RSRC 4
+#define RES4313_BG_PU_RSRC 5
+#define RES4313_VREG1P4_PU_RSRC 6
+#define RES4313_AFE_PWRSW_RSRC 7
+#define RES4313_RX_PWRSW_RSRC 8
+#define RES4313_TX_PWRSW_RSRC 9
+#define RES4313_BB_PWRSW_RSRC 10
+#define RES4313_SYNTH_PWRSW_RSRC 11
+#define RES4313_MISC_PWRSW_RSRC 12
+#define RES4313_BB_PLL_PWRSW_RSRC 13
+#define RES4313_HT_AVAIL_RSRC 14
+#define RES4313_MACPHY_CLK_AVAIL_RSRC 15
+
+/* 4313 chip-specific ChipStatus register bits */
+#define CST4313_SPROM_PRESENT 1
+#define CST4313_OTP_PRESENT 2
+#define CST4313_SPROM_OTP_SEL_MASK 0x00000002
+#define CST4313_SPROM_OTP_SEL_SHIFT 0
+
+/* 4313 Chip specific ChipControl register bits */
+#define CCTRL_4313_12MA_LED_DRIVE 0x00000007 /* 12 mA drive strength for later 4313 */
+
+/* PMU resources for 4314 */
+#define RES4314_LPLDO_PU 0
+#define RES4314_PMU_SLEEP_DIS 1
+#define RES4314_PMU_BG_PU 2
+#define RES4314_CBUCK_LPOM_PU 3
+#define RES4314_CBUCK_PFM_PU 4
+#define RES4314_CLDO_PU 5
+#define RES4314_LPLDO2_LVM 6
+#define RES4314_WL_PMU_PU 7
+#define RES4314_LNLDO_PU 8
+#define RES4314_LDO3P3_PU 9
+#define RES4314_OTP_PU 10
+#define RES4314_XTAL_PU 11
+#define RES4314_WL_PWRSW_PU 12
+#define RES4314_LQ_AVAIL 13
+#define RES4314_LOGIC_RET 14
+#define RES4314_MEM_SLEEP 15
+#define RES4314_MACPHY_RET 16
+#define RES4314_WL_CORE_READY 17
+#define RES4314_ILP_REQ 18
+#define RES4314_ALP_AVAIL 19
+#define RES4314_MISC_PWRSW_PU 20
+#define RES4314_SYNTH_PWRSW_PU 21
+#define RES4314_RX_PWRSW_PU 22
+#define RES4314_RADIO_PU 23
+#define RES4314_VCO_LDO_PU 24
+#define RES4314_AFE_LDO_PU 25
+#define RES4314_RX_LDO_PU 26
+#define RES4314_TX_LDO_PU 27
+#define RES4314_HT_AVAIL 28
+#define RES4314_MACPHY_CLK_AVAIL 29
+
+/* 4314 chip-specific ChipStatus register bits */
+#define CST4314_OTP_ENABLED 0x00200000
+
+/* 43228 resources */
+#define RES43228_NOT_USED 0
+#define RES43228_ILP_REQUEST 1
+#define RES43228_XTAL_PU 2
+#define RES43228_ALP_AVAIL 3
+#define RES43228_PLL_EN 4
+#define RES43228_HT_PHY_AVAIL 5
+
/* 43228 chipstatus reg bits */
+#define CST43228_ILP_DIV_EN 0x1
#define CST43228_OTP_PRESENT 0x2
+#define CST43228_SERDES_REFCLK_PADSEL 0x4
+#define CST43228_SDIO_MODE 0x8
+#define CST43228_SDIO_OTP_PRESENT 0x10
+#define CST43228_SDIO_RESET 0x20
+
+/* 4706 chipstatus reg bits */
+#define CST4706_PKG_OPTION (1<<0) /* 0: full-featured package, 1: low-cost package */
+#define CST4706_SFLASH_PRESENT (1<<1) /* 0: parallel, 1: serial flash is present */
+#define CST4706_SFLASH_TYPE (1<<2) /* 0: 8b-p/ST-s flash, 1: 16b-p/Atmel-s flash */
+#define CST4706_MIPS_BENDIAN (1<<3) /* 0: little, 1: big endian */
+#define CST4706_PCIE1_DISABLE (1<<5) /* PCIE1 enable strap pin */
+
+/* 4706 flashstrconfig reg bits */
+#define FLSTRCF4706_MASK 0x000000ff
+#define FLSTRCF4706_SF1 0x00000001 /**< 2nd serial flash present */
+#define FLSTRCF4706_PF1 0x00000002 /**< 2nd parallel flash present */
+#define FLSTRCF4706_SF1_TYPE 0x00000004 /**< 2nd serial flash type : 0 : ST, 1 : Atmel */
+#define FLSTRCF4706_NF1 0x00000008 /**< 2nd NAND flash present */
+#define FLSTRCF4706_1ST_MADDR_SEG_MASK 0x000000f0 /**< Valid value mask */
+#define FLSTRCF4706_1ST_MADDR_SEG_4MB 0x00000010 /**< 4MB */
+#define FLSTRCF4706_1ST_MADDR_SEG_8MB 0x00000020 /**< 8MB */
+#define FLSTRCF4706_1ST_MADDR_SEG_16MB 0x00000030 /**< 16MB */
+#define FLSTRCF4706_1ST_MADDR_SEG_32MB 0x00000040 /**< 32MB */
+#define FLSTRCF4706_1ST_MADDR_SEG_64MB 0x00000050 /**< 64MB */
+#define FLSTRCF4706_1ST_MADDR_SEG_128MB 0x00000060 /**< 128MB */
+#define FLSTRCF4706_1ST_MADDR_SEG_256MB 0x00000070 /**< 256MB */
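
/* Illustrative sketch, not part of the original header: the first mapped-address
 * segment is a 4-bit field whose listed encodings double from 4MB (0x10) up to
 * 256MB (0x70), i.e. size = 4MB << (encoding - 1) once the field is shifted down.
 * The treatment of an encoding of 0 as "no segment" is an assumption here.
 */
static inline unsigned int flstrcf4706_seg_bytes(unsigned int flashstrconfig)
{
	unsigned int enc = (flashstrconfig & FLSTRCF4706_1ST_MADDR_SEG_MASK) >> 4;

	return enc ? (4u << 20) << (enc - 1) : 0; /* treat 0 as "no segment" (assumption) */
}
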
/* 4360 Chip specific ChipControl register bits */
#define CCTRL4360_I2C_MODE (1 << 0)
#define CST4360_AVBBPLL_LOCK 0x00001000
#define CST4360_USBBBPLL_LOCK 0x00002000
#define CST4360_RSRC_INIT_MODE(cs) ((cs & CST4360_RSRC_INIT_MODE_MASK) >> \
- CST4360_RSRC_INIT_MODE_SHIFT)
-
-#define CCTRL_4360_UART_SEL 0x2
+ CST4360_RSRC_INIT_MODE_SHIFT)
+#define CCTRL_4360_UART_SEL 0x2
#define CST4360_RSRC_INIT_MODE(cs) ((cs & CST4360_RSRC_INIT_MODE_MASK) >> \
- CST4360_RSRC_INIT_MODE_SHIFT)
+ CST4360_RSRC_INIT_MODE_SHIFT)
#define PMU4360_CC1_GPIO7_OVRD (1<<23) /* GPIO7 override */
+
/* 43602 PMU resources based on pmu_params.xls version v0.95 */
#define RES43602_LPLDO_PU 0
#define RES43602_REGULATOR 1
#define RES4365_RADIO_PU 12
#define RES4365_MACPHY_CLK_AVAIL 13
-/* 43684 PMU resources */
-#define RES43684_REGULATOR_PU 0
-#define RES43684_PCIE_LDO_BG_PU 1
-#define RES43684_XTAL_LDO_PU 2
-#define RES43684_XTAL_PU 3
-#define RES43684_CPU_PLL_LDO_PU 4
-#define RES43684_CPU_PLL_PU 5
-#define RES43684_WL_CORE_RDY 6
-#define RES43684_ILP_REQ 7
-#define RES43684_ALP_AVAIL 8
-#define RES43684_HT_AVAIL 9
-#define RES43684_BB_PLL_LDO_PU 10
-#define RES43684_BB_PLL_PU 11
-#define RES43684_MINI_PMU_PU 12
-#define RES43684_RADIO_PU 13
-#define RES43684_MACPHY_CLK_AVAIL 14
-#define RES43684_PCIE_LDO_PU 15
-
-/* 7271 PMU resources */
-#define RES7271_REGULATOR_PU 0
-#define RES7271_WL_CORE_RDY 1
-#define RES7271_ILP_REQ 2
-#define RES7271_ALP_AVAIL 3
-#define RES7271_HT_AVAIL 4
-#define RES7271_BB_PLL_PU 5
-#define RES7271_MINIPMU_PU 6
-#define RES7271_RADIO_PU 7
-#define RES7271_MACPHY_CLK_AVAIL 8
-
/* 4349 related */
#define RES4349_LPLDO_PU 0
#define RES4349_BG_PU 1
#define RES4349_HT_AVAIL 29
#define RES4349_MACPHY_CLKAVAIL 30
-/* 4373 PMU resources */
-#define RES4373_LPLDO_PU 0
-#define RES4373_BG_PU 1
-#define RES4373_PMU_SLEEP 2
-#define RES4373_PALDO3P3_PU 3
-#define RES4373_CBUCK_LPOM_PU 4
-#define RES4373_CBUCK_PFM_PU 5
-#define RES4373_COLD_START_WAIT 6
-#define RES4373_RSVD_7 7
-#define RES4373_LNLDO_PU 8
-#define RES4373_XTALLDO_PU 9
-#define RES4373_LDO3P3_PU 10
-#define RES4373_OTP_PU 11
-#define RES4373_XTAL_PU 12
-#define RES4373_SR_CLK_START 13
-#define RES4373_LQ_AVAIL 14
-#define RES4373_LQ_START 15
-#define RES4373_PERST_OVR 16
-#define RES4373_WL_CORE_RDY 17
-#define RES4373_ILP_REQ 18
-#define RES4373_ALP_AVAIL 19
-#define RES4373_MINI_PMU 20
-#define RES4373_RADIO_PU 21
-#define RES4373_SR_CLK_STABLE 22
-#define RES4373_SR_SAVE_RESTORE 23
-#define RES4373_SR_PHY_PWRSW 24
-#define RES4373_SR_VDDM_PWRSW 25
-#define RES4373_SR_SUBCORE_PWRSW 26
-#define RES4373_SR_SLEEP 27
-#define RES4373_HT_START 28
-#define RES4373_HT_AVAIL 29
-#define RES4373_MACPHY_CLKAVAIL 30
/* SR Control0 bits */
-#define CC_SR0_4349_SR_ENG_EN_MASK 0x1
+#define CC_SR0_4349_SR_ENG_EN_MASK 0x1
#define CC_SR0_4349_SR_ENG_EN_SHIFT 0
#define CC_SR0_4349_SR_ENG_CLK_EN (1 << 1)
#define CC_SR0_4349_SR_RSRC_TRIGGER (0xC << 2)
#define CC_SR0_4349_SR_ENABLE_HT (1 << 19)
#define CC_SR0_4349_SR_ALLOW_PIC (3 << 20)
#define CC_SR0_4349_SR_PMU_MEM_DISABLE (1 << 30)
+
/* SR Control0 bits */
-#define CC_SR0_4349_SR_ENG_EN_MASK 0x1
+#define CC_SR0_4349_SR_ENG_EN_MASK 0x1
#define CC_SR0_4349_SR_ENG_EN_SHIFT 0
#define CC_SR0_4349_SR_ENG_CLK_EN (1 << 1)
#define CC_SR0_4349_SR_RSRC_TRIGGER (0xC << 2)
/* SR binary offset is at 8K */
#define CC_SR1_4349_SR_ASM_ADDR (0x10)
+
#define CST4349_CHIPMODE_SDIOD(cs) (((cs) & (1 << 6)) != 0) /* SDIO */
#define CST4349_CHIPMODE_PCIE(cs) (((cs) & (1 << 7)) != 0) /* PCIE */
-#define CST4349_SPROM_PRESENT 0x00000010
-/* 4373 related */
-#define CST4373_CHIPMODE_USB20D(cs) (((cs) & (1 << 8)) != 0) /* USB */
-#define CST4373_CHIPMODE_SDIOD(cs) (((cs) & (1 << 7)) != 0) /* SDIO */
-#define CST4373_CHIPMODE_PCIE(cs) (((cs) & (1 << 6)) != 0) /* PCIE */
-#define CST4373_SFLASH_PRESENT 0x00000010
+#define CST4349_SPROM_PRESENT 0x00000010
#define VREG4_4349_MEMLPLDO_PWRUP_MASK (1 << 31)
#define VREG4_4349_MEMLPLDO_PWRUP_SHIFT (31)
#define CC6_4349_PMU_EN_MDIO_MASK (1 << 16)
#define CC6_4349_PMU_EN_ASSERT_L2_MASK (1 << 25)
+
/* 4349 GCI function sel values */
/*
* Reference
#define CC_SR0_4364_SR_ENABLE_ILP (1 << 17)
#define CC_SR0_4364_SR_ENABLE_ALP (1 << 18)
#define CC_SR0_4364_SR_ENABLE_HT (1 << 19)
-#define CC_SR0_4364_SR_INVERT_CLK (1 << 11)
#define CC_SR0_4364_SR_ALLOW_PIC (3 << 20)
#define CC_SR0_4364_SR_PMU_MEM_DISABLE (1 << 30)
#define PMU_4364_CC3_MEMLPLDO3x3_PWRSW_FORCE_OFF (0)
#define PMU_4364_CC3_MEMLPLDO1x1_PWRSW_FORCE_OFF (0)
+
#define PMU_4364_CC5_DISABLE_BBPLL_CLKOUT6_DIV2_MASK (1 << 26)
#define PMU_4364_CC5_ENABLE_ARMCR4_DEBUG_CLK_MASK (1 << 4)
#define PMU_4364_CC5_DISABLE_BBPLL_CLKOUT6_DIV2 (1 << 26)
#define PMU_4364_VREG0_DISABLE_BT_PULL_DOWN (1 << 2)
#define PMU_4364_VREG1_DISABLE_WL_PULL_DOWN (1 << 2)
-/* Indices of PMU voltage regulator registers */
-#define PMU_VREG_0 (0u)
-#define PMU_VREG_1 (1u)
-#define PMU_VREG_2 (2u)
-#define PMU_VREG_3 (3u)
-#define PMU_VREG_4 (4u)
-#define PMU_VREG_5 (5u)
-#define PMU_VREG_6 (6u)
-#define PMU_VREG_7 (7u)
-#define PMU_VREG_8 (8u)
-#define PMU_VREG_9 (9u)
-#define PMU_VREG_10 (10u)
-#define PMU_VREG_11 (11u)
-#define PMU_VREG_12 (12u)
-#define PMU_VREG_13 (13u)
-#define PMU_VREG_14 (14u)
-#define PMU_VREG_15 (15u)
-#define PMU_VREG_16 (16u)
-
-/* 43012 Chipcommon ChipStatus bits */
-#define CST43012_FLL_LOCK (1 << 13)
-/* 43012 resources - End */
-
-/* 43012 related Cbuck modes */
-#define PMU_43012_VREG8_DYNAMIC_CBUCK_MODE0 0x00001c03
-#define PMU_43012_VREG9_DYNAMIC_CBUCK_MODE0 0x00492490
-#define PMU_43012_VREG8_DYNAMIC_CBUCK_MODE1 0x00001c03
-#define PMU_43012_VREG9_DYNAMIC_CBUCK_MODE1 0x00490410
-
-/* 43012 related dynamic cbuck mode mask */
-#define PMU_43012_VREG8_DYNAMIC_CBUCK_MODE_MASK 0xFFFFFC07
-#define PMU_43012_VREG9_DYNAMIC_CBUCK_MODE_MASK 0xFFFFFFFF
-
-/* 4369 related VREG masks */
-#define PMU_4369_VREG_5_MISCLDO_POWER_UP_MASK (1u << 11u)
-#define PMU_4369_VREG_5_MISCLDO_POWER_UP_SHIFT 11u
-#define PMU_4369_VREG_5_LPLDO_POWER_UP_MASK (1u << 27u)
-#define PMU_4369_VREG_5_LPLDO_POWER_UP_SHIFT 27u
-#define PMU_4369_VREG_5_MEMLPLDO_OP_VLT_ADJ_CTRL_MASK BCM_MASK32(31, 28)
-#define PMU_4369_VREG_5_MEMLPLDO_OP_VLT_ADJ_CTRL_SHIFT 28u
-
-#define PMU_4369_VREG_6_MEMLPLDO_POWER_UP_MASK (1u << 3u)
-#define PMU_4369_VREG_6_MEMLPLDO_POWER_UP_SHIFT 3u
-
-#define PMU_4369_VREG_7_PMU_FORCE_HP_MODE_MASK (1u << 27u)
-#define PMU_4369_VREG_7_PMU_FORCE_HP_MODE_SHIFT 27u
-#define PMU_4369_VREG_7_WL_PMU_LP_MODE_MASK (1u << 28u)
-#define PMU_4369_VREG_7_WL_PMU_LP_MODE_SHIFT 28u
-#define PMU_4369_VREG_7_WL_PMU_LV_MODE_MASK (1u << 29u)
-#define PMU_4369_VREG_7_WL_PMU_LV_MODE_SHIFT 29u
-
-#define PMU_4369_VREG8_ASR_OVADJ_LPPFM_MASK BCM_MASK32(4, 0)
-#define PMU_4369_VREG8_ASR_OVADJ_LPPFM_SHIFT 0u
-
-#define PMU_4369_VREG13_RSRC_EN_ASR_MASK4_MASK BCM_MASK32(10, 9)
-#define PMU_4369_VREG13_RSRC_EN_ASR_MASK4_SHIFT 9u
-
-#define PMU_4369_VREG14_RSRC_EN_CSR_MASK0_MASK (1u << 23u)
-#define PMU_4369_VREG14_RSRC_EN_CSR_MASK0_SHIFT 23u
-
-#define PMU_4369_VREG16_RSRC0_CBUCK_MODE_MASK BCM_MASK32(2, 0)
-#define PMU_4369_VREG16_RSRC0_CBUCK_MODE_SHIFT 0u
-#define PMU_4369_VREG16_RSRC0_ABUCK_MODE_MASK BCM_MASK32(17, 15)
-#define PMU_4369_VREG16_RSRC0_ABUCK_MODE_SHIFT 15u
-#define PMU_4369_VREG16_RSRC1_ABUCK_MODE_MASK BCM_MASK32(20, 18)
-#define PMU_4369_VREG16_RSRC1_ABUCK_MODE_SHIFT 18u
-
-/* 4364 related VREG masks */
+#define PMU_VREG_0 (0x0)
+#define PMU_VREG_1 (0x1)
+#define PMU_VREG_3 (0x3)
+#define PMU_VREG_4 (0x4)
+#define PMU_VREG_5 (0x5)
+#define PMU_VREG_6 (0x6)
+
#define PMU_4364_VREG3_DISABLE_WPT_REG_ON_PULL_DOWN (1 << 11)
#define PMU_4364_VREG4_MEMLPLDO_PU_ON (1 << 31)
#define PMU_4364_MACCORE_0_RES_REQ_MASK 0x3FCBF7FF
#define PMU_4364_MACCORE_1_RES_REQ_MASK 0x7FFB3647
-#define PMU_4364_RSDB_MODE (0)
-#define PMU_4364_1x1_MODE (1)
-#define PMU_4364_3x3_MODE (2)
-
-#define PMU_4364_MAX_MASK_1x1 (0x7FFF3E47)
-#define PMU_4364_MAX_MASK_RSDB (0x7FFFFFFF)
-#define PMU_4364_MAX_MASK_3x3 (0x3FCFFFFF)
-
-#define PMU_4364_SAVE_RESTORE_UPDNTIME_1x1 (0xC000C)
-#define PMU_4364_SAVE_RESTORE_UPDNTIME_3x3 (0xF000F)
-
-#define FORCE_CLK_ON 1
-#define FORCE_CLK_OFF 0
#define PMU1_PLL0_SWITCH_MACCLOCK_120MHZ (0)
#define PMU1_PLL0_SWITCH_MACCLOCK_160MHZ (1)
#define PMU1_PLL0_PC1_M2DIV_VALUE_120MHZ 8
#define PMU1_PLL0_PC1_M2DIV_VALUE_160MHZ 6
-/* 4347/4369 Related */
-
-/*
- * PMU VREG Definitions:
- * http://confluence.broadcom.com/display/WLAN/BCM4347+PMU+Vreg+Control+Register
- * http://confluence.broadcom.com/display/WLAN/BCM4369+PMU+Vreg+Control+Register
- */
-/* PMU VREG4 */
-#define PMU_28NM_VREG4_WL_LDO_CNTL_EN (0x1 << 10)
-
-/* PMU VREG6 */
-#define PMU_28NM_VREG6_BTLDO3P3_PU (0x1 << 12)
-
-/* PMU resources */
-#define RES4347_MEMLPLDO_PU 0
-#define RES4347_AAON 1
-#define RES4347_PMU_SLEEP 2
-#define RES4347_RESERVED_3 3
-#define RES4347_LDO3P3_PU 4
-#define RES4347_FAST_LPO_AVAIL 5
-#define RES4347_XTAL_PU 6
-#define RES4347_XTAL_STABLE 7
-#define RES4347_PWRSW_DIG 8
-#define RES4347_SR_DIG 9
-#define RES4347_SLEEP_DIG 10
-#define RES4347_PWRSW_AUX 11
-#define RES4347_SR_AUX 12
-#define RES4347_SLEEP_AUX 13
-#define RES4347_PWRSW_MAIN 14
-#define RES4347_SR_MAIN 15
-#define RES4347_SLEEP_MAIN 16
-#define RES4347_CORE_RDY_DIG 17
-#define RES4347_CORE_RDY_AUX 18
-#define RES4347_ALP_AVAIL 19
-#define RES4347_RADIO_AUX_PU 20
-#define RES4347_MINIPMU_AUX_PU 21
-#define RES4347_CORE_RDY_MAIN 22
-#define RES4347_RADIO_MAIN_PU 23
-#define RES4347_MINIPMU_MAIN_PU 24
-#define RES4347_PCIE_EP_PU 25
-#define RES4347_COLD_START_WAIT 26
-#define RES4347_ARMHTAVAIL 27
-#define RES4347_HT_AVAIL 28
-#define RES4347_MACPHY_AUX_CLK_AVAIL 29
-#define RES4347_MACPHY_MAIN_CLK_AVAIL 30
-#define RES4347_RESERVED_31 31
-
-/* 4369 PMU Resources */
-#define RES4369_DUMMY 0
-#define RES4369_ABUCK 1
-#define RES4369_PMU_SLEEP 2
-#define RES4369_MISCLDO 3
-#define RES4369_LDO3P3 4
-#define RES4369_FAST_LPO_AVAIL 5
-#define RES4369_XTAL_PU 6
-#define RES4369_XTAL_STABLE 7
-#define RES4369_PWRSW_DIG 8
-#define RES4369_SR_DIG 9
-#define RES4369_SLEEP_DIG 10
-#define RES4369_PWRSW_AUX 11
-#define RES4369_SR_AUX 12
-#define RES4369_SLEEP_AUX 13
-#define RES4369_PWRSW_MAIN 14
-#define RES4369_SR_MAIN 15
-#define RES4369_SLEEP_MAIN 16
-#define RES4369_DIG_CORE_RDY 17
-#define RES4369_CORE_RDY_AUX 18
-#define RES4369_ALP_AVAIL 19
-#define RES4369_RADIO_AUX_PU 20
-#define RES4369_MINIPMU_AUX_PU 21
-#define RES4369_CORE_RDY_MAIN 22
-#define RES4369_RADIO_MAIN_PU 23
-#define RES4369_MINIPMU_MAIN_PU 24
-#define RES4369_PCIE_EP_PU 25
-#define RES4369_COLD_START_WAIT 26
-#define RES4369_ARMHTAVAIL 27
-#define RES4369_HT_AVAIL 28
-#define RES4369_MACPHY_AUX_CLK_AVAIL 29
-#define RES4369_MACPHY_MAIN_CLK_AVAIL 30
-
-/* chip status */
#define CST4347_CHIPMODE_SDIOD(cs) (((cs) & (1 << 6)) != 0) /* SDIO */
#define CST4347_CHIPMODE_PCIE(cs) (((cs) & (1 << 7)) != 0) /* PCIE */
-#define CST4347_JTAG_STRAP_ENABLED(cs) (((cs) & (1 << 20)) != 0) /* JTAG strap st */
#define CST4347_SPROM_PRESENT 0x00000010
-/* GCI chip status */
-#define GCI_CS_4347_FLL1MHZ_LOCK_MASK (1 << 1)
-
-/* GCI chip control registers */
-#define GCI_CC7_AAON_BYPASS_PWRSW_SEL 13
-#define GCI_CC7_AAON_BYPASS_PWRSW_SEQ_ON 14
-
-/* PMU chip control registers */
-#define CC2_4347_VASIP_MEMLPLDO_VDDB_OFF_MASK (1 << 11)
-#define CC2_4347_VASIP_MEMLPLDO_VDDB_OFF_SHIFT 11
-#define CC2_4347_MAIN_MEMLPLDO_VDDB_OFF_MASK (1 << 12)
-#define CC2_4347_MAIN_MEMLPLDO_VDDB_OFF_SHIFT 12
-#define CC2_4347_AUX_MEMLPLDO_VDDB_OFF_MASK (1 << 13)
-#define CC2_4347_AUX_MEMLPLDO_VDDB_OFF_SHIFT 13
-#define CC2_4347_VASIP_VDDRET_ON_MASK (1 << 14)
-#define CC2_4347_VASIP_VDDRET_ON_SHIFT 14
-#define CC2_4347_MAIN_VDDRET_ON_MASK (1 << 15)
-#define CC2_4347_MAIN_VDDRET_ON_SHIFT 15
-#define CC2_4347_AUX_VDDRET_ON_MASK (1 << 16)
-#define CC2_4347_AUX_VDDRET_ON_SHIFT 16
-#define CC2_4347_GCI2WAKE_MASK (1 << 31)
-#define CC2_4347_GCI2WAKE_SHIFT 31
-
-#define CC2_4347_SDIO_AOS_WAKEUP_MASK (1 << 24)
-#define CC2_4347_SDIO_AOS_WAKEUP_SHIFT 24
-
-#define CC4_4347_LHL_TIMER_SELECT (1 << 0)
-
-#define CC6_4347_PWROK_WDT_EN_IN_MASK (1 << 6)
-#define CC6_4347_PWROK_WDT_EN_IN_SHIFT 6
-
-#define CC6_4347_SDIO_AOS_CHIP_WAKEUP_MASK (1 << 24)
-#define CC6_4347_SDIO_AOS_CHIP_WAKEUP_SHIFT 24
-
-#define PCIE_GPIO1_GPIO_PIN CC_GCI_GPIO_0
-#define PCIE_PERST_GPIO_PIN CC_GCI_GPIO_1
-#define PCIE_CLKREQ_GPIO_PIN CC_GCI_GPIO_2
-
-#define VREG5_4347_MEMLPLDO_ADJ_MASK 0xF0000000
-#define VREG5_4347_MEMLPLDO_ADJ_SHIFT 28
-#define VREG5_4347_LPLDO_ADJ_MASK 0x00F00000
-#define VREG5_4347_LPLDO_ADJ_SHIFT 20
-
-/* lpldo/memlpldo voltage */
-#define PMU_VREG5_LPLDO_VOLT_0_88 0xf /* 0.88v */
-#define PMU_VREG5_LPLDO_VOLT_0_86 0xe /* 0.86v */
-#define PMU_VREG5_LPLDO_VOLT_0_84 0xd /* 0.84v */
-#define PMU_VREG5_LPLDO_VOLT_0_82 0xc /* 0.82v */
-#define PMU_VREG5_LPLDO_VOLT_0_80 0xb /* 0.80v */
-#define PMU_VREG5_LPLDO_VOLT_0_78 0xa /* 0.78v */
-#define PMU_VREG5_LPLDO_VOLT_0_76 0x9 /* 0.76v */
-#define PMU_VREG5_LPLDO_VOLT_0_74 0x8 /* 0.74v */
-#define PMU_VREG5_LPLDO_VOLT_0_72 0x7 /* 0.72v */
-#define PMU_VREG5_LPLDO_VOLT_1_10 0x6 /* 1.10v */
-#define PMU_VREG5_LPLDO_VOLT_1_00 0x5 /* 1.00v */
-#define PMU_VREG5_LPLDO_VOLT_0_98 0x4 /* 0.98v */
-#define PMU_VREG5_LPLDO_VOLT_0_96 0x3 /* 0.96v */
-#define PMU_VREG5_LPLDO_VOLT_0_94 0x2 /* 0.94v */
-#define PMU_VREG5_LPLDO_VOLT_0_92 0x1 /* 0.92v */
-#define PMU_VREG5_LPLDO_VOLT_0_90 0x0 /* 0.90v */
-
-/* Save/Restore engine */
-
-#define BM_ADDR_TO_SR_ADDR(bmaddr) ((bmaddr) >> 9)
-
-/* Txfifo is 512KB for main core and 128KB for aux core
- * We use first 12kB (0x3000) in BMC buffer for template in main core and
- * 6.5kB (0x1A00) in aux core, followed by ASM code
- */
-#define SR_ASM_ADDR_MAIN_4347 (0x18)
-#define SR_ASM_ADDR_AUX_4347 (0xd)
-#define SR_ASM_ADDR_DIG_4347 (0x0)
-
-#define SR_ASM_ADDR_MAIN_4369 BM_ADDR_TO_SR_ADDR(0xC00)
-#define SR_ASM_ADDR_AUX_4369 BM_ADDR_TO_SR_ADDR(0xC00)
-#define SR_ASM_ADDR_DIG_4369 (0x0)
-
-/* 512 bytes block */
-#define SR_ASM_ADDR_BLK_SIZE_SHIFT 9
-
-/* SR Control0 bits */
-#define SR0_SR_ENG_EN_MASK 0x1
-#define SR0_SR_ENG_EN_SHIFT 0
-#define SR0_SR_ENG_CLK_EN (1 << 1)
-#define SR0_RSRC_TRIGGER (0xC << 2)
-#define SR0_WD_MEM_MIN_DIV (0x3 << 6)
-#define SR0_INVERT_SR_CLK (1 << 11)
-#define SR0_MEM_STBY_ALLOW (1 << 16)
-#define SR0_ENABLE_SR_ILP (1 << 17)
-#define SR0_ENABLE_SR_ALP (1 << 18)
-#define SR0_ENABLE_SR_HT (1 << 19)
-#define SR0_ALLOW_PIC (3 << 20)
-#define SR0_ENB_PMU_MEM_DISABLE (1 << 30)
-
-/* SR Control0 bits for 4369 */
-#define SR0_4369_SR_ENG_EN_MASK 0x1
-#define SR0_4369_SR_ENG_EN_SHIFT 0
-#define SR0_4369_SR_ENG_CLK_EN (1 << 1)
-#define SR0_4369_RSRC_TRIGGER (0xC << 2)
-#define SR0_4369_WD_MEM_MIN_DIV (0x2 << 6)
-#define SR0_4369_INVERT_SR_CLK (1 << 11)
-#define SR0_4369_MEM_STBY_ALLOW (1 << 16)
-#define SR0_4369_ENABLE_SR_ILP (1 << 17)
-#define SR0_4369_ENABLE_SR_ALP (1 << 18)
-#define SR0_4369_ENABLE_SR_HT (1 << 19)
-#define SR0_4369_ALLOW_PIC (3 << 20)
-#define SR0_4369_ENB_PMU_MEM_DISABLE (1 << 30)
-/* =========== LHL regs =========== */
-/* 4369 LHL register settings */
-#define LHL4369_UP_CNT 0
-#define LHL4369_DN_CNT 2
-#define LHL4369_PWRSW_EN_DWN_CNT (LHL4369_DN_CNT + 2)
-#define LHL4369_ISO_EN_DWN_CNT (LHL4369_PWRSW_EN_DWN_CNT + 3)
-#define LHL4369_SLB_EN_DWN_CNT (LHL4369_ISO_EN_DWN_CNT + 1)
-#define LHL4369_ASR_CLK4M_DIS_DWN_CNT (LHL4369_DN_CNT)
-#define LHL4369_ASR_LPPFM_MODE_DWN_CNT (LHL4369_DN_CNT)
-#define LHL4369_ASR_MODE_SEL_DWN_CNT (LHL4369_DN_CNT)
-#define LHL4369_ASR_MANUAL_MODE_DWN_CNT (LHL4369_DN_CNT)
-#define LHL4369_ASR_ADJ_DWN_CNT (LHL4369_DN_CNT)
-#define LHL4369_ASR_OVERI_DIS_DWN_CNT (LHL4369_DN_CNT)
-#define LHL4369_ASR_TRIM_ADJ_DWN_CNT (LHL4369_DN_CNT)
-#define LHL4369_VDDC_SW_DIS_DWN_CNT (LHL4369_SLB_EN_DWN_CNT + 1)
-#define LHL4369_VMUX_ASR_SEL_DWN_CNT (LHL4369_VDDC_SW_DIS_DWN_CNT + 1)
-#define LHL4369_CSR_ADJ_DWN_CNT (LHL4369_VMUX_ASR_SEL_DWN_CNT + 1)
-#define LHL4369_CSR_MODE_DWN_CNT (LHL4369_VMUX_ASR_SEL_DWN_CNT + 1)
-#define LHL4369_CSR_OVERI_DIS_DWN_CNT (LHL4369_VMUX_ASR_SEL_DWN_CNT + 1)
-#define LHL4369_HPBG_CHOP_DIS_DWN_CNT (LHL4369_VMUX_ASR_SEL_DWN_CNT + 1)
-#define LHL4369_SRBG_REF_SEL_DWN_CNT (LHL4369_VMUX_ASR_SEL_DWN_CNT + 1)
-#define LHL4369_PFM_PWR_SLICE_DWN_CNT (LHL4369_VMUX_ASR_SEL_DWN_CNT + 1)
-#define LHL4369_CSR_TRIM_ADJ_DWN_CNT (LHL4369_VMUX_ASR_SEL_DWN_CNT + 1)
-#define LHL4369_CSR_VOLTAGE_DWN_CNT (LHL4369_VMUX_ASR_SEL_DWN_CNT + 1)
-#define LHL4369_HPBG_PU_EN_DWN_CNT (LHL4369_CSR_MODE_DWN_CNT + 1)
-
-#define LHL4369_HPBG_PU_EN_UP_CNT (LHL4369_UP_CNT + 1)
-#define LHL4369_CSR_ADJ_UP_CNT (LHL4369_HPBG_PU_EN_UP_CNT + 1)
-#define LHL4369_CSR_MODE_UP_CNT (LHL4369_HPBG_PU_EN_UP_CNT + 1)
-#define LHL4369_CSR_OVERI_DIS_UP_CNT (LHL4369_HPBG_PU_EN_UP_CNT + 1)
-#define LHL4369_HPBG_CHOP_DIS_UP_CNT (LHL4369_HPBG_PU_EN_UP_CNT + 1)
-#define LHL4369_SRBG_REF_SEL_UP_CNT (LHL4369_HPBG_PU_EN_UP_CNT + 1)
-#define LHL4369_PFM_PWR_SLICE_UP_CNT (LHL4369_HPBG_PU_EN_UP_CNT + 1)
-#define LHL4369_CSR_TRIM_ADJ_UP_CNT (LHL4369_HPBG_PU_EN_UP_CNT + 1)
-#define LHL4369_CSR_VOLTAGE_UP_CNT (LHL4369_HPBG_PU_EN_UP_CNT + 1)
-#define LHL4369_VMUX_ASR_SEL_UP_CNT (LHL4369_CSR_MODE_UP_CNT + 1)
-#define LHL4369_VDDC_SW_DIS_UP_CNT (LHL4369_VMUX_ASR_SEL_UP_CNT + 1)
-#define LHL4369_SLB_EN_UP_CNT (LHL4369_VDDC_SW_DIS_UP_CNT + 8)
-#define LHL4369_ISO_EN_UP_CNT (LHL4369_SLB_EN_UP_CNT + 1)
-#define LHL4369_PWRSW_EN_UP_CNT (LHL4369_ISO_EN_UP_CNT + 3)
-#define LHL4369_ASR_ADJ_UP_CNT (LHL4369_PWRSW_EN_UP_CNT + 1)
-#define LHL4369_ASR_CLK4M_DIS_UP_CNT (LHL4369_PWRSW_EN_UP_CNT + 1)
-#define LHL4369_ASR_LPPFM_MODE_UP_CNT (LHL4369_PWRSW_EN_UP_CNT + 1)
-#define LHL4369_ASR_MODE_SEL_UP_CNT (LHL4369_PWRSW_EN_UP_CNT + 1)
-#define LHL4369_ASR_MANUAL_MODE_UP_CNT (LHL4369_PWRSW_EN_UP_CNT + 1)
-#define LHL4369_ASR_OVERI_DIS_UP_CNT (LHL4369_PWRSW_EN_UP_CNT + 1)
-#define LHL4369_ASR_TRIM_ADJ_UP_CNT (LHL4369_PWRSW_EN_UP_CNT + 1)
-
-/* MacResourceReqTimer0/1 */
-#define MAC_RSRC_REQ_TIMER_INT_ENAB_SHIFT 24
-#define MAC_RSRC_REQ_TIMER_FORCE_ALP_SHIFT 26
-#define MAC_RSRC_REQ_TIMER_FORCE_HT_SHIFT 27
-#define MAC_RSRC_REQ_TIMER_FORCE_HQ_SHIFT 28
-#define MAC_RSRC_REQ_TIMER_CLKREQ_GRP_SEL_SHIFT 29
-
-/* for pmu rev32 and higher */
-#define PMU32_MAC_MAIN_RSRC_REQ_TIMER ((1 << MAC_RSRC_REQ_TIMER_INT_ENAB_SHIFT) | \
- (1 << MAC_RSRC_REQ_TIMER_FORCE_ALP_SHIFT) | \
- (1 << MAC_RSRC_REQ_TIMER_FORCE_HT_SHIFT) | \
- (1 << MAC_RSRC_REQ_TIMER_FORCE_HQ_SHIFT) | \
- (0 << MAC_RSRC_REQ_TIMER_CLKREQ_GRP_SEL_SHIFT))
-
-#define PMU32_MAC_AUX_RSRC_REQ_TIMER ((1 << MAC_RSRC_REQ_TIMER_INT_ENAB_SHIFT) | \
- (1 << MAC_RSRC_REQ_TIMER_FORCE_ALP_SHIFT) | \
- (1 << MAC_RSRC_REQ_TIMER_FORCE_HT_SHIFT) | \
- (1 << MAC_RSRC_REQ_TIMER_FORCE_HQ_SHIFT) | \
- (0 << MAC_RSRC_REQ_TIMER_CLKREQ_GRP_SEL_SHIFT))
-
-/* 4369 related: 4369 parameters
- * http://www.sj.broadcom.com/projects/BCM4369/gallery_backend.RC6.0/design/backplane/pmu_params.xls
- */
-#define RES4369_DUMMY 0
-#define RES4369_ABUCK 1
-#define RES4369_PMU_SLEEP 2
-#define RES4369_MISCLDO_PU 3
-#define RES4369_LDO3P3_PU 4
-#define RES4369_FAST_LPO_AVAIL 5
-#define RES4369_XTAL_PU 6
-#define RES4369_XTAL_STABLE 7
-#define RES4369_PWRSW_DIG 8
-#define RES4369_SR_DIG 9
-#define RES4369_SLEEP_DIG 10
-#define RES4369_PWRSW_AUX 11
-#define RES4369_SR_AUX 12
-#define RES4369_SLEEP_AUX 13
-#define RES4369_PWRSW_MAIN 14
-#define RES4369_SR_MAIN 15
-#define RES4369_SLEEP_MAIN 16
-#define RES4369_DIG_CORE_RDY 17
-#define RES4369_CORE_RDY_AUX 18
-#define RES4369_ALP_AVAIL 19
-#define RES4369_RADIO_AUX_PU 20
-#define RES4369_MINIPMU_AUX_PU 21
-#define RES4369_CORE_RDY_MAIN 22
-#define RES4369_RADIO_MAIN_PU 23
-#define RES4369_MINIPMU_MAIN_PU 24
-#define RES4369_PCIE_EP_PU 25
-#define RES4369_COLD_START_WAIT 26
-#define RES4369_ARMHTAVAIL 27
-#define RES4369_HT_AVAIL 28
-#define RES4369_MACPHY_AUX_CLK_AVAIL 29
-#define RES4369_MACPHY_MAIN_CLK_AVAIL 30
-#define RES4369_RESERVED_31 31
-
-#define CST4369_CHIPMODE_SDIOD(cs) (((cs) & (1 << 6)) != 0) /* SDIO */
-#define CST4369_CHIPMODE_PCIE(cs) (((cs) & (1 << 7)) != 0) /* PCIE */
-#define CST4369_SPROM_PRESENT 0x00000010
-
-#define PMU_4369_MACCORE_0_RES_REQ_MASK 0x3FCBF7FF
-#define PMU_4369_MACCORE_1_RES_REQ_MASK 0x7FFB3647
-
/* 43430 PMU resources based on pmu_params.xls */
#define RES43430_LPLDO_PU 0
#define RES43430_BG_PU 1
#define CC2_43430_SDIO_AOS_WAKEUP_MASK (1 << 24)
#define CC2_43430_SDIO_AOS_WAKEUP_SHIFT (24)
+
#define PMU_MACCORE_0_RES_REQ_TIMER 0x1d000000
#define PMU_MACCORE_0_RES_REQ_MASK 0x5FF2364F
-#define PMU43012_MAC_RES_REQ_TIMER 0x1D000000
-#define PMU43012_MAC_RES_REQ_MASK 0x3FBBF7FF
-
#define PMU_MACCORE_1_RES_REQ_TIMER 0x1d000000
#define PMU_MACCORE_1_RES_REQ_MASK 0x5FF2364F
#define CR4_4347_RAM_BASE (0x170000)
#define CR4_4362_RAM_BASE (0x170000)
-#define CR4_4369_RAM_BASE (0x170000)
-#define CR4_4377_RAM_BASE (0x170000)
-#define CR4_43751_RAM_BASE (0x170000)
-#define CR4_43752_RAM_BASE (0x170000)
-#define CA7_4367_RAM_BASE (0x200000)
-#define CR4_4378_RAM_BASE (0x352000)
/* 4335 chip OTP present & OTP select bits. */
#define SPROM4335_OTP_SELECT 0x00000010
#define CC4335_SROM_OTP_SFLASH_CLKDIV_MASK 0x003C
#define CC4335_SROM_OTP_SFLASH_CLKDIV_SHIFT 2
+
/* 4335 chip OTP present & OTP select bits. */
#define SPROM4335_OTP_SELECT 0x00000010
#define SPROM4335_OTP_PRESENT 0x00000020
#define RES43012_XTAL_STABLE 14
#define RES43012_FCBS 15
#define RES43012_CBUCK_MODE 16
-#define RES43012_CORE_READY 17
+#define RES43012_WL_CORE_RDY 17
#define RES43012_ILP_REQ 18
#define RES43012_ALP_AVAIL 19
-#define RES43012_RADIOLDO_1P8 20
+#define RES43012_RADIO_LDO 20
#define RES43012_MINI_PMU 21
-#define RES43012_UNUSED 22
+#define RES43012_DUMMY 22
#define RES43012_SR_SAVE_RESTORE 23
-#define RES43012_PHY_PWRSW 24
-#define RES43012_VDDB_CLDO 25
-#define RES43012_SUBCORE_PWRSW 26
+#define RES43012_SR_PHY_PWRSW 24
+#define RES43012_SR_VDDB_CLDO 25
+#define RES43012_SR_SUBCORE_PWRSW 26
#define RES43012_SR_SLEEP 27
#define RES43012_HT_START 28
#define RES43012_HT_AVAIL 29
#define RES43012_MACPHY_CLK_AVAIL 30
#define CST43012_SPROM_PRESENT 0x00000010
-/* SR Control0 bits */
-#define SR0_43012_SR_ENG_EN_MASK 0x1
-#define SR0_43012_SR_ENG_EN_SHIFT 0
-#define SR0_43012_SR_ENG_CLK_EN (1 << 1)
-#define SR0_43012_SR_RSRC_TRIGGER (0xC << 2)
-#define SR0_43012_SR_WD_MEM_MIN_DIV (0x3 << 6)
-#define SR0_43012_SR_MEM_STBY_ALLOW_MSK (1 << 16)
-#define SR0_43012_SR_MEM_STBY_ALLOW_SHIFT 16
-#define SR0_43012_SR_ENABLE_ILP (1 << 17)
-#define SR0_43012_SR_ENABLE_ALP (1 << 18)
-#define SR0_43012_SR_ENABLE_HT (1 << 19)
-#define SR0_43012_SR_ALLOW_PIC (3 << 20)
-#define SR0_43012_SR_PMU_MEM_DISABLE (1 << 30)
-#define CC_43012_VDDM_PWRSW_EN_MASK (1 << 20)
-#define CC_43012_VDDM_PWRSW_EN_SHIFT (20)
-#define CC_43012_SDIO_AOS_WAKEUP_MASK (1 << 24)
-#define CC_43012_SDIO_AOS_WAKEUP_SHIFT (24)
-
-/* 43012 - offset at 5K */
-#define SR1_43012_SR_INIT_ADDR_MASK 0x3ff
-#define SR1_43012_SR_ASM_ADDR 0xA
-
/* PLL usage in 43012 */
#define PMU43012_PLL0_PC0_NDIV_INT_MASK 0x0000003f
#define PMU43012_PLL0_PC0_NDIV_INT_SHIFT 0
/* PMU Rev >= 30 */
#define PMU30_ALPCLK_ONEMHZ_ENAB 0x80000000
-#define BCM7271_PMU30_ALPCLK_ONEMHZ_ENAB 0x00010000
-
/* 43012 PMU Chip Control Registers */
#define PMUCCTL02_43012_SUBCORE_PWRSW_FORCE_ON 0x00000010
#define PMUCCTL02_43012_PHY_PWRSW_FORCE_ON 0x00000040
#define PMUCCTL02_43012_LHL_TIMER_SELECT 0x00000800
#define PMUCCTL02_43012_RFLDO3P3_PU_FORCE_ON 0x00008000
#define PMUCCTL02_43012_WL2CDIG_I_PMU_SLEEP_ENAB 0x00010000
-#define PMUCCTL02_43012_BTLDO3P3_PU_FORCE_OFF (1 << 12)
#define PMUCCTL04_43012_BBPLL_ENABLE_PWRDN 0x00100000
#define PMUCCTL04_43012_BBPLL_ENABLE_PWROFF 0x00200000
#define PMUCCTL04_43012_USE_LOCK 0x20000000
#define PMUCCTL04_43012_OPEN_LOOP_ENABLE 0x40000000
#define PMUCCTL04_43012_FORCE_OPEN_LOOP 0x80000000
-#define PMUCCTL05_43012_DISABLE_SPM_CLK (1 << 8)
-#define PMUCCTL05_43012_RADIO_DIG_CLK_GATING_EN (1 << 14)
-#define PMUCCTL06_43012_GCI2RDIG_USE_ASYNCAPB (1 << 31)
#define PMUCCTL08_43012_XTAL_CORE_SIZE_PMOS_NORMAL_MASK 0x00000FC0
#define PMUCCTL08_43012_XTAL_CORE_SIZE_PMOS_NORMAL_SHIFT 6
#define PMUCCTL08_43012_XTAL_CORE_SIZE_NMOS_NORMAL_MASK 0x00FC0000
#define PMUCCTL09_43012_XTAL_CORESIZE_BIAS_ADJ_NORMAL_SHIFT 12
#define PMUCCTL09_43012_XTAL_CORESIZE_RES_BYPASS_NORMAL_MASK 0x00000038
#define PMUCCTL09_43012_XTAL_CORESIZE_RES_BYPASS_NORMAL_SHIFT 3
+#define PMUCCTL13_43012_FCBS_UP_TRIG_EN 0x00000400
-#define PMUCCTL09_43012_XTAL_CORESIZE_BIAS_ADJ_STARTUP_MASK 0x00000FC0
-#define PMUCCTL09_43012_XTAL_CORESIZE_BIAS_ADJ_STARTUP_SHIFT 6
-/* during normal operation normal value is reduced for optimized power */
-#define PMUCCTL09_43012_XTAL_CORESIZE_BIAS_ADJ_STARTUP_VAL 0x1F
-
-#define PMUCCTL13_43012_FCBS_UP_TRIG_EN 0x00000400
-
-#define PMUCCTL14_43012_ARMCM3_RESET_INITVAL 0x00000001
-#define PMUCCTL14_43012_DOT11MAC_CLKEN_INITVAL 0x00000020
-#define PMUCCTL14_43012_DOT11MAC_PHY_CLK_EN_INITVAL 0x00000080
-#define PMUCCTL14_43012_DOT11MAC_PHY_CNTL_EN_INITVAL 0x00000200
-#define PMUCCTL14_43012_SDIOD_RESET_INIVAL 0x00000400
-#define PMUCCTL14_43012_SDIO_CLK_DMN_RESET_INITVAL 0x00001000
-#define PMUCCTL14_43012_SOCRAM_CLKEN_INITVAL 0x00004000
-#define PMUCCTL14_43012_M2MDMA_RESET_INITVAL 0x00008000
-#define PMUCCTL14_43012_DISABLE_LQ_AVAIL 0x08000000
-
-#define VREG6_43012_MEMLPLDO_ADJ_MASK 0x0000F000
-#define VREG6_43012_MEMLPLDO_ADJ_SHIFT 12
-
-#define VREG6_43012_LPLDO_ADJ_MASK 0x000000F0
-#define VREG6_43012_LPLDO_ADJ_SHIFT 4
-
-#define VREG7_43012_PWRSW_1P8_PU_MASK 0x00400000
-#define VREG7_43012_PWRSW_1P8_PU_SHIFT 22
-
-/* 4347 PMU Chip Control Registers */
-#define PMUCCTL03_4347_XTAL_CORESIZE_PMOS_NORMAL_MASK 0x001F8000
-#define PMUCCTL03_4347_XTAL_CORESIZE_PMOS_NORMAL_SHIFT 15
-#define PMUCCTL03_4347_XTAL_CORESIZE_PMOS_NORMAL_VAL 0x3F
-
-#define PMUCCTL03_4347_XTAL_CORESIZE_NMOS_NORMAL_MASK 0x07E00000
-#define PMUCCTL03_4347_XTAL_CORESIZE_NMOS_NORMAL_SHIFT 21
-#define PMUCCTL03_4347_XTAL_CORESIZE_NMOS_NORMAL_VAL 0x3F
-
-#define PMUCCTL03_4347_XTAL_SEL_BIAS_RES_NORMAL_MASK 0x38000000
-#define PMUCCTL03_4347_XTAL_SEL_BIAS_RES_NORMAL_SHIFT 27
-#define PMUCCTL03_4347_XTAL_SEL_BIAS_RES_NORMAL_VAL 0x0
-
-#define PMUCCTL00_4347_XTAL_CORESIZE_BIAS_ADJ_NORMAL_MASK 0x00000FC0
-#define PMUCCTL00_4347_XTAL_CORESIZE_BIAS_ADJ_NORMAL_SHIFT 6
-#define PMUCCTL00_4347_XTAL_CORESIZE_BIAS_ADJ_NORMAL_VAL 0x5
+#define PMUCCTL14_43012_ARMCM3_RESET_INITVAL 0x00000001
+#define PMUCCTL14_43012_DOT11MAC_CLKEN_INITVAL 0x00000020
+#define PMUCCTL14_43012_SDIOD_RESET_INIVAL 0x00000400
+#define PMUCCTL14_43012_SDIO_CLK_DMN_RESET_INITVAL 0x00001000
+#define PMUCCTL14_43012_SOCRAM_CLKEN_INITVAL 0x00004000
+#define PMUCCTL14_43012_M2MDMA_RESET_INITVAL 0x00008000
+#define PMUCCTL14_43012_DISABLE_LQ_AVAIL 0x08000000
-#define PMUCCTL00_4347_XTAL_RES_BYPASS_NORMAL_MASK 0x00038000
-#define PMUCCTL00_4347_XTAL_RES_BYPASS_NORMAL_SHIFT 15
-#define PMUCCTL00_4347_XTAL_RES_BYPASS_NORMAL_VAL 0x7
/* 4345 Chip specific ChipStatus register bits */
#define CST4345_SPROM_MASK 0x00000020
#define MUXENAB4350_HOSTWAKE_SHIFT 4
#define MUXENAB4349_UART_MASK (0xf)
+
#define CC4350_GPIO_COUNT 16
/* 4350 GCI function sel values */
#define CC4350_FNSEL_TRISTATE (15)
#define CC4350C_FNSEL_UART (3)
+
/* 4350 GPIO */
#define CC4350_PIN_GPIO_00 (0)
#define CC4350_PIN_GPIO_01 (1)
#define CC6_4345_PMU_EN_MDIO_MASK (1 << 24)
#define CC6_4345_PMU_EN_MDIO_SHIFT (24)
-/* 4347 GCI function sel values */
-#define CC4347_FNSEL_HWDEF (0)
-#define CC4347_FNSEL_SAMEASPIN (1)
-#define CC4347_FNSEL_GPIO0 (2)
-#define CC4347_FNSEL_FUART (3)
-#define CC4347_FNSEL_GCI0 (4)
-#define CC4347_FNSEL_GCI1 (5)
-#define CC4347_FNSEL_DBG_UART (6)
-#define CC4347_FNSEL_SPI (7)
-#define CC4347_FNSEL_SPROM (8)
-#define CC4347_FNSEL_MISC0 (9)
-#define CC4347_FNSEL_MISC1 (10)
-#define CC4347_FNSEL_MISC2 (11)
-#define CC4347_FNSEL_IND (12)
-#define CC4347_FNSEL_PDN (13)
-#define CC4347_FNSEL_PUP (14)
-#define CC4347_FNSEL_TRISTATE (15)
-
-/* 4347 GPIO */
-#define CC4347_PIN_GPIO_02 (2)
-#define CC4347_PIN_GPIO_03 (3)
-#define CC4347_PIN_GPIO_04 (4)
-#define CC4347_PIN_GPIO_05 (5)
-#define CC4347_PIN_GPIO_06 (6)
-#define CC4347_PIN_GPIO_07 (7)
-#define CC4347_PIN_GPIO_08 (8)
-#define CC4347_PIN_GPIO_09 (9)
-#define CC4347_PIN_GPIO_10 (10)
-#define CC4347_PIN_GPIO_11 (11)
-#define CC4347_PIN_GPIO_12 (12)
-#define CC4347_PIN_GPIO_13 (13)
/* GCI chipcontrol register indices */
#define CC_GCI_CHIPCTRL_00 (0)
#define CC_GCI_CHIPCTRL_01 (1)
#define CC_GCI_CHIPCTRL_11 (11)
#define CC_GCI_XTAL_BUFSTRG_NFC (0xff << 12)
-#define CC_GCI_04_SDIO_DRVSTR_SHIFT 15
-#define CC_GCI_04_SDIO_DRVSTR_MASK (0x0f << CC_GCI_04_SDIO_DRVSTR_SHIFT) /* 0x00078000 */
-#define CC_GCI_04_SDIO_DRVSTR_OVERRIDE_BIT (1 << 18)
-#define CC_GCI_04_SDIO_DRVSTR_DEFAULT_MA 14
-#define CC_GCI_04_SDIO_DRVSTR_MIN_MA 2
-#define CC_GCI_04_SDIO_DRVSTR_MAX_MA 16
-
#define CC_GCI_06_JTAG_SEL_SHIFT 4
#define CC_GCI_06_JTAG_SEL_MASK (1 << 4)
#define CC_GCI_NUMCHIPCTRLREGS(cap1) ((cap1 & 0xF00) >> 8)
-#define CC_GCI_03_LPFLAGS_SFLASH_MASK (0xFFFFFF << 8)
-#define CC_GCI_03_LPFLAGS_SFLASH_VAL (0xCCCCCC << 8)
-#define GPIO_CTRL_REG_DISABLE_INTERRUPT (3 << 9)
-#define GPIO_CTRL_REG_COUNT 40
-
/* GCI chipstatus register indices */
#define GCI_CHIPSTATUS_00 (0)
#define GCI_CHIPSTATUS_01 (1)
#define GCI_CHIPSTATUS_06 (6)
#define GCI_CHIPSTATUS_07 (7)
#define GCI_CHIPSTATUS_08 (8)
-#define GCI_CHIPSTATUS_09 (9)
-#define GCI_CHIPSTATUS_10 (10)
-#define GCI_CHIPSTATUS_11 (11)
-#define GCI_CHIPSTATUS_12 (12)
-#define GCI_CHIPSTATUS_13 (13)
/* 43012 GCI chipstatus registers */
#define GCI43012_CHIPSTATUS_07_BBPLL_LOCK_MASK (1 << 3)
#define GCI_CORECTRL_SOM_MASK (7 << 4) /**< SECI Op Mode */
#define GCI_CORECTRL_US_MASK (1 << 7) /**< Update SECI */
#define GCI_CORECTRL_BOS_MASK (1 << 8) /**< Break On Sleep */
-#define GCI_CORECTRL_FORCEREGCLK_MASK (1 << 18) /* ForceRegClk */
/* 4345 pins
* note: only the values set as default/used are added here.
#define CC_GCI_GPIO_14 (14)
#define CC_GCI_GPIO_15 (15)
+
/* indicates Invalid GPIO, e.g. when PAD GPIO doesn't map to GCI GPIO */
#define CC_GCI_GPIO_INVALID 0xFF
/* Extract nibble from a given position */
#define GCIGETNBL(val, pos) ((val >> pos) & 0xF)
+
/* find the 8 bit mask given the bit position */
#define GCIMASK_8B(pos) (((uint32)0xFF) << pos)
/* get the value which can be used to directly OR with chipcontrol reg */
/* Extract nibble from a given position */
#define GCIGETNBL_4B(val, pos) ((val >> pos) & 0xF)
+
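/*
 * Editorial sketch, not part of the patch: one plausible way the GCIMASK_8B()
 * and GCIGETNBL() helpers above are combined when updating a GCI chipcontrol
 * word. gci_update_byte() and its parameters are hypothetical names used only
 * for illustration.
 */
static uint32 gci_update_byte(uint32 chipctrl, uint32 new_byte, uint32 bitpos)
{
	chipctrl &= ~GCIMASK_8B(bitpos);         /* clear the 8-bit field at bitpos */
	chipctrl |= (new_byte & 0xFF) << bitpos; /* OR in the replacement value */
	return chipctrl;                         /* GCIGETNBL(chipctrl, bitpos) reads a nibble back */
}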
/* 4335 GCI Intstatus(Mask)/WakeMask Register bits. */
#define GCI_INTSTATUS_RBI (1 << 0) /**< Rx Break Interrupt */
#define GCI_INTSTATUS_UB (1 << 1) /**< UART Break Interrupt */
#define GCI_INTSTATUS_SRFAF (1 << 12) /**< SECI Rx FIFO Almost Full */
#define GCI_INTSTATUS_SRFNE (1 << 14) /**< SECI Rx FIFO Not Empty */
#define GCI_INTSTATUS_SRFOF (1 << 15) /**< SECI Rx FIFO Not Empty Timeout */
-#define GCI_INTSTATUS_EVENT (1 << 21) /* GCI Event Interrupt */
-#define GCI_INTSTATUS_LEVELWAKE (1 << 22) /* GCI Wake Level Interrupt */
-#define GCI_INTSTATUS_EVENTWAKE (1 << 23) /* GCI Wake Event Interrupt */
#define GCI_INTSTATUS_GPIOINT (1 << 25) /**< GCIGpioInt */
#define GCI_INTSTATUS_GPIOWAKE (1 << 26) /**< GCIGpioWake */
-#define GCI_INTSTATUS_LHLWLWAKE (1 << 30) /* LHL WL wake */
/* 4335 GCI IntMask Register bits. */
#define GCI_INTMASK_RBI (1 << 0) /**< Rx Break Interrupt */
#define GCI_INTMASK_SRFAF (1 << 12) /**< SECI Rx FIFO Almost Full */
#define GCI_INTMASK_SRFNE (1 << 14) /**< SECI Rx FIFO Not Empty */
#define GCI_INTMASK_SRFOF (1 << 15) /**< SECI Rx FIFO Not Empty Timeout */
-#define GCI_INTMASK_EVENT (1 << 21) /* GCI Event Interrupt */
-#define GCI_INTMASK_LEVELWAKE (1 << 22) /* GCI Wake Level Interrupt */
-#define GCI_INTMASK_EVENTWAKE (1 << 23) /* GCI Wake Event Interrupt */
#define GCI_INTMASK_GPIOINT (1 << 25) /**< GCIGpioInt */
#define GCI_INTMASK_GPIOWAKE (1 << 26) /**< GCIGpioWake */
-#define GCI_INTMASK_LHLWLWAKE (1 << 30) /* LHL WL wake */
/* 4335 GCI WakeMask Register bits. */
#define GCI_WAKEMASK_RBI (1 << 0) /**< Rx Break Interrupt */
#define GCI_WAKEMASK_SRFAF (1 << 12) /**< SECI Rx FIFO Almost Full */
#define GCI_WAKEMASK_SRFNE (1 << 14) /**< SECI Rx FIFO Not Empty */
#define GCI_WAKEMASK_SRFOF (1 << 15) /**< SECI Rx FIFO Not Empty Timeout */
-#define GCI_WAKEMASK_EVENT (1 << 21) /* GCI Event Interrupt */
-#define GCI_WAKEMASK_LEVELWAKE (1 << 22) /* GCI Wake Level Interrupt */
-#define GCI_WAKEMASK_EVENTWAKE (1 << 23) /* GCI Wake Event Interrupt */
#define GCI_WAKEMASK_GPIOINT (1 << 25) /**< GCIGpioInt */
#define GCI_WAKEMASK_GPIOWAKE (1 << 26) /**< GCIGpioWake */
-#define GCI_WAKEMASK_LHLWLWAKE (1 << 30) /* LHL WL wake */
#define GCI_WAKE_ON_GCI_GPIO1 1
#define GCI_WAKE_ON_GCI_GPIO2 2
#define GCI_WAKE_ON_GCI_GPIO8 8
#define GCI_WAKE_ON_GCI_SECI_IN 9
-#define PMU_EXT_WAKE_MASK_0_SDIO (1 << 2)
-
-/* =========== LHL regs =========== */
-#define LHL_PWRSEQCTL_SLEEP_EN (1 << 0)
-#define LHL_PWRSEQCTL_PMU_SLEEP_MODE (1 << 1)
-#define LHL_PWRSEQCTL_PMU_FINAL_PMU_SLEEP_EN (1 << 2)
-#define LHL_PWRSEQCTL_PMU_TOP_ISO_EN (1 << 3)
-#define LHL_PWRSEQCTL_PMU_TOP_SLB_EN (1 << 4)
-#define LHL_PWRSEQCTL_PMU_TOP_PWRSW_EN (1 << 5)
-#define LHL_PWRSEQCTL_PMU_CLDO_PD (1 << 6)
-#define LHL_PWRSEQCTL_PMU_LPLDO_PD (1 << 7)
-#define LHL_PWRSEQCTL_PMU_RSRC6_EN (1 << 8)
-
-#define PMU_SLEEP_MODE_0 (LHL_PWRSEQCTL_SLEEP_EN |\
- LHL_PWRSEQCTL_PMU_FINAL_PMU_SLEEP_EN)
-
-#define PMU_SLEEP_MODE_1 (LHL_PWRSEQCTL_SLEEP_EN |\
- LHL_PWRSEQCTL_PMU_SLEEP_MODE |\
- LHL_PWRSEQCTL_PMU_FINAL_PMU_SLEEP_EN |\
- LHL_PWRSEQCTL_PMU_TOP_ISO_EN |\
- LHL_PWRSEQCTL_PMU_TOP_SLB_EN |\
- LHL_PWRSEQCTL_PMU_TOP_PWRSW_EN |\
- LHL_PWRSEQCTL_PMU_CLDO_PD |\
- LHL_PWRSEQCTL_PMU_RSRC6_EN)
-
-#define PMU_SLEEP_MODE_2 (LHL_PWRSEQCTL_SLEEP_EN |\
- LHL_PWRSEQCTL_PMU_SLEEP_MODE |\
- LHL_PWRSEQCTL_PMU_FINAL_PMU_SLEEP_EN |\
- LHL_PWRSEQCTL_PMU_TOP_ISO_EN |\
- LHL_PWRSEQCTL_PMU_TOP_SLB_EN |\
- LHL_PWRSEQCTL_PMU_TOP_PWRSW_EN |\
- LHL_PWRSEQCTL_PMU_CLDO_PD |\
- LHL_PWRSEQCTL_PMU_LPLDO_PD |\
- LHL_PWRSEQCTL_PMU_RSRC6_EN)
-
-#define LHL_PWRSEQ_CTL (0x000000ff)
-
-/* LHL Top Level Power Up Control Register (lhl_top_pwrup_ctl_adr, Offset 0xE78)
-* Top Level Counter values for isolation, retention, Power Switch control
-*/
-#define LHL_PWRUP_ISOLATION_CNT (0x6 << 8)
-#define LHL_PWRUP_RETENTION_CNT (0x5 << 16)
-#define LHL_PWRUP_PWRSW_CNT (0x7 << 24)
-/* Mask is taken only for isolation 8:13 , Retention 16:21 ,
-* Power Switch control 24:29
-*/
-#define LHL_PWRUP_CTL_MASK (0x3F3F3F00)
-#define LHL_PWRUP_CTL (LHL_PWRUP_ISOLATION_CNT |\
- LHL_PWRUP_RETENTION_CNT |\
- LHL_PWRUP_PWRSW_CNT)
-
-#define LHL_PWRUP_ISOLATION_CNT_4347 (0x7 << 8)
-#define LHL_PWRUP_RETENTION_CNT_4347 (0x5 << 16)
-#define LHL_PWRUP_PWRSW_CNT_4347 (0x7 << 24)
-
-#define LHL_PWRUP_CTL_4347 (LHL_PWRUP_ISOLATION_CNT_4347 |\
- LHL_PWRUP_RETENTION_CNT_4347 |\
- LHL_PWRUP_PWRSW_CNT_4347)
-
-#define LHL_PWRUP2_CLDO_DN_CNT (0x0)
-#define LHL_PWRUP2_LPLDO_DN_CNT (0x0 << 8)
-#define LHL_PWRUP2_RSRC6_DN_CN (0x4 << 16)
-#define LHL_PWRUP2_RSRC7_DN_CN (0x0 << 24)
-#define LHL_PWRUP2_CTL_MASK (0x3F3F3F3F)
-#define LHL_PWRUP2_CTL (LHL_PWRUP2_CLDO_DN_CNT |\
- LHL_PWRUP2_LPLDO_DN_CNT |\
- LHL_PWRUP2_RSRC6_DN_CN |\
- LHL_PWRUP2_RSRC7_DN_CN)
-
-/* LHL Top Level Power Down Control Register (lhl_top_pwrdn_ctl_adr, Offset 0xE74) */
-#define LHL_PWRDN_SLEEP_CNT (0x4)
-#define LHL_PWRDN_CTL_MASK (0x3F)
-
-/* LHL Top Level Power Down Control 2 Register (lhl_top_pwrdn2_ctl_adr, Offset 0xE80) */
-#define LHL_PWRDN2_CLDO_DN_CNT (0x4)
-#define LHL_PWRDN2_LPLDO_DN_CNT (0x4 << 8)
-#define LHL_PWRDN2_RSRC6_DN_CN (0x3 << 16)
-#define LHL_PWRDN2_RSRC7_DN_CN (0x0 << 24)
-#define LHL_PWRDN2_CTL (LHL_PWRDN2_CLDO_DN_CNT |\
- LHL_PWRDN2_LPLDO_DN_CNT |\
- LHL_PWRDN2_RSRC6_DN_CN |\
- LHL_PWRDN2_RSRC7_DN_CN)
-#define LHL_PWRDN2_CTL_MASK (0x3F3F3F3F)
-
-#define LHL_FAST_WRITE_EN (1 << 14)
-
-/* WL ARM Timer0 Interrupt Mask (lhl_wl_armtim0_intrp_adr) */
-#define LHL_WL_ARMTIM0_INTRP_EN 0x00000001
-#define LHL_WL_ARMTIM0_INTRP_EDGE_TRIGGER 0x00000002
-
-/* WL MAC Timer0 Interrupt Mask (lhl_wl_mactim0_intrp_adr) */
-#define LHL_WL_MACTIM0_INTRP_EN 0x00000001
-#define LHL_WL_MACTIM0_INTRP_EDGE_TRIGGER 0x00000002
-
-/* LHL Wakeup Status (lhl_wkup_status_adr) */
-#define LHL_WKUP_STATUS_WR_PENDING_ARMTIM0 0x00100000
-
-/* WL ARM Timer0 Interrupt Status (lhl_wl_armtim0_st_adr) */
-#define LHL_WL_ARMTIM0_ST_WL_ARMTIM_INT_ST 0x00000001
-
-#define LHL_PS_MODE_0 0
-#define LHL_PS_MODE_1 1
-
-/* GCI EventIntMask Register SW bits */
-#define GCI_MAILBOXDATA_TOWLAN (1 << 0)
-#define GCI_MAILBOXDATA_TOBT (1 << 1)
-#define GCI_MAILBOXDATA_TONFC (1 << 2)
-#define GCI_MAILBOXDATA_TOGPS (1 << 3)
-#define GCI_MAILBOXDATA_TOLTE (1 << 4)
-#define GCI_MAILBOXACK_TOWLAN (1 << 8)
-#define GCI_MAILBOXACK_TOBT (1 << 9)
-#define GCI_MAILBOXACK_TONFC (1 << 10)
-#define GCI_MAILBOXACK_TOGPS (1 << 11)
-#define GCI_MAILBOXACK_TOLTE (1 << 12)
-#define GCI_WAKE_TOWLAN (1 << 16)
-#define GCI_WAKE_TOBT (1 << 17)
-#define GCI_WAKE_TONFC (1 << 18)
-#define GCI_WAKE_TOGPS (1 << 19)
-#define GCI_WAKE_TOLTE (1 << 20)
-#define GCI_SWREADY (1 << 24)
-
-/* 4349 Group (4349, 4355, 4359) GCI SECI_OUT TX Status Regiser bits */
-#define GCI_SECIOUT_TXSTATUS_TXHALT (1 << 0)
-#define GCI_SECIOUT_TXSTATUS_TI (1 << 16)
+/* 43012 ULB dividers */
+#define PMU43012_CC0_ULB_DIVMASK 0xfffffc00
+#define PMU43012_10MHZ_ULB_DIV ((1 << 0) | (1 << 5))
+#define PMU43012_5MHZ_ULB_DIV ((3 << 0) | (3 << 5))
+#define PMU43012_2P5MHZ_ULB_DIV ((7 << 0) | (7 << 5))
+#define PMU43012_ULB_NO_DIV 0
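/*
 * Editorial sketch, not part of the patch, assuming PMU43012_CC0_ULB_DIVMASK
 * covers the chipcontrol-0 bits to be preserved (everything except the divider
 * field in the low bits): selecting a ULB clock then means clearing that field
 * and OR-ing in one of the encodings above. pmu43012_set_ulb_div() is a
 * hypothetical helper shown only for illustration.
 */
static uint32 pmu43012_set_ulb_div(uint32 cc0, uint32 div_enc)
{
	cc0 &= PMU43012_CC0_ULB_DIVMASK; /* keep bits [31:10], clear the divider field */
	cc0 |= div_enc;                  /* e.g. PMU43012_10MHZ_ULB_DIV or PMU43012_ULB_NO_DIV */
	return cc0;
}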
/* 4335 MUX options. each nibble belongs to a setting. Non-zero value specifies a logic
* for now only UART for bootloader.
#define CST53573_CHIPMODE_PCIE(cs) FALSE
+
/* SECI Status (0x134) & Mask (0x138) bits - Rev 35 */
#define SECI_STAT_BI (1 << 0) /* Break Interrupt */
#define SECI_STAT_SPE (1 << 1) /* Parity Error */
#define SECI_MODE_SHIFT 4 /* (bits 5, 6, 7) */
#define SECI_UPD_SECI (1 << 7)
-#define SECI_AUX_TX_START (1 << 31)
#define SECI_SLIP_ESC_CHAR 0xDB
#define SECI_SIGNOFF_0 SECI_SLIP_ESC_CHAR
#define SECI_SIGNOFF_1 0
#define LTECX_MUX_MODE_WCI2 0x0
#define LTECX_MUX_MODE_GPIO 0x1
+
/* LTECX GPIO Information Index */
#define LTECX_NVRAM_FSYNC_IDX 0
#define LTECX_NVRAM_LTERX_IDX 1
#define GCI_GPIO_STS_FAST_EDGE_BIT 3
#define GCI_GPIO_STS_CLEAR 0xF
-#define GCI_GPIO_STS_EDGE_TRIG_BIT 0
-#define GCI_GPIO_STS_NEG_EDGE_TRIG_BIT 1
-#define GCI_GPIO_STS_DUAL_EDGE_TRIG_BIT 2
-#define GCI_GPIO_STS_WL_DIN_SELECT 6
-
#define GCI_GPIO_STS_VALUE (1 << GCI_GPIO_STS_VALUE_BIT)
/* SR Power Control */
#define SRPWR_DMN3_MACMAIN (3) /* MAC/Phy Main */
#define SRPWR_DMN3_MACMAIN_SHIFT (SRPWR_DMN3_MACMAIN) /* MAC/Phy Main */
#define SRPWR_DMN3_MACMAIN_MASK (1 << SRPWR_DMN3_MACMAIN_SHIFT) /* MAC/Phy Main */
-
-#define SRPWR_DMN4_MACSCAN (4) /* MAC/Phy Scan */
-#define SRPWR_DMN4_MACSCAN_SHIFT (SRPWR_DMN4_MACSCAN) /* MAC/Phy Scan */
-#define SRPWR_DMN4_MACSCAN_MASK (1 << SRPWR_DMN4_MACSCAN_SHIFT) /* MAC/Phy Scan */
-
-/* all power domain mask */
-#define SRPWR_DMN_ALL_MASK(sih) si_srpwr_domain_all_mask(sih)
+#define SRPWR_DMN_ALL_MASK (0xF)
#define SRPWR_REQON_SHIFT (8) /* PowerOnRequest[11:8] */
-#define SRPWR_REQON_MASK(sih) (SRPWR_DMN_ALL_MASK(sih) << SRPWR_REQON_SHIFT)
-
+#define SRPWR_REQON_MASK (SRPWR_DMN_ALL_MASK << SRPWR_REQON_SHIFT)
#define SRPWR_STATUS_SHIFT (16) /* ExtPwrStatus[19:16], RO */
-#define SRPWR_STATUS_MASK(sih) (SRPWR_DMN_ALL_MASK(sih) << SRPWR_STATUS_SHIFT)
-
-#define SRPWR_DMN_ID_SHIFT (28) /* PowerDomain[31:28], RO */
-#define SRPWR_DMN_ID_MASK (0xF)
-
-/* PMU Precision Usec Timer */
-#define PMU_PREC_USEC_TIMER_ENABLE 0x1
-
-/* FISCtrlStatus */
-#define PMU_CLEAR_FIS_DONE_SHIFT 1u
-#define PMU_CLEAR_FIS_DONE_MASK (1u << PMU_CLEAR_FIS_DONE_SHIFT)
+#define SRPWR_STATUS_MASK (SRPWR_DMN_ALL_MASK << SRPWR_STATUS_SHIFT)
+#define SRPWR_DMN_SHIFT (28) /* PowerDomain[31:28], RO */
+#define SRPWR_DMN_MASK (SRPWR_DMN_ALL_MASK << SRPWR_DMN_SHIFT)
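/*
 * Editorial sketch, not part of the patch: with the constant SRPWR_DMN_ALL_MASK
 * restored above, a power-on request for the MAC/Phy Main domain is formed by
 * shifting its domain mask into PowerOnRequest[11:8]. srpwr_request_main() is a
 * hypothetical helper shown only to illustrate the mask/shift relationship.
 */
static uint32 srpwr_request_main(uint32 srpwr_reg)
{
	srpwr_reg &= ~SRPWR_REQON_MASK;                              /* drop any previous request bits */
	srpwr_reg |= (SRPWR_DMN3_MACMAIN_MASK << SRPWR_REQON_SHIFT); /* request MAC/Phy Main */
	return srpwr_reg;
}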
#endif /* _SBCHIPC_H */
/*
* Broadcom SiliconBackplane hardware register definitions.
*
- * Copyright (C) 1999-2019, Broadcom.
- *
+ * Copyright (C) 1999-2017, Broadcom Corporation
+ *
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
* under the terms of the GNU General Public License version 2 (the "GPL"),
* available at http://www.broadcom.com/licenses/GPLv2.php, with the
* following added to such license:
- *
+ *
* As a special exception, the copyright holders of this software give you
* permission to link this software with independent modules, and to copy and
* distribute the resulting executable under terms of your choice, provided that
* the license of that module. An independent module is a module which is not
* derived from this software. The special exception does not apply to any
* modifications of the software.
- *
+ *
* Notwithstanding the above, under no circumstances may you combine this
* software in any way with any other Broadcom software provided under a license
* other than the GPL, without Broadcom's express prior written consent.
*
* <<Broadcom-WL-IPTag/Open:>>
*
- * $Id: sbconfig.h 654158 2016-08-11 09:30:01Z $
+ * $Id: sbconfig.h 530150 2015-01-29 08:43:40Z $
*/
#ifndef _SBCONFIG_H
#define _PADLINE(line) pad ## line
#define _XSTR(line) _PADLINE(line)
#define PAD _XSTR(__LINE__)
-#endif // endif
+#endif
-/* enumeration in SB is based on the premise that cores are contiguous in the
+/* enumeration in SB is based on the premise that cores are contiguos in the
* enumeration space.
*/
#define SB_BUS_SIZE 0x10000 /**< Each bus gets 64Kbytes for cores */
-#define SB_BUS_BASE(sih, b) (SI_ENUM_BASE(sih) + (b) * SB_BUS_SIZE)
+#define SB_BUS_BASE(b) (SI_ENUM_BASE + (b) * SB_BUS_SIZE)
#define SB_BUS_MAXCORES (SB_BUS_SIZE / SI_CORE_SIZE) /**< Max cores per bus */
/*
/*
* SiliconBackplane GCI core hardware definitions
*
- * Copyright (C) 1999-2019, Broadcom.
- *
+ * Copyright (C) 1999-2017, Broadcom Corporation
+ *
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
* under the terms of the GNU General Public License version 2 (the "GPL"),
* available at http://www.broadcom.com/licenses/GPLv2.php, with the
* following added to such license:
- *
+ *
* As a special exception, the copyright holders of this software give you
* permission to link this software with independent modules, and to copy and
* distribute the resulting executable under terms of your choice, provided that
* the license of that module. An independent module is a module which is not
* derived from this software. The special exception does not apply to any
* modifications of the software.
- *
+ *
* Notwithstanding the above, under no circumstances may you combine this
* software in any way with any other Broadcom software provided under a license
* other than the GPL, without Broadcom's express prior written consent.
*
* <<Broadcom-WL-IPTag/Open:>>
*
- * $Id: sbgci.h 696881 2017-04-28 17:20:35Z $
+ * $Id: sbgci.h 612498 2016-01-14 05:09:09Z $
*/
#ifndef _SBGCI_H
#define _SBGCI_H
-#include <bcmutils.h>
-
#if !defined(_LANGUAGE_ASSEMBLY) && !defined(__ASSEMBLY__)
/* cpp contortions to concatenate w/arg prescan */
#define GCI_CORE_IDX(sih) (AOB_ENAB(sih) ? si_findcoreidx(sih, GCI_CORE_ID, 0) : SI_CC_IDX)
typedef volatile struct {
- uint32 gci_corecaps0; /* 0x000 */
- uint32 gci_corecaps1; /* 0x004 */
- uint32 gci_corecaps2; /* 0x008 */
- uint32 gci_corectrl; /* 0x00c */
- uint32 gci_corestat; /* 0x010 */
- uint32 gci_intstat; /* 0x014 */
- uint32 gci_intmask; /* 0x018 */
- uint32 gci_wakemask; /* 0x01c */
- uint32 gci_levelintstat; /* 0x020 */
- uint32 gci_eventintstat; /* 0x024 */
- uint32 gci_wakelevelintstat; /* 0x028 */
- uint32 gci_wakeeventintstat; /* 0x02c */
- uint32 semaphoreintstatus; /* 0x030 */
- uint32 semaphoreintmask; /* 0x034 */
- uint32 semaphorerequest; /* 0x038 */
- uint32 semaphorereserve; /* 0x03c */
- uint32 gci_indirect_addr; /* 0x040 */
- uint32 gci_gpioctl; /* 0x044 */
- uint32 gci_gpiostatus; /* 0x048 */
- uint32 gci_gpiomask; /* 0x04c */
- uint32 gci_eventsummary; /* 0x050 */
- uint32 gci_miscctl; /* 0x054 */
- uint32 gci_gpiointmask; /* 0x058 */
- uint32 gci_gpiowakemask; /* 0x05c */
- uint32 gci_input[32]; /* 0x060 */
- uint32 gci_event[32]; /* 0x0e0 */
- uint32 gci_output[4]; /* 0x160 */
- uint32 gci_control_0; /* 0x170 */
- uint32 gci_control_1; /* 0x174 */
- uint32 gci_intpolreg; /* 0x178 */
- uint32 gci_levelintmask; /* 0x17c */
- uint32 gci_eventintmask; /* 0x180 */
- uint32 wakelevelintmask; /* 0x184 */
- uint32 wakeeventintmask; /* 0x188 */
- uint32 hwmask; /* 0x18c */
- uint32 PAD;
- uint32 gci_inbandeventintmask; /* 0x194 */
- uint32 PAD;
- uint32 gci_inbandeventstatus; /* 0x19c */
- uint32 gci_seciauxtx; /* 0x1a0 */
- uint32 gci_seciauxrx; /* 0x1a4 */
- uint32 gci_secitx_datatag; /* 0x1a8 */
- uint32 gci_secirx_datatag; /* 0x1ac */
- uint32 gci_secitx_datamask; /* 0x1b0 */
- uint32 gci_seciusef0tx_reg; /* 0x1b4 */
- uint32 gci_secif0tx_offset; /* 0x1b8 */
- uint32 gci_secif0rx_offset; /* 0x1bc */
- uint32 gci_secif1tx_offset; /* 0x1c0 */
- uint32 gci_rxfifo_common_ctrl; /* 0x1c4 */
- uint32 gci_rxfifoctrl; /* 0x1c8 */
- uint32 gci_hw_sema_status; /* 0x1cc */
- uint32 gci_seciuartescval; /* 0x1d0 */
- uint32 gic_seciuartautobaudctr; /* 0x1d4 */
- uint32 gci_secififolevel; /* 0x1d8 */
- uint32 gci_seciuartdata; /* 0x1dc */
- uint32 gci_secibauddiv; /* 0x1e0 */
- uint32 gci_secifcr; /* 0x1e4 */
- uint32 gci_secilcr; /* 0x1e8 */
- uint32 gci_secimcr; /* 0x1ec */
- uint32 gci_secilsr; /* 0x1f0 */
- uint32 gci_secimsr; /* 0x1f4 */
- uint32 gci_baudadj; /* 0x1f8 */
- uint32 gci_inbandintmask; /* 0x1fc */
- uint32 gci_chipctrl; /* 0x200 */
- uint32 gci_chipsts; /* 0x204 */
- uint32 gci_gpioout; /* 0x208 */
- uint32 gci_gpioout_read; /* 0x20C */
- uint32 gci_mpwaketx; /* 0x210 */
- uint32 gci_mpwakedetect; /* 0x214 */
- uint32 gci_seciin_ctrl; /* 0x218 */
- uint32 gci_seciout_ctrl; /* 0x21C */
- uint32 gci_seciin_auxfifo_en; /* 0x220 */
- uint32 gci_seciout_txen_txbr; /* 0x224 */
- uint32 gci_seciin_rxbrstatus; /* 0x228 */
- uint32 gci_seciin_rxerrstatus; /* 0x22C */
- uint32 gci_seciin_fcstatus; /* 0x230 */
- uint32 gci_seciout_txstatus; /* 0x234 */
- uint32 gci_seciout_txbrstatus; /* 0x238 */
- uint32 wlan_mem_info; /* 0x23C */
- uint32 wlan_bankxinfo; /* 0x240 */
- uint32 bt_smem_select; /* 0x244 */
- uint32 bt_smem_stby; /* 0x248 */
- uint32 bt_smem_status; /* 0x24C */
- uint32 wlan_bankxactivepda; /* 0x250 */
- uint32 wlan_bankxsleeppda; /* 0x254 */
- uint32 wlan_bankxkill; /* 0x258 */
- uint32 PAD[PADSZ(0x25c, 0x268)]; /* 0x25c-0x268 */
- uint32 bt_smem_control0; /* 0x26C */
- uint32 bt_smem_control1; /* 0x270 */
- uint32 PAD[PADSZ(0x274, 0x2fc)]; /* 0x274-0x2fc */
- uint32 gci_chipid; /* 0x300 */
- uint32 PAD[PADSZ(0x304, 0x30c)]; /* 0x304-0x30c */
- uint32 otpstatus; /* 0x310 */
- uint32 otpcontrol; /* 0x314 */
- uint32 otpprog; /* 0x318 */
- uint32 otplayout; /* 0x31c */
- uint32 otplayoutextension; /* 0x320 */
- uint32 otpcontrol1; /* 0x324 */
- uint32 otpprogdata; /* 0x328 */
- uint32 PAD[PADSZ(0x32c, 0x3f8)]; /* 0x32c-0x3f8 */
- uint32 otpECCstatus; /* 0x3FC */
- uint32 PAD[PADSZ(0x400, 0xbfc)]; /* 0x400-0xbfc */
- uint32 lhl_core_capab_adr; /* 0xC00 */
- uint32 lhl_main_ctl_adr; /* 0xC04 */
- uint32 lhl_pmu_ctl_adr; /* 0xC08 */
- uint32 lhl_extlpo_ctl_adr; /* 0xC0C */
- uint32 lpo_ctl_adr; /* 0xC10 */
- uint32 lhl_lpo2_ctl_adr; /* 0xC14 */
- uint32 lhl_osc32k_ctl_adr; /* 0xC18 */
- uint32 lhl_clk_status_adr; /* 0xC1C */
- uint32 lhl_clk_det_ctl_adr; /* 0xC20 */
- uint32 lhl_clk_sel_adr; /* 0xC24 */
- uint32 hidoff_cnt_adr[2]; /* 0xC28-0xC2C */
- uint32 lhl_autoclk_ctl_adr; /* 0xC30 */
- uint32 PAD; /* reserved */
- uint32 lhl_hibtim_adr; /* 0xC38 */
- uint32 lhl_wl_ilp_val_adr; /* 0xC3C */
- uint32 lhl_wl_armtim0_intrp_adr; /* 0xC40 */
- uint32 lhl_wl_armtim0_st_adr; /* 0xC44 */
- uint32 lhl_wl_armtim0_adr; /* 0xC48 */
- uint32 PAD[PADSZ(0xc4c, 0xc6c)]; /* 0xC4C-0xC6C */
+ uint32 gci_corecaps0; /* 0x000 */
+ uint32 gci_corecaps1; /* 0x004 */
+ uint32 gci_corecaps2; /* 0x008 */
+ uint32 gci_corectrl; /* 0x00c */
+ uint32 gci_corestat; /* 0x010 */
+ uint32 gci_intstat; /* 0x014 */
+ uint32 gci_intmask; /* 0x018 */
+ uint32 gci_wakemask; /* 0x01c */
+ uint32 gci_levelintstat; /* 0x020 */
+ uint32 gci_eventintstat; /* 0x024 */
+ uint32 gci_wakelevelintstat; /* 0x028 */
+ uint32 gci_wakeeventintstat; /* 0x02c */
+ uint32 semaphoreintstatus; /* 0x030 */
+ uint32 semaphoreintmask; /* 0x034 */
+ uint32 semaphorerequest; /* 0x038 */
+ uint32 semaphorereserve; /* 0x03c */
+ uint32 gci_indirect_addr; /* 0x040 */
+ uint32 gci_gpioctl; /* 0x044 */
+ uint32 gci_gpiostatus; /* 0x048 */
+ uint32 gci_gpiomask; /* 0x04c */
+ uint32 eventsummary; /* 0x050 */
+ uint32 gci_miscctl; /* 0x054 */
+ uint32 gci_gpiointmask; /* 0x058 */
+ uint32 gci_gpiowakemask; /* 0x05c */
+ uint32 gci_input[32]; /* 0x060 */
+ uint32 gci_event[32]; /* 0x0e0 */
+ uint32 gci_output[4]; /* 0x160 */
+ uint32 gci_control_0; /* 0x170 */
+ uint32 gci_control_1; /* 0x174 */
+ uint32 gci_intpolreg; /* 0x178 */
+ uint32 gci_levelintmask; /* 0x17c */
+ uint32 gci_eventintmask; /* 0x180 */
+ uint32 wakelevelintmask; /* 0x184 */
+ uint32 wakeeventintmask; /* 0x188 */
+ uint32 hwmask; /* 0x18c */
+ uint32 PAD;
+ uint32 gci_inbandeventintmask; /* 0x194 */
+ uint32 PAD;
+ uint32 gci_inbandeventstatus; /* 0x19c */
+ uint32 gci_seciauxtx; /* 0x1a0 */
+ uint32 gci_seciauxrx; /* 0x1a4 */
+ uint32 gci_secitx_datatag; /* 0x1a8 */
+ uint32 gci_secirx_datatag; /* 0x1ac */
+ uint32 gci_secitx_datamask; /* 0x1b0 */
+ uint32 gci_seciusef0tx_reg; /* 0x1b4 */
+ uint32 gci_secif0tx_offset; /* 0x1b8 */
+ uint32 gci_secif0rx_offset; /* 0x1bc */
+ uint32 gci_secif1tx_offset; /* 0x1c0 */
+ uint32 gci_rxfifo_common_ctrl; /* 0x1c4 */
+ uint32 gci_rxfifoctrl; /* 0x1c8 */
+ uint32 gci_hw_sema_status; /* 0x1cc */
+ uint32 gci_seciuartescval; /* 0x1d0 */
+ uint32 gic_seciuartautobaudctr; /* 0x1d4 */
+ uint32 gci_secififolevel; /* 0x1d8 */
+ uint32 gci_seciuartdata; /* 0x1dc */
+ uint32 gci_secibauddiv; /* 0x1e0 */
+ uint32 gci_secifcr; /* 0x1e4 */
+ uint32 gci_secilcr; /* 0x1e8 */
+ uint32 gci_secimcr; /* 0x1ec */
+ uint32 gci_secilsr; /* 0x1f0 */
+ uint32 gci_secimsr; /* 0x1f4 */
+ uint32 gci_baudadj; /* 0x1f8 */
+ uint32 gci_inbandintmask; /* 0x1fc */
+ uint32 gci_chipctrl; /* 0x200 */
+ uint32 gci_chipsts; /* 0x204 */
+ uint32 gci_gpioout; /* 0x208 */
+ uint32 gci_gpioout_read; /* 0x20C */
+ uint32 gci_mpwaketx; /* 0x210 */
+ uint32 gci_mpwakedetect; /* 0x214 */
+ uint32 gci_seciin_ctrl; /* 0x218 */
+ uint32 gci_seciout_ctrl; /* 0x21C */
+ uint32 gci_seciin_auxfifo_en; /* 0x220 */
+ uint32 gci_seciout_txen_txbr; /* 0x224 */
+ uint32 gci_seciin_rxbrstatus; /* 0x228 */
+ uint32 gci_seciin_rxerrstatus; /* 0x22C */
+ uint32 gci_seciin_fcstatus; /* 0x230 */
+ uint32 gci_seciout_txstatus; /* 0x234 */
+ uint32 gci_seciout_txbrstatus; /* 0x238 */
+ uint32 wlan_mem_info; /* 0x23C */
+ uint32 wlan_bankxinfo; /* 0x240 */
+ uint32 bt_smem_select; /* 0x244 */
+ uint32 bt_smem_stby; /* 0x248 */
+ uint32 bt_smem_status; /* 0x24C */
+ uint32 wlan_bankxactivepda; /* 0x250 */
+ uint32 wlan_bankxsleeppda; /* 0x254 */
+ uint32 wlan_bankxkill; /* 0x258 */
+ uint32 PAD[41];
+ uint32 gci_chipid; /* 0x300 */
+ uint32 PAD[3];
+ uint32 otpstatus; /* 0x310 */
+ uint32 otpcontrol; /* 0x314 */
+ uint32 otpprog; /* 0x318 */
+ uint32 otplayout; /* 0x31c */
+ uint32 otplayoutextension; /* 0x320 */
+ uint32 otpcontrol1; /* 0x324 */
+ uint32 otpprogdata; /* 0x328 */
+ uint32 PAD[52];
+ uint32 otpECCstatus; /* 0x3FC */
+ uint32 PAD[512];
+ uint32 lhl_core_capab_adr; /* 0xC00 */
+ uint32 lhl_main_ctl_adr; /* 0xC04 */
+ uint32 lhl_pmu_ctl_adr; /* 0xC08 */
+ uint32 lhl_extlpo_ctl_adr; /* 0xC0C */
+ uint32 lpo_ctl_adr; /* 0xC10 */
+ uint32 lhl_lpo2_ctl_adr; /* 0xC14 */
+ uint32 lhl_osc32k_ctl_adr; /* 0xC18 */
+ uint32 lhl_clk_status_adr; /* 0xC1C */
+ uint32 lhl_clk_det_ctl_adr; /* 0xC20 */
+ uint32 lhl_clk_sel_adr; /* 0xC24 */
+ uint32 hidoff_cnt_adr[2]; /* 0xC28-0xC2C */
+ uint32 lhl_autoclk_ctl_adr; /* 0xC30 */
+ uint32 PAD; /* reserved */
+ uint32 lhl_hibtim_adr; /* 0xC38 */
+ uint32 lhl_wl_ilp_val_adr; /* 0xC3C */
+ uint32 lhl_wl_armtim0_intrp_adr; /* 0xC40 */
+ uint32 lhl_wl_armtim0_st_adr; /* 0xC44 */
+ uint32 lhl_wl_armtim0_adr; /* 0xC48 */
+ uint32 PAD[9]; /* 0xC4C-0xC6C */
uint32 lhl_wl_mactim0_intrp_adr; /* 0xC70 */
uint32 lhl_wl_mactim0_st_adr; /* 0xC74 */
uint32 lhl_wl_mactim_int0_adr; /* 0xC78 */
uint32 lhl_wl_mactim1_st_adr; /* 0xC84 */
uint32 lhl_wl_mactim_int1_adr; /* 0xC88 */
uint32 lhl_wl_mactim_frac1_adr; /* 0xC8C */
- uint32 PAD[PADSZ(0xc90, 0xcac)]; /* 0xC90-0xCAC */
- uint32 gpio_int_en_port_adr[4]; /* 0xCB0-0xCBC */
- uint32 gpio_int_st_port_adr[4]; /* 0xCC0-0xCCC */
- uint32 gpio_ctrl_iocfg_p_adr[40]; /* 0xCD0-0xD6C */
- uint32 lhl_lp_up_ctl1_adr; /* 0xd70 */
- uint32 lhl_lp_dn_ctl1_adr; /* 0xd74 */
- uint32 PAD[PADSZ(0xd78, 0xdb4)]; /* 0xd78-0xdb4 */
- uint32 lhl_sleep_timer_adr; /* 0xDB8 */
- uint32 lhl_sleep_timer_ctl_adr; /* 0xDBC */
- uint32 lhl_sleep_timer_load_val_adr; /* 0xDC0 */
- uint32 lhl_lp_main_ctl_adr; /* 0xDC4 */
- uint32 lhl_lp_up_ctl_adr; /* 0xDC8 */
- uint32 lhl_lp_dn_ctl_adr; /* 0xDCC */
- uint32 gpio_gctrl_iocfg_p0_p39_adr; /* 0xDD0 */
- uint32 gpio_gdsctrl_iocfg_p0_p25_p30_p39_adr; /* 0xDD4 */
- uint32 gpio_gdsctrl_iocfg_p26_p29_adr; /* 0xDD8 */
- uint32 PAD[PADSZ(0xddc, 0xdf8)]; /* 0xDDC-0xDF8 */
- uint32 lhl_gpio_din0_adr; /* 0xDFC */
- uint32 lhl_gpio_din1_adr; /* 0xE00 */
- uint32 lhl_wkup_status_adr; /* 0xE04 */
- uint32 lhl_ctl_adr; /* 0xE08 */
- uint32 lhl_adc_ctl_adr; /* 0xE0C */
- uint32 lhl_qdxyz_in_dly_adr; /* 0xE10 */
- uint32 lhl_optctl_adr; /* 0xE14 */
- uint32 lhl_optct2_adr; /* 0xE18 */
- uint32 lhl_scanp_cntr_init_val_adr; /* 0xE1C */
- uint32 lhl_opt_togg_val_adr[6]; /* 0xE20-0xE34 */
- uint32 lhl_optx_smp_val_adr; /* 0xE38 */
- uint32 lhl_opty_smp_val_adr; /* 0xE3C */
- uint32 lhl_optz_smp_val_adr; /* 0xE40 */
- uint32 lhl_hidoff_keepstate_adr[3]; /* 0xE44-0xE4C */
- uint32 lhl_bt_slmboot_ctl0_adr[4]; /* 0xE50-0xE5C */
- uint32 lhl_wl_fw_ctl; /* 0xE60 */
- uint32 lhl_wl_hw_ctl_adr[2]; /* 0xE64-0xE68 */
- uint32 lhl_bt_hw_ctl_adr; /* 0xE6C */
- uint32 lhl_top_pwrseq_en_adr; /* 0xE70 */
- uint32 lhl_top_pwrdn_ctl_adr; /* 0xE74 */
- uint32 lhl_top_pwrup_ctl_adr; /* 0xE78 */
- uint32 lhl_top_pwrseq_ctl_adr; /* 0xE7C */
- uint32 lhl_top_pwrdn2_ctl_adr; /* 0xE80 */
- uint32 lhl_top_pwrup2_ctl_adr; /* 0xE84 */
- uint32 wpt_regon_intrp_cfg_adr; /* 0xE88 */
- uint32 bt_regon_intrp_cfg_adr; /* 0xE8C */
- uint32 wl_regon_intrp_cfg_adr; /* 0xE90 */
- uint32 regon_intrp_st_adr; /* 0xE94 */
- uint32 regon_intrp_en_adr; /* 0xE98 */
- uint32 PAD[PADSZ(0xe9c, 0xeb4)]; /* 0xe9c-0xeb4 */
- uint32 lhl_lp_main_ctl1_adr; /* 0xeb8 */
- uint32 lhl_lp_up_ctl2_adr; /* 0xebc */
- uint32 lhl_lp_dn_ctl2_adr; /* 0xec0 */
- uint32 lhl_lp_up_ctl3_adr; /* 0xec4 */
- uint32 lhl_lp_dn_ctl3_adr; /* 0xec8 */
- uint32 PAD[PADSZ(0xecc, 0xed8)]; /* 0xecc-0xed8 */
- uint32 lhl_lp_main_ctl2_adr; /* 0xedc */
- uint32 lhl_lp_up_ctl4_adr; /* 0xee0 */
- uint32 lhl_lp_dn_ctl4_adr; /* 0xee4 */
- uint32 lhl_lp_up_ctl5_adr; /* 0xee8 */
- uint32 lhl_lp_dn_ctl5_adr; /* 0xeec */
-
+ uint32 PAD[8]; /* 0xC90-0xCAC */
+ uint32 gpio_int_en_port_adr[4]; /* 0xCB0-0xCBC */
+ uint32 gpio_int_st_port_adr[4]; /* 0xCC0-0xCCC */
+ uint32 gpio_ctrl_iocfg_p_adr[64]; /* 0xCD0-0xDCC */
+ uint32 gpio_gctrl_iocfg_p0_p39_adr; /* 0xDD0 */
+ uint32 gpio_gdsctrl_iocfg_p0_p25_p30_p39_adr; /* 0xDD4 */
+ uint32 gpio_gdsctrl_iocfg_p26_p29_adr; /* 0xDD8 */
+ uint32 PAD[8]; /* 0xDDC-0xDF8 */
+ uint32 lhl_gpio_din0_adr; /* 0xDFC */
+ uint32 lhl_gpio_din1_adr; /* 0xE00 */
+ uint32 lhl_wkup_status_adr; /* 0xE04 */
+ uint32 lhl_ctl_adr; /* 0xE08 */
+ uint32 lhl_adc_ctl_adr; /* 0xE0C */
+ uint32 lhl_qdxyz_in_dly_adr; /* 0xE10 */
+ uint32 lhl_optctl_adr; /* 0xE14 */
+ uint32 lhl_optct2_adr; /* 0xE18 */
+ uint32 lhl_scanp_cntr_init_val_adr; /* 0xE1C */
+ uint32 lhl_opt_togg_val_adr[6]; /* 0xE20-0xE34 */
+ uint32 lhl_optx_smp_val_adr; /* 0xE38 */
+ uint32 lhl_opty_smp_val_adr; /* 0xE3C */
+ uint32 lhl_optz_smp_val_adr; /* 0xE40 */
+ uint32 lhl_hidoff_keepstate_adr[3]; /* 0xE44-0xE4C */
+ uint32 lhl_bt_slmboot_ctl0_adr[4]; /* 0xE50-0xE5C */
+ uint32 lhl_wl_fw_ctl; /* 0xE60 */
+ uint32 lhl_wl_hw_ctl_adr[2]; /* 0xE64-0xE68 */
+ uint32 lhl_bt_hw_ctl_adr; /* 0xE6C */
+ uint32 lhl_top_pwrseq_en_adr; /* 0xE70 */
+ uint32 lhl_top_pwrdn_ctl_adr; /* 0xE74 */
+ uint32 lhl_top_pwrup_ctl_adr; /* 0xE78 */
+ uint32 lhl_top_pwrseq_ctl_adr; /* 0xE7C */
+ uint32 lhl_top_pwrdn2_ctl_adr; /* 0xE80 */
+ uint32 lhl_top_pwrup2_ctl_adr; /* 0xE84 */
+ uint32 wpt_regon_intrp_cfg_adr; /* 0xE88 */
+ uint32 bt_regon_intrp_cfg_adr; /* 0xE8C */
+ uint32 wl_regon_intrp_cfg_adr; /* 0xE90 */
+ uint32 regon_intrp_st_adr; /* 0xE94 */
+ uint32 regon_intrp_en_adr; /* 0xE98 */
} gciregs_t;
#define GCI_CAP0_REV_MASK 0x000000ff
#define WLAN_MEM_INFO_REG_NUMD11MACSHM_MASK 0xFF000000
#define WLAN_MEM_INFO_REG_NUMD11MACSHM_SHIFT 24
+
#endif /* !_LANGUAGE_ASSEMBLY && !__ASSEMBLY__ */
+
#endif /* _SBGCI_H */
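/*
 * Editorial sketch, not part of the patch: gciregs_t above is a direct register
 * map, so fields are reached by casting the mapped GCI core base address and
 * reading the volatile members. gci_read_chipid() and gci_base are hypothetical
 * names; real driver code goes through its own register accessors.
 */
static uint32 gci_read_chipid(void *gci_base)
{
	gciregs_t *gci = (gciregs_t *)gci_base;
	return gci->gci_chipid; /* offset 0x300 in the map above */
}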
+++ /dev/null
-/*
- * Broadcom SiliconBackplane ARM definitions
- *
- * Copyright (C) 1999-2019, Broadcom.
- *
- * Unless you and Broadcom execute a separate written software license
- * agreement governing use of this software, this software is licensed to you
- * under the terms of the GNU General Public License version 2 (the "GPL"),
- * available at http://www.broadcom.com/licenses/GPLv2.php, with the
- * following added to such license:
- *
- * As a special exception, the copyright holders of this software give you
- * permission to link this software with independent modules, and to copy and
- * distribute the resulting executable under terms of your choice, provided that
- * you also meet, for each linked independent module, the terms and conditions of
- * the license of that module. An independent module is a module which is not
- * derived from this software. The special exception does not apply to any
- * modifications of the software.
- *
- * Notwithstanding the above, under no circumstances may you combine this
- * software in any way with any other Broadcom software provided under a license
- * other than the GPL, without Broadcom's express prior written consent.
- *
- *
- * <<Broadcom-WL-IPTag/Open:>>
- *
- * $Id: sbhndarm.h 799498 2019-01-16 06:02:27Z $
- */
-
-#ifndef _sbhndarm_h_
-#define _sbhndarm_h_
-
-#ifndef _LANGUAGE_ASSEMBLY
-
-/* cpp contortions to concatenate w/arg prescan */
-#ifndef PAD
-#define _PADLINE(line) pad ## line
-#define _XSTR(line) _PADLINE(line)
-#define PAD _XSTR(__LINE__)
-#endif /* PAD */
-
-/* cortex-m3 */
-typedef volatile struct {
- uint32 corecontrol; /* 0x0 */
- uint32 corestatus; /* 0x4 */
- uint32 PAD[1];
- uint32 biststatus; /* 0xc */
- uint32 nmiisrst; /* 0x10 */
- uint32 nmimask; /* 0x14 */
- uint32 isrmask; /* 0x18 */
- uint32 PAD[1];
- uint32 resetlog; /* 0x20 */
- uint32 gpioselect; /* 0x24 */
- uint32 gpioenable; /* 0x28 */
- uint32 PAD[1];
- uint32 bpaddrlo; /* 0x30 */
- uint32 bpaddrhi; /* 0x34 */
- uint32 bpdata; /* 0x38 */
- uint32 bpindaccess; /* 0x3c */
- uint32 ovlidx; /* 0x40 */
- uint32 ovlmatch; /* 0x44 */
- uint32 ovladdr; /* 0x48 */
- uint32 PAD[13];
- uint32 bwalloc; /* 0x80 */
- uint32 PAD[3];
- uint32 cyclecnt; /* 0x90 */
- uint32 inttimer; /* 0x94 */
- uint32 intmask; /* 0x98 */
- uint32 intstatus; /* 0x9c */
- uint32 PAD[80];
- uint32 clk_ctl_st; /* 0x1e0 */
- uint32 PAD[1];
- uint32 powerctl; /* 0x1e8 */
-} cm3regs_t;
-#define ARM_CM3_REG(regs, reg) (&((cm3regs_t *)regs)->reg)
-
-/* cortex-R4 */
-typedef volatile struct {
- uint32 corecontrol; /* 0x0 */
- uint32 corecapabilities; /* 0x4 */
- uint32 corestatus; /* 0x8 */
- uint32 biststatus; /* 0xc */
- uint32 nmiisrst; /* 0x10 */
- uint32 nmimask; /* 0x14 */
- uint32 isrmask; /* 0x18 */
- uint32 swintreg; /* 0x1C */
- uint32 intstatus; /* 0x20 */
- uint32 intmask; /* 0x24 */
- uint32 cyclecnt; /* 0x28 */
- uint32 inttimer; /* 0x2c */
- uint32 gpioselect; /* 0x30 */
- uint32 gpioenable; /* 0x34 */
- uint32 PAD[2];
- uint32 bankidx; /* 0x40 */
- uint32 bankinfo; /* 0x44 */
- uint32 bankstbyctl; /* 0x48 */
- uint32 bankpda; /* 0x4c */
- uint32 PAD[6];
- uint32 tcampatchctrl; /* 0x68 */
- uint32 tcampatchtblbaseaddr; /* 0x6c */
- uint32 tcamcmdreg; /* 0x70 */
- uint32 tcamdatareg; /* 0x74 */
- uint32 tcambankxmaskreg; /* 0x78 */
- uint32 PAD[89];
- uint32 clk_ctl_st; /* 0x1e0 */
- uint32 PAD[1];
- uint32 powerctl; /* 0x1e8 */
-} cr4regs_t;
-#define ARM_CR4_REG(regs, reg) (&((cr4regs_t *)regs)->reg)
-
-/* cortex-A7 */
-typedef volatile struct {
- uint32 corecontrol; /* 0x0 */
- uint32 corecapabilities; /* 0x4 */
- uint32 corestatus; /* 0x8 */
- uint32 tracecontrol; /* 0xc */
- uint32 PAD[8];
- uint32 gpioselect; /* 0x30 */
- uint32 gpioenable; /* 0x34 */
- uint32 PAD[106];
- uint32 clk_ctl_st; /* 0x1e0 */
- uint32 PAD[1];
- uint32 powerctl; /* 0x1e8 */
-} ca7regs_t;
-#define ARM_CA7_REG(regs, reg) (&((ca7regs_t *)regs)->reg)
-
-#if defined(__ARM_ARCH_7M__)
-#define ARMREG(regs, reg) ARM_CM3_REG(regs, reg)
-#endif /* __ARM_ARCH_7M__ */
-
-#if defined(__ARM_ARCH_7R__)
-#define ARMREG(regs, reg) ARM_CR4_REG(regs, reg)
-#endif /* __ARM_ARCH_7R__ */
-
-#if defined(__ARM_ARCH_7A__)
-#define ARMREG(regs, reg) ARM_CA7_REG(regs, reg)
-#endif /* __ARM_ARCH_7A__ */
-
-#endif /* _LANGUAGE_ASSEMBLY */
-
-#endif /* _sbhndarm_h_ */
* Generic Broadcom Home Networking Division (HND) DMA engine HW interface
* This supports the following chips: BCM42xx, 44xx, 47xx .
*
- * Copyright (C) 1999-2019, Broadcom.
- *
+ * Copyright (C) 1999-2017, Broadcom Corporation
+ *
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
* under the terms of the GNU General Public License version 2 (the "GPL"),
* available at http://www.broadcom.com/licenses/GPLv2.php, with the
* following added to such license:
- *
+ *
* As a special exception, the copyright holders of this software give you
* permission to link this software with independent modules, and to copy and
* distribute the resulting executable under terms of your choice, provided that
* the license of that module. An independent module is a module which is not
* derived from this software. The special exception does not apply to any
* modifications of the software.
- *
+ *
* Notwithstanding the above, under no circumstances may you combine this
* software in any way with any other Broadcom software provided under a license
* other than the GPL, without Broadcom's express prior written consent.
*
* <<Broadcom-WL-IPTag/Open:>>
*
- * $Id: sbhnddma.h 694506 2017-04-13 05:10:05Z $
+ * $Id: sbhnddma.h 615537 2016-01-28 00:46:34Z $
*/
#ifndef _sbhnddma_h_
* a pair of channels is defined for convenience
*/
+
/* 32 bits addressing */
/** dma registers per channel(xmt or rcv) */
#define DMA_PC_4 1
#define DMA_PC_8 2
#define DMA_PC_16 3
-#define DMA_PC_32 4
/* others: reserved */
/** Prefetch threshold */
#define D64_DEF_USBBURSTLEN 2
#define D64_DEF_SDIOBURSTLEN 1
+
#ifndef D64_USBBURSTLEN
#define D64_USBBURSTLEN DMA_BL_64
-#endif // endif
+#endif
#ifndef D64_SDIOBURSTLEN
#define D64_SDIOBURSTLEN DMA_BL_32
-#endif // endif
+#endif
/* transmit channel control */
#define D64_XC_XE 0x00000001 /**< transmit enable */
#define D64_XC_PC_SHIFT 21
#define D64_XC_PT_MASK 0x03000000 /**< Prefetch threshold */
#define D64_XC_PT_SHIFT 24
-#define D64_XC_CO_MASK 0x04000000 /**< coherent transactions for descriptors */
-#define D64_XC_CO_SHIFT 26
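/*
 * Editorial sketch, not part of the patch: the transmit channel control bits
 * above are OR-ed into the XmtControl word; a driver would normally also clear
 * the prefetch-control field (its mask is not shown in this hunk) before
 * setting a new value. d64_tx_enable() is a hypothetical name.
 */
static uint32 d64_tx_enable(uint32 xmtcontrol)
{
	xmtcontrol |= D64_XC_XE;                      /* transmit enable */
	xmtcontrol |= (DMA_PC_16 << D64_XC_PC_SHIFT); /* prefetch up to 16 descriptors */
	return xmtcontrol;
}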
/* transmit descriptor table pointer */
#define D64_XP_LD_MASK 0x00001fff /**< last valid descriptor */
#define D64_RC_SHIFT 9 /**< separate rx header descriptor enable */
#define D64_RC_OC 0x00000400 /**< overflow continue */
#define D64_RC_PD 0x00000800 /**< parity check disable */
-#define D64_RC_WAITCMP_MASK 0x00001000
-#define D64_RC_WAITCMP_SHIFT 12
#define D64_RC_SA 0x00002000 /**< select active */
#define D64_RC_GE 0x00004000 /**< Glom enable */
#define D64_RC_AE 0x00030000 /**< address extension bits */
#define D64_RC_PC_SHIFT 21
#define D64_RC_PT_MASK 0x03000000 /**< Prefetch threshold */
#define D64_RC_PT_SHIFT 24
-#define D64_RC_CO_MASK 0x04000000 /**< coherent transactions for descriptors */
-#define D64_RC_CO_SHIFT 26
-#define D64_RC_ROEXT_MASK 0x08000000 /**< receive frame offset extension bit */
-#define D64_RC_ROEXT_SHIFT 27
+#define D64_RC_WAITCMP_MASK 0x00001000
+#define D64_RC_WAITCMP_SHIFT 12
/* flags for dma controller */
#define DMA_CTRL_PEN (1 << 0) /**< parity enable */
#define DMA_CTRL_DMA_AVOIDANCE_WAR (1 << 5) /**< DMA avoidance WAR for 4331 */
#define DMA_CTRL_RXSINGLE (1 << 6) /**< always single buffer */
#define DMA_CTRL_SDIO_RXGLOM (1 << 7) /**< DMA Rx glome is enabled */
-#define DMA_CTRL_DESC_ONLY_FLAG (1 << 8) /**< For DMA which posts only descriptors,
- * no packets
- */
-#define DMA_CTRL_DESC_CD_WAR (1 << 9) /**< WAR for descriptor only DMA's CD not being
- * updated correctly by HW in CT mode.
- */
-#define DMA_CTRL_CS (1 << 10) /* channel switch enable */
-#define DMA_CTRL_ROEXT (1 << 11) /* receive frame offset extension support */
-#define DMA_CTRL_RX_ALIGN_8BYTE (1 << 12) /* RXDMA address 8-byte aligned for 43684A0 */
/* receive descriptor table pointer */
#define D64_RP_LD_MASK 0x00001fff /**< last valid descriptor */
/* descriptor control flags 1 */
#define D64_CTRL_COREFLAGS 0x0ff00000 /**< core specific flags */
-#define D64_CTRL1_COHERENT ((uint32)1 << 17) /* cache coherent per transaction */
#define D64_CTRL1_NOTPCIE ((uint32)1 << 18) /**< burst size control */
#define D64_CTRL1_EOT ((uint32)1 << 28) /**< end of descriptor table */
#define D64_CTRL1_IOC ((uint32)1 << 29) /**< interrupt on completion */
#define D64_CTRL1_SOF ((uint32)1 << 31) /**< start of frame */
/* descriptor control flags 2 */
-#define D64_CTRL2_MAX_LEN 0x0000fff7 /* Max transfer length (buffer byte count) <= 65527 */
-#define D64_CTRL2_BC_MASK 0x0000ffff /**< mask for buffer byte count */
+#define D64_CTRL2_BC_MASK 0x00007fff /**< buffer byte count. real data len must <= 16KB */
#define D64_CTRL2_AE 0x00030000 /**< address extension bits */
#define D64_CTRL2_AE_SHIFT 16
#define D64_CTRL2_PARITY 0x00040000 /* parity bit */
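/*
 * Editorial sketch, not part of the patch: with the 15-bit D64_CTRL2_BC_MASK
 * restored above, the byte count placed in descriptor ctrl2 is masked before
 * the address-extension bits are OR-ed in. d64_build_ctrl2() and its
 * parameters are hypothetical names used only for illustration.
 */
static uint32 d64_build_ctrl2(uint32 len, uint32 ae)
{
	uint32 ctrl2 = len & D64_CTRL2_BC_MASK;             /* buffer byte count */
	ctrl2 |= (ae << D64_CTRL2_AE_SHIFT) & D64_CTRL2_AE; /* address extension bits */
	return ctrl2;
}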
#define D64_RX_FRM_STS_LEN 0x0000ffff /**< frame length mask */
#define D64_RX_FRM_STS_OVFL 0x00800000 /**< RxOverFlow */
#define D64_RX_FRM_STS_DSCRCNT 0x0f000000 /**< no. of descriptors used - 1, d11corerev >= 22 */
-#define D64_RX_FRM_STS_DSCRCNT_SHIFT 24 /* Shift for no .of dma descriptor field */
#define D64_RX_FRM_STS_DATATYPE 0xf0000000 /**< core-dependent data type */
-#define BCM_D64_CTRL2_BOUND_DMA_LENGTH(len) \
-(((len) > D64_CTRL2_MAX_LEN) ? D64_CTRL2_MAX_LEN : (len))
-
/** receive frame status */
typedef volatile struct {
uint16 len;
/*
* BCM43XX Sonics SiliconBackplane PCMCIA core hardware definitions.
*
- * Copyright (C) 1999-2019, Broadcom.
- *
+ * Copyright (C) 1999-2017, Broadcom Corporation
+ *
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
* under the terms of the GNU General Public License version 2 (the "GPL"),
* available at http://www.broadcom.com/licenses/GPLv2.php, with the
* following added to such license:
- *
+ *
* As a special exception, the copyright holders of this software give you
* permission to link this software with independent modules, and to copy and
* distribute the resulting executable under terms of your choice, provided that
* the license of that module. An independent module is a module which is not
* derived from this software. The special exception does not apply to any
* modifications of the software.
- *
+ *
* Notwithstanding the above, under no circumstances may you combine this
* software in any way with any other Broadcom software provided under a license
* other than the GPL, without Broadcom's express prior written consent.
*
* <<Broadcom-WL-IPTag/Open:>>
*
- * $Id: sbpcmcia.h 647676 2016-07-07 02:59:05Z $
+ * $Id: sbpcmcia.h 616054 2016-01-29 13:22:24Z $
*/
#ifndef _SBPCMCIA_H
#define COR_BLREN 0x01
#define COR_FUNEN 0x01
+
#define PCMCIA_FCSR (2 / 2)
#define PCMCIA_PRR (4 / 2)
#define PCMCIA_SCR (6 / 2)
#define PCMCIA_ESR (8 / 2)
+
#define PCM_MEMOFF 0x0000
#define F0_MEMOFF 0x1000
#define F1_MEMOFF 0x2000
#define SRI_BLANK 0x04
#define SRI_OTP 0x80
+
#define SROM16K_BANK_SEL_MASK (3 << 11)
#define SROM16K_BANK_SHFT_MASK 11
#define SROM16K_ADDR_SEL_MASK ((1 << SROM16K_BANK_SHFT_MASK) - 1)
-#define SROM_PRSNT_MASK 0x1
-#define SROM_SUPPORT_SHIFT_MASK 30
-#define SROM_SUPPORTED (0x1 << SROM_SUPPORT_SHIFT_MASK)
-#define SROM_SIZE_MASK 0x00000006
-#define SROM_SIZE_2K 2
-#define SROM_SIZE_512 1
-#define SROM_SIZE_128 0
-#define SROM_SIZE_SHFT_MASK 1
+
+
/* Standard tuples we know about */
#define CISTPL_NULL 0x00
#define CISTPL_END 0xff /* End of the CIS tuple chain */
+
#define CISTPL_BRCM_HNBU 0x80
+
#define HNBU_BOARDREV 0x02 /* One byte board revision */
+
#define HNBU_BOARDTYPE 0x1b /* 2 bytes; boardtype */
+
#define HNBU_HNBUCIS 0x1d /* what follows is proprietary HNBU CIS format */
+
/* sbtmstatelow */
#define SBTML_INT_ACK 0x40000 /* ack the sb interrupt */
#define SBTML_INT_EN 0x20000 /* enable sb interrupt */
*
* SDIO core support 1bit, 4 bit SDIO mode as well as SPI mode.
*
- * Copyright (C) 1999-2019, Broadcom.
- *
+ * Copyright (C) 1999-2017, Broadcom Corporation
+ *
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
* under the terms of the GNU General Public License version 2 (the "GPL"),
* available at http://www.broadcom.com/licenses/GPLv2.php, with the
* following added to such license:
- *
+ *
* As a special exception, the copyright holders of this software give you
* permission to link this software with independent modules, and to copy and
* distribute the resulting executable under terms of your choice, provided that
* the license of that module. An independent module is a module which is not
* derived from this software. The special exception does not apply to any
* modifications of the software.
- *
+ *
* Notwithstanding the above, under no circumstances may you combine this
* software in any way with any other Broadcom software provided under a license
* other than the GPL, without Broadcom's express prior written consent.
*
* <<Broadcom-WL-IPTag/Open:>>
*
- * $Id: sbsdio.h 665717 2016-10-18 23:29:25Z $
+ * $Id: sbsdio.h 514727 2014-11-12 03:02:48Z $
*/
#ifndef _SBSDIO_H
#define SROM_BLANK 0x04 /* deprecated in corerev 6 */
#define SROM_OTP 0x80 /* OTP present */
+/* SBSDIO_CHIP_CTRL */
+#define SBSDIO_CHIP_CTRL_XTAL 0x01 /* or'd with onchip xtal_pu,
+ * 1: power on oscillator
+ * (for 4318 only)
+ */
/* SBSDIO_WATERMARK */
#define SBSDIO_WATERMARK_MASK 0x7f /* number of words - 1 for sd device
* to wait before sending data to host
/* direct(mapped) cis space */
#define SBSDIO_CIS_BASE_COMMON 0x1000 /* MAPPED common CIS address */
-#ifdef BCMSPI
-#define SBSDIO_CIS_SIZE_LIMIT 0x100 /* maximum bytes in one spi CIS */
-#else
#define SBSDIO_CIS_SIZE_LIMIT 0x200 /* maximum bytes in one CIS */
-#endif /* !BCMSPI */
#define SBSDIO_OTP_CIS_SIZE_LIMIT 0x078 /* maximum bytes OTP CIS */
#define SBSDIO_CIS_OFT_ADDR_MASK 0x1FFFF /* cis offset addr is < 17 bits */
* Broadcom SiliconBackplane SDIO/PCMCIA hardware-specific
* device core support
*
- * Copyright (C) 1999-2019, Broadcom.
- *
+ * Copyright (C) 1999-2017, Broadcom Corporation
+ *
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
* under the terms of the GNU General Public License version 2 (the "GPL"),
* available at http://www.broadcom.com/licenses/GPLv2.php, with the
* following added to such license:
- *
+ *
* As a special exception, the copyright holders of this software give you
* permission to link this software with independent modules, and to copy and
* distribute the resulting executable under terms of your choice, provided that
* the license of that module. An independent module is a module which is not
* derived from this software. The special exception does not apply to any
* modifications of the software.
- *
+ *
* Notwithstanding the above, under no circumstances may you combine this
* software in any way with any other Broadcom software provided under a license
* other than the GPL, without Broadcom's express prior written consent.
*
* <<Broadcom-WL-IPTag/Open:>>
*
- * $Id: sbsdpcmdev.h 616398 2016-02-01 09:37:52Z $
+ * $Id: sbsdpcmdev.h 610395 2016-01-06 22:52:57Z $
*/
#ifndef _sbsdpcmdev_h_
#define PAD _XSTR(__LINE__)
#endif /* PAD */
+
typedef volatile struct {
dma64regs_t xmt; /* dma tx */
uint32 PAD[2];
/* synchronized access to registers in SDIO clock domain */
uint32 sdioaccess; /* SdioAccess, 0x050, rev8 */
- uint32 PAD[1];
- uint32 MiscHostAccessIntEn;
- uint32 PAD[1];
+ uint32 PAD[3];
/* PCMCIA frame control */
uint8 pcmciaframectrl; /* pcmciaFrameCtrl, 0x060, rev8 */
#define SDA_F1_FBR_SPACE 0x100 /* sdioAccess F1 FBR register space */
#define SDA_F2_FBR_SPACE 0x200 /* sdioAccess F2 FBR register space */
#define SDA_F1_REG_SPACE 0x300 /* sdioAccess F1 core-specific register space */
-#define SDA_F3_FBR_SPACE 0x400 /* sdioAccess F3 FBR register space */
/* SDA_F1_REG_SPACE sdioaccess-accessible F1 reg space register offsets */
#define SDA_CHIPCONTROLDATA 0x006 /* ChipControlData */
#define SDA_SDIOWRFRAMEBCHIGH 0x01a /* SdioWrFrameBCHigh */
#define SDA_SDIORDFRAMEBCLOW 0x01b /* SdioRdFrameBCLow */
#define SDA_SDIORDFRAMEBCHIGH 0x01c /* SdioRdFrameBCHigh */
-#define SDA_MESBUSYCNTRL 0x01d /* mesBusyCntrl */
-#define SDA_WAKEUPCTRL 0x01e /* WakeupCtrl */
-#define SDA_SLEEPCSR 0x01f /* sleepCSR */
-
-/* SDA_F1_REG_SPACE register bits */
-/* sleepCSR register */
-#define SDA_SLEEPCSR_KEEP_SDIO_ON 0x1
/* SDA_F2WATERMARK */
#define SDA_F2WATERMARK_MASK 0x7f /* F2Watermark Mask */
/*
* BCM47XX Sonics SiliconBackplane embedded ram core
*
- * Copyright (C) 1999-2019, Broadcom.
- *
+ * Copyright (C) 1999-2017, Broadcom Corporation
+ *
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
* under the terms of the GNU General Public License version 2 (the "GPL"),
* available at http://www.broadcom.com/licenses/GPLv2.php, with the
* following added to such license:
- *
+ *
* As a special exception, the copyright holders of this software give you
* permission to link this software with independent modules, and to copy and
* distribute the resulting executable under terms of your choice, provided that
* the license of that module. An independent module is a module which is not
* derived from this software. The special exception does not apply to any
* modifications of the software.
- *
+ *
* Notwithstanding the above, under no circumstances may you combine this
* software in any way with any other Broadcom software provided under a license
* other than the GPL, without Broadcom's express prior written consent.
*
* <<Broadcom-WL-IPTag/Open:>>
*
- * $Id: sbsocram.h 619629 2016-02-17 18:37:56Z $
+ * $Id: sbsocram.h 604712 2015-12-08 08:05:42Z $
*/
#ifndef _SBSOCRAM_H
#define SOCRAM_BANKIDX_MEMTYPE_SHIFT 8
/* socram bankinfo memtype */
#define SOCRAM_MEMTYPE_RAM 0
-#define SOCRAM_MEMTYPE_ROM 1
+#define SOCRAM_MEMTYPE_R0M 1
#define SOCRAM_MEMTYPE_DEVRAM 2
#define SOCRAM_BANKINFO_REG 0x40
#define SOCRAM_BANKINFO_SZBASE 8192
#define SOCRAM_BANKSIZE_SHIFT 13 /* SOCRAM_BANKINFO_SZBASE */
+
#endif /* _SBSOCRAM_H */
/*
* SiliconBackplane System Memory core
*
- * Copyright (C) 1999-2019, Broadcom.
- *
+ * Copyright (C) 1999-2017, Broadcom Corporation
+ *
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
* under the terms of the GNU General Public License version 2 (the "GPL"),
* available at http://www.broadcom.com/licenses/GPLv2.php, with the
* following added to such license:
- *
+ *
* As a special exception, the copyright holders of this software give you
* permission to link this software with independent modules, and to copy and
* distribute the resulting executable under terms of your choice, provided that
* the license of that module. An independent module is a module which is not
* derived from this software. The special exception does not apply to any
* modifications of the software.
- *
+ *
* Notwithstanding the above, under no circumstances may you combine this
* software in any way with any other Broadcom software provided under a license
* other than the GPL, without Broadcom's express prior written consent.
* SDIO spec header file
* Protocol and standard (common) device definitions
*
- * Copyright (C) 1999-2019, Broadcom.
- *
+ * Copyright (C) 1999-2017, Broadcom Corporation
+ *
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
* under the terms of the GNU General Public License version 2 (the "GPL"),
* available at http://www.broadcom.com/licenses/GPLv2.php, with the
* following added to such license:
- *
+ *
* As a special exception, the copyright holders of this software give you
* permission to link this software with independent modules, and to copy and
* distribute the resulting executable under terms of your choice, provided that
* the license of that module. An independent module is a module which is not
* derived from this software. The special exception does not apply to any
* modifications of the software.
- *
+ *
* Notwithstanding the above, under no circumstances may you combine this
* software in any way with any other Broadcom software provided under a license
* other than the GPL, without Broadcom's express prior written consent.
*
* <<Broadcom-WL-IPTag/Open:>>
*
- * $Id: sdio.h 689948 2017-03-14 05:21:03Z $
+ * $Id: sdio.h 644725 2016-06-21 12:26:04Z $
*/
#ifndef _SDIO_H
#define CARDREG_STATUS_BIT_IOCURRENTSTATE0 9
#define CARDREG_STATUS_BIT_FUN_NUM_ERROR 4
+
+
#define SD_CMD_GO_IDLE_STATE 0 /* mandatory for SDIO */
#define SD_CMD_SEND_OPCOND 1
#define SD_CMD_MMC_SET_RCA 3
#define SD_RSP_R5_ERRBITS 0xCB
+
/* ------------------------------------------------
* SDIO Commands and responses
*
#define CMD52_RW_FLAG_M BITFIELD_MASK(1) /* Bit 31 - R/W flag */
#define CMD52_RW_FLAG_S 31
+
#define CMD53_BYTE_BLK_CNT_M BITFIELD_MASK(9) /* Bits [8:0] - Byte/Block Count of CMD53 */
#define CMD53_BYTE_BLK_CNT_S 0
#define CMD53_REG_ADDR_M BITFIELD_MASK(17) /* Bits [25:9] - register address */
#define RSP1_OUT_OF_RANGE_M BITFIELD_MASK(1) /* Bit 31 - Cmd arg was out of range */
#define RSP1_OUT_OF_RANGE_S 31
+
#define RSP5_DATA_M BITFIELD_MASK(8) /* Bits [0:7] - data */
#define RSP5_DATA_S 0
#define RSP5_FLAGS_M BITFIELD_MASK(8) /* Bit [15:8] - Rsp flags */
* SDIO Host Controller Spec header file
* Register map and definitions for the Standard Host Controller
*
- * Copyright (C) 1999-2019, Broadcom.
- *
+ * Copyright (C) 1999-2017, Broadcom Corporation
+ *
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
* under the terms of the GNU General Public License version 2 (the "GPL"),
* available at http://www.broadcom.com/licenses/GPLv2.php, with the
* following added to such license:
- *
+ *
* As a special exception, the copyright holders of this software give you
* permission to link this software with independent modules, and to copy and
* distribute the resulting executable under terms of your choice, provided that
* the license of that module. An independent module is a module which is not
* derived from this software. The special exception does not apply to any
* modifications of the software.
- *
+ *
* Notwithstanding the above, under no circumstances may you combine this
* software in any way with any other Broadcom software provided under a license
* other than the GPL, without Broadcom's express prior written consent.
*
* <<Broadcom-WL-IPTag/Open:>>
*
- * $Id: sdioh.h 768099 2018-06-18 13:58:07Z $
+ * $Id: sdioh.h 514727 2014-11-12 03:02:48Z $
*/
#ifndef _SDIOH_H
#define SD3_Tuning_Info_Register 0x0EC
#define SD3_WL_BT_reset_register 0x0F0
+
/* preset value indices */
#define SD3_PRESETVAL_INITIAL_IX 0
#define SD3_PRESETVAL_DESPEED_IX 1
#define SDIO_OCR_READ_FAIL (2)
+
#define CAP_ASYNCINT_SUP_M BITFIELD_MASK(1)
#define CAP_ASYNCINT_SUP_S 29
#define CAP3_RETUNING_MODES_M BITFIELD_MASK(2)
#define CAP3_RETUNING_MODES_S (46 - CAP3_MSBits_OFFSET)
-#define CAP3_RETUNING_TC_DISABLED (0x0)
-#define CAP3_RETUNING_TC_1024S (0xB)
-#define CAP3_RETUNING_TC_OTHER (0xF)
-
#define CAP3_CLK_MULT_M BITFIELD_MASK(8)
#define CAP3_CLK_MULT_S (48 - CAP3_MSBits_OFFSET)
* Structure used by apps whose drivers access SDIO drivers.
* Pulled out separately so dhdu and wlu can both use it.
*
- * Copyright (C) 1999-2019, Broadcom.
- *
+ * Copyright (C) 1999-2017, Broadcom Corporation
+ *
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
* under the terms of the GNU General Public License version 2 (the "GPL"),
* available at http://www.broadcom.com/licenses/GPLv2.php, with the
* following added to such license:
- *
+ *
* As a special exception, the copyright holders of this software give you
* permission to link this software with independent modules, and to copy and
* distribute the resulting executable under terms of your choice, provided that
* the license of that module. An independent module is a module which is not
* derived from this software. The special exception does not apply to any
* modifications of the software.
- *
+ *
* Notwithstanding the above, under no circumstances may you combine this
* software in any way with any other Broadcom software provided under a license
* other than the GPL, without Broadcom's express prior written consent.
*
* <<Broadcom-WL-IPTag/Open:>>
*
- * $Id: sdiovar.h 660496 2016-09-20 19:28:50Z $
+ * $Id: sdiovar.h 610006 2016-01-06 01:38:47Z $
*/
#ifndef _sdiovar_h_
#include <typedefs.h>
+/* require default structure packing */
+#define BWL_DEFAULT_PACKING
+#include <packed_section_start.h>
+
typedef struct sdreg {
int func;
int offset;
#define NUM_PREV_TRANSACTIONS 16
-#ifdef BCMSPI
-/* Error statistics for gSPI */
-struct spierrstats_t {
- uint32 dna; /* The requested data is not available. */
- uint32 rdunderflow; /* FIFO underflow happened due to current (F2, F3) rd command */
- uint32 wroverflow; /* FIFO underflow happened due to current (F1, F2, F3) wr command */
-
- uint32 f2interrupt; /* OR of all F2 related intr status bits. */
- uint32 f3interrupt; /* OR of all F3 related intr status bits. */
-
- uint32 f2rxnotready; /* F2 FIFO is not ready to receive data (FIFO empty) */
- uint32 f3rxnotready; /* F3 FIFO is not ready to receive data (FIFO empty) */
-
- uint32 hostcmddataerr; /* Error in command or host data, detected by CRC/checksum
- * (optional)
- */
- uint32 f2pktavailable; /* Packet is available in F2 TX FIFO */
- uint32 f3pktavailable; /* Packet is available in F3 TX FIFO */
-
- uint32 dstatus[NUM_PREV_TRANSACTIONS]; /* dstatus bits of last 16 gSPI transactions */
- uint32 spicmd[NUM_PREV_TRANSACTIONS];
-};
-#endif /* BCMSPI */
typedef struct sdio_bus_metrics {
uint32 active_dur; /* msecs */
sdio_bus_metrics_t sdio; /* stats from SDIO bus driver */
} wl_pwr_sdio_stats_t;
+#include <packed_section_end.h>
+
#endif /* _sdiovar_h_ */
/*
* SD-SPI Protocol Standard
*
- * Copyright (C) 1999-2019, Broadcom.
- *
+ * Copyright (C) 1999-2017, Broadcom Corporation
+ *
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
* under the terms of the GNU General Public License version 2 (the "GPL"),
* available at http://www.broadcom.com/licenses/GPLv2.php, with the
* following added to such license:
- *
+ *
* As a special exception, the copyright holders of this software give you
* permission to link this software with independent modules, and to copy and
* distribute the resulting executable under terms of your choice, provided that
* the license of that module. An independent module is a module which is not
* derived from this software. The special exception does not apply to any
* modifications of the software.
- *
+ *
* Notwithstanding the above, under no circumstances may you combine this
* software in any way with any other Broadcom software provided under a license
* other than the GPL, without Broadcom's express prior written consent.
* Misc utility routines for accessing the SOC Interconnects
* of Broadcom HNBU chips.
*
- * Copyright (C) 1999-2019, Broadcom.
- *
+ * Copyright (C) 1999-2017, Broadcom Corporation
+ *
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
* under the terms of the GNU General Public License version 2 (the "GPL"),
* available at http://www.broadcom.com/licenses/GPLv2.php, with the
* following added to such license:
- *
+ *
* As a special exception, the copyright holders of this software give you
* permission to link this software with independent modules, and to copy and
* distribute the resulting executable under terms of your choice, provided that
* the license of that module. An independent module is a module which is not
* derived from this software. The special exception does not apply to any
* modifications of the software.
- *
+ *
* Notwithstanding the above, under no circumstances may you combine this
* software in any way with any other Broadcom software provided under a license
* other than the GPL, without Broadcom's express prior written consent.
*
* <<Broadcom-WL-IPTag/Open:>>
*
- * $Id: siutils.h 798061 2019-01-04 23:27:15Z $
+ * $Id: siutils.h 668442 2016-11-03 08:42:43Z $
*/
#ifndef _siutils_h_
#include "wlioctl.h"
#endif /* SR_DEBUG */
+
#define WARM_BOOT 0xA0B0C0D0
#ifdef BCM_BACKPLANE_TIMEOUT
uint boardvendor; /**< board vendor */
uint boardflags; /**< board flags */
uint boardflags2; /**< board flags2 */
- uint boardflags4; /**< board flags4 */
uint chip; /**< chip number */
uint chiprev; /**< chip revision */
uint chippkg; /**< chip package option */
uint socirev; /**< SOC interconnect rev */
bool pci_pr32414;
int gcirev; /**< gci core rev */
- int lpflags; /**< low power flags */
- uint32 enum_base; /**< backplane address where the chipcommon core resides */
-
#ifdef BCM_BACKPLANE_TIMEOUT
si_axi_error_info_t * err_info;
#endif /* BCM_BACKPLANE_TIMEOUT */
-
- bool _multibp_enable;
};
/* for the HIGH_ONLY driver, the si_t must be writable to allow state sync from BMAC to the HIGH driver;
 * for the monolithic driver, it is read-only to prevent accidental changes
*/
-typedef struct si_pub si_t;
+typedef const struct si_pub si_t;
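A minimal sketch of what the const typedef buys the monolithic build: public fields of the handle stay readable, while any state change has to go through the si_* functions declared below (illustration only, not from the header):

/* Sketch: read-only use of the handle compiles; direct writes do not. */
static uint silicon_revision(si_t *sih)
{
        /* sih->chiprev = 0;   rejected by the compiler: sih points to const */
        return sih->chiprev;
}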
/*
* Many of the routines below take an 'sih' handle as their first arg.
#define PMUCTL_ENAB(sih) (BCMPMUCTL)
#else
#define PMUCTL_ENAB(sih) ((sih)->cccaps & CC_CAP_PMU)
-#endif // endif
+#endif
#if defined(BCMAOBENAB)
#define AOB_ENAB(sih) (BCMAOBENAB)
#else
#define CCCTL_ENAB(sih) ((sih)->cccaps & CC_CAP_PWR_CTL)
#define CCPLL_ENAB(sih) ((sih)->cccaps & CC_CAP_PLL_MASK)
-#endif // endif
+#endif
typedef void (*gci_gpio_handler_t)(uint32 stat, void *arg);
#define GPIO_CTRL_7_6_EN_MASK 0xC0
#define GPIO_OUT_7_EN_MASK 0x80
+
+
/* CR4 specific defines used by the host driver */
#define SI_CR4_CAP (0x04)
#define SI_CR4_BANKIDX (0x40)
#define ARMCR4_TCBANB_SHIFT 0
#define SICF_CPUHALT (0x0020)
-#define ARMCR4_BSZ_MASK 0x7f
-#define ARMCR4_BUNITSZ_MASK 0x200
-#define ARMCR4_BSZ_8K 8192
-#define ARMCR4_BSZ_1K 1024
+#define ARMCR4_BSZ_MASK 0x3f
+#define ARMCR4_BSZ_MULT 8192
#define SI_BPIND_1BYTE 0x1
#define SI_BPIND_2BYTE 0x3
#define SI_BPIND_4BYTE 0xF
-
-#define GET_GCI_OFFSET(sih, gci_reg) \
- (AOB_ENAB(sih)? OFFSETOF(gciregs_t, gci_reg) : OFFSETOF(chipcregs_t, gci_reg))
-
-#define GET_GCI_CORE(sih) \
- (AOB_ENAB(sih)? si_findcoreidx(sih, GCI_CORE_ID, 0) : SI_CC_IDX)
-
#include <osl_decl.h>
/* === exported functions === */
extern si_t *si_attach(uint pcidev, osl_t *osh, volatile void *regs, uint bustype,
void *sdh, char **vars, uint *varsz);
extern si_t *si_kattach(osl_t *osh);
extern void si_detach(si_t *sih);
+extern bool si_pci_war16165(si_t *sih);
extern volatile void *
si_d11_switch_addrbase(si_t *sih, uint coreunit);
extern uint si_corelist(si_t *sih, uint coreid[]);
extern uint si_coreunit(si_t *sih);
extern uint si_corevendor(si_t *sih);
extern uint si_corerev(si_t *sih);
-extern uint si_corerev_minor(si_t *sih);
extern void *si_osh(si_t *sih);
extern void si_setosh(si_t *sih, osl_t *osh);
-extern int si_backplane_access(si_t *sih, uint addr, uint size,
+extern uint si_backplane_access(si_t *sih, uint addr, uint size,
uint *val, bool read);
extern uint si_corereg(si_t *sih, uint coreidx, uint regoff, uint mask, uint val);
-extern uint si_corereg_writeonly(si_t *sih, uint coreidx, uint regoff, uint mask, uint val);
extern uint si_pmu_corereg(si_t *sih, uint32 idx, uint regoff, uint mask, uint val);
extern volatile uint32 *si_corereg_addr(si_t *sih, uint coreidx, uint regoff);
extern volatile void *si_coreregs(si_t *sih);
extern uint si_findcoreidx(si_t *sih, uint coreid, uint coreunit);
extern volatile void *si_setcoreidx(si_t *sih, uint coreidx);
extern volatile void *si_setcore(si_t *sih, uint coreid, uint coreunit);
-extern uint32 si_oobr_baseaddr(si_t *sih, bool second);
extern volatile void *si_switch_core(si_t *sih, uint coreid, uint *origidx, uint *intr_val);
extern void si_restore_core(si_t *sih, uint coreid, uint intr_val);
extern int si_numaddrspaces(si_t *sih);
-extern uint32 si_addrspace(si_t *sih, uint spidx, uint baidx);
-extern uint32 si_addrspacesize(si_t *sih, uint spidx, uint baidx);
+extern uint32 si_addrspace(si_t *sih, uint asidx);
+extern uint32 si_addrspacesize(si_t *sih, uint asidx);
extern void si_coreaddrspaceX(si_t *sih, uint asidx, uint32 *addr, uint32 *size);
extern int si_corebist(si_t *sih);
extern void si_core_reset(si_t *sih, uint32 bits, uint32 resetbits);
extern void si_core_disable(si_t *sih, uint32 bits);
extern uint32 si_clock_rate(uint32 pll_type, uint32 n, uint32 m);
extern uint si_chip_hostif(si_t *sih);
+extern bool si_read_pmu_autopll(si_t *sih);
extern uint32 si_clock(si_t *sih);
extern uint32 si_alp_clock(si_t *sih); /* returns [Hz] units */
extern uint32 si_ilp_clock(si_t *sih); /* returns [Hz] units */
extern void si_gci_uart_init(si_t *sih, osl_t *osh, uint8 seci_mode);
extern void si_gci_enable_gpio(si_t *sih, uint8 gpio, uint32 mask, uint32 value);
extern uint8 si_gci_host_wake_gpio_init(si_t *sih);
-extern uint8 si_gci_time_sync_gpio_init(si_t *sih);
extern void si_gci_host_wake_gpio_enable(si_t *sih, uint8 gpio, bool state);
-extern void si_gci_time_sync_gpio_enable(si_t *sih, uint8 gpio, bool state);
extern void si_invalidate_second_bar0win(si_t *sih);
-extern void si_gci_shif_config_wake_pin(si_t *sih, uint8 gpio_n,
- uint8 wake_events, bool gci_gpio);
-extern void si_shif_int_enable(si_t *sih, uint8 gpio_n, uint8 wake_events, bool enable);
-
/* GCI interrupt handlers */
extern void si_gci_handler_process(si_t *sih);
-extern void si_enable_gpio_wake(si_t *sih, uint8 *wake_mask, uint8 *cur_status, uint8 gci_gpio,
- uint32 pmu_cc2_mask, uint32 pmu_cc2_value);
-
/* GCI GPIO event handlers */
extern void *si_gci_gpioint_handler_register(si_t *sih, uint8 gpio, uint8 sts,
gci_gpio_handler_t cb, void *arg);
extern void si_gci_gpioint_handler_unregister(si_t *sih, void* gci_i);
-
extern uint8 si_gci_gpio_status(si_t *sih, uint8 gci_gpio, uint8 mask, uint8 value);
-extern void si_gci_config_wake_pin(si_t *sih, uint8 gpio_n, uint8 wake_events,
- bool gci_gpio);
-extern void si_gci_free_wake_pin(si_t *sih, uint8 gpio_n);
/* Wake-on-wireless-LAN (WOWL) */
extern bool si_pci_pmecap(si_t *sih);
extern uint si_pcie_writereg(void *sih, uint addrtype, uint offset, uint val);
extern void si_deepsleep_count(si_t *sih, bool arm_wakeup);
+
#ifdef BCMSDIO
extern void si_sdio_init(si_t *sih);
-#endif // endif
+#endif
extern uint16 si_d11_devid(si_t *sih);
extern int si_corepciid(si_t *sih, uint func, uint16 *pcivendor, uint16 *pcidevice,
#define TSMC_FAB12 0x2 /**< TSMC Fab12/Fab14 chip */
#define SMIC_FAB4 0x3 /**< SMIC Fab4 chip */
+extern int si_otp_fabid(si_t *sih, uint16 *fabid, bool rw);
extern uint16 si_fabid(si_t *sih);
extern uint16 si_chipid(si_t *sih);
extern int si_getdevpathintvar(si_t *sih, const char *name);
extern char *si_coded_devpathvar(si_t *sih, char *varname, int var_len, const char *name);
+
extern uint8 si_pcieclkreq(si_t *sih, uint32 mask, uint32 val);
extern uint32 si_pcielcreg(si_t *sih, uint32 mask, uint32 val);
extern uint8 si_pcieltrenable(si_t *sih, uint32 mask, uint32 val);
extern void si_chippkg_set(si_t *sih, uint);
extern bool si_is_warmboot(void);
+extern void si_chipcontrl_btshd0_4331(si_t *sih, bool on);
extern void si_chipcontrl_restore(si_t *sih, uint32 val);
extern uint32 si_chipcontrl_read(si_t *sih);
+extern void si_chipcontrl_epa4331(si_t *sih, bool on);
+extern void si_chipcontrl_epa4331_wowl(si_t *sih, bool enter_wowl);
extern void si_chipcontrl_srom4360(si_t *sih, bool on);
-extern void si_srom_clk_set(si_t *sih); /**< for chips with fast BP clock */
+extern void si_clk_srom4365(si_t *sih);
+/* Enable BT-COEX & Ex-PA for 4313 */
+extern void si_epa_4313war(si_t *sih);
extern void si_btc_enable_chipcontrol(si_t *sih);
+/* BT/WL selection for 4313 bt combo >= P250 boards */
+extern void si_btcombo_p250_4313_war(si_t *sih);
+extern void si_btcombo_43228_war(si_t *sih);
+extern void si_clk_pmu_htavail_set(si_t *sih, bool set_clear);
extern void si_pmu_avb_clk_set(si_t *sih, osl_t *osh, bool set_flag);
+extern void si_pmu_synth_pwrsw_4313_war(si_t *sih);
+extern uint si_pll_reset(si_t *sih);
/* === debug routines === */
extern bool si_taclear(si_t *sih, bool details);
extern int si_dump_pcieinfo(si_t *sih, struct bcmstrbuf *b);
extern void si_dump_pmuregs(si_t *sih, struct bcmstrbuf *b);
extern int si_dump_pcieregs(si_t *sih, struct bcmstrbuf *b);
-#endif // endif
+#endif
#if defined(BCMDBG_PHYDUMP)
extern void si_dumpregs(si_t *sih, struct bcmstrbuf *b);
-#endif // endif
+#endif
extern uint32 si_ccreg(si_t *sih, uint32 offset, uint32 mask, uint32 val);
extern uint32 si_pciereg(si_t *sih, uint32 offset, uint32 mask, uint32 val, uint type);
extern int si_pcie_configspace_restore(si_t *sih);
extern int si_pcie_configspace_get(si_t *sih, uint8 *buf, uint size);
+
#ifdef BCM_BACKPLANE_TIMEOUT
extern const si_axi_error_info_t * si_get_axi_errlog_info(si_t *sih);
extern void si_reset_axi_errlog_info(si_t * sih);
#endif /* BCM_BACKPLANE_TIMEOUT */
-extern void si_update_backplane_timeouts(si_t *sih, bool enable, uint32 timeout, uint32 cid);
-
extern uint32 si_tcm_size(si_t *sih);
extern bool si_has_flops(si_t *sih);
extern uint32 si_gci_int_enable(si_t *sih, bool enable);
extern void si_gci_reset(si_t *sih);
#ifdef BCMLTECOEX
+extern void si_gci_seci_init(si_t *sih);
extern void si_ercx_init(si_t *sih, uint32 ltecx_mux, uint32 ltecx_padnum,
uint32 ltecx_fnsel, uint32 ltecx_gcigpio);
#endif /* BCMLTECOEX */
-extern void si_gci_seci_init(si_t *sih);
extern void si_wci2_init(si_t *sih, uint8 baudrate, uint32 ltecx_mux, uint32 ltecx_padnum,
- uint32 ltecx_fnsel, uint32 ltecx_gcigpio, uint32 xtalfreq);
+ uint32 ltecx_fnsel, uint32 ltecx_gcigpio);
extern bool si_btcx_wci2_init(si_t *sih);
extern uint32 si_gci_chipcontrol(si_t *sih, uint reg, uint32 mask, uint32 val);
extern uint32 si_gci_chipstatus(si_t *sih, uint reg);
extern uint8 si_enable_device_wake(si_t *sih, uint8 *wake_status, uint8 *cur_status);
-extern uint8 si_get_device_wake_opt(si_t *sih);
extern void si_swdenable(si_t *sih, uint32 swdflag);
extern uint8 si_enable_perst_wake(si_t *sih, uint8 *perst_wake_mask, uint8 *perst_cur_status);
void si_force_islanding(si_t *sih, bool enable);
extern uint32 si_pmu_res_req_timer_clr(si_t *sih);
extern void si_pmu_rfldo(si_t *sih, bool on);
+extern void si_survive_perst_war(si_t *sih, bool reset, uint32 sperst_mask, uint32 spert_val);
extern uint32 si_pcie_set_ctrlreg(si_t *sih, uint32 sperst_mask, uint32 spert_val);
extern void si_pcie_ltr_war(si_t *sih);
extern void si_pcie_hw_LTR_war(si_t *sih);
#ifdef WLRSDB
extern void si_d11rsdb_core_disable(si_t *sih, uint32 bits);
extern void si_d11rsdb_core_reset(si_t *sih, uint32 bits, uint32 resetbits);
-extern void set_secondary_d11_core(si_t *sih, volatile void **secmap, volatile void **secwrap);
-#endif // endif
+extern void set_secondary_d11_core(si_t *sih, void **secmap, void **secwrap);
+#endif
+
/* Macro to enable clock gating changes in different cores */
#define MEM_CLK_GATE_BIT 5
OFFSETOF(pmuregs_t, member), mask, val): \
si_corereg(si, SI_CC_IDX, OFFSETOF(chipcregs_t, member), mask, val))
-/* Used only for the regs present in the pmu core and not present in the old cc core */
-#define PMU_REG_NEW(si, member, mask, val) \
- si_corereg(si, si_findcoreidx(si, PMU_CORE_ID, 0), \
- OFFSETOF(pmuregs_t, member), mask, val)
-
-#define GCI_REG(si, offset, mask, val) \
- (AOB_ENAB(si) ? \
- si_corereg(si, si_findcoreidx(si, GCI_CORE_ID, 0), \
- offset, mask, val): \
- si_corereg(si, SI_CC_IDX, offset, mask, val))
-
-/* Used only for the regs present in the gci core and not present in the old cc core */
-#define GCI_REG_NEW(si, member, mask, val) \
- si_corereg(si, si_findcoreidx(si, GCI_CORE_ID, 0), \
- OFFSETOF(gciregs_t, member), mask, val)
-
#define LHL_REG(si, member, mask, val) \
si_corereg(si, si_findcoreidx(si, GCI_CORE_ID, 0), \
OFFSETOF(gciregs_t, member), mask, val)
#define GCI_CCTL_FGCA_OFFSET 20 /**< ForceGciClkAvail */
#define GCI_CCTL_FGCAV_OFFSET 21 /**< ForceGciClkAvailValue */
#define GCI_CCTL_SCS_OFFSET 24 /**< SeciClkStretch, 31:24 */
-#define GCI_CCTL_SCS 25 /* SeciClkStretch */
#define GCI_MODE_UART 0x0
#define GCI_MODE_SECI 0x1
#define GCI_SECIIN_GCIGPIO_OFFSET 4
#define GCI_SECIIN_RXID2IP_OFFSET 8
-#define GCI_SECIIN_MODE_MASK 0x7
-#define GCI_SECIIN_GCIGPIO_MASK 0xF
-
#define GCI_SECIOUT_MODE_OFFSET 0
#define GCI_SECIOUT_GCIGPIO_OFFSET 4
#define GCI_SECIOUT_LOOPBACK_OFFSET 8
#define GCI_SECIOUT_SECIINRELATED_OFFSET 16
-#define GCI_SECIOUT_MODE_MASK 0x7
-#define GCI_SECIOUT_GCIGPIO_MASK 0xF
-#define GCI_SECIOUT_SECIINRELATED_MASK 0x1
-
-#define GCI_SECIOUT_SECIINRELATED 0x1
-
#define GCI_SECIAUX_RXENABLE_OFFSET 0
#define GCI_SECIFIFO_RXENABLE_OFFSET 16
#define GCI_GPIOIDX_OFFSET 16
#define GCI_LTECX_SECI_ID 0 /**< SECI port for LTECX */
-#define GCI_LTECX_TXCONF_EN_OFFSET 2
-#define GCI_LTECX_PRISEL_EN_OFFSET 3
/* To access per GCI bit registers */
#define GCI_REG_WIDTH 32
-/* number of event summary bits */
-#define GCI_EVENT_NUM_BITS 32
-
-/* gci event bits per core */
-#define GCI_EVENT_BITS_PER_CORE 4
-#define GCI_EVENT_HWBIT_1 1
-#define GCI_EVENT_HWBIT_2 2
-#define GCI_EVENT_SWBIT_1 3
-#define GCI_EVENT_SWBIT_2 4
-
-#define GCI_MBDATA_TOWLAN_POS 96
-#define GCI_MBACK_TOWLAN_POS 104
-#define GCI_WAKE_TOWLAN_PO 112
-#define GCI_SWREADY_POS 120
-
/* GCI bit positions */
/* GCI [127:000] = WLAN [127:0] */
#define GCI_WLAN_IP_ID 0
#define GCI_WLAN_PRIO_POS (GCI_WLAN_BEGIN + 4)
#define GCI_WLAN_PERST_POS (GCI_WLAN_BEGIN + 15)
-/* GCI [255:128] = BT [127:0] */
-#define GCI_BT_IP_ID 1
-#define GCI_BT_BEGIN 128
-#define GCI_BT_MBDATA_TOWLAN_POS (GCI_BT_BEGIN + GCI_MBDATA_TOWLAN_POS)
-#define GCI_BT_MBACK_TOWLAN_POS (GCI_BT_BEGIN + GCI_MBACK_TOWLAN_POS)
-#define GCI_BT_WAKE_TOWLAN_POS (GCI_BT_BEGIN + GCI_WAKE_TOWLAN_PO)
-#define GCI_BT_SWREADY_POS (GCI_BT_BEGIN + GCI_SWREADY_POS)
-
/* GCI [639:512] = LTE [127:0] */
#define GCI_LTE_IP_ID 4
#define GCI_LTE_BEGIN 512
/* Bit offset of ECI bit no x in 32-bit words */
#define GCI_BITOFFSET(x) ((x)%GCI_REG_WIDTH)
-/* BT SMEM Control Register 0 */
-#define GCI_BT_SMEM_CTRL0_SUBCORE_ENABLE_PKILL (1 << 28)
-
/* End - GCI Macros */
-#define AXI_OOB 0x7
+#ifdef REROUTE_OOBINT
+#define CC_OOB 0x0
+#define M2MDMA_OOB 0x1
+#define PMU_OOB 0x2
+#define D11_OOB 0x3
+#define SDIOD_OOB 0x4
+#define WLAN_OOB 0x5
+#define PMU_OOB_BIT 0x12
+#endif /* REROUTE_OOBINT */
+
+#define GCI_REG(si, offset, mask, val) \
+ (AOB_ENAB(si) ? \
+ si_corereg(si, si_findcoreidx(si, GCI_CORE_ID, 0), \
+ offset, mask, val): \
+ si_corereg(si, SI_CC_IDX, offset, mask, val))
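The macro above picks the GCI core when AOB_ENAB() reports the registers live in the always-on block, and otherwise falls back to the ChipCommon core at SI_CC_IDX. A hedged usage sketch with a made-up register offset (MY_GCI_CTRL_OFFSET is illustrative, not defined in this header):

#define MY_GCI_CTRL_OFFSET      0x200   /* hypothetical byte offset, for illustration */

/* Set bit 0 of the (hypothetical) control register, leaving other bits alone:
 * mask selects the bit to update, val supplies its new value.
 */
static void set_my_gci_ctrl_bit0(si_t *sih)
{
        (void)GCI_REG(sih, MY_GCI_CTRL_OFFSET, 0x00000001, 0x00000001);
}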
extern void si_pll_sr_reinit(si_t *sih);
extern void si_pll_closeloop(si_t *sih);
void si_config_4364_d11_oob(si_t *sih, uint coreid);
+extern void si_update_macclk_mul_fact(si_t *sih, uint mul_fact);
+extern uint32 si_get_macclk_mul_fact(si_t *sih);
extern void si_gci_set_femctrl(si_t *sih, osl_t *osh, bool set);
extern void si_gci_set_femctrl_mask_ant01(si_t *sih, osl_t *osh, bool set);
extern uint si_num_slaveports(si_t *sih, uint coreid);
-extern uint32 si_get_slaveport_addr(si_t *sih, uint spidx, uint baidx,
- uint core_id, uint coreunit);
-extern uint32 si_get_d11_slaveport_addr(si_t *sih, uint spidx,
- uint baidx, uint coreunit);
+extern uint32 si_get_slaveport_addr(si_t *sih, uint asidx, uint core_id, uint coreunit);
+extern uint32 si_get_d11_slaveport_addr(si_t *sih, uint asidx, uint coreunit);
uint si_introff(si_t *sih);
void si_intrrestore(si_t *sih, uint intr_val);
void si_nvram_res_masks(si_t *sih, uint32 *min_mask, uint32 *max_mask);
-extern uint32 si_xtalfreq(si_t *sih);
-extern uint8 si_getspurmode(si_t *sih);
-extern uint32 si_get_openloop_dco_code(si_t *sih);
-extern void si_set_openloop_dco_code(si_t *sih, uint32 openloop_dco_code);
+uint32 si_xtalfreq(si_t *sih);
extern uint32 si_wrapper_dump_buf_size(si_t *sih);
extern uint32 si_wrapper_dump_binary(si_t *sih, uchar *p);
-extern uint32 si_wrapper_dump_last_timeout(si_t *sih, uint32 *error, uint32 *core, uint32 *ba,
- uchar *p);
/* SR Power Control */
extern uint32 si_srpwr_request(si_t *sih, uint32 mask, uint32 val);
extern uint32 si_srpwr_stat_spinwait(si_t *sih, uint32 mask, uint32 val);
extern uint32 si_srpwr_stat(si_t *sih);
extern uint32 si_srpwr_domain(si_t *sih);
-extern uint32 si_srpwr_domain_all_mask(si_t *sih);
/* SR Power Control */
+#ifdef BCMSRPWR
/* No capabilities bit so using chipid for now */
-#define SRPWR_CAP(sih) (BCM4347_CHIP(sih->chip) || BCM4369_CHIP(sih->chip))
+ #define SRPWR_CAP(sih) (\
+ (CHIPID(sih->chip) == BCM4347_CHIP_ID) || \
+ (0))
-#ifdef BCMSRPWR
extern bool _bcmsrpwr;
- #if defined(ROM_ENAB_RUNTIME_CHECK) || !defined(DONGLEBUILD)
+ #if defined(WL_ENAB_RUNTIME_CHECK) || !defined(DONGLEBUILD)
#define SRPWR_ENAB() (_bcmsrpwr)
#elif defined(BCMSRPWR_DISABLED)
#define SRPWR_ENAB() (0)
#define SRPWR_ENAB() (1)
#endif
#else
+ #define SRPWR_CAP(sih) (0)
#define SRPWR_ENAB() (0)
#endif /* BCMSRPWR */
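SRPWR_CAP() and SRPWR_ENAB() follow the usual capability-versus-enable split: capability is keyed off the chip id, while enablement is either the runtime _bcmsrpwr flag or a compile-time constant. A small illustrative guard around the si_srpwr_request() call declared earlier (a sketch, not the driver's actual call site):

/* Illustrative: only issue a save/restore power request when the chip
 * supports it and the feature is enabled in this build or at runtime.
 */
static void maybe_srpwr_request(si_t *sih, uint32 mask, uint32 val)
{
        if (SRPWR_CAP(sih) && SRPWR_ENAB())
                (void)si_srpwr_request(sih, mask, val);
}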
-/*
- * Multi-BackPlane architecture. Each can power up/down independently.
- * Common backplane: shared between BT and WL
- * ChipC, PCIe, GCI, PMU, SRs
- * HW powers up as needed
- * WL BackPlane (WLBP):
- * ARM, TCM, Main, Aux
- * Host needs to power up
- */
-#define MULTIBP_CAP(sih) (FALSE)
-#define MULTIBP_ENAB(sih) ((sih) && (sih)->_multibp_enable)
-
-uint32 si_enum_base(uint devid);
-
-extern uint8 si_lhl_ps_mode(si_t *sih);
-
-#ifdef UART_TRAP_DBG
-void ai_dump_APB_Bridge_registers(si_t *sih);
-#endif /* UART_TRAP_DBG */
-
-void si_clrirq_idx(si_t *sih, uint core_idx);
-
-/* return if scan core is present */
-bool si_scan_core_present(si_t *sih);
-
#endif /* _siutils_h_ */
/*
* SPI device spec header file
*
- * Copyright (C) 1999-2019, Broadcom.
- *
+ * Copyright (C) 1999-2017, Broadcom Corporation
+ *
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
* under the terms of the GNU General Public License version 2 (the "GPL"),
* available at http://www.broadcom.com/licenses/GPLv2.php, with the
* following added to such license:
- *
+ *
* As a special exception, the copyright holders of this software give you
* permission to link this software with independent modules, and to copy and
* distribute the resulting executable under terms of your choice, provided that
* the license of that module. An independent module is a module which is not
* derived from this software. The special exception does not apply to any
* modifications of the software.
- *
+ *
* Notwithstanding the above, under no circumstances may you combine this
* software in any way with any other Broadcom software provided under a license
* other than the GPL, without Broadcom's express prior written consent.
/*
* TRX image file header format.
*
- * Copyright (C) 1999-2019, Broadcom.
- *
+ * Copyright (C) 1999-2017, Broadcom Corporation
+ *
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
* under the terms of the GNU General Public License version 2 (the "GPL"),
* available at http://www.broadcom.com/licenses/GPLv2.php, with the
* following added to such license:
- *
+ *
* As a special exception, the copyright holders of this software give you
* permission to link this software with independent modules, and to copy and
* distribute the resulting executable under terms of your choice, provided that
* the license of that module. An independent module is a module which is not
* derived from this software. The special exception does not apply to any
* modifications of the software.
- *
+ *
* Notwithstanding the above, under no circumstances may you combine this
* software in any way with any other Broadcom software provided under a license
* other than the GPL, without Broadcom's express prior written consent.
#ifndef BCMTRXV2
#define TRX_VERSION TRX_V1 /* Version 1 */
#define TRX_MAX_OFFSET TRX_V1_MAX_OFFSETS
-#endif // endif
+#endif
/* BMAC host drivers/applications like bcmdl need to support both Ver 1 and
 * Ver 2 of the trx header. To make it generic, the trx_header structure is modified
uint32 offsets[TRX_MAX_OFFSET]; /* Offsets of partitions from start of header */
#else
uint32 offsets[1]; /* Offsets of partitions from start of header */
-#endif // endif
+#endif
};
#ifdef BCMTRXV2
/*
- * Copyright (C) 1999-2019, Broadcom.
- *
+ * Copyright (C) 1999-2017, Broadcom Corporation
+ *
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
* under the terms of the GNU General Public License version 2 (the "GPL"),
* available at http://www.broadcom.com/licenses/GPLv2.php, with the
* following added to such license:
- *
+ *
* As a special exception, the copyright holders of this software give you
* permission to link this software with independent modules, and to copy and
* distribute the resulting executable under terms of your choice, provided that
* the license of that module. An independent module is a module which is not
* derived from this software. The special exception does not apply to any
* modifications of the software.
- *
+ *
* Notwithstanding the above, under no circumstances may you combine this
* software in any way with any other Broadcom software provided under a license
* other than the GPL, without Broadcom's express prior written consent.
*
* <<Broadcom-WL-IPTag/Open:>>
*
- * $Id: typedefs.h 742663 2018-01-23 06:57:52Z $
+ * $Id: typedefs.h 639587 2016-05-24 06:44:44Z $
*/
#ifndef _TYPEDEFS_H_
#define TYPEDEF_BOOL
#ifndef FALSE
#define FALSE false
-#endif // endif
+#endif
#ifndef TRUE
#define TRUE true
-#endif // endif
+#endif
#else /* ! __cplusplus */
+
#endif /* ! __cplusplus */
-#if !defined(TYPEDEF_UINTPTR)
#if defined(__LP64__)
#define TYPEDEF_UINTPTR
typedef unsigned long long int uintptr;
-#endif // endif
-#endif /* TYPEDEF_UINTPTR */
+#endif
+
+
+
/* float_t types conflict with the same typedefs from the standard ANSI-C
** math.h header file. Don't re-typedef them here.
#if defined(_NEED_SIZE_T_)
typedef long unsigned int size_t;
-#endif // endif
+#endif
+
+
+
+
#if defined(__sparc__)
#define TYPEDEF_ULONG
-#endif // endif
+#endif
/*
* If this is either a Linux hybrid build or the per-port code of a hybrid build
#include <linux/compiler.h>
#ifdef noinline_for_stack
#define TYPEDEF_BOOL
-#endif // endif
+#endif
#endif /* == 2.6.18 */
#endif /* __KERNEL__ */
+
/* Do not support the (u)int64 types with strict ansi for GNU C */
#if defined(__GNUC__) && defined(__STRICT_ANSI__)
#define TYPEDEF_INT64
#if defined(__STDC__)
#define TYPEDEF_UINT64
-#endif // endif
+#endif
#endif /* __ICL */
#endif /* linux && __KERNEL__ */
-#endif // endif
+#endif
+
/* use the default typedefs in the next section of this file */
#define USE_TYPEDEF_DEFAULTS
#endif /* SITE_TYPEDEFS */
+
/*
* Default Typedefs
*/
#ifndef TYPEDEF_UCHAR
typedef unsigned char uchar;
-#endif // endif
+#endif
#ifndef TYPEDEF_USHORT
typedef unsigned short ushort;
-#endif // endif
+#endif
#ifndef TYPEDEF_UINT
typedef unsigned int uint;
-#endif // endif
+#endif
#ifndef TYPEDEF_ULONG
typedef unsigned long ulong;
-#endif // endif
+#endif
/* define [u]int8/16/32/64, uintptr */
#ifndef TYPEDEF_UINT8
typedef unsigned char uint8;
-#endif // endif
+#endif
#ifndef TYPEDEF_UINT16
typedef unsigned short uint16;
-#endif // endif
+#endif
#ifndef TYPEDEF_UINT32
typedef unsigned int uint32;
-#endif // endif
+#endif
#ifndef TYPEDEF_UINT64
typedef unsigned long long uint64;
-#endif // endif
+#endif
#ifndef TYPEDEF_UINTPTR
typedef unsigned int uintptr;
-#endif // endif
+#endif
#ifndef TYPEDEF_INT8
typedef signed char int8;
-#endif // endif
+#endif
#ifndef TYPEDEF_INT16
typedef signed short int16;
-#endif // endif
+#endif
#ifndef TYPEDEF_INT32
typedef signed int int32;
-#endif // endif
+#endif
#ifndef TYPEDEF_INT64
typedef signed long long int64;
-#endif // endif
+#endif
/* define float32/64, float_t */
#ifndef TYPEDEF_FLOAT32
typedef float float32;
-#endif // endif
+#endif
#ifndef TYPEDEF_FLOAT64
typedef double float64;
-#endif // endif
+#endif
/*
* abstracted floating point type allows for compile time selection of
typedef float32 float_t;
#else /* default to double precision floating point */
typedef float64 float_t;
-#endif // endif
+#endif
#endif /* TYPEDEF_FLOAT_T */
#ifndef FALSE
#define FALSE 0
-#endif // endif
+#endif
#ifndef TRUE
#define TRUE 1 /* TRUE */
-#endif // endif
+#endif
#ifndef NULL
#define NULL 0
-#endif // endif
+#endif
#ifndef OFF
#define OFF 0
-#endif // endif
+#endif
#ifndef ON
#define ON 1 /* ON = 1 */
-#endif // endif
+#endif
#define AUTO (-1) /* Auto = -1 */
#ifndef PTRSZ
#define PTRSZ sizeof(char*)
-#endif // endif
+#endif
+
/* Detect compiler type. */
#if defined(__GNUC__) || defined(__lint)
#define BWL_COMPILER_ARMCC
#else
#error "Unknown compiler!"
-#endif // endif
+#endif
+
#ifndef INLINE
#if defined(BWL_COMPILER_MICROSOFT)
#define INLINE __inline
#else
#define INLINE
- #endif
+ #endif
#endif /* INLINE */
#undef TYPEDEF_BOOL
/* Avoid warning for discarded const or volatile qualifier in special cases (-Wcast-qual) */
#define DISCARD_QUAL(ptr, type) ((type *)(uintptr)(ptr))
-#else /* !EDK_RELEASE_VERSION || (EDK_RELEASE_VERSION < 0x00020000) */
+#else
#include <sys/types.h>
#include <strings.h>
#ifdef stderr
#undef stderr
#define stderr stdout
-#endif // endif
+#endif
+typedef UINT32 uint;
+typedef UINT64 ulong;
+typedef UINT16 ushort;
typedef UINT8 uint8;
typedef UINT16 uint16;
typedef UINT32 uint32;
typedef unsigned char uchar;
typedef UINTN uintptr;
+typedef UINT8 u_char;
+typedef UINT16 u_short;
+typedef UINTN u_int;
+typedef ULONGN u_long;
+
#define UNUSED_PARAMETER(x) (void)(x)
#define DISCARD_QUAL(ptr, type) ((type *)(uintptr)(ptr))
#define INLINE
/*
* 802.1Q VLAN protocol definitions
*
- * Copyright (C) 1999-2019, Broadcom.
- *
+ * Copyright (C) 1999-2017, Broadcom Corporation
+ *
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
* under the terms of the GNU General Public License version 2 (the "GPL"),
* available at http://www.broadcom.com/licenses/GPLv2.php, with the
* following added to such license:
- *
+ *
* As a special exception, the copyright holders of this software give you
* permission to link this software with independent modules, and to copy and
* distribute the resulting executable under terms of your choice, provided that
* the license of that module. An independent module is a module which is not
* derived from this software. The special exception does not apply to any
* modifications of the software.
- *
+ *
* Notwithstanding the above, under no circumstances may you combine this
* software in any way with any other Broadcom software provided under a license
* other than the GPL, without Broadcom's express prior written consent.
#ifndef _TYPEDEFS_H_
#include <typedefs.h>
-#endif // endif
+#endif
/* This marks the start of a packed structure section. */
#include <packed_section_start.h>
#ifndef VLAN_VID_MASK
#define VLAN_VID_MASK 0xfff /* low 12 bits are vlan id */
-#endif // endif
+#endif
#define VLAN_CFI_SHIFT 12 /* canonical format indicator bit */
#define VLAN_PRI_SHIFT 13 /* user priority */
#define ETHERVLAN_HDR_LEN (ETHER_HDR_LEN + VLAN_TAG_LEN)
+
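The shift/mask values above carve up the 16-bit 802.1Q TCI field: the low 12 bits are the VLAN id, bit 12 the CFI, and the top 3 bits the user priority. A small illustrative decode built only on those definitions:

/* Illustrative: split a host-order 802.1Q TCI into its three fields. */
static void vlan_tci_decode(uint16 tci, uint16 *vid, uint8 *cfi, uint8 *prio)
{
        *vid = tci & VLAN_VID_MASK;
        *cfi = (uint8)((tci >> VLAN_CFI_SHIFT) & 0x1);
        *prio = (uint8)(tci >> VLAN_PRI_SHIFT);
}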
/* This marks the end of a packed structure section. */
#include <packed_section_end.h>
/*
- * Copyright (C) 1999-2019, Broadcom.
- *
+ * Copyright (C) 1999-2017, Broadcom Corporation
+ *
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
* under the terms of the GNU General Public License version 2 (the "GPL"),
* available at http://www.broadcom.com/licenses/GPLv2.php, with the
* following added to such license:
- *
+ *
* As a special exception, the copyright holders of this software give you
* permission to link this software with independent modules, and to copy and
* distribute the resulting executable under terms of your choice, provided that
* the license of that module. An independent module is a module which is not
* derived from this software. The special exception does not apply to any
* modifications of the software.
- *
+ *
* Notwithstanding the above, under no circumstances may you combine this
* software in any way with any other Broadcom software provided under a license
* other than the GPL, without Broadcom's express prior written consent.
*
* <<Broadcom-WL-IPTag/Open:>>
*
- * $Id: wlfc_proto.h 735303 2017-12-08 06:20:29Z $
+ * $Id: wlfc_proto.h 675983 2016-12-19 23:18:49Z $
*
*/
/** WL flow control for PROP_TXSTATUS. Related to host AMPDU reordering. */
+
#ifndef __wlfc_proto_definitions_h__
#define __wlfc_proto_definitions_h__
---------------------------------------------------------------------------
*/
-typedef enum {
- WLFC_CTL_TYPE_MAC_OPEN = 1,
- WLFC_CTL_TYPE_MAC_CLOSE = 2,
- WLFC_CTL_TYPE_MAC_REQUEST_CREDIT = 3,
- WLFC_CTL_TYPE_TXSTATUS = 4,
- WLFC_CTL_TYPE_PKTTAG = 5, /** host<->dongle */
-
- WLFC_CTL_TYPE_MACDESC_ADD = 6,
- WLFC_CTL_TYPE_MACDESC_DEL = 7,
- WLFC_CTL_TYPE_RSSI = 8,
+#define WLFC_CTL_TYPE_MAC_OPEN 1
+#define WLFC_CTL_TYPE_MAC_CLOSE 2
+#define WLFC_CTL_TYPE_MAC_REQUEST_CREDIT 3
+#define WLFC_CTL_TYPE_TXSTATUS 4
+#define WLFC_CTL_TYPE_PKTTAG 5 /** host<->dongle */
- WLFC_CTL_TYPE_INTERFACE_OPEN = 9,
- WLFC_CTL_TYPE_INTERFACE_CLOSE = 10,
+#define WLFC_CTL_TYPE_MACDESC_ADD 6
+#define WLFC_CTL_TYPE_MACDESC_DEL 7
+#define WLFC_CTL_TYPE_RSSI 8
- WLFC_CTL_TYPE_FIFO_CREDITBACK = 11,
+#define WLFC_CTL_TYPE_INTERFACE_OPEN 9
+#define WLFC_CTL_TYPE_INTERFACE_CLOSE 10
- WLFC_CTL_TYPE_PENDING_TRAFFIC_BMP = 12, /** host->dongle */
- WLFC_CTL_TYPE_MAC_REQUEST_PACKET = 13,
- WLFC_CTL_TYPE_HOST_REORDER_RXPKTS = 14,
+#define WLFC_CTL_TYPE_FIFO_CREDITBACK 11
- WLFC_CTL_TYPE_TX_ENTRY_STAMP = 15,
- WLFC_CTL_TYPE_RX_STAMP = 16,
- WLFC_CTL_TYPE_TX_STATUS_STAMP = 17, /** obsolete */
+#define WLFC_CTL_TYPE_PENDING_TRAFFIC_BMP 12 /** host->dongle */
+#define WLFC_CTL_TYPE_MAC_REQUEST_PACKET 13
+#define WLFC_CTL_TYPE_HOST_REORDER_RXPKTS 14
- WLFC_CTL_TYPE_TRANS_ID = 18,
- WLFC_CTL_TYPE_COMP_TXSTATUS = 19,
+#define WLFC_CTL_TYPE_TX_ENTRY_STAMP 15
+#define WLFC_CTL_TYPE_RX_STAMP 16
+#define WLFC_CTL_TYPE_TX_STATUS_STAMP 17 /** obsolete */
- WLFC_CTL_TYPE_TID_OPEN = 20,
- WLFC_CTL_TYPE_TID_CLOSE = 21,
- WLFC_CTL_TYPE_UPD_FLR_WEIGHT = 22,
- WLFC_CTL_TYPE_ENAB_FFSCH = 23,
- WLFC_CTL_TYPE_UPDATE_FLAGS = 24, /* clear the flags set in flowring */
- WLFC_CTL_TYPE_CLEAR_SUPPR = 25, /* free the supression info in the flowring */
+#define WLFC_CTL_TYPE_TRANS_ID 18
+#define WLFC_CTL_TYPE_COMP_TXSTATUS 19
- WLFC_CTL_TYPE_FLOWID_OPEN = 26,
- WLFC_CTL_TYPE_FLOWID_CLOSE = 27,
+#define WLFC_CTL_TYPE_TID_OPEN 20
+#define WLFC_CTL_TYPE_TID_CLOSE 21
- WLFC_CTL_TYPE_FILLER = 255
-} wlfc_ctl_type_t;
-#define WLFC_CTL_VALUE_LEN_FLOWID 2
+#define WLFC_CTL_TYPE_FILLER 255
#define WLFC_CTL_VALUE_LEN_MACDESC 8 /** handle, interface, MAC */
#define WLFC_CTL_VALUE_LEN_SEQ 2
-/* Reset the flags set for the corresponding flowring of the SCB which is de-inited */
-/* FLOW_RING_FLAG_LAST_TIM | FLOW_RING_FLAG_INFORM_PKTPEND | FLOW_RING_FLAG_PKT_REQ */
-#define WLFC_RESET_ALL_FLAGS 0
-#define WLFC_CTL_VALUE_LEN_FLAGS 7 /** flags, MAC */
-
-/* free the data stored to be used for suppressed packets in future */
-#define WLFC_CTL_VALUE_LEN_SUPR 7 /** tid, MAC */
-
/* The high bits of ratespec report in timestamp are used for various status */
#define WLFC_TSFLAGS_RX_RETRY (1 << 31)
#define WLFC_TSFLAGS_PM_ENABLED (1 << 30)
#define WLFC_CTL_VALUE_LEN_REQUEST_CREDIT 3 /* credit, MAC-handle, prec_bitmap */
#define WLFC_CTL_VALUE_LEN_REQUEST_PACKET 3 /* credit, MAC-handle, prec_bitmap */
+
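The type codes and *_VALUE_LEN_* sizes above describe the type/length/value signals that PROP_TXSTATUS piggybacks between host and dongle. A hedged sketch of walking such a buffer, assuming the common 1-byte type, 1-byte length framing and that FILLER bytes carry no length or value (the host driver's real parser may differ in detail):

/* Illustrative TLV walk: skip FILLER padding, stop on a truncated item. */
static void wlfc_walk_signals(const uint8 *buf, uint buflen)
{
        uint i = 0;

        while (i < buflen) {
                uint8 type = buf[i];
                uint8 len;

                if (type == WLFC_CTL_TYPE_FILLER) {
                        i++;                    /* single pad byte, no length/value */
                        continue;
                }
                if (i + 2 > buflen)
                        break;                  /* no room for the length byte */
                len = buf[i + 1];
                if (i + 2 + len > buflen)
                        break;                  /* value would run past the buffer */
                /* buf[i + 2] .. buf[i + 1 + len] is the value for 'type' */
                i += 2 + len;
        }
}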
#define WLFC_PKTFLAG_PKTFROMHOST 0x01
#define WLFC_PKTFLAG_PKT_REQUESTED 0x02
-#define WLFC_PKTFLAG_PKT_SENDTOHOST 0x04
#define WL_TXSTATUS_STATUS_MASK 0xff /* allow 8 bits */
#define WL_TXSTATUS_STATUS_SHIFT 24
#define WL_SEQ_IS_AMSDU(x) (((x) >> WL_SEQ_AMSDU_SHIFT) & \
WL_SEQ_AMSDU_MASK)
-/* indicates last_suppr_seq is valid */
-#define WL_SEQ_VALIDSUPPR_MASK 0x1 /* allow 1 bit */
-#define WL_SEQ_VALIDSUPPR_SHIFT 12
-#define WL_SEQ_SET_VALIDSUPPR(x, val) ((x) = \
- ((x) & ~(WL_SEQ_VALIDSUPPR_MASK << WL_SEQ_VALIDSUPPR_SHIFT)) | \
- (((val) & WL_SEQ_VALIDSUPPR_MASK) << WL_SEQ_VALIDSUPPR_SHIFT))
-#define WL_SEQ_GET_VALIDSUPPR(x) (((x) >> WL_SEQ_VALIDSUPPR_SHIFT) & \
- WL_SEQ_VALIDSUPPR_MASK)
-
#define WL_SEQ_FROMFW_MASK 0x1 /* allow 1 bit */
#define WL_SEQ_FROMFW_SHIFT 13
#define WL_SEQ_SET_FROMFW(x, val) ((x) = \
#define WLFC_PKTFLAG_CLR_PKTREQUESTED(x) (x) &= \
~(WLFC_PKTFLAG_PKT_REQUESTED << WL_TXSTATUS_FLAGS_SHIFT)
+
#define WLFC_MAX_PENDING_DATALEN 120
/* host is free to discard the packet */
#define WLFC_CTL_PKTFLAG_DISCARD_NOACK 4
/* Firmware wrongly reported suppressed previously,now fixing to acked */
#define WLFC_CTL_PKTFLAG_SUPPRESS_ACKED 5
-/* Firmware sent this packet as expired (lifetime expiration) */
-#define WLFC_CTL_PKTFLAG_EXPIRED 6
-/* Firmware dropped this packet for some other reason */
-#define WLFC_CTL_PKTFLAG_DROPPED 7
-/* Firmware freed this packet */
-#define WLFC_CTL_PKTFLAG_MKTFREE 8
#define WLFC_CTL_PKTFLAG_MASK (0x0f) /* For 4-bit mask with one extra bit */
#ifdef PROP_TXSTATUS_DEBUG
#define WLFC_BREADCRUMB(x) do {if ((x) == NULL) \
{printf("WLFC: %s():%d:caller:%p\n", \
__FUNCTION__, __LINE__, CALL_SITE);}} while (0)
+#define WLFC_PRINTMAC(banner, ea) do {printf("%s MAC: [%02x:%02x:%02x:%02x:%02x:%02x]\n", \
+ banner, ea[0], ea[1], ea[2], ea[3], ea[4], ea[5]); } while (0)
#define WLFC_WHEREIS(s) printf("WLFC: at %s():%d, %s\n", __FUNCTION__, __LINE__, (s))
#else
#define WLFC_DBGMESG(x)
#define WLFC_BREADCRUMB(x)
+#define WLFC_PRINTMAC(banner, ea)
#define WLFC_WHEREIS(s)
-#endif /* PROP_TXSTATUS_DEBUG */
+#endif
/* AMPDU host reorder packet flags */
#define WLHOST_REORDERDATA_MAXFLOWS 256
#define WLFC_SET_REUSESEQ(x, val) ((x) = \
((x) & ~(1 << WLFC_MODE_REUSESEQ_SHIFT)) | \
(((val) & 1) << WLFC_MODE_REUSESEQ_SHIFT))
-
/** returns TRUE if 'd11 sequence reuse' has been agreed upon between host and dongle */
-#if defined(BCMPCIEDEV_ENABLED) && !defined(ROM_ENAB_RUNTIME_CHECK)
-/* GET_REUSESEQ is always TRUE in pciedev */
-#define WLFC_GET_REUSESEQ(x) (TRUE)
-#else
#define WLFC_GET_REUSESEQ(x) (((x) >> WLFC_MODE_REUSESEQ_SHIFT) & 1)
-#endif /* defined(BCMPCIEDEV_ENABLED) && !defined(ROM_ENAB_RUNTIME_CHECK) */
#define WLFC_MODE_REORDERSUPP_SHIFT 4 /* host reorder suppress pkt bit */
#define WLFC_SET_REORDERSUPP(x, val) ((x) = \
#define FLOW_RING_TIM_SET 7
#define FLOW_RING_TIM_RESET 8
#define FLOW_RING_FLUSH_TXFIFO 9
-#define FLOW_RING_GET_PKT_MAX 10
-#define FLOW_RING_RESET_WEIGHT 11
-#define FLOW_RING_UPD_PRIOMAP 12
/* bit 7 indicates whether the tid field carries TID (1) or AC (0) mapped info */
#define PCIEDEV_IS_AC_TID_MAP_MASK 0x80
*
* Definitions subject to change without notice.
*
- * Copyright (C) 1999-2019, Broadcom.
- *
+ * Copyright (C) 1999-2017, Broadcom Corporation
+ *
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
* under the terms of the GNU General Public License version 2 (the "GPL"),
* available at http://www.broadcom.com/licenses/GPLv2.php, with the
* following added to such license:
- *
+ *
* As a special exception, the copyright holders of this software give you
* permission to link this software with independent modules, and to copy and
* distribute the resulting executable under terms of your choice, provided that
* the license of that module. An independent module is a module which is not
* derived from this software. The special exception does not apply to any
* modifications of the software.
- *
+ *
* Notwithstanding the above, under no circumstances may you combine this
* software in any way with any other Broadcom software provided under a license
* other than the GPL, without Broadcom's express prior written consent.
- *
- *
* <<Broadcom-WL-IPTag/Open:>>
*
- * $Id: wlioctl.h 824900 2019-06-12 05:42:13Z $
+ * $Id: wlioctl.h 677952 2017-01-05 23:25:28Z $
*/
#ifndef _wlioctl_h_
#include <802.11s.h>
#include <802.1d.h>
#include <bcmwifi_channels.h>
-#ifdef WL11AX
-#include <802.11ax.h>
-#endif /* WL11AX */
#include <bcmwifi_rates.h>
#include <wlioctl_defs.h>
#include <bcmipv6.h>
#include <bcm_mpool_pub.h>
#include <bcmcdc.h>
-#define SSSR_NEW_API
-
-/* Include bcmerror.h for error codes or aliases */
-#ifdef BCMUTILS_ERR_CODES
-#include <bcmerror.h>
-#endif /* BCMUTILS_ERR_CODES */
-
-/* NOTE re: Module specific error codes.
- *
- * BCME_.. error codes are extended by various features - e.g. FTM, NAN, SAE etc.
- * The current process is to allocate a range of 1024 negative 32 bit integers to
- * each module that extends the error codes to indicate a module specific status.
- *
- * The next range to use is below. If that range is used for a new feature, please
- * update the range to be used by the next feature.
- *
- * The error codes -4096 ... -5119 are reserved for firmware signing.
- *
- * Next available (inclusive) range: [-8*1024 + 1, -7*1024]
- *
- * End Note
- */
-/* 11ax trigger frame format - versioning info */
-#define TRIG_FRAME_FORMAT_11AX_DRAFT_1P1 0
typedef struct {
uint32 num;
#define TPK_FTM_LEN 16
#ifndef INTF_NAME_SIZ
#define INTF_NAME_SIZ 16
-#endif // endif
-
-#define WL_ASSOC_START_EVT_DATA_VERSION 1
-
-typedef struct assoc_event_data {
- uint32 version;
- uint32 flags;
- chanspec_t join_chspec;
-} assoc_event_data_t;
+#endif
/**Used to send ioctls over the transport pipe */
typedef struct remote_ioctl {
#define DFS_SCAN_S_SCAN_MODESW_INPROGRESS 4
#define DFS_SCAN_S_MAX 5
+
#define ACTION_FRAME_SIZE 1800
typedef struct wl_action_frame {
uint32 m_ulp_wakeind;
} ulp_shm_info_t;
+
/* Legacy structure to help keep backward compatible wl tool and tray app */
#define LEGACY_WL_BSS_INFO_VERSION 107 /**< older version of wl_bss_info struct */
/* variable length Information Elements */
} wl_bss_info_108_t;
+
#define WL_BSS_INFO_VERSION 109 /**< current version of wl_bss_info struct */
/**
uint16 vht_mcsmap; /**< STA's Associated vhtmcsmap */
uint16 vht_mcsmap_prop; /**< STA's Associated prop vhtmcsmap */
uint16 vht_txmcsmap_prop; /**< prop VHT tx mcs prop */
-} wl_bss_info_v109_t;
-
-/**
- * BSS info structure
- * Applications MUST CHECK ie_offset field and length field to access IEs and
- * next bss_info structure in a vector (in wl_scan_results_t)
- */
-typedef struct wl_bss_info_v109_1 {
- uint32 version; /**< version field */
- uint32 length; /**< byte length of data in this record,
- * starting at version and including IEs
- */
- struct ether_addr BSSID;
- uint16 beacon_period; /**< units are Kusec */
- uint16 capability; /**< Capability information */
- uint8 SSID_len;
- uint8 SSID[32];
- uint8 bcnflags; /* additional flags w.r.t. beacon */
- struct {
- uint32 count; /**< # rates in this set */
- uint8 rates[16]; /**< rates in 500kbps units w/hi bit set if basic */
- } rateset; /**< supported rates */
- chanspec_t chanspec; /**< chanspec for bss */
- uint16 atim_window; /**< units are Kusec */
- uint8 dtim_period; /**< DTIM period */
- uint8 accessnet; /* from beacon interwork IE (if bcnflags) */
- int16 RSSI; /**< receive signal strength (in dBm) */
- int8 phy_noise; /**< noise (in dBm) */
- uint8 n_cap; /**< BSS is 802.11N Capable */
- uint8 he_cap; /**< BSS is he capable */
- uint8 freespace1; /* make implicit padding explicit */
- uint32 nbss_cap; /**< 802.11N+AC BSS Capabilities */
- uint8 ctl_ch; /**< 802.11N BSS control channel number */
- uint8 padding1[3]; /**< explicit struct alignment padding */
- uint16 vht_rxmcsmap; /**< VHT rx mcs map (802.11ac IE, VHT_CAP_MCS_MAP_*) */
- uint16 vht_txmcsmap; /**< VHT tx mcs map (802.11ac IE, VHT_CAP_MCS_MAP_*) */
- uint8 flags; /**< flags */
- uint8 vht_cap; /**< BSS is vht capable */
- uint8 reserved[2]; /**< Reserved for expansion of BSS properties */
- uint8 basic_mcs[MCSSET_LEN]; /**< 802.11N BSS required MCS set */
-
- uint16 ie_offset; /**< offset at which IEs start, from beginning */
- uint16 freespace2; /* making implicit padding explicit */
- uint32 ie_length; /**< byte length of Information Elements */
- int16 SNR; /**< average SNR of during frame reception */
- uint16 vht_mcsmap; /**< STA's Associated vhtmcsmap */
- uint16 vht_mcsmap_prop; /**< STA's Associated prop vhtmcsmap */
- uint16 vht_txmcsmap_prop; /**< prop VHT tx mcs prop */
- uint32 he_mcsmap; /**< STA's Associated hemcsmap */
- uint32 he_rxmcsmap; /**< HE rx mcs map (802.11ax IE, HE_CAP_MCS_MAP_*) */
- uint32 he_txmcsmap; /**< HE tx mcs map (802.11ax IE, HE_CAP_MCS_MAP_*) */
-} wl_bss_info_v109_1_t;
-
-/**
- * BSS info structure
- * Applications MUST CHECK ie_offset field and length field to access IEs and
- * next bss_info structure in a vector (in wl_scan_results_t)
- */
-typedef struct wl_bss_info_v109_2 {
- uint32 version; /**< version field */
- uint32 length; /**< byte length of data in this record,
- * starting at version and including IEs
- */
- struct ether_addr BSSID;
- uint16 beacon_period; /**< units are Kusec */
- uint16 capability; /**< Capability information */
- uint8 SSID_len;
- uint8 SSID[32];
- uint8 bcnflags; /* additional flags w.r.t. beacon */
- struct {
- uint32 count; /**< # rates in this set */
- uint8 rates[16]; /**< rates in 500kbps units w/hi bit set if basic */
- } rateset; /**< supported rates */
- chanspec_t chanspec; /**< chanspec for bss */
- uint16 atim_window; /**< units are Kusec */
- uint8 dtim_period; /**< DTIM period */
- uint8 accessnet; /* from beacon interwork IE (if bcnflags) */
- int16 RSSI; /**< receive signal strength (in dBm) */
- int8 phy_noise; /**< noise (in dBm) */
- uint8 n_cap; /**< BSS is 802.11N Capable */
- uint8 he_cap; /**< BSS is he capable */
- uint8 freespace1; /* make implicit padding explicit */
- uint32 nbss_cap; /**< 802.11N+AC BSS Capabilities */
- uint8 ctl_ch; /**< 802.11N BSS control channel number */
- uint8 padding1[3]; /**< explicit struct alignment padding */
- uint16 vht_rxmcsmap; /**< VHT rx mcs map (802.11ac IE, VHT_CAP_MCS_MAP_*) */
- uint16 vht_txmcsmap; /**< VHT tx mcs map (802.11ac IE, VHT_CAP_MCS_MAP_*) */
- uint8 flags; /**< flags */
- uint8 vht_cap; /**< BSS is vht capable */
- uint8 reserved[2]; /**< Reserved for expansion of BSS properties */
- uint8 basic_mcs[MCSSET_LEN]; /**< 802.11N BSS required MCS set */
-
- uint16 ie_offset; /**< offset at which IEs start, from beginning */
- uint16 freespace2; /* making implicit padding explicit */
- uint32 ie_length; /**< byte length of Information Elements */
- int16 SNR; /**< average SNR of during frame reception */
- uint16 vht_mcsmap; /**< STA's Associated vhtmcsmap */
- uint16 vht_mcsmap_prop; /**< STA's Associated prop vhtmcsmap */
- uint16 vht_txmcsmap_prop; /**< prop VHT tx mcs prop */
- uint32 he_mcsmap; /**< STA's Associated hemcsmap */
- uint32 he_rxmcsmap; /**< HE rx mcs map (802.11ax IE, HE_CAP_MCS_MAP_*) */
- uint32 he_txmcsmap; /**< HE tx mcs map (802.11ax IE, HE_CAP_MCS_MAP_*) */
- uint32 timestamp[2]; /* Beacon Timestamp for FAKEAP req */
-} wl_bss_info_v109_2_t;
-
-#ifndef WL_BSS_INFO_TYPEDEF_HAS_ALIAS
-typedef wl_bss_info_v109_t wl_bss_info_t;
-#endif // endif
+ /* Add new fields here */
+ /* variable length Information Elements */
+} wl_bss_info_t;
-#define WL_GSCAN_FULL_RESULT_VERSION 2 /* current version of wl_gscan_result_t struct */
+#define WL_GSCAN_FULL_RESULT_VERSION 2 /* current version of wl_gscan_result_t struct */
+#define WL_GSCAN_INFO_FIXED_FIELD_SIZE (sizeof(wl_gscan_bss_info_t) - sizeof(wl_bss_info_t))
typedef struct wl_gscan_bss_info {
uint32 timestamp[2];
- wl_bss_info_v109_t info;
+ wl_bss_info_t info;
/* Do not add any more members below, fixed */
/* and variable length Information Elements to follow */
-} wl_gscan_bss_info_v2_t;
+} wl_gscan_bss_info_t;
-typedef struct wl_gscan_bss_info_v3 {
- uint32 timestamp[2];
- uint8 info[]; /* var length wl_bss_info_X structures */
- /* Do not add any more members below, fixed */
- /* and variable length Information Elements to follow */
-} wl_gscan_bss_info_v3_t;
-
-#ifndef WL_BSS_INFO_TYPEDEF_HAS_ALIAS
-typedef wl_gscan_bss_info_v2_t wl_gscan_bss_info_t;
-#define WL_GSCAN_INFO_FIXED_FIELD_SIZE (sizeof(wl_gscan_bss_info_t) - sizeof(wl_bss_info_t))
-#endif // endif
typedef struct wl_bsscfg {
uint32 bsscfg_idx;
uint32 chanspec;
} wl_bss_config_t;
-/* Number of Bsscolor supported per core */
-#ifndef HE_MAX_BSSCOLOR_RES
-#define HE_MAX_BSSCOLOR_RES 2
-#endif // endif
-
-#ifndef HE_MAX_STAID_PER_BSSCOLOR
-#define HE_MAX_STAID_PER_BSSCOLOR 4
-#endif // endif
-
-/* BSSColor indices */
-#define BSSCOLOR0_IDX 0
-#define BSSCOLOR1_IDX 1
-#define HE_BSSCOLOR0 0
-#define HE_BSSCOLOR_MAX_VAL 63
-
-/* STAID indices */
-#define STAID0_IDX 0
-#define STAID1_IDX 1
-#define STAID2_IDX 2
-#define STAID3_IDX 3
-#define HE_STAID_MAX_VAL 0x07FF
-
-typedef struct wl_bsscolor_info {
- uint16 version; /**< structure version */
- uint16 length; /**< length of the bsscolor info */
- uint8 bsscolor_index; /**< bsscolor index 0-1 */
- uint8 bsscolor; /**<bsscolor value from 0 to 63 */
- uint8 partial_bsscolor_ind;
- uint8 disable_bsscolor_ind; /**< To disable particular bsscolor */
- /* bsscolor_disable to be added as part of D1.0 */
- uint16 staid_info[HE_MAX_STAID_PER_BSSCOLOR]; /**< 0-3 staid info of each bsscolor */
-} wl_bsscolor_info_t;
-
#define WL_BSS_USER_RADAR_CHAN_SELECT 0x1 /**< User application will randomly select
* radar channel.
*/
};
typedef struct wl_clm_dload_info wl_clm_dload_info_t;
+
typedef struct wlc_ssid {
uint32 SSID_len;
uint8 SSID[DOT11_MAX_SSID_LEN];
#define WLC_EXTDSCAN_MAX_SSID 5
typedef struct wl_extdscan_params {
- int8 nprobes; /**< 0, passive, otherwise active */
- int8 split_scan; /**< split scan */
+ int8 nprobes; /**< 0, passive, otherwise active */
+ int8 split_scan; /**< split scan */
int8 band; /**< band */
int8 pad;
- wlc_ssid_t ssid[WLC_EXTDSCAN_MAX_SSID]; /**< ssid list */
+ wlc_ssid_t ssid[WLC_EXTDSCAN_MAX_SSID]; /**< ssid list */
uint32 tx_rate; /**< in 500ksec units */
wl_scan_type_t scan_type; /**< enum */
- int32 channel_num;
+ int32 channel_num;
chan_scandata_t channel_list[1]; /**< list of chandata structs */
} wl_extdscan_params_t;
-#define WL_EXTDSCAN_PARAMS_FIXED_SIZE (sizeof(wl_extdscan_params_t) - sizeof(chan_scandata_t))
+#define WL_EXTDSCAN_PARAMS_FIXED_SIZE (sizeof(wl_extdscan_params_t) - sizeof(chan_scandata_t))
-#define WL_SCAN_PARAMS_SSID_MAX 10
+#define WL_SCAN_PARAMS_SSID_MAX 10
-struct wl_scan_params {
+typedef struct wl_scan_params {
wlc_ssid_t ssid; /**< default: {0, ""} */
struct ether_addr bssid; /**< default: bcast */
int8 bss_type; /**< default: any,
* the fixed portion is ignored
*/
uint16 channel_list[1]; /**< list of chanspecs */
-};
-
-/* changes in wl_scan_params_v2 as compared to wl_scan_params (v1)
-* uint8 scantype to uint32
-*/
-typedef struct wl_scan_params_v2 {
- uint16 version; /* Version of wl_scan_params, change value of
- * WL_SCAN_PARAM_VERSION on version update
- */
- uint16 length; /* length of structure wl_scan_params_v1_t
- * without implicit pad
- */
- wlc_ssid_t ssid; /**< default: {0, ""} */
- struct ether_addr bssid; /**< default: bcast */
- int8 bss_type; /**< default: any,
- * DOT11_BSSTYPE_ANY/INFRASTRUCTURE/INDEPENDENT
- */
- uint8 PAD;
- uint32 scan_type; /**< flags, 0 use default, and flags specified in
- * WL_SCANFLAGS_XXX
- */
- int32 nprobes; /**< -1 use default, number of probes per channel */
- int32 active_time; /**< -1 use default, dwell time per channel for
- * active scanning
- */
- int32 passive_time; /**< -1 use default, dwell time per channel
- * for passive scanning
- */
- int32 home_time; /**< -1 use default, dwell time for the home channel
- * between channel scans
- */
- int32 channel_num; /**< count of channels and ssids that follow
- *
- * low half is count of channels in channel_list, 0
- * means default (use all available channels)
- *
- * high half is entries in wlc_ssid_t array that
- * follows channel_list, aligned for int32 (4 bytes)
- * meaning an odd channel count implies a 2-byte pad
- * between end of channel_list and first ssid
- *
- * if ssid count is zero, single ssid in the fixed
- * parameter portion is assumed, otherwise ssid in
- * the fixed portion is ignored
- */
- uint16 channel_list[1]; /**< list of chanspecs */
-} wl_scan_params_v2_t;
-
-#define WL_SCAN_PARAMS_VERSION_V2 2
+} wl_scan_params_t;
/** size of wl_scan_params not including variable length array */
-#define WL_SCAN_PARAMS_V2_FIXED_SIZE (OFFSETOF(wl_scan_params_v2_t, channel_list))
-#define WL_MAX_ROAMSCAN_DATSZ \
- (WL_SCAN_PARAMS_FIXED_SIZE + (WL_NUMCHANNELS * sizeof(uint16)))
-#define WL_MAX_ROAMSCAN_V2_DATSZ \
- (WL_SCAN_PARAMS_V2_FIXED_SIZE + (WL_NUMCHANNELS * sizeof(uint16)))
+#define WL_SCAN_PARAMS_FIXED_SIZE 64
+#define WL_MAX_ROAMSCAN_DATSZ (WL_SCAN_PARAMS_FIXED_SIZE + (WL_NUMCHANNELS * sizeof(uint16)))
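As the wl_scan_params comments above describe, channel_num carries two counts in one int32: the low half is the number of chanspecs in channel_list and the high half is the number of wlc_ssid_t entries that follow it. A minimal packing sketch under that assumption (the helper name is illustrative):

/* Illustrative: pack the two counts as described by the struct comment
 * (low 16 bits = channel count, high 16 bits = SSID count).
 */
static int32 wl_scan_pack_counts(uint16 nchannels, uint16 nssids)
{
        return (int32)(((uint32)nssids << 16) | nchannels);
}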
#define ISCAN_REQ_VERSION 1
-#define ISCAN_REQ_VERSION_V2 2
-
-/** incremental scan struct */
-struct wl_iscan_params {
- uint32 version;
- uint16 action;
- uint16 scan_duration;
- struct wl_scan_params params;
-};
/** incremental scan struct */
-typedef struct wl_iscan_params_v2 {
+typedef struct wl_iscan_params {
uint32 version;
uint16 action;
uint16 scan_duration;
- wl_scan_params_v2_t params;
-} wl_iscan_params_v2_t;
+ wl_scan_params_t params;
+} wl_iscan_params_t;
/** 3 fields + size of wl_scan_params, not including variable length array */
-#define WL_ISCAN_PARAMS_FIXED_SIZE (OFFSETOF(wl_iscan_params_t, params) + sizeof(wlc_ssid_t))
-#define WL_ISCAN_PARAMS_V2_FIXED_SIZE \
- (OFFSETOF(wl_iscan_params_v2_t, params) + sizeof(wlc_ssid_t))
+#define WL_ISCAN_PARAMS_FIXED_SIZE (OFFSETOF(wl_iscan_params_t, params) + sizeof(wlc_ssid_t))
typedef struct wl_scan_results {
uint32 buflen;
uint32 version;
uint32 count;
- wl_bss_info_v109_t bss_info[1];
-} wl_scan_results_v109_t;
-
-typedef struct wl_scan_results_v2 {
- uint32 buflen;
- uint32 version;
- uint32 count;
- uint8 bss_info[]; /* var length wl_bss_info_X structures */
-} wl_scan_results_v2_t;
+ wl_bss_info_t bss_info[1];
+} wl_scan_results_t;
-#ifndef WL_BSS_INFO_TYPEDEF_HAS_ALIAS
-typedef wl_scan_results_v109_t wl_scan_results_t;
/** size of wl_scan_results not including variable length array */
#define WL_SCAN_RESULTS_FIXED_SIZE (sizeof(wl_scan_results_t) - sizeof(wl_bss_info_t))
-#endif // endif
-
-#if defined(SIMPLE_ISCAN)
-/** the buf length can be WLC_IOCTL_MAXLEN (8K) to reduce iteration */
-#define WLC_IW_ISCAN_MAXLEN 2048
-typedef struct iscan_buf {
- struct iscan_buf * next;
- int8 iscan_buf[WLC_IW_ISCAN_MAXLEN];
-} iscan_buf_t;
-#endif /* SIMPLE_ISCAN */
#define ESCAN_REQ_VERSION 1
-#define ESCAN_REQ_VERSION_V2 2
/** event scan reduces amount of SOC memory needed to store scan results */
-struct wl_escan_params {
+typedef struct wl_escan_params {
uint32 version;
uint16 action;
uint16 sync_id;
- struct wl_scan_params params;
-};
-
-typedef struct wl_escan_params_v2 {
- uint32 version;
- uint16 action;
- uint16 sync_id;
- wl_scan_params_v2_t params;
-} wl_escan_params_v2_t;
+ wl_scan_params_t params;
+} wl_escan_params_t;
#define WL_ESCAN_PARAMS_FIXED_SIZE (OFFSETOF(wl_escan_params_t, params) + sizeof(wlc_ssid_t))
-#define WL_ESCAN_PARAMS_V2_FIXED_SIZE (OFFSETOF(wl_escan_params_v2_t, params) + sizeof(wlc_ssid_t))
-
-/* New scan version is defined then change old version of scan to
- * wl_scan_params_v1_t and new one to wl_scan_params_t
- */
-#ifdef WL_SCAN_PARAMS_V2
-typedef struct wl_scan_params wl_scan_params_v1_t;
-typedef struct wl_escan_params wl_escan_params_v1_t;
-typedef struct wl_iscan_params wl_iscan_params_v1_t;
-typedef struct wl_scan_params_v2 wl_scan_params_t;
-typedef struct wl_escan_params_v2 wl_escan_params_t;
-typedef struct wl_iscan_params_v2 wl_iscan_params_t;
-#define WL_SCAN_PARAMS_FIXED_SIZE (OFFSETOF(wl_scan_params_t, channel_list))
-#else
-typedef struct wl_scan_params wl_scan_params_t;
-typedef struct wl_escan_params wl_escan_params_t;
-typedef struct wl_iscan_params wl_iscan_params_t;
-#define WL_SCAN_PARAMS_FIXED_SIZE 64
-#endif // endif
/** event scan reduces amount of SOC memory needed to store scan results */
typedef struct wl_escan_result {
uint32 version;
uint16 sync_id;
uint16 bss_count;
- wl_bss_info_v109_t bss_info[1];
-} wl_escan_result_v109_t;
+ wl_bss_info_t bss_info[1];
+} wl_escan_result_t;
-/** event scan reduces amount of SOC memory needed to store scan results */
-typedef struct wl_escan_result_v2 {
- uint32 buflen;
- uint32 version;
- uint16 sync_id;
- uint16 bss_count;
- uint8 bss_info[]; /* var length wl_bss_info_X structures */
-} wl_escan_result_v2_t;
-
-#ifndef WL_BSS_INFO_TYPEDEF_HAS_ALIAS
-typedef wl_escan_result_v109_t wl_escan_result_t;
#define WL_ESCAN_RESULTS_FIXED_SIZE (sizeof(wl_escan_result_t) - sizeof(wl_bss_info_t))
-#endif // endif
-
typedef struct wl_gscan_result {
uint32 buflen;
uint32 version;
uint32 scan_ch_bucket;
- wl_gscan_bss_info_v2_t bss_info[1];
-} wl_gscan_result_v2_t;
-
-typedef struct wl_gscan_result_v2_1 {
- uint32 buflen;
- uint32 version;
- uint32 scan_ch_bucket;
- uint8 bss_info[]; /* var length wl_bss_info_X structures */
-} wl_gscan_result_v2_1_t;
+ wl_gscan_bss_info_t bss_info[1];
+} wl_gscan_result_t;
-#ifndef WL_BSS_INFO_TYPEDEF_HAS_ALIAS
-typedef wl_gscan_result_v2_t wl_gscan_result_t;
#define WL_GSCAN_RESULTS_FIXED_SIZE (sizeof(wl_gscan_result_t) - sizeof(wl_gscan_bss_info_t))
-#endif // endif
-
/** incremental scan results struct */
typedef struct wl_iscan_results {
uint32 status;
- wl_scan_results_v109_t results;
-} wl_iscan_results_v109_t;
+ wl_scan_results_t results;
+} wl_iscan_results_t;
-/** incremental scan results struct */
-typedef struct wl_iscan_results_v2 {
- uint32 status;
- wl_scan_results_v2_t results;
-} wl_iscan_results_v2_t;
-
-#ifndef WL_BSS_INFO_TYPEDEF_HAS_ALIAS
-typedef wl_iscan_results_v109_t wl_iscan_results_t;
/** size of wl_iscan_results not including variable length array */
#define WL_ISCAN_RESULTS_FIXED_SIZE \
(WL_SCAN_RESULTS_FIXED_SIZE + OFFSETOF(wl_iscan_results_t, results))
-#endif // endif
typedef struct wl_probe_params {
wlc_ssid_t ssid;
} wl_probe_params_t;
#define WL_MAXRATES_IN_SET 16 /**< max # of rates in a rateset */
-
typedef struct wl_rateset {
uint32 count; /**< # rates in this set */
uint8 rates[WL_MAXRATES_IN_SET]; /**< rates in 500kbps units w/hi bit set if basic */
} wl_rateset_t;
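/*
 * Illustrative sketch (not part of the original header): filling a legacy
 * wl_rateset_t.  Rates are expressed in 500 kbps units with the high bit
 * marking a basic rate, so a basic 6 Mbps rate is (12 | 0x80) and a
 * non-basic 54 Mbps rate is 108.  The example_*() name is hypothetical.
 */
static void example_fill_rateset(wl_rateset_t *rs)
{
	rs->count = 2;
	rs->rates[0] = 12 | 0x80;	/* 6 Mbps, basic */
	rs->rates[1] = 108;		/* 54 Mbps */
}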
-#define WL_VHT_CAP_MCS_MAP_NSS_MAX 8
-
-typedef struct wl_rateset_args_v1 {
+typedef struct wl_rateset_args {
uint32 count; /**< # rates in this set */
uint8 rates[WL_MAXRATES_IN_SET]; /**< rates in 500kbps units w/hi bit set if basic */
- uint8 mcs[MCSSET_LEN]; /**< supported mcs index bit map */
- uint16 vht_mcs[WL_VHT_CAP_MCS_MAP_NSS_MAX]; /**< supported mcs index bit map per nss */
-} wl_rateset_args_v1_t;
-
-#define RATESET_ARGS_V1 (1)
-#define RATESET_ARGS_V2 (2)
-
-/* RATESET_VERSION_ENABLED is defined in wl.mk post J branch.
- * Guidelines to use wl_rateset_args_t:
- * [a] in wlioctl.h: Add macro RATESET_ARGS_VX where X is the new version number.
- * [b] in wlioctl.h: Add a new structure with wl_rateset_args_vX_t
- * [c] in wlu.c app: Add support to parse new structure under RATESET_ARGS_VX
- * [d] in wlc_types.h: in respective branch and trunk: redefine wl_rateset_args_t with
- * new wl_rateset_args_vX_t
- */
-#ifndef RATESET_VERSION_ENABLED
-/* rateset structure before versioning. legacy. DONOT update anymore here */
-#define RATESET_ARGS_VERSION (RATESET_ARGS_V1)
-typedef wl_rateset_args_v1_t wl_rateset_args_t;
-#endif /* RATESET_VERSION_ENABLED */
-
-/* Note: dependent structures: sta_info_vX_t. When any update to this structure happens,
- * update sta_info_vX_t also.
- */
-#define WL_HE_CAP_MCS_MAP_NSS_MAX 8
-
-typedef struct wl_rateset_args_v2 {
- uint16 version; /**< version. */
- uint16 len; /**< length */
- uint32 count; /**< # rates in this set */
- uint8 rates[WL_MAXRATES_IN_SET]; /**< rates in 500kbps units w/hi bit set if basic */
- uint8 mcs[MCSSET_LEN]; /**< supported mcs index bit map */
- uint16 vht_mcs[WL_VHT_CAP_MCS_MAP_NSS_MAX]; /**< supported mcs index bit map per nss */
- uint16 he_mcs[WL_HE_CAP_MCS_MAP_NSS_MAX]; /**< supported he mcs index bit map per nss */
-} wl_rateset_args_v2_t;
-
-/* HE Rates BITMAP */
-#define WL_HE_CAP_MCS_0_7_MAP 0x00ff
-#define WL_HE_CAP_MCS_0_8_MAP 0x01ff
-#define WL_HE_CAP_MCS_0_9_MAP 0x03ff
-#define WL_HE_CAP_MCS_0_10_MAP 0x07ff
-#define WL_HE_CAP_MCS_0_11_MAP 0x0fff
+ uint8 mcs[MCSSET_LEN]; /**< supported mcs index bit map */
+ uint16 vht_mcs[VHT_CAP_MCS_MAP_NSS_MAX]; /**< supported mcs index bit map per nss */
+} wl_rateset_args_t;
#define TXBF_RATE_MCS_ALL 4
#define TXBF_RATE_VHT_ALL 4
} wlc_antselcfg_t;
typedef struct {
- uint32 duration; /**< millisecs spent sampling this channel */
- union {
- uint32 congest_ibss; /**< millisecs in our bss (presumably this traffic will */
- /**< move if cur bss moves channels) */
- uint32 congest_me; /**< millisecs in my own traffic */
- };
- union {
- uint32 congest_obss; /**< traffic not in our bss */
- uint32 congest_notme; /**< traffic not from/to me (including bc/mc) */
- };
- uint32 interference; /**< millisecs detecting a non 802.11 interferer. */
- uint32 timestamp; /**< second timestamp */
+ uint32 duration; /**< millisecs spent sampling this channel */
+ uint32 congest_ibss; /**< millisecs in our bss (presumably this traffic will */
+ /**< move if cur bss moves channels) */
+ uint32 congest_obss; /**< traffic not in our bss */
+ uint32 interference; /**< millisecs detecting a non 802.11 interferer. */
+ uint32 timestamp; /**< second timestamp */
} cca_congest_t;
typedef struct {
- chanspec_t chanspec; /**< Which channel? */
- uint16 num_secs; /**< How many secs worth of data */
- cca_congest_t secs[1]; /**< Data */
+ chanspec_t chanspec; /**< Which channel? */
+ uint16 num_secs; /**< How many secs worth of data */
+ cca_congest_t secs[1]; /**< Data */
} cca_congest_channel_req_t;
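/*
 * Illustrative sketch (not part of the original header): sizing the buffer
 * for a cca_congest_channel_req_t that carries num_secs one-second samples.
 * secs[] is declared with one element, so (num_secs - 1) additional
 * cca_congest_t entries follow the fixed part.  The example_*() name is
 * hypothetical.
 */
static uint32 example_cca_req_size(uint16 num_secs)
{
	uint32 extra = (num_secs > 1) ? (uint32)(num_secs - 1) : 0;

	return (uint32)sizeof(cca_congest_channel_req_t) +
		extra * (uint32)sizeof(cca_congest_t);
}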
-
-typedef struct {
- uint32 timestamp; /**< second timestamp */
-
- /* Base structure of cca_congest_t: CCA statistics all inclusive */
- uint32 duration; /**< millisecs spent sampling this channel */
- uint32 congest_meonly; /**< millisecs in my own traffic (TX + RX) */
- uint32 congest_ibss; /**< millisecs in our bss (presumably this traffic will */
- /**< move if cur bss moves channels) */
- uint32 congest_obss; /**< traffic not in our bss */
- uint32 interference; /**< millisecs detecting a non 802.11 interferer. */
-
- /* CCA statistics for non PM only */
- uint32 duration_nopm; /**< millisecs spent sampling this channel */
- uint32 congest_meonly_nopm; /**< millisecs in my own traffic (TX + RX) */
- uint32 congest_ibss_nopm; /**< millisecs in our bss (presumably this traffic will */
- /**< move if cur bss moves channels) */
- uint32 congest_obss_nopm; /**< traffic not in our bss */
- uint32 interference_nopm; /**< millisecs detecting a non 802.11 interferer. */
-
- /* CCA statistics for during PM only */
- uint32 duration_pm; /**< millisecs spent sampling this channel */
- uint32 congest_meonly_pm; /**< millisecs in my own traffic (TX + RX) */
- uint32 congest_ibss_pm; /**< millisecs in our bss (presumably this traffic will */
- /**< move if cur bss moves channels) */
- uint32 congest_obss_pm; /**< traffic not in our bss */
- uint32 interference_pm; /**< millisecs detecting a non 802.11 interferer. */
-} cca_congest_ext_t;
-
-#define WL_CCA_EXT_REQ_VER 0
-typedef struct {
- uint16 ver; /**< version of this struct */
- uint16 len; /**< len of this structure */
- chanspec_t chanspec; /**< Which channel? */
- uint16 num_secs; /**< How many secs worth of data */
- cca_congest_ext_t secs[1]; /**< Data - 3 sets for ALL - non-PM - PM */
-} cca_congest_ext_channel_req_t;
-
typedef struct {
uint32 duration; /**< millisecs spent sampling this channel */
uint32 congest; /**< millisecs detecting busy CCA */
uint32 timestamp; /**< second timestamp */
} cca_congest_simple_t;
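/*
 * Illustrative sketch (not part of the original header): converting a
 * cca_congest_simple_t sample into a busy percentage.  Both duration and
 * congest are reported in milliseconds over the same sampling window.  The
 * example_*() name is hypothetical.
 */
static uint32 example_cca_busy_percent(const cca_congest_simple_t *c)
{
	if (c->duration == 0)
		return 0;
	return (c->congest * 100) / c->duration;
}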
-/* The following two structure must have same first 4 fields.
- * The cca_chan_qual_event_t is used to report CCA in older formats and NF.
- * The cca_only_chan_qual_event_t is used to report CCA only with newer format.
- */
typedef struct {
uint16 status;
uint16 id;
- chanspec_t chanspec; /**< Which channel? */
+ chanspec_t chanspec; /**< Which channel? */
uint16 len;
union {
- cca_congest_simple_t cca_busy; /**< CCA busy */
- cca_congest_t cca_busy_ext; /**< Extended CCA report */
- int32 noise; /**< noise floor */
+ cca_congest_simple_t cca_busy; /**< CCA busy */
+ int32 noise; /**< noise floor */
};
} cca_chan_qual_event_t;
-typedef struct {
- uint16 status;
- uint16 id;
- chanspec_t chanspec; /**< Which channel? */
- uint16 len;
- union {
- cca_congest_simple_t cca_busy; /**< CCA busy */
- struct {
- cca_congest_t cca_busy_ext; /**< Extended CCA report */
- cca_congest_t cca_busy_nopm; /**< Extedned CCA report (PM awake time) */
- cca_congest_t cca_busy_pm; /**< Extedned CCA report (PM sleep time) */
- };
- };
-} cca_only_chan_qual_event_t;
-
typedef struct {
uint32 msrmnt_time; /**< Time for Measurement (msec) */
uint32 msrmnt_done; /**< flag set when measurement complete */
*/
} wl_country_t;
+
#define CCODE_INFO_VERSION 1
typedef enum wl_ccode_role {
WLC_CCODE_ROLE_80211D_ASSOC,
WLC_CCODE_ROLE_80211D_SCAN,
WLC_CCODE_ROLE_DEFAULT,
- WLC_CCODE_ROLE_DEFAULT_SROM_BKUP,
WLC_CCODE_LAST
} wl_ccode_role_t;
#define WLC_NUM_CCODE_INFO WLC_CCODE_LAST
wl_rm_rep_elt_t rep[1]; /**< variable length block of reports */
} wl_rm_rep_t;
#define WL_RM_REP_FIXED_LEN 8
-#ifdef BCMCCX
-
-#define LEAP_USER_MAX 32
-#define LEAP_DOMAIN_MAX 32
-#define LEAP_PASSWORD_MAX 32
-
-typedef struct wl_leap_info {
- wlc_ssid_t ssid;
- uint8 user_len;
- uint8 user[LEAP_USER_MAX];
- uint8 password_len;
- uint8 password[LEAP_PASSWORD_MAX];
- uint8 domain_len;
- uint8 domain[LEAP_DOMAIN_MAX];
- uint8 PAD;
-} wl_leap_info_t;
-
-typedef struct wl_leap_list {
- uint32 buflen;
- uint32 version;
- uint32 count;
- wl_leap_info_t leap_info[1];
-} wl_leap_list_t;
-#endif /* BCMCCX */
-
typedef enum sup_auth_status {
/* Basic supplicant authentication states */
WLC_SUP_DISCONNECTED = 0,
} rxiv;
uint32 pad_5[2];
struct ether_addr ea; /**< per station */
- uint16 PAD;
+ uint16 PAD;
} wl_wsec_key_t;
-/* Min length for PSK passphrase */
#define WSEC_MIN_PSK_LEN 8
-/* Max length of supported passphrases for PSK */
#define WSEC_MAX_PSK_LEN 64
-/* Max length of supported passphrases for SAE */
-#define WSEC_MAX_PASSPHRASE_LEN 256u
-/* Flag for key material needing passhash'ing */
-#define WSEC_PASSPHRASE 1u
-/* Flag indicating an SAE passphrase */
-#define WSEC_SAE_PASSPHRASE 2u
+/** Flag for key material needing passhash'ing */
+#define WSEC_PASSPHRASE (1<<0)
/**receptacle for WLC_SET_WSEC_PMK parameter */
typedef struct wsec_pmk {
- ushort key_len; /* octets in key material */
- ushort flags; /* key handling qualification */
- uint8 key[WSEC_MAX_PASSPHRASE_LEN]; /* PMK material */
+ ushort key_len; /**< octets in key material */
+ ushort flags; /**< key handling qualification */
+ uint8 key[WSEC_MAX_PSK_LEN]; /**< PMK material */
} wsec_pmk_t;
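/*
 * Illustrative sketch (not part of the original header): preparing the
 * WLC_SET_WSEC_PMK argument for an ASCII passphrase.  A passphrase of 8..63
 * characters is handed to the firmware with WSEC_PASSPHRASE set so it can
 * derive the PMK itself; a 64-character hex string would instead be copied
 * in as the raw PMK with flags left at 0.  memcpy() is assumed to come from
 * <string.h> or the driver's own wrappers; the example_*() name is
 * hypothetical.
 */
static int example_set_psk_passphrase(wsec_pmk_t *pmk, const char *pass, uint32 len)
{
	if (len < WSEC_MIN_PSK_LEN || len >= WSEC_MAX_PSK_LEN)
		return -1;		/* not a plain-text passphrase */
	pmk->key_len = (ushort)len;
	pmk->flags = WSEC_PASSPHRASE;
	memcpy(pmk->key, pass, len);
	return 0;
}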
-#define WL_AUTH_EVENT_DATA_V1 0x1
-
-/* tlv ids for auth event */
-#define WL_AUTH_PMK_TLV_ID 1
-#define WL_AUTH_PMKID_TLV_ID 2
-/* AUTH event data
-* pmk and pmkid in case of SAE auth
-* xtlvs will be 32 bit alligned
-*/
-typedef struct wl_auth_event {
- uint16 version;
- uint16 length;
- uint8 xtlvs[];
-} wl_auth_event_t;
-
-#define WL_AUTH_EVENT_FIXED_LEN_V1 OFFSETOF(wl_auth_event_t, xtlvs)
-
-#define WL_PMKSA_EVENT_DATA_V1 1u
-
-/* tlv ids for PMKSA event */
-#define WL_PMK_TLV_ID 1u
-#define WL_PMKID_TLV_ID 2u
-#define WL_PEER_ADDR_TLV_ID 3u
-
-/* PMKSA event data structure */
-typedef struct wl_pmksa_event {
- uint16 version;
- uint16 length;
- uint8 xtlvs[];
-} wl_pmksa_event_t;
-
-#define WL_PMKSA_EVENT_FIXED_LEN_V1 OFFSETOF(wl_pmksa_event_t, xtlvs)
-
-#define FILS_CACHE_ID_LEN 2u
-#define PMK_LEN_MAX 48u
-
-typedef struct _pmkid_v1 {
+typedef struct _pmkid {
struct ether_addr BSSID;
- uint8 PMKID[WPA2_PMKID_LEN];
-} pmkid_v1_t;
+ uint8 PMKID[WPA2_PMKID_LEN];
+} pmkid_t;
-#define PMKID_ELEM_V2_LENGTH (sizeof(struct ether_addr) + WPA2_PMKID_LEN + PMK_LEN_MAX + \
- sizeof(ssid_info_t) + FILS_CACHE_ID_LEN)
-
-typedef struct _pmkid_v2 {
- uint16 length; /* Should match PMKID_ELEM_VX_LENGTH */
- struct ether_addr BSSID;
- uint8 PMKID[WPA2_PMKID_LEN];
- uint8 pmk[PMK_LEN_MAX]; /* for FILS key deriviation */
- uint16 pmk_len;
- ssid_info_t ssid;
- uint8 fils_cache_id[FILS_CACHE_ID_LEN];
-} pmkid_v2_t;
-
-#define PMKID_LIST_VER_2 2
-
-typedef struct _pmkid_v3 {
- struct ether_addr bssid;
- uint8 pmkid[WPA2_PMKID_LEN];
- uint8 pmkid_len;
- uint8 pmk[PMK_LEN_MAX];
- uint8 pmk_len;
- uint16 fils_cache_id; /* 2-byte length */
- uint8 pad;
- uint8 ssid_len;
- uint8 ssid[DOT11_MAX_SSID_LEN]; /* For FILS, to save ESSID */
- /* one pmkid used in whole ESS */
- uint32 time_left; /* remaining time until expirary in sec. */
- /* 0 means expired, all 0xFF means never expire */
-} pmkid_v3_t;
-
-#define PMKID_LIST_VER_3 3
-typedef struct _pmkid_list_v1 {
+typedef struct _pmkid_list {
uint32 npmkid;
- pmkid_v1_t pmkid[1];
-} pmkid_list_v1_t;
-
-typedef struct _pmkid_list_v2 {
- uint16 version;
- uint16 length;
- pmkid_v2_t pmkid[1];
-} pmkid_list_v2_t;
-
-typedef struct _pmkid_list_v3 {
- uint16 version;
- uint16 length;
- uint16 count;
- uint16 pad;
- pmkid_v3_t pmkid[];
-} pmkid_list_v3_t;
-
-#ifndef PMKID_VERSION_ENABLED
-/* pmkid structure before versioning. legacy. DONOT update anymore here */
-typedef pmkid_v1_t pmkid_t;
-typedef pmkid_list_v1_t pmkid_list_t;
-#endif /* PMKID_VERSION_ENABLED */
+ pmkid_t pmkid[1];
+} pmkid_list_t;
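/*
 * Illustrative sketch (not part of the original header): looking up a cached
 * entry in a pmkid_list_t by BSSID.  memcmp() is assumed to come from
 * <string.h> or the driver's own wrappers; the example_*() name is
 * hypothetical.
 */
static const pmkid_t *example_find_pmkid(const pmkid_list_t *list,
	const struct ether_addr *bssid)
{
	uint32 i;

	for (i = 0; i < list->npmkid; i++) {
		if (memcmp(&list->pmkid[i].BSSID, bssid, sizeof(*bssid)) == 0)
			return &list->pmkid[i];
	}
	return (const pmkid_t *)0;
}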
typedef struct _pmkid_cand {
struct ether_addr BSSID;
struct dot11_assoc_req req;
struct ether_addr reassoc_bssid; /**< used in reassoc's */
struct dot11_assoc_resp resp;
- uint32 state;
} wl_assoc_info_t;
typedef struct wl_led_info {
uint8 PAD[3];
} wl_led_info_t;
+
/** srom read/write struct passed through ioctl */
typedef struct {
uint32 byteoff; /**< byte offset */
uint8 PAD;
} link_val_t;
+
#define WL_PM_MUTE_TX_VER 1
typedef struct wl_pm_mute_tx {
uint8 PAD;
} wl_pm_mute_tx_t;
-/*
- * Please update the following when modifying this structure:
- * StaInfo Twiki page flags section - description of the sta_info_t struct
- * src/wl/exe/wlu.c - print of sta_info_t
- * Pay attention to version if structure changes.
- */
/* sta_info_t version 4 */
typedef struct {
chanspec_t chanspec; /** chanspec this sta is on */
uint16 PAD;
- wl_rateset_args_v1_t rateset_adv; /* rateset along with mcs index bitmap */
+ wl_rateset_args_t rateset_adv; /* rateset along with mcs index bitmap */
uint32 PAD;
} sta_info_v4_t;
chanspec_t chanspec; /** chanspec this sta is on */
uint16 PAD;
- wl_rateset_args_v1_t rateset_adv; /* rateset along with mcs index bitmap */
+ wl_rateset_args_t rateset_adv; /* rateset along with mcs index bitmap */
} sta_info_v5_t;
-/*
- * Please update the following when modifying this structure:
- * StaInfo Twiki page flags section - description of the sta_info_t struct
- * src/wl/exe/wlu.c - print of sta_info_t
- * Pay attention to version if structure changes.
- */
-
-/* sta_info_t version 6
- changes to wl_rateset_args_t is leading to update this struct version as well.
- */
-typedef struct {
- uint16 ver; /**< version of this struct */
- uint16 len; /**< length in bytes of this structure */
- uint16 cap; /**< sta's advertised capabilities */
- uint16 PAD;
- uint32 flags; /**< flags defined below */
- uint32 idle; /**< time since data pkt rx'd from sta */
- struct ether_addr ea; /**< Station address */
- uint16 PAD;
- wl_rateset_t rateset; /**< rateset in use */
- uint32 in; /**< seconds elapsed since associated */
- uint32 listen_interval_inms; /**< Min Listen interval in ms for this STA */
- uint32 tx_pkts; /**< # of user packets transmitted (unicast) */
- uint32 tx_failures; /**< # of user packets failed */
- uint32 rx_ucast_pkts; /**< # of unicast packets received */
- uint32 rx_mcast_pkts; /**< # of multicast packets received */
- uint32 tx_rate; /**< Rate used by last tx frame */
- uint32 rx_rate; /**< Rate of last successful rx frame */
- uint32 rx_decrypt_succeeds; /**< # of packet decrypted successfully */
- uint32 rx_decrypt_failures; /**< # of packet decrypted unsuccessfully */
- uint32 tx_tot_pkts; /**< # of user tx pkts (ucast + mcast) */
- uint32 rx_tot_pkts; /**< # of data packets recvd (uni + mcast) */
- uint32 tx_mcast_pkts; /**< # of mcast pkts txed */
- uint64 tx_tot_bytes; /**< data bytes txed (ucast + mcast) */
- uint64 rx_tot_bytes; /**< data bytes recvd (ucast + mcast) */
- uint64 tx_ucast_bytes; /**< data bytes txed (ucast) */
- uint64 tx_mcast_bytes; /**< # data bytes txed (mcast) */
- uint64 rx_ucast_bytes; /**< data bytes recvd (ucast) */
- uint64 rx_mcast_bytes; /**< data bytes recvd (mcast) */
- int8 rssi[WL_STA_ANT_MAX]; /**< average rssi per antenna
- * of data frames
- */
- int8 nf[WL_STA_ANT_MAX]; /**< per antenna noise floor */
- uint16 aid; /**< association ID */
- uint16 ht_capabilities; /**< advertised ht caps */
- uint16 vht_flags; /**< converted vht flags */
- uint16 PAD;
- uint32 tx_pkts_retried; /**< # of frames where a retry was
- * necessary
- */
- uint32 tx_pkts_retry_exhausted; /**< # of user frames where a retry
- * was exhausted
- */
- int8 rx_lastpkt_rssi[WL_STA_ANT_MAX]; /**< Per antenna RSSI of last
- * received data frame.
- */
- /* TX WLAN retry/failure statistics:
- * Separated for host requested frames and WLAN locally generated frames.
- * Include unicast frame only where the retries/failures can be counted.
- */
- uint32 tx_pkts_total; /**< # user frames sent successfully */
- uint32 tx_pkts_retries; /**< # user frames retries */
- uint32 tx_pkts_fw_total; /**< # FW generated sent successfully */
- uint32 tx_pkts_fw_retries; /**< # retries for FW generated frames */
- uint32 tx_pkts_fw_retry_exhausted; /**< # FW generated where a retry
- * was exhausted
- */
- uint32 rx_pkts_retried; /**< # rx with retry bit set */
- uint32 tx_rate_fallback; /**< lowest fallback TX rate */
- /* Fields above this line are common to sta_info_t versions 4 and 5 */
-
- uint32 rx_dur_total; /* total user RX duration (estimated) */
-
- chanspec_t chanspec; /** chanspec this sta is on */
- uint16 PAD;
- wl_rateset_args_v2_t rateset_adv; /* rateset along with mcs index bitmap */
-} sta_info_v6_t;
-
-/* define to help support one version older sta_info_t from user level
- * applications.
- */
#define WL_OLD_STAINFO_SIZE OFFSETOF(sta_info_t, tx_tot_pkts)
#define WL_STA_VER_4 4
struct ether_addr ea[1]; /**< variable length array of MAC addresses */
} maclist_t;
-typedef struct wds_client_info {
- char ifname[INTF_NAME_SIZ]; /* WDS ifname */
- struct ether_addr ea; /* WDS client MAC address */
-} wds_client_info_t;
-
-#define WDS_MACLIST_MAGIC 0xFFFFFFFF
-#define WDS_MACLIST_VERSION 1
-
-/* For wds MAC list ioctls */
-typedef struct wds_maclist {
- uint32 count; /* Number of WDS clients */
- uint32 magic; /* Magic number */
- uint32 version; /* Version number */
- struct wds_client_info client_list[1]; /* Variable length array of WDS clients */
-} wds_maclist_t;
-
/**get pkt count struct passed through ioctl */
typedef struct get_pktcnt {
uint32 rx_good_pkt;
uint8 num_pkts; /**< Number of packet entries to be averaged */
} wl_mac_ratehisto_cmd_t;
/** Get MAC rate histogram response */
-/* deprecated after JAGUAR branch */
typedef struct {
uint32 rate[DOT11_RATE_MAX + 1]; /**< Rates */
uint32 mcs[WL_RATESET_SZ_HT_IOCTL * WL_TX_CHAINS_MAX]; /**< MCS counts */
#define WL_NUM_RATES_MCS_1STREAM 8 /**< MCS 0-7 1-stream rates - SISO/CDD/STBC/MCS */
#define WL_NUM_RATES_EXTRA_VHT 2 /**< Additional VHT 11AC rates */
#define WL_NUM_RATES_VHT 10
-#define WL_NUM_RATES_VHT_ALL (WL_NUM_RATES_VHT + WL_NUM_RATES_EXTRA_VHT)
-#define WL_NUM_RATES_HE 12
#define WL_NUM_RATES_MCS32 1
-#define UC_PATH_LEN 128u /**< uCode path length */
+
/*
* Structure for passing hardware and software
uint32 boardvendor; /**< board vendor (usu. PCI sub-vendor id) */
uint32 boardrev; /**< board revision */
uint32 driverrev; /**< driver version */
- uint32 ucoderev; /**< uCode version */
+ uint32 ucoderev; /**< microcode version */
uint32 bus; /**< bus type */
uint32 chipnum; /**< chip number */
uint32 phytype; /**< phy type */
uint32 drvrev_minor; /**< driver version: minor */
uint32 drvrev_rc; /**< driver version: rc */
uint32 drvrev_rc_inc; /**< driver version: rc incremental */
- uint16 ucodeprebuilt; /**< uCode prebuilt flag */
- uint16 ucodediffct; /**< uCode diff count */
- uchar ucodeurl[128u]; /* obsolete, kept for ROM compatiblity */
- uchar ucodepath[UC_PATH_LEN]; /**< uCode URL or path */
} wlc_rev_info_t;
#define WL_REV_INFO_LEGACY_LENGTH 48
#define WL_PHY_PAVARS_LEN 32 /**< Phytype, Bandrange, chain, a[0], b[0], c[0], d[0] .. */
+
#define WL_PHY_PAVAR_VER 1 /**< pavars version */
#define WL_PHY_PAVARS2_NUM 3 /**< a1, b0, b1 */
typedef struct wl_pavars2 {
} wl_aci_args_t;
#define WL_ACI_ARGS_LEGACY_LENGTH 16 /**< bytes of pre NPHY aci args */
-
-#define WL_MACFIFO_PLAY_ARGS_T_VERSION 1u /* version of wl_macfifo_play_args_t struct */
-
-enum wl_macfifo_play_flags {
- WL_MACFIFO_PLAY_STOP = 0x00u, /* stop playing samples */
- WL_MACFIFO_PLAY_START = 0x01u, /* start playing samples */
- WL_MACFIFO_PLAY_LOAD = 0x02u, /* for set: load samples
- for get: samples are loaded
- */
- WL_MACFIFO_PLAY_GET_MAX_SIZE = 0x10u, /* get the macfifo buffer size */
- WL_MACFIFO_PLAY_GET_STATUS = 0x20u, /* get macfifo play status */
-};
-
-typedef struct wl_macfifo_play_args {
- uint16 version; /* structure version */
- uint16 len; /* size of structure */
- uint16 flags;
- uint8 PAD[2];
- uint32 data_len; /* data length */
-} wl_macfifo_play_args_t;
-
-#define WL_MACFIFO_PLAY_DATA_T_VERSION 1u /* version of wl_macfifo_play_data_t struct */
-
-typedef struct wl_macfifo_play_data {
- uint16 version; /* structure version */
- uint16 len; /* size of structure */
- uint32 data_len; /* data length */
-} wl_macfifo_play_data_t;
-
#define WL_SAMPLECOLLECT_T_VERSION 2 /**< version of wl_samplecollect_args_t struct */
typedef struct wl_samplecollect_args {
/* version 0 fields */
uint32 flag; /**< bit def */
} wl_sampledata_t;
+
/* WL_OTA START */
/* OTA Test Status */
enum {
/* Update WL_OTA_TESTVEC_T_VERSION for adding new members to this structure */
} wl_ota_test_vector_t;
+
/** struct copied back form dongle to host to query the status */
typedef struct wl_ota_test_status {
int16 cur_test_cnt; /**< test phase */
uint32 version; /**< version field */
uint32 count; /**< number of valid antenna rssi */
int8 rssi_ant[WL_RSSI_ANT_MAX]; /**< rssi per antenna */
- int8 rssi_sum; /**< summed rssi across all antennas */
- int8 PAD[3];
} wl_rssi_ant_t;
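/*
 * Illustrative sketch (not part of the original header): averaging the valid
 * per-antenna samples in a wl_rssi_ant_t report.  count gives the number of
 * populated entries in rssi_ant[].  The example_*() name is hypothetical.
 */
static int example_avg_rssi(const wl_rssi_ant_t *r)
{
	int sum = 0;
	uint32 i, n;

	n = (r->count < WL_RSSI_ANT_MAX) ? r->count : WL_RSSI_ANT_MAX;
	if (n == 0)
		return 0;
	for (i = 0; i < n; i++)
		sum += r->rssi_ant[i];
	return sum / (int)n;
}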
/* SNR per antenna */
int8 snr_ant[WL_RSSI_ANT_MAX]; /* snr per antenna */
} wl_snr_ant_t;
-/* Weighted average support */
-#define WL_WA_VER 0 /* Initial version - Basic WA algorithm only */
-
-#define WL_WA_ALGO_BASIC 0 /* Basic weighted average algorithm (all 4 metrics) */
-#define WL_WA_TYPE_RSSI 0
-#define WL_WA_TYPE_SNR 1
-#define WL_WA_TYPE_TXRATE 2
-#define WL_WA_TYPE_RXRATE 3
-#define WL_WA_TYPE_MAX 4
-
-typedef struct { /* payload of subcmd in xtlv */
- uint8 id;
- uint8 n_total; /* Total number of samples (n_total >= n_recent) */
- uint8 n_recent; /* Number of samples denoted as recent */
- uint8 w_recent; /* Total weight for the recent samples (as percentage) */
-} wl_wa_basic_params_t;
-
-typedef struct {
- uint16 ver;
- uint16 len;
- uint8 subcmd[]; /* sub-cmd in bcm_xtlv_t */
-} wl_wa_cmd_t;
/** data structure used in 'dfs_status' wl interface, which is used to query dfs status */
typedef struct {
#define WL_DFS_AP_MOVE_ABORT -1 /* Abort any dfs_ap_move in progress immediately */
#define WL_DFS_AP_MOVE_STUNT -2 /* Stunt move but continue background CSA if in progress */
+
/** data structure used in 'radar_status' wl interface, which is use to query radar det status */
typedef struct {
uint8 detected;
#define WL_TXPPR_VERSION 1
#define WL_TXPPR_LENGTH (sizeof(wl_txppr_t))
#define TX_POWER_T_VERSION 45
-
-/* curpower ppr types */
-enum {
- PPRTYPE_TARGETPOWER = 1,
- PPRTYPE_BOARDLIMITS = 2,
- PPRTYPE_REGLIMITS = 3,
- PPRTYPE_RU_REGLIMITS = 4,
- PPRTYPE_RU_BOARDLIMITS = 5,
- PPRTYPE_RU_TARGETPOWER = 6,
- PPRTYPE_LAST
-};
-
/** number of ppr serialization buffers, it should be reg, board and target */
-#define WL_TXPPR_SER_BUF_NUM (PPRTYPE_LAST - 1)
+#define WL_TXPPR_SER_BUF_NUM (3)
typedef struct chanspec_txpwr_max {
chanspec_t chanspec; /**< chanspec */
#define WL_MIMO_PS_STATUS_HW_STATE_NONE 0
#define WL_MIMO_PS_STATUS_HW_STATE_LTECOEX (0x1 << 0)
#define WL_MIMO_PS_STATUS_HW_STATE_MIMOPS_BSS (0x1 << 1)
+#define WL_MIMO_PS_STATUS_HW_STATE_AWDL_BSS (0x1 << 2)
#define WL_MIMO_PS_STATUS_HW_STATE_SCAN (0x1 << 3)
#define WL_MIMO_PS_STATUS_HW_STATE_TXPPR (0x1 << 4)
#define WL_MIMO_PS_STATUS_HW_STATE_PWRTHOTTLE (0x1 << 5)
#define WL_MIMO_PS_PS_LEARNING_CFG_ABORT (1 << 0)
#define WL_MIMO_PS_PS_LEARNING_CFG_STATUS (1 << 1)
#define WL_MIMO_PS_PS_LEARNING_CFG_CONFIG (1 << 2)
-#define WL_MIMO_PS_PS_LEARNING_CFG_MASK (0x7)
#define WL_MIMO_PS_PS_LEARNING_CFG_V1 1
wl_mimo_ps_learning_event_data_t mimops_learning_data;
} wl_mimops_learning_cfg_t;
+
#define WL_OCL_STATUS_VERSION 1
typedef struct ocl_status_info {
uint8 version;
#define OCL_HWMIMO 0x02 /* Set if current coremask is > 1 bit */
#define OCL_COREDOWN 0x80 /* Set if core is currently down */
-#define WL_OPS_CFG_VERSION_1 1
-/* Common IOVAR struct */
-typedef struct wl_ops_cfg_v1 {
- uint16 version;
- uint16 len; /* total length includes fixed fields and variable data[] */
- uint16 subcmd_id; /* subcommand id */
- uint16 padding; /* reserved / padding for 4 byte align */
- uint8 data[]; /* subcommand data; could be empty */
-} wl_ops_cfg_v1_t;
-
-/* subcommands ids */
-enum {
- WL_OPS_CFG_SUBCMD_ENABLE = 0, /* OPS enable/disable mybss and obss
- * for nav and plcp options
- */
- WL_OPS_CFG_SUBCMD_MAX_SLEEP_DUR = 1, /* Max sleep duration used for OPS */
- WL_OPS_CFG_SUBCMD_RESET_STATS = 2 /* Reset stats part of ops_status
- * on both slices
- */
-};
-
-#define WL_OPS_CFG_MASK 0xffff
-#define WL_OPS_CFG_CAP_MASK 0xffff0000
-#define WL_OPS_CFG_CAP_SHIFT 16 /* Shift bits to locate the OPS CAP */
-#define WL_OPS_MAX_SLEEP_DUR 12500 /* max ops duration in us */
-#define WL_OPS_MINOF_MAX_SLEEP_DUR 512 /* minof max ops duration in us */
-#define WL_OPS_SUPPORTED_CFG (WL_OPS_MYBSS_PLCP_DUR | WL_OPS_MYBSS_NAV_DUR \
- | WL_OPS_OBSS_PLCP_DUR | WL_OPS_OBSS_NAV_DUR)
-#define WL_OPS_DEFAULT_CFG WL_OPS_SUPPORTED_CFG
-
-/* WL_OPS_CFG_SUBCMD_ENABLE */
-typedef struct wl_ops_cfg_enable {
- uint32 bits; /* selectively enable ops for mybss and obss */
-} wl_ops_cfg_enable_t;
-/* Bits for WL_OPS_CFG_SUBCMD_ENABLE Parameter */
-#define WL_OPS_MYBSS_PLCP_DUR 0x1 /* OPS based on mybss 11b & 11n mixed HT frames
- * PLCP header duration
- */
-#define WL_OPS_MYBSS_NAV_DUR 0x2 /* OPS based on mybss RTS-CTS duration */
-#define WL_OPS_OBSS_PLCP_DUR 0x4 /* OPS based on obss 11b & 11n mixed HT frames
- * PLCP header duration
- */
-#define WL_OPS_OBSS_NAV_DUR 0x8 /* OPS based on obss RTS-CTS duration */
-
-/* WL_OPS_CFG_SUBCMD_MAX_SLEEP_DUR */
-typedef struct wl_ops_cfg_max_sleep_dur {
- uint32 val; /* maximum sleep duration (us) used for OPS */
-} wl_ops_cfg_max_sleep_dur_t;
-
-/* WL_OPS_CFG_SUBCMD_RESET_STATS */
-typedef struct wl_ops_cfg_reset_stats {
- uint32 val; /* bitmap of slices, 0 means all slices */
-} wl_ops_cfg_reset_stats_t;
-
-#define WL_OPS_STATUS_VERSION_1 1
-#define OPS_DUR_HIST_BINS 5 /* number of bins used, 0-1, 1-2, 2-4, 4-8, >8 msec */
-typedef struct wl_ops_status_v1 {
- uint16 version;
- uint16 len; /* Total length including all fixed fields */
- uint8 slice_index; /* Slice for which status is reported */
- uint8 disable_obss; /* indicate if obss cfg is disabled */
- uint8 pad[2]; /* 4-byte alignment */
- uint32 disable_reasons; /* FW disable reasons */
- uint32 disable_duration; /* ops disable time(ms) due to disable reasons */
- uint32 applied_ops_config; /* currently applied ops config */
- uint32 partial_ops_dur; /* Total time (in usec) of partial ops duration */
- uint32 full_ops_dur; /* Total time (in usec) of full ops duration */
- uint32 count_dur_hist[OPS_DUR_HIST_BINS]; /* ops occurrence histogram */
- uint32 nav_cnt; /* number of times ops triggered based NAV duration */
- uint32 plcp_cnt; /* number of times ops triggered based PLCP duration */
- uint32 mybss_cnt; /* number of times mybss ops trigger */
- uint32 obss_cnt; /* number of times obss ops trigger */
- uint32 miss_dur_cnt; /* number of times ops couldn't happen
- * due to insufficient duration
- */
- uint32 miss_premt_cnt; /* number of times ops couldn't happen due
- * to not meeting Phy preemption thresh
- */
- uint32 max_dur_cnt; /* number of times ops did not trigger due to
- * frames exceeding max sleep duration
- */
- uint32 wake_cnt; /* number of ops miss due to wake reason */
- uint32 bcn_wait_cnt; /* number of ops miss due to waiting for bcn */
-} wl_ops_status_v1_t;
-/* Bits for disable_reasons */
-#define OPS_DISABLED_HOST 0x01 /* Host has disabled through ops_cfg */
-#define OPS_DISABLED_UNASSOC 0x02 /* Disabled because the slice is in unassociated state */
-#define OPS_DISABLED_SCAN 0x04 /* Disabled because the slice is in scan state */
-#define OPS_DISABLED_BCN_MISS 0x08 /* Disabled because beacon missed for a duration */
-
-#define WL_PSBW_CFG_VERSION_1 1
-/* Common IOVAR struct */
-typedef struct wl_psbw_cfg_v1 {
- uint16 version;
- uint16 len; /* total length includes fixed fields and variable data[] */
- uint16 subcmd_id; /* subcommand id */
- uint16 pad; /* reserved / padding for 4 byte align */
- uint8 data[]; /* subcommand data */
-} wl_psbw_cfg_v1_t;
-
-/* subcommands ids */
-enum {
- /* PSBW enable/disable */
- WL_PSBW_CFG_SUBCMD_ENABLE = 0,
- /* override psbw disable requests */
- WL_PSBW_CFG_SUBCMD_OVERRIDE_DISABLE_MASK = 1,
- /* Reset stats part of psbw status */
- WL_PSBW_CFG_SUBCMD_RESET_STATS = 2
-};
-
-#define WL_PSBW_OVERRIDE_DISA_CFG_MASK 0x0000ffff
-#define WL_PSBW_OVERRIDE_DISA_CAP_MASK 0xffff0000
-#define WL_PSBW_OVERRIDE_DISA_CAP_SHIFT 16 /* shift bits for cap */
-
-/* WL_PSBW_CFG_SUBCMD_ENABLE */
-typedef struct wl_psbw_cfg_enable {
- bool enable; /* enable or disable */
-} wl_psbw_cfg_enable_t;
-
-/* WL_PSBW_CFG_SUBCMD_OVERRIDE_DISABLE_MASK */
-typedef struct wl_psbw_cfg_override_disable_mask {
- uint32 mask; /* disable requests to override, cap and current cfg */
-} wl_psbw_cfg_override_disable_mask_t;
-
-/* WL_PSBW_CFG_SUBCMD_RESET_STATS */
-typedef struct wl_psbw_cfg_reset_stats {
- uint32 val; /* infra interface index, 0 */
-} wl_psbw_cfg_reset_stats_t;
-
-#define WL_PSBW_STATUS_VERSION_1 1
-typedef struct wl_psbw_status_v1 {
- uint16 version;
- uint16 len; /* total length including all fixed fields */
- uint8 curr_slice_index; /* current slice index of the interface */
- uint8 associated; /* interface associatd */
- chanspec_t chspec; /* radio chspec */
- uint32 state; /* psbw state */
- uint32 disable_reasons; /* FW disable reasons */
- uint32 slice_enable_dur; /* time(ms) psbw remains enabled on this slice */
- uint32 total_enable_dur; /* time(ms) psbw remains enabled total */
- uint32 enter_cnt; /* total cnt entering PSBW active */
- uint32 exit_cnt; /* total cnt exiting PSBW active */
- uint32 exit_imd_cnt; /* total cnt imd exit when waited N tbtts */
- uint32 enter_skip_cnt; /* total cnt entering PSBW active skipped */
-} wl_psbw_status_v1_t;
-
-/* Bit for state */
-#define PSBW_ACTIVE 0x1 /* active 20MHz */
-#define PSBW_TTTT_PEND 0x2 /* waiting for TTTT intr */
-#define PSBW_WAIT_ENTER 0x4 /* in wait period before entering */
-#define PSBW_CAL_DONE 0x8 /* 20M channel cal done */
-
-/* Bits for disable_reasons */
-#define WL_PSBW_DISA_HOST 0x00000001 /* Host has disabled through psbw_cfg */
-#define WL_PSBW_DISA_AP20M 0x00000002 /* AP is operating on 20 MHz */
-#define WL_PSBW_DISA_SLOTTED_BSS 0x00000004 /* slot_bss active */
-#define WL_PSBW_DISA_NOT_PMFAST 0x00000008 /* Not PM_FAST */
-#define WL_PSBW_DISA_BASICRATESET 0x00000010 /* BasicRateSet is empty */
-#define WL_PSBW_DISA_NOT_D3 0x00000020 /* PCIe not in D3 */
-#define WL_PSBW_DISA_CSA 0x00000040 /* CSA IE is present */
-#define WL_PSBW_DISA_ASSOC 0x00000080 /* assoc state is active/or unassoc */
-#define WL_PSBW_DISA_SCAN 0x00000100 /* scan state is active */
-#define WL_PSBW_DISA_CAL 0x00000200 /* cal pending or active */
-/* following are not part of disable reasons */
-#define WL_PSBW_EXIT_PM 0x00001000 /* Out of PM */
-#define WL_PSBW_EXIT_TIM 0x00002000 /* unicast TIM bit present */
-#define WL_PSBW_EXIT_DATA 0x00004000 /* Data for transmission */
-#define WL_PSBW_EXIT_MGMTDATA 0x00008000 /* management frame for transmission */
-#define WL_PSBW_EXIT_BW_UPD 0x00010000 /* BW being updated */
-#define WL_PSBW_DISA_NONE 0x80000000 /* reserved for internal use only */
/*
* Join preference iovar value is an array of tuples. Each tuple has a one-byte type,
#define RATE_LEGACY_OFDM_54MBPS 7
#define WL_BSSTRANS_RSSI_RATE_MAP_VERSION 1
-#define WL_BSSTRANS_RSSI_RATE_MAP_VERSION_V1 1
-#define WL_BSSTRANS_RSSI_RATE_MAP_VERSION_V2 2
typedef struct wl_bsstrans_rssi {
int8 rssi_2g; /**< RSSI in dbm for 2.4 G */
#define RSSI_RATE_MAP_MAX_STREAMS 4 /**< max streams supported */
-/** RSSI to rate mapping, all 20Mhz, no SGI */
-typedef struct wl_bsstrans_rssi_rate_map_v2 {
- uint16 ver;
- uint16 len; /**< length of entire structure */
- wl_bsstrans_rssi_t cck[WL_NUM_RATES_CCK]; /**< 2.4G only */
- wl_bsstrans_rssi_t ofdm[WL_NUM_RATES_OFDM]; /**< 6 to 54mbps */
- wl_bsstrans_rssi_t phy_n[RSSI_RATE_MAP_MAX_STREAMS][WL_NUM_RATES_MCS_1STREAM]; /* MCS0-7 */
- wl_bsstrans_rssi_t phy_ac[RSSI_RATE_MAP_MAX_STREAMS][WL_NUM_RATES_VHT_ALL]; /**< MCS0-11 */
- wl_bsstrans_rssi_t phy_ax[RSSI_RATE_MAP_MAX_STREAMS][WL_NUM_RATES_HE]; /**< MCS0-11 */
-} wl_bsstrans_rssi_rate_map_v2_t;
-
-/** RSSI to rate mapping, all 20Mhz, no SGI */
-typedef struct wl_bsstrans_rssi_rate_map_v1 {
- uint16 ver;
- uint16 len; /**< length of entire structure */
- wl_bsstrans_rssi_t cck[WL_NUM_RATES_CCK]; /**< 2.4G only */
- wl_bsstrans_rssi_t ofdm[WL_NUM_RATES_OFDM]; /**< 6 to 54mbps */
- wl_bsstrans_rssi_t phy_n[RSSI_RATE_MAP_MAX_STREAMS][WL_NUM_RATES_MCS_1STREAM]; /* MCS0-7 */
- wl_bsstrans_rssi_t phy_ac[RSSI_RATE_MAP_MAX_STREAMS][WL_NUM_RATES_VHT]; /**< MCS0-9 */
-} wl_bsstrans_rssi_rate_map_v1_t;
-
/** RSSI to rate mapping, all 20Mhz, no SGI */
typedef struct wl_bsstrans_rssi_rate_map {
uint16 ver;
#define NFIFO 6 /**< # tx/rx fifopairs */
-#ifndef NFIFO_EXT
-#if defined(BCM_AQM_DMA_DESC) && !defined(BCM_AQM_DMA_DESC_DISABLED)
-#define NFIFO_EXT 10 /* 4EDCA + 4 TWT + 1 Mcast/Bcast + 1 Spare */
-#elif defined(WL11AX_TRIGGERQ) && !defined(WL11AX_TRIGGERQ_DISABLED)
+#if defined(BCM_DMA_CT) && !defined(BCM_DMA_CT_DISABLED)
+#define NFIFO_EXT 32 /* 6 traditional FIFOs + 2 rsvd + 24 MU FIFOs */
+#elif defined(WL11AX) && defined(WL11AX_TRIGGERQ_ENABLED)
#define NFIFO_EXT 10
#else
#define NFIFO_EXT NFIFO
-#endif /* BCM_AQM_DMA_DESC && !BCM_AQM_DMA_DESC_DISABLED */
-#endif /* NFIFO_EXT */
+#endif
-/* When new reason codes are added to list, Please update wl_reinit_names also */
/* Reinit reason codes */
enum {
WL_REINIT_RC_NONE = 0,
WL_REINIT_RC_TX_FIFO_SUSP = 51,
WL_REINIT_RC_MAC_ENABLE = 52,
WL_REINIT_RC_SCAN_STALLED = 53,
- WL_REINIT_RC_PHY_HC = 54,
WL_REINIT_RC_LAST /* This must be the last entry */
};
-#define WL_REINIT_RC_INVALID 255
-
#define NREINITREASONCOUNT 8
-/* NREINITREASONCOUNT is 8 in other branches.
- * Any change to this will break wl tool compatibility with other branches
- * #define NREINITREASONCOUNT WL_REINIT_RC_LAST
- */
#define REINITRSNIDX(_x) (((_x) < WL_REINIT_RC_LAST) ? (_x) : 0)
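/*
 * Illustrative sketch (not part of the original header): REINITRSNIDX()
 * clamps any out-of-range reinit reason code to slot 0, so a per-reason
 * counter array sized WL_REINIT_RC_LAST can be bumped without extra bounds
 * checks.  The example_*() name is hypothetical.
 */
static void example_count_reinit(uint32 rsn_counters[WL_REINIT_RC_LAST], uint32 reason)
{
	rsn_counters[REINITRSNIDX(reason)]++;
}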
#define WL_CNT_T_VERSION 30 /**< current version of wl_cnt_t struct */
#define WL_CNT_VERSION_6 6
-#define WL_CNT_VERSION_7 7
#define WL_CNT_VERSION_11 11
#define WL_CNT_VERSION_XTLV 30
/* The first two uint16 fields are version and length, so the offset of the first counter will be 4 */
#define FIRST_COUNTER_OFFSET 0x04
-/* need for now due to src/wl/ndis automerged to other branches. e.g. BISON */
#define WLC_WITH_XTLV_CNT
-/* Number of xtlv info as required to calculate subcounter offsets */
-#define WL_CNT_XTLV_ID_NUM 12
-#define WL_TLV_IOV_VER 1
-
/**
 * tlv IDs uniquely identify the counter component
* packed into wl_cmd_t container
WL_CNT_XTLV_SLICE_IDX = 0x1, /**< Slice index */
WL_CNT_XTLV_WLC = 0x100, /**< WLC layer counters */
WL_CNT_XTLV_WLC_RINIT_RSN = 0x101, /**< WLC layer reinitreason extension */
- WL_CNT_XTLV_WLC_HE = 0x102, /* he counters */
- WL_CNT_XTLV_WLC_SECVLN = 0x103, /* security vulnerabilities counters */
- WL_CNT_XTLV_WLC_HE_OMI = 0x104, /* he omi counters */
WL_CNT_XTLV_CNTV_LE10_UCODE = 0x200, /**< wl counter ver < 11 UCODE MACSTAT */
WL_CNT_XTLV_LT40_UCODE_V1 = 0x300, /**< corerev < 40 UCODE MACSTAT */
WL_CNT_XTLV_GE40_UCODE_V1 = 0x400, /**< corerev >= 40 UCODE MACSTAT */
- WL_CNT_XTLV_GE64_UCODEX_V1 = 0x800, /* corerev >= 64 UCODEX MACSTAT */
- WL_CNT_XTLV_GE80_UCODE_V1 = 0x900, /* corerev >= 80 UCODEX MACSTAT */
- WL_CNT_XTLV_GE80_TXFUNFL_UCODE_V1 = 0x1000 /* corerev >= 80 UCODEX MACSTAT */
-};
-
-/* tlv IDs uniquely identifies periodic state component */
-enum wl_periodic_slice_state_xtlv_id {
- WL_STATE_COMPACT_COUNTERS = 0x1,
- WL_STATE_TXBF_COUNTERS = 0x2,
- WL_STATE_COMPACT_HE_COUNTERS = 0x3
-};
-
-/* Sub tlvs for chan_counters */
-enum wl_periodic_chan_xtlv_id {
- WL_CHAN_GENERIC_COUNTERS = 0x1,
- WL_CHAN_PERIODIC_COUNTERS = 0x2
-};
-
-#ifdef WLC_CHAN_ECNTR_TEST
-#define WL_CHAN_PERIODIC_CNTRS_VER_1 1
-typedef struct wlc_chan_periodic_cntr
-{
- uint16 version;
- uint16 pad;
- uint32 rxstrt;
-} wlc_chan_periodic_cntr_t;
-#endif /* WLC_CHAN_ECNTR_TEST */
-
-#define WL_CHANCNTR_HDR_VER_1 1
-typedef struct wlc_chan_cntr_hdr_v1
-{
- uint16 version;
- uint16 pad;
- chanspec_t chanspec; /* Dont add any fields above this */
- uint16 pad1;
- uint32 total_time;
- uint32 chan_entry_cnt;
-} wlc_chan_cntr_hdr_v1_t;
-
-/* tlv IDs uniquely identifies periodic state component */
-enum wl_periodic_if_state_xtlv_id {
- WL_STATE_IF_COMPACT_STATE = 0x1,
- WL_STATE_IF_ADPS_STATE = 0x02
-};
-
-enum wl_periodic_tdls_if_state_xtlv_id {
- WL_STATE_IF_TDLS_STATE = 0x1
-};
-
-#define TDMTX_CNT_VERSION_V1 1
-#define TDMTX_CNT_VERSION_V2 2
-
-/* structure holding tdm counters that interface to iovar */
-typedef struct tdmtx_cnt_v1 {
- uint16 ver;
- uint16 length; /* length of this structure */
- uint16 wlc_idx; /* index for wlc */
- uint16 enabled; /* tdmtx is enabled on slice */
- uint32 tdmtx_txa_on; /* TXA on requests */
- uint32 tdmtx_txa_tmcnt; /* Total number of TXA timeout */
- uint32 tdmtx_por_on; /* TXA POR requests */
- uint32 tdmtx_txpuen; /* Path enable requests */
- uint32 tdmtx_txpudis; /* Total number of times Tx path is muted on the slice */
- uint32 tdmtx_txpri_on; /* Total number of times Tx priority was obtained by the slice */
- uint32 tdmtx_txdefer; /* Total number of times Tx was deferred on the slice */
- uint32 tdmtx_txmute; /* Total number of times active Tx muted on the slice */
- uint32 tdmtx_actpwrboff; /* Total number of times TX power is backed off by the slice */
- uint32 tdmtx_txa_dur; /* Total time txa on */
- uint32 tdmtx_txpri_dur; /* Total time TXPri */
- uint32 tdmtx_txdefer_dur; /* Total time txdefer */
- /* TDMTX input fields */
- uint32 tdmtx_txpri;
- uint32 tdmtx_defer;
- uint32 tdmtx_threshold;
- uint32 tdmtx_rssi_threshold;
- uint32 tdmtx_txpwrboff;
- uint32 tdmtx_txpwrboff_dt;
-} tdmtx_cnt_v1_t;
-
-typedef struct {
- uint16 ver;
- uint16 length; /* length of the data portion */
- uint16 cnt;
- uint16 pad; /* pad to align to 32 bit */
- uint8 data[]; /* array of tdmtx_cnt_v1_t */
-} tdmtx_status_t;
-
-/* structure holding counters that match exactly shm field sizes */
-typedef struct tdmtx_cnt_shm_v1 {
- uint16 tdmtx_txa_on; /* TXA on requests */
- uint16 tdmtx_tmcnt; /* TXA on requests */
- uint16 tdmtx_por_on; /* TXA POR requests */
- uint16 tdmtx_txpuen; /* Path enable requests */
- uint16 tdmtx_txpudis; /* Total number of times Tx path is muted on the slice */
- uint16 tdmtx_txpri_on; /* Total number of times Tx priority was obtained by the slice */
- uint16 tdmtx_txdefer; /* Total number of times Tx was defered by the slice */
- uint16 tdmtx_txmute; /* Total number of times active Tx muted on the slice */
- uint16 tdmtx_actpwrboff; /* Total number of times TX power is backed off by the slice */
- uint16 tdmtx_txa_dur_l; /* Total time (low 16 bits) txa on */
- uint16 tdmtx_txa_dur_h; /* Total time (low 16 bits) txa on */
- uint16 tdmtx_txpri_dur_l; /* Total time (low 16 bits) TXPri */
- uint16 tdmtx_txpri_dur_h; /* Total time (high 16 bits) TXPri */
- uint16 tdmtx_txdefer_dur_l; /* Total time (low 16 bits) txdefer */
- uint16 tdmtx_txdefer_dur_h; /* Total time (high 16 bits) txdefer */
-} tdmtx_cnt_shm_v1_t;
-
-/* structure holding tdm counters that interface to iovar for version 2 */
-typedef struct tdmtx_cnt_v2 {
- uint16 ver;
- uint16 length; /* length of this structure */
- uint16 wlc_idx; /* index for wlc */
- uint16 enabled; /* tdmtx is enabled on slice */
- uint32 tdmtx_txa_on; /* TXA on requests */
- uint32 tdmtx_txa_tmcnt; /* Total number of TXA timeout */
- uint32 tdmtx_porhi_on; /* TXA PORHI requests */
- uint32 tdmtx_porlo_on; /* TXA PORLO requests */
- uint32 tdmtx_txpuen; /* Path enable requests */
- uint32 tdmtx_txpudis; /* Total number of times Tx path is muted on the slice */
- uint32 tdmtx_txpri_on; /* Total number of times Tx priority was obtained by the slice */
- uint32 tdmtx_txdefer; /* Total number of times Tx was deferred on the slice */
- uint32 tdmtx_txmute; /* Total number of times active Tx muted on the slice */
- uint32 tdmtx_actpwrboff; /* Total number of times TX power is backed off by the slice */
- uint32 tdmtx_txa_dur; /* Total time txa on */
- uint32 tdmtx_txpri_dur; /* Total time TXPri */
- uint32 tdmtx_txdefer_dur; /* Total time txdefer */
- /* TDMTX input fields */
- uint32 tdmtx_txpri;
- uint32 tdmtx_defer;
- uint32 tdmtx_threshold;
- uint32 tdmtx_rssi_threshold;
- uint32 tdmtx_txpwrboff;
- uint32 tdmtx_txpwrboff_dt;
-} tdmtx_cnt_v2_t;
-
-/* structure holding counters that match exactly shm field sizes */
-typedef struct tdmtx_cnt_shm_v2 {
- uint16 tdmtx_txa_on; /* TXA on requests */
- uint16 tdmtx_tmcnt; /* TXA on requests */
- uint16 tdmtx_porhi_on; /* TXA PORHI requests */
- uint16 tdmtx_porlo_on; /* TXA PORLO requests */
- uint16 tdmtx_txpuen; /* Path enable requests */
- uint16 tdmtx_txpudis; /* Total number of times Tx path is muted on the slice */
- uint16 tdmtx_txpri_on; /* Total number of times Tx priority was obtained by the slice */
- uint16 tdmtx_txdefer; /* Total number of times Tx was defered by the slice */
- uint16 tdmtx_txmute; /* Total number of times active Tx muted on the slice */
- uint16 tdmtx_actpwrboff; /* Total number of times TX power is backed off by the slice */
- uint16 tdmtx_txa_dur_l; /* Total time (low 16 bits) txa on */
- uint16 tdmtx_txa_dur_h; /* Total time (low 16 bits) txa on */
- uint16 tdmtx_txpri_dur_l; /* Total time (low 16 bits) TXPri */
- uint16 tdmtx_txpri_dur_h; /* Total time (high 16 bits) TXPri */
- uint16 tdmtx_txdefer_dur_l; /* Total time (low 16 bits) txdefer */
- uint16 tdmtx_txdefer_dur_h; /* Total time (high 16 bits) txdefer */
-} tdmtx_cnt_shm_v2_t;
-
-typedef struct wl_tdmtx_ioc {
- uint16 id; /* ID of the sub-command */
- uint16 len; /* total length of all data[] */
- uint8 data[]; /* var len payload */
-} wl_tdmtx_ioc_t;
-
-/*
- * iovar subcommand ids
- */
-enum {
- IOV_TDMTX_ENB = 1,
- IOV_TDMTX_STATUS = 2,
- IOV_TDMTX_TXPRI = 3,
- IOV_TDMTX_DEFER = 4,
- IOV_TDMTX_TXA = 5,
- IOV_TDMTX_CFG = 6,
- IOV_TDMTX_LAST
+ WL_CNT_XTLV_GE64_UCODEX_V1 = 0x800 /* corerev >= 64 UCODEX MACSTAT */
};
/**
#define WL_CNT_MCST_VAR_NUM 64
/* sizeof(wl_cnt_ge40mcst_v1_t), sizeof(wl_cnt_lt40mcst_v1_t), and sizeof(wl_cnt_v_le10_mcst_t) */
#define WL_CNT_MCST_STRUCT_SZ ((uint32)sizeof(uint32) * WL_CNT_MCST_VAR_NUM)
-#define WL_CNT_REV80_MCST_STRUCT_SZ ((uint32)sizeof(wl_cnt_ge80mcst_v1_t))
-#define WL_CNT_REV80_MCST_TXFUNFlW_STRUCT_FIXED_SZ \
- ((uint32)OFFSETOF(wl_cnt_ge80_txfunfl_v1_t, txfunfl))
-#define WL_CNT_REV80_MCST_TXFUNFl_STRUCT_SZ(fcnt) \
- (WL_CNT_REV80_MCST_TXFUNFlW_STRUCT_FIXED_SZ + (fcnt * sizeof(uint32)))
-#define WL_CNT_REV80_MCST_TXFUNFlW_STRUCT_SZ (WL_CNT_REV80_MCST_TXFUNFl_STRUCT_SZ(NFIFO_EXT))
#define WL_CNT_MCXST_STRUCT_SZ ((uint32)sizeof(wl_cnt_ge64mcxst_v1_t))
-
-#define WL_CNT_HE_STRUCT_SZ ((uint32)sizeof(wl_he_cnt_wlc_t))
-
-#define WL_CNT_SECVLN_STRUCT_SZ ((uint32)sizeof(wl_secvln_cnt_t))
-
-#define WL_CNT_HE_OMI_STRUCT_SZ ((uint32)sizeof(wl_he_omi_cnt_wlc_v1_t))
#define INVALID_CNT_VAL (uint32)(-1)
#define WL_XTLV_CNTBUF_MAX_SIZE ((uint32)(OFFSETOF(wl_cnt_info_t, data)) + \
#define WL_CNTBUF_MAX_SIZE MAX(WL_XTLV_CNTBUF_MAX_SIZE, (uint32)sizeof(wl_cnt_ver_11_t))
-/* Please refer to the twiki for counters addition/deletion.
- * http://hwnbu-twiki.sj.broadcom.com/bin/view/Mwgroup/WlCounters#Counter_Edition
- */
/** Top structure of counters IOVar buffer */
typedef struct {
*/
} wl_subcnt_info_t;
-/* Top structure of counters TLV version IOVar buffer
- * The structure definition should remain consistant b/w
- * FW and wl/WLM app.
- */
-typedef struct {
- uint16 version; /* Version of IOVAR structure. Added for backward
- * compatibility feature. If any changes are done,
- * WL_TLV_IOV_VER need to be updated.
- */
- uint16 length; /* total len in bytes of this structure + payload */
- uint16 counters_version; /* See definition of WL_CNT_VERSION_XTLV
- * wl app will update counter tlv version to be used
- * so to calculate offset of supported TLVs.
- * If there is a mismatch in the version, FW will update an error
- */
- uint16 num_tlv; /* Max number of TLV info passed by FW to WL app.
- * and vice-versa
- */
- uint32 data[]; /* variable length payload:
- * This stores the tlv as supported by F/W to the wl app.
- * This table is required to compute subcounter offsets at WLapp end.
- */
-} wl_cntr_tlv_info_t;
-
/** wlc layer counters */
typedef struct {
/* transmit stat counters */
uint32 txdropped; /* tx dropped pkts */
uint32 rxbcast; /* BroadcastReceivedFrameCount */
uint32 rxdropped; /* rx dropped pkts (derived: sum of others) */
- uint32 txq_end_assoccb; /* forced txqueue_end callback fired in assoc */
- uint32 tx_toss_cnt; /* number of tx packets tossed */
- uint32 rx_toss_cnt; /* number of rx packets tossed */
- uint32 last_tx_toss_rsn; /* reason because of which last tx pkt tossed */
- uint32 last_rx_toss_rsn; /* reason because of which last rx pkt tossed */
- uint32 pmk_badlen_cnt; /* number of invalid pmk len */
- uint32 txbar_notx; /* number of TX BAR not sent (maybe supressed or muted) */
- uint32 txbar_noack; /* number of TX BAR sent, but not acknowledged by peer */
- uint32 rxfrag_agedout; /**< # of aged out rx fragmentation */
-
- /* Do not remove or rename in the middle of this struct.
- * All counter variables have to be of uint32.
- * Please follow the instruction in
- * http://hwnbu-twiki.sj.broadcom.com/bin/view/Mwgroup/WlCounters#Counter_Edition
- */
} wl_cnt_wlc_t;
-/* he counters Version 1 */
-#define HE_COUNTERS_V1 (1)
-typedef struct wl_he_cnt_wlc_v1 {
- uint32 he_rxtrig_myaid;
- uint32 he_rxtrig_rand;
- uint32 he_colormiss_cnt;
- uint32 he_txmampdu;
- uint32 he_txmtid_back;
- uint32 he_rxmtid_back;
- uint32 he_rxmsta_back;
- uint32 he_txfrag;
- uint32 he_rxdefrag;
- uint32 he_txtrig;
- uint32 he_rxtrig_basic;
- uint32 he_rxtrig_murts;
- uint32 he_rxtrig_bsrp;
- uint32 he_rxdlmu;
- uint32 he_physu_rx;
- uint32 he_phyru_rx;
- uint32 he_txtbppdu;
-} wl_he_cnt_wlc_v1_t;
-
-/* he counters Version 2 */
-#define HE_COUNTERS_V2 (2)
-typedef struct wl_he_cnt_wlc_v2 {
- uint16 version;
- uint16 len;
- uint32 he_rxtrig_myaid; /**< rxed valid trigger frame with myaid */
- uint32 he_rxtrig_rand; /**< rxed valid trigger frame with random aid */
- uint32 he_colormiss_cnt; /**< for bss color mismatch cases */
- uint32 he_txmampdu; /**< for multi-TID AMPDU transmission */
- uint32 he_txmtid_back; /**< for multi-TID BACK transmission */
- uint32 he_rxmtid_back; /**< reception of multi-TID BACK */
- uint32 he_rxmsta_back; /**< reception of multi-STA BACK */
- uint32 he_txfrag; /**< transmission of Dynamic fragmented packets */
- uint32 he_rxdefrag; /**< reception of dynamic fragmented packets */
- uint32 he_txtrig; /**< transmission of trigger frames */
- uint32 he_rxtrig_basic; /**< reception of basic trigger frame */
- uint32 he_rxtrig_murts; /**< reception of MU-RTS trigger frame */
- uint32 he_rxtrig_bsrp; /**< reception of BSR poll trigger frame */
- uint32 he_rxdlmu; /**< reception of DL MU PPDU */
- uint32 he_physu_rx; /**< reception of SU frame */
- uint32 he_phyru_rx; /**< reception of RU frame */
- uint32 he_txtbppdu; /**< increments on transmission of every TB PPDU */
- uint32 he_null_tbppdu; /**< null TB PPDU's sent as a response to basic trigger frame */
-} wl_he_cnt_wlc_v2_t;
-
-/* he counters Version 3 */
-#define WL_RU_TYPE_MAX 6
-#define HE_COUNTERS_V3 (3)
-
-typedef struct wl_he_cnt_wlc_v3 {
- uint16 version;
- uint16 len;
- uint32 he_rxtrig_myaid; /**< rxed valid trigger frame with myaid */
- uint32 he_rxtrig_rand; /**< rxed valid trigger frame with random aid */
- uint32 he_colormiss_cnt; /**< for bss color mismatch cases */
- uint32 he_txmampdu; /**< for multi-TID AMPDU transmission */
- uint32 he_txmtid_back; /**< for multi-TID BACK transmission */
- uint32 he_rxmtid_back; /**< reception of multi-TID BACK */
- uint32 he_rxmsta_back; /**< reception of multi-STA BACK */
- uint32 he_txfrag; /**< transmission of Dynamic fragmented packets */
- uint32 he_rxdefrag; /**< reception of dynamic fragmented packets */
- uint32 he_txtrig; /**< transmission of trigger frames */
- uint32 he_rxtrig_basic; /**< reception of basic trigger frame */
- uint32 he_rxtrig_murts; /**< reception of MU-RTS trigger frame */
- uint32 he_rxtrig_bsrp; /**< reception of BSR poll trigger frame */
- uint32 he_rxhemuppdu_cnt; /**< rxing HE MU PPDU */
- uint32 he_physu_rx; /**< reception of SU frame */
- uint32 he_phyru_rx; /**< reception of RU frame */
- uint32 he_txtbppdu; /**< increments on transmission of every TB PPDU */
- uint32 he_null_tbppdu; /**< null TB PPDU's sent as a response to basic trigger frame */
- uint32 he_rxhesuppdu_cnt; /**< rxing SU PPDU */
- uint32 he_rxhesureppdu_cnt; /**< rxing Range Extension(RE) SU PPDU */
- uint32 he_null_zero_agg; /**< null AMPDU's transmitted in response to basic trigger
- * because of zero aggregation
- */
- uint32 he_null_bsrp_rsp; /**< null AMPDU's txed in response to BSR poll */
- uint32 he_null_fifo_empty; /**< null AMPDU's in response to basic trigger
- * because of no frames in fifo's
- */
- uint32 he_myAID_cnt;
- uint32 he_rxtrig_bfm_cnt;
- uint32 he_rxtrig_mubar;
- uint32 rxheru[WL_RU_TYPE_MAX]; /**< HE of rx pkts */
- uint32 txheru[WL_RU_TYPE_MAX];
- uint32 he_mgmt_tbppdu;
- uint32 he_cs_req_tx_cancel;
- uint32 he_wrong_nss;
- uint32 he_trig_unsupp_rate;
- uint32 he_rxtrig_nfrp;
- uint32 he_rxtrig_bqrp;
- uint32 he_rxtrig_gcrmubar;
-} wl_he_cnt_wlc_v3_t;
-
-/* he counters Version 4 */
-#define HE_COUNTERS_V4 (4)
-typedef struct wl_he_cnt_wlc_v4 {
- uint16 version;
- uint16 len;
- uint32 he_rxtrig_myaid; /**< rxed valid trigger frame with myaid */
- uint32 he_rxtrig_rand; /**< rxed valid trigger frame with random aid */
- uint32 he_colormiss_cnt; /**< for bss color mismatch cases */
- uint32 he_txmampdu; /**< for multi-TID AMPDU transmission */
- uint32 he_txmtid_back; /**< for multi-TID BACK transmission */
- uint32 he_rxmtid_back; /**< reception of multi-TID BACK */
- uint32 he_rxmsta_back; /**< reception of multi-STA BACK */
- uint32 he_txfrag; /**< transmission of Dynamic fragmented packets */
- uint32 he_rxdefrag; /**< reception of dynamic fragmented packets */
- uint32 he_txtrig; /**< transmission of trigger frames */
- uint32 he_rxtrig_basic; /**< reception of basic trigger frame */
- uint32 he_rxtrig_murts; /**< reception of MU-RTS trigger frame */
- uint32 he_rxtrig_bsrp; /**< reception of BSR poll trigger frame */
- uint32 he_rxtsrt_hemuppdu_cnt; /**< rxing HE MU PPDU */
- uint32 he_physu_rx; /**< reception of SU frame */
- uint32 he_phyru_rx; /**< reception of RU frame */
- uint32 he_txtbppdu; /**< increments on transmission of every TB PPDU */
- uint32 he_null_tbppdu; /**< null TB PPDU's sent as a response to basic trigger frame */
- uint32 he_rxstrt_hesuppdu_cnt; /**< rxing SU PPDU */
- uint32 he_rxstrt_hesureppdu_cnt; /**< rxing Range Extension(RE) SU PPDU */
- uint32 he_null_zero_agg; /**< null AMPDU's transmitted in response to basic trigger
- * because of zero aggregation
- */
- uint32 he_null_bsrp_rsp; /**< null AMPDU's txed in response to BSR poll */
- uint32 he_null_fifo_empty; /**< null AMPDU's in response to basic trigger
- * because of no frames in fifo's
- */
- uint32 he_myAID_cnt;
- uint32 he_rxtrig_bfm_cnt;
- uint32 he_rxtrig_mubar;
- uint32 rxheru[WL_RU_TYPE_MAX]; /**< HE of rx pkts */
- uint32 txheru[WL_RU_TYPE_MAX];
- uint32 he_mgmt_tbppdu;
- uint32 he_cs_req_tx_cancel;
- uint32 he_wrong_nss;
- uint32 he_trig_unsupp_rate;
- uint32 he_rxtrig_nfrp;
- uint32 he_rxtrig_bqrp;
- uint32 he_rxtrig_gcrmubar;
- uint32 he_rxtrig_basic_htpack; /**< triggers received with HTP ack policy */
- uint32 he_rxtrig_ed_cncl; /**< count of cancelled packets
- * becasue of cs_req in trigger frame
- */
- uint32 he_rxtrig_suppr_null_tbppdu; /**< count of null frame sent becasue of
- * suppression scenarios
- */
- uint32 he_ulmu_disable; /**< number of UL MU disable scenario's handled in ucode */
- uint32 he_ulmu_data_disable; /**<number of UL MU data disable scenarios
- * handled in ucode
- */
-} wl_he_cnt_wlc_v4_t;
-
-#ifndef HE_COUNTERS_VERSION_ENABLED
-#define HE_COUNTERS_VERSION (HE_COUNTERS_V1)
-typedef wl_he_cnt_wlc_v1_t wl_he_cnt_wlc_t;
-#endif /* HE_COUNTERS_VERSION_ENABLED */
-
-/* he omi counters Version 1 */
-#define HE_OMI_COUNTERS_V1 (1)
-typedef struct wl_he_omi_cnt_wlc_v1 {
- uint16 version;
- uint16 len;
- uint32 he_omitx_sched; /* Count for total number of OMIs scheduled */
- uint32 he_omitx_success; /* Count for OMI Tx success */
- uint32 he_omitx_retries; /* Count for OMI retries as TxDone not set */
- uint32 he_omitx_dur; /* Accumulated duration of OMI completion time */
- uint32 he_omitx_ulmucfg; /* count for UL MU enable/disable change req */
- uint32 he_omitx_ulmucfg_ack; /* count for UL MU enable/disable req txed successfully */
- uint32 he_omitx_txnsts; /* count for Txnsts change req */
- uint32 he_omitx_txnsts_ack; /* count for Txnsts change req txed successfully */
- uint32 he_omitx_rxnss; /* count for Rxnss change req */
- uint32 he_omitx_rxnss_ack; /* count for Rxnss change req txed successfully */
- uint32 he_omitx_bw; /* count for BW change req */
- uint32 he_omitx_bw_ack; /* count for BW change req txed successfully */
- uint32 he_omitx_ersudis; /* count for ER SU enable/disable req */
- uint32 he_omitx_ersudis_ack; /* count for ER SU enable/disable req txed successfully */
- uint32 he_omitx_dlmursdrec; /* count for Resound recommendation change req */
- uint32 he_omitx_dlmursdrec_ack; /* count for Resound recommendation req txed successfully */
-} wl_he_omi_cnt_wlc_v1_t;
-
-/* WL_IFSTATS_XTLV_WL_SLICE_TXBF */
-/* beamforming counters version 1 */
-#define TXBF_ECOUNTERS_V1 (1u)
-#define WL_TXBF_CNT_ARRAY_SZ (8u)
-typedef struct wl_txbf_ecounters_v1 {
- uint16 version;
- uint16 len;
- /* transmit beamforming stats */
- uint16 txndpa; /* null data packet announcements */
- uint16 txndp; /* null data packets */
- uint16 txbfpoll; /* beamforming report polls */
- uint16 txsf; /* subframes */
- uint16 txcwrts; /* contention window rts */
- uint16 txcwcts; /* contention window cts */
- uint16 txbfm;
- /* receive beamforming stats */
- uint16 rxndpa_u; /* unicast NDPAs */
- uint16 rxndpa_m; /* multicast NDPAs */
- uint16 rxbfpoll; /* unicast bf-polls */
- uint16 bferpt; /* beamforming reports */
- uint16 rxsf;
- uint16 rxcwrts;
- uint16 rxcwcts;
- uint16 rxtrig_bfpoll;
- uint16 unused_uint16; /* pad */
- /* sounding stats - interval capture */
- uint16 rxnontb_sound[WL_TXBF_CNT_ARRAY_SZ]; /* non-TB sounding for last 8 captures */
- uint16 rxtb_sound[WL_TXBF_CNT_ARRAY_SZ]; /* TB sounding count for last 8 captures */
- uint32 cap_dur_ms[WL_TXBF_CNT_ARRAY_SZ]; /* last 8 capture durations (in ms) */
- uint32 cap_last_ts; /* timestamp of last sample capture */
-} wl_txbf_ecounters_v1_t;
-
-/* security vulnerabilities counters */
-typedef struct {
- uint32 ie_unknown; /* number of unknown IEs */
- uint32 ie_invalid_length; /* number of IEs with invalid length */
- uint32 ie_invalid_data; /* number of IEs with invalid data */
- uint32 ipv6_invalid_length; /* number of IPv6 packets with invalid payload length */
-} wl_secvln_cnt_t;
-
/* Reinit reasons - do not put anything else other than reinit reasons here */
typedef struct {
uint32 rsn[WL_REINIT_RC_LAST];
uint32 rxdrop20s; /**< drop secondary cnt */
uint32 rxtoolate; /**< receive too late */
uint32 bphy_badplcp; /**< number of bad PLCP reception on BPHY rate */
- /* All counter variables have to be of uint32. */
} wl_cnt_ge40mcst_v1_t;
/** MACSTAT counters for ucode (corerev < 40) */
uint32 phywatch;
uint32 rxtoolate; /**< receive too late */
uint32 bphy_badplcp; /**< number of bad PLCP reception on BPHY rate */
- /* All counter variables have to be of uint32. */
} wl_cnt_lt40mcst_v1_t;
-/** MACSTAT counters for ucode (corerev >= 80) */
+/** MACSTAT counters for "wl counter" version <= 10 */
typedef struct {
/* MAC counters: 32-bit version of d11.h's macstat_t */
- /* Start of PSM2HOST stats(72) block */
uint32 txallfrm; /**< total number of frames sent, incl. Data, ACK, RTS, CTS,
* Control Management (includes retransmissions)
*/
uint32 txackfrm; /**< number of ACK frames sent out */
uint32 txdnlfrm; /**< number of Null-Data transmission generated from template */
uint32 txbcnfrm; /**< beacons transmitted */
- uint32 txampdu; /**< number of AMPDUs transmitted */
- uint32 txmpdu; /**< number of MPDUs transmitted */
+ uint32 txfunfl[6]; /**< per-fifo tx underflows */
+ uint32 txfbw; /**< transmit at fallback bw (dynamic bw) */
+ uint32 PAD0; /**< number of MPDUs transmitted */
uint32 txtplunfl; /**< Template underflows (mac was too slow to transmit ACK/CTS
* or BCN)
*/
uint32 pktengrxdmcast; /**< multicast frames rxed by the pkteng code */
uint32 rxfrmtoolong; /**< Received frame longer than legal limit (2346 bytes) */
uint32 rxfrmtooshrt; /**< Received frame did not contain enough bytes for its frame type */
- uint32 rxanyerr; /**< Any RX error that is not counted by other counters. */
+ uint32 rxinvmachdr; /**< Either the protocol version != 0 or frame type not
+ * data/control/management
+ */
uint32 rxbadfcs; /**< number of frames for which the CRC check failed in the MAC */
uint32 rxbadplcp; /**< parity check of the PLCP header failed */
uint32 rxcrsglitch; /**< PHY was able to correlate the preamble but not the header */
uint32 rxstrt; /**< Number of received frames with a good PLCP
* (i.e. passing parity check)
*/
- uint32 rxdtucastmbss; /**< number of received DATA frames with good FCS and matching RA */
- uint32 rxmgucastmbss; /**< number of received mgmt frames with good FCS and matching RA */
- uint32 rxctlucast; /**< number of received CNTRL frames with good FCS and matching RA */
- uint32 rxrtsucast; /**< number of unicast RTS addressed to the MAC (good FCS) */
- uint32 rxctsucast; /**< number of unicast CTS addressed to the MAC (good FCS) */
+ uint32 rxdfrmucastmbss; /* number of received DATA frames with good FCS and matching RA */
+ uint32 rxmfrmucastmbss; /* number of received mgmt frames with good FCS and matching RA */
+ uint32 rxcfrmucast; /**< number of received CNTRL frames with good FCS and matching RA */
+ uint32 rxrtsucast; /**< number of unicast RTS addressed to the MAC (good FCS) */
+ uint32 rxctsucast; /**< number of unicast CTS addressed to the MAC (good FCS) */
uint32 rxackucast; /**< number of ucast ACKS received (good FCS) */
- uint32 rxdtocast; /**< number of received DATA frames (good FCS and not matching RA) */
- uint32 rxmgocast; /**< number of received MGMT frames (good FCS and not matching RA) */
- uint32 rxctlocast; /**< number of received CNTRL frame (good FCS and not matching RA) */
+ uint32 rxdfrmocast; /**< number of received DATA frames (good FCS and not matching RA) */
+ uint32 rxmfrmocast; /**< number of received MGMT frames (good FCS and not matching RA) */
+ uint32 rxcfrmocast; /**< number of received CNTRL frame (good FCS and not matching RA) */
uint32 rxrtsocast; /**< number of received RTS not addressed to the MAC */
uint32 rxctsocast; /**< number of received CTS not addressed to the MAC */
- uint32 rxdtmcast; /**< number of RX Data multicast frames received by the MAC */
- uint32 rxmgmcast; /**< number of RX Management multicast frames received by the MAC */
- uint32 rxctlmcast; /**< number of RX Control multicast frames received by the MAC
+ uint32 rxdfrmmcast; /**< number of RX Data multicast frames received by the MAC */
+ uint32 rxmfrmmcast; /**< number of RX Management multicast frames received by the MAC */
+ uint32 rxcfrmmcast; /**< number of RX Control multicast frames received by the MAC
* (unlikely to see these)
*/
uint32 rxbeaconmbss; /**< beacons received from member of BSS */
- uint32 rxdtucastobss; /**< number of unicast frames addressed to the MAC from
+ uint32 rxdfrmucastobss; /**< number of unicast frames addressed to the MAC from
* other BSS (WDS FRAME)
*/
uint32 rxbeaconobss; /**< beacons received from other BSS */
* expecting a response
*/
uint32 bcntxcancl; /**< transmit beacons canceled due to receipt of beacon (IBSS) */
- uint32 rxnodelim; /**< number of no valid delimiter detected by ampdu parser */
- uint32 missbcn_dbg; /**< number of beacon missed to receive */
+ uint32 PAD1;
+ uint32 rxf0ovfl; /**< number of receive fifo 0 overflows */
+ uint32 rxf1ovfl; /**< Number of receive fifo 1 overflows (obsolete) */
+ uint32 rxf2ovfl; /**< Number of receive fifo 2 overflows (obsolete) */
+ uint32 txsfovfl; /**< Number of transmit status fifo overflows (obsolete) */
uint32 pmqovfl; /**< number of PMQ overflows */
uint32 rxcgprqfrm; /**< number of received Probe requests that made it into
* the PRQ fifo
* fifo because a probe response could not be sent out within
* the time limit defined in M_PRS_MAXTIME
*/
- uint32 txrtsfail; /**< number of rts transmission failure that reach retry limit */
- uint32 txucast; /**< number of unicast tx expecting response other than cts/cwcts */
- uint32 txinrtstxop; /**< number of data frame transmissions during rts txop */
+ uint32 rxnack; /**< obsolete */
+ uint32 frmscons; /**< obsolete */
+ uint32 txnack; /**< obsolete */
uint32 rxback; /**< blockack rxcnt */
uint32 txback; /**< blockack txcnt */
uint32 bphy_rxcrsglitch; /**< PHY count of bphy glitches */
uint32 rxdrop20s; /**< drop secondary cnt */
uint32 rxtoolate; /**< receive too late */
uint32 bphy_badplcp; /**< number of bad PLCP reception on BPHY rate */
- uint32 rxtrig_myaid; /* New counters added in corerev 80 */
- uint32 rxtrig_rand;
- uint32 goodfcs;
- uint32 colormiss;
- uint32 txmampdu;
- uint32 rxmtidback;
- uint32 rxmstaback;
- uint32 txfrag;
- /* End of PSM2HOST stats block */
- /* start of rxerror overflow counter(24) block which are modified/added in corerev 80 */
- uint32 phyovfl;
- uint32 rxf0ovfl; /**< number of receive fifo 0 overflows */
- uint32 rxf1ovfl; /**< number of receive fifo 1 overflows */
- uint32 lenfovfl;
- uint32 weppeof;
- uint32 badplcp;
- uint32 msduthresh;
- uint32 strmeof;
- uint32 stsfifofull;
- uint32 stsfifoerr;
- uint32 PAD[6];
- uint32 rxerr_stat;
- uint32 ctx_fifo_full;
- uint32 PAD0[9];
- uint32 ctmode_ufc_cnt;
- uint32 PAD1[28]; /* PAD added for counter elements to be added soon */
-} wl_cnt_ge80mcst_v1_t;
-
-typedef struct {
- uint32 fifocount;
- uint32 txfunfl[];
-} wl_cnt_ge80_txfunfl_v1_t;
+} wl_cnt_v_le10_mcst_t;
-/** MACSTAT counters for "wl counter" version <= 10 */
-/* With ucode before its macstat cnts cleaned up */
-typedef struct {
- /* MAC counters: 32-bit version of d11.h's macstat_t */
- uint32 txallfrm; /**< total number of frames sent, incl. Data, ACK, RTS, CTS,
- * Control Management (includes retransmissions)
- */
- uint32 txrtsfrm; /**< number of RTS sent out by the MAC */
- uint32 txctsfrm; /**< number of CTS sent out by the MAC */
- uint32 txackfrm; /**< number of ACK frames sent out */
- uint32 txdnlfrm; /**< number of Null-Data transmission generated from template */
- uint32 txbcnfrm; /**< beacons transmitted */
- uint32 txfunfl[6]; /**< per-fifo tx underflows */
- uint32 txfbw; /**< transmit at fallback bw (dynamic bw) */
- uint32 PAD0; /**< number of MPDUs transmitted */
- uint32 txtplunfl; /**< Template underflows (mac was too slow to transmit ACK/CTS
- * or BCN)
- */
- uint32 txphyerror; /**< Transmit phy error, type of error is reported in tx-status for
- * driver enqueued frames
- */
- uint32 pktengrxducast; /**< unicast frames rxed by the pkteng code */
- uint32 pktengrxdmcast; /**< multicast frames rxed by the pkteng code */
- uint32 rxfrmtoolong; /**< Received frame longer than legal limit (2346 bytes) */
- uint32 rxfrmtooshrt; /**< Received frame did not contain enough bytes for its frame type */
- uint32 rxinvmachdr; /**< Either the protocol version != 0 or frame type not
- * data/control/management
- */
- uint32 rxbadfcs; /**< number of frames for which the CRC check failed in the MAC */
- uint32 rxbadplcp; /**< parity check of the PLCP header failed */
- uint32 rxcrsglitch; /**< PHY was able to correlate the preamble but not the header */
- uint32 rxstrt; /**< Number of received frames with a good PLCP
- * (i.e. passing parity check)
- */
- uint32 rxdfrmucastmbss; /* number of received DATA frames with good FCS and matching RA */
- uint32 rxmfrmucastmbss; /* number of received mgmt frames with good FCS and matching RA */
- uint32 rxcfrmucast; /**< number of received CNTRL frames with good FCS and matching RA */
- uint32 rxrtsucast; /**< number of unicast RTS addressed to the MAC (good FCS) */
- uint32 rxctsucast; /**< number of unicast CTS addressed to the MAC (good FCS) */
- uint32 rxackucast; /**< number of ucast ACKS received (good FCS) */
- uint32 rxdfrmocast; /**< number of received DATA frames (good FCS and not matching RA) */
- uint32 rxmfrmocast; /**< number of received MGMT frames (good FCS and not matching RA) */
- uint32 rxcfrmocast; /**< number of received CNTRL frame (good FCS and not matching RA) */
- uint32 rxrtsocast; /**< number of received RTS not addressed to the MAC */
- uint32 rxctsocast; /**< number of received CTS not addressed to the MAC */
- uint32 rxdfrmmcast; /**< number of RX Data multicast frames received by the MAC */
- uint32 rxmfrmmcast; /**< number of RX Management multicast frames received by the MAC */
- uint32 rxcfrmmcast; /**< number of RX Control multicast frames received by the MAC
- * (unlikely to see these)
- */
- uint32 rxbeaconmbss; /**< beacons received from member of BSS */
- uint32 rxdfrmucastobss; /**< number of unicast frames addressed to the MAC from
- * other BSS (WDS FRAME)
- */
- uint32 rxbeaconobss; /**< beacons received from other BSS */
- uint32 rxrsptmout; /**< number of response timeouts for transmitted frames
- * expecting a response
- */
- uint32 bcntxcancl; /**< transmit beacons canceled due to receipt of beacon (IBSS) */
- uint32 PAD1;
- uint32 rxf0ovfl; /**< number of receive fifo 0 overflows */
- uint32 rxf1ovfl; /**< Number of receive fifo 1 overflows (obsolete) */
- uint32 rxf2ovfl; /**< Number of receive fifo 2 overflows (obsolete) */
- uint32 txsfovfl; /**< Number of transmit status fifo overflows (obsolete) */
- uint32 pmqovfl; /**< number of PMQ overflows */
- uint32 rxcgprqfrm; /**< number of received Probe requests that made it into
- * the PRQ fifo
- */
- uint32 rxcgprsqovfl; /**< Rx Probe Request Que overflow in the AP */
- uint32 txcgprsfail; /**< Tx Probe Response Fail. AP sent probe response but did
- * not get ACK
- */
- uint32 txcgprssuc; /**< Tx Probe Response Success (ACK was received) */
- uint32 prs_timeout; /**< number of probe requests that were dropped from the PRQ
- * fifo because a probe response could not be sent out within
- * the time limit defined in M_PRS_MAXTIME
- */
- uint32 rxnack; /**< obsolete */
- uint32 frmscons; /**< obsolete */
- uint32 txnack; /**< obsolete */
- uint32 rxback; /**< blockack rxcnt */
- uint32 txback; /**< blockack txcnt */
- uint32 bphy_rxcrsglitch; /**< PHY count of bphy glitches */
- uint32 rxdrop20s; /**< drop secondary cnt */
- uint32 rxtoolate; /**< receive too late */
- uint32 bphy_badplcp; /**< number of bad PLCP reception on BPHY rate */
- /* All counter variables have to be of uint32. */
-} wl_cnt_v_le10_mcst_t;
-
-#define MAX_RX_FIFO 3
-#define WL_RXFIFO_CNT_VERSION 1 /* current version of wl_rxfifo_cnt_t */
+#define MAX_RX_FIFO 3
+#define WL_RXFIFO_CNT_VERSION 1 /* current version of wl_rxfifo_cnt_t */
typedef struct {
/* Counters for frames received from rx fifos */
uint16 version;
uint32 rxbcast; /* BroadcastReceivedFrameCount */
uint32 rxdropped; /* rx dropped pkts (derived: sum of others) */
- /* This structure is deprecated and used only for ver <= 11.
- * All counter variables have to be of uint32.
- * Please refer to the following twiki before editing.
- * http://hwnbu-twiki.sj.broadcom.com/bin/view/
- * Mwgroup/WlCounters#wlc_layer_counters_non_xTLV
- */
} wl_cnt_ver_11_t;
-typedef struct {
- uint16 version; /* see definition of WL_CNT_T_VERSION */
- uint16 length; /* length of entire structure */
-
- /* transmit stat counters */
- uint32 txframe; /* tx data frames */
- uint32 txbyte; /* tx data bytes */
- uint32 txretrans; /* tx mac retransmits */
- uint32 txerror; /* tx data errors (derived: sum of others) */
- uint32 txctl; /* tx management frames */
- uint32 txprshort; /* tx short preamble frames */
- uint32 txserr; /* tx status errors */
- uint32 txnobuf; /* tx out of buffers errors */
- uint32 txnoassoc; /* tx discard because we're not associated */
- uint32 txrunt; /* tx runt frames */
- uint32 txchit; /* tx header cache hit (fastpath) */
- uint32 txcmiss; /* tx header cache miss (slowpath) */
-
- /* transmit chip error counters */
- uint32 txuflo; /* tx fifo underflows */
- uint32 txphyerr; /* tx phy errors (indicated in tx status) */
- uint32 txphycrs;
-
- /* receive stat counters */
- uint32 rxframe; /* rx data frames */
- uint32 rxbyte; /* rx data bytes */
- uint32 rxerror; /* rx data errors (derived: sum of others) */
- uint32 rxctl; /* rx management frames */
- uint32 rxnobuf; /* rx out of buffers errors */
- uint32 rxnondata; /* rx non data frames in the data channel errors */
- uint32 rxbadds; /* rx bad DS errors */
- uint32 rxbadcm; /* rx bad control or management frames */
- uint32 rxfragerr; /* rx fragmentation errors */
- uint32 rxrunt; /* rx runt frames */
- uint32 rxgiant; /* rx giant frames */
- uint32 rxnoscb; /* rx no scb error */
- uint32 rxbadproto; /* rx invalid frames */
- uint32 rxbadsrcmac; /* rx frames with Invalid Src Mac */
- uint32 rxbadda; /* rx frames tossed for invalid da */
- uint32 rxfilter; /* rx frames filtered out */
-
- /* receive chip error counters */
- uint32 rxoflo; /* rx fifo overflow errors */
- uint32 rxuflo[NFIFO]; /* rx dma descriptor underflow errors */
-
- uint32 d11cnt_txrts_off; /* d11cnt txrts value when reset d11cnt */
- uint32 d11cnt_rxcrc_off; /* d11cnt rxcrc value when reset d11cnt */
- uint32 d11cnt_txnocts_off; /* d11cnt txnocts value when reset d11cnt */
-
- /* misc counters */
- uint32 dmade; /* tx/rx dma descriptor errors */
- uint32 dmada; /* tx/rx dma data errors */
- uint32 dmape; /* tx/rx dma descriptor protocol errors */
- uint32 reset; /* reset count */
- uint32 tbtt; /* cnts the TBTT int's */
- uint32 txdmawar;
- uint32 pkt_callback_reg_fail; /* callbacks register failure */
-
- /* MAC counters: 32-bit version of d11.h's macstat_t */
- uint32 txallfrm; /* total number of frames sent, incl. Data, ACK, RTS, CTS,
- * Control Management (includes retransmissions)
- */
- uint32 txrtsfrm; /* number of RTS sent out by the MAC */
- uint32 txctsfrm; /* number of CTS sent out by the MAC */
- uint32 txackfrm; /* number of ACK frames sent out */
- uint32 txdnlfrm; /* Not used */
- uint32 txbcnfrm; /* beacons transmitted */
- uint32 txfunfl[8]; /* per-fifo tx underflows */
- uint32 txtplunfl; /* Template underflows (mac was too slow to transmit ACK/CTS
- * or BCN)
- */
- uint32 txphyerror; /* Transmit phy error, type of error is reported in tx-status for
- * driver enqueued frames
- */
- uint32 rxfrmtoolong; /* Received frame longer than legal limit (2346 bytes) */
- uint32 rxfrmtooshrt; /* Received frame did not contain enough bytes for its frame type */
- uint32 rxinvmachdr; /* Either the protocol version != 0 or frame type not
- * data/control/management
- */
- uint32 rxbadfcs; /* number of frames for which the CRC check failed in the MAC */
- uint32 rxbadplcp; /* parity check of the PLCP header failed */
- uint32 rxcrsglitch; /* PHY was able to correlate the preamble but not the header */
- uint32 rxstrt; /* Number of received frames with a good PLCP
- * (i.e. passing parity check)
- */
- uint32 rxdfrmucastmbss; /* Number of received DATA frames with good FCS and matching RA */
- uint32 rxmfrmucastmbss; /* number of received mgmt frames with good FCS and matching RA */
- uint32 rxcfrmucast; /* number of received CNTRL frames with good FCS and matching RA */
- uint32 rxrtsucast; /* number of unicast RTS addressed to the MAC (good FCS) */
- uint32 rxctsucast; /* number of unicast CTS addressed to the MAC (good FCS) */
- uint32 rxackucast; /* number of ucast ACKS received (good FCS) */
- uint32 rxdfrmocast; /* number of received DATA frames (good FCS and not matching RA) */
- uint32 rxmfrmocast; /* number of received MGMT frames (good FCS and not matching RA) */
- uint32 rxcfrmocast; /* number of received CNTRL frame (good FCS and not matching RA) */
- uint32 rxrtsocast; /* number of received RTS not addressed to the MAC */
- uint32 rxctsocast; /* number of received CTS not addressed to the MAC */
- uint32 rxdfrmmcast; /* number of RX Data multicast frames received by the MAC */
- uint32 rxmfrmmcast; /* number of RX Management multicast frames received by the MAC */
- uint32 rxcfrmmcast; /* number of RX Control multicast frames received by the MAC
- * (unlikely to see these)
- */
- uint32 rxbeaconmbss; /* beacons received from member of BSS */
- uint32 rxdfrmucastobss; /* number of unicast frames addressed to the MAC from
- * other BSS (WDS FRAME)
- */
- uint32 rxbeaconobss; /* beacons received from other BSS */
- uint32 rxrsptmout; /* Number of response timeouts for transmitted frames
- * expecting a response
- */
- uint32 bcntxcancl; /* transmit beacons canceled due to receipt of beacon (IBSS) */
- uint32 rxf0ovfl; /* Number of receive fifo 0 overflows */
- uint32 rxf1ovfl; /* Number of receive fifo 1 overflows (obsolete) */
- uint32 rxf2ovfl; /* Number of receive fifo 2 overflows (obsolete) */
- uint32 txsfovfl; /* Number of transmit status fifo overflows (obsolete) */
- uint32 pmqovfl; /* Number of PMQ overflows */
- uint32 rxcgprqfrm; /* Number of received Probe requests that made it into
- * the PRQ fifo
- */
- uint32 rxcgprsqovfl; /* Rx Probe Request Que overflow in the AP */
- uint32 txcgprsfail; /* Tx Probe Response Fail. AP sent probe response but did
- * not get ACK
- */
- uint32 txcgprssuc; /* Tx Probe Response Success (ACK was received) */
- uint32 prs_timeout; /* Number of probe requests that were dropped from the PRQ
- * fifo because a probe response could not be sent out within
- * the time limit defined in M_PRS_MAXTIME
- */
- uint32 rxnack; /* obsolete */
- uint32 frmscons; /* obsolete */
- uint32 txnack; /* obsolete */
- uint32 txglitch_nack; /* obsolete */
- uint32 txburst; /* obsolete */
-
- /* 802.11 MIB counters, pp. 614 of 802.11 reaff doc. */
- uint32 txfrag; /* dot11TransmittedFragmentCount */
- uint32 txmulti; /* dot11MulticastTransmittedFrameCount */
- uint32 txfail; /* dot11FailedCount */
- uint32 txretry; /* dot11RetryCount */
- uint32 txretrie; /* dot11MultipleRetryCount */
- uint32 rxdup; /* dot11FrameduplicateCount */
- uint32 txrts; /* dot11RTSSuccessCount */
- uint32 txnocts; /* dot11RTSFailureCount */
- uint32 txnoack; /* dot11ACKFailureCount */
- uint32 rxfrag; /* dot11ReceivedFragmentCount */
- uint32 rxmulti; /* dot11MulticastReceivedFrameCount */
- uint32 rxcrc; /* dot11FCSErrorCount */
- uint32 txfrmsnt; /* dot11TransmittedFrameCount (bogus MIB?) */
- uint32 rxundec; /* dot11WEPUndecryptableCount */
-
- /* WPA2 counters (see rxundec for DecryptFailureCount) */
- uint32 tkipmicfaill; /* TKIPLocalMICFailures */
- uint32 tkipcntrmsr; /* TKIPCounterMeasuresInvoked */
- uint32 tkipreplay; /* TKIPReplays */
- uint32 ccmpfmterr; /* CCMPFormatErrors */
- uint32 ccmpreplay; /* CCMPReplays */
- uint32 ccmpundec; /* CCMPDecryptErrors */
- uint32 fourwayfail; /* FourWayHandshakeFailures */
- uint32 wepundec; /* dot11WEPUndecryptableCount */
- uint32 wepicverr; /* dot11WEPICVErrorCount */
- uint32 decsuccess; /* DecryptSuccessCount */
- uint32 tkipicverr; /* TKIPICVErrorCount */
- uint32 wepexcluded; /* dot11WEPExcludedCount */
-
- uint32 txchanrej; /* Tx frames suppressed due to channel rejection */
- uint32 psmwds; /* Count PSM watchdogs */
- uint32 phywatchdog; /* Count Phy watchdogs (triggered by ucode) */
-
- /* MBSS counters, AP only */
- uint32 prq_entries_handled; /* PRQ entries read in */
- uint32 prq_undirected_entries; /* which were bcast bss & ssid */
- uint32 prq_bad_entries; /* which could not be translated to info */
- uint32 atim_suppress_count; /* TX suppressions on ATIM fifo */
- uint32 bcn_template_not_ready; /* Template marked in use on send bcn ... */
- uint32 bcn_template_not_ready_done; /* ...but "DMA done" interrupt rcvd */
- uint32 late_tbtt_dpc; /* TBTT DPC did not happen in time */
-
- /* per-rate receive stat counters */
- uint32 rx1mbps; /* packets rx at 1Mbps */
- uint32 rx2mbps; /* packets rx at 2Mbps */
- uint32 rx5mbps5; /* packets rx at 5.5Mbps */
- uint32 rx6mbps; /* packets rx at 6Mbps */
- uint32 rx9mbps; /* packets rx at 9Mbps */
- uint32 rx11mbps; /* packets rx at 11Mbps */
- uint32 rx12mbps; /* packets rx at 12Mbps */
- uint32 rx18mbps; /* packets rx at 18Mbps */
- uint32 rx24mbps; /* packets rx at 24Mbps */
- uint32 rx36mbps; /* packets rx at 36Mbps */
- uint32 rx48mbps; /* packets rx at 48Mbps */
- uint32 rx54mbps; /* packets rx at 54Mbps */
- uint32 rx108mbps; /* packets rx at 108mbps */
- uint32 rx162mbps; /* packets rx at 162mbps */
- uint32 rx216mbps; /* packets rx at 216 mbps */
- uint32 rx270mbps; /* packets rx at 270 mbps */
- uint32 rx324mbps; /* packets rx at 324 mbps */
- uint32 rx378mbps; /* packets rx at 378 mbps */
- uint32 rx432mbps; /* packets rx at 432 mbps */
- uint32 rx486mbps; /* packets rx at 486 mbps */
- uint32 rx540mbps; /* packets rx at 540 mbps */
-
- /* pkteng rx frame stats */
- uint32 pktengrxducast; /* unicast frames rxed by the pkteng code */
- uint32 pktengrxdmcast; /* multicast frames rxed by the pkteng code */
-
- uint32 rfdisable; /* count of radio disables */
- uint32 bphy_rxcrsglitch; /* PHY count of bphy glitches */
-
- uint32 txexptime; /* Tx frames suppressed due to timer expiration */
-
- uint32 txmpdu_sgi; /* count for sgi transmit */
- uint32 rxmpdu_sgi; /* count for sgi received */
- uint32 txmpdu_stbc; /* count for stbc transmit */
- uint32 rxmpdu_stbc; /* count for stbc received */
-
- uint32 rxundec_mcst; /* dot11WEPUndecryptableCount */
-
- /* WPA2 counters (see rxundec for DecryptFailureCount) */
- uint32 tkipmicfaill_mcst; /* TKIPLocalMICFailures */
- uint32 tkipcntrmsr_mcst; /* TKIPCounterMeasuresInvoked */
- uint32 tkipreplay_mcst; /* TKIPReplays */
- uint32 ccmpfmterr_mcst; /* CCMPFormatErrors */
- uint32 ccmpreplay_mcst; /* CCMPReplays */
- uint32 ccmpundec_mcst; /* CCMPDecryptErrors */
- uint32 fourwayfail_mcst; /* FourWayHandshakeFailures */
- uint32 wepundec_mcst; /* dot11WEPUndecryptableCount */
- uint32 wepicverr_mcst; /* dot11WEPICVErrorCount */
- uint32 decsuccess_mcst; /* DecryptSuccessCount */
- uint32 tkipicverr_mcst; /* TKIPICVErrorCount */
- uint32 wepexcluded_mcst; /* dot11WEPExcludedCount */
-
- uint32 dma_hang; /* count for stbc received */
- uint32 rxrtry; /* number of packets with retry bit set to 1 */
-} wl_cnt_ver_7_t;
-
typedef struct {
uint16 version; /**< see definition of WL_CNT_T_VERSION */
uint16 length; /**< length of entire structure */
* fifo because a probe response could not be sent out within
* the time limit defined in M_PRS_MAXTIME
*/
- uint32 rxnack; /**< Number of NACKS received (Afterburner) */
- uint32 frmscons; /**< Number of frames completed without transmission because of an
- * Afterburner re-queue
- */
+ uint32 rxnack;
+ uint32 frmscons;
uint32 txnack; /**< obsolete */
uint32 rxback; /**< blockack rxcnt */
uint32 txback; /**< blockack txcnt */
uint32 rxmpdu_stbc; /**< count for stbc received */
uint32 rxdrop20s; /**< drop secondary cnt */
- /* All counter variables have to be of uint32. */
} wl_cnt_ver_6_t;
#define WL_DELTA_STATS_T_VERSION 2 /**< current version of wl_delta_stats_t struct */
uint32 bphy_rxcrsglitch;
uint32 bphy_badplcp;
- uint32 slice_index; /**< Slice for which stats are reported */
-
} wl_delta_stats_t;
/* Partial statistics counter report */
uint32 high;
};
-/* A versioned structure for setting and retrieving debug message levels. */
-#define WL_MSGLEVEL_STRUCT_VERSION_1 1
-
-typedef struct wl_msglevel_v1 {
- uint16 version;
- uint16 length;
- uint32 msglevel1;
- uint32 msglevel2;
- uint32 msglevel3;
- /* add another uint32 when full */
-} wl_msglevel_v1_t;
-
#define WL_ICMP_IPV6_CFG_VERSION 1
#define WL_ICMP_IPV6_CLEAR_ALL (1 << 0)
#define WL_MKEEP_ALIVE_VERSION 1
#define WL_MKEEP_ALIVE_FIXED_LEN OFFSETOF(wl_mkeep_alive_pkt_t, data)
-/* 1/2 second precision since idle time is a seconds counter anyway */
#define WL_MKEEP_ALIVE_PRECISION 500
#define WL_MKEEP_ALIVE_PERIOD_MASK 0x7FFFFFFF
#define WL_MKEEP_ALIVE_IMMEDIATE 0x80000000
uint8 packet[];
} wake_pkt_t;
+
#define WL_MTCPKEEP_ALIVE_VERSION 1
/* #ifdef WLBA */
uint8 mode; /**< mode: depends on iovar */
uint8 PAD;
chanspec_t chanspec;
- uint8 PAD[6];
+ uint16 PAD;
+ uint32 pad; /**< future */
} tdls_iovar_t;
#define TDLS_WFD_IE_SIZE 512
#define TSPEC_DEFAULT_DIALOG_TOKEN 42 /**< default dialog token */
#define TSPEC_DEFAULT_SBW_FACTOR 0x3000 /**< default surplus bw */
+
#define WL_WOWL_KEEPALIVE_MAX_PACKET_SIZE 80
#define WLC_WOWL_MAX_KEEPALIVE 2
uint32 stop_ptr; /* Stop address to store */
uint8 optn_bmp; /* Options */
uint8 PAD[3];
- /* Don't change the order after this nor
- * add anything in betw. Code uses offsets to populate
- * registers
- */
uint32 tr_mask; /* Trigger Mask */
uint32 tr_val; /* Trigger Value */
uint32 s_mask; /* Store Mode Mask */
uint32 status;
uint32 count;
wl_pfn_net_info_v1_t netinfo;
- wl_bss_info_v109_t bss_info;
+ wl_bss_info_t bss_info;
} wl_pfn_scanresult_v1_t;
typedef struct wl_pfn_scanresult_v2 {
uint32 status;
uint32 count;
wl_pfn_net_info_v2_t netinfo;
- wl_bss_info_v109_t bss_info;
+ wl_bss_info_t bss_info;
} wl_pfn_scanresult_v2_t;
-typedef struct wl_pfn_scanresult_v2_1 {
- uint32 version;
- uint32 status;
- uint32 count;
- wl_pfn_net_info_v2_t netinfo;
- uint8 bss_info[]; /* var length wl_bss_info_X structures */
-} wl_pfn_scanresult_v2_1_t;
-
/**PFN data structure */
typedef struct wl_pfn_param {
int32 version; /**< PNO parameters version */
#ifndef BESTN_MAX
#define BESTN_MAX 10
-#endif // endif
+#endif
#ifndef MSCAN_MAX
#define MSCAN_MAX 90
-#endif // endif
+#endif
/* Dynamic scan configuration for motion profiles */
* 1 means scan for P2P devices plus non-P2P APs.
*/
+
/** For adding a WFDS service to seek */
typedef struct {
uint32 seek_hdl; /**< unique id chosen by host */
uint32 seek_hdl; /**< delete service specified by id */
} wl_p2po_wfds_seek_del_t;
+
/** For adding a WFDS service to advertise */
#include <packed_section_start.h>
typedef BWL_PRE_PACKED_STRUCT struct {
uint8 query_data[1]; /**< ANQP encoded query (max ANQPO_MAX_QUERY_SIZE) */
} wl_anqpo_set_t;
-#define WL_ANQPO_FLAGS_BSSID_WILDCARD 0x0001
-#define WL_ANQPO_PEER_LIST_VERSION_2 2
typedef struct {
uint16 channel; /**< channel of the peer */
struct ether_addr addr; /**< addr of the peer */
-} wl_anqpo_peer_v1_t;
-typedef struct {
- uint16 channel; /**< channel of the peer */
- struct ether_addr addr; /**< addr of the peer */
- uint32 flags; /**< 0x01-Peer is MBO Capable */
-} wl_anqpo_peer_v2_t;
+} wl_anqpo_peer_t;
#define ANQPO_MAX_PEER_LIST 64
typedef struct {
uint16 count; /**< number of peers in list */
- wl_anqpo_peer_v1_t peer[1]; /**< max ANQPO_MAX_PEER_LIST */
-} wl_anqpo_peer_list_v1_t;
-
-typedef struct {
- uint16 version; /**<VERSION */
- uint16 length; /**< length of entire structure */
- uint16 count; /**< number of peers in list */
- wl_anqpo_peer_v2_t peer[1]; /**< max ANQPO_MAX_PEER_LIST */
-} wl_anqpo_peer_list_v2_t;
-
-#ifndef WL_ANQPO_PEER_LIST_TYPEDEF_HAS_ALIAS
-typedef wl_anqpo_peer_list_v1_t wl_anqpo_peer_list_t;
-typedef wl_anqpo_peer_v1_t wl_anqpo_peer_t;
-#endif /* WL_ANQPO_PEER_LIST_TYPEDEF_HAS_ALIAS */
+ wl_anqpo_peer_t peer[1]; /**< max ANQPO_MAX_PEER_LIST */
+} wl_anqpo_peer_list_t;
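/*
 * Illustrative sizing sketch for the variable-length peer list above; the
 * helper below is hypothetical and not part of this header. An iovar
 * buffer must hold the fixed 'count' field followed by 'count' packed
 * wl_anqpo_peer_t entries, capped at ANQPO_MAX_PEER_LIST.
 */
static int anqpo_peer_list_bytes(uint16 count)
{
	if (count > ANQPO_MAX_PEER_LIST)
		return -1;	/* too many peers for a single request */
	return (int)(OFFSETOF(wl_anqpo_peer_list_t, peer) +
		count * sizeof(wl_anqpo_peer_t));
}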
#define ANQPO_MAX_IGNORE_SSID 64
typedef struct {
struct ether_addr bssid[]; /**< max ANQPO_MAX_IGNORE_BSSID */
} wl_anqpo_ignore_bssid_list_t;
+
struct toe_ol_stats_t {
/** Num of tx packets that don't need to be checksummed */
uint32 tx_summed;
uint8 idx; /**< next rssi location */
} rssi_struct_t;
-#ifdef WLDFSP
-#define DFSP_EVT_OFFSET OFFSETOF(dfsp_event_data_t, ie)
-#define DFSP_EVT_FLAGS_AP_ASSOC (1 << 0)
-#define DFSP_EVT_FLAGS_AP_BCNMON (1 << 1)
-#define DFSP_EVT_FLAGS_PROXY_BCSA (1 << 2)
-#define DFSP_EVT_FLAGS_PROXY_UCSA (1 << 3)
-#define DFSP_EVT_FLAGS_PROXY_PCSA (1 << 4)
-
-typedef struct dfsp_event_data {
- uint16 flags; /* indicate what triggers the event */
- uint16 ie_len;
- uint8 ie[]; /* variable length */
-} dfsp_event_data_t;
-
-/* Proxy Channel Switch Announcement is a collection of IEs */
-typedef struct dfsp_pcsa {
- dot11_ext_csa_ie_t ecsa;
- dot11_mesh_csp_ie_t mcsp;
- dot11_wide_bw_chan_switch_ie_t wbcs;
-} dfsp_pcsa_t;
-
-/* DFS Proxy */
-#define DFSP_CFG_VERSION 1
-#define DFSP_FLAGS_ENAB 0x1
-typedef struct dfsp_cfg {
- uint16 version;
- uint16 len;
- uint16 flags; /**< bit 1 to enable/disable the feature */
- uint16 max_bcn_miss_dur; /**< maximum beacon miss duration before ceasing data tx */
- uint8 mcsp_ttl; /**< remaining number of hops allowed for pcsa message */
- uint8 bcsa_cnt; /**< repeat numbers of broadcast CSA */
- chanspec_t mon_chan; /**< passive monitoring channel spec */
- struct ether_addr mon_bssid; /**< broadcast means monitoring all */
- uint16 max_bcn_miss_dur_af; /**< maximum beacon miss duration before ceasing AF tx */
-} dfsp_cfg_t;
-
-#define DFSP_UCSA_VERSION 1
-typedef struct dfsp_ucsa {
- uint16 version;
- uint16 len;
- struct ether_addr address;
- uint8 enable;
- uint8 retry_cnt; /**< just in case host needs to control the value */
-} dfsp_ucsa_t;
-
-typedef struct dfsp_ucsa_tbl {
- uint8 tbl_num;
- uint8 tbl[];
-} dfsp_ucsa_tbl_t;
-
-typedef struct dfsp_stats {
- uint32 dfsp_csainfra;
- uint32 dfsp_csabcnmon;
- uint32 dfsp_bcsarx;
- uint32 dfsp_ucsarx;
- uint32 dfsp_pcsarx;
- uint32 dfsp_bcsatx;
- uint32 dfsp_ucsatx;
- uint32 dfsp_pcsatx;
- uint32 dfsp_ucsatxfail;
- uint32 dfsp_evtnotif;
- uint32 dfsp_evtsuspect;
- uint32 dfsp_evtresume;
-} dfsp_stats_t;
-#endif /* WLDFSP */
/*
* ptk_start: iovar to start 4-way handshake for secured ranging
uint16 base_offs; /**< Base for offset (defined below) */
uint16 size_bytes; /**< Size of mask/pattern */
uint16 match_flags; /**< Addition flags controlling the match */
- uint8 mask_and_data[]; /**< Variable length mask followed by data, each size_bytes */
+ uint8 mask_and_data[1]; /**< Variable length mask followed by data, each size_bytes */
} wl_pkt_filter_pattern_listel_t;
typedef struct wl_pkt_filter_pattern_list {
uint8 list_cnt; /**< Number of elements in the list */
uint8 PAD1[1]; /**< Reserved (possible version: reserved) */
uint16 totsize; /**< Total size of this pattern list (includes this struct) */
- uint8 patterns[]; /**< Variable number of wl_pkt_filter_pattern_listel_t elements */
+ wl_pkt_filter_pattern_listel_t patterns[]; /**< Variable number of list elements */
} wl_pkt_filter_pattern_list_t;
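/*
 * Illustrative sizing sketch (hypothetical helper; any inter-element
 * alignment the firmware may expect is not spelled out in this header):
 * each list element is its fixed header followed by the mask and then
 * the data, each 'size_bytes' long, and 'totsize' must cover the list
 * header plus every element.
 */
static uint32 pkt_filter_listel_bytes(uint16 size_bytes)
{
	return (uint32)(OFFSETOF(wl_pkt_filter_pattern_listel_t, mask_and_data) +
		2u * size_bytes);	/* mask, then data */
}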
typedef struct wl_apf_program {
*/
uint32 size_bytes; /* Size of the pattern. Bitmask must be the same size. */
uint32 timeout; /* Timeout(seconds) */
- uint8 mask_and_pattern[]; /* Variable length mask and pattern data.
+ uint8 mask_and_pattern[1]; /* Variable length mask and pattern data.
* mask starts at offset 0. Pattern
* immediately follows mask.
*/
wl_apf_program_t apf_program; /* apf program */
wl_pkt_filter_pattern_timeout_t pattern_timeout; /* Pattern timeout event filter */
} u;
- /* Do NOT add structure members after the filter definitions, since they
- * may include variable length arrays.
- */
} wl_pkt_filter_t;
/** IOVAR "tcp_keep_set" parameter. Used to install tcp keep_alive stuff. */
/** IOVAR "pkt_filter_list" parameter. Used to retrieve a list of installed filters. */
typedef struct wl_pkt_filter_list {
uint32 num; /**< Number of installed packet filters */
- uint8 filter[]; /**< Variable array of packet filters. */
+ wl_pkt_filter_t filter[1]; /**< Variable array of packet filters. */
} wl_pkt_filter_list_t;
#define WL_PKT_FILTER_LIST_FIXED_LEN OFFSETOF(wl_pkt_filter_list_t, filter)
#define WL_PKT_FILTER_PORTS_FIXED_LEN OFFSETOF(wl_pkt_filter_ports_t, ports)
#define WL_PKT_FILTER_PORTS_VERSION 0
-#if defined(WL_PKT_FLTR_EXT) && !defined(WL_PKT_FLTR_EXT_DISABLED)
-#define WL_PKT_FILTER_PORTS_MAX 256
-#else
-#define WL_PKT_FILTER_PORTS_MAX 128
-#endif /* WL_PKT_FLTR_EXT && !WL_PKT_FLTR_EXT_DISABLED */
+#define WL_PKT_FILTER_PORTS_MAX 128
#define RSN_REPLAY_LEN 8
typedef struct _gtkrefresh {
((cmd) == WLC_GET_AP) || \
((cmd) == WLC_GET_INSTANCE))
-#define MAX_PKTENG_SWEEP_STEPS 40
typedef struct wl_pkteng {
uint32 flags;
uint32 delay; /**< Inter-packet delay */
uint8 seqno; /**< Enable/disable sequence no. */
struct ether_addr dest; /**< Destination address */
struct ether_addr src; /**< Source address */
- uint8 sweep_steps; /**< Number of sweep power */
- uint8 PAD[2];
+ uint8 PAD[3];
} wl_pkteng_t;
-/* IOVAR pkteng_sweep_counters response structure */
-#define WL_PKTENG_SWEEP_COUNTERS_VERSION 1
-typedef struct wl_pkteng_sweep_ctrs {
- uint16 version; /**< Version - 1 */
- uint16 size; /**< Complete Size including sweep_counters */
- uint16 sweep_steps; /**< Number of steps */
- uint16 PAD;
- uint16 sweep_counter[]; /**< Array of frame counters */
-} wl_pkteng_sweep_ctrs_t;
-
-/* IOVAR pkteng_rx_pkt response structure */
-#define WL_PKTENG_RX_PKT_VERSION 1
-typedef struct wl_pkteng_rx_pkt {
- uint16 version; /**< Version - 1 */
- uint16 size; /**< Complete Size including the packet */
- uint8 payload[]; /**< Packet payload */
-} wl_pkteng_rx_pkt_t;
-
-#define WL_PKTENG_RU_FILL_VER_1 1u
-#define WL_PKTENG_RU_FILL_VER_2 2u
+#define WL_PKTENG_RU_FILL_VER_1 1
// struct for ru packet engine
-typedef struct wl_pkteng_ru_v1 {
- uint16 version; /* ver is 1 */
- uint16 length; /* size of complete structure */
- uint8 bw; /* bandwidth info */
- uint8 ru_alloc_val; /* ru allocation index number */
- uint8 mcs_val; /* mcs allocated value */
- uint8 nss_val; /* num of spatial streams */
- uint32 num_bytes; /* approx num of bytes to calculate other required params */
- uint8 cp_ltf_val ; /* GI and LTF symbol size */
- uint8 he_ltf_symb ; /* num of HE-LTF symbols */
- uint8 stbc; /* STBC support */
- uint8 coding_val; /* BCC/LDPC coding support */
- uint8 pe_category; /* PE duration 0/8/16usecs */
- uint8 dcm; /* dual carrier modulation */
- uint8 mumimo_ltfmode; /* ltf mode */
- uint8 trig_tx; /* form and transmit the trigger frame */
- uint8 trig_type; /* type of trigger frame */
- uint8 trig_period; /* trigger tx periodicity TBD */
- struct ether_addr dest; /* destination address for un-associated mode */
-} wl_pkteng_ru_v1_t;
-
-typedef struct wl_pkteng_ru_v2 {
- uint16 version; /* ver is 1 */
- uint16 length; /* size of complete structure */
+typedef struct wl_pkteng_ru {
+ uint16 version; /* ver is 1 */
+ uint16 length; /* size of complete structure */
uint8 bw; /* bandwidth info */
uint8 ru_alloc_val; /* ru allocation index number */
uint8 mcs_val; /* mcs allocated value */
uint8 nss_val; /* num of spatial streams */
uint32 num_bytes; /* approx num of bytes to calculate other required params */
- struct ether_addr dest; /* destination address for un-associated mode */
uint8 cp_ltf_val ; /* GI and LTF symbol size */
uint8 he_ltf_symb ; /* num of HE-LTF symbols */
uint8 stbc; /* STBC support */
uint8 coding_val; /* BCC/LDPC coding support */
- uint8 pe_category; /* PE duration 0/8/16usecs */
+ uint8 pe_category; /* PE duration 0/8/16usecs */
uint8 dcm; /* dual carrier modulation */
- uint8 mumimo_ltfmode; /* ltf mode */
- uint8 trig_tx; /* form and transmit the trigger frame */
- uint8 trig_type; /* type of trigger frame */
- uint8 trig_period; /* trigger tx periodicity TBD */
- uint8 tgt_rssi; /* target rssi value in encoded format */
- uint8 pad[3]; /* 3 byte padding to make structure size a multiple of 32bits */
-} wl_pkteng_ru_v2_t;
-
-#ifndef WL_PKTENG_RU_VER
-/* App uses the latest version - source picks it up from wlc_types.h */
-typedef wl_pkteng_ru_v2_t wl_pkteng_ru_fill_t;
-#endif // endif
-
-typedef struct wl_trig_frame_info {
- /* Structure versioning and structure length params */
- uint16 version;
- uint16 length;
- /* Below params are the fields related to trigger frame contents */
- /* Common Info Params Figure 9-52d - 11ax Draft 1.1 */
- uint16 lsig_len;
- uint16 trigger_type;
- uint16 cascade_indication;
- uint16 cs_req;
- uint16 bw;
- uint16 cp_ltf_type;
- uint16 mu_mimo_ltf_mode;
- uint16 num_he_ltf_syms;
- uint16 stbc;
- uint16 ldpc_extra_symb;
- uint16 ap_tx_pwr;
- uint16 afactor;
- uint16 pe_disambiguity;
- uint16 spatial_resuse;
- uint16 doppler;
- uint16 he_siga_rsvd;
- uint16 cmn_info_rsvd;
- /* User Info Params Figure 9-52e - 11ax Draft 1.1 */
- uint16 aid12;
- uint16 ru_alloc;
- uint16 coding_type;
- uint16 mcs;
- uint16 dcm;
- uint16 ss_alloc;
- uint16 tgt_rssi;
- uint16 usr_info_rsvd;
-} wl_trig_frame_info_t;
-
-/* wl pkteng_stats related definitions */
-#define WL_PKTENG_STATS_V1 (1)
-#define WL_PKTENG_STATS_V2 (2)
-
-typedef struct wl_pkteng_stats_v1 {
- uint32 lostfrmcnt; /**< RX PER test: no of frames lost (skip seqno) */
- int32 rssi; /**< RSSI */
- int32 snr; /**< signal to noise ratio */
- uint16 rxpktcnt[NUM_80211_RATES+1];
- uint8 rssi_qdb; /**< qdB portion of the computed rssi */
- uint8 version;
-} wl_pkteng_stats_v1_t;
+ uint8 mumimo_ltfmode; /* ltf mode */
+ uint8 PAD[1]; /* pad byte to keep the structure 4-byte aligned */
+} wl_pkteng_ru_fill_t;
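/*
 * Minimal initialization sketch (hypothetical helper; the valid encodings
 * of bw, ru_alloc_val and the other fields are not defined in this
 * excerpt): as with most iovar structures here, the caller sets 'version'
 * and 'length' before issuing the request and leaves unused fields zero.
 */
static void pkteng_ru_fill_init(wl_pkteng_ru_fill_t *ru)
{
	wl_pkteng_ru_fill_t zero = {0};

	*ru = zero;
	ru->version = WL_PKTENG_RU_FILL_VER_1;
	ru->length = (uint16)sizeof(*ru);
}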
-typedef struct wl_pkteng_stats_v2 {
+typedef struct wl_pkteng_stats {
uint32 lostfrmcnt; /**< RX PER test: no of frames lost (skip seqno) */
int32 rssi; /**< RSSI */
int32 snr; /**< signal to noise ratio */
uint16 rxpktcnt[NUM_80211_RATES+1];
uint8 rssi_qdb; /**< qdB portion of the computed rssi */
- uint8 version;
- uint16 length;
- uint16 pad;
- int32 rssi_per_core[WL_RSSI_ANT_MAX];
- int32 rssi_per_core_qdb[WL_RSSI_ANT_MAX];
-} wl_pkteng_stats_v2_t;
-
-#ifndef WL_PKTENG_STATS_TYPEDEF_HAS_ALIAS
-typedef wl_pkteng_stats_v1_t wl_pkteng_stats_t;
-#endif /* WL_PKTENG_STATS_TYPEDEF_HAS_ALIAS */
+ uint8 PAD;
+} wl_pkteng_stats_t;
typedef struct wl_txcal_params {
wl_pkteng_t pkteng;
uint8 PAD;
} wl_txcal_params_t;
-typedef struct wl_txcal_gainidx {
- uint8 num_actv_cores;
- uint8 gidx_start_percore[WL_STA_ANT_MAX];
- uint8 gidx_stop_percore[WL_STA_ANT_MAX];
- uint8 PAD[3];
-} wl_txcal_gainidx_t;
-
-typedef struct wl_txcal_params_v2 {
- wl_pkteng_t pkteng;
- int8 gidx_step;
- uint8 pwr_start[WL_STA_ANT_MAX];
- uint8 pwr_stop[WL_STA_ANT_MAX];
- uint8 init_start_idx;
- uint8 gidx_start_percore[WL_STA_ANT_MAX];
- uint8 gidx_stop_percore[WL_STA_ANT_MAX];
- uint16 version;
-} wl_txcal_params_v2_t;
-
-typedef wl_txcal_params_t wl_txcal_params_v1_t;
-
-typedef struct wl_rssilog_params {
- uint8 enable;
- uint8 rssi_threshold;
- uint8 time_threshold;
- uint8 pad;
-} wl_rssilog_params_t;
typedef struct wl_sslpnphy_papd_debug_data {
uint8 psat_pwr;
uint16 pad;
} wl_rssi_monitor_evt_t;
-/* CCA based channel quality event configuration (ID values for both config and report) */
+/* CCA based channel quality event configuration */
#define WL_CHAN_QUAL_CCA 0
#define WL_CHAN_QUAL_NF 1
#define WL_CHAN_QUAL_NF_LTE 2
-#define WL_CHAN_QUAL_TOTAL 3 /* The total IDs supported in both config and report */
-/* Additional channel quality event support in report only (>= 0x100)
- * Notice that uint8 is used in configuration struct wl_chan_qual_metric_t, but uint16 is
- * used for report in struct cca_chan_qual_event_t. So the ID values beyond 8-bit are used
- * for reporting purpose only.
- */
-#define WL_CHAN_QUAL_FULL_CCA (0x100u | WL_CHAN_QUAL_CCA) /* CCA: ibss vs. obss */
-#define WL_CHAN_QUAL_FULLPM_CCA (0x200u | WL_CHAN_QUAL_CCA) /* CCA: me vs. notme, PM vs. !PM */
+#define WL_CHAN_QUAL_TOTAL 3
#define MAX_CHAN_QUAL_LEVELS 8
uint8 ch_list[1];
} wl_action_obss_coex_req_t;
+
/** IOVar parameter block for small MAC address array with type indicator */
#define WL_IOV_MAC_PARAM_LEN 4
-/** This value is hardcoded to be 16 and MUST match PKTQ_MAX_PREC value defined elsewhere */
#define WL_IOV_PKTQ_LOG_PRECS 16
#include <packed_section_start.h>
} wl_iov_mac_full_params_t;
/** Parameter block for PKTQ_LOG statistics */
-/* NOTE: this structure cannot change! It is exported to wlu as a binary format
- * A new format revision number must be created if the interface changes
- * The latest is v05; previous v01...v03 are no longer supported, v04 has
- * common base with v05
-*/
#define PKTQ_LOG_COUNTERS_V4 \
/* packets requested to be stored */ \
uint32 requested; \
char headings[];
} pktq_log_format_v05_t;
+
typedef struct {
uint32 version;
wl_iov_mac_params_t params;
uint8 phytype;
uint8 pad;
} nbr_element_t;
-#define NBR_ADD_STATIC 0
-#define NBR_ADD_DYNAMIC 1
#define WL_RRM_NBR_RPT_VER 1
-
-#define WL_NBR_RPT_FLAG_BSS_PREF_FROM_AP 0x01
/** 11k Neighbor Report element */
typedef struct nbr_rpt_elem {
uint8 version;
uint8 reg;
uint8 channel;
uint8 phytype;
- uint8 addtype; /* static for manual add or dynamic if auto-learning of neighbors */
+ uint8 pad_2;
wlc_ssid_t ssid;
chanspec_t chanspec;
uint8 bss_trans_preference;
uint32 count;
} pcie_bus_tput_stats_t;
-#define HOST_WAKEUP_DATA_VER 1
-#include <packed_section_start.h>
-/* Bus interface host wakeup data */
-typedef BWL_PRE_PACKED_STRUCT struct wl_host_wakeup_data {
- uint16 ver;
- uint16 len;
- uchar data[1]; /* wakeup data */
-} BWL_POST_PACKED_STRUCT wl_host_wakeup_data_t;
-#include <packed_section_end.h>
-
-#define HOST_WAKEUP_DATA_VER_2 2
-#include <packed_section_start.h>
-/* Bus interface host wakeup data */
-typedef BWL_PRE_PACKED_STRUCT struct wl_host_wakeup_data_v2 {
- uint16 ver;
- uint16 len;
- uint32 gpio_toggle_time; /* gpio toggle time in ms */
- uchar data[1]; /* wakeup data */
-} BWL_POST_PACKED_STRUCT wl_host_wakeup_data_v2_t;
-#include <packed_section_end.h>
-
typedef struct keepalives_max_idle {
uint16 keepalive_count; /**< nmbr of keepalives per bss_max_idle period */
uint8 mkeepalive_index; /**< mkeepalive_index for keepalive frame to be used */
#define PM_IGNORE_BCMC_PROXY_ARP (1 << 0)
#define PM_IGNORE_BCMC_ALL_DMS_ACCEPTED (1 << 1)
-/* ##### HMAP section ##### */
-#define PCIE_MAX_HMAP_WINDOWS 8
-#define PCIE_HMAPTEST_VERSION 2
-#define HMAPTEST_INVALID_OFFSET 0xFFFFFFFFu
-#define HMAPTEST_DEFAULT_WRITE_PATTERN 0xBABECAFEu
-#define HMAPTEST_ACCESS_ARM 0
-#define HMAPTEST_ACCESS_M2M 1
-#define HMAPTEST_ACCESS_D11 2
-#define HMAPTEST_ACCESS_NONE 3
-
-typedef struct pcie_hmaptest {
- uint16 version; /* Version */
- uint16 length; /* Length of entire structure */
- uint32 xfer_len;
- uint32 accesstype;
- uint32 is_write;
- uint32 is_invalid;
- uint32 host_addr_hi;
- uint32 host_addr_lo;
- uint32 host_offset;
- uint32 value; /* 4 byte value to be filled in case of write access test */
- uint32 delay; /* wait time in seconds before initiating access from dongle */
-} pcie_hmaptest_t;
-
-/* HMAP window register set */
-typedef struct hmapwindow {
- uint32 baseaddr_lo; /* BaseAddrLower */
- uint32 baseaddr_hi; /* BaseAddrUpper */
- uint32 windowlength; /* Window Length */
-} hmapwindow_t;
-
-#define PCIE_HMAP_VERSION 1
-typedef struct pcie_hmap {
- uint16 version; /**< Version */
- uint16 length; /**< Length of entire structure */
- uint32 enable; /**< status of HMAP enabled/disabled */
- uint32 nwindows; /* no. of HMAP windows enabled */
- uint32 window_config; /* HMAP window_config register */
- uint32 hmap_violationaddr_lo; /* violating address lo */
- uint32 hmap_violationaddr_hi; /* violating addr hi */
- uint32 hmap_violation_info; /* violation info */
- hmapwindow_t hwindows[]; /* Multiple hwindows */
-} pcie_hmap_t;
-
/* ##### Power Stats section ##### */
#define WL_PWRSTATS_VERSION 2
#define WLC_PMD_CHK_UNALIGN_TBTT 0x100
#define WLC_PMD_APSD_STA_UP 0x200
#define WLC_PMD_TX_PEND_WAR 0x400 /* obsolete, can be reused */
-#define WLC_PMD_NAN_AWAKE 0x400 /* Reusing for NAN */
#define WLC_PMD_GPTIMER_STAY_AWAKE 0x800
#define WLC_PMD_PM2_RADIO_SOFF_PEND 0x2000
#define WLC_PMD_NON_PRIM_STA_UP 0x4000
/* Below are latest definitions from PHO25178RC100_BRANCH_6_50 */
/* wl_pwr_pm_awake_stats_v1_t is used for WL_PWRSTATS_TYPE_PM_AWAKE */
-/* Use regs from d11.h instead of raw addresses for */
/* (at least) the chip independent registers */
typedef struct ucode_dbg_ext {
uint32 x120;
} BWL_POST_PACKED_STRUCT wl_pwr_usb_hsic_stats_t;
#include <packed_section_end.h>
-/* PCIe Event counter tlv IDs */
-enum pcie_cnt_xtlv_id {
- PCIE_CNT_XTLV_METRICS = 0x1, /**< PCIe Bus Metrics */
- PCIE_CNT_XTLV_BUS_CNT = 0x2 /**< PCIe Bus counters */
-};
-
typedef struct pcie_bus_metrics {
uint32 d3_suspend_ct; /**< suspend count */
uint32 d0_resume_ct; /**< resume count */
uint32 ltr_sleep_dur; /**< # of msecs chip was in LTR SLEEP */
} pcie_bus_metrics_t;
-typedef struct pcie_cnt {
- uint32 ltr_state; /**< Current LTR state */
- uint32 l0_sr_cnt; /**< SR count during L0 */
- uint32 l2l3_sr_cnt; /**< SR count during L2L3 */
- uint32 d3_ack_sr_cnt; /**< srcount during last D3-ACK */
- uint32 d3_sr_cnt; /**< SR count during D3 */
- uint32 d3_info_start; /**< D3 INFORM received time */
- uint32 d3_info_enter_cnt; /**< # of D3 INFORM received */
- uint32 d3_cnt; /**< # of real D3 */
- uint32 d3_ack_sent_cnt; /**< # of D3 ACK sent count */
- uint32 d3_drop_cnt_event; /**< # of events dropped during D3 */
- uint32 d2h_req_q_len; /**< # of Packet pending in D2H request queue */
- uint32 hw_reason; /**< Last Host wake assert reason */
- uint32 hw_assert_cnt; /**< # of times Host wake Asserted */
- uint32 host_ready_cnt; /**< # of Host ready interrupts */
- uint32 hw_assert_reason_0; /**< timestamp when hw_reason is TRAP */
- uint32 hw_assert_reason_1; /**< timestamp when hw_reason is WL_EVENT */
- uint32 hw_assert_reason_2; /**< timestamp when hw_reason is DATA */
- uint32 hw_assert_reason_3; /**< timestamp when hw_reason is DELAYED_WAKE */
- uint32 last_host_ready; /**< Timestamp of last Host ready */
- bool hw_asserted; /**< Flag to indicate if Host wake is Asserted */
- bool event_delivery_pend; /**< No resources to send event */
- uint16 pad; /**< Word alignment for scripts */
-} pcie_cnt_t;
-
/** Bus interface info for PCIE */
typedef struct wl_pwr_pcie_stats {
uint16 type; /**< WL_PWRSTATS_TYPE_PCIE */
uint32 rx_dur; /**< RX Active duration in us */
} wl_pwr_phy_stats_t;
+
typedef struct wl_mimo_meas_metrics_v1 {
uint16 type;
uint16 len;
/* Total SIFS idle time in SISO mode */
uint32 total_sifs_time_siso;
} wl_mimo_meas_metrics_t;
-
-typedef struct wl_pwr_slice_index {
- uint16 type; /* WL_PWRSTATS_TYPE_SLICE_INDEX */
- uint16 len;
-
- uint32 slice_index; /* Slice index for which stats are meant for */
-} wl_pwr_slice_index_t;
-
-typedef struct wl_pwr_tsync_stats {
- uint16 type; /**< WL_PWRSTATS_TYPE_TSYNC */
- uint16 len;
- uint32 avb_uptime; /**< AVB uptime in msec */
-} wl_pwr_tsync_stats_t;
-
-typedef struct wl_pwr_ops_stats {
- uint16 type; /* WL_PWRSTATS_TYPE_OPS_STATS */
- uint16 len; /* total length includes fixed fields */
- uint32 partial_ops_dur; /* Total time(in usec) partial ops duration */
- uint32 full_ops_dur; /* Total time(in usec) full ops duration */
-} wl_pwr_ops_stats_t;
-
-typedef struct wl_pwr_bcntrim_stats {
- uint16 type; /* WL_PWRSTATS_TYPE_BCNTRIM_STATS */
- uint16 len; /* total length includes fixed fields */
- uint8 associated; /* STA is associated ? */
- uint8 slice_idx; /* on which slice STA is associated */
- uint16 pad; /* padding */
- uint32 slice_beacon_seen; /* number of beacons seen on the Infra
- * interface on this slice
- */
- uint32 slice_beacon_trimmed; /* number beacons actually trimmed on this slice */
- uint32 total_beacon_seen; /* total number of beacons seen on the Infra interface */
- uint32 total_beacon_trimmed; /* total beacons actually trimmed */
-} wl_pwr_bcntrim_stats_t;
-
-typedef struct wl_pwr_slice_index_band {
- uint16 type; /* WL_PWRSTATS_TYPE_SLICE_INDEX_BAND_INFO */
- uint16 len; /* Total length includes fixed fields */
- uint16 index; /* Slice Index */
- int16 bandtype; /* Slice Bandtype */
-} wl_pwr_slice_index_band_t;
-
-typedef struct wl_pwr_psbw_stats {
- uint16 type; /* WL_PWRSTATS_TYPE_PSBW_STATS */
- uint16 len; /* total length includes fixed fields */
- uint8 slice_idx; /* on which slice STA is associated */
- uint8 pad[3];
- uint32 slice_enable_dur; /* time(ms) psbw remains enabled on this slice */
- uint32 total_enable_dur; /* time(ms) psbw remains enabled total */
-} wl_pwr_psbw_stats_t;
-
/* ##### End of Power Stats section ##### */
/** IPV4 Arp offloads for ndis context */
uint32 roam_alert_thresh; /**< time in ms */
} wl_pfn_roam_thresh_t;
+
/* Reasons for wl_pmalert_t */
#define PM_DUR_EXCEEDED (1<<0)
#define MPC_DUR_EXCEEDED (1<<1)
} BWL_POST_PACKED_STRUCT tx_pwr_rpt_t;
#include <packed_section_end.h>
-typedef struct tx_pwr_ru_rate_info {
- uint16 version;
- uint16 ru_alloc;
- uint16 mcs;
- uint16 nss;
- uint16 num_he_ltf_syms;
- uint16 ldpc;
- uint16 gi;
- uint16 txmode;
- uint16 dcm;
- uint16 tx_chain;
-} tx_pwr_ru_rate_info_t;
-
-#define TX_PWR_RU_RATE_INFO_VER 1
-
-/* TLV ID for curpower report, ID < 63 is reserved for ppr module */
-typedef enum tx_pwr_tlv_id {
- TX_PWR_RPT_RU_RATE_INFO_ID = 64
-} tx_pwr_tlv_id_t;
-
#include <packed_section_start.h>
typedef BWL_PRE_PACKED_STRUCT struct {
struct ipv4_addr ipv4_addr;
/* Version of wlc_btc_stats_t structure.
* Increment whenever a change is made to wlc_btc_stats_t
*/
-#define BTCX_STATS_VER_4 4
-typedef struct wlc_btc_stats_v4 {
- uint16 version; /* version number of struct */
- uint16 valid; /* Size of this struct */
- uint32 stats_update_timestamp; /* tStamp when data is updated. */
- uint32 btc_status; /* Hybrid/TDM indicator: Bit2:Hybrid, Bit1:TDM,Bit0:CoexEnabled */
- uint32 bt_req_type_map; /* BT Antenna Req types since last stats sample */
- uint32 bt_req_cnt; /* #BT antenna requests since last stats sampl */
- uint32 bt_gnt_cnt; /* #BT antenna grants since last stats sample */
- uint32 bt_gnt_dur; /* usec BT owns antenna since last stats sample */
- uint16 bt_abort_cnt; /* #Times WL was preempted due to BT since WL up */
- uint16 bt_rxf1ovfl_cnt; /* #Time PSNULL retry count exceeded since WL up */
- uint16 bt_latency_cnt; /* #Time ucode high latency detected since WL up */
- uint16 bt_succ_pm_protect_cnt; /* successful PM protection */
- uint16 bt_succ_cts_cnt; /* successful CTS2A protection */
- uint16 bt_wlan_tx_preempt_cnt; /* WLAN TX Preemption */
- uint16 bt_wlan_rx_preempt_cnt; /* WLAN RX Preemption */
- uint16 bt_ap_tx_after_pm_cnt; /* AP TX even after PM protection */
- uint16 bt_peraud_cumu_gnt_cnt; /* Grant cnt for periodic audio */
- uint16 bt_peraud_cumu_deny_cnt; /* Deny cnt for periodic audio */
- uint16 bt_a2dp_cumu_gnt_cnt; /* Grant cnt for A2DP */
- uint16 bt_a2dp_cumu_deny_cnt; /* Deny cnt for A2DP */
- uint16 bt_sniff_cumu_gnt_cnt; /* Grant cnt for Sniff */
- uint16 bt_sniff_cumu_deny_cnt; /* Deny cnt for Sniff */
- uint16 bt_dcsn_map; /* Accumulated decision bitmap once Ant grant */
- uint16 bt_dcsn_cnt; /* Accumulated decision bitmap counters once Ant grant */
- uint16 bt_a2dp_hiwat_cnt; /* Ant grant by a2dp high watermark */
- uint16 bt_datadelay_cnt; /* Ant grant by acl/a2dp datadelay */
- uint16 bt_crtpri_cnt; /* Ant grant by critical BT task */
- uint16 bt_pri_cnt; /* Ant grant by high BT task */
- uint16 a2dpbuf1cnt; /* Ant request with a2dp buffercnt 1 */
- uint16 a2dpbuf2cnt; /* Ant request with a2dp buffercnt 2 */
- uint16 a2dpbuf3cnt; /* Ant request with a2dp buffercnt 3 */
- uint16 a2dpbuf4cnt; /* Ant request with a2dp buffercnt 4 */
- uint16 a2dpbuf5cnt; /* Ant request with a2dp buffercnt 5 */
- uint16 a2dpbuf6cnt; /* Ant request with a2dp buffercnt 6 */
- uint16 a2dpbuf7cnt; /* Ant request with a2dp buffercnt 7 */
- uint16 a2dpbuf8cnt; /* Ant request with a2dp buffercnt 8 */
- uint16 antgrant_lt10ms; /* Ant grant duration cnt 0~10ms */
- uint16 antgrant_lt30ms; /* Ant grant duration cnt 10~30ms */
- uint16 antgrant_lt60ms; /* Ant grant duration cnt 30~60ms */
- uint16 antgrant_ge60ms; /* Ant grant duration cnt 60~ms */
-} wlc_btc_stats_v4_t;
-
-#define BTCX_STATS_VER_3 3
-
-typedef struct wlc_btc_stats_v3 {
- uint16 version; /* version number of struct */
- uint16 valid; /* Size of this struct */
- uint32 stats_update_timestamp; /* tStamp when data is updated. */
- uint32 btc_status; /* Hybrid/TDM indicator: Bit2:Hybrid, Bit1:TDM,Bit0:CoexEnabled */
- uint32 bt_req_type_map; /* BT Antenna Req types since last stats sample */
- uint32 bt_req_cnt; /* #BT antenna requests since last stats sampl */
- uint32 bt_gnt_cnt; /* #BT antenna grants since last stats sample */
- uint32 bt_gnt_dur; /* usec BT owns antenna since last stats sample */
- uint16 bt_abort_cnt; /* #Times WL was preempted due to BT since WL up */
- uint16 bt_rxf1ovfl_cnt; /* #Time PSNULL retry count exceeded since WL up */
- uint16 bt_latency_cnt; /* #Time ucode high latency detected since WL up */
- uint16 rsvd; /* pad to align struct to 32bit bndry */
- uint16 bt_succ_pm_protect_cnt; /* successful PM protection */
- uint16 bt_succ_cts_cnt; /* successful CTS2A protection */
- uint16 bt_wlan_tx_preempt_cnt; /* WLAN TX Preemption */
- uint16 bt_wlan_rx_preempt_cnt; /* WLAN RX Preemption */
- uint16 bt_ap_tx_after_pm_cnt; /* AP TX even after PM protection */
- uint16 bt_peraud_cumu_gnt_cnt; /* Grant cnt for periodic audio */
- uint16 bt_peraud_cumu_deny_cnt; /* Deny cnt for periodic audio */
- uint16 bt_a2dp_cumu_gnt_cnt; /* Grant cnt for A2DP */
- uint16 bt_a2dp_cumu_deny_cnt; /* Deny cnt for A2DP */
- uint16 bt_sniff_cumu_gnt_cnt; /* Grant cnt for Sniff */
- uint16 bt_sniff_cumu_deny_cnt; /* Deny cnt for Sniff */
- uint8 pad; /* Padding */
- uint8 slice_index; /* Slice to report */
-} wlc_btc_stats_v3_t;
-
-#define BTCX_STATS_VER_2 2
-
-typedef struct wlc_btc_stats_v2 {
+#define BTCX_STATS_VER 2
+
+typedef struct wlc_btc_stats {
uint16 version; /* version number of struct */
uint16 valid; /* Size of this struct */
uint32 stats_update_timestamp; /* tStamp when data is updated. */
uint16 bt_rxf1ovfl_cnt; /* #Time PSNULL retry count exceeded since WL up */
uint16 bt_latency_cnt; /* #Time ucode high latency detected since WL up */
uint16 rsvd; /* pad to align struct to 32bit bndry */
-} wlc_btc_stats_v2_t;
-
-/* Durations for each bt task in millisecond */
-#define WL_BTCX_DURSTATS_VER_1 (1u)
-typedef struct wlc_btcx_durstats_v1 {
- uint16 version; /* version number of struct */
- uint16 valid; /* validity of this struct */
- uint32 stats_update_timestamp; /* tStamp when data is updated */
- uint16 bt_acl_dur; /* acl duration in ms */
- uint16 bt_sco_dur; /* sco duration in ms */
- uint16 bt_esco_dur; /* esco duration in ms */
- uint16 bt_a2dp_dur; /* a2dp duration in ms */
- uint16 bt_sniff_dur; /* sniff duration in ms */
- uint16 bt_pscan_dur; /* page scan duration in ms */
- uint16 bt_iscan_dur; /* inquiry scan duration in ms */
- uint16 bt_page_dur; /* paging duration in ms */
- uint16 bt_inquiry_dur; /* inquiry duration in ms */
- uint16 bt_mss_dur; /* mss duration in ms */
- uint16 bt_park_dur; /* park duration in ms */
- uint16 bt_rssiscan_dur; /* rssiscan duration in ms */
- uint16 bt_iscan_sco_dur; /* inquiry scan sco duration in ms */
- uint16 bt_pscan_sco_dur; /* page scan sco duration in ms */
- uint16 bt_tpoll_dur; /* tpoll duration in ms */
- uint16 bt_sacq_dur; /* sacq duration in ms */
- uint16 bt_sdata_dur; /* sdata duration in ms */
- uint16 bt_rs_listen_dur; /* rs listen duration in ms */
- uint16 bt_rs_burst_dur; /* rs brust duration in ms */
- uint16 bt_ble_adv_dur; /* ble adv duration in ms */
- uint16 bt_ble_scan_dur; /* ble scan duration in ms */
- uint16 bt_ble_init_dur; /* ble init duration in ms */
- uint16 bt_ble_conn_dur; /* ble connection duration in ms */
- uint16 bt_task_lmp_dur; /* lmp duration in ms */
- uint16 bt_esco_retran_dur; /* esco retransmission duration in ms */
- uint16 bt_task26_dur; /* task26 duration in ms */
- uint16 bt_task27_dur; /* task27 duration in ms */
- uint16 bt_task28_dur; /* task28 duration in ms */
- uint16 bt_task_pred_dur; /* prediction task duration in ms */
- uint16 bt_multihid_dur; /* multihid duration in ms */
-} wlc_btcx_durstats_v1_t;
+} wlc_btc_stats_t;
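/*
 * Illustrative consumer-side check (hypothetical helper): per the field
 * comments, 'version' must match BTCX_STATS_VER and 'valid' carries the
 * size of the structure the firmware filled in, so a reader can reject
 * reports it does not understand.
 */
static int btc_stats_usable(const wlc_btc_stats_t *stats)
{
	return (stats->version == BTCX_STATS_VER) &&
		(stats->valid >= sizeof(wlc_btc_stats_t));
}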
#define WL_IPFO_ROUTE_TBL_FIXED_LEN 4
#define WL_MAX_IPFO_ROUTE_TBL_ENTRY 64
#define LOGRRC_FIX_LEN 8
#define IOBUF_ALLOWED_NUM_OF_LOGREC(type, len) ((len - LOGRRC_FIX_LEN)/sizeof(type))
/* BCMWAPI_WAI */
-#define IV_LEN 16 /* same as SMS4_WPI_PN_LEN */
+#define IV_LEN 16
struct wapi_sta_msg_t
{
uint16 msg_type;
uint8 sta_mac[6];
uint8 reserve_data2[2];
uint8 gsn[IV_LEN];
- uint8 wie[TLV_BODY_LEN_MAX + TLV_HDR_LEN]; /* 257 */
- uint8 pad[3]; /* padding for alignment */
+ uint8 wie[256];
};
/* #endif BCMWAPI_WAI */
/* chanim acs record */
uint8 PAD[3];
} wl_p2p_wfds_hash_t;
-typedef struct wl_p2p_config_params {
- uint16 enable; /**< 0: disable 1: enable */
- uint16 chanspec; /* GO chanspec */
- wlc_ssid_t ssid; /* SSID */
-} wl_p2p_config_params_t;
-
typedef struct wl_bcmdcs_data {
uint32 reason;
chanspec_t chspec;
#define MAX_NUM_TXCAL_MEAS 128
#define MAX_NUM_PWR_STEP 40
#define TXCAL_IOVAR_VERSION 0x1
-
-#define TXCAL_GAINSWEEP_VER (TXCAL_GAINSWEEP_VERSION_V2)
-#define TXCAL_GAINSWEEP_VERSION_V2 2
-
-/* Below macro defines the latest txcal iovar version updated */
-/* This macro also reflects in the 'txcal_ver' iovar */
-#define TXCAL_IOVAR_LATEST TXCAL_GAINSWEEP_VER
-
-/* below are used for bphy/ofdm separated LSC */
-#define TXCAL_PWR_BPHY 0
-#define TXCAL_PWR_OFDM 1
-
typedef struct wl_txcal_meas_percore {
uint16 tssi[MAX_NUM_TXCAL_MEAS];
int16 pwr[MAX_NUM_TXCAL_MEAS];
uint8 channel;
uint8 num_core;
uint8 gen_tbl;
- uint8 ofdm;
- uint8 pad;
- wl_txcal_power_tssi_percore_t tssi_percore[4];
+ uint16 pad;
+ wl_txcal_power_tssi_percore_t tssi_percore[1];
} wl_txcal_power_tssi_ncore_t;
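/*
 * Illustrative sketch, not part of the original header: with the one-element
 * tssi_percore[] array above, a buffer covering num_core cores is
 * conventionally sized as below.  This is an assumption about how callers
 * use the structure, not something this file states.
 */
static size_t example_txcal_ncore_size(uint8 num_core)
{
	/* tssi_percore[1] is already included in sizeof() */
	return sizeof(wl_txcal_power_tssi_ncore_t) +
		((num_core > 1) ? (num_core - 1) * sizeof(wl_txcal_power_tssi_percore_t) : 0);
}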
typedef struct wl_txcal_meas {
uint8 num_entries[WLC_TXCORE_MAX];
uint8 tssi[WLC_TXCORE_MAX][MAX_NUM_PWR_STEP];
uint8 gen_tbl;
- uint8 ofdm;
+ uint8 PAD;
} wl_txcal_power_tssi_t;
typedef struct wl_txcal_power_tssi_old {
uint8 num_entries[WLC_TXCORE_MAX_OLD];
uint8 tssi[WLC_TXCORE_MAX_OLD][MAX_NUM_PWR_STEP];
uint8 gen_tbl;
- uint8 ofdm;
+ uint8 PAD;
} wl_txcal_power_tssi_old_t;
typedef struct wl_olpc_pwr {
uint8 channel;
int16 tempsense;
uint8 olpc_idx;
- uint8 ofdm;
+ uint8 pad;
} wl_olpc_pwr_t;
-typedef struct wl_rfem_temp_vdet_temp {
- uint8 vdet_fem_t1;
- int8 rfem_temp_t1;
- uint8 vdet_fem_t2;
- int8 rfem_temp_t2;
-} wl_rfem_temp_vdet_temp_t;
-
-typedef struct wl_rfem_temp_vin_tssi {
- uint16 vin_chip_v1;
- int16 tssi_chip_v1;
- uint16 vin_chip_v2;
- int16 tssi_chip_v2;
-} wl_rfem_temp_vin_tssi_t;
-
-typedef struct wl_txcal_tempsense {
- uint16 version;
- uint8 valid_cnt;
- uint8 core;
- int16 ref_temperature;
- int16 meas_temperature;
- wl_rfem_temp_vdet_temp_t vdet_temp;
- wl_rfem_temp_vin_tssi_t vin_tssi;
-} wl_txcal_tempsense_t;
-
/** IOVAR "mempool" parameter. Used to retrieve a list of memory pool statistics. */
typedef struct wl_mempool_stats {
int32 num; /**< Number of memory pools */
uint32 ipaddr_gateway;
} nwoe_ifconfig_t;
+/** Traffic management priority classes */
+typedef enum trf_mgmt_priority_class {
+ trf_mgmt_priority_low = 0, /**< Maps to 802.1p BK */
+ trf_mgmt_priority_medium = 1, /**< Maps to 802.1p BE */
+ trf_mgmt_priority_high = 2, /**< Maps to 802.1p VI */
+ trf_mgmt_priority_nochange = 3, /**< do not update the priority */
+ trf_mgmt_priority_invalid = (trf_mgmt_priority_nochange + 1)
+} trf_mgmt_priority_class_t;
+
+/** Traffic management configuration parameters */
+typedef struct trf_mgmt_config {
+ uint32 trf_mgmt_enabled; /**< 0 - disabled, 1 - enabled */
+ uint32 flags; /**< See TRF_MGMT_FLAG_xxx defines */
+ uint32 host_ip_addr; /**< My IP address to determine subnet */
+ uint32 host_subnet_mask; /**< My subnet mask */
+ uint32 downlink_bandwidth; /**< In units of kbps */
+ uint32 uplink_bandwidth; /**< In units of kbps */
+ uint32 min_tx_bandwidth[TRF_MGMT_MAX_PRIORITIES]; /**< Minimum guaranteed tx bandwidth */
+ uint32 min_rx_bandwidth[TRF_MGMT_MAX_PRIORITIES]; /**< Minimum guaranteed rx bandwidth */
+} trf_mgmt_config_t;
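/*
 * Illustrative sketch, not part of the original header: filling in a
 * trf_mgmt_config_t before handing it to the driver.  The wl_iovar_set()
 * helper, the "trf_mgmt_config" iovar name and the example addresses are
 * assumptions; only the structure layout above comes from this file.
 * Assumes a user-space tool that includes this header and <string.h>.
 */
static int example_trf_mgmt_configure(void *wl)
{
	trf_mgmt_config_t cfg;

	memset(&cfg, 0, sizeof(cfg));
	cfg.trf_mgmt_enabled = 1;
	cfg.host_ip_addr = 0xc0a80164;		/* e.g. 192.168.1.100, byte order as fw expects */
	cfg.host_subnet_mask = 0xffffff00;	/* /24 subnet */
	cfg.downlink_bandwidth = 10000;		/* kbps */
	cfg.uplink_bandwidth = 5000;		/* kbps */
	cfg.min_tx_bandwidth[trf_mgmt_priority_high] = 2000;	/* kbps */
	cfg.min_rx_bandwidth[trf_mgmt_priority_high] = 2000;	/* kbps */

	/* hypothetical helper; a real tool would issue its own ioctl/iovar call */
	return wl_iovar_set(wl, "trf_mgmt_config", &cfg, sizeof(cfg));
}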
+
+/** Traffic management filter */
+typedef struct trf_mgmt_filter {
+ struct ether_addr dst_ether_addr; /**< His L2 address */
+ uint8 PAD[2];
+ uint32 dst_ip_addr; /**< His IP address */
+ uint16 dst_port; /**< His L4 port */
+ uint16 src_port; /**< My L4 port */
+ uint16 prot; /**< L4 protocol (only TCP or UDP) */
+ uint16 flags; /**< TBD. For now, this must be zero. */
+ trf_mgmt_priority_class_t priority; /**< Priority for filtered packets */
+ uint32 dscp; /**< DSCP */
+} trf_mgmt_filter_t;
+
+/** Traffic management filter list (variable length) */
+typedef struct trf_mgmt_filter_list {
+ uint32 num_filters;
+ trf_mgmt_filter_t filter[1];
+} trf_mgmt_filter_list_t;
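/*
 * Illustrative sketch, not part of the original header: trf_mgmt_filter_list_t
 * ends in a one-element filter[] array, so a list of n filters is usually
 * over-allocated as below.  malloc()/memset() assume a user-space caller;
 * only the structure layouts come from this file.
 */
static trf_mgmt_filter_list_t *example_alloc_filter_list(uint32 num_filters)
{
	size_t len;
	trf_mgmt_filter_list_t *list;

	if (num_filters == 0)
		return NULL;
	/* filter[1] is already counted in sizeof(*list) */
	len = sizeof(*list) + (num_filters - 1) * sizeof(trf_mgmt_filter_t);
	list = (trf_mgmt_filter_list_t *)malloc(len);
	if (list != NULL) {
		memset(list, 0, len);
		list->num_filters = num_filters;
	}
	return list;
}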
+
+/** Traffic management global info used for all queues */
+typedef struct trf_mgmt_global_info {
+ uint32 maximum_bytes_per_second;
+ uint32 maximum_bytes_per_sampling_period;
+ uint32 total_bytes_consumed_per_second;
+ uint32 total_bytes_consumed_per_sampling_period;
+ uint32 total_unused_bytes_per_sampling_period;
+} trf_mgmt_global_info_t;
+
+/** Traffic management shaping info per priority queue */
+typedef struct trf_mgmt_shaping_info {
+ uint32 gauranteed_bandwidth_percentage;
+ uint32 guaranteed_bytes_per_second;
+ uint32 guaranteed_bytes_per_sampling_period;
+ uint32 num_bytes_produced_per_second;
+ uint32 num_bytes_consumed_per_second;
+ uint32 num_queued_packets; /**< Number of packets in queue */
+ uint32 num_queued_bytes; /**< Number of bytes in queue */
+} trf_mgmt_shaping_info_t;
+
+/** Traffic management shaping info array */
+typedef struct trf_mgmt_shaping_info_array {
+ trf_mgmt_global_info_t tx_global_shaping_info;
+ trf_mgmt_shaping_info_t tx_queue_shaping_info[TRF_MGMT_MAX_PRIORITIES];
+ trf_mgmt_global_info_t rx_global_shaping_info;
+ trf_mgmt_shaping_info_t rx_queue_shaping_info[TRF_MGMT_MAX_PRIORITIES];
+} trf_mgmt_shaping_info_array_t;
+
+
+/** Traffic management statistical counters */
+typedef struct trf_mgmt_stats {
+ uint32 num_processed_packets; /**< Number of packets processed */
+ uint32 num_processed_bytes; /**< Number of bytes processed */
+ uint32 num_discarded_packets; /**< Number of packets discarded from queue */
+} trf_mgmt_stats_t;
+
+/** Traffic management statistics array */
+typedef struct trf_mgmt_stats_array {
+ trf_mgmt_stats_t tx_queue_stats[TRF_MGMT_MAX_PRIORITIES];
+ trf_mgmt_stats_t rx_queue_stats[TRF_MGMT_MAX_PRIORITIES];
+} trf_mgmt_stats_array_t;
+
/* Both powersel_params and lpc_params are used by IOVAR lpc_params.
* The powersel_params is replaced by lpc_params in later WLC versions.
*/
uint8 tfs_id; /**< ID of a specific set (existing), or nul for all */
} wl_tfs_term_t;
+
#define DMS_DEP_PROXY_ARP (1 << 0)
/* Definitions for WNM/NPS Directed Multicast Service */
} wl_service_term_t;
/** Definitions for WNM/NPS BSS Transition */
-#define WL_BSSTRANS_QUERY_VERSION_1 1
-typedef struct wl_bsstrans_query {
- uint16 version; /* structure version */
- uint16 pad0; /* padding for 4-byte allignment */
- wlc_ssid_t ssid; /* SSID of NBR elem to be queried for */
- uint8 reason; /* Reason code of the BTQ */
- uint8 pad1[3]; /* padding for 4-byte allignment */
-} wl_bsstrans_query_t;
-
-#define BTM_QUERY_NBR_COUNT_MAX 16
-
-#define WL_BTQ_NBR_LIST_VERSION_1 1
-typedef struct wl_btq_nbr_list {
- uint16 version; /* structure version */
- uint8 count; /* No. of BTQ NBRs returned */
- uint8 pad; /* padding for 4-byte allignment */
- nbr_rpt_elem_t btq_nbt_elem[]; /* BTQ NBR elem in a BTQ NBR list */
-} wl_btq_nbr_list_t;
-
typedef struct wl_bsstrans_req {
uint16 tbtt; /**< time of BSS to end of life, in unit of TBTT */
uint16 dur; /**< time of BSS to keep off, in unit of minute */
WL_BSSTRANS_POLICY_WAIT = 3, /**< Wait for deauth and send Accepted status */
WL_BSSTRANS_POLICY_PRODUCT = 4, /**< Policy for real product use cases (Olympic) */
WL_BSSTRANS_POLICY_PRODUCT_WBTEXT = 5, /**< Policy for real product use cases (SS) */
- WL_BSSTRANS_POLICY_MBO = 6, /**< Policy for MBO certification */
- WL_BSSTRANS_POLICY_MAX = 7
+ WL_BSSTRANS_POLICY_MAX = 6
} wnm_bsstrans_policy_type_t;
/** Definitions for WNM/NPS TIM Broadcast */
uint16 payload; /**< IE Data Payload */
} wl_rmc_vsie_t;
+
/* structures & defines for proximity detection */
enum proxd_method {
PROXD_UNDEFINED_METHOD = 0,
} wl_proxd_seq_config_t;
#define WL_PROXD_TUNE_VERSION_1 1
-#define WL_PROXD_TUNE_VERSION_2 2
#include <packed_section_start.h>
-/* For legacy ranging target (e.g. 43430, 43342) */
-typedef BWL_PRE_PACKED_STRUCT struct wl_proxd_params_tof_tune_v1 {
+typedef BWL_PRE_PACKED_STRUCT struct wl_proxd_params_tof_tune {
uint32 version;
uint32 Ki; /**< h/w delay K factor for initiator */
uint32 Kt; /**< h/w delay K factor for target */
uint32 acs_gdv_thresh;
int8 acs_rssi_thresh;
uint8 smooth_win_en;
- int32 emu_delay;
-} BWL_POST_PACKED_STRUCT wl_proxd_params_tof_tune_v1_t;
-#include <packed_section_end.h>
-
-#include <packed_section_start.h>
-/* For legacy ranging initiator (including 4364) */
-typedef BWL_PRE_PACKED_STRUCT struct wl_proxd_params_tof_tune_v2 {
- uint32 version;
- uint32 Ki; /**< h/w delay K factor for initiator */
- uint32 Kt; /**< h/w delay K factor for target */
- int16 vhtack; /**< enable/disable VHT ACK */
- int16 N_log2[TOF_BW_SEQ_NUM]; /**< simple threshold crossing */
- int16 w_offset[TOF_BW_NUM]; /**< offset of threshold crossing window(per BW) */
- int16 w_len[TOF_BW_NUM]; /**< length of threshold crossing window(per BW) */
- int32 maxDT; /**< max time difference of T4/T1 or T3/T2 */
- int32 minDT; /**< min time difference of T4/T1 or T3/T2 */
- uint8 totalfrmcnt; /**< total count of transfered measurement frames */
- uint16 rsv_media; /**< reserve media value for TOF */
- uint32 flags; /**< flags */
- uint8 core; /**< core to use for tx */
- uint8 setflags; /* set flags of K, N. S values */
- int16 N_scale[TOF_BW_SEQ_NUM]; /**< simple threshold crossing */
- uint8 sw_adj; /**< enable sw assisted timestamp adjustment */
- uint8 hw_adj; /**< enable hw assisted timestamp adjustment */
- uint8 seq_en; /**< enable ranging sequence */
- uint8 ftm_cnt[TOF_BW_SEQ_NUM]; /**< number of ftm frames based on bandwidth */
- int16 N_log2_2g; /**< simple threshold crossing for 2g channel */
- int16 N_scale_2g; /**< simple threshold crossing for 2g channel */
- wl_proxd_seq_config_t seq_5g20;
- wl_proxd_seq_config_t seq_2g20; /* Thresh crossing params for 2G Sequence */
- uint16 bitflip_thresh; /* bitflip threshold */
- uint16 snr_thresh; /* SNR threshold */
- int8 recv_2g_thresh; /* 2g recieve sensitivity threshold */
- uint32 acs_gdv_thresh;
- int8 acs_rssi_thresh;
- uint8 smooth_win_en;
int32 acs_gdmm_thresh;
int8 acs_delta_rssi_thresh;
int32 emu_delay;
- uint8 core_mask; /* core mask selection */
-} BWL_POST_PACKED_STRUCT wl_proxd_params_tof_tune_v2_t;
+} BWL_POST_PACKED_STRUCT wl_proxd_params_tof_tune_t;
#include <packed_section_end.h>
-#define WL_PROXD_TUNE_VERSION_3 3
-/* Future ranging support */
-typedef struct wl_proxd_params_tof_tune_v3 {
- uint16 version;
- uint16 len;
- uint32 Ki; /**< h/w delay K factor for initiator */
- uint32 Kt; /**< h/w delay K factor for target */
- int16 vhtack; /**< enable/disable VHT ACK */
- uint16 PAD;
- int16 N_log2[TOF_BW_SEQ_NUM]; /**< simple threshold crossing */
- uint16 PAD;
- int16 w_offset[TOF_BW_NUM]; /**< offset of threshold crossing window(per BW) */
- uint16 PAD;
- int16 w_len[TOF_BW_NUM]; /**< length of threshold crossing window(per BW) */
- uint16 PAD;
- int32 maxDT; /**< max time difference of T4/T1 or T3/T2 */
- int32 minDT; /**< min time difference of T4/T1 or T3/T2 */
- uint8 totalfrmcnt; /**< total count of transfered measurement frames */
- uint8 PAD[3];
- uint16 rsv_media; /**< reserve media value for TOF */
- uint16 PAD;
- uint32 flags; /**< flags */
- uint8 core; /**< core to use for tx */
- uint8 setflags; /* set flags of K, N. S values */
- uint16 PAD;
- int16 N_scale[TOF_BW_SEQ_NUM]; /**< simple threshold crossing */
- uint8 sw_adj; /**< enable sw assisted timestamp adjustment */
- uint8 hw_adj; /**< enable hw assisted timestamp adjustment */
- uint8 seq_en; /**< enable ranging sequence */
- uint8 PAD[3];
- uint8 ftm_cnt[TOF_BW_SEQ_NUM]; /**< number of ftm frames based on bandwidth */
- uint8 PAD[3];
- int16 N_log2_2g; /**< simple threshold crossing for 2g channel */
- int16 N_scale_2g; /**< simple threshold crossing for 2g channel */
- wl_proxd_seq_config_t seq_5g20;
- wl_proxd_seq_config_t seq_2g20; /* Thresh crossing params for 2G Sequence */
- uint16 bitflip_thresh; /* bitflip threshold */
- uint16 snr_thresh; /* SNR threshold */
- int8 recv_2g_thresh; /* 2g recieve sensitivity threshold */
- uint8 PAD[3];
- uint32 acs_gdv_thresh;
- int8 acs_rssi_thresh;
- uint8 smooth_win_en;
- uint16 PAD;
- int32 acs_gdmm_thresh;
- int8 acs_delta_rssi_thresh;
- uint8 PAD[3];
- int32 emu_delay;
- uint8 core_mask; /* core mask selection */
- uint8 PAD[3];
-} wl_proxd_params_tof_tune_v3_t;
-
typedef struct wl_proxd_params_iovar {
uint16 method; /**< Proximity Detection method */
- uint8 PAD[2];
union {
/* common params for pdsvc */
wl_proxd_params_common_t cmn_params; /**< common parameters */
wl_proxd_params_rssi_method_t rssi_params; /**< RSSI method parameters */
wl_proxd_params_tof_method_t tof_params; /**< TOF method parameters */
/* tune parameters */
- wl_proxd_params_tof_tune_v3_t tof_tune; /**< TOF tune parameters */
+ wl_proxd_params_tof_tune_t tof_tune; /**< TOF tune parameters */
+ uint8 PAD[sizeof(wl_proxd_params_tof_tune_t)+1];
} u; /**< Method specific optional parameters */
} wl_proxd_params_iovar_t;
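/*
 * Illustrative sketch, not part of the original header: selecting the TOF
 * tune parameters through the union in wl_proxd_params_iovar_t.  The
 * PROXD_TOF_METHOD value and the "zero means firmware default" behaviour are
 * assumptions made for the example; only the structure comes from this file.
 */
static void example_fill_tof_tune(wl_proxd_params_iovar_t *p)
{
	memset(p, 0, sizeof(*p));
	p->method = PROXD_TOF_METHOD;			/* from enum proxd_method (assumed value) */
	p->u.tof_tune.version = WL_PROXD_TUNE_VERSION_1;
	p->u.tof_tune.Ki = 0;				/* 0 left to fw defaults (assumption) */
	p->u.tof_tune.Kt = 0;
}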
uint8 phyver; /**< phy version */
struct ether_addr localMacAddr; /**< local mac address */
struct ether_addr remoteMacAddr; /**< remote mac address */
- wl_proxd_params_tof_tune_v3_t params;
+ wl_proxd_params_tof_tune_t params;
} BWL_POST_PACKED_STRUCT wl_proxd_collect_header_t;
#include <packed_section_end.h>
+
/* ifdef WL_NAN */
/* ********************** NAN wl interface struct types and defs ******************** */
/*
* Bit 0 : If set to 1, means event uses nan bsscfg,
* otherwise uses infra bsscfg. Default is using infra bsscfg
*/
-#define WL_NAN_CTRL_ROUTE_EVENT_VIA_NAN_BSSCFG 0x000001
+#define WL_NAN_CTRL_ROUTE_EVENT_VIA_NAN_BSSCFG 0x1
/* If set, discovery beacons are transmitted on 2G band */
-#define WL_NAN_CTRL_DISC_BEACON_TX_2G 0x000002
+#define WL_NAN_CTRL_DISC_BEACON_TX_2G 0x2
/* If set, sync beacons are transmitted on 2G band */
-#define WL_NAN_CTRL_SYNC_BEACON_TX_2G 0x000004
+#define WL_NAN_CTRL_SYNC_BEACON_TX_2G 0x4
/* If set, discovery beacons are transmitted on 5G band */
-#define WL_NAN_CTRL_DISC_BEACON_TX_5G 0x000008
+#define WL_NAN_CTRL_DISC_BEACON_TX_5G 0x8
/* If set, sync beacons are transmitted on 5G band */
-#define WL_NAN_CTRL_SYNC_BEACON_TX_5G 0x000010
+#define WL_NAN_CTRL_SYNC_BEACON_TX_5G 0x10
/* If set, auto datapath responses will be sent by FW */
-#define WL_NAN_CTRL_AUTO_DPRESP 0x000020
+#define WL_NAN_CTRL_AUTO_DPRESP 0x20
/* If set, auto datapath confirms will be sent by FW */
-#define WL_NAN_CTRL_AUTO_DPCONF 0x000040
-/* If set, auto schedule responses will be sent by FW */
-#define WL_NAN_CTRL_AUTO_SCHEDRESP 0x000080
-/* If set, auto schedule confirms will be sent by FW */
-#define WL_NAN_CTRL_AUTO_SCHEDCONF 0x000100
-/* If set, proprietary rates are supported by FW */
-#define WL_NAN_CTRL_PROP_RATE 0x000200
-/* If set, service awake_dw overrides global dev awake_dw */
-#define WL_NAN_CTRL_SVC_OVERRIDE_DEV_AWAKE_DW 0x000400
-/* If set, merge scan will be disabled */
-#define WL_NAN_CTRL_SCAN_DISABLE 0x000800
-/* If set, power save will be disabled */
-#define WL_NAN_CTRL_POWER_SAVE_DISABLE 0x001000
-/* If set, device will merge to configured CID only */
-#define WL_NAN_CTRL_MERGE_CONF_CID_ONLY 0x002000
-/* If set, 5g core will be brought down in single band NAN */
-#define WL_NAN_CTRL_5G_SLICE_POWER_OPT 0x004000
-#define WL_NAN_CTRL_DUMP_HEAP 0x008000
-/* If set, host generates and assign ndp id for ndp sessions */
-#define WL_NAN_CTRL_HOST_GEN_NDPID 0x010000
-/* If set, nan ndp inactivity watchdog will be activated */
-#define WL_NAN_CTRL_DELETE_INACTIVE_PEERS 0x020000
-/* If set, nan assoc coex will be activated */
-#define WL_NAN_CTRL_INFRA_ASSOC_COEX 0x040000
-/* If set, dam will accept all NDP/RNG request from the peer including counter */
-#define WL_NAN_CTRL_DAM_ACCEPT_ALL 0x080000
-/* If set, nan mac ignores role for tx discovery beacon for periodic config */
-#define WL_NAN_CTRL_FASTDISC_IGNO_ROLE 0x100000
-/* If set, include NA in NAN beacons (disc beacons for now) */
-#define WL_NAN_CTRL_INCL_NA_IN_BCNS 0x200000
-/* If set, host assist will be enabled */
-#define WL_NAN_CTRL_HOST_ASSIST 0x400000
-/* If set, host configures NDI associated with the service */
-#define WL_NAN_CTRL_HOST_CFG_SVC_NDI 0x800000
+#define WL_NAN_CTRL_AUTO_DPCONF 0x40
/* Value when all host-configurable bits set */
#define WL_NAN_CTRL_MAX_MASK 0xFFFFFF
#define WL_NAN_CFG_CTRL_FW_BITS 8
-/* Last 8-bits are firmware controlled bits.
- * Bit 31:
+/* Bit 31:
* If set - indicates that NAN initialization is successful
- * Bit 30:
- * If set - indicates that NAN MAC cfg creation is successful
- *
- * NOTE: These are only ready-only bits for host.
- * All sets to these bits from host are masked off
+ * NOTE: This is a read-only bit. All host writes to it are masked off
*/
-#define WL_NAN_PROTO_INIT_DONE (1 << 31)
-#define WL_NAN_CFG_CREATE_DONE (1 << 30)
-
+#define WL_NAN_PROTO_INIT_DONE 0x80000000
#define WL_NAN_GET_PROTO_INIT_STATUS(x) \
- (((x) & WL_NAN_PROTO_INIT_DONE) ? TRUE:FALSE)
+ (((x) >> 31) & 1)
#define WL_NAN_CLEAR_PROTO_INIT_STATUS(x) \
((x) &= ~WL_NAN_PROTO_INIT_DONE)
#define WL_NAN_SET_PROTO_INIT_STATUS(x) \
- ((x) |= (WL_NAN_PROTO_INIT_DONE))
-
-#define WL_NAN_GET_CFG_CREATE_STATUS(x) \
- (((x) & WL_NAN_CFG_CREATE_DONE) ? TRUE:FALSE)
-#define WL_NAN_CLEAR_CFG_CREATE_STATUS(x) \
- ((x) &= ~WL_NAN_CFG_CREATE_DONE)
-#define WL_NAN_SET_CFG_CREATE_STATUS(x) \
- ((x) |= (WL_NAN_CFG_CREATE_DONE))
+ ((x) |= (1 << 31))
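/*
 * Illustrative sketch, not part of the original header: consuming the
 * firmware-controlled init bit with the accessor macros above.  The
 * read_nan_cfg_ctrl() helper is hypothetical; in practice the value comes
 * back from a NAN config query.
 */
static int example_nan_proto_ready(void)
{
	uint32 cfg_ctrl = read_nan_cfg_ctrl();	/* hypothetical fetch of wl_nan_cfg_ctrl_t */

	if (!WL_NAN_GET_PROTO_INIT_STATUS(cfg_ctrl))
		return 0;	/* NAN protocol not initialized yet */
	/* bit 31 is read-only for the host; clearing the local copy is harmless */
	WL_NAN_CLEAR_PROTO_INIT_STATUS(cfg_ctrl);
	return 1;
}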
#define WL_NAN_IOCTL_VERSION 0x2
/* < some sufficient ioc buff size for our module */
uint8 attr_list[0]; /* attributes payload */
} wl_nan_event_replied_t;
-/* NAN Tx status of transmitted frames */
-#define WL_NAN_TXS_FAILURE 0
-#define WL_NAN_TXS_SUCCESS 1
-
-/* NAN frame types */
-enum wl_nan_frame_type {
- /* discovery frame types */
- WL_NAN_FRM_TYPE_PUBLISH = 1,
- WL_NAN_FRM_TYPE_SUBSCRIBE = 2,
- WL_NAN_FRM_TYPE_FOLLOWUP = 3,
-
- /* datapath frame types */
- WL_NAN_FRM_TYPE_DP_REQ = 4,
- WL_NAN_FRM_TYPE_DP_RESP = 5,
- WL_NAN_FRM_TYPE_DP_CONF = 6,
- WL_NAN_FRM_TYPE_DP_INSTALL = 7,
- WL_NAN_FRM_TYPE_DP_END = 8,
-
- /* schedule frame types */
- WL_NAN_FRM_TYPE_SCHED_REQ = 9,
- WL_NAN_FRM_TYPE_SCHED_RESP = 10,
- WL_NAN_FRM_TYPE_SCHED_CONF = 11,
- WL_NAN_FRM_TYPE_SCHED_UPD = 12,
-
- /* ranging frame types */
- WL_NAN_FRM_TYPE_RNG_REQ = 13,
- WL_NAN_FRM_TYPE_RNG_RESP = 14,
- WL_NAN_FRM_TYPE_RNG_TERM = 15,
- WL_NAN_FRM_TYPE_RNG_REPORT = 16,
-
- WL_NAN_FRM_TYPE_UNSOLICIT_SDF = 17,
- WL_NAN_FRM_TYPE_INVALID
-};
-typedef uint8 wl_nan_frame_type_t;
-
-/* NAN Reason codes for tx status */
-enum wl_nan_txs_reason_codes {
- WL_NAN_REASON_SUCCESS = 1, /* NAN status success */
- WL_NAN_REASON_TIME_OUT = 2, /* timeout reached */
- WL_NAN_REASON_DROPPED = 3, /* pkt dropped due to internal failure */
- WL_NAN_REASON_MAX_RETRIES_DONE = 4 /* Max retries exceeded */
-};
-
-/* For NAN TX status */
-typedef struct wl_nan_event_txs {
- uint8 status; /* For TX status, success or failure */
- uint8 reason_code; /* to identify reason when status is failure */
- uint16 host_seq; /* seq num to keep track of pkts sent by host */
- uint8 type; /* wl_nan_frame_type_t */
- uint8 pad;
- uint16 opt_tlvs_len;
- uint8 opt_tlvs[];
-} wl_nan_event_txs_t;
-
-/* SD transmit pkt's event status is sent as optional tlv in wl_nan_event_txs_t */
-typedef struct wl_nan_event_sd_txs {
- uint8 inst_id; /* Publish or subscribe instance id */
- uint8 req_id; /* Requestor instance id */
-} wl_nan_event_sd_txs_t;
-
-/* nanho fsm tlv WL_NAN_XTLV_NANHO_OOB_TXS(0x0b0a) */
-typedef struct wl_nan_event_nanho_txs {
- uint32 fsm_id; /* nho fsm id */
- uint16 seq_id; /* nho seq id */
- uint16 pad;
-} wl_nan_event_nanho_txs_t;
-
/* Subscribe or Publish instance Terminated */
/* WL_NAN_EVENT_TERMINATED */
uint8 instance_id; /* publish / subscribe instance id */
uint8 reason; /* 1=timeout, 2=Host/IOVAR, 3=FW Terminated 4=Failure */
uint8 svctype; /* 0 - Publish, 0x1 - Subscribe */
- uint8 pad; /* Align */
- uint32 tx_cnt; /* Number of SDFs sent */
+ uint8 pad; /* Align */
} wl_nan_ev_terminated_t;
/* Follow up received against a pub / subscr */
uint8 attr_list[0]; /* attributes payload */
} wl_nan_ev_receive_t;
-/* WL_NAN_EVENT_DISC_CACHE_TIMEOUT */
-#define WL_NAN_DISC_CACHE_EXPIRY_ENTRIES_MAX 8
-
-typedef struct wl_nan_disc_expired_cache_entry {
- uint8 l_sub_id; /* local sub instance_id */
- uint8 r_pub_id; /* remote-matched pub instance_id */
- struct ether_addr r_nmi_addr; /* remote-matched pub nmi addr */
-} wl_nan_disc_expired_cache_entry_t;
-
-typedef struct wl_nan_ev_disc_cache_timeout {
- uint16 count; /* no. of expired cache entries */
- uint16 pad;
- wl_nan_disc_expired_cache_entry_t cache_exp_list[];
-} wl_nan_ev_disc_cache_timeout_t;
-
-/* For NAN event mask extention */
-#define WL_NAN_EVMASK_EXTN_VER 1
-#define WL_NAN_EVMASK_EXTN_LEN 16 /* 16*8 = 128 masks supported */
-
-typedef struct wl_nan_event_extn {
- uint8 ver;
- uint8 pad;
- uint16 len;
- uint8 evmask[];
-} wl_nan_evmask_extn_t;
-
-/* WL_NAN_XTLV_DATA_DP_TXS */
-
-typedef struct wl_nan_data_dp_txs {
- uint8 ndp_id;
- uint8 pad;
- struct ether_addr indi; /* initiator ndi */
-} wl_nan_data_dp_txs_t;
-
-/* WL_NAN_XTLV_RNG_TXS */
-
-typedef struct wl_nan_range_txs {
- uint8 range_id;
- uint8 pad[3];
-} wl_nan_range_txs_t;
-
-#define NAN_MAX_BANDS 2
-
/*
* TLVs - Below XTLV definitions will be deprecated
* in due course (soon as all other branches update
WL_NAN_XTLV_BCN_RX = 0x130,
WL_NAN_XTLV_REPLIED = 0x131, /* Publish sent for a subscribe */
WL_NAN_XTLV_RECEIVED = 0x132, /* FUP Received */
- WL_NAN_XTLV_DISC_RESULTS = 0x133, /* Discovery results */
- WL_NAN_XTLV_TXS = 0x134 /* TX status */
+ WL_NAN_XTLV_DISC_RESULTS = 0x133 /* Discovery results */
};
#define WL_NAN_CMD_GLOBAL 0x00
#define WL_NAN_CMD_DATA_COMP_ID 0x05
#define WL_NAN_CMD_DAM_COMP_ID 0x06
#define WL_NAN_CMD_RANGE_COMP_ID 0x07
-#define WL_NAN_CMD_GENERIC_COMP_ID 0x08
-#define WL_NAN_CMD_SCHED_COMP_ID 0x09
-#define WL_NAN_CMD_NSR_COMP_ID 0x0a /* NAN Save Restore */
-#define WL_NAN_CMD_NANHO_COMP_ID 0x0b /* NAN Host offload */
#define WL_NAN_CMD_DBG_COMP_ID 0x0f
#define WL_NAN_CMD_COMP_SHIFT 8
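/*
 * Illustrative note, not part of the original header: the XTLV and
 * sub-command IDs below combine a component ID with a per-component command
 * ID.  NAN_CMD() is defined elsewhere in this header; a definition along the
 * lines of
 *
 *	#define NAN_CMD(comp, cmd)	(((comp) << WL_NAN_CMD_COMP_SHIFT) | (cmd))
 *
 * is assumed here only to show how the 8-bit shift is used, e.g.
 * NAN_CMD(WL_NAN_CMD_CFG_COMP_ID, 0x08) keeps the component in the upper byte
 * and the command in the lower byte.
 */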
WL_NAN_XTLV_CFG_SEC_CSID = NAN_CMD(WL_NAN_CMD_CFG_COMP_ID, 0x08), /* Security CSID */
WL_NAN_XTLV_CFG_SEC_PMK = NAN_CMD(WL_NAN_CMD_CFG_COMP_ID, 0x09), /* Security PMK */
WL_NAN_XTLV_CFG_SEC_PMKID = NAN_CMD(WL_NAN_CMD_CFG_COMP_ID, 0x0A),
- WL_NAN_XTLV_CFG_SEC_SCID = NAN_CMD(WL_NAN_CMD_CFG_COMP_ID, 0x0B),
- WL_NAN_XTLV_CFG_VNDR_PAYLOAD = NAN_CMD(WL_NAN_CMD_CFG_COMP_ID, 0x0C),
- WL_NAN_XTLV_CFG_HOST_INDPID = NAN_CMD(WL_NAN_CMD_CFG_COMP_ID, 0x0D),
- /* when host ndpid is used */
- WL_NAN_XTLV_CFG_MAC_ADDR = NAN_CMD(WL_NAN_CMD_CFG_COMP_ID, 0x0E),
- /* fast disc time bitmap config */
- WL_NAN_XTLV_CFG_FDISC_TBMP = NAN_CMD(WL_NAN_CMD_CFG_COMP_ID, 0x0F),
WL_NAN_XTLV_SD_SVC_INFO = NAN_CMD(WL_NAN_CMD_SD_COMP_ID, 0x01),
WL_NAN_XTLV_SD_FOLLOWUP = NAN_CMD(WL_NAN_CMD_SD_COMP_ID, 0x02),
WL_NAN_XTLV_SD_REPLIED = NAN_CMD(WL_NAN_CMD_SD_COMP_ID, 0x08), /* Pub sent */
WL_NAN_XTLV_SD_FUP_RECEIVED = NAN_CMD(WL_NAN_CMD_SD_COMP_ID, 0x09), /* FUP Received */
WL_NAN_XTLV_SD_DISC_RESULTS = NAN_CMD(WL_NAN_CMD_SD_COMP_ID, 0x0A), /* Pub RX */
- WL_NAN_XTLV_SD_TXS = NAN_CMD(WL_NAN_CMD_SD_COMP_ID, 0x0B), /* Tx status */
- WL_NAN_XTLV_SD_SDE_SVC_INFO = NAN_CMD(WL_NAN_CMD_SD_COMP_ID, 0x0C),
- WL_NAN_XTLV_SD_SDE_SVC_UPD_IND = NAN_CMD(WL_NAN_CMD_SD_COMP_ID, 0x0D),
- WL_NAN_XTLV_SD_SVC_NDI = NAN_CMD(WL_NAN_CMD_SD_COMP_ID, 0x0E),
- WL_NAN_XTLV_SD_NDP_SPEC_INFO = NAN_CMD(WL_NAN_CMD_SD_COMP_ID, 0x0F),
- WL_NAN_XTLV_SD_NDPE_TLV_LIST = NAN_CMD(WL_NAN_CMD_SD_COMP_ID, 0x10),
- WL_NAN_XTLV_SD_NDL_QOS_UPD = NAN_CMD(WL_NAN_CMD_SD_COMP_ID, 0x11),
- WL_NAN_XTLV_SD_DISC_CACHE_TIMEOUT = NAN_CMD(WL_NAN_CMD_SD_COMP_ID, 0x12),
- WL_NAN_XTLV_SD_PEER_NMI = NAN_CMD(WL_NAN_CMD_SD_COMP_ID, 0x13),
WL_NAN_XTLV_SYNC_BCN_RX = NAN_CMD(WL_NAN_CMD_SYNC_COMP_ID, 0x01),
- WL_NAN_XTLV_EV_MR_CHANGED = NAN_CMD(WL_NAN_CMD_SYNC_COMP_ID, 0x02),
WL_NAN_XTLV_DATA_DP_END = NAN_CMD(WL_NAN_CMD_DATA_COMP_ID, 0x01),
WL_NAN_XTLV_DATA_DP_INFO = NAN_CMD(WL_NAN_CMD_DATA_COMP_ID, 0x02),
WL_NAN_XTLV_DATA_DP_SEC_INST = NAN_CMD(WL_NAN_CMD_DATA_COMP_ID, 0x03),
- WL_NAN_XTLV_DATA_DP_TXS = NAN_CMD(WL_NAN_CMD_DATA_COMP_ID, 0x04), /* txs for dp */
- WL_NAN_XTLV_DATA_DP_OPAQUE_INFO = NAN_CMD(WL_NAN_CMD_DATA_COMP_ID, 0x05),
- WL_NAN_XTLV_RANGE_INFO = NAN_CMD(WL_NAN_CMD_RANGE_COMP_ID, 0x01),
- WL_NAN_XTLV_RNG_TXS = NAN_CMD(WL_NAN_CMD_RANGE_COMP_ID, 0x02),
-
- WL_NAN_XTLV_EV_SLOT_INFO = NAN_CMD(WL_NAN_CMD_DBG_COMP_ID, 0x01),
- WL_NAN_XTLV_EV_GEN_INFO = NAN_CMD(WL_NAN_CMD_DBG_COMP_ID, 0x02),
- WL_NAN_XTLV_CCA_STATS = NAN_CMD(WL_NAN_CMD_DBG_COMP_ID, 0x03),
- WL_NAN_XTLV_PER_STATS = NAN_CMD(WL_NAN_CMD_DBG_COMP_ID, 0x04),
- WL_NAN_XTLV_CHBOUND_INFO = NAN_CMD(WL_NAN_CMD_DBG_COMP_ID, 0x05),
- WL_NAN_XTLV_SLOT_STATS = NAN_CMD(WL_NAN_CMD_DBG_COMP_ID, 0x06),
-
- WL_NAN_XTLV_DAM_NA_ATTR = NAN_CMD(WL_NAN_CMD_DAM_COMP_ID, 0x01), /* na attr */
- WL_NAN_XTLV_HOST_ASSIST_REQ = NAN_CMD(WL_NAN_CMD_DAM_COMP_ID, 0x02), /* host assist */
-
- /* wl_nan_fw_cap_t */
- WL_NAN_XTLV_GEN_FW_CAP = NAN_CMD(WL_NAN_CMD_GENERIC_COMP_ID, 0x01),
- /* wl_nan_fw_cap_v2_t */
- WL_NAN_XTLV_GEN_FW_CAP_V2 = NAN_CMD(WL_NAN_CMD_GENERIC_COMP_ID, 0x02),
-
- WL_NAN_XTLV_SCHED_INFO = NAN_CMD(WL_NAN_CMD_SCHED_COMP_ID, 0x01),
-
- /* Nan Save-Restore XTLVs */
- WL_NAN_XTLV_NSR2_PEER = NAN_CMD(WL_NAN_CMD_NSR_COMP_ID, 0x21),
- WL_NAN_XTLV_NSR2_NDP = NAN_CMD(WL_NAN_CMD_NSR_COMP_ID, 0x22),
-
- /* Host offload XTLVs */
- WL_NAN_XTLV_NANHO_PEER_ENTRY = NAN_CMD(WL_NAN_CMD_NANHO_COMP_ID, 0x01),
- WL_NAN_XTLV_NANHO_DCAPLIST = NAN_CMD(WL_NAN_CMD_NANHO_COMP_ID, 0x02),
- WL_NAN_XTLV_NANHO_DCSLIST = NAN_CMD(WL_NAN_CMD_NANHO_COMP_ID, 0x03),
- WL_NAN_XTLV_NANHO_BLOB = NAN_CMD(WL_NAN_CMD_NANHO_COMP_ID, 0x04),
- WL_NAN_XTLV_NANHO_NDP_STATE = NAN_CMD(WL_NAN_CMD_NANHO_COMP_ID, 0x05),
- WL_NAN_XTLV_NANHO_FRM_TPLT = NAN_CMD(WL_NAN_CMD_NANHO_COMP_ID, 0x06),
- WL_NAN_XTLV_NANHO_OOB_NAF = NAN_CMD(WL_NAN_CMD_NANHO_COMP_ID, 0x07),
- WL_NAN_XTLV_NANHO_LOG_ERR_CTRL = NAN_CMD(WL_NAN_CMD_NANHO_COMP_ID, 0x08),
- WL_NAN_XTLV_NANHO_LOG_DBG_CTRL = NAN_CMD(WL_NAN_CMD_NANHO_COMP_ID, 0x09),
- WL_NAN_XTLV_NANHO_OOB_TXS = NAN_CMD(WL_NAN_CMD_NANHO_COMP_ID, 0x0A),
- WL_NAN_XTLV_NANHO_DCAP_ATTR = NAN_CMD(WL_NAN_CMD_NANHO_COMP_ID, 0x0B),
- WL_NAN_XTLV_NANHO_ELEM_ATTR = NAN_CMD(WL_NAN_CMD_NANHO_COMP_ID, 0x0C),
- WL_NAN_XTLV_NANHO_SEC_SA = NAN_CMD(WL_NAN_CMD_NANHO_COMP_ID, 0x0D)
-} wl_nan_tlv_t;
-/* Sub Module ID's for NAN */
-enum {
- NAN_MAC = 0, /* nan mac */
- NAN_DISC = 1, /* nan discovery */
- NAN_DBG = 2, /* nan debug */
- NAN_SCHED = 3, /* nan sched */
- NAN_PEER_ENTRY = 4, /* nan peer entry */
- NAN_AVAIL = 5, /* nan avail */
- NAN_DAM = 6, /* nan dam */
- NAN_FSM = 7, /* nan fsm registry */
- NAN_NDP = 8, /* nan ndp */
- NAN_NDL = 9, /* nan ndl */
- NAN_DP = 10, /* nan dp core */
- NAN_RNG = 11, /* nan ranging */
- NAN_SEC = 12, /* nan sec */
- NAN_LAST = 13
-};
+ WL_NAN_XTLV_RANGE_INFO = NAN_CMD(WL_NAN_CMD_RANGE_COMP_ID, 0x01)
+} wl_nan_tlv_t;
enum wl_nan_sub_cmd_xtlv_id {
WL_NAN_CMD_CFG_AVAIL = NAN_CMD(WL_NAN_CMD_CFG_COMP_ID, 0x12),
WL_NAN_CMD_CFG_WFA_TM = NAN_CMD(WL_NAN_CMD_CFG_COMP_ID, 0x13),
WL_NAN_CMD_CFG_EVENT_MASK = NAN_CMD(WL_NAN_CMD_CFG_COMP_ID, 0x14),
- WL_NAN_CMD_CFG_NAN_CONFIG = NAN_CMD(WL_NAN_CMD_CFG_COMP_ID, 0x15), /* ctrl */
+ WL_NAN_CMD_CFG_NAN_CONFIG = NAN_CMD(WL_NAN_CMD_CFG_COMP_ID, 0x15),
WL_NAN_CMD_CFG_NAN_ENAB = NAN_CMD(WL_NAN_CMD_CFG_COMP_ID, 0x16),
WL_NAN_CMD_CFG_ULW = NAN_CMD(WL_NAN_CMD_CFG_COMP_ID, 0x17),
- WL_NAN_CMD_CFG_NAN_CONFIG2 = NAN_CMD(WL_NAN_CMD_CFG_COMP_ID, 0x18), /* ctrl2 */
- WL_NAN_CMD_CFG_DEV_CAP = NAN_CMD(WL_NAN_CMD_CFG_COMP_ID, 0x19),
- WL_NAN_CMD_CFG_SCAN_PARAMS = NAN_CMD(WL_NAN_CMD_CFG_COMP_ID, 0x1A),
- WL_NAN_CMD_CFG_VNDR_PAYLOAD = NAN_CMD(WL_NAN_CMD_CFG_COMP_ID, 0x1B),
- WL_NAN_CMD_CFG_FASTDISC = NAN_CMD(WL_NAN_CMD_CFG_COMP_ID, 0x1C),
- WL_NAN_CMD_CFG_MIN_TX_RATE = NAN_CMD(WL_NAN_CMD_CFG_COMP_ID, 0x1D),
- WL_NAN_CMD_CFG_MAX = WL_NAN_CMD_CFG_MIN_TX_RATE,
-
+ WL_NAN_CMD_CFG_MAX = WL_NAN_CMD_CFG_NAN_ENAB,
/* Add new commands before and update */
/* nan election sub-commands */
+ WL_NAN_CMD_ELECTION_JOIN = NAN_CMD(WL_NAN_CMD_ELECTION_COMP_ID, 0x04), /* Deprecated */
+ WL_NAN_CMD_ELECTION_STOP = NAN_CMD(WL_NAN_CMD_ELECTION_COMP_ID, 0x07), /* Deprecated */
+
WL_NAN_CMD_ELECTION_HOST_ENABLE = NAN_CMD(WL_NAN_CMD_ELECTION_COMP_ID, 0x01),
WL_NAN_CMD_ELECTION_METRICS_CONFIG = NAN_CMD(WL_NAN_CMD_ELECTION_COMP_ID, 0x02),
WL_NAN_CMD_ELECTION_METRICS_STATE = NAN_CMD(WL_NAN_CMD_ELECTION_COMP_ID, 0x03),
WL_NAN_CMD_SD_FUP_TRANSMIT = NAN_CMD(WL_NAN_CMD_SD_COMP_ID, 0x0B),
WL_NAN_CMD_SD_CONNECTION = NAN_CMD(WL_NAN_CMD_SD_COMP_ID, 0x0C),
WL_NAN_CMD_SD_SHOW = NAN_CMD(WL_NAN_CMD_SD_COMP_ID, 0x0D),
- WL_NAN_CMD_SD_DISC_CACHE_TIMEOUT = NAN_CMD(WL_NAN_CMD_SD_COMP_ID, 0x0E),
- WL_NAN_CMD_SD_DISC_CACHE_CLEAR = NAN_CMD(WL_NAN_CMD_SD_COMP_ID, 0x0F),
- WL_NAN_CMD_SD_MAX = WL_NAN_CMD_SD_DISC_CACHE_CLEAR,
+ WL_NAN_CMD_SD_MAX = WL_NAN_CMD_SD_SHOW,
/* nan time sync sub-commands */
WL_NAN_CMD_DATA_DATACONF = NAN_CMD(WL_NAN_CMD_DATA_COMP_ID, 0x0E),
WL_NAN_CMD_DATA_MIN_TX_RATE = NAN_CMD(WL_NAN_CMD_DATA_COMP_ID, 0x0F),
WL_NAN_CMD_DATA_MAX_PEERS = NAN_CMD(WL_NAN_CMD_DATA_COMP_ID, 0x10),
- WL_NAN_CMD_DATA_DP_IDLE_PERIOD = NAN_CMD(WL_NAN_CMD_DATA_COMP_ID, 0x11),
- WL_NAN_CMD_DATA_DP_OPAQUE_INFO = NAN_CMD(WL_NAN_CMD_DATA_COMP_ID, 0x12),
- WL_NAN_CMD_DATA_DP_HB_DURATION = NAN_CMD(WL_NAN_CMD_DATA_COMP_ID, 0x13),
- WL_NAN_CMD_DATA_PATH_MAX = WL_NAN_CMD_DATA_DP_HB_DURATION, /* New ones before and update */
+ WL_NAN_CMD_DATA_PATH_MAX = WL_NAN_CMD_DATA_MAX_PEERS, /* New ones before and update */
/* nan dam sub-commands */
WL_NAN_CMD_DAM_CFG = NAN_CMD(WL_NAN_CMD_DAM_COMP_ID, 0x01),
WL_NAN_CMD_RANGE_AUTO = NAN_CMD(WL_NAN_CMD_RANGE_COMP_ID, 0x02),
WL_NAN_CMD_RANGE_RESPONSE = NAN_CMD(WL_NAN_CMD_RANGE_COMP_ID, 0x03),
WL_NAN_CMD_RANGE_CANCEL = NAN_CMD(WL_NAN_CMD_RANGE_COMP_ID, 0x04),
- WL_NAN_CMD_RANGE_IDLE_COUNT = NAN_CMD(WL_NAN_CMD_RANGE_COMP_ID, 0x05),
- WL_NAN_CMD_RANGE_CANCEL_EXT = NAN_CMD(WL_NAN_CMD_RANGE_COMP_ID, 0x06),
/* nan debug sub-commands */
WL_NAN_CMD_DBG_SCAN_PARAMS = NAN_CMD(WL_NAN_CMD_DBG_COMP_ID, 0x01),
WL_NAN_CMD_DBG_DISC_RESULTS = NAN_CMD(WL_NAN_CMD_DBG_COMP_ID, 0x0D),
WL_NAN_CMD_DBG_STATS = NAN_CMD(WL_NAN_CMD_DBG_COMP_ID, 0x0E),
WL_NAN_CMD_DBG_LEVEL = NAN_CMD(WL_NAN_CMD_DBG_COMP_ID, 0x0F),
- WL_NAN_CMD_DBG_MAX = WL_NAN_CMD_DBG_LEVEL, /* New ones before and update */
-
- /* Generic componenet */
- WL_NAN_CMD_GEN_STATS = NAN_CMD(WL_NAN_CMD_GENERIC_COMP_ID, 0x01),
- WL_NAN_CMD_GEN_FW_CAP = NAN_CMD(WL_NAN_CMD_GENERIC_COMP_ID, 0x02),
- WL_NAN_CMD_GEN_MAX = WL_NAN_CMD_GEN_FW_CAP,
-
- /* NAN Save-Restore */
- WL_NAN_CMD_NSR2 = NAN_CMD(WL_NAN_CMD_NSR_COMP_ID, 0x20),
- WL_NAN_CMD_NSR2_MAX = WL_NAN_CMD_NSR2,
-
- /* Host offload sub-commands */
- WL_NAN_CMD_NANHO_UPDATE = NAN_CMD(WL_NAN_CMD_NANHO_COMP_ID, 0x01), /* WILL BE REMOVED */
- WL_NAN_CMD_NANHO_INFO = NAN_CMD(WL_NAN_CMD_NANHO_COMP_ID, 0x01),
- WL_NAN_CMD_NANHO_FRM_TPLT = NAN_CMD(WL_NAN_CMD_NANHO_COMP_ID, 0x02), /* unused */
- WL_NAN_CMD_NANHO_OOB_NAF = NAN_CMD(WL_NAN_CMD_NANHO_COMP_ID, 0x03), /* unused */
- WL_NAN_CMD_NANHO_LOG_CTRL = NAN_CMD(WL_NAN_CMD_NANHO_COMP_ID, 0x04),
- WL_NAN_CMD_NANHO_VER = NAN_CMD(WL_NAN_CMD_NANHO_COMP_ID, 0x05),
- WL_NAN_CMD_NANHO_MAX = WL_NAN_CMD_NANHO_VER,
-
- /* Add submodules below, and update WL_NAN_CMD_MAX */
-
- /* ROML check for this enum should use WL_NAN_CMD_MAX */
- WL_NAN_CMD_MAX = WL_NAN_CMD_NANHO_MAX
-};
-
-/*
- * Component/Module based NAN TLV IDs for NAN stats
- */
-typedef enum wl_nan_stats_tlv {
- WL_NAN_XTLV_SYNC_MAC_STATS = NAN_CMD(WL_NAN_CMD_SYNC_COMP_ID, 0x01),
-
- WL_NAN_XTLV_SD_DISC_STATS = NAN_CMD(WL_NAN_CMD_SD_COMP_ID, 0x01),
-
- WL_NAN_XTLV_DATA_NDP_STATS = NAN_CMD(WL_NAN_CMD_DATA_COMP_ID, 0x01),
- WL_NAN_XTLV_DATA_NDL_STATS = NAN_CMD(WL_NAN_CMD_DATA_COMP_ID, 0x02),
- WL_NAN_XTLV_DATA_SEC_STATS = NAN_CMD(WL_NAN_CMD_DATA_COMP_ID, 0x03),
-
- WL_NAN_XTLV_GEN_SCHED_STATS = NAN_CMD(WL_NAN_CMD_GENERIC_COMP_ID, 0x01),
- WL_NAN_XTLV_GEN_PEER_STATS = NAN_CMD(WL_NAN_CMD_GENERIC_COMP_ID, 0x02),
- WL_NAN_XTLV_GEN_PEER_STATS_DEVCAP = NAN_CMD(WL_NAN_CMD_GENERIC_COMP_ID, 0x03),
- WL_NAN_XTLV_GEN_PEER_STATS_NDP = NAN_CMD(WL_NAN_CMD_GENERIC_COMP_ID, 0x04),
- WL_NAN_XTLV_GEN_PEER_STATS_SCHED = NAN_CMD(WL_NAN_CMD_GENERIC_COMP_ID, 0x05),
- WL_NAN_XTLV_GEN_AVAIL_STATS_SCHED = NAN_CMD(WL_NAN_CMD_GENERIC_COMP_ID, 0x06),
- WL_NAN_XTLV_GEN_NDP_STATS = NAN_CMD(WL_NAN_CMD_GENERIC_COMP_ID, 0x07),
-
- WL_NAN_XTLV_DAM_STATS = NAN_CMD(WL_NAN_CMD_DAM_COMP_ID, 0x01),
- WL_NAN_XTLV_DAM_AVAIL_STATS = NAN_CMD(WL_NAN_CMD_DAM_COMP_ID, 0x02),
-
- WL_NAN_XTLV_RANGE_STATS = NAN_CMD(WL_NAN_CMD_RANGE_COMP_ID, 0x01)
-} wl_nan_stats_tlv_t;
-
-/* NAN stats WL_NAN_CMD_GEN_STATS command */
-/* Input data */
-typedef struct wl_nan_cmn_get_stat {
- uint32 modules_btmap; /* Bitmap to indicate module stats are needed:
- * See NAN Sub Module ID's above
- */
- uint8 operation; /* Get, Get and Clear */
- uint8 arg1; /* Submodule control variable1 */
- uint8 arg2; /* Submodule control variable2 */
- uint8 pad; /* May not be needed as TLV's are aligned,add to pass compile chk */
-} wl_nan_cmn_get_stat_t;
-
-/* Output for Stats container */
-typedef struct wl_nan_cmn_stat {
- uint32 n_stats; /* Number of different sub TLV stats present in the container */
- uint32 totlen; /* Total Length of stats data in container */
- uint8 stats_tlvs []; /* Stat TLV's container */
-} wl_nan_cmn_stat_t;
-
-/* Defines for operation */
-#define WLA_NAN_STATS_GET 0
-#define WLA_NAN_STATS_GET_CLEAR 1
-
-#define WL_NAN_STAT_ALL 0xFFFFFFFF
-
-/* NAN Mac stats */
-
-typedef struct wl_nan_mac_band_stats {
- uint32 bcn_tx; /* 2g/5g disc/sync beacon tx count */
- uint32 bcn_rx; /* 2g/5g disc/sync beacon rx count */
- uint32 dws; /* Number of 2g/5g DW's */
-} wl_nan_mac_band_stats_t;
-
-/* Note: if this struct is changing update wl_nan_slot_ecounters_vX_t version,
- * as this struct is sent as payload in wl_nan_slot_ecounter_vX_ts
- */
-typedef struct wl_nan_mac_stats {
- wl_nan_mac_band_stats_t band[NAN_MAX_BANDS]; /* MAC sync band specific stats */
- uint32 naf_tx; /* NAN AF tx */
- uint32 naf_rx; /* NAN AF rx */
- uint32 sdf_tx; /* SDF tx */
- uint32 sdf_rx; /* SDF rx */
- uint32 cnt_sync_bcn_rx_tu[3]; /* delta bw */
- uint32 cnt_bcn_tx_out_dw; /* TX sync beacon outside dw */
- uint32 cnt_role_am_dw; /* anchor master role due to dw */
- uint32 cnt_am_hop_err; /* wrong hopcount set for AM */
-} wl_nan_mac_stats_t;
-
-/* NAN Sched stats */
-/* Per core Sched stats */
-typedef struct nan_sched_stats_core {
- uint32 slotstart; /* slot_start */
- uint32 slotend; /* slot_end */
- uint32 slotskip; /* slot_skip */
- uint32 slotstart_partial; /* slot resume */
- uint32 slotend_partial; /* slot pre-empt */
- uint8 avail_upd_cnt; /* count to track num of times avail has been updated */
- uint8 pad[3];
-} nan_sched_stats_core_t;
-/* Common Sched stats */
-typedef struct nan_sched_stats_cmn {
- uint32 slot_adj_dw; /* Slot adjusts due to DW changes */
- uint32 slot_dur; /* Total slot duration in TU's */
-} nan_sched_stats_cmn_t;
-
-/* Note: if this struct is changing update wl_nan_slot_ecounters_vX_t version,
- * as this struct is sent as payload in wl_nan_slot_ecounters_vX_t
- */
-typedef struct nan_sched_stats {
- nan_sched_stats_cmn_t cmn;
- nan_sched_stats_core_t slice[MAX_NUM_D11CORES];
-} nan_sched_stats_t;
-/* End NAN Sched stats */
-
-/* NAN Discovery stats */
-typedef struct nan_disc_stats {
- uint32 pub_tx; /* Publish tx */
- uint32 pub_rx; /* Publish rx */
- uint32 sub_tx; /* Subscribe tx */
- uint32 sub_rx; /* Subscribe rx */
- uint32 fup_tx; /* Followup tx */
- uint32 fup_rx; /* Followup rx */
- uint32 pub_resp_ignored; /* response to incoming publish ignored */
- uint32 sub_resp_ignored; /* response to incoming subscribe ignored */
-} nan_disc_stats_t;
-/* NAN Discovery stats end */
-
-/* statistics for nan sec */
-typedef struct nan_sec_stats_s {
- uint32 mic_fail; /* rx mic fail */
- uint32 replay_fail; /* replay counter */
- uint32 tx_fail; /* tx fail (from txstatus) */
- uint32 key_info_err; /* key info field err */
- uint32 ok_sessions; /* successful mx negotiations */
- uint32 fail_sessions; /* failed sessions */
- uint32 keydesc_err; /* key desc error */
- uint32 invalid_cipher; /* cipher suite not valid */
- uint32 pmk_not_found; /* no pmk found for given service or for any reason */
- uint32 no_pmk_for_pmkid; /* no pmk found for give pmkid */
- uint32 key_install_err; /* failed to install keys */
- uint32 no_keydesc_attr; /* key desc attr missing */
- uint32 nonce_mismatch; /* nonce mismatch */
-} nan_sec_stats_t;
-
-/* WL_NAN_XTLV_GEN_PEER_STATS */
-typedef struct wl_nan_peer_stats {
- struct ether_addr nmi;
- uint8 pad[2];
- uint32 pkt_enq; /* counter for queued pkt of peer */
-
- /* NDL */
- bool ndl_exist;
- uint8 ndl_state;
- bool counter_proposed;
- uint8 pad1;
-
- /* NDL QoS */
- uint16 local_max_latency;
- uint16 peer_max_latency;
- uint8 local_min_slots;
- uint8 peer_min_slots;
-
- /* security association */
- struct ether_addr sec_laddr; /* local mac addr */
- struct ether_addr sec_raddr; /* remote mac addr */
- uint8 sec_csid;
- uint8 pad2;
-} wl_nan_peer_stats_t;
-
-/* WL_NAN_XTLV_GEN_PEER_STATS_DEVCAP */
-typedef struct wl_nan_peer_stats_dev_cap {
- uint8 mapid;
- uint8 awake_dw_2g;
- uint8 awake_dw_5g;
- uint8 bands_supported;
- uint8 op_mode;
- uint8 num_antennas;
- uint16 chan_switch_time;
- uint8 capabilities;
- uint8 pad[3];
-} wl_nan_peer_stats_dev_cap_t;
-
-/* WL_NAN_XTLV_GEN_PEER_STATS_NDP */
-typedef struct wl_nan_peer_stats_ndp {
- uint8 peer_role;
- uint8 ndp_state;
- uint8 indp_id; /* initiator ndp id */
- uint8 ndp_ctrl; /* ndp control field */
- struct ether_addr peer_nmi;
- struct ether_addr peer_ndi;
- struct ether_addr local_ndi;
-
- /* peer scb info */
- bool scb_allocated;
- bool scb_found;
- uint32 scb_flags;
- uint32 scb_flags2;
- uint32 scb_flags3;
-} wl_nan_peer_stats_ndp_t;
-
-enum {
- WL_NAN_SCHED_STAT_SLOT_COMM = 0x01, /* Committed slot */
- WL_NAN_SCHED_STAT_SLOT_COND = 0x02, /* Conditional slot(proposal/counter) */
- WL_NAN_SCHED_STAT_SLOT_NDC = 0x04, /* NDC slot */
- WL_NAN_SCHED_STAT_SLOT_IMMUT = 0x08, /* Immutable slot */
- WL_NAN_SCHED_STAT_SLOT_RANGE = 0x10, /* Ranging slot */
+ WL_NAN_CMD_DBG_MAX = WL_NAN_CMD_DBG_LEVEL /* New ones before and update */
};
-typedef uint16 wl_nan_stats_sched_slot_info_t;
-
-typedef struct wl_nan_stats_sched_slot {
- wl_nan_stats_sched_slot_info_t info; /* capture slot type and more info */
- chanspec_t chanspec;
-} wl_nan_stats_sched_slot_t;
-
-/* WL_NAN_XTLV_GEN_PEER_STATS_SCHED, WL_NAN_XTLV_GEN_AVAIL_STATS_SCHED */
-typedef struct wl_nan_stats_sched {
- uint8 map_id;
- uint8 seq_id; /* seq id from NA attr */
- uint8 slot_dur;
- uint8 pad;
- uint16 period;
- uint16 num_slot;
- wl_nan_stats_sched_slot_t slot[];
-} wl_nan_stats_sched_t;
-
-/* WL_NAN_XTLV_GEN_PEER_STATS_SCHED */
-typedef struct wl_nan_peer_stats_sched {
- uint8 map_id;
- uint8 seq_id; /* seq id from NA attr */
- uint8 slot_dur;
- uint8 pad;
- uint16 period;
- uint16 num_slot;
- wl_nan_stats_sched_slot_t slot[];
-} wl_nan_peer_stats_sched_t;
-
-/* WL_NAN_XTLV_RANGE_STATS */
-typedef struct wl_nan_range_stats {
- uint16 rng_ssn_estb;
- uint16 rng_ssn_fail;
- uint16 rng_sched_start;
- uint16 rng_sched_end;
- uint16 ftm_ssn_success; /* number of succesfull ftm sessions */
- uint16 ftm_ssn_fail;
- uint16 num_meas; /* number of ftm frames */
- uint16 num_valid_meas; /* number of ftm frames with valid timestamp */
-} wl_nan_range_stats_t;
-
-/* defines for ndp stats flag */
-
-#define NAN_NDP_STATS_FLAG_ROLE_MASK 0x01
-#define NAN_NDP_STATS_FLAG_ROLE_INIT 0x00
-#define NAN_NDP_STATS_FLAG_ROLE_RESP 0x01
-
-#define NAN_NDP_STATS_STATE_BIT_SHIFT 1
-#define NAN_NDP_STATS_FLAG_STATE_MASK 0x07
-#define NAN_NDP_STATS_FLAG_STATE_IN_PROG 0x00
-#define NAN_NDP_STATS_FLAG_STATE_ESTB 0x01
-#define NAN_NDP_STATS_FLAG_STATE_TEARDOWN_WAIT 0x02
-/* More states can be added here, when needed */
-
-/* WL_NAN_XTLV_GEN_NDP_STATS */
-typedef struct wl_nan_ndp_stats_s {
- uint8 ndp_id;
- uint8 indp_id;
- uint8 flags;
- uint8 nan_sec_csid;
- struct ether_addr lndi_addr;
- struct ether_addr pnmi_addr;
- struct ether_addr pndi_addr;
- uint8 PAD[2];
-} wl_nan_ndp_stats_t;
-
-/* WL_NAN_XTLV_EV_SLOT_INFO */
-typedef struct wl_nan_slot_info_s {
- /* dw slot start expected */
- uint32 dwst_h;
- uint32 dwst_l;
- /* dw slot start actual */
- uint32 act_dwst_h;
- uint32 act_dwst_l;
- uint16 cur_chan[MAX_NUM_D11CORES]; /* sdb channels */
- uint16 dw_chan; /* dw channel */
- uint8 dw_no; /* dw number */
- uint8 slot_seq_no; /* slot seq no. */
-} wl_nan_slot_info_t;
-
-/* WL_NAN_EVENT_MR_CHANGED */
-typedef uint8 wl_nan_mr_changed_t;
-#define WL_NAN_AMR_CHANGED 1
-#define WL_NAN_IMR_CHANGED 2
-
-/*
- * The macro BCMUTILS_ERR_CODES is defined only
- * when using the common header file(a new approach) bcmutils/include/bcmerror.h.
- * Otherwise, use the error codes from this file.
- */
-#ifndef BCMUTILS_ERR_CODES
/** status - TBD BCME_ vs NAN status - range reserved for BCME_ */
enum {
/* add new status here... */
- WL_NAN_E_INVALID_TOKEN = -2135, /* invalid token or mismatch */
- WL_NAN_E_INVALID_ATTR = -2134, /* generic invalid attr error */
- WL_NAN_E_INVALID_NDL_ATTR = -2133, /* invalid NDL attribute */
- WL_NAN_E_SCB_NORESOURCE = -2132, /* no more peer scb available */
- WL_NAN_E_PEER_NOTAVAIL = -2131,
- WL_NAN_E_SCB_EXISTS = -2130,
- WL_NAN_E_INVALID_PEER_NDI = -2129,
- WL_NAN_E_INVALID_LOCAL_NDI = -2128,
- WL_NAN_E_ALREADY_EXISTS = -2127, /* generic NAN error for duplication */
- WL_NAN_E_EXCEED_MAX_NUM_MAPS = -2126,
- WL_NAN_E_INVALID_DEV_CHAN_SCHED = -2125,
- WL_NAN_E_INVALID_PEER_BLOB_TYPE = -2124,
- WL_NAN_E_INVALID_LCL_BLOB_TYPE = -2123,
- WL_NAN_E_BCMC_PDPA = -2122, /* BCMC NAF PDPA */
- WL_NAN_E_TIMEOUT = -2121,
- WL_NAN_E_HOST_CFG = -2120,
- WL_NAN_E_NO_ACK = -2119,
- WL_NAN_E_SECINST_FAIL = -2118,
- WL_NAN_E_REJECT_NDL = -2117, /* generic NDL rejection error */
- WL_NAN_E_INVALID_NDP_ATTR = -2116,
- WL_NAN_E_HOST_REJECTED = -2115,
- WL_NAN_E_PCB_NORESOURCE = -2114,
- WL_NAN_E_NDC_EXISTS = -2113,
- WL_NAN_E_NO_NDC_ENTRY_AVAIL = -2112,
- WL_NAN_E_INVALID_NDC_ENTRY = -2111,
- WL_NAN_E_SD_TX_LIST_FULL = -2110,
- WL_NAN_E_SVC_SUB_LIST_FULL = -2109,
- WL_NAN_E_SVC_PUB_LIST_FULL = -2108,
- WL_NAN_E_SDF_MAX_LEN_EXCEEDED = -2107,
- WL_NAN_E_ZERO_CRB = -2106, /* no CRB between local and peer */
- WL_NAN_E_PEER_NDC_NOT_SELECTED = -2105, /* peer ndc not selected */
- WL_NAN_E_DAM_CHAN_CONFLICT = -2104, /* dam schedule channel conflict */
- WL_NAN_E_DAM_SCHED_PERIOD = -2103, /* dam schedule period mismatch */
- WL_NAN_E_LCL_NDC_NOT_SELECTED = -2102, /* local selected ndc not configured */
- WL_NAN_E_NDL_QOS_INVALID_NA = -2101, /* na doesn't comply with ndl qos */
- WL_NAN_E_CLEAR_NAF_WITH_SA_AS_RNDI = -2100, /* rx clear naf with peer rndi */
- WL_NAN_E_SEC_CLEAR_PKT = -2099, /* rx clear pkt from a peer with sec_sa */
- WL_NAN_E_PROT_NON_PDPA_NAF = -2098, /* rx protected non PDPA frame */
- WL_NAN_E_DAM_DOUBLE_REMOVE = -2097, /* remove peer schedule already removed */
- WL_NAN_E_DAM_DOUBLE_MERGE = -2096, /* merge peer schedule already merged */
- WL_NAN_E_DAM_REJECT_INVALID = -2095, /* reject for invalid schedule */
- WL_NAN_E_DAM_REJECT_RANGE = -2094,
- WL_NAN_E_DAM_REJECT_QOS = -2093,
- WL_NAN_E_DAM_REJECT_NDC = -2092,
- WL_NAN_E_DAM_REJECT_PEER_IMMUT = -2091,
- WL_NAN_E_DAM_REJECT_LCL_IMMUT = -2090,
- WL_NAN_E_DAM_EXCEED_NUM_SCHED = -2089,
- WL_NAN_E_DAM_INVALID_SCHED_MAP = -2088, /* invalid schedule map list */
- WL_NAN_E_DAM_INVALID_LCL_SCHED = -2087,
- WL_NAN_E_INVALID_MAP_ID = -2086,
- WL_NAN_E_CHAN_OVERLAP_ACROSS_MAP = -2085,
- WL_NAN_E_INVALID_CHAN_LIST = -2084,
- WL_NAN_E_INVALID_RANGE_TBMP = -2083,
- WL_NAN_E_INVALID_IMMUT_SCHED = -2082,
- WL_NAN_E_INVALID_NDC_ATTR = -2081,
- WL_NAN_E_INVALID_TIME_BITMAP = -2080,
- WL_NAN_E_INVALID_NA_ATTR = -2079,
- WL_NAN_E_NO_NA_ATTR_IN_AVAIL_MAP = -2078, /* no na attr saved in avail map */
- WL_NAN_E_INVALID_MAP_IDX = -2077,
WL_NAN_E_SEC_SA_NOTFOUND = -2076,
WL_NAN_E_BSSCFG_NOTFOUND = -2075,
WL_NAN_E_SCB_NOTFOUND = -2074,
WL_NAN_E_INVALID_BAND = -2050,
WL_NAN_E_INVALID_MAC = -2049,
WL_NAN_E_BAD_INSTANCE = -2048,
- /* NAN status code reserved from -2048 to -3071 */
- /* Do NOT add new status below -2048 */
+ WL_NAN_E_NDC_EXISTS = -2047,
+ WL_NAN_E_NO_NDC_ENTRY_AVAIL = -2046,
+ WL_NAN_E_INVALID_NDC_ENTRY = -2045,
WL_NAN_E_ERROR = -1,
WL_NAN_E_OK = 0
};
-#endif /* BCMUTILS_ERR_CODES */
-
-/* Error codes used in vendor specific attribute in Data Path Termination frames */
-enum {
- WL_NAN_DPEND_E_OK = 0,
- WL_NAN_DPEND_E_ERROR = 1,
- WL_NAN_DPEND_E_HOST_CMD = 2,
- WL_NAN_DPEND_E_HOST_REJECTED = 3, /* host rejected rx frame */
- WL_NAN_DPEND_E_RESOURCE_LIMIT = 4,
- WL_NAN_DPEND_E_NO_ACK_RCV = 5,
- WL_NAN_DPEND_E_TIMEOUT = 6,
- WL_NAN_DPEND_E_NO_ELT = 7, /* rx frame missing element container */
- WL_NAN_DPEND_E_NO_NDP_ATTR = 8,
- WL_NAN_DPEND_E_NO_AVAIL_ATTR = 9,
- WL_NAN_DPEND_E_NO_NDC_ATTR = 10,
- WL_NAN_DPEND_E_NO_RANGE_BM = 11,
- WL_NAN_DPEND_E_INVALID_NDP_ATTR = 12,
- WL_NAN_DPEND_E_INVALID_NDC_ATTR = 13,
- WL_NAN_DPEND_E_INVALID_IMMUT = 14,
- WL_NAN_DPEND_E_INVALID_NDL_QOS = 15,
- WL_NAN_DPEND_E_INVALID_SEC_PARAMS = 16,
- WL_NAN_DPEND_E_REJECT_AVAIL = 17,
- WL_NAN_DPEND_E_REJECT_NDL = 18,
- WL_NAN_DPEND_E_SCB_NORESOURCE = 19
-};
-
typedef int32 wl_nan_status_t;
/** nan cmd list entry */
/* WL_NAN_CMD_CFG_NAN_CONFIG */
typedef uint32 wl_nan_cfg_ctrl_t;
-/* WL_NAN_CMD_CFG_NAN_CONFIG2 */
-typedef struct wl_nan_cfg_ctrl2 {
- uint32 flags1; /* wl_nan_cfg_ctrl2_flags1 */
- uint32 flags2; /* wl_nan_cfg_ctrl2_flags2 */
-} wl_nan_cfg_ctrl2_t;
-
-enum wl_nan_cfg_ctrl2_flags1 {
- /* Allows unicast SDF TX while local device is under NDP/NDL negotiation,
- * but Not with the peer SDF destined to.
- */
- WL_NAN_CTRL2_FLAG1_ALLOW_SDF_TX_UCAST_IN_PROG = 0x00000001,
- /* Allows broadcast SDF TX while local device is under NDP/NDL negotiation */
- WL_NAN_CTRL2_FLAG1_ALLOW_SDF_TX_BCAST_IN_PROG = 0x00000002,
- /* Allows the device to send schedule update automatically on local schedule change */
- WL_NAN_CTRL2_FLAG1_AUTO_SCHEDUPD = 0x00000004,
- /* Allows the device to handle slot pre_close operations */
- WL_NAN_CTRL2_FLAG1_SLOT_PRE_CLOSE = 0x00000008
-};
-#define WL_NAN_CTRL2_FLAGS1_MASK 0x0000000F
-
-#define WL_NAN_CTRL2_FLAGS2_MASK 0x00000000
-
/*
* WL_NAN_CMD_CFG_BAND, WL_NAN_CMD_CFG_RSSI_THRESHOLD(Get only)
*/
typedef uint8 wl_nan_max_peers_t;
+#define NAN_MAX_BANDS 2
/*
* WL_NAN_CMD_CFG_STATUS
*/
+/* Deprecated - Begin */
+typedef struct wl_nan_cfg_status {
+ uint8 enabled;
+ uint8 inited;
+ uint8 joined;
+ uint8 merged;
+ uint8 role;
+ uint8 PAD[3];
+ uint32 chspec[2];
+ uint8 mr[8]; /**< Master Rank */
+ uint8 amr[8]; /**< Anchor Master Rank */
+ uint32 cnt_pend_txfrm; /**< pending TX frames */
+ uint32 cnt_bcn_tx; /**< TX disc/sync beacon count */
+ uint32 cnt_bcn_rx; /**< RX disc/sync beacon count */
+ uint32 cnt_svc_disc_tx; /**< TX svc disc frame count */
+ uint32 cnt_svc_disc_rx; /**< RX svc disc frame count */
+ uint32 ambtt; /**< Anchor master beacon target time */
+ struct ether_addr cid; /**< Cluster id */
+ uint8 hop_count; /**< Hop count */
+ uint8 PAD;
+} wl_nan_cfg_status_t;
+
+typedef struct wl_nan_config_status {
+ struct ether_addr def_cid; /* Default Cluster id */
+ uint8 inited; /* NAN Initialized successfully */
+ uint8 enabled; /* NAN Enabled */
+ struct ether_addr cur_cid; /* Current Cluster id */
+ uint8 joined; /* Joined or started own cluster */
+ uint8 role; /* Master, Non Master, NM Sync & Non-Sync */
+ chanspec_t chspec[NAN_MAX_BANDS]; /* Channel Spec 2.4G followed by 5G */
+ uint8 mr[WL_NAN_MASTER_RANK_LEN]; /* Master Rank */
+ uint8 amr[WL_NAN_MASTER_RANK_LEN]; /* Anchor Master Rank */
+ uint32 cnt_pend_txfrm; /* Pending Tx Frames */
+ uint32 cnt_bcn_tx; /* TX disc/sync beacon count */
+ uint32 cnt_bcn_rx; /* RX disc/sync beacon count */
+ uint32 cnt_svc_disc_tx; /* TX svc disc frame count */
+ uint32 cnt_svc_disc_rx; /* RX svc disc frame count */
+ uint32 ambtt; /* Anchor master beacon target time */
+ uint8 hop_count; /* Hop count */
+ uint8 pad[3]; /* Align */
+} wl_nan_config_status_t;
+/* Deprecated - End */
typedef enum wl_nan_election_mode {
WL_NAN_ELECTION_RUN_BY_HOST = 1,
uint8 role; /* Current nan sync role */
struct ether_addr cid; /* Current Cluster id */
uint8 social_chans[2]; /* Social channels */
- uint8 mr[8]; /* Self Master Rank */
+ uint8 mr[8]; /* Master Rank */
uint8 amr[8]; /* Anchor Master Rank */
uint32 ambtt; /* Anchor master beacon target time */
uint32 cluster_tsf_h; /* Current Cluster TSF High */
uint32 cluster_tsf_l; /* Current Cluster TSF Low */
uint8 election_mode; /* Election mode, host or firmware */
uint8 hop_count; /* Current Hop count */
- uint8 imr[8]; /* Immediate Master Rank */
- uint8 pad[4]; /* remove after precommit */
- uint16 opt_tlvs_len;
- uint8 opt_tlvs[];
+ uint8 pad[2];
} wl_nan_conf_status_t;
/*
*/
typedef struct ether_addr wl_nan_cluster_id_t;
-#define NHO_SEC_NCS_SK_REPLAY_CNT_LEN 8u
-/* kck */
-#define NHO_SEC_NCS_SK_256_KCK_LEN 24u /* refer nan2 r21 7.1.4.1 */
-/* kek */
-#define NHO_SEC_NCS_SK_256_KEK_LEN 32u /* refer nan2 r21 7.1.4.1 */
-/* tk */
-#define NHO_SEC_NCS_SK_256_TK_LEN 32u /* refer nan2 r21 section 7 */
-
-#define NHO_SEC_NCS_SK_MAX_KEY_LEN (NHO_SEC_NCS_SK_256_KCK_LEN \
- + NHO_SEC_NCS_SK_256_KEK_LEN \
- + NHO_SEC_NCS_SK_256_TK_LEN)
-
-#define NHO_SEC_KEY_INSTALL_FLAG (1 << 0)
-#define NHO_SEC_KEY_UNINSTALL_FLAG (1 << 1)
-
-/* WL_NAN_XTLV_NANHO_SEC_SA */
-typedef struct nanho_sec_sa {
- int8 csid; /* Cipher suite ID to identify the security type */
- uint8 kck_len; /* KCK len in key_buf */
- uint8 kek_len; /* KEK len in key_buf */
- uint8 tk_len; /* Transient key len in key_buf */
- uint16 flags;
- uint16 pad;
- struct ether_addr laddr; /* local mac addr */
- struct ether_addr raddr; /* remote mac addr */
- uint8 key_krc[NHO_SEC_NCS_SK_REPLAY_CNT_LEN]; /* Key Replay ctr */
- uint8 key_buf[NHO_SEC_NCS_SK_MAX_KEY_LEN]; /* PTK = KCK + KEK + TK */
-} nanho_sec_sa_t;
+/*
+ * WL_NAN_CMD_ELECTION_JOIN
+ */
+typedef struct wl_nan_join {
+ uint8 start_cluster; /* Start a cluster */
+ uint8 pad[3];
+ wl_nan_cluster_id_t cluster_id; /* Cluster ID to join */
+} wl_nan_join_t;
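/*
 * Illustrative sketch, not part of the original header: building a join
 * request.  How the structure is delivered to the driver (iovar name, ioctl
 * path) is not shown in this file and is left out of the example.
 */
static void example_fill_nan_join(wl_nan_join_t *join, const struct ether_addr *cid)
{
	memset(join, 0, sizeof(*join));
	join->start_cluster = (cid == NULL);	/* no cluster given: start our own */
	if (cid != NULL)
		join->cluster_id = *cid;	/* otherwise join the existing cluster */
}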
/*
* WL_NAN_CMD_ELECTION_MERGE
/*
* WL_NAN_CMD_CFG_ROLE
- * role = 0 means configuration by firmware(obsolete); otherwise by host
+ * role = 0 means configuration by firmware; otherwise by host
* when host configures role, also need target master address to sync to
*/
-#define NAN_SYNC_MASTER_SELF 1
-#define NAN_SYNC_MASTER_USE_TIMING 2 /* Use the tsf timing provided */
-#define NAN_SYNC_MASTER_AMREC_UPD 4 /* provide AM record update */
-
-/*
- struct ether_addr addr:
- when NAN_SYNC_MASTER_USE_TIMING is set, addr is the mac of Rx NAN beacon
- providing the timing info
- ltsf_h, ltsf_l:
+#define NAN_SYNC_MASTER_SELF 0
+#define NAN_SYNC_MASTER_AM 1
+#define NAN_SYNC_MASTER_INTERMEDIATE 2
+/* ltsf_h, ltsf_l:
The local TSF timestamp filled in by FW in the WL_NAN_EVENT_BCN_RX event;
rtsf_h, rtsf_l:
The timestamp in the Rx beacon frame, filled in by host
uint32 ambtt:
 the ambtt in the cluster ID attribute in the Rx beacon frame
*/
-
typedef struct nan_sync_master {
- uint8 flag; /* 1: self; 2: use TSF timing; 4: AMR update */
+ uint8 flag; /* 0: self, 1: anchor-master, 2: intermediate master */
uint8 hop_count;
struct ether_addr addr;
struct ether_addr cluster_id;
/* TODO RSDB: add chspec to indicates core corresponds correct core */
typedef struct nan_adv_entry {
uint8 age; /* used to remove stale entries */
- uint8 hop_count; /* for NTLV support, use bit7 for virtual NAN peer */
+ uint8 hop_count;
struct ether_addr addr;
struct ether_addr cluster_id;
chanspec_t channel; /* bcn reception channel */
int8 rssi[NAN_MAX_BANDS]; /* rssi last af was received at */
int8 last_rssi[NAN_MAX_BANDS]; /* rssi in the last AF */
} nan_adv_entry_t;
-#define NAN_VIRTUAL_PEER_BIT 0x80
-
-typedef enum {
- NAC_CNT_NTLV_AF_TX = 0, /* count of AF containing NTLV tx */
- NAC_CNT_NTLV_AF_RX, /* count of AF containing NTLV rx */
- NAC_CNT_NTLV_TMERR_TX, /* count of NTLV tx timing error */
- NAC_CNT_NTLV_TMERR_RX, /* count of NTLV rx timing error */
- NAC_CNT_NTLV_TM_MISMATCH, /* count of TopMaster mismatch in Rx NTLV processing */
- NAC_CNT_NTLV_ADV_EXISTED, /* count of NTLV ignored bc advertiser existed from bcn */
- NAC_CNT_NTLV_STALED_BCN, /* count of staled bcn from NTLV info */
- NAC_CNT_NTLV_MERGE, /* count of NTLV used for NAN cluster merge */
- NAC_CNT_NTLV_ELECTION_DROP, /* count of NTLV dropped in NAN election */
- NAC_CNT_NTLV_TSF_ADOPT, /* count of NTLV used for NAN TSF adoption */
- NAC_CNT_NTLV_LAST
-} nac_cnt_enum_t;
-
-#define NAC_MAX_CNT (NAC_CNT_NTLV_LAST)
-
-typedef struct nac_stats {
- uint32 nac_cnt[NAC_MAX_CNT];
-} nac_stats_t;
typedef struct nan_adv_table {
uint8 num_adv;
/* Flag bits for Publish and Subscribe (wl_nan_sd_params_t flags) */
-/* First 8 bits are blocked for mapping
- * against svc_control flag bits which goes out
- * as part of SDA attribute in air in SDF frames
- */
#define WL_NAN_RANGE_LIMITED 0x0040
/* Event generation indicator (default is continuous) */
#define WL_NAN_PUB_SOLICIT_PENDING 0x10000 /* Used for one-time solicited Publish */
#define WL_NAN_FOLLOWUP 0x20000 /* Follow-up frames */
-#define WL_NAN_TX_FOLLOWUP 0x40000 /* host generated transmit Follow-up frames */
/* Bits specific to Subscribe */
*/
#define WL_NAN_TTL_FIRST 0
-/* Nan Service Based control Flags */
-
-/* If set, dev will take care of dp_resp */
-#define WL_NAN_SVC_CTRL_AUTO_DPRESP 0x1000000
-
-/* If set, host wont rec event "receive" */
-#define WL_NAN_SVC_CTRL_SUPPRESS_EVT_RECEIVE 0x2000000
-
-/* If set, host wont rec event "replied" */
-#define WL_NAN_SVC_CTRL_SUPPRESS_EVT_REPLIED 0x4000000
-
-/* If set, host wont rec event "terminated" */
-#define WL_NAN_SVC_CTRL_SUPPRESS_EVT_TERMINATED 0x8000000
-
/*
* WL_NAN_CMD_SD_PARAMS
*/
{
uint16 length; /* length including options */
uint8 period; /* period of the unsolicited SDF xmission in DWs */
- uint8 awake_dw; /* interval between two DWs where SDF tx/rx are done */
+ uint8 pad;
uint8 svc_hash[WL_NAN_SVC_HASH_LEN]; /* Hash for the service name */
uint8 instance_id; /* Instance of the current service */
int8 proximity_rssi; /* RSSI limit to Rx subscribe or pub SDF 0 no effect */
*/
typedef struct wl_nan_sid_beacon_control {
uint8 sid_enable; /* Flag to indicate the inclusion of Service IDs in Beacons */
- uint8 sid_count; /* Limit for number of publish SIDs to be included in Beacons */
- uint8 sub_sid_count; /* Limit for number of subscribe SIDs to be included in Beacons */
- uint8 pad;
+ uint8 sid_count; /* Limit for number of SIDs to be included in Beacons */
+ uint8 pad[2];
} wl_nan_sid_beacon_control_t;
/*
uint32 sdmftfail4;
} wl_nan_sd_stats_t;
-/* Flag bits for sd transmit message (wl_nan_sd_transmit_t flags) */
-
-/* If set, host wont rec "tx status" event for tx-followup msg */
-#define WL_NAN_FUP_SUPR_EVT_TXS 0x01
-/* more flags can be added here */
-
/*
* WL_NAN_CMD_SD_TRANSMIT
* WL_NAN_CMD_SD_FUP_TRANSMIT
*/
typedef struct wl_nan_sd_transmit {
- uint8 local_service_id; /* Sender Service ID */
- uint8 requestor_service_id; /* Destination Service ID */
- struct ether_addr destination_addr; /* Destination MAC */
- uint16 token; /* follow_up_token when a follow-up
- * msg is queued successfully
- */
- uint8 priority; /* requested relative prio */
- uint8 flags; /* Flags for tx follow-up msg */
- uint16 opt_len; /* total length of optional tlvs */
- uint8 opt_tlv[]; /* optional tlvs in bcm_xtlv_t type */
+ uint8 local_service_id; /* Sender Service ID */
+ uint8 requestor_service_id; /* Destination Service ID */
+ struct ether_addr destination_addr; /* Destination MAC */
+ uint16 token; /* follow_up_token when a follow-up msg is queued successfully */
+ uint8 priority; /* requested relative prio */
+ uint8 service_info_len; /* size in bytes of the service info payload */
+ uint8 service_info[]; /* Service Info payload */
} wl_nan_sd_transmit_t;
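/*
 * Illustrative sketch, not part of the original header: wl_nan_sd_transmit_t
 * carries its service info as a trailing array, so the command buffer is
 * sized as header + payload.  calloc()/memcpy() assume a user-space caller;
 * the helper itself is made up for the example and would feed
 * WL_NAN_CMD_SD_FUP_TRANSMIT.
 */
static wl_nan_sd_transmit_t *
example_build_followup(uint8 local_id, uint8 peer_id, const struct ether_addr *dest,
	const uint8 *svc_info, uint8 svc_info_len)
{
	size_t len = sizeof(wl_nan_sd_transmit_t) + svc_info_len;
	wl_nan_sd_transmit_t *tx = (wl_nan_sd_transmit_t *)calloc(1, len);

	if (tx == NULL)
		return NULL;
	tx->local_service_id = local_id;
	tx->requestor_service_id = peer_id;
	tx->destination_addr = *dest;
	tx->service_info_len = svc_info_len;
	memcpy(tx->service_info, svc_info, svc_info_len);
	return tx;	/* caller queues it and frees the buffer afterwards */
}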
-/* disc cache timeout for a cache entry */
-typedef uint16 wl_nan_disc_cache_timeout_t;
-
/*
* WL_NAN_CMD_SYNC_TSRESERVE
*/
/* nan passive scan params */
#define NAN_SCAN_MAX_CHCNT 8
-/* nan merge scan params */
typedef struct wl_nan_scan_params {
- /* dwell time of discovery channel corresponds to band_idx.
- * If set to 0 then fw default will be used.
- */
- uint16 dwell_time;
- /* scan period of discovery channel corresponds to band_idx.
- * If set to 0 then fw default will be used.
- */
- uint16 scan_period;
- /* band index of discovery channel */
- uint8 band_index;
+ uint16 scan_time;
+ uint16 home_time;
+ uint16 ms_intvl; /**< interval between merge scan */
+ uint16 ms_dur; /**< duration of merge scan */
+ uint16 chspec_num;
+ uint8 pad[2];
+ chanspec_t chspec_list[NAN_SCAN_MAX_CHCNT]; /**< act. used 3, 5 rfu */
} wl_nan_scan_params_t;
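/*
 * Illustrative sketch, not part of the original header: filling the merge
 * scan parameters.  The numeric values and their units are examples only;
 * only the structure layout and NAN_SCAN_MAX_CHCNT come from this file.
 */
static void example_fill_nan_scan_params(wl_nan_scan_params_t *sp,
	const chanspec_t *chspecs, uint16 num)
{
	uint16 i;

	memset(sp, 0, sizeof(*sp));
	sp->scan_time = 40;	/* example value */
	sp->home_time = 60;	/* example value */
	sp->ms_intvl = 1000;	/* interval between merge scans, example value */
	sp->ms_dur = 100;	/* merge scan duration, example value */
	sp->chspec_num = (num > NAN_SCAN_MAX_CHCNT) ? NAN_SCAN_MAX_CHCNT : num;
	for (i = 0; i < sp->chspec_num; i++)
		sp->chspec_list[i] = chspecs[i];
}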
/*
uint8 PAD[2];
} wl_nan_debug_params_t;
+
typedef struct wl_nan_sched_svc_timeslot_s {
uint32 abitmap; /* availability bitmap */
uint32 chanlist[NAN_MAX_TIMESLOT];
uint8 PAD[2];
} wl_nan_sched_svc_timeslot_t;
-/*
- * WL_NAN_CMD_DATA_DP_IDLE_PERIOD
- */
-typedef uint16 wl_nan_ndp_idle_period_t;
-/*
- * WL_NAN_CMD_DATA_DP_HB_DURATION
- */
-typedef uint16 wl_nan_ndp_hb_duration_t;
+/* nan passive scan params */
+#define NAN_SCAN_MAX_CHCNT 8
+typedef struct nan_scan_params {
+ uint16 scan_time;
+ uint16 home_time;
+ uint16 ms_intvl; /**< interval between merge scan */
+ uint16 ms_dur; /**< duration of merge scan */
+ uint16 chspec_num;
+ uint8 pad[2];
+ chanspec_t chspec_list[NAN_SCAN_MAX_CHCNT]; /**< act. used 3, 5 rfu */
+} nan_scan_params_t;
/* nan cmd IDs */
enum wl_nan_cmds {
WL_NAN_CMD_TSRELEASE = 9,
WL_NAN_CMD_OUI = 10,
WL_NAN_CMD_OOB_AF = 11,
- WL_NAN_CMD_SCAN_PARAMS = 12,
WL_NAN_CMD_COUNT = 15,
WL_NAN_CMD_CLEARCOUNT = 16,
WL_NAN_CMD_SHOW = 26,
WL_NAN_CMD_STOP = 27, /* stop nan for a given cluster ID */
/* nan debug iovars & cmds */
+ WL_NAN_CMD_SCAN_PARAMS = 46,
WL_NAN_CMD_SCAN = 47,
WL_NAN_CMD_SCAN_RESULTS = 48,
WL_NAN_CMD_EVENT_MASK = 49,
#define WL_NAN_DP_MAX_SVC_INFO 0xFF
#define WL_NAN_DATA_NDP_INST_SUPPORT 16
-/* Nan flags (16 bits) */
-#define WL_NAN_DP_FLAG_SVC_INFO 0x0001
-#define WL_NAN_DP_FLAG_CONFIRM 0x0002
-#define WL_NAN_DP_FLAG_EXPLICIT_CFM 0x0004
-#define WL_NAN_DP_FLAG_SECURITY 0x0008
-#define WL_NAN_DP_FLAG_HAST_NDL_COUNTER 0x0010 /* Host assisted NDL counter */
+/* Nan flags */
+#define WL_NAN_DP_FLAG_SVC_INFO (1 << 0)
+#define WL_NAN_DP_FLAG_CONFIRM (1 << 1)
+#define WL_NAN_DP_FLAG_EXPLICIT_CFM (1 << 2)
+#define WL_NAN_DP_FLAG_SECURITY (1 << 3)
/* NAN Datapath host status */
#define WL_NAN_DP_STATUS_ACCEPTED 1
uint8 tbd;
} wl_nan_dp_cap_t;
+
/** The service hash (service id) is exactly this many bytes. */
#define WL_NAN_SVC_HASH_LEN 6
/** Number of hash functions per bloom filter */
* discovery interface event structures *
*/
-enum wl_nan_oob_af_flags {
- WL_NAN_OOB_AF_FLAG_SEND_EVENT = 0x0001, /* send tx status event */
- WL_NAN_OOB_AF_FLAG_FLUSH_PCB = 0x0002, /* flush PCB */
- WL_NAN_OOB_AF_FLAG_ADD_DCAP = 0x0004, /* add dev cap attr into NAF body */
- WL_NAN_OOB_AF_FLAG_ADD_ELMT = 0x0008, /* add elmt container attr into NAF body */
- WL_NAN_OOB_AF_FLAG_MFP_REQUIRED = 0x0010 /* MFP required */
-};
-typedef uint16 wl_nan_oob_af_flags_t;
-
/* mandatory parameters for OOB action frame */
+/* single-shot when bitmap and offset are set to 0; periodic otherwise */
typedef struct wl_nan_oob_af_params_s
{
- uint8 fup_lcl_id; /* local instance ID of follow-up SDF */
- uint8 fup_peer_id; /* peer instance ID of follow-up SDF */
- uint8 sdf_type; /* represented by service control type NAN_SC_XXX */
- uint8 unused_uint8;
- uint32 unused_uint32;
+ /* bitmap for the 32 timeslots in 512TU dw interval */
+ uint32 ts_map;
+ /* offset from start of dw, in us */
+ uint32 tx_offset;
struct ether_addr bssid;
struct ether_addr dest;
uint32 pkt_lifetime;
- uint8 n2af_sub_type; /* NAN2 AF sub type */
- uint8 retry_cnt; /* NAF tx retry (not 802.11 re-tx) */
- uint16 token; /* NAN host seq num */
- uint16 flags; /* wl_nan_oob_af_flags_t */
- uint32 fsm_id; /* unique fsm id */
uint16 payload_len;
uint8 payload[1];
} wl_nan_oob_af_params_t;
uint32 cnt_nan_enab; /* no. of times nan feature got enabled */
uint32 cnt_nan_disab; /* no. of times nan feature got disabled */
uint32 cnt_sync_bcn_rx; /* count of sync bcn rx within DW */
- uint32 cnt_sync_bcn_rx_tu[3]; /* Delta bw the tsf in bcn & remote */
- uint32 cnt_bcn_tx_out_dw; /* TX sync beacon outside dw */
- uint32 cnt_role_am_dw; /* anchor master role due to dw */
- uint32 cnt_am_hop_err; /* wrong hopcount set for AM */
} wl_nan_stats_t;
#define WL_NAN_MAC_MAX_NAN_PEERS 6
uint16 mean_rate;
uint16 svc_interval;
} wl_nan_dp_qos_t;
-
-#define WL_NAN_NDL_QOS_MAX_LAT_NO_PREF 0xFFFF
-
-/* nan2 qos */
-typedef struct wl_nan_ndl_qos
-{
- uint8 min_slots; /* min slots per dw interval */
- uint8 pad;
- uint16 max_latency; /* max latency */
-} wl_nan_ndl_qos_t;
-
/* ndp config */
typedef struct wl_nan_ndp_config
{
typedef uint8 wl_nan_ndp_ndpid_t;
typedef uint8 wl_nan_ndp_conn_t;
-#define WL_NAN_INVALID_NDPID 0 /* reserved ndp id */
-
typedef struct wl_nan_dp_req {
uint8 type; /* 0- unicast 1 - multicast */
uint8 pub_id; /* Publisher ID */
uint16 flags;
struct ether_addr peer_mac; /* Peer's NMI addr */
struct ether_addr mcast_mac; /* Multicast addr */
- struct ether_addr ndi;
wl_nan_dp_qos_t qos;
- wl_nan_ndl_qos_t ndl_qos; /* ndl qos */
uint8 tlv_params[]; /* xtlv parameters for command */
} wl_nan_dp_req_t;
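/*
 * Illustrative sketch (assumes the wl_nan_dp_req_t layout above, the
 * WL_NAN_DP_FLAG_* bits defined earlier, and memset/memcpy from <string.h>):
 * preparing a unicast datapath request toward a publisher. Optional xtlv
 * parameters would be appended in tlv_params[].
 */
static void nan_dp_req_init(wl_nan_dp_req_t *req, uint8 pub_id,
	const struct ether_addr *peer_nmi)
{
	memset(req, 0, sizeof(*req));
	req->type = 0;				/* 0 = unicast, 1 = multicast */
	req->pub_id = pub_id;
	req->flags = WL_NAN_DP_FLAG_CONFIRM;	/* request an explicit confirm */
	memcpy(&req->peer_mac, peer_nmi, sizeof(req->peer_mac));
	/* qos left zeroed; mcast_mac is unused for the unicast case */
}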
uint8 status; /* Accepted or Rejected */
uint8 reason_code;
/* Local NDP ID for unicast, mc_id for multicast, 0 for implicit NMSG */
- uint8 ndp_id; /* can be host indp id also */
+ uint8 ndp_id;
wl_nan_dp_qos_t qos;
/* Initiator data address for unicast or multicast address for multicast */
struct ether_addr mac_addr;
- struct ether_addr ndi;
uint16 flags;
- wl_nan_ndl_qos_t ndl_qos; /* ndl qos */
uint8 tlv_params[]; /* xtlv parameters for command */
} wl_nan_dp_resp_t;
} wl_nan_dp_resp_ret_t;
typedef struct wl_nan_dp_conf {
- uint8 lndp_id; /* can be host ndp id */
- uint8 status; /* Accepted or Rejected */
+ uint8 lndp_id;
+ uint8 status; /* Accepted or Rejected */
uint8 pad[2];
} wl_nan_dp_conf_t;
typedef struct wl_nan_dp_end
{
- uint8 lndp_id; /* can be host ndp id */
+ uint8 lndp_id;
uint8 status;
- struct ether_addr mac_addr; /* initiator's ndi */
+ uint8 pad[2];
} wl_nan_dp_end_t;
typedef struct wl_nan_dp_schedupd {
uint8 flags;
struct ether_addr addr; /* peer NMI or multicast addr */
wl_nan_dp_qos_t qos;
- wl_nan_ndl_qos_t ndl_qos; /* ndl qos */
uint8 map_id;
- uint8 pad;
- uint16 hostseq;
+ uint8 pad[3];
} wl_nan_dp_schedupd_t;
/* set: update with notification, unset: NDL setup handshake */
struct ether_addr peer_nmi;
struct ether_addr peer_ndi;
ndp_session_t session;
- struct ether_addr lndi;
- uint8 pad[2];
+ uint8 pad;
} wl_nan_ndp_status_t;
-#define NAN_DP_OPAQUE_INFO_DP_RESP 0x01
-#define NAN_DP_OPAQUE_INFO_DP_CONF 0x02
-
-typedef struct wl_nan_dp_opaque_info {
- uint8 frm_mask; /* dp_resp / dp_conf as defined above. */
- struct ether_addr initiator_ndi; /* NDI to match in the dp_req. */
- uint8 pub_id; /* publish id where the opaque data is included. */
- uint8 len; /* len of opaque_info[]. */
- uint8 pad[3];
- uint8 opaque_info[0];
-} wl_nan_dp_opaque_info_t;
-
/* events */
#define NAN_DP_SESSION_UNICAST 0
#define NAN_DP_SESSION_MULTICAST 1
/* Following two fields are valid only if type is multicast */
uint8 nmsg_id[WL_NAN_DATA_NMSGID_LEN];
uint8 mc_id;
- uint8 pad;
+ uint8 pad[1];
uint16 opt_tlv_len;
uint8 opt_tlvs[];
} wl_nan_ev_datapath_cmn_t;
-/* this is obsolete - DON'T USE */
typedef struct wl_nan_ev_datapath_end {
uint8 ndp_id;
uint8 status;
struct ether_addr peer_ndi;
} wl_nan_ev_datapath_end_t;
-typedef struct wl_tsf {
- uint32 tsf_l;
- uint32 tsf_h;
-} wl_tsf_t;
-
-typedef struct wl_nan_ev_rx_bcn {
- wl_tsf_t tsf;
- uint16 bcn_len;
- uint8 pad[2];
- uint8 bcn[0];
-} wl_nan_ev_rx_bcn_t;
-
-/* reason of host assist request */
-enum wl_nan_host_assist_reason {
- WL_NAN_HAST_REASON_NONE = 0,
-
- /* reason for host assist request */
- WL_NAN_HAST_REASON_NO_CRB = 1, /* NDL: no common NA */
- WL_NAN_HAST_REASON_NDC = 2, /* NDL: NDC not compliant */
- WL_NAN_HAST_REASON_IMMUT = 3, /* NDL: peer immutable schedule */
- WL_NAN_HAST_REASON_RNG = 4, /* NDL: ranging schedule */
- WL_NAN_HAST_REASON_QOS = 5, /* NDL: QoS not satisfied */
- WL_NAN_HAST_REASON_SVC_NDI_MISSING = 6, /* SD: NDI associated with svc is missing */
- WL_NAN_HAST_REASON_PEER_SCB_NORESOURCE = 7 /* NDP: no more peer scb available */
-};
-typedef uint8 wl_nan_host_assist_reason_t;
-
-/* WL_NAN_XTLV_HOST_ASSIST_REQ */
-typedef struct wl_nan_host_assist_req {
- struct ether_addr peer_nmi; /* peer nmi */
- struct ether_addr initiator_ndi; /* initiator ndi */
- uint8 indp_id; /* initiator NDP ID */
- wl_nan_frame_type_t frm_type; /* received NAF type */
- wl_nan_host_assist_reason_t reason; /* reason of host assist request */
- uint8 pub_id; /* Publish ID (valid for WL_NAN_FRM_TYPE_DP_REQ) */
- uint8 pad[2];
-} wl_nan_host_assist_req_t;
-
-/* nan sub-features */
-enum wl_nan_fw_cap_flag1 {
- WL_NAN_FW_CAP_FLAG_NONE = 0x00000000, /* dummy */
- WL_NAN_FW_CAP_FLAG1_AVAIL = 0x00000001,
- WL_NAN_FW_CAP_FLAG1_DISC = 0x00000002,
- WL_NAN_FW_CAP_FLAG1_DATA = 0x00000004,
- WL_NAN_FW_CAP_FLAG1_SEC = 0x00000008,
- WL_NAN_FW_CAP_FLAG1_RANGE = 0x00000010,
- WL_NAN_FW_CAP_FLAG1_WFA_TB = 0x00000020,
- WL_NAN_FW_CAP_FLAG1_DAM = 0x00000040,
- WL_NAN_FW_CAP_FLAG1_DAM_STRICT = 0x00000080,
- WL_NAN_FW_CAP_FLAG1_DAM_AUTO = 0x00000100,
- WL_NAN_FW_CAP_FLAG1_DBG = 0x00000200,
- WL_NAN_FW_CAP_FLAG1_BCMC_IN_NDC = 0x00000400,
- WL_NAN_FW_CAP_FLAG1_CHSTATS = 0x00000800,
- WL_NAN_FW_CAP_FLAG1_ASSOC_COEX = 0x00001000,
- WL_NAN_FW_CAP_FLAG1_FASTDISC = 0x00002000,
- WL_NAN_FW_CAP_FLAG1_NO_ID_GEN = 0x00004000,
- WL_NAN_FW_CAP_FLAG1_DP_OPAQUE_DATA = 0x00008000,
- WL_NAN_FW_CAP_FLAG1_NSR2 = 0x00010000,
- WL_NAN_FW_CAP_FLAG1_NSR2_SAVE = 0x00020000,
- WL_NAN_FW_CAP_FLAG1_NANHO = 0x00040000
-};
-
-/* WL_NAN_XTLV_GEN_FW_CAP */
-typedef struct wl_nan_fw_cap {
- uint32 flags1; /* nan sub-features compiled in firmware */
- uint32 flags2; /* for more sub-features in future */
- uint8 max_svc_publishes; /* max num of service publish */
- uint8 max_svc_subscribes; /* max num of service subscribe */
- uint8 max_lcl_sched_maps; /* max num of local schedule map */
- uint8 max_lcl_ndc_entries; /* max num of local NDC entry */
- uint8 max_lcl_ndi_interfaces; /* max num of local NDI interface */
- uint8 max_peer_entries; /* max num of peer entry */
- uint8 max_ndp_sessions; /* max num of NDP session */
- uint8 max_concurrent_nan_clusters; /* max num of concurrent clusters */
- uint16 max_service_name_len; /* max service name length */
- uint16 max_match_filter_len; /* max match filter length */
- uint16 max_total_match_filter_len; /* max total match filter length */
- uint16 max_service_specific_info_len; /* max service specific info length */
- uint16 max_vsa_data_len; /* max vendor specific attrib data length */
- uint16 max_mesh_data_len; /* max mesh data length */
- uint16 max_app_info_len; /* max app info length */
- uint16 max_sdea_svc_specific_info_len; /* max sdea ser specific info length */
- uint8 max_queued_tx_followup_msgs; /* max no. of queued tx followup msgs */
- uint8 max_subscribe_address; /* max subscribe addresses supported */
- uint8 ndp_supported_bands; /* number of ndp supported bands */
- uint8 is_ndp_security_supported; /* if secure ndp is supported */
- uint8 cipher_suites_supported_mask; /* bitmask for suites supported */
- uint8 pad[3];
-} wl_nan_fw_cap_t;
-
-/* WL_NAN_XTLV_GEN_FW_CAP_V2 */
-typedef struct wl_nan_fw_cap_v2 {
- uint32 flags1; /* nan sub-features compiled in firmware */
- uint32 flags2; /* for more sub-features in future */
- uint8 max_svc_publishes; /* max num of service publish */
- uint8 max_svc_subscribes; /* max num of service subscribe */
- uint8 max_lcl_sched_maps; /* max num of local schedule map */
- uint8 max_lcl_ndc_entries; /* max num of local NDC entry */
- uint8 max_lcl_ndi_interfaces; /* max num of local NDI interface */
- uint8 max_peer_entries; /* max num of peer entry */
- uint8 max_peer_sched_maps; /* max num of peer schedule maps */
- uint8 max_ndp_sessions; /* max num of NDP session */
- uint32 cipher_suites_supported_mask; /* bitmask for supported cipher suites */
- uint32 reserved_uint32_1; /* reserved for future sub-features */
- uint32 reserved_uint32_2; /* reserved for future sub-features */
- uint32 reserved_uint32_3; /* reserved for future sub-features */
- uint32 reserved_uint32_4; /* reserved for future sub-features */
-} wl_nan_fw_cap_v2_t;
-
-/* nan cipher suite support mask bits */
-#define WL_NAN_CIPHER_SUITE_SHARED_KEY_128_MASK 0x01
-#define WL_NAN_CIPHER_SUITE_SHARED_KEY_256_MASK 0x02
-
-/* NAN Save Restore */
-#define WL_NAN_NSR2_INFO_MAX_SIZE 2048 /* arbitrary */
-
-/* WL_NAN_XTLV_NSR2_PEER */
-typedef struct wl_nan_nsr_peer_info {
- struct ether_addr nmi;
- uint8 l_min_slots; /* local QoS min slots */
- uint8 p_min_slots; /* peer QoS min slots */
- uint16 l_max_latency; /* local QoS max latency */
- uint16 p_max_latency; /* peer QoS max latency */
- uint8 num_map; /* num of NA map */
- uint8 pad;
- uint16 attrs_len; /* total len of following attrs */
- uint8 attrs[]; /* peer attributes (NA/NDC/ULW/DevCap/Element container) */
-} wl_nan_nsr_peer_info_t;
-
-enum wl_nan_nsr_ndp_flag {
- WL_NAN_NSR_NDP_FLAG_LCL_INITATOR = 0x0001,
- WL_NAN_NSR_NDP_FLAG_MCAST = 0x0002
-};
-typedef uint16 wl_nan_nsr_ndp_flag_t;
-
-/* WL_NAN_XTLV_NSR2_NDP */
-typedef struct wl_nan_nsr_ndp_info {
- struct ether_addr peer_nmi;
- struct ether_addr peer_ndi;
- struct ether_addr lcl_ndi;
- uint16 flags; /* wl_nan_nsr_ndp_flag_t */
- uint8 pub_id; /* publish id */
- uint8 indp_id; /* initiator's ndp id */
- uint8 last_token; /* last NDP dialog token */
- uint8 pad;
-} wl_nan_nsr_ndp_info_t;
-
-/* NAN2.0 Ranging definitions */
+/* NAN2.0 Ranging definitions */
/* result indication bit map */
-#define NAN_RANGE_INDICATION_NONE 0
#define NAN_RANGE_INDICATION_CONT (1<<0)
#define NAN_RANGE_INDICATION_INGRESS (1<<1)
-#define NAN_RANGE_INDICATION_EGRESS (1<<2)
+#define NAN_RANGE_INIDICATION_EGRESS (1<<2)
/* responder flags */
#define NAN_RANGE_FLAG_AUTO_ACCEPT (1 << 0)
#define NAN_RNG_RESP_IOV_LEN 20
-#define NAN_RNG_TERM_FLAG_IMMEDIATE (1u << 0u) /* Do not wait for TXS */
-#define NAN_RNG_TERM_FLAG_SILIENT_TEARDOWN (1u << 1u) /* Do not TX rng_term */
-#define NAN_RNG_TERM_FLAG_EVENT_HOST (1u << 2u) /* Notify event to host */
-#define NAN_RNG_TERM_FLAG_OPT_TLVS (1u << 3u) /* opt tlvs present */
-
-typedef struct wl_nan_range_cancel_ext {
- wl_nan_range_id range_id;
- uint8 flags;
- uint8 pad[2];
-} wl_nan_range_cancel_ext_t;
-
-#define NAN_RNG_CANCEL_IOV_FIXED_LEN 4u
-
#define NAN_RNG_MAX_IOV_LEN 255
typedef struct wl_nan_ev_rng_req_ind {
uint32 dist_mm; /* in millimeter */
struct ether_addr peer_m_addr;
uint8 indication; /* indication definitions mentioned above */
- uint8 rng_id;
+ uint8 pad;
} wl_nan_ev_rng_rpt_ind_t;
-#define NAN_RNG_RPT_IND_SIZE 12
-
-/* number of continuous ranging crbs which can be idle,
-* after which ranging session will be terminated.
-* Default value is 5. Set to zero for disabling the
-* idle timeout functionality
-*/
-typedef uint8 wl_nan_range_idle_count_t;
-
-/* nan ranging termination reason codes */
-#define NAN_RNG_TERM_UNSPECIFIED 0
-#define NAN_RNG_TERM_IDLE_TIMEOUT 1u /* no ftms from peer */
-#define NAN_RNG_TERM_PEER_REQ 2u
-#define NAN_RNG_TERM_USER_REQ 3u
-#define NAN_RNG_TERM_RNG_RESP_TIMEOUT 4u /* On FSM Timeout, waiting for Resp from peer */
-#define NAN_RNG_TERM_RNG_RESP_REJ 5u /* On range resp, reject from peer */
-#define NAN_RNG_TERM_RNG_TXS_FAIL 6u /* On range req/resp txs fail */
+#define NAN_RNG_RPT_IND_SIZE 11
typedef struct wl_nan_ev_rng_term_ind {
struct ether_addr peer_m_addr;
uint8 reason_code;
- uint8 rng_id;
+ uint8 pad;
} wl_nan_ev_rng_term_ind_t;
-#define NAN_RNG_TERM_IND_SIZE 8
-
-typedef struct wl_nan_ev_rng_resp {
- struct ether_addr peer_m_addr;
- uint8 status;
- uint8 rng_id;
-} wl_nan_ev_rng_resp_t;
-
-/* Used by NDL schedule events -
- * WL_NAN_EVENT_PEER_SCHED_UPD_NOTIF, WL_NAN_EVENT_PEER_SCHED_REQ
- * WL_NAN_EVENT_PEER_SCHED_RESP, WL_NAN_EVENT_PEER_SCHED_CONF
- */
-typedef struct wl_nan_ev_sched_info {
- struct ether_addr peer_nmi;
- uint8 ndl_status; /* applies only to sched resp/conf */
- uint8 pad;
- uint16 opt_tlv_len;
- uint8 opt_tlvs[];
-} wl_nan_ev_sched_info_t;
-
-/* WL_NAN_EVENT_CHAN_BOUNDARY */
-typedef struct wl_nan_chbound_info {
- uint32 cluster_tsf_h; /* Current Cluster TSF High */
- uint32 cluster_tsf_l; /* Current Cluster TSF Low */
- uint16 cur_chspec;
- uint16 opt_tlvs_len;
- uint8 opt_tlvs[];
-} wl_nan_chbound_info_t;
-
-/* channel stats (includes nan & non-nan) */
-
-/* WL_NAN_XTLV_CCA_STATS */
-typedef struct wl_nan_cca_stats {
- uint16 chanspec;
- uint8 pad[2];
- uint32 sample_dur;
-
- uint32 congest_ibss;
- uint32 congest_obss;
- uint32 interference;
-} wl_nan_cca_stats_t;
-
-/* WL_NAN_XTLV_PER_STATS */
-typedef struct wl_nan_per_stats_s {
- uint16 chanspec;
- uint8 pad[2];
- uint32 sample_dur;
-
- uint32 txframe; /* tx data frames */
- uint32 txretrans; /* tx mac retransmits */
- uint32 txerror; /* tx data errors */
- uint32 txctl; /* tx management frames */
- uint32 txserr; /* tx status errors */
-
- uint32 rxframe; /* rx data frames */
- uint32 rxerror; /* rx data errors */
- uint32 rxctl; /* rx management frames */
-
- uint32 txbar; /* tx bar */
- uint32 rxbar; /* rx bar */
- uint32 txaction; /* tx action frame */
- uint32 rxaction; /* rx action frame */
- uint32 txlost; /* lost packets reported in txs */
- uint32 rxback; /* rx block ack */
- uint32 txback; /* tx bloak ack */
-} wl_nan_per_stats_t;
-
-/* fast discovery beacon config
- * WL_NAN_XTLV_CFG_FDISC_TBMP
-*/
-typedef struct wl_nan_fastdisc_s {
- uint8 id;
- uint8 bitmap_len;
- uint8 pad[2];
- uint8 bitmap[];
-} wl_nan_fastdisc_t;
+#define NAN_RNG_TERM_IND_SIZE 7
-#define WL_NAN_FASTDISC_CFG_SIZE 1024 /* arbitrary */
/* ********************* end of NAN section ******************************** */
/* endif WL_NAN */
uint32 avl_bmp; /* availability interval bitmap */
} nan_post_disc_p2p_data_t;
+/* timeslot etc for NAN */
+enum {
+ WL_TMU_TU = 0,
+ WL_TMU_SEC = 1,
+ WL_TMU_MILLI_SEC = 2,
+ WL_TMU_MICRO_SEC = 3,
+ WL_TMU_NANO_SEC = 4,
+ WL_TMU_PICO_SEC = 5
+};
+typedef int16 wl_tmu_t;
+
+typedef struct {
+ uint32 intvl; /* time interval */
+ wl_tmu_t tmu; /* time unit */
+ uint8 pad[2]; /* padding */
+} wl_time_interval_t;
+
+/* availability slot flags */
+enum {
+ WL_AVAIL_SLOT_NONE = 0x0000,
+ WL_AVAIL_SLOT_COM = 0x0001, /* committed */
+ WL_AVAIL_SLOT_POT = 0x0002, /* potential */
+ WL_AVAIL_SLOT_PROP = 0x0004, /* proposed - note: not configurable */
+ WL_AVAIL_SLOT_PAGED = 0x0008 /* P-NDL */
+ /* 0x0030 - reserved for NDC index */
+ /* 0x00c0 - reserved for usage preference */
+};
+typedef int16 wl_avail_slot_flags_t;
+
+#define WL_AVAIL_SLOT_NDC_MASK 0x0030 /* up to 4 NDCs */
+#define WL_AVAIL_SLOT_NDC_SHIFT 4
+#define WL_AVAIL_SLOT_NDC(_flags) (((_flags) & WL_AVAIL_SLOT_NDC_MASK) \
+ >> WL_AVAIL_SLOT_NDC_SHIFT)
+#define WL_AVAIL_SLOT_SET_NDC(_flags, _ndc_idx) (((_flags) & ~WL_AVAIL_SLOT_NDC_MASK) |\
+ ((_ndc_idx) << WL_AVAIL_SLOT_NDC_SHIFT))
+
+#define WL_AVAIL_SLOT_UPREF_MASK 0x00c0 /* up to 4 usage preferences */
+#define WL_AVAIL_SLOT_UPREF_SHIFT 6
+#define WL_AVAIL_SLOT_UPREF(_flags) (((_flags) & WL_AVAIL_SLOT_UPREF_MASK) \
+ >> WL_AVAIL_SLOT_UPREF_SHIFT)
+#define WL_AVAIL_SLOT_SET_UPREF(_flags, _pref) (((_flags) & ~WL_AVAIL_SLOT_UPREF_MASK) |\
+ ((_pref) << WL_AVAIL_SLOT_UPREF_SHIFT))
+
+typedef struct wl_avail_slot {
+ wl_avail_slot_flags_t flags;
+ uint16 PAD;
+ wl_time_interval_t start; /* from time ref */
+ wl_time_interval_t duration; /* from start */
+ uint32 chanspec; /* channel spec */
+} wl_avail_slot_t;
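/*
 * Illustrative sketch (assumes the slot/time-unit types and the NDC and
 * usage-preference macros above, plus C99 designated initializers): a
 * committed 16 TU slot starting 16 TU into the interval, bound to NDC
 * index 1 with usage preference 2. The chanspec value is a placeholder.
 */
static const wl_avail_slot_t example_avail_slot = {
	.flags = WL_AVAIL_SLOT_SET_UPREF(
		WL_AVAIL_SLOT_SET_NDC(WL_AVAIL_SLOT_COM, 1), 2),
	.start = { .intvl = 16, .tmu = WL_TMU_TU },
	.duration = { .intvl = 16, .tmu = WL_TMU_TU },
	.chanspec = 0,			/* placeholder channel spec */
};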
+
+/* time reference */
+enum {
+ WL_TIME_REF_NONE = 0,
+ WL_TIME_REF_DEV_TSF = 1,
+ WL_TIME_REF_NAN_DW = 2,
+ WL_TIME_REF_TBTT = 3,
+ WL_TIME_REF_NAN_DW0 = 4
+};
+typedef int16 wl_time_ref_t;
+
enum {
WL_AVAIL_NONE = 0x0000,
WL_AVAIL_LOCAL = 0x0001,
WL_AVAIL_RESPONSE = 0x0005,
WL_AVAIL_COUNTER = 0x0006,
WL_AVAIL_RANGING = 0x0007,
- WL_AVAIL_UPD_POT = 0x0008, /* modify potential, keep committed/conditional */
- WL_AVAIL_UPD_COM_COND = 0x0009, /* modify committed/conditional, keep potential */
- WL_AVAIL_REMOVE_MAP = 0x000A, /* remove map */
- WL_AVAIL_FRM_TYPE = 0x000B, /* specify frame types containing NA */
- WL_AVAIL_TYPE_MAX = WL_AVAIL_FRM_TYPE /* New ones before and update */
+ WL_AVAIL_TYPE_MAX = WL_AVAIL_RANGING /* New ones before and update */
};
-#define WL_AVAIL_TYPE_MASK 0x000F
-#define WL_AVAIL_FLAG_REMOVE 0x2000 /* remove schedule attr of given type & map id */
-#define WL_AVAIL_FLAG_SELECTED_NDC 0x4000
-#define WL_AVAIL_FLAG_RAW_MODE 0x8000
-#define WL_AVAIL_FLAGS_MASK 0xFF00
-#define WL_AVAIL_FLAGS_SHIFT 8
-
+#define WL_AVAIL_TYPE_MASK 0x000F
+#define WL_AVAIL_FLAG_RAW_MODE 0x8000
typedef int16 wl_avail_flags_t;
/* availability entry flags */
WL_AVAIL_ENTRY_COND = 0x0004, /* conditional */
WL_AVAIL_ENTRY_PAGED = 0x0008, /* P-NDL */
WL_AVAIL_ENTRY_USAGE = 0x0030, /* usage preference */
- WL_AVAIL_ENTRY_BIT_DUR = 0x00C0, /* bit duration */
+ WL_AVAIL_ENTRY_BIT_DUR = 0x00c0, /* bit duration */
WL_AVAIL_ENTRY_BAND_PRESENT = 0x0100, /* band present */
WL_AVAIL_ENTRY_CHAN_PRESENT = 0x0200, /* channel information present */
- WL_AVAIL_ENTRY_CHAN_ENTRY_PRESENT = 0x0400, /* channel entry (opclass+bitmap) */
- /* free to use 0x0800 */
- WL_AVAIL_ENTRY_RXNSS = 0xF000 /* max num of spatial stream RX */
+ WL_AVAIL_ENTRY_CHAN_ENTRY_PRESENT = 0x0400, /* channel entry (opclass+bitmap) */
};
/* bit duration */
WL_AVAIL_BAND_60G = 5, /* reserved (for 60 GHz) */
};
-#define WL_AVAIL_ENTRY_TYPE_MASK 0x000F
-#define WL_AVAIL_ENTRY_USAGE_MASK 0x0030 /* up to 4 usage preferences */
-#define WL_AVAIL_ENTRY_USAGE_SHIFT 4
-#define WL_AVAIL_ENTRY_USAGE_VAL(_flags) (((_flags) & WL_AVAIL_ENTRY_USAGE_MASK) \
+#define WL_AVAIL_ENTRY_TYPE_MASK 0x0F
+#define WL_AVAIL_ENTRY_USAGE_MASK 0x0030 /* up to 4 usage preferences */
+#define WL_AVAIL_ENTRY_USAGE_SHIFT 4
+#define WL_AVAIL_ENTRY_USAGE_VAL(_flags) (((_flags) & WL_AVAIL_ENTRY_USAGE_MASK) \
>> WL_AVAIL_ENTRY_USAGE_SHIFT)
-#define WL_AVAIL_ENTRY_BIT_DUR_MASK 0x00C0 /* 0:16TU, 1:32TU, 2:64TU, 3:128TU */
-#define WL_AVAIL_ENTRY_BIT_DUR_SHIFT 6
-#define WL_AVAIL_ENTRY_BIT_DUR_VAL(_flags) (((_flags) & WL_AVAIL_ENTRY_BIT_DUR_MASK) \
+#define WL_AVAIL_ENTRY_BIT_DUR_MASK 0x00c0 /* 0:16TU, 1:32TU, 2:64TU, 3:128TU */
+#define WL_AVAIL_ENTRY_BIT_DUR_SHIFT 6
+#define WL_AVAIL_ENTRY_BIT_DUR_VAL(_flags) (((_flags) & WL_AVAIL_ENTRY_BIT_DUR_MASK) \
>> WL_AVAIL_ENTRY_BIT_DUR_SHIFT)
-#define WL_AVAIL_ENTRY_BAND_MASK 0x0100 /* 0=band not present, 1=present */
-#define WL_AVAIL_ENTRY_BAND_SHIFT 8
+#define WL_AVAIL_ENTRY_BAND_MASK 0x0100 /* 0=band not present, 1=present */
+#define WL_AVAIL_ENTRY_BAND_SHIFT 8
-#define WL_AVAIL_ENTRY_CHAN_MASK 0x0200 /* 0=channel info not present, 1=present */
-#define WL_AVAIL_ENTRY_CHAN_SHIFT 9
+#define WL_AVAIL_ENTRY_CHAN_MASK 0x0200 /* 0=channel info not present, 1=present */
+#define WL_AVAIL_ENTRY_CHAN_SHIFT 9
-#define WL_AVAIL_ENTRY_CHAN_ENTRY_MASK 0x0400 /* 0=chanspec, 1=hex channel entry */
-#define WL_AVAIL_ENTRY_CHAN_ENTRY_SHIFT 10
+#define WL_AVAIL_ENTRY_CHAN_ENTRY_MASK 0x0400 /* 0=chanspec, 1=hex channel entry */
+#define WL_AVAIL_ENTRY_CHAN_ENTRY_SHIFT 10
-#define WL_AVAIL_ENTRY_RXNSS_MASK 0xF000
-#define WL_AVAIL_ENTRY_RXNSS_SHIFT 12
-#define WL_AVAIL_ENTRY_RXNSS_VAL(_flags) (((_flags) & WL_AVAIL_ENTRY_RXNSS_MASK) \
- >> WL_AVAIL_ENTRY_RXNSS_SHIFT)
-#define WL_AVAIL_ENTRY_RXNSS_MAX 15 /* 0-15 */
-
-/* mask for channel_entry (to be obsoleted) */
-#define WL_AVAIL_ENTRY_OPCLASS_MASK 0xFF
-#define WL_AVAIL_ENTRY_CHAN_BITMAP_MASK 0xFF00
-#define WL_AVAIL_ENTRY_CHAN_BITMAP_SHIFT 8
-#define WL_AVAIL_ENTRY_CHAN_BITMAP_VAL(_info) (((_info) & WL_AVAIL_ENTRY_CHAN_BITMAP_MASK) \
+#define WL_AVAIL_ENTRY_OPCLASS_MASK 0xFF
+#define WL_AVAIL_ENTRY_CHAN_BITMAP_MASK 0xFF00
+#define WL_AVAIL_ENTRY_CHAN_BITMAP_SHIFT 8
+#define WL_AVAIL_ENTRY_CHAN_BITMAP_VAL(_info) (((_info) & WL_AVAIL_ENTRY_CHAN_BITMAP_MASK) \
>> WL_AVAIL_ENTRY_CHAN_BITMAP_SHIFT)
/* Used for raw channel entry field input */
-#define MAX_CHAN_ENTRY_LEN 6
+#define MAX_CHAN_ENTRY_LEN 6
typedef struct wl_avail_entry {
uint16 length; /* total length */
* WL_AVAIL_ENTRY_HEX_CHAN_ENTRY flag
*/
uint32 band; /* defined by WL_BAND enum, 2=2.4GHz, 4=5GHz */
- uint8 channel_entry[MAX_CHAN_ENTRY_LEN];
- uint8 align[8]; /* aligned len of union in structure (not for use)
- * if member of union is changed,
- * update length of align[] accordingly.
- */
+ uint8 channel_entry[MAX_CHAN_ENTRY_LEN];
} u; /* band or channel value, 0=all band/channels */
- uint8 sched_map_id; /* avail map id associated with sched entry */
- uint8 pad;
+ uint8 pad[2];
uint8 period; /* in TUs, defined by WL_AVAIL_PERIOD enum
* 1:128, 2:256, 3:512, 4:1024, 5:2048, 6:4096,
* 7:8192
*/
uint8 bitmap_len;
uint16 flags; /* defined by avail entry flags enum:
- * type, usage pref, bit duration, rx nss,
- * and band, channel or channel entry
+ * type, usage pref, bit duration, band, channel
*/
uint8 bitmap[]; /* time bitmap */
} wl_avail_entry_t;
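/*
 * Illustrative sketch (assumes the entry flag masks above): recovering the
 * usage preference and the bit duration, in TUs, from an availability
 * entry's flags field.
 */
static uint16 avail_entry_usage_pref(const wl_avail_entry_t *e)
{
	return (uint16)WL_AVAIL_ENTRY_USAGE_VAL(e->flags);	/* 0..3 */
}

static uint16 avail_entry_bit_dur_tu(const wl_avail_entry_t *e)
{
	/* encoded as 0:16TU, 1:32TU, 2:64TU, 3:128TU */
	return (uint16)(16u << WL_AVAIL_ENTRY_BIT_DUR_VAL(e->flags));
}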
-#define WL_AVAIL_VERSION 1 /* current wl_avail version */
-
typedef struct wl_avail {
uint16 length; /* total length */
- uint16 flags; /* LSB - avail type (defined by WL_AVAIL enum)
- * MSB - avail flags
+ uint16 flags; /* defined by WL_AVAIL enum
+ * 1=local, 2=peer, 3=ndc, 4=immutable,
+ * 5=response, 6=counter
*/
uint8 id; /* id used for multiple maps/avail */
- uint8 lndc_id; /* ndc id used in multi-ndc case */
- uint8 version;
- uint8 pad;
- struct ether_addr addr; /* peer mac address or ndc id */
+ uint8 pad[3];
+ struct ether_addr addr; /* peer mac address or ndc id */
uint8 num_entries;
- uint8 unused_byte;
+ uint8 entry_offset;
/* add additional fields above this line */
uint8 entry[];
} wl_avail_t;
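/*
 * Illustrative sketch: walking the variable-length entry list of a
 * wl_avail_t. This assumes entries are packed back to back in entry[] and
 * that each entry's length field covers that whole entry, including its
 * bitmap; any inter-entry alignment or padding is not handled here.
 */
static uint16 avail_entries_len(const wl_avail_t *avail)
{
	const uint8 *p = avail->entry;
	uint16 total = 0;
	uint8 i;

	for (i = 0; i < avail->num_entries; i++) {
		const wl_avail_entry_t *e = (const wl_avail_entry_t *)p;

		total += e->length;	/* per-entry length, see wl_avail_entry_t */
		p += e->length;
	}
	return total;
}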
WL_NAN_WFA_TM_SEC_REJECT_STATUS4M4 = 0x00000080,
/* send mgmt frame (for eg. ndp terminate) in clear txt (bypass security) */
WL_NAN_WFA_TM_SEC_SEND_MGMT_CLEAR = 0x00000100,
- /* validate qos */
- WL_NAN_WFA_TM_NDL_QOS_VALIDATE = 0x00000200,
- /* firmware generated schedule update */
- WL_NAN_WFA_TM_GEN_SCHED_UPD = 0x00000400,
- /* add lower 4-bytes of TSF to configured start time */
- WL_NAN_WFA_TM_ULW_START_TIME = 0x00000800,
- /* enable schedule validation for SDF */
- WL_NAN_WFA_TM_SDF_SCHED_VALIDATE = 0x00001000,
- /* by pass faw na iovar */
- WL_NAN_WFA_TM_SKIP_RAW_NA_BLOB = 0x00002000,
- /* overwrite local NA with peer NA in received frame */
- WL_NAN_WFA_TM_LOCAL_NA_OVERWRITE = 0x00004000,
- /* randomize and self configure ndl qos(needed at responder in auto mode) */
- WL_NAN_WFA_TM_SELF_CFG_NDL_QOS = 0x00008000,
- /* send NAF frames only in DW */
- WL_NAN_WFA_TM_SEND_NAF_IN_DW = 0x00010000,
- /* restrict channels used for countered slots to Ch 6/149 only */
- WL_NAN_WFA_TM_RESTRICT_COUNTER_SLOTS_CHAN = 0x00020000,
- /* NDPE negative test case (4.2.5 & 4.2.6) */
- WL_NAN_WFA_TM_NDPE_NEGATIVE_TEST_TB = 0x00040000,
- /* Set NDPE(NAN3.0) capable bit in dev cap attr */
- WL_NAN_WFA_TM_ENABLE_NDPE_CAP = 0x00080000,
- /* NDPE negative test case (4.2.5.2). Enable both NDP and NDPE attributes */
- WL_NAN_WFA_TM_ENABLE_NDP_NDPE_ATTR = 0x00100000,
-
- /* add above & update mask */
- WL_NAN_WFA_TM_FLAG_MASK = 0x001FFFFF
+ WL_NAN_WFA_TM_FLAG_MASK = 0x000001ff /* add above & update mask */
};
typedef uint32 wl_nan_wfa_testmode_t;
-/* To be removed; replaced by wl_nan_vndr_payload */
-typedef struct wl_nan_vndr_ie {
- uint32 flags; /* bitmask indicating which packet(s) contain this IE */
- uint16 body_len; /* length of body (does not include oui field) */
- uint8 pad[2];
- uint8 oui[DOT11_OUI_LEN];
- uint8 pad2;
- uint8 body[]; /* vendor IE payload */
-} wl_nan_vndr_ie_t;
-
-typedef struct wl_nan_vndr_payload {
- uint32 flags; /* bitmask indicating which packet(s) contain payload */
- uint16 payload_len; /* length of payload */
- uint8 pad[2];
- uint8 payload[]; /* payload to be appended to NAN frame */
-} wl_nan_vndr_payload_t;
-
-typedef struct wl_nan_dev_cap {
- uint8 bands[NAN_MAX_BANDS];
- uint8 awake_dw[NAN_MAX_BANDS];
- uint8 overwrite_mapid[NAN_MAX_BANDS];
- uint8 mapid; /* dev cap mapid */
- uint8 all_maps; /* applies to device */
- uint8 paging;
- uint8 pad[3];
-} wl_nan_dev_cap_t;
-
-/* arbitrary max len for frame template */
-#define WL_NAN_FRM_TPLT_MAX_LEN 1024
-
-typedef struct wl_nan_frm_tplt {
- wl_nan_frame_type_t type;
- uint8 pad;
- uint16 len; /* length of template */
- uint8 data[]; /* template */
-} wl_nan_frm_tplt_t;
-
#define RSSI_THRESHOLD_SIZE 16
#define MAX_IMP_RESP_SIZE 256
#include <packed_section_end.h>
#define WL_PROXD_COLLECT_DATA_VERSION_2 2
-#include <packed_section_start.h>
-typedef BWL_PRE_PACKED_STRUCT struct wl_proxd_collect_data_v2 {
- wl_proxd_collect_info_t info;
- uint8 ri_rr[FTM_TPK_RI_RR_LEN_SECURE_2_0];
- /**< raw data read from phy used to adjust timestamps */
- uint32 H[K_TOF_COLLECT_H_SIZE_20MHZ];
-} BWL_POST_PACKED_STRUCT wl_proxd_collect_data_t_v2;
-#include <packed_section_end.h>
-
-#define WL_PROXD_COLLECT_DATA_VERSION_3 3
-typedef struct wl_proxd_collect_data_v3 {
+typedef struct wl_proxd_collect_data_v2 {
uint16 version;
uint16 len;
wl_proxd_collect_info_t info;
- uint8 ri_rr[FTM_TPK_RI_RR_LEN_SECURE_2_0];
+ uint8 ri_rr[FTM_TPK_RI_RR_LEN];
+ uint8 pad[3]; /* should be based on FTM_TPK_RI_RR_LEN */
/**< raw data read from phy used to adjust timestamps */
uint32 H[K_TOF_COLLECT_H_SIZE_20MHZ];
uint32 chan[4 * K_TOF_COLLECT_CHAN_SIZE];
-} wl_proxd_collect_data_t_v3;
-#define WL_PROXD_COLLECT_DATA_VERSION_MAX WL_PROXD_COLLECT_DATA_VERSION_3
+} wl_proxd_collect_data_t_v2;
+#define WL_PROXD_COLLECT_DATA_VERSION_MAX WL_PROXD_COLLECT_DATA_VERSION_2
typedef struct wl_proxd_debug_data {
uint8 count; /**< number of packets */
WL_WSEC_INFO_BSS_ALGO = (WL_WSEC_INFO_BSS_BASE + 4),
WL_WSEC_INFO_BSS_KEY_LEN = (WL_WSEC_INFO_BSS_BASE + 5),
WL_WSEC_INFO_BSS_ALGOS = (WL_WSEC_INFO_BSS_BASE + 6),
- WL_WSEC_INFO_BSS_WPA_AP_RESTRICT = (WL_WSEC_INFO_BSS_BASE + 7),
/* add per-BSS properties above */
WL_WSEC_INFO_MAX = 0xffff
} wl_wsec_info_type_t;
uint8 num_tlvs;
wl_wsec_info_tlv_t tlvs[1]; /**< tlv data follows */
} wl_wsec_info_t;
-#define AP_BLOCK_NONE 0x0000 /* default: No restriction */
-#define AP_ALLOW_WPA2 0x0001 /* allow WPA2PSK AP */
-#define AP_ALLOW_TSN 0x0002 /* WPA3 transition AP */
-#define AP_ALLOW_WPA3_ONLY 0x0004 /* WPA3 only AP */
-#define AP_ALLOW_MAX (AP_ALLOW_WPA2 | AP_ALLOW_TSN | \
- AP_ALLOW_WPA3_ONLY)
-typedef struct {
- uint32 wpa_ap_restrict; /* set WPA2 / WPA3 AP restriction policy */
-} wl_wsec_info_wpa_ap_restrict_t;
/*
* randmac definitions
#define WL_RANDMAC_USER_FTM 0x0001
#define WL_RANDMAC_USER_NAN 0x0002
#define WL_RANDMAC_USER_SCAN 0x0004
-#define WL_RANDMAC_USER_ANQP 0x0008
#define WL_RANDMAC_USER_ALL 0xFFFF
typedef uint16 wl_randmac_method_t;
#define WL_SCANMAC_SCAN_ASSOC_ROAM (0x01 << 1) /**< associated roam scans */
#define WL_SCANMAC_SCAN_ASSOC_PNO (0x01 << 2) /**< associated PNO scans */
#define WL_SCANMAC_SCAN_ASSOC_HOST (0x01 << 3) /**< associated host scans */
-
-#define WL_SCAN_EVENT_VERSION 1
-
-#define WL_SCAN_TYPE_ASSOC 0x1 /* Assoc scan */
-#define WL_SCAN_TYPE_ROAM 0x2 /* Roam scan */
-#define WL_SCAN_TYPE_FWSCAN 0x4 /* Other FW scan */
-#define WL_SCAN_TYPE_HOSTSCAN 0x8 /* Host scan */
-
-typedef struct scan_event_data {
- uint32 version;
- uint32 flags;
- uint16 num_chan_slice0;
- uint16 num_chan_slice1;
- /* Will contain num_chan_slice0 followed by num_chan_slice1 chanspecs */
- chanspec_t scan_chan_list[];
-} scan_event_data_t;
-
/*
* bonjour dongle offload definitions
*/
uint16 reps;
} statreq_t;
-typedef struct txstrmreq {
- struct ether_addr da; /* Destination address */
- uint16 random_int; /* Random interval for measurement start */
- uint16 dur; /* Measurement duration */
- uint16 reps; /* number of repetitions */
- struct ether_addr peer; /* Peer MAC address */
- uint8 tid; /* Traffic ID */
- uint8 bin0_range; /* Delay range of the first bin */
-} txstrmreq_t;
-
-typedef struct lcireq {
- struct ether_addr da; /* Destination address */
- uint16 reps; /* number of repetitions */
- uint8 subj; /* Local/Remote/Thid party */
- uint8 lat_res; /* Latitude requested Resolution */
- uint8 lon_res; /* Longitude requested Resolution */
- uint8 alt_res; /* Altitude requested Resolution */
-} lcireq_t;
-
-typedef struct civicreq {
- struct ether_addr da; /* Destination address */
- uint16 reps; /* number of repetitions */
- uint8 subj; /* Local/Remote/Thid party */
- uint8 civloc_type; /* Format of location info */
- uint8 siu; /* Unit of Location service interval */
- uint8 pad;
- uint16 si; /* Location service interval */
-} civicreq_t;
-
-typedef struct locidreq {
- struct ether_addr da; /* Destination address */
- uint16 reps; /* number of repetitions */
- uint8 subj; /* Local/Remote/Thid party */
- uint8 siu; /* Unit of Location service interval */
- uint16 si; /* Location service interval */
-} locidreq_t;
-
typedef struct wl_rrm_config_ioc {
uint16 version; /* command version */
uint16 id; /* subiovar cmd ID */
WL_RRM_CONFIG_SET_LCI = 2, /* set LCI */
WL_RRM_CONFIG_GET_CIVIC = 3, /* get civic location */
WL_RRM_CONFIG_SET_CIVIC = 4, /* set civic location */
- WL_RRM_CONFIG_GET_LOCID = 5, /* get location identifier */
- WL_RRM_CONFIG_SET_LOCID = 6, /* set location identifier */
- WL_RRM_CONFIG_MAX = 7
+ WL_RRM_CONFIG_MAX = 5
};
#define WL_RRM_CONFIG_NAME "rrm_config"
uint8 flags;
} wl_el_tag_params_t;
-#define EVENT_LOG_SET_TYPE_CURRENT_VERSION 0
-typedef struct wl_el_set_type_s {
- uint16 version;
- uint16 len;
- uint8 set; /* Set number */
- uint8 type; /* Type- EVENT_LOG_SET_TYPE_DEFAULT or EVENT_LOG_SET_TYPE_PRSRV */
- uint16 PAD;
-} wl_el_set_type_t;
+/** Video Traffic Interference Monitor config */
+#define INTFER_VERSION 1
+typedef struct wl_intfer_params {
+ uint16 version; /**< version */
+ uint8 period; /**< sample period */
+ uint8 cnt; /**< sample cnt */
+ uint8 txfail_thresh; /**< non-TCP txfail threshold */
+ uint8 tcptxfail_thresh; /**< tcptxfail threshold */
+} wl_intfer_params_t;
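/*
 * Illustrative sketch (assumes the wl_intfer_params_t layout above and C99
 * designated initializers): one possible configuration; every threshold
 * below is a placeholder rather than a recommended value.
 */
static const wl_intfer_params_t example_intfer_cfg = {
	.version = INTFER_VERSION,
	.period = 1,			/* sample period (placeholder) */
	.cnt = 20,			/* sample count (placeholder) */
	.txfail_thresh = 15,		/* non-TCP txfail threshold (placeholder) */
	.tcptxfail_thresh = 15,		/* TCP txfail threshold (placeholder) */
};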
typedef struct wl_staprio_cfg {
struct ether_addr ea; /**< mac addr */
uint8 prio; /**< scb priority */
} wl_staprio_cfg_t;
-#define STAMON_STACONFIG_VER 1
-/* size of struct wlc_stamon_sta_config_t elements */
-#define STAMON_STACONFIG_LENGTH 20
-
typedef enum wl_stamon_cfg_cmd_type {
STAMON_CFG_CMD_DEL = 0,
STAMON_CFG_CMD_ADD = 1,
STAMON_CFG_CMD_DSB = 3,
STAMON_CFG_CMD_CNT = 4,
STAMON_CFG_CMD_RSTCNT = 5,
- STAMON_CFG_CMD_GET_STATS = 6,
- STAMON_CFG_CMD_SET_MONTIME = 7
+ STAMON_CFG_CMD_GET_STATS = 6
} wl_stamon_cfg_cmd_type_t;
typedef struct wlc_stamon_sta_config {
- wl_stamon_cfg_cmd_type_t cmd; /**< 0 - delete, 1 - add */
+ wl_stamon_cfg_cmd_type_t cmd; /**< 0 - delete, 1 - add */
struct ether_addr ea;
- uint16 version; /* Command structure version */
- uint16 length; /* Command structure length */
- uint8 pad[2];
- /* Time (ms) for which STA's are monitored. Value ZERO indicates no time limit */
- uint32 monitor_time;
+ uint8 PAD[2];
} wlc_stamon_sta_config_t;
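/*
 * Illustrative sketch (assumes the wlc_stamon_sta_config_t layout above):
 * preparing an "add" entry for a given STA address.
 */
static void stamon_add_init(wlc_stamon_sta_config_t *cfg,
	const struct ether_addr *sta)
{
	cfg->cmd = STAMON_CFG_CMD_ADD;
	cfg->ea = *sta;
	cfg->PAD[0] = 0;
	cfg->PAD[1] = 0;
}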
/* ifdef SR_DEBUG */
GPAIO_OFF,
GPAIO_PMU_LOGENLDO,
GPAIO_PMU_RXLDO2G,
- GPAIO_PMU_RXLDO5G,
- GPAIO_PMU_LPFTXLDO,
- GPAIO_PMU_LDO1P6,
- GPAIO_RCAL,
- GPAIO_IQDAC_BUF_DC_MEAS,
- GPAIO_IQDAC_BUF_DC_CLEAR,
- GPAIO_DAC_IQ_DC_RDBK,
- GPAIO_DAC_IQ_DC_RDBK_CLEAR,
- GPAIO_AFE_LDO_FOR_DAC_DC,
- GPAIO_PA5G_VCAS_SOURCE,
- GPAIO_BIQ2_DC_MEAS,
- GPAIO_BIQ2_DC_CLEAR,
- GPAIO_VBATMONITOR,
- GPAIO_PA5G_VCAS_GMDRAIN
+ GPAIO_PMU_RXLDO5G
} wl_gpaio_option_t;
/** IO Var Operations - the Value of iov_op In wlc_ap_doiovar */
uint16 combo4; /* mws ant selection 4 */
} mws_ant_map_t;
-/* MWS ANT map 2nd generation */
-typedef struct {
- uint16 combo[16]; /* mws ant selection 2nd */
-} mws_ant_map_t_2nd;
-
/* MWS SCAN_REQ Bitmap */
typedef struct mws_scanreq_params {
uint16 idx;
}
wl_band_t;
-#define WL_ROAM_STATS_VER_1 (1u) /**< current version of wl_if_stats structure */
-
-/** roam statistics counters */
-typedef struct {
- uint16 version; /**< version of the structure */
- uint16 length; /**< length of the entire structure */
- uint32 initial_assoc_time;
- uint32 prev_roam_time;
- uint32 last_roam_event_type;
- uint32 last_roam_event_status;
- uint32 last_roam_event_reason;
- uint16 roam_success_cnt;
- uint16 roam_fail_cnt;
- uint16 roam_attempt_cnt;
- uint16 max_roam_target_cnt;
- uint16 min_roam_target_cnt;
- uint16 max_cached_ch_cnt;
- uint16 min_cached_ch_cnt;
- uint16 partial_roam_scan_cnt;
- uint16 full_roam_scan_cnt;
- uint16 most_roam_reason;
- uint16 most_roam_reason_cnt;
-} wl_roam_stats_v1_t;
-
#define WL_WLC_VERSION_T_VERSION 1 /**< current version of wlc_version structure */
/** wlc interface version */
}
wl_wlc_version_t;
-#define WL_SCAN_VERSION_T_VERSION 1 /**< current version of scan_version structure */
-/** scan interface version */
-typedef struct wl_scan_version {
- uint16 version; /**< version of the structure */
- uint16 length; /**< length of the entire structure */
-
- /* scan interface version numbers */
- uint16 scan_ver_major; /**< scan interface major version number */
-} wl_scan_version_t;
-
/* Highest version of WLC_API_VERSION supported */
#define WLC_API_VERSION_MAJOR_MAX 8
#define WLC_API_VERSION_MINOR_MAX 0
WL_PROXD_SESSION_FLAG_TX_AUTO_BURST = 0x00000200, /**< Same as proxd flags above */
WL_PROXD_SESSION_FLAG_NAN_BSS = 0x00000400, /**< Use NAN BSS, if applicable */
WL_PROXD_SESSION_FLAG_TS1 = 0x00000800, /**< e.g. FTM1 - ASAP-capable */
- WL_PROXD_SESSION_FLAG_RANDMAC = 0x00001000, /**< use random mac */
WL_PROXD_SESSION_FLAG_REPORT_FAILURE = 0x00002000, /**< report failure to target */
WL_PROXD_SESSION_FLAG_INITIATOR_RPT = 0x00004000, /**< report distance to target */
WL_PROXD_SESSION_FLAG_NOCHANSWT = 0x00008000,
/** commands that can apply to proxd, method or a session */
enum {
- WL_PROXD_CMD_NONE = 0,
+ WL_PROXD_CMD_NONE = 0,
WL_PROXD_CMD_GET_VERSION = 1,
- WL_PROXD_CMD_ENABLE = 2,
- WL_PROXD_CMD_DISABLE = 3,
- WL_PROXD_CMD_CONFIG = 4,
- WL_PROXD_CMD_START_SESSION = 5,
- WL_PROXD_CMD_BURST_REQUEST = 6,
- WL_PROXD_CMD_STOP_SESSION = 7,
- WL_PROXD_CMD_DELETE_SESSION = 8,
- WL_PROXD_CMD_GET_RESULT = 9,
- WL_PROXD_CMD_GET_INFO = 10,
- WL_PROXD_CMD_GET_STATUS = 11,
- WL_PROXD_CMD_GET_SESSIONS = 12,
- WL_PROXD_CMD_GET_COUNTERS = 13,
- WL_PROXD_CMD_CLEAR_COUNTERS = 14,
- WL_PROXD_CMD_COLLECT = 15, /* not supported, see 'wl proxd_collect' */
- WL_PROXD_CMD_TUNE = 16, /* not supported, see 'wl proxd_tune' */
- WL_PROXD_CMD_DUMP = 17,
+ WL_PROXD_CMD_ENABLE = 2,
+ WL_PROXD_CMD_DISABLE = 3,
+ WL_PROXD_CMD_CONFIG = 4,
+ WL_PROXD_CMD_START_SESSION = 5,
+ WL_PROXD_CMD_BURST_REQUEST = 6,
+ WL_PROXD_CMD_STOP_SESSION = 7,
+ WL_PROXD_CMD_DELETE_SESSION = 8,
+ WL_PROXD_CMD_GET_RESULT = 9,
+ WL_PROXD_CMD_GET_INFO = 10,
+ WL_PROXD_CMD_GET_STATUS = 11,
+ WL_PROXD_CMD_GET_SESSIONS = 12,
+ WL_PROXD_CMD_GET_COUNTERS = 13,
+ WL_PROXD_CMD_CLEAR_COUNTERS = 14,
+ WL_PROXD_CMD_COLLECT = 15, /* not supported, see 'wl proxd_collect' */
+ WL_PROXD_CMD_TUNE = 16, /* not supported, see 'wl proxd_tune' */
+ WL_PROXD_CMD_DUMP = 17,
WL_PROXD_CMD_START_RANGING = 18,
WL_PROXD_CMD_STOP_RANGING = 19,
- WL_PROXD_CMD_GET_RANGING_INFO = 20,
- WL_PROXD_CMD_IS_TLV_SUPPORTED = 21,
+ WL_PROXD_CMD_GET_RANGING_INFO = 20,
+ WL_PROXD_CMD_IS_TLV_SUPPORTED = 21,
WL_PROXD_CMD_MAX
};
typedef uint16 wl_proxd_session_id_t;
-/* Use WL_PROXD_E_* errorcodes from this file if BCMUTILS_ERR_CODES not defined */
-#ifndef BCMUTILS_ERR_CODES
-
/** status - TBD BCME_ vs proxd status - range reserved for BCME_ */
enum {
- WL_PROXD_E_LAST = -1056,
WL_PROXD_E_NOAVAIL = -1056,
WL_PROXD_E_EXT_SCHED = -1055,
- WL_PROXD_E_NOT_BCM = -1054,
+ WL_PROXD_E_NOT_BCM = -1054,
WL_PROXD_E_FRAME_TYPE = -1053,
WL_PROXD_E_VERNOSUPPORT = -1052,
WL_PROXD_E_SEC_NOKEY = -1051,
WL_PROXD_E_SEC_POLICY = -1050,
WL_PROXD_E_SCAN_INPROCESS = -1049,
WL_PROXD_E_BAD_PARTIAL_TSF = -1048,
- WL_PROXD_E_SCANFAIL = -1047,
- WL_PROXD_E_NOTSF = -1046,
- WL_PROXD_E_POLICY = -1045,
+ WL_PROXD_E_SCANFAIL = -1047,
+ WL_PROXD_E_NOTSF = -1046,
+ WL_PROXD_E_POLICY = -1045,
WL_PROXD_E_INCOMPLETE = -1044,
WL_PROXD_E_OVERRIDDEN = -1043,
WL_PROXD_E_ASAP_FAILED = -1042,
WL_PROXD_E_NOTSTARTED = -1041,
WL_PROXD_E_INVALIDMEAS = -1040,
WL_PROXD_E_INCAPABLE = -1039,
- WL_PROXD_E_MISMATCH = -1038,
+ WL_PROXD_E_MISMATCH = -1038,
WL_PROXD_E_DUP_SESSION = -1037,
WL_PROXD_E_REMOTE_FAIL = -1036,
- WL_PROXD_E_REMOTE_INCAPABLE = -1035,
+ WL_PROXD_E_REMOTE_INCAPABLE = -1035,
WL_PROXD_E_SCHED_FAIL = -1034,
- WL_PROXD_E_PROTO = -1033,
- WL_PROXD_E_EXPIRED = -1032,
- WL_PROXD_E_TIMEOUT = -1031,
- WL_PROXD_E_NOACK = -1030,
- WL_PROXD_E_DEFERRED = -1029,
+ WL_PROXD_E_PROTO = -1033,
+ WL_PROXD_E_EXPIRED = -1032,
+ WL_PROXD_E_TIMEOUT = -1031,
+ WL_PROXD_E_NOACK = -1030,
+ WL_PROXD_E_DEFERRED = -1029,
WL_PROXD_E_INVALID_SID = -1028,
- WL_PROXD_E_REMOTE_CANCEL = -1027,
- WL_PROXD_E_CANCELED = -1026, /**< local */
+ WL_PROXD_E_REMOTE_CANCEL = -1027,
+ WL_PROXD_E_CANCELED = -1026, /**< local */
WL_PROXD_E_INVALID_SESSION = -1025,
WL_PROXD_E_BAD_STATE = -1024,
- WL_PROXD_E_START = -1024,
- WL_PROXD_E_ERROR = -1,
- WL_PROXD_E_OK = 0
+ WL_PROXD_E_ERROR = -1,
+ WL_PROXD_E_OK = 0
};
typedef int32 wl_proxd_status_t;
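/*
 * Illustrative sketch: mapping a few of the wl_proxd_status_t codes above
 * to readable strings for host-side logging; codes not listed fall through
 * to a generic label.
 */
static const char *proxd_status_str(wl_proxd_status_t status)
{
	switch (status) {
	case WL_PROXD_E_OK:		return "ok";
	case WL_PROXD_E_TIMEOUT:	return "timeout";
	case WL_PROXD_E_NOACK:		return "no ack";
	case WL_PROXD_E_CANCELED:	return "canceled (local)";
	case WL_PROXD_E_INVALID_SESSION: return "invalid session";
	default:			return "error";
	}
}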
-#endif /* BCMUTILS_ERR_CODES */
-
/* proxd errors from phy */
#define PROXD_TOF_INIT_ERR_BITS 16
/** session states */
enum {
- WL_PROXD_SESSION_STATE_NONE = 0,
+ WL_PROXD_SESSION_STATE_NONE = 0,
WL_PROXD_SESSION_STATE_CREATED = 1,
WL_PROXD_SESSION_STATE_CONFIGURED = 2,
WL_PROXD_SESSION_STATE_STARTED = 3,
/** RTT sample flags */
enum {
- WL_PROXD_RTT_SAMPLE_NONE = 0x00,
+ WL_PROXD_RTT_SAMPLE_NONE = 0x00,
WL_PROXD_RTT_SAMPLE_DISCARD = 0x01
};
typedef uint8 wl_proxd_rtt_sample_flags_t;
typedef uint16 wl_proxd_snr_t;
typedef uint16 wl_proxd_bitflips_t;
-/** result flags */
-enum {
- WL_PRXOD_RESULT_FLAG_NONE = 0x0000,
- WL_PROXD_RESULT_FLAG_NLOS = 0x0001, /**< LOS - if available */
- WL_PROXD_RESULT_FLAG_LOS = 0x0002, /**< NLOS - if available */
- WL_PROXD_RESULT_FLAG_FATAL = 0x0004, /**< Fatal error during burst */
- WL_PROXD_RESULT_FLAG_VHTACK = 0x0008, /* VHTACK or Legacy ACK used */
- WL_PROXD_REQUEST_SENT = 0x0010, /* FTM request was sent */
- WL_PROXD_REQUEST_ACKED = 0x0020, /* FTM request was acked */
- WL_PROXD_LTFSEQ_STARTED = 0x0040, /* LTF sequence started */
- WL_PROXD_RESULT_FLAG_ALL = 0xffff
-};
-typedef int16 wl_proxd_result_flags_t;
-
-#define WL_PROXD_RTT_SAMPLE_VERSION_1 1
-typedef struct wl_proxd_rtt_sample_v1 {
- uint8 id; /**< id for the sample - non-zero */
+typedef struct wl_proxd_rtt_sample {
+ uint8 id; /**< id for the sample - non-zero */
wl_proxd_rtt_sample_flags_t flags;
wl_proxd_rssi_t rssi;
- wl_proxd_intvl_t rtt; /**< round trip time */
+ wl_proxd_intvl_t rtt; /**< round trip time */
uint32 ratespec;
wl_proxd_snr_t snr;
wl_proxd_bitflips_t bitflips;
wl_proxd_bitflips_t tof_tgt_bitflips;
uint8 coreid;
uint8 pad[3];
-} wl_proxd_rtt_sample_v1_t;
+} wl_proxd_rtt_sample_t;
-#define WL_PROXD_RTT_RESULT_VERSION_1 1
-/** rtt measurement result */
-typedef struct wl_proxd_rtt_result_v1 {
- wl_proxd_session_id_t sid;
- wl_proxd_result_flags_t flags;
- wl_proxd_status_t status;
- struct ether_addr peer;
- wl_proxd_session_state_t state; /**< current state */
- union {
- wl_proxd_intvl_t retry_after; /* hint for errors */
- wl_proxd_intvl_t burst_duration; /* burst duration */
- } u;
- wl_proxd_rtt_sample_v1_t avg_rtt;
- uint32 avg_dist; /* 1/256m units */
- uint16 sd_rtt; /* RTT standard deviation */
- uint8 num_valid_rtt; /* valid rtt cnt */
- uint8 num_ftm; /* actual num of ftm cnt (Configured) */
- uint16 burst_num; /* in a session */
- uint16 num_rtt; /* 0 if no detail */
- uint16 num_meas; /* number of ftm frames seen OTA */
- uint8 pad[2];
- wl_proxd_rtt_sample_v1_t rtt[1]; /* variable */
-} wl_proxd_rtt_result_v1_t;
-
-#define WL_PROXD_RTT_SAMPLE_VERSION_2 2
-typedef struct wl_proxd_rtt_sample_v2 {
- uint16 version;
- uint16 length;
- uint8 id; /**< id for the sample - non-zero */
- wl_proxd_rtt_sample_flags_t flags;
- wl_proxd_rssi_t rssi;
- wl_proxd_intvl_t rtt; /**< round trip time */
- uint32 ratespec;
- wl_proxd_snr_t snr;
- wl_proxd_bitflips_t bitflips;
- wl_proxd_status_t status;
- int32 distance;
- wl_proxd_phy_error_t tof_phy_error;
- wl_proxd_phy_error_t tof_tgt_phy_error; /* target phy error bit map */
- wl_proxd_snr_t tof_tgt_snr;
- wl_proxd_bitflips_t tof_tgt_bitflips;
- uint8 coreid;
- uint8 pad[3];
- uint32 chanspec;
-} wl_proxd_rtt_sample_v2_t;
+/** result flags */
+enum {
+ WL_PRXOD_RESULT_FLAG_NONE = 0x0000,
+ WL_PROXD_RESULT_FLAG_NLOS = 0x0001, /**< LOS - if available */
+ WL_PROXD_RESULT_FLAG_LOS = 0x0002, /**< NLOS - if available */
+ WL_PROXD_RESULT_FLAG_FATAL = 0x0004, /**< Fatal error during burst */
+ WL_PROXD_RESULT_FLAG_VHTACK = 0x0008, /* VHTACK or Legacy ACK used */
+ WL_PROXD_REQUEST_SENT = 0x0010, /* FTM request was sent */
+ WL_PROXD_REQUEST_ACKED = 0x0020, /* FTM request was acked */
+ WL_PROXD_LTFSEQ_STARTED = 0x0040, /* LTF sequence started */
+ WL_PROXD_RESULT_FLAG_ALL = 0xffff
+};
+typedef int16 wl_proxd_result_flags_t;
-#define WL_PROXD_RTT_RESULT_VERSION_2 2
/** rtt measurement result */
-typedef struct wl_proxd_rtt_result_v2 {
- uint16 version;
- uint16 length; /* up to rtt[] */
+typedef struct wl_proxd_rtt_result {
wl_proxd_session_id_t sid;
wl_proxd_result_flags_t flags;
wl_proxd_status_t status;
struct ether_addr peer;
wl_proxd_session_state_t state; /**< current state */
union {
- wl_proxd_intvl_t retry_after; /* hint for errors */
- wl_proxd_intvl_t burst_duration; /* burst duration */
+ wl_proxd_intvl_t retry_after; /* hint for errors */
+ wl_proxd_intvl_t burst_duration; /* burst duration */
} u;
+ wl_proxd_rtt_sample_t avg_rtt;
uint32 avg_dist; /* 1/256m units */
uint16 sd_rtt; /* RTT standard deviation */
uint8 num_valid_rtt; /* valid rtt cnt */
uint16 num_rtt; /* 0 if no detail */
uint16 num_meas; /* number of ftm frames seen OTA */
uint8 pad[2];
- wl_proxd_rtt_sample_v2_t rtt[1]; /* variable, first element is avg_rtt */
-} wl_proxd_rtt_result_v2_t;
+ wl_proxd_rtt_sample_t rtt[1]; /* variable */
+} wl_proxd_rtt_result_t;
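/*
 * Illustrative sketch (assumes the wl_proxd_rtt_result_t layout above):
 * counting the detailed RTT samples that were not flagged as discarded.
 * num_rtt is 0 when no per-sample detail is attached to the result.
 */
static uint16 proxd_rtt_valid_samples(const wl_proxd_rtt_result_t *res)
{
	uint16 i, cnt = 0;

	for (i = 0; i < res->num_rtt; i++) {
		if (!(res->rtt[i].flags & WL_PROXD_RTT_SAMPLE_DISCARD))
			cnt++;
	}
	return cnt;
}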
/** aoa measurement result */
typedef struct wl_proxd_aoa_result {
- wl_proxd_session_id_t sid;
- wl_proxd_result_flags_t flags;
- wl_proxd_status_t status;
- struct ether_addr peer;
- wl_proxd_session_state_t state;
- uint16 burst_num;
- uint8 pad[2];
+ wl_proxd_session_id_t sid;
+ wl_proxd_result_flags_t flags;
+ wl_proxd_status_t status;
+ struct ether_addr peer;
+ wl_proxd_session_state_t state;
+ uint16 burst_num;
+ uint8 pad[2];
/* wl_proxd_aoa_sample_t sample_avg; TBD */
} BWL_POST_PACKED_STRUCT wl_proxd_aoa_result_t;
#include <packed_section_end.h>
/** global stats */
typedef struct wl_proxd_counters {
- uint32 tx; /* tx frame count */
- uint32 rx; /* rx frame count */
- uint32 burst; /* total number of burst */
- uint32 sessions; /* total number of sessions */
- uint32 max_sessions; /* max concurrency */
- uint32 sched_fail; /* scheduling failures */
- uint32 timeouts; /* timeouts */
- uint32 protoerr; /* protocol errors */
- uint32 noack; /* tx w/o ack */
- uint32 txfail; /* any tx falure */
- uint32 lci_req_tx; /* tx LCI requests */
- uint32 lci_req_rx; /* rx LCI requests */
- uint32 lci_rep_tx; /* tx LCI reports */
- uint32 lci_rep_rx; /* rx LCI reports */
- uint32 civic_req_tx; /* tx civic requests */
- uint32 civic_req_rx; /* rx civic requests */
- uint32 civic_rep_tx; /* tx civic reports */
- uint32 civic_rep_rx; /* rx civic reports */
- uint32 rctx; /* ranging contexts created */
- uint32 rctx_done; /* count of ranging done */
- uint32 publish_err; /* availability publishing errors */
- uint32 on_chan; /* count of scheduler onchan */
- uint32 off_chan; /* count of scheduler offchan */
- uint32 tsf_lo; /* local tsf or session tsf */
+ uint32 tx; /**< tx frame count */
+ uint32 rx; /**< rx frame count */
+ uint32 burst; /**< total number of burst */
+ uint32 sessions; /**< total number of sessions */
+ uint32 max_sessions; /**< max concurrency */
+ uint32 sched_fail; /**< scheduling failures */
+ uint32 timeouts; /**< timeouts */
+ uint32 protoerr; /**< protocol errors */
+ uint32 noack; /**< tx w/o ack */
+ uint32 txfail; /**< any tx failure */
+ uint32 lci_req_tx; /**< tx LCI requests */
+ uint32 lci_req_rx; /**< rx LCI requests */
+ uint32 lci_rep_tx; /**< tx LCI reports */
+ uint32 lci_rep_rx; /**< rx LCI reports */
+ uint32 civic_req_tx; /**< tx civic requests */
+ uint32 civic_req_rx; /**< rx civic requests */
+ uint32 civic_rep_tx; /**< tx civic reports */
+ uint32 civic_rep_rx; /**< rx civic reports */
+ uint32 rctx; /**< ranging contexts created */
+ uint32 rctx_done; /**< count of ranging done */
+ uint32 publish_err; /**< availability publishing errors */
+ uint32 on_chan; /**< count of scheduler onchan */
+ uint32 off_chan; /**< count of scheduler offchan */
+ uint32 tsf_lo; /* local tsf or session tsf */
uint32 tsf_hi;
uint32 num_meas;
} wl_proxd_counters_t;
} wl_proxd_ftm_info_t;
enum {
- WL_PROXD_WAIT_NONE = 0x0000,
+ WL_PROXD_WAIT_NONE = 0x0000,
WL_PROXD_WAIT_KEY = 0x0001,
WL_PROXD_WAIT_SCHED = 0x0002,
WL_PROXD_WAIT_TSF = 0x0004
typedef uint32 wl_proxd_debug_mask_t;
/** tlv IDs - data length 4 bytes unless overridden by type, alignment 32 bits */
-typedef enum {
+enum {
WL_PROXD_TLV_ID_NONE = 0,
WL_PROXD_TLV_ID_METHOD = 1,
WL_PROXD_TLV_ID_FLAGS = 2,
WL_PROXD_TLV_ID_TPK = 36, /* 32byte TPK */
WL_PROXD_TLV_ID_RI_RR = 36, /* RI_RR */
WL_PROXD_TLV_ID_TUNE = 37, /* wl_proxd_pararms_tof_tune_t */
- WL_PROXD_TLV_ID_CUR_ETHER_ADDR = 38, /* Source Address used for Tx */
/* output - 512 + x */
WL_PROXD_TLV_ID_STATUS = 512,
WL_PROXD_TLV_ID_SESSION_INFO = 517,
WL_PROXD_TLV_ID_SESSION_STATUS = 518,
WL_PROXD_TLV_ID_SESSION_ID_LIST = 519,
- WL_PROXD_TLV_ID_RTT_RESULT_V2 = 520,
/* debug tlvs can be added starting 1024 */
WL_PROXD_TLV_ID_DEBUG_MASK = 1024,
WL_PROXD_TLV_ID_COLLECT_INFO = 1028, /* wl_proxd_collect_info_t */
WL_PROXD_TLV_ID_COLLECT_DATA = 1029, /* wl_proxd_collect_data_t */
WL_PROXD_TLV_ID_COLLECT_CHAN_DATA = 1030, /* wl_proxd_collect_data_t */
- WL_PROXD_TLV_ID_MF_STATS_DATA = 1031, /* mf_stats_buffer */
-
- WL_PROXD_TLV_ID_COLLECT_INLINE_HEADER = 1032,
- WL_PROXD_TLV_ID_COLLECT_INLINE_FRAME_INFO = 1033,
- WL_PROXD_TLV_ID_COLLECT_INLINE_FRAME_DATA = 1034,
- WL_PROXD_TLV_ID_COLLECT_INLINE_RESULTS = 1035,
WL_PROXD_TLV_ID_MAX
-} wl_proxd_tlv_types_t;
-
-#define TOF_COLLECT_INLINE_HEADER_INFO_VER_1 1
-
-typedef struct wl_proxd_collect_inline_header_info_v1
-{
- uint16 version;
- uint16 pad1;
- uint32 ratespec; /* override */
- chanspec_t chanspec;
- uint16 num_ftm;
- struct ether_addr peer_mac;
- struct ether_addr cur_ether_addr; /* source address for Tx */
-} wl_proxd_collect_inline_header_info_v1_t;
-
-#define TOF_COLLECT_INLINE_RESULTS_VER_1 1
-typedef struct wl_proxd_collect_inline_results_info_v1
-{
- uint16 version;
- uint16 pad1;
- uint32 meanrtt;
- uint32 distance;
- uint16 num_rtt;
- uint16 pad2;
- int32 status;
- uint32 ratespec;
-} wl_proxd_collect_inline_results_info_v1_t;
-
-#define TOF_COLLECT_INLINE_FRAME_INFO_VER_1 1
-typedef struct wl_proxd_collect_inline_frame_info_v1
-{
- uint16 version;
- uint16 pad1;
- int32 gd;
- uint32 T[4];
- uint32 prev_t1;
- uint32 prev_t4;
- int32 hadj;
- int8 rssi;
- uint8 pad[3];
-} wl_proxd_collect_inline_frame_info_v1_t;
-
-#define TOF_COLLECT_INLINE_FRAME_INFO_VER_2 2
-typedef struct wl_proxd_collect_inline_frame_info_v2
-{
- uint16 version;
- uint16 pad1;
- int32 gd;
- uint32 T[4];
- int32 hadj;
- int8 rssi;
- uint8 pad[3];
-} wl_proxd_collect_inline_frame_info_v2_t;
+};
typedef struct wl_proxd_tlv {
uint16 id;
WL_PROXD_EVENT_CIVIC_MEAS_REP = 16, /* civic measurement report */
WL_PROXD_EVENT_COLLECT = 17,
WL_PROXD_EVENT_START_WAIT = 18, /* waiting to start */
- WL_PROXD_EVENT_MF_STATS = 19, /* mf stats event */
WL_PROXD_EVENT_MAX
};
/** proxd event - applies to proxd, method or session */
typedef struct wl_proxd_event {
- uint16 version;
- uint16 len;
- wl_proxd_event_type_t type;
- wl_proxd_method_t method;
- wl_proxd_session_id_t sid;
- uint8 pad[2]; /* This field is used fragmentation purpose */
- wl_proxd_tlv_t tlvs[1]; /**< variable */
+ uint16 version;
+ uint16 len;
+ wl_proxd_event_type_t type;
+ wl_proxd_method_t method;
+ wl_proxd_session_id_t sid;
+ uint8 pad[2];
+ wl_proxd_tlv_t tlvs[1]; /**< variable */
} wl_proxd_event_t;
enum {
typedef struct wl_proxd_ranging_info wl_proxd_ranging_info_t;
#include <packed_section_start.h>
-/* Legacy platform i.e. 43342/43430 */
-#define WL_PROXD_COLLECT_EVENT_DATA_VERSION_1 1
-typedef BWL_PRE_PACKED_STRUCT struct wl_proxd_collect_event_data_v1 {
+typedef BWL_PRE_PACKED_STRUCT struct wl_proxd_collect_event_data {
uint32 H_LB[K_TOF_COLLECT_H_SIZE_20MHZ];
uint32 H_RX[K_TOF_COLLECT_H_SIZE_20MHZ];
uint8 ri_rr[FTM_TPK_LEN];
wl_proxd_phy_error_t phy_err_mask;
-} BWL_POST_PACKED_STRUCT wl_proxd_collect_event_data_v1_t;
-
-/* Secured 2.0 supoorted devices i.e. 4364 */
-#define WL_PROXD_COLLECT_EVENT_DATA_VERSION_2 2
-typedef BWL_PRE_PACKED_STRUCT struct wl_proxd_collect_event_data_v2 {
- uint32 H_LB[K_TOF_COLLECT_H_SIZE_20MHZ];
- uint32 H_RX[K_TOF_COLLECT_H_SIZE_20MHZ];
- uint8 ri_rr[FTM_TPK_RI_RR_LEN_SECURE_2_0];
- wl_proxd_phy_error_t phy_err_mask;
-} BWL_POST_PACKED_STRUCT wl_proxd_collect_event_data_v2_t;
+} BWL_POST_PACKED_STRUCT wl_proxd_collect_event_data_t;
#include <packed_section_end.h>
-#define WL_PROXD_COLLECT_EVENT_DATA_VERSION_3 3
-typedef struct wl_proxd_collect_event_data_v3 {
- uint16 version;
- uint16 length;
- uint32 H_LB[K_TOF_COLLECT_H_SIZE_20MHZ];
- uint32 H_RX[K_TOF_COLLECT_H_SIZE_20MHZ];
- uint8 ri_rr[FTM_TPK_RI_RR_LEN_SECURE_2_0];
- wl_proxd_phy_error_t phy_err_mask;
-} wl_proxd_collect_event_data_v3_t;
-
-#define WL_PROXD_COLLECT_EVENT_DATA_VERSION_MAX WL_PROXD_COLLECT_EVENT_DATA_VERSION_3
-
/** Data returned by the bssload_report iovar. This is also the WLC_E_BSS_LOAD event data */
#include <packed_section_start.h>
typedef BWL_PRE_PACKED_STRUCT struct wl_bssload {
#define WL_ROAM_PROF_VER_0 0
#define WL_ROAM_PROF_VER_1 1
-#define WL_ROAM_PROF_VER_2 2
-#define WL_MAX_ROAM_PROF_VER WL_ROAM_PROF_VER_1
+#define WL_MAX_ROAM_PROF_VER WL_ROAM_PROF_VER_1
#define WL_ROAM_PROF_NONE (0 << 0)
#define WL_ROAM_PROF_LAZY (1 << 0)
#define WL_CU_CALC_DURATION_DEFAULT 10 /* seconds */
#define WL_CU_CALC_DURATION_MAX 60 /* seconds */
-#define WL_ESTM_LOW_TRIGGER_DISABLE 0
-#define WL_ESTM_LOW_TRIGGER_DEFAULT 5 /* Mbps */
-#define WL_ESTM_LOW_TRIGGER_MAX 250 /* Mbps */
-#define WL_ESTM_ROAM_DELTA_DEFAULT 10
-
-typedef struct wl_roam_prof_v3 {
- uint8 roam_flags; /**< bit flags */
- int8 roam_trigger; /**< RSSI trigger level per profile/RSSI bracket */
- int8 rssi_lower;
- int8 roam_delta;
-
- /* if channel_usage if zero, roam_delta is rssi delta required for new AP */
- /* if channel_usage if non-zero, roam_delta is score delta(%) required for new AP */
- int8 rssi_boost_thresh; /**< Min RSSI to qualify for RSSI boost */
- int8 rssi_boost_delta; /**< RSSI boost for AP in the other band */
- uint16 nfscan; /**< number of full scan to start with */
- uint16 fullscan_period;
- uint16 init_scan_period;
- uint16 backoff_multiplier;
- uint16 max_scan_period;
- uint8 channel_usage;
- uint8 cu_avg_calc_dur;
- uint16 estm_low_trigger; /**< ESTM low throughput roam trigger */
- int8 estm_roam_delta; /**< ESTM low throughput roam delta */
- uint8 pad;
-} wl_roam_prof_v3_t;
-
typedef struct wl_roam_prof_v2 {
int8 roam_flags; /**< bit flags */
int8 roam_trigger; /**< RSSI trigger level per profile/RSSI bracket */
uint16 max_scan_period;
} wl_roam_prof_v1_t;
-typedef struct wl_roam_prof_band_v3 {
- uint32 band; /**< Must be just one band */
- uint16 ver; /**< version of this struct */
- uint16 len; /**< length in bytes of this structure */
- wl_roam_prof_v3_t roam_prof[WL_MAX_ROAM_PROF_BRACKETS];
-} wl_roam_prof_band_v3_t;
-
typedef struct wl_roam_prof_band_v2 {
uint32 band; /**< Must be just one band */
uint16 ver; /**< version of this struct */
uint16 weight; /* weightage for each type between 0 to 100 */
} wnm_bss_select_weight_cfg_t;
-/* For branches before koala .. wbtext is part
- * of wnm need to use below type only
- */
-typedef struct wnm_btm_default_score_cfg {
- uint32 default_score; /* default score */
- uint8 band;
-} wnm_btm_default_score_cfg_t;
-
-/* For branches from koala and above .. wbtext is
- * seperate module..need to use below type only
- */
-typedef struct wbtext_btm_default_score_cfg {
- uint32 default_score; /* default score */
- uint8 band;
-} wbtext_btm_default_score_cfg_t;
-
#define WNM_BSS_SELECT_TYPE_RSSI 0
#define WNM_BSS_SELECT_TYPE_CU 1
-#define WNM_BSS_SELECT_TYPE_ESTM_DL 2
#define WNM_BSSLOAD_MONITOR_VERSION 1
typedef struct wnm_bssload_monitor_cfg {
typedef enum wl_interface_type {
WL_INTERFACE_TYPE_STA = 0,
WL_INTERFACE_TYPE_AP = 1,
+ WL_INTERFACE_TYPE_AWDL = 2,
WL_INTERFACE_TYPE_NAN = 3,
- WL_INTERFACE_TYPE_P2P_GO = 4,
- WL_INTERFACE_TYPE_P2P_GC = 5,
- WL_INTERFACE_TYPE_P2P_DISC = 6,
- WL_INTERFACE_TYPE_IBSS = 7,
WL_INTERFACE_TYPE_MAX
} wl_interface_type_t;
*/
#define WL_INTERFACE_BSSID_INDEX_USE (1 << 4)
+#ifdef WLMESH
+typedef struct wl_interface_info {
+ uint16 ver; /* version of this struct */
+ struct ether_addr mac_addr; /* MAC address of the interface */
+ char ifname[BCM_MSG_IFNAME_MAX]; /* name of interface */
+ uint8 bsscfgidx; /* source bsscfg index */
+} wl_interface_info_t;
+#endif
+
typedef struct wl_interface_create {
uint16 ver; /* version of this struct */
uint32 flags; /* flags that defines the operation */
#define WL_INTERFACE_INFO_VER_1 1
#define WL_INTERFACE_INFO_VER_2 2
-typedef struct wl_interface_info {
- uint16 ver; /* version of this struct */
- struct ether_addr mac_addr; /* MAC address of the interface */
- char ifname[BCM_MSG_IFNAME_MAX]; /* name of interface */
- uint8 bsscfgidx; /* source bsscfg index */
-} wl_interface_info_t;
-
typedef struct wl_interface_info_v1 {
uint16 ver; /**< version of this struct */
struct ether_addr mac_addr; /**< MAC address of the interface */
wl_iqest_value_t value[1];
} wl_iqest_result_t;
-#define WL_PRIO_ROAM_PROF_V1 (1u)
-
-typedef struct wl_prio_roam_prof_v1 {
- uint16 version; /* Version info */
- uint16 length; /* byte length of this structure */
- uint8 prio_roam_mode; /* Roam mode RCC/RCC+Full Scan */
- uint8 PAD[3];
-} wl_prio_roam_prof_v1_t;
-
-typedef enum wl_prio_roam_mode {
- PRIO_ROAM_MODE_OFF = 0, /* Prio_Roam feature disable */
- PRIO_ROAM_MODE_RCC_ONLY = 1, /* Scan RCC list only */
- PRIO_ROAM_MODE_RCC_FULLSCAN = 2, /* Scan RCC list + Full scan */
- PRIO_ROAM_MODE_FULLSCAN_ONLY = 3 /* Full Scan only */
-} wl_prio_roam_mode_t;
-
/* BTCX AIBSS (Oxygen) Status */
typedef struct wlc_btc_aibss_info {
uint32 prev_tsf_l; // Lower 32 bits of last read of TSF
uint32 data[]; /* variable length data containing stats */
} wl_bcntrim_status_v1_t;
-#define BCNTRIM_STATS_MAX 10 /* Total stats part of the status data[] */
+#define BCNTRIM_STATS_MAX 10 /* Total stats part of the status data[] */
/* Bits for FW status */
-#define WL_BCNTRIM_DISABLE_HOST 0x1 /* Host disabled bcntrim through bcntrim IOVar */
-#define WL_BCNTRIM_DISABLE_PHY_RATE 0x2 /* bcntrim disabled because beacon rx rate is
- * higher than phy_rate_thresh
- */
-#define WL_BCNTRIM_DISABLE_QUIET_IE 0x4 /* bcntrim disable when Quiet IE present */
-#define WL_BCNTRIM_DISABLE_QBSSLOAD_IE 0x8 /* bcntrim disable when QBSS Load IE present */
-#define WL_BCNTRIM_DISABLE_OPERMODE_IE 0x10 /* bcntrim dsiable when opermode IE is present */
-#define WL_BCNTRIM_DISABLE_CSA_IE 0x20 /* bcntrim dsiable when CSA IE is present */
-
-#define BCNTRIM_DISABLE_THRESHOLD_TIME 1000 * 10 /* enable bcntrim after a threshold (10sec)
- * when disabled due to above mentioned IE's
- */
+#define WL_BCNTRIM_DISABLE_HOST 0x1 /* Host disabled bcntrim through bcntrim IOVar */
+#define WL_BCNTRIM_DISABLE_PHY_RATE 0x2 /* bcntrim disabled because beacon rx rate is
+ * higher than phy_rate_thresh
+ */
+#define WL_BCNTRIM_DISABLE_QUIET_IE 0x4 /* bcntrim disable when Quiet IE present */
+
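For reference, a minimal host-side sketch of decoding the fw_status bitmask carried in wl_bcntrim_status_v1_t, using only the WL_BCNTRIM_DISABLE_* values kept by this patch; the helper itself is hypothetical.

#include <stdio.h>

/* Hypothetical debug helper: report why beacon trimming is currently off. */
static void bcntrim_print_fw_status(unsigned int fw_status)
{
	if (fw_status == 0)
		printf("bcntrim active\n");
	if (fw_status & WL_BCNTRIM_DISABLE_HOST)
		printf("disabled by host via the bcntrim IOVar\n");
	if (fw_status & WL_BCNTRIM_DISABLE_PHY_RATE)
		printf("disabled: beacon rx rate above phy_rate_thresh\n");
	if (fw_status & WL_BCNTRIM_DISABLE_QUIET_IE)
		printf("disabled: Quiet IE present\n");
}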
#define WL_BCNTRIM_CFG_VERSION_1 1
/* Common IOVAR struct */
typedef struct wl_bcntrim_cfg_v1 {
#define BCNTRIM_MAX_PHY_RATE 48 /* in 500Kbps */
#define BCNTRIM_MAX_TSF_DRIFT 65535 /* in usec */
-#define WL_BCNTRIM_OVERRIDE_DISABLE_MASK \
- (WL_BCNTRIM_DISABLE_QUIET_IE | WL_BCNTRIM_DISABLE_QBSSLOAD_IE)
+#define WL_BCNTRIM_OVERRIDE_DISABLE_MASK (WL_BCNTRIM_DISABLE_QUIET_IE)
/* WL_BCNTRIM_CFG_SUBCMD_PHY_RATE_TRESH */
typedef struct wl_bcntrim_cfg_phy_rate_thresh {
uint8 pad[2]; /* 4-byte alignment */
} wl_bcntrim_cfg_tsf_drift_limit_t;
+
/* -------------- TX Power Cap --------------- */
#define TXPWRCAP_MAX_NUM_CORES 8
#define TXPWRCAP_MAX_NUM_ANTENNAS (TXPWRCAP_MAX_NUM_CORES * 2)
-#define TXPWRCAP_MAX_NUM_CORES_V3 4
-#define TXPWRCAP_MAX_NUM_ANTENNAS_V3 (TXPWRCAP_MAX_NUM_CORES_V3 * 2)
-
#define TXPWRCAP_NUM_SUBBANDS 5
-#define TXPWRCAP_MAX_NUM_SUBGRPS 10
/* IOVAR txcapconfig enum's */
-#define TXPWRCAPCONFIG_WCI2 0u
-#define TXPWRCAPCONFIG_HOST 1u
-#define TXPWRCAPCONFIG_WCI2_AND_HOST 2u
-#define TXPWRCAPCONFIG_NONE 0xFFu
+#define TXPWRCAPCONFIG_WCI2 0
+#define TXPWRCAPCONFIG_HOST 1
+#define TXPWRCAPCONFIG_WCI2_AND_HOST 2
/* IOVAR txcapstate enum's */
#define TXPWRCAPSTATE_LOW_CAP 0
/* IOVAR txcapconfig and txcapstate structure is shared: SET and GET */
#define TXPWRCAPCTL_VERSION 2
-#define TXPWRCAPCTL_VERSION_3 3
-
typedef struct wl_txpwrcap_ctl {
uint8 version;
uint8 ctl[TXPWRCAP_NUM_SUBBANDS];
} wl_txpwrcap_ctl_t;
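As an illustration of how the two-field wl_txpwrcap_ctl_t retained here is meant to be filled, a hedged sketch that caps every subband at the low state; the function name is made up, and handing the buffer to the txcapstate iovar is left to the caller.

#include <string.h>

/* Hypothetical: prepare a txcapstate SET payload with all subbands in LOW_CAP. */
static void txpwrcap_fill_low_cap(wl_txpwrcap_ctl_t *cap_ctl)
{
	int i;

	memset(cap_ctl, 0, sizeof(*cap_ctl));
	cap_ctl->version = TXPWRCAPCTL_VERSION;
	for (i = 0; i < TXPWRCAP_NUM_SUBBANDS; i++)
		cap_ctl->ctl[i] = TXPWRCAPSTATE_LOW_CAP;	/* per-subband cap state */
}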
-typedef struct wl_txpwrcap_ctl_v3 {
- uint8 version;
- uint8 ctl[TXPWRCAP_MAX_NUM_SUBGRPS];
-} wl_txpwrcap_ctl_v3_t;
-
/* IOVAR txcapdump structure: GET only */
#define TXPWRCAP_DUMP_VERSION 2
typedef struct wl_txpwrcap_dump {
uint8 PAD[2];
} wl_txpwrcap_dump_v3_t;
-/*
-* Capability flag for wl_txpwrcap_tbl_v2_t and wl_txpwrcap_t
-* The index into pwrs will be: 0: onbody-cck, 1: onbody-ofdm, 2:offbody-cck, 3:offbody-ofdm
-*
-* For 5G power in SDB case as well as for non-SDB case, the value of flag will be: CAP_ONOFF_BODY
-* The index into pwrs will be: 0: onbody, 1: offbody-ofdm
-*/
-
-#define CAP_ONOFF_BODY (0x1) /* on/off body only */
-#define CAP_CCK_OFDM (0x2) /* cck/ofdm capability only */
-#define CAP_LTE_CELL (0x4) /* cell on/off capability; required for iOS builds */
-#define CAP_HEAD_BODY (0x8) /* head/body capability */
-#define CAP_2G_DEPON_5G (0x10) /* 2G pwr caps depend on other slice 5G subband */
-#define CAP_SISO_MIMO (0x20) /* Siso/Mimo Separate Power Caps */
-#define CAP_ANT_TX (0x40) /* Separate Power Caps based on cell ant tx value */
-#define CAP_LTE_PQBIT (0x100u) /* QPBit is enabled */
-#define CAP_ONOFF_BODY_CCK_OFDM (CAP_ONOFF_BODY | CAP_CCK_OFDM)
-#define CAP_TXPWR_ALL (CAP_ONOFF_BODY|CAP_CCK_OFDM|CAP_LTE_CELL|\
- CAP_SISO_MIMO|CAP_HEAD_BODY|CAP_ANT_TX)
-
-#define TXHDR_SEC_MAX 5u /* Deprecated. Kept till removed in all branches */
-#define TXPWRCAP_MAX_STATES 4u
-#define TXPWRCAP_MAX_STATES_V3 10u
-#define TXPWRCAP_CCKOFDM_ONOFFBODY_MAX_STATES 4u
-#define TXPWRCAP_ONOFFBODY_MAX_STATES 2u
-#define TXPWRCAP_ONOFFCELL_MAX_STATES 2u
-
-#define TXHDR_SEC_NONSDB_MAIN_2G 0
-#define TXHDR_SEC_NONSDB_MAIN_5G 1
-#define TXHDR_SEC_NONSDB_AUX_2G 2
-#define TXHDR_SEC_NONSDB_AUX_5G 3
-#define TXHDR_SEC_SDB_MAIN_2G 4
-#define TXHDR_SEC_SDB_MAIN_5G 5
-#define TXHDR_SEC_SDB_AUX_2G 6
-#define TXHDR_SEC_SDB_AUX_5G 7
-#define TXHDR_MAX_SECTION 8
-
-#define WL_TXPWRCAP_MAX_SLICES 2
-#define WL_TXPWRCAPDUMP_VER 4
-
-#define WL_TXPWRCAP_VERSION_2 2
-#define WL_TXPWRCAP_VERSION_3 3
-
-typedef struct wl_txpwrcap {
- uint8 capability;
- uint8 num_cap_states;
- uint8 section; /* Index from above,eg. TXHDR_SEC_NONSDB */
- int8 pwrs[][TXPWRCAP_NUM_SUBBANDS][TXPWRCAP_MAX_NUM_CORES];
-} wl_txpwrcap_t;
-
-typedef struct {
- uint8 capability;
- uint8 num_cap_states;
- uint8 num_subgrps;
- uint8 section; /* Index from above,eg. TXHDR_SEC_NONSDB */
- int8 pwrs[][TXPWRCAP_MAX_NUM_SUBGRPS][TXPWRCAP_MAX_NUM_ANTENNAS_V3];
-} wl_txpwrcap_v2_t;
-
-#define TXPWRCAP_DUMP_VERSION_4 4u
-#define TXPWRCAP_DUMP_VERSION_5 5u
-#define TXPWRCAP_DUMP_VERSION_6 6u
-
-typedef struct wl_txpwrcap_dump_v4 {
- uint8 version;
- uint8 num_pwrcap;
- uint8 current_country[2];
- uint32 current_channel;
- uint8 download_present;
- uint8 num_cores; /* number cores on slice */
- uint8 num_cc_groups; /* number cc groups */
- uint8 current_country_cc_group_info_index;
- /* first power cap always exist
- * On main,-non-sdb follows by sdb2g and then sdb5g
- * On aux slice - aux2g then aux5g.
- */
- wl_txpwrcap_t pwrcap; /* first power cap */
-} wl_txpwrcap_dump_v4_t;
-
-typedef struct wl_txpwrcap_dump_v5 {
- uint8 version;
- uint8 num_pwrcap;
- uint8 current_country[2];
- uint8 current_channel;
- uint8 high_cap_state_enabled;
- uint8 reserved[2];
- uint8 download_present;
- uint8 num_ants; /* number antenna slice */
- uint8 num_cc_groups; /* number cc groups */
- uint8 current_country_cc_group_info_index;
- uint8 ant_tx; /* current value of ant_tx */
- uint8 cell_status; /* current value of cell status */
- int8 pwrcap[]; /* variable size power caps (wl_txpwrcap_v2_t) */
-} wl_txpwrcap_dump_v5_t;
-
-typedef struct wl_txpwrcap_dump_v6 {
- uint8 version;
- uint8 num_pwrcap;
- uint8 current_country[2];
- uint8 current_channel;
- uint8 high_cap_state_enabled;
- uint8 reserved[2];
- uint8 download_present;
- uint8 num_ants; /* number antenna slice */
- uint8 num_cc_groups; /* number cc groups */
- uint8 current_country_cc_group_info_index;
- uint8 ant_tx; /* current value of ant_tx */
- uint8 cell_status; /* current value of cell status */
- uint16 capability[TXHDR_MAX_SECTION]; /* capabilities */
- int8 pwrcap[]; /* variable size power caps (wl_txpwrcap_v2_t) */
-} wl_txpwrcap_dump_v6_t;
-
-#define TXCAPINFO_VERSION_1 1
-typedef struct wl_txpwrcap_ccgrp_info {
- uint8 num_cc;
- char cc_list[1][2]; /* 2 letters for each country. At least one country */
-} wl_txpwrcap_ccgrp_info_t;
-
-typedef struct {
- uint16 version;
- uint16 length; /* length in bytes */
- uint8 num_ccgrp;
- /* followed by one or more wl_txpwrcap_ccgrp_info_t */
- wl_txpwrcap_ccgrp_info_t ccgrp_data[1];
-} wl_txpwrcap_info_t;
-
typedef struct wl_txpwrcap_tbl {
uint8 num_antennas_per_core[TXPWRCAP_MAX_NUM_CORES];
/* Stores values for valid antennas */
int8 pwrcap_cell_off[TXPWRCAP_MAX_NUM_ANTENNAS]; /* qdBm units */
} wl_txpwrcap_tbl_t;
-typedef struct wl_txpwrcap_tbl_v2 {
- uint8 version;
- uint8 length; /* size of entire structure, including the pwrs */
- uint8 capability; /* capability bitmap */
- uint8 num_cores; /* number of cores i.e. entries in each cap state row */
- /*
- * pwrs array has TXPWRCAP_MAX_STATES rows - one for each cap state.
- * Each row has up to TXPWRCAP_MAX_NUM_CORES entries - one for each core.
- */
- uint8 pwrs[][TXPWRCAP_MAX_NUM_CORES]; /* qdBm units */
-} wl_txpwrcap_tbl_v2_t;
-
-typedef struct wl_txpwrcap_tbl_v3 {
- uint8 version;
- uint8 length; /* size of entire structure, including the pwrs */
- uint8 capability; /* capability bitmap */
- uint8 num_cores; /* number of cores */
- uint8 num_antennas_per_core[TXPWRCAP_MAX_NUM_CORES_V3];
- /*
- * pwrs array has TXPWRCAP_MAX_STATES rows - one for each cap state.
- * Each row has up to TXPWRCAP_MAX_NUM_ANTENNAS entries - for each antenna.
- * Included in the rows of powers are rows for fail safe.
- */
- int8 pwrs[][TXPWRCAP_MAX_NUM_ANTENNAS_V3]; /* qdBm units */
-} wl_txpwrcap_tbl_v3_t;
-
/* ##### Ecounters section ##### */
#define ECOUNTERS_VERSION_1 1
} ecounters_trigger_config_t;
#define ECOUNTERS_TRIGGER_REASON_VERSION_1 1
-typedef enum {
- /* Triggered due to timer based ecounters */
- ECOUNTERS_TRIGGER_REASON_TIMER = 0,
- /* Triggered due to event based configuration */
- ECOUNTERS_TRIGGER_REASON_EVENTS = 1,
- ECOUNTERS_TRIGGER_REASON_D2H_EVENTS = 2,
- ECOUNTERS_TRIGGER_REASON_H2D_EVENTS = 3,
- ECOUNTERS_TRIGGER_REASON_USER_EVENTS = 4,
- ECOUNTERS_TRIGGER_REASON_MAX = 5
-} ecounters_trigger_reasons_list_t;
+/* Triggered due to timer based ecounters */
+#define ECOUNTERS_TRIGGER_REASON_TIMER 0
+/* Triggered due to event based configuration */
+#define ECOUNTERS_TRIGGER_REASON_EVENTS 1
+#define ECOUNTERS_TRIGGER_REASON_MAX 1
typedef struct ecounters_trigger_reason {
uint16 version; /* version */
#define WL_LQM_CURRENT_BSS_VALID 0x1
#define WL_LQM_TARGET_BSS_VALID 0x2
-#define WL_PERIODIC_COMPACT_CNTRS_VER_1 (1)
-#define WL_PERIODIC_TXBF_CNTRS_VER_1 (1)
-typedef struct {
- uint16 version;
- uint16 pad;
- /* taken from wl_wlc_cnt_t */
- uint32 txfail;
- /* taken from wl_cnt_ge40mcst_v1_t */
- uint32 txallfrm; /**< total number of frames sent, incl. Data, ACK, RTS, CTS,
- * Control Management (includes retransmissions)
- */
- uint32 txrtsfrm; /**< number of RTS sent out by the MAC */
- uint32 txctsfrm; /**< number of CTS sent out by the MAC */
- uint32 txback; /**< blockack txcnt */
- uint32 txucast; /**< number of unicast tx expecting response other than cts/cwcts */
- uint32 txnoack; /**< dot11ACKFailureCount */
- uint32 txframe; /**< tx data frames */
- uint32 txretrans; /**< tx mac retransmits */
- uint32 txpspoll; /**< Number of TX PS-poll */
-
- uint32 rxrsptmout; /**< number of response timeouts for transmitted frames
- * expecting a response
- */
- uint32 txrtsfail; /**< number of rts transmission failure that reach retry limit */
- uint32 rxstrt; /**< number of received frames with a good PLCP */
- uint32 rxbadplcp; /**< number of parity check of the PLCP header failed */
- uint32 rxcrsglitch; /**< PHY was able to correlate the preamble but not the header */
- uint32 rxnodelim; /**< number of no valid delimiter detected by ampdu parser */
- uint32 bphy_badplcp; /**< number of bad PLCP reception on BPHY rate */
- uint32 bphy_rxcrsglitch; /**< PHY count of bphy glitches */
- uint32 rxbadfcs; /**< number of frames for which the CRC check failed in the MAC */
- uint32 rxf0ovfl; /**< number of receive fifo 0 overflows */
- uint32 rxf1ovfl; /**< number of receive fifo 0 overflows */
- uint32 rxhlovfl; /**< number of length / header fifo overflows */
- uint32 rxrtsucast; /**< number of unicast RTS addressed to the MAC (good FCS) */
- uint32 rxctsucast; /**< number of unicast CTS addressed to the MAC (good FCS) */
- uint32 rxackucast; /**< number of ucast ACKS received (good FCS) */
- uint32 rxback; /**< blockack rxcnt */
- uint32 rxbeaconmbss; /**< beacons received from member of BSS */
- uint32 rxdtucastmbss; /**< number of received DATA frames with good FCS and matching RA */
- uint32 rxbeaconobss; /**< beacons received from other BSS */
- uint32 rxdtucastobss; /**< number of unicast frames addressed to the MAC from
- * other BSS (WDS FRAME)
- */
- uint32 rxdtocast; /**< number of received DATA frames (good FCS and no matching RA) */
- uint32 rxrtsocast; /**< number of received RTS not addressed to the MAC */
- uint32 rxctsocast; /**< number of received CTS not addressed to the MAC */
- uint32 rxdtmcast; /**< number of RX Data multicast frames received by the MAC */
- uint32 rxmpdu_mu; /**< Number of MU MPDUs received */
- uint32 rxtoolate; /**< receive too late */
- uint32 rxframe; /**< rx data frames */
- uint32 lqcm_report; /**< lqcm metric tx/rx idx */
- uint32 tx_toss_cnt; /* number of tx packets tossed */
- uint32 rx_toss_cnt; /* number of rx packets tossed */
- uint32 last_tx_toss_rsn; /* reason because of which last tx pkt tossed */
- uint32 last_rx_toss_rsn; /* reason because of which last rx pkt tossed */
- uint32 txbcnfrm; /**< beacons transmitted */
-} wl_periodic_compact_cntrs_v1_t;
-
-#define WL_PERIODIC_COMPACT_CNTRS_VER_2 (2)
-typedef struct {
- uint16 version;
- uint16 pad;
- /* taken from wl_wlc_cnt_t */
- uint32 txfail;
- /* taken from wl_cnt_ge40mcst_v1_t */
- uint32 txallfrm; /**< total number of frames sent, incl. Data, ACK, RTS, CTS,
- * Control Management (includes retransmissions)
- */
- uint32 txrtsfrm; /**< number of RTS sent out by the MAC */
- uint32 txctsfrm; /**< number of CTS sent out by the MAC */
- uint32 txback; /**< blockack txcnt */
- uint32 txucast; /**< number of unicast tx expecting response other than cts/cwcts */
- uint32 txnoack; /**< dot11ACKFailureCount */
- uint32 txframe; /**< tx data frames */
- uint32 txretrans; /**< tx mac retransmits */
- uint32 txpspoll; /**< Number of TX PS-poll */
-
- uint32 rxrsptmout; /**< number of response timeouts for transmitted frames
- * expecting a response
- */
- uint32 txrtsfail; /**< number of rts transmission failure that reach retry limit */
- uint32 rxstrt; /**< number of received frames with a good PLCP */
- uint32 rxbadplcp; /**< number of parity check of the PLCP header failed */
- uint32 rxcrsglitch; /**< PHY was able to correlate the preamble but not the header */
- uint32 rxnodelim; /**< number of no valid delimiter detected by ampdu parser */
- uint32 bphy_badplcp; /**< number of bad PLCP reception on BPHY rate */
- uint32 bphy_rxcrsglitch; /**< PHY count of bphy glitches */
- uint32 rxbadfcs; /**< number of frames for which the CRC check failed in the MAC */
- uint32 rxf0ovfl; /**< number of receive fifo 0 overflows */
- uint32 rxf1ovfl; /**< number of receive fifo 0 overflows */
- uint32 rxhlovfl; /**< number of length / header fifo overflows */
- uint32 rxrtsucast; /**< number of unicast RTS addressed to the MAC (good FCS) */
- uint32 rxctsucast; /**< number of unicast CTS addressed to the MAC (good FCS) */
- uint32 rxackucast; /**< number of ucast ACKS received (good FCS) */
- uint32 rxback; /**< blockack rxcnt */
- uint32 rxbeaconmbss; /**< beacons received from member of BSS */
- uint32 rxdtucastmbss; /**< number of received DATA frames with good FCS and matching RA */
- uint32 rxbeaconobss; /**< beacons received from other BSS */
- uint32 rxdtucastobss; /**< number of unicast frames addressed to the MAC from
- * other BSS (WDS FRAME)
- */
- uint32 rxdtocast; /**< number of received DATA frames (good FCS and no matching RA) */
- uint32 rxrtsocast; /**< number of received RTS not addressed to the MAC */
- uint32 rxctsocast; /**< number of received CTS not addressed to the MAC */
- uint32 rxdtmcast; /**< number of RX Data multicast frames received by the MAC */
- uint32 rxmpdu_mu; /**< Number of MU MPDUs received */
- uint32 rxtoolate; /**< receive too late */
- uint32 rxframe; /**< rx data frames */
- uint32 lqcm_report; /**< lqcm metric tx/rx idx */
- uint32 tx_toss_cnt; /* number of tx packets tossed */
- uint32 rx_toss_cnt; /* number of rx packets tossed */
- uint32 last_tx_toss_rsn; /* reason because of which last tx pkt tossed */
- uint32 last_rx_toss_rsn; /* reason because of which last rx pkt tossed */
- uint32 txbcnfrm; /**< beacons transmitted */
- uint32 rxretry; /* Number of rx packets received after retry */
- uint32 rxdup; /* Number of dump packet. Indicates whether peer is receiving ack */
- uint32 chswitch_cnt; /* Number of channel switches */
- uint32 pm_dur; /* Total sleep time in PM, msecs */
-} wl_periodic_compact_cntrs_v2_t;
-
-#define WL_PERIODIC_COMPACT_CNTRS_VER_3 (3)
-typedef struct {
- uint16 version;
- uint16 pad;
- /* taken from wl_wlc_cnt_t */
- uint32 txfail;
- /* taken from wl_cnt_ge40mcst_v1_t */
- uint32 txallfrm; /**< total number of frames sent, incl. Data, ACK, RTS, CTS,
- * Control Management (includes retransmissions)
- */
- uint32 txrtsfrm; /**< number of RTS sent out by the MAC */
- uint32 txctsfrm; /**< number of CTS sent out by the MAC */
- uint32 txback; /**< blockack txcnt */
- uint32 txucast; /**< number of unicast tx expecting response other than cts/cwcts */
- uint32 txnoack; /**< dot11ACKFailureCount */
- uint32 txframe; /**< tx data frames */
- uint32 txretrans; /**< tx mac retransmits */
- uint32 txpspoll; /**< Number of TX PS-poll */
-
- uint32 rxrsptmout; /**< number of response timeouts for transmitted frames
- * expecting a response
- */
- uint32 txrtsfail; /**< number of rts transmission failure that reach retry limit */
- uint32 rxstrt; /**< number of received frames with a good PLCP */
- uint32 rxbadplcp; /**< number of parity check of the PLCP header failed */
- uint32 rxcrsglitch; /**< PHY was able to correlate the preamble but not the header */
- uint32 rxnodelim; /**< number of no valid delimiter detected by ampdu parser */
- uint32 bphy_badplcp; /**< number of bad PLCP reception on BPHY rate */
- uint32 bphy_rxcrsglitch; /**< PHY count of bphy glitches */
- uint32 rxbadfcs; /**< number of frames for which the CRC check failed in the MAC */
- uint32 rxf0ovfl; /**< number of receive fifo 0 overflows */
- uint32 rxf1ovfl; /**< number of receive fifo 0 overflows */
- uint32 rxhlovfl; /**< number of length / header fifo overflows */
- uint32 rxrtsucast; /**< number of unicast RTS addressed to the MAC (good FCS) */
- uint32 rxctsucast; /**< number of unicast CTS addressed to the MAC (good FCS) */
- uint32 rxackucast; /**< number of ucast ACKS received (good FCS) */
- uint32 rxback; /**< blockack rxcnt */
- uint32 rxbeaconmbss; /**< beacons received from member of BSS */
- uint32 rxdtucastmbss; /**< number of received DATA frames with good FCS and matching RA */
- uint32 rxbeaconobss; /**< beacons received from other BSS */
- uint32 rxdtucastobss; /**< number of unicast frames addressed to the MAC from
- * other BSS (WDS FRAME)
- */
- uint32 rxdtocast; /**< number of received DATA frames (good FCS and no matching RA) */
- uint32 rxrtsocast; /**< number of received RTS not addressed to the MAC */
- uint32 rxctsocast; /**< number of received CTS not addressed to the MAC */
- uint32 rxdtmcast; /**< number of RX Data multicast frames received by the MAC */
- uint32 rxmpdu_mu; /**< Number of MU MPDUs received */
- uint32 rxtoolate; /**< receive too late */
- uint32 rxframe; /**< rx data frames */
- uint32 lqcm_report; /**< lqcm metric tx/rx idx */
- uint32 tx_toss_cnt; /* number of tx packets tossed */
- uint32 rx_toss_cnt; /* number of rx packets tossed */
- uint32 last_tx_toss_rsn; /* reason because of which last tx pkt tossed */
- uint32 last_rx_toss_rsn; /* reason because of which last rx pkt tossed */
- uint32 txbcnfrm; /**< beacons transmitted */
- uint32 rxretry; /* Number of rx packets received after retry */
- uint32 rxdup; /* Number of dump packet. Indicates whether peer is receiving ack */
- uint32 chswitch_cnt; /* Number of channel switches */
- uint32 pm_dur; /* Total sleep time in PM, msecs */
- uint32 rxholes; /* Count of missed packets from peer */
-} wl_periodic_compact_cntrs_v3_t;
-
-#define WL_PERIODIC_COMPACT_HE_CNTRS_VER_1 (1)
-typedef struct {
- uint16 version;
- uint16 len;
- uint32 he_rxtrig_rand;
- uint32 he_colormiss_cnt;
- uint32 he_txmtid_back;
- uint32 he_rxmtid_back;
- uint32 he_rxmsta_back;
- uint32 he_rxtrig_basic;
- uint32 he_rxtrig_murts;
- uint32 he_rxtrig_bsrp;
- uint32 he_rxdlmu;
- uint32 he_physu_rx;
- uint32 he_txtbppdu;
-} wl_compact_he_cnt_wlc_v1_t;
-
-#define WL_PERIODIC_COMPACT_HE_CNTRS_VER_2 (2)
-typedef struct {
- uint16 version;
- uint16 len;
- uint32 he_rxtrig_myaid;
- uint32 he_rxtrig_rand;
- uint32 he_colormiss_cnt;
- uint32 he_txmampdu;
- uint32 he_txmtid_back;
- uint32 he_rxmtid_back;
- uint32 he_rxmsta_back;
- uint32 he_txfrag;
- uint32 he_rxdefrag;
- uint32 he_txtrig;
- uint32 he_rxtrig_basic;
- uint32 he_rxtrig_murts;
- uint32 he_rxtrig_bsrp;
- uint32 he_rxhemuppdu_cnt;
- uint32 he_physu_rx;
- uint32 he_phyru_rx;
- uint32 he_txtbppdu;
- uint32 he_null_tbppdu;
- uint32 he_rxhesuppdu_cnt;
- uint32 he_rxhesureppdu_cnt;
- uint32 he_null_zero_agg;
- uint32 he_null_bsrp_rsp;
- uint32 he_null_fifo_empty;
-} wl_compact_he_cnt_wlc_v2_t;
-
-/* for future versions of this data structure, can consider wl_txbf_ecounters_t
- * which contains the full list of txbf dump counters
- */
-typedef struct {
- uint16 version;
- uint16 coreup;
- uint32 txndpa;
- uint32 txndp;
- uint32 rxsf;
- uint32 txbfm;
- uint32 rxndpa_u;
- uint32 rxndpa_m;
- uint32 bferpt;
- uint32 rxbfpoll;
- uint32 txsf;
-} wl_periodic_txbf_cntrs_v1_t;
-
typedef struct {
struct ether_addr BSSID;
chanspec_t chanspec;
wl_rx_signal_metric_t target_bss;
} wl_lqm_t;
-#define WL_PERIODIC_IF_STATE_VER_1 (1)
-typedef struct wl_if_state_compact {
- uint8 version;
- uint8 assoc_state;
- uint8 antenna_count; /**< number of valid antenna rssi */
- int8 noise_level; /**< noise right after tx (in dBm) */
- int8 snr; /* current noise level */
- int8 rssi_sum; /**< summed rssi across all antennas */
- uint16 pad16;
- int8 rssi_ant[WL_RSSI_ANT_MAX]; /**< rssi per antenna */
- struct ether_addr BSSID;
- chanspec_t chanspec;
-} wl_if_state_compact_t;
-
-#define WL_EVENT_STATISTICS_VER_1 (1)
-/* Event based statistics ecounters */
-typedef struct {
- uint16 version;
- uint16 pad;
- struct ether_addr BSSID; /* BSSID of the BSS */
- uint32 txdeauthivalclass;
-} wl_event_based_statistics_v1_t;
-
-#define WL_EVENT_STATISTICS_VER_2 (2)
-/* Event based statistics ecounters */
-typedef struct {
- uint16 version;
- uint16 pad;
- struct ether_addr BSSID; /* BSSID of the BSS */
- uint32 txdeauthivalclass;
- /* addition for v2 */
- int32 timestamp; /* last deauth time */
- struct ether_addr last_deauth; /* wrong deauth MAC */
- uint16 misdeauth; /* wrong deauth count every 1sec */
- int16 cur_rssi; /* current bss rssi */
- int16 deauth_rssi; /* deauth pkt rssi */
-} wl_event_based_statistics_v2_t;
-
-#define WL_EVENT_STATISTICS_VER_3 (3)
-/* Event based statistics ecounters */
-typedef struct {
- uint16 version;
- uint16 pad;
- struct ether_addr BSSID; /* BSSID of the BSS */
- uint16 PAD;
- uint32 txdeauthivalclass;
- /* addition for v2 */
- int32 timestamp; /* last deauth time */
- struct ether_addr last_deauth; /* wrong deauth MAC */
- uint16 misdeauth; /* wrong deauth count every 1sec */
- int16 cur_rssi; /* current bss rssi */
- int16 deauth_rssi; /* deauth pkt rssi */
- /* addition for v3 (roam statistics) */
- uint32 initial_assoc_time;
- uint32 prev_roam_time;
- uint32 last_roam_event_type;
- uint32 last_roam_event_status;
- uint32 last_roam_event_reason;
- uint16 roam_success_cnt;
- uint16 roam_fail_cnt;
- uint16 roam_attempt_cnt;
- uint16 max_roam_target_cnt;
- uint16 min_roam_target_cnt;
- uint16 max_cached_ch_cnt;
- uint16 min_cached_ch_cnt;
- uint16 partial_roam_scan_cnt;
- uint16 full_roam_scan_cnt;
- uint16 most_roam_reason;
- uint16 most_roam_reason_cnt;
-} wl_event_based_statistics_v3_t;
-
-#define WL_EVENT_STATISTICS_VER_4 (4u)
-/* Event based statistics ecounters */
-typedef struct {
- uint16 version;
- uint16 pad;
- struct ether_addr BSSID; /* BSSID of the BSS */
- uint16 PAD;
- uint32 txdeauthivalclass;
- /* addition for v2 */
- int32 timestamp; /* last deauth time */
- struct ether_addr last_deauth; /* wrong deauth MAC */
- uint16 misdeauth; /* wrong deauth count every 1sec */
- int16 cur_rssi; /* current bss rssi */
- int16 deauth_rssi; /* deauth pkt rssi */
-} wl_event_based_statistics_v4_t;
-
/* ##### Ecounters v2 section ##### */
#define ECOUNTERS_VERSION_2 2
#define ECOUNTERS_STATS_TYPES_FLAG_SLICE 0x1
#define ECOUNTERS_STATS_TYPES_FLAG_IFACE 0x2
#define ECOUNTERS_STATS_TYPES_FLAG_GLOBAL 0x4
-#define ECOUNTERS_STATS_TYPES_DEFAULT 0x8
/* Slice mask bits */
#define ECOUNTERS_STATS_TYPES_SLICE_MASK_SLICE0 0x1
uint8 stats_types_req[]; /* XTLVs of requested types */
} ecounters_stats_types_report_req_t;
-/* ##### Ecounters_Eventmsgs v2 section ##### */
-
-#define ECOUNTERS_EVENTMSGS_VERSION_2 2
-
-typedef struct event_ecounters_config_request_v2 {
- uint16 version; /* config version */
- uint16 len; /* Length of this struct including variable len */
- uint16 logset; /* Set where data will go. */
- uint16 event_id; /* Event id for which this config is meant for */
- uint8 flags; /* Config flags */
- uint8 pad[3]; /* Reserved for future use */
- uint8 ecounters_xtlvs[]; /* Statistics Types (tags) to retrieve. */
-} event_ecounters_config_request_v2_t;
-
-#define EVENT_ECOUNTERS_FLAGS_ADD (1 << 0) /* Add configuration for the event_id if set */
-#define EVENT_ECOUNTERS_FLAGS_DEL (1 << 1) /* Delete configuration for event_id if set */
-#define EVENT_ECOUNTERS_FLAGS_ANYIF (1 << 2) /* Interface filtering disable / off bit */
-#define EVENT_ECOUNTERS_FLAGS_BE (1 << 3) /* If cleared report stats of
- * one event log buffer
- */
-#define EVENT_ECOUNTERS_FLAGS_DEL_ALL (1 << 4) /* Delete all the configurations of
- * event ecounters if set
- */
-
-#define EVENT_ECOUNTERS_FLAGS_BUS (1 << 5) /* Add configuration for the bus events */
-#define EVENT_ECOUNTERS_FLAGS_BUS_H2D (1 << 6) /* Add configuration for the bus direction
- * 0 - D2H and 1 - H2D
- */
-
-#define EVENT_ECOUNTERS_FLAGS_DELAYED_FLUSH (1 << 7) /* Flush only when half of the total size
- * of blocks gets filled. This is to avoid
- * many interrupts to host.
- */
-#define EVENT_ECOUNTERS_FLAGS_USER (1 << 6) /* Add configuration for user defined events
- * Reuse the same flag as H2D
- */
-
-/* Ecounters suspend resume */
-#define ECOUNTERS_SUSPEND_VERSION_V1 1
-/* To be used in populating suspend_mask and suspend_bitmap */
-#define ECOUNTERS_SUSPEND_TIMER (1 << ECOUNTERS_TRIGGER_REASON_TIMER)
-#define ECOUNTERS_SUSPEND_EVENTS (1 << ECOUNTERS_TRIGGER_REASON_EVENTS)
-
-typedef struct ecounters_suspend {
- uint16 version;
- uint16 len;
- uint32 suspend_bitmap; /* type of ecounter reporting to be suspended */
- uint32 suspend_mask; /* type of ecounter reporting to be suspended */
-} ecounters_suspend_t;
-
/* -------------- dynamic BTCOEX --------------- */
#define DCTL_TROWS 2 /**< currently practical number of rows */
#define DCTL_TROWS_MAX 4 /**< 2 extra rows RFU */
} mu_group_t;
typedef struct mupkteng_sta {
- struct ether_addr ea;
+ struct ether_addr ea;
uint8 PAD[2];
- int32 nrxchain;
- int32 idx;
+ int32 nrxchain;
+ int32 idx;
} mupkteng_sta_t;
typedef struct mupkteng_client {
- int32 rspec;
- int32 idx;
- int32 flen;
- int32 nframes;
+ int32 rspec;
+ int32 idx;
+ int32 flen;
+ int32 nframes;
} mupkteng_client_t;
typedef struct mupkteng_tx {
- mupkteng_client_t client[8];
- int32 nclients;
- int32 ntx;
+ mupkteng_client_t client[8];
+ int32 nclients;
+ int32 ntx;
} mupkteng_tx_t;
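A short sketch of how the re-indented mupkteng structures fit together: two clients sharing one MU packet-engine tx request. The rspec value is left at 0 as a placeholder for a real rate spec, and the helper name is hypothetical.

#include <string.h>

static void mupkteng_fill_two_clients(mupkteng_tx_t *tx)
{
	int i;

	memset(tx, 0, sizeof(*tx));
	tx->nclients = 2;
	tx->ntx = 1;				/* assumed: number of tx rounds */
	for (i = 0; i < tx->nclients; i++) {
		tx->client[i].idx = i;		/* client index */
		tx->client[i].rspec = 0;	/* placeholder rate spec */
		tx->client[i].flen = 1024;	/* frame length in bytes */
		tx->client[i].nframes = 100;	/* frames to send per client */
	}
}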
/*
/* ifdef WL11ULB */
/* ULB Mode configured via "ulb_mode" IOVAR */
enum {
- ULB_MODE_DISABLED = 0,
- ULB_MODE_STD_ALONE_MODE = 1, /* Standalone ULB Mode */
- ULB_MODE_DYN_MODE = 2, /* Dynamic ULB Mode */
+ ULB_MODE_DISABLED = 0,
+ ULB_MODE_STD_ALONE_MODE = 1, /* Standalone ULB Mode */
+ ULB_MODE_DYN_MODE = 2, /* Dynamic ULB Mode */
/* Add all other enums before this */
- MAX_SUPP_ULB_MODES
+ MAX_SUPP_ULB_MODES
};
/* ULB BWs configured via "ulb_bw" IOVAR during Standalone Mode Only.
* 'ULB Operations' Attribute or 'ULB Mode Switch' Attribute)
*/
typedef enum {
- ULB_BW_DISABLED = 0,
- ULB_BW_10MHZ = 1, /* Standalone ULB BW in 10 MHz BW */
- ULB_BW_5MHZ = 2, /* Standalone ULB BW in 5 MHz BW */
- ULB_BW_2P5MHZ = 3, /* Standalone ULB BW in 2.5 MHz BW */
+ ULB_BW_DISABLED = 0,
+ ULB_BW_10MHZ = 1, /* Standalone ULB BW in 10 MHz BW */
+ ULB_BW_5MHZ = 2, /* Standalone ULB BW in 5 MHz BW */
+ ULB_BW_2P5MHZ = 3, /* Standalone ULB BW in 2.5 MHz BW */
/* Add all other enums before this */
- MAX_SUPP_ULB_BW
+ MAX_SUPP_ULB_BW
} ulb_bw_type_t;
/* endif WL11ULB */
+
#define WL_MESH_IOCTL_VERSION 1
#define MESH_IOC_BUFSZ 512 /* sufficient ioc buff size for mesh */
+#ifdef WLMESH
+typedef struct mesh_peer_info_ext {
+ mesh_peer_info_t peer_info;
+ uint8 pad1;
+ uint16 local_aid; /* AID generated by *local* to peer */
+ uint32 entry_state; /* see MESH_PEER_ENTRY_STATE_ACTIVE etc; valid
+ * ONLY for internal peering requests
+ */
+ int8 rssi;
+ uint8 pad2;
+ struct ether_addr ea; /* peer ea */
+} mesh_peer_info_ext_t;
+
+/* #ifdef WLMESH */
+typedef struct mesh_peer_info_dump {
+ uint32 buflen;
+ uint32 version;
+ uint16 count; /* number of results */
+ uint16 remaining; /* remaining results */
+ mesh_peer_info_ext_t mpi_ext[1];
+} mesh_peer_info_dump_t;
+#define WL_MESH_PEER_RES_FIXED_SIZE (sizeof(mesh_peer_info_dump_t) - sizeof(mesh_peer_info_ext_t))
+
+#endif /* WLMESH */
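Since mesh_peer_info_dump_t uses the count/remaining plus trailing-array convention, a minimal walk over a returned dump may help; it assumes the usual struct ether_addr with an octet[] member, and the print helper is hypothetical.

#include <stdio.h>

static void mesh_dump_print_peers(const mesh_peer_info_dump_t *dump)
{
	uint16 i;

	for (i = 0; i < dump->count; i++) {
		const mesh_peer_info_ext_t *p = &dump->mpi_ext[i];

		printf("peer %02x:%02x:%02x:%02x:%02x:%02x rssi %d state %u\n",
			p->ea.octet[0], p->ea.octet[1], p->ea.octet[2],
			p->ea.octet[3], p->ea.octet[4], p->ea.octet[5],
			p->rssi, p->entry_state);
	}
	if (dump->remaining)
		printf("%u more peers pending; repeat the request\n", dump->remaining);
}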
/* container for mesh iovtls & events */
typedef struct wl_mesh_ioc {
uint16 version; /* interface command or event version */
};
/* endif WLMESH */
+#ifdef WLMESH
+#ifndef SAE_MAX_PASSWD_LEN
+#define SAE_MAX_PASSWD_LEN 32
+#endif
+#endif
+
/* Fast BSS Transition parameter configuration */
#define FBT_PARAM_CURRENT_VERSION 0
/* values for IOV_MFP arg */
enum {
- WL_MFP_NONE = 0,
- WL_MFP_CAPABLE,
- WL_MFP_REQUIRED
+ WL_MFP_NONE = 0,
+ WL_MFP_CAPABLE,
+ WL_MFP_REQUIRED
};
typedef enum {
CHANSW_IOVAR = 7, /* channel switch due to IOVAR */
CHANSW_CSA_DFS = 8, /* channel switch due to chan switch announcement from AP */
CHANSW_APCS = 9, /* Channel switch from AP channel select module */
+ CHANSW_AWDL = 10, /* channel switch due to AWDL */
CHANSW_FBT = 11, /* Channel switch from FBT module for action frame response */
CHANSW_UPDBW = 12, /* channel switch at update bandwidth */
CHANSW_ULB = 13, /* channel switch at ULB */
#define WL_RSDB_CONFIG_LEN sizeof(rsdb_config_t)
+
typedef uint8 rsdb_opmode_t;
typedef uint32 rsdb_flags_t;
/* Definitions for slot_bss chanseq iovar */
#define WL_SLOT_BSS_VERSION 1
-/* critical slots max size */
-#define WL_SLOTTED_BSS_CS_BMP_CFG_MAX_SZ 128 /* arbitrary */
-
enum wl_slotted_bss_cmd_id {
WL_SLOTTED_BSS_CMD_VER = 0,
- WL_SLOTTED_BSS_CMD_CHANSEQ = 1,
- WL_SLOTTED_BSS_CMD_CS_BMP = 2 /* critical slots bitmap */
+ WL_SLOTTED_BSS_CMD_CHANSEQ = 1
};
-
typedef uint16 chan_seq_type_t;
enum chan_seq_type {
- CHAN_SEQ_TYPE_AWDL = 1,
- CHAN_SEQ_TYPE_SLICE = 2,
- CHAN_SEQ_TYPE_NAN = 3, /* NAN avail XTLV */
- CHAN_SEQ_TYPE_NANHO = 4 /* NANHO channel schedule XTLV */
+ CHAN_SEQ_TYPE_AWDL = 1,
+ CHAN_SEQ_TYPE_SLICE = 2,
+ CHAN_SEQ_TYPE_NAN = 3
};
-
typedef uint8 sched_flag_t;
enum sched_flag {
NO_SDB_SCHED = 0x1,
typedef struct slice_chan_seq {
uint8 slice_index; /* 0(Main) or 1 (Aux) */
uint8 num_chanspecs;
- uint8 dur;
- uint8 pad;
+ uint16 pad;
chanspec_t chanspecs[1];
} slice_chan_seq_t;
-#define SLOT_BSS_SLICE_TYPE_DUR_MAX_RANGE 2u
-#define SLOTTED_BSS_AGGR_EN (1 << 0) /* Bitmap of mode */
-#define SLOTTED_BSS_AGGR_LIMIT_DUR (1 << 1) /* Jira 49554 */
-#define SLOTTED_BSS_HE_1024_QAM_SUPPORT (1 << 2) /* MCS10-11 Support */
-
#define WL_SLICE_CHAN_SEQ_FIXED_LEN OFFSETOF(slice_chan_seq_t, chanspecs)
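Because the chanspec list hangs off a one-element array, the usable length of a slice_chan_seq_t is WL_SLICE_CHAN_SEQ_FIXED_LEN plus the chanspec payload; a hedged allocation sketch follows (the helper name is invented, while chanspec_t and OFFSETOF come from the surrounding driver headers).

#include <stdlib.h>
#include <string.h>

static slice_chan_seq_t *slice_chan_seq_alloc(uint8 slice_index,
	const chanspec_t *chspecs, uint8 n)
{
	size_t len = WL_SLICE_CHAN_SEQ_FIXED_LEN + n * sizeof(chanspec_t);
	slice_chan_seq_t *seq = calloc(1, len);

	if (seq == NULL)
		return NULL;
	seq->slice_index = slice_index;		/* 0 (Main) or 1 (Aux) */
	seq->num_chanspecs = n;
	memcpy(seq->chanspecs, chspecs, n * sizeof(chanspec_t));
	return seq;
}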
-/* Definitions for slotted_bss stats */
-#define SBSS_STATS_VERSION 1
-#define SBSS_STATS_CURRENT_VERSION SBSS_STATS_VERSION
-
-#define SBSS_MAX_CHAN_STATS 4
-
-typedef struct sbss_core_stats {
- uint32 sb_slot_start;
- uint32 sb_slot_end;
- uint32 sb_slot_skip;
- uint32 mismatch_count;
-} sbss_core_stats_t;
-
-typedef struct sbss_chan_stats {
- chanspec_t chanspec;
- uint32 slot_start;
- uint32 slot_end;
- uint32 slot_skip;
-} sbss_chan_stats_t;
-
-typedef struct sbss_stats_v1 {
- uint16 version;
- uint16 length;
- sbss_core_stats_t corestats[MAX_NUM_D11CORES];
- sbss_chan_stats_t sbss_chanstats[MAX_NUM_D11CORES][SBSS_MAX_CHAN_STATS];
-} sbss_stats_t;
-
-/* slotted bss critical slots */
-typedef struct wl_sbss_cs_bmp_s {
- uint8 bitmap_len;
- uint8 pad[3];
- uint8 bitmap[];
-} wl_sbss_cs_bmp_t;
typedef struct sim_pm_params {
uint32 enabled;
uint16 up;
} sim_pm_params_t;
-/* Digital napping status */
-#define WL_NAP_STATUS_VERSION_1 1
-typedef struct wl_nap_status_v1 {
- uint16 version; /* structure version */
- uint16 len; /* length of returned data */
- uint16 fw_status; /* bitmask of FW disable reasons */
- uint8 hw_status; /* bitmask for actual HW state info */
- uint8 slice_index; /* which slice this represents */
- uint32 total_disable_dur; /* total time (ms) disabled for fw_status */
-} wl_nap_status_v1_t;
-
/* Bits for fw_status */
-#define NAP_DISABLED_HOST 0x0001 /* Host has disabled through nap_enable */
-#define NAP_DISABLED_RSSI 0x0002 /* Disabled because of nap_rssi_threshold */
-#define NAP_DISABLED_SCAN 0x0004 /* Disabled because of scan */
-#define NAP_DISABLED_ASSOC 0x0008 /* Disabled because of association */
-#define NAP_DISABLED_LTE 0x0010 /* Disabled because of LTE */
-#define NAP_DISABLED_ACI 0x0020 /* Disabled because of ACI mitigation */
+#define NAP_DISABLED_HOST 0x01 /* Host has disabled through nap_enable */
+#define NAP_DISABLED_RSSI 0x02 /* Disabled because of nap_rssi_threshold */
/* Bits for hw_status */
#define NAP_HWCFG 0x01 /* State of NAP config bit in phy HW */
-#define NAP_NOCLK 0x80 /* No clock to read HW (e.g. core down) */
/* ifdef WL_NATOE */
#define WL_NATOE_IOCTL_VERSION 1
#define WL_NATOE_IOC_BUFSZ 512 /* sufficient ioc buff size for natoe */
#define WL_NATOE_DBG_STATS_BUFSZ 2048
-#define NATOE_FLAGS_ENAB_MASK 0x1
-#define NATOE_FLAGS_ACTIVE_MASK 0x2
-#define NATOE_FLAGS_PUBNW_MASK 0x4
-#define NATOE_FLAGS_PVTNW_MASK 0x8
-#define NATOE_FLAGS_ENAB_SHFT_MASK 0
-#define NATOE_FLAGS_ACTIVE_SHFT_MASK 1
-#define NATOE_FLAGS_PUBNW_SHFT_MASK 2
-#define NATOE_FLAGS_PVTNW_SHFT_MASK 3
-#define NATOE_FLAGS_PUB_NW_UP (1 << NATOE_FLAGS_PUBNW_SHFT_MASK)
-#define NATOE_FLAGS_PVT_NW_UP (1 << NATOE_FLAGS_PVTNW_SHFT_MASK)
-
-#define PCIE_FRWDPKT_STATS_VERSION 1
-
-/* Module version is 1 for IGUANA */
-#define WL_NATOE_MODULE_VER_1 1
-/* Module version is 2 for Lemur */
-#define WL_NATOE_MODULE_VER_2 2
-
-/* WL_NATOE_CMD_MOD_VER */
-typedef uint16 wl_natoe_ver_t;
+
/* config natoe STA and AP IP's structure */
typedef struct {
uint32 sta_ip;
uint8 data[]; /* var len payload of bcm_xtlv_t type */
} wl_natoe_ioc_t;
-typedef struct wl_natoe_pool_stats_v1 {
- /* For debug purposes */
- uint16 poolreorg_cnt;
- uint16 poolrevert_cnt;
- uint16 txfrag_state;
- uint16 rxfrag_state;
- uint16 txfrag_plen;
- uint16 rxfrag_plen;
- uint16 tx_pavail;
- uint16 rx_pavail;
- uint16 txmin_bkup_bufs;
- uint16 rxmin_bkup_bufs;
- uint16 pktpool_sbuf_alloc;
- uint16 pktpool_plen;
- uint16 pktpool_pavail;
- /* Peak shared buffer count in all iterations */
- uint16 sbuf_peak;
- /* Peak shared buffer count in current D3 iteration */
- uint16 sbuf_peak_cur;
-} wl_natoe_pool_stats_v1_t;
-
-typedef struct wl_natoe_arp_entry_v1 {
- struct ipv4_addr ip;
- struct ether_addr mac_addr;
- uint8 lifetime;
- uint8 flags;
-} wl_natoe_arp_entry_v1_t;
-
-typedef struct wl_natoe_dbg_arp_tbl_info_v1 {
- uint8 valid_arp_entries;
- uint8 PAD[3];
- wl_natoe_arp_entry_v1_t arp_ent[];
-} wl_natoe_dbg_arp_tbl_info_v1_t;
-
-typedef struct wl_natoe_skip_port_entry_v1 {
- struct ipv4_addr srcip;
- uint16 src_port;
- uint16 lifetime;
-} wl_natoe_skip_port_entry_v1_t;
-
-typedef struct wl_natoe_skip_port_info_v1 {
- uint8 valid_entries;
- uint8 PAD[3];
- wl_natoe_skip_port_entry_v1_t skip_port_ent[];
-} wl_natoe_skip_port_info_v1_t;
-
-typedef struct wl_natoe_dbg_stats_v1 {
- uint16 active_nat_entries;
- uint16 active_dns_entries;
- uint16 active_icmp_entries;
- uint16 valid_arp_entries;
- uint16 prev_nat_entries;
- uint16 prev_dns_entries;
- uint16 tcp_fast_reclaim_cnt;
- uint16 mcast_packets;
- uint16 bcast_packets;
- uint16 port_commands_rcvd;
- uint16 unsupported_prot;
- uint16 arp_req_sent;
- uint16 arp_rsp_rcvd;
- uint16 non_ether_frames;
- uint16 port_alloc_fail;
- uint16 srcip_tbl_full;
- uint16 dstip_tbl_full;
- uint16 nat_tbl_full;
- uint16 icmp_error_cnt;
- uint16 pkt_drops_resource;
- uint32 frwd_nat_pkt_cnt;
- uint32 reverse_nat_pkt_cnt;
- uint16 pub_nw_chspec;
- uint16 pvt_nw_chspec;
- uint8 pubnw_cfg_idx;
- uint8 pvtnw_cfg_idx;
- uint8 pubnw_cfg_ID;
- uint8 pvtnw_cfg_ID;
- uint16 natoe_flags;
-} wl_natoe_dbg_stats_v1_t;
-
-typedef struct wl_natoe_exception_port_inf_v1 {
- uint16 except_bmap_size;
- uint8 port_except_bmap[];
-} wl_natoe_exception_port_inf_v1_t;
-
-typedef struct wl_natoe_dstnat_entry_v1 {
- struct ipv4_addr clientip;
- struct ether_addr client_mac_addr;
- uint16 client_listenport;
- uint8 opcode;
-} wl_natoe_dstnat_entry_v1_t;
-
-typedef struct wl_pcie_frwd_stats_v1 {
- uint16 version;
- uint16 len;
- uint16 frwd_txfrag_q_cnt; /* no. of txfrags in frwd_txfrag_list */
- /* no. of outstanding lbufs in txpath on if0/ifx */
- uint16 tx_frwd_n_lb_if0;
- uint16 tx_frwd_n_lb_ifx;
- /* no. of outstanding lfrags in txpath on if0/ifx */
- uint16 tx_frwd_n_lf_if0;
- uint16 tx_frwd_n_lf_ifx;
- /* no. of pending frwd pkts dropped upon d3 entry */
- uint16 tx_frwd_d3_drop_cnt;
- /* Total no. of lbufs frwded in txpath on if0/ifx */
- uint32 tx_frwd_n_lb_if0_cnt;
- uint32 tx_frwd_n_lb_ifx_cnt;
- /* Total no. of lfrags frwded in txpath on if0/ifx */
- uint32 tx_frwd_n_lf_if0_cnt;
- uint32 tx_frwd_n_lf_ifx_cnt;
- uint32 frwd_tx_drop_thr_cnt; /* no. of pkts dropped due to txfrag threshold */
- uint32 frwd_tx_drop_err_cnt; /* no. of pkts dropped due to txfrags not avail / errors */
-} wl_pcie_frwd_stats_v1_t;
-
enum wl_natoe_cmds {
- WL_NATOE_CMD_MOD_VER = 0,
WL_NATOE_CMD_ENABLE = 1,
WL_NATOE_CMD_CONFIG_IPS = 2,
WL_NATOE_CMD_CONFIG_PORTS = 3,
WL_NATOE_CMD_DBG_STATS = 4,
WL_NATOE_CMD_EXCEPTION_PORT = 5,
WL_NATOE_CMD_SKIP_PORT = 6,
- WL_NATOE_CMD_TBL_CNT = 7,
- WL_NATOE_CMD_CONFIG_DSTNAT = 8,
- WL_NATOE_CMD_CTRL = 9
+ WL_NATOE_CMD_TBL_CNT = 7
};
enum wl_natoe_cmd_xtlv_id {
- WL_NATOE_XTLV_MOD_VER = 0,
WL_NATOE_XTLV_ENABLE = 1,
WL_NATOE_XTLV_CONFIG_IPS = 2,
WL_NATOE_XTLV_CONFIG_PORTS = 3,
WL_NATOE_XTLV_DBG_STATS = 4,
WL_NATOE_XTLV_EXCEPTION_PORT = 5,
WL_NATOE_XTLV_SKIP_PORT = 6,
- WL_NATOE_XTLV_TBL_CNT = 7,
- WL_NATOE_XTLV_ARP_TBL = 8,
- WL_NATOE_XTLV_POOLREORG = 9,
- WL_NATOE_XTLV_CONFIG_DSTNAT = 10,
- WL_NATOE_XTLV_CTRL = 11
+ WL_NATOE_XTLV_TBL_CNT = 7
};
/* endif WL_NATOE */
uint32 four_way_hs_fail; /* No of 4-way handshake fails */
} wl_idauth_counters_t;
-#define WLC_UTRACE_LEN (1024u * 4u) // default length
-#define WLC_UTRACE_LEN_AUX (1024u * 3u) // reduced length to fit smaller AUX BM
-#define WLC_UTRACE_LEN_SC (1024u * 3u) // reduced length to fit smaller Scan core BM
-
+#define WLC_UTRACE_LEN 512
#define WLC_UTRACE_READ_END 0
#define WLC_UTRACE_MORE_DATA 1
-
typedef struct wl_utrace_capture_args_v1 {
uint32 length;
uint32 flag;
uint32 flag; /* Indicates if there is more data or not */
} wl_utrace_capture_args_v2_t;
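The READ_END/MORE_DATA pair above implies a simple drain loop on the host side; the sketch below assumes a hypothetical get_utrace_chunk() transport wrapper and the struct wl_utrace_capture_args_v1 tag defined just above.

/* Hypothetical transport wrapper: returns 0 on success, fills *args and up to
 * max_len bytes of trace data in an adjacent buffer.
 */
extern int get_utrace_chunk(void *ctx, struct wl_utrace_capture_args_v1 *args,
	unsigned int max_len);

static void utrace_read_all(void *ctx)
{
	struct wl_utrace_capture_args_v1 args;

	do {
		if (get_utrace_chunk(ctx, &args, WLC_UTRACE_LEN) != 0)
			break;				/* transport error */
		/* args.length bytes of trace data are now available */
	} while (args.flag == WLC_UTRACE_MORE_DATA);
	/* args.flag == WLC_UTRACE_READ_END once the capture is drained */
}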
-/* Signal read end. */
-#define WLC_REGVAL_READ_END 0
-/* Signal more data pending. */
-#define WLC_REGVAL_MORE_DATA 1
-/* Internal read state. */
-#define WLC_REGVAL_READ_CONTINUE 2
-
-#define WLC_REGVAL_DUMP_PHYREG 0
-#define WLC_REGVAL_DUMP_RADREG 1
-
-#define PHYREGVAL_CAPTURE_BUFFER_LEN 2048
-
-typedef struct wl_regval_capture_args {
- uint32 control_flag; /* Carries status information. */
-} wl_regval_capture_args_t;
-
/* XTLV IDs for the Health Check "hc" iovar top level container */
enum {
WL_HC_XTLV_ID_CAT_HC = 1, /* category for HC as a whole */
WL_HC_XTLV_ID_CAT_DATAPATH_TX = 2, /* Datapath Tx */
WL_HC_XTLV_ID_CAT_DATAPATH_RX = 3, /* Datapath Rx */
WL_HC_XTLV_ID_CAT_SCAN = 4, /* Scan */
- WL_HC_XTLV_ID_CAT_EVENTMASK = 5, /* Health Check event mask. */
};
/* Health Check: Common XTLV IDs for sub-elements in the top level container
WL_HC_TX_XTLV_ID_VAL_FC_FORCE = 7, /* flow ctl force failure */
WL_HC_TX_XTLV_ID_VAL_DELAY_TO_TRAP = 8, /* delay threshold for forced trap */
WL_HC_TX_XTLV_ID_VAL_DELAY_TO_RPT = 9, /* delay threshold for event log report */
- WL_HC_TX_XTLV_ID_VAL_FAILURE_TO_RPT = 10, /* threshold for consecutive TX failures */
};
/* Health Check: Datapath RX IDs */
WL_HC_RX_XTLV_ID_VAL_STALL_THRESHOLD = 3, /* stall_threshold */
WL_HC_RX_XTLV_ID_VAL_STALL_SAMPLE_SIZE = 4, /* stall_sample_size */
WL_HC_RX_XTLV_ID_VAL_STALL_FORCE = 5, /* stall test trigger */
- WL_HC_RX_XTLV_ID_VAL_STALL_UC_DECRYPT_FAIL = 6, /* trigger uc decrypt failures */
- WL_HC_RX_XTLV_ID_VAL_STALL_BCMC_DECRYPT_FAIL = 7, /* trigger bcmc decrypt failures */
};
/* Health Check: Datapath SCAN IDs */
WL_HC_XTLV_ID_VAL_SCAN_STALL_THRESHOLD = 1, /* scan stall threshold */
};
-/* Health check: PHY IDs */
-/* Needed for iguana 13.35 branch */
-typedef enum {
- PHY_HC_DD_ALL = 0,
- PHY_HC_DD_TEMPSENSE = 1,
- PHY_HC_DD_VCOCAL = 2,
- PHY_HC_DD_RX = 3,
- PHY_HC_DD_TX = 4,
- PHY_HC_DD_LAST /* This must be the last entry */
-} phy_hc_dd_type_t;
-
-typedef enum {
- PHY_HC_DD_TEMP_FAIL = 0,
- PHY_HC_DD_VCO_FAIL = 1,
- PHY_HC_DD_RXDSN_FAIL = 2,
- PHY_HC_DD_TXPOW_FAIL = 3,
- PHY_HC_DD_END /* This must be the last entry */
-} phy_hc_dd_type_v2_t;
-
/* IDs of Health Check report structures for sub types of health checks within WL */
-typedef enum wl_hc_dd_type {
- WL_HC_DD_PCIE = 0, /* PCIe */
- WL_HC_DD_RX_DMA_STALL = 1, /* RX DMA stall check */
- WL_HC_DD_RX_STALL = 2, /* RX stall check */
- WL_HC_DD_TX_STALL = 3, /* TX stall check */
- WL_HC_DD_SCAN_STALL = 4, /* SCAN stall check */
- WL_HC_DD_PHY = 5, /* PHY health check */
- WL_HC_DD_REINIT = 6, /* Reinit due to other reasons */
- WL_HC_DD_TXQ_STALL = 7, /* TXQ stall */
- WL_HC_DD_RX_STALL_V2 = 8, /* RX stall check v2 */
+enum {
+ WL_HC_DD_UNDEFINED = 0, /* Undefined */
+ WL_HC_DD_RX_DMA_STALL = 1, /* RX DMA stall check */
+ WL_HC_DD_RX_STALL = 2, /* RX stall check */
+ WL_HC_DD_TX_STALL = 3, /* TX stall check */
+ WL_HC_DD_SCAN_STALL = 4, /* SCAN stall check */
WL_HC_DD_MAX
-} wl_hc_dd_type_t;
-
-/* RX stall reason codes sent with wl_rx_hc_info_v2_t */
-typedef enum bcm_rx_hc_stall_reason {
- BCM_RX_HC_RESERVED = 0,
- BCM_RX_HC_UNSPECIFIED = 1, /* All other. Catch all */
- BCM_RX_HC_UNICAST_DECRYPT_FAIL = 2, /* Unicast decrypt fail */
- BCM_RX_HC_BCMC_DECRYPT_FAIL = 3, /* BCMC decrypt fail */
- BCM_RX_HC_UNICAST_REPLAY = 4, /* Unicast replay */
- BCM_RX_HC_BCMC_REPLAY = 5, /* BCMC replay */
- BCM_RX_HC_AMPDU_DUP = 6, /* AMPDU DUP */
- BCM_RX_HC_MAX
-} bcm_rx_hc_stall_reason_t;
+};
/*
* Health Check report structures for sub types of health checks within WL
/* Health Check report structure for Rx dropped packet failure check */
typedef struct {
- uint16 type; /* WL_HC_RX_DD_STALL */
+ uint16 type;
uint16 length;
uint32 bsscfg_idx;
uint32 rx_hc_pkts;
uint32 rx_hc_alert_th;
} wl_rx_hc_info_t;
-/* Health Check report structure for Rx dropped packet failure check */
-typedef struct {
- uint16 type; /* WL_HC_RX_DD_STALL_V2 */
- uint16 length;
- uint8 if_idx; /* interface index on which issue is reported */
- uint8 ac; /* access category on which this problem is seen */
- uint8 pad[2]; /* Reserved */
- uint32 rx_hc_pkts;
- uint32 rx_hc_dropped_all;
- uint32 rx_hc_alert_th;
- uint32 reason; /* refer to bcm_rx_hc_stall_reason_t above */
- struct ether_addr peer_ea;
-} wl_rx_hc_info_v2_t;
-
/* HE top level command IDs */
enum {
- WL_HE_CMD_ENAB = 0u,
- WL_HE_CMD_FEATURES = 1u,
- WL_HE_CMD_TWT_SETUP = 2u,
- WL_HE_CMD_TWT_TEARDOWN = 3u,
- WL_HE_CMD_TWT_INFO = 4u,
- WL_HE_CMD_BSSCOLOR = 5u,
- WL_HE_CMD_PARTIAL_BSSCOLOR = 6u,
- WL_HE_CMD_CAP = 7u,
- WL_HE_CMD_STAID = 8u,
- WL_HE_CMD_MUEDCA = 9u,
- WL_HE_CMD_RTSDURTHRESH = 10u,
- WL_HE_CMD_PEDURATION = 11u,
- WL_HE_CMD_TESTBED_MODE = 12u,
- WL_HE_CMD_OMI_CONFIG = 13u,
- WL_HE_CMD_OMI_STATUS = 14u,
- WL_HE_CMD_OMI_ULMU_THROTTLE = 15u,
- WL_HE_CMD_ULMU_DISABLE_POLICY = 16u,
- WL_HE_CMD_ULMU_DISABLE_STATS = 17u,
- WL_HE_CMD_OMI_DLMU_RSD_RCM_MPF_MAP = 18u,
- WL_HE_CMD_SR_PROHIBIT = 19u,
+ WL_HE_CMD_ENAB = 0,
+ WL_HE_CMD_FEATURES = 1,
+ WL_HE_CMD_TWT_SETUP = 2,
+ WL_HE_CMD_TWT_TEARDOWN = 3,
+ WL_HE_CMD_TWT_INFO = 4,
+ WL_HE_CMD_BSSCOLOR = 5,
+ WL_HE_CMD_PARTIAL_BSSCOLOR = 6,
WL_HE_CMD_LAST
};
-enum {
- WL_HE_MUEDCA_IE = 0,
- WL_HE_MUEDCA_SHM = 1,
- WL_HE_MUEDCA_LAST
-};
-
-#ifdef WL11AX
-
-/* struct for dump MU EDCA IE/SHM paramters */
-typedef struct wl_he_muedca_ie_v1 {
- uint16 version; /* structure version */
- uint16 length; /* data length (starting after this field) */
- uint8 mu_qos_info;
- he_mu_ac_param_record_t param_ac[AC_COUNT];
-} wl_he_muedca_ie_v1_t;
-
-typedef wl_he_muedca_ie_v1_t wl_he_muedca_ie_t;
-
-#define WL_HE_MUEDCA_VER_1 1
-
-#endif /* WL11AX */
-
-/* TWT top level command IDs */
-enum {
- WL_TWT_CMD_ENAB = 0,
- WL_TWT_CMD_SETUP = 1,
- WL_TWT_CMD_TEARDOWN = 2,
- WL_TWT_CMD_INFO = 3,
- WL_TWT_CMD_AUTOSCHED = 4,
- WL_TWT_CMD_STATS = 5,
- WL_TWT_CMD_EARLY_TERM_TIME = 6,
- WL_TWT_CMD_RESP_CONFIG = 7,
- WL_TWT_CMD_SPPS_ENAB = 8,
- WL_TWT_CMD_LAST
-};
-
-/* TODO: Remove the follwoing after mering TWT changes to trunk */
-#define WL_TWT_CMD_DEF_IN_WLIOCTL 1
-
-#define WL_HEB_VER_1 1
+#define WL_HEB_VERSION 0
/* HEB top level command IDs */
enum {
- WL_HEB_CMD_ENAB = 0,
- WL_HEB_CMD_NUM_HEB = 1,
- WL_HEB_CMD_COUNTERS = 2,
- WL_HEB_CMD_CLEAR_COUNTERS = 3,
- WL_HEB_CMD_CONFIG = 4,
- WL_HEB_CMD_STATUS = 5,
+ WL_HEB_CMD_ENAB = 0,
+ WL_HEB_CMD_NUM_HEB = 1,
+ WL_HEB_CMD_COUNTERS = 1,
+ WL_HEB_CMD_CLEAR_COUNTERS = 2,
WL_HEB_CMD_LAST
};
/* HEB counters structures */
-typedef struct wl_heb_int_cnt_v1 {
+typedef struct {
uint16 pre_event;
uint16 start_event;
uint16 end_event;
uint16 missed;
-} wl_heb_int_cnt_v1_t;
+} wl_heb_int_cnt_t;
-typedef struct wl_heb_cnt_v1 {
+typedef struct {
/* structure control */
uint16 version; /* structure version */
uint16 length; /* data length (starting after this field) */
- wl_heb_int_cnt_v1_t heb_int_cnt[1];
-} wl_heb_cnt_v1_t;
+ wl_heb_int_cnt_t heb_int_cnt[1];
+} wl_heb_cnt_t;
-// struct for configuring HEB
-typedef struct wl_config_heb_fill_v1 {
- uint16 version; /* structure version */
- uint16 length; /* data length (starting after this field) */
- uint32 duration;
- uint32 periodicity;
- uint16 heb_idx;
- uint16 preeventtime;
- uint8 count;
- uint8 PAD[3];
-} wl_config_heb_fill_v1_t;
-
-typedef struct wl_heb_blk_params_v1 {
- /* Don't change the order of following elements. This is as per the HEB HW spec */
- uint32 event_int_val_l;
- uint32 event_int_val_h;
- uint32 param2;
- uint32 param3;
- uint32 pre_event_intmsk_bmp;
- uint32 start_event_intmsk_bmp;
- uint32 end_event_intmsk_bmp;
- uint32 event_driver_info;
- uint16 param1;
- uint8 event_count;
- uint8 noa_invert;
-} wl_heb_blk_params_v1_t;
-
-typedef struct wl_heb_int_status_v1 {
- uint32 heb_idx;
- wl_heb_blk_params_v1_t blk_params;
-} wl_heb_reg_status_v1_t;
-
-typedef struct wl_heb_status_v1 {
- uint16 version; /* structure version */
- uint16 length; /* data length (starting after this field) */
- wl_heb_reg_status_v1_t heb_status[1];
-} wl_heb_status_v1_t;
-
-/* HWA */
-#define WL_HWA_VER_1 1
-
-/* HWA top level command IDs */
-typedef enum wl_hwa_cmd_type {
- WL_HWA_CMD_ENAB = 0,
- WL_HWA_CMD_CAPS = 1,
- WL_HWA_CMD_COUNTERS = 2,
- WL_HWA_CMD_CLRCNTS = 3,
- WL_HWA_CMD_REGDUMP = 4,
- WL_HWA_CMD_INDUCE_ERR = 5,
- WL_HWA_CMD_LAST
-} wl_hwa_cmd_type_t;
-
-typedef struct wl_hwa_cnts_info_v1 {
- uint16 cnt_rxs_filter; /* #filters added */
- uint16 cnt_rxs_chainable; /* #rxchainable matched */
-} wl_hwa_cnts_info_v1_t;
-
-/* HWA dump info structures */
-typedef struct wl_hwa_hwcaps_info_v1 {
- uint16 up; /* is hwa init'd/deint'd */
- uint16 corerev; /* hwa core revision */
- uint32 submodules_mask; /* mask for hwa submodules that are enabled */
-} wl_hwa_hwcaps_info_v1_t;
-
-typedef struct wl_hwa_cnts_v1 {
- /* structure control */
- uint16 version; /* structure version */
- uint16 length; /* data length (starting after this field) */
- wl_hwa_cnts_info_v1_t hwa_cnts_info[]; /* variable length array with hwa counters */
-} wl_hwa_cnts_v1_t;
-
-/* All submodules, order is important and define order of initialization. */
-/* Not use enumeration here because these defines are also used in macro */
-#define HWA_SUBMODULES_COMMON 0 /**< Common */
-#define HWA_SUBMODULES_TXPOST 1u /**< TxPost 3a */
-#define HWA_SUBMODULES_RXPOSTFILL 2u /**< RxPost and Fill 1a/1b */
-#define HWA_SUBMODULES_TXDMA 3u /**< TxDMA 3b */
-#define HWA_SUBMODULES_TXS 4u /**< TxStatus 4a */
-#define HWA_SUBMODULES_BUFMGR 5u /**< Buffer Manager, RX and TX. Do this last */
-#define HWA_SUBMODULES_CPL 6u /**< Completion 2b/4b */
-#define HWA_SUBMODULES_RXS 7u /**< RxStatus 2a */
-#define HWA_SUBMODULES_NUM 8u /**< number of submodules */
-
-#define HWA_SUBMODULES_ALL 0xFF /* Bitmaps for all submodules */
-#ifdef HWA
-#define HWA_SUBMODULE_MASK(submodule) (1u << (submodule))
-#else
-#define HWA_SUBMODULE_MASK(submodule) (0)
-#endif /* HWA */
-/*
- * NOTES:
- * wl_twt_sdesc_t is used to support both broadcast TWT and individual TWT.
- * Value in bit[0:2] in 'flow_id' field is interpreted differently:
- * - flow id for individual TWT (when WL_TWT_FLOW_FLAG_BROADCAST bit is NOT set
- * in 'flow_flags' field)
- * - flow id as defined in Table 8-248l1 for broadcast TWT (when
- * WL_TWT_FLOW_FLAG_BROADCAST bit is set)
- * In latter case other bits could be used to differentiate different flows
- * in order to support multiple broadcast TWTs with the same flow id.
- */
/* TWT Setup descriptor */
-typedef struct wl_twt_sdesc {
+typedef struct {
/* Setup Command. */
- uint8 setup_cmd; /* See TWT_SETUP_CMD_XXXX in 802.11ah.h */
- uint8 flow_flags; /* Flow attributes. See WL_TWT_FLOW_FLAG_XXXX below */
- uint8 flow_id; /* must be between 0 and 7. Set 0xFF for auto assignment */
- uint8 bid; /* must be between 0 and 31. Set 0xFF for auto assignment */
- uint8 channel; /* Twt channel - Not used for now */
- uint8 negotiation_type; /* Negotiation Type: See macros TWT_NEGO_TYPE_X */
- uint8 frame_recomm; /* frame recommendation for broadcast TWTs - Not used for now */
+ uint8 setup_cmd; /* See TWT_SETUP_CMD_XXXX in 802.11ah.h,
+ * valid when bcast_twt is FALSE.
+ */
+ /* Flow attributes */
+ uint8 flow_flags; /* See WL_TWT_FLOW_FLAG_XXXX below */
+ uint8 flow_id; /* must be between 0 and 7 */
+ /* Target Wake Time */
uint8 wake_type; /* See WL_TWT_TIME_TYPE_XXXX below */
uint32 wake_time_h; /* target wake time - BSS TSF (us) */
uint32 wake_time_l;
- uint32 wake_dur; /* target wake duration in unit of microseconds */
+ uint32 wake_dur; /* target wake duration in us units */
uint32 wake_int; /* target wake interval */
- uint32 btwt_persistence; /* Broadcast TWT Persistence */
- uint32 wake_int_max; /* max wake interval(uS) for TWT */
- uint8 duty_cycle_min; /* min duty cycle for TWT(Percentage) */
- uint8 pad;
- /* deprecated - to be removed */
- uint16 li;
-
} wl_twt_sdesc_t;
/* Flow flags */
-#define WL_TWT_FLOW_FLAG_UNANNOUNCED (1u << 0u)
-#define WL_TWT_FLOW_FLAG_TRIGGER (1u << 1u)
-#define WL_TWT_FLOW_FLAG_REQUEST (1u << 2u)
-#define WL_TWT_FLOW_FLAG_PROTECT (1u << 3u)
-#define WL_TWT_FLOW_FLAG_RESPONDER_PM (1u << 4u)
-#define WL_TWT_FLOW_FLAG_UNSOLICITED (1u << 5u)
-
-/* Deprecated - To be removed */
-#define WL_TWT_FLOW_FLAG_BROADCAST (1u << 5u)
-#define WL_TWT_FLOW_FLAG_WAKE_TBTT_NEGO (1u << 6u)
-#define WL_TWT_FLOW_FLAG_IMPLICIT (1u << 7u)
+#define WL_TWT_FLOW_FLAG_BROADCAST (1<<0)
+#define WL_TWT_FLOW_FLAG_IMPLICIT (1<<1)
+#define WL_TWT_FLOW_FLAG_UNANNOUNCED (1<<2)
+#define WL_TWT_FLOW_FLAG_TRIGGER (1<<3)
/* Flow id */
-#define WL_TWT_FLOW_ID_FID 0x07u /* flow id */
-#define WL_TWT_FLOW_ID_GID_MASK 0x70u /* group id - broadcast TWT only */
-#define WL_TWT_FLOW_ID_GID_SHIFT 4u
-
-#define WL_TWT_INV_BCAST_ID 0xFFu
-#define WL_TWT_INV_FLOW_ID 0xFFu
-
-/* auto flow_id */
-#define WL_TWT_SETUP_FLOW_ID_AUTO 0xFFu
-/* auto broadcast ID */
-#define WL_TWT_SETUP_BCAST_ID_AUTO 0xFFu
-/* Infinite persistence for broadcast schedule */
-#define WL_TWT_INFINITE_BTWT_PERSIST 0xFFFFFFFFu
-
-/* should be larger than what chip supports */
-#define WL_TWT_STATS_MAX_BTWT 4u
-#define WL_TWT_STATS_MAX_ITWT 4u
+#define WL_TWT_FLOW_ID_FID 0x07 /* flow id */
+#define WL_TWT_FLOW_ID_GID_MASK 0x70 /* group id - broadcast TWT only */
+#define WL_TWT_FLOW_ID_GID_SHIFT 4
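The FID/GID split of flow_id for broadcast TWT can be decoded directly from the three macros above; the tiny helpers are hypothetical.

static unsigned int twt_flow_fid(uint8 flow_id)
{
	return flow_id & WL_TWT_FLOW_ID_FID;		/* per-flow id, 0..7 */
}

static unsigned int twt_flow_gid(uint8 flow_id)
{
	return (flow_id & WL_TWT_FLOW_ID_GID_MASK) >> WL_TWT_FLOW_ID_GID_SHIFT;	/* broadcast group id */
}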
/* Wake type */
/* TODO: not yet finalized */
-#define WL_TWT_TIME_TYPE_BSS 0u /* The time specified in wake_time_h/l is
+#define WL_TWT_TIME_TYPE_BSS 0 /* The time specified in wake_time_h/l is
* the BSS TSF time.
*/
-#define WL_TWT_TIME_TYPE_OFFSET 1u /* The time specified in wake_time_h/l is an offset
+#define WL_TWT_TIME_TYPE_OFFSET 1 /* The time specified in wake_time_h/l is an offset
* of the TSF time when the iovar is processed.
*/
-#define WL_TWT_TIME_TYPE_AUTO 2u /* The target wake time is chosen internally by the FW */
-#define WL_TWT_SETUP_VER 0u
+#define WL_TWT_SETUP_VER 0
/* HE TWT Setup command */
-typedef struct wl_twt_setup {
+typedef struct {
/* structure control */
uint16 version; /* structure version */
uint16 length; /* data length (starting after this field) */
/* peer address */
struct ether_addr peer; /* leave it all 0s' for AP */
- uint8 pad[2];
+ /* session id */
+ uint8 dialog; /* an arbitrary number to identify the session */
+ uint8 pad;
/* setup descriptor */
wl_twt_sdesc_t desc;
-
- /* deprecated - to be removed */
- uint16 dialog;
- uint8 pad1[2];
} wl_twt_setup_t;
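A hedged sketch of populating the simplified wl_twt_setup_t: an individual, trigger-enabled flow whose first wake is expressed as a TSF offset. The helper name and the concrete numbers are illustrative, and setup_cmd is left at a placeholder because the TWT_SETUP_CMD_* values live in 802.11ah.h.

#include <string.h>

static void twt_setup_fill(wl_twt_setup_t *setup, const struct ether_addr *ap)
{
	memset(setup, 0, sizeof(*setup));
	setup->version = WL_TWT_SETUP_VER;
	setup->length = sizeof(*setup) - 2 * sizeof(uint16);	/* data after the length field */
	setup->peer = *ap;			/* leave all zeros when acting as AP */
	setup->dialog = 1;			/* arbitrary session identifier */

	setup->desc.setup_cmd = 0;		/* placeholder for a TWT_SETUP_CMD_* value */
	setup->desc.flow_flags = WL_TWT_FLOW_FLAG_TRIGGER;
	setup->desc.flow_id = 0;		/* must be between 0 and 7 */
	setup->desc.wake_type = WL_TWT_TIME_TYPE_OFFSET;
	setup->desc.wake_time_h = 0;
	setup->desc.wake_time_l = 100000;	/* first wake 100 ms from now */
	setup->desc.wake_dur = 8000;		/* 8 ms service period */
	setup->desc.wake_int = 100000;		/* repeat every 100 ms */
}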
-/* deprecated -to be removed */
-#define WL_TWT_DIALOG_TOKEN_AUTO 0xFFFF
-
-#define WL_TWT_TEARDOWN_VER 0u
-
-/* twt teardown descriptor */
-typedef struct wl_twt_teardesc {
- uint8 negotiation_type;
- uint8 flow_id; /* must be between 0 and 7 */
- uint8 bid; /* must be between 0 and 31 */
- bool alltwt; /* all twt teardown - 0 or 1 */
-} wl_twt_teardesc_t;
+#define WL_TWT_TEARDOWN_VER 0
/* HE TWT Teardown command */
-typedef struct wl_twt_teardown {
+typedef struct {
/* structure control */
uint16 version; /* structure version */
uint16 length; /* data length (starting after this field) */
/* peer address */
struct ether_addr peer; /* leave it all 0s' for AP */
- wl_twt_teardesc_t teardesc; /* Teardown descriptor */
-
- /* deprecated - to be removed */
- uint8 flow_flags;
- uint8 flow_id;
- uint8 bid;
- uint8 pad;
+ /* flow attributes */
+ uint8 flow_flags; /* See WL_TWT_FLOW_FLAG_XXXX above.
+ * (only BROADCAST is applicable)
+ */
+ uint8 flow_id; /* must be between 0 and 7 */
} wl_twt_teardown_t;
/* twt information descriptor */
-typedef struct wl_twt_infodesc {
+typedef struct {
uint8 flow_flags; /* See WL_TWT_INFO_FLAG_XXX below */
uint8 flow_id;
uint8 pad[2];
uint32 next_twt_h;
uint32 next_twt_l;
- /* deprecated - to be removed */
- uint8 wake_type;
- uint8 pad1[3];
-} wl_twt_infodesc_t;
+} wl_twt_idesc_t;
/* Flow flags */
-#define WL_TWT_INFO_FLAG_ALL_TWT (1u << 0u) /* All TWT */
-#define WL_TWT_INFO_FLAG_RESUME (1u << 1u) /* 1 is TWT Resume, 0 is TWT Suspend */
+#define WL_TWT_INFO_FLAG_RESP_REQ (1<<0) /* Request response */
-/* deprecated - to be removed */
-#define WL_TWT_INFO_FLAG_RESP_REQ (1 << 0) /* Response Requested */
-#define WL_TWT_INFO_FLAG_NEXT_TWT_REQ (1 << 1) /* Next TWT Request */
-#define WL_TWT_INFO_FLAG_BTWT_RESCHED (1 << 2) /* Broadcast Reschedule */
-typedef wl_twt_infodesc_t wl_twt_idesc_t;
-
-#define WL_TWT_INFO_VER 0u
+#define WL_TWT_INFO_VER 0
/* HE TWT Information command */
-typedef struct wl_twt_info {
+typedef struct {
/* structure control */
uint16 version; /* structure version */
uint16 length; /* data length (starting after this field) */
/* peer address */
struct ether_addr peer; /* leave it all 0s' for AP */
uint8 pad[2];
- wl_twt_infodesc_t infodesc; /* information descriptor */
- /* deprecated - to be removed */
+ /* information descriptor */
wl_twt_idesc_t desc;
} wl_twt_info_t;
-#define WL_TWT_PEER_STATS_VERSION_1 1u
-typedef struct wl_twt_peer_stats_v1 {
- uint16 version;
- uint16 length;
- struct ether_addr peer;
- uint8 PAD[2];
- uint8 id;
- uint8 flow_flags;
- uint8 PAD[2];
- uint32 sp_seq; /* sequence number of the service period */
- uint32 tx_ucast_pkts;
- uint32 tx_pkts_min;
- uint32 tx_pkts_max;
- uint32 tx_pkts_avg;
- uint32 tx_failures;
- uint32 rx_ucast_pkts;
- uint32 rx_pkts_min;
- uint32 rx_pkts_max;
- uint32 rx_pkts_avg;
- uint32 rx_pkts_retried;
-} wl_twt_peer_stats_v1_t;
-
-#define WL_TWT_STATS_VERSION_1 1
-typedef struct wl_twt_stats_v1 {
- uint16 version;
- uint16 length;
- uint32 num_stats; /* number of peer stats in the peer_stats_list */
- wl_twt_peer_stats_v1_t peer_stats_list[];
-} wl_twt_stats_v1_t;
-
-#define WL_TWT_STATS_CMD_VERSION_1 1
-#define WL_TWT_STATS_CMD_FLAGS_RESET (1u << 0u)
-/* HE TWT stats command */
-typedef struct wl_twt_stats_cmd_v1 {
- uint16 version;
- uint16 length;
- struct ether_addr peer;
- uint8 PAD[2];
- uint16 flags; /* see WL_TWT_STATS_CMD_FLAGS */
- uint8 num_fid;
- uint8 num_bid;
- uint8 fid_list[WL_TWT_STATS_MAX_ITWT];
- uint8 bid_list[WL_TWT_STATS_MAX_BTWT];
-} wl_twt_stats_cmd_v1_t;
-
-#define WL_TWT_RESP_CFG_VER 0u
-
-#define WL_TWT_CMD_RESP_CFG_TYPE_ALTERNATE 0u
-#define WL_TWT_CMD_RESP_CFG_TYPE_DICTATE 1u
-/* HE TWT resp command */
-typedef struct wl_twt_resp_cfg {
- /* structure control */
- uint16 version; /* Structure version */
- uint16 length; /* Data length (starting after this field) */
- uint8 dc_max; /* Max supported duty cycle for single TWT */
- uint8 resp_type; /* Resp. type(Alt/dict) if duty cycle>max duty cycle */
-} wl_twt_resp_cfg_t;
-
/* Current version for wlc_clm_power_limits_req_t structure and flags */
#define WLC_CLM_POWER_LIMITS_REQ_VERSION 1
/* "clm_power_limits" iovar request structure */
/* Output. Limits taken from country-default (all-product) data */
#define WLC_CLM_POWER_LIMITS_OUTPUT_FLAG_DEFAULT_COUNTRY_LIMITS 0x00000004
-#define WL_MBO_IOV_MAJOR_VER 1
-#define WL_MBO_IOV_MINOR_VER 1
-#define WL_MBO_IOV_MAJOR_VER_SHIFT 8
-#define WL_MBO_IOV_VERSION \
- ((WL_MBO_IOV_MAJOR_VER << WL_MBO_IOV_MAJOR_VER_SHIFT)| WL_MBO_IOV_MINOR_VER)
+/*
+ * WOG (Wake On Googlecast)
+ */
-#define MBO_MAX_CHAN_PREF_ENTRIES 16
+#define MAX_GCAST_APPID_CNT_LIMIT 50
+#define MAX_DNS_LABEL 63
-enum wl_mbo_cmd_ids {
- WL_MBO_CMD_ADD_CHAN_PREF = 1,
- WL_MBO_CMD_DEL_CHAN_PREF = 2,
- WL_MBO_CMD_LIST_CHAN_PREF = 3,
- WL_MBO_CMD_CELLULAR_DATA_CAP = 4,
+typedef struct wog_appid {
+ uint8 appID[MAX_DNS_LABEL+1];
+} wog_appid_t;
+
+enum {
+ WOG_APPID_ADD,
+ WOG_APPID_DEL,
+ WOG_APPID_CLEAR,
+ WOG_APPID_LIST,
+ WOG_MAX_APPID_CNT
+};
+
+#define WOG_APPID_IOV_VER 1
+typedef struct wog_appid_iov {
+ /* version for iovar */
+ uint32 ver;
+ /* add/del/clear/list operation */
+ uint32 operation;
+ /* count of entries when adding or deleting multiple items */
+ /* for WOG_MAX_APPID_CNT, this field carries the maximum AppID count */
+ uint32 cnt;
+ /* Application IDs */
+ /* If FW finds an AppID from this list, FW will respond to discovery */
+ /* without waking up the host */
+ wog_appid_t appids[1];
+} wog_appid_iov_t;
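/*
 * Illustrative sketch only, not part of this change: building a
 * wog_appid_iov_t that registers a single Googlecast AppID so the firmware
 * can answer discovery for it without waking the host. The "wog_appid"
 * iovar name, the example_iovar_set() helper and the use of strncpy()
 * here are assumptions made for illustration.
 */
extern int example_iovar_set(void *ctx, const char *name, void *buf, int len);

static int example_wog_add_appid(void *ctx, const char *appid)
{
	wog_appid_iov_t iov = {0};

	iov.ver = WOG_APPID_IOV_VER;
	iov.operation = WOG_APPID_ADD;
	iov.cnt = 1;	/* one entry in appids[] */
	/* appID is MAX_DNS_LABEL+1 bytes; zero init keeps it NUL terminated */
	strncpy((char *)iov.appids[0].appID, appid, MAX_DNS_LABEL);

	return example_iovar_set(ctx, "wog_appid", &iov, sizeof(iov));
}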
+
+/* dns service record */
+/* service name : _googlecast */
+typedef struct wog_srv_record {
+ uint32 ttl;
+ uint16 port; /* tcp 8008 or 8009 */
+ uint8 PAD[2];
+} wog_srv_record_t;
+
+#define GCAST_MAX_MODEL_NAME_LEN 16
+#define GCAST_MAX_FNAME_LEN 64
+#define GCAST_MAX_RS_LEN 60
+
+#define GCAST_UUID_LEN 32
+#define GCAST_PUBLICKEY_ID_LEN 64
+#define GCAST_VER_LEN 2
+typedef struct wog_txt_record {
+ uint32 ttl;
+ /* id : UUID for the receiver */
+ char id[GCAST_UUID_LEN+1];
+
+ /* Cast protocol version supported. Begins at 2 */
+ /* and is incremented by 1 with each version */
+ char ver[GCAST_VER_LEN+1];
+
+ /* 256bit receiver Subject Public Key Identifier from the SSL cert */
+ char public_key[GCAST_PUBLICKEY_ID_LEN+1];
+
+ /* A bitfield of device capabilities. */
+ /* bit 0 : video_out (1:has video out, 0:no video) */
+ /* bit 1 : video_in */
+ /* bit 2 : audio_out */
+ /* bit 3 : audio_in */
+ /* bit 4 : dev_mode */
+ /* (1:dev mode enabled, 0: not enabled) */
+ char capability;
+
+ /* Receiver status flag: 0 (IDLE), 1 (BUSY/JOIN) */
+ /* IDLE : The receiver is idle */
+ /* and doesn't need to be connected now. */
+ /* BUSY/JOIN : The receiver is hosting an activity */
+ /* and invites the sender to join */
+ char receiver_status_flag;
+
+ uint8 PAD0[1];
+
+ char friendly_name[GCAST_MAX_FNAME_LEN+1];
+ uint8 PAD1[3];
+
+ char model_name[GCAST_MAX_MODEL_NAME_LEN+1];
+ uint8 PAD2[3];
+
+ /* Receiver Status text for Cast Protocol v2 */
+ /* Spec says that if the status text exceeds 60 characters in length, */
+ /* it is truncated at 60 characters and */
+ /* a UTF-8 ellipsis character is appended to indicate truncation. */
+ /* But our dongle won't use UTF-8 ellipsis. It's not a big deal. */
+ char receiver_status[GCAST_MAX_RS_LEN+1];
+ uint8 PAD3[3];
+} wog_txt_record_t;
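/*
 * Illustrative sketch only, not part of this change: encoding the
 * capability bitfield documented above for a receiver with video out and
 * audio out but no inputs and dev mode disabled.
 */
static void example_fill_capability(wog_txt_record_t *txt)
{
	txt->capability = (1 << 0) |	/* bit 0: video_out */
			  (1 << 2);	/* bit 2: audio_out */
}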
+
+/* ip will be taken from the ip of wog_info_t */
+typedef struct wog_a_record {
+ uint32 ttl;
+} wog_a_record_t;
+
+/* Google Cast protocol uses mDNS SD for its discovery */
+#define WOG_SD_RESP_VER 1
+typedef struct wog_sd_resp {
+ /* version for iovar */
+ int32 ver;
+ /* device name of Google Cast receiver */
+ char device_name[MAX_DNS_LABEL+1];
+ /* IP address of Google Cast receiver */
+ uint8 ip[4];
+ /* ttl of PTR response */
+ uint32 ptr_ttl;
+ /* DNS TXT record */
+ wog_txt_record_t txt;
+ /* DNS SRV record */
+ wog_srv_record_t srv;
+ /* DNS A record */
+ wog_a_record_t a;
+} wog_sd_resp_t;
+
+enum wl_mbo_cmd_ids {
+ WL_MBO_CMD_ADD_CHAN_PREF = 1,
+ WL_MBO_CMD_DEL_CHAN_PREF = 2,
+ WL_MBO_CMD_LIST_CHAN_PREF = 3,
+ WL_MBO_CMD_CELLULAR_DATA_CAP = 4,
WL_MBO_CMD_DUMP_COUNTERS = 5,
WL_MBO_CMD_CLEAR_COUNTERS = 6,
WL_MBO_CMD_FORCE_ASSOC = 7,
WL_MBO_CMD_BSSTRANS_REJECT = 8,
WL_MBO_CMD_SEND_NOTIF = 9,
- /* Unused command, This enum no can be use
- * for next new command
- */
- WL_MBO_CMD_CLEAR_CHAN_PREF = 10,
- WL_MBO_CMD_NBR_INFO_CACHE = 11,
- WL_MBO_CMD_ANQPO_SUPPORT = 12,
- WL_MBO_CMD_DBG_EVENT_CHECK = 13,
- WL_MBO_CMD_EVENT_MASK = 14,
/* Add before this !! */
WL_MBO_CMD_LAST
};
WL_MBO_XTLV_CELL_DATA_CAP = 0x5,
WL_MBO_XTLV_COUNTERS = 0x6,
WL_MBO_XTLV_ENABLE = 0x7,
- WL_MBO_XTLV_SUB_ELEM_TYPE = 0x8,
- WL_MBO_XTLV_BTQ_TRIG_START_OFFSET = 0x9,
- WL_MBO_XTLV_BTQ_TRIG_RSSI_DELTA = 0xa,
- WL_MBO_XTLV_ANQP_CELL_SUPP = 0xb,
- WL_MBO_XTLV_BIT_MASK = 0xc
+ WL_MBO_XTLV_SUB_ELEM_TYPE = 0x8
};
-/* event bit mask flags for MBO */
-#define MBO_EVT_BIT_MASK_CELLULAR_SWITCH 0x0001 /* Evt bit mask to enab cellular switch */
-#define MBO_EVT_BIT_MASK_BTM_REQ_RCVD 0x0002 /* Evt bit mask to enab BTM req rcvd */
-
typedef struct wl_mbo_counters {
/* No of transition req recvd */
uint16 trans_req_rcvd;
uint16 wifi_to_cell;
} wl_mbo_counters_t;
-#define WL_FILS_IOV_MAJOR_VER 1
-#define WL_FILS_IOV_MINOR_VER 1
-#define WL_FILS_IOV_MAJOR_VER_SHIFT 8
-#define WL_FILS_IOV_VERSION \
- ((WL_FILS_IOV_MAJOR_VER << WL_FILS_IOV_MAJOR_VER_SHIFT)| WL_FILS_IOV_MINOR_VER)
-
-enum wl_fils_cmd_ids {
- WL_FILS_CMD_ADD_IND_IE = 1,
- WL_FILS_CMD_ADD_AUTH_DATA = 2, /* Deprecated, kept to prevent ROM invalidation */
- WL_FILS_CMD_ADD_HLP_IE = 3,
- WL_FILS_CMD_ADD_CONNECT_PARAMS = 4,
- WL_FILS_CMD_GET_CONNECT_PARAMS = 5,
- /* Add before this !! */
- WL_FILS_CMD_LAST
-};
-
-enum wl_fils_xtlv_id {
- WL_FILS_XTLV_IND_IE = 0x1,
- WL_FILS_XTLV_AUTH_DATA = 0x2, /* Deprecated, kept to prevent ROM invalidation */
- WL_FILS_XTLV_HLP_IE = 0x3,
- WL_FILS_XTLV_ERP_USERNAME = 0x4,
- WL_FILS_XTLV_ERP_REALM = 0x5,
- WL_FILS_XTLV_ERP_RRK = 0x6,
- WL_FILS_XTLV_ERP_NEXT_SEQ_NUM = 0x7,
- WL_FILS_XTLV_KEK = 0x8,
- WL_FILS_XTLV_PMK = 0x9,
- WL_FILS_XTLV_TK = 0xa,
- WL_FILS_XTLV_PMKID = 0xb
-};
-
-#define WL_OCE_IOV_MAJOR_VER 1
-#define WL_OCE_IOV_MINOR_VER 1
-#define WL_OCE_IOV_MAJOR_VER_SHIFT 8
-#define WL_OCE_IOV_VERSION \
- ((WL_OCE_IOV_MAJOR_VER << WL_OCE_IOV_MAJOR_VER_SHIFT)| WL_OCE_IOV_MINOR_VER)
-
-enum wl_oce_cmd_ids {
- WL_OCE_CMD_ENABLE = 1,
- WL_OCE_CMD_PROBE_DEF_TIME = 2,
- WL_OCE_CMD_FD_TX_PERIOD = 3,
- WL_OCE_CMD_FD_TX_DURATION = 4,
- WL_OCE_CMD_RSSI_TH = 5,
- WL_OCE_CMD_RWAN_LINKS = 6,
- WL_OCE_CMD_CU_TRIGGER = 7,
- /* Add before this !! */
- WL_OCE_CMD_LAST
-};
-
-enum wl_oce_xtlv_id {
- WL_OCE_XTLV_ENABLE = 0x1,
- WL_OCE_XTLV_PROBE_DEF_TIME = 0x2,
- WL_OCE_XTLV_FD_TX_PERIOD = 0x3,
- WL_OCE_XTLV_FD_TX_DURATION = 0x4,
- WL_OCE_XTLV_RSSI_TH = 0x5,
- WL_OCE_XTLV_RWAN_LINKS = 0x6,
- WL_OCE_XTLV_CU_TRIGGER = 0x7
-};
-
-#define WL_ESP_IOV_MAJOR_VER 1
-#define WL_ESP_IOV_MINOR_VER 1
-#define WL_ESP_IOV_MAJOR_VER_SHIFT 8
-#define WL_ESP_IOV_VERSION \
- ((WL_ESP_IOV_MAJOR_VER << WL_ESP_IOV_MAJOR_VER_SHIFT)| WL_ESP_IOV_MINOR_VER)
-
-enum wl_esp_cmd_ids {
- WL_ESP_CMD_ENABLE = 1,
- WL_ESP_CMD_STATIC = 2,
- /* Add before this !! */
- WL_ESP_CMD_LAST
-};
-
-enum wl_esp_xtlv_id {
- WL_ESP_XTLV_ENABLE = 0x1,
- WL_ESP_XTLV_STATIC_AC = 0x2, /* access category */
- WL_ESP_XTLV_STATIC_TYPE = 0x3, /* data type */
- WL_ESP_XTLV_STATIC_VAL = 0x4
-};
-
/* otpread command */
#define WL_OTPREAD_VER 1
#define WL_LEAKED_GUARD_TIME_NONE 0 /* Not in any guard time */
#define WL_LEAKED_GUARD_TIME_FRTS (0x01 << 0) /* Normal FRTS power save */
#define WL_LEAKED_GUARD_TIME_SCAN (0x01 << 1) /* Channel switch due to scanning */
+#define WL_LEAKED_GUARD_TIME_AWDL_PSF (0x01 << 2) /* Channel switch due to AWDL PSF */
+#define WL_LEAKED_GUARD_TIME_AWDL_AW (0x01 << 3) /* Channel switch due to AWDL AW */
#define WL_LEAKED_GUARD_TIME_INFRA_STA (0x01 << 4) /* generic type infra sta channel switch */
#define WL_LEAKED_GUARD_TIME_TERMINATED (0x01 << 7) /* indicate a GT is terminated early */
} wl_desense_restage_gain_t;
#define MAX_UCM_CHAINS 5
-#define MAX_UCM_PROFILES 10
+#define MAX_UCM_PROFILES 4
#define UCM_PROFILE_VERSION_1 1
/* UCM per chain attribute struct */
} wlc_btcx_profile_v1_t;
#define SSSR_D11_RESET_SEQ_STEPS 5
+#define SSSR_REG_INFO_VER 0
-#define SSSR_REG_INFO_VER_0 0u
-#define SSSR_REG_INFO_VER_1 1u
-#define SSSR_REG_INFO_VER_2 2u
-
-typedef struct sssr_reg_info_v0 {
- uint16 version;
- uint16 length; /* length of the structure validated at host */
- struct {
- struct {
- uint32 pmuintmask0;
- uint32 pmuintmask1;
- uint32 resreqtimer;
- uint32 macresreqtimer;
- uint32 macresreqtimer1;
- } base_regs;
- } pmu_regs;
- struct {
- struct {
- uint32 intmask;
- uint32 powerctrl;
- uint32 clockcontrolstatus;
- uint32 powerctrl_mask;
- } base_regs;
- } chipcommon_regs;
- struct {
- struct {
- uint32 clockcontrolstatus;
- uint32 clockcontrolstatus_val;
- } base_regs;
- struct {
- uint32 resetctrl;
- uint32 itopoobb;
- } wrapper_regs;
- } arm_regs;
- struct {
- struct {
- uint32 ltrstate;
- uint32 clockcontrolstatus;
- uint32 clockcontrolstatus_val;
- } base_regs;
- struct {
- uint32 itopoobb;
- } wrapper_regs;
- } pcie_regs;
- struct {
- struct {
- uint32 ioctrl;
- } wrapper_regs;
- uint32 vasip_sr_addr;
- uint32 vasip_sr_size;
- } vasip_regs;
- struct {
- struct {
- uint32 xmtaddress;
- uint32 xmtdata;
- uint32 clockcontrolstatus;
- uint32 clockcontrolstatus_val;
- } base_regs;
- struct {
- uint32 resetctrl;
- uint32 itopoobb;
- uint32 ioctrl;
- uint32 ioctrl_resetseq_val[SSSR_D11_RESET_SEQ_STEPS];
- } wrapper_regs;
- uint32 sr_size;
- } mac_regs[MAX_NUM_D11CORES];
-} sssr_reg_info_v0_t;
-
-typedef struct sssr_reg_info_v1 {
+typedef struct sssr_reg_info {
uint16 version;
uint16 length; /* length of the structure validated at host */
struct {
} wrapper_regs;
uint32 sr_size;
} mac_regs[MAX_NUM_D11CORES];
- struct {
- uint32 dig_sr_addr;
- uint32 dig_sr_size;
- } dig_mem_info;
-} sssr_reg_info_v1_t;
-
-#define MAX_NUM_D11_CORES_WITH_SCAN 3u
-
-typedef struct sssr_reg_info_v2 {
- uint16 version;
- uint16 length; /* length of the structure validated at host */
- struct {
- struct {
- uint32 pmuintmask0;
- uint32 pmuintmask1;
- uint32 resreqtimer;
- uint32 macresreqtimer;
- uint32 macresreqtimer1;
- uint32 macresreqtimer2;
- } base_regs;
- } pmu_regs;
- struct {
- struct {
- uint32 intmask;
- uint32 powerctrl;
- uint32 clockcontrolstatus;
- uint32 powerctrl_mask;
- } base_regs;
- } chipcommon_regs;
- struct {
- struct {
- uint32 clockcontrolstatus;
- uint32 clockcontrolstatus_val;
- } base_regs;
- struct {
- uint32 resetctrl;
- uint32 extrsrcreq;
- } wrapper_regs;
- } arm_regs;
- struct {
- struct {
- uint32 ltrstate;
- uint32 clockcontrolstatus;
- uint32 clockcontrolstatus_val;
- } base_regs;
- struct {
- uint32 extrsrcreq;
- } wrapper_regs;
- } pcie_regs;
- struct {
- struct {
- uint32 xmtaddress;
- uint32 xmtdata;
- uint32 clockcontrolstatus;
- uint32 clockcontrolstatus_val;
- } base_regs;
- struct {
- uint32 resetctrl;
- uint32 extrsrcreq;
- uint32 ioctrl;
- uint32 ioctrl_resetseq_val[SSSR_D11_RESET_SEQ_STEPS];
- } wrapper_regs;
- uint32 sr_size;
- } mac_regs[MAX_NUM_D11_CORES_WITH_SCAN];
- struct {
- uint32 dig_sr_addr;
- uint32 dig_sr_size;
- } dig_mem_info;
-} sssr_reg_info_v2_t;
-
-#ifndef SSSR_REG_INFO_HAS_ALIAS
-typedef sssr_reg_info_v0_t sssr_reg_info_t;
-#define SSSR_REG_INFO_VER SSSR_REG_INFO_VER_0
-#endif // endif
-
-/* A wrapper structure for all versions of SSSR register information structures */
-typedef union sssr_reg_info {
- sssr_reg_info_v0_t rev0;
- sssr_reg_info_v1_t rev1;
- sssr_reg_info_v2_t rev2;
-} sssr_reg_info_cmn_t;
+} sssr_reg_info_t;
/* ADaptive Power Save(ADPS) structure definition */
#define WL_ADPS_IOV_MAJOR_VER 1
#define WL_ADPS_IOV_RSSI 0x0002
#define WL_ADPS_IOV_DUMP 0x0003
#define WL_ADPS_IOV_DUMP_CLEAR 0x0004
-#define WL_ADPS_IOV_SUSPEND 0x0005
#define ADPS_SUMMARY_STEP_NUM 2
#define ADPS_SUMMARY_STEP_LOW 0
#define ADPS_SUB_IOV_VERSION_1 1
#define ADPS_SUB_IOV_VERSION_2 2
-/* suspend/resume ADPS by wl/private command from host */
-#define ADPS_RESUME 0u
-#define ADPS_SUSPEND 1u
-
typedef struct wl_adps_params_v1 {
uint16 version;
uint16 length;
adps_stat_elem_t stat[ADPS_SUMMARY_STEP_NUM]; /* statistics */
} wl_adps_dump_summary_v1_t;
-typedef struct wl_adps_dump_summary_v2 {
- uint16 version;
- uint16 length;
- uint8 mode; /* operation mode: On/Off */
- uint8 current_step; /* current step */
- uint8 padding[2];
- uint32 flags; /* restrict flags */
- adps_stat_elem_t stat[ADPS_SUMMARY_STEP_NUM]; /* statistics */
-} wl_adps_dump_summary_v2_t;
-
-typedef struct wl_adps_suspend_v1 {
- uint16 version;
- uint16 length;
- uint8 suspend; /* 1: suspend 0: resume */
- uint8 padding[3];
-} wl_adps_suspend_v1_t;
-
typedef struct wlc_btc_2gchain_dis {
uint16 ver;
uint16 len;
uint8 flag;
} wlc_btc_2gchain_dis_t;
-/* TDLS structure definition */
-#define WL_TDLS_T_VERSION_V1 1
-typedef struct wl_tdls_dump_summary_v1 {
- uint16 version;
- uint16 length; /* length of the entire structure */
- uint32 txsetupreq; /* tdls setup req sent */
- uint32 txsetupresp; /* tdls setup resp sent */
- uint32 txsetupcfm; /* tdls setup confirm sent */
- uint32 txteardown; /* tdls teardwon frames sent */
- uint32 txptireq; /* tdls pti req frames sent */
- uint32 txptiresp; /* tdls pti resp frames sent */
- uint32 txchswreq; /* tdls chsw req frames sent */
- uint32 txchswresp; /* tdls chsw resp frame sent */
- uint32 rxsetupreq; /* tdls setup req rcvd */
- uint32 rxdsetupresp; /* tdls setup resp rcvd */
- uint32 rxsetupcfm; /* tdls setup confirm rcvd */
- uint32 rxteardown; /* tdls teardown frames rcvd */
- uint32 rxptireq; /* tdls pti req frames rcvd */
- uint32 rxptiresp; /* tdls pti resp frames rcvd */
- uint32 rxchswreq; /* tdls chsw req frames rcvd */
- uint32 rxchswresp; /* tdls chsw resp frames rcvd */
- uint32 discard; /* frames discarded due to full buffer */
- uint32 ubuffered; /* frames buffered by TDLS txmod */
- uint32 buf_reinserted; /* frames reinserted */
- uint32 idletime; /* time since no traffic on tdls link */
- uint32 uptime; /* time since tdls link connected */
- uint32 tx_cnt; /* frames txed over tdls link */
- uint32 rx_cnt; /* frames rcvd over tdls link */
- uint32 blist_cnt; /* number of tdls black list */
- uint32 scb_flags; /* connected tdls scb flags */
- struct ether_addr peer_addr; /* connected peer addr */
- uint8 padding[2];
-} wl_tdls_dump_summary_v1_t;
-
#define WLC_BTC_2GCHAIN_DIS_REASSOC 0x1
#define WLC_BTC_2GCHAIN_DIS_VER1 0x1
#define WLC_BTC_2GCHAIN_DIS_VER1_LEN 6
-/* --- BTCX WiFi Protection (btc_wifi_prot iovar) --- */
-
-/* Current iovar structure version: 1 */
-#define WL_BTC_WIFI_PROT_VER_1 1
-
-typedef struct wl_btc_wifi_prot_v1 {
- uint16 ver; /* version */
- uint16 len; /* total length */
- uint8 data[]; /* bcm_xtlv_t payload */
-} wl_btc_wifi_prot_v1_t;
-
-/* Xtlv tags (protection type) and data */
-#define WL_BTC_WIFI_PROT_M1_M4 1
-typedef struct wl_btc_wifi_prot_m1_m4 {
- uint32 enable; /* enable/disable m1-m4 protection */
- uint32 timeout; /* maximum timeout in ms (0: default) */
-} wl_btc_wifi_prot_m1_m4_t;
-
-#define WL_BTC_WIFI_PROT_ENABLE 1
-#define WL_BTC_WIFI_PROT__DISABLE 0
-
-/* --- End BTCX WiFi Protection --- */
-
-/* --- BTCX ULMU disable (btc_ulmu_config iovar) --- */
-
-/* Version number */
-#define WL_BTC_ULMU_CONFIG_VER_1 1
-typedef struct wl_btc_ulmu_config_v1 {
- uint16 version; /* btc_ulmu_config version */
- uint16 len; /* Total length */
- uint32 ulmu_bt_task_bm; /* BT Task bimtap for ULMU disable */
- uint32 ulmu_bt_period_th; /* BT period thresh for ULMU disable */
-} wl_btc_ulmu_config_v1_t;
-
-/* --- End BTCX ULMU config --- */
-
-#define RPSNOA_IOV_MAJOR_VER 1
-#define RPSNOA_IOV_MINOR_VER 1
-#define RPSNOA_IOV_MAJOR_VER_SHIFT 8
-#define RPSNOA_IOV_VERSION \
- ((RPSNOA_IOV_MAJOR_VER << RPSNOA_IOV_MAJOR_VER_SHIFT)| RPSNOA_IOV_MINOR_VER)
-
enum wl_rpsnoa_cmd_ids {
WL_RPSNOA_CMD_ENABLE = 1,
WL_RPSNOA_CMD_STATUS,
int16 value;
} rpsnoa_data_t;
-typedef struct rpsnoa_stats {
- int16 band;
- int16 state;
- uint32 sleep_dur;
- uint32 sleep_avail_dur;
- uint32 last_pps;
-} rpsnoa_stats_t;
-
typedef struct rpsnoa_param {
uint16 band;
uint8 level;
rpsnoa_data_t data[1];
} rpsnoa_iovar_t;
-typedef struct rpsnoa_iovar_status {
- rpsnoa_cmnhdr_t hdr;
- rpsnoa_stats_t stats[1];
-} rpsnoa_iovar_status_t;
-
typedef struct rpsnoa_iovar_params {
rpsnoa_cmnhdr_t hdr;
rpsnoa_param_t param[1];
WL_IFSTATS_XTLV_MAC_ADDR = 3,
WL_IFSTATS_XTLV_REPORT_CMD = 4, /* Comes in an iovar */
WL_IFSTATS_XTLV_BUS_PCIE = 5,
- WL_STATS_XTLV_BUS_PCIE_TX_HISTOGRAMS = 6,
- WL_STATS_XTLV_BUS_PCIE_TX_QUEUE_DEPTH = 7,
- /* history of blocks freed most recently */
- WL_STATS_XTLV_FBINFO_STATS = 8,
/* Report data across all SCBs using ecounters */
- /* STA_info ecounters */
WL_IFSTATS_XTLV_WL_STA_INFO_ECOUNTERS = 0x100,
- /* For AMPDU stat sub-types requested in a different format */
- /* these could be sum and report stats across slices. OR
- * report sub-types in pairs so host can sum and add.
- * Information sent here is across slices, therefore global
- */
- WL_IFSTATS_XTLV_TX_AMPDU_STATS = 0x101,
- WL_IFSTATS_XTLV_RX_AMPDU_STATS = 0x102,
- /* scb ecounter statistics */
- WL_IFSTATS_XTLV_SCB_ECOUNTERS = 0x103,
- /* Global NAN stats */
- WL_IFSTATS_XTLV_NAN_STATS = 0x104,
- WL_IFSTATS_XTLV_CHAN_STATS = 0x105,
- /* TDLS state */
- WL_IFSTATS_XTLV_IF_TDLS_STATE = 0x106,
- WL_IFSTATS_XTLV_KEY_PLUMB_INFO = 0x107,
- /* HE TX related stats */
- WL_IFSTATS_XTLV_HE_TXMU_STATS = 0x108,
/* Per-slice information
* Per-interface reporting could also include slice specific data
/* xtlv container for reporting */
WL_IFSTATS_XTLV_WL_SLICE = 0x301,
/* Per-slice AMPDU stats */
- WL_IFSTATS_XTLV_WL_SLICE_TX_AMPDU_DUMP = 0x302,
- WL_IFSTATS_XTLV_WL_SLICE_RX_AMPDU_DUMP = 0x303,
+ WL_IFSTATS_XTLV_WL_SLICE_AMPDU_DUMP = 0x302,
/* Per-slice BTCOEX stats */
- WL_IFSTATS_XTLV_WL_SLICE_BTCOEX = 0x304,
+ WL_IFSTATS_XTLV_WL_SLICE_BTCOEX = 0x303,
/* V11_WLCNTRS used in ecounters */
- WL_IFSTATS_XTLV_WL_SLICE_V11_WLCNTRS = 0x305,
+ WL_IFSTATS_XTLV_WL_SLICE_V11_WLCNTRS = 0x304,
/* V30_WLCNTRS Used in ecounters */
- WL_IFSTATS_XTLV_WL_SLICE_V30_WLCNTRS = 0x306,
- /* phy,ucode,scan pwrstats */
- WL_IFSTATS_XTLV_WL_SLICE_PWRSTATS_PHY = 0x307,
- WL_IFSTATS_XTLV_WL_SLICE_PWRSTATS_SCAN = 0x308,
- WL_IFSTATS_XTLV_WL_SLICE_PWRSTATS_WAKE_V2 = 0x309,
- /* Per-slice LTECOEX stats */
- WL_IFSTATS_XTLV_WL_SLICE_LTECOEX = 0x30A,
- /* TVPM ecounters */
- WL_IFSTATS_XTLV_WL_SLICE_TVPM = 0x30B,
- /* TDMTX ecounters */
- WL_IFSTATS_XTLV_WL_SLICE_TDMTX = 0x30C,
- /* Slice specific state capture in periodic fasion */
- WL_SLICESTATS_XTLV_PERIODIC_STATE = 0x30D,
- WL_SLICESTATS_XTLV_HIST_TX_STATS = 0x30E,
- WL_SLICESTATS_XTLV_HIST_RX_STATS = 0x30F,
- /* TX histograms */
- WL_STATS_XTLV_WL_SLICE_TX_HISTOGRAMS = 0x310,
- /* TX queue depth */
- WL_STATS_XTLV_WL_SLICE_TX_QUEUE_DEPTH = 0x311,
- /* Latency instrumentation debug */
- WL_STATS_XTLV_WL_QUEUE_STOP = 0x312,
- /* Beamforming counters */
- WL_IFSTATS_XTLV_WL_SLICE_TXBF = 0x313,
- /* Per-slice BTCOEX task duration stats */
- WL_IFSTATS_XTLV_WL_SLICE_BTCOEX_TSKDUR_STATS = 0x314,
+ WL_IFSTATS_XTLV_WL_SLICE_V30_WLCNTRS = 0x305,
+
/* Per-interface */
/* XTLV container for reporting */
WL_IFSTATS_XTLV_IF = 0x501,
WL_IFSTATS_XTLV_MGT_CNT = 0x504,
/* AMPDU stats on per-IF */
WL_IFSTATS_XTLV_AMPDU_DUMP = 0x505,
- WL_IFSTATS_XTLV_IF_SPECIFIC = 0x506,
- WL_IFSTATS_XTLV_IF_LQM = 0x508,
- /* Interface specific state capture in periodic fashion */
- WL_IFSTATS_XTLV_IF_PERIODIC_STATE = 0x509,
- /* Event statistics on per-IF */
- WL_IFSTATS_XTLV_IF_EVENT_STATS = 0x50A,
- /* Infra HE specific */
- WL_IFSTATS_XTLV_INFRA_SPECIFIC_HE = 0x50B,
- /* Roam statistics */
- WL_IFSTATS_XTLV_ROAM_STATS_PERIODIC = 0x50C,
- WL_IFSTATS_XTLV_ROAM_STATS_EVENT = 0x50D,
- /* ecounters for nan */
- /* nan slot stats */
- WL_IFSTATS_XTLV_NAN_SLOT_STATS = 0x601,
- /* Ecounters for NDP session status */
- WL_STATS_XTLV_NDP_SESSION_STATUS = 0x602,
- /* NAN disc frame status ecounters */
- WL_STATS_XTLV_NAN_DISC_FRM_STATUS = 0x603
+ WL_IFSTATS_XTLV_IF_SPECIFIC = 0x506
};
-/* current version of wl_stats_report_t structure for request */
-#define WL_STATS_REPORT_REQUEST_VERSION_V2 2
-
-/* current version of wl_stats_report_t structure for response */
-#define WL_STATS_REPORT_RESPONSE_VERSION_V2 2
-
-/** Top structure of if_counters IOVar buffer */
-typedef struct wl_stats_report {
- uint16 version; /**< see version definitions above */
- uint16 length; /**< length of data including all paddings. */
- uint8 data []; /**< variable length payload:
- * 1 or more bcm_xtlv_t type of tuples.
- * each tuple is padded to multiple of 4 bytes.
- * 'length' field of this structure includes all paddings.
- */
-} wl_stats_report_t;
-
/* interface specific mgt count */
#define WL_MGT_STATS_VERSION_V1 1
/* Associated stats type: WL_IFSTATS_MGT_CNT */
typedef struct {
uint16 version;
- uint16 length;
+ uint8 pad[2];
/* detailed control/management frames */
uint32 txnull;
/* Associated stats type: WL_IFSTATS_INFRA_SPECIFIC */
typedef struct wl_infra_stats {
uint16 version; /**< version of the structure */
- uint16 length;
+ uint8 pad[2];
uint32 rxbeaconmbss;
uint32 tbtt;
} wl_if_infra_stats_t;
-#define WL_INFRA_STATS_HE_VERSION_V1 (1u)
-/* Associated stats type: WL_IFSTATS_INFRA_SPECIFIC_HE */
-typedef struct wl_infra_stats_he {
- uint16 version; /**< version of the structure */
- uint16 length;
- uint32 PAD; /**< Explicit padding */
-
- /* DL SU MPDUs and total number of bytes */
- uint64 dlsu_mpdudata;
- uint64 dlsu_mpdu_bytes;
-
- /* DL MUMIMO MPDUs and total number of bytes */
- uint64 dlmumimo_mpdudata;
- uint64 dlmumimo_mpdu_bytes;
-
- /* DL OFDMA MPDUs and total number of bytes */
- uint64 dlofdma_mpdudata;
- uint64 dlofdma_mpdu_bytes;
-
- /* UL SU MPDUs and total number of bytes */
- uint64 ulsu_mpdudata;
- uint64 ulsu_mpdu_bytes;
-
- /* ULOFDMA MPSUs and total number of bytes */
- uint64 ulofdma_mpdudata;
- uint64 ulofdma_mpdu_bytes;
-} wl_if_infra_stats_he_t;
-
-#define LTECOEX_STATS_VER 1
-
-typedef struct wlc_ltecoex_stats {
- uint16 version; /**< WL_IFSTATS_XTLV_WL_SLICE_LTECOEX */
- uint16 len; /* Length of wl_ltecx_stats structure */
- uint8 slice_index; /* Slice unit of wl_ltecx_stats structure */
- uint8 pad[3]; /* Padding */
- /* LTE noise based eCounters Bins
- cumulative the wl_cnt_wlc_t and wl_ctl_mgt_cnt_t
- counter information based on LTE Coex interference level
- */
- uint32 txframe_no_LTE; /* txframe counter in no LTE Coex case */
- uint32 rxframe_no_LTE; /* rxframe counter in no LTE Coex case */
- uint32 rxrtry_no_LTE; /* rxrtry counter in no LTE Coex case */
- uint32 txretrans_no_LTE; /* txretrans counter in no LTE Coex case */
- uint32 txnocts_no_LTE; /* txnocts counter in no LTE Coex case */
- uint32 txrts_no_LTE; /* txrts counter in no LTE Coex case */
- uint32 txdeauth_no_LTE; /* txdeauth counter in no LTE Coex case */
- uint32 txassocreq_no_LTE; /* txassocreq counter in no LTE Coex case */
- uint32 txassocrsp_no_LTE; /* txassocrsp counter in no LTE Coex case */
- uint32 txreassocreq_no_LTE; /* txreassocreq counter in no LTE Coex case */
- uint32 txreassocrsp_no_LTE; /* txreassocrsp counter in no LTE Coex case */
- uint32 txframe_light_LTE; /* txframe counter in light LTE Coex case */
- uint32 txretrans_light_LTE; /* txretrans counter in light LTE Coex case */
- uint32 rxframe_light_LTE; /* rxframe counter in light LTE Coex case */
- uint32 rxrtry_light_LTE; /* rxrtry counter in light LTE Coex case */
- uint32 txnocts_light_LTE; /* txnocts counter in light LTE Coex case */
- uint32 txrts_light_LTE; /* txrts counter in light LTE Coex case */
- uint32 txdeauth_light_LTE; /* txdeauth counter in light LTE Coex case */
- uint32 txassocreq_light_LTE; /* txassocreq counter in light LTE Coex case */
- uint32 txassocrsp_light_LTE; /* txassocrsp counter in light LTE Coex case */
- uint32 txreassocreq_light_LTE; /* txreassocreq counter in light LTE Coex case */
- uint32 txreassocrsp_light_LTE; /* txreassocrsp counter in light LTE Coex case */
- uint32 txframe_heavy_LTE; /* txframe counter in heavy LTE Coex case */
- uint32 txretrans_heavy_LTE; /* txretrans counter in heavy LTE Coex case */
- uint32 rxframe_heavy_LTE; /* rxframe counter in heavy LTE Coex case */
- uint32 rxrtry_heavy_LTE; /* rxrtry counter in heavy LTE Coex case */
- uint32 txnocts_heavy_LTE; /* txnocts counter in heavy LTE Coex case */
- uint32 txrts_heavy_LTE; /* txrts counter in heavy LTE Coex case */
- uint32 txdeauth_heavy_LTE; /* txdeauth counter in heavy LTE Coex case */
- uint32 txassocreq_heavy_LTE; /* txassocreq counter in heavy LTE Coex case */
- uint32 txassocrsp_heavy_LTE; /* txassocrsp counter in heavy LTE Coex case */
- uint32 txreassocreq_heavy_LTE; /* txreassocreq counter in heavy LTE Coex case */
- uint32 txreassocrsp_heavy_LTE; /* txreassocrsp counter in heavy LTE Coex case */
-
- /* LTE specific ecounters */
- uint16 type4_txinhi_dur; /* Duration of tx inhibit(in ms) due to Type4 */
- uint16 type4_nonzero_cnt; /* Counts of none zero Type4 msg */
- uint16 type4_timeout_cnt; /* Counts of Type4 timeout */
- uint16 rx_pri_dur; /* Duration of wlan_rx_pri assertions */
- uint16 rx_pri_cnt; /* Count of wlan_rx_pri assertions */
- uint16 type6_dur; /* duration of LTE Tx power limiting assertions */
- uint16 type6_cnt; /* Count of LTE Tx power limiting assertions */
- uint16 ts_prot_frm_cnt; /* count of WLAN protection frames triggered by LTE coex */
- uint16 ts_gr_cnt; /* count of intervals granted to WLAN in timesharing */
- uint16 ts_gr_dur; /* duration granted to WLAN in timesharing */
-} wlc_ltecoex_stats_t;
-
-#define CSA_EVT_CSA_RXED (1 << 0)
-#define CSA_EVT_CSA_TIMEOUT (1 << 1)
-#define CSA_EVT_FROM_INFRA (1 << 2)
typedef struct csa_event_data {
chanspec_t chan_old;
dot11_ext_csa_ie_t ecsa;
dot11_mesh_csp_ie_t mcsp;
dot11_wide_bw_chan_switch_ie_t wbcs;
- uint8 flags;
- uint8 pad[3];
+ uint8 PAD;
} csa_event_data_t;
-/* ifdef (WL_ASSOC_BCN_RPT) */
-enum wl_bcn_report_cmd_id {
- WL_BCN_RPT_CMD_VER = 0,
- WL_BCN_RPT_CMD_CONFIG = 1,
- WL_BCN_RPT_CMD_VENDOR_IE = 2,
- WL_BCN_RPT_CMD_LAST
-};
-
-/* beacon report specific macros */
-#define WL_BCN_RPT_CCX_IE_OVERRIDE (1u << 0)
-
-/* beacon report specific macros */
-#define WL_BCN_RPT_ASSOC_SCAN_UNSOLICITED_MODE (1u << 1)
-#define WL_BCN_RPT_ASSOC_SCAN_SOLICITED_MODE (1u << 2)
-#define WL_BCN_RPT_ASSOC_SCAN_MODE_SHIFT (1)
-#define WL_BCN_RPT_ASSOC_SCAN_MODE_MASK (WL_BCN_RPT_ASSOC_SCAN_UNSOLICITED_MODE |\
- WL_BCN_RPT_ASSOC_SCAN_SOLICITED_MODE)
-#define WL_BCN_RPT_ASSOC_SCAN_MODE_MAX (WL_BCN_RPT_ASSOC_SCAN_MODE_MASK >> \
- WL_BCN_RPT_ASSOC_SCAN_MODE_SHIFT)
-/* beacon report mode specific macro */
-#define WL_BCN_RPT_ASSOC_SCAN_MODE_DEFAULT WL_BCN_RPT_ASSOC_SCAN_UNSOLICITED_MODE
-
-/* beacon report timeout config specific macros */
-#define WL_BCN_RPT_ASSOC_SCAN_CACHE_TIMEOUT_DEFAULT (120000)
-#define WL_BCN_RPT_ASSOC_SCAN_CACHE_TIMEOUT_MIN (60000)
-#define WL_BCN_RPT_ASSOC_SCAN_CACHE_TIMEOUT_MAX (0xFFFFFFFF)
-
-/* beacon report cache count specific macros */
-#define WL_BCN_RPT_ASSOC_SCAN_CACHE_COUNT_MIN (0)
-#define WL_BCN_RPT_ASSOC_SCAN_CACHE_COUNT_MAX (8)
-#define WL_BCN_RPT_ASSOC_SCAN_CACHE_COUNT_DEFAULT (WL_BCN_RPT_ASSOC_SCAN_CACHE_COUNT_MAX)
-
-#define WL_BCN_REPORT_CMD_VERSION 1
-struct wl_bcn_report_cfg {
- uint32 flags; /**< Flags that defines the operation/setting information */
- uint32 scan_cache_timeout; /**< scan cache timeout value in millisec */
- uint32 scan_cache_timer_pend; /**< Read only pending time for timer expiry in millisec */
- uint8 scan_cache_cnt; /**< scan cache count */
-};
-
-/* endif (WL_ASSOC_BCN_RPT) */
-
-/* Thermal, Voltage, and Power Mitigation */
-#define TVPM_REQ_VERSION_1 1
-#define TVPM_REQ_CURRENT_VERSION TVPM_REQ_VERSION_1
-
-/* tvpm iovar data */
-typedef struct {
- uint16 version; /* TVPM request version */
- uint16 length; /* Length of the entire structure */
-
- uint16 req_type; /* Request type: wl_tvpm_req_type_t */
- uint16 req_len; /* Length of the following value */
- uint8 value[]; /* Variable length data depending on req_type */
-} wl_tvpm_req_t;
-
-/* tvpm iovar request types */
-typedef enum {
- WL_TVPM_REQ_CLTM_INDEX, /* req_value: uint32, range 1...100 */
- WL_TVPM_REQ_PPM_INDEX, /* req_value: uint32, range 1...100 */
- WL_TVPM_REQ_ENABLE, /* req_value: uint32, range 0...1 */
- WL_TVPM_REQ_STATUS, /* req_value: none */
- WL_TVPM_REQ_PERIOD, /* req_value: int32, range {-1,1-10} */
- WL_TVPM_REQ_MAX
-} wl_tvpm_req_type_t;
-
-/* structure for data returned by request type WL_TVPM_REQ_STATUS */
-typedef struct wl_tvpm_status {
- uint16 enable; /* whether TVPM is enabled */
- uint16 tx_dutycycle; /* a percentage: 1-100 */
- int16 tx_power_backoff; /* 0...-6 */
- uint16 num_active_chains; /* 1...3 */
- int16 temp; /* local temperature in degrees C */
- uint8 vbat; /* local voltage in units of 0.1V */
- uint8 pad;
-} wl_tvpm_status_t;
-
-/* TVPM ecounters */
-typedef struct wl_tvpm_ecounters_t {
- uint16 version; /* version field */
- uint16 length; /* byte length in wl_tvpm_ecounters_t starting at version */
- uint16 tx_dutycycle; /* a percentage: 1-100 */
- int16 tx_power_backoff; /* 0...-6 */
- uint16 num_active_chains; /* 1...3 */
- int16 temp; /* local temperature */
- uint8 vbat; /* local voltage */
- uint8 cltm; /* CLTM index */
- uint8 ppm; /* PPM index */
- uint8 pad; /* pad to align to uint16 */
-} wl_tvpm_ecounters_t;
-
-#define TDMTX_ECOUNTERS_VERSION_V1 1
-#define TDMTX_ECOUNTERS_VERSION_V2 2
-
-/* TDMTX ecounters */
-typedef struct wl_tdmtx_ecounters_v1 {
- uint16 version; /* version field */
- uint16 length; /* byte length in wl_tdmtx_ecounters_t starting at version */
- uint32 txa_on; /* TXA on requests */
- uint32 txa_tmcnt; /* Total number of TXA timeout */
- uint32 por_on; /* TXA POR requests */
- uint32 txpuen; /* Path enable requests */
- uint32 txpudis; /* Total number of times Tx path is muted on the slice */
- uint32 txpri_on; /* Total number of times Tx priority was obtained by the slice */
- uint32 txdefer; /* Total number of times Tx was deferred by the slice */
- uint32 txmute; /* Total number of times active Tx muted on the slice */
- uint32 actpwrboff; /* Total number of times TX power is backed off by the slice */
- uint32 txa_dur; /* Total time txa on */
- uint32 txpri_dur; /* Total time TXPri */
- uint32 txdefer_dur; /* Total time txdefer */
-} wl_tdmtx_ecounters_v1_t;
-
-/* TDMTX ecounters for version 2 */
-typedef struct wl_tdmtx_ecounters_v2 {
- uint16 version; /* version field */
- uint16 length; /* byte length in wl_tdmtx_ecounters_t starting at version */
- uint32 txa_on; /* TXA on requests */
- uint32 txa_tmcnt; /* Total number of TXA timeout */
- uint32 porhi_on; /* TXA PORHI requests */
- uint32 porlo_on; /* TXA PORLO requests */
- uint32 txpuen; /* Path enable requests */
- uint32 txpudis; /* Total number of times Tx path is muted on the slice */
- uint32 txpri_on; /* Total number of times Tx priority was obtained by the slice */
- uint32 txdefer; /* Total number of times Tx was deferred by the slice */
- uint32 txmute; /* Total number of times active Tx muted on the slice */
- uint32 actpwrboff; /* Total number of times TX power is backed off by the slice */
- uint32 txa_dur; /* Total time txa on */
- uint32 txpri_dur; /* Total time TXPri */
- uint32 txdefer_dur; /* Total time txdefer */
-} wl_tdmtx_ecounters_v2_t;
-
-/* Note: if this struct is changing update wl_scb_ecounters_vX_t version,
- * as this struct is sent as payload in wl_scb_ecounters_vX_t
- */
-typedef struct wlc_scb_stats_v1 {
- uint32 tx_pkts; /* num of packets transmitted (ucast) */
- uint32 tx_failures; /* num of packets failed */
- uint32 rx_ucast_pkts; /* num of unicast packets received */
- uint32 rx_mcast_pkts; /* num of multicast packets received */
- uint32 tx_rate; /* Rate of last successful tx frame */
- uint32 rx_rate; /* Rate of last successful rx frame */
- uint32 rx_decrypt_succeeds; /* num of packets decrypted successfully */
- uint32 rx_decrypt_failures; /* num of packets decrypted unsuccessfully */
- uint32 tx_mcast_pkts; /* num of mcast pkts txed */
- uint64 tx_ucast_bytes; /* data bytes txed (ucast) */
- uint64 tx_mcast_bytes; /* data bytes txed (mcast) */
- uint64 rx_ucast_bytes; /* data bytes recvd ucast */
- uint64 rx_mcast_bytes; /* data bytes recvd mcast */
- uint32 tx_pkts_retried; /* num of packets where a retry was necessary */
- uint32 tx_pkts_retry_exhausted; /* num of packets where a retry was exhausted */
- uint32 tx_rate_mgmt; /* Rate of last transmitted management frame */
- uint32 tx_rate_fallback; /* last used lowest fallback TX rate */
- uint32 rx_pkts_retried; /* # rx with retry bit set */
- uint32 tx_pkts_total; /* total num of tx pkts */
- uint32 tx_pkts_retries; /* total num of tx retries */
- uint32 tx_pkts_fw_total; /* total num of tx pkts generated from fw */
- uint32 tx_pkts_fw_retries; /* num of fw generated tx pkts retried */
- uint32 tx_pkts_fw_retry_exhausted; /* num of fw generated tx pkts where retry exhausted */
-} wlc_scb_stats_v1_t;
-
-/* ecounters for scb stats
- * XTLV ID: WL_IFSTATS_XTLV_SCB_ECOUNTERS
- */
-
-#define WL_SCB_ECOUNTERS_VERSION_1 1
-#define WL_SCB_ECOUNTERS_VERSION_2 2
-
-typedef struct wl_scb_ecounters_v1 {
- uint16 version; /* version field */
- uint16 length; /* struct length starting from version */
- uint32 chanspec; /* current chanspec where scb is operating */
- struct ether_addr ea; /* peer ndi or sta ea */
- uint8 peer_type; /* peer type */
- uint8 pad;
-
- /* scb tx and rx stats */
- wlc_scb_stats_v1_t stats;
-} wl_scb_ecounters_v1_t;
-
-typedef struct wl_scb_ecounters_v2 {
- uint16 version; /* version field */
- uint16 length; /* struct length starting from version */
- uint32 chanspec; /* current chanspec where scb is operating */
- struct ether_addr ea; /* peer ndi or sta ea */
- uint8 peer_type; /* peer type */
- uint8 pad;
-
- /* scb tx and rx stats */
- uint16 tx_rate; /* Rate(in Mbps) of last successful tx frame */
- uint16 rx_rate; /* Rate(in Mbps) of last successful rx frame */
- uint16 tx_rate_fallback; /* last used lowest fallback TX rate(in Mbps) */
- uint16 pad1;
- uint32 rx_decrypt_succeeds; /* num of packets decrypted successfully */
- uint32 rx_decrypt_failures; /* num of packets decrypted unsuccessfully */
- uint32 rx_pkts_retried; /* # rx with retry bit set */
- uint32 tx_pkts_retries; /* total num of tx retries */
- uint32 tx_failures; /* num of packets failed */
- uint32 tx_pkts_total; /* total num of tx pkts */
- int8 rssi[WL_STA_ANT_MAX]; /* average rssi per antenna of data frames */
-} wl_scb_ecounters_v2_t;
-
-/* ecounters for nan slot stats
- * XTLV ID: WL_IFSTATS_XTLV_NAN_SLOT_STATS
- */
-
-#define WL_NAN_SLOT_ECOUNTERS_VERSION_1 1
-#define WL_NAN_SLOT_ECOUNTERS_VERSION_2 2
-
-typedef struct wl_nan_slot_ecounters_v1 {
- uint16 version; /* version field */
- uint16 length; /* struct length starting from version */
- uint32 chan[NAN_MAX_BANDS]; /* cur nan slot chanspec of both bands */
- uint16 cur_slot_idx; /* cur nan slot index */
- uint16 pad;
- nan_sched_stats_t sched; /* sched stats */
- wl_nan_mac_stats_t mac; /* mac stats */
-} wl_nan_slot_ecounters_v1_t;
-
-typedef struct wl_nan_slot_ecounters_v2 {
- uint16 version; /* version field */
- uint16 length; /* struct length starting from version */
- uint32 chan[NAN_MAX_BANDS]; /* cur nan slot chanspec of both bands */
- uint16 cur_slot_idx; /* cur nan slot index */
- uint16 pad;
- nan_sched_stats_t sched; /* sched stats */
- wl_nan_mac_stats_t mac; /* mac stats */
- /* for v2 */
- uint16 bcn_rx_drop_rssi; /* Beacon received but ignored due to weak rssi */
- uint16 bcn_rx_drop_rssi_5g; /* 5G Beacon received but ignored due to weak rssi */
- uint16 cnt_rssi_close; /* cnt of beacon rssi > rssi_close received */
- uint16 cnt_rssi_close_5g; /* cnt of 5G beacon rssi > rssi_close received */
- uint16 cnt_rssi_mid; /* cnt of beacon rssi > rssi_middle received */
- uint16 cnt_rssi_mid_5g; /* cnt of 5G beacon rssi > rssi_middle received */
- uint16 bcn_txfail; /* Beacon sending failure count */
- uint16 bcn_txfail_5g; /* sending 5G beacon failure count */
-} wl_nan_slot_ecounters_v2_t;
-
-/* WL_STATS_XTLV_NDP_SESSION_STATUS for ecounters */
-#define WL_NAN_SESSION_STATUS_EC_VERSION_1 1
-typedef struct wl_nan_ndp_session_status_v1_s {
- uint16 version; /* version field */
- uint16 length; /* struct length starting from version */
- uint8 role; /* Role of NAN device */
- uint8 ndp_id; /* local NDP ID */
- uint8 state; /* NDP state */
- uint8 nan_sec_csid; /* security csid */
- struct ether_addr lndi_addr; /* Local NDI addr */
- struct ether_addr pnmi_addr; /* Peer NMI addr */
- struct ether_addr pndi_addr; /* Peer NDI addr */
- uint8 dpe_state; /* DPE state to know where timeout/dpend has come */
- uint8 pad;
-} wl_nan_ndp_session_status_v1_t;
-
-/* WL_STATS_XTLV_NAN_DISC_FRM_STATUS for ecounters */
-#define WL_NAN_DISC_FRM_STATUS_EC_VERSION_1 1
-typedef struct wl_nan_disc_frame_status_v1_s {
- uint16 version; /* version field */
- uint16 length; /* struct length starting from version */
- uint8 type; /* wl_nan_frame_type_t */
- uint8 status; /* For TX status, success or failure */
- uint8 reason_code; /* to identify reason when status is failure */
- uint8 inst_id; /* Publish or subscribe instance id */
- uint8 req_id; /* Requestor instance id */
- uint8 pad;
- uint16 token; /* seq num to keep track of pkts sent by host */
-} wl_nan_disc_frame_status_v1_t;
-/*
- * BT log definitions
- */
-
-/* common iovar struct */
-typedef struct wl_btl {
- uint16 subcmd_id; /* subcommand id */
- uint16 len; /* total length of data[] */
- uint8 data[2]; /* subcommand data, variable length */
-} wl_btl_t;
-
-/* subcommand ids */
-#define WL_BTL_SUBCMD_ENABLE 0 /* enable/disable logging */
-#define WL_BTL_SUBCMD_STATS 1 /* statistics */
-
-/* WL_BTL_SUBCMD_ENABLE data */
-typedef struct wl_blt_enable {
- uint8 enable; /* 1 - enable, 0 - disable */
- uint8 pad[3]; /* 4-byte struct alignment */
-} wl_btl_enable_t;
-
-/* WL_BTL_SUBCMD_STATS data */
-typedef struct wl_blt_stats {
- uint32 bt_interrupt; /* num BT interrupts */
- uint32 config_req; /* num CONFIG_REQ */
- uint32 config_res_success; /* num CONFIG_RES successful */
- uint32 config_res_fail; /* num CONFIG_RES failed */
- uint32 log_req; /* num LOG_REQ */
- uint32 log_res_success; /* num LOG_RES successful */
- uint32 log_res_fail; /* num LOG_RES failed */
- uint32 indirect_read_fail; /* num indirect read fail */
- uint32 indirect_write_fail; /* num indirect write fail */
- uint32 dma_fail; /* num DMA failed */
- uint32 min_log_req_duration; /* min log request duration in usec */
- uint32 max_log_req_duration; /* max log request duration in usec */
- uint16 mem_dump_req; /* num mem dump requests */
- uint16 mem_dump_success; /* num mem dumps successful */
- uint16 mem_dump_fail; /* num mem dumps failed */
- uint16 bt_wake_success; /* num BT wakes successful */
- uint16 bt_wake_fail; /* num BT wakes failed */
- uint16 mem_dump_req_interrupt; /* num MEM_DUMP_REQ interrupt */
- uint16 mem_dump_res_interrupt; /* num MEM_DUMP_RES interrupt */
- uint16 mem_dump_res_timeout; /* num MEM_DUMP_RES timeout */
- uint16 mem_dump_proc_no_bt_ready; /* num proceed if no BT ready */
- uint16 mem_dump_proc_no_bt_response; /* num proceed if no BT response */
- uint16 mem_dump_proc_no_bt_clock; /* num proceed if no BT clock */
- uint16 pad; /* alignment */
- uint32 last_failed_region; /* start addr of last failed region */
- uint32 min_mem_dump_duration; /* min mem dump duration in usec */
- uint32 max_mem_dump_duration; /* max mem dump duration in usec */
-} wl_btl_stats_t;
-
-/* IOV AWD DATA */
-
-/* AWD DATA structures */
-typedef struct {
- uint8 version; /* Extended trap version info */
- uint8 reserved; /* currently unused */
- uint16 length; /* Length of data excluding this header */
- uint8 data[]; /* this data is TLV of tags */
-} awd_data_v1_t;
-
-/* AWD TAG structure */
-typedef struct {
- uint8 tagid; /* one of AWD DATA TAGs numbers */
- uint8 length; /* the data size represented by this field must be aligned to 32 bits */
- uint8 data[]; /* variable size, defined by length field */
-} awd_tag_data_v1_t;
-
-/* IOV ETD DATA */
-
-/* ETD DATA structures */
-typedef struct {
- uint8 version; /* Extended trap version info */
- uint8 reserved; /* currently unused */
- uint16 length; /* Length of data excluding this header */
- uint8 data[]; /* this data is TLV of tags */
-} etd_data_v1_t;
-
-/* ETD TAG structure */
-typedef struct {
- uint8 tagid; /* one of ETD DATA TAGs numbers */
- uint8 length; /* the data size represented by this field must be aligned to 32 bits */
- uint8 data[]; /* variable size, defined by length field */
-} etd_tag_data_v1_t;
-
-/* ETD information structures associated with ETD_DATA_Tags */
-/* ETD_JOIN_CLASSIFICATION_INFO 10 */
-typedef struct {
- uint8 assoc_type; /* assoc type */
- uint8 assoc_state; /* current state of assoc state machine */
- uint8 wpa_state; /* wpa->state */
- uint8 wsec_portopen; /* shows if security port is open */
- uint8 total_attempts_num; /* total number of join attempts (bss_retries) */
- uint8 num_of_targets; /* up to 3, in current design */
- uint8 reserved [2]; /* padding to get 32 bits alignment */
- uint32 wsec; /* bsscfg->wsec */
- uint32 wpa_auth; /* bsscfg->WPA_auth */
- uint32 time_to_join; /* time duration to process WLC_SET_SSID request (ms) */
-} join_classification_info_v1_t;
-
-/* ETD_JOIN_TARGET_CLASSIFICATION_INFO 11 */
-typedef struct {
- int8 rssi; /* RSSI on current channel */
- uint8 cca; /* CCA on current channel */
- uint8 channel; /* current channel */
- uint8 num_of_attempts; /* (bss_retries) up to 5 */
- uint8 oui[3]; /* the first three octets of the AP's address */
- uint8 reserved; /* padding to get 32 bits alignment */
- uint32 time_duration; /* time duration of current attempt (ms) */
-} join_target_classification_info_v1_t;
-
-/* ETD_ASSOC_STATE 12 */
-typedef struct {
- uint8 assoc_state; /* assoc type */
- uint8 reserved [3]; /* padding to get 32 bits alignment */
-} join_assoc_state_v1_t;
-
-/* ETD_CHANNEL 13 tag */
-typedef struct {
- uint8 channel; /* last attempt channel */
- uint8 reserved [3]; /* padding to get 32 bits alignment */
-} join_channel_v1_t;
-
-/* ETD_TOTAL_NUM_OF_JOIN_ATTEMPTS 14 */
-typedef struct {
- uint8 total_attempts_num; /* total number of join attempts (bss_retries) */
- uint8 reserved [3]; /* padding to get 32 bits alignment */
-} join_total_attempts_num_v1_t;
-
-/* IOV_ROAM_CACHE structures */
-
-enum wl_rmc_report_cmd_id {
- WL_RMC_RPT_CMD_VER = 0,
- WL_RMC_RPT_CMD_DATA = 1,
- WL_RMC_RPT_CMD_LAST
-};
-
-enum wl_rmc_report_xtlv_id {
- WL_RMC_RPT_XTLV_VER = 0x0,
- WL_RMC_RPT_XTLV_BSS_INFO = 0x1,
- WL_RMC_RPT_XTLV_CANDIDATE_INFO = 0x2
-};
-
-/* WL_RMC_RPT_XTLV_BSS_INFO */
-typedef struct {
- int16 rssi; /* current BSS RSSI */
- uint8 reason; /* reason code for last full scan */
- uint8 status; /* last status code for not roaming */
- uint32 fullscan_count; /* number of full scans performed on current BSS */
- uint32 time_full_scan; /* delta time (in ms) between cur time and full scan timestamp */
-} rmc_bss_info_v1_t;
-
-/* WL_RMC_RPT_XTLV_CANDIDATE_INFO */
-typedef struct {
- int16 rssi; /* last seen rssi */
- uint16 ctl_channel; /* channel */
- uint32 time_last_seen; /* delta time (in ms) between cur time and last seen timestamp */
- uint16 bss_load; /* BSS load */
- uint8 bssid [6]; /* padding to get 32 bits alignment */
-} rmc_candidate_info_v1_t;
-
-#define WL_FILTER_IE_VERSION 1 /* deprecated */
-enum wl_filter_ie_options {
- WL_FILTER_IE_CLEAR = 0, /* allow element id in packet.For suboption */
- WL_FILTER_IE_SET = 1, /* filter element id in packet.For suboption */
- WL_FILTER_IE_LIST = 2, /* list element ID's.Set as option */
- WL_FILTER_IE_CLEAR_ALL = 3, /* clear all the element.Set as option */
- WL_FILTER_IE_CHECK_SUB_OPTION = 4 /* check for suboptions.Set only as option */
-};
-
-typedef struct wl_filter_ie_tlv {
- uint16 id; /* elelment id [ + ext id ] */
- uint16 len; /* sub option length + pattern length */
- uint8 data[]; /* sub option + pattern matching(OUI,type,sub-type) */
-} wl_filter_ie_tlv_t;
-
-#define WL_FILTER_IE_VERSION_1 1 /* the latest version */
-typedef struct wl_filter_ie_iov_v1 {
- uint16 version; /* Structure version */
- uint16 len; /* Total length of the structure */
- uint16 fixed_length; /* Total length of fixed fields */
- uint8 option; /* Filter action - check for suboption */
- uint8 pad[1]; /* Align to 4 bytes */
- uint32 pktflag; /* frame type - FC_XXXX */
- uint8 tlvs[]; /* variable data (zero in for list ,clearall) */
-} wl_filter_ie_iov_v1_t;
-
-/* Event aggregation config */
-#define EVENT_AGGR_CFG_VERSION 1
-#define EVENT_AGGR_DISABLED 0x0
-#define EVENT_AGGR_ENABLED 0x1
-
-#define EVENT_AGGR_BUFSIZE_MAX 1512
-#define EVENT_AGGR_BUFSIZE_MIN 512
-
-#define EVENT_AGGR_FLUSH_TIMEOUT_DEFAULT 100
-#define EVENT_AGGR_FLUSH_TIMEOUT_MAX 2000
-#define EVENT_AGGR_NUM_EVENTS_FLUSH 5
-typedef struct event_aggr_config {
- uint16 version;
- uint16 len;
- uint16 flags; /* bit 0 to enable/disable the feature */
- uint16 bufsize; /* Aggregate buffer size */
- uint16 flush_timeout; /* Timeout for event flush */
- uint16 num_events_flush; /* Number of events aggregated before flush */
-} event_aggr_config_t;
-
-#ifndef WL_TDMTX_TYPEDEF_HAS_ALIAS
-typedef tdmtx_cnt_v1_t tdmtx_cnt_t;
-typedef tdmtx_cnt_shm_v1_t tdmtx_cnt_shm_t;
-typedef wl_tdmtx_ecounters_v1_t wl_tdmtx_ecounters_t;
-#define WL_CNT_TDMTX_STRUCT_SZ (sizeof(tdmtx_cnt_t))
-#define WL_CNT_TDMTX_SHM_SZ (sizeof(tdmtx_cnt_shm_t))
-#endif // endif
-
-/** chanctxt related statistics */
-#define CHANCTXT_STATS_VERSION_1 1
-#define CHANCTXT_STATS_CURRENT_VERSION CHANCTXT_STATS_VERSION_1
-typedef struct wlc_chanctxt_stats {
- uint32 excursionq_end_miss;
- uint32 activeq_end_miss;
- uint32 no_chanctxt_count;
- uint32 txqueue_end_incomplete;
- uint32 txqueue_start_incomplete;
-} wlc_chanctxt_stats_core_t;
-
-typedef struct chanctxt_stats {
- uint16 version;
- uint16 length;
- wlc_chanctxt_stats_core_t corestats[MAX_NUM_D11CORES];
-} wlc_chanctxt_stats_t;
-
-typedef struct wl_txdc_ioc {
- uint8 ver;
- uint8 id; /* ID of the sub-command */
- uint16 len; /* total length of all data[] */
- uint8 data[]; /* var len payload */
-} wl_txdc_ioc_t;
-
-/*
- * iovar subcommand ids
- */
-enum {
- IOV_TXDC_ENB = 1,
- IOV_TXDC_MODE = 2,
- IOV_TXDC_DUMP = 3,
- IOV_TXDC_LAST
-};
-
-/* WL_NAN_XTLV_SLOT_STATS */
-/* WL_NAN_EVENT_SLOT_START, WL_NAN_EVENT_SLOT_END */
-typedef struct nan_slot_event_data {
- uint32 cur_slot_idx; /* current idx in channel schedule */
- uint32 fw_time; /* target current time in microseconds */
- uint32 band; /* current band (2G/5G) for which the event is received */
-} nan_slot_event_data_t;
-
-#ifndef BCMUTILS_ERR_CODES
-
-/* SAE (Simultaneous Authentication of Equals) error codes.
- * These error codes are local.
- */
-
-/* SAE status codes are reserved from -3072 to -4095 (1K) */
-
-enum wl_sae_status {
- WL_SAE_E_AUTH_FAILURE = -3072,
- /* Discard silently */
- WL_SAE_E_AUTH_DISCARD = -3073,
- /* Authentication in progress */
- WL_SAE_E_AUTH_CONTINUE = -3074,
- /* Invalid scalar/elt */
- WL_SAE_E_AUTH_COMMIT_INVALID = -3075,
- /* Invalid confirm token */
- WL_SAE_E_AUTH_CONFIRM_INVALID = -3076,
- /* Peer scalar validation failure */
- WL_SAE_E_CRYPTO_SCALAR_VALIDATION = -3077,
- /* Peer element prime validation failure */
- WL_SAE_E_CRYPTO_ELE_PRIME_VALIDATION = -3078,
- /* Peer element is not on the curve */
- WL_SAE_E_CRYPTO_ELE_NOT_ON_CURVE = -3079,
- /* Generic EC error (eliptic curve related) */
- WL_SAE_E_CRYPTO_EC_ERROR = -3080,
- /* Both local and peer mac addrs are same */
- WL_SAE_E_CRYPTO_EQUAL_MACADDRS = -3081,
- /* Loop exceeded in deriving the scalar */
- WL_SAE_E_CRYPTO_SCALAR_ITER_EXCEEDED = -3082,
- /* ECC group is unsupported */
- WL_SAE_E_CRYPTO_UNSUPPORTED_GROUP = -3083,
- /* Exceeded the hunting-and-pecking counter */
- WL_SAE_E_CRYPTO_PWE_COUNTER_EXCEEDED = -3084,
- /* SAE crypto component is not initialized */
- WL_SAE_E_CRYPTO_NOT_INITED = -3085,
- /* bn_get has failed */
- WL_SAE_E_CRYPTO_BN_GET_ERROR = -3086,
- /* bn_set has failed */
- WL_SAE_E_CRYPTO_BN_SET_ERROR = -3087,
- /* PMK is not computed yet */
- WL_SAE_E_CRYPTO_PMK_UNAVAILABLE = -3088,
- /* Peer confirm did not match */
- WL_SAE_E_CRYPTO_CONFIRM_MISMATCH = -3089,
- /* Element K is at infinity no the curve */
- WL_SAE_E_CRYPTO_KEY_AT_INFINITY = -3090,
- /* SAE Crypto private data magic number mismatch */
- WL_SAE_E_CRYPTO_PRIV_MAGIC_MISMATCH = -3091
-};
-
-/* PMK manager block. Event codes from -5120 to -6143 */
-
-/* PSK hashing event codes */
-typedef enum wlc_pmk_psk_hash_status {
- WL_PMK_E_PSK_HASH_FAILED = -5120,
- WL_PMK_E_PSK_HASH_DONE = -5121,
- WL_PMK_E_PSK_HASH_RUNNING = -5122,
- WL_PMK_E_PSK_INVALID = -5123,
- WL_PMK_E_PSK_NOMEM = -5124
-} wlc_pmk_psk_hash_status_t;
-
-#endif /* BCMUTILS_ERR_CODES */
-
-/* Block Channel */
-#define WL_BLOCK_CHANNEL_VER_1 1u
-
-typedef struct wl_block_ch_v1 {
- uint16 version;
- uint16 len;
- uint32 band; /* Band select */
- uint8 channel_num; /* The number of block channels in the selected band */
- uint8 padding[3];
- uint8 channel[]; /* Channel to block, Variable Length */
-} wl_block_ch_v1_t;
-
-typedef struct dma_wl_addr_region {
- uint32 addr_low;
- uint32 addr_high;
-} dma_wl_addr_region_t;
-
-#define WL_ROAMSTATS_IOV_VERSION 1
-
-#define MAX_PREV_ROAM_EVENTS 16u
-
-#define ROAMSTATS_UNKNOWN_CNT 0xFFFFu
-
-/* roaming statistics counter structures */
-typedef struct wlc_assoc_roamstats_event_msg_v1 {
- uint32 event_type; /* Message (see below) */
- uint32 status; /* Status code (see below) */
- uint32 reason; /* Reason code (if applicable) */
- uint32 timestamp; /* Timestamp of event */
-} wlc_assoc_roamstats_event_msg_v1_t;
-
-enum wl_roamstats_cmd_id {
- WL_ROAMSTATS_XTLV_CMD_VER = 0,
- WL_ROAMSTATS_XTLV_CMD_RESET = 1,
- WL_ROAMSTATS_XTLV_CMD_STATUS = 2,
- WL_ROAMSTATS_XTLV_CMD_LAST /* Keep this at the end */
-};
-
-enum wl_roamstats_xtlv_id {
- WL_ROAMSTATS_XTLV_VER = 0x0,
- WL_ROAMSTATS_XTLV_COUNTER_INFO = 0x1,
- WL_ROAMSTATS_XTLV_PREV_ROAM_EVENTS = 0x2,
- WL_ROAMSTATS_XTLV_REASON_INFO = 0x3
-};
-
-/* WL_ROAMSTATS_XTLV_COUNTER_INFO */
-typedef struct {
- uint32 initial_assoc_time;
- uint32 prev_roam_time;
- uint32 host_access_time;
- uint16 roam_success_cnt;
- uint16 roam_fail_cnt;
- uint16 roam_attempt_cnt;
- uint16 max_roam_target_cnt;
- uint16 min_roam_target_cnt;
- uint16 max_cached_ch_cnt;
- uint16 min_cached_ch_cnt;
- uint16 partial_roam_scan_cnt;
- uint16 full_roam_scan_cnt;
-} roamstats_counter_info_v1_t;
-
-/* WL_ROAMSTATS_XTLV_PREV_ROAM_EVENTS */
-typedef struct {
- uint16 max;
- uint16 pos;
- wlc_assoc_roamstats_event_msg_v1_t roam_event[];
-} roamstats_prev_roam_events_v1_t;
-
-/* WL_ROAMSTATS_XTLV_REASON_INFO */
-typedef struct {
- uint16 max;
- uint16 reason_cnt[];
-} roamstats_reason_info_v1_t;
-
-#ifdef HEALTH_CHECK_WLIOCTL
-/* Health check status format:
- * reporting status size = uint32
- * 8 LSB bits are reserved for: WARN (0), ERROR (1), and other levels
- * MSB 24 bits are reserved for client to fill in its specific status
- */
-#define HEALTH_CHECK_STATUS_OK 0
-/* Bit positions. */
-#define HEALTH_CHECK_STATUS_WARN 0x1
-#define HEALTH_CHECK_STATUS_ERROR 0x2
-#define HEALTH_CHECK_STATUS_TRAP 0x4
-#define HEALTH_CHECK_STATUS_NOEVENT 0x8
-
-/* Indication that required information is populated in log buffers */
-#define HEALTH_CHECK_STATUS_INFO_LOG_BUF 0x80
-#define HEALTH_CHECK_STATUS_MASK (0xFF)
-
-#define HEALTH_CHECK_STATUS_MSB_SHIFT 8
-#endif /* HEALTH_CHECK_WLIOCTL */
-
-/** receive signal reporting module interface */
-
-#define WL_RXSIG_IOV_MAJOR_VER (1u)
-#define WL_RXSIG_IOV_MINOR_VER (1u)
-#define WL_RXSIG_IOV_MAJOR_VER_SHIFT (8u)
-#define WL_RXSIG_IOV_VERSION \
- ((WL_RXSIG_IOV_MAJOR_VER << WL_RXSIG_IOV_MAJOR_VER_SHIFT) | WL_RXSIG_IOV_MINOR_VER)
-#define WL_RXSIG_IOV_GET_MAJOR(x) (x >> WL_RXSIG_IOV_MAJOR_VER_SHIFT)
-#define WL_RXSIG_IOV_GET_MINOR(x) (x & 0xFF)
-
-enum wl_rxsig_cmd_rssi_mode {
- WL_RXSIG_MODE_DB = 0x0,
- WL_RXSIG_MODE_QDB = 0x1,
- WL_RXSIG_MODE_LAST
-};
-
-/* structure defs for 'wl rxsig [cmd]' iovars */
-enum wl_rxsig_iov_v1 {
- WL_RXSIG_CMD_RSSI = 0x1, /**< combined rssi moving avg */
- WL_RXSIG_CMD_SNR = 0x2, /**< combined snr moving avg */
- WL_RXSIG_CMD_RSSIANT = 0x3, /**< rssi moving avg per-ant */
- WL_RXSIG_CMD_SNRANT = 0x4, /**< snr moving avg per-snr */
- WL_RXSIG_CMD_SMPLWIN = 0x5, /**< config for sampling window size */
- WL_RXSIG_CMD_SMPLGRP = 0x7, /**< config for grouping of pkt type */
- WL_RXSIG_CMD_STA_MA = 0x8,
- WL_RXSIG_CMD_MAMODE = 0x9,
- WL_RXSIG_CMD_MADIV = 0xa,
- WL_RXSIG_CMD_DUMP = 0xb,
- WL_RXSIG_CMD_DUMPWIN = 0xc,
- WL_RXSIG_CMD_TOTAL
-};
-
-struct wl_rxsig_cfg_v1 {
- uint16 version;
- chanspec_t chan; /**< chanspec info for querying stats */
- uint8 pmac[ETHER_ADDR_LEN]; /**< peer(link) mac address */
-};
-
-struct wl_rxsig_iov_rssi_v1 {
- int8 rssi;
- uint8 rssi_qdb;
- uint8 pad[2];
-};
-
-struct wl_rxsig_iov_snr_v1 {
- int16 snr;
- uint16 pad;
-};
-
-struct wl_rxsig_iov_rssi_ant_v1 {
- int8 deci[WL_RSSI_ANT_MAX];
- uint8 frac[WL_RSSI_ANT_MAX];
- uint8 rssi_mode; /**< MODE_DB or MODE_QDB */
- uint8 num_of_ant; /**< total number of ants */
- uint8 pad[2]; /**< padding for 32bit align */
-};
-
-#ifdef BCM_SDC
-
-#define SDC_TRIGGER_CONFIG_VER_1 1
-typedef struct {
- uint16 version;
- uint16 type;
- uint8 activate;
- uint8 pad;
-} sdc_trigger_cfg_t;
-
-typedef enum sdc_trigger_types {
- SDC_TYPE_STA_ONBOARD_DEBUG = 1,
- SDC_TYPE_SCAN_DEBUG = 2,
-#ifdef SDC_TEST
- /*
- * This is for test purpose only. Don't assign specific value.
- * Keep at the end
- */
- SDC_TYPE_TEST1,
- SDC_TYPE_TEST2,
- SDC_TYPE_TEST3,
-#endif /* SDC_TEST */
- SDC_TYPE_MAX_TRIGGER
-} sdc_trigger_types_t;
-
-/* *** SDC_TYPE_STA_ONBOARD_DEBUG specific ******* */
-
-/* tlv IDs uniquely identifies tx and rx stats component */
-enum wl_slice_hist_stats_xtlv_id {
- WL_STATE_HIST_TX_TOSS_REASONS = 0x1,
- WL_STATE_HIST_RX_TOSS_REASONS = 0x2
-};
-
-#ifndef WLC_HIST_TOSS_LEN
-#define WLC_HIST_TOSS_LEN (8u)
-#endif // endif
-#define WL_HIST_COMPACT_TOSS_STATS_TX_VER_1 (1u)
-#define WL_HIST_COMPACT_TOSS_STATS_RX_VER_1 (1u)
-
-/* Format of running toss reasons with seq
- * [see HIST_TOSS_xxxx macros]
- * bits [7..0] : 8 bits : toss sts.
- * [11..8] : cfgidx
- * [15..12]: ac
- * [31..16]: seq
- */
-#define HIST_TOSS_STS_POS (0u)
-#define HIST_TOSS_STS_MASK (0x000000ffu)
-#define HIST_TOSS_CFGIDX_POS (8u)
-#define HIST_TOSS_CFGIDX_MASK (0x00000f00u)
-#define HIST_TOSS_AC_POS (12u)
-#define HIST_TOSS_AC_MASK (0x0000f000u)
-#define HIST_TOSS_SEQ_POS (16u)
-#define HIST_TOSS_SEQ_MASK (0xffff0000u)
-
-/* Format of toss reasons with count
- * bits [15..0] : 16 bits : toss reason
- * bits [31..16]: 16 bits : count
- */
-#define HIST_TOSS_RC_REASON_POS (0u)
-#define HIST_TOSS_RC_REASON_MASK (0xffffu)
-#define HIST_TOSS_RC_COUNT_POS (16u)
-#define HIST_TOSS_RC_COUNT_MASK (0xffff0000u)
-
-typedef struct {
- uint16 version;
- uint8 hist_toss_type; /* from wl_slice_hist_XX_stats_xtlv_id */
- uint8 hist_toss_num; /* number of elements in hist_toss_xxx */
- uint32 hist_toss_cur_idx; /* latest data is in this index */
- uint32 hist_toss_reasons[WLC_HIST_TOSS_LEN]; /* last 8 reasons along with seq, etc as
- * per HIST_TOSS_xxx format
- */
- uint32 hist_toss_counts[WLC_HIST_TOSS_LEN]; /* toss counts corr to reasons */
-} wl_hist_compact_toss_stats_v1_t;
-
-#define WL_HIST_COMPACT_TOSS_STATS_TX_VER_2 (2u)
-#define WL_HIST_COMPACT_TOSS_STATS_RX_VER_2 (2u)
-
-typedef struct {
- uint16 version;
- uint8 htr_type; /* from wl_slice_hist_XX_stats_xtlv_id */
- uint8 htr_num; /* number of elements in htr_running or htr_rc */
- uint16 htr_rnidx; /* htr_running[rnidx-1] has latest data */
- uint16 htr_rcidx; /* htr_rc[rcidx-1] has latest data */
- uint32 htr_running[WLC_HIST_TOSS_LEN]; /* last 8 reasons along with seq, etc as
- * per WLC_SDC_COMPACT_TOSS_REASON() format
- */
- uint32 htr_rn_ts[WLC_HIST_TOSS_LEN]; /* time stamps corr to htr_running data */
- uint32 htr_rc[WLC_HIST_TOSS_LEN]; /* last 8 toss reasons and counts in
- * WLC_SDC_COMPACT_TOSS_RC() format
- */
- uint32 htr_rc_ts[WLC_HIST_TOSS_LEN]; /* time stamps corr to htr_rc */
-} wl_hist_compact_toss_stats_v2_t;
-
-/* ***END of SDC_TYPE_STA_ONBOARD_DEBUG specific ******* */
-
-#endif /* BCM_SDC */
-
-typedef struct wl_avs_info_v1 {
- uint16 version; /* Structure version */
- uint16 equ_version; /* Equation Version */
- uint32 RO; /* RO in OTP */
- uint32 equ_csr; /* Equated CSR */
- uint32 read_csr; /* Read Back CSR */
- uint32 aging; /* aging setting in nvram */
-} wl_avs_info_v1_t;
-
-#define WL_AVS_INFO_VER_1 1
-
-/* bitmap for clm_flags iovar */
-#define WL_CLM_TXBF 0x01 /**< Flag for Tx beam forming */
-#define WL_CLM_RED_EU 0x02 /* Flag for EU RED */
-#define WL_CLM_EDCRS_EU 0x04 /**< Use EU post-2015 energy detect */
-#define WL_CLM_DFS_TPC 0x08 /**< Flag for DFS TPC */
-#define WL_CLM_RADAR_TYPE_EU 0x10 /**< Flag for EU */
-#define WL_CLM_DFS_FCC WL_CLM_DFS_TPC /**< Flag for DFS FCC */
-#define WL_CLM_DFS_EU (WL_CLM_DFS_TPC | WL_CLM_RADAR_TYPE_EU) /**< Flag for DFS EU */
-
-/* SC (scan core) command IDs */
-enum wl_sc_cmd {
- WL_SC_CMD_DBG = 0,
- WL_SC_CMD_CNX = 1,
- WL_SC_CMD_CAP = 2,
- WL_SC_CMD_CONFIG = 3,
- WL_SC_CMD_LAST
-};
-
-typedef struct wl_ext_auth_evt {
- wlc_ssid_t ssid;
- struct ether_addr bssid;
- unsigned int key_mgmt_suite;
- int status;
-} wl_ext_auth_evt_t;
-
-/* WBUS sub-command IDs for unit test */
-#define WL_WBUS_INA_SLOT_START 0x01u /**< Inactive slot start sub command ID. */
-#define WL_WBUS_INA_SLOT_STOP 0x02u /**< Inactive slot stop sub command ID. */
-
-/* WBUS (WiFi BT uniform scheduler) command IDs */
-enum wl_wbus_cmd {
- WL_WBUS_CMD_VER = 0,
- WL_WBUS_CMD_STATS = 1,
- WL_WBUS_CMD_UNIT_TEST = 2,
- WL_WBUS_CMD_BT_TEST = 3,
- WL_WBUS_CMD_CAP = 4,
- WL_WBUS_CMD_LAST
-};
-
-#define WBUS_BT_SCHED_TEST_PARAMS_VER_1 1
-
-typedef struct wbus_bt_sched_test_params_v1 {
- uint16 version;
- uint16 pad;
- uint32 flags;
- uint32 action;
- uint32 duration;
- uint32 interval;
-} wbus_bt_sched_test_params_v1_t;
-
-#define WBUS_BT_SCHED_ADD 0u
-#define WBUS_BT_SCHED_REMOVE 1u
-#define WBUS_BT_SCHED_INVALID 0xFFu
-
-#define KEY_UPDATE_INFO_VER_V1 1
-typedef struct key_update_info_v1
-{
- uint16 ver;
- uint8 pad;
- uint8 flags;
- uint32 timestamp;
- uint32 algo;
- uint32 key_flags;
- struct ether_addr ea;
- struct ether_addr sa;
-} key_update_info_v1_t;
-
-/* Key update flag bit field */
-#define KEY_UPD_FLAG_ADD_KEY 0x1 /* 0 - Removal, 1 - Add key */
-
-#ifdef WLLLW
-/* LLW Session */
-#define LLW_VERSION 1
-#define LLW_STATS_VERSION 1
-
-/* LLW roles */
-#define LLW_ROLE_SCHEDULER 0
-#define LLW_ROLE_CLIENT 1
-
-/* LLW modes */
-#define LLW_MODE_GAPS 0
-#define LLW_MODE_BACK_TO_BACK 1
-
-/* LLW session max values */
-#define LLW_MAX_SESSION_ID 10
-#define LLW_MAX_FLOW_ID 40
-#define LLW_MAX_CLIENT_NUM 15
-#define LLW_MAX_GAPS_PERIOD 20
-#define LLW_MAX_GAPS_VAR 3
-#define LLW_MAX_RETX_CNT 10
-#define LLW_MAX_AIFSN EDCF_AIFSN_MAX
-#define LLW_MAX_CWMIN EDCF_ECW_MAX
-#define LLW_MAX_CWMAX EDCF_ECW_MAX
-#define LLW_MAX_PER_NUMERATOR 100
-#define LLW_MAX_PER_DENOM 10000
-#define LLW_MAX_CLIENT_ID 15
-#define LLW_MAX_PKT_SIZE 1500
-#define LLW_MAX_PKT_NUM 10
-#define LLW_MAX_MCS 9
-#define LLW_MAX_NUM_STREAMS 8
-#define LLW_MAX_IBS 32
-
-/* Per LLW session config */
-/* WL_LLW_CMD_SESSION_CREATE, WL_LLW_CMD_SESSION_UPDATE */
-typedef struct wl_llw_session_cfg {
- uint8 session_id;
- uint8 role;
- uint8 mode;
- uint8 client_id;
- uint8 gaps_period;
- uint8 gaps_var;
- uint8 aifsn;
- uint8 ecwmin; /* exponent value for minimum contention window */
- uint8 ecwmax; /* exponent value for maximum contention window */
- uint8 mcs;
- uint8 num_streams;
- uint8 ibs; /* interblock spacing in usecs, for spacing between Transaction Blocks */
- uint16 ul_pkt_size;
- uint16 dl_pkt_size;
- uint16 per_denom; /* denominator for target PER */
- uint8 per_numerator; /* this value divided by per_denom gives the target PER */
- uint8 dl_pkt_num;
- uint8 client_num;
- uint8 retx_cnt;
- uint8 pwr_save;
- uint8 auto_ba; /* automatic RX/TX BA session setup (no negotiation needed) */
- uint8 if_index;
- uint8 padding[3];
- struct ether_addr multicast_addr;
- struct ether_addr scheduler_addr;
-} wl_llw_session_cfg_t;
-
-/* WL_LLW_CMD_SESSION_DELETE, WL_LLW_CMD_SESSION_ENABLE, WL_LLW_CMD_SESSION_DISABLE, */
-/* WL_LLW_CMD_SESSION_GET */
-typedef struct wl_llw_session_cmd {
- uint8 session_id;
- uint8 padding[3];
-} wl_llw_session_cmd_t;
-
-/* LLW client config */
-/* WL_LLW_CMD_CLIENT_ADD, WL_LLW_CMD_CLIENT_DELETE, WL_LLW_CMD_CLIENT_GET */
-typedef struct wl_llw_client_cfg {
- uint8 session_id;
- uint8 client_id;
- struct ether_addr mac;
-} wl_llw_client_cfg_t;
-
-/* Get list of session IDs from FW */
-/* WL_LLW_CMD_SESSION_ID */
-typedef struct llw_session_id_list {
- uint8 id_count; /* Number of session IDs */
- uint8 list[]; /* list of session IDs */
-} llw_session_id_list_t;
-
-/* LLW XTLV structures */
-typedef struct wl_llw_iov_cmd {
- uint16 version;
- uint8 cmd_cnt;
- uint8 pad;
- uint8 cmds[];
-} wl_llw_iov_cmd_t;
-
-typedef struct wl_llw_iov_sub_cmd {
- uint16 type;
- uint16 len;
- union {
- int32 status; /* Processed status - Set by FW */
- uint32 options; /* Command Process Options - Set by Host */
- } u;
- uint8 data[];
-} wl_llw_iov_sub_cmd_t;
-
-/* to be used in type field of wl_llw_iov_sub_cmd_t structure while issuing LLW commands */
-typedef enum wl_llw_sub_cmd_xtlv_id {
- WL_LLW_CMD_SESSION_ID,
- WL_LLW_CMD_SESSION_CREATE,
- WL_LLW_CMD_SESSION_DELETE,
- WL_LLW_CMD_SESSION_UPDATE,
- WL_LLW_CMD_SESSION_ENABLE,
- WL_LLW_CMD_SESSION_DISABLE,
- WL_LLW_CMD_SESSION_GET,
- WL_LLW_CMD_CLIENT_ADD,
- WL_LLW_CMD_CLIENT_DELETE,
- WL_LLW_CMD_CLIENT_GET,
- WL_LLW_CMD_FLOW_ADD,
- WL_LLW_CMD_FLOW_DELETE,
- WL_LLW_CMD_FLOW_GET,
- WL_LLW_CMD_STATS
-} wl_llw_sub_cmd_xtlv_id_t;
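/*
 * Illustrative sketch: how a caller might assemble one WL_LLW_CMD_SESSION_CREATE
 * request from the container structures above. The helper name, the buffer handling
 * and the use of LLW_VERSION for wl_llw_iov_cmd_t.version are assumptions made for
 * the example; memcpy() is assumed to be available.
 */
static int
example_build_llw_session_create(uint8 *buf, uint32 buflen, const wl_llw_session_cfg_t *cfg)
{
	wl_llw_iov_cmd_t *iov = (wl_llw_iov_cmd_t *)buf;
	wl_llw_iov_sub_cmd_t *sub;
	uint32 need = sizeof(*iov) + sizeof(*sub) + sizeof(*cfg);

	if (buflen < need)
		return -1;

	iov->version = LLW_VERSION;	/* assumed container version */
	iov->cmd_cnt = 1;
	iov->pad = 0;

	sub = (wl_llw_iov_sub_cmd_t *)iov->cmds;
	sub->type = WL_LLW_CMD_SESSION_CREATE;
	sub->len = sizeof(*cfg);
	sub->u.options = 0;		/* set by host; status is returned by FW */
	memcpy(sub->data, cfg, sizeof(*cfg));
	return (int)need;
}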
-
-/* LLW stats */
-typedef enum wl_llw_xtlv {
- WL_LLW_XTLV_STATS
-} wl_llw_xtlv_t;
-
-typedef struct wl_llw_stats {
- uint32 txpackets;
- uint32 txbytes;
- uint32 txrts;
- uint32 txnocts;
- uint32 txnoack;
- uint32 txfail;
- uint32 txretry;
- uint32 txdropped;
- uint32 tx_avg_q_time;
- uint32 tx_min_q_time;
- uint32 tx_max_q_time;
- uint32 tx_avg_rem_lifetime;
- uint32 tx_min_rem_lifetime;
- uint32 tx_max_rem_lifetime;
- uint32 rxpackets;
- uint32 rxbytes;
- uint32 rxfail;
- uint32 rxretry;
- uint32 txschedfrm;
- uint32 retxschedfrm;
-} wl_llw_stats_t;
-
-typedef struct wl_llw_stats_hdr {
- uint16 version;
- uint16 stats_cnt;
- uint32 tot_len;
- uint8 stat_xtlvs[];
-} wl_llw_stats_hdr_t;
-
-/* WL_LLW_XTLV_STATS */
-typedef struct wl_llw_stats_xtlv {
- uint16 type;
- uint16 len;
- uint8 stats[];
-} wl_llw_stats_xtlv_t;
-
-/* WL_LLW_CMD_STATS */
-typedef struct wl_llw_stats_cmd {
- uint8 session_id;
- uint8 client_id;
- uint16 padding;
-} wl_llw_stats_cmd_t;
-
-/* LLW flow ring ID config */
-/* WL_LLW_CMD_FLOW_ADD, WL_LLW_CMD_FLOW_DELETE, WL_LLW_CMD_FLOW_GET */
-typedef struct wl_llw_flow_cfg {
- uint8 session_id;
- uint8 flow_id;
- uint16 padding;
-} wl_llw_flow_cfg_t;
-#endif /* End of LLW Session */
-
-#define WL_OMI_CONFIG_VERSION_1 1u
-
-/* values for valid_bm */
-#define OMI_CONFIG_VALID_BMP_RXNSS 0x0001u
-#define OMI_CONFIG_VALID_BMP_BW 0x0002u
-#define OMI_CONFIG_VALID_BMP_ULMU_DISABLE 0x0004u
-#define OMI_CONFIG_VALID_BMP_TXNSTS 0x0008u
-#define OMI_CONFIG_VALID_BMP_ERSU_DISABLE 0x0010u
-#define OMI_CONFIG_VALID_BMP_DLMU_RSD_RCM 0x0020u
-#define OMI_CONFIG_VALID_BMP_ULMU_DATA_DISABLE 0x0040u
-#define OMI_CONFIG_VALID_BMP_ALL 0x0FFFu
-
-#define OMI_CONFIG_BW_MAX 3u
-
-typedef struct wl_omi_config {
- uint16 valid_bm; /* validity bitmask for each config */
- uint8 rxnss;
- uint8 bw;
- uint8 ulmu_disable;
- uint8 txnsts;
- uint8 ersu_disable;
- uint8 dlmu_resound_rec;
- uint8 ulmu_data_disable;
- uint8 pad[3];
-} wl_omi_config_t;
-
-typedef struct wl_omi_req {
- uint16 version;
- uint16 len;
- wl_omi_config_t config;
-} wl_omi_req_v1_t;
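/*
 * Illustrative sketch: filling an OMI request that asks only for UL MU to be
 * disabled, by setting just that bit in valid_bm so other fields are left untouched.
 * The helper name is hypothetical; memset() is assumed to be available.
 */
static void
example_fill_omi_ulmu_disable(wl_omi_req_v1_t *req)
{
	memset(req, 0, sizeof(*req));
	req->version = WL_OMI_CONFIG_VERSION_1;
	req->len = sizeof(*req);
	req->config.valid_bm = OMI_CONFIG_VALID_BMP_ULMU_DISABLE;
	req->config.ulmu_disable = 1;
}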
-
-/* Bits for ULMU disable reason */
-#define OMI_ULMU_DISABLED_HOST 0x01u /* Host has disabled it through HE OMI */
-#define OMI_ULMU_DISABLED_NAN 0x04u /* Disabled due to NAN enabled */
-#define OMI_ULMU_DISABLED_BTCOEX 0x08u /* Disabled while in BT Coex activity */
-#define OMI_ULMU_DISABLED_LTECOEX 0x10u /* Disabled due to LTE Coex activity */
-#define OMI_ULMU_DISABLED_NON11AX_CONN 0x20u /* Disabled due to not associated to 11ax AP */
-#define OMI_ULMU_DISABLED_THROTTLE_ENABLE 0x40u /* Disabled due to throttle timer running */
-#define OMI_ULMU_DISABLED_TXCHAIN_DOWNGRADE 0x80u /* Disabled due to Txchain downgrade */
-#define OMI_ULMU_DISABLED_TX_DUTY_CYCLE 0x100u /* Disabled due to tx duty cycle */
-
-/* Bits for DLMU Resound Recommendation reason */
-#define OMI_DLMU_RSD_RCM_HOST (0x1u << 0u) /* Host directly set the bit */
-#define OMI_DLMU_RSD_RCM_MPF (0x1u << 1u) /* Set on MPF state change */
-
-#define WL_OMI_STATUS_VERSION_1 1u
-typedef struct wl_omi_status {
- uint16 version;
- uint16 len;
- wl_omi_config_t omi_pending; /* OMI requests pending */
- uint16 omi_data; /* current OM Control field for completed OMI requests */
- uint16 ulmu_disable_reason; /* Bits representing UL OFDMA disable reasons */
- uint32 ulmu_disable_duration; /* Duration (ms) for which UL OFDMA is disabled */
-} wl_omi_status_v1_t;
-
-#define WL_OMI_STATUS_VERSION_2 2u
-typedef struct wl_omi_status_v2 {
- uint16 version;
- uint16 len;
- wl_omi_config_t omi_pending; /* OMI requests pending */
- uint16 omi_data; /* Current OM Control field for completed OMI requests */
- uint16 ulmu_disable_reason; /* Bits representing UL OFDMA disable reasons */
- uint32 ulmu_disable_duration; /* Duration (ms) for which UL OFDMA is disabled */
- uint32 dlmu_rsd_rcm_duration; /* Dur (ms) for which Resound Recommendation is set */
- uint16 dlmu_rsd_rcm_mpf_state; /* The MPF state value */
- uint16 dlmu_rsd_rcm_reason; /* DL MU-MIMO recommendation reasons bitmap */
-} wl_omi_status_v2_t;
-
-#define WL_ULMU_DISABLE_STATS_VERSION_1 1u
-typedef struct wl_ulmu_disable_stats {
- uint16 version;
- uint16 len;
- uint32 ulmu_disable_ts; /* UL OFDMA disabled timestamp (ms) */
- uint16 ulmu_disable_reason; /* Bits representing UL OFDMA disable reasons */
- uint16 ulmu_disable_count; /* UL MU disable count during current infra association */
- uint32 last_trig_rx_ts; /* Last trigger frame received timestamp (ms) */
- uint16 trig_rx_count; /* No of trigger frames received after last UL OFDMA disable */
- uint16 max_latency; /* Max latency for AP to react to UL OFDMA disable request (ms) */
- uint16 min_latency; /* Min latency for AP to react to UL OFDMA disable request (ms) */
- uint16 avg_latency; /* Avg latency for AP to react to UL OFDMA disable request (ms) */
-} wl_ulmu_disable_stats_v1_t;
-
-/* sub-xtlv IDs within WL_STATS_XTLV_WL_SLICE_TX_HISTOGRAMS */
-enum wl_tx_histogram_id {
- WL_TX_HIST_TXQ_ID = 1,
- WL_TX_HIST_LOW_TXQ_ID = 2,
- WL_TX_HIST_SCBQ_ID = 3,
- WL_TX_HIST_EXCUR_TXQ_ID = 4,
- WL_TX_HIST_EXCUR_LOW_TXQ_ID = 5
-};
-
-/* common tx histogram structure */
-typedef struct wl_tx_hist {
- uint16 hist_bmap; /* bit N indicates histogram follows for priority or fifo N */
- uint16 hist_count; /* count of histograms in var len array */
- uint32 hist[1]; /* var len array of histograms, each prefixed by its hist length */
-} wl_tx_hist_t;
-
-#define WL_TX_HIST_FIXED_LEN (OFFSETOF(wl_tx_hist_t, hist))
-#define WL_TX_HIST_FULL_LEN(num_hist, max_hist_size) \
- (WL_TX_HIST_FIXED_LEN + (num_hist) * \
- (max_hist_size + 1) * sizeof(uint32))
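/*
 * Illustrative sketch: sizing a buffer for the variable-length histograms. Because
 * each histogram in hist[] is prefixed by its own length word, the macro reserves
 * (max_hist_size + 1) uint32 words per histogram. The numbers below are placeholders.
 */
static uint32
example_tx_hist_buf_bytes(void)
{
	/* e.g. 4 histograms of at most 16 entries each:
	 * WL_TX_HIST_FIXED_LEN + 4 * (16 + 1) * sizeof(uint32) = 4 + 272 bytes
	 */
	return WL_TX_HIST_FULL_LEN(4u, 16u);
}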
-
-/* structure for WL_TX_HIST_TXQ, WL_TX_HIST_EXCUR_TXQ_ID */
-typedef struct wl_tx_hist_txq {
- uint32 bsscfg_bmap; /* bitmap of bsscfg indexes associated with this queue */
- wl_tx_hist_t tx_hist; /* tx histograms */
-} wl_tx_hist_txq_t;
-
-#define WL_TX_HIST_TXQ_FIXED_LEN \
- (OFFSETOF(wl_tx_hist_txq_t, tx_hist) + WL_TX_HIST_FIXED_LEN)
-#define WL_TX_HIST_TXQ_FULL_LEN(num_hist, max_hist_size) \
- (OFFSETOF(wl_tx_hist_txq_t, tx_hist) + \
- WL_TX_HIST_FULL_LEN(num_hist, max_hist_size))
-
-/* sub-xtlv IDs within WL_STATS_XTLV_WL_SLICE_TX_HISTOGRAMS */
-enum wl_txq_stop_histogram_id {
- WL_TXQ_STOP_HIST_SW = 1,
- WL_TXQ_STOP_HIST_HW = 2,
- WL_TXQ_STOP_HIST_PKTS_SW = 3,
- WL_TXQ_STOP_HIST_PKTS_HW = 4,
- WL_TXQ_STOP_HIST_MAX = WL_TXQ_STOP_HIST_PKTS_HW
-};
-
-/* common tx histogram structure */
-typedef struct wl_txq_stop_hist {
- wl_tx_hist_t tx_hist; /* tx histograms */
-} wl_txq_stop_hist_t;
-
-#define WL_TXQ_STOP_HIST_FIXED_LEN \
- (OFFSETOF(wl_txq_stop_hist_t, tx_hist) + WL_TX_HIST_FIXED_LEN)
-#define WL_TXQ_STOP_HIST_FULL_LEN(num_hist, max_hist_size) \
- (OFFSETOF(wl_txq_stop_hist_t, tx_hist) + \
- WL_TX_HIST_FULL_LEN(num_hist, max_hist_size))
-
-/* structure for WL_TX_HIST_LOW_TXQ, WL_TX_HIST_EXCUR_LOW_TXQ_ID */
-typedef struct wl_tx_hist_low_txq {
- wl_tx_hist_t tx_hist; /* tx histograms */
-} wl_tx_hist_low_txq_t;
-
-#define WL_TX_HIST_LOW_TXQ_FIXED_LEN \
- (OFFSETOF(wl_tx_hist_low_txq_t, tx_hist) + WL_TX_HIST_FIXED_LEN)
-#define WL_TX_HIST_LOW_TXQ_FULL_LEN(num_hist, max_hist_size) \
- (OFFSETOF(wl_tx_hist_low_txq_t, tx_hist) + \
- WL_TX_HIST_FULL_LEN(num_hist, max_hist_size))
-
-/* structure for WL_TX_HIST_SCBQ */
-typedef struct wl_tx_hist_scbq {
- struct ether_addr ea; /* ether addr of peer */
- uint16 bsscfg_idx; /* bsscfg index */
- wl_tx_hist_t tx_hist; /* tx histograms */
-} wl_tx_hist_scbq_t;
-
-#define WL_TX_HIST_SCBQ_FIXED_LEN \
- (OFFSETOF(wl_tx_hist_scbq_t, tx_hist) + WL_TX_HIST_FIXED_LEN)
-#define WL_TX_HIST_SCBQ_FULL_LEN(num_hist, max_hist_size) \
- (OFFSETOF(wl_tx_hist_scbq_t, tx_hist) + \
- WL_TX_HIST_FULL_LEN(num_hist, max_hist_size))
-
-/* sub-xtlv IDs within WL_STATS_XTLV_WL_SLICE_TX_QUEUE_DEPTH */
-enum wl_tx_queue_depth_id {
- WL_TX_QUEUE_DEPTH_TXQ_ID = 1,
- WL_TX_QUEUE_DEPTH_LOW_TXQ_ID = 2,
- WL_TX_QUEUE_DEPTH_SCBQ_ID = 3,
- WL_TX_QUEUE_DEPTH_EXCUR_TXQ_ID = 4,
- WL_TX_QUEUE_DEPTH_EXCUR_LOW_TXQ_ID = 5
-};
-
-/* common tx queue depth structure */
-typedef struct wl_tx_queue_depth {
- uint16 queue_depth_bmap; /* bitmap of queue depth in var len array */
- uint16 queue_depth_count; /* count of queue depth in var len array */
- uint16 queue_depth[1]; /* var len array of queue depth */
-} wl_tx_queue_depth_t;
-
-#define WL_TX_QUEUE_DEPTH_FIXED_LEN (OFFSETOF(wl_tx_queue_depth_t, queue_depth))
-#define WL_TX_QUEUE_DEPTH_FULL_LEN(num_queue_depth) \
- (WL_TX_QUEUE_DEPTH_FIXED_LEN + (num_queue_depth) * \
- sizeof(uint16))
-
-/* structure for WL_TX_QUEUE_DEPTH_TXQ_ID, WL_TX_QUEUE_DEPTH_EXCUR_TXQ_ID */
-typedef struct wl_tx_queue_depth_txq {
- uint32 bsscfg_map; /* bitmap of bsscfg indexes associated with this queue */
- wl_tx_queue_depth_t tx_queue_depth; /* queue depth */
-} wl_tx_queue_depth_txq_t;
-
-#define WL_TX_QUEUE_DEPTH_TXQ_FIXED_LEN \
- (OFFSETOF(wl_tx_queue_depth_txq_t, tx_queue_depth) + WL_TX_QUEUE_DEPTH_FIXED_LEN)
-#define WL_TX_QUEUE_DEPTH_TXQ_FULL_LEN(num_queue_depth) \
- (OFFSETOF(wl_tx_queue_depth_txq_t, tx_queue_depth) + \
- WL_TX_QUEUE_DEPTH_FULL_LEN(num_queue_depth))
-
-/* structure for WL_TX_QUEUE_DEPTH_LOW_TXQ_ID, WL_TX_QUEUE_DEPTH_EXCUR_LOW_TXQ_ID */
-typedef struct wl_tx_queue_depth_low_txq {
- wl_tx_queue_depth_t tx_queue_depth; /* queue depth */
-} wl_tx_queue_depth_low_txq_t;
-
-#define WL_TX_QUEUE_DEPTH_LOW_TXQ_FIXED_LEN \
- (OFFSETOF(wl_tx_queue_depth_low_txq_t, tx_queue_depth) + WL_TX_QUEUE_DEPTH_FIXED_LEN)
-#define WL_TX_QUEUE_DEPTH_LOW_TXQ_FULL_LEN(num_queue_depth) \
- (OFFSETOF(wl_tx_queue_depth_low_txq_t, tx_queue_depth) + \
- WL_TX_QUEUE_DEPTH_FULL_LEN(num_queue_depth))
-
-/* structure for WL_TX_QUEUE_DEPTH_SCBQ_ID */
-typedef struct wl_tx_queue_depth_scbq {
- struct ether_addr ea; /* ether addr of peer */
- uint16 bsscfg_idx; /* bsscfg index */
- wl_tx_queue_depth_t tx_queue_depth; /* queue depth */
-} wl_tx_queue_depth_scbq_t;
-
-#define WL_TX_QUEUE_DEPTH_SCBQ_FIXED_LEN \
- (OFFSETOF(wl_tx_queue_depth_scbq_t, tx_queue_depth) + WL_TX_QUEUE_DEPTH_FIXED_LEN)
-#define WL_TX_QUEUE_DEPTH_SCBQ_FULL_LEN(num_queue_depth) \
- (OFFSETOF(wl_tx_queue_depth_scbq_t, tx_queue_depth) + \
- WL_TX_QUEUE_DEPTH_FULL_LEN(num_queue_depth))
-
-/* sub-xtlv IDs within WL_STATS_XTLV_BUS_PCIE_TX_HISTOGRAMS */
-enum wl_pcie_tx_histogram_id {
- WL_PCIE_TX_HIST_ID = 1
-};
-
-/* structure for PCIE_TX_HIST_ID */
-typedef struct wl_pcie_tx_hist {
- uint16 ring_id; /* PCIe ring id */
- uint16 pad; /* 4-byte alignment */
- wl_tx_hist_t tx_hist; /* hist_bmap:
- * 0x1=tx histogram
- * 0x2=tx status pending histogram
- */
-} wl_pcie_tx_hist_t;
-
-#define WL_PCIE_TX_HIST_FIXED_LEN \
- (OFFSETOF(wl_pcie_tx_hist_t, tx_hist) + WL_TX_HIST_FIXED_LEN)
-#define WL_PCIE_TX_HIST_FULL_LEN(num_hist, max_hist_size) \
- (OFFSETOF(wl_pcie_tx_hist_t, tx_hist) + \
- WL_TX_HIST_FULL_LEN(num_hist, max_hist_size))
-
-/* sub-xtlv IDs within WL_STATS_XTLV_BUS_PCIE_TX_QUEUE_DEPTH */
-enum wl_pcie_tx_queue_depth_id {
- WL_PCIE_TX_QUEUE_DEPTH_ID = 1
-};
-
-/* structure for WL_PCIE_TX_QUEUE_DEPTH_ID */
-typedef struct wl_pcie_tx_queue_depth {
- uint16 ring_id; /* PCIe ring id */
- uint16 queue_depth; /* queue depth of ring id */
- uint16 tx_status_pend; /* tx status pending of ring id */
- uint16 pad; /* 4-byte alignment */
-} wl_pcie_tx_queue_depth_t;
-
-#define WL_PCIE_TX_QUEUE_DEPTH_FIXED_LEN sizeof(wl_pcie_tx_queue_depth_t)
-
-#define WL_WSEC_DEL_PMK_VER_V1 1u
-/* tlv ids for del pmk */
-#define WL_DEL_PMK_TLV_ID 1u
-#define WL_DEL_PMKID_TLV_ID 2u
-#define WL_DEL_PEER_ADDR_TLV_ID 3u
-typedef struct wl_wsec_del_pmk {
- uint16 version;
- uint16 length;
- uint8 xtlvs[];
-} wl_wsec_del_pmk_t;
-#define WL_WSEC_DEL_PMK_FIXED_LEN_V1 OFFSETOF(wl_wsec_del_pmk_t, xtlvs)
-
-#define WLC_RC_ROAM_VER_1 1
-
-typedef struct wlc_rcroam {
- uint16 ver;
- uint16 len;
- uint8 data[];
-} wlc_rcroam_t;
-
-typedef struct wlc_rcroam_info_v1 {
- uint16 inactivity_period; /* inactivity monitor period */
- uint16 roam_scan_timeout;
- uint16 periodic_roam_scan_timeout;
- uint8 roam_trig_step; /* roaming trigger step value */
-} wlc_rcroam_info_v1_t;
-
-#define WLC_RC_ROAM_CUR_VER WLC_RC_ROAM_VER_1
-#define RCROAM_HDRLEN 4u
-#define MAX_RCSCAN_TIMER 300u
-
-#define WLC_SILENT_ROAM_VER_1 1
-/* silent roam information struct */
-typedef struct wlc_sroam_info_v1 {
- /* Silent roam Set/Get value */
- uint8 sroam_on; /* sroam on/off */
- int8 sroam_min_rssi; /* minimum rssi threshold to activate the feature */
- uint8 sroam_rssi_range; /* rssi tolerance to determine stationary status */
- uint8 sroam_score_delta; /* roam score delta value to prune candidate ap */
- uint8 sroam_period_time; /* required monitoring period to trigger roaming scan */
- uint8 sroam_band; /* band setting of roaming scan (all, 5g, 2g) */
- uint8 sroam_inact_cnt; /* tx/rx frame count threshold for checking inactivity */
- /* Silent roam monitor value */
- int8 sroam_ref_rssi; /* reference rssi picked when monitoring starts;
- * updated to the current rssi when the current
- * rssi moves outside the rssi range
- */
- uint8 sroam_time_since; /* elapsed time since start monitoring */
- uint8 pad[3];
- uint32 sroam_txfrm_prev; /* save current tx frame counts */
- uint32 sroam_rxfrm_prev; /* save current rx frame counts */
-} wlc_sroam_info_v1_t;
-
-typedef struct wlc_sroam {
- uint16 ver;
- uint16 len;
- uint8 data[];
-} wlc_sroam_t;
-
-#define WLC_SILENT_ROAM_CUR_VER WLC_SILENT_ROAM_VER_1
-#define SROAM_HDRLEN 4u
-
-#define DEF_SROAM_OFF 0
-#define DEF_SROAM_MIN_RSSI -65
-#define DEF_SROAM_RSSI_RANGE 3u
-#define DEF_SROAM_SCORE_DELTA 1u
-#define DEF_SROAM_PERIOD_TIME 10u
-#define DEF_SROAM_INACT_CNT 5u
-#define MAX_SROAM_RSSI -70
-#define MAX_SROAM_RSSI_RANGE 5u
-#define MAX_SROAM_SCORE_DELTA 10u
-#define MAX_SROAM_PERIOD_TIME 250u
-#define SROAM_BAND_AUTO 3u
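/*
 * Illustrative sketch: packing a v1 silent-roam configuration behind the wlc_sroam_t
 * header, using the DEF_SROAM_* defaults above. The helper name and the buffer
 * handling are assumptions; memcpy()/memset() are assumed to be available.
 */
static int
example_build_sroam_cfg(uint8 *buf, uint32 buflen)
{
	wlc_sroam_t *hdr = (wlc_sroam_t *)buf;
	wlc_sroam_info_v1_t info;

	if (buflen < SROAM_HDRLEN + sizeof(info))
		return -1;

	memset(&info, 0, sizeof(info));
	info.sroam_on = 1;
	info.sroam_min_rssi = DEF_SROAM_MIN_RSSI;
	info.sroam_rssi_range = DEF_SROAM_RSSI_RANGE;
	info.sroam_score_delta = DEF_SROAM_SCORE_DELTA;
	info.sroam_period_time = DEF_SROAM_PERIOD_TIME;
	info.sroam_band = SROAM_BAND_AUTO;
	info.sroam_inact_cnt = DEF_SROAM_INACT_CNT;

	hdr->ver = WLC_SILENT_ROAM_CUR_VER;
	hdr->len = sizeof(info);
	memcpy(hdr->data, &info, sizeof(info));
	return SROAM_HDRLEN + (int)sizeof(info);
}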
-
-/* MACSMPL IOVAR parameters */
-typedef enum wl_macdbg_macsmpl_iovar_id {
- WL_MACSMPL_START = 0,
- WL_MACSMPL_STOP = 1,
- WL_MACSMPL_DUMP = 2,
- WL_MACSMPL_STATUS = 3,
- WL_MACSMPL_SIZE = 4
-} wl_macdbg_macsmpl_iovar_id_t;
-
-/* WL_MACSMPL_STATUS values */
-typedef enum wl_macdbg_macsmpl_status {
- WL_MACSMPL_STATUS_IDLE = 0,
- WL_MACSMPL_STATUS_ACTIVE = 1,
- WL_MACSMPL_STATUS_WAIT_FOR_TRIG = 2,
- WL_MACSMPL_STATUS_TRIGGERED = 3
-} wl_macdbg_macsmpl_status_t;
-
-/* WL_MACSMPL_START_PARAM subcommand data */
-typedef struct wl_macsmpl_start_param {
- uint32 trig_condition; /* trigger condition */
- uint16 gpio_mux; /* MACControl1 GPIOSel field */
- uint8 pad[2]; /* 4-byte struct alignment */
-} wl_macsmpl_param_start_t;
-
-/* MAC SC fragment request data */
-typedef struct wl_macsmpl_frag_req_param {
- uint32 offset; /* requested MAC SC fragment offset */
- uint32 size; /* requested MAC SC fragment size, bytes */
-} wl_macsmpl_frag_req_param_t;
-
-/* MAC SC fragment response data */
-typedef struct wl_macsmpl_frag_resp_param {
- uint32 offset; /* MAC SC response fragment offset */
- uint32 size; /* MAC SC response fragment size, bytes */
- uint8 data[]; /* MAC SC response fragment data, flexible array */
-} wl_macsmpl_frag_resp_param_t;
-
-/* MAC SC status data */
-typedef struct wl_macsmpl_status {
- uint32 maccontrol1; /* MACControl1 register value */
- uint32 macsc_flags; /* M_MACSC_FLAGS SHM register value */
- uint16 sc_play_ctrl; /* TXE SampleCollectPlayCtrl register value */
- uint16 sc_cur_ptr; /* TXE SampleCollectCurPtr register value */
- uint16 sc_start_ptr; /* TXE SampleCollectStartPtr register value */
- uint16 sc_stop_ptr; /* TXE SampleCollectStopPtr register value */
-} wl_macsmpl_status_t;
-
-/* WL_MACSMPL parameters data */
-typedef struct wl_macsmpl_param {
- wl_macdbg_macsmpl_iovar_id_t subcmd_id;
- union {
- wl_macsmpl_param_start_t start;
- wl_macsmpl_frag_req_param_t frag_req;
- } u;
-} wl_macsmpl_param_t;
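/*
 * Illustrative sketch: populating the WL_MACSMPL_START request through the union
 * above. The trigger condition and GPIO mux values are caller-supplied placeholders,
 * and the helper name is hypothetical.
 */
static void
example_fill_macsmpl_start(wl_macsmpl_param_t *p, uint32 trig_condition, uint16 gpio_mux)
{
	p->subcmd_id = WL_MACSMPL_START;
	p->u.start.trig_condition = trig_condition;
	p->u.start.gpio_mux = gpio_mux;
	p->u.start.pad[0] = p->u.start.pad[1] = 0;
}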
-
-/* High priority P2P */
-#define WL_HP2P_COUNTERS_VER 2u
-typedef struct hp2p_counters {
- uint16 frames_queued;
- uint16 frames_processed;
- uint16 frames_exp;
- uint16 frames_preempt;
- uint16 frames_retried;
- uint16 reserved; /* reserved, rsvd2 and rsvd3 are experimental counters */
- uint16 rsvd2;
- uint16 rsvd3;
-} hp2p_counters_t;
-
-typedef struct hp2p_counters_v2 {
- uint32 frames_queued; /* Number of AMPDUs processed */
- uint16 frames_exp; /* Number of Lifetime expiries */
- uint16 edt_retry; /* Exceed due to - retry */
- uint16 mpif_reconf; /* MPIF Reconfigure */
- uint16 exceed_delay; /* Exceed delay threshold */
- uint16 edt_nav_thresh; /* Exceed due to - NAV threshold */
- uint16 edt_dc_def; /* Exceed due to - DC based deferral */
- uint16 edt_tx_fifo_full; /* Exceed due to - Tx FIFO full */
- uint16 edt_cts_thresh; /* Exceed due to - CTS threshold */
- uint16 dbg1; /* dbgX are for internal debugging */
- uint16 dbg2;
- uint16 dbg3;
- uint16 dbg4;
- uint16 dbg5;
- uint16 dbg6;
- uint16 dbg7;
- uint16 dbg8;
- uint16 dbg9;
- uint16 dbg10;
-} hp2p_counters_v2_t;
-
-typedef struct hp2p_counters_hdr {
- uint16 version; /* version of hp2p_counters_t structure */
- uint16 len;
- uint16 slice_idx;
- uint16 pad;
- uint8 counters[];
-} hp2p_counters_hdr_t;
-
-/* TX enable flags */
-#define WL_HP2P_TX_AMPDU 0x0001u
-#define WL_HP2P_TX_AMSDU 0x0002u
-#define WL_HP2P_TX_RDG 0x0004u
-
-/* RX enable flags */
-#define WL_HP2P_RX_AMPDU 0x0001u
-#define WL_HP2P_RX_AMSDU 0x0002u
-#define WL_HP2P_RX_RDG 0x0004u
-#define WL_HP2P_RX_AMPDU_REORDER 0x0008u
-
-/* Max/min values for configuration parameters to check validity */
-#define WL_HP2P_MAX_RETRY_MAX 14u
-#define WL_HP2P_MAX_RETRY_MIN 6u
-#define WL_HP2P_LATENCY_TARGET_MAX 30u
-#define WL_HP2P_BURST_INTERVAL_MAX 64u
-#define WL_HP2P_MAX_FIFO 5u
-#define WL_HP2P_MAX_UCODE_LATENCY_THR 500u
-#define WL_HP2P_MAX_UCODE_RECOV_TO 500u
-#define WL_HP2P_MAX_UCODE_NAV_THR 50000u
-
-#define WL_HP2P_VERSION 1u
-typedef struct hp2p_tx_config {
- struct ether_addr peer_addr;
- uint16 max_burst;
- uint16 txop; /* stored in network order (ls octet first) */
- uint16 flags; /* flags to enable/disable AMPDU, AMSDU, RDG */
- uint8 aci;
- uint8 ecw;
- uint8 fifo;
- uint8 tid;
- uint8 burst_interval;
- uint8 latency_target;
- uint8 max_retry;
- uint8 pad;
-} hp2p_tx_config_t;
-
-typedef struct hp2p_rx_config {
- struct ether_addr peer_addr;
- uint16 flags; /* flags to enable/disable AMPDU, AMSDU, RDG, AMPDU Reorder */
- uint8 tid;
- uint8 pad[3];
-} hp2p_rx_config_t;
-
-typedef struct hp2p_udbg_config {
- uint16 recovery_timeout; /* multiples of 256 usecs */
- uint16 latency_thresh; /* multiples of 256 usecs */
- uint16 enable_trap; /* trap if ucode delay exceeds latency_thresh */
- uint16 nav_thresh; /* in usec */
-} hp2p_udbg_config_t;
-
-typedef struct hp2p_cmd {
- uint16 type;
- uint16 len;
- uint8 data[];
-} hp2p_cmd_t;
-
-typedef struct hp2p_cmd_hdr {
- uint16 version;
- uint16 slice_idx;
- uint8 cmd[];
-} hp2p_cmd_hdr_t;
-
-/* to be used in type field of hp2p_cmd_t structure while issuing HP2P commands */
-typedef enum hp2p_cmd_id {
- WL_HP2P_CMD_ENABLE = 0,
- WL_HP2P_CMD_TX_CONFIG = 1,
- WL_HP2P_CMD_RX_CONFIG = 2,
- WL_HP2P_CMD_COUNTERS = 3,
- WL_HP2P_CMD_UDBG_CONFIG = 4
-} hp2p_cmd_id_t;
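/*
 * Illustrative sketch: wrapping one WL_HP2P_CMD_TX_CONFIG record in the
 * hp2p_cmd_hdr_t/hp2p_cmd_t containers above. The helper name, buffer handling and
 * the choice of slice 0 are assumptions; memcpy() is assumed to be available.
 */
static int
example_build_hp2p_tx_config(uint8 *buf, uint32 buflen, const hp2p_tx_config_t *cfg)
{
	hp2p_cmd_hdr_t *hdr = (hp2p_cmd_hdr_t *)buf;
	hp2p_cmd_t *cmd;
	uint32 need = sizeof(*hdr) + sizeof(*cmd) + sizeof(*cfg);

	if (buflen < need)
		return -1;

	hdr->version = WL_HP2P_VERSION;
	hdr->slice_idx = 0;		/* assumed: primary slice */

	cmd = (hp2p_cmd_t *)hdr->cmd;
	cmd->type = WL_HP2P_CMD_TX_CONFIG;
	cmd->len = sizeof(*cfg);
	memcpy(cmd->data, cfg, sizeof(*cfg));
	return (int)need;
}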
-
-typedef enum wl_rffe_cmd_type {
- WL_RFFE_CMD_DEBUG_MODE = 0,
- WL_RFFE_CMD_ELNABYP_MODE = 1,
- WL_RFFE_CMD_REG = 2,
- WL_RFFE_CMD_LAST
-} wl_rffe_cmd_type_t;
-
-/** RFFE struct passed through ioctl */
-typedef struct {
- uint32 regaddr; /**< rFEM_RegAddr */
- uint32 antnum; /**< rFEM AntNum */
- uint32 slaveid; /**< rFEM SlaveID */
- uint32 value; /**< read/write value */
-} rffe_reg_t;
-
-#ifndef BCMUTILS_ERR_CODES
-
-/*
- * SOE (Security Offload Engine) status codes.
- */
-
-/* SOE status codes are reserved from -6144 to -7167 (1K) */
-
-enum wl_soe_status {
- /* Invalid operational context */
- WL_SOE_E_BAD_OP_CONTEXT = -6144,
-
- /* Invalid operational type */
- WL_SOE_E_BAD_OP_TYPE = -6145,
-
- /* Failure to get NAF3 encoded scalar */
- WL_SOE_E_BN_GET_NAF3_ERROR = -6146,
-
- /* Failure to get NAF3 params */
- WL_SOE_E_ECG_GET_NAF3_PARAMS_ERROR = -6147,
-
- /* FAILURE to get Montgomery params */
- WL_SOE_E_MONT_PARAMS_GET_ERROR = -6148,
-
- /* Invalid OSL handle */
- WL_SOE_E_BAD_SI_OSH = -6149,
-
- /* Invalid ECG group */
- WL_SOE_E_BAD_ECG_GROUP = -6150,
-
- /* Invalid BN context */
- WL_SOE_E_BAD_BN_CTX = -6151,
-
- /* Invalid SOE core register base address */
- WL_SOE_E_BAD_SOE_REGBASE = -6152,
-
- /* Invalid SOE context */
- WL_SOE_E_BAD_SOE_CONTXT = -6153,
-
- /* Number of words is too short (i.e., not enough
- * room to encode the PKA sequence)
- */
- WL_SOE_E_PKA_SEQUENCE_WORDS_TOO_SHORT = -6154,
-
- /* Generic bn_get error */
- WL_SOE_E_PKA_BN_GET_ERROR = -6155,
-
- /* Sequence buf too short for BN */
- WL_SOE_E_PKA_BN_BUF_TOO_SHORT_BN = -6156,
-
- /* Sequence buf too short for ECG prime */
- WL_SOE_E_PKA_BN_BUF_TOO_SHORT_ECG_PRIME = -6157,
-
- /* Sequence buf too short for Montgomery N' */
- WL_SOE_E_PKA_SEQ_BUF_TOO_SHORT_MONT_PRIME = -6158,
-
- /* Sequence buf too short for Accumulator registers */
- WL_SOE_E_PKA_SEQ_BUF_TOO_SHORT_ACCM_REG = -6159,
-
- /* Sequence buf too short for the point P */
- WL_SOE_E_PKA_SEQ_BUF_TOO_SHORT_P = -6160,
-
- /* Sequence buf too short for -P */
- WL_SOE_E_PKA_SEQ_BUF_TOO_SHORT_PN = -6161,
-
- /* Sequence buf too short for 3P */
- WL_SOE_E_PKA_SEQ_BUF_TOO_SHORT_3P = -6162,
-
- /* Sequence buf too short for -3P */
- WL_SOE_E_PKA_SEQ_BUF_TOO_SHORT_3PN = -6163,
-
- /* Sequence buf too short for NAF3 scalar */
- WL_SOE_E_PKA_SEQ_BUF_TOO_SHORT_NAF3_SCALAR = -6164,
-
- /* Sequence buf too short for load shift count */
- WL_SOE_E_PKA_SEQ_BUF_TOO_SHORT_PRE_JMP = -6165,
-
- /* SOE engine(SHA/PKA) failed to complete the operation */
- WL_SOE_E_ENGINE_UNABLE_TO_COMPLETE = -6166,
-
- /* Wrong LIR (Long Integer Register) type */
- WL_SOE_E_PKA_BAD_LIR_TYPE = -6167,
-
- /* Reference count has reached maximum */
- WL_SOE_E_MAX_REF_COUNT_REACHED = -6168,
-
- /* Failed to get the SOE context reference */
- WL_SOE_E_GET_REF_FAILED = -6169,
-
- /* Incoming digest length is invalid */
- WL_SOE_E_SHA_WRONG_DIGEST_LEN = -6170
-};
-
-#endif /* BCMUTILS_ERR_CODES */
-
-#define NR5GCX_STATUS_VER_1 1
-/* NR coex status structures */
-typedef struct wlc_nr5gcx_status_v1 {
- uint16 version; /* version info */
- uint16 len; /* status length */
- uint32 mode; /* NR coex status */
- uint32 nr_req_cnt; /* NR req number since last read */
- uint32 nr_dur; /* NR duration since last read, us */
- uint32 nr_duty_cycle; /* NR duty cycle since last read */
- uint32 nr_max_dur; /* NR max duration in a single request */
- uint32 wlan_crit_cnt; /* aggregated # of WLAN critical events */
- uint32 wlan_crit_dur; /* aggregated WLAN critical event duration, ms */
- uint32 wlan_crit_max_dur; /* Duration of the longest WLAN critical event */
- uint16 wlan_crit_evt_bitmap; /* WLAN critical event occurrence bitmap,
- * 1 event per bit.
- */
- uint16 wlan_crit_max_evt_type; /* The event type of the WLAN critical
- * event whose dur is max
- */
-} wlc_nr5gcx_status_v1_t;
#endif /* _wlioctl_h_ */
*
* Definitions subject to change without notice.
*
- * Copyright (C) 1999-2019, Broadcom.
- *
+ * Copyright (C) 1999-2017, Broadcom Corporation
+ *
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
* under the terms of the GNU General Public License version 2 (the "GPL"),
* available at http://www.broadcom.com/licenses/GPLv2.php, with the
* following added to such license:
- *
+ *
* As a special exception, the copyright holders of this software give you
* permission to link this software with independent modules, and to copy and
* distribute the resulting executable under terms of your choice, provided that
* the license of that module. An independent module is a module which is not
* derived from this software. The special exception does not apply to any
* modifications of the software.
- *
+ *
* Notwithstanding the above, under no circumstances may you combine this
* software in any way with any other Broadcom software provided under a license
* other than the GPL, without Broadcom's express prior written consent.
*
* <<Broadcom-WL-IPTag/Open:>>
*
- * $Id: wlioctl_defs.h 826113 2019-06-18 21:04:03Z $
+ * $Id: wlioctl_defs.h 677667 2017-01-04 07:43:05Z $
*/
+
#ifndef wlioctl_defs_h
#define wlioctl_defs_h
+
+
+
/* All builds use the new 11ac ratespec/chanspec */
#undef D11AC_IOTYPES
#define D11AC_IOTYPES
#ifndef USE_NEW_RSPEC_DEFS
-/* Remove when no referencing branches exist.
- * These macros will be used only in older branches (prior to K branch).
- * Wl layer in newer branches and trunk use those defined in bcmwifi_rspec.h.
- * Non-wl layer in newer branches and trunk may use these as well
- * until they are removed.
- */
/* WL_RSPEC defines for rate information */
#define WL_RSPEC_RATE_MASK 0x000000FF /* rate or HT MCS value */
+#define WL_RSPEC_HE_MCS_MASK 0x0000000F /* HE MCS value */
+#define WL_RSPEC_HE_NSS_MASK 0x000000F0 /* HE Nss value */
+#define WL_RSPEC_HE_NSS_SHIFT 4 /* HE Nss value shift */
#define WL_RSPEC_VHT_MCS_MASK 0x0000000F /* VHT MCS value */
#define WL_RSPEC_VHT_NSS_MASK 0x000000F0 /* VHT Nss value */
#define WL_RSPEC_VHT_NSS_SHIFT 4 /* VHT Nss value shift */
#define WL_RSPEC_ENCODE_RATE 0x00000000 /* Legacy rate is stored in RSPEC_RATE_MASK */
#define WL_RSPEC_ENCODE_HT 0x01000000 /* HT MCS is stored in RSPEC_RATE_MASK */
#define WL_RSPEC_ENCODE_VHT 0x02000000 /* VHT MCS and Nss is stored in RSPEC_RATE_MASK */
+#define WL_RSPEC_ENCODE_HE 0x03000000 /* HE MCS and Nss is stored in RSPEC_RATE_MASK */
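/*
 * Illustrative sketch: extracting MCS and Nss from a VHT- or HE-encoded ratespec with
 * the masks above. The encoding-field mask (0x07000000) is not defined in this
 * excerpt and is assumed here for the example; the helper name is hypothetical.
 */
#define EXAMPLE_RSPEC_ENCODING_MASK 0x07000000 /* assumed width of the encoding field */
static void
example_rspec_mcs_nss(uint32 rspec, uint32 *mcs, uint32 *nss)
{
	uint32 enc = rspec & EXAMPLE_RSPEC_ENCODING_MASK;

	if (enc == WL_RSPEC_ENCODE_HE) {
		*mcs = rspec & WL_RSPEC_HE_MCS_MASK;
		*nss = (rspec & WL_RSPEC_HE_NSS_MASK) >> WL_RSPEC_HE_NSS_SHIFT;
	} else if (enc == WL_RSPEC_ENCODE_VHT) {
		*mcs = rspec & WL_RSPEC_VHT_MCS_MASK;
		*nss = (rspec & WL_RSPEC_VHT_NSS_MASK) >> WL_RSPEC_VHT_NSS_SHIFT;
	} else {
		*mcs = rspec & WL_RSPEC_RATE_MASK;	/* legacy rate or HT MCS */
		*nss = 0;
	}
}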
/* WL_RSPEC_BW field defs */
#define WL_RSPEC_BW_UNSPECIFIED 0
#define WL_BSSTYPE_INFRA 1
#define WL_BSSTYPE_ANY 2 /* deprecated */
#define WL_BSSTYPE_MESH 3
-
-/* Bit definitions of mws_active_scan_throttle iovar */
-
-#define WL_SCAN_THROTTLE_MASK 0xF
-
-#define WL_SCAN_THROTTLE_ASSOCSCAN (1U << 0)
-#define WL_SCAN_THROTTLE_ROAMSCAN (1U << 1)
-#define WL_SCAN_THROTTLE_OTHER_FW_SCAN (1U << 2) /* for other scans like pno etc */
-#define WL_SCAN_THROTTLE_HOSTSCAN (1U << 3)
-
-#define WL_SCANFLAGS_CLIENT_MASK 0xF00
-#define WL_SCANFLAGS_CLIENT_SHIFT 8
-
/* Bitmask for scan_type */
-/* Reserved flag precludes the use of 0xff for scan_type which is
- * interpreted as default for backward compatibility.
- * Low priority scan uses currently reserved bit,
- * this should be changed as scan_type extended.
- * So, reserved flag definition removed.
- */
-/* Use lower 16 bit for scan flags, the upper 16 bits are for internal use */
#define WL_SCANFLAGS_PASSIVE 0x01 /* force passive scan */
-#define WL_SCANFLAGS_LOW_PRIO 0x02 /* Low priority scan */
+#define WL_SCANFLAGS_RESERVED 0x02 /* Reserved */
#define WL_SCANFLAGS_PROHIBITED 0x04 /* allow scanning prohibited channels */
#define WL_SCANFLAGS_OFFCHAN 0x08 /* allow scanning/reporting off-channel APs */
#define WL_SCANFLAGS_HOTSPOT 0x10 /* automatic ANQP to hotspot APs */
*/
#define WL_SCANFLAGS_SISO 0x40 /* Use 1 RX chain for scanning */
#define WL_SCANFLAGS_MIMO 0x80 /* Force MIMO scanning */
-#define WL_SCANFLAGS_ASSOCSCAN 0x100 /* Assoc scan */
-#define WL_SCANFLAGS_ROAMSCAN 0x200 /* Roam scan */
-#define WL_SCANFLAGS_FWSCAN 0x400 /* Other FW scan */
-#define WL_SCANFLAGS_HOSTSCAN 0x800 /* Host scan */
-#define WL_SCANFLAGS_LOW_POWER_SCAN 0x1000 /* LOW power scan, scheduled scan
- * only on scancore
- */
-#define WL_SCANFLAGS_HIGH_ACCURACY 0x2000 /* High accuracy scan, which needs
- * reliable scan results
- */
-#define WL_SCANFLAGS_LOW_SPAN 0x4000 /* LOW span scan, which expects
- * scan to be completed ASAP
- */
/* wl_iscan_results status values */
#define WL_SCAN_RESULTS_SUCCESS 0
#define WL_SCAN_RESULTS_ABORTED 3
#define WL_SCAN_RESULTS_NO_MEM 4
-/* Flags for parallel scan */
-/* Bitmap to enable/disable rsdb parallel scan, 5g-5g/2g-2g parallel scan
- * SCAN_PARALLEL_PASSIVE_5G ==> 5g-5g parallel scan
- * SCAN_PARALLEL_PASSIVE_2G ==> 2g-2g parallel scan
- */
-#define SCAN_PARALLEL_PASSIVE_5G (0x40)
-#define SCAN_PARALLEL_PASSIVE_2G (0x80)
-
#define SCANOL_ENABLED (1 << 0)
#define SCANOL_BCAST_SSID (1 << 1)
#define SCANOL_NOTIFY_BCAST_SSID (1 << 2)
#define WL_SCAN_ACTION_START 1
#define WL_SCAN_ACTION_CONTINUE 2
#define WL_SCAN_ACTION_ABORT 3
-#if defined(SIMPLE_ISCAN)
-#define ISCAN_RETRY_CNT 5
-#define ISCAN_STATE_IDLE 0
-#define ISCAN_STATE_SCANING 1
-#define ISCAN_STATE_PENDING 2
-#endif /* SIMPLE_ISCAN */
#define ANTENNA_NUM_1 1 /* total number of antennas to be used */
#define ANTENNA_NUM_2 2
/* check this magic number */
#define WLC_IOCTL_MAGIC 0x14e46c77
+
/* bss_info_cap_t flags */
#define WL_BSS_FLAGS_FROM_BEACON 0x01 /* bss_info derived from beacon */
#define WL_BSS_FLAGS_FROM_CACHE 0x02 /* bss_info collected from cache */
/* bit definitions for bcnflags in wl_bss_info */
#define WL_BSS_BCNFLAGS_INTERWORK_PRESENT 0x01 /* beacon had IE, accessnet valid */
#define WL_BSS_BCNFLAGS_INTERWORK_PRESENT_VALID 0x02 /* on indicates support for this API */
-#define WL_BSS_BCNFLAGS_MULTIPLE_BSSID_SET 0x4 /* this AP belongs to a multiple BSSID set */
-#define WL_BSS_BCNFLAGS_NONTRANSMITTED_BSSID 0x8 /* this AP is the transmitted BSSID */
/* bssinfo flag for nbss_cap */
#define VHT_BI_SGI_80MHZ 0x00000100
#define ioctl_pid used /* pid param */
#define ioctl_status needed /* status param */
+
/* Enumerate crypto algorithms */
#define CRYPTO_ALGO_OFF 0
#define CRYPTO_ALGO_WEP1 1
#define CRYPTO_ALGO_AES_CCM 4
#define CRYPTO_ALGO_AES_OCB_MSDU 5
#define CRYPTO_ALGO_AES_OCB_MPDU 6
-#if !defined(BCMCCX) && !defined(BCMEXTCCX)
+#if !defined(BCMEXTCCX)
#define CRYPTO_ALGO_NALG 7
#else
#define CRYPTO_ALGO_CKIP 7
#define CRYPTO_ALGO_CKIP_MMH 8
#define CRYPTO_ALGO_WEP_MMH 9
#define CRYPTO_ALGO_NALG 10
-#endif /* !BCMCCX && !BCMEXTCCX */
+#endif
#define CRYPTO_ALGO_SMS4 11
#define CRYPTO_ALGO_PMK 12 /* for 802.1x supp to set PMK before 4-way */
/* algo bit vector */
#define KEY_ALGO_MASK(_algo) (1 << _algo)
-#if defined(BCMCCX) || defined(BCMEXTCCX)
+#if defined(BCMEXTCCX)
#define KEY_ALGO_MASK_CCX (KEY_ALGO_MASK(CRYPTO_ALGO_CKIP) | \
KEY_ALGO_MASK(CRYPTO_ALGO_CKIP_MMH) | \
KEY_ALGO_MASK(CRYPTO_ALGO_WEP_MMH))
-#endif /* defined(BCMCCX) || defined(BCMEXTCCX) */
+#endif
#define KEY_ALGO_MASK_WEP (KEY_ALGO_MASK(CRYPTO_ALGO_WEP1) | \
KEY_ALGO_MASK(CRYPTO_ALGO_WEP128) | \
#define WL_SOFT_KEY (1 << 0) /* Indicates this key is using soft encrypt */
#define WL_PRIMARY_KEY (1 << 1) /* Indicates this key is the primary (ie tx) key */
-#if defined(BCMCCX) || defined(BCMEXTCCX)
+#if defined(BCMEXTCCX)
#define WL_CKIP_KP (1 << 4) /* CMIC */
#define WL_CKIP_MMH (1 << 5) /* CKIP */
#else
#define WL_KF_RES_4 (1 << 4) /* Reserved for backward compat */
#define WL_KF_RES_5 (1 << 5) /* Reserved for backward compat */
-#endif /* BCMCCX || BCMEXTCCX */
+#endif
#define WL_IBSS_PEER_GROUP_KEY (1 << 6) /* Indicates a group key for an IBSS PEER */
-#define WL_LINK_KEY (1 << 7) /* For linking keys of both cores */
-#define WL_UNLINK_KEY (1 << 8) /* For unlinking keys of both cores */
/* wireless security bitvec */
-#define WSEC_NONE 0x0
#define WEP_ENABLED 0x0001
#define TKIP_ENABLED 0x0002
#define AES_ENABLED 0x0004
#define WSEC_SWFLAG 0x0008
-#ifdef BCMCCX
-#define CKIP_KP_ENABLED 0x0010
-#define CKIP_MIC_ENABLED 0x0020
-#endif /* BCMCCX */
#define SES_OW_ENABLED 0x0040 /* to go into transition mode without setting wep */
-#ifdef BCMWAPI_WPI
-#define SMS4_ENABLED 0x0100
-#endif /* BCMWAPI_WPI */
#define WSEC_WEP_ENABLED(wsec) ((wsec) & WEP_ENABLED)
#define WSEC_TKIP_ENABLED(wsec) ((wsec) & TKIP_ENABLED)
#define WSEC_AES_ENABLED(wsec) ((wsec) & AES_ENABLED)
-/* Macros to check if algorithm is enabled */
-#define WSEC_INFO_ALGO_ENABLED(_wi, _algo) \
- (_wi).cur_algos & (1 << CRYPTO_ALGO_##_algo)
-
-#define WSEC_INFO_ALGO_NONE(_wi) (((_wi).cur_algos) == 0)
-
-#ifdef BCMCCX
-#define WSEC_CKIP_KP_ENABLED(wsec) ((wsec) & CKIP_KP_ENABLED)
-#define WSEC_CKIP_MIC_ENABLED(wsec) ((wsec) & CKIP_MIC_ENABLED)
-#define WSEC_CKIP_ENABLED(wsec) ((wsec) & (CKIP_KP_ENABLED|CKIP_MIC_ENABLED))
-
-#ifdef BCMWAPI_WPI
-#define WSEC_ENABLED(wsec) \
- ((wsec) & (WEP_ENABLED | TKIP_ENABLED | AES_ENABLED | CKIP_KP_ENABLED | \
- CKIP_MIC_ENABLED | SMS4_ENABLED))
-#else /* BCMWAPI_WPI */
-#define WSEC_ENABLED(wsec) \
- ((wsec) & \
- (WEP_ENABLED | TKIP_ENABLED | AES_ENABLED | CKIP_KP_ENABLED | CKIP_MIC_ENABLED))
-#endif /* BCMWAPI_WPI */
-#else /* defined BCMCCX */
-#ifdef BCMWAPI_WPI
-#define WSEC_ENABLED(wsec) ((wsec) & (WEP_ENABLED | TKIP_ENABLED | AES_ENABLED | SMS4_ENABLED))
-#else /* BCMWAPI_WPI */
#define WSEC_ENABLED(wsec) ((wsec) & (WEP_ENABLED | TKIP_ENABLED | AES_ENABLED))
-#endif /* BCMWAPI_WPI */
-#endif /* BCMCCX */
#define WSEC_SES_OW_ENABLED(wsec) ((wsec) & SES_OW_ENABLED)
-#ifdef BCMWAPI_WAI
-#define WSEC_SMS4_ENABLED(wsec) ((wsec) & SMS4_ENABLED)
-#endif /* BCMWAPI_WAI */
+
/* Following macros are not used any more. Just kept here to
* avoid build issue in BISON/CARIBOU branch
#define MFP_SHA256 0x0800 /* a special configuration for STA for WIFI test tool */
/* WPA authentication mode bitvec */
-#define WPA_AUTH_DISABLED 0x0000 /* Legacy (i.e., non-WPA) */
-#define WPA_AUTH_NONE 0x0001 /* none (IBSS) */
-#define WPA_AUTH_UNSPECIFIED 0x0002 /* over 802.1x */
-#define WPA_AUTH_PSK 0x0004 /* Pre-shared key */
-#if defined(BCMCCX) || defined(BCMEXTCCX)
-#define WPA_AUTH_CCKM 0x0008 /* CCKM */
-#define WPA2_AUTH_CCKM 0x0010 /* CCKM2 */
-#endif /* BCMCCX || BCMEXTCCX */
+#define WPA_AUTH_DISABLED 0x0000 /* Legacy (i.e., non-WPA) */
+#define WPA_AUTH_NONE 0x0001 /* none (IBSS) */
+#define WPA_AUTH_UNSPECIFIED 0x0002 /* over 802.1x */
+#define WPA_AUTH_PSK 0x0004 /* Pre-shared key */
+#if defined(BCMEXTCCX)
+#define WPA_AUTH_CCKM 0x0008 /* CCKM */
+#define WPA2_AUTH_CCKM 0x0010 /* CCKM2 */
+#endif
/* #define WPA_AUTH_8021X 0x0020 */ /* 802.1x, reserved */
-#define WPA2_AUTH_UNSPECIFIED 0x0040 /* over 802.1x */
-#define WPA2_AUTH_PSK 0x0080 /* Pre-shared key */
-#define BRCM_AUTH_PSK 0x0100 /* BRCM specific PSK */
-#define BRCM_AUTH_DPT 0x0200 /* DPT PSK without group keys */
-#if defined(BCMWAPI_WAI) || defined(BCMWAPI_WPI)
-#define WPA_AUTH_WAPI 0x0400 /* why it is same as WAPI_AUTH_UNSPECIFIED */
-#define WAPI_AUTH_NONE WPA_AUTH_NONE /* none (IBSS) */
-#define WAPI_AUTH_UNSPECIFIED 0x0400 /* over AS */
-#define WAPI_AUTH_PSK 0x0800 /* Pre-shared key */
-#endif /* BCMWAPI_WAI || BCMWAPI_WPI */
-#define WPA2_AUTH_1X_SHA256 0x1000 /* 1X with SHA256 key derivation */
-#define WPA2_AUTH_TPK 0x2000 /* TDLS Peer Key */
-#define WPA2_AUTH_FT 0x4000 /* Fast Transition. */
-#define WPA2_AUTH_PSK_SHA256 0x8000 /* PSK with SHA256 key derivation */
-#define WPA2_AUTH_FILS_SHA256 0x10000 /* FILS with SHA256 key derivation */
-#define WPA2_AUTH_FILS_SHA384 0x20000 /* FILS with SHA384 key derivation */
-#define WPA2_AUTH_IS_FILS(auth) ((auth) & (WPA2_AUTH_FILS_SHA256 | WPA2_AUTH_FILS_SHA384))
-#define WPA3_AUTH_SAE_PSK 0x40000 /* SAE with 4-way handshake */
-#define WPA3_AUTH_OWE 0x100000 /* OWE */
-#define WPA3_AUTH_1X_SUITE_B_SHA256 0x200000 /* Suite B SHA256 */
-#define WPA3_AUTH_1X_SUITE_B_SHA384 0x400000 /* Suite B-192 SHA384 */
-#define WPA3_AUTH_PSK_SHA384 0x800000 /* PSK with SHA384 key derivation */
-#define WPA3_AUTH_SAE_AP_ONLY 0x1000000 /* SAE restriction to connect to pure SAE APs */
+#define WPA2_AUTH_UNSPECIFIED 0x0040 /* over 802.1x */
+#define WPA2_AUTH_PSK 0x0080 /* Pre-shared key */
+#define BRCM_AUTH_PSK 0x0100 /* BRCM specific PSK */
+#define BRCM_AUTH_DPT 0x0200 /* DPT PSK without group keys */
+#define WPA2_AUTH_1X_SHA256 0x1000 /* 1X with SHA256 key derivation */
+#define WPA2_AUTH_TPK 0x2000 /* TDLS Peer Key */
+#define WPA2_AUTH_FT 0x4000 /* Fast Transition. */
+#define WPA2_AUTH_PSK_SHA256 0x8000 /* PSK with SHA256 key derivation */
/* WPA2_AUTH_SHA256 not used anymore. Just kept here to avoid build issue in DINGO */
-#define WPA2_AUTH_SHA256 0x8000
-#define WPA_AUTH_PFN_ANY 0xffffffff /* for PFN, match only ssid */
+#define WPA2_AUTH_SHA256 0x8000
+#define WPA_AUTH_PFN_ANY 0xffffffff /* for PFN, match only ssid */
/* pmkid */
-#define MAXPMKID 16 /* max # PMKID cache entries NDIS */
+#define MAXPMKID 16
/* SROM12 changes */
#define WLC_IOCTL_MAXLEN 8192 /* max length ioctl buffer required */
-#define WLC_IOCTL_SMLEN 256 /* "small" length ioctl buffer required */
-#define WLC_IOCTL_MEDLEN 1896 /* "med" length ioctl buffer required */
+
+#define WLC_IOCTL_SMLEN 256 /* "small" length ioctl buffer required */
+#define WLC_IOCTL_MEDLEN 1536 /* "med" length ioctl buffer required */
#if defined(LCNCONF) || defined(LCN40CONF) || defined(LCN20CONF)
#define WLC_SAMPLECOLLECT_MAXLEN 8192 /* Max Sample Collect buffer */
#else
#define WLC_SAMPLECOLLECT_MAXLEN 10240 /* Max Sample Collect buffer for two cores */
-#endif // endif
+#endif
#define WLC_SAMPLECOLLECT_MAXLEN_LCN40 8192
-#define WLC_IOCTL_NANRESP_MAXLEN 4096u /* "max" length nan ioctl resp buffer required */
-#define WLC_IOCTL_NANRESP_MEDLEN 800u /* "med" length nan ioctl resp buffer required */
-
/* common ioctl definitions */
#define WLC_GET_MAGIC 0
#define WLC_GET_VERSION 1
#define WLC_GET_KEY_PRIMARY 235
#define WLC_SET_KEY_PRIMARY 236
+
/* #define WLC_DUMP_RADIOREGS 237 */ /* no longer supported */
#define WLC_GET_ACI_ARGS 238
#define WLC_SET_ACI_ARGS 239
#define WLC_GET_RSSI_QDB 321 /* qdB portion of the RSSI */
#define WLC_DUMP_RATESET 322
#define WLC_ECHO 323
-#define WLC_SCB_AUTHENTICATE 325
-#define WLC_LAST 326 /* The last ioctl. Also push this
- * number when adding new ioctls
- */
-/*
- * Alert:
- * Duplicate a few definitions that irelay requires from epiioctl.h here
- * so caller doesn't have to include this file and epiioctl.h .
- * If this grows any more, it would be time to move these irelay-specific
- * definitions out of the epiioctl.h and into a separate driver common file.
- */
+#define WLC_LAST 324
#define WLC_SPEC_FLAG 0x80000000 /* For some special IOCTL */
#ifndef EPICTRL_COOKIE
#define EPICTRL_COOKIE 0xABADCEDE
-#endif // endif
+#endif
/* vx wlc ioctl's offset */
#define CMN_IOCTL_OFF 0x180
#define WL_AUTH_OPEN_SYSTEM 0 /* d11 open authentication */
#define WL_AUTH_SHARED_KEY 1 /* d11 shared authentication */
#define WL_AUTH_OPEN_SHARED 2 /* try open, then shared if open failed w/rc 13 */
-#define WL_AUTH_SAE_KEY 3 /* d11 sae authentication */
-#define WL_AUTH_FILS_SHARED 4 /* d11 fils shared key authentication */
-#define WL_AUTH_FILS_SHARED_PFS 5 /* d11 fils shared key w/ pfs authentication */
-#define WL_AUTH_FILS_PUBLIC 6 /* d11 fils public key authentication */
/* a large TX Power as an init value to factor out of MIN() calculations,
* keep low enough to fit in an int8, units are .25 dBm
#define WLC_BAND_5G 1 /* 5 Ghz */
#define WLC_BAND_2G 2 /* 2.4 Ghz */
#define WLC_BAND_ALL 3 /* all bands */
-#define WLC_BAND_6G 4 /* 6 Ghz */
#define WLC_BAND_INVALID -1 /* Invalid band */
/* band range returned by band_range iovar */
#define TRIGGER_BADFCS 0x08
#define TRIGGER_BADPLCP 0x10
#define TRIGGER_CRSGLITCH 0x20
-#define TRIGGER_ASYNC 0x40
#define WL_SAMPLEDATA_HEADER_TYPE 1
#define WL_SAMPLEDATA_HEADER_SIZE 80 /* sample collect header size (bytes) */
#define WL_RSSI_ANT_MAX 4 /* max possible rx antennas */
#elif WL_RSSI_ANT_MAX != 4
#error "WL_RSSI_ANT_MAX does not match"
-#endif // endif
+#endif
/* dfs_status iovar-related defines */
#define WL_TX_POWER_F_OPENLOOP 0x40
#define WL_TX_POWER_F_PROP11NRATES 0x80
#define WL_TX_POWER_F_UNIT_QDBM 0x100
-#define WL_TX_POWER_F_TXCAP 0x200
-#define WL_TX_POWER_F_HE 0x400
-#define WL_TX_POWER_F_RU_RATE 0x800
-
/* Message levels */
#define WL_ERROR_VAL 0x00000001
#define WL_TRACE_VAL 0x00000002
#define WL_PRUSR_VAL 0x00000200
#define WL_PS_VAL 0x00000400
#define WL_TXPWR_VAL 0x00000000 /* retired in TOT on 6/10/2009 */
-#define WL_MODE_SWITCH_VAL 0x00000800 /* Using retired TXPWR val */
+#define WL_MODE_SWITCH_VAL 0x00000800 /* Using retired TXPWR val */
#define WL_PORT_VAL 0x00001000
#define WL_DUAL_VAL 0x00002000
#define WL_WSEC_VAL 0x00004000
#define WL_NRSSI_VAL 0x00000000 /* retired in TOT on 6/10/2009 */
#define WL_BCNTRIM_VAL 0x00020000 /* Using retired NRSSI VAL */
#define WL_LOFT_VAL 0x00000000 /* retired in TOT on 6/10/2009 */
-#define WL_PFN_VAL 0x00040000 /* Using retired LOFT_VAL */
+#define WL_PFN_VAL 0x00040000 /* Using retired LOFT_VAL */
#define WL_REGULATORY_VAL 0x00080000
-#define WL_CSA_VAL 0x00080000 /* Reusing REGULATORY_VAL due to lackof bits */
+#define WL_CSA_VAL 0x00080000 /* Reusing REGULATORY_VAL due to lack of bits */
#define WL_TAF_VAL 0x00100000
#define WL_RADAR_VAL 0x00000000 /* retired in TOT on 6/10/2009 */
#define WL_WDI_VAL 0x00200000 /* Using retired WL_RADAR_VAL VAL */
#define WL_TDLS_VAL 0x00001000
#define WL_MCNX_VAL 0x00002000
#define WL_PROT_VAL 0x00004000
+#define WL_PSTA_VAL 0x00008000
#define WL_TSO_VAL 0x00010000
#define WL_TRF_MGMT_VAL 0x00020000
#define WL_LPC_VAL 0x00040000
#define WL_P2PO_VAL 0x00200000
#define WL_TBTT_VAL 0x00400000
#define WL_FBT_VAL 0x00800000
-#define WL_RRM_VAL 0x00800000 /* reuse */
+#define WL_RRM_VAL 0x00800000 /* reuse */
#define WL_MQ_VAL 0x01000000
+
/* This level is currently used in Phoenix2 only */
#define WL_SRSCAN_VAL 0x02000000
+
#define WL_WNM_VAL 0x04000000
/* re-using WL_WNM_VAL for MBO */
#define WL_MBO_VAL 0x04000000
-/* re-using WL_SRSCAN_VAL */
-#define WL_RANDMAC_VAL 0x02000000
-#define WL_UNUSED_VAL 0x10000000 /* Was a duplicate for WL_LPC_VAL. Removed */
+#define WL_PWRSEL_VAL 0x10000000
#define WL_NET_DETECT_VAL 0x20000000
#define WL_OCE_VAL 0x20000000 /* reuse */
#define WL_PCIE_VAL 0x40000000
#define WL_PMDUR_VAL 0x80000000
+
+
/* use top-bit for WL_TIME_STAMP_VAL because this is a modifier
* rather than a message-type of its own
*/
#define WL_TIMESTAMP_VAL 0x80000000
-/* wl_msg_level2 is full. For new bits take the next one and AND with
- * wl_msg_level3 in wl_dbg.h
- */
-#define WL_ASSOC_AP_VAL 0x00000001
-#define WL_FILS_VAL 0x00000002
-#define WL_LATENCY_VAL 0x00000004
-#define WL_WBUS_VAL 0x00000008
-
/* max # of leds supported by GPIO (gpio pin# == led index#) */
#define WL_LED_NUMGPIO 32 /* gpio 0-31 */
#define WL_LED_ARADIO 4 /* 5 Ghz radio enabled */
#define WL_LED_BRADIO 5 /* 2.4Ghz radio enabled */
#define WL_LED_BGMODE 6 /* on if gmode, off if bmode */
-#define WL_LED_WI1 7 /* wlan indicator 1 mode (legacy cust) */
-#define WL_LED_WI2 8 /* wlan indicator 2 mode (legacy cust) */
-#define WL_LED_WI3 9 /* wlan indicator 3 mode (legacy cust) */
+#define WL_LED_WI1 7
+#define WL_LED_WI2 8
+#define WL_LED_WI3 9
#define WL_LED_ASSOC 10 /* associated state indicator */
#define WL_LED_INACTIVE 11 /* null behavior (clears default behavior) */
-#define WL_LED_ASSOCACT 12 /* on associated; blink fast for activity */
-#define WL_LED_WI4 13 /* wlan indicator 4 mode (legacy cust 5G) */
-#define WL_LED_WI5 14 /* wlan indicator 5 mode (legacy cust 2.4) */
+#define WL_LED_ASSOCACT 12 /* on when associated; blink fast for activity */
+#define WL_LED_WI4 13
+#define WL_LED_WI5 14
#define WL_LED_BLINKSLOW 15 /* blink slow */
#define WL_LED_BLINKMED 16 /* blink med */
#define WL_LED_BLINKFAST 17 /* blink fast */
#define WL_LED_BLINKCUSTOM 18 /* blink custom */
-#define WL_LED_BLINKPERIODIC 19 /* blink period (custom 1000ms / off 400ms) */
+#define WL_LED_BLINKPERIODIC 19 /* blink periodic (custom 1000ms / off 400ms) */
#define WL_LED_ASSOC_WITH_SEC 20 /* when connected with security */
/* keep on for 300 sec */
#define WL_LED_START_OFF 21 /* off upon boot, could be turned on later */
-#define WL_LED_WI6 22 /* wlan indicator 6 mode legacy rtr 43526 5 */
-#define WL_LED_WI7 23 /* wlan indicator 7 mode legacy rtr 43526 2.4 */
-#define WL_LED_WI8 24 /* wlan indicator 8 mode legacy rtr 43526 */
+#define WL_LED_WI6 22
+#define WL_LED_WI7 23
+#define WL_LED_WI8 24
#define WL_LED_NUMBEHAVIOR 25
/* led behavior numeric value format */
/* number of bytes needed to define a proper bit mask for MAC event reporting */
#define BCMIO_ROUNDUP(x, y) ((((x) + ((y) - 1)) / (y)) * (y))
#define BCMIO_NBBY 8
-#define WL_EVENTING_MASK_LEN (16+4) /* Don't increase this without wl review */
+#define WL_EVENTING_MASK_LEN (16+4)
#define WL_EVENTING_MASK_EXT_LEN \
MAX(WL_EVENTING_MASK_LEN, (ROUNDUP(WLC_E_LAST, NBBY)/NBBY))
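/*
 * Illustrative sketch: enabling one MAC event in a byte-array event mask of
 * WL_EVENTING_MASK_LEN bytes, one bit per event id and BCMIO_NBBY bits per byte.
 * The helper name is hypothetical; event ids beyond the mask are ignored here.
 */
static void
example_event_mask_set(uint8 mask[WL_EVENTING_MASK_LEN], uint32 event_id)
{
	if ((event_id / BCMIO_NBBY) < WL_EVENTING_MASK_LEN)
		mask[event_id / BCMIO_NBBY] |= (uint8)(1u << (event_id % BCMIO_NBBY));
}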
#define SPECT_MNGMT_LOOSE_11H_D 4 /* operation defined above */
/* bit position in per_chan_info; these depend on current country/regulatory domain */
-#define WL_CHAN_VALID_HW (1u << 0) /* valid with current HW */
-#define WL_CHAN_VALID_SW (1u << 1) /* valid with current country setting */
-#define WL_CHAN_BAND_5G (1u << 2) /* 5GHz-band channel */
-#define WL_CHAN_RADAR (1u << 3) /* radar sensitive channel */
-#define WL_CHAN_INACTIVE (1u << 4) /* temporarily inactive due to radar */
-#define WL_CHAN_PASSIVE (1u << 5) /* channel is in passive mode */
-#define WL_CHAN_RESTRICTED (1u << 6) /* restricted use channel */
-#define WL_CHAN_RADAR_EU_WEATHER (1u << 7) /* EU Radar weather channel.
- * Implies an EU Radar channel.
- */
-#define WL_CHAN_CLM_RESTRICTED (1u << 8) /* channel restricted in CLM (i.e. by default) */
-#define WL_CHAN_BAND_6G (1u << 9) /* 6GHz-band channel */
-#define WL_CHAN_OOS_SHIFT 24u /* shift for OOS field */
-#define WL_CHAN_OOS_MASK 0xFF000000u /* field specifying minutes remaining for this
- * channel's out-of-service period due to radar
- * detection
- */
+#define WL_CHAN_VALID_HW (1 << 0) /* valid with current HW */
+#define WL_CHAN_VALID_SW (1 << 1) /* valid with current country setting */
+#define WL_CHAN_BAND_5G (1 << 2) /* 5GHz-band channel */
+#define WL_CHAN_RADAR (1 << 3) /* radar sensitive channel */
+#define WL_CHAN_INACTIVE (1 << 4) /* temporarily inactive due to radar */
+#define WL_CHAN_PASSIVE (1 << 5) /* channel is in passive mode */
+#define WL_CHAN_RESTRICTED (1 << 6) /* restricted use channel */
+#define WL_CHAN_RADAR_EU_WEATHER (1 << 7) /* EU Radar weather channel. Implies an
+ * EU Radar channel.
+ */
+#define WL_CHAN_CLM_RESTRICTED (1 << 8) /* channel restricted in CLM
+ * (i.e. by default)
+ */
+
+/* following definition is for precommit; will be removed once wl, acsd switch to the new def */
+#define WL_CHAN_WEATHER_RADAR WL_CHAN_RADAR_EU_WEATHER
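/*
 * Illustrative sketch: interpreting a per_chan_info value with the bit definitions
 * above. "Usable right now" is just one possible policy chosen for the example; the
 * helper name is hypothetical.
 */
static int
example_chan_usable_now(uint32 chan_info)
{
	if (!(chan_info & WL_CHAN_VALID_HW) || !(chan_info & WL_CHAN_VALID_SW))
		return 0;	/* not supported by HW or by the current country setting */
	if (chan_info & (WL_CHAN_INACTIVE | WL_CHAN_RESTRICTED | WL_CHAN_CLM_RESTRICTED))
		return 0;	/* radar hold-off or regulatory restriction */
	return 1;
}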
/* BTC mode used by "btc_mode" iovar */
#define WL_BTC_DISABLE 0 /* disable BT coexistence */
/* maximum channels returned by the get valid channels iovar */
#define WL_NUMCHANNELS 64
-/* Channels break down for 2G BAND
-* 2G 20MHz = 14
-*
-* 2G 40MHz
-* 9 * 2 = 18
-*
-* 2G tot = 14 + 18 = 32
-*
-* Channels Break down for 5G BAND
-* 5G 20MHz
-* 36-48 4
-* 52-64 4
-* 100-144 12
-* 149-161 4
-* 165 1
-* 5G 20 subtot = 25
-*
-* 5G 40 12 * 2 = 24
-* 5G 80 6 * 4 = 24
-* 5G 160 2 * 8 = 16
-*
-* 5G total = 25 + 24 + 24 + 16 = 89
-*
-* TOTAL 2G and 5G
-* 2G + 5G = (32 + 89) = 121
-*
-* Channels Break down for 6G BAND
-* 20MHz = 59
-* 40MHz 29 * 2 = 58
-* 80MHz 14 * 4 = 56
-* 160MHz 7 * 8 = 56
-* 6G total = 59 + 58 + 56 + 56 = 229
-*
-* Total WL_NUMCHANSPECS 2G/5G/6G
-* total = 32 + 89 + 229 = 350
-*
-* IF 5g 80+80 is defined
-* 80MHz cf pairs are:
-* 42 106
-* 42 122
-* 42 138
-* 42 155
-* 58 106
-* 58 122
-* 58 138
-* 58 155
-* 106 138
-* 106 155
-* 122 155
-* 138 155
-*
-*
-* 12 pairs * 8 primary channels = 96
-* TOTAL 2G + 5G + 5G (80 + 80)
-* 32 + 89 + 96 = 217
-*
-* TOTAL 2G + 5G + 5G (80 + 80) + 6G (excluding 80 + 80)
-* 32 + 89 + 96 + 229 = 446
-*
-*/
-#ifdef WL_BAND6G
-/* max number of chanspecs (used by the iovar to calc. buf space) */
-#ifdef WL11AC_80P80
-#define WL_NUMCHANSPECS 446
-#else
-#define WL_NUMCHANSPECS 350
-#endif // endif
-#else
/* max number of chanspecs (used by the iovar to calc. buf space) */
#ifdef WL11AC_80P80
#define WL_NUMCHANSPECS 206
#else
#define WL_NUMCHANSPECS 110
-#endif // endif
-#endif /* WL_BAND6G */
+#endif
/* WDS link local endpoint WPA role */
#define WL_WDS_WPA_ROLE_AUTH 0 /* authenticator */
#define WL_PKTENG_PER_TX_WITH_ACK_START 0x06
#define WL_PKTENG_PER_RX_STOP 0x08
#define WL_PKTENG_PER_RU_TX_START 0x09
-#define WL_PKTENG_PER_TRIG_TX_START 0x0a
#define WL_PKTENG_PER_MASK 0xff
#define WL_PKTENG_SYNCHRONOUS 0x100 /* synchronous flag */
#define WL_PKTENG_SYNCHRONOUS_UNBLK 0x200 /* synchronous unblock flag */
-#define WL_PKTENG_COLLECT 0x400 /* Save last Rx'ed packet */
#ifdef PKTENG_LONGPKTSZ
/* max pktsz limit for pkteng */
#define WL_PKTENG_MAXPKTSZ PKTENG_LONGPKTSZ
#else
#define WL_PKTENG_MAXPKTSZ 16384
-#endif // endif
+#endif
#define NUM_80211b_RATES 4
#define NUM_80211ag_RATES 8
#define WL_P2P_IF_DYNBCN_GO 2
#define WL_P2P_IF_DEV 3
-/* p2p GO configuration */
-#define WL_P2P_ENABLE_CONF 1 /* configure */
-#define WL_P2P_DISABLE_CONF 0 /* un-configure */
-
/* count */
#define WL_P2P_SCHED_RSVD 0
#define WL_P2P_SCHED_REPEAT 255 /* anything > 255 will be treated as 255 */
#define WL_P2P_SCHED_TYPE_ABS 0 /* Scheduled Absence */
#define WL_P2P_SCHED_TYPE_REQ_ABS 1 /* Requested Absence */
-/* at some point we may need bitvec here (combination of actions) */
/* schedule action during absence periods (for WL_P2P_SCHED_ABS type) */
#define WL_P2P_SCHED_ACTION_NONE 0 /* no action */
#define WL_P2P_SCHED_ACTION_DOZE 1 /* doze */
/* schedule option - WL_P2P_SCHED_TYPE_XXX */
#define WL_P2P_SCHED_ACTION_RESET 255 /* reset */
-/* at some point we may need bitvec here (combination of options) */
/* schedule option - WL_P2P_SCHED_TYPE_ABS */
#define WL_P2P_SCHED_OPTION_NORMAL 0 /* normal start/interval/duration/count */
#define WL_P2P_SCHED_OPTION_BCNPCT 1 /* percentage of beacon interval */
#define WLFEATURE_DISABLE_11N_AMPDU_RX 0x00000040
#define WLFEATURE_DISABLE_11N_GF 0x00000080
+/* Proxy STA modes */
+#define PSTA_MODE_DISABLED 0
+#define PSTA_MODE_PROXY 1
+#define PSTA_MODE_REPEATER 2
+
/* op code in nat_cfg */
#define NAT_OP_ENABLE 1 /* enable NAT on given interface */
#define NAT_OP_DISABLE 2 /* disable NAT on given interface */
#define WL_WNM_FMS 0x00000080
#define WL_WNM_NOTIF 0x00000100
#define WL_WNM_WBTEXT 0x00000200
-#define WL_WNM_ESTM 0x00000400
-#define WL_WNM_MAX 0x00000800
+#define WL_WNM_MAX 0x00000400
#ifdef WLWNM_BRCM
#define BRCM_WNM_FEATURE_SET\
(WL_WNM_PROXYARP | \
#define TSPEC_UNKNOWN 3 /* TSPEC unknown */
#define TSPEC_STATUS_MASK 7 /* TSPEC status mask */
-#ifdef BCMCCX
-/* "wlan_reason" iovar interface */
-#define WL_WLAN_ASSOC_REASON_NORMAL_NETWORK 0 /* normal WLAN network setup */
-#define WL_WLAN_ASSOC_REASON_ROAM_FROM_CELLULAR_NETWORK 1 /* roam from Cellular network */
-#define WL_WLAN_ASSOC_REASON_ROAM_FROM_LAN 2 /* roam from LAN */
-#define WL_WLAN_ASSOC_REASON_MAX 2 /* largest value allowed */
-#endif /* BCMCCX */
/* Software feature flag defines used by wlfeatureflag */
#ifdef WLAFTERBURNER
#define WL_SWFL_ABBFL 0x0001 /* Allow Afterburner on systems w/o hardware BFL */
#define WL_SWFL_ABENCORE 0x0002 /* Allow AB on non-4318E chips */
#endif /* WLAFTERBURNER */
-#define WL_SWFL_NOHWRADIO 0x0004 /* Disable HW Radio monitor (e.g., Cust Spec) */
+#define WL_SWFL_NOHWRADIO 0x0004
#define WL_SWFL_FLOWCONTROL 0x0008 /* Enable backpressure to OS stack */
#define WL_SWFL_WLBSSSORT 0x0010 /* Per-port supports sorting of BSS */
#ifndef BESTN_MAX
#define BESTN_MAX 10
-#endif // endif
+#endif
#ifndef MSCAN_MAX
#define MSCAN_MAX 32
-#endif // endif
+#endif
/* TCP Checksum Offload error injection for testing */
#define TOE_ERRTEST_TX_CSUM 0x00000001
#define TOE_ERRTEST_RX_CSUM2 0x00000004
/* ARP Offload feature flags for arp_ol iovar */
-#define ARP_OL_AGENT 0x00000001
-#define ARP_OL_SNOOP 0x00000002
-#define ARP_OL_HOST_AUTO_REPLY 0x00000004
-#define ARP_OL_PEER_AUTO_REPLY 0x00000008
-#define ARP_OL_UPDATE_HOST_CACHE 0x00000010
+#define ARP_OL_AGENT 0x00000001
+#define ARP_OL_SNOOP 0x00000002
+#define ARP_OL_HOST_AUTO_REPLY 0x00000004
+#define ARP_OL_PEER_AUTO_REPLY 0x00000008
/* ARP Offload error injection */
#define ARP_ERRTEST_REPLY_PEER 0x1
#define ARP_ERRTEST_REPLY_HOST 0x2
#define ARP_MULTIHOMING_MAX 8 /* Maximum local host IP addresses */
-#if defined(WL_PKT_FLTR_EXT) && !defined(WL_PKT_FLTR_EXT_DISABLED)
-#define ND_MULTIHOMING_MAX 32 /* Maximum local host IP addresses */
-#else
#define ND_MULTIHOMING_MAX 10 /* Maximum local host IP addresses */
-#endif /* WL_PKT_FLTR_EXT && !WL_PKT_FLTR_EXT_DISABLED */
#define ND_REQUEST_MAX 5 /* Max set of offload params */
/* AOAC wake event flag */
#define WAKE_EVENT_NLO_DISCOVERY_BIT 1
#define MAX_NUM_WOL_PATTERN 22 /* LOGO requirements min 22 */
+
/* Packet filter operation mode */
/* True: 1; False: 0 */
#define PKT_FILTER_MODE_FORWARD_ON_MATCH 1
#define WL_PROXD_MODE_TARGET 3
#define WL_PROXD_RANDOM_WAKEUP 0x8000
+
#ifdef NET_DETECT
#define NET_DETECT_MAX_WAKE_DATA_SIZE 2048
#define NET_DETECT_MAX_PROFILES 16
#define NET_DETECT_MAX_CHANNELS 50
#endif /* NET_DETECT */
+
/* Bit masks for radio disabled status - returned by WL_GET_RADIO */
#define WL_RADIO_SW_DISABLE (1<<0)
#define WL_RADIO_HW_DISABLE (1<<1)
#define WL_RADIO_MPC_DISABLE (1<<2)
#define WL_RADIO_COUNTRY_DISABLE (1<<3) /* some countries don't support any channel */
#define WL_RADIO_PERCORE_DISABLE (1<<4) /* Radio disable per core for DVT */
-#define WL_RADIO_TSYNC_PWRSAVE_DISABLE (1<<5) /* Disable Radio in tsync mode for power saving */
#define WL_SPURAVOID_OFF 0
#define WL_SPURAVOID_ON1 1
#define WL_SPURAVOID_ON2 2
+
#define WL_4335_SPURAVOID_ON1 1
#define WL_4335_SPURAVOID_ON2 2
#define WL_4335_SPURAVOID_ON3 3
#define WL_PWRSTATS_TYPE_PM_AWAKE2 7 /**< struct wl_pwr_pm_awake_stats_v2 */
#define WL_PWRSTATS_TYPE_SDIO 8 /* struct wl_pwr_sdio_stats */
#define WL_PWRSTATS_TYPE_MIMO_PS_METRICS 9 /* struct wl_mimo_meas_metrics_t */
-#define WL_PWRSTATS_TYPE_SLICE_INDEX 10 /* slice index for which this report is meant for */
-#define WL_PWRSTATS_TYPE_TSYNC 11 /**< struct wl_pwr_tsync_stats */
-#define WL_PWRSTATS_TYPE_OPS_STATS 12 /* struct wl_pwr_ops_stats_t */
-#define WL_PWRSTATS_TYPE_BCNTRIM_STATS 13 /* struct wl_pwr_bcntrim_stats_t */
-#define WL_PWRSTATS_TYPE_SLICE_INDEX_BAND_INFO 14 /* wl_pwr_slice_index_band_t */
-#define WL_PWRSTATS_TYPE_PSBW_STATS 15 /* struct wl_pwr_psbw_stats_t */
-
-/* IOV AWD DATA */
-#define AWD_DATA_JOIN_INFO 0
-#define AWD_DATA_VERSION_V1 1
-
-/* IOV ETD DATA */
-#define ETD_DATA_JOIN_INFO 0
-#define ETD_DATA_VERSION_V1 1
-
-/* CTMODE DBG */
-/* input param: [31:16] => MPDU_THRESHOLD
- * [15:03] => RESERVED
- * [02] => enable UFP
- * [01] => enable UFC
- * [00] => enalbe CTMODE
- */
-#define CTMODE_DBG_CTMODE_EN (0x1u)
-#define CTMODE_DBG_UFC_EN (0x2u)
-#define CTMODE_DBG_UFP_EN (0x4u)
-#define CTMODE_DBG_MPDU_THRESHOLD_SHIFT (7u)
-#define CTMODE_DBG_MPDU_THRESHOLD_MASK ((0x1FFu) << CTMODE_DBG_MPDU_THRESHOLD_SHIFT)
-#define CTMODE_DBG_BYTES_THRESHOLD_SHIFT (16u)
-#define CTMODE_DBG_BYTES_THRESHOLD_MASK ((0xFFFu) << CTMODE_DBG_BYTES_THRESHOLD_SHIFT)
-
-/* ====== SC use case configs ========= */
-/* SC user/use case request */
-#define WL_SC_REQ_SCAN 0u /* user scan */
-#define WL_SC_REQ_CNX 1u /* associated idle */
-#define WL_SC_REQ_NAN 2u /* NAN synchronization and discovery offload */
-
-/* === Per use case configuration === */
-/* scan cfgs */
-#define SC_SCAN_CFG_PASSIVE_MASK 0x01u /* Enable passive scan on sc */
-#define SC_SCAN_CFG_PASSIVE_SHIFT 0u
-#define SC_SCAN_CFG_LP_SCAN_MASK 0x02u /* Enable low prio scan on sc */
-#define SC_SCAN_CFG_LP_SCAN_SHIFT 1u
-#define SC_SCAN_CFG_REG_SCAN_MASK 0x04u /* Enable split scan using sc */
-#define SC_SCAN_CFG_REG_SCAN_SHIFT 2u
-#define SC_SCAN_CFG_FULL_SCAN_MASK 0x08u /* Enable full scan on sc */
-#define SC_SCAN_CFG_FULL_SCAN_SHIFT 3u
-/* Add get and set macros for each of the configs? */
-
-/* === Place holder for cnx and nan cfgs === */
+
#endif /* wlioctl_defs_h */
/*
* Custom OID/ioctl related helper functions.
*
- * Copyright (C) 1999-2019, Broadcom.
- *
+ * Copyright (C) 1999-2017, Broadcom Corporation
+ *
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
* under the terms of the GNU General Public License version 2 (the "GPL"),
* available at http://www.broadcom.com/licenses/GPLv2.php, with the
* following added to such license:
- *
+ *
* As a special exception, the copyright holders of this software give you
* permission to link this software with independent modules, and to copy and
* distribute the resulting executable under terms of your choice, provided that
* the license of that module. An independent module is a module which is not
* derived from this software. The special exception does not apply to any
* modifications of the software.
- *
+ *
* Notwithstanding the above, under no circumstances may you combine this
* software in any way with any other Broadcom software provided under a license
* other than the GPL, without Broadcom's express prior written consent.
* <<Broadcom-WL-IPTag/Open:>>
*
- * $Id: wlioctl_utils.h 626207 2016-03-19 17:39:14Z $
+ * $Id: wlioctl_utils.h 614820 2016-01-23 17:16:17Z $
*/
#ifndef _wlioctl_utils_h_
extern const char * wl_get_reinit_rc_name(int rc);
/* Get data pointer of wlc layer counters tuple from xtlv formatted counters IOVar buffer. */
-#define GET_WLCCNT_FROM_CNTBUF(cntbuf) (const wl_cnt_wlc_t*) \
- bcm_get_data_from_xtlv_buf(((const wl_cnt_info_t *)cntbuf)->data, \
- ((const wl_cnt_info_t *)cntbuf)->datalen, WL_CNT_XTLV_WLC, \
+#define GET_WLCCNT_FROM_CNTBUF(cntbuf) \
+ bcm_get_data_from_xtlv_buf(((wl_cnt_info_t *)cntbuf)->data, \
+ ((wl_cnt_info_t *)cntbuf)->datalen, WL_CNT_XTLV_WLC, \
NULL, BCM_XTLV_OPTION_ALIGN32)
#define CHK_CNTBUF_DATALEN(cntbuf, ioctl_buflen) do { \
/*
* Fundamental types and constants relating to WPA
*
- * Copyright (C) 1999-2019, Broadcom.
- *
+ * Copyright (C) 1999-2017, Broadcom Corporation
+ *
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
* under the terms of the GNU General Public License version 2 (the "GPL"),
* available at http://www.broadcom.com/licenses/GPLv2.php, with the
* following added to such license:
- *
+ *
* As a special exception, the copyright holders of this software give you
* permission to link this software with independent modules, and to copy and
* distribute the resulting executable under terms of your choice, provided that
* the license of that module. An independent module is a module which is not
* derived from this software. The special exception does not apply to any
* modifications of the software.
- *
+ *
* Notwithstanding the above, under no circumstances may you combine this
* software in any way with any other Broadcom software provided under a license
* other than the GPL, without Broadcom's express prior written consent.
*
* <<Broadcom-WL-IPTag/Open:>>
*
- * $Id: wpa.h 822438 2019-05-29 17:13:44Z $
+ * $Id: wpa.h 700076 2017-05-17 14:42:22Z $
*/
#ifndef _proto_wpa_h_
#include <typedefs.h>
#include <ethernet.h>
+
/* This marks the start of a packed structure section. */
#include <packed_section_start.h>
#define WPA_CIPHER_WEP_104 5 /* WEP (104-bit) */
#define WPA_CIPHER_BIP 6 /* BIP (Broadcast/Multicast Integrity Protocol) */
#define WPA_CIPHER_TPK 7 /* Group addressed traffic not allowed */
-#ifdef BCMCCX
-#define WPA_CIPHER_CKIP 8 /* KP with no MIC */
-#define WPA_CIPHER_CKIP_MMH 9 /* KP with MIC ("CKIP/MMH", "CKIP+CMIC") */
-#define WPA_CIPHER_WEP_MMH 10 /* MIC with no KP ("WEP/MMH", "CMIC") */
-
-#define IS_CCX_CIPHER(cipher) ((cipher) == WPA_CIPHER_CKIP || \
- (cipher) == WPA_CIPHER_CKIP_MMH || \
- (cipher) == WPA_CIPHER_WEP_MMH)
-#endif /* BCMCCX */
#define WPA_CIPHER_AES_GCM 8 /* AES (GCM) */
#define WPA_CIPHER_AES_GCM256 9 /* AES (GCM256) */
-#define WPA_CIPHER_CCMP_256 10 /* CCMP-256 */
-#define WPA_CIPHER_BIP_GMAC_128 11 /* BIP_GMAC_128 */
-#define WPA_CIPHER_BIP_GMAC_256 12 /* BIP_GMAC_256 */
-#define WPA_CIPHER_BIP_CMAC_256 13 /* BIP_CMAC_256 */
-#ifdef BCMWAPI_WAI
-#define WAPI_CIPHER_NONE WPA_CIPHER_NONE
-#define WAPI_CIPHER_SMS4 11
-
-#define WAPI_CSE_WPI_SMS4 1
-#endif /* BCMWAPI_WAI */
#define IS_WPA_CIPHER(cipher) ((cipher) == WPA_CIPHER_NONE || \
(cipher) == WPA_CIPHER_WEP_40 || \
(cipher) == WPA_CIPHER_AES_CCM || \
(cipher) == WPA_CIPHER_AES_GCM || \
(cipher) == WPA_CIPHER_AES_GCM256 || \
- (cipher) == WPA_CIPHER_CCMP_256 || \
(cipher) == WPA_CIPHER_TPK)
-#define IS_WPA_BIP_CIPHER(cipher) ((cipher) == WPA_CIPHER_BIP || \
- (cipher) == WPA_CIPHER_BIP_GMAC_128 || \
- (cipher) == WPA_CIPHER_BIP_GMAC_256 || \
- (cipher) == WPA_CIPHER_BIP_CMAC_256)
-
-#ifdef BCMWAPI_WAI
-#define IS_WAPI_CIPHER(cipher) ((cipher) == WAPI_CIPHER_NONE || \
- (cipher) == WAPI_CSE_WPI_SMS4)
-
-/* convert WAPI_CSE_WPI_XXX to WAPI_CIPHER_XXX */
-#define WAPI_CSE_WPI_2_CIPHER(cse) ((cse) == WAPI_CSE_WPI_SMS4 ? \
- WAPI_CIPHER_SMS4 : WAPI_CIPHER_NONE)
-
-#define WAPI_CIPHER_2_CSE_WPI(cipher) ((cipher) == WAPI_CIPHER_SMS4 ? \
- WAPI_CSE_WPI_SMS4 : WAPI_CIPHER_NONE)
-#endif /* BCMWAPI_WAI */
-
-#define IS_VALID_AKM(akm) ((akm) == RSN_AKM_NONE || \
- (akm) == RSN_AKM_UNSPECIFIED || \
- (akm) == RSN_AKM_PSK || \
- (akm) == RSN_AKM_FBT_1X || \
- (akm) == RSN_AKM_FBT_PSK || \
- (akm) == RSN_AKM_MFP_1X || \
- (akm) == RSN_AKM_MFP_PSK || \
- (akm) == RSN_AKM_SHA256_1X || \
- (akm) == RSN_AKM_SHA256_PSK || \
- (akm) == RSN_AKM_TPK || \
- (akm) == RSN_AKM_SAE_PSK || \
- (akm) == RSN_AKM_SAE_FBT || \
- (akm) == RSN_AKM_FILS_SHA256 || \
- (akm) == RSN_AKM_FILS_SHA384 || \
- (akm) == RSN_AKM_OWE || \
- (akm) == RSN_AKM_SUITEB_SHA256_1X || \
- (akm) == RSN_AKM_SUITEB_SHA384_1X)
-
-#define IS_VALID_BIP_CIPHER(cipher) ((cipher) == WPA_CIPHER_BIP || \
- (cipher) == WPA_CIPHER_BIP_GMAC_128 || \
- (cipher) == WPA_CIPHER_BIP_GMAC_256 || \
- (cipher) == WPA_CIPHER_BIP_CMAC_256)
-
-#define WPA_IS_FT_AKM(akm) ((akm) == RSN_AKM_FBT_SHA256 || \
- (akm) == RSN_AKM_FBT_SHA384)
-
-#define WPA_IS_FILS_AKM(akm) ((akm) == RSN_AKM_FILS_SHA256 || \
- (akm) == RSN_AKM_FILS_SHA384)
-
-#define WPA_IS_FILS_FT_AKM(akm) ((akm) == RSN_AKM_FBT_SHA256_FILS || \
- (akm) == RSN_AKM_FBT_SHA384_FILS)
/* WPA TKIP countermeasures parameters */
#define WPA_TKIP_CM_DETECT 60 /* multiple MIC failure window (seconds) */
#define WPA2_PMKID_COUNT_LEN 2
-/* RSN dev type in rsn_info struct */
-typedef enum {
- DEV_NONE = 0,
- DEV_STA = 1,
- DEV_AP = 2
-} device_type_t;
-
-typedef uint32 rsn_akm_mask_t; /* RSN_AKM_... see 802.11.h */
-typedef uint8 rsn_cipher_t; /* WPA_CIPHER_xxx */
-typedef uint32 rsn_ciphers_t; /* mask of rsn_cipher_t */
-typedef uint8 rsn_akm_t;
-typedef uint8 auth_ie_type_mask_t;
-
-/* Old location for this structure. Moved to bcmwpa.h */
-#ifndef RSN_IE_INFO_STRUCT_RELOCATED
-typedef struct rsn_ie_info {
- uint8 version;
- rsn_cipher_t g_cipher;
- uint8 p_count;
- uint8 akm_count;
- uint8 pmkid_count;
- rsn_akm_t sta_akm; /* single STA akm */
- uint16 caps;
- rsn_ciphers_t p_ciphers;
- rsn_akm_mask_t akms;
- uint8 pmkids_offset; /* offset into the IE */
- rsn_cipher_t g_mgmt_cipher;
- device_type_t dev_type; /* AP or STA */
- rsn_cipher_t sta_cipher; /* single STA cipher */
- uint16 key_desc; /* key descriptor version as STA */
- int parse_status;
- uint16 mic_len; /* unused. keep for ROM compatibility. */
- auth_ie_type_mask_t auth_ie_type; /* bit field of WPA, WPA2 and (not yet) CCX WAPI */
- uint8 pmk_len; /* EAPOL PMK */
- uint8 kck_mic_len; /* EAPOL MIC (by KCK) */
- uint8 kck_len; /* EAPOL KCK */
- uint8 kek_len; /* EAPOL KEK */
- uint8 tk_len; /* EAPOL TK */
- uint8 ptk_len; /* EAPOL PTK */
- uint8 kck2_len; /* EAPOL KCK2 */
- uint8 kek2_len; /* EAPOL KEK2 */
-} rsn_ie_info_t;
-#endif /* RSN_IE_INFO_STRUCT_RELOCATED */
-
-#ifdef BCMWAPI_WAI
-#define WAPI_CAP_PREAUTH RSN_CAP_PREAUTH
-
-/* Other WAI definition */
-#define WAPI_WAI_REQUEST 0x00F1
-#define WAPI_UNICAST_REKEY 0x00F2
-#define WAPI_STA_AGING 0x00F3
-#define WAPI_MUTIL_REKEY 0x00F4
-#define WAPI_STA_STATS 0x00F5
-
-#define WAPI_USK_REKEY_COUNT 0x4000000 /* 0xA00000 */
-#define WAPI_MSK_REKEY_COUNT 0x4000000 /* 0xA00000 */
-#endif /* BCMWAPI_WAI */
/* This marks the end of a packed structure section. */
#include <packed_section_end.h>
/*
* WPS IE definitions
*
- * Copyright (C) 1999-2019, Broadcom.
- *
+ * Copyright (C) 1999-2017, Broadcom Corporation
+ *
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
* under the terms of the GNU General Public License version 2 (the "GPL"),
* available at http://www.broadcom.com/licenses/GPLv2.php, with the
* following added to such license:
- *
+ *
* As a special exception, the copyright holders of this software give you
* permission to link this software with independent modules, and to copy and
* distribute the resulting executable under terms of your choice, provided that
* the license of that module. An independent module is a module which is not
* derived from this software. The special exception does not apply to any
* modifications of the software.
- *
+ *
* Notwithstanding the above, under no circumstances may you combine this
* software in any way with any other Broadcom software provided under a license
* other than the GPL, without Broadcom's express prior written consent.
#ifdef __cplusplus
extern "C" {
-#endif // endif
+#endif
/* Data Element Definitions */
#define WPS_ID_AP_CHANNEL 0x1001
#define WPS_WFA_SUBID_SETTINGS_DELAY_TIME 0x04
#define WPS_WFA_SUBID_REG_CFG_METHODS 0x05
+
/* WCN-NET Windows Rally Vertical Pairing Vendor Extensions */
#define MS_VENDOR_EXT_ID "\x00\x01\x37"
#define WPS_MS_ID_VPI 0x1001 /* Vertical Pairing Identifier TLV */
*/
#define WPS_ENCRTYPE_AES 0x0008
+
/* WPS Message Types */
#define WPS_ID_BEACON 0x01
#define WPS_ID_PROBE_REQ 0x02
#define WPS_PRIVATE_ID_FRAG_ACK (WPS_ID_MESSAGE_DONE + 5)
#define WPS_PRIVATE_ID_EAPOL_START (WPS_ID_MESSAGE_DONE + 6)
+
/* Device Type categories for primary and secondary device types */
#define WPS_DEVICE_TYPE_CAT_COMPUTER 1
#define WPS_DEVICE_TYPE_CAT_INPUT_DEVICE 2
#define WPS_DEVICE_TYPE_SUB_CAT_AUDIO_MPHONE 6 /* WSC 2.0 */
#define WPS_DEVICE_TYPE_SUB_CAT_AUDIO_HTS 7 /* WSC 2.0 */
+
/* Device request/response type */
#define WPS_MSGTYPE_ENROLLEE_INFO_ONLY 0x00
#define WPS_MSGTYPE_ENROLLEE_OPEN_8021X 0x01
#ifdef __cplusplus
}
-#endif // endif
+#endif
#endif /* _WPS_ */
/*
* Linux OS Independent Layer
*
- * Copyright (C) 1999-2019, Broadcom.
- *
+ * Copyright (C) 1999-2017, Broadcom Corporation
+ *
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
* under the terms of the GNU General Public License version 2 (the "GPL"),
* available at http://www.broadcom.com/licenses/GPLv2.php, with the
* following added to such license:
- *
+ *
* As a special exception, the copyright holders of this software give you
* permission to link this software with independent modules, and to copy and
* distribute the resulting executable under terms of your choice, provided that
* the license of that module. An independent module is a module which is not
* derived from this software. The special exception does not apply to any
* modifications of the software.
- *
+ *
* Notwithstanding the above, under no circumstances may you combine this
* software in any way with any other Broadcom software provided under a license
* other than the GPL, without Broadcom's express prior written consent.
*
* <<Broadcom-WL-IPTag/Open:>>
*
- * $Id: linux_osl.c 815919 2019-04-22 09:06:50Z $
+ * $Id: linux_osl.c 680580 2017-01-20 11:49:58Z $
*/
#define LINUX_PORT
#include <linuxver.h>
#include <bcmdefs.h>
+
+#if !defined(STBLINUX)
#if defined(__ARM_ARCH_7A__) && !defined(DHD_USE_COHERENT_MEM_FOR_RING)
#include <asm/cacheflush.h>
#endif /* __ARM_ARCH_7A__ && !DHD_USE_COHERENT_MEM_FOR_RING */
+#endif /* !STBLINUX */
#include <linux/random.h>
#include <asm-generic/pci-dma-compat.h>
#endif
+
#ifdef BCM_SECURE_DMA
#include <linux/module.h>
#include <linux/kernel.h>
#if defined(STB)
#include <linux/spinlock.h>
extern spinlock_t l2x0_reg_lock;
-#endif // endif
+#endif
#ifdef BCM_OBJECT_TRACE
#include <bcmutils.h>
#endif /* BCM_OBJECT_TRACE */
-#include "linux_osl_priv.h"
#define PCI_CFG_RETRY 10
+#define OS_HANDLE_MAGIC 0x1234abcd /* Magic # to recognize osh */
+#define BCM_MEM_FILENAME_LEN 24 /* Mem. filename length */
#define DUMPBUFSZ 1024
+/* dependency check */
+#if !defined(BCMPCIE) && defined(DHD_USE_STATIC_CTRLBUF)
+#error "DHD_USE_STATIC_CTRLBUF is supported on PCIE targets only"
+#endif /* !BCMPCIE && DHD_USE_STATIC_CTRLBUF */
+
+#ifdef CONFIG_DHD_USE_STATIC_BUF
+#ifdef DHD_USE_STATIC_CTRLBUF
+#define DHD_SKB_1PAGE_BUFSIZE (PAGE_SIZE*1)
+#define DHD_SKB_2PAGE_BUFSIZE (PAGE_SIZE*2)
+#define DHD_SKB_4PAGE_BUFSIZE (PAGE_SIZE*4)
+
+#define PREALLOC_FREE_MAGIC 0xFEDC
+#define PREALLOC_USED_MAGIC 0xFCDE
+#else
+#define DHD_SKB_HDRSIZE 336
+#define DHD_SKB_1PAGE_BUFSIZE ((PAGE_SIZE*1)-DHD_SKB_HDRSIZE)
+#define DHD_SKB_2PAGE_BUFSIZE ((PAGE_SIZE*2)-DHD_SKB_HDRSIZE)
+#define DHD_SKB_4PAGE_BUFSIZE ((PAGE_SIZE*4)-DHD_SKB_HDRSIZE)
+#endif /* DHD_USE_STATIC_CTRLBUF */
+
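The sizes above are derived from the page size: without DHD_USE_STATIC_CTRLBUF a fixed header reserve (DHD_SKB_HDRSIZE) is subtracted so that skb data plus bookkeeping still fits in 1, 2 or 4 pages. A minimal userspace sketch of that arithmetic, assuming a 4 KB page size (values and names are illustrative, not driver symbols):

#include <stdio.h>

/* Illustrative sketch only, not part of the driver: recompute the
 * DHD_SKB_*PAGE_BUFSIZE values for a hypothetical 4 KB page size.
 */
#define SKETCH_PAGE_SIZE   4096UL
#define SKETCH_SKB_HDRSIZE 336UL   /* mirrors DHD_SKB_HDRSIZE above */

int main(void)
{
	unsigned long pages[] = { 1, 2, 4 };
	size_t i;

	for (i = 0; i < sizeof(pages) / sizeof(pages[0]); i++) {
		unsigned long raw = SKETCH_PAGE_SIZE * pages[i];
		/* leave room for the skb bookkeeping overhead */
		unsigned long usable = raw - SKETCH_SKB_HDRSIZE;
		printf("%lu page(s): raw %lu bytes, usable %lu bytes\n",
		       pages[i], raw, usable);
	}
	return 0;
}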
+#define STATIC_BUF_MAX_NUM 16
+#define STATIC_BUF_SIZE (PAGE_SIZE*2)
+#define STATIC_BUF_TOTAL_LEN (STATIC_BUF_MAX_NUM * STATIC_BUF_SIZE)
+
+typedef struct bcm_static_buf {
+ spinlock_t static_lock;
+ unsigned char *buf_ptr;
+ unsigned char buf_use[STATIC_BUF_MAX_NUM];
+} bcm_static_buf_t;
+
+static bcm_static_buf_t *bcm_static_buf = 0;
+
+#ifdef DHD_USE_STATIC_CTRLBUF
+#define STATIC_PKT_4PAGE_NUM 0
+#define DHD_SKB_MAX_BUFSIZE DHD_SKB_2PAGE_BUFSIZE
+#elif defined(ENHANCED_STATIC_BUF)
+#define STATIC_PKT_4PAGE_NUM 1
+#define DHD_SKB_MAX_BUFSIZE DHD_SKB_4PAGE_BUFSIZE
+#else
+#define STATIC_PKT_4PAGE_NUM 0
+#define DHD_SKB_MAX_BUFSIZE DHD_SKB_2PAGE_BUFSIZE
+#endif /* DHD_USE_STATIC_CTRLBUF */
+
+#ifdef DHD_USE_STATIC_CTRLBUF
+#define STATIC_PKT_1PAGE_NUM 0
+#define STATIC_PKT_2PAGE_NUM 128
+#else
+#define STATIC_PKT_1PAGE_NUM 8
+#define STATIC_PKT_2PAGE_NUM 8
+#endif /* DHD_USE_STATIC_CTRLBUF */
+
+#define STATIC_PKT_1_2PAGE_NUM \
+ ((STATIC_PKT_1PAGE_NUM) + (STATIC_PKT_2PAGE_NUM))
+#define STATIC_PKT_MAX_NUM \
+ ((STATIC_PKT_1_2PAGE_NUM) + (STATIC_PKT_4PAGE_NUM))
+
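STATIC_PKT_MAX_NUM is just the sum of the per-size slot counts, so the pkt_use[] array in the struct below tracks every preallocated skb regardless of build configuration. A small sketch that recomputes the totals for the three configurations covered by the #ifdefs above (struct and names are illustrative, not driver symbols):

#include <stdio.h>

/* Sketch only: how many preallocated skbs the pkt_use[] array has to
 * track in each build configuration shown above.
 */
struct pool_cfg {
	const char *name;
	int pkt_1page;
	int pkt_2page;
	int pkt_4page;
};

int main(void)
{
	struct pool_cfg cfgs[] = {
		{ "DHD_USE_STATIC_CTRLBUF", 0, 128, 0 },
		{ "ENHANCED_STATIC_BUF",     8,   8, 1 },
		{ "default",                 8,   8, 0 },
	};
	size_t i;

	for (i = 0; i < sizeof(cfgs) / sizeof(cfgs[0]); i++) {
		int max = cfgs[i].pkt_1page + cfgs[i].pkt_2page + cfgs[i].pkt_4page;
		printf("%-24s STATIC_PKT_MAX_NUM = %d\n", cfgs[i].name, max);
	}
	return 0;
}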
+typedef struct bcm_static_pkt {
+#ifdef DHD_USE_STATIC_CTRLBUF
+ struct sk_buff *skb_8k[STATIC_PKT_2PAGE_NUM];
+ unsigned char pkt_invalid[STATIC_PKT_2PAGE_NUM];
+ spinlock_t osl_pkt_lock;
+ uint32 last_allocated_index;
+#else
+ struct sk_buff *skb_4k[STATIC_PKT_1PAGE_NUM];
+ struct sk_buff *skb_8k[STATIC_PKT_2PAGE_NUM];
+#ifdef ENHANCED_STATIC_BUF
+ struct sk_buff *skb_16k;
+#endif /* ENHANCED_STATIC_BUF */
+ struct semaphore osl_pkt_sem;
+#endif /* DHD_USE_STATIC_CTRLBUF */
+ unsigned char pkt_use[STATIC_PKT_MAX_NUM];
+} bcm_static_pkt_t;
+
+static bcm_static_pkt_t *bcm_static_skb = 0;
+
+void* wifi_platform_prealloc(void *adapter, int section, unsigned long size);
+#endif /* CONFIG_DHD_USE_STATIC_BUF */
+
+typedef struct bcm_mem_link {
+ struct bcm_mem_link *prev;
+ struct bcm_mem_link *next;
+ uint size;
+ int line;
+ void *osh;
+ char file[BCM_MEM_FILENAME_LEN];
+} bcm_mem_link_t;
+
+struct osl_cmn_info {
+ atomic_t malloced;
+ atomic_t pktalloced; /* Number of allocated packet buffers */
+ spinlock_t dbgmem_lock;
+ bcm_mem_link_t *dbgmem_list;
+ bcm_mem_link_t *dbgvmem_list;
+ spinlock_t pktalloc_lock;
+ atomic_t refcount; /* Number of references to this shared structure. */
+};
+typedef struct osl_cmn_info osl_cmn_t;
+
+struct osl_info {
+ osl_pubinfo_t pub;
+ uint32 flags; /* If specific cases to be handled in the OSL */
+#ifdef CTFPOOL
+ ctfpool_t *ctfpool;
+#endif /* CTFPOOL */
+ uint magic;
+ void *pdev;
+ uint failed;
+ uint bustype;
+ osl_cmn_t *cmn; /* Common OSL related data shared between two OSH's */
+
+ void *bus_handle;
+#ifdef BCMDBG_CTRACE
+ spinlock_t ctrace_lock;
+ struct list_head ctrace_list;
+ int ctrace_num;
+#endif /* BCMDBG_CTRACE */
+#ifdef BCM_SECURE_DMA
+ struct sec_mem_elem *sec_list_4096;
+ struct sec_mem_elem *sec_list_base_4096;
+ phys_addr_t contig_base;
+ void *contig_base_va;
+ phys_addr_t contig_base_alloc;
+ void *contig_base_alloc_va;
+ phys_addr_t contig_base_alloc_coherent;
+ void *contig_base_alloc_coherent_va;
+ void *contig_base_coherent_va;
+ void *contig_delta_va_pa;
+ struct {
+ phys_addr_t pa;
+ void *va;
+ bool avail;
+ } sec_cma_coherent[SEC_CMA_COHERENT_MAX];
+ int stb_ext_params;
+#endif /* BCM_SECURE_DMA */
+};
#ifdef BCM_SECURE_DMA
static void * osl_sec_dma_ioremap(osl_t *osh, struct page *page, size_t size,
bool iscache, bool isdecr);
static void osl_sec_dma_free_consistent(osl_t *osh, void *va, uint size, dmaaddr_t pa);
#endif /* BCM_SECURE_DMA */
+#ifdef BCM_OBJECT_TRACE
+/* don't clear the first 4 bytes, which hold the pkt sn */
+#define OSL_PKTTAG_CLEAR(p) \
+do { \
+ struct sk_buff *s = (struct sk_buff *)(p); \
+ ASSERT(OSL_PKTTAG_SZ == 32); \
+ *(uint32 *)(&s->cb[4]) = 0; \
+ *(uint32 *)(&s->cb[8]) = 0; *(uint32 *)(&s->cb[12]) = 0; \
+ *(uint32 *)(&s->cb[16]) = 0; *(uint32 *)(&s->cb[20]) = 0; \
+ *(uint32 *)(&s->cb[24]) = 0; *(uint32 *)(&s->cb[28]) = 0; \
+} while (0)
+#else
+#define OSL_PKTTAG_CLEAR(p) \
+do { \
+ struct sk_buff *s = (struct sk_buff *)(p); \
+ ASSERT(OSL_PKTTAG_SZ == 32); \
+ *(uint32 *)(&s->cb[0]) = 0; *(uint32 *)(&s->cb[4]) = 0; \
+ *(uint32 *)(&s->cb[8]) = 0; *(uint32 *)(&s->cb[12]) = 0; \
+ *(uint32 *)(&s->cb[16]) = 0; *(uint32 *)(&s->cb[20]) = 0; \
+ *(uint32 *)(&s->cb[24]) = 0; *(uint32 *)(&s->cb[28]) = 0; \
+} while (0)
+#endif /* BCM_OBJECT_TRACE */
+
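Both variants clear the 32-byte packet tag held in skb->cb; the BCM_OBJECT_TRACE build deliberately leaves the first 4 bytes alone because they carry the packet serial number used by the object tracer. A standalone sketch of the same idea (the cb size and helper name are assumptions for illustration):

#include <stdio.h>
#include <string.h>
#include <stdint.h>

/* Sketch only: model the skb control buffer and clear the 32-byte packet
 * tag, optionally preserving the first 4 bytes (the pkt "sn").
 */
#define CB_SIZE     48
#define PKTTAG_SIZE 32

static void pkttag_clear(uint8_t cb[CB_SIZE], int keep_sn)
{
	size_t start = keep_sn ? 4 : 0;        /* skip the serial number */
	memset(cb + start, 0, PKTTAG_SIZE - start);
}

int main(void)
{
	uint8_t cb[CB_SIZE];

	memset(cb, 0xAB, sizeof(cb));
	pkttag_clear(cb, 1);
	printf("cb[0]=0x%02x (kept) cb[4]=0x%02x (cleared) cb[32]=0x%02x (untouched)\n",
	       cb[0], cb[4], cb[32]);
	return 0;
}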
/* PCMCIA attribute space access macros */
uint32 g_assert_type = 0; /* By Default Kernel Panic */
static int secdma_found = 0;
#endif /* BCM_SECURE_DMA */
-#ifdef USE_DMA_LOCK
-static void osl_dma_lock(osl_t *osh);
-static void osl_dma_unlock(osl_t *osh);
-static void osl_dma_lock_init(osl_t *osh);
-
-#define DMA_LOCK(osh) osl_dma_lock(osh)
-#define DMA_UNLOCK(osh) osl_dma_unlock(osh)
-#define DMA_LOCK_INIT(osh) osl_dma_lock_init(osh);
-#else
-#define DMA_LOCK(osh) do { /* noop */ } while(0)
-#define DMA_UNLOCK(osh) do { /* noop */ } while(0)
-#define DMA_LOCK_INIT(osh) do { /* noop */ } while(0)
-#endif /* USE_DMA_LOCK */
-
static int16 linuxbcmerrormap[] =
{ 0, /* 0 */
-EINVAL, /* BCME_ERROR */
-EINVAL, /* BCME_NOA_PND */
-EINVAL, /* BCME_FRAG_Q_FAILED */
-EINVAL, /* BCME_GET_AF_FAILED */
- -EINVAL, /* BCME_MSCH_NOTREADY */
- -EINVAL, /* BCME_IOV_LAST_CMD */
- -EINVAL, /* BCME_MINIPMU_CAL_FAIL */
- -EINVAL, /* BCME_RCAL_FAIL */
- -EINVAL, /* BCME_LPF_RCCAL_FAIL */
- -EINVAL, /* BCME_DACBUF_RCCAL_FAIL */
- -EINVAL, /* BCME_VCOCAL_FAIL */
- -EINVAL, /* BCME_BANDLOCKED */
- -EINVAL, /* BCME_DNGL_DEVRESET */
+ -EINVAL, /* BCME_MSCH_NOTREADY */
/* When an new error code is added to bcmutils.h, add os
* specific error translation here as well
*/
/* check if BCME_LAST changed since the last time this function was updated */
-#if BCME_LAST != -68
+#if BCME_LAST != -60
#error "You need to add a OS error translation in the linuxbcmerrormap \
for new error code defined in bcmutils.h"
-#endif // endif
+#endif
};
uint lmtest = FALSE;
-#ifdef DHD_MAP_LOGGING
-#define DHD_MAP_LOG_SIZE 2048
-
-typedef struct dhd_map_item {
- dmaaddr_t pa; /* DMA address (physical) */
- uint64 ts_nsec; /* timestamp: nsec */
- uint32 size; /* mapping size */
- uint8 rsvd[4]; /* reserved for future use */
-} dhd_map_item_t;
-
-typedef struct dhd_map_record {
- uint32 items; /* number of total items */
- uint32 idx; /* current index of metadata */
- dhd_map_item_t map[0]; /* metadata storage */
-} dhd_map_log_t;
-
-void
-osl_dma_map_dump(osl_t *osh)
-{
- dhd_map_log_t *map_log, *unmap_log;
- uint64 ts_sec, ts_usec;
-
- map_log = (dhd_map_log_t *)(osh->dhd_map_log);
- unmap_log = (dhd_map_log_t *)(osh->dhd_unmap_log);
- osl_get_localtime(&ts_sec, &ts_usec);
-
- if (map_log && unmap_log) {
- printk("%s: map_idx=%d unmap_idx=%d "
- "current time=[%5lu.%06lu]\n", __FUNCTION__,
- map_log->idx, unmap_log->idx, (unsigned long)ts_sec,
- (unsigned long)ts_usec);
- printk("%s: dhd_map_log(pa)=0x%llx size=%d,"
- " dma_unmap_log(pa)=0x%llx size=%d\n", __FUNCTION__,
- (uint64)__virt_to_phys((ulong)(map_log->map)),
- (uint32)(sizeof(dhd_map_item_t) * map_log->items),
- (uint64)__virt_to_phys((ulong)(unmap_log->map)),
- (uint32)(sizeof(dhd_map_item_t) * unmap_log->items));
- }
-}
-
-static void *
-osl_dma_map_log_init(uint32 item_len)
-{
- dhd_map_log_t *map_log;
- gfp_t flags;
- uint32 alloc_size = (uint32)(sizeof(dhd_map_log_t) +
- (item_len * sizeof(dhd_map_item_t)));
-
- flags = CAN_SLEEP() ? GFP_KERNEL : GFP_ATOMIC;
- map_log = (dhd_map_log_t *)kmalloc(alloc_size, flags);
- if (map_log) {
- memset(map_log, 0, alloc_size);
- map_log->items = item_len;
- map_log->idx = 0;
- }
-
- return (void *)map_log;
-}
-
-static void
-osl_dma_map_log_deinit(osl_t *osh)
-{
- if (osh->dhd_map_log) {
- kfree(osh->dhd_map_log);
- osh->dhd_map_log = NULL;
- }
-
- if (osh->dhd_unmap_log) {
- kfree(osh->dhd_unmap_log);
- osh->dhd_unmap_log = NULL;
- }
-}
-
-static void
-osl_dma_map_logging(osl_t *osh, void *handle, dmaaddr_t pa, uint32 len)
-{
- dhd_map_log_t *log = (dhd_map_log_t *)handle;
- uint32 idx;
-
- if (log == NULL) {
- printk("%s: log is NULL\n", __FUNCTION__);
- return;
- }
-
- idx = log->idx;
- log->map[idx].ts_nsec = osl_localtime_ns();
- log->map[idx].pa = pa;
- log->map[idx].size = len;
- log->idx = (idx + 1) % log->items;
-}
-#endif /* DHD_MAP_LOGGING */
-
/* translate bcmerrors into linux errors */
int
osl_error(int bcmerror)
/* Array bounds covered by ASSERT in osl_attach */
return linuxbcmerrormap[-bcmerror];
}
+
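osl_error() is a negated-index lookup into linuxbcmerrormap[], so every firmware error code lands on a sensible negative errno, with the table length guarded by the ASSERT mentioned in the comment above. A hedged sketch of the same indexed-map pattern with a made-up three-entry table and an explicit clamp (sketch_errormap/sketch_error are illustrative names, not driver symbols):

#include <stdio.h>
#include <errno.h>

/* Sketch, not the driver table: translate negative "BCME"-style codes
 * into negative errno values via an indexed map, clamping out-of-range
 * codes to the generic error slot.
 */
static const int sketch_errormap[] = {
	0,        /* 0: success */
	-EINVAL,  /* -1: generic error */
	-ENOMEM,  /* -2: out of memory (hypothetical) */
};

static int sketch_error(int bcmerror)
{
	if (bcmerror > 0)
		bcmerror = 0;
	else if (-bcmerror >= (int)(sizeof(sketch_errormap) / sizeof(sketch_errormap[0])))
		bcmerror = -1;          /* clamp unknown codes */
	return sketch_errormap[-bcmerror];
}

int main(void)
{
	printf("0 -> %d, -2 -> %d, -99 -> %d\n",
	       sketch_error(0), sketch_error(-2), sketch_error(-99));
	return 0;
}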
osl_t *
+#ifdef SHARED_OSL_CMN
+osl_attach(void *pdev, uint bustype, bool pkttag, void **osl_cmn)
+#else
osl_attach(void *pdev, uint bustype, bool pkttag)
+#endif /* SHARED_OSL_CMN */
{
+#ifndef SHARED_OSL_CMN
void **osl_cmn = NULL;
+#endif /* SHARED_OSL_CMN */
osl_t *osh;
gfp_t flags;
#ifdef BCM_SECURE_DMA
u32 secdma_memsize;
-#endif // endif
+#endif
flags = CAN_SLEEP() ? GFP_KERNEL: GFP_ATOMIC;
if (!(osh = kmalloc(sizeof(osl_t), flags)))
}
osh->contig_base_va = osh->contig_base_alloc_va;
-#ifdef NOT_YET
- /*
- * osl_sec_dma_init_elem_mem_block(osh, CMA_BUFSIZE_512, CMA_BUFNUM, &osh->sec_list_512);
- * osh->sec_list_base_512 = osh->sec_list_512;
- * osl_sec_dma_init_elem_mem_block(osh, CMA_BUFSIZE_2K, CMA_BUFNUM, &osh->sec_list_2048);
- * osh->sec_list_base_2048 = osh->sec_list_2048;
- */
-#endif // endif
if (BCME_OK != osl_sec_dma_init_elem_mem_block(osh,
CMA_BUFSIZE_4K, CMA_BUFNUM, &osh->sec_list_4096)) {
osl_sec_dma_iounmap(osh, osh->contig_base_coherent_va, CMA_DMA_DESC_MEMBLOCK);
break;
}
- DMA_LOCK_INIT(osh);
+#ifdef BCMDBG_CTRACE
+ spin_lock_init(&osh->ctrace_lock);
+ INIT_LIST_HEAD(&osh->ctrace_list);
+ osh->ctrace_num = 0;
+#endif /* BCMDBG_CTRACE */
-#ifdef DHD_MAP_LOGGING
- osh->dhd_map_log = osl_dma_map_log_init(DHD_MAP_LOG_SIZE);
- if (osh->dhd_map_log == NULL) {
- printk("%s: Failed to alloc dhd_map_log\n", __FUNCTION__);
- }
-
- osh->dhd_unmap_log = osl_dma_map_log_init(DHD_MAP_LOG_SIZE);
- if (osh->dhd_unmap_log == NULL) {
- printk("%s: Failed to alloc dhd_unmap_log\n", __FUNCTION__);
- }
-#endif /* DHD_MAP_LOGGING */
return osh;
}
+int osl_static_mem_init(osl_t *osh, void *adapter)
+{
+#ifdef CONFIG_DHD_USE_STATIC_BUF
+ if (!bcm_static_buf && adapter) {
+ if (!(bcm_static_buf = (bcm_static_buf_t *)wifi_platform_prealloc(adapter,
+ 3, STATIC_BUF_SIZE + STATIC_BUF_TOTAL_LEN))) {
+ printk("can not alloc static buf!\n");
+ bcm_static_skb = NULL;
+ ASSERT(osh->magic == OS_HANDLE_MAGIC);
+ return -ENOMEM;
+ } else {
+ printk("alloc static buf at %p!\n", bcm_static_buf);
+ }
+
+ spin_lock_init(&bcm_static_buf->static_lock);
+
+ bcm_static_buf->buf_ptr = (unsigned char *)bcm_static_buf + STATIC_BUF_SIZE;
+ }
+
+#if defined(BCMSDIO) || defined(DHD_USE_STATIC_CTRLBUF)
+ if (!bcm_static_skb && adapter) {
+ int i;
+ void *skb_buff_ptr = 0;
+ bcm_static_skb = (bcm_static_pkt_t *)((char *)bcm_static_buf + 2048);
+ skb_buff_ptr = wifi_platform_prealloc(adapter, 4, 0);
+ if (!skb_buff_ptr) {
+ printk("cannot alloc static buf!\n");
+ bcm_static_buf = NULL;
+ bcm_static_skb = NULL;
+ ASSERT(osh->magic == OS_HANDLE_MAGIC);
+ return -ENOMEM;
+ }
+
+ bcopy(skb_buff_ptr, bcm_static_skb, sizeof(struct sk_buff *) *
+ (STATIC_PKT_MAX_NUM));
+ for (i = 0; i < STATIC_PKT_MAX_NUM; i++) {
+ bcm_static_skb->pkt_use[i] = 0;
+ }
+
+#ifdef DHD_USE_STATIC_CTRLBUF
+ spin_lock_init(&bcm_static_skb->osl_pkt_lock);
+ bcm_static_skb->last_allocated_index = 0;
+#else
+ sema_init(&bcm_static_skb->osl_pkt_sem, 1);
+#endif /* DHD_USE_STATIC_CTRLBUF */
+ }
+#endif /* BCMSDIO || DHD_USE_STATIC_CTRLBUF */
+#endif /* CONFIG_DHD_USE_STATIC_BUF */
+
+ return 0;
+}
+
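osl_static_mem_init() carves the single region returned by wifi_platform_prealloc() in place: the bcm_static_buf_t bookkeeping header sits at the front and buf_ptr is pointed STATIC_BUF_SIZE bytes past it, so the usable pool follows the header inside the same allocation. A userspace sketch of that carving with placeholder sizes (struct and macro names are illustrative):

#include <stdio.h>
#include <stdlib.h>

/* Sketch only: split one preallocated region into a bookkeeping header
 * followed by the actual buffer pool.
 */
#define HDR_RESERVE 8192UL           /* stands in for STATIC_BUF_SIZE */
#define POOL_LEN    (16UL * 8192UL)  /* stands in for STATIC_BUF_TOTAL_LEN */

struct static_pool {
	unsigned char *buf_ptr;      /* start of the usable pool */
	unsigned char  buf_use[16];  /* per-slot in-use flags */
};

int main(void)
{
	unsigned char *region = malloc(HDR_RESERVE + POOL_LEN);
	struct static_pool *pool;

	if (region == NULL)
		return 1;
	pool = (struct static_pool *)region;     /* header at the front */
	pool->buf_ptr = region + HDR_RESERVE;    /* pool right after it */
	printf("region %p, pool starts %p (+%lu bytes)\n",
	       (void *)region, (void *)pool->buf_ptr, HDR_RESERVE);
	free(region);
	return 0;
}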
void osl_set_bus_handle(osl_t *osh, void *bus_handle)
{
osh->bus_handle = bus_handle;
return osh->bus_handle;
}
-#if defined(BCM_BACKPLANE_TIMEOUT)
-void osl_set_bpt_cb(osl_t *osh, void *bpt_cb, void *bpt_ctx)
-{
- if (osh) {
- osh->bpt_cb = (bpt_cb_fn)bpt_cb;
- osh->sih = bpt_ctx;
- }
-}
-#endif /* BCM_BACKPLANE_TIMEOUT */
-
void
osl_detach(osl_t *osh)
{
#ifdef BCM_SECURE_DMA
if (osh->stb_ext_params == SECDMA_EXT_FILE)
stbpriv_exit(osh);
-#ifdef NOT_YET
- osl_sec_dma_deinit_elem_mem_block(osh, CMA_BUFSIZE_512, CMA_BUFNUM, osh->sec_list_base_512);
- osl_sec_dma_deinit_elem_mem_block(osh, CMA_BUFSIZE_2K, CMA_BUFNUM, osh->sec_list_base_2048);
-#endif /* NOT_YET */
osl_sec_dma_deinit_elem_mem_block(osh, CMA_BUFSIZE_4K, CMA_BUFNUM, osh->sec_list_base_4096);
osl_sec_dma_iounmap(osh, osh->contig_base_coherent_va, CMA_DMA_DESC_MEMBLOCK);
osl_sec_dma_iounmap(osh, osh->contig_base_va, CMA_DMA_DATA_MEMBLOCK);
secdma_found--;
#endif /* BCM_SECURE_DMA */
- bcm_object_trace_deinit();
-#ifdef DHD_MAP_LOGGING
- osl_dma_map_log_deinit(osh->dhd_map_log);
- osl_dma_map_log_deinit(osh->dhd_unmap_log);
-#endif /* DHD_MAP_LOGGING */
+ bcm_object_trace_deinit();
ASSERT(osh->magic == OS_HANDLE_MAGIC);
atomic_sub(1, &osh->cmn->refcount);
kfree(osh);
}
+int osl_static_mem_deinit(osl_t *osh, void *adapter)
+{
+#ifdef CONFIG_DHD_USE_STATIC_BUF
+ if (bcm_static_buf) {
+ bcm_static_buf = 0;
+ }
+#ifdef BCMSDIO
+ if (bcm_static_skb) {
+ bcm_static_skb = 0;
+ }
+#endif /* BCMSDIO */
+#endif /* CONFIG_DHD_USE_STATIC_BUF */
+ return 0;
+}
+
/* APIs to set/get specific quirks in OSL layer */
void BCMFASTPATH
osl_flag_set(osl_t *osh, uint32 mask)
osh->flags |= mask;
}
-void
-osl_flag_clr(osl_t *osh, uint32 mask)
-{
- osh->flags &= ~mask;
-}
+void
+osl_flag_clr(osl_t *osh, uint32 mask)
+{
+ osh->flags &= ~mask;
+}
+
+#if defined(STB)
+inline bool BCMFASTPATH
+#else
+bool
+#endif
+osl_is_flag_set(osl_t *osh, uint32 mask)
+{
+ return (osh->flags & mask);
+}
+
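osl_flag_set(), osl_flag_clr() and osl_is_flag_set() are plain bitmask helpers over osh->flags; a minimal standalone equivalent for reference (the static flags word stands in for the osh field):

#include <stdio.h>
#include <stdint.h>

/* Sketch of the osl flag helpers: set, clear and test bits in a 32-bit
 * flags word.
 */
static uint32_t flags;

static void flag_set(uint32_t mask)    { flags |= mask; }
static void flag_clr(uint32_t mask)    { flags &= ~mask; }
static int  flag_is_set(uint32_t mask) { return (flags & mask) != 0; }

int main(void)
{
	flag_set(0x4);
	printf("0x4 set? %d\n", flag_is_set(0x4));   /* 1 */
	flag_clr(0x4);
	printf("0x4 set? %d\n", flag_is_set(0x4));   /* 0 */
	return 0;
}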
+
+#if (defined(__ARM_ARCH_7A__) && !defined(DHD_USE_COHERENT_MEM_FOR_RING))
+
+inline int BCMFASTPATH
+osl_arch_is_coherent(void)
+{
+ return 0;
+}
+
+inline int BCMFASTPATH
+osl_acp_war_enab(void)
+{
+ return 0;
+}
+
+inline void BCMFASTPATH
+osl_cache_flush(void *va, uint size)
+{
+
+ if (size > 0)
+ dma_sync_single_for_device(OSH_NULL, virt_to_dma(OSH_NULL, va), size, DMA_TO_DEVICE);
+}
+
+inline void BCMFASTPATH
+osl_cache_inv(void *va, uint size)
+{
+
+ dma_sync_single_for_cpu(OSH_NULL, virt_to_dma(OSH_NULL, va), size, DMA_FROM_DEVICE);
+}
+
+inline void BCMFASTPATH
+osl_prefetch(const void *ptr)
+{
+ __asm__ __volatile__("pld\t%0" :: "o"(*(char *)ptr) : "cc");
+}
+
+#endif
+
+/*
+ * To avoid ACP latency, a fwder buf will be sent directly to DDR using
+ * DDR aliasing into non-ACP address space. Such Fwder buffers must be
+ * explicitly managed from a coherency perspective.
+ */
+static inline void BCMFASTPATH
+osl_fwderbuf_reset(osl_t *osh, struct sk_buff *skb)
+{
+}
+
+static struct sk_buff *osl_alloc_skb(osl_t *osh, unsigned int len)
+{
+ struct sk_buff *skb;
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)
+ gfp_t flags = (in_atomic() || irqs_disabled()) ? GFP_ATOMIC : GFP_KERNEL;
+#ifdef DHD_USE_ATOMIC_PKTGET
+ flags = GFP_ATOMIC;
+#endif /* DHD_USE_ATOMIC_PKTGET */
+ skb = __dev_alloc_skb(len, flags);
+#else
+ skb = dev_alloc_skb(len);
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25) */
+ return skb;
+}
+
+#ifdef CTFPOOL
+
+#ifdef CTFPOOL_SPINLOCK
+#define CTFPOOL_LOCK(ctfpool, flags) spin_lock_irqsave(&(ctfpool)->lock, flags)
+#define CTFPOOL_UNLOCK(ctfpool, flags) spin_unlock_irqrestore(&(ctfpool)->lock, flags)
+#else
+#define CTFPOOL_LOCK(ctfpool, flags) spin_lock_bh(&(ctfpool)->lock)
+#define CTFPOOL_UNLOCK(ctfpool, flags) spin_unlock_bh(&(ctfpool)->lock)
+#endif /* CTFPOOL_SPINLOCK */
+/*
+ * Allocate and add an object to packet pool.
+ */
+void *
+osl_ctfpool_add(osl_t *osh)
+{
+ struct sk_buff *skb;
+#ifdef CTFPOOL_SPINLOCK
+ unsigned long flags;
+#endif /* CTFPOOL_SPINLOCK */
+
+ if ((osh == NULL) || (osh->ctfpool == NULL))
+ return NULL;
+
+ CTFPOOL_LOCK(osh->ctfpool, flags);
+ ASSERT(osh->ctfpool->curr_obj <= osh->ctfpool->max_obj);
+
+ /* No need to allocate more objects */
+ if (osh->ctfpool->curr_obj == osh->ctfpool->max_obj) {
+ CTFPOOL_UNLOCK(osh->ctfpool, flags);
+ return NULL;
+ }
+
+ /* Allocate a new skb and add it to the ctfpool */
+ skb = osl_alloc_skb(osh, osh->ctfpool->obj_size);
+ if (skb == NULL) {
+ printf("%s: skb alloc of len %d failed\n", __FUNCTION__,
+ osh->ctfpool->obj_size);
+ CTFPOOL_UNLOCK(osh->ctfpool, flags);
+ return NULL;
+ }
+
+ /* Add to ctfpool */
+ skb->next = (struct sk_buff *)osh->ctfpool->head;
+ osh->ctfpool->head = skb;
+ osh->ctfpool->fast_frees++;
+ osh->ctfpool->curr_obj++;
+
+ /* Hijack a skb member to store ptr to ctfpool */
+ CTFPOOLPTR(osh, skb) = (void *)osh->ctfpool;
+
+ /* Use bit flag to indicate skb from fast ctfpool */
+ PKTFAST(osh, skb) = FASTBUF;
+
+ /* If ctfpool's osh is a fwder osh, reset the fwder buf */
+ osl_fwderbuf_reset(osh->ctfpool->osh, skb);
+
+ CTFPOOL_UNLOCK(osh->ctfpool, flags);
+
+ return skb;
+}
+
+/*
+ * Add new objects to the pool.
+ */
+void
+osl_ctfpool_replenish(osl_t *osh, uint thresh)
+{
+ if ((osh == NULL) || (osh->ctfpool == NULL))
+ return;
+
+ /* Do nothing if no refills are required */
+ while ((osh->ctfpool->refills > 0) && (thresh--)) {
+ osl_ctfpool_add(osh);
+ osh->ctfpool->refills--;
+ }
+}
+
+/*
+ * Initialize the packet pool with specified number of objects.
+ */
+int32
+osl_ctfpool_init(osl_t *osh, uint numobj, uint size)
+{
+ gfp_t flags;
+
+ flags = CAN_SLEEP() ? GFP_KERNEL: GFP_ATOMIC;
+ osh->ctfpool = kzalloc(sizeof(ctfpool_t), flags);
+ ASSERT(osh->ctfpool);
+
+ osh->ctfpool->osh = osh;
+
+ osh->ctfpool->max_obj = numobj;
+ osh->ctfpool->obj_size = size;
+
+ spin_lock_init(&osh->ctfpool->lock);
+
+ while (numobj--) {
+ if (!osl_ctfpool_add(osh))
+ return -1;
+ osh->ctfpool->fast_frees--;
+ }
+
+ return 0;
+}
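The ctfpool is a simple LIFO free list threaded through skb->next: osl_ctfpool_add() pushes at the head, osl_pktfastget() pops from the head, and curr_obj/max_obj bound the pool. A userspace sketch of the same push/pop discipline on a generic node type (pool_add/pool_get are illustrative names, and the locking is omitted):

#include <stdio.h>
#include <stdlib.h>

/* Sketch (not driver code): a LIFO object pool threaded through the
 * objects' own next pointers, the same shape as ctfpool's head,
 * curr_obj and max_obj fields.
 */
struct node {
	struct node *next;
	char payload[64];
};

struct pool {
	struct node *head;
	unsigned int curr_obj, max_obj;
};

static int pool_add(struct pool *p)
{
	struct node *n;

	if (p->curr_obj == p->max_obj)
		return -1;                  /* pool is already full */
	n = calloc(1, sizeof(*n));
	if (n == NULL)
		return -1;
	n->next = p->head;                  /* push at the head */
	p->head = n;
	p->curr_obj++;
	return 0;
}

static struct node *pool_get(struct pool *p)
{
	struct node *n = p->head;

	if (n == NULL)
		return NULL;                /* caller falls back to a fresh alloc */
	p->head = n->next;                  /* pop from the head */
	p->curr_obj--;
	n->next = NULL;
	return n;
}

int main(void)
{
	struct pool p = { NULL, 0, 4 };
	struct node *n;
	unsigned int i;

	for (i = 0; i < p.max_obj; i++)
		pool_add(&p);
	printf("pooled %u objects\n", p.curr_obj);

	while ((n = pool_get(&p)) != NULL)  /* drain and free the sketch pool */
		free(n);
	return 0;
}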
+
+/*
+ * Cleanup the packet pool objects.
+ */
+void
+osl_ctfpool_cleanup(osl_t *osh)
+{
+ struct sk_buff *skb, *nskb;
+#ifdef CTFPOOL_SPINLOCK
+ unsigned long flags;
+#endif /* CTFPOOL_SPINLOCK */
+
+ if ((osh == NULL) || (osh->ctfpool == NULL))
+ return;
+
+ CTFPOOL_LOCK(osh->ctfpool, flags);
+
+ skb = osh->ctfpool->head;
+
+ while (skb != NULL) {
+ nskb = skb->next;
+ dev_kfree_skb(skb);
+ skb = nskb;
+ osh->ctfpool->curr_obj--;
+ }
+
+ ASSERT(osh->ctfpool->curr_obj == 0);
+ osh->ctfpool->head = NULL;
+ CTFPOOL_UNLOCK(osh->ctfpool, flags);
+
+ kfree(osh->ctfpool);
+ osh->ctfpool = NULL;
+}
+
+void
+osl_ctfpool_stats(osl_t *osh, void *b)
+{
+ struct bcmstrbuf *bb;
+
+ if ((osh == NULL) || (osh->ctfpool == NULL))
+ return;
+
+#ifdef CONFIG_DHD_USE_STATIC_BUF
+ if (bcm_static_buf) {
+ bcm_static_buf = 0;
+ }
+#ifdef BCMSDIO
+ if (bcm_static_skb) {
+ bcm_static_skb = 0;
+ }
+#endif /* BCMSDIO */
+#endif /* CONFIG_DHD_USE_STATIC_BUF */
+
+ bb = b;
+
+ ASSERT((osh != NULL) && (bb != NULL));
+
+ bcm_bprintf(bb, "max_obj %d obj_size %d curr_obj %d refills %d\n",
+ osh->ctfpool->max_obj, osh->ctfpool->obj_size,
+ osh->ctfpool->curr_obj, osh->ctfpool->refills);
+ bcm_bprintf(bb, "fast_allocs %d fast_frees %d slow_allocs %d\n",
+ osh->ctfpool->fast_allocs, osh->ctfpool->fast_frees,
+ osh->ctfpool->slow_allocs);
+}
+
+static inline struct sk_buff *
+osl_pktfastget(osl_t *osh, uint len)
+{
+ struct sk_buff *skb;
+#ifdef CTFPOOL_SPINLOCK
+ unsigned long flags;
+#endif /* CTFPOOL_SPINLOCK */
+
+ /* Try to do fast allocate. Return null if ctfpool is not in use
+ * or if there are no items in the ctfpool.
+ */
+ if (osh->ctfpool == NULL)
+ return NULL;
+
+ CTFPOOL_LOCK(osh->ctfpool, flags);
+ if (osh->ctfpool->head == NULL) {
+ ASSERT(osh->ctfpool->curr_obj == 0);
+ osh->ctfpool->slow_allocs++;
+ CTFPOOL_UNLOCK(osh->ctfpool, flags);
+ return NULL;
+ }
+
+ if (len > osh->ctfpool->obj_size) {
+ CTFPOOL_UNLOCK(osh->ctfpool, flags);
+ return NULL;
+ }
+
+ ASSERT(len <= osh->ctfpool->obj_size);
+
+ /* Get an object from ctfpool */
+ skb = (struct sk_buff *)osh->ctfpool->head;
+ osh->ctfpool->head = (void *)skb->next;
+
+ osh->ctfpool->fast_allocs++;
+ osh->ctfpool->curr_obj--;
+ ASSERT(CTFPOOLHEAD(osh, skb) == (struct sock *)osh->ctfpool->head);
+ CTFPOOL_UNLOCK(osh->ctfpool, flags);
+
+ /* Init skb struct */
+ skb->next = skb->prev = NULL;
+#if defined(__ARM_ARCH_7A__)
+ skb->data = skb->head + NET_SKB_PAD;
+ skb->tail = skb->head + NET_SKB_PAD;
+#else
+ skb->data = skb->head + 16;
+ skb->tail = skb->head + 16;
+#endif /* __ARM_ARCH_7A__ */
+ skb->len = 0;
+ skb->cloned = 0;
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 14)
+ skb->list = NULL;
+#endif
+ atomic_set(&skb->users, 1);
+
+ PKTSETCLINK(skb, NULL);
+ PKTCCLRATTR(skb);
+ PKTFAST(osh, skb) &= ~(CTFBUF | SKIPCT | CHAINED);
+
+ return skb;
+}
+#endif /* CTFPOOL */
+
+#if defined(BCM_GMAC3)
+/* Account for a packet delivered to downstream forwarder.
+ * Decrement a GMAC forwarder interface's pktalloced count.
+ */
+void BCMFASTPATH
+osl_pkt_tofwder(osl_t *osh, void *skbs, int skb_cnt)
+{
+
+ atomic_sub(skb_cnt, &osh->cmn->pktalloced);
+}
+
+/* Account for a downstream forwarder delivered packet to a WL/DHD driver.
+ * Increment a GMAC forwarder interface's pktalloced count.
+ */
+void BCMFASTPATH
+#ifdef BCMDBG_CTRACE
+osl_pkt_frmfwder(osl_t *osh, void *skbs, int skb_cnt, int line, char *file)
+#else
+osl_pkt_frmfwder(osl_t *osh, void *skbs, int skb_cnt)
+#endif /* BCMDBG_CTRACE */
+{
+#if defined(BCMDBG_CTRACE)
+ int i;
+ struct sk_buff *skb;
+#endif
+
+#if defined(BCMDBG_CTRACE)
+ if (skb_cnt > 1) {
+ struct sk_buff **skb_array = (struct sk_buff **)skbs;
+ for (i = 0; i < skb_cnt; i++) {
+ skb = skb_array[i];
+#if defined(BCMDBG_CTRACE)
+ ASSERT(!PKTISCHAINED(skb));
+ ADD_CTRACE(osh, skb, file, line);
+#endif /* BCMDBG_CTRACE */
+ }
+ } else {
+ skb = (struct sk_buff *)skbs;
+#if defined(BCMDBG_CTRACE)
+ ASSERT(!PKTISCHAINED(skb));
+ ADD_CTRACE(osh, skb, file, line);
+#endif /* BCMDBG_CTRACE */
+ }
+#endif
+
+ atomic_add(skb_cnt, &osh->cmn->pktalloced);
+}
+
+#endif /* BCM_GMAC3 */
+
+/* Convert a driver packet to native(OS) packet
+ * In the process, the packettag is zeroed out before sending up.
+ * IP code depends on skb->cb being set up correctly with various options;
+ * in our case, that means it should be 0.
+ */
+struct sk_buff * BCMFASTPATH
+osl_pkt_tonative(osl_t *osh, void *pkt)
+{
+ struct sk_buff *nskb;
+#ifdef BCMDBG_CTRACE
+ struct sk_buff *nskb1, *nskb2;
+#endif
+
+ if (osh->pub.pkttag)
+ OSL_PKTTAG_CLEAR(pkt);
+
+ /* Decrement the packet counter */
+ for (nskb = (struct sk_buff *)pkt; nskb; nskb = nskb->next) {
+ atomic_sub(PKTISCHAINED(nskb) ? PKTCCNT(nskb) : 1, &osh->cmn->pktalloced);
+
+#ifdef BCMDBG_CTRACE
+ for (nskb1 = nskb; nskb1 != NULL; nskb1 = nskb2) {
+ if (PKTISCHAINED(nskb1)) {
+ nskb2 = PKTCLINK(nskb1);
+ } else {
+ nskb2 = NULL;
+ }
+
+ DEL_CTRACE(osh, nskb1);
+ }
+#endif /* BCMDBG_CTRACE */
+ }
+ return (struct sk_buff *)pkt;
+}
+
+/* Convert a native(OS) packet to driver packet.
+ * In the process, the native packet is destroyed; there is no copying.
+ * Also, the packettag is zeroed out.
+ */
+void * BCMFASTPATH
+#ifdef BCMDBG_CTRACE
+osl_pkt_frmnative(osl_t *osh, void *pkt, int line, char *file)
+#else
+osl_pkt_frmnative(osl_t *osh, void *pkt)
+#endif /* BCMDBG_CTRACE */
+{
+ struct sk_buff *cskb;
+ struct sk_buff *nskb;
+ unsigned long pktalloced = 0;
+
+ if (osh->pub.pkttag)
+ OSL_PKTTAG_CLEAR(pkt);
+
+ /* walk the PKTCLINK() list */
+ for (cskb = (struct sk_buff *)pkt;
+ cskb != NULL;
+ cskb = PKTISCHAINED(cskb) ? PKTCLINK(cskb) : NULL) {
+
+ /* walk the pkt buffer list */
+ for (nskb = cskb; nskb; nskb = nskb->next) {
+
+ /* Increment the packet counter */
+ pktalloced++;
+
+ /* clean the 'prev' pointer
+ * Kernel 3.18 is leaving skb->prev pointer set to skb
+ * to indicate a non-fragmented skb
+ */
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 18, 0))
+ nskb->prev = NULL;
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 18, 0) */
+
+
+#ifdef BCMDBG_CTRACE
+ ADD_CTRACE(osh, nskb, file, line);
+#endif /* BCMDBG_CTRACE */
+ }
+ }
+
+ /* Increment the packet counter */
+ atomic_add(pktalloced, &osh->cmn->pktalloced);
+
+ return (void *)pkt;
+}
+
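The packet accounting in osl_pkt_frmnative() and osl_pkt_tonative() walks two levels of linkage: the outer loop follows PKTCLINK() across chained packets, the inner loop follows skb->next within each buffer list, and pktalloced is adjusted once per buffer visited. A sketch of that two-level walk (struct buf and count_bufs are illustrative, not driver symbols):

#include <stdio.h>

/* Sketch: count buffers in a two-level packet structure, an outer chain
 * of packets (clink) where each packet is itself a list of buffers (next).
 */
struct buf {
	struct buf *next;   /* next buffer of the same packet  */
	struct buf *clink;  /* head of the next chained packet */
};

static unsigned long count_bufs(struct buf *pkt)
{
	unsigned long n = 0;
	struct buf *c, *b;

	for (c = pkt; c != NULL; c = c->clink)       /* walk the packet chain */
		for (b = c; b != NULL; b = b->next)  /* walk each buffer list */
			n++;
	return n;
}

int main(void)
{
	struct buf p2 = { NULL, NULL };  /* second packet: one buffer        */
	struct buf b2 = { NULL, NULL };  /* extra buffer of the first packet */
	struct buf p1 = { &b2, &p2 };    /* first packet: two buffers        */

	printf("buffers accounted: %lu\n", count_bufs(&p1));  /* prints 3 */
	return 0;
}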
+/* Return a new packet. zero out pkttag */
+void * BCMFASTPATH
+#ifdef BCMDBG_CTRACE
+osl_pktget(osl_t *osh, uint len, int line, char *file)
+#else
+#ifdef BCM_OBJECT_TRACE
+osl_pktget(osl_t *osh, uint len, int line, const char *caller)
+#else
+osl_pktget(osl_t *osh, uint len)
+#endif /* BCM_OBJECT_TRACE */
+#endif /* BCMDBG_CTRACE */
+{
+ struct sk_buff *skb;
+ uchar num = 0;
+ if (lmtest != FALSE) {
+ get_random_bytes(&num, sizeof(uchar));
+ if ((num + 1) <= (256 * lmtest / 100))
+ return NULL;
+ }
+
+#ifdef CTFPOOL
+ /* Allocate from local pool */
+ skb = osl_pktfastget(osh, len);
+ if ((skb != NULL) || ((skb = osl_alloc_skb(osh, len)) != NULL))
+#else /* CTFPOOL */
+ if ((skb = osl_alloc_skb(osh, len)))
+#endif /* CTFPOOL */
+ {
+ skb->tail += len;
+ skb->len += len;
+ skb->priority = 0;
+
+#ifdef BCMDBG_CTRACE
+ ADD_CTRACE(osh, skb, file, line);
+#endif
+ atomic_inc(&osh->cmn->pktalloced);
+#ifdef BCM_OBJECT_TRACE
+ bcm_object_trace_opr(skb, BCM_OBJDBG_ADD_PKT, caller, line);
+#endif /* BCM_OBJECT_TRACE */
+ }
+
+ return ((void*) skb);
+}
+
+#ifdef CTFPOOL
+static inline void
+osl_pktfastfree(osl_t *osh, struct sk_buff *skb)
+{
+ ctfpool_t *ctfpool;
+#ifdef CTFPOOL_SPINLOCK
+ unsigned long flags;
+#endif /* CTFPOOL_SPINLOCK */
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 14)
+ skb->tstamp.tv64 = 0;
+#else
+ skb->stamp.tv_sec = 0;
+#endif
+
+ /* We only need to init the fields that we change */
+ skb->dev = NULL;
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 36)
+ skb->dst = NULL;
+#endif
+ OSL_PKTTAG_CLEAR(skb);
+ skb->ip_summed = 0;
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 36)
+ skb_orphan(skb);
+#else
+ skb->destructor = NULL;
+#endif
+
+ ctfpool = (ctfpool_t *)CTFPOOLPTR(osh, skb);
+ ASSERT(ctfpool != NULL);
+
+ /* if osh is a fwder osh, reset the fwder buf */
+ osl_fwderbuf_reset(ctfpool->osh, skb);
+
+ /* Add object to the ctfpool */
+ CTFPOOL_LOCK(ctfpool, flags);
+ skb->next = (struct sk_buff *)ctfpool->head;
+ ctfpool->head = (void *)skb;
+
+ ctfpool->fast_frees++;
+ ctfpool->curr_obj++;
+
+ ASSERT(ctfpool->curr_obj <= ctfpool->max_obj);
+ CTFPOOL_UNLOCK(ctfpool, flags);
+}
+#endif /* CTFPOOL */
+
+/* Free the driver packet. Free the tag if present */
+void BCMFASTPATH
+#ifdef BCM_OBJECT_TRACE
+osl_pktfree(osl_t *osh, void *p, bool send, int line, const char *caller)
+#else
+osl_pktfree(osl_t *osh, void *p, bool send)
+#endif /* BCM_OBJECT_TRACE */
+{
+ struct sk_buff *skb, *nskb;
+ if (osh == NULL)
+ return;
+
+ skb = (struct sk_buff*) p;
+
+ if (send && osh->pub.tx_fn)
+ osh->pub.tx_fn(osh->pub.tx_ctx, p, 0);
+
+ PKTDBG_TRACE(osh, (void *) skb, PKTLIST_PKTFREE);
+
+#if defined(CONFIG_DHD_USE_STATIC_BUF) && defined(DHD_USE_STATIC_CTRLBUF)
+ if (skb && (skb->mac_len == PREALLOC_USED_MAGIC)) {
+ printk("%s: pkt %p is from static pool\n",
+ __FUNCTION__, p);
+ dump_stack();
+ return;
+ }
+
+ if (skb && (skb->mac_len == PREALLOC_FREE_MAGIC)) {
+ printk("%s: pkt %p is from static pool and not in used\n",
+ __FUNCTION__, p);
+ dump_stack();
+ return;
+ }
+#endif /* CONFIG_DHD_USE_STATIC_BUF && DHD_USE_STATIC_CTRLBUF */
+
+ /* perversion: we use skb->next to chain multi-skb packets */
+ while (skb) {
+ nskb = skb->next;
+ skb->next = NULL;
+
+#ifdef BCMDBG_CTRACE
+ DEL_CTRACE(osh, skb);
+#endif
+
+
+#ifdef BCM_OBJECT_TRACE
+ bcm_object_trace_opr(skb, BCM_OBJDBG_REMOVE, caller, line);
+#endif /* BCM_OBJECT_TRACE */
+
+#ifdef CTFPOOL
+ if (PKTISFAST(osh, skb)) {
+ if (atomic_read(&skb->users) == 1)
+ smp_rmb();
+ else if (!atomic_dec_and_test(&skb->users))
+ goto next_skb;
+ osl_pktfastfree(osh, skb);
+ } else
+#endif
+ {
+ dev_kfree_skb_any(skb);
+ }
+#ifdef CTFPOOL
+next_skb:
+#endif
+ atomic_dec(&osh->cmn->pktalloced);
+ skb = nskb;
+ }
+}
+
+#ifdef CONFIG_DHD_USE_STATIC_BUF
+void*
+osl_pktget_static(osl_t *osh, uint len)
+{
+ int i = 0;
+ struct sk_buff *skb;
+#ifdef DHD_USE_STATIC_CTRLBUF
+ unsigned long flags;
+#endif /* DHD_USE_STATIC_CTRLBUF */
+
+ if (!bcm_static_skb)
+ return osl_pktget(osh, len);
+
+ if (len > DHD_SKB_MAX_BUFSIZE) {
+ printk("%s: attempt to allocate huge packet (0x%x)\n", __FUNCTION__, len);
+ return osl_pktget(osh, len);
+ }
+
+#ifdef DHD_USE_STATIC_CTRLBUF
+ spin_lock_irqsave(&bcm_static_skb->osl_pkt_lock, flags);
+
+ if (len <= DHD_SKB_2PAGE_BUFSIZE) {
+ uint32 index;
+ for (i = 0; i < STATIC_PKT_2PAGE_NUM; i++) {
+ index = bcm_static_skb->last_allocated_index % STATIC_PKT_2PAGE_NUM;
+ bcm_static_skb->last_allocated_index++;
+ if (bcm_static_skb->skb_8k[index] &&
+ bcm_static_skb->pkt_use[index] == 0) {
+ break;
+ }
+ }
+
+ if ((i != STATIC_PKT_2PAGE_NUM) &&
+ (index >= 0) && (index < STATIC_PKT_2PAGE_NUM)) {
+ bcm_static_skb->pkt_use[index] = 1;
+ skb = bcm_static_skb->skb_8k[index];
+ skb->data = skb->head;
+#ifdef NET_SKBUFF_DATA_USES_OFFSET
+ skb_set_tail_pointer(skb, NET_SKB_PAD);
+#else
+ skb->tail = skb->data + NET_SKB_PAD;
+#endif /* NET_SKBUFF_DATA_USES_OFFSET */
+ skb->data += NET_SKB_PAD;
+ skb->cloned = 0;
+ skb->priority = 0;
+#ifdef NET_SKBUFF_DATA_USES_OFFSET
+ skb_set_tail_pointer(skb, len);
+#else
+ skb->tail = skb->data + len;
+#endif /* NET_SKBUFF_DATA_USES_OFFSET */
+ skb->len = len;
+ skb->mac_len = PREALLOC_USED_MAGIC;
+ spin_unlock_irqrestore(&bcm_static_skb->osl_pkt_lock, flags);
+ return skb;
+ }
+ }
+
+ spin_unlock_irqrestore(&bcm_static_skb->osl_pkt_lock, flags);
+ printk("%s: all static pkt in use!\n", __FUNCTION__);
+ return NULL;
+#else
+ down(&bcm_static_skb->osl_pkt_sem);
+
+ if (len <= DHD_SKB_1PAGE_BUFSIZE) {
+ for (i = 0; i < STATIC_PKT_1PAGE_NUM; i++) {
+ if (bcm_static_skb->skb_4k[i] &&
+ bcm_static_skb->pkt_use[i] == 0) {
+ break;
+ }
+ }
+
+ if (i != STATIC_PKT_1PAGE_NUM) {
+ bcm_static_skb->pkt_use[i] = 1;
+
+ skb = bcm_static_skb->skb_4k[i];
+#ifdef NET_SKBUFF_DATA_USES_OFFSET
+ skb_set_tail_pointer(skb, len);
+#else
+ skb->tail = skb->data + len;
+#endif /* NET_SKBUFF_DATA_USES_OFFSET */
+ skb->len = len;
+
+ up(&bcm_static_skb->osl_pkt_sem);
+ return skb;
+ }
+ }
+
+ if (len <= DHD_SKB_2PAGE_BUFSIZE) {
+ for (i = STATIC_PKT_1PAGE_NUM; i < STATIC_PKT_1_2PAGE_NUM; i++) {
+ if (bcm_static_skb->skb_8k[i - STATIC_PKT_1PAGE_NUM] &&
+ bcm_static_skb->pkt_use[i] == 0) {
+ break;
+ }
+ }
+
+ if ((i >= STATIC_PKT_1PAGE_NUM) && (i < STATIC_PKT_1_2PAGE_NUM)) {
+ bcm_static_skb->pkt_use[i] = 1;
+ skb = bcm_static_skb->skb_8k[i - STATIC_PKT_1PAGE_NUM];
+#ifdef NET_SKBUFF_DATA_USES_OFFSET
+ skb_set_tail_pointer(skb, len);
+#else
+ skb->tail = skb->data + len;
+#endif /* NET_SKBUFF_DATA_USES_OFFSET */
+ skb->len = len;
+
+ up(&bcm_static_skb->osl_pkt_sem);
+ return skb;
+ }
+ }
-#if defined(STB)
-inline bool BCMFASTPATH
+#if defined(ENHANCED_STATIC_BUF)
+ if (bcm_static_skb->skb_16k &&
+ bcm_static_skb->pkt_use[STATIC_PKT_MAX_NUM - 1] == 0) {
+ bcm_static_skb->pkt_use[STATIC_PKT_MAX_NUM - 1] = 1;
+
+ skb = bcm_static_skb->skb_16k;
+#ifdef NET_SKBUFF_DATA_USES_OFFSET
+ skb_set_tail_pointer(skb, len);
#else
-bool
-#endif // endif
-osl_is_flag_set(osl_t *osh, uint32 mask)
-{
- return (osh->flags & mask);
-}
+ skb->tail = skb->data + len;
+#endif /* NET_SKBUFF_DATA_USES_OFFSET */
+ skb->len = len;
-#if (defined(__ARM_ARCH_7A__) && !defined(DHD_USE_COHERENT_MEM_FOR_RING)) || \
- defined(STB_SOC_WIFI)
+ up(&bcm_static_skb->osl_pkt_sem);
+ return skb;
+ }
+#endif /* ENHANCED_STATIC_BUF */
-inline int BCMFASTPATH
-osl_arch_is_coherent(void)
-{
- return 0;
+ up(&bcm_static_skb->osl_pkt_sem);
+ printk("%s: all static pkt in use!\n", __FUNCTION__);
+ return osl_pktget(osh, len);
+#endif /* DHD_USE_STATIC_CTRLBUF */
}
-inline int BCMFASTPATH
-osl_acp_war_enab(void)
+void
+osl_pktfree_static(osl_t *osh, void *p, bool send)
{
- return 0;
-}
+ int i;
+#ifdef DHD_USE_STATIC_CTRLBUF
+ struct sk_buff *skb = (struct sk_buff *)p;
+ unsigned long flags;
+#endif /* DHD_USE_STATIC_CTRLBUF */
-inline void BCMFASTPATH
-osl_cache_flush(void *va, uint size)
-{
+ if (!p) {
+ return;
+ }
- if (size > 0)
-#ifdef STB_SOC_WIFI
- dma_sync_single_for_device(OSH_NULL, virt_to_phys(va), size, DMA_TX);
-#else /* STB_SOC_WIFI */
- dma_sync_single_for_device(OSH_NULL, virt_to_dma(OSH_NULL, va), size,
- DMA_TO_DEVICE);
-#endif /* STB_SOC_WIFI */
-}
+ if (!bcm_static_skb) {
+ osl_pktfree(osh, p, send);
+ return;
+ }
-inline void BCMFASTPATH
-osl_cache_inv(void *va, uint size)
-{
+#ifdef DHD_USE_STATIC_CTRLBUF
+ spin_lock_irqsave(&bcm_static_skb->osl_pkt_lock, flags);
-#ifdef STB_SOC_WIFI
- dma_sync_single_for_cpu(OSH_NULL, virt_to_phys(va), size, DMA_RX);
-#else /* STB_SOC_WIFI */
- dma_sync_single_for_cpu(OSH_NULL, virt_to_dma(OSH_NULL, va), size, DMA_FROM_DEVICE);
-#endif /* STB_SOC_WIFI */
-}
+ for (i = 0; i < STATIC_PKT_2PAGE_NUM; i++) {
+ if (p == bcm_static_skb->skb_8k[i]) {
+ if (bcm_static_skb->pkt_use[i] == 0) {
+ printk("%s: static pkt idx %d(%p) is double free\n",
+ __FUNCTION__, i, p);
+ } else {
+ bcm_static_skb->pkt_use[i] = 0;
+ }
-inline void BCMFASTPATH
-osl_prefetch(const void *ptr)
-{
-#if !defined(STB_SOC_WIFI)
- __asm__ __volatile__("pld\t%0" :: "o"(*(const char *)ptr) : "cc");
-#endif // endif
-}
+ if (skb->mac_len != PREALLOC_USED_MAGIC) {
+ printk("%s: static pkt idx %d(%p) is not in used\n",
+ __FUNCTION__, i, p);
+ }
+
+ skb->mac_len = PREALLOC_FREE_MAGIC;
+ spin_unlock_irqrestore(&bcm_static_skb->osl_pkt_lock, flags);
+ return;
+ }
+ }
+
+ spin_unlock_irqrestore(&bcm_static_skb->osl_pkt_lock, flags);
+ printk("%s: packet %p does not exist in the pool\n", __FUNCTION__, p);
+#else
+ down(&bcm_static_skb->osl_pkt_sem);
+ for (i = 0; i < STATIC_PKT_1PAGE_NUM; i++) {
+ if (p == bcm_static_skb->skb_4k[i]) {
+ bcm_static_skb->pkt_use[i] = 0;
+ up(&bcm_static_skb->osl_pkt_sem);
+ return;
+ }
+ }
-#endif // endif
+ for (i = STATIC_PKT_1PAGE_NUM; i < STATIC_PKT_1_2PAGE_NUM; i++) {
+ if (p == bcm_static_skb->skb_8k[i - STATIC_PKT_1PAGE_NUM]) {
+ bcm_static_skb->pkt_use[i] = 0;
+ up(&bcm_static_skb->osl_pkt_sem);
+ return;
+ }
+ }
+#ifdef ENHANCED_STATIC_BUF
+ if (p == bcm_static_skb->skb_16k) {
+ bcm_static_skb->pkt_use[STATIC_PKT_MAX_NUM - 1] = 0;
+ up(&bcm_static_skb->osl_pkt_sem);
+ return;
+ }
+#endif
+ up(&bcm_static_skb->osl_pkt_sem);
+#endif /* DHD_USE_STATIC_CTRLBUF */
+ osl_pktfree(osh, p, send);
+}
+#endif /* CONFIG_DHD_USE_STATIC_BUF */
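In the DHD_USE_STATIC_CTRLBUF path, osl_pktget_static() hands out the 8 KB slots round-robin: the scan resumes at last_allocated_index so repeated allocations do not always probe slot 0, and pkt_use[] keeps a slot busy until osl_pktfree_static() clears it. A userspace sketch of that allocator (slot_get/slot_put are illustrative names; the spinlock and real skb bodies are omitted):

#include <stdio.h>

/* Sketch: round-robin slot allocator over a fixed pool, the same scan
 * pattern as last_allocated_index / pkt_use[] above.
 */
#define NSLOTS 8

static unsigned char slot_in_use[NSLOTS];
static unsigned int last_allocated_index;

static int slot_get(void)
{
	int i;

	for (i = 0; i < NSLOTS; i++) {
		unsigned int idx = last_allocated_index % NSLOTS;
		last_allocated_index++;
		if (!slot_in_use[idx]) {
			slot_in_use[idx] = 1;    /* mark busy */
			return (int)idx;
		}
	}
	return -1;                               /* pool exhausted */
}

static void slot_put(int idx)
{
	if (idx >= 0 && idx < NSLOTS)
		slot_in_use[idx] = 0;            /* mark free again */
}

int main(void)
{
	int a = slot_get(), b = slot_get();

	slot_put(a);
	printf("got %d then %d, after freeing %d the next is %d\n",
	       a, b, a, slot_get());
	return 0;
}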
uint32
osl_pci_read_config(osl_t *osh, uint offset, uint size)
break;
} while (retry--);
+
return (val);
}
{
ASSERT(osh && (osh->magic == OS_HANDLE_MAGIC) && osh->pdev);
-#if defined(__ARM_ARCH_7A__)
+#if defined(__ARM_ARCH_7A__) && LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 35)
return pci_domain_nr(((struct pci_dev *)osh->pdev)->bus);
#else
return ((struct pci_dev *)osh->pdev)->bus->number;
-#endif // endif
+#endif
}
/* return slot # for the pci device pointed by osh->pdev */
{
ASSERT(osh && (osh->magic == OS_HANDLE_MAGIC) && osh->pdev);
-#if defined(__ARM_ARCH_7A__)
+#if defined(__ARM_ARCH_7A__) && LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 35)
return PCI_SLOT(((struct pci_dev *)osh->pdev)->devfn) + 1;
#else
return PCI_SLOT(((struct pci_dev *)osh->pdev)->devfn);
-#endif // endif
+#endif
}
/* return domain # for the pci device pointed by osh->pdev */
return (osh->failed);
}
+
uint
osl_dma_consistent_align(void)
{
*alloced = size;
#ifndef BCM_SECURE_DMA
-#if (defined(__ARM_ARCH_7A__) && !defined(DHD_USE_COHERENT_MEM_FOR_RING)) || \
- defined(STB_SOC_WIFI)
+#if defined(__ARM_ARCH_7A__) && !defined(DHD_USE_COHERENT_MEM_FOR_RING)
va = kmalloc(size, GFP_ATOMIC | __GFP_ZERO);
if (va)
*pap = (ulong)__virt_to_phys((ulong)va);
ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC)));
#ifndef BCM_SECURE_DMA
-#if (defined(__ARM_ARCH_7A__) && !defined(DHD_USE_COHERENT_MEM_FOR_RING)) || \
- defined(STB_SOC_WIFI)
+#if defined(__ARM_ARCH_7A__) && !defined(DHD_USE_COHERENT_MEM_FOR_RING)
kfree(va);
#else
#ifdef BCMDMA64OSL
#endif /* BCM_SECURE_DMA */
}
-void *
-osl_virt_to_phys(void *va)
-{
- return (void *)(uintptr)virt_to_phys(va);
-}
-
-#include <asm/cacheflush.h>
-void BCMFASTPATH
-osl_dma_flush(osl_t *osh, void *va, uint size, int direction, void *p, hnddma_seg_map_t *dmah)
-{
- return;
-}
-
dmaaddr_t BCMFASTPATH
osl_dma_map(osl_t *osh, void *va, uint size, int direction, void *p, hnddma_seg_map_t *dmah)
{
dma_addr_t map_addr;
int ret;
- DMA_LOCK(osh);
-
ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC)));
dir = (direction == DMA_TX)? PCI_DMA_TODEVICE: PCI_DMA_FROMDEVICE;
-#ifdef STB_SOC_WIFI
-#if (__LINUX_ARM_ARCH__ == 8)
- /* need to flush or invalidate the cache here */
- if (dir == DMA_TX) { /* to device */
- osl_cache_flush(va, size);
- } else if (dir == DMA_RX) { /* from device */
- osl_cache_inv(va, size);
- } else { /* both */
- osl_cache_flush(va, size);
- osl_cache_inv(va, size);
- }
- DMA_UNLOCK(osh);
- return virt_to_phys(va);
-#else /* (__LINUX_ARM_ARCH__ == 8) */
- map_addr = dma_map_single(osh->pdev, va, size, dir);
- DMA_UNLOCK(osh);
- return map_addr;
-#endif /* (__LINUX_ARM_ARCH__ == 8) */
-#else /* ! STB_SOC_WIFI */
- map_addr = pci_map_single(osh->pdev, va, size, dir);
-#endif /* ! STB_SOC_WIFI */
- ret = pci_dma_mapping_error(osh->pdev, map_addr);
+
+ map_addr = pci_map_single(osh->pdev, va, size, dir);
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
+ ret = pci_dma_mapping_error(osh->pdev, map_addr);
+#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 5))
+ ret = pci_dma_mapping_error(map_addr);
+#else
+ ret = 0;
+#endif
if (ret) {
printk("%s: Failed to map memory\n", __FUNCTION__);
PHYSADDRLOSET(ret_addr, 0);
PHYSADDRHISET(ret_addr, (map_addr >> 32) & 0xffffffff);
}
-#ifdef DHD_MAP_LOGGING
- osl_dma_map_logging(osh, osh->dhd_map_log, ret_addr, size);
-#endif /* DHD_MAP_LOGGING */
-
- DMA_UNLOCK(osh);
-
return ret_addr;
}
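On BCMDMA64OSL builds the dmaaddr_t returned by osl_dma_map() carries the bus address as two 32-bit halves set via PHYSADDRLOSET()/PHYSADDRHISET(); the split and the reverse join are plain shifts and masks. A standalone sketch of that packing (the struct name is illustrative):

#include <stdio.h>
#include <stdint.h>

/* Sketch: split a 64-bit bus address into lo/hi 32-bit words and join
 * them back, mirroring the PHYSADDRLOSET/PHYSADDRHISET/PHYSADDRTOULONG idea.
 */
struct dmaaddr32x2 {
	uint32_t lo;
	uint32_t hi;
};

static struct dmaaddr32x2 split_addr(uint64_t pa)
{
	struct dmaaddr32x2 a;

	a.lo = (uint32_t)(pa & 0xffffffffu);
	a.hi = (uint32_t)((pa >> 32) & 0xffffffffu);
	return a;
}

static uint64_t join_addr(struct dmaaddr32x2 a)
{
	return ((uint64_t)a.hi << 32) | a.lo;
}

int main(void)
{
	uint64_t pa = 0x1234abcd5678ef01ULL;
	struct dmaaddr32x2 a = split_addr(pa);

	printf("lo=0x%08x hi=0x%08x roundtrip ok=%d\n",
	       (unsigned)a.lo, (unsigned)a.hi, join_addr(a) == pa);
	return 0;
}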
ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC)));
- DMA_LOCK(osh);
dir = (direction == DMA_TX)? PCI_DMA_TODEVICE: PCI_DMA_FROMDEVICE;
-
-#ifdef DHD_MAP_LOGGING
- osl_dma_map_logging(osh, osh->dhd_unmap_log, pa, size);
-#endif /* DHD_MAP_LOGGING */
-
#ifdef BCMDMA64OSL
PHYSADDRTOULONG(pa, paddr);
pci_unmap_single(osh->pdev, paddr, size, dir);
-#else /* BCMDMA64OSL */
-
-#ifdef STB_SOC_WIFI
-#if (__LINUX_ARM_ARCH__ == 8)
- if (dir == DMA_TX) { /* to device */
- dma_sync_single_for_device(OSH_NULL, pa, size, DMA_TX);
- } else if (dir == DMA_RX) { /* from device */
- dma_sync_single_for_cpu(OSH_NULL, pa, size, DMA_RX);
- } else { /* both */
- dma_sync_single_for_device(OSH_NULL, pa, size, DMA_TX);
- dma_sync_single_for_cpu(OSH_NULL, pa, size, DMA_RX);
- }
-#else /* (__LINUX_ARM_ARCH__ == 8) */
- dma_unmap_single(osh->pdev, (uintptr)pa, size, dir);
-#endif /* (__LINUX_ARM_ARCH__ == 8) */
-#else /* STB_SOC_WIFI */
+#else
pci_unmap_single(osh->pdev, (uint32)pa, size, dir);
-#endif /* STB_SOC_WIFI */
-
#endif /* BCMDMA64OSL */
-
- DMA_UNLOCK(osh);
}
/* OSL function for CPU relax */
exp, basename, line);
#endif /* BCMASSERT_LOG */
+
switch (g_assert_type) {
case 0:
panic("%s", tempbuf);
break;
}
}
-#endif // endif
+#endif
void
osl_delay(uint usec)
void
osl_sleep(uint ms)
{
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 36)
if (ms < 20)
usleep_range(ms*1000, ms*1000 + 1000);
else
- msleep(ms);
+#endif
+ msleep(ms);
}
uint64
osl_sysuptime_us(void)
{
- struct osl_timespec tv;
+ struct timeval tv;
uint64 usec;
- osl_do_gettimeofday(&tv);
+ do_gettimeofday(&tv);
/* tv_usec content is fraction of a second */
usec = (uint64)tv.tv_sec * 1000000ul + tv.tv_usec;
return usec;
}
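The reverted osl_sysuptime_us() simply folds a struct timeval into a single 64-bit microsecond count. The same arithmetic in a standalone form, using gettimeofday() purely for illustration:

#include <stdio.h>
#include <stdint.h>
#include <sys/time.h>

/* Sketch: collapse a struct timeval into one 64-bit microsecond value,
 * the same arithmetic osl_sysuptime_us() performs on the kernel side.
 */
static uint64_t timeval_to_usec(const struct timeval *tv)
{
	return (uint64_t)tv->tv_sec * 1000000ull + (uint64_t)tv->tv_usec;
}

int main(void)
{
	struct timeval tv;

	if (gettimeofday(&tv, NULL) != 0)
		return 1;
	printf("now = %llu us\n", (unsigned long long)timeval_to_usec(&tv));
	return 0;
}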
-uint64
-osl_localtime_ns(void)
+
+/* Clone a packet.
+ * The pkttag contents are NOT cloned.
+ */
+void *
+#ifdef BCMDBG_CTRACE
+osl_pktdup(osl_t *osh, void *skb, int line, char *file)
+#else
+#ifdef BCM_OBJECT_TRACE
+osl_pktdup(osl_t *osh, void *skb, int line, const char *caller)
+#else
+osl_pktdup(osl_t *osh, void *skb)
+#endif /* BCM_OBJECT_TRACE */
+#endif /* BCMDBG_CTRACE */
{
- uint64 ts_nsec = 0;
+ void * p;
+
+ ASSERT(!PKTISCHAINED(skb));
+
+ /* clear the CTFBUF flag if set and map the rest of the buffer
+ * before cloning.
+ */
+ PKTCTFMAP(osh, skb);
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 36)
+ if ((p = pskb_copy((struct sk_buff *)skb, GFP_ATOMIC)) == NULL)
+#else
+ if ((p = skb_clone((struct sk_buff *)skb, GFP_ATOMIC)) == NULL)
+#endif
+ return NULL;
- ts_nsec = local_clock();
+#ifdef CTFPOOL
+ if (PKTISFAST(osh, skb)) {
+ ctfpool_t *ctfpool;
+
+ /* If the buffer allocated from the ctfpool is cloned, we can't
+ * be sure when it will be freed. Since there is a chance we will
+ * lose a buffer from our pool, we increment the refill count so
+ * the object can be allocated again later.
+ */
+ ctfpool = (ctfpool_t *)CTFPOOLPTR(osh, skb);
+ ASSERT(ctfpool != NULL);
+ PKTCLRFAST(osh, p);
+ PKTCLRFAST(osh, skb);
+ ctfpool->refills++;
+ }
+#endif /* CTFPOOL */
+
+ /* Clear PKTC context */
+ PKTSETCLINK(p, NULL);
+ PKTCCLRFLAGS(p);
+ PKTCSETCNT(p, 1);
+ PKTCSETLEN(p, PKTLEN(osh, skb));
+
+ /* skb_clone copies skb->cb.. we don't want that */
+ if (osh->pub.pkttag)
+ OSL_PKTTAG_CLEAR(p);
+
+ /* Increment the packet counter */
+ atomic_inc(&osh->cmn->pktalloced);
+#ifdef BCM_OBJECT_TRACE
+ bcm_object_trace_opr(p, BCM_OBJDBG_ADD_PKT, caller, line);
+#endif /* BCM_OBJECT_TRACE */
- return ts_nsec;
+#ifdef BCMDBG_CTRACE
+ ADD_CTRACE(osh, (struct sk_buff *)p, file, line);
+#endif
+ return (p);
}
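Note that the duplication primitive also changes with kernel version: pskb_copy() (2.6.36+) gives the duplicate its own linear header data, whereas skb_clone() shares the data buffer and only copies metadata; both copy skb->cb, which the driver then clears because its packet tag lives there. A minimal sketch of that distinction outside the OSL types (example_pktdup is an illustrative name):

#include <linux/skbuff.h>
#include <linux/string.h>
#include <linux/version.h>

/* Illustrative duplicate: pskb_copy() lets the caller edit the copy's
 * header bytes without touching the original; skb_clone() would share
 * the data buffer. In either case skb->cb is copied, so clear it if a
 * private tag is kept there.
 */
static struct sk_buff *example_pktdup(struct sk_buff *skb)
{
	struct sk_buff *copy;

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 36)
	copy = pskb_copy(skb, GFP_ATOMIC);
#else
	copy = skb_clone(skb, GFP_ATOMIC);
#endif
	if (copy)
		memset(copy->cb, 0, sizeof(copy->cb));
	return copy;
}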
-void
-osl_get_localtime(uint64 *sec, uint64 *usec)
+#ifdef BCMDBG_CTRACE
+int osl_pkt_is_frmnative(osl_t *osh, struct sk_buff *pkt)
{
- uint64 ts_nsec = 0;
- unsigned long rem_nsec = 0;
+ unsigned long flags;
+ struct sk_buff *skb;
+ int ck = FALSE;
- ts_nsec = local_clock();
- rem_nsec = do_div(ts_nsec, NSEC_PER_SEC);
- *sec = (uint64)ts_nsec;
- *usec = (uint64)(rem_nsec / MSEC_PER_SEC);
+ spin_lock_irqsave(&osh->ctrace_lock, flags);
+
+ list_for_each_entry(skb, &osh->ctrace_list, ctrace_list) {
+ if (pkt == skb) {
+ ck = TRUE;
+ break;
+ }
+ }
+
+ spin_unlock_irqrestore(&osh->ctrace_lock, flags);
+ return ck;
}
-uint64
-osl_systztime_us(void)
+void osl_ctrace_dump(osl_t *osh, struct bcmstrbuf *b)
{
- struct osl_timespec tv;
- uint64 tzusec;
+ unsigned long flags;
+ struct sk_buff *skb;
+ int idx = 0;
+ int i, j;
- osl_do_gettimeofday(&tv);
- /* apply timezone */
- tzusec = (uint64)((tv.tv_sec - (sys_tz.tz_minuteswest * 60)) *
- USEC_PER_SEC);
- tzusec += tv.tv_usec;
+ spin_lock_irqsave(&osh->ctrace_lock, flags);
- return tzusec;
+ if (b != NULL)
+ bcm_bprintf(b, " Total %d sbk not free\n", osh->ctrace_num);
+ else
+ printk(" Total %d sbk not free\n", osh->ctrace_num);
+
+ list_for_each_entry(skb, &osh->ctrace_list, ctrace_list) {
+ if (b != NULL)
+ bcm_bprintf(b, "[%d] skb %p:\n", ++idx, skb);
+ else
+ printk("[%d] skb %p:\n", ++idx, skb);
+
+ for (i = 0; i < skb->ctrace_count; i++) {
+ j = (skb->ctrace_start + i) % CTRACE_NUM;
+ if (b != NULL)
+ bcm_bprintf(b, " [%s(%d)]\n", skb->func[j], skb->line[j]);
+ else
+ printk(" [%s(%d)]\n", skb->func[j], skb->line[j]);
+ }
+ if (b != NULL)
+ bcm_bprintf(b, "\n");
+ else
+ printk("\n");
+ }
+
+ spin_unlock_irqrestore(&osh->ctrace_lock, flags);
+
+ return;
}
+#endif /* BCMDBG_CTRACE */
+
/*
* OSLREGOPS specifies the use of osl_XXX routines to be used for register access
* BINOSL selects the slightly slower function-call-based binary compatible osl.
*/
+uint
+osl_pktalloced(osl_t *osh)
+{
+ if (atomic_read(&osh->cmn->refcount) == 1)
+ return (atomic_read(&osh->cmn->pktalloced));
+ else
+ return 0;
+}
+
uint32
osl_rand(void)
{
if (!image)
return 0;
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 0))
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 0)
rdlen = kernel_read(fp, buf, len, &fp->f_pos);
#else
rdlen = kernel_read(fp, fp->f_pos, buf, len);
-#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 0)) */
-
if (rdlen > 0)
fp->f_pos += rdlen;
+#endif
return rdlen;
}
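The file-read hunk tracks the 4.14 change in kernel_read(): the new form takes the buffer before the length and advances the position through its loff_t pointer, while the old form takes the offset by value and leaves f_pos maintenance to the caller. A hedged, self-contained wrapper showing both paths (example_kernel_read is an assumed name, not a driver symbol):

#include <linux/fs.h>
#include <linux/version.h>

/* Illustrative file-read wrapper. On >= 4.14, kernel_read(file, buf,
 * count, &pos) advances *pos on success; on older kernels the argument
 * order is (file, pos, buf, count) and the caller advances the offset.
 */
static ssize_t example_kernel_read(struct file *fp, void *buf, size_t len)
{
	ssize_t rdlen;

#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 0)
	rdlen = kernel_read(fp, buf, len, &fp->f_pos);
#else
	rdlen = kernel_read(fp, fp->f_pos, buf, len);
	if (rdlen > 0)
		fp->f_pos += rdlen;
#endif
	return rdlen;
}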
/* Linux Kernel: File Operations: end */
#if (defined(STB) && defined(__arm__))
-inline void osl_pcie_rreg(osl_t *osh, ulong addr, volatile void *v, uint size)
+inline void osl_pcie_rreg(osl_t *osh, ulong addr, void *v, uint size)
{
unsigned long flags = 0;
int pci_access = 0;
+#if defined(BCM_GMAC3)
+ const int acp_war_enab = 1;
+#else /* !BCM_GMAC3 */
int acp_war_enab = ACP_WAR_ENAB();
+#endif /* !BCM_GMAC3 */
if (osh && BUSTYPE(osh->bustype) == PCI_BUS)
pci_access = 1;
switch (size) {
case sizeof(uint8):
- *(volatile uint8*)v = readb((volatile uint8*)(addr));
+ *(uint8*)v = readb((volatile uint8*)(addr));
break;
case sizeof(uint16):
- *(volatile uint16*)v = readw((volatile uint16*)(addr));
+ *(uint16*)v = readw((volatile uint16*)(addr));
break;
case sizeof(uint32):
- *(volatile uint32*)v = readl((volatile uint32*)(addr));
+ *(uint32*)v = readl((volatile uint32*)(addr));
break;
case sizeof(uint64):
- *(volatile uint64*)v = *((volatile uint64*)(addr));
+ *(uint64*)v = *((volatile uint64*)(addr));
break;
}
if (pci_access && acp_war_enab)
spin_unlock_irqrestore(&l2x0_reg_lock, flags);
}
-#endif // endif
-
-#if defined(BCM_BACKPLANE_TIMEOUT)
-inline void osl_bpt_rreg(osl_t *osh, ulong addr, volatile void *v, uint size)
-{
- bool poll_timeout = FALSE;
- static int in_si_clear = FALSE;
-
- switch (size) {
- case sizeof(uint8):
- *(volatile uint8*)v = readb((volatile uint8*)(addr));
- if (*(volatile uint8*)v == 0xff)
- poll_timeout = TRUE;
- break;
- case sizeof(uint16):
- *(volatile uint16*)v = readw((volatile uint16*)(addr));
- if (*(volatile uint16*)v == 0xffff)
- poll_timeout = TRUE;
- break;
- case sizeof(uint32):
- *(volatile uint32*)v = readl((volatile uint32*)(addr));
- if (*(volatile uint32*)v == 0xffffffff)
- poll_timeout = TRUE;
- break;
- case sizeof(uint64):
- *(volatile uint64*)v = *((volatile uint64*)(addr));
- if (*(volatile uint64*)v == 0xffffffffffffffff)
- poll_timeout = TRUE;
- break;
- }
-
- if (osh && osh->sih && (in_si_clear == FALSE) && poll_timeout && osh->bpt_cb) {
- in_si_clear = TRUE;
- osh->bpt_cb((void *)osh->sih, (void *)addr);
- in_si_clear = FALSE;
- }
-}
-#endif /* BCM_BACKPLANE_TIMEOUT */
+#endif
#ifdef BCM_SECURE_DMA
static void *
#if defined(__ARM_ARCH_7A__)
addr = vmap(map, size >> PAGE_SHIFT, VM_MAP,
pgprot_noncached(__pgprot(PAGE_KERNEL)));
-#endif // endif
+#endif
if (isdecr) {
osh->contig_delta_va_pa = ((uint8 *)addr - page_to_phys(page));
}
return ret;
}
+
static void
osl_sec_dma_deinit_elem_mem_block(osl_t *osh, size_t mbsize, int max, void *sec_list_base)
{
{
sec_mem_elem_t *sec_mem_elem = NULL;
-#ifdef NOT_YET
- if (size <= 512 && osh->sec_list_512) {
- sec_mem_elem = osh->sec_list_512;
- osh->sec_list_512 = sec_mem_elem->next;
- }
- else if (size <= 2048 && osh->sec_list_2048) {
- sec_mem_elem = osh->sec_list_2048;
- osh->sec_list_2048 = sec_mem_elem->next;
- }
- else
-#else
ASSERT(osh->sec_list_4096);
sec_mem_elem = osh->sec_list_4096;
osh->sec_list_4096 = sec_mem_elem->next;
-#endif /* NOT_YET */
sec_mem_elem->next = NULL;
{
sec_mem_elem->dma_handle = 0x0;
sec_mem_elem->va = NULL;
-#ifdef NOT_YET
- if (sec_mem_elem->size == 512) {
- sec_mem_elem->next = osh->sec_list_512;
- osh->sec_list_512 = sec_mem_elem;
- } else if (sec_mem_elem->size == 2048) {
- sec_mem_elem->next = osh->sec_list_2048;
- osh->sec_list_2048 = sec_mem_elem;
- } else if (sec_mem_elem->size == 4096) {
-#endif /* NOT_YET */
sec_mem_elem->next = osh->sec_list_4096;
osh->sec_list_4096 = sec_mem_elem;
-#ifdef NOT_YET
- }
- else
- printf("%s free failed size=%d\n", __FUNCTION__, sec_mem_elem->size);
-#endif /* NOT_YET */
}
static sec_mem_elem_t * BCMFASTPATH
uint buflen = 0;
dma_addr_t dma_handle = 0x0;
uint loffset;
-#ifdef NOT_YET
- int *fragva;
- struct sk_buff *skb;
- int i = 0;
-#endif /* NOT_YET */
ASSERT((direction == DMA_RX) || (direction == DMA_TX));
sec_mem_elem = osl_sec_dma_alloc_mem_elem(osh, va, size, direction, ptr_cma_info, offset);
if (direction == DMA_TX) {
memcpy((uint8*)pa_cma_kmap_va+offset, va, size);
-#ifdef NOT_YET
- if (p == NULL) {
-
- memcpy(pa_cma_kmap_va, va, size);
- /* prhex("Txpkt",pa_cma_kmap_va, size); */
- } else {
- for (skb = (struct sk_buff *)p; skb != NULL; skb = PKTNEXT(osh, skb)) {
- if (skb_is_nonlinear(skb)) {
-
- for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
- skb_frag_t *f = &skb_shinfo(skb)->frags[i];
- fragva = kmap_atomic(skb_frag_page(f));
- pa_cma_kmap_va = ((uint8 *)pa_cma_kmap_va + buflen);
- memcpy((pa_cma_kmap_va),
- (fragva + f->page_offset), skb_frag_size(f));
- kunmap_atomic(fragva);
- buflen += skb_frag_size(f);
- }
- } else {
-
- pa_cma_kmap_va = ((uint8 *)pa_cma_kmap_va + buflen);
- memcpy(pa_cma_kmap_va, skb->data, skb->len);
- buflen += skb->len;
- }
- }
-
- }
-#endif /* NOT_YET */
if (dmah) {
dmah->nsegs = 1;
dmah->origsize = buflen;
void *p, hnddma_seg_map_t *map, void *ptr_cma_info, uint offset)
{
sec_mem_elem_t *sec_mem_elem;
-#ifdef NOT_YET
- struct page *pa_cma_page;
-#endif // endif
void *pa_cma_kmap_va = NULL;
uint buflen = 0;
dma_addr_t pa_cma;
va = (uint8 *)va - offset;
pa_cma = sec_mem_elem->pa_cma;
-#ifdef NOT_YET
- pa_cma_page = sec_mem_elem->pa_cma_page;
-#endif // endif
if (direction == DMA_RX) {
memcpy(va, pa_cma_kmap_va, size);
/* kunmap_atomic(pa_cma_kmap_va); */
}
-#ifdef NOT_YET
- else {
- buflen = 0;
- for (skb = (struct sk_buff *)p; (buflen < size) &&
- (skb != NULL); skb = skb->next) {
- if (skb_is_nonlinear(skb)) {
- pa_cma_kmap_va = kmap_atomic(pa_cma_page);
- for (i = 0; (buflen < size) &&
- (i < skb_shinfo(skb)->nr_frags); i++) {
- skb_frag_t *f = &skb_shinfo(skb)->frags[i];
- cpuaddr = kmap_atomic(skb_frag_page(f));
- pa_cma_kmap_va = ((uint8 *)pa_cma_kmap_va + buflen);
- memcpy((cpuaddr + f->page_offset),
- pa_cma_kmap_va, skb_frag_size(f));
- kunmap_atomic(cpuaddr);
- buflen += skb_frag_size(f);
- }
- kunmap_atomic(pa_cma_kmap_va);
- } else {
- pa_cma_kmap_va = kmap_atomic(pa_cma_page);
- pa_cma_kmap_va = ((uint8 *)pa_cma_kmap_va + buflen);
- memcpy(skb->data, pa_cma_kmap_va, skb->len);
- kunmap_atomic(pa_cma_kmap_va);
- buflen += skb->len;
- }
-
- }
-
- }
-#endif /* NOT YET */
} else {
dma_unmap_page(OSH_NULL, pa_cma, size+offset, DMA_TO_DEVICE);
}
printf("%s:Error: va = 0x%p pa = 0x%lx size = %d\n", __FUNCTION__,
va, (ulong)pa, size);
}
-#endif /* BCM_SECURE_DMA */
-/* timer apis */
-/* Note: All timer api's are thread unsafe and should be protected with locks by caller */
+#endif /* BCM_SECURE_DMA */
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 15, 0)
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 6, 0) && defined(TSQ_MULTIPLIER)
+#include <linux/kallsyms.h>
+#include <net/sock.h>
void
-timer_cb_compat(struct timer_list *tl)
+osl_pkt_orphan_partial(struct sk_buff *skb, int tsq)
{
- timer_list_compat_t *t = container_of(tl, timer_list_compat_t, timer);
- t->callback((ulong)t->arg);
+ uint32 fraction;
+ static void *p_tcp_wfree = NULL;
+
+ if (tsq <= 0)
+ return;
+
+ if (!skb->destructor || skb->destructor == sock_wfree)
+ return;
+
+ if (unlikely(!p_tcp_wfree)) {
+ char sym[KSYM_SYMBOL_LEN];
+ sprint_symbol(sym, (unsigned long)skb->destructor);
+ sym[9] = 0;
+ if (!strcmp(sym, "tcp_wfree"))
+ p_tcp_wfree = skb->destructor;
+ else
+ return;
+ }
+
+ if (unlikely(skb->destructor != p_tcp_wfree || !skb->sk))
+ return;
+
+ /* Deduct a portion of the skb truesize from the socket's
+ * sk_wmem_alloc so that more skbs can be allocated for this
+ * socket, giving extra cushion to meet WiFi device requirements.
+ */
+ fraction = skb->truesize * (tsq - 1) / tsq;
+ skb->truesize -= fraction;
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 13, 0)
+ atomic_sub(fraction, &skb->sk->sk_wmem_alloc.refs);
+#else
+ atomic_sub(fraction, &skb->sk->sk_wmem_alloc);
+#endif /* LINUX_VERSION >= 4.13.0 */
+ skb_orphan(skb);
}
-#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 15, 0) */
+#endif /* LINUX_VERSION >= 3.6.0 && TSQ_MULTIPLIER */
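The comment above is the TCP Small Queues workaround: shrinking skb->truesize by (tsq - 1)/tsq and orphaning the skb keeps a slow WLAN queue from exhausting the socket's sk_wmem_alloc budget. A hedged sketch of how a TX path might call it (example_tx_enqueue is hypothetical; TSQ_MULTIPLIER is a driver build option, and the prototype normally comes from the driver's osl.h):

#include <linux/skbuff.h>

/* Prototype as added above (normally provided by the driver headers). */
extern void osl_pkt_orphan_partial(struct sk_buff *skb, int tsq);

#ifdef TSQ_MULTIPLIER
/* Hypothetical TX enqueue path: before a TCP skb is parked on a long
 * driver queue, loosen the TCP Small Queues budget so the stack keeps
 * feeding the interface instead of throttling on sk_wmem_alloc.
 */
static void example_tx_enqueue(struct sk_buff *skb)
{
	osl_pkt_orphan_partial(skb, TSQ_MULTIPLIER);
	/* ... hand the skb to the firmware/DMA ring here ... */
}
#endif /* TSQ_MULTIPLIER */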
+
+/* timer apis */
+/* Note: All timer api's are thread unsafe and should be protected with locks by caller */
+#ifdef REPORT_FATAL_TIMEOUTS
osl_timer_t *
osl_timer_init(osl_t *osh, const char *name, void (*fn)(void *arg), void *arg)
{
osl_timer_t *t;
BCM_REFERENCE(fn);
if ((t = MALLOCZ(NULL, sizeof(osl_timer_t))) == NULL) {
- printk(KERN_ERR "osl_timer_init: out of memory, malloced %d bytes\n",
- (int)sizeof(osl_timer_t));
+ printk(KERN_ERR "osl_timer_init: malloced failed for osl_timer_t\n");
return (NULL);
}
bzero(t, sizeof(osl_timer_t));
if ((t->timer = MALLOCZ(NULL, sizeof(struct timer_list))) == NULL) {
- printf("osl_timer_init: malloc failed\n");
+ printk(KERN_ERR "osl_timer_init: malloc failed\n");
MFREE(NULL, t, sizeof(osl_timer_t));
return (NULL);
}
+ t->timer->data = (ulong)arg;
+ t->timer->function = (linux_timer_fn)fn;
t->set = TRUE;
- init_timer_compat(t->timer, (linux_timer_fn)fn, arg);
+ init_timer(t->timer);
return (t);
}
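osl_timer_init() is reverted here to the pre-4.15 timer_list layout: the callback and its unsigned long cookie are stored directly in timer->function and timer->data before init_timer() is called. This only builds on kernels older than 4.15, where those fields still exist; on 4.15+ timer_setup()/from_timer() would be used instead. A small sketch under that assumption (example_* names are illustrative):

#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/version.h>

#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 15, 0)
/* Illustrative pre-4.15 one-shot timer: the callback receives the
 * unsigned long cookie stored in timer->data.
 */
static struct timer_list example_timer;

static void example_timeout(unsigned long arg)
{
	pr_info("timer fired, arg=%lu\n", arg);
}

static void example_arm(unsigned long arg, unsigned int ms)
{
	init_timer(&example_timer);
	example_timer.function = example_timeout;
	example_timer.data = arg;
	example_timer.expires = jiffies + msecs_to_jiffies(ms);
	add_timer(&example_timer);
}
#endif /* < 4.15 */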
void
osl_timer_add(osl_t *osh, osl_timer_t *t, uint32 ms, bool periodic)
{
+
if (t == NULL) {
printf("%s: Timer handle is NULL\n", __FUNCTION__);
return;
if (periodic) {
printf("Periodic timers are not supported by Linux timer apis\n");
}
- timer_expires(t->timer) = jiffies + ms*HZ/1000;
+ t->timer->expires = jiffies + ms*HZ/1000;
add_timer(t->timer);
void
osl_timer_update(osl_t *osh, osl_timer_t *t, uint32 ms, bool periodic)
{
+
if (t == NULL) {
printf("%s: Timer handle is NULL\n", __FUNCTION__);
return;
printf("Periodic timers are not supported by Linux timer apis\n");
}
t->set = TRUE;
- timer_expires(t->timer) = jiffies + ms*HZ/1000;
+ t->timer->expires = jiffies + ms*HZ/1000;
- mod_timer(t->timer, timer_expires(t->timer));
+ mod_timer(t->timer, t->timer->expires);
return;
}
}
return (TRUE);
}
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 0))
-int
-kernel_read_compat(struct file *file, loff_t offset, char *addr, unsigned long count)
-{
- return (int)kernel_read(file, addr, (size_t)count, &offset);
-}
-#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 0)) */
-
-void *
-osl_spin_lock_init(osl_t *osh)
-{
- /* Adding 4 bytes since the sizeof(spinlock_t) could be 0 */
- /* if CONFIG_SMP and CONFIG_DEBUG_SPINLOCK are not defined */
- /* and this results in kernel asserts in internal builds */
- spinlock_t * lock = MALLOC(osh, sizeof(spinlock_t) + 4);
- if (lock)
- spin_lock_init(lock);
- return ((void *)lock);
-}
-
-void
-osl_spin_lock_deinit(osl_t *osh, void *lock)
-{
- if (lock)
- MFREE(osh, lock, sizeof(spinlock_t) + 4);
-}
-
-unsigned long
-osl_spin_lock(void *lock)
-{
- unsigned long flags = 0;
-
- if (lock)
- spin_lock_irqsave((spinlock_t *)lock, flags);
-
- return flags;
-}
-
-void
-osl_spin_unlock(void *lock, unsigned long flags)
-{
- if (lock)
- spin_unlock_irqrestore((spinlock_t *)lock, flags);
-}
-
-#ifdef USE_DMA_LOCK
-static void
-osl_dma_lock(osl_t *osh)
-{
- if (likely(in_irq() || irqs_disabled())) {
- spin_lock(&osh->dma_lock);
- } else {
- spin_lock_bh(&osh->dma_lock);
- osh->dma_lock_bh = TRUE;
- }
-}
-
-static void
-osl_dma_unlock(osl_t *osh)
-{
- if (unlikely(osh->dma_lock_bh)) {
- osh->dma_lock_bh = FALSE;
- spin_unlock_bh(&osh->dma_lock);
- } else {
- spin_unlock(&osh->dma_lock);
- }
-}
-
-static void
-osl_dma_lock_init(osl_t *osh)
-{
- spin_lock_init(&osh->dma_lock);
- osh->dma_lock_bh = FALSE;
-}
-#endif /* USE_DMA_LOCK */
-
-void
-osl_do_gettimeofday(struct osl_timespec *ts)
-{
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 6, 0)
- struct timespec64 curtime;
-#elif LINUX_VERSION_CODE >= KERNEL_VERSION(5, 0, 0)
- struct timespec curtime;
-#else
- struct timeval curtime;
-#endif
-
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 6, 0)
- ktime_get_real_ts64(&curtime);
- ts->tv_nsec = curtime.tv_nsec;
- ts->tv_usec = curtime.tv_nsec / 1000;
-#elif LINUX_VERSION_CODE >= KERNEL_VERSION(5, 0, 0)
- getnstimeofday(&curtime);
- ts->tv_nsec = curtime.tv_nsec;
- ts->tv_usec = curtime.tv_nsec / 1000;
-#else
- do_gettimeofday(&curtime);
- ts->tv_usec = curtime.tv_usec;
- ts->tv_nsec = curtime.tv_usec * 1000;
-#endif
- ts->tv_sec = curtime.tv_sec;
-}
-
-void
-osl_get_monotonic_boottime(struct osl_timespec *ts)
-{
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 6, 0)
- struct timespec64 curtime;
-#else
- struct timespec curtime;
-#endif
-
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 6, 0)
- curtime = ktime_to_timespec64(ktime_get_boottime());
-#elif LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
- curtime = ktime_to_timespec(ktime_get_boottime());
-#else
- get_monotonic_boottime(&curtime);
#endif
- ts->tv_sec = curtime.tv_sec;
- ts->tv_nsec = curtime.tv_nsec;
- ts->tv_usec = curtime.tv_nsec / 1000;
-}
\ No newline at end of file
+++ /dev/null
-/*
- * Private header file for Linux OS Independent Layer
- *
- * Copyright (C) 1999-2019, Broadcom.
- *
- * Unless you and Broadcom execute a separate written software license
- * agreement governing use of this software, this software is licensed to you
- * under the terms of the GNU General Public License version 2 (the "GPL"),
- * available at http://www.broadcom.com/licenses/GPLv2.php, with the
- * following added to such license:
- *
- * As a special exception, the copyright holders of this software give you
- * permission to link this software with independent modules, and to copy and
- * distribute the resulting executable under terms of your choice, provided that
- * you also meet, for each linked independent module, the terms and conditions of
- * the license of that module. An independent module is a module which is not
- * derived from this software. The special exception does not apply to any
- * modifications of the software.
- *
- * Notwithstanding the above, under no circumstances may you combine this
- * software in any way with any other Broadcom software provided under a license
- * other than the GPL, without Broadcom's express prior written consent.
- *
- *
- * <<Broadcom-WL-IPTag/Open:>>
- *
- * $Id: linux_osl_priv.h 794159 2018-12-12 07:41:14Z $
- */
-
-#ifndef _LINUX_OSL_PRIV_H_
-#define _LINUX_OSL_PRIV_H_
-
-#define OS_HANDLE_MAGIC 0x1234abcd /* Magic # to recognize osh */
-#define BCM_MEM_FILENAME_LEN 24 /* Mem. filename length */
-
-/* dependancy check */
-#if !defined(BCMPCIE) && defined(DHD_USE_STATIC_CTRLBUF)
-#error "DHD_USE_STATIC_CTRLBUF suppored PCIE target only"
-#endif /* !BCMPCIE && DHD_USE_STATIC_CTRLBUF */
-
-#ifdef CONFIG_DHD_USE_STATIC_BUF
-#ifdef DHD_USE_STATIC_CTRLBUF
-#define DHD_SKB_1PAGE_BUFSIZE (PAGE_SIZE*1)
-#define DHD_SKB_2PAGE_BUFSIZE (PAGE_SIZE*2)
-#define DHD_SKB_4PAGE_BUFSIZE (PAGE_SIZE*4)
-
-#define PREALLOC_FREE_MAGIC 0xFEDC
-#define PREALLOC_USED_MAGIC 0xFCDE
-#else
-#define DHD_SKB_HDRSIZE 336
-#define DHD_SKB_1PAGE_BUFSIZE ((PAGE_SIZE*1)-DHD_SKB_HDRSIZE)
-#define DHD_SKB_2PAGE_BUFSIZE ((PAGE_SIZE*2)-DHD_SKB_HDRSIZE)
-#define DHD_SKB_4PAGE_BUFSIZE ((PAGE_SIZE*4)-DHD_SKB_HDRSIZE)
-#endif /* DHD_USE_STATIC_CTRLBUF */
-
-#define STATIC_BUF_MAX_NUM 16
-#define STATIC_BUF_SIZE (PAGE_SIZE*2)
-#define STATIC_BUF_TOTAL_LEN (STATIC_BUF_MAX_NUM * STATIC_BUF_SIZE)
-
-typedef struct bcm_static_buf {
- spinlock_t static_lock;
- unsigned char *buf_ptr;
- unsigned char buf_use[STATIC_BUF_MAX_NUM];
-} bcm_static_buf_t;
-
-extern bcm_static_buf_t *bcm_static_buf;
-
-#ifdef DHD_USE_STATIC_CTRLBUF
-#define STATIC_PKT_4PAGE_NUM 0
-#define DHD_SKB_MAX_BUFSIZE DHD_SKB_2PAGE_BUFSIZE
-#elif defined(ENHANCED_STATIC_BUF)
-#define STATIC_PKT_4PAGE_NUM 1
-#define DHD_SKB_MAX_BUFSIZE DHD_SKB_4PAGE_BUFSIZE
-#else
-#define STATIC_PKT_4PAGE_NUM 0
-#define DHD_SKB_MAX_BUFSIZE DHD_SKB_2PAGE_BUFSIZE
-#endif /* DHD_USE_STATIC_CTRLBUF */
-
-#ifdef DHD_USE_STATIC_CTRLBUF
-#define STATIC_PKT_1PAGE_NUM 0
-#define STATIC_PKT_2PAGE_NUM 128
-#else
-#define STATIC_PKT_1PAGE_NUM 8
-#define STATIC_PKT_2PAGE_NUM 8
-#endif /* DHD_USE_STATIC_CTRLBUF */
-
-#define STATIC_PKT_1_2PAGE_NUM \
- ((STATIC_PKT_1PAGE_NUM) + (STATIC_PKT_2PAGE_NUM))
-#define STATIC_PKT_MAX_NUM \
- ((STATIC_PKT_1_2PAGE_NUM) + (STATIC_PKT_4PAGE_NUM))
-
-typedef struct bcm_static_pkt {
-#ifdef DHD_USE_STATIC_CTRLBUF
- struct sk_buff *skb_8k[STATIC_PKT_2PAGE_NUM];
- unsigned char pkt_invalid[STATIC_PKT_2PAGE_NUM];
- spinlock_t osl_pkt_lock;
- uint32 last_allocated_index;
-#else
- struct sk_buff *skb_4k[STATIC_PKT_1PAGE_NUM];
- struct sk_buff *skb_8k[STATIC_PKT_2PAGE_NUM];
-#ifdef ENHANCED_STATIC_BUF
- struct sk_buff *skb_16k;
-#endif /* ENHANCED_STATIC_BUF */
- struct semaphore osl_pkt_sem;
-#endif /* DHD_USE_STATIC_CTRLBUF */
- unsigned char pkt_use[STATIC_PKT_MAX_NUM];
-} bcm_static_pkt_t;
-
-extern bcm_static_pkt_t *bcm_static_skb;
-#endif /* CONFIG_DHD_USE_STATIC_BUF */
-
-typedef struct bcm_mem_link {
- struct bcm_mem_link *prev;
- struct bcm_mem_link *next;
- uint size;
- int line;
- void *osh;
- char file[BCM_MEM_FILENAME_LEN];
-} bcm_mem_link_t;
-
-struct osl_cmn_info {
- atomic_t malloced;
- atomic_t pktalloced; /* Number of allocated packet buffers */
- spinlock_t dbgmem_lock;
- bcm_mem_link_t *dbgmem_list;
- bcm_mem_link_t *dbgvmem_list;
- spinlock_t pktalloc_lock;
- atomic_t refcount; /* Number of references to this shared structure. */
-};
-typedef struct osl_cmn_info osl_cmn_t;
-
-#if defined(BCM_BACKPLANE_TIMEOUT)
-typedef uint32 (*bpt_cb_fn)(void *ctx, void *addr);
-#endif /* BCM_BACKPLANE_TIMEOUT */
-
-struct osl_info {
- osl_pubinfo_t pub;
- uint32 flags; /* If specific cases to be handled in the OSL */
- uint magic;
- void *pdev;
- uint failed;
- uint bustype;
- osl_cmn_t *cmn; /* Common OSL related data shred between two OSH's */
-
- void *bus_handle;
-#ifdef BCM_SECURE_DMA
-#ifdef NOT_YET
- struct sec_mem_elem *sec_list_512;
- struct sec_mem_elem *sec_list_base_512;
- struct sec_mem_elem *sec_list_2048;
- struct sec_mem_elem *sec_list_base_2048;
-#endif /* NOT_YET */
- struct sec_mem_elem *sec_list_4096;
- struct sec_mem_elem *sec_list_base_4096;
- phys_addr_t contig_base;
- void *contig_base_va;
- phys_addr_t contig_base_alloc;
- void *contig_base_alloc_va;
- phys_addr_t contig_base_alloc_coherent;
- void *contig_base_alloc_coherent_va;
- void *contig_base_coherent_va;
- void *contig_delta_va_pa;
- struct {
- phys_addr_t pa;
- void *va;
- bool avail;
- } sec_cma_coherent[SEC_CMA_COHERENT_MAX];
- int stb_ext_params;
-#endif /* BCM_SECURE_DMA */
-#if defined(BCM_BACKPLANE_TIMEOUT)
- bpt_cb_fn bpt_cb;
- void *sih;
-#endif /* BCM_BACKPLANE_TIMEOUT */
-#ifdef USE_DMA_LOCK
- spinlock_t dma_lock;
- bool dma_lock_bh;
-#endif /* USE_DMA_LOCK */
-#ifdef DHD_MAP_LOGGING
- void *dhd_map_log;
- void *dhd_unmap_log;
-#endif /* DHD_MAP_LOGGING */
-};
-
-#endif /* _LINUX_OSL_PRIV_H_ */
+++ /dev/null
-/*
- * Linux Packet (skb) interface
- *
- * Copyright (C) 1999-2019, Broadcom.
- *
- * Unless you and Broadcom execute a separate written software license
- * agreement governing use of this software, this software is licensed to you
- * under the terms of the GNU General Public License version 2 (the "GPL"),
- * available at http://www.broadcom.com/licenses/GPLv2.php, with the
- * following added to such license:
- *
- * As a special exception, the copyright holders of this software give you
- * permission to link this software with independent modules, and to copy and
- * distribute the resulting executable under terms of your choice, provided that
- * you also meet, for each linked independent module, the terms and conditions of
- * the license of that module. An independent module is a module which is not
- * derived from this software. The special exception does not apply to any
- * modifications of the software.
- *
- * Notwithstanding the above, under no circumstances may you combine this
- * software in any way with any other Broadcom software provided under a license
- * other than the GPL, without Broadcom's express prior written consent.
- *
- *
- * <<Broadcom-WL-IPTag/Open:>>
- *
- * $Id: linux_pkt.c 769682 2018-06-27 07:29:55Z $
- */
-
-#include <typedefs.h>
-#include <bcmendian.h>
-#include <linuxver.h>
-#include <bcmdefs.h>
-
-#include <linux/random.h>
-
-#include <osl.h>
-#include <bcmutils.h>
-#include <pcicfg.h>
-#include <dngl_stats.h>
-#include <dhd.h>
-
-#include <linux/fs.h>
-#include "linux_osl_priv.h"
-
-#ifdef CONFIG_DHD_USE_STATIC_BUF
-
-bcm_static_buf_t *bcm_static_buf = 0;
-bcm_static_pkt_t *bcm_static_skb = 0;
-
-void* wifi_platform_prealloc(void *adapter, int section, unsigned long size);
-#endif /* CONFIG_DHD_USE_STATIC_BUF */
-
-#ifdef BCM_OBJECT_TRACE
-/* don't clear the first 4 byte that is the pkt sn */
-#define OSL_PKTTAG_CLEAR(p) \
-do { \
- struct sk_buff *s = (struct sk_buff *)(p); \
- uint tagsz = sizeof(s->cb); \
- ASSERT(OSL_PKTTAG_SZ <= tagsz); \
- memset(s->cb + 4, 0, tagsz - 4); \
-} while (0)
-#else
-#define OSL_PKTTAG_CLEAR(p) \
-do { \
- struct sk_buff *s = (struct sk_buff *)(p); \
- uint tagsz = sizeof(s->cb); \
- ASSERT(OSL_PKTTAG_SZ <= tagsz); \
- memset(s->cb, 0, tagsz); \
-} while (0)
-#endif /* BCM_OBJECT_TRACE */
-
-int osl_static_mem_init(osl_t *osh, void *adapter)
-{
-#ifdef CONFIG_DHD_USE_STATIC_BUF
- if (!bcm_static_buf && adapter) {
- if (!(bcm_static_buf = (bcm_static_buf_t *)wifi_platform_prealloc(adapter,
- DHD_PREALLOC_OSL_BUF, STATIC_BUF_SIZE + STATIC_BUF_TOTAL_LEN))) {
- printk("can not alloc static buf!\n");
- bcm_static_skb = NULL;
- ASSERT(osh->magic == OS_HANDLE_MAGIC);
- return -ENOMEM;
- } else {
- printk("succeed to alloc static buf\n");
- }
-
- spin_lock_init(&bcm_static_buf->static_lock);
-
- bcm_static_buf->buf_ptr = (unsigned char *)bcm_static_buf + STATIC_BUF_SIZE;
- }
-
-#if defined(BCMSDIO) || defined(DHD_USE_STATIC_CTRLBUF)
- if (!bcm_static_skb && adapter) {
- int i;
- void *skb_buff_ptr = 0;
- bcm_static_skb = (bcm_static_pkt_t *)((char *)bcm_static_buf + 2048);
- skb_buff_ptr = wifi_platform_prealloc(adapter, DHD_PREALLOC_SKB_BUF, 0);
- if (!skb_buff_ptr) {
- printk("cannot alloc static buf!\n");
- bcm_static_buf = NULL;
- bcm_static_skb = NULL;
- ASSERT(osh->magic == OS_HANDLE_MAGIC);
- return -ENOMEM;
- }
-
- bcopy(skb_buff_ptr, bcm_static_skb, sizeof(struct sk_buff *) *
- (STATIC_PKT_MAX_NUM));
- for (i = 0; i < STATIC_PKT_MAX_NUM; i++) {
- bcm_static_skb->pkt_use[i] = 0;
- }
-
-#ifdef DHD_USE_STATIC_CTRLBUF
- spin_lock_init(&bcm_static_skb->osl_pkt_lock);
- bcm_static_skb->last_allocated_index = 0;
-#else
- sema_init(&bcm_static_skb->osl_pkt_sem, 1);
-#endif /* DHD_USE_STATIC_CTRLBUF */
- }
-#endif /* BCMSDIO || DHD_USE_STATIC_CTRLBUF */
-#endif /* CONFIG_DHD_USE_STATIC_BUF */
-
- return 0;
-}
-
-int osl_static_mem_deinit(osl_t *osh, void *adapter)
-{
-#ifdef CONFIG_DHD_USE_STATIC_BUF
- if (bcm_static_buf) {
- bcm_static_buf = 0;
- }
-#ifdef BCMSDIO
- if (bcm_static_skb) {
- bcm_static_skb = 0;
- }
-#endif /* BCMSDIO */
-#endif /* CONFIG_DHD_USE_STATIC_BUF */
- return 0;
-}
-
-/*
- * To avoid ACP latency, a fwder buf will be sent directly to DDR using
- * DDR aliasing into non-ACP address space. Such Fwder buffers must be
- * explicitly managed from a coherency perspective.
- */
-static inline void BCMFASTPATH
-osl_fwderbuf_reset(osl_t *osh, struct sk_buff *skb)
-{
-}
-
-static struct sk_buff * BCMFASTPATH
-osl_alloc_skb(osl_t *osh, unsigned int len)
-{
- struct sk_buff *skb;
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)
- gfp_t flags = (in_atomic() || irqs_disabled()) ? GFP_ATOMIC : GFP_KERNEL;
-#ifdef DHD_USE_ATOMIC_PKTGET
- flags = GFP_ATOMIC;
-#endif /* DHD_USE_ATOMIC_PKTGET */
- skb = __dev_alloc_skb(len, flags);
-#else
- skb = dev_alloc_skb(len);
-#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25) */
-
- return skb;
-}
-
-/* Convert a driver packet to native(OS) packet
- * In the process, packettag is zeroed out before sending up
- * IP code depends on skb->cb to be setup correctly with various options
- * In our case, that means it should be 0
- */
-struct sk_buff * BCMFASTPATH
-osl_pkt_tonative(osl_t *osh, void *pkt)
-{
- struct sk_buff *nskb;
-
- if (osh->pub.pkttag)
- OSL_PKTTAG_CLEAR(pkt);
-
- /* Decrement the packet counter */
- for (nskb = (struct sk_buff *)pkt; nskb; nskb = nskb->next) {
- atomic_sub(PKTISCHAINED(nskb) ? PKTCCNT(nskb) : 1, &osh->cmn->pktalloced);
-
- }
- return (struct sk_buff *)pkt;
-}
-
-/* Convert a native(OS) packet to driver packet.
- * In the process, native packet is destroyed, there is no copying
- * Also, a packettag is zeroed out
- */
-void * BCMFASTPATH
-osl_pkt_frmnative(osl_t *osh, void *pkt)
-{
- struct sk_buff *cskb;
- struct sk_buff *nskb;
- unsigned long pktalloced = 0;
-
- if (osh->pub.pkttag)
- OSL_PKTTAG_CLEAR(pkt);
-
- /* walk the PKTCLINK() list */
- for (cskb = (struct sk_buff *)pkt;
- cskb != NULL;
- cskb = PKTISCHAINED(cskb) ? PKTCLINK(cskb) : NULL) {
-
- /* walk the pkt buffer list */
- for (nskb = cskb; nskb; nskb = nskb->next) {
-
- /* Increment the packet counter */
- pktalloced++;
-
- /* clean the 'prev' pointer
- * Kernel 3.18 is leaving skb->prev pointer set to skb
- * to indicate a non-fragmented skb
- */
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 18, 0))
- nskb->prev = NULL;
-#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 18, 0) */
-
- }
- }
-
- /* Increment the packet counter */
- atomic_add(pktalloced, &osh->cmn->pktalloced);
-
- return (void *)pkt;
-}
-
-/* Return a new packet. zero out pkttag */
-void * BCMFASTPATH
-#ifdef BCM_OBJECT_TRACE
-linux_pktget(osl_t *osh, uint len, int line, const char *caller)
-#else
-linux_pktget(osl_t *osh, uint len)
-#endif /* BCM_OBJECT_TRACE */
-{
- struct sk_buff *skb;
- uchar num = 0;
- if (lmtest != FALSE) {
- get_random_bytes(&num, sizeof(uchar));
- if ((num + 1) <= (256 * lmtest / 100))
- return NULL;
- }
-
- if ((skb = osl_alloc_skb(osh, len))) {
- skb->tail += len;
- skb->len += len;
- skb->priority = 0;
-
- atomic_inc(&osh->cmn->pktalloced);
-#ifdef BCM_OBJECT_TRACE
- bcm_object_trace_opr(skb, BCM_OBJDBG_ADD_PKT, caller, line);
-#endif /* BCM_OBJECT_TRACE */
- }
-
- return ((void*) skb);
-}
-
-/* Free the driver packet. Free the tag if present */
-void BCMFASTPATH
-#ifdef BCM_OBJECT_TRACE
-linux_pktfree(osl_t *osh, void *p, bool send, int line, const char *caller)
-#else
-linux_pktfree(osl_t *osh, void *p, bool send)
-#endif /* BCM_OBJECT_TRACE */
-{
- struct sk_buff *skb, *nskb;
- if (osh == NULL)
- return;
-
- skb = (struct sk_buff*) p;
-
- if (send) {
- if (osh->pub.tx_fn) {
- osh->pub.tx_fn(osh->pub.tx_ctx, p, 0);
- }
- } else {
- if (osh->pub.rx_fn) {
- osh->pub.rx_fn(osh->pub.rx_ctx, p);
- }
- }
-
- PKTDBG_TRACE(osh, (void *) skb, PKTLIST_PKTFREE);
-
-#if defined(CONFIG_DHD_USE_STATIC_BUF) && defined(DHD_USE_STATIC_CTRLBUF)
- if (skb && (skb->mac_len == PREALLOC_USED_MAGIC)) {
- printk("%s: pkt %p is from static pool\n",
- __FUNCTION__, p);
- dump_stack();
- return;
- }
-
- if (skb && (skb->mac_len == PREALLOC_FREE_MAGIC)) {
- printk("%s: pkt %p is from static pool and not in used\n",
- __FUNCTION__, p);
- dump_stack();
- return;
- }
-#endif /* CONFIG_DHD_USE_STATIC_BUF && DHD_USE_STATIC_CTRLBUF */
-
- /* perversion: we use skb->next to chain multi-skb packets */
- while (skb) {
- nskb = skb->next;
- skb->next = NULL;
-
-#ifdef BCM_OBJECT_TRACE
- bcm_object_trace_opr(skb, BCM_OBJDBG_REMOVE, caller, line);
-#endif /* BCM_OBJECT_TRACE */
-
- {
- if (skb->destructor) {
- /* cannot kfree_skb() on hard IRQ (net/core/skbuff.c) if
- * destructor exists
- */
- dev_kfree_skb_any(skb);
- } else {
- /* can free immediately (even in_irq()) if destructor
- * does not exist
- */
- dev_kfree_skb(skb);
- }
- }
- atomic_dec(&osh->cmn->pktalloced);
- skb = nskb;
- }
-}
-
-#ifdef CONFIG_DHD_USE_STATIC_BUF
-void*
-osl_pktget_static(osl_t *osh, uint len)
-{
- int i = 0;
- struct sk_buff *skb;
-#ifdef DHD_USE_STATIC_CTRLBUF
- unsigned long flags;
-#endif /* DHD_USE_STATIC_CTRLBUF */
-
- if (!bcm_static_skb)
- return linux_pktget(osh, len);
-
- if (len > DHD_SKB_MAX_BUFSIZE) {
- printk("%s: attempt to allocate huge packet (0x%x)\n", __FUNCTION__, len);
- return linux_pktget(osh, len);
- }
-
-#ifdef DHD_USE_STATIC_CTRLBUF
- spin_lock_irqsave(&bcm_static_skb->osl_pkt_lock, flags);
-
- if (len <= DHD_SKB_2PAGE_BUFSIZE) {
- uint32 index;
- for (i = 0; i < STATIC_PKT_2PAGE_NUM; i++) {
- index = bcm_static_skb->last_allocated_index % STATIC_PKT_2PAGE_NUM;
- bcm_static_skb->last_allocated_index++;
- if (bcm_static_skb->skb_8k[index] &&
- bcm_static_skb->pkt_use[index] == 0) {
- break;
- }
- }
-
- if (i < STATIC_PKT_2PAGE_NUM) {
- bcm_static_skb->pkt_use[index] = 1;
- skb = bcm_static_skb->skb_8k[index];
- skb->data = skb->head;
-#ifdef NET_SKBUFF_DATA_USES_OFFSET
- skb_set_tail_pointer(skb, PKT_HEADROOM_DEFAULT);
-#else
- skb->tail = skb->data + PKT_HEADROOM_DEFAULT;
-#endif /* NET_SKBUFF_DATA_USES_OFFSET */
- skb->data += PKT_HEADROOM_DEFAULT;
- skb->cloned = 0;
- skb->priority = 0;
-#ifdef NET_SKBUFF_DATA_USES_OFFSET
- skb_set_tail_pointer(skb, len);
-#else
- skb->tail = skb->data + len;
-#endif /* NET_SKBUFF_DATA_USES_OFFSET */
- skb->len = len;
- skb->mac_len = PREALLOC_USED_MAGIC;
- spin_unlock_irqrestore(&bcm_static_skb->osl_pkt_lock, flags);
- return skb;
- }
- }
-
- spin_unlock_irqrestore(&bcm_static_skb->osl_pkt_lock, flags);
- printk("%s: all static pkt in use!\n", __FUNCTION__);
- return NULL;
-#else
- down(&bcm_static_skb->osl_pkt_sem);
-
- if (len <= DHD_SKB_1PAGE_BUFSIZE) {
- for (i = 0; i < STATIC_PKT_1PAGE_NUM; i++) {
- if (bcm_static_skb->skb_4k[i] &&
- bcm_static_skb->pkt_use[i] == 0) {
- break;
- }
- }
-
- if (i != STATIC_PKT_1PAGE_NUM) {
- bcm_static_skb->pkt_use[i] = 1;
-
- skb = bcm_static_skb->skb_4k[i];
-#ifdef NET_SKBUFF_DATA_USES_OFFSET
- skb_set_tail_pointer(skb, len);
-#else
- skb->tail = skb->data + len;
-#endif /* NET_SKBUFF_DATA_USES_OFFSET */
- skb->len = len;
-
- up(&bcm_static_skb->osl_pkt_sem);
- return skb;
- }
- }
-
- if (len <= DHD_SKB_2PAGE_BUFSIZE) {
- for (i = STATIC_PKT_1PAGE_NUM; i < STATIC_PKT_1_2PAGE_NUM; i++) {
- if (bcm_static_skb->skb_8k[i - STATIC_PKT_1PAGE_NUM] &&
- bcm_static_skb->pkt_use[i] == 0) {
- break;
- }
- }
-
- if ((i >= STATIC_PKT_1PAGE_NUM) && (i < STATIC_PKT_1_2PAGE_NUM)) {
- bcm_static_skb->pkt_use[i] = 1;
- skb = bcm_static_skb->skb_8k[i - STATIC_PKT_1PAGE_NUM];
-#ifdef NET_SKBUFF_DATA_USES_OFFSET
- skb_set_tail_pointer(skb, len);
-#else
- skb->tail = skb->data + len;
-#endif /* NET_SKBUFF_DATA_USES_OFFSET */
- skb->len = len;
-
- up(&bcm_static_skb->osl_pkt_sem);
- return skb;
- }
- }
-
-#if defined(ENHANCED_STATIC_BUF)
- if (bcm_static_skb->skb_16k &&
- bcm_static_skb->pkt_use[STATIC_PKT_MAX_NUM - 1] == 0) {
- bcm_static_skb->pkt_use[STATIC_PKT_MAX_NUM - 1] = 1;
-
- skb = bcm_static_skb->skb_16k;
-#ifdef NET_SKBUFF_DATA_USES_OFFSET
- skb_set_tail_pointer(skb, len);
-#else
- skb->tail = skb->data + len;
-#endif /* NET_SKBUFF_DATA_USES_OFFSET */
- skb->len = len;
-
- up(&bcm_static_skb->osl_pkt_sem);
- return skb;
- }
-#endif /* ENHANCED_STATIC_BUF */
-
- up(&bcm_static_skb->osl_pkt_sem);
- printk("%s: all static pkt in use!\n", __FUNCTION__);
- return linux_pktget(osh, len);
-#endif /* DHD_USE_STATIC_CTRLBUF */
-}
-
-void
-osl_pktfree_static(osl_t *osh, void *p, bool send)
-{
- int i;
-#ifdef DHD_USE_STATIC_CTRLBUF
- struct sk_buff *skb = (struct sk_buff *)p;
- unsigned long flags;
-#endif /* DHD_USE_STATIC_CTRLBUF */
-
- if (!p) {
- return;
- }
-
- if (!bcm_static_skb) {
- linux_pktfree(osh, p, send);
- return;
- }
-
-#ifdef DHD_USE_STATIC_CTRLBUF
- spin_lock_irqsave(&bcm_static_skb->osl_pkt_lock, flags);
-
- for (i = 0; i < STATIC_PKT_2PAGE_NUM; i++) {
- if (p == bcm_static_skb->skb_8k[i]) {
- if (bcm_static_skb->pkt_use[i] == 0) {
- printk("%s: static pkt idx %d(%p) is double free\n",
- __FUNCTION__, i, p);
- } else {
- bcm_static_skb->pkt_use[i] = 0;
- }
-
- if (skb->mac_len != PREALLOC_USED_MAGIC) {
- printk("%s: static pkt idx %d(%p) is not in used\n",
- __FUNCTION__, i, p);
- }
-
- skb->mac_len = PREALLOC_FREE_MAGIC;
- spin_unlock_irqrestore(&bcm_static_skb->osl_pkt_lock, flags);
- return;
- }
- }
-
- spin_unlock_irqrestore(&bcm_static_skb->osl_pkt_lock, flags);
- printk("%s: packet %p does not exist in the pool\n", __FUNCTION__, p);
-#else
- down(&bcm_static_skb->osl_pkt_sem);
- for (i = 0; i < STATIC_PKT_1PAGE_NUM; i++) {
- if (p == bcm_static_skb->skb_4k[i]) {
- bcm_static_skb->pkt_use[i] = 0;
- up(&bcm_static_skb->osl_pkt_sem);
- return;
- }
- }
-
- for (i = STATIC_PKT_1PAGE_NUM; i < STATIC_PKT_1_2PAGE_NUM; i++) {
- if (p == bcm_static_skb->skb_8k[i - STATIC_PKT_1PAGE_NUM]) {
- bcm_static_skb->pkt_use[i] = 0;
- up(&bcm_static_skb->osl_pkt_sem);
- return;
- }
- }
-#ifdef ENHANCED_STATIC_BUF
- if (p == bcm_static_skb->skb_16k) {
- bcm_static_skb->pkt_use[STATIC_PKT_MAX_NUM - 1] = 0;
- up(&bcm_static_skb->osl_pkt_sem);
- return;
- }
-#endif // endif
- up(&bcm_static_skb->osl_pkt_sem);
-#endif /* DHD_USE_STATIC_CTRLBUF */
- linux_pktfree(osh, p, send);
-}
-#endif /* CONFIG_DHD_USE_STATIC_BUF */
-
-/* Clone a packet.
- * The pkttag contents are NOT cloned.
- */
-void *
-#ifdef BCM_OBJECT_TRACE
-osl_pktdup(osl_t *osh, void *skb, int line, const char *caller)
-#else
-osl_pktdup(osl_t *osh, void *skb)
-#endif /* BCM_OBJECT_TRACE */
-{
- void * p;
-
- ASSERT(!PKTISCHAINED(skb));
-
- /* clear the CTFBUF flag if set and map the rest of the buffer
- * before cloning.
- */
- PKTCTFMAP(osh, skb);
-
- if ((p = skb_clone((struct sk_buff *)skb, GFP_ATOMIC)) == NULL)
- return NULL;
-
- /* skb_clone copies skb->cb.. we don't want that */
- if (osh->pub.pkttag)
- OSL_PKTTAG_CLEAR(p);
-
- /* Increment the packet counter */
- atomic_inc(&osh->cmn->pktalloced);
-#ifdef BCM_OBJECT_TRACE
- bcm_object_trace_opr(p, BCM_OBJDBG_ADD_PKT, caller, line);
-#endif /* BCM_OBJECT_TRACE */
-
- return (p);
-}
-
-/*
- * BINOSL selects the slightly slower function-call-based binary compatible osl.
- */
-
-uint
-osl_pktalloced(osl_t *osh)
-{
- if (atomic_read(&osh->cmn->refcount) == 1)
- return (atomic_read(&osh->cmn->pktalloced));
- else
- return 0;
-}
-
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 6, 0) && defined(TSQ_MULTIPLIER)
-#include <linux/kallsyms.h>
-#include <net/sock.h>
-void
-osl_pkt_orphan_partial(struct sk_buff *skb, int tsq)
-{
- uint32 fraction;
- static void *p_tcp_wfree = NULL;
-
- if (tsq <= 0)
- return;
-
- if (!skb->destructor || skb->destructor == sock_wfree)
- return;
-
- if (unlikely(!p_tcp_wfree)) {
- char sym[KSYM_SYMBOL_LEN];
- sprint_symbol(sym, (unsigned long)skb->destructor);
- sym[9] = 0;
- if (!strcmp(sym, "tcp_wfree"))
- p_tcp_wfree = skb->destructor;
- else
- return;
- }
-
- if (unlikely(skb->destructor != p_tcp_wfree || !skb->sk))
- return;
-
- /* abstract a certain portion of skb truesize from the socket
- * sk_wmem_alloc to allow more skb can be allocated for this
- * socket for better cusion meeting WiFi device requirement
- */
- fraction = skb->truesize * (tsq - 1) / tsq;
- skb->truesize -= fraction;
-
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 13, 0)
- atomic_sub(fraction, &skb->sk->sk_wmem_alloc.refs);
-#else
- atomic_sub(fraction, &skb->sk->sk_wmem_alloc);
-#endif // endif
- skb_orphan(skb);
-}
-#endif /* LINUX_VERSION >= 3.6.0 && TSQ_MULTIPLIER */
+++ /dev/null
-/*
- * otpdefs.h SROM/OTP definitions.
- *
- * Copyright (C) 1999-2019, Broadcom.
- *
- * Unless you and Broadcom execute a separate written software license
- * agreement governing use of this software, this software is licensed to you
- * under the terms of the GNU General Public License version 2 (the "GPL"),
- * available at http://www.broadcom.com/licenses/GPLv2.php, with the
- * following added to such license:
- *
- * As a special exception, the copyright holders of this software give you
- * permission to link this software with independent modules, and to copy and
- * distribute the resulting executable under terms of your choice, provided that
- * you also meet, for each linked independent module, the terms and conditions of
- * the license of that module. An independent module is a module which is not
- * derived from this software. The special exception does not apply to any
- * modifications of the software.
- *
- * Notwithstanding the above, under no circumstances may you combine this
- * software in any way with any other Broadcom software provided under a license
- * other than the GPL, without Broadcom's express prior written consent.
- *
- * <<Broadcom-WL-IPTag/Open:>>
- *
- * $Id$
- */
-
-#ifndef _OTPDEFS_H_
-#define _OTPDEFS_H_
-
-/* SFLASH */
-#define SFLASH_ADDRESS_OFFSET_4368 0x1C000000u
-#define SFLASH_SKU_OFFSET_4368 0xEu
-#define SFLASH_MACADDR_OFFSET_4368 0x4u
-/*
- * In sflash based chips, first word in sflash says the length.
- * So only default value is defined here. Actual length is read
- * from sflash in dhdpcie_srom_sflash_health_chk
- * 0x0521 * 2 .x2 since length says number of words.
- */
-#define SFLASH_LEN_4368 0xA42u
-
-#define SROM_ADDRESS_OFFSET_4355 0x0800u
-#define SROM_ADDRESS_OFFSET_4364 0xA000u
-#define SROM_ADDRESS_OFFSET_4377 0x0800u
-#define SROM_ADDRESS(sih, offset) (SI_ENUM_BASE(sih) + (offset))
-#define SROM_MACADDR_OFFSET_4355 0x84u
-#define SROM_MACADDR_OFFSET_4364 0x82u
-#define SROM_MACADDR_OFFSET_4377 0xE2u
-#define SROM_SKU_OFFSET_4355 0x8Au
-#define SROM_SKU_OFFSET_4364 0x8Cu
-#define SROM_SKU_OFFSET_4377 0xECu
-#define SROM_CAL_SIG1_OFFSET_4355 0xB8u
-#define SROM_CAL_SIG2_OFFSET_4355 0xBAu
-#define SROM_CAL_SIG1_OFFSET_4364 0xA0u
-#define SROM_CAL_SIG2_OFFSET_4364 0xA2u
-#define SROM_CAL_SIG1 0x4c42u
-#define SROM_CAL_SIG2 0x424fu
-#define SROM_LEN_4355 512u
-#define SROM_LEN_4364 2048u
-#define SROM_LEN_4377 2048u
-
-#define OTP_USER_AREA_OFFSET_4355 0xC0u
-#define OTP_USER_AREA_OFFSET_4364 0xC0u
-#define OTP_USER_AREA_OFFSET_4368 0x120u
-#define OTP_USER_AREA_OFFSET_4377 0x120u
-#define OTP_OFFSET_4368 0x5000u
-#define OTP_OFFSET_4377 0x11000u
-#define OTP_CTRL1_VAL 0xFA0000
-#define OTP_ADDRESS(sih, offset) (SI_ENUM_BASE(sih) + (offset))
-#define OTP_VERSION_TUPLE_ID 0x15
-#define OTP_VENDOR_TUPLE_ID 0x80
-#define OTP_CIS_REGION_END_TUPLE_ID 0XFF
-
-#define PCIE_CTRL_REG_ADDR(sih) (SI_ENUM_BASE(sih) + 0x3000)
-#define SPROM_CTRL_REG_ADDR(sih) (SI_ENUM_BASE(sih) + CC_SROM_CTRL)
-#define SPROM_CTRL_OPCODE_READ_MASK 0x9FFFFFFF
-#define SPROM_CTRL_START_BUSY_MASK 0x80000000
-#define SPROM_ADDR(sih) (SI_ENUM_BASE(sih) + CC_SROM_ADDRESS)
-#define SPROM_DATA(sih) (SI_ENUM_BASE(sih) + CC_SROM_DATA)
-#define OTP_CTRL1_REG_ADDR(sih) (SI_ENUM_BASE(sih) + 0xF4)
-#define PMU_MINRESMASK_REG_ADDR(sih) (SI_ENUM_BASE(sih) + MINRESMASKREG)
-#define CHIP_COMMON_STATUS_REG_ADDR(sih) (SI_ENUM_BASE(sih) + CC_CHIPST)
-#define CHIP_COMMON_CLKDIV2_ADDR(sih) (SI_ENUM_BASE(sih) + CC_CLKDIV2)
-
-#define CC_CLKDIV2_SPROMDIV_MASK 0x7u
-#define CC_CLKDIV2_SPROMDIV_VAL 0X4u
-#define CC_CHIPSTATUS_STRAP_BTUART_MASK 0x40u
-#define PMU_OTP_PWR_ON_MASK 0xC47
-#define PMU_PWRUP_DELAY 500 /* in us */
-#define DONGLE_TREFUP_PROGRAM_DELAY 5000 /* 5ms in us */
-#define SPROM_BUSY_POLL_DELAY 5 /* 5us */
-
-typedef enum {
- BCM4355_IDX = 0,
- BCM4364_IDX,
- BCM4368_IDX,
- BCM4377_IDX,
- BCMMAX_IDX
-} chip_idx_t;
-
-typedef enum {
- BCM4368_BTOP_IDX,
- BCM4377_BTOP_IDX,
- BCMMAX_BTOP_IDX
-} chip_idx_btop_t;
-
-typedef enum {
- BCM4368_SFLASH_IDX,
- BCMMAX_SFLASH_IDX
-} chip_idx_sflash_t;
-
-extern uint32 otp_addr_offsets[];
-extern uint32 otp_usrarea_offsets[];
-extern uint32 sku_offsets[];
-extern uint32 srf_addr_offsets[];
-extern uint32 supported_chips[];
-
-char *dhd_get_plat_sku(void);
-#endif /* _OTPDEFS_H */
* Contains PCIe related functions that are shared between different driver models (e.g. firmware
* builds, DHD builds, BMAC builds), in order to avoid code duplication.
*
- * Copyright (C) 1999-2019, Broadcom.
- *
+ * Copyright (C) 1999-2017, Broadcom Corporation
+ *
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
* under the terms of the GNU General Public License version 2 (the "GPL"),
* available at http://www.broadcom.com/licenses/GPLv2.php, with the
* following added to such license:
- *
+ *
* As a special exception, the copyright holders of this software give you
* permission to link this software with independent modules, and to copy and
* distribute the resulting executable under terms of your choice, provided that
* the license of that module. An independent module is a module which is not
* derived from this software. The special exception does not apply to any
* modifications of the software.
- *
+ *
* Notwithstanding the above, under no circumstances may you combine this
* software in any way with any other Broadcom software provided under a license
* other than the GPL, without Broadcom's express prior written consent.
*
* <<Broadcom-WL-IPTag/Open:>>
*
- * $Id: pcie_core.c 769591 2018-06-27 00:08:22Z $
+ * $Id: pcie_core.c 658668 2016-09-09 00:42:11Z $
*/
#include <bcm_cfg.h>
#include <hndsoc.h>
#include <sbchipc.h>
#include <pcicfg.h>
+
#include "pcie_core.h"
/* local prototypes */
#ifdef BCMDRIVER
-/* wd_mask/wd_val is only for chipc_corerev >= 65 */
-void pcie_watchdog_reset(osl_t *osh, si_t *sih, uint32 wd_mask, uint32 wd_val)
+void pcie_watchdog_reset(osl_t *osh, si_t *sih, sbpcieregs_t *sbpcieregs)
{
uint32 val, i, lsc;
uint16 cfg_offset[] = {PCIECFGREG_STATUS_CMD, PCIECFGREG_PM_CSR,
PCIECFGREG_LINK_STATUS_CTRL2, PCIECFGREG_RBAR_CTRL,
PCIECFGREG_PML1_SUB_CTRL1, PCIECFGREG_REG_BAR2_CONFIG,
PCIECFGREG_REG_BAR3_CONFIG};
- sbpcieregs_t *pcieregs = NULL;
+ sbpcieregs_t *pcie = NULL;
uint32 origidx = si_coreidx(sih);
-#ifdef BCMFPGA_HW
- if (CCREV(sih->ccrev) < 67) {
- /* To avoid hang on FPGA, donot reset watchdog */
- si_setcoreidx(sih, origidx);
- return;
- }
-#endif // endif
-
/* Switch to PCIE2 core */
- pcieregs = (sbpcieregs_t *)si_setcore(sih, PCIE2_CORE_ID, 0);
- BCM_REFERENCE(pcieregs);
- ASSERT(pcieregs != NULL);
+ pcie = (sbpcieregs_t *)si_setcore(sih, PCIE2_CORE_ID, 0);
+ BCM_REFERENCE(pcie);
+ ASSERT(pcie != NULL);
/* Disable/restore ASPM Control to protect the watchdog reset */
- W_REG(osh, &pcieregs->configaddr, PCIECFGREG_LINK_STATUS_CTRL);
- lsc = R_REG(osh, &pcieregs->configdata);
+ W_REG(osh, &sbpcieregs->configaddr, PCIECFGREG_LINK_STATUS_CTRL);
+ lsc = R_REG(osh, &sbpcieregs->configdata);
val = lsc & (~PCIE_ASPM_ENAB);
- W_REG(osh, &pcieregs->configdata, val);
-
- if (CCREV(sih->ccrev) >= 65) {
- si_corereg(sih, SI_CC_IDX, OFFSETOF(chipcregs_t, watchdog), wd_mask, wd_val);
- si_corereg(sih, SI_CC_IDX, OFFSETOF(chipcregs_t, watchdog), WD_COUNTER_MASK, 4);
-#ifdef BCMQT_HW
- OSL_DELAY(2000 * 4000);
-#else
- OSL_DELAY(2000); /* 2 ms */
-#endif // endif
- val = si_corereg(sih, SI_CC_IDX, OFFSETOF(chipcregs_t, intstatus), 0, 0);
- si_corereg(sih, SI_CC_IDX, OFFSETOF(chipcregs_t, intstatus),
- wd_mask, val & wd_mask);
- } else {
- si_corereg_writeonly(sih, SI_CC_IDX, OFFSETOF(chipcregs_t, watchdog), ~0, 4);
- /* Read a config space to make sure the above write gets flushed on PCIe bus */
- val = OSL_PCI_READ_CONFIG(osh, PCI_CFG_VID, sizeof(uint32));
- OSL_DELAY(100000);
- }
+ W_REG(osh, &sbpcieregs->configdata, val);
- W_REG(osh, &pcieregs->configaddr, PCIECFGREG_LINK_STATUS_CTRL);
- W_REG(osh, &pcieregs->configdata, lsc);
+ si_corereg(sih, SI_CC_IDX, OFFSETOF(chipcregs_t, watchdog), ~0, 4);
+ OSL_DELAY(100000);
+
+ W_REG(osh, &sbpcieregs->configaddr, PCIECFGREG_LINK_STATUS_CTRL);
+ W_REG(osh, &sbpcieregs->configdata, lsc);
if (sih->buscorerev <= 13) {
/* Write configuration registers back to the shadow registers
* cause shadow registers are cleared out after watchdog reset.
*/
for (i = 0; i < ARRAYSIZE(cfg_offset); i++) {
- W_REG(osh, &pcieregs->configaddr, cfg_offset[i]);
- val = R_REG(osh, &pcieregs->configdata);
- W_REG(osh, &pcieregs->configdata, val);
+ W_REG(osh, &sbpcieregs->configaddr, cfg_offset[i]);
+ val = R_REG(osh, &sbpcieregs->configdata);
+ W_REG(osh, &sbpcieregs->configdata, val);
}
}
si_setcoreidx(sih, origidx);
}
+
/* CRWLPCIEGEN2-117 pcie_pipe_Iddq should be controlled
* by the L12 state from MAC to save power by putting the
* SerDes analog in IDDQ mode
si_corereg(sih, sih->buscoreidx,
OFFSETOF(sbpcieregs_t, configdata), PCIE_PMCR_REFEXT_MASK, PCIE_PMCR_REFEXT_100US);
}
-
#endif /* BCMDRIVER */
* Misc utility routines for accessing chip-specific features
* of the SiliconBackplane-based Broadcom chips.
*
- * Copyright (C) 1999-2019, Broadcom.
- *
+ * Copyright (C) 1999-2017, Broadcom Corporation
+ *
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
* under the terms of the GNU General Public License version 2 (the "GPL"),
* available at http://www.broadcom.com/licenses/GPLv2.php, with the
* following added to such license:
- *
+ *
* As a special exception, the copyright holders of this software give you
* permission to link this software with independent modules, and to copy and
* distribute the resulting executable under terms of your choice, provided that
* the license of that module. An independent module is a module which is not
* derived from this software. The special exception does not apply to any
* modifications of the software.
- *
+ *
* Notwithstanding the above, under no circumstances may you combine this
* software in any way with any other Broadcom software provided under a license
* other than the GPL, without Broadcom's express prior written consent.
*
* <<Broadcom-WL-IPTag/Open:>>
*
- * $Id: sbutils.c 700323 2017-05-18 16:12:11Z $
+ * $Id: sbutils.c 599296 2015-11-13 06:36:13Z $
*/
#include <bcm_cfg.h>
#include "siutils_priv.h"
+
/* local prototypes */
static uint _sb_coreidx(si_info_t *sii, uint32 sba);
static uint _sb_scan(si_info_t *sii, uint32 sba, volatile void *regs, uint bus, uint32 sbba,
- uint ncores, uint devid);
+ uint ncores);
static uint32 _sb_coresba(si_info_t *sii);
static volatile void *_sb_setcoreidx(si_info_t *sii, uint coreidx);
#define SET_SBREG(sii, r, mask, val) \
uint8 tmp;
uint32 val, intr_val = 0;
+
/*
* compact flash only has 11 bits address, while we needs 12 bits address.
* MEM_SEG will be OR'd with other 11 bits address in hardware,
volatile uint32 dummy;
uint32 intr_val = 0;
+
/*
* compact flash only has 11 bits address, while we needs 12 bits address.
* MEM_SEG will be OR'd with other 11 bits address in hardware,
{
uint32 sbaddr;
+
switch (BUSTYPE(sii->pub.bustype)) {
case SI_BUS: {
sbconfig_t *sb = REGS2SB(sii->curmap);
case SDIO_BUS:
sbaddr = (uint32)(uintptr)sii->curmap;
break;
-#endif // endif
+#endif
+
default:
sbaddr = BADCOREADDR;
#define SB_MAXBUSES 2
static uint
_sb_scan(si_info_t *sii, uint32 sba, volatile void *regs, uint bus,
- uint32 sbba, uint numcores, uint devid)
+ uint32 sbba, uint numcores)
{
uint next;
uint ncc = 0;
sii->numcores = next + 1;
- if ((nsbba & 0xfff00000) != si_enum_base(devid))
+ if ((nsbba & 0xfff00000) != SI_ENUM_BASE)
continue;
nsbba &= 0xfffff000;
if (_sb_coreidx(sii, nsbba) != BADIDX)
continue;
nsbcc = (R_SBREG(sii, &sb->sbtmstatehigh) & 0x000f0000) >> 16;
- nsbcc = _sb_scan(sii, sba, regs, bus + 1, nsbba, nsbcc, devid);
- if (sbba == si_enum_base(devid))
+ nsbcc = _sb_scan(sii, sba, regs, bus + 1, nsbba, nsbcc);
+ if (sbba == SI_ENUM_BASE)
numcores -= nsbcc;
ncc += nsbcc;
}
*/
origsba = _sb_coresba(sii);
- /* scan all SB(s) starting from SI_ENUM_BASE_DEFAULT */
- sii->numcores = _sb_scan(sii, origsba, regs, 0, si_enum_base(devid), 1, devid);
+ /* scan all SB(s) starting from SI_ENUM_BASE */
+ sii->numcores = _sb_scan(sii, origsba, regs, 0, SI_ENUM_BASE, 1);
}
/*
break;
#endif /* BCMSDIO */
+
default:
ASSERT(0);
regs = NULL;
return (sb_size(R_SBREG(sii, sb_admatch(sii, asidx))));
}
+
/* do buffered registers update */
void
sb_commit(si_t *sih)
uint32 tmp, ret = 0xffffffff;
sbconfig_t *sb;
+
if ((to & ~TO_MASK) != 0)
return ret;
case PCMCIA_BUS:
#ifdef BCMSDIO
case SDIO_BUS:
-#endif // endif
+#endif
idx = si_findcoreidx(sih, PCMCIA_CORE_ID, 0);
break;
case SI_BUS:
sb_setcoreidx(sih, origidx);
INTR_RESTORE(sii, intr_val);
}
-#endif // endif
+#endif
* Misc utility routines for accessing chip-specific features
* of the SiliconBackplane-based Broadcom chips.
*
- * Copyright (C) 1999-2019, Broadcom.
- *
+ * Copyright (C) 1999-2017, Broadcom Corporation
+ *
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
* under the terms of the GNU General Public License version 2 (the "GPL"),
* available at http://www.broadcom.com/licenses/GPLv2.php, with the
* following added to such license:
- *
+ *
* As a special exception, the copyright holders of this software give you
* permission to link this software with independent modules, and to copy and
* distribute the resulting executable under terms of your choice, provided that
* the license of that module. An independent module is a module which is not
* derived from this software. The special exception does not apply to any
* modifications of the software.
- *
+ *
* Notwithstanding the above, under no circumstances may you combine this
* software in any way with any other Broadcom software provided under a license
* other than the GPL, without Broadcom's express prior written consent.
*
* <<Broadcom-WL-IPTag/Open:>>
*
- * $Id: siutils.c 798061 2019-01-04 23:27:15Z $
+ * $Id: siutils.c 668442 2016-11-03 08:42:43Z $
*/
#include <bcm_cfg.h>
#include <hndsoc.h>
#include <sbchipc.h>
#include <sbgci.h>
-#ifndef BCMSDIO
-#include <pcie_core.h>
-#endif // endif
#ifdef BCMPCIEDEV
#include <pciedev.h>
#endif /* BCMPCIEDEV */
#include <bcmsdpcm.h>
#endif /* BCMSDIO */
#include <hndpmu.h>
-#ifdef BCMSPI
-#include <spid.h>
-#endif /* BCMSPI */
#include <dhd_config.h>
#ifdef BCM_SDRBL
#ifdef HNDGCI
#include <hndgci.h>
#endif /* HNDGCI */
-#ifdef WLGCIMBHLR
-#include <hnd_gci.h>
-#endif /* WLGCIMBHLR */
#ifdef BCMULP
#include <ulp.h>
#endif /* BCMULP */
-#include <hndlhl.h>
-#include <lpflags.h>
#include "siutils_priv.h"
#ifdef SECI_UART
/* Defines the set of GPIOs to be used for SECI UART if not specified in NVRAM */
-/* For further details on each ppin functionality please refer to PINMUX table in
- * Top level architecture of BCMXXXX Chip
- */
-#define DEFAULT_SECI_UART_PINMUX 0x08090a0b
#define DEFAULT_SECI_UART_PINMUX_43430 0x0102
static bool force_seci_clk = 0;
#endif /* SECI_UART */
-#define XTAL_FREQ_26000KHZ 26000
-
/**
* A set of PMU registers is clocked in the ILP domain, which has an implication on register write
* behavior: if such a register is written, it takes multiple ILP clocks for the PMU block to absorb
#define GCI_FEM_CTRL_WAR 0x11111111
-#ifndef AXI_TO_VAL
-#define AXI_TO_VAL 19
-#endif /* AXI_TO_VAL */
-
-#ifndef AXI_TO_VAL_4347
-/*
- * Increase BP timeout for fast clock and short PCIe timeouts
- * New timeout: 2 ** 25 cycles
- */
-#define AXI_TO_VAL_4347 25
-#endif /* AXI_TO_VAL_4347 */
-
/* local prototypes */
static si_info_t *si_doattach(si_info_t *sii, uint devid, osl_t *osh, volatile void *regs,
uint bustype, void *sdh, char **vars, uint *varsz);
static bool si_buscore_setup(si_info_t *sii, chipcregs_t *cc, uint bustype, uint32 savewin,
uint *origidx, volatile void *regs);
+
static bool si_pmu_is_ilp_sensitive(uint32 idx, uint regoff);
+
+
/* global variable to indicate reservation/release of gpio's */
static uint32 si_gpioreservation = 0;
#ifdef BCMULP
/* Variable to store boot_type: warm_boot/cold_boot/etc. */
static int boot_type = 0;
-#endif // endif
+#endif
/* global kernel resource */
static si_info_t ksii;
static si_cores_info_t ksii_cores_info;
+static const char rstr_rmin[] = "rmin";
+static const char rstr_rmax[] = "rmax";
+
/**
* Allocate an si handle. This function may be called multiple times.
*
si_info_t *sii;
si_cores_info_t *cores_info;
/* alloc si_info_t */
- /* freed after ucode download for firmware builds */
- if ((sii = MALLOCZ_NOPERSIST(osh, sizeof(si_info_t))) == NULL) {
+ if ((sii = MALLOCZ(osh, sizeof (si_info_t))) == NULL) {
SI_ERROR(("si_attach: malloc failed! malloced %d bytes\n", MALLOCED(osh)));
return (NULL);
}
/* alloc si_cores_info_t */
- if ((cores_info = (si_cores_info_t *)MALLOCZ(osh,
- sizeof(si_cores_info_t))) == NULL) {
+ if ((cores_info = (si_cores_info_t *)MALLOCZ(osh, sizeof (si_cores_info_t))) == NULL) {
SI_ERROR(("si_attach: malloc failed! malloced %d bytes\n", MALLOCED(osh)));
MFREE(osh, sii, sizeof(si_info_t));
return (NULL);
return (si_t *)sii;
}
-static uint32 wd_msticks; /**< watchdog timer ticks normalized to ms */
-/** Returns the backplane address of the chipcommon core for a particular chip */
-uint32
-si_enum_base(uint devid)
-{
- // NIC/DHD build
- switch (devid) {
- case BCM7271_CHIP_ID:
- case BCM7271_D11AC_ID:
- case BCM7271_D11AC2G_ID:
- case BCM7271_D11AC5G_ID:
- return 0xF1800000;
- }
-
- return SI_ENUM_BASE_DEFAULT;
-}
+static uint32 wd_msticks; /**< watchdog timer ticks normalized to ms */
-/** generic kernel variant of si_attach(). Is not called for Linux WLAN NIC builds. */
+/** generic kernel variant of si_attach() */
si_t *
si_kattach(osl_t *osh)
{
if (!ksii_attached) {
void *regs = NULL;
- const uint device_id = BCM4710_DEVICE_ID; // pick an arbitrary default device_id
-
- regs = REG_MAP(si_enum_base(device_id), SI_CORE_SIZE); // map physical to virtual
+ regs = REG_MAP(SI_ENUM_BASE, SI_CORE_SIZE);
cores_info = (si_cores_info_t *)&ksii_cores_info;
ksii.cores_info = cores_info;
ASSERT(osh);
- if (si_doattach(&ksii, device_id, osh, regs,
+ if (si_doattach(&ksii, BCM4710_DEVICE_ID, osh, regs,
SI_BUS, NULL,
osh != SI_OSH ? &(ksii.vars) : NULL,
osh != SI_OSH ? &(ksii.varsz) : NULL) == NULL) {
/* save ticks normalized to ms for si_watchdog_ms() */
if (PMUCTL_ENAB(&ksii.pub)) {
- /* based on 32KHz ILP clock */
- wd_msticks = 32;
+ {
+ /* based on 32KHz ILP clock */
+ wd_msticks = 32;
+ }
} else {
wd_msticks = ALP_CLOCK / 1000;
}
if (BUSTYPE(bustype) == PCMCIA_BUS)
sii->memseg = TRUE;
-#if defined(BCMSDIO) && !defined(BCMSDIOLITE)
+
+#if defined(BCMSDIO)
if (BUSTYPE(bustype) == SDIO_BUS) {
int err;
uint8 clkset;
bcmsdh_cfg_write(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_SDIOPULLUP, 0, NULL);
}
-#ifdef BCMSPI
- /* Avoid backplane accesses before wake-wlan (i.e. htavail) for spi.
- * F1 read accesses may return correct data but with data-not-available dstatus bit set.
- */
- if (BUSTYPE(bustype) == SPI_BUS) {
-
- int err;
- uint32 regdata;
- /* wake up wlan function :WAKE_UP goes as HT_AVAIL request in hardware */
- regdata = bcmsdh_cfg_read_word(sdh, SDIO_FUNC_0, SPID_CONFIG, NULL);
- SI_MSG(("F0 REG0 rd = 0x%x\n", regdata));
- regdata |= WAKE_UP;
-
- bcmsdh_cfg_write_word(sdh, SDIO_FUNC_0, SPID_CONFIG, regdata, &err);
-
- OSL_DELAY(100000);
- }
-#endif /* BCMSPI */
-#endif /* BCMSDIO && BCMDONGLEHOST && !BCMSDIOLITE */
+#endif /* BCMSDIO && BCMDONGLEHOST */
return TRUE;
}
pmuaddr = (uint32)(uintptr)((volatile uint8*)pmu + offset);
si_setcoreidx(sih, origidx);
} else
- pmuaddr = SI_ENUM_BASE(sih) + offset;
+ pmuaddr = SI_ENUM_BASE + offset;
done:
- SI_MSG(("%s: addrRET: %x\n", __FUNCTION__, pmuaddr));
+ printf("%s: addrRET: %x\n", __FUNCTION__, pmuaddr);
return pmuaddr;
}
#if defined(BCM_BACKPLANE_TIMEOUT) || defined(AXI_TIMEOUTS)
/* first, enable backplane timeouts */
si_slave_wrapper_add(&sii->pub);
-#endif // endif
+#endif
sii->curidx = 0;
cc = si_setcoreidx(&sii->pub, SI_CC_IDX);
crev = si_corerev(&sii->pub);
/* Display cores found */
- SI_VMSG(("CORE[%d]: id 0x%x rev %d base 0x%x size:%x regs 0x%p\n",
- i, cid, crev, sii->coresba[i], sii->coresba_size[i],
- OSL_OBFUSCATE_BUF(sii->regs[i])));
+ SI_VMSG(("CORE[%d]: id 0x%x rev %d base 0x%x regs 0x%p\n",
+ i, cid, crev, cores_info->coresba[i], cores_info->regs[i]));
if (BUSTYPE(bustype) == SI_BUS) {
/* now look at the chipstatus register to figure out the package */
/* for SDIO but downloaded on PCIE dev */
-#ifdef BCMPCIEDEV_ENABLED
if (cid == PCIE2_CORE_ID) {
+ if (BCM43602_CHIP(sii->pub.chip) ||
+ (CHIPID(sii->pub.chip) == BCM4365_CHIP_ID) ||
+ (CHIPID(sii->pub.chip) == BCM4347_CHIP_ID) ||
+ (CHIPID(sii->pub.chip) == BCM4366_CHIP_ID) ||
+ ((BCM4345_CHIP(sii->pub.chip) ||
+ BCM4349_CHIP(sii->pub.chip)) &&
+ CST4345_CHIPMODE_PCIE(sii->pub.chipst))) {
pcieidx = i;
pcierev = crev;
pcie = TRUE;
pcie_gen2 = TRUE;
+ }
}
-#endif // endif
} else if (BUSTYPE(bustype) == PCI_BUS) {
if (cid == PCI_CORE_ID) {
*origidx = i;
}
+
#if defined(PCIE_FULL_DONGLE)
if (pcie) {
if (pcie_gen2)
SI_VMSG(("Buscore id/type/rev %d/0x%x/%d\n", sii->pub.buscoreidx, sii->pub.buscoretype,
sii->pub.buscorerev));
+
#if defined(BCMSDIO)
/* Make sure any on-chip ARM is off (in case strapping is wrong), or downloaded code was
* already running.
return TRUE;
}
+
+
+
+
uint16
si_chipid(si_t *sih)
{
}
#endif /* BCMULP */
-#ifdef BCM_BACKPLANE_TIMEOUT
-uint32
-si_clear_backplane_to_fast(void *sih, void *addr)
-{
- si_t *_sih = DISCARD_QUAL(sih, si_t);
-
- if (CHIPTYPE(_sih->socitype) == SOCI_AI) {
- return ai_clear_backplane_to_fast(_sih, addr);
- }
-
- return 0;
-}
-
-const si_axi_error_info_t *
-si_get_axi_errlog_info(si_t *sih)
-{
- if (CHIPTYPE(sih->socitype) == SOCI_AI) {
- return (const si_axi_error_info_t *)sih->err_info;
- }
-
- return NULL;
-}
-
-void
-si_reset_axi_errlog_info(si_t *sih)
-{
- if (sih->err_info) {
- sih->err_info->count = 0;
- }
-}
-#endif /* BCM_BACKPLANE_TIMEOUT */
-
/**
- * Allocate an si handle. This function may be called multiple times. This function is called by
- * both si_attach() and si_kattach().
+ * Allocate an si handle. This function may be called multiple times.
*
* vars - pointer to a to-be created pointer area for "environment" variables. Some callers of this
* function set 'vars' to NULL.
chipcregs_t *cc;
char *pvars = NULL;
uint origidx;
-#ifdef NVSRCX
- char *sromvars;
-#endif // endif
+#if !defined(_CFEZ_) || defined(CFG_WL)
+#endif
ASSERT(GOODREGS(regs));
sii->sdh = sdh;
sii->osh = osh;
sii->second_bar0win = ~0x0;
- sih->enum_base = si_enum_base(devid);
#if defined(BCM_BACKPLANE_TIMEOUT)
sih->err_info = MALLOCZ(osh, sizeof(si_axi_error_info_t));
if (sih->err_info == NULL) {
- SI_ERROR(("%s: %zu bytes MALLOC FAILED",
+ SI_ERROR(("%s: %d bytes MALLOC FAILED",
__FUNCTION__, sizeof(si_axi_error_info_t)));
+ return NULL;
}
#endif /* BCM_BACKPLANE_TIMEOUT */
-#if defined(BCM_BACKPLANE_TIMEOUT)
- osl_set_bpt_cb(osh, (void *)si_clear_backplane_to_fast, (void *)sih);
-#endif // endif
/* check to see if we are a si core mimic'ing a pci core */
if ((bustype == PCI_BUS) &&
/* find Chipcommon address */
if (bustype == PCI_BUS) {
savewin = OSL_PCI_READ_CONFIG(sii->osh, PCI_BAR0_WIN, sizeof(uint32));
- if (!GOODCOREADDR(savewin, SI_ENUM_BASE(sih)))
- savewin = SI_ENUM_BASE(sih);
- OSL_PCI_WRITE_CONFIG(sii->osh, PCI_BAR0_WIN, 4, SI_ENUM_BASE(sih));
+ if (!GOODCOREADDR(savewin, SI_ENUM_BASE))
+ savewin = SI_ENUM_BASE;
+ OSL_PCI_WRITE_CONFIG(sii->osh, PCI_BAR0_WIN, 4, SI_ENUM_BASE);
if (!regs)
return NULL;
cc = (chipcregs_t *)regs;
#ifdef BCMSDIO
} else if ((bustype == SDIO_BUS) || (bustype == SPI_BUS)) {
cc = (chipcregs_t *)sii->curmap;
-#endif // endif
+#endif
} else {
- cc = (chipcregs_t *)REG_MAP(SI_ENUM_BASE(sih), SI_CORE_SIZE);
+ cc = (chipcregs_t *)REG_MAP(SI_ENUM_BASE, SI_CORE_SIZE);
}
sih->bustype = bustype;
bustype, BUSTYPE(bustype)));
return NULL;
}
-#endif // endif
+#endif
/* bus/core/clk setup for register access */
if (!si_buscore_prep(sii, bustype, devid, sdh)) {
sih->chippkg = (w & CID_PKG_MASK) >> CID_PKG_SHIFT;
#if defined(BCMSDIO) && (defined(HW_OOB) || defined(FORCE_WOWLAN))
- dhd_conf_set_hw_oob_intr(sdh, sih);
+ dhd_conf_set_hw_oob_intr(sdh, sih->chip);
#endif
si_chipid_fixup(sih);
- if (CHIPID(sih->chip) == BCM43465_CHIP_ID) {
- sih->chip = BCM4366_CHIP_ID;
- } else if (CHIPID(sih->chip) == BCM43525_CHIP_ID) {
- sih->chip = BCM4365_CHIP_ID;
- }
-
sih->issim = IS_SIM(sih->chippkg);
/* scan for cores */
SI_MSG(("Found chip type SB (0x%08x)\n", w));
sb_scan(&sii->pub, regs, devid);
} else if ((CHIPTYPE(sii->pub.socitype) == SOCI_AI) ||
- (CHIPTYPE(sii->pub.socitype) == SOCI_NAI) ||
- (CHIPTYPE(sii->pub.socitype) == SOCI_DVTBUS)) {
-
+ (CHIPTYPE(sii->pub.socitype) == SOCI_NAI)) {
if (CHIPTYPE(sii->pub.socitype) == SOCI_AI)
SI_MSG(("Found chip type AI (0x%08x)\n", w));
- else if (CHIPTYPE(sii->pub.socitype) == SOCI_NAI)
- SI_MSG(("Found chip type NAI (0x%08x)\n", w));
else
- SI_MSG(("Found chip type DVT (0x%08x)\n", w));
+ SI_MSG(("Found chip type NAI (0x%08x)\n", w));
/* pass chipc address instead of original core base */
- if (sii->osh) {
- sii->axi_wrapper = (axi_wrapper_t *)MALLOCZ(sii->osh,
- (sizeof(axi_wrapper_t) * SI_MAX_AXI_WRAPPERS));
+ sii->axi_wrapper = (axi_wrapper_t *)MALLOCZ(sii->osh,
+ (sizeof(axi_wrapper_t) * SI_MAX_AXI_WRAPPERS));
- if (sii->axi_wrapper == NULL) {
- SI_ERROR(("%s: %zu bytes MALLOC Failed", __FUNCTION__,
- (sizeof(axi_wrapper_t) * SI_MAX_AXI_WRAPPERS)));
- }
- } else {
- sii->axi_wrapper = NULL;
+ if (sii->axi_wrapper == NULL) {
+ SI_ERROR(("%s: %zu bytes MALLOC Failed", __FUNCTION__,
+ (sizeof(axi_wrapper_t) * SI_MAX_AXI_WRAPPERS)));
+ return NULL;
}
ai_scan(&sii->pub, (void *)(uintptr)cc, devid);
goto exit;
}
#ifdef BCMULP
- if (BCMULP_ENAB()) {
- si_check_boot_type(sih, osh);
- if (ulp_module_init(osh, sih) != BCME_OK) {
- ULP_ERR(("%s: err in ulp_module_init\n", __FUNCTION__));
- goto exit;
- }
+ si_check_boot_type(sih, osh);
+
+ if (ulp_module_init(osh, sih) != BCME_OK) {
+ ULP_ERR(("%s: err in ulp_module_init\n", __FUNCTION__));
+ goto exit;
}
#endif /* BCMULP */
OSL_DELAY(10);
}
- /* Set the clkdiv2 divisor bits (2:0) to 0x4 if srom is present */
- if (bustype == SI_BUS) {
- uint32 clkdiv2, sromprsnt, capabilities, srom_supported;
- capabilities = R_REG(osh, &cc->capabilities);
- srom_supported = capabilities & SROM_SUPPORTED;
- if (srom_supported)
- {
- sromprsnt = R_REG(osh, &cc->sromcontrol);
- sromprsnt = sromprsnt & SROM_PRSNT_MASK;
- if (sromprsnt) {
- /* SROM clock come from backplane clock/div2. Must <= 1Mhz */
- clkdiv2 = (R_REG(osh, &cc->clkdiv2) & ~CLKD2_SROM);
- clkdiv2 |= CLKD2_SROMDIV_192;
- W_REG(osh, &cc->clkdiv2, clkdiv2);
- }
- }
- }
-
if (bustype == PCI_BUS) {
}
-#endif // endif
+#endif
#ifdef BCM_SDRBL
/* 4360 ROM bootloader in the PCIE case: if the SDR is enabled but protection is
 * not turned on, then we want to hold the ARM in reset.
pvars = NULL;
BCM_REFERENCE(pvars);
- {
- sii->lhl_ps_mode = LHL_PS_MODE_0;
- }
-
if (!si_onetimeinit) {
+
if (CCREV(sii->pub.ccrev) >= 20) {
uint32 gpiopullup = 0, gpiopulldown = 0;
cc = (chipcregs_t *)si_setcore(sih, CC_CORE_ID, 0);
ASSERT(cc != NULL);
+ /* 4314/43142 has pin muxing, don't clear gpio bits */
+ if ((CHIPID(sih->chip) == BCM4314_CHIP_ID) ||
+ (CHIPID(sih->chip) == BCM43142_CHIP_ID)) {
+ gpiopullup |= 0x402e0;
+ gpiopulldown |= 0x20500;
+ }
+
+
W_REG(osh, &cc->gpiopullup, gpiopullup);
W_REG(osh, &cc->gpiopulldown, gpiopulldown);
si_setcoreidx(sih, origidx);
/* clear any previous epidiag-induced target abort */
ASSERT(!si_taclear(sih, FALSE));
-#if defined(BCMPMU_STATS) && !defined(BCMPMU_STATS_DISABLED)
- si_pmustatstimer_init(sih);
-#endif /* BCMPMU_STATS */
#ifdef BOOTLOADER_CONSOLE_OUTPUT
/* Enable console prints */
si_muxenab(sii, 3);
-#endif // endif
+#endif
return (sii);
si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
uint idx;
+
if (BUSTYPE(sih->bustype) == SI_BUS)
for (idx = 0; idx < SI_MAXCORES; idx++)
if (cores_info->regs[idx]) {
cores_info->regs[idx] = NULL;
}
+
#if !defined(BCMBUSTYPE) || (BCMBUSTYPE == SI_BUS)
if (cores_info != &ksii_cores_info)
#endif /* !BCMBUSTYPE || (BCMBUSTYPE == SI_BUS) */
if (CHIPTYPE(sih->socitype) == SOCI_SB)
return sb_intflag(sih);
- else if ((CHIPTYPE(sih->socitype) == SOCI_AI) ||
- (CHIPTYPE(sih->socitype) == SOCI_DVTBUS) ||
- (CHIPTYPE(sih->socitype) == SOCI_NAI))
+ else if ((CHIPTYPE(sih->socitype) == SOCI_AI) || (CHIPTYPE(sih->socitype) == SOCI_NAI))
return R_REG(sii->osh, ((uint32 *)(uintptr)
(sii->oob_router + OOB_STATUSA)));
else {
{
if (CHIPTYPE(sih->socitype) == SOCI_SB)
return sb_flag(sih);
- else if ((CHIPTYPE(sih->socitype) == SOCI_AI) ||
- (CHIPTYPE(sih->socitype) == SOCI_DVTBUS) ||
- (CHIPTYPE(sih->socitype) == SOCI_NAI))
+ else if ((CHIPTYPE(sih->socitype) == SOCI_AI) || (CHIPTYPE(sih->socitype) == SOCI_NAI))
return ai_flag(sih);
else if (CHIPTYPE(sih->socitype) == SOCI_UBUS)
return ub_flag(sih);
uint
si_flag_alt(si_t *sih)
{
- if ((CHIPTYPE(sih->socitype) == SOCI_AI) ||
- (CHIPTYPE(sih->socitype) == SOCI_DVTBUS) ||
- (CHIPTYPE(sih->socitype) == SOCI_NAI))
+ if ((CHIPTYPE(sih->socitype) == SOCI_AI) || (CHIPTYPE(sih->socitype) == SOCI_NAI))
return ai_flag_alt(sih);
else {
ASSERT(0);
{
if (CHIPTYPE(sih->socitype) == SOCI_SB)
sb_setint(sih, siflag);
- else if ((CHIPTYPE(sih->socitype) == SOCI_AI) ||
- (CHIPTYPE(sih->socitype) == SOCI_DVTBUS) ||
- (CHIPTYPE(sih->socitype) == SOCI_NAI))
+ else if ((CHIPTYPE(sih->socitype) == SOCI_AI) || (CHIPTYPE(sih->socitype) == SOCI_NAI))
ai_setint(sih, siflag);
else if (CHIPTYPE(sih->socitype) == SOCI_UBUS)
ub_setint(sih, siflag);
ASSERT(0);
}
-uint32
-si_oobr_baseaddr(si_t *sih, bool second)
-{
- si_info_t *sii = SI_INFO(sih);
-
- if (CHIPTYPE(sih->socitype) == SOCI_SB)
- return 0;
- else if ((CHIPTYPE(sih->socitype) == SOCI_AI) ||
- (CHIPTYPE(sih->socitype) == SOCI_DVTBUS) ||
- (CHIPTYPE(sih->socitype) == SOCI_NAI))
- return (second ? sii->oob_router1 : sii->oob_router);
- else {
- ASSERT(0);
- return 0;
- }
-}
-
uint
si_coreid(si_t *sih)
{
{
if (CHIPTYPE(sih->socitype) == SOCI_SB)
return sb_corevendor(sih);
- else if ((CHIPTYPE(sih->socitype) == SOCI_AI) ||
- (CHIPTYPE(sih->socitype) == SOCI_DVTBUS) ||
- (CHIPTYPE(sih->socitype) == SOCI_NAI))
+ else if ((CHIPTYPE(sih->socitype) == SOCI_AI) || (CHIPTYPE(sih->socitype) == SOCI_NAI))
return ai_corevendor(sih);
else if (CHIPTYPE(sih->socitype) == SOCI_UBUS)
return ub_corevendor(sih);
{
if (CHIPTYPE(sih->socitype) == SOCI_SB)
return sb_corerev(sih);
- else if ((CHIPTYPE(sih->socitype) == SOCI_AI) ||
- (CHIPTYPE(sih->socitype) == SOCI_DVTBUS) ||
- (CHIPTYPE(sih->socitype) == SOCI_NAI))
+ else if ((CHIPTYPE(sih->socitype) == SOCI_AI) || (CHIPTYPE(sih->socitype) == SOCI_NAI))
return ai_corerev(sih);
else if (CHIPTYPE(sih->socitype) == SOCI_UBUS)
return ub_corerev(sih);
}
}
-uint
-si_corerev_minor(si_t *sih)
-{
- if (CHIPTYPE(sih->socitype) == SOCI_AI) {
- return ai_corerev_minor(sih);
- } else {
- return 0;
- }
-}
-
/* return index of coreid or BADIDX if not found */
uint
si_findcoreidx(si_t *sih, uint coreid, uint coreunit)
uint found;
uint i;
+
found = 0;
for (i = 0; i < sii->numcores; i++)
uint
si_numcoreunits(si_t *sih, uint coreid)
{
- si_info_t *sii = SI_INFO(sih);
- si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
- uint found = 0;
- uint i;
+ if ((CHIPID(sih->chip) == BCM4347_CHIP_ID) &&
+ (CHIPREV(sih->chiprev) == 0)) {
+ /*
+ * 4347TC2 does not have Aux core.
+ * fixed to 1 here because EROM (using 4349 EROM) has two entries
+ */
+ return 1;
+ } else {
+ si_info_t *sii = SI_INFO(sih);
+ si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
+ uint found = 0;
+ uint i;
- for (i = 0; i < sii->numcores; i++) {
- if (cores_info->coreid[i] == coreid) {
- found++;
+ for (i = 0; i < sii->numcores; i++) {
+ if (cores_info->coreid[i] == coreid) {
+ found++;
+ }
}
- }
- return found;
+ return found;
+ }
}
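/*
 * Illustrative usage (editor's assumption, not original code): callers can
 * count instances of a given core with this helper, e.g.
 *
 *   uint num_d11 = si_numcoreunits(sih, D11_CORE_ID);
 */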
/** return total D11 coreunits */
return (sii->curmap);
}
+
/**
* This function changes logical "focus" to the indicated core;
* must be called with interrupts off.
if (CHIPTYPE(sih->socitype) == SOCI_SB)
return sb_setcoreidx(sih, idx);
- else if ((CHIPTYPE(sih->socitype) == SOCI_AI) ||
- (CHIPTYPE(sih->socitype) == SOCI_DVTBUS) ||
- (CHIPTYPE(sih->socitype) == SOCI_NAI))
+ else if ((CHIPTYPE(sih->socitype) == SOCI_AI) || (CHIPTYPE(sih->socitype) == SOCI_NAI))
return ai_setcoreidx(sih, idx);
else if (CHIPTYPE(sih->socitype) == SOCI_UBUS)
return ub_setcoreidx(sih, idx);
{
if (CHIPTYPE(sih->socitype) == SOCI_SB)
return sb_setcoreidx(sih, coreidx);
- else if ((CHIPTYPE(sih->socitype) == SOCI_AI) ||
- (CHIPTYPE(sih->socitype) == SOCI_DVTBUS) ||
- (CHIPTYPE(sih->socitype) == SOCI_NAI))
+ else if ((CHIPTYPE(sih->socitype) == SOCI_AI) || (CHIPTYPE(sih->socitype) == SOCI_NAI))
return ai_setcoreidx(sih, coreidx);
else if (CHIPTYPE(sih->socitype) == SOCI_UBUS)
return ub_setcoreidx(sih, coreidx);
{
if (CHIPTYPE(sih->socitype) == SOCI_SB)
return sb_numaddrspaces(sih);
- else if ((CHIPTYPE(sih->socitype) == SOCI_AI) ||
- (CHIPTYPE(sih->socitype) == SOCI_DVTBUS) ||
- (CHIPTYPE(sih->socitype) == SOCI_NAI))
+ else if ((CHIPTYPE(sih->socitype) == SOCI_AI) || (CHIPTYPE(sih->socitype) == SOCI_NAI))
return ai_numaddrspaces(sih);
else if (CHIPTYPE(sih->socitype) == SOCI_UBUS)
return ub_numaddrspaces(sih);
}
}
-/* Return the address of the nth address space in the current core
- * Arguments:
- * sih : Pointer to struct si_t
- * spidx : slave port index
- * baidx : base address index
- */
-
uint32
-si_addrspace(si_t *sih, uint spidx, uint baidx)
+si_addrspace(si_t *sih, uint asidx)
{
if (CHIPTYPE(sih->socitype) == SOCI_SB)
- return sb_addrspace(sih, baidx);
- else if ((CHIPTYPE(sih->socitype) == SOCI_AI) ||
- (CHIPTYPE(sih->socitype) == SOCI_DVTBUS) ||
- (CHIPTYPE(sih->socitype) == SOCI_NAI))
- return ai_addrspace(sih, spidx, baidx);
+ return sb_addrspace(sih, asidx);
+ else if ((CHIPTYPE(sih->socitype) == SOCI_AI) || (CHIPTYPE(sih->socitype) == SOCI_NAI))
+ return ai_addrspace(sih, asidx);
else if (CHIPTYPE(sih->socitype) == SOCI_UBUS)
- return ub_addrspace(sih, baidx);
+ return ub_addrspace(sih, asidx);
else {
ASSERT(0);
return 0;
}
}
-/* Return the size of the nth address space in the current core
- * Arguments:
- * sih : Pointer to struct si_t
- * spidx : slave port index
- * baidx : base address index
- */
uint32
-si_addrspacesize(si_t *sih, uint spidx, uint baidx)
+si_addrspacesize(si_t *sih, uint asidx)
{
if (CHIPTYPE(sih->socitype) == SOCI_SB)
- return sb_addrspacesize(sih, baidx);
- else if ((CHIPTYPE(sih->socitype) == SOCI_AI) ||
- (CHIPTYPE(sih->socitype) == SOCI_DVTBUS) ||
- (CHIPTYPE(sih->socitype) == SOCI_NAI))
- return ai_addrspacesize(sih, spidx, baidx);
+ return sb_addrspacesize(sih, asidx);
+ else if ((CHIPTYPE(sih->socitype) == SOCI_AI) || (CHIPTYPE(sih->socitype) == SOCI_NAI))
+ return ai_addrspacesize(sih, asidx);
else if (CHIPTYPE(sih->socitype) == SOCI_UBUS)
- return ub_addrspacesize(sih, baidx);
+ return ub_addrspacesize(sih, asidx);
else {
ASSERT(0);
return 0;
si_coreaddrspaceX(si_t *sih, uint asidx, uint32 *addr, uint32 *size)
{
/* Only supported for SOCI_AI */
- if ((CHIPTYPE(sih->socitype) == SOCI_AI) ||
- (CHIPTYPE(sih->socitype) == SOCI_DVTBUS) ||
- (CHIPTYPE(sih->socitype) == SOCI_NAI))
+ if ((CHIPTYPE(sih->socitype) == SOCI_AI) || (CHIPTYPE(sih->socitype) == SOCI_NAI))
ai_coreaddrspaceX(sih, asidx, addr, size);
else
*size = 0;
{
if (CHIPTYPE(sih->socitype) == SOCI_SB)
return sb_core_cflags(sih, mask, val);
- else if ((CHIPTYPE(sih->socitype) == SOCI_AI) ||
- (CHIPTYPE(sih->socitype) == SOCI_DVTBUS) ||
- (CHIPTYPE(sih->socitype) == SOCI_NAI))
+ else if ((CHIPTYPE(sih->socitype) == SOCI_AI) || (CHIPTYPE(sih->socitype) == SOCI_NAI))
return ai_core_cflags(sih, mask, val);
else if (CHIPTYPE(sih->socitype) == SOCI_UBUS)
return ub_core_cflags(sih, mask, val);
{
if (CHIPTYPE(sih->socitype) == SOCI_SB)
sb_core_cflags_wo(sih, mask, val);
- else if ((CHIPTYPE(sih->socitype) == SOCI_AI) ||
- (CHIPTYPE(sih->socitype) == SOCI_DVTBUS) ||
- (CHIPTYPE(sih->socitype) == SOCI_NAI))
+ else if ((CHIPTYPE(sih->socitype) == SOCI_AI) || (CHIPTYPE(sih->socitype) == SOCI_NAI))
ai_core_cflags_wo(sih, mask, val);
else if (CHIPTYPE(sih->socitype) == SOCI_UBUS)
ub_core_cflags_wo(sih, mask, val);
{
if (CHIPTYPE(sih->socitype) == SOCI_SB)
return sb_core_sflags(sih, mask, val);
- else if ((CHIPTYPE(sih->socitype) == SOCI_AI) ||
- (CHIPTYPE(sih->socitype) == SOCI_DVTBUS) ||
- (CHIPTYPE(sih->socitype) == SOCI_NAI))
+ else if ((CHIPTYPE(sih->socitype) == SOCI_AI) || (CHIPTYPE(sih->socitype) == SOCI_NAI))
return ai_core_sflags(sih, mask, val);
else if (CHIPTYPE(sih->socitype) == SOCI_UBUS)
return ub_core_sflags(sih, mask, val);
{
if (CHIPTYPE(sih->socitype) == SOCI_SB)
sb_commit(sih);
- else if ((CHIPTYPE(sih->socitype) == SOCI_AI) ||
- (CHIPTYPE(sih->socitype) == SOCI_DVTBUS) ||
- (CHIPTYPE(sih->socitype) == SOCI_NAI))
+ else if (CHIPTYPE(sih->socitype) == SOCI_AI || CHIPTYPE(sih->socitype) == SOCI_NAI)
;
else if (CHIPTYPE(sih->socitype) == SOCI_UBUS)
;
{
if (CHIPTYPE(sih->socitype) == SOCI_SB)
return sb_iscoreup(sih);
- else if ((CHIPTYPE(sih->socitype) == SOCI_AI) ||
- (CHIPTYPE(sih->socitype) == SOCI_DVTBUS) ||
- (CHIPTYPE(sih->socitype) == SOCI_NAI))
+ else if ((CHIPTYPE(sih->socitype) == SOCI_AI) || (CHIPTYPE(sih->socitype) == SOCI_NAI))
return ai_iscoreup(sih);
else if (CHIPTYPE(sih->socitype) == SOCI_UBUS)
return ub_iscoreup(sih);
si_wrapperreg(si_t *sih, uint32 offset, uint32 mask, uint32 val)
{
/* only for AI back plane chips */
- if ((CHIPTYPE(sih->socitype) == SOCI_AI) ||
- (CHIPTYPE(sih->socitype) == SOCI_DVTBUS) ||
- (CHIPTYPE(sih->socitype) == SOCI_NAI))
+ if ((CHIPTYPE(sih->socitype) == SOCI_AI) || (CHIPTYPE(sih->socitype) == SOCI_NAI))
return (ai_wrap_reg(sih, offset, mask, val));
return 0;
}
sii->second_bar0win = ~0x0;
}
-int
+uint
si_backplane_access(si_t *sih, uint addr, uint size, uint *val, bool read)
{
volatile uint32 *r = NULL;
{
if (CHIPTYPE(sih->socitype) == SOCI_SB)
return sb_corereg(sih, coreidx, regoff, mask, val);
- else if ((CHIPTYPE(sih->socitype) == SOCI_AI) ||
- (CHIPTYPE(sih->socitype) == SOCI_DVTBUS) ||
- (CHIPTYPE(sih->socitype) == SOCI_NAI))
+ else if ((CHIPTYPE(sih->socitype) == SOCI_AI) || (CHIPTYPE(sih->socitype) == SOCI_NAI))
return ai_corereg(sih, coreidx, regoff, mask, val);
else if (CHIPTYPE(sih->socitype) == SOCI_UBUS)
return ub_corereg(sih, coreidx, regoff, mask, val);
}
}
-uint
-si_corereg_writeonly(si_t *sih, uint coreidx, uint regoff, uint mask, uint val)
-{
- return ai_corereg_writeonly(sih, coreidx, regoff, mask, val);
-}
-
/** ILP sensitive register access needs special treatment to avoid backplane stalls */
bool si_pmu_is_ilp_sensitive(uint32 idx, uint regoff)
{
{
if (CHIPTYPE(sih->socitype) == SOCI_SB)
return sb_corereg_addr(sih, coreidx, regoff);
- else if ((CHIPTYPE(sih->socitype) == SOCI_AI) ||
- (CHIPTYPE(sih->socitype) == SOCI_DVTBUS) ||
- (CHIPTYPE(sih->socitype) == SOCI_NAI))
+ else if ((CHIPTYPE(sih->socitype) == SOCI_AI) || (CHIPTYPE(sih->socitype) == SOCI_NAI))
return ai_corereg_addr(sih, coreidx, regoff);
else {
return 0;
{
if (CHIPTYPE(sih->socitype) == SOCI_SB)
sb_core_disable(sih, bits);
- else if ((CHIPTYPE(sih->socitype) == SOCI_AI) ||
- (CHIPTYPE(sih->socitype) == SOCI_DVTBUS) ||
- (CHIPTYPE(sih->socitype) == SOCI_NAI))
+ else if ((CHIPTYPE(sih->socitype) == SOCI_AI) || (CHIPTYPE(sih->socitype) == SOCI_NAI))
ai_core_disable(sih, bits);
else if (CHIPTYPE(sih->socitype) == SOCI_UBUS)
ub_core_disable(sih, bits);
{
if (CHIPTYPE(sih->socitype) == SOCI_SB)
sb_core_reset(sih, bits, resetbits);
- else if ((CHIPTYPE(sih->socitype) == SOCI_AI) ||
- (CHIPTYPE(sih->socitype) == SOCI_DVTBUS) ||
- (CHIPTYPE(sih->socitype) == SOCI_NAI))
+ else if ((CHIPTYPE(sih->socitype) == SOCI_AI) || (CHIPTYPE(sih->socitype) == SOCI_NAI))
ai_core_reset(sih, bits, resetbits);
else if (CHIPTYPE(sih->socitype) == SOCI_UBUS)
ub_core_reset(sih, bits, resetbits);
uint idx = si_findcoreidx(sih, coreid, 0);
uint num = 0;
- if (idx != BADIDX) {
- if (CHIPTYPE(sih->socitype) == SOCI_AI) {
- num = ai_num_slaveports(sih, idx);
- }
- }
+ if ((CHIPTYPE(sih->socitype) == SOCI_AI))
+ num = ai_num_slaveports(sih, idx);
+
return num;
}
uint32
-si_get_slaveport_addr(si_t *sih, uint spidx, uint baidx, uint core_id, uint coreunit)
+si_get_slaveport_addr(si_t *sih, uint asidx, uint core_id, uint coreunit)
{
si_info_t *sii = SI_INFO(sih);
uint origidx = sii->curidx;
uint32 addr = 0x0;
- if (!((CHIPTYPE(sih->socitype) == SOCI_AI) ||
- (CHIPTYPE(sih->socitype) == SOCI_DVTBUS) ||
- (CHIPTYPE(sih->socitype) == SOCI_NAI)))
+ if (!((CHIPTYPE(sih->socitype) == SOCI_AI) || (CHIPTYPE(sih->socitype) == SOCI_NAI)))
goto done;
si_setcore(sih, core_id, coreunit);
- addr = ai_addrspace(sih, spidx, baidx);
+ addr = ai_addrspace(sih, asidx);
si_setcoreidx(sih, origidx);
}
uint32
-si_get_d11_slaveport_addr(si_t *sih, uint spidx, uint baidx, uint coreunit)
+si_get_d11_slaveport_addr(si_t *sih, uint asidx, uint coreunit)
{
si_info_t *sii = SI_INFO(sih);
uint origidx = sii->curidx;
uint32 addr = 0x0;
- if (!((CHIPTYPE(sih->socitype) == SOCI_AI) ||
- (CHIPTYPE(sih->socitype) == SOCI_DVTBUS) ||
- (CHIPTYPE(sih->socitype) == SOCI_NAI)))
+ if (!((CHIPTYPE(sih->socitype) == SOCI_AI) || (CHIPTYPE(sih->socitype) == SOCI_NAI)))
goto done;
si_setcore(sih, D11_CORE_ID, coreunit);
- addr = ai_addrspace(sih, spidx, baidx);
+ addr = ai_addrspace(sih, asidx);
si_setcoreidx(sih, origidx);
return div ? clock / div : 0;
}
+
/** calculate the speed the SI would run at given a set of clockcontrol values */
uint32
si_clock_rate(uint32 pll_type, uint32 n, uint32 m)
else if (CST4349_CHIPMODE_PCIE(sih->chipst))
hosti = CHIP_HOSTIF_PCIEMODE;
break;
- case BCM4364_CHIP_ID:
- if (CST4364_CHIPMODE_SDIOD(sih->chipst))
- hosti = CHIP_HOSTIF_SDIOMODE;
- else if (CST4364_CHIPMODE_PCIE(sih->chipst))
- hosti = CHIP_HOSTIF_PCIEMODE;
- break;
- case BCM4373_CHIP_ID:
- if (CST4373_CHIPMODE_USB20D(sih->chipst))
- hosti = CHIP_HOSTIF_USBMODE;
- else if (CST4373_CHIPMODE_SDIOD(sih->chipst))
- hosti = CHIP_HOSTIF_SDIOMODE;
- else if (CST4373_CHIPMODE_PCIE(sih->chipst))
- hosti = CHIP_HOSTIF_PCIEMODE;
- break;
-
- case BCM4347_CHIP_GRPID:
+ case BCM4347_CHIP_ID:
if (CST4347_CHIPMODE_SDIOD(sih->chipst))
hosti = CHIP_HOSTIF_SDIOMODE;
else if (CST4347_CHIPMODE_PCIE(sih->chipst))
hosti = CHIP_HOSTIF_PCIEMODE;
break;
- case BCM4369_CHIP_GRPID:
- if (CST4369_CHIPMODE_SDIOD(sih->chipst))
- hosti = CHIP_HOSTIF_SDIOMODE;
- else if (CST4369_CHIPMODE_PCIE(sih->chipst))
- hosti = CHIP_HOSTIF_PCIEMODE;
- break;
case BCM4350_CHIP_ID:
case BCM4354_CHIP_ID:
return hosti;
}
+
/** set chip watchdog reset timer to fire in 'ticks' */
void
si_watchdog(si_t *sih, uint ticks)
uint nb, maxt;
uint pmu_wdt = 1;
+
if (PMUCTL_ENAB(sih) && pmu_wdt) {
- nb = (CCREV(sih->ccrev) < 26) ? 16 : ((CCREV(sih->ccrev) >= 37) ? 32 : 24);
+ nb = (CCREV(sih->ccrev) < 26) ? 16 : ((CCREV(sih->ccrev) >= 37) ? 32 : 24);
/* The mips compiler uses the sllv instruction,
* so we specially handle the 32-bit case.
*/
SPINWAIT((PMU_REG(sih, pmustatus, 0, 0) & PST_ILPFASTLPO),
PMU_MAX_TRANSITION_DLY);
}
+
pmu_corereg(sih, SI_CC_IDX, pmuwatchdog, ~0, ticks);
} else {
maxt = (1 << 28) - 1;
return FALSE;
}
+
+
/** return the slow clock source - LPO, XTAL, or PCI */
static uint
si_slowclk_src(si_info_t *sii)
si_setcoreidx(sih, origidx);
}
+
/** change logical "focus" to the gpio core for optimized access */
volatile void *
si_gpiosetcore(si_t *sih)
return memsize;
}
+
/** Return the TCM-RAM size of the ARMCR4 core. */
uint32
si_tcm_size(si_t *sih)
bool wasup;
uint32 corecap;
uint memsize = 0;
- uint banku_size = 0;
uint32 nab = 0;
uint32 nbb = 0;
uint32 totb = 0;
W_REG(sii->osh, arm_bidx, idx);
bxinfo = R_REG(sii->osh, arm_binfo);
- if (bxinfo & ARMCR4_BUNITSZ_MASK) {
- banku_size = ARMCR4_BSZ_1K;
- } else {
- banku_size = ARMCR4_BSZ_8K;
- }
- memsize += ((bxinfo & ARMCR4_BSZ_MASK) + 1) * banku_size;
+ memsize += ((bxinfo & ARMCR4_BSZ_MASK) + 1) * ARMCR4_BSZ_MULT;
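/* Illustrative note (editor's addition, not original code): a bank whose
 * bxinfo reports (bxinfo & ARMCR4_BSZ_MASK) == 7 contributes
 * 8 * ARMCR4_BSZ_MULT bytes to the accumulated TCM size.
 */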
}
/* Return to previous state and core */
uint32 coreinfo;
uint memsize = 0;
+ if ((CHIPID(sih->chip) == BCM4334_CHIP_ID) && (CHIPREV(sih->chiprev) < 2)) {
+ return (32 * 1024);
+ }
+
if (CHIPID(sih->chip) == BCM43430_CHIP_ID ||
CHIPID(sih->chip) == BCM43018_CHIP_ID) {
return (64 * 1024);
return memsize;
}
+
#if !defined(_CFEZ_) || defined(CFG_WL)
void
si_btcgpiowar(si_t *sih)
INTR_RESTORE(sii, intr_val);
}
+void
+si_chipcontrl_btshd0_4331(si_t *sih, bool on)
+{
+ si_info_t *sii = SI_INFO(sih);
+ chipcregs_t *cc;
+ uint origidx;
+ uint32 val;
+ uint intr_val = 0;
+
+ INTR_OFF(sii, intr_val);
+
+ origidx = si_coreidx(sih);
+
+ if ((cc = (chipcregs_t *)si_setcore(sih, CC_CORE_ID, 0)) == NULL) {
+ SI_ERROR(("%s: Failed to find CORE ID!\n", __FUNCTION__));
+ return;
+ }
+
+ val = R_REG(sii->osh, &cc->chipcontrol);
+
+ /* bt_shd0 controls are the same for 4331 chiprevs 0 and 1, packages 12x9 and 12x12 */
+ if (on) {
+ /* Enable bt_shd0 on gpio4: */
+ val |= (CCTRL4331_BT_SHD0_ON_GPIO4);
+ W_REG(sii->osh, &cc->chipcontrol, val);
+ } else {
+ val &= ~(CCTRL4331_BT_SHD0_ON_GPIO4);
+ W_REG(sii->osh, &cc->chipcontrol, val);
+ }
+
+ /* restore the original index */
+ si_setcoreidx(sih, origidx);
+
+ INTR_RESTORE(sii, intr_val);
+}
+
void
si_chipcontrl_restore(si_t *sih, uint32 val)
{
return val;
}
+void
+si_chipcontrl_epa4331(si_t *sih, bool on)
+{
+ si_info_t *sii = SI_INFO(sih);
+ chipcregs_t *cc;
+ uint origidx = si_coreidx(sih);
+ uint32 val;
+
+ if ((cc = (chipcregs_t *)si_setcore(sih, CC_CORE_ID, 0)) == NULL) {
+ SI_ERROR(("%s: Failed to find CORE ID!\n", __FUNCTION__));
+ return;
+ }
+ val = R_REG(sii->osh, &cc->chipcontrol);
+
+ if (on) {
+ if (sih->chippkg == 9 || sih->chippkg == 0xb) {
+ val |= (CCTRL4331_EXTPA_EN | CCTRL4331_EXTPA_ON_GPIO2_5);
+ /* Ext PA Controls for 4331 12x9 Package */
+ W_REG(sii->osh, &cc->chipcontrol, val);
+ } else {
+ /* Ext PA Controls for 4331 12x12 Package */
+ if (CHIPREV(sih->chiprev) > 0) {
+ W_REG(sii->osh, &cc->chipcontrol, val |
+ (CCTRL4331_EXTPA_EN) | (CCTRL4331_EXTPA_EN2));
+ } else {
+ W_REG(sii->osh, &cc->chipcontrol, val | (CCTRL4331_EXTPA_EN));
+ }
+ }
+ } else {
+ val &= ~(CCTRL4331_EXTPA_EN | CCTRL4331_EXTPA_EN2 | CCTRL4331_EXTPA_ON_GPIO2_5);
+ W_REG(sii->osh, &cc->chipcontrol, val);
+ }
+
+ si_setcoreidx(sih, origidx);
+}
+
/** switch muxed pins, on: SROM, off: FEMCTRL. Called for a family of ac chips, not just 4360. */
void
si_chipcontrl_srom4360(si_t *sih, bool on)
si_setcoreidx(sih, origidx);
}
-/**
- * The SROM clock is derived from the backplane clock. 4365 (200Mhz) and 43684 (240Mhz) have a fast
- * backplane clock that requires a higher-than-POR-default clock divisor ratio for the SROM clock.
- */
void
-si_srom_clk_set(si_t *sih)
+si_clk_srom4365(si_t *sih)
{
si_info_t *sii = SI_INFO(sih);
chipcregs_t *cc;
uint origidx = si_coreidx(sih);
uint32 val;
- uint32 divisor = 1;
if ((cc = (chipcregs_t *)si_setcore(sih, CC_CORE_ID, 0)) == NULL) {
SI_ERROR(("%s: Failed to find CORE ID!\n", __FUNCTION__));
return;
}
-
val = R_REG(sii->osh, &cc->clkdiv2);
- if (BCM4365_CHIP(sih->chip)) {
- divisor = CLKD2_SROMDIV_192; /* divide 200 by 192 -> SPROM clock ~ 1.04Mhz */
+ W_REG(sii->osh, &cc->clkdiv2, ((val&~0xf) | 0x4));
+
+ si_setcoreidx(sih, origidx);
+}
+
+void
+si_chipcontrl_epa4331_wowl(si_t *sih, bool enter_wowl)
+{
+ si_info_t *sii;
+ chipcregs_t *cc;
+ uint origidx;
+ uint32 val;
+ bool sel_chip;
+
+ sel_chip = (CHIPID(sih->chip) == BCM4331_CHIP_ID) ||
+ (CHIPID(sih->chip) == BCM43431_CHIP_ID);
+ sel_chip &= ((sih->chippkg == 9 || sih->chippkg == 0xb));
+
+ if (!sel_chip)
+ return;
+
+ sii = SI_INFO(sih);
+ origidx = si_coreidx(sih);
+
+ if ((cc = (chipcregs_t *)si_setcore(sih, CC_CORE_ID, 0)) == NULL) {
+ SI_ERROR(("%s: Failed to find CORE ID!\n", __FUNCTION__));
+ return;
+ }
+
+ val = R_REG(sii->osh, &cc->chipcontrol);
+
+ if (enter_wowl) {
+ val |= CCTRL4331_EXTPA_EN;
+ W_REG(sii->osh, &cc->chipcontrol, val);
} else {
- ASSERT(0);
+ val |= (CCTRL4331_EXTPA_EN | CCTRL4331_EXTPA_ON_GPIO2_5);
+ W_REG(sii->osh, &cc->chipcontrol, val);
}
+ si_setcoreidx(sih, origidx);
+}
+#endif
+
+uint
+si_pll_reset(si_t *sih)
+{
+ uint err = 0;
+
+ return (err);
+}
+
+/** Enable BT-COEX & Ex-PA for 4313 */
+void
+si_epa_4313war(si_t *sih)
+{
+ si_info_t *sii = SI_INFO(sih);
+ chipcregs_t *cc;
+ uint origidx = si_coreidx(sih);
+
+ if ((cc = (chipcregs_t *)si_setcore(sih, CC_CORE_ID, 0)) == NULL) {
+ SI_ERROR(("%s: Failed to find CORE ID!\n", __FUNCTION__));
+ return;
+ }
+
+ /* EPA Fix */
+ W_REG(sii->osh, &cc->gpiocontrol,
+ R_REG(sii->osh, &cc->gpiocontrol) | GPIO_CTRL_EPA_EN_MASK);
- W_REG(sii->osh, &cc->clkdiv2, ((val & ~CLKD2_SROM) | divisor));
si_setcoreidx(sih, origidx);
}
-#endif // endif
+
+void
+si_clk_pmu_htavail_set(si_t *sih, bool set_clear)
+{
+}
void
si_pmu_avb_clk_set(si_t *sih, osl_t *osh, bool set_flag)
{
}
+/** Re-enable synth_pwrsw resource in min_res_mask for 4313 */
+void
+si_pmu_synth_pwrsw_4313_war(si_t *sih)
+{
+}
+
+/** WL/BT control for 4313 btcombo boards >= P250 */
+void
+si_btcombo_p250_4313_war(si_t *sih)
+{
+ si_info_t *sii = SI_INFO(sih);
+ chipcregs_t *cc;
+ uint origidx = si_coreidx(sih);
+
+ if ((cc = (chipcregs_t *)si_setcore(sih, CC_CORE_ID, 0)) == NULL) {
+ SI_ERROR(("%s: Failed to find CORE ID!\n", __FUNCTION__));
+ return;
+ }
+ W_REG(sii->osh, &cc->gpiocontrol,
+ R_REG(sii->osh, &cc->gpiocontrol) | GPIO_CTRL_5_6_EN_MASK);
+
+ W_REG(sii->osh, &cc->gpioouten,
+ R_REG(sii->osh, &cc->gpioouten) | GPIO_CTRL_5_6_EN_MASK);
+
+ si_setcoreidx(sih, origidx);
+}
void
si_btc_enable_chipcontrol(si_t *sih)
{
si_setcoreidx(sih, origidx);
}
+void
+si_btcombo_43228_war(si_t *sih)
+{
+ si_info_t *sii = SI_INFO(sih);
+ chipcregs_t *cc;
+ uint origidx = si_coreidx(sih);
+
+ if ((cc = (chipcregs_t *)si_setcore(sih, CC_CORE_ID, 0)) == NULL) {
+ SI_ERROR(("%s: Failed to find CORE ID!\n", __FUNCTION__));
+ return;
+ }
+
+ W_REG(sii->osh, &cc->gpioouten, GPIO_CTRL_7_6_EN_MASK);
+ W_REG(sii->osh, &cc->gpioout, GPIO_OUT_7_EN_MASK);
+
+ si_setcoreidx(sih, origidx);
+}
/** cache device removed state */
void si_set_device_removed(si_t *sih, bool status)
return (boot_type == WARM_BOOT);
#else
return FALSE;
-#endif // endif
+#endif
}
bool
case BCM43018_CHIP_ID:
case BCM43430_CHIP_ID:
return FALSE;
+ case BCM4336_CHIP_ID:
+ case BCM43362_CHIP_ID:
+ return (sih->chipst & CST4336_SPROM_PRESENT) != 0;
+ case BCM4330_CHIP_ID:
+ return (sih->chipst & CST4330_SPROM_PRESENT) != 0;
+ case BCM4313_CHIP_ID:
+ return (sih->chipst & CST4313_SPROM_PRESENT) != 0;
+ case BCM4331_CHIP_ID:
+ case BCM43431_CHIP_ID:
+ return (sih->chipst & CST4331_SPROM_PRESENT) != 0;
+ case BCM43239_CHIP_ID:
+ return ((sih->chipst & CST43239_SPROM_MASK) &&
+ !(sih->chipst & CST43239_SFLASH_MASK));
+ case BCM4324_CHIP_ID:
+ case BCM43242_CHIP_ID:
+ return ((sih->chipst & CST4324_SPROM_MASK) &&
+ !(sih->chipst & CST4324_SFLASH_MASK));
case BCM4335_CHIP_ID:
CASE_BCM4345_CHIP:
return ((sih->chipst & CST4335_SPROM_MASK) &&
return (sih->chipst & CST4349_SPROM_PRESENT) != 0;
case BCM53573_CHIP_GRPID:
return FALSE; /* SPROM PRESENT is not defined for 53573 as of now */
- case BCM4364_CHIP_ID:
- return (sih->chipst & CST4364_SPROM_PRESENT) != 0;
- case BCM4369_CHIP_GRPID:
- if (CHIPREV(sih->chiprev) == 0) {
- /* WAR for 4369a0: HW4369-1729. no sprom, default to otp always. */
- return 0;
- } else {
- return (sih->chipst & CST4369_SPROM_PRESENT) != 0;
- }
- case BCM4347_CHIP_GRPID:
+ case BCM4347_CHIP_ID:
return (sih->chipst & CST4347_SPROM_PRESENT) != 0;
break;
case BCM4350_CHIP_ID:
return (sih->chipst & CST43602_SPROM_PRESENT) != 0;
case BCM43131_CHIP_ID:
case BCM43217_CHIP_ID:
+ case BCM43227_CHIP_ID:
+ case BCM43228_CHIP_ID:
case BCM43428_CHIP_ID:
return (sih->chipst & CST43228_OTP_PRESENT) != CST43228_OTP_PRESENT;
- case BCM4373_CHIP_ID:
case BCM43012_CHIP_ID:
return FALSE;
default:
}
}
+
uint32 si_get_sromctl(si_t *sih)
{
chipcregs_t *cc;
return ret_val;
}
+
/* Clean up the timer from the host when the ARM has been halted
 * without a chance for the ARM to clean up its own resources.
 * If not cleaned up, an interrupt from a software timer can still
{
}
+
+#ifdef SURVIVE_PERST_ENAB
+static uint32
+si_pcie_survive_perst(si_t *sih, uint32 mask, uint32 val)
+{
+ si_info_t *sii;
+
+ sii = SI_INFO(sih);
+
+ if (!PCIE(sii))
+ return (0);
+
+ return pcie_survive_perst(sii->pch, mask, val);
+}
+
+static void
+si_watchdog_reset(si_t *sih)
+{
+ uint32 i;
+
+ /* issue a watchdog reset */
+ pmu_corereg(sih, SI_CC_IDX, pmuwatchdog, 2, 2);
+ /* do busy wait for 20ms */
+ for (i = 0; i < 2000; i++) {
+ OSL_DELAY(10);
+ }
+}
+#endif /* SURVIVE_PERST_ENAB */
+
+void
+si_survive_perst_war(si_t *sih, bool reset, uint32 sperst_mask, uint32 sperst_val)
+{
+#ifdef SURVIVE_PERST_ENAB
+ if (BUSTYPE(sih->bustype) != PCI_BUS)
+ return;
+
+ if ((CHIPID(sih->chip) != BCM4360_CHIP_ID && CHIPID(sih->chip) != BCM4352_CHIP_ID) ||
+ (CHIPREV(sih->chiprev) >= 4))
+ return;
+
+ if (reset) {
+ si_info_t *sii = SI_INFO(sih);
+ uint32 bar0win, bar0win_after;
+
+ /* save the bar0win */
+ bar0win = OSL_PCI_READ_CONFIG(sii->osh, PCI_BAR0_WIN, sizeof(uint32));
+
+ si_watchdog_reset(sih);
+
+ bar0win_after = OSL_PCI_READ_CONFIG(sii->osh, PCI_BAR0_WIN, sizeof(uint32));
+ if (bar0win_after != bar0win) {
+ SI_ERROR(("%s: bar0win before %08x, bar0win after %08x\n",
+ __FUNCTION__, bar0win, bar0win_after));
+ OSL_PCI_WRITE_CONFIG(sii->osh, PCI_BAR0_WIN, sizeof(uint32), bar0win);
+ }
+ }
+ if (sperst_mask) {
+ /* enable survive perst */
+ si_pcie_survive_perst(sih, sperst_mask, sperst_val);
+ }
+#endif /* SURVIVE_PERST_ENAB */
+}
+
/* Caller of this function should make sure it is on the PCIE core
* Used in pciedev.c.
*/
si_pcie_disable_oobselltr(si_t *sih)
{
ASSERT(si_coreid(sih) == PCIE2_CORE_ID);
- if (PCIECOREREV(sih->buscorerev) >= 23)
- si_wrapperreg(sih, AI_OOBSELIND74, ~0, 0);
- else
- si_wrapperreg(sih, AI_OOBSELIND30, ~0, 0);
+ si_wrapperreg(sih, AI_OOBSELIND30, ~0, 0);
}
void
{
}
+
+#ifdef BCM_BACKPLANE_TIMEOUT
+uint32
+si_clear_backplane_to_fast(si_t *sih, void * addr)
+{
+ if (CHIPTYPE(sih->socitype) == SOCI_AI) {
+ return ai_clear_backplane_to_fast(sih, addr);
+ }
+
+ return 0;
+}
+
+const si_axi_error_info_t * si_get_axi_errlog_info(si_t * sih)
+{
+ if (CHIPTYPE(sih->socitype) == SOCI_AI) {
+ return (const si_axi_error_info_t *)sih->err_info;
+ }
+
+ return NULL;
+}
+
+void si_reset_axi_errlog_info(si_t * sih)
+{
+ sih->err_info->count = 0;
+}
+#endif /* BCM_BACKPLANE_TIMEOUT */
+
#if defined(AXI_TIMEOUTS) || defined(BCM_BACKPLANE_TIMEOUT)
uint32
si_clear_backplane_to_per_core(si_t *sih, uint coreid, uint coreunit, void * wrap)
{
- if ((CHIPTYPE(sih->socitype) == SOCI_AI) ||
- (CHIPTYPE(sih->socitype) == SOCI_DVTBUS)) {
+ if (CHIPTYPE(sih->socitype) == SOCI_AI) {
return ai_clear_backplane_to_per_core(sih, coreid, coreunit, wrap);
}
uint32
si_clear_backplane_to(si_t *sih)
{
- if ((CHIPTYPE(sih->socitype) == SOCI_AI) ||
- (CHIPTYPE(sih->socitype) == SOCI_DVTBUS)) {
+ if (CHIPTYPE(sih->socitype) == SOCI_AI) {
return ai_clear_backplane_to(sih);
}
return 0;
}
-void
-si_update_backplane_timeouts(si_t *sih, bool enable, uint32 timeout_exp, uint32 cid)
-{
-#if defined(AXI_TIMEOUTS) || defined(BCM_BACKPLANE_TIMEOUT)
- /* Enable only for AXI */
- if (CHIPTYPE(sih->socitype) != SOCI_AI) {
- return;
- }
-
- ai_update_backplane_timeouts(sih, enable, timeout_exp, cid);
-#endif /* AXI_TIMEOUTS || BCM_BACKPLANE_TIMEOUT */
-}
-
/*
* This routine adds the AXI timeouts for
* chipcommon, pcie and ARM slave wrappers
si_slave_wrapper_add(si_t *sih)
{
#if defined(AXI_TIMEOUTS) || defined(BCM_BACKPLANE_TIMEOUT)
- uint32 axi_to = 0;
-
/* Enable only for AXI */
- if ((CHIPTYPE(sih->socitype) != SOCI_AI) &&
- (CHIPTYPE(sih->socitype) != SOCI_DVTBUS)) {
+ if (CHIPTYPE(sih->socitype) != SOCI_AI) {
return;
}
ASSERT(wrapper_idx >= 0); /* all addresses valid for the chiprev under test */
}
- if (BCM4347_CHIP(sih->chip)) {
- axi_to = AXI_TO_VAL_4347;
- }
- else {
- axi_to = AXI_TO_VAL;
- }
-
/* All required slave wrappers are added in ai_scan */
- ai_update_backplane_timeouts(sih, TRUE, axi_to, 0);
-
-#ifdef DISABLE_PCIE2_AXI_TIMEOUT
- ai_update_backplane_timeouts(sih, FALSE, 0, PCIE_CORE_ID);
- ai_update_backplane_timeouts(sih, FALSE, 0, PCIE2_CORE_ID);
-#endif // endif
-
+ ai_enable_backplane_timeouts(sih);
#endif /* AXI_TIMEOUTS || BCM_BACKPLANE_TIMEOUT */
-
}
+
void
si_pll_sr_reinit(si_t *sih)
{
}
+
/* Programming d11 core oob settings for 4364
* WARs for HW4364-237 and HW4364-166
*/
}
break;
#endif /* SAVERESTORE */
- case BCM4347_CHIP_GRPID:
- case BCM4369_CHIP_GRPID:
- si_pmu_chipcontrol(sih, PMU_CHIPCTL1,
- PMU_CC1_ENABLE_CLOSED_LOOP_MASK, PMU_CC1_ENABLE_CLOSED_LOOP);
- break;
default:
/* any unsupported chip bail */
return;
}
-#endif // endif
+#endif
}
-#if defined(BCMSRPWR) && !defined(BCMSRPWR_DISABLED)
-bool _bcmsrpwr = TRUE;
-#else
-bool _bcmsrpwr = FALSE;
-#endif // endif
-
-#define PWRREQ_OFFSET(sih) OFFSETOF(chipcregs_t, powerctl)
-
-static void
-si_corereg_pciefast_write(si_t *sih, uint regoff, uint val)
+void
+si_update_macclk_mul_fact(si_t *sih, uint32 mul_fact)
{
- volatile uint32 *r = NULL;
si_info_t *sii = SI_INFO(sih);
-
- ASSERT((BUSTYPE(sih->bustype) == PCI_BUS));
-
- r = (volatile uint32 *)((volatile char *)sii->curmap +
- PCI_16KB0_PCIREGS_OFFSET + regoff);
-
- W_REG(sii->osh, r, val);
+ sii->macclk_mul_fact = mul_fact;
}
-static uint
-si_corereg_pciefast_read(si_t *sih, uint regoff)
+uint32
+si_get_macclk_mul_fact(si_t *sih)
{
- volatile uint32 *r = NULL;
si_info_t *sii = SI_INFO(sih);
+ return sii->macclk_mul_fact;
+}
- ASSERT((BUSTYPE(sih->bustype) == PCI_BUS));
-
- r = (volatile uint32 *)((volatile char *)sii->curmap +
- PCI_16KB0_PCIREGS_OFFSET + regoff);
- return R_REG(sii->osh, r);
-}
+#if defined(BCMSRPWR) && !defined(BCMSRPWR_DISABLED)
+bool _bcmsrpwr = TRUE;
+#else
+bool _bcmsrpwr = FALSE;
+#endif
uint32
si_srpwr_request(si_t *sih, uint32 mask, uint32 val)
{
- uint32 r, offset = (BUSTYPE(sih->bustype) == SI_BUS) ?
- OFFSETOF(chipcregs_t, powerctl) : PWRREQ_OFFSET(sih);
- uint32 mask2 = mask;
- uint32 val2 = val;
- volatile uint32 *fast_srpwr_addr = (volatile uint32 *)((uintptr)SI_ENUM_BASE(sih)
- + (uintptr)offset);
+ uint32 r, offset = OFFSETOF(chipcregs_t, powerctl); /* Same 0x1e8 per core */
+ uint cidx = (BUSTYPE(sih->bustype) == SI_BUS) ? SI_CC_IDX : sih->buscoreidx;
if (mask || val) {
mask <<= SRPWR_REQON_SHIFT;
val <<= SRPWR_REQON_SHIFT;
- /* Return if requested power request is already set */
- if (BUSTYPE(sih->bustype) == SI_BUS) {
- r = R_REG(OSH_NULL, fast_srpwr_addr);
- } else {
- r = si_corereg_pciefast_read(sih, offset);
- }
-
- if ((r & mask) == val) {
- return r;
- }
-
- r = (r & ~mask) | val;
-
- if (BUSTYPE(sih->bustype) == SI_BUS) {
- W_REG(OSH_NULL, fast_srpwr_addr, r);
- r = R_REG(OSH_NULL, fast_srpwr_addr);
- } else {
- si_corereg_pciefast_write(sih, offset, r);
- r = si_corereg_pciefast_read(sih, offset);
- }
-
- if (val2) {
- if ((r & (mask2 << SRPWR_STATUS_SHIFT)) ==
- (val2 << SRPWR_STATUS_SHIFT)) {
- return r;
- }
- si_srpwr_stat_spinwait(sih, mask2, val2);
- }
+ r = ((si_corereg(sih, cidx, offset, 0, 0) & ~mask) | val);
+ r = si_corereg(sih, cidx, offset, ~0, r);
} else {
- if (BUSTYPE(sih->bustype) == SI_BUS) {
- r = R_REG(OSH_NULL, fast_srpwr_addr);
- } else {
- r = si_corereg_pciefast_read(sih, offset);
- }
+ r = si_corereg(sih, cidx, offset, 0, 0);
}
return r;
uint32
si_srpwr_stat_spinwait(si_t *sih, uint32 mask, uint32 val)
{
- uint32 r, offset = (BUSTYPE(sih->bustype) == SI_BUS) ?
- OFFSETOF(chipcregs_t, powerctl) : PWRREQ_OFFSET(sih);
- volatile uint32 *fast_srpwr_addr = (volatile uint32 *)((uintptr)SI_ENUM_BASE(sih)
- + (uintptr)offset);
+ uint32 r, offset = OFFSETOF(chipcregs_t, powerctl); /* Same 0x1e8 per core */
+ uint cidx = (BUSTYPE(sih->bustype) == SI_BUS) ? SI_CC_IDX : sih->buscoreidx;
ASSERT(mask);
ASSERT(val);
mask <<= SRPWR_STATUS_SHIFT;
val <<= SRPWR_STATUS_SHIFT;
- if (BUSTYPE(sih->bustype) == SI_BUS) {
- SPINWAIT(((R_REG(OSH_NULL, fast_srpwr_addr) & mask) != val),
- PMU_MAX_TRANSITION_DLY);
- r = R_REG(OSH_NULL, fast_srpwr_addr) & mask;
- ASSERT(r == val);
- } else {
- SPINWAIT(((si_corereg_pciefast_read(sih, offset) & mask) != val),
- PMU_MAX_TRANSITION_DLY);
- r = si_corereg_pciefast_read(sih, offset) & mask;
- ASSERT(r == val);
- }
+ SPINWAIT(((si_corereg(sih, cidx, offset, 0, 0) & mask) != val),
+ PMU_MAX_TRANSITION_DLY);
+ ASSERT((si_corereg(sih, cidx, offset, 0, 0) & mask) == val);
- r = (r >> SRPWR_STATUS_SHIFT) & SRPWR_DMN_ALL_MASK(sih);
+ r = si_corereg(sih, cidx, offset, 0, 0) & mask;
+ r = (r >> SRPWR_STATUS_SHIFT) & SRPWR_DMN_ALL_MASK;
return r;
}
uint32
si_srpwr_stat(si_t *sih)
{
- uint32 r, offset = (BUSTYPE(sih->bustype) == SI_BUS) ?
- OFFSETOF(chipcregs_t, powerctl) : PWRREQ_OFFSET(sih);
+ uint32 r, offset = OFFSETOF(chipcregs_t, powerctl); /* Same 0x1e8 per core */
uint cidx = (BUSTYPE(sih->bustype) == SI_BUS) ? SI_CC_IDX : sih->buscoreidx;
- if (BUSTYPE(sih->bustype) == SI_BUS) {
- r = si_corereg(sih, cidx, offset, 0, 0);
- } else {
- r = si_corereg_pciefast_read(sih, offset);
- }
-
- r = (r >> SRPWR_STATUS_SHIFT) & SRPWR_DMN_ALL_MASK(sih);
+ r = si_corereg(sih, cidx, offset, 0, 0);
+ r = (r >> SRPWR_STATUS_SHIFT) & SRPWR_DMN_ALL_MASK;
return r;
}
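/*
 * Illustrative usage (editor's assumption, not original code): to request
 * that a save/restore power domain stay up, and later release it:
 *
 *   si_srpwr_request(sih, SRPWR_DMN3_MACMAIN_MASK, SRPWR_DMN3_MACMAIN_MASK);
 *   ...
 *   si_srpwr_request(sih, SRPWR_DMN3_MACMAIN_MASK, 0);
 *
 * SRPWR_DMN3_MACMAIN_MASK is the main-MAC domain mask referenced earlier in
 * this file.
 */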
uint32
si_srpwr_domain(si_t *sih)
{
- uint32 r, offset = (BUSTYPE(sih->bustype) == SI_BUS) ?
- OFFSETOF(chipcregs_t, powerctl) : PWRREQ_OFFSET(sih);
+ uint32 r, offset = OFFSETOF(chipcregs_t, powerctl); /* Same 0x1e8 per core */
uint cidx = (BUSTYPE(sih->bustype) == SI_BUS) ? SI_CC_IDX : sih->buscoreidx;
- if (BUSTYPE(sih->bustype) == SI_BUS) {
- r = si_corereg(sih, cidx, offset, 0, 0);
- } else {
- r = si_corereg_pciefast_read(sih, offset);
- }
-
- r = (r >> SRPWR_DMN_ID_SHIFT) & SRPWR_DMN_ID_MASK;
+ r = si_corereg(sih, cidx, offset, 0, 0);
+ r = (r >> SRPWR_DMN_SHIFT) & SRPWR_DMN_ALL_MASK;
return r;
}
-uint32
-si_srpwr_domain_all_mask(si_t *sih)
-{
- uint32 mask = SRPWR_DMN0_PCIE_MASK |
- SRPWR_DMN1_ARMBPSD_MASK |
- SRPWR_DMN2_MACAUX_MASK |
- SRPWR_DMN3_MACMAIN_MASK;
-
- if (si_scan_core_present(sih)) {
- mask |= SRPWR_DMN4_MACSCAN_MASK;
- }
-
- return mask;
-}
-
/* Utility API to read/write the raw registers with absolute address.
 * This function can be invoked from either the FW or the host driver.
*/
return val;
}
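/*
 * Illustrative usage (editor's assumption, not original code): reading a
 * 32-bit register by absolute backplane address with si_backplane_access(),
 * whose signature appears earlier in this file:
 *
 *   uint val = 0;
 *   (void)si_backplane_access(sih, SI_ENUM_BASE + OFFSETOF(chipcregs_t, chipid),
 *                             sizeof(uint32), &val, TRUE);
 */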
-
-uint8
-si_lhl_ps_mode(si_t *sih)
-{
- si_info_t *sii = SI_INFO(sih);
- return sii->lhl_ps_mode;
-}
-
-bool
-BCMRAMFN(si_scan_core_present)(si_t *sih)
-{
- return ((si_numcoreunits(sih, D11_CORE_ID) >= 2) &&
- (si_numcoreunits(sih, SR_CORE_ID) > 4));
-}
/*
* Include file private to the SOC Interconnect support files.
*
- * Copyright (C) 1999-2019, Broadcom.
- *
+ * Copyright (C) 1999-2017, Broadcom Corporation
+ *
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
* under the terms of the GNU General Public License version 2 (the "GPL"),
* available at http://www.broadcom.com/licenses/GPLv2.php, with the
* following added to such license:
- *
+ *
* As a special exception, the copyright holders of this software give you
* permission to link this software with independent modules, and to copy and
* distribute the resulting executable under terms of your choice, provided that
* the license of that module. An independent module is a module which is not
* derived from this software. The special exception does not apply to any
* modifications of the software.
- *
+ *
* Notwithstanding the above, under no circumstances may you combine this
* software in any way with any other Broadcom software provided under a license
* other than the GPL, without Broadcom's express prior written consent.
*
* <<Broadcom-WL-IPTag/Open:>>
*
- * $Id: siutils_priv.h 795345 2018-12-18 16:52:03Z $
+ * $Id: siutils_priv.h 625739 2016-03-17 12:28:03Z $
*/
#ifndef _siutils_priv_h_
#if defined(SI_ERROR_ENFORCE)
#define SI_ERROR(args) printf args
#else
-#define SI_ERROR(args)
-#endif // endif
+#define SI_ERROR(args) printf args
+#endif
#if defined(ENABLE_CORECAPTURE)
#endif /* ENABLE_CORECAPTURE */
+
#define SI_MSG(args)
#ifdef BCMDBG_SI
#define SI_VMSG(args) printf args
#else
#define SI_VMSG(args)
-#endif // endif
+#endif
#define IS_SIM(chippkg) ((chippkg == HDLSIM_PKG_ID) || (chippkg == HWSIM_PKG_ID))
typedef void (*si_intrsrestore_t)(void *intr_arg, uint32 arg);
typedef bool (*si_intrsenabled_t)(void *intr_arg);
+
#define SI_GPIO_MAX 16
typedef struct gci_gpio_item {
/* for some combo chips, the BT side accesses chipcommon->0x190 as a 16-byte addr */
/* the register at 0x19C doesn't exist, so an error is logged at the slave wrapper */
#define BT_CC_SPROM_BADREG_LO 0x18000190
-#define BT_CC_SPROM_BADREG_SIZE 4
#define BT_CC_SPROM_BADREG_HI 0
-#define BCM4350_BT_AXI_ID 6
-#define BCM4345_BT_AXI_ID 6
-#define BCM4349_BT_AXI_ID 5
-#define BCM4364_BT_AXI_ID 5
-
-/* for BT logging and memory dump, ignore failed access to BT memory */
-#define BCM4347_BT_ADDR_HI 0
-#define BCM4347_BT_ADDR_LO 0x19000000 /* BT address space */
-#define BCM4347_BT_SIZE 0x01000000 /* BT address space size */
-#define BCM4347_UNUSED_AXI_ID 0xffffffff
-#define BCM4347_CC_AXI_ID 0
-#define BCM4347_PCIE_AXI_ID 1
+#define BCM4350_BT_AXI_ID 6
+#define BCM4345_BT_AXI_ID 6
typedef struct si_cores_info {
volatile void *regs[SI_MAXCORES]; /* other regs va */
void *wrappers2[SI_MAXCORES]; /**< other cores wrapper va */
uint32 wrapba2[SI_MAXCORES]; /**< address of controlling wrapper */
- void *wrappers3[SI_MAXCORES]; /**< other cores wrapper va */
- uint32 wrapba3[SI_MAXCORES]; /**< address of controlling wrapper */
-
uint32 cia[SI_MAXCORES]; /**< erom cia entry for each core */
uint32 cib[SI_MAXCORES]; /**< erom cia entry for each core */
-
- uint32 csp2ba[SI_MAXCORES]; /**< Second slave port base addr 0 */
- uint32 csp2ba_size[SI_MAXCORES]; /**< Second slave port addr space size */
} si_cores_info_t;
/** misc si info needed by some of the routines */
void *curwrap; /**< current wrapper va */
uint32 oob_router; /**< oob router registers for axi */
- uint32 oob_router1; /**< oob router registers for axi */
si_cores_info_t *cores_info;
gci_gpio_item_t *gci_gpio_head; /**< gci gpio interrupts head */
uint num_br; /**< # discovered bridges */
uint32 br_wrapba[SI_MAXBR]; /**< address of bridge controlling wrapper */
uint32 xtalfreq;
- uint32 openloop_dco_code; /**< OPEN loop calibration dco code */
- uint8 spurmode;
+ uint32 macclk_mul_fact; /* Multiplication factor needed to adjust the MAC clock
+ * during ULB mode operation. One place this is used is when configuring the TSF
+ * L-frac register.
+ */
bool device_removed;
uint axi_num_wrappers;
axi_wrapper_t * axi_wrapper;
- uint8 device_wake_opt; /* device_wake GPIO number */
- uint8 lhl_ps_mode;
} si_info_t;
+
#define SI_INFO(sih) ((si_info_t *)(uintptr)sih)
#define GOODCOREADDR(x, b) (((x) >= (b)) && ((x) < ((b) + SI_MAXCORES * SI_CORE_SIZE)) && \
#ifndef DEFAULT_GPIOTIMERVAL
#define DEFAULT_GPIOTIMERVAL ((DEFAULT_GPIO_ONTIME << GPIO_ONTIME_SHIFT) | DEFAULT_GPIO_OFFTIME)
-#endif // endif
+#endif
/* Silicon Backplane externs */
extern void sb_scan(si_t *sih, volatile void *regs, uint devid);
#if defined(BCMDBG_PHYDUMP)
extern void sb_dumpregs(si_t *sih, struct bcmstrbuf *b);
-#endif // endif
+#endif
/* Wake-on-wireless-LAN (WOWL) */
extern bool sb_pci_pmecap(si_t *sih);
extern uint ai_coreidx(si_t *sih);
extern uint ai_corevendor(si_t *sih);
extern uint ai_corerev(si_t *sih);
-extern uint ai_corerev_minor(si_t *sih);
extern volatile uint32 *ai_corereg_addr(si_t *sih, uint coreidx, uint regoff);
extern bool ai_iscoreup(si_t *sih);
extern volatile void *ai_setcoreidx(si_t *sih, uint coreidx);
extern volatile void *ai_setcoreidx_2ndwrap(si_t *sih, uint coreidx);
-extern volatile void *ai_setcoreidx_3rdwrap(si_t *sih, uint coreidx);
extern uint32 ai_core_cflags(si_t *sih, uint32 mask, uint32 val);
extern void ai_core_cflags_wo(si_t *sih, uint32 mask, uint32 val);
extern uint32 ai_core_sflags(si_t *sih, uint32 mask, uint32 val);
extern uint ai_corereg(si_t *sih, uint coreidx, uint regoff, uint mask, uint val);
-extern uint ai_corereg_writeonly(si_t *sih, uint coreidx, uint regoff, uint mask, uint val);
extern void ai_core_reset(si_t *sih, uint32 bits, uint32 resetbits);
extern void ai_d11rsdb_core_reset(si_t *sih, uint32 bits,
- uint32 resetbits, void *p, volatile void *s);
+ uint32 resetbits, void *p, void *s);
extern void ai_core_disable(si_t *sih, uint32 bits);
extern void ai_d11rsdb_core_disable(const si_info_t *sii, uint32 bits,
aidmp_t *pmacai, aidmp_t *smacai);
extern int ai_numaddrspaces(si_t *sih);
-extern uint32 ai_addrspace(si_t *sih, uint spidx, uint baidx);
-extern uint32 ai_addrspacesize(si_t *sih, uint spidx, uint baidx);
+extern uint32 ai_addrspace(si_t *sih, uint asidx);
+extern uint32 ai_addrspacesize(si_t *sih, uint asidx);
extern void ai_coreaddrspaceX(si_t *sih, uint asidx, uint32 *addr, uint32 *size);
extern uint ai_wrap_reg(si_t *sih, uint32 offset, uint32 mask, uint32 val);
-extern void ai_update_backplane_timeouts(si_t *sih, bool enable, uint32 timeout, uint32 cid);
+extern void ai_enable_backplane_timeouts(si_t *sih);
extern uint32 ai_clear_backplane_to(si_t *sih);
-void ai_force_clocks(si_t *sih, uint clock_state);
extern uint ai_num_slaveports(si_t *sih, uint coreidx);
#ifdef BCM_BACKPLANE_TIMEOUT
#if defined(BCMDBG_PHYDUMP)
extern void ai_dumpregs(si_t *sih, struct bcmstrbuf *b);
-#endif // endif
+#endif
extern uint32 ai_wrapper_dump_buf_size(si_t *sih);
extern uint32 ai_wrapper_dump_binary(si_t *sih, uchar *p);
-extern bool ai_check_enable_backplane_log(si_t *sih);
-extern uint32 ai_wrapper_dump_last_timeout(si_t *sih, uint32 *error, uint32 *core, uint32 *ba,
- uchar *p);
#define ub_scan(a, b, c) do {} while (0)
#define ub_flag(a) (0)
/*
* Linux cfg80211 driver - Android related functions
*
- * Copyright (C) 1999-2019, Broadcom.
- *
+ * Copyright (C) 1999-2017, Broadcom Corporation
+ *
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
* under the terms of the GNU General Public License version 2 (the "GPL"),
* available at http://www.broadcom.com/licenses/GPLv2.php, with the
* following added to such license:
- *
+ *
* As a special exception, the copyright holders of this software give you
* permission to link this software with independent modules, and to copy and
* distribute the resulting executable under terms of your choice, provided that
* the license of that module. An independent module is a module which is not
* derived from this software. The special exception does not apply to any
* modifications of the software.
- *
+ *
* Notwithstanding the above, under no circumstances may you combine this
* software in any way with any other Broadcom software provided under a license
* other than the GPL, without Broadcom's express prior written consent.
*
* <<Broadcom-WL-IPTag/Open:>>
*
- * $Id: wl_android.c 825470 2019-06-14 09:08:11Z $
+ * $Id: wl_android.c 710862 2017-07-14 07:43:59Z $
*/
#include <linux/module.h>
#include <net/netlink.h>
#ifdef CONFIG_COMPAT
#include <linux/compat.h>
-#endif // endif
+#endif
#include <wl_android.h>
#include <wldev_common.h>
+#include <wlc_types.h>
#include <wlioctl.h>
#include <wlioctl_utils.h>
#include <bcmutils.h>
-#include <bcmstdlib_s.h>
#include <linux_osl.h>
#include <dhd_dbg.h>
#include <dngl_stats.h>
#include <bcmip.h>
#ifdef PNO_SUPPORT
#include <dhd_pno.h>
-#endif // endif
+#endif
#ifdef BCMSDIO
#include <bcmsdbus.h>
-#endif // endif
+#endif
#ifdef WL_CFG80211
#include <wl_cfg80211.h>
-#include <wl_cfgscan.h>
-#endif // endif
-#ifdef WL_NAN
-#include <wl_cfgnan.h>
-#endif /* WL_NAN */
+#endif
#ifdef DHDTCPACK_SUPPRESS
#include <dhd_ip.h>
#endif /* DHDTCPACK_SUPPRESS */
-#include <bcmwifi_rspec.h>
#include <dhd_linux.h>
-#include <bcmiov.h>
-#ifdef WL_BCNRECV
-#include <wl_cfgvendor.h>
-#include <brcm_nl80211.h>
-#endif /* WL_BCNRECV */
-#ifdef WL_MBO
-#include <mbo.h>
-#endif /* WL_MBO */
-#ifdef RTT_SUPPORT
-#include <dhd_rtt.h>
-#endif /* RTT_SUPPORT */
-#ifdef WL_ESCAN
-#include <wl_escan.h>
+#ifdef DHD_PKT_LOGGING
+#include <dhd_pktlog.h>
+#endif /* DHD_PKT_LOGGING */
+
+#if defined(STAT_REPORT)
+#include <wl_statreport.h>
+#endif /* STAT_REPORT */
+
+#ifndef WL_CFG80211
+#define htod32(i) i
+#define htod16(i) i
+#define dtoh32(i) i
+#define dtoh16(i) i
+#define htodchanspec(i) i
+#define dtohchanspec(i) i
#endif
-#ifdef WL_STATIC_IF
-#define WL_BSSIDX_MAX 16
-#endif /* WL_STATIC_IF */
-
-uint android_msg_level = ANDROID_ERROR_LEVEL | ANDROID_MSG_LEVEL;
-
-#define ANDROID_ERROR_MSG(x, args...) \
- do { \
- if (android_msg_level & ANDROID_ERROR_LEVEL) { \
- printk(KERN_ERR "[dhd] ANDROID-ERROR) " x, ## args); \
- } \
- } while (0)
-#define ANDROID_TRACE_MSG(x, args...) \
- do { \
- if (android_msg_level & ANDROID_TRACE_LEVEL) { \
- printk(KERN_INFO "[dhd] ANDROID-TRACE) " x, ## args); \
- } \
- } while (0)
-#define ANDROID_INFO_MSG(x, args...) \
- do { \
- if (android_msg_level & ANDROID_INFO_LEVEL) { \
- printk(KERN_INFO "[dhd] ANDROID-INFO) " x, ## args); \
- } \
- } while (0)
-#define ANDROID_ERROR(x) ANDROID_ERROR_MSG x
-#define ANDROID_TRACE(x) ANDROID_TRACE_MSG x
-#define ANDROID_INFO(x) ANDROID_INFO_MSG x
+uint android_msg_level = ANDROID_ERROR_LEVEL;
/*
* Android private command strings, PLEASE define new private commands here
#define CMD_BTCOEXMODE "BTCOEXMODE"
#define CMD_SETSUSPENDOPT "SETSUSPENDOPT"
#define CMD_SETSUSPENDMODE "SETSUSPENDMODE"
-#define CMD_SETDTIM_IN_SUSPEND "SET_DTIM_IN_SUSPEND"
#define CMD_MAXDTIM_IN_SUSPEND "MAX_DTIM_IN_SUSPEND"
-#define CMD_DISDTIM_IN_SUSPEND "DISABLE_DTIM_IN_SUSPEND"
#define CMD_P2P_DEV_ADDR "P2P_DEV_ADDR"
#define CMD_SETFWPATH "SETFWPATH"
#define CMD_SETBAND "SETBAND"
#define CMD_GETBAND "GETBAND"
#define CMD_COUNTRY "COUNTRY"
+#ifdef WLMESH
+#define CMD_SAE_SET_PASSWORD "SAE_SET_PASSWORD"
+#define CMD_SET_RSDB_MODE "RSDB_MODE"
+#endif
#define CMD_P2P_SET_NOA "P2P_SET_NOA"
+#if !defined WL_ENABLE_P2P_IF
#define CMD_P2P_GET_NOA "P2P_GET_NOA"
+#endif /* WL_ENABLE_P2P_IF */
#define CMD_P2P_SD_OFFLOAD "P2P_SD_"
#define CMD_P2P_LISTEN_OFFLOAD "P2P_LO_"
#define CMD_P2P_SET_PS "P2P_SET_PS"
#define CMD_SETROAMMODE "SETROAMMODE"
#define CMD_SETIBSSBEACONOUIDATA "SETIBSSBEACONOUIDATA"
#define CMD_MIRACAST "MIRACAST"
-#ifdef WL_NAN
-#define CMD_NAN "NAN_"
-#endif /* WL_NAN */
#define CMD_COUNTRY_DELIMITER "/"
+#ifdef WL11ULB
+#define CMD_ULB_MODE "ULB_MODE"
+#define CMD_ULB_BW "ULB_BW"
+#endif /* WL11ULB */
#if defined(WL_SUPPORT_AUTO_CHANNEL)
#define CMD_GET_BEST_CHANNELS "GET_BEST_CHANNELS"
#ifdef WL_SUPPORT_AUTO_CHANNEL
#define CMD_SET_HAPD_AUTO_CHANNEL "HAPD_AUTO_CHANNEL"
#endif /* WL_SUPPORT_AUTO_CHANNEL */
+#ifdef CUSTOMER_HW4_PRIVATE_CMD
+#ifdef SUPPORT_SET_LPC
+#define CMD_HAPD_LPC_ENABLED "HAPD_LPC_ENABLED"
+#endif /* SUPPORT_SET_LPC */
+#ifdef SUPPORT_TRIGGER_HANG_EVENT
+#define CMD_TEST_FORCE_HANG "TEST_FORCE_HANG"
+#endif /* SUPPORT_TRIGGER_HANG_EVENT */
+#ifdef TEST_TX_POWER_CONTROL
+#define CMD_TEST_SET_TX_POWER "TEST_SET_TX_POWER"
+#define CMD_TEST_GET_TX_POWER "TEST_GET_TX_POWER"
+#endif /* TEST_TX_POWER_CONTROL */
+#define CMD_SARLIMIT_TX_CONTROL "SET_TX_POWER_CALLING"
+#endif /* CUSTOMER_HW4_PRIVATE_CMD */
#define CMD_KEEP_ALIVE "KEEPALIVE"
+
#ifdef PNO_SUPPORT
#define CMD_PNOSSIDCLR_SET "PNOSSIDCLR"
#define CMD_PNOSETUP_SET "PNOSETUP "
#define CMD_HAPD_MAC_FILTER "HAPD_MAC_FILTER"
-#ifdef WLFBT
-#define CMD_GET_FTKEY "GET_FTKEY"
-#endif // endif
+#ifdef CUSTOMER_HW4_PRIVATE_CMD
+
+
+#if defined(SUPPORT_RANDOM_MAC_SCAN)
+#define ENABLE_RANDOM_MAC "ENABLE_RANDOM_MAC"
+#define DISABLE_RANDOM_MAC "DISABLE_RANDOM_MAC"
+#endif /* SUPPORT_RANDOM_MAC_SCAN */
+
+
+#define CMD_CHANGE_RL "CHANGE_RL"
+#define CMD_RESTORE_RL "RESTORE_RL"
+
+#define CMD_SET_RMC_ENABLE "SETRMCENABLE"
+#define CMD_SET_RMC_TXRATE "SETRMCTXRATE"
+#define CMD_SET_RMC_ACTPERIOD "SETRMCACTIONPERIOD"
+#define CMD_SET_RMC_IDLEPERIOD "SETRMCIDLEPERIOD"
+#define CMD_SET_RMC_LEADER "SETRMCLEADER"
+#define CMD_SET_RMC_EVENT "SETRMCEVENT"
+
+#define CMD_SET_SCSCAN "SETSINGLEANT"
+#define CMD_GET_SCSCAN "GETSINGLEANT"
+#ifdef WLTDLS
+#define CMD_TDLS_RESET "TDLS_RESET"
+#endif /* WLTDLS */
+
+#ifdef FCC_PWR_LIMIT_2G
+#define CMD_GET_FCC_PWR_LIMIT_2G "GET_FCC_CHANNEL"
+#define CMD_SET_FCC_PWR_LIMIT_2G "SET_FCC_CHANNEL"
+/* CUSTOMER_HW4's value differs from BRCM FW value for enable/disable */
+#define CUSTOMER_HW4_ENABLE 0
+#define CUSTOMER_HW4_DISABLE -1
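+/* CUSTOMER_HW4_EN_CONVERT remaps the customer values (0 = enable, -1 = disable) onto the FW's 0/1 convention by adding 1 */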
+#define CUSTOMER_HW4_EN_CONVERT(i) (i += 1)
+#endif /* FCC_PWR_LIMIT_2G */
+
+#endif /* CUSTOMER_HW4_PRIVATE_CMD */
+
+
#define CMD_ROAM_OFFLOAD "SETROAMOFFLOAD"
#define CMD_INTERFACE_CREATE "INTERFACE_CREATE"
#define CMD_INTERFACE_DELETE "INTERFACE_DELETE"
#define CMD_GET_LINK_STATUS "GETLINKSTATUS"
+#if defined(DHD_ENABLE_BIGDATA_LOGGING)
+#define CMD_GET_BSS_INFO "GETBSSINFO"
+#define CMD_GET_ASSOC_REJECT_INFO "GETASSOCREJECTINFO"
+#endif /* DHD_ENABLE_BIGDATA_LOGGING */
#define CMD_GET_STA_INFO "GETSTAINFO"
/* related with CMD_GET_LINK_STATUS */
#define CMD_WBTEXT_DELTA_CONFIG "WBTEXT_DELTA_CONFIG"
#define CMD_WBTEXT_BTM_TIMER_THRESHOLD "WBTEXT_BTM_TIMER_THRESHOLD"
#define CMD_WBTEXT_BTM_DELTA "WBTEXT_BTM_DELTA"
-#define CMD_WBTEXT_ESTM_ENABLE "WBTEXT_ESTM_ENABLE"
-
-#define BUFSZ 8
-#define BUFSZN BUFSZ + 1
-
-#define _S(x) #x
-#define S(x) _S(x)
-
-#define MAXBANDS 2 /**< Maximum #of bands */
-#define BAND_2G_INDEX 0
-#define BAND_5G_INDEX 0
-
-typedef union {
- wl_roam_prof_band_v1_t v1;
- wl_roam_prof_band_v2_t v2;
- wl_roam_prof_band_v3_t v3;
-} wl_roamprof_band_t;
#ifdef WLWFDS
#define CMD_ADD_WFDS_HASH "ADD_WFDS_HASH"
#define CMD_MURX_BFE_CAP "MURX_BFE_CAP"
-#ifdef SUPPORT_RSSI_SUM_REPORT
-#define CMD_SET_RSSI_LOGGING "SET_RSSI_LOGGING"
-#define CMD_GET_RSSI_LOGGING "GET_RSSI_LOGGING"
-#define CMD_GET_RSSI_PER_ANT "GET_RSSI_PER_ANT"
-#endif /* SUPPORT_RSSI_SUM_REPORT */
-
-#define CMD_GET_SNR "GET_SNR"
-
#ifdef SUPPORT_AP_HIGHER_BEACONRATE
#define CMD_SET_AP_BEACONRATE "SET_AP_BEACONRATE"
#define CMD_GET_AP_BASICRATE "GET_AP_BASICRATE"
#define CMD_SET_AP_RPS_PARAMS "SET_AP_RPS_PARAMS"
#endif /* SUPPORT_AP_RADIO_PWRSAVE */
-#ifdef SUPPORT_AP_SUSPEND
-#define CMD_SET_AP_SUSPEND "SET_AP_SUSPEND"
-#endif /* SUPPORT_AP_SUSPEND */
+#ifdef SUPPORT_RSSI_LOGGING
+#define CMD_SET_RSSI_LOGGING "SET_RSSI_LOGGING"
+#define CMD_GET_RSSI_LOGGING "GET_RSSI_LOGGING"
+#define CMD_GET_RSSI_PER_ANT "GET_RSSI_PER_ANT"
+#endif /* SUPPORT_RSSI_LOGGING */
-#ifdef SUPPORT_AP_BWCTRL
-#define CMD_SET_AP_BW "SET_AP_BW"
-#define CMD_GET_AP_BW "GET_AP_BW"
-#endif /* SUPPORT_AP_BWCTRL */
+#define CMD_GET_SNR "GET_SNR"
/* miracast related definition */
#define MIRACAST_MODE_OFF 0
#define MIRACAST_MODE_SOURCE 1
#define MIRACAST_MODE_SINK 2
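+/* build-time overridable miracast tuning defaults (A-MPDU size and mchan algorithm/bandwidth values) */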
+#ifndef MIRACAST_AMPDU_SIZE
+#define MIRACAST_AMPDU_SIZE 8
+#endif
+
+#ifndef MIRACAST_MCHAN_ALGO
+#define MIRACAST_MCHAN_ALGO 1
+#endif
+
+#ifndef MIRACAST_MCHAN_BW
+#define MIRACAST_MCHAN_BW 25
+#endif
+
#ifdef CONNECTION_STATISTICS
#define CMD_GET_CONNECTION_STATS "GET_CONNECTION_STATS"
#ifdef SUPPORT_LQCM
#define CMD_SET_LQCM_ENABLE "SET_LQCM_ENABLE"
#define CMD_GET_LQCM_REPORT "GET_LQCM_REPORT"
-#endif // endif
+#endif
static LIST_HEAD(miracast_resume_list);
#ifdef WL_CFG80211
static u8 miracast_cur_mode;
-#endif /* WL_CFG80211 */
+#endif
#ifdef DHD_LOG_DUMP
-#define CMD_NEW_DEBUG_PRINT_DUMP "DEBUG_DUMP"
-#define SUBCMD_UNWANTED "UNWANTED"
-#define SUBCMD_DISCONNECTED "DISCONNECTED"
-void dhd_log_dump_trigger(dhd_pub_t *dhdp, int subcmd);
+#define CMD_NEW_DEBUG_PRINT_DUMP "DEBUG_DUMP"
+extern void dhd_schedule_log_dump(dhd_pub_t *dhdp);
+extern int dhd_bus_mem_dump(dhd_pub_t *dhd);
#endif /* DHD_LOG_DUMP */
-#ifdef DHD_STATUS_LOGGING
-#define CMD_DUMP_STATUS_LOG "DUMP_STAT_LOG"
-#define CMD_QUERY_STATUS_LOG "QUERY_STAT_LOG"
-#endif /* DHD_STATUS_LOGGING */
-
+#ifdef DHD_HANG_SEND_UP_TEST
+#define CMD_MAKE_HANG "MAKE_HANG"
+#endif /* DHD_HANG_SEND_UP_TEST */
#ifdef DHD_DEBUG_UART
extern bool dhd_debug_uart_is_running(struct net_device *dev);
#endif /* DHD_DEBUG_UART */
-#ifdef RTT_GEOFENCE_INTERVAL
-#if defined(RTT_SUPPORT) && defined(WL_NAN)
-#define CMD_GEOFENCE_INTERVAL "GEOFENCE_INT"
-#endif /* RTT_SUPPORT && WL_NAN */
-#endif /* RTT_GEOFENCE_INTERVAL */
-
struct io_cfg {
s8 *iovar;
s32 param;
struct list_head list;
};
-typedef enum {
- HEAD_SAR_BACKOFF_DISABLE = -1,
- HEAD_SAR_BACKOFF_ENABLE = 0,
- GRIP_SAR_BACKOFF_DISABLE,
- GRIP_SAR_BACKOFF_ENABLE,
- NR_mmWave_SAR_BACKOFF_DISABLE,
- NR_mmWave_SAR_BACKOFF_ENABLE,
- NR_Sub6_SAR_BACKOFF_DISABLE,
- NR_Sub6_SAR_BACKOFF_ENABLE,
- SAR_BACKOFF_DISABLE_ALL
-} sar_modes;
-
#if defined(BCMFW_ROAM_ENABLE)
#define CMD_SET_ROAMPREF "SET_ROAMPREF"
(JOIN_PREF_WPA_TUPLE_SIZE * JOIN_PREF_MAX_WPA_TUPLES))
#endif /* BCMFW_ROAM_ENABLE */
-#define CMD_DEBUG_VERBOSE "DEBUG_VERBOSE"
#ifdef WL_NATOE
#define CMD_NATOE "NATOE"
#define CMD_PCIE_IRQ_CORE "PCIE_IRQ_CORE"
#endif /* SET_PCIE_IRQ_CPU_CORE */
-#ifdef WL_BCNRECV
-#define CMD_BEACON_RECV "BEACON_RECV"
-#endif /* WL_BCNRECV */
-#ifdef WL_CAC_TS
-#define CMD_CAC_TSPEC "CAC_TSPEC"
-#endif /* WL_CAC_TS */
-#ifdef WL_CHAN_UTIL
-#define CMD_GET_CHAN_UTIL "GET_CU"
-#endif /* WL_CHAN_UTIL */
-
-#ifdef SUPPORT_SOFTAP_ELNA_BYPASS
-#define CMD_SET_SOFTAP_ELNA_BYPASS "SET_SOFTAP_ELNA_BYPASS"
-#define CMD_GET_SOFTAP_ELNA_BYPASS "GET_SOFTAP_ELNA_BYPASS"
-#endif /* SUPPORT_SOFTAP_ELNA_BYPASS */
-
-#ifdef WL_NAN
-#define CMD_GET_NAN_STATUS "GET_NAN_STATUS"
-#endif /* WL_NAN */
-
-/* drv command info structure */
-typedef struct wl_drv_cmd_info {
- uint8 *command; /* pointer to the actual command */
- uint16 tot_len; /* total length of the command */
- uint16 bytes_written; /* Bytes written for get response */
-} wl_drv_cmd_info_t;
-
-typedef struct wl_drv_sub_cmd wl_drv_sub_cmd_t;
-typedef int (drv_cmd_handler_t)(struct net_device *dev,
- const wl_drv_sub_cmd_t *cmd, char *command, wl_drv_cmd_info_t *cmd_info);
-
-struct wl_drv_sub_cmd {
- char *name;
- uint8 version; /* cmd version */
- uint16 id; /* id for the dongle f/w switch/case */
- uint16 type; /* base type of argument */
- drv_cmd_handler_t *handler; /* cmd handler */
-};
-
-#ifdef WL_MBO
-
-#define CMD_MBO "MBO"
-enum {
- WL_MBO_CMD_NON_CHAN_PREF = 1,
- WL_MBO_CMD_CELL_DATA_CAP = 2
-};
-#define WL_ANDROID_MBO_FUNC(suffix) wl_android_mbo_subcmd_ ##suffix
-
-static int wl_android_process_mbo_cmd(struct net_device *dev,
- char *command, int total_len);
-static int wl_android_mbo_subcmd_cell_data_cap(struct net_device *dev,
- const wl_drv_sub_cmd_t *cmd, char *command, wl_drv_cmd_info_t *cmd_info);
-static int wl_android_mbo_subcmd_non_pref_chan(struct net_device *dev,
- const wl_drv_sub_cmd_t *cmd, char *command, wl_drv_cmd_info_t *cmd_info);
-
-static const wl_drv_sub_cmd_t mbo_cmd_list[] = {
- {"non_pref_chan", 0x01, WL_MBO_CMD_NON_CHAN_PREF,
- IOVT_BUFFER, WL_ANDROID_MBO_FUNC(non_pref_chan)
- },
- {"cell_data_cap", 0x01, WL_MBO_CMD_CELL_DATA_CAP,
- IOVT_BUFFER, WL_ANDROID_MBO_FUNC(cell_data_cap)
- },
- {NULL, 0, 0, 0, NULL}
-};
-
-#endif /* WL_MBO */
-
-#ifdef WL_GENL
-static s32 wl_genl_handle_msg(struct sk_buff *skb, struct genl_info *info);
-static int wl_genl_init(void);
-static int wl_genl_deinit(void);
-
-extern struct net init_net;
-/* attribute policy: defines which attribute has which type (e.g int, char * etc)
- * possible values defined in net/netlink.h
- */
-static struct nla_policy wl_genl_policy[BCM_GENL_ATTR_MAX + 1] = {
- [BCM_GENL_ATTR_STRING] = { .type = NLA_NUL_STRING },
- [BCM_GENL_ATTR_MSG] = { .type = NLA_BINARY },
-};
-
-#define WL_GENL_VER 1
-/* family definition */
-static struct genl_family wl_genl_family = {
- .id = GENL_ID_GENERATE, /* Genetlink would generate the ID */
- .hdrsize = 0,
- .name = "bcm-genl", /* Netlink I/F for Android */
- .version = WL_GENL_VER, /* Version Number */
- .maxattr = BCM_GENL_ATTR_MAX,
-};
-
-/* commands: mapping between the command enumeration and the actual function */
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 13, 0))
-struct genl_ops wl_genl_ops[] = {
- {
- .cmd = BCM_GENL_CMD_MSG,
- .flags = 0,
- .policy = wl_genl_policy,
- .doit = wl_genl_handle_msg,
- .dumpit = NULL,
- },
-};
-#else
-struct genl_ops wl_genl_ops = {
- .cmd = BCM_GENL_CMD_MSG,
- .flags = 0,
- .policy = wl_genl_policy,
- .doit = wl_genl_handle_msg,
- .dumpit = NULL,
+#ifdef WLADPS_PRIVATE_CMD
+#define CMD_SET_ADPS "SET_ADPS"
+#define CMD_GET_ADPS "GET_ADPS"
+#endif /* WLADPS_PRIVATE_CMD */
+
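+/* PKTLOG_* commands control the DHD packet-logging filters and capture state (see dhd_pktlog.h) */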
+#ifdef DHD_PKT_LOGGING
+#define CMD_PKTLOG_FILTER_ENABLE "PKTLOG_FILTER_ENABLE"
+#define CMD_PKTLOG_FILTER_DISABLE "PKTLOG_FILTER_DISABLE"
+#define CMD_PKTLOG_FILTER_PATTERN_ENABLE "PKTLOG_FILTER_PATTERN_ENABLE"
+#define CMD_PKTLOG_FILTER_PATTERN_DISABLE "PKTLOG_FILTER_PATTERN_DISABLE"
+#define CMD_PKTLOG_FILTER_ADD "PKTLOG_FILTER_ADD"
+#define CMD_PKTLOG_FILTER_INFO "PKTLOG_FILTER_INFO"
+#define CMD_PKTLOG_START "PKTLOG_START"
+#define CMD_PKTLOG_STOP "PKTLOG_STOP"
+#define CMD_PKTLOG_FILTER_EXIST "PKTLOG_FILTER_EXIST"
+#endif /* DHD_PKT_LOGGING */
+
+#if defined(STAT_REPORT)
+#define CMD_STAT_REPORT_GET_START "STAT_REPORT_GET_START"
+#define CMD_STAT_REPORT_GET_NEXT "STAT_REPORT_GET_NEXT"
+#endif /* STAT_REPORT */
-};
-#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 13, 0) */
-
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 13, 0))
-static struct genl_multicast_group wl_genl_mcast[] = {
- { .name = "bcm-genl-mcast", },
-};
-#else
-static struct genl_multicast_group wl_genl_mcast = {
- .id = GENL_ID_GENERATE, /* Genetlink would generate the ID */
- .name = "bcm-genl-mcast",
-};
-#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 13, 0) */
-#endif /* WL_GENL */
#ifdef SUPPORT_LQCM
#define LQCM_ENAB_MASK 0x000000FF /* LQCM enable flag mask */
#define LQCM_RX_INDEX_SHIFT 16 /* LQCM rx index shift */
#endif /* SUPPORT_LQCM */
-#ifdef DHD_SEND_HANG_PRIVCMD_ERRORS
-#define NUMBER_SEQUENTIAL_PRIVCMD_ERRORS 7
-static int priv_cmd_errors = 0;
-#endif /* DHD_SEND_HANG_PRIVCMD_ERRORS */
-
/**
* Extern function declarations (TODO: move them to dhd_linux.h)
*/
int wl_cfg80211_increase_p2p_bw(struct net_device *net, char* buf, int len)
{ return 0; }
#endif /* WL_CFG80211 */
-#ifdef ROAM_CHANNEL_CACHE
-extern void wl_update_roamscan_cache_by_band(struct net_device *dev, int band);
-#endif /* ROAM_CHANNEL_CACHE */
+#ifdef WBTEXT
+static int wl_android_wbtext(struct net_device *dev, char *command, int total_len);
+static int wl_cfg80211_wbtext_btm_timer_threshold(struct net_device *dev,
+ char *command, int total_len);
+static int wl_cfg80211_wbtext_btm_delta(struct net_device *dev,
+ char *command, int total_len);
+#endif /* WBTEXT */
#ifdef ENABLE_4335BT_WAR
extern int bcm_bt_lock(int cookie);
extern bool ap_fw_loaded;
extern char iface_name[IFNAMSIZ];
-#ifdef DHD_PM_CONTROL_FROM_FILE
-extern bool g_pm_control;
-#endif /* DHD_PM_CONTROL_FROM_FILE */
-
-/* private command support for restoring roam/scan parameters */
-#ifdef SUPPORT_RESTORE_SCAN_PARAMS
-#define CMD_RESTORE_SCAN_PARAMS "RESTORE_SCAN_PARAMS"
-
-typedef int (*PRIV_CMD_HANDLER) (struct net_device *dev, char *command);
-typedef int (*PRIV_CMD_HANDLER_WITH_LEN) (struct net_device *dev, char *command, int total_len);
-
-enum {
- RESTORE_TYPE_UNSPECIFIED = 0,
- RESTORE_TYPE_PRIV_CMD = 1,
- RESTORE_TYPE_PRIV_CMD_WITH_LEN = 2
-};
-
-typedef struct android_restore_scan_params {
- char command[64];
- int parameter;
- int cmd_type;
- union {
- PRIV_CMD_HANDLER cmd_handler;
- PRIV_CMD_HANDLER_WITH_LEN cmd_handler_w_len;
- };
-} android_restore_scan_params_t;
-
-/* function prototypes of private command handler */
-static int wl_android_set_roam_trigger(struct net_device *dev, char* command);
-int wl_android_set_roam_delta(struct net_device *dev, char* command);
-int wl_android_set_roam_scan_period(struct net_device *dev, char* command);
-int wl_android_set_full_roam_scan_period(struct net_device *dev, char* command, int total_len);
-int wl_android_set_roam_scan_control(struct net_device *dev, char *command);
-int wl_android_set_scan_channel_time(struct net_device *dev, char *command);
-int wl_android_set_scan_home_time(struct net_device *dev, char *command);
-int wl_android_set_scan_home_away_time(struct net_device *dev, char *command);
-int wl_android_set_scan_nprobes(struct net_device *dev, char *command);
-static int wl_android_set_band(struct net_device *dev, char *command);
-int wl_android_set_scan_dfs_channel_mode(struct net_device *dev, char *command);
-int wl_android_set_wes_mode(struct net_device *dev, char *command);
-int wl_android_set_okc_mode(struct net_device *dev, char *command);
-
-/* default values */
-#ifdef ROAM_API
-#define DEFAULT_ROAM_TIRGGER -75
-#define DEFAULT_ROAM_DELTA 10
-#define DEFAULT_ROAMSCANPERIOD 10
-#define DEFAULT_FULLROAMSCANPERIOD_SET 120
-#endif /* ROAM_API */
-#define DEFAULT_BAND 0
-
-/* restoring parameter list, please don't change order */
-static android_restore_scan_params_t restore_params[] =
-{
-/* wbtext need to be disabled while updating roam/scan parameters */
-#ifdef ROAM_API
- { CMD_ROAMTRIGGER_SET, DEFAULT_ROAM_TIRGGER,
- RESTORE_TYPE_PRIV_CMD, .cmd_handler = wl_android_set_roam_trigger},
- { CMD_ROAMDELTA_SET, DEFAULT_ROAM_DELTA,
- RESTORE_TYPE_PRIV_CMD, .cmd_handler = wl_android_set_roam_delta},
- { CMD_ROAMSCANPERIOD_SET, DEFAULT_ROAMSCANPERIOD,
- RESTORE_TYPE_PRIV_CMD, .cmd_handler = wl_android_set_roam_scan_period},
- { CMD_FULLROAMSCANPERIOD_SET, DEFAULT_FULLROAMSCANPERIOD_SET,
- RESTORE_TYPE_PRIV_CMD_WITH_LEN,
- .cmd_handler_w_len = wl_android_set_full_roam_scan_period},
-#endif /* ROAM_API */
- { CMD_SETBAND, DEFAULT_BAND,
- RESTORE_TYPE_PRIV_CMD, .cmd_handler = wl_android_set_band},
- { "\0", 0, RESTORE_TYPE_UNSPECIFIED, .cmd_handler = NULL}
-};
-#endif /* SUPPORT_RESTORE_SCAN_PARAMS */
/**
* Local (static) functions and variables
#ifdef WLWFDS
static int wl_android_set_wfds_hash(
- struct net_device *dev, char *command, bool enable)
+ struct net_device *dev, char *command, int total_len, bool enable)
{
int error = 0;
wl_p2p_wfds_hash_t *wfds_hash = NULL;
char *smbuf = NULL;
- struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
+ smbuf = kmalloc(WLC_IOCTL_MAXLEN, GFP_KERNEL);
- smbuf = (char *)MALLOC(cfg->osh, WLC_IOCTL_MAXLEN);
if (smbuf == NULL) {
- ANDROID_ERROR(("wl_android_set_wfds_hash: failed to allocated memory %d bytes\n",
- WLC_IOCTL_MAXLEN));
+ ANDROID_ERROR(("%s: failed to allocated memory %d bytes\n",
+ __FUNCTION__, WLC_IOCTL_MAXLEN));
return -ENOMEM;
}
}
if (error) {
- ANDROID_ERROR(("wl_android_set_wfds_hash: failed to %s, error=%d\n", command, error));
+ ANDROID_ERROR(("%s: failed to %s, error=%d\n", __FUNCTION__, command, error));
}
- if (smbuf) {
- MFREE(cfg->osh, smbuf, WLC_IOCTL_MAXLEN);
- }
+ if (smbuf)
+ kfree(smbuf);
return error;
}
#endif /* WLWFDS */
/* Convert Kbps to Android Mbps */
link_speed = link_speed / 1000;
bytes_written = snprintf(command, total_len, "LinkSpeed %d", link_speed);
- ANDROID_INFO(("wl_android_get_link_speed: command result is %s\n", command));
+ ANDROID_INFO(("%s: command result is %s\n", __FUNCTION__, command));
return bytes_written;
}
/* Ap/GO mode
* driver rssi <sta_mac_addr>
*/
- ANDROID_TRACE(("wl_android_get_rssi: cmd:%s\n", delim));
+ ANDROID_TRACE(("%s: cmd:%s\n", __FUNCTION__, delim));
/* skip space from delim after finding char */
delim++;
- if (!(bcm_ether_atoe((delim), &scbval.ea))) {
- ANDROID_ERROR(("wl_android_get_rssi: address err\n"));
+ if (!(bcm_ether_atoe((delim), &scbval.ea)))
+ {
+ ANDROID_ERROR(("%s:address err\n", __FUNCTION__));
return -1;
}
- scbval.val = htod32(0);
- ANDROID_TRACE(("wl_android_get_rssi: address:"MACDBG, MAC2STRDBG(scbval.ea.octet)));
+ scbval.val = htod32(0);
+ ANDROID_TRACE(("%s: address:"MACDBG, __FUNCTION__, MAC2STRDBG(scbval.ea.octet)));
#ifdef WL_VIRTUAL_APSTA
/* RSDB AP may have another virtual interface
* In this case, format of private command is as following,
}
else {
/* STA/GC mode */
- bzero(&scbval, sizeof(scb_val_t));
+ memset(&scbval, 0, sizeof(scb_val_t));
}
error = wldev_get_rssi(target_ndev, &scbval);
if (error)
return -1;
if ((ssid.SSID_len == 0) || (ssid.SSID_len > DOT11_MAX_SSID_LEN)) {
- ANDROID_ERROR(("wl_android_get_rssi: wldev_get_ssid failed\n"));
+ ANDROID_ERROR(("%s: wldev_get_ssid failed\n", __FUNCTION__));
} else if (total_len <= ssid.SSID_len) {
return -ENOMEM;
} else {
" rssi %d", scbval.val);
command[bytes_written] = '\0';
- ANDROID_TRACE(("wl_android_get_rssi: command result is %s (%d)\n", command, bytes_written));
+ ANDROID_TRACE(("%s: command result is %s (%d)\n", __FUNCTION__, command, bytes_written));
return bytes_written;
}
-static int wl_android_set_suspendopt(struct net_device *dev, char *command)
+static int wl_android_set_suspendopt(struct net_device *dev, char *command, int total_len)
{
int suspend_flag;
int ret_now;
if (ret_now != suspend_flag) {
if (!(ret = net_os_set_suspend(dev, ret_now, 1))) {
- ANDROID_INFO(("wl_android_set_suspendopt: Suspend Flag %d -> %d\n",
- ret_now, suspend_flag));
+ ANDROID_INFO(("%s: Suspend Flag %d -> %d\n",
+ __FUNCTION__, ret_now, suspend_flag));
} else {
- ANDROID_ERROR(("wl_android_set_suspendopt: failed %d\n", ret));
+ ANDROID_ERROR(("%s: failed %d\n", __FUNCTION__, ret));
}
}
return ret;
}
-static int wl_android_set_suspendmode(struct net_device *dev, char *command)
+static int wl_android_set_suspendmode(struct net_device *dev, char *command, int total_len)
{
int ret = 0;
suspend_flag = 1;
if (!(ret = net_os_set_suspend(dev, suspend_flag, 0)))
- ANDROID_INFO(("wl_android_set_suspendmode: Suspend Mode %d\n", suspend_flag));
+ ANDROID_INFO(("%s: Suspend Mode %d\n", __FUNCTION__, suspend_flag));
else
- ANDROID_ERROR(("wl_android_set_suspendmode: failed %d\n", ret));
-#endif // endif
+ ANDROID_ERROR(("%s: failed %d\n", __FUNCTION__, ret));
+#endif
return ret;
}
if (error)
return -1;
- ANDROID_INFO(("wl_android_get_80211_mode: mode:%s\n", mode));
+ ANDROID_INFO(("%s: mode:%s\n", __FUNCTION__, mode));
bytes_written = snprintf(command, total_len, "%s %s", CMD_80211_MODE, mode);
- ANDROID_INFO(("wl_android_get_80211_mode: command:%s EXIT\n", command));
+ ANDROID_INFO(("%s: command:%s EXIT\n", __FUNCTION__, command));
return bytes_written;
}
return -1;
chanspec = wl_chspec_driver_to_host(chsp);
- ANDROID_INFO(("wl_android_get_80211_mode: return value of chanspec:%x\n", chanspec));
+ ANDROID_INFO(("%s:return value of chanspec:%x\n", __FUNCTION__, chanspec));
channel = chanspec & WL_CHANSPEC_CHAN_MASK;
band = chanspec & WL_CHANSPEC_BAND_MASK;
bw = chanspec & WL_CHANSPEC_BW_MASK;
- ANDROID_INFO(("wl_android_get_80211_mode: channel:%d band:%d bandwidth:%d\n",
- channel, band, bw));
+ ANDROID_INFO(("%s:channel:%d band:%d bandwidth:%d\n", __FUNCTION__, channel, band, bw));
if (bw == WL_CHANSPEC_BW_80)
bw = WL_CH_BANDWIDTH_80MHZ;
bytes_written = snprintf(command, total_len, "%s channel %d band %s bw %d", CMD_CHANSPEC,
channel, band == WL_CHANSPEC_BAND_5G ? "5G":"2G", bw);
- ANDROID_INFO(("wl_android_get_chanspec: command:%s EXIT\n", command));
+ ANDROID_INFO(("%s: command:%s EXIT\n", __FUNCTION__, command));
return bytes_written;
}
-#endif /* WL_CFG80211 */
+#endif
/* returns current datarate datarate returned from firmware are in 500kbps */
int wl_android_get_datarate(struct net_device *dev, char *command, int total_len)
if (error)
return -1;
- ANDROID_INFO(("wl_android_get_datarate: datarate:%d\n", datarate));
+ ANDROID_INFO(("%s:datarate:%d\n", __FUNCTION__, datarate));
bytes_written = snprintf(command, total_len, "%s %d", CMD_DATARATE, (datarate/2));
return bytes_written;
int error = 0;
int bytes_written = 0;
uint i;
- int len = 0;
char mac_buf[MAX_NUM_OF_ASSOCLIST *
sizeof(struct ether_addr) + sizeof(uint)] = {0};
struct maclist *assoc_maclist = (struct maclist *)mac_buf;
- ANDROID_TRACE(("wl_android_get_assoclist: ENTER\n"));
+ ANDROID_TRACE(("%s: ENTER\n", __FUNCTION__));
assoc_maclist->count = htod32(MAX_NUM_OF_ASSOCLIST);
CMD_ASSOC_CLIENTS, assoc_maclist->count);
for (i = 0; i < assoc_maclist->count; i++) {
- len = snprintf(command + bytes_written, total_len - bytes_written, " " MACDBG,
+ bytes_written += snprintf(command + bytes_written, total_len, " " MACDBG,
MAC2STRDBG(assoc_maclist->ea[i].octet));
- /* A return value of '(total_len - bytes_written)' or more means that the
- * output was truncated
- */
- if ((len > 0) && (len < (total_len - bytes_written))) {
- bytes_written += len;
- } else {
- ANDROID_ERROR(("wl_android_get_assoclist: Insufficient buffer %d,"
- " bytes_written %d\n",
- total_len, bytes_written));
- bytes_written = -1;
- break;
- }
}
return bytes_written;
+
}
#ifdef WL_CFG80211
extern chanspec_t
wl_chspec_host_to_driver(chanspec_t chanspec);
-static int wl_android_set_csa(struct net_device *dev, char *command)
+static int wl_android_set_csa(struct net_device *dev, char *command, int total_len)
{
int error = 0;
char smbuf[WLC_IOCTL_SMLEN];
u32 chnsp = 0;
int err = 0;
- ANDROID_INFO(("wl_android_set_csa: command:%s\n", command));
+ ANDROID_INFO(("%s: command:%s\n", __FUNCTION__, command));
command = (command + strlen(CMD_SET_CSA));
/* Order is mode, count channel */
if (!*++command) {
- ANDROID_ERROR(("wl_android_set_csa:error missing arguments\n"));
+ ANDROID_ERROR(("%s:error missing arguments\n", __FUNCTION__));
return -1;
}
csa_arg.mode = bcm_atoi(command);
}
if (!*++command) {
- ANDROID_ERROR(("wl_android_set_csa: error missing count\n"));
+ ANDROID_ERROR(("%s:error missing count\n", __FUNCTION__));
return -1;
}
command++;
csa_arg.chspec = 0;
command += 2;
if (!*command) {
- ANDROID_ERROR(("wl_android_set_csa: error missing channel\n"));
+ ANDROID_ERROR(("%s:error missing channel\n", __FUNCTION__));
return -1;
}
chnsp = wf_chspec_aton(command);
if (chnsp == 0) {
- ANDROID_ERROR(("wl_android_set_csa:chsp is not correct\n"));
+ ANDROID_ERROR(("%s:chsp is not correct\n", __FUNCTION__));
return -1;
}
chnsp = wl_chspec_host_to_driver(chnsp);
error = wldev_iovar_setbuf(dev, "csa", &csa_arg, sizeof(csa_arg),
smbuf, sizeof(smbuf), NULL);
if (error) {
- ANDROID_ERROR(("wl_android_set_csa:set csa failed:%d\n", error));
+ ANDROID_ERROR(("%s:set csa failed:%d\n", __FUNCTION__, error));
return -1;
}
return 0;
}
-#endif /* WL_CFG80211 */
+#endif
static int
-wl_android_set_bcn_li_dtim(struct net_device *dev, char *command)
+wl_android_set_max_dtim(struct net_device *dev, char *command, int total_len)
{
int ret = 0;
- int dtim;
-
- dtim = *(command + strlen(CMD_SETDTIM_IN_SUSPEND) + 1) - '0';
+ int dtim_flag;
- if (dtim > (MAX_DTIM_ALLOWED_INTERVAL / MAX_DTIM_SKIP_BEACON_INTERVAL)) {
- ANDROID_ERROR(("%s: failed, invalid dtim %d\n",
- __FUNCTION__, dtim));
- return BCME_ERROR;
- }
+ dtim_flag = *(command + strlen(CMD_MAXDTIM_IN_SUSPEND) + 1) - '0';
- if (!(ret = net_os_set_suspend_bcn_li_dtim(dev, dtim))) {
- ANDROID_TRACE(("%s: SET bcn_li_dtim in suspend %d\n",
- __FUNCTION__, dtim));
+ if (!(ret = net_os_set_max_dtim_enable(dev, dtim_flag))) {
+ ANDROID_TRACE(("%s: use Max bcn_li_dtim in suspend %s\n",
+ __FUNCTION__, (dtim_flag ? "Enable" : "Disable")));
} else {
ANDROID_ERROR(("%s: failed %d\n", __FUNCTION__, ret));
}
return ret;
}
-static int
-wl_android_set_max_dtim(struct net_device *dev, char *command)
+static int wl_android_get_band(struct net_device *dev, char *command, int total_len)
+{
+ uint band;
+ int bytes_written;
+ int error;
+
+ error = wldev_get_band(dev, &band);
+ if (error)
+ return -1;
+ bytes_written = snprintf(command, total_len, "Band %d", band);
+ return bytes_written;
+}
+
+#ifdef CUSTOMER_HW4_PRIVATE_CMD
+
+#ifdef WLTDLS
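+/* TDLS_RESET: bounce TDLS by disabling and then re-enabling it in firmware */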
+int wl_android_tdls_reset(struct net_device *dev)
{
int ret = 0;
- int dtim_flag;
+ ret = dhd_tdls_enable(dev, false, false, NULL);
+ if (ret < 0) {
+ ANDROID_ERROR(("Disable tdls failed. %d\n", ret));
+ return ret;
+ }
+ ret = dhd_tdls_enable(dev, true, true, NULL);
+ if (ret < 0) {
+ ANDROID_ERROR(("enable tdls failed. %d\n", ret));
+ return ret;
+ }
+ return 0;
+}
+#endif /* WLTDLS */
+#ifdef FCC_PWR_LIMIT_2G
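+/* SET_FCC_CHANNEL / GET_FCC_CHANNEL: write/read the fccpwrlimit2g iovar, converting between the customer and FW enable/disable conventions */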
+int
+wl_android_set_fcc_pwr_limit_2g(struct net_device *dev, char *command, int total_len)
+{
+ int error = 0;
+ int enable = 0;
- dtim_flag = *(command + strlen(CMD_MAXDTIM_IN_SUSPEND) + 1) - '0';
+ sscanf(command+sizeof("SET_FCC_CHANNEL"), "%d", &enable);
- if (!(ret = net_os_set_max_dtim_enable(dev, dtim_flag))) {
- ANDROID_TRACE(("wl_android_set_max_dtim: use Max bcn_li_dtim in suspend %s\n",
- (dtim_flag ? "Enable" : "Disable")));
- } else {
- ANDROID_ERROR(("wl_android_set_max_dtim: failed %d\n", ret));
+ if ((enable != CUSTOMER_HW4_ENABLE) && (enable != CUSTOMER_HW4_DISABLE)) {
+ ANDROID_ERROR(("%s: Invalid data\n", __FUNCTION__));
+ return BCME_ERROR;
}
- return ret;
+ CUSTOMER_HW4_EN_CONVERT(enable);
+
+ ANDROID_ERROR(("%s: fccpwrlimit2g set (%d)\n", __FUNCTION__, enable));
+ error = wldev_iovar_setint(dev, "fccpwrlimit2g", enable);
+ if (error) {
+ ANDROID_ERROR(("%s: fccpwrlimit2g set returned (%d)\n", __FUNCTION__, error));
+ return BCME_ERROR;
+ }
+
+ return error;
}
-#ifdef DISABLE_DTIM_IN_SUSPEND
-static int
-wl_android_set_disable_dtim_in_suspend(struct net_device *dev, char *command)
+int
+wl_android_get_fcc_pwr_limit_2g(struct net_device *dev, char *command, int total_len)
{
- int ret = 0;
- int dtim_flag;
-
- dtim_flag = *(command + strlen(CMD_DISDTIM_IN_SUSPEND) + 1) - '0';
+ int error = 0;
+ int enable = 0;
+ int bytes_written = 0;
- if (!(ret = net_os_set_disable_dtim_in_suspend(dev, dtim_flag))) {
- ANDROID_TRACE(("wl_android_set_disable_dtim_in_suspend: "
- "use Disable bcn_li_dtim in suspend %s\n",
- (dtim_flag ? "Enable" : "Disable")));
- } else {
- ANDROID_ERROR(("wl_android_set_disable_dtim_in_suspend: failed %d\n", ret));
+ error = wldev_iovar_getint(dev, "fccpwrlimit2g", &enable);
+ if (error) {
+ ANDROID_ERROR(("%s: fccpwrlimit2g get error (%d)\n", __FUNCTION__, error));
+ return BCME_ERROR;
}
+ ANDROID_ERROR(("%s: fccpwrlimit2g get (%d)\n", __FUNCTION__, enable));
- return ret;
+ bytes_written = snprintf(command, total_len, "%s %d", CMD_GET_FCC_PWR_LIMIT_2G, enable);
+
+ return bytes_written;
}
-#endif /* DISABLE_DTIM_IN_SUSPEND */
+#endif /* FCC_PWR_LIMIT_2G */
-static int wl_android_get_band(struct net_device *dev, char *command, int total_len)
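+/* GETSTAINFO <mac|all>: reports Rx retry and broadcast/multicast packet counts (plus the capability field for a single STA), taken from the sta_info or counters iovar */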
+s32
+wl_cfg80211_get_sta_info(struct net_device *dev, char* command, int total_len)
{
- uint band;
- int bytes_written;
- int error;
+ static char iovar_buf[WLC_IOCTL_MAXLEN];
+ int bytes_written = -1, ret = 0;
+ char *pcmd = command;
+ char *str;
+ sta_info_t *sta = NULL;
+ wl_cnt_wlc_t* wlc_cnt = NULL;
+ struct ether_addr mac;
- error = wldev_get_band(dev, &band);
- if (error)
- return -1;
- bytes_written = snprintf(command, total_len, "Band %d", band);
+ /* Client information */
+ uint16 cap = 0;
+ uint32 rxrtry = 0;
+ uint32 rxmulti = 0;
+
+ ANDROID_TRACE(("%s\n", command));
+ str = bcmstrtok(&pcmd, " ", NULL);
+ if (str) {
+ str = bcmstrtok(&pcmd, " ", NULL);
+ /* If GETSTAINFO subcmd name is not provided, return error */
+ if (str == NULL) {
+ ANDROID_ERROR(("GETSTAINFO subcmd not provided %s\n", __FUNCTION__));
+ goto error;
+ }
+
+ memset(&mac, 0, ETHER_ADDR_LEN);
+ if ((bcm_ether_atoe((str), &mac))) {
+ /* get the sta info */
+ ret = wldev_iovar_getbuf(dev, "sta_info",
+ (struct ether_addr *)mac.octet,
+ ETHER_ADDR_LEN, iovar_buf, WLC_IOCTL_SMLEN, NULL);
+ if (ret < 0) {
+ ANDROID_ERROR(("Get sta_info ERR %d\n", ret));
+ goto error;
+ }
+
+ sta = (sta_info_t *)iovar_buf;
+ cap = dtoh16(sta->cap);
+ rxrtry = dtoh32(sta->rx_pkts_retried);
+ rxmulti = dtoh32(sta->rx_mcast_pkts);
+ } else if ((!strncmp(str, "all", 3)) || (!strncmp(str, "ALL", 3))) {
+ /* get counters info */
+ ret = wldev_iovar_getbuf(dev, "counters", NULL, 0,
+ iovar_buf, WLC_IOCTL_MAXLEN, NULL);
+ if (unlikely(ret)) {
+ ANDROID_ERROR(("counters error (%d) - size = %zu\n",
+ ret, sizeof(wl_cnt_wlc_t)));
+ goto error;
+ }
+ ret = wl_cntbuf_to_xtlv_format(NULL, iovar_buf, WL_CNTBUF_MAX_SIZE, 0);
+ if (ret != BCME_OK) {
+ ANDROID_ERROR(("wl_cntbuf_to_xtlv_format ERR %d\n", ret));
+ goto error;
+ }
+ if (!(wlc_cnt = GET_WLCCNT_FROM_CNTBUF(iovar_buf))) {
+ ANDROID_ERROR(("wlc_cnt NULL!\n"));
+ goto error;
+ }
+
+ rxrtry = dtoh32(wlc_cnt->rxrtry);
+ rxmulti = dtoh32(wlc_cnt->rxmulti);
+ } else {
+ ANDROID_ERROR(("Get address fail\n"));
+ goto error;
+ }
+ } else {
+ ANDROID_ERROR(("Command ERR\n"));
+ goto error;
+ }
+
+ bytes_written = snprintf(command, total_len,
+ "%s %s Rx_Retry_Pkts=%d Rx_BcMc_Pkts=%d CAP=%04x\n",
+ CMD_GET_STA_INFO, str, rxrtry, rxmulti, cap);
+
+ ANDROID_TRACE(("%s", command));
+
+error:
return bytes_written;
}
+#endif /* CUSTOMER_HW4_PRIVATE_CMD */
-#ifdef WL_CFG80211
-static int
-wl_android_set_band(struct net_device *dev, char *command)
+#ifdef WBTEXT
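+/* WBTEXT: query or toggle 802.11v BSS-transition (WBTEXT) roaming via wnm_bsstrans_resp; enabling restores the default roam profile, disabling restores the legacy roam trigger */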
+static int wl_android_wbtext(struct net_device *dev, char *command, int total_len)
{
- int error = 0;
- uint band = *(command + strlen(CMD_SETBAND) + 1) - '0';
-#ifdef WL_HOST_BAND_MGMT
- int ret = 0;
- if ((ret = wl_cfg80211_set_band(dev, band)) < 0) {
- if (ret == BCME_UNSUPPORTED) {
- /* If roam_var is unsupported, fallback to the original method */
- ANDROID_ERROR(("WL_HOST_BAND_MGMT defined, "
- "but roam_band iovar unsupported in the firmware\n"));
+ int error = BCME_OK, argc = 0;
+ int data, bytes_written;
+ int roam_trigger[2];
+ dhd_pub_t *dhdp = wl_cfg80211_get_dhdp(dev);
+
+ argc = sscanf(command+sizeof(CMD_WBTEXT_ENABLE), "%d", &data);
+ if (!argc) {
+ error = wldev_iovar_getint(dev, "wnm_bsstrans_resp", &data);
+ if (error) {
+ ANDROID_ERROR(("%s: Failed to set wbtext error = %d\n",
+ __FUNCTION__, error));
+ return error;
+ }
+ bytes_written = snprintf(command, total_len, "WBTEXT %s\n",
+ (data == WL_BSSTRANS_POLICY_PRODUCT_WBTEXT)?
+ "ENABLED" : "DISABLED");
+ return bytes_written;
+ } else {
+ if (data) {
+ data = WL_BSSTRANS_POLICY_PRODUCT_WBTEXT;
+ }
+
+ if ((error = wldev_iovar_setint(dev, "wnm_bsstrans_resp", data)) != BCME_OK) {
+ ANDROID_ERROR(("%s: Failed to set wbtext error = %d\n",
+ __FUNCTION__, error));
+ return error;
+ }
+
+ if (data) {
+ /* reset roam_prof when wbtext is on */
+ if ((error = wl_cfg80211_wbtext_set_default(dev)) != BCME_OK) {
+ return error;
+ }
+ dhdp->wbtext_support = TRUE;
} else {
- error = -1;
+ /* reset legacy roam trigger when wbtext is off */
+ roam_trigger[0] = DEFAULT_ROAM_TRIGGER_VALUE;
+ roam_trigger[1] = WLC_BAND_ALL;
+ if ((error = wldev_ioctl_set(dev, WLC_SET_ROAM_TRIGGER, roam_trigger,
+ sizeof(roam_trigger))) != BCME_OK) {
+ ANDROID_ERROR(("%s: Failed to reset roam trigger = %d\n",
+ __FUNCTION__, error));
+ return error;
+ }
+ dhdp->wbtext_support = FALSE;
+ }
+ }
+ return error;
+}
+
+static int wl_cfg80211_wbtext_btm_timer_threshold(struct net_device *dev,
+ char *command, int total_len)
+{
+ int error = BCME_OK, argc = 0;
+ int data, bytes_written;
+
+ argc = sscanf(command, CMD_WBTEXT_BTM_TIMER_THRESHOLD " %d\n", &data);
+ if (!argc) {
+ error = wldev_iovar_getint(dev, "wnm_bsstrans_timer_threshold", &data);
+ if (error) {
+ ANDROID_ERROR(("Failed to get wnm_bsstrans_timer_threshold (%d)\n", error));
+ return error;
+ }
+ bytes_written = snprintf(command, total_len, "%d\n", data);
+ return bytes_written;
+ } else {
+ if ((error = wldev_iovar_setint(dev, "wnm_bsstrans_timer_threshold",
+ data)) != BCME_OK) {
+ ANDROID_ERROR(("Failed to set wnm_bsstrans_timer_threshold (%d)\n", error));
+ return error;
}
}
- if (((ret == 0) && (band == WLC_BAND_AUTO)) || (ret == BCME_UNSUPPORTED)) {
- /* Apply if roam_band iovar is not supported or band setting is AUTO */
- error = wldev_set_band(dev, band);
+ return error;
+}
+
+static int wl_cfg80211_wbtext_btm_delta(struct net_device *dev,
+ char *command, int total_len)
+{
+ int error = BCME_OK, argc = 0;
+ int data = 0, bytes_written;
+
+ argc = sscanf(command, CMD_WBTEXT_BTM_DELTA " %d\n", &data);
+ if (!argc) {
+ error = wldev_iovar_getint(dev, "wnm_btmdelta", &data);
+ if (error) {
+ ANDROID_ERROR(("Failed to get wnm_btmdelta (%d)\n", error));
+ return error;
+ }
+ bytes_written = snprintf(command, total_len, "%d\n", data);
+ return bytes_written;
+ } else {
+ if ((error = wldev_iovar_setint(dev, "wnm_btmdelta",
+ data)) != BCME_OK) {
+ ANDROID_ERROR(("Failed to set wnm_btmdelta (%d)\n", error));
+ return error;
+ }
}
-#else
- error = wl_cfg80211_set_if_band(dev, band);
-#endif /* WL_HOST_BAND_MGMT */
-#ifdef ROAM_CHANNEL_CACHE
- wl_update_roamscan_cache_by_band(dev, band);
-#endif /* ROAM_CHANNEL_CACHE */
return error;
}
-#endif /* WL_CFG80211 */
+
+#endif /* WBTEXT */
#ifdef PNO_SUPPORT
#define PNO_PARAM_SIZE 50
wls_parse_batching_cmd(struct net_device *dev, char *command, int total_len)
{
int err = BCME_OK;
- uint i, tokens, len_remain;
+ uint i, tokens;
char *pos, *pos2, *token, *token2, *delim;
char param[PNO_PARAM_SIZE+1], value[VALUE_SIZE+1];
struct dhd_pno_batch_params batch_params;
- ANDROID_INFO(("wls_parse_batching_cmd: command=%s, len=%d\n", command, total_len));
- len_remain = total_len;
- if (len_remain > (strlen(CMD_WLS_BATCHING) + 1)) {
- pos = command + strlen(CMD_WLS_BATCHING) + 1;
- len_remain -= strlen(CMD_WLS_BATCHING) + 1;
- } else {
- ANDROID_ERROR(("wls_parse_batching_cmd: No arguments, total_len %d\n", total_len));
+ ANDROID_INFO(("%s: command=%s, len=%d\n", __FUNCTION__, command, total_len));
+ if (total_len < strlen(CMD_WLS_BATCHING)) {
+ ANDROID_ERROR(("%s argument=%d less min size\n", __FUNCTION__, total_len));
err = BCME_ERROR;
goto exit;
}
- bzero(&batch_params, sizeof(struct dhd_pno_batch_params));
+ pos = command + strlen(CMD_WLS_BATCHING) + 1;
+ memset(&batch_params, 0, sizeof(struct dhd_pno_batch_params));
+
if (!strncmp(pos, PNO_BATCHING_SET, strlen(PNO_BATCHING_SET))) {
- if (len_remain > (strlen(PNO_BATCHING_SET) + 1)) {
- pos += strlen(PNO_BATCHING_SET) + 1;
- } else {
- ANDROID_ERROR(("wls_parse_batching_cmd: %s missing arguments, total_len %d\n",
- PNO_BATCHING_SET, total_len));
- err = BCME_ERROR;
- goto exit;
- }
+ pos += strlen(PNO_BATCHING_SET) + 1;
while ((token = strsep(&pos, PNO_PARAMS_DELIMETER)) != NULL) {
- bzero(param, sizeof(param));
- bzero(value, sizeof(value));
+ memset(param, 0, sizeof(param));
+ memset(value, 0, sizeof(value));
if (token == NULL || !*token)
break;
if (*token == '\0')
tokens = sscanf(value, "<%s>", value);
if (tokens != 1) {
err = BCME_ERROR;
- ANDROID_ERROR(("wls_parse_batching_cmd: invalid format"
- " for channel"
- " <> params\n"));
+ ANDROID_ERROR(("%s : invalid format for channel"
+ " <> params\n", __FUNCTION__));
goto exit;
}
while ((token2 = strsep(&pos2,
batch_params.rtt = simple_strtol(value, NULL, 0);
ANDROID_INFO(("rtt : %d\n", batch_params.rtt));
} else {
- ANDROID_ERROR(("wls_parse_batching_cmd : unknown param: %s\n", param));
+ ANDROID_ERROR(("%s : unknown param: %s\n", __FUNCTION__, param));
err = BCME_ERROR;
goto exit;
}
if (err < 0) {
ANDROID_ERROR(("failed to configure batch scan\n"));
} else {
- bzero(command, total_len);
+ memset(command, 0, total_len);
err = snprintf(command, total_len, "%d", err);
}
} else if (!strncmp(pos, PNO_BATCHING_GET, strlen(PNO_BATCHING_GET))) {
if (err < 0) {
ANDROID_ERROR(("failed to stop batching scan\n"));
} else {
- bzero(command, total_len);
+ memset(command, 0, total_len);
err = snprintf(command, total_len, "OK");
}
} else {
- ANDROID_ERROR(("wls_parse_batching_cmd : unknown command\n"));
+ ANDROID_ERROR(("%s : unknown command\n", __FUNCTION__));
err = BCME_ERROR;
goto exit;
}
0x00
};
#endif /* PNO_SET_DEBUG */
- ANDROID_INFO(("wl_android_set_pno_setup: command=%s, len=%d\n", command, total_len));
+ ANDROID_INFO(("%s: command=%s, len=%d\n", __FUNCTION__, command, total_len));
if (total_len < (strlen(CMD_PNOSETUP_SET) + sizeof(cmd_tlv_t))) {
- ANDROID_ERROR(("wl_android_set_pno_setup: argument=%d less min size\n", total_len));
+ ANDROID_ERROR(("%s argument=%d less min size\n", __FUNCTION__, total_len));
goto exit_proc;
}
#ifdef PNO_SET_DEBUG
memcpy(command, pno_in_example, sizeof(pno_in_example));
total_len = sizeof(pno_in_example);
-#endif // endif
+#endif
str_ptr = command + strlen(CMD_PNOSETUP_SET);
tlv_size_left = total_len - strlen(CMD_PNOSETUP_SET);
cmd_tlv_temp = (cmd_tlv_t *)str_ptr;
- bzero(ssids_local, sizeof(ssids_local));
+ memset(ssids_local, 0, sizeof(ssids_local));
if ((cmd_tlv_temp->prefix == PNO_TLV_PREFIX) &&
(cmd_tlv_temp->version == PNO_TLV_VERSION) &&
goto exit_proc;
} else {
if ((str_ptr[0] != PNO_TLV_TYPE_TIME) || (tlv_size_left <= 1)) {
- ANDROID_ERROR(("wl_android_set_pno_setup: scan duration corrupted"
- " field size %d\n",
- tlv_size_left));
+ ANDROID_ERROR(("%s scan duration corrupted field size %d\n",
+ __FUNCTION__, tlv_size_left));
goto exit_proc;
}
str_ptr++;
pno_time = simple_strtoul(str_ptr, &str_ptr, 16);
- ANDROID_INFO(("wl_android_set_pno_setup: pno_time=%d\n", pno_time));
+ ANDROID_INFO(("%s: pno_time=%d\n", __FUNCTION__, pno_time));
if (str_ptr[0] != 0) {
if ((str_ptr[0] != PNO_TLV_FREQ_REPEAT)) {
- ANDROID_ERROR(("wl_android_set_pno_setup: pno repeat:"
- " corrupted field\n"));
+ ANDROID_ERROR(("%s pno repeat : corrupted field\n",
+ __FUNCTION__));
goto exit_proc;
}
str_ptr++;
pno_repeat = simple_strtoul(str_ptr, &str_ptr, 16);
- ANDROID_INFO(("wl_android_set_pno_setup: got pno_repeat=%d\n",
- pno_repeat));
+ ANDROID_INFO(("%s :got pno_repeat=%d\n", __FUNCTION__, pno_repeat));
if (str_ptr[0] != PNO_TLV_FREQ_EXPO_MAX) {
- ANDROID_ERROR(("wl_android_set_pno_setup: FREQ_EXPO_MAX"
- " corrupted field size\n"));
+ ANDROID_ERROR(("%s FREQ_EXPO_MAX corrupted field size\n",
+ __FUNCTION__));
goto exit_proc;
}
str_ptr++;
pno_freq_expo_max = simple_strtoul(str_ptr, &str_ptr, 16);
- ANDROID_INFO(("wl_android_set_pno_setup: pno_freq_expo_max=%d\n",
- pno_freq_expo_max));
+ ANDROID_INFO(("%s: pno_freq_expo_max=%d\n",
+ __FUNCTION__, pno_freq_expo_max));
}
}
} else {
- ANDROID_ERROR(("wl_android_set_pno_setup: get wrong TLV command\n"));
+ ANDROID_ERROR(("%s get wrong TLV command\n", __FUNCTION__));
goto exit_proc;
}
static int wl_android_get_p2p_dev_addr(struct net_device *ndev, char *command, int total_len)
{
int ret;
- struct ether_addr p2pdev_addr;
-
-#define MAC_ADDR_STR_LEN 18
- if (total_len < MAC_ADDR_STR_LEN) {
- ANDROID_ERROR(("wl_android_get_p2p_dev_addr: buflen %d is less than p2p dev addr\n",
- total_len));
- return -1;
- }
+ int bytes_written = 0;
- ret = wl_cfg80211_get_p2p_dev_addr(ndev, &p2pdev_addr);
- if (ret) {
- ANDROID_ERROR(("wl_android_get_p2p_dev_addr: Failed to get p2p dev addr\n"));
- return -1;
- }
- return (snprintf(command, total_len, MACF, ETHERP_TO_MACF(&p2pdev_addr)));
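+ /* copy the raw 6-byte P2P device address straight into the command buffer; callers now receive binary bytes rather than a formatted MAC string */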
+ ret = wl_cfg80211_get_p2p_dev_addr(ndev, (struct ether_addr*)command);
+ if (ret)
+ return 0;
+ bytes_written = sizeof(struct ether_addr);
+ return bytes_written;
}
+
int
wl_android_set_ap_mac_list(struct net_device *dev, int macmode, struct maclist *maclist)
{
/* set filtering mode */
if ((ret = wldev_ioctl_set(dev, WLC_SET_MACMODE, &macmode, sizeof(macmode)) != 0)) {
- ANDROID_ERROR(("wl_android_set_ap_mac_list : WLC_SET_MACMODE error=%d\n", ret));
+ ANDROID_ERROR(("%s : WLC_SET_MACMODE error=%d\n", __FUNCTION__, ret));
return ret;
}
if (macmode != MACLIST_MODE_DISABLED) {
/* set the MAC filter list */
if ((ret = wldev_ioctl_set(dev, WLC_SET_MACLIST, maclist,
sizeof(int) + sizeof(struct ether_addr) * maclist->count)) != 0) {
- ANDROID_ERROR(("wl_android_set_ap_mac_list : WLC_SET_MACLIST error=%d\n", ret));
+ ANDROID_ERROR(("%s : WLC_SET_MACLIST error=%d\n", __FUNCTION__, ret));
return ret;
}
/* get the current list of associated STAs */
assoc_maclist->count = MAX_NUM_OF_ASSOCLIST;
if ((ret = wldev_ioctl_get(dev, WLC_GET_ASSOCLIST, assoc_maclist,
sizeof(mac_buf))) != 0) {
- ANDROID_ERROR(("wl_android_set_ap_mac_list: WLC_GET_ASSOCLIST error=%d\n",
- ret));
+ ANDROID_ERROR(("%s : WLC_GET_ASSOCLIST error=%d\n", __FUNCTION__, ret));
return ret;
}
/* do we have any STA associated? */
match = 0;
/* compare with each entry */
for (j = 0; j < maclist->count; j++) {
- ANDROID_INFO(("wl_android_set_ap_mac_list: associated="MACDBG
- "list = "MACDBG "\n",
- MAC2STRDBG(assoc_maclist->ea[i].octet),
+ ANDROID_INFO(("%s : associated="MACDBG " list="MACDBG "\n",
+ __FUNCTION__, MAC2STRDBG(assoc_maclist->ea[i].octet),
MAC2STRDBG(maclist->ea[j].octet)));
if (memcmp(assoc_maclist->ea[i].octet,
maclist->ea[j].octet, ETHER_ADDR_LEN) == 0) {
if ((ret = wldev_ioctl_set(dev,
WLC_SCB_DEAUTHENTICATE_FOR_REASON,
&scbval, sizeof(scb_val_t))) != 0)
- ANDROID_ERROR(("wl_android_set_ap_mac_list:"
- " WLC_SCB_DEAUTHENTICATE"
- " error=%d\n",
- ret));
+ ANDROID_ERROR(("%s WLC_SCB_DEAUTHENTICATE error=%d\n",
+ __FUNCTION__, ret));
}
}
}
struct maclist *list;
char eabuf[ETHER_ADDR_STR_LEN];
const char *token;
- dhd_pub_t *dhd = dhd_get_pub(dev);
/* string should look like below (macmode/macnum/maclist) */
/* 1 2 00:11:22:33:44:55 00:11:22:33:44:ff */
macmode = bcm_atoi(token);
if (macmode < MACLIST_MODE_DISABLED || macmode > MACLIST_MODE_ALLOW) {
- ANDROID_ERROR(("wl_android_set_mac_address_filter: invalid macmode %d\n", macmode));
+ ANDROID_ERROR(("%s : invalid macmode %d\n", __FUNCTION__, macmode));
return -1;
}
}
macnum = bcm_atoi(token);
if (macnum < 0 || macnum > MAX_NUM_MAC_FILT) {
- ANDROID_ERROR(("wl_android_set_mac_address_filter: invalid number of MAC"
- " address entries %d\n",
- macnum));
+ ANDROID_ERROR(("%s : invalid number of MAC address entries %d\n",
+ __FUNCTION__, macnum));
return -1;
}
/* allocate memory for the MAC list */
- list = (struct maclist*) MALLOCZ(dhd->osh, sizeof(int) +
- sizeof(struct ether_addr) * macnum);
+ list = (struct maclist*)kmalloc(sizeof(int) +
+ sizeof(struct ether_addr) * macnum, GFP_KERNEL);
if (!list) {
- ANDROID_ERROR(("wl_android_set_mac_address_filter : failed to allocate memory\n"));
+ ANDROID_ERROR(("%s : failed to allocate memory\n", __FUNCTION__));
return -1;
}
/* prepare the MAC list */
list->count = htod32(macnum);
bzero((char *)eabuf, ETHER_ADDR_STR_LEN);
for (i = 0; i < list->count; i++) {
- token = strsep((char**)&str, " ");
- if (token == NULL) {
- ANDROID_ERROR(("wl_android_set_mac_address_filter : No mac address present\n"));
- ret = -EINVAL;
- goto exit;
- }
- strlcpy(eabuf, token, sizeof(eabuf));
+ strncpy(eabuf, strsep((char**)&str, " "), ETHER_ADDR_STR_LEN - 1);
if (!(ret = bcm_ether_atoe(eabuf, &list->ea[i]))) {
- ANDROID_ERROR(("wl_android_set_mac_address_filter : mac parsing err index=%d,"
- " addr=%s\n",
- i, eabuf));
- list->count = i;
+ ANDROID_ERROR(("%s : mac parsing err index=%d, addr=%s\n",
+ __FUNCTION__, i, eabuf));
+ list->count--;
break;
}
- ANDROID_INFO(("wl_android_set_mac_address_filter : %d/%d MACADDR=%s",
- i, list->count, eabuf));
+ ANDROID_INFO(("%s : %d/%d MACADDR=%s", __FUNCTION__, i, list->count, eabuf));
}
- if (i == 0)
- goto exit;
-
/* set the list */
if ((ret = wl_android_set_ap_mac_list(dev, macmode, list)) != 0)
- ANDROID_ERROR(("wl_android_set_mac_address_filter: Setting MAC list failed error=%d\n",
- ret));
+ ANDROID_ERROR(("%s : Setting MAC list failed error=%d\n", __FUNCTION__, ret));
-exit:
- MFREE(dhd->osh, list, sizeof(int) + sizeof(struct ether_addr) * macnum);
+ kfree(list);
- return ret;
+ return 0;
}
/**
int retry = POWERUP_MAX_RETRY;
if (!dev) {
- ANDROID_ERROR(("wl_android_wifi_on: dev is null\n"));
+ ANDROID_ERROR(("%s: dev is null\n", __FUNCTION__));
return -EINVAL;
}
+ printf("%s in 1\n", __FUNCTION__);
dhd_net_if_lock(dev);
- WL_MSG(dev->name, "in g_wifi_on=%d\n", g_wifi_on);
+ printf("%s in 2: g_wifi_on=%d\n", __FUNCTION__, g_wifi_on);
if (!g_wifi_on) {
do {
- dhd_net_wifi_platform_set_power(dev, TRUE, WIFI_TURNON_DELAY);
+ if (!dhd_net_wifi_platform_set_power(dev, TRUE, WIFI_TURNON_DELAY)) {
#ifdef BCMSDIO
ret = dhd_net_bus_resume(dev, 0);
#endif /* BCMSDIO */
+ }
#ifdef BCMPCIE
ret = dhd_net_bus_devreset(dev, FALSE);
#endif /* BCMPCIE */
} while (retry-- > 0);
if (ret != 0) {
ANDROID_ERROR(("\nfailed to power up wifi chip, max retry reached **\n\n"));
-#ifdef BCM_DETECT_TURN_ON_FAILURE
- BUG_ON(1);
-#endif /* BCM_DETECT_TURN_ON_FAILURE */
goto exit;
}
#if defined(BCMSDIO) || defined(BCMDBUS)
}
exit:
- WL_MSG(dev->name, "Success\n");
+ printf("%s: Success\n", __FUNCTION__);
dhd_net_if_unlock(dev);
return ret;
dhd_net_bus_suspend(dev);
#endif /* BCMSDIO */
dhd_net_wifi_platform_set_power(dev, FALSE, WIFI_TURNOFF_DELAY);
- WL_MSG(dev->name, "Failed\n");
+ printf("%s: Failed\n", __FUNCTION__);
dhd_net_if_unlock(dev);
return ret;
#endif /* BCMSDIO || BCMDBUS */
return -EINVAL;
}
+ printf("%s in 1\n", __FUNCTION__);
#if defined(BCMPCIE) && defined(DHD_DEBUG_UART)
ret = dhd_debug_uart_is_running(dev);
if (ret) {
- ANDROID_ERROR(("wl_android_wifi_off: - Debug UART App is running\n"));
+ ANDROID_ERROR(("%s - Debug UART App is running\n", __FUNCTION__));
return -EBUSY;
}
#endif /* BCMPCIE && DHD_DEBUG_UART */
dhd_net_if_lock(dev);
- WL_MSG(dev->name, "in g_wifi_on=%d, on_failure=%d\n", g_wifi_on, on_failure);
+ printf("%s in 2: g_wifi_on=%d, on_failure=%d\n", __FUNCTION__, g_wifi_on, on_failure);
if (g_wifi_on || on_failure) {
#if defined(BCMSDIO) || defined(BCMPCIE) || defined(BCMDBUS)
ret = dhd_net_bus_devreset(dev, TRUE);
-#ifdef BCMSDIO
+#if defined(BCMSDIO)
dhd_net_bus_suspend(dev);
#endif /* BCMSDIO */
#endif /* BCMSDIO || BCMPCIE || BCMDBUS */
dhd_net_wifi_platform_set_power(dev, FALSE, WIFI_TURNOFF_DELAY);
g_wifi_on = FALSE;
}
- WL_MSG(dev->name, "out\n");
+ printf("%s out\n", __FUNCTION__);
dhd_net_if_unlock(dev);
return ret;
u8 result[WLC_IOCTL_SMLEN];
chanim_stats_t *stats;
- bzero(&param, sizeof(param));
+ memset(&param, 0, sizeof(param));
param.buflen = htod32(sizeof(wl_chanim_stats_t));
param.count = htod32(WL_CHANIM_COUNT_ONE);
wl_android_get_connection_stats(struct net_device *dev, char *command, int total_len)
{
static char iovar_buf[WLC_IOCTL_MAXLEN];
- const wl_cnt_wlc_t* wlc_cnt = NULL;
+ wl_cnt_wlc_t* wlc_cnt = NULL;
#ifndef DISABLE_IF_COUNTERS
wl_if_stats_t* if_stats = NULL;
- struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
- dhd_pub_t *dhdp = wl_cfg80211_get_dhdp(dev);
#endif /* DISABLE_IF_COUNTERS */
int link_speed = 0;
int bytes_written = -1;
int ret = 0;
- ANDROID_INFO(("wl_android_get_connection_stats: enter Get Connection Stats\n"));
+ ANDROID_INFO(("%s: enter Get Connection Stats\n", __FUNCTION__));
if (total_len <= 0) {
- ANDROID_ERROR(("wl_android_get_connection_stats: invalid buffer size %d\n", total_len));
+ ANDROID_ERROR(("%s: invalid buffer size %d\n", __FUNCTION__, total_len));
goto error;
}
bufsize = total_len;
if (bufsize < sizeof(struct connection_stats)) {
- ANDROID_ERROR(("wl_android_get_connection_stats: not enough buffer size, provided=%u,"
- " requires=%zu\n",
- bufsize,
+ ANDROID_ERROR(("%s: not enough buffer size, provided=%u, requires=%zu\n",
+ __FUNCTION__, bufsize,
sizeof(struct connection_stats)));
goto error;
}
output = (struct connection_stats *)command;
#ifndef DISABLE_IF_COUNTERS
- if_stats = (wl_if_stats_t *)MALLOCZ(cfg->osh, sizeof(*if_stats));
- if (if_stats == NULL) {
- ANDROID_ERROR(("wl_android_get_connection_stats: MALLOCZ failed\n"));
+ if ((if_stats = kmalloc(sizeof(*if_stats), GFP_KERNEL)) == NULL) {
+ ANDROID_ERROR(("%s(%d): kmalloc failed\n", __FUNCTION__, __LINE__));
goto error;
}
- bzero(if_stats, sizeof(*if_stats));
-
- if (FW_SUPPORTED(dhdp, ifst)) {
- ret = wl_cfg80211_ifstats_counters(dev, if_stats);
- } else
- {
- ret = wldev_iovar_getbuf(dev, "if_counters", NULL, 0,
- (char *)if_stats, sizeof(*if_stats), NULL);
- }
+ memset(if_stats, 0, sizeof(*if_stats));
ret = wldev_iovar_getbuf(dev, "if_counters", NULL, 0,
(char *)if_stats, sizeof(*if_stats), NULL);
if (ret) {
- ANDROID_ERROR(("wl_android_get_connection_stats: if_counters not supported ret=%d\n",
- ret));
+ ANDROID_ERROR(("%s: if_counters not supported ret=%d\n",
+ __FUNCTION__, ret));
/* In case if_stats IOVAR is not supported, get information from counters. */
#endif /* DISABLE_IF_COUNTERS */
}
ret = wl_cntbuf_to_xtlv_format(NULL, iovar_buf, WL_CNTBUF_MAX_SIZE, 0);
if (ret != BCME_OK) {
- ANDROID_ERROR(("wl_android_get_connection_stats:"
- " wl_cntbuf_to_xtlv_format ERR %d\n",
- ret));
+ ANDROID_ERROR(("%s wl_cntbuf_to_xtlv_format ERR %d\n",
+ __FUNCTION__, ret));
goto error;
}
if (!(wlc_cnt = GET_WLCCNT_FROM_CNTBUF(iovar_buf))) {
- ANDROID_ERROR(("wl_android_get_connection_stats: wlc_cnt NULL!\n"));
+ ANDROID_ERROR(("%s wlc_cnt NULL!\n", __FUNCTION__));
goto error;
}
} else {
/* Populate from if_stats. */
if (dtoh16(if_stats->version) > WL_IF_STATS_T_VERSION) {
- ANDROID_ERROR(("wl_android_get_connection_stats: incorrect version of"
- " wl_if_stats_t,"
- " expected=%u got=%u\n",
- WL_IF_STATS_T_VERSION, if_stats->version));
+ ANDROID_ERROR(("%s: incorrect version of wl_if_stats_t, expected=%u got=%u\n",
+ __FUNCTION__, WL_IF_STATS_T_VERSION, if_stats->version));
goto error;
}
/* link_speed is in kbps */
ret = wldev_get_link_speed(dev, &link_speed);
if (ret || link_speed < 0) {
- ANDROID_ERROR(("wl_android_get_connection_stats: wldev_get_link_speed()"
- " failed, ret=%d, speed=%d\n",
- ret, link_speed));
+ ANDROID_ERROR(("%s: wldev_get_link_speed() failed, ret=%d, speed=%d\n",
+ __FUNCTION__, ret, link_speed));
goto error;
}
error:
#ifndef DISABLE_IF_COUNTERS
if (if_stats) {
- MFREE(cfg->osh, if_stats, sizeof(*if_stats));
+ kfree(if_stats);
}
#endif /* DISABLE_IF_COUNTERS */
/* If natoe subcmd name is not provided, return error */
if (*pcmd == '\0') {
- ANDROID_ERROR(("natoe subcmd not provided wl_android_process_natoe_cmd\n"));
+ ANDROID_ERROR(("natoe subcmd not provided %s\n", __FUNCTION__));
ret = -EINVAL;
return ret;
}
}
else
{
- ANDROID_ERROR(("wl_natoe_get_ioctl: get command failed code %d\n", res));
+ ANDROID_ERROR(("%s: get command failed code %d\n", __FUNCTION__, res));
res = BCME_ERROR;
}
int ret = BCME_OK;
wl_natoe_ioc_t *natoe_ioc;
char *pcmd = command;
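+ /* pick a non-sleeping allocation flag when running in atomic context */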
+ uint16 kflags = in_atomic() ? GFP_ATOMIC : GFP_KERNEL;
uint16 iocsz = sizeof(*natoe_ioc) + WL_NATOE_IOC_BUFSZ;
uint16 buflen = WL_NATOE_IOC_BUFSZ;
bcm_xtlv_t *pxtlv = NULL;
char *ioctl_buf = NULL;
- struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
- ioctl_buf = (char *)MALLOCZ(cfg->osh, WLC_IOCTL_MEDLEN);
+ ioctl_buf = kzalloc(WLC_IOCTL_MEDLEN, kflags);
if (!ioctl_buf) {
ANDROID_ERROR(("ioctl memory alloc failed\n"));
return -ENOMEM;
}
/* alloc mem for ioctl headr + tlv data */
- natoe_ioc = (wl_natoe_ioc_t *)MALLOCZ(cfg->osh, iocsz);
+ natoe_ioc = kzalloc(iocsz, kflags);
if (!natoe_ioc) {
ANDROID_ERROR(("ioctl header memory alloc failed\n"));
- MFREE(cfg->osh, ioctl_buf, WLC_IOCTL_MEDLEN);
+ kfree(ioctl_buf);
return -ENOMEM;
}
ret = wl_natoe_get_ioctl(dev, natoe_ioc, iocsz, ioctl_buf,
WLC_IOCTL_MEDLEN, cmd_info);
if (ret != BCME_OK) {
- ANDROID_ERROR(("Fail to get iovar wl_android_natoe_subcmd_enable\n"));
+ ANDROID_ERROR(("Fail to get iovar %s\n", __FUNCTION__));
ret = -EINVAL;
}
} else { /* set */
}
exit:
- MFREE(cfg->osh, ioctl_buf, WLC_IOCTL_MEDLEN);
- MFREE(cfg->osh, natoe_ioc, iocsz);
+ kfree(ioctl_buf);
+ kfree(natoe_ioc);
return ret;
}
wl_natoe_ioc_t *natoe_ioc;
char *pcmd = command;
char *str;
+ uint16 kflags = in_atomic() ? GFP_ATOMIC : GFP_KERNEL;
uint16 iocsz = sizeof(*natoe_ioc) + WL_NATOE_IOC_BUFSZ;
uint16 buflen = WL_NATOE_IOC_BUFSZ;
bcm_xtlv_t *pxtlv = NULL;
char *ioctl_buf = NULL;
- struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
- ioctl_buf = (char *)MALLOCZ(cfg->osh, WLC_IOCTL_MEDLEN);
+ ioctl_buf = kzalloc(WLC_IOCTL_MEDLEN, kflags);
if (!ioctl_buf) {
ANDROID_ERROR(("ioctl memory alloc failed\n"));
return -ENOMEM;
}
/* alloc mem for ioctl headr + tlv data */
- natoe_ioc = (wl_natoe_ioc_t *)MALLOCZ(cfg->osh, iocsz);
+ natoe_ioc = kzalloc(iocsz, kflags);
if (!natoe_ioc) {
ANDROID_ERROR(("ioctl header memory alloc failed\n"));
- MFREE(cfg->osh, ioctl_buf, WLC_IOCTL_MEDLEN);
+ kfree(ioctl_buf);
return -ENOMEM;
}
ret = wl_natoe_get_ioctl(dev, natoe_ioc, iocsz, ioctl_buf,
WLC_IOCTL_MEDLEN, cmd_info);
if (ret != BCME_OK) {
- ANDROID_ERROR(("Fail to get iovar wl_android_natoe_subcmd_config_ips\n"));
+ ANDROID_ERROR(("Fail to get iovar %s\n", __FUNCTION__));
ret = -EINVAL;
}
} else { /* set */
/* save buflen at start */
uint16 buflen_at_start = buflen;
- bzero(&config_ips, sizeof(config_ips));
+ memset(&config_ips, 0, sizeof(config_ips));
str = bcmstrtok(&pcmd, " ", NULL);
if (!str || !bcm_atoipv4(str, (struct ipv4_addr *)&config_ips.sta_ip)) {
}
exit:
- MFREE(cfg->osh, ioctl_buf, WLC_IOCTL_MEDLEN);
- MFREE(cfg->osh, natoe_ioc, sizeof(*natoe_ioc) + WL_NATOE_IOC_BUFSZ);
+ kfree(ioctl_buf);
+ kfree(natoe_ioc);
return ret;
}
wl_natoe_ioc_t *natoe_ioc;
char *pcmd = command;
char *str;
+ uint16 kflags = in_atomic() ? GFP_ATOMIC : GFP_KERNEL;
uint16 iocsz = sizeof(*natoe_ioc) + WL_NATOE_IOC_BUFSZ;
uint16 buflen = WL_NATOE_IOC_BUFSZ;
bcm_xtlv_t *pxtlv = NULL;
char *ioctl_buf = NULL;
- struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
- ioctl_buf = (char *)MALLOCZ(cfg->osh, WLC_IOCTL_MEDLEN);
+ ioctl_buf = kzalloc(WLC_IOCTL_MEDLEN, kflags);
if (!ioctl_buf) {
ANDROID_ERROR(("ioctl memory alloc failed\n"));
return -ENOMEM;
}
/* alloc mem for ioctl headr + tlv data */
- natoe_ioc = (wl_natoe_ioc_t *)MALLOCZ(cfg->osh, iocsz);
+ natoe_ioc = kzalloc(iocsz, kflags);
if (!natoe_ioc) {
ANDROID_ERROR(("ioctl header memory alloc failed\n"));
- MFREE(cfg->osh, ioctl_buf, WLC_IOCTL_MEDLEN);
+ kfree(ioctl_buf);
return -ENOMEM;
}
ret = wl_natoe_get_ioctl(dev, natoe_ioc, iocsz, ioctl_buf,
WLC_IOCTL_MEDLEN, cmd_info);
if (ret != BCME_OK) {
- ANDROID_ERROR(("Fail to get iovar wl_android_natoe_subcmd_config_ports\n"));
+ ANDROID_ERROR(("Fail to get iovar %s\n", __FUNCTION__));
ret = -EINVAL;
}
} else { /* set */
/* save buflen at start */
uint16 buflen_at_start = buflen;
- bzero(&ports_config, sizeof(ports_config));
+ memset(&ports_config, 0, sizeof(ports_config));
str = bcmstrtok(&pcmd, " ", NULL);
if (!str) {
}
exit:
- MFREE(cfg->osh, ioctl_buf, WLC_IOCTL_MEDLEN);
- MFREE(cfg->osh, natoe_ioc, sizeof(*natoe_ioc) + WL_NATOE_IOC_BUFSZ);
+ kfree(ioctl_buf);
+ kfree(natoe_ioc);
return ret;
}
uint16 buflen = WL_NATOE_DBG_STATS_BUFSZ;
bcm_xtlv_t *pxtlv = NULL;
char *ioctl_buf = NULL;
- struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
- ioctl_buf = (char *)MALLOCZ(cfg->osh, WLC_IOCTL_MAXLEN);
+ ioctl_buf = kzalloc(WLC_IOCTL_MAXLEN, kflags);
if (!ioctl_buf) {
ANDROID_ERROR(("ioctl memory alloc failed\n"));
return -ENOMEM;
}
/* alloc mem for ioctl headr + tlv data */
- natoe_ioc = (wl_natoe_ioc_t *)MALLOCZ(cfg->osh, iocsz);
+ natoe_ioc = kzalloc(iocsz, kflags);
if (!natoe_ioc) {
ANDROID_ERROR(("ioctl header memory alloc failed\n"));
- MFREE(cfg->osh, ioctl_buf, WLC_IOCTL_MAXLEN);
+ kfree(ioctl_buf);
return -ENOMEM;
}
ret = wl_natoe_get_ioctl(dev, natoe_ioc, iocsz, ioctl_buf,
WLC_IOCTL_MAXLEN, cmd_info);
if (ret != BCME_OK) {
- ANDROID_ERROR(("Fail to get iovar wl_android_natoe_subcmd_dbg_stats\n"));
+ ANDROID_ERROR(("Fail to get iovar %s\n", __FUNCTION__));
ret = -EINVAL;
}
} else { /* set */
}
exit:
- MFREE(cfg->osh, ioctl_buf, WLC_IOCTL_MAXLEN);
- MFREE(cfg->osh, natoe_ioc, sizeof(*natoe_ioc) + WL_NATOE_DBG_STATS_BUFSZ);
+ kfree(ioctl_buf);
+ kfree(natoe_ioc);
return ret;
}
uint16 buflen = WL_NATOE_IOC_BUFSZ;
bcm_xtlv_t *pxtlv = NULL;
char *ioctl_buf = NULL;
- struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
- ioctl_buf = (char *)MALLOCZ(cfg->osh, WLC_IOCTL_MEDLEN);
+ ioctl_buf = kzalloc(WLC_IOCTL_MEDLEN, kflags);
if (!ioctl_buf) {
ANDROID_ERROR(("ioctl memory alloc failed\n"));
return -ENOMEM;
}
/* alloc mem for ioctl headr + tlv data */
- natoe_ioc = (wl_natoe_ioc_t *)MALLOCZ(cfg->osh, iocsz);
+ natoe_ioc = kzalloc(iocsz, kflags);
if (!natoe_ioc) {
ANDROID_ERROR(("ioctl header memory alloc failed\n"));
- MFREE(cfg->osh, ioctl_buf, WLC_IOCTL_MEDLEN);
+ kfree(ioctl_buf);
return -ENOMEM;
}
ret = wl_natoe_get_ioctl(dev, natoe_ioc, iocsz, ioctl_buf,
WLC_IOCTL_MEDLEN, cmd_info);
if (ret != BCME_OK) {
- ANDROID_ERROR(("Fail to get iovar wl_android_natoe_subcmd_tbl_cnt\n"));
+ ANDROID_ERROR(("Fail to get iovar %s\n", __FUNCTION__));
ret = -EINVAL;
}
} else { /* set */
}
exit:
- MFREE(cfg->osh, ioctl_buf, WLC_IOCTL_MEDLEN);
- MFREE(cfg->osh, natoe_ioc, sizeof(*natoe_ioc) + WL_NATOE_IOC_BUFSZ);
+ kfree(ioctl_buf);
+ kfree(natoe_ioc);
return ret;
}
#endif /* WL_NATOE */
-#ifdef WL_MBO
-static int
-wl_android_process_mbo_cmd(struct net_device *dev, char *command, int total_len)
-{
- int ret = BCME_ERROR;
- char *pcmd = command;
- char *str = NULL;
- wl_drv_cmd_info_t cmd_info;
- const wl_drv_sub_cmd_t *mbo_cmd = &mbo_cmd_list[0];
-
- /* skip to cmd name after "mbo" */
- str = bcmstrtok(&pcmd, " ", NULL);
-
- /* If mbo subcmd name is not provided, return error */
- if (*pcmd == '\0') {
- ANDROID_ERROR(("mbo subcmd not provided %s\n", __FUNCTION__));
- ret = -EINVAL;
- return ret;
- }
-
- /* get the mbo command name to str */
- str = bcmstrtok(&pcmd, " ", NULL);
-
- while (mbo_cmd->name != NULL) {
- if (strnicmp(mbo_cmd->name, str, strlen(mbo_cmd->name)) == 0) {
- /* dispatch cmd to appropriate handler */
- if (mbo_cmd->handler) {
- cmd_info.command = command;
- cmd_info.tot_len = total_len;
- ret = mbo_cmd->handler(dev, mbo_cmd, pcmd, &cmd_info);
- }
- return ret;
- }
- mbo_cmd++;
- }
- return ret;
-}
-
-static int
-wl_android_send_wnm_notif(struct net_device *dev, bcm_iov_buf_t *iov_buf,
- uint16 iov_buf_len, uint8 *iov_resp, uint16 iov_resp_len, uint8 sub_elem_type)
-{
- int ret = BCME_OK;
- uint8 *pxtlv = NULL;
- uint16 iovlen = 0;
- uint16 buflen = 0, buflen_start = 0;
-
- memset_s(iov_buf, iov_buf_len, 0, iov_buf_len);
- iov_buf->version = WL_MBO_IOV_VERSION;
- iov_buf->id = WL_MBO_CMD_SEND_NOTIF;
- buflen = buflen_start = iov_buf_len - sizeof(bcm_iov_buf_t);
- pxtlv = (uint8 *)&iov_buf->data[0];
- ret = bcm_pack_xtlv_entry(&pxtlv, &buflen, WL_MBO_XTLV_SUB_ELEM_TYPE,
- sizeof(sub_elem_type), &sub_elem_type, BCM_XTLV_OPTION_ALIGN32);
- if (ret != BCME_OK) {
- return ret;
- }
- iov_buf->len = buflen_start - buflen;
- iovlen = sizeof(bcm_iov_buf_t) + iov_buf->len;
- ret = wldev_iovar_setbuf(dev, "mbo",
- iov_buf, iovlen, iov_resp, WLC_IOCTL_MAXLEN, NULL);
- if (ret != BCME_OK) {
- ANDROID_ERROR(("Fail to sent wnm notif %d\n", ret));
- }
- return ret;
-}
-
-static int
-wl_android_mbo_resp_parse_cbfn(void *ctx, const uint8 *data, uint16 type, uint16 len)
-{
- wl_drv_cmd_info_t *cmd_info = (wl_drv_cmd_info_t *)ctx;
- uint8 *command = cmd_info->command;
- uint16 total_len = cmd_info->tot_len;
- uint16 bytes_written = 0;
-
- UNUSED_PARAMETER(len);
- /* TODO: validate data value */
- if (data == NULL) {
- ANDROID_ERROR(("%s: Bad argument !!\n", __FUNCTION__));
- return -EINVAL;
- }
- switch (type) {
- case WL_MBO_XTLV_CELL_DATA_CAP:
- {
- bytes_written = snprintf(command, total_len, "cell_data_cap: %u\n", *data);
- cmd_info->bytes_written = bytes_written;
- }
- break;
- default:
- ANDROID_ERROR(("%s: Unknown tlv %u\n", __FUNCTION__, type));
- }
- return BCME_OK;
-}
-
-static int
-wl_android_mbo_subcmd_cell_data_cap(struct net_device *dev, const wl_drv_sub_cmd_t *cmd,
- char *command, wl_drv_cmd_info_t *cmd_info)
-{
- int ret = BCME_OK;
- uint8 *pxtlv = NULL;
- uint16 buflen = 0, buflen_start = 0;
- uint16 iovlen = 0;
- char *pcmd = command;
- bcm_iov_buf_t *iov_buf = NULL;
- bcm_iov_buf_t *p_resp = NULL;
- uint8 *iov_resp = NULL;
- struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
- uint16 version;
-
- /* first get the configured value */
- iov_buf = (bcm_iov_buf_t *)MALLOCZ(cfg->osh, WLC_IOCTL_MEDLEN);
- if (iov_buf == NULL) {
- ret = -ENOMEM;
- ANDROID_ERROR(("iov buf memory alloc exited\n"));
- goto exit;
- }
- iov_resp = (uint8 *)MALLOCZ(cfg->osh, WLC_IOCTL_MAXLEN);
- if (iov_resp == NULL) {
- ret = -ENOMEM;
- ANDROID_ERROR(("iov resp memory alloc exited\n"));
- goto exit;
- }
-
- /* fill header */
- iov_buf->version = WL_MBO_IOV_VERSION;
- iov_buf->id = WL_MBO_CMD_CELLULAR_DATA_CAP;
-
- ret = wldev_iovar_getbuf(dev, "mbo", iov_buf, WLC_IOCTL_MEDLEN, iov_resp,
- WLC_IOCTL_MAXLEN,
- NULL);
- if (ret != BCME_OK) {
- goto exit;
- }
- p_resp = (bcm_iov_buf_t *)iov_resp;
-
- /* get */
- if (*pcmd == WL_IOCTL_ACTION_GET) {
- /* Check for version */
- version = dtoh16(*(uint16 *)iov_resp);
- if (version != WL_MBO_IOV_VERSION) {
- ret = -EINVAL;
- }
- if (p_resp->id == WL_MBO_CMD_CELLULAR_DATA_CAP) {
- ret = bcm_unpack_xtlv_buf((void *)cmd_info, (uint8 *)p_resp->data,
- p_resp->len, BCM_XTLV_OPTION_ALIGN32,
- wl_android_mbo_resp_parse_cbfn);
- if (ret == BCME_OK) {
- ret = cmd_info->bytes_written;
- }
- } else {
- ret = -EINVAL;
- ANDROID_ERROR(("Mismatch: resp id %d req id %d\n", p_resp->id, cmd->id));
- goto exit;
- }
- } else {
- uint8 cell_cap = bcm_atoi(pcmd);
- const uint8* old_cell_cap = NULL;
- uint16 len = 0;
-
- old_cell_cap = bcm_get_data_from_xtlv_buf((uint8 *)p_resp->data, p_resp->len,
- WL_MBO_XTLV_CELL_DATA_CAP, &len, BCM_XTLV_OPTION_ALIGN32);
- if (old_cell_cap && *old_cell_cap == cell_cap) {
- ANDROID_ERROR(("No change is cellular data capability\n"));
- /* No change in value */
- goto exit;
- }
-
- buflen = buflen_start = WLC_IOCTL_MEDLEN - sizeof(bcm_iov_buf_t);
-
- if (cell_cap < MBO_CELL_DATA_CONN_AVAILABLE ||
- cell_cap > MBO_CELL_DATA_CONN_NOT_CAPABLE) {
- ANDROID_ERROR(("wrong value %u\n", cell_cap));
- ret = -EINVAL;
- goto exit;
- }
- pxtlv = (uint8 *)&iov_buf->data[0];
- ret = bcm_pack_xtlv_entry(&pxtlv, &buflen, WL_MBO_XTLV_CELL_DATA_CAP,
- sizeof(cell_cap), &cell_cap, BCM_XTLV_OPTION_ALIGN32);
- if (ret != BCME_OK) {
- goto exit;
- }
- iov_buf->len = buflen_start - buflen;
- iovlen = sizeof(bcm_iov_buf_t) + iov_buf->len;
- ret = wldev_iovar_setbuf(dev, "mbo",
- iov_buf, iovlen, iov_resp, WLC_IOCTL_MAXLEN, NULL);
- if (ret != BCME_OK) {
- ANDROID_ERROR(("Fail to set iovar %d\n", ret));
- ret = -EINVAL;
- goto exit;
- }
- /* Skip for CUSTOMER_HW4 - WNM notification
- * for cellular data capability is handled by host
- */
- /* send a WNM notification request to associated AP */
- if (wl_get_drv_status(cfg, CONNECTED, dev)) {
- ANDROID_INFO(("Sending WNM Notif\n"));
- ret = wl_android_send_wnm_notif(dev, iov_buf, WLC_IOCTL_MEDLEN,
- iov_resp, WLC_IOCTL_MAXLEN, MBO_ATTR_CELL_DATA_CAP);
- if (ret != BCME_OK) {
- ANDROID_ERROR(("Fail to send WNM notification %d\n", ret));
- ret = -EINVAL;
- }
- }
- }
-exit:
- if (iov_buf) {
- MFREE(cfg->osh, iov_buf, WLC_IOCTL_MEDLEN);
- }
- if (iov_resp) {
- MFREE(cfg->osh, iov_resp, WLC_IOCTL_MAXLEN);
- }
- return ret;
-}
-
-static int
-wl_android_mbo_non_pref_chan_parse_cbfn(void *ctx, const uint8 *data, uint16 type, uint16 len)
-{
- wl_drv_cmd_info_t *cmd_info = (wl_drv_cmd_info_t *)ctx;
- uint8 *command = cmd_info->command + cmd_info->bytes_written;
- uint16 total_len = cmd_info->tot_len;
- uint16 bytes_written = 0;
-
- ANDROID_INFO(("Total bytes written at begining %u\n", cmd_info->bytes_written));
- UNUSED_PARAMETER(len);
- if (data == NULL) {
- ANDROID_ERROR(("%s: Bad argument !!\n", __FUNCTION__));
- return -EINVAL;
- }
- switch (type) {
- case WL_MBO_XTLV_OPCLASS:
- {
- bytes_written = snprintf(command, total_len, "%u:", *data);
- ANDROID_ERROR(("wr %u %u\n", bytes_written, *data));
- command += bytes_written;
- cmd_info->bytes_written += bytes_written;
- }
- break;
- case WL_MBO_XTLV_CHAN:
- {
- bytes_written = snprintf(command, total_len, "%u:", *data);
- ANDROID_ERROR(("wr %u\n", bytes_written));
- command += bytes_written;
- cmd_info->bytes_written += bytes_written;
- }
- break;
- case WL_MBO_XTLV_PREFERENCE:
- {
- bytes_written = snprintf(command, total_len, "%u:", *data);
- ANDROID_ERROR(("wr %u\n", bytes_written));
- command += bytes_written;
- cmd_info->bytes_written += bytes_written;
- }
- break;
- case WL_MBO_XTLV_REASON_CODE:
- {
- bytes_written = snprintf(command, total_len, "%u ", *data);
- ANDROID_ERROR(("wr %u\n", bytes_written));
- command += bytes_written;
- cmd_info->bytes_written += bytes_written;
- }
- break;
- default:
- ANDROID_ERROR(("%s: Unknown tlv %u\n", __FUNCTION__, type));
- }
- ANDROID_INFO(("Total bytes written %u\n", cmd_info->bytes_written));
- return BCME_OK;
-}
-
-static int
-wl_android_mbo_subcmd_non_pref_chan(struct net_device *dev,
- const wl_drv_sub_cmd_t *cmd, char *command,
- wl_drv_cmd_info_t *cmd_info)
-{
- int ret = BCME_OK;
- uint8 *pxtlv = NULL;
- uint16 buflen = 0, buflen_start = 0;
- uint16 iovlen = 0;
- char *pcmd = command;
- bcm_iov_buf_t *iov_buf = NULL;
- bcm_iov_buf_t *p_resp = NULL;
- uint8 *iov_resp = NULL;
- struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
- uint16 version;
-
- ANDROID_ERROR(("%s:%d\n", __FUNCTION__, __LINE__));
- iov_buf = (bcm_iov_buf_t *)MALLOCZ(cfg->osh, WLC_IOCTL_MEDLEN);
- if (iov_buf == NULL) {
- ret = -ENOMEM;
- ANDROID_ERROR(("iov buf memory alloc exited\n"));
- goto exit;
- }
- iov_resp = (uint8 *)MALLOCZ(cfg->osh, WLC_IOCTL_MAXLEN);
- if (iov_resp == NULL) {
- ret = -ENOMEM;
- ANDROID_ERROR(("iov resp memory alloc exited\n"));
- goto exit;
- }
- /* get */
- if (*pcmd == WL_IOCTL_ACTION_GET) {
- /* fill header */
- iov_buf->version = WL_MBO_IOV_VERSION;
- iov_buf->id = WL_MBO_CMD_LIST_CHAN_PREF;
-
- ret = wldev_iovar_getbuf(dev, "mbo", iov_buf, WLC_IOCTL_MEDLEN, iov_resp,
- WLC_IOCTL_MAXLEN, NULL);
- if (ret != BCME_OK) {
- goto exit;
- }
- p_resp = (bcm_iov_buf_t *)iov_resp;
- /* Check for version */
- version = dtoh16(*(uint16 *)iov_resp);
- if (version != WL_MBO_IOV_VERSION) {
- ANDROID_ERROR(("Version mismatch. returned ver %u expected %u\n",
- version, WL_MBO_IOV_VERSION));
- ret = -EINVAL;
- }
- if (p_resp->id == WL_MBO_CMD_LIST_CHAN_PREF) {
- ret = bcm_unpack_xtlv_buf((void *)cmd_info, (uint8 *)p_resp->data,
- p_resp->len, BCM_XTLV_OPTION_ALIGN32,
- wl_android_mbo_non_pref_chan_parse_cbfn);
- if (ret == BCME_OK) {
- ret = cmd_info->bytes_written;
- }
- } else {
- ret = -EINVAL;
- ANDROID_ERROR(("Mismatch: resp id %d req id %d\n", p_resp->id, cmd->id));
- goto exit;
- }
- } else {
- char *str = pcmd;
- uint opcl = 0, ch = 0, pref = 0, rc = 0;
-
- str = bcmstrtok(&pcmd, " ", NULL);
- if (!(strnicmp(str, "set", 3)) || (!strnicmp(str, "clear", 5))) {
- /* delete all configurations */
- iov_buf->version = WL_MBO_IOV_VERSION;
- iov_buf->id = WL_MBO_CMD_DEL_CHAN_PREF;
- iov_buf->len = 0;
- iovlen = sizeof(bcm_iov_buf_t) + iov_buf->len;
- ret = wldev_iovar_setbuf(dev, "mbo",
- iov_buf, iovlen, iov_resp, WLC_IOCTL_MAXLEN, NULL);
- if (ret != BCME_OK) {
- ANDROID_ERROR(("Fail to set iovar %d\n", ret));
- ret = -EINVAL;
- goto exit;
- }
- } else {
- ANDROID_ERROR(("Unknown command %s\n", str));
- goto exit;
- }
- /* parse non pref channel list */
- if (strnicmp(str, "set", 3) == 0) {
- uint8 cnt = 0;
- str = bcmstrtok(&pcmd, " ", NULL);
- while (str != NULL) {
- ret = sscanf(str, "%u:%u:%u:%u", &opcl, &ch, &pref, &rc);
- ANDROID_ERROR(("buflen %u op %u, ch %u, pref %u rc %u\n",
- buflen, opcl, ch, pref, rc));
- if (ret != 4) {
- ANDROID_ERROR(("Not all parameter presents\n"));
- ret = -EINVAL;
- }
- /* TODO: add a validation check here */
- memset_s(iov_buf, WLC_IOCTL_MEDLEN, 0, WLC_IOCTL_MEDLEN);
- buflen = buflen_start = WLC_IOCTL_MEDLEN;
- pxtlv = (uint8 *)&iov_buf->data[0];
- /* opclass */
- ret = bcm_pack_xtlv_entry(&pxtlv, &buflen, WL_MBO_XTLV_OPCLASS,
- sizeof(uint8), (uint8 *)&opcl, BCM_XTLV_OPTION_ALIGN32);
- if (ret != BCME_OK) {
- goto exit;
- }
- /* channel */
- ret = bcm_pack_xtlv_entry(&pxtlv, &buflen, WL_MBO_XTLV_CHAN,
- sizeof(uint8), (uint8 *)&ch, BCM_XTLV_OPTION_ALIGN32);
- if (ret != BCME_OK) {
- goto exit;
- }
- /* preference */
- ret = bcm_pack_xtlv_entry(&pxtlv, &buflen, WL_MBO_XTLV_PREFERENCE,
- sizeof(uint8), (uint8 *)&pref, BCM_XTLV_OPTION_ALIGN32);
- if (ret != BCME_OK) {
- goto exit;
- }
- /* reason */
- ret = bcm_pack_xtlv_entry(&pxtlv, &buflen, WL_MBO_XTLV_REASON_CODE,
- sizeof(uint8), (uint8 *)&rc, BCM_XTLV_OPTION_ALIGN32);
- if (ret != BCME_OK) {
- goto exit;
- }
- ANDROID_ERROR(("len %u\n", (buflen_start - buflen)));
- /* Now set the new non pref channels */
- iov_buf->version = WL_MBO_IOV_VERSION;
- iov_buf->id = WL_MBO_CMD_ADD_CHAN_PREF;
- iov_buf->len = buflen_start - buflen;
- iovlen = sizeof(bcm_iov_buf_t) + iov_buf->len;
- ret = wldev_iovar_setbuf(dev, "mbo",
- iov_buf, iovlen, iov_resp, WLC_IOCTL_MEDLEN, NULL);
- if (ret != BCME_OK) {
- ANDROID_ERROR(("Fail to set iovar %d\n", ret));
- ret = -EINVAL;
- goto exit;
- }
- cnt++;
- if (cnt >= MBO_MAX_CHAN_PREF_ENTRIES) {
- break;
- }
- ANDROID_ERROR(("%d cnt %u\n", __LINE__, cnt));
- str = bcmstrtok(&pcmd, " ", NULL);
- }
- }
- /* send a WNM notification request to associated AP */
- if (wl_get_drv_status(cfg, CONNECTED, dev)) {
- ANDROID_INFO(("Sending WNM Notif\n"));
- ret = wl_android_send_wnm_notif(dev, iov_buf, WLC_IOCTL_MEDLEN,
- iov_resp, WLC_IOCTL_MAXLEN, MBO_ATTR_NON_PREF_CHAN_REPORT);
- if (ret != BCME_OK) {
- ANDROID_ERROR(("Fail to send WNM notification %d\n", ret));
- ret = -EINVAL;
- }
- }
- }
-exit:
- if (iov_buf) {
- MFREE(cfg->osh, iov_buf, WLC_IOCTL_MEDLEN);
- }
- if (iov_resp) {
- MFREE(cfg->osh, iov_resp, WLC_IOCTL_MAXLEN);
- }
- return ret;
-}
-#endif /* WL_MBO */
-
-#if defined(CONFIG_WLAN_BEYONDX) || defined(CONFIG_SEC_5GMODEL)
-extern int wl_cfg80211_send_msg_to_ril(void);
-extern void wl_cfg80211_register_dev_ril_bridge_event_notifier(void);
-extern void wl_cfg80211_unregister_dev_ril_bridge_event_notifier(void);
-extern int g_mhs_chan_for_cpcoex;
-#endif /* CONFIG_WLAN_BEYONDX || defined(CONFIG_SEC_5GMODEL) */
+#ifdef CUSTOMER_HW4_PRIVATE_CMD
+#endif /* CUSTOMER_HW4_PRIVATE_CMD */
#if defined(WL_SUPPORT_AUTO_CHANNEL)
/* SoftAP feature */
#define APCS_MAX_RETRY 10
#define APCS_DEFAULT_2G_CH 1
#define APCS_DEFAULT_5G_CH 149
-
static int
wl_android_set_auto_channel(struct net_device *dev, const char* cmd_str,
char* command, int total_len)
int ret = 0;
int spect = 0;
u8 *reqbuf = NULL;
- uint32 band = WLC_BAND_2G, sta_band = WLC_BAND_2G;
+ uint32 band = WLC_BAND_2G;
uint32 buf_size;
- struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
+ char *pos = command;
+ int band_new, band_cur;
if (cmd_str) {
ANDROID_INFO(("Command: %s len:%d \n", cmd_str, (int)strlen(cmd_str)));
- if (strnicmp(cmd_str, APCS_BAND_AUTO, strlen(APCS_BAND_AUTO)) == 0) {
+ if (strncmp(cmd_str, APCS_BAND_AUTO, strlen(APCS_BAND_AUTO)) == 0) {
band = WLC_BAND_AUTO;
- } else if (strnicmp(cmd_str, APCS_BAND_5G, strlen(APCS_BAND_5G)) == 0) {
+ } else if (strncmp(cmd_str, APCS_BAND_5G, strlen(APCS_BAND_5G)) == 0) {
band = WLC_BAND_5G;
- } else if (strnicmp(cmd_str, APCS_BAND_2G, strlen(APCS_BAND_2G)) == 0) {
+ } else if (strncmp(cmd_str, APCS_BAND_2G, strlen(APCS_BAND_2G)) == 0) {
band = WLC_BAND_2G;
} else {
/*
(channel == APCS_BAND_2G_LEGACY2)) {
band = WLC_BAND_2G;
} else {
- ANDROID_ERROR(("Invalid argument\n"));
+ ANDROID_ERROR(("%s: Invalid argument\n", __FUNCTION__));
return -EINVAL;
}
}
} else {
/* If no argument is provided, default to 2G */
- ANDROID_ERROR(("No argument given default to 2.4G scan\n"));
+ ANDROID_ERROR(("%s: No argument given default to 2.4G scan\n", __FUNCTION__));
band = WLC_BAND_2G;
}
- ANDROID_INFO(("HAPD_AUTO_CHANNEL = %d, band=%d \n", channel, band));
-
-#if defined(CONFIG_WLAN_BEYONDX) || defined(CONFIG_SEC_5GMODEL)
- wl_cfg80211_register_dev_ril_bridge_event_notifier();
- if (band == WLC_BAND_2G) {
- wl_cfg80211_send_msg_to_ril();
-
- if (g_mhs_chan_for_cpcoex) {
- channel = g_mhs_chan_for_cpcoex;
- g_mhs_chan_for_cpcoex = 0;
- goto done2;
- }
- }
- wl_cfg80211_unregister_dev_ril_bridge_event_notifier();
-#endif /* CONFIG_WLAN_BEYONDX || defined(CONFIG_SEC_5GMODEL) */
-
- /* If STA is connected, return is STA channel, else ACS can be issued,
- * set spect to 0 and proceed with ACS
- */
- channel = wl_cfg80211_get_sta_channel(cfg);
- if (channel) {
- sta_band = WL_GET_BAND(channel);
- switch (sta_band) {
- case (WL_CHANSPEC_BAND_5G): {
- if (band == WLC_BAND_2G || band == WLC_BAND_AUTO) {
- channel = APCS_DEFAULT_2G_CH;
- }
- break;
- }
- case (WL_CHANSPEC_BAND_2G): {
- if (band == WLC_BAND_5G) {
- channel = APCS_DEFAULT_5G_CH;
- }
- break;
- }
- default:
- /* Intentional fall through to use same sta channel for softap */
- break;
- }
- WL_MSG(dev->name, "band=%d, sta_band=%d, channel=%d\n", band, sta_band, channel);
- goto done2;
- }
+ ANDROID_INFO(("%s : HAPD_AUTO_CHANNEL = %d, band=%d \n", __FUNCTION__, channel, band));
- channel = wl_ext_autochannel(dev, ACS_FW_BIT|ACS_DRV_BIT, band);
- if (channel)
- goto done2;
- else
- goto done;
+ ret = wldev_ioctl_get(dev, WLC_GET_BAND, &band_cur, sizeof(band_cur));
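+ /* save the current band here; it is restored at done2 once the ACS scan completes */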
- ret = wldev_ioctl_get(dev, WLC_GET_SPECT_MANAGMENT, &spect, sizeof(spect));
- if (ret) {
- ANDROID_ERROR(("ACS: error getting the spect, ret=%d\n", ret));
+ if ((ret =
+ wldev_ioctl_get(dev, WLC_GET_SPECT_MANAGMENT, &spect, sizeof(spect))) < 0) {
+ ANDROID_ERROR(("%s: ACS: error getting the spect\n", __FUNCTION__));
goto done;
}
if (spect > 0) {
- ret = wl_cfg80211_set_spect(dev, 0);
- if (ret < 0) {
- ANDROID_ERROR(("ACS: error while setting spect, ret=%d\n", ret));
+ /* If STA is connected, return is STA channel, else ACS can be issued,
+ * set spect to 0 and proceed with ACS
+ */
+ channel = wl_cfg80211_get_sta_channel(dev);
+ if (channel) {
+ channel = (channel <= CH_MAX_2G_CHANNEL) ? channel : APCS_DEFAULT_2G_CH;
+ goto done2;
+ }
+
+ if ((ret = wl_cfg80211_set_spect(dev, 0)) < 0) {
+ ANDROID_ERROR(("ACS: error while setting spect\n"));
goto done;
}
}
- reqbuf = (u8 *)MALLOCZ(cfg->osh, CHANSPEC_BUF_SIZE);
+ reqbuf = kzalloc(CHANSPEC_BUF_SIZE, GFP_KERNEL);
if (reqbuf == NULL) {
- ANDROID_ERROR(("failed to allocate chanspec buffer\n"));
+ ANDROID_ERROR(("%s: failed to allocate chanspec buffer\n", __FUNCTION__));
return -ENOMEM;
}
if (band == WLC_BAND_AUTO) {
- ANDROID_INFO(("ACS full channel scan \n"));
+ ANDROID_INFO(("%s: ACS full channel scan \n", __func__));
reqbuf[0] = htod32(0);
} else if (band == WLC_BAND_5G) {
- ANDROID_INFO(("ACS 5G band scan \n"));
+ band_new = (band_cur == WLC_BAND_2G) ? band_cur : WLC_BAND_5G;
+ ret = wldev_ioctl_set(dev, WLC_SET_BAND, &band_new, sizeof(band_new));
+ if (ret < 0)
+ WL_ERR(("WLC_SET_BAND error %d\n", ret));
+ ANDROID_INFO(("%s: ACS 5G band scan \n", __func__));
if ((ret = wl_cfg80211_get_chanspecs_5g(dev, reqbuf, CHANSPEC_BUF_SIZE)) < 0) {
ANDROID_ERROR(("ACS 5g chanspec retreival failed! \n"));
goto done;
* If channel argument is not provided/ argument 20 is provided,
* Restrict channel to 2GHz, 20MHz BW, No SB
*/
- ANDROID_INFO(("ACS 2G band scan \n"));
+ ANDROID_INFO(("%s: ACS 2G band scan \n", __func__));
if ((ret = wl_cfg80211_get_chanspecs_2g(dev, reqbuf, CHANSPEC_BUF_SIZE)) < 0) {
ANDROID_ERROR(("ACS 2g chanspec retreival failed! \n"));
goto done;
goto done2;
}
- buf_size = (band == WLC_BAND_AUTO) ? sizeof(int) : CHANSPEC_BUF_SIZE;
+ buf_size = CHANSPEC_BUF_SIZE;
ret = wldev_ioctl_set(dev, WLC_START_CHANNEL_SEL, (void *)reqbuf,
buf_size);
if (ret < 0) {
- ANDROID_ERROR(("can't start auto channel scan, err = %d\n", ret));
+ ANDROID_ERROR(("%s: can't start auto channel scan, err = %d\n",
+ __FUNCTION__, ret));
channel = 0;
goto done;
}
chosen = dtoh32(chosen);
}
+ if ((ret == 0) && (dtoh32(chosen) != 0)) {
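+ /* on most chips the chosen value is a chanspec; convert to host order and extract the control channel (BCM43143 is excluded) */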
+ uint chip;
+ chip = dhd_conf_get_chip(dhd_get_pub(dev));
+ if (chip != BCM43143_CHIP_ID) {
+ u32 chanspec = 0;
+ chanspec = wl_chspec_driver_to_host(chosen);
+ ANDROID_INFO(("%s: selected chanspec = 0x%x\n", __FUNCTION__, chanspec));
+ chosen = wf_chspec_ctlchan(chanspec);
+ ANDROID_INFO(("%s: selected chosen = 0x%x\n", __FUNCTION__, chosen));
+ }
+ }
+
if (chosen) {
int chosen_band;
int apcs_band;
#endif /* D11AC_IOTYPES */
apcs_band = (band == WLC_BAND_AUTO) ? WLC_BAND_2G : band;
chosen_band = (channel <= CH_MAX_2G_CHANNEL) ? WLC_BAND_2G : WLC_BAND_5G;
- if (apcs_band == chosen_band) {
- WL_MSG(dev->name, "selected channel = %d\n", channel);
+ if (band == WLC_BAND_AUTO) {
+ printf("%s: selected channel = %d\n", __FUNCTION__, channel);
+ break;
+ } else if (apcs_band == chosen_band) {
+ printf("%s: selected channel = %d\n", __FUNCTION__, channel);
break;
}
}
- ANDROID_INFO(("%d tried, ret = %d, chosen = 0x%x\n",
+ ANDROID_INFO(("%s: %d tried, ret = %d, chosen = 0x%x\n", __FUNCTION__,
(APCS_MAX_RETRY - retry), ret, chosen));
OSL_SLEEP(250);
}
done:
if ((retry == 0) || (ret < 0)) {
/* On failure, fallback to a default channel */
- if (band == WLC_BAND_5G) {
+ if (band == WLC_BAND_5G) {
channel = APCS_DEFAULT_5G_CH;
} else {
channel = APCS_DEFAULT_2G_CH;
}
- ANDROID_ERROR(("ACS failed. Fall back to default channel (%d) \n", channel));
+ ANDROID_ERROR(("%s: ACS failed."
+ " Fall back to default channel (%d) \n", __FUNCTION__, channel));
}
done2:
+ ret = wldev_ioctl_set(dev, WLC_SET_BAND, &band_cur, sizeof(band_cur));
+ if (ret < 0)
+ WL_ERR(("WLC_SET_BAND error %d\n", ret));
if (spect > 0) {
if ((ret = wl_cfg80211_set_spect(dev, spect) < 0)) {
- ANDROID_ERROR(("ACS: error while setting spect\n"));
+ ANDROID_ERROR(("%s: ACS: error while setting spect\n", __FUNCTION__));
}
}
if (reqbuf) {
- MFREE(cfg->osh, reqbuf, CHANSPEC_BUF_SIZE);
+ kfree(reqbuf);
}
if (channel) {
- ret = snprintf(command, total_len, "%d", channel);
- ANDROID_INFO(("command result is %s \n", command));
+ if (channel < 15)
+ pos += snprintf(pos, total_len, "2g=");
+ else
+ pos += snprintf(pos, total_len, "5g=");
+ pos += snprintf(pos, total_len - (pos - command), "%d", channel);
+ ANDROID_INFO(("%s: command result is %s \n", __FUNCTION__, command));
+ return strlen(command);
+ } else {
+ return ret;
}
-
- return ret;
}
#endif /* WL_SUPPORT_AUTO_CHANNEL */
-int wl_android_set_roam_mode(struct net_device *dev, char *command)
-{
- int error = 0;
- int mode = 0;
+#ifdef CUSTOMER_HW4_PRIVATE_CMD
- if (sscanf(command, "%*s %d", &mode) != 1) {
- ANDROID_ERROR(("%s: Failed to get Parameter\n", __FUNCTION__));
- return -1;
- }
- error = wldev_iovar_setint(dev, "roam_off", mode);
- if (error) {
+#ifdef SUPPORT_SET_LPC
+static int
+wl_android_set_lpc(struct net_device *dev, const char* string_num)
+{
+ int lpc_enabled, ret;
+ s32 val = 1;
+
+ lpc_enabled = bcm_atoi(string_num);
+ ANDROID_INFO(("%s : HAPD_LPC_ENABLED = %d\n", __FUNCTION__, lpc_enabled));
+
+ ret = wldev_ioctl_set(dev, WLC_DOWN, &val, sizeof(s32));
+ if (ret < 0)
+ ANDROID_ERROR(("WLC_DOWN error %d\n", ret));
+
+ wldev_iovar_setint(dev, "lpc", lpc_enabled);
+
+ ret = wldev_ioctl_set(dev, WLC_UP, &val, sizeof(s32));
+ if (ret < 0)
+ ANDROID_ERROR(("WLC_UP error %d\n", ret));
+
+ return 1;
+}
+#endif /* SUPPORT_SET_LPC */
+
+static int
+wl_android_ch_res_rl(struct net_device *dev, bool change)
+{
+ int error = 0;
+ s32 srl = 7;
+ s32 lrl = 4;
+ printk("%s enter\n", __FUNCTION__);
+ if (change) {
+ srl = 4;
+ lrl = 2;
+ }
+ error = wldev_ioctl_set(dev, WLC_SET_SRL, &srl, sizeof(s32));
+ if (error) {
+ ANDROID_ERROR(("Failed to set SRL, error = %d\n", error));
+ }
+ error = wldev_ioctl_set(dev, WLC_SET_LRL, &lrl, sizeof(s32));
+ if (error) {
+ ANDROID_ERROR(("Failed to set LRL, error = %d\n", error));
+ }
+ return error;
+}
+
+
+#ifdef WL_RELMCAST
+static int
+wl_android_rmc_enable(struct net_device *net, int rmc_enable)
+{
+ int err;
+
+ err = wldev_iovar_setint(net, "rmc_ackreq", rmc_enable);
+ return err;
+}
+
+static int
+wl_android_rmc_set_leader(struct net_device *dev, const char* straddr)
+{
+ int error = BCME_OK;
+ char smbuf[WLC_IOCTL_SMLEN];
+ wl_rmc_entry_t rmc_entry;
+ ANDROID_INFO(("%s: Set new RMC leader %s\n", __FUNCTION__, straddr));
+
+ memset(&rmc_entry, 0, sizeof(wl_rmc_entry_t));
+ if (!bcm_ether_atoe(straddr, &rmc_entry.addr)) {
+ if (strlen(straddr) == 1 && bcm_atoi(straddr) == 0) {
+ ANDROID_INFO(("%s: Set auto leader selection mode\n", __FUNCTION__));
+ memset(&rmc_entry, 0, sizeof(wl_rmc_entry_t));
+ } else {
+ ANDROID_ERROR(("%s: No valid mac address provided\n",
+ __FUNCTION__));
+ return BCME_ERROR;
+ }
+ }
+
+ error = wldev_iovar_setbuf(dev, "rmc_ar", &rmc_entry, sizeof(wl_rmc_entry_t),
+ smbuf, sizeof(smbuf), NULL);
+
+ if (error != BCME_OK) {
+ ANDROID_ERROR(("%s: Unable to set RMC leader, error = %d\n",
+ __FUNCTION__, error));
+ }
+
+ return error;
+}
+
+static int wl_android_set_rmc_event(struct net_device *dev, char *command, int total_len)
+{
+ int err = 0;
+ int pid = 0;
+
+ if (sscanf(command, CMD_SET_RMC_EVENT " %d", &pid) <= 0) {
+ ANDROID_ERROR(("Failed to get Parameter from : %s\n", command));
+ return -1;
+ }
+
+ /* set pid, and if the event was happened, let's send a notification through netlink */
+ wl_cfg80211_set_rmc_pid(dev, pid);
+
+ ANDROID_TRACE(("RMC pid=%d\n", pid));
+
+ return err;
+}
+#endif /* WL_RELMCAST */
+
+int wl_android_get_singlecore_scan(struct net_device *dev, char *command, int total_len)
+{
+ int error = 0;
+ int bytes_written = 0;
+ int mode = 0;
+
+ error = wldev_iovar_getint(dev, "scan_ps", &mode);
+ if (error) {
+ ANDROID_ERROR(("%s: Failed to get single core scan Mode, error = %d\n",
+ __FUNCTION__, error));
+ return -1;
+ }
+
+ bytes_written = snprintf(command, total_len, "%s %d", CMD_GET_SCSCAN, mode);
+
+ return bytes_written;
+}
+
+int wl_android_set_singlecore_scan(struct net_device *dev, char *command, int total_len)
+{
+ int error = 0;
+ int mode = 0;
+
+ if (sscanf(command, "%*s %d", &mode) != 1) {
+ ANDROID_ERROR(("%s: Failed to get Parameter\n", __FUNCTION__));
+ return -1;
+ }
+
+ error = wldev_iovar_setint(dev, "scan_ps", mode);
+ if (error) {
+ ANDROID_ERROR(("%s[1]: Failed to set Mode %d, error = %d\n",
+ __FUNCTION__, mode, error));
+ return -1;
+ }
+
+ return error;
+}
+#ifdef TEST_TX_POWER_CONTROL
+static int
+wl_android_set_tx_power(struct net_device *dev, const char* string_num)
+{
+ int err = 0;
+ s32 dbm;
+ enum nl80211_tx_power_setting type;
+
+ dbm = bcm_atoi(string_num);
+
+ if (dbm < -1) {
+ ANDROID_ERROR(("%s: dbm is negative...\n", __FUNCTION__));
+ return -EINVAL;
+ }
+
+ if (dbm == -1)
+ type = NL80211_TX_POWER_AUTOMATIC;
+ else
+ type = NL80211_TX_POWER_FIXED;
+
+ err = wl_set_tx_power(dev, type, dbm);
+ if (unlikely(err)) {
+ ANDROID_ERROR(("%s: error (%d)\n", __FUNCTION__, err));
+ return err;
+ }
+
+ return 1;
+}
+
+static int
+wl_android_get_tx_power(struct net_device *dev, char *command, int total_len)
+{
+ int err;
+ int bytes_written;
+ s32 dbm = 0;
+
+ err = wl_get_tx_power(dev, &dbm);
+ if (unlikely(err)) {
+ ANDROID_ERROR(("%s: error (%d)\n", __FUNCTION__, err));
+ return err;
+ }
+
+ bytes_written = snprintf(command, total_len, "%s %d",
+ CMD_TEST_GET_TX_POWER, dbm);
+
+ ANDROID_ERROR(("%s: GET_TX_POWER: dBm=%d\n", __FUNCTION__, dbm));
+
+ return bytes_written;
+}
+#endif /* TEST_TX_POWER_CONTROL */
+
+static int
+wl_android_set_sarlimit_txctrl(struct net_device *dev, const char* string_num)
+{
+ int err = 0;
+ int setval = 0;
+ s32 mode = bcm_atoi(string_num);
+
+ /* '0' means activate sarlimit
+ * and '-1' means back to normal state (deactivate sarlimit)
+ */
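+ /* e.g. string_num "0" -> sar_enable=1 (SAR limit active), "-1" -> sar_enable=0 (back to normal) */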
+ if (mode == 0) {
+ ANDROID_INFO(("%s: SAR limit control activated\n", __FUNCTION__));
+ setval = 1;
+ } else if (mode == -1) {
+ ANDROID_INFO(("%s: SAR limit control deactivated\n", __FUNCTION__));
+ setval = 0;
+ } else {
+ return -EINVAL;
+ }
+
+ err = wldev_iovar_setint(dev, "sar_enable", setval);
+ if (unlikely(err)) {
+ ANDROID_ERROR(("%s: error (%d)\n", __FUNCTION__, err));
+ return err;
+ }
+ return 1;
+}
+#endif /* CUSTOMER_HW4_PRIVATE_CMD */
+
+int wl_android_set_roam_mode(struct net_device *dev, char *command, int total_len)
+{
+ int error = 0;
+ int mode = 0;
+
+ if (sscanf(command, "%*s %d", &mode) != 1) {
+ ANDROID_ERROR(("%s: Failed to get Parameter\n", __FUNCTION__));
+ return -1;
+ }
+
+ error = wldev_iovar_setint(dev, "roam_off", mode);
+ if (error) {
ANDROID_ERROR(("%s: Failed to set roaming Mode %d, error = %d\n",
__FUNCTION__, mode, error));
return -1;
vndr_ie_setbuf_t *vndr_ie = NULL;
s32 iecount;
uint32 pktflag;
+ gfp_t kflags = in_atomic() ? GFP_ATOMIC : GFP_KERNEL;
s32 err = BCME_OK, bssidx;
struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
}
tot_len = (int)(sizeof(vndr_ie_setbuf_t) + (datalen - 1));
- vndr_ie = (vndr_ie_setbuf_t *)MALLOCZ(cfg->osh, tot_len);
+ vndr_ie = (vndr_ie_setbuf_t *) kzalloc(tot_len, kflags);
if (!vndr_ie) {
ANDROID_ERROR(("IE memory alloc failed\n"));
return -ENOMEM;
}
/* Copy the vndr_ie SET command ("add"/"del") to the buffer */
- strlcpy(vndr_ie->cmd, "add", sizeof(vndr_ie->cmd));
+ strncpy(vndr_ie->cmd, "add", VNDR_IE_CMD_LEN - 1);
+ vndr_ie->cmd[VNDR_IE_CMD_LEN - 1] = '\0';
/* Set the IE count - the buffer contains only 1 IE */
iecount = htod32(1);
ielen = DOT11_OUI_LEN + datalen;
vndr_ie->vndr_ie_buffer.vndr_ie_list[0].vndr_ie_data.len = (uchar) ielen;
- ioctl_buf = (char *)MALLOC(cfg->osh, WLC_IOCTL_MEDLEN);
+ ioctl_buf = kmalloc(WLC_IOCTL_MEDLEN, GFP_KERNEL);
if (!ioctl_buf) {
ANDROID_ERROR(("ioctl memory alloc failed\n"));
if (vndr_ie) {
- MFREE(cfg->osh, vndr_ie, tot_len);
+ kfree(vndr_ie);
}
return -ENOMEM;
}
- bzero(ioctl_buf, WLC_IOCTL_MEDLEN); /* init the buffer */
+ memset(ioctl_buf, 0, WLC_IOCTL_MEDLEN); /* init the buffer */
if ((bssidx = wl_get_bssidx_by_wdev(cfg, dev->ieee80211_ptr)) < 0) {
ANDROID_ERROR(("Find index failed\n"));
err = BCME_ERROR;
goto end;
}
err = wldev_iovar_setbuf_bsscfg(dev, "vndr_ie", vndr_ie, tot_len, ioctl_buf,
- WLC_IOCTL_MEDLEN, bssidx, &cfg->ioctl_buf_sync);
+ WLC_IOCTL_MEDLEN, bssidx, &cfg->ioctl_buf_sync);
+
end:
if (err != BCME_OK) {
err = -EINVAL;
if (vndr_ie) {
- MFREE(cfg->osh, vndr_ie, tot_len);
+ kfree(vndr_ie);
}
}
else {
}
if (ioctl_buf) {
- MFREE(cfg->osh, ioctl_buf, WLC_IOCTL_MEDLEN);
+ kfree(ioctl_buf);
}
return err;
}
-#endif /* WL_CFG80211 */
+#endif
#if defined(BCMFW_ROAM_ENABLE)
static int
if (total_len_left < (num_akm_suites * WIDTH_AKM_SUITE))
return -1;
- bzero(buf, sizeof(buf));
- bzero(akm_suites, sizeof(akm_suites));
- bzero(ucipher_suites, sizeof(ucipher_suites));
+ memset(buf, 0, sizeof(buf));
+ memset(akm_suites, 0, sizeof(akm_suites));
+ memset(ucipher_suites, 0, sizeof(ucipher_suites));
/* Save the AKM suites passed in the command */
for (i = 0; i < num_akm_suites; i++) {
memcpy(pref, (uint8 *)&ucipher_suites[i], WPA_SUITE_LEN);
pref += WPA_SUITE_LEN;
/* Set to 0 to match any available multicast cipher */
- bzero(pref, WPA_SUITE_LEN);
+ memset(pref, 0, WPA_SUITE_LEN);
pref += WPA_SUITE_LEN;
}
}
{
struct io_cfg *resume_cfg;
s32 ret;
- struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
- resume_cfg = (struct io_cfg *)MALLOCZ(cfg->osh, sizeof(struct io_cfg));
+ resume_cfg = kzalloc(sizeof(struct io_cfg), GFP_KERNEL);
if (!resume_cfg)
return -ENOMEM;
resume_cfg->iovar = config->iovar;
} else {
- resume_cfg->arg = MALLOCZ(cfg->osh, config->len);
+ resume_cfg->arg = kzalloc(config->len, GFP_KERNEL);
if (!resume_cfg->arg) {
ret = -ENOMEM;
goto error;
return 0;
error:
- MFREE(cfg->osh, resume_cfg->arg, config->len);
- MFREE(cfg->osh, resume_cfg, sizeof(struct io_cfg));
+ kfree(resume_cfg->arg);
+ kfree(resume_cfg);
return ret;
}
struct io_cfg *config;
struct list_head *cur, *q;
s32 ret = 0;
- struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
- GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
+
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wcast-qual"
+#endif
list_for_each_safe(cur, q, head) {
config = list_entry(cur, struct io_cfg, list);
- GCC_DIAGNOSTIC_POP();
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic pop
+#endif
if (config->iovar) {
if (!ret)
ret = wldev_iovar_setint(dev, config->iovar,
config->arg, config->len);
if (config->ioctl + 1 == WLC_SET_PM)
wl_cfg80211_update_power_mode(dev);
- MFREE(cfg->osh, config->arg, config->len);
+ kfree(config->arg);
}
list_del(cur);
- MFREE(cfg->osh, config, sizeof(struct io_cfg));
+ kfree(config);
+ }
+}
+#ifdef WL11ULB
+static int
+wl_android_set_ulb_mode(struct net_device *dev, char *command, int total_len)
+{
+ int mode = 0;
+
+ ANDROID_INFO(("set ulb mode (%s) \n", command));
+ if (sscanf(command, "%*s %d", &mode) != 1) {
+ ANDROID_ERROR(("%s: Failed to get Parameter\n", __FUNCTION__));
+ return -1;
}
+ return wl_cfg80211_set_ulb_mode(dev, mode);
}
+static int
+wl_android_set_ulb_bw(struct net_device *dev, char *command, int total_len)
+{
+ int bw = 0;
+ u8 *pos;
+ char *ifname = NULL;
+ ANDROID_INFO(("set ulb bw (%s) \n", command));
+
+ /*
+ * For sta/ap: IFNAME=<ifname> DRIVER ULB_BW <bw> ifname
+ * For p2p: IFNAME=wlan0 DRIVER ULB_BW <bw> p2p-dev-wlan0
+ */
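+ /* illustrative example (assumed values): "ULB_BW 2 wlan0"; ifname is read from pos + 2, so <bw> is expected to be a single digit */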
+ if (total_len < strlen(CMD_ULB_BW) + 2)
+ return -EINVAL;
+
+ pos = command + strlen(CMD_ULB_BW) + 1;
+ bw = bcm_atoi(pos);
+ if (strlen(pos) >= 5) {
+ ifname = pos + 2;
+ }
+
+ ANDROID_INFO(("[ULB] ifname:%s ulb_bw:%d \n", ifname, bw));
+ return wl_cfg80211_set_ulb_bw(dev, bw, ifname);
+}
+#endif /* WL11ULB */
static int
-wl_android_set_miracast(struct net_device *dev, char *command)
+wl_android_set_miracast(struct net_device *dev, char *command, int total_len)
{
- int mode, val = 0;
+ int mode, val;
int ret = 0;
struct io_cfg config;
wl_android_iolist_resume(dev, &miracast_resume_list);
miracast_cur_mode = MIRACAST_MODE_OFF;
- bzero((void *)&config, sizeof(config));
switch (mode) {
case MIRACAST_MODE_SOURCE:
-#ifdef MIRACAST_MCHAN_ALGO
/* setting mchan_algo to platform specific value */
config.iovar = "mchan_algo";
if (ret) {
goto resume;
}
-#endif /* MIRACAST_MCHAN_ALGO */
-#ifdef MIRACAST_MCHAN_BW
/* setting mchan_bw to platform specific value */
config.iovar = "mchan_bw";
config.param = MIRACAST_MCHAN_BW;
if (ret) {
goto resume;
}
-#endif /* MIRACAST_MCHAN_BW */
-#ifdef MIRACAST_AMPDU_SIZE
/* setting apmdu to platform specific value */
config.iovar = "ampdu_mpdu";
config.param = MIRACAST_AMPDU_SIZE;
if (ret) {
goto resume;
}
-#endif /* MIRACAST_AMPDU_SIZE */
/* FALLTHROUGH */
/* Source mode shares most configurations with sink mode.
* Fall through here to avoid code duplication
/* disable internal roaming */
config.iovar = "roam_off";
config.param = 1;
- config.arg = NULL;
- config.len = 0;
ret = wl_android_iolist_add(dev, &miracast_resume_list, &config);
if (ret) {
goto resume;
wl_android_iolist_resume(dev, &miracast_resume_list);
return ret;
}
-#endif /* WL_CFG80211 */
+#endif
-#ifdef WL_RELMCAST
#define NETLINK_OXYGEN 30
#define AIBSS_BEACON_TIMEOUT 10
struct netlink_kernel_cfg cfg = {
.input = wl_netlink_recv,
};
-#endif // endif
+#endif
if (nl_sk != NULL) {
ANDROID_ERROR(("nl_sk already exist\n"));
nl_sk = netlink_kernel_create(&init_net, NETLINK_OXYGEN, THIS_MODULE, &cfg);
#else
nl_sk = netlink_kernel_create(&init_net, NETLINK_OXYGEN, &cfg);
-#endif // endif
+#endif
if (nl_sk == NULL) {
ANDROID_ERROR(("nl_sk is not ready\n"));
/* netlink_unicast() takes ownership of the skb and frees it itself. */
ret = netlink_unicast(nl_sk, skb, pid, 0);
- ANDROID_INFO(("netlink_unicast() pid=%d, ret=%d\n", pid, ret));
+ ANDROID_TRACE(("netlink_unicast() pid=%d, ret=%d\n", pid, ret));
nlmsg_failure:
return ret;
}
-#endif /* WL_RELMCAST */
-int wl_keep_alive_set(struct net_device *dev, char* extra)
+
+int wl_keep_alive_set(struct net_device *dev, char* extra, int total_len)
{
wl_mkeep_alive_pkt_t mkeep_alive_pkt;
int ret;
uint period_msec = 0;
char *buf;
- dhd_pub_t *dhd = dhd_get_pub(dev);
if (extra == NULL) {
ANDROID_ERROR(("%s: extra is NULL\n", __FUNCTION__));
}
ANDROID_ERROR(("%s: period_msec is %d\n", __FUNCTION__, period_msec));
- bzero(&mkeep_alive_pkt, sizeof(wl_mkeep_alive_pkt_t));
+ memset(&mkeep_alive_pkt, 0, sizeof(wl_mkeep_alive_pkt_t));
mkeep_alive_pkt.period_msec = period_msec;
mkeep_alive_pkt.version = htod16(WL_MKEEP_ALIVE_VERSION);
mkeep_alive_pkt.keep_alive_id = 0;
mkeep_alive_pkt.len_bytes = 0;
- buf = (char *)MALLOC(dhd->osh, WLC_IOCTL_SMLEN);
+ buf = kmalloc(WLC_IOCTL_SMLEN, GFP_KERNEL);
if (!buf) {
ANDROID_ERROR(("%s: buffer alloc failed\n", __FUNCTION__));
return BCME_NOMEM;
ANDROID_ERROR(("%s:keep_alive set failed:%d\n", __FUNCTION__, ret));
else
ANDROID_TRACE(("%s:keep_alive set ok\n", __FUNCTION__));
- MFREE(dhd->osh, buf, WLC_IOCTL_SMLEN);
+ kfree(buf);
return ret;
}
#ifdef BT_WIFI_HANDOVER
static int
-wl_tbow_teardown(struct net_device *dev)
+wl_tbow_teardown(struct net_device *dev, char *command, int total_len)
{
int err = BCME_OK;
char buf[WLC_IOCTL_SMLEN];
tbow_setup_netinfo_t netinfo;
- bzero(&netinfo, sizeof(netinfo));
+ memset(&netinfo, 0, sizeof(netinfo));
netinfo.opmode = TBOW_HO_MODE_TEARDOWN;
err = wldev_iovar_setbuf_bsscfg(dev, "tbow_doho", &netinfo,
sizeof(tbow_setup_netinfo_t), buf, WLC_IOCTL_SMLEN, 0, NULL);
if (err < 0) {
ANDROID_ERROR(("tbow_doho iovar error %d\n", err));
- return err;
+ return err;
}
return err;
}
#ifdef SET_RPS_CPUS
static int
-wl_android_set_rps_cpus(struct net_device *dev, char *command)
+wl_android_set_rps_cpus(struct net_device *dev, char *command, int total_len)
{
int error, enable;
{
int bytes_written, error, result = 0, single_stream, stf = -1, i, nss = 0, mcs_map;
uint32 rspec;
- uint encode, txexp;
- wl_bss_info_t *bi;
+ uint encode, rate, txexp;
+ struct wl_bss_info *bi;
int datalen = sizeof(uint32) + sizeof(wl_bss_info_t);
- char buf[WLC_IOCTL_SMLEN];
+ char buf[sizeof(uint32) + sizeof(wl_bss_info_t)];
- if (datalen > WLC_IOCTL_SMLEN) {
- ANDROID_ERROR(("data too big\n"));
- return -1;
- }
-
- bzero(buf, datalen);
/* get BSS information */
*(u32 *) buf = htod32(datalen);
error = wldev_ioctl_get(dev, WLC_GET_BSS_INFO, (void *)buf, datalen);
return -1;
}
- bi = (wl_bss_info_t*) (buf + sizeof(uint32));
+ bi = (struct wl_bss_info *) (buf + sizeof(uint32));
for (i = 0; i < ETHER_ADDR_LEN; i++) {
if (bi->BSSID.octet[i] > 0) {
}
if (i == ETHER_ADDR_LEN) {
- ANDROID_INFO(("No BSSID\n"));
+ ANDROID_TRACE(("No BSSID\n"));
return -1;
}
}
encode = (rspec & WL_RSPEC_ENCODING_MASK);
+ rate = (rspec & WL_RSPEC_RATE_MASK);
txexp = (rspec & WL_RSPEC_TXEXP_MASK) >> WL_RSPEC_TXEXP_SHIFT;
switch (encode) {
}
single_stream = (encode == WL_RSPEC_ENCODE_RATE) ||
- ((encode == WL_RSPEC_ENCODE_HT) && (rspec & WL_RSPEC_HT_MCS_MASK) < 8) ||
+ ((encode == WL_RSPEC_ENCODE_HT) && rate < 8) ||
((encode == WL_RSPEC_ENCODE_VHT) &&
((rspec & WL_RSPEC_VHT_NSS_MASK) >> WL_RSPEC_VHT_NSS_SHIFT) == 1);
}
}
- ANDROID_INFO(("%s:result=%d, stf=%d, single_stream=%d, mcs map=%d\n",
+ ANDROID_TRACE(("%s:result=%d, stf=%d, single_stream=%d, mcs map=%d\n",
__FUNCTION__, result, stf, single_stream, nss));
- bytes_written = snprintf(command, total_len, "%s %d", CMD_GET_LINK_STATUS, result);
+ bytes_written = snprintf(command, total_len, "%s %d", CMD_GET_LINK_STATUS, result);
return bytes_written;
}
#ifdef P2P_LISTEN_OFFLOADING
-
s32
-wl_cfg80211_p2plo_deinit(struct bcm_cfg80211 *cfg)
+wl_cfg80211_p2plo_offload(struct net_device *dev, char *cmd, char* buf, int len)
{
- s32 bssidx;
int ret = 0;
- int p2plo_pause = 0;
- dhd_pub_t *dhd = NULL;
- if (!cfg || !cfg->p2p) {
- ANDROID_ERROR(("Wl %p or cfg->p2p %p is null\n",
- cfg, cfg ? cfg->p2p : 0));
- return 0;
- }
- dhd = (dhd_pub_t *)(cfg->pub);
- if (!dhd->up) {
- ANDROID_ERROR(("bus is already down\n"));
- return ret;
- }
+ ANDROID_ERROR(("Entry cmd:%s arg_len:%d \n", cmd, len));
- bssidx = wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_DEVICE);
- ret = wldev_iovar_setbuf_bsscfg(bcmcfg_to_prmry_ndev(cfg),
- "p2po_stop", (void*)&p2plo_pause, sizeof(p2plo_pause),
- cfg->ioctl_buf, WLC_IOCTL_SMLEN, bssidx, &cfg->ioctl_buf_sync);
- if (ret < 0) {
- ANDROID_ERROR(("p2po_stop Failed :%d\n", ret));
+ if (strncmp(cmd, "P2P_LO_START", strlen("P2P_LO_START")) == 0) {
+ ret = wl_cfg80211_p2plo_listen_start(dev, buf, len);
+ } else if (strncmp(cmd, "P2P_LO_STOP", strlen("P2P_LO_STOP")) == 0) {
+ ret = wl_cfg80211_p2plo_listen_stop(dev);
+ } else {
+ ANDROID_ERROR(("Request for Unsupported CMD:%s \n", buf));
+ ret = -EINVAL;
}
-
- return ret;
+ return ret;
}
-s32
-wl_cfg80211_p2plo_listen_start(struct net_device *dev, u8 *buf, int len)
-{
- struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
- s32 bssidx = wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_DEVICE);
- wl_p2plo_listen_t p2plo_listen;
- int ret = -EAGAIN;
- int channel = 0;
- int period = 0;
- int interval = 0;
- int count = 0;
- if (WL_DRV_STATUS_SENDING_AF_FRM_EXT(cfg)) {
- ANDROID_ERROR(("Sending Action Frames. Try it again.\n"));
- goto exit;
- }
+#endif /* P2P_LISTEN_OFFLOADING */
- if (wl_get_drv_status_all(cfg, SCANNING)) {
- ANDROID_ERROR(("Scanning already\n"));
- goto exit;
- }
+#if defined(BCM4359_CHIP) && defined(WL_CFG80211)
+int
+wl_android_murx_bfe_cap(struct net_device *dev, int val)
+{
+ int err = BCME_OK;
+ int iface_count = wl_cfg80211_iface_count(dev);
+ struct ether_addr bssid;
+ wl_reassoc_params_t params;
- if (wl_get_drv_status(cfg, SCAN_ABORTING, dev)) {
- ANDROID_ERROR(("Scanning being aborted\n"));
- goto exit;
+ if (iface_count > 1) {
+ ANDROID_ERROR(("murx_bfe_cap change is not allowed when "
+ "there are multiple interfaces\n"));
+ return -EINVAL;
}
-
- if (wl_get_p2p_status(cfg, DISC_IN_PROGRESS)) {
- ANDROID_ERROR(("p2p listen offloading already running\n"));
- goto exit;
+ /* Now there is only single interface */
+ err = wldev_iovar_setint(dev, "murx_bfe_cap", val);
+ if (unlikely(err)) {
+ ANDROID_ERROR(("Failed to set murx_bfe_cap IOVAR to %d,"
+ "error %d\n", val, err));
+ return err;
}
- /* Just in case if it is not enabled */
- if ((ret = wl_cfgp2p_enable_discovery(cfg, dev, NULL, 0)) < 0) {
- ANDROID_ERROR(("cfgp2p_enable discovery failed"));
- goto exit;
+ /* If successful intiate a reassoc */
+ memset(&bssid, 0, ETHER_ADDR_LEN);
+ if ((err = wldev_ioctl_get(dev, WLC_GET_BSSID, &bssid, ETHER_ADDR_LEN)) < 0) {
+ ANDROID_ERROR(("Failed to get bssid, error=%d\n", err));
+ return err;
}
- bzero(&p2plo_listen, sizeof(wl_p2plo_listen_t));
-
- if (len) {
- sscanf(buf, " %10d %10d %10d %10d", &channel, &period, &interval, &count);
- if ((channel == 0) || (period == 0) ||
- (interval == 0) || (count == 0)) {
- ANDROID_ERROR(("Wrong argument %d/%d/%d/%d \n",
- channel, period, interval, count));
- ret = -EAGAIN;
- goto exit;
- }
- p2plo_listen.period = period;
- p2plo_listen.interval = interval;
- p2plo_listen.count = count;
+ bzero(&params, sizeof(wl_reassoc_params_t));
+ memcpy(&params.bssid, &bssid, ETHER_ADDR_LEN);
- ANDROID_ERROR(("channel:%d period:%d, interval:%d count:%d\n",
- channel, period, interval, count));
+ if ((err = wldev_ioctl_set(dev, WLC_REASSOC, &params,
+ sizeof(wl_reassoc_params_t))) < 0) {
+ ANDROID_ERROR(("reassoc failed err:%d \n", err));
} else {
- ANDROID_ERROR(("Argument len is wrong.\n"));
- ret = -EAGAIN;
- goto exit;
- }
-
- if ((ret = wldev_iovar_setbuf_bsscfg(dev, "p2po_listen_channel", (void*)&channel,
- sizeof(channel), cfg->ioctl_buf, WLC_IOCTL_SMLEN,
- bssidx, &cfg->ioctl_buf_sync)) < 0) {
- ANDROID_ERROR(("p2po_listen_channel Failed :%d\n", ret));
- goto exit;
- }
-
- if ((ret = wldev_iovar_setbuf_bsscfg(dev, "p2po_listen", (void*)&p2plo_listen,
- sizeof(wl_p2plo_listen_t), cfg->ioctl_buf, WLC_IOCTL_SMLEN,
- bssidx, &cfg->ioctl_buf_sync)) < 0) {
- ANDROID_ERROR(("p2po_listen Failed :%d\n", ret));
- goto exit;
+ ANDROID_TRACE(("reassoc issued successfully\n"));
}
- wl_set_p2p_status(cfg, DISC_IN_PROGRESS);
-exit :
- return ret;
+ return err;
}
-s32
-wl_cfg80211_p2plo_listen_stop(struct net_device *dev)
+#endif /* BCM4359_CHIP */
+
+#ifdef SUPPORT_AP_HIGHER_BEACONRATE
+int
+wl_android_set_ap_beaconrate(struct net_device *dev, char *command)
{
- struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
- s32 bssidx = wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_DEVICE);
- int ret = -EAGAIN;
+ int rate = 0;
+ char *pos, *token;
+ char *ifname = NULL;
+ int err = BCME_OK;
- if ((ret = wldev_iovar_setbuf_bsscfg(dev, "p2po_stop", NULL,
- 0, cfg->ioctl_buf, WLC_IOCTL_SMLEN,
- bssidx, &cfg->ioctl_buf_sync)) < 0) {
- ANDROID_ERROR(("p2po_stop Failed :%d\n", ret));
- goto exit;
+ /*
+ * DRIVER SET_AP_BEACONRATE <rate> <ifname>
+ */
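+ /* e.g. (illustrative values): DRIVER SET_AP_BEACONRATE 11 wlan1 */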
+ pos = command;
+
+ /* drop command */
+ token = bcmstrtok(&pos, " ", NULL);
+
+ /* Rate */
+ token = bcmstrtok(&pos, " ", NULL);
+ if (!token)
+ return -EINVAL;
+ rate = bcm_atoi(token);
+
+ /* get the interface name */
+ token = bcmstrtok(&pos, " ", NULL);
+ if (!token)
+ return -EINVAL;
+ ifname = token;
+
+ ANDROID_TRACE(("rate %d, ifacename %s\n", rate, ifname));
+
+ err = wl_set_ap_beacon_rate(dev, rate, ifname);
+ if (unlikely(err)) {
+ ANDROID_ERROR(("Failed to set ap beacon rate to %d, error = %d\n", rate, err));
}
-exit:
- return ret;
+ return err;
}
-s32
-wl_cfg80211_p2plo_offload(struct net_device *dev, char *cmd, char* buf, int len)
+int wl_android_get_ap_basicrate(struct net_device *dev, char *command, int total_len)
{
- int ret = 0;
+ char *pos, *token;
+ char *ifname = NULL;
+ int bytes_written = 0;
+ /*
+ * DRIVER GET_AP_BASICRATE <ifname>
+ */
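+ /* e.g. (illustrative): DRIVER GET_AP_BASICRATE wlan1 */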
+ pos = command;
- ANDROID_ERROR(("Entry cmd:%s arg_len:%d \n", cmd, len));
+ /* drop command */
+ token = bcmstrtok(&pos, " ", NULL);
- if (strncmp(cmd, "P2P_LO_START", strlen("P2P_LO_START")) == 0) {
- ret = wl_cfg80211_p2plo_listen_start(dev, buf, len);
- } else if (strncmp(cmd, "P2P_LO_STOP", strlen("P2P_LO_STOP")) == 0) {
- ret = wl_cfg80211_p2plo_listen_stop(dev);
- } else {
- ANDROID_ERROR(("Request for Unsupported CMD:%s \n", buf));
- ret = -EINVAL;
+ /* get the interface name */
+ token = bcmstrtok(&pos, " ", NULL);
+ if (!token)
+ return -EINVAL;
+ ifname = token;
+
+ ANDROID_TRACE(("ifacename %s\n", ifname));
+
+ bytes_written = wl_get_ap_basic_rate(dev, command, ifname, total_len);
+ if (bytes_written < 1) {
+ ANDROID_ERROR(("Failed to get ap basic rate, error = %d\n", bytes_written));
+ return -EPROTO;
}
- return ret;
+
+ return bytes_written;
+
}
-void
-wl_cfg80211_cancel_p2plo(struct bcm_cfg80211 *cfg)
+#endif /* SUPPORT_AP_HIGHER_BEACONRATE */
+
+#ifdef SUPPORT_AP_RADIO_PWRSAVE
+int
+wl_android_get_ap_rps(struct net_device *dev, char *command, int total_len)
{
- struct wireless_dev *wdev;
- if (!cfg) {
- return;
- }
+ char *pos, *token;
+ char *ifname = NULL;
+ int bytes_written = 0;
+ /*
+ * DRIVER GET_AP_RPS <ifname>
+ */
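+ /* e.g. (illustrative): DRIVER GET_AP_RPS wlan1 */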
+ pos = command;
- wdev = bcmcfg_to_p2p_wdev(cfg);
+ /* drop command */
+ token = bcmstrtok(&pos, " ", NULL);
- if (wl_get_p2p_status(cfg, DISC_IN_PROGRESS)) {
- WL_INFORM_MEM(("P2P_FIND: Discovery offload is already in progress."
- "it aborted\n"));
- wl_clr_p2p_status(cfg, DISC_IN_PROGRESS);
- if (wdev != NULL) {
-#if defined(WL_CFG80211_P2P_DEV_IF)
- cfg80211_remain_on_channel_expired(wdev,
- cfg->last_roc_id,
- &cfg->remain_on_chan, GFP_KERNEL);
-#else
- cfg80211_remain_on_channel_expired(wdev,
- cfg->last_roc_id,
- &cfg->remain_on_chan,
- cfg->remain_on_chan_type, GFP_KERNEL);
-#endif /* WL_CFG80211_P2P_DEV_IF */
- }
- wl_cfg80211_p2plo_deinit(cfg);
+ /* get the interface name */
+ token = bcmstrtok(&pos, " ", NULL);
+ if (!token)
+ return -EINVAL;
+ ifname = token;
+
+ ANDROID_TRACE(("ifacename %s\n", ifname));
+
+ bytes_written = wl_get_ap_rps(dev, command, ifname, total_len);
+ if (bytes_written < 1) {
+ ANDROID_ERROR(("Failed to get rps, error = %d\n", bytes_written));
+ return -EPROTO;
}
+
+ return bytes_written;
+
}
-#endif /* P2P_LISTEN_OFFLOADING */
-#ifdef WL_MURX
int
-wl_android_murx_bfe_cap(struct net_device *dev, int val)
+wl_android_set_ap_rps(struct net_device *dev, char *command, int total_len)
{
+ int enable = 0;
+ char *pos, *token;
+ char *ifname = NULL;
int err = BCME_OK;
- int iface_count = wl_cfg80211_iface_count(dev);
- struct ether_addr bssid;
- wl_reassoc_params_t params;
- if (iface_count > 1) {
- ANDROID_ERROR(("murx_bfe_cap change is not allowed when "
- "there are multiple interfaces\n"));
+ /*
+ * DRIVER SET_AP_RPS <0/1> <ifname>
+ */
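+ /* e.g. (illustrative): DRIVER SET_AP_RPS 1 wlan1 */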
+ pos = command;
+
+ /* drop command */
+ token = bcmstrtok(&pos, " ", NULL);
+
+ /* Enable */
+ token = bcmstrtok(&pos, " ", NULL);
+ if (!token)
return -EINVAL;
- }
- /* Now there is only single interface */
- err = wldev_iovar_setint(dev, "murx_bfe_cap", val);
+ enable = bcm_atoi(token);
+
+ /* get the interface name */
+ token = bcmstrtok(&pos, " ", NULL);
+ if (!token)
+ return -EINVAL;
+ ifname = token;
+
+ ANDROID_TRACE(("enable %d, ifacename %s\n", enable, ifname));
+
+ err = wl_set_ap_rps(dev, enable? TRUE: FALSE, ifname);
if (unlikely(err)) {
- ANDROID_ERROR(("Failed to set murx_bfe_cap IOVAR to %d,"
- "error %d\n", val, err));
- return err;
+ ANDROID_ERROR(("Failed to set rps, enable %d, error = %d\n", enable, err));
}
- /* If successful intiate a reassoc */
- bzero(&bssid, ETHER_ADDR_LEN);
- if ((err = wldev_ioctl_get(dev, WLC_GET_BSSID, &bssid, ETHER_ADDR_LEN)) < 0) {
- ANDROID_ERROR(("Failed to get bssid, error=%d\n", err));
- return err;
- }
+ return err;
+}
- bzero(&params, sizeof(wl_reassoc_params_t));
- memcpy(&params.bssid, &bssid, ETHER_ADDR_LEN);
+int
+wl_android_set_ap_rps_params(struct net_device *dev, char *command, int total_len)
+{
+ ap_rps_info_t rps;
+ char *pos, *token;
+ char *ifname = NULL;
+ int err = BCME_OK;
- if ((err = wldev_ioctl_set(dev, WLC_REASSOC, &params,
- sizeof(wl_reassoc_params_t))) < 0) {
- ANDROID_ERROR(("reassoc failed err:%d \n", err));
- } else {
- ANDROID_INFO(("reassoc issued successfully\n"));
+ memset(&rps, 0, sizeof(rps));
+ /*
+ * DRIVER SET_AP_RPS_PARAMS <pps> <level> <quiettime> <assoccheck> <ifname>
+ */
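+ /* e.g. (illustrative values): DRIVER SET_AP_RPS_PARAMS 10 3 10 1 wlan1 */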
+ pos = command;
+
+ /* drop command */
+ token = bcmstrtok(&pos, " ", NULL);
+
+ /* pps */
+ token = bcmstrtok(&pos, " ", NULL);
+ if (!token)
+ return -EINVAL;
+ rps.pps = bcm_atoi(token);
+
+ /* level */
+ token = bcmstrtok(&pos, " ", NULL);
+ if (!token)
+ return -EINVAL;
+ rps.level = bcm_atoi(token);
+
+ /* quiettime */
+ token = bcmstrtok(&pos, " ", NULL);
+ if (!token)
+ return -EINVAL;
+ rps.quiet_time = bcm_atoi(token);
+
+ /* sta assoc check */
+ token = bcmstrtok(&pos, " ", NULL);
+ if (!token)
+ return -EINVAL;
+ rps.sta_assoc_check = bcm_atoi(token);
+
+ /* get the interface name */
+ token = bcmstrtok(&pos, " ", NULL);
+ if (!token)
+ return -EINVAL;
+ ifname = token;
+
+ ANDROID_TRACE(("pps %d, level %d, quiettime %d, sta_assoc_check %d, "
+ "ifacename %s\n", rps.pps, rps.level, rps.quiet_time,
+ rps.sta_assoc_check, ifname));
+
+ err = wl_update_ap_rps_params(dev, &rps, ifname);
+ if (unlikely(err)) {
+ ANDROID_ERROR(("Failed to update rps, pps %d, level %d, quiettime %d, "
+ "sta_assoc_check %d, err = %d\n", rps.pps, rps.level, rps.quiet_time,
+ rps.sta_assoc_check, err));
}
return err;
}
-#endif /* WL_MURX */
+#endif /* SUPPORT_AP_RADIO_PWRSAVE */
-#ifdef SUPPORT_RSSI_SUM_REPORT
+#ifdef SUPPORT_RSSI_LOGGING
int
wl_android_get_rssi_per_ant(struct net_device *dev, char *command, int total_len)
{
int bytes_written = 0;
bool mimo_rssi = FALSE;
- bzero(&rssi_ant_mimo, sizeof(wl_rssi_ant_mimo_t));
+ memset(&rssi_ant_mimo, 0, sizeof(wl_rssi_ant_mimo_t));
/*
* STA I/F: DRIVER GET_RSSI_PER_ANT <ifname> <mimo>
* AP/GO I/F: DRIVER GET_RSSI_PER_ANT <ifname> <Peer MAC addr> <mimo>
}
/* Parse the results */
- ANDROID_INFO(("ifname %s, version %d, count %d, mimo rssi %d\n",
+ ANDROID_TRACE(("ifname %s, version %d, count %d, mimo rssi %d\n",
ifname, rssi_ant_mimo.version, rssi_ant_mimo.count, mimo_rssi));
if (mimo_rssi) {
- ANDROID_INFO(("MIMO RSSI: %d\n", rssi_ant_mimo.rssi_sum));
+ ANDROID_TRACE(("MIMO RSSI: %d\n", rssi_ant_mimo.rssi_sum));
bytes_written = snprintf(command, total_len, "%s MIMO %d",
CMD_GET_RSSI_PER_ANT, rssi_ant_mimo.rssi_sum);
} else {
int cnt;
bytes_written = snprintf(command, total_len, "%s PER_ANT ", CMD_GET_RSSI_PER_ANT);
for (cnt = 0; cnt < rssi_ant_mimo.count; cnt++) {
- ANDROID_INFO(("RSSI[%d]: %d\n", cnt, rssi_ant_mimo.rssi_ant[cnt]));
+ ANDROID_TRACE(("RSSI[%d]: %d\n", cnt, rssi_ant_mimo.rssi_ant[cnt]));
bytes_written = snprintf(command, total_len, "%d ",
rssi_ant_mimo.rssi_ant[cnt]);
}
char *pos, *token;
int err = BCME_OK;
- bzero(&set_param, sizeof(rssilog_set_param_t));
+ memset(&set_param, 0, sizeof(rssilog_set_param_t));
/*
* DRIVER SET_RSSI_LOGGING <enable/disable> <RSSI Threshold> <Time Threshold>
*/
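/* e.g. (illustrative values): DRIVER SET_RSSI_LOGGING 1 5 10 */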
}
set_param.time_threshold = bcm_atoi(token);
- ANDROID_INFO(("enable %d, RSSI threshold %d, Time threshold %d\n", set_param.enable,
+ ANDROID_TRACE(("enable %d, RSSI threshold %d, Time threshold %d\n", set_param.enable,
set_param.rssi_threshold, set_param.time_threshold));
err = wl_set_rssi_logging(dev, (void *)&set_param);
return BCME_ERROR;
}
- ANDROID_INFO(("report_count %d, enable %d, rssi_threshold %d, time_threshold %d\n",
+ ANDROID_TRACE(("report_count %d, enable %d, rssi_threshold %d, time_threshold %d\n",
get_param.report_count, get_param.enable, get_param.rssi_threshold,
get_param.time_threshold));
/* Parse the parameter */
if (!get_param.enable) {
- ANDROID_INFO(("RSSI LOGGING: Feature is disables\n"));
+ ANDROID_TRACE(("RSSI LOGGING: Feature is disables\n"));
bytes_written = snprintf(command, total_len,
"%s FEATURE DISABLED\n", CMD_GET_RSSI_LOGGING);
} else if (get_param.enable &
(RSSILOG_FLAG_FEATURE_SW | RSSILOG_FLAG_REPORT_READY)) {
if (!get_param.report_count) {
- ANDROID_INFO(("[PASS] RSSI difference across antennas is within"
+ ANDROID_TRACE(("[PASS] RSSI difference across antennas is within"
" threshold limits\n"));
bytes_written = snprintf(command, total_len, "%s PASS\n",
CMD_GET_RSSI_LOGGING);
} else {
- ANDROID_INFO(("[FAIL] RSSI difference across antennas found "
+ ANDROID_TRACE(("[FAIL] RSSI difference across antennas found "
"to be greater than %3d dB\n", get_param.rssi_threshold));
- ANDROID_INFO(("[FAIL] RSSI difference check have failed for "
+ ANDROID_TRACE(("[FAIL] RSSI difference check have failed for "
"%d out of %d times\n", get_param.report_count,
get_param.time_threshold));
- ANDROID_INFO(("[FAIL] RSSI difference is being monitored once "
+ ANDROID_TRACE(("[FAIL] RSSI difference is being monitored once "
"per second, for a %d secs window\n", get_param.time_threshold));
bytes_written = snprintf(command, total_len, "%s FAIL - RSSI Threshold "
"%d dBm for %d out of %d times\n", CMD_GET_RSSI_LOGGING,
get_param.time_threshold);
}
} else {
- ANDROID_INFO(("[BUSY] Reprot is not ready\n"));
+ ANDROID_TRACE(("[BUSY] Reprot is not ready\n"));
bytes_written = snprintf(command, total_len, "%s BUSY - NOT READY\n",
CMD_GET_RSSI_LOGGING);
}
return bytes_written;
}
-#endif /* SUPPORT_RSSI_SUM_REPORT */
+#endif /* SUPPORT_RSSI_LOGGING */
#ifdef SET_PCIE_IRQ_CPU_CORE
void
-wl_android_set_irq_cpucore(struct net_device *net, int affinity_cmd)
+wl_android_set_irq_cpucore(struct net_device *net, int set)
{
dhd_pub_t *dhdp = wl_cfg80211_get_dhdp(net);
if (!dhdp) {
ANDROID_ERROR(("dhd is NULL\n"));
return;
}
+ dhd_set_irq_cpucore(dhdp, set);
+}
+#endif /* SET_PCIE_IRQ_CPU_CORE */
- if (affinity_cmd < PCIE_IRQ_AFFINITY_OFF || affinity_cmd > PCIE_IRQ_AFFINITY_LAST) {
- ANDROID_ERROR(("Wrong Affinity cmds:%d, %s\n", affinity_cmd, __FUNCTION__));
- return;
+#if defined(DHD_HANG_SEND_UP_TEST)
+void
+wl_android_make_hang_with_reason(struct net_device *dev, const char *string_num)
+{
+ dhd_make_hang_with_reason(dev, string_num);
+}
+#endif /* DHD_HANG_SEND_UP_TEST */
+
+#ifdef WL_CFG80211
+#ifdef WLMESH
+static int
+wl_android_set_rsdb_mode(struct net_device *dev, char *command, int total_len)
+{
+ int ret;
+ wl_config_t rsdb_mode_cfg = {-1, 0};
+ char smbuf[WLC_IOCTL_SMLEN];
+ s32 val = 1;
+
+ if (sscanf(command, "%*s %d", &rsdb_mode_cfg.config) != 1) {
+ DHD_ERROR(("%s: Failed to get Parameter\n", __FUNCTION__));
+ return -1;
}
+ DHD_INFO(("%s : RSDB_MODE = %d\n", __FUNCTION__, rsdb_mode_cfg.config));
- dhd_set_irq_cpucore(dhdp, affinity_cmd);
+ ret = wldev_ioctl_set(dev, WLC_DOWN, &val, sizeof(s32));
+ if (ret < 0)
+ DHD_ERROR(("WLC_DOWN error %d\n", ret));
+
+ ret = wldev_iovar_setbuf(dev, "rsdb_mode", &rsdb_mode_cfg, sizeof(rsdb_mode_cfg),
+ smbuf, sizeof(smbuf), NULL);
+ if (ret < 0)
+ DHD_ERROR(("%s : set rsdb_mode error=%d\n", __FUNCTION__, ret));
+
+ ret = wldev_ioctl_set(dev, WLC_UP, &val, sizeof(s32));
+ if (ret < 0)
+ DHD_ERROR(("WLC_UP error %d\n", ret));
+
+ return ret;
}
-#endif /* SET_PCIE_IRQ_CPU_CORE */
+#endif /* WLMESH */
+#endif /* WL_CFG80211 */
#ifdef SUPPORT_LQCM
static int
return err;
}
-static int
-wl_android_get_lqcm_report(struct net_device *dev, char *command, int total_len)
+static int wl_android_get_lqcm_report(
+ struct net_device *dev, char *command, int total_len)
{
int bytes_written, err = 0;
uint32 lqcm_report = 0;
tx_lqcm_idx = (lqcm_report & LQCM_TX_INDEX_MASK) >> LQCM_TX_INDEX_SHIFT;
rx_lqcm_idx = (lqcm_report & LQCM_RX_INDEX_MASK) >> LQCM_RX_INDEX_SHIFT;
- ANDROID_INFO(("lqcm report EN:%d, TX:%d, RX:%d\n", lqcm_enable, tx_lqcm_idx, rx_lqcm_idx));
+ ANDROID_ERROR(("lqcm report EN:%d, TX:%d, RX:%d\n", lqcm_enable, tx_lqcm_idx, rx_lqcm_idx));
bytes_written = snprintf(command, total_len, "%s %d",
- CMD_GET_LQCM_REPORT, lqcm_report);
+ CMD_GET_LQCM_REPORT, lqcm_report);
return bytes_written;
}
ANDROID_INFO(("%s: command result is %s\n", __FUNCTION__, command));
return bytes_written;
}
-
-#ifdef SUPPORT_AP_HIGHER_BEACONRATE
-int
-wl_android_set_ap_beaconrate(struct net_device *dev, char *command)
+#ifdef WLADPS_PRIVATE_CMD
+static int
+wl_android_set_adps_mode(struct net_device *dev, const char* string_num)
{
- int rate = 0;
- char *pos, *token;
- char *ifname = NULL;
- int err = BCME_OK;
-
- /*
- * DRIVER SET_AP_BEACONRATE <rate> <ifname>
- */
- pos = command;
+ int err = 0, adps_mode;
+ dhd_pub_t *dhdp = wl_cfg80211_get_dhdp(dev);
- /* drop command */
- token = bcmstrtok(&pos, " ", NULL);
+ adps_mode = bcm_atoi(string_num);
- /* Rate */
- token = bcmstrtok(&pos, " ", NULL);
- if (!token)
+ if ((adps_mode < 0) || (adps_mode > 1)) {
+ ANDROID_ERROR(("%s: Invalid value %d.\n", __FUNCTION__, adps_mode));
return -EINVAL;
- rate = bcm_atoi(token);
-
- /* get the interface name */
- token = bcmstrtok(&pos, " ", NULL);
- if (!token)
- return -EINVAL;
- ifname = token;
-
- ANDROID_INFO(("rate %d, ifacename %s\n", rate, ifname));
-
- err = wl_set_ap_beacon_rate(dev, rate, ifname);
- if (unlikely(err)) {
- ANDROID_ERROR(("Failed to set ap beacon rate to %d, error = %d\n", rate, err));
}
+ err = dhd_enable_adps(dhdp, adps_mode);
+ if (err != BCME_OK) {
+ ANDROID_ERROR(("failed to set adps mode %d, error = %d\n", adps_mode, err));
+ return -EIO;
+ }
return err;
}
-
-int wl_android_get_ap_basicrate(struct net_device *dev, char *command, int total_len)
+static int
+wl_android_get_adps_mode(
+ struct net_device *dev, char *command, int total_len)
{
- char *pos, *token;
- char *ifname = NULL;
- int bytes_written = 0;
- /*
- * DRIVER GET_AP_BASICRATE <ifname>
- */
- pos = command;
-
- /* drop command */
- token = bcmstrtok(&pos, " ", NULL);
+ int bytes_written, err = 0;
+ int len;
+ char buf[WLC_IOCTL_SMLEN];
- /* get the interface name */
- token = bcmstrtok(&pos, " ", NULL);
- if (!token)
- return -EINVAL;
- ifname = token;
+ bcm_iov_buf_t iov_buf;
+ bcm_iov_buf_t *ptr = NULL;
+ wl_adps_params_v1_t *data = NULL;
- ANDROID_INFO(("ifacename %s\n", ifname));
+ uint8 *pdata = NULL;
+ uint8 band, mode = 0;
- bytes_written = wl_get_ap_basic_rate(dev, command, ifname, total_len);
- if (bytes_written < 1) {
- ANDROID_ERROR(("Failed to get ap basic rate, error = %d\n", bytes_written));
- return -EPROTO;
- }
-
- return bytes_written;
-}
-#endif /* SUPPORT_AP_HIGHER_BEACONRATE */
-
-#ifdef SUPPORT_AP_RADIO_PWRSAVE
-int
-wl_android_get_ap_rps(struct net_device *dev, char *command, int total_len)
-{
- char *pos, *token;
- char *ifname = NULL;
- int bytes_written = 0;
- char name[IFNAMSIZ];
- /*
- * DRIVER GET_AP_RPS <ifname>
- */
- pos = command;
+ memset(&iov_buf, 0, sizeof(iov_buf));
- /* drop command */
- token = bcmstrtok(&pos, " ", NULL);
+ len = OFFSETOF(bcm_iov_buf_t, data) + sizeof(*data);
- /* get the interface name */
- token = bcmstrtok(&pos, " ", NULL);
- if (!token)
- return -EINVAL;
- ifname = token;
+ iov_buf.version = WL_ADPS_IOV_VER;
+ iov_buf.len = sizeof(band);
+ iov_buf.id = WL_ADPS_IOV_MODE;
- strlcpy(name, ifname, sizeof(name));
- ANDROID_INFO(("ifacename %s\n", name));
+ pdata = (uint8 *)&iov_buf.data;
- bytes_written = wl_get_ap_rps(dev, command, name, total_len);
- if (bytes_written < 1) {
- ANDROID_ERROR(("Failed to get rps, error = %d\n", bytes_written));
- return -EPROTO;
+ for (band = 1; band <= MAX_BANDS; band++) {
+ pdata[0] = band;
+ err = wldev_iovar_getbuf(dev, "adps", &iov_buf, len,
+ buf, WLC_IOCTL_SMLEN, NULL);
+ if (err != BCME_OK) {
+ ANDROID_ERROR(("%s fail to get adps band %d(%d).\n",
+ __FUNCTION__, band, err));
+ return -EIO;
+ }
+ ptr = (bcm_iov_buf_t *) buf;
+ data = (wl_adps_params_v1_t *) ptr->data;
+ mode = data->mode;
+ if (mode != OFF) {
+ break;
+ }
}
+ bytes_written = snprintf(command, total_len, "%s %d",
+ CMD_GET_ADPS, mode);
return bytes_written;
-
-}
-
-int
-wl_android_set_ap_rps(struct net_device *dev, char *command, int total_len)
-{
- int enable = 0;
- char *pos, *token;
- char *ifname = NULL;
- int err = BCME_OK;
- char name[IFNAMSIZ];
-
- /*
- * DRIVER SET_AP_RPS <0/1> <ifname>
- */
- pos = command;
-
- /* drop command */
- token = bcmstrtok(&pos, " ", NULL);
-
- /* Enable */
- token = bcmstrtok(&pos, " ", NULL);
- if (!token)
- return -EINVAL;
- enable = bcm_atoi(token);
-
- /* get the interface name */
- token = bcmstrtok(&pos, " ", NULL);
- if (!token)
- return -EINVAL;
- ifname = token;
-
- strlcpy(name, ifname, sizeof(name));
- ANDROID_INFO(("enable %d, ifacename %s\n", enable, name));
-
- err = wl_set_ap_rps(dev, enable? TRUE: FALSE, name);
- if (unlikely(err)) {
- ANDROID_ERROR(("Failed to set rps, enable %d, error = %d\n", enable, err));
- }
-
- return err;
}
+#endif /* WLADPS_PRIVATE_CMD */
-int
-wl_android_set_ap_rps_params(struct net_device *dev, char *command, int total_len)
+#ifdef DHD_PKT_LOGGING
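+/* DHD_PKT_LOGGING private commands: the handlers below enable/disable the
+ * packet-log capture filters (TX, TX status, RX), manage filter patterns,
+ * and start/stop the TX/RX packet-log rings. Most reply "OK" on success. */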
+static int
+wl_android_pktlog_filter_enable(struct net_device *dev, char *command, int total_len)
{
- ap_rps_info_t rps;
- char *pos, *token;
- char *ifname = NULL;
+ int bytes_written = 0;
+ dhd_pub_t *dhdp = wl_cfg80211_get_dhdp(dev);
+ dhd_pktlog_filter_t *filter;
int err = BCME_OK;
- char name[IFNAMSIZ];
-
- bzero(&rps, sizeof(rps));
- /*
- * DRIVER SET_AP_RPS_PARAMS <pps> <level> <quiettime> <assoccheck> <ifname>
- */
- pos = command;
-
- /* drop command */
- token = bcmstrtok(&pos, " ", NULL);
- /* pps */
- token = bcmstrtok(&pos, " ", NULL);
- if (!token)
+ if (!dhdp || !dhdp->pktlog) {
+ DHD_PKT_LOG(("%s(): dhdp=%p pktlog=%p\n",
+ __FUNCTION__, dhdp, (dhdp ? dhdp->pktlog : NULL)));
return -EINVAL;
- rps.pps = bcm_atoi(token);
-
- /* level */
- token = bcmstrtok(&pos, " ", NULL);
- if (!token)
- return -EINVAL;
- rps.level = bcm_atoi(token);
-
- /* quiettime */
- token = bcmstrtok(&pos, " ", NULL);
- if (!token)
- return -EINVAL;
- rps.quiet_time = bcm_atoi(token);
-
- /* sta assoc check */
- token = bcmstrtok(&pos, " ", NULL);
- if (!token)
- return -EINVAL;
- rps.sta_assoc_check = bcm_atoi(token);
-
- /* get the interface name */
- token = bcmstrtok(&pos, " ", NULL);
- if (!token)
- return -EINVAL;
- ifname = token;
- strlcpy(name, ifname, sizeof(name));
-
- ANDROID_INFO(("pps %d, level %d, quiettime %d, sta_assoc_check %d, "
- "ifacename %s\n", rps.pps, rps.level, rps.quiet_time,
- rps.sta_assoc_check, name));
-
- err = wl_update_ap_rps_params(dev, &rps, name);
- if (unlikely(err)) {
- ANDROID_ERROR(("Failed to update rps, pps %d, level %d, quiettime %d, "
- "sta_assoc_check %d, err = %d\n", rps.pps, rps.level, rps.quiet_time,
- rps.sta_assoc_check, err));
- }
-
- return err;
-}
-#endif /* SUPPORT_AP_RADIO_PWRSAVE */
-
-#ifdef DHD_SEND_HANG_PRIVCMD_ERRORS
-static void
-wl_android_check_priv_cmd_errors(struct net_device *dev)
-{
- dhd_pub_t *dhdp;
- int memdump_mode;
-
- if (!dev) {
- ANDROID_ERROR(("dev is NULL\n"));
- return;
}
- dhdp = wl_cfg80211_get_dhdp(dev);
- if (!dhdp) {
- ANDROID_ERROR(("dhdp is NULL\n"));
- return;
- }
+ filter = dhdp->pktlog->pktlog_filter;
-#ifdef DHD_FW_COREDUMP
- memdump_mode = dhdp->memdump_enabled;
-#else
- /* Default enable if DHD doesn't support SOCRAM dump */
- memdump_mode = 1;
-#endif /* DHD_FW_COREDUMP */
+ err = dhd_pktlog_filter_enable(filter, PKTLOG_TXPKT_CASE, TRUE);
+ err = dhd_pktlog_filter_enable(filter, PKTLOG_TXSTATUS_CASE, TRUE);
+ err = dhd_pktlog_filter_enable(filter, PKTLOG_RXPKT_CASE, TRUE);
- if (report_hang_privcmd_err) {
- priv_cmd_errors++;
+ if (err == BCME_OK) {
+ bytes_written = snprintf(command, total_len, "OK");
+ ANDROID_ERROR(("%s: pktlog filter enable success\n", __FUNCTION__));
} else {
- priv_cmd_errors = 0;
- }
-
- /* Trigger HANG event only if memdump mode is enabled
- * due to customer's request
- */
- if (memdump_mode == DUMP_MEMFILE_BUGON &&
- (priv_cmd_errors > NUMBER_SEQUENTIAL_PRIVCMD_ERRORS)) {
- ANDROID_ERROR(("Send HANG event due to sequential private cmd errors\n"));
- priv_cmd_errors = 0;
-#ifdef DHD_FW_COREDUMP
- /* Take a SOCRAM dump */
- dhdp->memdump_type = DUMP_TYPE_SEQUENTIAL_PRIVCMD_ERROR;
- dhd_common_socram_dump(dhdp);
-#endif /* DHD_FW_COREDUMP */
- /* Send the HANG event to upper layer */
- dhdp->hang_reason = HANG_REASON_SEQUENTIAL_PRIVCMD_ERROR;
- dhd_os_check_hang(dhdp, 0, -EREMOTEIO);
- }
-}
-#endif /* DHD_SEND_HANG_PRIVCMD_ERRORS */
-
-#ifdef SUPPORT_AP_SUSPEND
-int
-wl_android_set_ap_suspend(struct net_device *dev, char *command, int total_len)
-{
- int suspend = 0;
- char *pos, *token;
- char *ifname = NULL;
- int err = BCME_OK;
- char name[IFNAMSIZ];
-
- /*
- * DRIVER SET_AP_SUSPEND <0/1> <ifname>
- */
- pos = command;
-
- /* drop command */
- token = bcmstrtok(&pos, " ", NULL);
-
- /* Enable */
- token = bcmstrtok(&pos, " ", NULL);
- if (!token) {
- return -EINVAL;
- }
- suspend = bcm_atoi(token);
-
- /* get the interface name */
- token = bcmstrtok(&pos, " ", NULL);
- if (!token) {
- return -EINVAL;
- }
- ifname = token;
-
- strlcpy(name, ifname, sizeof(name));
- ANDROID_INFO(("suspend %d, ifacename %s\n", suspend, name));
-
- err = wl_set_ap_suspend(dev, suspend? TRUE: FALSE, name);
- if (unlikely(err)) {
- ANDROID_ERROR(("Failed to set suspend, suspend %d, error = %d\n", suspend, err));
- }
-
- return err;
-}
-#endif /* SUPPORT_AP_SUSPEND */
-
-#ifdef SUPPORT_AP_BWCTRL
-int
-wl_android_set_ap_bw(struct net_device *dev, char *command, int total_len)
-{
- int bw = DOT11_OPER_MODE_20MHZ;
- char *pos, *token;
- char *ifname = NULL;
- int err = BCME_OK;
- char name[IFNAMSIZ];
-
- /*
- * DRIVER SET_AP_BW <0/1/2> <ifname>
- * 0 : 20MHz, 1 : 40MHz, 2 : 80MHz 3: 80+80 or 160MHz
- * This is from operating mode field
- * in 8.4.1.50 of 802.11ac-2013
- */
- pos = command;
-
- /* drop command */
- token = bcmstrtok(&pos, " ", NULL);
-
- /* BW */
- token = bcmstrtok(&pos, " ", NULL);
- if (!token) {
- return -EINVAL;
- }
- bw = bcm_atoi(token);
-
- /* get the interface name */
- token = bcmstrtok(&pos, " ", NULL);
- if (!token) {
- return -EINVAL;
- }
- ifname = token;
-
- strlcpy(name, ifname, sizeof(name));
- ANDROID_INFO(("bw %d, ifacename %s\n", bw, name));
-
- err = wl_set_ap_bw(dev, bw, name);
- if (unlikely(err)) {
- ANDROID_ERROR(("Failed to set bw, bw %d, error = %d\n", bw, err));
- }
-
- return err;
-}
-
-int
-wl_android_get_ap_bw(struct net_device *dev, char *command, int total_len)
-{
- char *pos, *token;
- char *ifname = NULL;
- int bytes_written = 0;
- char name[IFNAMSIZ];
-
- /*
- * DRIVER GET_AP_BW <ifname>
- * returns 0 : 20MHz, 1 : 40MHz, 2 : 80MHz 3: 80+80 or 160MHz
- * This is from operating mode field
- * in 8.4.1.50 of 802.11ac-2013
- */
- pos = command;
-
- /* drop command */
- token = bcmstrtok(&pos, " ", NULL);
-
- /* get the interface name */
- token = bcmstrtok(&pos, " ", NULL);
- if (!token) {
- return -EINVAL;
- }
- ifname = token;
-
- strlcpy(name, ifname, sizeof(name));
- ANDROID_INFO(("ifacename %s\n", name));
-
- bytes_written = wl_get_ap_bw(dev, command, name, total_len);
- if (bytes_written < 1) {
- ANDROID_ERROR(("Failed to get bw, error = %d\n", bytes_written));
- return -EPROTO;
+ ANDROID_ERROR(("%s: pktlog filter enable fail\n", __FUNCTION__));
+ return BCME_ERROR;
}
return bytes_written;
-
-}
-#endif /* SUPPORT_AP_BWCTRL */
-
-int wl_android_priv_cmd(struct net_device *net, struct ifreq *ifr)
-{
-#define PRIVATE_COMMAND_MAX_LEN 8192
-#define PRIVATE_COMMAND_DEF_LEN 4096
- int ret = 0;
- char *command = NULL;
- int bytes_written = 0;
- android_wifi_priv_cmd priv_cmd;
- int buf_size = 0;
- dhd_pub_t *dhd = dhd_get_pub(net);
-
- net_os_wake_lock(net);
-
- if (!capable(CAP_NET_ADMIN)) {
- ret = -EPERM;
- goto exit;
- }
-
- if (!ifr->ifr_data) {
- ret = -EINVAL;
- goto exit;
- }
-
-#ifdef CONFIG_COMPAT
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 6, 0))
- if (in_compat_syscall())
-#else
- if (is_compat_task())
-#endif
- {
- compat_android_wifi_priv_cmd compat_priv_cmd;
- if (copy_from_user(&compat_priv_cmd, ifr->ifr_data,
- sizeof(compat_android_wifi_priv_cmd))) {
- ret = -EFAULT;
- goto exit;
-
- }
- priv_cmd.buf = compat_ptr(compat_priv_cmd.buf);
- priv_cmd.used_len = compat_priv_cmd.used_len;
- priv_cmd.total_len = compat_priv_cmd.total_len;
- } else
-#endif /* CONFIG_COMPAT */
- {
- if (copy_from_user(&priv_cmd, ifr->ifr_data, sizeof(android_wifi_priv_cmd))) {
- ret = -EFAULT;
- goto exit;
- }
- }
- if ((priv_cmd.total_len > PRIVATE_COMMAND_MAX_LEN) || (priv_cmd.total_len < 0)) {
- ANDROID_ERROR(("%s: buf length invalid:%d\n", __FUNCTION__,
- priv_cmd.total_len));
- ret = -EINVAL;
- goto exit;
- }
-
- buf_size = max(priv_cmd.total_len, PRIVATE_COMMAND_DEF_LEN);
- command = (char *)MALLOC(dhd->osh, (buf_size + 1));
- if (!command) {
- ANDROID_ERROR(("%s: failed to allocate memory\n", __FUNCTION__));
- ret = -ENOMEM;
- goto exit;
- }
- if (copy_from_user(command, priv_cmd.buf, priv_cmd.total_len)) {
- ret = -EFAULT;
- goto exit;
- }
- command[priv_cmd.total_len] = '\0';
-
- ANDROID_INFO(("%s: Android private cmd \"%s\" on %s\n", __FUNCTION__, command, ifr->ifr_name));
-
- bytes_written = wl_handle_private_cmd(net, command, priv_cmd.total_len);
- if (bytes_written >= 0) {
- if ((bytes_written == 0) && (priv_cmd.total_len > 0)) {
- command[0] = '\0';
- }
- if (bytes_written >= priv_cmd.total_len) {
- ANDROID_ERROR(("%s: err. bytes_written:%d >= total_len:%d, buf_size:%d\n",
- __FUNCTION__, bytes_written, priv_cmd.total_len, buf_size));
-
- ret = BCME_BUFTOOSHORT;
- goto exit;
- }
- bytes_written++;
- priv_cmd.used_len = bytes_written;
- if (copy_to_user(priv_cmd.buf, command, bytes_written)) {
- ANDROID_ERROR(("%s: failed to copy data to user buffer\n", __FUNCTION__));
- ret = -EFAULT;
- }
- }
- else {
- /* Propagate the error */
- ret = bytes_written;
- }
-
-exit:
-#ifdef DHD_SEND_HANG_PRIVCMD_ERRORS
- if (ret) {
- /* Avoid incrementing priv_cmd_errors in case of unsupported feature */
- if (ret != BCME_UNSUPPORTED) {
- wl_android_check_priv_cmd_errors(net);
- }
- } else {
- priv_cmd_errors = 0;
- }
-#endif /* DHD_SEND_HANG_PRIVCMD_ERRORS */
- net_os_wake_unlock(net);
- MFREE(dhd->osh, command, (buf_size + 1));
- return ret;
-}
-
-#ifdef WL_BCNRECV
-#define BCNRECV_ATTR_HDR_LEN 30
-int
-wl_android_bcnrecv_event(struct net_device *ndev, uint attr_type,
- uint status, uint reason, uint8 *data, uint data_len)
-{
- s32 err = BCME_OK;
- struct sk_buff *skb;
- gfp_t kflags;
- struct bcm_cfg80211 *cfg = wl_get_cfg(ndev);
- struct wiphy *wiphy = bcmcfg_to_wiphy(cfg);
- uint len;
-
- len = BCNRECV_ATTR_HDR_LEN + data_len;
-
- kflags = in_atomic() ? GFP_ATOMIC : GFP_KERNEL;
- skb = CFG80211_VENDOR_EVENT_ALLOC(wiphy, ndev_to_wdev(ndev), len,
- BRCM_VENDOR_EVENT_BEACON_RECV, kflags);
- if (!skb) {
- ANDROID_ERROR(("skb alloc failed"));
- return -ENOMEM;
- }
- if ((attr_type == BCNRECV_ATTR_BCNINFO) && (data)) {
- /* send bcn info to upper layer */
- nla_put(skb, BCNRECV_ATTR_BCNINFO, data_len, data);
- } else if (attr_type == BCNRECV_ATTR_STATUS) {
- nla_put_u32(skb, BCNRECV_ATTR_STATUS, status);
- if (reason) {
- nla_put_u32(skb, BCNRECV_ATTR_REASON, reason);
- }
- } else {
- ANDROID_ERROR(("UNKNOWN ATTR_TYPE. attr_type:%d\n", attr_type));
- kfree_skb(skb);
- return -EINVAL;
- }
- cfg80211_vendor_event(skb, kflags);
- return err;
}
static int
-_wl_android_bcnrecv_start(struct bcm_cfg80211 *cfg, struct net_device *ndev, bool user_trigger)
-{
- s32 err = BCME_OK;
-
- /* check any scan is in progress before beacon recv scan trigger IOVAR */
- if (wl_get_drv_status_all(cfg, SCANNING)) {
- err = BCME_UNSUPPORTED;
- ANDROID_ERROR(("Scan in progress, Aborting beacon recv start, "
- "error:%d\n", err));
- goto exit;
- }
-
- if (wl_get_p2p_status(cfg, SCANNING)) {
- err = BCME_UNSUPPORTED;
- ANDROID_ERROR(("P2P Scan in progress, Aborting beacon recv start, "
- "error:%d\n", err));
- goto exit;
- }
-
- if (wl_get_drv_status(cfg, REMAINING_ON_CHANNEL, ndev)) {
- err = BCME_UNSUPPORTED;
- ANDROID_ERROR(("P2P remain on channel, Aborting beacon recv start, "
- "error:%d\n", err));
- goto exit;
- }
-
- /* check STA is in connected state, Beacon recv required connected state
- * else exit from beacon recv scan
- */
- if (!wl_get_drv_status(cfg, CONNECTED, ndev)) {
- err = BCME_UNSUPPORTED;
- ANDROID_ERROR(("STA is in not associated state error:%d\n", err));
- goto exit;
- }
-
-#ifdef WL_NAN
- /* Check NAN is enabled, if enabled exit else continue */
- if (wl_cfgnan_check_state(cfg)) {
- err = BCME_UNSUPPORTED;
- ANDROID_ERROR(("Nan is enabled, NAN+STA+FAKEAP concurrency is not supported\n"));
- goto exit;
- }
-#endif /* WL_NAN */
-
- /* Triggering an sendup_bcn iovar */
- err = wldev_iovar_setint(ndev, "sendup_bcn", 1);
- if (unlikely(err)) {
- ANDROID_ERROR(("sendup_bcn failed to set, error:%d\n", err));
- } else {
- cfg->bcnrecv_info.bcnrecv_state = BEACON_RECV_STARTED;
- WL_INFORM_MEM(("bcnrecv started. user_trigger:%d\n", user_trigger));
- if (user_trigger) {
- if ((err = wl_android_bcnrecv_event(ndev, BCNRECV_ATTR_STATUS,
- WL_BCNRECV_STARTED, 0, NULL, 0)) != BCME_OK) {
- ANDROID_ERROR(("failed to send bcnrecv event, error:%d\n", err));
- }
- }
- }
-exit:
- /*
- * BCNRECV start request can be rejected from dongle
- * in various conditions.
- * Error code need to be overridden to BCME_UNSUPPORTED
- * to avoid hang event from continous private
- * command error
- */
- if (err) {
- err = BCME_UNSUPPORTED;
- }
- return err;
-}
-
-int
-_wl_android_bcnrecv_stop(struct bcm_cfg80211 *cfg, struct net_device *ndev, uint reason)
+wl_android_pktlog_filter_disable(struct net_device *dev, char *command, int total_len)
{
- s32 err = BCME_OK;
- u32 status;
+ int bytes_written = 0;
+ dhd_pub_t *dhdp = wl_cfg80211_get_dhdp(dev);
+ dhd_pktlog_filter_t *filter;
+ int err = BCME_OK;
- /* Send sendup_bcn iovar for all cases except W_BCNRECV_ROAMABORT reason -
- * fw generates roam abort event after aborting the bcnrecv.
- */
- if (reason != WL_BCNRECV_ROAMABORT) {
- /* Triggering an sendup_bcn iovar */
- err = wldev_iovar_setint(ndev, "sendup_bcn", 0);
- if (unlikely(err)) {
- ANDROID_ERROR(("sendup_bcn failed to set error:%d\n", err));
- goto exit;
- }
+ if (!dhdp || !dhdp->pktlog) {
+ DHD_PKT_LOG(("%s(): dhdp=%p pktlog=%p\n",
+ __FUNCTION__, dhdp, (dhdp ? dhdp->pktlog : NULL)));
+ return -EINVAL;
}
- /* Send notification for all cases */
- if (reason == WL_BCNRECV_SUSPEND) {
- cfg->bcnrecv_info.bcnrecv_state = BEACON_RECV_SUSPENDED;
- status = WL_BCNRECV_SUSPENDED;
+ filter = dhdp->pktlog->pktlog_filter;
+
+ err = dhd_pktlog_filter_enable(filter, PKTLOG_TXPKT_CASE, FALSE);
+ err = dhd_pktlog_filter_enable(filter, PKTLOG_TXSTATUS_CASE, FALSE);
+ err = dhd_pktlog_filter_enable(filter, PKTLOG_RXPKT_CASE, FALSE);
+
+ if (err == BCME_OK) {
+ bytes_written = snprintf(command, total_len, "OK");
+ ANDROID_ERROR(("%s: pktlog filter disable success\n", __FUNCTION__));
} else {
- cfg->bcnrecv_info.bcnrecv_state = BEACON_RECV_STOPPED;
- WL_INFORM_MEM(("bcnrecv stopped\n"));
- if (reason == WL_BCNRECV_USER_TRIGGER) {
- status = WL_BCNRECV_STOPPED;
- } else {
- status = WL_BCNRECV_ABORTED;
- }
- }
- if ((err = wl_android_bcnrecv_event(ndev, BCNRECV_ATTR_STATUS, status,
- reason, NULL, 0)) != BCME_OK) {
- ANDROID_ERROR(("failed to send bcnrecv event, error:%d\n", err));
+ ANDROID_ERROR(("%s: pktlog filter disable fail\n", __FUNCTION__));
+ return BCME_ERROR;
}
-exit:
- return err;
+
+ return bytes_written;
}
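+/* Enable a specific packet-log filter pattern; the pattern string follows
+ * the command name in the buffer and is passed to
+ * dhd_pktlog_filter_pattern_enable(). */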
static int
-wl_android_bcnrecv_start(struct bcm_cfg80211 *cfg, struct net_device *ndev)
+wl_android_pktlog_filter_pattern_enable(struct net_device *dev, char *command, int total_len)
{
- s32 err = BCME_OK;
-
- /* Adding scan_sync mutex to avoid race condition in b/w scan_req and bcn recv */
- mutex_lock(&cfg->scan_sync);
- mutex_lock(&cfg->bcn_sync);
- err = _wl_android_bcnrecv_start(cfg, ndev, true);
- mutex_unlock(&cfg->bcn_sync);
- mutex_unlock(&cfg->scan_sync);
- return err;
-}
+ int bytes_written = 0;
+ dhd_pub_t *dhdp = wl_cfg80211_get_dhdp(dev);
+ dhd_pktlog_filter_t *filter;
+ int err = BCME_OK;
-int
-wl_android_bcnrecv_stop(struct net_device *ndev, uint reason)
-{
- s32 err = BCME_OK;
- struct bcm_cfg80211 *cfg = wl_get_cfg(ndev);
+ if (!dhdp || !dhdp->pktlog) {
+ DHD_PKT_LOG(("%s(): dhdp=%p pktlog=%p\n",
+ __FUNCTION__, dhdp, (dhdp ? dhdp->pktlog : NULL)));
+ return -EINVAL;
+ }
- mutex_lock(&cfg->bcn_sync);
- if ((cfg->bcnrecv_info.bcnrecv_state == BEACON_RECV_STARTED) ||
- (cfg->bcnrecv_info.bcnrecv_state == BEACON_RECV_SUSPENDED)) {
- err = _wl_android_bcnrecv_stop(cfg, ndev, reason);
+ filter = dhdp->pktlog->pktlog_filter;
+
+ if (strlen(CMD_PKTLOG_FILTER_PATTERN_ENABLE) + 1 > total_len) {
+ return BCME_ERROR;
}
- mutex_unlock(&cfg->bcn_sync);
- return err;
-}
-int
-wl_android_bcnrecv_suspend(struct net_device *ndev)
-{
- s32 ret = BCME_OK;
- struct bcm_cfg80211 *cfg = wl_get_cfg(ndev);
+ err = dhd_pktlog_filter_pattern_enable(filter,
+ command + strlen(CMD_PKTLOG_FILTER_PATTERN_ENABLE) + 1, TRUE);
- mutex_lock(&cfg->bcn_sync);
- if (cfg->bcnrecv_info.bcnrecv_state == BEACON_RECV_STARTED) {
- WL_INFORM_MEM(("bcnrecv suspend\n"));
- ret = _wl_android_bcnrecv_stop(cfg, ndev, WL_BCNRECV_SUSPEND);
+ if (err == BCME_OK) {
+ bytes_written = snprintf(command, total_len, "OK");
+ ANDROID_ERROR(("%s: pktlog filter pattern enable success\n", __FUNCTION__));
+ } else {
+ ANDROID_ERROR(("%s: pktlog filter pattern enable fail\n", __FUNCTION__));
+ return BCME_ERROR;
}
- mutex_unlock(&cfg->bcn_sync);
- return ret;
-}
-int
-wl_android_bcnrecv_resume(struct net_device *ndev)
-{
- s32 ret = BCME_OK;
- struct bcm_cfg80211 *cfg = wl_get_cfg(ndev);
-
- /* Adding scan_sync mutex to avoid race condition in b/w scan_req and bcn recv */
- mutex_lock(&cfg->scan_sync);
- mutex_lock(&cfg->bcn_sync);
- if (cfg->bcnrecv_info.bcnrecv_state == BEACON_RECV_SUSPENDED) {
- WL_INFORM_MEM(("bcnrecv resume\n"));
- ret = _wl_android_bcnrecv_start(cfg, ndev, false);
- }
- mutex_unlock(&cfg->bcn_sync);
- mutex_unlock(&cfg->scan_sync);
- return ret;
+ return bytes_written;
}
-/* Beacon recv functionality code implementation */
-int
-wl_android_bcnrecv_config(struct net_device *ndev, char *cmd_argv, int total_len)
+static int
+wl_android_pktlog_filter_pattern_disable(struct net_device *dev, char *command, int total_len)
{
- struct bcm_cfg80211 *cfg = NULL;
- uint err = BCME_OK;
+ int bytes_written = 0;
+ dhd_pub_t *dhdp = wl_cfg80211_get_dhdp(dev);
+ dhd_pktlog_filter_t *filter;
+ int err = BCME_OK;
- if (!ndev) {
- ANDROID_ERROR(("ndev is NULL\n"));
+ if (!dhdp || !dhdp->pktlog) {
+ DHD_PKT_LOG(("%s(): dhdp=%p pktlog=%p\n",
+ __FUNCTION__, dhdp, (dhdp ? dhdp->pktlog : NULL)));
return -EINVAL;
}
- cfg = wl_get_cfg(ndev);
- if (!cfg) {
- ANDROID_ERROR(("cfg is NULL\n"));
- return -EINVAL;
+ filter = dhdp->pktlog->pktlog_filter;
+
+ if (strlen(CMD_PKTLOG_FILTER_PATTERN_DISABLE) + 1 > total_len) {
+ return BCME_ERROR;
}
- /* sync commands from user space */
- mutex_lock(&cfg->usr_sync);
- if (strncmp(cmd_argv, "start", strlen("start")) == 0) {
- ANDROID_INFO(("BCNRECV start\n"));
- err = wl_android_bcnrecv_start(cfg, ndev);
- if (err != BCME_OK) {
- ANDROID_ERROR(("Failed to process the start command, error:%d\n", err));
- goto exit;
- }
- } else if (strncmp(cmd_argv, "stop", strlen("stop")) == 0) {
- ANDROID_INFO(("BCNRECV stop\n"));
- err = wl_android_bcnrecv_stop(ndev, WL_BCNRECV_USER_TRIGGER);
- if (err != BCME_OK) {
- ANDROID_ERROR(("Failed to stop the bcn recv, error:%d\n", err));
- goto exit;
- }
+ err = dhd_pktlog_filter_pattern_enable(filter,
+ command + strlen(CMD_PKTLOG_FILTER_PATTERN_DISABLE) + 1, FALSE);
+
+ if (err == BCME_OK) {
+ bytes_written = snprintf(command, total_len, "OK");
+ ANDROID_ERROR(("%s: pktlog filter pattern disable success\n", __FUNCTION__));
} else {
- err = BCME_ERROR;
+ ANDROID_ERROR(("%s: pktlog filter pattern disable fail\n", __FUNCTION__));
+ return BCME_ERROR;
}
-exit:
- mutex_unlock(&cfg->usr_sync);
- return err;
-}
-#endif /* WL_BCNRECV */
-#ifdef WL_CAC_TS
-/* CAC TSPEC functionality code implementation */
-static void
-wl_android_update_tsinfo(uint8 access_category, tspec_arg_t *tspec_arg)
-{
- uint8 tspec_id;
- /* Using direction as bidirectional by default */
- uint8 direction = TSPEC_BI_DIRECTION;
- /* Using U-APSD as the default power save mode */
- uint8 user_psb = TSPEC_UAPSD_PSB;
- uint8 ADDTS_AC2PRIO[4] = {PRIO_8021D_BE, PRIO_8021D_BK, PRIO_8021D_VI, PRIO_8021D_VO};
-
- /* Map tspec_id from access category */
- tspec_id = ADDTS_AC2PRIO[access_category];
-
- /* Update the tsinfo */
- tspec_arg->tsinfo.octets[0] = (uint8)(TSPEC_EDCA_ACCESS | direction |
- (tspec_id << TSPEC_TSINFO_TID_SHIFT));
- tspec_arg->tsinfo.octets[1] = (uint8)((tspec_id << TSPEC_TSINFO_PRIO_SHIFT) |
- user_psb);
- tspec_arg->tsinfo.octets[2] = 0x00;
+ return bytes_written;
}
-static s32
-wl_android_handle_cac_action(struct bcm_cfg80211 * cfg, struct net_device * ndev, char * argv)
+static int
+wl_android_pktlog_filter_add(struct net_device *dev, char *command, int total_len)
{
- tspec_arg_t tspec_arg;
- s32 err = BCME_ERROR;
- u8 ts_cmd[12] = "cac_addts";
- uint8 access_category;
- s32 bssidx;
-
- /* Following handling is done only for the primary interface */
- memset_s(&tspec_arg, sizeof(tspec_arg), 0, sizeof(tspec_arg));
- if (strncmp(argv, "addts", strlen("addts")) == 0) {
- tspec_arg.version = TSPEC_ARG_VERSION;
- tspec_arg.length = sizeof(tspec_arg_t) - (2 * sizeof(uint16));
- /* Read the params passed */
- sscanf(argv, "%*s %hhu %hu %hu", &access_category,
- &tspec_arg.nom_msdu_size, &tspec_arg.surplus_bw);
- if ((access_category > TSPEC_MAX_ACCESS_CATEGORY) ||
- ((tspec_arg.surplus_bw < TSPEC_MIN_SURPLUS_BW) ||
- (tspec_arg.surplus_bw > TSPEC_MAX_SURPLUS_BW)) ||
- (tspec_arg.nom_msdu_size > TSPEC_MAX_MSDU_SIZE)) {
- ANDROID_ERROR(("Invalid params access_category %hhu nom_msdu_size %hu"
- " surplus BW %hu\n", access_category, tspec_arg.nom_msdu_size,
- tspec_arg.surplus_bw));
- return BCME_USAGE_ERROR;
- }
+ int bytes_written = 0;
+ dhd_pub_t *dhdp = wl_cfg80211_get_dhdp(dev);
+ dhd_pktlog_filter_t *filter;
+ int err = BCME_OK;
- /* Update tsinfo */
- wl_android_update_tsinfo(access_category, &tspec_arg);
- /* Update other tspec parameters */
- tspec_arg.dialog_token = TSPEC_DEF_DIALOG_TOKEN;
- tspec_arg.mean_data_rate = TSPEC_DEF_MEAN_DATA_RATE;
- tspec_arg.min_phy_rate = TSPEC_DEF_MIN_PHY_RATE;
- } else if (strncmp(argv, "delts", strlen("delts")) == 0) {
- snprintf(ts_cmd, sizeof(ts_cmd), "cac_delts");
- tspec_arg.length = sizeof(tspec_arg_t) - (2 * sizeof(uint16));
- tspec_arg.version = TSPEC_ARG_VERSION;
- /* Read the params passed */
- sscanf(argv, "%*s %hhu", &access_category);
-
- if (access_category > TSPEC_MAX_ACCESS_CATEGORY) {
- WL_INFORM_MEM(("Invalide param, access_category %hhu\n", access_category));
- return BCME_USAGE_ERROR;
- }
- /* Update tsinfo */
- wl_android_update_tsinfo(access_category, &tspec_arg);
+ if (!dhdp || !dhdp->pktlog) {
+ DHD_PKT_LOG(("%s(): dhdp=%p pktlog=%p\n",
+ __FUNCTION__, dhdp, (dhdp ? dhdp->pktlog : NULL)));
+ return -EINVAL;
}
- if ((bssidx = wl_get_bssidx_by_wdev(cfg, ndev->ieee80211_ptr)) < 0) {
- ANDROID_ERROR(("Find index failed\n"));
- err = BCME_ERROR;
- return err;
+ filter = dhdp->pktlog->pktlog_filter;
+
+ if (strlen(CMD_PKTLOG_FILTER_ADD) + 1 > total_len) {
+ return BCME_ERROR;
}
- err = wldev_iovar_setbuf_bsscfg(ndev, ts_cmd, &tspec_arg, sizeof(tspec_arg),
- cfg->ioctl_buf, WLC_IOCTL_MAXLEN, bssidx, &cfg->ioctl_buf_sync);
- if (unlikely(err)) {
- ANDROID_ERROR(("%s error (%d)\n", ts_cmd, err));
+
+ err = dhd_pktlog_filter_add(filter, command + strlen(CMD_PKTLOG_FILTER_ADD) + 1);
+
+ if (err == BCME_OK) {
+ bytes_written = snprintf(command, total_len, "OK");
+ ANDROID_ERROR(("%s: pktlog filter add success\n", __FUNCTION__));
+ } else {
+ ANDROID_ERROR(("%s: pktlog filter add fail\n", __FUNCTION__));
+ return BCME_ERROR;
}
- return err;
+ return bytes_written;
}
-static s32
-wl_android_cac_ts_config(struct net_device *ndev, char *cmd_argv, int total_len)
+static int
+wl_android_pktlog_filter_info(struct net_device *dev, char *command, int total_len)
{
- struct bcm_cfg80211 *cfg = NULL;
- s32 err = BCME_OK;
+ int bytes_written = 0;
+ dhd_pub_t *dhdp = wl_cfg80211_get_dhdp(dev);
+ dhd_pktlog_filter_t *filter;
+ int err = BCME_OK;
- if (!ndev) {
- ANDROID_ERROR(("ndev is NULL\n"));
+ if (!dhdp || !dhdp->pktlog) {
+ DHD_PKT_LOG(("%s(): dhdp=%p pktlog=%p\n",
+ __FUNCTION__, dhdp, (dhdp ? dhdp->pktlog : NULL)));
return -EINVAL;
}
- cfg = wl_get_cfg(ndev);
- if (!cfg) {
- ANDROID_ERROR(("cfg is NULL\n"));
- return -EINVAL;
- }
+ filter = dhdp->pktlog->pktlog_filter;
- /* Request supported only for primary interface */
- if (ndev != bcmcfg_to_prmry_ndev(cfg)) {
- ANDROID_ERROR(("Request on non-primary interface\n"));
- return -1;
- }
+ err = dhd_pktlog_filter_info(filter);
- /* sync commands from user space */
- mutex_lock(&cfg->usr_sync);
- err = wl_android_handle_cac_action(cfg, ndev, cmd_argv);
- mutex_unlock(&cfg->usr_sync);
+ if (err == BCME_OK) {
+ bytes_written = snprintf(command, total_len, "OK");
+ ANDROID_ERROR(("%s: pktlog filter info success\n", __FUNCTION__));
+ } else {
+ ANDROID_ERROR(("%s: pktlog filter info fail\n", __FUNCTION__));
+ return BCME_ERROR;
+ }
- return err;
+ return bytes_written;
}
-#endif /* WL_CAC_TS */
-#ifdef WL_GET_CU
-/* Implementation to get channel usage from framework */
-static s32
-wl_android_get_channel_util(struct net_device *ndev, char *command, int total_len)
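+/* Start packet logging by setting the 'start' flag on the TX and RX
+ * packet-log rings; wl_android_pktlog_stop() below clears it. */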
+static int
+wl_android_pktlog_start(struct net_device *dev, char *command, int total_len)
{
- s32 bytes_written, err = 0;
- wl_bssload_t bssload;
- u8 smbuf[WLC_IOCTL_SMLEN];
- u8 chan_use_percentage = 0;
-
- if ((err = wldev_iovar_getbuf(ndev, "bssload_report", NULL,
- 0, smbuf, WLC_IOCTL_SMLEN, NULL))) {
- ANDROID_ERROR(("Getting bssload report failed with err=%d \n", err));
- return err;
+ int bytes_written = 0;
+ dhd_pub_t *dhdp = wl_cfg80211_get_dhdp(dev);
+
+ if (!dhdp || !dhdp->pktlog) {
+ DHD_PKT_LOG(("%s(): dhdp=%p pktlog=%p\n",
+ __FUNCTION__, dhdp, (dhdp ? dhdp->pktlog : NULL)));
+ return -EINVAL;
}
- (void)memcpy_s(&bssload, sizeof(wl_bssload_t), smbuf, sizeof(wl_bssload_t));
- /* Convert channel usage to percentage value */
- chan_use_percentage = (bssload.chan_util * 100) / 255;
+ if (!dhdp->pktlog->tx_pktlog_ring || !dhdp->pktlog->rx_pktlog_ring) {
+ DHD_PKT_LOG(("%s(): tx_pktlog_ring=%p rx_pktlog_ring=%p\n",
+ __FUNCTION__, dhdp->pktlog->tx_pktlog_ring, dhdp->pktlog->rx_pktlog_ring));
+ return -EINVAL;
+ }
- bytes_written = snprintf(command, total_len, "CU %hhu",
- chan_use_percentage);
- ANDROID_INFO(("Channel Utilization %u %u\n", bssload.chan_util, chan_use_percentage));
+ dhdp->pktlog->tx_pktlog_ring->start = TRUE;
+ dhdp->pktlog->rx_pktlog_ring->start = TRUE;
- return bytes_written;
-}
-#endif /* WL_GET_CU */
+ bytes_written = snprintf(command, total_len, "OK");
-#ifdef RTT_GEOFENCE_INTERVAL
-#if defined(RTT_SUPPORT) && defined(WL_NAN)
-static void
-wl_android_set_rtt_geofence_interval(struct net_device *ndev, char *command)
-{
- int rtt_interval = 0;
- dhd_pub_t *dhdp = wl_cfg80211_get_dhdp(ndev);
- char *rtt_intp = command + strlen(CMD_GEOFENCE_INTERVAL) + 1;
+ ANDROID_ERROR(("%s: pktlog start success\n", __FUNCTION__));
- rtt_interval = bcm_atoi(rtt_intp);
- dhd_rtt_set_geofence_rtt_interval(dhdp, rtt_interval);
+ return bytes_written;
}
-#endif /* RTT_SUPPORT && WL_NAN */
-#endif /* RTT_GEOFENCE_INTERVAL */
-#ifdef SUPPORT_SOFTAP_ELNA_BYPASS
-int
-wl_android_set_softap_elna_bypass(struct net_device *dev, char *command, int total_len)
+static int
+wl_android_pktlog_stop(struct net_device *dev, char *command, int total_len)
{
- char *ifname = NULL;
- char *pos, *token;
- int err = BCME_OK;
- int enable = FALSE;
-
- /*
- * STA/AP/GO I/F: DRIVER SET_SOFTAP_ELNA_BYPASS <ifname> <enable/disable>
- * the enable/disable format follows Samsung specific rules as following
- * Enable : 0
- * Disable :-1
- */
- pos = command;
-
- /* drop command */
- token = bcmstrtok(&pos, " ", NULL);
+ int bytes_written = 0;
+ dhd_pub_t *dhdp = wl_cfg80211_get_dhdp(dev);
- /* get the interface name */
- token = bcmstrtok(&pos, " ", NULL);
- if (!token) {
- ANDROID_ERROR(("%s: Invalid arguments about interface name\n", __FUNCTION__));
+ if (!dhdp || !dhdp->pktlog) {
+ DHD_PKT_LOG(("%s(): dhdp=%p pktlog=%p\n",
+ __FUNCTION__, dhdp, (dhdp ? dhdp->pktlog : NULL)));
return -EINVAL;
}
- ifname = token;
- /* get enable/disable flag */
- token = bcmstrtok(&pos, " ", NULL);
- if (!token) {
- ANDROID_ERROR(("%s: Invalid arguments about Enable/Disable\n", __FUNCTION__));
+ if (!dhdp->pktlog->tx_pktlog_ring || !dhdp->pktlog->rx_pktlog_ring) {
+ DHD_PKT_LOG(("%s(): tx_pktlog_ring=%p rx_pktlog_ring=%p\n",
+ __FUNCTION__, dhdp->pktlog->tx_pktlog_ring, dhdp->pktlog->rx_pktlog_ring));
return -EINVAL;
}
- enable = bcm_atoi(token);
- CUSTOMER_HW4_EN_CONVERT(enable);
- err = wl_set_softap_elna_bypass(dev, ifname, enable);
- if (unlikely(err)) {
- ANDROID_ERROR(("%s: Failed to set ELNA Bypass of SoftAP mode, err=%d\n",
- __FUNCTION__, err));
- return err;
- }
+ dhdp->pktlog->tx_pktlog_ring->start = FALSE;
+ dhdp->pktlog->rx_pktlog_ring->start = FALSE;
- return err;
+ bytes_written = snprintf(command, total_len, "OK");
+
+ ANDROID_ERROR(("%s: pktlog stop success\n", __FUNCTION__));
+
+ return bytes_written;
}
-int
-wl_android_get_softap_elna_bypass(struct net_device *dev, char *command, int total_len)
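+/* Report whether the given filter pattern is already registered;
+ * replies "TRUE" or "FALSE". */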
+static int
+wl_android_pktlog_filter_exist(struct net_device *dev, char *command, int total_len)
{
- char *ifname = NULL;
- char *pos, *token;
- int err = BCME_OK;
int bytes_written = 0;
- int softap_elnabypass = 0;
+ dhd_pub_t *dhdp = wl_cfg80211_get_dhdp(dev);
+ dhd_pktlog_filter_t *filter;
+ uint32 id;
+ bool exist = FALSE;
- /*
- * STA/AP/GO I/F: DRIVER GET_SOFTAP_ELNA_BYPASS <ifname>
- */
- pos = command;
+ if (!dhdp || !dhdp->pktlog) {
+ DHD_PKT_LOG(("%s(): dhdp=%p pktlog=%p\n",
+ __FUNCTION__, dhdp, (dhdp ? dhdp->pktlog : NULL)));
+ return -EINVAL;
+ }
- /* drop command */
- token = bcmstrtok(&pos, " ", NULL);
+ filter = dhdp->pktlog->pktlog_filter;
- /* get the interface name */
- token = bcmstrtok(&pos, " ", NULL);
- if (!token) {
- ANDROID_ERROR(("%s: Invalid arguments about interface name\n", __FUNCTION__));
- return -EINVAL;
+ if (strlen(CMD_PKTLOG_FILTER_EXIST) + 1 > total_len) {
+ return BCME_ERROR;
}
- ifname = token;
- err = wl_get_softap_elna_bypass(dev, ifname, &softap_elnabypass);
- if (unlikely(err)) {
- ANDROID_ERROR(("%s: Failed to get ELNA Bypass of SoftAP mode, err=%d\n",
- __FUNCTION__, err));
- return err;
+ exist = dhd_pktlog_filter_existed(filter, command + strlen(CMD_PKTLOG_FILTER_EXIST) + 1,
+ &id);
+
+ if (exist) {
+ bytes_written = snprintf(command, total_len, "TRUE");
+ ANDROID_ERROR(("%s: pktlog filter pattern id: %d is existed\n", __FUNCTION__, id));
} else {
- softap_elnabypass--; //Convert format to Customer HW4
- ANDROID_INFO(("%s: eLNA Bypass feature enable status is %d\n",
- __FUNCTION__, softap_elnabypass));
- bytes_written = snprintf(command, total_len, "%s %d",
- CMD_GET_SOFTAP_ELNA_BYPASS, softap_elnabypass);
+ bytes_written = snprintf(command, total_len, "FALSE");
+ ANDROID_ERROR(("%s: pktlog filter pattern id: %d is not existed\n", __FUNCTION__, id));
}
return bytes_written;
}
-#endif /* SUPPORT_SOFTAP_ELNA_BYPASS */
+#endif /* DHD_PKT_LOGGING */
-#ifdef WL_NAN
-int
-wl_android_get_nan_status(struct net_device *dev, char *command, int total_len)
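+/* ioctl entry point for Android DRIVER private commands: copy the command
+ * from user space (with 32-bit compat handling), bounds-check total_len,
+ * dispatch to wl_handle_private_cmd(), and copy the reply back to the
+ * caller's buffer. */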
+int wl_android_priv_cmd(struct net_device *net, struct ifreq *ifr, int cmd)
{
+#define PRIVATE_COMMAND_MAX_LEN 8192
+#define PRIVATE_COMMAND_DEF_LEN 4096
+ int ret = 0;
+ char *command = NULL;
int bytes_written = 0;
- int error = BCME_OK;
- wl_nan_conf_status_t nstatus;
+ android_wifi_priv_cmd priv_cmd;
+ int buf_size = 0;
- error = wl_cfgnan_get_status(dev, &nstatus);
- if (error) {
- ANDROID_ERROR(("Failed to get nan status (%d)\n", error));
- return error;
- }
+ net_os_wake_lock(net);
- bytes_written = snprintf(command, total_len,
- "EN:%d Role:%d EM:%d CID:"MACF" NMI:"MACF" SC(2G):%d SC(5G):%d "
- "MR:"NMRSTR" AMR:"NMRSTR" IMR:"NMRSTR
- "HC:%d AMBTT:%04x TSF[%04x:%04x]\n",
- nstatus.enabled,
- nstatus.role,
- nstatus.election_mode,
- ETHERP_TO_MACF(&(nstatus.cid)),
- ETHERP_TO_MACF(&(nstatus.nmi)),
- nstatus.social_chans[0],
- nstatus.social_chans[1],
- NMR2STR(nstatus.mr),
- NMR2STR(nstatus.amr),
- NMR2STR(nstatus.imr),
- nstatus.hop_count,
- nstatus.ambtt,
- nstatus.cluster_tsf_h,
- nstatus.cluster_tsf_l);
- return bytes_written;
-}
-#endif /* WL_NAN */
+ if (!capable(CAP_NET_ADMIN)) {
+ ret = -EPERM;
+ goto exit;
+ }
-#ifdef SUPPORT_NAN_RANGING_TEST_BW
-enum {
- NAN_RANGING_5G_BW20 = 1,
- NAN_RANGING_5G_BW40,
- NAN_RANGING_5G_BW80
-};
+ if (!ifr->ifr_data) {
+ ret = -EINVAL;
+ goto exit;
+ }
-int
-wl_nan_ranging_bw(struct net_device *net, int bw, char *command)
-{
- int bytes_written, err = BCME_OK;
- u8 ioctl_buf[WLC_IOCTL_SMLEN];
- s32 val = 1;
- struct {
- u32 band;
- u32 bw_cap;
- } param = {0, 0};
+#ifdef CONFIG_COMPAT
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 6, 0))
+ if (in_compat_syscall())
+#else
+ if (is_compat_task())
+#endif
+ {
+ compat_android_wifi_priv_cmd compat_priv_cmd;
+ if (copy_from_user(&compat_priv_cmd, ifr->ifr_data,
+ sizeof(compat_android_wifi_priv_cmd))) {
+ ret = -EFAULT;
+ goto exit;
- if (bw < NAN_RANGING_5G_BW20 || bw > NAN_RANGING_5G_BW80) {
- ANDROID_ERROR(("Wrong BW cmd:%d, %s\n", bw, __FUNCTION__));
- bytes_written = scnprintf(command, sizeof("FAIL"), "FAIL");
- return bytes_written;
+ }
+ priv_cmd.buf = compat_ptr(compat_priv_cmd.buf);
+ priv_cmd.used_len = compat_priv_cmd.used_len;
+ priv_cmd.total_len = compat_priv_cmd.total_len;
+ } else
+#endif /* CONFIG_COMPAT */
+ {
+ if (copy_from_user(&priv_cmd, ifr->ifr_data, sizeof(android_wifi_priv_cmd))) {
+ ret = -EFAULT;
+ goto exit;
+ }
+ }
+ if ((priv_cmd.total_len > PRIVATE_COMMAND_MAX_LEN) || (priv_cmd.total_len < 0)) {
+ ANDROID_ERROR(("%s: buf length invalid:%d\n", __FUNCTION__,
+ priv_cmd.total_len));
+ ret = -EINVAL;
+ goto exit;
}
- switch (bw) {
- case NAN_RANGING_5G_BW20:
- ANDROID_ERROR(("NAN_RANGING 5G/BW20\n"));
- param.band = WLC_BAND_5G;
- param.bw_cap = 0x1;
- break;
- case NAN_RANGING_5G_BW40:
- ANDROID_ERROR(("NAN_RANGING 5G/BW40\n"));
- param.band = WLC_BAND_5G;
- param.bw_cap = 0x3;
- break;
- case NAN_RANGING_5G_BW80:
- ANDROID_ERROR(("NAN_RANGING 5G/BW80\n"));
- param.band = WLC_BAND_5G;
- param.bw_cap = 0x7;
- break;
+ buf_size = max(priv_cmd.total_len, PRIVATE_COMMAND_DEF_LEN);
+ command = kmalloc((buf_size + 1), GFP_KERNEL);
+
+ if (!command) {
+ ANDROID_ERROR(("%s: failed to allocate memory\n", __FUNCTION__));
+ ret = -ENOMEM;
+ goto exit;
}
+ if (copy_from_user(command, priv_cmd.buf, priv_cmd.total_len)) {
+ ret = -EFAULT;
+ goto exit;
+ }
+ command[priv_cmd.total_len] = '\0';
- err = wldev_ioctl_set(net, WLC_DOWN, &val, sizeof(s32));
- if (err) {
- ANDROID_ERROR(("WLC_DOWN error %d\n", err));
- bytes_written = scnprintf(command, sizeof("FAIL"), "FAIL");
- } else {
- err = wldev_iovar_setbuf(net, "bw_cap", ¶m, sizeof(param),
- ioctl_buf, sizeof(ioctl_buf), NULL);
+ ANDROID_INFO(("%s: Android private cmd \"%s\" on %s\n", __FUNCTION__, command, ifr->ifr_name));
- if (err) {
- ANDROID_ERROR(("BW set failed\n"));
- bytes_written = scnprintf(command, sizeof("FAIL"), "FAIL");
- } else {
- ANDROID_ERROR(("BW set done\n"));
- bytes_written = scnprintf(command, sizeof("OK"), "OK");
+ bytes_written = wl_handle_private_cmd(net, command, priv_cmd.total_len);
+ if (bytes_written >= 0) {
+ if ((bytes_written == 0) && (priv_cmd.total_len > 0)) {
+ command[0] = '\0';
}
-
- err = wldev_ioctl_set(net, WLC_UP, &val, sizeof(s32));
- if (err < 0) {
- ANDROID_ERROR(("WLC_UP error %d\n", err));
- bytes_written = scnprintf(command, sizeof("FAIL"), "FAIL");
+ if (bytes_written >= priv_cmd.total_len) {
+ ANDROID_ERROR(("%s: err. bytes_written:%d >= buf_size:%d \n",
+ __FUNCTION__, bytes_written, buf_size));
+ ret = BCME_BUFTOOSHORT;
+ goto exit;
+ }
+ bytes_written++;
+ priv_cmd.used_len = bytes_written;
+ if (copy_to_user(priv_cmd.buf, command, bytes_written)) {
+ ANDROID_ERROR(("%s: failed to copy data to user buffer\n", __FUNCTION__));
+ ret = -EFAULT;
}
}
- return bytes_written;
+ else {
+ /* Propagate the error */
+ ret = bytes_written;
+ }
+
+exit:
+ net_os_wake_unlock(net);
+ kfree(command);
+ return ret;
}
-#endif /* SUPPORT_NAN_RANGING_TEST_BW */
int
wl_handle_private_cmd(struct net_device *net, char *command, u32 cmd_len)
if (strnicmp(command, CMD_START, strlen(CMD_START)) == 0) {
ANDROID_INFO(("%s, Received regular START command\n", __FUNCTION__));
-#ifdef SUPPORT_DEEP_SLEEP
- trigger_deep_sleep = 1;
-#else
#ifdef BT_OVER_SDIO
bytes_written = dhd_net_bus_get(net);
#else
bytes_written = wl_android_wifi_on(net);
#endif /* BT_OVER_SDIO */
-#endif /* SUPPORT_DEEP_SLEEP */
}
else if (strnicmp(command, CMD_SETFWPATH, strlen(CMD_SETFWPATH)) == 0) {
bytes_written = wl_android_set_fwpath(net, command, priv_cmd.total_len);
}
if (strnicmp(command, CMD_STOP, strlen(CMD_STOP)) == 0) {
-#ifdef SUPPORT_DEEP_SLEEP
- trigger_deep_sleep = 1;
-#else
#ifdef BT_OVER_SDIO
bytes_written = dhd_net_bus_put(net);
#else
bytes_written = wl_android_wifi_off(net, FALSE);
#endif /* BT_OVER_SDIO */
-#endif /* SUPPORT_DEEP_SLEEP */
}
#ifdef WL_CFG80211
else if (strnicmp(command, CMD_SCAN_ACTIVE, strlen(CMD_SCAN_ACTIVE)) == 0) {
else if (strnicmp(command, CMD_SCAN_PASSIVE, strlen(CMD_SCAN_PASSIVE)) == 0) {
wl_cfg80211_set_passive_scan(net, command);
}
-#endif /* WL_CFG80211 */
+#endif /* WL_CFG80211 */
else if (strnicmp(command, CMD_RSSI, strlen(CMD_RSSI)) == 0) {
bytes_written = wl_android_get_rssi(net, command, priv_cmd.total_len);
}
#endif /* WL_CFG80211 */
}
else if (strnicmp(command, CMD_SETSUSPENDOPT, strlen(CMD_SETSUSPENDOPT)) == 0) {
- bytes_written = wl_android_set_suspendopt(net, command);
+ bytes_written = wl_android_set_suspendopt(net, command, priv_cmd.total_len);
}
else if (strnicmp(command, CMD_SETSUSPENDMODE, strlen(CMD_SETSUSPENDMODE)) == 0) {
- bytes_written = wl_android_set_suspendmode(net, command);
- }
- else if (strnicmp(command, CMD_SETDTIM_IN_SUSPEND, strlen(CMD_SETDTIM_IN_SUSPEND)) == 0) {
- bytes_written = wl_android_set_bcn_li_dtim(net, command);
+ bytes_written = wl_android_set_suspendmode(net, command, priv_cmd.total_len);
}
else if (strnicmp(command, CMD_MAXDTIM_IN_SUSPEND, strlen(CMD_MAXDTIM_IN_SUSPEND)) == 0) {
- bytes_written = wl_android_set_max_dtim(net, command);
+ bytes_written = wl_android_set_max_dtim(net, command, priv_cmd.total_len);
}
-#ifdef DISABLE_DTIM_IN_SUSPEND
- else if (strnicmp(command, CMD_DISDTIM_IN_SUSPEND, strlen(CMD_DISDTIM_IN_SUSPEND)) == 0) {
- bytes_written = wl_android_set_disable_dtim_in_suspend(net, command);
- }
-#endif /* DISABLE_DTIM_IN_SUSPEND */
#ifdef WL_CFG80211
else if (strnicmp(command, CMD_SETBAND, strlen(CMD_SETBAND)) == 0) {
- bytes_written = wl_android_set_band(net, command);
+#ifdef DISABLE_SETBAND
+ bytes_written = BCME_DISABLED;
+#else /* DISABLE_SETBAND */
+ uint band = *(command + strlen(CMD_SETBAND) + 1) - '0';
+ if (dhd_conf_get_band(dhd_get_pub(net)) >= WLC_BAND_AUTO) {
+ printf("%s: Band is fixed in config.txt\n", __FUNCTION__);
+ } else
+ bytes_written = wl_cfg80211_set_if_band(net, band);
+#endif /* DISABLE_SETBAND */
}
-#endif /* WL_CFG80211 */
+#endif /* WL_CFG80211 */
else if (strnicmp(command, CMD_GETBAND, strlen(CMD_GETBAND)) == 0) {
bytes_written = wl_android_get_band(net, command, priv_cmd.total_len);
}
#ifdef WL_CFG80211
else if (strnicmp(command, CMD_SET_CSA, strlen(CMD_SET_CSA)) == 0) {
- bytes_written = wl_android_set_csa(net, command);
+ bytes_written = wl_android_set_csa(net, command, priv_cmd.total_len);
} else if (strnicmp(command, CMD_80211_MODE, strlen(CMD_80211_MODE)) == 0) {
bytes_written = wl_android_get_80211_mode(net, command, priv_cmd.total_len);
} else if (strnicmp(command, CMD_CHANSPEC, strlen(CMD_CHANSPEC)) == 0) {
bytes_written = wl_android_get_chanspec(net, command, priv_cmd.total_len);
}
#endif /* WL_CFG80211 */
-#ifndef CUSTOMER_SET_COUNTRY
/* CUSTOMER_SET_COUNTRY feature is define for only GGSM model */
else if (strnicmp(command, CMD_COUNTRY, strlen(CMD_COUNTRY)) == 0) {
/*
(rev_info_delim + 1)) {
revinfo = bcm_atoi(rev_info_delim + 1);
}
-#ifdef WL_CFG80211
- bytes_written = wl_cfg80211_set_country_code(net, country_code,
- true, true, revinfo);
-#else
bytes_written = wldev_set_country(net, country_code, true, true, revinfo);
-#endif /* WL_CFG80211 */
+#ifdef CUSTOMER_HW4_PRIVATE_CMD
+#ifdef FCC_PWR_LIMIT_2G
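+ /* Clear the 2.4 GHz FCC power-limit override (fccpwrlimit2g) after a
+  * country change. */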
+ if (wldev_iovar_setint(net, "fccpwrlimit2g", FALSE)) {
+ ANDROID_ERROR(("%s: fccpwrlimit2g deactivation is failed\n", __FUNCTION__));
+ } else {
+ ANDROID_ERROR(("%s: fccpwrlimit2g is deactivated\n", __FUNCTION__));
+ }
+#endif /* FCC_PWR_LIMIT_2G */
+#endif /* CUSTOMER_HW4_PRIVATE_CMD */
}
-#endif /* CUSTOMER_SET_COUNTRY */
else if (strnicmp(command, CMD_DATARATE, strlen(CMD_DATARATE)) == 0) {
bytes_written = wl_android_get_datarate(net, command, priv_cmd.total_len);
} else if (strnicmp(command, CMD_ASSOC_CLIENTS, strlen(CMD_ASSOC_CLIENTS)) == 0) {
bytes_written = wl_android_get_assoclist(net, command, priv_cmd.total_len);
}
+#ifdef CUSTOMER_HW4_PRIVATE_CMD
+#ifdef WLTDLS
+ else if (strnicmp(command, CMD_TDLS_RESET, strlen(CMD_TDLS_RESET)) == 0) {
+ bytes_written = wl_android_tdls_reset(net);
+ }
+#endif /* WLTDLS */
+#endif /* CUSTOMER_HW4_PRIVATE_CMD */
+
#ifdef PNO_SUPPORT
else if (strnicmp(command, CMD_PNOSSIDCLR_SET, strlen(CMD_PNOSSIDCLR_SET)) == 0) {
bytes_written = dhd_dev_pno_stop_for_ssid(net);
else if (strnicmp(command, CMD_P2P_DEV_ADDR, strlen(CMD_P2P_DEV_ADDR)) == 0) {
bytes_written = wl_android_get_p2p_dev_addr(net, command, priv_cmd.total_len);
}
+#ifdef WL_CFG80211
+#ifdef WLMESH
+ else if (strnicmp(command, CMD_SAE_SET_PASSWORD, strlen(CMD_SAE_SET_PASSWORD)) == 0) {
+ int skip = strlen(CMD_SAE_SET_PASSWORD) + 1;
+ bytes_written = wl_cfg80211_set_sae_password(net, command + skip,
+ priv_cmd.total_len - skip);
+ }
+ else if (strnicmp(command, CMD_SET_RSDB_MODE, strlen(CMD_SET_RSDB_MODE)) == 0) {
+ bytes_written = wl_android_set_rsdb_mode(net, command, priv_cmd.total_len);
+ }
+#endif /* WLMESH */
+#endif /* WL_CFG80211 */
else if (strnicmp(command, CMD_P2P_SET_NOA, strlen(CMD_P2P_SET_NOA)) == 0) {
int skip = strlen(CMD_P2P_SET_NOA) + 1;
bytes_written = wl_cfg80211_set_p2p_noa(net, command + skip,
bytes_written = wl_cfg80211_set_wps_p2p_ie(net, command + skip,
priv_cmd.total_len - skip, *(command + skip - 2) - '0');
}
-#ifdef WLFBT
- else if (strnicmp(command, CMD_GET_FTKEY, strlen(CMD_GET_FTKEY)) == 0) {
- bytes_written = wl_cfg80211_get_fbt_key(net, command, priv_cmd.total_len);
- }
-#endif /* WLFBT */
#endif /* WL_CFG80211 */
#if defined(WL_SUPPORT_AUTO_CHANNEL)
else if (strnicmp(command, CMD_GET_BEST_CHANNELS,
priv_cmd.total_len);
}
#endif /* WL_SUPPORT_AUTO_CHANNEL */
+#ifdef CUSTOMER_HW4_PRIVATE_CMD
+#ifdef SUPPORT_SET_LPC
+ else if (strnicmp(command, CMD_HAPD_LPC_ENABLED,
+ strlen(CMD_HAPD_LPC_ENABLED)) == 0) {
+ int skip = strlen(CMD_HAPD_LPC_ENABLED) + 3;
+ wl_android_set_lpc(net, (const char*)command+skip);
+ }
+#endif /* SUPPORT_SET_LPC */
+#ifdef SUPPORT_TRIGGER_HANG_EVENT
+ else if (strnicmp(command, CMD_TEST_FORCE_HANG,
+ strlen(CMD_TEST_FORCE_HANG)) == 0) {
+ int skip = strlen(CMD_TEST_FORCE_HANG) + 1;
+ net_os_send_hang_message_reason(net, (const char*)command+skip);
+ }
+#endif /* SUPPORT_TRIGGER_HANG_EVENT */
+ else if (strnicmp(command, CMD_CHANGE_RL, strlen(CMD_CHANGE_RL)) == 0)
+ bytes_written = wl_android_ch_res_rl(net, true);
+ else if (strnicmp(command, CMD_RESTORE_RL, strlen(CMD_RESTORE_RL)) == 0)
+ bytes_written = wl_android_ch_res_rl(net, false);
+#ifdef WL_RELMCAST
+ else if (strnicmp(command, CMD_SET_RMC_ENABLE, strlen(CMD_SET_RMC_ENABLE)) == 0) {
+ int rmc_enable = *(command + strlen(CMD_SET_RMC_ENABLE) + 1) - '0';
+ bytes_written = wl_android_rmc_enable(net, rmc_enable);
+ }
+ else if (strnicmp(command, CMD_SET_RMC_TXRATE, strlen(CMD_SET_RMC_TXRATE)) == 0) {
+ int rmc_txrate;
+ sscanf(command, "%*s %10d", &rmc_txrate);
+ bytes_written = wldev_iovar_setint(net, "rmc_txrate", rmc_txrate * 2);
+ }
+ else if (strnicmp(command, CMD_SET_RMC_ACTPERIOD, strlen(CMD_SET_RMC_ACTPERIOD)) == 0) {
+ int actperiod;
+ sscanf(command, "%*s %10d", &actperiod);
+ bytes_written = wldev_iovar_setint(net, "rmc_actf_time", actperiod);
+ }
+ else if (strnicmp(command, CMD_SET_RMC_IDLEPERIOD, strlen(CMD_SET_RMC_IDLEPERIOD)) == 0) {
+ int acktimeout;
+ sscanf(command, "%*s %10d", &acktimeout);
+ acktimeout *= 1000;
+ bytes_written = wldev_iovar_setint(net, "rmc_acktmo", acktimeout);
+ }
+ else if (strnicmp(command, CMD_SET_RMC_LEADER, strlen(CMD_SET_RMC_LEADER)) == 0) {
+ int skip = strlen(CMD_SET_RMC_LEADER) + 1;
+ bytes_written = wl_android_rmc_set_leader(net, (const char*)command+skip);
+ }
+ else if (strnicmp(command, CMD_SET_RMC_EVENT,
+ strlen(CMD_SET_RMC_EVENT)) == 0) {
+ bytes_written = wl_android_set_rmc_event(net, command, priv_cmd.total_len);
+ }
+#endif /* WL_RELMCAST */
+ else if (strnicmp(command, CMD_GET_SCSCAN, strlen(CMD_GET_SCSCAN)) == 0) {
+ bytes_written = wl_android_get_singlecore_scan(net, command, priv_cmd.total_len);
+ }
+ else if (strnicmp(command, CMD_SET_SCSCAN, strlen(CMD_SET_SCSCAN)) == 0) {
+ bytes_written = wl_android_set_singlecore_scan(net, command, priv_cmd.total_len);
+ }
+#ifdef TEST_TX_POWER_CONTROL
+ else if (strnicmp(command, CMD_TEST_SET_TX_POWER,
+ strlen(CMD_TEST_SET_TX_POWER)) == 0) {
+ int skip = strlen(CMD_TEST_SET_TX_POWER) + 1;
+ wl_android_set_tx_power(net, (const char*)command+skip);
+ }
+ else if (strnicmp(command, CMD_TEST_GET_TX_POWER,
+ strlen(CMD_TEST_GET_TX_POWER)) == 0) {
+ wl_android_get_tx_power(net, command, priv_cmd.total_len);
+ }
+#endif /* TEST_TX_POWER_CONTROL */
+ else if (strnicmp(command, CMD_SARLIMIT_TX_CONTROL,
+ strlen(CMD_SARLIMIT_TX_CONTROL)) == 0) {
+ int skip = strlen(CMD_SARLIMIT_TX_CONTROL) + 1;
+ wl_android_set_sarlimit_txctrl(net, (const char*)command+skip);
+ }
+#endif /* CUSTOMER_HW4_PRIVATE_CMD */
else if (strnicmp(command, CMD_HAPD_MAC_FILTER, strlen(CMD_HAPD_MAC_FILTER)) == 0) {
int skip = strlen(CMD_HAPD_MAC_FILTER) + 1;
wl_android_set_mac_address_filter(net, command+skip);
}
else if (strnicmp(command, CMD_SETROAMMODE, strlen(CMD_SETROAMMODE)) == 0)
- bytes_written = wl_android_set_roam_mode(net, command);
+ bytes_written = wl_android_set_roam_mode(net, command, priv_cmd.total_len);
#if defined(BCMFW_ROAM_ENABLE)
else if (strnicmp(command, CMD_SET_ROAMPREF, strlen(CMD_SET_ROAMPREF)) == 0) {
bytes_written = wl_android_set_roampref(net, command, priv_cmd.total_len);
#endif /* BCMFW_ROAM_ENABLE */
#ifdef WL_CFG80211
else if (strnicmp(command, CMD_MIRACAST, strlen(CMD_MIRACAST)) == 0)
- bytes_written = wl_android_set_miracast(net, command);
+ bytes_written = wl_android_set_miracast(net, command, priv_cmd.total_len);
+#ifdef WL11ULB
+ else if (strnicmp(command, CMD_ULB_MODE, strlen(CMD_ULB_MODE)) == 0)
+ bytes_written = wl_android_set_ulb_mode(net, command, priv_cmd.total_len);
+ else if (strnicmp(command, CMD_ULB_BW, strlen(CMD_ULB_BW)) == 0)
+ bytes_written = wl_android_set_ulb_bw(net, command, priv_cmd.total_len);
+#endif /* WL11ULB */
else if (strnicmp(command, CMD_SETIBSSBEACONOUIDATA, strlen(CMD_SETIBSSBEACONOUIDATA)) == 0)
bytes_written = wl_android_set_ibss_beacon_ouidata(net,
command, priv_cmd.total_len);
-#endif /* WL_CFG80211 */
+#endif /* WL_CFG80211 */
else if (strnicmp(command, CMD_KEEP_ALIVE, strlen(CMD_KEEP_ALIVE)) == 0) {
int skip = strlen(CMD_KEEP_ALIVE) + 1;
- bytes_written = wl_keep_alive_set(net, command + skip);
+ bytes_written = wl_keep_alive_set(net, command + skip, priv_cmd.total_len - skip);
}
#ifdef WL_CFG80211
else if (strnicmp(command, CMD_ROAM_OFFLOAD, strlen(CMD_ROAM_OFFLOAD)) == 0) {
int enable = *(command + strlen(CMD_ROAM_OFFLOAD) + 1) - '0';
bytes_written = wl_cfg80211_enable_roam_offload(net, enable);
}
+#endif /* WL_CFG80211 */
+#if defined(WL_VIRTUAL_APSTA)
else if (strnicmp(command, CMD_INTERFACE_CREATE, strlen(CMD_INTERFACE_CREATE)) == 0) {
char *name = (command + strlen(CMD_INTERFACE_CREATE) +1);
ANDROID_INFO(("Creating %s interface\n", name));
- if (wl_cfg80211_add_if(wl_get_cfg(net), net, WL_IF_TYPE_STA,
- name, NULL) == NULL) {
- bytes_written = -ENODEV;
- } else {
- /* Return success */
- bytes_written = 0;
- }
+ bytes_written = wl_cfg80211_interface_create(net, name);
}
else if (strnicmp(command, CMD_INTERFACE_DELETE, strlen(CMD_INTERFACE_DELETE)) == 0) {
char *name = (command + strlen(CMD_INTERFACE_DELETE) +1);
ANDROID_INFO(("Deleteing %s interface\n", name));
- bytes_written = wl_cfg80211_del_if(wl_get_cfg(net), net, NULL, name);
+ bytes_written = wl_cfg80211_interface_delete(net, name);
}
-#endif /* WL_CFG80211 */
+#endif /* defined (WL_VIRTUAL_APSTA) */
else if (strnicmp(command, CMD_GET_LINK_STATUS, strlen(CMD_GET_LINK_STATUS)) == 0) {
bytes_written = wl_android_get_link_status(net, command, priv_cmd.total_len);
}
char *data = (command + strlen(CMD_DFS_AP_MOVE) +1);
bytes_written = wl_cfg80211_dfs_ap_move(net, data, command, priv_cmd.total_len);
}
-#endif /* WL_CFG80211 */
+#endif /* WL_CFG80211 */
+#ifdef WBTEXT
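+ /* WBTEXT private commands: enable the feature and configure its roam
+  * profile, weights, score tables, deltas and BTM timer threshold/delta. */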
+ else if (strnicmp(command, CMD_WBTEXT_ENABLE, strlen(CMD_WBTEXT_ENABLE)) == 0) {
+ bytes_written = wl_android_wbtext(net, command, priv_cmd.total_len);
+ }
+#ifdef WL_CFG80211
+ else if (strnicmp(command, CMD_WBTEXT_PROFILE_CONFIG,
+ strlen(CMD_WBTEXT_PROFILE_CONFIG)) == 0) {
+ char *data = (command + strlen(CMD_WBTEXT_PROFILE_CONFIG) + 1);
+ bytes_written = wl_cfg80211_wbtext_config(net, data, command, priv_cmd.total_len);
+ }
+ else if (strnicmp(command, CMD_WBTEXT_WEIGHT_CONFIG,
+ strlen(CMD_WBTEXT_WEIGHT_CONFIG)) == 0) {
+ char *data = (command + strlen(CMD_WBTEXT_WEIGHT_CONFIG) + 1);
+ bytes_written = wl_cfg80211_wbtext_weight_config(net, data,
+ command, priv_cmd.total_len);
+ }
+ else if (strnicmp(command, CMD_WBTEXT_TABLE_CONFIG,
+ strlen(CMD_WBTEXT_TABLE_CONFIG)) == 0) {
+ char *data = (command + strlen(CMD_WBTEXT_TABLE_CONFIG) + 1);
+ bytes_written = wl_cfg80211_wbtext_table_config(net, data,
+ command, priv_cmd.total_len);
+ }
+ else if (strnicmp(command, CMD_WBTEXT_DELTA_CONFIG,
+ strlen(CMD_WBTEXT_DELTA_CONFIG)) == 0) {
+ char *data = (command + strlen(CMD_WBTEXT_DELTA_CONFIG) + 1);
+ bytes_written = wl_cfg80211_wbtext_delta_config(net, data,
+ command, priv_cmd.total_len);
+ }
+#endif /* WL_CFG80211 */
+ else if (strnicmp(command, CMD_WBTEXT_BTM_TIMER_THRESHOLD,
+ strlen(CMD_WBTEXT_BTM_TIMER_THRESHOLD)) == 0) {
+ bytes_written = wl_cfg80211_wbtext_btm_timer_threshold(net, command,
+ priv_cmd.total_len);
+ }
+ else if (strnicmp(command, CMD_WBTEXT_BTM_DELTA,
+ strlen(CMD_WBTEXT_BTM_DELTA)) == 0) {
+ bytes_written = wl_cfg80211_wbtext_btm_delta(net, command,
+ priv_cmd.total_len);
+ }
+#endif /* WBTEXT */
#ifdef SET_RPS_CPUS
else if (strnicmp(command, CMD_RPSMODE, strlen(CMD_RPSMODE)) == 0) {
- bytes_written = wl_android_set_rps_cpus(net, command);
+ bytes_written = wl_android_set_rps_cpus(net, command, priv_cmd.total_len);
}
#endif /* SET_RPS_CPUS */
#ifdef WLWFDS
else if (strnicmp(command, CMD_ADD_WFDS_HASH, strlen(CMD_ADD_WFDS_HASH)) == 0) {
- bytes_written = wl_android_set_wfds_hash(net, command, 1);
+ bytes_written = wl_android_set_wfds_hash(net, command, priv_cmd.total_len, 1);
}
else if (strnicmp(command, CMD_DEL_WFDS_HASH, strlen(CMD_DEL_WFDS_HASH)) == 0) {
- bytes_written = wl_android_set_wfds_hash(net, command, 0);
+ bytes_written = wl_android_set_wfds_hash(net, command, priv_cmd.total_len, 0);
}
#endif /* WLWFDS */
#ifdef BT_WIFI_HANDOVER
else if (strnicmp(command, CMD_TBOW_TEARDOWN, strlen(CMD_TBOW_TEARDOWN)) == 0) {
- bytes_written = wl_tbow_teardown(net);
+ bytes_written = wl_tbow_teardown(net, command, priv_cmd.total_len);
}
#endif /* BT_WIFI_HANDOVER */
+#ifdef CUSTOMER_HW4_PRIVATE_CMD
+#ifdef FCC_PWR_LIMIT_2G
+ else if (strnicmp(command, CMD_GET_FCC_PWR_LIMIT_2G,
+ strlen(CMD_GET_FCC_PWR_LIMIT_2G)) == 0) {
+ bytes_written = wl_android_get_fcc_pwr_limit_2g(net, command, priv_cmd.total_len);
+ }
+ else if (strnicmp(command, CMD_SET_FCC_PWR_LIMIT_2G,
+ strlen(CMD_SET_FCC_PWR_LIMIT_2G)) == 0) {
+ bytes_written = wl_android_set_fcc_pwr_limit_2g(net, command, priv_cmd.total_len);
+ }
+#endif /* FCC_PWR_LIMIT_2G */
+ else if (strnicmp(command, CMD_GET_STA_INFO, strlen(CMD_GET_STA_INFO)) == 0) {
+ bytes_written = wl_cfg80211_get_sta_info(net, command, priv_cmd.total_len);
+ }
+#endif /* CUSTOMER_HW4_PRIVATE_CMD */
else if (strnicmp(command, CMD_MURX_BFE_CAP,
strlen(CMD_MURX_BFE_CAP)) == 0) {
-#if defined(WL_MURX) && defined(WL_CFG80211)
+#if defined(BCM4359_CHIP) && defined(WL_CFG80211)
uint val = *(command + strlen(CMD_MURX_BFE_CAP) + 1) - '0';
bytes_written = wl_android_murx_bfe_cap(net, val);
#else
return BCME_UNSUPPORTED;
-#endif /* WL_MURX */
+#endif /* BCM4359_CHIP */
}
#ifdef SUPPORT_AP_HIGHER_BEACONRATE
else if (strnicmp(command, CMD_GET_AP_BASICRATE, strlen(CMD_GET_AP_BASICRATE)) == 0) {
bytes_written = wl_android_get_ap_rps(net, command, priv_cmd.total_len);
}
#endif /* SUPPORT_AP_RADIO_PWRSAVE */
-#ifdef SUPPORT_AP_SUSPEND
- else if (strnicmp(command, CMD_SET_AP_SUSPEND, strlen(CMD_SET_AP_SUSPEND)) == 0) {
- bytes_written = wl_android_set_ap_suspend(net, command, priv_cmd.total_len);
- }
-#endif /* SUPPORT_AP_SUSPEND */
-#ifdef SUPPORT_AP_BWCTRL
- else if (strnicmp(command, CMD_SET_AP_BW, strlen(CMD_SET_AP_BW)) == 0) {
- bytes_written = wl_android_set_ap_bw(net, command, priv_cmd.total_len);
- }
- else if (strnicmp(command, CMD_GET_AP_BW, strlen(CMD_GET_AP_BW)) == 0) {
- bytes_written = wl_android_get_ap_bw(net, command, priv_cmd.total_len);
- }
-#endif /* SUPPORT_AP_BWCTRL */
-#ifdef SUPPORT_RSSI_SUM_REPORT
+#ifdef SUPPORT_RSSI_LOGGING
else if (strnicmp(command, CMD_SET_RSSI_LOGGING, strlen(CMD_SET_RSSI_LOGGING)) == 0) {
bytes_written = wl_android_set_rssi_logging(net, command, priv_cmd.total_len);
}
else if (strnicmp(command, CMD_GET_RSSI_PER_ANT, strlen(CMD_GET_RSSI_PER_ANT)) == 0) {
bytes_written = wl_android_get_rssi_per_ant(net, command, priv_cmd.total_len);
}
-#endif /* SUPPORT_RSSI_SUM_REPORT */
+#endif /* SUPPORT_RSSI_LOGGING */
+#if defined(DHD_ENABLE_BIGDATA_LOGGING)
+ else if (strnicmp(command, CMD_GET_BSS_INFO, strlen(CMD_GET_BSS_INFO)) == 0) {
+ bytes_written = wl_cfg80211_get_bss_info(net, command, priv_cmd.total_len);
+ }
+ else if (strnicmp(command, CMD_GET_ASSOC_REJECT_INFO, strlen(CMD_GET_ASSOC_REJECT_INFO))
+ == 0) {
+ bytes_written = wl_cfg80211_get_connect_failed_status(net, command,
+ priv_cmd.total_len);
+ }
+#endif /* DHD_ENABLE_BIGDATA_LOGGING */
+#if defined(SUPPORT_RANDOM_MAC_SCAN)
+ else if (strnicmp(command, ENABLE_RANDOM_MAC, strlen(ENABLE_RANDOM_MAC)) == 0) {
+ bytes_written = wl_cfg80211_set_random_mac(net, TRUE);
+ } else if (strnicmp(command, DISABLE_RANDOM_MAC, strlen(DISABLE_RANDOM_MAC)) == 0) {
+ bytes_written = wl_cfg80211_set_random_mac(net, FALSE);
+ }
+#endif /* SUPPORT_RANDOM_MAC_SCAN */
#ifdef WL_NATOE
else if (strnicmp(command, CMD_NATOE, strlen(CMD_NATOE)) == 0) {
bytes_written = wl_android_process_natoe_cmd(net, command,
bytes_written = wl_android_get_connection_stats(net, command,
priv_cmd.total_len);
}
-#endif // endif
+#endif
#ifdef DHD_LOG_DUMP
else if (strnicmp(command, CMD_NEW_DEBUG_PRINT_DUMP,
strlen(CMD_NEW_DEBUG_PRINT_DUMP)) == 0) {
dhd_pub_t *dhdp = wl_cfg80211_get_dhdp(net);
- /* check whether it has more command */
- if (strnicmp(command + strlen(CMD_NEW_DEBUG_PRINT_DUMP), " ", 1) == 0) {
- /* compare unwanted/disconnected command */
- if (strnicmp(command + strlen(CMD_NEW_DEBUG_PRINT_DUMP) + 1,
- SUBCMD_UNWANTED, strlen(SUBCMD_UNWANTED)) == 0) {
- dhd_log_dump_trigger(dhdp, CMD_UNWANTED);
- } else if (strnicmp(command + strlen(CMD_NEW_DEBUG_PRINT_DUMP) + 1,
- SUBCMD_DISCONNECTED, strlen(SUBCMD_DISCONNECTED)) == 0) {
- dhd_log_dump_trigger(dhdp, CMD_DISCONNECTED);
- } else {
- dhd_log_dump_trigger(dhdp, CMD_DEFAULT);
- }
- } else {
- dhd_log_dump_trigger(dhdp, CMD_DEFAULT);
- }
+ dhd_schedule_log_dump(dhdp);
+#if defined(DHD_DEBUG) && defined(BCMPCIE) && defined(DHD_FW_COREDUMP)
+ dhdp->memdump_type = DUMP_TYPE_BY_SYSDUMP;
+ dhd_bus_mem_dump(dhdp);
+#endif /* DHD_DEBUG && BCMPCIE && DHD_FW_COREDUMP */
+#ifdef DHD_PKT_LOGGING
+ dhd_schedule_pktlog_dump(dhdp);
+#endif /* DHD_PKT_LOGGING */
}
#endif /* DHD_LOG_DUMP */
-#ifdef DHD_STATUS_LOGGING
- else if (strnicmp(command, CMD_DUMP_STATUS_LOG, strlen(CMD_DUMP_STATUS_LOG)) == 0) {
- dhd_statlog_dump_scr(wl_cfg80211_get_dhdp(net));
- }
- else if (strnicmp(command, CMD_QUERY_STATUS_LOG, strlen(CMD_QUERY_STATUS_LOG)) == 0) {
- dhd_pub_t *dhdp = wl_cfg80211_get_dhdp(net);
- bytes_written = dhd_statlog_query(dhdp, command, priv_cmd.total_len);
- }
-#endif /* DHD_STATUS_LOGGING */
#ifdef SET_PCIE_IRQ_CPU_CORE
else if (strnicmp(command, CMD_PCIE_IRQ_CORE, strlen(CMD_PCIE_IRQ_CORE)) == 0) {
- int affinity_cmd = *(command + strlen(CMD_PCIE_IRQ_CORE) + 1) - '0';
- wl_android_set_irq_cpucore(net, affinity_cmd);
+ int set = *(command + strlen(CMD_PCIE_IRQ_CORE) + 1) - '0';
+ wl_android_set_irq_cpucore(net, set);
}
#endif /* SET_PCIE_IRQ_CPU_CORE */
+#if defined(DHD_HANG_SEND_UP_TEST)
+ else if (strnicmp(command, CMD_MAKE_HANG, strlen(CMD_MAKE_HANG)) == 0) {
+ int skip = strlen(CMD_MAKE_HANG) + 1;
+ wl_android_make_hang_with_reason(net, (const char*)command+skip);
+ }
+#endif /* DHD_HANG_SEND_UP_TEST */
#ifdef SUPPORT_LQCM
else if (strnicmp(command, CMD_SET_LQCM_ENABLE, strlen(CMD_SET_LQCM_ENABLE)) == 0) {
int lqcm_enable = *(command + strlen(CMD_SET_LQCM_ENABLE) + 1) - '0';
bytes_written = wl_android_lqcm_enable(net, lqcm_enable);
}
else if (strnicmp(command, CMD_GET_LQCM_REPORT,
- strlen(CMD_GET_LQCM_REPORT)) == 0) {
+ strlen(CMD_GET_LQCM_REPORT)) == 0) {
bytes_written = wl_android_get_lqcm_report(net, command,
- priv_cmd.total_len);
+ priv_cmd.total_len);
}
-#endif // endif
+#endif
else if (strnicmp(command, CMD_GET_SNR, strlen(CMD_GET_SNR)) == 0) {
bytes_written = wl_android_get_snr(net, command, priv_cmd.total_len);
}
-#ifdef WL_CFG80211
- else if (strnicmp(command, CMD_DEBUG_VERBOSE, strlen(CMD_DEBUG_VERBOSE)) == 0) {
- int verbose_level = *(command + strlen(CMD_DEBUG_VERBOSE) + 1) - '0';
- bytes_written = wl_cfg80211_set_dbg_verbose(net, verbose_level);
+#ifdef WLADPS_PRIVATE_CMD
+ else if (strnicmp(command, CMD_SET_ADPS, strlen(CMD_SET_ADPS)) == 0) {
+ int skip = strlen(CMD_SET_ADPS) + 1;
+ bytes_written = wl_android_set_adps_mode(net, (const char*)command+skip);
}
-#endif /* WL_CFG80211 */
-#ifdef WL_BCNRECV
- else if (strnicmp(command, CMD_BEACON_RECV,
- strlen(CMD_BEACON_RECV)) == 0) {
- char *data = (command + strlen(CMD_BEACON_RECV) + 1);
- bytes_written = wl_android_bcnrecv_config(net,
- data, priv_cmd.total_len);
- }
-#endif /* WL_BCNRECV */
-#ifdef WL_MBO
- else if (strnicmp(command, CMD_MBO, strlen(CMD_MBO)) == 0) {
- bytes_written = wl_android_process_mbo_cmd(net, command,
- priv_cmd.total_len);
+ else if (strnicmp(command, CMD_GET_ADPS, strlen(CMD_GET_ADPS)) == 0) {
+ bytes_written = wl_android_get_adps_mode(net, command, priv_cmd.total_len);
}
-#endif /* WL_MBO */
-#ifdef WL_CAC_TS
- else if (strnicmp(command, CMD_CAC_TSPEC,
- strlen(CMD_CAC_TSPEC)) == 0) {
- char *data = (command + strlen(CMD_CAC_TSPEC) + 1);
- bytes_written = wl_android_cac_ts_config(net,
- data, priv_cmd.total_len);
- }
-#endif /* WL_CAC_TS */
-#ifdef WL_GET_CU
- else if (strnicmp(command, CMD_GET_CHAN_UTIL,
- strlen(CMD_GET_CHAN_UTIL)) == 0) {
- bytes_written = wl_android_get_channel_util(net,
- command, priv_cmd.total_len);
- }
-#endif /* WL_GET_CU */
-#ifdef RTT_GEOFENCE_INTERVAL
-#if defined(RTT_SUPPORT) && defined(WL_NAN)
- else if (strnicmp(command, CMD_GEOFENCE_INTERVAL,
- strlen(CMD_GEOFENCE_INTERVAL)) == 0) {
- (void)wl_android_set_rtt_geofence_interval(net, command);
- }
-#endif /* RTT_SUPPORT && WL_NAN */
-#endif /* RTT_GEOFENCE_INTERVAL */
-#ifdef SUPPORT_SOFTAP_ELNA_BYPASS
- else if (strnicmp(command, CMD_SET_SOFTAP_ELNA_BYPASS,
- strlen(CMD_SET_SOFTAP_ELNA_BYPASS)) == 0) {
- bytes_written =
- wl_android_set_softap_elna_bypass(net, command, priv_cmd.total_len);
+#endif /* WLADPS_PRIVATE_CMD */
+#ifdef DHD_PKT_LOGGING
+ else if (strnicmp(command, CMD_PKTLOG_FILTER_ENABLE,
+ strlen(CMD_PKTLOG_FILTER_ENABLE)) == 0) {
+ bytes_written = wl_android_pktlog_filter_enable(net, command, priv_cmd.total_len);
+ }
+ else if (strnicmp(command, CMD_PKTLOG_FILTER_DISABLE,
+ strlen(CMD_PKTLOG_FILTER_DISABLE)) == 0) {
+ bytes_written = wl_android_pktlog_filter_disable(net, command, priv_cmd.total_len);
}
- else if (strnicmp(command, CMD_GET_SOFTAP_ELNA_BYPASS,
- strlen(CMD_GET_SOFTAP_ELNA_BYPASS)) == 0) {
+ else if (strnicmp(command, CMD_PKTLOG_FILTER_PATTERN_ENABLE,
+ strlen(CMD_PKTLOG_FILTER_PATTERN_ENABLE)) == 0) {
bytes_written =
- wl_android_get_softap_elna_bypass(net, command, priv_cmd.total_len);
+ wl_android_pktlog_filter_pattern_enable(net, command, priv_cmd.total_len);
}
-#endif /* SUPPORT_SOFTAP_ELNA_BYPASS */
-#ifdef WL_NAN
- else if (strnicmp(command, CMD_GET_NAN_STATUS,
- strlen(CMD_GET_NAN_STATUS)) == 0) {
+ else if (strnicmp(command, CMD_PKTLOG_FILTER_PATTERN_DISABLE,
+ strlen(CMD_PKTLOG_FILTER_PATTERN_DISABLE)) == 0) {
bytes_written =
- wl_android_get_nan_status(net, command, priv_cmd.total_len);
+ wl_android_pktlog_filter_pattern_disable(net, command, priv_cmd.total_len);
+ }
+ else if (strnicmp(command, CMD_PKTLOG_FILTER_ADD, strlen(CMD_PKTLOG_FILTER_ADD)) == 0) {
+ bytes_written = wl_android_pktlog_filter_add(net, command, priv_cmd.total_len);
+ }
+ else if (strnicmp(command, CMD_PKTLOG_FILTER_INFO, strlen(CMD_PKTLOG_FILTER_INFO)) == 0) {
+ bytes_written = wl_android_pktlog_filter_info(net, command, priv_cmd.total_len);
}
-#endif /* WL_NAN */
-#if defined(SUPPORT_NAN_RANGING_TEST_BW)
- else if (strnicmp(command, CMD_NAN_RANGING_SET_BW, strlen(CMD_NAN_RANGING_SET_BW)) == 0) {
- int bw_cmd = *(command + strlen(CMD_NAN_RANGING_SET_BW) + 1) - '0';
- bytes_written = wl_nan_ranging_bw(net, bw_cmd, command);
+ else if (strnicmp(command, CMD_PKTLOG_START, strlen(CMD_PKTLOG_START)) == 0) {
+ bytes_written = wl_android_pktlog_start(net, command, priv_cmd.total_len);
}
-#endif /* SUPPORT_NAN_RANGING_TEST_BW */
+ else if (strnicmp(command, CMD_PKTLOG_STOP, strlen(CMD_PKTLOG_STOP)) == 0) {
+ bytes_written = wl_android_pktlog_stop(net, command, priv_cmd.total_len);
+ }
+ else if (strnicmp(command, CMD_PKTLOG_FILTER_EXIST, strlen(CMD_PKTLOG_FILTER_EXIST)) == 0) {
+ bytes_written = wl_android_pktlog_filter_exist(net, command, priv_cmd.total_len);
+ }
+#endif /* DHD_PKT_LOGGING */
+#if defined(STAT_REPORT)
+ else if (strnicmp(command, CMD_STAT_REPORT_GET_START,
+ strlen(CMD_STAT_REPORT_GET_START)) == 0) {
+ bytes_written = wl_android_stat_report_get_start(net, command, priv_cmd.total_len);
+ } else if (strnicmp(command, CMD_STAT_REPORT_GET_NEXT,
+ strlen(CMD_STAT_REPORT_GET_NEXT)) == 0) {
+ bytes_written = wl_android_stat_report_get_next(net, command, priv_cmd.total_len);
+ }
+#endif /* STAT_REPORT */
else if (wl_android_ext_priv_cmd(net, command, priv_cmd.total_len, &bytes_written) == 0) {
}
else {
{
int ret = 0;
-#if defined(ENABLE_INSMOD_NO_FW_LOAD) || defined(BUS_POWER_RESTORE)
+#ifdef ENABLE_INSMOD_NO_FW_LOAD
dhd_download_fw_on_driverload = FALSE;
#endif /* ENABLE_INSMOD_NO_FW_LOAD */
if (!iface_name[0]) {
- bzero(iface_name, IFNAMSIZ);
+ memset(iface_name, 0, IFNAMSIZ);
bcm_strncpy_s(iface_name, IFNAMSIZ, "wlan", IFNAMSIZ);
}
-#ifdef WL_GENL
- wl_genl_init();
-#endif // endif
-#ifdef WL_RELMCAST
wl_netlink_init();
-#endif /* WL_RELMCAST */
return ret;
}
int ret = 0;
struct io_cfg *cur, *q;
-#ifdef WL_GENL
- wl_genl_deinit();
-#endif /* WL_GENL */
-#ifdef WL_RELMCAST
wl_netlink_deinit();
-#endif /* WL_RELMCAST */
- GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wcast-qual"
+#endif
list_for_each_entry_safe(cur, q, &miracast_resume_list, list) {
- GCC_DIAGNOSTIC_POP();
list_del(&cur->list);
kfree(cur);
}
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic pop
+#endif
return ret;
}
g_wifi_on = FALSE;
}
-#ifdef WL_GENL
-/* Generic Netlink Initializaiton */
-static int wl_genl_init(void)
-{
- int ret;
-
- ANDROID_INFO(("GEN Netlink Init\n\n"));
-
-#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 13, 0))
- /* register new family */
- ret = genl_register_family(&wl_genl_family);
- if (ret != 0)
- goto failure;
-
- /* register functions (commands) of the new family */
- ret = genl_register_ops(&wl_genl_family, &wl_genl_ops);
- if (ret != 0) {
- ANDROID_ERROR(("register ops failed: %i\n", ret));
- genl_unregister_family(&wl_genl_family);
- goto failure;
- }
-
- ret = genl_register_mc_group(&wl_genl_family, &wl_genl_mcast);
-#else
- ret = genl_register_family_with_ops_groups(&wl_genl_family, wl_genl_ops, wl_genl_mcast);
-#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(3, 13, 0) */
- if (ret != 0) {
- ANDROID_ERROR(("register mc_group failed: %i\n", ret));
-#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 13, 0))
- genl_unregister_ops(&wl_genl_family, &wl_genl_ops);
-#endif // endif
- genl_unregister_family(&wl_genl_family);
- goto failure;
- }
-
- return 0;
-
-failure:
- ANDROID_ERROR(("Registering Netlink failed!!\n"));
- return -1;
-}
-
-/* Generic netlink deinit */
-static int wl_genl_deinit(void)
-{
-
-#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 13, 0))
- if (genl_unregister_ops(&wl_genl_family, &wl_genl_ops) < 0)
- ANDROID_ERROR(("Unregister wl_genl_ops failed\n"));
-#endif // endif
- if (genl_unregister_family(&wl_genl_family) < 0)
- ANDROID_ERROR(("Unregister wl_genl_ops failed\n"));
-
- return 0;
-}
-
-s32 wl_event_to_bcm_event(u16 event_type)
-{
- u16 event = -1;
-
- switch (event_type) {
- case WLC_E_SERVICE_FOUND:
- event = BCM_E_SVC_FOUND;
- break;
- case WLC_E_P2PO_ADD_DEVICE:
- event = BCM_E_DEV_FOUND;
- break;
- case WLC_E_P2PO_DEL_DEVICE:
- event = BCM_E_DEV_LOST;
- break;
- /* Above events are supported from BCM Supp ver 47 Onwards */
-#ifdef BT_WIFI_HANDOVER
- case WLC_E_BT_WIFI_HANDOVER_REQ:
- event = BCM_E_DEV_BT_WIFI_HO_REQ;
- break;
-#endif /* BT_WIFI_HANDOVER */
-
- default:
- ANDROID_ERROR(("Event not supported\n"));
- }
-
- return event;
-}
-
-s32
-wl_genl_send_msg(
- struct net_device *ndev,
- u32 event_type,
- const u8 *buf,
- u16 len,
- u8 *subhdr,
- u16 subhdr_len)
-{
- int ret = 0;
- struct sk_buff *skb;
- void *msg;
- u32 attr_type = 0;
- bcm_event_hdr_t *hdr = NULL;
- int mcast = 1; /* By default sent as mutlicast type */
- int pid = 0;
- u8 *ptr = NULL, *p = NULL;
- u32 tot_len = sizeof(bcm_event_hdr_t) + subhdr_len + len;
- u16 kflags = in_atomic() ? GFP_ATOMIC : GFP_KERNEL;
- struct bcm_cfg80211 *cfg = wl_get_cfg(ndev);
-
- ANDROID_INFO(("Enter \n"));
-
- /* Decide between STRING event and Data event */
- if (event_type == 0)
- attr_type = BCM_GENL_ATTR_STRING;
- else
- attr_type = BCM_GENL_ATTR_MSG;
-
- skb = genlmsg_new(NLMSG_GOODSIZE, kflags);
- if (skb == NULL) {
- ret = -ENOMEM;
- goto out;
- }
-
- msg = genlmsg_put(skb, 0, 0, &wl_genl_family, 0, BCM_GENL_CMD_MSG);
- if (msg == NULL) {
- ret = -ENOMEM;
- goto out;
- }
-
- if (attr_type == BCM_GENL_ATTR_STRING) {
- /* Add a BCM_GENL_MSG attribute. Since it is specified as a string.
- * make sure it is null terminated
- */
- if (subhdr || subhdr_len) {
- ANDROID_ERROR(("No sub hdr support for the ATTR STRING type \n"));
- ret = -EINVAL;
- goto out;
- }
-
- ret = nla_put_string(skb, BCM_GENL_ATTR_STRING, buf);
- if (ret != 0) {
- ANDROID_ERROR(("nla_put_string failed\n"));
- goto out;
- }
- } else {
- /* ATTR_MSG */
-
- /* Create a single buffer for all */
- p = ptr = (u8 *)MALLOCZ(cfg->osh, tot_len);
- if (!ptr) {
- ret = -ENOMEM;
- ANDROID_ERROR(("ENOMEM!!\n"));
- goto out;
- }
-
- /* Include the bcm event header */
- hdr = (bcm_event_hdr_t *)ptr;
- hdr->event_type = wl_event_to_bcm_event(event_type);
- hdr->len = len + subhdr_len;
- ptr += sizeof(bcm_event_hdr_t);
-
- /* Copy subhdr (if any) */
- if (subhdr && subhdr_len) {
- memcpy(ptr, subhdr, subhdr_len);
- ptr += subhdr_len;
- }
-
- /* Copy the data */
- if (buf && len) {
- memcpy(ptr, buf, len);
- }
-
- ret = nla_put(skb, BCM_GENL_ATTR_MSG, tot_len, p);
- if (ret != 0) {
- ANDROID_ERROR(("nla_put_string failed\n"));
- goto out;
- }
- }
-
- if (mcast) {
- int err = 0;
- /* finalize the message */
- genlmsg_end(skb, msg);
- /* NETLINK_CB(skb).dst_group = 1; */
-
-#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 13, 0)
- if ((err = genlmsg_multicast(skb, 0, wl_genl_mcast.id, GFP_ATOMIC)) < 0)
-#else
- if ((err = genlmsg_multicast(&wl_genl_family, skb, 0, 0, GFP_ATOMIC)) < 0)
-#endif // endif
- ANDROID_ERROR(("genlmsg_multicast for attr(%d) failed. Error:%d \n",
- attr_type, err));
- else
- ANDROID_INFO(("Multicast msg sent successfully. attr_type:%d len:%d \n",
- attr_type, tot_len));
- } else {
- NETLINK_CB(skb).dst_group = 0; /* Not in multicast group */
-
- /* finalize the message */
- genlmsg_end(skb, msg);
-
- /* send the message back */
- if (genlmsg_unicast(&init_net, skb, pid) < 0)
- ANDROID_ERROR(("genlmsg_unicast failed\n"));
- }
-
-out:
- if (p) {
- MFREE(cfg->osh, p, tot_len);
- }
- if (ret)
- nlmsg_free(skb);
-
- return ret;
-}
-
-static s32
-wl_genl_handle_msg(
- struct sk_buff *skb,
- struct genl_info *info)
-{
- struct nlattr *na;
- u8 *data = NULL;
-
- ANDROID_INFO(("Enter \n"));
-
- if (info == NULL) {
- return -EINVAL;
- }
-
- na = info->attrs[BCM_GENL_ATTR_MSG];
- if (!na) {
- ANDROID_ERROR(("nlattribute NULL\n"));
- return -EINVAL;
- }
-
- data = (char *)nla_data(na);
- if (!data) {
- ANDROID_ERROR(("Invalid data\n"));
- return -EINVAL;
- } else {
- /* Handle the data */
-#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 7, 0)) || defined(WL_COMPAT_WIRELESS)
- ANDROID_INFO(("%s: Data received from pid (%d) \n", __func__,
- info->snd_pid));
-#else
- ANDROID_INFO(("%s: Data received from pid (%d) \n", __func__,
- info->snd_portid));
-#endif /* (LINUX_VERSION < VERSION(3, 7, 0) || WL_COMPAT_WIRELESS */
- }
- return 0;
-}
-#endif /* WL_GENL */
int wl_fatal_error(void * wl, int rc)
{
g_wifi_on = enable;
}
#endif /* BT_OVER_SDIO */
-
-#ifdef WL_STATIC_IF
-#include <dhd_linux_priv.h>
-struct net_device *
-wl_cfg80211_register_static_if(struct bcm_cfg80211 *cfg, u16 iftype, char *ifname)
-{
- struct net_device *ndev;
- struct wireless_dev *wdev = NULL;
- int ifidx = WL_STATIC_IFIDX; /* Register ndev with a reserved ifidx */
- u8 mac_addr[ETH_ALEN];
- struct net_device *primary_ndev;
-#ifdef DHD_USE_RANDMAC
- struct ether_addr ea_addr;
-#endif /* DHD_USE_RANDMAC */
-#ifdef CUSTOM_MULTI_MAC
- char hw_ether[62];
- dhd_pub_t *dhd = cfg->pub;
-#endif
-
- WL_INFORM_MEM(("[STATIC_IF] Enter (%s) iftype:%d\n", ifname, iftype));
-
- if (!cfg) {
- ANDROID_ERROR(("cfg null\n"));
- return NULL;
- }
- primary_ndev = bcmcfg_to_prmry_ndev(cfg);
-
-#ifdef DHD_USE_RANDMAC
- dhd_generate_mac_addr(&ea_addr);
- (void)memcpy_s(mac_addr, ETH_ALEN, ea_addr.octet, ETH_ALEN);
-#else
-#if defined(CUSTOM_MULTI_MAC)
- if (wifi_platform_get_mac_addr(dhd->info->adapter, hw_ether, "wlan1")) {
-#endif
- /* Use primary mac with locally admin bit set */
- (void)memcpy_s(mac_addr, ETH_ALEN, primary_ndev->dev_addr, ETH_ALEN);
- mac_addr[0] |= 0x02;
-#if defined(CUSTOM_MULTI_MAC)
- } else {
- (void)memcpy_s(mac_addr, ETH_ALEN, hw_ether, ETH_ALEN);
- }
-#endif
-#endif /* DHD_USE_RANDMAC */
-
- ndev = wl_cfg80211_allocate_if(cfg, ifidx, ifname, mac_addr,
- WL_BSSIDX_MAX, NULL);
- if (unlikely(!ndev)) {
- ANDROID_ERROR(("Failed to allocate static_if\n"));
- goto fail;
- }
- wdev = (struct wireless_dev *)MALLOCZ(cfg->osh, sizeof(*wdev));
- if (unlikely(!wdev)) {
- ANDROID_ERROR(("Failed to allocate wdev for static_if\n"));
- goto fail;
- }
-
- wdev->wiphy = cfg->wdev->wiphy;
- wdev->iftype = iftype;
-
- ndev->ieee80211_ptr = wdev;
- SET_NETDEV_DEV(ndev, wiphy_dev(wdev->wiphy));
- wdev->netdev = ndev;
-
- if (wl_cfg80211_register_if(cfg, ifidx,
- ndev, TRUE) != BCME_OK) {
- ANDROID_ERROR(("ndev registration failed!\n"));
- goto fail;
- }
-
- cfg->static_ndev = ndev;
- cfg->static_ndev_state = NDEV_STATE_OS_IF_CREATED;
- wl_cfg80211_update_iflist_info(cfg, ndev, ifidx, NULL, WL_BSSIDX_MAX,
- ifname, NDEV_STATE_OS_IF_CREATED);
- WL_INFORM_MEM(("Static I/F (%s) Registered\n", ndev->name));
- return ndev;
-
-fail:
- wl_cfg80211_remove_if(cfg, ifidx, ndev, false);
- return NULL;
-}
-
-void
-wl_cfg80211_unregister_static_if(struct bcm_cfg80211 *cfg)
-{
- WL_INFORM_MEM(("[STATIC_IF] Enter\n"));
- if (!cfg || !cfg->static_ndev) {
- ANDROID_ERROR(("invalid input\n"));
- return;
- }
-
- /* wdev free will happen from notifier context */
- /* free_netdev(cfg->static_ndev);
- */
- unregister_netdev(cfg->static_ndev);
-}
-
-s32
-wl_cfg80211_static_if_open(struct net_device *net)
-{
- struct wireless_dev *wdev = NULL;
- struct bcm_cfg80211 *cfg = wl_get_cfg(net);
- struct net_device *primary_ndev = bcmcfg_to_prmry_ndev(cfg);
- u16 iftype = net->ieee80211_ptr ? net->ieee80211_ptr->iftype : 0;
- u16 wl_iftype, wl_mode;
-#ifdef CUSTOM_MULTI_MAC
- char hw_ether[62];
- dhd_pub_t *dhd = dhd_get_pub(net);
-#endif
-
- WL_INFORM_MEM(("[STATIC_IF] dev_open ndev %p and wdev %p\n", net, net->ieee80211_ptr));
- ASSERT(cfg->static_ndev == net);
-
- if (cfg80211_to_wl_iftype(iftype, &wl_iftype, &wl_mode) < 0) {
- return BCME_ERROR;
- }
- if (cfg->static_ndev_state != NDEV_STATE_FW_IF_CREATED) {
-#ifdef DHD_USE_RANDMAC
- wdev = wl_cfg80211_add_if(cfg, primary_ndev, wl_iftype, net->name, net->dev_addr);
-#else
-#if defined(CUSTOM_MULTI_MAC)
- if (wifi_platform_get_mac_addr(dhd->info->adapter, hw_ether, net->name)) {
-#endif
- wdev = wl_cfg80211_add_if(cfg, primary_ndev, wl_iftype, net->name, NULL);
-#if defined(CUSTOM_MULTI_MAC)
- } else {
- wdev = wl_cfg80211_add_if(cfg, primary_ndev, wl_iftype, net->name, hw_ether);
- }
-#endif
-#endif // endif
- if (!wdev) {
- ANDROID_ERROR(("[STATIC_IF] wdev is NULL, can't proceed"));
- return BCME_ERROR;
- }
- } else {
- WL_INFORM_MEM(("Fw IF for static netdev already created\n"));
- }
-
- return BCME_OK;
-}
-
-s32
-wl_cfg80211_static_if_close(struct net_device *net)
-{
- int ret = BCME_OK;
- struct bcm_cfg80211 *cfg = wl_get_cfg(net);
- struct net_device *primary_ndev = bcmcfg_to_prmry_ndev(cfg);
-
- if (cfg->static_ndev_state == NDEV_STATE_FW_IF_CREATED) {
- if (mutex_is_locked(&cfg->if_sync) == TRUE) {
- ret = _wl_cfg80211_del_if(cfg, primary_ndev, net->ieee80211_ptr, net->name);
- } else {
- ret = wl_cfg80211_del_if(cfg, primary_ndev, net->ieee80211_ptr, net->name);
- }
-
- if (unlikely(ret)) {
- ANDROID_ERROR(("Del iface failed for static_if %d\n", ret));
- }
- }
-
- return ret;
-}
-struct net_device *
-wl_cfg80211_post_static_ifcreate(struct bcm_cfg80211 *cfg,
- wl_if_event_info *event, u8 *addr, s32 iface_type)
-{
- struct net_device *new_ndev = NULL;
- struct wireless_dev *wdev = NULL;
-
- WL_INFORM_MEM(("Updating static iface after Fw IF create \n"));
- new_ndev = cfg->static_ndev;
-
- if (new_ndev) {
- wdev = new_ndev->ieee80211_ptr;
- ASSERT(wdev);
- wdev->iftype = iface_type;
- (void)memcpy_s(new_ndev->dev_addr, ETH_ALEN, addr, ETH_ALEN);
- }
-
- cfg->static_ndev_state = NDEV_STATE_FW_IF_CREATED;
- wl_cfg80211_update_iflist_info(cfg, new_ndev, event->ifidx, addr, event->bssidx,
- event->name, NDEV_STATE_FW_IF_CREATED);
- return new_ndev;
-}
-s32
-wl_cfg80211_post_static_ifdel(struct bcm_cfg80211 *cfg, struct net_device *ndev)
-{
- cfg->static_ndev_state = NDEV_STATE_FW_IF_DELETED;
- wl_cfg80211_update_iflist_info(cfg, ndev, WL_STATIC_IFIDX, NULL,
- WL_BSSIDX_MAX, NULL, NDEV_STATE_FW_IF_DELETED);
- wl_cfg80211_clear_per_bss_ies(cfg, ndev->ieee80211_ptr);
- wl_dealloc_netinfo_by_wdev(cfg, ndev->ieee80211_ptr);
- return BCME_OK;
-}
-#endif /* WL_STATIC_IF */
/*
* Linux cfg80211 driver - Android related functions
*
- * Copyright (C) 1999-2019, Broadcom.
- *
+ * Copyright (C) 1999-2017, Broadcom Corporation
+ *
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
* under the terms of the GNU General Public License version 2 (the "GPL"),
* available at http://www.broadcom.com/licenses/GPLv2.php, with the
* following added to such license:
- *
+ *
* As a special exception, the copyright holders of this software give you
* permission to link this software with independent modules, and to copy and
* distribute the resulting executable under terms of your choice, provided that
* the license of that module. An independent module is a module which is not
* derived from this software. The special exception does not apply to any
* modifications of the software.
- *
+ *
* Notwithstanding the above, under no circumstances may you combine this
* software in any way with any other Broadcom software provided under a license
* other than the GPL, without Broadcom's express prior written consent.
*
* <<Broadcom-WL-IPTag/Open:>>
*
- * $Id: wl_android.h 794110 2018-12-12 05:03:21Z $
+ * $Id: wl_android.h 607319 2015-12-18 14:16:55Z $
*/
#ifndef _wl_android_
#include <linux/module.h>
#include <linux/netdevice.h>
#include <wldev_common.h>
-#include <dngl_stats.h>
-#include <dhd.h>
/* If any feature uses the Generic Netlink Interface, put it here to enable WL_GENL
* automatically
*/
#if defined(BT_WIFI_HANDOVER)
#define WL_GENL
-#endif // endif
+#endif
+
-#ifdef WL_GENL
-#include <net/genetlink.h>
-#endif // endif
typedef struct _android_wifi_priv_cmd {
char *buf;
*/
/* message levels */
-#define ANDROID_ERROR_LEVEL (1 << 0)
-#define ANDROID_TRACE_LEVEL (1 << 1)
-#define ANDROID_INFO_LEVEL (1 << 2)
-#define ANDROID_SCAN_LEVEL (1 << 3)
-#define ANDROID_DBG_LEVEL (1 << 4)
-#define ANDROID_MSG_LEVEL (1 << 0)
+#define ANDROID_ERROR_LEVEL 0x0001
+#define ANDROID_TRACE_LEVEL 0x0002
+#define ANDROID_INFO_LEVEL 0x0004
-#define WL_MSG(name, arg1, args...) \
+#define ANDROID_ERROR(x) \
do { \
- if (android_msg_level & ANDROID_MSG_LEVEL) { \
- printk(KERN_ERR "[dhd-%s] %s : " arg1, name, __func__, ## args); \
+ if (android_msg_level & ANDROID_ERROR_LEVEL) { \
+ printk(KERN_ERR "ANDROID-ERROR) "); \
+ printk x; \
} \
} while (0)
-
-#define WL_MSG_PRINT_RATE_LIMIT_PERIOD 1000000000u /* 1s in units of ns */
-#define WL_MSG_RLMT(name, cmp, size, arg1, args...) \
-do { \
- if (android_msg_level & ANDROID_MSG_LEVEL) { \
- static uint64 __err_ts = 0; \
- static uint32 __err_cnt = 0; \
- uint64 __cur_ts = 0; \
- static uint8 static_tmp[size]; \
- __cur_ts = local_clock(); \
- if (__err_ts == 0 || (__cur_ts > __err_ts && \
- (__cur_ts - __err_ts > WL_MSG_PRINT_RATE_LIMIT_PERIOD)) || \
- memcmp(&static_tmp, cmp, size)) { \
- __err_ts = __cur_ts; \
- memcpy(static_tmp, cmp, size); \
- printk(KERN_ERR "[dhd-%s] %s : [%u times] " arg1, \
- name, __func__, __err_cnt, ## args); \
- __err_cnt = 0; \
- } else { \
- ++__err_cnt; \
+#define ANDROID_TRACE(x) \
+ do { \
+ if (android_msg_level & ANDROID_TRACE_LEVEL) { \
+ printk(KERN_ERR "ANDROID-TRACE) "); \
+ printk x; \
} \
- } \
-} while (0)
+ } while (0)
+#define ANDROID_INFO(x) \
+ do { \
+ if (android_msg_level & ANDROID_INFO_LEVEL) { \
+ printk(KERN_ERR "ANDROID-INFO) "); \
+ printk x; \
+ } \
+ } while (0)
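/*
 * Editorial note, not part of the original patch: the ANDROID_* macros above
 * expand to "printk x;", so callers wrap the printf-style argument list in an
 * extra pair of parentheses and output is gated by the android_msg_level mask.
 * A minimal usage sketch, matching calls made elsewhere in this diff:
 *
 *	ANDROID_ERROR(("wl_ext_ioctl: cmd=%d ret=%d\n", cmd, ret));
 *	ANDROID_TRACE(("%s: command result is %s\n", __FUNCTION__, command));
 *	ANDROID_INFO(("Enter\n"));
 */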
/**
* wl_android_init will be called from module init function (dhd_module_init now), similarly
void wl_android_post_init(void);
int wl_android_wifi_on(struct net_device *dev);
int wl_android_wifi_off(struct net_device *dev, bool on_failure);
-int wl_android_priv_cmd(struct net_device *net, struct ifreq *ifr);
+int wl_android_priv_cmd(struct net_device *net, struct ifreq *ifr, int cmd);
int wl_handle_private_cmd(struct net_device *net, char *command, u32 cmd_len);
+
+s32 wl_netlink_send_msg(int pid, int type, int seq, const void *data, size_t size);
#ifdef WL_EXT_IAPSTA
int wl_ext_iapsta_attach_netdev(struct net_device *net, int ifidx, uint8 bssidx);
int wl_ext_iapsta_attach_name(struct net_device *net, int ifidx);
-int wl_ext_iapsta_dettach_netdev(struct net_device *net, int ifidx);
-int wl_ext_iapsta_update_net_device(struct net_device *net, int ifidx);
-void wl_ext_add_remove_pm_enable_work(struct net_device *dev, bool add);
-#ifdef PROPTX_MAXCOUNT
-void wl_ext_update_wlfc_maxcount(struct dhd_pub *dhd);
-int wl_ext_get_wlfc_maxcount(struct dhd_pub *dhd, int ifidx);
-#endif /* PROPTX_MAXCOUNT */
+int wl_ext_iapsta_dettach_netdev(void);
+u32 wl_ext_iapsta_update_channel(struct net_device *dev, u32 channel);
int wl_ext_iapsta_alive_preinit(struct net_device *dev);
int wl_ext_iapsta_alive_postinit(struct net_device *dev);
-int wl_ext_iapsta_attach(dhd_pub_t *pub);
-void wl_ext_iapsta_dettach(dhd_pub_t *pub);
-#ifdef WL_CFG80211
-u32 wl_ext_iapsta_update_channel(dhd_pub_t *dhd, struct net_device *dev, u32 channel);
-void wl_ext_iapsta_update_iftype(struct net_device *net, int ifidx, int wl_iftype);
-bool wl_ext_iapsta_iftype_enabled(struct net_device *net, int wl_iftype);
-void wl_ext_iapsta_ifadding(struct net_device *net, int ifidx);
-bool wl_ext_iapsta_mesh_creating(struct net_device *net);
-#endif
+int wl_ext_iapsta_event(struct net_device *dev, wl_event_msg_t *e, void* data);
extern int op_mode;
#endif
-typedef struct bcol_gtk_para {
- int enable;
- int ptk_len;
- char ptk[64];
- char replay[8];
-} bcol_gtk_para_t;
-#define ACS_FW_BIT (1<<0)
-#define ACS_DRV_BIT (1<<1)
-#if defined(WL_EXT_IAPSTA) || defined(USE_IW)
-typedef enum WL_EVENT_PRIO {
- PRIO_EVENT_IAPSTA,
- PRIO_EVENT_ESCAN,
- PRIO_EVENT_WEXT
-}wl_event_prio_t;
-s32 wl_ext_event_attach(struct net_device *dev, dhd_pub_t *dhdp);
-void wl_ext_event_dettach(dhd_pub_t *dhdp);
-int wl_ext_event_attach_netdev(struct net_device *net, int ifidx, uint8 bssidx);
-int wl_ext_event_dettach_netdev(struct net_device *net, int ifidx);
-int wl_ext_event_register(struct net_device *dev, dhd_pub_t *dhd,
- uint32 event, void *cb_func, void *data, wl_event_prio_t prio);
-void wl_ext_event_deregister(struct net_device *dev, dhd_pub_t *dhd,
- uint32 event, void *cb_func);
-void wl_ext_event_send(void *params, const wl_event_msg_t * e, void *data);
-#endif
-int wl_ext_autochannel(struct net_device *dev, uint acs, uint32 band);
int wl_android_ext_priv_cmd(struct net_device *net, char *command, int total_len,
int *bytes_written);
-void wl_ext_get_sec(struct net_device *dev, int ifmode, char *sec, int total_len);
-bool wl_ext_check_scan(struct net_device *dev, dhd_pub_t *dhdp);
-#if defined(WL_CFG80211) || defined(WL_ESCAN)
-void wl_ext_user_sync(struct dhd_pub *dhd, int ifidx, bool lock);
-bool wl_ext_event_complete(struct dhd_pub *dhd, int ifidx);
-#endif
-#if defined(WL_CFG80211)
-void wl_ext_bss_iovar_war(struct net_device *dev, s32 *val);
-#endif
-enum wl_ext_status {
- WL_EXT_STATUS_DISCONNECTING = 0,
- WL_EXT_STATUS_DISCONNECTED,
- WL_EXT_STATUS_SCAN,
- WL_EXT_STATUS_CONNECTING,
- WL_EXT_STATUS_CONNECTED,
- WL_EXT_STATUS_ADD_KEY,
- WL_EXT_STATUS_AP_ENABLED,
- WL_EXT_STATUS_DELETE_STA,
- WL_EXT_STATUS_STA_DISCONNECTED,
- WL_EXT_STATUS_STA_CONNECTED,
- WL_EXT_STATUS_AP_DISABLED
-};
-typedef struct wl_conn_info {
- uint8 bssidx;
- wlc_ssid_t ssid;
- struct ether_addr bssid;
- uint16 channel;
-} wl_conn_info_t;
-#if defined(WL_WIRELESS_EXT)
-s32 wl_ext_connect(struct net_device *dev, wl_conn_info_t *conn_info);
-#endif /* defined(WL_WIRELESS_EXT) */
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 0, 0))
#define strnicmp(str1, str2, len) strncasecmp((str1), (str2), (len))
#endif
-#ifdef WL_GENL
-typedef struct bcm_event_hdr {
- u16 event_type;
- u16 len;
-} bcm_event_hdr_t;
-
-/* attributes (variables): the index in this enum is used as a reference for the type,
- * userspace application has to indicate the corresponding type
- * the policy is used for security considerations
- */
-enum {
- BCM_GENL_ATTR_UNSPEC,
- BCM_GENL_ATTR_STRING,
- BCM_GENL_ATTR_MSG,
- __BCM_GENL_ATTR_MAX
-};
-#define BCM_GENL_ATTR_MAX (__BCM_GENL_ATTR_MAX - 1)
-
-/* commands: enumeration of all commands (functions),
- * used by userspace application to identify command to be ececuted
- */
-enum {
- BCM_GENL_CMD_UNSPEC,
- BCM_GENL_CMD_MSG,
- __BCM_GENL_CMD_MAX
-};
-#define BCM_GENL_CMD_MAX (__BCM_GENL_CMD_MAX - 1)
-
-/* Enum values used by the BCM supplicant to identify the events */
-enum {
- BCM_E_UNSPEC,
- BCM_E_SVC_FOUND,
- BCM_E_DEV_FOUND,
- BCM_E_DEV_LOST,
-#ifdef BT_WIFI_HANDOVER
- BCM_E_DEV_BT_WIFI_HO_REQ,
-#endif // endif
- BCM_E_MAX
-};
-
-s32 wl_genl_send_msg(struct net_device *ndev, u32 event_type,
- const u8 *string, u16 len, u8 *hdr, u16 hdrlen);
-#endif /* WL_GENL */
-s32 wl_netlink_send_msg(int pid, int type, int seq, const void *data, size_t size);
-
/* hostap mac mode */
#define MACLIST_MODE_DISABLED 0
#define MACLIST_MODE_DENY 1
* restrict max number to 10 as maximum cmd string size is 255
*/
#define MAX_NUM_MAC_FILT 10
-#define WL_GET_BAND(ch) (((uint)(ch) <= CH_MAX_2G_CHANNEL) ? \
- WLC_BAND_2G : WLC_BAND_5G)
int wl_android_set_ap_mac_list(struct net_device *dev, int macmode, struct maclist *maclist);
-#ifdef WL_BCNRECV
-extern int wl_android_bcnrecv_config(struct net_device *ndev, char *data,
- int total_len);
-extern int wl_android_bcnrecv_stop(struct net_device *ndev, uint reason);
-extern int wl_android_bcnrecv_resume(struct net_device *ndev);
-extern int wl_android_bcnrecv_suspend(struct net_device *ndev);
-extern int wl_android_bcnrecv_event(struct net_device *ndev,
- uint attr_type, uint status, uint reason, uint8 *data, uint data_len);
-#endif /* WL_BCNRECV */
-#ifdef WL_CAC_TS
-#define TSPEC_UPLINK_DIRECTION (0 << 5) /* uplink direction traffic stream */
-#define TSPEC_DOWNLINK_DIRECTION (1 << 5) /* downlink direction traffic stream */
-#define TSPEC_BI_DIRECTION (3 << 5) /* bi direction traffic stream */
-#define TSPEC_EDCA_ACCESS (1 << 7) /* EDCA access policy */
-#define TSPEC_UAPSD_PSB (1 << 2) /* U-APSD power saving behavior */
-#define TSPEC_TSINFO_TID_SHIFT 1 /* TID Shift */
-#define TSPEC_TSINFO_PRIO_SHIFT 3 /* PRIO Shift */
-#define TSPEC_MAX_ACCESS_CATEGORY 3
-#define TSPEC_MAX_USER_PRIO 7
-#define TSPEC_MAX_DIALOG_TOKEN 255
-#define TSPEC_MAX_SURPLUS_BW 12410
-#define TSPEC_MIN_SURPLUS_BW 11210
-#define TSPEC_MAX_MSDU_SIZE 1520
-#define TSPEC_DEF_MEAN_DATA_RATE 120000
-#define TSPEC_DEF_MIN_PHY_RATE 6000000
-#define TSPEC_DEF_DIALOG_TOKEN 7
-#endif /* WL_CAC_TS */
/* terence:
* BSSCACHE: Cache bss list
typedef struct wl_bss_cache {
struct wl_bss_cache *next;
int dirty;
- struct osl_timespec tv;
+ struct timeval tv;
wl_scan_results_t results;
} wl_bss_cache_t;
#include <linux/wireless.h>
#if defined(WL_WIRELESS_EXT)
#include <wl_iw.h>
-#endif /* WL_WIRELESS_EXT */
+#endif
#include <wldev_common.h>
#include <wlioctl.h>
#include <bcmutils.h>
#include <dhd_config.h>
#ifdef WL_CFG80211
#include <wl_cfg80211.h>
-#endif /* WL_CFG80211 */
+#endif
#ifdef WL_ESCAN
#include <wl_escan.h>
-#endif /* WL_ESCAN */
-
-#define AEXT_ERROR(name, arg1, args...) \
- do { \
- if (android_msg_level & ANDROID_ERROR_LEVEL) { \
- printk(KERN_ERR "[dhd-%s] AEXT-ERROR) %s : " arg1, name, __func__, ## args); \
- } \
- } while (0)
-#define AEXT_TRACE(name, arg1, args...) \
- do { \
- if (android_msg_level & ANDROID_TRACE_LEVEL) { \
- printk(KERN_INFO "[dhd-%s] AEXT-TRACE) %s : " arg1, name, __func__, ## args); \
- } \
- } while (0)
-#define AEXT_INFO(name, arg1, args...) \
- do { \
- if (android_msg_level & ANDROID_INFO_LEVEL) { \
- printk(KERN_INFO "[dhd-%s] AEXT-INFO) %s : " arg1, name, __func__, ## args); \
- } \
- } while (0)
-#define AEXT_DBG(name, arg1, args...) \
- do { \
- if (android_msg_level & ANDROID_DBG_LEVEL) { \
- printk(KERN_INFO "[dhd-%s] AEXT-DBG) %s : " arg1, name, __func__, ## args); \
- } \
- } while (0)
+#endif
#ifndef WL_CFG80211
#define htod32(i) i
#define WL_SCAN_JOIN_PROBE_INTERVAL_MS 20
#define WL_SCAN_JOIN_ACTIVE_DWELL_TIME_MS 320
#define WL_SCAN_JOIN_PASSIVE_DWELL_TIME_MS 400
-#endif /* WL_CFG80211 */
+#endif
#define strtoul(nptr, endptr, base) bcm_strtoul((nptr), (endptr), (base))
#ifndef IW_CUSTOM_MAX
#define CMD_CHANNEL "CHANNEL"
#define CMD_CHANNELS "CHANNELS"
#define CMD_ROAM_TRIGGER "ROAM_TRIGGER"
+#define CMD_KEEP_ALIVE "KEEP_ALIVE"
#define CMD_PM "PM"
#define CMD_MONITOR "MONITOR"
#define CMD_SET_SUSPEND_BCN_LI_DTIM "SET_SUSPEND_BCN_LI_DTIM"
#define CMD_ISAM_ENABLE "ISAM_ENABLE"
#define CMD_ISAM_DISABLE "ISAM_DISABLE"
#define CMD_ISAM_STATUS "ISAM_STATUS"
-#define CMD_ISAM_PEER_PATH "ISAM_PEER_PATH"
-#define CMD_ISAM_PARAM "ISAM_PARAM"
#ifdef PROP_TXSTATUS
#ifdef PROP_TXSTATUS_VSDB
#include <dhd_wlfc.h>
extern int disable_proptx;
#endif /* PROP_TXSTATUS_VSDB */
-#endif /* PROP_TXSTATUS */
-#endif /* WL_EXT_IAPSTA */
+#endif
+#endif
+#ifdef IDHCP
+#define CMD_DHCPC_ENABLE "DHCPC_ENABLE"
+#define CMD_DHCPC_DUMP "DHCPC_DUMP"
+#endif
#define CMD_AUTOCHANNEL "AUTOCHANNEL"
#define CMD_WL "WL"
+int wl_ext_ioctl(struct net_device *dev, u32 cmd, void *arg, u32 len, u32 set)
+{
+ int ret;
+
+ ret = wldev_ioctl(dev, cmd, arg, len, set);
+ if (ret)
+ ANDROID_ERROR(("%s: cmd=%d ret=%d\n", __FUNCTION__, cmd, ret));
+ return ret;
+}
+
+int wl_ext_iovar_getint(struct net_device *dev, s8 *iovar, s32 *val)
+{
+ int ret;
+
+ ret = wldev_iovar_getint(dev, iovar, val);
+ if (ret)
+ ANDROID_ERROR(("%s: iovar=%s, ret=%d\n", __FUNCTION__, iovar, ret));
+
+ return ret;
+}
+
+int wl_ext_iovar_setint(struct net_device *dev, s8 *iovar, s32 val)
+{
+ int ret;
+
+ ret = wldev_iovar_setint(dev, iovar, val);
+ if (ret)
+ ANDROID_ERROR(("%s: iovar=%s, ret=%d\n", __FUNCTION__, iovar, ret));
+
+ return ret;
+}
+
+int wl_ext_iovar_getbuf(struct net_device *dev, s8 *iovar_name,
+ void *param, s32 paramlen, void *buf, s32 buflen, struct mutex* buf_sync)
+{
+ int ret;
+
+ ret = wldev_iovar_getbuf(dev, iovar_name, param, paramlen, buf, buflen, buf_sync);
+ if (ret != 0)
+ ANDROID_ERROR(("%s: iovar=%s, ret=%d\n", __FUNCTION__, iovar_name, ret));
+
+ return ret;
+}
+
+int wl_ext_iovar_setbuf(struct net_device *dev, s8 *iovar_name,
+ void *param, s32 paramlen, void *buf, s32 buflen, struct mutex* buf_sync)
+{
+ int ret;
+
+ ret = wldev_iovar_setbuf(dev, iovar_name, param, paramlen, buf, buflen, buf_sync);
+ if (ret != 0)
+ ANDROID_ERROR(("%s: iovar=%s, ret=%d\n", __FUNCTION__, iovar_name, ret));
+
+ return ret;
+}
+
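/*
 * Editorial note, not part of the original patch: the wl_ext_* helpers above
 * are thin wrappers that forward to the corresponding wldev_* calls and log
 * failures through ANDROID_ERROR(). A minimal usage sketch (variable names
 * are illustrative), mirroring calls made later in this diff:
 *
 *	s32 bw_cap = 0;
 *	int channel = 36;
 *	if (wl_ext_iovar_getint(dev, "mimo_bw_cap", &bw_cap) == 0)
 *		ANDROID_TRACE(("mimo_bw_cap = %d\n", bw_cap));
 *	wl_ext_ioctl(dev, WLC_SET_CHANNEL, &channel, sizeof(channel), 1);
 */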
#ifdef WL_EXT_IAPSTA
+typedef enum IF_STATE {
+ IF_STATE_INIT = 1,
+ IF_STATE_DISALBE,
+ IF_STATE_ENABLE
+} if_state_t;
+
typedef enum APSTAMODE {
- IUNKNOWN_MODE = 0,
ISTAONLY_MODE = 1,
- IAPONLY_MODE = 2,
- ISTAAP_MODE = 3,
- ISTAGO_MODE = 4,
- ISTASTA_MODE = 5,
- IDUALAP_MODE = 6,
- ISTAAPAP_MODE = 7,
- IMESHONLY_MODE = 8,
- ISTAMESH_MODE = 9,
- IMESHAP_MODE = 10,
- ISTAAPMESH_MODE = 11,
- IMESHAPAP_MODE = 12
+ IAPONLY_MODE,
+ IAPSTA_MODE,
+ IDUALAP_MODE,
+ ISTAAPAP_MODE,
+ IMESHONLY_MODE,
+ IMESHSTA_MODE,
+ IMESHAP_MODE,
+ IMESHAPSTA_MODE,
+ IMESHAPAP_MODE,
+ IGOSTA_MODE
} apstamode_t;
typedef enum IFMODE {
ISTA_MODE = 1,
IAP_MODE,
- IGO_MODE,
- IGC_MODE,
IMESH_MODE
} ifmode_t;
PRIO_AP,
PRIO_MESH,
PRIO_STA
-} wl_prio_t;
+}wl_prio_t;
typedef struct wl_if_info {
struct net_device *dev;
+ if_state_t ifstate;
ifmode_t ifmode;
- unsigned long status;
char prefix;
wl_prio_t prio;
int ifidx;
authmode_t amode;
encmode_t emode;
char key[100];
-#if defined(WLMESH) && defined(WL_ESCAN)
- struct wl_escan_info *escan;
- timer_list_compat_t delay_scan;
-#endif /* WLMESH && WL_ESCAN */
- struct delayed_work pm_enable_work;
- struct mutex pm_sync;
-#ifdef PROPTX_MAXCOUNT
- int transit_maxcount;
-#endif /* PROP_TXSTATUS_VSDB */
} wl_if_info_t;
#define CSA_FW_BIT (1<<0)
typedef struct wl_apsta_params {
struct wl_if_info if_info[MAX_IF_NUM];
- struct dhd_pub *dhd;
int ioctl_ver;
bool init;
- int rsdb;
+ bool rsdb;
bool vsdb;
uint csa;
- uint acs;
- bool radar;
apstamode_t apstamode;
+ bool netif_change;
wait_queue_head_t netif_change_event;
- struct mutex usr_sync;
-#if defined(WLMESH) && defined(WL_ESCAN)
- int macs;
- struct wl_mesh_params mesh_info;
-#endif /* WLMESH && WL_ESCAN */
} wl_apsta_params_t;
-#define MAX_AP_LINK_WAIT_TIME 3000
-#define MAX_STA_LINK_WAIT_TIME 15000
-enum wifi_isam_status {
- ISAM_STATUS_IF_ADDING = 0,
- ISAM_STATUS_IF_READY,
- ISAM_STATUS_STA_CONNECTING,
- ISAM_STATUS_STA_CONNECTED,
- ISAM_STATUS_AP_CREATING,
- ISAM_STATUS_AP_CREATED
-};
-
-#define wl_get_isam_status(cur_if, stat) \
- (test_bit(ISAM_STATUS_ ## stat, &(cur_if)->status))
-#define wl_set_isam_status(cur_if, stat) \
- (set_bit(ISAM_STATUS_ ## stat, &(cur_if)->status))
-#define wl_clr_isam_status(cur_if, stat) \
- (clear_bit(ISAM_STATUS_ ## stat, &(cur_if)->status))
-#define wl_chg_isam_status(cur_if, stat) \
- (change_bit(ISAM_STATUS_ ## stat, &(cur_if)->status))
-
-static int wl_ext_enable_iface(struct net_device *dev, char *ifname, int wait_up);
-static int wl_ext_disable_iface(struct net_device *dev, char *ifname);
-#if defined(WLMESH) && defined(WL_ESCAN)
-static int wl_mesh_escan_attach(dhd_pub_t *dhd, struct wl_if_info *cur_if);
-#endif /* WLMESH && WL_ESCAN */
-#endif /* WL_EXT_IAPSTA */
-
-#ifdef IDHCP
-typedef struct dhcpc_parameter {
- uint32 ip_addr;
- uint32 ip_serv;
- uint32 lease_time;
-} dhcpc_para_t;
-#endif /* IDHCP */
-
-#ifdef WL_EXT_WOWL
-#define WL_WOWL_TCPFIN (1 << 26)
-typedef struct wl_wowl_pattern2 {
- char cmd[4];
- wl_wowl_pattern_t wowl_pattern;
-} wl_wowl_pattern2_t;
-#endif /* WL_EXT_WOWL */
-
-#ifdef WL_EXT_TCPKA
-typedef struct tcpka_conn {
- uint32 sess_id;
- struct ether_addr dst_mac; /* Destinition Mac */
- struct ipv4_addr src_ip; /* Sorce IP */
- struct ipv4_addr dst_ip; /* Destinition IP */
- uint16 ipid; /* Ip Identification */
- uint16 srcport; /* Source Port Address */
- uint16 dstport; /* Destination Port Address */
- uint32 seq; /* TCP Sequence Number */
- uint32 ack; /* TCP Ack Number */
- uint16 tcpwin; /* TCP window */
- uint32 tsval; /* Timestamp Value */
- uint32 tsecr; /* Timestamp Echo Reply */
- uint32 len; /* last packet payload len */
- uint32 ka_payload_len; /* keep alive payload length */
- uint8 ka_payload[1]; /* keep alive payload */
-} tcpka_conn_t;
-
-typedef struct tcpka_conn_sess {
- uint32 sess_id; /* session id */
- uint32 flag; /* enable/disable flag */
- wl_mtcpkeep_alive_timers_pkt_t tcpka_timers;
-} tcpka_conn_sess_t;
-
-typedef struct tcpka_conn_info {
- uint32 ipid;
- uint32 seq;
- uint32 ack;
-} tcpka_conn_sess_info_t;
-#endif /* WL_EXT_TCPKA */
-
-static int wl_ext_wl_iovar(struct net_device *dev, char *command, int total_len);
-
-static int
-wl_ext_ioctl(struct net_device *dev, u32 cmd, void *arg, u32 len, u32 set)
-{
- int ret;
-
- ret = wldev_ioctl(dev, cmd, arg, len, set);
- if (ret)
- AEXT_ERROR(dev->name, "cmd=%d, ret=%d\n", cmd, ret);
- return ret;
-}
-
-static int
-wl_ext_iovar_getint(struct net_device *dev, s8 *iovar, s32 *val)
-{
- int ret;
-
- ret = wldev_iovar_getint(dev, iovar, val);
- if (ret)
- AEXT_ERROR(dev->name, "iovar=%s, ret=%d\n", iovar, ret);
-
- return ret;
-}
-
-static int
-wl_ext_iovar_setint(struct net_device *dev, s8 *iovar, s32 val)
-{
- int ret;
-
- ret = wldev_iovar_setint(dev, iovar, val);
- if (ret)
- AEXT_ERROR(dev->name, "iovar=%s, ret=%d\n", iovar, ret);
-
- return ret;
-}
-
-static int
-wl_ext_iovar_getbuf(struct net_device *dev, s8 *iovar_name,
- void *param, s32 paramlen, void *buf, s32 buflen, struct mutex* buf_sync)
-{
- int ret;
-
- ret = wldev_iovar_getbuf(dev, iovar_name, param, paramlen, buf, buflen, buf_sync);
- if (ret != 0)
- AEXT_ERROR(dev->name, "iovar=%s, ret=%d\n", iovar_name, ret);
-
- return ret;
-}
-
-static int
-wl_ext_iovar_setbuf(struct net_device *dev, s8 *iovar_name,
- void *param, s32 paramlen, void *buf, s32 buflen, struct mutex* buf_sync)
-{
- int ret;
-
- ret = wldev_iovar_setbuf(dev, iovar_name, param, paramlen, buf, buflen, buf_sync);
- if (ret != 0)
- AEXT_ERROR(dev->name, "iovar=%s, ret=%d\n", iovar_name, ret);
-
- return ret;
-}
-
-static int
-wl_ext_iovar_setbuf_bsscfg(struct net_device *dev, s8 *iovar_name,
+static int wl_ext_enable_iface(struct net_device *dev, char *ifname);
+int wl_ext_iovar_setbuf_bsscfg(struct net_device *dev, s8 *iovar_name,
void *param, s32 paramlen, void *buf, s32 buflen, s32 bsscfg_idx,
struct mutex* buf_sync)
{
ret = wldev_iovar_setbuf_bsscfg(dev, iovar_name, param, paramlen,
buf, buflen, bsscfg_idx, buf_sync);
if (ret < 0)
- AEXT_ERROR(dev->name, "iovar=%s, ret=%d\n", iovar_name, ret);
+ ANDROID_ERROR(("%s: iovar_name=%s ret=%d\n", __FUNCTION__, iovar_name, ret));
return ret;
}
+#endif
+/* Return a legacy chanspec given a new chanspec
+ * Returns INVCHANSPEC on error
+ */
static chanspec_t
wl_ext_chspec_to_legacy(chanspec_t chspec)
{
chanspec_t lchspec;
if (wf_chspec_malformed(chspec)) {
- AEXT_ERROR("wlan", "input chanspec (0x%04X) malformed\n", chspec);
+ ANDROID_ERROR(("wl_ext_chspec_to_legacy: input chanspec (0x%04X) malformed\n",
+ chspec));
return INVCHANSPEC;
}
} else {
/* cannot express the bandwidth */
char chanbuf[CHANSPEC_STR_LEN];
- AEXT_ERROR("wlan", "unable to convert chanspec %s (0x%04X) "
- "to pre-11ac format\n",
- wf_chspec_ntoa(chspec, chanbuf), chspec);
+ ANDROID_ERROR((
+ "wl_ext_chspec_to_legacy: unable to convert chanspec %s (0x%04X) "
+ "to pre-11ac format\n",
+ wf_chspec_ntoa(chspec, chanbuf), chspec));
return INVCHANSPEC;
}
return lchspec;
}
+/* given a chanspec value, do the endian and chanspec version conversion to
+ * a chanspec_t value
+ * Returns INVCHANSPEC on error
+ */
static chanspec_t
wl_ext_chspec_host_to_driver(int ioctl_ver, chanspec_t chanspec)
{
return chanspec;
}
-static void
-wl_ext_ch_to_chanspec(int ioctl_ver, int ch,
- struct wl_join_params *join_params, size_t *join_params_size)
-{
- chanspec_t chanspec = 0;
-
- if (ch != 0) {
- join_params->params.chanspec_num = 1;
- join_params->params.chanspec_list[0] = ch;
-
- if (join_params->params.chanspec_list[0] <= CH_MAX_2G_CHANNEL)
- chanspec |= WL_CHANSPEC_BAND_2G;
- else
- chanspec |= WL_CHANSPEC_BAND_5G;
-
- chanspec |= WL_CHANSPEC_BW_20;
- chanspec |= WL_CHANSPEC_CTL_SB_NONE;
-
- *join_params_size += WL_ASSOC_PARAMS_FIXED_SIZE +
- join_params->params.chanspec_num * sizeof(chanspec_t);
-
- join_params->params.chanspec_list[0] &= WL_CHANSPEC_CHAN_MASK;
- join_params->params.chanspec_list[0] |= chanspec;
- join_params->params.chanspec_list[0] =
- wl_ext_chspec_host_to_driver(ioctl_ver,
- join_params->params.chanspec_list[0]);
-
- join_params->params.chanspec_num =
- htod32(join_params->params.chanspec_num);
- }
-}
-
#if defined(WL_EXT_IAPSTA) || defined(WL_CFG80211) || defined(WL_ESCAN)
static chanspec_t
wl_ext_chspec_from_legacy(chanspec_t legacy_chspec)
}
if (wf_chspec_malformed(chspec)) {
- AEXT_ERROR("wlan", "output chanspec (0x%04X) malformed\n", chspec);
+ ANDROID_ERROR(("wl_ext_chspec_from_legacy: output chanspec (0x%04X) malformed\n",
+ chspec));
return INVCHANSPEC;
}
return chanspec;
}
-#endif /* WL_EXT_IAPSTA || WL_CFG80211 || WL_ESCAN */
-
-bool
-wl_ext_check_scan(struct net_device *dev, dhd_pub_t *dhdp)
-{
-#ifdef WL_CFG80211
- struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
-#endif /* WL_CFG80211 */
-#ifdef WL_ESCAN
- struct wl_escan_info *escan = dhdp->escan;
-#endif /* WL_ESCAN */
-
-#ifdef WL_CFG80211
- if (wl_get_drv_status_all(cfg, SCANNING)) {
- AEXT_ERROR(dev->name, "cfg80211 scanning...\n");
- return TRUE;
- }
-#endif /* WL_CFG80211 */
-
-#ifdef WL_ESCAN
- if (escan->escan_state == ESCAN_STATE_SCANING) {
- AEXT_ERROR(dev->name, "escan scanning...\n");
- return TRUE;
- }
-#endif /* WL_ESCAN */
-
- return FALSE;
-}
-
-#if defined(WL_CFG80211) || defined(WL_ESCAN)
-void
-wl_ext_user_sync(struct dhd_pub *dhd, int ifidx, bool lock)
-{
- struct net_device *dev = dhd_idx2net(dhd, ifidx);
-#ifdef WL_CFG80211
- struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
-#endif /* WL_CFG80211 */
-#ifdef WL_ESCAN
- struct wl_escan_info *escan = dhd->escan;
-#endif /* WL_ESCAN */
-
- AEXT_INFO(dev->name, "lock=%d\n", lock);
-
- if (lock) {
-#if defined(WL_CFG80211)
- mutex_lock(&cfg->usr_sync);
-#endif
-#if defined(WL_ESCAN)
- mutex_lock(&escan->usr_sync);
#endif
- } else {
-#if defined(WL_CFG80211)
- mutex_unlock(&cfg->usr_sync);
-#endif
-#if defined(WL_ESCAN)
- mutex_unlock(&escan->usr_sync);
-#endif
- }
-}
-
-bool
-wl_ext_event_complete(struct dhd_pub *dhd, int ifidx)
-{
- struct net_device *dev = dhd_idx2net(dhd, ifidx);
-#ifdef WL_CFG80211
- struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
-#endif /* WL_CFG80211 */
-#ifdef WL_ESCAN
- struct wl_escan_info *escan = dhd->escan;
-#endif /* WL_ESCAN */
- bool complete = TRUE;
-
-#ifdef WL_CFG80211
- if (wl_get_drv_status_all(cfg, SCANNING)) {
- AEXT_INFO(dev->name, "SCANNING\n");
- complete = FALSE;
- }
- if (wl_get_drv_status_all(cfg, CONNECTING)) {
- AEXT_INFO(dev->name, "CONNECTING\n");
- complete = FALSE;
- }
- if (wl_get_drv_status_all(cfg, DISCONNECTING)) {
- AEXT_INFO(dev->name, "DISCONNECTING\n");
- complete = FALSE;
- }
-#endif /* WL_CFG80211 */
-#ifdef WL_ESCAN
- if (escan->escan_state == ESCAN_STATE_SCANING) {
- AEXT_INFO(dev->name, "ESCAN_STATE_SCANING\n");
- complete = FALSE;
- }
-#endif /* WL_ESCAN */
- if (dhd->conf->eapol_status >= EAPOL_STATUS_4WAY_START &&
- dhd->conf->eapol_status < EAPOL_STATUS_4WAY_DONE) {
- AEXT_INFO(dev->name, "4-WAY handshaking\n");
- complete = FALSE;
- }
-
- return complete;
-}
-#endif /* WL_CFG80211 && WL_ESCAN */
static int
wl_ext_get_ioctl_ver(struct net_device *dev, int *ioctl_ver)
val = 1;
ret = wl_ext_ioctl(dev, WLC_GET_VERSION, &val, sizeof(val), 0);
if (ret) {
+ ANDROID_ERROR(("WLC_GET_VERSION failed, err=%d\n", ret));
return ret;
}
val = dtoh32(val);
if (val != WLC_IOCTL_VERSION && val != 1) {
- AEXT_ERROR(dev->name, "Version mismatch, please upgrade. Got %d, expected %d or 1\n",
- val, WLC_IOCTL_VERSION);
+ ANDROID_ERROR(("Version mismatch, please upgrade. Got %d, expected %d or 1\n",
+ val, WLC_IOCTL_VERSION));
return BCME_VERSION;
}
*ioctl_ver = val;
return ret;
}
-void
-wl_ext_bss_iovar_war(struct net_device *ndev, s32 *val)
-{
- dhd_pub_t *dhd = dhd_get_pub(ndev);
- uint chip;
- bool need_war = false;
-
- chip = dhd_conf_get_chip(dhd);
-
- if (chip == BCM43362_CHIP_ID || chip == BCM4330_CHIP_ID ||
- chip == BCM43430_CHIP_ID || chip == BCM43012_CHIP_ID ||
- chip == BCM4345_CHIP_ID || chip == BCM4356_CHIP_ID ||
- chip == BCM4359_CHIP_ID) {
- need_war = true;
- }
-
- if (need_war) {
- /* Few firmware branches have issues in bss iovar handling and
- * that can't be changed since they are in production.
- */
- if (*val == WLC_AP_IOV_OP_MANUAL_AP_BSSCFG_CREATE) {
- *val = WLC_AP_IOV_OP_MANUAL_STA_BSSCFG_CREATE;
- } else if (*val == WLC_AP_IOV_OP_MANUAL_STA_BSSCFG_CREATE) {
- *val = WLC_AP_IOV_OP_MANUAL_AP_BSSCFG_CREATE;
- } else {
- /* Ignore for other bss enums */
- return;
- }
- AEXT_TRACE(ndev->name, "wl bss %d\n", *val);
- }
-}
-
static int
wl_ext_set_chanspec(struct net_device *dev, int ioctl_ver,
uint16 channel, chanspec_t *ret_chspec)
if (band == IEEE80211_BAND_5GHZ) {
param.band = WLC_BAND_5G;
- err = wl_ext_iovar_getbuf(dev, "bw_cap", ¶m, sizeof(param),
+ err = wldev_iovar_getbuf(dev, "bw_cap", ¶m, sizeof(param),
iovar_buf, WLC_IOCTL_SMLEN, NULL);
if (err) {
if (err != BCME_UNSUPPORTED) {
- AEXT_ERROR(dev->name, "bw_cap failed, %d\n", err);
+ ANDROID_ERROR(("bw_cap failed, %d\n", err));
return err;
} else {
- err = wl_ext_iovar_getint(dev, "mimo_bw_cap", &bw_cap);
+ err = wldev_iovar_getint(dev, "mimo_bw_cap", &bw_cap);
+ if (err) {
+ ANDROID_ERROR(("error get mimo_bw_cap (%d)\n", err));
+ }
if (bw_cap != WLC_N_BW_20ALL)
bw = WL_CHANSPEC_BW_40;
}
if (wf_chspec_valid(chspec)) {
fw_chspec = wl_ext_chspec_host_to_driver(ioctl_ver, chspec);
if (fw_chspec != INVCHANSPEC) {
- if ((err = wl_ext_iovar_setint(dev, "chanspec", fw_chspec)) == BCME_BADCHAN) {
+ if ((err = wldev_iovar_setint(dev, "chanspec", fw_chspec)) == BCME_BADCHAN) {
if (bw == WL_CHANSPEC_BW_80)
goto change_bw;
- err = wl_ext_ioctl(dev, WLC_SET_CHANNEL, &_chan, sizeof(_chan), 1);
- WL_MSG(dev->name, "channel %d\n", _chan);
+ wl_ext_ioctl(dev, WLC_SET_CHANNEL, &_chan, sizeof(_chan), 1);
+ printf("%s: channel %d\n", __FUNCTION__, _chan);
} else if (err) {
- AEXT_ERROR(dev->name, "failed to set chanspec error %d\n", err);
+ ANDROID_ERROR(("%s: failed to set chanspec error %d\n",
+ __FUNCTION__, err));
} else
- WL_MSG(dev->name, "channel %d, 0x%x\n", channel, chspec);
+ printf("%s: %s channel %d, 0x%x\n", __FUNCTION__,
+ dev->name, channel, chspec);
} else {
- AEXT_ERROR(dev->name, "failed to convert host chanspec to fw chanspec\n");
+ ANDROID_ERROR(("%s: failed to convert host chanspec to fw chanspec\n",
+ __FUNCTION__));
err = BCME_ERROR;
}
} else {
bw = 0;
if (bw)
goto set_channel;
- AEXT_ERROR(dev->name, "Invalid chanspec 0x%x\n", chspec);
+ ANDROID_ERROR(("%s: Invalid chanspec 0x%x\n", __FUNCTION__, chspec));
err = BCME_ERROR;
}
*ret_chspec = fw_chspec;
return err;
}
-static int
+int
wl_ext_channel(struct net_device *dev, char* command, int total_len)
{
int ret;
chanspec_t fw_chspec;
int ioctl_ver = 0;
- AEXT_TRACE(dev->name, "cmd %s", command);
+ ANDROID_TRACE(("%s: cmd %s\n", __FUNCTION__, command));
sscanf(command, "%*s %d", &channel);
wl_ext_get_ioctl_ver(dev, &ioctl_ver);
ret = wl_ext_set_chanspec(dev, ioctl_ver, channel, &fw_chspec);
} else {
- if (!(ret = wl_ext_ioctl(dev, WLC_GET_CHANNEL, &ci,
+ if (!(ret = wldev_ioctl(dev, WLC_GET_CHANNEL, &ci,
sizeof(channel_info_t), FALSE))) {
- AEXT_TRACE(dev->name, "hw_channel %d\n", ci.hw_channel);
- AEXT_TRACE(dev->name, "target_channel %d\n", ci.target_channel);
- AEXT_TRACE(dev->name, "scan_channel %d\n", ci.scan_channel);
+ ANDROID_TRACE(("hw_channel %d\n", ci.hw_channel));
+ ANDROID_TRACE(("target_channel %d\n", ci.target_channel));
+ ANDROID_TRACE(("scan_channel %d\n", ci.scan_channel));
bytes_written = snprintf(command, sizeof(channel_info_t)+2,
"channel %d", ci.hw_channel);
- AEXT_TRACE(dev->name, "command result is %s\n", command);
+ ANDROID_TRACE(("%s: command result is %s\n", __FUNCTION__, command));
ret = bytes_written;
}
}
return ret;
}
-static int
+int
wl_ext_channels(struct net_device *dev, char* command, int total_len)
{
int ret, i;
u8 valid_chan_list[sizeof(u32)*(WL_NUMCHANNELS + 1)];
wl_uint32_list_t *list;
- AEXT_TRACE(dev->name, "cmd %s", command);
+ ANDROID_TRACE(("%s: cmd %s\n", __FUNCTION__, command));
memset(valid_chan_list, 0, sizeof(valid_chan_list));
list = (wl_uint32_list_t *)(void *) valid_chan_list;
list->count = htod32(WL_NUMCHANNELS);
- ret = wl_ext_ioctl(dev, WLC_GET_VALID_CHANNELS, valid_chan_list,
+ ret = wldev_ioctl(dev, WLC_GET_VALID_CHANNELS, valid_chan_list,
sizeof(valid_chan_list), 0);
if (ret<0) {
- AEXT_ERROR(dev->name, "get channels failed with %d\n", ret);
+ ANDROID_ERROR(("%s: get channels failed with %d\n", __FUNCTION__, ret));
} else {
bytes_written = snprintf(command, total_len, "channels");
for (i = 0; i < dtoh32(list->count); i++) {
bytes_written += snprintf(command+bytes_written, total_len, " %d",
dtoh32(list->element[i]));
+ printf("%d ", dtoh32(list->element[i]));
}
- AEXT_TRACE(dev->name, "command result is %s\n", command);
+ printf("\n");
ret = bytes_written;
}
return ret;
}
-static int
+int
wl_ext_roam_trigger(struct net_device *dev, char* command, int total_len)
{
int ret = 0;
if (roam_trigger[0]) {
roam_trigger[1] = WLC_BAND_ALL;
- ret = wl_ext_ioctl(dev, WLC_SET_ROAM_TRIGGER, roam_trigger,
+ ret = wldev_ioctl(dev, WLC_SET_ROAM_TRIGGER, roam_trigger,
sizeof(roam_trigger), 1);
+ if (ret)
+ ANDROID_ERROR(("WLC_SET_ROAM_TRIGGER ERROR %d ret=%d\n",
+ roam_trigger[0], ret));
} else {
roam_trigger[1] = WLC_BAND_2G;
- ret = wl_ext_ioctl(dev, WLC_GET_ROAM_TRIGGER, roam_trigger,
+ ret = wldev_ioctl(dev, WLC_GET_ROAM_TRIGGER, roam_trigger,
sizeof(roam_trigger), 0);
if (!ret)
trigger[0] = roam_trigger[0];
+ else
+ ANDROID_ERROR(("2G WLC_GET_ROAM_TRIGGER ERROR %d ret=%d\n",
+ roam_trigger[0], ret));
roam_trigger[1] = WLC_BAND_5G;
- ret = wl_ext_ioctl(dev, WLC_GET_ROAM_TRIGGER, &roam_trigger,
+ ret = wldev_ioctl(dev, WLC_GET_ROAM_TRIGGER, roam_trigger,
sizeof(roam_trigger), 0);
if (!ret)
trigger[1] = roam_trigger[0];
+ else
+ ANDROID_ERROR(("5G WLC_GET_ROAM_TRIGGER ERROR %d ret=%d\n",
+ roam_trigger[0], ret));
- AEXT_TRACE(dev->name, "roam_trigger %d %d\n", trigger[0], trigger[1]);
+ ANDROID_TRACE(("roam_trigger %d %d\n", trigger[0], trigger[1]));
bytes_written = snprintf(command, total_len, "%d %d", trigger[0], trigger[1]);
ret = bytes_written;
}
}
static int
+wl_ext_pattern_atoh(char *src, char *dst)
+{
+ int i;
+ if (strncmp(src, "0x", 2) != 0 &&
+ strncmp(src, "0X", 2) != 0) {
+ ANDROID_ERROR(("Mask invalid format. Needs to start with 0x\n"));
+ return -1;
+ }
+ src = src + 2; /* Skip past 0x */
+ if (strlen(src) % 2 != 0) {
+ DHD_ERROR(("Mask invalid format. Needs to be of even length\n"));
+ return -1;
+ }
+ for (i = 0; *src != '\0'; i++) {
+ char num[3];
+ bcm_strncpy_s(num, sizeof(num), src, 2);
+ num[2] = '\0';
+ dst[i] = (uint8)strtoul(num, NULL, 16);
+ src += 2;
+ }
+ return i;
+}
+
+int
+wl_ext_keep_alive(struct net_device *dev, char *command, int total_len)
+{
+ wl_mkeep_alive_pkt_t *mkeep_alive_pktp;
+ int ret = -1, i;
+ int id, period=-1, len_bytes=0, buf_len=0;
+ char data[200]="\0";
+ char buf[WLC_IOCTL_SMLEN]="\0", iovar_buf[WLC_IOCTL_SMLEN]="\0";
+ int bytes_written = -1;
+
+ ANDROID_TRACE(("%s: command = %s\n", __FUNCTION__, command));
+ sscanf(command, "%*s %d %d %s", &id, &period, data);
+ ANDROID_TRACE(("%s: id=%d, period=%d, data=%s\n", __FUNCTION__, id, period, data));
+
+ if (period >= 0) {
+ mkeep_alive_pktp = (wl_mkeep_alive_pkt_t *)buf;
+ mkeep_alive_pktp->version = htod16(WL_MKEEP_ALIVE_VERSION);
+ mkeep_alive_pktp->length = htod16(WL_MKEEP_ALIVE_FIXED_LEN);
+ mkeep_alive_pktp->keep_alive_id = id;
+ buf_len += WL_MKEEP_ALIVE_FIXED_LEN;
+ mkeep_alive_pktp->period_msec = period;
+ if (strlen(data)) {
+ len_bytes = wl_ext_pattern_atoh(data, (char *) mkeep_alive_pktp->data);
+ buf_len += len_bytes;
+ }
+ mkeep_alive_pktp->len_bytes = htod16(len_bytes);
+
+ ret = wl_ext_iovar_setbuf(dev, "mkeep_alive", buf, buf_len,
+ iovar_buf, sizeof(iovar_buf), NULL);
+ } else {
+ if (id < 0)
+ id = 0;
+ ret = wl_ext_iovar_getbuf(dev, "mkeep_alive", &id, sizeof(id), buf,
+ sizeof(buf), NULL);
+ if (ret) {
+ goto exit;
+ } else {
+ mkeep_alive_pktp = (wl_mkeep_alive_pkt_t *) buf;
+ printf("Id :%d\n"
+ "Period (msec) :%d\n"
+ "Length :%d\n"
+ "Packet :0x",
+ mkeep_alive_pktp->keep_alive_id,
+ dtoh32(mkeep_alive_pktp->period_msec),
+ dtoh16(mkeep_alive_pktp->len_bytes));
+ for (i=0; i<mkeep_alive_pktp->len_bytes; i++) {
+ printf("%02x", mkeep_alive_pktp->data[i]);
+ }
+ printf("\n");
+ }
+ bytes_written = snprintf(command, total_len, "mkeep_alive_period_msec %d ",
+ dtoh32(mkeep_alive_pktp->period_msec));
+ bytes_written += snprintf(command+bytes_written, total_len, "0x");
+ for (i=0; i<mkeep_alive_pktp->len_bytes; i++) {
+ bytes_written += snprintf(command+bytes_written, total_len, "%x",
+ mkeep_alive_pktp->data[i]);
+ }
+ ANDROID_TRACE(("%s: command result is %s\n", __FUNCTION__, command));
+ ret = bytes_written;
+ }
+
+exit:
+ return ret;
+}
+
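/*
 * Editorial note, not part of the original patch: wl_ext_keep_alive() parses
 * the private command as "KEEP_ALIVE <id> <period_msec> <0xHEXDATA>" via
 * sscanf(command, "%*s %d %d %s", ...); the hex payload is converted to raw
 * bytes by wl_ext_pattern_atoh() above. Example command strings (values are
 * assumptions, for illustration only):
 *
 *	"KEEP_ALIVE 1 30000 0x0a0b0c0d"   -- program keep-alive id 1, 30 s period
 *	"KEEP_ALIVE 1"                    -- query id 1 (period keeps its -1 default)
 */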
+int
wl_ext_pm(struct net_device *dev, char *command, int total_len)
{
int pm=-1, ret = -1;
char *pm_local;
int bytes_written=-1;
- AEXT_TRACE(dev->name, "cmd %s", command);
+ ANDROID_TRACE(("%s: cmd %s\n", __FUNCTION__, command));
sscanf(command, "%*s %d", &pm);
if (pm >= 0) {
- ret = wl_ext_ioctl(dev, WLC_SET_PM, &pm, sizeof(pm), 1);
+ ret = wldev_ioctl(dev, WLC_SET_PM, &pm, sizeof(pm), FALSE);
+ if (ret)
+ ANDROID_ERROR(("WLC_SET_PM ERROR %d ret=%d\n", pm, ret));
} else {
- ret = wl_ext_ioctl(dev, WLC_GET_PM, &pm, sizeof(pm), 0);
+ ret = wldev_ioctl(dev, WLC_GET_PM, &pm, sizeof(pm), FALSE);
if (!ret) {
- AEXT_TRACE(dev->name, "PM = %d", pm);
+ ANDROID_TRACE(("%s: PM = %d\n", __func__, pm));
if (pm == PM_OFF)
pm_local = "PM_OFF";
else if(pm == PM_MAX)
pm_local = "Invalid";
}
bytes_written = snprintf(command, total_len, "PM %s", pm_local);
- AEXT_TRACE(dev->name, "command result is %s\n", command);
+ ANDROID_TRACE(("%s: command result is %s\n", __FUNCTION__, command));
ret = bytes_written;
}
}
static int
wl_ext_monitor(struct net_device *dev, char *command, int total_len)
{
- int val = -1, ret = -1;
+ int val, ret = -1;
int bytes_written=-1;
sscanf(command, "%*s %d", &val);
if (val >=0) {
- ret = wl_ext_ioctl(dev, WLC_SET_MONITOR, &val, sizeof(val), 1);
+ ret = wldev_ioctl(dev, WLC_SET_MONITOR, &val, sizeof(int), 1);
+ if (ret)
+ ANDROID_ERROR(("WLC_SET_MONITOR ERROR %d ret=%d\n", val, ret));
} else {
- ret = wl_ext_ioctl(dev, WLC_GET_MONITOR, &val, sizeof(val), 0);
+ ret = wldev_ioctl(dev, WLC_GET_MONITOR, &val, sizeof(val), FALSE);
if (!ret) {
- AEXT_TRACE(dev->name, "monitor = %d\n", val);
+ ANDROID_TRACE(("%s: monitor = %d\n", __FUNCTION__, val));
bytes_written = snprintf(command, total_len, "monitor %d", val);
- AEXT_TRACE(dev->name, "command result is %s\n", command);
+ ANDROID_TRACE(("%s: command result is %s\n", __FUNCTION__, command));
ret = bytes_written;
}
}
return ret;
}
-s32
-wl_ext_connect(struct net_device *dev, struct wl_conn_info *conn_info)
-{
- struct dhd_pub *dhd = dhd_get_pub(dev);
- wl_extjoin_params_t *ext_join_params = NULL;
- struct wl_join_params join_params;
- size_t join_params_size;
- s32 err = 0;
- u32 chan_cnt = 0;
- s8 *iovar_buf = NULL;
- int ioctl_ver = 0;
- char sec[32];
-
- wl_ext_get_ioctl_ver(dev, &ioctl_ver);
-
- if (dhd->conf->chip == BCM43362_CHIP_ID)
- goto set_ssid;
-
- if (conn_info->channel) {
- chan_cnt = 1;
- }
-
- iovar_buf = kzalloc(WLC_IOCTL_MAXLEN, GFP_KERNEL);
- if (iovar_buf == NULL) {
- err = -ENOMEM;
- goto exit;
- }
-
- /*
- * Join with specific BSSID and cached SSID
- * If SSID is zero join based on BSSID only
- */
- join_params_size = WL_EXTJOIN_PARAMS_FIXED_SIZE +
- chan_cnt * sizeof(chanspec_t);
- ext_join_params = (wl_extjoin_params_t*)kzalloc(join_params_size, GFP_KERNEL);
- if (ext_join_params == NULL) {
- err = -ENOMEM;
- goto exit;
- }
- ext_join_params->ssid.SSID_len = min((uint32)sizeof(ext_join_params->ssid.SSID),
- conn_info->ssid.SSID_len);
- memcpy(&ext_join_params->ssid.SSID, conn_info->ssid.SSID, ext_join_params->ssid.SSID_len);
- ext_join_params->ssid.SSID_len = htod32(ext_join_params->ssid.SSID_len);
- /* increate dwell time to receive probe response or detect Beacon
- * from target AP at a noisy air only during connect command
- */
- ext_join_params->scan.active_time = chan_cnt ? WL_SCAN_JOIN_ACTIVE_DWELL_TIME_MS : -1;
- ext_join_params->scan.passive_time = chan_cnt ? WL_SCAN_JOIN_PASSIVE_DWELL_TIME_MS : -1;
- /* Set up join scan parameters */
- ext_join_params->scan.scan_type = -1;
- ext_join_params->scan.nprobes = chan_cnt ?
- (ext_join_params->scan.active_time/WL_SCAN_JOIN_PROBE_INTERVAL_MS) : -1;
- ext_join_params->scan.home_time = -1;
-
- if (memcmp(&ether_null, &conn_info->bssid, ETHER_ADDR_LEN))
- memcpy(&ext_join_params->assoc.bssid, &conn_info->bssid, ETH_ALEN);
- else
- memcpy(&ext_join_params->assoc.bssid, &ether_bcast, ETH_ALEN);
- ext_join_params->assoc.chanspec_num = chan_cnt;
- if (chan_cnt) {
- u16 band, bw, ctl_sb;
- chanspec_t chspec;
- band = (conn_info->channel <= CH_MAX_2G_CHANNEL) ? WL_CHANSPEC_BAND_2G
- : WL_CHANSPEC_BAND_5G;
- bw = WL_CHANSPEC_BW_20;
- ctl_sb = WL_CHANSPEC_CTL_SB_NONE;
- chspec = (conn_info->channel | band | bw | ctl_sb);
- ext_join_params->assoc.chanspec_list[0] &= WL_CHANSPEC_CHAN_MASK;
- ext_join_params->assoc.chanspec_list[0] |= chspec;
- ext_join_params->assoc.chanspec_list[0] =
- wl_ext_chspec_host_to_driver(ioctl_ver,
- ext_join_params->assoc.chanspec_list[0]);
- }
- ext_join_params->assoc.chanspec_num = htod32(ext_join_params->assoc.chanspec_num);
-
- wl_ext_get_sec(dev, 0, sec, sizeof(sec));
- WL_MSG(dev->name,
- "Connecting with %pM channel (%d) ssid \"%s\", len (%d), sec=%s\n\n",
- &ext_join_params->assoc.bssid, conn_info->channel,
- ext_join_params->ssid.SSID, ext_join_params->ssid.SSID_len, sec);
- err = wl_ext_iovar_setbuf_bsscfg(dev, "join", ext_join_params,
- join_params_size, iovar_buf, WLC_IOCTL_MAXLEN, conn_info->bssidx, NULL);
-
- if (err) {
- if (err == BCME_UNSUPPORTED) {
- AEXT_TRACE(dev->name, "join iovar is not supported\n");
- goto set_ssid;
- } else {
- AEXT_ERROR(dev->name, "error (%d)\n", err);
- goto exit;
- }
- } else
- goto exit;
-
-set_ssid:
- memset(&join_params, 0, sizeof(join_params));
- join_params_size = sizeof(join_params.ssid);
-
- join_params.ssid.SSID_len = min((uint32)sizeof(join_params.ssid.SSID),
- conn_info->ssid.SSID_len);
- memcpy(&join_params.ssid.SSID, conn_info->ssid.SSID, join_params.ssid.SSID_len);
- join_params.ssid.SSID_len = htod32(join_params.ssid.SSID_len);
- if (memcmp(&ether_null, &conn_info->bssid, ETHER_ADDR_LEN))
- memcpy(&join_params.params.bssid, &conn_info->bssid, ETH_ALEN);
- else
- memcpy(&join_params.params.bssid, &ether_bcast, ETH_ALEN);
-
- wl_ext_ch_to_chanspec(ioctl_ver, conn_info->channel, &join_params, &join_params_size);
- AEXT_TRACE(dev->name, "join_param_size %zu\n", join_params_size);
-
- if (join_params.ssid.SSID_len < IEEE80211_MAX_SSID_LEN) {
- AEXT_INFO(dev->name, "ssid \"%s\", len (%d)\n", join_params.ssid.SSID,
- join_params.ssid.SSID_len);
- }
- wl_ext_get_sec(dev, 0, sec, sizeof(sec));
- WL_MSG(dev->name,
- "Connecting with %pM channel (%d) ssid \"%s\", len (%d), sec=%s\n\n",
- &join_params.params.bssid, conn_info->channel,
- join_params.ssid.SSID, join_params.ssid.SSID_len, sec);
- err = wl_ext_ioctl(dev, WLC_SET_SSID, &join_params, join_params_size, 1);
-
-exit:
-#ifdef WL_EXT_IAPSTA
- if (!err)
- wl_ext_add_remove_pm_enable_work(dev, TRUE);
-#endif /* WL_EXT_IAPSTA */
- if (iovar_buf)
- kfree(iovar_buf);
- if (ext_join_params)
- kfree(ext_join_params);
- return err;
-
-}
-
-void
-wl_ext_get_sec(struct net_device *dev, int ifmode, char *sec, int total_len)
-{
- int auth=0, wpa_auth=0, wsec=0, mfp=0;
- int bytes_written=0;
-
- memset(sec, 0, total_len);
- wl_ext_iovar_getint(dev, "auth", &auth);
- wl_ext_iovar_getint(dev, "wpa_auth", &wpa_auth);
- wl_ext_iovar_getint(dev, "wsec", &wsec);
- wldev_iovar_getint(dev, "mfp", &mfp);
-
-#ifdef WL_EXT_IAPSTA
- if (ifmode == IMESH_MODE) {
- if (auth == WL_AUTH_OPEN_SYSTEM && wpa_auth == WPA_AUTH_DISABLED) {
- bytes_written += snprintf(sec+bytes_written, total_len, "open");
- } else if (auth == WL_AUTH_OPEN_SYSTEM && wpa_auth == WPA2_AUTH_PSK) {
- bytes_written += snprintf(sec+bytes_written, total_len, "sae");
- } else {
- bytes_written += snprintf(sec+bytes_written, total_len, "%d/0x%x",
- auth, wpa_auth);
- }
- } else
-#endif /* WL_EXT_IAPSTA */
- {
- if (auth == WL_AUTH_OPEN_SYSTEM && wpa_auth == WPA_AUTH_DISABLED) {
- bytes_written += snprintf(sec+bytes_written, total_len, "open");
- } else if (auth == WL_AUTH_SHARED_KEY && wpa_auth == WPA_AUTH_DISABLED) {
- bytes_written += snprintf(sec+bytes_written, total_len, "shared");
- } else if (auth == WL_AUTH_OPEN_SYSTEM && wpa_auth == WPA_AUTH_PSK) {
- bytes_written += snprintf(sec+bytes_written, total_len, "wpapsk");
- } else if (auth == WL_AUTH_OPEN_SYSTEM && wpa_auth == WPA2_AUTH_PSK) {
- bytes_written += snprintf(sec+bytes_written, total_len, "wpa2psk");
- } else if (auth == WL_AUTH_OPEN_SHARED && wpa_auth == WPA3_AUTH_SAE_PSK) {
- bytes_written += snprintf(sec+bytes_written, total_len, "wpa3");
- } else if ((auth == WL_AUTH_OPEN_SYSTEM || auth == WL_AUTH_SAE_KEY) &&
- wpa_auth == 0x20) {
- bytes_written += snprintf(sec+bytes_written, total_len, "wpa3");
- } else {
- bytes_written += snprintf(sec+bytes_written, total_len, "%d/0x%x",
- auth, wpa_auth);
- }
- }
-
- if (mfp == WL_MFP_NONE) {
- bytes_written += snprintf(sec+bytes_written, total_len, "/mfpn");
- } else if (mfp == WL_MFP_CAPABLE) {
- bytes_written += snprintf(sec+bytes_written, total_len, "/mfpc");
- } else if (mfp == WL_MFP_REQUIRED) {
- bytes_written += snprintf(sec+bytes_written, total_len, "/mfpr");
- } else {
- bytes_written += snprintf(sec+bytes_written, total_len, "/%d", mfp);
- }
-
-#ifdef WL_EXT_IAPSTA
- if (ifmode == IMESH_MODE) {
- if (wsec == WSEC_NONE) {
- bytes_written += snprintf(sec+bytes_written, total_len, "/none");
- } else {
- bytes_written += snprintf(sec+bytes_written, total_len, "/aes");
- }
- } else
-#endif /* WL_EXT_IAPSTA */
- {
- if (wsec == WSEC_NONE) {
- bytes_written += snprintf(sec+bytes_written, total_len, "/none");
- } else if (wsec == WEP_ENABLED) {
- bytes_written += snprintf(sec+bytes_written, total_len, "/wep");
- } else if (wsec == (TKIP_ENABLED|AES_ENABLED) ||
- wsec == (WSEC_SWFLAG|TKIP_ENABLED|AES_ENABLED)) {
- bytes_written += snprintf(sec+bytes_written, total_len, "/tkipaes");
- } else if (wsec == TKIP_ENABLED || wsec == (WSEC_SWFLAG|TKIP_ENABLED)) {
- bytes_written += snprintf(sec+bytes_written, total_len, "/tkip");
- } else if (wsec == AES_ENABLED || wsec == (WSEC_SWFLAG|AES_ENABLED)) {
- bytes_written += snprintf(sec+bytes_written, total_len, "/aes");
- } else {
- bytes_written += snprintf(sec+bytes_written, total_len, "/0x%x", wsec);
- }
- }
-
-}
-
-static bool
-wl_ext_dfs_chan(uint16 chan)
-{
- if (chan >= 52 && chan <= 144)
- return TRUE;
- return FALSE;
-}
-
-static uint16
-wl_ext_get_default_chan(struct net_device *dev,
- uint16 *chan_2g, uint16 *chan_5g, bool nodfs)
-{
- struct dhd_pub *dhd = dhd_get_pub(dev);
- uint16 chan_tmp = 0, chan = 0;
- wl_uint32_list_t *list;
- u8 valid_chan_list[sizeof(u32)*(WL_NUMCHANNELS + 1)];
- s32 ret = BCME_OK;
- int i;
-
- *chan_2g = 0;
- *chan_5g = 0;
- memset(valid_chan_list, 0, sizeof(valid_chan_list));
- list = (wl_uint32_list_t *)(void *) valid_chan_list;
- list->count = htod32(WL_NUMCHANNELS);
- ret = wl_ext_ioctl(dev, WLC_GET_VALID_CHANNELS, valid_chan_list,
- sizeof(valid_chan_list), 0);
- if (ret == 0) {
- for (i=0; i<dtoh32(list->count); i++) {
- chan_tmp = dtoh32(list->element[i]);
- if (!dhd_conf_match_channel(dhd, chan_tmp))
- continue;
- if (chan_tmp <= 13) {
- *chan_2g = chan_tmp;
- } else {
- if (wl_ext_dfs_chan(chan_tmp) && nodfs)
- continue;
- else if (chan_tmp >= 36 && chan_tmp <= 161)
- *chan_5g = chan_tmp;
- }
- }
- }
-
- return chan;
-}
-
-#if defined(SENDPROB) || (defined(WLMESH) && defined(WL_ESCAN))
-static int
-wl_ext_add_del_ie(struct net_device *dev, uint pktflag, char *ie_data, const char* add_del_cmd)
-{
- vndr_ie_setbuf_t *vndr_ie = NULL;
- char iovar_buf[WLC_IOCTL_SMLEN]="\0";
- int ie_data_len = 0, tot_len = 0, iecount;
- int err = -1;
-
- if (!strlen(ie_data)) {
- AEXT_ERROR(dev->name, "wrong ie %s\n", ie_data);
- goto exit;
- }
-
- tot_len = (int)(sizeof(vndr_ie_setbuf_t) + ((strlen(ie_data)-2)/2));
- vndr_ie = (vndr_ie_setbuf_t *) kzalloc(tot_len, GFP_KERNEL);
- if (!vndr_ie) {
- AEXT_ERROR(dev->name, "IE memory alloc failed\n");
- err = -ENOMEM;
- goto exit;
- }
-
- /* Copy the vndr_ie SET command ("add"/"del") to the buffer */
- strncpy(vndr_ie->cmd, add_del_cmd, VNDR_IE_CMD_LEN - 1);
- vndr_ie->cmd[VNDR_IE_CMD_LEN - 1] = '\0';
-
- /* Set the IE count - the buffer contains only 1 IE */
- iecount = htod32(1);
- memcpy((void *)&vndr_ie->vndr_ie_buffer.iecount, &iecount, sizeof(s32));
-
- /* Set packet flag to indicate that BEACON's will contain this IE */
- pktflag = htod32(pktflag);
- memcpy((void *)&vndr_ie->vndr_ie_buffer.vndr_ie_list[0].pktflag, &pktflag,
- sizeof(u32));
-
- /* Set the IE ID */
- vndr_ie->vndr_ie_buffer.vndr_ie_list[0].vndr_ie_data.id = (uchar)DOT11_MNG_VS_ID;
-
- /* Set the IE LEN */
- vndr_ie->vndr_ie_buffer.vndr_ie_list[0].vndr_ie_data.len = (strlen(ie_data)-2)/2;
-
- /* Set the IE OUI and DATA */
- ie_data_len = wl_pattern_atoh(ie_data,
- (char *)vndr_ie->vndr_ie_buffer.vndr_ie_list[0].vndr_ie_data.oui);
- if (ie_data_len <= 0) {
- AEXT_ERROR(dev->name, "wrong ie_data_len %d\n", (int)strlen(ie_data)-2);
- goto exit;
- }
-
- err = wl_ext_iovar_setbuf(dev, "vndr_ie", vndr_ie, tot_len, iovar_buf,
- sizeof(iovar_buf), NULL);
-
-exit:
- if (vndr_ie) {
- kfree(vndr_ie);
- }
- return err;
-}
-#endif /* SENDPROB || (WLMESH && WL_ESCAN) */
-
-#ifdef WL_EXT_IAPSTA
-#define WL_PM_ENABLE_TIMEOUT 10000
-#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == \
- 4 && __GNUC_MINOR__ >= 6))
-#define BCM_SET_CONTAINER_OF(entry, ptr, type, member) \
-_Pragma("GCC diagnostic push") \
-_Pragma("GCC diagnostic ignored \"-Wcast-qual\"") \
-entry = container_of((ptr), type, member); \
-_Pragma("GCC diagnostic pop")
-#else
-#define BCM_SET_CONTAINER_OF(entry, ptr, type, member) \
-entry = container_of((ptr), type, member);
-#endif /* STRICT_GCC_WARNINGS */
-
-static void
-wl_ext_pm_work_handler(struct work_struct *work)
-{
- struct wl_if_info *cur_if;
- s32 pm = PM_FAST;
- dhd_pub_t *dhd;
-
- BCM_SET_CONTAINER_OF(cur_if, work, struct wl_if_info, pm_enable_work.work);
-
- WL_TRACE(("%s: Enter\n", __FUNCTION__));
-
- if (cur_if->dev == NULL)
- return;
-
-#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == \
- 4 && __GNUC_MINOR__ >= 6))
-_Pragma("GCC diagnostic push")
-_Pragma("GCC diagnostic ignored \"-Wcast-qual\"")
-#endif
-
- dhd = dhd_get_pub(cur_if->dev);
-
- if (!dhd || !dhd->up) {
- AEXT_TRACE(cur_if->ifname, "dhd is null or not up\n");
- return;
- }
- if (dhd_conf_get_pm(dhd) >= 0)
- pm = dhd_conf_get_pm(dhd);
- wl_ext_ioctl(cur_if->dev, WLC_SET_PM, &pm, sizeof(pm), 1);
-#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == \
- 4 && __GNUC_MINOR__ >= 6))
-_Pragma("GCC diagnostic pop")
-#endif
- DHD_PM_WAKE_UNLOCK(dhd);
-
-}
-
-void
-wl_ext_add_remove_pm_enable_work(struct net_device *dev, bool add)
-{
- dhd_pub_t *dhd = dhd_get_pub(dev);
- struct wl_apsta_params *apsta_params = dhd->iapsta_params;
- struct wl_if_info *cur_if = NULL, *tmp_if = NULL;
- u16 wq_duration = 0;
- s32 pm = PM_OFF;
- int i;
-
- for (i=0; i<MAX_IF_NUM; i++) {
- tmp_if = &apsta_params->if_info[i];
- if (tmp_if->dev && tmp_if->dev == dev) {
- cur_if = tmp_if;
- break;
- }
- }
-
- if (!cur_if)
- return;
-
- mutex_lock(&cur_if->pm_sync);
- /*
- * Make cancel and schedule work part mutually exclusive
- * so that while cancelling, we are sure that there is no
- * work getting scheduled.
- */
-
- if (delayed_work_pending(&cur_if->pm_enable_work)) {
- cancel_delayed_work_sync(&cur_if->pm_enable_work);
- DHD_PM_WAKE_UNLOCK(dhd);
- }
-
- if (add) {
- wq_duration = (WL_PM_ENABLE_TIMEOUT);
- }
-
- /* It should schedule work item only if driver is up */
- if (dhd->up) {
- if (dhd_conf_get_pm(dhd) >= 0)
- pm = dhd_conf_get_pm(dhd);
- wl_ext_ioctl(cur_if->dev, WLC_SET_PM, &pm, sizeof(pm), 1);
- if (wq_duration) {
- if (schedule_delayed_work(&cur_if->pm_enable_work,
- msecs_to_jiffies((const unsigned int)wq_duration))) {
- DHD_PM_WAKE_LOCK_TIMEOUT(dhd, wq_duration);
- } else {
- AEXT_ERROR(cur_if->ifname, "Can't schedule pm work handler\n");
- }
- }
- }
- mutex_unlock(&cur_if->pm_sync);
-
-}
-
-static int
-wl_ext_parse_wep(char *key, struct wl_wsec_key *wsec_key)
+#ifdef WL_EXT_IAPSTA
+struct wl_apsta_params g_apsta_params;
+static int
+wl_ext_parse_wep(char *key, struct wl_wsec_key *wsec_key)
{
char hex[] = "XX";
unsigned char *data = wsec_key->data;
wl_ext_iovar_setint(dev, "nmode", 0);
val = 0;
wl_ext_ioctl(dev, WLC_SET_GMODE, &val, sizeof(val), 1);
- AEXT_TRACE(dev->name, "Network mode: B only\n");
+ ANDROID_TRACE(("%s: Network mode: B only\n", __FUNCTION__));
} else if (bgnmode == IEEE80211G) {
wl_ext_iovar_setint(dev, "nmode", 0);
val = 2;
wl_ext_ioctl(dev, WLC_SET_GMODE, &val, sizeof(val), 1);
- AEXT_TRACE(dev->name, "Network mode: G only\n");
+ ANDROID_TRACE(("%s: Network mode: G only\n", __FUNCTION__));
} else if (bgnmode == IEEE80211BG) {
wl_ext_iovar_setint(dev, "nmode", 0);
val = 1;
wl_ext_ioctl(dev, WLC_SET_GMODE, &val, sizeof(val), 1);
- AEXT_TRACE(dev->name, "Network mode: B/G mixed\n");
+ ANDROID_TRACE(("%s: Network mode: B/G mixed\n", __FUNCTION__));
} else if (bgnmode == IEEE80211BGN) {
wl_ext_iovar_setint(dev, "nmode", 0);
wl_ext_iovar_setint(dev, "nmode", 1);
wl_ext_iovar_setint(dev, "vhtmode", 0);
val = 1;
wl_ext_ioctl(dev, WLC_SET_GMODE, &val, sizeof(val), 1);
- AEXT_TRACE(dev->name, "Network mode: B/G/N mixed\n");
+ ANDROID_TRACE(("%s: Network mode: B/G/N mixed\n", __FUNCTION__));
} else if (bgnmode == IEEE80211BGNAC) {
wl_ext_iovar_setint(dev, "nmode", 0);
wl_ext_iovar_setint(dev, "nmode", 1);
wl_ext_iovar_setint(dev, "vhtmode", 1);
val = 1;
wl_ext_ioctl(dev, WLC_SET_GMODE, &val, sizeof(val), 1);
- AEXT_TRACE(dev->name, "Network mode: B/G/N/AC mixed\n");
+ ANDROID_TRACE(("%s: Network mode: B/G/N/AC mixed\n", __FUNCTION__));
}
wl_ext_ioctl(dev, WLC_UP, NULL, 0, 1);
return 0;
}
+static void
+wl_ext_get_amode(struct wl_if_info *cur_if, char *amode)
+{
+ struct net_device *dev = cur_if->dev;
+ int auth=-1, wpa_auth=-1;
+
+ wl_ext_iovar_getint(dev, "auth", &auth);
+ wl_ext_iovar_getint(dev, "wpa_auth", &wpa_auth);
+
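+ /* Numeric values used below: auth 0 = open system, 1 = shared key;
+ * wpa_auth 0 = disabled, 4 = WPA-PSK, 128 = WPA2-PSK, 132 = WPA-PSK|WPA2-PSK.
+ */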
+ if (cur_if->ifmode == IMESH_MODE) {
+ if (auth == 0 && wpa_auth == 0) {
+ strcpy(amode, "open");
+ } else if (auth == 0 && wpa_auth == 128) {
+ strcpy(amode, "sae");
+ }
+ } else if (auth == 0 && wpa_auth == 0) {
+ strcpy(amode, "open");
+ } else if (auth == 1 && wpa_auth == 0) {
+ strcpy(amode, "shared");
+ } else if (auth == 0 && wpa_auth == 4) {
+ strcpy(amode, "wpapsk");
+ } else if (auth == 0 && wpa_auth == 128) {
+ strcpy(amode, "wpa2psk");
+ } else if (auth == 0 && wpa_auth == 132) {
+ strcpy(amode, "wpawpa2psk");
+ }
+}
+
+static void
+wl_ext_get_emode(struct wl_if_info *cur_if, char *emode)
+{
+ struct net_device *dev = cur_if->dev;
+ int wsec=0;
+
+ wl_ext_iovar_getint(dev, "wsec", &wsec);
+
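+ /* wsec is a cipher bitmask: 1 = WEP, 2 = TKIP, 4 = AES; 10/12/14 are the
+ * same ciphers with the software-crypto flag (0x8) also set.
+ */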
+ if (cur_if->ifmode == IMESH_MODE) {
+ if (wsec == 0) {
+ strcpy(emode, "none");
+ } else {
+ strcpy(emode, "sae");
+ }
+ } else if (wsec == 0) {
+ strcpy(emode, "none");
+ } else if (wsec == 1) {
+ strcpy(emode, "wep");
+ } else if (wsec == 2 || wsec == 10) {
+ strcpy(emode, "tkip");
+ } else if (wsec == 4 || wsec == 12) {
+ strcpy(emode, "aes");
+ } else if (wsec == 6 || wsec == 14) {
+ strcpy(emode, "tkipaes");
+ }
+}
+
static int
wl_ext_set_amode(struct wl_if_info *cur_if)
{
authmode_t amode = cur_if->amode;
int auth=0, wpa_auth=0;
-#ifdef WLMESH
if (cur_if->ifmode == IMESH_MODE) {
if (amode == AUTH_SAE) {
- auth = WL_AUTH_OPEN_SYSTEM;
- wpa_auth = WPA2_AUTH_PSK;
- AEXT_INFO(dev->name, "SAE\n");
+ auth = 0;
+ wpa_auth = 128;
+ ANDROID_INFO(("%s: Authentication: SAE\n", __FUNCTION__));
} else {
- auth = WL_AUTH_OPEN_SYSTEM;
- wpa_auth = WPA_AUTH_DISABLED;
- AEXT_INFO(dev->name, "Open System\n");
- }
- } else
-#endif /* WLMESH */
- if (amode == AUTH_OPEN) {
- auth = WL_AUTH_OPEN_SYSTEM;
- wpa_auth = WPA_AUTH_DISABLED;
- AEXT_INFO(dev->name, "Open System\n");
+ auth = 0;
+ wpa_auth = 0;
+ ANDROID_INFO(("%s: Authentication: Open System\n", __FUNCTION__));
+ }
+ } else if (amode == AUTH_OPEN) {
+ auth = 0;
+ wpa_auth = 0;
+ ANDROID_INFO(("%s: Authentication: Open System\n", __FUNCTION__));
} else if (amode == AUTH_SHARED) {
- auth = WL_AUTH_SHARED_KEY;
- wpa_auth = WPA_AUTH_DISABLED;
- AEXT_INFO(dev->name, "Shared Key\n");
+ auth = 1;
+ wpa_auth = 0;
+ ANDROID_INFO(("%s: Authentication: Shared Key\n", __FUNCTION__));
} else if (amode == AUTH_WPAPSK) {
- auth = WL_AUTH_OPEN_SYSTEM;
- wpa_auth = WPA_AUTH_PSK;
- AEXT_INFO(dev->name, "WPA-PSK\n");
+ auth = 0;
+ wpa_auth = 4;
+ ANDROID_INFO(("%s: Authentication: WPA-PSK\n", __FUNCTION__));
} else if (amode == AUTH_WPA2PSK) {
- auth = WL_AUTH_OPEN_SYSTEM;
- wpa_auth = WPA2_AUTH_PSK;
- AEXT_INFO(dev->name, "WPA2-PSK\n");
+ auth = 0;
+ wpa_auth = 128;
+ ANDROID_INFO(("%s: Authentication: WPA2-PSK\n", __FUNCTION__));
} else if (amode == AUTH_WPAWPA2PSK) {
- auth = WL_AUTH_OPEN_SYSTEM;
- wpa_auth = WPA2_AUTH_PSK | WPA_AUTH_PSK;
- AEXT_INFO(dev->name, "WPA/WPA2-PSK\n");
+ auth = 0;
+ wpa_auth = 132;
+ ANDROID_INFO(("%s: Authentication: WPA/WPA2-PSK\n", __FUNCTION__));
}
-#ifdef WLMESH
if (cur_if->ifmode == IMESH_MODE) {
s32 val = WL_BSSTYPE_MESH;
wl_ext_ioctl(dev, WLC_SET_INFRA, &val, sizeof(val), 1);
- } else
-#endif /* WLMESH */
- if (cur_if->ifmode == ISTA_MODE) {
+ } else if (cur_if->ifmode == ISTA_MODE) {
s32 val = WL_BSSTYPE_INFRA;
wl_ext_ioctl(dev, WLC_SET_INFRA, &val, sizeof(val), 1);
}
}
static int
-wl_ext_set_emode(struct wl_apsta_params *apsta_params,
- struct wl_if_info *cur_if)
+wl_ext_set_emode(struct wl_if_info *cur_if, struct wl_apsta_params *apsta_params)
{
struct net_device *dev = cur_if->dev;
int wsec=0;
authmode_t amode = cur_if->amode;
encmode_t emode = cur_if->emode;
char *key = cur_if->key;
- struct dhd_pub *dhd = apsta_params->dhd;
+ s8 iovar_buf[WLC_IOCTL_SMLEN];
+ struct dhd_pub *dhd = dhd_get_pub(dev);
memset(&wsec_key, 0, sizeof(wsec_key));
memset(&psk, 0, sizeof(psk));
-#ifdef WLMESH
if (cur_if->ifmode == IMESH_MODE) {
if (amode == AUTH_SAE) {
- wsec = AES_ENABLED;
+ wsec = 4;
+ ANDROID_INFO(("%s: Encryption: AES\n", __FUNCTION__));
} else {
- wsec = WSEC_NONE;
+ wsec = 0;
+ ANDROID_INFO(("%s: Encryption: No securiy\n", __FUNCTION__));
}
- } else
-#endif /* WLMESH */
- if (emode == ENC_NONE) {
- wsec = WSEC_NONE;
- AEXT_INFO(dev->name, "No securiy\n");
+ } else if (emode == ENC_NONE) {
+ wsec = 0;
+ ANDROID_INFO(("%s: Encryption: No securiy\n", __FUNCTION__));
} else if (emode == ENC_WEP) {
- wsec = WEP_ENABLED;
+ wsec = 1;
wl_ext_parse_wep(key, &wsec_key);
- AEXT_INFO(dev->name, "WEP key \"%s\"\n", wsec_key.data);
+ ANDROID_INFO(("%s: Encryption: WEP\n", __FUNCTION__));
+ ANDROID_INFO(("%s: Key: \"%s\"\n", __FUNCTION__, wsec_key.data));
} else if (emode == ENC_TKIP) {
- wsec = TKIP_ENABLED;
+ wsec = 2;
psk.key_len = strlen(key);
psk.flags = WSEC_PASSPHRASE;
memcpy(psk.key, key, strlen(key));
- AEXT_INFO(dev->name, "TKIP key \"%s\"\n", psk.key);
+ ANDROID_INFO(("%s: Encryption: TKIP\n", __FUNCTION__));
+ ANDROID_INFO(("%s: Key: \"%s\"\n", __FUNCTION__, psk.key));
} else if (emode == ENC_AES || amode == AUTH_SAE) {
- wsec = AES_ENABLED;
+ wsec = 4;
psk.key_len = strlen(key);
psk.flags = WSEC_PASSPHRASE;
memcpy(psk.key, key, strlen(key));
- AEXT_INFO(dev->name, "AES key \"%s\"\n", psk.key);
+ ANDROID_INFO(("%s: Encryption: AES\n", __FUNCTION__));
+ ANDROID_INFO(("%s: Key: \"%s\"\n", __FUNCTION__, psk.key));
} else if (emode == ENC_TKIPAES) {
- wsec = TKIP_ENABLED | AES_ENABLED;
+ wsec = 6;
psk.key_len = strlen(key);
psk.flags = WSEC_PASSPHRASE;
memcpy(psk.key, key, strlen(key));
- AEXT_INFO(dev->name, "TKIP/AES key \"%s\"\n", psk.key);
+ ANDROID_INFO(("%s: Encryption: TKIP/AES\n", __FUNCTION__));
+ ANDROID_INFO(("%s: Key: \"%s\"\n", __FUNCTION__, psk.key));
}
if (dhd->conf->chip == BCM43430_CHIP_ID && cur_if->ifidx > 0 && wsec >= 2 &&
- apsta_params->apstamode == ISTAAP_MODE) {
- wsec |= WSEC_SWFLAG; // terence 20180628: fix me, this is a workaround
+ apsta_params->apstamode == IAPSTA_MODE) {
+ wsec |= 0x8; // terence 20180628: fix me, this is a workaround
}
wl_ext_iovar_setint(dev, "wsec", wsec);
-#ifdef WLMESH
if (cur_if->ifmode == IMESH_MODE) {
if (amode == AUTH_SAE) {
- s8 iovar_buf[WLC_IOCTL_SMLEN];
- AEXT_INFO(dev->name, "AES key \"%s\"\n", key);
+ ANDROID_INFO(("%s: Key: \"%s\"\n", __FUNCTION__, key));
wl_ext_iovar_setint(dev, "mesh_auth_proto", 1);
wl_ext_iovar_setint(dev, "mfp", WL_MFP_REQUIRED);
wl_ext_iovar_setbuf(dev, "sae_password", key, strlen(key),
iovar_buf, WLC_IOCTL_SMLEN, NULL);
} else {
- AEXT_INFO(dev->name, "No securiy\n");
wl_ext_iovar_setint(dev, "mesh_auth_proto", 0);
wl_ext_iovar_setint(dev, "mfp", WL_MFP_NONE);
}
- } else
-#endif /* WLMESH */
- if (emode == ENC_WEP) {
+ } else if (emode == ENC_WEP) {
wl_ext_ioctl(dev, WLC_SET_KEY, &wsec_key, sizeof(wsec_key), 1);
} else if (emode == ENC_TKIP || emode == ENC_AES || emode == ENC_TKIPAES) {
- if (cur_if->ifmode == ISTA_MODE)
- wl_ext_iovar_setint(dev, "sup_wpa", 1);
- wl_ext_ioctl(dev, WLC_SET_WSEC_PMK, &psk, sizeof(psk), 1);
+ if (dev) {
+ if (cur_if->ifmode == ISTA_MODE)
+ wl_ext_iovar_setint(dev, "sup_wpa", 1);
+ wl_ext_ioctl(dev, WLC_SET_WSEC_PMK, &psk, sizeof(psk), 1);
+ } else {
+ ANDROID_ERROR(("%s: apdev is null\n", __FUNCTION__));
+ }
}
return 0;
}
-static u32
-wl_ext_get_chanspec(struct wl_apsta_params *apsta_params,
- struct net_device *dev)
+static uint16
+wl_ext_get_chan(struct net_device *dev)
{
+ struct wl_apsta_params *apsta_params = &g_apsta_params;
int ret = 0;
+ uint16 chan = 0, ctl_chan;
struct ether_addr bssid;
u32 chanspec = 0;
-
+
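+ /* Only report a channel when associated: read the current chanspec, convert
+ * it to host format and extract the 20MHz control channel number.
+ */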
ret = wldev_ioctl(dev, WLC_GET_BSSID, &bssid, sizeof(bssid), 0);
 if (ret != BCME_NOTASSOCIATED && memcmp(&ether_null, &bssid, ETHER_ADDR_LEN)) {
- if (wl_ext_iovar_getint(dev, "chanspec", (s32 *)&chanspec) == BCME_OK) {
+ if (wldev_iovar_getint(dev, "chanspec", (s32 *)&chanspec) == BCME_OK) {
chanspec = wl_ext_chspec_driver_to_host(apsta_params->ioctl_ver, chanspec);
- return chanspec;
- }
- }
-
- return 0;
-}
-
-static uint16
-wl_ext_get_chan(struct wl_apsta_params *apsta_params, struct net_device *dev)
-{
- int ret = 0;
- uint16 chan = 0, ctl_chan;
- struct ether_addr bssid;
- u32 chanspec = 0;
-
- ret = wldev_ioctl(dev, WLC_GET_BSSID, &bssid, sizeof(bssid), 0);
- if (ret != BCME_NOTASSOCIATED && memcmp(&ether_null, &bssid, ETHER_ADDR_LEN)) {
- if (wl_ext_iovar_getint(dev, "chanspec", (s32 *)&chanspec) == BCME_OK) {
- chanspec = wl_ext_chspec_driver_to_host(apsta_params->ioctl_ver, chanspec);
- ctl_chan = wf_chspec_ctlchan(chanspec);
- chan = (u16)(ctl_chan & 0x00FF);
- return chan;
+ ctl_chan = wf_chspec_ctlchan(chanspec);
+ chan = (u16)(ctl_chan & 0x00FF);
+ ANDROID_INFO(("%s: cur_chan=%d(0x%x)\n", __FUNCTION__,
+ chan, chanspec));
+ return chan;
}
}
}
static chanspec_t
-wl_ext_chan_to_chanspec(struct wl_apsta_params *apsta_params,
+wl_ext_get_chanspec(struct wl_apsta_params *apsta_params,
struct net_device *dev, uint16 channel)
{
s32 _chan = channel;
if (band == IEEE80211_BAND_5GHZ) {
param.band = WLC_BAND_5G;
- err = wl_ext_iovar_getbuf(dev, "bw_cap", ¶m, sizeof(param),
+ err = wldev_iovar_getbuf(dev, "bw_cap", ¶m, sizeof(param),
iovar_buf, WLC_IOCTL_SMLEN, NULL);
if (err) {
if (err != BCME_UNSUPPORTED) {
- AEXT_ERROR(dev->name, "bw_cap failed, %d\n", err);
+ ANDROID_ERROR(("bw_cap failed, %d\n", err));
return err;
} else {
- err = wl_ext_iovar_getint(dev, "mimo_bw_cap", &bw_cap);
+ err = wldev_iovar_getint(dev, "mimo_bw_cap", &bw_cap);
+ if (err) {
+ ANDROID_ERROR(("error get mimo_bw_cap (%d)\n", err));
+ }
if (bw_cap != WLC_N_BW_20ALL)
bw = WL_CHANSPEC_BW_40;
}
if (wf_chspec_valid(chspec)) {
fw_chspec = wl_ext_chspec_host_to_driver(apsta_params->ioctl_ver, chspec);
if (fw_chspec == INVCHANSPEC) {
- AEXT_ERROR(dev->name, "failed to convert host chanspec to fw chanspec\n");
+ ANDROID_ERROR(("%s: failed to convert host chanspec to fw chanspec\n",
+ __FUNCTION__));
fw_chspec = 0;
}
} else {
bw = 0;
if (bw)
goto set_channel;
- AEXT_ERROR(dev->name, "Invalid chanspec 0x%x\n", chspec);
+ ANDROID_ERROR(("%s: Invalid chanspec 0x%x\n", __FUNCTION__, chspec));
err = BCME_ERROR;
}
return fw_chspec;
}
-static bool
-wl_ext_radar_detect(struct net_device *dev)
+static void
+wl_ext_ch_to_chanspec(int ch, struct wl_join_params *join_params,
+ size_t *join_params_size)
{
- int ret = BCME_OK;
- bool radar = FALSE;
- s32 val = 0;
+ struct wl_apsta_params *apsta_params = &g_apsta_params;
+ chanspec_t chanspec = 0;
- if ((ret = wldev_ioctl(dev, WLC_GET_RADAR, &val, sizeof(int), false) == 0)) {
- radar = TRUE;
- }
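+ /* For a non-zero channel, build a single 20MHz chanspec (2G band for
+ * channels up to CH_MAX_2G_CHANNEL, otherwise 5G), grow the join params to
+ * carry it and convert it to the firmware's chanspec format.
+ */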
+ if (ch != 0) {
+ join_params->params.chanspec_num = 1;
+ join_params->params.chanspec_list[0] = ch;
- return radar;
-}
+ if (join_params->params.chanspec_list[0] <= CH_MAX_2G_CHANNEL)
+ chanspec |= WL_CHANSPEC_BAND_2G;
+ else
+ chanspec |= WL_CHANSPEC_BAND_5G;
-static struct wl_if_info *
-wl_ext_if_enabled(struct wl_apsta_params *apsta_params, ifmode_t ifmode)
-{
- struct wl_if_info *tmp_if, *target_if = NULL;
- int i;
+ chanspec |= WL_CHANSPEC_BW_20;
+ chanspec |= WL_CHANSPEC_CTL_SB_NONE;
- for (i=0; i<MAX_IF_NUM; i++) {
- tmp_if = &apsta_params->if_info[i];
- if (tmp_if && tmp_if->ifmode == ifmode &&
- wl_get_isam_status(tmp_if, IF_READY)) {
- if (wl_ext_get_chan(apsta_params, tmp_if->dev)) {
- target_if = tmp_if;
- break;
- }
- }
- }
+ *join_params_size += WL_ASSOC_PARAMS_FIXED_SIZE +
+ join_params->params.chanspec_num * sizeof(chanspec_t);
+
+ join_params->params.chanspec_list[0] &= WL_CHANSPEC_CHAN_MASK;
+ join_params->params.chanspec_list[0] |= chanspec;
+ join_params->params.chanspec_list[0] =
+ wl_ext_chspec_host_to_driver(apsta_params->ioctl_ver,
+ join_params->params.chanspec_list[0]);
- return target_if;
+ join_params->params.chanspec_num =
+ htod32(join_params->params.chanspec_num);
+ ANDROID_TRACE(("join_params->params.chanspec_list[0]= %X, %d channels\n",
+ join_params->params.chanspec_list[0],
+ join_params->params.chanspec_num));
+ }
}
-#ifndef WL_STATIC_IF
-s32
-wl_ext_add_del_bss(struct net_device *ndev, s32 bsscfg_idx,
- int iftype, s32 del, u8 *addr)
+static s32
+wl_ext_connect(struct wl_if_info *cur_if)
{
- s32 ret = BCME_OK;
- s32 val = 0;
- u8 ioctl_buf[WLC_IOCTL_SMLEN];
- struct {
- s32 cfg;
- s32 val;
- struct ether_addr ea;
- } bss_setbuf;
+ struct wl_apsta_params *apsta_params = &g_apsta_params;
+ wl_extjoin_params_t *ext_join_params;
+ struct wl_join_params join_params;
+ size_t join_params_size;
+ s32 err = 0;
+ u32 chan_cnt = 0;
+ s8 iovar_buf[WLC_IOCTL_SMLEN];
- AEXT_TRACE(ndev->name, "wl_iftype:%d del:%d \n", iftype, del);
-
- bzero(&bss_setbuf, sizeof(bss_setbuf));
-
- /* AP=2, STA=3, up=1, down=0, val=-1 */
- if (del) {
- val = WLC_AP_IOV_OP_DELETE;
- } else if (iftype == WL_INTERFACE_CREATE_AP) {
- /* Add/role change to AP Interface */
- AEXT_TRACE(ndev->name, "Adding AP Interface\n");
- val = WLC_AP_IOV_OP_MANUAL_AP_BSSCFG_CREATE;
- } else if (iftype == WL_INTERFACE_CREATE_STA) {
- /* Add/role change to STA Interface */
- AEXT_TRACE(ndev->name, "Adding STA Interface\n");
- val = WLC_AP_IOV_OP_MANUAL_STA_BSSCFG_CREATE;
- } else {
- AEXT_ERROR(ndev->name, "add_del_bss NOT supported for IFACE type:0x%x", iftype);
- return -EINVAL;
+ if (cur_if->channel) {
+ chan_cnt = 1;
}
- if (!del) {
- wl_ext_bss_iovar_war(ndev, &val);
+ /*
+ * Join with specific BSSID and cached SSID
+ * If SSID is zero join based on BSSID only
+ */
+ join_params_size = WL_EXTJOIN_PARAMS_FIXED_SIZE +
+ chan_cnt * sizeof(chanspec_t);
+ ext_join_params = (wl_extjoin_params_t*)kzalloc(join_params_size, GFP_KERNEL);
+ if (ext_join_params == NULL) {
+ err = -ENOMEM;
+ goto exit;
}
+ ext_join_params->ssid.SSID_len = min(sizeof(ext_join_params->ssid.SSID),
+ strlen(cur_if->ssid));
+ memcpy(&ext_join_params->ssid.SSID, cur_if->ssid, ext_join_params->ssid.SSID_len);
+ ext_join_params->ssid.SSID_len = htod32(ext_join_params->ssid.SSID_len);
+ /* increase dwell time to receive probe response or detect beacon
+ * from target AP in a noisy environment, only during the connect command
+ */
+ ext_join_params->scan.active_time = chan_cnt ? WL_SCAN_JOIN_ACTIVE_DWELL_TIME_MS : -1;
+ ext_join_params->scan.passive_time = chan_cnt ? WL_SCAN_JOIN_PASSIVE_DWELL_TIME_MS : -1;
+ /* Set up join scan parameters */
+ ext_join_params->scan.scan_type = -1;
+ ext_join_params->scan.nprobes = chan_cnt ?
+ (ext_join_params->scan.active_time/WL_SCAN_JOIN_PROBE_INTERVAL_MS) : -1;
+ ext_join_params->scan.home_time = -1;
- bss_setbuf.cfg = htod32(bsscfg_idx);
- bss_setbuf.val = htod32(val);
-
- if (addr) {
- memcpy(&bss_setbuf.ea.octet, addr, ETH_ALEN);
+ if (memcmp(&ether_null, &cur_if->bssid, ETHER_ADDR_LEN))
+ memcpy(&ext_join_params->assoc.bssid, &cur_if->bssid, ETH_ALEN);
+ else
+ memcpy(&ext_join_params->assoc.bssid, &ether_bcast, ETH_ALEN);
+ ext_join_params->assoc.chanspec_num = chan_cnt;
+ if (chan_cnt) {
+ u16 channel, band, bw, ctl_sb;
+ chanspec_t chspec;
+ channel = cur_if->channel;
+ band = (channel <= CH_MAX_2G_CHANNEL) ? WL_CHANSPEC_BAND_2G
+ : WL_CHANSPEC_BAND_5G;
+ bw = WL_CHANSPEC_BW_20;
+ ctl_sb = WL_CHANSPEC_CTL_SB_NONE;
+ chspec = (channel | band | bw | ctl_sb);
+ ext_join_params->assoc.chanspec_list[0] &= WL_CHANSPEC_CHAN_MASK;
+ ext_join_params->assoc.chanspec_list[0] |= chspec;
+ ext_join_params->assoc.chanspec_list[0] =
+ wl_ext_chspec_host_to_driver(apsta_params->ioctl_ver,
+ ext_join_params->assoc.chanspec_list[0]);
+ }
+ ext_join_params->assoc.chanspec_num = htod32(ext_join_params->assoc.chanspec_num);
+ if (ext_join_params->ssid.SSID_len < IEEE80211_MAX_SSID_LEN) {
+ ANDROID_INFO(("ssid \"%s\", len (%d)\n", ext_join_params->ssid.SSID,
+ ext_join_params->ssid.SSID_len));
}
- AEXT_INFO(ndev->name, "wl bss %d bssidx:%d\n", val, bsscfg_idx);
- ret = wl_ext_iovar_setbuf(ndev, "bss", &bss_setbuf, sizeof(bss_setbuf),
- ioctl_buf, WLC_IOCTL_SMLEN, NULL);
- if (ret != 0)
- WL_ERR(("'bss %d' failed with %d\n", val, ret));
-
- return ret;
-}
+ err = wl_ext_iovar_setbuf_bsscfg(cur_if->dev, "join", ext_join_params,
+ join_params_size, iovar_buf, WLC_IOCTL_SMLEN, cur_if->bssidx, NULL);
-static int
-wl_ext_interface_ops(struct net_device *dev,
- struct wl_apsta_params *apsta_params, int iftype, u8 *addr)
-{
- s32 ret;
- struct wl_interface_create_v2 iface;
- wl_interface_create_v3_t iface_v3;
- struct wl_interface_info_v1 *info;
- wl_interface_info_v2_t *info_v2;
- uint32 ifflags = 0;
- bool use_iface_info_v2 = false;
- u8 ioctl_buf[WLC_IOCTL_SMLEN];
- wl_wlc_version_t wlc_ver;
-
- /* Interface create */
- bzero(&iface, sizeof(iface));
-
- if (addr) {
- ifflags |= WL_INTERFACE_MAC_USE;
- }
-
- ret = wldev_iovar_getbuf(dev, "wlc_ver", NULL, 0,
- &wlc_ver, sizeof(wl_wlc_version_t), NULL);
- if ((ret == BCME_OK) && (wlc_ver.wlc_ver_major >= 5)) {
- ret = wldev_iovar_getbuf(dev, "interface_create",
- &iface, sizeof(struct wl_interface_create_v2),
- ioctl_buf, sizeof(ioctl_buf), NULL);
- if ((ret == BCME_OK) && (*((uint32 *)ioctl_buf) == WL_INTERFACE_CREATE_VER_3)) {
- use_iface_info_v2 = true;
- bzero(&iface_v3, sizeof(wl_interface_create_v3_t));
- iface_v3.ver = WL_INTERFACE_CREATE_VER_3;
- iface_v3.iftype = iftype;
- iface_v3.flags = ifflags;
- if (addr) {
- memcpy(&iface_v3.mac_addr.octet, addr, ETH_ALEN);
- }
- ret = wl_ext_iovar_getbuf(dev, "interface_create",
- &iface_v3, sizeof(wl_interface_create_v3_t),
- ioctl_buf, sizeof(ioctl_buf), NULL);
- if (unlikely(ret)) {
- WL_ERR(("Interface v3 create failed!! ret %d\n", ret));
- return ret;
- }
- }
- }
+ printf("Connecting with " MACDBG " channel (%d) ssid \"%s\", len (%d)\n\n",
+ MAC2STRDBG((u8*)(&ext_join_params->assoc.bssid)), cur_if->channel,
+ ext_join_params->ssid.SSID, ext_join_params->ssid.SSID_len);
- /* success case */
- if (use_iface_info_v2 == true) {
- info_v2 = (wl_interface_info_v2_t *)ioctl_buf;
- ret = info_v2->bsscfgidx;
- } else {
- /* Use v1 struct */
- iface.ver = WL_INTERFACE_CREATE_VER_2;
- iface.iftype = iftype;
- iface.flags = ifflags;
- if (addr) {
- memcpy(&iface.mac_addr.octet, addr, ETH_ALEN);
- }
- ret = wldev_iovar_getbuf(dev, "interface_create",
- &iface, sizeof(struct wl_interface_create_v2),
- ioctl_buf, sizeof(ioctl_buf), NULL);
- if (ret == BCME_OK) {
- info = (struct wl_interface_info_v1 *)ioctl_buf;
- ret = info->bsscfgidx;
+ kfree(ext_join_params);
+ if (err) {
+ if (err == BCME_UNSUPPORTED) {
+ ANDROID_TRACE(("join iovar is not supported\n"));
+ goto set_ssid;
+ } else {
+ ANDROID_ERROR(("error (%d)\n", err));
+ goto exit;
}
- }
+ } else
+ goto exit;
- AEXT_INFO(dev->name, "wl interface create success!! bssidx:%d \n", ret);
- return ret;
-}
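+ /* Fallback: if the firmware rejects the "join" iovar (BCME_UNSUPPORTED),
+ * use the legacy WLC_SET_SSID ioctl with wl_join_params instead.
+ */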
+set_ssid:
+ memset(&join_params, 0, sizeof(join_params));
+ join_params_size = sizeof(join_params.ssid);
-static void
-wl_ext_wait_netif_change(struct wl_apsta_params *apsta_params,
- struct wl_if_info *cur_if)
-{
- rtnl_unlock();
- wait_event_interruptible_timeout(apsta_params->netif_change_event,
- wl_get_isam_status(cur_if, IF_READY),
- msecs_to_jiffies(MAX_AP_LINK_WAIT_TIME));
- rtnl_lock();
-}
+ join_params.ssid.SSID_len = min(sizeof(join_params.ssid.SSID), strlen(cur_if->ssid));
+ memcpy(&join_params.ssid.SSID, cur_if->ssid, join_params.ssid.SSID_len);
+ join_params.ssid.SSID_len = htod32(join_params.ssid.SSID_len);
+ if (memcmp(&ether_null, &cur_if->bssid, ETHER_ADDR_LEN))
+ memcpy(&join_params.params.bssid, &cur_if->bssid, ETH_ALEN);
+ else
+ memcpy(&join_params.params.bssid, &ether_bcast, ETH_ALEN);
-static void
-wl_ext_interface_create(struct net_device *dev, struct wl_apsta_params *apsta_params,
- struct wl_if_info *cur_if, int iftype, u8 *addr)
-{
- s32 ret;
+ wl_ext_ch_to_chanspec(cur_if->channel, &join_params, &join_params_size);
+ ANDROID_TRACE(("join_param_size %zu\n", join_params_size));
- wl_set_isam_status(cur_if, IF_ADDING);
- ret = wl_ext_interface_ops(dev, apsta_params, iftype, addr);
- if (ret == BCME_UNSUPPORTED) {
- wl_ext_add_del_bss(dev, 1, iftype, 0, addr);
+ if (join_params.ssid.SSID_len < IEEE80211_MAX_SSID_LEN) {
+ ANDROID_INFO(("ssid \"%s\", len (%d)\n", join_params.ssid.SSID,
+ join_params.ssid.SSID_len));
}
- wl_ext_wait_netif_change(apsta_params, cur_if);
+ err = wl_ext_ioctl(cur_if->dev, WLC_SET_SSID, &join_params, join_params_size, 1);
+ if (err) {
+ ANDROID_ERROR(("error (%d)\n", err));
+ }
+
+exit:
+ return err;
+
}
static void
-wl_ext_iapsta_intf_add(struct net_device *dev, struct wl_apsta_params *apsta_params)
+wl_ext_wait_netif_change(struct wl_apsta_params *apsta_params,
+ bool need_rtnl_unlock)
{
- struct dhd_pub *dhd;
- apstamode_t apstamode = apsta_params->apstamode;
- struct wl_if_info *cur_if;
- s8 iovar_buf[WLC_IOCTL_SMLEN];
- wl_p2p_if_t ifreq;
- struct ether_addr mac_addr;
-
- dhd = dhd_get_pub(dev);
- bzero(&mac_addr, sizeof(mac_addr));
-
- if (apstamode == ISTAAP_MODE) {
- cur_if = &apsta_params->if_info[IF_VIF];
- wl_ext_interface_create(dev, apsta_params, cur_if, WL_INTERFACE_CREATE_AP, NULL);
- }
- else if (apstamode == ISTAGO_MODE) {
- bzero(&ifreq, sizeof(wl_p2p_if_t));
- ifreq.type = htod32(WL_P2P_IF_GO);
- cur_if = &apsta_params->if_info[IF_VIF];
- wl_set_isam_status(cur_if, IF_ADDING);
- wl_ext_iovar_setbuf(dev, "p2p_ifadd", &ifreq, sizeof(ifreq),
- iovar_buf, WLC_IOCTL_SMLEN, NULL);
- wl_ext_wait_netif_change(apsta_params, cur_if);
- }
- else if (apstamode == ISTASTA_MODE) {
- cur_if = &apsta_params->if_info[IF_VIF];
- memcpy(&mac_addr, dev->dev_addr, ETHER_ADDR_LEN);
- mac_addr.octet[0] |= 0x02;
- wl_ext_interface_create(dev, apsta_params, cur_if, WL_INTERFACE_CREATE_STA,
- (u8*)&mac_addr);
- }
- else if (apstamode == IDUALAP_MODE) {
- cur_if = &apsta_params->if_info[IF_VIF];
- wl_ext_interface_create(dev, apsta_params, cur_if, WL_INTERFACE_CREATE_AP, NULL);
- }
- else if (apstamode == ISTAAPAP_MODE) {
- u8 rand_bytes[2] = {0, };
- get_random_bytes(&rand_bytes, sizeof(rand_bytes));
- cur_if = &apsta_params->if_info[IF_VIF];
- memcpy(&mac_addr, dev->dev_addr, ETHER_ADDR_LEN);
- mac_addr.octet[0] |= 0x02;
- mac_addr.octet[5] += 0x01;
- memcpy(&mac_addr.octet[3], rand_bytes, sizeof(rand_bytes));
- wl_ext_interface_create(dev, apsta_params, cur_if, WL_INTERFACE_CREATE_AP,
- (u8*)&mac_addr);
- cur_if = &apsta_params->if_info[IF_VIF2];
- memcpy(&mac_addr, dev->dev_addr, ETHER_ADDR_LEN);
- mac_addr.octet[0] |= 0x02;
- mac_addr.octet[5] += 0x02;
- memcpy(&mac_addr.octet[3], rand_bytes, sizeof(rand_bytes));
- wl_ext_interface_create(dev, apsta_params, cur_if, WL_INTERFACE_CREATE_AP,
- (u8*)&mac_addr);
- }
-#ifdef WLMESH
- else if (apstamode == ISTAMESH_MODE) {
- cur_if = &apsta_params->if_info[IF_VIF];
- wl_ext_interface_create(dev, apsta_params, cur_if, WL_INTERFACE_CREATE_STA, NULL);
- }
- else if (apstamode == IMESHAP_MODE) {
- cur_if = &apsta_params->if_info[IF_VIF];
- wl_ext_interface_create(dev, apsta_params, cur_if, WL_INTERFACE_CREATE_AP, NULL);
- }
- else if (apstamode == ISTAAPMESH_MODE) {
- cur_if = &apsta_params->if_info[IF_VIF];
- wl_ext_interface_create(dev, apsta_params, cur_if, WL_INTERFACE_CREATE_AP, NULL);
- cur_if = &apsta_params->if_info[IF_VIF2];
- wl_ext_interface_create(dev, apsta_params, cur_if, WL_INTERFACE_CREATE_STA, NULL);
- }
- else if (apstamode == IMESHAPAP_MODE) {
- cur_if = &apsta_params->if_info[IF_VIF];
- wl_ext_interface_create(dev, apsta_params, cur_if, WL_INTERFACE_CREATE_AP, NULL);
- cur_if = &apsta_params->if_info[IF_VIF2];
- wl_ext_interface_create(dev, apsta_params, cur_if, WL_INTERFACE_CREATE_AP, NULL);
- }
-#endif /* WLMESH */
-
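+ /* Drop the RTNL lock (when the caller holds it) while waiting up to 1.5s
+ * for the new network interface to be registered by the driver.
+ */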
+ if (need_rtnl_unlock)
+ rtnl_unlock();
+ wait_event_interruptible_timeout(apsta_params->netif_change_event,
+ apsta_params->netif_change, msecs_to_jiffies(1500));
+ if (need_rtnl_unlock)
+ rtnl_lock();
}
-#endif /* WL_STATIC_IF */
static void
wl_ext_iapsta_preinit(struct net_device *dev, struct wl_apsta_params *apsta_params)
{
struct dhd_pub *dhd;
apstamode_t apstamode = apsta_params->apstamode;
+ wl_interface_create_t iface;
struct wl_if_info *cur_if;
+ wlc_ssid_t ssid = { 0, {0} };
s8 iovar_buf[WLC_IOCTL_SMLEN];
+ wl_country_t cspec = {{0}, 0, {0}};
+ wl_p2p_if_t ifreq;
s32 val = 0;
- int i;
+ int i, dfs = 1, pm = 0;
dhd = dhd_get_pub(dev);
for (i=0; i<MAX_IF_NUM; i++) {
cur_if = &apsta_params->if_info[i];
- if (i >= 1 && !strlen(cur_if->ifname))
- snprintf(cur_if->ifname, IFNAMSIZ, "wlan%d", i);
+ if (i == 1 && !strlen(cur_if->ifname))
+ strcpy(cur_if->ifname, "wlan1");
+ if (i == 2 && !strlen(cur_if->ifname))
+ strcpy(cur_if->ifname, "wlan2");
if (cur_if->ifmode == ISTA_MODE) {
cur_if->channel = 0;
cur_if->maxassoc = -1;
+ cur_if->ifstate = IF_STATE_INIT;
cur_if->prio = PRIO_STA;
cur_if->prefix = 'S';
snprintf(cur_if->ssid, DOT11_MAX_SSID_LEN, "ttt_sta");
} else if (cur_if->ifmode == IAP_MODE) {
cur_if->channel = 1;
cur_if->maxassoc = -1;
+ cur_if->ifstate = IF_STATE_INIT;
cur_if->prio = PRIO_AP;
cur_if->prefix = 'A';
snprintf(cur_if->ssid, DOT11_MAX_SSID_LEN, "ttt_ap");
-#ifdef WLMESH
+ dfs = 0;
} else if (cur_if->ifmode == IMESH_MODE) {
cur_if->channel = 1;
cur_if->maxassoc = -1;
+ cur_if->ifstate = IF_STATE_INIT;
cur_if->prio = PRIO_MESH;
cur_if->prefix = 'M';
snprintf(cur_if->ssid, DOT11_MAX_SSID_LEN, "ttt_mesh");
-#ifdef WL_ESCAN
- if (i == 0 && apsta_params->macs)
- wl_mesh_escan_attach(dhd, cur_if);
-#endif /* WL_ESCAN */
-#endif /* WLMESH */
+ dfs = 0;
+ }
+ }
+
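+ /* When an AP or mesh interface is configured (dfs cleared above) and VSDB
+ * is not used, apply the country code and disable DFS channels in firmware.
+ */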
+ if (!dfs && !apsta_params->vsdb) {
+ dhd_conf_get_country(dhd, &cspec);
+ if (!dhd_conf_map_country_list(dhd, &cspec)) {
+ dhd_conf_set_country(dhd, &cspec);
+ dhd_bus_country_set(dev, &cspec, TRUE);
}
+ wl_ext_ioctl(dev, WLC_DOWN, NULL, 0, 1);
+ wl_ext_iovar_setint(dev, "dfs_chan_disable", 1);
+ wl_ext_ioctl(dev, WLC_UP, NULL, 0, 1);
}
if (FW_SUPPORTED(dhd, rsdb)) {
if (apstamode == IDUALAP_MODE)
- apsta_params->rsdb = -1;
+ apsta_params->rsdb = TRUE;
else if (apstamode == ISTAAPAP_MODE)
- apsta_params->rsdb = 0;
- if (apstamode == ISTAAPAP_MODE || apstamode == IDUALAP_MODE ||
- apstamode == IMESHONLY_MODE || apstamode == ISTAMESH_MODE ||
- apstamode == IMESHAP_MODE || apstamode == ISTAAPMESH_MODE ||
- apstamode == IMESHAPAP_MODE) {
+ apsta_params->rsdb = FALSE;
+ if (apstamode == IDUALAP_MODE || apstamode == ISTAAPAP_MODE ||
+ apstamode == IMESHONLY_MODE || apstamode == IMESHSTA_MODE ||
+ apstamode == IMESHAP_MODE || apstamode == IMESHAPSTA_MODE ||
+ apstamode == IMESHAPAP_MODE) {
wl_config_t rsdb_mode_cfg = {0, 0};
- rsdb_mode_cfg.config = apsta_params->rsdb;
- AEXT_INFO(dev->name, "set rsdb_mode %d\n", rsdb_mode_cfg.config);
+ if (apsta_params->rsdb)
+ rsdb_mode_cfg.config = 1;
+ printf("%s: set rsdb_mode %d\n", __FUNCTION__, rsdb_mode_cfg.config);
wl_ext_ioctl(dev, WLC_DOWN, NULL, 0, 1);
wl_ext_iovar_setbuf(dev, "rsdb_mode", &rsdb_mode_cfg,
sizeof(rsdb_mode_cfg), iovar_buf, sizeof(iovar_buf), NULL);
wl_ext_ioctl(dev, WLC_UP, NULL, 0, 1);
}
} else {
- apsta_params->rsdb = 0;
+ apsta_params->rsdb = FALSE;
}
if (apstamode == ISTAONLY_MODE) {
wl_ext_ioctl(dev, WLC_SET_AP, &val, sizeof(val), 1);
#ifdef PROP_TXSTATUS_VSDB
#if defined(BCMSDIO)
- if (!(FW_SUPPORTED(dhd, rsdb)) && !disable_proptx) {
+ if (!FW_SUPPORTED(dhd, rsdb) && !disable_proptx) {
bool enabled;
dhd_wlfc_get_enable(dhd, &enabled);
if (!enabled) {
wl_ext_ioctl(dev, WLC_UP, NULL, 0, 1);
}
}
-#endif /* BCMSDIO */
+#endif
#endif /* PROP_TXSTATUS_VSDB */
}
- else if (apstamode == ISTAAP_MODE) {
+ else if (apstamode == IAPSTA_MODE) {
wl_ext_ioctl(dev, WLC_DOWN, NULL, 0, 1);
wl_ext_iovar_setint(dev, "mpc", 0);
wl_ext_iovar_setint(dev, "apsta", 1);
wl_ext_ioctl(dev, WLC_UP, NULL, 0, 1);
- }
- else if (apstamode == ISTAGO_MODE) {
- wl_ext_ioctl(dev, WLC_DOWN, NULL, 0, 1);
- wl_ext_iovar_setint(dev, "apsta", 1);
- wl_ext_ioctl(dev, WLC_UP, NULL, 0, 1);
- }
- else if (apstamode == ISTASTA_MODE) {
+ apsta_params->netif_change = FALSE;
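+ /* Create the secondary AP bsscfg: RSDB-capable firmware uses the
+ * "interface_create" iovar, otherwise bsscfg 1 is brought up by setting an
+ * empty SSID on it; then wait for the new netdev to appear.
+ */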
+ if (FW_SUPPORTED(dhd, rsdb)) {
+ bzero(&iface, sizeof(wl_interface_create_t));
+ iface.ver = WL_INTERFACE_CREATE_VER;
+ iface.flags = WL_INTERFACE_CREATE_AP;
+ wl_ext_iovar_getbuf(dev, "interface_create", &iface,
+ sizeof(iface), iovar_buf, WLC_IOCTL_SMLEN, NULL);
+ } else {
+ wl_ext_iovar_setbuf_bsscfg(dev, "ssid", &ssid, sizeof(ssid),
+ iovar_buf, WLC_IOCTL_SMLEN, 1, NULL);
+ }
+ wl_ext_wait_netif_change(apsta_params, TRUE);
}
else if (apstamode == IDUALAP_MODE) {
wl_ext_ioctl(dev, WLC_DOWN, NULL, 0, 1);
wl_ext_ioctl(dev, WLC_UP, NULL, 0, 1);
val = 1;
wl_ext_ioctl(dev, WLC_SET_AP, &val, sizeof(val), 1);
+ bzero(&iface, sizeof(wl_interface_create_t));
+ iface.ver = WL_INTERFACE_CREATE_VER;
+ iface.flags = WL_INTERFACE_CREATE_AP;
+ apsta_params->netif_change = FALSE;
+ wl_ext_iovar_getbuf(dev, "interface_create", &iface, sizeof(iface),
+ iovar_buf, WLC_IOCTL_SMLEN, NULL);
+ wl_ext_wait_netif_change(apsta_params, TRUE);
}
else if (apstamode == ISTAAPAP_MODE) {
+ u8 rand_bytes[2] = {0, };
+ get_random_bytes(&rand_bytes, sizeof(rand_bytes));
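+ /* The two AP interfaces get locally administered MACs derived from the
+ * primary address: set the local bit (0x02), bump the last octet (by 1 and 2)
+ * and fill octets 3-4 with random bytes so the addresses stay distinct.
+ */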
wl_ext_ioctl(dev, WLC_DOWN, NULL, 0, 1);
wl_ext_iovar_setint(dev, "mpc", 0);
wl_ext_iovar_setint(dev, "mbss", 1);
wl_ext_iovar_setint(dev, "apsta", 1); // keep 1 as we set in dhd_preinit_ioctls
wl_ext_ioctl(dev, WLC_UP, NULL, 0, 1);
// don't set WLC_SET_AP to 0, some parameters will be reset, such as bcn_timeout and roam_off
+ bzero(&iface, sizeof(wl_interface_create_t));
+ iface.ver = WL_INTERFACE_CREATE_VER;
+ iface.flags = WL_INTERFACE_CREATE_AP | WL_INTERFACE_MAC_USE;
+ memcpy(&iface.mac_addr, dev->dev_addr, ETHER_ADDR_LEN);
+ iface.mac_addr.octet[0] |= 0x02;
+ iface.mac_addr.octet[5] += 0x01;
+ memcpy(&iface.mac_addr.octet[3], rand_bytes, sizeof(rand_bytes));
+ apsta_params->netif_change = FALSE;
+ wl_ext_iovar_getbuf(dev, "interface_create", &iface, sizeof(iface),
+ iovar_buf, WLC_IOCTL_SMLEN, NULL);
+ wl_ext_wait_netif_change(apsta_params, TRUE);
+ bzero(&iface, sizeof(wl_interface_create_t));
+ iface.ver = WL_INTERFACE_CREATE_VER;
+ iface.flags = WL_INTERFACE_CREATE_AP | WL_INTERFACE_MAC_USE;
+ memcpy(&iface.mac_addr, dev->dev_addr, ETHER_ADDR_LEN);
+ iface.mac_addr.octet[0] |= 0x02;
+ iface.mac_addr.octet[5] += 0x02;
+ memcpy(&iface.mac_addr.octet[3], rand_bytes, sizeof(rand_bytes));
+ apsta_params->netif_change = FALSE;
+ wl_ext_iovar_getbuf(dev, "interface_create", &iface, sizeof(iface),
+ iovar_buf, WLC_IOCTL_SMLEN, NULL);
+ wl_ext_wait_netif_change(apsta_params, TRUE);
}
-#ifdef WLMESH
- else if (apstamode == IMESHONLY_MODE || apstamode == ISTAMESH_MODE ||
- apstamode == IMESHAP_MODE || apstamode == ISTAAPMESH_MODE ||
- apstamode == IMESHAPAP_MODE) {
- int pm = 0;
+ else if (apstamode == IMESHONLY_MODE) {
wl_ext_ioctl(dev, WLC_DOWN, NULL, 0, 1);
wl_ext_iovar_setint(dev, "mpc", 0);
- if (apstamode == IMESHONLY_MODE)
- wl_ext_ioctl(dev, WLC_SET_PM, &pm, sizeof(pm), 1);
- else
- wl_ext_iovar_setint(dev, "mbcn", 1);
+ wl_ext_iovar_setint(dev, "apsta", 1); // keep 1 as we set in dhd_preinit_ioctls
+ wl_ext_ioctl(dev, WLC_SET_PM, &pm, sizeof(pm), 1);
+ wl_ext_ioctl(dev, WLC_UP, NULL, 0, 1);
+ // don't set WLC_SET_AP to 0, some parameters will be reset, such as bcn_timeout and roam_off
+ }
+ else if (apstamode == IMESHSTA_MODE) {
+ wl_ext_ioctl(dev, WLC_DOWN, NULL, 0, 1);
+ wl_ext_iovar_setint(dev, "mpc", 0);
+ wl_ext_iovar_setint(dev, "mbcn", 1);
+ wl_ext_iovar_setint(dev, "apsta", 1);
+ wl_ext_ioctl(dev, WLC_UP, NULL, 0, 1);
+ bzero(&iface, sizeof(wl_interface_create_t));
+ iface.ver = WL_INTERFACE_CREATE_VER;
+ iface.flags = WL_INTERFACE_CREATE_STA;
+ apsta_params->netif_change = FALSE;
+ wl_ext_iovar_getbuf(dev, "interface_create", &iface, sizeof(iface),
+ iovar_buf, WLC_IOCTL_SMLEN, NULL);
+ wl_ext_wait_netif_change(apsta_params, TRUE);
+ }
+ else if (apstamode == IMESHAP_MODE) {
+ wl_ext_ioctl(dev, WLC_DOWN, NULL, 0, 1);
+ wl_ext_iovar_setint(dev, "mpc", 0);
+ wl_ext_iovar_setint(dev, "mbcn", 1);
+ wl_ext_iovar_setint(dev, "apsta", 1); // keep 1 as we set in dhd_preinit_ioctls
+ wl_ext_ioctl(dev, WLC_UP, NULL, 0, 1);
+ // don't set WLC_SET_AP to 0, some parameters will be reset, such as bcn_timeout and roam_off
+ bzero(&iface, sizeof(wl_interface_create_t));
+ iface.ver = WL_INTERFACE_CREATE_VER;
+ iface.flags = WL_INTERFACE_CREATE_AP;
+ apsta_params->netif_change = FALSE;
+ wl_ext_iovar_getbuf(dev, "interface_create", &iface, sizeof(iface),
+ iovar_buf, WLC_IOCTL_SMLEN, NULL);
+ wl_ext_wait_netif_change(apsta_params, TRUE);
+ }
+ else if (apstamode == IMESHAPSTA_MODE) {
+ wl_ext_ioctl(dev, WLC_DOWN, NULL, 0, 1);
+ wl_ext_iovar_setint(dev, "mpc", 0);
+ wl_ext_iovar_setint(dev, "mbcn", 1);
+ wl_ext_iovar_setint(dev, "apsta", 1); // keep 1 as we set in dhd_preinit_ioctls
+ wl_ext_ioctl(dev, WLC_UP, NULL, 0, 1);
+ // don't set WLC_SET_AP to 0, some parameters will be reset, such as bcn_timeout and roam_off
+ bzero(&iface, sizeof(wl_interface_create_t));
+ iface.ver = WL_INTERFACE_CREATE_VER;
+ iface.flags = WL_INTERFACE_CREATE_AP;
+ apsta_params->netif_change = FALSE;
+ wl_ext_iovar_getbuf(dev, "interface_create", &iface, sizeof(iface),
+ iovar_buf, WLC_IOCTL_SMLEN, NULL);
+ wl_ext_wait_netif_change(apsta_params, TRUE);
+ bzero(&iface, sizeof(wl_interface_create_t));
+ iface.ver = WL_INTERFACE_CREATE_VER;
+ iface.flags = WL_INTERFACE_CREATE_STA;
+ apsta_params->netif_change = FALSE;
+ wl_ext_iovar_getbuf(dev, "interface_create", &iface, sizeof(iface),
+ iovar_buf, WLC_IOCTL_SMLEN, NULL);
+ wl_ext_wait_netif_change(apsta_params, TRUE);
+ }
+ else if (apstamode == IMESHAPAP_MODE) {
+ wl_ext_ioctl(dev, WLC_DOWN, NULL, 0, 1);
+ wl_ext_iovar_setint(dev, "mpc", 0);
+ wl_ext_iovar_setint(dev, "mbcn", 1);
wl_ext_iovar_setint(dev, "apsta", 1); // keep 1 as we set in dhd_preinit_ioctls
wl_ext_ioctl(dev, WLC_UP, NULL, 0, 1);
// don't set WLC_SET_AP to 0, some parameters will be reset, such as bcn_timeout and roam_off
+ bzero(&iface, sizeof(wl_interface_create_t));
+ iface.ver = WL_INTERFACE_CREATE_VER;
+ iface.flags = WL_INTERFACE_CREATE_AP;
+ apsta_params->netif_change = FALSE;
+ wl_ext_iovar_getbuf(dev, "interface_create", &iface, sizeof(iface),
+ iovar_buf, WLC_IOCTL_SMLEN, NULL);
+ wl_ext_wait_netif_change(apsta_params, TRUE);
+ bzero(&iface, sizeof(wl_interface_create_t));
+ iface.ver = WL_INTERFACE_CREATE_VER;
+ iface.flags = WL_INTERFACE_CREATE_AP;
+ apsta_params->netif_change = FALSE;
+ wl_ext_iovar_getbuf(dev, "interface_create", &iface, sizeof(iface),
+ iovar_buf, WLC_IOCTL_SMLEN, NULL);
+ wl_ext_wait_netif_change(apsta_params, TRUE);
+ }
+ else if (apstamode == IGOSTA_MODE) {
+ wl_ext_ioctl(dev, WLC_DOWN, NULL, 0, 1);
+ wl_ext_iovar_setint(dev, "apsta", 1);
+ wl_ext_ioctl(dev, WLC_UP, NULL, 0, 1);
+ bzero(&ifreq, sizeof(wl_p2p_if_t));
+ ifreq.type = htod32(WL_P2P_IF_GO);
+ apsta_params->netif_change = FALSE;
+ wl_ext_iovar_setbuf(dev, "p2p_ifadd", &ifreq, sizeof(ifreq),
+ iovar_buf, WLC_IOCTL_SMLEN, NULL);
+ wl_ext_wait_netif_change(apsta_params, TRUE);
}
-#endif /* WLMESH */
wl_ext_get_ioctl_ver(dev, &apsta_params->ioctl_ver);
apsta_params->init = TRUE;
- WL_MSG(dev->name, "apstamode=%d\n", apstamode);
+ printf("%s: apstamode=%d\n", __FUNCTION__, apstamode);
}
static int
-wl_ext_isam_param(struct net_device *dev, char *command, int total_len)
+wl_ext_isam_init(struct net_device *dev, char *command, int total_len)
{
- struct dhd_pub *dhd = dhd_get_pub(dev);
- struct wl_apsta_params *apsta_params = dhd->iapsta_params;
- int ret = -1;
- char *pick_tmp, *data, *param;
- int bytes_written=-1;
+ char *pch, *pick_tmp, *pick_tmp2, *param;
+ struct wl_apsta_params *apsta_params = &g_apsta_params;
+ struct dhd_pub *dhd;
+ int i;
- AEXT_TRACE(dev->name, "command=%s, len=%d\n", command, total_len);
+ if (apsta_params->init) {
+ ANDROID_ERROR(("%s: don't init twice\n", __FUNCTION__));
+ return -1;
+ }
- pick_tmp = command;
- param = bcmstrtok(&pick_tmp, " ", 0); // pick isam_param
- param = bcmstrtok(&pick_tmp, " ", 0); // pick cmd
- while (param != NULL) {
- data = bcmstrtok(&pick_tmp, " ", 0); // pick data
- if (!strcmp(param, "acs")) {
- if (data) {
- apsta_params->acs = simple_strtol(data, NULL, 0);
- ret = 0;
- } else {
- bytes_written = snprintf(command, total_len, "%d", apsta_params->acs);
- ret = bytes_written;
- goto exit;
- }
- }
- param = bcmstrtok(&pick_tmp, " ", 0); // pick cmd
- }
-
-exit:
- return ret;
-}
-
-static int
-wl_ext_isam_init(struct net_device *dev, char *command, int total_len)
-{
- struct dhd_pub *dhd = dhd_get_pub(dev);
- char *pch, *pick_tmp, *pick_tmp2, *param;
- struct wl_apsta_params *apsta_params = dhd->iapsta_params;
- int i;
+ dhd = dhd_get_pub(dev);
- if (apsta_params->init) {
- AEXT_ERROR(dev->name, "don't init twice\n");
- return -1;
- }
- AEXT_TRACE(dev->name, "command=%s, len=%d\n", command, total_len);
+ ANDROID_TRACE(("%s: command=%s, len=%d\n", __FUNCTION__, command, total_len));
pick_tmp = command;
param = bcmstrtok(&pick_tmp, " ", 0); // skip iapsta_init
param = bcmstrtok(&pick_tmp, " ", 0);
while (param != NULL) {
- pick_tmp2 = bcmstrtok(&pick_tmp, " ", 0);
- if (!pick_tmp2) {
- AEXT_ERROR(dev->name, "wrong param %s\n", param);
- return -1;
- }
if (!strcmp(param, "mode")) {
pch = NULL;
- if (!strcmp(pick_tmp2, "sta")) {
- apsta_params->apstamode = ISTAONLY_MODE;
- } else if (!strcmp(pick_tmp2, "ap")) {
- apsta_params->apstamode = IAPONLY_MODE;
- } else if (!strcmp(pick_tmp2, "sta-ap")) {
- apsta_params->apstamode = ISTAAP_MODE;
- } else if (!strcmp(pick_tmp2, "sta-sta")) {
- apsta_params->apstamode = ISTASTA_MODE;
- apsta_params->vsdb = TRUE;
- } else if (!strcmp(pick_tmp2, "ap-ap")) {
- apsta_params->apstamode = IDUALAP_MODE;
- } else if (!strcmp(pick_tmp2, "sta-ap-ap")) {
- apsta_params->apstamode = ISTAAPAP_MODE;
- } else if (!strcmp(pick_tmp2, "apsta")) {
- apsta_params->apstamode = ISTAAP_MODE;
- apsta_params->if_info[IF_PIF].ifmode = ISTA_MODE;
- apsta_params->if_info[IF_VIF].ifmode = IAP_MODE;
- } else if (!strcmp(pick_tmp2, "dualap")) {
- apsta_params->apstamode = IDUALAP_MODE;
- apsta_params->if_info[IF_PIF].ifmode = IAP_MODE;
- apsta_params->if_info[IF_VIF].ifmode = IAP_MODE;
- } else if (!strcmp(pick_tmp2, "sta-go") ||
- !strcmp(pick_tmp2, "gosta")) {
- if (!FW_SUPPORTED(dhd, p2p)) {
- return -1;
- }
- apsta_params->apstamode = ISTAGO_MODE;
- apsta_params->if_info[IF_PIF].ifmode = ISTA_MODE;
- apsta_params->if_info[IF_VIF].ifmode = IAP_MODE;
-#ifdef WLMESH
- } else if (!strcmp(pick_tmp2, "mesh")) {
- apsta_params->apstamode = IMESHONLY_MODE;
- } else if (!strcmp(pick_tmp2, "sta-mesh")) {
- apsta_params->apstamode = ISTAMESH_MODE;
- } else if (!strcmp(pick_tmp2, "sta-ap-mesh")) {
- apsta_params->apstamode = ISTAAPMESH_MODE;
- } else if (!strcmp(pick_tmp2, "mesh-ap")) {
- apsta_params->apstamode = IMESHAP_MODE;
- } else if (!strcmp(pick_tmp2, "mesh-ap-ap")) {
- apsta_params->apstamode = IMESHAPAP_MODE;
-#endif /* WLMESH */
- } else {
- AEXT_ERROR(dev->name, "mode [sta|ap|sta-ap|ap-ap]\n");
- return -1;
- }
- pch = bcmstrtok(&pick_tmp2, " -", 0);
- for (i=0; i<MAX_IF_NUM && pch; i++) {
- if (!strcmp(pch, "sta"))
- apsta_params->if_info[i].ifmode = ISTA_MODE;
- else if (!strcmp(pch, "ap"))
- apsta_params->if_info[i].ifmode = IAP_MODE;
-#ifdef WLMESH
- else if (!strcmp(pch, "mesh")) {
- if (dhd->conf->fw_type != FW_TYPE_MESH) {
- AEXT_ERROR(dev->name, "wrong fw type\n");
+ pick_tmp2 = bcmstrtok(&pick_tmp, " ", 0);
+ if (pick_tmp2) {
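+ /* Map the "mode" token (e.g. sta, ap, sta-ap, mesh-ap-sta) to an apstamode,
+ * then derive each interface's ifmode from the '-'-separated sub-tokens.
+ */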
+ if (!strcmp(pick_tmp2, "sta")) {
+ apsta_params->apstamode = ISTAONLY_MODE;
+ } else if (!strcmp(pick_tmp2, "ap")) {
+ apsta_params->apstamode = IAPONLY_MODE;
+ } else if (!strcmp(pick_tmp2, "sta-ap")) {
+ apsta_params->apstamode = IAPSTA_MODE;
+ } else if (!strcmp(pick_tmp2, "ap-ap")) {
+ apsta_params->apstamode = IDUALAP_MODE;
+ } else if (!strcmp(pick_tmp2, "sta-ap-ap")) {
+ apsta_params->apstamode = ISTAAPAP_MODE;
+ } else if (!strcmp(pick_tmp2, "mesh")) {
+ apsta_params->apstamode = IMESHONLY_MODE;
+ } else if (!strcmp(pick_tmp2, "mesh-sta") ||
+ !strcmp(pick_tmp2, "sta-mesh")) {
+ apsta_params->apstamode = IMESHSTA_MODE;
+ } else if (!strcmp(pick_tmp2, "mesh-ap") ||
+ !strcmp(pick_tmp2, "ap-mesh")) {
+ apsta_params->apstamode = IMESHAP_MODE;
+ } else if (!strcmp(pick_tmp2, "mesh-ap-sta") ||
+ !strcmp(pick_tmp2, "sta-ap-mesh") ||
+ !strcmp(pick_tmp2, "sta-mesh-ap")) {
+ apsta_params->apstamode = IMESHAPSTA_MODE;
+ } else if (!strcmp(pick_tmp2, "mesh-ap-ap") ||
+ !strcmp(pick_tmp2, "ap-ap-mesh")) {
+ apsta_params->apstamode = IMESHAPAP_MODE;
+ } else if (!strcmp(pick_tmp2, "apsta")) {
+ apsta_params->apstamode = IAPSTA_MODE;
+ apsta_params->if_info[IF_PIF].ifmode = ISTA_MODE;
+ apsta_params->if_info[IF_VIF].ifmode = IAP_MODE;
+ } else if (!strcmp(pick_tmp2, "dualap")) {
+ apsta_params->apstamode = IDUALAP_MODE;
+ apsta_params->if_info[IF_PIF].ifmode = IAP_MODE;
+ apsta_params->if_info[IF_VIF].ifmode = IAP_MODE;
+ } else if (!strcmp(pick_tmp2, "gosta")) {
+ if (!FW_SUPPORTED(dhd, p2p)) {
return -1;
}
- apsta_params->if_info[i].ifmode = IMESH_MODE;
+ apsta_params->apstamode = IGOSTA_MODE;
+ apsta_params->if_info[IF_PIF].ifmode = ISTA_MODE;
+ apsta_params->if_info[IF_VIF].ifmode = IAP_MODE;
+ } else {
+ ANDROID_ERROR(("%s: mode [sta|ap|sta-ap|ap-ap]\n", __FUNCTION__));
+ return -1;
}
-#endif /* WLMESH */
pch = bcmstrtok(&pick_tmp2, " -", 0);
+ for (i=0; i<MAX_IF_NUM && pch; i++) {
+ if (!strcmp(pch, "sta"))
+ apsta_params->if_info[i].ifmode = ISTA_MODE;
+ else if (!strcmp(pch, "ap"))
+ apsta_params->if_info[i].ifmode = IAP_MODE;
+ else if (!strcmp(pch, "mesh")) {
+ if (dhd->conf->fw_type != FW_TYPE_MESH) {
+ ANDROID_ERROR(("%s: wrong fw type\n", __FUNCTION__));
+ return -1;
+ }
+ apsta_params->if_info[i].ifmode = IMESH_MODE;
+ }
+ pch = bcmstrtok(&pick_tmp2, " -", 0);
+ }
}
}
else if (!strcmp(param, "rsdb")) {
- apsta_params->rsdb = (int)simple_strtol(pick_tmp2, NULL, 0);
+ pch = bcmstrtok(&pick_tmp, " ", 0);
+ if (pch) {
+ if (!strcmp(pch, "y")) {
+ apsta_params->rsdb = TRUE;
+ } else if (!strcmp(pch, "n")) {
+ apsta_params->rsdb = FALSE;
+ } else {
+ ANDROID_ERROR(("%s: rsdb [y|n]\n", __FUNCTION__));
+ return -1;
+ }
+ }
} else if (!strcmp(param, "vsdb")) {
- if (!strcmp(pick_tmp2, "y")) {
- apsta_params->vsdb = TRUE;
- } else if (!strcmp(pick_tmp2, "n")) {
- apsta_params->vsdb = FALSE;
- } else {
- AEXT_ERROR(dev->name, "vsdb [y|n]\n");
- return -1;
+ pch = bcmstrtok(&pick_tmp, " ", 0);
+ if (pch) {
+ if (!strcmp(pch, "y")) {
+ apsta_params->vsdb = TRUE;
+ } else if (!strcmp(pch, "n")) {
+ apsta_params->vsdb = FALSE;
+ } else {
+ ANDROID_ERROR(("%s: vsdb [y|n]\n", __FUNCTION__));
+ return -1;
+ }
}
} else if (!strcmp(param, "csa")) {
- apsta_params->csa = (int)simple_strtol(pick_tmp2, NULL, 0);
- } else if (!strcmp(param, "acs")) {
- apsta_params->acs = (int)simple_strtol(pick_tmp2, NULL, 0);
-#if defined(WLMESH) && defined(WL_ESCAN)
- } else if (!strcmp(param, "macs")) {
- apsta_params->macs = (int)simple_strtol(pick_tmp2, NULL, 0);
-#endif /* WLMESH && WL_ESCAN */
+ pch = bcmstrtok(&pick_tmp, " ", 0);
+ if (pch) {
+ apsta_params->csa = (int)simple_strtol(pch, NULL, 0);
+ }
} else if (!strcmp(param, "ifname")) {
pch = NULL;
- pch = bcmstrtok(&pick_tmp2, " -", 0);
+ pick_tmp2 = bcmstrtok(&pick_tmp, " ", 0);
+ if (pick_tmp2)
+ pch = bcmstrtok(&pick_tmp2, " -", 0);
for (i=0; i<MAX_IF_NUM && pch; i++) {
strcpy(apsta_params->if_info[i].ifname, pch);
pch = bcmstrtok(&pick_tmp2, " -", 0);
}
} else if (!strcmp(param, "vifname")) {
- strcpy(apsta_params->if_info[IF_VIF].ifname, pick_tmp2);
+ pch = bcmstrtok(&pick_tmp, " ", 0);
+ if (pch)
+ strcpy(apsta_params->if_info[IF_VIF].ifname, pch);
+ else {
+ ANDROID_ERROR(("%s: vifname [wlan1]\n", __FUNCTION__));
+ return -1;
+ }
}
param = bcmstrtok(&pick_tmp, " ", 0);
}
if (apsta_params->apstamode == 0) {
- AEXT_ERROR(dev->name, "mode [sta|ap|sta-ap|ap-ap]\n");
+ ANDROID_ERROR(("%s: mode [sta|ap|sta-ap|ap-ap]\n", __FUNCTION__));
return -1;
}
wl_ext_iapsta_preinit(dev, apsta_params);
-#ifndef WL_STATIC_IF
- wl_ext_iapsta_intf_add(dev, apsta_params);
-#endif /* WL_STATIC_IF */
return 0;
}
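
/*
 * A minimal usage sketch, inferred from the parser above (the command string
 * below is illustrative, not taken from the patch): "iapsta_init" accepts
 * space-separated key/value pairs, e.g.
 *
 *   iapsta_init mode sta-ap rsdb n vsdb n csa 0 ifname wlan0-wlan1 vifname wlan1
 *
 * "mode" selects apstamode (sta, ap, sta-ap, ap-ap, apsta, dualap, gosta and
 * the mesh combinations) and is re-split on '-' to set each interface's
 * ifmode; "rsdb"/"vsdb" take y|n, "csa" takes an integer, and "ifname"
 * assigns per-interface names split on '-'.
 */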
else if (!strcmp(pick_tmp, "bgnac"))
cur_if->bgnmode = IEEE80211BGNAC;
else {
- AEXT_ERROR(cur_if->dev->name, "bgnmode [b|g|bg|bgn|bgnac]\n");
+ ANDROID_ERROR(("%s: bgnmode [b|g|bg|bgn|bgnac]\n", __FUNCTION__));
return -1;
}
} else if (!strcmp(row->name, " hidden ")) {
else if (!strcmp(pick_tmp, "y"))
cur_if->hidden = 1;
else {
- AEXT_ERROR(cur_if->dev->name, "hidden [y|n]\n");
+ ANDROID_ERROR(("%s: hidden [y|n]\n", __FUNCTION__));
return -1;
}
} else if (!strcmp(row->name, " maxassoc ")) {
else if (!strcmp(pick_tmp, "sae"))
cur_if->amode = AUTH_SAE;
else {
- AEXT_ERROR(cur_if->dev->name, "amode [open|shared|wpapsk|wpa2psk|wpawpa2psk]\n");
+ ANDROID_ERROR(("%s: amode [open|shared|wpapsk|wpa2psk|wpawpa2psk]\n",
+ __FUNCTION__));
return -1;
}
} else if (!strcmp(row->name, " emode ")) {
else if (!strcmp(pick_tmp, "tkipaes"))
cur_if->emode = ENC_TKIPAES;
else {
- AEXT_ERROR(cur_if->dev->name, "emode [none|wep|tkip|aes|tkipaes]\n");
+ ANDROID_ERROR(("%s: emode [none|wep|tkip|aes|tkipaes]\n",
+ __FUNCTION__));
return -1;
}
} else if (!strcmp(row->name, " key ")) {
static int
wl_ext_iapsta_config(struct net_device *dev, char *command, int total_len)
{
- struct dhd_pub *dhd = dhd_get_pub(dev);
int ret=0, i;
char *pch, *pch2, *pick_tmp, *pick_next=NULL, *param;
- struct wl_apsta_params *apsta_params = dhd->iapsta_params;
+ struct wl_apsta_params *apsta_params = &g_apsta_params;
char ifname[IFNAMSIZ+1];
- struct wl_if_info *cur_if = NULL, *tmp_if = NULL;
+ struct wl_if_info *cur_if = NULL;
if (!apsta_params->init) {
- AEXT_ERROR(dev->name, "please init first\n");
+ ANDROID_ERROR(("%s: please init first\n", __FUNCTION__));
return -1;
}
- AEXT_TRACE(dev->name, "command=%s, len=%d\n", command, total_len);
+ ANDROID_TRACE(("%s: command=%s, len=%d\n", __FUNCTION__, command, total_len));
pick_tmp = command;
param = bcmstrtok(&pick_tmp, " ", 0); // skip iapsta_config
- mutex_lock(&apsta_params->usr_sync);
-
while (pick_tmp != NULL) {
memset(ifname, 0, IFNAMSIZ+1);
if (!strncmp(pick_tmp, "ifname ", strlen("ifname "))) {
if (pch && pch2) {
strncpy(ifname, pch, pch2-pch);
} else {
- AEXT_ERROR(dev->name, "ifname [wlanX]\n");
- ret = -1;
- break;
+ ANDROID_ERROR(("%s: ifname [wlanX]\n", __FUNCTION__));
+ return -1;
}
for (i=0; i<MAX_IF_NUM; i++) {
- tmp_if = &apsta_params->if_info[i];
- if (tmp_if->dev && !strcmp(tmp_if->dev->name, ifname)) {
- cur_if = tmp_if;
+ if (apsta_params->if_info[i].dev &&
+ !strcmp(apsta_params->if_info[i].dev->name, ifname)) {
+ cur_if = &apsta_params->if_info[i];
break;
}
}
if (!cur_if) {
- AEXT_ERROR(dev->name, "wrong ifname=%s in apstamode=%d\n",
- ifname, apsta_params->apstamode);
- ret = -1;
- break;
+ ANDROID_ERROR(("%s: wrong ifname=%s in apstamode=%d\n",
+ __FUNCTION__, ifname, apsta_params->apstamode));
+ return -1;
}
ret = wl_ext_parse_config(cur_if, pick_tmp, &pick_next);
if (ret)
- break;
+ return -1;
pick_tmp = pick_next;
} else {
- AEXT_ERROR(dev->name, "first arg must be ifname\n");
- ret = -1;
- break;
+ ANDROID_ERROR(("%s: first arg must be ifname\n", __FUNCTION__));
+ return -1;
}
}
- mutex_unlock(&apsta_params->usr_sync);
-
- return ret;
+ return 0;
}
static int
-wl_ext_assoclist(struct net_device *dev, char *data, char *command,
- int total_len)
+wl_ext_isam_status(struct net_device *dev)
{
- int ret = 0, i, maxassoc = 0, bytes_written = 0;
- char mac_buf[MAX_NUM_OF_ASSOCLIST *
- sizeof(struct ether_addr) + sizeof(uint)] = {0};
- struct maclist *assoc_maclist = (struct maclist *)mac_buf;
+ struct wl_apsta_params *apsta_params = &g_apsta_params;
+ int i;
+ bool now_if;
+ struct wl_if_info *tmp_if;
+ uint16 chan = 0;
+ wlc_ssid_t ssid = { 0, {0} };
+ char amode[16], emode[16];
- assoc_maclist->count = htod32(MAX_NUM_OF_ASSOCLIST);
- ret = wl_ext_ioctl(dev, WLC_GET_ASSOCLIST, assoc_maclist, sizeof(mac_buf), 0);
- if (ret)
- return -1;
- maxassoc = dtoh32(assoc_maclist->count);
- bytes_written += snprintf(command+bytes_written, total_len,
- "%2s: %12s",
- "no", "------addr------");
- for (i=0; i<maxassoc; i++) {
- bytes_written += snprintf(command+bytes_written, total_len,
- "\n%2d: %pM", i, &assoc_maclist->ea[i]);
+ if (apsta_params->init == FALSE) {
+ return 0;
}
- return bytes_written;
-}
-
-#ifdef WLMESH
-static int
-wl_mesh_print_peer_info(mesh_peer_info_ext_t *mpi_ext,
- uint32 peer_results_count, char *command, int total_len)
-{
- char *peering_map[] = MESH_PEERING_STATE_STRINGS;
- uint32 count = 0;
- int bytes_written = 0;
-
- bytes_written += snprintf(command+bytes_written, total_len,
- "%2s: %12s : %6s : %-6s : %6s :"
- " %5s : %4s : %4s : %11s : %4s",
- "no", "------addr------ ", "l.aid", "state", "p.aid",
- "mppid", "llid", "plid", "entry_state", "rssi");
- for (count=0; count < peer_results_count; count++) {
- if (mpi_ext->entry_state != MESH_SELF_PEER_ENTRY_STATE_TIMEDOUT) {
- bytes_written += snprintf(command+bytes_written, total_len,
- "\n%2d: %pM : 0x%4x : %6s : 0x%4x :"
- " %5d : %4d : %4d : %11s : %4d",
- count, &mpi_ext->ea, mpi_ext->local_aid,
- peering_map[mpi_ext->peer_info.state],
- mpi_ext->peer_info.peer_aid,
- mpi_ext->peer_info.mesh_peer_prot_id,
- mpi_ext->peer_info.local_link_id,
- mpi_ext->peer_info.peer_link_id,
- (mpi_ext->entry_state == MESH_SELF_PEER_ENTRY_STATE_ACTIVE) ?
- "ACTIVE" :
- "EXTERNAL",
- mpi_ext->rssi);
- } else {
- bytes_written += snprintf(command+bytes_written, total_len,
- "\n%2d: %pM : %6s : %5s : %6s :"
- " %5s : %4s : %4s : %11s : %4s",
- count, &mpi_ext->ea, " NA ", " NA ", " NA ",
- " NA ", " NA ", " NA ", " TIMEDOUT ", " NA ");
+ printf("****************************\n");
+ printf("%s: apstamode=%d\n", __FUNCTION__, apsta_params->apstamode);
+ for (i=0; i<MAX_IF_NUM; i++) {
+ now_if = FALSE;
+ memset(&ssid, 0, sizeof(ssid));
+ memset(amode, 0, sizeof(amode));
+ memset(emode, 0, sizeof(emode));
+ tmp_if = &apsta_params->if_info[i];
+ if (dev == tmp_if->dev)
+ now_if = TRUE;
+ if (tmp_if->dev) {
+ chan = wl_ext_get_chan(tmp_if->dev);
+ if (chan) {
+ wl_ext_ioctl(tmp_if->dev, WLC_GET_SSID, &ssid, sizeof(ssid), 0);
+ wl_ext_get_amode(tmp_if, amode);
+ wl_ext_get_emode(tmp_if, emode);
+ }
+ if (chan) {
+ printf("%s[%c-%c%s]: chan %3d, amode %s, emode %s, SSID \"%s\"\n",
+ tmp_if->ifname, tmp_if->prefix, chan?'E':'D',
+ now_if?"*":" ", chan, amode, emode, ssid.SSID);
+ } else {
+ printf("%s[%c-%c%s]:\n",
+ tmp_if->ifname, tmp_if->prefix, chan?'E':'D',
+ now_if?"*":" ");
+ }
}
- mpi_ext++;
- }
-
- return bytes_written;
-}
-
-static int
-wl_mesh_get_peer_results(struct net_device *dev, char *buf, int len)
-{
- int indata, inlen;
- mesh_peer_info_dump_t *peer_results;
- int ret;
-
- memset(buf, 0, len);
- peer_results = (mesh_peer_info_dump_t *)buf;
- indata = htod32(len);
- inlen = 4;
- ret = wl_ext_iovar_getbuf(dev, "mesh_peer_status", &indata, inlen, buf, len, NULL);
- if (!ret) {
- peer_results = (mesh_peer_info_dump_t *)buf;
- ret = peer_results->count;
}
+ printf("****************************\n");
- return ret;
+ return 0;
}
static int
-wl_ext_mesh_peer_status(struct net_device *dev, char *data, char *command,
- int total_len)
+wl_ext_if_down(struct wl_if_info *cur_if)
{
- struct dhd_pub *dhd = dhd_get_pub(dev);
- struct wl_apsta_params *apsta_params = dhd->iapsta_params;
- int i;
- struct wl_if_info *cur_if;
- mesh_peer_info_dump_t *peer_results;
- mesh_peer_info_ext_t *mpi_ext;
- char *peer_buf = NULL;
- int peer_len = WLC_IOCTL_MAXLEN;
- int dump_written = 0, ret;
-
- if (!data) {
- peer_buf = kmalloc(peer_len, GFP_KERNEL);
- if (peer_buf == NULL) {
- AEXT_ERROR(dev->name, "Failed to allocate buffer of %d bytes\n",
- peer_len);
- return -1;
- }
- for (i=0; i<MAX_IF_NUM; i++) {
- cur_if = &apsta_params->if_info[i];
- if (cur_if && dev == cur_if->dev && cur_if->ifmode == IMESH_MODE) {
- memset(peer_buf, 0, peer_len);
- ret = wl_mesh_get_peer_results(dev, peer_buf, peer_len);
- if (ret >= 0) {
- peer_results = (mesh_peer_info_dump_t *)peer_buf;
- mpi_ext = (mesh_peer_info_ext_t *)peer_results->mpi_ext;
- dump_written += wl_mesh_print_peer_info(mpi_ext,
- peer_results->count, command+dump_written,
- total_len-dump_written);
- }
- } else if (cur_if && dev == cur_if->dev) {
- AEXT_ERROR(dev->name, "[%s][%c] is not mesh interface\n",
- cur_if->ifname, cur_if->prefix);
- }
- }
- }
-
- if (peer_buf)
- kfree(peer_buf);
- return dump_written;
-}
+ s8 iovar_buf[WLC_IOCTL_SMLEN];
+ scb_val_t scbval;
+ struct {
+ s32 cfg;
+ s32 val;
+ } bss_setbuf;
+ apstamode_t apstamode = g_apsta_params.apstamode;
-#ifdef WL_ESCAN
-#define WL_MESH_DELAY_SCAN_MS 3000
-static void
-wl_mesh_timer(unsigned long data)
-{
- wl_event_msg_t msg;
- struct wl_if_info *mesh_if = (struct wl_if_info *)data;
- struct dhd_pub *dhd;
+ printf("%s: %s[%c] Turning off\n", __FUNCTION__, cur_if->ifname, cur_if->prefix);
- if (!mesh_if) {
- AEXT_ERROR("wlan", "mesh_if is not ready\n");
- return;
+ if (cur_if->ifmode == ISTA_MODE) {
+ wl_ext_ioctl(cur_if->dev, WLC_DISASSOC, NULL, 0, 1);
+ } else if (cur_if->ifmode == IAP_MODE || cur_if->ifmode == IMESH_MODE) {
+ // deauthenticate all STA first
+ memcpy(scbval.ea.octet, &ether_bcast, ETHER_ADDR_LEN);
+ wl_ext_ioctl(cur_if->dev, WLC_SCB_DEAUTHENTICATE, &scbval.ea, ETHER_ADDR_LEN, 1);
}
- if (!mesh_if->dev) {
- AEXT_ERROR("wlan", "ifidx %d is not ready\n", mesh_if->ifidx);
- return;
+ if (apstamode == IAPONLY_MODE || apstamode == IMESHONLY_MODE) {
+ wl_ext_ioctl(cur_if->dev, WLC_DOWN, NULL, 0, 1);
+ } else {
+ bss_setbuf.cfg = 0xffffffff;
+ bss_setbuf.val = htod32(0);
+ wl_ext_iovar_setbuf(cur_if->dev, "bss", &bss_setbuf, sizeof(bss_setbuf),
+ iovar_buf, WLC_IOCTL_SMLEN, NULL);
}
- dhd = dhd_get_pub(mesh_if->dev);
-
- bzero(&msg, sizeof(wl_event_msg_t));
- AEXT_TRACE(mesh_if->dev->name, "timer expired\n");
-
- msg.ifidx = mesh_if->ifidx;
- msg.event_type = hton32(WLC_E_RESERVED);
- msg.reason = 0xFFFFFFFF;
- wl_ext_event_send(dhd->event_params, &msg, NULL);
-}
-
-static void
-wl_mesh_set_timer(struct wl_if_info *mesh_if, uint timeout)
-{
- AEXT_TRACE(mesh_if->dev->name, "timeout=%d\n", timeout);
- if (timer_pending(&mesh_if->delay_scan))
- del_timer_sync(&mesh_if->delay_scan);
-
- if (timeout) {
- if (timer_pending(&mesh_if->delay_scan))
- del_timer_sync(&mesh_if->delay_scan);
- mod_timer(&mesh_if->delay_scan, jiffies + msecs_to_jiffies(timeout));
- }
+ return 0;
}
static int
-wl_mesh_clear_vndr_ie(struct net_device *dev, uchar *oui)
+wl_ext_if_up(struct wl_if_info *cur_if)
{
- char *vndr_ie_buf = NULL;
- vndr_ie_setbuf_t *vndr_ie = NULL;
- ie_getbuf_t vndr_ie_tmp;
- char *iovar_buf = NULL;
- int err = -1, i;
- vndr_ie_buf_t *vndr_ie_dump = NULL;
- uchar *iebuf;
- vndr_ie_info_t *ie_info;
- vndr_ie_t *ie;
-
- vndr_ie_buf = kzalloc(WLC_IOCTL_SMLEN, GFP_KERNEL);
- if (!vndr_ie_buf) {
- AEXT_ERROR(dev->name, "IE memory alloc failed\n");
- err = -ENOMEM;
- goto exit;
- }
+ s8 iovar_buf[WLC_IOCTL_SMLEN];
+ struct {
+ s32 cfg;
+ s32 val;
+ } bss_setbuf;
+ struct wl_apsta_params *apsta_params = &g_apsta_params;
+ apstamode_t apstamode = apsta_params->apstamode;
+ chanspec_t fw_chspec;
- iovar_buf = kzalloc(WLC_IOCTL_MEDLEN, GFP_KERNEL);
- if (!iovar_buf) {
- AEXT_ERROR(dev->name, "iovar_buf alloc failed\n");
- err = -ENOMEM;
- goto exit;
+ if (cur_if->ifmode != IAP_MODE) {
+ ANDROID_ERROR(("%s: Wrong ifmode on %s[%c]\n", __FUNCTION__,
+ cur_if->ifname, cur_if->prefix));
+ return 0;
}
- memset(iovar_buf, 0, WLC_IOCTL_MEDLEN);
- vndr_ie_tmp.pktflag = (uint32) -1;
- vndr_ie_tmp.id = (uint8) DOT11_MNG_PROPR_ID;
- err = wl_ext_iovar_getbuf(dev, "vndr_ie", &vndr_ie_tmp, sizeof(vndr_ie_tmp),
- iovar_buf, WLC_IOCTL_MEDLEN, NULL);
- if (err)
- goto exit;
-
- vndr_ie_dump = (vndr_ie_buf_t *)iovar_buf;
- if (!vndr_ie_dump->iecount)
- goto exit;
-
- iebuf = (uchar *)&vndr_ie_dump->vndr_ie_list[0];
- for (i=0; i<vndr_ie_dump->iecount; i++) {
- ie_info = (vndr_ie_info_t *) iebuf;
- ie = &ie_info->vndr_ie_data;
- if (memcmp(ie->oui, oui, 3))
- memset(ie->oui, 0, 3);
- iebuf += sizeof(uint32) + ie->len + VNDR_IE_HDR_LEN;
+ if (cur_if->channel >= 52 && cur_if->channel <= 148) {
+ printf("%s: %s[%c] skip DFS channel %d\n", __FUNCTION__,
+ cur_if->ifname, cur_if->prefix, cur_if->channel);
+ return 0;
}
- vndr_ie = (vndr_ie_setbuf_t *) vndr_ie_buf;
- strncpy(vndr_ie->cmd, "del", VNDR_IE_CMD_LEN - 1);
- vndr_ie->cmd[VNDR_IE_CMD_LEN - 1] = '\0';
- memcpy(&vndr_ie->vndr_ie_buffer, vndr_ie_dump, WLC_IOCTL_SMLEN-VNDR_IE_CMD_LEN-1);
+ printf("%s: %s[%c] Turning on\n", __FUNCTION__, cur_if->ifname, cur_if->prefix);
+ wl_ext_isam_status(cur_if->dev);
- memset(iovar_buf, 0, WLC_IOCTL_MEDLEN);
- err = wl_ext_iovar_setbuf(dev, "vndr_ie", vndr_ie, WLC_IOCTL_SMLEN, iovar_buf,
- WLC_IOCTL_MEDLEN, NULL);
+ wl_ext_set_chanspec(cur_if->dev, apsta_params->ioctl_ver, cur_if->channel,
+ &fw_chspec);
-exit:
- if (vndr_ie) {
- kfree(vndr_ie);
- }
- if (iovar_buf) {
- kfree(iovar_buf);
+ if (apstamode == IAPONLY_MODE) {
+ wl_ext_ioctl(cur_if->dev, WLC_UP, NULL, 0, 1);
+ } else {
+ bss_setbuf.cfg = 0xffffffff;
+ bss_setbuf.val = htod32(1);
+ wl_ext_iovar_setbuf(cur_if->dev, "bss", &bss_setbuf,
+ sizeof(bss_setbuf), iovar_buf, WLC_IOCTL_SMLEN, NULL);
}
- return err;
-}
-
-static int
-wl_mesh_clear_mesh_info(struct wl_apsta_params *apsta_params,
- struct wl_if_info *mesh_if, bool scan)
-{
- struct wl_mesh_params *mesh_info = &apsta_params->mesh_info;
- uchar mesh_oui[]={0x00, 0x22, 0xf4};
- int ret;
-
- AEXT_TRACE(mesh_if->dev->name, "Enter\n");
- ret = wl_mesh_clear_vndr_ie(mesh_if->dev, mesh_oui);
- memset(mesh_info, 0, sizeof(struct wl_mesh_params));
- if (scan) {
- mesh_info->scan_channel = wl_ext_get_chan(apsta_params, mesh_if->dev);
- wl_mesh_set_timer(mesh_if, 100);
- }
+ OSL_SLEEP(500);
- return ret;
+ return 0;
}
static int
-wl_mesh_update_vndr_ie(struct wl_apsta_params *apsta_params,
- struct wl_if_info *mesh_if)
+wl_ext_iapsta_disable(struct net_device *dev, char *command, int total_len)
{
- struct wl_mesh_params *mesh_info = &apsta_params->mesh_info;
- char *vndr_ie;
- uchar mesh_oui[]={0x00, 0x22, 0xf4};
- int bytes_written = 0;
- int ret = 0, i, vndr_ie_len;
- uint8 *peer_bssid;
-
- wl_mesh_clear_vndr_ie(mesh_if->dev, mesh_oui);
-
- vndr_ie_len = WLC_IOCTL_MEDLEN;
- vndr_ie = kmalloc(vndr_ie_len, GFP_KERNEL);
- if (vndr_ie == NULL) {
- AEXT_ERROR(mesh_if->dev->name, "Failed to allocate buffer of %d bytes\n",
- WLC_IOCTL_MEDLEN);
- ret = -1;
- goto exit;
- }
-
- bytes_written += snprintf(vndr_ie+bytes_written, vndr_ie_len,
- "0x%02x%02x%02x", mesh_oui[0], mesh_oui[1], mesh_oui[2]);
-
- bytes_written += snprintf(vndr_ie+bytes_written, vndr_ie_len,
- "%02x%02x%02x%02x%02x%02x%02x%02x", MESH_INFO_MASTER_BSSID, ETHER_ADDR_LEN,
- ((u8 *)(&mesh_info->master_bssid))[0], ((u8 *)(&mesh_info->master_bssid))[1],
- ((u8 *)(&mesh_info->master_bssid))[2], ((u8 *)(&mesh_info->master_bssid))[3],
- ((u8 *)(&mesh_info->master_bssid))[4], ((u8 *)(&mesh_info->master_bssid))[5]);
-
- bytes_written += snprintf(vndr_ie+bytes_written, vndr_ie_len,
- "%02x%02x%02x", MESH_INFO_MASTER_CHANNEL, 1, mesh_info->master_channel);
-
- bytes_written += snprintf(vndr_ie+bytes_written, vndr_ie_len,
- "%02x%02x%02x", MESH_INFO_HOP_CNT, 1, mesh_info->hop_cnt);
-
- bytes_written += snprintf(vndr_ie+bytes_written, vndr_ie_len,
- "%02x%02x", MESH_INFO_PEER_BSSID, mesh_info->hop_cnt*ETHER_ADDR_LEN);
- for (i=0; i<mesh_info->hop_cnt && i<MAX_HOP_LIST; i++) {
- peer_bssid = (uint8 *)&mesh_info->peer_bssid[i];
- bytes_written += snprintf(vndr_ie+bytes_written, vndr_ie_len,
- "%02x%02x%02x%02x%02x%02x",
- peer_bssid[0], peer_bssid[1], peer_bssid[2],
- peer_bssid[3], peer_bssid[4], peer_bssid[5]);
- }
+ char *pch, *pick_tmp, *param;
+ s8 iovar_buf[WLC_IOCTL_SMLEN];
+ wlc_ssid_t ssid = { 0, {0} };
+ scb_val_t scbval;
+ struct {
+ s32 cfg;
+ s32 val;
+ } bss_setbuf;
+ struct wl_apsta_params *apsta_params = &g_apsta_params;
+ apstamode_t apstamode = apsta_params->apstamode;
+ char ifname[IFNAMSIZ+1];
+ struct wl_if_info *cur_if = NULL;
+ struct dhd_pub *dhd;
+ int i;
- ret = wl_ext_add_del_ie(mesh_if->dev, VNDR_IE_BEACON_FLAG|VNDR_IE_PRBRSP_FLAG,
- vndr_ie, "add");
- if (!ret) {
- AEXT_INFO(mesh_if->dev->name, "mbssid=%pM, mchannel=%d, hop=%d, pbssid=%pM\n",
- &mesh_info->master_bssid, mesh_info->master_channel, mesh_info->hop_cnt,
- mesh_info->peer_bssid);
+ if (!apsta_params->init) {
+ ANDROID_ERROR(("%s: please init first\n", __FUNCTION__));
+ return -1;
}
-exit:
- if (vndr_ie)
- kfree(vndr_ie);
- return ret;
-}
-
-static bool
-wl_mesh_update_master_info(struct wl_apsta_params *apsta_params,
- struct wl_if_info *mesh_if)
-{
- struct wl_mesh_params *mesh_info = &apsta_params->mesh_info;
- struct wl_if_info *sta_if = NULL;
- bool updated = FALSE;
-
- sta_if = wl_ext_if_enabled(apsta_params, ISTA_MODE);
- if (sta_if) {
- wldev_ioctl(mesh_if->dev, WLC_GET_BSSID, &mesh_info->master_bssid,
- ETHER_ADDR_LEN, 0);
- mesh_info->master_channel = wl_ext_get_chan(apsta_params, mesh_if->dev);
- mesh_info->hop_cnt = 0;
- memset(mesh_info->peer_bssid, 0, MAX_HOP_LIST*ETHER_ADDR_LEN);
- if (!wl_mesh_update_vndr_ie(apsta_params, mesh_if))
- updated = TRUE;
- }
-
- return updated;
-}
-
-static bool
-wl_mesh_update_mesh_info(struct wl_apsta_params *apsta_params,
- struct wl_if_info *mesh_if)
-{
- struct wl_mesh_params *mesh_info = &apsta_params->mesh_info, peer_mesh_info;
- uint32 count = 0;
- char *dump_buf = NULL;
- mesh_peer_info_dump_t *peer_results;
- mesh_peer_info_ext_t *mpi_ext;
- struct ether_addr bssid;
- bool updated = FALSE, bss_found = FALSE;
- uint16 cur_chan;
+ ANDROID_TRACE(("%s: command=%s, len=%d\n", __FUNCTION__, command, total_len));
+ dhd = dhd_get_pub(dev);
- dump_buf = kmalloc(WLC_IOCTL_MAXLEN, GFP_KERNEL);
- if (dump_buf == NULL) {
- AEXT_ERROR(mesh_if->dev->name, "Failed to allocate buffer of %d bytes\n",
- WLC_IOCTL_MAXLEN);
- return FALSE;
- }
- count = wl_mesh_get_peer_results(mesh_if->dev, dump_buf, WLC_IOCTL_MAXLEN);
- if (count > 0) {
- memset(&bssid, 0, ETHER_ADDR_LEN);
- wldev_ioctl(mesh_if->dev, WLC_GET_BSSID, &bssid, ETHER_ADDR_LEN, 0);
- peer_results = (mesh_peer_info_dump_t *)dump_buf;
- mpi_ext = (mesh_peer_info_ext_t *)peer_results->mpi_ext;
- for (count = 0; count < peer_results->count; count++) {
- if (mpi_ext->entry_state != MESH_SELF_PEER_ENTRY_STATE_TIMEDOUT &&
- mpi_ext->peer_info.state == MESH_PEERING_ESTAB) {
- memset(&peer_mesh_info, 0, sizeof(struct wl_mesh_params));
- bss_found = wl_escan_mesh_info(mesh_if->dev, mesh_if->escan,
- &mpi_ext->ea, &peer_mesh_info);
- if (bss_found && (mesh_info->master_channel == 0 ||
- peer_mesh_info.hop_cnt <= mesh_info->hop_cnt) &&
- memcmp(&peer_mesh_info.peer_bssid, &bssid, ETHER_ADDR_LEN)) {
- memcpy(&mesh_info->master_bssid, &peer_mesh_info.master_bssid,
- ETHER_ADDR_LEN);
- mesh_info->master_channel = peer_mesh_info.master_channel;
- mesh_info->hop_cnt = peer_mesh_info.hop_cnt+1;
- memset(mesh_info->peer_bssid, 0, MAX_HOP_LIST*ETHER_ADDR_LEN);
- memcpy(&mesh_info->peer_bssid, &mpi_ext->ea, ETHER_ADDR_LEN);
- memcpy(&mesh_info->peer_bssid[1], peer_mesh_info.peer_bssid,
- (MAX_HOP_LIST-1)*ETHER_ADDR_LEN);
- updated = TRUE;
- }
- }
- mpi_ext++;
- }
- if (updated) {
- if (wl_mesh_update_vndr_ie(apsta_params, mesh_if)) {
- AEXT_ERROR(mesh_if->dev->name, "update failed\n");
- mesh_info->master_channel = 0;
- updated = FALSE;
- goto exit;
+ pick_tmp = command;
+ param = bcmstrtok(&pick_tmp, " ", 0); // skip iapsta_disable
+ param = bcmstrtok(&pick_tmp, " ", 0);
+ while (param != NULL) {
+ if (!strcmp(param, "ifname")) {
+ pch = bcmstrtok(&pick_tmp, " ", 0);
+ if (pch)
+ strcpy(ifname, pch);
+ else {
+ ANDROID_ERROR(("%s: ifname [wlanX]\n", __FUNCTION__));
+ return -1;
}
}
+ param = bcmstrtok(&pick_tmp, " ", 0);
}
- if (!mesh_info->master_channel) {
- wlc_ssid_t cur_ssid;
- char sec[32];
- bool sae = FALSE;
- memset(&peer_mesh_info, 0, sizeof(struct wl_mesh_params));
- wl_ext_ioctl(mesh_if->dev, WLC_GET_SSID, &cur_ssid, sizeof(cur_ssid), 0);
- wl_ext_get_sec(mesh_if->dev, mesh_if->ifmode, sec, sizeof(sec));
- if (strnicmp(sec, "sae/sae", strlen("sae/sae")) == 0)
- sae = TRUE;
- cur_chan = wl_ext_get_chan(apsta_params, mesh_if->dev);
- bss_found = wl_escan_mesh_peer(mesh_if->dev, mesh_if->escan, &cur_ssid, cur_chan,
- sae, &peer_mesh_info);
-
- if (bss_found && peer_mesh_info.master_channel&&
- (cur_chan != peer_mesh_info.master_channel)) {
- WL_MSG(mesh_if->ifname, "moving channel %d -> %d\n",
- cur_chan, peer_mesh_info.master_channel);
- wl_ext_disable_iface(mesh_if->dev, mesh_if->ifname);
- mesh_if->channel = peer_mesh_info.master_channel;
- wl_ext_enable_iface(mesh_if->dev, mesh_if->ifname, 500);
+ for (i=0; i<MAX_IF_NUM; i++) {
+ if (apsta_params->if_info[i].dev &&
+ !strcmp(apsta_params->if_info[i].dev->name, ifname)) {
+ cur_if = &apsta_params->if_info[i];
+ break;
}
}
+ if (!cur_if) {
+ ANDROID_ERROR(("%s: wrong ifname=%s or dev not ready\n", __FUNCTION__, ifname));
+ return -1;
+ }
-exit:
- if (dump_buf)
- kfree(dump_buf);
- return updated;
-}
-
-static void
-wl_mesh_event_handler( struct wl_apsta_params *apsta_params,
- struct wl_if_info *mesh_if, const wl_event_msg_t *e, void *data)
-{
- struct wl_mesh_params *mesh_info = &apsta_params->mesh_info;
- uint32 event_type = ntoh32(e->event_type);
- uint32 status = ntoh32(e->status);
- uint32 reason = ntoh32(e->reason);
- wlc_ssid_t ssid;
- int ret;
-
- if (wl_get_isam_status(mesh_if, AP_CREATED) &&
- ((event_type == WLC_E_SET_SSID && status == WLC_E_STATUS_SUCCESS) ||
- (event_type == WLC_E_LINK && status == WLC_E_STATUS_SUCCESS &&
- reason == WLC_E_REASON_INITIAL_ASSOC))) {
- if (!wl_mesh_update_master_info(apsta_params, mesh_if)) {
- mesh_info->scan_channel = wl_ext_get_chan(apsta_params, mesh_if->dev);
- wl_mesh_set_timer(mesh_if, WL_MESH_DELAY_SCAN_MS);
- }
- }
- else if ((event_type == WLC_E_LINK && reason == WLC_E_LINK_BSSCFG_DIS) ||
- (event_type == WLC_E_LINK && status == WLC_E_STATUS_SUCCESS &&
- reason == WLC_E_REASON_DEAUTH)) {
- wl_mesh_clear_mesh_info(apsta_params, mesh_if, FALSE);
- }
- else if (wl_get_isam_status(mesh_if, AP_CREATED) &&
- (event_type == WLC_E_ASSOC_IND || event_type == WLC_E_REASSOC_IND) &&
- reason == DOT11_SC_SUCCESS) {
- mesh_info->scan_channel = wl_ext_get_chan(apsta_params, mesh_if->dev);
- wl_mesh_set_timer(mesh_if, 100);
- }
- else if (event_type == WLC_E_DISASSOC_IND || event_type == WLC_E_DEAUTH_IND ||
- (event_type == WLC_E_DEAUTH && reason != DOT11_RC_RESERVED)) {
- if (!memcmp(&mesh_info->peer_bssid, &e->addr, ETHER_ADDR_LEN))
- wl_mesh_clear_mesh_info(apsta_params, mesh_if, TRUE);
- }
- else if (wl_get_isam_status(mesh_if, AP_CREATED) &&
- event_type == WLC_E_RESERVED && reason == 0xFFFFFFFF) {
- if (!wl_mesh_update_master_info(apsta_params, mesh_if)) {
- wl_ext_ioctl(mesh_if->dev, WLC_GET_SSID, &ssid, sizeof(ssid), 0);
- ret = wl_escan_set_scan(mesh_if->dev, apsta_params->dhd, &ssid,
- mesh_info->scan_channel, FALSE);
- if (ret)
- wl_mesh_set_timer(mesh_if, WL_MESH_DELAY_SCAN_MS);
- }
- }
- else if (wl_get_isam_status(mesh_if, AP_CREATED) &&
- ((event_type == WLC_E_ESCAN_RESULT && status == WLC_E_STATUS_SUCCESS) ||
- (event_type == WLC_E_ESCAN_RESULT &&
- (status == WLC_E_STATUS_ABORT || status == WLC_E_STATUS_NEWSCAN ||
- status == WLC_E_STATUS_11HQUIET || status == WLC_E_STATUS_CS_ABORT ||
- status == WLC_E_STATUS_NEWASSOC || status == WLC_E_STATUS_TIMEOUT)))) {
- if (!wl_mesh_update_master_info(apsta_params, mesh_if)) {
- if (!wl_mesh_update_mesh_info(apsta_params, mesh_if)) {
- mesh_info->scan_channel = 0;
- wl_mesh_set_timer(mesh_if, WL_MESH_DELAY_SCAN_MS);
- }
- }
- }
-}
-
-static void
-wl_mesh_escan_detach(dhd_pub_t *dhd, struct wl_if_info *mesh_if)
-{
- AEXT_TRACE(mesh_if->dev->name, "Enter\n");
-
- del_timer_sync(&mesh_if->delay_scan);
-
- if (mesh_if->escan) {
- mesh_if->escan = NULL;
- }
-}
-
-static int
-wl_mesh_escan_attach(dhd_pub_t *dhd, struct wl_if_info *mesh_if)
-{
- AEXT_TRACE(mesh_if->dev->name, "Enter\n");
-
- mesh_if->escan = dhd->escan;
- init_timer_compat(&mesh_if->delay_scan, wl_mesh_timer, mesh_if);
-
- return 0;
-}
-
-static uint
-wl_mesh_update_peer_path(struct wl_if_info *mesh_if, char *command,
- int total_len)
-{
- struct wl_mesh_params peer_mesh_info;
- uint32 count = 0;
- char *dump_buf = NULL;
- mesh_peer_info_dump_t *peer_results;
- mesh_peer_info_ext_t *mpi_ext;
- int bytes_written = 0, j, k;
- bool bss_found = FALSE;
-
- dump_buf = kmalloc(WLC_IOCTL_MAXLEN, GFP_KERNEL);
- if (dump_buf == NULL) {
- AEXT_ERROR(mesh_if->dev->name, "Failed to allocate buffer of %d bytes\n",
- WLC_IOCTL_MAXLEN);
- return FALSE;
- }
- count = wl_mesh_get_peer_results(mesh_if->dev, dump_buf, WLC_IOCTL_MAXLEN);
- if (count > 0) {
- peer_results = (mesh_peer_info_dump_t *)dump_buf;
- mpi_ext = (mesh_peer_info_ext_t *)peer_results->mpi_ext;
- for (count = 0; count < peer_results->count; count++) {
- if (mpi_ext->entry_state != MESH_SELF_PEER_ENTRY_STATE_TIMEDOUT &&
- mpi_ext->peer_info.state == MESH_PEERING_ESTAB) {
- memset(&peer_mesh_info, 0, sizeof(struct wl_mesh_params));
- bss_found = wl_escan_mesh_info(mesh_if->dev, mesh_if->escan,
- &mpi_ext->ea, &peer_mesh_info);
- if (bss_found) {
- bytes_written += snprintf(command+bytes_written, total_len,
- "\npeer=%pM, hop=%d",
- &mpi_ext->ea, peer_mesh_info.hop_cnt);
- for (j=1; j<peer_mesh_info.hop_cnt; j++) {
- bytes_written += snprintf(command+bytes_written,
- total_len, "\n");
- for (k=0; k<j; k++) {
- bytes_written += snprintf(command+bytes_written,
- total_len, " ");
- }
- bytes_written += snprintf(command+bytes_written, total_len,
- "%pM", &peer_mesh_info.peer_bssid[j]);
- }
- }
- }
- mpi_ext++;
- }
- }
-
- if (dump_buf)
- kfree(dump_buf);
- return bytes_written;
-}
-
-static int
-wl_ext_isam_peer_path(struct net_device *dev, char *command, int total_len)
-{
- struct dhd_pub *dhd = dhd_get_pub(dev);
- struct wl_apsta_params *apsta_params = dhd->iapsta_params;
- struct wl_mesh_params *mesh_info = &apsta_params->mesh_info;
- struct wl_if_info *tmp_if;
- uint16 chan = 0;
- char *dump_buf = NULL;
- int dump_len = WLC_IOCTL_MEDLEN;
- int dump_written = 0;
- int i;
-
- if (command || android_msg_level & ANDROID_INFO_LEVEL) {
- if (command) {
- dump_buf = command;
- dump_len = total_len;
- } else {
- dump_buf = kmalloc(dump_len, GFP_KERNEL);
- if (dump_buf == NULL) {
- AEXT_ERROR(dev->name, "Failed to allocate buffer of %d bytes\n",
- dump_len);
- return -1;
- }
- }
- for (i=0; i<MAX_IF_NUM; i++) {
- tmp_if = &apsta_params->if_info[i];
- if (tmp_if->dev && tmp_if->ifmode == IMESH_MODE && apsta_params->macs) {
- chan = wl_ext_get_chan(apsta_params, tmp_if->dev);
- if (chan) {
- dump_written += snprintf(dump_buf+dump_written, dump_len,
- "[dhd-%s-%c] mbssid=%pM, mchan=%d, hop=%d, pbssid=%pM",
- tmp_if->ifname, tmp_if->prefix, &mesh_info->master_bssid,
- mesh_info->master_channel, mesh_info->hop_cnt,
- &mesh_info->peer_bssid);
- dump_written += wl_mesh_update_peer_path(tmp_if,
- dump_buf+dump_written, dump_len-dump_written);
- }
- }
- }
- AEXT_INFO(dev->name, "%s\n", dump_buf);
- }
-
- if (!command && dump_buf)
- kfree(dump_buf);
- return dump_written;
-}
-#endif /* WL_ESCAN */
-#endif /* WLMESH */
-
-static int
-wl_ext_isam_status(struct net_device *dev, char *command, int total_len)
-{
- struct dhd_pub *dhd = dhd_get_pub(dev);
- struct wl_apsta_params *apsta_params = dhd->iapsta_params;
- int i;
- struct wl_if_info *tmp_if;
- uint16 chan = 0;
- wlc_ssid_t ssid = { 0, {0} };
- struct ether_addr bssid;
- scb_val_t scb_val;
- char sec[32];
- u32 chanspec = 0;
- char *dump_buf = NULL;
- int dump_len = WLC_IOCTL_MEDLEN;
- int dump_written = 0;
-
- if (command || android_msg_level & ANDROID_INFO_LEVEL) {
- if (command) {
- dump_buf = command;
- dump_len = total_len;
- } else {
- dump_buf = kmalloc(dump_len, GFP_KERNEL);
- if (dump_buf == NULL) {
- AEXT_ERROR(dev->name, "Failed to allocate buffer of %d bytes\n",
- dump_len);
- return -1;
- }
- }
- dump_written += snprintf(dump_buf+dump_written, dump_len,
- "apstamode=%d", apsta_params->apstamode);
- for (i=0; i<MAX_IF_NUM; i++) {
- memset(&ssid, 0, sizeof(ssid));
- memset(&bssid, 0, sizeof(bssid));
- memset(&scb_val, 0, sizeof(scb_val));
- tmp_if = &apsta_params->if_info[i];
- if (tmp_if->dev) {
- chan = wl_ext_get_chan(apsta_params, tmp_if->dev);
- if (chan) {
- wl_ext_ioctl(tmp_if->dev, WLC_GET_SSID, &ssid, sizeof(ssid), 0);
- wldev_ioctl(tmp_if->dev, WLC_GET_BSSID, &bssid, sizeof(bssid), 0);
- wldev_ioctl(tmp_if->dev, WLC_GET_RSSI, &scb_val,
- sizeof(scb_val_t), 0);
- chanspec = wl_ext_get_chanspec(apsta_params, tmp_if->dev);
- wl_ext_get_sec(tmp_if->dev, tmp_if->ifmode, sec, sizeof(sec));
- dump_written += snprintf(dump_buf+dump_written, dump_len,
- "\n[dhd-%s-%c]: bssid=%pM, chan=%3d(0x%x %sMHz), "
- "rssi=%3d, sec=%-15s, SSID=\"%s\"",
- tmp_if->ifname, tmp_if->prefix, &bssid, chan, chanspec,
- CHSPEC_IS20(chanspec)?"20":
- CHSPEC_IS40(chanspec)?"40":
- CHSPEC_IS80(chanspec)?"80":"160",
- dtoh32(scb_val.val), sec, ssid.SSID);
- if (tmp_if->ifmode == IAP_MODE) {
- dump_written += snprintf(dump_buf+dump_written, dump_len, "\n");
- dump_written += wl_ext_assoclist(tmp_if->dev, NULL,
- dump_buf+dump_written, dump_len-dump_written);
- }
-#ifdef WLMESH
- else if (tmp_if->ifmode == IMESH_MODE) {
- dump_written += snprintf(dump_buf+dump_written, dump_len, "\n");
- dump_written += wl_ext_mesh_peer_status(tmp_if->dev, NULL,
- dump_buf+dump_written, dump_len-dump_written);
- }
-#endif /* WLMESH */
- } else {
- dump_written += snprintf(dump_buf+dump_written, dump_len,
- "\n[dhd-%s-%c]:", tmp_if->ifname, tmp_if->prefix);
- }
- }
- }
- AEXT_INFO(dev->name, "%s\n", dump_buf);
- }
-
- if (!command && dump_buf)
- kfree(dump_buf);
- return dump_written;
-}
-
-static bool
-wl_ext_master_if(struct wl_if_info *cur_if)
-{
- if (cur_if->ifmode == IAP_MODE || cur_if->ifmode == IMESH_MODE)
- return TRUE;
- else
- return FALSE;
-}
-
-static int
-wl_ext_if_down(struct wl_apsta_params *apsta_params, struct wl_if_info *cur_if)
-{
- s8 iovar_buf[WLC_IOCTL_SMLEN];
- scb_val_t scbval;
- struct {
- s32 cfg;
- s32 val;
- } bss_setbuf;
- apstamode_t apstamode = apsta_params->apstamode;
-
- WL_MSG(cur_if->ifname, "[%c] Turning off...\n", cur_if->prefix);
-
- if (cur_if->ifmode == ISTA_MODE) {
- wl_ext_ioctl(cur_if->dev, WLC_DISASSOC, NULL, 0, 1);
- } else if (cur_if->ifmode == IAP_MODE || cur_if->ifmode == IMESH_MODE) {
- // deauthenticate all STA first
- memcpy(scbval.ea.octet, &ether_bcast, ETHER_ADDR_LEN);
- wl_ext_ioctl(cur_if->dev, WLC_SCB_DEAUTHENTICATE, &scbval.ea, ETHER_ADDR_LEN, 1);
- }
-
- if (apstamode == IAPONLY_MODE || apstamode == IMESHONLY_MODE) {
- wl_ext_ioctl(cur_if->dev, WLC_DOWN, NULL, 0, 1);
- } else {
- bss_setbuf.cfg = 0xffffffff;
- bss_setbuf.val = htod32(0);
- wl_ext_iovar_setbuf(cur_if->dev, "bss", &bss_setbuf, sizeof(bss_setbuf),
- iovar_buf, WLC_IOCTL_SMLEN, NULL);
- }
- wl_clr_isam_status(cur_if, AP_CREATED);
-
- return 0;
-}
-
-static int
-wl_ext_if_up(struct wl_apsta_params *apsta_params, struct wl_if_info *cur_if)
-{
- s8 iovar_buf[WLC_IOCTL_SMLEN];
- struct {
- s32 cfg;
- s32 val;
- } bss_setbuf;
- apstamode_t apstamode = apsta_params->apstamode;
- chanspec_t fw_chspec;
- u32 timeout;
- wlc_ssid_t ssid = { 0, {0} };
- uint16 chan = 0;
-
- if (cur_if->ifmode != IAP_MODE) {
- AEXT_ERROR(cur_if->ifname, "Wrong ifmode\n");
- return 0;
- }
-
- if (wl_ext_dfs_chan(cur_if->channel) && !apsta_params->radar) {
- WL_MSG(cur_if->ifname, "[%c] skip DFS channel %d\n",
- cur_if->prefix, cur_if->channel);
- return 0;
- } else if (!cur_if->channel) {
- WL_MSG(cur_if->ifname, "[%c] no valid channel\n", cur_if->prefix);
- return 0;
- }
-
- WL_MSG(cur_if->ifname, "[%c] Turning on...\n", cur_if->prefix);
-
- wl_ext_set_chanspec(cur_if->dev, apsta_params->ioctl_ver, cur_if->channel,
- &fw_chspec);
-
- wl_clr_isam_status(cur_if, AP_CREATED);
- wl_set_isam_status(cur_if, AP_CREATING);
- if (apstamode == IAPONLY_MODE) {
- wl_ext_ioctl(cur_if->dev, WLC_UP, NULL, 0, 1);
- } else {
- bss_setbuf.cfg = 0xffffffff;
- bss_setbuf.val = htod32(1);
- wl_ext_iovar_setbuf(cur_if->dev, "bss", &bss_setbuf,
- sizeof(bss_setbuf), iovar_buf, WLC_IOCTL_SMLEN, NULL);
- }
-
- timeout = wait_event_interruptible_timeout(apsta_params->netif_change_event,
- wl_get_isam_status(cur_if, AP_CREATED),
- msecs_to_jiffies(MAX_AP_LINK_WAIT_TIME));
- if (timeout <= 0 || !wl_get_isam_status(cur_if, AP_CREATED)) {
- wl_ext_if_down(apsta_params, cur_if);
- WL_MSG(cur_if->ifname, "[%c] failed to up with SSID: \"%s\"\n",
- cur_if->prefix, cur_if->ssid);
- } else {
- wl_ext_ioctl(cur_if->dev, WLC_GET_SSID, &ssid, sizeof(ssid), 0);
- chan = wl_ext_get_chan(apsta_params, cur_if->dev);
- WL_MSG(cur_if->ifname, "[%c] enabled with SSID: \"%s\" on channel %d\n",
- cur_if->prefix, ssid.SSID, chan);
- }
- wl_clr_isam_status(cur_if, AP_CREATING);
-
- wl_ext_isam_status(cur_if->dev, NULL, 0);
-
- return 0;
-}
-
-static int
-wl_ext_disable_iface(struct net_device *dev, char *ifname)
-{
- struct dhd_pub *dhd = dhd_get_pub(dev);
- int i;
- s8 iovar_buf[WLC_IOCTL_SMLEN];
- wlc_ssid_t ssid = { 0, {0} };
- scb_val_t scbval;
- struct {
- s32 cfg;
- s32 val;
- } bss_setbuf;
- struct wl_apsta_params *apsta_params = dhd->iapsta_params;
- apstamode_t apstamode = apsta_params->apstamode;
- struct wl_if_info *cur_if = NULL, *tmp_if = NULL;
-
- for (i=0; i<MAX_IF_NUM; i++) {
- tmp_if = &apsta_params->if_info[i];
- if (tmp_if->dev && !strcmp(tmp_if->dev->name, ifname)) {
- cur_if = tmp_if;
- break;
- }
- }
- if (!cur_if) {
- AEXT_ERROR(dev->name, "wrong ifname=%s or dev not ready\n", ifname);
- return -1;
- }
-
- mutex_lock(&apsta_params->usr_sync);
- WL_MSG(ifname, "[%c] Disabling...\n", cur_if->prefix);
+ printf("%s: %s[%c] Disabling\n", __FUNCTION__, ifname, cur_if->prefix);
if (cur_if->ifmode == ISTA_MODE) {
wl_ext_ioctl(cur_if->dev, WLC_DISASSOC, NULL, 0, 1);
- wl_ext_add_remove_pm_enable_work(dev, FALSE);
} else if (cur_if->ifmode == IAP_MODE || cur_if->ifmode == IMESH_MODE) {
// deauthenticate all STA first
 memcpy(scbval.ea.octet, &ether_bcast, ETHER_ADDR_LEN);
wl_ext_ioctl(dev, WLC_DOWN, NULL, 0, 1);
wl_ext_ioctl(dev, WLC_SET_SSID, &ssid, sizeof(ssid), 1); // reset ssid
wl_ext_iovar_setint(dev, "mpc", 1);
- } else if ((apstamode==ISTAAP_MODE || apstamode==ISTAGO_MODE) &&
+ } else if ((apstamode==IAPSTA_MODE || apstamode==IGOSTA_MODE) &&
cur_if->ifmode == IAP_MODE) {
bss_setbuf.cfg = 0xffffffff;
bss_setbuf.val = htod32(0);
dhd_wlfc_deinit(dhd);
}
}
-#endif /* BCMSDIO */
+#endif
#endif /* PROP_TXSTATUS_VSDB */
}
- else if (apstamode == IDUALAP_MODE || apstamode == ISTAAPAP_MODE) {
+ else if (apstamode == IDUALAP_MODE) {
bss_setbuf.cfg = 0xffffffff;
bss_setbuf.val = htod32(0);
wl_ext_iovar_setbuf(cur_if->dev, "bss", &bss_setbuf, sizeof(bss_setbuf),
iovar_buf, WLC_IOCTL_SMLEN, NULL);
-#ifdef WLMESH
- } else if (apstamode == ISTAMESH_MODE || apstamode == IMESHAP_MODE ||
- apstamode == ISTAAPMESH_MODE || apstamode == IMESHAPAP_MODE) {
+ } else if (apstamode == IMESHSTA_MODE || apstamode == IMESHAP_MODE ||
+ apstamode == IMESHAPSTA_MODE || apstamode == IMESHAPAP_MODE ||
+ apstamode == ISTAAPAP_MODE) {
bss_setbuf.cfg = 0xffffffff;
bss_setbuf.val = htod32(0);
wl_ext_iovar_setbuf(cur_if->dev, "bss", &bss_setbuf, sizeof(bss_setbuf),
iovar_buf, WLC_IOCTL_SMLEN, NULL);
- if (cur_if->ifmode == IMESH_MODE) {
- int scan_assoc_time = DHD_SCAN_ASSOC_ACTIVE_TIME;
- for (i=0; i<MAX_IF_NUM; i++) {
- tmp_if = &apsta_params->if_info[i];
- if (tmp_if->dev && tmp_if->ifmode == ISTA_MODE) {
- wl_ext_ioctl(tmp_if->dev, WLC_SET_SCAN_CHANNEL_TIME,
- &scan_assoc_time, sizeof(scan_assoc_time), 1);
- }
- }
- }
-#endif /* WLMESH */
}
- wl_clr_isam_status(cur_if, AP_CREATED);
+ cur_if->ifstate = IF_STATE_DISALBE;
+
+ printf("%s: %s[%c] disabled\n", __FUNCTION__, ifname, cur_if->prefix);
- WL_MSG(ifname, "[%c] Exit\n", cur_if->prefix);
- mutex_unlock(&apsta_params->usr_sync);
return 0;
}
-static int
-wl_ext_iapsta_disable(struct net_device *dev, char *command, int total_len)
+static uint16
+wl_ext_get_vsdb_chan(struct net_device *dev,
+ struct wl_if_info *cur_if, struct wl_if_info *target_if)
{
- int ret = 0;
- char *pch, *pick_tmp, *param;
- char ifname[IFNAMSIZ+1];
+ struct wl_apsta_params *apsta_params = &g_apsta_params;
+ uint16 target_chan = 0, cur_chan = cur_if->channel;
+ struct dhd_pub *dhd;
- AEXT_TRACE(dev->name, "command=%s, len=%d\n", command, total_len);
+ dhd = dhd_get_pub(dev);
- pick_tmp = command;
- param = bcmstrtok(&pick_tmp, " ", 0); // skip iapsta_disable
- param = bcmstrtok(&pick_tmp, " ", 0);
- while (param != NULL) {
- if (!strcmp(param, "ifname")) {
- pch = bcmstrtok(&pick_tmp, " ", 0);
- if (pch) {
- strcpy(ifname, pch);
- ret = wl_ext_disable_iface(dev, ifname);
- if (ret)
- return ret;
- }
- else {
- AEXT_ERROR(dev->name, "ifname [wlanX]\n");
- return -1;
- }
+ target_chan = wl_ext_get_chan(target_if->dev);
+ if (target_chan) {
+ ANDROID_INFO(("%s: cur_chan=%d, target_chan=%d\n", __FUNCTION__,
+ cur_chan, target_chan));
+ if ((cur_chan <= CH_MAX_2G_CHANNEL && target_chan > CH_MAX_2G_CHANNEL) ||
+ (cur_chan > CH_MAX_2G_CHANNEL && target_chan <= CH_MAX_2G_CHANNEL)) {
+ // different band
+ if (!FW_SUPPORTED(dhd, rsdb) || !apsta_params->rsdb)
+ return target_chan;
+ } else {
+ // same band
+ if (target_chan != cur_chan)
+ return target_chan;
}
- param = bcmstrtok(&pick_tmp, " ", 0);
}
- return ret;
+ return 0;
}
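
/*
 * Worked example of the check above (channel numbers are illustrative): if
 * cur_if is an AP being brought up on channel 6 while target_if is a STA
 * associated on channel 36, the two sit in different bands, so a channel
 * move is only reported when the firmware lacks RSDB support or rsdb is
 * disabled; if both interfaces are in the same band (say 1 vs 6), any
 * mismatch returns the target channel so the caller can follow the
 * higher-priority interface.
 */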
-static bool
-wl_ext_diff_band(uint16 chan1, uint16 chan2)
+static int
+wl_ext_triger_csa(struct wl_if_info *cur_if)
{
- if ((chan1 <= CH_MAX_2G_CHANNEL && chan2 > CH_MAX_2G_CHANNEL) ||
- (chan1 > CH_MAX_2G_CHANNEL && chan2 <= CH_MAX_2G_CHANNEL)) {
- return TRUE;
+ struct wl_apsta_params *apsta_params = &g_apsta_params;
+ s8 iovar_buf[WLC_IOCTL_SMLEN];
+
+ if (apsta_params->csa & CSA_DRV_BIT &&
+ (cur_if->ifmode == IAP_MODE || cur_if->ifmode == IMESH_MODE)) {
+ if (!cur_if->channel) {
+ printf("%s: %s[%c] skip channel %d\n", __FUNCTION__,
+ cur_if->ifname, cur_if->prefix, cur_if->channel);
+ } else if (cur_if->channel >= 52 && cur_if->channel <= 148) {
+ printf("%s: %s[%c] skip DFS channel %d\n", __FUNCTION__,
+ cur_if->ifname, cur_if->prefix, cur_if->channel);
+ wl_ext_if_down(cur_if);
+ } else {
+ wl_chan_switch_t csa_arg;
+ memset(&csa_arg, 0, sizeof(csa_arg));
+ csa_arg.mode = 1;
+ csa_arg.count = 3;
+ csa_arg.chspec = wl_ext_get_chanspec(apsta_params, cur_if->dev,
+ cur_if->channel);
+ if (csa_arg.chspec) {
+ printf("%s: Trigger CSA to channel %d(0x%x)\n", __FUNCTION__,
+ cur_if->channel, csa_arg.chspec);
+ wl_ext_iovar_setbuf(cur_if->dev, "csa", &csa_arg, sizeof(csa_arg),
+ iovar_buf, sizeof(iovar_buf), NULL);
+ OSL_SLEEP(500);
+ wl_ext_isam_status(cur_if->dev);
+ } else {
+ printf("%s: fail to get chanspec\n", __FUNCTION__);
+ }
+ }
}
- return FALSE;
+
+ return 0;
}
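
/*
 * Note on the CSA parameters above: in terms of the 802.11 Channel Switch
 * Announcement element, mode 1 asks associated stations to stop transmitting
 * until the switch and count 3 schedules the switch three beacon intervals
 * out. How the firmware maps wl_chan_switch_t onto the IE is firmware
 * specific, so treat these values as the driver's chosen defaults rather
 * than a protocol requirement.
 */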
static uint16
-wl_ext_same_band(struct wl_apsta_params *apsta_params,
- struct wl_if_info *cur_if, bool nodfs)
+wl_ext_move_cur_channel(struct net_device *dev,
+ struct wl_if_info *cur_if)
{
- struct wl_if_info *tmp_if;
+ struct wl_apsta_params *apsta_params = &g_apsta_params;
+ struct wl_if_info *tmp_if, *target_if = NULL;
uint16 tmp_chan, target_chan = 0;
wl_prio_t max_prio;
int i;
+ if (apsta_params->vsdb) {
+ target_chan = cur_if->channel;
+ goto exit;
+ }
+
// find the max prio
max_prio = cur_if->prio;
for (i=0; i<MAX_IF_NUM; i++) {
tmp_if = &apsta_params->if_info[i];
- if (cur_if != tmp_if && wl_get_isam_status(tmp_if, IF_READY) &&
+ if (tmp_if->ifstate >= IF_STATE_INIT && cur_if != tmp_if &&
tmp_if->prio > max_prio) {
- tmp_chan = wl_ext_get_chan(apsta_params, tmp_if->dev);
- if (wl_ext_dfs_chan(tmp_chan) && nodfs)
- continue;
- if (tmp_chan && !wl_ext_diff_band(cur_if->channel, tmp_chan)) {
+ tmp_chan = wl_ext_get_vsdb_chan(dev, cur_if, tmp_if);
+ if (tmp_chan) {
+ target_if = tmp_if;
target_chan = tmp_chan;
max_prio = tmp_if->prio;
}
}
}
- return target_chan;
-}
-
-static uint16
-wl_ext_get_vsdb_chan(struct wl_apsta_params *apsta_params,
- struct wl_if_info *cur_if, struct wl_if_info *target_if)
-{
- uint16 target_chan = 0, cur_chan = cur_if->channel;
-
- target_chan = wl_ext_get_chan(apsta_params, target_if->dev);
- if (target_chan) {
- AEXT_INFO(cur_if->ifname, "cur_chan=%d, target_chan=%d\n",
- cur_chan, target_chan);
- if (wl_ext_diff_band(cur_chan, target_chan)) {
- if (!apsta_params->rsdb)
- return target_chan;
- } else {
- if (cur_chan != target_chan)
- return target_chan;
- }
- }
-
- return 0;
-}
-
-static int
-wl_ext_rsdb_core_conflict(struct wl_apsta_params *apsta_params,
- struct wl_if_info *cur_if)
-{
- struct wl_if_info *tmp_if;
- uint16 cur_chan, tmp_chan;
- int i;
-
- if (apsta_params->rsdb) {
- cur_chan = wl_ext_get_chan(apsta_params, cur_if->dev);
- for (i=0; i<MAX_IF_NUM; i++) {
- tmp_if = &apsta_params->if_info[i];
- if (tmp_if != cur_if && wl_get_isam_status(tmp_if, IF_READY) &&
- tmp_if->prio > cur_if->prio) {
- tmp_chan = wl_ext_get_chan(apsta_params, tmp_if->dev);
- if (!tmp_chan)
- continue;
- if (wl_ext_diff_band(cur_chan, tmp_chan) &&
- wl_ext_diff_band(cur_chan, cur_if->channel))
- return TRUE;
- else if (!wl_ext_diff_band(cur_chan, tmp_chan) &&
- wl_ext_diff_band(cur_chan, cur_if->channel))
- return TRUE;
- }
- }
- }
- return FALSE;
-}
-
-static int
-wl_ext_trigger_csa(struct wl_apsta_params *apsta_params, struct wl_if_info *cur_if)
-{
- s8 iovar_buf[WLC_IOCTL_SMLEN];
- bool core_conflict = FALSE;
-
- if (wl_ext_master_if(cur_if) && (apsta_params->csa & CSA_DRV_BIT)) {
- if (!cur_if->channel) {
- WL_MSG(cur_if->ifname, "[%c] no valid channel\n", cur_if->prefix);
- } else if (wl_ext_dfs_chan(cur_if->channel) && !apsta_params->radar) {
- WL_MSG(cur_if->ifname, "[%c] skip DFS channel %d\n",
- cur_if->prefix, cur_if->channel);
- wl_ext_if_down(apsta_params, cur_if);
- } else {
- wl_chan_switch_t csa_arg;
- memset(&csa_arg, 0, sizeof(csa_arg));
- csa_arg.mode = 1;
- csa_arg.count = 3;
- csa_arg.chspec = wl_ext_chan_to_chanspec(apsta_params, cur_if->dev,
- cur_if->channel);
- core_conflict = wl_ext_rsdb_core_conflict(apsta_params, cur_if);
- if (core_conflict) {
- WL_MSG(cur_if->ifname, "[%c] Skip CSA due to rsdb core conflict\n",
- cur_if->prefix);
- } else if (csa_arg.chspec) {
- WL_MSG(cur_if->ifname, "[%c] Trigger CSA to channel %d(0x%x)\n",
- cur_if->prefix, cur_if->channel, csa_arg.chspec);
- wl_set_isam_status(cur_if, AP_CREATING);
- wl_ext_iovar_setbuf(cur_if->dev, "csa", &csa_arg, sizeof(csa_arg),
- iovar_buf, sizeof(iovar_buf), NULL);
- OSL_SLEEP(500);
- wl_clr_isam_status(cur_if, AP_CREATING);
- wl_ext_isam_status(cur_if->dev, NULL, 0);
- } else {
- AEXT_ERROR(cur_if->ifname, "fail to get chanspec\n");
- }
- }
- }
-
- return 0;
-}
-
-static void
-wl_ext_move_cur_dfs_channel(struct wl_apsta_params *apsta_params,
- struct wl_if_info *cur_if)
-{
- uint16 other_chan = 0, cur_chan = cur_if->channel;
- uint16 chan_2g = 0, chan_5g = 0;
- uint32 auto_band = WLC_BAND_2G;
-
- if (wl_ext_master_if(cur_if) && wl_ext_dfs_chan(cur_if->channel) &&
- !apsta_params->radar) {
-
- wl_ext_get_default_chan(cur_if->dev, &chan_2g, &chan_5g, TRUE);
- if (!chan_2g && !chan_5g) {
- cur_if->channel = 0;
- WL_MSG(cur_if->ifname, "[%c] no valid channel\n", cur_if->prefix);
- return;
- }
-
- if (apsta_params->vsdb) {
- if (chan_5g) {
- cur_if->channel = chan_5g;
- auto_band = WLC_BAND_5G;
- other_chan = wl_ext_same_band(apsta_params, cur_if, TRUE);
- } else {
- cur_if->channel = chan_2g;
- auto_band = WLC_BAND_2G;
- other_chan = wl_ext_same_band(apsta_params, cur_if, TRUE);
- }
- if (!other_chan) {
- other_chan = wl_ext_autochannel(cur_if->dev, ACS_FW_BIT|ACS_DRV_BIT,
- auto_band);
- }
- if (other_chan)
- cur_if->channel = other_chan;
- } else if (apsta_params->rsdb) {
- if (chan_5g) {
- cur_if->channel = chan_5g;
- auto_band = WLC_BAND_5G;
- other_chan = wl_ext_same_band(apsta_params, cur_if, FALSE);
- if (wl_ext_dfs_chan(other_chan) && chan_2g) {
- cur_if->channel = chan_2g;
- auto_band = WLC_BAND_2G;
- other_chan = wl_ext_same_band(apsta_params, cur_if, TRUE);
- }
- } else {
- cur_if->channel = chan_2g;
- auto_band = WLC_BAND_2G;
- other_chan = wl_ext_same_band(apsta_params, cur_if, TRUE);
- }
- if (!other_chan) {
- other_chan = wl_ext_autochannel(cur_if->dev, ACS_FW_BIT|ACS_DRV_BIT,
- auto_band);
- }
- if (other_chan)
- cur_if->channel = other_chan;
- } else {
- cur_if->channel = chan_5g;
- auto_band = WLC_BAND_5G;
- other_chan = wl_ext_same_band(apsta_params, cur_if, FALSE);
- if (wl_ext_dfs_chan(other_chan)) {
- cur_if->channel = 0;
- }
- else if (!other_chan) {
- other_chan = wl_ext_autochannel(cur_if->dev, ACS_FW_BIT|ACS_DRV_BIT,
- auto_band);
- }
- if (other_chan)
- cur_if->channel = other_chan;
- }
- WL_MSG(cur_if->ifname, "[%c] move channel %d => %d\n",
- cur_if->prefix, cur_chan, cur_if->channel);
- }
-}
-
-static void
-wl_ext_move_other_dfs_channel(struct wl_apsta_params *apsta_params,
- struct wl_if_info *cur_if)
-{
- uint16 other_chan = 0, cur_chan = cur_if->channel;
- uint16 chan_2g = 0, chan_5g = 0;
- uint32 auto_band = WLC_BAND_2G;
-
- if (wl_ext_master_if(cur_if) && wl_ext_dfs_chan(cur_if->channel) &&
- !apsta_params->radar) {
-
- wl_ext_get_default_chan(cur_if->dev, &chan_2g, &chan_5g, TRUE);
- if (!chan_2g && !chan_5g) {
- cur_if->channel = 0;
- WL_MSG(cur_if->ifname, "[%c] no valid channel\n", cur_if->prefix);
- return;
- }
-
- if (apsta_params->vsdb) {
- if (chan_5g) {
- cur_if->channel = chan_5g;
- auto_band = WLC_BAND_5G;
- other_chan = wl_ext_same_band(apsta_params, cur_if, TRUE);
- } else {
- cur_if->channel = chan_2g;
- auto_band = WLC_BAND_2G;
- other_chan = wl_ext_same_band(apsta_params, cur_if, TRUE);
- }
- if (!other_chan) {
- other_chan = wl_ext_autochannel(cur_if->dev, ACS_FW_BIT|ACS_DRV_BIT,
- auto_band);
- }
- if (other_chan)
- cur_if->channel = other_chan;
- } else if (apsta_params->rsdb) {
- if (chan_2g) {
- cur_if->channel = chan_2g;
- auto_band = WLC_BAND_2G;
- other_chan = wl_ext_same_band(apsta_params, cur_if, TRUE);
- if (!other_chan) {
- other_chan = wl_ext_autochannel(cur_if->dev, ACS_FW_BIT|ACS_DRV_BIT,
- auto_band);
- }
- } else {
- cur_if->channel = 0;
- }
- if (other_chan)
- cur_if->channel = other_chan;
- } else {
- cur_if->channel = 0;
- }
- WL_MSG(cur_if->ifname, "[%c] move channel %d => %d\n",
- cur_if->prefix, cur_chan, cur_if->channel);
- }
-}
-
-static uint16
-wl_ext_move_cur_channel(struct wl_apsta_params *apsta_params,
- struct wl_if_info *cur_if)
-{
- struct wl_if_info *tmp_if, *target_if = NULL;
- uint16 tmp_chan, target_chan = 0;
- wl_prio_t max_prio;
- int i;
-
- if (apsta_params->vsdb) {
- target_chan = cur_if->channel;
- goto exit;
- }
-
- // find the max prio
- max_prio = cur_if->prio;
- for (i=0; i<MAX_IF_NUM; i++) {
- tmp_if = &apsta_params->if_info[i];
- if (cur_if != tmp_if && wl_get_isam_status(tmp_if, IF_READY) &&
- tmp_if->prio > max_prio) {
- tmp_chan = wl_ext_get_vsdb_chan(apsta_params, cur_if, tmp_if);
- if (tmp_chan) {
- target_if = tmp_if;
- target_chan = tmp_chan;
- max_prio = tmp_if->prio;
- }
- }
- }
-
- if (target_chan) {
- tmp_chan = wl_ext_get_chan(apsta_params, cur_if->dev);
- if (apsta_params->rsdb && tmp_chan &&
- wl_ext_diff_band(tmp_chan, target_chan)) {
- WL_MSG(cur_if->ifname, "[%c] keep on current channel %d\n",
- cur_if->prefix, tmp_chan);
- cur_if->channel = 0;
- } else {
- WL_MSG(cur_if->ifname, "[%c] channel=%d => %s[%c] channel=%d\n",
- cur_if->prefix, cur_if->channel,
- target_if->ifname, target_if->prefix, target_chan);
- cur_if->channel = target_chan;
- }
- }
-
-exit:
- wl_ext_move_cur_dfs_channel(apsta_params, cur_if);
-
- return cur_if->channel;
+ if (target_chan) {
+ printf("%s: %s channel=%d => %s channel=%d\n", __FUNCTION__,
+ cur_if->ifname, cur_if->channel, target_if->ifname, target_chan);
+ cur_if->channel = target_chan;
+ }
+exit:
+ if ((cur_if->ifmode == IAP_MODE || cur_if->ifmode == IMESH_MODE) &&
+ (cur_if->channel >= 52 && cur_if->channel <= 148)) {
+ printf("%s: %s[%c] skip DFS channel %d\n", __FUNCTION__,
+ cur_if->ifname, cur_if->prefix, cur_if->channel);
+ cur_if->channel = 0;
+ }
+
+ return cur_if->channel;
}
static void
-wl_ext_move_other_channel(struct wl_apsta_params *apsta_params,
+wl_ext_move_other_channel(struct net_device *dev,
struct wl_if_info *cur_if)
{
+ struct wl_apsta_params *apsta_params = &g_apsta_params;
struct wl_if_info *tmp_if, *target_if=NULL;
uint16 tmp_chan, target_chan = 0;
wl_prio_t max_prio = 0, cur_prio;
cur_prio = cur_if->prio;
for (i=0; i<MAX_IF_NUM; i++) {
tmp_if = &apsta_params->if_info[i];
- if (cur_if != tmp_if && wl_get_isam_status(tmp_if, IF_READY) &&
+ if (tmp_if->ifstate >= IF_STATE_INIT && cur_if != tmp_if &&
tmp_if->prio >= max_prio && tmp_if->prio <= cur_prio) {
- tmp_chan = wl_ext_get_vsdb_chan(apsta_params, cur_if, tmp_if);
+ tmp_chan = wl_ext_get_vsdb_chan(dev, cur_if, tmp_if);
if (tmp_chan) {
target_if = tmp_if;
target_chan = tmp_chan;
}
if (target_if) {
- WL_MSG(target_if->ifname, "channel=%d => %s channel=%d\n",
- target_chan, cur_if->ifname, cur_if->channel);
+ printf("%s: %s channel=%d => %s channel=%d\n", __FUNCTION__,
+ target_if->ifname, target_chan, cur_if->ifname, cur_if->channel);
target_if->channel = cur_if->channel;
- wl_ext_move_other_dfs_channel(apsta_params, target_if);
if (apsta_params->csa == 0) {
- wl_ext_if_down(apsta_params, target_if);
- wl_ext_move_other_channel(apsta_params, cur_if);
+ wl_ext_if_down(target_if);
+ wl_ext_move_other_channel(dev, target_if);
if (target_if->ifmode == ISTA_MODE || target_if->ifmode == IMESH_MODE) {
- wl_ext_enable_iface(target_if->dev, target_if->ifname, 0);
+ wl_ext_enable_iface(target_if->dev, target_if->ifname);
} else if (target_if->ifmode == IAP_MODE) {
- wl_ext_if_up(apsta_params, target_if);
+ wl_ext_if_up(target_if);
}
} else {
- wl_ext_trigger_csa(apsta_params, target_if);
+ wl_ext_triger_csa(target_if);
}
}
}
-static bool
-wl_ext_wait_other_enabling(struct wl_apsta_params *apsta_params,
- struct wl_if_info *cur_if)
-{
- struct wl_if_info *tmp_if;
- bool enabling = FALSE;
- u32 timeout = 1;
- int i;
-
- for (i=0; i<MAX_IF_NUM; i++) {
- tmp_if = &apsta_params->if_info[i];
- if (tmp_if->dev && tmp_if->dev != cur_if->dev) {
- if (tmp_if->ifmode == ISTA_MODE)
- enabling = wl_get_isam_status(tmp_if, STA_CONNECTING);
- else if (tmp_if->ifmode == IAP_MODE || tmp_if->ifmode == IMESH_MODE)
- enabling = wl_get_isam_status(tmp_if, AP_CREATING);
- if (enabling)
- WL_MSG(cur_if->ifname, "waiting for %s[%c] enabling...\n",
- tmp_if->ifname, tmp_if->prefix);
- if (enabling && tmp_if->ifmode == ISTA_MODE) {
- timeout = wait_event_interruptible_timeout(
- apsta_params->netif_change_event,
- !wl_get_isam_status(tmp_if, STA_CONNECTING),
- msecs_to_jiffies(MAX_STA_LINK_WAIT_TIME));
- } else if (enabling &&
- (tmp_if->ifmode == IAP_MODE || tmp_if->ifmode == IMESH_MODE)) {
- timeout = wait_event_interruptible_timeout(
- apsta_params->netif_change_event,
- !wl_get_isam_status(tmp_if, AP_CREATING),
- msecs_to_jiffies(MAX_STA_LINK_WAIT_TIME));
- }
- if (tmp_if->ifmode == ISTA_MODE)
- enabling = wl_get_isam_status(tmp_if, STA_CONNECTING);
- else if (tmp_if->ifmode == IAP_MODE || tmp_if->ifmode == IMESH_MODE)
- enabling = wl_get_isam_status(tmp_if, AP_CREATING);
- if (timeout <= 0 || enabling) {
- WL_MSG(cur_if->ifname, "%s[%c] is still enabling...\n",
- tmp_if->ifname, tmp_if->prefix);
- }
- }
- }
-
- return enabling;
-}
-
static int
-wl_ext_enable_iface(struct net_device *dev, char *ifname, int wait_up)
+wl_ext_enable_iface(struct net_device *dev, char *ifname)
{
- struct dhd_pub *dhd = dhd_get_pub(dev);
- int i, ret = 0;
+ int i;
s8 iovar_buf[WLC_IOCTL_SMLEN];
wlc_ssid_t ssid = { 0, {0} };
chanspec_t fw_chspec;
+ struct wl_join_params join_params;
+ size_t join_params_size;
struct {
s32 cfg;
s32 val;
} bss_setbuf;
- struct wl_apsta_params *apsta_params = dhd->iapsta_params;
+ struct wl_apsta_params *apsta_params = &g_apsta_params;
apstamode_t apstamode = apsta_params->apstamode;
- struct wl_if_info *cur_if = NULL, *tmp_if = NULL;
+ struct wl_if_info *cur_if = NULL;
+ struct dhd_pub *dhd;
uint16 cur_chan;
- struct wl_conn_info conn_info;
- u32 timeout;
+
+ dhd = dhd_get_pub(dev);
for (i=0; i<MAX_IF_NUM; i++) {
- tmp_if = &apsta_params->if_info[i];
- if (tmp_if->dev && !strcmp(tmp_if->dev->name, ifname)) {
- cur_if = tmp_if;
+ if (apsta_params->if_info[i].dev &&
+ !strcmp(apsta_params->if_info[i].dev->name, ifname)) {
+ cur_if = &apsta_params->if_info[i];
break;
}
}
if (!cur_if) {
- AEXT_ERROR(dev->name, "wrong ifname=%s or dev not ready\n", ifname);
+ ANDROID_ERROR(("%s: wrong ifname=%s or dev not ready\n", __FUNCTION__, ifname));
return -1;
}
- mutex_lock(&apsta_params->usr_sync);
-
- if (cur_if->ifmode == ISTA_MODE) {
- wl_set_isam_status(cur_if, STA_CONNECTING);
- } else if (cur_if->ifmode == IAP_MODE || cur_if->ifmode == IMESH_MODE) {
- wl_set_isam_status(cur_if, AP_CREATING);
- }
-
- wl_ext_isam_status(cur_if->dev, NULL, 0);
- WL_MSG(ifname, "[%c] Enabling...\n", cur_if->prefix);
+ printf("%s: %s[%c] Enabling\n", __FUNCTION__, ifname, cur_if->prefix);
- wl_ext_wait_other_enabling(apsta_params, cur_if);
+ wl_ext_isam_status(cur_if->dev);
- if (wl_ext_master_if(cur_if) && apsta_params->acs) {
- uint16 chan_2g, chan_5g;
- uint auto_band;
- auto_band = WL_GET_BAND(cur_if->channel);
- wl_ext_get_default_chan(cur_if->dev, &chan_2g, &chan_5g, TRUE);
- if ((chan_2g && auto_band == WLC_BAND_2G) ||
- (chan_5g && auto_band == WLC_BAND_5G)) {
- cur_if->channel = wl_ext_autochannel(cur_if->dev, apsta_params->acs,
- auto_band);
- } else {
- AEXT_ERROR(ifname, "invalid channel\n");
- ret = -1;
- goto exit;
- }
- }
-
- wl_ext_move_cur_channel(apsta_params, cur_if);
-
- if (wl_ext_master_if(cur_if) && !cur_if->channel) {
- AEXT_ERROR(ifname, "skip channel 0\n");
- ret = -1;
- goto exit;
+ wl_ext_move_cur_channel(dev, cur_if);
+ if (!cur_if->channel && cur_if->ifmode != ISTA_MODE) {
+ return 0;
}
- cur_chan = wl_ext_get_chan(apsta_params, cur_if->dev);
+ cur_chan = wl_ext_get_chan(cur_if->dev);
if (cur_chan) {
- AEXT_INFO(cur_if->ifname, "Associated\n");
+ ANDROID_INFO(("%s: Associated!\n", __FUNCTION__));
if (cur_chan != cur_if->channel) {
- wl_ext_trigger_csa(apsta_params, cur_if);
+ wl_ext_triger_csa(cur_if);
}
- goto exit;
- }
- if (cur_if->ifmode == ISTA_MODE) {
- wl_clr_isam_status(cur_if, STA_CONNECTED);
- } else if (cur_if->ifmode == IAP_MODE || cur_if->ifmode == IMESH_MODE) {
- wl_clr_isam_status(cur_if, AP_CREATED);
+ return 0;
}
- wl_ext_move_other_channel(apsta_params, cur_if);
+ wl_ext_move_other_channel(dev, cur_if);
if (cur_if->ifidx > 0) {
wl_ext_iovar_setbuf(cur_if->dev, "cur_etheraddr", (u8 *)cur_if->dev->dev_addr,
memcpy(ssid.SSID, cur_if->ssid, ssid.SSID_len);
if (cur_if->ifmode == IAP_MODE || cur_if->ifmode == IMESH_MODE) {
wl_ext_iovar_setint(dev, "mpc", 0);
- if (apstamode == IAPONLY_MODE || apstamode == IMESHONLY_MODE) {
+ if (apstamode == IAPONLY_MODE) {
wl_ext_ioctl(dev, WLC_UP, NULL, 0, 1);
- } else if (apstamode==ISTAAP_MODE || apstamode==ISTAGO_MODE) {
+ } else if (apstamode==IAPSTA_MODE || apstamode==IGOSTA_MODE) {
wl_ext_iovar_setbuf_bsscfg(cur_if->dev, "ssid", &ssid, sizeof(ssid),
iovar_buf, WLC_IOCTL_SMLEN, cur_if->bssidx, NULL);
}
}
- if (wl_ext_master_if(cur_if)) {
+ if (cur_if->ifmode == IAP_MODE || cur_if->ifmode == IMESH_MODE) {
wl_ext_set_bgnmode(cur_if);
if (!cur_if->channel) {
+#ifdef WL_CFG80211
+ char *pick_tmp, *param;
+ char cmd[128];
+ uint16 cur_chan;
+ cur_chan = 1;
+ snprintf(cmd, 128, "get_best_channels");
+ wl_cfg80211_get_best_channels(dev, cmd, strlen(cmd));
+ pick_tmp = cmd;
+ param = bcmstrtok(&pick_tmp, " ", 0);
+ while (param != NULL) {
+ if (!strnicmp(param, "2g=", strlen("2g="))) {
+ cur_chan = (int)simple_strtol(param+strlen("2g="), NULL, 10);
+ } else if (!strnicmp(param, "5g=", strlen("5g="))) {
+ cur_chan = (int)simple_strtol(param+strlen("5g="), NULL, 10);
+ }
+ param = bcmstrtok(&pick_tmp, " ", 0);
+ }
+ cur_if->channel = cur_chan;
+#else
cur_if->channel = 1;
+#endif
}
- ret = wl_ext_set_chanspec(cur_if->dev, apsta_params->ioctl_ver,
- cur_if->channel, &fw_chspec);
- if (ret)
- goto exit;
+ wl_ext_set_chanspec(cur_if->dev, apsta_params->ioctl_ver, cur_if->channel,
+ &fw_chspec);
}
wl_ext_set_amode(cur_if);
- wl_ext_set_emode(apsta_params, cur_if);
+ wl_ext_set_emode(cur_if, apsta_params);
- if (cur_if->ifmode == ISTA_MODE) {
- conn_info.bssidx = cur_if->bssidx;
- conn_info.channel = cur_if->channel;
- memcpy(conn_info.ssid.SSID, cur_if->ssid, strlen(cur_if->ssid));
- conn_info.ssid.SSID_len = strlen(cur_if->ssid);
- memcpy(&conn_info.bssid, &cur_if->bssid, ETHER_ADDR_LEN);
- }
if (cur_if->ifmode == IAP_MODE) {
if (cur_if->maxassoc >= 0)
wl_ext_iovar_setint(dev, "maxassoc", cur_if->maxassoc);
if (cur_if->hidden > 0) {
wl_ext_ioctl(cur_if->dev, WLC_SET_CLOSED, &cur_if->hidden,
sizeof(cur_if->hidden), 1);
- WL_MSG(ifname, "[%c] Broadcast SSID: %s\n",
- cur_if->prefix, cur_if->hidden ? "OFF":"ON");
+ printf("%s: Broadcast SSID: %s\n", __FUNCTION__,
+ cur_if->hidden ? "OFF":"ON");
}
}
if (apstamode == ISTAONLY_MODE) {
- wl_ext_connect(cur_if->dev, &conn_info);
+ wl_ext_connect(cur_if);
} else if (apstamode == IAPONLY_MODE) {
wl_ext_ioctl(cur_if->dev, WLC_SET_SSID, &ssid, sizeof(ssid), 1);
wl_ext_ioctl(dev, WLC_UP, NULL, 0, 1);
- } else if (apstamode == ISTAAP_MODE || apstamode == ISTAGO_MODE) {
+ } else if (apstamode == IAPSTA_MODE || apstamode == IGOSTA_MODE) {
if (cur_if->ifmode == ISTA_MODE) {
- wl_ext_connect(cur_if->dev, &conn_info);
+ wl_ext_connect(cur_if);
} else {
if (FW_SUPPORTED(dhd, rsdb)) {
wl_ext_ioctl(cur_if->dev, WLC_SET_SSID, &ssid, sizeof(ssid), 1);
#endif /* ARP_OFFLOAD_SUPPORT */
#ifdef PROP_TXSTATUS_VSDB
#if defined(BCMSDIO)
- if (!(FW_SUPPORTED(dhd, rsdb)) && !disable_proptx) {
+ if (!FW_SUPPORTED(dhd, rsdb) && !disable_proptx) {
bool enabled;
dhd_wlfc_get_enable(dhd, &enabled);
if (!enabled) {
wl_ext_ioctl(dev, WLC_UP, NULL, 0, 1);
}
}
-#endif /* BCMSDIO */
+#endif
#endif /* PROP_TXSTATUS_VSDB */
}
}
wl_ext_ioctl(cur_if->dev, WLC_SET_SSID, &ssid, sizeof(ssid), 1);
} else if (apstamode == ISTAAPAP_MODE) {
if (cur_if->ifmode == ISTA_MODE) {
- wl_ext_connect(cur_if->dev, &conn_info);
+ wl_ext_connect(cur_if);
} else if (cur_if->ifmode == IAP_MODE) {
wl_ext_ioctl(cur_if->dev, WLC_SET_SSID, &ssid, sizeof(ssid), 1);
} else {
- AEXT_ERROR(cur_if->ifname, "wrong ifmode %d\n", cur_if->ifmode);
+ printf("%s: wrong ifmode %d\n", __FUNCTION__, cur_if->ifmode);
}
-#ifdef WLMESH
} else if (apstamode == IMESHONLY_MODE ||
- apstamode == ISTAMESH_MODE || apstamode == IMESHAP_MODE ||
- apstamode == ISTAAPMESH_MODE || apstamode == IMESHAPAP_MODE) {
+ apstamode == IMESHSTA_MODE || apstamode == IMESHAP_MODE ||
+ apstamode == IMESHAPSTA_MODE || apstamode == IMESHAPAP_MODE) {
if (cur_if->ifmode == ISTA_MODE) {
- wl_ext_connect(cur_if->dev, &conn_info);
+ wl_ext_connect(cur_if);
} else if (cur_if->ifmode == IAP_MODE) {
wl_ext_ioctl(cur_if->dev, WLC_SET_SSID, &ssid, sizeof(ssid), 1);
} else if (cur_if->ifmode == IMESH_MODE) {
- struct wl_join_params join_params;
// need to bring the interface up before setting the ssid
memset(&join_params, 0, sizeof(join_params));
join_params.ssid.SSID_len = strlen(cur_if->ssid);
memcpy((void *)join_params.ssid.SSID, cur_if->ssid, strlen(cur_if->ssid));
join_params.params.chanspec_list[0] = fw_chspec;
join_params.params.chanspec_num = 1;
- wl_ext_ioctl(cur_if->dev, WLC_SET_SSID, &join_params, sizeof(join_params), 1);
+ join_params_size = sizeof(join_params);
+ wl_ext_ioctl(cur_if->dev, WLC_SET_SSID, &join_params, join_params_size, 1);
} else {
- AEXT_ERROR(cur_if->ifname, "wrong ifmode %d\n", cur_if->ifmode);
- }
-#endif /* WLMESH */
- }
-
- if (wait_up) {
- OSL_SLEEP(wait_up);
- } else if (cur_if->ifmode == IAP_MODE || cur_if->ifmode == IMESH_MODE) {
- timeout = wait_event_interruptible_timeout(apsta_params->netif_change_event,
- wl_get_isam_status(cur_if, AP_CREATED),
- msecs_to_jiffies(MAX_AP_LINK_WAIT_TIME));
- if (timeout <= 0 || !wl_get_isam_status(cur_if, AP_CREATED)) {
- mutex_unlock(&apsta_params->usr_sync);
- wl_ext_disable_iface(dev, cur_if->ifname);
- WL_MSG(ifname, "[%c] failed to enable with SSID: \"%s\"\n",
- cur_if->prefix, cur_if->ssid);
- ret = -1;
+ printf("%s: wrong ifmode %d\n", __FUNCTION__, cur_if->ifmode);
}
}
- if (wl_get_isam_status(cur_if, AP_CREATED) &&
- (cur_if->ifmode == IMESH_MODE || cur_if->ifmode == IAP_MODE) &&
- (apstamode == ISTAAP_MODE || apstamode == ISTAAPAP_MODE ||
- apstamode == ISTAMESH_MODE || apstamode == IMESHAP_MODE ||
- apstamode == ISTAAPMESH_MODE || apstamode == IMESHAPAP_MODE)) {
- int scan_assoc_time = 80;
- for (i=0; i<MAX_IF_NUM; i++) {
- tmp_if = &apsta_params->if_info[i];
- if (tmp_if->dev && tmp_if->ifmode == ISTA_MODE) {
- wl_ext_ioctl(tmp_if->dev, WLC_SET_SCAN_CHANNEL_TIME,
- &scan_assoc_time, sizeof(scan_assoc_time), 1);
- }
- }
- }
+ OSL_SLEEP(500);
+ printf("%s: %s[%c] enabled with SSID: \"%s\"\n", __FUNCTION__,
+ ifname, cur_if->prefix, cur_if->ssid);
+ wl_ext_isam_status(cur_if->dev);
- wl_ext_isam_status(cur_if->dev, NULL, 0);
+ cur_if->ifstate = IF_STATE_ENABLE;
-exit:
- if (cur_if->ifmode == IAP_MODE || cur_if->ifmode == IMESH_MODE) {
- wl_clr_isam_status(cur_if, AP_CREATING);
- }
- WL_MSG(ifname, "[%c] Exit ret=%d\n", cur_if->prefix, ret);
- mutex_unlock(&apsta_params->usr_sync);
- return ret;
+ return 0;
}
static int
{
int ret = 0;
char *pch, *pick_tmp, *param;
+ struct wl_apsta_params *apsta_params = &g_apsta_params;
char ifname[IFNAMSIZ+1];
- AEXT_TRACE(dev->name, "command=%s, len=%d\n", command, total_len);
+ if (!apsta_params->init) {
+ ANDROID_ERROR(("%s: please init first\n", __FUNCTION__));
+ return -1;
+ }
+
+ ANDROID_TRACE(("%s: command=%s, len=%d\n", __FUNCTION__, command, total_len));
pick_tmp = command;
param = bcmstrtok(&pick_tmp, " ", 0); // skip iapsta_enable
pch = bcmstrtok(&pick_tmp, " ", 0);
if (pch) {
strcpy(ifname, pch);
- ret = wl_ext_enable_iface(dev, ifname, 0);
+ ret = wl_ext_enable_iface(dev, ifname);
if (ret)
return ret;
} else {
- AEXT_ERROR(dev->name, "ifname [wlanX]\n");
+ ANDROID_ERROR(("%s: ifname [wlanX]\n", __FUNCTION__));
return -1;
}
}
return ret;
}
-#ifdef PROPTX_MAXCOUNT
int
-wl_ext_get_wlfc_maxcount(struct dhd_pub *dhd, int ifidx)
+wl_ext_iapsta_alive_preinit(struct net_device *dev)
{
- struct wl_apsta_params *apsta_params = dhd->iapsta_params;
- struct wl_if_info *tmp_if, *cur_if = NULL;
- int i, maxcount = WL_TXSTATUS_FREERUNCTR_MASK;
+ struct wl_apsta_params *apsta_params = &g_apsta_params;
+ struct wl_if_info *cur_if;
+ int i;
+
+ if (apsta_params->init == TRUE) {
+ ANDROID_ERROR(("%s: don't init twice\n", __FUNCTION__));
+ return -1;
+ }
- if (!apsta_params->rsdb)
- return maxcount;
+ ANDROID_TRACE(("%s: Enter\n", __FUNCTION__));
for (i=0; i<MAX_IF_NUM; i++) {
- tmp_if = &apsta_params->if_info[i];
- if (tmp_if->dev && tmp_if->ifidx == ifidx) {
- cur_if = tmp_if;
- maxcount = cur_if->transit_maxcount;
+ cur_if = &apsta_params->if_info[i];
+ if (i == 1 && !strlen(cur_if->ifname))
+ strcpy(cur_if->ifname, "wlan1");
+ if (i == 2 && !strlen(cur_if->ifname))
+ strcpy(cur_if->ifname, "wlan2");
+ if (cur_if->ifmode == ISTA_MODE) {
+ cur_if->channel = 0;
+ cur_if->maxassoc = -1;
+ cur_if->ifstate = IF_STATE_INIT;
+ cur_if->prio = PRIO_STA;
+ cur_if->prefix = 'S';
+ snprintf(cur_if->ssid, DOT11_MAX_SSID_LEN, "ttt_sta");
+ } else if (cur_if->ifmode == IAP_MODE) {
+ cur_if->channel = 1;
+ cur_if->maxassoc = -1;
+ cur_if->ifstate = IF_STATE_INIT;
+ cur_if->prio = PRIO_AP;
+ cur_if->prefix = 'A';
+ snprintf(cur_if->ssid, DOT11_MAX_SSID_LEN, "ttt_ap");
+ } else if (cur_if->ifmode == IMESH_MODE) {
+ cur_if->channel = 1;
+ cur_if->maxassoc = -1;
+ cur_if->ifstate = IF_STATE_INIT;
+ cur_if->prio = PRIO_MESH;
+ cur_if->prefix = 'M';
+ snprintf(cur_if->ssid, DOT11_MAX_SSID_LEN, "ttt_mesh");
}
}
- if (cur_if)
- AEXT_INFO(cur_if->ifname, "update maxcount %d\n", maxcount);
- else
- AEXT_INFO("wlan", "update maxcount %d for ifidx %d\n", maxcount, ifidx);
- return maxcount;
+ apsta_params->init = TRUE;
+
+ return 0;
}
-void
-wl_ext_update_wlfc_maxcount(struct dhd_pub *dhd)
+int
+wl_ext_iapsta_alive_postinit(struct net_device *dev)
{
- struct wl_apsta_params *apsta_params = dhd->iapsta_params;
- struct wl_if_info *tmp_if;
- bool band_5g = FALSE;
- uint16 chan = 0;
- int i, ret;
+ s32 apsta = 0;
+ struct wl_apsta_params *apsta_params = &g_apsta_params;
- if (!apsta_params->rsdb)
- return;
+ wl_ext_iovar_getint(dev, "apsta", &apsta);
+ if (apsta == 1) {
+ apsta_params->apstamode = ISTAONLY_MODE;
+ apsta_params->if_info[IF_PIF].ifmode = ISTA_MODE;
+ op_mode = DHD_FLAG_STA_MODE;
+ } else {
+ apsta_params->apstamode = IAPONLY_MODE;
+ apsta_params->if_info[IF_PIF].ifmode = IAP_MODE;
+ op_mode = DHD_FLAG_HOSTAP_MODE;
+ }
+ // fix me: how to check it's IAPSTA_MODE or IDUALAP_MODE?
- for (i=0; i<MAX_IF_NUM; i++) {
- tmp_if = &apsta_params->if_info[i];
- if (tmp_if->dev) {
- chan = wl_ext_get_chan(apsta_params, tmp_if->dev);
- if (chan > CH_MAX_2G_CHANNEL) {
- tmp_if->transit_maxcount = dhd->conf->proptx_maxcnt_5g;
- ret = dhd_wlfc_update_maxcount(dhd, tmp_if->ifidx,
- tmp_if->transit_maxcount);
- if (ret == 0)
- AEXT_INFO(tmp_if->ifname, "updated maxcount %d\n",
- tmp_if->transit_maxcount);
- band_5g = TRUE;
- }
- }
- }
+ wl_ext_get_ioctl_ver(dev, &apsta_params->ioctl_ver);
+ printf("%s: apstamode=%d\n", __FUNCTION__, apsta_params->apstamode);
- for (i=0; i<MAX_IF_NUM; i++) {
- tmp_if = &apsta_params->if_info[i];
- if (tmp_if->dev) {
- chan = wl_ext_get_chan(apsta_params, tmp_if->dev);
- if ((chan == 0) || (chan <= CH_MAX_2G_CHANNEL && chan >= CH_MIN_2G_CHANNEL)) {
- if (chan == 0) {
- tmp_if->transit_maxcount = WL_TXSTATUS_FREERUNCTR_MASK;
- } else if (band_5g) {
- tmp_if->transit_maxcount = dhd->conf->proptx_maxcnt_2g;
- } else {
- tmp_if->transit_maxcount = dhd->conf->proptx_maxcnt_5g;
- }
- ret = dhd_wlfc_update_maxcount(dhd, tmp_if->ifidx,
- tmp_if->transit_maxcount);
- if (ret == 0)
- AEXT_INFO(tmp_if->ifname, "updated maxcount %d\n",
- tmp_if->transit_maxcount);
- }
+ return op_mode;
+}
+
+#if defined(WL_WIRELESS_EXT)
+static bool
+wl_ext_conn_status_str(uint32 event_type,
+ uint32 status, uint32 reason, char* stringBuf, uint buflen)
+{
+ int i;
+
+ typedef struct conn_fail_event_map_t {
+ uint32 inEvent; /* input: event type to match */
+ uint32 inStatus; /* input: event status code to match */
+ uint32 inReason; /* input: event reason code to match */
+ } conn_fail_event_map_t;
+
+ /* Map of WLC_E events to connection failure strings */
+# define WL_IW_DONT_CARE 9999
+ const conn_fail_event_map_t event_map [] = {
+ /* inEvent inStatus inReason */
+ {WLC_E_LINK, WL_IW_DONT_CARE, WL_IW_DONT_CARE},
+ {WLC_E_DEAUTH, WL_IW_DONT_CARE, WL_IW_DONT_CARE},
+ {WLC_E_DEAUTH_IND, WL_IW_DONT_CARE, WL_IW_DONT_CARE},
+ {WLC_E_DISASSOC, WL_IW_DONT_CARE, WL_IW_DONT_CARE},
+ {WLC_E_DISASSOC_IND, WL_IW_DONT_CARE, WL_IW_DONT_CARE},
+ {WLC_E_OVERLAY_REQ, WL_IW_DONT_CARE, WL_IW_DONT_CARE},
+ {WLC_E_ASSOC_IND, WL_IW_DONT_CARE, DOT11_SC_SUCCESS},
+ {WLC_E_REASSOC_IND, WL_IW_DONT_CARE, DOT11_SC_SUCCESS},
+ };
+
+ /* Search the event map table for a matching event */
+ for (i = 0; i < sizeof(event_map)/sizeof(event_map[0]); i++) {
+ const conn_fail_event_map_t* row = &event_map[i];
+ if (row->inEvent == event_type &&
+ (row->inStatus == status || row->inStatus == WL_IW_DONT_CARE) &&
+ (row->inReason == reason || row->inReason == WL_IW_DONT_CARE)) {
+ memset(stringBuf, 0, buflen);
+ snprintf(stringBuf, buflen, "isam_event event=%d reason=%d",
+ event_type, reason);
+ return TRUE;
}
}
+
+ return FALSE;
}
-#endif /* PROPTX_MAXCOUNT */
+#endif /* WL_WIRELESS_EXT */
-static int
-wl_ext_iapsta_event(struct net_device *dev,
- struct wl_apsta_params *apsta_params, wl_event_msg_t *e, void* data)
+int
+wl_ext_iapsta_event(struct net_device *dev, wl_event_msg_t *e, void* data)
{
- struct wl_if_info *cur_if = NULL, *tmp_if = NULL;
-#if defined(WLMESH) && defined(WL_ESCAN)
- struct wl_if_info *mesh_if = NULL;
-#endif /* WLMESH && WL_ESCAN */
+ struct wl_apsta_params *apsta_params = &g_apsta_params;
+ struct wl_if_info *cur_if = NULL;
int i;
+#if defined(WL_WIRELESS_EXT)
+ char extra[IW_CUSTOM_MAX + 1];
+ union iwreq_data wrqu;
+#endif
uint32 event_type = ntoh32(e->event_type);
uint32 status = ntoh32(e->status);
uint32 reason = ntoh32(e->reason);
uint16 flags = ntoh16(e->flags);
- for (i=0; i<MAX_IF_NUM; i++) {
- tmp_if = &apsta_params->if_info[i];
- if (tmp_if->dev == dev) {
- cur_if = tmp_if;
- break;
- }
+ if (!apsta_params->init) {
+ ANDROID_TRACE(("%s: please init first\n", __FUNCTION__));
+ return -1;
}
-#if defined(WLMESH) && defined(WL_ESCAN)
+
for (i=0; i<MAX_IF_NUM; i++) {
- tmp_if = &apsta_params->if_info[i];
- if (tmp_if->dev && tmp_if->ifmode == IMESH_MODE) {
- mesh_if = tmp_if;
+ if (apsta_params->if_info[i].ifidx == e->ifidx) {
+ cur_if = &apsta_params->if_info[i];
break;
}
}
-#endif /* WLMESH && WL_ESCAN */
if (!cur_if || !cur_if->dev) {
- AEXT_DBG(dev->name, "ifidx %d is not ready\n", e->ifidx);
+ ANDROID_ERROR(("%s: %s ifidx %d is not ready\n", __FUNCTION__,
+ dev->name, e->ifidx));
return -1;
}
- if (cur_if->ifmode == ISTA_MODE || cur_if->ifmode == IGC_MODE) {
+ if (cur_if->ifmode == ISTA_MODE) {
if (event_type == WLC_E_LINK) {
if (!(flags & WLC_EVENT_MSG_LINK)) {
- WL_MSG(cur_if->ifname,
- "[%c] Link down with %pM, %s(%d), reason %d\n",
- cur_if->prefix, &e->addr, bcmevent_get_name(event_type),
- event_type, reason);
- wl_clr_isam_status(cur_if, STA_CONNECTED);
-#if defined(WLMESH) && defined(WL_ESCAN)
- if (mesh_if && apsta_params->macs)
- wl_mesh_clear_mesh_info(apsta_params, mesh_if, TRUE);
-#endif /* WLMESH && WL_ESCAN */
+ printf("%s: %s[%c] Link Down with %pM\n", __FUNCTION__,
+ cur_if->ifname, cur_if->prefix, &e->addr);
} else {
- WL_MSG(cur_if->ifname, "[%c] Link UP with %pM\n",
- cur_if->prefix, &e->addr);
- wl_set_isam_status(cur_if, STA_CONNECTED);
-#if defined(WLMESH) && defined(WL_ESCAN)
- if (mesh_if && apsta_params->macs)
- wl_mesh_update_master_info(apsta_params, mesh_if);
-#endif /* WLMESH && WL_ESCAN */
+ printf("%s: %s[%c] Link UP with %pM\n", __FUNCTION__,
+ cur_if->ifname, cur_if->prefix, &e->addr);
}
- wl_clr_isam_status(cur_if, STA_CONNECTING);
- wake_up_interruptible(&apsta_params->netif_change_event);
-#ifdef PROPTX_MAXCOUNT
- wl_ext_update_wlfc_maxcount(apsta_params->dhd);
-#endif /* PROPTX_MAXCOUNT */
- } else if (event_type == WLC_E_SET_SSID && status != WLC_E_STATUS_SUCCESS) {
- WL_MSG(cur_if->ifname,
- "connect failed event=%d, reason=%d, status=%d\n",
- event_type, reason, status);
- wl_clr_isam_status(cur_if, STA_CONNECTING);
- wake_up_interruptible(&apsta_params->netif_change_event);
-#if defined(WLMESH) && defined(WL_ESCAN)
- if (mesh_if && apsta_params->macs)
- wl_mesh_clear_mesh_info(apsta_params, mesh_if, TRUE);
-#endif /* WLMESH && WL_ESCAN */
-#ifdef PROPTX_MAXCOUNT
- wl_ext_update_wlfc_maxcount(apsta_params->dhd);
-#endif /* PROPTX_MAXCOUNT */
- } else if (event_type == WLC_E_DEAUTH || event_type == WLC_E_DEAUTH_IND ||
- event_type == WLC_E_DISASSOC || event_type == WLC_E_DISASSOC_IND) {
- WL_MSG(cur_if->ifname, "[%c] Link down with %pM, %s(%d), reason %d\n",
- cur_if->prefix, &e->addr, bcmevent_get_name(event_type),
- event_type, reason);
-#if defined(WLMESH) && defined(WL_ESCAN)
- if (mesh_if && apsta_params->macs)
- wl_mesh_clear_mesh_info(apsta_params, mesh_if, TRUE);
-#endif /* WLMESH && WL_ESCAN */
}
}
else if (cur_if->ifmode == IAP_MODE || cur_if->ifmode == IMESH_MODE) {
if ((event_type == WLC_E_SET_SSID && status == WLC_E_STATUS_SUCCESS) ||
(event_type == WLC_E_LINK && status == WLC_E_STATUS_SUCCESS &&
reason == WLC_E_REASON_INITIAL_ASSOC)) {
- if (wl_get_isam_status(cur_if, AP_CREATING)) {
- WL_MSG(cur_if->ifname, "[%c] Link up (etype=%d)\n",
- cur_if->prefix, event_type);
- wl_set_isam_status(cur_if, AP_CREATED);
- wake_up_interruptible(&apsta_params->netif_change_event);
- } else {
- wl_set_isam_status(cur_if, AP_CREATED);
- WL_MSG(cur_if->ifname, "[%c] Link up w/o creating? (etype=%d)\n",
- cur_if->prefix, event_type);
- }
-#ifdef PROPTX_MAXCOUNT
- wl_ext_update_wlfc_maxcount(apsta_params->dhd);
-#endif /* PROPTX_MAXCOUNT */
- }
- else if ((event_type == WLC_E_LINK && reason == WLC_E_LINK_BSSCFG_DIS) ||
+ printf("%s: %s[%c] Link up\n", __FUNCTION__,
+ cur_if->ifname, cur_if->prefix);
+ } else if ((event_type == WLC_E_LINK && reason == WLC_E_LINK_BSSCFG_DIS) ||
(event_type == WLC_E_LINK && status == WLC_E_STATUS_SUCCESS &&
reason == WLC_E_REASON_DEAUTH)) {
- wl_clr_isam_status(cur_if, AP_CREATED);
- WL_MSG(cur_if->ifname, "[%c] Link down, reason=%d\n",
- cur_if->prefix, reason);
-#ifdef PROPTX_MAXCOUNT
- wl_ext_update_wlfc_maxcount(apsta_params->dhd);
-#endif /* PROPTX_MAXCOUNT */
+ printf("%s: %s[%c] Link down\n", __FUNCTION__,
+ cur_if->ifname, cur_if->prefix);
}
else if ((event_type == WLC_E_ASSOC_IND || event_type == WLC_E_REASSOC_IND) &&
reason == DOT11_SC_SUCCESS) {
- WL_MSG(cur_if->ifname, "[%c] connected device %pM\n",
- cur_if->prefix, &e->addr);
- wl_ext_isam_status(cur_if->dev, NULL, 0);
- }
- else if (event_type == WLC_E_DISASSOC_IND ||
- event_type == WLC_E_DEAUTH_IND ||
+ printf("%s: %s[%c] connected device %pM\n", __FUNCTION__,
+ cur_if->ifname, cur_if->prefix, &e->addr);
+ } else if (event_type == WLC_E_DISASSOC_IND) {
+ printf("%s: %s[%c] disassociated device %pM\n", __FUNCTION__,
+ cur_if->ifname, cur_if->prefix, &e->addr);
+ } else if (event_type == WLC_E_DEAUTH_IND ||
(event_type == WLC_E_DEAUTH && reason != DOT11_RC_RESERVED)) {
- WL_MSG_RLMT(cur_if->ifname, &e->addr, ETHER_ADDR_LEN,
- "[%c] disconnected device %pM, %s(%d), reason=%d\n",
- cur_if->prefix, &e->addr, bcmevent_get_name(event_type),
- event_type, reason);
- wl_ext_isam_status(cur_if->dev, NULL, 0);
+ printf("%s: %s[%c] deauthenticated device %pM\n", __FUNCTION__,
+ cur_if->ifname, cur_if->prefix, &e->addr);
}
-#if defined(WLMESH) && defined(WL_ESCAN)
- if (cur_if->ifmode == IMESH_MODE && apsta_params->macs)
- wl_mesh_event_handler(apsta_params, cur_if, e, data);
-#endif /* WLMESH && WL_ESCAN */
+ }
+
+#if defined(WL_WIRELESS_EXT)
+ memset(extra, 0, sizeof(extra));
+ memset(&wrqu, 0, sizeof(wrqu));
+ memcpy(wrqu.addr.sa_data, &e->addr, ETHER_ADDR_LEN);
+ wrqu.addr.sa_family = ARPHRD_ETHER;
+ if (wl_ext_conn_status_str(event_type, status, reason, extra, sizeof(extra))) {
+ wrqu.data.length = strlen(extra);
+ wireless_send_event(cur_if->dev, IWEVCUSTOM, &wrqu, extra);
+ ANDROID_INFO(("%s: %s[%c] event=%d, status=%d, reason=%d, flags=%d sent up\n",
+ __FUNCTION__, cur_if->ifname, cur_if->prefix, event_type, status,
+ reason, flags));
+ } else
+#endif /* WL_WIRELESS_EXT */
+ {
+ ANDROID_INFO(("%s: %s[%c] event=%d, status=%d, reason=%d, flags=%d\n",
+ __FUNCTION__, cur_if->ifname, cur_if->prefix, event_type, status,
+ reason, flags));
}
return 0;
}
-#ifdef WL_CFG80211
u32
-wl_ext_iapsta_update_channel(dhd_pub_t *dhd, struct net_device *dev,
- u32 channel)
+wl_ext_iapsta_update_channel(struct net_device *dev, u32 channel)
{
- struct wl_apsta_params *apsta_params = dhd->iapsta_params;
+ struct wl_apsta_params *apsta_params = &g_apsta_params;
struct wl_if_info *cur_if = NULL, *tmp_if = NULL;
int i;
}
if (cur_if) {
- wl_ext_isam_status(cur_if->dev, NULL, 0);
+ wl_ext_isam_status(cur_if->dev);
cur_if->channel = channel;
- if (wl_ext_master_if(cur_if) && apsta_params->acs) {
- uint auto_band = WL_GET_BAND(channel);
- cur_if->channel = wl_ext_autochannel(cur_if->dev, apsta_params->acs,
- auto_band);
- }
- channel = wl_ext_move_cur_channel(apsta_params, cur_if);
+ channel = wl_ext_move_cur_channel(apsta_params->if_info[IF_PIF].dev, cur_if);
if (channel)
- wl_ext_move_other_channel(apsta_params, cur_if);
- if (cur_if->ifmode == ISTA_MODE)
- wl_set_isam_status(cur_if, STA_CONNECTING);
+ wl_ext_move_other_channel(apsta_params->if_info[IF_PIF].dev, cur_if);
}
return channel;
}
-static int
-wl_ext_iftype_to_ifmode(struct net_device *net, int wl_iftype, ifmode_t *ifmode)
-{
- switch (wl_iftype) {
- case WL_IF_TYPE_STA:
- *ifmode = ISTA_MODE;
- break;
- case WL_IF_TYPE_AP:
- *ifmode = IAP_MODE;
- break;
- case WL_IF_TYPE_P2P_GO:
- *ifmode = IGO_MODE;
- break;
- case WL_IF_TYPE_P2P_GC:
- *ifmode = IGC_MODE;
- break;
- default:
- AEXT_ERROR(net->name, "Unknown interface wl_iftype:0x%x\n", wl_iftype);
- return BCME_ERROR;
- }
- return BCME_OK;
-}
-
-void
-wl_ext_iapsta_update_iftype(struct net_device *net, int ifidx, int wl_iftype)
-{
- struct dhd_pub *dhd = dhd_get_pub(net);
- struct wl_apsta_params *apsta_params = dhd->iapsta_params;
- struct wl_if_info *cur_if = NULL;
-
- AEXT_TRACE(net->name, "ifidx=%d, wl_iftype=%d\n", ifidx, wl_iftype);
-
- if (ifidx < MAX_IF_NUM) {
- cur_if = &apsta_params->if_info[ifidx];
- }
-
- if (cur_if) {
- if (wl_iftype == WL_IF_TYPE_STA) {
- cur_if->ifmode = ISTA_MODE;
- cur_if->prio = PRIO_STA;
- cur_if->prefix = 'S';
- } else if (wl_iftype == WL_IF_TYPE_AP && cur_if->ifmode != IMESH_MODE) {
- cur_if->ifmode = IAP_MODE;
- cur_if->prio = PRIO_AP;
- cur_if->prefix = 'A';
- } else if (wl_iftype == WL_IF_TYPE_P2P_GO) {
- cur_if->ifmode = IGO_MODE;
- cur_if->prio = PRIO_AP;
- cur_if->prefix = 'P';
- apsta_params->vsdb = TRUE;
- } else if (wl_iftype == WL_IF_TYPE_P2P_GC) {
- cur_if->ifmode = IGC_MODE;
- cur_if->prio = PRIO_STA;
- cur_if->prefix = 'P';
- apsta_params->vsdb = TRUE;
- wl_ext_iovar_setint(cur_if->dev, "assoc_retry_max", 3);
- }
- }
-}
-
-void
-wl_ext_iapsta_ifadding(struct net_device *net, int ifidx)
-{
- struct dhd_pub *dhd = dhd_get_pub(net);
- struct wl_apsta_params *apsta_params = dhd->iapsta_params;
- struct wl_if_info *cur_if = NULL;
-
- AEXT_TRACE(net->name, "ifidx=%d\n", ifidx);
- if (ifidx < MAX_IF_NUM) {
- cur_if = &apsta_params->if_info[ifidx];
- wl_set_isam_status(cur_if, IF_ADDING);
- }
-}
-
-bool
-wl_ext_iapsta_iftype_enabled(struct net_device *net, int wl_iftype)
-{
- struct dhd_pub *dhd = dhd_get_pub(net);
- struct wl_apsta_params *apsta_params = dhd->iapsta_params;
- struct wl_if_info *cur_if = NULL;
- ifmode_t ifmode = 0;
-
- wl_ext_iftype_to_ifmode(net, wl_iftype, &ifmode);
- cur_if = wl_ext_if_enabled(apsta_params, ifmode);
- if (cur_if)
- return TRUE;
-
- return FALSE;
-}
-
-bool
-wl_ext_iapsta_mesh_creating(struct net_device *net)
-{
- struct dhd_pub *dhd = dhd_get_pub(net);
- struct wl_apsta_params *apsta_params = dhd->iapsta_params;
- struct wl_if_info *cur_if;
- int i;
-
- if (apsta_params) {
- for (i=0; i<MAX_IF_NUM; i++) {
- cur_if = &apsta_params->if_info[i];
- if (cur_if->ifmode==IMESH_MODE && wl_get_isam_status(cur_if, IF_ADDING))
- return TRUE;
- }
- }
- return FALSE;
-}
-#endif /* WL_CFG80211 */
-
-int
-wl_ext_iapsta_alive_preinit(struct net_device *dev)
-{
- struct dhd_pub *dhd = dhd_get_pub(dev);
- struct wl_apsta_params *apsta_params = dhd->iapsta_params;
-
- if (apsta_params->init == TRUE) {
- AEXT_ERROR(dev->name, "don't init twice\n");
- return -1;
- }
-
- AEXT_TRACE(dev->name, "Enter\n");
-
- apsta_params->init = TRUE;
-
- return 0;
-}
-
-int
-wl_ext_iapsta_alive_postinit(struct net_device *dev)
-{
- struct dhd_pub *dhd = dhd_get_pub(dev);
- struct wl_apsta_params *apsta_params = dhd->iapsta_params;
- s32 apsta = 0, ap = 0;
- struct wl_if_info *cur_if;
- int i;
-
- wl_ext_iovar_getint(dev, "apsta", &apsta);
- wl_ext_ioctl(dev, WLC_GET_AP, &ap, sizeof(ap), 0);
- if (apsta == 1 || ap == 0) {
- apsta_params->apstamode = ISTAONLY_MODE;
- apsta_params->if_info[IF_PIF].ifmode = ISTA_MODE;
- op_mode = DHD_FLAG_STA_MODE;
- } else {
- apsta_params->apstamode = IAPONLY_MODE;
- apsta_params->if_info[IF_PIF].ifmode = IAP_MODE;
- op_mode = DHD_FLAG_HOSTAP_MODE;
- }
- // fix me: how to check it's ISTAAP_MODE or IDUALAP_MODE?
-
- wl_ext_get_ioctl_ver(dev, &apsta_params->ioctl_ver);
- WL_MSG(dev->name, "apstamode=%d\n", apsta_params->apstamode);
-
- for (i=0; i<MAX_IF_NUM; i++) {
- cur_if = &apsta_params->if_info[i];
- if (i == 1 && !strlen(cur_if->ifname))
- strcpy(cur_if->ifname, "wlan1");
- if (i == 2 && !strlen(cur_if->ifname))
- strcpy(cur_if->ifname, "wlan2");
- if (cur_if->ifmode == ISTA_MODE) {
- cur_if->channel = 0;
- cur_if->maxassoc = -1;
- wl_set_isam_status(cur_if, IF_READY);
- cur_if->prio = PRIO_STA;
- cur_if->prefix = 'S';
- snprintf(cur_if->ssid, DOT11_MAX_SSID_LEN, "ttt_sta");
- } else if (cur_if->ifmode == IAP_MODE) {
- cur_if->channel = 1;
- cur_if->maxassoc = -1;
- wl_set_isam_status(cur_if, IF_READY);
- cur_if->prio = PRIO_AP;
- cur_if->prefix = 'A';
- snprintf(cur_if->ssid, DOT11_MAX_SSID_LEN, "ttt_ap");
-#ifdef WLMESH
- } else if (cur_if->ifmode == IMESH_MODE) {
- cur_if->channel = 1;
- cur_if->maxassoc = -1;
- wl_set_isam_status(cur_if, IF_READY);
- cur_if->prio = PRIO_MESH;
- cur_if->prefix = 'M';
- snprintf(cur_if->ssid, DOT11_MAX_SSID_LEN, "ttt_mesh");
-#endif /* WLMESH */
- }
- }
-
- return op_mode;
-}
-
-static int
-wl_ext_iapsta_get_rsdb(struct net_device *net, struct dhd_pub *dhd)
-{
- s8 iovar_buf[WLC_IOCTL_SMLEN];
- wl_config_t *rsdb_p;
- int ret = 0, rsdb = 0;
-
- if (dhd->conf->chip == BCM4359_CHIP_ID) {
- ret = wldev_iovar_getbuf(net, "rsdb_mode", NULL, 0,
- iovar_buf, WLC_IOCTL_SMLEN, NULL);
- if (!ret) {
- if (dhd->conf->fw_type == FW_TYPE_MESH) {
- rsdb = 1;
- } else {
- rsdb_p = (wl_config_t *) iovar_buf;
- rsdb = rsdb_p->config;
- }
- }
- }
-
- AEXT_INFO(net->name, "rsdb_mode=%d\n", rsdb);
-
- return rsdb;
-}
-
-static void
-wl_ext_iapsta_postinit(struct net_device *net, struct wl_if_info *cur_if)
-{
- struct dhd_pub *dhd = dhd_get_pub(net);
- struct wl_apsta_params *apsta_params = dhd->iapsta_params;
- int pm;
-
- AEXT_TRACE(cur_if->ifname, "ifidx=%d\n", cur_if->ifidx);
- if (cur_if->ifidx == 0) {
- apsta_params->rsdb = wl_ext_iapsta_get_rsdb(net, dhd);
- apsta_params->vsdb = FALSE;
- apsta_params->csa = 0;
- apsta_params->acs = 0;
- apsta_params->radar = wl_ext_radar_detect(net);
- if (dhd->conf->fw_type == FW_TYPE_MESH) {
- apsta_params->csa |= (CSA_FW_BIT | CSA_DRV_BIT);
- }
- } else {
- if (cur_if->ifmode == ISTA_MODE) {
- wl_ext_iovar_setint(cur_if->dev, "roam_off", dhd->conf->roam_off);
- wl_ext_iovar_setint(cur_if->dev, "bcn_timeout", dhd->conf->bcn_timeout);
- if (dhd->conf->pm >= 0)
- pm = dhd->conf->pm;
- else
- pm = PM_FAST;
- wl_ext_ioctl(cur_if->dev, WLC_SET_PM, &pm, sizeof(pm), 1);
- wl_ext_iovar_setint(cur_if->dev, "assoc_retry_max", 20);
- }
-#ifdef WLMESH
- else if (cur_if->ifmode == IMESH_MODE) {
- pm = 0;
- wl_ext_ioctl(cur_if->dev, WLC_SET_PM, &pm, sizeof(pm), 1);
- }
-#endif /* WLMESH */
- }
-#ifdef PROPTX_MAXCOUNT
- wl_ext_update_wlfc_maxcount(dhd);
-#endif /* PROPTX_MAXCOUNT */
-
-}
-
int
wl_ext_iapsta_attach_name(struct net_device *net, int ifidx)
{
- struct dhd_pub *dhd = dhd_get_pub(net);
- struct wl_apsta_params *apsta_params = dhd->iapsta_params;
+ struct wl_apsta_params *apsta_params = &g_apsta_params;
+ struct dhd_pub *dhd;
struct wl_if_info *cur_if = NULL;
- AEXT_TRACE(net->name, "ifidx=%d\n", ifidx);
+ dhd = dhd_get_pub(net);
+
+ ANDROID_TRACE(("%s: ifidx=%d, %s\n", __FUNCTION__, ifidx, net->name));
if (ifidx < MAX_IF_NUM) {
cur_if = &apsta_params->if_info[ifidx];
}
if (ifidx == 0) {
+ if (dhd->conf->fw_type == FW_TYPE_MESH) {
+ apsta_params->rsdb = TRUE;
+ apsta_params->csa = CSA_FW_BIT | CSA_DRV_BIT;
+ }
strcpy(cur_if->ifname, net->name);
- wl_ext_iapsta_postinit(net, cur_if);
- wl_set_isam_status(cur_if, IF_READY);
- } else if (cur_if && wl_get_isam_status(cur_if, IF_ADDING)) {
+ } else if (cur_if && cur_if->ifstate == IF_STATE_INIT) {
strcpy(cur_if->ifname, net->name);
- wl_ext_iapsta_postinit(net, cur_if);
- wl_clr_isam_status(cur_if, IF_ADDING);
- wl_set_isam_status(cur_if, IF_READY);
-#ifndef WL_STATIC_IF
+ apsta_params->netif_change = TRUE;
wake_up_interruptible(&apsta_params->netif_change_event);
-#endif /* WL_STATIC_IF */
}
return 0;
}
int
-wl_ext_iapsta_update_net_device(struct net_device *net, int ifidx)
+wl_ext_iapsta_attach_netdev(struct net_device *net, int ifidx, uint8 bssidx)
{
- struct dhd_pub *dhd = dhd_get_pub(net);
- struct wl_apsta_params *apsta_params = dhd->iapsta_params;
+ struct wl_apsta_params *apsta_params = &g_apsta_params;
+ struct dhd_pub *dhd;
struct wl_if_info *cur_if = NULL, *primary_if;
- AEXT_TRACE(net->name, "ifidx=%d\n", ifidx);
+ dhd = dhd_get_pub(net);
+
+ printf("%s: ifidx=%d, bssidx=%d\n", __FUNCTION__, ifidx, bssidx);
if (ifidx < MAX_IF_NUM) {
cur_if = &apsta_params->if_info[ifidx];
}
- if (cur_if && wl_get_isam_status(cur_if, IF_ADDING)) {
+ if (ifidx == 0) {
+ memset(apsta_params, 0, sizeof(struct wl_apsta_params));
+ apsta_params->vsdb = FALSE;
+ cur_if->dev = net;
+ cur_if->ifidx = ifidx;
+ cur_if->bssidx = bssidx;
+ strcpy(cur_if->ifname, net->name);
+ init_waitqueue_head(&apsta_params->netif_change_event);
+ } else if (cur_if && cur_if->ifstate == IF_STATE_INIT) {
primary_if = &apsta_params->if_info[IF_PIF];
+ cur_if->dev = net;
+ cur_if->ifidx = ifidx;
+ cur_if->bssidx = bssidx;
if (strlen(cur_if->ifname)) {
memset(net->name, 0, sizeof(IFNAMSIZ));
strcpy(net->name, cur_if->ifname);
net->name[IFNAMSIZ-1] = '\0';
}
-#ifndef WL_STATIC_IF
- if (apsta_params->apstamode != IUNKNOWN_MODE &&
- apsta_params->apstamode != ISTAAPAP_MODE &&
- apsta_params->apstamode != ISTASTA_MODE) {
+ if (apsta_params->apstamode != ISTAAPAP_MODE) {
memcpy(net->dev_addr, primary_if->dev->dev_addr, ETHER_ADDR_LEN);
net->dev_addr[0] |= 0x02;
if (ifidx >= 2) {
net->dev_addr[5] += (ifidx-1);
}
}
-#endif /* WL_STATIC_IF */
- }
-
- return 0;
-}
-
-int
-wl_ext_iapsta_attach_netdev(struct net_device *net, int ifidx, uint8 bssidx)
-{
- struct dhd_pub *dhd = dhd_get_pub(net);
- struct wl_apsta_params *apsta_params = dhd->iapsta_params;
- struct wl_if_info *cur_if = NULL, *primary_if;
-
- AEXT_TRACE(net->name, "ifidx=%d, bssidx=%d\n", ifidx, bssidx);
- if (ifidx < MAX_IF_NUM) {
- cur_if = &apsta_params->if_info[ifidx];
- }
- if (ifidx == 0) {
- memset(apsta_params, 0, sizeof(struct wl_apsta_params));
- apsta_params->dhd = dhd;
- cur_if->dev = net;
- cur_if->ifidx = ifidx;
- cur_if->bssidx = bssidx;
- cur_if->ifmode = ISTA_MODE;
- cur_if->prio = PRIO_STA;
- cur_if->prefix = 'S';
- wl_ext_event_register(net, dhd, WLC_E_LAST, wl_ext_iapsta_event,
- apsta_params, PRIO_EVENT_IAPSTA);
- strcpy(cur_if->ifname, net->name);
- init_waitqueue_head(&apsta_params->netif_change_event);
- mutex_init(&apsta_params->usr_sync);
- mutex_init(&cur_if->pm_sync);
- INIT_DELAYED_WORK(&cur_if->pm_enable_work, wl_ext_pm_work_handler);
- } else if (cur_if && wl_get_isam_status(cur_if, IF_ADDING)) {
- primary_if = &apsta_params->if_info[IF_PIF];
- cur_if->dev = net;
- cur_if->ifidx = ifidx;
- cur_if->bssidx = bssidx;
- wl_ext_event_register(net, dhd, WLC_E_LAST, wl_ext_iapsta_event,
- apsta_params, PRIO_EVENT_IAPSTA);
-#if defined(WLMESH) && defined(WL_ESCAN)
- if (cur_if->ifmode == IMESH_MODE && apsta_params->macs) {
- wl_mesh_escan_attach(dhd, cur_if);
- }
-#endif /* WLMESH && WL_ESCAN */
- mutex_init(&cur_if->pm_sync);
- INIT_DELAYED_WORK(&cur_if->pm_enable_work, wl_ext_pm_work_handler);
- }
-
- return 0;
-}
-
-int
-wl_ext_iapsta_dettach_netdev(struct net_device *net, int ifidx)
-{
- struct dhd_pub *dhd = dhd_get_pub(net);
- struct wl_apsta_params *apsta_params = dhd->iapsta_params;
- struct wl_if_info *cur_if = NULL;
-
- if (!apsta_params)
- return 0;
-
- AEXT_TRACE(net->name, "ifidx=%d\n", ifidx);
- if (ifidx < MAX_IF_NUM) {
- cur_if = &apsta_params->if_info[ifidx];
- }
-
- if (ifidx == 0) {
- wl_ext_add_remove_pm_enable_work(net, FALSE);
- wl_ext_event_deregister(net, dhd, WLC_E_LAST, wl_ext_iapsta_event);
-#if defined(WLMESH) && defined(WL_ESCAN)
- if (cur_if->ifmode == IMESH_MODE && apsta_params->macs) {
- wl_mesh_escan_detach(dhd, cur_if);
- }
-#endif /* WLMESH && WL_ESCAN */
- memset(apsta_params, 0, sizeof(struct wl_apsta_params));
- } else if (cur_if && (wl_get_isam_status(cur_if, IF_READY) ||
- wl_get_isam_status(cur_if, IF_ADDING))) {
- wl_ext_add_remove_pm_enable_work(net, FALSE);
- wl_ext_event_deregister(net, dhd, WLC_E_LAST, wl_ext_iapsta_event);
-#if defined(WLMESH) && defined(WL_ESCAN)
- if (cur_if->ifmode == IMESH_MODE && apsta_params->macs) {
- wl_mesh_escan_detach(dhd, cur_if);
- }
-#endif /* WLMESH && WL_ESCAN */
- memset(cur_if, 0, sizeof(struct wl_if_info));
- }
-
- return 0;
-}
-
-int
-wl_ext_iapsta_attach(dhd_pub_t *pub)
-{
- struct wl_apsta_params *iapsta_params;
-
- iapsta_params = kzalloc(sizeof(struct wl_apsta_params), GFP_KERNEL);
- if (unlikely(!iapsta_params)) {
- AEXT_ERROR("wlan", "Could not allocate apsta_params\n");
- return -ENOMEM;
- }
- pub->iapsta_params = (void *)iapsta_params;
-
- return 0;
-}
-
-void
-wl_ext_iapsta_dettach(dhd_pub_t *pub)
-{
- if (pub->iapsta_params) {
- kfree(pub->iapsta_params);
- pub->iapsta_params = NULL;
- }
-}
-#endif /* WL_EXT_IAPSTA */
-
-#ifdef IDHCP
-/*
-terence 20190409:
-dhd_priv wl dhcpc_dump
-dhd_priv wl dhcpc_param <client ip> <server ip> <lease time>
-*/
-static int
-wl_ext_dhcpc_dump(struct net_device *dev, char *data, char *command,
- int total_len)
-{
- int ret = 0;
- int bytes_written = 0;
- uint32 ip_addr;
- char buf[20]="";
-
- if (!data) {
- ret = wl_ext_iovar_getint(dev, "dhcpc_ip_addr", &ip_addr);
- if (!ret) {
- bcm_ip_ntoa((struct ipv4_addr *)&ip_addr, buf);
- bytes_written += snprintf(command+bytes_written, total_len,
- "ipaddr %s ", buf);
- }
-
- ret = wl_ext_iovar_getint(dev, "dhcpc_ip_mask", &ip_addr);
- if (!ret) {
- bcm_ip_ntoa((struct ipv4_addr *)&ip_addr, buf);
- bytes_written += snprintf(command+bytes_written, total_len,
- "mask %s ", buf);
- }
-
- ret = wl_ext_iovar_getint(dev, "dhcpc_ip_gateway", &ip_addr);
- if (!ret) {
- bcm_ip_ntoa((struct ipv4_addr *)&ip_addr, buf);
- bytes_written += snprintf(command+bytes_written, total_len,
- "gw %s ", buf);
- }
-
- ret = wl_ext_iovar_getint(dev, "dhcpc_ip_dnsserv", &ip_addr);
- if (!ret) {
- bcm_ip_ntoa((struct ipv4_addr *)&ip_addr, buf);
- bytes_written += snprintf(command+bytes_written, total_len,
- "dnsserv %s ", buf);
- }
-
- if (!bytes_written)
- bytes_written = -1;
-
- AEXT_TRACE(dev->name, "command result is %s\n", command);
- }
-
- return bytes_written;
-}
-
-int
-wl_ext_dhcpc_param(struct net_device *dev, char *data, char *command,
- int total_len)
-{
- int ret = -1, bytes_written = 0;
- char ip_addr_str[20]="", ip_serv_str[20]="";
- struct dhcpc_parameter dhcpc_param;
- uint32 ip_addr, ip_serv, lease_time;
- char iovar_buf[WLC_IOCTL_SMLEN]="\0";
-
- if (data) {
- AEXT_TRACE(dev->name, "cmd %s", command);
- sscanf(data, "%s %s %d", ip_addr_str, ip_serv_str, &lease_time);
- AEXT_TRACE(dev->name, "ip_addr = %s, ip_serv = %s, lease_time = %d",
- ip_addr_str, ip_serv_str, lease_time);
-
- memset(&dhcpc_param, 0, sizeof(struct dhcpc_parameter));
- if (!bcm_atoipv4(ip_addr_str, (struct ipv4_addr *)&ip_addr)) {
- AEXT_ERROR(dev->name, "wrong ip_addr_str %s\n", ip_addr_str);
- ret = -1;
- goto exit;
- }
- dhcpc_param.ip_addr = ip_addr;
-
- if (!bcm_atoipv4(ip_addr_str, (struct ipv4_addr *)&ip_serv)) {
- AEXT_ERROR(dev->name, "wrong ip_addr_str %s\n", ip_addr_str);
- ret = -1;
- goto exit;
- }
- dhcpc_param.ip_serv = ip_serv;
- dhcpc_param.lease_time = lease_time;
- ret = wl_ext_iovar_setbuf(dev, "dhcpc_param", &dhcpc_param,
- sizeof(struct dhcpc_parameter), iovar_buf, sizeof(iovar_buf), NULL);
- } else {
- ret = wl_ext_iovar_getbuf(dev, "dhcpc_param", &dhcpc_param,
- sizeof(struct dhcpc_parameter), iovar_buf, WLC_IOCTL_SMLEN, NULL);
- if (!ret) {
- bcm_ip_ntoa((struct ipv4_addr *)&dhcpc_param.ip_addr, ip_addr_str);
- bytes_written += snprintf(command + bytes_written, total_len,
- "ip_addr %s\n", ip_addr_str);
- bcm_ip_ntoa((struct ipv4_addr *)&dhcpc_param.ip_serv, ip_serv_str);
- bytes_written += snprintf(command + bytes_written, total_len,
- "ip_serv %s\n", ip_serv_str);
- bytes_written += snprintf(command + bytes_written, total_len,
- "lease_time %d\n", dhcpc_param.lease_time);
- AEXT_TRACE(dev->name, "command result is %s\n", command);
- ret = bytes_written;
- }
- }
-
- exit:
- return ret;
-}
-#endif /* IDHCP */
-
-int
-wl_ext_mkeep_alive(struct net_device *dev, char *data, char *command,
- int total_len)
-{
- struct dhd_pub *dhd = dhd_get_pub(dev);
- wl_mkeep_alive_pkt_t *mkeep_alive_pktp;
- int ret = -1, i, ifidx, id, period=-1;
- char *packet = NULL, *buf = NULL;
- int bytes_written = 0;
-
- if (data) {
- buf = kmalloc(total_len, GFP_KERNEL);
- if (buf == NULL) {
- AEXT_ERROR(dev->name, "Failed to allocate buffer of %d bytes\n", WLC_IOCTL_SMLEN);
- goto exit;
- }
- packet = kmalloc(WLC_IOCTL_SMLEN, GFP_KERNEL);
- if (packet == NULL) {
- AEXT_ERROR(dev->name, "Failed to allocate buffer of %d bytes\n", WLC_IOCTL_SMLEN);
- goto exit;
- }
- AEXT_TRACE(dev->name, "cmd %s", command);
- sscanf(data, "%d %d %s", &id, &period, packet);
- AEXT_TRACE(dev->name, "id=%d, period=%d, packet=%s", id, period, packet);
- if (period >= 0) {
- ifidx = dhd_net2idx(dhd->info, dev);
- ret = dhd_conf_mkeep_alive(dhd, ifidx, id, period, packet, FALSE);
- } else {
- if (id < 0)
- id = 0;
- ret = wl_ext_iovar_getbuf(dev, "mkeep_alive", &id, sizeof(id), buf,
- total_len, NULL);
- if (!ret) {
- mkeep_alive_pktp = (wl_mkeep_alive_pkt_t *) buf;
- bytes_written += snprintf(command+bytes_written, total_len,
- "Id :%d\n"
- "Period (msec) :%d\n"
- "Length :%d\n"
- "Packet :0x",
- mkeep_alive_pktp->keep_alive_id,
- dtoh32(mkeep_alive_pktp->period_msec),
- dtoh16(mkeep_alive_pktp->len_bytes));
- for (i=0; i<mkeep_alive_pktp->len_bytes; i++) {
- bytes_written += snprintf(command+bytes_written, total_len,
- "%02x", mkeep_alive_pktp->data[i]);
- }
- AEXT_TRACE(dev->name, "command result is %s\n", command);
- ret = bytes_written;
- }
- }
- }
-
-exit:
- if (buf)
- kfree(buf);
- if (packet)
- kfree(packet);
- return ret;
-}
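/* Illustrative usage, assuming the same "dhd_priv wl <cmd>" dispatch documented for the
 * other commands in this file; the id, period and hex payload below are placeholders
 * matching the sscanf("%d %d %s") parsing above:
 *   dhd_priv wl mkeep_alive 1 30000 0x6e756c6c   -> install keep-alive id 1 with a 30 s period
 *   dhd_priv wl mkeep_alive 1                    -> query keep-alive id 1 (period omitted)
 */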
-
-#ifdef WL_EXT_TCPKA
-static int
-wl_ext_tcpka_conn_add(struct net_device *dev, char *data, char *command,
- int total_len)
-{
- int ret = 0;
- s8 iovar_buf[WLC_IOCTL_SMLEN];
- tcpka_conn_t *tcpka = NULL;
- uint32 sess_id = 0, ipid = 0, srcport = 0, dstport = 0, seq = 0, ack = 0,
- tcpwin = 0, tsval = 0, tsecr = 0, len = 0, ka_payload_len = 0;
- char dst_mac[ETHER_ADDR_STR_LEN], src_ip[IPV4_ADDR_STR_LEN],
- dst_ip[IPV4_ADDR_STR_LEN], ka_payload[32];
-
- if (data) {
- memset(dst_mac, 0, sizeof(dst_mac));
- memset(src_ip, 0, sizeof(src_ip));
- memset(dst_ip, 0, sizeof(dst_ip));
- memset(ka_payload, 0, sizeof(ka_payload));
- sscanf(data, "%d %s %s %s %d %d %d %u %u %d %u %u %u %32s",
- &sess_id, dst_mac, src_ip, dst_ip, &ipid, &srcport, &dstport, &seq,
- &ack, &tcpwin, &tsval, &tsecr, &len, ka_payload);
-
- ka_payload_len = strlen(ka_payload) / 2;
- tcpka = kmalloc(sizeof(struct tcpka_conn) + ka_payload_len, GFP_KERNEL);
- if (tcpka == NULL) {
- AEXT_ERROR(dev->name, "Failed to allocate buffer of %d bytes\n",
- sizeof(struct tcpka_conn) + ka_payload_len);
- goto exit;
- }
- memset(tcpka, 0, sizeof(struct tcpka_conn) + ka_payload_len);
-
- tcpka->sess_id = sess_id;
- if (!(ret = bcm_ether_atoe(dst_mac, &tcpka->dst_mac))) {
- AEXT_ERROR(dev->name, "mac parsing err addr=%s\n", dst_mac);
- goto exit;
- }
- if (!bcm_atoipv4(src_ip, &tcpka->src_ip)) {
- AEXT_ERROR(dev->name, "src_ip parsing err ip=%s\n", src_ip);
- goto exit;
- }
- if (!bcm_atoipv4(dst_ip, &tcpka->dst_ip)) {
- AEXT_ERROR(dev->name, "dst_ip parsing err ip=%s\n", dst_ip);
- goto exit;
- }
- tcpka->ipid = ipid;
- tcpka->srcport = srcport;
- tcpka->dstport = dstport;
- tcpka->seq = seq;
- tcpka->ack = ack;
- tcpka->tcpwin = tcpwin;
- tcpka->tsval = tsval;
- tcpka->tsecr = tsecr;
- tcpka->len = len;
- ka_payload_len = wl_pattern_atoh(ka_payload, (char *)tcpka->ka_payload);
- if (ka_payload_len == -1) {
- AEXT_ERROR(dev->name,"rejecting ka_payload=%s\n", ka_payload);
- goto exit;
- }
- tcpka->ka_payload_len = ka_payload_len;
-
- AEXT_INFO(dev->name,
- "tcpka_conn_add %d %pM %pM %pM %d %d %d %u %u %d %u %u %u %u \"%s\"\n",
- tcpka->sess_id, &tcpka->dst_mac, &tcpka->src_ip, &tcpka->dst_ip,
- tcpka->ipid, tcpka->srcport, tcpka->dstport, tcpka->seq,
- tcpka->ack, tcpka->tcpwin, tcpka->tsval, tcpka->tsecr,
- tcpka->len, tcpka->ka_payload_len, tcpka->ka_payload);
-
- ret = wl_ext_iovar_setbuf(dev, "tcpka_conn_add", (char *)tcpka,
- (sizeof(tcpka_conn_t) + tcpka->ka_payload_len - 1),
- iovar_buf, sizeof(iovar_buf), NULL);
- }
-
-exit:
- if (tcpka)
- kfree(tcpka);
- return ret;
-}
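/* Illustrative usage, assuming the same "dhd_priv wl <cmd>" dispatch as above; all values
 * are placeholders laid out in the sscanf order (sess_id dst_mac src_ip dst_ip ipid
 * srcport dstport seq ack tcpwin tsval tsecr len ka_payload):
 *   dhd_priv wl tcpka_conn_add 1 00:90:4c:11:22:33 192.168.1.10 192.168.1.1 0 5000 4500 1 1 65535 0 0 0 0x00
 */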
-
-static int
-wl_ext_tcpka_conn_enable(struct net_device *dev, char *data, char *command,
- int total_len)
-{
- s8 iovar_buf[WLC_IOCTL_SMLEN];
- tcpka_conn_sess_t tcpka_conn;
- int ret = 0;
- uint32 sess_id = 0, flag, interval = 0, retry_interval = 0, retry_count = 0;
-
- if (data) {
- sscanf(data, "%d %d %d %d %d",
- &sess_id, &flag, &interval, &retry_interval, &retry_count);
- tcpka_conn.sess_id = sess_id;
- tcpka_conn.flag = flag;
- if (tcpka_conn.flag) {
- tcpka_conn.tcpka_timers.interval = interval;
- tcpka_conn.tcpka_timers.retry_interval = retry_interval;
- tcpka_conn.tcpka_timers.retry_count = retry_count;
- } else {
- tcpka_conn.tcpka_timers.interval = 0;
- tcpka_conn.tcpka_timers.retry_interval = 0;
- tcpka_conn.tcpka_timers.retry_count = 0;
- }
-
- AEXT_INFO(dev->name, "tcpka_conn_enable %d %d %d %d %d\n",
- tcpka_conn.sess_id, tcpka_conn.flag,
- tcpka_conn.tcpka_timers.interval,
- tcpka_conn.tcpka_timers.retry_interval,
- tcpka_conn.tcpka_timers.retry_count);
-
- ret = wl_ext_iovar_setbuf(dev, "tcpka_conn_enable", (char *)&tcpka_conn,
- sizeof(tcpka_conn_sess_t), iovar_buf, sizeof(iovar_buf), NULL);
- }
-
- return ret;
-}
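/* Illustrative usage (same assumed dispatch); arguments follow the sscanf order
 * sess_id flag interval retry_interval retry_count, and flag 0 clears the timers:
 *   dhd_priv wl tcpka_conn_enable 1 1 10 2 3   -> start keep-alive for session 1
 *   dhd_priv wl tcpka_conn_enable 1 0          -> stop keep-alive for session 1
 */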
-
-static int
-wl_ext_tcpka_conn_info(struct net_device *dev, char *data, char *command,
- int total_len)
-{
- s8 iovar_buf[WLC_IOCTL_SMLEN];
- tcpka_conn_sess_info_t *info = NULL;
- uint32 sess_id = 0;
- int ret = 0, bytes_written = 0;
-
- if (data) {
- sscanf(data, "%d", &sess_id);
- AEXT_INFO(dev->name, "tcpka_conn_sess_info %d\n", sess_id);
- ret = wl_ext_iovar_getbuf(dev, "tcpka_conn_sess_info", (char *)&sess_id,
- sizeof(uint32), iovar_buf, sizeof(iovar_buf), NULL);
- if (!ret) {
- info = (tcpka_conn_sess_info_t *) iovar_buf;
- bytes_written += snprintf(command+bytes_written, total_len,
- "id :%d\n"
- "ipid :%d\n"
- "seq :%u\n"
- "ack :%u",
- sess_id, info->ipid, info->seq, info->ack);
- AEXT_INFO(dev->name, "%s\n", command);
- ret = bytes_written;
- }
- }
-
- return ret;
-}
-#endif /* WL_EXT_TCPKA */
-
-static int
-wl_ext_rsdb_mode(struct net_device *dev, char *data, char *command,
- int total_len)
-{
- s8 iovar_buf[WLC_IOCTL_SMLEN];
- wl_config_t rsdb_mode_cfg = {1, 0}, *rsdb_p;
- int ret = 0;
-
- if (data) {
- rsdb_mode_cfg.config = (int)simple_strtol(data, NULL, 0);
- ret = wl_ext_iovar_setbuf(dev, "rsdb_mode", (char *)&rsdb_mode_cfg,
- sizeof(rsdb_mode_cfg), iovar_buf, WLC_IOCTL_SMLEN, NULL);
- AEXT_INFO(dev->name, "rsdb_mode %d\n", rsdb_mode_cfg.config);
- } else {
- ret = wl_ext_iovar_getbuf(dev, "rsdb_mode", NULL, 0,
- iovar_buf, WLC_IOCTL_SMLEN, NULL);
- if (!ret) {
- rsdb_p = (wl_config_t *) iovar_buf;
- ret = snprintf(command, total_len, "%d", rsdb_p->config);
- AEXT_TRACE(dev->name, "command result is %s\n", command);
- }
- }
-
- return ret;
-}
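/* Illustrative usage (same assumed dispatch); with no argument the current config is
 * printed back, otherwise the value is parsed by simple_strtol() and set via the iovar:
 *   dhd_priv wl rsdb_mode
 *   dhd_priv wl rsdb_mode 0
 */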
-
-static int
-wl_ext_recal(struct net_device *dev, char *data, char *command,
- int total_len)
-{
- int ret = 0, i, nchan, nssid = 0;
- int params_size = WL_SCAN_PARAMS_FIXED_SIZE + WL_NUMCHANNELS * sizeof(uint16);
- wl_scan_params_t *params = NULL;
- int ioctl_ver;
- char *p;
-
- AEXT_TRACE(dev->name, "Enter\n");
-
- if (data) {
- params_size += WL_SCAN_PARAMS_SSID_MAX * sizeof(wlc_ssid_t);
- params = (wl_scan_params_t *) kzalloc(params_size, GFP_KERNEL);
- if (params == NULL) {
- ret = -ENOMEM;
- goto exit;
- }
- memset(params, 0, params_size);
-
- wl_ext_get_ioctl_ver(dev, &ioctl_ver);
-
- memcpy(&params->bssid, &ether_bcast, ETHER_ADDR_LEN);
- params->bss_type = DOT11_BSSTYPE_ANY;
- params->scan_type = 0;
- params->nprobes = -1;
- params->active_time = -1;
- params->passive_time = -1;
- params->home_time = -1;
- params->channel_num = 0;
-
- params->scan_type |= WL_SCANFLAGS_PASSIVE;
- nchan = 2;
- params->channel_list[0] = wf_channel2chspec(1, WL_CHANSPEC_BW_20);
- params->channel_list[1] = wf_channel2chspec(2, WL_CHANSPEC_BW_20);
-
- params->nprobes = htod32(params->nprobes);
- params->active_time = htod32(params->active_time);
- params->passive_time = htod32(params->passive_time);
- params->home_time = htod32(params->home_time);
-
- for (i = 0; i < nchan; i++) {
- wl_ext_chspec_host_to_driver(ioctl_ver, params->channel_list[i]);
- }
-
- p = (char*)params->channel_list + nchan * sizeof(uint16);
-
- params->channel_num = htod32((nssid << WL_SCAN_PARAMS_NSSID_SHIFT) |
- (nchan & WL_SCAN_PARAMS_COUNT_MASK));
- params_size = p - (char*)params + nssid * sizeof(wlc_ssid_t);
-
- AEXT_INFO(dev->name, "recal\n");
- ret = wl_ext_ioctl(dev, WLC_SCAN, params, params_size, 1);
- }
-
-exit:
- if (params)
- kfree(params);
- return ret;
-}
-
-static s32
-wl_ext_add_remove_eventmsg(struct net_device *ndev, u16 event, bool add)
-{
- s8 iovbuf[WL_EVENTING_MASK_LEN + 12];
- s8 eventmask[WL_EVENTING_MASK_LEN];
- s32 err = 0;
-
- if (!ndev)
- return -ENODEV;
-
- /* Setup event_msgs */
- err = wldev_iovar_getbuf(ndev, "event_msgs", NULL, 0, iovbuf, sizeof(iovbuf), NULL);
- if (unlikely(err)) {
- AEXT_ERROR(ndev->name, "Get event_msgs error (%d)\n", err);
- goto eventmsg_out;
- }
- memcpy(eventmask, iovbuf, WL_EVENTING_MASK_LEN);
- if (add) {
- setbit(eventmask, event);
- } else {
- clrbit(eventmask, event);
- }
- err = wldev_iovar_setbuf(ndev, "event_msgs", eventmask, WL_EVENTING_MASK_LEN, iovbuf,
- sizeof(iovbuf), NULL);
- if (unlikely(err)) {
- AEXT_ERROR(ndev->name, "Set event_msgs error (%d)\n", err);
- goto eventmsg_out;
- }
-
-eventmsg_out:
- return err;
-}
-
-static int
-wl_ext_event_msg(struct net_device *dev, char *data,
- char *command, int total_len)
-{
- s8 iovbuf[WL_EVENTING_MASK_LEN + 12];
- s8 eventmask[WL_EVENTING_MASK_LEN];
- int i, bytes_written = 0, add = -1;
- uint event;
- char *vbuf;
- bool skipzeros;
-
- /* dhd_priv wl event_msg [offset] [1/0, 1 for add, 0 for remove] */
- /* dhd_priv wl event_msg 40 1 */
- if (data) {
- AEXT_TRACE(dev->name, "data = %s\n", data);
- sscanf(data, "%d %d", &event, &add);
- /* Setup event_msgs */
- bytes_written = wldev_iovar_getbuf(dev, "event_msgs", NULL, 0, iovbuf,
- sizeof(iovbuf), NULL);
- if (unlikely(bytes_written)) {
- AEXT_ERROR(dev->name, "Get event_msgs error (%d)\n", bytes_written);
- goto eventmsg_out;
- }
- memcpy(eventmask, iovbuf, WL_EVENTING_MASK_LEN);
- if (add == -1) {
- if (isset(eventmask, event))
- bytes_written += snprintf(command+bytes_written, total_len, "1");
- else
- bytes_written += snprintf(command+bytes_written, total_len, "0");
- AEXT_INFO(dev->name, "%s\n", command);
- goto eventmsg_out;
- }
- bytes_written = wl_ext_add_remove_eventmsg(dev, event, add);
- }
- else {
- /* Setup event_msgs */
- bytes_written = wldev_iovar_getbuf(dev, "event_msgs", NULL, 0, iovbuf,
- sizeof(iovbuf), NULL);
- if (bytes_written) {
- AEXT_ERROR(dev->name, "Get event_msgs error (%d)\n", bytes_written);
- goto eventmsg_out;
- }
- vbuf = (char *)iovbuf;
- bytes_written += snprintf(command+bytes_written, total_len, "0x");
- for (i = (sizeof(eventmask) - 1); i >= 0; i--) {
- if (vbuf[i] || (i == 0))
- skipzeros = FALSE;
- if (skipzeros)
- continue;
- bytes_written += snprintf(command+bytes_written, total_len,
- "%02x", vbuf[i] & 0xff);
- }
- AEXT_INFO(dev->name, "%s\n", command);
- }
-
-eventmsg_out:
- return bytes_written;
-}
-
-#ifdef PKT_FILTER_SUPPORT
-extern void dhd_pktfilter_offload_set(dhd_pub_t * dhd, char *arg);
-extern void dhd_pktfilter_offload_delete(dhd_pub_t *dhd, int id);
-extern void dhd_pktfilter_offload_enable(dhd_pub_t * dhd, char *arg, int enable, int master_mode);
-static int
-wl_ext_pkt_filter_add(struct net_device *dev, char *data, char *command,
- int total_len)
-{
- struct dhd_pub *dhd = dhd_get_pub(dev);
- int i, filter_id, new_id = 0, cnt;
- conf_pkt_filter_add_t *filter_add = &dhd->conf->pkt_filter_add;
- char **pktfilter = dhd->pktfilter;
- int err = 0;
-
- if (data) {
- AEXT_TRACE(dev->name, "data = %s\n", data);
-
- new_id = simple_strtol(data, NULL, 10);
- if (new_id <= 0) {
- AEXT_ERROR(dev->name, "wrong id %d\n", new_id);
- return -1;
- }
-
- cnt = dhd->pktfilter_count;
- for (i=0; i<cnt; i++) {
- if (!pktfilter[i])
- continue;
- filter_id = simple_strtol(pktfilter[i], NULL, 10);
- if (new_id == filter_id) {
- AEXT_ERROR(dev->name, "filter id %d already in list\n", filter_id);
- return -1;
- }
- }
-
- cnt = filter_add->count;
- if (cnt >= DHD_CONF_FILTER_MAX) {
- AEXT_ERROR(dev->name, "not enough filter\n");
- return -1;
- }
- for (i=0; i<cnt; i++) {
- filter_id = simple_strtol(filter_add->filter[i], NULL, 10);
- if (new_id == filter_id) {
- AEXT_ERROR(dev->name, "filter id %d already in list\n", filter_id);
- return -1;
- }
- }
-
- strcpy(&filter_add->filter[cnt][0], data);
- dhd->pktfilter[dhd->pktfilter_count] = filter_add->filter[cnt];
- filter_add->count++;
- dhd->pktfilter_count++;
-
- dhd_pktfilter_offload_set(dhd, data);
- AEXT_INFO(dev->name, "filter id %d added\n", new_id);
- }
-
- return err;
-}
-
-static int
-wl_ext_pkt_filter_delete(struct net_device *dev, char *data, char *command,
- int total_len)
-{
- struct dhd_pub *dhd = dhd_get_pub(dev);
- int i, j, filter_id, cnt;
- char **pktfilter = dhd->pktfilter;
- conf_pkt_filter_add_t *filter_add = &dhd->conf->pkt_filter_add;
- bool in_filter = FALSE;
- int id, err = 0;
-
- if (data) {
- AEXT_TRACE(dev->name, "data = %s\n", data);
- id = (int)simple_strtol(data, NULL, 0);
-
- cnt = filter_add->count;
- for (i=0; i<cnt; i++) {
- filter_id = simple_strtol(filter_add->filter[i], NULL, 10);
- if (id == filter_id) {
- in_filter = TRUE;
- memset(filter_add->filter[i], 0, PKT_FILTER_LEN);
- for (j=i; j<(cnt-1); j++) {
- strcpy(filter_add->filter[j], filter_add->filter[j+1]);
- memset(filter_add->filter[j+1], 0, PKT_FILTER_LEN);
- }
- cnt--;
- filter_add->count--;
- dhd->pktfilter_count--;
- }
- }
-
- cnt = dhd->pktfilter_count;
- for (i=0; i<cnt; i++) {
- if (!pktfilter[i])
- continue;
- filter_id = simple_strtol(pktfilter[i], NULL, 10);
- if (id == filter_id) {
- in_filter = TRUE;
- memset(pktfilter[i], 0, strlen(pktfilter[i]));
- }
- }
-
- if (in_filter) {
- dhd_pktfilter_offload_delete(dhd, id);
- AEXT_INFO(dev->name, "filter id %d deleted\n", id);
- } else {
- AEXT_ERROR(dev->name, "filter id %d not in list\n", id);
- err = -1;
- }
- }
-
- return err;
-}
-
-static int
-wl_ext_pkt_filter_enable(struct net_device *dev, char *data, char *command,
- int total_len)
-{
- struct dhd_pub *dhd = dhd_get_pub(dev);
- int err = 0, id, enable;
- int i, filter_id, cnt;
- char **pktfilter = dhd->pktfilter;
- bool in_filter = FALSE;
-
- /* dhd_priv wl pkt_filter_enable [id] [1/0] */
- /* dhd_priv wl pkt_filter_enable 141 1 */
- if (data) {
- sscanf(data, "%d %d", &id, &enable);
-
- cnt = dhd->pktfilter_count;
- for (i=0; i<cnt; i++) {
- if (!pktfilter[i])
- continue;
- filter_id = simple_strtol(pktfilter[i], NULL, 10);
- if (id == filter_id) {
- in_filter = TRUE;
- break;
- }
- }
-
- if (in_filter) {
- dhd_pktfilter_offload_enable(dhd, dhd->pktfilter[i],
- enable, dhd_master_mode);
- AEXT_INFO(dev->name, "filter id %d %s\n", id, enable?"enabled":"disabled");
- } else {
- AEXT_ERROR(dev->name, "filter id %d not in list\n", id);
- err = -1;
- }
- }
-
- return err;
-}
-#endif /* PKT_FILTER_SUPPORT */
-
-#ifdef SENDPROB
-static int
-wl_ext_send_probreq(struct net_device *dev, char *data, char *command,
- int total_len)
-{
- int err = 0;
- char addr_str[16], addr[6];
- char iovar_buf[WLC_IOCTL_SMLEN]="\0";
- char ie_data[WLC_IOCTL_SMLEN] = "\0";
- wl_probe_params_t params;
-
- /* dhd_priv wl send_probreq [dest. addr] [OUI+VAL] */
- /* dhd_priv wl send_probreq 0x00904c010203 0x00904c01020304050607 */
- if (data) {
- AEXT_TRACE(dev->name, "data = %s\n", data);
- sscanf(data, "%s %s", addr_str, ie_data);
- AEXT_TRACE(dev->name, "addr=%s, ie=%s\n", addr_str, ie_data);
-
- if (strlen(addr_str) != 14) {
- AEXT_ERROR(dev->name, "wrong addr %s\n", addr_str);
- goto exit;
- }
- wl_pattern_atoh(addr_str, (char *) addr);
- memset(&params, 0, sizeof(params));
- memcpy(&params.bssid, addr, ETHER_ADDR_LEN);
- memcpy(&params.mac, addr, ETHER_ADDR_LEN);
-
- err = wl_ext_add_del_ie(dev, VNDR_IE_PRBREQ_FLAG, ie_data, "add");
- if (err)
- goto exit;
- err = wl_ext_iovar_setbuf(dev, "sendprb", (char *)¶ms, sizeof(params),
- iovar_buf, sizeof(iovar_buf), NULL);
- OSL_SLEEP(100);
- wl_ext_add_del_ie(dev, VNDR_IE_PRBREQ_FLAG, ie_data, "del");
- }
-
-exit:
- return err;
-}
-
-static int
-wl_ext_send_probresp(struct net_device *dev, char *data, char *command,
- int total_len)
-{
- int err = 0;
- char addr_str[16], addr[6];
- char iovar_buf[WLC_IOCTL_SMLEN]="\0";
- char ie_data[WLC_IOCTL_SMLEN] = "\0";
-
- /* dhd_priv wl send_probresp [dest. addr] [OUI+VAL] */
- /* dhd_priv wl send_probresp 0x00904c010203 0x00904c01020304050607 */
- if (data) {
- AEXT_TRACE(dev->name, "data = %s\n", data);
- sscanf(data, "%s %s", addr_str, ie_data);
- AEXT_TRACE(dev->name, "addr=%s, ie=%s\n", addr_str, ie_data);
-
- if (strlen(addr_str) != 14) {
- AEXT_ERROR(dev->name, "wrong addr %s\n", addr_str);
- goto exit;
- }
- wl_pattern_atoh(addr_str, (char *) addr);
-
- err = wl_ext_add_del_ie(dev, VNDR_IE_PRBRSP_FLAG, ie_data, "add");
- if (err)
- goto exit;
- err = wl_ext_iovar_setbuf(dev, "send_probresp", addr, sizeof(addr),
- iovar_buf, sizeof(iovar_buf), NULL);
- OSL_SLEEP(100);
- wl_ext_add_del_ie(dev, VNDR_IE_PRBRSP_FLAG, ie_data, "del");
- }
-
-exit:
- return err;
-}
-
-static int
-wl_ext_recv_probreq(struct net_device *dev, char *data, char *command,
- int total_len)
-{
- int err = 0, enable = 0;
- char cmd[32];
- struct dhd_pub *dhd = dhd_get_pub(dev);
-
- /* enable:
- 1. dhd_priv wl 86 0
- 2. dhd_priv wl event_msg 44 1
- disable:
- 1. dhd_priv wl 86 2;
- 2. dhd_priv wl event_msg 44 0
- */
- if (data) {
- AEXT_TRACE(dev->name, "data = %s\n", data);
- sscanf(data, "%d", &enable);
- if (enable) {
- strcpy(cmd, "wl 86 0");
- err = wl_ext_wl_iovar(dev, cmd, total_len);
- if (err)
- goto exit;
- strcpy(cmd, "wl event_msg 44 1");
- err = wl_ext_wl_iovar(dev, cmd, total_len);
- if (err)
- goto exit;
- dhd->recv_probereq = TRUE;
- } else {
- if (dhd->conf->pm) {
- strcpy(cmd, "wl 86 2");
- wl_ext_wl_iovar(dev, cmd, total_len);
- }
- strcpy(cmd, "wl event_msg 44 0");
- wl_ext_wl_iovar(dev, cmd, total_len);
- dhd->recv_probereq = FALSE;
- }
- }
-
-exit:
- return err;
-}
-
-static int
-wl_ext_recv_probresp(struct net_device *dev, char *data, char *command,
- int total_len)
-{
- int err = 0, enable = 0;
- char cmd[32];
-
- /* enable:
- 1. dhd_priv wl pkt_filter_add 150 0 0 0 0xFF 0x50
- 2. dhd_priv wl pkt_filter_enable 150 1
- 3. dhd_priv wl mpc 0
- 4. dhd_priv wl 108 1
- disable:
- 1. dhd_priv wl 108 0
- 2. dhd_priv wl mpc 1
- 3. dhd_priv wl pkt_filter_disable 150 0
- 4. dhd_priv pkt_filter_delete 150
- */
- if (data) {
- AEXT_TRACE(dev->name, "data = %s\n", data);
- sscanf(data, "%d", &enable);
- if (enable) {
- strcpy(cmd, "wl pkt_filter_add 150 0 0 0 0xFF 0x50");
- err = wl_ext_wl_iovar(dev, cmd, total_len);
- if (err)
- goto exit;
- strcpy(cmd, "wl pkt_filter_enable 150 1");
- err = wl_ext_wl_iovar(dev, cmd, total_len);
- if (err)
- goto exit;
- strcpy(cmd, "wl mpc 0");
- err = wl_ext_wl_iovar(dev, cmd, total_len);
- if (err)
- goto exit;
- strcpy(cmd, "wl 108 1");
- err= wl_ext_wl_iovar(dev, cmd, total_len);
- } else {
- strcpy(cmd, "wl 108 0");
- wl_ext_wl_iovar(dev, cmd, total_len);
- strcpy(cmd, "wl mpc 1");
- wl_ext_wl_iovar(dev, cmd, total_len);
- strcpy(cmd, "wl pkt_filter_enable 150 0");
- wl_ext_wl_iovar(dev, cmd, total_len);
- strcpy(cmd, "wl pkt_filter_delete 150");
- wl_ext_wl_iovar(dev, cmd, total_len);
+ if (cur_if->ifmode == ISTA_MODE) {
+ wl_ext_iovar_setint(net, "roam_off", dhd->conf->roam_off);
+ wl_ext_iovar_setint(net, "bcn_timeout", dhd->conf->bcn_timeout);
+ } else if (cur_if->ifmode == IMESH_MODE) {
+ int pm = 0;
+ wl_ext_ioctl(net, WLC_SET_PM, &pm, sizeof(pm), 1);
}
}
-exit:
- return err;
+ return 0;
}
-#endif /* SENDPROB */
-static int
-wl_ext_gtk_key_info(struct net_device *dev, char *data, char *command, int total_len)
+int
+wl_ext_iapsta_dettach_netdev(void)
{
- int err = 0;
- char iovar_buf[WLC_IOCTL_SMLEN]="\0";
- gtk_keyinfo_t keyinfo;
- bcol_gtk_para_t bcol_keyinfo;
-
- /* wl gtk_key_info [kck kek replay_ctr] */
- /* wl gtk_key_info 001122..FF001122..FF00000000000001 */
- if (data) {
- memset(&keyinfo, 0, sizeof(keyinfo));
- memcpy(&keyinfo, data, RSN_KCK_LENGTH+RSN_KEK_LENGTH+RSN_REPLAY_LEN);
- if (android_msg_level & ANDROID_INFO_LEVEL) {
- prhex("kck", (uchar *)keyinfo.KCK, RSN_KCK_LENGTH);
- prhex("kek", (uchar *)keyinfo.KEK, RSN_KEK_LENGTH);
- prhex("replay_ctr", (uchar *)keyinfo.ReplayCounter, RSN_REPLAY_LEN);
- }
-
- memset(&bcol_keyinfo, 0, sizeof(bcol_keyinfo));
- bcol_keyinfo.enable = 1;
- bcol_keyinfo.ptk_len = 64;
- memcpy(&bcol_keyinfo.ptk, data, RSN_KCK_LENGTH+RSN_KEK_LENGTH);
- err = wl_ext_iovar_setbuf(dev, "bcol_gtk_rekey_ptk", &bcol_keyinfo,
- sizeof(bcol_keyinfo), iovar_buf, sizeof(iovar_buf), NULL);
- if (!err) {
- goto exit;
- }
+ struct wl_apsta_params *apsta_params = &g_apsta_params;
- err = wl_ext_iovar_setbuf(dev, "gtk_key_info", &keyinfo, sizeof(keyinfo),
- iovar_buf, sizeof(iovar_buf), NULL);
- if (err) {
- AEXT_ERROR(dev->name, "failed to set gtk_key_info\n");
- goto exit;
- }
- }
+ printf("%s: Enter\n", __FUNCTION__);
+ memset(apsta_params, 0, sizeof(struct wl_apsta_params));
-exit:
- return err;
+ return 0;
}
+#endif
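/*
 * A minimal standalone sketch of the blob layout gtk_key_info consumes,
 * assuming the usual RSN sizes (RSN_KCK_LENGTH 16, RSN_KEK_LENGTH 16,
 * RSN_REPLAY_LEN 8); the struct and helper names here are illustrative,
 * not the driver's.
 */
#include <string.h>

struct gtk_keyinfo_sketch {
	unsigned char kck[16];		/* EAPOL Key Confirmation Key */
	unsigned char kek[16];		/* EAPOL Key Encryption Key */
	unsigned char replay_ctr[8];	/* last seen EAPOL replay counter */
};

static void unpack_gtk_blob_sketch(const unsigned char *blob,
	struct gtk_keyinfo_sketch *ki)
{
	memcpy(ki->kck, blob, 16);		/* bytes  0..15 */
	memcpy(ki->kek, blob + 16, 16);		/* bytes 16..31 */
	memcpy(ki->replay_ctr, blob + 32, 8);	/* bytes 32..39 */
}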
-#ifdef WL_EXT_WOWL
-static int
-wl_ext_wowl_pattern(struct net_device *dev, char *data, char *command,
- int total_len)
+#ifdef IDHCP
+int
+wl_ext_ip_dump(int ip, char *buf)
{
- s8 iovar_buf[WLC_IOCTL_SMLEN];
- uint buf_len = 0;
- int offset;
- char mask[128]="\0", pattern[128]="\0", add[4]="\0",
- mask_tmp[128], *pmask_tmp;
- uint32 masksize, patternsize, pad_len = 0;
- wl_wowl_pattern2_t *wowl_pattern2 = NULL;
- wl_wowl_pattern_t *wowl_pattern = NULL;
- char *mask_and_pattern;
- wl_wowl_pattern_list_t *list;
- uint8 *ptr;
- int ret = 0, i, j, v;
-
- if (data) {
- sscanf(data, "%s %d %s %s", add, &offset, mask_tmp, pattern);
- if (strcmp(add, "add") != 0 && strcmp(add, "clr") != 0) {
- AEXT_ERROR(dev->name, "first arg should be add or clr\n");
- goto exit;
- }
- if (!strcmp(add, "clr")) {
- AEXT_INFO(dev->name, "wowl_pattern clr\n");
- ret = wl_ext_iovar_setbuf(dev, "wowl_pattern", add,
- sizeof(add), iovar_buf, sizeof(iovar_buf), NULL);
- goto exit;
- }
- masksize = strlen(mask_tmp) -2;
- AEXT_TRACE(dev->name, "0 mask_tmp=%s, masksize=%d\n", mask_tmp, masksize);
-
- // add padding
- if (masksize % 16)
- pad_len = (16 - masksize % 16);
- for (i=0; i<pad_len; i++)
- strcat(mask_tmp, "0");
- masksize += pad_len;
- AEXT_TRACE(dev->name, "1 mask_tmp=%s, masksize=%d\n", mask_tmp, masksize);
-
- // translate 0x00 to 0, others to 1
- j = 0;
- pmask_tmp = &mask_tmp[2];
- for (i=0; i<masksize/2; i++) {
- if(strncmp(&pmask_tmp[i*2], "00", 2))
- pmask_tmp[j] = '1';
- else
- pmask_tmp[j] = '0';
- j++;
- }
- pmask_tmp[j] = '\0';
- masksize = masksize / 2;
- AEXT_TRACE(dev->name, "2 mask_tmp=%s, masksize=%d\n", mask_tmp, masksize);
-
- // reorder per 8bits
- pmask_tmp = &mask_tmp[2];
- for (i=0; i<masksize/8; i++) {
- char c;
- for (j=0; j<4; j++) {
- c = pmask_tmp[i*8+j];
- pmask_tmp[i*8+j] = pmask_tmp[(i+1)*8-j-1];
- pmask_tmp[(i+1)*8-j-1] = c;
- }
- }
- AEXT_TRACE(dev->name, "3 mask_tmp=%s, masksize=%d\n", mask_tmp, masksize);
-
- // translate 8bits to 1byte
- j = 0; v = 0;
- pmask_tmp = &mask_tmp[2];
- strcpy(mask, "0x");
- for (i=0; i<masksize; i++) {
- v = (v<<1) | (pmask_tmp[i]=='1');
- if (((i+1)%4) == 0) {
- if (v < 10)
- mask[j+2] = v + '0';
- else
- mask[j+2] = (v-10) + 'a';
- j++;
- v = 0;
- }
- }
- mask[j+2] = '\0';
- masksize = j/2;
- AEXT_TRACE(dev->name, "4 mask=%s, masksize=%d\n", mask, masksize);
-
- patternsize = (strlen(pattern)-2)/2;
- buf_len = sizeof(wl_wowl_pattern2_t) + patternsize + masksize;
- wowl_pattern2 = kmalloc(buf_len, GFP_KERNEL);
- if (wowl_pattern2 == NULL) {
- AEXT_ERROR(dev->name, "Failed to allocate buffer of %d bytes\n", buf_len);
- goto exit;
- }
- memset(wowl_pattern2, 0, sizeof(wl_wowl_pattern2_t));
+ unsigned char bytes[4];
+ int bytes_written=-1;
- strncpy(wowl_pattern2->cmd, add, sizeof(add));
- wowl_pattern2->wowl_pattern.type = 0;
- wowl_pattern2->wowl_pattern.offset = offset;
- mask_and_pattern = (char*)wowl_pattern2 + sizeof(wl_wowl_pattern2_t);
+ bytes[0] = ip & 0xFF;
+ bytes[1] = (ip >> 8) & 0xFF;
+ bytes[2] = (ip >> 16) & 0xFF;
+ bytes[3] = (ip >> 24) & 0xFF;
+ bytes_written = sprintf(buf, "%d.%d.%d.%d", bytes[0], bytes[1], bytes[2], bytes[3]);
- wowl_pattern2->wowl_pattern.masksize = masksize;
- ret = wl_pattern_atoh(mask, mask_and_pattern);
- if (ret == -1) {
- AEXT_ERROR(dev->name, "rejecting mask=%s\n", mask);
- goto exit;
- }
+ return bytes_written;
+}
- mask_and_pattern += wowl_pattern2->wowl_pattern.masksize;
- wowl_pattern2->wowl_pattern.patternoffset = sizeof(wl_wowl_pattern_t) +
- wowl_pattern2->wowl_pattern.masksize;
+/*
+terence 20170215:
+dhd_priv dhcpc_dump ifname [wlan0|wlan1]
+dhd_priv dhcpc_enable [0|1]
+*/
+int
+wl_ext_dhcpc_enable(struct net_device *dev, char *command, int total_len)
+{
+ int enable = -1, ret = -1;
+ int bytes_written = -1;
- wowl_pattern2->wowl_pattern.patternsize = patternsize;
- ret = wl_pattern_atoh(pattern, mask_and_pattern);
- if (ret == -1) {
- AEXT_ERROR(dev->name, "rejecting pattern=%s\n", pattern);
- goto exit;
- }
+ ANDROID_TRACE(("%s: cmd %s\n", __FUNCTION__, command));
- AEXT_INFO(dev->name, "%s %d %s %s\n", add, offset, mask, pattern);
+ sscanf(command, "%*s %d", &enable);
- ret = wl_ext_iovar_setbuf(dev, "wowl_pattern", (char *)wowl_pattern2,
- buf_len, iovar_buf, sizeof(iovar_buf), NULL);
- }
+ if (enable >= 0)
+ ret = wl_ext_iovar_setint(dev, "dhcpc_enable", enable);
else {
- ret = wl_ext_iovar_getbuf(dev, "wowl_pattern", NULL, 0,
- iovar_buf, sizeof(iovar_buf), NULL);
+ ret = wl_ext_iovar_getint(dev, "dhcpc_enable", &enable);
if (!ret) {
- list = (wl_wowl_pattern_list_t *)iovar_buf;
- ret = snprintf(command, total_len, "#of patterns :%d\n", list->count);
- ptr = (uint8 *)list->pattern;
- for (i=0; i<list->count; i++) {
- uint8 *pattern;
- wowl_pattern = (wl_wowl_pattern_t *)ptr;
- ret += snprintf(command+ret, total_len,
- "Pattern %d:\n"
- "ID :0x%x\n"
- "Offset :%d\n"
- "Masksize :%d\n"
- "Mask :0x",
- i+1, (uint32)wowl_pattern->id, wowl_pattern->offset,
- wowl_pattern->masksize);
- pattern = ((uint8 *)wowl_pattern + sizeof(wl_wowl_pattern_t));
- for (j = 0; j < wowl_pattern->masksize; j++) {
- ret += snprintf(command+ret, total_len, "%02x", pattern[j]);
- }
- ret += snprintf(command+ret, total_len, "\n");
- ret += snprintf(command+ret, total_len,
- "PatternSize:%d\n"
- "Pattern :0x",
- wowl_pattern->patternsize);
-
- pattern = ((uint8*)wowl_pattern + wowl_pattern->patternoffset);
- for (j=0; j<wowl_pattern->patternsize; j++)
- ret += snprintf(command+ret, total_len, "%02x", pattern[j]);
- ret += snprintf(command+ret, total_len, "\n");
- ptr += (wowl_pattern->masksize + wowl_pattern->patternsize +
- sizeof(wl_wowl_pattern_t));
- }
-
- AEXT_INFO(dev->name, "%s\n", command);
+ bytes_written = snprintf(command, total_len, "%d", enable);
+ ANDROID_TRACE(("%s: command result is %s\n", __FUNCTION__, command));
+ ret = bytes_written;
}
}
-exit:
- if (wowl_pattern2)
- kfree(wowl_pattern2);
return ret;
}
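/*
 * A minimal standalone sketch of what the pad / translate / reorder /
 * repack string passes in wl_ext_wowl_pattern() boil down to: one output
 * bit per input mask byte, set iff that byte is non-zero, packed LSB-first.
 * The helper name and the demo value are illustrative only.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static int hexmask_to_bitmask_sketch(const char *hexmask, unsigned char *out, int out_len)
{
	int nbytes = ((int)strlen(hexmask) - 2) / 2;	/* hex digits after "0x" -> bytes */
	int i, obytes = (nbytes + 7) / 8;

	if (nbytes <= 0 || obytes > out_len)
		return -1;
	memset(out, 0, obytes);
	for (i = 0; i < nbytes; i++) {
		char byte_str[3] = { hexmask[2 + 2 * i], hexmask[3 + 2 * i], '\0' };
		if (strtoul(byte_str, NULL, 16) != 0)
			out[i / 8] |= 1u << (i % 8);	/* input byte i -> output bit i */
	}
	return obytes;
}

int main(void)
{
	unsigned char mask[16];
	int n = hexmask_to_bitmask_sketch("0x00FF00FF00FF00FF", mask, sizeof(mask));

	printf("%d byte(s), mask[0]=0x%02x\n", n, mask[0]);	/* prints: 1 byte(s), mask[0]=0xaa */
	return 0;
}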
-static int
-wl_ext_wowl_wakeind(struct net_device *dev, char *data, char *command,
- int total_len)
+int
+wl_ext_dhcpc_dump(struct net_device *dev, char *command, int total_len)
{
- s8 iovar_buf[WLC_IOCTL_SMLEN];
- wl_wowl_wakeind_t *wake = NULL;
- int ret = -1;
- char clr[6]="\0";
+ int ret = 0;
+ int bytes_written = 0;
+ uint32 ip_addr;
+ char buf[20]="";
- if (data) {
- sscanf(data, "%s", clr);
- if (!strcmp(clr, "clear")) {
- AEXT_INFO(dev->name, "wowl_wakeind clear\n");
- ret = wl_ext_iovar_setbuf(dev, "wowl_wakeind", clr, sizeof(clr),
- iovar_buf, sizeof(iovar_buf), NULL);
- } else {
- AEXT_ERROR(dev->name, "first arg should be clear\n");
- }
- } else {
- ret = wl_ext_iovar_getbuf(dev, "wowl_wakeind", NULL, 0,
- iovar_buf, sizeof(iovar_buf), NULL);
- if (!ret) {
- wake = (wl_wowl_wakeind_t *) iovar_buf;
- ret = snprintf(command, total_len, "wakeind=0x%x", wake->ucode_wakeind);
- if (wake->ucode_wakeind & WL_WOWL_MAGIC)
- ret += snprintf(command+ret, total_len, " (MAGIC packet)");
- if (wake->ucode_wakeind & WL_WOWL_NET)
- ret += snprintf(command+ret, total_len, " (Netpattern)");
- if (wake->ucode_wakeind & WL_WOWL_DIS)
- ret += snprintf(command+ret, total_len, " (Disassoc/Deauth)");
- if (wake->ucode_wakeind & WL_WOWL_BCN)
- ret += snprintf(command+ret, total_len, " (Loss of beacon)");
- if (wake->ucode_wakeind & WL_WOWL_TCPKEEP_TIME)
- ret += snprintf(command+ret, total_len, " (TCPKA timeout)");
- if (wake->ucode_wakeind & WL_WOWL_TCPKEEP_DATA)
- ret += snprintf(command+ret, total_len, " (TCPKA data)");
- if (wake->ucode_wakeind & WL_WOWL_TCPFIN)
- ret += snprintf(command+ret, total_len, " (TCP FIN)");
- AEXT_INFO(dev->name, "%s\n", command);
- }
+ ret = wl_ext_iovar_getint(dev, "dhcpc_ip_addr", &ip_addr);
+ if (!ret) {
+ wl_ext_ip_dump(ip_addr, buf);
+ bytes_written += snprintf(command+bytes_written, total_len, "ipaddr %s ", buf);
}
- return ret;
-}
-#endif /* WL_EXT_WOWL */
-
-#ifdef WL_GPIO_NOTIFY
-typedef struct notify_payload {
- int index;
- int len;
- char payload[128];
-} notify_payload_t;
+ ret = wl_ext_iovar_getint(dev, "dhcpc_ip_mask", &ip_addr);
+ if (!ret) {
+ wl_ext_ip_dump(ip_addr, buf);
+ bytes_written += snprintf(command+bytes_written, total_len, "mask %s ", buf);
+ }
-static int
-wl_ext_gpio_notify(struct net_device *dev, char *data, char *command,
- int total_len)
-{
- s8 iovar_buf[WLC_IOCTL_SMLEN];
- notify_payload_t notify, *pnotify = NULL;
- int i, ret = 0, bytes_written = 0;
- char frame_str[WLC_IOCTL_SMLEN+3];
+ ret = wl_ext_iovar_getint(dev, "dhcpc_ip_gateway", &ip_addr);
+ if (!ret) {
+ wl_ext_ip_dump(ip_addr, buf);
+ bytes_written += snprintf(command+bytes_written, total_len, "gw %s ", buf);
+ }
- if (data) {
- memset(¬ify, 0, sizeof(notify));
- memset(frame_str, 0, sizeof(frame_str));
- sscanf(data, "%d %s", ¬ify.index, frame_str);
-
- if (notify.index < 0)
- notify.index = 0;
-
- if (strlen(frame_str)) {
- notify.len = wl_pattern_atoh(frame_str, notify.payload);
- if (notify.len == -1) {
- AEXT_ERROR(dev->name, "rejecting pattern=%s\n", frame_str);
- goto exit;
- }
- AEXT_INFO(dev->name, "index=%d, len=%d\n", notify.index, notify.len);
- if (android_msg_level & ANDROID_INFO_LEVEL)
- prhex("payload", (uchar *)notify.payload, notify.len);
- ret = wl_ext_iovar_setbuf(dev, "bcol_gpio_noti", (char *)¬ify,
- sizeof(notify), iovar_buf, WLC_IOCTL_SMLEN, NULL);
- } else {
- AEXT_INFO(dev->name, "index=%d\n", notify.index);
- ret = wl_ext_iovar_getbuf(dev, "bcol_gpio_noti", ¬ify.index,
- sizeof(notify.index), iovar_buf, sizeof(iovar_buf), NULL);
- if (!ret) {
- pnotify = (notify_payload_t *)iovar_buf;
- bytes_written += snprintf(command+bytes_written, total_len,
- "Id :%d\n"
- "Packet :0x",
- pnotify->index);
- for (i=0; i<pnotify->len; i++) {
- bytes_written += snprintf(command+bytes_written, total_len,
- "%02x", pnotify->payload[i]);
- }
- AEXT_TRACE(dev->name, "command result is\n%s\n", command);
- ret = bytes_written;
- }
- }
+ ret = wl_ext_iovar_getint(dev, "dhcpc_ip_dnsserv", &ip_addr);
+ if (!ret) {
+ wl_ext_ip_dump(ip_addr, buf);
+ bytes_written += snprintf(command+bytes_written, total_len, "dnsserv %s ", buf);
}
-exit:
- return ret;
-}
-#endif /* WL_GPIO_NOTIFY */
-
-#ifdef CSI_SUPPORT
-typedef struct csi_config {
- /* Peer device mac address. */
- struct ether_addr addr;
- /* BW to be used in the measurements. This needs to be supported both by the */
- /* device itself and the peer. */
- uint32 bw;
- /* Time interval between measurements (units: 1 ms). */
- uint32 period;
- /* CSI method */
- uint32 method;
-} csi_config_t;
-
-typedef struct csi_list {
- uint32 cnt;
- csi_config_t configs[1];
-} csi_list_t;
+ if (!bytes_written)
+ bytes_written = -1;
-static int
-wl_ether_atoe(const char *a, struct ether_addr *n)
-{
- char *c = NULL;
- int i = 0;
+ ANDROID_TRACE(("%s: command result is %s\n", __FUNCTION__, command));
- memset(n, 0, ETHER_ADDR_LEN);
- for (;;) {
- n->octet[i++] = (uint8)strtoul(a, &c, 16);
- if (!*c++ || i == ETHER_ADDR_LEN)
- break;
- a = c;
- }
- return (i == ETHER_ADDR_LEN);
+ return bytes_written;
}
+#endif
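/*
 * A worked example for the helpers above (values are illustrative): the
 * 32-bit word is printed low byte first, so wl_ext_ip_dump(0x0100A8C0, buf)
 * writes "192.168.0.1" (11 bytes) into buf, and a dhcpc_dump reply ends up
 * looking roughly like
 *   "ipaddr 192.168.0.10 mask 255.255.255.0 gw 192.168.0.1 dnsserv 8.8.8.8 "
 * with any iovar that fails simply left out of the string.
 */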
static int
-wl_ext_csi(struct net_device *dev, char *data, char *command, int total_len)
+wl_ext_rsdb_mode(struct net_device *dev, char *data, char *command,
+ int total_len)
{
- csi_config_t csi, *csip;
- csi_list_t *csi_list;
- int ret = -1, period=-1, i;
- char mac[32], *buf = NULL;
- struct ether_addr ea;
- int bytes_written = 0;
+ s8 iovar_buf[WLC_IOCTL_SMLEN];
+ wl_config_t rsdb_mode_cfg = {1, 0}, *rsdb_p;
+ int ret = 0;
- buf = kmalloc(WLC_IOCTL_SMLEN, GFP_KERNEL);
- if (buf == NULL) {
- AEXT_ERROR(dev->name, "Failed to allocate buffer of %d bytes\n", WLC_IOCTL_SMLEN);
- goto exit;
- }
- memset(buf, 0, WLC_IOCTL_SMLEN);
+ ANDROID_TRACE(("%s: Enter\n", __FUNCTION__));
if (data) {
- sscanf(data, "%s %d", mac, &period);
- ret = wl_ether_atoe(mac, &ea);
- if (!ret) {
- AEXT_ERROR(dev->name, "rejecting mac=%s, ret=%d\n", mac, ret);
- goto exit;
- }
- AEXT_TRACE(dev->name, "mac=%pM, period=%d", &ea, period);
- if (period > 0) {
- memset(&csi, 0, sizeof(csi_config_t));
- bcopy(&ea, &csi.addr, ETHER_ADDR_LEN);
- csi.period = period;
- ret = wl_ext_iovar_setbuf(dev, "csi", (char *)&csi, sizeof(csi),
- buf, WLC_IOCTL_SMLEN, NULL);
- } else if (period == 0) {
- memset(&csi, 0, sizeof(csi_config_t));
- bcopy(&ea, &csi.addr, ETHER_ADDR_LEN);
- ret = wl_ext_iovar_setbuf(dev, "csi_del", (char *)&csi, sizeof(csi),
- buf, WLC_IOCTL_SMLEN, NULL);
- } else {
- ret = wl_ext_iovar_getbuf(dev, "csi", &ea, ETHER_ADDR_LEN, buf,
- WLC_IOCTL_SMLEN, NULL);
- if (!ret) {
- csip = (csi_config_t *) buf;
- /* Dump all lists */
- bytes_written += snprintf(command+bytes_written, total_len,
- "Mac :%pM\n"
- "Period :%d\n"
- "BW :%d\n"
- "Method :%d\n",
- &csip->addr, csip->period, csip->bw, csip->method);
- AEXT_TRACE(dev->name, "command result is %s\n", command);
- ret = bytes_written;
- }
- }
- }
- else {
- ret = wl_ext_iovar_getbuf(dev, "csi_list", NULL, 0, buf, WLC_IOCTL_SMLEN, NULL);
+ rsdb_mode_cfg.config = (int)simple_strtol(data, NULL, 0);
+ ret = wl_ext_iovar_setbuf(dev, "rsdb_mode", (char *)&rsdb_mode_cfg,
+ sizeof(rsdb_mode_cfg), iovar_buf, WLC_IOCTL_SMLEN, NULL);
+ printf("%s: rsdb_mode %d\n", __FUNCTION__, rsdb_mode_cfg.config);
+ } else {
+ ret = wl_ext_iovar_getbuf(dev, "rsdb_mode", NULL, 0,
+ iovar_buf, WLC_IOCTL_SMLEN, NULL);
if (!ret) {
- csi_list = (csi_list_t *)buf;
- bytes_written += snprintf(command+bytes_written, total_len,
- "Total number :%d\n", csi_list->cnt);
- for (i=0; i<csi_list->cnt; i++) {
- csip = &csi_list->configs[i];
- bytes_written += snprintf(command+bytes_written, total_len,
- "Idx :%d\n"
- "Mac :%pM\n"
- "Period :%d\n"
- "BW :%d\n"
- "Method :%d\n\n",
- i+1, &csip->addr, csip->period, csip->bw, csip->method);
- }
- AEXT_TRACE(dev->name, "command result is %s\n", command);
- ret = bytes_written;
+ rsdb_p = (wl_config_t *) iovar_buf;
+ ret = snprintf(command, total_len, "%d", rsdb_p->config);
+ ANDROID_TRACE(("%s: command result is %s\n", __FUNCTION__,
+ command));
}
}
-exit:
- if (buf)
- kfree(buf);
return ret;
}
-#endif /* CSI_SUPPORT */
typedef int (wl_ext_tpl_parse_t)(struct net_device *dev, char *data, char *command,
int total_len);
} wl_ext_iovar_tpl_t;
const wl_ext_iovar_tpl_t wl_ext_iovar_tpl_list[] = {
- {WLC_GET_VAR, WLC_SET_VAR, "event_msg", wl_ext_event_msg},
- {WLC_GET_VAR, WLC_SET_VAR, "gtk_key_info", wl_ext_gtk_key_info},
- {WLC_GET_VAR, WLC_SET_VAR, "recal", wl_ext_recal},
{WLC_GET_VAR, WLC_SET_VAR, "rsdb_mode", wl_ext_rsdb_mode},
- {WLC_GET_VAR, WLC_SET_VAR, "mkeep_alive", wl_ext_mkeep_alive},
-#ifdef PKT_FILTER_SUPPORT
- {WLC_GET_VAR, WLC_SET_VAR, "pkt_filter_add", wl_ext_pkt_filter_add},
- {WLC_GET_VAR, WLC_SET_VAR, "pkt_filter_delete", wl_ext_pkt_filter_delete},
- {WLC_GET_VAR, WLC_SET_VAR, "pkt_filter_enable", wl_ext_pkt_filter_enable},
-#endif /* PKT_FILTER_SUPPORT */
-#if defined(WL_EXT_IAPSTA) && defined(WLMESH)
- {WLC_GET_VAR, WLC_SET_VAR, "mesh_peer_status", wl_ext_mesh_peer_status},
-#endif /* WL_EXT_IAPSTA && WLMESH */
-#ifdef SENDPROB
- {WLC_GET_VAR, WLC_SET_VAR, "send_probreq", wl_ext_send_probreq},
- {WLC_GET_VAR, WLC_SET_VAR, "send_probresp", wl_ext_send_probresp},
- {WLC_GET_VAR, WLC_SET_VAR, "recv_probreq", wl_ext_recv_probreq},
- {WLC_GET_VAR, WLC_SET_VAR, "recv_probresp", wl_ext_recv_probresp},
-#endif /* SENDPROB */
-#ifdef WL_EXT_TCPKA
- {WLC_GET_VAR, WLC_SET_VAR, "tcpka_conn_add", wl_ext_tcpka_conn_add},
- {WLC_GET_VAR, WLC_SET_VAR, "tcpka_conn_enable", wl_ext_tcpka_conn_enable},
- {WLC_GET_VAR, WLC_SET_VAR, "tcpka_conn_sess_info", wl_ext_tcpka_conn_info},
-#endif /* WL_EXT_TCPKA */
-#ifdef WL_EXT_WOWL
- {WLC_GET_VAR, WLC_SET_VAR, "wowl_pattern", wl_ext_wowl_pattern},
- {WLC_GET_VAR, WLC_SET_VAR, "wowl_wakeind", wl_ext_wowl_wakeind},
-#endif /* WL_EXT_WOWL */
-#ifdef IDHCP
- {WLC_GET_VAR, WLC_SET_VAR, "dhcpc_dump", wl_ext_dhcpc_dump},
- {WLC_GET_VAR, WLC_SET_VAR, "dhcpc_param", wl_ext_dhcpc_param},
-#endif /* IDHCP */
-#ifdef WL_GPIO_NOTIFY
- {WLC_GET_VAR, WLC_SET_VAR, "bcol_gpio_noti", wl_ext_gpio_notify},
-#endif /* WL_GPIO_NOTIFY */
-#ifdef CSI_SUPPORT
- {WLC_GET_VAR, WLC_SET_VAR, "csi", wl_ext_csi},
-#endif /* CSI_SUPPORT */
};
/*
dhd_priv wl mpc
dhd_priv wl mpc 1
*/
-static int
+int
wl_ext_wl_iovar(struct net_device *dev, char *command, int total_len)
{
int cmd, val, ret = -1, i;
const wl_ext_iovar_tpl_t *tpl = wl_ext_iovar_tpl_list;
int tpl_count = ARRAY_SIZE(wl_ext_iovar_tpl_list);
- AEXT_TRACE(dev->name, "cmd %s\n", command);
+ ANDROID_TRACE(("%s: cmd %s\n", __FUNCTION__, command));
pick_tmp = command;
pch = bcmstrtok(&pick_tmp, " ", 0); // pick wl
if (cmd == 0) {
strcpy(name, pch);
}
- data = bcmstrtok(&pick_tmp, "", 0); // pick data
+ data = bcmstrtok(&pick_tmp, " ", 0); // pick data
if (data && cmd == 0) {
cmd = WLC_SET_VAR;
} else if (cmd == 0) {
} else {
if (cmd == WLC_SET_VAR) {
val = (int)simple_strtol(data, NULL, 0);
- AEXT_INFO(dev->name, "set %s %d\n", name, val);
+ ANDROID_TRACE(("%s: set %s %d\n", __FUNCTION__, name, val));
ret = wl_ext_iovar_setint(dev, name, val);
} else if (cmd == WLC_GET_VAR) {
- AEXT_INFO(dev->name, "get %s\n", name);
+ ANDROID_TRACE(("%s: get %s\n", __FUNCTION__, name));
ret = wl_ext_iovar_getint(dev, name, &val);
if (!ret) {
bytes_written = snprintf(command, total_len, "%d", val);
- AEXT_INFO(dev->name, "command result is %s\n", command);
+ ANDROID_TRACE(("%s: command result is %s\n", __FUNCTION__,
+ command));
ret = bytes_written;
}
} else if (data) {
val = (int)simple_strtol(data, NULL, 0);
- AEXT_INFO(dev->name, "set %d %d\n", cmd, val);
+ ANDROID_TRACE(("%s: set %d %d\n", __FUNCTION__, cmd, val));
ret = wl_ext_ioctl(dev, cmd, &val, sizeof(val), TRUE);
} else {
- AEXT_INFO(dev->name, "get %d\n", cmd);
+ ANDROID_TRACE(("%s: get %d\n", __FUNCTION__, cmd));
ret = wl_ext_ioctl(dev, cmd, &val, sizeof(val), FALSE);
if (!ret) {
bytes_written = snprintf(command, total_len, "%d", val);
- AEXT_INFO(dev->name, "command result is %s\n", command);
+ ANDROID_TRACE(("%s: command result is %s\n", __FUNCTION__,
+ command));
ret = bytes_written;
}
}
return ret;
}
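/*
 * A hedged summary of the dispatch above: wl_ext_wl_iovar() tokenizes the
 * private command, so for example
 *   "wl mpc 1"  -> name "mpc", data "1"  -> iovar set (WLC_SET_VAR path)
 *   "wl mpc"    -> name "mpc", no data   -> iovar get, value printed back
 *   "wl 86 2"   -> numeric cmd 86, val 2 -> raw ioctl set
 * which is how the recv_probreq/recv_probresp helpers earlier in this patch
 * drive both iovars and raw ioctls through the same command strings.
 */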
-int
-wl_android_ext_priv_cmd(struct net_device *net, char *command,
+int wl_android_ext_priv_cmd(struct net_device *net, char *command,
int total_len, int *bytes_written)
{
int ret = 0;
else if (strnicmp(command, CMD_ROAM_TRIGGER, strlen(CMD_ROAM_TRIGGER)) == 0) {
*bytes_written = wl_ext_roam_trigger(net, command, total_len);
}
+ else if (strnicmp(command, CMD_KEEP_ALIVE, strlen(CMD_KEEP_ALIVE)) == 0) {
+ *bytes_written = wl_ext_keep_alive(net, command, total_len);
+ }
else if (strnicmp(command, CMD_PM, strlen(CMD_PM)) == 0) {
*bytes_written = wl_ext_pm(net, command, total_len);
}
*bytes_written = net_os_set_suspend_bcn_li_dtim(net, bcn_li_dtim);
}
#ifdef WL_EXT_IAPSTA
- else if (strnicmp(command, CMD_IAPSTA_INIT, strlen(CMD_IAPSTA_INIT)) == 0 ||
- strnicmp(command, CMD_ISAM_INIT, strlen(CMD_ISAM_INIT)) == 0) {
+ else if (strnicmp(command, CMD_IAPSTA_INIT, strlen(CMD_IAPSTA_INIT)) == 0) {
+ *bytes_written = wl_ext_isam_init(net, command, total_len);
+ }
+ else if (strnicmp(command, CMD_ISAM_INIT, strlen(CMD_ISAM_INIT)) == 0) {
*bytes_written = wl_ext_isam_init(net, command, total_len);
}
- else if (strnicmp(command, CMD_IAPSTA_CONFIG, strlen(CMD_IAPSTA_CONFIG)) == 0 ||
- strnicmp(command, CMD_ISAM_CONFIG, strlen(CMD_ISAM_CONFIG)) == 0) {
+ else if (strnicmp(command, CMD_IAPSTA_CONFIG, strlen(CMD_IAPSTA_CONFIG)) == 0) {
+ *bytes_written = wl_ext_iapsta_config(net, command, total_len);
+ }
+ else if (strnicmp(command, CMD_ISAM_CONFIG, strlen(CMD_ISAM_CONFIG)) == 0) {
*bytes_written = wl_ext_iapsta_config(net, command, total_len);
}
- else if (strnicmp(command, CMD_IAPSTA_ENABLE, strlen(CMD_IAPSTA_ENABLE)) == 0 ||
- strnicmp(command, CMD_ISAM_ENABLE, strlen(CMD_ISAM_ENABLE)) == 0) {
+ else if (strnicmp(command, CMD_IAPSTA_ENABLE, strlen(CMD_IAPSTA_ENABLE)) == 0) {
+ *bytes_written = wl_ext_iapsta_enable(net, command, total_len);
+ }
+ else if (strnicmp(command, CMD_ISAM_ENABLE, strlen(CMD_ISAM_ENABLE)) == 0) {
*bytes_written = wl_ext_iapsta_enable(net, command, total_len);
}
- else if (strnicmp(command, CMD_IAPSTA_DISABLE, strlen(CMD_IAPSTA_DISABLE)) == 0 ||
- strnicmp(command, CMD_ISAM_DISABLE, strlen(CMD_ISAM_DISABLE)) == 0) {
+ else if (strnicmp(command, CMD_IAPSTA_DISABLE, strlen(CMD_IAPSTA_DISABLE)) == 0) {
+ *bytes_written = wl_ext_iapsta_disable(net, command, total_len);
+ }
+ else if (strnicmp(command, CMD_ISAM_DISABLE, strlen(CMD_ISAM_DISABLE)) == 0) {
*bytes_written = wl_ext_iapsta_disable(net, command, total_len);
}
else if (strnicmp(command, CMD_ISAM_STATUS, strlen(CMD_ISAM_STATUS)) == 0) {
- *bytes_written = wl_ext_isam_status(net, command, total_len);
+ *bytes_written = wl_ext_isam_status(net);
}
- else if (strnicmp(command, CMD_ISAM_PARAM, strlen(CMD_ISAM_PARAM)) == 0) {
- *bytes_written = wl_ext_isam_param(net, command, total_len);
+#endif
+#ifdef IDHCP
+ else if (strnicmp(command, CMD_DHCPC_ENABLE, strlen(CMD_DHCPC_ENABLE)) == 0) {
+ *bytes_written = wl_ext_dhcpc_enable(net, command, total_len);
}
-#if defined(WLMESH) && defined(WL_ESCAN)
- else if (strnicmp(command, CMD_ISAM_PEER_PATH, strlen(CMD_ISAM_PEER_PATH)) == 0) {
- *bytes_written = wl_ext_isam_peer_path(net, command, total_len);
+ else if (strnicmp(command, CMD_DHCPC_DUMP, strlen(CMD_DHCPC_DUMP)) == 0) {
+ *bytes_written = wl_ext_dhcpc_dump(net, command, total_len);
}
-#endif /* WLMESH && WL_ESCAN */
-#endif /* WL_EXT_IAPSTA */
+#endif
#ifdef WL_CFG80211
else if (strnicmp(command, CMD_AUTOCHANNEL, strlen(CMD_AUTOCHANNEL)) == 0) {
*bytes_written = wl_cfg80211_autochannel(net, command, total_len);
}
-#endif /* WL_CFG80211 */
-#if defined(WL_WIRELESS_EXT) && defined(WL_ESCAN)
+#endif
+#ifdef WL_ESCAN
else if (strnicmp(command, CMD_AUTOCHANNEL, strlen(CMD_AUTOCHANNEL)) == 0) {
- *bytes_written = wl_iw_autochannel(net, command, total_len);
+ *bytes_written = wl_escan_autochannel(net, command, total_len);
}
-#endif /* WL_WIRELESS_EXT && WL_ESCAN */
+#endif
else if (strnicmp(command, CMD_WL, strlen(CMD_WL)) == 0) {
*bytes_written = wl_ext_wl_iovar(net, command, total_len);
}
s32 err = BCME_OK;
param.band = band;
- err = wl_ext_iovar_getbuf(net, "bw_cap", ¶m, sizeof(param), buf,
+ err = wldev_iovar_getbuf(net, "bw_cap", ¶m, sizeof(param), buf,
sizeof(buf), NULL);
if (err) {
if (err != BCME_UNSUPPORTED) {
- AEXT_ERROR(net->name, "bw_cap failed, %d\n", err);
+ ANDROID_ERROR(("bw_cap failed, %d\n", err));
return err;
} else {
err = wl_ext_iovar_getint(net, "mimo_bw_cap", &bw_cap);
+ if (err) {
+ ANDROID_ERROR(("error get mimo_bw_cap (%d)\n", err));
+ }
if (bw_cap != WLC_N_BW_20ALL)
bw = WL_CHANSPEC_BW_40;
}
distance = 8;
else
distance = 16;
- AEXT_INFO(net->name, "bw=0x%x, distance=%d\n", bw, distance);
+ ANDROID_INFO(("%s: bw=0x%x, distance=%d\n", __FUNCTION__, bw, distance));
return distance;
}
wl_bss_cache_ctrl_t *bss_cache_ctrl,
#else
struct wl_scan_results *bss_list,
-#endif /* BSSCACHE */
+#endif
int ioctl_ver, int *best_2g_ch, int *best_5g_ch
)
{
s32 i, j;
#if defined(BSSCACHE)
wl_bss_cache_t *node;
-#endif /* BSSCACHE */
+#endif
int b_band[CH_MAX_2G_CHANNEL]={0}, a_band1[4]={0}, a_band4[5]={0};
s32 cen_ch, distance, distance_2g, distance_5g, ch, min_ap=999;
u8 valid_chan_list[sizeof(u32)*(WL_NUMCHANNELS + 1)];
wl_uint32_list_t *list;
int ret;
chanspec_t chanspec;
- struct dhd_pub *dhd = dhd_get_pub(net);
memset(b_band, -1, sizeof(b_band));
memset(a_band1, -1, sizeof(a_band1));
memset(valid_chan_list, 0, sizeof(valid_chan_list));
list = (wl_uint32_list_t *)(void *) valid_chan_list;
list->count = htod32(WL_NUMCHANNELS);
- ret = wl_ext_ioctl(net, WLC_GET_VALID_CHANNELS, &valid_chan_list,
+ ret = wldev_ioctl(net, WLC_GET_VALID_CHANNELS, valid_chan_list,
sizeof(valid_chan_list), 0);
if (ret<0) {
- AEXT_ERROR(net->name, "get channels failed with %d\n", ret);
+ ANDROID_ERROR(("%s: get channels failed with %d\n", __FUNCTION__, ret));
return 0;
} else {
for (i = 0; i < dtoh32(list->count); i++) {
ch = dtoh32(list->element[i]);
- if (!dhd_conf_match_channel(dhd, ch))
- continue;
if (ch < CH_MAX_2G_CHANNEL)
b_band[ch-1] = 0;
else if (ch <= 48)
for (i=0; node && i<256; i++)
#else
for (i=0; i < bss_list->count; i++)
-#endif /* BSSCACHE */
+#endif
{
#if defined(BSSCACHE)
bi = node->results.bss_info;
#else
bi = bi ? (wl_bss_info_t *)((uintptr)bi + dtoh32(bi->length)) : bss_list->bss_info;
-#endif /* BSSCACHE */
+#endif
chanspec = wl_ext_chspec_driver_to_host(ioctl_ver, bi->chanspec);
cen_ch = CHSPEC_CHANNEL(bi->chanspec);
distance = 0;
distance += distance_2g;
for (j=0; j<ARRAYSIZE(b_band); j++) {
if (b_band[j] >= 0 && abs(cen_ch-(1+j)) <= distance)
- b_band[j] += 1;
+ b_band[j] += 1;
}
} else {
distance += distance_5g;
}
#if defined(BSSCACHE)
node = node->next;
-#endif /* BSSCACHE */
+#endif
}
*best_2g_ch = 0;
}
}
- if (android_msg_level & ANDROID_INFO_LEVEL) {
- struct bcmstrbuf strbuf;
- char *tmp_buf = NULL;
- tmp_buf = kmalloc(WLC_IOCTL_SMLEN, GFP_KERNEL);
- if (tmp_buf == NULL) {
- AEXT_ERROR(net->name, "Failed to allocate buffer of %d bytes\n", WLC_IOCTL_SMLEN);
- goto exit;
- }
- bcm_binit(&strbuf, tmp_buf, WLC_IOCTL_SMLEN);
+ if (android_msg_level&ANDROID_INFO_LEVEL) {
+ printf("%s: b_band: ", __FUNCTION__);
for (j=0; j<ARRAYSIZE(b_band); j++)
- bcm_bprintf(&strbuf, "%d/%d, ", b_band[j], 1+j);
- bcm_bprintf(&strbuf, "\n");
+ printf("%d, ", b_band[j]);
+ printf("\n");
+ printf("%s: a_band1: ", __FUNCTION__);
for (j=0; j<ARRAYSIZE(a_band1); j++)
- bcm_bprintf(&strbuf, "%d/%d, ", a_band1[j], 36+j*4);
- bcm_bprintf(&strbuf, "\n");
+ printf("%d, ", a_band1[j]);
+ printf("\n");
+ printf("%s: a_band4: ", __FUNCTION__);
for (j=0; j<ARRAYSIZE(a_band4); j++)
- bcm_bprintf(&strbuf, "%d/%d, ", a_band4[j], 149+j*4);
- bcm_bprintf(&strbuf, "\n");
- bcm_bprintf(&strbuf, "best_2g_ch=%d, best_5g_ch=%d\n",
+ printf("%d, ", a_band4[j]);
+ printf("\n");
+ printf("%s: best_2g_ch=%d, best_5g_ch=%d\n", __FUNCTION__,
*best_2g_ch, *best_5g_ch);
- AEXT_INFO(net->name, "\n%s", strbuf.origbuf);
- if (tmp_buf) {
- kfree(tmp_buf);
- }
}
-exit:
return 0;
}
-#endif /* WL_CFG80211 || WL_ESCAN */
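/*
 * A worked example for the scoring above (the distance value is
 * illustrative): every scanned BSS bumps the per-channel counters
 * b_band[]/a_band1[]/a_band4[] for each candidate channel within `distance`
 * of its centre channel, and the least-crowded counter wins. With
 * distance = 4, a neighbour centred on channel 6 increments candidates
 * 2..10 (|6 - c| <= 4) and leaves 1 and 11..13 untouched, nudging the
 * choice toward the band edges.
 */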
-
-#ifdef WL_CFG80211
-#define APCS_MAX_RETRY 10
-static int
-wl_ext_fw_apcs(struct net_device *dev, uint32 band)
-{
- int channel = 0, chosen = 0, retry = 0, ret = 0, spect = 0;
- u8 *reqbuf = NULL;
- uint32 buf_size;
-
- ret = wldev_ioctl_get(dev, WLC_GET_SPECT_MANAGMENT, &spect, sizeof(spect));
- if (ret) {
- AEXT_ERROR(dev->name, "ACS: error getting the spect, ret=%d\n", ret);
- goto done;
- }
-
- if (spect > 0) {
- ret = wl_cfg80211_set_spect(dev, 0);
- if (ret < 0) {
- AEXT_ERROR(dev->name, "ACS: error while setting spect, ret=%d\n", ret);
- goto done;
- }
- }
-
- reqbuf = kmalloc(CHANSPEC_BUF_SIZE, GFP_KERNEL);
- if (reqbuf == NULL) {
- AEXT_ERROR(dev->name, "failed to allocate chanspec buffer\n");
- goto done;
- }
- memset(reqbuf, 0, CHANSPEC_BUF_SIZE);
-
- if (band == WLC_BAND_AUTO) {
- AEXT_INFO(dev->name, "ACS full channel scan \n");
- reqbuf[0] = htod32(0);
- } else if (band == WLC_BAND_5G) {
- AEXT_INFO(dev->name, "ACS 5G band scan \n");
- if ((ret = wl_cfg80211_get_chanspecs_5g(dev, reqbuf, CHANSPEC_BUF_SIZE)) < 0) {
- AEXT_ERROR(dev->name, "ACS 5g chanspec retreival failed! \n");
- goto done;
- }
- } else if (band == WLC_BAND_2G) {
- /*
- * If channel argument is not provided/ argument 20 is provided,
- * Restrict channel to 2GHz, 20MHz BW, No SB
- */
- AEXT_INFO(dev->name, "ACS 2G band scan \n");
- if ((ret = wl_cfg80211_get_chanspecs_2g(dev, reqbuf, CHANSPEC_BUF_SIZE)) < 0) {
- AEXT_ERROR(dev->name, "ACS 2g chanspec retreival failed! \n");
- goto done;
- }
- } else {
- AEXT_ERROR(dev->name, "ACS: No band chosen\n");
- goto done;
- }
-
- buf_size = (band == WLC_BAND_AUTO) ? sizeof(int) : CHANSPEC_BUF_SIZE;
- ret = wldev_ioctl_set(dev, WLC_START_CHANNEL_SEL, (void *)reqbuf,
- buf_size);
- if (ret < 0) {
- AEXT_ERROR(dev->name, "can't start auto channel scan, err = %d\n", ret);
- channel = 0;
- goto done;
- }
-
- /* Wait for auto channel selection, max 3000 ms */
- if ((band == WLC_BAND_2G) || (band == WLC_BAND_5G)) {
- OSL_SLEEP(500);
- } else {
- /*
- * Full channel scan at the minimum takes 1.2secs
- * even with parallel scan. max wait time: 3500ms
- */
- OSL_SLEEP(1000);
- }
-
- retry = APCS_MAX_RETRY;
- while (retry--) {
- ret = wldev_ioctl_get(dev, WLC_GET_CHANNEL_SEL, &chosen,
- sizeof(chosen));
- if (ret < 0) {
- chosen = 0;
- } else {
- chosen = dtoh32(chosen);
- }
-
- if (chosen) {
- int chosen_band;
- int apcs_band;
-#ifdef D11AC_IOTYPES
- if (wl_cfg80211_get_ioctl_version() == 1) {
- channel = LCHSPEC_CHANNEL((chanspec_t)chosen);
- } else {
- channel = CHSPEC_CHANNEL((chanspec_t)chosen);
- }
-#else
- channel = CHSPEC_CHANNEL((chanspec_t)chosen);
-#endif /* D11AC_IOTYPES */
- apcs_band = (band == WLC_BAND_AUTO) ? WLC_BAND_2G : band;
- chosen_band = (channel <= CH_MAX_2G_CHANNEL) ? WLC_BAND_2G : WLC_BAND_5G;
- if (apcs_band == chosen_band) {
- WL_MSG(dev->name, "selected channel = %d\n", channel);
- break;
- }
- }
- AEXT_INFO(dev->name, "%d tried, ret = %d, chosen = 0x%x\n",
- (APCS_MAX_RETRY - retry), ret, chosen);
- OSL_SLEEP(250);
- }
-
-done:
- if (spect > 0) {
- if ((ret = wl_cfg80211_set_spect(dev, spect) < 0)) {
- AEXT_ERROR(dev->name, "ACS: error while setting spect\n");
- }
- }
-
- if (reqbuf) {
- kfree(reqbuf);
- }
-
- return channel;
-}
-#endif /* WL_CFG80211 */
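/*
 * Wait-time arithmetic for the loop above: with APCS_MAX_RETRY = 10 and a
 * 250 ms poll interval, single-band ACS waits at most 500 + 10 * 250 =
 * 3000 ms and the full-band scan at most 1000 + 10 * 250 = 3500 ms, which
 * matches the "max 3000 ms" / "max wait time: 3500ms" figures quoted in the
 * comments.
 */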
-
-#ifdef WL_ESCAN
-int
-wl_ext_drv_apcs(struct net_device *dev, uint32 band)
-{
- int ret = 0, channel = 0;
- struct dhd_pub *dhd = dhd_get_pub(dev);
- struct wl_escan_info *escan = NULL;
- int retry = 0, retry_max, retry_interval = 250, up = 1;
-#ifdef WL_CFG80211
- struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
-#endif /* WL_CFG80211 */
-
- escan = dhd->escan;
- if (dhd) {
- retry_max = WL_ESCAN_TIMER_INTERVAL_MS/retry_interval;
- ret = wldev_ioctl_get(dev, WLC_GET_UP, &up, sizeof(s32));
- if (ret < 0 || up == 0) {
- ret = wldev_ioctl_set(dev, WLC_UP, &up, sizeof(s32));
- }
- retry = retry_max;
- while (retry--) {
- if (escan->escan_state == ESCAN_STATE_SCANING
-#ifdef WL_CFG80211
- || wl_get_drv_status_all(cfg, SCANNING)
-#endif
- )
- {
- AEXT_INFO(dev->name, "Scanning %d tried, ret = %d\n",
- (retry_max - retry), ret);
- } else {
- escan->autochannel = 1;
- ret = wl_escan_set_scan(dev, dhd, NULL, 0, TRUE);
- if (!ret)
- break;
- }
- OSL_SLEEP(retry_interval);
- }
- if ((retry == 0) || (ret < 0))
- goto done;
- retry = retry_max;
- while (retry--) {
- if (escan->escan_state == ESCAN_STATE_IDLE) {
- if (band == WLC_BAND_5G)
- channel = escan->best_5g_ch;
- else
- channel = escan->best_2g_ch;
- WL_MSG(dev->name, "selected channel = %d\n", channel);
- goto done;
- }
- AEXT_INFO(dev->name, "escan_state=%d, %d tried, ret = %d\n",
- escan->escan_state, (retry_max - retry), ret);
- OSL_SLEEP(retry_interval);
- }
- if ((retry == 0) || (ret < 0))
- goto done;
- }
-
-done:
- if (escan)
- escan->autochannel = 0;
-
- return channel;
-}
-#endif /* WL_ESCAN */
-
-int
-wl_ext_autochannel(struct net_device *dev, uint acs, uint32 band)
-{
- int channel = 0;
- uint16 chan_2g, chan_5g;
-
- AEXT_INFO(dev->name, "acs=0x%x, band=%d \n", acs, band);
-
-#ifdef WL_CFG80211
- if (acs & ACS_FW_BIT) {
- int ret = 0;
- ret = wldev_ioctl_get(dev, WLC_GET_CHANNEL_SEL, &channel, sizeof(channel));
- channel = 0;
- if (ret != BCME_UNSUPPORTED)
- channel = wl_ext_fw_apcs(dev, band);
- if (channel)
- return channel;
- }
#endif
-#ifdef WL_ESCAN
- if (acs & ACS_DRV_BIT)
- channel = wl_ext_drv_apcs(dev, band);
-#endif /* WL_ESCAN */
-
- if (channel == 0) {
- wl_ext_get_default_chan(dev, &chan_2g, &chan_5g, TRUE);
- if (band == WLC_BAND_5G) {
- channel = chan_5g;
- } else {
- channel = chan_2g;
- }
- AEXT_ERROR(dev->name, "ACS failed. Fall back to default channel (%d) \n", channel);
- }
-
- return channel;
-}
-
#if defined(RSSIAVG)
void
wl_free_rssi_cache(wl_rssi_cache_ctrl_t *rssi_cache_ctrl)
node = *rssi_head;
for (;node;) {
- AEXT_INFO("wlan", "Free %d with BSSID %pM\n", i, &node->BSSID);
+ ANDROID_INFO(("%s: Free %d with BSSID %pM\n",
+ __FUNCTION__, i, &node->BSSID));
cur = node;
node = cur->next;
kfree(cur);
{
wl_rssi_cache_t *node, *prev, **rssi_head;
int i = -1, tmp = 0;
- struct osl_timespec now;
+ struct timeval now;
- osl_do_gettimeofday(&now);
+ do_gettimeofday(&now);
rssi_head = &rssi_cache_ctrl->m_cache_head;
node = *rssi_head;
tmp = 0;
prev->next = node->next;
}
- AEXT_INFO("wlan", "Del %d with BSSID %pM\n", i, &node->BSSID);
+ ANDROID_INFO(("%s: Del %d with BSSID %pM\n",
+ __FUNCTION__, i, &node->BSSID));
kfree(node);
if (tmp == 1) {
node = *rssi_head;
tmp = 0;
prev->next = node->next;
}
- AEXT_INFO("wlan", "Del %d with BSSID %pM\n", i, &node->BSSID);
+ ANDROID_INFO(("%s: Del %d with BSSID %pM\n",
+ __FUNCTION__, i, &node->BSSID));
kfree(node);
if (tmp == 1) {
node = *rssi_head;
int j, k=0;
int rssi, error=0;
struct ether_addr bssid;
- struct osl_timespec now, timeout;
+ struct timeval now, timeout;
scb_val_t scbval;
if (!g_wifi_on)
return 0;
- error = wldev_ioctl(net, WLC_GET_BSSID, &bssid, sizeof(bssid), 0);
+ error = wldev_ioctl(net, WLC_GET_BSSID, &bssid, sizeof(bssid), false);
if (error == BCME_NOTASSOCIATED) {
- AEXT_INFO("wlan", "Not Associated! res:%d\n", error);
+ ANDROID_INFO(("%s: Not Associated! res:%d\n", __FUNCTION__, error));
return 0;
}
if (error) {
- AEXT_ERROR(net->name, "Could not get bssid (%d)\n", error);
+ ANDROID_ERROR(("Could not get bssid (%d)\n", error));
}
error = wldev_get_rssi(net, &scbval);
if (error) {
- AEXT_ERROR(net->name, "Could not get rssi (%d)\n", error);
+ ANDROID_ERROR(("Could not get rssi (%d)\n", error));
return error;
}
rssi = scbval.val;
- osl_do_gettimeofday(&now);
+ do_gettimeofday(&now);
timeout.tv_sec = now.tv_sec + RSSICACHE_TIMEOUT;
if (timeout.tv_sec < now.tv_sec) {
/*
* Integer overflow - assume long enough timeout to be assumed
* to be infinite, i.e., the timeout would never happen.
*/
- AEXT_TRACE(net->name,
- "Too long timeout (secs=%d) to ever happen - now=%lu, timeout=%lu\n",
- RSSICACHE_TIMEOUT, now.tv_sec, timeout.tv_sec);
+ ANDROID_TRACE(("%s: Too long timeout (secs=%d) to ever happen - now=%lu, timeout=%lu",
+ __FUNCTION__, RSSICACHE_TIMEOUT, now.tv_sec, timeout.tv_sec));
}
/* update RSSI */
prev = NULL;
for (;node;) {
if (!memcmp(&node->BSSID, &bssid, ETHER_ADDR_LEN)) {
- AEXT_INFO("wlan", "Update %d with BSSID %pM, RSSI=%d\n", k, &bssid, rssi);
+ ANDROID_INFO(("%s: Update %d with BSSID %pM, RSSI=%d\n",
+ __FUNCTION__, k, &bssid, rssi));
for (j=0; j<RSSIAVG_LEN-1; j++)
node->RSSI[j] = node->RSSI[j+1];
node->RSSI[j] = rssi;
leaf = kmalloc(sizeof(wl_rssi_cache_t), GFP_KERNEL);
if (!leaf) {
- AEXT_ERROR(net->name, "Memory alloc failure %d\n", (int)sizeof(wl_rssi_cache_t));
+ ANDROID_ERROR(("%s: Memory alloc failure %d\n",
+ __FUNCTION__, (int)sizeof(wl_rssi_cache_t)));
return 0;
}
- AEXT_INFO(net->name, "Add %d with cached BSSID %pM, RSSI=%3d in the leaf\n",
- k, &bssid, rssi);
+ ANDROID_INFO(("%s: Add %d with cached BSSID %pM, RSSI=%3d in the leaf\n",
+ __FUNCTION__, k, &bssid, rssi));
leaf->next = NULL;
leaf->dirty = 0;
wl_rssi_cache_t *node, *prev, *leaf, **rssi_head;
wl_bss_info_t *bi = NULL;
int i, j, k;
- struct osl_timespec now, timeout;
+ struct timeval now, timeout;
if (!ss_list->count)
return;
- osl_do_gettimeofday(&now);
+ do_gettimeofday(&now);
timeout.tv_sec = now.tv_sec + RSSICACHE_TIMEOUT;
if (timeout.tv_sec < now.tv_sec) {
/*
* Integer overflow - assume long enough timeout to be assumed
* to be infinite, i.e., the timeout would never happen.
*/
- AEXT_TRACE("wlan",
- "Too long timeout (secs=%d) to ever happen - now=%lu, timeout=%lu\n",
- RSSICACHE_TIMEOUT, now.tv_sec, timeout.tv_sec);
+ ANDROID_TRACE(("%s: Too long timeout (secs=%d) to ever happen - now=%lu, timeout=%lu",
+ __FUNCTION__, RSSICACHE_TIMEOUT, now.tv_sec, timeout.tv_sec));
}
rssi_head = &rssi_cache_ctrl->m_cache_head;
bi = bi ? (wl_bss_info_t *)((uintptr)bi + dtoh32(bi->length)) : ss_list->bss_info;
for (;node;) {
if (!memcmp(&node->BSSID, &bi->BSSID, ETHER_ADDR_LEN)) {
- AEXT_INFO("wlan", "Update %d with BSSID %pM, RSSI=%3d, SSID \"%s\"\n",
- k, &bi->BSSID, dtoh16(bi->RSSI), bi->SSID);
+ ANDROID_INFO(("%s: Update %d with BSSID %pM, RSSI=%3d, SSID \"%s\"\n",
+ __FUNCTION__, k, &bi->BSSID, dtoh16(bi->RSSI), bi->SSID));
for (j=0; j<RSSIAVG_LEN-1; j++)
node->RSSI[j] = node->RSSI[j+1];
node->RSSI[j] = dtoh16(bi->RSSI);
leaf = kmalloc(sizeof(wl_rssi_cache_t), GFP_KERNEL);
if (!leaf) {
- AEXT_ERROR("wlan", "Memory alloc failure %d\n",
- (int)sizeof(wl_rssi_cache_t));
+ ANDROID_ERROR(("%s: Memory alloc failure %d\n",
+ __FUNCTION__, (int)sizeof(wl_rssi_cache_t)));
return;
}
- AEXT_INFO("wlan", "Add %d with cached BSSID %pM, RSSI=%3d, SSID \"%s\" in the leaf\n",
- k, &bi->BSSID, dtoh16(bi->RSSI), bi->SSID);
+ ANDROID_INFO(("%s: Add %d with cached BSSID %pM, RSSI=%3d, SSID \"%s\" in the leaf\n",
+ __FUNCTION__, k, &bi->BSSID, dtoh16(bi->RSSI), bi->SSID));
leaf->next = NULL;
leaf->dirty = 0;
}
rssi = MIN(rssi, RSSI_MAXVAL);
if (rssi == RSSI_MINVAL) {
- AEXT_ERROR("wlan", "BSSID %pM does not in RSSI cache\n", addr);
+ ANDROID_ERROR(("%s: BSSID %pM does not in RSSI cache\n",
+ __FUNCTION__, addr));
}
return (int16)rssi;
}
-#endif /* RSSIAVG */
+#endif
#if defined(RSSIOFFSET)
int
{
#if defined(RSSIOFFSET_NEW)
int j;
-#endif /* RSSIOFFSET_NEW */
+#endif
if (!g_wifi_on)
return rssi;
rssi += j;
#else
rssi += RSSI_OFFSET;
-#endif /* RSSIOFFSET_NEW */
+#endif
return MIN(rssi, RSSI_MAXVAL);
}
-#endif /* RSSIOFFSET */
+#endif
#if defined(BSSCACHE)
void
wl_bss_cache_t *node, *cur, **bss_head;
int i=0;
- AEXT_TRACE("wlan", "called\n");
+ ANDROID_TRACE(("%s called\n", __FUNCTION__));
bss_head = &bss_cache_ctrl->m_cache_head;
node = *bss_head;
for (;node;) {
- AEXT_TRACE("wlan", "Free %d with BSSID %pM\n",
- i, &node->results.bss_info->BSSID);
+ ANDROID_TRACE(("%s: Free %d with BSSID %pM\n",
+ __FUNCTION__, i, &node->results.bss_info->BSSID));
cur = node;
node = cur->next;
kfree(cur);
{
wl_bss_cache_t *node, *prev, **bss_head;
int i = -1, tmp = 0;
- struct osl_timespec now;
+ struct timeval now;
- osl_do_gettimeofday(&now);
+ do_gettimeofday(&now);
bss_head = &bss_cache_ctrl->m_cache_head;
node = *bss_head;
tmp = 0;
prev->next = node->next;
}
- AEXT_TRACE("wlan", "Del %d with BSSID %pM, RSSI=%3d, SSID \"%s\"\n",
- i, &node->results.bss_info->BSSID,
- dtoh16(node->results.bss_info->RSSI), node->results.bss_info->SSID);
+ ANDROID_TRACE(("%s: Del %d with BSSID %pM, RSSI=%3d, SSID \"%s\"\n",
+ __FUNCTION__, i, &node->results.bss_info->BSSID,
+ dtoh16(node->results.bss_info->RSSI), node->results.bss_info->SSID));
kfree(node);
if (tmp == 1) {
node = *bss_head;
tmp = 0;
prev->next = node->next;
}
- AEXT_TRACE("wlan", "Del %d with BSSID %pM, RSSI=%3d, SSID \"%s\"\n",
- i, &node->results.bss_info->BSSID,
- dtoh16(node->results.bss_info->RSSI), node->results.bss_info->SSID);
+ ANDROID_TRACE(("%s: Del %d with BSSID %pM, RSSI=%3d, SSID \"%s\"\n",
+ __FUNCTION__, i, &node->results.bss_info->BSSID,
+ dtoh16(node->results.bss_info->RSSI), node->results.bss_info->SSID));
kfree(node);
if (tmp == 1) {
node = *bss_head;
void dump_bss_cache(
#if defined(RSSIAVG)
wl_rssi_cache_ctrl_t *rssi_cache_ctrl,
-#endif /* RSSIAVG */
+#endif
wl_bss_cache_t *node)
{
int k = 0;
rssi = wl_get_avg_rssi(rssi_cache_ctrl, &node->results.bss_info->BSSID);
#else
rssi = dtoh16(node->results.bss_info->RSSI);
-#endif /* RSSIAVG */
- AEXT_TRACE("wlan", "dump %d with cached BSSID %pM, RSSI=%3d, SSID \"%s\"\n",
- k, &node->results.bss_info->BSSID, rssi, node->results.bss_info->SSID);
+#endif
+ ANDROID_TRACE(("%s: dump %d with cached BSSID %pM, RSSI=%3d, SSID \"%s\"\n",
+ __FUNCTION__, k, &node->results.bss_info->BSSID, rssi,
+ node->results.bss_info->SSID));
k++;
node = node->next;
}
wl_update_bss_cache(wl_bss_cache_ctrl_t *bss_cache_ctrl,
#if defined(RSSIAVG)
wl_rssi_cache_ctrl_t *rssi_cache_ctrl,
-#endif /* RSSIAVG */
+#endif
wl_scan_results_t *ss_list)
{
wl_bss_cache_t *node, *prev, *leaf, **bss_head;
int i, k=0;
#if defined(SORT_BSS_BY_RSSI)
int16 rssi, rssi_node;
-#endif /* SORT_BSS_BY_RSSI */
- struct osl_timespec now, timeout;
+#endif
+ struct timeval now, timeout;
if (!ss_list->count)
return;
- osl_do_gettimeofday(&now);
+ do_gettimeofday(&now);
timeout.tv_sec = now.tv_sec + BSSCACHE_TIMEOUT;
if (timeout.tv_sec < now.tv_sec) {
/*
* Integer overflow - assume long enough timeout to be assumed
* to be infinite, i.e., the timeout would never happen.
*/
- AEXT_TRACE("wlan",
- "Too long timeout (secs=%d) to ever happen - now=%lu, timeout=%lu\n",
- BSSCACHE_TIMEOUT, now.tv_sec, timeout.tv_sec);
+ ANDROID_TRACE(("%s: Too long timeout (secs=%d) to ever happen - now=%lu, timeout=%lu",
+ __FUNCTION__, BSSCACHE_TIMEOUT, now.tv_sec, timeout.tv_sec));
}
bss_head = &bss_cache_ctrl->m_cache_head;
leaf = kmalloc(dtoh32(bi->length) + sizeof(wl_bss_cache_t), GFP_KERNEL);
if (!leaf) {
- AEXT_ERROR("wlan", "Memory alloc failure %d\n",
- dtoh32(bi->length) + (int)sizeof(wl_bss_cache_t));
+ ANDROID_ERROR(("%s: Memory alloc failure %d\n", __FUNCTION__,
+ dtoh32(bi->length) + (int)sizeof(wl_bss_cache_t)));
return;
}
if (node) {
kfree(node);
node = NULL;
- AEXT_TRACE("wlan",
- "Update %d with cached BSSID %pM, RSSI=%3d, SSID \"%s\"\n",
- k, &bi->BSSID, dtoh16(bi->RSSI), bi->SSID);
+ ANDROID_TRACE(("%s: Update %d with cached BSSID %pM, RSSI=%3d, SSID \"%s\"\n",
+ __FUNCTION__, k, &bi->BSSID, dtoh16(bi->RSSI), bi->SSID));
} else
- AEXT_TRACE("wlan",
- "Add %d with cached BSSID %pM, RSSI=%3d, SSID \"%s\"\n",
- k, &bi->BSSID, dtoh16(bi->RSSI), bi->SSID);
+ ANDROID_TRACE(("%s: Add %d with cached BSSID %pM, RSSI=%3d, SSID \"%s\"\n",
+ __FUNCTION__, k, &bi->BSSID, dtoh16(bi->RSSI), bi->SSID));
memcpy(leaf->results.bss_info, bi, dtoh32(bi->length));
leaf->next = NULL;
rssi = wl_get_avg_rssi(rssi_cache_ctrl, &leaf->results.bss_info->BSSID);
#else
rssi = dtoh16(leaf->results.bss_info->RSSI);
-#endif /* RSSIAVG */
+#endif
for (;node;) {
#if defined(RSSIAVG)
rssi_node = wl_get_avg_rssi(rssi_cache_ctrl,
&node->results.bss_info->BSSID);
#else
rssi_node = dtoh16(node->results.bss_info->RSSI);
-#endif /* RSSIAVG */
+#endif
if (rssi > rssi_node) {
leaf->next = node;
if (node == *bss_head)
#else
leaf->next = *bss_head;
*bss_head = leaf;
-#endif /* SORT_BSS_BY_RSSI */
+#endif
}
}
dump_bss_cache(
#if defined(RSSIAVG)
rssi_cache_ctrl,
-#endif /* RSSIAVG */
+#endif
*bss_head);
}
void
wl_release_bss_cache_ctrl(wl_bss_cache_ctrl_t *bss_cache_ctrl)
{
- AEXT_TRACE("wlan", "Enter\n");
+ ANDROID_TRACE(("%s:\n", __FUNCTION__));
wl_free_bss_cache(bss_cache_ctrl);
}
-#endif /* BSSCACHE */
+#endif
/*
* Linux cfg80211 driver
*
- * Copyright (C) 1999-2019, Broadcom.
- *
+ * Copyright (C) 1999-2017, Broadcom Corporation
+ *
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
* under the terms of the GNU General Public License version 2 (the "GPL"),
* available at http://www.broadcom.com/licenses/GPLv2.php, with the
* following added to such license:
- *
+ *
* As a special exception, the copyright holders of this software give you
* permission to link this software with independent modules, and to copy and
* distribute the resulting executable under terms of your choice, provided that
* the license of that module. An independent module is a module which is not
* derived from this software. The special exception does not apply to any
* modifications of the software.
- *
+ *
* Notwithstanding the above, under no circumstances may you combine this
* software in any way with any other Broadcom software provided under a license
* other than the GPL, without Broadcom's express prior written consent.
*
* <<Broadcom-WL-IPTag/Open:>>
*
- * $Id: wl_cfg80211.c 826086 2019-06-18 19:23:59Z $
+ * $Id: wl_cfg80211.c 711110 2017-07-17 04:38:25Z $
*/
/* */
#include <typedefs.h>
#include <linuxver.h>
+#include <osl.h>
#include <linux/kernel.h>
+#include <wlc_types.h>
#include <bcmutils.h>
-#include <bcmstdlib_s.h>
#include <bcmwifi_channels.h>
#include <bcmendian.h>
#include <ethernet.h>
-#ifdef WL_WPS_SYNC
-#include <eapol.h>
-#endif /* WL_WPS_SYNC */
#include <802.11.h>
-#include <bcmiov.h>
#include <linux/if_arp.h>
#include <asm/uaccess.h>
#include <net/rtnetlink.h>
#include <wlioctl.h>
-#include <bcmevent.h>
#include <wldev_common.h>
#include <wl_cfg80211.h>
#include <wl_cfgp2p.h>
-#include <wl_cfgscan.h>
#include <bcmdevs.h>
-#ifdef WL_FILS
-#include <fils.h>
-#include <frag.h>
-#endif /* WL_FILS */
#include <wl_android.h>
#include <dngl_stats.h>
#include <dhd.h>
#include <dhd_linux.h>
-#include <dhd_linux_pktdump.h>
#include <dhd_debug.h>
#include <dhdioctl.h>
#include <wlioctl.h>
#ifdef PNO_SUPPORT
#include <dhd_pno.h>
#endif /* PNO_SUPPORT */
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(3, 14, 0)) || defined(WL_VENDOR_EXT_SUPPORT)
#include <wl_cfgvendor.h>
+#endif /* (LINUX_VERSION_CODE > KERNEL_VERSION(3, 14, 0)) || defined(WL_VENDOR_EXT_SUPPORT) */
#if !defined(WL_VENDOR_EXT_SUPPORT)
#undef GSCAN_SUPPORT
#endif
+
+#if defined(STAT_REPORT)
+#include <wl_statreport.h>
+#endif /* STAT_REPORT */
#include <dhd_config.h>
-#ifdef WL_NAN
-#include <wl_cfgnan.h>
-#endif /* WL_NAN */
#ifdef PROP_TXSTATUS
#include <dhd_wlfc.h>
-#endif // endif
+#endif
#ifdef BCMPCIE
#include <dhd_flowring.h>
-#endif // endif
+#endif
#ifdef RTT_SUPPORT
#include <dhd_rtt.h>
#endif /* RTT_SUPPORT */
+#ifdef WL11U
+#endif /* WL11U */
-#define BRCM_SAE_VENDOR_EVENT_BUF_LEN 500
-
-#ifdef DNGL_AXI_ERROR_LOGGING
-#include <bcmtlv.h>
-#endif /* DNGL_AXI_ERROR_LOGGING */
-
-#if defined(CONFIG_WLAN_BEYONDX) || defined(CONFIG_SEC_5GMODEL)
-#include <linux/dev_ril_bridge.h>
-#include <linux/notifier.h>
-#endif /* CONFIG_WLAN_BEYONDX || defined(CONFIG_SEC_5GMODEL) */
-
-#ifdef BCMWAPI_WPI
-/* these items should eventually go into wireless.h of the linux system header file dir */
-#ifndef IW_ENCODE_ALG_SM4
-#define IW_ENCODE_ALG_SM4 0x20
-#endif // endif
-
-#ifndef IW_AUTH_WAPI_ENABLED
-#define IW_AUTH_WAPI_ENABLED 0x20
-#endif // endif
-
-#ifndef IW_AUTH_WAPI_VERSION_1
-#define IW_AUTH_WAPI_VERSION_1 0x00000008
-#endif // endif
-
-#ifndef IW_AUTH_CIPHER_SMS4
-#define IW_AUTH_CIPHER_SMS4 0x00000020
-#endif // endif
-
-#ifndef IW_AUTH_KEY_MGMT_WAPI_PSK
-#define IW_AUTH_KEY_MGMT_WAPI_PSK 4
-#endif // endif
+#ifndef DHD_UNSUPPORT_IF_CNTS
+#define DHD_SUPPORT_IF_CNTS
+#endif /* !DHD_UNSUPPORT_IF_CNTS */
-#ifndef IW_AUTH_KEY_MGMT_WAPI_CERT
-#define IW_AUTH_KEY_MGMT_WAPI_CERT 8
-#endif // endif
-#endif /* BCMWAPI_WPI */
-#ifdef BCMWAPI_WPI
-#define IW_WSEC_ENABLED(wsec) ((wsec) & (WEP_ENABLED | TKIP_ENABLED | AES_ENABLED | SMS4_ENABLED))
-#else /* BCMWAPI_WPI */
#define IW_WSEC_ENABLED(wsec) ((wsec) & (WEP_ENABLED | TKIP_ENABLED | AES_ENABLED))
-#endif /* BCMWAPI_WPI */
-
-#if (defined(WL_FW_OCE_AP_SELECT) || defined(BCMFW_ROAM_ENABLE) && ((LINUX_VERSION_CODE \
- >= KERNEL_VERSION(3, 2, 0)) || defined(WL_COMPAT_WIRELESS)))
-uint fw_ap_select = true;
-#else
-uint fw_ap_select = false;
-#endif /* WL_FW_OCE_AP_SELECT && (ROAM_ENABLE || BCMFW_ROAM_ENABLE) */
-module_param(fw_ap_select, uint, 0660);
static struct device *cfg80211_parent_dev = NULL;
-static struct bcm_cfg80211 *g_bcmcfg = NULL;
-u32 wl_dbg_level = WL_DBG_ERR; // | WL_DBG_P2P_ACTION | WL_DBG_INFO;
+#ifdef CUSTOMER_HW4_DEBUG
+u32 wl_dbg_level = WL_DBG_ERR | WL_DBG_P2P_ACTION;
+#else
+u32 wl_dbg_level = WL_DBG_ERR;
+#endif /* CUSTOMER_HW4_DEBUG */
-#define MAX_VIF_OFFSET 15
#define MAX_WAIT_TIME 1500
#ifdef WLAIBSS_MCHAN
#define IBSS_IF_NAME "ibss%d"
#define WL_AF_TX_KEEP_PRI_CONNECTION_VSDB(cfg)
#endif /* VSDB */
+#ifdef WL_CFG80211_SYNC_GON
+#define WL_DRV_STATUS_SENDING_AF_FRM_EXT(cfg) \
+ (wl_get_drv_status_all(cfg, SENDING_ACT_FRM) || \
+ wl_get_drv_status_all(cfg, WAITING_NEXT_ACT_FRM_LISTEN))
+#else
+#define WL_DRV_STATUS_SENDING_AF_FRM_EXT(cfg) wl_get_drv_status_all(cfg, SENDING_ACT_FRM)
+#endif /* WL_CFG80211_SYNC_GON */
+
#define DNGL_FUNC(func, parameters) func parameters
#define COEX_DHCP
#define WLAN_EID_SSID 0
#define CH_MIN_5G_CHANNEL 34
+#define CH_MIN_2G_CHANNEL 1
+#define ACTIVE_SCAN 1
+#define PASSIVE_SCAN 0
+
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == \
+ 4 && __GNUC_MINOR__ >= 6))
+#define BCM_SET_LIST_FIRST_ENTRY(entry, ptr, type, member) \
+_Pragma("GCC diagnostic push") \
+_Pragma("GCC diagnostic ignored \"-Wcast-qual\"") \
+(entry) = list_first_entry((ptr), type, member); \
+_Pragma("GCC diagnostic pop") \
+
+#define BCM_SET_CONTAINER_OF(entry, ptr, type, member) \
+_Pragma("GCC diagnostic push") \
+_Pragma("GCC diagnostic ignored \"-Wcast-qual\"") \
+entry = container_of((ptr), type, member); \
+_Pragma("GCC diagnostic pop") \
+
+#else
+#define BCM_SET_LIST_FIRST_ENTRY(entry, ptr, type, member) \
+(entry) = list_first_entry((ptr), type, member); \
+
+#define BCM_SET_CONTAINER_OF(entry, ptr, type, member) \
+entry = container_of((ptr), type, member); \
+
+#endif /* STRICT_GCC_WARNINGS */
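/*
 * Usage sketch for the wrappers above (the list head and entry names are
 * hypothetical): they expand to plain list_first_entry()/container_of()
 * with -Wcast-qual silenced on GCC >= 4.6, so a call site reads e.g.
 *
 *	wl_vndr_oui_entry_t *oui_entry;
 *	BCM_SET_LIST_FIRST_ENTRY(oui_entry, &cfg->vndr_oui_list,
 *		wl_vndr_oui_entry_t, list);
 */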
#ifdef WL_RELMCAST
enum rmc_event_type {
};
#endif /* WL_RELMCAST */
+#ifdef WL_LASTEVT
+typedef struct wl_last_event {
+ uint32 current_time; /* current time */
+ uint32 timestamp; /* event timestamp */
+ wl_event_msg_t event; /* Encapsulated event */
+} wl_last_event_t;
+#endif /* WL_LASTEVT */
+
/* This is to override regulatory domains defined in cfg80211 module (reg.c)
* By default world regulatory domain defined in reg.c puts the flags NL80211_RRF_PASSIVE_SCAN
* and NL80211_RRF_NO_IBSS for 5GHz channels (for 36..48 and 149..165).
4 && __GNUC_MINOR__ >= 6))
_Pragma("GCC diagnostic push")
_Pragma("GCC diagnostic ignored \"-Wmissing-field-initializers\"")
-#endif // endif
+#endif
static const struct ieee80211_regdomain brcm_regdom = {
.n_reg_rules = 4,
.alpha2 = "99",
#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == \
4 && __GNUC_MINOR__ >= 6))
_Pragma("GCC diagnostic pop")
-#endif // endif
+#endif
+
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0)) && \
(defined(WL_IFACE_COMB_NUM_CHANNELS) || defined(WL_CFG80211_P2P_DEV_IF))
* to kernel version.
*
* less than linux-3.8 - max:3 (wlan0 + p2p0 + group removal of p2p-p2p0-x)
- * linux-3.8 and above - max:4
- * sta + NAN NMI + NAN DPI open + NAN DPI sec (since there is no iface type
- * for NAN defined, registering it as STA type)
+ * linux-3.8 and above - max:2 (wlan0 + group removal of p2p-wlan0-x)
*/
#ifdef WL_ENABLE_P2P_IF
- .max = 5,
+ .max = 3,
#else
- .max = 4,
+ .max = 2,
#endif /* WL_ENABLE_P2P_IF */
.types = BIT(NL80211_IFTYPE_STATION),
},
.types = BIT(NL80211_IFTYPE_ADHOC),
},
};
-
+#ifdef BCM4330_CHIP
+#define NUM_DIFF_CHANNELS 1
+#else
#define NUM_DIFF_CHANNELS 2
-
+#endif
static const struct ieee80211_iface_combination
common_iface_combinations[] = {
{
.num_different_channels = NUM_DIFF_CHANNELS,
/*
- * At Max 5 network interfaces can be registered concurrently
+ * max_interfaces = 4
+ * The max no of interfaces will be used in dual p2p case.
+ * {STA, P2P Device, P2P Group 1, P2P Group 2}. Though we
+ * will not be using the STA functionality in this case, it
+ * will remain registered as it is the primary interface.
*/
- .max_interfaces = IFACE_MAX_CNT,
+ .max_interfaces = 4,
.limits = common_if_limits,
.n_limits = ARRAY_SIZE(common_if_limits),
},
};
#endif /* LINUX_VER >= 3.0 && (WL_IFACE_COMB_NUM_CHANNELS || WL_CFG80211_P2P_DEV_IF) */
-static const char *wl_if_state_strs[WL_IF_STATE_MAX + 1] = {
- "WL_IF_CREATE_REQ",
- "WL_IF_CREATE_DONE",
- "WL_IF_DELETE_REQ",
- "WL_IF_DELETE_DONE",
- "WL_IF_CHANGE_REQ",
- "WL_IF_CHANGE_DONE",
- "WL_IF_STATE_MAX"
-};
-
-#ifdef BCMWAPI_WPI
-#if defined(ANDROID_PLATFORM_VERSION) && (ANDROID_PLATFORM_VERSION >= 8)
-/* WAPI define in ieee80211.h is used */
-#else
-#undef WLAN_AKM_SUITE_WAPI_PSK
-#define WLAN_AKM_SUITE_WAPI_PSK 0x000FAC04
-
-#undef WLAN_AKM_SUITE_WAPI_CERT
-#define WLAN_AKM_SUITE_WAPI_CERT 0x000FAC12
-
-#undef NL80211_WAPI_VERSION_1
-#define NL80211_WAPI_VERSION_1 1 << 2
-#endif /* ANDROID_PLATFORM_VERSION && ANDROID_PLATFORM_VERSION >= 8 */
-#endif /* BCMWAPI_WPI */
-
/* Data Element Definitions */
#define WPS_ID_CONFIG_METHODS 0x1008
#define WPS_ID_REQ_TYPE 0x103A
#define PM_BLOCK 1
#define PM_ENABLE 0
-/* GCMP crypto supported above kernel v4.0 */
-#if (LINUX_VERSION_CODE > KERNEL_VERSION(4, 0, 0))
-#define WL_GCMP
-#endif /* (LINUX_VERSION_CODE > KERNEL_VERSION(4, 0, 0) */
+
+#define WL_AKM_SUITE_SHA256_1X 0x000FAC05
+#define WL_AKM_SUITE_SHA256_PSK 0x000FAC06
#ifndef IBSS_COALESCE_ALLOWED
#define IBSS_COALESCE_ALLOWED IBSS_COALESCE_DEFAULT
-#endif // endif
+#endif
#ifndef IBSS_INITIAL_SCAN_ALLOWED
#define IBSS_INITIAL_SCAN_ALLOWED IBSS_INITIAL_SCAN_ALLOWED_DEFAULT
-#endif // endif
+#endif
#define CUSTOM_RETRY_MASK 0xff000000 /* Mask for retry counter of custom dwell time */
#define LONG_LISTEN_TIME 2000
-#ifdef RTT_SUPPORT
-static s32 wl_cfg80211_rtt_event_handler(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev,
- const wl_event_msg_t *e, void *data);
-#endif /* RTT_SUPPORT */
-#ifdef WL_CHAN_UTIL
-static s32 wl_cfg80211_bssload_report_event_handler(struct bcm_cfg80211 *cfg,
- bcm_struct_cfgdev *cfgdev, const wl_event_msg_t *e, void *data);
-static s32 wl_cfg80211_start_bssload_report(struct net_device *ndev);
-#endif /* WL_CHAN_UTIL */
+#ifdef WBTEXT
+#define CMD_WBTEXT_PROFILE_CONFIG "WBTEXT_PROFILE_CONFIG"
+#define CMD_WBTEXT_WEIGHT_CONFIG "WBTEXT_WEIGHT_CONFIG"
+#define CMD_WBTEXT_TABLE_CONFIG "WBTEXT_TABLE_CONFIG"
+#define CMD_WBTEXT_DELTA_CONFIG "WBTEXT_DELTA_CONFIG"
+#define DEFAULT_WBTEXT_PROFILE_A "a -70 -75 70 10 -75 -128 0 10"
+#define DEFAULT_WBTEXT_PROFILE_B "b -60 -75 70 10 -75 -128 0 10"
+#define DEFAULT_WBTEXT_WEIGHT_RSSI_A "RSSI a 65"
+#define DEFAULT_WBTEXT_WEIGHT_RSSI_B "RSSI b 65"
+#define DEFAULT_WBTEXT_WEIGHT_CU_A "CU a 35"
+#define DEFAULT_WBTEXT_WEIGHT_CU_B "CU b 35"
+#define DEFAULT_WBTEXT_TABLE_RSSI_A "RSSI a 0 55 100 55 60 90 \
+60 65 70 65 70 50 70 128 20"
+#define DEFAULT_WBTEXT_TABLE_RSSI_B "RSSI b 0 55 100 55 60 90 \
+60 65 70 65 70 50 70 128 20"
+#define DEFAULT_WBTEXT_TABLE_CU_A "CU a 0 30 100 30 50 90 \
+50 60 70 60 80 50 80 100 20"
+#define DEFAULT_WBTEXT_TABLE_CU_B "CU b 0 10 100 10 25 90 \
+25 40 70 40 70 50 70 100 20"
+
+typedef struct wl_wbtext_bssid {
+ struct ether_addr ea;
+ struct list_head list;
+} wl_wbtext_bssid_t;
+
+static void wl_cfg80211_wbtext_update_rcc(struct bcm_cfg80211 *cfg, struct net_device *dev);
+static bool wl_cfg80211_wbtext_check_bssid_list(struct bcm_cfg80211 *cfg, struct ether_addr *ea);
+static bool wl_cfg80211_wbtext_add_bssid_list(struct bcm_cfg80211 *cfg, struct ether_addr *ea);
+static void wl_cfg80211_wbtext_clear_bssid_list(struct bcm_cfg80211 *cfg);
+static bool wl_cfg80211_wbtext_send_nbr_req(struct bcm_cfg80211 *cfg, struct net_device *dev,
+ struct wl_profile *profile);
+static bool wl_cfg80211_wbtext_send_btm_query(struct bcm_cfg80211 *cfg, struct net_device *dev,
+ struct wl_profile *profile);
+static void wl_cfg80211_wbtext_set_wnm_maxidle(struct bcm_cfg80211 *cfg, struct net_device *dev);
+static int wl_cfg80211_recv_nbr_resp(struct net_device *dev, uint8 *body, int body_len);
+#endif /* WBTEXT */
#ifdef SUPPORT_AP_RADIO_PWRSAVE
#define RADIO_PWRSAVE_PPS 10
#define RADIO_PWRSAVE_STAS_ASSOC_CHECK 0
#define RADIO_PWRSAVE_LEVEL_MIN 1
-#define RADIO_PWRSAVE_LEVEL_MAX 9
+#define RADIO_PWRSAVE_LEVEL_MAX 5
#define RADIO_PWRSAVE_PPS_MIN 1
#define RADIO_PWRSAVE_QUIETTIME_MIN 1
#define RADIO_PWRSAVE_ASSOCCHECK_MIN 0
#define RADIO_PWRSAVE_ASSOCCHECK_MAX 1
#define RADIO_PWRSAVE_MAJOR_VER 1
-#define RADIO_PWRSAVE_MINOR_VER 1
+#define RADIO_PWRSAVE_MINOR_VER 0
#define RADIO_PWRSAVE_MAJOR_VER_SHIFT 8
#define RADIO_PWRSAVE_VERSION \
((RADIO_PWRSAVE_MAJOR_VER << RADIO_PWRSAVE_MAJOR_VER_SHIFT)| RADIO_PWRSAVE_MINOR_VER)
#endif /* SUPPORT_AP_RADIO_PWRSAVE */
-/* SoftAP related parameters */
-#define DEFAULT_2G_SOFTAP_CHANNEL 1
-#define DEFAULT_5G_SOFTAP_CHANNEL 149
-#define WL_MAX_NUM_CSA_COUNTERS 255
+#ifdef WLADPS_SEAK_AP_WAR
+#define ATHEROS_OUI "\x00\x03\x7F"
+#define CAMEO_MAC_PREFIX "\x00\x18\xE7"
+#define MAC_PREFIX_LEN 3
+static bool
+wl_find_vndr_ies_specific_vender(struct bcm_cfg80211 *cfg,
+ struct net_device *ndev, const u8 *vndr_oui);
+static s32 wl_set_adps_mode(struct bcm_cfg80211 *cfg, struct net_device *ndev, uint8 enable_mode);
+#endif /* WLADPS_SEAK_AP_WAR */
-#define MAX_VNDR_OUI_STR_LEN 256u
-#define VNDR_OUI_STR_LEN 10u
-#define DOT11_DISCONNECT_RC 2u
+#define MAX_VNDR_OUI_STR_LEN 256
+#define VNDR_OUI_STR_LEN 10
static const uchar *exclude_vndr_oui_list[] = {
"\x00\x50\xf2", /* Microsoft */
"\x00\x00\xf0", /* Samsung Elec */
struct list_head list;
} wl_vndr_oui_entry_t;
-#if defined(WL_DISABLE_HE_SOFTAP) || defined(WL_DISABLE_HE_P2P) || \
- defined(SUPPORT_AP_BWCTRL)
-#define WL_HE_FEATURES_HE_AP 0x8
-#define WL_HE_FEATURES_HE_P2P 0x20
-#endif // endif
-
static int wl_vndr_ies_get_vendor_oui(struct bcm_cfg80211 *cfg,
struct net_device *ndev, char *vndr_oui, u32 vndr_oui_len);
static void wl_vndr_ies_clear_vendor_oui_list(struct bcm_cfg80211 *cfg);
-static s32 wl_cfg80211_parse_vndr_ies(const u8 *parse, u32 len,
- struct parsed_vndr_ies *vndr_ies);
-
-#if defined(WL_FW_OCE_AP_SELECT)
-static bool
-wl_cfgoce_has_ie(const u8 *ie, const u8 **tlvs, u32 *tlvs_len, const u8 *oui, u32 oui_len, u8 type);
-
-/* Check whether the given IE looks like WFA OCE IE. */
-#define wl_cfgoce_is_oce_ie(ie, tlvs, len) wl_cfgoce_has_ie(ie, tlvs, len, \
- (const uint8 *)WFA_OUI, WFA_OUI_LEN, WFA_OUI_TYPE_MBO_OCE)
-
-/* Is any of the tlvs the expected entry? If
- * not update the tlvs buffer pointer/length.
- */
-static bool
-wl_cfgoce_has_ie(const u8 *ie, const u8 **tlvs, u32 *tlvs_len, const u8 *oui, u32 oui_len, u8 type)
-{
- /* If the contents match the OUI and the type */
- if (ie[TLV_LEN_OFF] >= oui_len + 1 &&
- !bcmp(&ie[TLV_BODY_OFF], oui, oui_len) &&
- type == ie[TLV_BODY_OFF + oui_len]) {
- return TRUE;
- }
-
- return FALSE;
-}
-#endif /* WL_FW_OCE_AP_SELECT */
/*
* cfg80211_ops api/callback list
*/
-static s32 wl_frame_get_mgmt(struct bcm_cfg80211 *cfg, u16 fc,
- const struct ether_addr *da, const struct ether_addr *sa,
- const struct ether_addr *bssid, u8 **pheader, u32 *body_len, u8 *pbody);
+static s32 wl_frame_get_mgmt(u16 fc, const struct ether_addr *da,
+ const struct ether_addr *sa, const struct ether_addr *bssid,
+ u8 **pheader, u32 *body_len, u8 *pbody);
+static s32 __wl_cfg80211_scan(struct wiphy *wiphy, struct net_device *ndev,
+ struct cfg80211_scan_request *request,
+ struct cfg80211_ssid *this_ssid);
+#if defined(WL_CFG80211_P2P_DEV_IF)
+static s32
+wl_cfg80211_scan(struct wiphy *wiphy, struct cfg80211_scan_request *request);
+#else
+static s32
+wl_cfg80211_scan(struct wiphy *wiphy, struct net_device *ndev,
+ struct cfg80211_scan_request *request);
+#endif /* WL_CFG80211_P2P_DEV_IF */
static s32 wl_cfg80211_set_wiphy_params(struct wiphy *wiphy, u32 changed);
#ifdef WLAIBSS_MCHAN
static bcm_struct_cfgdev* bcm_cfg80211_add_ibss_if(struct wiphy *wiphy, char *name);
static s32 wl_cfg80211_get_station(struct wiphy *wiphy,
struct net_device *dev, u8 *mac,
struct station_info *sinfo);
-#endif // endif
+#endif
static s32 wl_cfg80211_set_power_mgmt(struct wiphy *wiphy,
struct net_device *dev, bool enabled,
s32 timeout);
static int wl_cfg80211_connect(struct wiphy *wiphy, struct net_device *dev,
struct cfg80211_connect_params *sme);
-#if defined(WL_FILS)
-static int wl_cfg80211_update_connect_params(struct wiphy *wiphy, struct net_device *dev,
- struct cfg80211_connect_params *sme, u32 changed);
-#endif /* WL_FILS */
static s32 wl_cfg80211_disconnect(struct wiphy *wiphy, struct net_device *dev,
u16 reason_code);
#if defined(WL_CFG80211_P2P_DEV_IF)
#else
static s32 wl_cfg80211_del_station(struct wiphy *wiphy,
struct net_device *ndev, u8* mac_addr);
-#endif // endif
+#endif
+#ifdef WLMESH
+static s32 wl_cfg80211_join_mesh(
+ struct wiphy *wiphy, struct net_device *dev,
+ const struct mesh_config *conf,
+ const struct mesh_setup *setup);
+static s32 wl_cfg80211_leave_mesh(struct wiphy *wiphy,
+ struct net_device *dev);
+#endif /* WLMESH */
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 16, 0))
static s32 wl_cfg80211_change_station(struct wiphy *wiphy,
struct net_device *dev, const u8 *mac, struct station_parameters *params);
#else
static s32 wl_cfg80211_change_station(struct wiphy *wiphy,
struct net_device *dev, u8 *mac, struct station_parameters *params);
-#endif // endif
+#endif
#endif /* WL_SUPPORT_BACKPORTED_KPATCHES || KERNEL_VER >= KERNEL_VERSION(3, 2, 0)) */
-#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 39)) || defined(WL_COMPAT_WIRELESS)
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 39))
static s32 wl_cfg80211_suspend(struct wiphy *wiphy, struct cfg80211_wowlan *wow);
#else
static s32 wl_cfg80211_suspend(struct wiphy *wiphy);
-#endif // endif
+#endif
static s32 wl_cfg80211_set_pmksa(struct wiphy *wiphy, struct net_device *dev,
struct cfg80211_pmksa *pmksa);
static s32 wl_cfg80211_del_pmksa(struct wiphy *wiphy, struct net_device *dev,
struct cfg80211_pmksa *pmksa);
static s32 wl_cfg80211_flush_pmksa(struct wiphy *wiphy,
struct net_device *dev);
-#ifdef WL_CLIENT_SAE
-static bool wl_is_pmkid_available(struct net_device *dev, const u8 *bssid);
-#endif /* WL_CLIENT_SAE */
-#if (LINUX_VERSION_CODE > KERNEL_VERSION(3, 2, 0)) || defined(WL_COMPAT_WIRELESS)
+void wl_cfg80211_scan_abort(struct bcm_cfg80211 *cfg);
+static void wl_cfg80211_cancel_scan(struct bcm_cfg80211 *cfg);
+static s32 wl_notify_escan_complete(struct bcm_cfg80211 *cfg,
+ struct net_device *ndev, bool aborted, bool fw_abort);
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(3, 2, 0))
#if (defined(CONFIG_ARCH_MSM) && defined(TDLS_MGMT_VERSION2)) || (LINUX_VERSION_CODE < \
KERNEL_VERSION(3, 16, 0) && LINUX_VERSION_CODE >= KERNEL_VERSION(3, 15, 0))
static s32 wl_cfg80211_tdls_mgmt(struct wiphy *wiphy, struct net_device *dev,
#else
static s32 wl_cfg80211_tdls_oper(struct wiphy *wiphy, struct net_device *dev,
u8 *peer, enum nl80211_tdls_operation oper);
-#endif // endif
-#endif /* LINUX_VERSION > KERNEL_VERSION(3,2,0) || WL_COMPAT_WIRELESS */
+#endif
+#endif
+#ifdef WL_SCHED_SCAN
+static int wl_cfg80211_sched_scan_stop(struct wiphy *wiphy, struct net_device *dev
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0))
+ , u64 reqid
+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)) */
+);
+#endif
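+/* Note: the extra u64 reqid argument only exists on kernels >= 4.12, so the
+ * prototype (and the matching definition) must keep it behind the same
+ * version check.
+ */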
static s32 wl_cfg80211_set_ap_role(struct bcm_cfg80211 *cfg, struct net_device *dev);
-
-struct wireless_dev *
-wl_cfg80211_create_iface(struct wiphy *wiphy, wl_iftype_t
- iface_type, u8 *mac_addr, const char *name);
+#if defined(WL_VIRTUAL_APSTA) || defined(DUAL_STA_STATIC_IF)
+bcm_struct_cfgdev*
+wl_cfg80211_create_iface(struct wiphy *wiphy, enum nl80211_iftype
+ iface_type, u8 *mac_addr, const char *name);
s32
-wl_cfg80211_del_iface(struct wiphy *wiphy, struct wireless_dev *wdev);
+wl_cfg80211_del_iface(struct wiphy *wiphy, bcm_struct_cfgdev *cfgdev);
+#endif /* defined(WL_VIRTUAL_APSTA) || defined(DUAL_STA_STATIC_IF) */
s32 wl_cfg80211_interface_ops(struct bcm_cfg80211 *cfg,
struct net_device *ndev, s32 bsscfg_idx,
- wl_iftype_t iftype, s32 del, u8 *addr);
+ enum nl80211_iftype iface_type, s32 del, u8 *addr);
s32 wl_cfg80211_add_del_bss(struct bcm_cfg80211 *cfg,
struct net_device *ndev, s32 bsscfg_idx,
- wl_iftype_t brcm_iftype, s32 del, u8 *addr);
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 4, 0)) || defined(WL_COMPAT_WIRELESS)
+ enum nl80211_iftype iface_type, s32 del, u8 *addr);
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 4, 0))
static s32 wl_cfg80211_stop_ap(struct wiphy *wiphy, struct net_device *dev);
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 4, 0) */
#ifdef GTK_OFFLOAD_SUPPORT
#endif /* GTK_OFFLOAD_SUPPORT */
chanspec_t wl_chspec_driver_to_host(chanspec_t chanspec);
chanspec_t wl_chspec_host_to_driver(chanspec_t chanspec);
+#ifdef WL11ULB
+static s32 wl_cfg80211_get_ulb_bw(struct bcm_cfg80211 *cfg, struct wireless_dev *wdev);
+static chanspec_t wl_cfg80211_ulb_get_min_bw_chspec(struct bcm_cfg80211 *cfg,
+ struct wireless_dev *wdev, s32 bssidx);
+static s32 wl_cfg80211_ulbbw_to_ulbchspec(u32 ulb_bw);
+#else
+static inline chanspec_t wl_cfg80211_ulb_get_min_bw_chspec(
+ struct bcm_cfg80211 *cfg, struct wireless_dev *wdev, s32 bssidx)
+{
+ return WL_CHANSPEC_BW_20;
+}
+#endif /* WL11ULB */
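+/* When WL11ULB is not compiled in, the inline stub above collapses to a
+ * constant 20 MHz bandwidth, so callers can OR in the result without any
+ * #ifdef of their own.
+ */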
static void wl_cfg80211_wait_for_disconnection(struct bcm_cfg80211 *cfg, struct net_device *dev);
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 12, 0))
-int wl_cfg80211_channel_switch(struct wiphy *wiphy, struct net_device *dev,
- struct cfg80211_csa_settings *params);
-#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 12, 0) */
-
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 13, 0))
-static int wl_cfg80211_set_pmk(struct wiphy *wiphy, struct net_device *dev,
- const struct cfg80211_pmk_conf *conf);
-static int wl_cfg80211_del_pmk(struct wiphy *wiphy, struct net_device *dev,
- const u8 *aa);
-#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 13, 0) */
/*
* event & event Q handlers for cfg80211 interfaces
static struct wl_event_q *wl_deq_event(struct bcm_cfg80211 *cfg);
static s32 wl_enq_event(struct bcm_cfg80211 *cfg, struct net_device *ndev, u32 type,
const wl_event_msg_t *msg, void *data);
-static void wl_put_event(struct bcm_cfg80211 *cfg, struct wl_event_q *e);
+static void wl_put_event(struct wl_event_q *e);
static s32 wl_notify_connect_status_ap(struct bcm_cfg80211 *cfg, struct net_device *ndev,
const wl_event_msg_t *e, void *data);
static s32 wl_notify_connect_status(struct bcm_cfg80211 *cfg,
bcm_struct_cfgdev *cfgdev, const wl_event_msg_t *e, void *data);
static s32 wl_notify_roaming_status(struct bcm_cfg80211 *cfg,
bcm_struct_cfgdev *cfgdev, const wl_event_msg_t *e, void *data);
+static s32 wl_notify_scan_status(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev,
+ const wl_event_msg_t *e, void *data);
static s32 wl_bss_connect_done(struct bcm_cfg80211 *cfg, struct net_device *ndev,
const wl_event_msg_t *e, void *data, bool completed);
static s32 wl_bss_roaming_done(struct bcm_cfg80211 *cfg, struct net_device *ndev,
static s32 wl_notify_bt_wifi_handover_req(struct bcm_cfg80211 *cfg,
bcm_struct_cfgdev *cfgdev, const wl_event_msg_t *e, void *data);
#endif /* BT_WIFI_HANDOVER */
+#ifdef WL_SCHED_SCAN
+static s32
+wl_notify_sched_scan_results(struct bcm_cfg80211 *cfg, struct net_device *ndev,
+ const wl_event_msg_t *e, void *data);
+#endif /* WL_SCHED_SCAN */
+#ifdef PNO_SUPPORT
+static s32 wl_notify_pfn_status(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev,
+ const wl_event_msg_t *e, void *data);
+#endif /* PNO_SUPPORT */
#ifdef GSCAN_SUPPORT
+static s32 wl_notify_gscan_event(struct bcm_cfg80211 *wl, bcm_struct_cfgdev *cfgdev,
+ const wl_event_msg_t *e, void *data);
static s32 wl_handle_roam_exp_event(struct bcm_cfg80211 *wl, bcm_struct_cfgdev *cfgdev,
const wl_event_msg_t *e, void *data);
#endif /* GSCAN_SUPPORT */
static s32 wl_check_pmstatus(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev,
const wl_event_msg_t *e, void *data);
#endif /* CUSTOM_EVENT_PM_WAKE */
+#ifdef ENABLE_TEMP_THROTTLING
+static s32 wl_check_rx_throttle_status(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev,
+ const wl_event_msg_t *e, void *data);
+#endif /* ENABLE_TEMP_THROTTLING */
#if defined(DHD_LOSSLESS_ROAMING) || defined(DBG_PKT_MON)
static s32 wl_notify_roam_prep_status(struct bcm_cfg80211 *cfg,
bcm_struct_cfgdev *cfgdev, const wl_event_msg_t *e, void *data);
static void wl_del_roam_timeout(struct bcm_cfg80211 *cfg);
#endif /* DHD_LOSSLESS_ROAMING */
-#ifdef WL_MBO
-static s32
-wl_mbo_event_handler(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev,
- const wl_event_msg_t *e, void *data);
-#endif /* WL_MBO */
-
-#ifdef WL_CLIENT_SAE
-static s32 wl_notify_connect_status_bss(struct bcm_cfg80211 *cfg, struct net_device *ndev,
+#ifdef WLTDLS
+static s32 wl_cfg80211_tdls_config(struct bcm_cfg80211 *cfg,
+ enum wl_tdls_config state, bool tdls_mode);
+static s32 wl_tdls_event_handler(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev,
const wl_event_msg_t *e, void *data);
-static s32 wl_notify_start_auth(struct bcm_cfg80211 *cfg,
- bcm_struct_cfgdev *cfgdev, const wl_event_msg_t *e, void *data);
-static s32 wl_cfg80211_external_auth(struct wiphy *wiphy,
- struct net_device *dev, struct cfg80211_external_auth_params *ext_auth);
-#endif /* WL_CLIENT_SAE */
-
+#endif /* WLTDLS */
/*
* register/deregister parent device
*/
*/
static s32 wl_update_prof(struct bcm_cfg80211 *cfg, struct net_device *ndev,
const wl_event_msg_t *e, const void *data, s32 item);
+static void *wl_read_prof(struct bcm_cfg80211 *cfg, struct net_device *ndev, s32 item);
static void wl_init_prof(struct bcm_cfg80211 *cfg, struct net_device *ndev);
/*
struct cfg80211_connect_params *sme);
static s32 wl_set_set_sharedkey(struct net_device *dev,
struct cfg80211_connect_params *sme);
-#ifdef WL_FILS
-static s32 wl_set_fils_params(struct net_device *dev,
- struct cfg80211_connect_params *sme);
-#endif // endif
-#ifdef BCMWAPI_WPI
-static s32 wl_set_set_wapi_ie(struct net_device *dev,
- struct cfg80211_connect_params *sme);
-#endif // endif
-#ifdef WL_GCMP
-static s32 wl_set_wsec_info_algos(struct net_device *dev, uint32 algos, uint32 mask);
-#endif /* WL_GCMP */
static s32 wl_get_assoc_ies(struct bcm_cfg80211 *cfg, struct net_device *ndev);
static s32 wl_ch_to_chanspec(struct net_device *dev, int ch,
struct wl_join_params *join_params, size_t *join_params_size);
*/
static void wl_rst_ie(struct bcm_cfg80211 *cfg);
static __used s32 wl_add_ie(struct bcm_cfg80211 *cfg, u8 t, u8 l, u8 *v);
-static void wl_update_hidden_ap_ie(wl_bss_info_t *bi, const u8 *ie_stream, u32 *ie_size,
- bool update_ssid);
+static void wl_update_hidden_ap_ie(struct wl_bss_info *bi, const u8 *ie_stream, u32 *ie_size,
+ bool roam);
static s32 wl_mrg_ie(struct bcm_cfg80211 *cfg, u8 *ie_stream, u16 ie_size);
static s32 wl_cp_ie(struct bcm_cfg80211 *cfg, u8 *dst, u16 dst_size);
static u32 wl_get_ielen(struct bcm_cfg80211 *cfg);
#ifdef MFP
-static int wl_cfg80211_get_rsn_capa(const bcm_tlv_t *wpa2ie, const u8** rsn_cap);
-#endif // endif
+static int wl_cfg80211_get_rsn_capa(bcm_tlv_t *wpa2ie, u8** rsn_cap);
+#endif
+
+#ifdef WL11U
+static bcm_tlv_t *
+wl_cfg80211_find_interworking_ie(const u8 *parse, u32 len);
+static s32
+wl_cfg80211_clear_iw_ie(struct bcm_cfg80211 *cfg, struct net_device *ndev, s32 bssidx);
+static s32
+wl_cfg80211_add_iw_ie(struct bcm_cfg80211 *cfg, struct net_device *ndev, s32 bssidx, s32 pktflag,
+ uint8 ie_id, uint8 *data, uint8 data_len);
+#endif /* WL11U */
static s32 wl_setup_wiphy(struct wireless_dev *wdev, struct device *dev, dhd_pub_t *data);
static void wl_free_wdev(struct bcm_cfg80211 *cfg);
wl_cfg80211_reg_notifier(struct wiphy *wiphy, struct regulatory_request *request);
#endif /* CONFIG_CFG80211_INTERNAL_REGDB */
-static s32 wl_inform_single_bss(struct bcm_cfg80211 *cfg, wl_bss_info_t *bi, bool update_ssid);
-static s32 wl_update_bss_info(struct bcm_cfg80211 *cfg, struct net_device *ndev, bool update_ssid);
+static s32 wl_inform_bss(struct bcm_cfg80211 *cfg);
+static s32 wl_inform_single_bss(struct bcm_cfg80211 *cfg, struct wl_bss_info *bi, bool roam);
+static s32 wl_update_bss_info(struct bcm_cfg80211 *cfg, struct net_device *ndev, bool roam);
static chanspec_t wl_cfg80211_get_shared_freq(struct wiphy *wiphy);
s32 wl_cfg80211_channel_to_freq(u32 channel);
+
static void wl_cfg80211_work_handler(struct work_struct *work);
static s32 wl_add_keyext(struct wiphy *wiphy, struct net_device *dev,
u8 key_idx, const u8 *mac_addr,
*/
static s32 __wl_cfg80211_up(struct bcm_cfg80211 *cfg);
static s32 __wl_cfg80211_down(struct bcm_cfg80211 *cfg);
+
+#ifdef WL_LASTEVT
+static bool wl_is_linkdown(struct bcm_cfg80211 *cfg, const wl_event_msg_t *e, void *data);
+#define WL_IS_LINKDOWN(cfg, e, data) wl_is_linkdown(cfg, e, data)
+#else
static bool wl_is_linkdown(struct bcm_cfg80211 *cfg, const wl_event_msg_t *e);
+#define WL_IS_LINKDOWN(cfg, e, data) wl_is_linkdown(cfg, e)
+#endif /* WL_LASTEVT */
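+/* Callers use WL_IS_LINKDOWN(cfg, e, data) in both builds; the data argument
+ * is simply ignored when WL_LASTEVT is not defined.
+ */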
static bool wl_is_linkup(struct bcm_cfg80211 *cfg, const wl_event_msg_t *e,
struct net_device *ndev);
static bool wl_is_nonetwork(struct bcm_cfg80211 *cfg, const wl_event_msg_t *e);
static void wl_link_up(struct bcm_cfg80211 *cfg);
static void wl_link_down(struct bcm_cfg80211 *cfg);
-static s32 wl_config_infra(struct bcm_cfg80211 *cfg, struct net_device *ndev, u16 iftype);
+static s32 wl_config_ifmode(struct bcm_cfg80211 *cfg, struct net_device *ndev, s32 iftype);
static void wl_init_conf(struct wl_conf *conf);
+static s32 wl_cfg80211_handle_ifdel(struct bcm_cfg80211 *cfg, wl_if_event_info *if_event_info,
+ struct net_device* ndev);
int wl_cfg80211_get_ioctl_version(void);
/*
#ifdef DEBUGFS_CFG80211
static s32 wl_setup_debugfs(struct bcm_cfg80211 *cfg);
static s32 wl_free_debugfs(struct bcm_cfg80211 *cfg);
-#endif // endif
+#endif
+static wl_scan_params_t *wl_cfg80211_scan_alloc_params(struct bcm_cfg80211 *cfg,
+ int channel, int nprobes, int *out_params_size);
static bool check_dev_role_integrity(struct bcm_cfg80211 *cfg, u32 dev_role);
#ifdef WL_CFG80211_ACL
int dhd_monitor_uninit(void);
int dhd_start_xmit(struct sk_buff *skb, struct net_device *net);
-#ifdef ESCAN_CHANNEL_CACHE
-void reset_roam_cache(struct bcm_cfg80211 *cfg);
-void add_roam_cache(struct bcm_cfg80211 *cfg, wl_bss_info_t *bi);
-int get_roam_channel_list(int target_chan, chanspec_t *channels,
- int n_channels, const wlc_ssid_t *ssid, int ioctl_ver);
-void set_roam_band(int band);
-#endif /* ESCAN_CHANNEL_CACHE */
-
-#ifdef ROAM_CHANNEL_CACHE
-int init_roam_cache(struct bcm_cfg80211 *cfg, int ioctl_ver);
-void print_roam_cache(struct bcm_cfg80211 *cfg);
-void update_roam_cache(struct bcm_cfg80211 *cfg, int ioctl_ver);
-#endif /* ROAM_CHANNEL_CACHE */
#ifdef P2P_LISTEN_OFFLOADING
s32 wl_cfg80211_p2plo_deinit(struct bcm_cfg80211 *cfg);
const struct ether_addr *bssid);
static s32 __wl_update_wiphybands(struct bcm_cfg80211 *cfg, bool notify);
-#ifdef WL_WPS_SYNC
-static void wl_init_wps_reauth_sm(struct bcm_cfg80211 *cfg);
-static void wl_deinit_wps_reauth_sm(struct bcm_cfg80211 *cfg);
-static void wl_wps_reauth_timeout(unsigned long data);
-static s32 wl_get_free_wps_inst(struct bcm_cfg80211 *cfg);
-static s32 wl_get_wps_inst_match(struct bcm_cfg80211 *cfg, struct net_device *ndev);
-static s32 wl_wps_session_add(struct net_device *ndev, u16 mode, u8 *peer_mac);
-static void wl_wps_session_del(struct net_device *ndev);
-static s32 wl_wps_session_update(struct net_device *ndev, u16 state, const u8 *peer_mac);
-static void wl_wps_handle_ifdel(struct net_device *ndev);
-#endif /* WL_WPS_SYNC */
-
-#if defined(WL_FW_OCE_AP_SELECT)
-bool static wl_cfg80211_is_oce_ap(struct wiphy *wiphy, const u8 *bssid_hint);
-#endif /* WL_FW_OCE_AP_SELECT */
-
-#ifdef WL_BCNRECV
-static s32 wl_bcnrecv_aborted_event_handler(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev,
- const wl_event_msg_t *e, void *data);
-#endif /* WL_BCNRECV */
-
-#ifdef WL_CAC_TS
-static s32 wl_cfg80211_cac_event_handler(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev,
- const wl_event_msg_t *e, void *data);
-#endif /* WL_CAC_TS */
-
-#if defined(WL_MBO) || defined(WL_OCE)
-static s32 wl_bssid_prune_event_handler(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev,
- const wl_event_msg_t *e, void *data);
-#endif /* WL_MBO || WL_OCE */
-
static int bw2cap[] = { 0, 0, WLC_BW_CAP_20MHZ, WLC_BW_CAP_40MHZ, WLC_BW_CAP_80MHZ,
WLC_BW_CAP_160MHZ, WLC_BW_CAP_160MHZ };
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)) || (defined(CONFIG_ARCH_MSM) && \
+ defined(CFG80211_DISCONNECTED_V2))
+#define CFG80211_DISCONNECTED(dev, reason, ie, len, loc_gen, gfp) \
+ cfg80211_disconnected(dev, reason, ie, len, loc_gen, gfp);
+#elif (LINUX_VERSION_CODE < KERNEL_VERSION(4, 2, 0))
+#define CFG80211_DISCONNECTED(dev, reason, ie, len, loc_gen, gfp) \
+ cfg80211_disconnected(dev, reason, ie, len, gfp);
+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)) */
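+/* The wrapper above hides the locally_generated parameter that was added in
+ * kernel 4.2 (and backported on some MSM kernels); on older kernels the flag
+ * is silently dropped. A call looks the same either way, e.g.:
+ *   CFG80211_DISCONNECTED(ndev, reason, NULL, 0, false, GFP_KERNEL);
+ */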
+
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 1, 0)) || (defined(CONFIG_ARCH_MSM) && \
defined(CFG80211_DISCONNECTED_V2))
#define CFG80211_GET_BSS(wiphy, channel, bssid, ssid, ssid_len) \
cfg80211_get_bss(wiphy, channel, bssid, ssid, ssid_len, \
- IEEE80211_BSS_TYPE_ANY, IEEE80211_PRIVACY_ANY);
+ IEEE80211_BSS_TYPE_ESS, IEEE80211_PRIVACY_ANY);
#else
#define CFG80211_GET_BSS(wiphy, channel, bssid, ssid, ssid_len) \
cfg80211_get_bss(wiphy, channel, bssid, ssid, ssid_len, \
WLAN_CAPABILITY_ESS, WLAN_CAPABILITY_ESS);
#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 1, 0)) */
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 7, 0))
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 0)) || \
- defined(CFG80211_CONNECT_TIMEOUT_REASON_CODE) || defined(WL_FILS) || \
- defined(CONFIG_CFG80211_FILS_BKPORT)
-#define CFG80211_CONNECT_RESULT(dev, bssid, bss, req_ie, req_ie_len, resp_ie, \
- resp_ie_len, status, gfp) \
- cfg80211_connect_bss(dev, bssid, bss, req_ie, req_ie_len, resp_ie, \
- resp_ie_len, status, gfp, NL80211_TIMEOUT_UNSPECIFIED);
-#else
-#define CFG80211_CONNECT_RESULT(dev, bssid, bss, req_ie, req_ie_len, resp_ie, \
- resp_ie_len, status, gfp) \
- cfg80211_connect_bss(dev, bssid, bss, req_ie, req_ie_len, resp_ie, \
- resp_ie_len, status, gfp);
-#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 0) || \
- * (CFG80211_CONNECT_TIMEOUT_REASON_CODE) ||
- * WL_FILS || CONFIG_CFG80211_FILS_BKPORT
- */
-#elif defined(CFG80211_CONNECT_TIMEOUT_REASON_CODE)
-/* There are customer kernels with backported changes for
- * connect timeout. CFG80211_CONNECT_TIMEOUT_REASON_CODE define
- * is available for kernels < 4.7 in such cases.
- */
-#define CFG80211_CONNECT_RESULT(dev, bssid, bss, req_ie, req_ie_len, resp_ie, \
- resp_ie_len, status, gfp) \
- cfg80211_connect_bss(dev, bssid, bss, req_ie, req_ie_len, resp_ie, \
- resp_ie_len, status, gfp, NL80211_TIMEOUT_UNSPECIFIED);
-#else
-/* Kernels < 4.7 doesn't support cfg80211_connect_bss */
-#define CFG80211_CONNECT_RESULT(dev, bssid, bss, req_ie, req_ie_len, resp_ie, \
- resp_ie_len, status, gfp) \
- cfg80211_connect_result(dev, bssid, req_ie, req_ie_len, resp_ie, \
- resp_ie_len, status, gfp);
-#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 7, 0) */
-
-#define IS_WPA_AKM(akm) ((akm) == RSN_AKM_NONE || \
- (akm) == RSN_AKM_UNSPECIFIED || \
+#define IS_WPA_AKM(akm) ((akm) == RSN_AKM_NONE || \
+ (akm) == RSN_AKM_UNSPECIFIED || \
(akm) == RSN_AKM_PSK)
+
extern int dhd_wait_pend8021x(struct net_device *dev);
#ifdef PROP_TXSTATUS_VSDB
extern int disable_proptx;
#endif /* PROP_TXSTATUS_VSDB */
+static int wl_cfg80211_check_in4way(struct bcm_cfg80211 *cfg,
+ struct net_device *dev, uint action, enum wl_ext_status status, void *context);
+
+extern int passive_channel_skip;
static s32
wl_ap_start_ind(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev,
static s32
wl_csa_complete_ind(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev,
const wl_event_msg_t *e, void *data);
-#ifdef SUPPORT_AP_BWCTRL
-static void
-wl_update_apchan_bwcap(struct bcm_cfg80211 *cfg, struct net_device *ndev, chanspec_t chanspec);
-static void
-wl_restore_ap_bw(struct bcm_cfg80211 *cfg);
-#endif /* SUPPORT_AP_BWCTRL */
-
#if ((LINUX_VERSION_CODE >= KERNEL_VERSION (3, 5, 0)) && (LINUX_VERSION_CODE <= (3, 7, \
0)))
struct chan_info {
int freq;
int chan_type;
};
-#endif // endif
+#endif
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 9, 0))
#define CFG80211_PUT_BSS(wiphy, bss) cfg80211_put_bss(wiphy, bss);
* are supporting MFP. So advertise only when MFP support is enabled.
*/
WLAN_CIPHER_SUITE_AES_CMAC,
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 0, 0))
- WLAN_CIPHER_SUITE_BIP_GMAC_256,
- WLAN_CIPHER_SUITE_BIP_GMAC_128,
- WLAN_CIPHER_SUITE_BIP_CMAC_256,
-#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 0, 0) */
#endif /* MFP */
-
-#ifdef BCMWAPI_WPI
- WLAN_CIPHER_SUITE_SMS4,
-#endif // endif
-#if defined(WLAN_CIPHER_SUITE_PMK)
- WLAN_CIPHER_SUITE_PMK,
-#endif /* WLAN_CIPHER_SUITE_PMK */
-#ifdef WL_GCMP
- WLAN_CIPHER_SUITE_GCMP,
- WLAN_CIPHER_SUITE_GCMP_256,
- WLAN_CIPHER_SUITE_BIP_GMAC_128,
- WLAN_CIPHER_SUITE_BIP_GMAC_256,
-#endif /* WL_GCMP */
};
#ifdef WL_SUPPORT_ACS
};
#endif /* WL_SUPPORT_ACS */
-#ifdef WL_CFG80211_GON_COLLISION
-#define BLOCK_GON_REQ_MAX_NUM 5
-#endif /* WL_CFG80211_GON_COLLISION */
#if defined(USE_DYNAMIC_MAXPKT_RXGLOM)
static int maxrxpktglom = 0;
-#endif // endif
+#endif
/* IOCtl version read from targeted driver */
int ioctl_version;
#ifdef DEBUGFS_CFG80211
-#define SUBLOGLEVEL 20
-#define SUBLOGLEVELZ ((SUBLOGLEVEL) + (1))
+#define S_SUBLOGLEVEL 20
static const struct {
u32 log_level;
char *sublogname;
{WL_DBG_TRACE, "TRACE"},
{WL_DBG_P2P_ACTION, "P2PACTION"}
};
-#endif // endif
-
-typedef struct rsn_cipher_algo_entry {
- u32 cipher_suite;
- u32 wsec_algo;
- u32 wsec_key_algo;
-} rsn_cipher_algo_entry_t;
-
-static const rsn_cipher_algo_entry_t rsn_cipher_algo_lookup_tbl[] = {
- {WLAN_CIPHER_SUITE_WEP40, WEP_ENABLED, CRYPTO_ALGO_WEP1},
- {WLAN_CIPHER_SUITE_WEP104, WEP_ENABLED, CRYPTO_ALGO_WEP128},
- {WLAN_CIPHER_SUITE_TKIP, TKIP_ENABLED, CRYPTO_ALGO_TKIP},
- {WLAN_CIPHER_SUITE_CCMP, AES_ENABLED, CRYPTO_ALGO_AES_CCM},
- {WLAN_CIPHER_SUITE_AES_CMAC, AES_ENABLED, CRYPTO_ALGO_BIP},
-#ifdef BCMWAPI_WPI
- {WLAN_CIPHER_SUITE_SMS4, SMS4_ENABLED, CRYPTO_ALGO_SMS4},
-#endif /* BCMWAPI_WPI */
-#ifdef WL_GCMP
- {WLAN_CIPHER_SUITE_GCMP, AES_ENABLED, CRYPTO_ALGO_AES_GCM},
- {WLAN_CIPHER_SUITE_GCMP_256, AES_ENABLED, CRYPTO_ALGO_AES_GCM256},
- {WLAN_CIPHER_SUITE_BIP_GMAC_128, AES_ENABLED, CRYPTO_ALGO_BIP_GMAC},
- {WLAN_CIPHER_SUITE_BIP_GMAC_256, AES_ENABLED, CRYPTO_ALGO_BIP_GMAC256},
-#endif /* WL_GCMP */
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 0, 0))
- {WLAN_CIPHER_SUITE_BIP_CMAC_256, AES_ENABLED, CRYPTO_ALGO_BIP_CMAC256},
-#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 0, 0) */
-};
+#endif
-typedef struct rsn_akm_wpa_auth_entry {
- u32 akm_suite;
- u32 wpa_auth;
-} rsn_akm_wpa_auth_entry_t;
-
-static const rsn_akm_wpa_auth_entry_t rsn_akm_wpa_auth_lookup_tbl[] = {
-#ifdef WL_OWE
- {WLAN_AKM_SUITE_OWE, WPA3_AUTH_OWE},
-#endif /* WL_OWE */
- {WLAN_AKM_SUITE_8021X, WPA2_AUTH_UNSPECIFIED},
- {WL_AKM_SUITE_SHA256_1X, WPA2_AUTH_1X_SHA256},
- {WL_AKM_SUITE_SHA256_PSK, WPA2_AUTH_PSK_SHA256},
- {WLAN_AKM_SUITE_PSK, WPA2_AUTH_PSK},
- {WLAN_AKM_SUITE_FT_8021X, WPA2_AUTH_UNSPECIFIED | WPA2_AUTH_FT},
- {WLAN_AKM_SUITE_FT_PSK, WPA2_AUTH_PSK | WPA2_AUTH_FT},
- {WLAN_AKM_SUITE_FILS_SHA256, WPA2_AUTH_FILS_SHA256},
- {WLAN_AKM_SUITE_FILS_SHA384, WPA2_AUTH_FILS_SHA384},
- {WLAN_AKM_SUITE_8021X_SUITE_B, WPA3_AUTH_1X_SUITE_B_SHA256},
- {WLAN_AKM_SUITE_8021X_SUITE_B_192, WPA3_AUTH_1X_SUITE_B_SHA384},
-#ifdef BCMWAPI_WPI
- {WLAN_AKM_SUITE_WAPI_CERT, WAPI_AUTH_UNSPECIFIED},
- {WLAN_AKM_SUITE_WAPI_PSK, WAPI_AUTH_PSK},
-#endif /* BCMWAPI_WPI */
-#ifdef WL_SAE
- {WLAN_AKM_SUITE_SAE, WPA3_AUTH_SAE_PSK},
-#endif /* WL_SAE */
- {WLAN_AKM_SUITE_FT_8021X_SHA384, WPA3_AUTH_1X_SUITE_B_SHA384 | WPA2_AUTH_FT}
-};
+#ifdef CUSTOMER_HW4_DEBUG
+uint prev_dhd_console_ms = 0;
+u32 prev_wl_dbg_level = 0;
+bool wl_scan_timeout_dbg_enabled = 0;
+static void wl_scan_timeout_dbg_set(void);
+static void wl_scan_timeout_dbg_clear(void);
-#define BUFSZ 8
-#define BUFSZN BUFSZ + 1
+static void wl_scan_timeout_dbg_set(void)
+{
+ WL_ERR(("Enter \n"));
+ prev_dhd_console_ms = dhd_console_ms;
+ prev_wl_dbg_level = wl_dbg_level;
-#define _S(x) #x
-#define S(x) _S(x)
+ dhd_console_ms = 1;
+ wl_dbg_level |= (WL_DBG_ERR | WL_DBG_P2P_ACTION | WL_DBG_SCAN);
-#define SOFT_AP_IF_NAME "swlan0"
+ wl_scan_timeout_dbg_enabled = 1;
+}
+static void wl_scan_timeout_dbg_clear(void)
+{
+ WL_ERR(("Enter \n"));
+ dhd_console_ms = prev_dhd_console_ms;
+ wl_dbg_level = prev_wl_dbg_level;
+
+ wl_scan_timeout_dbg_enabled = 0;
+}
+#endif /* CUSTOMER_HW4_DEBUG */
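+/* The helpers above save dhd_console_ms and wl_dbg_level, then force
+ * dhd_console_ms to 1 and add the ERR/P2P_ACTION/SCAN debug bits;
+ * wl_scan_timeout_dbg_clear() restores the saved values. Presumably they
+ * bracket the scan-timeout handling path.
+ */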
/* watchdog timer for disconnecting when fw is not associated for FW_ASSOC_WATCHDOG_TIME ms */
uint32 fw_assoc_watchdog_ms = 0;
* work getting scheduled.
*/
if (delayed_work_pending(&cfg->pm_enable_work)) {
- cancel_delayed_work(&cfg->pm_enable_work);
+ cancel_delayed_work_sync(&cfg->pm_enable_work);
DHD_PM_WAKE_UNLOCK(cfg->pub);
}
if (wf_chspec_malformed(chspec)) {
WL_ERR(("wl_chspec_from_legacy: output chanspec (0x%04X) malformed\n",
- chspec));
+ chspec));
return INVCHANSPEC;
}
if (wf_chspec_malformed(chspec)) {
WL_ERR(("wl_chspec_to_legacy: input chanspec (0x%04X) malformed\n",
- chspec));
+ chspec));
return INVCHANSPEC;
}
/* cannot express the bandwidth */
char chanbuf[CHANSPEC_STR_LEN];
WL_ERR((
- "wl_chspec_to_legacy: unable to convert chanspec %s (0x%04X) "
- "to pre-11ac format\n",
- wf_chspec_ntoa(chspec, chanbuf), chspec));
+ "wl_chspec_to_legacy: unable to convert chanspec %s (0x%04X) "
+ "to pre-11ac format\n",
+ wf_chspec_ntoa(chspec, chanbuf), chspec));
return INVCHANSPEC;
}
return lchspec;
}
-bool wl_cfg80211_is_hal_started(struct bcm_cfg80211 *cfg)
-{
- return cfg->hal_started;
-}
-
/* given a chanspec value, do the endian and chanspec version conversion to
* a chanspec_t value
* Returns INVCHANSPEC on error
* Returns INVCHANSPEC on error
*/
chanspec_t
-wl_ch_host_to_driver(u16 channel)
+wl_ch_host_to_driver(struct bcm_cfg80211 *cfg, s32 bssidx, u16 channel)
{
chanspec_t chanspec;
- chanspec_band_t band;
- band = WL_CHANNEL_BAND(channel);
+ chanspec = channel & WL_CHANSPEC_CHAN_MASK;
- chanspec = wf_create_20MHz_chspec(channel, band);
- if (chanspec == INVCHANSPEC) {
- return chanspec;
- }
+ if (channel <= CH_MAX_2G_CHANNEL)
+ chanspec |= WL_CHANSPEC_BAND_2G;
+ else
+ chanspec |= WL_CHANSPEC_BAND_5G;
+
+ chanspec |= wl_cfg80211_ulb_get_min_bw_chspec(cfg, NULL, bssidx);
+
+ chanspec |= WL_CHANSPEC_CTL_SB_NONE;
return wl_chspec_host_to_driver(chanspec);
}
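+/* Worked example (sketch): for channel 6 this builds
+ * (6 & WL_CHANSPEC_CHAN_MASK) | WL_CHANSPEC_BAND_2G | <min BW from the ULB
+ * helper, 20 MHz unless ULB narrows it> | WL_CHANSPEC_CTL_SB_NONE, and then
+ * lets wl_chspec_host_to_driver() handle the endian/version conversion.
+ */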
char *c = NULL;
int count = 0;
- bzero(n, ETHER_ADDR_LEN);
+ memset(n, 0, ETHER_ADDR_LEN);
for (;;) {
n->octet[count++] = (uint8)simple_strtoul(a, &c, 16);
if (!*c++ || count == ETHER_ADDR_LEN)
/* There isn't a lot of sense in it, but you can transmit anything you like */
static const struct ieee80211_txrx_stypes
wl_cfg80211_default_mgmt_stypes[NUM_NL80211_IFTYPES] = {
-#ifdef WLMESH_CFG80211
+#ifdef WLMESH
[NL80211_IFTYPE_MESH_POINT] = {
.tx = 0xffff,
.rx = BIT(IEEE80211_STYPE_ACTION >> 4) |
BIT(IEEE80211_STYPE_AUTH >> 4)
},
-#endif /* WLMESH_CFG80211 */
+#endif /* WLMESH */
[NL80211_IFTYPE_ADHOC] = {
.tx = 0xffff,
.rx = BIT(IEEE80211_STYPE_ACTION >> 4)
.tx = 0xffff,
.rx = BIT(IEEE80211_STYPE_ACTION >> 4) |
BIT(IEEE80211_STYPE_PROBE_REQ >> 4)
-#ifdef WL_CLIENT_SAE
- | BIT(IEEE80211_STYPE_AUTH >> 4)
-#endif /* WL_CLIENT_SAE */
},
[NL80211_IFTYPE_AP] = {
.tx = 0xffff,
key->iv_initialized = dtoh32(key->iv_initialized);
}
-#if defined(WL_FW_OCE_AP_SELECT)
-bool static wl_cfg80211_is_oce_ap(struct wiphy *wiphy, const u8 *bssid_hint)
-{
- const u8 *parse = NULL;
- bcm_tlv_t *ie;
- const struct cfg80211_bss_ies *ies;
- u32 len;
- struct cfg80211_bss *bss;
-
- bss = CFG80211_GET_BSS(wiphy, NULL, bssid_hint, 0, 0);
- if (!bss) {
- WL_ERR(("Unable to find AP in the cache"));
- return false;
- }
-
- if (rcu_access_pointer(bss->ies)) {
- ies = rcu_access_pointer(bss->ies);
- parse = ies->data;
- len = ies->len;
- } else {
- WL_ERR(("ies is NULL"));
- return false;
- }
-
- while ((ie = bcm_parse_tlvs(parse, len, DOT11_MNG_VS_ID))) {
- if (wl_cfgoce_is_oce_ie((const uint8*)ie, (u8 const **)&parse, &len) == TRUE) {
- return true;
- } else {
- ie = bcm_next_tlv((const bcm_tlv_t*) ie, &len);
- if (!ie) {
- return false;
- }
- parse = (uint8 *)ie;
- WL_DBG(("NON OCE IE. next ie ptr:%p", parse));
- }
- }
- WL_DBG(("OCE IE NOT found"));
- return false;
-}
-#endif /* WL_FW_OCE_AP_SELECT */
-
/* Dump the contents of the encoded wps ie buffer and get pbc value */
static void
-wl_validate_wps_ie(const char *wps_ie, s32 wps_ie_len, bool *pbc)
+wl_validate_wps_ie(char *wps_ie, s32 wps_ie_len, bool *pbc)
{
#define WPS_IE_FIXED_LEN 6
s16 len;
- const u8 *subel = NULL;
+ u8 *subel = NULL;
u16 subelt_id;
u16 subelt_len;
u16 val;
valptr[1] = *(subel + 1);
WL_DBG((" attr WPS_ID_CONFIG_METHODS: %x\n", HTON16(val)));
} else if (subelt_id == WPS_ID_DEVICE_NAME) {
- char devname[33];
+ char devname[100];
int namelen = MIN(subelt_len, (sizeof(devname) - 1));
if (namelen) {
if (dbm > 0xffff)
dbm = 0xffff;
txpwrqdbm = dbm * 4;
-#ifdef SUPPORT_WL_TXPOWER
- if (type == NL80211_TX_POWER_AUTOMATIC)
- txpwrqdbm = 127;
- else
- txpwrqdbm |= WL_TXPWR_OVERRIDE;
-#endif /* SUPPORT_WL_TXPOWER */
err = wldev_iovar_setbuf_bsscfg(dev, "qtxpower", (void *)&txpwrqdbm,
sizeof(txpwrqdbm), cfg->ioctl_buf, WLC_IOCTL_SMLEN, 0,
&cfg->ioctl_buf_sync);
{
s32 err = 0;
s32 txpwrdbm;
- char ioctl_buf[WLC_IOCTL_SMLEN];
+ struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
err = wldev_iovar_getbuf_bsscfg(dev, "qtxpower",
- NULL, 0, ioctl_buf, WLC_IOCTL_SMLEN, 0, NULL);
+ NULL, 0, cfg->ioctl_buf, WLC_IOCTL_SMLEN, 0, &cfg->ioctl_buf_sync);
if (unlikely(err)) {
WL_ERR(("error (%d)\n", err));
return err;
}
- memcpy(&txpwrdbm, ioctl_buf, sizeof(txpwrdbm));
+ memcpy(&txpwrdbm, cfg->ioctl_buf, sizeof(txpwrdbm));
txpwrdbm = dtoh32(txpwrdbm);
*dbm = (txpwrdbm & ~WL_TXPWR_OVERRIDE) / 4;
- WL_DBG(("dBm=%d, txpwrdbm=0x%x\n", *dbm, txpwrdbm));
+ WL_INFORM(("dBm=%d, txpwrdbm=0x%x\n", *dbm, txpwrdbm));
return err;
}
struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
struct net_device *dev = bcmcfg_to_prmry_ndev(cfg);
struct ether_addr bssid;
- wl_bss_info_t *bss = NULL;
- u16 channel = WL_P2P_TEMP_CHAN;
+ struct wl_bss_info *bss = NULL;
+ s32 bssidx = 0; /* Explicitly set to primary bssidx */
char *buf;
- bzero(&bssid, sizeof(bssid));
+ memset(&bssid, 0, sizeof(bssid));
if ((err = wldev_ioctl_get(dev, WLC_GET_BSSID, &bssid, sizeof(bssid)))) {
/* STA interface is not associated. So start the new interface on a temp
* channel . Later proper channel will be applied by the above framework
err = wldev_ioctl_get(dev, WLC_GET_BAND, &cur_band, sizeof(int));
if (unlikely(err)) {
WL_ERR(("Get band failed\n"));
- } else if (cur_band == WLC_BAND_5G) {
- channel = WL_P2P_TEMP_CHAN_5G;
+ return wl_ch_host_to_driver(cfg, bssidx, WL_P2P_TEMP_CHAN);
}
- return wl_ch_host_to_driver(channel);
+ if (cur_band == WLC_BAND_5G)
+ return wl_ch_host_to_driver(cfg, bssidx, WL_P2P_TEMP_CHAN_5G);
+ else
+ return wl_ch_host_to_driver(cfg, bssidx, WL_P2P_TEMP_CHAN);
}
- buf = (char *)MALLOCZ(cfg->osh, WL_EXTRA_BUF_MAX);
+ buf = kzalloc(WL_EXTRA_BUF_MAX, GFP_KERNEL);
if (!buf) {
WL_ERR(("buf alloc failed. use temp channel\n"));
- return wl_ch_host_to_driver(channel);
+ return wl_ch_host_to_driver(cfg, bssidx, WL_P2P_TEMP_CHAN);
}
*(u32 *)buf = htod32(WL_EXTRA_BUF_MAX);
if ((err = wldev_ioctl_get(dev, WLC_GET_BSS_INFO, buf,
WL_EXTRA_BUF_MAX))) {
WL_ERR(("Failed to get associated bss info, use temp channel \n"));
- chspec = wl_ch_host_to_driver(channel);
+ chspec = wl_ch_host_to_driver(cfg, bssidx, WL_P2P_TEMP_CHAN);
}
else {
- bss = (wl_bss_info_t *) (buf + 4);
+ bss = (struct wl_bss_info *) (buf + 4);
chspec = bss->chanspec;
WL_DBG(("Valid BSS Found. chanspec:%d \n", chspec));
}
- MFREE(cfg->osh, buf, WL_EXTRA_BUF_MAX);
+ kfree(buf);
return chspec;
}
-static void
-wl_wlfc_enable(struct bcm_cfg80211 *cfg, bool enable)
+static bcm_struct_cfgdev *
+wl_cfg80211_add_monitor_if(const char *name)
+{
+#if defined(WL_ENABLE_P2P_IF) || defined(WL_CFG80211_P2P_DEV_IF)
+ WL_INFORM(("wl_cfg80211_add_monitor_if: No more support monitor interface\n"));
+ return ERR_PTR(-EOPNOTSUPP);
+#else
+ struct net_device* ndev = NULL;
+
+ dhd_add_monitor(name, &ndev);
+ WL_INFORM(("wl_cfg80211_add_monitor_if net device returned: 0x%p\n", ndev));
+ return ndev_to_cfgdev(ndev);
+#endif /* WL_ENABLE_P2P_IF || WL_CFG80211_P2P_DEV_IF */
+}
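+/* When either P2P interface option is compiled in, monitor interfaces are
+ * refused with -EOPNOTSUPP; otherwise dhd_add_monitor() supplies the netdev
+ * that is wrapped into a cfgdev for the caller.
+ */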
+
+static bcm_struct_cfgdev *
+wl_cfg80211_add_virtual_iface(struct wiphy *wiphy,
+#if defined(WL_CFG80211_P2P_DEV_IF)
+ const char *name,
+#else
+ char *name,
+#endif /* WL_CFG80211_P2P_DEV_IF */
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 1, 0))
+ unsigned char name_assign_type,
+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 1, 0)) */
+ enum nl80211_iftype type,
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 12, 0))
+ u32 *flags,
+#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(4, 12, 0)) */
+ struct vif_params *params)
{
+ s32 err = -ENODEV;
+ s32 timeout = -1;
+ s32 wlif_type = -1;
+ s32 mode = 0;
+ s32 val = 0;
+ s32 cfg_type;
+ s32 dhd_mode = 0;
+ chanspec_t chspec;
+ struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+ struct net_device *primary_ndev;
+ struct net_device *new_ndev;
+ struct ether_addr primary_mac;
+ bcm_struct_cfgdev *new_cfgdev;
#ifdef PROP_TXSTATUS_VSDB
#if defined(BCMSDIO) || defined(BCMDBUS)
- bool wlfc_enabled = FALSE;
- s32 err;
+ s32 up = 1;
+ bool enabled;
+#endif /* BCMSDIO || BCMDBUS */
+#endif /* PROP_TXSTATUS_VSDB */
dhd_pub_t *dhd;
- struct net_device *primary_ndev = bcmcfg_to_prmry_ndev(cfg);
+ bool hang_required = false;
- dhd = (dhd_pub_t *)(cfg->pub);
- if (!dhd) {
- return;
- }
+ if (!cfg)
+ return ERR_PTR(-EINVAL);
- if (enable) {
- if (!cfg->wlfc_on && !disable_proptx) {
- dhd_wlfc_get_enable(dhd, &wlfc_enabled);
- if (!wlfc_enabled && dhd->op_mode != DHD_FLAG_HOSTAP_MODE &&
- dhd->op_mode != DHD_FLAG_IBSS_MODE) {
- dhd_wlfc_init(dhd);
- err = wldev_ioctl_set(primary_ndev, WLC_UP, &up, sizeof(s32));
- if (err < 0)
- WL_ERR(("WLC_UP return err:%d\n", err));
- }
- cfg->wlfc_on = true;
- WL_DBG(("wlfc_on:%d \n", cfg->wlfc_on));
- }
- } else if (dhd->conf->disable_proptx != 0){
- dhd_wlfc_deinit(dhd);
- cfg->wlfc_on = false;
- }
-#endif /* BCMSDIO || BCMDBUS */
-#endif /* PROP_TXSTATUS_VSDB */
-}
+ dhd = (dhd_pub_t *)(cfg->pub);
-struct wireless_dev *
-wl_cfg80211_p2p_if_add(struct bcm_cfg80211 *cfg,
- wl_iftype_t wl_iftype,
- char const *name, u8 *mac_addr, s32 *ret_err)
-{
- u16 chspec;
- s16 cfg_type;
- long timeout;
- s32 err;
- u16 p2p_iftype;
- int dhd_mode;
- struct net_device *new_ndev = NULL;
- struct wiphy *wiphy = bcmcfg_to_wiphy(cfg);
- struct ether_addr *p2p_addr;
+ /* Use primary I/F for sending cmds down to firmware */
+ primary_ndev = bcmcfg_to_prmry_ndev(cfg);
- *ret_err = BCME_OK;
- if (!cfg->p2p) {
- WL_ERR(("p2p not initialized\n"));
- return NULL;
+ if (unlikely(!wl_get_drv_status(cfg, READY, primary_ndev))) {
+ WL_ERR(("device is not ready\n"));
+ return ERR_PTR(-ENODEV);
}
-#if defined(WL_CFG80211_P2P_DEV_IF)
- if (wl_iftype == WL_IF_TYPE_P2P_DISC) {
- /* Handle Dedicated P2P discovery Interface */
- return wl_cfgp2p_add_p2p_disc_if(cfg);
- }
-#endif /* WL_CFG80211_P2P_DEV_IF */
-
- if (wl_iftype == WL_IF_TYPE_P2P_GO) {
- p2p_iftype = WL_P2P_IF_GO;
- } else {
- p2p_iftype = WL_P2P_IF_CLIENT;
+ if (!name) {
+ WL_ERR(("Interface name not provided \n"));
+ return ERR_PTR(-EINVAL);
}
- /* Dual p2p doesn't support multiple P2PGO interfaces,
- * p2p_go_count is the counter for GO creation
- * requests.
- */
- if ((cfg->p2p->p2p_go_count > 0) && (wl_iftype == WL_IF_TYPE_P2P_GO)) {
- WL_ERR(("FW does not support multiple GO\n"));
- *ret_err = -ENOTSUPP;
- return NULL;
- }
- if (!cfg->p2p->on) {
- p2p_on(cfg) = true;
- wl_cfgp2p_set_firm_p2p(cfg);
- wl_cfgp2p_init_discovery(cfg);
- }
+#ifdef WLTDLS
+ /* disable TDLS if number of connected interfaces is >= 1 */
+ wl_cfg80211_tdls_config(cfg, TDLS_STATE_IF_CREATE, false);
+#endif /* WLTDLS */
- strlcpy(cfg->p2p->vir_ifname, name, sizeof(cfg->p2p->vir_ifname));
- /* In concurrency case, STA may be already associated in a particular channel.
- * so retrieve the current channel of primary interface and then start the virtual
- * interface on that.
- */
- chspec = wl_cfg80211_get_shared_freq(wiphy);
+ mutex_lock(&cfg->if_sync);
+ WL_DBG(("if name: %s, type: %d\n", name, type));
+ switch (type) {
+ case NL80211_IFTYPE_ADHOC:
+#ifdef WLAIBSS_MCHAN
+ new_cfgdev = bcm_cfg80211_add_ibss_if(wiphy, (char *)name);
+ mutex_unlock(&cfg->if_sync);
+ return new_cfgdev;
+#endif /* WLAIBSS_MCHAN */
+ case NL80211_IFTYPE_AP_VLAN:
+ case NL80211_IFTYPE_WDS:
+ case NL80211_IFTYPE_MESH_POINT:
+ WL_ERR(("Unsupported interface type\n"));
+ mode = WL_MODE_IBSS;
+ err = -EINVAL;
+ goto fail;
+ case NL80211_IFTYPE_MONITOR:
+ new_cfgdev = wl_cfg80211_add_monitor_if(name);
+ mutex_unlock(&cfg->if_sync);
+ return new_cfgdev;
+#if defined(WL_CFG80211_P2P_DEV_IF)
+ case NL80211_IFTYPE_P2P_DEVICE:
+ cfg->down_disc_if = FALSE;
+ new_cfgdev = wl_cfgp2p_add_p2p_disc_if(cfg);
+ mutex_unlock(&cfg->if_sync);
+ return new_cfgdev;
+#endif /* WL_CFG80211_P2P_DEV_IF */
+ case NL80211_IFTYPE_STATION:
+#ifdef WL_VIRTUAL_APSTA
+#ifdef WLAIBSS_MCHAN
+ if (cfg->ibss_cfgdev) {
+ WL_ERR(("AIBSS is already operational. "
+ " AIBSS & DUALSTA can't be used together \n"));
+ err = -ENOMEM;
+ goto fail;
+ }
+#endif /* WLAIBSS_MCHAN */
- /* For P2P mode, use P2P-specific driver features to create the
- * bss: "cfg p2p_ifadd"
- */
- wl_set_p2p_status(cfg, IF_ADDING);
- bzero(&cfg->if_event_info, sizeof(cfg->if_event_info));
- cfg_type = wl_cfgp2p_get_conn_idx(cfg);
- if (cfg_type == BCME_ERROR) {
- wl_clr_p2p_status(cfg, IF_ADDING);
- WL_ERR(("Failed to get connection idx for p2p interface\n"));
- return NULL;
+ if (wl_cfgp2p_vif_created(cfg)) {
+ WL_ERR(("Could not create new iface."
+ "Already one p2p interface is running"));
+ err = -ENOMEM;
+ goto fail;
+ }
+ new_cfgdev = wl_cfg80211_create_iface(cfg->wdev->wiphy,
+ NL80211_IFTYPE_STATION, NULL, name);
+ if (!new_cfgdev) {
+ err = -ENOMEM;
+ goto fail;
+ } else {
+ mutex_unlock(&cfg->if_sync);
+ return new_cfgdev;
+ }
+#endif /* WL_VIRTUAL_APSTA */
+ case NL80211_IFTYPE_P2P_CLIENT:
+ wlif_type = WL_P2P_IF_CLIENT;
+ mode = WL_MODE_BSS;
+ break;
+ case NL80211_IFTYPE_P2P_GO:
+ wlif_type = WL_P2P_IF_GO;
+ mode = WL_MODE_AP;
+ break;
+#ifdef WL_VIRTUAL_APSTA
+ case NL80211_IFTYPE_AP:
+ dhd->op_mode = DHD_FLAG_HOSTAP_MODE;
+ new_cfgdev = wl_cfg80211_create_iface(cfg->wdev->wiphy,
+ NL80211_IFTYPE_AP, NULL, name);
+ if (!new_cfgdev) {
+ err = -ENOMEM;
+ goto fail;
+ } else {
+ mutex_unlock(&cfg->if_sync);
+ return new_cfgdev;
+ }
+#endif /* WL_VIRTUAL_APSTA */
+ default:
+ WL_ERR(("Unsupported interface type\n"));
+ err = -EINVAL;
+ goto fail;
}
- p2p_addr = wl_to_p2p_bss_macaddr(cfg, cfg_type);
- memcpy(p2p_addr->octet, mac_addr, ETH_ALEN);
-
- err = wl_cfgp2p_ifadd(cfg, p2p_addr,
- htod32(p2p_iftype), chspec);
- if (unlikely(err)) {
- wl_clr_p2p_status(cfg, IF_ADDING);
- WL_ERR((" virtual iface add failed (%d) \n", err));
- return NULL;
- }
+ if (cfg->p2p_supported && (wlif_type != -1)) {
+ ASSERT(cfg->p2p); /* ensure expectation of p2p initialization */
- /* Wait for WLC_E_IF event with IF_ADD opcode */
- timeout = wait_event_interruptible_timeout(cfg->netif_change_event,
- ((wl_get_p2p_status(cfg, IF_ADDING) == false) &&
- (cfg->if_event_info.valid)),
- msecs_to_jiffies(MAX_WAIT_TIME));
- if (timeout > 0 && !wl_get_p2p_status(cfg, IF_ADDING) && cfg->if_event_info.valid) {
- wl_if_event_info *event = &cfg->if_event_info;
- new_ndev = wl_cfg80211_post_ifcreate(bcmcfg_to_prmry_ndev(cfg), event,
- event->mac, cfg->p2p->vir_ifname, false);
- if (unlikely(!new_ndev)) {
+#ifdef PROP_TXSTATUS_VSDB
+#if defined(BCMSDIO)
+ if (!dhd) {
+ err = -EINVAL;
goto fail;
}
+#endif
+#endif /* PROP_TXSTATUS_VSDB */
- if (wl_iftype == WL_IF_TYPE_P2P_GO) {
- cfg->p2p->p2p_go_count++;
+ if (!cfg->p2p) {
+ WL_ERR(("Failed to start p2p"));
+ err = -ENODEV;
+ goto fail;
}
- /* Fill p2p specific data */
- wl_to_p2p_bss_ndev(cfg, cfg_type) = new_ndev;
- wl_to_p2p_bss_bssidx(cfg, cfg_type) = event->bssidx;
- WL_ERR((" virtual interface(%s) is "
- "created net attach done\n", cfg->p2p->vir_ifname));
- dhd_mode = (wl_iftype == WL_IF_TYPE_P2P_GC) ?
- DHD_FLAG_P2P_GC_MODE : DHD_FLAG_P2P_GO_MODE;
- DNGL_FUNC(dhd_cfg80211_set_p2p_info, (cfg, dhd_mode));
- /* reinitialize completion to clear previous count */
-#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 13, 0))
- INIT_COMPLETION(cfg->iface_disable);
-#else
- init_completion(&cfg->iface_disable);
-#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(3, 13, 0) */
+ if (cfg->p2p && !cfg->p2p->on && strstr(name, WL_P2P_INTERFACE_PREFIX)) {
+ p2p_on(cfg) = true;
+ wl_cfgp2p_set_firm_p2p(cfg);
+ wl_cfgp2p_init_discovery(cfg);
+ get_primary_mac(cfg, &primary_mac);
+ wl_cfgp2p_generate_bss_mac(cfg, &primary_mac);
+ }
- return new_ndev->ieee80211_ptr;
- }
+ strncpy(cfg->p2p->vir_ifname, name, IFNAMSIZ - 1);
+ cfg->p2p->vir_ifname[IFNAMSIZ - 1] = '\0';
-fail:
- return NULL;
-}
+ wl_cfg80211_scan_abort(cfg);
+#ifdef PROP_TXSTATUS_VSDB
+#if defined(BCMSDIO) || defined(BCMDBUS)
+ if (!cfg->wlfc_on && !disable_proptx) {
+ dhd_wlfc_get_enable(dhd, &enabled);
+ if (!enabled && dhd->op_mode != DHD_FLAG_HOSTAP_MODE &&
+ dhd->op_mode != DHD_FLAG_IBSS_MODE) {
+ dhd_wlfc_init(dhd);
+ err = wldev_ioctl_set(primary_ndev, WLC_UP, &up, sizeof(s32));
+ if (err < 0)
+ WL_ERR(("WLC_UP return err:%d\n", err));
+ }
+ cfg->wlfc_on = true;
+ }
+#endif /* BCMSDIO || BCMDBUS */
+#endif /* PROP_TXSTATUS_VSDB */
-bool
-wl_cfg80211_check_vif_in_use(struct net_device *ndev)
-{
- struct bcm_cfg80211 *cfg = wl_get_cfg(ndev);
- dhd_pub_t *dhd = (dhd_pub_t *)(cfg->pub);
- bool nan_enabled = FALSE;
+ /* Dual p2p doesn't support multiple P2PGO interfaces,
+ * p2p_go_count is the counter for GO creation
+ * requests.
+ */
+ if ((cfg->p2p->p2p_go_count > 0) && (type == NL80211_IFTYPE_P2P_GO)) {
+ WL_ERR(("Fw does not support multiple Go\n"));
+ err = -ENOTSUPP;
+ goto fail;
+ }
+ /* In concurrency case, STA may be already associated in a particular channel.
+ * so retrieve the current channel of primary interface and then start the virtual
+ * interface on that.
+ */
+ chspec = wl_cfg80211_get_shared_freq(wiphy);
-#ifdef WL_NAN
- nan_enabled = cfg->nan_enable;
-#endif /* WL_NAN */
+ /* For P2P mode, use P2P-specific driver features to create the
+ * bss: "cfg p2p_ifadd"
+ */
+ wl_set_p2p_status(cfg, IF_ADDING);
+ memset(&cfg->if_event_info, 0, sizeof(cfg->if_event_info));
+ cfg_type = wl_cfgp2p_get_conn_idx(cfg);
+ if (cfg_type == BCME_ERROR) {
+ wl_clr_p2p_status(cfg, IF_ADDING);
+ WL_ERR(("Failed to get connection idx for p2p interface\n"));
+ err = -ENOTSUPP;
+ goto fail;
+ }
+ err = wl_cfgp2p_ifadd(cfg, wl_to_p2p_bss_macaddr(cfg, cfg_type),
+ htod32(wlif_type), chspec);
+ if (unlikely(err)) {
+ wl_clr_p2p_status(cfg, IF_ADDING);
+ WL_ERR((" virtual iface add failed (%d) \n", err));
+ err = -ENOMEM;
+ goto fail;
+ }
- if (nan_enabled || (wl_cfgp2p_vif_created(cfg)) ||
- (dhd->op_mode & DHD_FLAG_HOSTAP_MODE)) {
- WL_MEM(("%s: Virtual interfaces in use. NAN %d P2P %d softAP %d\n",
- __FUNCTION__, nan_enabled, wl_cfgp2p_vif_created(cfg),
- (dhd->op_mode & DHD_FLAG_HOSTAP_MODE)));
- return TRUE;
- }
+ timeout = wait_event_interruptible_timeout(cfg->netif_change_event,
+ ((wl_get_p2p_status(cfg, IF_ADDING) == false) &&
+ (cfg->if_event_info.valid)),
+ msecs_to_jiffies(MAX_WAIT_TIME));
- return FALSE;
-}
+ if (timeout > 0 && !wl_get_p2p_status(cfg, IF_ADDING) && cfg->if_event_info.valid) {
+ struct wireless_dev *vwdev;
+ int pm_mode = PM_ENABLE;
+ wl_if_event_info *event = &cfg->if_event_info;
+ /* IF_ADD event has come back, we can proceed to register
+ * the new interface now, using the interface name provided by the caller
+ * (thus ignoring the one from wlc)
+ */
+ new_ndev = wl_cfg80211_allocate_if(cfg, event->ifidx, cfg->p2p->vir_ifname,
+ event->mac, event->bssidx, event->name);
+ if (new_ndev == NULL) {
+ err = -ENODEV;
+ goto fail;
+ }
-void
-wl_cfg80211_iface_state_ops(struct wireless_dev *wdev,
- wl_interface_state_t state,
- wl_iftype_t wl_iftype, u16 wl_mode)
-{
- struct net_device *ndev;
- struct bcm_cfg80211 *cfg;
- dhd_pub_t *dhd;
- s32 bssidx;
+ wl_to_p2p_bss_ndev(cfg, cfg_type) = new_ndev;
+ wl_to_p2p_bss_bssidx(cfg, cfg_type) = event->bssidx;
+ vwdev = kzalloc(sizeof(*vwdev), GFP_KERNEL);
+ if (unlikely(!vwdev)) {
+ WL_ERR(("Could not allocate wireless device\n"));
+ err = -ENOMEM;
+ goto fail;
+ }
+ vwdev->wiphy = cfg->wdev->wiphy;
+ WL_INFORM(("virtual interface(%s) is created\n", cfg->p2p->vir_ifname));
+ if (type == NL80211_IFTYPE_P2P_GO) {
+ cfg->p2p->p2p_go_count++;
+ }
+ vwdev->iftype = type;
+ vwdev->netdev = new_ndev;
+ new_ndev->ieee80211_ptr = vwdev;
+ SET_NETDEV_DEV(new_ndev, wiphy_dev(vwdev->wiphy));
+ wl_set_drv_status(cfg, READY, new_ndev);
+ if (wl_config_ifmode(cfg, new_ndev, type) < 0) {
+ WL_ERR(("conf ifmode failed\n"));
+ kfree(vwdev);
+ err = -ENOTSUPP;
+ goto fail;
+ }
- WL_DBG(("state:%s wl_iftype:%d mode:%d\n",
- wl_if_state_strs[state], wl_iftype, wl_mode));
- if (!wdev) {
- WL_ERR(("wdev null\n"));
- return;
- }
+ if (wl_cfg80211_register_if(cfg,
+ event->ifidx, new_ndev, FALSE) != BCME_OK) {
+ wl_cfg80211_remove_if(cfg, event->ifidx, new_ndev, FALSE);
+ err = -ENODEV;
+ goto fail;
+ }
+ err = wl_alloc_netinfo(cfg, new_ndev, vwdev, mode, pm_mode, event->bssidx);
+ if (unlikely(err != 0)) {
+ wl_cfg80211_remove_if(cfg, event->ifidx, new_ndev, FALSE);
+ WL_ERR(("Allocation of netinfo failed (%d) \n", err));
+ goto fail;
+ }
+ val = 1;
+ /* Disable firmware roaming for P2P interface */
+ wldev_iovar_setint(new_ndev, "roam_off", val);
+ wldev_iovar_setint(new_ndev, "bcn_timeout", dhd->conf->bcn_timeout);
+#ifdef WL11ULB
+ if (cfg->p2p_wdev && is_p2p_group_iface(new_ndev->ieee80211_ptr)) {
+ u32 ulb_bw = wl_cfg80211_get_ulb_bw(cfg, cfg->p2p_wdev);
+ if (ulb_bw) {
+ /* Apply ULB BW settings on the newly spawned interface */
+ WL_DBG(("[ULB] Applying ULB BW for the newly"
+ "created P2P interface \n"));
+ if (wl_cfg80211_set_ulb_bw(new_ndev,
+ ulb_bw, new_ndev->name) < 0) {
+ /*
+ * If ulb_bw set failed, fail the iface creation.
+ * wl_dealloc_netinfo_by_wdev will be called by the
+ * unregister notifier.
+ */
+ wl_cfg80211_remove_if(cfg,
+ event->ifidx, new_ndev, FALSE);
+ err = -EINVAL;
+ goto fail;
+ }
+ }
+ }
+#endif /* WL11ULB */
- if ((wl_iftype == WL_IF_TYPE_P2P_DISC) || (wl_iftype == WL_IF_TYPE_NAN_NMI)) {
- /* P2P discovery is a netless device and uses a
- * hidden bsscfg interface in fw. Don't apply the
- * iface ops state changes for p2p discovery I/F.
- * NAN NMI is netless device and uses a hidden bsscfg interface in fw.
- * Don't apply iface ops state changes for NMI I/F.
- */
- return;
- }
+ if (mode != WL_MODE_AP)
+ wldev_iovar_setint(new_ndev, "buf_key_b4_m4", 1);
- cfg = wiphy_priv(wdev->wiphy);
- ndev = wdev->netdev;
- dhd = (dhd_pub_t *)(cfg->pub);
+ WL_ERR((" virtual interface(%s) is "
+ "created net attach done\n", cfg->p2p->vir_ifname));
+ if (mode == WL_MODE_AP)
+ wl_set_drv_status(cfg, CONNECTED, new_ndev);
+#ifdef SUPPORT_AP_POWERSAVE
+ if (mode == WL_MODE_AP) {
+ dhd_set_ap_powersave(dhd, 0, TRUE);
+ }
+#endif /* SUPPORT_AP_POWERSAVE */
+ if (type == NL80211_IFTYPE_P2P_CLIENT)
+ dhd_mode = DHD_FLAG_P2P_GC_MODE;
+ else if (type == NL80211_IFTYPE_P2P_GO)
+ dhd_mode = DHD_FLAG_P2P_GO_MODE;
+ DNGL_FUNC(dhd_cfg80211_set_p2p_info, (cfg, dhd_mode));
+ /* reinitialize completion to clear previous count */
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 13, 0))
+ INIT_COMPLETION(cfg->iface_disable);
+#else
+ init_completion(&cfg->iface_disable);
+#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(3, 13, 0) */
+#ifdef SUPPORT_SET_CAC
+ wl_cfg80211_set_cac(cfg, 0);
+#endif /* SUPPORT_SET_CAC */
+ mutex_unlock(&cfg->if_sync);
+ return ndev_to_cfgdev(new_ndev);
+ } else {
+ wl_clr_p2p_status(cfg, IF_ADDING);
+ WL_ERR((" virtual interface(%s) is not created \n", cfg->p2p->vir_ifname));
- bssidx = wl_get_bssidx_by_wdev(cfg, wdev);
- if (!ndev || (bssidx < 0)) {
- WL_ERR(("ndev null. skip iface state ops\n"));
- return;
- }
+ WL_ERR(("left timeout : %d\n", timeout));
+ WL_ERR(("IF_ADDING status : %d\n", wl_get_p2p_status(cfg, IF_ADDING)));
+ WL_ERR(("event valid : %d\n", cfg->if_event_info.valid));
- switch (state) {
- case WL_IF_CREATE_REQ:
-#ifdef WL_BCNRECV
- /* check fakeapscan in progress then abort */
- wl_android_bcnrecv_stop(ndev, WL_BCNRECV_CONCURRENCY);
-#endif /* WL_BCNRECV */
- wl_cfg80211_scan_abort(cfg);
- wl_wlfc_enable(cfg, true);
-#ifdef WLTDLS
- /* disable TDLS if number of connected interfaces is >= 1 */
- wl_cfg80211_tdls_config(cfg, TDLS_STATE_IF_CREATE, false);
-#endif /* WLTDLS */
- break;
- case WL_IF_DELETE_REQ:
-#ifdef WL_WPS_SYNC
- wl_wps_handle_ifdel(ndev);
-#endif /* WPS_SYNC */
- if (wl_get_drv_status(cfg, SCANNING, ndev)) {
- /* Send completion for any pending scans */
- wl_cfg80211_cancel_scan(cfg);
+ wl_clr_p2p_status(cfg, GO_NEG_PHASE);
+ wl_set_p2p_status(cfg, IF_DELETING);
+
+ hang_required = true;
+ if ((err = wl_cfgp2p_ifdel(cfg,
+ wl_to_p2p_bss_macaddr(cfg,
+ cfg_type))) == BCME_OK) {
+ timeout = wait_event_interruptible_timeout(cfg->netif_change_event,
+ ((wl_get_p2p_status(cfg, IF_DELETING) == false) &&
+ (cfg->if_event_info.valid)),
+ msecs_to_jiffies(MAX_WAIT_TIME));
+ if (timeout > 0 && !wl_get_p2p_status(cfg, IF_DELETING) &&
+ cfg->if_event_info.valid) {
+ hang_required = false;
+ WL_ERR(("IFDEL operation done\n"));
+ } else {
+ WL_ERR(("IFDEL didn't complete properly\n"));
+ }
+ err = -ENODEV;
+#ifdef SUPPORT_SET_CAC
+ wl_cfg80211_set_cac(cfg, 1);
+#endif /* SUPPORT_SET_CAC */
+ } else {
+ WL_ERR(("IFDEL operation failed, error code = %d\n", err));
}
-#ifdef CUSTOM_SET_CPUCORE
- dhd->chan_isvht80 &= ~DHD_FLAG_P2P_MODE;
- if (!(dhd->chan_isvht80)) {
- dhd_set_cpucore(dhd, FALSE);
- }
-#endif /* CUSTOM_SET_CPUCORE */
- wl_add_remove_pm_enable_work(cfg, WL_PM_WORKQ_DEL);
- break;
- case WL_IF_CREATE_DONE:
- if (wl_mode == WL_MODE_BSS) {
- /* Common code for sta type interfaces - STA, GC */
- wldev_iovar_setint(ndev, "buf_key_b4_m4", 1);
- }
- if (wl_iftype == WL_IF_TYPE_P2P_GC) {
- /* Disable firmware roaming for P2P interface */
- wldev_iovar_setint(ndev, "roam_off", 1);
- wldev_iovar_setint(ndev, "bcn_timeout", dhd->conf->bcn_timeout);
- }
- if (wl_mode == WL_MODE_AP) {
- /* Common code for AP/GO */
+ memset(cfg->p2p->vir_ifname, '\0', IFNAMSIZ);
+ wl_to_p2p_bss_bssidx(cfg, cfg_type) = -1;
+#ifdef PROP_TXSTATUS_VSDB
+#if defined(BCMSDIO) || defined(BCMDBUS)
+ dhd_wlfc_get_enable(dhd, &enabled);
+ if (enabled && cfg->wlfc_on && dhd->op_mode != DHD_FLAG_HOSTAP_MODE &&
+ dhd->op_mode != DHD_FLAG_IBSS_MODE && dhd->conf->disable_proptx != 0) {
+ dhd_wlfc_deinit(dhd);
+ cfg->wlfc_on = false;
}
- break;
- case WL_IF_DELETE_DONE:
+#endif /* BCMSDIO || BCMDBUS */
+#endif /* PROP_TXSTATUS_VSDB */
+ }
+ }
+
+fail:
+ mutex_unlock(&cfg->if_sync);
+ if (err) {
#ifdef WLTDLS
- /* Enable back TDLS if connected interface is <= 1 */
- wl_cfg80211_tdls_config(cfg, TDLS_STATE_IF_DELETE, false);
+ /* Enable back TDLS on failure */
+ wl_cfg80211_tdls_config(cfg, TDLS_STATE_IF_DELETE, false);
#endif /* WLTDLS */
- wl_wlfc_enable(cfg, false);
- break;
- case WL_IF_CHANGE_REQ:
- /* Flush existing IEs from firmware on role change */
- wl_cfg80211_clear_per_bss_ies(cfg, wdev);
- break;
- case WL_IF_CHANGE_DONE:
- if (wl_mode == WL_MODE_BSS) {
- /* Enable buffering of PTK key till EAPOL 4/4 is sent out */
- wldev_iovar_setint(ndev, "buf_key_b4_m4", 1);
+ if (err != -ENOTSUPP) {
+#if defined(BCMPCIE) && defined(DHD_FW_COREDUMP)
+ if (dhd->memdump_enabled) {
+ /* Load the dongle side dump to host
+ * memory and then BUG_ON()
+ */
+ dhd->memdump_type = DUMP_TYPE_HANG_ON_IFACE_OP_FAIL;
+ dhd_bus_mem_dump(dhd);
}
- break;
-
- default:
- WL_ERR(("Unsupported state: %d\n", state));
- return;
+#endif /* BCMPCIE && DHD_FW_COREDUMP */
+ if (hang_required) {
+ /* Notify the user space to recover */
+ struct net_device *ndev = bcmcfg_to_prmry_ndev(cfg);
+ WL_ERR(("if add failed, error %d, sent HANG event to %s\n",
+ err, ndev->name));
+ dhd->hang_reason = HANG_REASON_IFACE_OP_FAILURE;
+ net_os_send_hang_message(ndev);
+ }
+ }
}
+ return ERR_PTR(err);
}
static s32
-wl_cfg80211_p2p_if_del(struct wiphy *wiphy, struct wireless_dev *wdev)
+wl_cfg80211_del_virtual_iface(struct wiphy *wiphy, bcm_struct_cfgdev *cfgdev)
{
+ struct net_device *dev = NULL;
+ struct ether_addr p2p_mac;
struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
- s16 bssidx;
- s16 err;
- s32 cfg_type;
- struct net_device *ndev;
- long timeout;
+ s32 timeout = -1;
+ s32 ret = 0;
+ s32 index = -1;
+ s32 type = -1;
+#if defined(CUSTOM_SET_CPUCORE) || defined(DHD_HANG_SEND_UP_TEST)
+ dhd_pub_t *dhd = (dhd_pub_t *)(cfg->pub);
+#endif /* CUSTOM_SET_CPUCORE || DHD_HANG_SEND_UP_TEST */
+ WL_DBG(("Enter\n"));
- if (unlikely(!wl_get_drv_status(cfg, READY, bcmcfg_to_prmry_ndev(cfg)))) {
- WL_INFORM_MEM(("device is not ready\n"));
- return BCME_NOTFOUND;
- }
+ memset(&p2p_mac, 0, sizeof(struct ether_addr));
+#ifdef CUSTOM_SET_CPUCORE
+ dhd->chan_isvht80 &= ~DHD_FLAG_P2P_MODE;
+ if (!(dhd->chan_isvht80))
+ dhd_set_cpucore(dhd, FALSE);
+#endif /* CUSTOM_SET_CPUCORE */
+ mutex_lock(&cfg->if_sync);
#ifdef WL_CFG80211_P2P_DEV_IF
- if (wdev->iftype == NL80211_IFTYPE_P2P_DEVICE) {
- /* Handle dedicated P2P discovery interface. */
- return wl_cfgp2p_del_p2p_disc_if(wdev, cfg);
+ if (cfgdev->iftype == NL80211_IFTYPE_P2P_DEVICE) {
+ if (dhd_download_fw_on_driverload) {
+ ret = wl_cfgp2p_del_p2p_disc_if(cfgdev, cfg);
+ } else {
+ cfg->down_disc_if = TRUE;
+ ret = 0;
+ }
+ mutex_unlock(&cfg->if_sync);
+ return ret;
}
#endif /* WL_CFG80211_P2P_DEV_IF */
+ dev = cfgdev_to_wlc_ndev(cfgdev, cfg);
- /* Handle P2P Group Interface */
- bssidx = wl_get_bssidx_by_wdev(cfg, wdev);
- if (bssidx <= 0) {
- WL_ERR(("bssidx not found\n"));
- return BCME_NOTFOUND;
- }
- if (wl_cfgp2p_find_type(cfg, bssidx, &cfg_type) != BCME_OK) {
- /* Couldn't find matching iftype */
- WL_MEM(("non P2P interface\n"));
- return BCME_NOTFOUND;
+#ifdef WLAIBSS_MCHAN
+ if (cfgdev == cfg->ibss_cfgdev) {
+ ret = bcm_cfg80211_del_ibss_if(wiphy, cfgdev);
+ goto done;
}
+#endif /* WLAIBSS_MCHAN */
- ndev = wdev->netdev;
- wl_clr_p2p_status(cfg, GO_NEG_PHASE);
- wl_clr_p2p_status(cfg, IF_ADDING);
+ if ((index = wl_get_bssidx_by_wdev(cfg, cfgdev_to_wdev(cfgdev))) < 0) {
+ WL_ERR(("Find p2p index from wdev failed\n"));
+ ret = -ENODEV;
+ goto done;
+ }
+ if ((cfg->p2p_supported) && index && (wl_cfgp2p_find_type(cfg, index, &type) == BCME_OK)) {
+		/* Handle P2P interface del */
+ memcpy(p2p_mac.octet, wl_to_p2p_bss_macaddr(cfg, type).octet, ETHER_ADDR_LEN);
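+		/* p2p_mac now holds the P2P BSS MAC used for the ifdisable/ifdel calls below */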
- /* for GO */
- if (wl_get_mode_by_netdev(cfg, ndev) == WL_MODE_AP) {
- wl_add_remove_eventmsg(ndev, WLC_E_PROBREQ_MSG, false);
- cfg->p2p->p2p_go_count--;
- /* disable interface before bsscfg free */
- err = wl_cfgp2p_ifdisable(cfg, wl_to_p2p_bss_macaddr(cfg, cfg_type));
- /* if fw doesn't support "ifdis",
- do not wait for link down of ap mode
+ /* Clear GO_NEG_PHASE bit to take care of GO-NEG-FAIL cases
*/
- if (err == 0) {
- WL_ERR(("Wait for Link Down event for GO !!!\n"));
- wait_for_completion_timeout(&cfg->iface_disable,
- msecs_to_jiffies(500));
- } else if (err != BCME_UNSUPPORTED) {
- msleep(300);
- }
- } else {
- /* GC case */
- if (wl_get_drv_status(cfg, DISCONNECTING, ndev)) {
- WL_ERR(("Wait for Link Down event for GC !\n"));
- wait_for_completion_timeout
+ WL_DBG(("P2P: GO_NEG_PHASE status cleared "));
+ wl_clr_p2p_status(cfg, GO_NEG_PHASE);
+ if (wl_cfgp2p_vif_created(cfg)) {
+ if (wl_get_drv_status(cfg, SCANNING, dev)) {
+ wl_notify_escan_complete(cfg, dev, true, true);
+ }
+ /* Delete pm_enable_work */
+ wl_add_remove_pm_enable_work(cfg, WL_PM_WORKQ_DEL);
+
+ /* for GC */
+ if (wl_get_drv_status(cfg, DISCONNECTING, dev) &&
+ (wl_get_mode_by_netdev(cfg, dev) != WL_MODE_AP)) {
+ WL_ERR(("Wait for Link Down event for GC !\n"));
+ wait_for_completion_timeout
(&cfg->iface_disable, msecs_to_jiffies(500));
- }
- }
+ }
- bzero(&cfg->if_event_info, sizeof(cfg->if_event_info));
- wl_set_p2p_status(cfg, IF_DELETING);
- DNGL_FUNC(dhd_cfg80211_clean_p2p_info, (cfg));
+ memset(&cfg->if_event_info, 0, sizeof(cfg->if_event_info));
+ wl_set_p2p_status(cfg, IF_DELETING);
+ DNGL_FUNC(dhd_cfg80211_clean_p2p_info, (cfg));
- err = wl_cfgp2p_ifdel(cfg, wl_to_p2p_bss_macaddr(cfg, cfg_type));
- if (unlikely(err)) {
- WL_ERR(("IFDEL operation failed, error code = %d\n", err));
- goto fail;
- } else {
- /* Wait for WLC_E_IF event */
- timeout = wait_event_interruptible_timeout(cfg->netif_change_event,
- ((wl_get_p2p_status(cfg, IF_DELETING) == false) &&
- (cfg->if_event_info.valid)),
- msecs_to_jiffies(MAX_WAIT_TIME));
- if (timeout > 0 && !wl_get_p2p_status(cfg, IF_DELETING) &&
- cfg->if_event_info.valid) {
- WL_ERR(("P2P IFDEL operation done\n"));
- err = BCME_OK;
- } else {
- WL_ERR(("IFDEL didn't complete properly\n"));
- err = -EINVAL;
+ /* for GO */
+ if (wl_get_mode_by_netdev(cfg, dev) == WL_MODE_AP) {
+ wl_add_remove_eventmsg(dev, WLC_E_PROBREQ_MSG, false);
+ cfg->p2p->p2p_go_count--;
+ /* disable interface before bsscfg free */
+ ret = wl_cfgp2p_ifdisable(cfg, &p2p_mac);
+				/* If the firmware doesn't support "ifdis",
+				 * do not wait for link down of AP mode.
+				 */
+ if (ret == 0) {
+ WL_ERR(("Wait for Link Down event for GO !!!\n"));
+ wait_for_completion_timeout(&cfg->iface_disable,
+ msecs_to_jiffies(500));
+ } else if (ret != BCME_UNSUPPORTED) {
+ msleep(300);
+ }
+ }
+ wl_cfg80211_clear_per_bss_ies(cfg, index);
+
+ if (wl_get_mode_by_netdev(cfg, dev) != WL_MODE_AP)
+ wldev_iovar_setint(dev, "buf_key_b4_m4", 0);
+ memcpy(p2p_mac.octet, wl_to_p2p_bss_macaddr(cfg, type).octet,
+ ETHER_ADDR_LEN);
+ CFGP2P_INFO(("primary idx %d : cfg p2p_ifdis "MACDBG"\n",
+ dev->ifindex, MAC2STRDBG(p2p_mac.octet)));
+
+ /* delete interface after link down */
+ ret = wl_cfgp2p_ifdel(cfg, &p2p_mac);
+#if defined(DHD_HANG_SEND_UP_TEST)
+ if (ret != BCME_OK ||
+ dhd->req_hang_type == HANG_REASON_IFACE_OP_FAILURE)
+#else /* DHD_HANG_SEND_UP_TEST */
+ if (ret != BCME_OK)
+#endif /* DHD_HANG_SEND_UP_TEST */
+ {
+ struct net_device *ndev = bcmcfg_to_prmry_ndev(cfg);
+ dhd_pub_t *dhd = (dhd_pub_t *)(cfg->pub);
+
+ WL_ERR(("p2p_ifdel failed, error %d, sent HANG event to %s\n",
+ ret, ndev->name));
+#if defined(BCMPCIE) && defined(DHD_FW_COREDUMP)
+ if (dhd->memdump_enabled) {
+ /* Load the dongle side dump to host
+ * memory and then BUG_ON()
+ */
+ dhd->memdump_type = DUMP_TYPE_HANG_ON_IFACE_OP_FAIL;
+ dhd_bus_mem_dump(dhd);
+ }
+#endif /* BCMPCIE && DHD_FW_COREDUMP */
+ dhd->hang_reason = HANG_REASON_IFACE_OP_FAILURE;
+ net_os_send_hang_message(ndev);
+ } else {
+ /* Wait for IF_DEL operation to be finished */
+ timeout = wait_event_interruptible_timeout(cfg->netif_change_event,
+ ((wl_get_p2p_status(cfg, IF_DELETING) == false) &&
+ (cfg->if_event_info.valid)),
+ msecs_to_jiffies(MAX_WAIT_TIME));
+ if (timeout > 0 && !wl_get_p2p_status(cfg, IF_DELETING) &&
+ cfg->if_event_info.valid) {
+
+ WL_DBG(("IFDEL operation done\n"));
+ wl_cfg80211_handle_ifdel(cfg, &cfg->if_event_info, dev);
+ } else {
+ WL_ERR(("IFDEL didn't complete properly\n"));
+ }
+#ifdef SUPPORT_SET_CAC
+ wl_cfg80211_set_cac(cfg, 1);
+#endif /* SUPPORT_SET_CAC */
+ }
+
+ ret = dhd_del_monitor(dev);
+ if (wl_get_mode_by_netdev(cfg, dev) == WL_MODE_AP) {
+ DHD_OS_WAKE_LOCK_CTRL_TIMEOUT_CANCEL((dhd_pub_t *)(cfg->pub));
+ }
}
+ } else if ((dev->ieee80211_ptr->iftype == NL80211_IFTYPE_AP) ||
+ (dev->ieee80211_ptr->iftype == NL80211_IFTYPE_STATION)) {
+#ifdef WL_VIRTUAL_APSTA
+ ret = wl_cfg80211_del_iface(wiphy, cfgdev);
+#else
+ WL_ERR(("Virtual APSTA not supported!\n"));
+#endif /* WL_VIRTUAL_APSTA */
}
-fail:
- /* Even in failure case, attempt to remove the host data structure.
- * Firmware would be cleaned up via WiFi reset done by the
- * user space from hang event context (for android only).
- */
- bzero(cfg->p2p->vir_ifname, IFNAMSIZ);
- wl_to_p2p_bss_bssidx(cfg, cfg_type) = -1;
- wl_to_p2p_bss_ndev(cfg, cfg_type) = NULL;
- wl_clr_drv_status(cfg, CONNECTED, wl_to_p2p_bss_ndev(cfg, cfg_type));
- dhd_net_if_lock(ndev);
- if (cfg->if_event_info.ifidx) {
- /* Remove interface except for primary ifidx */
- wl_cfg80211_remove_if(cfg, cfg->if_event_info.ifidx, ndev, FALSE);
+done:
+ mutex_unlock(&cfg->if_sync);
+#ifdef WLTDLS
+ if (ret == BCME_OK) {
+ /* If interface del is success, try enabling back TDLS */
+ wl_cfg80211_tdls_config(cfg, TDLS_STATE_IF_DELETE, false);
}
- dhd_net_if_unlock(ndev);
- return err;
+#endif /* WLTDLS */
+ return ret;
}
-#ifdef WL_IFACE_MGMT_CONF
-#ifdef WL_IFACE_MGMT
static s32
-wl_cfg80211_is_policy_config_allowed(struct bcm_cfg80211 *cfg)
+wl_cfg80211_change_virtual_iface(struct wiphy *wiphy, struct net_device *ndev,
+ enum nl80211_iftype type,
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 12, 0))
+ u32 *flags,
+#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(4, 12, 0)) */
+ struct vif_params *params)
{
- s32 ret = BCME_OK;
- wl_iftype_t active_sec_iface = WL_IFACE_NOT_PRESENT;
- bool p2p_disc_on = false;
- bool sta_assoc_state = false;
+ s32 ap = 0;
+ s32 infra_ibss = 1;
+ s32 wlif_type;
+ s32 mode = 0;
+ s32 err = BCME_OK;
+ s32 index;
+ s32 conn_idx = -1;
+ chanspec_t chspec;
+ struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+ dhd_pub_t *dhd = (dhd_pub_t *)(cfg->pub);
mutex_lock(&cfg->if_sync);
-
- sta_assoc_state = (wl_get_drv_status(cfg, CONNECTED, bcmcfg_to_prmry_ndev(cfg)) ||
- wl_get_drv_status(cfg, CONNECTING, bcmcfg_to_prmry_ndev(cfg)));
- active_sec_iface = wl_cfg80211_get_sec_iface(cfg);
- p2p_disc_on = wl_get_p2p_status(cfg, SCANNING);
-
- if ((sta_assoc_state == TRUE) || (p2p_disc_on == TRUE) ||
- (cfg->nan_init_state == TRUE) ||
- (active_sec_iface != WL_IFACE_NOT_PRESENT)) {
- WL_INFORM_MEM(("Active iface matrix: sta_assoc_state = %d,"
- " p2p_disc = %d, nan_disc = %d, active iface = %s\n",
- sta_assoc_state, p2p_disc_on, cfg->nan_init_state,
- wl_iftype_to_str(active_sec_iface)));
- ret = BCME_BUSY;
- }
- mutex_unlock(&cfg->if_sync);
- return ret;
-}
-#endif /* WL_IFACE_MGMT */
-#ifdef WL_NANP2P
-int
-wl_cfg80211_set_iface_conc_disc(struct net_device *ndev,
- uint8 arg_val)
-{
- struct bcm_cfg80211 *cfg = wl_get_cfg(ndev);
- if (!cfg) {
- WL_ERR(("%s: Cannot find cfg\n", __FUNCTION__));
- return BCME_ERROR;
+ WL_DBG(("Enter type %d\n", type));
+ switch (type) {
+ case NL80211_IFTYPE_MONITOR:
+ case NL80211_IFTYPE_WDS:
+#ifndef WLMESH
+ case NL80211_IFTYPE_MESH_POINT:
+#endif /* WLMESH */
+ ap = 1;
+ WL_ERR(("type (%d) : currently we do not support this type\n",
+ type));
+ break;
+#ifdef WLMESH
+ case NL80211_IFTYPE_MESH_POINT:
+ infra_ibss = WL_BSSTYPE_MESH;
+ break;
+#endif /* WLMESH */
+ case NL80211_IFTYPE_ADHOC:
+ mode = WL_MODE_IBSS;
+ infra_ibss = 0;
+ break;
+ case NL80211_IFTYPE_STATION:
+ case NL80211_IFTYPE_P2P_CLIENT:
+ if (ndev->ieee80211_ptr->iftype == NL80211_IFTYPE_AP) {
+ s32 bssidx = wl_get_bssidx_by_wdev(cfg, ndev->ieee80211_ptr);
+ if (bssidx < 0) {
+ /* validate bssidx */
+ WL_ERR(("Wrong bssidx! \n"));
+ err = -EINVAL;
+ goto error;
+ }
+ WL_DBG(("del interface. bssidx:%d", bssidx));
+ /* Downgrade role from AP to STA */
+ if ((err = wl_cfg80211_add_del_bss(cfg, ndev,
+ bssidx, NL80211_IFTYPE_STATION, 0, NULL)) < 0) {
+ WL_ERR(("AP-STA Downgrade failed \n"));
+ err = -EINVAL;
+ goto error;
+ }
+ }
+ mode = WL_MODE_BSS;
+ break;
+ case NL80211_IFTYPE_AP:
+ dhd->op_mode |= DHD_FLAG_HOSTAP_MODE;
+ /* intentional fall through */
+ case NL80211_IFTYPE_AP_VLAN:
+ case NL80211_IFTYPE_P2P_GO:
+ mode = WL_MODE_AP;
+ ap = 1;
+ break;
+ default:
+ err = -EINVAL;
+ goto error;
}
- if (wl_cfg80211_is_policy_config_allowed(cfg) != BCME_OK) {
- WL_ERR(("Cant allow iface management modifications\n"));
- return BCME_BUSY;
+ if (!dhd) {
+ err = -EINVAL;
+ goto error;
}
+ if (ap) {
+ wl_set_mode_by_netdev(cfg, ndev, mode);
+ if (is_p2p_group_iface(ndev->ieee80211_ptr) &&
+ cfg->p2p && wl_cfgp2p_vif_created(cfg)) {
+ WL_DBG(("p2p_vif_created p2p_on (%d)\n", p2p_on(cfg)));
+ wl_notify_escan_complete(cfg, ndev, true, true);
+
+ /* Dual p2p doesn't support multiple P2PGO interfaces,
+ * p2p_go_count is the counter for GO creation
+ * requests.
+ */
+ if ((cfg->p2p->p2p_go_count > 0) && (type == NL80211_IFTYPE_P2P_GO)) {
+ wl_set_mode_by_netdev(cfg, ndev, WL_MODE_BSS);
+ WL_ERR(("Fw does not support multiple GO\n"));
+ err = BCME_ERROR;
+ goto error;
+ }
+			/* In the concurrency case, the STA may already be associated on a particular
+			 * channel, so retrieve the current channel of the primary interface and
+			 * then start the virtual interface on that channel.
+			 */
+ chspec = wl_cfg80211_get_shared_freq(wiphy);
+ index = wl_get_bssidx_by_wdev(cfg, ndev->ieee80211_ptr);
+ if (index < 0) {
+ WL_ERR(("Find p2p index from ndev(%p) failed\n", ndev));
+ err = BCME_ERROR;
+ goto error;
+ }
+ if (wl_cfgp2p_find_type(cfg, index, &conn_idx) != BCME_OK) {
+ err = BCME_ERROR;
+ goto error;
+ }
- if (arg_val) {
- cfg->conc_disc |= arg_val;
+ wlif_type = WL_P2P_IF_GO;
+ printf("%s: %s ap (%d), infra_ibss (%d), iftype (%d) conn_idx (%d)\n",
+ __FUNCTION__, ndev->name, ap, infra_ibss, type, conn_idx);
+ wl_set_p2p_status(cfg, IF_CHANGING);
+ wl_clr_p2p_status(cfg, IF_CHANGED);
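+			/* Ask the firmware to switch the interface role and wait for the IF_CHANGED event */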
+ wl_cfgp2p_ifchange(cfg, wl_to_p2p_bss_macaddr(cfg, conn_idx),
+ htod32(wlif_type), chspec, conn_idx);
+ wait_event_interruptible_timeout(cfg->netif_change_event,
+ (wl_get_p2p_status(cfg, IF_CHANGED) == true),
+ msecs_to_jiffies(MAX_WAIT_TIME));
+ wl_set_mode_by_netdev(cfg, ndev, mode);
+ dhd->op_mode &= ~DHD_FLAG_P2P_GC_MODE;
+ dhd->op_mode |= DHD_FLAG_P2P_GO_MODE;
+ wl_clr_p2p_status(cfg, IF_CHANGING);
+ wl_clr_p2p_status(cfg, IF_CHANGED);
+ if (mode == WL_MODE_AP)
+ wl_set_drv_status(cfg, CONNECTED, ndev);
+#ifdef SUPPORT_AP_POWERSAVE
+ dhd_set_ap_powersave(dhd, 0, TRUE);
+#endif /* SUPPORT_AP_POWERSAVE */
+ } else if ((type == NL80211_IFTYPE_AP) &&
+ !wl_get_drv_status(cfg, AP_CREATED, ndev)) {
+#if 0
+ err = wl_cfg80211_set_ap_role(cfg, ndev);
+ if (unlikely(err)) {
+ WL_ERR(("set ap role failed!\n"));
+ goto error;
+ }
+#else
+ wl_set_drv_status(cfg, AP_CREATING, ndev);
+#endif
+ } else {
+ WL_ERR(("Cannot change the interface for GO or SOFTAP\n"));
+ err = -EINVAL;
+ goto error;
+ }
} else {
- cfg->conc_disc &= ~arg_val;
+ /* P2P GO interface deletion is handled on the basis of role type (AP).
+ * So avoid changing role for p2p type.
+ */
+ if (ndev->ieee80211_ptr->iftype != NL80211_IFTYPE_P2P_GO)
+ wl_set_mode_by_netdev(cfg, ndev, mode);
+ WL_DBG(("Change_virtual_iface for transition from GO/AP to client/STA\n"));
+#ifdef SUPPORT_AP_POWERSAVE
+ dhd_set_ap_powersave(dhd, 0, FALSE);
+#endif /* SUPPORT_AP_POWERSAVE */
}
- return BCME_OK;
-}
-uint8
-wl_cfg80211_get_iface_conc_disc(struct net_device *ndev)
-{
- struct bcm_cfg80211 *cfg = wl_get_cfg(ndev);
- if (!cfg) {
- WL_ERR(("%s: Cannot find cfg\n", __FUNCTION__));
- return BCME_ERROR;
+ if (!infra_ibss) {
+ err = wldev_ioctl_set(ndev, WLC_SET_INFRA, &infra_ibss, sizeof(s32));
+ if (err < 0) {
+ WL_ERR(("SET INFRA/IBSS error %d\n", err));
+ err = -EINVAL;
+ goto error;
+ }
}
- return cfg->conc_disc;
+
+ WL_DBG(("Setting iftype to %d \n", type));
+ ndev->ieee80211_ptr->iftype = type;
+error:
+ mutex_unlock(&cfg->if_sync);
+ return err;
}
-#endif /* WL_NANP2P */
-#ifdef WL_IFACE_MGMT
-int
-wl_cfg80211_set_iface_policy(struct net_device *ndev,
- char *arg, int len)
+
+s32
+wl_cfg80211_notify_ifadd(struct net_device *dev, int ifidx, char *name, uint8 *mac, uint8 bssidx)
{
- int ret = BCME_OK;
- uint8 i = 0;
- iface_mgmt_data_t *iface_data = NULL;
+ bool ifadd_expected = FALSE;
+ struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
- struct bcm_cfg80211 *cfg = wl_get_cfg(ndev);
- if (!cfg) {
- WL_ERR(("%s: Cannot find cfg\n", __FUNCTION__));
- return BCME_ERROR;
- }
+	/* P2P may send WLC_E_IF_ADD and/or WLC_E_IF_CHANGE during IF updating ("p2p_ifupd");
+	 * redirect the IF_ADD event to ifchange as it is not a real "new" interface.
+	 */
+ if (wl_get_p2p_status(cfg, IF_CHANGING))
+ return wl_cfg80211_notify_ifchange(dev, ifidx, name, mac, bssidx);
- if (wl_cfg80211_is_policy_config_allowed(cfg) != BCME_OK) {
- WL_ERR(("Cant allow iface management modifications\n"));
- return BCME_BUSY;
+ /* Okay, we are expecting IF_ADD (as IF_ADDING is true) */
+ if (wl_get_p2p_status(cfg, IF_ADDING)) {
+ ifadd_expected = TRUE;
+ wl_clr_p2p_status(cfg, IF_ADDING);
+ } else if (cfg->bss_pending_op) {
+ ifadd_expected = TRUE;
+ cfg->bss_pending_op = FALSE;
}
- if (!arg || len <= 0 || len > sizeof(iface_mgmt_data_t)) {
- return BCME_BADARG;
- }
+ if (ifadd_expected) {
+ wl_if_event_info *if_event_info = &cfg->if_event_info;
- iface_data = (iface_mgmt_data_t *)arg;
- if (iface_data->policy >= WL_IF_POLICY_INVALID) {
- WL_ERR(("Unexpected value of policy = %d\n",
- iface_data->policy));
- return BCME_BADARG;
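+		/* Record the event details and wake up the thread waiting for the new interface */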
+ if_event_info->valid = TRUE;
+ if_event_info->ifidx = ifidx;
+ if_event_info->bssidx = bssidx;
+ strncpy(if_event_info->name, name, IFNAMSIZ);
+ if_event_info->name[IFNAMSIZ] = '\0';
+ if (mac)
+ memcpy(if_event_info->mac, mac, ETHER_ADDR_LEN);
+ wake_up_interruptible(&cfg->netif_change_event);
+ return BCME_OK;
}
- bzero(&cfg->iface_data, sizeof(iface_mgmt_data_t));
- ret = memcpy_s(&cfg->iface_data, sizeof(iface_mgmt_data_t), arg, len);
- if (ret != BCME_OK) {
- WL_ERR(("Failed to copy iface data, src len = %d\n", len));
- return ret;
+ return BCME_ERROR;
+}
+
+s32
+wl_cfg80211_notify_ifdel(struct net_device *dev, int ifidx, char *name, uint8 *mac, uint8 bssidx)
+{
+ bool ifdel_expected = FALSE;
+ struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
+ wl_if_event_info *if_event_info = &cfg->if_event_info;
+
+ if (wl_get_p2p_status(cfg, IF_DELETING)) {
+ ifdel_expected = TRUE;
+ wl_clr_p2p_status(cfg, IF_DELETING);
+ } else if (cfg->bss_pending_op) {
+ ifdel_expected = TRUE;
+ cfg->bss_pending_op = FALSE;
}
- if (cfg->iface_data.policy == WL_IF_POLICY_ROLE_PRIORITY) {
- for (i = 0; i < WL_IF_TYPE_MAX; i++) {
- WL_DBG(("iface = %s, priority[i] = %d\n",
- wl_iftype_to_str(i), cfg->iface_data.priority[i]));
- }
+ if (ifdel_expected) {
+ if_event_info->valid = TRUE;
+ if_event_info->ifidx = ifidx;
+ if_event_info->bssidx = bssidx;
+ wake_up_interruptible(&cfg->netif_change_event);
+ return BCME_OK;
}
- return ret;
+ return BCME_ERROR;
}
-uint8
-wl_cfg80211_get_iface_policy(struct net_device *ndev)
-
+s32
+wl_cfg80211_notify_ifchange(struct net_device * dev, int ifidx, char *name, uint8 *mac,
+ uint8 bssidx)
{
- struct bcm_cfg80211 *cfg = wl_get_cfg(ndev);
- if (!cfg) {
- WL_ERR(("%s: Cannot find cfg\n", __FUNCTION__));
- return BCME_ERROR;
+ struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
+
+ if (wl_get_p2p_status(cfg, IF_CHANGING)) {
+ wl_set_p2p_status(cfg, IF_CHANGED);
+ wake_up_interruptible(&cfg->netif_change_event);
+ return BCME_OK;
}
- return cfg->iface_data.policy;
+ return BCME_ERROR;
}
-#endif /* WL_IFACE_MGMT */
-#endif /* WL_IFACE_MGMT_CONF */
-#ifdef WL_IFACE_MGMT
-/* Get active secondary data iface type */
-wl_iftype_t
-wl_cfg80211_get_sec_iface(struct bcm_cfg80211 *cfg)
+static s32 wl_cfg80211_handle_ifdel(struct bcm_cfg80211 *cfg, wl_if_event_info *if_event_info,
+ struct net_device* ndev)
{
-#ifndef WL_STATIC_IF
- dhd_pub_t *dhd = (dhd_pub_t *)(cfg->pub);
-#endif /* !WL_STATIC_IF */
- struct net_device *p2p_ndev = NULL;
+ s32 type = -1;
+ s32 bssidx = -1;
+#ifdef PROP_TXSTATUS_VSDB
+#if defined(BCMSDIO) || defined(BCMDBUS)
+ dhd_pub_t *dhd = (dhd_pub_t *)(cfg->pub);
+ bool enabled;
+#endif /* BCMSDIO || BCMDBUS */
+#endif /* PROP_TXSTATUS_VSDB */
- p2p_ndev = wl_to_p2p_bss_ndev(cfg,
- P2PAPI_BSSCFG_CONNECTION1);
+ bssidx = if_event_info->bssidx;
+ if (bssidx != wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_CONNECTION1) &&
+ bssidx != wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_CONNECTION2)) {
+ WL_ERR(("got IF_DEL for if %d, not owned by cfg driver\n", bssidx));
+ return BCME_ERROR;
+ }
-#ifdef WL_STATIC_IF
- if (IS_CFG80211_STATIC_IF_ACTIVE(cfg)) {
- if (IS_AP_IFACE(cfg->static_ndev->ieee80211_ptr)) {
- return WL_IF_TYPE_AP;
+ if (p2p_is_on(cfg) && wl_cfgp2p_vif_created(cfg)) {
+ if (cfg->scan_request && (cfg->escan_info.ndev == ndev)) {
+ /* Abort any pending scan requests */
+ cfg->escan_info.escan_state = WL_ESCAN_STATE_IDLE;
+ WL_DBG(("ESCAN COMPLETED\n"));
+ wl_notify_escan_complete(cfg, cfg->escan_info.ndev, true, false);
}
- }
-#else
- if (dhd->op_mode & DHD_FLAG_HOSTAP_MODE) {
- return WL_IF_TYPE_AP;
- }
-#endif /* WL_STATIC_IF */
- if (p2p_ndev && p2p_ndev->ieee80211_ptr) {
- if (p2p_ndev->ieee80211_ptr->iftype == NL80211_IFTYPE_P2P_GO) {
- return WL_IF_TYPE_P2P_GO;
+ memset(cfg->p2p->vir_ifname, '\0', IFNAMSIZ);
+ if (wl_cfgp2p_find_type(cfg, bssidx, &type) == BCME_OK) {
+ /* Update P2P data */
+ wl_clr_drv_status(cfg, CONNECTED, wl_to_p2p_bss_ndev(cfg, type));
+ wl_to_p2p_bss_ndev(cfg, type) = NULL;
+ wl_to_p2p_bss_bssidx(cfg, type) = -1;
+ } else if (wl_get_bssidx_by_wdev(cfg, ndev->ieee80211_ptr) < 0) {
+ WL_ERR(("bssidx not known for the given ndev as per net_info data \n"));
+ return BCME_ERROR;
}
- if (p2p_ndev->ieee80211_ptr->iftype == NL80211_IFTYPE_P2P_CLIENT) {
- return WL_IF_TYPE_P2P_GC;
+#ifdef PROP_TXSTATUS_VSDB
+#if defined(BCMSDIO) || defined(BCMDBUS)
+ dhd_wlfc_get_enable(dhd, &enabled);
+ if (enabled && cfg->wlfc_on && dhd->op_mode != DHD_FLAG_HOSTAP_MODE &&
+ dhd->op_mode != DHD_FLAG_IBSS_MODE && dhd->conf->disable_proptx!=0) {
+ dhd_wlfc_deinit(dhd);
+ cfg->wlfc_on = false;
}
+#endif /* BCMSDIO || BCMDBUS */
+#endif /* PROP_TXSTATUS_VSDB */
}
-#ifdef WL_NAN
- if (wl_cfgnan_is_dp_active(bcmcfg_to_prmry_ndev(cfg))) {
- return WL_IF_TYPE_NAN;
- }
-#endif /* WL_NAN */
- return WL_IFACE_NOT_PRESENT;
+ dhd_net_if_lock(ndev);
+ wl_cfg80211_remove_if(cfg, if_event_info->ifidx, ndev, FALSE);
+ dhd_net_if_unlock(ndev);
+
+ return BCME_OK;
}
-/*
-* Handle incoming data interface request based on policy.
-* If there is any conflicting interface, that will be
-* deleted.
-*/
-s32
-wl_cfg80211_data_if_mgmt(struct bcm_cfg80211 *cfg,
- wl_iftype_t new_wl_iftype)
+/* Find listen channel */
+static s32 wl_find_listen_channel(struct bcm_cfg80211 *cfg,
+ const u8 *ie, u32 ie_len)
{
- s32 ret = BCME_OK;
- bool del_iface = false;
- wl_iftype_t sec_wl_if_type = wl_cfg80211_get_sec_iface(cfg);
+ wifi_p2p_ie_t *p2p_ie;
+ u8 *end, *pos;
+ s32 listen_channel;
- if (sec_wl_if_type == WL_IF_TYPE_NAN &&
- new_wl_iftype == WL_IF_TYPE_NAN) {
- /* Multi NDP is allowed irrespective of Policy */
- return BCME_OK;
- }
+/* unfortunately const cast required here - function is
+ * a callback so its signature must not be changed
+ * and cascade of changing wl_cfgp2p_find_p2pie
+ * causes need for const cast in other places
+ */
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == \
+ 4 && __GNUC_MINOR__ >= 6))
+_Pragma("GCC diagnostic push")
+_Pragma("GCC diagnostic ignored \"-Wcast-qual\"")
+#endif
+ pos = (u8 *)ie;
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == \
+ 4 && __GNUC_MINOR__ >= 6))
+_Pragma("GCC diagnostic pop")
+#endif
+ p2p_ie = wl_cfgp2p_find_p2pie(pos, ie_len);
- if (sec_wl_if_type == WL_IFACE_NOT_PRESENT) {
- /*
- * If there is no active secondary I/F, there
- * is no interface conflict. Do nothing.
- */
- return BCME_OK;
- }
+ if (p2p_ie == NULL)
+ return 0;
- /* Handle secondary data link case */
- switch (cfg->iface_data.policy) {
- case WL_IF_POLICY_CUSTOM:
- case WL_IF_POLICY_DEFAULT: {
- if (sec_wl_if_type == WL_IF_TYPE_NAN) {
- /* NAN has the lowest priority */
- del_iface = true;
- } else {
- /* Active iface is present, returning error */
- ret = BCME_ERROR;
- }
- break;
- }
- case WL_IF_POLICY_FCFS: {
- WL_INFORM_MEM(("Found active iface = %s, can't support new iface = %s\n",
- wl_iftype_to_str(sec_wl_if_type), wl_iftype_to_str(new_wl_iftype)));
- ret = BCME_ERROR;
- break;
+ pos = p2p_ie->subelts;
+ end = p2p_ie->subelts + (p2p_ie->len - 4);
+
+	CFGP2P_DBG((" found p2p ie ! length %d \n",
+ p2p_ie->len));
+
+ while (pos < end) {
+ uint16 attr_len;
+ if (pos + 2 >= end) {
+ CFGP2P_DBG((" -- Invalid P2P attribute"));
+ return 0;
}
- case WL_IF_POLICY_LP: {
- WL_INFORM_MEM(("Remove active sec data interface, allow incoming iface\n"));
- /* Delete existing data iface and allow incoming sec iface */
- del_iface = true;
- break;
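+		/* The attribute length is a little-endian u16 that follows the 1-byte attribute ID */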
+ attr_len = ((uint16) (((pos + 1)[1] << 8) | (pos + 1)[0]));
+
+ if (pos + 3 + attr_len > end) {
+ CFGP2P_DBG(("P2P: Attribute underflow "
+ "(len=%u left=%d)",
+ attr_len, (int) (end - pos - 3)));
+ return 0;
}
- case WL_IF_POLICY_ROLE_PRIORITY: {
- WL_INFORM_MEM(("Existing iface = %s (%d) and new iface = %s (%d)\n",
- wl_iftype_to_str(sec_wl_if_type),
- cfg->iface_data.priority[sec_wl_if_type],
- wl_iftype_to_str(new_wl_iftype),
- cfg->iface_data.priority[new_wl_iftype]));
- if (cfg->iface_data.priority[new_wl_iftype] >
- cfg->iface_data.priority[sec_wl_if_type]) {
- del_iface = true;
- } else {
- WL_ERR(("Can't support new iface = %s\n",
- wl_iftype_to_str(new_wl_iftype)));
- ret = BCME_ERROR;
+
+		/* If the Listen Channel attribute ID is 6 and the value is valid,
+		 * return the listen channel
+		 */
+ if (pos[0] == 6) {
+ /* listen channel subel length format
+ * 1(id) + 2(len) + 3(country) + 1(op. class) + 1(chan num)
+ */
+ listen_channel = pos[1 + 2 + 3 + 1];
+
+ if (listen_channel == SOCIAL_CHAN_1 ||
+ listen_channel == SOCIAL_CHAN_2 ||
+ listen_channel == SOCIAL_CHAN_3) {
+ CFGP2P_DBG((" Found my Listen Channel %d \n", listen_channel));
+ return listen_channel;
}
- break;
- }
- default: {
- WL_ERR(("Unsupported interface policy = %d\n",
- cfg->iface_data.policy));
- return BCME_ERROR;
}
+ pos += 3 + attr_len;
}
- if (del_iface) {
- ret = wl_cfg80211_delete_iface(cfg, sec_wl_if_type);
- }
- return ret;
+ return 0;
}
-/* Handle discovery ifaces based on policy */
-s32
-wl_cfg80211_disc_if_mgmt(struct bcm_cfg80211 *cfg,
- wl_iftype_t new_wl_iftype, bool *disable_nan, bool *disable_p2p)
+static void wl_scan_prep(struct bcm_cfg80211 *cfg, struct wl_scan_params *params,
+ struct cfg80211_scan_request *request)
{
- s32 ret = BCME_OK;
- wl_iftype_t sec_wl_if_type =
- wl_cfg80211_get_sec_iface(cfg);
- *disable_p2p = false;
- *disable_nan = false;
-
- if (sec_wl_if_type == WL_IF_TYPE_NAN &&
- new_wl_iftype == WL_IF_TYPE_NAN) {
- /* Multi NDP is allowed irrespective of Policy */
- return BCME_OK;
- }
+ u32 n_ssids;
+ u32 n_channels;
+ u16 channel;
+ chanspec_t chanspec;
+ s32 i = 0, j = 0, offset;
+ char *ptr;
+ wlc_ssid_t ssid;
+ struct wireless_dev *wdev;
- /*
- * Check for any policy conflicts with active secondary
- * interface for incoming discovery iface
- */
- if ((sec_wl_if_type != WL_IFACE_NOT_PRESENT) &&
- (is_discovery_iface(new_wl_iftype))) {
- switch (cfg->iface_data.policy) {
- case WL_IF_POLICY_CUSTOM: {
- if (sec_wl_if_type == WL_IF_TYPE_NAN &&
- new_wl_iftype == WL_IF_TYPE_P2P_DISC) {
- WL_INFORM_MEM(("Allow P2P Discovery with active NDP\n"));
- /* No further checks are required. */
- return BCME_OK;
- }
- /*
- * Intentional fall through to default policy
- * as for AP and associated ifaces, both are same
- */
- }
- case WL_IF_POLICY_DEFAULT: {
- if (sec_wl_if_type == WL_IF_TYPE_AP) {
- WL_INFORM_MEM(("AP is active, cant support new iface\n"));
- ret = BCME_ERROR;
- } else if (sec_wl_if_type == WL_IF_TYPE_P2P_GC ||
- sec_wl_if_type == WL_IF_TYPE_P2P_GO) {
- if (new_wl_iftype == WL_IF_TYPE_P2P_DISC) {
- /*
- * Associated discovery case,
- * Fall through
- */
- } else {
- /* Active iface is present, returning error */
- WL_INFORM_MEM(("P2P group is active,"
- " cant support new iface\n"));
- ret = BCME_ERROR;
- }
- } else if (sec_wl_if_type == WL_IF_TYPE_NAN) {
- ret = wl_cfg80211_delete_iface(cfg, sec_wl_if_type);
- }
- break;
- }
- case WL_IF_POLICY_FCFS: {
- WL_INFORM_MEM(("Can't support new iface = %s\n",
- wl_iftype_to_str(new_wl_iftype)));
- ret = BCME_ERROR;
- break;
- }
- case WL_IF_POLICY_LP: {
- /* Delete existing data iface n allow incoming sec iface */
- WL_INFORM_MEM(("Remove active sec data interface = %s\n",
- wl_iftype_to_str(sec_wl_if_type)));
- ret = wl_cfg80211_delete_iface(cfg,
- sec_wl_if_type);
- break;
- }
- case WL_IF_POLICY_ROLE_PRIORITY: {
- WL_INFORM_MEM(("Existing iface = %s (%d) and new iface = %s (%d)\n",
- wl_iftype_to_str(sec_wl_if_type),
- cfg->iface_data.priority[sec_wl_if_type],
- wl_iftype_to_str(new_wl_iftype),
- cfg->iface_data.priority[new_wl_iftype]));
- if (cfg->iface_data.priority[new_wl_iftype] >
- cfg->iface_data.priority[sec_wl_if_type]) {
- WL_INFORM_MEM(("Remove active sec data iface\n"));
- ret = wl_cfg80211_delete_iface(cfg,
- sec_wl_if_type);
- } else {
- WL_ERR(("Can't support new iface = %s"
- " due to low priority\n",
- wl_iftype_to_str(new_wl_iftype)));
- ret = BCME_ERROR;
- }
- break;
+	memcpy(&params->bssid, &ether_bcast, ETHER_ADDR_LEN);
+ params->bss_type = DOT11_BSSTYPE_ANY;
+ params->scan_type = 0;
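+	/* -1 leaves nprobes and the dwell times at the firmware defaults */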
+ params->nprobes = -1;
+ params->active_time = -1;
+ params->passive_time = -1;
+ params->home_time = -1;
+ params->channel_num = 0;
+	memset(&params->ssid, 0, sizeof(wlc_ssid_t));
+
+ WL_SCAN(("Preparing Scan request\n"));
+ WL_SCAN(("nprobes=%d\n", params->nprobes));
+ WL_SCAN(("active_time=%d\n", params->active_time));
+ WL_SCAN(("passive_time=%d\n", params->passive_time));
+ WL_SCAN(("home_time=%d\n", params->home_time));
+ WL_SCAN(("scan_type=%d\n", params->scan_type));
+
+ params->nprobes = htod32(params->nprobes);
+ params->active_time = htod32(params->active_time);
+ params->passive_time = htod32(params->passive_time);
+ params->home_time = htod32(params->home_time);
+
+	/* If the request is NULL, just exit so it will be an all-channel broadcast scan */
+ if (!request)
+ return;
+
+ n_ssids = request->n_ssids;
+ n_channels = request->n_channels;
+
+ /* Copy channel array if applicable */
+ WL_SCAN(("### List of channelspecs to scan ###\n"));
+ if (n_channels > 0) {
+ for (i = 0; i < n_channels; i++) {
+ channel = ieee80211_frequency_to_channel(request->channels[i]->center_freq);
+ /* SKIP DFS channels for Secondary interface */
+ if ((cfg->escan_info.ndev != bcmcfg_to_prmry_ndev(cfg)) &&
+ (request->channels[i]->flags &
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 14, 0))
+ (IEEE80211_CHAN_RADAR | IEEE80211_CHAN_PASSIVE_SCAN)))
+#else
+ (IEEE80211_CHAN_RADAR | IEEE80211_CHAN_NO_IR)))
+#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(3, 14, 0) */
+ continue;
+ if (!dhd_conf_match_channel(cfg->pub, channel))
+ continue;
+
+#if defined(WL_CFG80211_P2P_DEV_IF)
+ wdev = request->wdev;
+#else
+ wdev = request->dev->ieee80211_ptr;
+#endif /* WL_CFG80211_P2P_DEV_IF */
+ chanspec = wl_cfg80211_ulb_get_min_bw_chspec(cfg, wdev, -1);
+ if (chanspec == INVCHANSPEC) {
+ WL_ERR(("Invalid chanspec! Skipping channel\n"));
+ continue;
}
- default: {
- WL_ERR(("Unsupported policy\n"));
- return BCME_ERROR;
+
+ if (request->channels[i]->band == IEEE80211_BAND_2GHZ) {
+ chanspec |= WL_CHANSPEC_BAND_2G;
+ } else {
+ chanspec |= WL_CHANSPEC_BAND_5G;
}
+ params->channel_list[j] = channel;
+ params->channel_list[j] &= WL_CHANSPEC_CHAN_MASK;
+ params->channel_list[j] |= chanspec;
+ WL_SCAN(("Chan : %d, Channel spec: %x \n",
+ channel, params->channel_list[j]));
+ params->channel_list[j] = wl_chspec_host_to_driver(params->channel_list[j]);
+ j++;
}
} else {
- /*
- * Handle incoming new secondary iface request,
- * irrespective of existing discovery ifaces
- */
- if ((cfg->iface_data.policy == WL_IF_POLICY_CUSTOM) &&
- (new_wl_iftype == WL_IF_TYPE_NAN)) {
- WL_INFORM_MEM(("Allow NAN Data Path\n"));
- /* No further checks are required. */
- return BCME_OK;
+ WL_SCAN(("Scanning all channels\n"));
+ }
+ n_channels = j;
+ /* Copy ssid array if applicable */
+ WL_SCAN(("### List of SSIDs to scan ###\n"));
+ if (n_ssids > 0) {
+ offset = offsetof(wl_scan_params_t, channel_list) + n_channels * sizeof(u16);
+ offset = roundup(offset, sizeof(u32));
+ ptr = (char*)params + offset;
+ for (i = 0; i < n_ssids; i++) {
+ memset(&ssid, 0, sizeof(wlc_ssid_t));
+ ssid.SSID_len = MIN(request->ssids[i].ssid_len, DOT11_MAX_SSID_LEN);
+ memcpy(ssid.SSID, request->ssids[i].ssid, ssid.SSID_len);
+ if (!ssid.SSID_len)
+ WL_SCAN(("%d: Broadcast scan\n", i));
+ else
+ WL_SCAN(("%d: scan for %s size =%d\n", i,
+ ssid.SSID, ssid.SSID_len));
+ memcpy(ptr, &ssid, sizeof(wlc_ssid_t));
+ ptr += sizeof(wlc_ssid_t);
}
+ } else {
+ WL_SCAN(("Broadcast scan\n"));
}
+	/* Pack the SSID count (upper bits) and channel count (lower bits) into channel_num */
+ params->channel_num =
+ htod32((n_ssids << WL_SCAN_PARAMS_NSSID_SHIFT) |
+ (n_channels & WL_SCAN_PARAMS_COUNT_MASK));
- /* Check for any conflicting discovery iface */
- switch (new_wl_iftype) {
- case WL_IF_TYPE_P2P_DISC:
- case WL_IF_TYPE_P2P_GO:
- case WL_IF_TYPE_P2P_GC: {
- *disable_nan = true;
- break;
- }
- case WL_IF_TYPE_NAN_NMI:
- case WL_IF_TYPE_NAN: {
- *disable_p2p = true;
- break;
- }
- case WL_IF_TYPE_STA:
- case WL_IF_TYPE_AP: {
- *disable_nan = true;
- *disable_p2p = true;
- break;
- }
- default: {
- WL_ERR(("Unsupported\n"));
- return BCME_ERROR;
- }
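+	/* A single-channel scan uses the connect dwell time and derives nprobes from it */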
+ if (n_channels == 1) {
+ params->active_time = htod32(WL_SCAN_CONNECT_DWELL_TIME_MS);
+ params->nprobes = htod32(params->active_time / WL_SCAN_JOIN_PROBE_INTERVAL_MS);
}
- return ret;
}
-bool
-wl_cfg80211_is_associated_discovery(struct bcm_cfg80211 *cfg,
- wl_iftype_t new_wl_iftype)
+static s32
+wl_get_valid_channels(struct net_device *ndev, u8 *valid_chan_list, s32 size)
{
- struct net_device *p2p_ndev = NULL;
- p2p_ndev = wl_to_p2p_bss_ndev(cfg, P2PAPI_BSSCFG_CONNECTION1);
+ wl_uint32_list_t *list;
+ s32 err = BCME_OK;
+ if (valid_chan_list == NULL || size <= 0)
+ return -ENOMEM;
- if (new_wl_iftype == WL_IF_TYPE_P2P_DISC && p2p_ndev &&
- p2p_ndev->ieee80211_ptr &&
- is_p2p_group_iface(p2p_ndev->ieee80211_ptr)) {
- return true;
+ memset(valid_chan_list, 0, size);
+ list = (wl_uint32_list_t *)(void *) valid_chan_list;
+ list->count = htod32(WL_NUMCHANNELS);
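+	/* list->count tells the firmware how many entries the caller's buffer can hold */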
+ err = wldev_ioctl_get(ndev, WLC_GET_VALID_CHANNELS, valid_chan_list, size);
+ if (err != 0) {
+ WL_ERR(("get channels failed with %d\n", err));
}
-#ifdef WL_NAN
- else if ((new_wl_iftype == WL_IF_TYPE_NAN_NMI) &&
- (wl_cfgnan_is_dp_active(bcmcfg_to_prmry_ndev(cfg)))) {
- return true;
- }
-#endif /* WL_NAN */
- return false;
+
+ return err;
}
-/* Handle incoming discovery iface request */
-s32
-wl_cfg80211_handle_discovery_config(struct bcm_cfg80211 *cfg,
- wl_iftype_t new_wl_iftype)
-{
- s32 ret = BCME_OK;
- bool disable_p2p = false;
- bool disable_nan = false;
+#if defined(USE_INITIAL_SHORT_DWELL_TIME)
+#define FIRST_SCAN_ACTIVE_DWELL_TIME_MS 40
+bool g_first_broadcast_scan = TRUE;
+#endif
- wl_iftype_t active_sec_iface =
- wl_cfg80211_get_sec_iface(cfg);
+static s32
+wl_run_escan(struct bcm_cfg80211 *cfg, struct net_device *ndev,
+ struct cfg80211_scan_request *request, uint16 action)
+{
+ s32 err = BCME_OK;
+ u32 n_channels;
+ u32 n_ssids;
+ s32 params_size = (WL_SCAN_PARAMS_FIXED_SIZE + OFFSETOF(wl_escan_params_t, params));
+ wl_escan_params_t *params = NULL;
+ u8 chan_buf[sizeof(u32)*(WL_NUMCHANNELS + 1)];
+ u32 num_chans = 0;
+ s32 channel;
+ u32 n_valid_chan;
+ s32 search_state = WL_P2P_DISC_ST_SCAN;
+ u32 i, j, n_nodfs = 0;
+ u16 *default_chan_list = NULL;
+ wl_uint32_list_t *list;
+ s32 bssidx = -1;
+ struct net_device *dev = NULL;
+#if defined(USE_INITIAL_SHORT_DWELL_TIME)
+ bool is_first_init_2g_scan = false;
+#endif
+ p2p_scan_purpose_t p2p_scan_purpose = P2P_SCAN_PURPOSE_MIN;
+ scb_val_t scbval;
+ static int cnt = 0;
- if (is_discovery_iface(new_wl_iftype) &&
- (active_sec_iface != WL_IFACE_NOT_PRESENT)) {
- if (wl_cfg80211_is_associated_discovery(cfg,
- new_wl_iftype) == TRUE) {
- WL_DBG(("Associate iface request is allowed= %s\n",
- wl_iftype_to_str(new_wl_iftype)));
- return ret;
- }
- }
+ WL_DBG(("Enter \n"));
- ret = wl_cfg80211_disc_if_mgmt(cfg, new_wl_iftype,
- &disable_nan, &disable_p2p);
- if (ret != BCME_OK) {
- WL_ERR(("Failed at disc iface mgmt, ret = %d\n", ret));
- return ret;
- }
-#ifdef WL_NANP2P
- if (((new_wl_iftype == WL_IF_TYPE_P2P_DISC) && disable_nan) ||
- ((new_wl_iftype == WL_IF_TYPE_NAN_NMI) && disable_p2p)) {
- if ((cfg->nan_p2p_supported == TRUE) &&
- (cfg->conc_disc == WL_NANP2P_CONC_SUPPORT)) {
- WL_INFORM_MEM(("P2P + NAN conc is supported\n"));
- disable_p2p = false;
- disable_nan = false;
- }
+ /* scan request can come with empty request : perform all default scan */
+ if (!cfg) {
+ err = -EINVAL;
+ goto exit;
}
-#endif /* WL_NANP2P */
+ if (!cfg->p2p_supported || !p2p_scan(cfg)) {
+ /* LEGACY SCAN TRIGGER */
+ WL_SCAN((" LEGACY E-SCAN START\n"));
- if (disable_nan) {
-#ifdef WL_NAN
- /* Disable nan */
- cfg->nancfg.disable_reason = NAN_CONCURRENCY_CONFLICT;
- ret = wl_cfgnan_disable(cfg);
- if (ret != BCME_OK) {
- WL_ERR(("failed to disable nan, error[%d]\n", ret));
- return ret;
+#if defined(USE_INITIAL_SHORT_DWELL_TIME)
+ if (!request) {
+ err = -EINVAL;
+ goto exit;
}
-#endif /* WL_NAN */
- }
-
- if (disable_p2p) {
- /* Disable p2p discovery */
- ret = wl_cfg80211_deinit_p2p_discovery(cfg);
- if (ret != BCME_OK) {
- WL_ERR(("Failed to disable p2p_disc for allowing nan\n"));
- return ret;
+ if (ndev == bcmcfg_to_prmry_ndev(cfg) && g_first_broadcast_scan == true) {
+ is_first_init_2g_scan = true;
+ g_first_broadcast_scan = false;
}
- }
- return ret;
-}
-
-/*
-* Check for any conflicting iface before adding iface.
-* Based on policy, either conflicting iface is removed
-* or new iface add request is blocked.
-*/
-s32
-wl_cfg80211_handle_if_role_conflict(struct bcm_cfg80211 *cfg,
- wl_iftype_t new_wl_iftype)
-{
- s32 ret = BCME_OK;
+#endif
- WL_INFORM_MEM(("Incoming iface = %s\n", wl_iftype_to_str(new_wl_iftype)));
+		/* if the scan request is not empty, parse the scan request parameters */
+ if (request != NULL) {
+ n_channels = request->n_channels;
+ n_ssids = request->n_ssids;
+ if (n_channels % 2)
+				/* If n_channels is odd, add a u16 pad */
+ params_size += sizeof(u16) * (n_channels + 1);
+ else
+ params_size += sizeof(u16) * n_channels;
- if (!is_discovery_iface(new_wl_iftype)) {
- /* Incoming data interface request */
- if (wl_cfg80211_get_sec_iface(cfg) != WL_IFACE_NOT_PRESENT) {
- /* active interface present - Apply interface data policy */
- ret = wl_cfg80211_data_if_mgmt(cfg, new_wl_iftype);
- if (ret != BCME_OK) {
- WL_ERR(("if_mgmt fail:%d\n", ret));
- return ret;
- }
+ /* Allocate space for populating ssids in wl_escan_params_t struct */
+ params_size += sizeof(struct wlc_ssid) * n_ssids;
}
- }
- /* Apply discovery config */
- ret = wl_cfg80211_handle_discovery_config(cfg, new_wl_iftype);
- return ret;
-}
-#endif /* WL_IFACE_MGMT */
+ params = (wl_escan_params_t *) kzalloc(params_size, GFP_KERNEL);
+ if (params == NULL) {
+ err = -ENOMEM;
+ goto exit;
+ }
+		wl_scan_prep(cfg, &params->params, request);
-static struct wireless_dev *
-wl_cfg80211_add_monitor_if(struct wiphy *wiphy, const char *name)
-{
-#if defined(WL_ENABLE_P2P_IF) || defined(WL_CFG80211_P2P_DEV_IF)
- WL_ERR(("wl_cfg80211_add_monitor_if: No more support monitor interface\n"));
- return ERR_PTR(-EOPNOTSUPP);
-#else
- struct wireless_dev *wdev;
- struct net_device* ndev = NULL;
+#if defined(USE_INITIAL_SHORT_DWELL_TIME)
+		/* Override active_time to reduce scan time if it's the first broadcast scan. */
+ if (is_first_init_2g_scan)
+ params->params.active_time = FIRST_SCAN_ACTIVE_DWELL_TIME_MS;
+#endif
- dhd_add_monitor(name, &ndev);
+ params->version = htod32(ESCAN_REQ_VERSION);
+ params->action = htod16(action);
+ wl_escan_set_sync_id(params->sync_id, cfg);
+ wl_escan_set_type(cfg, WL_SCANTYPE_LEGACY);
+ if (params_size + sizeof("escan") >= WLC_IOCTL_MEDLEN) {
+ WL_ERR(("ioctl buffer length not sufficient\n"));
+ kfree(params);
+ err = -ENOMEM;
+ goto exit;
+ }
+ if (cfg->active_scan == PASSIVE_SCAN) {
+ params->params.scan_type = DOT11_SCANTYPE_PASSIVE;
+ WL_DBG(("Passive scan_type %d \n", params->params.scan_type));
+ }
- wdev = kzalloc(sizeof(*wdev), GFP_KERNEL);
- if (!wdev) {
- WL_ERR(("wireless_dev alloc failed! \n"));
- goto fail;
- }
+ bssidx = wl_get_bssidx_by_wdev(cfg, ndev->ieee80211_ptr);
- wdev->wiphy = wiphy;
- wdev->iftype = NL80211_IFTYPE_MONITOR;
- ndev->ieee80211_ptr = wdev;
- SET_NETDEV_DEV(ndev, wiphy_dev(wiphy));
+ err = wldev_iovar_setbuf(ndev, "escan", params, params_size,
+ cfg->escan_ioctl_buf, WLC_IOCTL_MEDLEN, NULL);
+// printf("%s: LEGACY_SCAN sync ID: %d, bssidx: %d\n", __FUNCTION__, params->sync_id, bssidx);
+ if (unlikely(err)) {
+ if (err == BCME_EPERM)
+ /* Scan Not permitted at this point of time */
+ WL_DBG((" Escan not permitted at this time (%d)\n", err));
+ else
+ WL_ERR((" Escan set error (%d)\n", err));
+ } else {
+ DBG_EVENT_LOG((dhd_pub_t *)cfg->pub, WIFI_EVENT_DRIVER_SCAN_REQUESTED);
+ }
+ kfree(params);
+ }
+ else if (p2p_is_on(cfg) && p2p_scan(cfg)) {
+ /* P2P SCAN TRIGGER */
+ s32 _freq = 0;
+ n_nodfs = 0;
+ if (request && request->n_channels) {
+ num_chans = request->n_channels;
+			WL_SCAN((" channel number : %d\n", num_chans));
+ default_chan_list = kzalloc(num_chans * sizeof(*default_chan_list),
+ GFP_KERNEL);
+ if (default_chan_list == NULL) {
+ WL_ERR(("channel list allocation failed \n"));
+ err = -ENOMEM;
+ goto exit;
+ }
+ if (!wl_get_valid_channels(ndev, chan_buf, sizeof(chan_buf))) {
+#ifdef P2P_SKIP_DFS
+ int is_printed = false;
+#endif /* P2P_SKIP_DFS */
+ list = (wl_uint32_list_t *) chan_buf;
+ n_valid_chan = dtoh32(list->count);
+ if (n_valid_chan > WL_NUMCHANNELS) {
+ WL_ERR(("wrong n_valid_chan:%d\n", n_valid_chan));
+ kfree(default_chan_list);
+ err = -EINVAL;
+ goto exit;
+ }
- WL_DBG(("wl_cfg80211_add_monitor_if net device returned: 0x%p\n", ndev));
- return ndev->ieee80211_ptr;
-fail:
- return ERR_PTR(-EOPNOTSUPP);
-#endif // endif
-}
+ for (i = 0; i < num_chans; i++)
+ {
+ _freq = request->channels[i]->center_freq;
+ channel = ieee80211_frequency_to_channel(_freq);
-static struct wireless_dev *
-wl_cfg80211_add_ibss(struct wiphy *wiphy, u16 wl_iftype, char const *name)
-{
-#ifdef WLAIBSS_MCHAN
- /* AIBSS */
- return bcm_cfg80211_add_ibss_if(wiphy, (char *)name);
+ /* ignore DFS channels */
+ if (request->channels[i]->flags &
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 14, 0))
+ (IEEE80211_CHAN_NO_IR
+ | IEEE80211_CHAN_RADAR))
#else
- /* Normal IBSS */
- WL_ERR(("IBSS not supported on Virtual iface\n"));
- return NULL;
-#endif // endif
-}
-
-s32
-wl_release_vif_macaddr(struct bcm_cfg80211 *cfg, u8 *mac_addr, u16 wl_iftype)
-{
- struct net_device *ndev = bcmcfg_to_prmry_ndev(cfg);
- u16 org_toggle_bytes;
- u16 cur_toggle_bytes;
- u16 toggled_bit;
+ (IEEE80211_CHAN_RADAR
+ | IEEE80211_CHAN_PASSIVE_SCAN))
+#endif
+ continue;
+#ifdef P2P_SKIP_DFS
+ if (channel >= 52 && channel <= 144) {
+ if (is_printed == false) {
+ WL_ERR(("SKIP DFS CHANs(52~144)\n"));
+ is_printed = true;
+ }
+ continue;
+ }
+#endif /* P2P_SKIP_DFS */
- if (!ndev || !mac_addr || ETHER_ISNULLADDR(mac_addr)) {
- return -EINVAL;
- }
- WL_DBG(("%s:Mac addr" MACDBG "\n",
- __FUNCTION__, MAC2STRDBG(mac_addr)));
+ for (j = 0; j < n_valid_chan; j++) {
+					/* allow only channels supported by the
+					 * current regulatory domain
+					 */
+ if (n_nodfs >= num_chans) {
+ break;
+ }
+ if (channel == (dtoh32(list->element[j]))) {
+ default_chan_list[n_nodfs++] =
+ channel;
+ }
+ }
- if ((wl_iftype == WL_IF_TYPE_P2P_DISC) || (wl_iftype == WL_IF_TYPE_AP) ||
- (wl_iftype == WL_IF_TYPE_P2P_GO) || (wl_iftype == WL_IF_TYPE_P2P_GC)) {
- /* Avoid invoking release mac addr code for interfaces using
- * fixed mac addr.
- */
- return BCME_OK;
- }
+ }
+ }
+ if (num_chans == SOCIAL_CHAN_CNT && (
+ (default_chan_list[0] == SOCIAL_CHAN_1) &&
+ (default_chan_list[1] == SOCIAL_CHAN_2) &&
+ (default_chan_list[2] == SOCIAL_CHAN_3))) {
+ /* SOCIAL CHANNELS 1, 6, 11 */
+ search_state = WL_P2P_DISC_ST_SEARCH;
+ p2p_scan_purpose = P2P_SCAN_SOCIAL_CHANNEL;
+ WL_INFORM(("P2P SEARCH PHASE START \n"));
+ } else if (((dev = wl_to_p2p_bss_ndev(cfg, P2PAPI_BSSCFG_CONNECTION1)) &&
+ (wl_get_mode_by_netdev(cfg, dev) == WL_MODE_AP)) ||
+ ((dev = wl_to_p2p_bss_ndev(cfg, P2PAPI_BSSCFG_CONNECTION2)) &&
+ (wl_get_mode_by_netdev(cfg, dev) == WL_MODE_AP))) {
+ /* If you are already a GO, then do SEARCH only */
+ WL_INFORM(("Already a GO. Do SEARCH Only"));
+ search_state = WL_P2P_DISC_ST_SEARCH;
+ num_chans = n_nodfs;
+ p2p_scan_purpose = P2P_SCAN_NORMAL;
+
+ } else if (num_chans == 1) {
+ p2p_scan_purpose = P2P_SCAN_CONNECT_TRY;
+ } else if (num_chans == SOCIAL_CHAN_CNT + 1) {
+ /* SOCIAL_CHAN_CNT + 1 takes care of the Progressive scan supported by
+ * the supplicant
+ */
+ p2p_scan_purpose = P2P_SCAN_SOCIAL_CHANNEL;
+ } else {
+ WL_INFORM(("P2P SCAN STATE START \n"));
+ num_chans = n_nodfs;
+ p2p_scan_purpose = P2P_SCAN_NORMAL;
+ }
+ } else {
+ err = -EINVAL;
+ goto exit;
+ }
+ err = wl_cfgp2p_escan(cfg, ndev, ACTIVE_SCAN, num_chans, default_chan_list,
+ search_state, action,
+ wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_DEVICE), NULL,
+ p2p_scan_purpose);
- /* Fetch last two bytes of mac address */
- org_toggle_bytes = ntoh16(*((u16 *)&ndev->dev_addr[4]));
- cur_toggle_bytes = ntoh16(*((u16 *)&mac_addr[4]));
+ if (!err)
+ cfg->p2p->search_state = search_state;
- toggled_bit = (org_toggle_bytes ^ cur_toggle_bytes);
- WL_DBG(("org_toggle_bytes:%04X cur_toggle_bytes:%04X\n",
- org_toggle_bytes, cur_toggle_bytes));
- if (toggled_bit & cfg->vif_macaddr_mask) {
- /* This toggled_bit is marked in the used mac addr
- * mask. Clear it.
- */
- cfg->vif_macaddr_mask &= ~toggled_bit;
- WL_INFORM(("MAC address - " MACDBG " released. toggled_bit:%04X vif_mask:%04X\n",
- MAC2STRDBG(mac_addr), toggled_bit, cfg->vif_macaddr_mask));
+ kfree(default_chan_list);
+ }
+exit:
+ if (unlikely(err)) {
+ int suppressed = 0;
+ wldev_ioctl(dev, WLC_GET_SCANSUPPRESS, &suppressed, sizeof(int), false);
+		/* Don't print an error in case of scan suppress */
+ if ((err == BCME_EPERM) && (cfg->scan_suppressed || suppressed)) {
+ cnt = 0;
+ WL_DBG(("Escan failed: Scan Suppressed \n"));
+ } else {
+ cnt++;
+ WL_ERR(("error (%d), cnt=%d\n", err, cnt));
+ // terence 20140111: send disassoc to firmware
+ if (cnt >= 4) {
+ dev = bcmcfg_to_prmry_ndev(cfg);
+ memset(&scbval, 0, sizeof(scb_val_t));
+ wldev_ioctl(dev, WLC_DISASSOC, &scbval, sizeof(scb_val_t), true);
+ WL_ERR(("Send disassoc to break the busy dev=%p\n", dev));
+ cnt = 0;
+ }
+ }
} else {
- WL_ERR(("MAC address - " MACDBG " not found in the used list."
- " toggled_bit:%04x vif_mask:%04x\n", MAC2STRDBG(mac_addr),
- toggled_bit, cfg->vif_macaddr_mask));
- return -EINVAL;
+ cnt = 0;
}
-
- return BCME_OK;
+ return err;
}
-s32
-wl_get_vif_macaddr(struct bcm_cfg80211 *cfg, u16 wl_iftype, u8 *mac_addr)
-{
-#ifdef WL_P2P_USE_RANDMAC
- struct ether_addr *p2p_dev_addr = wl_to_p2p_bss_macaddr(cfg, P2PAPI_BSSCFG_DEVICE);
-#endif // endif
- struct net_device *ndev = bcmcfg_to_prmry_ndev(cfg);
- u16 toggle_mask;
- u16 toggle_bit;
- u16 toggle_bytes;
- u16 used;
- u32 offset = 0;
- /* Toggle mask starts from MSB of second last byte */
- u16 mask = 0x8000;
- if (!mac_addr) {
- return -EINVAL;
- }
-#ifdef WL_P2P_USE_RANDMAC
- if (wl_iftype == WL_IF_TYPE_P2P_DISC) {
- memcpy_s(mac_addr, ETH_ALEN, p2p_dev_addr->octet, ETH_ALEN);
- return 0;
- }
-#endif // endif
- memcpy(mac_addr, ndev->dev_addr, ETH_ALEN);
-/*
- * VIF MAC address managment
- * P2P Device addres: Primary MAC with locally admin. bit set
- * P2P Group address/NAN NMI/Softap/NAN DPI: Primary MAC addr
- * with local admin bit set and one additional bit toggled.
- * cfg->vif_macaddr_mask will hold the info regarding the mac address
- * released. Ensure to call wl_release_vif_macaddress to free up
- * the mac address.
- */
-#if defined(SPECIFIC_MAC_GEN_SCHEME)
- if (wl_iftype == WL_IF_TYPE_P2P_DISC || wl_iftype == WL_IF_TYPE_AP) {
- mac_addr[0] |= 0x02;
- } else if ((wl_iftype == WL_IF_TYPE_P2P_GO) || (wl_iftype == WL_IF_TYPE_P2P_GC)) {
- mac_addr[0] |= 0x02;
- mac_addr[4] ^= 0x80;
- }
-#else
- if (wl_iftype == WL_IF_TYPE_P2P_DISC) {
- mac_addr[0] |= 0x02;
- }
-#endif /* SEPCIFIC_MAC_GEN_SCHEME */
- else {
- /* For locally administered mac addresses, we keep the
- * OUI part constant and just work on the last two bytes.
- */
- mac_addr[0] |= 0x02;
- toggle_mask = cfg->vif_macaddr_mask;
- toggle_bytes = ntoh16(*((u16 *)&mac_addr[4]));
- do {
- used = toggle_mask & mask;
- if (!used) {
- /* Use this bit position */
- toggle_bit = mask >> offset;
- toggle_bytes ^= toggle_bit;
- cfg->vif_macaddr_mask |= toggle_bit;
- WL_DBG(("toggle_bit:%04X toggle_bytes:%04X toggle_mask:%04X\n",
- toggle_bit, toggle_bytes, cfg->vif_macaddr_mask));
- /* Macaddress are stored in network order */
- mac_addr[5] = *((u8 *)&toggle_bytes);
- mac_addr[4] = *(((u8 *)&toggle_bytes + 1));
- break;
- }
- /* Shift by one */
- toggle_mask = toggle_mask << 0x1;
- offset++;
- if (offset > MAX_VIF_OFFSET) {
- /* We have used up all macaddresses. Something wrong! */
- WL_ERR(("Entire range of macaddress used up.\n"));
- ASSERT(0);
- break;
- }
- } while (true);
- }
- WL_INFORM_MEM(("Get virtual I/F mac addr: "MACDBG"\n", MAC2STRDBG(mac_addr)));
- return 0;
-}
-#ifdef DNGL_AXI_ERROR_LOGGING
static s32
-_wl_cfg80211_check_axi_error(struct bcm_cfg80211 *cfg)
+wl_do_escan(struct bcm_cfg80211 *cfg, struct wiphy *wiphy, struct net_device *ndev,
+ struct cfg80211_scan_request *request)
{
- s32 ret = BCME_OK;
- dhd_pub_t *dhd = (dhd_pub_t *)(cfg->pub);
- hnd_ext_trap_hdr_t *hdr;
- int axi_host_error_size;
- uint8 *new_dst;
- uint32 *ext_data = dhd->extended_trap_data;
- struct file *fp = NULL;
- char *filename = DHD_COMMON_DUMP_PATH
- DHD_DUMP_AXI_ERROR_FILENAME
- DHD_DUMP_HAL_FILENAME_SUFFIX;
-
- WL_ERR(("%s: starts to read %s. Axi error \n", __FUNCTION__, filename));
-
- fp = filp_open(filename, O_RDONLY, 0);
-
- if (IS_ERR(fp) || (fp == NULL)) {
- WL_ERR(("%s: Couldn't read the file, err %ld,File [%s] No previous axi error \n",
- __FUNCTION__, PTR_ERR(fp), filename));
- return ret;
+ s32 err = BCME_OK;
+ s32 passive_scan = 0;
+ s32 passive_scan_time = 0;
+ s32 passive_scan_time_org = 0;
+ wl_scan_results_t *results;
+ WL_SCAN(("Enter \n"));
+
+ results = wl_escan_get_buf(cfg, FALSE);
+ results->version = 0;
+ results->count = 0;
+ results->buflen = WL_SCAN_RESULTS_FIXED_SIZE;
+
+ cfg->escan_info.ndev = ndev;
+ cfg->escan_info.wiphy = wiphy;
+ cfg->escan_info.escan_state = WL_ESCAN_STATE_SCANING;
+ passive_scan = cfg->active_scan ? 0 : 1;
+ err = wldev_ioctl_set(ndev, WLC_SET_PASSIVE_SCAN,
+ &passive_scan, sizeof(passive_scan));
+ if (unlikely(err)) {
+ WL_ERR(("error (%d)\n", err));
+ goto exit;
}
- kernel_read_compat(fp, fp->f_pos, (char *)dhd->axi_err_dump, sizeof(dhd_axi_error_dump_t));
- filp_close(fp, NULL);
-
- /* Delete axi error info file */
- if (dhd_file_delete(filename) < 0) {
- WL_ERR(("%s(): Failed to delete file: %s\n", __FUNCTION__, filename));
- return ret;
- }
- WL_ERR(("%s(): Success to delete file: %s\n", __FUNCTION__, filename));
+ if (passive_channel_skip) {
- if (dhd->axi_err_dump->etd_axi_error_v1.signature != HND_EXT_TRAP_AXIERROR_SIGNATURE) {
- WL_ERR(("%s: Invalid AXI signature: 0x%x\n",
- __FUNCTION__, dhd->axi_err_dump->etd_axi_error_v1.signature));
- }
+ err = wldev_ioctl_get(ndev, WLC_GET_SCAN_PASSIVE_TIME,
+ &passive_scan_time_org, sizeof(passive_scan_time_org));
+ if (unlikely(err)) {
+ WL_ERR(("== error (%d)\n", err));
+ goto exit;
+ }
- /* First word is original trap_data */
- ext_data++;
+ WL_SCAN(("PASSIVE SCAN time : %d \n", passive_scan_time_org));
- /* Followed by the extended trap data header */
- hdr = (hnd_ext_trap_hdr_t *)ext_data;
- new_dst = hdr->data;
+ passive_scan_time = 0;
+ err = wldev_ioctl_set(ndev, WLC_SET_SCAN_PASSIVE_TIME,
+ &passive_scan_time, sizeof(passive_scan_time));
+ if (unlikely(err)) {
+ WL_ERR(("== error (%d)\n", err));
+ goto exit;
+ }
- axi_host_error_size = sizeof(dhd->axi_err_dump->axid)
- + sizeof(dhd->axi_err_dump->fault_address);
+		WL_SCAN(("PASSIVE SCAN SKIPPED!! (passive_channel_skip:%d) \n",
+ passive_channel_skip));
+ }
- /* TAG_TRAP_AXI_HOST_INFO tlv : host's axid, fault address */
- new_dst = bcm_write_tlv(TAG_TRAP_AXI_HOST_INFO,
- (const void *)dhd->axi_err_dump,
- axi_host_error_size, new_dst);
+ err = wl_run_escan(cfg, ndev, request, WL_SCAN_ACTION_START);
- /* TAG_TRAP_AXI_ERROR tlv */
- new_dst = bcm_write_tlv(TAG_TRAP_AXI_ERROR,
- (const void *)&dhd->axi_err_dump->etd_axi_error_v1,
- sizeof(dhd->axi_err_dump->etd_axi_error_v1), new_dst);
- hdr->len = new_dst - hdr->data;
+ if (passive_channel_skip) {
+ err = wldev_ioctl_set(ndev, WLC_SET_SCAN_PASSIVE_TIME,
+ &passive_scan_time_org, sizeof(passive_scan_time_org));
+ if (unlikely(err)) {
+ WL_ERR(("== error (%d)\n", err));
+ goto exit;
+ }
- dhd->dongle_trap_occured = TRUE;
- memset(dhd->axi_err_dump, 0, sizeof(dhd_axi_error_dump_t));
+ WL_SCAN(("PASSIVE SCAN RECOVERED!! (passive_scan_time_org:%d) \n",
+ passive_scan_time_org));
+ }
- dhd->hang_reason = HANG_REASON_DONGLE_TRAP;
- net_os_send_hang_message(bcmcfg_to_prmry_ndev(cfg));
- ret = BCME_ERROR;
- return ret;
+exit:
+ return err;
}
-#endif /* DNGL_AXI_ERROR_LOGGING */
-/* All Android/Linux private/Vendor Interface calls should make
- * use of below API for interface creation.
- */
-struct wireless_dev *
-wl_cfg80211_add_if(struct bcm_cfg80211 *cfg,
- struct net_device *primary_ndev,
- wl_iftype_t wl_iftype, const char *name, u8 *mac)
+static s32
+__wl_cfg80211_scan(struct wiphy *wiphy, struct net_device *ndev,
+ struct cfg80211_scan_request *request,
+ struct cfg80211_ssid *this_ssid)
{
- u8 mac_addr[ETH_ALEN];
- s32 err = -ENODEV;
- struct wireless_dev *wdev = NULL;
- struct wiphy *wiphy;
- s32 wl_mode;
- dhd_pub_t *dhd;
- wl_iftype_t macaddr_iftype = wl_iftype;
+ struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+ struct cfg80211_ssid *ssids;
+ struct ether_addr primary_mac;
+ bool p2p_ssid;
+#ifdef WL11U
+ bcm_tlv_t *interworking_ie;
+#endif
+ s32 err = 0;
+ s32 bssidx = -1;
+ s32 i;
- WL_INFORM_MEM(("if name: %s, wl_iftype:%d \n",
- name ? name : "NULL", wl_iftype));
- if (!cfg || !primary_ndev || !name) {
- WL_ERR(("cfg/ndev/name ptr null\n"));
- return NULL;
- }
- if (wl_cfg80211_get_wdev_from_ifname(cfg, name)) {
- WL_ERR(("Interface name %s exists!\n", name));
- return NULL;
+ unsigned long flags;
+ static s32 busy_count = 0;
+#ifdef WL_CFG80211_VSDB_PRIORITIZE_SCAN_REQUEST
+ struct net_device *remain_on_channel_ndev = NULL;
+#endif
+ uint scan_timer_interval_ms = WL_SCAN_TIMER_INTERVAL_MS;
+
+ /*
+	 * Hostapd triggers a scan before starting automatic channel selection
+	 * to collect channel characteristics. However, the firmware scan engine
+	 * doesn't support channel characteristics collection along with a scan,
+	 * so return scan success.
+ */
+ if (request && (scan_req_iftype(request) == NL80211_IFTYPE_AP)) {
+ WL_INFORM(("Scan Command on SoftAP Interface. Ignoring...\n"));
+// terence 20161023: let it scan in SoftAP mode
+// return 0;
}
- wiphy = bcmcfg_to_wiphy(cfg);
- dhd = (dhd_pub_t *)(cfg->pub);
- if (!dhd) {
- return NULL;
+ ndev = ndev_to_wlc_ndev(ndev, cfg);
+
+ if (WL_DRV_STATUS_SENDING_AF_FRM_EXT(cfg)) {
+ WL_ERR(("Sending Action Frames. Try it again.\n"));
+ return -EAGAIN;
}
- if ((wl_mode = wl_iftype_to_mode(wl_iftype)) < 0) {
- return NULL;
+ WL_DBG(("Enter wiphy (%p)\n", wiphy));
+ if (wl_get_drv_status_all(cfg, SCANNING)) {
+ if (cfg->scan_request == NULL) {
+ wl_clr_drv_status_all(cfg, SCANNING);
+ WL_DBG(("<<<<<<<<<<<Force Clear Scanning Status>>>>>>>>>>>\n"));
+ } else {
+ WL_ERR(("Scanning already\n"));
+ return -EAGAIN;
+ }
}
- mutex_lock(&cfg->if_sync);
-#ifdef WL_NAN
- if (wl_iftype == WL_IF_TYPE_NAN) {
- /*
- * Bypass the role conflict check for NDI and handle it
- * from dp req and dp resp context
- * because in aware comms, ndi gets created soon after nan enable.
- */
- } else
-#endif /* WL_NAN */
-#ifdef WL_IFACE_MGMT
- if ((err = wl_cfg80211_handle_if_role_conflict(cfg, wl_iftype)) < 0) {
- mutex_unlock(&cfg->if_sync);
- return NULL;
+ if (wl_get_drv_status(cfg, SCAN_ABORTING, ndev)) {
+ WL_ERR(("Scanning being aborted\n"));
+ return -EAGAIN;
}
-#endif /* WL_IFACE_MGMT */
-#ifdef DNGL_AXI_ERROR_LOGGING
- /* Check the previous smmu fault error */
- if ((err = _wl_cfg80211_check_axi_error(cfg)) < 0) {
- mutex_unlock(&cfg->if_sync);
- return NULL;
+ if (request && request->n_ssids > WL_SCAN_PARAMS_SSID_MAX) {
+		WL_ERR(("n_ssids > WL_SCAN_PARAMS_SSID_MAX\n"));
+ return -EOPNOTSUPP;
}
-#endif /* DNGL_AXI_ERROR_LOGGING */
- /* Protect the interace op context */
- /* Do pre-create ops */
- wl_cfg80211_iface_state_ops(primary_ndev->ieee80211_ptr, WL_IF_CREATE_REQ,
- wl_iftype, wl_mode);
- if (strnicmp(name, SOFT_AP_IF_NAME, strlen(SOFT_AP_IF_NAME)) == 0) {
- macaddr_iftype = WL_IF_TYPE_AP;
+#ifdef P2P_LISTEN_OFFLOADING
+ if (wl_get_p2p_status(cfg, DISC_IN_PROGRESS)) {
+ WL_ERR(("P2P_FIND: Discovery offload is in progress\n"));
+ return -EAGAIN;
}
+#endif /* P2P_LISTEN_OFFLOADING */
- if (mac) {
- /* If mac address is provided, use that */
- memcpy(mac_addr, mac, ETH_ALEN);
- } else if ((wl_get_vif_macaddr(cfg, macaddr_iftype, mac_addr) != BCME_OK)) {
- /* Fetch the mac address to be used for virtual interface */
- err = -EINVAL;
- goto fail;
+#ifdef WL_CFG80211_VSDB_PRIORITIZE_SCAN_REQUEST
+ remain_on_channel_ndev = wl_cfg80211_get_remain_on_channel_ndev(cfg);
+ if (remain_on_channel_ndev) {
+ WL_DBG(("Remain_on_channel bit is set, somehow it didn't get cleared\n"));
+ wl_notify_escan_complete(cfg, remain_on_channel_ndev, true, true);
}
+#endif /* WL_CFG80211_VSDB_PRIORITIZE_SCAN_REQUEST */
- switch (wl_iftype) {
- case WL_IF_TYPE_IBSS:
- wdev = wl_cfg80211_add_ibss(wiphy, wl_iftype, name);
- break;
- case WL_IF_TYPE_MONITOR:
- wdev = wl_cfg80211_add_monitor_if(wiphy, name);
- break;
- case WL_IF_TYPE_STA:
- case WL_IF_TYPE_AP:
- case WL_IF_TYPE_NAN:
- if (cfg->iface_cnt >= (IFACE_MAX_CNT - 1)) {
- WL_ERR(("iface_cnt exceeds max cnt. created iface_cnt: %d\n",
- cfg->iface_cnt));
- err = -ENOTSUPP;
- goto fail;
+
+ /* Arm scan timeout timer */
+ mod_timer(&cfg->scan_timeout, jiffies + msecs_to_jiffies(scan_timer_interval_ms));
+ if (request) { /* scan bss */
+ ssids = request->ssids;
+ p2p_ssid = false;
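+		/* A request that contains the P2P wildcard SSID is handled as a P2P scan */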
+ for (i = 0; i < request->n_ssids; i++) {
+ if (ssids[i].ssid_len &&
+ IS_P2P_SSID(ssids[i].ssid, ssids[i].ssid_len)) {
+ p2p_ssid = true;
+ break;
}
- wdev = wl_cfg80211_create_iface(cfg->wdev->wiphy,
- wl_iftype, mac_addr, name);
- break;
- case WL_IF_TYPE_P2P_DISC:
- case WL_IF_TYPE_P2P_GO:
- /* Intentional fall through */
- case WL_IF_TYPE_P2P_GC:
+ }
+ if (p2p_ssid) {
if (cfg->p2p_supported) {
- wdev = wl_cfg80211_p2p_if_add(cfg, wl_iftype,
- name, mac_addr, &err);
- break;
+ /* p2p scan trigger */
+ if (p2p_on(cfg) == false) {
+ /* p2p on at the first time */
+ p2p_on(cfg) = true;
+ wl_cfgp2p_set_firm_p2p(cfg);
+ get_primary_mac(cfg, &primary_mac);
+ wl_cfgp2p_generate_bss_mac(cfg, &primary_mac);
+#if defined(P2P_IE_MISSING_FIX)
+ cfg->p2p_prb_noti = false;
+#endif
+ }
+ wl_clr_p2p_status(cfg, GO_NEG_PHASE);
+ WL_DBG(("P2P: GO_NEG_PHASE status cleared \n"));
+ p2p_scan(cfg) = true;
}
- /* Intentionally fall through for unsupported interface
- * handling when firmware doesn't support p2p
+ } else {
+ /* legacy scan trigger
+ * so p2p discovery must be disabled if it is currently on
*/
- default:
- WL_ERR(("Unsupported interface type\n"));
- err = -ENOTSUPP;
- goto fail;
- }
+ if (cfg->p2p_supported) {
+ p2p_scan(cfg) = false;
+ /* If the netdevice is not the primary interface and p2p is on,
+ * we will do a p2p scan using P2PAPI_BSSCFG_DEVICE.
+ */
- if (!wdev) {
- WL_ERR(("vif create failed. err:%d\n", err));
- if (err != -ENOTSUPP) {
- err = -ENODEV;
+ if (p2p_scan(cfg) == false) {
+ if (wl_get_p2p_status(cfg, DISCOVERY_ON)) {
+ err = wl_cfgp2p_discover_enable_search(cfg,
+ false);
+ if (unlikely(err)) {
+ goto scan_out;
+ }
+
+ }
+ }
+ }
+ if (!cfg->p2p_supported || !p2p_scan(cfg)) {
+ if ((bssidx = wl_get_bssidx_by_wdev(cfg,
+ ndev->ieee80211_ptr)) < 0) {
+ WL_ERR(("Find p2p index from ndev(%p) failed\n",
+ ndev));
+ err = BCME_ERROR;
+ goto scan_out;
+ }
+#ifdef WL11U
+ if (request && (interworking_ie =
+ wl_cfg80211_find_interworking_ie(
+ request->ie, request->ie_len)) != NULL) {
+ if ((err = wl_cfg80211_add_iw_ie(cfg, ndev, bssidx,
+ VNDR_IE_CUSTOM_FLAG, interworking_ie->id,
+ interworking_ie->data,
+ interworking_ie->len)) != BCME_OK) {
+ WL_ERR(("Failed to add interworking IE"));
+ }
+ } else if (cfg->wl11u) {
+ /* we have to clear IW IE and disable gratuitous APR */
+ wl_cfg80211_clear_iw_ie(cfg, ndev, bssidx);
+ err = wldev_iovar_setint_bsscfg(ndev, "grat_arp",
+ 0, bssidx);
+ /* we don't care about error here
+ * because the only failure case is unsupported,
+ * which is fine
+ */
+ if (unlikely(err)) {
+ WL_ERR(("Set grat_arp failed:(%d) Ignore!\n", err));
+ }
+ cfg->wl11u = FALSE;
+ }
+#endif /* WL11U */
+ if (request) {
+ err = wl_cfg80211_set_mgmt_vndr_ies(cfg,
+ ndev_to_cfgdev(ndev),
+ bssidx, VNDR_IE_PRBREQ_FLAG, request->ie,
+ request->ie_len);
+ }
+
+ if (unlikely(err)) {
+// terence 20161023: let it scan in SoftAP mode
+// goto scan_out;
+ }
+
+ }
}
- goto fail;
+ } else { /* scan in ibss */
+ ssids = this_ssid;
}
- /* Ensure decrementing in case of failure */
- cfg->vif_count++;
+ if (request && cfg->p2p_supported) {
+ WL_TRACE_HW4(("START SCAN\n"));
+ DHD_OS_SCAN_WAKE_LOCK_TIMEOUT((dhd_pub_t *)(cfg->pub),
+ SCAN_WAKE_LOCK_TIMEOUT);
+ DHD_DISABLE_RUNTIME_PM((dhd_pub_t *)(cfg->pub));
+ }
- wl_cfg80211_iface_state_ops(wdev,
- WL_IF_CREATE_DONE, wl_iftype, wl_mode);
+ if (cfg->p2p_supported) {
+ if (request && p2p_on(cfg) && p2p_scan(cfg)) {
- WL_INFORM_MEM(("Vif created. dev->ifindex:%d"
- " cfg_iftype:%d, vif_count:%d\n",
- (wdev->netdev ? wdev->netdev->ifindex : 0xff),
- wdev->iftype, cfg->vif_count));
- mutex_unlock(&cfg->if_sync);
- return wdev;
+ /* find my listen channel */
+ cfg->afx_hdl->my_listen_chan =
+ wl_find_listen_channel(cfg, request->ie,
+ request->ie_len);
+ err = wl_cfgp2p_enable_discovery(cfg, ndev,
+ request->ie, request->ie_len);
-fail:
- wl_cfg80211_iface_state_ops(primary_ndev->ieee80211_ptr,
- WL_IF_DELETE_REQ, wl_iftype, wl_mode);
+ if (unlikely(err)) {
+ goto scan_out;
+ }
+ }
+ }
+ err = wl_do_escan(cfg, wiphy, ndev, request);
+ if (likely(!err))
+ goto scan_success;
+ else
+ goto scan_out;
+
+scan_success:
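+ /* Record the in-flight request; SCANNING is cleared again when the escan completes or is aborted */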
+ busy_count = 0;
+ cfg->scan_request = request;
+ wl_set_drv_status(cfg, SCANNING, ndev);
+
+ return 0;
- if (err != -ENOTSUPP) {
- /* For non-supported interfaces, just return error and
- * skip below recovery steps.
+scan_out:
+ if (err == BCME_BUSY || err == BCME_NOTREADY) {
+ WL_ERR(("Scan err = (%d), busy?%d", err, -EBUSY));
+ err = -EBUSY;
+ } else if ((err == BCME_EPERM) && cfg->scan_suppressed) {
+ WL_ERR(("Scan not permitted due to scan suppress\n"));
+ err = -EPERM;
+ } else {
+ /* For all other fw errors, use a generic error code as return
+ * value to cfg80211 stack
*/
- SUPP_LOG(("IF_ADD fail. err:%d\n", err));
- wl_flush_fw_log_buffer(primary_ndev, FW_LOGSET_MASK_ALL);
- if (dhd_query_bus_erros(dhd)) {
- goto exit;
- }
- dhd->iface_op_failed = TRUE;
-#if defined(DHD_DEBUG) && defined(BCMPCIE) && defined(DHD_FW_COREDUMP)
- if (dhd->memdump_enabled) {
- dhd->memdump_type = DUMP_TYPE_IFACE_OP_FAILURE;
- dhd_bus_mem_dump(dhd);
+ err = -EAGAIN;
+ }
+
+#define SCAN_EBUSY_RETRY_LIMIT 20
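+ /* After SCAN_EBUSY_RETRY_LIMIT consecutive -EBUSY results, dump driver state and force a scan abort to recover */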
+ if (err == -EBUSY) {
+ if (busy_count++ > SCAN_EBUSY_RETRY_LIMIT) {
+ struct ether_addr bssid;
+ s32 ret = 0;
+#if defined(DHD_DEBUG) && defined(DHD_FW_COREDUMP)
+ dhd_pub_t *dhdp = (dhd_pub_t *)(cfg->pub);
+#endif /* DHD_DEBUG && DHD_FW_COREDUMP */
+ busy_count = 0;
+ WL_ERR(("Unusual continuous EBUSY error, %d %d %d %d %d %d %d %d %d\n",
+ wl_get_drv_status(cfg, SCANNING, ndev),
+ wl_get_drv_status(cfg, SCAN_ABORTING, ndev),
+ wl_get_drv_status(cfg, CONNECTING, ndev),
+ wl_get_drv_status(cfg, CONNECTED, ndev),
+ wl_get_drv_status(cfg, DISCONNECTING, ndev),
+ wl_get_drv_status(cfg, AP_CREATING, ndev),
+ wl_get_drv_status(cfg, AP_CREATED, ndev),
+ wl_get_drv_status(cfg, SENDING_ACT_FRM, ndev),
+ wl_get_drv_status(cfg, SENDING_ACT_FRM, ndev)));
+
+#if defined(DHD_DEBUG) && defined(DHD_FW_COREDUMP)
+ if (dhdp->memdump_enabled) {
+ dhdp->memdump_type = DUMP_TYPE_SCAN_BUSY;
+ dhd_bus_mem_dump(dhdp);
+ }
+#endif /* DHD_DEBUG && DHD_FW_COREDUMP */
+
+ bzero(&bssid, sizeof(bssid));
+ if ((ret = wldev_ioctl_get(ndev, WLC_GET_BSSID,
+ &bssid, ETHER_ADDR_LEN)) == 0)
+ WL_ERR(("FW is connected with " MACDBG "/n",
+ MAC2STRDBG(bssid.octet)));
+ else
+ WL_ERR(("GET BSSID failed with %d\n", ret));
+
+ wl_cfg80211_scan_abort(cfg);
+
+ } else {
+ /* Hold the context for 400msec, so that 10 subsequent scans
+ * can give a buffer of 4sec which is enough to
+ * cover any on-going scan in the firmware
+ */
+ WL_DBG(("Enforcing delay for EBUSY case \n"));
+ msleep(400);
}
-#endif /* DHD_DEBUG && BCMPCIE && DHD_FW_COREDUMP */
- dhd->hang_reason = HANG_REASON_IFACE_ADD_FAILURE;
- net_os_send_hang_message(bcmcfg_to_prmry_ndev(cfg));
+ } else {
+ busy_count = 0;
}
-exit:
- mutex_unlock(&cfg->if_sync);
- return NULL;
+
+ wl_clr_drv_status(cfg, SCANNING, ndev);
+ if (timer_pending(&cfg->scan_timeout))
+ del_timer_sync(&cfg->scan_timeout);
+ DHD_OS_SCAN_WAKE_UNLOCK((dhd_pub_t *)(cfg->pub));
+ spin_lock_irqsave(&cfg->cfgdrv_lock, flags);
+ cfg->scan_request = NULL;
+ spin_unlock_irqrestore(&cfg->cfgdrv_lock, flags);
+
+ return err;
}
-static bcm_struct_cfgdev *
-wl_cfg80211_add_virtual_iface(struct wiphy *wiphy,
+static s32
#if defined(WL_CFG80211_P2P_DEV_IF)
- const char *name,
+wl_cfg80211_scan(struct wiphy *wiphy, struct cfg80211_scan_request *request)
#else
- char *name,
+wl_cfg80211_scan(struct wiphy *wiphy, struct net_device *ndev,
+ struct cfg80211_scan_request *request)
#endif /* WL_CFG80211_P2P_DEV_IF */
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 1, 0))
- unsigned char name_assign_type,
-#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 1, 0)) */
- enum nl80211_iftype type,
-#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 12, 0))
- u32 *flags,
-#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(4, 12, 0) */
- struct vif_params *params)
{
- u16 wl_iftype;
- u16 wl_mode;
- struct net_device *primary_ndev;
+ s32 err = 0;
struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
- struct wireless_dev *wdev;
-
- WL_DBG(("Enter iftype: %d\n", type));
- if (!cfg) {
- return ERR_PTR(-EINVAL);
- }
+#if defined(WL_CFG80211_P2P_DEV_IF)
+ struct net_device *ndev = wdev_to_wlc_ndev(request->wdev, cfg);
+#endif /* WL_CFG80211_P2P_DEV_IF */
- /* Use primary I/F for sending cmds down to firmware */
- primary_ndev = bcmcfg_to_prmry_ndev(cfg);
- if (unlikely(!wl_get_drv_status(cfg, READY, primary_ndev))) {
- WL_ERR(("device is not ready\n"));
- return ERR_PTR(-ENODEV);
- }
+ WL_DBG(("Enter\n"));
+ RETURN_EIO_IF_NOT_UP(cfg);
- if (!name) {
- WL_ERR(("Interface name not provided \n"));
- return ERR_PTR(-EINVAL);
- }
+#ifdef DHD_IFDEBUG
+#ifdef WL_CFG80211_P2P_DEV_IF
+ PRINT_WDEV_INFO(request->wdev);
+#else
+ PRINT_WDEV_INFO(ndev);
+#endif /* WL_CFG80211_P2P_DEV_IF */
+#endif /* DHD_IFDEBUG */
- if (cfg80211_to_wl_iftype(type, &wl_iftype, &wl_mode) < 0) {
- return ERR_PTR(-EINVAL);
+ if (ndev == bcmcfg_to_prmry_ndev(cfg)) {
+ if (wl_cfg_multip2p_operational(cfg)) {
+ WL_ERR(("wlan0 scan failed, p2p devices are operational"));
+ return -ENODEV;
+ }
}
+ err = wl_cfg80211_check_in4way(cfg, ndev, NO_SCAN_IN4WAY|NO_BTC_IN4WAY,
+ WL_EXT_STATUS_SCAN, NULL);
+ if (err)
+ return err;
- wdev = wl_cfg80211_add_if(cfg, primary_ndev, wl_iftype, name, NULL);
- if (unlikely(!wdev)) {
- return ERR_PTR(-ENODEV);
+ mutex_lock(&cfg->usr_sync);
+ err = __wl_cfg80211_scan(wiphy, ndev, request, NULL);
+ if (unlikely(err)) {
+ WL_ERR(("scan error (%d)\n", err));
}
- return wdev_to_cfgdev(wdev);
+ mutex_unlock(&cfg->usr_sync);
+#ifdef WL_DRV_AVOID_SCANCACHE
+ /* Reset roam cache after successful scan request */
+#endif /* WL_DRV_AVOID_SCANCACHE */
+ return err;
}
-static s32
-wl_cfg80211_del_ibss(struct wiphy *wiphy, struct wireless_dev *wdev)
+static s32 wl_set_rts(struct net_device *dev, u32 rts_threshold)
{
- WL_INFORM_MEM(("del ibss wdev_ptr:%p\n", wdev));
-#ifdef WLAIBSS_MCHAN
- /* AIBSS */
- return bcm_cfg80211_del_ibss_if(wiphy, wdev);
-#else
- /* Normal IBSS */
- return wl_cfg80211_del_iface(wiphy, wdev);
-#endif // endif
-}
+ s32 err = 0;
-s32
-wl_cfg80211_del_if(struct bcm_cfg80211 *cfg, struct net_device *primary_ndev,
- struct wireless_dev *wdev, char *ifname)
-{
- int ret = BCME_OK;
- mutex_lock(&cfg->if_sync);
- ret = _wl_cfg80211_del_if(cfg, primary_ndev, wdev, ifname);
- mutex_unlock(&cfg->if_sync);
- return ret;
+ err = wldev_iovar_setint(dev, "rtsthresh", rts_threshold);
+ if (unlikely(err)) {
+ WL_ERR(("Error (%d)\n", err));
+ return err;
+ }
+ return err;
}
-s32
-_wl_cfg80211_del_if(struct bcm_cfg80211 *cfg, struct net_device *primary_ndev,
- struct wireless_dev *wdev, char *ifname)
+static s32 wl_set_frag(struct net_device *dev, u32 frag_threshold)
{
- int ret = BCME_OK;
- s32 bssidx;
- struct wiphy *wiphy;
- u16 wl_mode;
- u16 wl_iftype;
- struct net_info *netinfo;
- dhd_pub_t *dhd;
- BCM_REFERENCE(dhd);
+ s32 err = 0;
- if (!cfg) {
- return -EINVAL;
+ err = wldev_iovar_setint_bsscfg(dev, "fragthresh", frag_threshold, 0);
+ if (unlikely(err)) {
+ WL_ERR(("Error (%d)\n", err));
+ return err;
}
+ return err;
+}
- dhd = (dhd_pub_t *)(cfg->pub);
-
- if (!wdev && ifname) {
- /* If only ifname is provided, fetch corresponding wdev ptr from our
- * internal data structure
- */
- wdev = wl_cfg80211_get_wdev_from_ifname(cfg, ifname);
- }
+static s32 wl_set_retry(struct net_device *dev, u32 retry, bool l)
+{
+ s32 err = 0;
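+ /* 'l' selects the long retry limit (WLC_SET_LRL); otherwise the short retry limit (WLC_SET_SRL) is used */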
+ u32 cmd = (l ? WLC_SET_LRL : WLC_SET_SRL);
- /* Check whether we have a valid wdev ptr */
- if (unlikely(!wdev)) {
- WL_ERR(("wdev not found. '%s' does not exists\n", ifname));
- return -ENODEV;
+ retry = htod32(retry);
+ err = wldev_ioctl_set(dev, cmd, &retry, sizeof(retry));
+ if (unlikely(err)) {
+ WL_ERR(("cmd (%d) , error (%d)\n", cmd, err));
+ return err;
}
+ return err;
+}
- WL_INFORM_MEM(("del vif. wdev cfg_iftype:%d\n", wdev->iftype));
+static s32 wl_cfg80211_set_wiphy_params(struct wiphy *wiphy, u32 changed)
+{
+ struct bcm_cfg80211 *cfg = (struct bcm_cfg80211 *)wiphy_priv(wiphy);
+ struct net_device *ndev = bcmcfg_to_prmry_ndev(cfg);
+ s32 err = 0;
- wiphy = wdev->wiphy;
-#ifdef WL_CFG80211_P2P_DEV_IF
- if (wdev->iftype == NL80211_IFTYPE_P2P_DEVICE) {
- /* p2p discovery would be de-initialized in stop p2p
- * device context/from other virtual i/f creation context
- * so netinfo list may not have any node corresponding to
- * discovery I/F. Handle it before bssidx check.
- */
- ret = wl_cfg80211_p2p_if_del(wiphy, wdev);
- if (unlikely(ret)) {
- goto exit;
- } else {
- /* success case. return from here */
- if (cfg->vif_count) {
- cfg->vif_count--;
- }
- return BCME_OK;
- }
- }
-#endif /* WL_CFG80211_P2P_DEV_IF */
-
- if ((netinfo = wl_get_netinfo_by_wdev(cfg, wdev)) == NULL) {
- WL_ERR(("Find netinfo from wdev %p failed\n", wdev));
- ret = -ENODEV;
- goto exit;
+ RETURN_EIO_IF_NOT_UP(cfg);
+ WL_DBG(("Enter\n"));
+ if (changed & WIPHY_PARAM_RTS_THRESHOLD &&
+ (cfg->conf->rts_threshold != wiphy->rts_threshold)) {
+ cfg->conf->rts_threshold = wiphy->rts_threshold;
+ err = wl_set_rts(ndev, cfg->conf->rts_threshold);
+ if (err != BCME_OK)
+ return err;
}
-
- if (!wdev->netdev) {
- WL_ERR(("ndev null! \n"));
- } else {
- /* Disable tx before del */
- netif_tx_disable(wdev->netdev);
- }
-
- wl_iftype = netinfo->iftype;
- wl_mode = wl_iftype_to_mode(wl_iftype);
- bssidx = netinfo->bssidx;
- WL_INFORM_MEM(("[IFDEL] cfg_iftype:%d wl_iftype:%d mode:%d bssidx:%d\n",
- wdev->iftype, wl_iftype, wl_mode, bssidx));
-
- /* Do pre-interface del ops */
- wl_cfg80211_iface_state_ops(wdev, WL_IF_DELETE_REQ, wl_iftype, wl_mode);
-
- switch (wl_iftype) {
- case WL_IF_TYPE_P2P_GO:
- case WL_IF_TYPE_P2P_GC:
- case WL_IF_TYPE_AP:
- case WL_IF_TYPE_STA:
- case WL_IF_TYPE_NAN:
- ret = wl_cfg80211_del_iface(wiphy, wdev);
- break;
- case WL_IF_TYPE_IBSS:
- ret = wl_cfg80211_del_ibss(wiphy, wdev);
- break;
-
- default:
- WL_ERR(("Unsupported interface type\n"));
- ret = BCME_ERROR;
+ if (changed & WIPHY_PARAM_FRAG_THRESHOLD &&
+ (cfg->conf->frag_threshold != wiphy->frag_threshold)) {
+ cfg->conf->frag_threshold = wiphy->frag_threshold;
+ err = wl_set_frag(ndev, cfg->conf->frag_threshold);
+ if (err != BCME_OK)
+ return err;
}
-
-exit:
- if (ret == BCME_OK) {
- /* Successful case */
- if (cfg->vif_count) {
- cfg->vif_count--;
- }
- wl_cfg80211_iface_state_ops(primary_ndev->ieee80211_ptr,
- WL_IF_DELETE_DONE, wl_iftype, wl_mode);
-#ifdef WL_NAN
- if (!((cfg->nancfg.mac_rand) && (wl_iftype == WL_IF_TYPE_NAN)))
-#endif /* WL_NAN */
- {
- wl_release_vif_macaddr(cfg, wdev->netdev->dev_addr, wl_iftype);
- }
- WL_INFORM_MEM(("vif deleted. vif_count:%d\n", cfg->vif_count));
- } else {
- if (!wdev->netdev) {
- WL_ERR(("ndev null! \n"));
- } else {
- /* IF del failed. revert back tx queue status */
- netif_tx_start_all_queues(wdev->netdev);
- }
-
- /* Skip generating log files and sending HANG event
- * if driver state is not READY
- */
- if (wl_get_drv_status(cfg, READY, bcmcfg_to_prmry_ndev(cfg))) {
- SUPP_LOG(("IF_DEL fail. err:%d\n", ret));
- wl_flush_fw_log_buffer(primary_ndev, FW_LOGSET_MASK_ALL);
- /* IF dongle is down due to previous hang or other conditions, sending
- * one more hang notification is not needed.
- */
- if (dhd_query_bus_erros(dhd) || (ret == BCME_DONGLE_DOWN)) {
- goto end;
- }
- dhd->iface_op_failed = TRUE;
-#if defined(DHD_FW_COREDUMP)
- if (dhd->memdump_enabled && (ret != -EBADTYPE)) {
- dhd->memdump_type = DUMP_TYPE_IFACE_OP_FAILURE;
- dhd_bus_mem_dump(dhd);
- }
-#endif /* DHD_FW_COREDUMP */
- WL_ERR(("Notify hang event to upper layer \n"));
- dhd->hang_reason = HANG_REASON_IFACE_DEL_FAILURE;
- net_os_send_hang_message(bcmcfg_to_prmry_ndev(cfg));
+ if (changed & WIPHY_PARAM_RETRY_LONG &&
+ (cfg->conf->retry_long != wiphy->retry_long)) {
+ cfg->conf->retry_long = wiphy->retry_long;
+ err = wl_set_retry(ndev, cfg->conf->retry_long, true);
+ if (err != BCME_OK)
+ return err;
+ }
+ if (changed & WIPHY_PARAM_RETRY_SHORT &&
+ (cfg->conf->retry_short != wiphy->retry_short)) {
+ cfg->conf->retry_short = wiphy->retry_short;
+ err = wl_set_retry(ndev, cfg->conf->retry_short, false);
+ if (err != BCME_OK) {
+ return err;
}
}
-end:
- return ret;
-}
-static s32
-wl_cfg80211_del_virtual_iface(struct wiphy *wiphy, bcm_struct_cfgdev *cfgdev)
+ return err;
+}
+static chanspec_t
+channel_to_chanspec(struct wiphy *wiphy, struct net_device *dev, u32 channel, u32 bw_cap)
{
struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
- struct wireless_dev *wdev = cfgdev_to_wdev(cfgdev);
- int ret = BCME_OK;
- u16 wl_iftype;
- u16 wl_mode;
- struct net_device *primary_ndev;
-
- if (!cfg) {
- return -EINVAL;
+ u8 *buf = NULL;
+ wl_uint32_list_t *list;
+ int err = BCME_OK;
+ chanspec_t c = 0, ret_c = 0;
+ int bw = 0, tmp_bw = 0;
+ int i;
+ u32 tmp_c;
+ gfp_t kflags = in_atomic() ? GFP_ATOMIC : GFP_KERNEL;
+#define LOCAL_BUF_SIZE 1024
+ buf = (u8 *) kzalloc(LOCAL_BUF_SIZE, kflags);
+ if (!buf) {
+ WL_ERR(("buf memory alloc failed\n"));
+ goto exit;
}
- primary_ndev = bcmcfg_to_prmry_ndev(cfg);
- wdev = cfgdev_to_wdev(cfgdev);
- if (!wdev) {
- WL_ERR(("wdev null"));
- return -ENODEV;
+ err = wldev_iovar_getbuf_bsscfg(dev, "chanspecs", NULL,
+ 0, buf, LOCAL_BUF_SIZE, 0, &cfg->ioctl_buf_sync);
+ if (err != BCME_OK) {
+ WL_ERR(("get chanspecs failed with %d\n", err));
+ goto exit;
}
- WL_DBG(("Enter wdev:%p iftype: %d\n", wdev, wdev->iftype));
- if (cfg80211_to_wl_iftype(wdev->iftype, &wl_iftype, &wl_mode) < 0) {
- WL_ERR(("Wrong iftype: %d\n", wdev->iftype));
- return -ENODEV;
- }
+ list = (wl_uint32_list_t *)(void *)buf;
+ for (i = 0; i < dtoh32(list->count); i++) {
+ c = dtoh32(list->element[i]);
+ if (channel <= CH_MAX_2G_CHANNEL) {
+ if (!CHSPEC_IS20(c))
+ continue;
+ if (channel == CHSPEC_CHANNEL(c)) {
+ ret_c = c;
+ bw = 20;
+ goto exit;
+ }
+ }
+ tmp_c = wf_chspec_ctlchan(c);
+ tmp_bw = bw2cap[CHSPEC_BW(c) >> WL_CHANSPEC_BW_SHIFT];
+ if (tmp_c != channel)
+ continue;
- if ((ret = wl_cfg80211_del_if(cfg, primary_ndev,
- wdev, NULL)) < 0) {
- WL_ERR(("IF del failed\n"));
+ if ((tmp_bw > bw) && (tmp_bw <= bw_cap)) {
+ bw = tmp_bw;
+ ret_c = c;
+ if (bw == bw_cap)
+ goto exit;
+ }
}
-
- return ret;
+exit:
+ if (buf)
+ kfree(buf);
+#undef LOCAL_BUF_SIZE
+ WL_INFORM(("return chanspec %x %d\n", ret_c, bw));
+ return ret_c;
}
-static s32
-wl_cfg80211_change_p2prole(struct wiphy *wiphy, struct net_device *ndev, enum nl80211_iftype type)
+void
+wl_cfg80211_ibss_vsie_set_buffer(struct net_device *dev, vndr_ie_setbuf_t *ibss_vsie,
+ int ibss_vsie_len)
{
- s32 wlif_type;
- s32 mode = 0;
- s32 index;
- s32 err;
- s32 conn_idx = -1;
- chanspec_t chspec;
- struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
- dhd_pub_t *dhd = (dhd_pub_t *)(cfg->pub);
-
- WL_INFORM_MEM(("Enter. current_role:%d new_role:%d \n", ndev->ieee80211_ptr->iftype, type));
+ struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
- if (!cfg->p2p || !wl_cfgp2p_vif_created(cfg)) {
- WL_ERR(("P2P not initialized \n"));
- return -EINVAL;
+ if (cfg != NULL && ibss_vsie != NULL) {
+ if (cfg->ibss_vsie != NULL) {
+ kfree(cfg->ibss_vsie);
+ }
+ cfg->ibss_vsie = ibss_vsie;
+ cfg->ibss_vsie_len = ibss_vsie_len;
}
+}
- if (!is_p2p_group_iface(ndev->ieee80211_ptr)) {
- WL_ERR(("Wrong if type \n"));
- return -EINVAL;
+static void
+wl_cfg80211_ibss_vsie_free(struct bcm_cfg80211 *cfg)
+{
+ /* free & initiralize VSIE (Vendor Specific IE) */
+ if (cfg->ibss_vsie != NULL) {
+ kfree(cfg->ibss_vsie);
+ cfg->ibss_vsie = NULL;
+ cfg->ibss_vsie_len = 0;
}
+}
- /* Abort any on-going scans to avoid race condition issues */
- wl_cfg80211_cancel_scan(cfg);
-
- index = wl_get_bssidx_by_wdev(cfg, ndev->ieee80211_ptr);
- if (index < 0) {
- WL_ERR(("Find bsscfg index from ndev(%p) failed\n", ndev));
- return BCME_ERROR;
- }
- if (wl_cfgp2p_find_type(cfg, index, &conn_idx) != BCME_OK) {
- return BCME_ERROR;
- }
+s32
+wl_cfg80211_ibss_vsie_delete(struct net_device *dev)
+{
+ struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
+ char *ioctl_buf = NULL;
+ s32 ret = BCME_OK, bssidx;
- /* In concurrency case, STA may be already associated in a particular
- * channel. so retrieve the current channel of primary interface and
- * then start the virtual interface on that.
- */
- chspec = wl_cfg80211_get_shared_freq(wiphy);
- if (type == NL80211_IFTYPE_P2P_GO) {
- /* Dual p2p doesn't support multiple P2PGO interfaces,
- * p2p_go_count is the counter for GO creation
- * requests.
- */
- if ((cfg->p2p->p2p_go_count > 0) && (type == NL80211_IFTYPE_P2P_GO)) {
- WL_ERR(("FW does not support multiple GO\n"));
- return BCME_ERROR;
- }
- mode = WL_MODE_AP;
- wlif_type = WL_P2P_IF_GO;
- dhd->op_mode &= ~DHD_FLAG_P2P_GC_MODE;
- dhd->op_mode |= DHD_FLAG_P2P_GO_MODE;
- } else {
- wlif_type = WL_P2P_IF_CLIENT;
- /* for GO */
- if (wl_get_mode_by_netdev(cfg, ndev) == WL_MODE_AP) {
- WL_INFORM_MEM(("Downgrading P2P GO to cfg_iftype:%d \n", type));
- wl_add_remove_eventmsg(ndev, WLC_E_PROBREQ_MSG, false);
- cfg->p2p->p2p_go_count--;
- /* disable interface before bsscfg free */
- err = wl_cfgp2p_ifdisable(cfg, wl_to_p2p_bss_macaddr(cfg, conn_idx));
- /* if fw doesn't support "ifdis",
- * do not wait for link down of ap mode
- */
- if (err == 0) {
- WL_DBG(("Wait for Link Down event for GO !!!\n"));
- wait_for_completion_timeout(&cfg->iface_disable,
- msecs_to_jiffies(500));
- } else if (err != BCME_UNSUPPORTED) {
- msleep(300);
- }
+ if (cfg != NULL && cfg->ibss_vsie != NULL) {
+ ioctl_buf = kmalloc(WLC_IOCTL_MEDLEN, GFP_KERNEL);
+ if (!ioctl_buf) {
+ WL_ERR(("ioctl memory alloc failed\n"));
+ return -ENOMEM;
}
- }
- wl_set_p2p_status(cfg, IF_CHANGING);
- wl_clr_p2p_status(cfg, IF_CHANGED);
- wl_cfgp2p_ifchange(cfg, wl_to_p2p_bss_macaddr(cfg, conn_idx),
- htod32(wlif_type), chspec, conn_idx);
- wait_event_interruptible_timeout(cfg->netif_change_event,
- (wl_get_p2p_status(cfg, IF_CHANGED) == true),
- msecs_to_jiffies(MAX_WAIT_TIME));
+ if ((bssidx = wl_get_bssidx_by_wdev(cfg, dev->ieee80211_ptr)) < 0) {
+ WL_ERR(("Find index failed\n"));
+ ret = BCME_ERROR;
+ goto end;
+ }
+ /* change the command from "add" to "del" */
+ strncpy(cfg->ibss_vsie->cmd, "del", VNDR_IE_CMD_LEN - 1);
+ cfg->ibss_vsie->cmd[VNDR_IE_CMD_LEN - 1] = '\0';
- wl_clr_p2p_status(cfg, IF_CHANGING);
- wl_clr_p2p_status(cfg, IF_CHANGED);
+ ret = wldev_iovar_setbuf_bsscfg(dev, "vndr_ie",
+ cfg->ibss_vsie, cfg->ibss_vsie_len,
+ ioctl_buf, WLC_IOCTL_MEDLEN, bssidx, &cfg->ioctl_buf_sync);
+ WL_ERR(("ret=%d\n", ret));
- if (mode == WL_MODE_AP) {
- wl_set_drv_status(cfg, CONNECTED, ndev);
+ if (ret == BCME_OK) {
+ /* free & initiralize VSIE */
+ kfree(cfg->ibss_vsie);
+ cfg->ibss_vsie = NULL;
+ cfg->ibss_vsie_len = 0;
+ }
+end:
+ if (ioctl_buf) {
+ kfree(ioctl_buf);
+ }
}
- return BCME_OK;
+ return ret;
}
-static s32
-wl_cfg80211_change_virtual_iface(struct wiphy *wiphy, struct net_device *ndev,
- enum nl80211_iftype type,
-#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 12, 0))
- u32 *flags,
-#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(4, 12, 0) */
- struct vif_params *params)
+#ifdef WLAIBSS_MCHAN
+static bcm_struct_cfgdev*
+bcm_cfg80211_add_ibss_if(struct wiphy *wiphy, char *name)
{
- s32 infra = 1;
- s32 err = BCME_OK;
- u16 wl_iftype;
- u16 wl_mode;
+ int err = 0;
struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
- struct net_info *netinfo = NULL;
- dhd_pub_t *dhd = (dhd_pub_t *)(cfg->pub);
- struct net_device *primary_ndev;
+ struct wireless_dev* wdev = NULL;
+ struct net_device *new_ndev = NULL;
+ struct net_device *primary_ndev = NULL;
+ s32 timeout;
+ wl_aibss_if_t aibss_if;
+ wl_if_event_info *event = NULL;
- if (!dhd)
- return -EINVAL;
+ if (cfg->ibss_cfgdev != NULL) {
+ WL_ERR(("IBSS interface %s already exists\n", name));
+ return NULL;
+ }
- WL_INFORM_MEM(("[%s] Enter. current cfg_iftype:%d new cfg_iftype:%d \n",
- ndev->name, ndev->ieee80211_ptr->iftype, type));
+ WL_ERR(("Try to create IBSS interface %s\n", name));
primary_ndev = bcmcfg_to_prmry_ndev(cfg);
+ /* generate a new MAC address for the IBSS interface */
+ get_primary_mac(cfg, &cfg->ibss_if_addr);
+ cfg->ibss_if_addr.octet[4] ^= 0x40;
+ memset(&aibss_if, sizeof(aibss_if), 0);
+ memcpy(&aibss_if.addr, &cfg->ibss_if_addr, sizeof(aibss_if.addr));
+ aibss_if.chspec = 0;
+ aibss_if.len = sizeof(aibss_if);
- if (cfg80211_to_wl_iftype(type, &wl_iftype, &wl_mode) < 0) {
- WL_ERR(("Unknown role \n"));
- return -EINVAL;
- }
-
- mutex_lock(&cfg->if_sync);
- netinfo = wl_get_netinfo_by_wdev(cfg, ndev->ieee80211_ptr);
- if (unlikely(!netinfo)) {
-#ifdef WL_STATIC_IF
- if (IS_CFG80211_STATIC_IF(cfg, ndev)) {
- /* Incase of static interfaces, the netinfo will be
- * allocated only when FW interface is initialized. So
- * store the value and use it during initialization.
- */
- WL_INFORM_MEM(("skip change vif for static if\n"));
- ndev->ieee80211_ptr->iftype = type;
- err = BCME_OK;
- } else
-#endif /* WL_STATIC_IF */
- {
- WL_ERR(("netinfo not found \n"));
- err = -ENODEV;
- }
+ cfg->bss_pending_op = TRUE;
+ memset(&cfg->if_event_info, 0, sizeof(cfg->if_event_info));
+ err = wldev_iovar_setbuf(primary_ndev, "aibss_ifadd", &aibss_if,
+ sizeof(aibss_if), cfg->ioctl_buf, WLC_IOCTL_MAXLEN, &cfg->ioctl_buf_sync);
+ if (err) {
+ WL_ERR(("IOVAR aibss_ifadd failed with error %d\n", err));
goto fail;
}
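+ /* Wait for the firmware IF_ADD event; the event handler clears bss_pending_op and wakes this wait queue */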
+ timeout = wait_event_interruptible_timeout(cfg->netif_change_event,
+ !cfg->bss_pending_op, msecs_to_jiffies(MAX_WAIT_TIME));
+ if (timeout <= 0 || cfg->bss_pending_op)
+ goto fail;
- /* perform pre-if-change tasks */
- wl_cfg80211_iface_state_ops(ndev->ieee80211_ptr,
- WL_IF_CHANGE_REQ, wl_iftype, wl_mode);
-
- switch (type) {
- case NL80211_IFTYPE_ADHOC:
- infra = 0;
- break;
- case NL80211_IFTYPE_STATION:
- /* Supplicant sets iftype to STATION while removing p2p GO */
- if (ndev->ieee80211_ptr->iftype == NL80211_IFTYPE_P2P_GO) {
- /* Downgrading P2P GO */
- err = wl_cfg80211_change_p2prole(wiphy, ndev, type);
- if (unlikely(err)) {
- WL_ERR(("P2P downgrade failed \n"));
- }
- } else if (ndev->ieee80211_ptr->iftype == NL80211_IFTYPE_AP) {
- /* Downgrade role from AP to STA */
- if ((err = wl_cfg80211_add_del_bss(cfg, ndev,
- netinfo->bssidx, wl_iftype, 0, NULL)) < 0) {
- WL_ERR(("AP-STA Downgrade failed \n"));
- goto fail;
- }
- }
- break;
- case NL80211_IFTYPE_AP:
- /* intentional fall through */
- case NL80211_IFTYPE_AP_VLAN:
- {
- if (!wl_get_drv_status(cfg, AP_CREATED, ndev) &&
- wl_get_drv_status(cfg, READY, ndev)) {
- err = wl_cfg80211_set_ap_role(cfg, ndev);
- if (unlikely(err)) {
- WL_ERR(("set ap role failed!\n"));
- goto fail;
- }
- } else {
- WL_INFORM_MEM(("AP_CREATED bit set. Skip role change\n"));
- }
- break;
- }
- case NL80211_IFTYPE_P2P_GO:
- /* Intentional fall through */
- case NL80211_IFTYPE_P2P_CLIENT:
- infra = 1;
- err = wl_cfg80211_change_p2prole(wiphy, ndev, type);
- break;
- case NL80211_IFTYPE_MONITOR:
- case NL80211_IFTYPE_WDS:
- case NL80211_IFTYPE_MESH_POINT:
- /* Intentional fall through */
- default:
- WL_ERR(("Unsupported type:%d \n", type));
- err = -EINVAL;
+ event = &cfg->if_event_info;
+ /* By calling wl_cfg80211_allocate_if (dhd_allocate_if eventually) we give the control
+ * over this net_device interface to dhd_linux, hence the interface is managed by dhd_linux
+ * and will be freed by dhd_detach unless it gets unregistered before that. The
+ * wireless_dev instance new_ndev->ieee80211_ptr associated with this net_device will
+ * be freed by wl_dealloc_netinfo
+ */
+ new_ndev = wl_cfg80211_allocate_if(cfg, event->ifidx, event->name,
+ event->mac, event->bssidx, event->name);
+ if (new_ndev == NULL)
goto fail;
- }
-
- if (wl_get_drv_status(cfg, READY, ndev)) {
- err = wldev_ioctl_set(ndev, WLC_SET_INFRA, &infra, sizeof(s32));
- if (err < 0) {
- WL_ERR(("SET INFRA/IBSS error %d\n", err));
- goto fail;
- }
- }
+ wdev = kzalloc(sizeof(*wdev), GFP_KERNEL);
+ if (wdev == NULL)
+ goto fail;
+ wdev->wiphy = wiphy;
+ wdev->iftype = NL80211_IFTYPE_ADHOC;
+ wdev->netdev = new_ndev;
+ new_ndev->ieee80211_ptr = wdev;
+ SET_NETDEV_DEV(new_ndev, wiphy_dev(wdev->wiphy));
- wl_cfg80211_iface_state_ops(primary_ndev->ieee80211_ptr,
- WL_IF_CHANGE_DONE, wl_iftype, wl_mode);
+ /* rtnl lock must have been acquired, if this is not the case, wl_cfg80211_register_if
+ * needs to be modified to take one parameter (bool need_rtnl_lock)
+ */
+ ASSERT_RTNL();
+ if (wl_cfg80211_register_if(cfg, event->ifidx, new_ndev, FALSE) != BCME_OK)
+ goto fail;
- /* Update new iftype in relevant structures */
- ndev->ieee80211_ptr->iftype = type;
- netinfo->iftype = wl_iftype;
- WL_INFORM_MEM(("[%s] cfg_iftype changed to %d\n", ndev->name, type));
-#ifdef WL_EXT_IAPSTA
- wl_ext_iapsta_update_iftype(ndev, netinfo->ifidx, wl_iftype);
-#endif
+ wl_alloc_netinfo(cfg, new_ndev, wdev, WL_MODE_IBSS, PM_ENABLE, event->bssidx);
+ cfg->ibss_cfgdev = ndev_to_cfgdev(new_ndev);
+ WL_ERR(("IBSS interface %s created\n", new_ndev->name));
+ return cfg->ibss_cfgdev;
fail:
+ WL_ERR(("failed to create IBSS interface %s \n", name));
+ cfg->bss_pending_op = FALSE;
+ if (new_ndev)
+ wl_cfg80211_remove_if(cfg, event->ifidx, new_ndev, FALSE);
+ if (wdev)
+ kfree(wdev);
+ return NULL;
+}
+
+static s32
+bcm_cfg80211_del_ibss_if(struct wiphy *wiphy, bcm_struct_cfgdev *cfgdev)
+{
+ int err = 0;
+ struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+ struct net_device *ndev = NULL;
+ struct net_device *primary_ndev = NULL;
+ s32 timeout;
+
+ if (!cfgdev || cfg->ibss_cfgdev != cfgdev || ETHER_ISNULLADDR(&cfg->ibss_if_addr.octet))
+ return -EINVAL;
+ ndev = (struct net_device *)cfgdev_to_ndev(cfg->ibss_cfgdev);
+ primary_ndev = bcmcfg_to_prmry_ndev(cfg);
+
+ cfg->bss_pending_op = TRUE;
+ memset(&cfg->if_event_info, 0, sizeof(cfg->if_event_info));
+ err = wldev_iovar_setbuf(primary_ndev, "aibss_ifdel", &cfg->ibss_if_addr,
+ sizeof(cfg->ibss_if_addr), cfg->ioctl_buf, WLC_IOCTL_MAXLEN, &cfg->ioctl_buf_sync);
if (err) {
- wl_flush_fw_log_buffer(ndev, FW_LOGSET_MASK_ALL);
+ WL_ERR(("IOVAR aibss_ifdel failed with error %d\n", err));
+ goto fail;
}
- mutex_unlock(&cfg->if_sync);
- return err;
+ timeout = wait_event_interruptible_timeout(cfg->netif_change_event,
+ !cfg->bss_pending_op, msecs_to_jiffies(MAX_WAIT_TIME));
+ if (timeout <= 0 || cfg->bss_pending_op) {
+ WL_ERR(("timeout in waiting IF_DEL event\n"));
+ goto fail;
+ }
+
+ wl_cfg80211_remove_if(cfg, cfg->if_event_info.ifidx, ndev, FALSE);
+ cfg->ibss_cfgdev = NULL;
+ return 0;
+
+fail:
+ cfg->bss_pending_op = FALSE;
+ return -1;
}
+#endif /* WLAIBSS_MCHAN */
+#ifdef WLMESH
s32
-wl_cfg80211_notify_ifadd(struct net_device *dev,
- int ifidx, char *name, uint8 *mac, uint8 bssidx, uint8 role)
+wl_cfg80211_interface_ops(struct bcm_cfg80211 *cfg,
+ struct net_device *ndev, s32 bsscfg_idx,
+ enum nl80211_iftype iface_type, s32 del, u8 *addr)
{
- bool ifadd_expected = FALSE;
- struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
- bool bss_pending_op = TRUE;
-
- /* P2P may send WLC_E_IF_ADD and/or WLC_E_IF_CHANGE during IF updating ("p2p_ifupd")
- * redirect the IF_ADD event to ifchange as it is not a real "new" interface
- */
- if (wl_get_p2p_status(cfg, IF_CHANGING))
- return wl_cfg80211_notify_ifchange(dev, ifidx, name, mac, bssidx);
+ wl_interface_create_t iface;
+ s32 ret;
+ wl_interface_info_t *info;
- /* Okay, we are expecting IF_ADD (as IF_ADDING is true) */
- if (wl_get_p2p_status(cfg, IF_ADDING)) {
- ifadd_expected = TRUE;
- wl_clr_p2p_status(cfg, IF_ADDING);
- } else if (cfg->bss_pending_op) {
- ifadd_expected = TRUE;
- bss_pending_op = FALSE;
- }
+ bzero(&iface, sizeof(wl_interface_create_t));
- if (ifadd_expected) {
- wl_if_event_info *if_event_info = &cfg->if_event_info;
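+ /* Build the interface_create request; the AP/STA role is selected via the flags field */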
+ iface.ver = WL_INTERFACE_CREATE_VER;
- if_event_info->valid = TRUE;
- if_event_info->ifidx = ifidx;
- if_event_info->bssidx = bssidx;
- if_event_info->role = role;
- strlcpy(if_event_info->name, name, sizeof(if_event_info->name));
- if_event_info->name[IFNAMSIZ - 1] = '\0';
- if (mac)
- memcpy(if_event_info->mac, mac, ETHER_ADDR_LEN);
+ if (iface_type == NL80211_IFTYPE_AP)
+ iface.flags = WL_INTERFACE_CREATE_AP;
+ else
+ iface.flags = WL_INTERFACE_CREATE_STA;
- /* Update bss pendig operation status */
- if (!bss_pending_op) {
- cfg->bss_pending_op = FALSE;
+ if (del) {
+ ret = wldev_iovar_setbuf(ndev, "interface_remove",
+ NULL, 0, cfg->ioctl_buf, WLC_IOCTL_MEDLEN, NULL);
+ } else {
+ if (addr) {
+ memcpy(&iface.mac_addr.octet, addr, ETH_ALEN);
+ iface.flags |= WL_INTERFACE_MAC_USE;
+ }
+ ret = wldev_iovar_getbuf(ndev, "interface_create",
+ &iface, sizeof(wl_interface_create_t),
+ cfg->ioctl_buf, WLC_IOCTL_MAXLEN, &cfg->ioctl_buf_sync);
+ if (ret == 0) {
+ /* success */
+ info = (wl_interface_info_t *)cfg->ioctl_buf;
+ WL_DBG(("wl interface create success!! bssidx:%d \n",
+ info->bsscfgidx));
}
- WL_INFORM_MEM(("IF_ADD ifidx:%d bssidx:%d role:%d\n",
- ifidx, bssidx, role));
- OSL_SMP_WMB();
- wake_up_interruptible(&cfg->netif_change_event);
- return BCME_OK;
}
- return BCME_ERROR;
-}
+ if (ret < 0)
+ WL_ERR(("Interface %s failed!! ret %d\n",
+ del ? "remove" : "create", ret));
+ return ret;
+}
+#else
s32
-wl_cfg80211_notify_ifdel(struct net_device *dev, int ifidx, char *name, uint8 *mac, uint8 bssidx)
+wl_cfg80211_interface_ops(struct bcm_cfg80211 *cfg,
+ struct net_device *ndev, s32 bsscfg_idx,
+ enum nl80211_iftype iface_type, s32 del, u8 *addr)
{
- bool ifdel_expected = FALSE;
- struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
- wl_if_event_info *if_event_info = &cfg->if_event_info;
- bool bss_pending_op = TRUE;
+ s32 ret;
+ struct wl_interface_create_v2 iface;
+ wl_interface_create_v3_t iface_v3;
+ struct wl_interface_info_v1 *info;
+ wl_interface_info_v2_t *info_v2;
+ enum wl_interface_type iftype;
+ uint32 ifflags;
+ bool use_iface_info_v2 = false;
+ u8 ioctl_buf[WLC_IOCTL_SMLEN];
- if (wl_get_p2p_status(cfg, IF_DELETING)) {
- ifdel_expected = TRUE;
- wl_clr_p2p_status(cfg, IF_DELETING);
- } else if (cfg->bss_pending_op) {
- ifdel_expected = TRUE;
- bss_pending_op = FALSE;
+ if (del) {
+ ret = wldev_iovar_setbuf(ndev, "interface_remove",
+ NULL, 0, ioctl_buf, sizeof(ioctl_buf), NULL);
+ if (unlikely(ret))
+ WL_ERR(("Interface remove failed!! ret %d\n", ret));
+ return ret;
}
- if (ifdel_expected) {
- if_event_info->valid = TRUE;
- if_event_info->ifidx = ifidx;
- if_event_info->bssidx = bssidx;
- /* Update bss pendig operation status */
- if (!bss_pending_op) {
- cfg->bss_pending_op = FALSE;
- }
- WL_INFORM_MEM(("IF_DEL ifidx:%d bssidx:%d\n", ifidx, bssidx));
- OSL_SMP_WMB();
- wake_up_interruptible(&cfg->netif_change_event);
- return BCME_OK;
+ /* Interface create */
+ bzero(&iface, sizeof(iface));
+ /*
+ * The flags field is still used along with iftype in order to keep older FW versions
+ * working with the latest app changes.
+ */
+ if (iface_type == NL80211_IFTYPE_AP) {
+ iftype = WL_INTERFACE_TYPE_AP;
+ ifflags = WL_INTERFACE_CREATE_AP;
+ } else {
+ iftype = WL_INTERFACE_TYPE_STA;
+ ifflags = WL_INTERFACE_CREATE_STA;
+ }
+ if (addr) {
+ ifflags |= WL_INTERFACE_MAC_USE;
}
- return BCME_ERROR;
-}
+ /* Pass ver = 0 for fetching the interface_create iovar version */
+ ret = wldev_iovar_getbuf(ndev, "interface_create",
+ &iface, sizeof(struct wl_interface_create_v2),
+ ioctl_buf, sizeof(ioctl_buf), NULL);
+ if (ret == BCME_UNSUPPORTED) {
+ WL_ERR(("interface_create iovar not supported\n"));
+ return ret;
+ } else if ((ret == 0) && *((uint32 *)ioctl_buf) == WL_INTERFACE_CREATE_VER_3) {
+ WL_DBG(("interface_create version 3\n"));
+ use_iface_info_v2 = true;
+ bzero(&iface_v3, sizeof(wl_interface_create_v3_t));
+ iface_v3.ver = WL_INTERFACE_CREATE_VER_3;
+ iface_v3.iftype = iftype;
+ iface_v3.flags = ifflags;
+ if (addr) {
+ memcpy(&iface_v3.mac_addr.octet, addr, ETH_ALEN);
+ }
+ ret = wldev_iovar_getbuf(ndev, "interface_create",
+ &iface_v3, sizeof(wl_interface_create_v3_t),
+ ioctl_buf, sizeof(ioctl_buf), NULL);
+ } else {
+#if 0
+ /* On any other error, attempt with iovar version 2 */
+ WL_DBG(("interface_create version 2. get_ver:%d\n", ret));
+ iface.ver = WL_INTERFACE_CREATE_VER_2;
+ iface.iftype = iftype;
+ iface.flags = ifflags;
+ if (addr) {
+ memcpy(&iface.mac_addr.octet, addr, ETH_ALEN);
+ }
+ ret = wldev_iovar_getbuf(ndev, "interface_create",
+ &iface, sizeof(struct wl_interface_create_v2),
+ ioctl_buf, sizeof(ioctl_buf), NULL);
+#endif
+ }
-s32
-wl_cfg80211_notify_ifchange(struct net_device * dev, int ifidx, char *name, uint8 *mac,
- uint8 bssidx)
-{
- struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
+ if (unlikely(ret)) {
+ WL_ERR(("Interface create failed!! ret %d\n", ret));
+ return ret;
+ }
- if (wl_get_p2p_status(cfg, IF_CHANGING)) {
- wl_set_p2p_status(cfg, IF_CHANGED);
- OSL_SMP_WMB();
- wake_up_interruptible(&cfg->netif_change_event);
- return BCME_OK;
+ /* success case */
+ if (use_iface_info_v2 == true) {
+ info_v2 = (wl_interface_info_v2_t *)ioctl_buf;
+ ret = info_v2->bsscfgidx;
+ } else {
+ /* Use v1 struct */
+ info = (struct wl_interface_info_v1 *)ioctl_buf;
+ ret = info->bsscfgidx;
}
- return BCME_ERROR;
+ WL_DBG(("wl interface create success!! bssidx:%d \n", ret));
+ return ret;
}
+#endif /* WLMESH */
-static s32 wl_set_rts(struct net_device *dev, u32 rts_threshold)
+bool
+wl_customer6_legacy_chip_check(struct bcm_cfg80211 *cfg,
+ struct net_device *ndev)
{
- s32 err = 0;
+ u32 chipnum;
+ wlc_rev_info_t revinfo;
+ int ret;
- err = wldev_iovar_setint(dev, "rtsthresh", rts_threshold);
- if (unlikely(err)) {
- WL_ERR(("Error (%d)\n", err));
- return err;
+ /* Get the device rev info */
+ memset(&revinfo, 0, sizeof(revinfo));
+ ret = wldev_ioctl_get(ndev, WLC_GET_REVINFO, &revinfo, sizeof(revinfo));
+ if (ret < 0) {
+ WL_ERR(("%s: GET revinfo FAILED. ret:%d\n", __FUNCTION__, ret));
+ ASSERT(0);
+ return false;
}
- return err;
+
+ WL_DBG(("%s: GET_REVINFO device 0x%x, vendor 0x%x, chipnum 0x%x\n", __FUNCTION__,
+ dtoh32(revinfo.deviceid), dtoh32(revinfo.vendorid), dtoh32(revinfo.chipnum)));
+ chipnum = revinfo.chipnum;
+ if ((chipnum == BCM4350_CHIP_ID) || (chipnum == BCM4355_CHIP_ID) ||
+ (chipnum == BCM4345_CHIP_ID) || (chipnum == BCM43430_CHIP_ID) ||
+ (chipnum == BCM43362_CHIP_ID)) {
+ /* WAR required */
+ return true;
+ }
+
+ return false;
}
-static s32 wl_set_frag(struct net_device *dev, u32 frag_threshold)
-{
- s32 err = 0;
-
- err = wldev_iovar_setint_bsscfg(dev, "fragthresh", frag_threshold, 0);
- if (unlikely(err)) {
- WL_ERR(("Error (%d)\n", err));
- return err;
- }
- return err;
-}
-
-static s32 wl_set_retry(struct net_device *dev, u32 retry, bool l)
+void
+wl_bss_iovar_war(struct bcm_cfg80211 *cfg,
+ struct net_device *ndev, s32 *val)
{
- s32 err = 0;
- u32 cmd = (l ? WLC_SET_LRL : WLC_SET_SRL);
+ u32 chipnum;
+ wlc_rev_info_t revinfo;
+ int ret;
+ bool need_war = false;
-#ifdef CUSTOM_LONG_RETRY_LIMIT
- if ((cmd == WLC_SET_LRL) &&
- (retry != CUSTOM_LONG_RETRY_LIMIT)) {
- WL_DBG(("CUSTOM_LONG_RETRY_LIMIT is used.Ignore configuration"));
- return err;
+ /* Get the device rev info */
+ memset(&revinfo, 0, sizeof(revinfo));
+ ret = wldev_ioctl_get(ndev, WLC_GET_REVINFO, &revinfo, sizeof(revinfo));
+ if (ret < 0) {
+ WL_ERR(("%s: GET revinfo FAILED. ret:%d\n", __FUNCTION__, ret));
+ } else {
+ WL_DBG(("%s: GET_REVINFO device 0x%x, vendor 0x%x, chipnum 0x%x\n", __FUNCTION__,
+ dtoh32(revinfo.deviceid), dtoh32(revinfo.vendorid), dtoh32(revinfo.chipnum)));
+ chipnum = revinfo.chipnum;
+ if ((chipnum == BCM4359_CHIP_ID) || (chipnum == BCM43596_CHIP_ID)) {
+ /* WAR required */
+ need_war = true;
+ }
}
-#endif /* CUSTOM_LONG_RETRY_LIMIT */
- retry = htod32(retry);
- err = wldev_ioctl_set(dev, cmd, &retry, sizeof(retry));
- if (unlikely(err)) {
- WL_ERR(("cmd (%d) , error (%d)\n", cmd, err));
- return err;
+ if (wl_customer6_legacy_chip_check(cfg, ndev) || need_war) {
+ /* Few firmware branches have issues in bss iovar handling and
+ * that can't be changed since they are in production.
+ */
+ if (*val == WLC_AP_IOV_OP_MANUAL_AP_BSSCFG_CREATE) {
+ *val = WLC_AP_IOV_OP_MANUAL_STA_BSSCFG_CREATE;
+ } else if (*val == WLC_AP_IOV_OP_MANUAL_STA_BSSCFG_CREATE) {
+ *val = WLC_AP_IOV_OP_MANUAL_AP_BSSCFG_CREATE;
+ } else {
+ /* Ignore for other bss enums */
+ return;
+ }
+ WL_ERR(("wl bss %d\n", *val));
}
- return err;
}
-static s32 wl_cfg80211_set_wiphy_params(struct wiphy *wiphy, u32 changed)
+s32
+wl_cfg80211_add_del_bss(struct bcm_cfg80211 *cfg,
+ struct net_device *ndev, s32 bsscfg_idx,
+ enum nl80211_iftype iface_type, s32 del, u8 *addr)
{
- struct bcm_cfg80211 *cfg = (struct bcm_cfg80211 *)wiphy_priv(wiphy);
- struct net_device *ndev = bcmcfg_to_prmry_ndev(cfg);
- s32 err = 0;
+ s32 ret = BCME_OK;
+ s32 val = 0;
- RETURN_EIO_IF_NOT_UP(cfg);
- WL_DBG(("Enter\n"));
- if (changed & WIPHY_PARAM_RTS_THRESHOLD &&
- (cfg->conf->rts_threshold != wiphy->rts_threshold)) {
- cfg->conf->rts_threshold = wiphy->rts_threshold;
- err = wl_set_rts(ndev, cfg->conf->rts_threshold);
- if (err != BCME_OK)
- return err;
- }
- if (changed & WIPHY_PARAM_FRAG_THRESHOLD &&
- (cfg->conf->frag_threshold != wiphy->frag_threshold)) {
- cfg->conf->frag_threshold = wiphy->frag_threshold;
- err = wl_set_frag(ndev, cfg->conf->frag_threshold);
- if (err != BCME_OK)
- return err;
- }
- if (changed & WIPHY_PARAM_RETRY_LONG &&
- (cfg->conf->retry_long != wiphy->retry_long)) {
- cfg->conf->retry_long = wiphy->retry_long;
- err = wl_set_retry(ndev, cfg->conf->retry_long, true);
- if (err != BCME_OK)
- return err;
- }
- if (changed & WIPHY_PARAM_RETRY_SHORT &&
- (cfg->conf->retry_short != wiphy->retry_short)) {
- cfg->conf->retry_short = wiphy->retry_short;
- err = wl_set_retry(ndev, cfg->conf->retry_short, false);
- if (err != BCME_OK) {
- return err;
- }
- }
+ struct {
+ s32 cfg;
+ s32 val;
+ struct ether_addr ea;
+ } bss_setbuf;
- return err;
-}
-static chanspec_t
-channel_to_chanspec(struct wiphy *wiphy, struct net_device *dev, u32 channel, u32 bw_cap)
-{
- struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
- u8 *buf = NULL;
- wl_uint32_list_t *list;
- int err = BCME_OK;
- chanspec_t c = 0, ret_c = 0;
- int bw = 0, tmp_bw = 0;
- int i;
- u32 tmp_c;
+ WL_INFORM(("iface_type:%d del:%d \n", iface_type, del));
-#define LOCAL_BUF_SIZE 1024
- buf = (u8 *)MALLOC(cfg->osh, LOCAL_BUF_SIZE);
- if (!buf) {
- WL_ERR(("buf memory alloc failed\n"));
- goto exit;
- }
+ bzero(&bss_setbuf, sizeof(bss_setbuf));
- err = wldev_iovar_getbuf_bsscfg(dev, "chanspecs", NULL,
- 0, buf, LOCAL_BUF_SIZE, 0, &cfg->ioctl_buf_sync);
- if (err != BCME_OK) {
- WL_ERR(("get chanspecs failed with %d\n", err));
- goto exit;
+ /* AP=2, STA=3, up=1, down=0, val=-1 */
+ if (del) {
+ val = WLC_AP_IOV_OP_DELETE;
+ } else if (iface_type == NL80211_IFTYPE_AP) {
+ /* Add/role change to AP Interface */
+ WL_DBG(("Adding AP Interface \n"));
+ val = WLC_AP_IOV_OP_MANUAL_AP_BSSCFG_CREATE;
+ } else if (iface_type == NL80211_IFTYPE_STATION) {
+ /* Add/role change to STA Interface */
+ WL_DBG(("Adding STA Interface \n"));
+ val = WLC_AP_IOV_OP_MANUAL_STA_BSSCFG_CREATE;
+ } else {
+ WL_ERR((" add_del_bss NOT supported for IFACE type:0x%x", iface_type));
+ return -EINVAL;
}
- list = (wl_uint32_list_t *)(void *)buf;
- for (i = 0; i < dtoh32(list->count); i++) {
- c = dtoh32(list->element[i]);
- if (channel <= CH_MAX_2G_CHANNEL) {
- if (!CHSPEC_IS20(c))
- continue;
- if (channel == CHSPEC_CHANNEL(c)) {
- ret_c = c;
- bw = 20;
- goto exit;
- }
- }
- tmp_c = wf_chspec_ctlchan(c);
- tmp_bw = bw2cap[CHSPEC_BW(c) >> WL_CHANSPEC_BW_SHIFT];
- if (tmp_c != channel)
- continue;
-
- if ((tmp_bw > bw) && (tmp_bw <= bw_cap)) {
- bw = tmp_bw;
- ret_c = c;
- if (bw == bw_cap)
- goto exit;
- }
- }
-exit:
- if (buf) {
- MFREE(cfg->osh, buf, LOCAL_BUF_SIZE);
+ if (!del) {
+ wl_bss_iovar_war(cfg, ndev, &val);
}
-#undef LOCAL_BUF_SIZE
- WL_DBG(("return chanspec %x %d\n", ret_c, bw));
- return ret_c;
-}
-void
-wl_cfg80211_ibss_vsie_set_buffer(struct net_device *dev, vndr_ie_setbuf_t *ibss_vsie,
- int ibss_vsie_len)
-{
- struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
+ bss_setbuf.cfg = htod32(bsscfg_idx);
+ bss_setbuf.val = htod32(val);
- if (cfg != NULL && ibss_vsie != NULL) {
- if (cfg->ibss_vsie != NULL) {
- MFREE(cfg->osh, cfg->ibss_vsie, cfg->ibss_vsie_len);
- }
- cfg->ibss_vsie = ibss_vsie;
- cfg->ibss_vsie_len = ibss_vsie_len;
+ if (addr) {
+ memcpy(&bss_setbuf.ea.octet, addr, ETH_ALEN);
}
-}
-static void
-wl_cfg80211_ibss_vsie_free(struct bcm_cfg80211 *cfg)
-{
- /* free & initiralize VSIE (Vendor Specific IE) */
- if (cfg->ibss_vsie != NULL) {
- MFREE(cfg->osh, cfg->ibss_vsie, cfg->ibss_vsie_len);
- cfg->ibss_vsie_len = 0;
- }
+ WL_DBG(("wl bss %d bssidx:%d iface:%s \n", val, bsscfg_idx, ndev->name));
+ ret = wldev_iovar_setbuf(ndev, "bss", &bss_setbuf, sizeof(bss_setbuf),
+ cfg->ioctl_buf, WLC_IOCTL_MAXLEN, &cfg->ioctl_buf_sync);
+ if (ret != 0)
+ WL_ERR(("'bss %d' failed with %d\n", val, ret));
+
+ return ret;
}
s32
-wl_cfg80211_ibss_vsie_delete(struct net_device *dev)
+wl_cfg80211_bss_up(struct bcm_cfg80211 *cfg, struct net_device *ndev, s32 bsscfg_idx, s32 bss_up)
{
- struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
- char *ioctl_buf = NULL;
- s32 ret = BCME_OK, bssidx;
+ s32 ret = BCME_OK;
+ s32 val = bss_up ? 1 : 0;
- if (cfg != NULL && cfg->ibss_vsie != NULL) {
- ioctl_buf = (char *)MALLOC(cfg->osh, WLC_IOCTL_MEDLEN);
- if (!ioctl_buf) {
- WL_ERR(("ioctl memory alloc failed\n"));
- return -ENOMEM;
- }
- if ((bssidx = wl_get_bssidx_by_wdev(cfg, dev->ieee80211_ptr)) < 0) {
- WL_ERR(("Find index failed\n"));
- ret = BCME_ERROR;
- goto end;
- }
- /* change the command from "add" to "del" */
- strlcpy(cfg->ibss_vsie->cmd, "del", sizeof(cfg->ibss_vsie->cmd));
+ struct {
+ s32 cfg;
+ s32 val;
+ } bss_setbuf;
- ret = wldev_iovar_setbuf_bsscfg(dev, "vndr_ie",
- cfg->ibss_vsie, cfg->ibss_vsie_len,
- ioctl_buf, WLC_IOCTL_MEDLEN, bssidx, NULL);
- WL_ERR(("ret=%d\n", ret));
+ bss_setbuf.cfg = htod32(bsscfg_idx);
+ bss_setbuf.val = htod32(val);
- if (ret == BCME_OK) {
- /* Free & initialize VSIE */
- MFREE(cfg->osh, cfg->ibss_vsie, cfg->ibss_vsie_len);
- cfg->ibss_vsie_len = 0;
- }
-end:
- if (ioctl_buf) {
- MFREE(cfg->osh, ioctl_buf, WLC_IOCTL_MEDLEN);
- }
+ WL_DBG(("wl bss -C %d %s\n", bsscfg_idx, bss_up ? "up" : "down"));
+ ret = wldev_iovar_setbuf(ndev, "bss", &bss_setbuf, sizeof(bss_setbuf),
+ cfg->ioctl_buf, WLC_IOCTL_MAXLEN, &cfg->ioctl_buf_sync);
+
+ if (ret != 0) {
+ WL_ERR(("'bss %d' failed with %d\n", bss_up, ret));
}
return ret;
}
-#ifdef WLAIBSS_MCHAN
-static bcm_struct_cfgdev*
-bcm_cfg80211_add_ibss_if(struct wiphy *wiphy, char *name)
+bool
+wl_cfg80211_bss_isup(struct net_device *ndev, int bsscfg_idx)
{
- int err = 0;
- struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
- struct wireless_dev* wdev = NULL;
- struct net_device *new_ndev = NULL;
- struct net_device *primary_ndev = NULL;
- long timeout;
- wl_aibss_if_t aibss_if;
- wl_if_event_info *event = NULL;
+ s32 result, val;
+ bool isup = false;
+ s8 getbuf[64];
- if (cfg->ibss_cfgdev != NULL) {
- WL_ERR(("IBSS interface %s already exists\n", name));
- return NULL;
+ /* Check if the BSS is up */
+ *(int*)getbuf = -1;
+ result = wldev_iovar_getbuf_bsscfg(ndev, "bss", &bsscfg_idx,
+ sizeof(bsscfg_idx), getbuf, sizeof(getbuf), 0, NULL);
+ if (result != 0) {
+ WL_ERR(("'cfg bss -C %d' failed: %d\n", bsscfg_idx, result));
+ WL_ERR(("NOTE: this ioctl error is normal "
+ "when the BSS has not been created yet.\n"));
+ } else {
+ val = *(int*)getbuf;
+ val = dtoh32(val);
+ WL_DBG(("wl bss -C %d = %d\n", bsscfg_idx, val));
+ isup = (val ? TRUE : FALSE);
}
-
- WL_ERR(("Try to create IBSS interface %s\n", name));
- primary_ndev = bcmcfg_to_prmry_ndev(cfg);
- /* generate a new MAC address for the IBSS interface */
- get_primary_mac(cfg, &cfg->ibss_if_addr);
- cfg->ibss_if_addr.octet[4] ^= 0x40;
- bzero(&aibss_if, sizeof(aibss_if));
- memcpy(&aibss_if.addr, &cfg->ibss_if_addr, sizeof(aibss_if.addr));
- aibss_if.chspec = 0;
- aibss_if.len = sizeof(aibss_if);
-
- cfg->bss_pending_op = TRUE;
- bzero(&cfg->if_event_info, sizeof(cfg->if_event_info));
- err = wldev_iovar_setbuf(primary_ndev, "aibss_ifadd", &aibss_if,
- sizeof(aibss_if), cfg->ioctl_buf, WLC_IOCTL_MAXLEN, &cfg->ioctl_buf_sync);
- if (err) {
- WL_ERR(("IOVAR aibss_ifadd failed with error %d\n", err));
- goto fail;
- }
- timeout = wait_event_interruptible_timeout(cfg->netif_change_event,
- !cfg->bss_pending_op, msecs_to_jiffies(MAX_WAIT_TIME));
- if (timeout <= 0 || cfg->bss_pending_op)
- goto fail;
-
- event = &cfg->if_event_info;
- /* By calling wl_cfg80211_allocate_if (dhd_allocate_if eventually) we give the control
- * over this net_device interface to dhd_linux, hence the interface is managed by dhd_liux
- * and will be freed by dhd_detach unless it gets unregistered before that. The
- * wireless_dev instance new_ndev->ieee80211_ptr associated with this net_device will
- * be freed by wl_dealloc_netinfo
- */
- new_ndev = wl_cfg80211_allocate_if(cfg, event->ifidx, event->name,
- event->mac, event->bssidx, event->name);
- if (new_ndev == NULL)
- goto fail;
- wdev = (struct wireless_dev *)MALLOCZ(cfg->osh, sizeof(*wdev));
- if (wdev == NULL)
- goto fail;
- wdev->wiphy = wiphy;
- wdev->iftype = NL80211_IFTYPE_ADHOC;
- wdev->netdev = new_ndev;
- new_ndev->ieee80211_ptr = wdev;
- SET_NETDEV_DEV(new_ndev, wiphy_dev(wdev->wiphy));
-
- /* rtnl lock must have been acquired, if this is not the case, wl_cfg80211_register_if
- * needs to be modified to take one parameter (bool need_rtnl_lock)
- */
- ASSERT_RTNL();
- if (wl_cfg80211_register_if(cfg, event->ifidx, new_ndev, FALSE) != BCME_OK)
- goto fail;
-
- wl_alloc_netinfo(cfg, new_ndev, wdev, WL_IF_TYPE_IBSS,
- PM_ENABLE, event->bssidx, event->ifidx);
- cfg->ibss_cfgdev = ndev_to_cfgdev(new_ndev);
- WL_ERR(("IBSS interface %s created\n", new_ndev->name));
- return cfg->ibss_cfgdev;
-
-fail:
- WL_ERR(("failed to create IBSS interface %s \n", name));
- cfg->bss_pending_op = FALSE;
- if (new_ndev)
- wl_cfg80211_remove_if(cfg, event->ifidx, new_ndev, FALSE);
- if (wdev) {
- MFREE(cfg->osh, wdev, sizeof(*wdev));
- }
- return NULL;
-}
+ return isup;
+}
static s32
-bcm_cfg80211_del_ibss_if(struct wiphy *wiphy, bcm_struct_cfgdev *cfgdev)
-{
- int err = 0;
- struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
- struct net_device *ndev = NULL;
- struct net_device *primary_ndev = NULL;
- long timeout;
-
- if (!cfgdev || cfg->ibss_cfgdev != cfgdev || ETHER_ISNULLADDR(&cfg->ibss_if_addr.octet))
- return -EINVAL;
- ndev = (struct net_device *)cfgdev_to_ndev(cfg->ibss_cfgdev);
- primary_ndev = bcmcfg_to_prmry_ndev(cfg);
-
- cfg->bss_pending_op = TRUE;
- bzero(&cfg->if_event_info, sizeof(cfg->if_event_info));
- err = wldev_iovar_setbuf(primary_ndev, "aibss_ifdel", &cfg->ibss_if_addr,
- sizeof(cfg->ibss_if_addr), cfg->ioctl_buf, WLC_IOCTL_MAXLEN, &cfg->ioctl_buf_sync);
- if (err) {
- WL_ERR(("IOVAR aibss_ifdel failed with error %d\n", err));
- goto fail;
- }
- timeout = wait_event_interruptible_timeout(cfg->netif_change_event,
- !cfg->bss_pending_op, msecs_to_jiffies(MAX_WAIT_TIME));
- if (timeout <= 0 || cfg->bss_pending_op) {
- WL_ERR(("timeout in waiting IF_DEL event\n"));
- goto fail;
- }
-
- wl_cfg80211_remove_if(cfg, cfg->if_event_info.ifidx, ndev, FALSE);
- cfg->ibss_cfgdev = NULL;
- return 0;
-
-fail:
- cfg->bss_pending_op = FALSE;
- return -1;
-}
-#endif /* WLAIBSS_MCHAN */
-
-s32
-wl_cfg80211_to_fw_iftype(wl_iftype_t iftype)
+cfg80211_to_wl_iftype(uint16 type, uint16 *role, uint16 *mode)
{
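+ /* Map the cfg80211 interface type onto the firmware role and internal driver mode */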
- s32 ret = BCME_ERROR;
-
- switch (iftype) {
- case WL_IF_TYPE_AP:
- ret = WL_INTERFACE_TYPE_AP;
- break;
- case WL_IF_TYPE_STA:
- ret = WL_INTERFACE_TYPE_STA;
+ switch (type) {
+ case NL80211_IFTYPE_STATION:
+ *role = WLC_E_IF_ROLE_STA;
+ *mode = WL_MODE_BSS;
break;
- case WL_IF_TYPE_NAN_NMI:
- case WL_IF_TYPE_NAN:
- ret = WL_INTERFACE_TYPE_NAN;
+ case NL80211_IFTYPE_AP:
+ *role = WLC_E_IF_ROLE_AP;
+ *mode = WL_MODE_AP;
break;
- case WL_IF_TYPE_P2P_DISC:
- ret = WL_INTERFACE_TYPE_P2P_DISC;
+ case NL80211_IFTYPE_P2P_GO:
+ *role = WLC_E_IF_ROLE_P2P_GO;
+ *mode = WL_MODE_AP;
break;
- case WL_IF_TYPE_P2P_GO:
- ret = WL_INTERFACE_TYPE_P2P_GO;
+ case NL80211_IFTYPE_P2P_CLIENT:
+ *role = WLC_E_IF_ROLE_P2P_CLIENT;
+ *mode = WL_MODE_BSS;
break;
- case WL_IF_TYPE_P2P_GC:
- ret = WL_INTERFACE_TYPE_P2P_GC;
+ case NL80211_IFTYPE_MONITOR:
+ WL_ERR(("Unsupported mode \n"));
+ return BCME_UNSUPPORTED;
+ case NL80211_IFTYPE_ADHOC:
+ *role = WLC_E_IF_ROLE_IBSS;
+ *mode = WL_MODE_IBSS;
break;
-
default:
- WL_ERR(("Unsupported type:%d \n", iftype));
- ret = -EINVAL;
- break;
+ WL_ERR(("Unknown interface type:0x%x\n", type));
+ return BCME_ERROR;
}
- return ret;
+ return BCME_OK;
}
-bool
-wl_legacy_chip_check(struct bcm_cfg80211 *cfg)
+static s32
+wl_if_to_cfg80211_type(uint16 role)
{
- dhd_pub_t *dhd = (dhd_pub_t *)(cfg->pub);
- uint chip;
-
- chip = dhd_conf_get_chip(dhd);
-
- if (chip == BCM43362_CHIP_ID || chip == BCM4330_CHIP_ID ||
- chip == BCM43430_CHIP_ID || chip == BCM43012_CHIP_ID ||
- chip == BCM4334_CHIP_ID || chip == BCM43340_CHIP_ID ||
- chip == BCM43341_CHIP_ID || chip == BCM4324_CHIP_ID ||
- chip == BCM4335_CHIP_ID || chip == BCM4339_CHIP_ID ||
- chip == BCM4345_CHIP_ID || chip == BCM43454_CHIP_ID ||
- chip == BCM4354_CHIP_ID || chip == BCM4356_CHIP_ID ||
- chip == BCM4371_CHIP_ID || chip == BCM4359_CHIP_ID ||
- chip == BCM43143_CHIP_ID || chip == BCM43242_CHIP_ID ||
- chip == BCM43569_CHIP_ID) {
- return true;
+ switch (role) {
+ case WLC_E_IF_ROLE_STA:
+ return NL80211_IFTYPE_STATION;
+ case WLC_E_IF_ROLE_AP:
+ return NL80211_IFTYPE_AP;
+ case WLC_E_IF_ROLE_P2P_GO:
+ return NL80211_IFTYPE_P2P_GO;
+ case WLC_E_IF_ROLE_P2P_CLIENT:
+ return NL80211_IFTYPE_P2P_CLIENT;
+ case WLC_E_IF_ROLE_IBSS:
+ return NL80211_IFTYPE_ADHOC;
+ default:
+ WL_ERR(("Unknown interface role:0x%x. Forcing type station\n", role));
+ return BCME_ERROR;
}
-
- return false;
}
-s32
-wl_cfg80211_interface_ops(struct bcm_cfg80211 *cfg,
- struct net_device *ndev, s32 bsscfg_idx,
- wl_iftype_t cfg_iftype, s32 del, u8 *addr)
+struct net_device *
+wl_cfg80211_post_ifcreate(struct net_device *ndev,
+ wl_if_event_info *event, u8 *addr,
+ const char *name, bool rtnl_lock_reqd)
{
+ struct bcm_cfg80211 *cfg;
+ struct net_device *primary_ndev;
+ struct net_device *new_ndev = NULL;
+ struct wireless_dev *wdev = NULL;
+ s32 iface_type;
s32 ret;
- struct wl_interface_create_v2 iface;
- wl_interface_create_v3_t iface_v3;
- wl_interface_create_t iface_v0;
- struct wl_interface_info_v1 *info;
- wl_interface_info_v2_t *info_v2;
- wl_interface_info_t *info_v0;
- uint32 ifflags = 0;
- bool use_iface_info_v2 = false;
- u8 ioctl_buf[WLC_IOCTL_SMLEN];
- s32 iftype;
+ u16 mode;
+ u16 role;
+ u8 mac_addr[ETH_ALEN];
- if (del) {
- ret = wldev_iovar_setbuf(ndev, "interface_remove",
- NULL, 0, ioctl_buf, sizeof(ioctl_buf), NULL);
- if (unlikely(ret))
- WL_ERR(("Interface remove failed!! ret %d\n", ret));
- return ret;
+ if (!ndev || !event) {
+ WL_ERR(("Wrong arg\n"));
+ return NULL;
}
- /* Interface create */
- bzero(&iface, sizeof(iface));
- /*
- * flags field is still used along with iftype inorder to support the old version of the
- * FW work with the latest app changes.
- */
+ cfg = wl_get_cfg(ndev);
+ if (!cfg) {
+ WL_ERR(("cfg null\n"));
+ return NULL;
+ }
- iftype = wl_cfg80211_to_fw_iftype(cfg_iftype);
- if (iftype < 0) {
- return -ENOTSUPP;
+ WL_DBG(("Enter. role:%d ifidx:%d bssidx:%d\n",
+ event->role, event->ifidx, event->bssidx));
+ if (!event->ifidx || !event->bssidx) {
+ /* Fw returned primary idx (0) for virtual interface */
+ WL_ERR(("Wrong index. ifidx:%d bssidx:%d \n",
+ event->ifidx, event->bssidx));
+ return NULL;
}
- if (addr) {
- ifflags |= WL_INTERFACE_MAC_USE;
+ iface_type = wl_if_to_cfg80211_type(event->role);
+ if (iface_type < 0) {
+ /* Unknown iface type */
+ WL_ERR(("Wrong iface type \n"));
+ return NULL;
}
- /* Pass ver = 0 for fetching the interface_create iovar version */
- if (wl_legacy_chip_check(cfg)) {
- bzero(&iface_v0, sizeof(iface_v0));
- iface_v0.ver = WL_INTERFACE_CREATE_VER;
- iface_v0.flags = iftype | ifflags;
- if (addr) {
- memcpy(&iface_v0.mac_addr.octet, addr, ETH_ALEN);
- }
- ret = wldev_iovar_getbuf(ndev, "interface_create",
- &iface_v0, sizeof(struct wl_interface_create),
- ioctl_buf, sizeof(ioctl_buf), NULL);
- if (ret == 0) {
- info_v0 = (wl_interface_info_t *)ioctl_buf;
- ret = info_v0->bsscfgidx;
- goto exit;
- }
- } else {
- ret = wldev_iovar_getbuf(ndev, "interface_create",
- &iface, sizeof(struct wl_interface_create_v2),
- ioctl_buf, sizeof(ioctl_buf), NULL);
+ if (cfg80211_to_wl_iftype(iface_type, &role, &mode) < 0) {
+ /* Unsupported operating mode */
+ WL_ERR(("Unsupported operating mode \n"));
+ return NULL;
}
- if (ret == BCME_UNSUPPORTED) {
- WL_ERR(("interface_create iovar not supported\n"));
- return ret;
- } else if ((ret == 0) && *((uint32 *)ioctl_buf) == WL_INTERFACE_CREATE_VER_3) {
- WL_DBG(("interface_create version 3. flags:0x%x \n", ifflags));
- use_iface_info_v2 = true;
- bzero(&iface_v3, sizeof(wl_interface_create_v3_t));
- iface_v3.ver = WL_INTERFACE_CREATE_VER_3;
- iface_v3.iftype = iftype;
- iface_v3.flags = ifflags;
- if (addr) {
- memcpy(&iface_v3.mac_addr.octet, addr, ETH_ALEN);
- }
- ret = wldev_iovar_getbuf(ndev, "interface_create",
- &iface_v3, sizeof(wl_interface_create_v3_t),
- ioctl_buf, sizeof(ioctl_buf), NULL);
- } else {
- /* On any other error, attempt with iovar version 2 */
- WL_DBG(("interface_create version 2. get_ver:%d ifflags:0x%x\n", ret, ifflags));
- iface.ver = WL_INTERFACE_CREATE_VER_2;
- iface.iftype = iftype;
- iface.flags = ifflags;
- if (addr) {
- memcpy(&iface.mac_addr.octet, addr, ETH_ALEN);
- }
- ret = wldev_iovar_getbuf(ndev, "interface_create",
- &iface, sizeof(struct wl_interface_create_v2),
- ioctl_buf, sizeof(ioctl_buf), NULL);
+
+ WL_DBG(("mac_ptr:%p name:%s role:%d nl80211_iftype:%d " MACDBG "\n",
+ addr, name, event->role, iface_type, MAC2STRDBG(event->mac)));
+ if (!name) {
+ /* If iface name is not provided, use dongle ifname */
+ name = event->name;
}
- if (unlikely(ret)) {
- WL_ERR(("Interface create failed!! ret %d\n", ret));
- return ret;
+ if (!addr) {
+ /* If mac address is not set, use primary mac with locally administered
+ * bit set.
+ */
+ primary_ndev = bcmcfg_to_prmry_ndev(cfg);
+ memcpy(mac_addr, primary_ndev->dev_addr, ETH_ALEN);
+#ifndef CUSTOMER_HW6
+ /* For customer6 builds, use primary mac address for virtual interface */
+ mac_addr[0] |= 0x02;
+#endif /* CUSTOMER_HW6 */
+ addr = mac_addr;
}
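/*
 * The branch above derives the virtual interface MAC from the primary MAC by
 * setting the locally administered bit (0x02 in the first octet), so the
 * generated address cannot collide with the vendor-assigned (OUI) address of
 * the primary ndev. A minimal standalone sketch of that derivation
 * (derive_virtual_mac() is an illustrative helper, not part of this patch):
 */
#include <string.h>

#define DEMO_ETH_ALEN 6

static void derive_virtual_mac(const unsigned char *primary, unsigned char *virt)
{
	memcpy(virt, primary, DEMO_ETH_ALEN);
	virt[0] |= 0x02;	/* mark the copy as locally administered */
}

/* e.g. primary 00:90:4c:11:22:33 -> virtual 02:90:4c:11:22:33 */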
- /* success case */
- if (use_iface_info_v2 == true) {
- info_v2 = (wl_interface_info_v2_t *)ioctl_buf;
- ret = info_v2->bsscfgidx;
+ new_ndev = wl_cfg80211_allocate_if(cfg, event->ifidx,
+ name, addr, event->bssidx, event->name);
+ if (!new_ndev) {
+ WL_ERR(("I/F allocation failed! \n"));
+ goto fail;
} else {
- /* Use v1 struct */
- info = (struct wl_interface_info_v1 *)ioctl_buf;
- ret = info->bsscfgidx;
+ WL_DBG(("I/F allocation succeeded! ifidx:0x%x bssidx:0x%x \n",
+ event->ifidx, event->bssidx));
}
-exit:
- WL_DBG(("wl interface create success!! bssidx:%d \n", ret));
- return ret;
-}
+ wdev = kzalloc(sizeof(*wdev), GFP_KERNEL);
+ if (!wdev) {
+ WL_ERR(("wireless_dev alloc failed! \n"));
+ goto fail;
+ }
-s32
-wl_cfg80211_add_del_bss(struct bcm_cfg80211 *cfg,
- struct net_device *ndev, s32 bsscfg_idx,
- wl_iftype_t brcm_iftype, s32 del, u8 *addr)
-{
- s32 ret = BCME_OK;
- s32 val = 0;
-
- struct {
- s32 cfg;
- s32 val;
- struct ether_addr ea;
- } bss_setbuf;
-
- WL_DBG(("wl_iftype:%d del:%d \n", brcm_iftype, del));
-
- bzero(&bss_setbuf, sizeof(bss_setbuf));
+ wdev->wiphy = bcmcfg_to_wiphy(cfg);
+ wdev->iftype = iface_type;
+ new_ndev->ieee80211_ptr = wdev;
+ SET_NETDEV_DEV(new_ndev, wiphy_dev(wdev->wiphy));
- /* AP=2, STA=3, up=1, down=0, val=-1 */
- if (del) {
- val = WLC_AP_IOV_OP_DELETE;
- } else if (brcm_iftype == WL_IF_TYPE_AP) {
- /* Add/role change to AP Interface */
- WL_DBG(("Adding AP Interface \n"));
- val = WLC_AP_IOV_OP_MANUAL_AP_BSSCFG_CREATE;
- } else if (brcm_iftype == WL_IF_TYPE_STA) {
- /* Add/role change to STA Interface */
- WL_DBG(("Adding STA Interface \n"));
- val = WLC_AP_IOV_OP_MANUAL_STA_BSSCFG_CREATE;
- } else {
- WL_ERR((" add_del_bss NOT supported for IFACE type:0x%x", brcm_iftype));
- return -EINVAL;
+ /* Check whether mac addr is in sync with fw. If not,
+ * apply it using cur_etheraddr.
+ */
+ if (memcmp(addr, event->mac, ETH_ALEN) != 0) {
+ ret = wldev_iovar_setbuf_bsscfg(new_ndev, "cur_etheraddr",
+ addr, ETH_ALEN, cfg->ioctl_buf, WLC_IOCTL_MAXLEN,
+ event->bssidx, &cfg->ioctl_buf_sync);
+ if (unlikely(ret)) {
+ WL_ERR(("set cur_etheraddr Error (%d)\n", ret));
+ goto fail;
+ }
+ memcpy(new_ndev->dev_addr, addr, ETH_ALEN);
+ WL_ERR(("Applying updated mac address to firmware\n"));
}
- if (!del) {
- wl_ext_bss_iovar_war(ndev, &val);
+ if (wl_cfg80211_register_if(cfg, event->ifidx, new_ndev, rtnl_lock_reqd) != BCME_OK) {
+ WL_ERR(("IFACE register failed \n"));
+ goto fail;
}
- bss_setbuf.cfg = htod32(bsscfg_idx);
- bss_setbuf.val = htod32(val);
-
- if (addr) {
- memcpy(&bss_setbuf.ea.octet, addr, ETH_ALEN);
+ /* Initialize with the station mode params */
+ ret = wl_alloc_netinfo(cfg, new_ndev, wdev, mode,
+ PM_ENABLE, event->bssidx);
+ if (unlikely(ret)) {
+ WL_ERR(("wl_alloc_netinfo Error (%d)\n", ret));
+ goto fail;
}
- WL_MSG(ndev->name, "wl bss %d bssidx:%d\n", val, bsscfg_idx);
- ret = wldev_iovar_setbuf(ndev, "bss", &bss_setbuf, sizeof(bss_setbuf),
- cfg->ioctl_buf, WLC_IOCTL_MAXLEN, &cfg->ioctl_buf_sync);
- if (ret != 0)
- WL_ERR(("'bss %d' failed with %d\n", val, ret));
-
- return ret;
-}
-
-s32
-wl_cfg80211_bss_up(struct bcm_cfg80211 *cfg, struct net_device *ndev, s32 bsscfg_idx, s32 bss_up)
-{
- s32 ret = BCME_OK;
- s32 val = bss_up ? 1 : 0;
+ /* Apply the mode & infra setting based on iftype */
+ if ((ret = wl_config_ifmode(cfg, new_ndev, iface_type)) < 0) {
+ WL_ERR(("config ifmode failure (%d)\n", ret));
+ goto fail;
+ }
- struct {
- s32 cfg;
- s32 val;
- } bss_setbuf;
+ if (mode == WL_MODE_AP) {
+ wl_set_drv_status(cfg, AP_CREATING, new_ndev);
+ }
- bss_setbuf.cfg = htod32(bsscfg_idx);
- bss_setbuf.val = htod32(val);
+ WL_INFORM(("Host Network Interface (%s) for Secondary I/F created."
+ " cfg_iftype:%d wl_role:%d\n", new_ndev->name, iface_type, event->role));
- WL_INFORM_MEM(("wl bss -C %d %s\n", bsscfg_idx, bss_up ? "up" : "down"));
- ret = wldev_iovar_setbuf(ndev, "bss", &bss_setbuf, sizeof(bss_setbuf),
- cfg->ioctl_buf, WLC_IOCTL_MAXLEN, &cfg->ioctl_buf_sync);
+ return new_ndev;
- if (ret != 0) {
- WL_ERR(("'bss %d' failed with %d\n", bss_up, ret));
- }
+fail:
+ if (wdev)
+ kfree(wdev);
+ if (new_ndev)
+ wl_cfg80211_remove_if(cfg, event->ifidx, new_ndev, rtnl_lock_reqd);
- return ret;
+ return NULL;
}
-bool
-wl_cfg80211_bss_isup(struct net_device *ndev, int bsscfg_idx)
+void
+wl_cfg80211_cleanup_virtual_ifaces(struct net_device *dev, bool rtnl_lock_reqd)
{
- s32 result, val;
- bool isup = false;
- s8 getbuf[64];
+ struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
+ struct net_info *iter, *next;
+ struct net_device *primary_ndev;
- /* Check if the BSS is up */
- *(int*)getbuf = -1;
- result = wldev_iovar_getbuf_bsscfg(ndev, "bss", &bsscfg_idx,
- sizeof(bsscfg_idx), getbuf, sizeof(getbuf), 0, NULL);
- if (result != 0) {
- WL_ERR(("'cfg bss -C %d' failed: %d\n", bsscfg_idx, result));
- WL_ERR(("NOTE: this ioctl error is normal "
- "when the BSS has not been created yet.\n"));
- } else {
- val = *(int*)getbuf;
- val = dtoh32(val);
- WL_DBG(("wl bss -C %d = %d\n", bsscfg_idx, val));
- isup = (val ? TRUE : FALSE);
+ /* Note: This function will clean up only the network interface and host
+ * data structures. The firmware interface clean up will happen
+ * during chip reset (ifconfig wlan0 down for built-in drivers/rmmod
+ * context for the module case).
+ */
+ primary_ndev = bcmcfg_to_prmry_ndev(cfg);
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wcast-qual"
+#endif
+ for_each_ndev(cfg, iter, next) {
+ if (iter->ndev && (iter->ndev != primary_ndev)) {
+ WL_DBG(("Cleaning up iface:%s \n", iter->ndev->name));
+ wl_cfg80211_post_ifdel(iter->ndev, rtnl_lock_reqd);
+ }
}
- return isup;
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic pop
+#endif
}
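/*
 * The for_each_ndev() walk above is bracketed by GCC diagnostic pragmas so
 * that -Wcast-qual is suppressed only for that macro expansion and restored
 * right after it. A small standalone illustration of the same push/ignored/
 * pop pattern (print_without_qual() is a made-up example, not driver code):
 */
#include <stdio.h>

static void print_without_qual(const char *name)
{
#if defined(__GNUC__)
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wcast-qual"
#endif
	char *writable = (char *)name;	/* cast away const without a warning */
#if defined(__GNUC__)
#pragma GCC diagnostic pop
#endif
	printf("%s\n", writable);	/* -Wcast-qual is back in force here */
}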
s32
-wl_iftype_to_mode(wl_iftype_t iftype)
+wl_cfg80211_post_ifdel(struct net_device *ndev, bool rtnl_lock_reqd)
{
- s32 mode = BCME_ERROR;
+ int ifidx = -1;
+ struct bcm_cfg80211 *cfg;
- switch (iftype) {
- case WL_IF_TYPE_STA:
- case WL_IF_TYPE_P2P_GC:
- case WL_IF_TYPE_P2P_DISC:
- mode = WL_MODE_BSS;
- break;
- case WL_IF_TYPE_AP:
- case WL_IF_TYPE_P2P_GO:
- mode = WL_MODE_AP;
- break;
- case WL_IF_TYPE_NAN:
- mode = WL_MODE_NAN;
- break;
- case WL_IF_TYPE_AIBSS:
- /* Intentional fall through */
- case WL_IF_TYPE_IBSS:
- mode = WL_MODE_IBSS;
- break;
-#ifdef WLMESH_CFG80211
- case WL_IF_TYPE_MESH:
- mode = WL_MODE_MESH;
- break;
-#endif /* WLMESH_CFG80211 */
- default:
- WL_ERR(("Unsupported type:%d\n", iftype));
- break;
+ if (!ndev || !ndev->ieee80211_ptr) {
+ /* No wireless dev done for this interface */
+ return -EINVAL;
}
- return mode;
-}
-s32
-cfg80211_to_wl_iftype(uint16 type, uint16 *role, uint16 *mode)
-{
- switch (type) {
- case NL80211_IFTYPE_STATION:
- *role = WL_IF_TYPE_STA;
- *mode = WL_MODE_BSS;
- break;
- case NL80211_IFTYPE_AP:
- *role = WL_IF_TYPE_AP;
- *mode = WL_MODE_AP;
- break;
-#ifdef WL_CFG80211_P2P_DEV_IF
- case NL80211_IFTYPE_P2P_DEVICE:
- *role = WL_IF_TYPE_P2P_DISC;
- *mode = WL_MODE_BSS;
- break;
-#endif /* WL_CFG80211_P2P_DEV_IF */
- case NL80211_IFTYPE_P2P_GO:
- *role = WL_IF_TYPE_P2P_GO;
- *mode = WL_MODE_AP;
- break;
- case NL80211_IFTYPE_P2P_CLIENT:
- *role = WL_IF_TYPE_P2P_GC;
- *mode = WL_MODE_BSS;
- break;
- case NL80211_IFTYPE_MONITOR:
- WL_ERR(("Unsupported mode \n"));
- return BCME_UNSUPPORTED;
- case NL80211_IFTYPE_ADHOC:
- *role = WL_IF_TYPE_IBSS;
- *mode = WL_MODE_IBSS;
- break;
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 9, 0))
- case NL80211_IFTYPE_NAN:
- *role = WL_IF_TYPE_NAN;
- *mode = WL_MODE_NAN;
- break;
-#endif // endif
-#ifdef WLMESH_CFG80211
- case NL80211_IFTYPE_MESH_POINT:
- *role = WLC_E_IF_ROLE_AP;
- *mode = WL_MODE_MESH;
- break;
-#endif /* WLMESH_CFG80211 */
- default:
- WL_ERR(("Unknown interface type:0x%x\n", type));
- return BCME_ERROR;
+ cfg = wl_get_cfg(ndev);
+ if (!cfg) {
+ WL_ERR(("cfg null\n"));
+ return BCME_ERROR;
}
- return BCME_OK;
-}
-
-static s32
-wl_role_to_cfg80211_type(uint16 role, uint16 *wl_iftype, uint16 *mode)
-{
- switch (role) {
- case WLC_E_IF_ROLE_STA:
- *wl_iftype = WL_IF_TYPE_STA;
- *mode = WL_MODE_BSS;
- return NL80211_IFTYPE_STATION;
- case WLC_E_IF_ROLE_AP:
- *wl_iftype = WL_IF_TYPE_AP;
- *mode = WL_MODE_AP;
- return NL80211_IFTYPE_AP;
- case WLC_E_IF_ROLE_P2P_GO:
- *wl_iftype = WL_IF_TYPE_P2P_GO;
- *mode = WL_MODE_AP;
- return NL80211_IFTYPE_P2P_GO;
- case WLC_E_IF_ROLE_P2P_CLIENT:
- *wl_iftype = WL_IF_TYPE_P2P_GC;
- *mode = WL_MODE_BSS;
- return NL80211_IFTYPE_P2P_CLIENT;
- case WLC_E_IF_ROLE_IBSS:
- *wl_iftype = WL_IF_TYPE_IBSS;
- *mode = WL_MODE_IBSS;
- return NL80211_IFTYPE_ADHOC;
- case WLC_E_IF_ROLE_NAN:
- *wl_iftype = WL_IF_TYPE_NAN;
- *mode = WL_MODE_NAN;
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 9, 0)) && defined(WL_CFG80211_NAN)
- /* NL80211_IFTYPE_NAN should only be used with CFG80211 NAN MGMT
- * For Vendor HAL based NAN implementation, continue advertising
- * as a STA interface
- */
- return NL80211_IFTYPE_NAN;
-#else
- return NL80211_IFTYPE_STATION;
-#endif /* ((LINUX_VER >= KERNEL_VERSION(4, 9, 0))) && WL_CFG80211_NAN */
-#ifdef WLDWDS
- case WLC_E_IF_ROLE_WDS:
- *wl_iftype = WL_IF_TYPE_AP;
- *mode = WL_MODE_AP;
- return NL80211_IFTYPE_AP;
-#endif
-#ifdef WLMESH_CFG80211
- case WLC_E_IF_ROLE_MESH:
- *wl_iftype = WL_IF_TYPE_MESH;
- *mode = WL_MODE_MESH;
- return NL80211_IFTYPE_MESH_POINT;
-#endif /* WLMESH_CFG80211 */
-
- default:
- WL_ERR(("Unknown interface role:0x%x. Forcing type station\n", role));
+ ifidx = dhd_net2idx(((struct dhd_pub *)(cfg->pub))->info, ndev);
+ BCM_REFERENCE(ifidx);
+ if (ifidx <= 0) {
+ WL_ERR(("Invalid IF idx for iface:%s\n", ndev->name));
+ ASSERT(0);
return BCME_ERROR;
}
+ WL_DBG(("cfg80211_remove for iface:%s \n", ndev->name));
+ wl_cfg80211_remove_if(cfg, ifidx, ndev, rtnl_lock_reqd);
+ cfg->bss_pending_op = FALSE;
+
+ return BCME_OK;
}
-struct net_device *
-wl_cfg80211_post_ifcreate(struct net_device *ndev,
- wl_if_event_info *event, u8 *addr,
- const char *name, bool rtnl_lock_reqd)
+#if defined(WL_VIRTUAL_APSTA) || defined(DUAL_STA_STATIC_IF)
+/* Create a Generic Network Interface and initialize it depending upon
+ * the interface type
+ */
+bcm_struct_cfgdev*
+wl_cfg80211_create_iface(struct wiphy *wiphy,
+ enum nl80211_iftype iface_type,
+ u8 *mac_addr, const char *name)
{
- struct bcm_cfg80211 *cfg;
- struct net_device *primary_ndev;
+ struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
struct net_device *new_ndev = NULL;
- struct wireless_dev *wdev = NULL;
- s32 iface_type;
+ struct net_device *primary_ndev = NULL;
s32 ret = BCME_OK;
- u16 mode;
- u8 mac_addr[ETH_ALEN];
- u16 wl_iftype;
+ s32 bsscfg_idx = 0;
+ u32 timeout;
+ wl_if_event_info *event = NULL;
+ u8 addr[ETH_ALEN];
+ struct net_info *iter, *next;
+#ifdef WLMESH
+ u16 role = 0, mode = 0;
+#endif
- if (!ndev || !event) {
- WL_ERR(("Wrong arg\n"));
+ WL_DBG(("Enter\n"));
+ if (!name) {
+ WL_ERR(("Interface name not provided\n"));
return NULL;
}
+ else {
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wcast-qual"
+#endif
+ for_each_ndev(cfg, iter, next) {
+ if (iter->ndev) {
+ if (strcmp(iter->ndev->name, name) == 0) {
+ WL_ERR(("Interface name, %s exists !\n", iter->ndev->name));
+ return NULL;
+ }
+ }
+ }
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic pop
+#endif
+ }
+ primary_ndev = bcmcfg_to_prmry_ndev(cfg);
- cfg = wl_get_cfg(ndev);
- if (!cfg) {
- WL_ERR(("cfg null\n"));
- return NULL;
+ if (likely(!mac_addr)) {
+ /* Use primary MAC with the locally administered bit for the
+ * Secondary STA I/F
+ */
+ memcpy(addr, primary_ndev->dev_addr, ETH_ALEN);
+ addr[0] |= 0x02;
+ } else {
+ /* Use the application provided mac address (if any) */
+ memcpy(addr, mac_addr, ETH_ALEN);
}
- WL_DBG(("Enter. role:%d ifidx:%d bssidx:%d\n",
- event->role, event->ifidx, event->bssidx));
- if (!event->ifidx || !event->bssidx) {
- /* Fw returned primary idx (0) for virtual interface */
- WL_ERR(("Wrong index. ifidx:%d bssidx:%d \n",
- event->ifidx, event->bssidx));
+ if ((iface_type != NL80211_IFTYPE_STATION) && (iface_type != NL80211_IFTYPE_AP)) {
+ WL_ERR(("IFACE type:%d not supported. STA "
+ "or AP IFACE is only supported\n", iface_type));
return NULL;
}
-#if defined(WLMESH_CFG80211) && defined(WL_EXT_IAPSTA)
- if (wl_ext_iapsta_mesh_creating(ndev)) {
- event->role = WLC_E_IF_ROLE_MESH;
- WL_MSG(ndev->name, "change role to WLC_E_IF_ROLE_MESH\n");
- }
-#endif /* WLMESH_CFG80211 && WL_EXT_IAPSTA */
+ cfg->bss_pending_op = TRUE;
+ memset(&cfg->if_event_info, 0, sizeof(cfg->if_event_info));
- iface_type = wl_role_to_cfg80211_type(event->role, &wl_iftype, &mode);
- if (iface_type < 0) {
- /* Unknown iface type */
- WL_ERR(("Wrong iface type \n"));
- return NULL;
- }
-
- WL_DBG(("mac_ptr:%p name:%s role:%d nl80211_iftype:%d " MACDBG "\n",
- addr, name, event->role, iface_type, MAC2STRDBG(event->mac)));
- if (!name) {
- /* If iface name is not provided, use dongle ifname */
- name = event->name;
- }
-
- if (!addr) {
- /* If mac address is not set, use primary mac with locally administered
- * bit set.
- */
- primary_ndev = bcmcfg_to_prmry_ndev(cfg);
- memcpy(mac_addr, primary_ndev->dev_addr, ETH_ALEN);
- /* For customer6 builds, use primary mac address for virtual interface */
- mac_addr[0] |= 0x02;
- addr = mac_addr;
- }
-
-#ifdef WL_STATIC_IF
- if (IS_CFG80211_STATIC_IF_NAME(cfg, name)) {
- new_ndev = wl_cfg80211_post_static_ifcreate(cfg, event, addr, iface_type);
- if (!new_ndev) {
- WL_ERR(("failed to get I/F pointer\n"));
- return NULL;
- }
- wdev = new_ndev->ieee80211_ptr;
- } else
-#endif /* WL_STATIC_IF */
- {
- new_ndev = wl_cfg80211_allocate_if(cfg, event->ifidx,
- name, addr, event->bssidx, event->name);
- if (!new_ndev) {
- WL_ERR(("I/F allocation failed! \n"));
- return NULL;
- } else {
- WL_DBG(("I/F allocation succeeded! ifidx:0x%x bssidx:0x%x \n",
- event->ifidx, event->bssidx));
- }
-
- wdev = (struct wireless_dev *)MALLOCZ(cfg->osh, sizeof(*wdev));
- if (!wdev) {
- WL_ERR(("wireless_dev alloc failed! \n"));
- wl_cfg80211_remove_if(cfg, event->ifidx, new_ndev, rtnl_lock_reqd);
- return NULL;
- }
-
- wdev->wiphy = bcmcfg_to_wiphy(cfg);
- wdev->iftype = iface_type;
-
- new_ndev->ieee80211_ptr = wdev;
-#ifdef WLDWDS
- /* set wds0.x to 4addr interface here */
- if (event->role == WLC_E_IF_ROLE_WDS) {
- printf("\n\n\n event->role == WLC_E_IF_ROLE_WDS, set vwdev 4addr to %s\n", event->name);
- wdev->use_4addr = true;
- }
-#endif /* WLDWDS */
- SET_NETDEV_DEV(new_ndev, wiphy_dev(wdev->wiphy));
-
- memcpy(new_ndev->dev_addr, addr, ETH_ALEN);
-#ifdef WL_EXT_IAPSTA
- wl_ext_iapsta_ifadding(new_ndev, event->ifidx);
-#endif /* WL_EXT_IAPSTA */
- if (wl_cfg80211_register_if(cfg, event->ifidx, new_ndev, rtnl_lock_reqd)
- != BCME_OK) {
- WL_ERR(("IFACE register failed \n"));
- /* Post interface registration, wdev would be freed from the netdev
- * destructor path. For other cases, handle it here.
- */
- MFREE(cfg->osh, wdev, sizeof(*wdev));
- wl_cfg80211_remove_if(cfg, event->ifidx, new_ndev, rtnl_lock_reqd);
- return NULL;
- }
- }
-
- /* Initialize with the station mode params */
- ret = wl_alloc_netinfo(cfg, new_ndev, wdev, wl_iftype,
- PM_ENABLE, event->bssidx, event->ifidx);
- if (unlikely(ret)) {
- WL_ERR(("wl_alloc_netinfo Error (%d)\n", ret));
- goto fail;
- }
-
- /* Apply the mode & infra setting based on iftype */
- if ((ret = wl_config_infra(cfg, new_ndev, wl_iftype)) < 0) {
- WL_ERR(("config ifmode failure (%d)\n", ret));
- goto fail;
- }
-
- if (mode == WL_MODE_AP) {
- wl_set_drv_status(cfg, AP_CREATING, new_ndev);
- }
-#ifdef WL_EXT_IAPSTA
- wl_ext_iapsta_update_iftype(new_ndev, event->ifidx, wl_iftype);
-#endif
-
- WL_INFORM_MEM(("Network Interface (%s) registered with host."
- " cfg_iftype:%d wl_role:%d " MACDBG "\n",
- new_ndev->name, iface_type, event->role, MAC2STRDBG(new_ndev->dev_addr)));
-
-#ifdef SUPPORT_SET_CAC
- wl_cfg80211_set_cac(cfg, 0);
-#endif /* SUPPORT_SET_CAC */
-
- return new_ndev;
-
-fail:
-#ifdef WL_STATIC_IF
- /* remove static if from iflist */
- if (IS_CFG80211_STATIC_IF_NAME(cfg, name)) {
- cfg->static_ndev_state = NDEV_STATE_FW_IF_FAILED;
- wl_cfg80211_update_iflist_info(cfg, new_ndev, WL_STATIC_IFIDX, addr,
- event->bssidx, event->name, NDEV_STATE_FW_IF_FAILED);
- }
-#endif /* WL_STATIC_IF */
- if (new_ndev) {
- /* wdev would be freed from netdev destructor call back */
- wl_cfg80211_remove_if(cfg, event->ifidx, new_ndev, rtnl_lock_reqd);
- }
-
- return NULL;
-}
-
-s32
-wl_cfg80211_delete_iface(struct bcm_cfg80211 *cfg,
- wl_iftype_t sec_data_if_type)
-{
- struct net_info *iter, *next;
- struct net_device *primary_ndev;
- s32 ret = BCME_OK;
- uint8 i = 0;
-
- BCM_REFERENCE(i);
- BCM_REFERENCE(ret);
-
- /* Note: This function will clean up only the network interface and host
- * data structures. The firmware interface clean up will happen in the
- * during chip reset (ifconfig wlan0 down for built-in drivers/rmmod
- * context for the module case).
- */
- primary_ndev = bcmcfg_to_prmry_ndev(cfg);
- WL_DBG(("Enter, deleting iftype %s\n",
- wl_iftype_to_str(sec_data_if_type)));
- GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
- for_each_ndev(cfg, iter, next) {
- GCC_DIAGNOSTIC_POP();
- if (iter->ndev && (iter->ndev != primary_ndev)) {
- if (iter->iftype != sec_data_if_type) {
- continue;
- }
- switch (sec_data_if_type) {
- case WL_IF_TYPE_P2P_GO:
- case WL_IF_TYPE_P2P_GC: {
- ret = _wl_cfg80211_del_if(cfg,
- iter->ndev, NULL, iter->ndev->name);
- break;
- }
-#ifdef WL_NAN
- case WL_IF_TYPE_NAN: {
- if (cfg->nan_enable == false) {
- WL_INFORM_MEM(("Nan is not active,"
- " ignore NDI delete\n"));
- } else {
- ret = wl_cfgnan_delete_ndp(cfg, iter->ndev);
- }
- break;
- }
-#endif /* WL_NAN */
- case WL_IF_TYPE_AP: {
- /* Cleanup AP */
-#ifdef WL_STATIC_IF
- /* handle static ap */
- if (IS_CFG80211_STATIC_IF(cfg, iter->ndev)) {
- dev_close(iter->ndev);
- } else
-#endif /* WL_STATIC_IF */
- {
- /* handle virtual created AP */
- ret = _wl_cfg80211_del_if(cfg, iter->ndev,
- NULL, iter->ndev->name);
- }
- break;
- }
- default: {
- WL_ERR(("Unsupported interface type\n"));
- ret = -ENOTSUPP;
- goto fail;
- }
- }
- }
- }
-fail:
- return ret;
-}
-
-void
-wl_cfg80211_cleanup_virtual_ifaces(struct bcm_cfg80211 *cfg, bool rtnl_lock_reqd)
-{
- struct net_info *iter, *next;
- struct net_device *primary_ndev;
-
- /* Note: This function will clean up only the network interface and host
- * data structures. The firmware interface clean up will happen in the
- * during chip reset (ifconfig wlan0 down for built-in drivers/rmmod
- * context for the module case).
- */
- primary_ndev = bcmcfg_to_prmry_ndev(cfg);
- WL_DBG(("Enter\n"));
- GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
- for_each_ndev(cfg, iter, next) {
- GCC_DIAGNOSTIC_POP();
- if (iter->ndev && (iter->ndev != primary_ndev)) {
- /* Ensure interfaces are down before deleting */
-#ifdef WL_STATIC_IF
- /* Avoiding cleaning static ifaces */
- if (!IS_CFG80211_STATIC_IF(cfg, iter->ndev))
-#endif /* WL_STATIC_IF */
- {
- dev_close(iter->ndev);
- WL_DBG(("Cleaning up iface:%s \n", iter->ndev->name));
- wl_cfg80211_post_ifdel(iter->ndev, rtnl_lock_reqd, 0);
- }
- }
- }
-}
-
-s32
-wl_cfg80211_post_ifdel(struct net_device *ndev, bool rtnl_lock_reqd, s32 ifidx)
-{
- s32 ret = BCME_OK;
- struct bcm_cfg80211 *cfg;
- struct net_info *netinfo = NULL;
-
- if (!ndev || !ndev->ieee80211_ptr) {
- /* No wireless dev done for this interface */
- ret = -EINVAL;
- goto exit;
- }
-
- cfg = wl_get_cfg(ndev);
- if (!cfg) {
- WL_ERR(("cfg null\n"));
- ret = BCME_ERROR;
- goto exit;
- }
-
- if (ifidx <= 0) {
- WL_ERR(("Invalid IF idx for iface:%s\n", ndev->name));
- ifidx = dhd_net2idx(((struct dhd_pub *)(cfg->pub))->info, ndev);
- BCM_REFERENCE(ifidx);
- if (ifidx <= 0) {
- ASSERT(0);
- ret = BCME_ERROR;
- goto exit;
- }
- }
-
- if ((netinfo = wl_get_netinfo_by_wdev(cfg, ndev_to_wdev(ndev))) == NULL) {
- WL_ERR(("Find netinfo from wdev %p failed\n", ndev_to_wdev(ndev)));
- ret = -ENODEV;
- goto exit;
- }
-
-#ifdef WL_STATIC_IF
- if (IS_CFG80211_STATIC_IF(cfg, ndev)) {
- ret = wl_cfg80211_post_static_ifdel(cfg, ndev);
- } else
-#endif /* WL_STATIC_IF */
- {
- WL_INFORM_MEM(("[%s] cfg80211_remove_if ifidx:%d, vif_count:%d\n",
- ndev->name, ifidx, cfg->vif_count));
- wl_cfg80211_remove_if(cfg, ifidx, ndev, rtnl_lock_reqd);
- cfg->bss_pending_op = FALSE;
- }
-
-#ifdef SUPPORT_SET_CAC
- wl_cfg80211_set_cac(cfg, 1);
-#endif /* SUPPORT_SET_CAC */
-exit:
- return ret;
-}
-
-int
-wl_cfg80211_deinit_p2p_discovery(struct bcm_cfg80211 *cfg)
-{
- s32 ret = BCME_OK;
- bcm_struct_cfgdev *cfgdev;
-
- if (cfg->p2p) {
- /* De-initialize the p2p discovery interface, if operational */
- WL_ERR(("Disabling P2P Discovery Interface \n"));
+ /* De-initialize the p2p discovery interface, if operational */
+ if (p2p_is_on(cfg)) {
+ WL_DBG(("Disabling P2P Discovery Interface \n"));
#ifdef WL_CFG80211_P2P_DEV_IF
- cfgdev = bcmcfg_to_p2p_wdev(cfg);
+ ret = wl_cfg80211_scan_stop(cfg, bcmcfg_to_p2p_wdev(cfg));
#else
- cfgdev = cfg->p2p_net;
-#endif // endif
- if (cfgdev) {
- ret = wl_cfg80211_scan_stop(cfg, cfgdev);
- if (unlikely(ret < 0)) {
- CFGP2P_ERR(("P2P scan stop failed, ret=%d\n", ret));
- }
+ ret = wl_cfg80211_scan_stop(cfg, cfg->p2p_net);
+#endif
+ if (unlikely(ret < 0)) {
+ CFGP2P_ERR(("P2P scan stop failed, ret=%d\n", ret));
}
wl_cfgp2p_disable_discovery(cfg);
wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_DEVICE) = 0;
p2p_on(cfg) = false;
}
- return ret;
-}
-
-/* Create a Generic Network Interface and initialize it depending up on
- * the interface type
- */
-struct wireless_dev *
-wl_cfg80211_create_iface(struct wiphy *wiphy,
- wl_iftype_t wl_iftype,
- u8 *mac_addr, const char *name)
-{
- struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
- struct net_device *new_ndev = NULL;
- struct net_device *primary_ndev = NULL;
- s32 ret = BCME_OK;
- s32 bsscfg_idx = 0;
- long timeout;
- wl_if_event_info *event = NULL;
- u8 addr[ETH_ALEN];
- struct net_info *iter, *next;
-
- WL_DBG(("Enter\n"));
- if (!name) {
- WL_ERR(("Interface name not provided\n"));
- return NULL;
- }
- else {
- GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
- for_each_ndev(cfg, iter, next) {
- GCC_DIAGNOSTIC_POP();
- if (iter->ndev) {
- if (strncmp(iter->ndev->name, name, strlen(name)) == 0) {
- WL_ERR(("Interface name,%s exists!\n", iter->ndev->name));
- return NULL;
- }
- }
- }
- }
- primary_ndev = bcmcfg_to_prmry_ndev(cfg);
- if (likely(!mac_addr)) {
- /* Use primary MAC with the locally administered bit for the
- * Secondary STA I/F
- */
- memcpy(addr, primary_ndev->dev_addr, ETH_ALEN);
- addr[0] |= 0x02;
- } else {
- /* Use the application provided mac address (if any) */
- memcpy(addr, mac_addr, ETH_ALEN);
- }
-
- cfg->bss_pending_op = TRUE;
- bzero(&cfg->if_event_info, sizeof(cfg->if_event_info));
/*
 * Initialize the firmware I/F.
*/
- {
+ if (wl_customer6_legacy_chip_check(cfg, primary_ndev)) {
+ /* Use bss iovar instead of interface_create iovar */
+ ret = BCME_UNSUPPORTED;
+ } else {
ret = wl_cfg80211_interface_ops(cfg, primary_ndev, bsscfg_idx,
- wl_iftype, 0, addr);
+ iface_type, 0, addr);
}
if (ret == BCME_UNSUPPORTED) {
/* Use bssidx 1 by default */
bsscfg_idx = 1;
if ((ret = wl_cfg80211_add_del_bss(cfg, primary_ndev,
- bsscfg_idx, wl_iftype, 0, addr)) < 0) {
+ bsscfg_idx, iface_type, 0, addr)) < 0) {
goto exit;
}
} else if (ret < 0) {
}
WL_DBG(("Interface created!! bssidx:%d \n", bsscfg_idx));
+
/*
 * Wait till the firmware sends a confirmation event back.
*/
timeout = wait_event_interruptible_timeout(cfg->netif_change_event,
!cfg->bss_pending_op, msecs_to_jiffies(MAX_WAIT_TIME));
if (timeout <= 0 || cfg->bss_pending_op) {
- WL_ERR(("ADD_IF event, didn't come. Return. timeout:%lu bss_pending_op:%d\n",
- timeout, cfg->bss_pending_op));
- if (timeout == -ERESTARTSYS) {
- WL_ERR(("waitqueue was interrupted by a signal, returns -ERESTARTSYS\n"));
- }
+ WL_ERR(("ADD_IF event, didn't come. Return \n"));
goto exit;
}
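/*
 * Interface create/delete uses the usual "pending flag + waitqueue" handshake:
 * set bss_pending_op, fire the iovar, then sleep until the WLC_E_IF event
 * handler clears the flag and wakes netif_change_event, or the wait times out.
 * A minimal kernel-style sketch of the same pattern (the demo_* names are
 * illustrative, not the driver's symbols):
 */
#include <linux/wait.h>
#include <linux/jiffies.h>
#include <linux/errno.h>
#include <linux/types.h>

static DECLARE_WAIT_QUEUE_HEAD(demo_event_wq);
static bool demo_pending_op;

static int demo_wait_for_fw_event(unsigned int timeout_ms)
{
	long left;

	demo_pending_op = true;
	/* ... issue the firmware command here ... */
	left = wait_event_interruptible_timeout(demo_event_wq,
		!demo_pending_op, msecs_to_jiffies(timeout_ms));
	if (left <= 0 || demo_pending_op)
		return -ETIMEDOUT;	/* 0: timed out, <0: interrupted */
	return 0;
}

/*
 * Event-handler side:
 *	demo_pending_op = false;
 *	wake_up_interruptible(&demo_event_wq);
 */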
event = &cfg->if_event_info;
+#ifdef WLMESH
+ cfg80211_to_wl_iftype(iface_type, &role, &mode);
+ event->role = role;
+#endif
+
/*
 * Since FW operation is successful, we can go ahead with the
 * host interface creation.
*/
- new_ndev = wl_cfg80211_post_ifcreate(primary_ndev,
- event, addr, name, false);
-
- if (new_ndev) {
+ if ((new_ndev = wl_cfg80211_post_ifcreate(primary_ndev,
+ event, mac_addr, name, false))) {
/* Iface post ops successful. Return ndev/wdev ptr */
- return new_ndev->ieee80211_ptr;
+ return ndev_to_cfgdev(new_ndev);
}
exit:
}
s32
-wl_cfg80211_del_iface(struct wiphy *wiphy, struct wireless_dev *wdev)
+wl_cfg80211_del_iface(struct wiphy *wiphy, bcm_struct_cfgdev *cfgdev)
{
struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
struct net_device *ndev = NULL;
s32 ret = BCME_OK;
s32 bsscfg_idx = 1;
- long timeout;
- u16 wl_iftype;
- u16 wl_mode;
+ u32 timeout;
+ enum nl80211_iftype iface_type = NL80211_IFTYPE_STATION;
WL_DBG(("Enter\n"));
/* If any scan is going on, abort it */
if (wl_get_drv_status_all(cfg, SCANNING)) {
WL_DBG(("Scan in progress. Aborting the scan!\n"));
- wl_cfg80211_cancel_scan(cfg);
- }
-
- bsscfg_idx = wl_get_bssidx_by_wdev(cfg, wdev);
- if (bsscfg_idx <= 0) {
- /* validate bsscfgidx */
- WL_ERR(("Wrong bssidx! \n"));
- return -EINVAL;
- }
-
- /* Handle p2p iface */
- if ((ret = wl_cfg80211_p2p_if_del(wiphy, wdev)) != BCME_NOTFOUND) {
- WL_DBG(("P2P iface del handled \n"));
-#ifdef SUPPORT_SET_CAC
- wl_cfg80211_set_cac(cfg, 1);
-#endif /* SUPPORT_SET_CAC */
- return ret;
- }
-
- ndev = wdev->netdev;
- if (unlikely(!ndev)) {
- WL_ERR(("ndev null! \n"));
- return -EINVAL;
+ wl_notify_escan_complete(cfg, cfg->escan_info.ndev, true, true);
}
+ ndev = (struct net_device *)cfgdev_to_ndev(cfgdev);
+ cfg->bss_pending_op = TRUE;
memset(&cfg->if_event_info, 0, sizeof(cfg->if_event_info));
- if (cfg80211_to_wl_iftype(ndev->ieee80211_ptr->iftype,
- &wl_iftype, &wl_mode) < 0) {
- return -EINVAL;
- }
-
- WL_DBG(("del interface. bssidx:%d cfg_iftype:%d wl_iftype:%d",
- bsscfg_idx, ndev->ieee80211_ptr->iftype, wl_iftype));
/* Delete the firmware interface. "interface_remove" command
* should go on the interface to be deleted
*/
- if (wl_cfg80211_get_bus_state(cfg)) {
- WL_ERR(("Bus state is down: %d\n", __LINE__));
- ret = BCME_DONGLE_DOWN;
- goto exit;
+ bsscfg_idx = wl_get_bssidx_by_wdev(cfg, ndev->ieee80211_ptr);
+ if (bsscfg_idx <= 0) {
+ /* validate bsscfgidx */
+ WL_ERR(("Wrong bssidx! \n"));
+ return -EINVAL;
}
-
- cfg->bss_pending_op = true;
+ WL_DBG(("del interface. bssidx:%d", bsscfg_idx));
ret = wl_cfg80211_interface_ops(cfg, ndev, bsscfg_idx,
- wl_iftype, 1, NULL);
+ NL80211_IFTYPE_STATION, 1, NULL);
if (ret == BCME_UNSUPPORTED) {
if ((ret = wl_cfg80211_add_del_bss(cfg, ndev,
- bsscfg_idx, wl_iftype, true, NULL)) < 0) {
+ bsscfg_idx, iface_type, true, NULL)) < 0) {
WL_ERR(("DEL bss failed ret:%d \n", ret));
goto exit;
}
- } else if ((ret == BCME_NOTAP) || (ret == BCME_NOTSTA)) {
- /* De-init sequence involving role downgrade not happened.
- * Do nothing and return error. The del command should be
- * retried.
- */
- WL_ERR(("ifdel role mismatch:%d\n", ret));
- ret = -EBADTYPE;
- goto exit;
} else if (ret < 0) {
WL_ERR(("Interface DEL failed ret:%d \n", ret));
goto exit;
!cfg->bss_pending_op, msecs_to_jiffies(MAX_WAIT_TIME));
if (timeout <= 0 || cfg->bss_pending_op) {
WL_ERR(("timeout in waiting IF_DEL event\n"));
- /* The interface unregister will happen from wifi reset context */
- ret = -ETIMEDOUT;
- /* fall through */
}
exit:
- if (ret < 0) {
- WL_ERR(("iface del failed:%d\n", ret));
-#ifdef WL_STATIC_IF
- if (IS_CFG80211_STATIC_IF(cfg, ndev)) {
- /*
- * For static interface, clean up the host data,
- * irrespective of fw status. For dynamic
- * interfaces it gets cleaned from dhd_stop context
- */
- wl_cfg80211_post_static_ifdel(cfg, ndev);
+ ret = wl_cfg80211_post_ifdel(ndev, false);
+ if (unlikely(ret)) {
+ WL_ERR(("post_ifdel failed\n"));
+ }
+
+ return ret;
+}
+#endif /* defined(WL_VIRTUAL_APSTA) || defined(DUAL_STA_STATIC_IF) */
+
+#ifdef WLMESH
+s32 wl_cfg80211_set_sae_password(struct net_device *dev, char* buf, int len)
+{
+ struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
+
+ sscanf(buf, "%s %d", cfg->sae_password, &cfg->sae_password_len);
+ return 0;
+}
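/*
 * wl_cfg80211_set_sae_password() above parses "password length" out of a
 * single whitespace-separated string (e.g. "meshpass123 11"); because %s
 * stops at the first space, the password itself cannot contain spaces. A
 * sketch of building such a buffer on the caller side (how the buffer is
 * delivered to the driver - private ioctl, debugfs, etc. - is not shown and
 * is outside the scope of this example):
 */
#include <stdio.h>
#include <string.h>

static int format_sae_password(char *buf, size_t buflen, const char *passwd)
{
	int n = snprintf(buf, buflen, "%s %zu", passwd, strlen(passwd));

	return (n > 0 && (size_t)n < buflen) ? 0 : -1;	/* -1 if truncated */
}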
+
+static s32 wl_cfg80211_join_mesh(
+ struct wiphy *wiphy, struct net_device *dev,
+ const struct mesh_config *conf,
+ const struct mesh_setup *setup)
+{
+ struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0))
+ struct ieee80211_channel *chan = setup->chandef.chan;
+#elif (LINUX_VERSION_CODE >= KERNEL_VERSION (3, 6, 0))
+ struct ieee80211_channel *chan = setup->channel;
+#endif
+ u32 param[2] = {0, 0};
+ s32 err = 0;
+ u32 bw_cap = 0;
+ u32 beacon_interval = setup->beacon_interval;
+ u32 dtim_period = setup->dtim_period;
+ size_t join_params_size;
+ struct wl_join_params join_params;
+ chanspec_t chanspec = 0;
+
+ cfg->channel = ieee80211_frequency_to_channel(chan->center_freq);
+
+ if (wl_get_drv_status(cfg, CONNECTED, dev)) {
+ struct wlc_ssid *lssid = (struct wlc_ssid *)wl_read_prof(cfg, dev, WL_PROF_SSID);
+ u8 *bssid = (u8 *)wl_read_prof(cfg, dev, WL_PROF_BSSID);
+ u32 *channel = (u32 *)wl_read_prof(cfg, dev, WL_PROF_CHAN);
+ if ((memcmp(setup->mesh_id, lssid->SSID, lssid->SSID_len) == 0) &&
+ (*channel == cfg->channel)) {
+ WL_ERR(("MESH connection already existed to " MACDBG "\n",
+ MAC2STRDBG((u8 *)wl_read_prof(cfg, dev, WL_PROF_BSSID))));
+ return -EISCONN;
}
-#endif /* WL_STATIC_IF */
+ WL_ERR(("Previous connecton existed, please disconnect mesh %s (" MACDBG ") first\n",
+ lssid->SSID, MAC2STRDBG(bssid)));
+ return -EISCONN;
+ }
+
+ if (chan) {
+ if (chan->band == IEEE80211_BAND_5GHZ)
+ param[0] = WLC_BAND_5G;
+ else if (chan->band == IEEE80211_BAND_2GHZ)
+ param[0] = WLC_BAND_2G;
+ err = wldev_iovar_getint(dev, "bw_cap", param);
+ if (unlikely(err)) {
+ WL_ERR(("Get bw_cap Failed (%d)\n", err));
+ return err;
+ }
+ bw_cap = param[0];
+ chanspec = channel_to_chanspec(wiphy, dev, cfg->channel, bw_cap);
+ }
+
+ memset(&join_params, 0, sizeof(join_params));
+ memcpy((void *)join_params.ssid.SSID, (void *)setup->mesh_id,
+ setup->mesh_id_len);
+
+ join_params.ssid.SSID_len = htod32(setup->mesh_id_len);
+ join_params.params.chanspec_list[0] = chanspec;
+ join_params.params.chanspec_num = 1;
+ wldev_iovar_setint(dev, "chanspec", chanspec);
+ join_params_size = sizeof(join_params);
+
+ wldev_iovar_setint(dev, "wpa_auth", WPA_AUTH_DISABLED);
+ wldev_iovar_setint(dev, "wsec", 0);
+
+ if (cfg->sae_password_len > 0) {
+ wldev_iovar_setint(dev, "mesh_auth_proto", 1);
+ wldev_iovar_setint(dev, "wpa_auth", WPA2_AUTH_PSK);
+ wldev_iovar_setint(dev, "wsec", AES_ENABLED);
+ wldev_iovar_setint(dev, "mfp", WL_MFP_REQUIRED);
+ printf("%s: password=%s, len=%d\n", __FUNCTION__,
+ cfg->sae_password, cfg->sae_password_len);
+ wldev_iovar_setbuf(dev, "sae_password", cfg->sae_password, cfg->sae_password_len,
+ cfg->ioctl_buf, WLC_IOCTL_MAXLEN, NULL);
} else {
- ret = wl_cfg80211_post_ifdel(ndev, false, cfg->if_event_info.ifidx);
- if (unlikely(ret)) {
- WL_ERR(("post_ifdel failed\n"));
+ wldev_iovar_setint(dev, "mesh_auth_proto", 0);
+ wldev_iovar_setint(dev, "mfp", WL_MFP_NONE);
+ }
+
+ if (beacon_interval) {
+ if ((err = wldev_ioctl_set(dev, WLC_SET_BCNPRD,
+ &beacon_interval, sizeof(s32))) < 0) {
+ WL_ERR(("Beacon Interval Set Error, %d\n", err));
+ return err;
}
}
- cfg->bss_pending_op = false;
- return ret;
+ if (dtim_period) {
+ if ((err = wldev_ioctl_set(dev, WLC_SET_DTIMPRD,
+ &dtim_period, sizeof(s32))) < 0) {
+ WL_ERR(("DTIM Interval Set Error, %d\n", err));
+ return err;
+ }
+ }
+ wldev_iovar_setint(dev, "mpc", 0);
+
+ WL_ERR(("JOIN %s on channel %d with chanspec 0x%4x\n",
+ join_params.ssid.SSID, cfg->channel, chanspec));
+
+ err = wldev_ioctl_set(dev, WLC_SET_SSID, &join_params,
+ join_params_size);
+
+ if (unlikely(err)) {
+ WL_ERR(("Error (%d)\n", err));
+ return err;
+ }
+
+ wl_update_prof(cfg, dev, NULL, &join_params.ssid, WL_PROF_SSID);
+ wl_update_prof(cfg, dev, NULL, &cfg->channel, WL_PROF_CHAN);
+ return err;
+}
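/*
 * wl_cfg80211_join_mesh() above first converts the cfg80211 channel's center
 * frequency to an IEEE channel number and then folds in band/bandwidth via
 * channel_to_chanspec(). The frequency-to-channel step follows the standard
 * 802.11 mapping; a standalone sketch covering only the common 2.4/5 GHz
 * cases (demo_freq_to_channel() is illustrative, not the driver helper):
 */
static int demo_freq_to_channel(int freq_mhz)
{
	if (freq_mhz == 2484)
		return 14;			/* 2.4 GHz channel 14 */
	if (freq_mhz >= 2412 && freq_mhz <= 2472)
		return (freq_mhz - 2407) / 5;	/* 2.4 GHz channels 1..13 */
	if (freq_mhz >= 5180 && freq_mhz <= 5885)
		return (freq_mhz - 5000) / 5;	/* 5 GHz channels 36.. */
	return -1;				/* outside the cases handled here */
}

/* e.g. 2412 -> 1, 2437 -> 6, 5180 -> 36, 5745 -> 149 */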
+
+
+static s32 wl_cfg80211_leave_mesh(
+ struct wiphy *wiphy, struct net_device *dev)
+{
+ struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+ s32 err = 0;
+ scb_val_t scbval;
+ u8 *curbssid;
+
+ RETURN_EIO_IF_NOT_UP(cfg);
+ wl_link_down(cfg);
+
+ WL_ERR(("Leave MESH\n"));
+ curbssid = wl_read_prof(cfg, dev, WL_PROF_BSSID);
+ wl_set_drv_status(cfg, DISCONNECTING, dev);
+ scbval.val = 0;
+ memcpy(&scbval.ea, curbssid, ETHER_ADDR_LEN);
+ err = wldev_ioctl_set(dev, WLC_DISASSOC, &scbval,
+ sizeof(scb_val_t));
+ if (unlikely(err)) {
+ wl_clr_drv_status(cfg, DISCONNECTING, dev);
+ WL_ERR(("error(%d)\n", err));
+ return err;
+ }
+ memset(cfg->sae_password, 0, SAE_MAX_PASSWD_LEN);
+ cfg->sae_password_len = 0;
+
+ return err;
}
+#endif /* WLMESH */
static s32
wl_cfg80211_join_ibss(struct wiphy *wiphy, struct net_device *dev,
WL_TRACE(("In\n"));
RETURN_EIO_IF_NOT_UP(cfg);
- WL_INFORM_MEM(("IBSS JOIN BSSID:" MACDBG "\n", MAC2STRDBG(params->bssid)));
+ WL_INFORM(("JOIN BSSID:" MACDBG "\n", MAC2STRDBG(params->bssid)));
if (!params->ssid || params->ssid_len <= 0 ||
- params->ssid_len > DOT11_MAX_SSID_LEN) {
+ params->ssid_len > DOT11_MAX_SSID_LEN) {
WL_ERR(("Invalid parameter\n"));
return -EINVAL;
}
* Join with specific BSSID and cached SSID
* If SSID is zero join based on BSSID only
*/
- bzero(&join_params, sizeof(join_params));
- memcpy((void *)join_params.ssid.SSID, (const void *)params->ssid,
+ memset(&join_params, 0, sizeof(join_params));
+ memcpy((void *)join_params.ssid.SSID, (void *)params->ssid,
params->ssid_len);
join_params.ssid.SSID_len = htod32(params->ssid_len);
if (params->bssid) {
return err;
}
} else
- bzero(&join_params.params.bssid, ETHER_ADDR_LEN);
+ memset(&join_params.params.bssid, 0, ETHER_ADDR_LEN);
if (IBSS_INITIAL_SCAN_ALLOWED == FALSE) {
scan_suppress = TRUE;
err = wldev_ioctl_set(dev, WLC_SET_SSID, &join_params,
join_params_size);
if (unlikely(err)) {
- WL_ERR(("IBSS set_ssid Error (%d)\n", err));
+ WL_ERR(("Error (%d)\n", err));
return err;
}
RETURN_EIO_IF_NOT_UP(cfg);
wl_link_down(cfg);
- WL_INFORM_MEM(("Leave IBSS\n"));
+ WL_ERR(("Leave IBSS\n"));
curbssid = wl_read_prof(cfg, dev, WL_PROF_BSSID);
wl_set_drv_status(cfg, DISCONNECTING, dev);
scbval.val = 0;
}
#ifdef MFP
-static
-int wl_cfg80211_get_rsn_capa(const bcm_tlv_t *wpa2ie,
- const u8** rsn_cap)
+static int wl_cfg80211_get_rsn_capa(bcm_tlv_t *wpa2ie, u8** rsn_cap)
{
u16 suite_count;
- const wpa_suite_mcast_t *mcast;
- const wpa_suite_ucast_t *ucast;
- int len;
- const wpa_suite_auth_key_mgmt_t *mgmt;
+ wpa_suite_mcast_t *mcast;
+ wpa_suite_ucast_t *ucast;
+ u16 len;
+ wpa_suite_auth_key_mgmt_t *mgmt;
if (!wpa2ie)
- return BCME_BADARG;
+ return -1;
len = wpa2ie->len;
-
- /* check for Multicast cipher suite */
- if ((len -= (WPA_SUITE_LEN + WPA2_VERSION_LEN)) <= 0) {
- return BCME_NOTFOUND;
- }
-
- mcast = (const wpa_suite_mcast_t *)&wpa2ie->data[WPA2_VERSION_LEN];
-
- /* Check for the unicast suite(s) */
- if (len < WPA_IE_SUITE_COUNT_LEN) {
- return BCME_NOTFOUND;
- }
-
- ucast = (const wpa_suite_ucast_t *)&mcast[1];
+ mcast = (wpa_suite_mcast_t *)&wpa2ie->data[WPA2_VERSION_LEN];
+ if ((len -= WPA_SUITE_LEN) <= 0)
+ return BCME_BADLEN;
+ ucast = (wpa_suite_ucast_t *)&mcast[1];
suite_count = ltoh16_ua(&ucast->count);
if ((suite_count > NL80211_MAX_NR_CIPHER_SUITES) ||
(len -= (WPA_IE_SUITE_COUNT_LEN +
(WPA_SUITE_LEN * suite_count))) <= 0)
return BCME_BADLEN;
- /* Check for AUTH key management suite(s) */
- if (len < WPA_IE_SUITE_COUNT_LEN) {
- return BCME_NOTFOUND;
- }
-
- mgmt = (const wpa_suite_auth_key_mgmt_t *)&ucast->list[suite_count];
+ mgmt = (wpa_suite_auth_key_mgmt_t *)&ucast->list[suite_count];
suite_count = ltoh16_ua(&mgmt->count);
- if ((suite_count <= NL80211_MAX_NR_CIPHER_SUITES) &&
- (len -= (WPA_IE_SUITE_COUNT_LEN +
- (WPA_SUITE_LEN * suite_count))) >= RSN_CAP_LEN) {
- rsn_cap[0] = (const u8 *)&mgmt->list[suite_count];
- } else {
+ if ((suite_count > NL80211_MAX_NR_CIPHER_SUITES) ||
+ (len -= (WPA_IE_SUITE_COUNT_LEN +
+ (WPA_SUITE_LEN * suite_count))) >= RSN_CAP_LEN) {
+ rsn_cap[0] = (u8 *)&mgmt->list[suite_count];
+ } else
return BCME_BADLEN;
- }
- return BCME_OK;
+ return 0;
}
#endif /* MFP */
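/*
 * wl_cfg80211_get_rsn_capa() above walks the RSN IE body to reach the 2-byte
 * RSN Capabilities field: version(2) | group cipher(4) | pairwise count(2) +
 * 4*n suites | AKM count(2) + 4*m suites | RSN caps(2). A standalone offset
 * calculation for that walk (illustrative only):
 */
static int demo_rsn_caps_offset(int n_pairwise, int n_akm)
{
	return 2			/* version */
	     + 4			/* group cipher suite */
	     + 2 + 4 * n_pairwise	/* pairwise suite count + list */
	     + 2 + 4 * n_akm;		/* AKM suite count + list */
}

/* demo_rsn_caps_offset(1, 1) == 18: rsn_cap starts 18 bytes into the IE body */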
if (is_wps_conn(sme))
val = WPA_AUTH_DISABLED;
-#ifdef BCMWAPI_WPI
- if (sme->crypto.wpa_versions & NL80211_WAPI_VERSION_1) {
- WL_DBG((" * wl_set_wpa_version, set wpa_auth"
- " to WPA_AUTH_WAPI 0x400"));
- val = WAPI_AUTH_PSK | WAPI_AUTH_UNSPECIFIED;
- }
-#endif // endif
- WL_INFORM_MEM(("[%s] wl wpa_auth 0x%0x\n", dev->name, val));
+ WL_DBG(("setting wpa_auth to 0x%0x\n", val));
err = wldev_iovar_setint_bsscfg(dev, "wpa_auth", val, bssidx);
if (unlikely(err)) {
WL_ERR(("set wpa_auth failed (%d)\n", err));
return err;
}
-#ifdef BCMWAPI_WPI
-static s32
-wl_set_set_wapi_ie(struct net_device *dev, struct cfg80211_connect_params *sme)
-{
- struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
- s32 err = 0;
- s32 bssidx;
-
- WL_DBG((" wl_set_set_wapi_ie\n"));
- if ((bssidx = wl_get_bssidx_by_wdev(cfg, dev->ieee80211_ptr)) < 0) {
- WL_ERR(("Find p2p index from wdev(%p) failed\n", dev->ieee80211_ptr));
- return BCME_ERROR;
- }
-
- err = wldev_iovar_setbuf_bsscfg(dev, "wapiie", (const void *)sme->ie, sme->ie_len,
- cfg->ioctl_buf, WLC_IOCTL_MAXLEN, bssidx, &cfg->ioctl_buf_sync);
- if (unlikely(err)) {
- WL_ERR(("set_wapi_ie Error (%d)\n", err));
- return err;
- }
- WL_INFORM_MEM(("wapi_ie successfully (%s)\n", dev->name));
- return err;
-}
-#endif /* BCMWAPI_WPI */
static s32
wl_set_auth_type(struct net_device *dev, struct cfg80211_connect_params *sme)
val = WL_AUTH_OPEN_SHARED;
WL_DBG(("automatic\n"));
break;
-#ifdef WL_FILS
- case NL80211_AUTHTYPE_FILS_SK:
- WL_DBG(("fils shared key\n"));
- val = WL_AUTH_FILS_SHARED;
- break;
- case NL80211_AUTHTYPE_FILS_SK_PFS:
- val = WL_AUTH_FILS_SHARED_PFS;
- WL_DBG(("fils shared key with pfs\n"));
- break;
- case NL80211_AUTHTYPE_FILS_PK:
- WL_DBG(("fils public key\n"));
- val = WL_AUTH_FILS_PUBLIC;
- break;
-#endif /* WL_FILS */
-#ifdef WL_CLIENT_SAE
- case NL80211_AUTHTYPE_SAE:
- if (!wl_is_pmkid_available(dev, sme->bssid)) {
- val = WL_AUTH_SAE_KEY;
- } else {
- /* Fw will choose right auth type
- * dynamically based on PMKID availability
- */
- val = WL_AUTH_OPEN_SHARED;
- }
- WL_DBG(("sae auth type %d\n", val));
- break;
-#endif /* WL_CLIENT_SAE */
default:
val = 2;
WL_ERR(("invalid auth type (%d)\n", sme->auth_type));
break;
}
- WL_INFORM_MEM(("[%s] wl auth 0x%0x \n", dev->name, val));
err = wldev_iovar_setint_bsscfg(dev, "auth", val, bssidx);
if (unlikely(err)) {
WL_ERR(("set auth failed (%d)\n", err));
return err;
}
-#ifdef WL_CLIENT_SAE
-static bool
-wl_is_pmkid_available(struct net_device *dev, const u8 *bssid)
-{
- struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
- int i;
- int npmkids = (cfg->pmk_list->pmkids.length - sizeof(uint16)*2) / sizeof(pmkid_v2_t);
-
- /* check the bssid is null or not */
- if (!bssid) return FALSE;
-
- for (i = 0; i < npmkids; i++) {
- if (!memcmp(bssid, &cfg->pmk_list->pmkids.pmkid[i].bssid, ETHER_ADDR_LEN)) {
- WL_DBG(("FOUND PMKID\n"));
- return TRUE;
- }
- }
- WL_ERR(("PMKID NOT FOUND\n"));
- return FALSE;
-}
-#endif /* WL_CLIENT_SAE */
-
-static u32
-wl_rsn_cipher_wsec_algo_lookup(uint32 cipher)
-{
- uint i;
-
- for (i = 0; i < ARRAYSIZE(rsn_cipher_algo_lookup_tbl); i++) {
- if (cipher == rsn_cipher_algo_lookup_tbl[i].cipher_suite) {
- return rsn_cipher_algo_lookup_tbl[i].wsec_algo;
- }
- }
- return WSEC_NONE;
-}
-
-static u32
-wl_rsn_cipher_wsec_key_algo_lookup(uint32 cipher)
-{
- uint i;
-
- for (i = 0; i < ARRAYSIZE(rsn_cipher_algo_lookup_tbl); i++) {
- if (cipher == rsn_cipher_algo_lookup_tbl[i].cipher_suite) {
- return rsn_cipher_algo_lookup_tbl[i].wsec_key_algo;
- }
- }
- return CRYPTO_ALGO_OFF;
-}
-
-static u32
-wl_rsn_akm_wpa_auth_lookup(uint32 akm)
-{
- uint i;
-
- for (i = 0; i < ARRAYSIZE(rsn_akm_wpa_auth_lookup_tbl); i++) {
- if (akm == rsn_akm_wpa_auth_lookup_tbl[i].akm_suite) {
- return rsn_akm_wpa_auth_lookup_tbl[i].wpa_auth;
- }
- }
- return WPA_AUTH_DISABLED;
-}
-
static s32
wl_set_set_cipher(struct net_device *dev, struct cfg80211_connect_params *sme)
{
s32 gval = 0;
s32 err = 0;
s32 wsec_val = 0;
-#ifdef BCMWAPI_WPI
- s32 wapi_val = 0;
- s32 val = 0;
-#endif // endif
s32 bssidx;
-#ifdef WL_GCMP
- uint32 algos = 0, mask = 0;
-#endif /* WL_GCMP */
if ((bssidx = wl_get_bssidx_by_wdev(cfg, dev->ieee80211_ptr)) < 0) {
WL_ERR(("Find p2p index from wdev(%p) failed\n", dev->ieee80211_ptr));
}
if (sme->crypto.n_ciphers_pairwise) {
- pval = wl_rsn_cipher_wsec_algo_lookup(sme->crypto.ciphers_pairwise[0]);
- if (pval == WSEC_NONE) {
- WL_ERR(("invalid cipher pairwise (%d)\n", sme->crypto.ciphers_pairwise[0]));
- return BCME_BADARG;
- }
switch (sme->crypto.ciphers_pairwise[0]) {
-#ifdef BCMWAPI_WPI
- case WLAN_CIPHER_SUITE_SMS4:
- val = pval;
- err = wl_set_set_wapi_ie(dev, sme);
- if (unlikely(err)) {
- WL_DBG(("Set wapi ie failed \n"));
- return err;
- } else {
- WL_DBG(("Set wapi ie succeded\n"));
- }
- wapi_val = WAPI_AUTH_PSK | WAPI_AUTH_UNSPECIFIED;
- WL_INFORM_MEM(("[WAPI] wl wpa_auth to 0x%0x (%s)\n", val, dev->name));
- err = wldev_iovar_setint_bsscfg(dev, "wpa_auth", wapi_val, bssidx);
- if (unlikely(err)) {
- WL_ERR(("set wpa_auth failed (%d)\n", err));
- return err;
- }
+ case WLAN_CIPHER_SUITE_WEP40:
+ case WLAN_CIPHER_SUITE_WEP104:
+ pval = WEP_ENABLED;
break;
-#endif /* BCMWAPI_WPI */
-#ifdef WL_GCMP
- case WLAN_CIPHER_SUITE_GCMP:
- case WLAN_CIPHER_SUITE_GCMP_256:
- algos = KEY_ALGO_MASK(wl_rsn_cipher_wsec_key_algo_lookup(
- sme->crypto.ciphers_pairwise[0]));
- mask = algos | KEY_ALGO_MASK(CRYPTO_ALGO_AES_CCM);
+ case WLAN_CIPHER_SUITE_TKIP:
+ pval = TKIP_ENABLED;
break;
-#endif /* WL_GCMP */
- default: /* No post processing required */
+ case WLAN_CIPHER_SUITE_CCMP:
+ case WLAN_CIPHER_SUITE_AES_CMAC:
+ pval = AES_ENABLED;
break;
+ default:
+ WL_ERR(("invalid cipher pairwise (%d)\n",
+ sme->crypto.ciphers_pairwise[0]));
+ return -EINVAL;
}
}
-#if defined(BCMSUP_4WAY_HANDSHAKE)
- /* Ensure in-dongle supplicant is turned on when FBT wants to do the 4-way
- * handshake.
- * Note that the FW feature flag only exists on kernels that support the
- * FT-EAP AKM suite.
- */
- if (cfg->wdev->wiphy->features & NL80211_FEATURE_FW_4WAY_HANDSHAKE) {
- err = wldev_iovar_setint_bsscfg(dev, "sup_wpa", 1, bssidx);
- if (err) {
- WL_ERR(("FBT: Error setting sup_wpa (%d)\n", err));
- return err;
- } else {
- WL_INFORM_MEM(("idsup enabled.\n"));
- }
- }
-#endif /* BCMSUP_4WAY_HANDSHAKE */
if (sme->crypto.cipher_group) {
- gval = wl_rsn_cipher_wsec_algo_lookup(sme->crypto.cipher_group);
- if (gval == WSEC_NONE) {
- WL_ERR(("invalid cipher group (%d)\n", sme->crypto.cipher_group));
- return BCME_BADARG;
- }
switch (sme->crypto.cipher_group) {
-#ifdef BCMWAPI_WPI
- case WLAN_CIPHER_SUITE_SMS4:
- val = gval;
+ case WLAN_CIPHER_SUITE_WEP40:
+ case WLAN_CIPHER_SUITE_WEP104:
+ gval = WEP_ENABLED;
+ break;
+ case WLAN_CIPHER_SUITE_TKIP:
+ gval = TKIP_ENABLED;
break;
-#endif // endif
-#ifdef WL_GCMP
- case WLAN_CIPHER_SUITE_GCMP:
- case WLAN_CIPHER_SUITE_GCMP_256:
- algos = KEY_ALGO_MASK(
- wl_rsn_cipher_wsec_key_algo_lookup(sme->crypto.cipher_group));
- mask = algos | KEY_ALGO_MASK(CRYPTO_ALGO_AES_CCM);
+ case WLAN_CIPHER_SUITE_CCMP:
+ gval = AES_ENABLED;
break;
-#endif /* WL_GCMP */
- default: /* No post processing required */
+ case WLAN_CIPHER_SUITE_AES_CMAC:
+ gval = AES_ENABLED;
break;
+ default:
+ WL_ERR(("invalid cipher group (%d)\n",
+ sme->crypto.cipher_group));
+ return -EINVAL;
}
}
WL_DBG(("pval (%d) gval (%d)\n", pval, gval));
-#ifdef WL_GCMP
- WL_DBG(("algos:%x, mask:%x", algos, mask));
-#endif /* WL_GCMP */
if (is_wps_conn(sme)) {
- if (sme->privacy) {
- wsec_val = 4;
- } else {
+ if (sme->privacy)
+ err = wldev_iovar_setint_bsscfg(dev, "wsec", 4, bssidx);
+ else
/* WPS-2.0 allows no security */
- wsec_val = 0;
- }
+ err = wldev_iovar_setint_bsscfg(dev, "wsec", 0, bssidx);
} else {
-#ifdef BCMWAPI_WPI
- if (sme->crypto.cipher_group == WLAN_CIPHER_SUITE_SMS4) {
- WL_DBG((" NO, is_wps_conn, WAPI set to SMS4_ENABLED"));
- wsec_val = val;
- } else
-#endif // endif
- {
WL_DBG((" NO, is_wps_conn, Set pval | gval to WSEC"));
wsec_val = pval | gval;
- }
- }
- WL_INFORM_MEM(("[%s] wl wsec 0x%x\n", dev->name, wsec_val));
- err = wldev_iovar_setint_bsscfg(dev, "wsec", wsec_val, bssidx);
+ WL_DBG((" Set WSEC to fW 0x%x \n", wsec_val));
+ err = wldev_iovar_setint_bsscfg(dev, "wsec",
+ wsec_val, bssidx);
+ }
if (unlikely(err)) {
WL_ERR(("error (%d)\n", err));
return err;
}
-#ifdef WL_GCMP
- wl_set_wsec_info_algos(dev, algos, mask);
-#endif /* WL_GCMP */
+
sec = wl_read_prof(cfg, dev, WL_PROF_SEC);
sec->cipher_pairwise = sme->crypto.ciphers_pairwise[0];
sec->cipher_group = sme->crypto.cipher_group;
+
return err;
}
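/*
 * wl_set_set_cipher() above reduces each NL80211 cipher suite to a firmware
 * "wsec" bit and programs the OR of the pairwise and group selections. A
 * standalone sketch of that reduction (the DEMO_* bit values mirror the
 * usual wlioctl.h definitions and are reproduced here only for illustration):
 */
#define DEMO_WEP_ENABLED	0x0001
#define DEMO_TKIP_ENABLED	0x0002
#define DEMO_AES_ENABLED	0x0004

static int demo_cipher_to_wsec(unsigned int cipher_suite)
{
	switch (cipher_suite) {
	case 0x000FAC01:	/* WLAN_CIPHER_SUITE_WEP40 */
	case 0x000FAC05:	/* WLAN_CIPHER_SUITE_WEP104 */
		return DEMO_WEP_ENABLED;
	case 0x000FAC02:	/* WLAN_CIPHER_SUITE_TKIP */
		return DEMO_TKIP_ENABLED;
	case 0x000FAC04:	/* WLAN_CIPHER_SUITE_CCMP */
		return DEMO_AES_ENABLED;
	default:
		return 0;	/* open or unsupported suite */
	}
}

/* wsec_val = demo_cipher_to_wsec(pairwise) | demo_cipher_to_wsec(group); */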
-#ifdef WL_GCMP
+
+#ifdef MFP
static s32
-wl_set_wsec_info_algos(struct net_device *dev, uint32 algos, uint32 mask)
+wl_cfg80211_set_mfp(struct bcm_cfg80211 *cfg,
+ struct net_device *dev,
+ struct cfg80211_connect_params *sme)
{
- struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
- s32 bssidx;
- s32 err = 0;
- wl_wsec_info_t *wsec_info;
- bcm_xtlv_t *wsec_info_tlv;
- uint16 tlv_data_len;
- uint8 tlv_data[8];
- uint32 param_len;
- uint8 * buf;
-
- WL_DBG(("enter.\n"));
- if (!cfg) {
- return BCME_ERROR;
- }
- if ((bssidx = wl_get_bssidx_by_wdev(cfg, dev->ieee80211_ptr)) < 0) {
- WL_ERR(("Find index from wdev(%p) failed\n", dev->ieee80211_ptr));
- return BCME_ERROR;
- }
-
- buf = MALLOCZ(cfg->osh, sizeof(wl_wsec_info_t) + sizeof(tlv_data));
- if (!buf) {
- WL_ERR(("No memory"));
- return BCME_NOMEM;
- }
- wsec_info = (wl_wsec_info_t *)buf;
- wsec_info->version = WL_WSEC_INFO_VERSION;
- wsec_info_tlv = (bcm_xtlv_t *)(buf + OFFSETOF(wl_wsec_info_t, tlvs));
-
- wsec_info->num_tlvs++;
- tlv_data_len = sizeof(tlv_data);
- err = memcpy_s(tlv_data, sizeof(tlv_data), &algos, sizeof(algos));
- if (err) {
- WL_ERR(("memcpy_s algos error (%d)\n", err));
- goto exit;
- }
- err = memcpy_s(tlv_data + sizeof(algos), sizeof(mask), &mask, sizeof(mask));
- if (err) {
- WL_ERR(("memcpy_s mask error (%d)\n", err));
- goto exit;
- }
- bcm_xtlv_pack_xtlv(wsec_info_tlv, WL_WSEC_INFO_BSS_ALGOS, tlv_data_len, tlv_data, 0);
- param_len = OFFSETOF(wl_wsec_info_t, tlvs) + WL_WSEC_INFO_TLV_HDR_LEN + tlv_data_len;
-
- err = wldev_iovar_setbuf_bsscfg(dev, "wsec_info", wsec_info, param_len,
- cfg->ioctl_buf, WLC_IOCTL_MAXLEN, bssidx, &cfg->ioctl_buf_sync);
- if (unlikely(err))
- WL_ERR(("wsec_info error (%d)\n", err));
-exit:
- MFREE(cfg->osh, buf, sizeof(wl_wsec_info_t) + sizeof(tlv_data));
- return err;
-}
-#endif /* WL_GCMP */
-#ifdef MFP
-static s32
-wl_cfg80211_set_mfp(struct bcm_cfg80211 *cfg,
- struct net_device *dev,
- struct cfg80211_connect_params *sme)
-{
- s32 mfp = WL_MFP_NONE;
- s32 current_mfp = WL_MFP_NONE;
- const bcm_tlv_t *wpa2_ie;
- const u8* rsn_cap = NULL;
- bool fw_support = false;
- int err, count = 0;
- const u8 *eptr = NULL, *ptr = NULL;
- const u8* group_mgmt_cs = NULL;
- const wpa_pmkid_list_t* pmkid = NULL;
-
- if (!sme) {
- /* No connection params from userspace, Do nothing. */
- return 0;
+ s32 mfp = WL_MFP_NONE;
+ s32 current_mfp = WL_MFP_NONE;
+ bcm_tlv_t *wpa2_ie;
+ u8* rsn_cap = NULL;
+ bool fw_support = false;
+ int err, count = 0;
+ u8 *eptr = NULL, *ptr = NULL;
+ u8* group_mgmt_cs = NULL;
+ wpa_pmkid_list_t* pmkid = NULL;
+
+ if (!sme) {
+ /* No connection params from userspace, Do nothing. */
+ return 0;
}
 /* Check fw support and retrieve current mfp val */
}
 /* Parse the wpa2ie to decode the MFP capability */
- if (((wpa2_ie = bcm_parse_tlvs((const u8 *)sme->ie, sme->ie_len,
+ if (((wpa2_ie = bcm_parse_tlvs((u8 *)sme->ie, sme->ie_len,
DOT11_MNG_RSN_ID)) != NULL) &&
- (wl_cfg80211_get_rsn_capa(wpa2_ie, &rsn_cap) == 0) && rsn_cap) {
- WL_DBG(("rsn_cap 0x%x%x\n", rsn_cap[0], rsn_cap[1]));
+ (wl_cfg80211_get_rsn_capa(wpa2_ie, &rsn_cap) == 0)) {
/* Check for MFP cap in the RSN capability field */
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 9, 0))
- if (sme->mfp)
-#endif // endif
- {
- if (rsn_cap[0] & RSN_CAP_MFPR) {
- mfp = WL_MFP_REQUIRED;
- } else if (rsn_cap[0] & RSN_CAP_MFPC) {
- mfp = WL_MFP_CAPABLE;
- }
+ if (rsn_cap[0] & RSN_CAP_MFPR) {
+ mfp = WL_MFP_REQUIRED;
+ } else if (rsn_cap[0] & RSN_CAP_MFPC) {
+ mfp = WL_MFP_CAPABLE;
}
+
/*
* eptr --> end/last byte addr of wpa2_ie
* ptr --> to keep track of current/required byte addr
*/
- eptr = (const u8*)wpa2_ie + (wpa2_ie->len + TLV_HDR_LEN);
+ eptr = (u8*)wpa2_ie + (wpa2_ie->len + TLV_HDR_LEN);
 /* pointing ptr to the next byte after rsn_cap */
- ptr = (const u8*)rsn_cap + RSN_CAP_LEN;
+ ptr = (u8*)rsn_cap + RSN_CAP_LEN;
if (mfp && (eptr - ptr) >= WPA2_PMKID_COUNT_LEN) {
/* pmkid now to point to 1st byte addr of pmkid in wpa2_ie */
- pmkid = (const wpa_pmkid_list_t*)ptr;
+ pmkid = (wpa_pmkid_list_t*)ptr;
count = pmkid->count.low | (pmkid->count.high << 8);
/* ptr now to point to last byte addr of pmkid */
- ptr = (const u8*)pmkid + (count * WPA2_PMKID_LEN
- + WPA2_PMKID_COUNT_LEN);
+ ptr = (u8*)pmkid + (count * WPA2_PMKID_LEN
+ + WPA2_PMKID_COUNT_LEN);
if ((eptr - ptr) >= WPA_SUITE_LEN) {
/* group_mgmt_cs now to point to first byte addr of bip */
group_mgmt_cs = ptr;
}
}
- WL_DBG(("mfp:%d wpa2_ie ptr:%p mfp fw_support:%d\n",
- mfp, wpa2_ie, fw_support));
+ WL_DBG((" mfp:%d wpa2_ie ptr:%p rsn_cap 0x%x%x fw mfp support:%d\n",
+ mfp, wpa2_ie, rsn_cap[0], rsn_cap[1], fw_support));
if (fw_support == false) {
if (mfp == WL_MFP_REQUIRED) {
*/
WL_ERR(("mfp capability found in wpaie. But fw doesn't "
"seem to support MFP\n"));
- err = -EINVAL;
- goto exit;
+ return -EINVAL;
} else {
/* Firmware doesn't support mfp. But since connection request
* is for non-mfp case, don't bother.
*/
- err = BCME_OK;
- goto exit;
+ return 0;
}
} else if (mfp != current_mfp) {
err = wldev_iovar_setint(dev, "mfp", mfp);
if (unlikely(err)) {
WL_ERR(("mfp (%d) set failed ret:%d \n", mfp, err));
- goto exit;
+ return err;
}
- WL_INFORM_MEM(("[%s] wl mfp 0x%x\n", dev->name, mfp));
+ WL_DBG(("mfp set to 0x%x \n", mfp));
}
if (group_mgmt_cs && bcmp((const uint8 *)WPA2_OUI,
group_mgmt_cs, (WPA_SUITE_LEN - 1)) == 0) {
WL_DBG(("BIP is found\n"));
err = wldev_iovar_setbuf(dev, "bip",
- group_mgmt_cs, WPA_SUITE_LEN, cfg->ioctl_buf,
- WLC_IOCTL_SMLEN, &cfg->ioctl_buf_sync);
+ group_mgmt_cs, WPA_SUITE_LEN, cfg->ioctl_buf,
+ WLC_IOCTL_SMLEN, &cfg->ioctl_buf_sync);
/*
 * Don't return failure for unsupported cases
* of bip iovar for backward compatibility
*/
if (err != BCME_UNSUPPORTED && err < 0) {
WL_ERR(("bip set error (%d)\n", err));
- {
- goto exit;
- }
- } else {
- WL_INFORM_MEM(("[%s] wl bip %02X:%02X:%02X\n",
- dev->name, group_mgmt_cs[0], group_mgmt_cs[1],
- group_mgmt_cs[2]));
+ return err;
}
}
-exit:
- if (err) {
- wl_flush_fw_log_buffer(bcmcfg_to_prmry_ndev(cfg),
- FW_LOGSET_MASK_ALL);
- }
return 0;
}
#endif /* MFP */
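/*
 * wl_cfg80211_set_mfp() above derives the "mfp" iovar value from the first
 * RSN Capabilities byte: the MFP-required bit wins over MFP-capable, and
 * neither bit leaves MFP off. A standalone sketch (the 0x40/0x80 positions
 * follow the 802.11 RSN Capabilities definition; the result values are kept
 * symbolic rather than tied to the driver's WL_MFP_* constants):
 */
#define DEMO_RSN_CAP_MFPR	0x40	/* Management Frame Protection Required */
#define DEMO_RSN_CAP_MFPC	0x80	/* Management Frame Protection Capable */

enum demo_mfp_mode { DEMO_MFP_NONE, DEMO_MFP_CAPABLE, DEMO_MFP_REQUIRED };

static enum demo_mfp_mode demo_rsn_cap_to_mfp(unsigned char rsn_cap0)
{
	if (rsn_cap0 & DEMO_RSN_CAP_MFPR)
		return DEMO_MFP_REQUIRED;
	if (rsn_cap0 & DEMO_RSN_CAP_MFPC)
		return DEMO_MFP_CAPABLE;
	return DEMO_MFP_NONE;
}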
-#ifdef WL_FILS
-bool
-wl_is_fils_supported(struct net_device *ndev)
-{
- s32 err;
- u8 ioctl_buf[WLC_IOCTL_SMLEN] = {0};
- bcm_iov_buf_t *iov_buf = (bcm_iov_buf_t *)ioctl_buf;
-
- iov_buf->version = WL_FILS_IOV_VERSION;
- err = wldev_iovar_getbuf(ndev, "fils", (uint8*)iov_buf, sizeof(bcm_iov_buf_t),
- iov_buf, WLC_IOCTL_SMLEN, NULL);
- if (err == BCME_UNSUPPORTED) {
- WL_DBG(("FILS NOT supported\n"));
- return false;
- }
-
- WL_INFORM(("FILS supported\n"));
- return true;
-}
-
-#define WL_NUM_OF_TLV_IN_SET_FILS_PARAMS 4u
-static s32
-wl_set_fils_params(struct net_device *dev, struct cfg80211_connect_params *sme)
-{
- struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
- bcm_iov_buf_t *iov_buf = NULL;
- bcm_xtlvbuf_t tbuf;
- s32 err = BCME_OK;
- uint32 buf_size;
-
- if ((sme->auth_type != NL80211_AUTHTYPE_FILS_SK) &&
- (sme->auth_type != NL80211_AUTHTYPE_FILS_SK_PFS) &&
- (sme->auth_type != NL80211_AUTHTYPE_FILS_PK)) {
- return BCME_OK;
- }
- if (sme->fils_erp_rrk_len > WL_MAX_FILS_KEY_LEN) {
- WL_ERR(("%s: FILS rRK exceed allowed size\n", __FUNCTION__));
- err = BCME_BADARG;
- goto exit;
- }
- /* Check incoming buffer length */
- buf_size = sme->fils_erp_username_len + sme->fils_erp_realm_len + sme->fils_erp_rrk_len +
- sizeof(sme->fils_erp_next_seq_num) +
- WL_NUM_OF_TLV_IN_SET_FILS_PARAMS * BCM_XTLV_HDR_SIZE_EX(BCM_XTLV_OPTION_ALIGN32) +
- sizeof(bcm_iov_buf_t) - 1u;
-
- if (buf_size > WLC_IOCTL_SMLEN) {
- WL_ERR(("%s: FILS connect params arguments exceed allowed size\n", __FUNCTION__));
- err = BCME_BADARG;
- goto exit;
- }
- iov_buf = MALLOCZ(cfg->osh, WLC_IOCTL_SMLEN);
- if (!iov_buf) {
- WL_ERR(("%s: iov_buf alloc failed! %d bytes\n", __FUNCTION__, WLC_IOCTL_SMLEN));
- err = BCME_NOMEM;
- goto exit;
- }
- iov_buf->version = WL_FILS_IOV_VERSION;
- iov_buf->id = WL_FILS_CMD_ADD_CONNECT_PARAMS;
- /* check if this should be len w/o headers */
- err = bcm_xtlv_buf_init(&tbuf, (uint8*)&iov_buf->data[0],
- WLC_IOCTL_SMLEN - sizeof(bcm_iov_buf_t) + sizeof(uint16),
- BCM_XTLV_OPTION_ALIGN32);
- if (err != BCME_OK) {
- WL_ERR(("%s: xtlv_context initialization failed\n", __FUNCTION__));
- goto exit;
- }
- if (sme->fils_erp_username_len && sme->fils_erp_username != NULL) {
- err = bcm_xtlv_put_data(&tbuf, WL_FILS_XTLV_ERP_USERNAME,
- sme->fils_erp_username, sme->fils_erp_username_len);
- if (err != BCME_OK) {
- WL_ERR(("%s: write xtlv failed\n", __FUNCTION__));
- goto exit;
- }
- }
- if (sme->fils_erp_realm_len && sme->fils_erp_realm != NULL) {
- err = bcm_xtlv_put_data(&tbuf, WL_FILS_XTLV_ERP_REALM,
- sme->fils_erp_realm, sme->fils_erp_realm_len);
- if (err != BCME_OK) {
- WL_ERR(("%s: write xtlv failed\n", __FUNCTION__));
- goto exit;
- }
- }
- if (sme->fils_erp_rrk_len && sme->fils_erp_rrk != NULL) {
- err = bcm_xtlv_put_data(&tbuf, WL_FILS_XTLV_ERP_RRK,
- sme->fils_erp_rrk, sme->fils_erp_rrk_len);
- if (err != BCME_OK) {
- WL_ERR(("%s: write xtlv failed\n", __FUNCTION__));
- goto exit;
- }
- }
- err = bcm_xtlv_put_data(&tbuf, WL_FILS_XTLV_ERP_NEXT_SEQ_NUM,
- (u8 *)&sme->fils_erp_next_seq_num, sizeof(sme->fils_erp_next_seq_num));
- if (err != BCME_OK) {
- WL_ERR(("%s: write xtlv failed\n", __FUNCTION__));
- goto exit;
- }
- iov_buf->len = bcm_xtlv_buf_len(&tbuf);
- err = wldev_iovar_setbuf(dev, "fils", iov_buf, iov_buf->len + sizeof(bcm_iov_buf_t) -
- sizeof(uint16), cfg->ioctl_buf, WLC_IOCTL_SMLEN, &cfg->ioctl_buf_sync);
- if (unlikely(err)) {
- WL_ERR(("set fils params ioctl error (%d)\n", err));
- goto exit;
- }
-
-exit:
- if (err != BCME_OK) {
- WL_ERR(("set FILS params error %d\n", err));
- }
- else {
- WL_INFORM_MEM(("FILS parameters succesfully applied\n"));
- }
- if (iov_buf) {
- MFREE(cfg->osh, iov_buf, WLC_IOCTL_SMLEN);
- }
- return err;
-}
-
-#if !defined(WL_FILS_ROAM_OFFLD) && defined(WL_FILS)
-static s32
-wl_get_bcn_timeout(struct net_device *dev, u32 *bcn_timeout)
-{
- s32 err = 0;
-
- err = wldev_iovar_getint(dev, "bcn_timeout", bcn_timeout);
- if (unlikely(err)) {
- WL_ERR(("could not get bcn_timeout (%d)\n", err));
- }
- return err;
-}
-
-#define WL_ROAM_ENABLE 0
-#define WL_ROAM_DISABLE 1
-/* Beacon Timeout beacon loss in case FILS roaming offload is not supported by fw */
-#define WL_BCN_TIMEOUT 3
-
-static s32
-wl_fils_toggle_roaming(struct net_device *dev, u32 auth_type)
-{
- s32 err = 0;
- struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
-
- if (WPA2_AUTH_IS_FILS(auth_type) && !cfg->fils_info.fils_roam_disabled) {
- err = wl_get_bcn_timeout(dev, &cfg->fils_info.fils_bcn_timeout_cache);
- if (unlikely(err)) {
- return err;
- }
- wl_dongle_roam(dev, WL_ROAM_DISABLE, WL_BCN_TIMEOUT);
- cfg->fils_info.fils_roam_disabled = true;
- WL_INFORM_MEM(("fw roam disabled for FILS akm\n"));
- } else if (cfg->fils_info.fils_roam_disabled) {
- /* Enable roaming back for other auth types */
- wl_dongle_roam(dev, WL_ROAM_ENABLE, cfg->fils_info.fils_bcn_timeout_cache);
- cfg->fils_info.fils_roam_disabled = false;
- WL_INFORM_MEM(("fw roam enabled\n"));
- }
- return err;
-}
-#endif /* !WL_FILS_ROAM_OFFLD && WL_FILS */
-#endif /* WL_FILS */
-
static s32
wl_set_key_mgmt(struct net_device *dev, struct cfg80211_connect_params *sme)
{
} else if (val & (WPA2_AUTH_PSK |
WPA2_AUTH_UNSPECIFIED)) {
switch (sme->crypto.akm_suites[0]) {
+ case WLAN_AKM_SUITE_8021X:
+ val = WPA2_AUTH_UNSPECIFIED;
+ break;
#ifdef MFP
+#ifdef CUSTOMER_HW6
+ case WL_AKM_SUITE_SHA256_1X:
+ if (wl_customer6_legacy_chip_check(cfg, dev)) {
+ val = WPA2_AUTH_UNSPECIFIED;
+ } else {
+ val = WPA2_AUTH_1X_SHA256;
+ }
+ break;
+ case WL_AKM_SUITE_SHA256_PSK:
+ if (wl_customer6_legacy_chip_check(cfg, dev)) {
+ val = WPA2_AUTH_PSK;
+ } else {
+ val = WPA2_AUTH_PSK_SHA256;
+ }
+ break;
+#else
case WL_AKM_SUITE_SHA256_1X:
val = WPA2_AUTH_1X_SHA256;
break;
case WL_AKM_SUITE_SHA256_PSK:
val = WPA2_AUTH_PSK_SHA256;
break;
+#endif /* CUSTOMER_HW6 */
#endif /* MFP */
- case WLAN_AKM_SUITE_8021X:
case WLAN_AKM_SUITE_PSK:
-#if defined(WLFBT) && defined(WLAN_AKM_SUITE_FT_8021X)
- case WLAN_AKM_SUITE_FT_8021X:
-#endif // endif
-#if defined(WLFBT) && defined(WLAN_AKM_SUITE_FT_PSK)
- case WLAN_AKM_SUITE_FT_PSK:
-#endif // endif
- case WLAN_AKM_SUITE_FILS_SHA256:
- case WLAN_AKM_SUITE_FILS_SHA384:
- case WLAN_AKM_SUITE_8021X_SUITE_B:
- case WLAN_AKM_SUITE_8021X_SUITE_B_192:
-#ifdef WL_OWE
- case WLAN_AKM_SUITE_OWE:
-#endif /* WL_OWE */
- case WLAN_AKM_SUITE_FT_8021X_SHA384:
- val = wl_rsn_akm_wpa_auth_lookup(sme->crypto.akm_suites[0]);
- break;
- case WLAN_AKM_SUITE_FT_FILS_SHA256:
- val = WPA2_AUTH_FILS_SHA256 | WPA2_AUTH_FT;
- break;
- case WLAN_AKM_SUITE_FT_FILS_SHA384:
- val = WPA2_AUTH_FILS_SHA384 | WPA2_AUTH_FT;
- break;
-#if defined(WL_SAE) || defined(WL_CLIENT_SAE)
- case WLAN_AKM_SUITE_SAE:
- val = WPA3_AUTH_SAE_PSK;
- break;
-#endif /* WL_SAE || WL_CLIENT_SAE */
- default:
- WL_ERR(("invalid akm suite (0x%x)\n",
- sme->crypto.akm_suites[0]));
- return -EINVAL;
- }
- }
-#ifdef BCMWAPI_WPI
- else if (val & (WAPI_AUTH_PSK | WAPI_AUTH_UNSPECIFIED)) {
- switch (sme->crypto.akm_suites[0]) {
- case WLAN_AKM_SUITE_WAPI_CERT:
- val = WAPI_AUTH_UNSPECIFIED;
- break;
- case WLAN_AKM_SUITE_WAPI_PSK:
- val = WAPI_AUTH_PSK;
+ val = WPA2_AUTH_PSK;
break;
default:
WL_ERR(("invalid akm suite (0x%x)\n",
return -EINVAL;
}
}
-#endif // endif
-
-#ifdef WL_FILS
-#if !defined(WL_FILS_ROAM_OFFLD)
- err = wl_fils_toggle_roaming(dev, val);
- if (unlikely(err)) {
- return err;
- }
-#endif /* !WL_FILS_ROAM_OFFLD */
-#endif /* !WL_FILS */
#ifdef MFP
if ((err = wl_cfg80211_set_mfp(cfg, dev, sme)) < 0) {
}
#endif /* MFP */
- WL_INFORM_MEM(("[%s] wl wpa_auth to 0x%x\n", dev->name, val));
+ WL_DBG(("setting wpa_auth to 0x%x\n", val));
err = wldev_iovar_setint_bsscfg(dev, "wpa_auth", val, bssidx);
if (unlikely(err)) {
WL_ERR(("could not set wpa_auth (0x%x)\n", err));
sec->wpa_versions, sec->cipher_pairwise));
if (!(sec->wpa_versions & (NL80211_WPA_VERSION_1 |
NL80211_WPA_VERSION_2)) &&
-#ifdef BCMWAPI_WPI
- !is_wapi(sec->cipher_pairwise) &&
-#endif // endif
(sec->cipher_pairwise & (WLAN_CIPHER_SUITE_WEP40 |
WLAN_CIPHER_SUITE_WEP104)))
{
- bzero(&key, sizeof(key));
+ memset(&key, 0, sizeof(key));
key.len = (u32) sme->key_len;
key.index = (u32) sme->key_idx;
if (unlikely(key.len > sizeof(key.data))) {
}
memcpy(key.data, sme->key, key.len);
key.flags = WL_PRIMARY_KEY;
- if ((sec->cipher_pairwise == WLAN_CIPHER_SUITE_WEP40) ||
- (sec->cipher_pairwise == WLAN_CIPHER_SUITE_WEP104)) {
- key.algo = wl_rsn_cipher_wsec_key_algo_lookup(sec->cipher_pairwise);
- } else {
+ switch (sec->cipher_pairwise) {
+ case WLAN_CIPHER_SUITE_WEP40:
+ key.algo = CRYPTO_ALGO_WEP1;
+ break;
+ case WLAN_CIPHER_SUITE_WEP104:
+ key.algo = CRYPTO_ALGO_WEP128;
+ break;
+ default:
WL_ERR(("Invalid algorithm (%d)\n",
sme->crypto.ciphers_pairwise[0]));
return -EINVAL;
WL_ERR(("WLC_SET_KEY error (%d)\n", err));
return err;
}
- WL_INFORM_MEM(("key applied to fw\n"));
if (sec->auth_type == NL80211_AUTHTYPE_SHARED_KEY) {
WL_DBG(("set auth_type to shared key\n"));
val = WL_AUTH_SHARED_KEY; /* shared key */
static u8 broad_bssid[6];
#endif /* ESCAN_RESULT_PATCH */
+
+
#if defined(CUSTOM_SET_CPUCORE) || defined(CONFIG_TCPACK_FASTTX)
static bool wl_get_chan_isvht80(struct net_device *net, dhd_pub_t *dhd)
{
chanspec = wl_chspec_driver_to_host(chanspec);
isvht80 = chanspec & WL_CHANSPEC_BW_80;
- WL_DBG(("wl_get_chan_isvht80: chanspec(%x:%d)\n", chanspec, isvht80));
+ WL_INFORM(("%s: chanspec(%x:%d)\n", __FUNCTION__, chanspec, isvht80));
return isvht80;
}
int wait_cnt;
if (disassociate) {
- dhd_pub_t *dhdp = (dhd_pub_t *)(cfg->pub);
- BCM_REFERENCE(dhdp);
- DHD_STATLOG_CTRL(dhdp, ST(DISASSOC_INT_START),
- dhd_net2idx(dhdp->info, dev), DOT11_RC_DISASSOC_LEAVING);
WL_ERR(("Disassociate previous connection!\n"));
wl_set_drv_status(cfg, DISCONNECTING, dev);
scbval.val = DOT11_RC_DISASSOC_LEAVING;
if (wait_cnt == 0) {
WL_ERR(("DISCONNECING clean up failed!\n"));
- /* Clear DISCONNECTING driver status as we have made sufficient attempts
- * for driver clean up.
- */
- wl_clr_drv_status(cfg, DISCONNECTING, dev);
- wl_clr_drv_status(cfg, CONNECTED, dev);
return BCME_NOTREADY;
}
return BCME_OK;
}
-#ifdef WL_FILS
-static int
-wl_fils_add_hlp_container(struct bcm_cfg80211 *cfg, struct net_device *dev,
- const uint8* ie_buf, uint16 ie_len)
-{
- const bcm_tlv_ext_t *hlp_ie;
-
- if ((hlp_ie = (const bcm_tlv_ext_t*)bcm_parse_tlvs_dot11((const uint8 *)ie_buf, ie_len,
- FILS_HLP_CONTAINER_EXT_ID, TRUE))) {
- u16 hlp_len = hlp_ie->len;
- u16 left_len = (ie_len - ((const uint8*)hlp_ie - ie_buf));
- bcm_iov_buf_t *iov_buf = 0;
- uint8* pxtlv;
- int err;
- size_t iov_buf_len;
- bcm_tlv_dot11_frag_tot_len(ie_buf, ie_len, FILS_HLP_CONTAINER_EXT_ID,
- TRUE, (uint*)&hlp_len);
-
- hlp_len += BCM_TLV_EXT_HDR_SIZE;
-
- if ((hlp_len > DOT11_MAX_MPDU_BODY_LEN) || (hlp_len > left_len)) {
- WL_ERR(("bad HLP length %d\n", hlp_len));
- return EFAULT;
- }
- iov_buf_len = sizeof(bcm_iov_buf_t) + sizeof(bcm_xtlv_t) - 1 + hlp_len;
- iov_buf = MALLOCZ(cfg->osh, iov_buf_len);
- if (iov_buf == NULL) {
- WL_ERR(("failed to allocated iov_buf\n"));
- return ENOMEM;
- }
-
- prhex("HLP, HLP", (const uchar *)hlp_ie, hlp_len);
-
- pxtlv = (uint8 *)&iov_buf->data[0];
- ((bcm_xtlv_t*)pxtlv)->id = WL_FILS_XTLV_HLP_IE;
- ((bcm_xtlv_t*)pxtlv)->len = hlp_len;
-
- memcpy(((bcm_xtlv_t*)pxtlv)->data, hlp_ie, ((bcm_xtlv_t*)pxtlv)->len);
-
- iov_buf->version = WL_FILS_IOV_VERSION;
- iov_buf->id = WL_FILS_CMD_ADD_HLP_IE;
- iov_buf->len = ((sizeof(bcm_xtlv_t)-1) + ((bcm_xtlv_t*)pxtlv)->len);
-
- err = wldev_iovar_setbuf(dev, "fils", iov_buf,
- sizeof(bcm_iov_buf_t) + iov_buf->len,
- cfg->ioctl_buf, WLC_IOCTL_MAXLEN, &cfg->ioctl_buf_sync);
- if (unlikely(err)) {
- WL_ERR(("fils wldev_iovar_setbuf error (%d)\n", err));
- }
- else {
- WL_INFORM_MEM(("FILS HLP Packet succesfully updated\n"));
- }
- MFREE(cfg->osh, iov_buf, iov_buf_len);
- }
- return BCME_OK;
-}
-#endif /* WL_FILS */
-
-#if defined(WL_FILS)
-#ifndef UPDATE_FILS_ERP_INFO
-#define UPDATE_FILS_ERP_INFO BIT(1)
-#define UPDATE_AUTH_TYPE BIT(2)
-#endif // endif
-
-static int
-wl_cfg80211_update_connect_params(struct wiphy *wiphy, struct net_device *dev,
- struct cfg80211_connect_params *sme, u32 changed)
-{
- s32 err = BCME_OK;
- if (changed & UPDATE_FILS_ERP_INFO) {
- err = wl_set_fils_params(dev, sme);
-
- if (unlikely(err)) {
- WL_ERR(("Invalid FILS params\n"));
- goto exit;
- }
- }
- if (changed & UPDATE_AUTH_TYPE) {
- err = wl_set_auth_type(dev, sme);
- if (unlikely(err)) {
- WL_ERR(("Invalid auth type\n"));
- goto exit;
- }
- }
- if ((changed & UPDATE_FILS_ERP_INFO) && !(changed & UPDATE_AUTH_TYPE)) {
- WL_DBG(("Warning: FILS ERP params are set, but authentication type - not\n"));
- }
-exit:
- return err;
-
-}
-#endif /* WL_FILS */
-
#define MAX_SCAN_ABORT_WAIT_CNT 20
#define WAIT_SCAN_ABORT_OSL_SLEEP_TIME 10
struct wl_join_params join_params;
size_t join_params_size;
dhd_pub_t *dhdp = (dhd_pub_t *)(cfg->pub);
-#if defined(ROAM_ENABLE) && defined(ROAM_AP_ENV_DETECTION)
- s32 roam_trigger[2] = {0, 0};
-#endif /* ROAM_AP_ENV_DETECTION */
s32 err = 0;
- const wpa_ie_fixed_t *wpa_ie;
- const bcm_tlv_t *wpa2_ie;
- const u8* wpaie = 0;
+ wpa_ie_fixed_t *wpa_ie;
+ bcm_tlv_t *wpa2_ie;
+ u8* wpaie = 0;
u32 wpaie_len = 0;
u32 chan_cnt = 0;
struct ether_addr bssid;
s32 bssidx = -1;
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 15, 0))
- bool skip_hints = fw_ap_select;
-#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 15, 0) */
-#ifdef ESCAN_CHANNEL_CACHE
- chanspec_t chanspec_list[MAX_ROAM_CHANNEL];
-#endif /* ESCAN_CHANNEL_CACHE */
+#if (defined(BCM4359_CHIP) || !defined(ESCAN_RESULT_PATCH))
int wait_cnt;
- char sec[32];
+#endif
WL_DBG(("In\n"));
- if (!dev) {
- WL_ERR(("dev is null\n"));
- return -EINVAL;
- }
BCM_REFERENCE(dhdp);
- DHD_STATLOG_CTRL(dhdp, ST(ASSOC_START), dhd_net2idx(dhdp->info, dev), 0);
-
-#ifdef ESCAN_CHANNEL_CACHE
- memset(chanspec_list, 0, (sizeof(chanspec_t) * MAX_ROAM_CHANNEL));
-#endif /* ESCAN_CHANNEL_CACHE */
- /* Connection attempted via linux-wireless */
- wl_set_drv_status(cfg, CFG80211_CONNECT, dev);
-#ifdef DHDTCPSYNC_FLOOD_BLK
- dhd_reset_tcpsync_info_by_dev(dev);
-#endif /* DHDTCPSYNC_FLOOD_BLK */
+#ifdef WLMESH
+ wl_config_ifmode(cfg, dev, dev->ieee80211_ptr->iftype);
+#endif
+#if defined(SUPPORT_RANDOM_MAC_SCAN)
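+ /* Make sure MAC randomization is switched off before associating (scans may have used a random address on these builds) */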
+ wl_cfg80211_set_random_mac(dev, FALSE);
+#endif /* SUPPORT_RANDOM_MAC_SCAN */
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 15, 0))
-#ifdef WL_SKIP_CONNECT_HINTS
- skip_hints = true;
-#elif defined(WL_FW_OCE_AP_SELECT)
- /* override bssid_hint for oce networks */
- skip_hints = (fw_ap_select && wl_cfg80211_is_oce_ap(wiphy, sme->bssid_hint));
-#endif // endif
- if (skip_hints) {
- /* Let fw choose the best AP */
- WL_INFORM(("skipping bssid & channel hint\n"));
- } else {
- if (sme->channel_hint) {
- chan = sme->channel_hint;
- WL_INFORM_MEM(("channel_hint (%d), channel_hint center_freq (%d)\n",
- ieee80211_frequency_to_channel(sme->channel_hint->center_freq),
- sme->channel_hint->center_freq));
- }
- if (sme->bssid_hint) {
- sme->bssid = sme->bssid_hint;
- WL_INFORM_MEM(("bssid_hint "MACDBG" \n", MAC2STRDBG(sme->bssid_hint)));
- }
+ if (sme->channel_hint) {
+ chan = sme->channel_hint;
+ WL_DBG(("channel_hint (%d), channel_hint center_freq (%d)\n",
+ ieee80211_frequency_to_channel(sme->channel_hint->center_freq),
+ sme->channel_hint->center_freq));
+ }
+ if (sme->bssid_hint) {
+ sme->bssid = sme->bssid_hint;
+ WL_DBG(("bssid_hint "MACDBG" \n", MAC2STRDBG(sme->bssid_hint)));
}
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 15, 0) */
WL_DBG(("SME IE : len=%zu\n", sme->ie_len));
if (sme->ie != NULL && sme->ie_len > 0 && (wl_dbg_level & WL_DBG_DBG)) {
- prhex(NULL, sme->ie, sme->ie_len);
+ prhex(NULL, (uchar *)sme->ie, sme->ie_len);
}
RETURN_EIO_IF_NOT_UP(cfg);
+
/*
* Cancel ongoing scan to sync up with sme state machine of cfg80211.
*/
+#if (defined(BCM4359_CHIP) || !defined(ESCAN_RESULT_PATCH))
if (cfg->scan_request) {
WL_TRACE_HW4(("Aborting the scan! \n"));
wl_cfg80211_scan_abort(cfg);
OSL_SLEEP(WAIT_SCAN_ABORT_OSL_SLEEP_TIME);
}
if (wl_get_drv_status(cfg, SCANNING, dev)) {
- wl_cfg80211_cancel_scan(cfg);
+ wl_notify_escan_complete(cfg, dev, true, true);
}
}
+#endif
#ifdef WL_SCHED_SCAN
/* Locks are taken in wl_cfg80211_sched_scan_stop()
* A start scan occuring during connect is unlikely
*/
if (cfg->sched_scan_req) {
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0))
- wl_cfg80211_sched_scan_stop(wiphy, bcmcfg_to_prmry_ndev(cfg),
- cfg->sched_scan_req->reqid);
+ wl_cfg80211_sched_scan_stop(wiphy, bcmcfg_to_prmry_ndev(cfg), 0);
#else
wl_cfg80211_sched_scan_stop(wiphy, bcmcfg_to_prmry_ndev(cfg));
#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)) */
}
-#endif /* WL_SCHED_SCAN */
-#ifdef WL_CFG80211_GON_COLLISION
- /* init block gon req count */
- cfg->block_gon_req_tx_count = 0;
- cfg->block_gon_req_rx_count = 0;
-#endif /* WL_CFG80211_GON_COLLISION */
+#endif /* WL_SCHED_SCAN */

#if defined(ESCAN_RESULT_PATCH)
if (sme->bssid)
memcpy(connect_req_bssid, sme->bssid, ETHER_ADDR_LEN);
else
bzero(connect_req_bssid, ETHER_ADDR_LEN);
bzero(broad_bssid, ETHER_ADDR_LEN);
-#endif // endif
+#endif
#if defined(USE_DYNAMIC_MAXPKT_RXGLOM)
maxrxpktglom = 0;
-#endif // endif
+#endif
if (wl_get_drv_status(cfg, CONNECTING, dev) || wl_get_drv_status(cfg, CONNECTED, dev)) {
/* set nested connect bit to identify the context */
wl_set_drv_status(cfg, NESTED_CONNECT, dev);
err = wl_cfg80211_cleanup_mismatch_status(dev, cfg, TRUE);
} else if (wl_get_drv_status(cfg, DISCONNECTING, dev)) {
/* DHD prev status is DISCONNECTING */
- err = wl_cfg80211_cleanup_mismatch_status(dev, cfg, false);
+ err = wl_cfg80211_cleanup_mismatch_status(dev, cfg, FALSE);
} else if (!wl_get_drv_status(cfg, CONNECTED, dev)) {
/* DHD previous status is not connected and FW connected */
if (wldev_ioctl_get(dev, WLC_GET_BSSID, &bssid, ETHER_ADDR_LEN) == 0) {
/* set nested connect bit to identify the context */
wl_set_drv_status(cfg, NESTED_CONNECT, dev);
- err = wl_cfg80211_cleanup_mismatch_status(dev, cfg, true);
+ err = wl_cfg80211_cleanup_mismatch_status(dev, cfg, TRUE);
}
}
- wl_cfg80211_check_in4way(cfg, dev, WAIT_DISCONNECTED,
- WL_EXT_STATUS_CONNECTING, NULL);
- if (sme->bssid) {
- wl_update_prof(cfg, dev, NULL, sme->bssid, WL_PROF_LATEST_BSSID);
- } else {
- wl_update_prof(cfg, dev, NULL, ðer_bcast, WL_PROF_LATEST_BSSID);
- }
-#ifdef SUPPORT_AP_BWCTRL
- if (dhdp->op_mode & DHD_FLAG_HOSTAP_MODE) {
- wl_restore_ap_bw(cfg);
- }
-#endif /* SUPPORT_AP_BWCTRL */
/* 'connect' request received */
wl_set_drv_status(cfg, CONNECTING, dev);
/* clear nested connect bit on proceeding for connection */
}
/* find the RSN_IE */
- if ((wpa2_ie = bcm_parse_tlvs((const u8 *)sme->ie, sme->ie_len,
+ if ((wpa2_ie = bcm_parse_tlvs((u8 *)sme->ie, sme->ie_len,
DOT11_MNG_RSN_ID)) != NULL) {
WL_DBG((" WPA2 IE is found\n"));
}
/* find the WPA_IE */
- if ((wpa_ie = wl_cfgp2p_find_wpaie(sme->ie,
+ if ((wpa_ie = wl_cfgp2p_find_wpaie((u8 *)sme->ie,
sme->ie_len)) != NULL) {
WL_DBG((" WPA IE is found\n"));
}
if (wpa_ie != NULL || wpa2_ie != NULL) {
- wpaie = (wpa_ie != NULL) ? (const u8 *)wpa_ie : (const u8 *)wpa2_ie;
+ wpaie = (wpa_ie != NULL) ? (u8 *)wpa_ie : (u8 *)wpa2_ie;
wpaie_len = (wpa_ie != NULL) ? wpa_ie->length : wpa2_ie->len;
wpaie_len += WPA_RSN_IE_TAG_FIXED_LEN;
err = wldev_iovar_setbuf(dev, "wpaie", wpaie, wpaie_len,
goto exit;
}
}
-#if defined(ROAM_ENABLE) && defined(ROAM_AP_ENV_DETECTION)
- if (dhdp->roam_env_detection) {
- bool is_roamtrig_reset = TRUE;
- bool is_roam_env_ok = (wldev_iovar_setint(dev, "roam_env_detection",
- AP_ENV_DETECT_NOT_USED) == BCME_OK);
-#ifdef SKIP_ROAM_TRIGGER_RESET
- roam_trigger[1] = WLC_BAND_2G;
- is_roamtrig_reset =
- (wldev_ioctl_get(dev, WLC_GET_ROAM_TRIGGER, roam_trigger,
- sizeof(roam_trigger)) == BCME_OK) &&
- (roam_trigger[0] == WL_AUTO_ROAM_TRIGGER-10);
-#endif /* SKIP_ROAM_TRIGGER_RESET */
- if (is_roamtrig_reset && is_roam_env_ok) {
- roam_trigger[0] = WL_AUTO_ROAM_TRIGGER;
- roam_trigger[1] = WLC_BAND_ALL;
- err = wldev_ioctl_set(dev, WLC_SET_ROAM_TRIGGER, roam_trigger,
- sizeof(roam_trigger));
- if (unlikely(err)) {
- WL_ERR((" failed to restore roam_trigger for auto env"
- " detection\n"));
- }
- }
- }
-#endif /* ROAM_ENABLE && ROAM_AP_ENV_DETECTION */
+
if (chan) {
- cfg->channel = ieee80211_frequency_to_channel(chan->center_freq);
- chan_cnt = 1;
- WL_DBG(("channel (%d), center_req (%d), %d channels\n", cfg->channel,
- chan->center_freq, chan_cnt));
+ cfg->channel = ieee80211_frequency_to_channel(chan->center_freq);
+ chan_cnt = 1;
+ WL_DBG(("channel (%d), center_req (%d), %d channels\n", cfg->channel,
+ chan->center_freq, chan_cnt));
} else {
- WL_DBG(("No channel info from user space\n"));
- cfg->channel = 0;
+ WL_DBG(("No channel info from user space\n"));
+ cfg->channel = 0;
}
-#ifdef ESCAN_CHANNEL_CACHE
- /*
- * No channel information from user space. if ECC is enabled, the ECC
- * would prepare the channel list, else no channel would be provided
- * and firmware would need to do a full channel scan.
- *
- * Use cached channels. This might take slightly longer time compared
- * to using a single channel based join. But ECC would help choose
- * a better AP for a given ssid. For a given SSID there might multiple
- * APs on different channels and ECC would scan all those channels
- * before deciding up on the AP. This accounts for the additional delay.
- */
- if (cfg->rcc_enabled || cfg->channel == 0)
- {
- wlc_ssid_t ssid;
- int band;
- err = wldev_get_band(dev, &band);
- if (!err) {
- set_roam_band(band);
- }
- memcpy(ssid.SSID, sme->ssid, sme->ssid_len);
- ssid.SSID_len = (uint32)sme->ssid_len;
- chan_cnt = get_roam_channel_list(cfg->channel, chanspec_list,
- MAX_ROAM_CHANNEL, &ssid, ioctl_version);
- WL_DBG(("RCC channel count:%d \n", chan_cnt));
- }
-#endif /* ESCAN_CHANNEL_CACHE */
WL_DBG(("3. set wpa version \n"));
err = wl_set_wpa_version(dev, sme);
WL_ERR(("Invalid wpa_version\n"));
goto exit;
}
-#ifdef BCMWAPI_WPI
- if (sme->crypto.wpa_versions & NL80211_WAPI_VERSION_1)
- WL_DBG(("4. WAPI Dont Set wl_set_auth_type\n"));
- else {
- WL_DBG(("4. wl_set_auth_type\n"));
-#endif // endif
err = wl_set_auth_type(dev, sme);
if (unlikely(err)) {
WL_ERR(("Invalid auth type\n"));
goto exit;
}
-#ifdef BCMWAPI_WPI
- }
-#endif // endif
-#ifdef WL_FILS
- if (sme->ie && sme->ie_len) {
- err = wl_fils_add_hlp_container(cfg, dev, sme->ie, sme->ie_len);
- if (unlikely(err)) {
- WL_ERR(("FILS sending HLP failed\n"));
- goto exit;
- }
- }
-#endif /* WL_FILS */
+
err = wl_set_set_cipher(dev, sme);
if (unlikely(err)) {
WL_ERR(("Invalid ciper\n"));
WL_ERR(("Invalid shared key\n"));
goto exit;
}
-#ifdef WL_FILS
- err = wl_set_fils_params(dev, sme);
- if (unlikely(err)) {
- WL_ERR(("Invalid FILS params\n"));
- goto exit;
- }
-#endif /* WL_FILS */
/*
* Join with specific BSSID and cached SSID
*/
join_params_size = WL_EXTJOIN_PARAMS_FIXED_SIZE +
chan_cnt * sizeof(chanspec_t);
- ext_join_params = (wl_extjoin_params_t *)MALLOCZ(cfg->osh, join_params_size);
+ ext_join_params = (wl_extjoin_params_t*)kzalloc(join_params_size, GFP_KERNEL);
if (ext_join_params == NULL) {
err = -ENOMEM;
wl_clr_drv_status(cfg, CONNECTING, dev);
goto exit;
}
- ext_join_params->ssid.SSID_len =
- (uint32)min(sizeof(ext_join_params->ssid.SSID), sme->ssid_len);
+ ext_join_params->ssid.SSID_len = min(sizeof(ext_join_params->ssid.SSID), sme->ssid_len);
memcpy(&ext_join_params->ssid.SSID, sme->ssid, ext_join_params->ssid.SSID_len);
wl_update_prof(cfg, dev, NULL, &ext_join_params->ssid, WL_PROF_SSID);
ext_join_params->ssid.SSID_len = htod32(ext_join_params->ssid.SSID_len);
else
memcpy(&ext_join_params->assoc.bssid, ðer_bcast, ETH_ALEN);
ext_join_params->assoc.chanspec_num = chan_cnt;
-
- if (chan_cnt && !cfg->rcc_enabled) {
+ if (chan_cnt) {
if (cfg->channel) {
/*
* Use the channel provided by userspace
: WL_CHANSPEC_BAND_5G;
/* Get min_bw set for the interface */
- bw = WL_CHANSPEC_BW_20;
+ bw = wl_cfg80211_ulb_get_min_bw_chspec(cfg, dev->ieee80211_ptr, bssidx);
if (bw == INVCHANSPEC) {
WL_ERR(("Invalid chanspec \n"));
- MFREE(cfg->osh, ext_join_params, join_params_size);
+ kfree(ext_join_params);
err = BCME_ERROR;
goto exit;
}
wl_chspec_host_to_driver(ext_join_params->assoc.chanspec_list[0]);
}
}
-#ifdef ESCAN_CHANNEL_CACHE
- else {
- memcpy(ext_join_params->assoc.chanspec_list, chanspec_list,
- sizeof(chanspec_t) * chan_cnt);
- }
-#endif /* ESCAN_CHANNEL_CACHE */
ext_join_params->assoc.chanspec_num = htod32(ext_join_params->assoc.chanspec_num);
if (ext_join_params->ssid.SSID_len < IEEE80211_MAX_SSID_LEN) {
- WL_DBG(("ssid \"%s\", len (%d)\n", ext_join_params->ssid.SSID,
+ WL_INFORM(("ssid \"%s\", len (%d)\n", ext_join_params->ssid.SSID,
ext_join_params->ssid.SSID_len));
}
if ((bssidx = wl_get_bssidx_by_wdev(cfg, dev->ieee80211_ptr)) < 0) {
WL_ERR(("Find p2p index from wdev(%p) failed\n", dev->ieee80211_ptr));
- MFREE(cfg->osh, ext_join_params, join_params_size);
+ kfree(ext_join_params);
err = BCME_ERROR;
goto exit;
}
-#ifdef WLTDLS
- /* disable TDLS if number of connected interfaces is >= 1 */
- wl_cfg80211_tdls_config(cfg, TDLS_STATE_CONNECT, false);
-#endif /* WLTDLS */
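+ /* CUSTOMER_HW2 builds: disable DHD runtime PM before issuing the join */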
+#if defined(CUSTOMER_HW2)
+ DHD_DISABLE_RUNTIME_PM((dhd_pub_t *)cfg->pub);
+#endif /* CUSTOMER_HW2 */
#ifdef WL_EXT_IAPSTA
- wl_ext_iapsta_update_channel(dhdp, dev, cfg->channel);
+ wl_ext_iapsta_update_channel(dev, cfg->channel);
#endif
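+ /* Push the assembled SSID/BSSID/chanspec selection to the firmware via the "join" iovar */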
+ err = wldev_iovar_setbuf_bsscfg(dev, "join", ext_join_params, join_params_size,
+ cfg->ioctl_buf, WLC_IOCTL_MAXLEN, bssidx, &cfg->ioctl_buf_sync);
- wl_ext_get_sec(dev, 0, sec, sizeof(sec));
if (cfg->rcc_enabled) {
- WL_MSG(dev->name, "Connecting with " MACDBG " ssid \"%s\", len (%d), "
- "sec=%s, with rcc channels. chan_cnt:%d \n\n",
+ printf("Connecting with " MACDBG " ssid \"%s\", len (%d) with rcc channels \n\n",
MAC2STRDBG((u8*)(&ext_join_params->assoc.bssid)),
- ext_join_params->ssid.SSID, ext_join_params->ssid.SSID_len, sec, chan_cnt);
+ ext_join_params->ssid.SSID, ext_join_params->ssid.SSID_len);
} else {
- WL_MSG(dev->name, "Connecting with " MACDBG " ssid \"%s\", len (%d), "
- "sec=%s, channel=%d\n\n",
+ printf("Connecting with " MACDBG " ssid \"%s\", len (%d) channel=%d\n\n",
MAC2STRDBG((u8*)(&ext_join_params->assoc.bssid)),
- ext_join_params->ssid.SSID, ext_join_params->ssid.SSID_len, sec,
- cfg->channel);
+ ext_join_params->ssid.SSID, ext_join_params->ssid.SSID_len, cfg->channel);
}
- SUPP_LOG(("[%s] Connecting with " MACDBG " ssid \"%s\","
- "channel:%d rcc:%d\n",
- dev->name, MAC2STRDBG((u8*)(&ext_join_params->assoc.bssid)),
- ext_join_params->ssid.SSID, cfg->channel, cfg->rcc_enabled));
- err = wldev_iovar_setbuf_bsscfg(dev, "join", ext_join_params, join_params_size,
- cfg->ioctl_buf, WLC_IOCTL_MAXLEN, bssidx, &cfg->ioctl_buf_sync);
- MFREE(cfg->osh, ext_join_params, join_params_size);
+
+ kfree(ext_join_params);
if (err) {
wl_clr_drv_status(cfg, CONNECTING, dev);
if (err == BCME_UNSUPPORTED) {
WL_DBG(("join iovar is not supported\n"));
goto set_ssid;
} else {
- WL_ERR(("join iovar error (%d)\n", err));
+ WL_ERR(("error (%d)\n", err));
goto exit;
}
} else
goto exit;
set_ssid:
-#if defined(ROAMEXP_SUPPORT)
- /* Clear Blacklist bssid and Whitelist ssid list before join issue
- * This is temporary fix since currently firmware roaming is not
- * disabled by android framework before SSID join from framework
- */
- /* Flush blacklist bssid content */
- dhd_dev_set_blacklist_bssid(dev, NULL, 0, true);
- /* Flush whitelist ssid content */
- dhd_dev_set_whitelist_ssid(dev, NULL, 0, true);
-#endif /* ROAMEXP_SUPPORT */
- bzero(&join_params, sizeof(join_params));
+ memset(&join_params, 0, sizeof(join_params));
join_params_size = sizeof(join_params.ssid);
- join_params.ssid.SSID_len = (uint32)min(sizeof(join_params.ssid.SSID), sme->ssid_len);
+ join_params.ssid.SSID_len = min(sizeof(join_params.ssid.SSID), sme->ssid_len);
memcpy(&join_params.ssid.SSID, sme->ssid, join_params.ssid.SSID_len);
join_params.ssid.SSID_len = htod32(join_params.ssid.SSID_len);
wl_update_prof(cfg, dev, NULL, &join_params.ssid, WL_PROF_SSID);
WL_DBG(("join_param_size %zu\n", join_params_size));
if (join_params.ssid.SSID_len < IEEE80211_MAX_SSID_LEN) {
- WL_MSG(dev->name, "ssid \"%s\", len (%d)\n", join_params.ssid.SSID,
- join_params.ssid.SSID_len);
+ WL_INFORM(("ssid \"%s\", len (%d)\n", join_params.ssid.SSID,
+ join_params.ssid.SSID_len));
}
err = wldev_ioctl_set(dev, WLC_SET_SSID, &join_params, join_params_size);
exit:
if (err) {
WL_ERR(("error (%d)\n", err));
wl_clr_drv_status(cfg, CONNECTING, dev);
- wl_flush_fw_log_buffer(dev, FW_LOGSET_MASK_ALL);
-#ifdef WLTDLS
- /* If connect fails, check whether we can enable back TDLS */
- wl_cfg80211_tdls_config(cfg, TDLS_STATE_DISCONNECT, false);
-#endif /* WLTDLS */
}
if (!err)
wl_cfg80211_check_in4way(cfg, dev, NO_SCAN_IN4WAY|NO_BTC_IN4WAY,
- WL_EXT_STATUS_CONNECTING, NULL);
+ WL_EXT_STATUS_CONNECTING, NULL);
+
+#ifdef WLTDLS
+ /* disable TDLS if number of connected interfaces is >= 1 */
+ wl_cfg80211_tdls_config(cfg, TDLS_STATE_CONNECT, false);
+#endif /* WLTDLS */
+
#ifdef DBG_PKT_MON
if ((dev == bcmcfg_to_prmry_ndev(cfg)) && !err) {
DHD_DBG_PKT_MON_START(dhdp);
return err;
}
-static void wl_cfg80211_disconnect_state_sync(struct bcm_cfg80211 *cfg, struct net_device *dev)
-{
- struct wireless_dev *wdev;
- uint8 wait_cnt;
-
- if (!dev || !dev->ieee80211_ptr) {
- WL_ERR(("wrong ndev\n"));
- return;
- }
-
- wdev = dev->ieee80211_ptr;
- wait_cnt = WAIT_FOR_DISCONNECT_STATE_SYNC;
- while ((wdev->current_bss) && wait_cnt) {
- WL_DBG(("Waiting for disconnect sync, wait_cnt: %d\n", wait_cnt));
- wait_cnt--;
- OSL_SLEEP(50);
- }
-
- if (wait_cnt == 0) {
- /* state didn't get cleared within given timeout */
- WL_INFORM_MEM(("cfg80211 state. wdev->current_bss non null\n"));
- } else {
- WL_MEM(("cfg80211 disconnect state sync done\n"));
- }
-
-}
-
+#define WAIT_FOR_DISCONNECT_MAX 10
static void wl_cfg80211_wait_for_disconnection(struct bcm_cfg80211 *cfg, struct net_device *dev)
{
uint8 wait_cnt;
- u32 status = 0;
wait_cnt = WAIT_FOR_DISCONNECT_MAX;
- while ((status = wl_get_drv_status(cfg, DISCONNECTING, dev)) && wait_cnt) {
+ while (wl_get_drv_status(cfg, DISCONNECTING, dev) && wait_cnt) {
WL_DBG(("Waiting for disconnection, wait_cnt: %d\n", wait_cnt));
wait_cnt--;
OSL_SLEEP(50);
}
- WL_INFORM_MEM(("Wait for disconnection done. status:%d wait_cnt:%d\n", status, wait_cnt));
- if (!wait_cnt && wl_get_drv_status(cfg, DISCONNECTING, dev)) {
- /* No response from firmware. Indicate connect result
- * to clear cfg80211 state machine
- */
- WL_INFORM_MEM(("force send connect result\n"));
- CFG80211_CONNECT_RESULT(dev, NULL, NULL, NULL, 0, NULL, 0,
- WLAN_STATUS_UNSPECIFIED_FAILURE,
- GFP_KERNEL);
- wl_clr_drv_status(cfg, DISCONNECTING, dev);
- }
return;
}
scb_val_t scbval;
bool act = false;
s32 err = 0;
- u8 *curbssid = NULL;
- u8 null_bssid[ETHER_ADDR_LEN];
- s32 bssidx = 0;
+ u8 *curbssid;
dhd_pub_t *dhdp = (dhd_pub_t *)(cfg->pub);
-
+ WL_ERR(("Reason %d\n", reason_code));
RETURN_EIO_IF_NOT_UP(cfg);
act = *(bool *) wl_read_prof(cfg, dev, WL_PROF_ACT);
curbssid = wl_read_prof(cfg, dev, WL_PROF_BSSID);
- WL_MSG(dev->name, "Reason %d, act %d\n", reason_code, act);
BCM_REFERENCE(dhdp);
- DHD_STATLOG_CTRL(dhdp, ST(DISASSOC_START),
- dhd_net2idx(dhdp->info, dev), reason_code);
-#ifdef DHD_4WAYM4_FAIL_DISCONNECT
- dhd_cleanup_m4_state_work(dhdp, dhd_net2idx(dhdp->info, dev));
-#endif /* DHD_4WAYM4_FAIL_DISCONNECT */
#ifdef ESCAN_RESULT_PATCH
- if (wl_get_drv_status(cfg, CONNECTING, dev)) {
- if (curbssid) {
- WL_ERR(("Disconnecting while CONNECTING status"
- " connecting device: " MACDBG "\n", MAC2STRDBG(curbssid)));
- } else {
- WL_ERR(("Disconnecting while CONNECTING status \n"));
- }
+ if (wl_get_drv_status(cfg, CONNECTING, dev) && curbssid &&
+ (memcmp(curbssid, connect_req_bssid, ETHER_ADDR_LEN) == 0)) {
+ WL_ERR(("Disconnecting from connecting device: " MACDBG "\n",
+ MAC2STRDBG(curbssid)));
act = true;
}
#endif /* ESCAN_RESULT_PATCH */
- if (!curbssid) {
- WL_ERR(("Disconnecting while CONNECTING status %d\n", (int)sizeof(null_bssid)));
- bzero(null_bssid, sizeof(null_bssid));
- curbssid = null_bssid;
- }
-
if (act) {
#ifdef DBG_PKT_MON
- /* Stop packet monitor */
if (dev == bcmcfg_to_prmry_ndev(cfg)) {
DHD_DBG_PKT_MON_STOP(dhdp);
}
/*
* Cancel ongoing scan to sync up with sme state machine of cfg80211.
*/
+#if !defined(ESCAN_RESULT_PATCH)
/* Let scan aborted by F/W */
if (cfg->scan_request) {
WL_TRACE_HW4(("Aborting the scan! \n"));
- wl_cfg80211_cancel_scan(cfg);
+ wl_notify_escan_complete(cfg, dev, true, true);
}
- /* Set DISCONNECTING state. We are clearing this state in all exit paths */
- wl_set_drv_status(cfg, DISCONNECTING, dev);
+#endif /* ESCAN_RESULT_PATCH */
if (wl_get_drv_status(cfg, CONNECTING, dev) ||
wl_get_drv_status(cfg, CONNECTED, dev)) {
+ wl_set_drv_status(cfg, DISCONNECTING, dev);
scbval.val = reason_code;
memcpy(&scbval.ea, curbssid, ETHER_ADDR_LEN);
scbval.val = htod32(scbval.val);
- WL_INFORM_MEM(("[%s] wl disassoc\n", dev->name));
err = wldev_ioctl_set(dev, WLC_DISASSOC, &scbval,
sizeof(scb_val_t));
if (unlikely(err)) {
wl_clr_drv_status(cfg, DISCONNECTING, dev);
WL_ERR(("error (%d)\n", err));
- goto exit;
+ return err;
}
- wl_cfg80211_check_in4way(cfg, dev, NO_SCAN_IN4WAY|NO_BTC_IN4WAY|WAIT_DISCONNECTED,
- WL_EXT_STATUS_DISCONNECTING, NULL);
- }
-#ifdef WL_WPS_SYNC
- /* If are in WPS reauth state, then we would be
- * dropping the link down events. Ensure that
- * Event is sent up for the disconnect Req
- */
- if (wl_wps_session_update(dev,
- WPS_STATE_DISCONNECT, curbssid) == BCME_OK) {
- WL_INFORM_MEM(("[WPS] Disconnect done.\n"));
- wl_clr_drv_status(cfg, DISCONNECTING, dev);
+ wl_cfg80211_wait_for_disconnection(cfg, dev);
}
-#endif /* WPS_SYNC */
- wl_cfg80211_wait_for_disconnection(cfg, dev);
- } else {
- /* Not in connecting or connected state. However since disconnect came
- * from upper layer, indicate connect fail to clear any state mismatch
- */
- WL_INFORM_MEM(("act is false. report connect result fail.\n"));
- CFG80211_CONNECT_RESULT(dev, NULL, NULL, NULL, 0, NULL, 0,
- WLAN_STATUS_UNSPECIFIED_FAILURE, GFP_KERNEL);
}
#ifdef CUSTOM_SET_CPUCORE
/* set default cpucore */
}
#endif /* CUSTOM_SET_CPUCORE */
- cfg->rssi = 0; /* reset backup of rssi */
-
-exit:
- /* Clear IEs for disaasoc */
- if ((bssidx = wl_get_bssidx_by_wdev(cfg, dev->ieee80211_ptr)) < 0) {
- WL_ERR(("Find index failed\n"));
- err = -EINVAL;
- return err;
- }
- WL_ERR(("Clearing disconnect IEs \n"));
- err = wl_cfg80211_set_mgmt_vndr_ies(cfg,
- ndev_to_cfgdev(dev), bssidx, VNDR_IE_DISASSOC_FLAG, NULL, 0);
-
return err;
}
s32 bssidx;
s32 mode = wl_get_mode_by_netdev(cfg, dev);
- WL_MSG(dev->name, "key index (%d)\n", key_idx);
if ((bssidx = wl_get_bssidx_by_wdev(cfg, dev->ieee80211_ptr)) < 0) {
WL_ERR(("Find p2p index from wdev(%p) failed\n", dev->ieee80211_ptr));
return BCME_ERROR;
}
- bzero(&key, sizeof(key));
+ memset(&key, 0, sizeof(key));
key.index = (u32) key_idx;
if (!ETHER_ISMULTI(mac_addr))
/* if IW_ENCODE_EXT_RX_SEQ_VALID set */
if (params->seq && params->seq_len == 6) {
/* rx iv */
- const u8 *ivptr;
- ivptr = (const u8 *) params->seq;
+ u8 *ivptr;
+ ivptr = (u8 *) params->seq;
key.rxiv.hi = (ivptr[5] << 24) | (ivptr[4] << 16) |
(ivptr[3] << 8) | ivptr[2];
key.rxiv.lo = (ivptr[1] << 8) | ivptr[0];
key.iv_initialized = true;
}
- key.algo = wl_rsn_cipher_wsec_key_algo_lookup(params->cipher);
- if (key.algo == CRYPTO_ALGO_OFF) { //not found.
+
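+ /* Map the cfg80211 cipher suite onto the dongle's wsec key algorithm */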
+ switch (params->cipher) {
+ case WLAN_CIPHER_SUITE_WEP40:
+ key.algo = CRYPTO_ALGO_WEP1;
+ WL_DBG(("WLAN_CIPHER_SUITE_WEP40\n"));
+ break;
+ case WLAN_CIPHER_SUITE_WEP104:
+ key.algo = CRYPTO_ALGO_WEP128;
+ WL_DBG(("WLAN_CIPHER_SUITE_WEP104\n"));
+ break;
+ case WLAN_CIPHER_SUITE_TKIP:
+ key.algo = CRYPTO_ALGO_TKIP;
+ WL_DBG(("WLAN_CIPHER_SUITE_TKIP\n"));
+ break;
+ case WLAN_CIPHER_SUITE_AES_CMAC:
+ key.algo = CRYPTO_ALGO_AES_CCM;
+ WL_DBG(("WLAN_CIPHER_SUITE_AES_CMAC\n"));
+ break;
+ case WLAN_CIPHER_SUITE_CCMP:
+ key.algo = CRYPTO_ALGO_AES_CCM;
+ WL_DBG(("WLAN_CIPHER_SUITE_CCMP\n"));
+ break;
+ default:
WL_ERR(("Invalid cipher (0x%x)\n", params->cipher));
return -EINVAL;
}
WL_ERR(("WLC_SET_KEY error (%d)\n", err));
return err;
}
- WL_INFORM_MEM(("[%s] wsec key set\n", dev->name));
}
return err;
}
/* roam offload is only for the primary device */
return -1;
}
-
- WL_INFORM_MEM(("[%s] wl roam_offload %d\n", dev->name, enable));
err = wldev_iovar_setint(dev, "roam_offload", enable);
if (err)
return err;
return err;
}
-struct wireless_dev *
-wl_cfg80211_get_wdev_from_ifname(struct bcm_cfg80211 *cfg, const char *name)
+#if defined(WL_VIRTUAL_APSTA)
+int
+wl_cfg80211_interface_create(struct net_device *dev, char *name)
+{
+ struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
+ bcm_struct_cfgdev *new_cfgdev;
+ char ifname[IFNAMSIZ];
+ char iftype[IFNAMSIZ];
+ enum nl80211_iftype iface_type = NL80211_IFTYPE_STATION;
+
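+ /* "name" is parsed as "<ifname> <iftype>"; anything other than "AP" falls back to a STA interface */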
+ sscanf(name, "%15s %15s", ifname, iftype); /* limit to IFNAMSIZ-1 chars to avoid overflowing ifname/iftype */
+
+ if (strnicmp(iftype, "AP", strlen("AP")) == 0) {
+ iface_type = NL80211_IFTYPE_AP;
+ }
+
+ new_cfgdev = wl_cfg80211_create_iface(cfg->wdev->wiphy,
+ iface_type, NULL, ifname);
+ if (!new_cfgdev) {
+ return BCME_ERROR;
+ }
+ else {
+ WL_DBG(("Iface %s created successfuly\n", name));
+ return BCME_OK;
+ }
+}
+
+int
+wl_cfg80211_interface_delete(struct net_device *dev, char *name)
{
+ struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
struct net_info *iter, *next;
+ int err = BCME_ERROR;
if (name == NULL) {
- WL_ERR(("Iface name is not provided\n"));
- return NULL;
+ return BCME_ERROR;
}
- GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wcast-qual"
+#endif
for_each_ndev(cfg, iter, next) {
- GCC_DIAGNOSTIC_POP();
if (iter->ndev) {
if (strcmp(iter->ndev->name, name) == 0) {
- return iter->ndev->ieee80211_ptr;
+ err = wl_cfg80211_del_iface(cfg->wdev->wiphy,
+ ndev_to_cfgdev(iter->ndev));
+ break;
}
}
}
-
- WL_DBG(("Iface %s not found\n", name));
- return NULL;
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic pop
+#endif
+ if (!err) {
+ WL_DBG(("Iface %s deleted successfuly", name));
+ }
+ return err;
}
#if defined(PKT_FILTER_SUPPORT) && defined(APSTA_BLOCK_ARP_DURING_DHCP)
struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
dhd_pub_t *dhdp = (dhd_pub_t *)(cfg->pub);
- WL_INFORM_MEM(("[%s] Enter. enable:%d\n", dev->name, enable));
if (!dhd_pkt_filter_enable) {
- WL_DBG(("Packet filter isn't enabled\n"));
+ WL_INFORM(("Packet filter isn't enabled\n"));
return;
}
* the upstream AP in case of STA+SoftAP Concurrenct mode
*/
if (!wl_get_drv_status(cfg, CONNECTED, dev)) {
- WL_DBG(("STA not connected to upstream AP\n"));
+ WL_INFORM(("STA doesn't connected to upstream AP\n"));
return;
}
}
}
#endif /* PKT_FILTER_SUPPORT && APSTA_BLOCK_ARP_DURING_DHCP */
+#endif /* defined (WL_VIRTUAL_APSTA) */
static s32
wl_cfg80211_add_key(struct wiphy *wiphy, struct net_device *dev,
s32 bssidx = 0;
struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
s32 mode = wl_get_mode_by_netdev(cfg, dev);
-#ifdef WL_GCMP
- uint32 algos = 0, mask = 0;
-#endif /* WL_GCMP */
-#if defined(WLAN_CIPHER_SUITE_PMK)
- int j;
- wsec_pmk_t pmk;
- char keystring[WSEC_MAX_PSK_LEN + 1];
- char* charptr = keystring;
- u16 len;
- struct wl_security *sec;
-#endif /* defined(WLAN_CIPHER_SUITE_PMK) */
- dhd_pub_t *dhdp = (dhd_pub_t *)(cfg->pub);
- WL_INFORM_MEM(("key index (%d) (0x%x)\n", key_idx, params->cipher));
+ WL_DBG(("key index (%d)\n", key_idx));
RETURN_EIO_IF_NOT_UP(cfg);
if ((bssidx = wl_get_bssidx_by_wdev(cfg, dev->ieee80211_ptr)) < 0) {
WL_ERR(("Find p2p index from dev(%p) failed\n", dev->ieee80211_ptr));
return BCME_ERROR;
}
+ wl_cfg80211_check_in4way(cfg, dev, NO_SCAN_IN4WAY|NO_BTC_IN4WAY,
+ WL_EXT_STATUS_4WAY_DONE, NULL);
if (mac_addr &&
((params->cipher != WLAN_CIPHER_SUITE_WEP40) &&
wl_add_keyext(wiphy, dev, key_idx, mac_addr, params);
goto exit;
}
-
- BCM_REFERENCE(dhdp);
- DHD_STATLOG_CTRL(dhdp, ST(INSTALL_KEY), dhd_net2idx(dhdp->info, dev), 0);
-
- bzero(&key, sizeof(key));
+ memset(&key, 0, sizeof(key));
/* Clear any buffered wep key */
- bzero(&cfg->wep_key, sizeof(struct wl_wsec_key));
+ memset(&cfg->wep_key, 0, sizeof(struct wl_wsec_key));
key.len = (u32) params->key_len;
key.index = (u32) key_idx;
memcpy(key.data, params->key, key.len);
key.flags = WL_PRIMARY_KEY;
-
- key.algo = wl_rsn_cipher_wsec_key_algo_lookup(params->cipher);
- val = wl_rsn_cipher_wsec_algo_lookup(params->cipher);
- if (val == WSEC_NONE) {
- WL_ERR(("Invalid cipher (0x%x)\n", params->cipher));
-#if defined(WLAN_CIPHER_SUITE_PMK)
- /* WLAN_CIPHER_SUITE_PMK is not NL80211 standard ,but BRCM proprietary cipher suite.
- * so it doesn't have right algo type too. Just for now, bypass this check for
- * backward compatibility.
- * TODO: deprecate this proprietary way and replace to nl80211 set_pmk API.
- */
- if (params->cipher != WLAN_CIPHER_SUITE_PMK)
-#endif /* defined(WLAN_CIPHER_SUITE_PMK) */
- return -EINVAL;
- }
switch (params->cipher) {
+ case WLAN_CIPHER_SUITE_WEP40:
+ key.algo = CRYPTO_ALGO_WEP1;
+ val = WEP_ENABLED;
+ WL_DBG(("WLAN_CIPHER_SUITE_WEP40\n"));
+ break;
+ case WLAN_CIPHER_SUITE_WEP104:
+ key.algo = CRYPTO_ALGO_WEP128;
+ val = WEP_ENABLED;
+ WL_DBG(("WLAN_CIPHER_SUITE_WEP104\n"));
+ break;
case WLAN_CIPHER_SUITE_TKIP:
+ key.algo = CRYPTO_ALGO_TKIP;
+ val = TKIP_ENABLED;
/* wpa_supplicant switches the third and fourth quarters of the TKIP key */
if (mode == WL_MODE_BSS) {
bcopy(&key.data[24], keybuf, sizeof(keybuf));
}
WL_DBG(("WLAN_CIPHER_SUITE_TKIP\n"));
break;
-#if defined(WLAN_CIPHER_SUITE_PMK)
- case WLAN_CIPHER_SUITE_PMK:
- sec = wl_read_prof(cfg, dev, WL_PROF_SEC);
-
- WL_MEM(("set_pmk: wpa_auth:%x akm:%x\n", sec->wpa_auth, params->cipher));
- /* Avoid pmk set for SAE and OWE for external supplicant case. */
- if (IS_AKM_SAE(sec->wpa_auth) || IS_AKM_OWE(sec->wpa_auth)) {
- WL_INFORM_MEM(("skip pmk set for akm:%x\n", sec->wpa_auth));
- break;
- }
-
- if ((sec->wpa_auth == WLAN_AKM_SUITE_8021X) ||
- (sec->wpa_auth == WL_AKM_SUITE_SHA256_1X)) {
- err = wldev_iovar_setbuf(dev, "okc_info_pmk", (const void *)params->key,
- WSEC_MAX_PSK_LEN / 2, keystring, sizeof(keystring), NULL);
- if (err) {
- /* could fail in case that 'okc' is not supported */
- WL_INFORM_MEM(("okc_info_pmk failed, err=%d (ignore)\n", err));
- }
- }
- /* copy the raw hex key to the appropriate format */
- for (j = 0; j < (WSEC_MAX_PSK_LEN / 2); j++) {
- charptr += snprintf(charptr, sizeof(keystring), "%02x", params->key[j]);
- }
- len = (u16)strlen(keystring);
- pmk.key_len = htod16(len);
- bcopy(keystring, pmk.key, len);
- pmk.flags = htod16(WSEC_PASSPHRASE);
-
- err = wldev_ioctl_set(dev, WLC_SET_WSEC_PMK, &pmk, sizeof(pmk));
- if (err) {
- return err;
- }
- /* Clear key length to delete key */
- key.len = 0;
- break;
-#endif /* WLAN_CIPHER_SUITE_PMK */
-#ifdef WL_GCMP
- case WLAN_CIPHER_SUITE_GCMP:
- case WLAN_CIPHER_SUITE_GCMP_256:
- case WLAN_CIPHER_SUITE_BIP_GMAC_128:
- case WLAN_CIPHER_SUITE_BIP_GMAC_256:
- algos = KEY_ALGO_MASK(key.algo);
- mask = algos | KEY_ALGO_MASK(CRYPTO_ALGO_AES_CCM);
+ case WLAN_CIPHER_SUITE_AES_CMAC:
+ key.algo = CRYPTO_ALGO_AES_CCM;
+ val = AES_ENABLED;
+ WL_DBG(("WLAN_CIPHER_SUITE_AES_CMAC\n"));
break;
-#endif /* WL_GCMP */
- default: /* No post processing required */
- WL_DBG(("no post processing required (0x%x)\n", params->cipher));
+ case WLAN_CIPHER_SUITE_CCMP:
+ key.algo = CRYPTO_ALGO_AES_CCM;
+ val = AES_ENABLED;
+ WL_DBG(("WLAN_CIPHER_SUITE_CCMP\n"));
break;
+ default:
+ WL_ERR(("Invalid cipher (0x%x)\n", params->cipher));
+ return -EINVAL;
}
/* Set the new key/index */
WL_ERR(("set wsec error (%d)\n", err));
return err;
}
-#ifdef WL_GCMP
- wl_set_wsec_info_algos(dev, algos, mask);
-#endif /* WL_GCMP */
- wl_cfg80211_check_in4way(cfg, dev, NO_SCAN_IN4WAY|NO_BTC_IN4WAY,
- WL_EXT_STATUS_ADD_KEY, NULL);
+
return err;
}
struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
s32 err = 0;
s32 bssidx;
- dhd_pub_t *dhdp = (dhd_pub_t *)(cfg->pub);
if ((bssidx = wl_get_bssidx_by_wdev(cfg, dev->ieee80211_ptr)) < 0) {
WL_ERR(("Find p2p index from wdev(%p) failed\n", dev->ieee80211_ptr));
#ifndef MFP
if ((key_idx >= DOT11_MAX_DEFAULT_KEYS) && (key_idx < DOT11_MAX_DEFAULT_KEYS+2))
return -EINVAL;
-#endif // endif
+#endif
RETURN_EIO_IF_NOT_UP(cfg);
- BCM_REFERENCE(dhdp);
- DHD_STATLOG_CTRL(dhdp, ST(DELETE_KEY), dhd_net2idx(dhdp->info, dev), 0);
- bzero(&key, sizeof(key));
+ memset(&key, 0, sizeof(key));
key.flags = WL_PRIMARY_KEY;
key.algo = CRYPTO_ALGO_OFF;
return err;
}
-/* NOTE : this function cannot work as is and is never called */
static s32
wl_cfg80211_get_key(struct wiphy *wiphy, struct net_device *dev,
u8 key_idx, bool pairwise, const u8 *mac_addr, void *cookie,
}
WL_DBG(("key index (%d)\n", key_idx));
RETURN_EIO_IF_NOT_UP(cfg);
- bzero(&key, sizeof(key));
+ memset(&key, 0, sizeof(key));
key.index = key_idx;
swap_key_to_BE(&key);
- bzero(¶ms, sizeof(params));
+ memset(¶ms, 0, sizeof(params));
params.key_len = (u8) min_t(u8, DOT11_MAX_KEY_SIZE, key.len);
- params.key = key.data;
+ memcpy((void *)params.key, key.data, params.key_len);
err = wldev_iovar_getint_bsscfg(dev, "wsec", &wsec, bssidx);
if (unlikely(err)) {
params.cipher = WLAN_CIPHER_SUITE_AES_CMAC;
WL_DBG(("WLAN_CIPHER_SUITE_AES_CMAC\n"));
break;
-#ifdef BCMWAPI_WPI
- case SMS4_ENABLED:
- params.cipher = WLAN_CIPHER_SUITE_SMS4;
- WL_DBG(("WLAN_CIPHER_SUITE_SMS4\n"));
- break;
-#endif // endif
#if defined(SUPPORT_SOFTAP_WPAWPA2_MIXED)
/* to connect to mixed mode AP */
case (AES_ENABLED | TKIP_ENABLED): /* TKIP CCMP */
params.cipher = WLAN_CIPHER_SUITE_AES_CMAC;
WL_DBG(("WLAN_CIPHER_SUITE_TKIP\n"));
break;
-#endif // endif
+#endif
default:
WL_ERR(("Invalid algo (0x%x)\n", wsec));
return -EINVAL;
#ifdef MFP
return 0;
#else
- WL_INFORM_MEM(("Not supported\n"));
+ WL_INFORM(("Not supported\n"));
return -EOPNOTSUPP;
#endif /* MFP */
}
-static bool
-wl_check_assoc_state(struct bcm_cfg80211 *cfg, struct net_device *dev)
-{
- wl_assoc_info_t asinfo;
- uint32 state = 0;
- int err;
-
- err = wldev_iovar_getbuf_bsscfg(dev, "assoc_info",
- NULL, 0, cfg->ioctl_buf, WLC_IOCTL_MEDLEN, 0, &cfg->ioctl_buf_sync);
- if (unlikely(err)) {
- WL_ERR(("failed to get assoc_info : err=%d\n", err));
- return FALSE;
- } else {
- memcpy(&asinfo, cfg->ioctl_buf, sizeof(wl_assoc_info_t));
- state = dtoh32(asinfo.state);
- WL_DBG(("assoc state=%d\n", state));
- }
-
- return (state > 0)? TRUE:FALSE;
-}
-
static s32
-wl_cfg80211_get_rssi(struct net_device *dev, struct bcm_cfg80211 *cfg, s32 *rssi)
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 16, 0))
+wl_cfg80211_get_station(struct wiphy *wiphy, struct net_device *dev,
+ const u8 *mac, struct station_info *sinfo)
+#else
+wl_cfg80211_get_station(struct wiphy *wiphy, struct net_device *dev,
+ u8 *mac, struct station_info *sinfo)
+#endif
{
- s32 err = BCME_OK;
+ struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
scb_val_t scb_val;
-#ifdef SUPPORT_RSSI_SUM_REPORT
- wl_rssi_ant_mimo_t rssi_ant_mimo;
-#endif /* SUPPORT_RSSI_SUM_REPORT */
-
- if (dev == NULL || cfg == NULL) {
- return BCME_ERROR;
- }
+ s32 rssi;
+ s32 rate;
+ s32 err = 0;
+ sta_info_t *sta;
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0))
+ s8 eabuf[ETHER_ADDR_STR_LEN];
+#endif
+ dhd_pub_t *dhd = (dhd_pub_t *)(cfg->pub);
+ bool fw_assoc_state = FALSE;
+ u32 dhd_assoc_state = 0;
+ static int err_cnt = 0;
- /* initialize rssi */
- *rssi = 0;
+ RETURN_EIO_IF_NOT_UP(cfg);
+ if (wl_get_mode_by_netdev(cfg, dev) == WL_MODE_AP) {
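+ /* AP mode: fetch per-station idle/connected time from the firmware "sta_info" iovar */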
+ err = wldev_iovar_getbuf(dev, "sta_info", (struct ether_addr *)mac,
+ ETHER_ADDR_LEN, cfg->ioctl_buf, WLC_IOCTL_SMLEN, &cfg->ioctl_buf_sync);
+ if (err < 0) {
+ WL_ERR(("GET STA INFO failed, %d\n", err));
+ return err;
+ }
+ sinfo->filled = STA_INFO_BIT(INFO_INACTIVE_TIME);
+ sta = (sta_info_t *)cfg->ioctl_buf;
+ sta->len = dtoh16(sta->len);
+ sta->cap = dtoh16(sta->cap);
+ sta->flags = dtoh32(sta->flags);
+ sta->idle = dtoh32(sta->idle);
+ sta->in = dtoh32(sta->in);
+ sinfo->inactive_time = sta->idle * 1000;
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0))
+ if (sta->flags & WL_STA_ASSOC) {
+ sinfo->filled |= STA_INFO_BIT(INFO_CONNECTED_TIME);
+ sinfo->connected_time = sta->in;
+ }
+ WL_INFORM(("STA %s : idle time : %d sec, connected time :%d ms\n",
+ bcm_ether_ntoa((const struct ether_addr *)mac, eabuf), sinfo->inactive_time,
+ sta->idle * 1000));
+#endif
+ } else if (wl_get_mode_by_netdev(cfg, dev) == WL_MODE_BSS ||
+ wl_get_mode_by_netdev(cfg, dev) == WL_MODE_IBSS) {
+ get_pktcnt_t pktcnt;
+#ifdef DHD_SUPPORT_IF_CNTS
+ wl_if_stats_t *if_stats = NULL;
+#endif /* DHD_SUPPORT_IF_CNTS */
+ u8 *curmacp;
+
+ if (cfg->roam_offload) {
+ struct ether_addr bssid;
+ memset(&bssid, 0, sizeof(bssid));
+ err = wldev_ioctl_get(dev, WLC_GET_BSSID, &bssid, ETHER_ADDR_LEN);
+ if (err) {
+ WL_ERR(("Failed to get current BSSID\n"));
+ } else {
+ if (memcmp(mac, &bssid.octet, ETHER_ADDR_LEN) != 0) {
+ /* roaming is detected */
+ err = wl_cfg80211_delayed_roam(cfg, dev, &bssid);
+ if (err)
+ WL_ERR(("Failed to handle the delayed roam, "
+ "err=%d", err));
+ mac = (u8 *)bssid.octet;
+ }
+ }
+ }
+ dhd_assoc_state = wl_get_drv_status(cfg, CONNECTED, dev);
+ DHD_OS_WAKE_LOCK(dhd);
+ fw_assoc_state = dhd_is_associated(dhd, 0, &err);
+ DHD_OS_WAKE_UNLOCK(dhd);
+ if (!dhd_assoc_state || !fw_assoc_state) {
+ WL_ERR(("NOT assoc\n"));
+ if (err == -ENODATA)
+ return err;
+ if (!dhd_assoc_state) {
+ WL_TRACE_HW4(("drv state is not connected \n"));
+ }
+ if (!fw_assoc_state) {
+ WL_TRACE_HW4(("fw state is not associated \n"));
+ }
+ /* Disconnect due to fw is not associated for FW_ASSOC_WATCHDOG_TIME ms.
+ * 'err == 0' of dhd_is_associated() and '!fw_assoc_state'
+ * means that BSSID is null.
+ */
+ if (dhd_assoc_state && !fw_assoc_state && !err) {
+ if (!fw_assoc_watchdog_started) {
+ fw_assoc_watchdog_ms = OSL_SYSUPTIME();
+ fw_assoc_watchdog_started = TRUE;
+ WL_TRACE_HW4(("fw_assoc_watchdog_started \n"));
+ } else {
+ if (OSL_SYSUPTIME() - fw_assoc_watchdog_ms >
+ FW_ASSOC_WATCHDOG_TIME) {
+ fw_assoc_watchdog_started = FALSE;
+ err = -ENODEV;
+ WL_TRACE_HW4(("fw is not associated for %d ms \n",
+ (OSL_SYSUPTIME() - fw_assoc_watchdog_ms)));
+ goto get_station_err;
+ }
+ }
+ }
+ err = -ENODEV;
+ return err;
+ }
+ fw_assoc_watchdog_started = FALSE;
+ curmacp = wl_read_prof(cfg, dev, WL_PROF_BSSID);
+ if (memcmp(mac, curmacp, ETHER_ADDR_LEN)) {
+ WL_ERR(("Wrong Mac address: "MACDBG" != "MACDBG"\n",
+ MAC2STRDBG(mac), MAC2STRDBG(curmacp)));
+ }
-#ifdef SUPPORT_RSSI_SUM_REPORT
- /* Query RSSI sum across antennas */
- bzero(&rssi_ant_mimo, sizeof(rssi_ant_mimo));
- err = wl_get_rssi_per_ant(dev, dev->name, NULL, &rssi_ant_mimo);
- if (err) {
- WL_ERR(("Could not get rssi sum (%d)\n", err));
- /* set rssi to zero and do not return error,
- * because iovar phy_rssi_ant could return BCME_UNSUPPORTED
- * when bssid was null during roaming
- */
- err = BCME_OK;
- } else {
- cfg->rssi_sum_report = TRUE;
- if ((*rssi = rssi_ant_mimo.rssi_sum) >= 0) {
- *rssi = 0;
+ /* Report the current tx rate */
+ rate = 0;
+ err = wldev_ioctl_get(dev, WLC_GET_RATE, &rate, sizeof(rate));
+ if (err) {
+ WL_ERR(("Could not get rate (%d)\n", err));
+ } else {
+#if defined(USE_DYNAMIC_MAXPKT_RXGLOM)
+ int rxpktglom;
+#endif
+ rate = dtoh32(rate);
+ sinfo->filled |= STA_INFO_BIT(INFO_TX_BITRATE);
+ sinfo->txrate.legacy = rate * 5;
+ WL_DBG(("Rate %d Mbps\n", (rate / 2)));
+#if defined(USE_DYNAMIC_MAXPKT_RXGLOM)
+ rxpktglom = ((rate/2) > 150) ? 20 : 10;
+
+ if (maxrxpktglom != rxpktglom) {
+ maxrxpktglom = rxpktglom;
+ WL_DBG(("Rate %d Mbps, update bus:maxtxpktglom=%d\n", (rate/2),
+ maxrxpktglom));
+ err = wldev_iovar_setbuf(dev, "bus:maxtxpktglom",
+ (char*)&maxrxpktglom, 4, cfg->ioctl_buf,
+ WLC_IOCTL_MAXLEN, NULL);
+ if (err < 0) {
+ WL_ERR(("set bus:maxtxpktglom failed, %d\n", err));
+ }
+ }
+#endif
}
- }
-#endif /* SUPPORT_RSSI_SUM_REPORT */
- /* if SUPPORT_RSSI_SUM_REPORT works once, do not use legacy method anymore */
- if (cfg->rssi_sum_report == FALSE) {
- bzero(&scb_val, sizeof(scb_val));
+ memset(&scb_val, 0, sizeof(scb_val));
scb_val.val = 0;
err = wldev_ioctl_get(dev, WLC_GET_RSSI, &scb_val,
sizeof(scb_val_t));
if (err) {
WL_ERR(("Could not get rssi (%d)\n", err));
- return err;
+ goto get_station_err;
+ }
+ rssi = dtoh32(scb_val.val);
+#if defined(RSSIAVG)
+ err = wl_update_connected_rssi_cache(dev, &cfg->g_connected_rssi_cache_ctrl, &rssi);
+ if (err) {
+ WL_ERR(("Could not get rssi (%d)\n", err));
+ goto get_station_err;
}
+ wl_delete_dirty_rssi_cache(&cfg->g_connected_rssi_cache_ctrl);
+ wl_reset_rssi_cache(&cfg->g_connected_rssi_cache_ctrl);
+#endif
#if defined(RSSIOFFSET)
- *rssi = wl_update_rssi_offset(dev, dtoh32(scb_val.val));
-#else
- *rssi = dtoh32(scb_val.val);
+ rssi = wl_update_rssi_offset(dev, rssi);
#endif
- }
+#if !defined(RSSIAVG) && !defined(RSSIOFFSET)
+ // terence 20150419: limit the max. rssi to -2 or the bss will be filtered out in android OS
+ rssi = MIN(rssi, RSSI_MAXVAL);
+#endif
+ sinfo->filled |= STA_INFO_BIT(INFO_SIGNAL);
+ sinfo->signal = rssi;
+ WL_DBG(("RSSI %d dBm\n", rssi));
+
+#ifdef DHD_SUPPORT_IF_CNTS
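+ /* Prefer the "if_counters" iovar for packet counters; fall back to WLC_GET_PKTCNTS below when it is not supported */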
+ if ((if_stats = kmalloc(sizeof(*if_stats), GFP_KERNEL)) == NULL) {
+ WL_ERR(("%s(%d): kmalloc failed\n", __FUNCTION__, __LINE__));
+ goto error;
+ }
+ memset(if_stats, 0, sizeof(*if_stats));
- if (*rssi >= 0) {
- /* check assoc status including roaming */
- DHD_OS_WAKE_LOCK((dhd_pub_t *)(cfg->pub));
- if (wl_get_drv_status(cfg, CONNECTED, dev) && wl_check_assoc_state(cfg, dev)) {
- *rssi = cfg->rssi; /* use previous RSSI */
- WL_DBG(("use previous RSSI %d dBm\n", cfg->rssi));
+ err = wldev_iovar_getbuf(dev, "if_counters", NULL, 0,
+ (char *)if_stats, sizeof(*if_stats), NULL);
+ if (!err) {
+ sinfo->rx_packets = (uint32)dtoh64(if_stats->rxframe);
+ sinfo->rx_dropped_misc = 0;
+ sinfo->tx_packets = (uint32)dtoh64(if_stats->txfrmsnt);
+ sinfo->tx_failed = (uint32)dtoh64(if_stats->txnobuf) +
+ (uint32)dtoh64(if_stats->txrunt) +
+ (uint32)dtoh64(if_stats->txfail);
} else {
- *rssi = 0;
+ // WL_ERR(("%s: if_counters not supported ret=%d\n",
+ // __FUNCTION__, err));
+#endif /* DHD_SUPPORT_IF_CNTS */
+
+ err = wldev_ioctl_get(dev, WLC_GET_PKTCNTS, &pktcnt,
+ sizeof(pktcnt));
+ if (err) {
+ WL_ERR(("Could not get WLC_GET_PKTCNTS (%d)\n", err));
+ goto get_station_err;
+ }
+ sinfo->rx_packets = pktcnt.rx_good_pkt;
+ sinfo->rx_dropped_misc = pktcnt.rx_bad_pkt;
+ sinfo->tx_packets = pktcnt.tx_good_pkt;
+ sinfo->tx_failed = pktcnt.tx_bad_pkt;
+#ifdef DHD_SUPPORT_IF_CNTS
}
- DHD_OS_WAKE_UNLOCK((dhd_pub_t *)(cfg->pub));
- } else {
- /* backup the current rssi */
- cfg->rssi = *rssi;
- }
-
- return err;
-}
-
-static int
-wl_cfg80211_ifstats_counters_cb(void *ctx, const uint8 *data, uint16 type, uint16 len)
-{
- switch (type) {
- case WL_IFSTATS_XTLV_IF_INDEX:
- WL_DBG(("Stats received on interface index: %d\n", *data));
- break;
- case WL_IFSTATS_XTLV_GENERIC: {
- if (len > sizeof(wl_if_stats_t)) {
- WL_INFORM(("type 0x%x: cntbuf length too long! %d > %d\n",
- type, len, (int)sizeof(wl_if_stats_t)));
- }
- memcpy(ctx, data, sizeof(wl_if_stats_t));
- break;
- }
- default:
- WL_DBG(("Unsupported counter type 0x%x\n", type));
- break;
- }
-
- return BCME_OK;
-}
-
-/* Parameters to if_counters iovar need to be converted to XTLV format
- * before sending to FW. The length of the top level XTLV container
- * containing parameters should not exceed 228 bytes
- */
-#define IF_COUNTERS_PARAM_CONTAINER_LEN_MAX 228
-
-int
-wl_cfg80211_ifstats_counters(struct net_device *dev, wl_if_stats_t *if_stats)
-{
- struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
- dhd_pub_t *dhdp = (dhd_pub_t *)(cfg->pub);
- uint8 *pbuf = NULL;
- bcm_xtlvbuf_t xtlvbuf, local_xtlvbuf;
- bcm_xtlv_t *xtlv;
- uint16 expected_resp_len;
- wl_stats_report_t *request = NULL, *response = NULL;
- int bsscfg_idx;
- int ret = BCME_OK;
-
- pbuf = (uint8 *)MALLOCZ(dhdp->osh, WLC_IOCTL_MEDLEN);
- if (!pbuf) {
- WL_ERR(("Failed to allocate local pbuf\n"));
- return BCME_NOMEM;
- }
-
- /* top level container length cannot exceed 228 bytes.
- * This is because the output buffer is 1535 bytes long.
- * Allow 1300 bytes for reporting stats coming in XTLV format
- */
- request = (wl_stats_report_t *)
- MALLOCZ(dhdp->osh, IF_COUNTERS_PARAM_CONTAINER_LEN_MAX);
- if (!request) {
- WL_ERR(("Failed to allocate wl_stats_report_t with length (%d)\n",
- IF_COUNTERS_PARAM_CONTAINER_LEN_MAX));
- ret = BCME_NOMEM;
- goto fail;
- }
-
- request->version = WL_STATS_REPORT_REQUEST_VERSION_V2;
-
- /* Top level container... we will create it ourselves */
- /* Leave space for report version, length, and top level XTLV
- * WL_IFSTATS_XTLV_IF.
- */
- ret = bcm_xtlv_buf_init(&local_xtlvbuf,
- (uint8*)(request->data) + BCM_XTLV_HDR_SIZE,
- IF_COUNTERS_PARAM_CONTAINER_LEN_MAX -
- offsetof(wl_stats_report_t, data) - BCM_XTLV_HDR_SIZE,
- BCM_XTLV_OPTION_ALIGN32);
-
- if (ret) {
- goto fail;
- }
-
- /* Populate requests using this the local_xtlvbuf context. The xtlvbuf
- * is used to fill the container containing the XTLVs populated using
- * local_xtlvbuf.
- */
- ret = bcm_xtlv_buf_init(&xtlvbuf,
- (uint8*)(request->data),
- IF_COUNTERS_PARAM_CONTAINER_LEN_MAX -
- offsetof(wl_stats_report_t, data),
- BCM_XTLV_OPTION_ALIGN32);
-
- if (ret) {
- goto fail;
- }
-
- /* Request generic stats */
- ret = bcm_xtlv_put_data(&local_xtlvbuf,
- WL_IFSTATS_XTLV_GENERIC, NULL, 0);
- if (ret) {
- goto fail;
- }
-
- /* Complete the outer container with type and length
- * only.
- */
- ret = bcm_xtlv_put_data(&xtlvbuf,
- WL_IFSTATS_XTLV_IF,
- NULL, bcm_xtlv_buf_len(&local_xtlvbuf));
-
- if (ret) {
- goto fail;
- }
-
- request->length = bcm_xtlv_buf_len(&xtlvbuf) +
- offsetof(wl_stats_report_t, data);
- bsscfg_idx = wl_get_bssidx_by_wdev(cfg, dev->ieee80211_ptr);
-
- /* send the command over to the device and get teh output */
- ret = wldev_iovar_getbuf_bsscfg(dev, "if_counters", (void *)request,
- request->length, pbuf, WLC_IOCTL_MEDLEN, bsscfg_idx,
- &cfg->ioctl_buf_sync);
- if (ret < 0) {
- WL_ERR(("if_counters not supported ret=%d\n", ret));
- goto fail;
- }
-
- /* Reuse request to process response */
- response = (wl_stats_report_t *)pbuf;
-
- /* version check */
- if (response->version != WL_STATS_REPORT_REQUEST_VERSION_V2) {
- ret = BCME_VERSION;
- goto fail;
- }
-
- xtlv = (bcm_xtlv_t *)(response->data);
-
- expected_resp_len =
- (BCM_XTLV_LEN(xtlv) + OFFSETOF(wl_stats_report_t, data));
-
- /* Check if the received length is as expected */
- if ((response->length > WLC_IOCTL_MEDLEN) ||
- (response->length < expected_resp_len)) {
- ret = BCME_ERROR;
- WL_ERR(("Illegal response length received. Got: %d"
- " Expected: %d. Expected len must be <= %u\n",
- response->length, expected_resp_len, WLC_IOCTL_MEDLEN));
- goto fail;
- }
-
- /* check the type. The return data will be in
- * WL_IFSTATS_XTLV_IF container. So check if that container is
- * present
- */
- if (BCM_XTLV_ID(xtlv) != WL_IFSTATS_XTLV_IF) {
- ret = BCME_ERROR;
- WL_ERR(("unexpected type received: %d Expected: %d\n",
- BCM_XTLV_ID(xtlv), WL_IFSTATS_XTLV_IF));
- goto fail;
- }
-
- /* Process XTLVs within WL_IFSTATS_XTLV_IF container */
- ret = bcm_unpack_xtlv_buf(if_stats,
- (uint8*)response->data + BCM_XTLV_HDR_SIZE,
- BCM_XTLV_LEN(xtlv), /* total length of all TLVs in container */
- BCM_XTLV_OPTION_ALIGN32, wl_cfg80211_ifstats_counters_cb);
- if (ret) {
- WL_ERR(("Error unpacking XTLVs in wl_ifstats_counters: %d\n", ret));
- }
-
-fail:
- if (pbuf) {
- MFREE(dhdp->osh, pbuf, WLC_IOCTL_MEDLEN);
- }
-
- if (request) {
- MFREE(dhdp->osh, request, IF_COUNTERS_PARAM_CONTAINER_LEN_MAX);
- }
- return ret;
-}
-#undef IF_COUNTERS_PARAM_CONTAINER_LEN_MAX
-
-static s32
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 16, 0))
-wl_cfg80211_get_station(struct wiphy *wiphy, struct net_device *dev,
- const u8 *mac, struct station_info *sinfo)
-#else
-wl_cfg80211_get_station(struct wiphy *wiphy, struct net_device *dev,
- u8 *mac, struct station_info *sinfo)
-#endif // endif
-{
- struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
- s32 rssi = 0;
- s32 rate = 0;
- s32 err = 0;
- u16 wl_iftype = 0;
- u16 wl_mode = 0;
- get_pktcnt_t pktcnt;
- wl_if_stats_t *if_stats = NULL;
- sta_info_v4_t *sta = NULL;
- u8 *curmacp = NULL;
- s8 eabuf[ETHER_ADDR_STR_LEN];
- dhd_pub_t *dhd = (dhd_pub_t *)(cfg->pub);
- bool fw_assoc_state = FALSE;
- u32 dhd_assoc_state = 0;
- void *buf;
-
- RETURN_EIO_IF_NOT_UP(cfg);
-
- if (cfg80211_to_wl_iftype(dev->ieee80211_ptr->iftype, &wl_iftype, &wl_mode) < 0) {
- return -EINVAL;
- }
-
- buf = MALLOC(cfg->osh, MAX(sizeof(wl_if_stats_t), WLC_IOCTL_SMLEN));
- if (buf == NULL) {
- WL_ERR(("wl_cfg80211_get_station: MALLOC failed\n"));
- goto error;
- }
-
- switch (wl_iftype) {
- case WL_IF_TYPE_STA:
- case WL_IF_TYPE_IBSS:
- if (cfg->roam_offload) {
- struct ether_addr bssid;
- bzero(&bssid, sizeof(bssid));
- err = wldev_ioctl_get(dev, WLC_GET_BSSID, &bssid, ETHER_ADDR_LEN);
- if (err) {
- WL_ERR(("Failed to get current BSSID\n"));
- } else {
- if (memcmp(mac, &bssid.octet, ETHER_ADDR_LEN) != 0) {
- /* roaming is detected */
- err = wl_cfg80211_delayed_roam(cfg, dev, &bssid);
- if (err)
- WL_ERR(("Failed to handle the delayed"
- " roam, err=%d", err));
- mac = (u8 *)bssid.octet;
- }
- }
- }
- dhd_assoc_state = wl_get_drv_status(cfg, CONNECTED, dev);
- DHD_OS_WAKE_LOCK(dhd);
- fw_assoc_state = dhd_is_associated(dhd, 0, &err);
- if (dhd_assoc_state && !fw_assoc_state) {
- /* check roam (join) status */
- if (wl_check_assoc_state(cfg, dev)) {
- fw_assoc_state = TRUE;
- WL_DBG(("roam status\n"));
- }
- }
- DHD_OS_WAKE_UNLOCK(dhd);
- if (!dhd_assoc_state || !fw_assoc_state) {
- WL_ERR(("NOT assoc\n"));
- if (err == -ENODATA)
- goto error;
- if (!dhd_assoc_state) {
- WL_TRACE_HW4(("drv state is not connected \n"));
- }
- if (!fw_assoc_state) {
- WL_TRACE_HW4(("fw state is not associated \n"));
- }
- /* Disconnect due to fw is not associated for
- * FW_ASSOC_WATCHDOG_TIME ms.
- * 'err == 0' of dhd_is_associated() and '!fw_assoc_state'
- * means that BSSID is null.
- */
- if (dhd_assoc_state && !fw_assoc_state && !err) {
- if (!fw_assoc_watchdog_started) {
- fw_assoc_watchdog_ms = OSL_SYSUPTIME();
- fw_assoc_watchdog_started = TRUE;
- WL_TRACE_HW4(("fw_assoc_watchdog_started \n"));
- } else if (OSL_SYSUPTIME() - fw_assoc_watchdog_ms >
- FW_ASSOC_WATCHDOG_TIME) {
- fw_assoc_watchdog_started = FALSE;
- err = -ENODEV;
- WL_TRACE_HW4(("fw is not associated for %d ms \n",
- (OSL_SYSUPTIME() - fw_assoc_watchdog_ms)));
- goto get_station_err;
- }
- }
- err = -ENODEV;
- goto error;
- }
- if (dhd_is_associated(dhd, 0, NULL)) {
- fw_assoc_watchdog_started = FALSE;
- }
- curmacp = wl_read_prof(cfg, dev, WL_PROF_BSSID);
- if (memcmp(mac, curmacp, ETHER_ADDR_LEN)) {
- WL_ERR(("Wrong Mac address: "MACDBG" != "MACDBG"\n",
- MAC2STRDBG(mac), MAC2STRDBG(curmacp)));
- }
- /* go through to get another information */
- case WL_IF_TYPE_P2P_GC:
- case WL_IF_TYPE_P2P_DISC:
- if ((err = wl_cfg80211_get_rssi(dev, cfg, &rssi)) != BCME_OK) {
- goto get_station_err;
- }
-#if defined(RSSIAVG)
- err = wl_update_connected_rssi_cache(dev, &cfg->g_connected_rssi_cache_ctrl, &rssi);
- if (err) {
- WL_ERR(("Could not get rssi (%d)\n", err));
- goto get_station_err;
- }
- wl_delete_dirty_rssi_cache(&cfg->g_connected_rssi_cache_ctrl);
- wl_reset_rssi_cache(&cfg->g_connected_rssi_cache_ctrl);
-#endif
-#if defined(RSSIOFFSET)
- rssi = wl_update_rssi_offset(dev, rssi);
-#endif
-#if !defined(RSSIAVG) && !defined(RSSIOFFSET)
- // terence 20150419: limit the max. rssi to -2 or the bss will be filtered out in android OS
- rssi = MIN(rssi, RSSI_MAXVAL);
-#endif
- sinfo->filled |= STA_INFO_BIT(INFO_SIGNAL);
- sinfo->signal = rssi;
- WL_DBG(("RSSI %d dBm\n", rssi));
- /* go through to get another information */
- case WL_IF_TYPE_P2P_GO:
- /* Report the current tx rate */
- rate = 0;
- err = wldev_ioctl_get(dev, WLC_GET_RATE, &rate, sizeof(rate));
- if (err) {
- WL_ERR(("Could not get rate (%d)\n", err));
- } else {
-#if defined(USE_DYNAMIC_MAXPKT_RXGLOM)
- int rxpktglom;
-#endif // endif
- rate = dtoh32(rate);
- sinfo->filled |= STA_INFO_BIT(INFO_TX_BITRATE);
- sinfo->txrate.legacy = rate * 5;
- WL_DBG(("Rate %d Mbps\n", (rate / 2)));
-#if defined(USE_DYNAMIC_MAXPKT_RXGLOM)
- rxpktglom = ((rate/2) > 150) ? 20 : 10;
-
- if (maxrxpktglom != rxpktglom) {
- maxrxpktglom = rxpktglom;
- WL_DBG(("Rate %d Mbps, update bus:"
- "maxtxpktglom=%d\n", (rate/2), maxrxpktglom));
- err = wldev_iovar_setbuf(dev, "bus:maxtxpktglom",
- (char*)&maxrxpktglom, 4, cfg->ioctl_buf,
- WLC_IOCTL_MAXLEN, &cfg->ioctl_buf_sync);
- if (err < 0) {
- WL_ERR(("set bus:maxtxpktglom failed, %d\n", err));
- }
- }
-#endif // endif
- }
- if_stats = (wl_if_stats_t *)buf;
- bzero(if_stats, sizeof(*if_stats));
- if (FW_SUPPORTED(dhd, ifst)) {
- err = wl_cfg80211_ifstats_counters(dev, if_stats);
- } else
- {
- err = wldev_iovar_getbuf(dev, "if_counters", NULL, 0,
- (char *)if_stats, sizeof(*if_stats), NULL);
- }
-
- if (err) {
-// WL_ERR(("if_counters not supported ret=%d\n", err));
- bzero(&pktcnt, sizeof(pktcnt));
- err = wldev_ioctl_get(dev, WLC_GET_PKTCNTS, &pktcnt,
- sizeof(pktcnt));
- if (!err) {
- sinfo->rx_packets = pktcnt.rx_good_pkt;
- sinfo->rx_dropped_misc = pktcnt.rx_bad_pkt;
- sinfo->tx_packets = pktcnt.tx_good_pkt;
- sinfo->tx_failed = pktcnt.tx_bad_pkt;
- }
- } else {
- sinfo->rx_packets = (uint32)dtoh64(if_stats->rxframe);
- sinfo->rx_dropped_misc = 0;
- sinfo->tx_packets = (uint32)dtoh64(if_stats->txfrmsnt);
- sinfo->tx_failed = (uint32)dtoh64(if_stats->txnobuf) +
- (uint32)dtoh64(if_stats->txrunt) +
- (uint32)dtoh64(if_stats->txfail);
- }
-
- sinfo->filled |= (STA_INFO_BIT(INFO_RX_PACKETS) |
- STA_INFO_BIT(INFO_RX_DROP_MISC) |
- STA_INFO_BIT(INFO_TX_PACKETS) |
- STA_INFO_BIT(INFO_TX_FAILED));
-get_station_err:
- if (err && (err != -ENODATA)) {
- /* Disconnect due to zero BSSID or error to get RSSI */
- scb_val_t scbval;
- DHD_STATLOG_CTRL(dhd, ST(DISASSOC_INT_START),
- dhd_net2idx(dhd->info, dev), DOT11_RC_DISASSOC_LEAVING);
- scbval.val = htod32(DOT11_RC_DISASSOC_LEAVING);
- err = wldev_ioctl_set(dev, WLC_DISASSOC, &scbval,
- sizeof(scb_val_t));
- if (unlikely(err)) {
- WL_ERR(("disassoc error (%d)\n", err));
- }
-
- WL_ERR(("force cfg80211_disconnected: %d\n", err));
- wl_clr_drv_status(cfg, CONNECTED, dev);
- DHD_STATLOG_CTRL(dhd, ST(DISASSOC_DONE),
- dhd_net2idx(dhd->info, dev), DOT11_RC_DISASSOC_LEAVING);
- CFG80211_DISCONNECTED(dev, 0, NULL, 0, false, GFP_KERNEL);
- wl_link_down(cfg);
- }
- break;
- case WL_IF_TYPE_AP:
- err = wldev_iovar_getbuf(dev, "sta_info", (const void*)mac,
- ETHER_ADDR_LEN, buf, WLC_IOCTL_SMLEN, NULL);
- if (err < 0) {
- WL_ERR(("GET STA INFO failed, %d\n", err));
- goto error;
- }
- sinfo->filled = STA_INFO_BIT(INFO_INACTIVE_TIME);
- sta = (sta_info_v4_t *)buf;
- if (sta->ver != WL_STA_VER_4 && sta->ver != WL_STA_VER_5) {
- WL_ERR(("GET STA INFO version mismatch, %d\n", err));
- return BCME_VERSION;
- }
- sta->len = dtoh16(sta->len);
- sta->cap = dtoh16(sta->cap);
- sta->flags = dtoh32(sta->flags);
- sta->idle = dtoh32(sta->idle);
- sta->in = dtoh32(sta->in);
- sinfo->inactive_time = sta->idle * 1000;
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0)) || defined(WL_COMPAT_WIRELESS)
- if (sta->flags & WL_STA_ASSOC) {
- sinfo->filled |= STA_INFO_BIT(INFO_CONNECTED_TIME);
- sinfo->connected_time = sta->in;
- }
-#endif // endif
- WL_INFORM_MEM(("STA %s, flags 0x%x, idle time %ds, connected time %ds\n",
- bcm_ether_ntoa((const struct ether_addr *)mac, eabuf),
- sta->flags, sta->idle, sta->in));
- break;
- default :
- WL_ERR(("Invalid device mode %d\n", wl_get_mode_by_netdev(cfg, dev)));
- }
-error:
- if (buf) {
- MFREE(cfg->osh, buf, MAX(sizeof(wl_if_stats_t), WLC_IOCTL_SMLEN));
+#endif /* DHD_SUPPORT_IF_CNTS */
+
+ sinfo->filled |= (STA_INFO_BIT(INFO_RX_PACKETS) |
+ STA_INFO_BIT(INFO_RX_DROP_MISC) |
+ STA_INFO_BIT(INFO_TX_PACKETS) |
+ STA_INFO_BIT(INFO_TX_FAILED));
+
+get_station_err:
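+ /* tolerate transient get_station failures: only force a disconnect after three consecutive errors */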
+ if (err)
+ err_cnt++;
+ else
+ err_cnt = 0;
+ if (err_cnt >= 3 && (err != -ENODATA)) {
+ /* Disconnect due to zero BSSID or error to get RSSI */
+ scb_val_t scbval;
+ scbval.val = htod32(DOT11_RC_DISASSOC_LEAVING);
+ err = wldev_ioctl_set(dev, WLC_DISASSOC, &scbval, sizeof(scb_val_t));
+ if (unlikely(err)) {
+ WL_ERR(("disassoc error (%d)\n", err));
+ }
+
+ WL_ERR(("force cfg80211_disconnected: %d\n", err));
+ wl_clr_drv_status(cfg, CONNECTED, dev);
+ CFG80211_DISCONNECTED(dev, 0, NULL, 0, false, GFP_KERNEL);
+ wl_link_down(cfg);
+ }
+#ifdef DHD_SUPPORT_IF_CNTS
+error:
+ if (if_stats) {
+ kfree(if_stats);
+ }
+#endif /* DHD_SUPPORT_IF_CNTS */
+ }
+ else {
+ WL_ERR(("Invalid device mode %d\n", wl_get_mode_by_netdev(cfg, dev)));
}
return err;
s32 err = 0;
struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
struct net_info *_net_info = wl_get_netinfo_by_netdev(cfg, dev);
- s32 mode;
#ifdef RTT_SUPPORT
rtt_status_info_t *rtt_status;
#endif /* RTT_SUPPORT */
dhd_pub_t *dhd = cfg->pub;
- RETURN_EIO_IF_NOT_UP(cfg);
+ RETURN_EIO_IF_NOT_UP(cfg);
WL_DBG(("Enter\n"));
- mode = wl_get_mode_by_netdev(cfg, dev);
if (cfg->p2p_net == dev || _net_info == NULL ||
!wl_get_drv_status(cfg, CONNECTED, dev) ||
- ((mode != WL_MODE_BSS) &&
- (mode != WL_MODE_IBSS))) {
+ (wl_get_mode_by_netdev(cfg, dev) != WL_MODE_BSS &&
+ wl_get_mode_by_netdev(cfg, dev) != WL_MODE_IBSS)) {
return err;
}
err = wldev_ioctl_get(dev, WLC_GET_PM, &pm, sizeof(pm));
if (err)
- WL_ERR(("error (%d)\n", err));
+ WL_ERR(("%s:error (%d)\n", __FUNCTION__, err));
else if (pm != -1 && dev->ieee80211_ptr)
dev->ieee80211_ptr->ps = (pm == PM_OFF) ? false : true;
}
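+/* Handle the "SCAN-ACTIVE"/"SCAN-PASSIVE" private commands by toggling cfg->active_scan */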
+void wl_cfg80211_set_passive_scan(struct net_device *dev, char *command)
+{
+ struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
+
+ if (strcmp(command, "SCAN-ACTIVE") == 0) {
+ cfg->active_scan = 1;
+ } else if (strcmp(command, "SCAN-PASSIVE") == 0) {
+ cfg->active_scan = 0;
+ } else
+ WL_ERR(("Unknown command \n"));
+}
+
static __used u32 wl_find_msb(u16 bit16)
{
u32 ret = 0;
s32 err = BCME_OK;
if (unlikely(!wl_get_drv_status(cfg, READY, ndev))) {
- WL_INFORM_MEM(("device is not ready\n"));
+ WL_INFORM(("device is not ready\n"));
return err;
}
+
return err;
}
+
static s32
-#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 39)) || defined(WL_COMPAT_WIRELESS)
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 39))
wl_cfg80211_suspend(struct wiphy *wiphy, struct cfg80211_wowlan *wow)
#else
wl_cfg80211_suspend(struct wiphy *wiphy)
-#endif // endif
+#endif /* LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 39) */
{
s32 err = BCME_OK;
#ifdef DHD_CLEAR_ON_SUSPEND
struct net_info *iter, *next;
struct net_device *ndev = bcmcfg_to_prmry_ndev(cfg);
unsigned long flags;
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 8, 0))
+ struct cfg80211_scan_info info;
+#endif
if (unlikely(!wl_get_drv_status(cfg, READY, ndev))) {
- WL_INFORM_MEM(("device is not ready : status (%d)\n",
+ WL_INFORM(("device is not ready : status (%d)\n",
(int)cfg->status));
return err;
}
if (iter->ndev)
wl_set_drv_status(cfg, SCAN_ABORTING, iter->ndev);
}
- WL_CFG_DRV_LOCK(&cfg->cfgdrv_lock, flags);
+ spin_lock_irqsave(&cfg->cfgdrv_lock, flags);
if (cfg->scan_request) {
- wl_notify_scan_done(cfg, true);
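+ /* since kernel 4.8, cfg80211_scan_done() takes a struct cfg80211_scan_info instead of a bool */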
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 8, 0))
+ info.aborted = true;
+ cfg80211_scan_done(cfg->scan_request, &info);
+#else
+ cfg80211_scan_done(cfg->scan_request, true);
+#endif
cfg->scan_request = NULL;
}
for_each_ndev(cfg, iter, next) {
wl_clr_drv_status(cfg, SCAN_ABORTING, iter->ndev);
}
}
- WL_CFG_DRV_UNLOCK(&cfg->cfgdrv_lock, flags);
+ spin_unlock_irqrestore(&cfg->cfgdrv_lock, flags);
for_each_ndev(cfg, iter, next) {
if (iter->ndev) {
if (wl_get_drv_status(cfg, CONNECTING, iter->ndev)) {
}
#endif /* DHD_CLEAR_ON_SUSPEND */
+
return err;
}
int i, j;
struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
struct net_device *primary_dev = bcmcfg_to_prmry_ndev(cfg);
- int npmkids = cfg->pmk_list->pmkids.count;
- ASSERT(cfg->pmk_list->pmkids.length >= (sizeof(u16)*2));
if (!pmk_list) {
- WL_ERR(("pmk_list is NULL\n"));
+ printf("pmk_list is NULL\n");
return -EINVAL;
}
/* pmk list is supported only for STA interface i.e. primary interface
* Refer code wlc_bsscfg.c->wlc_bsscfg_sta_init
*/
if (primary_dev != dev) {
- WL_INFORM_MEM(("Not supporting Flushing pmklist on virtual"
+ WL_INFORM(("Not supporting Flushing pmklist on virtual"
" interfaces than primary interface\n"));
return err;
}
- WL_DBG(("No of elements %d\n", npmkids));
- for (i = 0; i < npmkids; i++) {
+ WL_DBG(("No of elements %d\n", pmk_list->pmkids.npmkid));
+ for (i = 0; i < pmk_list->pmkids.npmkid; i++) {
WL_DBG(("PMKID[%d]: %pM =\n", i,
- &pmk_list->pmkids.pmkid[i].bssid));
+ &pmk_list->pmkids.pmkid[i].BSSID));
for (j = 0; j < WPA2_PMKID_LEN; j++) {
- WL_DBG(("%02x\n", pmk_list->pmkids.pmkid[i].pmkid[j]));
+ WL_DBG(("%02x\n", pmk_list->pmkids.pmkid[i].PMKID[j]));
}
}
- if (cfg->wlc_ver.wlc_ver_major >= MIN_PMKID_LIST_V3_FW_MAJOR) {
- pmk_list->pmkids.version = PMKID_LIST_VER_3;
- err = wldev_iovar_setbuf(dev, "pmkid_info", (char *)pmk_list,
- sizeof(*pmk_list), cfg->ioctl_buf,
- WLC_IOCTL_MAXLEN, &cfg->ioctl_buf_sync);
- }
- else if (cfg->wlc_ver.wlc_ver_major == MIN_PMKID_LIST_V2_FW_MAJOR) {
- u32 v2_list_size = (u32)(sizeof(pmkid_list_v2_t) + npmkids*sizeof(pmkid_v2_t));
- pmkid_list_v2_t *pmkid_v2_list = (pmkid_list_v2_t *)MALLOCZ(cfg->osh, v2_list_size);
-
- if (pmkid_v2_list == NULL) {
- WL_ERR(("failed to allocate pmkid list\n"));
- return BCME_NOMEM;
- }
-
- pmkid_v2_list->version = PMKID_LIST_VER_2;
- /* Account for version, length and pmkid_v2_t fields */
- pmkid_v2_list->length = (npmkids * sizeof(pmkid_v2_t)) + (2 * sizeof(u16));
-
- for (i = 0; i < npmkids; i++) {
- /* memcpy_s return checks not needed as buffers are of same size */
- (void)memcpy_s(&pmkid_v2_list->pmkid[i].BSSID,
- ETHER_ADDR_LEN, &pmk_list->pmkids.pmkid[i].bssid,
- ETHER_ADDR_LEN);
-
- /* copy pmkid if available */
- if (pmk_list->pmkids.pmkid[i].pmkid_len) {
- (void)memcpy_s(pmkid_v2_list->pmkid[i].PMKID,
- WPA2_PMKID_LEN,
- pmk_list->pmkids.pmkid[i].pmkid,
- pmk_list->pmkids.pmkid[i].pmkid_len);
- }
-
- if (pmk_list->pmkids.pmkid[i].pmk_len) {
- (void)memcpy_s(pmkid_v2_list->pmkid[i].pmk,
- pmk_list->pmkids.pmkid[i].pmk_len,
- pmk_list->pmkids.pmkid[i].pmk,
- pmk_list->pmkids.pmkid[i].pmk_len);
- pmkid_v2_list->pmkid[i].pmk_len = pmk_list->pmkids.pmkid[i].pmk_len;
- }
-
- if (pmk_list->pmkids.pmkid[i].ssid_len) {
- (void)memcpy_s(pmkid_v2_list->pmkid[i].ssid.ssid,
- pmk_list->pmkids.pmkid[i].ssid_len,
- pmk_list->pmkids.pmkid[i].ssid,
- pmk_list->pmkids.pmkid[i].ssid_len);
- pmkid_v2_list->pmkid[i].ssid.ssid_len
- = pmk_list->pmkids.pmkid[i].ssid_len;
- }
-
- (void)memcpy_s(pmkid_v2_list->pmkid[i].fils_cache_id,
- FILS_CACHE_ID_LEN, &pmk_list->pmkids.pmkid[i].fils_cache_id,
- FILS_CACHE_ID_LEN);
- pmkid_v2_list->pmkid[i].length = PMKID_ELEM_V2_LENGTH;
- }
- err = wldev_iovar_setbuf(dev, "pmkid_info", (char *)pmkid_v2_list,
- v2_list_size, cfg->ioctl_buf,
- WLC_IOCTL_MAXLEN, &cfg->ioctl_buf_sync);
- if (unlikely(err)) {
- WL_ERR(("pmkid_info failed (%d)\n", err));
- }
-
- MFREE(cfg->osh, pmkid_v2_list, v2_list_size);
+ if (likely(!err)) {
+ err = wldev_iovar_setbuf(dev, "pmkid_info", (char *)pmk_list,
+ sizeof(*pmk_list), cfg->ioctl_buf, WLC_IOCTL_MAXLEN, &cfg->ioctl_buf_sync);
}
- else {
- u32 v1_list_size = (u32)(sizeof(pmkid_list_v1_t) + npmkids*sizeof(pmkid_v1_t));
- pmkid_list_v1_t *pmkid_v1_list = (pmkid_list_v1_t *)MALLOCZ(cfg->osh, v1_list_size);
- if (pmkid_v1_list == NULL) {
- WL_ERR(("failed to allocate pmkid list\n"));
- return BCME_NOMEM;
- }
- for (i = 0; i < npmkids; i++) {
- /* memcpy_s return checks not needed as buffers are of same size */
- (void)memcpy_s(&pmkid_v1_list->pmkid[i].BSSID,
- ETHER_ADDR_LEN, &pmk_list->pmkids.pmkid[i].bssid,
- ETHER_ADDR_LEN);
- (void)memcpy_s(pmkid_v1_list->pmkid[i].PMKID,
- WPA2_PMKID_LEN, pmk_list->pmkids.pmkid[i].pmkid,
- WPA2_PMKID_LEN);
- pmkid_v1_list->npmkid++;
- }
- err = wldev_iovar_setbuf(dev, "pmkid_info", (char *)pmkid_v1_list,
- v1_list_size, cfg->ioctl_buf,
- WLC_IOCTL_MAXLEN, &cfg->ioctl_buf_sync);
- if (unlikely(err)) {
- WL_ERR(("pmkid_info failed (%d)\n", err));
- }
- MFREE(cfg->osh, pmkid_v1_list, v1_list_size);
- }
return err;
}
-/* TODO: remove temporal cfg->pmk_list list, and call wl_cfg80211_update_pmksa for single
- * entry operation.
- */
static s32
wl_cfg80211_set_pmksa(struct wiphy *wiphy, struct net_device *dev,
struct cfg80211_pmksa *pmksa)
struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
s32 err = 0;
int i;
- int npmkids = cfg->pmk_list->pmkids.count;
- dhd_pub_t *dhdp = (dhd_pub_t *)(cfg->pub);
RETURN_EIO_IF_NOT_UP(cfg);
- BCM_REFERENCE(dhdp);
- DHD_STATLOG_CTRL(dhdp, ST(INSTALL_PMKSA), dhd_net2idx(dhdp->info, dev), 0);
-
- for (i = 0; i < npmkids; i++) {
- if (pmksa->bssid != NULL) {
- if (!memcmp(pmksa->bssid, &cfg->pmk_list->pmkids.pmkid[i].bssid,
- ETHER_ADDR_LEN))
- break;
- }
-#ifdef WL_FILS
- else if (pmksa->ssid != NULL) {
- if (!memcmp(pmksa->ssid, &cfg->pmk_list->pmkids.pmkid[i].ssid,
- pmksa->ssid_len))
- break;
- }
-#endif /* WL_FILS */
- }
+ for (i = 0; i < cfg->pmk_list->pmkids.npmkid; i++)
+ if (!memcmp(pmksa->bssid, &cfg->pmk_list->pmkids.pmkid[i].BSSID,
+ ETHER_ADDR_LEN))
+ break;
if (i < WL_NUM_PMKIDS_MAX) {
- if (pmksa->bssid != NULL) {
- memcpy(&cfg->pmk_list->pmkids.pmkid[i].bssid, pmksa->bssid,
- ETHER_ADDR_LEN);
- }
-#ifdef WL_FILS
- else if (pmksa->ssid != NULL) {
- cfg->pmk_list->pmkids.pmkid[i].ssid_len = pmksa->ssid_len;
- memcpy(&cfg->pmk_list->pmkids.pmkid[i].ssid, pmksa->ssid,
- pmksa->ssid_len);
- memcpy(&cfg->pmk_list->pmkids.pmkid[i].fils_cache_id, pmksa->cache_id,
- FILS_CACHE_ID_LEN);
- }
-#endif /* WL_FILS */
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) || defined(WL_FILS))
- if (pmksa->pmk_len) {
- if (memcpy_s(&cfg->pmk_list->pmkids.pmkid[i].pmk, PMK_LEN_MAX, pmksa->pmk,
- pmksa->pmk_len)) {
- WL_ERR(("invalid pmk len = %zu", pmksa->pmk_len));
- } else {
- cfg->pmk_list->pmkids.pmkid[i].pmk_len = pmksa->pmk_len;
- }
- }
-#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) || defined(WL_FILS) */
- /* return check not required as buffer lengths are same */
- (void)memcpy_s(cfg->pmk_list->pmkids.pmkid[i].pmkid, WPA2_PMKID_LEN, pmksa->pmkid,
+ memcpy(&cfg->pmk_list->pmkids.pmkid[i].BSSID, pmksa->bssid,
+ ETHER_ADDR_LEN);
+ memcpy(&cfg->pmk_list->pmkids.pmkid[i].PMKID, pmksa->pmkid,
WPA2_PMKID_LEN);
- cfg->pmk_list->pmkids.pmkid[i].pmkid_len = WPA2_PMKID_LEN;
-
- /* set lifetime not to expire in firmware by default.
- * Currently, wpa_supplicant control PMKID lifetime on his end. e.g) set 12 hours
- * when it expired, wpa_supplicant should call set_pmksa/del_pmksa to update
- * corresponding entry.
- */
- cfg->pmk_list->pmkids.pmkid[i].time_left = KEY_PERM_PMK;
- if (i == npmkids) {
- cfg->pmk_list->pmkids.length += sizeof(pmkid_v3_t);
- cfg->pmk_list->pmkids.count++;
- }
+ if (i == cfg->pmk_list->pmkids.npmkid)
+ cfg->pmk_list->pmkids.npmkid++;
} else {
err = -EINVAL;
}
-
-#if (WL_DBG_LEVEL > 0)
- if (pmksa->bssid != NULL) {
- WL_DBG(("set_pmksa,IW_PMKSA_ADD - PMKID: %pM =\n",
- &cfg->pmk_list->pmkids.pmkid[npmkids - 1].bssid));
- }
+ WL_DBG(("set_pmksa,IW_PMKSA_ADD - PMKID: %pM =\n",
+ &cfg->pmk_list->pmkids.pmkid[cfg->pmk_list->pmkids.npmkid - 1].BSSID));
for (i = 0; i < WPA2_PMKID_LEN; i++) {
WL_DBG(("%02x\n",
- cfg->pmk_list->pmkids.pmkid[npmkids - 1].
- pmkid[i]));
+ cfg->pmk_list->pmkids.pmkid[cfg->pmk_list->pmkids.npmkid - 1].
+ PMKID[i]));
}
-#endif /* (WL_DBG_LEVEL > 0) */
err = wl_update_pmklist(dev, cfg->pmk_list, err);
return err;
}
-/* sending pmkid_info IOVAR to manipulate PMKID(PMKSA) list in firmware.
- * input @pmksa: host given single pmksa info.
- * if it's NULL, assume whole list manipulated. e.g) flush all PMKIDs in firmware.
- * input @set: TRUE means adding PMKSA operation. FALSE means deleting.
- * return: log internal BCME_XXX error, and convert it to -EINVAL to linux generic error code.
- */
-static s32 wl_cfg80211_update_pmksa(struct wiphy *wiphy, struct net_device *dev,
- struct cfg80211_pmksa *pmksa, bool set) {
-
+static s32
+wl_cfg80211_del_pmksa(struct wiphy *wiphy, struct net_device *dev,
+ struct cfg80211_pmksa *pmksa)
+{
struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+
+ struct _pmkid_list pmkid = {.npmkid = 0};
s32 err = 0;
- pmkid_list_v3_t *pmk_list;
- uint32 alloc_len;
+ int i;
RETURN_EIO_IF_NOT_UP(cfg);
+ memcpy(&pmkid.pmkid[0].BSSID, pmksa->bssid, ETHER_ADDR_LEN);
+ memcpy(pmkid.pmkid[0].PMKID, pmksa->pmkid, WPA2_PMKID_LEN);
- if (cfg->wlc_ver.wlc_ver_major < MIN_PMKID_LIST_V3_FW_MAJOR) {
- WL_ERR(("wlc_ver_major not supported:%d\n", cfg->wlc_ver.wlc_ver_major));
- return BCME_VERSION;
+ WL_DBG(("del_pmksa,IW_PMKSA_REMOVE - PMKID: %pM =\n",
+ &pmkid.pmkid[0].BSSID));
+ for (i = 0; i < WPA2_PMKID_LEN; i++) {
+ WL_DBG(("%02x\n", pmkid.pmkid[0].PMKID[i]));
}
- alloc_len = (uint32) OFFSETOF(pmkid_list_v3_t, pmkid) + ((pmksa) ? sizeof(pmkid_v3_t) : 0);
- pmk_list = (pmkid_list_v3_t *)MALLOCZ(cfg->osh, alloc_len);
+ for (i = 0; i < cfg->pmk_list->pmkids.npmkid; i++)
+ if (!memcmp
+ (pmksa->bssid, &cfg->pmk_list->pmkids.pmkid[i].BSSID,
+ ETHER_ADDR_LEN))
+ break;
- if (pmk_list == NULL) {
- return BCME_NOMEM;
+ if ((cfg->pmk_list->pmkids.npmkid > 0) &&
+ (i < cfg->pmk_list->pmkids.npmkid)) {
+ memset(&cfg->pmk_list->pmkids.pmkid[i], 0, sizeof(pmkid_t));
+ for (; i < (cfg->pmk_list->pmkids.npmkid - 1); i++) {
+ memcpy(&cfg->pmk_list->pmkids.pmkid[i].BSSID,
+ &cfg->pmk_list->pmkids.pmkid[i + 1].BSSID,
+ ETHER_ADDR_LEN);
+ memcpy(&cfg->pmk_list->pmkids.pmkid[i].PMKID,
+ &cfg->pmk_list->pmkids.pmkid[i + 1].PMKID,
+ WPA2_PMKID_LEN);
+ }
+ cfg->pmk_list->pmkids.npmkid--;
+ } else {
+ err = -EINVAL;
}
- pmk_list->version = PMKID_LIST_VER_3;
- pmk_list->length = alloc_len;
- pmk_list->count = (pmksa) ? 1 : 0; // 1 means single entry operation, 0 means whole list.
+ err = wl_update_pmklist(dev, cfg->pmk_list, err);
- /* controll set/del action by lifetime parameter accordingly.
- * if set == TRUE, it's set PMKID action with lifetime permanent.
- * if set == FALSE, it's del PMKID action with lifetime zero.
- */
- pmk_list->pmkid->time_left = (set) ? KEY_PERM_PMK : 0;
+ return err;
- if (pmksa) {
- if (pmksa->bssid) {
- err = memcpy_s(&pmk_list->pmkid->bssid, sizeof(pmk_list->pmkid->bssid),
- pmksa->bssid, ETHER_ADDR_LEN);
- if (err) {
- goto exit;
- }
- }
- if (pmksa->pmkid) {
- err = memcpy_s(&pmk_list->pmkid->pmkid, sizeof(pmk_list->pmkid->pmkid),
- pmksa->pmkid, WPA2_PMKID_LEN);
- if (err) {
- goto exit;
- }
- }
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0))
- if (pmksa->pmk) {
- err = memcpy_s(&pmk_list->pmkid->pmk, sizeof(pmk_list->pmkid->pmk),
- pmksa->pmk, pmksa->pmk_len);
- if (err) {
- goto exit;
- }
- pmk_list->pmkid->pmk_len = pmksa->pmk_len;
- }
- if (pmksa->ssid) {
- err = memcpy_s(&pmk_list->pmkid->ssid, sizeof(pmk_list->pmkid->ssid),
- pmksa->ssid, pmksa->ssid_len);
- if (err) {
- goto exit;
- }
- pmk_list->pmkid->ssid_len = pmksa->ssid_len;
- }
- if (pmksa->cache_id) {
- pmk_list->pmkid->fils_cache_id = *(uint16 *)pmksa->cache_id;
- }
-#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
- }
- err = wldev_iovar_setbuf(dev, "pmkid_info", (char *)pmk_list,
- alloc_len, cfg->ioctl_buf,
- WLC_IOCTL_MAXLEN, &cfg->ioctl_buf_sync);
-
-exit:
- if (pmk_list) {
- MFREE(cfg->osh, pmk_list, alloc_len);
- }
- return err;
}
-/* TODO: remove temporal cfg->pmk_list list, and call wl_cfg80211_update_pmksa for single
- * entry operation.
- */
static s32
-wl_cfg80211_del_pmksa(struct wiphy *wiphy, struct net_device *dev,
- struct cfg80211_pmksa *pmksa)
+wl_cfg80211_flush_pmksa(struct wiphy *wiphy, struct net_device *dev)
{
struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
s32 err = 0;
- int i;
- int npmkids = cfg->pmk_list->pmkids.count;
RETURN_EIO_IF_NOT_UP(cfg);
+ memset(cfg->pmk_list, 0, sizeof(*cfg->pmk_list));
+ err = wl_update_pmklist(dev, cfg->pmk_list, err);
+ return err;
+}
- if (!pmksa) {
- WL_ERR(("pmksa is not initialized\n"));
- return BCME_ERROR;
- }
- if (!npmkids) {
- /* nmpkids = 0, nothing to delete */
- WL_DBG(("npmkids=0. Skip del\n"));
- return BCME_OK;
- }
-
-#if (WL_DBG_LEVEL > 0)
- if (pmksa->bssid) {
- WL_DBG(("del_pmksa,IW_PMKSA_REMOVE - PMKID: %pM =\n",
- pmksa->bssid));
- }
-#ifdef WL_FILS
- else if (pmksa->ssid) {
- WL_DBG(("FILS: del_pmksa for ssid: "));
- for (i = 0; i < pmksa->ssid_len; i++) {
- WL_DBG(("%c", pmksa->ssid[i]));
- }
- WL_DBG(("\n"));
- }
-#endif /* WL_FILS */
- if (pmksa->pmkid) {
- for (i = 0; i < WPA2_PMKID_LEN; i++) {
- WL_DBG(("%02x\n", pmksa->pmkid[i]));
- }
- }
-#endif /* (WL_DBG_LEVEL > 0) */
+static wl_scan_params_t *
+wl_cfg80211_scan_alloc_params(struct bcm_cfg80211 *cfg, int channel, int nprobes,
+ int *out_params_size)
+{
+ wl_scan_params_t *params;
+ int params_size;
+ int num_chans;
+ int bssidx = 0;
- for (i = 0; i < npmkids; i++) {
- if (pmksa->bssid) {
- if (!memcmp
- (pmksa->bssid, &cfg->pmk_list->pmkids.pmkid[i].bssid,
- ETHER_ADDR_LEN)) {
- break;
- }
- }
-#ifdef WL_FILS
- else if (pmksa->ssid) {
- if (!memcmp
- (pmksa->ssid, &cfg->pmk_list->pmkids.pmkid[i].ssid,
- pmksa->ssid_len)) {
- break;
- }
- }
-#endif /* WL_FILS */
- }
- if ((npmkids > 0) && (i < npmkids)) {
- bzero(&cfg->pmk_list->pmkids.pmkid[i], sizeof(pmkid_v3_t));
- for (; i < (npmkids - 1); i++) {
- (void)memcpy_s(&cfg->pmk_list->pmkids.pmkid[i],
- sizeof(pmkid_v3_t),
- &cfg->pmk_list->pmkids.pmkid[i + 1],
- sizeof(pmkid_v3_t));
- }
- npmkids--;
- cfg->pmk_list->pmkids.length -= sizeof(pmkid_v3_t);
- cfg->pmk_list->pmkids.count--;
+ *out_params_size = 0;
- } else {
- err = -EINVAL;
+ /* Our scan params only need space for 1 channel and 0 ssids */
+ params_size = WL_SCAN_PARAMS_FIXED_SIZE + 1 * sizeof(uint16);
+ params = (wl_scan_params_t*) kzalloc(params_size, GFP_KERNEL);
+ if (params == NULL) {
+ WL_ERR(("mem alloc failed (%d bytes)\n", params_size));
+ return params;
}
+ memset(params, 0, params_size);
+ params->nprobes = nprobes;
- /* current wl_update_pmklist() doesn't delete corresponding PMKID entry.
- * inside firmware. So we need to issue delete action explicitely through
- * this function.
- */
- err = wl_cfg80211_update_pmksa(wiphy, dev, pmksa, FALSE);
- /* intentional fall through even on error.
- * it should work above MIN_PMKID_LIST_V3_FW_MAJOR, otherwise let ignore it.
- */
-
- err = wl_update_pmklist(dev, cfg->pmk_list, err);
+ num_chans = (channel == 0) ? 0 : 1;
- return err;
+ memcpy(&params->bssid, &ether_bcast, ETHER_ADDR_LEN);
+ params->bss_type = DOT11_BSSTYPE_ANY;
+ params->scan_type = DOT11_SCANTYPE_ACTIVE;
+ params->nprobes = htod32(1);
+ params->active_time = htod32(-1);
+ params->passive_time = htod32(-1);
+ params->home_time = htod32(10);
+ if (channel == -1)
+ params->channel_list[0] = htodchanspec(channel);
+ else
+ params->channel_list[0] = wl_ch_host_to_driver(cfg, bssidx, channel);
-}
+ /* Our scan params have 1 channel and 0 ssids */
+ params->channel_num = htod32((0 << WL_SCAN_PARAMS_NSSID_SHIFT) |
+ (num_chans & WL_SCAN_PARAMS_COUNT_MASK));
-/* TODO: remove temporal cfg->pmk_list list, and call wl_cfg80211_update_pmksa for single
- * entry operation.
- */
-static s32
-wl_cfg80211_flush_pmksa(struct wiphy *wiphy, struct net_device *dev)
-{
- struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
- s32 err = 0;
- RETURN_EIO_IF_NOT_UP(cfg);
- bzero(cfg->pmk_list, sizeof(*cfg->pmk_list));
- cfg->pmk_list->pmkids.length = OFFSETOF(pmkid_list_v3_t, pmkid);
- cfg->pmk_list->pmkids.count = 0;
- cfg->pmk_list->pmkids.version = PMKID_LIST_VER_3;
- err = wl_update_pmklist(dev, cfg->pmk_list, err);
- return err;
+ *out_params_size = params_size; /* rtn size to the caller */
+ return params;
}
static s32
+#if defined(WL_CFG80211_P2P_DEV_IF)
+wl_cfg80211_remain_on_channel(struct wiphy *wiphy, bcm_struct_cfgdev *cfgdev,
+ struct ieee80211_channel *channel, unsigned int duration, u64 *cookie)
+#else
wl_cfg80211_remain_on_channel(struct wiphy *wiphy, bcm_struct_cfgdev *cfgdev,
- struct ieee80211_channel *channel,
-#if !defined(WL_CFG80211_P2P_DEV_IF)
+ struct ieee80211_channel * channel,
enum nl80211_channel_type channel_type,
-#endif /* WL_CFG80211_P2P_DEV_IF */
unsigned int duration, u64 *cookie)
+#endif /* WL_CFG80211_P2P_DEV_IF */
{
s32 target_channel;
u32 id;
struct net_device *ndev = NULL;
struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
- RETURN_EIO_IF_NOT_UP(cfg);
#ifdef DHD_IFDEBUG
PRINT_WDEV_INFO(cfgdev);
#endif /* DHD_IFDEBUG */
ndev = cfgdev_to_wlc_ndev(cfgdev, cfg);
- mutex_lock(&cfg->usr_sync);
WL_DBG(("Enter, channel: %d, duration ms (%d) SCANNING ?? %s \n",
ieee80211_frequency_to_channel(channel->center_freq),
duration, (wl_get_drv_status(cfg, SCANNING, ndev)) ? "YES":"NO"));
#ifdef P2P_LISTEN_OFFLOADING
if (wl_get_p2p_status(cfg, DISC_IN_PROGRESS)) {
WL_ERR(("P2P_FIND: Discovery offload is in progress\n"));
- err = -EAGAIN;
- goto exit;
+ return -EAGAIN;
}
#endif /* P2P_LISTEN_OFFLOADING */
#ifndef WL_CFG80211_VSDB_PRIORITIZE_SCAN_REQUEST
if (wl_get_drv_status_all(cfg, SCANNING)) {
- wl_cfg80211_cancel_scan(cfg);
+ wl_notify_escan_complete(cfg, cfg->escan_info.ndev, true, true);
}
#endif /* not WL_CFG80211_VSDB_PRIORITIZE_SCAN_REQUEST */
#ifdef WL_CFG80211_VSDB_PRIORITIZE_SCAN_REQUEST
if (wl_get_drv_status(cfg, SCANNING, ndev)) {
- timer_list_compat_t *_timer;
+ struct timer_list *_timer;
WL_DBG(("scan is running. go to fake listen state\n"));
if (duration > LONG_LISTEN_TIME) {
}
#endif /* WL_CFG80211_VSDB_PRIORITIZE_SCAN_REQUEST */
-#ifdef WL_BCNRECV
- /* check fakeapscan in progress then abort */
- wl_android_bcnrecv_stop(ndev, WL_BCNRECV_LISTENBUSY);
-#endif /* WL_BCNRECV */
#ifdef WL_CFG80211_SYNC_GON
if (wl_get_drv_status_all(cfg, WAITING_NEXT_ACT_FRM_LISTEN)) {
/* do not enter listen mode again if we are in listen mode already for next af.
* without turning on P2P
*/
get_primary_mac(cfg, &primary_mac);
-#ifndef WL_P2P_USE_RANDMAC
wl_cfgp2p_generate_bss_mac(cfg, &primary_mac);
-#endif /* WL_P2P_USE_RANDMAC */
p2p_on(cfg) = true;
}
wl_set_drv_status(cfg, FAKE_REMAINING_ON_CHANNEL, ndev);
}
#endif /* WL_CFG80211_VSDB_PRIORITIZE_SCAN_REQUEST */
-
- if (err) {
- wl_flush_fw_log_buffer(ndev, FW_LOGSET_MASK_ALL);
- }
-
/* WAR: set err = ok to prevent cookie mismatch in wpa_supplicant
* and expire timer will send a completion to the upper layer
*/
exit:
if (err == BCME_OK) {
- WL_DBG(("Success\n"));
+ WL_INFORM(("Success\n"));
#if defined(WL_CFG80211_P2P_DEV_IF)
cfg80211_ready_on_channel(cfgdev, *cookie, channel,
duration, GFP_KERNEL);
} else {
WL_ERR(("Fail to Set (err=%d cookie:%llu)\n", err, *cookie));
}
- mutex_unlock(&cfg->usr_sync);
return err;
}
wl_cfgp2p_set_p2p_mode(cfg, WL_P2P_DISC_ST_SCAN, 0, 0,
wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_DEVICE));
} else {
- WL_ERR(("wl_cfg80211_cancel_remain_on_channel: ignore, request cookie(%llu)"
- " is not matched. (cur : %llu)\n",
- cookie, cfg->last_roc_id));
+ WL_ERR(("%s : ignore, request cookie(%llu) is not matched. (cur : %llu)\n",
+ __FUNCTION__, cookie, cfg->last_roc_id));
}
return err;
s32 ret = BCME_OK;
BCM_SET_CONTAINER_OF(afx_instance, work, struct afx_hdl, work);
+ if (!afx_instance) {
+ WL_ERR(("afx_instance is NULL\n"));
+ return;
+ }
+ cfg = wl_get_cfg(afx_instance->dev);
if (afx_instance) {
- cfg = wl_get_cfg(afx_instance->dev);
- if (cfg != NULL && cfg->afx_hdl->is_active) {
+ if (cfg->afx_hdl->is_active) {
if (cfg->afx_hdl->is_listen && cfg->afx_hdl->my_listen_chan) {
ret = wl_cfgp2p_discover_listen(cfg, cfg->afx_hdl->my_listen_chan,
(100 * (1 + (RANDOM32() % 3)))); /* 100ms ~ 300ms */
struct p2p_config_af_params {
s32 max_tx_retry; /* max tx retry count if tx no ack */
-#ifdef WL_CFG80211_GON_COLLISION
- /* drop tx go nego request if go nego collision occurs */
- bool drop_tx_req;
-#endif // endif
#ifdef WL_CFG80211_SYNC_GON
bool extra_listen;
-#endif // endif
+#endif
bool search_channel; /* 1: search peer's channel to send af */
};
-#ifdef WL_DISABLE_HE_P2P
-static s32
-wl_cfg80211_he_p2p_disable(struct wiphy *wiphy, struct ether_addr peer_mac)
-{
- struct cfg80211_bss *bss;
- u8 *ie = NULL;
- u32 ie_len = 0;
- struct net_device *ndev = NULL;
- s32 bssidx = 0;
- s32 err = BCME_OK;
- struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
-
- bss = CFG80211_GET_BSS(wiphy, NULL, peer_mac.octet, NULL, 0);
- if (!bss) {
- WL_ERR(("Could not find the Peer device\n"));
- return BCME_ERROR;
- } else {
- GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
-#if defined(WL_CFG80211_P2P_DEV_IF)
- ie = (u8 *)bss->ies->data;
- ie_len = bss->ies->len;
-#else
- ie = bss->information_elements;
- ie_len = bss->len_information_elements;
-#endif /* WL_CFG80211_P2P_DEV_IF */
- GCC_DIAGNOSTIC_POP();
- }
- if (ie) {
- if ((bcm_parse_tlvs_dot11(ie, ie_len,
- EXT_MNG_HE_CAP_ID, TRUE)) == NULL) {
- WL_DBG(("Peer does not support HE capability\n"));
- ndev = wl_to_p2p_bss_ndev(cfg, P2PAPI_BSSCFG_CONNECTION1);
- if (ndev && (bssidx =
- wl_get_bssidx_by_wdev(cfg, ndev->ieee80211_ptr)) < 0) {
- WL_ERR(("Find index failed\n"));
- err = BCME_ERROR;
- } else {
- WL_DBG(("Disabling HE for P2P\n"));
- err = wl_cfg80211_set_he_mode(ndev, cfg, bssidx,
- WL_IF_TYPE_P2P_DISC, FALSE);
- if (err < 0) {
- WL_ERR(("failed to set he features, error=%d\n", err));
- }
- }
- } else {
- WL_DBG(("Peer supports HE capability\n"));
- }
- }
- CFG80211_PUT_BSS(wiphy, bss);
-
- return err;
-}
-#endif /* WL_DISABLE_HE_P2P */
-
static s32
wl_cfg80211_config_p2p_pub_af_tx(struct wiphy *wiphy,
wl_action_frame_t *action_frame, wl_af_params_t *af_params,
(wifi_p2p_pub_act_frame_t *) (action_frame->data);
/* initialize default value */
-#ifdef WL_CFG80211_GON_COLLISION
- config_af_params->drop_tx_req = false;
-#endif // endif
#ifdef WL_CFG80211_SYNC_GON
config_af_params->extra_listen = true;
-#endif // endif
+#endif
config_af_params->search_channel = false;
config_af_params->max_tx_retry = WL_AF_TX_MAX_RETRY;
cfg->next_af_subtype = P2P_PAF_SUBTYPE_INVALID;
switch (act_frm->subtype) {
case P2P_PAF_GON_REQ: {
- /* Disable he if peer does not support before starting GONEG */
-#ifdef WL_DISABLE_HE_P2P
- wl_cfg80211_he_p2p_disable(wiphy, action_frame->da);
-#endif /* WL_DISABLE_HE_P2P */
WL_DBG(("P2P: GO_NEG_PHASE status set \n"));
wl_set_p2p_status(cfg, GO_NEG_PHASE);
/* increase dwell time to wait for RESP frame */
af_params->dwell_time = WL_MED_DWELL_TIME;
-#ifdef WL_CFG80211_GON_COLLISION
- config_af_params->drop_tx_req = true;
-#endif /* WL_CFG80211_GON_COLLISION */
break;
}
case P2P_PAF_GON_RSP: {
/* minimize dwell time */
af_params->dwell_time = WL_MIN_DWELL_TIME;
-#ifdef WL_CFG80211_GON_COLLISION
- /* if go nego formation done, clear it */
- cfg->block_gon_req_tx_count = 0;
- cfg->block_gon_req_rx_count = 0;
-#endif /* WL_CFG80211_GON_COLLISION */
#ifdef WL_CFG80211_SYNC_GON
config_af_params->extra_listen = false;
#endif /* WL_CFG80211_SYNC_GON */
void *frame, u16 frame_len)
{
struct wl_scan_results *bss_list;
- wl_bss_info_t *bi = NULL;
+ struct wl_bss_info *bi = NULL;
bool result = false;
s32 i;
chanspec_t chanspec;
struct net_info *netinfo;
#ifdef VSDB
ulong off_chan_started_jiffies = 0;
-#endif // endif
+#endif
ulong dwell_jiffies = 0;
bool dwell_overflow = false;
dhd_pub_t *dhd = (dhd_pub_t *)(cfg->pub);
+ bool miss_gon_cfm = false;
int32 requested_dwell = af_params->dwell_time;
cfg->next_af_subtype = P2P_PAF_SUBTYPE_INVALID;
config_af_params.max_tx_retry = WL_AF_TX_MAX_RETRY;
config_af_params.search_channel = false;
-#ifdef WL_CFG80211_GON_COLLISION
- config_af_params.drop_tx_req = false;
-#endif // endif
#ifdef WL_CFG80211_SYNC_GON
config_af_params.extra_listen = false;
-#endif // endif
+#endif
/* config parameters */
/* Public Action Frame Process - DOT11_ACTION_CAT_PUBLIC */
WL_DBG(("Unknown subtype.\n"));
}
-#ifdef WL_CFG80211_GON_COLLISION
- if (config_af_params.drop_tx_req) {
- if (cfg->block_gon_req_tx_count) {
- /* drop gon req tx action frame */
- WL_DBG(("Drop gon req tx action frame: count %d\n",
- cfg->block_gon_req_tx_count));
- goto exit;
- }
- }
-#endif /* WL_CFG80211_GON_COLLISION */
} else if (action_frame_len >= sizeof(wifi_p2psd_gas_pub_act_frame_t)) {
/* service discovery process */
if (action == P2PSD_ACTION_ID_GAS_IREQ ||
return false;
}
}
-
- netinfo = wl_get_netinfo_by_wdev(cfg, cfgdev_to_wdev(cfgdev));
+ netinfo = wl_get_netinfo_by_bssidx(cfg, bssidx);
/* validate channel and p2p ies */
if (config_af_params.search_channel && IS_P2P_SOCIAL(af_params->channel) &&
netinfo && netinfo->bss.ies.probe_req_ie_len) {
if (wl_get_drv_status(cfg, CONNECTING, bcmcfg_to_prmry_ndev(cfg))) {
OSL_SLEEP(50);
}
-#endif // endif
+#endif
/* if scan is ongoing, abort current scan. */
if (wl_get_drv_status_all(cfg, SCANNING)) {
- wl_cfg80211_cancel_scan(cfg);
+ wl_notify_escan_complete(cfg, cfg->escan_info.ndev, true, true);
}
/* Abort P2P listen */
* but after the check of piggyback algorithm.
* To take care of current piggback algo, lets abort the scan here itself.
*/
- wl_cfg80211_cancel_scan(cfg);
+ wl_notify_escan_complete(cfg, dev, true, true);
/* Suspend P2P discovery's search-listen to prevent it from
* starting a scan or changing the channel.
*/
ack = wl_cfgp2p_tx_action_frame(cfg, dev, af_params, bssidx) ? false : true;
dwell_overflow = wl_cfg80211_check_dwell_overflow(requested_dwell, dwell_jiffies);
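+ /* GON RSP was acked but we are still waiting for the next action frame: the peer's
+ * GO Negotiation Confirm was likely missed, so flag it for the retry loop below
+ */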
+ if (ack && (wl_get_drv_status_all(cfg, WAITING_NEXT_ACT_FRM))) {
+ wifi_p2p_pub_act_frame_t *pact_frm;
+ pact_frm = (wifi_p2p_pub_act_frame_t *)(action_frame->data);
+ if (pact_frm->subtype == P2P_PAF_GON_RSP) {
+ WL_ERR(("Miss GO Nego cfm after P2P_PAF_GON_RSP\n"));
+ miss_gon_cfm = true;
+ }
+ }
+
/* if failed, retry it. tx_retry_max value is configure by .... */
- while ((ack == false) && (tx_retry++ < config_af_params.max_tx_retry) &&
+ while ((miss_gon_cfm || (ack == false)) && (tx_retry++ < config_af_params.max_tx_retry) &&
!dwell_overflow) {
#ifdef VSDB
if (af_params->channel) {
#endif /* VSDB */
ack = wl_cfgp2p_tx_action_frame(cfg, dev, af_params, bssidx) ?
false : true;
+ if (miss_gon_cfm && !wl_get_drv_status_all(cfg, WAITING_NEXT_ACT_FRM)) {
+ WL_ERR(("Received GO Nego cfm after P2P_PAF_GON_RSP\n"));
+ miss_gon_cfm = false;
+ }
dwell_overflow = wl_cfg80211_check_dwell_overflow(requested_dwell, dwell_jiffies);
}
* the dwell time, go to listen state again to get next action response frame.
*/
if (ack && config_af_params.extra_listen &&
-#ifdef WL_CFG80211_GON_COLLISION
- !cfg->block_gon_req_tx_count &&
-#endif /* WL_CFG80211_GON_COLLISION */
wl_get_drv_status_all(cfg, WAITING_NEXT_ACT_FRM) &&
cfg->af_sent_channel == cfg->afx_hdl->my_listen_chan) {
s32 extar_listen_time;
}
#endif /* WL_CFG80211_SYNC_GON */
wl_clr_drv_status(cfg, WAITING_NEXT_ACT_FRM, dev);
-
cfg->afx_hdl->pending_tx_act_frm = NULL;
- if (ack) {
- WL_DBG(("-- Action Frame Tx succeeded, listen chan: %d\n",
- cfg->afx_hdl->my_listen_chan));
- } else {
- WL_ERR(("-- Action Frame Tx failed, listen chan: %d\n",
- cfg->afx_hdl->my_listen_chan));
- }
+ WL_INFORM(("-- Action Frame Tx %s, listen chan: %d\n",
+ (ack) ? "succeeded" : "failed", cfg->afx_hdl->my_listen_chan));
-#ifdef WL_CFG80211_GON_COLLISION
- if (cfg->block_gon_req_tx_count) {
- cfg->block_gon_req_tx_count--;
- /* if ack is ture, supplicant will wait more time(100ms).
- * so we will return it as a success to get more time .
- */
- ack = true;
- }
-#endif /* WL_CFG80211_GON_COLLISION */
return ack;
}
bool channel_type_valid,
#endif /* LINUX_VERSION_CODE <= KERNEL_VERSION(3, 7, 0) */
unsigned int wait, const u8* buf, size_t len,
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 2, 0)) || defined(WL_COMPAT_WIRELESS)
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 2, 0))
bool no_cck,
-#endif // endif
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 3, 0)) || defined(WL_COMPAT_WIRELESS)
+#endif
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 3, 0))
bool dont_wait_for_ack,
-#endif // endif
+#endif
u64 *cookie)
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 14, 0) */
{
struct ieee80211_channel *channel = params->chan;
const u8 *buf = params->buf;
size_t len = params->len;
-#endif // endif
+#endif
const struct ieee80211_mgmt *mgmt;
struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
struct net_device *dev = NULL;
#if defined(P2P_IE_MISSING_FIX)
if (!cfg->p2p_prb_noti) {
cfg->p2p_prb_noti = true;
- WL_DBG(("wl_cfg80211_mgmt_tx: TX 802_1X Probe"
- " Response first time.\n"));
+ WL_DBG(("%s: TX 802_1X Probe Response first time.\n",
+ __FUNCTION__));
}
-#endif // endif
+#endif
goto exit;
} else if (ieee80211_is_disassoc(mgmt->frame_control) ||
ieee80211_is_deauth(mgmt->frame_control)) {
sizeof(scb_val_t));
if (err < 0)
WL_ERR(("WLC_SCB_DEAUTHENTICATE_FOR_REASON error %d\n", err));
- WL_ERR(("Disconnect STA : " MACDBG " scb_val.val %d\n",
- MAC2STRDBG(bcm_ether_ntoa((const struct ether_addr *)mgmt->da,
- eabuf)), scb_val.val));
+ WL_ERR(("Disconnect STA : %s scb_val.val %d\n",
+ bcm_ether_ntoa((const struct ether_addr *)mgmt->da, eabuf),
+ scb_val.val));
if (num_associated > 0 && ETHER_ISBCAST(mgmt->da))
wl_delay(400);
* And previous off-channel action frame must be ended before new af tx.
*/
#ifndef WL_CFG80211_VSDB_PRIORITIZE_SCAN_REQUEST
- wl_cfg80211_cancel_scan(cfg);
+ wl_notify_escan_complete(cfg, dev, true, true);
#endif /* not WL_CFG80211_VSDB_PRIORITIZE_SCAN_REQUEST */
}
-#ifdef WL_CLIENT_SAE
- else if (ieee80211_is_auth(mgmt->frame_control)) {
- int err = 0;
- wl_assoc_mgr_cmd_t *cmd;
- char *ambuf = NULL;
- int param_len;
- ack = true;
- if ((dev == bcmcfg_to_prmry_ndev(cfg)) && cfg->p2p) {
- bssidx = wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_DEVICE);
- }
- param_len = sizeof(wl_assoc_mgr_cmd_t) + len;
- ambuf = MALLOCZ(cfg->osh, param_len);
- if (ambuf == NULL) {
- WL_ERR(("unable to allocate frame\n"));
- return -ENOMEM;
- }
- cmd = (wl_assoc_mgr_cmd_t*)ambuf;
- cmd->version = WL_ASSOC_MGR_CURRENT_VERSION;
- cmd->length = len;
- cmd->cmd = WL_ASSOC_MGR_CMD_SEND_AUTH;
- memcpy(&cmd->params, buf, len);
- err = wldev_iovar_setbuf(dev, "assoc_mgr_cmd", ambuf, param_len,
- cfg->ioctl_buf, WLC_IOCTL_SMLEN, &cfg->ioctl_buf_sync);
- if (unlikely(err)) {
- WL_ERR(("Failed to send auth(%d)\n", err));
- ack = false;
- }
- MFREE(cfg->osh, ambuf, param_len);
- cfg80211_mgmt_tx_status(cfgdev, *cookie, buf, len, ack, GFP_KERNEL);
- goto exit;
- }
-#endif /* WL_CLIENT_SAE */
} else {
WL_ERR(("Driver only allows MGMT packet type\n"));
goto exit;
}
- af_params = (wl_af_params_t *)MALLOCZ(cfg->osh, WL_WIFI_AF_PARAMS_SIZE);
+ af_params = (wl_af_params_t *) kzalloc(WL_WIFI_AF_PARAMS_SIZE, GFP_KERNEL);
if (af_params == NULL)
{
af_params->dwell_time = params->wait;
#else
af_params->dwell_time = wait;
-#endif // endif
+#endif
memcpy(action_frame->data, &buf[DOT11_MGMT_HDR_LEN], action_frame->len);
action_frame, action_frame->len, bssidx);
cfg80211_mgmt_tx_status(cfgdev, *cookie, buf, len, ack, GFP_KERNEL);
- MFREE(cfg->osh, af_params, WL_WIFI_AF_PARAMS_SIZE);
+ kfree(af_params);
exit:
return err;
}
+
static void
wl_cfg80211_mgmt_frame_register(struct wiphy *wiphy, bcm_struct_cfgdev *cfgdev,
u16 frame, bool reg)
return;
}
+
static s32
wl_cfg80211_change_bss(struct wiphy *wiphy,
struct net_device *dev,
s32 ap_isolate = 0;
#ifdef PCIE_FULL_DONGLE
s32 ifidx = DHD_BAD_IF;
-#endif // endif
+#endif
#if defined(PCIE_FULL_DONGLE)
dhd_pub_t *dhd;
struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
if (cfg->p2p_net == dev)
dev = bcmcfg_to_prmry_ndev(cfg);
#endif
-#endif // endif
+#endif
if (params->use_cts_prot >= 0) {
}
if (params->ht_opmode >= 0) {
}
- return err;
-}
-
-static int
-wl_get_bandwidth_cap(struct net_device *ndev, uint32 band, uint32 *bandwidth)
-{
- u32 bw = WL_CHANSPEC_BW_20;
- s32 err = BCME_OK;
- s32 bw_cap = 0;
- struct {
- u32 band;
- u32 bw_cap;
- } param = {0, 0};
- u8 ioctl_buf[WLC_IOCTL_SMLEN];
-
- if (band == IEEE80211_BAND_5GHZ) {
- param.band = WLC_BAND_5G;
- err = wldev_iovar_getbuf(ndev, "bw_cap", &param, sizeof(param),
- ioctl_buf, sizeof(ioctl_buf), NULL);
- if (err) {
- if (err != BCME_UNSUPPORTED) {
- WL_ERR(("bw_cap failed, %d\n", err));
- return err;
- } else {
- err = wldev_iovar_getint(ndev, "mimo_bw_cap", &bw_cap);
- if (err) {
- WL_ERR(("error get mimo_bw_cap (%d)\n", err));
- }
- if (bw_cap != WLC_N_BW_20ALL) {
- bw = WL_CHANSPEC_BW_40;
- }
- }
- } else {
- if (WL_BW_CAP_80MHZ(ioctl_buf[0])) {
- bw = WL_CHANSPEC_BW_80;
- } else if (WL_BW_CAP_40MHZ(ioctl_buf[0])) {
- bw = WL_CHANSPEC_BW_40;
- } else {
- bw = WL_CHANSPEC_BW_20;
- }
- }
- } else if (band == IEEE80211_BAND_2GHZ) {
- bw = WL_CHANSPEC_BW_20;
- }
-
- *bandwidth = bw;
return err;
}
chanspec_t chspec = 0;
chanspec_t fw_chspec = 0;
u32 bw = WL_CHANSPEC_BW_20;
+#ifdef WL11ULB
+ u32 ulb_bw = wl_cfg80211_get_ulb_bw(wl_get_cfg(dev), dev->ieee80211_ptr);
+#endif /* WL11ULB */
+
s32 err = BCME_OK;
+ s32 bw_cap = 0;
+ struct {
+ u32 band;
+ u32 bw_cap;
+ } param = {0, 0};
struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
-#if defined(CUSTOM_SET_CPUCORE) || defined(APSTA_RESTRICTED_CHANNEL) || defined(WL_EXT_IAPSTA)
+#if defined(CUSTOM_SET_CPUCORE) || (defined(WL_VIRTUAL_APSTA) && \
+ defined(APSTA_RESTRICTED_CHANNEL))
dhd_pub_t *dhd = (dhd_pub_t *)(cfg->pub);
-#endif /* CUSTOM_SET_CPUCORE || APSTA_RESTRICTED_CHANNEL */
+#endif /* CUSTOM_SET_CPUCORE || (WL_VIRTUAL_APSTA && APSTA_RESTRICTED_CHANNEL) */
dev = ndev_to_wlc_ndev(dev, cfg);
_chan = ieee80211_frequency_to_channel(chan->center_freq);
#ifdef WL_EXT_IAPSTA
- if (dev->ieee80211_ptr->iftype == NL80211_IFTYPE_AP) {
- wl_ext_iapsta_update_iftype(dev, dhd_net2idx(dhd->info, dev), WL_IF_TYPE_AP);
- _chan = wl_ext_iapsta_update_channel(dhd, dev, _chan);
- }
+ _chan = wl_ext_iapsta_update_channel(dev, _chan);
#endif
- WL_MSG(dev->name, "netdev_ifidx(%d), chan_type(%d) target channel(%d) \n",
- dev->ifindex, channel_type, _chan);
-
-#ifdef NOT_YET
- switch (channel_type) {
- case NL80211_CHAN_HT40MINUS:
- /* secondary channel is below the control channel */
- chspec = CH40MHZ_CHSPEC(channel, WL_CHANSPEC_CTL_SB_UPPER);
- break;
- case NL80211_CHAN_HT40PLUS:
- /* secondary channel is above the control channel */
- chspec = CH40MHZ_CHSPEC(channel, WL_CHANSPEC_CTL_SB_LOWER);
- break;
- default:
- chspec = CH20MHZ_CHSPEC(channel);
+ printf("%s: netdev_ifidx(%d), chan_type(%d) target channel(%d) \n",
+ __FUNCTION__, dev->ifindex, channel_type, _chan);
- }
-#endif /* NOT_YET */
-#if defined(APSTA_RESTRICTED_CHANNEL)
+#if defined(WL_VIRTUAL_APSTA) && defined(APSTA_RESTRICTED_CHANNEL)
+#define DEFAULT_2G_SOFTAP_CHANNEL 1
+#define DEFAULT_5G_SOFTAP_CHANNEL 149
if (wl_get_mode_by_netdev(cfg, dev) == WL_MODE_AP &&
- DHD_OPMODE_STA_SOFTAP_CONCURR(dhd) &&
+ (dhd->op_mode & DHD_FLAG_CONCURR_STA_HOSTAP_MODE) ==
+ DHD_FLAG_CONCURR_STA_HOSTAP_MODE &&
wl_get_drv_status(cfg, CONNECTED, bcmcfg_to_prmry_ndev(cfg))) {
u32 *sta_chan = (u32 *)wl_read_prof(cfg,
bcmcfg_to_prmry_ndev(cfg), WL_PROF_CHAN);
}
}
}
-#endif /* APSTA_RESTRICTED_CHANNEL */
+#undef DEFAULT_2G_SOFTAP_CHANNEL
+#undef DEFAULT_5G_SOFTAP_CHANNEL
+#endif /* WL_VIRTUAL_APSTA && APSTA_RESTRICTED_CHANNEL */
- err = wl_get_bandwidth_cap(dev, chan->band, &bw);
- if (err < 0) {
- WL_ERR(("Failed to get bandwidth information, err=%d\n", err));
- return err;
+#ifdef WL11ULB
+ if (ulb_bw) {
+ WL_DBG(("[ULB] setting AP/GO BW to ulb_bw 0x%x \n", ulb_bw));
+ bw = wl_cfg80211_ulbbw_to_ulbchspec(ulb_bw);
+ goto set_channel;
}
+#endif /* WL11ULB */
+ if (chan->band == IEEE80211_BAND_5GHZ) {
+ param.band = WLC_BAND_5G;
+ err = wldev_iovar_getbuf(dev, "bw_cap", &param, sizeof(param),
+ cfg->ioctl_buf, WLC_IOCTL_SMLEN, &cfg->ioctl_buf_sync);
+ if (err) {
+ if (err != BCME_UNSUPPORTED) {
+ WL_ERR(("bw_cap failed, %d\n", err));
+ return err;
+ } else {
+ err = wldev_iovar_getint(dev, "mimo_bw_cap", &bw_cap);
+ if (err) {
+ WL_ERR(("error get mimo_bw_cap (%d)\n", err));
+ }
+ if (bw_cap != WLC_N_BW_20ALL)
+ bw = WL_CHANSPEC_BW_40;
+ }
+ } else {
+ if (WL_BW_CAP_80MHZ(cfg->ioctl_buf[0]))
+ bw = WL_CHANSPEC_BW_80;
+ else if (WL_BW_CAP_40MHZ(cfg->ioctl_buf[0]))
+ bw = WL_CHANSPEC_BW_40;
+ else
+ bw = WL_CHANSPEC_BW_20;
+
+ }
+ } else if (chan->band == IEEE80211_BAND_2GHZ)
+ bw = WL_CHANSPEC_BW_20;
set_channel:
chspec = wf_channel2chspec(_chan, bw);
if (wf_chspec_valid(chspec)) {
} else if (err) {
WL_ERR(("failed to set chanspec error %d\n", err));
}
-#ifdef DISABLE_WL_FRAMEBURST_SOFTAP
- else {
- /* Disable Frameburst only for stand-alone 2GHz SoftAP */
- if (wl_get_mode_by_netdev(cfg, dev) == WL_MODE_AP &&
- DHD_OPMODE_SUPPORTED(cfg->pub, DHD_FLAG_HOSTAP_MODE) &&
- (_chan <= CH_MAX_2G_CHANNEL) &&
- !wl_get_drv_status(cfg, CONNECTED,
- bcmcfg_to_prmry_ndev(cfg))) {
- WL_DBG(("Disabling frameburst on "
- "stand-alone 2GHz SoftAP\n"));
- wl_cfg80211_set_frameburst(cfg, FALSE);
- }
- }
-#endif /* DISABLE_WL_FRAMEBURST_SOFTAP */
} else {
WL_ERR(("failed to convert host chanspec to fw chanspec\n"));
err = BCME_ERROR;
/* Update AP/GO operating channel */
cfg->ap_oper_channel = ieee80211_frequency_to_channel(chan->center_freq);
}
- if (err) {
- wl_flush_fw_log_buffer(bcmcfg_to_prmry_ndev(cfg),
- FW_LOGSET_MASK_ALL);
- }
return err;
}
wl_cfg80211_get_remain_on_channel_ndev(struct bcm_cfg80211 *cfg)
{
struct net_info *_net_info, *next;
- GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wcast-qual"
+#endif
list_for_each_entry_safe(_net_info, next, &cfg->net_list, list) {
- GCC_DIAGNOSTIC_POP();
if (_net_info->ndev &&
test_bit(WL_STATUS_REMAINING_ON_CHANNEL, &_net_info->sme_state))
return _net_info->ndev;
}
-
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic pop
+#endif
return NULL;
}
#endif /* WL_CFG80211_VSDB_PRIORITIZE_SCAN_REQUEST */
return 0;
}
-#define MAX_FILS_IND_IE_LEN 1024u
static s32
-wl_validate_fils_ind_ie(struct net_device *dev, const bcm_tlv_t *filsindie, s32 bssidx)
+wl_validate_wpa2ie(struct net_device *dev, bcm_tlv_t *wpa2ie, s32 bssidx)
{
+ s32 len = 0;
s32 err = BCME_OK;
- struct bcm_cfg80211 *cfg = NULL;
- bcm_iov_buf_t *iov_buf = NULL;
- bcm_xtlv_t* pxtlv;
- int iov_buf_size = 0;
+ u16 auth = 0; /* d11 open authentication */
+ u32 wsec;
+ u32 pval = 0;
+ u32 gval = 0;
+ u32 wpa_auth = 0;
+ wpa_suite_mcast_t *mcast;
+ wpa_suite_ucast_t *ucast;
+ wpa_suite_auth_key_mgmt_t *mgmt;
+ wpa_pmkid_list_t *pmkid;
+ int cnt = 0;
+#ifdef MFP
+ int mfp = 0;
+ struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
+#endif /* MFP */
- if (!dev || !filsindie) {
- WL_ERR(("%s: dev/filsidie is null\n", __FUNCTION__));
- goto exit;
- }
+ u16 suite_count;
+ u8 rsn_cap[2];
+ u32 wme_bss_disable;
- cfg = wl_get_cfg(dev);
- if (!cfg) {
- WL_ERR(("%s: cfg is null\n", __FUNCTION__));
- goto exit;
- }
-
- iov_buf_size = sizeof(bcm_iov_buf_t) + sizeof(bcm_xtlv_t) + filsindie->len - 1;
- iov_buf = MALLOCZ(cfg->osh, iov_buf_size);
- if (!iov_buf) {
- WL_ERR(("%s: iov_buf alloc failed! %d bytes\n", __FUNCTION__, iov_buf_size));
- err = BCME_NOMEM;
- goto exit;
- }
- iov_buf->version = WL_FILS_IOV_VERSION;
- iov_buf->id = WL_FILS_CMD_ADD_IND_IE;
- iov_buf->len = sizeof(bcm_xtlv_t) + filsindie->len - 1;
- pxtlv = (bcm_xtlv_t*)&iov_buf->data[0];
- pxtlv->id = WL_FILS_XTLV_IND_IE;
- pxtlv->len = filsindie->len;
- /* memcpy_s return check not required as buffer is allocated based on ie
- * len
- */
- (void)memcpy_s(pxtlv->data, filsindie->len, filsindie->data, filsindie->len);
-
- err = wldev_iovar_setbuf(dev, "fils", iov_buf, iov_buf_size,
- cfg->ioctl_buf, WLC_IOCTL_SMLEN, &cfg->ioctl_buf_sync);
- if (unlikely(err)) {
- WL_ERR(("fils indication ioctl error (%d)\n", err));
- goto exit;
- }
-
-exit:
- if (err < 0) {
- WL_ERR(("FILS Ind setting error %d\n", err));
- }
-
- if (iov_buf) {
- MFREE(cfg->osh, iov_buf, iov_buf_size);
- }
- return err;
-}
-
-static s32
-wl_validate_wpa2ie(struct net_device *dev, const bcm_tlv_t *wpa2ie, s32 bssidx)
-{
- s32 len = 0;
- s32 err = BCME_OK;
- u16 auth = 0; /* d11 open authentication */
- u32 wsec;
- u32 pval = 0;
- u32 gval = 0;
- u32 wpa_auth = 0;
- const wpa_suite_mcast_t *mcast;
- const wpa_suite_ucast_t *ucast;
- const wpa_suite_auth_key_mgmt_t *mgmt;
- const wpa_pmkid_list_t *pmkid;
- int cnt = 0;
-#ifdef MFP
- int mfp = 0;
- struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
-#endif /* MFP */
-
- u16 suite_count;
- u8 rsn_cap[2];
- u32 wme_bss_disable;
-
- if (wpa2ie == NULL)
+ if (wpa2ie == NULL)
goto exit;
WL_DBG(("Enter \n"));
len = wpa2ie->len - WPA2_VERSION_LEN;
/* check the mcast cipher */
- mcast = (const wpa_suite_mcast_t *)&wpa2ie->data[WPA2_VERSION_LEN];
+ mcast = (wpa_suite_mcast_t *)&wpa2ie->data[WPA2_VERSION_LEN];
switch (mcast->type) {
case WPA_CIPHER_NONE:
gval = 0;
case WPA_CIPHER_AES_CCM:
gval = AES_ENABLED;
break;
-#ifdef BCMWAPI_WPI
- case WAPI_CIPHER_SMS4:
- gval = SMS4_ENABLED;
- break;
-#endif // endif
default:
WL_ERR(("No Security Info\n"));
break;
return BCME_BADLEN;
/* check the unicast cipher */
- ucast = (const wpa_suite_ucast_t *)&mcast[1];
+ ucast = (wpa_suite_ucast_t *)&mcast[1];
suite_count = ltoh16_ua(&ucast->count);
switch (ucast->list[0].type) {
case WPA_CIPHER_NONE:
case WPA_CIPHER_AES_CCM:
pval = AES_ENABLED;
break;
-#ifdef BCMWAPI_WPI
- case WAPI_CIPHER_SMS4:
- pval = SMS4_ENABLED;
- break;
-#endif // endif
default:
WL_ERR(("No Security Info\n"));
}
/* FOR WPS , set SEC_OW_ENABLED */
wsec = (pval | gval | SES_OW_ENABLED);
/* check the AKM */
- mgmt = (const wpa_suite_auth_key_mgmt_t *)&ucast->list[suite_count];
+ mgmt = (wpa_suite_auth_key_mgmt_t *)&ucast->list[suite_count];
suite_count = cnt = ltoh16_ua(&mgmt->count);
while (cnt--) {
switch (mgmt->list[cnt].type) {
case RSN_AKM_MFP_1X:
wpa_auth |= WPA2_AUTH_1X_SHA256;
break;
- case RSN_AKM_FILS_SHA256:
- wpa_auth |= WPA2_AUTH_FILS_SHA256;
- break;
- case RSN_AKM_FILS_SHA384:
- wpa_auth |= WPA2_AUTH_FILS_SHA384;
- break;
-#ifdef WL_SAE
- case RSN_AKM_SAE_PSK:
- wpa_auth |= WPA3_AUTH_SAE_PSK;
- break;
-#endif /* WL_SAE */
#endif /* MFP */
default:
WL_ERR(("No Key Mgmt Info\n"));
}
if ((len -= (WPA_IE_SUITE_COUNT_LEN + (WPA_SUITE_LEN * suite_count))) >= RSN_CAP_LEN) {
- rsn_cap[0] = *(const u8 *)&mgmt->list[suite_count];
- rsn_cap[1] = *((const u8 *)&mgmt->list[suite_count] + 1);
+ rsn_cap[0] = *(u8 *)&mgmt->list[suite_count];
+ rsn_cap[1] = *((u8 *)&mgmt->list[suite_count] + 1);
if (rsn_cap[0] & (RSN_CAP_16_REPLAY_CNTRS << RSN_CAP_PTK_REPLAY_CNTR_SHIFT)) {
wme_bss_disable = 0;
len -= RSN_CAP_LEN;
if (len >= WPA2_PMKID_COUNT_LEN) {
- pmkid = (const wpa_pmkid_list_t *)
- ((const u8 *)&mgmt->list[suite_count] + RSN_CAP_LEN);
+ pmkid = (wpa_pmkid_list_t *)((u8 *)&mgmt->list[suite_count] + RSN_CAP_LEN);
cnt = ltoh16_ua(&pmkid->count);
if (cnt != 0) {
WL_ERR(("AP has non-zero PMKID count. Wrong!\n"));
#ifdef MFP
len -= WPA2_PMKID_COUNT_LEN;
if (len >= WPA_SUITE_LEN) {
- cfg->bip_pos =
- (const u8 *)&mgmt->list[suite_count] + RSN_CAP_LEN + WPA2_PMKID_COUNT_LEN;
+ cfg->bip_pos = (u8 *)&mgmt->list[suite_count] + RSN_CAP_LEN + WPA2_PMKID_COUNT_LEN;
} else {
cfg->bip_pos = NULL;
}
-#endif // endif
+#endif
/* set auth */
err = wldev_iovar_setint_bsscfg(dev, "auth", auth, bssidx);
}
static s32
-wl_validate_wpaie(struct net_device *dev, const wpa_ie_fixed_t *wpaie, s32 bssidx)
+wl_validate_wpaie(struct net_device *dev, wpa_ie_fixed_t *wpaie, s32 bssidx)
{
- const wpa_suite_mcast_t *mcast;
- const wpa_suite_ucast_t *ucast;
- const wpa_suite_auth_key_mgmt_t *mgmt;
+ wpa_suite_mcast_t *mcast;
+ wpa_suite_ucast_t *ucast;
+ wpa_suite_auth_key_mgmt_t *mgmt;
u16 auth = 0; /* d11 open authentication */
u16 count;
s32 err = BCME_OK;
len -= WPA_IE_TAG_FIXED_LEN;
/* check for multicast cipher suite */
if (len < WPA_SUITE_LEN) {
- WL_INFORM_MEM(("no multicast cipher suite\n"));
+ WL_INFORM(("no multicast cipher suite\n"));
goto exit;
}
/* pick up multicast cipher */
- mcast = (const wpa_suite_mcast_t *)&wpaie[1];
+ mcast = (wpa_suite_mcast_t *)&wpaie[1];
len -= WPA_SUITE_LEN;
if (!bcmp(mcast->oui, WPA_OUI, WPA_OUI_LEN)) {
if (IS_WPA_CIPHER(mcast->type)) {
}
/* Check for unicast suite(s) */
if (len < WPA_IE_SUITE_COUNT_LEN) {
- WL_INFORM_MEM(("no unicast suite\n"));
+ WL_INFORM(("no unicast suite\n"));
goto exit;
}
/* walk thru unicast cipher list and pick up what we recognize */
- ucast = (const wpa_suite_ucast_t *)&mcast[1];
+ ucast = (wpa_suite_ucast_t *)&mcast[1];
count = ltoh16_ua(&ucast->count);
len -= WPA_IE_SUITE_COUNT_LEN;
for (i = 0; i < count && len >= WPA_SUITE_LEN;
len -= (count - i) * WPA_SUITE_LEN;
/* Check for auth key management suite(s) */
if (len < WPA_IE_SUITE_COUNT_LEN) {
- WL_INFORM_MEM((" no auth key mgmt suite\n"));
+ WL_INFORM((" no auth key mgmt suite\n"));
goto exit;
}
/* walk thru auth management suite list and pick up what we recognize */
- mgmt = (const wpa_suite_auth_key_mgmt_t *)&ucast->list[count];
+ mgmt = (wpa_suite_auth_key_mgmt_t *)&ucast->list[count];
count = ltoh16_ua(&mgmt->count);
len -= WPA_IE_SUITE_COUNT_LEN;
for (i = 0; i < count && len >= WPA_SUITE_LEN;
case WAPI_CIPHER_SMS4:
ret = SMS4_ENABLED;
break;
-#endif // endif
+#endif
default:
WL_ERR(("No Security Info\n"));
}
return ret;
}
-static u32 wl_get_suite_auth_key_mgmt_type(uint8 type, const wpa_suite_mcast_t *mcast)
+static u32 wl_get_suite_auth_key_mgmt_type(uint8 type)
{
u32 ret = 0;
- u32 is_wpa2 = 0;
-
- if (!bcmp(mcast->oui, WPA2_OUI, WPA2_OUI_LEN)) {
- is_wpa2 = 1;
+ switch (type) {
+ case RSN_AKM_NONE:
+ ret = WPA_AUTH_NONE;
+ break;
+ case RSN_AKM_UNSPECIFIED:
+ ret = WPA_AUTH_UNSPECIFIED;
+ break;
+ case RSN_AKM_PSK:
+ ret = WPA_AUTH_PSK;
+ break;
+ default:
+ WL_ERR(("No Key Mgmt Info\n"));
}
+ return ret;
+}
- WL_INFORM_MEM(("%s, type = %d\n", is_wpa2 ? "WPA2":"WPA", type));
+static u32 wl_get_suite_auth2_key_mgmt_type(uint8 type)
+{
+ u32 ret = 0;
switch (type) {
case RSN_AKM_NONE:
- /* For WPA and WPA2, AUTH_NONE is common */
ret = WPA_AUTH_NONE;
break;
case RSN_AKM_UNSPECIFIED:
- if (is_wpa2) {
- ret = WPA2_AUTH_UNSPECIFIED;
- } else {
- ret = WPA_AUTH_UNSPECIFIED;
- }
+ ret = WPA2_AUTH_UNSPECIFIED;
break;
case RSN_AKM_PSK:
- if (is_wpa2) {
- ret = WPA2_AUTH_PSK;
- } else {
- ret = WPA_AUTH_PSK;
- }
- break;
-#ifdef WL_SAE
- case RSN_AKM_SAE_PSK:
- ret = WPA3_AUTH_SAE_PSK;
+ ret = WPA2_AUTH_PSK;
break;
-#endif /* WL_SAE */
default:
WL_ERR(("No Key Mgmt Info\n"));
}
-
return ret;
}
static s32
-wl_validate_wpaie_wpa2ie(struct net_device *dev, const wpa_ie_fixed_t *wpaie,
- const bcm_tlv_t *wpa2ie, s32 bssidx)
+wl_validate_wpaie_wpa2ie(struct net_device *dev, wpa_ie_fixed_t *wpaie,
+ bcm_tlv_t *wpa2ie, s32 bssidx)
{
- const wpa_suite_mcast_t *mcast;
- const wpa_suite_ucast_t *ucast;
- const wpa_suite_auth_key_mgmt_t *mgmt;
+ wpa_suite_mcast_t *mcast;
+ wpa_suite_ucast_t *ucast;
+ wpa_suite_auth_key_mgmt_t *mgmt;
u16 auth = 0; /* d11 open authentication */
u16 count;
s32 err = BCME_OK;
u32 wpa_auth = 0;
u32 wpa_auth1 = 0;
u32 wpa_auth2 = 0;
+ u8* ptmp;
if (wpaie == NULL || wpa2ie == NULL)
goto exit;
len -= WPA_IE_TAG_FIXED_LEN;
/* check for multicast cipher suite */
if (len < WPA_SUITE_LEN) {
- WL_INFORM_MEM(("no multicast cipher suite\n"));
+ WL_INFORM(("no multicast cipher suite\n"));
goto exit;
}
/* pick up multicast cipher */
- mcast = (const wpa_suite_mcast_t *)&wpaie[1];
+ mcast = (wpa_suite_mcast_t *)&wpaie[1];
len -= WPA_SUITE_LEN;
if (!bcmp(mcast->oui, WPA_OUI, WPA_OUI_LEN)) {
if (IS_WPA_CIPHER(mcast->type)) {
gval |= wl_get_cipher_type(mcast->type);
}
}
- WL_DBG(("\nwpa ie validate\n"));
- WL_DBG(("wpa ie mcast cipher = 0x%X\n", gval));
+ WL_ERR(("\nwpa ie validate\n"));
+ WL_ERR(("wpa ie mcast cipher = 0x%X\n", gval));
/* Check for unicast suite(s) */
if (len < WPA_IE_SUITE_COUNT_LEN) {
- WL_INFORM_MEM(("no unicast suite\n"));
+ WL_INFORM(("no unicast suite\n"));
goto exit;
}
/* walk thru unicast cipher list and pick up what we recognize */
- ucast = (const wpa_suite_ucast_t *)&mcast[1];
+ ucast = (wpa_suite_ucast_t *)&mcast[1];
count = ltoh16_ua(&ucast->count);
len -= WPA_IE_SUITE_COUNT_LEN;
for (i = 0; i < count && len >= WPA_SUITE_LEN;
len -= (count - i) * WPA_SUITE_LEN;
/* Check for auth key management suite(s) */
if (len < WPA_IE_SUITE_COUNT_LEN) {
- WL_INFORM_MEM((" no auth key mgmt suite\n"));
+ WL_INFORM((" no auth key mgmt suite\n"));
goto exit;
}
/* walk thru auth management suite list and pick up what we recognize */
- mgmt = (const wpa_suite_auth_key_mgmt_t *)&ucast->list[count];
+ mgmt = (wpa_suite_auth_key_mgmt_t *)&ucast->list[count];
count = ltoh16_ua(&mgmt->count);
len -= WPA_IE_SUITE_COUNT_LEN;
for (i = 0; i < count && len >= WPA_SUITE_LEN;
i++, len -= WPA_SUITE_LEN) {
if (!bcmp(mgmt->list[i].oui, WPA_OUI, WPA_OUI_LEN)) {
if (IS_WPA_AKM(mgmt->list[i].type)) {
- wpa_auth1 |=
- wl_get_suite_auth_key_mgmt_type(mgmt->list[i].type, mcast);
+
+ wpa_auth1 |= wl_get_suite_auth_key_mgmt_type(mgmt->list[i].type);
}
}
gval = 0;
len = wpa2ie->len;
/* check the mcast cipher */
- mcast = (const wpa_suite_mcast_t *)&wpa2ie->data[WPA2_VERSION_LEN];
- gval = wl_get_cipher_type(mcast->type);
+ mcast = (wpa_suite_mcast_t *)&wpa2ie->data[WPA2_VERSION_LEN];
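+ /* The suite type byte follows the 3-byte OUI, so oui[DOT11_OUI_LEN] reads mcast->type */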
+ ptmp = mcast->oui;
+ gval = wl_get_cipher_type(ptmp[DOT11_OUI_LEN]);
WL_ERR(("wpa2 ie mcast cipher = 0x%X\n", gval));
if ((len -= WPA_SUITE_LEN) <= 0)
}
/* check the unicast cipher */
- ucast = (const wpa_suite_ucast_t *)&mcast[1];
+ ucast = (wpa_suite_ucast_t *)&mcast[1];
suite_count = ltoh16_ua(&ucast->count);
WL_ERR((" WPA2 ucast cipher count=%d\n", suite_count));
pval |= wl_get_cipher_type(ucast->list[0].type);
WL_ERR(("wpa2 ie wsec = 0x%X\n", wsec2));
/* check the AKM */
- mgmt = (const wpa_suite_auth_key_mgmt_t *)&ucast->list[suite_count];
+ mgmt = (wpa_suite_auth_key_mgmt_t *)&ucast->list[suite_count];
suite_count = ltoh16_ua(&mgmt->count);
- wpa_auth2 = wl_get_suite_auth_key_mgmt_type(mgmt->list[0].type, mcast);
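+ /* The AKM suite type byte follows the OUI in the first list entry */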
+ ptmp = (u8 *)&mgmt->list[0];
+ wpa_auth2 = wl_get_suite_auth2_key_mgmt_type(ptmp[DOT11_OUI_LEN]);
WL_ERR(("wpa ie wpa_suite_auth_key_mgmt count=%d, key_mgmt = 0x%X\n", count, wpa_auth2));
if ((len -= (WPA_IE_SUITE_COUNT_LEN + (WPA_SUITE_LEN * suite_count))) >= RSN_CAP_LEN) {
- rsn_cap[0] = *(const u8 *)&mgmt->list[suite_count];
- rsn_cap[1] = *((const u8 *)&mgmt->list[suite_count] + 1);
+ rsn_cap[0] = *(u8 *)&mgmt->list[suite_count];
+ rsn_cap[1] = *((u8 *)&mgmt->list[suite_count] + 1);
if (rsn_cap[0] & (RSN_CAP_16_REPLAY_CNTRS << RSN_CAP_PTK_REPLAY_CNTR_SHIFT)) {
wme_bss_disable = 0;
} else {
return BCME_ERROR;
}
- if (ies->fils_ind_ie &&
- (wl_validate_fils_ind_ie(dev, ies->fils_ind_ie, bssidx) < 0)) {
- bss->security_mode = false;
- return BCME_ERROR;
- }
-
bss->security_mode = true;
if (bss->rsn_ie) {
- MFREE(cfg->osh, bss->rsn_ie, bss->rsn_ie[1]
- + WPA_RSN_IE_TAG_FIXED_LEN);
+ kfree(bss->rsn_ie);
bss->rsn_ie = NULL;
}
if (bss->wpa_ie) {
- MFREE(cfg->osh, bss->wpa_ie, bss->wpa_ie[1]
- + WPA_RSN_IE_TAG_FIXED_LEN);
+ kfree(bss->wpa_ie);
bss->wpa_ie = NULL;
}
if (bss->wps_ie) {
- MFREE(cfg->osh, bss->wps_ie, bss->wps_ie[1] + 2);
+ kfree(bss->wps_ie);
bss->wps_ie = NULL;
}
- if (bss->fils_ind_ie) {
- MFREE(cfg->osh, bss->fils_ind_ie, bss->fils_ind_ie[1]
- + FILS_INDICATION_IE_TAG_FIXED_LEN);
- bss->fils_ind_ie = NULL;
- }
if (ies->wpa_ie != NULL) {
/* WPAIE */
bss->rsn_ie = NULL;
- bss->wpa_ie = MALLOCZ(cfg->osh,
- ies->wpa_ie->length
- + WPA_RSN_IE_TAG_FIXED_LEN);
- if (bss->wpa_ie) {
- memcpy(bss->wpa_ie, ies->wpa_ie,
- ies->wpa_ie->length
- + WPA_RSN_IE_TAG_FIXED_LEN);
- }
+ bss->wpa_ie = kmemdup(ies->wpa_ie,
+ ies->wpa_ie->length + WPA_RSN_IE_TAG_FIXED_LEN,
+ GFP_KERNEL);
} else if (ies->wpa2_ie != NULL) {
/* RSNIE */
bss->wpa_ie = NULL;
- bss->rsn_ie = MALLOCZ(cfg->osh,
- ies->wpa2_ie->len
- + WPA_RSN_IE_TAG_FIXED_LEN);
- if (bss->rsn_ie) {
- memcpy(bss->rsn_ie, ies->wpa2_ie,
- ies->wpa2_ie->len
- + WPA_RSN_IE_TAG_FIXED_LEN);
- }
- }
-#ifdef WL_FILS
- if (ies->fils_ind_ie) {
- bss->fils_ind_ie = MALLOCZ(cfg->osh,
- ies->fils_ind_ie->len
- + FILS_INDICATION_IE_TAG_FIXED_LEN);
- if (bss->fils_ind_ie) {
- memcpy(bss->fils_ind_ie, ies->fils_ind_ie,
- ies->fils_ind_ie->len
- + FILS_INDICATION_IE_TAG_FIXED_LEN);
- }
+ bss->rsn_ie = kmemdup(ies->wpa2_ie,
+ ies->wpa2_ie->len + WPA_RSN_IE_TAG_FIXED_LEN,
+ GFP_KERNEL);
}
-#endif /* WL_FILS */
#if defined(SUPPORT_SOFTAP_WPAWPA2_MIXED)
}
#endif /* SUPPORT_SOFTAP_WPAWPA2_MIXED */
}
if (ies->wps_ie) {
- bss->wps_ie = MALLOCZ(cfg->osh, ies->wps_ie_len);
- if (bss->wps_ie) {
- memcpy(bss->wps_ie, ies->wps_ie, ies->wps_ie_len);
- }
+ bss->wps_ie = kmemdup(ies->wps_ie, ies->wps_ie_len, GFP_KERNEL);
}
}
}
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 4, 0)) || defined(WL_COMPAT_WIRELESS)
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 4, 0))
static s32 wl_cfg80211_bcn_set_params(
struct cfg80211_ap_settings *info,
struct net_device *dev,
WL_DBG(("SSID (%s) len:%zd \n", info->ssid, info->ssid_len));
if (dev_role == NL80211_IFTYPE_AP) {
/* Store the hostapd SSID */
- bzero(cfg->hostapd_ssid.SSID, DOT11_MAX_SSID_LEN);
+ memset(cfg->hostapd_ssid.SSID, 0x00, DOT11_MAX_SSID_LEN);
memcpy(cfg->hostapd_ssid.SSID, info->ssid, info->ssid_len);
- cfg->hostapd_ssid.SSID_len = (uint32)info->ssid_len;
+ cfg->hostapd_ssid.SSID_len = info->ssid_len;
} else {
/* P2P GO */
- bzero(cfg->p2p->ssid.SSID, DOT11_MAX_SSID_LEN);
+ memset(cfg->p2p->ssid.SSID, 0x00, DOT11_MAX_SSID_LEN);
memcpy(cfg->p2p->ssid.SSID, info->ssid, info->ssid_len);
- cfg->p2p->ssid.SSID_len = (uint32)info->ssid_len;
+ cfg->p2p->ssid.SSID_len = info->ssid_len;
}
}
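+ /* Hidden SSID requested: "closednet" keeps the SSID out of beacons/probe responses */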
+ if (info->hidden_ssid != NL80211_HIDDEN_SSID_NOT_IN_USE) {
+ if ((err = wldev_iovar_setint(dev, "closednet", 1)) < 0)
+ WL_ERR(("failed to set hidden : %d\n", err));
+ WL_DBG(("hidden_ssid_enum_val: %d \n", info->hidden_ssid));
+ }
+
return err;
}
-#endif /* LINUX_VERSION >= VERSION(3,4,0) || WL_COMPAT_WIRELESS */
+#endif
static s32
-wl_cfg80211_parse_ies(const u8 *ptr, u32 len, struct parsed_ies *ies)
+wl_cfg80211_parse_ies(u8 *ptr, u32 len, struct parsed_ies *ies)
{
s32 err = BCME_OK;
- bzero(ies, sizeof(struct parsed_ies));
+ memset(ies, 0, sizeof(struct parsed_ies));
/* find the WPSIE */
if ((ies->wps_ie = wl_cfgp2p_find_wpsie(ptr, len)) != NULL) {
WL_DBG(("WPSIE in beacon \n"));
ies->wps_ie_len = ies->wps_ie->length + WPA_RSN_IE_TAG_FIXED_LEN;
} else {
- WL_ERR(("No WPSIE in beacon \n"));
+ WL_DBG(("No WPSIE in beacon \n"));
}
/* find the RSN_IE */
ies->wpa2_ie_len = ies->wpa2_ie->len;
}
- /* find the FILS_IND_IE */
- if ((ies->fils_ind_ie = bcm_parse_tlvs(ptr, len,
- DOT11_MNG_FILS_IND_ID)) != NULL) {
- WL_DBG((" FILS IND IE found\n"));
- ies->fils_ind_ie_len = ies->fils_ind_ie->len;
- }
-
/* find the WPA_IE */
if ((ies->wpa_ie = wl_cfgp2p_find_wpaie(ptr, len)) != NULL) {
WL_DBG((" WPA found\n"));
return err;
}
-
static s32
wl_cfg80211_set_ap_role(
struct bcm_cfg80211 *cfg,
{
s32 err = BCME_OK;
s32 infra = 1;
- s32 ap = 0;
+ s32 ap = 1;
s32 pm;
+ s32 is_rsdb_supported = BCME_ERROR;
s32 bssidx;
s32 apsta = 0;
- bool legacy_chip;
- legacy_chip = wl_legacy_chip_check(cfg);
+ is_rsdb_supported = DHD_OPMODE_SUPPORTED(cfg->pub, DHD_FLAG_RSDB_MODE);
+ if (is_rsdb_supported < 0)
+ return (-ENODEV);
if ((bssidx = wl_get_bssidx_by_wdev(cfg, dev->ieee80211_ptr)) < 0) {
WL_ERR(("Find p2p index from wdev(%p) failed\n", dev->ieee80211_ptr));
return -EINVAL;
}
- WL_INFORM_MEM(("[%s] Bringup SoftAP on bssidx:%d \n", dev->name, bssidx));
-
- if (bssidx != 0 || !legacy_chip) {
- if ((err = wl_cfg80211_add_del_bss(cfg, dev, bssidx,
- WL_IF_TYPE_AP, 0, NULL)) < 0) {
- WL_ERR(("wl add_del_bss returned error:%d\n", err));
- return err;
- }
- }
-
- /*
- * For older chips, "bss" iovar does not support
- * bsscfg role change/upgradation, and still
- * return BCME_OK on attempt
- * Hence, below traditional way to handle the same
- */
-
- if ((err = wldev_ioctl_get(dev,
- WLC_GET_AP, &ap, sizeof(s32))) < 0) {
- WL_ERR(("Getting AP mode failed %d \n", err));
- return err;
- }
-
- if (!ap) {
- /* AP mode switch not supported. Try setting up AP explicitly */
- err = wldev_iovar_getint(dev, "apsta", (s32 *)&apsta);
- if (unlikely(err)) {
- WL_ERR(("Could not get apsta %d\n", err));
- return err;
- }
- if (apsta == 0) {
- /* If apsta is not set, set it */
-
- /* Check for any connected interfaces before wl down */
- if (wl_get_drv_status_all(cfg, CONNECTED) > 0) {
- WL_ERR(("Concurrent i/f operational. can't do wl down"));
- return BCME_ERROR;
- }
- err = wldev_ioctl_set(dev, WLC_DOWN, &ap, sizeof(s32));
- if (err < 0) {
- WL_ERR(("WLC_DOWN error %d\n", err));
+ /* AP on primary Interface */
+ if (bssidx == 0) {
+ if (is_rsdb_supported) {
+ if ((err = wl_cfg80211_add_del_bss(cfg, dev, bssidx,
+ NL80211_IFTYPE_AP, 0, NULL)) < 0) {
+ WL_ERR(("wl add_del_bss returned error:%d\n", err));
return err;
}
- err = wldev_iovar_setint(dev, "apsta", 0);
- if (err < 0) {
- WL_ERR(("wl apsta 0 error %d\n", err));
- return err;
+ } else if (is_rsdb_supported == 0) {
+ /* AP mode switch not supported. Try setting up AP explicitly */
+ err = wldev_iovar_getint(dev, "apsta", (s32 *)&apsta);
+ if (unlikely(err)) {
+ WL_ERR(("Could not get apsta %d\n", err));
}
- ap = 1;
- if ((err = wldev_ioctl_set(dev,
+ if (1) { /* terence: FIXME - apsta check bypassed; always reconfigure AP mode */
+ /* If apsta is not set, set it */
+ err = wldev_ioctl_set(dev, WLC_DOWN, &ap, sizeof(s32));
+ if (err < 0) {
+ WL_ERR(("WLC_DOWN error %d\n", err));
+ return err;
+ }
+ err = wldev_iovar_setint(dev, "apsta", 0);
+ if (err < 0) {
+ WL_ERR(("wl apsta 0 error %d\n", err));
+ return err;
+ }
+ if ((err = wldev_ioctl_set(dev,
WLC_SET_AP, &ap, sizeof(s32))) < 0) {
- WL_ERR(("setting AP mode failed %d \n", err));
- return err;
+ WL_ERR(("setting AP mode failed %d \n", err));
+ return err;
+ }
}
}
- } else if (bssidx == 0 && legacy_chip) {
- err = wldev_ioctl_set(dev, WLC_DOWN, &ap, sizeof(s32));
- if (err < 0) {
- WL_ERR(("WLC_DOWN error %d\n", err));
- return err;
- }
- err = wldev_iovar_setint(dev, "apsta", 0);
- if (err < 0) {
- WL_ERR(("wl apsta 0 error %d\n", err));
- return err;
- }
- ap = 1;
- if ((err = wldev_ioctl_set(dev, WLC_SET_AP, &ap, sizeof(s32))) < 0) {
- WL_ERR(("setting AP mode failed %d \n", err));
- return err;
- }
- }
- if (bssidx == 0) {
pm = 0;
if ((err = wldev_ioctl_set(dev, WLC_SET_PM, &pm, sizeof(pm))) != 0) {
WL_ERR(("wl PM 0 returned error:%d\n", err));
WL_ERR(("SET INFRA error %d\n", err));
return err;
}
+ } else {
+ WL_DBG(("Bringup SoftAP on virtual Interface bssidx:%d \n", bssidx));
+ if ((err = wl_cfg80211_add_del_bss(cfg, dev,
+ bssidx, NL80211_IFTYPE_AP, 0, NULL)) < 0) {
+ WL_ERR(("wl bss ap returned error:%d\n", err));
+ return err;
+ }
}
/* On success, mark AP creation in progress. */
return 0;
}
+
/* In RSDB downgrade cases, the link up event can get delayed upto 7-8 secs */
#define MAX_AP_LINK_WAIT_TIME 10000
static s32
s32 join_params_size = 0;
s32 ap = 1;
s32 wsec;
-#ifdef DISABLE_11H_SOFTAP
- s32 spect = 0;
-#endif /* DISABLE_11H_SOFTAP */
+#ifdef WLMESH
+ bool retried = false;
+#endif
#ifdef SOFTAP_UAPSD_OFF
uint32 wme_apsd = 0;
#endif /* SOFTAP_UAPSD_OFF */
s32 err = BCME_OK;
s32 is_rsdb_supported = BCME_ERROR;
- long timeout;
+ u32 timeout;
+#if defined(DHD_DEBUG) && defined(DHD_FW_COREDUMP)
dhd_pub_t *dhdp = (dhd_pub_t *)(cfg->pub);
- char sec[32];
+#endif /* DHD_DEBUG && DHD_FW_COREDUMP */
is_rsdb_supported = DHD_OPMODE_SUPPORTED(cfg->pub, DHD_FLAG_RSDB_MODE);
if (is_rsdb_supported < 0)
wl_cfg80211_scan_abort(cfg);
if (dev_role == NL80211_IFTYPE_P2P_GO) {
- wl_ext_get_sec(dev, 0, sec, sizeof(sec));
- WL_MSG(dev->name, "Creating GO with sec=%s\n", sec);
is_bssup = wl_cfg80211_bss_isup(dev, bssidx);
if (!is_bssup && (ies->wpa2_ie != NULL)) {
/* Clear the status bit after use */
wl_clr_drv_status(cfg, AP_CREATING, dev);
-#ifdef DISABLE_11H_SOFTAP
- if (is_rsdb_supported == 0) {
- err = wldev_ioctl_set(dev, WLC_DOWN, &ap, sizeof(s32));
- if (err < 0) {
- WL_ERR(("WLC_DOWN error %d\n", err));
- goto exit;
- }
- }
- err = wldev_ioctl_set(dev, WLC_SET_SPECT_MANAGMENT,
- &spect, sizeof(s32));
- if (err < 0) {
- WL_ERR(("SET SPECT_MANAGMENT error %d\n", err));
- goto exit;
- }
-#endif /* DISABLE_11H_SOFTAP */
-
-#ifdef WL_DISABLE_HE_SOFTAP
- err = wl_cfg80211_set_he_mode(dev, cfg, bssidx, WL_IF_TYPE_AP, FALSE);
- if (err < 0) {
- WL_ERR(("failed to set he features, error=%d\n", err));
- }
-#endif /* WL_DISABLE_HE_SOFTAP */
#ifdef SOFTAP_UAPSD_OFF
err = wldev_iovar_setbuf_bsscfg(dev, "wme_apsd", &wme_apsd, sizeof(wme_apsd),
WL_ERR(("failed to disable uapsd, error=%d\n", err));
}
#endif /* SOFTAP_UAPSD_OFF */
+ dhd_conf_set_wme(cfg->pub, 1);
err = wldev_ioctl_set(dev, WLC_UP, &ap, sizeof(s32));
if (unlikely(err)) {
#ifdef MFP
if (cfg->bip_pos) {
err = wldev_iovar_setbuf_bsscfg(dev, "bip",
- (const void *)(cfg->bip_pos), WPA_SUITE_LEN, cfg->ioctl_buf,
+ (void *)(cfg->bip_pos), WPA_SUITE_LEN, cfg->ioctl_buf,
WLC_IOCTL_SMLEN, bssidx, &cfg->ioctl_buf_sync);
if (err < 0) {
WL_ERR(("bip set error %d\n", err));
- {
+ if (wl_customer6_legacy_chip_check(cfg,
+ bcmcfg_to_prmry_ndev(cfg))) {
+ /* Ignore bip error: some older firmware doesn't
+ * support the bip iovar / returns BCME_NOTUP when
+ * setting bip from the AP bring-up context. Such
+ * firmware includes bip in the RSN IE by default,
+ * so it's okay to ignore the error.
+ */
+ err = BCME_OK;
+ } else {
goto exit;
}
}
WL_ERR(("Could not get wsec %d\n", err));
goto exit;
}
- if (dhdp->conf->chip == BCM43430_CHIP_ID && bssidx > 0 &&
- (wsec & (TKIP_ENABLED|AES_ENABLED))) {
- wsec |= WSEC_SWFLAG; // terence 20180628: fix me, this is a workaround
- err = wldev_iovar_setint_bsscfg(dev, "wsec", wsec, bssidx);
- if (err < 0) {
- WL_ERR(("wsec error %d\n", err));
- goto exit;
- }
- }
if ((wsec == WEP_ENABLED) && cfg->wep_key.len) {
WL_DBG(("Applying buffered WEP KEY \n"));
err = wldev_iovar_setbuf_bsscfg(dev, "wsec_key", &cfg->wep_key,
sizeof(struct wl_wsec_key), cfg->ioctl_buf,
WLC_IOCTL_MAXLEN, bssidx, &cfg->ioctl_buf_sync);
/* clear the key after use */
- bzero(&cfg->wep_key, sizeof(struct wl_wsec_key));
+ memset(&cfg->wep_key, 0, sizeof(struct wl_wsec_key));
if (unlikely(err)) {
WL_ERR(("WLC_SET_KEY error (%d)\n", err));
goto exit;
}
#endif /* MFP */
- bzero(&join_params, sizeof(join_params));
+#ifdef WLMESH
+ssid_retry:
+#endif
+ memset(&join_params, 0, sizeof(join_params));
/* join parameters starts with ssid */
join_params_size = sizeof(join_params.ssid);
join_params.ssid.SSID_len = MIN(cfg->hostapd_ssid.SSID_len,
join_params.ssid.SSID_len);
join_params.ssid.SSID_len = htod32(join_params.ssid.SSID_len);
- wl_ext_get_sec(dev, 0, sec, sizeof(sec));
- WL_MSG(dev->name, "Creating AP with sec=%s\n", sec);
/* create softap */
if ((err = wldev_ioctl_set(dev, WLC_SET_SSID, &join_params,
join_params_size)) != 0) {
- WL_ERR(("SoftAP/GO set ssid failed! \n"));
+ WL_ERR(("SoftAP/GO set ssid failed! %d\n", err));
goto exit;
} else {
WL_DBG((" SoftAP SSID \"%s\" \n", join_params.ssid.SSID));
}
}
- } else {
- WL_ERR(("Wrong interface type %d\n", dev_role));
- goto exit;
}
/* Wait for Linkup event to mark successful AP/GO bring up */
timeout = wait_event_interruptible_timeout(cfg->netif_change_event,
wl_get_drv_status(cfg, AP_CREATED, dev), msecs_to_jiffies(MAX_AP_LINK_WAIT_TIME));
if (timeout <= 0 || !wl_get_drv_status(cfg, AP_CREATED, dev)) {
+#ifdef WLMESH
+ if (!retried) {
+ retried = true;
+ WL_ERR(("Link up didn't come for AP interface. Try to set ssid again to recover it! \n"));
+ goto ssid_retry;
+ }
+#endif
WL_ERR(("Link up didn't come for AP interface. AP/GO creation failed! \n"));
if (timeout == -ERESTARTSYS) {
WL_ERR(("waitqueue was interrupted by a signal, returns -ERESTARTSYS\n"));
err = -ERESTARTSYS;
goto exit;
}
- if (dhd_query_bus_erros(dhdp)) {
- err = -ENODEV;
- goto exit;
- }
- dhdp->iface_op_failed = TRUE;
#if defined(DHD_DEBUG) && defined(DHD_FW_COREDUMP)
if (dhdp->memdump_enabled) {
dhdp->memdump_type = DUMP_TYPE_AP_LINKUP_FAILURE;
err = -ENODEV;
goto exit;
}
- SUPP_LOG(("AP/GO Link up\n"));
exit:
if (cfg->wep_key.len) {
- bzero(&cfg->wep_key, sizeof(struct wl_wsec_key));
+ memset(&cfg->wep_key, 0, sizeof(struct wl_wsec_key));
}
#ifdef MFP
}
#endif /* MFP */
- if (err) {
- SUPP_LOG(("AP/GO bring up fail. err:%d\n", err));
- }
return err;
}
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 4, 0)) || defined(WL_COMPAT_WIRELESS)
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 4, 0))
s32
wl_cfg80211_parse_ap_ies(
struct net_device *dev,
s32 err = BCME_OK;
/* Parse Beacon IEs */
- if (wl_cfg80211_parse_ies((const u8 *)info->tail,
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wcast-qual"
+#endif
+ if (wl_cfg80211_parse_ies((u8 *)info->tail,
info->tail_len, ies) < 0) {
WL_ERR(("Beacon get IEs failed \n"));
err = -EINVAL;
goto fail;
}
-
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic pop
+#endif
vndr = (const u8 *)info->proberesp_ies;
- vndr_ie_len = (uint32)info->proberesp_ies_len;
+ vndr_ie_len = info->proberesp_ies_len;
if (dhd->op_mode & DHD_FLAG_HOSTAP_MODE) {
/* SoftAP mode */
mgmt = (const struct ieee80211_mgmt *)info->probe_resp;
if (mgmt != NULL) {
vndr = (const u8 *)&mgmt->u.probe_resp.variable;
- vndr_ie_len = (uint32)(info->probe_resp_len -
- offsetof(const struct ieee80211_mgmt, u.probe_resp.variable));
+ vndr_ie_len = info->probe_resp_len -
+ offsetof(const struct ieee80211_mgmt, u.probe_resp.variable);
}
}
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wcast-qual"
+#endif
/* Parse Probe Response IEs */
- if (wl_cfg80211_parse_ies((const u8 *)vndr, vndr_ie_len, &prb_ies) < 0) {
+ if (wl_cfg80211_parse_ies((u8 *)vndr, vndr_ie_len, &prb_ies) < 0) {
WL_ERR(("PROBE RESP get IEs failed \n"));
err = -EINVAL;
}
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic pop
+#endif
fail:
return err;
}
vndr = (const u8 *)info->proberesp_ies;
- vndr_ie_len = (uint32)info->proberesp_ies_len;
+ vndr_ie_len = info->proberesp_ies_len;
if (dhd->op_mode & DHD_FLAG_HOSTAP_MODE) {
/* SoftAP mode */
mgmt = (const struct ieee80211_mgmt *)info->probe_resp;
if (mgmt != NULL) {
vndr = (const u8 *)&mgmt->u.probe_resp.variable;
- vndr_ie_len = (uint32)(info->probe_resp_len -
- offsetof(struct ieee80211_mgmt, u.probe_resp.variable));
+ vndr_ie_len = info->probe_resp_len -
+ offsetof(struct ieee80211_mgmt, u.probe_resp.variable);
}
}
return err;
}
-#endif /* LINUX_VERSION >= VERSION(3,4,0) || WL_COMPAT_WIRELESS */
+#endif
static s32 wl_cfg80211_hostapd_sec(
struct net_device *dev,
if (bss->wps_ie &&
memcmp(bss->wps_ie, ies->wps_ie, ies->wps_ie_len)) {
WL_DBG((" WPS IE is changed\n"));
- MFREE(cfg->osh, bss->wps_ie, bss->wps_ie[1] + 2);
- bss->wps_ie = MALLOCZ(cfg->osh, ies->wps_ie_len);
- if (bss->wps_ie) {
- memcpy(bss->wps_ie, ies->wps_ie, ies->wps_ie_len);
- }
+ kfree(bss->wps_ie);
+ bss->wps_ie = kmemdup(ies->wps_ie, ies->wps_ie_len, GFP_KERNEL);
} else if (bss->wps_ie == NULL) {
WL_DBG((" WPS IE is added\n"));
- bss->wps_ie = MALLOCZ(cfg->osh, ies->wps_ie_len);
- if (bss->wps_ie) {
- memcpy(bss->wps_ie, ies->wps_ie, ies->wps_ie_len);
- }
+ bss->wps_ie = kmemdup(ies->wps_ie, ies->wps_ie_len, GFP_KERNEL);
}
-#if defined(SUPPORT_SOFTAP_WPAWPA2_MIXED)
- if (ies->wpa_ie != NULL && ies->wpa2_ie != NULL) {
- WL_ERR(("update bss - wpa_ie and wpa2_ie is not null\n"));
- if (!bss->security_mode) {
- /* change from open mode to security mode */
- update_bss = true;
- bss->wpa_ie = MALLOCZ(cfg->osh,
- ies->wpa_ie->length + WPA_RSN_IE_TAG_FIXED_LEN);
- if (bss->wpa_ie) {
- memcpy(bss->wpa_ie, ies->wpa_ie,
- ies->wpa_ie->length + WPA_RSN_IE_TAG_FIXED_LEN);
- }
- bss->rsn_ie = MALLOCZ(cfg->osh,
- ies->wpa2_ie->len + WPA_RSN_IE_TAG_FIXED_LEN);
- if (bss->rsn_ie) {
- memcpy(bss->rsn_ie, ies->wpa2_ie,
- ies->wpa2_ie->len + WPA_RSN_IE_TAG_FIXED_LEN);
- }
- } else {
- /* change from (WPA or WPA2 or WPA/WPA2) to WPA/WPA2 mixed mode */
- if (bss->wpa_ie) {
- if (memcmp(bss->wpa_ie,
- ies->wpa_ie, ies->wpa_ie->length +
- WPA_RSN_IE_TAG_FIXED_LEN)) {
- MFREE(cfg->osh, bss->wpa_ie,
- bss->wpa_ie[1] + WPA_RSN_IE_TAG_FIXED_LEN);
- update_bss = true;
- bss->wpa_ie = MALLOCZ(cfg->osh,
- ies->wpa_ie->length
- + WPA_RSN_IE_TAG_FIXED_LEN);
- if (bss->wpa_ie) {
- memcpy(bss->wpa_ie, ies->wpa_ie,
- ies->wpa_ie->length
- + WPA_RSN_IE_TAG_FIXED_LEN);
- }
- }
- }
- else {
- update_bss = true;
- bss->wpa_ie = MALLOCZ(cfg->osh,
- ies->wpa_ie->length + WPA_RSN_IE_TAG_FIXED_LEN);
- if (bss->wpa_ie) {
- memcpy(bss->wpa_ie, ies->wpa_ie,
- ies->wpa_ie->length
- + WPA_RSN_IE_TAG_FIXED_LEN);
- }
- }
- if (bss->rsn_ie) {
- if (memcmp(bss->rsn_ie,
- ies->wpa2_ie,
- ies->wpa2_ie->len + WPA_RSN_IE_TAG_FIXED_LEN)) {
- update_bss = true;
- MFREE(cfg->osh, bss->rsn_ie,
- bss->rsn_ie[1] + WPA_RSN_IE_TAG_FIXED_LEN);
- bss->rsn_ie = MALLOCZ(cfg->osh,
- ies->wpa2_ie->len
- + WPA_RSN_IE_TAG_FIXED_LEN);
- if (bss->rsn_ie) {
- memcpy(bss->rsn_ie, ies->wpa2_ie,
- ies->wpa2_ie->len
- + WPA_RSN_IE_TAG_FIXED_LEN);
- }
- }
- }
- else {
- update_bss = true;
- bss->rsn_ie = MALLOCZ(cfg->osh,
- ies->wpa2_ie->len
- + WPA_RSN_IE_TAG_FIXED_LEN);
- if (bss->rsn_ie) {
- memcpy(bss->rsn_ie, ies->wpa2_ie,
- ies->wpa2_ie->len
- + WPA_RSN_IE_TAG_FIXED_LEN);
- }
- }
- }
- WL_ERR(("update_bss=%d\n", update_bss));
- if (update_bss) {
- bss->security_mode = true;
- wl_cfg80211_bss_up(cfg, dev, bssidx, 0);
- if (wl_validate_wpaie_wpa2ie(dev, ies->wpa_ie,
- ies->wpa2_ie, bssidx) < 0) {
- return BCME_ERROR;
- }
- wl_cfg80211_bss_up(cfg, dev, bssidx, 1);
- }
-
- }
- else
-#endif /* SUPPORT_SOFTAP_WPAWPA2_MIXED */
if ((ies->wpa_ie != NULL || ies->wpa2_ie != NULL)) {
if (!bss->security_mode) {
/* change from open mode to security mode */
update_bss = true;
if (ies->wpa_ie != NULL) {
- bss->wpa_ie = MALLOCZ(cfg->osh,
- ies->wpa_ie->length + WPA_RSN_IE_TAG_FIXED_LEN);
- if (bss->wpa_ie) {
- memcpy(bss->wpa_ie,
- ies->wpa_ie,
- ies->wpa_ie->length
- + WPA_RSN_IE_TAG_FIXED_LEN);
- }
+ bss->wpa_ie = kmemdup(ies->wpa_ie,
+ ies->wpa_ie->length + WPA_RSN_IE_TAG_FIXED_LEN,
+ GFP_KERNEL);
} else {
- bss->rsn_ie = MALLOCZ(cfg->osh,
- ies->wpa2_ie->len + WPA_RSN_IE_TAG_FIXED_LEN);
- if (bss->rsn_ie) {
- memcpy(bss->rsn_ie,
- ies->wpa2_ie,
- ies->wpa2_ie->len
- + WPA_RSN_IE_TAG_FIXED_LEN);
- }
+ bss->rsn_ie = kmemdup(ies->wpa2_ie,
+ ies->wpa2_ie->len + WPA_RSN_IE_TAG_FIXED_LEN,
+ GFP_KERNEL);
}
} else if (bss->wpa_ie) {
/* change from WPA2 mode to WPA mode */
if (ies->wpa_ie != NULL) {
update_bss = true;
- MFREE(cfg->osh, bss->rsn_ie,
- bss->rsn_ie[1] + WPA_RSN_IE_TAG_FIXED_LEN);
+ kfree(bss->rsn_ie);
bss->rsn_ie = NULL;
- bss->wpa_ie = MALLOCZ(cfg->osh,
- ies->wpa_ie->length + WPA_RSN_IE_TAG_FIXED_LEN);
- if (bss->wpa_ie) {
- memcpy(bss->wpa_ie,
- ies->wpa_ie,
- ies->wpa_ie->length
- + WPA_RSN_IE_TAG_FIXED_LEN);
- }
+ bss->wpa_ie = kmemdup(ies->wpa_ie,
+ ies->wpa_ie->length + WPA_RSN_IE_TAG_FIXED_LEN,
+ GFP_KERNEL);
} else if (memcmp(bss->rsn_ie,
ies->wpa2_ie, ies->wpa2_ie->len
+ WPA_RSN_IE_TAG_FIXED_LEN)) {
update_bss = true;
- MFREE(cfg->osh, bss->rsn_ie,
- bss->rsn_ie[1] + WPA_RSN_IE_TAG_FIXED_LEN);
- bss->rsn_ie = MALLOCZ(cfg->osh,
- ies->wpa2_ie->len + WPA_RSN_IE_TAG_FIXED_LEN);
- if (bss->rsn_ie) {
- memcpy(bss->rsn_ie,
- ies->wpa2_ie,
- ies->wpa2_ie->len
- + WPA_RSN_IE_TAG_FIXED_LEN);
- }
+ kfree(bss->rsn_ie);
+ bss->rsn_ie = kmemdup(ies->wpa2_ie,
+ ies->wpa2_ie->len + WPA_RSN_IE_TAG_FIXED_LEN,
+ GFP_KERNEL);
bss->wpa_ie = NULL;
}
}
return 0;
}
-static s32
#if defined(WL_SUPPORT_BACKPORTED_KPATCHES) || (LINUX_VERSION_CODE >= KERNEL_VERSION(3, \
2, 0))
+static s32
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 19, 0))
wl_cfg80211_del_station(
struct wiphy *wiphy, struct net_device *ndev,
}
}
err = wl_cfg80211_check_in4way(cfg, ndev, DONT_DELETE_GC_AFTER_WPS,
- WL_EXT_STATUS_DELETE_STA, (void *)mac_addr);
+ WL_EXT_STATUS_DELETE_GC, (void *)mac_addr);
if (err) {
return 0;
}
} else {
#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 19, 0)) */
#endif /* CUSTOM_BLOCK_DEAUTH_AT_EAP_FAILURE */
-
-#ifdef WL_WPS_SYNC
- if (wl_wps_session_update(ndev,
- WPS_STATE_DISCONNECT_CLIENT, mac_addr) == BCME_UNSUPPORTED) {
- /* Ignore disconnect command from upper layer */
- WL_INFORM_MEM(("[WPS] Ignore client disconnect.\n"));
- } else
-#endif /* WL_WPS_SYNC */
- {
- scb_val.val = DOT11_RC_DEAUTH_LEAVING;
- WL_MSG(dev->name, "Disconnect STA : " MACDBG " scb_val.val %d\n",
- MAC2STRDBG(bcm_ether_ntoa((const struct ether_addr *)mac_addr,
- eabuf)), scb_val.val);
- /* need to guarantee EAP-Failure send out before deauth */
- dhd_wait_pend8021x(dev);
- err = wldev_ioctl_set(dev, WLC_SCB_DEAUTHENTICATE_FOR_REASON, &scb_val,
- sizeof(scb_val_t));
- if (err < 0) {
- WL_ERR(("WLC_SCB_DEAUTHENTICATE_FOR_REASON err %d\n", err));
- }
- }
+#ifndef BCMDBUS
+ dhd_wait_pend8021x(dev);
+#endif /* !BCMDBUS */
+ scb_val.val = DOT11_RC_DEAUTH_LEAVING;
+ err = wldev_ioctl_set(dev, WLC_SCB_DEAUTHENTICATE_FOR_REASON, &scb_val,
+ sizeof(scb_val_t));
+ if (err < 0)
+ WL_ERR(("WLC_SCB_DEAUTHENTICATE_FOR_REASON err %d\n", err));
#ifdef CUSTOM_BLOCK_DEAUTH_AT_EAP_FAILURE
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 19, 0))
}
#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 19, 0)) */
#endif /* CUSTOM_BLOCK_DEAUTH_AT_EAP_FAILURE */
+ printf("%s: Disconnect STA : %s scb_val.val %d\n", __FUNCTION__,
+ bcm_ether_ntoa((const struct ether_addr *)mac_addr, eabuf),
+ scb_val.val);
if (num_associated > 0 && ETHER_ISBCAST(mac_addr))
wl_delay(400);
return 0;
}
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 16, 0))
static s32
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 16, 0))
wl_cfg80211_change_station(
struct wiphy *wiphy,
struct net_device *dev,
const u8 *mac,
struct station_parameters *params)
#else
-static s32
wl_cfg80211_change_station(
struct wiphy *wiphy,
struct net_device *dev,
u8 *mac,
struct station_parameters *params)
-#endif // endif
+#endif
{
- int err = BCME_OK;
+ int err;
+#if defined(WL_ENABLE_P2P_IF)
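+ /* cfg is only referenced by ndev_to_wlc_ndev() when WL_ENABLE_P2P_IF is defined; guard it to avoid an unused-variable warning otherwise */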
struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+#endif
struct net_device *ndev = ndev_to_wlc_ndev(dev, cfg);
WL_DBG(("SCB_AUTHORIZE mac_addr:"MACDBG" sta_flags_mask:0x%x "
"sta_flags_set:0x%x iface:%s \n", MAC2STRDBG(mac),
params->sta_flags_mask, params->sta_flags_set, ndev->name));
- if ((wl_get_mode_by_netdev(cfg, dev) == WL_MODE_BSS) &&
- !(wl_get_drv_status(cfg, CONNECTED, dev))) {
- /* Return error indicating not in connected state */
- WL_ERR(("Ignore SCB_AUTHORIZE/DEAUTHORIZE in non connected state\n"));
- return -ENOTSUPP;
- }
-
/* Processing only authorize/de-authorize flag for now */
if (!(params->sta_flags_mask & BIT(NL80211_STA_FLAG_AUTHORIZED))) {
WL_ERR(("WLC_SCB_AUTHORIZE sta_flags_mask not set \n"));
}
if (!(params->sta_flags_set & BIT(NL80211_STA_FLAG_AUTHORIZED))) {
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 16, 0))
+ err = wldev_ioctl_set(ndev, WLC_SCB_DEAUTHORIZE, (u8 *)mac, ETH_ALEN);
+#else
err = wldev_ioctl_set(ndev, WLC_SCB_DEAUTHORIZE, mac, ETH_ALEN);
- if (unlikely(err)) {
+#endif
+ if (err)
WL_ERR(("WLC_SCB_DEAUTHORIZE error (%d)\n", err));
- } else {
- WL_INFORM_MEM(("[%s] WLC_SCB_DEAUTHORIZE " MACDBG "\n",
- ndev->name, MAC2STRDBG(mac)));
- }
return err;
}
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 16, 0))
+ err = wldev_ioctl_set(ndev, WLC_SCB_AUTHORIZE, (u8 *)mac, ETH_ALEN);
+#else
err = wldev_ioctl_set(ndev, WLC_SCB_AUTHORIZE, mac, ETH_ALEN);
- if (unlikely(err)) {
+#endif
+ if (err)
WL_ERR(("WLC_SCB_AUTHORIZE error (%d)\n", err));
- } else {
- WL_INFORM_MEM(("[%s] WLC_SCB_AUTHORIZE " MACDBG "\n",
- ndev->name, MAC2STRDBG(mac)));
-#ifdef WL_WPS_SYNC
- wl_wps_session_update(ndev, WPS_STATE_AUTHORIZE, mac);
-#endif /* WL_WPS_SYNC */
- }
#ifdef DHD_LOSSLESS_ROAMING
wl_del_roam_timeout(cfg);
-#endif // endif
-
+#endif
return err;
}
#endif /* WL_SUPPORT_BACKPORTED_KPATCHES || KERNEL_VER >= KERNEL_VERSION(3, 2, 0)) */
int err;
u32 ps_pretend;
wl_scb_probe_t scb_probe;
- u32 ps_pretend_retries;
bzero(&scb_probe, sizeof(wl_scb_probe_t));
scb_probe.scb_timeout = WL_SCB_TIMEOUT;
return err;
}
- ps_pretend_retries = WL_PSPRETEND_RETRY_LIMIT;
- err = wldev_iovar_setint(dev, "pspretend_retry_limit", ps_pretend_retries);
- if (unlikely(err)) {
- if (err == BCME_UNSUPPORTED) {
- /* Ignore error if fw doesn't support the iovar */
- WL_DBG(("set 'pspretend_retry_limit %d' failed, error = %d\n",
- ps_pretend_retries, err));
- } else {
- WL_ERR(("set 'pspretend_retry_limit %d' failed, error = %d\n",
- ps_pretend_retries, err));
- return err;
- }
- }
-
ps_pretend = MAX(WL_SCB_MAX_PROBE / 2, WL_MIN_PSPRETEND_THRESHOLD);
err = wldev_iovar_setint(dev, "pspretend_threshold", ps_pretend);
if (unlikely(err)) {
return 0;
}
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 4, 0)) || defined(WL_COMPAT_WIRELESS)
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 4, 0))
static s32
wl_cfg80211_start_ap(
struct wiphy *wiphy,
s32 bssidx = 0;
u32 dev_role = 0;
dhd_pub_t *dhd = (dhd_pub_t *)(cfg->pub);
+#ifdef WLMESH
+ struct wl_join_params join_params;
+ s32 join_params_size = 0;
+#endif
WL_DBG(("Enter \n"));
+#if defined(SUPPORT_RANDOM_MAC_SCAN)
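+ /* Disable scan MAC randomization before bringing up the AP/GO */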
+ wl_cfg80211_set_random_mac(dev, FALSE);
+#endif /* SUPPORT_RANDOM_MAC_SCAN */
+
if ((bssidx = wl_get_bssidx_by_wdev(cfg, dev->ieee80211_ptr)) < 0) {
WL_ERR(("Find p2p index from wdev(%p) failed\n", dev->ieee80211_ptr));
return BCME_ERROR;
dev_role = NL80211_IFTYPE_AP;
dhd->op_mode |= DHD_FLAG_HOSTAP_MODE;
err = dhd_ndo_enable(dhd, FALSE);
- WL_DBG(("Disabling NDO on Hostapd mode %d\n", err));
+ WL_DBG(("%s: Disabling NDO on Hostapd mode %d\n", __FUNCTION__, err));
if (err) {
- WL_ERR(("Disabling NDO Failed %d\n", err));
+ WL_ERR(("%s: Disabling NDO Failed %d\n", __FUNCTION__, err));
}
-#ifdef WL_EXT_IAPSTA
- wl_ext_iapsta_update_iftype(dev, dhd_net2idx(dhd->info, dev), WL_IF_TYPE_AP);
-#endif /* WL_EXT_IAPSTA */
#ifdef PKT_FILTER_SUPPORT
/* Disable packet filter */
if (dhd->early_suspended) {
WL_ERR(("Disable pkt_filter\n"));
dhd_enable_packet_filter(0, dhd);
-#ifdef APF
- dhd_dev_apf_disable_filter(dhd_linux_get_primary_netdev(dhd));
-#endif /* APF */
}
#endif /* PKT_FILTER_SUPPORT */
#ifdef ARP_OFFLOAD_SUPPORT
dhd_arp_offload_enable(dhd, FALSE);
}
#endif /* ARP_OFFLOAD_SUPPORT */
+#ifdef SUPPORT_SET_CAC
+ wl_cfg80211_set_cac(cfg, 0);
+#endif /* SUPPORT_SET_CAC */
} else {
/* only AP or GO role need to be handled here. */
err = -EINVAL;
goto fail;
}
-#if ((LINUX_VERSION_CODE >= KERNEL_VERSION(3, 6, 0)) && !defined(WL_COMPAT_WIRELESS))
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 6, 0))
if ((err = wl_cfg80211_set_channel(wiphy, dev,
dev->ieee80211_ptr->preset_chandef.chan,
NL80211_CHAN_HT20) < 0)) {
WL_ERR(("Set channel failed \n"));
goto fail;
}
-#endif /* ((LINUX_VERSION >= VERSION(3, 6, 0) && !WL_COMPAT_WIRELESS) */
+#endif
if ((err = wl_cfg80211_bcn_set_params(info, dev,
dev_role, bssidx)) < 0) {
// goto fail;
}
- wl_set_drv_status(cfg, CONNECTED, dev);
+#ifdef WLMESH
+ OSL_SLEEP(1000);
+ if ((dev_role == NL80211_IFTYPE_P2P_GO) || (dev_role == NL80211_IFTYPE_AP)) {
+ memset(&join_params, 0, sizeof(join_params));
+ /* join parameters starts with ssid */
+ join_params_size = sizeof(join_params.ssid);
+ if (dev_role == NL80211_IFTYPE_P2P_GO) {
+ join_params.ssid.SSID_len = min(cfg->p2p->ssid.SSID_len,
+ (uint32)DOT11_MAX_SSID_LEN);
+ memcpy(join_params.ssid.SSID, cfg->p2p->ssid.SSID,
+ join_params.ssid.SSID_len);
+ } else if (dev_role == NL80211_IFTYPE_AP) {
+ join_params.ssid.SSID_len = min(cfg->hostapd_ssid.SSID_len,
+ (uint32)DOT11_MAX_SSID_LEN);
+ memcpy(join_params.ssid.SSID, cfg->hostapd_ssid.SSID,
+ join_params.ssid.SSID_len);
+ }
+ join_params.ssid.SSID_len = htod32(join_params.ssid.SSID_len);
+ /* create softap */
+ if ((err = wldev_ioctl_set(dev, WLC_SET_SSID, &join_params,
+ join_params_size)) != 0) {
+ WL_ERR(("SoftAP/GO set ssid failed! \n"));
+ goto fail;
+ } else {
+ WL_DBG((" SoftAP SSID \"%s\" \n", join_params.ssid.SSID));
+ }
+ }
+#endif
+
WL_DBG(("** AP/GO Created **\n"));
#ifdef WL_CFG80211_ACL
/* Set IEs to FW */
if ((err = wl_cfg80211_set_ies(dev, &info->beacon, bssidx)) < 0)
WL_ERR(("Set IEs failed \n"));
-
-#ifdef WLDWDS
- if (dev->ieee80211_ptr->use_4addr) {
- if ((err = wl_cfg80211_set_mgmt_vndr_ies(cfg, ndev_to_cfgdev(dev), bssidx,
- VNDR_IE_ASSOCRSP_FLAG, (const u8 *)info->beacon.assocresp_ies,
- info->beacon.assocresp_ies_len)) < 0) {
- WL_ERR(("Set ASSOC RESP IE Failed\n"));
- }
- }
-#endif /* WLDWDS */
/* Enable Probe Req filter, WPS-AP certification 4.2.13 */
if ((dev_role == NL80211_IFTYPE_AP) && (ies.wps_ie != NULL)) {
bool pbc = 0;
- wl_validate_wps_ie((const char *) ies.wps_ie, ies.wps_ie_len, &pbc);
+ wl_validate_wps_ie((char *) ies.wps_ie, ies.wps_ie_len, &pbc);
if (pbc) {
WL_DBG(("set WLC_E_PROBREQ_MSG\n"));
wl_add_remove_eventmsg(dev, WLC_E_PROBREQ_MSG, true);
}
}
- /* Configure hidden SSID */
- if (info->hidden_ssid != NL80211_HIDDEN_SSID_NOT_IN_USE) {
- if ((err = wldev_iovar_setint(dev, "closednet", 1)) < 0)
- WL_ERR(("failed to set hidden : %d\n", err));
- WL_DBG(("hidden_ssid_enum_val: %d \n", info->hidden_ssid));
- }
-
#ifdef SUPPORT_AP_RADIO_PWRSAVE
- if (dev_role == NL80211_IFTYPE_AP) {
- if (!wl_set_ap_rps(dev, FALSE, dev->name)) {
- wl_cfg80211_init_ap_rps(cfg);
- } else {
- WL_ERR(("Set rpsnoa failed \n"));
- }
+ if ((dev_role == NL80211_IFTYPE_AP)) {
+ wl_set_ap_rps(dev, FALSE, dev->name);
+ wl_cfg80211_init_ap_rps(cfg);
}
#endif /* SUPPORT_AP_RADIO_PWRSAVE */
fail:
if (err) {
WL_ERR(("ADD/SET beacon failed\n"));
- wl_flush_fw_log_buffer(dev, FW_LOGSET_MASK_ALL);
wl_cfg80211_stop_ap(wiphy, dev);
if (dev_role == NL80211_IFTYPE_AP) {
-#ifdef WL_EXT_IAPSTA
- if (!wl_ext_iapsta_iftype_enabled(dev, WL_IF_TYPE_AP)) {
-#endif /* WL_EXT_IAPSTA */
dhd->op_mode &= ~DHD_FLAG_HOSTAP_MODE;
#ifdef PKT_FILTER_SUPPORT
/* Enable packet filter */
if (dhd->early_suspended) {
WL_ERR(("Enable pkt_filter\n"));
dhd_enable_packet_filter(1, dhd);
-#ifdef APF
- dhd_dev_apf_enable_filter(dhd_linux_get_primary_netdev(dhd));
-#endif /* APF */
}
#endif /* PKT_FILTER_SUPPORT */
#ifdef ARP_OFFLOAD_SUPPORT
dhd_arp_offload_enable(dhd, TRUE);
}
#endif /* ARP_OFFLOAD_SUPPORT */
-#ifdef DISABLE_WL_FRAMEBURST_SOFTAP
- wl_cfg80211_set_frameburst(cfg, TRUE);
-#endif /* DISABLE_WL_FRAMEBURST_SOFTAP */
-#ifdef WL_EXT_IAPSTA
- }
-#endif /* WL_EXT_IAPSTA */
}
#ifdef WLTDLS
if (bssidx == 0) {
WL_DBG(("Enter \n"));
- if (wl_cfg80211_get_bus_state(cfg)) {
- /* since bus is down, iovar will fail. recovery path will bringup the bus. */
- WL_ERR(("bus is not ready\n"));
- return BCME_OK;
- }
is_rsdb_supported = DHD_OPMODE_SUPPORTED(cfg->pub, DHD_FLAG_RSDB_MODE);
if (is_rsdb_supported < 0)
return (-ENODEV);
goto exit;
}
- /* Free up resources */
- wl_cfg80211_cleanup_if(dev);
-
/* Clear AP/GO connected status */
wl_clr_drv_status(cfg, CONNECTED, dev);
if ((err = wl_cfg80211_bss_up(cfg, dev, bssidx, 0)) < 0) {
}
if (dev_role == NL80211_IFTYPE_AP) {
-#ifdef DISABLE_WL_FRAMEBURST_SOFTAP
- wl_cfg80211_set_frameburst(cfg, TRUE);
-#endif /* DISABLE_WL_FRAMEBURST_SOFTAP */
#ifdef PKT_FILTER_SUPPORT
/* Enable packet filter */
if (dhd->early_suspended) {
WL_ERR(("Enable pkt_filter\n"));
dhd_enable_packet_filter(1, dhd);
-#ifdef APF
- dhd_dev_apf_enable_filter(dhd_linux_get_primary_netdev(dhd));
-#endif /* APF */
}
#endif /* PKT_FILTER_SUPPORT */
#ifdef ARP_OFFLOAD_SUPPORT
#endif /* ARP_OFFLOAD_SUPPORT */
if (is_rsdb_supported == 0) {
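+ /* Standalone AP cleared apsta during bring-up; take the interface down and restore apsta 1 before wl up */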
+ if (dhd_download_fw_on_driverload && bssidx == 0) {
+ wldev_ioctl_set(dev, WLC_DOWN, &ap, sizeof(s32));
+ wldev_iovar_setint(dev, "apsta", 1);
+ }
/* For non-rsdb chips, we use stand alone AP. Do wl down on stop AP */
err = wldev_ioctl_set(dev, WLC_UP, &ap, sizeof(s32));
if (unlikely(err)) {
}
}
-#ifdef WL_DISABLE_HE_SOFTAP
- if (wl_cfg80211_set_he_mode(dev, cfg, bssidx, WL_IF_TYPE_AP, TRUE) != BCME_OK) {
- WL_ERR(("failed to set he features\n"));
- }
-#endif /* WL_DISABLE_HE_SOFTAP */
-
- wl_cfg80211_clear_per_bss_ies(cfg, dev->ieee80211_ptr);
+ wl_cfg80211_clear_per_bss_ies(cfg, bssidx);
#ifdef SUPPORT_AP_RADIO_PWRSAVE
- if (!wl_set_ap_rps(dev, FALSE, dev->name)) {
- wl_cfg80211_init_ap_rps(cfg);
- } else {
- WL_ERR(("Set rpsnoa failed \n"));
- }
+ wl_set_ap_rps(dev, FALSE, dev->name);
+ wl_cfg80211_init_ap_rps(cfg);
#endif /* SUPPORT_AP_RADIO_PWRSAVE */
} else {
WL_DBG(("Stopping P2P GO \n"));
DHD_EVENT_TIMEOUT_MS*3);
DHD_OS_WAKE_LOCK_TIMEOUT((dhd_pub_t *)(cfg->pub));
}
-
- SUPP_LOG(("AP/GO Link down\n"));
exit:
- if (err) {
- /* In case of failure, flush fw logs */
- wl_flush_fw_log_buffer(dev, FW_LOGSET_MASK_ALL);
- SUPP_LOG(("AP/GO Link down fail. err:%d\n", err));
- }
#ifdef WLTDLS
if (bssidx == 0) {
/* re-enable TDLS if the number of connected interfaces is less than 2 */
#endif /* WLTDLS */
if (dev_role == NL80211_IFTYPE_AP) {
-#ifdef WL_EXT_IAPSTA
- if (!wl_ext_iapsta_iftype_enabled(dev, WL_IF_TYPE_AP)) {
-#endif /* WL_EXT_IAPSTA */
/* clear the AP mode */
dhd->op_mode &= ~DHD_FLAG_HOSTAP_MODE;
-#ifdef WL_EXT_IAPSTA
- }
-#endif /* WL_EXT_IAPSTA */
}
+#ifdef SUPPORT_SET_CAC
+ wl_cfg80211_set_cac(cfg, 1);
+#endif /* SUPPORT_SET_CAC */
return err;
}
}
/* Enable Probe Req filter, WPS-AP certification 4.2.13 */
if ((dev_role == NL80211_IFTYPE_AP) && (ies.wps_ie != NULL)) {
- wl_validate_wps_ie((const char *) ies.wps_ie, ies.wps_ie_len, &pbc);
+ wl_validate_wps_ie((char *) ies.wps_ie, ies.wps_ie_len, &pbc);
WL_DBG((" WPS AP, wps_ie is exists pbc=%d\n", pbc));
if (pbc)
wl_add_remove_eventmsg(dev, WLC_E_PROBREQ_MSG, true);
}
fail:
- if (err) {
- wl_flush_fw_log_buffer(dev, FW_LOGSET_MASK_ALL);
- }
return err;
}
#else
DOT11_MNG_SSID_ID)) != NULL) {
if (dev_role == NL80211_IFTYPE_AP) {
/* Store the hostapd SSID */
- bzero(&cfg->hostapd_ssid.SSID[0], DOT11_MAX_SSID_LEN);
+ memset(&cfg->hostapd_ssid.SSID[0], 0x00, DOT11_MAX_SSID_LEN);
cfg->hostapd_ssid.SSID_len = MIN(ssid_ie->len, DOT11_MAX_SSID_LEN);
memcpy(&cfg->hostapd_ssid.SSID[0], ssid_ie->data,
cfg->hostapd_ssid.SSID_len);
} else {
/* P2P GO */
- bzero(&cfg->p2p->ssid.SSID[0], DOT11_MAX_SSID_LEN);
+ memset(&cfg->p2p->ssid.SSID[0], 0x00, DOT11_MAX_SSID_LEN);
cfg->p2p->ssid.SSID_len = MIN(ssid_ie->len, DOT11_MAX_SSID_LEN);
memcpy(cfg->p2p->ssid.SSID, ssid_ie->data,
cfg->p2p->ssid.SSID_len);
} else {
WL_DBG(("Applied Vndr IEs for ProbeRsp \n"));
}
-#endif // endif
+#endif
is_bss_up = wl_cfg80211_bss_isup(dev, bssidx);
privacy = info->privacy;
#else
privacy = 0;
-#endif // endif
+#endif
if (!is_bss_up &&
(wl_cfg80211_bcn_validate_sec(dev, &ies, dev_role, bssidx, privacy) < 0))
{
/* Set GC/STA SCB expiry timings. */
if ((err = wl_cfg80211_set_scb_timings(cfg, dev))) {
WL_ERR(("scb setting failed \n"));
- if (err == BCME_UNSUPPORTED)
- err = 0;
// goto fail;
}
}
WL_DBG(("** ADD/SET beacon done **\n"));
- wl_set_drv_status(cfg, CONNECTED, dev);
fail:
if (err) {
WL_ERR(("ADD/SET beacon failed\n"));
if (dev_role == NL80211_IFTYPE_AP) {
-#ifdef WL_EXT_IAPSTA
- if (!wl_ext_iapsta_iftype_enabled(dev, WL_IF_TYPE_AP)) {
-#endif /* WL_EXT_IAPSTA */
/* clear the AP mode */
dhd->op_mode &= ~DHD_FLAG_HOSTAP_MODE;
-#ifdef WL_EXT_IAPSTA
- }
-#endif /* WL_EXT_IAPSTA */
}
}
return err;
cfg->ap_oper_channel = 0;
+
if ((bssidx = wl_get_bssidx_by_wdev(cfg, dev->ieee80211_ptr)) < 0) {
WL_ERR(("find p2p index from wdev(%p) failed\n", dev->ieee80211_ptr));
return BCME_ERROR;
if (err < 0) {
WL_ERR(("SET INFRA error %d\n", err));
}
- wl_cfg80211_clear_per_bss_ies(cfg, dev->ieee80211_ptr);
+ wl_cfg80211_clear_per_bss_ies(cfg, bssidx);
if (wdev->iftype == NL80211_IFTYPE_AP) {
-#ifdef WL_EXT_IAPSTA
- if (!wl_ext_iapsta_iftype_enabled(dev, WL_IF_TYPE_AP)) {
-#endif /* WL_EXT_IAPSTA */
/* clear the AP mode */
dhd->op_mode &= ~DHD_FLAG_HOSTAP_MODE;
-#ifdef WL_EXT_IAPSTA
+ }
+
+ return 0;
+}
+#endif
+
+#ifdef WL_SCHED_SCAN
+#define PNO_TIME 30
+#define PNO_REPEAT 4
+#define PNO_FREQ_EXPO_MAX 2
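+/* Return TRUE if ssid also appears in ssid_list (used to tag hidden SSIDs for PNO) */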
+static bool
+is_ssid_in_list(struct cfg80211_ssid *ssid, struct cfg80211_ssid *ssid_list, int count)
+{
+ int i;
+
+ if (!ssid || !ssid_list)
+ return FALSE;
+
+ for (i = 0; i < count; i++) {
+ if (ssid->ssid_len == ssid_list[i].ssid_len) {
+ if (strncmp(ssid->ssid, ssid_list[i].ssid, ssid->ssid_len) == 0)
+ return TRUE;
+ }
+ }
+ return FALSE;
+}
+
+static int
+wl_cfg80211_sched_scan_start(struct wiphy *wiphy,
+ struct net_device *dev,
+ struct cfg80211_sched_scan_request *request)
+{
+ ushort pno_time = PNO_TIME;
+ int pno_repeat = PNO_REPEAT;
+ int pno_freq_expo_max = PNO_FREQ_EXPO_MAX;
+ wlc_ssid_ext_t ssids_local[MAX_PFN_LIST_COUNT];
+ struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+ dhd_pub_t *dhdp = (dhd_pub_t *)(cfg->pub);
+ struct cfg80211_ssid *ssid = NULL;
+ struct cfg80211_ssid *hidden_ssid_list = NULL;
+ log_conn_event_t *event_data = NULL;
+ tlv_log *tlv_data = NULL;
+ u32 alloc_len, tlv_len;
+ u32 payload_len;
+ int ssid_cnt = 0;
+ int i;
+ int ret = 0;
+ unsigned long flags;
+
+ if (!request) {
+ WL_ERR(("Sched scan request was NULL\n"));
+ return -EINVAL;
+ }
+
+ WL_DBG(("Enter \n"));
+ WL_PNO((">>> SCHED SCAN START\n"));
+ WL_PNO(("Enter n_match_sets:%d n_ssids:%d \n",
+ request->n_match_sets, request->n_ssids));
+ WL_PNO(("ssids:%d pno_time:%d pno_repeat:%d pno_freq:%d \n",
+ request->n_ssids, pno_time, pno_repeat, pno_freq_expo_max));
+
+ if (!request->n_ssids || !request->n_match_sets) {
+ WL_ERR(("Invalid sched scan req!! n_ssids:%d \n", request->n_ssids));
+ return -EINVAL;
+ }
+
+ memset(&ssids_local, 0, sizeof(ssids_local));
+
+ if (request->n_ssids > 0) {
+ hidden_ssid_list = request->ssids;
+ }
+
+ if (DBG_RING_ACTIVE(dhdp, DHD_EVENT_RING_ID)) {
+ alloc_len = sizeof(log_conn_event_t) + DOT11_MAX_SSID_LEN;
+ event_data = MALLOC(dhdp->osh, alloc_len);
+ if (!event_data) {
+ WL_ERR(("%s: failed to allocate log_conn_event_t with "
+ "length(%d)\n", __func__, alloc_len));
+ return -ENOMEM;
}
-#endif /* WL_EXT_IAPSTA */
+ memset(event_data, 0, alloc_len);
+ event_data->tlvs = NULL;
+ tlv_len = sizeof(tlv_log);
+ event_data->tlvs = (tlv_log *)MALLOC(dhdp->osh, tlv_len);
+ if (!event_data->tlvs) {
+ WL_ERR(("%s: failed to allocate log_tlv with "
+ "length(%d)\n", __func__, tlv_len));
+ MFREE(dhdp->osh, event_data, alloc_len);
+ return -ENOMEM;
+ }
+ }
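+ /* Copy the match-set SSIDs into the PFN list; entries also present in request->ssids are marked hidden */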
+ for (i = 0; i < request->n_match_sets && ssid_cnt < MAX_PFN_LIST_COUNT; i++) {
+ ssid = &request->match_sets[i].ssid;
+ /* No need to include null ssid */
+ if (ssid->ssid_len) {
+ ssids_local[ssid_cnt].SSID_len = MIN(ssid->ssid_len,
+ (uint32)DOT11_MAX_SSID_LEN);
+ memcpy(ssids_local[ssid_cnt].SSID, ssid->ssid,
+ ssids_local[ssid_cnt].SSID_len);
+ if (is_ssid_in_list(ssid, hidden_ssid_list, request->n_ssids)) {
+ ssids_local[ssid_cnt].hidden = TRUE;
+ WL_PNO((">>> PNO hidden SSID (%s) \n", ssid->ssid));
+ } else {
+ ssids_local[ssid_cnt].hidden = FALSE;
+ WL_PNO((">>> PNO non-hidden SSID (%s) \n", ssid->ssid));
+ }
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(3, 15, 0))
+ if (request->match_sets[i].rssi_thold != NL80211_SCAN_RSSI_THOLD_OFF) {
+ ssids_local[ssid_cnt].rssi_thresh =
+ (int8)request->match_sets[i].rssi_thold;
+ }
+#endif /* (LINUX_VERSION_CODE > KERNEL_VERSION(3, 15, 0)) */
+ ssid_cnt++;
+ }
+ }
+
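+ /* Program firmware PNO with the collected SSID list and log each entry to the event ring if active */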
+ if (ssid_cnt) {
+ if ((ret = dhd_dev_pno_set_for_ssid(dev, ssids_local, ssid_cnt,
+ pno_time, pno_repeat, pno_freq_expo_max, NULL, 0)) < 0) {
+ WL_ERR(("PNO setup failed!! ret=%d \n", ret));
+ ret = -EINVAL;
+ goto exit;
+ }
+
+ if (DBG_RING_ACTIVE(dhdp, DHD_EVENT_RING_ID)) {
+ for (i = 0; i < ssid_cnt; i++) {
+ payload_len = sizeof(log_conn_event_t);
+ event_data->event = WIFI_EVENT_DRIVER_PNO_ADD;
+ tlv_data = event_data->tlvs;
+ /* ssid */
+ tlv_data->tag = WIFI_TAG_SSID;
+ tlv_data->len = ssids_local[i].SSID_len;
+ memcpy(tlv_data->value, ssids_local[i].SSID,
+ ssids_local[i].SSID_len);
+ payload_len += TLV_LOG_SIZE(tlv_data);
+
+ dhd_os_push_push_ring_data(dhdp, DHD_EVENT_RING_ID,
+ event_data, payload_len);
+ }
+ }
+
+ spin_lock_irqsave(&cfg->cfgdrv_lock, flags);
+ cfg->sched_scan_req = request;
+ spin_unlock_irqrestore(&cfg->cfgdrv_lock, flags);
+ } else {
+ ret = -EINVAL;
+ }
+exit:
+ if (event_data) {
+ MFREE(dhdp->osh, event_data->tlvs, tlv_len);
+ MFREE(dhdp->osh, event_data, alloc_len);
+ }
+ return ret;
+}
+
+static int
+wl_cfg80211_sched_scan_stop(struct wiphy *wiphy, struct net_device *dev
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0))
+ , u64 reqid
+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)) */
+)
+{
+ struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+ dhd_pub_t *dhdp = (dhd_pub_t *)(cfg->pub);
+ unsigned long flags;
+
+ WL_DBG(("Enter \n"));
+ WL_PNO((">>> SCHED SCAN STOP\n"));
+
+ BCM_REFERENCE(dhdp);
+ if (dhd_dev_pno_stop_for_ssid(dev) < 0) {
+ WL_ERR(("PNO Stop for SSID failed"));
+ } else {
+ DBG_EVENT_LOG(dhdp, WIFI_EVENT_DRIVER_PNO_REMOVE);
}
+ if (cfg->scan_request && cfg->sched_scan_running) {
+ WL_PNO((">>> Sched scan running. Aborting it..\n"));
+ wl_notify_escan_complete(cfg, dev, true, true);
+ }
+ spin_lock_irqsave(&cfg->cfgdrv_lock, flags);
+ cfg->sched_scan_req = NULL;
+ cfg->sched_scan_running = FALSE;
+ spin_unlock_irqrestore(&cfg->cfgdrv_lock, flags);
return 0;
}
-#endif /* LINUX_VERSION < VERSION(3,4,0) || WL_COMPAT_WIRELESS */
+#endif /* WL_SCHED_SCAN */
#ifdef WL_SUPPORT_ACS
/*
cca_stats_n_flags *results;
char *buf;
int retry, err;
- struct bcm_cfg80211 *cfg = wl_get_cfg(ndev);
- buf = (char *)MALLOCZ(cfg->osh, sizeof(char) * WLC_IOCTL_MAXLEN);
+ buf = kzalloc(sizeof(char) * WLC_IOCTL_MAXLEN, GFP_KERNEL);
if (unlikely(!buf)) {
WL_ERR(("%s: buf alloc failed\n", __func__));
return -ENOMEM;
results = (cca_stats_n_flags *)(buf);
wl_parse_dump_obss(results->buf, survey);
- MFREE(cfg->osh, buf, sizeof(char) * WLC_IOCTL_MAXLEN);
+ kfree(buf);
return 0;
exit:
- MFREE(cfg->osh, buf, sizeof(char) * WLC_IOCTL_MAXLEN);
+ kfree(buf);
return err;
}
noise = CHAN_NOISE_DUMMY;
}
- survey = (struct wl_dump_survey *)MALLOCZ(cfg->osh,
- sizeof(struct wl_dump_survey));
+ survey = (struct wl_dump_survey *) kzalloc(sizeof(struct wl_dump_survey),
+ GFP_KERNEL);
if (unlikely(!survey)) {
WL_ERR(("%s: alloc failed\n", __func__));
return -ENOMEM;
info->filled = SURVEY_INFO_NOISE_DBM |SURVEY_INFO_CHANNEL_TIME |
SURVEY_INFO_CHANNEL_TIME_BUSY | SURVEY_INFO_CHANNEL_TIME_RX |
SURVEY_INFO_CHANNEL_TIME_TX;
- MFREE(cfg->osh, survey, sizeof(struct wl_dump_survey));
+ kfree(survey);
return 0;
exit:
- MFREE(cfg->osh, survey, sizeof(struct wl_dump_survey));
+ kfree(survey);
return err;
}
#endif /* WL_SUPPORT_ACS */
.stop_p2p_device = wl_cfgp2p_stop_p2p_device,
#endif /* WL_CFG80211_P2P_DEV_IF */
.scan = wl_cfg80211_scan,
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0))
- .abort_scan = wl_cfg80211_abort_scan,
-#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0)) */
.set_wiphy_params = wl_cfg80211_set_wiphy_params,
.join_ibss = wl_cfg80211_join_ibss,
.leave_ibss = wl_cfg80211_leave_ibss,
.mgmt_tx = wl_cfg80211_mgmt_tx,
.mgmt_frame_register = wl_cfg80211_mgmt_frame_register,
.change_bss = wl_cfg80211_change_bss,
-#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 6, 0)) || defined(WL_COMPAT_WIRELESS)
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 6, 0))
.set_channel = wl_cfg80211_set_channel,
-#endif /* ((LINUX_VERSION < VERSION(3, 6, 0)) || WL_COMPAT_WIRELESS */
-#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 4, 0)) && !defined(WL_COMPAT_WIRELESS)
+#endif
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 4, 0))
.set_beacon = wl_cfg80211_add_set_beacon,
.add_beacon = wl_cfg80211_add_set_beacon,
.del_beacon = wl_cfg80211_del_beacon,
.change_beacon = wl_cfg80211_change_beacon,
.start_ap = wl_cfg80211_start_ap,
.stop_ap = wl_cfg80211_stop_ap,
-#endif /* LINUX_VERSION < KERNEL_VERSION(3,4,0) && !WL_COMPAT_WIRELESS */
+#endif
#ifdef WL_SCHED_SCAN
.sched_scan_start = wl_cfg80211_sched_scan_start,
.sched_scan_stop = wl_cfg80211_sched_scan_stop,
.change_station = wl_cfg80211_change_station,
.mgmt_tx_cancel_wait = wl_cfg80211_mgmt_tx_cancel_wait,
#endif /* WL_SUPPORT_BACKPORTED_KPATCHES || KERNEL_VERSION >= (3,2,0) */
-#if (LINUX_VERSION_CODE > KERNEL_VERSION(3, 2, 0)) || defined(WL_COMPAT_WIRELESS)
+#ifdef WLMESH
+ .join_mesh = wl_cfg80211_join_mesh,
+ .leave_mesh = wl_cfg80211_leave_mesh,
+#endif /* WLMESH */
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(3, 2, 0))
.tdls_mgmt = wl_cfg80211_tdls_mgmt,
.tdls_oper = wl_cfg80211_tdls_oper,
-#endif /* LINUX_VERSION > VERSION(3, 2, 0) || WL_COMPAT_WIRELESS */
+#endif
#ifdef WL_SUPPORT_ACS
.dump_survey = wl_cfg80211_dump_survey,
#endif /* WL_SUPPORT_ACS */
.set_rekey_data = wl_cfg80211_set_rekey_data,
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 1, 0) */
#endif /* GTK_OFFLOAD_SUPPORT */
-#if defined(WL_FILS)
- /* This should be enabled from kernel version which supports this */
- .update_connect_params = wl_cfg80211_update_connect_params,
-#endif /* WL_FILS */
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 13, 0))
- .set_pmk = wl_cfg80211_set_pmk,
- .del_pmk = wl_cfg80211_del_pmk,
-#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 13, 0) */
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 12, 0))
- .channel_switch = wl_cfg80211_channel_switch,
-#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 12, 0) */
-#ifdef WL_CLIENT_SAE
- .external_auth = wl_cfg80211_external_auth,
-#endif /* WL_CLIENT_SAE */
};
s32 wl_mode_to_nl80211_iftype(s32 mode)
return NL80211_IFTYPE_ADHOC;
case WL_MODE_AP:
return NL80211_IFTYPE_AP;
-#ifdef WLMESH_CFG80211
+#ifdef WLMESH
case WL_MODE_MESH:
return NL80211_IFTYPE_MESH_POINT;
-#endif /* WLMESH_CFG80211 */
+#endif
default:
return NL80211_IFTYPE_UNSPECIFIED;
}
return err;
}
-s32
-wl_cfg80211_set_country_code(struct net_device *net, char *country_code,
- bool notify, bool user_enforced, int revinfo)
-{
- s32 ret = BCME_OK;
-#ifdef WL_NAN
- struct wireless_dev *wdev = ndev_to_wdev(net);
- struct wiphy *wiphy = wdev->wiphy;
- struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
- if (cfg->nan_enable) {
- mutex_lock(&cfg->if_sync);
- cfg->nancfg.disable_reason = NAN_COUNTRY_CODE_CHANGE;
- ret = wl_cfgnan_disable(cfg);
- mutex_unlock(&cfg->if_sync);
- if (ret != BCME_OK) {
- WL_ERR(("failed to disable nan, error[%d]\n", ret));
- return ret;
- }
- }
-#endif /* WL_NAN */
- ret = wldev_set_country(net, country_code,
- notify, user_enforced, revinfo);
- if (ret < 0) {
- WL_ERR(("set country Failed :%d\n", ret));
- }
- return ret;
-}
-
#ifdef CONFIG_CFG80211_INTERNAL_REGDB
#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 9, 0))
#define WL_CFG80211_REG_NOTIFIER() static int wl_cfg80211_reg_notifier(struct wiphy *wiphy, struct regulatory_request *request)
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 6, 0) */
#endif /* CONFIG_PM */
-int wl_features_set(u8 *array, uint8 len, u32 ftidx)
-{
- u8* ft_byte;
-
- if ((ftidx / 8u) >= len)
- return BCME_BADARG;
-
- ft_byte = &array[ftidx / 8u];
- *ft_byte |= BIT(ftidx % 8u);
- return BCME_OK;
-}
-
static s32 wl_setup_wiphy(struct wireless_dev *wdev, struct device *sdiofunc_dev, dhd_pub_t *context)
{
s32 err = 0;
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 11, 0) */
#endif /* CONFIG_PM */
-//#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 4, 0) || defined(WL_COMPAT_WIRELESS))
+//#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 4, 0))
dhd_pub_t *dhd = (dhd_pub_t *)context;
BCM_REFERENCE(dhd);
err = -ENODEV;
return err;
}
-//#endif // endif
+//#endif
wdev->wiphy =
wiphy_new(&wl_cfg80211_ops, sizeof(struct bcm_cfg80211));
wdev->wiphy->max_sched_scan_ssids = MAX_PFN_LIST_COUNT;
wdev->wiphy->max_match_sets = MAX_PFN_LIST_COUNT;
wdev->wiphy->max_sched_scan_ie_len = WL_SCAN_IE_LEN_MAX;
-#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 12, 0))
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0))
+ wdev->wiphy->max_sched_scan_plan_interval = PNO_SCAN_MAX_FW_SEC;
+#else
wdev->wiphy->flags |= WIPHY_FLAG_SUPPORTS_SCHED_SCAN;
-#endif /* LINUX_VER < 4.12 */
+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)) */
#endif /* WL_SCHED_SCAN */
-#ifdef WLMESH_CFG80211
+#ifdef WLMESH
wdev->wiphy->flags |= WIPHY_FLAG_MESH_AUTH;
-#endif /* WLMESH_CFG80211 */
+#endif
wdev->wiphy->interface_modes =
BIT(NL80211_IFTYPE_STATION)
| BIT(NL80211_IFTYPE_ADHOC)
#if !defined(WL_ENABLE_P2P_IF) && !defined(WL_CFG80211_P2P_DEV_IF)
| BIT(NL80211_IFTYPE_MONITOR)
-#endif // endif
+#endif /* !WL_ENABLE_P2P_IF && !WL_CFG80211_P2P_DEV_IF */
#if defined(WL_IFACE_COMB_NUM_CHANNELS) || defined(WL_CFG80211_P2P_DEV_IF)
| BIT(NL80211_IFTYPE_P2P_CLIENT)
| BIT(NL80211_IFTYPE_P2P_GO)
#if defined(WL_CFG80211_P2P_DEV_IF)
| BIT(NL80211_IFTYPE_P2P_DEVICE)
#endif /* WL_CFG80211_P2P_DEV_IF */
-#ifdef WLMESH_CFG80211
+#ifdef WLMESH
| BIT(NL80211_IFTYPE_MESH_POINT)
-#endif /* WLMESH_CFG80211 */
+#endif /* WLMESH */
| BIT(NL80211_IFTYPE_AP);
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0)) && \
#endif /* !WL_POWERSAVE_DISABLED */
wdev->wiphy->flags |= WIPHY_FLAG_NETNS_OK |
WIPHY_FLAG_4ADDR_AP |
-#if (LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 39)) && !defined(WL_COMPAT_WIRELESS)
+#if (LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 39))
WIPHY_FLAG_SUPPORTS_SEPARATE_DEFAULT_KEYS |
-#endif // endif
+#endif
WIPHY_FLAG_4ADDR_STATION;
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 15, 0))
+#if ((defined(ROAM_ENABLE) || defined(BCMFW_ROAM_ENABLE)) && (LINUX_VERSION_CODE >= \
+ KERNEL_VERSION(3, 2, 0)))
/*
- * If FW ROAM flag is advertised, upper layer doesn't provide the
- * bssid & freq in the connect command. However, kernel ver >= 3.15,
- * provides bssid_hint & freq_hint which can be used by the firmware.
- * fw_ap_select variable determines whether FW selects the AP or the
- * user space selects the target AP within the given ESS.
+ * If the FW ROAM flag is advertised, the upper layer won't provide
+ * the bssid & freq in the connect command. This results in a delay
+ * in the initial connection because the firmware must do a full
+ * channel scan to figure out the channel & bssid. Kernel ver >= 3.15
+ * provides bssid_hint & freq_hint, so it is not affected. If this
+ * flag needs to be advertised for kernel < 3.15, use RCC along with
+ * it to avoid the initial connection delay.
*/
wdev->wiphy->flags |= WIPHY_FLAG_SUPPORTS_FW_ROAM;
-#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 15, 0) */
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 3, 0)) || defined(WL_COMPAT_WIRELESS)
+#endif
+#ifdef UNSET_FW_ROAM_WIPHY_FLAG
+ wdev->wiphy->flags &= ~WIPHY_FLAG_SUPPORTS_FW_ROAM;
+#endif /* UNSET_FW_ROAM_WIPHY_FLAG */
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 3, 0))
wdev->wiphy->flags |= WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL |
WIPHY_FLAG_OFFCHAN_TX;
-#endif // endif
+#endif
#if defined(WL_SUPPORT_BACKPORTED_KPATCHES) || (LINUX_VERSION_CODE >= KERNEL_VERSION(3, \
4, 0))
/* From 3.4 kernel ownards AP_SME flag can be advertised
#ifdef WL_CFG80211_ACL
/* Configure ACL capabilities. */
wdev->wiphy->max_acl_mac_addrs = MAX_NUM_MAC_FILT;
-#endif // endif
+#endif
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 4, 0) || defined(WL_COMPAT_WIRELESS))
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 4, 0))
/* Supplicant distinguish between the SoftAP mode and other
* modes (e.g. P2P, WPS, HS2.0) when it builds the probe
* response frame from Supplicant MR1 and Kernel 3.4.0 or
wdev->wiphy->flags |= WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD;
wdev->wiphy->probe_resp_offload = 0;
}
-#endif // endif
+#endif
#endif /* WL_SUPPORT_BACKPORTED_KPATCHES) || (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 4, 0)) */
-#if (LINUX_VERSION_CODE > KERNEL_VERSION(3, 2, 0)) || defined(WL_COMPAT_WIRELESS)
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(3, 2, 0))
wdev->wiphy->flags |= WIPHY_FLAG_SUPPORTS_TDLS;
-#endif // endif
+#endif
#if defined(CONFIG_PM) && defined(WL_CFG80211_P2P_DEV_IF)
/*
wdev->wiphy->wowlan = &brcm_wowlan_support;
/* If this is not provided cfg stack will get disconnect
* during suspend.
- * Note: wiphy->wowlan_config is freed by cfg80211 layer.
- * so use malloc instead of MALLOC(osh) to avoid false alarm.
*/
brcm_wowlan_config = kmalloc(sizeof(struct cfg80211_wowlan), GFP_KERNEL);
if (brcm_wowlan_config) {
brcm_wowlan_config->tcp = NULL;
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 19, 0))
brcm_wowlan_config->nd_config = NULL;
-#endif // endif
+#endif
} else {
WL_ERR(("Can not allocate memory for brcm_wowlan_config,"
" So wiphy->wowlan_config is set to NULL\n"));
WL_DBG(("Registering custom regulatory)\n"));
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 14, 0))
- wdev->wiphy->regulatory_flags |=
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 19, 0))
- REGULATORY_IGNORE_STALE_KICKOFF |
-#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 19, 0) */
- REGULATORY_CUSTOM_REG;
+ wdev->wiphy->regulatory_flags |= REGULATORY_CUSTOM_REG;
#else
wdev->wiphy->flags |= WIPHY_FLAG_CUSTOM_REGULATORY;
-#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 14, 0) */
+#endif
wiphy_apply_custom_regulatory(wdev->wiphy, &brcm_regdom);
#if (LINUX_VERSION_CODE > KERNEL_VERSION(3, 14, 0)) || defined(WL_VENDOR_EXT_SUPPORT)
- WL_INFORM_MEM(("Registering Vendor80211\n"));
+ WL_ERR(("Registering Vendor80211\n"));
err = wl_cfgvendor_attach(wdev->wiphy, dhd);
if (unlikely(err < 0)) {
WL_ERR(("Couldn not attach vendor commands (%d)\n", err));
}
#endif /* (LINUX_VERSION_CODE > KERNEL_VERSION(3, 14, 0)) || defined(WL_VENDOR_EXT_SUPPORT) */
-#ifdef WL_FILS
- wiphy_ext_feature_set(wdev->wiphy, NL80211_EXT_FEATURE_FILS_SK_OFFLOAD);
-#endif /* WL_FILS */
-
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 17, 0))
- wdev->wiphy->flags |= WIPHY_FLAG_HAS_CHANNEL_SWITCH;
- wdev->wiphy->max_num_csa_counters = WL_MAX_NUM_CSA_COUNTERS;
-#endif /* LINUX_VERSION_CODE > KERNEL_VERSION(3, 12, 0) */
/* Now we can register wiphy with cfg80211 module */
err = wiphy_register(wdev->wiphy);
#if ((LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0)) && (LINUX_VERSION_CODE <= \
KERNEL_VERSION(3, 3, 0))) && defined(WL_IFACE_COMB_NUM_CHANNELS)
wdev->wiphy->flags &= ~WIPHY_FLAG_ENFORCE_COMBINATIONS;
-#endif // endif
-
-#if defined(WL_SAE) || defined(WL_CLIENT_SAE)
- wdev->wiphy->features |= NL80211_FEATURE_SAE;
-#endif /* WL_SAE || WL_CLIENT_SAE */
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 13, 0)) && defined(BCMSUP_4WAY_HANDSHAKE)
- if (FW_SUPPORTED(dhd, idsup)) {
- err = wiphy_ext_feature_set(wdev->wiphy,
- NL80211_EXT_FEATURE_4WAY_HANDSHAKE_STA_PSK);
- if (err) {
- return err;
- }
- err = wiphy_ext_feature_set(wdev->wiphy,
- NL80211_EXT_FEATURE_4WAY_HANDSHAKE_STA_1X);
- if (err) {
- return err;
- }
- }
-#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 13, 0) && defined(BCMSUP_4WAY_HANDSHAKE) */
-#ifdef WL_SCAN_TYPE
- /* These scan types will be mapped to default scan on non-supported chipset */
- /* Advertise scan type capability. */
- wiphy_ext_feature_set(wdev->wiphy, NL80211_EXT_FEATURE_LOW_SPAN_SCAN);
- wiphy_ext_feature_set(wdev->wiphy, NL80211_EXT_FEATURE_LOW_POWER_SCAN);
- wiphy_ext_feature_set(wdev->wiphy, NL80211_EXT_FEATURE_HIGH_ACCURACY_SCAN);
- wdev->wiphy->features |= NL80211_FEATURE_LOW_PRIORITY_SCAN;
-#endif /* WL_SCAN_TYPE */
+#endif
return err;
}
#endif /* (LINUX_VERSION_CODE > KERNEL_VERSION(3, 14, 0)) || defined(WL_VENDOR_EXT_SUPPORT) */
#if defined(CONFIG_PM) && defined(WL_CFG80211_P2P_DEV_IF)
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 11, 0))
- /* Reset wowlan & wowlan_config before Unregister to avoid Kernel Panic */
- WL_DBG(("clear wowlan\n"));
+ /* Reset wowlan & wowlan_config before Unregister to avoid Kernel Panic */
+ WL_DBG(("wl_free_wdev Clearing wowlan Config \n"));
wdev->wiphy->wowlan = NULL;
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 11, 0) */
#endif /* CONFIG_PM && WL_CFG80211_P2P_DEV_IF */
}
wl_delete_all_netinfo(cfg);
- if (wiphy) {
- if (wdev->netdev)
- wdev->netdev->ieee80211_ptr = NULL;
- wdev->netdev = NULL;
- MFREE(cfg->osh, wdev, sizeof(*wdev));
- cfg->wdev = NULL;
+ if (wiphy)
wiphy_free(wiphy);
- }
/* PLEASE do NOT call any function after wiphy_free, the driver's private structure "cfg",
* which is the private part of wiphy, has been freed in wiphy_free !!!!!!!!!!!
*/
}
-#if defined(BSSCACHE) || defined(RSSIAVG)
-void wl_cfg80211_update_bss_cache(struct bcm_cfg80211 *cfg)
+static s32 wl_inform_bss(struct bcm_cfg80211 *cfg)
{
+ struct wl_scan_results *bss_list;
+ struct wl_bss_info *bi = NULL; /* must be initialized */
+ s32 err = 0;
+ s32 i;
+ struct net_device *ndev = bcmcfg_to_prmry_ndev(cfg);
#if defined(RSSIAVG)
int rssi;
#endif
- struct wl_scan_results *bss_list = cfg->bss_list;
+#if defined(BSSCACHE)
+ wl_bss_cache_t *node;
+#endif
+
+ bss_list = cfg->bss_list;
/* Free cache in p2p scanning*/
if (p2p_is_on(cfg) && p2p_scan(cfg)) {
/* Delete disconnected cache */
#if defined(BSSCACHE)
- wl_delete_disconnected_bss_cache(&cfg->g_bss_cache_ctrl,
- (u8*)&cfg->disconnected_bssid);
+ wl_delete_disconnected_bss_cache(&cfg->g_bss_cache_ctrl, (u8*)&cfg->disconnected_bssid);
#if defined(RSSIAVG)
- wl_delete_disconnected_rssi_cache(&cfg->g_rssi_cache_ctrl,
- (u8*)&cfg->disconnected_bssid);
+ wl_delete_disconnected_rssi_cache(&cfg->g_rssi_cache_ctrl, (u8*)&cfg->disconnected_bssid);
#endif
if (cfg->p2p_disconnected == 0)
memset(&cfg->disconnected_bssid, 0, ETHER_ADDR_LEN);
wl_reset_bss_cache(&cfg->g_bss_cache_ctrl);
#endif
-}
-#endif
-
-#if defined(BSSCACHE)
-s32 wl_inform_bss_cache(struct bcm_cfg80211 *cfg)
-{
- struct wl_scan_results *bss_list = cfg->bss_list;
- wl_bss_info_t *bi = NULL; /* must be initialized */
- s32 err = 0;
- s32 i;
- struct net_device *ndev = bcmcfg_to_prmry_ndev(cfg);
#if defined(BSSCACHE)
- wl_bss_cache_t *node;
-#endif
-
if (cfg->p2p_disconnected > 0) {
// terence 20130703: Fix for wrong group_capab (timing issue)
wl_delete_disconnected_bss_cache(&cfg->g_bss_cache_ctrl, (u8*)&cfg->disconnected_bssid);
if (cfg->autochannel)
wl_ext_get_best_channel(ndev, &cfg->g_bss_cache_ctrl, ioctl_version,
&cfg->best_2g_ch, &cfg->best_5g_ch);
-
- return err;
-}
-#endif
-
-s32 wl_inform_bss(struct bcm_cfg80211 *cfg)
-{
-#if !defined(BSSCACHE)
- struct wl_scan_results *bss_list;
- wl_bss_info_t *bi = NULL; /* must be initialized */
- s32 i;
- struct net_device *ndev = bcmcfg_to_prmry_ndev(cfg);
-#endif
- s32 err = 0;
-
-#if defined(BSSCACHE) || defined(RSSIAVG)
- wl_cfg80211_update_bss_cache(cfg);
-#endif
-
-#if defined(BSSCACHE)
- err = wl_inform_bss_cache(cfg);
-#else
- bss_list = cfg->bss_list;
- WL_SCAN(("scanned AP count (%d)\n", bss_list->count));
-#ifdef ESCAN_CHANNEL_CACHE
- reset_roam_cache(cfg);
-#endif /* ESCAN_CHANNEL_CACHE */
- preempt_disable();
- bi = next_bss(bss_list, bi);
- for_each_bss(bss_list, bi, i) {
- if (cfg->p2p_disconnected > 0 &&
- !memcmp(&bi->BSSID, &cfg->disconnected_bssid, ETHER_ADDR_LEN)) {
- WL_SCAN(("Skip %pM\n", &bi->BSSID));
- continue;
- }
-#ifdef ESCAN_CHANNEL_CACHE
- add_roam_cache(cfg, bi);
-#endif /* ESCAN_CHANNEL_CACHE */
- err = wl_inform_single_bss(cfg, bi, false);
- if (unlikely(err)) {
- WL_ERR(("bss inform failed\n"));
- }
- }
- preempt_enable();
- if (cfg->autochannel)
- wl_ext_get_best_channel(ndev, bss_list, ioctl_version,
- &cfg->best_2g_ch, &cfg->best_5g_ch);
-#endif
+#else
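+ /* Without BSSCACHE, report each scanned BSS to cfg80211 directly,
+  * skipping the BSSID of a just-disconnected P2P peer.
+  */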
+ WL_SCAN(("scanned AP count (%d)\n", bss_list->count));
+ preempt_disable();
+ bi = next_bss(bss_list, bi);
+ for_each_bss(bss_list, bi, i) {
+ if (cfg->p2p_disconnected > 0 && !memcmp(&bi->BSSID, &cfg->disconnected_bssid, ETHER_ADDR_LEN))
+ continue;
+ err = wl_inform_single_bss(cfg, bi, false);
+ }
+ preempt_enable();
+ if (cfg->autochannel)
+ wl_ext_get_best_channel(ndev, bss_list, ioctl_version,
+ &cfg->best_2g_ch, &cfg->best_5g_ch);
+#endif
if (cfg->p2p_disconnected > 0) {
// terence 20130703: Fix for wrong group_capab (timing issue)
cfg->p2p_disconnected++;
- if (cfg->p2p_disconnected >= 2) {
+ if (cfg->p2p_disconnected >= REPEATED_SCAN_RESULT_CNT+1) {
cfg->p2p_disconnected = 0;
memset(&cfg->disconnected_bssid, 0, ETHER_ADDR_LEN);
}
}
- WL_MEM(("cfg80211 scan cache updated\n"));
-#ifdef ROAM_CHANNEL_CACHE
- /* print_roam_cache(); */
- update_roam_cache(cfg, ioctl_version);
-#endif /* ROAM_CHANNEL_CACHE */
+
return err;
}
-static s32 wl_inform_single_bss(struct bcm_cfg80211 *cfg, wl_bss_info_t *bi, bool update_ssid)
+static s32 wl_inform_single_bss(struct bcm_cfg80211 *cfg, struct wl_bss_info *bi, bool roam)
{
struct wiphy *wiphy = bcmcfg_to_wiphy(cfg);
struct ieee80211_mgmt *mgmt;
u32 freq;
s32 err = 0;
gfp_t aflags;
- u8 tmp_buf[IEEE80211_MAX_SSID_LEN + 1];
chanspec_t chanspec;
if (unlikely(dtoh32(bi->length) > WL_BSS_INFO_MAX)) {
WL_DBG(("Beacon is larger than buffer. Discarding\n"));
return err;
}
-
- if (bi->SSID_len > IEEE80211_MAX_SSID_LEN) {
- WL_ERR(("wrong SSID len:%d\n", bi->SSID_len));
- return -EINVAL;
- }
-
aflags = (in_atomic()) ? GFP_ATOMIC : GFP_KERNEL;
- notif_bss_info = (struct wl_cfg80211_bss_info *)MALLOCZ(cfg->osh,
- sizeof(*notif_bss_info) + sizeof(*mgmt) - sizeof(u8) + WL_BSS_INFO_MAX);
+ notif_bss_info = kzalloc(sizeof(*notif_bss_info) + sizeof(*mgmt)
+ - sizeof(u8) + WL_BSS_INFO_MAX, aflags);
if (unlikely(!notif_bss_info)) {
WL_ERR(("notif_bss_info alloc failed\n"));
return -ENOMEM;
band = wiphy->bands[IEEE80211_BAND_5GHZ];
if (!band) {
WL_ERR(("No valid band\n"));
- MFREE(cfg->osh, notif_bss_info, sizeof(*notif_bss_info)
- + sizeof(*mgmt) - sizeof(u8) + WL_BSS_INFO_MAX);
+ kfree(notif_bss_info);
return -EINVAL;
}
notif_bss_info->rssi = dtoh16(bi->RSSI);
beacon_proberesp->beacon_int = cpu_to_le16(bi->beacon_period);
beacon_proberesp->capab_info = cpu_to_le16(bi->capability);
wl_rst_ie(cfg);
- wl_update_hidden_ap_ie(bi, ((u8 *) bi) + bi->ie_offset, &bi->ie_length, update_ssid);
+ wl_update_hidden_ap_ie(bi, ((u8 *) bi) + bi->ie_offset, &bi->ie_length, roam);
wl_mrg_ie(cfg, ((u8 *) bi) + bi->ie_offset, bi->ie_length);
wl_cp_ie(cfg, beacon_proberesp->variable, WL_BSS_INFO_MAX -
offsetof(struct wl_cfg80211_bss_info, frame_buf));
notif_bss_info->frame_len = offsetof(struct ieee80211_mgmt,
u.beacon.variable) + wl_get_ielen(cfg);
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 39) && !defined(WL_COMPAT_WIRELESS)
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 39)
freq = ieee80211_channel_to_frequency(notif_bss_info->channel);
(void)band->band;
#else
freq = ieee80211_channel_to_frequency(notif_bss_info->channel, band->band);
-#endif // endif
+#endif
if (freq == 0) {
WL_ERR(("Invalid channel, fail to change channel to freq\n"));
- MFREE(cfg->osh, notif_bss_info, sizeof(*notif_bss_info)
- + sizeof(*mgmt) - sizeof(u8) + WL_BSS_INFO_MAX);
+ kfree(notif_bss_info);
return -EINVAL;
}
channel = ieee80211_get_channel(wiphy, freq);
- memcpy(tmp_buf, bi->SSID, bi->SSID_len);
- tmp_buf[bi->SSID_len] = '\0';
- WL_SCAN(("BSSID %pM, channel %3d(%3d %sMHz), rssi %3d, capa 0x04%x, mgmt_type %d, "
+ WL_SCAN(("BSSID %pM, channel %2d(%2d %sMHz), rssi %3d, capa 0x04%x, mgmt_type %d, "
"frame_len %d, SSID \"%s\"\n",
&bi->BSSID, notif_bss_info->channel, CHSPEC_CHANNEL(chanspec),
CHSPEC_IS20(chanspec)?"20":
CHSPEC_IS40(chanspec)?"40":
CHSPEC_IS80(chanspec)?"80":"160",
notif_bss_info->rssi, mgmt->u.beacon.capab_info, mgmt_type,
- notif_bss_info->frame_len, tmp_buf));
+ notif_bss_info->frame_len, bi->SSID));
if (unlikely(!channel)) {
WL_ERR(("ieee80211_get_channel error, freq=%d, channel=%d\n",
freq, notif_bss_info->channel));
- MFREE(cfg->osh, notif_bss_info, sizeof(*notif_bss_info)
- + sizeof(*mgmt) - sizeof(u8) + WL_BSS_INFO_MAX);
+ kfree(notif_bss_info);
return -EINVAL;
}
signal = notif_bss_info->rssi * 100;
if (!mgmt->u.probe_resp.timestamp) {
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 39))
- struct osl_timespec ts;
- osl_get_monotonic_boottime(&ts);
+ struct timespec ts;
+ get_monotonic_boottime(&ts);
mgmt->u.probe_resp.timestamp = ((u64)ts.tv_sec*1000000)
+ ts.tv_nsec / 1000;
#else
- struct osl_timespec tv;
- osl_do_gettimeofday(&tv);
+ struct timeval tv;
+ do_gettimeofday(&tv);
mgmt->u.probe_resp.timestamp = ((u64)tv.tv_sec*1000000)
+ tv.tv_usec;
-#endif // endif
+#endif
}
+
cbss = cfg80211_inform_bss_frame(wiphy, channel, mgmt,
le16_to_cpu(notif_bss_info->frame_len), signal, aflags);
if (unlikely(!cbss)) {
- WL_ERR(("cfg80211_inform_bss_frame error bssid " MACDBG " channel %d \n",
- MAC2STRDBG((u8*)(&bi->BSSID)), notif_bss_info->channel));
+ WL_ERR(("cfg80211_inform_bss_frame error\n"));
err = -EINVAL;
goto out_err;
}
(cfg->sched_scan_req && !cfg->scan_request)) {
alloc_len = sizeof(log_conn_event_t) + IEEE80211_MAX_SSID_LEN + sizeof(uint16) +
sizeof(int16);
- event_data = (log_conn_event_t *)MALLOCZ(dhdp->osh, alloc_len);
+ event_data = MALLOCZ(dhdp->osh, alloc_len);
if (!event_data) {
WL_ERR(("%s: failed to allocate the log_conn_event_t with "
"length(%d)\n", __func__, alloc_len));
goto out_err;
}
tlv_len = 3 * sizeof(tlv_log);
- event_data->tlvs = (tlv_log *)MALLOCZ(cfg->osh, tlv_len);
+ event_data->tlvs = MALLOC(dhdp->osh, tlv_len);
if (!event_data->tlvs) {
WL_ERR(("%s: failed to allocate the log_conn_event_t with "
- "length(%d)\n", __func__, tlv_len));
- goto free_evt_data;
+ "length(%d)\n", __func__, tlv_len));
+ goto out_err;
}
payload_len = sizeof(log_conn_event_t);
tlv_data = TLV_LOG_NEXT(tlv_data);
dhd_os_push_push_ring_data(dhdp, DHD_EVENT_RING_ID,
- event_data, payload_len);
+ event_data, payload_len);
MFREE(dhdp->osh, event_data->tlvs, tlv_len);
-free_evt_data:
MFREE(dhdp->osh, event_data, alloc_len);
}
out_err:
- MFREE(cfg->osh, notif_bss_info, sizeof(*notif_bss_info)
- + sizeof(*mgmt) - sizeof(u8) + WL_BSS_INFO_MAX);
+ kfree(notif_bss_info);
return err;
}
u32 event = ntoh32(e->event_type);
u32 status = ntoh32(e->status);
u16 flags = ntoh16(e->flags);
-#if defined(CUSTOM_SET_ANTNPM)
+#if defined(CUSTOM_SET_ANTNPM) || defined(CUSTOM_SET_OCLOFF)
dhd_pub_t *dhd;
dhd = (dhd_pub_t *)(cfg->pub);
-#endif // endif
+#endif /* CUSTOM_SET_ANTNPM || CUSTOM_SET_OCLOFF */
WL_DBG(("event %d, status %d flags %x\n", event, status, flags));
if (event == WLC_E_SET_SSID) {
}
}
#endif /* CUSTOM_SET_ANTNPM */
+#ifdef CUSTOM_SET_OCLOFF
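+ /* If ocl_off was requested, clear the "ocl_enable" iovar in firmware
+  * once the link comes up.
+  */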
+ if (dhd->ocl_off) {
+ int err = 0;
+ int ocl_enable = 0;
+ err = wldev_iovar_setint(ndev, "ocl_enable", ocl_enable);
+ if (err != 0) {
+ WL_ERR(("[WIFI_SEC] %s: Set ocl_enable %d failed %d\n",
+ __FUNCTION__, ocl_enable, err));
+ } else {
+ WL_ERR(("[WIFI_SEC] %s: Set ocl_enable %d succeeded %d\n",
+ __FUNCTION__, ocl_enable, err));
+ }
+ }
+#endif /* CUSTOM_SET_OCLOFF */
if (!wl_is_ibssmode(cfg, ndev))
return true;
}
return false;
}
+#ifdef WL_LASTEVT
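+/* WL_LASTEVT variant: deauth/disassoc events and a WLC_E_LINK event with
+ * the link flag cleared are treated as link-down; when event data is
+ * present, the last roam event seen before the disconnect is logged.
+ */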
+static bool wl_is_linkdown(struct bcm_cfg80211 *cfg, const wl_event_msg_t *e, void *data)
+{
+ u32 event = ntoh32(e->event_type);
+ u16 flags = ntoh16(e->flags);
+ wl_last_event_t *last_event = (wl_last_event_t *)data;
+ u32 len = ntoh32(e->datalen);
+
+ if (event == WLC_E_DEAUTH_IND ||
+ event == WLC_E_DISASSOC_IND ||
+ event == WLC_E_DISASSOC ||
+ event == WLC_E_DEAUTH) {
+ WL_ERR(("Link down Reason : %s\n", bcmevent_get_name(event)));
+ return true;
+ } else if (event == WLC_E_LINK) {
+ if (!(flags & WLC_EVENT_MSG_LINK)) {
+ if (last_event && len > 0) {
+ u32 current_time = last_event->current_time;
+ u32 timestamp = last_event->timestamp;
+ u32 event_type = last_event->event.event_type;
+ u32 status = last_event->event.status;
+ u32 reason = last_event->event.reason;
+
+ WL_ERR(("Last roam event before disconnection : current_time %d,"
+ " time %d, type %d, status %d, reason %d\n",
+ current_time, timestamp, event_type, status, reason));
+ }
+ WL_ERR(("Link down Reason : %s\n", bcmevent_get_name(event)));
+ return true;
+ }
+ }
+
+ return false;
+}
+#else
static bool wl_is_linkdown(struct bcm_cfg80211 *cfg, const wl_event_msg_t *e)
{
u32 event = ntoh32(e->event_type);
return false;
}
+#endif /* WL_LASTEVT */
static bool wl_is_nonetwork(struct bcm_cfg80211 *cfg, const wl_event_msg_t *e)
{
return true;
if (event == WLC_E_SET_SSID && status != WLC_E_STATUS_SUCCESS)
return true;
- if (event == WLC_E_ASSOC_RESP_IE && status != WLC_E_STATUS_SUCCESS)
- return true;
return false;
}
-#ifdef WL_SAE
-static s32
-wl_cfg80211_event_sae_key(struct bcm_cfg80211 *cfg, struct net_device *ndev,
- wl_sae_key_info_t *sae_key)
-{
- struct sk_buff *skb;
- gfp_t kflags;
- struct wiphy *wiphy = bcmcfg_to_wiphy(cfg);
- int err = BCME_OK;
-
- kflags = in_atomic() ? GFP_ATOMIC : GFP_KERNEL;
-#if (defined(CONFIG_ARCH_MSM) && defined(SUPPORT_WDEV_CFG80211_VENDOR_EVENT_ALLOC)) || \
- LINUX_VERSION_CODE >= KERNEL_VERSION(4, 1, 0)
- skb = cfg80211_vendor_event_alloc(wiphy, ndev_to_wdev(ndev), BRCM_SAE_VENDOR_EVENT_BUF_LEN,
- BRCM_VENDOR_EVENT_SAE_KEY, kflags);
-#else
- skb = cfg80211_vendor_event_alloc(wiphy, BRCM_SAE_VENDOR_EVENT_BUF_LEN,
- BRCM_VENDOR_EVENT_SAE_KEY, kflags);
-#endif /* (defined(CONFIG_ARCH_MSM) && defined(SUPPORT_WDEV_CFG80211_VENDOR_EVENT_ALLOC)) || */
- /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 1, 0) */
- if (!skb) {
- WL_ERR(("skb alloc failed"));
- err = BCME_NOMEM;
- goto done;
- }
-
- WL_INFORM_MEM(("Received Sae Key event for "MACDBG" key length %x %x",
- MAC2STRDBG(sae_key->peer_mac), sae_key->pmk_len, sae_key->pmkid_len));
- nla_put(skb, BRCM_SAE_KEY_ATTR_PEER_MAC, ETHER_ADDR_LEN, sae_key->peer_mac);
- nla_put(skb, BRCM_SAE_KEY_ATTR_PMK, sae_key->pmk_len, sae_key->pmk);
- nla_put(skb, BRCM_SAE_KEY_ATTR_PMKID, sae_key->pmkid_len, sae_key->pmkid);
- cfg80211_vendor_event(skb, kflags);
-
-done:
- return err;
-}
-
-static s32
-wl_bss_handle_sae_auth(struct bcm_cfg80211 *cfg, struct net_device *ndev,
- const wl_event_msg_t *event, void *data)
-{
- int err = BCME_OK;
- uint status = ntoh32(event->status);
- wl_auth_event_t *auth_data;
- wl_sae_key_info_t sae_key;
- uint16 tlv_buf_len;
-
- if (status == WLC_E_STATUS_SUCCESS) {
- auth_data = (wl_auth_event_t *)data;
- if (auth_data->version != WL_AUTH_EVENT_DATA_V1) {
- WL_ERR(("unknown auth event data version %x\n",
- auth_data->version));
- err = BCME_VERSION;
- goto done;
- }
-
- tlv_buf_len = auth_data->length - WL_AUTH_EVENT_FIXED_LEN_V1;
-
- /* check if PMK info present */
- sae_key.pmk = bcm_get_data_from_xtlv_buf(auth_data->xtlvs, tlv_buf_len,
- WL_AUTH_PMK_TLV_ID, &(sae_key.pmk_len), BCM_XTLV_OPTION_ALIGN32);
- if (!sae_key.pmk || !sae_key.pmk_len) {
- WL_ERR(("Mandatory PMK info not present"));
- err = BCME_NOTFOUND;
- goto done;
- }
- /* check if PMKID info present */
- sae_key.pmkid = bcm_get_data_from_xtlv_buf(auth_data->xtlvs, tlv_buf_len,
- WL_AUTH_PMKID_TLV_ID, &(sae_key.pmkid_len), BCM_XTLV_OPTION_ALIGN32);
- if (!sae_key.pmkid || !sae_key.pmkid_len) {
- WL_ERR(("Mandatory PMKID info not present\n"));
- err = BCME_NOTFOUND;
- goto done;
- }
- memcpy_s(sae_key.peer_mac, ETHER_ADDR_LEN, event->addr.octet, ETHER_ADDR_LEN);
- err = wl_cfg80211_event_sae_key(cfg, ndev, &sae_key);
- if (err) {
- WL_ERR(("Failed to event sae key info\n"));
- }
- } else {
- WL_ERR(("sae auth status failure:%d\n", status));
- }
-done:
- return err;
-}
-#endif /* WL_SAE */
-
-static s32
-wl_get_auth_assoc_status(struct bcm_cfg80211 *cfg, struct net_device *ndev,
- const wl_event_msg_t *e, void *data)
-{
- u32 reason = ntoh32(e->reason);
- u32 event = ntoh32(e->event_type);
-#ifdef WL_SAE
- uint auth_type = ntoh32(e->auth_type);
-#endif /* WL_SAE */
- struct wl_security *sec = wl_read_prof(cfg, ndev, WL_PROF_SEC);
- WL_DBG(("event type : %d, reason : %d\n", event, reason));
-
- if (sec) {
- switch (event) {
- case WLC_E_ASSOC:
- case WLC_E_AUTH:
- case WLC_E_AUTH_IND:
- sec->auth_assoc_res_status = reason;
-#ifdef WL_SAE
- if ((event == WLC_E_AUTH || event == WLC_E_AUTH_IND) &&
- auth_type == DOT11_SAE) {
- wl_bss_handle_sae_auth(cfg, ndev, e, data);
- }
-#endif /* WL_SAE */
- break;
- default:
- break;
- }
- } else {
- WL_ERR(("sec is NULL\n"));
- }
- return 0;
-}
-
/* The mainline kernel >= 3.2.0 has support for indicating new/del station
* to AP/P2P GO via events. If this change is backported to kernel for which
* this driver is being built, then define WL_CFG80211_STA_EVENT. You
u32 len = ntoh32(e->datalen);
u32 status = ntoh32(e->status);
-#if !defined(WL_CFG80211_STA_EVENT) && !defined(WL_COMPAT_WIRELESS) && \
- (LINUX_VERSION_CODE < KERNEL_VERSION(3, 2, 0))
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 2, 0)) && !defined(WL_CFG80211_STA_EVENT)
bool isfree = false;
u8 *mgmt_frame;
u8 bsscfgidx = e->bsscfgidx;
s32 channel;
u8 *body = NULL;
u16 fc = 0;
- u32 body_len = 0;
struct ieee80211_supported_band *band;
struct ether_addr da;
struct ether_addr bssid;
struct wiphy *wiphy = bcmcfg_to_wiphy(cfg);
channel_info_t ci;
- u8 ioctl_buf[WLC_IOCTL_SMLEN];
#else
struct station_info sinfo;
-#endif /* (LINUX_VERSION < VERSION(3,2,0)) && !WL_CFG80211_STA_EVENT && !WL_COMPAT_WIRELESS */
-
- WL_INFORM_MEM(("[%s] Mode AP/GO. Event:%d status:%d reason:%d\n",
- ndev->name, event, ntoh32(e->status), reason));
+#endif
- if (event == WLC_E_AUTH_IND) {
- wl_get_auth_assoc_status(cfg, ndev, e, data);
- return 0;
- }
+ WL_DBG(("event %d status %d reason %d\n", event, ntoh32(e->status), reason));
/* if link down, bsscfg is disabled. */
if (event == WLC_E_LINK && reason == WLC_E_LINK_BSSCFG_DIS &&
wl_get_p2p_status(cfg, IF_DELETING) && (ndev != bcmcfg_to_prmry_ndev(cfg))) {
wl_add_remove_eventmsg(ndev, WLC_E_PROBREQ_MSG, false);
- WL_MSG(ndev->name, "AP mode link down !! \n");
+ WL_INFORM(("AP mode link down !! \n"));
complete(&cfg->iface_disable);
return 0;
}
(wl_get_mode_by_netdev(cfg, ndev) == WL_MODE_AP)) {
if (!wl_get_drv_status(cfg, AP_CREATED, ndev)) {
/* AP/GO brought up successfull in firmware */
- WL_MSG(ndev->name, "AP/GO Link up\n");
+ printf("%s: ** AP/GO Link up event **\n", __FUNCTION__);
wl_set_drv_status(cfg, AP_CREATED, ndev);
- OSL_SMP_WMB();
wake_up_interruptible(&cfg->netif_change_event);
-#ifdef WL_BCNRECV
- /* check fakeapscan is in progress, if progress then abort */
- wl_android_bcnrecv_stop(ndev, WL_BCNRECV_CONCURRENCY);
-#endif /* WL_BCNRECV */
- wl_cfg80211_check_in4way(cfg, ndev, 0, WL_EXT_STATUS_AP_ENABLED, NULL);
+ if (!memcmp(ndev->name, WL_P2P_INTERFACE_PREFIX, strlen(WL_P2P_INTERFACE_PREFIX))) {
+ dhd_conf_set_mchan_bw(cfg->pub, WL_P2P_IF_GO, -1);
+ }
return 0;
}
}
if (event == WLC_E_DISASSOC_IND || event == WLC_E_DEAUTH_IND || event == WLC_E_DEAUTH) {
- WL_MSG_RLMT(ndev->name, &e->addr, ETHER_ADDR_LEN,
- "event %s(%d) status %d reason %d\n",
- bcmevent_get_name(event), event, ntoh32(e->status), reason);
+ printf("%s: event %s(%d) status %d reason %d\n", __FUNCTION__,
+ bcmevent_get_name(event), event, ntoh32(e->status), reason);
}
-#if !defined(WL_CFG80211_STA_EVENT) && !defined(WL_COMPAT_WIRELESS) && \
- (LINUX_VERSION_CODE < KERNEL_VERSION(3, 2, 0))
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 2, 0)) && !defined(WL_CFG80211_STA_EVENT)
WL_DBG(("Enter \n"));
if (!len && (event == WLC_E_DEAUTH)) {
len = 2; /* reason code field */
data = &reason;
}
if (len) {
- body = (u8 *)MALLOCZ(cfg->osh, len);
+ body = kzalloc(len, GFP_KERNEL);
+
if (body == NULL) {
- WL_ERR(("Failed to allocate body\n"));
+ WL_ERR(("wl_notify_connect_status: Failed to allocate body\n"));
return WL_INVALID;
}
}
- bzero(&bssid, ETHER_ADDR_LEN);
+ memset(&bssid, 0, ETHER_ADDR_LEN);
WL_DBG(("Enter event %d ndev %p\n", event, ndev));
if (wl_get_mode_by_netdev(cfg, ndev) == WL_INVALID) {
- MFREE(cfg->osh, body, len);
+ kfree(body);
return WL_INVALID;
}
if (len)
memcpy(body, data, len);
wldev_iovar_getbuf_bsscfg(ndev, "cur_etheraddr",
- NULL, 0, ioctl_buf, sizeof(ioctl_buf), bsscfgidx, NULL);
- memcpy(da.octet, ioctl_buf, ETHER_ADDR_LEN);
- bzero(&bssid, sizeof(bssid));
+ NULL, 0, cfg->ioctl_buf, WLC_IOCTL_SMLEN, bsscfgidx, &cfg->ioctl_buf_sync);
+ memcpy(da.octet, cfg->ioctl_buf, ETHER_ADDR_LEN);
+ memset(&bssid, 0, sizeof(bssid));
err = wldev_ioctl_get(ndev, WLC_GET_BSSID, &bssid, ETHER_ADDR_LEN);
switch (event) {
case WLC_E_ASSOC_IND:
fc = 0;
goto exit;
}
- bzero(&ci, sizeof(ci));
+ memset(&ci, 0, sizeof(ci));
if ((err = wldev_ioctl_get(ndev, WLC_GET_CHANNEL, &ci, sizeof(ci)))) {
- MFREE(cfg->osh, body, len);
+ kfree(body);
return err;
}
band = wiphy->bands[IEEE80211_BAND_5GHZ];
if (!band) {
WL_ERR(("No valid band\n"));
- if (body) {
- MFREE(cfg->osh, body, len);
- }
+ if (body)
+ kfree(body);
return -EINVAL;
}
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 39) && !defined(WL_COMPAT_WIRELESS)
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 39)
freq = ieee80211_channel_to_frequency(channel);
(void)band->band;
#else
freq = ieee80211_channel_to_frequency(channel, band->band);
-#endif // endif
- body_len = len;
- err = wl_frame_get_mgmt(cfg, fc, &da, &e->addr, &bssid,
+#endif
+
+ err = wl_frame_get_mgmt(fc, &da, &e->addr, &bssid,
&mgmt_frame, &len, body);
if (err < 0)
goto exit;
cfg80211_rx_mgmt(ndev, freq, 0, mgmt_frame, len, GFP_ATOMIC);
#else
cfg80211_rx_mgmt(ndev, freq, mgmt_frame, len, GFP_ATOMIC);
-#endif /* LINUX_VERSION >= VERSION(3, 18,0) || WL_COMPAT_WIRELESS */
+#endif
}
exit:
- if (isfree) {
- MFREE(cfg->osh, mgmt_frame, len);
- }
- if (body) {
- MFREE(cfg->osh, body, body_len);
- }
+ if (isfree)
+ kfree(mgmt_frame);
+ if (body)
+ kfree(body);
#else /* LINUX_VERSION < VERSION(3,2,0) && !WL_CFG80211_STA_EVENT && !WL_COMPAT_WIRELESS */
- memset(&sinfo, 0, sizeof(struct station_info));
sinfo.filled = 0;
if (((event == WLC_E_ASSOC_IND) || (event == WLC_E_REASSOC_IND)) &&
reason == DOT11_SC_SUCCESS) {
}
sinfo.assoc_req_ies = data;
sinfo.assoc_req_ies_len = len;
- WL_MSG(ndev->name, "new sta event for "MACDBG "\n",
- MAC2STRDBG(e->addr.octet));
+ printf("%s: connected device "MACDBG"\n", __FUNCTION__, MAC2STRDBG(e->addr.octet));
wl_cfg80211_check_in4way(cfg, ndev, DONT_DELETE_GC_AFTER_WPS,
- WL_EXT_STATUS_STA_CONNECTED, NULL);
+ WL_EXT_STATUS_GC_CONNECTED, NULL);
cfg80211_new_sta(ndev, e->addr.octet, &sinfo, GFP_ATOMIC);
-#ifdef WL_WPS_SYNC
- wl_wps_session_update(ndev, WPS_STATE_LINKUP, e->addr.octet);
-#endif /* WL_WPS_SYNC */
+ } else if (event == WLC_E_DISASSOC_IND) {
+ printf("%s: disassociated device "MACDBG"\n", __FUNCTION__, MAC2STRDBG(e->addr.octet));
+ wl_cfg80211_check_in4way(cfg, ndev, DONT_DELETE_GC_AFTER_WPS,
+ WL_EXT_STATUS_GC_DISCONNECTED, NULL);
+ cfg80211_del_sta(ndev, e->addr.octet, GFP_ATOMIC);
} else if ((event == WLC_E_DEAUTH_IND) ||
- ((event == WLC_E_DEAUTH) && (reason != DOT11_RC_RESERVED)) ||
- (event == WLC_E_DISASSOC_IND)) {
- WL_MSG_RLMT(ndev->name, &e->addr, ETHER_ADDR_LEN,
- "del sta event for "MACDBG "\n", MAC2STRDBG(e->addr.octet));
+ ((event == WLC_E_DEAUTH) && (reason != DOT11_RC_RESERVED))) {
+ printf("%s: deauthenticated device "MACDBG"\n", __FUNCTION__, MAC2STRDBG(e->addr.octet));
wl_cfg80211_check_in4way(cfg, ndev, DONT_DELETE_GC_AFTER_WPS,
- WL_EXT_STATUS_STA_DISCONNECTED, NULL);
+ WL_EXT_STATUS_GC_DISCONNECTED, NULL);
cfg80211_del_sta(ndev, e->addr.octet, GFP_ATOMIC);
-#ifdef WL_WPS_SYNC
- wl_wps_session_update(ndev, WPS_STATE_LINKDOWN, e->addr.octet);
-#endif /* WL_WPS_SYNC */
}
-#endif /* LINUX_VERSION < VERSION(3,2,0) && !WL_CFG80211_STA_EVENT && !WL_COMPAT_WIRELESS */
+#endif
return err;
}
-#ifdef WL_CLIENT_SAE
-static s32
-wl_notify_start_auth(struct bcm_cfg80211 *cfg,
- bcm_struct_cfgdev *cfgdev, const wl_event_msg_t *e, void *data)
+#if defined(DHD_ENABLE_BIGDATA_LOGGING)
+#define MAX_ASSOC_REJECT_ERR_STATUS 5
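+/* Record why the last connect attempt failed: the auth/assoc event status
+ * is mapped to a small code (1..MAX_ASSOC_REJECT_ERR_STATUS, offset by the
+ * same amount for assoc-stage failures) and reported as a string by
+ * wl_cfg80211_get_connect_failed_status().
+ */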
+int wl_get_connect_failed_status(struct bcm_cfg80211 *cfg, const wl_event_msg_t *e)
{
- struct cfg80211_external_auth_params ext_auth_param;
- struct net_device *ndev = cfgdev_to_wlc_ndev(cfgdev, cfg);
- u32 datalen = be32_to_cpu(e->datalen);
- wl_ext_auth_evt_t *evt_data = (wl_ext_auth_evt_t *)data;
- wl_assoc_mgr_cmd_t cmd;
- int err;
+ u32 status = ntoh32(e->status);
- WL_DBG(("Enter\n"));
+ cfg->assoc_reject_status = 0;
- if (!datalen || !data)
- return BCME_ERROR;
+ if (status == WLC_E_STATUS_FAIL) {
+ WL_ERR(("auth assoc status event=%d e->status %d e->reason %d \n",
+ ntoh32(cfg->event_auth_assoc.event_type),
+ (int)ntoh32(cfg->event_auth_assoc.status),
+ (int)ntoh32(cfg->event_auth_assoc.reason)));
- ext_auth_param.ssid.ssid_len = MIN(evt_data->ssid.SSID_len, DOT11_MAX_SSID_LEN);
- if (ext_auth_param.ssid.ssid_len)
- memcpy(&ext_auth_param.ssid.ssid, evt_data->ssid.SSID,
- ext_auth_param.ssid.ssid_len);
+ switch ((int)ntoh32(cfg->event_auth_assoc.status)) {
+ case WLC_E_STATUS_NO_ACK:
+ cfg->assoc_reject_status = 1;
+ break;
+ case WLC_E_STATUS_FAIL:
+ cfg->assoc_reject_status = 2;
+ break;
+ case WLC_E_STATUS_UNSOLICITED:
+ cfg->assoc_reject_status = 3;
+ break;
+ case WLC_E_STATUS_TIMEOUT:
+ cfg->assoc_reject_status = 4;
+ break;
+ case WLC_E_STATUS_ABORT:
+ cfg->assoc_reject_status = 5;
+ break;
+ default:
+ break;
+ }
+ if (cfg->assoc_reject_status) {
+ if (ntoh32(cfg->event_auth_assoc.event_type) == WLC_E_ASSOC) {
+ cfg->assoc_reject_status += MAX_ASSOC_REJECT_ERR_STATUS;
+ }
+ }
+ }
- memcpy(&ext_auth_param.bssid, &evt_data->bssid, ETHER_ADDR_LEN);
- ext_auth_param.action = NL80211_EXTERNAL_AUTH_START;
- ext_auth_param.key_mgmt_suite = ntoh32(WLAN_AKM_SUITE_SAE_SHA256);
+ WL_ERR(("assoc_reject_status %d \n", cfg->assoc_reject_status));
- WL_MSG(ndev->name, "BSSID: "MACDBG"\n", MAC2STRDBG(&evt_data->bssid));
+ return 0;
+}
- cfg80211_external_auth_request(ndev, &ext_auth_param, GFP_KERNEL);
+s32 wl_cfg80211_get_connect_failed_status(struct net_device *dev, char* cmd, int total_len)
+{
+ struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
+ int bytes_written = 0;
- cmd.version = WL_ASSOC_MGR_CURRENT_VERSION;
- cmd.length = sizeof(cmd);
- cmd.cmd = WL_ASSOC_MGR_CMD_PAUSE_ON_EVT;
- cmd.params = WL_ASSOC_MGR_PARAMS_PAUSE_EVENT_AUTH_RESP;
- err = wldev_iovar_setbuf(ndev, "assoc_mgr_cmd", (void *)&cmd, sizeof(cmd),
- cfg->ioctl_buf, WLC_IOCTL_SMLEN, &cfg->ioctl_buf_sync);
- if (unlikely(err)) {
- WL_ERR(("Failed to pause assoc(%d)\n", err));
+ if (cfg == NULL) {
+ return -1;
}
- return BCME_OK;
+ memset(cmd, 0, total_len);
+ bytes_written = snprintf(cmd, 30, "assoc_reject.status %d", cfg->assoc_reject_status);
+
+ WL_ERR(("cmd: %s \n", cmd));
+
+ return bytes_written;
}
+#endif /* DHD_ENABLE_BIGDATA_LOGGING */
static s32
-wl_notify_connect_status_bss(struct bcm_cfg80211 *cfg, struct net_device *ndev,
- const wl_event_msg_t *e, void *data)
+wl_get_auth_assoc_status(struct bcm_cfg80211 *cfg, struct net_device *ndev,
+ const wl_event_msg_t *e)
{
- s32 err = 0;
- u32 event = ntoh32(e->event_type);
u32 reason = ntoh32(e->reason);
- u32 len = ntoh32(e->datalen);
- u32 status = ntoh32(e->status);
+ u32 event = ntoh32(e->event_type);
+ struct wl_security *sec = wl_read_prof(cfg, ndev, WL_PROF_SEC);
+ WL_DBG(("event type : %d, reason : %d\n", event, reason));
- bool isfree = false;
- u8 *mgmt_frame;
- u8 bsscfgidx = e->bsscfgidx;
- s32 freq;
- s32 channel;
- u8 *body = NULL;
- u16 fc = 0, rssi = 0;
- bcm_struct_cfgdev *cfgdev = ndev_to_cfgdev(ndev);
+#if defined(DHD_ENABLE_BIGDATA_LOGGING)
+ memcpy(&cfg->event_auth_assoc, e, sizeof(wl_event_msg_t));
+ WL_ERR(("event=%d status %d reason %d \n",
+ ntoh32(cfg->event_auth_assoc.event_type),
+ ntoh32(cfg->event_auth_assoc.status),
+ ntoh32(cfg->event_auth_assoc.reason)));
+#endif /* DHD_ENABLE_BIGDATA_LOGGING */
+ if (sec) {
+ switch (event) {
+ case WLC_E_ASSOC:
+ case WLC_E_AUTH:
+ sec->auth_assoc_res_status = reason;
+ default:
+ break;
+ }
+ } else
+ WL_ERR(("sec is NULL\n"));
+ return 0;
+}
- struct ieee80211_supported_band *band;
- struct ether_addr da;
- struct ether_addr bssid;
+static s32
+wl_notify_connect_status_ibss(struct bcm_cfg80211 *cfg, struct net_device *ndev,
+ const wl_event_msg_t *e, void *data)
+{
+ s32 err = 0;
+ u32 event = ntoh32(e->event_type);
+ u16 flags = ntoh16(e->flags);
+ u32 status = ntoh32(e->status);
+ bool active;
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 15, 0)
+ struct ieee80211_channel *channel = NULL;
struct wiphy *wiphy = bcmcfg_to_wiphy(cfg);
- channel_info_t ci;
-
- WL_DBG(("event %d status %d reason %d\n", event, status, reason));
-
- if (event == WLC_E_AUTH) {
- struct wl_security *sec;
- sec = wl_read_prof(cfg, ndev, WL_PROF_SEC);
+ u32 chanspec, chan;
+ u32 freq, band;
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 15, 0) */
- if (!(sec->auth_type == NL80211_AUTHTYPE_SAE)) {
- WL_DBG(("Abort AUTH processing due to NOT SAE\n"));
- return 0;
- } else {
- if (status != WLC_E_STATUS_SUCCESS && !len) {
- WL_ERR(("SAE AUTH FAIL EVENT\n"));
- wl_cfg80211_check_in4way(cfg, ndev, NO_SCAN_IN4WAY|NO_BTC_IN4WAY|WAIT_DISCONNECTED,
- WL_EXT_STATUS_DISCONNECTED, NULL);
- return 0;
- }
- }
+ if (event == WLC_E_JOIN) {
+ WL_DBG(("joined in IBSS network\n"));
}
-
- if (!len && (event == WLC_E_DEAUTH)) {
- len = 2; /* reason code field */
- data = &reason;
- }
-
- if (len) {
- body = kzalloc(len, GFP_KERNEL);
- if (body == NULL) {
- WL_ERR(("wl_notify_connect_status: Failed to allocate body\n"));
- return WL_INVALID;
- }
- }
-
- memset(&bssid, 0, ETHER_ADDR_LEN);
- if (wl_get_mode_by_netdev(cfg, ndev) == WL_INVALID) {
- kfree(body);
- return WL_INVALID;
- }
- if (len)
- memcpy(body, data, len);
-
- wldev_iovar_getbuf_bsscfg(ndev, "cur_etheraddr",
- NULL, 0, cfg->ioctl_buf, WLC_IOCTL_SMLEN, bsscfgidx, &cfg->ioctl_buf_sync);
- memcpy(da.octet, cfg->ioctl_buf, ETHER_ADDR_LEN);
- err = wldev_ioctl_get(ndev, WLC_GET_BSSID, &bssid, ETHER_ADDR_LEN);
- /* Use e->addr as bssid for Sta case , before association completed */
- if (err == BCME_NOTASSOCIATED)
- memcpy(&bssid, &e->addr, ETHER_ADDR_LEN);
-
- switch (event) {
- case WLC_E_ASSOC_IND:
- fc = FC_ASSOC_REQ;
- break;
- case WLC_E_REASSOC_IND:
- fc = FC_REASSOC_REQ;
- break;
- case WLC_E_DISASSOC_IND:
- fc = FC_DISASSOC;
- break;
- case WLC_E_DEAUTH_IND:
- fc = FC_DISASSOC;
- break;
- case WLC_E_DEAUTH:
- fc = FC_DISASSOC;
- break;
- case WLC_E_AUTH:
- fc = FC_AUTH;
- break;
- default:
- fc = 0;
- goto exit;
- }
- if ((err = wldev_ioctl_get(ndev, WLC_GET_CHANNEL, &ci, sizeof(ci)))) {
- kfree(body);
- return err;
- }
-
- channel = dtoh32(ci.hw_channel);
- if (channel <= CH_MAX_2G_CHANNEL)
- band = wiphy->bands[IEEE80211_BAND_2GHZ];
- else
- band = wiphy->bands[IEEE80211_BAND_5GHZ];
- if (!band) {
- WL_ERR(("No valid band\n"));
- if (body)
- kfree(body);
- return -EINVAL;
- }
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 39)
- freq = ieee80211_channel_to_frequency(channel);
- (void)band->band;
-#else
- freq = ieee80211_channel_to_frequency(channel, band->band);
-#endif
-
- err = wl_frame_get_mgmt(cfg, fc, &da, &e->addr, &bssid,
- &mgmt_frame, &len, body);
- if (err < 0) {
- goto exit;
- }
- isfree = true;
-
- if (event == WLC_E_ASSOC_IND && reason == DOT11_SC_SUCCESS) {
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 18, 0))
- cfg80211_rx_mgmt(cfgdev, freq, rssi, mgmt_frame, len, 0);
-#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 4, 0))
- cfg80211_rx_mgmt(ndev, freq, 0, mgmt_frame, len, GFP_ATOMIC);
-#else
- cfg80211_rx_mgmt(ndev, freq, mgmt_frame, len, GFP_ATOMIC);
-#endif
- } else if (event == WLC_E_DISASSOC_IND) {
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 18, 0))
- cfg80211_rx_mgmt(cfgdev, freq, rssi, mgmt_frame, len, 0);
-#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 4, 0))
- cfg80211_rx_mgmt(ndev, freq, 0, mgmt_frame, len, GFP_ATOMIC);
-#else
- cfg80211_rx_mgmt(ndev, freq, mgmt_frame, len, GFP_ATOMIC);
-#endif
- } else if ((event == WLC_E_DEAUTH_IND) || (event == WLC_E_DEAUTH)) {
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 18, 0))
- cfg80211_rx_mgmt(cfgdev, freq, rssi, mgmt_frame, len, 0);
-#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 4, 0))
- cfg80211_rx_mgmt(ndev, freq, 0, mgmt_frame, len, GFP_ATOMIC);
-#else
- cfg80211_rx_mgmt(ndev, freq, mgmt_frame, len, GFP_ATOMIC);
-#endif
- } else if (event == WLC_E_AUTH) {
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 18, 0))
- cfg80211_rx_mgmt(cfgdev, freq, rssi, mgmt_frame, len, 0);
-#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 4, 0))
- cfg80211_rx_mgmt(ndev, freq, 0, mgmt_frame, len, GFP_ATOMIC);
-#else
- cfg80211_rx_mgmt(ndev, freq, mgmt_frame, len, GFP_ATOMIC);
-#endif
- }
-exit:
- if (isfree)
- kfree(mgmt_frame);
- if (body)
- kfree(body);
- return err;
-}
-#endif /* WL_CLIENT_SAE */
-
-static s32
-wl_notify_connect_status_ibss(struct bcm_cfg80211 *cfg, struct net_device *ndev,
- const wl_event_msg_t *e, void *data)
-{
- s32 err = 0;
- u32 event = ntoh32(e->event_type);
- u16 flags = ntoh16(e->flags);
- u32 status = ntoh32(e->status);
- bool active;
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 15, 0)
- struct ieee80211_channel *channel = NULL;
- struct wiphy *wiphy = bcmcfg_to_wiphy(cfg);
- u32 chanspec, chan;
- u32 freq, band;
-#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 15, 0) */
-
- if (event == WLC_E_JOIN) {
- WL_INFORM_MEM(("[%s] joined in IBSS network\n", ndev->name));
- }
- if (event == WLC_E_START) {
- WL_INFORM_MEM(("[%s] started IBSS network\n", ndev->name));
+ if (event == WLC_E_START) {
+ WL_DBG(("started IBSS network\n"));
}
if (event == WLC_E_JOIN || event == WLC_E_START ||
(event == WLC_E_LINK && (flags == WLC_EVENT_MSG_LINK))) {
MACDBG "), ignore it\n", MAC2STRDBG(cur_bssid)));
return err;
}
- WL_INFORM_MEM(("[%s] IBSS BSSID is changed from " MACDBG " to " MACDBG "\n",
- ndev->name, MAC2STRDBG(cur_bssid),
- MAC2STRDBG((const u8 *)&e->addr)));
+ WL_INFORM(("IBSS BSSID is changed from " MACDBG " to " MACDBG "\n",
+ MAC2STRDBG(cur_bssid), MAC2STRDBG((const u8 *)&e->addr)));
wl_get_assoc_ies(cfg, ndev);
wl_update_prof(cfg, ndev, NULL, (const void *)&e->addr, WL_PROF_BSSID);
wl_update_bss_info(cfg, ndev, false);
cfg80211_ibss_joined(ndev, (const s8 *)&e->addr, channel, GFP_KERNEL);
#else
cfg80211_ibss_joined(ndev, (const s8 *)&e->addr, GFP_KERNEL);
-#endif // endif
+#endif
}
else {
/* New connection */
- WL_INFORM_MEM(("[%s] IBSS connected to " MACDBG "\n",
- ndev->name, MAC2STRDBG((const u8 *)&e->addr)));
+ WL_INFORM(("IBSS connected to " MACDBG "\n",
+ MAC2STRDBG((const u8 *)&e->addr)));
wl_link_up(cfg);
wl_get_assoc_ies(cfg, ndev);
wl_update_prof(cfg, ndev, NULL, (const void *)&e->addr, WL_PROF_BSSID);
cfg80211_ibss_joined(ndev, (const s8 *)&e->addr, channel, GFP_KERNEL);
#else
cfg80211_ibss_joined(ndev, (const s8 *)&e->addr, GFP_KERNEL);
-#endif // endif
+#endif
wl_set_drv_status(cfg, CONNECTED, ndev);
active = true;
wl_update_prof(cfg, ndev, NULL, (const void *)&active, WL_PROF_ACT);
wl_init_prof(cfg, ndev);
}
else if (event == WLC_E_SET_SSID && status == WLC_E_STATUS_NO_NETWORKS) {
- WL_INFORM_MEM(("no action - join fail (IBSS mode)\n"));
+ WL_DBG(("no action - join fail (IBSS mode)\n"));
}
else {
WL_DBG(("no action (IBSS mode)\n"));
return err;
}
-void wl_cfg80211_disassoc(struct net_device *ndev, uint32 reason)
+#if defined(DHD_ENABLE_BIGDATA_LOGGING)
+#define WiFiALL_OUI "\x50\x6F\x9A" /* Wi-FiAll OUI */
+#define WiFiALL_OUI_LEN 3
+#define WiFiALL_OUI_TYPE 16
+
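+/* Build a one-line summary of the associated BSS (frequency, bandwidth,
+ * RSSI, rate, 802.11 mode, spatial streams, MU-MIMO/Passpoint hints, SNR,
+ * noise, AKM and roam count) into cfg->bss_info for later retrieval via
+ * wl_cfg80211_get_bss_info().
+ */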
+int wl_get_bss_info(struct bcm_cfg80211 *cfg, struct net_device *dev, uint8 *mac)
{
- scb_val_t scbval;
- s32 err;
- struct bcm_cfg80211 *cfg = wl_get_cfg(ndev);
- dhd_pub_t *dhdp = (dhd_pub_t *)(cfg->pub);
+ s32 err = 0;
+ struct wl_bss_info *bi;
+ uint8 eabuf[ETHER_ADDR_LEN];
+ u32 rate, channel, freq, supported_rate, nss = 0, mcs_map, mode_80211 = 0;
+ char rate_str[4];
+ u8 *ie = NULL;
+ u32 ie_len;
+ struct wiphy *wiphy;
+ struct cfg80211_bss *bss;
+ bcm_tlv_t *interworking_ie = NULL;
+ bcm_tlv_t *tlv_ie = NULL;
+ bcm_tlv_t *vht_ie = NULL;
+ vndr_ie_t *vndrie;
+ int16 ie_11u_rel_num = -1, ie_mu_mimo_cap = -1;
+ u32 i, remained_len, count = 0;
+ char roam_count_str[4], akm_str[4];
+ s32 val = 0;
- BCM_REFERENCE(cfg);
- BCM_REFERENCE(dhdp);
- DHD_STATLOG_CTRL(dhdp, ST(DISASSOC_INT_START),
- dhd_net2idx(dhdp->info, ndev), WLAN_REASON_DEAUTH_LEAVING);
+ /* get BSS information */
- memset_s(&scbval, sizeof(scb_val_t), 0x0, sizeof(scb_val_t));
- scbval.val = htod32(reason);
- err = wldev_ioctl_set(ndev, WLC_DISASSOC, &scbval, sizeof(scb_val_t));
- if (err < 0) {
- WL_ERR(("WLC_DISASSOC error %d\n", err));
- }
-}
-void wl_cfg80211_del_all_sta(struct net_device *ndev, uint32 reason)
-{
- struct net_device *dev;
- struct bcm_cfg80211 *cfg = wl_get_cfg(ndev);
- scb_val_t scb_val;
- int err;
- char mac_buf[MAX_NUM_OF_ASSOCIATED_DEV *
- sizeof(struct ether_addr) + sizeof(uint)] = {0};
- struct maclist *assoc_maclist = (struct maclist *)mac_buf;
- int num_associated = 0;
+ strncpy(cfg->bss_info, "x x x x x x x x x x x x x", GET_BSS_INFO_LEN);
- dev = ndev_to_wlc_ndev(ndev, cfg);
+ memset(cfg->extra_buf, 0, WL_EXTRA_BUF_MAX);
+ *(u32 *) cfg->extra_buf = htod32(WL_EXTRA_BUF_MAX);
- if (p2p_is_on(cfg)) {
- /* Suspend P2P discovery search-listen to prevent it from changing the
- * channel.
- */
- if ((wl_cfgp2p_discover_enable_search(cfg, false)) < 0) {
- WL_ERR(("Can not disable discovery mode\n"));
- return;
- }
+ err = wldev_ioctl_get(dev, WLC_GET_BSS_INFO, cfg->extra_buf, WL_EXTRA_BUF_MAX);
+ if (unlikely(err)) {
+ WL_ERR(("Could not get bss info %d\n", err));
+ cfg->roam_count = 0;
+ return -1;
}
- assoc_maclist->count = MAX_NUM_OF_ASSOCIATED_DEV;
- err = wldev_ioctl_get(ndev, WLC_GET_ASSOCLIST,
- assoc_maclist, sizeof(mac_buf));
- if (err < 0)
- WL_ERR(("WLC_GET_ASSOCLIST error %d\n", err));
- else
- num_associated = assoc_maclist->count;
-
- memset(scb_val.ea.octet, 0xff, ETHER_ADDR_LEN);
- scb_val.val = DOT11_RC_DEAUTH_LEAVING;
- scb_val.val = htod32(reason);
- err = wldev_ioctl_set(dev, WLC_SCB_DEAUTHENTICATE_FOR_REASON, &scb_val,
- sizeof(scb_val_t));
- if (err < 0) {
- WL_ERR(("WLC_SCB_DEAUTHENTICATE_FOR_REASON err %d\n", err));
+ if (!mac) {
+ WL_ERR(("mac is null \n"));
+ cfg->roam_count = 0;
+ return -1;
}
- if (num_associated > 0)
- wl_delay(400);
+ memcpy(eabuf, mac, ETHER_ADDR_LEN);
- return;
-}
-/* API to handle the Deauth from the AP.
-* For now we are deleting the PMKID cache in DHD/FW
-* in case of current connection is using SAE authnetication
-*/
-static s32
-wl_cfg80211_handle_deauth_ind(struct bcm_cfg80211 *cfg, struct net_device *ndev,
- const wl_event_msg_t *e, void *data)
-{
- int err = BCME_OK;
-#ifdef WL_SAE
- uint8 bssid[ETHER_ADDR_LEN];
- struct cfg80211_pmksa pmksa;
- s32 val = 0;
+ bi = (struct wl_bss_info *)(cfg->extra_buf + 4);
+ channel = wf_chspec_ctlchan(bi->chanspec);
- err = wldev_iovar_getint(ndev, "wpa_auth", &val);
- if (unlikely(err)) {
- WL_ERR(("could not get wpa_auth (%d)\n", err));
- goto done;
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 39)
+ freq = ieee80211_channel_to_frequency(channel);
+#else
+ if (channel > 14) {
+ freq = ieee80211_channel_to_frequency(channel, IEEE80211_BAND_5GHZ);
+ } else {
+ freq = ieee80211_channel_to_frequency(channel, IEEE80211_BAND_2GHZ);
}
- if (val == WPA3_AUTH_SAE_PSK) {
- (void)memcpy_s(bssid, ETHER_ADDR_LEN,
- (const uint8*)&e->addr, ETHER_ADDR_LEN);
- memset_s(&pmksa, sizeof(pmksa), 0, sizeof(pmksa));
- pmksa.bssid = bssid;
- WL_INFORM_MEM(("Deleting the PMKSA for SAE AP "MACDBG,
- MAC2STRDBG(e->addr.octet)));
- wl_cfg80211_del_pmksa(cfg->wdev->wiphy, ndev, &pmksa);
+#endif
+ rate = 0;
+ err = wldev_ioctl_get(dev, WLC_GET_RATE, &rate, sizeof(rate));
+ if (err) {
+ WL_ERR(("Could not get rate (%d)\n", err));
+ snprintf(rate_str, sizeof(rate_str), "x"); // Unknown
+
+ } else {
+ rate = dtoh32(rate);
+ snprintf(rate_str, sizeof(rate_str), "%d", (rate/2));
}
-done:
-#endif /* WL_SAE */
- return err;
-}
-static void
-wl_cache_assoc_resp_ies(struct bcm_cfg80211 *cfg, struct net_device *ndev,
- const wl_event_msg_t *e, void *data)
-{
- struct wl_connect_info *conn_info = wl_to_conn(cfg);
- u32 datalen = ntoh32(e->datalen);
- u32 event_type = ntoh32(e->event_type);
+ // maximum supported rate, in Mbps
+ supported_rate = (bi->rateset.rates[bi->rateset.count - 1] & 0x7f) / 2;
- if (datalen > VNDR_IE_MIN_LEN &&
- datalen < VNDR_IE_MAX_LEN &&
- data) {
- conn_info->resp_ie_len = datalen;
- WL_DBG((" assoc resp IES len = %d\n", conn_info->resp_ie_len));
- bzero(conn_info->resp_ie, sizeof(conn_info->resp_ie));
- (void)memcpy_s(conn_info->resp_ie, sizeof(conn_info->resp_ie),
- data, datalen);
+ if (supported_rate < 12) {
+ mode_80211 = 0; // 11b: maximum rate is 11 Mbps
+ } else {
+ // non-HT assumed here; 11a vs 11g by band (HT/VHT checked below)
+ if (channel > 14) {
+ mode_80211 = 3; // 11a mode
+ } else {
+ mode_80211 = 1; // 11g mode
+ }
+ }
- WL_INFORM_MEM(("[%s] copied assoc resp ies, sent to upper layer:"
- "event %d reason=%d ie_len=%d from " MACDBG "\n",
- ndev->name, event_type, ntoh32(e->reason), datalen,
- MAC2STRDBG((const u8*)(&e->addr))));
+ if (bi->n_cap) {
+ /* check Rx MCS Map for HT */
+ nss = 0;
+ mode_80211 = 2;
+ for (i = 0; i < MAX_STREAMS_SUPPORTED; i++) {
+ int8 bitmap = 0xFF;
+ if (i == MAX_STREAMS_SUPPORTED-1) {
+ bitmap = 0x7F;
+ }
+ if (bi->basic_mcs[i] & bitmap) {
+ nss++;
+ }
+ }
}
-}
-#ifdef WLMESH_CFG80211
-static s32
-wl_notify_connect_status_mesh(struct bcm_cfg80211 *cfg, struct net_device *ndev,
- const wl_event_msg_t *e, void *data)
-{
- s32 err = 0;
- u32 event = ntoh32(e->event_type);
- u32 reason = ntoh32(e->reason);
- u32 len = ntoh32(e->datalen);
- u32 status = ntoh32(e->status);
+ if (bi->vht_cap) {
+ nss = 0;
+ mode_80211 = 4;
+ for (i = 1; i <= VHT_CAP_MCS_MAP_NSS_MAX; i++) {
+ mcs_map = VHT_MCS_MAP_GET_MCS_PER_SS(i, dtoh16(bi->vht_rxmcsmap));
+ if (mcs_map != VHT_CAP_MCS_MAP_NONE) {
+ nss++;
+ }
+ }
+ }
-#if !defined(WL_CFG80211_STA_EVENT) && !defined(WL_COMPAT_WIRELESS) && \
- (LINUX_VERSION_CODE < KERNEL_VERSION(3, 2, 0))
- bool isfree = false;
- u8 *mgmt_frame;
- u8 bsscfgidx = e->bsscfgidx;
- s32 freq;
- s32 channel;
- u8 *body = NULL;
- u16 fc = 0;
- u32 body_len = 0;
+ if (nss) {
+ nss = nss - 1;
+ }
- struct ieee80211_supported_band *band;
- struct ether_addr da;
- struct ether_addr bssid;
- struct wiphy *wiphy = bcmcfg_to_wiphy(cfg);
- channel_info_t ci;
- u8 ioctl_buf[WLC_IOCTL_SMLEN];
+ wiphy = bcmcfg_to_wiphy(cfg);
+ bss = CFG80211_GET_BSS(wiphy, NULL, eabuf, bi->SSID, bi->SSID_len);
+ if (!bss) {
+ WL_ERR(("Could not find the AP\n"));
+ } else {
+#if defined(WL_CFG80211_P2P_DEV_IF)
+ ie = (u8 *)bss->ies->data;
+ ie_len = bss->ies->len;
#else
- struct station_info sinfo;
-#endif /* (LINUX_VERSION < VERSION(3,2,0)) && !WL_CFG80211_STA_EVENT && !WL_COMPAT_WIRELESS */
+ ie = bss->information_elements;
+ ie_len = bss->len_information_elements;
+#endif /* WL_CFG80211_P2P_DEV_IF */
+ }
- WL_INFORM_MEM(("[%s] Mode Mesh. Event:%d status:%d reason:%d\n",
- ndev->name, event, ntoh32(e->status), reason));
+ if (ie) {
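+ /* Walk the vendor-specific IEs for the Wi-FiAll OUI to derive the
+  * Passpoint (11u) release number; the MU-MIMO bit is read from the
+  * VHT capabilities IE when the AP is VHT capable.
+  */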
+ ie_mu_mimo_cap = 0;
+ ie_11u_rel_num = 0;
- /* if link down, bsscfg is disabled. */
- if (event == WLC_E_LINK && reason == WLC_E_LINK_BSSCFG_DIS &&
- (ndev != bcmcfg_to_prmry_ndev(cfg))) {
- WL_MSG(ndev->name, "Mesh mode link down !! \n");
- return 0;
- }
+ if (bi->vht_cap) {
+ if ((vht_ie = bcm_parse_tlvs(ie, (u32)ie_len,
+ DOT11_MNG_VHT_CAP_ID)) != NULL) {
+ ie_mu_mimo_cap = (vht_ie->data[2] & 0x08) >> 3;
+ }
+ }
- if ((event == WLC_E_LINK) && (status == WLC_E_STATUS_SUCCESS) &&
- (reason == WLC_E_REASON_INITIAL_ASSOC)) {
- /* AP/GO brought up successfull in firmware */
- WL_MSG(ndev->name, "Mesh Link up\n");
- return 0;
- }
+ if ((interworking_ie = bcm_parse_tlvs(ie, (u32)ie_len,
+ DOT11_MNG_INTERWORKING_ID)) != NULL) {
+ if ((tlv_ie = bcm_parse_tlvs(ie, (u32)ie_len, DOT11_MNG_VS_ID)) != NULL) {
+ remained_len = ie_len;
- if (event == WLC_E_DISASSOC_IND || event == WLC_E_DEAUTH_IND || event == WLC_E_DEAUTH) {
- WL_MSG(ndev->name, "event %s(%d) status %d reason %d\n",
- bcmevent_get_name(event), event, ntoh32(e->status), reason);
- }
+ while (tlv_ie) {
+ if (count > MAX_VNDR_IE_NUMBER)
+ break;
-#if !defined(WL_CFG80211_STA_EVENT) && !defined(WL_COMPAT_WIRELESS) && \
- (LINUX_VERSION_CODE < KERNEL_VERSION(3, 2, 0))
- WL_DBG(("Enter \n"));
- if (!len && (event == WLC_E_DEAUTH)) {
- len = 2; /* reason code field */
- data = &reason;
- }
- if (len) {
- body = (u8 *)MALLOCZ(cfg->osh, len);
- if (body == NULL) {
- WL_ERR(("Failed to allocate body\n"));
- return WL_INVALID;
+ if (tlv_ie->id == DOT11_MNG_VS_ID) {
+ vndrie = (vndr_ie_t *) tlv_ie;
+
+ if (vndrie->len < (VNDR_IE_MIN_LEN + 1)) {
+ WL_ERR(("%s: invalid vndr ie."
+ "length is too small %d\n",
+ __FUNCTION__, vndrie->len));
+ break;
+ }
+
+ if (!bcmp(vndrie->oui,
+ (u8*)WiFiALL_OUI, WiFiALL_OUI_LEN) &&
+ (vndrie->data[0] == WiFiALL_OUI_TYPE))
+ {
+ WL_ERR(("Found Wi-FiAll OUI oui.\n"));
+ ie_11u_rel_num = vndrie->data[1];
+ ie_11u_rel_num = (ie_11u_rel_num & 0xf0)>>4;
+ ie_11u_rel_num += 1;
+
+ break;
+ }
+ }
+ count++;
+ tlv_ie = bcm_next_tlv(tlv_ie, &remained_len);
+ }
+ }
}
}
- bzero(&bssid, ETHER_ADDR_LEN);
- WL_DBG(("Enter event %d ndev %p\n", event, ndev));
- if (wl_get_mode_by_netdev(cfg, ndev) == WL_INVALID) {
- MFREE(cfg->osh, body, len);
- return WL_INVALID;
- }
- if (len)
- memcpy(body, data, len);
- wldev_iovar_getbuf_bsscfg(ndev, "cur_etheraddr",
- NULL, 0, ioctl_buf, sizeof(ioctl_buf), bsscfgidx, NULL);
- memcpy(da.octet, ioctl_buf, ETHER_ADDR_LEN);
- bzero(&bssid, sizeof(bssid));
- err = wldev_ioctl_get(ndev, WLC_GET_BSSID, &bssid, ETHER_ADDR_LEN);
- switch (event) {
- case WLC_E_ASSOC_IND:
- fc = FC_ASSOC_REQ;
- break;
- case WLC_E_REASSOC_IND:
- fc = FC_REASSOC_REQ;
- break;
- case WLC_E_DISASSOC_IND:
- fc = FC_DISASSOC;
- break;
- case WLC_E_DEAUTH_IND:
- fc = FC_DISASSOC;
- break;
- case WLC_E_DEAUTH:
- fc = FC_DISASSOC;
- break;
- default:
- fc = 0;
- goto exit;
- }
- bzero(&ci, sizeof(ci));
- if ((err = wldev_ioctl_get(ndev, WLC_GET_CHANNEL, &ci, sizeof(ci)))) {
- MFREE(cfg->osh, body, len);
- return err;
+ for (i = 0; i < bi->SSID_len; i++) {
+ if (bi->SSID[i] == ' ') {
+ bi->SSID[i] = '_';
+ }
}
- channel = dtoh32(ci.hw_channel);
- if (channel <= CH_MAX_2G_CHANNEL)
- band = wiphy->bands[IEEE80211_BAND_2GHZ];
- else
- band = wiphy->bands[IEEE80211_BAND_5GHZ];
- if (!band) {
- WL_ERR(("No valid band\n"));
- if (body) {
- MFREE(cfg->osh, body, len);
+ //0 : None, 1 : OKC, 2 : FT, 3 : CCKM
+ err = wldev_iovar_getint(dev, "wpa_auth", &val);
+ if (unlikely(err)) {
+ WL_ERR(("could not get wpa_auth (%d)\n", err));
+ snprintf(akm_str, sizeof(akm_str), "x"); // Unknown
+ } else {
+ WL_ERR(("wpa_auth val %d \n", val));
+#if defined(BCMEXTCCX)
+ if (val & (WPA_AUTH_CCKM | WPA2_AUTH_CCKM)) {
+ snprintf(akm_str, sizeof(akm_str), "3");
+ } else
+#endif
+ if (val & WPA2_AUTH_FT) {
+ snprintf(akm_str, sizeof(akm_str), "2");
+ } else if (val & (WPA_AUTH_UNSPECIFIED | WPA2_AUTH_UNSPECIFIED)) {
+ snprintf(akm_str, sizeof(akm_str), "1");
+ } else {
+ snprintf(akm_str, sizeof(akm_str), "0");
}
- return -EINVAL;
}
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 39) && !defined(WL_COMPAT_WIRELESS)
- freq = ieee80211_channel_to_frequency(channel);
- (void)band->band;
-#else
- freq = ieee80211_channel_to_frequency(channel, band->band);
-#endif // endif
- body_len = len;
- err = wl_frame_get_mgmt(cfg, fc, &da, &e->addr, &bssid,
- &mgmt_frame, &len, body);
- if (err < 0)
- goto exit;
- isfree = true;
- if ((event == WLC_E_ASSOC_IND && reason == DOT11_SC_SUCCESS) ||
- (event == WLC_E_DISASSOC_IND) ||
- ((event == WLC_E_DEAUTH_IND) || (event == WLC_E_DEAUTH))) {
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 18, 0))
- cfg80211_rx_mgmt(ndev, freq, 0, mgmt_frame, len, 0);
-#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 12, 0))
- cfg80211_rx_mgmt(ndev, freq, 0, mgmt_frame, len, 0, GFP_ATOMIC);
-#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 4, 0)) || \
- defined(WL_COMPAT_WIRELESS)
- cfg80211_rx_mgmt(ndev, freq, 0, mgmt_frame, len, GFP_ATOMIC);
-#else
- cfg80211_rx_mgmt(ndev, freq, mgmt_frame, len, GFP_ATOMIC);
-#endif /* LINUX_VERSION >= VERSION(3, 18,0) || WL_COMPAT_WIRELESS */
+ if (cfg->roam_offload) {
+ snprintf(roam_count_str, sizeof(roam_count_str), "x"); // Unknown
+ } else {
+ snprintf(roam_count_str, sizeof(roam_count_str), "%d", cfg->roam_count);
}
+ cfg->roam_count = 0;
-exit:
- if (isfree) {
- MFREE(cfg->osh, mgmt_frame, len);
- }
- if (body) {
- MFREE(cfg->osh, body, body_len);
+ WL_ERR(("BSSID:" MACDBG " SSID %s \n", MAC2STRDBG(eabuf), bi->SSID));
+ WL_ERR(("freq:%d, BW:%s, RSSI:%d dBm, Rate:%d Mbps, 11mode:%d, stream:%d,"
+ "MU-MIMO:%d, Passpoint:%d, SNR:%d, Noise:%d, \n"
+ "akm:%s roam:%s \n",
+ freq, wf_chspec_to_bw_str(bi->chanspec),
+ dtoh32(bi->RSSI), (rate / 2), mode_80211, nss,
+ ie_mu_mimo_cap, ie_11u_rel_num, bi->SNR, bi->phy_noise,
+ akm_str, roam_count_str));
+
+ if (ie) {
+ snprintf(cfg->bss_info, GET_BSS_INFO_LEN,
+ "%02x:%02x:%02x %d %s %d %s %d %d %d %d %d %d %s %s",
+ eabuf[0], eabuf[1], eabuf[2],
+ freq, wf_chspec_to_bw_str(bi->chanspec),
+ dtoh32(bi->RSSI), rate_str, mode_80211, nss,
+ ie_mu_mimo_cap, ie_11u_rel_num,
+ bi->SNR, bi->phy_noise, akm_str, roam_count_str);
+ } else {
+ // ie_mu_mimo_cap and ie_11u_rel_num are unknown
+ snprintf(cfg->bss_info, GET_BSS_INFO_LEN,
+ "%02x:%02x:%02x %d %s %d %s %d %d x x %d %d %s %s",
+ eabuf[0], eabuf[1], eabuf[2],
+ freq, wf_chspec_to_bw_str(bi->chanspec),
+ dtoh32(bi->RSSI), rate_str, mode_80211, nss,
+ bi->SNR, bi->phy_noise, akm_str, roam_count_str);
}
-#else /* LINUX_VERSION < VERSION(3,2,0) && !WL_CFG80211_STA_EVENT && !WL_COMPAT_WIRELESS */
- memset(&sinfo, 0, sizeof(struct station_info));
- sinfo.filled = 0;
- if (((event == WLC_E_ASSOC_IND) || (event == WLC_E_REASSOC_IND)) &&
- reason == DOT11_SC_SUCCESS) {
- /* Linux ver >= 4.0 assoc_req_ies_len is used instead of
- * STATION_INFO_ASSOC_REQ_IES flag
- */
-#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 0, 0))
- sinfo.filled = STA_INFO_BIT(INFO_ASSOC_REQ_IES);
-#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 0, 0)) */
- if (!data) {
- WL_ERR(("No IEs present in ASSOC/REASSOC_IND"));
- return -EINVAL;
- }
- sinfo.assoc_req_ies = data;
- sinfo.assoc_req_ies_len = len;
- WL_MSG(ndev->name, "new sta event for "MACDBG "\n",
- MAC2STRDBG(e->addr.octet));
- cfg80211_new_sta(ndev, e->addr.octet, &sinfo, GFP_ATOMIC);
- } else if ((event == WLC_E_DEAUTH_IND) ||
- ((event == WLC_E_DEAUTH) && (reason != DOT11_RC_RESERVED)) ||
- (event == WLC_E_DISASSOC_IND)) {
- WL_MSG(ndev->name, "del sta event for "MACDBG "\n",
- MAC2STRDBG(e->addr.octet));
- cfg80211_del_sta(ndev, e->addr.octet, GFP_ATOMIC);
+
+ CFG80211_PUT_BSS(wiphy, bss);
+
+ return 0;
+}
+
+s32 wl_cfg80211_get_bss_info(struct net_device *dev, char* cmd, int total_len)
+{
+ struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
+
+ if (cfg == NULL) {
+ return -1;
}
-#endif /* LINUX_VERSION < VERSION(3,2,0) && !WL_CFG80211_STA_EVENT && !WL_COMPAT_WIRELESS */
- return err;
+
+ memset(cmd, 0, total_len);
+ memcpy(cmd, cfg->bss_info, GET_BSS_INFO_LEN);
+
+ WL_ERR(("cmd: %s \n", cmd));
+
+ return GET_BSS_INFO_LEN;
}
-#endif /* WLMESH_CFG80211 */
+
+#endif /* DHD_ENABLE_BIGDATA_LOGGING */
static s32
wl_notify_connect_status(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev,
struct net_device *ndev = NULL;
s32 err = 0;
u32 event = ntoh32(e->event_type);
- u32 datalen = ntoh32(e->datalen);
struct wiphy *wiphy = NULL;
struct cfg80211_bss *bss = NULL;
struct wlc_ssid *ssid = NULL;
u8 *bssid = 0;
- s32 bssidx = 0;
- u8 *ie_ptr = NULL;
- uint32 ie_len = 0;
-#ifdef WL_ANALYTICS
- struct parsed_vndr_ies disco_vndr_ie;
- struct parsed_vndr_ie_info *vndrie_info = NULL;
- uint32 i = 0;
-#endif /* WL_ANALYTICS */
-
dhd_pub_t *dhdp;
- u32 mode;
int vndr_oui_num = 0;
char vndr_oui[MAX_VNDR_OUI_STR_LEN] = {0, };
- bool loc_gen = false;
-#ifdef DHD_LOSSLESS_ROAMING
- struct wl_security *sec;
-#endif /* DHD_LOSSLESS_ROAMING */
ndev = cfgdev_to_wlc_ndev(cfgdev, cfg);
-#ifdef DHD_LOSSLESS_ROAMING
- sec = wl_read_prof(cfg, ndev, WL_PROF_SEC);
-#endif /* DHD_LOSSLESS_ROAMING */
dhdp = (dhd_pub_t *)(cfg->pub);
BCM_REFERENCE(dhdp);
- mode = wl_get_mode_by_netdev(cfg, ndev);
- /* Push link events to upper layer log */
- SUPP_LOG(("[%s] Mode:%d event:%d status:0x%x reason:%d\n",
- ndev->name, mode, ntoh32(e->event_type),
- ntoh32(e->status), ntoh32(e->reason)));
- if (mode == WL_MODE_AP) {
+ if (wl_get_mode_by_netdev(cfg, ndev) == WL_MODE_AP) {
err = wl_notify_connect_status_ap(cfg, ndev, e, data);
-#ifdef WLMESH_CFG80211
- } else if (mode == WL_MODE_MESH) {
- err = wl_notify_connect_status_mesh(cfg, ndev, e, data);
-#endif /* WLMESH_CFG80211 */
- } else if (mode == WL_MODE_IBSS) {
+ } else if (wl_get_mode_by_netdev(cfg, ndev) == WL_MODE_IBSS) {
err = wl_notify_connect_status_ibss(cfg, ndev, e, data);
- } else if (mode == WL_MODE_BSS) {
- WL_INFORM_MEM(("[%s] Mode BSS. event:%d status:%d reason:%d\n",
- ndev->name, ntoh32(e->event_type),
- ntoh32(e->status), ntoh32(e->reason)));
-
- if (!wl_get_drv_status(cfg, CFG80211_CONNECT, ndev)) {
- /* Join attempt via non-cfg80211 interface.
- * Don't send resultant events to cfg80211
- * layer
- */
- WL_INFORM_MEM(("Event received in non-cfg80211"
- " connect state. Ignore\n"));
- return BCME_OK;
- }
-#ifdef WL_CLIENT_SAE
- if (event == WLC_E_AUTH)
- wl_notify_connect_status_bss(cfg, ndev, e, data);
-#endif /* WL_CLIENT_SAE */
-
+ } else if (wl_get_mode_by_netdev(cfg, ndev) == WL_MODE_BSS) {
+ WL_DBG(("wl_notify_connect_status : event %d status : %d ndev %p\n",
+ ntoh32(e->event_type), ntoh32(e->status), ndev));
if (event == WLC_E_ASSOC || event == WLC_E_AUTH) {
- wl_get_auth_assoc_status(cfg, ndev, e, data);
+ wl_get_auth_assoc_status(cfg, ndev, e);
return 0;
}
- if (event == WLC_E_ASSOC_RESP_IE) {
- if (ntoh32(e->status) != WLC_E_STATUS_SUCCESS) {
- wl_cache_assoc_resp_ies(cfg, ndev, e, data);
- }
- return 0;
- }
-
DHD_DISABLE_RUNTIME_PM((dhd_pub_t *)cfg->pub);
if (wl_is_linkup(cfg, e, ndev)) {
wl_link_up(cfg);
act = true;
if (!wl_get_drv_status(cfg, DISCONNECTING, ndev)) {
- WL_INFORM_MEM(("[%s] link up for bssid " MACDBG "\n",
- ndev->name, MAC2STRDBG((const u8*)(&e->addr))));
- if ((event == WLC_E_LINK) &&
- (ntoh16(e->flags) & WLC_EVENT_MSG_LINK) &&
- !wl_get_drv_status(cfg, CONNECTED, ndev) &&
- !wl_get_drv_status(cfg, CONNECTING, ndev)) {
- WL_INFORM_MEM(("link up in non-connected/"
- "non-connecting state\n"));
- wl_cfg80211_disassoc(ndev, WLAN_REASON_DEAUTH_LEAVING);
- return BCME_OK;
- }
-
-#ifdef WL_WPS_SYNC
- /* Avoid invocation for Roam cases */
- if ((event == WLC_E_LINK) &&
- !wl_get_drv_status(cfg, CONNECTED, ndev)) {
- wl_wps_session_update(ndev,
- WPS_STATE_LINKUP, e->addr.octet);
- }
-#endif /* WL_WPS_SYNC */
-
if (event == WLC_E_LINK &&
#ifdef DHD_LOSSLESS_ROAMING
!cfg->roam_offload &&
- !IS_AKM_SUITE_FT(sec) &&
#endif /* DHD_LOSSLESS_ROAMING */
wl_get_drv_status(cfg, CONNECTED, ndev)) {
wl_bss_roaming_done(cfg, ndev, e, data);
- /* Arm pkt logging timer */
- dhd_dump_mod_pkt_timer(dhdp, PKT_CNT_RSN_ROAM);
- } else {
- /* Initial Association */
- wl_update_prof(cfg, ndev, e, &act, WL_PROF_ACT);
- wl_bss_connect_done(cfg, ndev, e, data, true);
- if (ndev == bcmcfg_to_prmry_ndev(cfg)) {
- vndr_oui_num = wl_vndr_ies_get_vendor_oui(cfg,
- ndev, vndr_oui, ARRAY_SIZE(vndr_oui));
- if (vndr_oui_num > 0) {
- WL_INFORM_MEM(("[%s] vendor oui: %s\n",
- ndev->name, vndr_oui));
- }
- }
- if (event == WLC_E_LINK) {
- /* Arm pkt logging timer */
- dhd_dump_mod_pkt_timer(dhdp, PKT_CNT_RSN_CONNECT);
- }
- WL_DBG(("joined in BSS network \"%s\"\n",
- ((struct wlc_ssid *)wl_read_prof(cfg, ndev,
- WL_PROF_SSID))->SSID));
}
+
+ if (ndev == bcmcfg_to_prmry_ndev(cfg)) {
+ vndr_oui_num = wl_vndr_ies_get_vendor_oui(cfg,
+ ndev, vndr_oui, ARRAYSIZE(vndr_oui));
+#if defined(STAT_REPORT)
+ /* notify STA connection only */
+ wl_stat_report_notify_connected(cfg);
+#endif /* STAT_REPORT */
+ }
+
+ printf("wl_bss_connect_done succeeded with "
+ MACDBG " %s%s\n", MAC2STRDBG((const u8*)(&e->addr)),
+ vndr_oui_num > 0 ? "vndr_oui: " : "",
+ vndr_oui_num > 0 ? vndr_oui : "");
+
+ wl_bss_connect_done(cfg, ndev, e, data, true);
+ dhd_conf_set_intiovar(cfg->pub, WLC_SET_VAR, "phy_oclscdenable", cfg->pub->conf->phy_oclscdenable, 0, FALSE);
+ WL_DBG(("joined in BSS network \"%s\"\n",
+ ((struct wlc_ssid *)
+ wl_read_prof(cfg, ndev, WL_PROF_SSID))->SSID));
+
+#ifdef WBTEXT
+ if (ndev->ieee80211_ptr->iftype == NL80211_IFTYPE_STATION &&
+ dhdp->wbtext_support &&
+ event == WLC_E_SET_SSID) {
+ /* set wnm_keepalives_max_idle after association */
+ wl_cfg80211_wbtext_set_wnm_maxidle(cfg, ndev);
+ /* send nbr request or BTM query to update RCC */
+ wl_cfg80211_wbtext_update_rcc(cfg, ndev);
+ }
+#endif /* WBTEXT */
}
wl_update_prof(cfg, ndev, e, &act, WL_PROF_ACT);
wl_update_prof(cfg, ndev, NULL, (const void *)&e->addr, WL_PROF_BSSID);
- } else if (wl_is_linkdown(cfg, e) ||
+ dhd_conf_set_wme(cfg->pub, 0);
+ if (!memcmp(ndev->name, WL_P2P_INTERFACE_PREFIX, strlen(WL_P2P_INTERFACE_PREFIX))) {
+ dhd_conf_set_mchan_bw(cfg->pub, WL_P2P_IF_CLIENT, -1);
+ }
+ } else if (WL_IS_LINKDOWN(cfg, e, data) ||
((event == WLC_E_SET_SSID) &&
(ntoh32(e->status) != WLC_E_STATUS_SUCCESS) &&
(wl_get_drv_status(cfg, CONNECTED, ndev)))) {
- if (wl_is_linkdown(cfg, e)) {
- /* Clear IEs for disaasoc */
- if ((bssidx = wl_get_bssidx_by_wdev(cfg,
- ndev->ieee80211_ptr)) < 0) {
- WL_ERR(("Find index failed\n"));
- } else {
- WL_ERR(("link down--clearing disconnect IEs\n"));
- if ((err = wl_cfg80211_set_mgmt_vndr_ies(cfg,
- ndev_to_cfgdev(ndev), bssidx, VNDR_IE_DISASSOC_FLAG,
- NULL, 0)) != BCME_OK) {
- WL_ERR(("Failed to clear ies err = %d\n", err));
- }
- }
- }
- WL_INFORM_MEM(("link down. connection state bit status: [%u:%u:%u:%u]\n",
+ WL_INFORM(("connection state bit status: [%d:%d:%d:%d]\n",
wl_get_drv_status(cfg, CONNECTING, ndev),
wl_get_drv_status(cfg, CONNECTED, ndev),
wl_get_drv_status(cfg, DISCONNECTING, ndev),
wl_get_drv_status(cfg, NESTED_CONNECT, ndev)));
-#ifdef WL_WPS_SYNC
- {
- u8 wps_state;
- if ((event == WLC_E_SET_SSID) &&
- (ntoh32(e->status) != WLC_E_STATUS_SUCCESS)) {
- /* connect fail */
- wps_state = WPS_STATE_CONNECT_FAIL;
- } else {
- wps_state = WPS_STATE_LINKDOWN;
- }
- if (wl_wps_session_update(ndev,
- wps_state, e->addr.octet) == BCME_UNSUPPORTED) {
- /* Unexpected event. Ignore it. */
- return 0;
- }
- }
-#endif /* WL_WPS_SYNC */
-
if (wl_get_drv_status(cfg, DISCONNECTING, ndev) &&
(wl_get_drv_status(cfg, NESTED_CONNECT, ndev) ||
wl_get_drv_status(cfg, CONNECTING, ndev))) {
* command issued from the wl_cfg80211_connect context. Ignore
* the event to avoid pre-empting the current connection
*/
- WL_DBG(("Nested connection case. Drop event. \n"));
- wl_cfg80211_check_in4way(cfg, ndev, NO_SCAN_IN4WAY|NO_BTC_IN4WAY|WAIT_DISCONNECTED,
- WL_EXT_STATUS_DISCONNECTED, NULL);
+ WL_INFORM(("Nested connection case. Drop event. \n"));
wl_clr_drv_status(cfg, NESTED_CONNECT, ndev);
wl_clr_drv_status(cfg, DISCONNECTING, ndev);
/* Not in 'CONNECTED' state, clear it */
return 0;
}
- if (wl_get_drv_status(cfg, CONNECTED, ndev)) {
- wl_flush_fw_log_buffer(bcmcfg_to_prmry_ndev(cfg),
- FW_LOGSET_MASK_ALL);
- }
#ifdef DHD_LOSSLESS_ROAMING
wl_del_roam_timeout(cfg);
-#endif // endif
+#endif
#ifdef P2PLISTEN_AP_SAMECHN
if (ndev == bcmcfg_to_prmry_ndev(cfg)) {
wl_cfg80211_set_p2p_resp_ap_chn(ndev, 0);
#endif /* P2PLISTEN_AP_SAMECHN */
wl_cfg80211_cancel_scan(cfg);
+#if defined(DHD_ENABLE_BIGDATA_LOGGING)
+ if (wl_get_drv_status(cfg, CONNECTED, ndev)) {
+ wl_get_bss_info(cfg, ndev, (u8*)(&e->addr));
+ }
+#endif /* DHD_ENABLE_BIGDATA_LOGGING */
if (wl_get_drv_status(cfg, CONNECTED, ndev)) {
u8 *curbssid = wl_read_prof(cfg, ndev, WL_PROF_BSSID);
if (memcmp(curbssid, &e->addr, ETHER_ADDR_LEN) != 0) {
if (wl_get_drv_status(cfg, CONNECTED, ndev)) {
scb_val_t scbval;
u8 *curbssid = wl_read_prof(cfg, ndev, WL_PROF_BSSID);
- uint32 reason = 0;
+ s32 reason = 0;
struct ether_addr bssid_dongle = {{0, 0, 0, 0, 0, 0}};
struct ether_addr bssid_null = {{0, 0, 0, 0, 0, 0}};
- if (event == WLC_E_DEAUTH_IND || event == WLC_E_DISASSOC_IND) {
+ if (event == WLC_E_DEAUTH_IND || event == WLC_E_DISASSOC_IND)
reason = ntoh32(e->reason);
- if (reason > WLC_E_DEAUTH_MAX_REASON) {
- WL_ERR(("Event %d original reason is %d, "
- "changed 0xFF\n", event, reason));
- reason = WLC_E_DEAUTH_MAX_REASON;
- }
- wl_cfg80211_handle_deauth_ind(cfg, ndev, e, data);
- }
-#ifdef SET_SSID_FAIL_CUSTOM_RC
- if ((event == WLC_E_SET_SSID) &&
- (ntoh32(e->status) == WLC_E_STATUS_TIMEOUT)) {
- reason = SET_SSID_FAIL_CUSTOM_RC;
- }
-#endif /* SET_SSID_FAIL_CUSTOM_RC */
+ /* WLAN_REASON_UNSPECIFIED is used for hang up event in Android */
+ reason = (reason == WLAN_REASON_UNSPECIFIED)? 0 : reason;
+
+ printf("link down if %s may call cfg80211_disconnected. "
+ "event : %d, reason=%d from " MACDBG "\n",
+ ndev->name, event, ntoh32(e->reason),
+ MAC2STRDBG((const u8*)(&e->addr)));
+ wl_cfg80211_check_in4way(cfg, ndev, NO_SCAN_IN4WAY|NO_BTC_IN4WAY,
+ WL_EXT_STATUS_DISCONNECTED, NULL);
/* roam offload does not sync BSSID always, get it from dongle */
if (cfg->roam_offload) {
- bzero(&bssid_dongle, sizeof(bssid_dongle));
+ memset(&bssid_dongle, 0, sizeof(bssid_dongle));
if (wldev_ioctl_get(ndev, WLC_GET_BSSID, &bssid_dongle,
sizeof(bssid_dongle)) == BCME_OK) {
/* if not roam case, it would return null bssid */
return 0;
}
}
+
#ifdef DBG_PKT_MON
- /* Stop packet monitor */
if (ndev == bcmcfg_to_prmry_ndev(cfg)) {
DHD_DBG_PKT_MON_STOP(dhdp);
}
#endif /* DBG_PKT_MON */
+
/* clear RSSI monitor, framework will set new cfg */
#ifdef RSSI_MONITOR_SUPPORT
dhd_dev_set_rssi_monitor_cfg(bcmcfg_to_prmry_ndev(cfg),
FALSE, 0, 0);
#endif /* RSSI_MONITOR_SUPPORT */
- if (dhdp->conf->eapol_status == EAPOL_STATUS_4WAY_DONE &&
- !memcmp(ndev->name, WL_P2P_INTERFACE_PREFIX, strlen(WL_P2P_INTERFACE_PREFIX))) {
+ if (!memcmp(ndev->name, WL_P2P_INTERFACE_PREFIX, strlen(WL_P2P_INTERFACE_PREFIX))) {
// terence 20130703: Fix for wrong group_capab (timing issue)
cfg->p2p_disconnected = 1;
}
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 11, 0))
+ if (wl_get_drv_status(cfg, DISCONNECTING, ndev)) {
+ CFG80211_DISCONNECTED(ndev, reason, NULL, 0, false, GFP_KERNEL);
+ }
+#endif
memcpy(&cfg->disconnected_bssid, curbssid, ETHER_ADDR_LEN);
wl_clr_drv_status(cfg, CONNECTED, ndev);
if (!wl_get_drv_status(cfg, DISCONNECTING, ndev)) {
- DHD_STATLOG_CTRL(dhdp, ST(DISASSOC_INT_START),
- dhd_net2idx(dhdp->info, ndev),
- WLAN_REASON_DEAUTH_LEAVING);
 /* To make sure disconnect, explicitly send disassoc
* for BSSID 00:00:00:00:00:00 issue
*/
scbval.val = WLAN_REASON_DEAUTH_LEAVING;
- WL_INFORM_MEM(("clear fw state\n"));
+
memcpy(&scbval.ea, curbssid, ETHER_ADDR_LEN);
scbval.val = htod32(scbval.val);
err = wldev_ioctl_set(ndev, WLC_DISASSOC, &scbval,
err = 0;
}
}
- if (wl_get_drv_status(cfg, DISCONNECTING, ndev)) {
- loc_gen = true;
- }
- WL_INFORM_MEM(("[%s] Indicate disconnect event to upper layer. "
- "event: %d reason=%d from " MACDBG "\n",
- ndev->name, event, ntoh32(e->reason),
- MAC2STRDBG((const u8*)(&e->addr))));
- DHD_STATLOG_CTRL(dhdp, ST(DISASSOC_DONE),
- dhd_net2idx(dhdp->info, ndev), reason);
/* Send up deauth and clear states */
-
- /*
- * FW sends body and body len as a part of deauth
- * and disassoc events (WLC_E_DISASSOC_IND, WLC_E_DEAUTH_IND)
- * The VIEs sits after reason code in the body. Reason code is
- * 2 bytes long.
- */
- WL_DBG(("recv disconnect ies ie_len = %d\n", ie_len));
- if (event == WLC_E_DISASSOC_IND || event == WLC_E_DEAUTH_IND) {
- if ((datalen > DOT11_DISCONNECT_RC) &&
- datalen < (VNDR_IE_MAX_LEN + DOT11_DISCONNECT_RC) &&
- data) {
- ie_ptr = (uchar*)data + DOT11_DISCONNECT_RC;
- ie_len = datalen - DOT11_DISCONNECT_RC;
- }
- } else if (event == WLC_E_LINK &&
- ntoh32(e->reason) == WLC_E_LINK_BCN_LOSS) {
-#ifdef WL_ANALYTICS
- /*
- * In case of linkdown, FW sends prb rsp IEs. Disco VIE
- * are appended with prb rsp ies. Remove prb rsp IES and
- * send disco vie to upper layer.
- * Disco VIE has fixed len of 11 octets.
- * As per SS spec.(2 octet header + 9 octet VIE)
- */
- if (datalen < (VNDR_IE_MAX_LEN + DOT11_DISCONNECT_RC) &&
- datalen >= DOT11_DISCONNECT_RC &&
- ((err = wl_cfg80211_parse_vndr_ies(
- (const u8 *)data, datalen,
- &disco_vndr_ie)) == BCME_OK)) {
- for (i = 0; i < disco_vndr_ie.count; i++) {
- vndrie_info = &disco_vndr_ie.ie_info[i];
- if ((vndrie_info->vndrie.id ==
- 0xDD) && (!memcmp(
- vndrie_info->vndrie.oui,
- SSE_OUI, DOT11_OUI_LEN)) &&
- (vndrie_info->vndrie.data[0] ==
- VENDOR_ENTERPRISE_STA_OUI_TYPE)) {
- ie_ptr = (u8 *)vndrie_info->ie_ptr;
- ie_len = vndrie_info->ie_len;
- }
+ CFG80211_DISCONNECTED(ndev, reason, NULL, 0,
+ false, GFP_KERNEL);
+ wl_link_down(cfg);
+ wl_init_prof(cfg, ndev);
+#ifdef WBTEXT
+ /* when STA was disconnected, clear join pref and set wbtext */
+ if (ndev->ieee80211_ptr->iftype == NL80211_IFTYPE_STATION) {
+ char smbuf[WLC_IOCTL_SMLEN];
+ char clear[] = { 0x01, 0x02, 0x00, 0x00, 0x03,
+ 0x02, 0x00, 0x00, 0x04, 0x02, 0x00, 0x00 };
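+ /* Editor's note: the byte array above appears to encode three join_pref
+ * {type, len, data...} tuples -- RSSI (0x01), BAND (0x03) and RSSI_DELTA
+ * (0x04), each with a 2-byte zeroed payload -- i.e. it resets every
+ * preference before WBTEXT re-installs its own policy. The type codes are
+ * the usual Broadcom join_pref values, stated here as an assumption rather
+ * than taken from this patch.
+ */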
+ if ((err = wldev_iovar_setbuf(ndev, "join_pref",
+ clear, sizeof(clear), smbuf,
+ sizeof(smbuf), NULL))
+ == BCME_OK) {
+ if ((err = wldev_iovar_setint(ndev,
+ "wnm_bsstrans_resp",
+ WL_BSSTRANS_POLICY_PRODUCT_WBTEXT))
+ == BCME_OK) {
+ wl_cfg80211_wbtext_set_default(ndev);
+ } else {
+ WL_ERR(("%s: Failed to set wbtext = %d\n",
+ __FUNCTION__, err));
}
+ } else {
+ WL_ERR(("%s: Failed to clear join pref = %d\n",
+ __FUNCTION__, err));
}
-#endif /* WL_ANALYTICS */
+ wl_cfg80211_wbtext_clear_bssid_list(cfg);
}
-
- CFG80211_DISCONNECTED(ndev, reason, ie_ptr, ie_len,
- loc_gen, GFP_KERNEL);
- WL_INFORM_MEM(("[%s] Disconnect event sent to upper layer"
- "event:%d reason=%d ie_len=%d from " MACDBG "\n",
- ndev->name, event, ntoh32(e->reason), ie_len,
- MAC2STRDBG((const u8*)(&e->addr))));
-
- /* Wait for status to be cleared to prevent race condition
- * issues with connect context
- */
- wl_cfg80211_disconnect_state_sync(cfg, ndev);
- wl_link_down(cfg);
- wl_init_prof(cfg, ndev);
+#endif /* WBTEXT */
+ wl_vndr_ies_clear_vendor_oui_list(cfg);
}
else if (wl_get_drv_status(cfg, CONNECTING, ndev)) {
- DHD_STATLOG_CTRL(dhdp, ST(DISASSOC_INT_START),
- dhd_net2idx(dhdp->info, ndev), 0);
- WL_INFORM_MEM(("link down, during connecting\n"));
- /* Issue WLC_DISASSOC to prevent FW roam attempts.
- * Do not issue WLC_DISASSOC again if the linkdown is
- * generated due to local disassoc, to avoid connect-disconnect
- * loop.
- */
- if (!((event == WLC_E_LINK) &&
- (ntoh32(e->reason) == WLC_E_LINK_DISASSOC) &&
- (ntoh32(e->status) == WLC_E_STATUS_SUCCESS))) {
- err = wldev_ioctl_set(ndev, WLC_DISASSOC, NULL, 0);
- if (err < 0) {
- WL_ERR(("CONNECTING state,"
- " WLC_DISASSOC error %d\n",
- err));
- err = 0;
- }
+ printf("link down, during connecting\n");
+ wl_cfg80211_check_in4way(cfg, ndev, NO_SCAN_IN4WAY|NO_BTC_IN4WAY,
+ WL_EXT_STATUS_DISCONNECTED, NULL);
+ /* Issue WLC_DISASSOC to prevent FW roam attempts */
+ err = wldev_ioctl_set(ndev, WLC_DISASSOC, NULL, 0);
+ if (err < 0) {
+ WL_ERR(("CONNECTING state, WLC_DISASSOC error %d\n", err));
+ err = 0;
+ }
+ WL_INFORM(("Clear drv CONNECTING status\n"));
+ wl_clr_drv_status(cfg, CONNECTING, ndev);
#ifdef ESCAN_RESULT_PATCH
- if ((memcmp(connect_req_bssid, broad_bssid,
- ETHER_ADDR_LEN) == 0) ||
- (memcmp(&e->addr, broad_bssid,
- ETHER_ADDR_LEN) == 0) ||
- (memcmp(&e->addr, connect_req_bssid,
- ETHER_ADDR_LEN) == 0))
- /* In case this event comes while associating
- * another AP
- */
+ if ((memcmp(connect_req_bssid, broad_bssid, ETHER_ADDR_LEN) == 0) ||
+ (memcmp(&e->addr, broad_bssid, ETHER_ADDR_LEN) == 0) ||
+ (memcmp(&e->addr, connect_req_bssid, ETHER_ADDR_LEN) == 0))
+ /* In case this event comes while associating another AP */
#endif /* ESCAN_RESULT_PATCH */
- wl_bss_connect_done(cfg, ndev, e, data, false);
- }
+ wl_bss_connect_done(cfg, ndev, e, data, false);
}
wl_clr_drv_status(cfg, DISCONNECTING, ndev);
- wl_cfg80211_check_in4way(cfg, ndev, NO_SCAN_IN4WAY|NO_BTC_IN4WAY|WAIT_DISCONNECTED,
- WL_EXT_STATUS_DISCONNECTED, NULL);
 /* if link down, bsscfg is disabled */
if (ndev != bcmcfg_to_prmry_ndev(cfg))
wl_cfg80211_tdls_config(cfg, TDLS_STATE_DISCONNECT, false);
#endif /* WLTDLS */
} else if (wl_is_nonetwork(cfg, e)) {
- WL_ERR(("connect failed event=%d e->status %d e->reason %d \n",
- event, (int)ntoh32(e->status), (int)ntoh32(e->reason)));
- wl_cfg80211_check_in4way(cfg, ndev, NO_SCAN_IN4WAY|NO_BTC_IN4WAY|WAIT_DISCONNECTED,
+ printf("connect failed event=%d e->status %d e->reason %d \n",
+ event, (int)ntoh32(e->status), (int)ntoh32(e->reason));
+ wl_cfg80211_check_in4way(cfg, ndev, NO_SCAN_IN4WAY|NO_BTC_IN4WAY,
WL_EXT_STATUS_DISCONNECTED, NULL);
-#ifdef WL_WPS_SYNC
- if (wl_wps_session_update(ndev,
- WPS_STATE_CONNECT_FAIL, e->addr.octet) == BCME_UNSUPPORTED) {
- /* Unexpected event. Ignore it. */
- return 0;
+#if defined(DHD_ENABLE_BIGDATA_LOGGING)
+ if (event == WLC_E_SET_SSID) {
+ wl_get_connect_failed_status(cfg, e);
}
-#endif /* WL_WPS_SYNC */
- /* Dump FW preserve buffer content */
- wl_flush_fw_log_buffer(ndev, FW_LOGSET_MASK_ALL);
+#endif /* DHD_ENABLE_BIGDATA_LOGGING */
+ if (wl_get_drv_status(cfg, DISCONNECTING, ndev) &&
+ wl_get_drv_status(cfg, CONNECTING, ndev)) {
+ wl_clr_drv_status(cfg, DISCONNECTING, ndev);
+ wl_clr_drv_status(cfg, CONNECTING, ndev);
+ wl_cfg80211_scan_abort(cfg);
+ DHD_ENABLE_RUNTIME_PM((dhd_pub_t *)cfg->pub);
+ return err;
+ }
/* Clean up any pending scan request */
wl_cfg80211_cancel_scan(cfg);
- if (wl_get_drv_status(cfg, CONNECTING, ndev)) {
- if (!wl_get_drv_status(cfg, DISCONNECTING, ndev)) {
- WL_INFORM_MEM(("wl dissassoc\n"));
- err = wldev_ioctl_set(ndev, WLC_DISASSOC, NULL, 0);
- if (err < 0) {
- WL_ERR(("WLC_DISASSOC error %d\n", err));
- err = 0;
- }
- } else {
- WL_DBG(("connect fail. clear disconnecting bit\n"));
- wl_clr_drv_status(cfg, DISCONNECTING, ndev);
- }
+ if (wl_get_drv_status(cfg, CONNECTING, ndev))
wl_bss_connect_done(cfg, ndev, e, data, false);
- wl_clr_drv_status(cfg, CONNECTING, ndev);
- WL_INFORM_MEM(("connect fail reported\n"));
- }
} else {
WL_DBG(("%s nothing\n", __FUNCTION__));
}
DHD_ENABLE_RUNTIME_PM((dhd_pub_t *)cfg->pub);
- } else {
- WL_MSG(ndev->name, "Invalid mode %d event %d status %d\n",
- wl_get_mode_by_netdev(cfg, ndev), ntoh32(e->event_type),
+ }
+ else {
+ printf("wl_notify_connect_status : Invalid %s mode %d event %d status %d\n",
+ ndev->name, wl_get_mode_by_netdev(cfg, ndev), ntoh32(e->event_type),
ntoh32(e->status));
}
return err;
u32 status = be32_to_cpu(e->status);
#ifdef DHD_LOSSLESS_ROAMING
struct wl_security *sec;
-#endif // endif
- dhd_pub_t *dhdp = (dhd_pub_t *)(cfg->pub);
+#endif
WL_DBG(("Enter \n"));
- BCM_REFERENCE(dhdp);
ndev = cfgdev_to_wlc_ndev(cfgdev, cfg);
if ((!cfg->disable_roam_event) && (event == WLC_E_BSSID)) {
*/
if (IS_AKM_SUITE_FT(sec)) {
wl_bss_roaming_done(cfg, ndev, e, data);
- /* Arm pkt logging timer */
- dhd_dump_mod_pkt_timer(dhdp, PKT_CNT_RSN_ROAM);
}
/* Roam timer is deleted mostly from wl_cfg80211_change_station
* after roaming is finished successfully. We need to delete
wl_del_roam_timeout(cfg);
}
#else
-#if !defined(DHD_NONFT_ROAMING)
wl_bss_roaming_done(cfg, ndev, e, data);
-#endif /* !DHD_NONFT_ROAMING */
#endif /* DHD_LOSSLESS_ROAMING */
} else {
wl_bss_connect_done(cfg, ndev, e, data, true);
else if ((event == WLC_E_ROAM || event == WLC_E_BSSID) && status != WLC_E_STATUS_SUCCESS) {
wl_del_roam_timeout(cfg);
}
-#endif // endif
+#endif
return err;
}
#ifdef CUSTOM_EVENT_PM_WAKE
uint32 last_dpm_upd_time = 0; /* ms */
-#define DPM_UPD_LMT_TIME ((CUSTOM_EVENT_PM_WAKE + (5)) * (1000) * (4)) /* ms */
+#define DPM_UPD_LMT_TIME 25000 /* ms */
#define DPM_UPD_LMT_RSSI -85 /* dbm */
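/* Editor's note -- a minimal sketch (not part of the patch) of how the two
 * limits above are combined by the CUSTOM_EVENT_PM_WAKE handler below: with
 * the LCD off (early suspend), only a repeated excessive-PM-wake event that
 * arrives on a weak link (RSSI at or below DPM_UPD_LMT_RSSI) within
 * DPM_UPD_LMT_TIME of the previous one triggers the forced disassociation.
 * The helper name is illustrative.
 */
#if 0 /* illustration only */
static bool example_dpm_should_disassoc(s32 rssi, uint32 now_ms, uint32 last_ms)
{
	if (rssi > DPM_UPD_LMT_RSSI)
		return false;	/* signal still strong enough: ignore the event */
	return (now_ms - last_ms) < DPM_UPD_LMT_TIME;	/* too frequent: kick the link */
}
#endif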
static s32
uint32 cur_dpm_upd_time = 0;
dhd_pub_t *dhd = (dhd_pub_t *)(cfg->pub);
s32 rssi;
-#ifdef SUPPORT_RSSI_SUM_REPORT
- wl_rssi_ant_mimo_t rssi_ant_mimo;
-#endif /* SUPPORT_RSSI_SUM_REPORT */
+ scb_val_t scb_val;
+
ndev = cfgdev_to_wlc_ndev(cfgdev, cfg);
- pbuf = (u8 *)MALLOCZ(cfg->osh, WLC_IOCTL_MEDLEN);
+ pbuf = kzalloc(WLC_IOCTL_MEDLEN, GFP_KERNEL);
if (pbuf == NULL) {
WL_ERR(("failed to allocate local pbuf\n"));
return -ENOMEM;
}
err = wldev_iovar_getbuf_bsscfg(ndev, "dump",
- "pm", strlen("pm"), pbuf, WLC_IOCTL_MEDLEN,
- 0, &cfg->ioctl_buf_sync);
+ "pm", strlen("pm"), pbuf, WLC_IOCTL_MEDLEN, 0, &cfg->ioctl_buf_sync);
if (err) {
WL_ERR(("dump ioctl err = %d", err));
}
if (pbuf) {
- MFREE(cfg->osh, pbuf, WLC_IOCTL_MEDLEN);
+ kfree(pbuf);
}
if (dhd->early_suspended) {
/* LCD off */
-#ifdef SUPPORT_RSSI_SUM_REPORT
- /* Query RSSI sum across antennas */
- memset(&rssi_ant_mimo, 0, sizeof(rssi_ant_mimo));
- err = wl_get_rssi_per_ant(ndev, ndev->name, NULL, &rssi_ant_mimo);
+ memset(&scb_val, 0, sizeof(scb_val_t));
+ scb_val.val = 0;
+ err = wldev_ioctl_get(ndev, WLC_GET_RSSI, &scb_val, sizeof(scb_val_t));
if (err) {
- WL_ERR(("Could not get rssi sum (%d)\n", err));
- }
- rssi = rssi_ant_mimo.rssi_sum;
- if (rssi == 0)
-#endif /* SUPPORT_RSSI_SUM_REPORT */
- {
- scb_val_t scb_val;
- memset(&scb_val, 0, sizeof(scb_val_t));
- scb_val.val = 0;
- err = wldev_ioctl_get(ndev, WLC_GET_RSSI, &scb_val, sizeof(scb_val_t));
- if (err) {
- WL_ERR(("Could not get rssi (%d)\n", err));
- }
-#if defined(RSSIOFFSET)
- rssi = wl_update_rssi_offset(ndev, dtoh32(scb_val.val));
-#else
- rssi = dtoh32(scb_val.val);
-#endif
+ WL_ERR(("Could not get rssi (%d)\n", err));
}
- WL_ERR(("RSSI %d dBm\n", rssi));
+ rssi = wl_rssi_offset(dtoh32(scb_val.val));
+ WL_ERR(("[%s] RSSI %d dBm\n", ndev->name, rssi));
if (rssi > DPM_UPD_LMT_RSSI) {
return err;
}
cur_dpm_upd_time = OSL_SYSUPTIME();
if (cur_dpm_upd_time - last_dpm_upd_time < DPM_UPD_LMT_TIME) {
scb_val_t scbval;
- DHD_STATLOG_CTRL(dhd, ST(DISASSOC_INT_START),
- dhd_net2idx(dhd->info, ndev), 0);
bzero(&scbval, sizeof(scb_val_t));
- err = wldev_ioctl_set(ndev, WLC_DISASSOC,
- &scbval, sizeof(scb_val_t));
+ err = wldev_ioctl_set(ndev, WLC_DISASSOC, &scbval, sizeof(scb_val_t));
if (err < 0) {
- WL_ERR(("Disassoc error %d\n", err));
+ WL_ERR(("%s: Disassoc error %d\n", __FUNCTION__, err));
return err;
}
- WL_ERR(("Force Disassoc due to updated DPM event.\n"));
+ WL_ERR(("%s: Force Disassoc due to updated DPM event.\n", __FUNCTION__));
last_dpm_upd_time = 0;
} else {
{
struct wl_security *sec;
struct net_device *ndev;
+#if defined(DHD_LOSSLESS_ROAMING) || defined(DBG_PKT_MON)
dhd_pub_t *dhdp = (dhd_pub_t *)(cfg->pub);
+#endif /* DHD_LOSSLESS_ROAMING || DBG_PKT_MON */
u32 status = ntoh32(e->status);
u32 reason = ntoh32(e->reason);
WL_ERR(("Attempting roam with reason code : %d\n", reason));
}
-#ifdef CONFIG_SILENT_ROAM
- if (dhdp->in_suspend && reason == WLC_E_REASON_SILENT_ROAM) {
- dhdp->sroamed = TRUE;
- }
-#endif /* CONFIG_SILENT_ROAM */
-
ndev = cfgdev_to_wlc_ndev(cfgdev, cfg);
#ifdef DBG_PKT_MON
DHD_DBG_PKT_MON_START(dhdp);
}
#endif /* DBG_PKT_MON */
+
#ifdef DHD_LOSSLESS_ROAMING
- sec = wl_read_prof(cfg, ndev, WL_PROF_SEC);
+ sec = wl_read_prof(cfg, ndev, WL_PROF_SEC);
/* Disable Lossless Roaming for specific AKM suite
* Any other AKM suite can be added below if transition time
* is delayed because of Lossless Roaming
if (IS_AKM_SUITE_FT(sec)) {
return BCME_OK;
}
-
dhdp->dequeue_prec_map = 1 << PRIO_8021D_NC;
/* Restore flow control */
dhd_txflowcontrol(dhdp, ALL_INTERFACES, OFF);
mod_timer(&cfg->roam_timeout, jiffies + msecs_to_jiffies(WL_ROAM_TIMEOUT_MS));
#endif /* DHD_LOSSLESS_ROAMING */
-
return BCME_OK;
+
}
#endif /* DHD_LOSSLESS_ROAMING || DBG_PKT_MON */
-
+#ifdef ENABLE_TEMP_THROTTLING
static s32
-wl_notify_roam_start_status(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev,
+wl_check_rx_throttle_status(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev,
const wl_event_msg_t *e, void *data)
{
-#if (LINUX_VERSION_CODE > KERNEL_VERSION(3, 13, 0)) || defined(WL_VENDOR_EXT_SUPPORT)
- struct net_device *ndev = cfgdev_to_wlc_ndev(cfgdev, cfg);
- struct wiphy *wiphy = bcmcfg_to_wiphy(cfg);
- int event_type;
+ s32 err = 0;
+ u32 status = ntoh32(e->status);
+ u32 reason = ntoh32(e->reason);
- event_type = WIFI_EVENT_ROAM_SCAN_STARTED;
- wl_cfgvendor_send_async_event(wiphy, ndev, GOOGLE_ROAM_EVENT_START,
- &event_type, sizeof(int));
-#endif /* (LINUX_VERSION_CODE > KERNEL_VERSION(3, 13, 0)) || (WL_VENDOR_EXT_SUPPORT) */
+ WL_ERR_EX(("RX THROTTLE : status=%d, reason=0x%x\n", status, reason));
- return BCME_OK;
+ return err;
}
+#endif /* ENABLE_TEMP_THROTTLING */
static s32 wl_get_assoc_ies(struct bcm_cfg80211 *cfg, struct net_device *ndev)
{
conn_info->resp_ie_len = 0;
bzero(conn_info->resp_ie, sizeof(conn_info->resp_ie));
}
-
if (assoc_info.req_len) {
err = wldev_iovar_getbuf(ndev, "assoc_req_ies", NULL, 0, cfg->extra_buf,
- assoc_info.req_len, NULL);
+ WL_ASSOC_INFO_MAX, NULL);
if (unlikely(err)) {
WL_ERR(("could not get assoc req (%d)\n", err));
return err;
}
- if (assoc_info.req_len < sizeof(struct dot11_assoc_req)) {
- WL_ERR(("req_len %d lessthan %d \n", assoc_info.req_len,
- (int)sizeof(struct dot11_assoc_req)));
- return BCME_BADLEN;
- }
- conn_info->req_ie_len = (uint32)(assoc_info.req_len
- - sizeof(struct dot11_assoc_req));
+ conn_info->req_ie_len = assoc_info.req_len - sizeof(struct dot11_assoc_req);
if (assoc_info.flags & WLC_ASSOC_REQ_IS_REASSOC) {
conn_info->req_ie_len -= ETHER_ADDR_LEN;
}
} else {
conn_info->req_ie_len = 0;
}
-
if (assoc_info.resp_len) {
err = wldev_iovar_getbuf(ndev, "assoc_resp_ies", NULL, 0, cfg->extra_buf,
- assoc_info.resp_len, NULL);
+ WL_ASSOC_INFO_MAX, NULL);
if (unlikely(err)) {
WL_ERR(("could not get assoc resp (%d)\n", err));
return err;
}
- if (assoc_info.resp_len < sizeof(struct dot11_assoc_resp)) {
- WL_ERR(("resp_len %d is lessthan %d \n", assoc_info.resp_len,
- (int)sizeof(struct dot11_assoc_resp)));
- return BCME_BADLEN;
- }
- conn_info->resp_ie_len = assoc_info.resp_len -
- (uint32)sizeof(struct dot11_assoc_resp);
+ conn_info->resp_ie_len = assoc_info.resp_len - sizeof(struct dot11_assoc_resp);
if (conn_info->resp_ie_len <= MAX_REQ_LINE) {
memcpy(conn_info->resp_ie, cfg->extra_buf, conn_info->resp_ie_len);
} else {
DOT11_MNG_QOS_MAP_ID)) != NULL) {
WL_DBG((" QoS map set IE found in assoc response\n"));
if (!cfg->up_table) {
- cfg->up_table = (uint8 *)MALLOC(cfg->osh, UP_TABLE_MAX);
+ cfg->up_table = kmalloc(UP_TABLE_MAX, GFP_KERNEL);
}
wl_set_up_table(cfg->up_table, qos_map_ie);
} else {
- MFREE(cfg->osh, cfg->up_table, UP_TABLE_MAX);
+ kfree(cfg->up_table);
+ cfg->up_table = NULL;
}
#endif /* QOS_MAP_SET */
} else {
}
static s32 wl_ch_to_chanspec(struct net_device *dev, int ch, struct wl_join_params *join_params,
- size_t *join_params_size)
+ size_t *join_params_size)
{
+ s32 bssidx = -1;
chanspec_t chanspec = 0, chspec;
- struct bcm_cfg80211 *cfg =
- (struct bcm_cfg80211 *)wiphy_priv(dev->ieee80211_ptr->wiphy);
- if ((ch != 0) && (cfg && !cfg->rcc_enabled)) {
- join_params->params.chanspec_num = 1;
- join_params->params.chanspec_list[0] = ch;
+ if (ch != 0) {
+ struct bcm_cfg80211 *cfg =
+ (struct bcm_cfg80211 *)wiphy_priv(dev->ieee80211_ptr->wiphy);
+ join_params->params.chanspec_num = 1;
+ join_params->params.chanspec_list[0] = ch;
- if (join_params->params.chanspec_list[0] <= CH_MAX_2G_CHANNEL)
- chanspec |= WL_CHANSPEC_BAND_2G;
- else
- chanspec |= WL_CHANSPEC_BAND_5G;
+ if (join_params->params.chanspec_list[0] <= CH_MAX_2G_CHANNEL)
+ chanspec |= WL_CHANSPEC_BAND_2G;
+ else
+ chanspec |= WL_CHANSPEC_BAND_5G;
- /* Get the min_bw set for the interface */
- chspec = WL_CHANSPEC_BW_20;
- if (chspec == INVCHANSPEC) {
- WL_ERR(("Invalid chanspec \n"));
- return -EINVAL;
- }
- chanspec |= chspec;
- chanspec |= WL_CHANSPEC_CTL_SB_NONE;
+ /* Get the min_bw set for the interface */
+ chspec = wl_cfg80211_ulb_get_min_bw_chspec(cfg, dev->ieee80211_ptr, bssidx);
+ if (chspec == INVCHANSPEC) {
+ WL_ERR(("Invalid chanspec \n"));
+ return -EINVAL;
+ }
+ chanspec |= chspec;
+ chanspec |= WL_CHANSPEC_CTL_SB_NONE;
- *join_params_size += WL_ASSOC_PARAMS_FIXED_SIZE +
- join_params->params.chanspec_num * sizeof(chanspec_t);
+ *join_params_size += WL_ASSOC_PARAMS_FIXED_SIZE +
+ join_params->params.chanspec_num * sizeof(chanspec_t);
- join_params->params.chanspec_list[0] &= WL_CHANSPEC_CHAN_MASK;
- join_params->params.chanspec_list[0] |= chanspec;
- join_params->params.chanspec_list[0] =
- wl_chspec_host_to_driver(join_params->params.chanspec_list[0]);
+ join_params->params.chanspec_list[0] &= WL_CHANSPEC_CHAN_MASK;
+ join_params->params.chanspec_list[0] |= chanspec;
+ join_params->params.chanspec_list[0] =
+ wl_chspec_host_to_driver(join_params->params.chanspec_list[0]);
- join_params->params.chanspec_num =
- htod32(join_params->params.chanspec_num);
+ join_params->params.chanspec_num =
+ htod32(join_params->params.chanspec_num);
+
+ WL_DBG(("join_params->params.chanspec_list[0]= %X, %d channels\n",
+ join_params->params.chanspec_list[0],
+ join_params->params.chanspec_num));
}
-#ifdef ESCAN_CHANNEL_CACHE
- else {
- /* If channel is not present and ESCAN_CHANNEL_CACHE is enabled,
- * use the cached channel list
- */
- int n_channels;
- n_channels = get_roam_channel_list(ch, join_params->params.chanspec_list,
- MAX_ROAM_CHANNEL, &join_params->ssid, ioctl_version);
- join_params->params.chanspec_num = htod32(n_channels);
- *join_params_size += WL_ASSOC_PARAMS_FIXED_SIZE +
- join_params->params.chanspec_num * sizeof(chanspec_t);
- }
-#endif /* ESCAN_CHANNEL_CACHE */
-
- WL_DBG(("join_params->params.chanspec_list[0]= %X, %d channels\n",
- join_params->params.chanspec_list[0],
- join_params->params.chanspec_num));
return 0;
}
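/* Editor's note -- a minimal sketch (not part of the patch) of the chanspec
 * composition performed by wl_ch_to_chanspec() above: the channel number sits
 * in the low bits and the band, bandwidth and control-sideband fields are
 * OR'ed on top before the host-to-driver endian fixup. The helper name is
 * illustrative; the field masks are the driver's own macros.
 */
#if 0 /* illustration only */
static chanspec_t example_build_20mhz_chanspec(u16 channel)
{
	chanspec_t cs = channel & WL_CHANSPEC_CHAN_MASK;	/* channel number */
	cs |= (channel <= CH_MAX_2G_CHANNEL) ?
		WL_CHANSPEC_BAND_2G : WL_CHANSPEC_BAND_5G;	/* band field */
	cs |= WL_CHANSPEC_BW_20;				/* 20 MHz bandwidth */
	cs |= WL_CHANSPEC_CTL_SB_NONE;				/* no control sideband */
	return wl_chspec_host_to_driver(cs);			/* byte order for firmware */
}
#endif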
-static s32 wl_update_bss_info(struct bcm_cfg80211 *cfg, struct net_device *ndev,
- bool update_ssid)
+static s32 wl_update_bss_info(struct bcm_cfg80211 *cfg, struct net_device *ndev, bool roam)
{
struct cfg80211_bss *bss;
- wl_bss_info_t *bi;
+ struct wl_bss_info *bi;
struct wlc_ssid *ssid;
- const struct bcm_tlv *tim;
+ struct bcm_tlv *tim;
s32 beacon_interval;
s32 dtim_period;
size_t ie_len;
- const u8 *ie;
+ u8 *ie;
u8 *curbssid;
s32 err = 0;
struct wiphy *wiphy;
curbssid = wl_read_prof(cfg, ndev, WL_PROF_BSSID);
bss = CFG80211_GET_BSS(wiphy, NULL, curbssid,
ssid->SSID, ssid->SSID_len);
- buf = (char *)MALLOCZ(cfg->osh, WL_EXTRA_BUF_MAX);
+ buf = kzalloc(WL_EXTRA_BUF_MAX, GFP_ATOMIC);
if (!buf) {
WL_ERR(("buffer alloc failed.\n"));
return BCME_NOMEM;
WL_ERR(("Could not get bss info %d\n", err));
goto update_bss_info_out;
}
- bi = (wl_bss_info_t *)(buf + 4);
+ bi = (struct wl_bss_info *)(buf + 4);
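+ /* Editor's note: the 4-byte offset follows the usual bcmdhd convention that
+ * the WLC_GET_BSS_INFO buffer begins with a 32-bit length word filled in by
+ * the caller, with the wl_bss_info record placed immediately after it.
+ */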
channel = wf_chspec_ctlchan(wl_chspec_driver_to_host(bi->chanspec));
wl_update_prof(cfg, ndev, NULL, &channel, WL_PROF_CHAN);
err = -EIO;
goto update_bss_info_out;
}
- err = wl_inform_single_bss(cfg, bi, update_ssid);
+ err = wl_inform_single_bss(cfg, bi, roam);
if (unlikely(err))
goto update_bss_info_out;
beacon_interval = cpu_to_le16(bi->beacon_period);
} else {
WL_DBG(("Found the AP in the list - BSSID %pM\n", bss->bssid));
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 39) && !defined(WL_COMPAT_WIRELESS)
+#if LINUX_VERSION_CODE == KERNEL_VERSION(2, 6, 38)
freq = ieee80211_channel_to_frequency(channel);
#else
band = (channel <= CH_MAX_2G_CHANNEL) ? IEEE80211_BAND_2GHZ : IEEE80211_BAND_5GHZ;
freq = ieee80211_channel_to_frequency(channel, band);
-#endif // endif
+#endif
bss->channel = ieee80211_get_channel(wiphy, freq);
#if defined(WL_CFG80211_P2P_DEV_IF)
- ie = (const u8 *)bss->ies->data;
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wcast-qual"
+#endif
+ ie = (u8 *)bss->ies->data;
ie_len = bss->ies->len;
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic pop
+#endif
#else
ie = bss->information_elements;
ie_len = bss->len_information_elements;
WL_ERR(("Failed with error %d\n", err));
}
- MFREE(cfg->osh, buf, WL_EXTRA_BUF_MAX);
+ kfree(buf);
mutex_unlock(&cfg->usr_sync);
return err;
}
s32 err = 0;
u8 *curbssid;
u32 *channel;
- scb_val_t scbval;
-#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 39)) || defined(WL_COMPAT_WIRELESS)
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 39))
struct wiphy *wiphy = bcmcfg_to_wiphy(cfg);
struct ieee80211_supported_band *band;
struct ieee80211_channel *notify_channel = NULL;
struct channel_info ci;
u32 cur_channel;
#endif /* BCM4359_CHIP */
-#endif /* LINUX_VERSION > 2.6.39 || WL_COMPAT_WIRELESS */
-#if (defined(CONFIG_ARCH_MSM) && defined(CFG80211_ROAMED_API_UNIFIED)) || \
- (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)) || defined(WL_FILS_ROAM_OFFLD) || \
- defined(CFG80211_ROAM_API_GE_4_12)
- struct cfg80211_roam_info roam_info;
-#endif /* (CONFIG_ARCH_MSM && CFG80211_ROAMED_API_UNIFIED) || LINUX_VERSION >= 4.12.0 */
-#if defined(WL_FILS_ROAM_OFFLD)
- struct wl_fils_info *fils_info = wl_to_fils_info(cfg);
- struct wl_security *sec = wl_read_prof(cfg, ndev, WL_PROF_SEC);
-#endif // endif
+#endif
+#if defined(WLADPS_SEAK_AP_WAR) || defined(WBTEXT)
dhd_pub_t *dhdp = (dhd_pub_t *)(cfg->pub);
-#ifdef DHD_POST_EAPOL_M1_AFTER_ROAM_EVT
- dhd_if_t *ifp = NULL;
-#endif /* DHD_POST_EAPOL_M1_AFTER_ROAM_EVT */
-#ifdef WLFBT
- uint32 data_len = 0;
- if (data)
- data_len = ntoh32(e->datalen);
-#endif /* WLFBT */
+#endif /* WLADPS_SEAK_AP_WAR || WBTEXT */
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0))
+ struct cfg80211_roam_info roam_info = {};
+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)) */
+#ifdef WLADPS_SEAK_AP_WAR
BCM_REFERENCE(dhdp);
+#endif /* WLADPS_SEAK_AP_WAR */
+
curbssid = wl_read_prof(cfg, ndev, WL_PROF_BSSID);
channel = (u32 *)wl_read_prof(cfg, ndev, WL_PROF_CHAN);
#ifdef BCM4359_CHIP
-#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 39)) || defined(WL_COMPAT_WIRELESS)
/* Skip calling cfg80211_roamed If the channels are same and
- * the current bssid & the new bssid are same
+ * the current bssid/last_roamed_bssid & the new bssid are same
* Also clear timer roam_timeout.
* Only used on BCM4359 devices.
*/
- bzero(&ci, sizeof(ci));
+ memset(&ci, 0, sizeof(ci));
if ((wldev_ioctl_get(ndev, WLC_GET_CHANNEL, &ci,
sizeof(ci))) < 0) {
WL_ERR(("Failed to get current channel !"));
- err = BCME_ERROR;
- goto fail;
+ return BCME_ERROR;
}
cur_channel = dtoh32(ci.hw_channel);
if ((*channel == cur_channel) && ((memcmp(curbssid, &e->addr,
ETHER_ADDR_LEN) == 0) || (memcmp(&cfg->last_roamed_addr,
&e->addr, ETHER_ADDR_LEN) == 0))) {
- WL_DBG(("BSS already present, Skipping roamed event to"
+ WL_ERR(("BSS already present, Skipping roamed event to"
" upper layer\n"));
- goto fail;
- }
-#endif /* LINUX_VERSION > 2.6.39 || WL_COMPAT_WIRELESS */
-#endif /* BCM4359 CHIP */
-
- if ((err = wl_get_assoc_ies(cfg, ndev)) != BCME_OK) {
- DHD_STATLOG_CTRL(dhdp, ST(DISASSOC_INT_START),
- dhd_net2idx(dhdp->info, ndev), WLAN_REASON_DEAUTH_LEAVING);
- WL_ERR(("Fetching Assoc IEs failed, Skipping roamed event to"
- " upper layer\n"));
- /* To make sure disconnect, and fw sync, explictly send dissassoc
- * for BSSID 00:00:00:00:00:00 issue
- */
- bzero(&scbval, sizeof(scb_val_t));
- scbval.val = WLAN_REASON_DEAUTH_LEAVING;
- memcpy(&scbval.ea, curbssid, ETHER_ADDR_LEN);
- scbval.val = htod32(scbval.val);
- if (wldev_ioctl_set(ndev, WLC_DISASSOC, &scbval,
- sizeof(scb_val_t)) < 0) {
- WL_ERR(("WLC_DISASSOC error\n"));
- }
- goto fail;
+#ifdef DHD_LOSSLESS_ROAMING
+ wl_del_roam_timeout(cfg);
+#endif /* DHD_LOSSLESS_ROAMING */
+ return err;
}
+#endif /* BCM4359_CHIP */
+ wl_get_assoc_ies(cfg, ndev);
wl_update_prof(cfg, ndev, NULL, (const void *)(e->addr.octet), WL_PROF_BSSID);
curbssid = wl_read_prof(cfg, ndev, WL_PROF_BSSID);
- if ((err = wl_update_bss_info(cfg, ndev, true)) != BCME_OK) {
- WL_ERR(("failed to update bss info, err=%d\n", err));
- goto fail;
- }
+ wl_update_bss_info(cfg, ndev, true);
wl_update_pmklist(ndev, cfg->pmk_list, err);
channel = (u32 *)wl_read_prof(cfg, ndev, WL_PROF_CHAN);
-#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 39)) || defined(WL_COMPAT_WIRELESS)
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 39))
/* channel info for cfg80211_roamed introduced in 2.6.39-rc1 */
if (*channel <= CH_MAX_2G_CHANNEL)
band = wiphy->bands[IEEE80211_BAND_2GHZ];
band = wiphy->bands[IEEE80211_BAND_5GHZ];
freq = ieee80211_channel_to_frequency(*channel, band->band);
notify_channel = ieee80211_get_channel(wiphy, freq);
-#endif /* LINUX_VERSION > 2.6.39 || WL_COMPAT_WIRELESS */
-#ifdef WLFBT
- /* back up the given FBT key for the further supplicant request,
- * currently not checking the FBT is enabled for current BSS in DHD,
- * because the supplicant decides to take it or not.
- */
- if (data && (data_len == FBT_KEYLEN)) {
- memcpy(cfg->fbt_key, data, FBT_KEYLEN);
- }
-#endif /* WLFBT */
-#ifdef CUSTOM_LONG_RETRY_LIMIT
- if (wl_set_retry(ndev, CUSTOM_LONG_RETRY_LIMIT, 1) < 0) {
- WL_ERR(("CUSTOM_LONG_RETRY_LIMIT set fail!\n"));
- }
-#endif /* CUSTOM_LONG_RETRY_LIMIT */
- DHD_STATLOG_CTRL(dhdp, ST(REASSOC_INFORM),
- dhd_net2idx(dhdp->info, ndev), 0);
- WL_ERR(("Report roam event to upper layer. " MACDBG " (ch:%d)\n",
- MAC2STRDBG((const u8*)(&e->addr)), *channel));
- wl_cfg80211_check_in4way(cfg, ndev, 0, WL_EXT_STATUS_CONNECTED, NULL);
-
-#if (defined(CONFIG_ARCH_MSM) && defined(CFG80211_ROAMED_API_UNIFIED)) || \
- (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)) || defined(WL_FILS_ROAM_OFFLD) || \
- defined(CFG80211_ROAM_API_GE_4_12)
- memset(&roam_info, 0, sizeof(struct cfg80211_roam_info));
+#endif
+#ifdef WLADPS_SEAK_AP_WAR
+ if ((dhdp->op_mode & DHD_FLAG_STA_MODE) &&
+ (!dhdp->disabled_adps)) {
+ bool find = FALSE;
+ uint8 enable_mode;
+ if (!memcmp(curbssid, (u8*)CAMEO_MAC_PREFIX, MAC_PREFIX_LEN)) {
+ find = wl_find_vndr_ies_specific_vender(cfg, ndev, ATHEROS_OUI);
+ }
+ enable_mode = (find == TRUE) ? OFF : ON;
+ wl_set_adps_mode(cfg, ndev, enable_mode);
+ }
+#endif /* WLADPS_SEAK_AP_WAR */
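+ /* Editor's note: the WLADPS_SEAK_AP_WAR block above appears to be an
+ * interoperability workaround -- ADPS is switched off when the roamed-to AP
+ * matches the CAMEO MAC prefix and advertises an Atheros vendor IE, and
+ * switched on otherwise. The intent stated here is inferred from the code,
+ * not taken from the patch itself.
+ */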
+ printf("%s succeeded to " MACDBG " (ch:%d)\n", __FUNCTION__,
+ MAC2STRDBG((const u8*)(&e->addr)), *channel);
+ dhd_conf_set_wme(cfg->pub, 0);
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0))
roam_info.channel = notify_channel;
roam_info.bssid = curbssid;
roam_info.req_ie = conn_info->req_ie;
roam_info.req_ie_len = conn_info->req_ie_len;
roam_info.resp_ie = conn_info->resp_ie;
roam_info.resp_ie_len = conn_info->resp_ie_len;
-#if defined(WL_FILS_ROAM_OFFLD)
- if ((sec->auth_type == DOT11_FILS_SKEY_PFS)||(sec->auth_type == DOT11_FILS_SKEY)) {
- roam_info.fils.kek = fils_info->fils_kek;
- roam_info.fils.kek_len = fils_info->fils_kek_len;
- roam_info.fils.update_erp_next_seq_num = true;
- roam_info.fils.erp_next_seq_num = fils_info->fils_erp_next_seq_num;
- roam_info.fils.pmk = fils_info->fils_pmk;
- roam_info.fils.pmk_len = fils_info->fils_kek_len;
- roam_info.fils.pmkid = fils_info->fils_pmkid;
- }
-#endif // endif
+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)) */
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0))
cfg80211_roamed(ndev, &roam_info, GFP_KERNEL);
#else
cfg80211_roamed(ndev,
-#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 39)) || defined(WL_COMPAT_WIRELESS)
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 39))
notify_channel,
-#endif // endif
+#endif
curbssid,
conn_info->req_ie, conn_info->req_ie_len,
conn_info->resp_ie, conn_info->resp_ie_len, GFP_KERNEL);
-#endif /* (CONFIG_ARCH_MSM && CFG80211_ROAMED_API_UNIFIED) || LINUX_VERSION >= 4.12.0 */
+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)) */
+ WL_DBG(("Report roaming result\n"));
memcpy(&cfg->last_roamed_addr, &e->addr, ETHER_ADDR_LEN);
wl_set_drv_status(cfg, CONNECTED, ndev);
-#ifdef DHD_POST_EAPOL_M1_AFTER_ROAM_EVT
- ifp = dhd_get_ifp(dhdp, e->ifidx);
- if (ifp) {
- ifp->post_roam_evt = TRUE;
- }
-#endif /* DHD_POST_EAPOL_M1_AFTER_ROAM_EVT */
+#if defined(DHD_ENABLE_BIGDATA_LOGGING)
+ cfg->roam_count++;
+#endif /* DHD_ENABLE_BIGDATA_LOGGING */
- return err;
+#ifdef WBTEXT
+ if (dhdp->wbtext_support) {
+ /* set wnm_keepalives_max_idle after association */
+ wl_cfg80211_wbtext_set_wnm_maxidle(cfg, ndev);
+ /* send nbr request or BTM query to update RCC */
+ wl_cfg80211_wbtext_update_rcc(cfg, ndev);
+ }
+#endif /* WBTEXT */
-fail:
-#ifdef DHD_LOSSLESS_ROAMING
- wl_del_roam_timeout(cfg);
-#endif /* DHD_LOSSLESS_ROAMING */
return err;
}
static bool
-wl_cfg80211_verify_bss(struct bcm_cfg80211 *cfg, struct net_device *ndev,
- struct cfg80211_bss **bss)
+wl_cfg80211_verify_bss(struct bcm_cfg80211 *cfg, struct net_device *ndev)
{
+ struct cfg80211_bss *bss;
struct wiphy *wiphy;
struct wlc_ssid *ssid;
uint8 *curbssid;
int count = 0;
int ret = false;
- u8 cur_ssid[DOT11_MAX_SSID_LEN + 1];
wiphy = bcmcfg_to_wiphy(cfg);
ssid = (struct wlc_ssid *)wl_read_prof(cfg, ndev, WL_PROF_SSID);
}
do {
- *bss = CFG80211_GET_BSS(wiphy, NULL, curbssid,
+ bss = CFG80211_GET_BSS(wiphy, NULL, curbssid,
ssid->SSID, ssid->SSID_len);
- if (*bss || (count > 5)) {
+ if (bss || (count > 5)) {
break;
}
count++;
msleep(100);
- } while (*bss == NULL);
+ } while (bss == NULL);
- WL_DBG(("cfg80211 bss_ptr:%p loop_cnt:%d\n", *bss, count));
- if (*bss) {
-#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 7, 0))
- /* Update the reference count after use. In case of kernel version >= 4.7
- * the cfg802_put_bss is called in cfg80211_connect_bss context
- */
- CFG80211_PUT_BSS(wiphy, *bss);
-#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(4, 7, 0) */
+ WL_DBG(("cfg80211 bss_ptr:%p loop_cnt:%d\n", bss, count));
+ if (bss) {
+ /* Update the reference count after use */
+ CFG80211_PUT_BSS(wiphy, bss);
ret = true;
- } else {
- memset(cur_ssid, 0, DOT11_MAX_SSID_LEN);
- strncpy(cur_ssid, ssid->SSID,
- MIN(ssid->SSID_len, DOT11_MAX_SSID_LEN));
- WL_ERR(("No bss entry for ssid:%s bssid:"MACDBG"\n",
- cur_ssid, MAC2STRDBG(curbssid)));
}
return ret;
}
-#ifdef WL_FILS
-static s32
-wl_get_fils_connect_params(struct bcm_cfg80211 *cfg, struct net_device *ndev)
-{
- const bcm_xtlv_t* pxtlv_out;
- struct wl_fils_info *fils_info = wl_to_fils_info(cfg);
- int err = BCME_OK;
- bcm_iov_buf_t *iov_buf_in = NULL;
- bcm_iov_buf_t iov_buf_out = {0};
- u16 len;
- u16 type;
- const u8 *data;
- iov_buf_in = MALLOCZ(cfg->osh, WLC_IOCTL_SMLEN);
- if (!iov_buf_in) {
- WL_ERR(("buf memory alloc failed\n"));
- err = BCME_NOMEM;
- goto exit;
- }
- iov_buf_out.version = WL_FILS_IOV_VERSION;
- iov_buf_out.id = WL_FILS_CMD_GET_CONNECT_PARAMS;
- err = wldev_iovar_getbuf(ndev, "fils", (uint8*)&iov_buf_out, sizeof(bcm_iov_buf_t),
- iov_buf_in, WLC_IOCTL_SMLEN, &cfg->ioctl_buf_sync);
- if (unlikely(err)) {
- WL_ERR(("Get FILS Params Error (%d)\n", err));
- goto exit;
- }
- pxtlv_out = (bcm_xtlv_t*)((bcm_iov_buf_t*)iov_buf_in)->data;
- len = iov_buf_in->len;
- do {
- if (!bcm_valid_xtlv(pxtlv_out, iov_buf_in->len, BCM_XTLV_OPTION_ALIGN32)) {
- WL_ERR(("%s: XTLV is not valid\n", __func__));
- err = BCME_BADARG;
- goto exit;
- }
- bcm_xtlv_unpack_xtlv(pxtlv_out, &type, &len, &data, BCM_XTLV_OPTION_ALIGN32);
- switch (type) {
- case WL_FILS_XTLV_ERP_NEXT_SEQ_NUM:
- fils_info->fils_erp_next_seq_num = *(const u16 *)data;
- break;
- case WL_FILS_XTLV_KEK:
- if (memcpy_s(fils_info->fils_kek,
- WL_MAX_FILS_KEY_LEN, data, len) < 0) {
- err = BCME_BADARG;
- goto exit;
- }
- fils_info->fils_kek_len = len;
- break;
- case WL_FILS_XTLV_PMK:
- if (memcpy_s(fils_info->fils_pmk,
- WL_MAX_FILS_KEY_LEN, data, len) < 0) {
- err = BCME_BADARG;
- goto exit;
- }
- fils_info->fils_pmk_len = len;
- break;
- case WL_FILS_XTLV_PMKID:
- if (memcpy_s(fils_info->fils_pmkid,
- WL_MAX_FILS_KEY_LEN, data, len) < 0) {
- err = BCME_BADARG;
- goto exit;
- }
- break;
- default:
- WL_ERR(("%s: wrong XTLV code\n", __func__));
- break;
-
- }
- } while ((pxtlv_out = bcm_next_xtlv(pxtlv_out, (int *)&iov_buf_in->len,
- BCM_XTLV_OPTION_ALIGN32)) && iov_buf_in->len);
-exit:
- if (iov_buf_in) {
- MFREE(cfg->osh, iov_buf_in, WLC_IOCTL_SMLEN);
- }
- return err;
-}
-#endif /* WL_FILS */
static s32
wl_bss_connect_done(struct bcm_cfg80211 *cfg, struct net_device *ndev,
const wl_event_msg_t *e, void *data, bool completed)
struct wl_connect_info *conn_info = wl_to_conn(cfg);
struct wl_security *sec = wl_read_prof(cfg, ndev, WL_PROF_SEC);
s32 err = 0;
-#ifdef WL_FILS
- struct cfg80211_connect_resp_params resp_params = {0};
- struct wl_fils_info *fils_info = NULL;
- struct wlc_ssid *ssid = NULL;
- struct wiphy *wiphy = NULL;
-
-#endif /* WL_FILS */
u8 *curbssid = wl_read_prof(cfg, ndev, WL_PROF_BSSID);
- u32 event_type = ntoh32(e->event_type);
- struct cfg80211_bss *bss = NULL;
dhd_pub_t *dhdp;
+
dhdp = (dhd_pub_t *)(cfg->pub);
BCM_REFERENCE(dhdp);
#ifdef ESCAN_RESULT_PATCH
if (wl_get_drv_status(cfg, CONNECTED, ndev)) {
if (memcmp(curbssid, connect_req_bssid, ETHER_ADDR_LEN) == 0) {
- WL_INFORM_MEM((" Connected event of connected device "
- "e=%d s=%d, ignore it\n",
+ WL_DBG((" Connected event of connected device e=%d s=%d, ignore it\n",
ntoh32(e->event_type), ntoh32(e->status)));
return err;
}
WL_DBG(("copy bssid\n"));
memcpy(curbssid, connect_req_bssid, ETHER_ADDR_LEN);
}
+
#else
if (cfg->scan_request) {
- wl_cfg80211_cancel_scan(cfg);
+ wl_notify_escan_complete(cfg, ndev, true, true);
}
#endif /* ESCAN_RESULT_PATCH */
if (wl_get_drv_status(cfg, CONNECTING, ndev)) {
wl_cfg80211_scan_abort(cfg);
+ wl_clr_drv_status(cfg, CONNECTING, ndev);
if (completed) {
wl_get_assoc_ies(cfg, ndev);
wl_update_prof(cfg, ndev, NULL, (const void *)(e->addr.octet),
WL_PROF_BSSID);
curbssid = wl_read_prof(cfg, ndev, WL_PROF_BSSID);
- /*
- * CFG layer relies on cached IEs (from probe/beacon) to fetch matching bss.
- * For cases, there is no match available,
- * need to update the cache based on bss info from fw.
- */
- wl_update_bss_info(cfg, ndev, true);
+ wl_update_bss_info(cfg, ndev, false);
wl_update_pmklist(ndev, cfg->pmk_list, err);
wl_set_drv_status(cfg, CONNECTED, ndev);
-#if defined(ROAM_ENABLE) && defined(ROAM_AP_ENV_DETECTION)
- if (dhdp->roam_env_detection)
- wldev_iovar_setint(ndev, "roam_env_detection",
- AP_ENV_INDETERMINATE);
-#endif /* ROAM_AP_ENV_DETECTION */
+#ifdef WLADPS_SEAK_AP_WAR
+ if ((dhdp->op_mode & DHD_FLAG_STA_MODE) &&
+ (!dhdp->disabled_adps)) {
+ bool find = FALSE;
+ uint8 enable_mode;
+ if (!memcmp(curbssid, (u8*)CAMEO_MAC_PREFIX, MAC_PREFIX_LEN)) {
+ find = wl_find_vndr_ies_specific_vender(cfg,
+ ndev, ATHEROS_OUI);
+ }
+ enable_mode = (find == TRUE) ? OFF : ON;
+ wl_set_adps_mode(cfg, ndev, enable_mode);
+ }
+#endif /* WLADPS_SEAK_AP_WAR */
if (ndev != bcmcfg_to_prmry_ndev(cfg)) {
#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 13, 0)
init_completion(&cfg->iface_disable);
#else
/* reinitialize completion to clear previous count */
INIT_COMPLETION(cfg->iface_disable);
-#endif // endif
+#endif
}
#ifdef CUSTOM_SET_CPUCORE
if (wl_get_chan_isvht80(ndev, dhdp)) {
dhd_set_cpucore(dhdp, TRUE);
}
#endif /* CUSTOM_SET_CPUCORE */
-#ifdef CUSTOM_LONG_RETRY_LIMIT
- if (wl_set_retry(ndev, CUSTOM_LONG_RETRY_LIMIT, 1) < 0) {
- WL_ERR(("CUSTOM_LONG_RETRY_LIMIT set fail!\n"));
- }
-#endif /* CUSTOM_LONG_RETRY_LIMIT */
- bzero(&cfg->last_roamed_addr, ETHER_ADDR_LEN);
+ memset(&cfg->last_roamed_addr, 0, ETHER_ADDR_LEN);
}
- wl_clr_drv_status(cfg, CONNECTING, ndev);
- if (completed && (wl_cfg80211_verify_bss(cfg, ndev, &bss) != true)) {
+ if (completed && (wl_cfg80211_verify_bss(cfg, ndev) != true)) {
/* If bss entry is not available in the cfg80211 bss cache
* the wireless stack will complain and won't populate
* wdev->current_bss ptr
completed = false;
sec->auth_assoc_res_status = WLAN_STATUS_UNSPECIFIED_FAILURE;
}
+ cfg80211_connect_result(ndev,
+ curbssid,
+ conn_info->req_ie,
+ conn_info->req_ie_len,
+ conn_info->resp_ie,
+ conn_info->resp_ie_len,
+ completed ? WLAN_STATUS_SUCCESS :
+ (sec->auth_assoc_res_status) ?
+ sec->auth_assoc_res_status :
+ WLAN_STATUS_UNSPECIFIED_FAILURE,
+ GFP_KERNEL);
if (completed) {
- WL_MSG(ndev->name, "Report connect result - connection succeeded\n");
- wl_cfg80211_check_in4way(cfg, ndev, 0, WL_EXT_STATUS_CONNECTED, NULL);
+ WL_INFORM(("Report connect result - connection succeeded\n"));
+ dhd_conf_set_wme(cfg->pub, 0);
} else {
- WL_MSG(ndev->name, "Report connect result - connection failed\n");
- wl_cfg80211_check_in4way(cfg, ndev, NO_SCAN_IN4WAY|NO_BTC_IN4WAY|WAIT_DISCONNECTED,
+ WL_ERR(("Report connect result - connection failed\n"));
+ wl_cfg80211_check_in4way(cfg, ndev, NO_SCAN_IN4WAY|NO_BTC_IN4WAY,
WL_EXT_STATUS_DISCONNECTED, NULL);
}
-#ifdef WL_FILS
- if ((sec->auth_type == DOT11_FILS_SKEY_PFS)||(sec->auth_type == DOT11_FILS_SKEY)) {
- wl_get_fils_connect_params(cfg, ndev);
- fils_info = wl_to_fils_info(cfg);
- ssid = (struct wlc_ssid *)wl_read_prof(cfg, ndev, WL_PROF_SSID);
- wiphy = bcmcfg_to_wiphy(cfg);
- resp_params.status = completed ? WLAN_STATUS_SUCCESS :
- (sec->auth_assoc_res_status) ?
- sec->auth_assoc_res_status :
- WLAN_STATUS_UNSPECIFIED_FAILURE;
- resp_params.bssid = curbssid;
- resp_params.bss = CFG80211_GET_BSS(wiphy, NULL, curbssid,
- ssid->SSID, ssid->SSID_len);
- resp_params.req_ie = conn_info->req_ie;
- resp_params.req_ie_len = conn_info->req_ie_len;
- resp_params.resp_ie = conn_info->resp_ie;
- resp_params.resp_ie_len = conn_info->resp_ie_len;
-#if defined(WL_FILS_ROAM_OFFLD) || (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 18, 0))
- resp_params.fils.kek = fils_info->fils_kek;
- resp_params.fils.kek_len = fils_info->fils_kek_len;
- resp_params.fils.update_erp_next_seq_num = true;
- resp_params.fils.erp_next_seq_num = fils_info->fils_erp_next_seq_num;
- resp_params.fils.pmk = fils_info->fils_pmk;
- resp_params.fils.pmk_len = fils_info->fils_kek_len;
- resp_params.fils.pmkid = fils_info->fils_pmkid;
-#else
- resp_params.fils_kek = fils_info->fils_kek;
- resp_params.fils_kek_len = fils_info->fils_kek_len;
- resp_params.update_erp_next_seq_num = true;
- resp_params.fils_erp_next_seq_num = fils_info->fils_erp_next_seq_num;
- resp_params.pmk = fils_info->fils_pmk;
- resp_params.pmk_len = fils_info->fils_kek_len;
- resp_params.pmkid = fils_info->fils_pmkid;
-#endif /* WL_FILS_ROAM_OFFLD */
- cfg80211_connect_done(ndev, &resp_params, GFP_KERNEL);
- }
- else
-#endif /* WL_FILS */
- {
- CFG80211_CONNECT_RESULT(ndev,
- curbssid,
- bss,
- conn_info->req_ie,
- conn_info->req_ie_len,
- conn_info->resp_ie,
- conn_info->resp_ie_len,
- completed ? WLAN_STATUS_SUCCESS :
- (sec->auth_assoc_res_status) ?
- sec->auth_assoc_res_status :
- WLAN_STATUS_UNSPECIFIED_FAILURE,
- GFP_KERNEL);
- }
- } else {
- WL_INFORM_MEM(("[%s] Ignore event:%d. drv status"
- " connecting:%x. connected:%d\n",
- ndev->name, event_type, wl_get_drv_status(cfg, CONNECTING, ndev),
- wl_get_drv_status(cfg, CONNECTED, ndev)));
}
#ifdef CONFIG_TCPACK_FASTTX
if (wl_get_chan_isvht80(ndev, dhdp))
ndev = cfgdev_to_wlc_ndev(cfgdev, cfg);
- WL_INFORM_MEM(("[%s] mic fail event - " MACDBG " \n",
- ndev->name, MAC2STRDBG(e->addr.octet)));
mutex_lock(&cfg->usr_sync);
if (flags & WLC_EVENT_MSG_GROUP)
key_type = NL80211_KEYTYPE_GROUP;
else
key_type = NL80211_KEYTYPE_PAIRWISE;
- wl_flush_fw_log_buffer(ndev, FW_LOGSET_MASK_ALL);
cfg80211_michael_mic_failure(ndev, (const u8 *)&e->addr, key_type, -1,
NULL, GFP_KERNEL);
mutex_unlock(&cfg->usr_sync);
}
#endif /* BT_WIFI_HANDOVER */
+#ifdef PNO_SUPPORT
+static s32
+wl_notify_pfn_status(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev,
+ const wl_event_msg_t *e, void *data)
+{
+ struct net_device *ndev = NULL;
+#ifdef GSCAN_SUPPORT
+ void *ptr;
+ int send_evt_bytes = 0;
+ u32 event = be32_to_cpu(e->event_type);
+ struct wiphy *wiphy = bcmcfg_to_wiphy(cfg);
+#endif /* GSCAN_SUPPORT */
+
+ WL_ERR((">>> PNO Event\n"));
+
+ ndev = cfgdev_to_wlc_ndev(cfgdev, cfg);
+#ifdef GSCAN_SUPPORT
+ ptr = dhd_dev_process_epno_result(ndev, data, event, &send_evt_bytes);
+ if (ptr) {
+ wl_cfgvendor_send_async_event(wiphy, ndev,
+ GOOGLE_SCAN_EPNO_EVENT, ptr, send_evt_bytes);
+ kfree(ptr);
+ }
+ if (!dhd_dev_is_legacy_pno_enabled(ndev))
+ return 0;
+#endif /* GSCAN_SUPPORT */
+
+#ifndef WL_SCHED_SCAN
+ mutex_lock(&cfg->usr_sync);
+ /* TODO: Use cfg80211_sched_scan_results(wiphy); */
+ CFG80211_DISCONNECTED(ndev, 0, NULL, 0, false, GFP_KERNEL);
+ mutex_unlock(&cfg->usr_sync);
+#else
+ /* If cfg80211 scheduled scan is supported, report the pno results via sched
+ * scan results
+ */
+ wl_notify_sched_scan_results(cfg, ndev, e, data);
+#endif /* WL_SCHED_SCAN */
+ return 0;
+}
+#endif /* PNO_SUPPORT */
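+ /* Editor's note (sketch, not part of the patch): the TODO inside
+ * wl_notify_pfn_status() above refers to the cfg80211 scheduled-scan
+ * notifier; on kernels that predate the request-id variant it is simply
+ *
+ *	cfg80211_sched_scan_results(wiphy);
+ *
+ * which reports sched-scan results to userspace directly instead of the
+ * CFG80211_DISCONNECTED workaround used in the legacy-PNO path.
+ */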
+
+#ifdef GSCAN_SUPPORT
+static s32
+wl_notify_gscan_event(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev,
+ const wl_event_msg_t *e, void *data)
+{
+ s32 err = 0;
+ u32 event = be32_to_cpu(e->event_type);
+ void *ptr;
+ int send_evt_bytes = 0;
+ int event_type;
+ struct net_device *ndev = cfgdev_to_wlc_ndev(cfgdev, cfg);
+ struct wiphy *wiphy = bcmcfg_to_wiphy(cfg);
+ u32 len = ntoh32(e->datalen);
+
+ switch (event) {
+ case WLC_E_PFN_BEST_BATCHING:
+ err = dhd_dev_retrieve_batch_scan(ndev);
+ if (err < 0) {
+ WL_ERR(("Batch retrieval already in progress %d\n", err));
+ } else {
+ event_type = WIFI_SCAN_THRESHOLD_NUM_SCANS;
+ if (data && len) {
+ event_type = *((int *)data);
+ }
+ wl_cfgvendor_send_async_event(wiphy, ndev,
+ GOOGLE_GSCAN_BATCH_SCAN_EVENT,
+ &event_type, sizeof(int));
+ }
+ break;
+ case WLC_E_PFN_SCAN_COMPLETE:
+ event_type = WIFI_SCAN_COMPLETE;
+ wl_cfgvendor_send_async_event(wiphy, ndev,
+ GOOGLE_SCAN_COMPLETE_EVENT,
+ &event_type, sizeof(int));
+ break;
+ case WLC_E_PFN_BSSID_NET_FOUND:
+ ptr = dhd_dev_hotlist_scan_event(ndev, data, &send_evt_bytes,
+ HOTLIST_FOUND);
+ if (ptr) {
+ wl_cfgvendor_send_hotlist_event(wiphy, ndev,
+ ptr, send_evt_bytes, GOOGLE_GSCAN_GEOFENCE_FOUND_EVENT);
+ dhd_dev_gscan_hotlist_cache_cleanup(ndev, HOTLIST_FOUND);
+ } else {
+ err = -ENOMEM;
+ }
+ break;
+ case WLC_E_PFN_BSSID_NET_LOST:
+ /* WLC_E_PFN_BSSID_NET_LOST is conflict shared with WLC_E_PFN_SCAN_ALLGONE
+ * We currently do not use WLC_E_PFN_SCAN_ALLGONE, so if we get it, ignore
+ */
+ if (len) {
+ ptr = dhd_dev_hotlist_scan_event(ndev, data, &send_evt_bytes,
+ HOTLIST_LOST);
+ if (ptr) {
+ wl_cfgvendor_send_hotlist_event(wiphy, ndev,
+ ptr, send_evt_bytes, GOOGLE_GSCAN_GEOFENCE_LOST_EVENT);
+ dhd_dev_gscan_hotlist_cache_cleanup(ndev, HOTLIST_LOST);
+ } else {
+ err = -ENOMEM;
+ }
+ } else {
+ err = -EINVAL;
+ }
+ break;
+ case WLC_E_PFN_GSCAN_FULL_RESULT:
+ ptr = dhd_dev_process_full_gscan_result(ndev, data, len, &send_evt_bytes);
+ if (ptr) {
+ wl_cfgvendor_send_async_event(wiphy, ndev,
+ GOOGLE_SCAN_FULL_RESULTS_EVENT, ptr, send_evt_bytes);
+ kfree(ptr);
+ } else {
+ err = -ENOMEM;
+ }
+ break;
+ case WLC_E_PFN_SSID_EXT:
+ ptr = dhd_dev_process_epno_result(ndev, data, event, &send_evt_bytes);
+ if (ptr) {
+ wl_cfgvendor_send_async_event(wiphy, ndev,
+ GOOGLE_SCAN_EPNO_EVENT, ptr, send_evt_bytes);
+ kfree(ptr);
+ } else {
+ err = -ENOMEM;
+ }
+ break;
+ default:
+ WL_ERR(("Unknown event %d\n", event));
+ break;
+ }
+ return err;
+}
+#endif /* GSCAN_SUPPORT */
+
+static s32
+wl_notify_scan_status(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev,
+ const wl_event_msg_t *e, void *data)
+{
+ struct channel_info channel_inform;
+ struct wl_scan_results *bss_list;
+ struct net_device *ndev = NULL;
+ u32 len = WL_SCAN_BUF_MAX;
+ s32 err = 0;
+ unsigned long flags;
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 8, 0))
+ struct cfg80211_scan_info info;
+#endif
+
+ WL_DBG(("Enter \n"));
+ if (!wl_get_drv_status(cfg, SCANNING, ndev)) {
+ WL_ERR(("scan is not ready \n"));
+ return err;
+ }
+ ndev = cfgdev_to_wlc_ndev(cfgdev, cfg);
+
+ mutex_lock(&cfg->usr_sync);
+ wl_clr_drv_status(cfg, SCANNING, ndev);
+ memset(&channel_inform, 0, sizeof(channel_inform));
+ err = wldev_ioctl_get(ndev, WLC_GET_CHANNEL, &channel_inform,
+ sizeof(channel_inform));
+ if (unlikely(err)) {
+ WL_ERR(("scan busy (%d)\n", err));
+ goto scan_done_out;
+ }
+ channel_inform.scan_channel = dtoh32(channel_inform.scan_channel);
+ if (unlikely(channel_inform.scan_channel)) {
+
+ WL_DBG(("channel_inform.scan_channel (%d)\n",
+ channel_inform.scan_channel));
+ }
+ cfg->bss_list = cfg->scan_results;
+ bss_list = cfg->bss_list;
+ memset(bss_list, 0, len);
+ bss_list->buflen = htod32(len);
+ err = wldev_ioctl_get(ndev, WLC_SCAN_RESULTS, bss_list, len);
+ if (unlikely(err) && unlikely(!cfg->scan_suppressed)) {
+ WL_ERR(("%s Scan_results error (%d)\n", ndev->name, err));
+ err = -EINVAL;
+ goto scan_done_out;
+ }
+ bss_list->buflen = dtoh32(bss_list->buflen);
+ bss_list->version = dtoh32(bss_list->version);
+ bss_list->count = dtoh32(bss_list->count);
+
+ err = wl_inform_bss(cfg);
+
+scan_done_out:
+ del_timer_sync(&cfg->scan_timeout);
+ spin_lock_irqsave(&cfg->cfgdrv_lock, flags);
+ if (cfg->scan_request) {
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 8, 0))
+ info.aborted = false;
+ cfg80211_scan_done(cfg->scan_request, &info);
+#else
+ cfg80211_scan_done(cfg->scan_request, false);
+#endif
+ cfg->scan_request = NULL;
+ }
+ spin_unlock_irqrestore(&cfg->cfgdrv_lock, flags);
+ WL_DBG(("cfg80211_scan_done\n"));
+ mutex_unlock(&cfg->usr_sync);
+ return err;
+}
+
static s32
-wl_frame_get_mgmt(struct bcm_cfg80211 *cfg, u16 fc,
- const struct ether_addr *da, const struct ether_addr *sa,
- const struct ether_addr *bssid, u8 **pheader, u32 *body_len, u8 *pbody)
+wl_frame_get_mgmt(u16 fc, const struct ether_addr *da,
+ const struct ether_addr *sa, const struct ether_addr *bssid,
+ u8 **pheader, u32 *body_len, u8 *pbody)
{
struct dot11_management_header *hdr;
u32 totlen = 0;
break;
}
totlen += DOT11_MGMT_HDR_LEN + prebody_len;
- *pheader = (u8 *)MALLOCZ(cfg->osh, totlen);
+ *pheader = kzalloc(totlen, GFP_KERNEL);
if (*pheader == NULL) {
WL_ERR(("memory alloc failed \n"));
return -ENOMEM;
return err;
}
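/* Editor's note: wl_frame_get_mgmt() above synthesizes a bare 802.11
 * management header in front of the event body it is given. For reference,
 * DOT11_MGMT_HDR_LEN (24 bytes) covers frame control (2), duration (2),
 * DA (6), SA (6), BSSID (6) and sequence control (2); the helper only needs
 * the fc/da/sa/bssid arguments shown in its signature, the remaining fields
 * being left zeroed by the allocation.
 */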
-#ifdef WL_CFG80211_GON_COLLISION
-static void
-wl_gon_req_collision(struct bcm_cfg80211 *cfg, wl_action_frame_t *tx_act_frm,
- wifi_p2p_pub_act_frame_t *rx_act_frm, struct net_device *ndev,
- struct ether_addr sa, struct ether_addr da)
-{
- if (cfg->afx_hdl->pending_tx_act_frm == NULL)
- return;
-
- if (tx_act_frm &&
- wl_cfgp2p_is_pub_action(tx_act_frm->data, tx_act_frm->len)) {
- wifi_p2p_pub_act_frame_t *pact_frm;
- pact_frm = (wifi_p2p_pub_act_frame_t *)tx_act_frm->data;
+void
+wl_stop_wait_next_action_frame(struct bcm_cfg80211 *cfg, struct net_device *ndev, u8 bsscfgidx)
+{
+ s32 err = 0;
- if (!(pact_frm->subtype == P2P_PAF_GON_REQ &&
- rx_act_frm->subtype == P2P_PAF_GON_REQ)) {
- return;
- }
- }
-
- WL_ERR((" GO NEGO Request COLLISION !!! \n"));
-
- /* if sa(peer) addr is less than da(my) addr,
- * my device will process peer's gon request and block to send my gon req.
- *
- * if not (sa addr > da addr),
- * my device will process gon request and drop gon req of peer.
- */
- if (memcmp(sa.octet, da.octet, ETHER_ADDR_LEN) < 0) {
- /* block to send tx gon request */
- cfg->block_gon_req_tx_count = BLOCK_GON_REQ_MAX_NUM;
- WL_ERR((" block to send gon req tx !!!\n"));
-
- /* if we are finding a common channel for sending af,
- * do not scan more to block to send current gon req
- */
- if (wl_get_drv_status_all(cfg, FINDING_COMMON_CHANNEL)) {
- wl_clr_drv_status(cfg, FINDING_COMMON_CHANNEL, ndev);
- complete(&cfg->act_frm_scan);
- }
- } else {
- /* drop gon request of peer to process gon request by my device. */
- WL_ERR((" drop to receive gon req rx !!! \n"));
- cfg->block_gon_req_rx_count = BLOCK_GON_REQ_MAX_NUM;
- }
-
- return;
-}
-#endif /* WL_CFG80211_GON_COLLISION */
-
-void
-wl_stop_wait_next_action_frame(struct bcm_cfg80211 *cfg, struct net_device *ndev, u8 bsscfgidx)
-{
- s32 err = 0;
-
- if (wl_get_drv_status_all(cfg, FINDING_COMMON_CHANNEL)) {
- if (timer_pending(&cfg->p2p->listen_timer)) {
- del_timer_sync(&cfg->p2p->listen_timer);
+ if (wl_get_drv_status_all(cfg, FINDING_COMMON_CHANNEL)) {
+ if (timer_pending(&cfg->p2p->listen_timer)) {
+ del_timer_sync(&cfg->p2p->listen_timer);
}
if (cfg->afx_hdl != NULL) {
if (cfg->afx_hdl->dev != NULL) {
wl_set_p2p_status(cfg, ACTION_TX_COMPLETED);
WL_DBG(("*** Wake UP ** abort actframe iovar on bsscfxidx %d\n", bsscfgidx));
- /* Scan engine is not used for sending action frames in the latest driver
- * branches. actframe_abort is used in the latest driver branches
- * instead of scan abort.
- * If actframe_abort iovar succeeds, don't execute scan abort.
- * If actframe_abort fails with unsupported error,
- * execute scan abort (for backward copmatibility).
+ /* if channel is not zero, "actframe" uses off channel scan.
+ * So abort scan for off channel completion.
*/
if (cfg->af_sent_channel) {
+ /* The scan engine is not used for sending action frames in the latest
+ * driver branches, which provide actframe_abort instead of scan abort.
+ * New driver branches: actframe_abort succeeds, so no scan abort is needed.
+ * Old driver branches: actframe_abort fails, so fall back to scan abort.
+ */
err = wldev_iovar_setint_bsscfg(ndev, "actframe_abort", 1, bsscfgidx);
if (err < 0) {
- if (err == BCME_UNSUPPORTED) {
- wl_cfg80211_scan_abort(cfg);
- } else {
- WL_ERR(("actframe_abort failed. ret:%d\n", err));
- }
+ wl_cfg80211_scan_abort(cfg);
}
}
}
}
#endif /* WLTDLS */
+
int wl_cfg80211_get_ioctl_version(void)
{
return ioctl_version;
bool isfree = false;
s32 err = 0;
s32 freq;
+#if defined(TDLS_MSG_ONLY_WFD)
+ dhd_pub_t *dhdp = (dhd_pub_t *)(cfg->pub);
+#endif /* TDLS_MSG_ONLY_WFD */
struct net_device *ndev = NULL;
wifi_p2p_pub_act_frame_t *act_frm = NULL;
wifi_p2p_action_frame_t *p2p_act_frm = NULL;
u8 bsscfgidx;
u32 mgmt_frame_len;
u16 channel;
-#if defined(TDLS_MSG_ONLY_WFD) && defined(WLTDLS)
- dhd_pub_t *dhdp = (dhd_pub_t *)(cfg->pub);
-#endif /* BCMDONGLEHOST && TDLS_MSG_ONLY_WFD && WLTDLS */
if (ntoh32(e->datalen) < sizeof(wl_event_rx_frame_data_t)) {
WL_ERR(("wrong datalen:%d\n", ntoh32(e->datalen)));
return -EINVAL;
}
- mgmt_frame_len = ntoh32(e->datalen) - (uint32)sizeof(wl_event_rx_frame_data_t);
+ mgmt_frame_len = ntoh32(e->datalen) - sizeof(wl_event_rx_frame_data_t);
event = ntoh32(e->event_type);
bsscfgidx = e->bsscfgidx;
rxframe = (wl_event_rx_frame_data_t *)data;
return -EINVAL;
}
channel = (ntoh16(rxframe->channel) & WL_CHANSPEC_CHAN_MASK);
- bzero(&bssid, ETHER_ADDR_LEN);
+ memset(&bssid, 0, ETHER_ADDR_LEN);
ndev = cfgdev_to_wlc_ndev(cfgdev, cfg);
if ((ndev->ieee80211_ptr->iftype != NL80211_IFTYPE_AP) &&
(event == WLC_E_PROBREQ_MSG)) {
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == \
+ 4 && __GNUC_MINOR__ >= 6))
+_Pragma("GCC diagnostic push")
+_Pragma("GCC diagnostic ignored \"-Wcast-qual\"")
+#endif
struct net_info *iter, *next;
- GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
for_each_ndev(cfg, iter, next) {
- GCC_DIAGNOSTIC_POP();
if (iter->ndev && iter->wdev &&
iter->wdev->iftype == NL80211_IFTYPE_AP) {
ndev = iter->ndev;
break;
}
}
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == \
+ 4 && __GNUC_MINOR__ >= 6))
+_Pragma("GCC diagnostic pop")
+#endif
}
if (channel <= CH_MAX_2G_CHANNEL)
WL_ERR(("No valid band\n"));
return -EINVAL;
}
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 39) && !defined(WL_COMPAT_WIRELESS)
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 39)
freq = ieee80211_channel_to_frequency(channel);
(void)band->band;
#else
freq = ieee80211_channel_to_frequency(channel, band->band);
-#endif // endif
+#endif
if (event == WLC_E_ACTION_FRAME_RX) {
- u8 ioctl_buf[WLC_IOCTL_SMLEN];
-
if ((err = wldev_iovar_getbuf_bsscfg(ndev, "cur_etheraddr",
- NULL, 0, ioctl_buf, sizeof(ioctl_buf), bsscfgidx,
- NULL)) != BCME_OK) {
+ NULL, 0, cfg->ioctl_buf, WLC_IOCTL_SMLEN, bsscfgidx,
+ &cfg->ioctl_buf_sync)) != BCME_OK) {
WL_ERR(("WLC_GET_CUR_ETHERADDR failed, error %d\n", err));
goto exit;
}
err = wldev_ioctl_get(ndev, WLC_GET_BSSID, &bssid, ETHER_ADDR_LEN);
if (err < 0)
WL_ERR(("WLC_GET_BSSID error %d\n", err));
- memcpy(da.octet, ioctl_buf, ETHER_ADDR_LEN);
- err = wl_frame_get_mgmt(cfg, FC_ACTION, &da, &e->addr, &bssid,
+ memcpy(da.octet, cfg->ioctl_buf, ETHER_ADDR_LEN);
+ err = wl_frame_get_mgmt(FC_ACTION, &da, &e->addr, &bssid,
&mgmt_frame, &mgmt_frame_len,
(u8 *)((wl_event_rx_frame_data_t *)rxframe + 1));
if (err < 0) {
#ifdef TDLS_MSG_ONLY_WFD
if (!dhdp->tdls_mode) {
WL_DBG((" TDLS Frame filtered \n"));
- goto exit;
+ return 0;
}
#else
if (mgmt_frame[DOT11_MGMT_HDR_LEN + 1] == TDLS_ACTION_SETUP_RESP) {
DOT11_MNG_QOS_MAP_ID)) != NULL) {
WL_DBG((" QoS map set IE found in QoS action frame\n"));
if (!cfg->up_table) {
- cfg->up_table = (uint8 *)MALLOC(cfg->osh, UP_TABLE_MAX);
+ cfg->up_table = kmalloc(UP_TABLE_MAX, GFP_KERNEL);
}
wl_set_up_table(cfg->up_table, qos_map_ie);
} else {
- MFREE(cfg->osh, cfg->up_table, UP_TABLE_MAX);
+ kfree(cfg->up_table);
+ cfg->up_table = NULL;
}
#endif /* QOS_MAP_SET */
+#ifdef WBTEXT
+ } else if (mgmt_frame[DOT11_MGMT_HDR_LEN] == DOT11_ACTION_CAT_RRM) {
+ /* radio measurement category */
+ switch (mgmt_frame[DOT11_MGMT_HDR_LEN+1]) {
+ case DOT11_RM_ACTION_NR_REP:
+ if (wl_cfg80211_recv_nbr_resp(ndev,
+ &mgmt_frame[DOT11_MGMT_HDR_LEN],
+ mgmt_frame_len - DOT11_MGMT_HDR_LEN)
+ == BCME_OK) {
+ WL_DBG(("RCC updated by nbr response\n"));
+ }
+ break;
+ default:
+ break;
+ }
+#endif /* WBTEXT */
} else {
/*
* if we got normal action frame and ndev is p2p0,
* we have to change ndev from p2p0 to wlan0
*/
+
if (cfg->next_af_subtype != P2P_PAF_SUBTYPE_INVALID) {
u8 action = 0;
if (wl_get_public_action(&mgmt_frame[DOT11_MGMT_HDR_LEN],
}
if (act_frm) {
-#ifdef WL_CFG80211_GON_COLLISION
- if (act_frm->subtype == P2P_PAF_GON_REQ) {
- wl_gon_req_collision(cfg,
- &cfg->afx_hdl->pending_tx_act_frm->action_frame,
- act_frm, ndev, e->addr, da);
-
- if (cfg->block_gon_req_rx_count) {
- WL_ERR(("drop frame GON Req Rx : count (%d)\n",
- cfg->block_gon_req_rx_count));
- cfg->block_gon_req_rx_count--;
- goto exit;
- }
- } else if (act_frm->subtype == P2P_PAF_GON_CONF) {
- /* if go formation done, clear it */
- cfg->block_gon_req_tx_count = 0;
- cfg->block_gon_req_rx_count = 0;
- }
-#endif /* WL_CFG80211_GON_COLLISION */
if (wl_get_drv_status_all(cfg, WAITING_NEXT_ACT_FRM)) {
if (cfg->next_af_subtype == act_frm->subtype) {
/* Stop waiting for next AF. */
wl_stop_wait_next_action_frame(cfg, ndev, bsscfgidx);
- } else if ((cfg->next_af_subtype == P2P_PAF_GON_RSP) &&
- (act_frm->subtype == P2P_PAF_GON_REQ)) {
- /* If current received frame is GO NEG REQ and next
- * expected frame is GO NEG RESP, do not send it up.
- */
- WL_ERR(("GO Neg req received while waiting for RESP."
- "Discard incoming frame\n"));
- goto exit;
}
}
}
return 0;
}
if (prbreq_ies.wps_ie != NULL) {
- wl_validate_wps_ie(
- (const char *)prbreq_ies.wps_ie, prbreq_ies.wps_ie_len, &pbc);
+ wl_validate_wps_ie((char *)prbreq_ies.wps_ie, prbreq_ies.wps_ie_len, &pbc);
WL_DBG((" wps_ie exist pbc = %d\n", pbc));
/* if pbc method, send prob_req mgmt frame to upper layer */
if (!pbc)
WL_DBG((" Event %s\n", (event == WLC_E_P2P_PROBREQ_MSG) ?
"WLC_E_P2P_PROBREQ_MSG":"WLC_E_PROBREQ_MSG"));
-#ifdef WL_CFG80211_USE_PRB_REQ_FOR_AF_TX
- if (WL_DRV_STATUS_SENDING_AF_FRM_EXT(cfg) &&
- !memcmp(cfg->afx_hdl->tx_dst_addr.octet, e->addr.octet,
- ETHER_ADDR_LEN)) {
- if (cfg->afx_hdl->pending_tx_act_frm &&
- wl_get_drv_status_all(cfg, FINDING_COMMON_CHANNEL)) {
- s32 channel = CHSPEC_CHANNEL(hton16(rxframe->channel));
- WL_DBG(("PROBE REQUEST : Peer found, channel : %d\n",
- channel));
- cfg->afx_hdl->peer_chan = channel;
- complete(&cfg->act_frm_scan);
- }
- }
-#endif /* WL_CFG80211_USE_PRB_REQ_FOR_AF_TX */
/* Filter any P2P probe reqs arriving during the
* GO-NEG Phase
if (cfg->p2p &&
#if defined(P2P_IE_MISSING_FIX)
cfg->p2p_prb_noti &&
-#endif // endif
+#endif
wl_get_p2p_status(cfg, GO_NEG_PHASE)) {
WL_DBG(("Filtering P2P probe_req while "
"being in GO-Neg state\n"));
WL_DBG(("mgmt_frame_len (%d) , e->datalen (%d), channel (%d), freq (%d)\n",
mgmt_frame_len, ntoh32(e->datalen), channel, freq));
exit:
- if (isfree) {
- MFREE(cfg->osh, mgmt_frame, mgmt_frame_len);
+ if (isfree)
+ kfree(mgmt_frame);
+ return err;
+}
+
+#ifdef WL_SCHED_SCAN
+/* If target scan is not reliable, set the below define to "1" to do a
+ * full escan
+ */
+#define FULL_ESCAN_ON_PFN_NET_FOUND 0
+static s32
+wl_notify_sched_scan_results(struct bcm_cfg80211 *cfg, struct net_device *ndev,
+ const wl_event_msg_t *e, void *data)
+{
+ wl_pfn_net_info_v2_t *netinfo, *pnetinfo;
+ struct wiphy *wiphy = bcmcfg_to_wiphy(cfg);
+ dhd_pub_t *dhdp = (dhd_pub_t *)(cfg->pub);
+ int err = 0;
+ struct cfg80211_scan_request *request = NULL;
+ struct cfg80211_ssid ssid[MAX_PFN_LIST_COUNT];
+ struct ieee80211_channel *channel = NULL;
+ int channel_req = 0;
+ int band = 0;
+ wl_pfn_scanresults_t *pfn_result = (wl_pfn_scanresults_t *)data;
+ int n_pfn_results = pfn_result->count;
+ log_conn_event_t *event_data = NULL;
+ tlv_log *tlv_data = NULL;
+ u32 alloc_len, tlv_len;
+ u32 payload_len;
+
+ WL_DBG(("Enter\n"));
+
+ if (pfn_result->version != PFN_SCANRESULT_VERSION) {
+ WL_ERR(("Incorrect version %d, expected %d\n", pfn_result->version,
+ PFN_SCANRESULT_VERSION));
+ return BCME_VERSION;
+ }
+
+ if (e->event_type == WLC_E_PFN_NET_LOST) {
+ WL_PNO(("PFN NET LOST event. Do Nothing\n"));
+ return 0;
+ }
+
+ WL_PNO((">>> PFN NET FOUND event. count:%d \n", n_pfn_results));
+ if (n_pfn_results > 0) {
+ int i;
+
+ if (n_pfn_results > MAX_PFN_LIST_COUNT)
+ n_pfn_results = MAX_PFN_LIST_COUNT;
+ pnetinfo = (wl_pfn_net_info_v2_t *)((char *)data + sizeof(wl_pfn_scanresults_v2_t)
+ - sizeof(wl_pfn_net_info_v2_t));
+
+ memset(&ssid, 0x00, sizeof(ssid));
+
+ request = kzalloc(sizeof(*request)
+ + sizeof(*request->channels) * n_pfn_results,
+ GFP_KERNEL);
+ channel = (struct ieee80211_channel *)kzalloc(
+ (sizeof(struct ieee80211_channel) * n_pfn_results),
+ GFP_KERNEL);
+ if (!request || !channel) {
+ WL_ERR(("No memory"));
+ err = -ENOMEM;
+ goto out_err;
+ }
+
+ request->wiphy = wiphy;
+
+ if (DBG_RING_ACTIVE(dhdp, DHD_EVENT_RING_ID)) {
+ alloc_len = sizeof(log_conn_event_t) + DOT11_MAX_SSID_LEN + sizeof(uint16) +
+ sizeof(int16);
+ event_data = MALLOC(dhdp->osh, alloc_len);
+ if (!event_data) {
+ WL_ERR(("%s: failed to allocate the log_conn_event_t with "
+ "length(%d)\n", __func__, alloc_len));
+ goto out_err;
+ }
+ tlv_len = 3 * sizeof(tlv_log);
+ event_data->tlvs = MALLOC(dhdp->osh, tlv_len);
+ if (!event_data->tlvs) {
+ WL_ERR(("%s: failed to allocate the tlv_log with "
+ "length(%d)\n", __func__, tlv_len));
+ goto out_err;
+ }
+ }
+
+ for (i = 0; i < n_pfn_results; i++) {
+ netinfo = &pnetinfo[i];
+ if (!netinfo) {
+ WL_ERR(("Invalid netinfo ptr. index:%d", i));
+ err = -EINVAL;
+ goto out_err;
+ }
+ WL_PNO((">>> SSID:%s Channel:%d \n",
+ netinfo->pfnsubnet.u.SSID, netinfo->pfnsubnet.channel));
+ /* The PFN result doesn't carry all the info required by the
+ * supplicant (e.g. IEs), so do a targeted escan and report the
+ * sched scan results via wl_inform_single_bss in the required
+ * format. Escan needs the request in the form of a
+ * cfg80211_scan_request, so for the time being build one out of
+ * the received PNO event.
+ */
+ ssid[i].ssid_len = MIN(DOT11_MAX_SSID_LEN, netinfo->pfnsubnet.SSID_len);
+ memcpy(ssid[i].ssid, netinfo->pfnsubnet.u.SSID,
+ ssid[i].ssid_len);
+ request->n_ssids++;
+
+ channel_req = netinfo->pfnsubnet.channel;
+ band = (channel_req <= CH_MAX_2G_CHANNEL) ? NL80211_BAND_2GHZ
+ : NL80211_BAND_5GHZ;
+ channel[i].center_freq = ieee80211_channel_to_frequency(channel_req, band);
+ channel[i].band = band;
+ channel[i].flags |= IEEE80211_CHAN_NO_HT40;
+ request->channels[i] = &channel[i];
+ request->n_channels++;
+
+ if (DBG_RING_ACTIVE(dhdp, DHD_EVENT_RING_ID)) {
+ payload_len = sizeof(log_conn_event_t);
+ event_data->event = WIFI_EVENT_DRIVER_PNO_NETWORK_FOUND;
+ tlv_data = event_data->tlvs;
+
+ /* ssid */
+ tlv_data->tag = WIFI_TAG_SSID;
+ tlv_data->len = netinfo->pfnsubnet.SSID_len;
+ memcpy(tlv_data->value, ssid[i].ssid, ssid[i].ssid_len);
+ payload_len += TLV_LOG_SIZE(tlv_data);
+ tlv_data = TLV_LOG_NEXT(tlv_data);
+
+ /* channel */
+ tlv_data->tag = WIFI_TAG_CHANNEL;
+ tlv_data->len = sizeof(uint16);
+ memcpy(tlv_data->value, &channel_req, sizeof(uint16));
+ payload_len += TLV_LOG_SIZE(tlv_data);
+ tlv_data = TLV_LOG_NEXT(tlv_data);
+
+ /* rssi */
+ tlv_data->tag = WIFI_TAG_RSSI;
+ tlv_data->len = sizeof(int16);
+ memcpy(tlv_data->value, &netinfo->RSSI, sizeof(int16));
+ payload_len += TLV_LOG_SIZE(tlv_data);
+ tlv_data = TLV_LOG_NEXT(tlv_data);
+
+ dhd_os_push_push_ring_data(dhdp, DHD_EVENT_RING_ID,
+ &event_data->event, payload_len);
+ }
+ }
+
+ /* assign parsed ssid array */
+ if (request->n_ssids)
+ request->ssids = &ssid[0];
+
+ if (wl_get_drv_status_all(cfg, SCANNING)) {
+ /* Abort any on-going scan */
+ wl_notify_escan_complete(cfg, ndev, true, true);
+ }
+
+ if (wl_get_p2p_status(cfg, DISCOVERY_ON)) {
+ WL_PNO((">>> P2P discovery was ON. Disabling it\n"));
+ err = wl_cfgp2p_discover_enable_search(cfg, false);
+ if (unlikely(err)) {
+ wl_clr_drv_status(cfg, SCANNING, ndev);
+ goto out_err;
+ }
+ p2p_scan(cfg) = false;
+ }
+
+ wl_set_drv_status(cfg, SCANNING, ndev);
+#if FULL_ESCAN_ON_PFN_NET_FOUND
+ WL_PNO((">>> Doing Full ESCAN on PNO event\n"));
+ err = wl_do_escan(cfg, wiphy, ndev, NULL);
+#else
+ WL_PNO((">>> Doing targeted ESCAN on PNO event\n"));
+ err = wl_do_escan(cfg, wiphy, ndev, request);
+#endif
+ if (err) {
+ wl_clr_drv_status(cfg, SCANNING, ndev);
+ goto out_err;
+ }
+ DBG_EVENT_LOG(dhdp, WIFI_EVENT_DRIVER_PNO_SCAN_REQUESTED);
+ cfg->sched_scan_running = TRUE;
+ }
+ else {
+ WL_ERR(("FALSE PNO Event. (pfn_count == 0) \n"));
+ }
+out_err:
+ if (request)
+ kfree(request);
+ if (channel)
+ kfree(channel);
+
+ if (event_data) {
+ MFREE(dhdp->osh, event_data->tlvs, tlv_len);
+ MFREE(dhdp->osh, event_data, alloc_len);
}
return err;
}
+#endif /* WL_SCHED_SCAN */
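/*
 * Editor's sketch (illustration only, not part of the patch): the event-ring
 * records built in wl_notify_sched_scan_results() above are a
 * log_conn_event_t header followed by a packed run of tag/length/value
 * entries. Assuming tlv_log is laid out as { uint16 tag; uint16 len;
 * uint8 value[]; } and that TLV_LOG_SIZE()/TLV_LOG_NEXT() account for the
 * header plus 'len' value bytes, appending one entry (as done three times
 * above for SSID, channel and RSSI) reduces to a hypothetical helper:
 */
static inline tlv_log *
append_tlv_sketch(tlv_log *tlv, uint16 tag, const void *val, uint16 len,
	u32 *payload_len)
{
	tlv->tag = tag;				/* e.g. WIFI_TAG_SSID / WIFI_TAG_CHANNEL / WIFI_TAG_RSSI */
	tlv->len = len;
	memcpy(tlv->value, val, len);		/* value bytes follow the header directly */
	*payload_len += TLV_LOG_SIZE(tlv);	/* header + value accounted in the pushed payload */
	return TLV_LOG_NEXT(tlv);		/* next entry starts immediately after this one */
}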
static void wl_init_conf(struct wl_conf *conf)
{
unsigned long flags;
struct wl_profile *profile = wl_get_profile_by_netdev(cfg, ndev);
- if (!profile) {
- WL_ERR(("profile null\n"));
- return;
+ if (profile) {
+ spin_lock_irqsave(&cfg->cfgdrv_lock, flags);
+ memset(profile, 0, sizeof(struct wl_profile));
+ spin_unlock_irqrestore(&cfg->cfgdrv_lock, flags);
+ } else {
+ WL_ERR(("%s: No profile\n", __FUNCTION__));
}
-
- WL_CFG_DRV_LOCK(&cfg->cfgdrv_lock, flags);
- bzero(profile, sizeof(struct wl_profile));
- WL_CFG_DRV_UNLOCK(&cfg->cfgdrv_lock, flags);
+ return;
}
static void wl_init_event_handler(struct bcm_cfg80211 *cfg)
{
- bzero(cfg->evt_handler, sizeof(cfg->evt_handler));
+ memset(cfg->evt_handler, 0, sizeof(cfg->evt_handler));
cfg->evt_handler[WLC_E_SCAN_COMPLETE] = wl_notify_scan_status;
cfg->evt_handler[WLC_E_AUTH] = wl_notify_connect_status;
cfg->evt_handler[WLC_E_ACTION_FRAME_OFF_CHAN_COMPLETE] = wl_cfgp2p_action_tx_complete;
cfg->evt_handler[WLC_E_JOIN] = wl_notify_connect_status;
cfg->evt_handler[WLC_E_START] = wl_notify_connect_status;
- cfg->evt_handler[WLC_E_AUTH_IND] = wl_notify_connect_status;
- cfg->evt_handler[WLC_E_ASSOC_RESP_IE] = wl_notify_connect_status;
#ifdef PNO_SUPPORT
cfg->evt_handler[WLC_E_PFN_NET_FOUND] = wl_notify_pfn_status;
#endif /* PNO_SUPPORT */
#endif /* WL_RELMCAST */
#ifdef BT_WIFI_HANDOVER
cfg->evt_handler[WLC_E_BT_WIFI_HANDOVER_REQ] = wl_notify_bt_wifi_handover_req;
-#endif // endif
-#ifdef WL_NAN
- cfg->evt_handler[WLC_E_NAN_CRITICAL] = wl_cfgnan_notify_nan_status;
- cfg->evt_handler[WLC_E_NAN_NON_CRITICAL] = wl_cfgnan_notify_nan_status;
-#endif /* WL_NAN */
+#endif
cfg->evt_handler[WLC_E_CSA_COMPLETE_IND] = wl_csa_complete_ind;
cfg->evt_handler[WLC_E_AP_STARTED] = wl_ap_start_ind;
#ifdef CUSTOM_EVENT_PM_WAKE
#if defined(DHD_LOSSLESS_ROAMING) || defined(DBG_PKT_MON)
cfg->evt_handler[WLC_E_ROAM_PREP] = wl_notify_roam_prep_status;
#endif /* DHD_LOSSLESS_ROAMING || DBG_PKT_MON */
- cfg->evt_handler[WLC_E_ROAM_START] = wl_notify_roam_start_status;
- cfg->evt_handler[WLC_E_PSK_SUP] = wl_cfg80211_sup_event_handler;
-#ifdef WL_BCNRECV
- cfg->evt_handler[WLC_E_BCNRECV_ABORTED] = wl_bcnrecv_aborted_event_handler;
-#endif /* WL_BCNRECV */
-#ifdef WL_MBO
- cfg->evt_handler[WLC_E_MBO] = wl_mbo_event_handler;
-#endif /* WL_MBO */
-#ifdef WL_CAC_TS
- cfg->evt_handler[WLC_E_ADDTS_IND] = wl_cfg80211_cac_event_handler;
- cfg->evt_handler[WLC_E_DELTS_IND] = wl_cfg80211_cac_event_handler;
-#endif /* WL_CAC_TS */
-#if defined(WL_MBO) || defined(WL_OCE)
- cfg->evt_handler[WLC_E_PRUNE] = wl_bssid_prune_event_handler;
-#endif /* WL_MBO || WL_OCE */
-#ifdef RTT_SUPPORT
- cfg->evt_handler[WLC_E_PROXD] = wl_cfg80211_rtt_event_handler;
-#endif // endif
-#ifdef WL_CHAN_UTIL
- cfg->evt_handler[WLC_E_BSS_LOAD] = wl_cfg80211_bssload_report_event_handler;
-#endif /* WL_CHAN_UTIL */
-#ifdef WL_CLIENT_SAE
- cfg->evt_handler[WLC_E_JOIN_START] = wl_notify_start_auth;
-#endif /* WL_CLIENT_SAE */
-}
-
-#ifdef WL_CLIENT_SAE
-/** Called by the cfg80211 framework */
-static s32
-wl_cfg80211_external_auth(struct wiphy *wiphy,
- struct net_device *ndev, struct cfg80211_external_auth_params *ext_auth_param)
-{
- int err = 0;
- struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
- wl_assoc_mgr_cmd_t cmd;
-
- WL_DBG(("Enter\n"));
-
- if (!ext_auth_param ||
- ETHER_ISNULLADDR(ext_auth_param->bssid)) {
- WL_ERR(("Invalid wl_cfg80211_external_auth param\n"));
- return -EINVAL;
- }
+#ifdef ENABLE_TEMP_THROTTLING
+ cfg->evt_handler[WLC_E_TEMP_THROTTLE] = wl_check_rx_throttle_status;
+#endif /* ENABLE_TEMP_THROTTLING */
- cmd.version = WL_ASSOC_MGR_CURRENT_VERSION;
- cmd.length = sizeof(cmd);
- cmd.cmd = WL_ASSOC_MGR_CMD_PAUSE_ON_EVT;
- cmd.params = WL_ASSOC_MGR_PARAMS_EVENT_NONE;
- err = wldev_iovar_setbuf(ndev, "assoc_mgr_cmd", (void *)&cmd, sizeof(cmd), cfg->ioctl_buf,
- WLC_IOCTL_SMLEN, &cfg->ioctl_buf_sync);
- if (unlikely(err)) {
- WL_ERR(("Failed to pause assoc(%d)\n", err));
- }
-
- return err;
}
-#endif /* WL_CLIENT_SAE */
#if defined(STATIC_WL_PRIV_STRUCT)
static int
wl_init_escan_result_buf(struct bcm_cfg80211 *cfg)
{
-#ifdef DUAL_ESCAN_RESULT_BUFFER
- cfg->escan_info.escan_buf[0] = DHD_OS_PREALLOC(cfg->pub,
- DHD_PREALLOC_WIPHY_ESCAN0, ESCAN_BUF_SIZE);
- if (cfg->escan_info.escan_buf[0] == NULL) {
- WL_ERR(("Failed to alloc ESCAN_BUF0\n"));
- return -ENOMEM;
- }
-
- cfg->escan_info.escan_buf[1] = DHD_OS_PREALLOC(cfg->pub,
- DHD_PREALLOC_WIPHY_ESCAN1, ESCAN_BUF_SIZE);
- if (cfg->escan_info.escan_buf[1] == NULL) {
- WL_ERR(("Failed to alloc ESCAN_BUF1\n"));
- return -ENOMEM;
- }
-
- bzero(cfg->escan_info.escan_buf[0], ESCAN_BUF_SIZE);
- bzero(cfg->escan_info.escan_buf[1], ESCAN_BUF_SIZE);
- cfg->escan_info.escan_type[0] = 0;
- cfg->escan_info.escan_type[1] = 0;
-#else
cfg->escan_info.escan_buf = DHD_OS_PREALLOC(cfg->pub,
DHD_PREALLOC_WIPHY_ESCAN0, ESCAN_BUF_SIZE);
if (cfg->escan_info.escan_buf == NULL) {
return -ENOMEM;
}
bzero(cfg->escan_info.escan_buf, ESCAN_BUF_SIZE);
-#endif /* DUAL_ESCAN_RESULT_BUFFER */
return 0;
}
static void
wl_deinit_escan_result_buf(struct bcm_cfg80211 *cfg)
{
-#ifdef DUAL_ESCAN_RESULT_BUFFER
- if (cfg->escan_info.escan_buf[0] != NULL) {
- cfg->escan_info.escan_buf[0] = NULL;
- cfg->escan_info.escan_type[0] = 0;
- }
-
- if (cfg->escan_info.escan_buf[1] != NULL) {
- cfg->escan_info.escan_buf[1] = NULL;
- cfg->escan_info.escan_type[1] = 0;
- }
-#else
if (cfg->escan_info.escan_buf != NULL) {
cfg->escan_info.escan_buf = NULL;
}
-#endif /* DUAL_ESCAN_RESULT_BUFFER */
}
#endif /* STATIC_WL_PRIV_STRUCT */
{
WL_DBG(("Enter \n"));
- cfg->scan_results = (struct wl_scan_results *)MALLOCZ(cfg->osh,
- WL_SCAN_BUF_MAX);
+ cfg->scan_results = (void *)kzalloc(WL_SCAN_BUF_MAX, GFP_KERNEL);
if (unlikely(!cfg->scan_results)) {
WL_ERR(("Scan results alloc failed\n"));
goto init_priv_mem_out;
}
- cfg->conf = (struct wl_conf *)MALLOCZ(cfg->osh, sizeof(*cfg->conf));
+ cfg->conf = (void *)kzalloc(sizeof(*cfg->conf), GFP_KERNEL);
if (unlikely(!cfg->conf)) {
WL_ERR(("wl_conf alloc failed\n"));
goto init_priv_mem_out;
}
- cfg->scan_req_int = (void *)MALLOCZ(cfg->osh,
- sizeof(*cfg->scan_req_int));
+ cfg->scan_req_int =
+ (void *)kzalloc(sizeof(*cfg->scan_req_int), GFP_KERNEL);
if (unlikely(!cfg->scan_req_int)) {
WL_ERR(("Scan req alloc failed\n"));
goto init_priv_mem_out;
}
- cfg->ioctl_buf = (u8 *)MALLOCZ(cfg->osh, WLC_IOCTL_MAXLEN);
+ cfg->ioctl_buf = (void *)kzalloc(WLC_IOCTL_MAXLEN, GFP_KERNEL);
if (unlikely(!cfg->ioctl_buf)) {
WL_ERR(("Ioctl buf alloc failed\n"));
goto init_priv_mem_out;
}
- cfg->escan_ioctl_buf = (void *)MALLOCZ(cfg->osh, WLC_IOCTL_MAXLEN);
+ cfg->escan_ioctl_buf = (void *)kzalloc(WLC_IOCTL_MAXLEN, GFP_KERNEL);
if (unlikely(!cfg->escan_ioctl_buf)) {
WL_ERR(("Ioctl buf alloc failed\n"));
goto init_priv_mem_out;
}
- cfg->extra_buf = (void *)MALLOCZ(cfg->osh, WL_EXTRA_BUF_MAX);
+ cfg->extra_buf = (void *)kzalloc(WL_EXTRA_BUF_MAX, GFP_KERNEL);
if (unlikely(!cfg->extra_buf)) {
WL_ERR(("Extra buf alloc failed\n"));
goto init_priv_mem_out;
}
- cfg->pmk_list = (void *)MALLOCZ(cfg->osh, sizeof(*cfg->pmk_list));
+ cfg->pmk_list = (void *)kzalloc(sizeof(*cfg->pmk_list), GFP_KERNEL);
if (unlikely(!cfg->pmk_list)) {
WL_ERR(("pmk list alloc failed\n"));
goto init_priv_mem_out;
}
#if defined(STATIC_WL_PRIV_STRUCT)
- cfg->conn_info = (void *)MALLOCZ(cfg->osh, sizeof(*cfg->conn_info));
+ cfg->conn_info = (void *)kzalloc(sizeof(*cfg->conn_info), GFP_KERNEL);
if (unlikely(!cfg->conn_info)) {
- WL_ERR(("cfg->conn_info alloc failed\n"));
+ WL_ERR(("cfg->conn_info alloc failed\n"));
goto init_priv_mem_out;
}
- cfg->ie = (void *)MALLOC(cfg->osh, sizeof(*cfg->ie));
+ cfg->ie = (void *)kzalloc(sizeof(*cfg->ie), GFP_KERNEL);
if (unlikely(!cfg->ie)) {
- WL_ERR(("cfg->ie alloc failed\n"));
+ WL_ERR(("cfg->ie alloc failed\n"));
goto init_priv_mem_out;
}
if (unlikely(wl_init_escan_result_buf(cfg))) {
goto init_priv_mem_out;
}
#endif /* STATIC_WL_PRIV_STRUCT */
- cfg->afx_hdl = (void *)MALLOCZ(cfg->osh, sizeof(*cfg->afx_hdl));
+ cfg->afx_hdl = (void *)kzalloc(sizeof(*cfg->afx_hdl), GFP_KERNEL);
if (unlikely(!cfg->afx_hdl)) {
- WL_ERR(("afx hdl alloc failed\n"));
+ WL_ERR(("afx hdl alloc failed\n"));
goto init_priv_mem_out;
} else {
init_completion(&cfg->act_frm_scan);
}
#ifdef WLTDLS
if (cfg->tdls_mgmt_frame) {
- MFREE(cfg->osh, cfg->tdls_mgmt_frame, cfg->tdls_mgmt_frame_len);
+ kfree(cfg->tdls_mgmt_frame);
cfg->tdls_mgmt_frame = NULL;
- cfg->tdls_mgmt_frame_len = 0;
}
#endif /* WLTDLS */
return 0;
static void wl_deinit_priv_mem(struct bcm_cfg80211 *cfg)
{
- MFREE(cfg->osh, cfg->scan_results, WL_SCAN_BUF_MAX);
- MFREE(cfg->osh, cfg->conf, sizeof(*cfg->conf));
- MFREE(cfg->osh, cfg->scan_req_int, sizeof(*cfg->scan_req_int));
- MFREE(cfg->osh, cfg->ioctl_buf, WLC_IOCTL_MAXLEN);
- MFREE(cfg->osh, cfg->escan_ioctl_buf, WLC_IOCTL_MAXLEN);
- MFREE(cfg->osh, cfg->extra_buf, WL_EXTRA_BUF_MAX);
- MFREE(cfg->osh, cfg->pmk_list, sizeof(*cfg->pmk_list));
+ kfree(cfg->scan_results);
+ cfg->scan_results = NULL;
+ kfree(cfg->conf);
+ cfg->conf = NULL;
+ kfree(cfg->scan_req_int);
+ cfg->scan_req_int = NULL;
+ kfree(cfg->ioctl_buf);
+ cfg->ioctl_buf = NULL;
+ kfree(cfg->escan_ioctl_buf);
+ cfg->escan_ioctl_buf = NULL;
+ kfree(cfg->extra_buf);
+ cfg->extra_buf = NULL;
+ kfree(cfg->pmk_list);
+ cfg->pmk_list = NULL;
#if defined(STATIC_WL_PRIV_STRUCT)
- MFREE(cfg->osh, cfg->conn_info, sizeof(*cfg->conn_info));
- MFREE(cfg->osh, cfg->ie, sizeof(*cfg->ie));
+ kfree(cfg->conn_info);
+ cfg->conn_info = NULL;
+ kfree(cfg->ie);
+ cfg->ie = NULL;
wl_deinit_escan_result_buf(cfg);
#endif /* STATIC_WL_PRIV_STRUCT */
if (cfg->afx_hdl) {
cancel_work_sync(&cfg->afx_hdl->work);
- MFREE(cfg->osh, cfg->afx_hdl, sizeof(*cfg->afx_hdl));
+ kfree(cfg->afx_hdl);
+ cfg->afx_hdl = NULL;
}
}
/* Allocate workqueue for event */
if (!cfg->event_workq) {
- cfg->event_workq = alloc_workqueue("dhd_eventd",
- WQ_MEM_RECLAIM | WQ_HIGHPRI | WQ_UNBOUND, 1);
+ cfg->event_workq = alloc_workqueue("dhd_eventd", WQ_MEM_RECLAIM | WQ_HIGHPRI, 0);
}
if (!cfg->event_workq) {
}
}
+static void wl_scan_timeout(
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 15, 0)
+ struct timer_list *t
+#else
+ unsigned long data
+#endif
+)
+{
+ wl_event_msg_t msg;
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 15, 0)
+ struct bcm_cfg80211 *cfg = from_timer(cfg, t, scan_timeout);
+#else
+ struct bcm_cfg80211 *cfg = (struct bcm_cfg80211 *)data;
+#endif
+ struct wireless_dev *wdev = NULL;
+ struct net_device *ndev = NULL;
+ struct wl_scan_results *bss_list;
+ struct wl_bss_info *bi = NULL;
+ s32 i;
+ u32 channel;
+#if 0
+ dhd_pub_t *dhdp = (dhd_pub_t *)(cfg->pub);
+ uint32 prev_memdump_mode = dhdp->memdump_enabled;
+#endif /* DHD_DEBUG && DHD_FW_COREDUMP */
+
+ if (!(cfg->scan_request)) {
+ WL_ERR(("timer expired but no scan request\n"));
+ return;
+ }
+
+ bss_list = wl_escan_get_buf(cfg, FALSE);
+ if (!bss_list) {
+ WL_ERR(("bss_list is null. Didn't receive any partial scan results\n"));
+ } else {
+ WL_ERR(("scanned AP count (%d)\n", bss_list->count));
+
+ bi = next_bss(bss_list, bi);
+ for_each_bss(bss_list, bi, i) {
+ if (bi != NULL && &(bi->chanspec) != NULL && (bi->SSID)) {
+ channel = wf_chspec_ctlchan(wl_chspec_driver_to_host(bi->chanspec));
+ WL_ERR(("SSID :%s SSID_LEN :%d Channel :%d\n", bi->SSID, bi->SSID_len, channel));
+ if (bi->SSID[0] == '\0') {
+ WL_ERR(("SSID :%s is null ssid_len:%d ,need return\n", bi->SSID, bi->SSID_len));
+ return;
+ }
+ } else {
+ WL_ERR(("SSID or Channel is null\n"));
+ return;
+ }
+ }
+ }
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 6, 0))
+ if (cfg->scan_request->dev)
+ wdev = cfg->scan_request->dev->ieee80211_ptr;
+#else
+ if (cfg->scan_request)
+ wdev = cfg->scan_request->wdev;
+#endif /* LINUX_VERSION < KERNEL_VERSION(3, 6, 0) */
+ if (!wdev) {
+ WL_ERR(("No wireless_dev present\n"));
+ return;
+ }
+ ndev = wdev_to_wlc_ndev(wdev, cfg);
+
+ bzero(&msg, sizeof(wl_event_msg_t));
+ WL_ERR(("timer expired\n"));
+#if 0
+ if (dhdp->memdump_enabled) {
+ dhdp->memdump_enabled = DUMP_MEMFILE;
+ dhdp->memdump_type = DUMP_TYPE_SCAN_TIMEOUT;
+ dhd_bus_mem_dump(dhdp);
+ dhdp->memdump_enabled = prev_memdump_mode;
+ }
+#endif /* DHD_DEBUG && DHD_FW_COREDUMP */
+ msg.event_type = hton32(WLC_E_ESCAN_RESULT);
+ msg.status = hton32(WLC_E_STATUS_TIMEOUT);
+ msg.reason = 0xFFFFFFFF;
+ wl_cfg80211_event(ndev, &msg, NULL);
+#ifdef CUSTOMER_HW4_DEBUG
+ if (!wl_scan_timeout_dbg_enabled)
+ wl_scan_timeout_dbg_set();
+#endif /* CUSTOMER_HW4_DEBUG */
+
+ // terence 20130729: workaround to fix out of memory in firmware
+// if (dhd_conf_get_chip(dhd_get_pub(ndev)) == BCM43362_CHIP_ID) {
+// WL_ERR(("Send hang event\n"));
+// net_os_send_hang_message(ndev);
+// }
+}
+
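/*
 * Editor's note (illustration only, not part of the patch): wl_scan_timeout()
 * above recovers its bcm_cfg80211 either via from_timer() (kernel >= 4.15) or
 * via the timer's 'data' cookie (older kernels). The matching initialisation
 * is not visible in this hunk, but would typically follow the same version
 * split; a minimal sketch of a hypothetical init helper, assuming the timer
 * is the scan_timeout member embedded in struct bcm_cfg80211:
 */
static void wl_init_scan_timeout_sketch(struct bcm_cfg80211 *cfg)
{
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 15, 0)
	/* callback receives a struct timer_list *; container found via from_timer() */
	timer_setup(&cfg->scan_timeout, wl_scan_timeout, 0);
#else
	/* callback receives an unsigned long; pass the cfg pointer as the cookie */
	init_timer(&cfg->scan_timeout);
	cfg->scan_timeout.function = wl_scan_timeout;
	cfg->scan_timeout.data = (unsigned long)cfg;
#endif
}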
#ifdef DHD_LOSSLESS_ROAMING
static void wl_del_roam_timeout(struct bcm_cfg80211 *cfg)
{
}
-static void wl_roam_timeout(unsigned long data)
+static void wl_roam_timeout(
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 15, 0)
+ struct timer_list *t
+#else
+ unsigned long data
+#endif
+)
{
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 15, 0)
+ struct bcm_cfg80211 *cfg = from_timer(cfg, t, roam_timeout);
+#else
struct bcm_cfg80211 *cfg = (struct bcm_cfg80211 *)data;
+#endif
dhd_pub_t *dhdp = (dhd_pub_t *)(cfg->pub);
WL_ERR(("roam timer expired\n"));
#endif /* DHD_LOSSLESS_ROAMING */
-#if defined(CONFIG_WLAN_BEYONDX) || defined(CONFIG_SEC_5GMODEL)
-#define CP_CHAN_INFO_RAT_MODE_LTE 3
-#define CP_CHAN_INFO_RAT_MODE_NR5G 7
-int g_mhs_chan_for_cpcoex = 0;
-
-struct __packed cam_cp_noti_info {
- u8 rat;
- u32 band;
- u32 channel;
-};
-
-int
-wl_cfg80211_send_msg_to_ril()
-{
- int id, buf = 1;
-
- id = IPC_SYSTEM_CP_CHANNEL_INFO;
- dev_ril_bridge_send_msg(id, sizeof(int), &buf);
- WL_ERR(("[BeyondX] send message to ril.\n"));
-
- OSL_SLEEP(500);
- return 0;
-}
-
-int
-wl_cfg80211_ril_bridge_notifier_call(struct notifier_block *nb,
- unsigned long size, void *buf)
-{
- struct dev_ril_bridge_msg *msg;
- struct cam_cp_noti_info *cp_noti_info;
- static int mhs_channel_for_4g, mhs_channel_for_5g;
- static int recv_msg_4g, recv_msg_5g;
-
- WL_ERR(("[BeyondX] receive message from ril.\n"));
- msg = (struct dev_ril_bridge_msg *)buf;
-
- if (msg->dev_id == IPC_SYSTEM_CP_CHANNEL_INFO &&
- msg->data_len <= sizeof(struct cam_cp_noti_info)) {
- u8 rat;
- u32 band;
- u32 channel;
-
- cp_noti_info = (struct cam_cp_noti_info *)msg->data;
- rat = cp_noti_info->rat;
- band = cp_noti_info->band;
- channel = cp_noti_info->channel;
-
- /* LTE/5G Band/Freq information => Mobile Hotspot channel mapping.
- * LTE/B40: 38650~39649 => Ch.11
- * LTE/B41: 39650~41589 => Ch.1
- * 5G/N41: 499200~537999 => Ch.1
- */
- if (rat == CP_CHAN_INFO_RAT_MODE_LTE) {
- recv_msg_4g = 1;
- if (channel >= 38650 && channel <= 39649) {
- mhs_channel_for_4g = 11;
- } else if (channel >= 39650 && channel <= 41589) {
- mhs_channel_for_4g = 1;
- }
- }
- if (rat == CP_CHAN_INFO_RAT_MODE_NR5G) {
- recv_msg_5g = 1;
- if (channel >= 499200 && channel <= 537999) {
- mhs_channel_for_5g = 1;
- }
- }
-
- WL_DBG(("[BeyondX] rat: %u, band: %u, channel: %u, mhs_channel_for_4g: %u, "
- "mhs_channel_for_5g: %u\n", rat, band, channel,
- mhs_channel_for_4g, mhs_channel_for_5g));
-
- if (recv_msg_4g && recv_msg_5g) {
- if (mhs_channel_for_4g && mhs_channel_for_5g) {
- /* if 4G/B40 + 5G/N41, select channel 6 for MHS */
- if (mhs_channel_for_4g == 11 && mhs_channel_for_5g == 1) {
- g_mhs_chan_for_cpcoex = 6;
- /* if 4G(except for B40) + 5G/N41, select channel 1 for MHS */
- } else {
- g_mhs_chan_for_cpcoex = 1;
- }
- } else {
- g_mhs_chan_for_cpcoex = mhs_channel_for_4g ? mhs_channel_for_4g :
- mhs_channel_for_5g ? mhs_channel_for_5g : 0;
- }
- mhs_channel_for_4g = mhs_channel_for_5g = 0;
- recv_msg_4g = recv_msg_5g = 0;
- }
- }
-
- return 0;
-}
-
-static struct notifier_block wl_cfg80211_ril_bridge_notifier = {
- .notifier_call = wl_cfg80211_ril_bridge_notifier_call,
-};
-
-static bool wl_cfg80211_ril_bridge_notifier_registered = FALSE;
-#endif /* CONFIG_WLAN_BEYONDX || defined(CONFIG_SEC_5GMODEL) */
-
static s32
wl_cfg80211_netdev_notifier_call(struct notifier_block * nb,
unsigned long state, void *ptr)
#else
struct net_device *dev = netdev_notifier_info_to_dev(ptr);
#endif /* LINUX_VERSION < VERSION(3, 11, 0) */
- struct wireless_dev *wdev = NULL;
- struct bcm_cfg80211 *cfg = NULL;
-
- WL_DBG(("Enter state:%lu ndev%p \n", state, dev));
- if (!dev) {
- WL_ERR(("dev null\n"));
- return NOTIFY_DONE;
- }
-
- wdev = ndev_to_wdev(dev);
- if (!wdev) {
- WL_ERR(("wdev null. Do nothing\n"));
- return NOTIFY_DONE;
- }
+ struct wireless_dev *wdev = ndev_to_wdev(dev);
+ struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
- cfg = (struct bcm_cfg80211 *)wiphy_priv(wdev->wiphy);
- if (!cfg || (cfg != wl_cfg80211_get_bcmcfg())) {
- /* If cfg80211 priv is null or doesn't match return */
- WL_ERR(("wrong cfg ptr (%p)\n", cfg));
- return NOTIFY_DONE;
- }
+ WL_DBG(("Enter \n"));
- if (dev == bcmcfg_to_prmry_ndev(cfg)) {
- /* Nothing to be done for primary I/F */
+ if (!wdev || !cfg || dev == bcmcfg_to_prmry_ndev(cfg))
return NOTIFY_DONE;
- }
switch (state) {
case NETDEV_DOWN:
break;
}
case NETDEV_UNREGISTER:
- wl_cfg80211_clear_per_bss_ies(cfg, wdev);
/* after calling list_del_rcu(&wdev->list) */
+ wl_cfg80211_clear_per_bss_ies(cfg,
+ wl_get_bssidx_by_wdev(cfg, wdev));
wl_dealloc_netinfo_by_wdev(cfg, wdev);
break;
case NETDEV_GOING_DOWN:
* wdev_cleanup_work call WARN_ON and make the scan done forcibly.
*/
if (wl_get_drv_status(cfg, SCANNING, dev))
- wl_cfg80211_cancel_scan(cfg);
+ wl_notify_escan_complete(cfg, dev, true, true);
break;
}
return NOTIFY_DONE;
*/
static bool wl_cfg80211_netdev_notifier_registered = FALSE;
-static void wl_cfg80211_concurrent_roam(struct bcm_cfg80211 *cfg, int enable)
+static void wl_cfg80211_cancel_scan(struct bcm_cfg80211 *cfg)
{
- u32 connected_cnt = wl_get_drv_status_all(cfg, CONNECTED);
- bool p2p_connected = wl_cfgp2p_vif_created(cfg);
- struct net_info *iter, *next;
+ struct wireless_dev *wdev = NULL;
+ struct net_device *ndev = NULL;
- if (!(cfg->roam_flags & WL_ROAM_OFF_ON_CONCURRENT))
+ if (!cfg->scan_request)
return;
- WL_DBG(("roam off:%d p2p_connected:%d connected_cnt:%d \n",
- enable, p2p_connected, connected_cnt));
- /* Disable FW roam when we have a concurrent P2P connection */
- if (enable && p2p_connected && connected_cnt > 1) {
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 6, 0))
+ if (cfg->scan_request->dev)
+ wdev = cfg->scan_request->dev->ieee80211_ptr;
+#else
+ wdev = cfg->scan_request->wdev;
+#endif /* LINUX_VERSION < KERNEL_VERSION(3, 6, 0) */
- /* Mark it as to be reverted */
- cfg->roam_flags |= WL_ROAM_REVERT_STATUS;
- GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
- for_each_ndev(cfg, iter, next) {
- GCC_DIAGNOSTIC_POP();
- if (iter->ndev && iter->wdev &&
- iter->wdev->iftype == NL80211_IFTYPE_STATION) {
- if (wldev_iovar_setint(iter->ndev, "roam_off", TRUE)
- == BCME_OK) {
- iter->roam_off = TRUE;
- }
- else {
- WL_ERR(("error to enable roam_off\n"));
- }
- }
- }
+ if (!wdev) {
+ WL_ERR(("No wireless_dev present\n"));
+ return;
}
- else if (!enable && (cfg->roam_flags & WL_ROAM_REVERT_STATUS)) {
- cfg->roam_flags &= ~WL_ROAM_REVERT_STATUS;
- GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
- for_each_ndev(cfg, iter, next) {
- GCC_DIAGNOSTIC_POP();
- if (iter->ndev && iter->wdev &&
- iter->wdev->iftype == NL80211_IFTYPE_STATION) {
- if (iter->roam_off != WL_INVALID) {
- if (wldev_iovar_setint(iter->ndev, "roam_off", FALSE)
- == BCME_OK) {
- iter->roam_off = FALSE;
- }
- else {
- WL_ERR(("error to disable roam_off\n"));
- }
- }
+
+ ndev = wdev_to_wlc_ndev(wdev, cfg);
+ wl_notify_escan_complete(cfg, ndev, true, true);
+ WL_ERR(("Scan aborted! \n"));
+}
+
+void wl_cfg80211_scan_abort(struct bcm_cfg80211 *cfg)
+{
+ wl_scan_params_t *params = NULL;
+ s32 params_size = 0;
+ s32 err = BCME_OK;
+ struct net_device *dev = bcmcfg_to_prmry_ndev(cfg);
+ if (!in_atomic()) {
+ /* Our scan params only need space for 1 channel and 0 ssids */
+ params = wl_cfg80211_scan_alloc_params(cfg, -1, 0, &params_size);
+ if (params == NULL) {
+ WL_ERR(("scan params allocation failed \n"));
+ err = -ENOMEM;
+ } else {
+ /* Do a scan abort to stop the driver's scan engine */
+ err = wldev_ioctl_set(dev, WLC_SCAN, params, params_size);
+ if (err < 0) {
+ WL_ERR(("scan abort failed \n"));
}
+ kfree(params);
}
}
-
- return;
+#ifdef WLTDLS
+ if (cfg->tdls_mgmt_frame) {
+ kfree(cfg->tdls_mgmt_frame);
+ cfg->tdls_mgmt_frame = NULL;
+ }
+#endif /* WLTDLS */
}
-static void wl_cfg80211_determine_vsdb_mode(struct bcm_cfg80211 *cfg)
+static s32 wl_notify_escan_complete(struct bcm_cfg80211 *cfg,
+ struct net_device *ndev,
+ bool aborted, bool fw_abort)
{
- struct net_info *iter, *next;
- u32 ctl_chan = 0;
- u32 chanspec = 0;
- u32 pre_ctl_chan = 0;
- u32 connected_cnt = wl_get_drv_status_all(cfg, CONNECTED);
- cfg->vsdb_mode = false;
+ s32 err = BCME_OK;
+ unsigned long flags;
+ struct net_device *dev;
+ dhd_pub_t *dhdp = (dhd_pub_t *)(cfg->pub);
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 8, 0))
+ struct cfg80211_scan_info info;
+ info.aborted = aborted;
+#endif
- if (connected_cnt <= 1) {
- return;
+ WL_DBG(("Enter \n"));
+ BCM_REFERENCE(dhdp);
+
+ mutex_lock(&cfg->scan_complete);
+
+ if (!ndev) {
+ WL_ERR(("ndev is null\n"));
+ err = BCME_ERROR;
+ goto out;
}
- GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
- for_each_ndev(cfg, iter, next) {
- GCC_DIAGNOSTIC_POP();
- /* p2p discovery iface ndev could be null */
- if (iter->ndev) {
- chanspec = 0;
- ctl_chan = 0;
- if (wl_get_drv_status(cfg, CONNECTED, iter->ndev)) {
- if (wldev_iovar_getint(iter->ndev, "chanspec",
- (s32 *)&chanspec) == BCME_OK) {
- chanspec = wl_chspec_driver_to_host(chanspec);
- ctl_chan = wf_chspec_ctlchan(chanspec);
- wl_update_prof(cfg, iter->ndev, NULL,
- &ctl_chan, WL_PROF_CHAN);
+
+ if (cfg->escan_info.ndev != ndev) {
+ WL_ERR(("ndev is different %p %p\n", cfg->escan_info.ndev, ndev));
+ err = BCME_ERROR;
+ goto out;
+ }
+
+ if (cfg->scan_request) {
+ dev = bcmcfg_to_prmry_ndev(cfg);
+#if defined(WL_ENABLE_P2P_IF)
+ if (cfg->scan_request->dev != cfg->p2p_net)
+ dev = cfg->scan_request->dev;
+#elif defined(WL_CFG80211_P2P_DEV_IF)
+ if (cfg->scan_request->wdev->iftype != NL80211_IFTYPE_P2P_DEVICE)
+ dev = cfg->scan_request->wdev->netdev;
+#endif /* WL_ENABLE_P2P_IF */
+ }
+ else {
+ WL_DBG(("cfg->scan_request is NULL may be internal scan."
+ "doing scan_abort for ndev %p primary %p",
+ ndev, bcmcfg_to_prmry_ndev(cfg)));
+ dev = ndev;
+ }
+ if (fw_abort && !in_atomic())
+ wl_cfg80211_scan_abort(cfg);
+ if (timer_pending(&cfg->scan_timeout))
+ del_timer_sync(&cfg->scan_timeout);
+#if defined(ESCAN_RESULT_PATCH)
+ if (likely(cfg->scan_request)) {
+ cfg->bss_list = wl_escan_get_buf(cfg, aborted);
+ wl_inform_bss(cfg);
+ }
+#endif /* ESCAN_RESULT_PATCH */
+ spin_lock_irqsave(&cfg->cfgdrv_lock, flags);
+#ifdef WL_SCHED_SCAN
+ if (cfg->sched_scan_req && !cfg->scan_request) {
+ WL_PNO((">>> REPORTING SCHED SCAN RESULTS \n"));
+ if (!aborted) {
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0))
+ cfg80211_sched_scan_results(cfg->sched_scan_req->wiphy, 0);
+#else
+ cfg80211_sched_scan_results(cfg->sched_scan_req->wiphy);
+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)) */
+ }
+
+ DBG_EVENT_LOG(dhdp, WIFI_EVENT_DRIVER_PNO_SCAN_COMPLETE);
+ cfg->sched_scan_running = FALSE;
+ }
+#endif /* WL_SCHED_SCAN */
+ if (likely(cfg->scan_request)) {
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 8, 0))
+ cfg80211_scan_done(cfg->scan_request, &info);
+#else
+ cfg80211_scan_done(cfg->scan_request, aborted);
+#endif
+ cfg->scan_request = NULL;
+ DHD_OS_SCAN_WAKE_UNLOCK((dhd_pub_t *)(cfg->pub));
+ DHD_ENABLE_RUNTIME_PM((dhd_pub_t *)(cfg->pub));
+ }
+ if (p2p_is_on(cfg))
+ wl_clr_p2p_status(cfg, SCANNING);
+ wl_clr_drv_status(cfg, SCANNING, dev);
+ spin_unlock_irqrestore(&cfg->cfgdrv_lock, flags);
+
+out:
+ mutex_unlock(&cfg->scan_complete);
+ return err;
+}
+
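/*
 * Editor's sketch (illustration only, not part of the patch): the 4.8+ versus
 * pre-4.8 split used above when completing cfg->scan_request can be captured
 * in one hypothetical helper. From 4.8 onward cfg80211_scan_done() takes a
 * struct cfg80211_scan_info; before that it takes a plain bool.
 */
static void wl_scan_done_compat_sketch(struct cfg80211_scan_request *request, bool aborted)
{
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 8, 0))
	struct cfg80211_scan_info info = {
		.aborted = aborted,		/* newer kernels report status via the info struct */
	};
	cfg80211_scan_done(request, &info);
#else
	cfg80211_scan_done(request, aborted);	/* older kernels take the aborted flag directly */
#endif
}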
+#ifdef ESCAN_BUF_OVERFLOW_MGMT
+#ifndef WL_DRV_AVOID_SCANCACHE
+static void
+wl_cfg80211_find_removal_candidate(wl_bss_info_t *bss, removal_element_t *candidate)
+{
+ int idx;
+ for (idx = 0; idx < BUF_OVERFLOW_MGMT_COUNT; idx++) {
+ int len = BUF_OVERFLOW_MGMT_COUNT - idx - 1;
+ if (bss->RSSI < candidate[idx].RSSI) {
+ if (len)
+ memcpy(&candidate[idx + 1], &candidate[idx],
+ sizeof(removal_element_t) * len);
+ candidate[idx].RSSI = bss->RSSI;
+ candidate[idx].length = bss->length;
+ memcpy(&candidate[idx].BSSID, &bss->BSSID, ETHER_ADDR_LEN);
+ return;
+ }
+ }
+}
+
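+/* Worked example of how candidate[] fills (assuming BUF_OVERFLOW_MGMT_COUNT
+ * is 3 for illustration): the array starts zeroed, and since real RSSI
+ * values are negative, each weaker result is inserted in front while the
+ * rest shift down:
+ *
+ *   start       ->  {   0,   0,   0 }
+ *   add -40 dBm ->  { -40,   0,   0 }
+ *   add -70 dBm ->  { -70, -40,   0 }
+ *   add -20 dBm ->  { -70, -40, -20 }
+ *
+ * so candidate[] ends up holding the weakest entries seen so far, weakest
+ * first, ready for wl_cfg80211_remove_lowRSSI_info() below.
+ */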
+static void
+wl_cfg80211_remove_lowRSSI_info(wl_scan_results_t *list, removal_element_t *candidate,
+ wl_bss_info_t *bi)
+{
+ int idx1, idx2;
+ int total_delete_len = 0;
+ for (idx1 = 0; idx1 < BUF_OVERFLOW_MGMT_COUNT; idx1++) {
+ int cur_len = WL_SCAN_RESULTS_FIXED_SIZE;
+ wl_bss_info_t *bss = NULL;
+ if (candidate[idx1].RSSI >= bi->RSSI)
+ continue;
+ for (idx2 = 0; idx2 < list->count; idx2++) {
+ bss = bss ? (wl_bss_info_t *)((uintptr)bss + dtoh32(bss->length)) :
+ list->bss_info;
+ if (!bcmp(&candidate[idx1].BSSID, &bss->BSSID, ETHER_ADDR_LEN) &&
+ candidate[idx1].RSSI == bss->RSSI &&
+ candidate[idx1].length == dtoh32(bss->length)) {
+ u32 delete_len = dtoh32(bss->length);
+ WL_DBG(("delete scan info of " MACDBG " to add new AP\n",
+ MAC2STRDBG(bss->BSSID.octet)));
+ if (idx2 < list->count - 1) {
+ memmove((u8 *)bss, (u8 *)bss + delete_len,
+ list->buflen - cur_len - delete_len);
}
- if (!cfg->vsdb_mode) {
- if (!pre_ctl_chan && ctl_chan)
- pre_ctl_chan = ctl_chan;
- else if (pre_ctl_chan && (pre_ctl_chan != ctl_chan)) {
- cfg->vsdb_mode = true;
- }
+ list->buflen -= delete_len;
+ list->count--;
+ total_delete_len += delete_len;
+ /* stop once the total deleted length covers the new result length */
+ if (total_delete_len >= bi->length) {
+ return;
}
+ break;
}
+ cur_len += dtoh32(bss->length);
}
}
- WL_MSG("wlan", "%s concurrency is enabled\n", cfg->vsdb_mode ? "Multi Channel" : "Same Channel");
- return;
}
+#endif /* WL_DRV_AVOID_SCANCACHE */
+#endif /* ESCAN_BUF_OVERFLOW_MGMT */
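+
+/* Note on the compaction in wl_cfg80211_remove_lowRSSI_info() above: evicting
+ * a victim of length L shifts the tail of the flat result buffer down by L
+ * bytes and shrinks the bookkeeping, e.g.
+ *
+ *   before:  | fixed hdr | bss0 | victim (L bytes) | bss2 | bss3 |
+ *   after :  | fixed hdr | bss0 | bss2 | bss3 |      (buflen -= L, count--)
+ *
+ * Eviction stops as soon as total_delete_len >= bi->length, i.e. once enough
+ * space has been reclaimed to append the stronger new result.
+ */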
-int
-wl_cfg80211_determine_p2p_rsdb_mode(struct bcm_cfg80211 *cfg)
+#ifdef WL_DRV_AVOID_SCANCACHE
+static u32 wl_p2p_find_peer_channel(struct bcm_cfg80211 *cfg, s32 status, wl_bss_info_t *bi,
+ u32 bi_length)
{
- struct net_info *iter, *next;
- u32 chanspec = 0;
- u32 band = 0;
- u32 pre_band = 0;
- bool is_rsdb_supported = FALSE;
- bool rsdb_mode = FALSE;
-
- is_rsdb_supported = DHD_OPMODE_SUPPORTED(cfg->pub, DHD_FLAG_RSDB_MODE);
+ u32 ret;
+ u8 *p2p_dev_addr = NULL;
- if (!is_rsdb_supported) {
- return 0;
+ ret = wl_get_drv_status_all(cfg, FINDING_COMMON_CHANNEL);
+ if (!ret) {
+ return ret;
}
+ if (status == WLC_E_STATUS_PARTIAL) {
+ p2p_dev_addr = wl_cfgp2p_retreive_p2p_dev_addr(bi, bi_length);
+ if (p2p_dev_addr && !memcmp(p2p_dev_addr,
+ cfg->afx_hdl->tx_dst_addr.octet, ETHER_ADDR_LEN)) {
+ s32 channel = wf_chspec_ctlchan(
+ wl_chspec_driver_to_host(bi->chanspec));
- GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
- for_each_ndev(cfg, iter, next) {
- GCC_DIAGNOSTIC_POP();
- /* p2p discovery iface ndev could be null */
- if (iter->ndev) {
- chanspec = 0;
- band = 0;
- if (wl_get_drv_status(cfg, CONNECTED, iter->ndev)) {
- if (wldev_iovar_getint(iter->ndev, "chanspec",
- (s32 *)&chanspec) == BCME_OK) {
- chanspec = wl_chspec_driver_to_host(chanspec);
- band = CHSPEC_BAND(chanspec);
- }
+ if ((channel > MAXCHANNEL) || (channel <= 0))
+ channel = WL_INVALID;
+ else
+ WL_ERR(("ACTION FRAME SCAN : Peer " MACDBG " found,"
+ " channel : %d\n",
+ MAC2STRDBG(cfg->afx_hdl->tx_dst_addr.octet),
+ channel));
- if (!pre_band && band) {
- pre_band = band;
- } else if (pre_band && (pre_band != band)) {
- rsdb_mode = TRUE;
- }
- }
+ wl_clr_p2p_status(cfg, SCANNING);
+ cfg->afx_hdl->peer_chan = channel;
+ complete(&cfg->act_frm_scan);
}
+ } else {
+ WL_INFORM(("ACTION FRAME SCAN DONE\n"));
+ wl_clr_p2p_status(cfg, SCANNING);
+ wl_clr_drv_status(cfg, SCANNING, cfg->afx_hdl->dev);
+ if (cfg->afx_hdl->peer_chan == WL_INVALID)
+ complete(&cfg->act_frm_scan);
}
- WL_DBG(("RSDB mode is %s\n", rsdb_mode ? "enabled" : "disabled"));
- return rsdb_mode;
+ return ret;
}
-static s32 wl_notifier_change_state(struct bcm_cfg80211 *cfg, struct net_info *_net_info,
- enum wl_status state, bool set)
+static s32 wl_escan_without_scan_cache(struct bcm_cfg80211 *cfg, wl_escan_result_t *escan_result,
+ struct net_device *ndev, const wl_event_msg_t *e, s32 status)
{
- s32 pm = PM_FAST;
s32 err = BCME_OK;
- u32 mode;
- u32 chan = 0;
- struct net_device *primary_dev = bcmcfg_to_prmry_ndev(cfg);
- dhd_pub_t *dhd = cfg->pub;
-#ifdef RTT_SUPPORT
- rtt_status_info_t *rtt_status;
-#endif /* RTT_SUPPORT */
- if (dhd->busstate == DHD_BUS_DOWN) {
- WL_ERR(("busstate is DHD_BUS_DOWN!\n"));
- return 0;
+ wl_bss_info_t *bi;
+ u32 bi_length;
+ bool aborted = false;
+ bool fw_abort = false;
+ bool notify_escan_complete = false;
+
+ if (wl_escan_check_sync_id(status, escan_result->sync_id,
+ cfg->escan_info.cur_sync_id) < 0) {
+ goto exit;
}
- WL_DBG(("Enter state %d set %d _net_info->pm_restore %d iface %s\n",
- state, set, _net_info->pm_restore, _net_info->ndev->name));
- if (state != WL_STATUS_CONNECTED)
- return 0;
- mode = wl_get_mode_by_netdev(cfg, _net_info->ndev);
- if (set) {
- wl_cfg80211_concurrent_roam(cfg, 1);
- wl_cfg80211_determine_vsdb_mode(cfg);
- if (mode == WL_MODE_AP) {
- if (wl_add_remove_eventmsg(primary_dev, WLC_E_P2P_PROBREQ_MSG, false))
- WL_ERR((" failed to unset WLC_E_P2P_PROPREQ_MSG\n"));
+ wl_escan_print_sync_id(status, escan_result->sync_id,
+ cfg->escan_info.cur_sync_id);
+
+ if ((status != WLC_E_STATUS_TIMEOUT) && (status != WLC_E_STATUS_PARTIAL)) {
+ cfg->escan_info.escan_state = WL_ESCAN_STATE_IDLE;
+ }
+
+ if ((likely(cfg->scan_request)) || (cfg->sched_scan_running)) {
+ notify_escan_complete = true;
+ }
+
+ if (status == WLC_E_STATUS_PARTIAL) {
+ WL_INFORM(("WLC_E_STATUS_PARTIAL \n"));
+ DBG_EVENT_LOG((dhd_pub_t *)cfg->pub, WIFI_EVENT_DRIVER_SCAN_RESULT_FOUND);
+ if ((!escan_result) || (dtoh16(escan_result->bss_count) != 1)) {
+ WL_ERR(("Invalid escan result (NULL pointer) or invalid bss_count\n"));
+ goto exit;
}
- pm = PM_OFF;
- if ((err = wldev_ioctl_set(_net_info->ndev, WLC_SET_PM, &pm,
- sizeof(pm))) != 0) {
- if (err == -ENODEV)
- WL_DBG(("%s:netdev not ready\n",
- _net_info->ndev->name));
- else
- WL_ERR(("%s:error (%d)\n",
- _net_info->ndev->name, err));
- wl_cfg80211_update_power_mode(_net_info->ndev);
+ bi = escan_result->bss_info;
+ bi_length = dtoh32(bi->length);
+ if ((!bi) ||
+ (bi_length != (dtoh32(escan_result->buflen) - WL_ESCAN_RESULTS_FIXED_SIZE))) {
+ WL_ERR(("Invalid escan bss info (NULL pointer)"
+ "or invalid bss_info length\n"));
+ goto exit;
}
- wl_add_remove_pm_enable_work(cfg, WL_PM_WORKQ_SHORT);
-#if defined(WLTDLS)
- if (wl_cfg80211_is_concurrent_mode(primary_dev)) {
- err = wldev_iovar_setint(primary_dev, "tdls_enable", 0);
- }
-#endif /* defined(WLTDLS) */
-
-#ifdef DISABLE_FRAMEBURST_VSDB
- if (!DHD_OPMODE_SUPPORTED(cfg->pub, DHD_FLAG_HOSTAP_MODE) &&
- wl_cfg80211_is_concurrent_mode(primary_dev) &&
- !wl_cfg80211_determine_p2p_rsdb_mode(cfg)) {
- wl_cfg80211_set_frameburst(cfg, FALSE);
- }
-#endif /* DISABLE_FRAMEBURST_VSDB */
-#ifdef DISABLE_WL_FRAMEBURST_SOFTAP
- if (DHD_OPMODE_STA_SOFTAP_CONCURR(dhd) &&
- wl_get_drv_status(cfg, CONNECTED, bcmcfg_to_prmry_ndev(cfg))) {
- /* Enable frameburst for
- * STA/SoftAP concurrent mode
- */
- wl_cfg80211_set_frameburst(cfg, TRUE);
+
+ if (!(bcmcfg_to_wiphy(cfg)->interface_modes & BIT(NL80211_IFTYPE_ADHOC))) {
+ if (dtoh16(bi->capability) & DOT11_CAP_IBSS) {
+ WL_DBG(("Ignoring IBSS result\n"));
+ goto exit;
+ }
}
-#endif /* DISABLE_WL_FRAMEBURST_SOFTAP */
- } else { /* clear */
- chan = 0;
- /* clear chan information when the net device is disconnected */
- wl_update_prof(cfg, _net_info->ndev, NULL, &chan, WL_PROF_CHAN);
- wl_cfg80211_determine_vsdb_mode(cfg);
- if (primary_dev == _net_info->ndev) {
- pm = PM_FAST;
-#ifdef RTT_SUPPORT
- rtt_status = GET_RTTSTATE(dhd);
- if (rtt_status->status != RTT_ENABLED) {
-#endif /* RTT_SUPPORT */
- if (dhd_conf_get_pm(dhd) >= 0)
- pm = dhd_conf_get_pm(dhd);
- if ((err = wldev_ioctl_set(_net_info->ndev, WLC_SET_PM, &pm,
- sizeof(pm))) != 0) {
- if (err == -ENODEV)
- WL_DBG(("%s:netdev not ready\n",
- _net_info->ndev->name));
- else
- WL_ERR(("%s:error (%d)\n",
- _net_info->ndev->name, err));
- wl_cfg80211_update_power_mode(_net_info->ndev);
- }
-#ifdef RTT_SUPPORT
+ if (wl_p2p_find_peer_channel(cfg, status, bi, bi_length)) {
+ goto exit;
+ } else {
+ if (scan_req_match(cfg)) {
+ /* p2p scan && allow only probe response */
+ if ((cfg->p2p->search_state != WL_P2P_DISC_ST_SCAN) &&
+ (bi->flags & WL_BSS_FLAGS_FROM_BEACON))
+ goto exit;
}
-#endif /* RTT_SUPPORT */
+ err = wl_inform_single_bss(cfg, bi, false);
+
+ /*
+ * !Broadcast && number of ssid = 1 && number of channels =1
+ * means specific scan to association
+ */
+ if (wl_cfgp2p_is_p2p_specific_scan(cfg->scan_request)) {
+ WL_ERR(("P2P assoc scan fast aborted.\n"));
+ aborted = false;
+ fw_abort = true;
+ }
+ /* Directly exit from function here and
+ * avoid sending notify completion to cfg80211
+ */
+ goto exit;
}
- wl_cfg80211_concurrent_roam(cfg, 0);
-#if defined(WLTDLS)
- if (!wl_cfg80211_is_concurrent_mode(primary_dev)) {
- err = wldev_iovar_setint(primary_dev, "tdls_enable", 1);
+ } else if (status == WLC_E_STATUS_SUCCESS) {
+ if (wl_p2p_find_peer_channel(cfg, status, NULL, 0)) {
+ goto exit;
+ }
+ WL_INFORM(("ESCAN COMPLETED\n"));
+ DBG_EVENT_LOG((dhd_pub_t *)cfg->pub, WIFI_EVENT_DRIVER_SCAN_COMPLETE);
+
+ /* Update escan complete status */
+ aborted = false;
+ fw_abort = false;
+
+#ifdef CUSTOMER_HW4_DEBUG
+ if (wl_scan_timeout_dbg_enabled)
+ wl_scan_timeout_dbg_clear();
+#endif /* CUSTOMER_HW4_DEBUG */
+ } else if ((status == WLC_E_STATUS_ABORT) || (status == WLC_E_STATUS_NEWSCAN) ||
+ (status == WLC_E_STATUS_11HQUIET) || (status == WLC_E_STATUS_CS_ABORT) ||
+ (status == WLC_E_STATUS_NEWASSOC)) {
+ /* Handle all cases of scan abort */
+
+ WL_DBG(("ESCAN ABORT reason: %d\n", status));
+ if (wl_p2p_find_peer_channel(cfg, status, NULL, 0)) {
+ goto exit;
}
-#endif /* defined(WLTDLS) */
+ WL_INFORM(("ESCAN ABORTED\n"));
+
+ /* Update escan complete status */
+ aborted = true;
+ fw_abort = false;
-#if defined(DISABLE_FRAMEBURST_VSDB)
- if (!DHD_OPMODE_SUPPORTED(cfg->pub, DHD_FLAG_HOSTAP_MODE)) {
- wl_cfg80211_set_frameburst(cfg, TRUE);
+ } else if (status == WLC_E_STATUS_TIMEOUT) {
+ WL_ERR(("WLC_E_STATUS_TIMEOUT : scan_request[%p]\n", cfg->scan_request));
+ WL_ERR(("reason[0x%x]\n", e->reason));
+ if (e->reason == 0xFFFFFFFF) {
+ /* Update escan complete status */
+ aborted = true;
+ fw_abort = true;
}
-#endif /* DISABLE_FRAMEBURST_VSDB */
-#ifdef DISABLE_WL_FRAMEBURST_SOFTAP
- if (DHD_OPMODE_STA_SOFTAP_CONCURR(dhd) &&
- (cfg->ap_oper_channel <= CH_MAX_2G_CHANNEL)) {
- /* Disable frameburst for stand-alone 2GHz SoftAP */
- wl_cfg80211_set_frameburst(cfg, FALSE);
+ } else {
+ WL_ERR(("unexpected Escan Event %d : abort\n", status));
+
+ if (wl_p2p_find_peer_channel(cfg, status, NULL, 0)) {
+ goto exit;
}
-#endif /* DISABLE_WL_FRAMEBURST_SOFTAP */
+ /* Update escan complete status */
+ aborted = true;
+ fw_abort = false;
}
- return err;
-}
-#ifdef DHD_LOSSLESS_ROAMING
-static s32 wl_init_roam_timeout(struct bcm_cfg80211 *cfg)
-{
- int err = 0;
-
- /* Init roam timer */
- init_timer_compat(&cfg->roam_timeout, wl_roam_timeout, cfg);
+ /* Notify escan complete status */
+ if (notify_escan_complete) {
+ wl_notify_escan_complete(cfg, ndev, aborted, fw_abort);
+ }
+exit:
return err;
+
}
-#endif /* DHD_LOSSLESS_ROAMING */
+#endif /* WL_DRV_AVOID_SCANCACHE */
-static s32 wl_init_priv(struct bcm_cfg80211 *cfg)
+static s32 wl_escan_handler(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev,
+ const wl_event_msg_t *e, void *data)
{
- struct wiphy *wiphy = bcmcfg_to_wiphy(cfg);
- struct net_device *ndev = bcmcfg_to_prmry_ndev(cfg);
- s32 err = 0;
+ s32 err = BCME_OK;
+ s32 status = ntoh32(e->status);
+ wl_escan_result_t *escan_result;
+ struct net_device *ndev = NULL;
+#ifndef WL_DRV_AVOID_SCANCACHE
+ wl_bss_info_t *bi;
+ u32 bi_length;
+ wifi_p2p_ie_t * p2p_ie;
+ u8 *p2p_dev_addr = NULL;
+ wl_scan_results_t *list;
+ wl_bss_info_t *bss = NULL;
+ u32 i;
+#endif /* WL_DRV_AVOID_SCANCACHE */
+ u16 channel;
+ struct ieee80211_supported_band *band;
- cfg->scan_request = NULL;
- cfg->pwr_save = !!(wiphy->flags & WIPHY_FLAG_PS_ON_BY_DEFAULT);
-#ifdef DISABLE_BUILTIN_ROAM
- cfg->roam_on = false;
-#else
- cfg->roam_on = true;
-#endif /* DISABLE_BUILTIN_ROAM */
- cfg->active_scan = true;
- cfg->rf_blocked = false;
- cfg->vsdb_mode = false;
-#if defined(BCMSDIO) || defined(BCMDBUS)
- cfg->wlfc_on = false;
-#endif /* BCMSDIO || BCMDBUS */
- cfg->roam_flags |= WL_ROAM_OFF_ON_CONCURRENT;
- cfg->disable_roam_event = false;
- /* register interested state */
- set_bit(WL_STATUS_CONNECTED, &cfg->interrested_state);
- spin_lock_init(&cfg->cfgdrv_lock);
- mutex_init(&cfg->ioctl_buf_sync);
- init_waitqueue_head(&cfg->netif_change_event);
- init_waitqueue_head(&cfg->wps_done_event);
- init_completion(&cfg->send_af_done);
- init_completion(&cfg->iface_disable);
- mutex_init(&cfg->usr_sync);
- mutex_init(&cfg->event_sync);
- mutex_init(&cfg->if_sync);
- mutex_init(&cfg->scan_sync);
- mutex_init(&cfg->pm_sync);
- mutex_init(&cfg->in4way_sync);
-#ifdef WLTDLS
- mutex_init(&cfg->tdls_sync);
-#endif /* WLTDLS */
-#ifdef WL_BCNRECV
- mutex_init(&cfg->bcn_sync);
-#endif /* WL_BCNRECV */
-#ifdef WL_WPS_SYNC
- wl_init_wps_reauth_sm(cfg);
-#endif /* WL_WPS_SYNC */
- wl_init_eq(cfg);
- err = wl_init_priv_mem(cfg);
- if (err)
- return err;
- if (wl_create_event_handler(cfg))
- return -ENOMEM;
- wl_init_event_handler(cfg);
- err = wl_init_scan(cfg);
- if (err)
- return err;
-#ifdef DHD_LOSSLESS_ROAMING
- err = wl_init_roam_timeout(cfg);
- if (err) {
- return err;
- }
-#endif /* DHD_LOSSLESS_ROAMING */
- wl_init_conf(cfg->conf);
- wl_init_prof(cfg, ndev);
- wl_link_down(cfg);
- DNGL_FUNC(dhd_cfg80211_init, (cfg));
-#ifdef WL_NAN
- cfg->nan_dp_state = NAN_DP_STATE_DISABLED;
- init_waitqueue_head(&cfg->ndp_if_change_event);
- mutex_init(&cfg->nancfg.nan_sync);
- init_waitqueue_head(&cfg->nancfg.nan_event_wait);
-#endif /* WL_NAN */
- cfg->pmk_list->pmkids.length = OFFSETOF(pmkid_list_v3_t, pmkid);
- cfg->pmk_list->pmkids.count = 0;
- cfg->pmk_list->pmkids.version = PMKID_LIST_VER_3;
- return err;
-}
+ WL_DBG((" enter event type : %d, status : %d \n",
+ ntoh32(e->event_type), ntoh32(e->status)));
-static void wl_deinit_priv(struct bcm_cfg80211 *cfg)
-{
- DNGL_FUNC(dhd_cfg80211_deinit, (cfg));
- wl_destroy_event_handler(cfg);
- wl_flush_eq(cfg);
- wl_link_down(cfg);
- del_timer_sync(&cfg->scan_timeout);
-#ifdef DHD_LOSSLESS_ROAMING
- del_timer_sync(&cfg->roam_timeout);
-#endif // endif
- wl_deinit_priv_mem(cfg);
- if (wl_cfg80211_netdev_notifier_registered) {
- wl_cfg80211_netdev_notifier_registered = FALSE;
- unregister_netdevice_notifier(&wl_cfg80211_netdev_notifier);
- }
-}
+ ndev = cfgdev_to_wlc_ndev(cfgdev, cfg);
-#if defined(WL_ENABLE_P2P_IF)
-static s32 wl_cfg80211_attach_p2p(struct bcm_cfg80211 *cfg)
-{
- WL_TRACE(("Enter \n"));
+ mutex_lock(&cfg->usr_sync);
+ /* P2P SCAN is coming from primary interface */
+ if (wl_get_p2p_status(cfg, SCANNING)) {
+ if (wl_get_drv_status_all(cfg, SENDING_ACT_FRM))
+ ndev = cfg->afx_hdl->dev;
+ else
+ ndev = cfg->escan_info.ndev;
- if (wl_cfgp2p_register_ndev(cfg) < 0) {
- WL_ERR(("P2P attach failed. \n"));
- return -ENODEV;
}
+ if (!ndev || (!wl_get_drv_status(cfg, SCANNING, ndev) && !cfg->sched_scan_running)) {
+ WL_ERR(("escan is not ready ndev %p drv_status 0x%x e_type %d e_states %d\n",
+ ndev, wl_get_drv_status(cfg, SCANNING, ndev),
+ ntoh32(e->event_type), ntoh32(e->status)));
+ goto exit;
+ }
+ escan_result = (wl_escan_result_t *)data;
- return 0;
-}
+#ifndef WL_DRV_AVOID_SCANCACHE
+ if (status == WLC_E_STATUS_PARTIAL) {
+ WL_INFORM(("WLC_E_STATUS_PARTIAL \n"));
+ DBG_EVENT_LOG((dhd_pub_t *)cfg->pub, WIFI_EVENT_DRIVER_SCAN_RESULT_FOUND);
+ if (!escan_result) {
+ WL_ERR(("Invalid escan result (NULL pointer)\n"));
+ goto exit;
+ }
+ if ((dtoh32(escan_result->buflen) > (int)ESCAN_BUF_SIZE) ||
+ (dtoh32(escan_result->buflen) < sizeof(wl_escan_result_t))) {
+ WL_ERR(("Invalid escan buffer len:%d\n", dtoh32(escan_result->buflen)));
+ goto exit;
+ }
+ if (dtoh16(escan_result->bss_count) != 1) {
+ WL_ERR(("Invalid bss_count %d: ignoring\n", escan_result->bss_count));
+ goto exit;
+ }
+ bi = escan_result->bss_info;
+ if (!bi) {
+ WL_ERR(("Invalid escan bss info (NULL pointer)\n"));
+ goto exit;
+ }
+ bi_length = dtoh32(bi->length);
+ if (bi_length != (dtoh32(escan_result->buflen) - WL_ESCAN_RESULTS_FIXED_SIZE)) {
+ WL_ERR(("Invalid bss_info length %d: ignoring\n", bi_length));
+ goto exit;
+ }
-static s32 wl_cfg80211_detach_p2p(struct bcm_cfg80211 *cfg)
-{
- struct wireless_dev *wdev;
+ /* +++++ terence 20130524: skip invalid bss */
+ channel =
+ bi->ctl_ch ? bi->ctl_ch : CHSPEC_CHANNEL(wl_chspec_driver_to_host(bi->chanspec));
+ if (channel <= CH_MAX_2G_CHANNEL)
+ band = bcmcfg_to_wiphy(cfg)->bands[IEEE80211_BAND_2GHZ];
+ else
+ band = bcmcfg_to_wiphy(cfg)->bands[IEEE80211_BAND_5GHZ];
+ if (!band) {
+ WL_ERR(("No valid band\n"));
+ goto exit;
+ }
+ if (!dhd_conf_match_channel(cfg->pub, channel))
+ goto exit;
+ /* ----- terence 20130524: skip invalid bss */
- WL_DBG(("Enter \n"));
- if (!cfg) {
- WL_ERR(("Invalid Ptr\n"));
- return -EINVAL;
- }
- else {
- wdev = cfg->p2p_wdev;
- if (!wdev) {
- WL_ERR(("Invalid Ptr\n"));
- return -EINVAL;
+ if (wl_escan_check_sync_id(status, escan_result->sync_id,
+ cfg->escan_info.cur_sync_id) < 0)
+ goto exit;
+
+ if (!(bcmcfg_to_wiphy(cfg)->interface_modes & BIT(NL80211_IFTYPE_ADHOC))) {
+ if (dtoh16(bi->capability) & DOT11_CAP_IBSS) {
+ WL_DBG(("Ignoring IBSS result\n"));
+ goto exit;
+ }
}
- }
- wl_cfgp2p_unregister_ndev(cfg);
+ if (wl_get_drv_status_all(cfg, FINDING_COMMON_CHANNEL)) {
+ p2p_dev_addr = wl_cfgp2p_retreive_p2p_dev_addr(bi, bi_length);
+ if (p2p_dev_addr && !memcmp(p2p_dev_addr,
+ cfg->afx_hdl->tx_dst_addr.octet, ETHER_ADDR_LEN)) {
+ s32 channel = wf_chspec_ctlchan(
+ wl_chspec_driver_to_host(bi->chanspec));
+
+ if ((channel > MAXCHANNEL) || (channel <= 0))
+ channel = WL_INVALID;
+ else
+ WL_ERR(("ACTION FRAME SCAN : Peer " MACDBG " found,"
+ " channel : %d\n",
+ MAC2STRDBG(cfg->afx_hdl->tx_dst_addr.octet),
+ channel));
- cfg->p2p_wdev = NULL;
- cfg->p2p_net = NULL;
- WL_DBG(("Freeing 0x%p \n", wdev));
- kfree(wdev);
+ wl_clr_p2p_status(cfg, SCANNING);
+ cfg->afx_hdl->peer_chan = channel;
+ complete(&cfg->act_frm_scan);
+ goto exit;
+ }
- return 0;
-}
-#endif
+ } else {
+ int cur_len = WL_SCAN_RESULTS_FIXED_SIZE;
+#ifdef ESCAN_BUF_OVERFLOW_MGMT
+ removal_element_t candidate[BUF_OVERFLOW_MGMT_COUNT];
+ int remove_lower_rssi = FALSE;
+
+ bzero(candidate, sizeof(removal_element_t)*BUF_OVERFLOW_MGMT_COUNT);
+#endif /* ESCAN_BUF_OVERFLOW_MGMT */
+
+ list = wl_escan_get_buf(cfg, FALSE);
+ if (scan_req_match(cfg)) {
+ /* p2p scan && allow only probe response */
+ if ((cfg->p2p->search_state != WL_P2P_DISC_ST_SCAN) &&
+ (bi->flags & WL_BSS_FLAGS_FROM_BEACON))
+ goto exit;
+ if ((p2p_ie = wl_cfgp2p_find_p2pie(((u8 *) bi) + bi->ie_offset,
+ bi->ie_length)) == NULL) {
+ WL_ERR(("Couldn't find P2PIE in probe"
+ " response/beacon\n"));
+ goto exit;
+ }
+ }
+#ifdef ESCAN_BUF_OVERFLOW_MGMT
+ if (bi_length > ESCAN_BUF_SIZE - list->buflen)
+ remove_lower_rssi = TRUE;
+#endif /* ESCAN_BUF_OVERFLOW_MGMT */
+
+ WL_SCAN(("%s("MACDBG") RSSI %d flags 0x%x length %d\n", bi->SSID,
+ MAC2STRDBG(bi->BSSID.octet), bi->RSSI, bi->flags, bi->length));
+ for (i = 0; i < list->count; i++) {
+ bss = bss ? (wl_bss_info_t *)((uintptr)bss + dtoh32(bss->length))
+ : list->bss_info;
+ if (!bss) {
+ WL_ERR(("bss is NULL\n"));
+ goto exit;
+ }
+#ifdef ESCAN_BUF_OVERFLOW_MGMT
+ WL_SCAN(("%s("MACDBG"), i=%d bss: RSSI %d list->count %d\n",
+ bss->SSID, MAC2STRDBG(bss->BSSID.octet),
+ i, bss->RSSI, list->count));
+
+ if (remove_lower_rssi)
+ wl_cfg80211_find_removal_candidate(bss, candidate);
+#endif /* ESCAN_BUF_OVERFLOW_MGMT */
+
+ if (!bcmp(&bi->BSSID, &bss->BSSID, ETHER_ADDR_LEN) &&
+ (CHSPEC_BAND(wl_chspec_driver_to_host(bi->chanspec))
+ == CHSPEC_BAND(wl_chspec_driver_to_host(bss->chanspec))) &&
+ bi->SSID_len == bss->SSID_len &&
+ !bcmp(bi->SSID, bss->SSID, bi->SSID_len)) {
+
+ /* do not allow beacon data to update
+ * the data received from a probe response
+ */
+ if (!(bss->flags & WL_BSS_FLAGS_FROM_BEACON) &&
+ (bi->flags & WL_BSS_FLAGS_FROM_BEACON))
+ goto exit;
-static s32 wl_cfg80211_attach_post(struct net_device *ndev)
-{
- struct bcm_cfg80211 * cfg;
- s32 err = 0;
- s32 ret = 0;
- WL_TRACE(("In\n"));
- if (unlikely(!ndev)) {
- WL_ERR(("ndev is invaild\n"));
- return -ENODEV;
- }
- cfg = wl_get_cfg(ndev);
- if (unlikely(!cfg)) {
- WL_ERR(("cfg is invaild\n"));
- return -EINVAL;
- }
- if (!wl_get_drv_status(cfg, READY, ndev)) {
- if (cfg->wdev) {
- ret = wl_cfgp2p_supported(cfg, ndev);
- if (ret > 0) {
-#if !defined(WL_ENABLE_P2P_IF)
- cfg->wdev->wiphy->interface_modes |=
- (BIT(NL80211_IFTYPE_P2P_CLIENT)|
- BIT(NL80211_IFTYPE_P2P_GO));
-#endif /* !WL_ENABLE_P2P_IF */
- if ((err = wl_cfgp2p_init_priv(cfg)) != 0)
- goto fail;
+ WL_SCAN(("%s("MACDBG"), i=%d prev: RSSI %d"
+ " flags 0x%x, new: RSSI %d flags 0x%x\n",
+ bss->SSID, MAC2STRDBG(bi->BSSID.octet), i,
+ bss->RSSI, bss->flags, bi->RSSI, bi->flags));
-#if defined(WL_ENABLE_P2P_IF)
- if (cfg->p2p_net) {
- /* Update MAC addr for p2p0 interface here. */
- memcpy(cfg->p2p_net->dev_addr, ndev->dev_addr, ETH_ALEN);
- cfg->p2p_net->dev_addr[0] |= 0x02;
- WL_MSG(cfg->p2p_net->name, "p2p_dev_addr="MACDBG "\n",
- MAC2STRDBG(cfg->p2p_net->dev_addr));
- } else {
- WL_ERR(("p2p_net not yet populated."
- " Couldn't update the MAC Address for p2p0 \n"));
- return -ENODEV;
+ if ((bss->flags & WL_BSS_FLAGS_RSSI_ONCHANNEL) ==
+ (bi->flags & WL_BSS_FLAGS_RSSI_ONCHANNEL)) {
+ /* preserve max RSSI if the measurements are
+ * both on-channel or both off-channel
+ */
+ WL_SCAN(("%s("MACDBG"), same onchan"
+ ", RSSI: prev %d new %d\n",
+ bss->SSID, MAC2STRDBG(bi->BSSID.octet),
+ bss->RSSI, bi->RSSI));
+ bi->RSSI = MAX(bss->RSSI, bi->RSSI);
+ } else if ((bss->flags & WL_BSS_FLAGS_RSSI_ONCHANNEL) &&
+ (bi->flags & WL_BSS_FLAGS_RSSI_ONCHANNEL) == 0) {
+ /* preserve the on-channel rssi measurement
+ * if the new measurement is off channel
+ */
+ WL_SCAN(("%s("MACDBG"), prev onchan"
+ ", RSSI: prev %d new %d\n",
+ bss->SSID, MAC2STRDBG(bi->BSSID.octet),
+ bss->RSSI, bi->RSSI));
+ bi->RSSI = bss->RSSI;
+ bi->flags |= WL_BSS_FLAGS_RSSI_ONCHANNEL;
+ }
+ if (dtoh32(bss->length) != bi_length) {
+ u32 prev_len = dtoh32(bss->length);
+
+ WL_SCAN(("bss info replacement"
+ " is occured(bcast:%d->probresp%d)\n",
+ bss->ie_length, bi->ie_length));
+ WL_SCAN(("%s("MACDBG"), replacement!(%d -> %d)\n",
+ bss->SSID, MAC2STRDBG(bi->BSSID.octet),
+ prev_len, bi_length));
+
+ if (list->buflen - prev_len + bi_length
+ > ESCAN_BUF_SIZE) {
+ WL_ERR(("Buffer is too small: keep the"
+ " previous result of this AP\n"));
+ /* Only update RSSI */
+ bss->RSSI = bi->RSSI;
+ bss->flags |= (bi->flags
+ & WL_BSS_FLAGS_RSSI_ONCHANNEL);
+ goto exit;
+ }
+
+ if (i < list->count - 1) {
+ /* shift the tail only when the replaced entry is not the last one */
+ memmove((u8 *)bss + bi_length,
+ (u8 *)bss + prev_len,
+ list->buflen - cur_len - prev_len);
+ }
+ list->buflen -= prev_len;
+ list->buflen += bi_length;
+ }
+ list->version = dtoh32(bi->version);
+ memcpy((u8 *)bss, (u8 *)bi, bi_length);
+ goto exit;
}
-#endif /* WL_ENABLE_P2P_IF */
- cfg->p2p_supported = true;
- } else if (ret == 0) {
- if ((err = wl_cfgp2p_init_priv(cfg)) != 0)
- goto fail;
- } else {
- /* SDIO bus timeout */
- err = -ENODEV;
- goto fail;
+ cur_len += dtoh32(bss->length);
+ }
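+ /* The new entry did not match any cached BSS; if it does not fit, evict the weakest cached results or drop it */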
+ if (bi_length > ESCAN_BUF_SIZE - list->buflen) {
+#ifdef ESCAN_BUF_OVERFLOW_MGMT
+ wl_cfg80211_remove_lowRSSI_info(list, candidate, bi);
+ if (bi_length > ESCAN_BUF_SIZE - list->buflen) {
+ WL_DBG(("RSSI(" MACDBG ") is too low(%d) to add Buffer\n",
+ MAC2STRDBG(bi->BSSID.octet), bi->RSSI));
+ goto exit;
+ }
+#else
+ WL_ERR(("Buffer is too small: ignoring\n"));
+ goto exit;
+#endif /* ESCAN_BUF_OVERFLOW_MGMT */
+ }
+
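+ /* Append the new BSS info entry at the end of the escan result buffer */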
+ memcpy(&(((char *)list)[list->buflen]), bi, bi_length);
+ list->version = dtoh32(bi->version);
+ list->buflen += bi_length;
+ list->count++;
+
+ /*
+ * !broadcast && number of SSIDs == 1 && number of channels == 1
+ * means a directed scan issued prior to association
+ */
+ if (wl_cfgp2p_is_p2p_specific_scan(cfg->scan_request)) {
+ WL_ERR(("P2P assoc scan fast aborted.\n"));
+ wl_notify_escan_complete(cfg, cfg->escan_info.ndev, false, true);
+ goto exit;
}
}
}
- wl_set_drv_status(cfg, READY, ndev);
-fail:
+ else if (status == WLC_E_STATUS_SUCCESS) {
+ cfg->escan_info.escan_state = WL_ESCAN_STATE_IDLE;
+ wl_escan_print_sync_id(status, cfg->escan_info.cur_sync_id,
+ escan_result->sync_id);
+
+ if (wl_get_drv_status_all(cfg, FINDING_COMMON_CHANNEL)) {
+ WL_INFORM(("ACTION FRAME SCAN DONE\n"));
+ wl_clr_p2p_status(cfg, SCANNING);
+ wl_clr_drv_status(cfg, SCANNING, cfg->afx_hdl->dev);
+ if (cfg->afx_hdl->peer_chan == WL_INVALID)
+ complete(&cfg->act_frm_scan);
+ } else if ((likely(cfg->scan_request)) || (cfg->sched_scan_running)) {
+ WL_INFORM(("ESCAN COMPLETED\n"));
+ DBG_EVENT_LOG((dhd_pub_t *)cfg->pub, WIFI_EVENT_DRIVER_SCAN_COMPLETE);
+ cfg->bss_list = wl_escan_get_buf(cfg, FALSE);
+ if (!scan_req_match(cfg)) {
+ WL_SCAN(("SCAN COMPLETED: scanned AP count=%d\n",
+ cfg->bss_list->count));
+ }
+ wl_inform_bss(cfg);
+ wl_notify_escan_complete(cfg, ndev, false, false);
+ }
+ wl_escan_increment_sync_id(cfg, SCAN_BUF_NEXT);
+#ifdef CUSTOMER_HW4_DEBUG
+ if (wl_scan_timeout_dbg_enabled)
+ wl_scan_timeout_dbg_clear();
+#endif /* CUSTOMER_HW4_DEBUG */
+ } else if ((status == WLC_E_STATUS_ABORT) || (status == WLC_E_STATUS_NEWSCAN) ||
+ (status == WLC_E_STATUS_11HQUIET) || (status == WLC_E_STATUS_CS_ABORT) ||
+ (status == WLC_E_STATUS_NEWASSOC)) {
+ /* Handle all cases of scan abort */
+ cfg->escan_info.escan_state = WL_ESCAN_STATE_IDLE;
+ wl_escan_print_sync_id(status, escan_result->sync_id,
+ cfg->escan_info.cur_sync_id);
+ WL_DBG(("ESCAN ABORT reason: %d\n", status));
+ if (wl_get_drv_status_all(cfg, FINDING_COMMON_CHANNEL)) {
+ WL_INFORM(("ACTION FRAME SCAN DONE\n"));
+ wl_clr_drv_status(cfg, SCANNING, cfg->afx_hdl->dev);
+ wl_clr_p2p_status(cfg, SCANNING);
+ if (cfg->afx_hdl->peer_chan == WL_INVALID)
+ complete(&cfg->act_frm_scan);
+ } else if ((likely(cfg->scan_request)) || (cfg->sched_scan_running)) {
+ WL_INFORM(("ESCAN ABORTED\n"));
+ cfg->bss_list = wl_escan_get_buf(cfg, TRUE);
+ if (!scan_req_match(cfg)) {
+ WL_TRACE_HW4(("SCAN ABORTED: scanned AP count=%d\n",
+ cfg->bss_list->count));
+ }
+ wl_inform_bss(cfg);
+ wl_notify_escan_complete(cfg, ndev, true, false);
+ } else {
+ /* If there is no pending host initiated scan, do nothing */
+ WL_DBG(("ESCAN ABORT: No pending scans. Ignoring event.\n"));
+ }
+ wl_escan_increment_sync_id(cfg, SCAN_BUF_CNT);
+ } else if (status == WLC_E_STATUS_TIMEOUT) {
+ WL_ERR(("WLC_E_STATUS_TIMEOUT : scan_request[%p]\n", cfg->scan_request));
+ WL_ERR(("reason[0x%x]\n", e->reason));
+ if (e->reason == 0xFFFFFFFF) {
+ wl_notify_escan_complete(cfg, cfg->escan_info.ndev, true, true);
+ }
+ } else {
+ WL_ERR(("unexpected Escan Event %d : abort\n", status));
+ cfg->escan_info.escan_state = WL_ESCAN_STATE_IDLE;
+ wl_escan_print_sync_id(status, escan_result->sync_id,
+ cfg->escan_info.cur_sync_id);
+ if (wl_get_drv_status_all(cfg, FINDING_COMMON_CHANNEL)) {
+ WL_INFORM(("ACTION FRAME SCAN DONE\n"));
+ wl_clr_p2p_status(cfg, SCANNING);
+ wl_clr_drv_status(cfg, SCANNING, cfg->afx_hdl->dev);
+ if (cfg->afx_hdl->peer_chan == WL_INVALID)
+ complete(&cfg->act_frm_scan);
+ } else if ((likely(cfg->scan_request)) || (cfg->sched_scan_running)) {
+ cfg->bss_list = wl_escan_get_buf(cfg, TRUE);
+ if (!scan_req_match(cfg)) {
+ WL_TRACE_HW4(("SCAN ABORTED(UNEXPECTED): "
+ "scanned AP count=%d\n",
+ cfg->bss_list->count));
+ }
+ wl_inform_bss(cfg);
+ wl_notify_escan_complete(cfg, ndev, true, false);
+ }
+ wl_escan_increment_sync_id(cfg, 2);
+ }
+#else /* WL_DRV_AVOID_SCANCACHE */
+ err = wl_escan_without_scan_cache(cfg, escan_result, ndev, e, status);
+#endif /* WL_DRV_AVOID_SCANCACHE */
+exit:
+ mutex_unlock(&cfg->usr_sync);
return err;
}
-struct bcm_cfg80211 *wl_get_cfg(struct net_device *ndev)
+static void wl_cfg80211_concurrent_roam(struct bcm_cfg80211 *cfg, int enable)
{
- struct wireless_dev *wdev = ndev->ieee80211_ptr;
-
- if (!wdev || !wdev->wiphy)
- return NULL;
+ u32 connected_cnt = wl_get_drv_status_all(cfg, CONNECTED);
+ bool p2p_connected = wl_cfgp2p_vif_created(cfg);
+ struct net_info *iter, *next;
- return wiphy_priv(wdev->wiphy);
-}
+ if (!(cfg->roam_flags & WL_ROAM_OFF_ON_CONCURRENT))
+ return;
-s32
-wl_cfg80211_net_attach(struct net_device *primary_ndev)
-{
- struct bcm_cfg80211 *cfg = wl_get_cfg(primary_ndev);
+ WL_DBG(("roam off:%d p2p_connected:%d connected_cnt:%d \n",
+ enable, p2p_connected, connected_cnt));
+ /* Disable FW roam when we have a concurrent P2P connection */
+ if (enable && p2p_connected && connected_cnt > 1) {
- if (!cfg) {
- WL_ERR(("cfg null\n"));
- return BCME_ERROR;
+ /* Mark it as to be reverted */
+ cfg->roam_flags |= WL_ROAM_REVERT_STATUS;
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == \
+ 4 && __GNUC_MINOR__ >= 6))
+_Pragma("GCC diagnostic push")
+_Pragma("GCC diagnostic ignored \"-Wcast-qual\"")
+#endif
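+ /* Turn firmware roaming off on every STA interface and record it so it can be reverted later */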
+ for_each_ndev(cfg, iter, next) {
+ if (iter->ndev && iter->wdev &&
+ iter->wdev->iftype == NL80211_IFTYPE_STATION) {
+ if (wldev_iovar_setint(iter->ndev, "roam_off", TRUE)
+ == BCME_OK) {
+ iter->roam_off = TRUE;
+ }
+ else {
+ WL_ERR(("error to enable roam_off\n"));
+ }
+ }
+ }
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == \
+ 4 && __GNUC_MINOR__ >= 6))
+_Pragma("GCC diagnostic pop")
+#endif
}
-#ifdef WL_STATIC_IF
- /* Register dummy n/w iface. FW init will happen only from dev_open */
- if (wl_cfg80211_register_static_if(cfg, NL80211_IFTYPE_STATION,
- WL_STATIC_IFNAME_PREFIX) == NULL) {
- WL_ERR(("static i/f registration failed!\n"));
- return BCME_ERROR;
+ else if (!enable && (cfg->roam_flags & WL_ROAM_REVERT_STATUS)) {
+ cfg->roam_flags &= ~WL_ROAM_REVERT_STATUS;
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == \
+ 4 && __GNUC_MINOR__ >= 6))
+_Pragma("GCC diagnostic push")
+_Pragma("GCC diagnostic ignored \"-Wcast-qual\"")
+#endif
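+ /* Concurrency ended: re-enable firmware roaming on the interfaces it was turned off on */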
+ for_each_ndev(cfg, iter, next) {
+ if (iter->ndev && iter->wdev &&
+ iter->wdev->iftype == NL80211_IFTYPE_STATION) {
+ if (iter->roam_off != WL_INVALID) {
+ if (wldev_iovar_setint(iter->ndev, "roam_off", FALSE)
+ == BCME_OK) {
+ iter->roam_off = FALSE;
+ }
+ else {
+ WL_ERR(("error to disable roam_off\n"));
+ }
+ }
+ }
+ }
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == \
+ 4 && __GNUC_MINOR__ >= 6))
+_Pragma("GCC diagnostic pop")
+#endif
}
-#endif /* WL_STATIC_IF */
- return BCME_OK;
+
+ return;
}
-s32 wl_cfg80211_attach(struct net_device *ndev, void *context)
+static void wl_cfg80211_determine_vsdb_mode(struct bcm_cfg80211 *cfg)
{
- struct wireless_dev *wdev;
- struct bcm_cfg80211 *cfg;
- s32 err = 0;
- struct device *dev;
- u16 bssidx = 0;
- u16 ifidx = 0;
- dhd_pub_t *dhd = (struct dhd_pub *)(context);
-
- WL_TRACE(("In\n"));
- if (!ndev) {
- WL_ERR(("ndev is invaild\n"));
- return -ENODEV;
- }
- WL_DBG(("func %p\n", wl_cfg80211_get_parent_dev()));
- dev = wl_cfg80211_get_parent_dev();
+ struct net_info *iter, *next;
+ u32 ctl_chan = 0;
+ u32 chanspec = 0;
+ u32 pre_ctl_chan = 0;
+ u32 connected_cnt = wl_get_drv_status_all(cfg, CONNECTED);
+ cfg->vsdb_mode = false;
- wdev = (struct wireless_dev *)MALLOCZ(dhd->osh, sizeof(*wdev));
- if (unlikely(!wdev)) {
- WL_ERR(("Could not allocate wireless device\n"));
- return -ENOMEM;
- }
- err = wl_setup_wiphy(wdev, dev, context);
- if (unlikely(err)) {
- MFREE(dhd->osh, wdev, sizeof(*wdev));
- return -ENOMEM;
+ if (connected_cnt <= 1) {
+ return;
}
-#ifdef WLMESH_CFG80211
- wdev->iftype = wl_mode_to_nl80211_iftype(WL_MODE_MESH);
-#else
- wdev->iftype = wl_mode_to_nl80211_iftype(WL_MODE_BSS);
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == \
+ 4 && __GNUC_MINOR__ >= 6))
+_Pragma("GCC diagnostic push")
+_Pragma("GCC diagnostic ignored \"-Wcast-qual\"")
#endif
- cfg = wiphy_priv(wdev->wiphy);
- cfg->wdev = wdev;
- cfg->pub = context;
- cfg->osh = dhd->osh;
- INIT_LIST_HEAD(&cfg->net_list);
- INIT_LIST_HEAD(&cfg->vndr_oui_list);
- spin_lock_init(&cfg->vndr_oui_sync);
- spin_lock_init(&cfg->net_list_sync);
- ndev->ieee80211_ptr = wdev;
- SET_NETDEV_DEV(ndev, wiphy_dev(wdev->wiphy));
- wdev->netdev = ndev;
- cfg->state_notifier = wl_notifier_change_state;
- err = wl_alloc_netinfo(cfg, ndev, wdev, WL_IF_TYPE_STA, PM_ENABLE, bssidx, ifidx);
- if (err) {
- WL_ERR(("Failed to alloc net_info (%d)\n", err));
- goto cfg80211_attach_out;
- }
- err = wl_init_priv(cfg);
- if (err) {
- WL_ERR(("Failed to init iwm_priv (%d)\n", err));
- goto cfg80211_attach_out;
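+ /* Compare the control channels of all connected interfaces; differing channels mean VSDB (multi-channel) operation */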
+ for_each_ndev(cfg, iter, next) {
+ /* p2p discovery iface ndev could be null */
+ if (iter->ndev) {
+ chanspec = 0;
+ ctl_chan = 0;
+ if (wl_get_drv_status(cfg, CONNECTED, iter->ndev)) {
+ if (wldev_iovar_getint(iter->ndev, "chanspec",
+ (s32 *)&chanspec) == BCME_OK) {
+ chanspec = wl_chspec_driver_to_host(chanspec);
+ ctl_chan = wf_chspec_ctlchan(chanspec);
+ wl_update_prof(cfg, iter->ndev, NULL,
+ &ctl_chan, WL_PROF_CHAN);
+ }
+ if (!cfg->vsdb_mode) {
+ if (!pre_ctl_chan && ctl_chan)
+ pre_ctl_chan = ctl_chan;
+ else if (pre_ctl_chan && (pre_ctl_chan != ctl_chan)) {
+ cfg->vsdb_mode = true;
+ }
+ }
+ }
+ }
}
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == \
+ 4 && __GNUC_MINOR__ >= 6))
+_Pragma("GCC diagnostic pop")
+#endif
+ printf("%s concurrency is enabled\n", cfg->vsdb_mode ? "Multi Channel" : "Same Channel");
+ return;
+}
- err = wl_setup_rfkill(cfg, TRUE);
- if (err) {
- WL_ERR(("Failed to setup rfkill %d\n", err));
- goto cfg80211_attach_out;
- }
-#ifdef DEBUGFS_CFG80211
- err = wl_setup_debugfs(cfg);
- if (err) {
- WL_ERR(("Failed to setup debugfs %d\n", err));
- goto cfg80211_attach_out;
+#if defined(DISABLE_FRAMEBURST_VSDB) && defined(USE_WFA_CERT_CONF)
+extern int g_frameburst;
+#endif /* DISABLE_FRAMEBURST_VSDB && USE_WFA_CERT_CONF */
+
+static s32 wl_notifier_change_state(struct bcm_cfg80211 *cfg, struct net_info *_net_info,
+ enum wl_status state, bool set)
+{
+ s32 pm = PM_FAST;
+ s32 err = BCME_OK;
+ u32 mode;
+ u32 chan = 0;
+ struct net_device *primary_dev = bcmcfg_to_prmry_ndev(cfg);
+ dhd_pub_t *dhd = cfg->pub;
+#ifdef RTT_SUPPORT
+ rtt_status_info_t *rtt_status;
+#endif /* RTT_SUPPORT */
+ if (dhd->busstate == DHD_BUS_DOWN) {
+ WL_ERR(("%s : busstate is DHD_BUS_DOWN!\n", __FUNCTION__));
+ return 0;
}
-#endif // endif
- if (!wl_cfg80211_netdev_notifier_registered) {
- wl_cfg80211_netdev_notifier_registered = TRUE;
- err = register_netdevice_notifier(&wl_cfg80211_netdev_notifier);
- if (err) {
- wl_cfg80211_netdev_notifier_registered = FALSE;
- WL_ERR(("Failed to register notifierl %d\n", err));
- goto cfg80211_attach_out;
+ WL_DBG(("Enter state %d set %d _net_info->pm_restore %d iface %s\n",
+ state, set, _net_info->pm_restore, _net_info->ndev->name));
+
+ if (state != WL_STATUS_CONNECTED)
+ return 0;
+ mode = wl_get_mode_by_netdev(cfg, _net_info->ndev);
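+ /* Link-up: re-evaluate concurrent-roam and VSDB state, then force PM off on the newly connected interface */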
+ if (set) {
+ wl_cfg80211_concurrent_roam(cfg, 1);
+ wl_cfg80211_determine_vsdb_mode(cfg);
+ if (mode == WL_MODE_AP) {
+ if (wl_add_remove_eventmsg(primary_dev, WLC_E_P2P_PROBREQ_MSG, false))
+ WL_ERR((" failed to unset WLC_E_P2P_PROPREQ_MSG\n"));
}
- }
-#if defined(COEX_DHCP)
- cfg->btcoex_info = wl_cfg80211_btcoex_init(cfg->wdev->netdev);
- if (!cfg->btcoex_info)
- goto cfg80211_attach_out;
-#endif // endif
+ pm = PM_OFF;
+ if ((err = wldev_ioctl_set(_net_info->ndev, WLC_SET_PM, &pm,
+ sizeof(pm))) != 0) {
+ if (err == -ENODEV)
+ WL_DBG(("%s:netdev not ready\n",
+ _net_info->ndev->name));
+ else
+ WL_ERR(("%s:error (%d)\n",
+ _net_info->ndev->name, err));
-#ifdef CONFIG_CFG80211_INTERNAL_REGDB
- wdev->wiphy->reg_notifier = wl_cfg80211_reg_notifier;
-#endif /* CONFIG_CFG80211_INTERNAL_REGDB */
+ wl_cfg80211_update_power_mode(_net_info->ndev);
+ }
+ wl_add_remove_pm_enable_work(cfg, WL_PM_WORKQ_SHORT);
-#if defined(WL_ENABLE_P2P_IF)
- err = wl_cfg80211_attach_p2p(cfg);
- if (err)
- goto cfg80211_attach_out;
-#endif
+ } else { /* clear */
+ chan = 0;
+ /* clear chan information when the net device is disconnected */
+ wl_update_prof(cfg, _net_info->ndev, NULL, &chan, WL_PROF_CHAN);
+ wl_cfg80211_determine_vsdb_mode(cfg);
+ if (primary_dev == _net_info->ndev) {
+ pm = PM_FAST;
+#ifdef RTT_SUPPORT
+ rtt_status = GET_RTTSTATE(dhd);
+ if (rtt_status->status != RTT_ENABLED) {
+#endif /* RTT_SUPPORT */
+ if (dhd_conf_get_pm(dhd) >= 0)
+ pm = dhd_conf_get_pm(dhd);
+ if ((err = wldev_ioctl_set(_net_info->ndev, WLC_SET_PM, &pm,
+ sizeof(pm))) != 0) {
+ if (err == -ENODEV)
+ WL_DBG(("%s:netdev not ready\n",
+ _net_info->ndev->name));
+ else
+ WL_ERR(("%s:error (%d)\n",
+ _net_info->ndev->name, err));
- INIT_DELAYED_WORK(&cfg->pm_enable_work, wl_cfg80211_work_handler);
-#ifdef WL_NAN
- WL_DBG(("NAN: Armed wl_cfgnan_delayed_disable work\n"));
- INIT_DELAYED_WORK(&cfg->nan_disable, wl_cfgnan_delayed_disable);
-#endif /* WL_NAN */
- cfg->rssi_sum_report = FALSE;
- return err;
+ wl_cfg80211_update_power_mode(_net_info->ndev);
+ }
+#ifdef RTT_SUPPORT
+ }
+#endif /* RTT_SUPPORT */
+ }
+ wl_cfg80211_concurrent_roam(cfg, 0);
-cfg80211_attach_out:
- wl_cfg80211_detach(cfg);
+ }
return err;
}
-void wl_cfg80211_detach(struct bcm_cfg80211 *cfg)
+static s32 wl_init_scan(struct bcm_cfg80211 *cfg)
{
- WL_DBG(("Enter\n"));
- if (!cfg) {
- return;
- }
- wl_add_remove_pm_enable_work(cfg, WL_PM_WORKQ_DEL);
-
-#if defined(COEX_DHCP)
- wl_cfg80211_btcoex_deinit();
- cfg->btcoex_info = NULL;
-#endif // endif
+ int err = 0;
- wl_setup_rfkill(cfg, FALSE);
-#ifdef DEBUGFS_CFG80211
- wl_free_debugfs(cfg);
-#endif // endif
- if (cfg->p2p_supported) {
- if (timer_pending(&cfg->p2p->listen_timer))
- del_timer_sync(&cfg->p2p->listen_timer);
- wl_cfgp2p_deinit_priv(cfg);
- }
+ cfg->evt_handler[WLC_E_ESCAN_RESULT] = wl_escan_handler;
+ cfg->escan_info.escan_state = WL_ESCAN_STATE_IDLE;
+ wl_escan_init_sync_id(cfg);
-#ifdef WL_WPS_SYNC
- wl_deinit_wps_reauth_sm(cfg);
-#endif /* WL_WPS_SYNC */
+ /* Init scan_timeout timer */
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 15, 0)
+ timer_setup(&cfg->scan_timeout, wl_scan_timeout, 0);
+#else
+ init_timer(&cfg->scan_timeout);
+ cfg->scan_timeout.data = (unsigned long) cfg;
+ cfg->scan_timeout.function = wl_scan_timeout;
+#endif
+
+ return err;
+}
- if (timer_pending(&cfg->scan_timeout))
- del_timer_sync(&cfg->scan_timeout);
#ifdef DHD_LOSSLESS_ROAMING
- if (timer_pending(&cfg->roam_timeout)) {
- del_timer_sync(&cfg->roam_timeout);
- }
-#endif /* DHD_LOSSLESS_ROAMING */
+static s32 wl_init_roam_timeout(struct bcm_cfg80211 *cfg)
+{
+ int err = 0;
-#ifdef WL_STATIC_IF
- wl_cfg80211_unregister_static_if(cfg);
-#endif /* WL_STATIC_IF */
-#if defined(WL_CFG80211_P2P_DEV_IF)
- if (cfg->p2p_wdev)
- wl_cfgp2p_del_p2p_disc_if(cfg->p2p_wdev, cfg);
-#endif /* WL_CFG80211_P2P_DEV_IF */
-#if defined(WL_ENABLE_P2P_IF)
- wl_cfg80211_detach_p2p(cfg);
-#endif
- wl_cfg80211_ibss_vsie_free(cfg);
- wl_dealloc_netinfo_by_wdev(cfg, cfg->wdev);
- wl_cfg80211_set_bcmcfg(NULL);
- wl_deinit_priv(cfg);
- wl_cfg80211_clear_parent_dev();
-#if defined(RSSIAVG)
- wl_free_rssi_cache(&cfg->g_rssi_cache_ctrl);
- wl_free_rssi_cache(&cfg->g_connected_rssi_cache_ctrl);
-#endif
-#if defined(BSSCACHE)
- wl_release_bss_cache_ctrl(&cfg->g_bss_cache_ctrl);
+ /* Init roam timer */
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 15, 0)
+ timer_setup(&cfg->roam_timeout, wl_roam_timeout, 0);
+#else
+ init_timer(&cfg->roam_timeout);
+ cfg->roam_timeout.data = (unsigned long) cfg;
+ cfg->roam_timeout.function = wl_roam_timeout;
#endif
- wl_free_wdev(cfg);
- /* PLEASE do NOT call any function after wl_free_wdev, the driver's private
- * structure "cfg", which is the private part of wiphy, has been freed in
- * wl_free_wdev !!!!!!!!!!!
- */
- WL_DBG(("Exit\n"));
+
+ return err;
}
+#endif /* DHD_LOSSLESS_ROAMING */
-#if defined(CONFIG_WLAN_BEYONDX) || defined(CONFIG_SEC_5GMODEL)
-void wl_cfg80211_register_dev_ril_bridge_event_notifier()
+static s32 wl_init_priv(struct bcm_cfg80211 *cfg)
{
- WL_DBG(("Enter\n"));
- if (!wl_cfg80211_ril_bridge_notifier_registered) {
- s32 err = 0;
- wl_cfg80211_ril_bridge_notifier_registered = TRUE;
- err = register_dev_ril_bridge_event_notifier(&wl_cfg80211_ril_bridge_notifier);
- if (err) {
- wl_cfg80211_ril_bridge_notifier_registered = FALSE;
- WL_ERR(("Failed to register ril_notifier! %d\n", err));
- }
+ struct wiphy *wiphy = bcmcfg_to_wiphy(cfg);
+ struct net_device *ndev = bcmcfg_to_prmry_ndev(cfg);
+ s32 err = 0;
+
+ cfg->scan_request = NULL;
+ cfg->pwr_save = !!(wiphy->flags & WIPHY_FLAG_PS_ON_BY_DEFAULT);
+#ifdef DISABLE_BUILTIN_ROAM
+ cfg->roam_on = false;
+#else
+ cfg->roam_on = true;
+#endif /* DISABLE_BUILTIN_ROAM */
+ cfg->active_scan = true;
+ cfg->rf_blocked = false;
+ cfg->vsdb_mode = false;
+#if defined(BCMSDIO) || defined(BCMDBUS)
+ cfg->wlfc_on = false;
+#endif /* BCMSDIO || BCMDBUS */
+ cfg->roam_flags |= WL_ROAM_OFF_ON_CONCURRENT;
+ cfg->disable_roam_event = false;
+ /* register interested state */
+ set_bit(WL_STATUS_CONNECTED, &cfg->interrested_state);
+ spin_lock_init(&cfg->cfgdrv_lock);
+ mutex_init(&cfg->ioctl_buf_sync);
+ init_waitqueue_head(&cfg->netif_change_event);
+ init_waitqueue_head(&cfg->wps_done_event);
+ init_completion(&cfg->send_af_done);
+ init_completion(&cfg->iface_disable);
+ mutex_init(&cfg->usr_sync);
+ mutex_init(&cfg->event_sync);
+ mutex_init(&cfg->scan_complete);
+ mutex_init(&cfg->if_sync);
+ mutex_init(&cfg->pm_sync);
+ mutex_init(&cfg->in4way_sync);
+#ifdef WLTDLS
+ mutex_init(&cfg->tdls_sync);
+#endif /* WLTDLS */
+ wl_init_eq(cfg);
+ err = wl_init_priv_mem(cfg);
+ if (err)
+ return err;
+ if (wl_create_event_handler(cfg))
+ return -ENOMEM;
+ wl_init_event_handler(cfg);
+ err = wl_init_scan(cfg);
+ if (err)
+ return err;
+#ifdef DHD_LOSSLESS_ROAMING
+ err = wl_init_roam_timeout(cfg);
+ if (err) {
+ return err;
}
+#endif /* DHD_LOSSLESS_ROAMING */
+ wl_init_conf(cfg->conf);
+ wl_init_prof(cfg, ndev);
+ wl_link_down(cfg);
+ DNGL_FUNC(dhd_cfg80211_init, (cfg));
+#ifdef NAN_DP
+ cfg->nan_dp_state = NAN_DP_STATE_DISABLED;
+ init_waitqueue_head(&cfg->ndp_if_change_event);
+#endif /* NAN_DP */
+ return err;
}
-void wl_cfg80211_unregister_dev_ril_bridge_event_notifier()
+static void wl_deinit_priv(struct bcm_cfg80211 *cfg)
{
- WL_DBG(("Enter\n"));
- if (wl_cfg80211_ril_bridge_notifier_registered) {
- wl_cfg80211_ril_bridge_notifier_registered = FALSE;
- unregister_dev_ril_bridge_event_notifier(&wl_cfg80211_ril_bridge_notifier);
+ DNGL_FUNC(dhd_cfg80211_deinit, (cfg));
+ wl_destroy_event_handler(cfg);
+ wl_flush_eq(cfg);
+ wl_link_down(cfg);
+ if (cfg->scan_timeout.function)
+ del_timer_sync(&cfg->scan_timeout);
+#ifdef DHD_LOSSLESS_ROAMING
+ if (cfg->roam_timeout.function)
+ del_timer_sync(&cfg->roam_timeout);
+#endif
+ wl_deinit_priv_mem(cfg);
+ if (wl_cfg80211_netdev_notifier_registered) {
+ wl_cfg80211_netdev_notifier_registered = FALSE;
+ unregister_netdevice_notifier(&wl_cfg80211_netdev_notifier);
}
}
-#endif /* CONFIG_WLAN_BEYONDX || defined(CONFIG_SEC_5GMODEL) */
-static void wl_print_event_data(struct bcm_cfg80211 *cfg,
- uint32 event_type, const wl_event_msg_t *e)
+#if defined(WL_ENABLE_P2P_IF)
+static s32 wl_cfg80211_attach_p2p(struct bcm_cfg80211 *cfg)
{
- s32 status = ntoh32(e->status);
- s32 reason = ntoh32(e->reason);
- s32 ifidx = ntoh32(e->ifidx);
- s32 bssidx = ntoh32(e->bsscfgidx);
-
- switch (event_type) {
- case WLC_E_ESCAN_RESULT:
- if ((status == WLC_E_STATUS_SUCCESS) ||
- (status == WLC_E_STATUS_ABORT)) {
- WL_INFORM_MEM(("event_type (%d), ifidx: %d"
- " bssidx: %d scan_type:%d\n",
- event_type, ifidx, bssidx, status));
- }
- break;
- case WLC_E_LINK:
- case WLC_E_DISASSOC:
- case WLC_E_DISASSOC_IND:
- case WLC_E_DEAUTH:
- case WLC_E_DEAUTH_IND:
- WL_INFORM_MEM(("event_type (%d), ifidx: %d bssidx: %d"
- " status:%d reason:%d\n",
- event_type, ifidx, bssidx, status, reason));
- break;
+ WL_TRACE(("Enter \n"));
- default:
- /* Print only when DBG verbose is enabled */
- WL_DBG(("event_type (%d), ifidx: %d bssidx: %d status:%d reason: %d\n",
- event_type, ifidx, bssidx, status, reason));
+ if (wl_cfgp2p_register_ndev(cfg) < 0) {
+ WL_ERR(("P2P attach failed. \n"));
+ return -ENODEV;
}
+
+ return 0;
}
-static void wl_event_handler(struct work_struct *work_data)
+static s32 wl_cfg80211_detach_p2p(struct bcm_cfg80211 *cfg)
{
- struct bcm_cfg80211 *cfg = NULL;
- struct wl_event_q *e;
- struct wireless_dev *wdev = NULL;
+ struct wireless_dev *wdev;
WL_DBG(("Enter \n"));
- BCM_SET_CONTAINER_OF(cfg, work_data, struct bcm_cfg80211, event_work);
- cfg->wl_evt_hdlr_entry_time = OSL_LOCALTIME_NS();
- DHD_EVENT_WAKE_LOCK(cfg->pub);
- while ((e = wl_deq_event(cfg))) {
- s32 status = ntoh32(e->emsg.status);
- u32 event_type = ntoh32(e->emsg.event_type);
- bool scan_cmplt_evt = (event_type == WLC_E_ESCAN_RESULT) &&
- ((status == WLC_E_STATUS_SUCCESS) || (status == WLC_E_STATUS_ABORT));
-
- cfg->wl_evt_deq_time = OSL_LOCALTIME_NS();
- if (scan_cmplt_evt) {
- cfg->scan_deq_time = OSL_LOCALTIME_NS();
+ if (!cfg) {
+ WL_ERR(("Invalid Ptr\n"));
+ return -EINVAL;
+ }
+ else {
+ wdev = cfg->p2p_wdev;
+ if (!wdev) {
+ WL_ERR(("Invalid Ptr\n"));
+ return -EINVAL;
}
- /* Print only critical events to avoid too many prints */
- wl_print_event_data(cfg, e->etype, &e->emsg);
+ }
- if (e->emsg.ifidx > WL_MAX_IFS) {
- WL_ERR((" Event ifidx not in range. val:%d \n", e->emsg.ifidx));
- goto fail;
- }
+ wl_cfgp2p_unregister_ndev(cfg);
- /* Make sure iface operations, don't creat race conditions */
- mutex_lock(&cfg->if_sync);
- if (!(wdev = wl_get_wdev_by_fw_idx(cfg,
- e->emsg.bsscfgidx, e->emsg.ifidx))) {
- /* For WLC_E_IF would be handled by wl_host_event */
- if (e->etype != WLC_E_IF)
- WL_ERR(("No wdev corresponding to bssidx: 0x%x found!"
- " Ignoring event.\n", e->emsg.bsscfgidx));
- } else if (e->etype < WLC_E_LAST && cfg->evt_handler[e->etype]) {
- dhd_pub_t *dhd = (struct dhd_pub *)(cfg->pub);
- if (dhd->busstate == DHD_BUS_DOWN) {
- WL_ERR((": BUS is DOWN.\n"));
- } else
- {
- WL_DBG(("event_type %d event_sub %d\n",
- ntoh32(e->emsg.event_type),
- ntoh32(e->emsg.reason)));
- cfg->evt_handler[e->etype](cfg, wdev_to_cfgdev(wdev),
- &e->emsg, e->edata);
- if (scan_cmplt_evt) {
- cfg->scan_hdlr_cmplt_time = OSL_LOCALTIME_NS();
+ cfg->p2p_wdev = NULL;
+ cfg->p2p_net = NULL;
+ WL_DBG(("Freeing 0x%p \n", wdev));
+ kfree(wdev);
+
+ return 0;
+}
+#endif
+
+static s32 wl_cfg80211_attach_post(struct net_device *ndev)
+{
+ struct bcm_cfg80211 *cfg;
+ s32 err = 0;
+ s32 ret = 0;
+ WL_TRACE(("In\n"));
+ if (unlikely(!ndev)) {
+ WL_ERR(("ndev is invaild\n"));
+ return -ENODEV;
+ }
+ cfg = wl_get_cfg(ndev);
+ if (unlikely(!cfg)) {
+ WL_ERR(("cfg is invaild\n"));
+ return -EINVAL;
+ }
+ if (!wl_get_drv_status(cfg, READY, ndev)) {
+ if (cfg->wdev) {
+ ret = wl_cfgp2p_supported(cfg, ndev);
+ if (ret > 0) {
+#if !defined(WL_ENABLE_P2P_IF)
+ cfg->wdev->wiphy->interface_modes |=
+ (BIT(NL80211_IFTYPE_P2P_CLIENT)|
+ BIT(NL80211_IFTYPE_P2P_GO));
+#endif /* !WL_ENABLE_P2P_IF */
+ if ((err = wl_cfgp2p_init_priv(cfg)) != 0)
+ goto fail;
+
+#if defined(WL_ENABLE_P2P_IF)
+ if (cfg->p2p_net) {
+ /* Update MAC addr for p2p0 interface here. */
+ memcpy(cfg->p2p_net->dev_addr, ndev->dev_addr, ETH_ALEN);
+ cfg->p2p_net->dev_addr[0] |= 0x02;
+ printf("%s: p2p_dev_addr="MACDBG "\n",
+ cfg->p2p_net->name,
+ MAC2STRDBG(cfg->p2p_net->dev_addr));
+ } else {
+ WL_ERR(("p2p_net not yet populated."
+ " Couldn't update the MAC Address for p2p0 \n"));
+ return -ENODEV;
}
+#endif /* WL_ENABLE_P2P_IF */
+ cfg->p2p_supported = true;
+ } else if (ret == 0) {
+ if ((err = wl_cfgp2p_init_priv(cfg)) != 0)
+ goto fail;
+ } else {
+ /* SDIO bus timeout */
+ err = -ENODEV;
+ goto fail;
}
- } else {
- WL_DBG(("Unknown Event (%d): ignoring\n", e->etype));
- }
- mutex_unlock(&cfg->if_sync);
-fail:
- wl_put_event(cfg, e);
- if (scan_cmplt_evt) {
- cfg->scan_cmplt_time = OSL_LOCALTIME_NS();
}
- cfg->wl_evt_hdlr_exit_time = OSL_LOCALTIME_NS();
}
- DHD_EVENT_WAKE_UNLOCK(cfg->pub);
+ wl_set_drv_status(cfg, READY, ndev);
+fail:
+ return err;
}
-/*
-* Generic API to handle critical events which doesnt need
-* cfg enquening and sleepable API calls.
-*/
-s32
-wl_cfg80211_handle_critical_events(struct bcm_cfg80211 *cfg,
- const wl_event_msg_t * e)
+struct bcm_cfg80211 *wl_get_cfg(struct net_device *ndev)
{
- s32 ret = BCME_ERROR;
- u32 event_type = ntoh32(e->event_type);
+ struct wireless_dev *wdev = ndev->ieee80211_ptr;
- if (event_type >= WLC_E_LAST) {
- return BCME_ERROR;
- }
+ if (!wdev || !wdev->wiphy)
+ return NULL;
- switch (event_type) {
- case WLC_E_NAN_CRITICAL: {
-#ifdef WL_NAN
- if (ntoh32(e->reason) == WL_NAN_EVENT_STOP) {
- ret =
- wl_cfgvendor_nan_send_async_disable_resp(cfg->static_ndev->ieee80211_ptr);
- }
-#endif /* WL_NAN */
- break;
- }
- default:
- ret = BCME_ERROR;
- }
- return ret;
+ return wiphy_priv(wdev->wiphy);
}
-void
-wl_cfg80211_event(struct net_device *ndev, const wl_event_msg_t * e, void *data)
+s32 wl_cfg80211_attach(struct net_device *ndev, void *context)
+{
+ struct wireless_dev *wdev;
+ struct bcm_cfg80211 *cfg;
+ s32 err = 0;
+ struct device *dev;
+
+ WL_TRACE(("In\n"));
+ if (!ndev) {
+ WL_ERR(("ndev is invaild\n"));
+ return -ENODEV;
+ }
+ WL_DBG(("func %p\n", wl_cfg80211_get_parent_dev()));
+ dev = wl_cfg80211_get_parent_dev();
+
+ wdev = kzalloc(sizeof(*wdev), GFP_KERNEL);
+ if (unlikely(!wdev)) {
+ WL_ERR(("Could not allocate wireless device\n"));
+ return -ENOMEM;
+ }
+ err = wl_setup_wiphy(wdev, dev, context);
+ if (unlikely(err)) {
+ kfree(wdev);
+ return -ENOMEM;
+ }
+#ifdef WLMESH
+ wdev->iftype = wl_mode_to_nl80211_iftype(WL_MODE_MESH);
+#else
+ wdev->iftype = wl_mode_to_nl80211_iftype(WL_MODE_BSS);
+#endif
+ cfg = wiphy_priv(wdev->wiphy);
+ cfg->wdev = wdev;
+ cfg->pub = context;
+ INIT_LIST_HEAD(&cfg->net_list);
+#ifdef WBTEXT
+ INIT_LIST_HEAD(&cfg->wbtext_bssid_list);
+#endif /* WBTEXT */
+ INIT_LIST_HEAD(&cfg->vndr_oui_list);
+ spin_lock_init(&cfg->net_list_sync);
+ ndev->ieee80211_ptr = wdev;
+ SET_NETDEV_DEV(ndev, wiphy_dev(wdev->wiphy));
+ wdev->netdev = ndev;
+ cfg->state_notifier = wl_notifier_change_state;
+ err = wl_init_priv(cfg);
+ if (err) {
+ WL_ERR(("Failed to init iwm_priv (%d)\n", err));
+ goto cfg80211_attach_out;
+ }
+ err = wl_alloc_netinfo(cfg, ndev, wdev, wdev->iftype, PM_ENABLE, 0);
+ if (err) {
+ WL_ERR(("Failed to alloc net_info (%d)\n", err));
+ goto cfg80211_attach_out;
+ }
+
+ err = wl_setup_rfkill(cfg, TRUE);
+ if (err) {
+ WL_ERR(("Failed to setup rfkill %d\n", err));
+ goto cfg80211_attach_out;
+ }
+#ifdef DEBUGFS_CFG80211
+ err = wl_setup_debugfs(cfg);
+ if (err) {
+ WL_ERR(("Failed to setup debugfs %d\n", err));
+ goto cfg80211_attach_out;
+ }
+#endif
+ if (!wl_cfg80211_netdev_notifier_registered) {
+ wl_cfg80211_netdev_notifier_registered = TRUE;
+ err = register_netdevice_notifier(&wl_cfg80211_netdev_notifier);
+ if (err) {
+ wl_cfg80211_netdev_notifier_registered = FALSE;
+ WL_ERR(("Failed to register notifierl %d\n", err));
+ goto cfg80211_attach_out;
+ }
+ }
+#if defined(COEX_DHCP)
+ cfg->btcoex_info = wl_cfg80211_btcoex_init(cfg->wdev->netdev);
+ if (!cfg->btcoex_info)
+ goto cfg80211_attach_out;
+#endif
+#if defined(SUPPORT_RANDOM_MAC_SCAN)
+ cfg->random_mac_enabled = FALSE;
+#endif /* SUPPORT_RANDOM_MAC_SCAN */
+
+#ifdef CONFIG_CFG80211_INTERNAL_REGDB
+ wdev->wiphy->reg_notifier = wl_cfg80211_reg_notifier;
+#endif /* CONFIG_CFG80211_INTERNAL_REGDB */
+
+#if defined(WL_ENABLE_P2P_IF)
+ err = wl_cfg80211_attach_p2p(cfg);
+ if (err)
+ goto cfg80211_attach_out;
+#endif
+
+ INIT_DELAYED_WORK(&cfg->pm_enable_work, wl_cfg80211_work_handler);
+
+#if defined(STAT_REPORT)
+ err = wl_attach_stat_report(cfg);
+ if (err) {
+ goto cfg80211_attach_out;
+ }
+#endif /* STAT_REPORT */
+ return err;
+
+cfg80211_attach_out:
+ wl_cfg80211_detach(cfg);
+ return err;
+}
+
+void wl_cfg80211_detach(struct bcm_cfg80211 *cfg)
+{
+
+ WL_TRACE(("In\n"));
+ if (!cfg)
+ return;
+
+ wl_add_remove_pm_enable_work(cfg, WL_PM_WORKQ_DEL);
+
+#if defined(COEX_DHCP)
+ wl_cfg80211_btcoex_deinit();
+ cfg->btcoex_info = NULL;
+#endif
+
+ wl_setup_rfkill(cfg, FALSE);
+#ifdef DEBUGFS_CFG80211
+ wl_free_debugfs(cfg);
+#endif
+ if (cfg->p2p_supported) {
+ if (timer_pending(&cfg->p2p->listen_timer))
+ del_timer_sync(&cfg->p2p->listen_timer);
+ wl_cfgp2p_deinit_priv(cfg);
+ }
+
+ if (timer_pending(&cfg->scan_timeout))
+ del_timer_sync(&cfg->scan_timeout);
+#ifdef DHD_LOSSLESS_ROAMING
+ if (timer_pending(&cfg->roam_timeout)) {
+ del_timer_sync(&cfg->roam_timeout);
+ }
+#endif /* DHD_LOSSLESS_ROAMING */
+
+#if defined(WL_CFG80211_P2P_DEV_IF)
+ if (cfg->p2p_wdev)
+ wl_cfgp2p_del_p2p_disc_if(cfg->p2p_wdev, cfg);
+#endif /* WL_CFG80211_P2P_DEV_IF */
+#if defined(WL_ENABLE_P2P_IF)
+ wl_cfg80211_detach_p2p(cfg);
+#endif
+#if defined(STAT_REPORT)
+ wl_detach_stat_report(cfg);
+#endif /* STAT_REPORT */
+
+ wl_cfg80211_ibss_vsie_free(cfg);
+ wl_cfg80211_clear_mgmt_vndr_ies(cfg);
+ wl_deinit_priv(cfg);
+ wl_cfg80211_clear_parent_dev();
+#if defined(RSSIAVG)
+ wl_free_rssi_cache(&cfg->g_rssi_cache_ctrl);
+ wl_free_rssi_cache(&cfg->g_connected_rssi_cache_ctrl);
+#endif
+#if defined(BSSCACHE)
+ wl_release_bss_cache_ctrl(&cfg->g_bss_cache_ctrl);
+#endif
+ wl_free_wdev(cfg);
+ /* PLEASE do NOT call any function after wl_free_wdev, the driver's private
+ * structure "cfg", which is the private part of wiphy, has been freed in
+ * wl_free_wdev !!!!!!!!!!!
+ */
+}
+
+static void wl_event_handler(struct work_struct *work_data)
+{
+ struct bcm_cfg80211 *cfg = NULL;
+ struct wl_event_q *e;
+ struct wireless_dev *wdev = NULL;
+
+ BCM_SET_CONTAINER_OF(cfg, work_data, struct bcm_cfg80211, event_work);
+ DHD_EVENT_WAKE_LOCK(cfg->pub);
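+ /* Drain the event queue and dispatch each event to its registered handler while holding the event wake lock */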
+ while ((e = wl_deq_event(cfg))) {
+ WL_DBG(("event type (%d), ifidx: %d bssidx: %d \n",
+ e->etype, e->emsg.ifidx, e->emsg.bsscfgidx));
+
+ if (e->emsg.ifidx > WL_MAX_IFS) {
+ WL_ERR((" Event ifidx not in range. val:%d \n", e->emsg.ifidx));
+ goto fail;
+ }
+
+ /* Make sure iface operations don't create race conditions */
+ mutex_lock(&cfg->if_sync);
+ if (!(wdev = wl_get_wdev_by_bssidx(cfg, e->emsg.bsscfgidx))) {
+ /* For WLC_E_IF would be handled by wl_host_event */
+ if (e->etype != WLC_E_IF)
+ WL_ERR(("No wdev corresponding to bssidx: 0x%x found!"
+ " Ignoring event.\n", e->emsg.bsscfgidx));
+ } else if (e->etype < WLC_E_LAST && cfg->evt_handler[e->etype]) {
+ dhd_pub_t *dhd = (struct dhd_pub *)(cfg->pub);
+ if (dhd->busstate == DHD_BUS_DOWN) {
+ WL_ERR((": BUS is DOWN.\n"));
+ } else
+ cfg->evt_handler[e->etype](cfg, wdev_to_cfgdev(wdev),
+ &e->emsg, e->edata);
+ } else {
+ WL_DBG(("Unknown Event (%d): ignoring\n", e->etype));
+ }
+ mutex_unlock(&cfg->if_sync);
+fail:
+ wl_put_event(e);
+ }
+ DHD_EVENT_WAKE_UNLOCK(cfg->pub);
+}
+
+void
+wl_cfg80211_event(struct net_device *ndev, const wl_event_msg_t * e, void *data)
{
- s32 status = ntoh32(e->status);
u32 event_type = ntoh32(e->event_type);
struct bcm_cfg80211 *cfg = wl_get_cfg(ndev);
struct net_info *netinfo;
- WL_DBG(("event_type (%d): reason (%d): %s\n", event_type, ntoh32(e->reason),
- bcmevent_get_name(event_type)));
+ WL_DBG(("event_type (%d): %s\n", event_type, bcmevent_get_name(event_type)));
+
if ((cfg == NULL) || (cfg->p2p_supported && cfg->p2p == NULL)) {
WL_ERR(("Stale event ignored\n"));
return;
return;
}
- if (event_type == WLC_E_IF) {
- /* Don't process WLC_E_IF events in wl_cfg80211 layer */
+ if (wl_get_p2p_status(cfg, IF_CHANGING) || wl_get_p2p_status(cfg, IF_ADDING)) {
+ WL_ERR(("during IF change, ignore event %d\n", event_type));
return;
}
- netinfo = wl_get_netinfo_by_fw_idx(cfg, e->bsscfgidx, e->ifidx);
+ netinfo = wl_get_netinfo_by_bssidx(cfg, e->bsscfgidx);
if (!netinfo) {
/* Since the netinfo entry is not there, the netdev entry is not
* created via cfg80211 interface. so the event is not of interest
* to the cfg80211 layer.
*/
- WL_TRACE(("ignore event %d, not interested\n", event_type));
- return;
- }
-
- /* Handle wl_cfg80211_critical_events */
- if (wl_cfg80211_handle_critical_events(cfg, e) == BCME_OK) {
+ WL_ERR(("ignore event %d, not interested\n", event_type));
return;
}
if (likely(!wl_enq_event(cfg, ndev, event_type, e, data))) {
queue_work(cfg->event_workq, &cfg->event_work);
}
- /* Mark timeout value for thread sched */
- if ((event_type == WLC_E_ESCAN_RESULT) &&
- ((status == WLC_E_STATUS_SUCCESS) ||
- (status == WLC_E_STATUS_ABORT))) {
- cfg->scan_enq_time = OSL_LOCALTIME_NS();
- WL_INFORM_MEM(("Enqueing escan completion (%d). WQ state:0x%x \n",
- status, work_busy(&cfg->event_work)));
- }
}
static void wl_init_eq(struct bcm_cfg80211 *cfg)
while (!list_empty_careful(&cfg->eq_list)) {
BCM_SET_LIST_FIRST_ENTRY(e, &cfg->eq_list, struct wl_event_q, eq_list);
list_del(&e->eq_list);
- MFREE(cfg->osh, e, e->datalen + sizeof(struct wl_event_q));
+ kfree(e);
}
wl_unlock_eq(cfg, flags);
}
uint32 evtq_size;
uint32 data_len;
unsigned long flags;
+ gfp_t aflags;
data_len = 0;
if (data)
data_len = ntoh32(msg->datalen);
- evtq_size = (uint32)(sizeof(struct wl_event_q) + data_len);
- e = (struct wl_event_q *)MALLOCZ(cfg->osh, evtq_size);
+ evtq_size = sizeof(struct wl_event_q) + data_len;
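+ /* Events may be queued from atomic context, so avoid sleeping allocations there */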
+ aflags = (in_atomic()) ? GFP_ATOMIC : GFP_KERNEL;
+ e = kzalloc(evtq_size, aflags);
if (unlikely(!e)) {
WL_ERR(("event alloc failed\n"));
return -ENOMEM;
memcpy(&e->emsg, msg, sizeof(wl_event_msg_t));
if (data)
memcpy(e->edata, data, data_len);
- e->datalen = data_len;
flags = wl_lock_eq(cfg);
list_add_tail(&e->eq_list, &cfg->eq_list);
wl_unlock_eq(cfg, flags);
return err;
}
-static void wl_put_event(struct bcm_cfg80211 *cfg, struct wl_event_q *e)
+static void wl_put_event(struct wl_event_q *e)
{
- MFREE(cfg->osh, e, e->datalen + sizeof(struct wl_event_q));
+ kfree(e);
}
-static s32 wl_config_infra(struct bcm_cfg80211 *cfg, struct net_device *ndev, u16 iftype)
+static s32 wl_config_ifmode(struct bcm_cfg80211 *cfg, struct net_device *ndev, s32 iftype)
{
s32 infra = 0;
s32 err = 0;
- bool skip_infra = false;
-
+ s32 mode = 0;
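+ /* Map the nl80211 interface type onto the driver mode and the WLC_SET_INFRA value */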
switch (iftype) {
- case WL_IF_TYPE_IBSS:
- case WL_IF_TYPE_AIBSS:
- infra = 0;
- break;
- case WL_IF_TYPE_AP:
- case WL_IF_TYPE_STA:
- case WL_IF_TYPE_P2P_GO:
- case WL_IF_TYPE_P2P_GC:
- /* Intentional fall through */
- infra = 1;
- break;
-#ifdef WLMESH_CFG80211
+ case NL80211_IFTYPE_MONITOR:
+ case NL80211_IFTYPE_WDS:
+ WL_ERR(("type (%d) : currently we do not support this mode\n",
+ iftype));
+ err = -EINVAL;
+ return err;
+ case NL80211_IFTYPE_ADHOC:
+ mode = WL_MODE_IBSS;
+ break;
+ case NL80211_IFTYPE_STATION:
+ case NL80211_IFTYPE_P2P_CLIENT:
+ mode = WL_MODE_BSS;
+ infra = 1;
+ break;
+#ifdef WLMESH
case NL80211_IFTYPE_MESH_POINT:
+ mode = WL_MODE_MESH;
infra = WL_BSSTYPE_MESH;
break;
-#endif /* WLMESH_CFG80211 */
- case WL_IF_TYPE_MONITOR:
- case WL_IF_TYPE_NAN:
- /* Intentionall fall through */
- default:
- skip_infra = true;
- WL_ERR(("Skipping infra setting for type:%d\n", iftype));
- break;
+#endif /* WLMESH */
+ case NL80211_IFTYPE_AP:
+ case NL80211_IFTYPE_P2P_GO:
+ mode = WL_MODE_AP;
+ infra = 1;
+ break;
+ default:
+ err = -EINVAL;
+ WL_ERR(("invalid type (%d)\n", iftype));
+ return err;
}
-
- if (!skip_infra) {
- infra = htod32(infra);
- err = wldev_ioctl_set(ndev, WLC_SET_INFRA, &infra, sizeof(infra));
- if (unlikely(err)) {
- WL_ERR(("WLC_SET_INFRA error (%d)\n", err));
- return err;
- }
+ infra = htod32(infra);
+ err = wldev_ioctl_set(ndev, WLC_SET_INFRA, &infra, sizeof(infra));
+ if (unlikely(err)) {
+ WL_ERR(("WLC_SET_INFRA error (%d)\n", err));
+ return err;
}
+
+ wl_set_mode_by_netdev(cfg, ndev, mode);
+
return 0;
}
u8 *pbuf = NULL;
bool dfs_radar_disabled = FALSE;
-#define LOCAL_BUF_LEN 2048
- pbuf = (u8 *)MALLOCZ(cfg->osh, LOCAL_BUF_LEN);
+#define LOCAL_BUF_LEN 1024
+ pbuf = kzalloc(LOCAL_BUF_LEN, GFP_KERNEL);
+
if (pbuf == NULL) {
WL_ERR(("failed to allocate local buf\n"));
return -ENOMEM;
0, pbuf, LOCAL_BUF_LEN, 0, &cfg->ioctl_buf_sync);
if (err != 0) {
WL_ERR(("get chanspecs failed with %d\n", err));
- MFREE(cfg->osh, pbuf, LOCAL_BUF_LEN);
+ kfree(pbuf);
return err;
}
+#undef LOCAL_BUF_LEN
list = (wl_uint32_list_t *)(void *)pbuf;
band = array_size = n_2g = n_5g = 0;
if (!dhd_conf_match_channel(cfg->pub, channel))
continue;
if (index < array_size) {
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 39) && !defined(WL_COMPAT_WIRELESS)
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 39)
band_chan_arr[index].center_freq =
ieee80211_channel_to_frequency(channel);
#else
band_chan_arr[index].center_freq =
ieee80211_channel_to_frequency(channel, band);
-#endif // endif
+#endif
band_chan_arr[index].hw_value = channel;
band_chan_arr[index].beacon_found = false;
#else
band_chan_arr[index].flags |=
IEEE80211_CHAN_RADAR;
-#endif // endif
+#endif
}
if (channel & WL_CHAN_PASSIVE)
#else
band_chan_arr[index].flags |=
IEEE80211_CHAN_NO_IR;
-#endif // endif
+#endif
} else if (err == BCME_UNSUPPORTED) {
dfs_radar_disabled = TRUE;
WL_ERR(("does not support per_chan_info\n"));
}
__wl_band_2ghz.n_channels = n_2g;
__wl_band_5ghz_a.n_channels = n_5g;
- MFREE(cfg->osh, pbuf, LOCAL_BUF_LEN);
-#undef LOCAL_BUF_LEN
-
+ kfree(pbuf);
return err;
}
s32 stbc_tx = 0;
s32 txbf_bfe_cap = 0;
s32 txbf_bfr_cap = 0;
-#endif // endif
+#endif
s32 bw_cap = 0;
s32 cur_band = -1;
struct ieee80211_supported_band *bands[IEEE80211_NUM_BANDS] = {NULL, };
- bzero(bandlist, sizeof(bandlist));
+ memset(bandlist, 0, sizeof(bandlist));
err = wldev_ioctl_get(dev, WLC_GET_BANDLIST, bandlist,
sizeof(bandlist));
if (unlikely(err)) {
WL_ERR(("error reading txbf_bfr_cap (%d)\n", err));
}
}
-#endif // endif
+#endif
/* For nmode and vhtmode check bw cap */
if (nmode ||
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 6, 0))
vhtmode ||
-#endif // endif
+#endif
0) {
err = wldev_iovar_getint(dev, "mimo_bw_cap", &bw_cap);
if (unlikely(err)) {
}
}
+
/* Capabilities */
/* 80 MHz is mandatory */
bands[index]->vht_cap.cap |=
/* AMPDU length limit, support max 1MB (2 ^ (13 + 7)) */
bands[index]->vht_cap.cap |=
(7 << VHT_CAP_INFO_AMPDU_MAXLEN_EXP_SHIFT);
- WL_DBG(("__wl_update_wiphybands band[%d] vht_enab=%d vht_cap=%08x "
+ WL_INFORM(("%s band[%d] vht_enab=%d vht_cap=%08x "
"vht_rx_mcs_map=%04x vht_tx_mcs_map=%04x\n",
- index,
+ __FUNCTION__, index,
bands[index]->vht_cap.vht_supported,
bands[index]->vht_cap.cap,
bands[index]->vht_cap.vht_mcs.rx_mcs_map,
bands[index]->vht_cap.vht_mcs.tx_mcs_map));
}
-#endif // endif
+#endif
}
else if (bandlist[i] == WLC_BAND_2G && __wl_band_2ghz.n_channels > 0) {
bands[IEEE80211_BAND_2GHZ] =
bands[index]->ht_cap.cap |= IEEE80211_HT_CAP_SGI_40;
}
- if ((index >= 0) && nmode) {
- bands[index]->ht_cap.cap |=
- (IEEE80211_HT_CAP_SGI_20 | IEEE80211_HT_CAP_DSSSCCK40);
- bands[index]->ht_cap.ht_supported = TRUE;
- bands[index]->ht_cap.ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K;
- bands[index]->ht_cap.ampdu_density = IEEE80211_HT_MPDU_DENSITY_16;
- /* An HT shall support all EQM rates for one spatial stream */
- bands[index]->ht_cap.mcs.rx_mask[0] = 0xff;
- }
-
- }
-
- wiphy->bands[IEEE80211_BAND_2GHZ] = bands[IEEE80211_BAND_2GHZ];
- wiphy->bands[IEEE80211_BAND_5GHZ] = bands[IEEE80211_BAND_5GHZ];
-
- /* check if any bands populated otherwise makes 2Ghz as default */
- if (wiphy->bands[IEEE80211_BAND_2GHZ] == NULL &&
- wiphy->bands[IEEE80211_BAND_5GHZ] == NULL) {
- /* Setup 2Ghz band as default */
- wiphy->bands[IEEE80211_BAND_2GHZ] = &__wl_band_2ghz;
- }
-
- if (notify)
- wiphy_apply_custom_regulatory(wiphy, &brcm_regdom);
-
- return 0;
-}
-
-s32 wl_update_wiphybands(struct bcm_cfg80211 *cfg, bool notify)
-{
- s32 err;
-
- mutex_lock(&cfg->usr_sync);
- err = __wl_update_wiphybands(cfg, notify);
- mutex_unlock(&cfg->usr_sync);
-
- return err;
-}
-
-static s32 __wl_cfg80211_up(struct bcm_cfg80211 *cfg)
-{
- s32 err = 0;
-#ifdef WL_HOST_BAND_MGMT
- s32 ret = 0;
-#endif /* WL_HOST_BAND_MGMT */
- struct net_device *ndev = bcmcfg_to_prmry_ndev(cfg);
- struct wireless_dev *wdev = ndev->ieee80211_ptr;
-#if defined(WL_NANP2P)
- dhd_pub_t *dhd = (dhd_pub_t *)(cfg->pub);
-#endif // endif
-#ifdef WLTDLS
- u32 tdls;
-#endif /* WLTDLS */
- u16 wl_iftype = 0;
- u16 wl_mode = 0;
- u8 ioctl_buf[WLC_IOCTL_SMLEN];
-
- WL_DBG(("In\n"));
-
- if (!dhd_download_fw_on_driverload) {
- err = wl_create_event_handler(cfg);
- if (err) {
- WL_ERR(("wl_create_event_handler failed\n"));
- return err;
- }
- wl_init_event_handler(cfg);
- }
- /* Reserve 0x8000 toggle bit for P2P GO/GC */
- cfg->vif_macaddr_mask = 0x8000;
-
- err = dhd_config_dongle(cfg);
- if (unlikely(err))
- return err;
-
-#if 0
- /* terence 20180108: this patch will cause to kernel panic with below
- * steps in Android 4.4 with kernel 3.4
- * insmod bcmdhd.ko; hostapd /data/misc/wifi/hostapd.conf
- */
- /* Always bring up interface in STA mode.
- * Did observe , if previous SofAP Bringup/cleanup
- * is not done properly, iftype is stuck with AP mode.
- * So during next wlan0 up, forcing the type to STA
- */
- netinfo = wl_get_netinfo_by_wdev(cfg, wdev);
- if (!netinfo) {
- WL_ERR(("there is no netinfo\n"));
- return -ENODEV;
- }
- ndev->ieee80211_ptr->iftype = NL80211_IFTYPE_STATION;
- netinfo->iftype = WL_IF_TYPE_STA;
-#endif
-
- if (cfg80211_to_wl_iftype(wdev->iftype, &wl_iftype, &wl_mode) < 0) {
- return -EINVAL;
- }
- err = wl_config_infra(cfg, ndev, wl_iftype);
- if (unlikely(err && err != -EINPROGRESS)) {
- WL_ERR(("wl_config_infra failed\n"));
- if (err == -1) {
- WL_ERR(("return error %d\n", err));
- return err;
- }
- }
-
- err = wl_init_scan(cfg);
- if (err) {
- WL_ERR(("wl_init_scan failed\n"));
- return err;
- }
- err = __wl_update_wiphybands(cfg, true);
- if (unlikely(err)) {
- WL_ERR(("wl_update_wiphybands failed\n"));
- if (err == -1) {
- WL_ERR(("return error %d\n", err));
- return err;
- }
- }
-
- err = wldev_iovar_getbuf(ndev, "wlc_ver", NULL, 0,
- &cfg->wlc_ver, sizeof(wl_wlc_version_t), NULL);
- if (likely(!err)) {
- WL_INFORM(("wl version. Major: %d\n",
- cfg->wlc_ver.wlc_ver_major));
- if ((cfg->wlc_ver.wlc_ver_major >= MIN_ESCAN_PARAM_V2_FW_MAJOR) &&
- (wldev_iovar_getbuf(ndev, "scan_ver", NULL, 0,
- ioctl_buf, sizeof(ioctl_buf), NULL) == BCME_OK)) {
- WL_INFORM_MEM(("scan_params v2\n"));
- /* use scan_params ver2 */
- cfg->scan_params_v2 = true;
- }
- } else {
- if (err == BCME_UNSUPPORTED) {
- /* Ignore on unsupported chips */
- err = BCME_OK;
- } else {
- WL_ERR(("wlc_ver query failed. err: %d\n", err));
- return err;
- }
- }
-#ifdef DHD_LOSSLESS_ROAMING
- if (timer_pending(&cfg->roam_timeout)) {
- del_timer_sync(&cfg->roam_timeout);
- }
-#endif /* DHD_LOSSLESS_ROAMING */
-
- err = dhd_monitor_init(cfg->pub);
-
-#ifdef WL_HOST_BAND_MGMT
- /* By default the curr_band is initialized to BAND_AUTO */
- if ((ret = wl_cfg80211_set_band(ndev, WLC_BAND_AUTO)) < 0) {
- if (ret == BCME_UNSUPPORTED) {
- /* Don't fail the initialization, lets just
- * fall back to the original method
- */
- WL_ERR(("WL_HOST_BAND_MGMT defined, "
- "but roam_band iovar not supported \n"));
- } else {
- WL_ERR(("roam_band failed. ret=%d", ret));
- err = -1;
- }
- }
-#endif /* WL_HOST_BAND_MGMT */
-#ifdef WLTDLS
- if (wldev_iovar_getint(ndev, "tdls_enable", &tdls) == 0) {
- WL_DBG(("TDLS supported in fw\n"));
- cfg->tdls_supported = true;
- }
-#endif /* WLTDLS */
-#ifdef WL_IFACE_MGMT
-#ifdef CUSTOM_IF_MGMT_POLICY
- cfg->iface_data.policy = CUSTOM_IF_MGMT_POLICY;
-#else
- cfg->iface_data.policy = WL_IF_POLICY_DEFAULT;
-#endif /* CUSTOM_IF_MGMT_POLICY */
-#endif /* WL_IFACE_MGMT */
-#ifdef WL_NAN
-#ifdef WL_NANP2P
- if (FW_SUPPORTED(dhd, nanp2p)) {
- /* Enable NANP2P concurrent support */
- cfg->conc_disc = WL_NANP2P_CONC_SUPPORT;
- WL_INFORM_MEM(("nan + p2p conc discovery is supported\n"));
- cfg->nan_p2p_supported = true;
- }
-#endif /* WL_NANP2P */
-#endif /* WL_NAN */
-
- INIT_DELAYED_WORK(&cfg->pm_enable_work, wl_cfg80211_work_handler);
- wl_set_drv_status(cfg, READY, ndev);
- return err;
-}
-
-static s32 __wl_cfg80211_down(struct bcm_cfg80211 *cfg)
-{
- s32 err = 0;
- struct net_info *iter, *next;
- struct net_device *ndev = bcmcfg_to_prmry_ndev(cfg);
-#if defined(WL_CFG80211) && (defined(WL_ENABLE_P2P_IF) || \
- defined(WL_NEW_CFG_PRIVCMD_SUPPORT)) && !defined(PLATFORM_SLP)
- struct net_device *p2p_net = cfg->p2p_net;
-#endif
-
- dhd_pub_t *dhd = (dhd_pub_t *)(cfg->pub);
- WL_INFORM_MEM(("cfg80211 down\n"));
-
- /* Check if cfg80211 interface is already down */
- if (!wl_get_drv_status(cfg, READY, ndev)) {
- WL_DBG(("cfg80211 interface is already down\n"));
- return err; /* it is even not ready */
- }
-
-#ifdef SHOW_LOGTRACE
- /* Stop the event logging */
- wl_add_remove_eventmsg(ndev, WLC_E_TRACE, FALSE);
-#endif /* SHOW_LOGTRACE */
-
- /* clear vendor OUI list */
- wl_vndr_ies_clear_vendor_oui_list(cfg);
-
- /* Delete pm_enable_work */
- wl_add_remove_pm_enable_work(cfg, WL_PM_WORKQ_DEL);
-
- if (cfg->p2p_supported) {
- wl_clr_p2p_status(cfg, GO_NEG_PHASE);
-#ifdef PROP_TXSTATUS_VSDB
-#if defined(BCMSDIO) || defined(BCMDBUS)
- if (wl_cfgp2p_vif_created(cfg)) {
- bool enabled = false;
- dhd_wlfc_get_enable(dhd, &enabled);
- if (enabled && cfg->wlfc_on && dhd->op_mode != DHD_FLAG_HOSTAP_MODE &&
- dhd->op_mode != DHD_FLAG_IBSS_MODE) {
- dhd_wlfc_deinit(dhd);
- cfg->wlfc_on = false;
- }
- }
-#endif /* BCMSDIO || BCMDBUS */
-#endif /* PROP_TXSTATUS_VSDB */
- }
-
-#ifdef WL_NAN
- mutex_lock(&cfg->if_sync);
- /* Cancel pending nan disable work if any */
- if (delayed_work_pending(&cfg->nan_disable)) {
- WL_DBG(("Unarm the nan_disable work\n"));
- cancel_delayed_work_sync(&cfg->nan_disable);
- }
- cfg->nancfg.disable_reason = NAN_BUS_IS_DOWN;
- wl_cfgnan_disable(cfg);
- mutex_unlock(&cfg->if_sync);
-#endif /* WL_NAN */
-
- if (!dhd_download_fw_on_driverload) {
- /* For built-in drivers/other drivers that do reset on
- * "ifconfig <primary_iface> down", cleanup any left
- * over interfaces
- */
- wl_cfg80211_cleanup_virtual_ifaces(cfg, false);
- }
- /* Clear used mac addr mask */
- cfg->vif_macaddr_mask = 0;
-
- if (dhd->up)
- {
- /* If primary BSS is operational (for e.g SoftAP), bring it down */
- if (wl_cfg80211_bss_isup(ndev, 0)) {
- if (wl_cfg80211_bss_up(cfg, ndev, 0, 0) < 0)
- WL_ERR(("BSS down failed \n"));
- }
-
- /* clear all the security setting on primary Interface */
- wl_cfg80211_clear_security(cfg);
- }
-
- GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
- for_each_ndev(cfg, iter, next) {
- GCC_DIAGNOSTIC_POP();
- if (iter->ndev) /* p2p discovery iface is null */
- wl_set_drv_status(cfg, SCAN_ABORTING, iter->ndev);
- }
-
-#ifdef P2P_LISTEN_OFFLOADING
- wl_cfg80211_p2plo_deinit(cfg);
-#endif /* P2P_LISTEN_OFFLOADING */
-
- /* cancel and notify scan complete, if scan request is pending */
- wl_cfg80211_cancel_scan(cfg);
- GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
- for_each_ndev(cfg, iter, next) {
- GCC_DIAGNOSTIC_POP();
- /* p2p discovery iface ndev ptr could be null */
- if (iter->ndev == NULL)
- continue;
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 14, 0))
- WL_INFORM_MEM(("wl_cfg80211_down. connection state bit status: [%u:%u:%u:%u]\n",
- wl_get_drv_status(cfg, CONNECTING, ndev),
- wl_get_drv_status(cfg, CONNECTED, ndev),
- wl_get_drv_status(cfg, DISCONNECTING, ndev),
- wl_get_drv_status(cfg, NESTED_CONNECT, ndev)));
-
- if (wl_get_drv_status(cfg, CONNECTED, iter->ndev)) {
- CFG80211_DISCONNECTED(iter->ndev, 0, NULL, 0, false, GFP_KERNEL);
- }
-
- if ((iter->ndev->ieee80211_ptr->iftype == NL80211_IFTYPE_STATION) &&
- wl_get_drv_status(cfg, CONNECTING, iter->ndev)) {
-
- u8 *latest_bssid = wl_read_prof(cfg, ndev, WL_PROF_LATEST_BSSID);
- struct wiphy *wiphy = bcmcfg_to_wiphy(cfg);
- struct wireless_dev *wdev = ndev->ieee80211_ptr;
- struct cfg80211_bss *bss = CFG80211_GET_BSS(wiphy, NULL, latest_bssid,
- wdev->ssid, wdev->ssid_len);
-
- BCM_REFERENCE(bss);
-
- CFG80211_CONNECT_RESULT(ndev,
- latest_bssid, bss, NULL, 0, NULL, 0,
- WLAN_STATUS_UNSPECIFIED_FAILURE,
- GFP_KERNEL);
- }
-#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 14, 0)) */
- wl_clr_drv_status(cfg, READY, iter->ndev);
- wl_clr_drv_status(cfg, SCANNING, iter->ndev);
- wl_clr_drv_status(cfg, SCAN_ABORTING, iter->ndev);
- wl_clr_drv_status(cfg, CONNECTING, iter->ndev);
- wl_clr_drv_status(cfg, CONNECTED, iter->ndev);
- wl_clr_drv_status(cfg, DISCONNECTING, iter->ndev);
- wl_clr_drv_status(cfg, AP_CREATED, iter->ndev);
- wl_clr_drv_status(cfg, AP_CREATING, iter->ndev);
- wl_clr_drv_status(cfg, NESTED_CONNECT, iter->ndev);
- wl_clr_drv_status(cfg, CFG80211_CONNECT, iter->ndev);
- }
- bcmcfg_to_prmry_ndev(cfg)->ieee80211_ptr->iftype =
- NL80211_IFTYPE_STATION;
-#if defined(WL_CFG80211) && (defined(WL_ENABLE_P2P_IF) || \
- defined(WL_NEW_CFG_PRIVCMD_SUPPORT)) && !defined(PLATFORM_SLP)
- if (p2p_net)
- dev_close(p2p_net);
-#endif
-
- /* Avoid deadlock from wl_cfg80211_down */
- if (!dhd_download_fw_on_driverload) {
- mutex_unlock(&cfg->usr_sync);
- wl_destroy_event_handler(cfg);
- mutex_lock(&cfg->usr_sync);
- }
-
- wl_flush_eq(cfg);
- wl_link_down(cfg);
- if (cfg->p2p_supported) {
- if (timer_pending(&cfg->p2p->listen_timer))
- del_timer_sync(&cfg->p2p->listen_timer);
- wl_cfgp2p_down(cfg);
- }
-
- if (timer_pending(&cfg->scan_timeout)) {
- del_timer_sync(&cfg->scan_timeout);
- }
-
- wl_cfg80211_clear_mgmt_vndr_ies(cfg);
- DHD_OS_SCAN_WAKE_UNLOCK((dhd_pub_t *)(cfg->pub));
-
- dhd_monitor_uninit();
-#ifdef WLAIBSS_MCHAN
- bcm_cfg80211_del_ibss_if(cfg->wdev->wiphy, cfg->ibss_cfgdev);
-#endif /* WLAIBSS_MCHAN */
-
-#ifdef WL11U
- /* Clear interworking element. */
- if (cfg->wl11u) {
- cfg->wl11u = FALSE;
- }
-#endif /* WL11U */
-
- cfg->disable_roam_event = false;
-
- DNGL_FUNC(dhd_cfg80211_down, (cfg));
-
-#ifdef DHD_IFDEBUG
- /* Printout all netinfo entries */
- wl_probe_wdev_all(cfg);
-#endif /* DHD_IFDEBUG */
-
- return err;
-}
-
-s32 wl_cfg80211_up(struct net_device *net)
-{
- struct bcm_cfg80211 *cfg;
- s32 err = 0;
- int val = 1;
- dhd_pub_t *dhd;
-#ifdef DISABLE_PM_BCNRX
- s32 interr = 0;
- uint param = 0;
- s8 iovbuf[WLC_IOCTL_SMLEN];
-#endif /* DISABLE_PM_BCNRX */
-
- WL_DBG(("In\n"));
- cfg = wl_get_cfg(net);
-
- if ((err = wldev_ioctl_get(bcmcfg_to_prmry_ndev(cfg), WLC_GET_VERSION, &val,
- sizeof(int)) < 0)) {
- WL_ERR(("WLC_GET_VERSION failed, err=%d\n", err));
- return err;
- }
- val = dtoh32(val);
- if (val != WLC_IOCTL_VERSION && val != 1) {
- WL_ERR(("Version mismatch, please upgrade. Got %d, expected %d or 1\n",
- val, WLC_IOCTL_VERSION));
- return BCME_VERSION;
- }
- ioctl_version = val;
- WL_TRACE(("WLC_GET_VERSION=%d\n", ioctl_version));
- wl_cfg80211_check_in4way(cfg, net, NO_SCAN_IN4WAY|NO_BTC_IN4WAY|WAIT_DISCONNECTED,
- WL_EXT_STATUS_DISCONNECTED, NULL);
-
- mutex_lock(&cfg->usr_sync);
- dhd = (dhd_pub_t *)(cfg->pub);
- if (!(dhd->op_mode & DHD_FLAG_HOSTAP_MODE)) {
- err = wl_cfg80211_attach_post(bcmcfg_to_prmry_ndev(cfg));
- if (unlikely(err)) {
- mutex_unlock(&cfg->usr_sync);
- return err;
- }
- }
-#ifdef WLMESH_CFG80211
- cfg->wdev->wiphy->features |= NL80211_FEATURE_USERSPACE_MPM;
-#endif /* WLMESH_CFG80211 */
-#if defined(BCMSUP_4WAY_HANDSHAKE)
- if (dhd->fw_4way_handshake) {
- /* This is a hacky method to indicate fw 4WHS support and
- * is used only for kernels (kernels < 3.14). For newer
- * kernels, we would be using vendor extn. path to advertise
- * FW based 4-way handshake feature support.
- */
- cfg->wdev->wiphy->features |= NL80211_FEATURE_FW_4WAY_HANDSHAKE;
- }
-#endif /* BCMSUP_4WAY_HANDSHAKE */
- err = __wl_cfg80211_up(cfg);
- if (unlikely(err))
- WL_ERR(("__wl_cfg80211_up failed\n"));
-
-#ifdef ROAM_CHANNEL_CACHE
- if (init_roam_cache(cfg, ioctl_version) == 0) {
- /* Enable support for Roam cache */
- cfg->rcc_enabled = true;
- WL_ERR(("Roam channel cache enabled\n"));
- } else {
- WL_ERR(("Failed to enable RCC.\n"));
- }
-#endif /* ROAM_CHANNEL_CACHE */
-
- /* IOVAR configurations with 'up' condition */
-#ifdef DISABLE_PM_BCNRX
- interr = wldev_iovar_setbuf(net, "pm_bcnrx", (char *)¶m, sizeof(param), iovbuf,
- sizeof(iovbuf), &cfg->ioctl_buf_sync);
-
- if (unlikely(interr)) {
- WL_ERR(("Set pm_bcnrx returned (%d)\n", interr));
- }
-#endif /* DISABLE_PM_BCNRX */
-#ifdef WL_CHAN_UTIL
- interr = wl_cfg80211_start_bssload_report(net);
- if (unlikely(interr)) {
- WL_ERR(("%s: Failed to start bssload_report eventing, err=%d\n",
- __FUNCTION__, interr));
- }
-#endif /* WL_CHAN_UTIL */
-
- mutex_unlock(&cfg->usr_sync);
-
-#ifdef WLAIBSS_MCHAN
- bcm_cfg80211_add_ibss_if(cfg->wdev->wiphy, IBSS_IF_NAME);
-#endif /* WLAIBSS_MCHAN */
- return err;
-}
-
-/* Private Event to Supplicant with indication that chip hangs */
-int wl_cfg80211_hang(struct net_device *dev, u16 reason)
-{
- struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
- dhd_pub_t *dhd;
- if (!cfg) {
- return BCME_ERROR;
- }
-
- RETURN_EIO_IF_NOT_UP(cfg);
-
- dhd = (dhd_pub_t *)(cfg->pub);
- if ((dhd->hang_reason <= HANG_REASON_MASK) || (dhd->hang_reason >= HANG_REASON_MAX)) {
- WL_ERR(("wl_cfg80211_hang, Invalid hang reason 0x%x\n",
- dhd->hang_reason));
- dhd->hang_reason = HANG_REASON_UNKNOWN;
- }
- WL_ERR(("In : chip crash eventing, reason=0x%x\n", (uint32)(dhd->hang_reason)));
-
- wl_add_remove_pm_enable_work(cfg, WL_PM_WORKQ_DEL);
- {
- if (dhd->up == TRUE) {
- CFG80211_DISCONNECTED(dev, reason, NULL, 0, false, GFP_KERNEL);
- }
- }
-#if defined(RSSIAVG)
- wl_free_rssi_cache(&cfg->g_rssi_cache_ctrl);
-#endif
-#if defined(BSSCACHE)
- wl_free_bss_cache(&cfg->g_bss_cache_ctrl);
-#endif
- if (cfg != NULL) {
- wl_link_down(cfg);
- }
- return 0;
-}
-
-s32 wl_cfg80211_down(struct net_device *dev)
-{
- struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
- s32 err = BCME_ERROR;
-
- WL_DBG(("In\n"));
-
- if (cfg && (cfg == wl_cfg80211_get_bcmcfg())) {
- mutex_lock(&cfg->usr_sync);
-#if defined(RSSIAVG)
- wl_free_rssi_cache(&cfg->g_rssi_cache_ctrl);
-#endif
-#if defined(BSSCACHE)
- wl_free_bss_cache(&cfg->g_bss_cache_ctrl);
-#endif
- err = __wl_cfg80211_down(cfg);
- mutex_unlock(&cfg->usr_sync);
- }
-
- return err;
-}
-
-void
-wl_cfg80211_sta_ifdown(struct net_device *dev)
-{
- struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
-
- WL_DBG(("In\n"));
-
- if (cfg) {
- /* cancel scan if anything pending */
- wl_cfg80211_cancel_scan(cfg);
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 14, 0))
- if ((dev->ieee80211_ptr->iftype == NL80211_IFTYPE_STATION) &&
- wl_get_drv_status(cfg, CONNECTED, dev)) {
- CFG80211_DISCONNECTED(dev, 0, NULL, 0, false, GFP_KERNEL);
- }
-#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 14, 0)) */
- }
-}
-
-void *wl_read_prof(struct bcm_cfg80211 *cfg, struct net_device *ndev, s32 item)
-{
- unsigned long flags;
- void *rptr = NULL;
- struct wl_profile *profile = wl_get_profile_by_netdev(cfg, ndev);
-
- if (!profile)
- return NULL;
- WL_CFG_DRV_LOCK(&cfg->cfgdrv_lock, flags);
- switch (item) {
- case WL_PROF_SEC:
- rptr = &profile->sec;
- break;
- case WL_PROF_ACT:
- rptr = &profile->active;
- break;
- case WL_PROF_BSSID:
- rptr = profile->bssid;
- break;
- case WL_PROF_SSID:
- rptr = &profile->ssid;
- break;
- case WL_PROF_CHAN:
- rptr = &profile->channel;
- break;
- case WL_PROF_LATEST_BSSID:
- rptr = profile->latest_bssid;
- break;
- }
- WL_CFG_DRV_UNLOCK(&cfg->cfgdrv_lock, flags);
- if (!rptr)
- WL_ERR(("invalid item (%d)\n", item));
- return rptr;
-}
-
-static s32
-wl_update_prof(struct bcm_cfg80211 *cfg, struct net_device *ndev,
- const wl_event_msg_t *e, const void *data, s32 item)
-{
- s32 err = 0;
- const struct wlc_ssid *ssid;
- unsigned long flags;
- struct wl_profile *profile = wl_get_profile_by_netdev(cfg, ndev);
-
- if (!profile)
- return WL_INVALID;
- WL_CFG_DRV_LOCK(&cfg->cfgdrv_lock, flags);
- switch (item) {
- case WL_PROF_SSID:
- ssid = (const wlc_ssid_t *) data;
- bzero(profile->ssid.SSID,
- sizeof(profile->ssid.SSID));
- profile->ssid.SSID_len = MIN(ssid->SSID_len, DOT11_MAX_SSID_LEN);
- memcpy(profile->ssid.SSID, ssid->SSID, profile->ssid.SSID_len);
- break;
- case WL_PROF_BSSID:
- if (data)
- memcpy(profile->bssid, data, ETHER_ADDR_LEN);
- else
- bzero(profile->bssid, ETHER_ADDR_LEN);
- break;
- case WL_PROF_SEC:
- memcpy(&profile->sec, data, sizeof(profile->sec));
- break;
- case WL_PROF_ACT:
- profile->active = *(const bool *)data;
- break;
- case WL_PROF_BEACONINT:
- profile->beacon_interval = *(const u16 *)data;
- break;
- case WL_PROF_DTIMPERIOD:
- profile->dtim_period = *(const u8 *)data;
- break;
- case WL_PROF_CHAN:
- profile->channel = *(const u32*)data;
- break;
- case WL_PROF_LATEST_BSSID:
- if (data) {
- memcpy_s(profile->latest_bssid, sizeof(profile->latest_bssid),
- data, ETHER_ADDR_LEN);
- } else {
- memset_s(profile->latest_bssid, sizeof(profile->latest_bssid),
- 0, ETHER_ADDR_LEN);
- }
- break;
- default:
- err = -EOPNOTSUPP;
- break;
- }
- WL_CFG_DRV_UNLOCK(&cfg->cfgdrv_lock, flags);
-
- if (err == -EOPNOTSUPP)
- WL_ERR(("unsupported item (%d)\n", item));
-
- return err;
-}
-
-void wl_cfg80211_dbg_level(u32 level)
-{
- /*
- * prohibit to change debug level
- * by insmod parameter.
- * eventually debug level will be configured
- * in compile time by using CONFIG_XXX
- */
- /* wl_dbg_level = level; */
-}
-
-static bool wl_is_ibssmode(struct bcm_cfg80211 *cfg, struct net_device *ndev)
-{
- return wl_get_mode_by_netdev(cfg, ndev) == WL_MODE_IBSS;
-}
-
-static __used bool wl_is_ibssstarter(struct bcm_cfg80211 *cfg)
-{
- return cfg->ibss_starter;
-}
-
-static void wl_rst_ie(struct bcm_cfg80211 *cfg)
-{
- struct wl_ie *ie = wl_to_ie(cfg);
-
- ie->offset = 0;
- bzero(ie->buf, sizeof(ie->buf));
-}
-
-static __used s32 wl_add_ie(struct bcm_cfg80211 *cfg, u8 t, u8 l, u8 *v)
-{
- struct wl_ie *ie = wl_to_ie(cfg);
- s32 err = 0;
-
- if (unlikely(ie->offset + l + 2 > WL_TLV_INFO_MAX)) {
- WL_ERR(("ei crosses buffer boundary\n"));
- return -ENOSPC;
- }
- ie->buf[ie->offset] = t;
- ie->buf[ie->offset + 1] = l;
- memcpy(&ie->buf[ie->offset + 2], v, l);
- ie->offset += l + 2;
-
- return err;
-}
-
-static void wl_update_hidden_ap_ie(wl_bss_info_t *bi, const u8 *ie_stream, u32 *ie_size,
- bool update_ssid)
-{
- u8 *ssidie;
- int32 ssid_len = MIN(bi->SSID_len, DOT11_MAX_SSID_LEN);
- int32 remaining_ie_buf_len, available_buffer_len, unused_buf_len;
- /* cfg80211_find_ie defined in kernel returning const u8 */
-
- GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
- ssidie = (u8 *)cfg80211_find_ie(WLAN_EID_SSID, ie_stream, *ie_size);
- GCC_DIAGNOSTIC_POP();
-
- /* ERROR out if
- * 1. No ssid IE is FOUND or
- * 2. New ssid length is > what was allocated for existing ssid (as
- * we do not want to overwrite the rest of the IEs) or
- * 3. If in case of erroneous buffer input where ssid length doesnt match the space
- * allocated to it.
- */
- if (!ssidie) {
- return;
- }
- available_buffer_len = ((int)(*ie_size)) - (ssidie + 2 - ie_stream);
- remaining_ie_buf_len = available_buffer_len - (int)ssidie[1];
- unused_buf_len = WL_EXTRA_BUF_MAX - (4 + bi->length + *ie_size);
- if (ssidie[1] > available_buffer_len) {
- WL_ERR_MEM(("wl_update_hidden_ap_ie: skip wl_update_hidden_ap_ie : overflow\n"));
- return;
- }
-
- if (ssidie[1] != ssid_len) {
- if (ssidie[1]) {
- WL_ERR_RLMT(("wl_update_hidden_ap_ie: Wrong SSID len: %d != %d\n",
- ssidie[1], bi->SSID_len));
- }
- /*
- * The bss info in firmware gets updated from beacon and probe resp.
- * In case of hidden network, the bss_info that got updated by beacon,
- * will not carry SSID and this can result in cfg80211_get_bss not finding a match.
- * so include the SSID element.
- */
- if ((update_ssid && (ssid_len > ssidie[1])) && (unused_buf_len > ssid_len)) {
- WL_INFORM_MEM(("Changing the SSID Info.\n"));
- memmove(ssidie + ssid_len + 2,
- (ssidie + 2) + ssidie[1],
- remaining_ie_buf_len);
- memcpy(ssidie + 2, bi->SSID, ssid_len);
- *ie_size = *ie_size + ssid_len - ssidie[1];
- ssidie[1] = ssid_len;
- } else if (ssid_len < ssidie[1]) {
- WL_ERR_MEM(("wl_update_hidden_ap_ie: Invalid SSID len: %d < %d\n",
- bi->SSID_len, ssidie[1]));
- }
- return;
- }
- if (*(ssidie + 2) == '\0')
- memcpy(ssidie + 2, bi->SSID, ssid_len);
- return;
-}
-
-static s32 wl_mrg_ie(struct bcm_cfg80211 *cfg, u8 *ie_stream, u16 ie_size)
-{
- struct wl_ie *ie = wl_to_ie(cfg);
- s32 err = 0;
-
- if (unlikely(ie->offset + ie_size > WL_TLV_INFO_MAX)) {
- WL_ERR(("ei_stream crosses buffer boundary\n"));
- return -ENOSPC;
- }
- memcpy(&ie->buf[ie->offset], ie_stream, ie_size);
- ie->offset += ie_size;
-
- return err;
-}
-
-static s32 wl_cp_ie(struct bcm_cfg80211 *cfg, u8 *dst, u16 dst_size)
-{
- struct wl_ie *ie = wl_to_ie(cfg);
- s32 err = 0;
-
- if (unlikely(ie->offset > dst_size)) {
- WL_ERR(("dst_size is not enough\n"));
- return -ENOSPC;
- }
- memcpy(dst, &ie->buf[0], ie->offset);
-
- return err;
-}
-
-static u32 wl_get_ielen(struct bcm_cfg80211 *cfg)
-{
- struct wl_ie *ie = wl_to_ie(cfg);
-
- return ie->offset;
-}
-
-static void wl_link_up(struct bcm_cfg80211 *cfg)
-{
- cfg->link_up = true;
-}
-
-static void wl_link_down(struct bcm_cfg80211 *cfg)
-{
- struct wl_connect_info *conn_info = wl_to_conn(cfg);
-
- WL_DBG(("In\n"));
- cfg->link_up = false;
- if (conn_info) {
- conn_info->req_ie_len = 0;
- conn_info->resp_ie_len = 0;
- }
-}
-
-static unsigned long wl_lock_eq(struct bcm_cfg80211 *cfg)
-{
- unsigned long flags;
-
- WL_CFG_EQ_LOCK(&cfg->eq_lock, flags);
- return flags;
-}
-
-static void wl_unlock_eq(struct bcm_cfg80211 *cfg, unsigned long flags)
-{
- WL_CFG_EQ_UNLOCK(&cfg->eq_lock, flags);
-}
-
-static void wl_init_eq_lock(struct bcm_cfg80211 *cfg)
-{
- spin_lock_init(&cfg->eq_lock);
-}
-
-static void wl_delay(u32 ms)
-{
- if (in_atomic() || (ms < jiffies_to_msecs(1))) {
- OSL_DELAY(ms*1000);
- } else {
- OSL_SLEEP(ms);
- }
-}
-
-s32 wl_cfg80211_get_p2p_dev_addr(struct net_device *net, struct ether_addr *p2pdev_addr)
-{
- struct bcm_cfg80211 *cfg = wl_get_cfg(net);
- struct ether_addr primary_mac;
- if (!cfg->p2p)
- return -1;
- if (!p2p_is_on(cfg)) {
- get_primary_mac(cfg, &primary_mac);
-#ifndef WL_P2P_USE_RANDMAC
- wl_cfgp2p_generate_bss_mac(cfg, &primary_mac);
-#endif /* WL_P2P_USE_RANDMAC */
- memcpy((void *)&p2pdev_addr, (void *)&primary_mac, ETHER_ADDR_LEN);
- } else {
- memcpy(p2pdev_addr->octet, wl_to_p2p_bss_macaddr(cfg, P2PAPI_BSSCFG_DEVICE).octet,
- ETHER_ADDR_LEN);
- }
-
- return 0;
-}
-s32 wl_cfg80211_set_p2p_noa(struct net_device *net, char* buf, int len)
-{
- struct bcm_cfg80211 *cfg = wl_get_cfg(net);
-
- return wl_cfgp2p_set_p2p_noa(cfg, net, buf, len);
-}
-
-s32 wl_cfg80211_get_p2p_noa(struct net_device *net, char* buf, int len)
-{
- struct bcm_cfg80211 *cfg = wl_get_cfg(net);
-
- return wl_cfgp2p_get_p2p_noa(cfg, net, buf, len);
-}
-
-s32 wl_cfg80211_set_p2p_ps(struct net_device *net, char* buf, int len)
-{
- struct bcm_cfg80211 *cfg = wl_get_cfg(net);
-
- return wl_cfgp2p_set_p2p_ps(cfg, net, buf, len);
-}
-
-s32 wl_cfg80211_set_p2p_ecsa(struct net_device *net, char* buf, int len)
-{
- struct bcm_cfg80211 *cfg = wl_get_cfg(net);
-
- return wl_cfgp2p_set_p2p_ecsa(cfg, net, buf, len);
-}
-
-s32 wl_cfg80211_increase_p2p_bw(struct net_device *net, char* buf, int len)
-{
- struct bcm_cfg80211 *cfg = wl_get_cfg(net);
-
- return wl_cfgp2p_increase_p2p_bw(cfg, net, buf, len);
-}
-
-#ifdef P2PLISTEN_AP_SAMECHN
-s32 wl_cfg80211_set_p2p_resp_ap_chn(struct net_device *net, s32 enable)
-{
- s32 ret = wldev_iovar_setint(net, "p2p_resp_ap_chn", enable);
-
- if ((ret == 0) && enable) {
- /* disable PM for p2p responding on infra AP channel */
- s32 pm = PM_OFF;
-
- ret = wldev_ioctl_set(net, WLC_SET_PM, &pm, sizeof(pm));
- }
-
- return ret;
-}
-#endif /* P2PLISTEN_AP_SAMECHN */
-
-s32 wl_cfg80211_channel_to_freq(u32 channel)
-{
- int freq = 0;
-
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 39) && !defined(WL_COMPAT_WIRELESS)
- freq = ieee80211_channel_to_frequency(channel);
-#else
- {
- u16 band = 0;
- if (channel <= CH_MAX_2G_CHANNEL)
- band = IEEE80211_BAND_2GHZ;
- else
- band = IEEE80211_BAND_5GHZ;
- freq = ieee80211_channel_to_frequency(channel, band);
- }
-#endif // endif
- return freq;
-}
-
-#ifdef WLTDLS
-s32
-wl_tdls_event_handler(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev,
- const wl_event_msg_t *e, void *data) {
-
- struct net_device *ndev = NULL;
- u32 reason = ntoh32(e->reason);
- s8 *msg = NULL;
-
- ndev = cfgdev_to_wlc_ndev(cfgdev, cfg);
-
- switch (reason) {
- case WLC_E_TDLS_PEER_DISCOVERED :
- msg = " TDLS PEER DISCOVERD ";
- break;
- case WLC_E_TDLS_PEER_CONNECTED :
- if (cfg->tdls_mgmt_frame) {
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 18, 0))
- cfg80211_rx_mgmt(cfgdev, cfg->tdls_mgmt_freq, 0,
- cfg->tdls_mgmt_frame, cfg->tdls_mgmt_frame_len, 0);
-#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 12, 0))
- cfg80211_rx_mgmt(cfgdev, cfg->tdls_mgmt_freq, 0,
- cfg->tdls_mgmt_frame, cfg->tdls_mgmt_frame_len, 0,
- GFP_ATOMIC);
-#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 4, 0)) || \
- defined(WL_COMPAT_WIRELESS)
- cfg80211_rx_mgmt(cfgdev, cfg->tdls_mgmt_freq, 0,
- cfg->tdls_mgmt_frame, cfg->tdls_mgmt_frame_len,
- GFP_ATOMIC);
-#else
- cfg80211_rx_mgmt(cfgdev, cfg->tdls_mgmt_freq,
- cfg->tdls_mgmt_frame, cfg->tdls_mgmt_frame_len, GFP_ATOMIC);
-
-#endif /* LINUX_VERSION >= VERSION(3, 18,0) || WL_COMPAT_WIRELESS */
- }
- msg = " TDLS PEER CONNECTED ";
-#ifdef SUPPORT_SET_CAC
- /* TDLS connect reset CAC */
- wl_cfg80211_set_cac(cfg, 0);
-#endif /* SUPPORT_SET_CAC */
- break;
- case WLC_E_TDLS_PEER_DISCONNECTED :
- if (cfg->tdls_mgmt_frame) {
- MFREE(cfg->osh, cfg->tdls_mgmt_frame, cfg->tdls_mgmt_frame_len);
- cfg->tdls_mgmt_frame_len = 0;
- cfg->tdls_mgmt_freq = 0;
- }
- msg = "TDLS PEER DISCONNECTED ";
-#ifdef SUPPORT_SET_CAC
- /* TDLS disconnec, set CAC */
- wl_cfg80211_set_cac(cfg, 1);
-#endif /* SUPPORT_SET_CAC */
- break;
- }
- if (msg) {
- WL_ERR(("%s: " MACDBG " on %s ndev\n", msg, MAC2STRDBG((const u8*)(&e->addr)),
- (bcmcfg_to_prmry_ndev(cfg) == ndev) ? "primary" : "secondary"));
- }
- return 0;
-
-}
-#endif /* WLTDLS */
-
-static s32
-#if (LINUX_VERSION_CODE > KERNEL_VERSION(3, 2, 0)) || defined(WL_COMPAT_WIRELESS)
-#if (defined(CONFIG_ARCH_MSM) && defined(TDLS_MGMT_VERSION2)) || (LINUX_VERSION_CODE < \
- KERNEL_VERSION(3, 16, 0) && LINUX_VERSION_CODE >= KERNEL_VERSION(3, 15, 0))
-wl_cfg80211_tdls_mgmt(struct wiphy *wiphy, struct net_device *dev,
- u8 *peer, u8 action_code, u8 dialog_token, u16 status_code,
- u32 peer_capability, const u8 *buf, size_t len)
-#elif ((LINUX_VERSION_CODE >= KERNEL_VERSION(3, 16, 0)) && \
- (LINUX_VERSION_CODE < KERNEL_VERSION(3, 18, 0)))
-wl_cfg80211_tdls_mgmt(struct wiphy *wiphy, struct net_device *dev,
- const u8 *peer, u8 action_code, u8 dialog_token, u16 status_code,
- u32 peer_capability, const u8 *buf, size_t len)
-#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 18, 0))
-wl_cfg80211_tdls_mgmt(struct wiphy *wiphy, struct net_device *dev,
- const u8 *peer, u8 action_code, u8 dialog_token, u16 status_code,
- u32 peer_capability, bool initiator, const u8 *buf, size_t len)
-#else /* CONFIG_ARCH_MSM && TDLS_MGMT_VERSION2 */
-wl_cfg80211_tdls_mgmt(struct wiphy *wiphy, struct net_device *dev,
- u8 *peer, u8 action_code, u8 dialog_token, u16 status_code,
- const u8 *buf, size_t len)
-#endif /* CONFIG_ARCH_MSM && TDLS_MGMT_VERSION2 */
-{
- s32 ret = 0;
-#if defined(TDLS_MSG_ONLY_WFD) && defined(WLTDLS)
- struct bcm_cfg80211 *cfg;
- tdls_wfd_ie_iovar_t info;
- bzero(&info, sizeof(info));
- cfg = wl_get_cfg(dev);
-
-#if defined(CONFIG_ARCH_MSM) && defined(TDLS_MGMT_VERSION2)
- /* Some customer platform back ported this feature from kernel 3.15 to kernel 3.10
- * and that cuases build error
- */
- BCM_REFERENCE(peer_capability);
-#endif /* CONFIG_ARCH_MSM && TDLS_MGMT_VERSION2 */
-
- switch (action_code) {
- /* We need to set TDLS Wifi Display IE to firmware
- * using tdls_wfd_ie iovar
- */
- case WLAN_TDLS_SET_PROBE_WFD_IE:
- WL_ERR(("wl_cfg80211_tdls_mgmt: WLAN_TDLS_SET_PROBE_WFD_IE\n"));
- info.mode = TDLS_WFD_PROBE_IE_TX;
-
- if (len > sizeof(info.data)) {
- return -EINVAL;
- }
- memcpy(&info.data, buf, len);
- info.length = len;
- break;
- case WLAN_TDLS_SET_SETUP_WFD_IE:
- WL_ERR(("wl_cfg80211_tdls_mgmt: WLAN_TDLS_SET_SETUP_WFD_IE\n"));
- info.mode = TDLS_WFD_IE_TX;
-
- if (len > sizeof(info.data)) {
- return -EINVAL;
- }
- memcpy(&info.data, buf, len);
- info.length = len;
- break;
- case WLAN_TDLS_SET_WFD_ENABLED:
- WL_ERR(("wl_cfg80211_tdls_mgmt: WLAN_TDLS_SET_MODE_WFD_ENABLED\n"));
- dhd_tdls_set_mode((dhd_pub_t *)(cfg->pub), true);
- goto out;
- case WLAN_TDLS_SET_WFD_DISABLED:
- WL_ERR(("wl_cfg80211_tdls_mgmt: WLAN_TDLS_SET_MODE_WFD_DISABLED\n"));
- dhd_tdls_set_mode((dhd_pub_t *)(cfg->pub), false);
- goto out;
- default:
- WL_ERR(("Unsupported action code : %d\n", action_code));
- goto out;
+ if ((index >= 0) && nmode) {
+ bands[index]->ht_cap.cap |=
+ (IEEE80211_HT_CAP_SGI_20 | IEEE80211_HT_CAP_DSSSCCK40);
+ bands[index]->ht_cap.ht_supported = TRUE;
+ bands[index]->ht_cap.ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K;
+ bands[index]->ht_cap.ampdu_density = IEEE80211_HT_MPDU_DENSITY_16;
+ /* An HT shall support all EQM rates for one spatial stream */
+ bands[index]->ht_cap.mcs.rx_mask[0] = 0xff;
+ }
+
}
- ret = wldev_iovar_setbuf(dev, "tdls_wfd_ie", &info, sizeof(info),
- cfg->ioctl_buf, WLC_IOCTL_MAXLEN, &cfg->ioctl_buf_sync);
- if (ret) {
- WL_ERR(("tdls_wfd_ie error %d\n", ret));
+ wiphy->bands[IEEE80211_BAND_2GHZ] = bands[IEEE80211_BAND_2GHZ];
+ wiphy->bands[IEEE80211_BAND_5GHZ] = bands[IEEE80211_BAND_5GHZ];
+
+ /* check if any band was populated; otherwise fall back to 2GHz as the default */
+ if (wiphy->bands[IEEE80211_BAND_2GHZ] == NULL &&
+ wiphy->bands[IEEE80211_BAND_5GHZ] == NULL) {
+ /* Set up the 2GHz band as the default */
+ wiphy->bands[IEEE80211_BAND_2GHZ] = &__wl_band_2ghz;
}
-out:
-#endif /* TDLS_MSG_ONLY_WFD && WLTDLS */
- return ret;
+ if (notify)
+ wiphy_apply_custom_regulatory(wiphy, &brcm_regdom);
+
+ return 0;
}
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 16, 0))
-static s32
-wl_cfg80211_tdls_oper(struct wiphy *wiphy, struct net_device *dev,
- const u8 *peer, enum nl80211_tdls_operation oper)
-#else
-static s32
-wl_cfg80211_tdls_oper(struct wiphy *wiphy, struct net_device *dev,
- u8 *peer, enum nl80211_tdls_operation oper)
-#endif // endif
+s32 wl_update_wiphybands(struct bcm_cfg80211 *cfg, bool notify)
{
- s32 ret = 0;
+ s32 err;
+
+ mutex_lock(&cfg->usr_sync);
+ err = __wl_update_wiphybands(cfg, notify);
+ mutex_unlock(&cfg->usr_sync);
+
+ return err;
+}
+
+static s32 __wl_cfg80211_up(struct bcm_cfg80211 *cfg)
+{
+ s32 err = 0;
+ struct net_device *ndev = bcmcfg_to_prmry_ndev(cfg);
+ struct wireless_dev *wdev = ndev->ieee80211_ptr;
+#ifdef WBTEXT
+ dhd_pub_t *dhd = (dhd_pub_t *)(cfg->pub);
+#endif /* WBTEXT */
#ifdef WLTDLS
- struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
- tdls_iovar_t info;
- dhd_pub_t *dhdp;
- bool tdls_auto_mode = false;
- dhdp = (dhd_pub_t *)(cfg->pub);
- bzero(&info, sizeof(tdls_iovar_t));
- if (peer) {
- memcpy(&info.ea, peer, ETHER_ADDR_LEN);
- } else {
- return -1;
- }
- switch (oper) {
- case NL80211_TDLS_DISCOVERY_REQ:
- /* If the discovery request is broadcast then we need to set
- * info.mode to Tunneled Probe Request
- */
- if (memcmp(peer, (const uint8 *)BSSID_BROADCAST, ETHER_ADDR_LEN) == 0) {
- info.mode = TDLS_MANUAL_EP_WFD_TPQ;
- WL_ERR(("wl_cfg80211_tdls_oper: TDLS TUNNELED PRBOBE REQUEST\n"));
- } else {
- info.mode = TDLS_MANUAL_EP_DISCOVERY;
+ u32 tdls;
+#endif /* WLTDLS */
+
+ WL_DBG(("In\n"));
+
+ if (!dhd_download_fw_on_driverload) {
+ err = wl_create_event_handler(cfg);
+ if (err) {
+ WL_ERR(("wl_create_event_handler failed\n"));
+ return err;
}
- break;
- case NL80211_TDLS_SETUP:
- if (dhdp->tdls_mode == true) {
- info.mode = TDLS_MANUAL_EP_CREATE;
- tdls_auto_mode = false;
- /* Do tear down and create a fresh one */
- ret = wl_cfg80211_tdls_config(cfg, TDLS_STATE_TEARDOWN, tdls_auto_mode);
- if (ret < 0) {
- return ret;
- }
- } else {
- tdls_auto_mode = true;
+ wl_init_event_handler(cfg);
+ }
+
+ err = dhd_config_dongle(cfg);
+ if (unlikely(err))
+ return err;
+
+ err = wl_config_ifmode(cfg, ndev, wdev->iftype);
+ if (unlikely(err && err != -EINPROGRESS)) {
+ WL_ERR(("wl_config_ifmode failed\n"));
+ if (err == -1) {
+ WL_ERR(("return error %d\n", err));
+ return err;
}
- break;
- case NL80211_TDLS_TEARDOWN:
- info.mode = TDLS_MANUAL_EP_DELETE;
- break;
- default:
- WL_ERR(("Unsupported operation : %d\n", oper));
- goto out;
}
- /* turn on TDLS */
- ret = wl_cfg80211_tdls_config(cfg, TDLS_STATE_SETUP, tdls_auto_mode);
- if (ret < 0) {
- return ret;
+
+ err = wl_init_scan(cfg);
+ if (err) {
+ WL_ERR(("wl_init_scan failed\n"));
+ return err;
}
- if (info.mode) {
- ret = wldev_iovar_setbuf(dev, "tdls_endpoint", &info, sizeof(info),
- cfg->ioctl_buf, WLC_IOCTL_MAXLEN, &cfg->ioctl_buf_sync);
- if (ret) {
- WL_ERR(("tdls_endpoint error %d\n", ret));
+ err = __wl_update_wiphybands(cfg, true);
+ if (unlikely(err)) {
+ WL_ERR(("wl_update_wiphybands failed\n"));
+ if (err == -1) {
+ WL_ERR(("return error %d\n", err));
+ return err;
}
}
-out:
- if (ret) {
- wl_flush_fw_log_buffer(dev, FW_LOGSET_MASK_ALL);
- return -ENOTSUPP;
+
+#ifdef DHD_LOSSLESS_ROAMING
+ if (timer_pending(&cfg->roam_timeout)) {
+ del_timer_sync(&cfg->roam_timeout);
}
-#endif /* WLTDLS */
- return ret;
-}
-#endif /* LINUX_VERSION > VERSION(3,2,0) || WL_COMPAT_WIRELESS */
+#endif /* DHD_LOSSLESS_ROAMING */
-s32 wl_cfg80211_set_wps_p2p_ie(struct net_device *ndev, char *buf, int len,
- enum wl_management_type type)
-{
- struct bcm_cfg80211 *cfg;
- s32 ret = 0;
- struct ether_addr primary_mac;
- s32 bssidx = 0;
- s32 pktflag = 0;
- cfg = wl_get_cfg(ndev);
+ err = dhd_monitor_init(cfg->pub);
- if (wl_get_drv_status(cfg, AP_CREATING, ndev)) {
- /* Vendor IEs should be set to FW
- * after SoftAP interface is brought up
- */
- WL_DBG(("Skipping set IE since AP is not up \n"));
- goto exit;
- } else if (ndev == bcmcfg_to_prmry_ndev(cfg)) {
- /* Either stand alone AP case or P2P discovery */
- if (wl_get_drv_status(cfg, AP_CREATED, ndev)) {
- /* Stand alone AP case on primary interface */
- WL_DBG(("Apply IEs for Primary AP Interface \n"));
- bssidx = 0;
- } else {
- if (!cfg->p2p) {
- /* If p2p not initialized, return failure */
- WL_ERR(("P2P not initialized \n"));
- goto exit;
- }
- /* P2P Discovery case (p2p listen) */
- if (!cfg->p2p->on) {
- /* Turn on Discovery interface */
- get_primary_mac(cfg, &primary_mac);
-#ifndef WL_P2P_USE_RANDMAC
- wl_cfgp2p_generate_bss_mac(cfg, &primary_mac);
-#endif /* WL_P2P_USE_RANDMAC */
- p2p_on(cfg) = true;
- ret = wl_cfgp2p_enable_discovery(cfg, ndev, NULL, 0);
- if (unlikely(ret)) {
- WL_ERR(("Enable discovery failed \n"));
- goto exit;
- }
- }
- WL_DBG(("Apply IEs for P2P Discovery Iface \n"));
- ndev = wl_to_p2p_bss_ndev(cfg, P2PAPI_BSSCFG_PRIMARY);
- bssidx = wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_DEVICE);
+#ifdef WBTEXT
+ /* when wifi comes up, set roam_prof to its default value */
+ if (dhd->wbtext_support) {
+ if (dhd->op_mode & DHD_FLAG_STA_MODE) {
+ wl_cfg80211_wbtext_set_default(ndev);
+ wl_cfg80211_wbtext_clear_bssid_list(cfg);
}
- } else {
- /* Virtual AP/ P2P Group Interface */
- WL_DBG(("Apply IEs for iface:%s\n", ndev->name));
- bssidx = wl_get_bssidx_by_wdev(cfg, ndev->ieee80211_ptr);
}
-
- if (ndev != NULL) {
- switch (type) {
- case WL_BEACON:
- pktflag = VNDR_IE_BEACON_FLAG;
- break;
- case WL_PROBE_RESP:
- pktflag = VNDR_IE_PRBRSP_FLAG;
- break;
- case WL_ASSOC_RESP:
- pktflag = VNDR_IE_ASSOCRSP_FLAG;
- break;
- }
- if (pktflag) {
- ret = wl_cfg80211_set_mgmt_vndr_ies(cfg,
- ndev_to_cfgdev(ndev), bssidx, pktflag, buf, len);
- }
+#endif /* WBTEXT */
+#ifdef WLTDLS
+ if (wldev_iovar_getint(ndev, "tdls_enable", &tdls) == 0) {
+ WL_DBG(("TDLS supported in fw\n"));
+ cfg->tdls_supported = true;
}
-exit:
- return ret;
+#endif /* WLTDLS */
+ INIT_DELAYED_WORK(&cfg->pm_enable_work, wl_cfg80211_work_handler);
+ wl_set_drv_status(cfg, READY, ndev);
+ return err;
}
-#ifdef WL_SUPPORT_AUTO_CHANNEL
-static s32
-wl_cfg80211_set_auto_channel_scan_state(struct net_device *ndev)
+static s32 __wl_cfg80211_down(struct bcm_cfg80211 *cfg)
{
- u32 val = 0;
- s32 ret = BCME_ERROR;
- struct bcm_cfg80211 *cfg = wl_get_cfg(ndev);
- /* Set interface up, explicitly. */
- val = 1;
+ s32 err = 0;
+ unsigned long flags;
+ struct net_info *iter, *next;
+ struct net_device *ndev = bcmcfg_to_prmry_ndev(cfg);
+#if defined(WL_CFG80211) && (defined(WL_ENABLE_P2P_IF) || \
+ defined(WL_NEW_CFG_PRIVCMD_SUPPORT)) && !defined(PLATFORM_SLP)
+ struct net_device *p2p_net = cfg->p2p_net;
+#endif
+#ifdef PROP_TXSTATUS_VSDB
+#if defined(BCMSDIO) || defined(BCMDBUS)
+ dhd_pub_t *dhd = (dhd_pub_t *)(cfg->pub);
+#endif /* BCMSDIO || BCMDBUS */
+#endif /* PROP_TXSTATUS_VSDB */
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 8, 0))
+ struct cfg80211_scan_info info;
+#endif
+
+ WL_DBG(("In\n"));
- ret = wldev_ioctl_set(ndev, WLC_UP, (void *)&val, sizeof(val));
- if (ret < 0) {
- WL_ERR(("set interface up failed, error = %d\n", ret));
- goto done;
+ /* Check if cfg80211 interface is already down */
+ if (!wl_get_drv_status(cfg, READY, ndev)) {
+ WL_DBG(("cfg80211 interface is already down\n"));
+ return err; /* it is not even ready */
}
- /* Stop all scan explicitly, till auto channel selection complete. */
- wl_set_drv_status(cfg, SCANNING, ndev);
- if (cfg->escan_info.ndev == NULL) {
- ret = BCME_OK;
- goto done;
- }
+#ifdef WLTDLS
+ cfg->tdls_supported = false;
+#endif /* WLTDLS */
- wl_cfg80211_cancel_scan(cfg);
+ /* Delete pm_enable_work */
+ wl_add_remove_pm_enable_work(cfg, WL_PM_WORKQ_DEL);
-done:
- return ret;
-}
+ /* clear all the security settings on the primary interface */
+ wl_cfg80211_clear_security(cfg);
-static bool
-wl_cfg80211_valid_channel_p2p(int channel)
-{
- bool valid = false;
- /* channel 1 to 14 */
- if ((channel >= 1) && (channel <= 14)) {
- valid = true;
- }
- /* channel 36 to 48 */
- else if ((channel >= 36) && (channel <= 48)) {
- valid = true;
+ if (cfg->p2p_supported) {
+ wl_clr_p2p_status(cfg, GO_NEG_PHASE);
+#ifdef PROP_TXSTATUS_VSDB
+#if defined(BCMSDIO) || defined(BCMDBUS)
+ if (wl_cfgp2p_vif_created(cfg)) {
+ bool enabled = false;
+ dhd_wlfc_get_enable(dhd, &enabled);
+ if (enabled && cfg->wlfc_on && dhd->op_mode != DHD_FLAG_HOSTAP_MODE &&
+ dhd->op_mode != DHD_FLAG_IBSS_MODE) {
+ dhd_wlfc_deinit(dhd);
+ cfg->wlfc_on = false;
+ }
+ }
+#endif /* BCMSDIO || BCMDBUS */
+#endif /* PROP_TXSTATUS_VSDB */
}
- /* channel 149 to 161 */
- else if ((channel >= 149) && (channel <= 161)) {
- valid = true;
+
+
+ /* clean up any left over interfaces */
+ wl_cfg80211_cleanup_virtual_ifaces(ndev, false);
+
+ /* If the primary BSS is operational (e.g. SoftAP), bring it down */
+ if (wl_cfg80211_bss_isup(ndev, 0)) {
+ if (wl_cfg80211_bss_up(cfg, ndev, 0, 0) < 0)
+ WL_ERR(("BSS down failed \n"));
}
- else {
- valid = false;
- WL_INFORM(("invalid P2P chanspec, channel = %d\n", channel));
+
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == \
+ 4 && __GNUC_MINOR__ >= 6))
+_Pragma("GCC diagnostic push")
+_Pragma("GCC diagnostic ignored \"-Wcast-qual\"")
+#endif
+ for_each_ndev(cfg, iter, next) {
+ if (iter->ndev) /* p2p discovery iface is null */
+ wl_set_drv_status(cfg, SCAN_ABORTING, iter->ndev);
}
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == \
+ 4 && __GNUC_MINOR__ >= 6))
+_Pragma("GCC diagnostic pop")
+#endif
- return valid;
-}
+#ifdef P2P_LISTEN_OFFLOADING
+ wl_cfg80211_p2plo_deinit(cfg);
+#endif /* P2P_LISTEN_OFFLOADING */
-s32
-wl_cfg80211_get_chanspecs_2g(struct net_device *ndev, void *buf, s32 buflen)
-{
- s32 ret = BCME_ERROR;
- struct bcm_cfg80211 *cfg = NULL;
- chanspec_t chanspec = 0;
+ spin_lock_irqsave(&cfg->cfgdrv_lock, flags);
+ if (cfg->scan_request) {
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 8, 0))
+ info.aborted = true;
+ cfg80211_scan_done(cfg->scan_request, &info);
+#else
+ cfg80211_scan_done(cfg->scan_request, true);
+#endif
+ cfg->scan_request = NULL;
+ }
+ spin_unlock_irqrestore(&cfg->cfgdrv_lock, flags);
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == \
+ 4 && __GNUC_MINOR__ >= 6))
+_Pragma("GCC diagnostic push")
+_Pragma("GCC diagnostic ignored \"-Wcast-qual\"")
+#endif
+ for_each_ndev(cfg, iter, next) {
+ /* p2p discovery iface ndev ptr could be null */
+ if (iter->ndev == NULL)
+ continue;
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 14, 0))
+ if (wl_get_drv_status(cfg, CONNECTED, iter->ndev)) {
+ CFG80211_DISCONNECTED(iter->ndev, 0, NULL, 0, false, GFP_KERNEL);
+ }
+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 14, 0)) */
+ wl_clr_drv_status(cfg, READY, iter->ndev);
+ wl_clr_drv_status(cfg, SCANNING, iter->ndev);
+ wl_clr_drv_status(cfg, SCAN_ABORTING, iter->ndev);
+ wl_clr_drv_status(cfg, CONNECTING, iter->ndev);
+ wl_clr_drv_status(cfg, CONNECTED, iter->ndev);
+ wl_clr_drv_status(cfg, DISCONNECTING, iter->ndev);
+ wl_clr_drv_status(cfg, AP_CREATED, iter->ndev);
+ wl_clr_drv_status(cfg, AP_CREATING, iter->ndev);
+ }
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == \
+ 4 && __GNUC_MINOR__ >= 6))
+_Pragma("GCC diagnostic pop")
+#endif
+ bcmcfg_to_prmry_ndev(cfg)->ieee80211_ptr->iftype =
+ NL80211_IFTYPE_STATION;
+#if defined(WL_CFG80211) && (defined(WL_ENABLE_P2P_IF) || \
+ defined(WL_NEW_CFG_PRIVCMD_SUPPORT)) && !defined(PLATFORM_SLP)
+ if (p2p_net)
+ dev_close(p2p_net);
+#endif
- cfg = wl_get_cfg(ndev);
+ /* Avoid deadlock from wl_cfg80211_down */
+ if (!dhd_download_fw_on_driverload) {
+ mutex_unlock(&cfg->usr_sync);
+ wl_destroy_event_handler(cfg);
+ mutex_lock(&cfg->usr_sync);
+ }
- /* Restrict channels to 2.4GHz, 20MHz BW, no SB. */
- chanspec |= (WL_CHANSPEC_BAND_2G | WL_CHANSPEC_BW_20 |
- WL_CHANSPEC_CTL_SB_NONE);
- chanspec = wl_chspec_host_to_driver(chanspec);
+ wl_flush_eq(cfg);
+ if (cfg->link_up) { /* army: fix wifi stop call trace issue */
+ CFG80211_DISCONNECTED(ndev, 0, NULL, 0, false, GFP_KERNEL);
+ wl_link_down(cfg);
+ }
+ if (cfg->p2p_supported) {
+ if (timer_pending(&cfg->p2p->listen_timer))
+ del_timer_sync(&cfg->p2p->listen_timer);
+ wl_cfgp2p_down(cfg);
+ }
- ret = wldev_iovar_getbuf_bsscfg(ndev, "chanspecs", (void *)&chanspec,
- sizeof(chanspec), buf, buflen, 0, &cfg->ioctl_buf_sync);
- if (ret < 0) {
- WL_ERR(("get 'chanspecs' failed, error = %d\n", ret));
+ if (timer_pending(&cfg->scan_timeout)) {
+ del_timer_sync(&cfg->scan_timeout);
}
- return ret;
-}
+ DHD_OS_SCAN_WAKE_UNLOCK((dhd_pub_t *)(cfg->pub));
-s32
-wl_cfg80211_get_chanspecs_5g(struct net_device *ndev, void *buf, s32 buflen)
-{
- u32 channel = 0;
- s32 ret = BCME_ERROR;
- s32 i = 0;
- s32 j = 0;
- struct bcm_cfg80211 *cfg = wl_get_cfg(ndev);
- wl_uint32_list_t *list = NULL;
- chanspec_t chanspec = 0;
+ dhd_monitor_uninit();
+#ifdef WLAIBSS_MCHAN
+ bcm_cfg80211_del_ibss_if(cfg->wdev->wiphy, cfg->ibss_cfgdev);
+#endif /* WLAIBSS_MCHAN */
- /* Restrict channels to 5GHz, 20MHz BW, no SB. */
- chanspec |= (WL_CHANSPEC_BAND_5G | WL_CHANSPEC_BW_20 |
- WL_CHANSPEC_CTL_SB_NONE);
- chanspec = wl_chspec_host_to_driver(chanspec);
- ret = wldev_iovar_getbuf_bsscfg(ndev, "chanspecs", (void *)&chanspec,
- sizeof(chanspec), buf, buflen, 0, &cfg->ioctl_buf_sync);
- if (ret < 0) {
- WL_ERR(("get 'chanspecs' failed, error = %d\n", ret));
- goto done;
+#ifdef WL11U
+ /* Clear interworking element. */
+ if (cfg->wl11u) {
+ cfg->wl11u = FALSE;
}
+#endif /* WL11U */
- list = (wl_uint32_list_t *)buf;
- /* Skip DFS and inavlid P2P channel. */
- for (i = 0, j = 0; i < dtoh32(list->count); i++) {
- chanspec = (chanspec_t) dtoh32(list->element[i]);
- channel = CHSPEC_CHANNEL(chanspec);
-
- ret = wldev_iovar_getint(ndev, "per_chan_info", &channel);
- if (ret < 0) {
- WL_ERR(("get 'per_chan_info' failed, error = %d\n", ret));
- goto done;
- }
+#ifdef CUSTOMER_HW4_DEBUG
+ if (wl_scan_timeout_dbg_enabled) {
+ wl_scan_timeout_dbg_clear();
+ }
+#endif /* CUSTOMER_HW4_DEBUG */
- if (CHANNEL_IS_RADAR(channel) ||
- !(wl_cfg80211_valid_channel_p2p(CHSPEC_CHANNEL(chanspec)))) {
- continue;
- } else {
- list->element[j] = list->element[i];
- }
+ cfg->disable_roam_event = false;
- j++;
- }
+ DNGL_FUNC(dhd_cfg80211_down, (cfg));
- list->count = j;
+#ifdef DHD_IFDEBUG
+ /* Print out all netinfo entries */
+ wl_probe_wdev_all(cfg);
+#endif /* DHD_IFDEBUG */
-done:
- return ret;
+ return err;
}
-static s32
-wl_cfg80211_get_best_channel(struct net_device *ndev, void *buf, int buflen,
- int *channel)
+s32 wl_cfg80211_up(struct net_device *net)
{
- s32 ret = BCME_ERROR;
- int chosen = 0;
- int retry = 0;
- uint chip;
-
- /* Start auto channel selection scan. */
- ret = wldev_ioctl_set(ndev, WLC_START_CHANNEL_SEL, buf, buflen);
- if (ret < 0) {
- WL_ERR(("can't start auto channel scan, error = %d\n", ret));
- *channel = 0;
- goto done;
- }
+ struct bcm_cfg80211 *cfg;
+ s32 err = 0;
+ int val = 1;
+ dhd_pub_t *dhd;
+#ifdef DISABLE_PM_BCNRX
+ s32 interr = 0;
+ uint param = 0;
+ s8 iovbuf[WLC_IOCTL_SMLEN];
+#endif /* DISABLE_PM_BCNRX */
- /* Wait for auto channel selection, worst case possible delay is 5250ms. */
- retry = CHAN_SEL_RETRY_COUNT;
+ WL_DBG(("In\n"));
+ cfg = wl_get_cfg(net);
- while (retry--) {
- OSL_SLEEP(CHAN_SEL_IOCTL_DELAY);
- chosen = 0;
- ret = wldev_ioctl_get(ndev, WLC_GET_CHANNEL_SEL, &chosen, sizeof(chosen));
- if ((ret == 0) && (dtoh32(chosen) != 0)) {
- chip = dhd_conf_get_chip(dhd_get_pub(ndev));
- if (chip != BCM43362_CHIP_ID && chip != BCM4330_CHIP_ID &&
- chip != BCM43143_CHIP_ID) {
- u32 chanspec = 0;
- int ctl_chan;
- chanspec = wl_chspec_driver_to_host(chosen);
- WL_INFORM(("selected chanspec = 0x%x\n", chanspec));
- ctl_chan = wf_chspec_ctlchan(chanspec);
- WL_INFORM(("selected ctl_chan = %d\n", ctl_chan));
- *channel = (u16)(ctl_chan & 0x00FF);
- } else
- *channel = (u16)(chosen & 0x00FF);
- WL_INFORM(("selected channel = %d\n", *channel));
- break;
- }
- WL_INFORM(("attempt = %d, ret = %d, chosen = %d\n",
- (CHAN_SEL_RETRY_COUNT - retry), ret, dtoh32(chosen)));
+ if ((err = wldev_ioctl_get(bcmcfg_to_prmry_ndev(cfg), WLC_GET_VERSION, &val,
+ sizeof(int))) < 0) {
+ WL_ERR(("WLC_GET_VERSION failed, err=%d\n", err));
+ return err;
+ }
+ val = dtoh32(val);
+ if (val != WLC_IOCTL_VERSION && val != 1) {
+ WL_ERR(("Version mismatch, please upgrade. Got %d, expected %d or 1\n",
+ val, WLC_IOCTL_VERSION));
+ return BCME_VERSION;
}
+ ioctl_version = val;
+ wl_cfg80211_check_in4way(cfg, net, NO_SCAN_IN4WAY|NO_BTC_IN4WAY,
+ WL_EXT_STATUS_DISCONNECTED, NULL);
+ WL_TRACE(("WLC_GET_VERSION=%d\n", ioctl_version));
- if (retry <= 0) {
- WL_ERR(("failure, auto channel selection timed out\n"));
- *channel = 0;
- ret = BCME_ERROR;
+ mutex_lock(&cfg->usr_sync);
+ dhd = (dhd_pub_t *)(cfg->pub);
+ if (!(dhd->op_mode & DHD_FLAG_HOSTAP_MODE)) {
+ err = wl_cfg80211_attach_post(bcmcfg_to_prmry_ndev(cfg));
+ if (unlikely(err)) {
+ mutex_unlock(&cfg->usr_sync);
+ return err;
+ }
}
- WL_INFORM(("selected channel = %d\n", *channel));
+#ifdef WLMESH
+ cfg->wdev->wiphy->features |= NL80211_FEATURE_USERSPACE_MPM;
+#endif /* WLMESH */
-done:
- return ret;
-}
+ err = __wl_cfg80211_up(cfg);
+ if (unlikely(err))
+ WL_ERR(("__wl_cfg80211_up failed\n"));
-static s32
-wl_cfg80211_restore_auto_channel_scan_state(struct net_device *ndev)
-{
- struct bcm_cfg80211 *cfg = wl_get_cfg(ndev);
- /* Clear scan stop driver status. */
- wl_clr_drv_status(cfg, SCANNING, ndev);
- return BCME_OK;
-}
-s32
-wl_cfg80211_get_best_channels(struct net_device *dev, char* cmd, int total_len)
-{
- int channel = 0, band, band_cur;
- s32 ret = BCME_ERROR;
- u8 *buf = NULL;
- char *pos = cmd;
- struct bcm_cfg80211 *cfg = NULL;
- struct net_device *ndev = NULL;
+ /* IOVAR configurations with 'up' condition */
+#ifdef DISABLE_PM_BCNRX
+ interr = wldev_iovar_setbuf(bcmcfg_to_prmry_ndev(cfg), "pm_bcnrx", (char *)¶m,
+ sizeof(param), iovbuf, sizeof(iovbuf), NULL);
+ if (unlikely(interr)) {
+ WL_ERR(("Set pm_bcnrx returned (%d)\n", interr));
+ }
+#endif /* DISABLE_PM_BCNRX */
- bzero(cmd, total_len);
- cfg = wl_get_cfg(dev);
+ mutex_unlock(&cfg->usr_sync);
- buf = (u8 *)MALLOC(cfg->osh, CHANSPEC_BUF_SIZE);
- if (buf == NULL) {
- WL_ERR(("failed to allocate chanspec buffer\n"));
- return -ENOMEM;
- }
+#ifdef WLAIBSS_MCHAN
+ bcm_cfg80211_add_ibss_if(cfg->wdev->wiphy, IBSS_IF_NAME);
+#endif /* WLAIBSS_MCHAN */
- /*
- * Always use primary interface, irrespective of interface on which
- * command came.
- */
- ndev = bcmcfg_to_prmry_ndev(cfg);
+#ifdef DUAL_STA_STATIC_IF
+#ifdef WL_VIRTUAL_APSTA
+#error "Both DUAL STA and DUAL_STA_STATIC_IF can't be enabled together"
+#endif
+ /* Static interface support is currently available only for STA-only builds (without P2P) */
+ wl_cfg80211_create_iface(cfg->wdev->wiphy, NL80211_IFTYPE_STATION, NULL, "wlan%d");
+#endif /* DUAL_STA_STATIC_IF */
- /*
- * Make sure that FW and driver are in right state to do auto channel
- * selection scan.
- */
- ret = wl_cfg80211_set_auto_channel_scan_state(ndev);
- if (ret < 0) {
- WL_ERR(("can't set auto channel scan state, error = %d\n", ret));
- goto done;
- }
+ return err;
+}
- ret = wldev_ioctl(dev, WLC_GET_BAND, &band_cur, sizeof(band_cur), false);
- if (band_cur != WLC_BAND_5G) {
- /* Best channel selection in 2.4GHz band. */
- ret = wl_cfg80211_get_chanspecs_2g(ndev, (void *)buf, CHANSPEC_BUF_SIZE);
- if (ret < 0) {
- WL_ERR(("can't get chanspecs in 2.4GHz, error = %d\n", ret));
- goto done;
- }
+/* Private Event to Supplicant with indication that chip hangs */
+int wl_cfg80211_hang(struct net_device *dev, u16 reason)
+{
+ struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
+ dhd_pub_t *dhd;
+#if defined(SOFTAP_SEND_HANGEVT)
+ /* specific mac address used for hang event */
+ uint8 hang_mac[ETHER_ADDR_LEN] = {0x11, 0x11, 0x11, 0x11, 0x11, 0x11};
+#endif /* SOFTAP_SEND_HANGEVT */
+ if (!cfg) {
+ return BCME_ERROR;
+ }
- ret = wl_cfg80211_get_best_channel(ndev, (void *)buf, CHANSPEC_BUF_SIZE,
- &channel);
- if (ret < 0) {
- WL_ERR(("can't select best channel scan in 2.4GHz, error = %d\n", ret));
- goto done;
- }
+ dhd = (dhd_pub_t *)(cfg->pub);
+#if defined(DHD_HANG_SEND_UP_TEST)
+ if (dhd->req_hang_type) {
+ WL_ERR(("%s, Clear HANG test request 0x%x\n",
+ __FUNCTION__, dhd->req_hang_type));
+ dhd->req_hang_type = 0;
+ }
+#endif /* DHD_HANG_SEND_UP_TEST */
+ if ((dhd->hang_reason <= HANG_REASON_MASK) || (dhd->hang_reason >= HANG_REASON_MAX)) {
+ WL_ERR(("%s, Invalid hang reason 0x%x\n",
+ __FUNCTION__, dhd->hang_reason));
+ dhd->hang_reason = HANG_REASON_UNKNOWN;
+ }
+#ifdef DHD_USE_EXTENDED_HANG_REASON
+ if (dhd->hang_reason != 0) {
+ reason = dhd->hang_reason;
+ }
+#endif /* DHD_USE_EXTENDED_HANG_REASON */
+ WL_ERR(("In : chip crash eventing, reason=0x%x\n", (uint32)(dhd->hang_reason)));
- if (CHANNEL_IS_2G(channel)) {
-// channel = ieee80211_channel_to_frequency(channel, IEEE80211_BAND_2GHZ);
- } else {
- WL_ERR(("invalid 2.4GHz channel, channel = %d\n", channel));
- channel = 0;
- }
- pos += snprintf(pos, total_len, "2g=%d ", channel);
+ wl_add_remove_pm_enable_work(cfg, WL_PM_WORKQ_DEL);
+#ifdef SOFTAP_SEND_HANGEVT
+ if (dhd->op_mode & DHD_FLAG_HOSTAP_MODE) {
+ cfg80211_del_sta(dev, hang_mac, GFP_ATOMIC);
+ } else
+#endif /* SOFTAP_SEND_HANGEVT */
+ {
+ CFG80211_DISCONNECTED(dev, reason, NULL, 0, false, GFP_KERNEL);
+ }
+#if defined(RSSIAVG)
+ wl_free_rssi_cache(&cfg->g_rssi_cache_ctrl);
+#endif
+#if defined(BSSCACHE)
+ wl_free_bss_cache(&cfg->g_bss_cache_ctrl);
+#endif
+ if (cfg != NULL) {
+ wl_link_down(cfg);
}
+ return 0;
+}
- if (band_cur != WLC_BAND_2G) {
- // terence 20140120: fix for some chipsets only return 2.4GHz channel (4330b2/43341b0/4339a0)
- band = band_cur==WLC_BAND_2G ? band_cur : WLC_BAND_5G;
- ret = wldev_ioctl(dev, WLC_SET_BAND, &band, sizeof(band), true);
- if (ret < 0) {
- WL_ERR(("WLC_SET_BAND error %d\n", ret));
- goto done;
- }
+s32 wl_cfg80211_down(struct net_device *dev)
+{
+ struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
+ s32 err = 0;
- /* Best channel selection in 5GHz band. */
- ret = wl_cfg80211_get_chanspecs_5g(ndev, (void *)buf, CHANSPEC_BUF_SIZE);
- if (ret < 0) {
- WL_ERR(("can't get chanspecs in 5GHz, error = %d\n", ret));
- goto done;
- }
+ WL_DBG(("In\n"));
+ if (cfg == NULL)
+ return err;
+ mutex_lock(&cfg->usr_sync);
+#if defined(RSSIAVG)
+ wl_free_rssi_cache(&cfg->g_rssi_cache_ctrl);
+#endif
+#if defined(BSSCACHE)
+ wl_free_bss_cache(&cfg->g_bss_cache_ctrl);
+#endif
+ err = __wl_cfg80211_down(cfg);
+ mutex_unlock(&cfg->usr_sync);
- ret = wl_cfg80211_get_best_channel(ndev, (void *)buf, CHANSPEC_BUF_SIZE,
- &channel);
- if (ret < 0) {
- WL_ERR(("can't select best channel scan in 5GHz, error = %d\n", ret));
- goto done;
- }
+ return err;
+}
- if (CHANNEL_IS_5G(channel)) {
-// channel = ieee80211_channel_to_frequency(channel, IEEE80211_BAND_5GHZ);
- } else {
- WL_ERR(("invalid 5GHz channel, channel = %d\n", channel));
- channel = 0;
- }
+static void *wl_read_prof(struct bcm_cfg80211 *cfg, struct net_device *ndev, s32 item)
+{
+ unsigned long flags;
+ void *rptr = NULL;
+ struct wl_profile *profile = wl_get_profile_by_netdev(cfg, ndev);
- ret = wldev_ioctl(dev, WLC_SET_BAND, &band_cur, sizeof(band_cur), true);
- if (ret < 0)
- WL_ERR(("WLC_SET_BAND error %d\n", ret));
- pos += snprintf(pos, total_len, "5g=%d ", channel);
+ if (!profile)
+ return NULL;
+ spin_lock_irqsave(&cfg->cfgdrv_lock, flags);
+ switch (item) {
+ case WL_PROF_SEC:
+ rptr = &profile->sec;
+ break;
+ case WL_PROF_ACT:
+ rptr = &profile->active;
+ break;
+ case WL_PROF_BSSID:
+ rptr = profile->bssid;
+ break;
+ case WL_PROF_SSID:
+ rptr = &profile->ssid;
+ break;
+ case WL_PROF_CHAN:
+ rptr = &profile->channel;
+ break;
}
+ spin_unlock_irqrestore(&cfg->cfgdrv_lock, flags);
+ if (!rptr)
+ WL_ERR(("invalid item (%d)\n", item));
+ return rptr;
+}
-done:
- if (NULL != buf) {
- MFREE(cfg->osh, buf, CHANSPEC_BUF_SIZE);
- }
+static s32
+wl_update_prof(struct bcm_cfg80211 *cfg, struct net_device *ndev,
+ const wl_event_msg_t *e, const void *data, s32 item)
+{
+ s32 err = 0;
+ const struct wlc_ssid *ssid;
+ unsigned long flags;
+ struct wl_profile *profile = wl_get_profile_by_netdev(cfg, ndev);
- /* Restore FW and driver back to normal state. */
- ret = wl_cfg80211_restore_auto_channel_scan_state(ndev);
- if (ret < 0) {
- WL_ERR(("can't restore auto channel scan state, error = %d\n", ret));
+ if (!profile)
+ return WL_INVALID;
+ spin_lock_irqsave(&cfg->cfgdrv_lock, flags);
+ switch (item) {
+ case WL_PROF_SSID:
+ ssid = (const wlc_ssid_t *) data;
+ memset(profile->ssid.SSID, 0,
+ sizeof(profile->ssid.SSID));
+ profile->ssid.SSID_len = MIN(ssid->SSID_len, DOT11_MAX_SSID_LEN);
+ memcpy(profile->ssid.SSID, ssid->SSID, profile->ssid.SSID_len);
+ break;
+ case WL_PROF_BSSID:
+ if (data)
+ memcpy(profile->bssid, data, ETHER_ADDR_LEN);
+ else
+ memset(profile->bssid, 0, ETHER_ADDR_LEN);
+ break;
+ case WL_PROF_SEC:
+ memcpy(&profile->sec, data, sizeof(profile->sec));
+ break;
+ case WL_PROF_ACT:
+ profile->active = *(const bool *)data;
+ break;
+ case WL_PROF_BEACONINT:
+ profile->beacon_interval = *(const u16 *)data;
+ break;
+ case WL_PROF_DTIMPERIOD:
+ profile->dtim_period = *(const u8 *)data;
+ break;
+ case WL_PROF_CHAN:
+ profile->channel = *(const u32*)data;
+ break;
+ default:
+ err = -EOPNOTSUPP;
+ break;
}
+ spin_unlock_irqrestore(&cfg->cfgdrv_lock, flags);
+
+ if (err == -EOPNOTSUPP)
+ WL_ERR(("unsupported item (%d)\n", item));
+
+ return err;
+}
- WL_MSG(ndev->name, "%s\n", cmd);
+void wl_cfg80211_dbg_level(u32 level)
+{
+ /*
+ * prohibit to change debug level
+ * by insmod parameter.
+ * eventually debug level will be configured
+ * in compile time by using CONFIG_XXX
+ */
+ /* wl_dbg_level = level; */
+}
- return (pos - cmd);
+static bool wl_is_ibssmode(struct bcm_cfg80211 *cfg, struct net_device *ndev)
+{
+ return wl_get_mode_by_netdev(cfg, ndev) == WL_MODE_IBSS;
}
-#endif /* WL_SUPPORT_AUTO_CHANNEL */
-static const struct rfkill_ops wl_rfkill_ops = {
- .set_block = wl_rfkill_set
-};
+static __used bool wl_is_ibssstarter(struct bcm_cfg80211 *cfg)
+{
+ return cfg->ibss_starter;
+}
-static int wl_rfkill_set(void *data, bool blocked)
+static void wl_rst_ie(struct bcm_cfg80211 *cfg)
{
- struct bcm_cfg80211 *cfg = (struct bcm_cfg80211 *)data;
+ struct wl_ie *ie = wl_to_ie(cfg);
- WL_DBG(("Enter \n"));
- WL_DBG(("RF %s\n", blocked ? "blocked" : "unblocked"));
+ ie->offset = 0;
+}
- if (!cfg)
- return -EINVAL;
+static __used s32 wl_add_ie(struct bcm_cfg80211 *cfg, u8 t, u8 l, u8 *v)
+{
+ struct wl_ie *ie = wl_to_ie(cfg);
+ s32 err = 0;
- cfg->rf_blocked = blocked;
+ if (unlikely(ie->offset + l + 2 > WL_TLV_INFO_MAX)) {
+ WL_ERR(("ei crosses buffer boundary\n"));
+ return -ENOSPC;
+ }
+ ie->buf[ie->offset] = t;
+ ie->buf[ie->offset + 1] = l;
+ memcpy(&ie->buf[ie->offset + 2], v, l);
+ ie->offset += l + 2;
- return 0;
+ return err;
}
-static int wl_setup_rfkill(struct bcm_cfg80211 *cfg, bool setup)
+static void wl_update_hidden_ap_ie(struct wl_bss_info *bi, const u8 *ie_stream, u32 *ie_size,
+ bool roam)
{
- s32 err = 0;
+ u8 *ssidie;
+ int32 ssid_len = MIN(bi->SSID_len, DOT11_MAX_SSID_LEN);
+ int32 remaining_ie_buf_len, available_buffer_len;
+ /* cfg80211_find_ie() as defined in the kernel returns a const u8 pointer */
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == \
+ 4 && __GNUC_MINOR__ >= 6))
+_Pragma("GCC diagnostic push")
+_Pragma("GCC diagnostic ignored \"-Wcast-qual\"")
+#endif
+ ssidie = (u8 *)cfg80211_find_ie(WLAN_EID_SSID, ie_stream, *ie_size);
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == \
+ 4 && __GNUC_MINOR__ >= 6))
+_Pragma("GCC diagnostic pop")
+#endif
+ /* ERROR out if
+ * 1. No ssid IE is FOUND or
+ * 2. New ssid length is > what was allocated for existing ssid (as
+ * we do not want to overwrite the rest of the IEs) or
+ * 3. In case of an erroneous buffer input where the ssid length doesn't match the space
+ * allocated to it.
+ */
+ if (!ssidie) {
+ return;
+ }
+ available_buffer_len = ((int)(*ie_size)) - (ssidie + 2 - ie_stream);
+ remaining_ie_buf_len = available_buffer_len - (int)ssidie[1];
+ if ((ssid_len > ssidie[1]) ||
+ (ssidie[1] > available_buffer_len)) {
+ return;
+ }
- WL_DBG(("Enter \n"));
- if (!cfg)
- return -EINVAL;
- if (setup) {
- cfg->rfkill = rfkill_alloc("brcmfmac-wifi",
- wl_cfg80211_get_parent_dev(),
- RFKILL_TYPE_WLAN, &wl_rfkill_ops, (void *)cfg);
- if (!cfg->rfkill) {
- err = -ENOMEM;
- goto err_out;
+ if (ssidie[1] != ssid_len) {
+ if (ssidie[1]) {
+ WL_ERR(("%s: Wrong SSID len: %d != %d\n",
+ __FUNCTION__, ssidie[1], bi->SSID_len));
}
-
- err = rfkill_register(cfg->rfkill);
-
- if (err)
- rfkill_destroy(cfg->rfkill);
- } else {
- if (!cfg->rfkill) {
- err = -ENOMEM;
- goto err_out;
+ if (roam) {
+ WL_ERR(("Changing the SSID Info.\n"));
+ memmove(ssidie + ssid_len + 2,
+ (ssidie + 2) + ssidie[1],
+ remaining_ie_buf_len);
+ memcpy(ssidie + 2, bi->SSID, ssid_len);
+ *ie_size = *ie_size + ssid_len - ssidie[1];
+ ssidie[1] = ssid_len;
}
+ return;
+ }
+ if (*(ssidie + 2) == '\0')
+ memcpy(ssidie + 2, bi->SSID, ssid_len);
+ return;
+}
- rfkill_unregister(cfg->rfkill);
- rfkill_destroy(cfg->rfkill);
+static s32 wl_mrg_ie(struct bcm_cfg80211 *cfg, u8 *ie_stream, u16 ie_size)
+{
+ struct wl_ie *ie = wl_to_ie(cfg);
+ s32 err = 0;
+
+ if (unlikely(ie->offset + ie_size > WL_TLV_INFO_MAX)) {
+ WL_ERR(("ei_stream crosses buffer boundary\n"));
+ return -ENOSPC;
}
+ memcpy(&ie->buf[ie->offset], ie_stream, ie_size);
+ ie->offset += ie_size;
-err_out:
return err;
}
-#ifdef DEBUGFS_CFG80211
-/**
-* Format : echo "SCAN:1 DBG:1" > /sys/kernel/debug/dhd/debug_level
-* to turn on SCAN and DBG log.
-* To turn off SCAN partially, echo "SCAN:0" > /sys/kernel/debug/dhd/debug_level
-* To see current setting of debug level,
-* cat /sys/kernel/debug/dhd/debug_level
-*/
-static ssize_t
-wl_debuglevel_write(struct file *file, const char __user *userbuf,
- size_t count, loff_t *ppos)
+static s32 wl_cp_ie(struct bcm_cfg80211 *cfg, u8 *dst, u16 dst_size)
{
- char tbuf[SUBLOGLEVELZ * ARRAYSIZE(sublogname_map)], sublog[SUBLOGLEVELZ];
- char *params, *token, *colon;
- uint i, tokens, log_on = 0;
- size_t minsize = min_t(size_t, (sizeof(tbuf) - 1), count);
+ struct wl_ie *ie = wl_to_ie(cfg);
+ s32 err = 0;
- bzero(tbuf, sizeof(tbuf));
- bzero(sublog, sizeof(sublog));
- if (copy_from_user(&tbuf, userbuf, minsize)) {
- return -EFAULT;
+ if (unlikely(ie->offset > dst_size)) {
+ WL_ERR(("dst_size is not enough\n"));
+ return -ENOSPC;
}
+ memcpy(dst, &ie->buf[0], ie->offset);
- tbuf[minsize] = '\0';
- params = &tbuf[0];
- colon = strchr(params, '\n');
- if (colon != NULL)
- *colon = '\0';
- while ((token = strsep(¶ms, " ")) != NULL) {
- bzero(sublog, sizeof(sublog));
- if (token == NULL || !*token)
- break;
- if (*token == '\0')
- continue;
- colon = strchr(token, ':');
- if (colon != NULL) {
- *colon = ' ';
- }
- tokens = sscanf(token, "%"S(SUBLOGLEVEL)"s %u", sublog, &log_on);
- if (colon != NULL)
- *colon = ':';
+ return err;
+}
- if (tokens == 2) {
- for (i = 0; i < ARRAYSIZE(sublogname_map); i++) {
- if (!strncmp(sublog, sublogname_map[i].sublogname,
- strlen(sublogname_map[i].sublogname))) {
- if (log_on)
- wl_dbg_level |=
- (sublogname_map[i].log_level);
- else
- wl_dbg_level &=
- ~(sublogname_map[i].log_level);
- }
- }
- } else
- WL_ERR(("%s: can't parse '%s' as a "
- "SUBMODULE:LEVEL (%d tokens)\n",
- tbuf, token, tokens));
+static u32 wl_get_ielen(struct bcm_cfg80211 *cfg)
+{
+ struct wl_ie *ie = wl_to_ie(cfg);
- }
- return count;
+ return ie->offset;
}
-static ssize_t
-wl_debuglevel_read(struct file *file, char __user *user_buf,
- size_t count, loff_t *ppos)
+static void wl_link_up(struct bcm_cfg80211 *cfg)
{
- char *param;
- char tbuf[SUBLOGLEVELZ * ARRAYSIZE(sublogname_map)];
- uint i;
- bzero(tbuf, sizeof(tbuf));
- param = &tbuf[0];
- for (i = 0; i < ARRAYSIZE(sublogname_map); i++) {
- param += snprintf(param, sizeof(tbuf) - 1, "%s:%d ",
- sublogname_map[i].sublogname,
- (wl_dbg_level & sublogname_map[i].log_level) ? 1 : 0);
+ cfg->link_up = true;
+}
+
+static void wl_link_down(struct bcm_cfg80211 *cfg)
+{
+ struct wl_connect_info *conn_info = wl_to_conn(cfg);
+
+ WL_DBG(("In\n"));
+ cfg->link_up = false;
+ if (conn_info) {
+ conn_info->req_ie_len = 0;
+ conn_info->resp_ie_len = 0;
}
- *param = '\n';
- return simple_read_from_buffer(user_buf, count, ppos, tbuf, strlen(&tbuf[0]));
+}
+
+static unsigned long wl_lock_eq(struct bcm_cfg80211 *cfg)
+{
+ unsigned long flags;
+ spin_lock_irqsave(&cfg->eq_lock, flags);
+ return flags;
}
-static const struct file_operations fops_debuglevel = {
- .open = NULL,
- .write = wl_debuglevel_write,
- .read = wl_debuglevel_read,
- .owner = THIS_MODULE,
- .llseek = NULL,
-};
-static s32 wl_setup_debugfs(struct bcm_cfg80211 *cfg)
+static void wl_unlock_eq(struct bcm_cfg80211 *cfg, unsigned long flags)
{
- s32 err = 0;
- struct dentry *_dentry;
- if (!cfg)
- return -EINVAL;
- cfg->debugfs = debugfs_create_dir(KBUILD_MODNAME, NULL);
- if (!cfg->debugfs || IS_ERR(cfg->debugfs)) {
- if (cfg->debugfs == ERR_PTR(-ENODEV))
- WL_ERR(("Debugfs is not enabled on this kernel\n"));
- else
- WL_ERR(("Can not create debugfs directory\n"));
- cfg->debugfs = NULL;
- goto exit;
+ spin_unlock_irqrestore(&cfg->eq_lock, flags);
+}
- }
- _dentry = debugfs_create_file("debug_level", S_IRUSR | S_IWUSR,
- cfg->debugfs, cfg, &fops_debuglevel);
- if (!_dentry || IS_ERR(_dentry)) {
- WL_ERR(("failed to create debug_level debug file\n"));
- wl_free_debugfs(cfg);
- }
-exit:
- return err;
+static void wl_init_eq_lock(struct bcm_cfg80211 *cfg)
+{
+ spin_lock_init(&cfg->eq_lock);
+}
+
+static void wl_delay(u32 ms)
+{
+ if (in_atomic() || (ms < jiffies_to_msecs(1))) {
+ OSL_DELAY(ms*1000);
+ } else {
+ OSL_SLEEP(ms);
+ }
}
-static s32 wl_free_debugfs(struct bcm_cfg80211 *cfg)
+
+s32 wl_cfg80211_get_p2p_dev_addr(struct net_device *net, struct ether_addr *p2pdev_addr)
{
- if (!cfg)
- return -EINVAL;
- if (cfg->debugfs)
- debugfs_remove_recursive(cfg->debugfs);
- cfg->debugfs = NULL;
+ struct bcm_cfg80211 *cfg = wl_get_cfg(net);
+ struct ether_addr primary_mac;
+ if (!cfg->p2p)
+ return -1;
+ if (!p2p_is_on(cfg)) {
+ get_primary_mac(cfg, &primary_mac);
+ wl_cfgp2p_generate_bss_mac(cfg, &primary_mac);
+ } else {
+ memcpy(p2pdev_addr->octet, wl_to_p2p_bss_macaddr(cfg, P2PAPI_BSSCFG_DEVICE).octet,
+ ETHER_ADDR_LEN);
+ }
+
return 0;
}
-#endif /* DEBUGFS_CFG80211 */
-
-struct bcm_cfg80211 *wl_cfg80211_get_bcmcfg(void)
+s32 wl_cfg80211_set_p2p_noa(struct net_device *net, char* buf, int len)
{
- return g_bcmcfg;
+ struct bcm_cfg80211 *cfg = wl_get_cfg(net);
+
+ return wl_cfgp2p_set_p2p_noa(cfg, net, buf, len);
}
-void wl_cfg80211_set_bcmcfg(struct bcm_cfg80211 *cfg)
+s32 wl_cfg80211_get_p2p_noa(struct net_device *net, char* buf, int len)
{
- g_bcmcfg = cfg;
+ struct bcm_cfg80211 *cfg = wl_get_cfg(net);
+
+ return wl_cfgp2p_get_p2p_noa(cfg, net, buf, len);
}
-struct device *wl_cfg80211_get_parent_dev(void)
+s32 wl_cfg80211_set_p2p_ps(struct net_device *net, char* buf, int len)
{
- return cfg80211_parent_dev;
+ struct bcm_cfg80211 *cfg = wl_get_cfg(net);
+
+ return wl_cfgp2p_set_p2p_ps(cfg, net, buf, len);
}
-void wl_cfg80211_set_parent_dev(void *dev)
+s32 wl_cfg80211_set_p2p_ecsa(struct net_device *net, char* buf, int len)
{
- cfg80211_parent_dev = dev;
+ struct bcm_cfg80211 *cfg = wl_get_cfg(net);
+
+ return wl_cfgp2p_set_p2p_ecsa(cfg, net, buf, len);
}
-static void wl_cfg80211_clear_parent_dev(void)
+s32 wl_cfg80211_increase_p2p_bw(struct net_device *net, char* buf, int len)
{
- cfg80211_parent_dev = NULL;
+ struct bcm_cfg80211 *cfg = wl_get_cfg(net);
+
+ return wl_cfgp2p_increase_p2p_bw(cfg, net, buf, len);
}
-void get_primary_mac(struct bcm_cfg80211 *cfg, struct ether_addr *mac)
+#ifdef P2PLISTEN_AP_SAMECHN
+s32 wl_cfg80211_set_p2p_resp_ap_chn(struct net_device *net, s32 enable)
{
- u8 ioctl_buf[WLC_IOCTL_SMLEN];
+ s32 ret = wldev_iovar_setint(net, "p2p_resp_ap_chn", enable);
- if (wldev_iovar_getbuf_bsscfg(bcmcfg_to_prmry_ndev(cfg),
- "cur_etheraddr", NULL, 0, ioctl_buf, sizeof(ioctl_buf),
- 0, NULL) == BCME_OK) {
- memcpy(mac->octet, ioctl_buf, ETHER_ADDR_LEN);
- } else {
- bzero(mac->octet, ETHER_ADDR_LEN);
+ if ((ret == 0) && enable) {
+ /* disable PM for p2p responding on infra AP channel */
+ s32 pm = PM_OFF;
+
+ ret = wldev_ioctl_set(net, WLC_SET_PM, &pm, sizeof(pm));
}
+
+ return ret;
}
-static bool check_dev_role_integrity(struct bcm_cfg80211 *cfg, u32 dev_role)
+#endif /* P2PLISTEN_AP_SAMECHN */
+
+s32 wl_cfg80211_channel_to_freq(u32 channel)
{
- dhd_pub_t *dhd = (dhd_pub_t *)(cfg->pub);
- if (((dev_role == NL80211_IFTYPE_AP) &&
- !(dhd->op_mode & DHD_FLAG_HOSTAP_MODE)) ||
- ((dev_role == NL80211_IFTYPE_P2P_GO) &&
- !(dhd->op_mode & DHD_FLAG_P2P_GO_MODE)))
+ int freq = 0;
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 39)
+ freq = ieee80211_channel_to_frequency(channel);
+#else
{
- WL_ERR(("device role select failed role:%d op_mode:%d \n", dev_role, dhd->op_mode));
- return false;
+ u16 band = 0;
+ if (channel <= CH_MAX_2G_CHANNEL)
+ band = IEEE80211_BAND_2GHZ;
+ else
+ band = IEEE80211_BAND_5GHZ;
+ freq = ieee80211_channel_to_frequency(channel, band);
}
- return true;
+#endif
+ return freq;
}
-int wl_cfg80211_do_driver_init(struct net_device *net)
-{
- struct bcm_cfg80211 *cfg = *(struct bcm_cfg80211 **)netdev_priv(net);
- if (!cfg || !cfg->wdev)
- return -EINVAL;
+#ifdef WLTDLS
+static s32
+wl_tdls_event_handler(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev,
+ const wl_event_msg_t *e, void *data) {
- if (dhd_do_driver_init(cfg->wdev->netdev) < 0)
- return -1;
+ struct net_device *ndev = NULL;
+ u32 reason = ntoh32(e->reason);
+ s8 *msg = NULL;
+
+ ndev = cfgdev_to_wlc_ndev(cfgdev, cfg);
+ switch (reason) {
+ case WLC_E_TDLS_PEER_DISCOVERED :
+ msg = " TDLS PEER DISCOVERD ";
+ break;
+ case WLC_E_TDLS_PEER_CONNECTED :
+ if (cfg->tdls_mgmt_frame) {
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 18, 0))
+ cfg80211_rx_mgmt(cfgdev, cfg->tdls_mgmt_freq, 0,
+ cfg->tdls_mgmt_frame, cfg->tdls_mgmt_frame_len, 0);
+#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 12, 0))
+ cfg80211_rx_mgmt(cfgdev, cfg->tdls_mgmt_freq, 0,
+ cfg->tdls_mgmt_frame, cfg->tdls_mgmt_frame_len, 0,
+ GFP_ATOMIC);
+#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 4, 0)) || \
+ defined(WL_COMPAT_WIRELESS)
+ cfg80211_rx_mgmt(cfgdev, cfg->tdls_mgmt_freq, 0,
+ cfg->tdls_mgmt_frame, cfg->tdls_mgmt_frame_len,
+ GFP_ATOMIC);
+#else
+ cfg80211_rx_mgmt(cfgdev, cfg->tdls_mgmt_freq,
+ cfg->tdls_mgmt_frame, cfg->tdls_mgmt_frame_len, GFP_ATOMIC);
+#endif
+ }
+ msg = " TDLS PEER CONNECTED ";
+#ifdef SUPPORT_SET_CAC
+ /* TDLS connect reset CAC */
+ wl_cfg80211_set_cac(cfg, 0);
+#endif /* SUPPORT_SET_CAC */
+ break;
+ case WLC_E_TDLS_PEER_DISCONNECTED :
+ if (cfg->tdls_mgmt_frame) {
+ kfree(cfg->tdls_mgmt_frame);
+ cfg->tdls_mgmt_frame = NULL;
+ cfg->tdls_mgmt_freq = 0;
+ }
+ msg = "TDLS PEER DISCONNECTED ";
+#ifdef SUPPORT_SET_CAC
+ /* TDLS disconnect, set CAC */
+ wl_cfg80211_set_cac(cfg, 1);
+#endif /* SUPPORT_SET_CAC */
+ break;
+ }
+ if (msg) {
+ WL_ERR(("%s: " MACDBG " on %s ndev\n", msg, MAC2STRDBG((const u8*)(&e->addr)),
+ (bcmcfg_to_prmry_ndev(cfg) == ndev) ? "primary" : "secondary"));
+ }
return 0;
-}
-void wl_cfg80211_enable_trace(u32 level)
-{
- wl_dbg_level = level;
- WL_MSG("wlan", "wl_dbg_level = 0x%x\n", wl_dbg_level);
}
+#endif /* WLTDLS */
-#if defined(WL_SUPPORT_BACKPORTED_KPATCHES) || (LINUX_VERSION_CODE >= KERNEL_VERSION(3, \
- 2, 0))
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(3, 2, 0))
static s32
-wl_cfg80211_mgmt_tx_cancel_wait(struct wiphy *wiphy,
- bcm_struct_cfgdev *cfgdev, u64 cookie)
+#if (defined(CONFIG_ARCH_MSM) && defined(TDLS_MGMT_VERSION2)) || (LINUX_VERSION_CODE < \
+ KERNEL_VERSION(3, 16, 0) && LINUX_VERSION_CODE >= KERNEL_VERSION(3, 15, 0))
+wl_cfg80211_tdls_mgmt(struct wiphy *wiphy, struct net_device *dev,
+ u8 *peer, u8 action_code, u8 dialog_token, u16 status_code,
+ u32 peer_capability, const u8 *buf, size_t len)
+#elif ((LINUX_VERSION_CODE >= KERNEL_VERSION(3, 16, 0)) && \
+ (LINUX_VERSION_CODE < KERNEL_VERSION(3, 18, 0)))
+wl_cfg80211_tdls_mgmt(struct wiphy *wiphy, struct net_device *dev,
+ const u8 *peer, u8 action_code, u8 dialog_token, u16 status_code,
+ u32 peer_capability, const u8 *buf, size_t len)
+#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 18, 0))
+wl_cfg80211_tdls_mgmt(struct wiphy *wiphy, struct net_device *dev,
+ const u8 *peer, u8 action_code, u8 dialog_token, u16 status_code,
+ u32 peer_capability, bool initiator, const u8 *buf, size_t len)
+#else /* CONFIG_ARCH_MSM && TDLS_MGMT_VERSION2 */
+wl_cfg80211_tdls_mgmt(struct wiphy *wiphy, struct net_device *dev,
+ u8 *peer, u8 action_code, u8 dialog_token, u16 status_code,
+ const u8 *buf, size_t len)
+#endif /* CONFIG_ARCH_MSM && TDLS_MGMT_VERSION2 */
{
- /* CFG80211 checks for tx_cancel_wait callback when ATTR_DURATION
- * is passed with CMD_FRAME. This callback is supposed to cancel
- * the OFFCHANNEL Wait. Since we are already taking care of that
- * with the tx_mgmt logic, do nothing here.
+ s32 ret = 0;
+#ifdef WLTDLS
+ struct bcm_cfg80211 *cfg;
+ tdls_wfd_ie_iovar_t info;
+ memset(&info, 0, sizeof(tdls_wfd_ie_iovar_t));
+ cfg = wl_get_cfg(dev);
+
+#if defined(CONFIG_ARCH_MSM) && defined(TDLS_MGMT_VERSION2)
+ /* Some customer platforms back-ported this feature from kernel 3.15 to kernel 3.10,
+ * and that causes a build error
*/
+ BCM_REFERENCE(peer_capability);
+#endif /* CONFIG_ARCH_MSM && TDLS_MGMT_VERSION2 */
- return 0;
+ switch (action_code) {
+ /* We need to set TDLS Wifi Display IE to firmware
+ * using tdls_wfd_ie iovar
+ */
+ case WLAN_TDLS_SET_PROBE_WFD_IE:
+ WL_ERR(("%s WLAN_TDLS_SET_PROBE_WFD_IE\n", __FUNCTION__));
+ info.mode = TDLS_WFD_PROBE_IE_TX;
+ memcpy(&info.data, buf, len);
+ info.length = len;
+ break;
+ case WLAN_TDLS_SET_SETUP_WFD_IE:
+ WL_ERR(("%s WLAN_TDLS_SET_SETUP_WFD_IE\n", __FUNCTION__));
+ info.mode = TDLS_WFD_IE_TX;
+ memcpy(&info.data, buf, len);
+ info.length = len;
+ break;
+ case WLAN_TDLS_SET_WFD_ENABLED:
+ WL_ERR(("%s WLAN_TDLS_SET_MODE_WFD_ENABLED\n", __FUNCTION__));
+ dhd_tdls_set_mode((dhd_pub_t *)(cfg->pub), true);
+ goto out;
+ case WLAN_TDLS_SET_WFD_DISABLED:
+ WL_ERR(("%s WLAN_TDLS_SET_MODE_WFD_DISABLED\n", __FUNCTION__));
+ dhd_tdls_set_mode((dhd_pub_t *)(cfg->pub), false);
+ goto out;
+ default:
+ WL_ERR(("Unsupported action code : %d\n", action_code));
+ goto out;
+ }
+ ret = wldev_iovar_setbuf(dev, "tdls_wfd_ie", &info, sizeof(info),
+ cfg->ioctl_buf, WLC_IOCTL_MAXLEN, &cfg->ioctl_buf_sync);
+
+ if (ret) {
+ WL_ERR(("tdls_wfd_ie error %d\n", ret));
+ }
+
+out:
+#endif /* WLTDLS */
+ return ret;
}
-#endif /* WL_SUPPORT_BACKPORTED_PATCHES || KERNEL >= 3.2.0 */
-#ifdef WL_HOST_BAND_MGMT
-s32
-wl_cfg80211_set_band(struct net_device *ndev, int band)
+static s32
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 16, 0))
+wl_cfg80211_tdls_oper(struct wiphy *wiphy, struct net_device *dev,
+ const u8 *peer, enum nl80211_tdls_operation oper)
+#else
+wl_cfg80211_tdls_oper(struct wiphy *wiphy, struct net_device *dev,
+ u8 *peer, enum nl80211_tdls_operation oper)
+#endif
{
- struct bcm_cfg80211 *cfg = wl_get_cfg(ndev);
- int ret = 0;
- char ioctl_buf[50];
-
- if ((band < WLC_BAND_AUTO) || (band > WLC_BAND_2G)) {
- WL_ERR(("Invalid band\n"));
- return -EINVAL;
+ s32 ret = 0;
+#ifdef WLTDLS
+ struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
+ tdls_iovar_t info;
+ dhd_pub_t *dhdp;
+ bool tdls_auto_mode = false;
+ dhdp = (dhd_pub_t *)(cfg->pub);
+ memset(&info, 0, sizeof(tdls_iovar_t));
+ if (peer) {
+ memcpy(&info.ea, peer, ETHER_ADDR_LEN);
+ } else {
+ return -1;
+ }
+ switch (oper) {
+ case NL80211_TDLS_DISCOVERY_REQ:
+ /* If the discovery request is broadcast then we need to set
+ * info.mode to Tunneled Probe Request
+ */
+ if (memcmp(peer, (const uint8 *)BSSID_BROADCAST, ETHER_ADDR_LEN) == 0) {
+ info.mode = TDLS_MANUAL_EP_WFD_TPQ;
+ WL_ERR(("%s TDLS TUNNELED PRBOBE REQUEST\n", __FUNCTION__));
+ } else {
+ info.mode = TDLS_MANUAL_EP_DISCOVERY;
+ }
+ break;
+ case NL80211_TDLS_SETUP:
+ if (dhdp->tdls_mode == true) {
+ info.mode = TDLS_MANUAL_EP_CREATE;
+ tdls_auto_mode = false;
+ /* Do tear down and create a fresh one */
+ ret = wl_cfg80211_tdls_config(cfg, TDLS_STATE_TEARDOWN, tdls_auto_mode);
+ if (ret < 0) {
+ return ret;
+ }
+ } else {
+ tdls_auto_mode = true;
+ }
+ break;
+ case NL80211_TDLS_TEARDOWN:
+ info.mode = TDLS_MANUAL_EP_DELETE;
+ break;
+ default:
+ WL_ERR(("Unsupported operation : %d\n", oper));
+ goto out;
}
-
- if ((ret = wldev_iovar_setbuf(ndev, "roam_band", &band,
- sizeof(int), ioctl_buf, sizeof(ioctl_buf), NULL)) < 0) {
- WL_ERR(("seting roam_band failed code=%d\n", ret));
+ /* turn on TDLS */
+ ret = wl_cfg80211_tdls_config(cfg, TDLS_STATE_SETUP, tdls_auto_mode);
+ if (ret < 0) {
return ret;
}
-
- WL_DBG(("Setting band to %d\n", band));
- cfg->curr_band = band;
-
- return 0;
+ if (info.mode) {
+ ret = wldev_iovar_setbuf(dev, "tdls_endpoint", &info, sizeof(info),
+ cfg->ioctl_buf, WLC_IOCTL_MAXLEN, &cfg->ioctl_buf_sync);
+ if (ret) {
+ WL_ERR(("tdls_endpoint error %d\n", ret));
+ }
+ }
+out:
+ if (ret) {
+ return -ENOTSUPP;
+ }
+#endif /* WLTDLS */
+ return ret;
}
-#endif /* WL_HOST_BAND_MGMT */
+#endif
-s32
-wl_cfg80211_set_if_band(struct net_device *ndev, int band)
+/*
+ * This function returns the number of bytes written.
+ * In case of failure it returns zero, not a BCME error code.
+ */
+s32 wl_cfg80211_set_wps_p2p_ie(struct net_device *ndev, char *buf, int len,
+ enum wl_management_type type)
{
- struct bcm_cfg80211 *cfg = wl_get_cfg(ndev);
- int ret = 0, wait_cnt;
- char ioctl_buf[32];
+ struct bcm_cfg80211 *cfg;
+ s32 ret = 0;
+ struct ether_addr primary_mac;
+ s32 bssidx = 0;
+ s32 pktflag = 0;
+ cfg = wl_get_cfg(ndev);
- if ((band < WLC_BAND_AUTO) || (band > WLC_BAND_2G)) {
- WL_ERR(("Invalid band\n"));
- return -EINVAL;
- }
- if (wl_get_drv_status(cfg, CONNECTED, ndev)) {
- dhd_pub_t *dhdp = (dhd_pub_t *)(cfg->pub);
- BCM_REFERENCE(dhdp);
- DHD_STATLOG_CTRL(dhdp, ST(DISASSOC_INT_START),
- dhd_net2idx(dhdp->info, ndev), 0);
- ret = wldev_ioctl_set(ndev, WLC_DISASSOC, NULL, 0);
- if (ret < 0) {
- WL_ERR(("WLC_DISASSOC error %d\n", ret));
- /* continue to set 'if_band' */
- }
- else {
- /* This is to ensure that 'if_band' iovar is issued only after
- * disconnection is completed
- */
- wait_cnt = WAIT_FOR_DISCONNECT_MAX;
- while (wl_get_drv_status(cfg, CONNECTED, ndev) && wait_cnt) {
- WL_DBG(("Wait until disconnected. wait_cnt: %d\n", wait_cnt));
- wait_cnt--;
- OSL_SLEEP(50);
+ if (wl_get_drv_status(cfg, AP_CREATING, ndev)) {
+ /* Vendor IEs should be set to FW
+ * after SoftAP interface is brought up
+ */
+ WL_DBG(("Skipping set IE since AP is not up \n"));
+ goto exit;
+ } else if (ndev == bcmcfg_to_prmry_ndev(cfg)) {
+ /* Either stand alone AP case or P2P discovery */
+ if (wl_get_drv_status(cfg, AP_CREATED, ndev)) {
+ /* Stand alone AP case on primary interface */
+ WL_DBG(("Apply IEs for Primary AP Interface \n"));
+ bssidx = 0;
+ } else {
+ if (!cfg->p2p) {
+ /* If p2p not initialized, return failure */
+ WL_ERR(("P2P not initialized \n"));
+ goto exit;
+ }
+ /* P2P Discovery case (p2p listen) */
+ if (!cfg->p2p->on) {
+ /* Turn on Discovery interface */
+ get_primary_mac(cfg, &primary_mac);
+ wl_cfgp2p_generate_bss_mac(cfg, &primary_mac);
+ p2p_on(cfg) = true;
+ ret = wl_cfgp2p_enable_discovery(cfg, ndev, NULL, 0);
+ if (unlikely(ret)) {
+ WL_ERR(("Enable discovery failed \n"));
+ goto exit;
+ }
}
+ WL_DBG(("Apply IEs for P2P Discovery Iface \n"));
+ ndev = wl_to_p2p_bss_ndev(cfg, P2PAPI_BSSCFG_PRIMARY);
+ bssidx = wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_DEVICE);
}
+ } else {
+ /* Virtual AP/ P2P Group Interface */
+ WL_DBG(("Apply IEs for iface:%s\n", ndev->name));
+ bssidx = wl_get_bssidx_by_wdev(cfg, ndev->ieee80211_ptr);
}
- if ((ret = wldev_iovar_setbuf(ndev, "if_band", &band,
- sizeof(int), ioctl_buf, sizeof(ioctl_buf), NULL)) < 0) {
- WL_ERR(("seting if_band failed ret=%d\n", ret));
- /* issue 'WLC_SET_BAND' if if_band is not supported */
- if (ret == BCME_UNSUPPORTED) {
- ret = wldev_set_band(ndev, band);
- if (ret < 0) {
- WL_ERR(("seting band failed ret=%d\n", ret));
- }
+
+ if (ndev != NULL) {
+ switch (type) {
+ case WL_BEACON:
+ pktflag = VNDR_IE_BEACON_FLAG;
+ break;
+ case WL_PROBE_RESP:
+ pktflag = VNDR_IE_PRBRSP_FLAG;
+ break;
+ case WL_ASSOC_RESP:
+ pktflag = VNDR_IE_ASSOCRSP_FLAG;
+ break;
+ }
+ if (pktflag) {
+ ret = wl_cfg80211_set_mgmt_vndr_ies(cfg,
+ ndev_to_cfgdev(ndev), bssidx, pktflag, buf, len);
}
}
+exit:
return ret;
}
-s32
-wl_cfg80211_dfs_ap_move(struct net_device *ndev, char *data, char *command, int total_len)
+#ifdef WL_SUPPORT_AUTO_CHANNEL
+static s32
+wl_cfg80211_set_auto_channel_scan_state(struct net_device *ndev)
{
- char ioctl_buf[WLC_IOCTL_SMLEN];
- int err = 0;
- uint32 val = 0;
- chanspec_t chanspec = 0;
- int abort;
- int bytes_written = 0;
- struct wl_dfs_ap_move_status_v2 *status;
- char chanbuf[CHANSPEC_STR_LEN];
- const char *dfs_state_str[DFS_SCAN_S_MAX] = {
- "Radar Free On Channel",
- "Radar Found On Channel",
- "Radar Scan In Progress",
- "Radar Scan Aborted",
- "RSDB Mode switch in Progress For Scan"
- };
- if (ndev->ieee80211_ptr->iftype != NL80211_IFTYPE_AP) {
- bytes_written = snprintf(command, total_len, "AP is not up\n");
- return bytes_written;
- }
- if (!*data) {
- if ((err = wldev_iovar_getbuf(ndev, "dfs_ap_move", NULL, 0,
- ioctl_buf, sizeof(ioctl_buf), NULL))) {
- WL_ERR(("setting dfs_ap_move failed with err=%d \n", err));
- return err;
- }
- status = (struct wl_dfs_ap_move_status_v2 *)ioctl_buf;
-
- if (status->version != WL_DFS_AP_MOVE_VERSION) {
- err = BCME_UNSUPPORTED;
- WL_ERR(("err=%d version=%d\n", err, status->version));
- return err;
- }
+ u32 val = 0;
+ s32 ret = BCME_ERROR;
+ struct bcm_cfg80211 *cfg = wl_get_cfg(ndev);
+ /* Set interface up, explicitly. */
+ val = 1;
- if (status->move_status != (int8) DFS_SCAN_S_IDLE) {
- chanspec = wl_chspec_driver_to_host(status->chanspec);
- if (chanspec != 0 && chanspec != INVCHANSPEC) {
- wf_chspec_ntoa(chanspec, chanbuf);
- bytes_written = snprintf(command, total_len,
- "AP Target Chanspec %s (0x%x)\n", chanbuf, chanspec);
- }
- bytes_written += snprintf(command + bytes_written,
- total_len - bytes_written,
- "%s\n", dfs_state_str[status->move_status]);
- return bytes_written;
- } else {
- bytes_written = snprintf(command, total_len, "dfs AP move in IDLE state\n");
- return bytes_written;
- }
+ ret = wldev_ioctl_set(ndev, WLC_UP, (void *)&val, sizeof(val));
+ if (ret < 0) {
+ WL_ERR(("set interface up failed, error = %d\n", ret));
+ goto done;
}
- abort = bcm_atoi(data);
- if (abort == -1) {
- if ((err = wldev_iovar_setbuf(ndev, "dfs_ap_move", &abort,
- sizeof(int), ioctl_buf, sizeof(ioctl_buf), NULL)) < 0) {
- WL_ERR(("seting dfs_ap_move failed with err %d\n", err));
- return err;
- }
- } else {
- chanspec = wf_chspec_aton(data);
- if (chanspec != 0) {
- val = wl_chspec_host_to_driver(chanspec);
- if (val != INVCHANSPEC) {
- if ((err = wldev_iovar_setbuf(ndev, "dfs_ap_move", &val,
- sizeof(int), ioctl_buf, sizeof(ioctl_buf), NULL)) < 0) {
- WL_ERR(("seting dfs_ap_move failed with err %d\n", err));
- return err;
- }
- WL_DBG((" set dfs_ap_move successfull"));
- } else {
- err = BCME_USAGE_ERROR;
- }
- }
+ /* Stop all scan explicitly, till auto channel selection complete. */
+ wl_set_drv_status(cfg, SCANNING, ndev);
+ if (cfg->escan_info.ndev == NULL) {
+ ret = BCME_OK;
+ goto done;
}
- return err;
+ ret = wl_notify_escan_complete(cfg, ndev, true, true);
+ if (ret < 0) {
+ WL_ERR(("set scan abort failed, error = %d\n", ret));
+ ret = BCME_OK; // terence 20140115: fix escan_complete error
+ goto done;
+ }
+
+done:
+ return ret;
}
-bool wl_cfg80211_is_concurrent_mode(struct net_device *dev)
+static bool
+wl_cfg80211_valid_channel_p2p(int channel)
{
- struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
- if ((cfg) && (wl_get_drv_status_all(cfg, CONNECTED) > 1)) {
- return true;
- } else {
- return false;
+ bool valid = false;
+
+ /* channel 1 to 14 */
+ if ((channel >= 1) && (channel <= 14)) {
+ valid = true;
+ }
+ /* channel 36 to 48 */
+ else if ((channel >= 36) && (channel <= 48)) {
+ valid = true;
+ }
+ /* channel 149 to 161 */
+ else if ((channel >= 149) && (channel <= 161)) {
+ valid = true;
+ }
+ else {
+ valid = false;
+ WL_INFORM(("invalid P2P chanspec, channel = %d\n", channel));
}
+
+ return valid;
}
-void* wl_cfg80211_get_dhdp(struct net_device *dev)
+s32
+wl_cfg80211_get_chanspecs_2g(struct net_device *ndev, void *buf, s32 buflen)
{
- struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
+ s32 ret = BCME_ERROR;
+ struct bcm_cfg80211 *cfg = NULL;
+ chanspec_t chanspec = 0;
- return cfg->pub;
-}
+ cfg = wl_get_cfg(ndev);
-bool wl_cfg80211_is_p2p_active(struct net_device *dev)
-{
- struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
- return (cfg && cfg->p2p);
-}
+ /* Restrict channels to 2.4GHz, 20MHz BW, no SB. */
+ chanspec |= (WL_CHANSPEC_BAND_2G | WL_CHANSPEC_BW_20 |
+ WL_CHANSPEC_CTL_SB_NONE);
+ chanspec = wl_chspec_host_to_driver(chanspec);
-bool wl_cfg80211_is_roam_offload(struct net_device * dev)
-{
- struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
- return (cfg && cfg->roam_offload);
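+ /* The "chanspecs" iovar fills 'buf' with a wl_uint32_list_t of chanspecs
+ * matching the 2.4GHz/20MHz template above.
+ */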
+ ret = wldev_iovar_getbuf_bsscfg(ndev, "chanspecs", (void *)&chanspec,
+ sizeof(chanspec), buf, buflen, 0, &cfg->ioctl_buf_sync);
+ if (ret < 0) {
+ WL_ERR(("get 'chanspecs' failed, error = %d\n", ret));
+ }
+
+ return ret;
}
-bool wl_cfg80211_is_event_from_connected_bssid(struct net_device * dev, const wl_event_msg_t *e,
- int ifidx)
+s32
+wl_cfg80211_get_chanspecs_5g(struct net_device *ndev, void *buf, s32 buflen)
{
- u8 *curbssid = NULL;
- struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
+ u32 channel = 0;
+ s32 ret = BCME_ERROR;
+ s32 i = 0;
+ s32 j = 0;
+ struct bcm_cfg80211 *cfg = wl_get_cfg(ndev);
+ wl_uint32_list_t *list = NULL;
+ chanspec_t chanspec = 0;
- if (!cfg) {
- /* When interface is created using wl
- * ndev->ieee80211_ptr will be NULL.
- */
- return NULL;
- }
- curbssid = wl_read_prof(cfg, dev, WL_PROF_BSSID);
+ /* Restrict channels to 5GHz, 20MHz BW, no SB. */
+ chanspec |= (WL_CHANSPEC_BAND_5G | WL_CHANSPEC_BW_20 |
+ WL_CHANSPEC_CTL_SB_NONE);
+ chanspec = wl_chspec_host_to_driver(chanspec);
- if (memcmp(curbssid, &e->addr, ETHER_ADDR_LEN) == 0) {
- return true;
+ ret = wldev_iovar_getbuf_bsscfg(ndev, "chanspecs", (void *)&chanspec,
+ sizeof(chanspec), buf, buflen, 0, &cfg->ioctl_buf_sync);
+ if (ret < 0) {
+ WL_ERR(("get 'chanspecs' failed, error = %d\n", ret));
+ goto done;
}
- return false;
-}
-static void wl_cfg80211_work_handler(struct work_struct * work)
-{
- struct bcm_cfg80211 *cfg = NULL;
- struct net_info *iter, *next;
- s32 err = BCME_OK;
- s32 pm = PM_FAST;
- dhd_pub_t *dhd;
- BCM_SET_CONTAINER_OF(cfg, work, struct bcm_cfg80211, pm_enable_work.work);
- WL_DBG(("Enter \n"));
- GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
- for_each_ndev(cfg, iter, next) {
- GCC_DIAGNOSTIC_POP();
- /* p2p discovery iface ndev could be null */
- if (iter->ndev) {
- if (!wl_get_drv_status(cfg, CONNECTED, iter->ndev) ||
- (wl_get_mode_by_netdev(cfg, iter->ndev) != WL_MODE_BSS &&
- wl_get_mode_by_netdev(cfg, iter->ndev) != WL_MODE_IBSS))
- continue;
- if (iter->ndev) {
- dhd = (dhd_pub_t *)(cfg->pub);
- if (dhd_conf_get_pm(dhd) >= 0)
- pm = dhd_conf_get_pm(dhd);
- if ((err = wldev_ioctl_set(iter->ndev, WLC_SET_PM,
- &pm, sizeof(pm))) != 0) {
- if (err == -ENODEV)
- WL_DBG(("%s:netdev not ready\n",
- iter->ndev->name));
- else
- WL_ERR(("%s:error (%d)\n",
- iter->ndev->name, err));
- } else
- wl_cfg80211_update_power_mode(iter->ndev);
- }
+ list = (wl_uint32_list_t *)buf;
+ /* Skip DFS and invalid P2P channels. */
+ for (i = 0, j = 0; i < dtoh32(list->count); i++) {
+ chanspec = (chanspec_t) dtoh32(list->element[i]);
+ channel = CHSPEC_CHANNEL(chanspec);
+
+ ret = wldev_iovar_getint(ndev, "per_chan_info", &channel);
+ if (ret < 0) {
+ WL_ERR(("get 'per_chan_info' failed, error = %d\n", ret));
+ goto done;
+ }
+
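+ /* 'per_chan_info' replaces 'channel' with a channel attribute bitmap
+ * (radar/passive flags), which CHANNEL_IS_RADAR() checks below.
+ */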
+ if (CHANNEL_IS_RADAR(channel) ||
+ !(wl_cfg80211_valid_channel_p2p(CHSPEC_CHANNEL(chanspec)))) {
+ continue;
+ } else {
+ list->element[j] = list->element[i];
}
+
+ j++;
}
- DHD_PM_WAKE_UNLOCK(cfg->pub);
-}
-u8
-wl_get_action_category(void *frame, u32 frame_len)
-{
- u8 category;
- u8 *ptr = (u8 *)frame;
- if (frame == NULL)
- return DOT11_ACTION_CAT_ERR_MASK;
- if (frame_len < DOT11_ACTION_HDR_LEN)
- return DOT11_ACTION_CAT_ERR_MASK;
- category = ptr[DOT11_ACTION_CAT_OFF];
- WL_DBG(("Action Category: %d\n", category));
- return category;
-}
+ list->count = j;
-int
-wl_get_public_action(void *frame, u32 frame_len, u8 *ret_action)
-{
- u8 *ptr = (u8 *)frame;
- if (frame == NULL || ret_action == NULL)
- return BCME_ERROR;
- if (frame_len < DOT11_ACTION_HDR_LEN)
- return BCME_ERROR;
- if (DOT11_ACTION_CAT_PUBLIC != wl_get_action_category(frame, frame_len))
- return BCME_ERROR;
- *ret_action = ptr[DOT11_ACTION_ACT_OFF];
- WL_DBG(("Public Action : %d\n", *ret_action));
- return BCME_OK;
+done:
+ return ret;
}
-#ifdef WLFBT
-int
-wl_cfg80211_get_fbt_key(struct net_device *dev, uint8 *key, int total_len)
+static s32
+wl_cfg80211_get_best_channel(struct net_device *ndev, void *buf, int buflen,
+ int *channel)
{
- struct bcm_cfg80211 * cfg = wl_get_cfg(dev);
- int bytes_written = -1;
+ s32 ret = BCME_ERROR;
+ int chosen = 0;
+ int retry = 0;
+ uint chip;
- if (total_len < FBT_KEYLEN) {
- WL_ERR(("wl_cfg80211_get_fbt_key: Insufficient buffer \n"));
- goto end;
+ /* Start auto channel selection scan. */
+ ret = wldev_ioctl_set(ndev, WLC_START_CHANNEL_SEL, buf, buflen);
+ if (ret < 0) {
+ WL_ERR(("can't start auto channel scan, error = %d\n", ret));
+ *channel = 0;
+ goto done;
}
- if (cfg) {
- memcpy(key, cfg->fbt_key, FBT_KEYLEN);
- bytes_written = FBT_KEYLEN;
- } else {
- bzero(key, FBT_KEYLEN);
- WL_ERR(("wl_cfg80211_get_fbt_key: Failed to copy KCK and KEK \n"));
+
+ /* Wait for auto channel selection, worst case possible delay is 5250ms. */
+ retry = CHAN_SEL_RETRY_COUNT;
+
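+ /* Newer chips report 'chosen' as a chanspec; 43362/4330/43143 report a
+ * plain channel number, hence the chip check below.
+ */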
+ while (retry--) {
+ OSL_SLEEP(CHAN_SEL_IOCTL_DELAY);
+ chosen = 0;
+ ret = wldev_ioctl_get(ndev, WLC_GET_CHANNEL_SEL, &chosen, sizeof(chosen));
+ if ((ret == 0) && (dtoh32(chosen) != 0)) {
+ chip = dhd_conf_get_chip(dhd_get_pub(ndev));
+ if (chip != BCM43362_CHIP_ID && chip != BCM4330_CHIP_ID &&
+ chip != BCM43143_CHIP_ID) {
+ u32 chanspec = 0;
+ int ctl_chan;
+ chanspec = wl_chspec_driver_to_host(chosen);
+ WL_INFORM(("selected chanspec = 0x%x\n", chanspec));
+ ctl_chan = wf_chspec_ctlchan(chanspec);
+ WL_INFORM(("selected ctl_chan = %d\n", ctl_chan));
+ *channel = (u16)(ctl_chan & 0x00FF);
+ } else
+ *channel = (u16)(chosen & 0x00FF);
+ WL_INFORM(("selected channel = %d\n", *channel));
+ break;
+ }
+ WL_INFORM(("attempt = %d, ret = %d, chosen = %d\n",
+ (CHAN_SEL_RETRY_COUNT - retry), ret, dtoh32(chosen)));
}
- prhex("KCK, KEK", (uchar *)key, FBT_KEYLEN);
-end:
- return bytes_written;
+
+ if (retry <= 0) {
+ WL_ERR(("failure, auto channel selection timed out\n"));
+ *channel = 0;
+ ret = BCME_ERROR;
+ }
+ WL_INFORM(("selected channel = %d\n", *channel));
+
+done:
+ return ret;
}
-#endif /* WLFBT */
-static int
-wl_cfg80211_delayed_roam(struct bcm_cfg80211 *cfg, struct net_device *ndev,
- const struct ether_addr *bssid)
+static s32
+wl_cfg80211_restore_auto_channel_scan_state(struct net_device *ndev)
{
- s32 err;
- wl_event_msg_t e;
-
- bzero(&e, sizeof(e));
- e.event_type = cpu_to_be32(WLC_E_ROAM);
- memcpy(&e.addr, bssid, ETHER_ADDR_LEN);
- /* trigger the roam event handler */
- err = wl_notify_roaming_status(cfg, ndev_to_cfgdev(ndev), &e, NULL);
+ struct bcm_cfg80211 *cfg = wl_get_cfg(ndev);
+ /* Clear scan stop driver status. */
+ wl_clr_drv_status(cfg, SCANNING, ndev);
- return err;
+ return BCME_OK;
}
-static s32
-wl_cfg80211_parse_vndr_ies(const u8 *parse, u32 len,
- struct parsed_vndr_ies *vndr_ies)
+s32
+wl_cfg80211_get_best_channels(struct net_device *dev, char* cmd, int total_len)
{
- s32 err = BCME_OK;
- const vndr_ie_t *vndrie;
- const bcm_tlv_t *ie;
- struct parsed_vndr_ie_info *parsed_info;
- u32 count = 0;
- u32 remained_len;
+ int channel = 0, band, band_cur;
+ s32 ret = BCME_ERROR;
+ u8 *buf = NULL;
+ char *pos = cmd;
+ struct bcm_cfg80211 *cfg = NULL;
+ struct net_device *ndev = NULL;
- remained_len = len;
- bzero(vndr_ies, sizeof(*vndr_ies));
+ memset(cmd, 0, total_len);
- WL_DBG(("---> len %d\n", len));
- ie = (const bcm_tlv_t *) parse;
- if (!bcm_valid_tlv(ie, remained_len))
- ie = NULL;
- while (ie) {
- if (count >= MAX_VNDR_IE_NUMBER)
- break;
- if (ie->id == DOT11_MNG_VS_ID || (ie->id == DOT11_MNG_ID_EXT_ID)) {
- vndrie = (const vndr_ie_t *) ie;
- if (ie->id == DOT11_MNG_ID_EXT_ID) {
- /* len should be bigger than sizeof ID extn field at least */
- if (vndrie->len < MIN_VENDOR_EXTN_IE_LEN) {
- WL_ERR(("%s: invalid vndr extn ie."
- " length %d\n",
- __FUNCTION__, vndrie->len));
- goto end;
- }
- } else {
- /* len should be bigger than OUI length +
- * one data length at least
- */
- if (vndrie->len < (VNDR_IE_MIN_LEN + 1)) {
- WL_ERR(("wl_cfg80211_parse_vndr_ies:"
- " invalid vndr ie. length is too small %d\n",
- vndrie->len));
- goto end;
- }
+ buf = kmalloc(CHANSPEC_BUF_SIZE, GFP_KERNEL);
+ if (buf == NULL) {
+ WL_ERR(("failed to allocate chanspec buffer\n"));
+ return -ENOMEM;
+ }
+
+ /*
+ * Always use primary interface, irrespective of interface on which
+ * command came.
+ */
+ cfg = wl_get_cfg(dev);
+ ndev = bcmcfg_to_prmry_ndev(cfg);
+
+ /*
+ * Make sure that FW and driver are in right state to do auto channel
+ * selection scan.
+ */
+ ret = wl_cfg80211_set_auto_channel_scan_state(ndev);
+ if (ret < 0) {
+ WL_ERR(("can't set auto channel scan state, error = %d\n", ret));
+ goto done;
+ }
- /* if wpa or wme ie, do not add ie */
- if (!bcmp(vndrie->oui, (u8*)WPA_OUI, WPA_OUI_LEN) &&
- ((vndrie->data[0] == WPA_OUI_TYPE) ||
- (vndrie->data[0] == WME_OUI_TYPE))) {
- CFGP2P_DBG(("Found WPA/WME oui. Do not add it\n"));
- goto end;
- }
- }
+ ret = wldev_ioctl(dev, WLC_GET_BAND, &band_cur, sizeof(band_cur), false);
+ if (band_cur != WLC_BAND_5G) {
+ /* Best channel selection in 2.4GHz band. */
+ ret = wl_cfg80211_get_chanspecs_2g(ndev, (void *)buf, CHANSPEC_BUF_SIZE);
+ if (ret < 0) {
+ WL_ERR(("can't get chanspecs in 2.4GHz, error = %d\n", ret));
+ goto done;
+ }
- parsed_info = &vndr_ies->ie_info[count++];
+ ret = wl_cfg80211_get_best_channel(ndev, (void *)buf, CHANSPEC_BUF_SIZE,
+ &channel);
+ if (ret < 0) {
+ WL_ERR(("can't select best channel scan in 2.4GHz, error = %d\n", ret));
+ goto done;
+ }
- /* save vndr ie information */
- parsed_info->ie_ptr = (const char *)vndrie;
- parsed_info->ie_len = (vndrie->len + TLV_HDR_LEN);
- memcpy(&parsed_info->vndrie, vndrie, sizeof(vndr_ie_t));
- vndr_ies->count = count;
- if (ie->id == DOT11_MNG_ID_EXT_ID) {
- WL_DBG(("\t ** Vendor Extension ie id: 0x%02x, len:%d\n",
- ie->id, parsed_info->ie_len));
- } else {
- WL_DBG(("\t ** OUI "MACOUIDBG", type 0x%02x len:%d\n",
- MACOUI2STRDBG(parsed_info->vndrie.oui),
- parsed_info->vndrie.data[0], parsed_info->ie_len));
- }
+ if (CHANNEL_IS_2G(channel)) {
+#if 0
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 39) && !defined(WL_COMPAT_WIRELESS)
+ channel = ieee80211_channel_to_frequency(channel);
+#else
+ channel = ieee80211_channel_to_frequency(channel, IEEE80211_BAND_2GHZ);
+#endif
+#endif
+ } else {
+ WL_ERR(("invalid 2.4GHz channel, channel = %d\n", channel));
+ channel = 0;
}
-end:
- ie = bcm_next_tlv(ie, &remained_len);
+ pos += snprintf(pos, total_len, "2g=%d ", channel);
}
- return err;
-}
-
-static bool
-wl_vndr_ies_exclude_vndr_oui(struct parsed_vndr_ie_info *vndr_info)
-{
- int i = 0;
- while (exclude_vndr_oui_list[i]) {
- if (!memcmp(vndr_info->vndrie.oui,
- exclude_vndr_oui_list[i],
- DOT11_OUI_LEN)) {
- return TRUE;
+ if (band_cur != WLC_BAND_2G) {
+ // terence 20140120: fix for some chipsets only return 2.4GHz channel (4330b2/43341b0/4339a0)
+ band = band_cur==WLC_BAND_2G ? band_cur : WLC_BAND_5G;
+ ret = wldev_ioctl(dev, WLC_SET_BAND, &band, sizeof(band), true);
+ if (ret < 0) {
+ WL_ERR(("WLC_SET_BAND error %d\n", ret));
+ goto done;
}
- i++;
- }
- return FALSE;
-}
+ /* Best channel selection in 5GHz band. */
+ ret = wl_cfg80211_get_chanspecs_5g(ndev, (void *)buf, CHANSPEC_BUF_SIZE);
+ if (ret < 0) {
+ WL_ERR(("can't get chanspecs in 5GHz, error = %d\n", ret));
+ goto done;
+ }
-static bool
-wl_vndr_ies_check_duplicate_vndr_oui(struct bcm_cfg80211 *cfg,
- struct parsed_vndr_ie_info *vndr_info)
-{
- wl_vndr_oui_entry_t *oui_entry = NULL;
- unsigned long flags;
+ ret = wl_cfg80211_get_best_channel(ndev, (void *)buf, CHANSPEC_BUF_SIZE,
+ &channel);
+ if (ret < 0) {
+ WL_ERR(("can't select best channel scan in 5GHz, error = %d\n", ret));
+ goto done;
+ }
- WL_CFG_VNDR_OUI_SYNC_LOCK(&cfg->vndr_oui_sync, flags);
- GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
- list_for_each_entry(oui_entry, &cfg->vndr_oui_list, list) {
- GCC_DIAGNOSTIC_POP();
- if (!memcmp(oui_entry->oui, vndr_info->vndrie.oui, DOT11_OUI_LEN)) {
- WL_CFG_VNDR_OUI_SYNC_UNLOCK(&cfg->vndr_oui_sync, flags);
- return TRUE;
+ if (CHANNEL_IS_5G(channel)) {
+#if 0
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 39) && !defined(WL_COMPAT_WIRELESS)
+ channel = ieee80211_channel_to_frequency(channel);
+#else
+ channel = ieee80211_channel_to_frequency(channel, IEEE80211_BAND_5GHZ);
+#endif
+#endif
+ } else {
+ WL_ERR(("invalid 5GHz channel, channel = %d\n", channel));
+ channel = 0;
}
- }
- WL_CFG_VNDR_OUI_SYNC_UNLOCK(&cfg->vndr_oui_sync, flags);
- return FALSE;
-}
-static bool
-wl_vndr_ies_add_vendor_oui_list(struct bcm_cfg80211 *cfg,
- struct parsed_vndr_ie_info *vndr_info)
-{
- wl_vndr_oui_entry_t *oui_entry = NULL;
- unsigned long flags;
+ ret = wldev_ioctl(dev, WLC_SET_BAND, &band_cur, sizeof(band_cur), true);
+ if (ret < 0)
+ WL_ERR(("WLC_SET_BAND error %d\n", ret));
+ pos += snprintf(pos, total_len, "5g=%d ", channel);
+ }
- oui_entry = kmalloc(sizeof(*oui_entry), GFP_KERNEL);
- if (oui_entry == NULL) {
- WL_ERR(("alloc failed\n"));
- return FALSE;
+done:
+ if (NULL != buf) {
+ kfree(buf);
}
- memcpy(oui_entry->oui, vndr_info->vndrie.oui, DOT11_OUI_LEN);
+ /* Restore FW and driver back to normal state. */
+ ret = wl_cfg80211_restore_auto_channel_scan_state(ndev);
+ if (ret < 0) {
+ WL_ERR(("can't restore auto channel scan state, error = %d\n", ret));
+ }
- INIT_LIST_HEAD(&oui_entry->list);
- WL_CFG_VNDR_OUI_SYNC_LOCK(&cfg->vndr_oui_sync, flags);
- list_add_tail(&oui_entry->list, &cfg->vndr_oui_list);
- WL_CFG_VNDR_OUI_SYNC_UNLOCK(&cfg->vndr_oui_sync, flags);
+ printf("%s: %s\n", __FUNCTION__, cmd);
- return TRUE;
+ return (pos - cmd);
}
+#endif /* WL_SUPPORT_AUTO_CHANNEL */
-static void
-wl_vndr_ies_clear_vendor_oui_list(struct bcm_cfg80211 *cfg)
+static const struct rfkill_ops wl_rfkill_ops = {
+ .set_block = wl_rfkill_set
+};
+
+static int wl_rfkill_set(void *data, bool blocked)
{
- wl_vndr_oui_entry_t *oui_entry = NULL;
- unsigned long flags;
+ struct bcm_cfg80211 *cfg = (struct bcm_cfg80211 *)data;
- WL_CFG_VNDR_OUI_SYNC_LOCK(&cfg->vndr_oui_sync, flags);
- while (!list_empty(&cfg->vndr_oui_list)) {
- GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
- oui_entry = list_entry(cfg->vndr_oui_list.next, wl_vndr_oui_entry_t, list);
- GCC_DIAGNOSTIC_POP();
- if (oui_entry) {
- list_del(&oui_entry->list);
- kfree(oui_entry);
- }
- }
- WL_CFG_VNDR_OUI_SYNC_UNLOCK(&cfg->vndr_oui_sync, flags);
+ WL_DBG(("Enter \n"));
+ WL_DBG(("RF %s\n", blocked ? "blocked" : "unblocked"));
+
+ if (!cfg)
+ return -EINVAL;
+
+ cfg->rf_blocked = blocked;
+
+ return 0;
}
-static int
-wl_vndr_ies_get_vendor_oui(struct bcm_cfg80211 *cfg, struct net_device *ndev,
- char *vndr_oui, u32 vndr_oui_len)
+static int wl_setup_rfkill(struct bcm_cfg80211 *cfg, bool setup)
{
- int i;
- int vndr_oui_num = 0;
+ s32 err = 0;
- struct wl_connect_info *conn_info = wl_to_conn(cfg);
- wl_vndr_oui_entry_t *oui_entry = NULL;
- struct parsed_vndr_ie_info *vndr_info;
- struct parsed_vndr_ies vndr_ies;
+ WL_DBG(("Enter \n"));
+ if (!cfg)
+ return -EINVAL;
+ if (setup) {
+ cfg->rfkill = rfkill_alloc("brcmfmac-wifi",
+ wl_cfg80211_get_parent_dev(),
+ RFKILL_TYPE_WLAN, &wl_rfkill_ops, (void *)cfg);
- char *pos = vndr_oui;
- u32 remained_buf_len = vndr_oui_len;
- unsigned long flags;
+ if (!cfg->rfkill) {
+ err = -ENOMEM;
+ goto err_out;
+ }
- if (!conn_info->resp_ie_len) {
- return BCME_ERROR;
- }
+ err = rfkill_register(cfg->rfkill);
- wl_vndr_ies_clear_vendor_oui_list(cfg);
+ if (err)
+ rfkill_destroy(cfg->rfkill);
+ } else {
+ if (!cfg->rfkill) {
+ err = -ENOMEM;
+ goto err_out;
+ }
- if ((wl_cfg80211_parse_vndr_ies((u8 *)conn_info->resp_ie,
- conn_info->resp_ie_len, &vndr_ies)) == BCME_OK) {
- for (i = 0; i < vndr_ies.count; i++) {
- vndr_info = &vndr_ies.ie_info[i];
- if (wl_vndr_ies_exclude_vndr_oui(vndr_info)) {
- continue;
- }
+ rfkill_unregister(cfg->rfkill);
+ rfkill_destroy(cfg->rfkill);
+ }
- if (wl_vndr_ies_check_duplicate_vndr_oui(cfg, vndr_info)) {
- continue;
- }
+err_out:
+ return err;
+}
- wl_vndr_ies_add_vendor_oui_list(cfg, vndr_info);
- vndr_oui_num++;
- }
+#ifdef DEBUGFS_CFG80211
+/**
+* Format : echo "SCAN:1 DBG:1" > /sys/kernel/debug/dhd/debug_level
+* to turn on SCAN and DBG log.
+* To turn off SCAN partially, echo "SCAN:0" > /sys/kernel/debug/dhd/debug_level
+* To see current setting of debug level,
+* cat /sys/kernel/debug/dhd/debug_level
+*/
+static ssize_t
+wl_debuglevel_write(struct file *file, const char __user *userbuf,
+ size_t count, loff_t *ppos)
+{
+ char tbuf[S_SUBLOGLEVEL * ARRAYSIZE(sublogname_map)], sublog[S_SUBLOGLEVEL];
+ char *params, *token, *colon;
+ uint i, tokens, log_on = 0;
+ size_t minsize = min_t(size_t, (sizeof(tbuf) - 1), count);
+
+ memset(tbuf, 0, sizeof(tbuf));
+ memset(sublog, 0, sizeof(sublog));
+ if (copy_from_user(&tbuf, userbuf, minsize)) {
+ return -EFAULT;
}
- if (vndr_oui) {
- WL_CFG_VNDR_OUI_SYNC_LOCK(&cfg->vndr_oui_sync, flags);
- GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
- list_for_each_entry(oui_entry, &cfg->vndr_oui_list, list) {
- GCC_DIAGNOSTIC_POP();
- if (remained_buf_len < VNDR_OUI_STR_LEN) {
- WL_CFG_VNDR_OUI_SYNC_UNLOCK(&cfg->vndr_oui_sync, flags);
- return BCME_ERROR;
- }
- pos += snprintf(pos, VNDR_OUI_STR_LEN, "%02X-%02X-%02X ",
- oui_entry->oui[0], oui_entry->oui[1], oui_entry->oui[2]);
- remained_buf_len -= VNDR_OUI_STR_LEN;
+ tbuf[minsize] = '\0';
+ params = &tbuf[0];
+ colon = strchr(params, '\n');
+ if (colon != NULL)
+ *colon = '\0';
+ while ((token = strsep(&params, " ")) != NULL) {
+ memset(sublog, 0, sizeof(sublog));
+ if (token == NULL || !*token)
+ break;
+ if (*token == '\0')
+ continue;
+ colon = strchr(token, ':');
+ if (colon != NULL) {
+ *colon = ' ';
}
- WL_CFG_VNDR_OUI_SYNC_UNLOCK(&cfg->vndr_oui_sync, flags);
- }
+ tokens = sscanf(token, "%s %u", sublog, &log_on);
+ if (colon != NULL)
+ *colon = ':';
- return vndr_oui_num;
-}
+ if (tokens == 2) {
+ for (i = 0; i < ARRAYSIZE(sublogname_map); i++) {
+ if (!strncmp(sublog, sublogname_map[i].sublogname,
+ strlen(sublogname_map[i].sublogname))) {
+ if (log_on)
+ wl_dbg_level |=
+ (sublogname_map[i].log_level);
+ else
+ wl_dbg_level &=
+ ~(sublogname_map[i].log_level);
+ }
+ }
+ } else
+ WL_ERR(("%s: can't parse '%s' as a "
+ "SUBMODULE:LEVEL (%d tokens)\n",
+ tbuf, token, tokens));
-void
-wl_cfg80211_clear_p2p_disc_ies(struct bcm_cfg80211 *cfg)
-{
- /* Legacy P2P used to store it in primary dev cache */
- s32 index;
- struct net_device *ndev;
- s32 bssidx;
- s32 ret;
- s32 vndrie_flag[] = {VNDR_IE_BEACON_FLAG, VNDR_IE_PRBRSP_FLAG,
- VNDR_IE_ASSOCRSP_FLAG, VNDR_IE_PRBREQ_FLAG, VNDR_IE_ASSOCREQ_FLAG};
- WL_DBG(("Clear IEs for P2P Discovery Iface \n"));
- /* certain vendors uses p2p0 interface in addition to
- * the dedicated p2p interface supported by the linux
- * kernel.
- */
- ndev = wl_to_p2p_bss_ndev(cfg, P2PAPI_BSSCFG_PRIMARY);
- bssidx = wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_DEVICE);
- if (bssidx == WL_INVALID) {
- WL_DBG(("No discovery I/F available. Do nothing.\n"));
- return;
}
+ return count;
+}
- for (index = 0; index < ARRAYSIZE(vndrie_flag); index++) {
- if ((ret = wl_cfg80211_set_mgmt_vndr_ies(cfg, ndev_to_cfgdev(ndev),
- bssidx, vndrie_flag[index], NULL, 0)) < 0) {
- if (ret != BCME_NOTFOUND) {
- WL_ERR(("vndr_ies clear failed (%d). Ignoring.. \n", ret));
- }
- }
+static ssize_t
+wl_debuglevel_read(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ char *param;
+ char tbuf[S_SUBLOGLEVEL * ARRAYSIZE(sublogname_map)];
+ uint i;
+ memset(tbuf, 0, sizeof(tbuf));
+ param = &tbuf[0];
+ for (i = 0; i < ARRAYSIZE(sublogname_map); i++) {
+ param += snprintf(param, sizeof(tbuf) - 1, "%s:%d ",
+ sublogname_map[i].sublogname,
+ (wl_dbg_level & sublogname_map[i].log_level) ? 1 : 0);
}
+ *param = '\n';
+ return simple_read_from_buffer(user_buf, count, ppos, tbuf, strlen(&tbuf[0]));
- if (cfg->p2p_wdev && (ndev->ieee80211_ptr != cfg->p2p_wdev)) {
- /* clear IEs for dedicated p2p interface */
- wl_cfg80211_clear_per_bss_ies(cfg, cfg->p2p_wdev);
- }
}
+static const struct file_operations fops_debuglevel = {
+ .open = NULL,
+ .write = wl_debuglevel_write,
+ .read = wl_debuglevel_read,
+ .owner = THIS_MODULE,
+ .llseek = NULL,
+};
-s32
-wl_cfg80211_clear_per_bss_ies(struct bcm_cfg80211 *cfg, struct wireless_dev *wdev)
+static s32 wl_setup_debugfs(struct bcm_cfg80211 *cfg)
{
- s32 index;
- s32 ret;
- struct net_info *netinfo;
- s32 vndrie_flag[] = {VNDR_IE_BEACON_FLAG, VNDR_IE_PRBRSP_FLAG,
- VNDR_IE_ASSOCRSP_FLAG, VNDR_IE_PRBREQ_FLAG, VNDR_IE_ASSOCREQ_FLAG};
+ s32 err = 0;
+ struct dentry *_dentry;
+ if (!cfg)
+ return -EINVAL;
+ cfg->debugfs = debugfs_create_dir(KBUILD_MODNAME, NULL);
+ if (!cfg->debugfs || IS_ERR(cfg->debugfs)) {
+ if (cfg->debugfs == ERR_PTR(-ENODEV))
+ WL_ERR(("Debugfs is not enabled on this kernel\n"));
+ else
+ WL_ERR(("Can not create debugfs directory\n"));
+ cfg->debugfs = NULL;
+ goto exit;
- netinfo = wl_get_netinfo_by_wdev(cfg, wdev);
- if (!netinfo || !netinfo->wdev) {
- WL_ERR(("netinfo or netinfo->wdev is NULL\n"));
- return -1;
}
-
- WL_DBG(("clear management vendor IEs for bssidx:%d \n", netinfo->bssidx));
- /* Clear the IEs set in the firmware so that host is in sync with firmware */
- for (index = 0; index < ARRAYSIZE(vndrie_flag); index++) {
- if ((ret = wl_cfg80211_set_mgmt_vndr_ies(cfg, wdev_to_cfgdev(netinfo->wdev),
- netinfo->bssidx, vndrie_flag[index], NULL, 0)) < 0)
- if (ret != BCME_NOTFOUND) {
- WL_ERR(("vndr_ies clear failed. Ignoring.. \n"));
- }
+ _dentry = debugfs_create_file("debug_level", S_IRUSR | S_IWUSR,
+ cfg->debugfs, cfg, &fops_debuglevel);
+ if (!_dentry || IS_ERR(_dentry)) {
+ WL_ERR(("failed to create debug_level debug file\n"));
+ wl_free_debugfs(cfg);
}
-
- return 0;
+exit:
+ return err;
}
-
-s32
-wl_cfg80211_clear_mgmt_vndr_ies(struct bcm_cfg80211 *cfg)
+static s32 wl_free_debugfs(struct bcm_cfg80211 *cfg)
{
- struct net_info *iter, *next;
-
- WL_DBG(("clear management vendor IEs \n"));
- GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
- for_each_ndev(cfg, iter, next) {
- GCC_DIAGNOSTIC_POP();
- wl_cfg80211_clear_per_bss_ies(cfg, iter->wdev);
- }
+ if (!cfg)
+ return -EINVAL;
+ if (cfg->debugfs)
+ debugfs_remove_recursive(cfg->debugfs);
+ cfg->debugfs = NULL;
return 0;
}
+#endif /* DEBUGFS_CFG80211 */
-#define WL_VNDR_IE_MAXLEN 2048
-static s8 g_mgmt_ie_buf[WL_VNDR_IE_MAXLEN];
-int
-wl_cfg80211_set_mgmt_vndr_ies(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev,
- s32 bssidx, s32 pktflag, const u8 *vndr_ie, u32 vndr_ie_len)
+struct device *wl_cfg80211_get_parent_dev(void)
{
- struct net_device *ndev = NULL;
- s32 ret = BCME_OK;
- u8 *curr_ie_buf = NULL;
- u8 *mgmt_ie_buf = NULL;
- u32 mgmt_ie_buf_len = 0;
- u32 *mgmt_ie_len = 0;
- u32 del_add_ie_buf_len = 0;
- u32 total_ie_buf_len = 0;
- u32 parsed_ie_buf_len = 0;
- struct parsed_vndr_ies old_vndr_ies;
- struct parsed_vndr_ies new_vndr_ies;
- s32 i;
- u8 *ptr;
- s32 remained_buf_len;
- wl_bss_vndr_ies_t *ies = NULL;
- struct net_info *netinfo;
- struct wireless_dev *wdev;
-
- if (!cfgdev) {
- WL_ERR(("cfgdev is NULL\n"));
- return -EINVAL;
- }
+ return cfg80211_parent_dev;
+}
- ndev = cfgdev_to_wlc_ndev(cfgdev, cfg);
- wdev = cfgdev_to_wdev(cfgdev);
+void wl_cfg80211_set_parent_dev(void *dev)
+{
+ cfg80211_parent_dev = dev;
+}
- if (bssidx > WL_MAX_IFS) {
- WL_ERR(("bssidx > supported concurrent Ifaces \n"));
- return -EINVAL;
- }
+static void wl_cfg80211_clear_parent_dev(void)
+{
+ cfg80211_parent_dev = NULL;
+}
- netinfo = wl_get_netinfo_by_wdev(cfg, wdev);
- if (!netinfo) {
- WL_ERR(("net_info ptr is NULL \n"));
- return -EINVAL;
+void get_primary_mac(struct bcm_cfg80211 *cfg, struct ether_addr *mac)
+{
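+ /* Read the primary interface MAC via the "cur_etheraddr" iovar; fall back
+ * to an all-zero address on failure.
+ */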
+ if (wldev_iovar_getbuf_bsscfg(bcmcfg_to_prmry_ndev(cfg), "cur_etheraddr", NULL,
+ 0, cfg->ioctl_buf, WLC_IOCTL_SMLEN, 0, &cfg->ioctl_buf_sync) == BCME_OK) {
+ memcpy(mac->octet, cfg->ioctl_buf, ETHER_ADDR_LEN);
+ } else {
+ memset(mac->octet, 0, ETHER_ADDR_LEN);
}
-
- /* Clear the global buffer */
- bzero(g_mgmt_ie_buf, sizeof(g_mgmt_ie_buf));
- curr_ie_buf = g_mgmt_ie_buf;
- ies = &netinfo->bss.ies;
-
- WL_DBG(("Enter. pktflag:0x%x bssidx:%x vnd_ie_len:%d wdev:%p\n",
- pktflag, bssidx, vndr_ie_len, wdev));
-
- switch (pktflag) {
- case VNDR_IE_PRBRSP_FLAG :
- mgmt_ie_buf = ies->probe_res_ie;
- mgmt_ie_len = &ies->probe_res_ie_len;
- mgmt_ie_buf_len = sizeof(ies->probe_res_ie);
- break;
- case VNDR_IE_ASSOCRSP_FLAG :
- mgmt_ie_buf = ies->assoc_res_ie;
- mgmt_ie_len = &ies->assoc_res_ie_len;
- mgmt_ie_buf_len = sizeof(ies->assoc_res_ie);
- break;
- case VNDR_IE_BEACON_FLAG :
- mgmt_ie_buf = ies->beacon_ie;
- mgmt_ie_len = &ies->beacon_ie_len;
- mgmt_ie_buf_len = sizeof(ies->beacon_ie);
- break;
- case VNDR_IE_PRBREQ_FLAG :
- mgmt_ie_buf = ies->probe_req_ie;
- mgmt_ie_len = &ies->probe_req_ie_len;
- mgmt_ie_buf_len = sizeof(ies->probe_req_ie);
- break;
- case VNDR_IE_ASSOCREQ_FLAG :
- mgmt_ie_buf = ies->assoc_req_ie;
- mgmt_ie_len = &ies->assoc_req_ie_len;
- mgmt_ie_buf_len = sizeof(ies->assoc_req_ie);
- break;
- case VNDR_IE_DISASSOC_FLAG :
- mgmt_ie_buf = ies->disassoc_ie;
- mgmt_ie_len = &ies->disassoc_ie_len;
- mgmt_ie_buf_len = sizeof(ies->disassoc_ie);
- break;
- default:
- mgmt_ie_buf = NULL;
- mgmt_ie_len = NULL;
- WL_ERR(("not suitable packet type (%d)\n", pktflag));
- return BCME_ERROR;
+}
+static bool check_dev_role_integrity(struct bcm_cfg80211 *cfg, u32 dev_role)
+{
+ dhd_pub_t *dhd = (dhd_pub_t *)(cfg->pub);
+ if (((dev_role == NL80211_IFTYPE_AP) &&
+ !(dhd->op_mode & DHD_FLAG_HOSTAP_MODE)) ||
+ ((dev_role == NL80211_IFTYPE_P2P_GO) &&
+ !(dhd->op_mode & DHD_FLAG_P2P_GO_MODE)))
+ {
+ WL_ERR(("device role select failed role:%d op_mode:%d \n", dev_role, dhd->op_mode));
+ return false;
}
+ return true;
+}
- if (vndr_ie_len > mgmt_ie_buf_len) {
- WL_ERR(("extra IE size too big\n"));
- ret = -ENOMEM;
- } else {
- /* parse and save new vndr_ie in curr_ie_buff before comparing it */
- if (vndr_ie && vndr_ie_len && curr_ie_buf) {
- ptr = curr_ie_buf;
-
- if ((ret = wl_cfg80211_parse_vndr_ies((const u8 *)vndr_ie,
- vndr_ie_len, &new_vndr_ies)) < 0) {
- WL_ERR(("parse vndr ie failed \n"));
- goto exit;
- }
-
- for (i = 0; i < new_vndr_ies.count; i++) {
- struct parsed_vndr_ie_info *vndrie_info =
- &new_vndr_ies.ie_info[i];
-
- if ((parsed_ie_buf_len + vndrie_info->ie_len) > WL_VNDR_IE_MAXLEN) {
- WL_ERR(("IE size is too big (%d > %d)\n",
- parsed_ie_buf_len, WL_VNDR_IE_MAXLEN));
- ret = -EINVAL;
- goto exit;
- }
-
- memcpy(ptr + parsed_ie_buf_len, vndrie_info->ie_ptr,
- vndrie_info->ie_len);
- parsed_ie_buf_len += vndrie_info->ie_len;
- }
- }
-
- if (mgmt_ie_buf != NULL) {
- if (parsed_ie_buf_len && (parsed_ie_buf_len == *mgmt_ie_len) &&
- (memcmp(mgmt_ie_buf, curr_ie_buf, parsed_ie_buf_len) == 0)) {
- WL_DBG(("Previous mgmt IE is equals to current IE"));
- goto exit;
- }
+int wl_cfg80211_do_driver_init(struct net_device *net)
+{
+ struct bcm_cfg80211 *cfg = *(struct bcm_cfg80211 **)netdev_priv(net);
- /* parse old vndr_ie */
- if ((ret = wl_cfg80211_parse_vndr_ies(mgmt_ie_buf, *mgmt_ie_len,
- &old_vndr_ies)) < 0) {
- WL_ERR(("parse vndr ie failed \n"));
- goto exit;
- }
- /* make a command to delete old ie */
- for (i = 0; i < old_vndr_ies.count; i++) {
- struct parsed_vndr_ie_info *vndrie_info =
- &old_vndr_ies.ie_info[i];
-#if defined(WL_MBO) || defined(WL_OCE)
- {
- if ((vndrie_info->vndrie.id == 0xDD) &&
- (!memcmp(vndrie_info->vndrie.oui, WFA_OUI, WFA_OUI_LEN)) &&
- (vndrie_info->vndrie.data[0] == WFA_OUI_TYPE_MBO_OCE)) {
- WL_DBG(("skipping ID : %d, Len: %d, OUI:"MACOUIDBG
- ", type: %0x\n",
- vndrie_info->vndrie.id,
- vndrie_info->vndrie.len,
- MACOUI2STRDBG(vndrie_info->vndrie.oui),
- vndrie_info->vndrie.data[0]));
- continue;
- }
- }
-#endif /* WL_MBO || WL_OCE */
+ if (!cfg || !cfg->wdev)
+ return -EINVAL;
- if (vndrie_info->vndrie.id == DOT11_MNG_ID_EXT_ID) {
- WL_DBG(("DELETED VENDOR EXTN ID : %d, TYPE: %d Len: %d\n",
- vndrie_info->vndrie.id, vndrie_info->vndrie.oui[0],
- vndrie_info->vndrie.len));
- } else {
- WL_DBG(("DELETED ID : %d, Len: %d , OUI:"MACOUIDBG"\n",
- vndrie_info->vndrie.id, vndrie_info->vndrie.len,
- MACOUI2STRDBG(vndrie_info->vndrie.oui)));
- }
+ if (dhd_do_driver_init(cfg->wdev->netdev) < 0)
+ return -1;
- del_add_ie_buf_len = wl_cfgp2p_vndr_ie(cfg, curr_ie_buf,
- pktflag, vndrie_info->vndrie.oui,
- vndrie_info->vndrie.id,
- vndrie_info->ie_ptr + VNDR_IE_FIXED_LEN,
- vndrie_info->ie_len - VNDR_IE_FIXED_LEN,
- "del");
+ return 0;
+}
- curr_ie_buf += del_add_ie_buf_len;
- total_ie_buf_len += del_add_ie_buf_len;
- }
- }
+void wl_cfg80211_enable_trace(u32 level)
+{
+ wl_dbg_level = level;
+ printf("%s: wl_dbg_level = 0x%x\n", __FUNCTION__, wl_dbg_level);
+}
- *mgmt_ie_len = 0;
- /* Add if there is any extra IE */
- if (mgmt_ie_buf && parsed_ie_buf_len) {
- ptr = mgmt_ie_buf;
+#if defined(WL_SUPPORT_BACKPORTED_KPATCHES) || (LINUX_VERSION_CODE >= KERNEL_VERSION(3, \
+ 2, 0))
+static s32
+wl_cfg80211_mgmt_tx_cancel_wait(struct wiphy *wiphy,
+ bcm_struct_cfgdev *cfgdev, u64 cookie)
+{
+ /* CFG80211 checks for tx_cancel_wait callback when ATTR_DURATION
+ * is passed with CMD_FRAME. This callback is supposed to cancel
+ * the OFFCHANNEL Wait. Since we are already taking care of that
+ * with the tx_mgmt logic, do nothing here.
+ */
- remained_buf_len = mgmt_ie_buf_len;
+ return 0;
+}
+#endif /* WL_SUPPORT_BACKPORTED_PATCHES || KERNEL >= 3.2.0 */
- /* make a command to add new ie */
- for (i = 0; i < new_vndr_ies.count; i++) {
- struct parsed_vndr_ie_info *vndrie_info =
- &new_vndr_ies.ie_info[i];
-#if defined(WL_MBO) || defined(WL_OCE)
- {
- if ((vndrie_info->vndrie.id == 0xDD) &&
- (!memcmp(vndrie_info->vndrie.oui, WFA_OUI, WFA_OUI_LEN)) &&
- (vndrie_info->vndrie.data[0] == WFA_OUI_TYPE_MBO_OCE)) {
- WL_DBG(("skipping ID : %d, Len: %d, OUI:"MACOUIDBG
- ",type :%0x\n",
- vndrie_info->vndrie.id,
- vndrie_info->vndrie.len,
- MACOUI2STRDBG(vndrie_info->vndrie.oui),
- vndrie_info->vndrie.data[0]));
- continue;
- }
- }
-#endif /* WL_MBO || WL_OCE */
- if (vndrie_info->vndrie.id == DOT11_MNG_ID_EXT_ID) {
- WL_DBG(("ADDED VENDOR EXTN ID : %d, TYPE = %d, Len: %d\n",
- vndrie_info->vndrie.id, vndrie_info->vndrie.oui[0],
- vndrie_info->vndrie.len));
- } else {
- WL_DBG(("ADDED ID : %d, Len: %d(%d), OUI:"MACOUIDBG"\n",
- vndrie_info->vndrie.id, vndrie_info->vndrie.len,
- vndrie_info->ie_len - 2,
- MACOUI2STRDBG(vndrie_info->vndrie.oui)));
- }
+#ifdef WL11U
+static bcm_tlv_t *
+wl_cfg80211_find_interworking_ie(const u8 *parse, u32 len)
+{
+ bcm_tlv_t *ie;
- del_add_ie_buf_len = wl_cfgp2p_vndr_ie(cfg, curr_ie_buf,
- pktflag, vndrie_info->vndrie.oui,
- vndrie_info->vndrie.id,
- vndrie_info->ie_ptr + VNDR_IE_FIXED_LEN,
- vndrie_info->ie_len - VNDR_IE_FIXED_LEN,
- "add");
+/* Unfortunately it's too much work to dispose of the const cast - bcm_parse_tlvs
+ * is used everywhere, and changing its prototype to take a const qualifier needs
+ * a massive change to all its callers...
+ */
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == \
+ 4 && __GNUC_MINOR__ >= 6))
+_Pragma("GCC diagnostic push")
+_Pragma("GCC diagnostic ignored \"-Wcast-qual\"")
+#endif
+ if ((ie = bcm_parse_tlvs((void *)parse, (int)len, DOT11_MNG_INTERWORKING_ID))) {
+ return ie;
+ }
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == \
+ 4 && __GNUC_MINOR__ >= 6))
+_Pragma("GCC diagnostic pop")
+#endif
+ return NULL;
+}
- /* verify remained buf size before copy data */
- if (remained_buf_len >= vndrie_info->ie_len) {
- remained_buf_len -= vndrie_info->ie_len;
- } else {
- WL_ERR(("no space in mgmt_ie_buf: pktflag = %d, "
- "found vndr ies # = %d(cur %d), remained len %d, "
- "cur mgmt_ie_len %d, new ie len = %d\n",
- pktflag, new_vndr_ies.count, i, remained_buf_len,
- *mgmt_ie_len, vndrie_info->ie_len));
- break;
- }
+static s32
+wl_cfg80211_clear_iw_ie(struct bcm_cfg80211 *cfg, struct net_device *ndev, s32 bssidx)
+{
+ ie_setbuf_t ie_setbuf;
- /* save the parsed IE in cfg struct */
- memcpy(ptr + (*mgmt_ie_len), vndrie_info->ie_ptr,
- vndrie_info->ie_len);
- *mgmt_ie_len += vndrie_info->ie_len;
- curr_ie_buf += del_add_ie_buf_len;
- total_ie_buf_len += del_add_ie_buf_len;
- }
- }
+ WL_DBG(("clear interworking IE\n"));
- if (total_ie_buf_len && cfg->ioctl_buf != NULL) {
- ret = wldev_iovar_setbuf_bsscfg(ndev, "vndr_ie", g_mgmt_ie_buf,
- total_ie_buf_len, cfg->ioctl_buf, WLC_IOCTL_MAXLEN,
- bssidx, &cfg->ioctl_buf_sync);
- if (ret)
- WL_ERR(("vndr ie set error : %d\n", ret));
- }
- }
-exit:
+ memset(&ie_setbuf, 0, sizeof(ie_setbuf_t));
-return ret;
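+ /* A single "ie" entry with zero length removes the interworking IE
+ * previously programmed into the firmware.
+ */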
+ ie_setbuf.ie_buffer.iecount = htod32(1);
+ ie_setbuf.ie_buffer.ie_list[0].ie_data.id = DOT11_MNG_INTERWORKING_ID;
+ ie_setbuf.ie_buffer.ie_list[0].ie_data.len = 0;
+
+ return wldev_iovar_setbuf_bsscfg(ndev, "ie", &ie_setbuf, sizeof(ie_setbuf),
+ cfg->ioctl_buf, WLC_IOCTL_MAXLEN, bssidx, &cfg->ioctl_buf_sync);
}
-#ifdef WL_CFG80211_ACL
-static int
-wl_cfg80211_set_mac_acl(struct wiphy *wiphy, struct net_device *cfgdev,
- const struct cfg80211_acl_data *acl)
+static s32
+wl_cfg80211_add_iw_ie(struct bcm_cfg80211 *cfg, struct net_device *ndev, s32 bssidx, s32 pktflag,
+ uint8 ie_id, uint8 *data, uint8 data_len)
{
- int i;
- int ret = 0;
- int macnum = 0;
- int macmode = MACLIST_MODE_DISABLED;
- struct maclist *list;
- struct bcm_cfg80211 *cfg = wl_get_cfg(cfgdev);
+ s32 err = BCME_OK;
+ s32 buf_len;
+ ie_setbuf_t *ie_setbuf;
+ ie_getbuf_t ie_getbufp;
+ char getbuf[WLC_IOCTL_SMLEN];
- /* get the MAC filter mode */
- if (acl && acl->acl_policy == NL80211_ACL_POLICY_DENY_UNLESS_LISTED) {
- macmode = MACLIST_MODE_ALLOW;
- } else if (acl && acl->acl_policy == NL80211_ACL_POLICY_ACCEPT_UNLESS_LISTED &&
- acl->n_acl_entries) {
- macmode = MACLIST_MODE_DENY;
+ if (ie_id != DOT11_MNG_INTERWORKING_ID) {
+ WL_ERR(("unsupported (id=%d)\n", ie_id));
+ return BCME_UNSUPPORTED;
}
- /* if acl == NULL, macmode is still disabled.. */
- if (macmode == MACLIST_MODE_DISABLED) {
- if ((ret = wl_android_set_ap_mac_list(cfgdev, macmode, NULL)) != 0)
- WL_ERR(("wl_cfg80211_set_mac_acl: Setting MAC list"
- " failed error=%d\n", ret));
+ /* access network options (1 octet) is the mandatory field */
+ if (!data || data_len == 0 || data_len > IW_IES_MAX_BUF_LEN) {
+ WL_ERR(("wrong interworking IE (len=%d)\n", data_len));
+ return BCME_BADARG;
+ }
- return ret;
+ /* Validate the pktflag parameter */
+ if ((pktflag & ~(VNDR_IE_BEACON_FLAG | VNDR_IE_PRBRSP_FLAG |
+ VNDR_IE_ASSOCRSP_FLAG | VNDR_IE_AUTHRSP_FLAG |
+ VNDR_IE_PRBREQ_FLAG | VNDR_IE_ASSOCREQ_FLAG|
+ VNDR_IE_CUSTOM_FLAG))) {
+ WL_ERR(("invalid packet flag 0x%x\n", pktflag));
+ return BCME_BADARG;
}
- macnum = acl->n_acl_entries;
- if (macnum < 0 || macnum > MAX_NUM_MAC_FILT) {
- WL_ERR(("wl_cfg80211_set_mac_acl: invalid number of MAC address entries %d\n",
- macnum));
- return -1;
+ buf_len = sizeof(ie_setbuf_t) + data_len - 1;
+
+ ie_getbufp.id = DOT11_MNG_INTERWORKING_ID;
+ if (wldev_iovar_getbuf_bsscfg(ndev, "ie", (void *)&ie_getbufp,
+ sizeof(ie_getbufp), getbuf, WLC_IOCTL_SMLEN, bssidx, &cfg->ioctl_buf_sync)
+ == BCME_OK) {
+ if (!memcmp(&getbuf[TLV_HDR_LEN], data, data_len)) {
+ WL_DBG(("skip to set interworking IE\n"));
+ return BCME_OK;
+ }
}
- /* allocate memory for the MAC list */
- list = (struct maclist *)MALLOC(cfg->osh, sizeof(int) +
- sizeof(struct ether_addr) * macnum);
- if (!list) {
- WL_ERR(("wl_cfg80211_set_mac_acl: failed to allocate memory\n"));
- return -1;
+ /* if already set with previous values, delete it first */
+ if (cfg->wl11u) {
+ if ((err = wl_cfg80211_clear_iw_ie(cfg, ndev, bssidx)) != BCME_OK) {
+ return err;
+ }
}
- /* prepare the MAC list */
- list->count = htod32(macnum);
- for (i = 0; i < macnum; i++) {
- memcpy(&list->ea[i], &acl->mac_addrs[i], ETHER_ADDR_LEN);
+ ie_setbuf = (ie_setbuf_t *) kzalloc(buf_len, GFP_KERNEL);
+ if (!ie_setbuf) {
+ WL_ERR(("Error allocating buffer for IE\n"));
+ return -ENOMEM;
}
- /* set the list */
- if ((ret = wl_android_set_ap_mac_list(cfgdev, macmode, list)) != 0)
- WL_ERR(("wl_cfg80211_set_mac_acl: Setting MAC list failed error=%d\n", ret));
+ strncpy(ie_setbuf->cmd, "add", sizeof(ie_setbuf->cmd));
+ ie_setbuf->cmd[sizeof(ie_setbuf->cmd) - 1] = '\0';
+
+ /* Buffer contains only 1 IE */
+ ie_setbuf->ie_buffer.iecount = htod32(1);
+ /* Use VNDR_IE_CUSTOM_FLAG for non-vendor IEs; currently a fixed value */
+ ie_setbuf->ie_buffer.ie_list[0].pktflag = htod32(pktflag);
+
+ /* Now, add the IE to the buffer */
+ ie_setbuf->ie_buffer.ie_list[0].ie_data.id = DOT11_MNG_INTERWORKING_ID;
+ ie_setbuf->ie_buffer.ie_list[0].ie_data.len = data_len;
+ memcpy((uchar *)&ie_setbuf->ie_buffer.ie_list[0].ie_data.data[0], data, data_len);
- MFREE(cfg->osh, list, sizeof(int) +
- sizeof(struct ether_addr) * macnum);
+ if ((err = wldev_iovar_setbuf_bsscfg(ndev, "ie", ie_setbuf, buf_len,
+ cfg->ioctl_buf, WLC_IOCTL_MAXLEN, bssidx, &cfg->ioctl_buf_sync))
+ == BCME_OK) {
+ WL_DBG(("set interworking IE\n"));
+ cfg->wl11u = TRUE;
+ err = wldev_iovar_setint_bsscfg(ndev, "grat_arp", 1, bssidx);
+ }
+
+ kfree(ie_setbuf);
+ return err;
+}
+#endif /* WL11U */
+
+
+s32
+wl_cfg80211_set_if_band(struct net_device *ndev, int band)
+{
+ struct bcm_cfg80211 *cfg = wl_get_cfg(ndev);
+ int ret = 0, wait_cnt;
+ char ioctl_buf[32];
+ if ((band < WLC_BAND_AUTO) || (band > WLC_BAND_2G)) {
+ WL_ERR(("Invalid band\n"));
+ return -EINVAL;
+ }
+ if (wl_get_drv_status(cfg, CONNECTED, ndev) ||
+ wl_get_drv_status(cfg, CONNECTING, ndev)) {
+ /* If the driver is in connected or connecting state, try to disconnect first;
+ * if the dongle is still associated, the 'if_band' iovar would be rejected.
+ */
+ wl_set_drv_status(cfg, DISCONNECTING, ndev);
+ ret = wldev_ioctl_set(ndev, WLC_DISASSOC, NULL, 0);
+ if (ret < 0) {
+ WL_ERR(("WLC_DISASSOC error %d\n", ret));
+ /* continue to set 'if_band' */
+ }
+ else {
+ /* This is to ensure that 'if_band' iovar is issued only after
+ * disconnection is completed
+ */
+ wait_cnt = WAIT_FOR_DISCONNECT_MAX;
+ while (wl_get_drv_status(cfg, DISCONNECTING, ndev) && wait_cnt) {
+ WL_DBG(("Wait until disconnected. wait_cnt: %d\n", wait_cnt));
+ wait_cnt--;
+ OSL_SLEEP(10);
+ }
+ }
+ }
+ if ((ret = wldev_iovar_setbuf(ndev, "if_band", &band,
+ sizeof(int), ioctl_buf, sizeof(ioctl_buf), NULL)) < 0) {
+ WL_ERR(("seting if_band failed ret=%d\n", ret));
+ /* issue 'WLC_SET_BAND' if if_band is not supported */
+ if (ret == BCME_UNSUPPORTED) {
+ ret = wldev_set_band(ndev, band);
+ if (ret < 0) {
+ WL_ERR(("seting band failed ret=%d\n", ret));
+ }
+ }
+ }
return ret;
}
-#endif /* WL_CFG80211_ACL */
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0))
-int wl_chspec_chandef(chanspec_t chanspec,
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0))
- struct cfg80211_chan_def *chandef,
-#elif (LINUX_VERSION_CODE >= KERNEL_VERSION (3, 5, 0) && (LINUX_VERSION_CODE <= (3, 7, 0)))
- struct chan_info *chaninfo,
-#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0)) */
- struct wiphy *wiphy)
+s32
+wl_cfg80211_dfs_ap_move(struct net_device *ndev, char *data, char *command, int total_len)
{
- uint16 freq = 0;
- int chan_type = 0;
- int channel = 0;
- struct ieee80211_channel *chan;
+ struct bcm_cfg80211 *cfg = wl_get_cfg(ndev);
+ char ioctl_buf[50];
+ int err = 0;
+ uint32 val = 0;
+ chanspec_t chanspec = 0;
+ int abort;
+ int bytes_written = 0;
+ wl_dfs_ap_move_status_t *status;
+ char chanbuf[CHANSPEC_STR_LEN];
+ const char *dfs_state_str[DFS_SCAN_S_MAX] = {
+ "Radar Free On Channel",
+ "Radar Found On Channel",
+ "Radar Scan In Progress",
+ "Radar Scan Aborted",
+ "RSDB Mode switch in Progress For Scan"
+ };
+ if (ndev->ieee80211_ptr->iftype != NL80211_IFTYPE_AP) {
+ bytes_written = snprintf(command, total_len, "AP is not up\n");
+ return bytes_written;
+ }
+ if (!*data) {
+ if ((err = wldev_iovar_getbuf(ndev, "dfs_ap_move", NULL, 0,
+ cfg->ioctl_buf, WLC_IOCTL_MAXLEN, &cfg->ioctl_buf_sync))) {
+ WL_ERR(("setting dfs_ap_move failed with err=%d \n", err));
+ return err;
+ }
+ status = (wl_dfs_ap_move_status_t *)cfg->ioctl_buf;
+
+ if (status->version != WL_DFS_AP_MOVE_VERSION) {
+ err = BCME_UNSUPPORTED;
+ WL_ERR(("err=%d version=%d\n", err, status->version));
+ return err;
+ }
+
+ if (status->move_status != (int8) DFS_SCAN_S_IDLE) {
+ chanspec = wl_chspec_driver_to_host(status->chanspec);
+ if (chanspec != 0 && chanspec != INVCHANSPEC) {
+ wf_chspec_ntoa(chanspec, chanbuf);
+ bytes_written = snprintf(command, total_len,
+ "AP Target Chanspec %s (0x%x)\n", chanbuf, chanspec);
+
+ }
+ bytes_written += snprintf(command + bytes_written, total_len - bytes_written,
+ "%s\n", dfs_state_str[status->move_status]);
+ return bytes_written;
+ } else {
+ bytes_written = snprintf(command, total_len, "dfs AP move in IDLE state\n");
+ return bytes_written;
+ }
- if (!chandef) {
- return -1;
}
- channel = CHSPEC_CHANNEL(chanspec);
- switch (CHSPEC_BW(chanspec)) {
- case WL_CHANSPEC_BW_20:
- chan_type = NL80211_CHAN_HT20;
- break;
- case WL_CHANSPEC_BW_40:
- {
- if (CHSPEC_SB_UPPER(chanspec)) {
- channel += CH_10MHZ_APART;
+ abort = bcm_atoi(data);
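+ /* An argument of -1 requests an abort of the in-progress AP move;
+ * anything else is parsed as the target chanspec below.
+ */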
+ if (abort == -1) {
+ if ((err = wldev_iovar_setbuf(ndev, "dfs_ap_move", &abort,
+ sizeof(int), ioctl_buf, sizeof(ioctl_buf), NULL)) < 0) {
+ WL_ERR(("seting dfs_ap_move failed with err %d\n", err));
+ return err;
+ }
+ } else {
+ chanspec = wf_chspec_aton(data);
+ if (chanspec != 0) {
+ val = wl_chspec_host_to_driver(chanspec);
+ if (val != INVCHANSPEC) {
+ if ((err = wldev_iovar_setbuf(ndev, "dfs_ap_move", &val,
+ sizeof(int), ioctl_buf, sizeof(ioctl_buf), NULL)) < 0) {
+ WL_ERR(("seting dfs_ap_move failed with err %d\n", err));
+ return err;
+ }
+ WL_DBG((" set dfs_ap_move successfull"));
} else {
- channel -= CH_10MHZ_APART;
+ err = BCME_USAGE_ERROR;
}
}
- chan_type = NL80211_CHAN_HT40PLUS;
- break;
+ }
+ return err;
+}
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION (3, 8, 0))
- case WL_CHANSPEC_BW_80:
- case WL_CHANSPEC_BW_8080:
- {
- uint16 sb = CHSPEC_CTL_SB(chanspec);
+#ifdef WBTEXT
+s32
+wl_cfg80211_wbtext_set_default(struct net_device *ndev)
+{
+ char commandp[WLC_IOCTL_SMLEN];
+ s32 ret = BCME_OK;
+ char *data;
- if (sb == WL_CHANSPEC_CTL_SB_LL) {
- channel -= (CH_10MHZ_APART + CH_20MHZ_APART);
- } else if (sb == WL_CHANSPEC_CTL_SB_LU) {
- channel -= CH_10MHZ_APART;
- } else if (sb == WL_CHANSPEC_CTL_SB_UL) {
- channel += CH_10MHZ_APART;
- } else {
- /* WL_CHANSPEC_CTL_SB_UU */
- channel += (CH_10MHZ_APART + CH_20MHZ_APART);
- }
+ WL_DBG(("set wbtext to default\n"));
- if (sb == WL_CHANSPEC_CTL_SB_LL || sb == WL_CHANSPEC_CTL_SB_LU)
- chan_type = NL80211_CHAN_HT40MINUS;
- else if (sb == WL_CHANSPEC_CTL_SB_UL || sb == WL_CHANSPEC_CTL_SB_UU)
- chan_type = NL80211_CHAN_HT40PLUS;
- }
- break;
-#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION (3, 8, 0)) */
- default:
- chan_type = NL80211_CHAN_HT20;
- break;
+ /* set roam profile */
+ memset(commandp, 0, sizeof(commandp));
+ snprintf(commandp, WLC_IOCTL_SMLEN, "%s %s",
+ CMD_WBTEXT_PROFILE_CONFIG, DEFAULT_WBTEXT_PROFILE_A);
+ data = (commandp + strlen(CMD_WBTEXT_PROFILE_CONFIG) + 1);
+ ret = wl_cfg80211_wbtext_config(ndev, data, commandp, WLC_IOCTL_SMLEN);
+ if (ret != BCME_OK) {
+ WL_ERR(("%s: Failed to set roam_prof %s error = %d\n",
+ __FUNCTION__, data, ret));
+ return ret;
+ }
+ memset(commandp, 0, sizeof(commandp));
+ snprintf(commandp, WLC_IOCTL_SMLEN, "%s %s",
+ CMD_WBTEXT_PROFILE_CONFIG, DEFAULT_WBTEXT_PROFILE_B);
+ data = (commandp + strlen(CMD_WBTEXT_PROFILE_CONFIG) + 1);
+ ret = wl_cfg80211_wbtext_config(ndev, data, commandp, WLC_IOCTL_SMLEN);
+ if (ret != BCME_OK) {
+ WL_ERR(("%s: Failed to set roam_prof %s error = %d\n",
+ __FUNCTION__, data, ret));
+ return ret;
}
- if (CHSPEC_IS5G(chanspec))
- freq = ieee80211_channel_to_frequency(channel, NL80211_BAND_5GHZ);
- else
- freq = ieee80211_channel_to_frequency(channel, NL80211_BAND_2GHZ);
+ /* set RSSI weight */
+ memset(commandp, 0, sizeof(commandp));
+ snprintf(commandp, WLC_IOCTL_SMLEN, "%s %s",
+ CMD_WBTEXT_WEIGHT_CONFIG, DEFAULT_WBTEXT_WEIGHT_RSSI_A);
+ data = (commandp + strlen(CMD_WBTEXT_WEIGHT_CONFIG) + 1);
+ ret = wl_cfg80211_wbtext_weight_config(ndev, data, commandp, WLC_IOCTL_SMLEN);
+ if (ret != BCME_OK) {
+ WL_ERR(("%s: Failed to set weight config %s error = %d\n",
+ __FUNCTION__, data, ret));
+ return ret;
+ }
- chan = ieee80211_get_channel(wiphy, freq);
- WL_DBG(("channel:%d freq:%d chan_type: %d chan_ptr:%p \n",
- channel, freq, chan_type, chan));
+ memset(commandp, 0, sizeof(commandp));
+ snprintf(commandp, WLC_IOCTL_SMLEN, "%s %s",
+ CMD_WBTEXT_WEIGHT_CONFIG, DEFAULT_WBTEXT_WEIGHT_RSSI_B);
+ data = (commandp + strlen(CMD_WBTEXT_WEIGHT_CONFIG) + 1);
+ ret = wl_cfg80211_wbtext_weight_config(ndev, data, commandp, WLC_IOCTL_SMLEN);
+ if (ret != BCME_OK) {
+ WL_ERR(("%s: Failed to set weight config %s error = %d\n",
+ __FUNCTION__, data, ret));
+ return ret;
+ }
- if (unlikely(!chan)) {
- /* fw and cfg80211 channel lists are not in sync */
- WL_ERR(("Couldn't find matching channel in wiphy channel list \n"));
- ASSERT(0);
- return -EINVAL;
+ /* set CU weight */
+ memset(commandp, 0, sizeof(commandp));
+ snprintf(commandp, WLC_IOCTL_SMLEN, "%s %s",
+ CMD_WBTEXT_WEIGHT_CONFIG, DEFAULT_WBTEXT_WEIGHT_CU_A);
+ data = (commandp + strlen(CMD_WBTEXT_WEIGHT_CONFIG) + 1);
+ ret = wl_cfg80211_wbtext_weight_config(ndev, data, commandp, WLC_IOCTL_SMLEN);
+ if (ret != BCME_OK) {
+ WL_ERR(("%s: Failed to set weight config %s error = %d\n",
+ __FUNCTION__, data, ret));
+ return ret;
}
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION (3, 8, 0))
- cfg80211_chandef_create(chandef, chan, chan_type);
-#elif (LINUX_VERSION_CODE >= KERNEL_VERSION (3, 5, 0) && (LINUX_VERSION_CODE <= (3, 7, \
- \
- 0)))
- chaninfo->freq = freq;
- chaninfo->chan_type = chan_type;
-#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION (3, 8, 0)) */
- return 0;
-}
+ memset(commandp, 0, sizeof(commandp));
+ snprintf(commandp, WLC_IOCTL_SMLEN, "%s %s",
+ CMD_WBTEXT_WEIGHT_CONFIG, DEFAULT_WBTEXT_WEIGHT_CU_B);
+ data = (commandp + strlen(CMD_WBTEXT_WEIGHT_CONFIG) + 1);
+ ret = wl_cfg80211_wbtext_weight_config(ndev, data, commandp, WLC_IOCTL_SMLEN);
+ if (ret != BCME_OK) {
+ WL_ERR(("%s: Failed to set weight config %s error = %d\n",
+ __FUNCTION__, data, ret));
+ return ret;
+ }
-void
-wl_cfg80211_ch_switch_notify(struct net_device *dev, uint16 chanspec, struct wiphy *wiphy)
-{
- u32 freq;
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION (3, 8, 0))
- struct cfg80211_chan_def chandef;
-#elif (LINUX_VERSION_CODE >= KERNEL_VERSION (3, 5, 0) && (LINUX_VERSION_CODE <= (3, 7, 0)))
- struct chan_info chaninfo;
-#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION (3, 8, 0)) */
+ /* set RSSI table */
+ memset(commandp, 0, sizeof(commandp));
+ snprintf(commandp, WLC_IOCTL_SMLEN, "%s %s",
+ CMD_WBTEXT_TABLE_CONFIG, DEFAULT_WBTEXT_TABLE_RSSI_A);
+ data = (commandp + strlen(CMD_WBTEXT_TABLE_CONFIG) + 1);
+ ret = wl_cfg80211_wbtext_table_config(ndev, data, commandp, WLC_IOCTL_SMLEN);
+ if (ret != BCME_OK) {
+ WL_ERR(("%s: Failed to set RSSI table %s error = %d\n",
+ __FUNCTION__, data, ret));
+ return ret;
+ }
- if (!wiphy) {
- WL_ERR(("wiphy is null\n"));
- return;
+ memset(commandp, 0, sizeof(commandp));
+ snprintf(commandp, WLC_IOCTL_SMLEN, "%s %s",
+ CMD_WBTEXT_TABLE_CONFIG, DEFAULT_WBTEXT_TABLE_RSSI_B);
+ data = (commandp + strlen(CMD_WBTEXT_TABLE_CONFIG) + 1);
+ ret = wl_cfg80211_wbtext_table_config(ndev, data, commandp, WLC_IOCTL_SMLEN);
+ if (ret != BCME_OK) {
+ WL_ERR(("%s: Failed to set RSSI table %s error = %d\n",
+ __FUNCTION__, data, ret));
+ return ret;
}
-#if (LINUX_VERSION_CODE <= KERNEL_VERSION (3, 18, 0))
- /* Channel switch support is only for AP/GO/ADHOC/MESH */
- if (dev->ieee80211_ptr->iftype == NL80211_IFTYPE_STATION ||
- dev->ieee80211_ptr->iftype == NL80211_IFTYPE_P2P_CLIENT) {
- WL_ERR(("No channel switch notify support for STA/GC\n"));
- return;
+
+ /* set CU table */
+ memset(commandp, 0, sizeof(commandp));
+ snprintf(commandp, WLC_IOCTL_SMLEN, "%s %s",
+ CMD_WBTEXT_TABLE_CONFIG, DEFAULT_WBTEXT_TABLE_CU_A);
+ data = (commandp + strlen(CMD_WBTEXT_TABLE_CONFIG) + 1);
+ ret = wl_cfg80211_wbtext_table_config(ndev, data, commandp, WLC_IOCTL_SMLEN);
+ if (ret != BCME_OK) {
+ WL_ERR(("%s: Failed to set CU table %s error = %d\n",
+ __FUNCTION__, data, ret));
+ return ret;
}
-#endif /* (LINUX_VERSION_CODE <= KERNEL_VERSION (3, 18, 0)) */
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION (3, 8, 0))
- if (wl_chspec_chandef(chanspec, &chandef, wiphy))
-#elif (LINUX_VERSION_CODE >= KERNEL_VERSION (3, 5, 0) && (LINUX_VERSION_CODE <= (3, 7, 0)))
- if (wl_chspec_chandef(chanspec, &chaninfo, wiphy))
-#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION (3, 8, 0)) */
- {
- WL_ERR(("chspec_chandef failed\n"));
- return;
+ memset(commandp, 0, sizeof(commandp));
+ snprintf(commandp, WLC_IOCTL_SMLEN, "%s %s",
+ CMD_WBTEXT_TABLE_CONFIG, DEFAULT_WBTEXT_TABLE_CU_B);
+ data = (commandp + strlen(CMD_WBTEXT_TABLE_CONFIG) + 1);
+ ret = wl_cfg80211_wbtext_table_config(ndev, data, commandp, WLC_IOCTL_SMLEN);
+ if (ret != BCME_OK) {
+ WL_ERR(("%s: Failed to set CU table %s error = %d\n",
+ __FUNCTION__, data, ret));
+ return ret;
}
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION (3, 8, 0))
- freq = chandef.chan ? chandef.chan->center_freq : chandef.center_freq1;
- cfg80211_ch_switch_notify(dev, &chandef);
-#elif (LINUX_VERSION_CODE >= KERNEL_VERSION (3, 5, 0) && (LINUX_VERSION_CODE <= (3, 7, 0)))
- freq = chan_info.freq;
- cfg80211_ch_switch_notify(dev, freq, chan_info.chan_type);
-#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION (3, 8, 0)) */
- WL_MSG(dev->name, "Channel switch notification for freq: %d chanspec: 0x%x\n",
- freq, chanspec);
- return;
+ return ret;
}
-#endif /* LINUX_VERSION_CODE >= (3, 5, 0) */
-static void
-wl_ap_channel_ind(struct bcm_cfg80211 *cfg,
- struct net_device *ndev,
- chanspec_t chanspec)
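+/*
+ * Get or set the WBTEXT roam profile ("roam_prof" iovar).
+ * 'data' starts with the band ("a" for 5 GHz, "b" for 2.4 GHz); with no
+ * further arguments the current profile is printed into 'command',
+ * otherwise two <roam_trigger rssi_lower channel_usage cu_duration>
+ * tuples are parsed and written to the firmware.
+ */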
+s32
+wl_cfg80211_wbtext_config(struct net_device *ndev, char *data, char *command, int total_len)
{
- u32 channel = LCHSPEC_CHANNEL(chanspec);
+ uint i = 0;
+ long int rssi_lower, roam_trigger;
+ struct bcm_cfg80211 *cfg = wl_get_cfg(ndev);
+ wl_roam_prof_band_t *rp;
+ int err = -EINVAL, bytes_written = 0;
+ size_t len = strlen(data);
+ int rp_len = 0;
+ data[len] = '\0';
+ rp = (wl_roam_prof_band_t *) kzalloc(sizeof(*rp)
+ * WL_MAX_ROAM_PROF_BRACKETS, GFP_KERNEL);
+ if (unlikely(!rp)) {
+ WL_ERR(("%s: failed to allocate memory\n", __func__));
+ err = -ENOMEM;
+ goto exit;
+ }
+ rp->ver = WL_MAX_ROAM_PROF_VER;
+ if (*data && (!strncmp(data, "b", 1))) {
+ rp->band = WLC_BAND_2G;
+ } else if (*data && (!strncmp(data, "a", 1))) {
+ rp->band = WLC_BAND_5G;
+ } else {
+ err = snprintf(command, total_len, "Missing band\n");
+ goto exit;
+ }
+ data++;
+ rp->len = 0;
+ /* Getting roam profile from fw */
+ if ((err = wldev_iovar_getbuf(ndev, "roam_prof", rp, sizeof(*rp),
+ cfg->ioctl_buf, WLC_IOCTL_MAXLEN, &cfg->ioctl_buf_sync))) {
+ WL_ERR(("Getting roam_profile failed with err=%d \n", err));
+ goto exit;
+ }
+ memcpy(rp, cfg->ioctl_buf, sizeof(*rp) * WL_MAX_ROAM_PROF_BRACKETS);
+ /* roam_prof version get */
+ if (rp->ver != WL_MAX_ROAM_PROF_VER) {
+ WL_ERR(("bad version (=%d) in return data\n", rp->ver));
+ err = -EINVAL;
+ goto exit;
+ }
+ if ((rp->len % sizeof(wl_roam_prof_t)) != 0) {
+ WL_ERR(("bad length (=%d) in return data\n", rp->len));
+ err = -EINVAL;
+ goto exit;
+ }
- WL_INFORM_MEM(("(%s) AP channel:%d chspec:0x%x \n",
- ndev->name, channel, chanspec));
+ if (!*data) {
+ for (i = 0; i < WL_MAX_ROAM_PROF_BRACKETS; i++) {
+ /* Print the roam profile entries read from the fw and stop when
+ * either of the conditions below is hit: the remaining buffer
+ * length is smaller than one roam profile entry, or there is
+ * no valid entry left.
+ */
+ if (((i * sizeof(wl_roam_prof_t)) > rp->len) ||
+ (rp->roam_prof[i].fullscan_period == 0)) {
+ break;
+ }
+ bytes_written += snprintf(command+bytes_written,
+ total_len, "RSSI[%d,%d] CU(trigger:%d%%: duration:%ds)\n",
+ rp->roam_prof[i].roam_trigger, rp->roam_prof[i].rssi_lower,
+ rp->roam_prof[i].channel_usage,
+ rp->roam_prof[i].cu_avg_calc_dur);
+ }
+ err = bytes_written;
+ goto exit;
+ } else {
+ for (i = 0; i < WL_MAX_ROAM_PROF_BRACKETS; i++) {
+ /* Count the valid roam profile entries returned by the fw and stop
+ * when either of the conditions below is hit: the remaining buffer
+ * length is smaller than one roam profile entry, or there is
+ * no valid entry left.
+ */
+ if (((i * sizeof(wl_roam_prof_t)) > rp->len) ||
+ (rp->roam_prof[i].fullscan_period == 0)) {
+ break;
+ }
+ }
+ /* Do not set roam_prof from upper layer if fw doesn't have 2 rows */
+ if (i != 2) {
+ WL_ERR(("FW must have 2 rows to fill roam_prof\n"));
+ err = -EINVAL;
+ goto exit;
+ }
+ /* setting roam profile to fw */
+ data++;
+ for (i = 0; i < WL_MAX_ROAM_PROF_BRACKETS; i++) {
+ roam_trigger = simple_strtol(data, &data, 10);
+ if (roam_trigger >= 0) {
+ WL_ERR(("roam trigger[%d] value must be negative\n", i));
+ err = -EINVAL;
+ goto exit;
+ }
+ rp->roam_prof[i].roam_trigger = roam_trigger;
+ data++;
+ rssi_lower = simple_strtol(data, &data, 10);
+ if (rssi_lower >= 0) {
+ WL_ERR(("rssi lower[%d] value must be negative\n", i));
+ err = -EINVAL;
+ goto exit;
+ }
+ rp->roam_prof[i].rssi_lower = rssi_lower;
+ data++;
+ rp->roam_prof[i].channel_usage = simple_strtol(data, &data, 10);
+ data++;
+ rp->roam_prof[i].cu_avg_calc_dur = simple_strtol(data, &data, 10);
-#ifdef SUPPORT_AP_BWCTRL
- wl_update_apchan_bwcap(cfg, ndev, chanspec);
-#endif /* SUPPORT_AP_BWCTRL */
+ rp_len += sizeof(wl_roam_prof_t);
- if (cfg->ap_oper_channel && (cfg->ap_oper_channel != channel)) {
- /*
- * If cached channel is different from the channel indicated
- * by the event, notify user space about the channel switch.
- */
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0))
- wl_cfg80211_ch_switch_notify(ndev, chanspec, bcmcfg_to_wiphy(cfg));
-#endif /* LINUX_VERSION_CODE >= (3, 5, 0) */
- cfg->ap_oper_channel = channel;
+ if (*data == '\0') {
+ break;
+ }
+ data++;
+ }
+ if (i != 1) {
+ WL_ERR(("Only two roam_prof rows supported.\n"));
+ err = -EINVAL;
+ goto exit;
+ }
+ rp->len = rp_len;
+ if ((err = wldev_iovar_setbuf(ndev, "roam_prof", rp,
+ sizeof(*rp), cfg->ioctl_buf, WLC_IOCTL_MEDLEN,
+ &cfg->ioctl_buf_sync)) < 0) {
+ WL_ERR(("seting roam_profile failed with err %d\n", err));
+ }
+ }
+exit:
+ if (rp) {
+ kfree(rp);
}
+ return err;
}
-static s32
-wl_ap_start_ind(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev,
-const wl_event_msg_t *e, void *data)
-{
- struct net_device *ndev = NULL;
- chanspec_t chanspec;
+#define BUFSZ 5
- WL_DBG(("Enter\n"));
- if (unlikely(e->status)) {
- WL_ERR(("status:0x%x \n", e->status));
- return -1;
- }
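+/* Stringify BUFSZ so it can be used as the field width in sscanf formats */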
+#define _S(x) #x
+#define S(x) _S(x)
- if (!data) {
- return -EINVAL;
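+/*
+ * Get or set the "wnm_bss_select_weight" iovar.
+ * Expected input: "<rssi|cu> <a|b|all> [weight]"; with only two
+ * arguments the current weight is read back into 'command', with a
+ * third argument the weight is written to the firmware.
+ */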
+int wl_cfg80211_wbtext_weight_config(struct net_device *ndev, char *data,
+ char *command, int total_len)
+{
+ struct bcm_cfg80211 *cfg = wl_get_cfg(ndev);
+ int bytes_written = 0, err = -EINVAL, argc = 0;
+ char rssi[BUFSZ], band[BUFSZ], weight[BUFSZ];
+ char *endptr = NULL;
+ wnm_bss_select_weight_cfg_t *bwcfg;
+
+ bwcfg = kzalloc(sizeof(*bwcfg), GFP_KERNEL);
+ if (unlikely(!bwcfg)) {
+ WL_ERR(("%s: failed to allocate memory\n", __func__));
+ err = -ENOMEM;
+ goto exit;
}
+ bwcfg->version = WNM_BSSLOAD_MONITOR_VERSION;
+ bwcfg->type = 0;
+ bwcfg->weight = 0;
- if (likely(cfgdev)) {
- ndev = cfgdev_to_wlc_ndev(cfgdev, cfg);
- chanspec = *((chanspec_t *)data);
+ argc = sscanf(data, "%"S(BUFSZ)"s %"S(BUFSZ)"s %"S(BUFSZ)"s", rssi, band, weight);
- if (wl_get_mode_by_netdev(cfg, ndev) == WL_MODE_AP) {
- /* For AP/GO role */
- wl_ap_channel_ind(cfg, ndev, chanspec);
- }
+ if (!strcasecmp(rssi, "rssi"))
+ bwcfg->type = WNM_BSS_SELECT_TYPE_RSSI;
+ else if (!strcasecmp(rssi, "cu"))
+ bwcfg->type = WNM_BSS_SELECT_TYPE_CU;
+ else {
+ /* Usage DRIVER WBTEXT_WEIGHT_CONFIG <rssi/cu> <band> <weight> */
+ WL_ERR(("%s: Command usage error\n", __func__));
+ goto exit;
}
- return 0;
-}
-
-static s32
-wl_csa_complete_ind(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev,
-const wl_event_msg_t *e, void *data)
-{
- int error = 0;
- u32 chanspec = 0;
- struct net_device *ndev = NULL;
- struct ether_addr bssid;
-
- WL_DBG(("Enter\n"));
- if (unlikely(e->status)) {
- WL_ERR(("status:0x%x \n", e->status));
- return -1;
+ if (!strcasecmp(band, "a"))
+ bwcfg->band = WLC_BAND_5G;
+ else if (!strcasecmp(band, "b"))
+ bwcfg->band = WLC_BAND_2G;
+ else if (!strcasecmp(band, "all"))
+ bwcfg->band = WLC_BAND_ALL;
+ else {
+ WL_ERR(("%s: Command usage error\n", __func__));
+ goto exit;
}
- if (likely(cfgdev)) {
- ndev = cfgdev_to_wlc_ndev(cfgdev, cfg);
- /* Get association state if not AP and then query chanspec */
- if (!((wl_get_mode_by_netdev(cfg, ndev)) == WL_MODE_AP)) {
- error = wldev_ioctl_get(ndev, WLC_GET_BSSID, &bssid, ETHER_ADDR_LEN);
- if (error) {
- WL_ERR(("CSA on %s. Not associated. error=%d\n",
- ndev->name, error));
- return BCME_ERROR;
- }
+ if (argc == 2) {
+ /* If there is no data after the band, get wnm_bss_select_weight from the fw */
+ if (bwcfg->band == WLC_BAND_ALL) {
+ WL_ERR(("band option \"all\" is for set only, not get\n"));
+ goto exit;
}
-
- error = wldev_iovar_getint(ndev, "chanspec", &chanspec);
- if (unlikely(error)) {
- WL_ERR(("Get chanspec error: %d \n", error));
- return -1;
+ if ((err = wldev_iovar_getbuf(ndev, "wnm_bss_select_weight", bwcfg,
+ sizeof(*bwcfg),
+ cfg->ioctl_buf, WLC_IOCTL_MAXLEN, &cfg->ioctl_buf_sync))) {
+ WL_ERR(("Getting wnm_bss_select_weight failed with err=%d \n", err));
+ goto exit;
}
-
- WL_INFORM_MEM(("[%s] CSA ind. ch:0x%x\n", ndev->name, chanspec));
- if (wl_get_mode_by_netdev(cfg, ndev) == WL_MODE_AP) {
- /* For AP/GO role */
- wl_ap_channel_ind(cfg, ndev, chanspec);
- } else {
- /* STA/GC roles */
- if (!wl_get_drv_status(cfg, CONNECTED, ndev)) {
- WL_ERR(("CSA on %s. Not associated.\n", ndev->name));
- return BCME_ERROR;
- }
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0))
- wl_cfg80211_ch_switch_notify(ndev, chanspec, bcmcfg_to_wiphy(cfg));
-#endif /* LINUX_VERSION_CODE >= (3, 5, 0) */
+ memcpy(bwcfg, cfg->ioctl_buf, sizeof(*bwcfg));
+ bytes_written = snprintf(command, total_len, "%s %s weight = %d\n",
+ (bwcfg->type == WNM_BSS_SELECT_TYPE_RSSI) ? "RSSI" : "CU",
+ (bwcfg->band == WLC_BAND_2G) ? "2G" : "5G", bwcfg->weight);
+ err = bytes_written;
+ goto exit;
+ } else {
+ /* if weight is not an integer, return a command usage error */
+ bwcfg->weight = simple_strtol(weight, &endptr, 0);
+ if (*endptr != '\0') {
+ WL_ERR(("%s: Command usage error", __func__));
+ goto exit;
+ }
+ /* setting weight for iovar wnm_bss_select_weight to fw */
+ if ((err = wldev_iovar_setbuf(ndev, "wnm_bss_select_weight", bwcfg,
+ sizeof(*bwcfg),
+ cfg->ioctl_buf, WLC_IOCTL_MAXLEN, &cfg->ioctl_buf_sync))) {
+ WL_ERR(("Getting wnm_bss_select_weight failed with err=%d\n", err));
}
-
}
-
- return 0;
+exit:
+ if (bwcfg) {
+ kfree(bwcfg);
+ }
+ return err;
}
-void wl_cfg80211_clear_security(struct bcm_cfg80211 *cfg)
+/* WBTEXT_TUPLE_MIN_LEN_CHECK :strlen(low)+" "+strlen(high)+" "+strlen(factor) */
+#define WBTEXT_TUPLE_MIN_LEN_CHECK 5
+
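+/*
+ * Get or set the "wnm_bss_select_table" factor table.
+ * Expected input: "<rssi|cu> <a|b|all>" optionally followed by up to
+ * WL_FACTOR_TABLE_MAX_LIMIT "<low> <high> <factor>" tuples; without
+ * tuples the current table is formatted into 'command'.
+ */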
+int wl_cfg80211_wbtext_table_config(struct net_device *ndev, char *data,
+ char *command, int total_len)
{
- struct net_device *dev = bcmcfg_to_prmry_ndev(cfg);
- int err;
+ struct bcm_cfg80211 *cfg = wl_get_cfg(ndev);
+ int bytes_written = 0, err = -EINVAL;
+ char rssi[BUFSZ], band[BUFSZ];
+ int btcfg_len = 0, i = 0, parsed_len = 0;
+ wnm_bss_select_factor_cfg_t *btcfg;
+ size_t slen = strlen(data);
+ char *start_addr = NULL;
+ data[slen] = '\0';
+
+ btcfg = kzalloc((sizeof(*btcfg) + sizeof(*btcfg) *
+ WL_FACTOR_TABLE_MAX_LIMIT), GFP_KERNEL);
+ if (unlikely(!btcfg)) {
+ WL_ERR(("%s: failed to allocate memory\n", __func__));
+ err = -ENOMEM;
+ goto exit;
+ }
- /* Clear the security settings on the primary Interface */
- err = wldev_iovar_setint(dev, "wsec", 0);
- if (unlikely(err)) {
- WL_ERR(("wsec clear failed \n"));
+ btcfg->version = WNM_BSS_SELECT_FACTOR_VERSION;
+ btcfg->band = WLC_BAND_AUTO;
+ btcfg->type = 0;
+ btcfg->count = 0;
+
+ sscanf(data, "%"S(BUFSZ)"s %"S(BUFSZ)"s", rssi, band);
+
+ if (!strcasecmp(rssi, "rssi")) {
+ btcfg->type = WNM_BSS_SELECT_TYPE_RSSI;
}
- err = wldev_iovar_setint(dev, "auth", 0);
- if (unlikely(err)) {
- WL_ERR(("auth clear failed \n"));
+ else if (!strcasecmp(rssi, "cu")) {
+ btcfg->type = WNM_BSS_SELECT_TYPE_CU;
}
- err = wldev_iovar_setint(dev, "wpa_auth", WPA_AUTH_DISABLED);
- if (unlikely(err)) {
- WL_ERR(("wpa_auth clear failed \n"));
+ else {
+ WL_ERR(("%s: Command usage error\n", __func__));
+ goto exit;
}
-}
-#ifdef WL_CFG80211_P2P_DEV_IF
-void wl_cfg80211_del_p2p_wdev(struct net_device *dev)
-{
- struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
- struct wireless_dev *wdev = NULL;
+ if (!strcasecmp(band, "a")) {
+ btcfg->band = WLC_BAND_5G;
+ }
+ else if (!strcasecmp(band, "b")) {
+ btcfg->band = WLC_BAND_2G;
+ }
+ else if (!strcasecmp(band, "all")) {
+ btcfg->band = WLC_BAND_ALL;
+ }
+ else {
+ WL_ERR(("%s: Command usage, Wrong band\n", __func__));
+ goto exit;
+ }
- WL_DBG(("Enter \n"));
- if (!cfg) {
- WL_ERR(("Invalid Ptr\n"));
- return;
+ if ((slen - 1) == (strlen(rssi) + strlen(band))) {
+ /* Getting factor table using iovar 'wnm_bss_select_table' from fw */
+ if ((err = wldev_iovar_getbuf(ndev, "wnm_bss_select_table", btcfg,
+ sizeof(*btcfg),
+ cfg->ioctl_buf, WLC_IOCTL_MAXLEN, &cfg->ioctl_buf_sync))) {
+ WL_ERR(("Getting wnm_bss_select_table failed with err=%d \n", err));
+ goto exit;
+ }
+ memcpy(btcfg, cfg->ioctl_buf, sizeof(*btcfg));
+ memcpy(btcfg, cfg->ioctl_buf, (btcfg->count+1) * sizeof(*btcfg));
+
+ bytes_written += snprintf(command + bytes_written, total_len,
+ "No of entries in table: %d\n", btcfg->count);
+ bytes_written += snprintf(command + bytes_written, total_len, "%s factor table\n",
+ (btcfg->type == WNM_BSS_SELECT_TYPE_RSSI) ? "RSSI" : "CU");
+ bytes_written += snprintf(command + bytes_written, total_len,
+ "low\thigh\tfactor\n");
+ for (i = 0; i <= btcfg->count-1; i++) {
+ bytes_written += snprintf(command + bytes_written, total_len,
+ "%d\t%d\t%d\n", btcfg->params[i].low, btcfg->params[i].high,
+ btcfg->params[i].factor);
+ }
+ err = bytes_written;
+ goto exit;
} else {
- wdev = cfg->p2p_wdev;
+ memset(btcfg->params, 0, sizeof(wnm_bss_select_factor_params_t)
+ * WL_FACTOR_TABLE_MAX_LIMIT);
+ data += (strlen(rssi) + strlen(band) + 2);
+ start_addr = data;
+ slen = slen - (strlen(rssi) + strlen(band) + 2);
+ for (i = 0; i < WL_FACTOR_TABLE_MAX_LIMIT; i++) {
+ if (parsed_len + WBTEXT_TUPLE_MIN_LEN_CHECK <= slen) {
+ btcfg->params[i].low = simple_strtol(data, &data, 10);
+ data++;
+ btcfg->params[i].high = simple_strtol(data, &data, 10);
+ data++;
+ btcfg->params[i].factor = simple_strtol(data, &data, 10);
+ btcfg->count++;
+ if (*data == '\0') {
+ break;
+ }
+ data++;
+ parsed_len = data - start_addr;
+ } else {
+ WL_ERR(("%s:Command usage:less no of args\n", __func__));
+ goto exit;
+ }
+ }
+ btcfg_len = sizeof(*btcfg) + ((btcfg->count) * sizeof(*btcfg));
+ if ((err = wldev_iovar_setbuf(ndev, "wnm_bss_select_table", btcfg, btcfg_len,
+ cfg->ioctl_buf, WLC_IOCTL_MEDLEN, &cfg->ioctl_buf_sync)) < 0) {
+ WL_ERR(("seting wnm_bss_select_table failed with err %d\n", err));
+ goto exit;
+ }
}
-
- if (wdev) {
- wl_cfgp2p_del_p2p_disc_if(wdev, cfg);
+exit:
+ if (btcfg) {
+ kfree(btcfg);
}
+ return err;
}
-#endif /* WL_CFG80211_P2P_DEV_IF */
-#ifdef GTK_OFFLOAD_SUPPORT
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 1, 0))
-static s32
-wl_cfg80211_set_rekey_data(struct wiphy *wiphy, struct net_device *dev,
- struct cfg80211_gtk_rekey_data *data)
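+/*
+ * Get or set the roam delta (roam_delta field of the "roam_prof" iovar)
+ * for one band. Expected input: "<a|b> [delta]"; without a delta value
+ * the current 2G/5G delta is printed into 'command'.
+ */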
+s32
+wl_cfg80211_wbtext_delta_config(struct net_device *ndev, char *data, char *command, int total_len)
{
- struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
- s32 err = 0;
- gtk_keyinfo_t keyinfo;
- bcol_gtk_para_t bcol_keyinfo;
-
- WL_DBG(("Enter\n"));
- if (data == NULL || cfg->p2p_net == dev) {
- WL_ERR(("data is NULL or wrong net device\n"));
- return -EINVAL;
+ uint i = 0;
+ struct bcm_cfg80211 *cfg = wl_get_cfg(ndev);
+ int err = -EINVAL, bytes_written = 0, argc = 0, val, len = 0;
+ char delta[BUFSZ], band[BUFSZ], *endptr = NULL;
+ wl_roam_prof_band_t *rp;
+
+ rp = (wl_roam_prof_band_t *) kzalloc(sizeof(*rp)
+ * WL_MAX_ROAM_PROF_BRACKETS, GFP_KERNEL);
+ if (unlikely(!rp)) {
+ WL_ERR(("%s: failed to allocate memory\n", __func__));
+ err = -ENOMEM;
+ goto exit;
}
- prhex("kck", (const u8 *) (data->kck), RSN_KCK_LENGTH);
- prhex("kek", (const u8 *) (data->kek), RSN_KEK_LENGTH);
- prhex("replay_ctr", (const u8 *) (data->replay_ctr), RSN_REPLAY_LEN);
- bcopy(data->kck, keyinfo.KCK, RSN_KCK_LENGTH);
- bcopy(data->kek, keyinfo.KEK, RSN_KEK_LENGTH);
- bcopy(data->replay_ctr, keyinfo.ReplayCounter, RSN_REPLAY_LEN);
-
- memset(&bcol_keyinfo, 0, sizeof(bcol_keyinfo));
- bcol_keyinfo.enable = 1;
- bcol_keyinfo.ptk_len = 64;
- memcpy(&bcol_keyinfo.ptk[0], data->kck, RSN_KCK_LENGTH);
- memcpy(&bcol_keyinfo.ptk[RSN_KCK_LENGTH], data->kek, RSN_KEK_LENGTH);
- err = wldev_iovar_setbuf(dev, "bcol_gtk_rekey_ptk", &bcol_keyinfo,
- sizeof(bcol_keyinfo), cfg->ioctl_buf, WLC_IOCTL_SMLEN, &cfg->ioctl_buf_sync);
- if (!err) {
- return err;
+ argc = sscanf(data, "%"S(BUFSZ)"s %"S(BUFSZ)"s", band, delta);
+ if (!strcasecmp(band, "a"))
+ rp->band = WLC_BAND_5G;
+ else if (!strcasecmp(band, "b"))
+ rp->band = WLC_BAND_2G;
+ else {
+ WL_ERR(("%s: Missing band\n", __func__));
+ goto exit;
}
-
- if ((err = wldev_iovar_setbuf(dev, "gtk_key_info", &keyinfo, sizeof(keyinfo),
- cfg->ioctl_buf, WLC_IOCTL_SMLEN, &cfg->ioctl_buf_sync)) < 0) {
- WL_ERR(("seting gtk_key_info failed code=%d\n", err));
- return err;
+ /* Getting roam profile from fw */
+ if ((err = wldev_iovar_getbuf(ndev, "roam_prof", rp, sizeof(*rp),
+ cfg->ioctl_buf, WLC_IOCTL_MAXLEN, &cfg->ioctl_buf_sync))) {
+ WL_ERR(("Getting roam_profile failed with err=%d \n", err));
+ goto exit;
+ }
+ memcpy(rp, cfg->ioctl_buf, sizeof(wl_roam_prof_band_t));
+ if (rp->ver != WL_MAX_ROAM_PROF_VER) {
+ WL_ERR(("bad version (=%d) in return data\n", rp->ver));
+ err = -EINVAL;
+ goto exit;
+ }
+ if ((rp->len % sizeof(wl_roam_prof_t)) != 0) {
+ WL_ERR(("bad length (=%d) in return data\n", rp->len));
+ err = -EINVAL;
+ goto exit;
}
- WL_DBG(("Exit\n"));
+ if (argc == 2) {
+ /* if delta is not an integer, return a command usage error */
+ val = simple_strtol(delta, &endptr, 0);
+ if (*endptr != '\0') {
+ WL_ERR(("%s: Command usage error", __func__));
+ goto exit;
+ }
+ for (i = 0; i < WL_MAX_ROAM_PROF_BRACKETS; i++) {
+ /*
+ * Walk the roam profile entries read from the fw and stop when
+ * either of the conditions below is hit: the remaining buffer
+ * length is smaller than one roam profile entry, or there is
+ * no valid entry left.
+ */
+ if (((i * sizeof(wl_roam_prof_t)) > rp->len) ||
+ (rp->roam_prof[i].fullscan_period == 0)) {
+ break;
+ }
+ if (rp->roam_prof[i].channel_usage != 0) {
+ rp->roam_prof[i].roam_delta = val;
+ }
+ len += sizeof(wl_roam_prof_t);
+ }
+ }
+ else {
+ if (rp->roam_prof[i].channel_usage != 0) {
+ bytes_written = snprintf(command, total_len,
+ "%s Delta %d\n", (rp->band == WLC_BAND_2G) ? "2G" : "5G",
+ rp->roam_prof[0].roam_delta);
+ }
+ err = bytes_written;
+ goto exit;
+ }
+ rp->len = len;
+ if ((err = wldev_iovar_setbuf(ndev, "roam_prof", rp,
+ sizeof(*rp), cfg->ioctl_buf, WLC_IOCTL_MEDLEN, &cfg->ioctl_buf_sync)) < 0) {
+ WL_ERR(("seting roam_profile failed with err %d\n", err));
+ }
+exit:
+ if (rp) {
+ kfree(rp);
+ }
return err;
}
-#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 1, 0) */
-#endif /* GTK_OFFLOAD_SUPPORT */
+#endif /* WBTEXT */
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 13, 0))
-static int wl_cfg80211_set_pmk(struct wiphy *wiphy, struct net_device *dev,
- const struct cfg80211_pmk_conf *conf)
+
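+/*
+ * Abort any cfg80211 scan request that is pending on this cfgdev:
+ * report it as aborted to cfg80211 and clear the driver SCANNING state.
+ */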
+int wl_cfg80211_scan_stop(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev)
{
+ struct net_device *ndev = NULL;
+ unsigned long flags;
+ int clear_flag = 0;
int ret = 0;
- wsec_pmk_t pmk;
- struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
- struct wl_security *sec;
- s32 bssidx;
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 8, 0))
+ struct cfg80211_scan_info info;
+#endif
- pmk.key_len = conf->pmk_len;
- if (pmk.key_len > sizeof(pmk.key)) {
- ret = -EINVAL;
- return ret;
- }
- pmk.flags = 0;
- ret = memcpy_s(&pmk.key, sizeof(pmk.key), conf->pmk, conf->pmk_len);
- if (ret) {
- ret = -EINVAL;
- return ret;
- }
+ WL_TRACE(("Enter\n"));
- if ((bssidx = wl_get_bssidx_by_wdev(cfg, dev->ieee80211_ptr)) < 0) {
- WL_ERR(("Find index failed\n"));
- ret = -EINVAL;
- return ret;
- }
+ if (!cfg)
+ return -EINVAL;
- sec = wl_read_prof(cfg, dev, WL_PROF_SEC);
- if ((sec->wpa_auth == WLAN_AKM_SUITE_8021X) ||
- (sec->wpa_auth == WL_AKM_SUITE_SHA256_1X)) {
- ret = wldev_iovar_setbuf_bsscfg(dev, "okc_info_pmk", pmk.key, pmk.key_len,
- cfg->ioctl_buf, WLC_IOCTL_SMLEN, bssidx, &cfg->ioctl_buf_sync);
- if (ret) {
- /* could fail in case that 'okc' is not supported */
- WL_INFORM_MEM(("okc_info_pmk failed, err=%d (ignore)\n", ret));
- }
- }
+ ndev = cfgdev_to_wlc_ndev(cfgdev, cfg);
- ret = wldev_ioctl_set(dev, WLC_SET_WSEC_PMK, &pmk, sizeof(pmk));
- if (ret) {
- WL_ERR(("wl_cfg80211_set_pmk error:%d", ret));
- ret = -EINVAL;
- return ret;
+ spin_lock_irqsave(&cfg->cfgdrv_lock, flags);
+#ifdef WL_CFG80211_P2P_DEV_IF
+ if (cfg->scan_request && cfg->scan_request->wdev == cfgdev)
+#else
+ if (cfg->scan_request && cfg->scan_request->dev == cfgdev)
+#endif
+ {
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 8, 0))
+ info.aborted = true;
+ cfg80211_scan_done(cfg->scan_request, &info);
+#else
+ cfg80211_scan_done(cfg->scan_request, true);
+#endif
+ cfg->scan_request = NULL;
+ clear_flag = 1;
}
- return 0;
+ spin_unlock_irqrestore(&cfg->cfgdrv_lock, flags);
+
+ if (clear_flag)
+ wl_clr_drv_status(cfg, SCANNING, ndev);
+
+ return ret;
}
-static int wl_cfg80211_del_pmk(struct wiphy *wiphy, struct net_device *dev,
- const u8 *aa)
+bool wl_cfg80211_is_concurrent_mode(struct net_device *dev)
{
- int err = BCME_OK;
- struct cfg80211_pmksa pmksa;
+ struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
+ if ((cfg) && (wl_get_drv_status_all(cfg, CONNECTED) > 1)) {
+ return true;
+ } else {
+ return false;
+ }
+}
- /* build up cfg80211_pmksa structure to use existing wl_cfg80211_update_pmksa API */
- bzero(&pmksa, sizeof(pmksa));
- pmksa.bssid = aa;
+void* wl_cfg80211_get_dhdp(struct net_device *dev)
+{
+ struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
- err = wl_cfg80211_update_pmksa(wiphy, dev, &pmksa, FALSE);
+ return cfg->pub;
+}
- if (err) {
- WL_ERR(("wl_cfg80211_update_pmksa err:%d\n", err));
- err = -EINVAL;
- }
+bool wl_cfg80211_is_p2p_active(struct net_device *dev)
+{
+ struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
+ return (cfg && cfg->p2p);
+}
- return err;
+bool wl_cfg80211_is_roam_offload(struct net_device * dev)
+{
+ struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
+ return (cfg && cfg->roam_offload);
}
-#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 13, 0) */
-#if defined(WL_SUPPORT_AUTO_CHANNEL)
-int
-wl_cfg80211_set_spect(struct net_device *dev, int spect)
+bool wl_cfg80211_is_event_from_connected_bssid(struct net_device * dev, const wl_event_msg_t *e,
+ int ifidx)
{
+ u8 *curbssid = NULL;
struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
- int wlc_down = 1;
- int wlc_up = 1;
- int err = BCME_OK;
- if (!wl_get_drv_status_all(cfg, CONNECTED)) {
- err = wldev_ioctl_set(dev, WLC_DOWN, &wlc_down, sizeof(wlc_down));
- if (err) {
- WL_ERR(("%s: WLC_DOWN failed: code: %d\n", __func__, err));
- return err;
- }
+ if (!cfg) {
+ /* When the interface was created with the wl utility,
+ * ndev->ieee80211_ptr will be NULL, so there is no cfg.
+ */
+ return false;
+ }
+ curbssid = wl_read_prof(cfg, dev, WL_PROF_BSSID);
- err = wldev_ioctl_set(dev, WLC_SET_SPECT_MANAGMENT, &spect, sizeof(spect));
- if (err) {
- WL_ERR(("%s: error setting spect: code: %d\n", __func__, err));
- return err;
- }
+ if (!curbssid) {
+ return false;
+ }
- err = wldev_ioctl_set(dev, WLC_UP, &wlc_up, sizeof(wlc_up));
- if (err) {
- WL_ERR(("%s: WLC_UP failed: code: %d\n", __func__, err));
- return err;
+ if (memcmp(curbssid, &e->addr, ETHER_ADDR_LEN) == 0) {
+ return true;
+ }
+ return false;
+}
+
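+/*
+ * Delayed PM enable work: re-apply the power-management mode (PM_FAST,
+ * or the value from dhd_conf_get_pm()) on every connected BSS/IBSS
+ * interface, then release the PM wakelock.
+ */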
+static void wl_cfg80211_work_handler(struct work_struct * work)
+{
+ struct bcm_cfg80211 *cfg = NULL;
+ struct net_info *iter, *next;
+ s32 err = BCME_OK;
+ s32 pm = PM_FAST;
+ dhd_pub_t *dhd;
+ BCM_SET_CONTAINER_OF(cfg, work, struct bcm_cfg80211, pm_enable_work.work);
+ WL_DBG(("Enter \n"));
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == \
+ 4 && __GNUC_MINOR__ >= 6))
+_Pragma("GCC diagnostic push")
+_Pragma("GCC diagnostic ignored \"-Wcast-qual\"")
+#endif
+ for_each_ndev(cfg, iter, next) {
+ /* p2p discovery iface ndev could be null */
+ if (iter->ndev) {
+ if (!wl_get_drv_status(cfg, CONNECTED, iter->ndev) ||
+ (wl_get_mode_by_netdev(cfg, iter->ndev) != WL_MODE_BSS &&
+ wl_get_mode_by_netdev(cfg, iter->ndev) != WL_MODE_IBSS))
+ continue;
+ if (iter->ndev) {
+ dhd = (dhd_pub_t *)(cfg->pub);
+ if (dhd_conf_get_pm(dhd) >= 0)
+ pm = dhd_conf_get_pm(dhd);
+ if ((err = wldev_ioctl_set(iter->ndev, WLC_SET_PM,
+ &pm, sizeof(pm))) != 0) {
+ if (err == -ENODEV)
+ WL_DBG(("%s:netdev not ready\n",
+ iter->ndev->name));
+ else
+ WL_ERR(("%s:error (%d)\n",
+ iter->ndev->name, err));
+ } else
+ wl_cfg80211_update_power_mode(iter->ndev);
+ }
}
}
- return err;
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == \
+ 4 && __GNUC_MINOR__ >= 6))
+_Pragma("GCC diagnostic pop")
+#endif
+ DHD_PM_WAKE_UNLOCK(cfg->pub);
}
-int
-wl_cfg80211_get_sta_channel(struct bcm_cfg80211 *cfg)
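+/*
+ * Return the 802.11 Action frame category field, or
+ * DOT11_ACTION_CAT_ERR_MASK if the frame is missing or too short.
+ */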
+u8
+wl_get_action_category(void *frame, u32 frame_len)
{
- int channel = 0;
-
- if (wl_get_drv_status(cfg, CONNECTED, bcmcfg_to_prmry_ndev(cfg))) {
- channel = cfg->channel;
- }
- return channel;
+ u8 category;
+ u8 *ptr = (u8 *)frame;
+ if (frame == NULL)
+ return DOT11_ACTION_CAT_ERR_MASK;
+ if (frame_len < DOT11_ACTION_HDR_LEN)
+ return DOT11_ACTION_CAT_ERR_MASK;
+ category = ptr[DOT11_ACTION_CAT_OFF];
+ WL_INFORM(("Action Category: %d\n", category));
+ return category;
}
-#endif /* WL_SUPPORT_AUTO_CHANNEL */
-u64
-wl_cfg80211_get_new_roc_id(struct bcm_cfg80211 *cfg)
+int
+wl_get_public_action(void *frame, u32 frame_len, u8 *ret_action)
{
- u64 id = 0;
- id = ++cfg->last_roc_id;
-#ifdef P2P_LISTEN_OFFLOADING
- if (id == P2PO_COOKIE) {
- id = ++cfg->last_roc_id;
- }
-#endif /* P2P_LISTEN_OFFLOADING */
- if (id == 0)
- id = ++cfg->last_roc_id;
- return id;
+ u8 *ptr = (u8 *)frame;
+ if (frame == NULL || ret_action == NULL)
+ return BCME_ERROR;
+ if (frame_len < DOT11_ACTION_HDR_LEN)
+ return BCME_ERROR;
+ if (DOT11_ACTION_CAT_PUBLIC != wl_get_action_category(frame, frame_len))
+ return BCME_ERROR;
+ *ret_action = ptr[DOT11_ACTION_ACT_OFF];
+ WL_INFORM(("Public Action : %d\n", *ret_action));
+ return BCME_OK;
}
-#ifdef WLTDLS
-s32
-wl_cfg80211_tdls_config(struct bcm_cfg80211 *cfg, enum wl_tdls_config state, bool auto_mode)
+
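+/* Synthesize a WLC_E_ROAM event for 'bssid' and feed it to the roam handler */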
+static int
+wl_cfg80211_delayed_roam(struct bcm_cfg80211 *cfg, struct net_device *ndev,
+ const struct ether_addr *bssid)
{
- struct net_device *ndev = bcmcfg_to_prmry_ndev(cfg);
- int err = 0;
- struct net_info *iter, *next;
- int update_reqd = 0;
- int enable = 0;
- dhd_pub_t *dhdp;
- dhdp = (dhd_pub_t *)(cfg->pub);
+ s32 err;
+ wl_event_msg_t e;
- /*
- * TDLS need to be enabled only if we have a single STA/GC
- * connection.
- */
+ bzero(&e, sizeof(e));
+ e.event_type = cpu_to_be32(WLC_E_ROAM);
+ memcpy(&e.addr, bssid, ETHER_ADDR_LEN);
+ /* trigger the roam event handler */
+ err = wl_notify_roaming_status(cfg, ndev_to_cfgdev(ndev), &e, NULL);
- WL_DBG(("Enter state:%d\n", state));
- if (!cfg->tdls_supported) {
- /* FW doesn't support tdls. Do nothing */
- return -ENODEV;
- }
+ return err;
+}
- /* Protect tdls config session */
- mutex_lock(&cfg->tdls_sync);
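+/*
+ * Walk the TLV list in 'parse' and collect up to MAX_VNDR_IE_NUMBER
+ * vendor-specific IEs into 'vndr_ies', skipping WPA/WME IEs and any
+ * IE shorter than the minimum vendor IE length.
+ */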
+static s32
+wl_cfg80211_parse_vndr_ies(u8 *parse, u32 len,
+ struct parsed_vndr_ies *vndr_ies)
+{
+ s32 err = BCME_OK;
+ vndr_ie_t *vndrie;
+ bcm_tlv_t *ie;
+ struct parsed_vndr_ie_info *parsed_info;
+ u32 count = 0;
+ s32 remained_len;
- if (state == TDLS_STATE_TEARDOWN) {
- /* Host initiated TDLS tear down */
- err = dhd_tdls_enable(ndev, false, auto_mode, NULL);
- goto exit;
- } else if ((state == TDLS_STATE_AP_CREATE) ||
- (state == TDLS_STATE_NMI_CREATE)) {
- /* We don't support tdls while AP/GO/NAN is operational */
- update_reqd = true;
- enable = false;
- } else if ((state == TDLS_STATE_CONNECT) || (state == TDLS_STATE_IF_CREATE)) {
- if (wl_get_drv_status_all(cfg,
- CONNECTED) >= TDLS_MAX_IFACE_FOR_ENABLE) {
- /* For STA/GC connect command request, disable
- * tdls if we have any concurrent interfaces
- * operational.
- */
- WL_DBG(("Interface limit restriction. disable tdls.\n"));
- update_reqd = true;
- enable = false;
- }
- } else if ((state == TDLS_STATE_DISCONNECT) ||
- (state == TDLS_STATE_AP_DELETE) ||
- (state == TDLS_STATE_SETUP) ||
- (state == TDLS_STATE_IF_DELETE)) {
- /* Enable back the tdls connection only if we have less than
- * or equal to a single STA/GC connection.
- */
- if (wl_get_drv_status_all(cfg,
- CONNECTED) == 0) {
- /* If there are no interfaces connected, enable tdls */
- update_reqd = true;
- enable = true;
- } else if (wl_get_drv_status_all(cfg,
- CONNECTED) == TDLS_MAX_IFACE_FOR_ENABLE) {
- /* We have one interface in CONNECTED state.
- * Verify whether its a STA interface before
- * we enable back tdls.
- */
- GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
- for_each_ndev(cfg, iter, next) {
- GCC_DIAGNOSTIC_POP();
- if ((iter->ndev) && (wl_get_drv_status(cfg, CONNECTED, ndev)) &&
- (ndev->ieee80211_ptr->iftype != NL80211_IFTYPE_STATION)) {
- WL_DBG(("Non STA iface operational. cfg_iftype:%d"
- " Can't enable tdls.\n",
- ndev->ieee80211_ptr->iftype));
- err = -ENOTSUPP;
- goto exit;
- }
+ remained_len = (s32)len;
+ memset(vndr_ies, 0, sizeof(*vndr_ies));
+
+ WL_DBG(("---> len %d\n", len));
+ ie = (bcm_tlv_t *) parse;
+ if (!bcm_valid_tlv(ie, remained_len))
+ ie = NULL;
+ while (ie) {
+ if (count >= MAX_VNDR_IE_NUMBER)
+ break;
+ if (ie->id == DOT11_MNG_VS_ID) {
+ vndrie = (vndr_ie_t *) ie;
+ /* the length must cover at least the OUI plus one data byte */
+ if (vndrie->len < (VNDR_IE_MIN_LEN + 1)) {
+ WL_ERR(("%s: invalid vndr ie. length is too small %d\n",
+ __FUNCTION__, vndrie->len));
+ goto end;
+ }
+ /* if wpa or wme ie, do not add ie */
+ if (!bcmp(vndrie->oui, (u8*)WPA_OUI, WPA_OUI_LEN) &&
+ ((vndrie->data[0] == WPA_OUI_TYPE) ||
+ (vndrie->data[0] == WME_OUI_TYPE))) {
+ WL_DBG(("Found WPA/WME oui. Do not add it\n"));
+ goto end;
}
- /* No AP/GO found. Enable back tdls */
- update_reqd = true;
- enable = true;
- } else {
- WL_DBG(("Concurrent connection mode. Can't enable tdls. \n"));
- err = -ENOTSUPP;
- goto exit;
- }
- } else {
- WL_ERR(("Unknown tdls state:%d \n", state));
- err = -EINVAL;
- goto exit;
- }
- if (update_reqd == true) {
- if (dhdp->tdls_enable == enable) {
- WL_DBG(("No change in tdls state. Do nothing."
- " tdls_enable:%d\n", enable));
- goto exit;
- }
- err = wldev_iovar_setint(ndev, "tdls_enable", enable);
- if (unlikely(err)) {
- WL_ERR(("tdls_enable setting failed. err:%d\n", err));
- goto exit;
- } else {
- WL_INFORM_MEM(("tdls_enable %d state:%d\n", enable, state));
- /* Update the dhd state variable to be in sync */
- dhdp->tdls_enable = enable;
- if (state == TDLS_STATE_SETUP) {
- /* For host initiated setup, apply TDLS params
- * Don't propagate errors up for param config
- * failures
- */
- dhd_tdls_enable(ndev, true, auto_mode, NULL);
+ parsed_info = &vndr_ies->ie_info[count++];
- }
- }
- } else {
- WL_DBG(("Skip tdls config. state:%d update_reqd:%d "
- "current_status:%d \n",
- state, update_reqd, dhdp->tdls_enable));
- }
+ /* save vndr ie information */
+ parsed_info->ie_ptr = (char *)vndrie;
+ parsed_info->ie_len = (vndrie->len + TLV_HDR_LEN);
+ memcpy(&parsed_info->vndrie, vndrie, sizeof(vndr_ie_t));
+ vndr_ies->count = count;
-exit:
- if (err) {
- wl_flush_fw_log_buffer(ndev, FW_LOGSET_MASK_ALL);
+ WL_DBG(("\t ** OUI %02x %02x %02x, type 0x%02x len:%d\n",
+ parsed_info->vndrie.oui[0], parsed_info->vndrie.oui[1],
+ parsed_info->vndrie.oui[2], parsed_info->vndrie.data[0],
+ parsed_info->ie_len));
+ }
+end:
+ ie = bcm_next_tlv(ie, &remained_len);
}
- mutex_unlock(&cfg->tdls_sync);
return err;
}
-#endif /* WLTDLS */
-struct net_device* wl_get_ap_netdev(struct bcm_cfg80211 *cfg, char *ifname)
+#ifdef WLADPS_SEAK_AP_WAR
+static bool
+wl_find_vndr_ies_specific_vender(struct bcm_cfg80211 *cfg,
+ struct net_device *ndev, const u8 *vndr_oui)
{
- struct net_info *iter, *next;
- struct net_device *ndev = NULL;
+ struct wl_connect_info *conn_info = wl_to_conn(cfg);
+ struct parsed_vndr_ie_info *vndr_info_list;
+ struct parsed_vndr_ies vndr_ies;
+ bool ret = FALSE;
+ int i;
- GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
- for_each_ndev(cfg, iter, next) {
- GCC_DIAGNOSTIC_POP();
- if (iter->ndev) {
- if (strncmp(iter->ndev->name, ifname, IFNAMSIZ) == 0) {
- if (iter->ndev->ieee80211_ptr->iftype == NL80211_IFTYPE_AP) {
- ndev = iter->ndev;
+ if (conn_info->resp_ie_len) {
+ if ((wl_cfg80211_parse_vndr_ies((u8 *)conn_info->resp_ie,
+ conn_info->resp_ie_len, &vndr_ies)) == BCME_OK) {
+ for (i = 0; i < vndr_ies.count; i++) {
+ vndr_info_list = &vndr_ies.ie_info[i];
+ if (!bcmp(vndr_info_list->vndrie.oui,
+ (u8*)vndr_oui, DOT11_OUI_LEN)) {
+ WL_ERR(("Find OUI %02x %02x %02x\n",
+ vndr_info_list->vndrie.oui[0],
+ vndr_info_list->vndrie.oui[1],
+ vndr_info_list->vndrie.oui[2]));
+ ret = TRUE;
break;
}
}
}
}
-
- return ndev;
-}
-
-struct net_device*
-wl_get_netdev_by_name(struct bcm_cfg80211 *cfg, char *ifname)
-{
- struct net_info *iter, *next;
- struct net_device *ndev = NULL;
-
- GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
- for_each_ndev(cfg, iter, next) {
- GCC_DIAGNOSTIC_POP();
- if (iter->ndev) {
- if (strncmp(iter->ndev->name, ifname, IFNAMSIZ) == 0) {
- ndev = iter->ndev;
- break;
- }
- }
- }
-
- return ndev;
+ return ret;
}
-#ifdef SUPPORT_AP_HIGHER_BEACONRATE
-#define WLC_RATE_FLAG 0x80
-#define RATE_MASK 0x7f
-
-int wl_set_ap_beacon_rate(struct net_device *dev, int val, char *ifname)
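+/*
+ * Enable or disable ADPS by sending a WL_ADPS_IOV_MODE sub-command in a
+ * bcm_iov_buf_t to the "adps" iovar, once per band (1..MAX_BANDS).
+ */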
+static s32
+wl_set_adps_mode(struct bcm_cfg80211 *cfg, struct net_device *ndev, uint8 enable_mode)
{
- struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
- dhd_pub_t *dhdp;
- wl_rateset_args_t rs;
- int error = BCME_ERROR, i;
- struct net_device *ndev = NULL;
+ int i;
+ int len;
+ int ret = BCME_OK;
- dhdp = (dhd_pub_t *)(cfg->pub);
+ bcm_iov_buf_t *iov_buf = NULL;
+ wl_adps_params_v1_t *data = NULL;
- if (dhdp && !(dhdp->op_mode & DHD_FLAG_HOSTAP_MODE)) {
- WL_ERR(("Not Hostapd mode\n"));
- return BCME_NOTAP;
+ len = OFFSETOF(bcm_iov_buf_t, data) + sizeof(*data);
+ iov_buf = kmalloc(len, GFP_KERNEL);
+ if (iov_buf == NULL) {
+ WL_ERR(("%s - failed to allocate %d bytes for iov_buf\n", __FUNCTION__, len));
+ ret = BCME_NOMEM;
+ goto exit;
}
- ndev = wl_get_ap_netdev(cfg, ifname);
-
- if (ndev == NULL) {
- WL_ERR(("No softAP interface named %s\n", ifname));
- return BCME_NOTAP;
- }
+ iov_buf->version = WL_ADPS_IOV_VER;
+ iov_buf->len = sizeof(*data);
+ iov_buf->id = WL_ADPS_IOV_MODE;
- bzero(&rs, sizeof(wl_rateset_args_t));
- error = wldev_iovar_getbuf(ndev, "rateset", NULL, 0,
- &rs, sizeof(wl_rateset_args_t), NULL);
- if (error < 0) {
- WL_ERR(("get rateset failed = %d\n", error));
- return error;
+ data = (wl_adps_params_v1_t *)iov_buf->data;
+ data->version = ADPS_SUB_IOV_VERSION_1;
+ data->length = sizeof(*data);
+ data->mode = enable_mode;
+
+ for (i = 1; i <= MAX_BANDS; i++) {
+ data->band = i;
+ ret = wldev_iovar_setbuf(ndev, "adps", iov_buf, len,
+ cfg->ioctl_buf, WLC_IOCTL_SMLEN, NULL);
}
- if (rs.count < 1) {
- WL_ERR(("Failed to get rate count\n"));
- return BCME_ERROR;
+exit:
+ if (iov_buf) {
+ kfree(iov_buf);
}
+ return ret;
- /* Host delivers target rate in the unit of 500kbps */
- /* To make it to 1mbps unit, atof should be implemented for 5.5mbps basic rate */
- for (i = 0; i < rs.count && i < WL_NUMRATES; i++)
- if (rs.rates[i] & WLC_RATE_FLAG)
- if ((rs.rates[i] & RATE_MASK) == val)
- break;
+}
+#endif /* WLADPS_SEAK_AP_WAR */
- /* Valid rate has been delivered as an argument */
- if (i < rs.count && i < WL_NUMRATES) {
- error = wldev_iovar_setint(ndev, "force_bcn_rspec", val);
- if (error < 0) {
- WL_ERR(("set beacon rate failed = %d\n", error));
- return BCME_ERROR;
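+/* Return TRUE if the vendor IE's OUI is on exclude_vndr_oui_list */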
+static bool
+wl_vndr_ies_exclude_vndr_oui(struct parsed_vndr_ie_info *vndr_info)
+{
+ int i = 0;
+
+ while (exclude_vndr_oui_list[i]) {
+ if (!memcmp(vndr_info->vndrie.oui,
+ exclude_vndr_oui_list[i],
+ DOT11_OUI_LEN)) {
+ return TRUE;
}
- } else {
- WL_ERR(("Rate is invalid"));
- return BCME_BADARG;
+ i++;
}
- return BCME_OK;
+ return FALSE;
}
-int
-wl_get_ap_basic_rate(struct net_device *dev, char* command, char *ifname, int total_len)
+static bool
+wl_vndr_ies_check_duplicate_vndr_oui(struct bcm_cfg80211 *cfg,
+ struct parsed_vndr_ie_info *vndr_info)
{
- struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
- dhd_pub_t *dhdp;
- wl_rateset_args_t rs;
- int error = BCME_ERROR;
- int i, bytes_written = 0;
- struct net_device *ndev = NULL;
-
- dhdp = (dhd_pub_t *)(cfg->pub);
+ wl_vndr_oui_entry_t *oui_entry = NULL;
- if (!(dhdp->op_mode & DHD_FLAG_HOSTAP_MODE)) {
- WL_ERR(("Not Hostapd mode\n"));
- return BCME_NOTAP;
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wcast-qual"
+#endif
+ list_for_each_entry(oui_entry, &cfg->vndr_oui_list, list) {
+ if (!memcmp(oui_entry->oui, vndr_info->vndrie.oui, DOT11_OUI_LEN)) {
+ return TRUE;
+ }
}
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic pop
+#endif
- ndev = wl_get_ap_netdev(cfg, ifname);
+ return FALSE;
+}
- if (ndev == NULL) {
- WL_ERR(("No softAP interface named %s\n", ifname));
- return BCME_NOTAP;
- }
+static bool
+wl_vndr_ies_add_vendor_oui_list(struct bcm_cfg80211 *cfg,
+ struct parsed_vndr_ie_info *vndr_info)
+{
+ wl_vndr_oui_entry_t *oui_entry = NULL;
- bzero(&rs, sizeof(wl_rateset_args_t));
- error = wldev_iovar_getbuf(ndev, "rateset", NULL, 0,
- &rs, sizeof(wl_rateset_args_t), NULL);
- if (error < 0) {
- WL_ERR(("get rateset failed = %d\n", error));
- return error;
+ oui_entry = kmalloc(sizeof(*oui_entry), GFP_KERNEL);
+ if (oui_entry == NULL) {
+ WL_ERR(("alloc failed\n"));
+ return FALSE;
}
- if (rs.count < 1) {
- WL_ERR(("Failed to get rate count\n"));
- return BCME_ERROR;
- }
+ memcpy(oui_entry->oui, vndr_info->vndrie.oui, DOT11_OUI_LEN);
- /* Delivers basic rate in the unit of 500kbps to host */
- for (i = 0; i < rs.count && i < WL_NUMRATES; i++)
- if (rs.rates[i] & WLC_RATE_FLAG)
- bytes_written += snprintf(command + bytes_written, total_len,
- "%d ", rs.rates[i] & RATE_MASK);
+ INIT_LIST_HEAD(&oui_entry->list);
+ list_add_tail(&oui_entry->list, &cfg->vndr_oui_list);
- /* Remove last space in the command buffer */
- if (bytes_written && (bytes_written < total_len)) {
- command[bytes_written - 1] = '\0';
- bytes_written--;
- }
+ return TRUE;
+}
- return bytes_written;
+static void
+wl_vndr_ies_clear_vendor_oui_list(struct bcm_cfg80211 *cfg)
+{
+ wl_vndr_oui_entry_t *oui_entry = NULL;
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wcast-qual"
+#endif
+ while (!list_empty(&cfg->vndr_oui_list)) {
+ oui_entry = list_entry(cfg->vndr_oui_list.next, wl_vndr_oui_entry_t, list);
+ if (oui_entry) {
+ list_del(&oui_entry->list);
+ kfree(oui_entry);
+ }
+ }
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic pop
+#endif
}
-#endif /* SUPPORT_AP_HIGHER_BEACONRATE */
-
-#ifdef SUPPORT_AP_RADIO_PWRSAVE
-#define MSEC_PER_MIN (60000L)
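+/*
+ * Rebuild cfg->vndr_oui_list from the vendor IEs in the association
+ * response and, if 'vndr_oui' is non-NULL, format each OUI into it as
+ * "XX-XX-XX "; returns the number of unique OUIs found.
+ */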
static int
-_wl_update_ap_rps_params(struct net_device *dev)
+wl_vndr_ies_get_vendor_oui(struct bcm_cfg80211 *cfg, struct net_device *ndev,
+ char *vndr_oui, u32 vndr_oui_len)
{
- struct bcm_cfg80211 *cfg = NULL;
- rpsnoa_iovar_params_t iovar;
- u8 smbuf[WLC_IOCTL_SMLEN];
-
- if (!dev)
- return BCME_BADARG;
-
- cfg = wl_get_cfg(dev);
+ int i;
+ int vndr_oui_num = 0;
- bzero(&iovar, sizeof(iovar));
- bzero(smbuf, sizeof(smbuf));
+ struct wl_connect_info *conn_info = wl_to_conn(cfg);
+ wl_vndr_oui_entry_t *oui_entry = NULL;
+ struct parsed_vndr_ie_info *vndr_info;
+ struct parsed_vndr_ies vndr_ies;
- iovar.hdr.ver = RADIO_PWRSAVE_VERSION;
- iovar.hdr.subcmd = WL_RPSNOA_CMD_PARAMS;
- iovar.hdr.len = sizeof(iovar);
- iovar.param->band = WLC_BAND_ALL;
- iovar.param->level = cfg->ap_rps_info.level;
- iovar.param->stas_assoc_check = cfg->ap_rps_info.sta_assoc_check;
- iovar.param->pps = cfg->ap_rps_info.pps;
- iovar.param->quiet_time = cfg->ap_rps_info.quiet_time;
+ char *pos = vndr_oui;
+ u32 remained_buf_len = vndr_oui_len;
- if (wldev_iovar_setbuf(dev, "rpsnoa", &iovar, sizeof(iovar),
- smbuf, sizeof(smbuf), NULL)) {
- WL_ERR(("Failed to set rpsnoa params"));
+ if (!conn_info->resp_ie_len) {
return BCME_ERROR;
}
- return BCME_OK;
-}
-
-int
-wl_get_ap_rps(struct net_device *dev, char* command, char *ifname, int total_len)
-{
- struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
- dhd_pub_t *dhdp;
- int error = BCME_ERROR;
- int bytes_written = 0;
- struct net_device *ndev = NULL;
- rpsnoa_iovar_status_t iovar;
- u8 smbuf[WLC_IOCTL_SMLEN];
- u32 chanspec = 0;
- u8 idx = 0;
- u16 state;
- u32 sleep;
- u32 time_since_enable;
+ wl_vndr_ies_clear_vendor_oui_list(cfg);
- dhdp = (dhd_pub_t *)(cfg->pub);
+ if ((wl_cfg80211_parse_vndr_ies((u8 *)conn_info->resp_ie,
+ conn_info->resp_ie_len, &vndr_ies)) == BCME_OK) {
+ for (i = 0; i < vndr_ies.count; i++) {
+ vndr_info = &vndr_ies.ie_info[i];
+ if (wl_vndr_ies_exclude_vndr_oui(vndr_info)) {
+ continue;
+ }
- if (!dhdp) {
- error = BCME_NOTUP;
- goto fail;
- }
+ if (wl_vndr_ies_check_duplicate_vndr_oui(cfg, vndr_info)) {
+ continue;
+ }
- if (!(dhdp->op_mode & DHD_FLAG_HOSTAP_MODE)) {
- WL_ERR(("Not Hostapd mode\n"));
- error = BCME_NOTAP;
- goto fail;
+ wl_vndr_ies_add_vendor_oui_list(cfg, vndr_info);
+ vndr_oui_num++;
+ }
}
- ndev = wl_get_ap_netdev(cfg, ifname);
-
- if (ndev == NULL) {
- WL_ERR(("No softAP interface named %s\n", ifname));
- error = BCME_NOTAP;
- goto fail;
+ if (vndr_oui) {
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wcast-qual"
+#endif
+ list_for_each_entry(oui_entry, &cfg->vndr_oui_list, list) {
+ if (remained_buf_len < VNDR_OUI_STR_LEN) {
+ return BCME_ERROR;
+ }
+ pos += snprintf(pos, VNDR_OUI_STR_LEN, "%02X-%02X-%02X ",
+ oui_entry->oui[0], oui_entry->oui[1], oui_entry->oui[2]);
+ remained_buf_len -= VNDR_OUI_STR_LEN;
+ }
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic pop
+#endif
}
- bzero(&iovar, sizeof(iovar));
- bzero(smbuf, sizeof(smbuf));
+ return vndr_oui_num;
+}
- iovar.hdr.ver = RADIO_PWRSAVE_VERSION;
- iovar.hdr.subcmd = WL_RPSNOA_CMD_STATUS;
- iovar.hdr.len = sizeof(iovar);
- iovar.stats->band = WLC_BAND_ALL;
+int
+wl_cfg80211_get_vndr_ouilist(struct bcm_cfg80211 *cfg, uint8 *buf, int max_cnt)
+{
+ wl_vndr_oui_entry_t *oui_entry = NULL;
+ int cnt = 0;
- error = wldev_iovar_getbuf(ndev, "rpsnoa", &iovar, sizeof(iovar),
- smbuf, sizeof(smbuf), NULL);
- if (error < 0) {
- WL_ERR(("get ap radio pwrsave failed = %d\n", error));
- goto fail;
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wcast-qual"
+#endif
+ list_for_each_entry(oui_entry, &cfg->vndr_oui_list, list) {
+ memcpy(buf, oui_entry->oui, DOT11_OUI_LEN);
+ cnt++;
+ if (cnt >= max_cnt) {
+ return cnt;
+ }
+ buf += DOT11_OUI_LEN;
}
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic pop
+#endif
+ return cnt;
+}
- /* RSDB event doesn't seem to be handled correctly.
- * So check chanspec of AP directly from the firmware
- */
- error = wldev_iovar_getint(ndev, "chanspec", (s32 *)&chanspec);
- if (error < 0) {
- WL_ERR(("get chanspec from AP failed = %d\n", error));
- goto fail;
- }
+s32
+wl_cfg80211_clear_per_bss_ies(struct bcm_cfg80211 *cfg, s32 bssidx)
+{
+ s32 index;
+ struct net_info *netinfo;
+ s32 vndrie_flag[] = {VNDR_IE_BEACON_FLAG, VNDR_IE_PRBRSP_FLAG,
+ VNDR_IE_ASSOCRSP_FLAG, VNDR_IE_PRBREQ_FLAG, VNDR_IE_ASSOCREQ_FLAG};
- chanspec = wl_chspec_driver_to_host(chanspec);
- if (CHSPEC_IS2G(chanspec))
- idx = 0;
- else if (CHSPEC_IS5G(chanspec))
- idx = 1;
- else {
- error = BCME_BADCHAN;
- goto fail;
+ netinfo = wl_get_netinfo_by_bssidx(cfg, bssidx);
+ if (!netinfo || !netinfo->wdev) {
+ WL_ERR(("netinfo or netinfo->wdev is NULL\n"));
+ return -1;
}
- state = ((rpsnoa_iovar_status_t *)smbuf)->stats[idx].state;
- sleep = ((rpsnoa_iovar_status_t *)smbuf)->stats[idx].sleep_dur;
- time_since_enable = ((rpsnoa_iovar_status_t *)smbuf)->stats[idx].sleep_avail_dur;
+ WL_DBG(("clear management vendor IEs for bssidx:%d \n", bssidx));
+ /* Clear the IEs set in the firmware so that host is in sync with firmware */
+ for (index = 0; index < ARRAYSIZE(vndrie_flag); index++) {
+ if (wl_cfg80211_set_mgmt_vndr_ies(cfg, wdev_to_cfgdev(netinfo->wdev),
+ bssidx, vndrie_flag[index], NULL, 0) < 0)
+ WL_ERR(("vndr_ies clear failed. Ignoring.. \n"));
+ }
- /* Conver ms to minute, round down only */
- sleep = DIV_U64_BY_U32(sleep, MSEC_PER_MIN);
- time_since_enable = DIV_U64_BY_U32(time_since_enable, MSEC_PER_MIN);
+ return 0;
+}
- bytes_written += snprintf(command + bytes_written, total_len,
- "state=%d sleep=%d time_since_enable=%d", state, sleep, time_since_enable);
- error = bytes_written;
+s32
+wl_cfg80211_clear_mgmt_vndr_ies(struct bcm_cfg80211 *cfg)
+{
+ struct net_info *iter, *next;
-fail:
- return error;
+ WL_DBG(("clear management vendor IEs \n"));
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == \
+ 4 && __GNUC_MINOR__ >= 6))
+_Pragma("GCC diagnostic push")
+_Pragma("GCC diagnostic ignored \"-Wcast-qual\"")
+#endif
+ for_each_ndev(cfg, iter, next) {
+ wl_cfg80211_clear_per_bss_ies(cfg, iter->bssidx);
+ }
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == \
+ 4 && __GNUC_MINOR__ >= 6))
+_Pragma("GCC diagnostic pop")
+#endif
+ return 0;
}
+#define WL_VNDR_IE_MAXLEN 2048
+static s8 g_mgmt_ie_buf[WL_VNDR_IE_MAXLEN];
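+/*
+ * Sync vendor IEs of the given type (pktflag) with the firmware:
+ * compare the new IEs against the copy cached per bss, build "del"
+ * entries for the old IEs and "add" entries for the new ones in
+ * g_mgmt_ie_buf, then push the combined buffer via the "vndr_ie" iovar.
+ */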
int
-wl_set_ap_rps(struct net_device *dev, bool enable, char *ifname)
+wl_cfg80211_set_mgmt_vndr_ies(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev,
+ s32 bssidx, s32 pktflag, const u8 *vndr_ie, u32 vndr_ie_len)
{
- struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
- dhd_pub_t *dhdp;
struct net_device *ndev = NULL;
- rpsnoa_iovar_t iovar;
- u8 smbuf[WLC_IOCTL_SMLEN];
- int ret = BCME_OK;
+ s32 ret = BCME_OK;
+ u8 *curr_ie_buf = NULL;
+ u8 *mgmt_ie_buf = NULL;
+ u32 mgmt_ie_buf_len = 0;
+ u32 *mgmt_ie_len = 0;
+ u32 del_add_ie_buf_len = 0;
+ u32 total_ie_buf_len = 0;
+ u32 parsed_ie_buf_len = 0;
+ struct parsed_vndr_ies old_vndr_ies;
+ struct parsed_vndr_ies new_vndr_ies;
+ s32 i;
+ u8 *ptr;
+ s32 remained_buf_len;
+ wl_bss_vndr_ies_t *ies = NULL;
+ struct net_info *netinfo;
- dhdp = (dhd_pub_t *)(cfg->pub);
+ WL_DBG(("Enter. pktflag:0x%x bssidx:%x vnd_ie_len:%d \n",
+ pktflag, bssidx, vndr_ie_len));
- if (!dhdp) {
- ret = BCME_NOTUP;
- goto exit;
+ ndev = cfgdev_to_wlc_ndev(cfgdev, cfg);
+
+ if (bssidx > WL_MAX_IFS) {
+ WL_ERR(("bssidx > supported concurrent Ifaces \n"));
+ return -EINVAL;
}
- if (!(dhdp->op_mode & DHD_FLAG_HOSTAP_MODE)) {
- WL_ERR(("Not Hostapd mode\n"));
- ret = BCME_NOTAP;
- goto exit;
+ netinfo = wl_get_netinfo_by_bssidx(cfg, bssidx);
+ if (!netinfo) {
+ WL_ERR(("net_info ptr is NULL \n"));
+ return -EINVAL;
}
- ndev = wl_get_ap_netdev(cfg, ifname);
+ /* Clear the global buffer */
+ memset(g_mgmt_ie_buf, 0, sizeof(g_mgmt_ie_buf));
+ curr_ie_buf = g_mgmt_ie_buf;
+ ies = &netinfo->bss.ies;
- if (ndev == NULL) {
- WL_ERR(("No softAP interface named %s\n", ifname));
- ret = BCME_NOTAP;
- goto exit;
+ switch (pktflag) {
+ case VNDR_IE_PRBRSP_FLAG :
+ mgmt_ie_buf = ies->probe_res_ie;
+ mgmt_ie_len = &ies->probe_res_ie_len;
+ mgmt_ie_buf_len = sizeof(ies->probe_res_ie);
+ break;
+ case VNDR_IE_ASSOCRSP_FLAG :
+ mgmt_ie_buf = ies->assoc_res_ie;
+ mgmt_ie_len = &ies->assoc_res_ie_len;
+ mgmt_ie_buf_len = sizeof(ies->assoc_res_ie);
+ break;
+ case VNDR_IE_BEACON_FLAG :
+ mgmt_ie_buf = ies->beacon_ie;
+ mgmt_ie_len = &ies->beacon_ie_len;
+ mgmt_ie_buf_len = sizeof(ies->beacon_ie);
+ break;
+ case VNDR_IE_PRBREQ_FLAG :
+ mgmt_ie_buf = ies->probe_req_ie;
+ mgmt_ie_len = &ies->probe_req_ie_len;
+ mgmt_ie_buf_len = sizeof(ies->probe_req_ie);
+ break;
+ case VNDR_IE_ASSOCREQ_FLAG :
+ mgmt_ie_buf = ies->assoc_req_ie;
+ mgmt_ie_len = &ies->assoc_req_ie_len;
+ mgmt_ie_buf_len = sizeof(ies->assoc_req_ie);
+ break;
+ default:
+ mgmt_ie_buf = NULL;
+ mgmt_ie_len = NULL;
+ WL_ERR(("not suitable packet type (%d)\n", pktflag));
+ return BCME_ERROR;
}
- if (cfg->ap_rps_info.enable != enable) {
- cfg->ap_rps_info.enable = enable;
- if (enable) {
- ret = _wl_update_ap_rps_params(ndev);
- if (ret) {
- WL_ERR(("Filed to update rpsnoa params\n"));
+ if (vndr_ie_len > mgmt_ie_buf_len) {
+ WL_ERR(("extra IE size too big\n"));
+ ret = -ENOMEM;
+ } else {
+ /* parse and save new vndr_ie in curr_ie_buff before comparing it */
+ if (vndr_ie && vndr_ie_len && curr_ie_buf) {
+ ptr = curr_ie_buf;
+/* We must cast away the constness of vndr_ie here: changing the argument
+ * to non-const would cascade errors elsewhere, and fixing those would
+ * require const casts anyway.
+ */
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == \
+ 4 && __GNUC_MINOR__ >= 6))
+_Pragma("GCC diagnostic push")
+_Pragma("GCC diagnostic ignored \"-Wcast-qual\"")
+#endif
+ if ((ret = wl_cfg80211_parse_vndr_ies((u8 *)vndr_ie,
+ vndr_ie_len, &new_vndr_ies)) < 0) {
+ WL_ERR(("parse vndr ie failed \n"));
goto exit;
}
- }
- bzero(&iovar, sizeof(iovar));
- bzero(smbuf, sizeof(smbuf));
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == \
+ 4 && __GNUC_MINOR__ >= 6))
+_Pragma("GCC diagnostic pop")
+#endif
+ for (i = 0; i < new_vndr_ies.count; i++) {
+ struct parsed_vndr_ie_info *vndrie_info =
+ &new_vndr_ies.ie_info[i];
- iovar.hdr.ver = RADIO_PWRSAVE_VERSION;
- iovar.hdr.subcmd = WL_RPSNOA_CMD_ENABLE;
- iovar.hdr.len = sizeof(iovar);
- iovar.data->band = WLC_BAND_ALL;
- iovar.data->value = (int16)enable;
+ if ((parsed_ie_buf_len + vndrie_info->ie_len) > WL_VNDR_IE_MAXLEN) {
+ WL_ERR(("IE size is too big (%d > %d)\n",
+ parsed_ie_buf_len, WL_VNDR_IE_MAXLEN));
+ ret = -EINVAL;
+ goto exit;
+ }
- ret = wldev_iovar_setbuf(ndev, "rpsnoa", &iovar, sizeof(iovar),
- smbuf, sizeof(smbuf), NULL);
- if (ret) {
- WL_ERR(("Failed to enable AP radio power save"));
- goto exit;
+ memcpy(ptr + parsed_ie_buf_len, vndrie_info->ie_ptr,
+ vndrie_info->ie_len);
+ parsed_ie_buf_len += vndrie_info->ie_len;
+ }
}
- cfg->ap_rps_info.enable = enable;
- }
-exit:
- return ret;
-}
-
-int
-wl_update_ap_rps_params(struct net_device *dev, ap_rps_info_t* rps, char *ifname)
-{
- struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
- dhd_pub_t *dhdp;
- struct net_device *ndev = NULL;
-
- dhdp = (dhd_pub_t *)(cfg->pub);
- if (!dhdp)
- return BCME_NOTUP;
-
- if (!(dhdp->op_mode & DHD_FLAG_HOSTAP_MODE)) {
- WL_ERR(("Not Hostapd mode\n"));
- return BCME_NOTAP;
- }
-
- ndev = wl_get_ap_netdev(cfg, ifname);
+ if (mgmt_ie_buf != NULL) {
+ if (parsed_ie_buf_len && (parsed_ie_buf_len == *mgmt_ie_len) &&
+ (memcmp(mgmt_ie_buf, curr_ie_buf, parsed_ie_buf_len) == 0)) {
+ WL_INFORM(("Previous mgmt IE is equals to current IE"));
+ goto exit;
+ }
- if (ndev == NULL) {
- WL_ERR(("No softAP interface named %s\n", ifname));
- return BCME_NOTAP;
- }
+ /* parse old vndr_ie */
+ if ((ret = wl_cfg80211_parse_vndr_ies(mgmt_ie_buf, *mgmt_ie_len,
+ &old_vndr_ies)) < 0) {
+ WL_ERR(("parse vndr ie failed \n"));
+ goto exit;
+ }
+ /* make a command to delete old ie */
+ for (i = 0; i < old_vndr_ies.count; i++) {
+ struct parsed_vndr_ie_info *vndrie_info =
+ &old_vndr_ies.ie_info[i];
- if (!rps)
- return BCME_BADARG;
+ WL_INFORM(("DELETED ID : %d, Len: %d , OUI:%02x:%02x:%02x\n",
+ vndrie_info->vndrie.id, vndrie_info->vndrie.len,
+ vndrie_info->vndrie.oui[0], vndrie_info->vndrie.oui[1],
+ vndrie_info->vndrie.oui[2]));
- if (rps->pps < RADIO_PWRSAVE_PPS_MIN)
- return BCME_BADARG;
+ del_add_ie_buf_len = wl_cfgp2p_vndr_ie(cfg, curr_ie_buf,
+ pktflag, vndrie_info->vndrie.oui,
+ vndrie_info->vndrie.id,
+ vndrie_info->ie_ptr + VNDR_IE_FIXED_LEN,
+ vndrie_info->ie_len - VNDR_IE_FIXED_LEN,
+ "del");
- if (rps->level < RADIO_PWRSAVE_LEVEL_MIN ||
- rps->level > RADIO_PWRSAVE_LEVEL_MAX)
- return BCME_BADARG;
+ curr_ie_buf += del_add_ie_buf_len;
+ total_ie_buf_len += del_add_ie_buf_len;
+ }
+ }
- if (rps->quiet_time < RADIO_PWRSAVE_QUIETTIME_MIN)
- return BCME_BADARG;
+ *mgmt_ie_len = 0;
+ /* Add if there is any extra IE */
+ if (mgmt_ie_buf && parsed_ie_buf_len) {
+ ptr = mgmt_ie_buf;
- if (rps->sta_assoc_check > RADIO_PWRSAVE_ASSOCCHECK_MAX ||
- rps->sta_assoc_check < RADIO_PWRSAVE_ASSOCCHECK_MIN)
- return BCME_BADARG;
+ remained_buf_len = mgmt_ie_buf_len;
- cfg->ap_rps_info.pps = rps->pps;
- cfg->ap_rps_info.level = rps->level;
- cfg->ap_rps_info.quiet_time = rps->quiet_time;
- cfg->ap_rps_info.sta_assoc_check = rps->sta_assoc_check;
+ /* make a command to add new ie */
+ for (i = 0; i < new_vndr_ies.count; i++) {
+ struct parsed_vndr_ie_info *vndrie_info =
+ &new_vndr_ies.ie_info[i];
- if (cfg->ap_rps_info.enable) {
- if (_wl_update_ap_rps_params(ndev)) {
- WL_ERR(("Failed to update rpsnoa params"));
- return BCME_ERROR;
- }
- }
+ WL_INFORM(("ADDED ID : %d, Len: %d(%d), OUI:%02x:%02x:%02x\n",
+ vndrie_info->vndrie.id, vndrie_info->vndrie.len,
+ vndrie_info->ie_len - 2,
+ vndrie_info->vndrie.oui[0], vndrie_info->vndrie.oui[1],
+ vndrie_info->vndrie.oui[2]));
- return BCME_OK;
-}
+ del_add_ie_buf_len = wl_cfgp2p_vndr_ie(cfg, curr_ie_buf,
+ pktflag, vndrie_info->vndrie.oui,
+ vndrie_info->vndrie.id,
+ vndrie_info->ie_ptr + VNDR_IE_FIXED_LEN,
+ vndrie_info->ie_len - VNDR_IE_FIXED_LEN,
+ "add");
-void
-wl_cfg80211_init_ap_rps(struct bcm_cfg80211 *cfg)
-{
- cfg->ap_rps_info.enable = FALSE;
- cfg->ap_rps_info.sta_assoc_check = RADIO_PWRSAVE_STAS_ASSOC_CHECK;
- cfg->ap_rps_info.pps = RADIO_PWRSAVE_PPS;
- cfg->ap_rps_info.quiet_time = RADIO_PWRSAVE_QUIET_TIME;
- cfg->ap_rps_info.level = RADIO_PWRSAVE_LEVEL;
-}
-#endif /* SUPPORT_AP_RADIO_PWRSAVE */
+ /* verify remained buf size before copy data */
+ if (remained_buf_len >= vndrie_info->ie_len) {
+ remained_buf_len -= vndrie_info->ie_len;
+ } else {
+ WL_ERR(("no space in mgmt_ie_buf: pktflag = %d, "
+ "found vndr ies # = %d(cur %d), remained len %d, "
+ "cur mgmt_ie_len %d, new ie len = %d\n",
+ pktflag, new_vndr_ies.count, i, remained_buf_len,
+ *mgmt_ie_len, vndrie_info->ie_len));
+ break;
+ }
-int
-wl_cfg80211_iface_count(struct net_device *dev)
-{
- struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
- struct net_info *iter, *next;
- int iface_count = 0;
+ /* save the parsed IE in cfg struct */
+ memcpy(ptr + (*mgmt_ie_len), vndrie_info->ie_ptr,
+ vndrie_info->ie_len);
+ *mgmt_ie_len += vndrie_info->ie_len;
+ curr_ie_buf += del_add_ie_buf_len;
+ total_ie_buf_len += del_add_ie_buf_len;
+ }
+ }
- /* Return the count of network interfaces (skip netless p2p discovery
- * interface)
- */
- GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
- for_each_ndev(cfg, iter, next) {
- GCC_DIAGNOSTIC_POP();
- if (iter->ndev) {
- iface_count++;
+ if (total_ie_buf_len && cfg->ioctl_buf != NULL) {
+ ret = wldev_iovar_setbuf_bsscfg(ndev, "vndr_ie", g_mgmt_ie_buf,
+ total_ie_buf_len, cfg->ioctl_buf, WLC_IOCTL_MAXLEN,
+ bssidx, &cfg->ioctl_buf_sync);
+ if (ret)
+ WL_ERR(("vndr ie set error : %d\n", ret));
}
}
- return iface_count;
+exit:
+
+ return ret;
}
-#ifdef SUPPORT_SET_CAC
-static void
-wl_cfg80211_set_cac(struct bcm_cfg80211 *cfg, int enable)
+#ifdef WL_CFG80211_ACL
+static int
+wl_cfg80211_set_mac_acl(struct wiphy *wiphy, struct net_device *cfgdev,
+ const struct cfg80211_acl_data *acl)
{
+ int i;
int ret = 0;
- dhd_pub_t *dhd = (dhd_pub_t *)(cfg->pub);
+ int macnum = 0;
+ int macmode = MACLIST_MODE_DISABLED;
+ struct maclist *list;
- WL_DBG(("cac enable %d\n", enable));
- if (!dhd) {
- WL_ERR(("dhd is NULL\n"));
- return;
- }
- if ((ret = dhd_wl_ioctl_set_intiovar(dhd, "cac", enable,
- WLC_SET_VAR, TRUE, 0)) < 0) {
- WL_ERR(("Failed set CAC, ret=%d\n", ret));
- } else {
- WL_DBG(("CAC set successfully\n"));
+ /* get the MAC filter mode */
+ if (acl && acl->acl_policy == NL80211_ACL_POLICY_DENY_UNLESS_LISTED) {
+ macmode = MACLIST_MODE_ALLOW;
+ } else if (acl && acl->acl_policy == NL80211_ACL_POLICY_ACCEPT_UNLESS_LISTED &&
+ acl->n_acl_entries) {
+ macmode = MACLIST_MODE_DENY;
}
- return;
-}
-#endif /* SUPPORT_SET_CAC */
-
-#ifdef SUPPORT_RSSI_SUM_REPORT
-int
-wl_get_rssi_per_ant(struct net_device *dev, char *ifname, char *peer_mac, void *param)
-{
- struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
- wl_rssi_ant_mimo_t *get_param = (wl_rssi_ant_mimo_t *)param;
- rssi_ant_param_t *set_param = NULL;
- struct net_device *ifdev = NULL;
- char iobuf[WLC_IOCTL_SMLEN];
- int err = BCME_OK;
- int iftype = 0;
- bzero(iobuf, WLC_IOCTL_SMLEN);
+ /* if acl == NULL, macmode is still disabled.. */
+ if (macmode == MACLIST_MODE_DISABLED) {
+ if ((ret = wl_android_set_ap_mac_list(cfgdev, macmode, NULL)) != 0)
+ WL_ERR(("%s : Setting MAC list failed error=%d\n", __FUNCTION__, ret));
- /* Check the interface type */
- ifdev = wl_get_netdev_by_name(cfg, ifname);
- if (ifdev == NULL) {
- WL_ERR(("Could not find net_device for ifname:%s\n", ifname));
- err = BCME_BADARG;
- goto fail;
+ return ret;
}
- iftype = ifdev->ieee80211_ptr->iftype;
- if (iftype == NL80211_IFTYPE_AP || iftype == NL80211_IFTYPE_P2P_GO) {
- if (peer_mac) {
- set_param = (rssi_ant_param_t *)MALLOCZ(cfg->osh, sizeof(rssi_ant_param_t));
- err = wl_cfg80211_ether_atoe(peer_mac, &set_param->ea);
- if (!err) {
- WL_ERR(("Invalid Peer MAC format\n"));
- err = BCME_BADARG;
- goto fail;
- }
- } else {
- WL_ERR(("Peer MAC is not provided for iftype %d\n", iftype));
- err = BCME_BADARG;
- goto fail;
- }
+ macnum = acl->n_acl_entries;
+ if (macnum < 0 || macnum > MAX_NUM_MAC_FILT) {
+ WL_ERR(("%s : invalid number of MAC address entries %d\n",
+ __FUNCTION__, macnum));
+ return -1;
}
- err = wldev_iovar_getbuf(ifdev, "phy_rssi_ant", peer_mac ?
- (void *)&(set_param->ea) : NULL, peer_mac ? ETHER_ADDR_LEN : 0,
- (void *)iobuf, sizeof(iobuf), NULL);
- if (unlikely(err)) {
- WL_ERR(("Failed to get rssi info, err=%d\n", err));
- } else {
- memcpy(get_param, iobuf, sizeof(wl_rssi_ant_mimo_t));
- if (get_param->count == 0) {
- WL_ERR(("Not supported on this chip\n"));
- err = BCME_UNSUPPORTED;
- }
+ /* allocate memory for the MAC list */
+ list = (struct maclist*)kmalloc(sizeof(int) +
+ sizeof(struct ether_addr) * macnum, GFP_KERNEL);
+ if (!list) {
+ WL_ERR(("%s : failed to allocate memory\n", __FUNCTION__));
+ return -1;
}
-fail:
- if (set_param) {
- MFREE(cfg->osh, set_param, sizeof(rssi_ant_param_t));
+ /* prepare the MAC list */
+ list->count = htod32(macnum);
+ for (i = 0; i < macnum; i++) {
+ memcpy(&list->ea[i], &acl->mac_addrs[i], ETHER_ADDR_LEN);
}
+ /* set the list */
+ if ((ret = wl_android_set_ap_mac_list(cfgdev, macmode, list)) != 0)
+ WL_ERR(("%s : Setting MAC list failed error=%d\n", __FUNCTION__, ret));
- return err;
+ kfree(list);
+
+ return ret;
}
+#endif /* WL_CFG80211_ACL */
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0))
+int wl_chspec_chandef(chanspec_t chanspec,
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0))
+ struct cfg80211_chan_def *chandef,
+#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0) && (LINUX_VERSION_CODE <= KERNEL_VERSION(3, 7, 0)))
+ struct chan_info *chaninfo,
+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0)) */
+struct wiphy *wiphy)
-int
-wl_get_rssi_logging(struct net_device *dev, void *param)
{
- rssilog_get_param_t *get_param = (rssilog_get_param_t *)param;
- char iobuf[WLC_IOCTL_SMLEN];
- int err = BCME_OK;
+ uint16 freq = 0;
+ int chan_type = 0;
+ int channel = 0;
+ struct ieee80211_channel *chan;
- bzero(iobuf, WLC_IOCTL_SMLEN);
- bzero(get_param, sizeof(*get_param));
- err = wldev_iovar_getbuf(dev, "rssilog", NULL, 0, (void *)iobuf,
- sizeof(iobuf), NULL);
- if (err) {
- WL_ERR(("Failed to get rssi logging info, err=%d\n", err));
- } else {
- memcpy(get_param, iobuf, sizeof(*get_param));
+ if (!chandef) {
+ return -1;
}
+ channel = CHSPEC_CHANNEL(chanspec);
- return err;
-}
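+ /* For 40/80 MHz chanspecs the channel field is the center channel; adjust below to the 20 MHz control channel based on the control sideband */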
+ switch (CHSPEC_BW(chanspec)) {
+ case WL_CHANSPEC_BW_20:
+ chan_type = NL80211_CHAN_HT20;
+ break;
+ case WL_CHANSPEC_BW_40:
+ {
+ if (CHSPEC_SB_UPPER(chanspec)) {
+ channel += CH_10MHZ_APART;
+ } else {
+ channel -= CH_10MHZ_APART;
+ }
+ }
+ chan_type = NL80211_CHAN_HT40PLUS;
+ break;
-int
-wl_set_rssi_logging(struct net_device *dev, void *param)
-{
- struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
- rssilog_set_param_t *set_param = (rssilog_set_param_t *)param;
- int err;
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION (3, 8, 0))
+ case WL_CHANSPEC_BW_80:
+ case WL_CHANSPEC_BW_8080:
+ {
+ uint16 sb = CHSPEC_CTL_SB(chanspec);
+
+ if (sb == WL_CHANSPEC_CTL_SB_LL) {
+ channel -= (CH_10MHZ_APART + CH_20MHZ_APART);
+ } else if (sb == WL_CHANSPEC_CTL_SB_LU) {
+ channel -= CH_10MHZ_APART;
+ } else if (sb == WL_CHANSPEC_CTL_SB_UL) {
+ channel += CH_10MHZ_APART;
+ } else {
+ /* WL_CHANSPEC_CTL_SB_UU */
+ channel += (CH_10MHZ_APART + CH_20MHZ_APART);
+ }
+
+ if (sb == WL_CHANSPEC_CTL_SB_LL || sb == WL_CHANSPEC_CTL_SB_LU)
+ chan_type = NL80211_CHAN_HT40MINUS;
+ else if (sb == WL_CHANSPEC_CTL_SB_UL || sb == WL_CHANSPEC_CTL_SB_UU)
+ chan_type = NL80211_CHAN_HT40PLUS;
+ }
+ break;
+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION (3, 8, 0)) */
+ default:
+ chan_type = NL80211_CHAN_HT20;
+ break;
- err = wldev_iovar_setbuf(dev, "rssilog", set_param,
- sizeof(*set_param), cfg->ioctl_buf, WLC_IOCTL_SMLEN,
- &cfg->ioctl_buf_sync);
- if (err) {
- WL_ERR(("Failed to set rssi logging param, err=%d\n", err));
}
- return err;
-}
-#endif /* SUPPORT_RSSI_SUM_REPORT */
-/* Function to flush the FW preserve buffer content
-* The buffer content is sent to host in form of events.
-*/
-void
-wl_flush_fw_log_buffer(struct net_device *dev, uint32 logset_mask)
-{
- struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
- dhd_pub_t *dhd = (dhd_pub_t *)(cfg->pub);
- int i;
- int err = 0;
- u8 buf[WLC_IOCTL_SMLEN] = {0};
- wl_el_set_params_t set_param;
+ if (CHSPEC_IS5G(chanspec))
+ freq = ieee80211_channel_to_frequency(channel, NL80211_BAND_5GHZ);
+ else
+ freq = ieee80211_channel_to_frequency(channel, NL80211_BAND_2GHZ);
- /* Set the size of data to retrieve */
- memset(&set_param, 0, sizeof(set_param));
- set_param.size = WLC_IOCTL_SMLEN;
+ chan = ieee80211_get_channel(wiphy, freq);
+ WL_DBG(("channel:%d freq:%d chan_type: %d chan_ptr:%p \n",
+ channel, freq, chan_type, chan));
- for (i = 0; i < dhd->event_log_max_sets; i++)
- {
- if ((0x01u << i) & logset_mask) {
- set_param.set = i;
- err = wldev_iovar_setbuf(dev, "event_log_get", &set_param,
- sizeof(struct wl_el_set_params_s), buf, WLC_IOCTL_SMLEN,
- NULL);
- if (err) {
- WL_DBG(("Failed to get fw preserve logs, err=%d\n", err));
- }
- }
+ if (unlikely(!chan)) {
+ /* fw and cfg80211 channel lists are not in sync */
+ WL_ERR(("Couldn't find matching channel in wiphy channel list \n"));
+ ASSERT(0);
+ return -EINVAL;
}
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION (3, 8, 0))
+ cfg80211_chandef_create(chandef, chan, chan_type);
+#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0) && (LINUX_VERSION_CODE <= KERNEL_VERSION(3, 7, 0)))
+ chaninfo->freq = freq;
+ chaninfo->chan_type = chan_type;
+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION (3, 8, 0)) */
+ return 0;
}
-#ifdef USE_WFA_CERT_CONF
-extern int g_frameburst;
-#endif /* USE_WFA_CERT_CONF */
-int
-wl_cfg80211_set_frameburst(struct bcm_cfg80211 *cfg, bool enable)
+void
+wl_cfg80211_ch_switch_notify(struct net_device *dev, uint16 chanspec, struct wiphy *wiphy)
{
- int ret = BCME_OK;
- int val = enable ? 1 : 0;
+ u32 freq;
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION (3, 8, 0))
+ struct cfg80211_chan_def chandef;
+#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0) && (LINUX_VERSION_CODE <= KERNEL_VERSION(3, 7, 0)))
+ struct chan_info chaninfo;
+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION (3, 8, 0)) */
-#ifdef USE_WFA_CERT_CONF
- if (!g_frameburst) {
- WL_DBG(("Skip setting frameburst\n"));
- return 0;
+ if (!wiphy) {
+ printf("wiphy is null\n");
+ return;
}
-#endif /* USE_WFA_CERT_CONF */
-
- WL_DBG(("Set frameburst %d\n", val));
- ret = wldev_ioctl_set(bcmcfg_to_prmry_ndev(cfg), WLC_SET_FAKEFRAG, &val, sizeof(val));
- if (ret < 0) {
- WL_ERR(("Failed set frameburst, ret=%d\n", ret));
- } else {
- WL_INFORM_MEM(("frameburst is %s\n", enable ? "enabled" : "disabled"));
+#ifndef ALLOW_CHSW_EVT
+ /* Channel switch support is only for AP/GO/ADHOC/MESH */
+ if (dev->ieee80211_ptr->iftype == NL80211_IFTYPE_STATION ||
+ dev->ieee80211_ptr->iftype == NL80211_IFTYPE_P2P_CLIENT) {
+ WL_ERR(("No channel switch notify support for STA/GC\n"));
+ return;
}
-
- return ret;
-}
-
-s32
-wl_cfg80211_set_dbg_verbose(struct net_device *ndev, u32 level)
-{
- /* configure verbose level for debugging */
- if (level) {
- /* Enable increased verbose */
- wl_dbg_level |= WL_DBG_DBG;
- } else {
- /* Disable */
- wl_dbg_level &= ~WL_DBG_DBG;
+#endif /* !ALLOW_CHSW_EVT */
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION (3, 8, 0))
+ if (wl_chspec_chandef(chanspec, &chandef, wiphy))
+#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0) && (LINUX_VERSION_CODE <= KERNEL_VERSION(3, 7, 0)))
+ if (wl_chspec_chandef(chanspec, &chaninfo, wiphy))
+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION (3, 8, 0)) */
+ {
+ WL_ERR(("chspec_chandef failed\n"));
+ return;
}
- WL_INFORM(("debug verbose set to %d\n", level));
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION (3, 8, 0))
+ freq = chandef.chan ? chandef.chan->center_freq : chandef.center_freq1;
+ cfg80211_ch_switch_notify(dev, &chandef);
+#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0) && (LINUX_VERSION_CODE <= KERNEL_VERSION(3, 7, 0)))
+ freq = chaninfo.freq;
+ cfg80211_ch_switch_notify(dev, freq, chaninfo.chan_type);
+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION (3, 8, 0)) */
- return BCME_OK;
+ WL_ERR(("Channel switch notification for freq: %d chanspec: 0x%x\n", freq, chanspec));
+ return;
}
+#endif /* LINUX_VERSION_CODE >= (3, 5, 0) */
-const u8 *
-wl_find_attribute(const u8 *buf, u16 len, u16 element_id)
+#ifdef WL11ULB
+s32
+wl_cfg80211_set_ulb_mode(struct net_device *dev, int mode)
{
- const u8 *attrib;
- u16 attrib_id;
- u16 attrib_len;
+ int ret;
+ int cur_mode;
- if (!buf) {
- WL_ERR(("buf null\n"));
- return NULL;
+ ret = wldev_iovar_getint(dev, "ulb_mode", &cur_mode);
+ if (unlikely(ret)) {
+ WL_ERR(("[ULB] ulb_mode get failed. ret:%d \n", ret));
+ return ret;
}
- attrib = buf;
- while (len >= 4) {
- /* attribute id */
- attrib_id = *attrib++ << 8;
- attrib_id |= *attrib++;
- len -= 2;
+ if (cur_mode == mode) {
+ /* If request mode is same as that of the current mode, then
+ * do nothing (Avoid unnecessary wl down and up).
+ */
+ WL_INFORM(("[ULB] No change in ulb_mode. Do nothing.\n"));
+ return 0;
+ }
- /* 2-byte little endian */
- attrib_len = *attrib++ << 8;
- attrib_len |= *attrib++;
+ /* setting of ulb_mode requires wl to be down */
+ ret = wldev_ioctl_set(dev, WLC_DOWN, NULL, 0);
+ if (unlikely(ret)) {
+ WL_ERR(("[ULB] WLC_DOWN command failed:[%d]\n", ret));
+ return ret;
+ }
- len -= 2;
- if (attrib_id == element_id) {
- /* This will point to start of subelement attrib after
- * attribute id & len
- */
- return attrib;
- }
- if (len > attrib_len) {
- len -= attrib_len; /* for the remaining subelt fields */
- WL_DBG(("Attribue:%4x attrib_len:%d rem_len:%d\n",
- attrib_id, attrib_len, len));
+ if (mode >= MAX_SUPP_ULB_MODES) {
+ WL_ERR(("[ULB] unsupported ulb_mode :[%d]\n", mode));
+ return -EINVAL;
+ }
- /* Go to next subelement */
- attrib += attrib_len;
- } else {
- WL_ERR(("Incorrect Attribue:%4x attrib_len:%d\n",
- attrib_id, attrib_len));
- return NULL;
- }
+ ret = wldev_iovar_setint(dev, "ulb_mode", mode);
+ if (unlikely(ret)) {
+ WL_ERR(("[ULB] ulb_mode set failed. ret:%d \n", ret));
+ return ret;
}
- return NULL;
-}
-uint8 wl_cfg80211_get_bus_state(struct bcm_cfg80211 *cfg)
-{
- dhd_pub_t *dhd = (dhd_pub_t *)(cfg->pub);
- WL_INFORM(("dhd->hang_was_sent = %d and busstate = %d\n",
- dhd->hang_was_sent, dhd->busstate));
- return ((dhd->busstate == DHD_BUS_DOWN) || dhd->hang_was_sent);
-}
+ ret = wldev_ioctl_set(dev, WLC_UP, NULL, 0);
+ if (unlikely(ret)) {
+ WL_ERR(("[ULB] WLC_DOWN command failed:[%d]\n", ret));
+ return ret;
+ }
-#ifdef WL_WPS_SYNC
-static void wl_wps_reauth_timeout(unsigned long data)
-{
- struct net_device *ndev = (struct net_device *)data;
- struct bcm_cfg80211 *cfg = wl_get_cfg(ndev);
- s32 inst;
- unsigned long flags;
+ WL_DBG(("[ULB] ulb_mode set to %d successfully \n", mode));
- WL_CFG_WPS_SYNC_LOCK(&cfg->wps_sync, flags);
- inst = wl_get_wps_inst_match(cfg, ndev);
- if (inst >= 0) {
- WL_ERR(("[%s][WPS] Reauth Timeout Inst:%d! state:%d\n",
- ndev->name, inst, cfg->wps_session[inst].state));
- if (cfg->wps_session[inst].state == WPS_STATE_REAUTH_WAIT) {
- /* Session should get deleted from success (linkup) or
- * deauth case. Just in case, link reassoc failed, clear
- * state here.
- */
- WL_ERR(("[%s][WPS] Reauth Timeout Inst:%d!\n",
- ndev->name, inst));
- cfg->wps_session[inst].state = WPS_STATE_IDLE;
- cfg->wps_session[inst].in_use = false;
- }
- }
- WL_CFG_WPS_SYNC_UNLOCK(&cfg->wps_sync, flags);
+ return ret;
}
-static void wl_init_wps_reauth_sm(struct bcm_cfg80211 *cfg)
-{
- /* Only two instances are supported as of now. one for
- * infra STA and other for infra STA/GC.
- */
- int i = 0;
- struct net_device *pdev = bcmcfg_to_prmry_ndev(cfg);
-
- spin_lock_init(&cfg->wps_sync);
- for (i = 0; i < WPS_MAX_SESSIONS; i++) {
- /* Init scan_timeout timer */
- init_timer_compat(&cfg->wps_session[i].timer, wl_wps_reauth_timeout, pdev);
- cfg->wps_session[i].in_use = false;
- cfg->wps_session[i].state = WPS_STATE_IDLE;
+static s32
+wl_cfg80211_ulbbw_to_ulbchspec(u32 bw)
+{
+ if (bw == ULB_BW_DISABLED) {
+ return WL_CHANSPEC_BW_20;
+ } else if (bw == ULB_BW_10MHZ) {
+ return WL_CHANSPEC_BW_10;
+ } else if (bw == ULB_BW_5MHZ) {
+ return WL_CHANSPEC_BW_5;
+ } else if (bw == ULB_BW_2P5MHZ) {
+ return WL_CHANSPEC_BW_2P5;
+ } else {
+ WL_ERR(("[ULB] unsupported value for ulb_bw \n"));
+ return -EINVAL;
}
}
-static void wl_deinit_wps_reauth_sm(struct bcm_cfg80211 *cfg)
+static chanspec_t
+wl_cfg80211_ulb_get_min_bw_chspec(struct bcm_cfg80211 *cfg, struct wireless_dev *wdev, s32 bssidx)
{
- int i = 0;
+ struct net_info *_netinfo;
- for (i = 0; i < WPS_MAX_SESSIONS; i++) {
- cfg->wps_session[i].in_use = false;
- cfg->wps_session[i].state = WPS_STATE_IDLE;
- if (timer_pending(&cfg->wps_session[i].timer)) {
- del_timer_sync(&cfg->wps_session[i].timer);
- }
+ /*
+ * Return the chspec value corresponding to the
+ * BW setting for a particular interface
+ */
+ if (wdev) {
+ /* if wdev is provided, use it */
+ _netinfo = wl_get_netinfo_by_wdev(cfg, wdev);
+ } else if (bssidx >= 0) {
+ /* otherwise, look up the interface by bssidx */
+ _netinfo = wl_get_netinfo_by_bssidx(cfg, bssidx);
+ } else {
+ WL_ERR(("[ULB] wdev/bssidx not provided\n"));
+ return INVCHANSPEC;
}
-}
-
-static s32
-wl_get_free_wps_inst(struct bcm_cfg80211 *cfg)
-{
- int i;
+ if (unlikely(!_netinfo)) {
+ WL_ERR(("[ULB] net_info is null \n"));
+ return INVCHANSPEC;
+ }
- for (i = 0; i < WPS_MAX_SESSIONS; i++) {
- if (!cfg->wps_session[i].in_use) {
- return i;
- }
+ if (_netinfo->ulb_bw) {
+ WL_DBG(("[ULB] wdev_ptr:%p ulb_bw:0x%x \n", _netinfo->wdev, _netinfo->ulb_bw));
+ return wl_cfg80211_ulbbw_to_ulbchspec(_netinfo->ulb_bw);
+ } else {
+ return WL_CHANSPEC_BW_20;
}
- return BCME_ERROR;
}
static s32
-wl_get_wps_inst_match(struct bcm_cfg80211 *cfg, struct net_device *ndev)
+wl_cfg80211_get_ulb_bw(struct bcm_cfg80211 *cfg, struct wireless_dev *wdev)
{
- int i;
+ struct net_info *_netinfo = wl_get_netinfo_by_wdev(cfg, wdev);
- for (i = 0; i < WPS_MAX_SESSIONS; i++) {
- if ((cfg->wps_session[i].in_use) &&
- (ndev == cfg->wps_session[i].ndev)) {
- return i;
- }
+ /*
+ * Return the ulb_bw setting for a
+ * particular interface
+ */
+ if (unlikely(!_netinfo)) {
+ WL_ERR(("[ULB] net_info is null \n"));
+ return -1;
}
- return BCME_ERROR;
+ return _netinfo->ulb_bw;
}
-static s32
-wl_wps_session_add(struct net_device *ndev, u16 mode, u8 *mac_addr)
+s32
+wl_cfg80211_set_ulb_bw(struct net_device *dev,
+ u32 ulb_bw, char *ifname)
{
- s32 inst;
- struct bcm_cfg80211 *cfg = wl_get_cfg(ndev);
- unsigned long flags;
+ struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
+ int ret;
+ int mode;
+ struct net_info *_netinfo = NULL, *iter, *next;
+ u32 bssidx;
- WL_CFG_WPS_SYNC_LOCK(&cfg->wps_sync, flags);
- /* Fetch and initialize a wps instance */
- inst = wl_get_free_wps_inst(cfg);
- if (inst == BCME_ERROR) {
- WL_ERR(("[WPS] No free insance\n"));
- WL_CFG_WPS_SYNC_UNLOCK(&cfg->wps_sync, flags);
- return BCME_ERROR;
- }
- cfg->wps_session[inst].in_use = true;
- cfg->wps_session[inst].state = WPS_STATE_STARTED;
- cfg->wps_session[inst].ndev = ndev;
- cfg->wps_session[inst].mode = mode;
- /* return check not required since both buffer lens are same */
- (void)memcpy_s(cfg->wps_session[inst].peer_mac, ETH_ALEN, mac_addr, ETH_ALEN);
- WL_CFG_WPS_SYNC_UNLOCK(&cfg->wps_sync, flags);
+ if (!ifname)
+ return -EINVAL;
- WL_INFORM_MEM(("[%s][WPS] session created. Peer: " MACDBG "\n",
- ndev->name, MAC2STRDBG(mac_addr)));
- return BCME_OK;
-}
+ WL_DBG(("[ULB] Enter. bw_type:%d \n", ulb_bw));
-static void
-wl_wps_session_del(struct net_device *ndev)
-{
- s32 inst;
- struct bcm_cfg80211 *cfg = wl_get_cfg(ndev);
- unsigned long flags;
- u16 cur_state;
+ ret = wldev_iovar_getint(dev, "ulb_mode", &mode);
+ if (unlikely(ret)) {
+ WL_ERR(("[ULB] ulb_mode not supported \n"));
+ return ret;
+ }
- WL_CFG_WPS_SYNC_LOCK(&cfg->wps_sync, flags);
+ if (mode != ULB_MODE_STD_ALONE_MODE) {
+ WL_ERR(("[ULB] ulb bw modification allowed only in stand-alone mode\n"));
+ return -EINVAL;
+ }
- /* Get current instance for the given ndev */
- inst = wl_get_wps_inst_match(cfg, ndev);
- if (inst == BCME_ERROR) {
- WL_DBG(("[WPS] instance match NOT found\n"));
- WL_CFG_WPS_SYNC_UNLOCK(&cfg->wps_sync, flags);
- return;
+ if (ulb_bw >= MAX_SUPP_ULB_BW) {
+ WL_ERR(("[ULB] unsupported value (%d) for ulb_bw \n", ulb_bw));
+ return -EINVAL;
}
- cur_state = cfg->wps_session[inst].state;
- if (cur_state != WPS_STATE_DONE) {
- WL_DBG(("[WPS] wrong state:%d\n", cur_state));
- WL_CFG_WPS_SYNC_UNLOCK(&cfg->wps_sync, flags);
- return;
+#ifdef WL_CFG80211_P2P_DEV_IF
+ if (strcmp(ifname, "p2p-dev-wlan0") == 0) {
+ /* Use wdev corresponding to the dedicated p2p discovery interface */
+ if (likely(cfg->p2p_wdev)) {
+ _netinfo = wl_get_netinfo_by_wdev(cfg, cfg->p2p_wdev);
+ } else {
+ return -ENODEV;
+ }
}
+#endif /* WL_CFG80211_P2P_DEV_IF */
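+ /* Otherwise resolve the target interface by matching the name against every registered netdev */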
+ if (!_netinfo) {
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wcast-qual"
+#endif
+ for_each_ndev(cfg, iter, next) {
+ if (iter->ndev) {
+ if (strncmp(iter->ndev->name, ifname, strlen(ifname)) == 0) {
+ _netinfo = wl_get_netinfo_by_netdev(cfg, iter->ndev);
+ }
+ }
+ }
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic pop
+#endif
+ }
+
+ if (!_netinfo)
+ return -ENODEV;
+ bssidx = _netinfo->bssidx;
+ _netinfo->ulb_bw = ulb_bw;
- /* Mark this as unused */
- cfg->wps_session[inst].in_use = false;
- cfg->wps_session[inst].state = WPS_STATE_IDLE;
- WL_CFG_WPS_SYNC_UNLOCK(&cfg->wps_sync, flags);
- /* Ensure this API is called from sleepable context. */
- if (timer_pending(&cfg->wps_session[inst].timer)) {
- del_timer_sync(&cfg->wps_session[inst].timer);
+ WL_DBG(("[ULB] Applying ulb_bw:%d for bssidx:%d \n", ulb_bw, bssidx));
+ ret = wldev_iovar_setbuf_bsscfg(dev, "ulb_bw", (void *)&ulb_bw, 4,
+ cfg->ioctl_buf, WLC_IOCTL_SMLEN, bssidx,
+ &cfg->ioctl_buf_sync);
+ if (unlikely(ret)) {
+ WL_ERR(("[ULB] ulb_bw set failed. ret:%d \n", ret));
+ return ret;
}
- WL_INFORM_MEM(("[%s][WPS] session deleted\n", ndev->name));
+ return ret;
}
+#endif /* WL11ULB */
static void
-wl_wps_handle_ifdel(struct net_device *ndev)
+wl_ap_channel_ind(struct bcm_cfg80211 *cfg,
+ struct net_device *ndev,
+ chanspec_t chanspec)
{
- struct bcm_cfg80211 *cfg = wl_get_cfg(ndev);
- unsigned long flags;
- u16 cur_state;
- s32 inst;
-
- WL_CFG_WPS_SYNC_LOCK(&cfg->wps_sync, flags);
- inst = wl_get_wps_inst_match(cfg, ndev);
- if (inst == BCME_ERROR) {
- WL_DBG(("[WPS] instance match NOT found\n"));
- WL_CFG_WPS_SYNC_UNLOCK(&cfg->wps_sync, flags);
- return;
- }
- cur_state = cfg->wps_session[inst].state;
- cfg->wps_session[inst].state = WPS_STATE_DONE;
- WL_CFG_WPS_SYNC_UNLOCK(&cfg->wps_sync, flags);
+ u32 channel = LCHSPEC_CHANNEL(chanspec);
- WL_INFORM_MEM(("[%s][WPS] state:%x\n", ndev->name, cur_state));
- if (cur_state > WPS_STATE_IDLE) {
- wl_wps_session_del(ndev);
+ WL_DBG(("(%s) AP channel:%d chspec:0x%x \n",
+ ndev->name, channel, chanspec));
+ if (cfg->ap_oper_channel && (cfg->ap_oper_channel != channel)) {
+ /*
+ * If cached channel is different from the channel indicated
+ * by the event, notify user space about the channel switch.
+ */
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0))
+ wl_cfg80211_ch_switch_notify(ndev, chanspec, bcmcfg_to_wiphy(cfg));
+#endif /* LINUX_VERSION_CODE >= (3, 5, 0) */
+ cfg->ap_oper_channel = channel;
}
}
static s32
-wl_wps_handle_sta_linkdown(struct net_device *ndev, u16 inst)
+wl_ap_start_ind(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev,
+const wl_event_msg_t *e, void *data)
{
- struct bcm_cfg80211 *cfg = wl_get_cfg(ndev);
- unsigned long flags;
- u16 cur_state;
- bool wps_done = false;
+ struct net_device *ndev = NULL;
+ chanspec_t chanspec;
- WL_CFG_WPS_SYNC_LOCK(&cfg->wps_sync, flags);
- cur_state = cfg->wps_session[inst].state;
- if (cur_state == WPS_STATE_REAUTH_WAIT) {
- WL_CFG_WPS_SYNC_UNLOCK(&cfg->wps_sync, flags);
- wl_clr_drv_status(cfg, CONNECTED, ndev);
- wl_clr_drv_status(cfg, DISCONNECTING, ndev);
- WL_INFORM_MEM(("[%s][WPS] REAUTH link down\n", ndev->name));
- /* Drop the link down event while we are waiting for reauth */
- return BCME_UNSUPPORTED;
- } else if (cur_state == WPS_STATE_STARTED) {
- /* Link down before reaching EAP-FAIL. End WPS session */
- cfg->wps_session[inst].state = WPS_STATE_DONE;
- wps_done = true;
- WL_INFORM_MEM(("[%s][WPS] link down after wps start\n", ndev->name));
- } else {
- WL_DBG(("[%s][WPS] link down in state:%d\n",
- ndev->name, cur_state));
+ WL_DBG(("Enter\n"));
+ if (unlikely(e->status)) {
+ WL_ERR(("status:0x%x \n", e->status));
+ return -1;
}
- WL_CFG_WPS_SYNC_UNLOCK(&cfg->wps_sync, flags);
-
- if (wps_done) {
- wl_wps_session_del(ndev);
+ if (!data) {
+ return -EINVAL;
}
- return BCME_OK;
-}
-
-static s32
-wl_wps_handle_peersta_linkdown(struct net_device *ndev, u16 inst, const u8 *peer_mac)
-{
- struct bcm_cfg80211 *cfg = wl_get_cfg(ndev);
- unsigned long flags;
- u16 cur_state;
- s32 ret = BCME_OK;
- bool wps_done = false;
- WL_CFG_WPS_SYNC_LOCK(&cfg->wps_sync, flags);
- cur_state = cfg->wps_session[inst].state;
+ if (likely(cfgdev)) {
+ ndev = cfgdev_to_wlc_ndev(cfgdev, cfg);
+ chanspec = *((chanspec_t *)data);
- if (!peer_mac) {
- WL_ERR(("Invalid arg\n"));
- ret = BCME_ERROR;
- goto exit;
+ if (wl_get_mode_by_netdev(cfg, ndev) == WL_MODE_AP) {
+ /* For AP/GO role */
+ wl_ap_channel_ind(cfg, ndev, chanspec);
+ }
}
- /* AP/GO can have multiple clients. so validate peer_mac addr
- * and ensure states are updated only for right peer.
- */
- if (memcmp(cfg->wps_session[inst].peer_mac, peer_mac, ETH_ALEN)) {
- /* Mac addr not matching. Ignore. */
- WL_DBG(("[%s][WPS] No active WPS session"
- "for the peer:" MACDBG "\n", ndev->name, MAC2STRDBG(peer_mac)));
- ret = BCME_OK;
- goto exit;
- }
- if (cur_state == WPS_STATE_REAUTH_WAIT) {
- WL_INFORM_MEM(("[%s][WPS] REAUTH link down."
- " Peer: " MACDBG "\n",
- ndev->name, MAC2STRDBG(peer_mac)));
-#ifdef NOT_YET
- /* Link down during REAUTH state is expected. However,
- * if this is send up, hostapd statemachine issues a
- * deauth down and that may pre-empt WPS reauth state
- * at GC.
- */
- WL_INFORM_MEM(("[%s][WPS] REAUTH link down. Ignore."
- " for client:" MACDBG "\n",
- ndev->name, MAC2STRDBG(peer_mac)));
- ret = BCME_UNSUPPORTED;
-#endif // endif
- } else if (cur_state == WPS_STATE_STARTED) {
- /* Link down before reaching REAUTH_WAIT state. WPS
- * session ended.
- */
- cfg->wps_session[inst].state = WPS_STATE_DONE;
- WL_INFORM_MEM(("[%s][WPS] link down after wps start"
- " client:" MACDBG "\n",
- ndev->name, MAC2STRDBG(peer_mac)));
- wps_done = true;
- /* since we have freed lock above, return from here */
- ret = BCME_OK;
- } else {
- WL_ERR(("[%s][WPS] Unsupported state:%d",
- ndev->name, cur_state));
- ret = BCME_ERROR;
- }
-exit:
- WL_CFG_WPS_SYNC_UNLOCK(&cfg->wps_sync, flags);
- if (wps_done) {
- wl_wps_session_del(ndev);
- }
- return ret;
+ return 0;
}
static s32
-wl_wps_handle_sta_linkup(struct net_device *ndev, u16 inst)
+wl_csa_complete_ind(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev,
+const wl_event_msg_t *e, void *data)
{
- struct bcm_cfg80211 *cfg = wl_get_cfg(ndev);
- unsigned long flags;
- u16 cur_state;
- s32 ret = BCME_OK;
- bool wps_done = false;
-
- WL_CFG_WPS_SYNC_LOCK(&cfg->wps_sync, flags);
- cur_state = cfg->wps_session[inst].state;
- if (cur_state == WPS_STATE_REAUTH_WAIT) {
- /* WPS session succeeded. del session. */
- cfg->wps_session[inst].state = WPS_STATE_DONE;
- wps_done = true;
- WL_INFORM_MEM(("[%s][WPS] WPS_REAUTH link up (WPS DONE)\n", ndev->name));
- ret = BCME_OK;
- } else {
- WL_ERR(("[%s][WPS] unexpected link up in state:%d \n",
- ndev->name, cur_state));
- ret = BCME_ERROR;
- }
- WL_CFG_WPS_SYNC_UNLOCK(&cfg->wps_sync, flags);
- if (wps_done) {
- wl_wps_session_del(ndev);
- }
- return ret;
-}
+ int error = 0;
+ u32 chanspec = 0;
+ struct net_device *ndev = NULL;
-static s32
-wl_wps_handle_peersta_linkup(struct net_device *ndev, u16 inst, const u8 *peer_mac)
-{
- struct bcm_cfg80211 *cfg = wl_get_cfg(ndev);
- unsigned long flags;
- u16 cur_state;
- s32 ret = BCME_OK;
+ WL_DBG(("Enter\n"));
+ if (unlikely(e->status)) {
+ WL_ERR(("status:0x%x \n", e->status));
+ return -1;
+ }
- WL_CFG_WPS_SYNC_LOCK(&cfg->wps_sync, flags);
- cur_state = cfg->wps_session[inst].state;
+ if (likely(cfgdev)) {
+ ndev = cfgdev_to_wlc_ndev(cfgdev, cfg);
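+ /* Read the operating chanspec back from firmware once the CSA has completed */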
+ error = wldev_iovar_getint(ndev, "chanspec", &chanspec);
+ if (unlikely(error)) {
+ WL_ERR(("Get chanspec error: %d \n", error));
+ return -1;
+ }
- /* For AP case, check whether call came for right peer */
- if (!peer_mac ||
- memcmp(cfg->wps_session[inst].peer_mac, peer_mac, ETH_ALEN)) {
- WL_ERR(("[WPS] macaddr mismatch\n"));
- WL_CFG_WPS_SYNC_UNLOCK(&cfg->wps_sync, flags);
- /* Mac addr not matching. Ignore. */
- return BCME_ERROR;
- }
+ if (wl_get_mode_by_netdev(cfg, ndev) == WL_MODE_AP) {
+ /* For AP/GO role */
+ wl_ap_channel_ind(cfg, ndev, chanspec);
+ } else {
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0))
+ wl_cfg80211_ch_switch_notify(ndev, chanspec, bcmcfg_to_wiphy(cfg));
+#endif /* LINUX_VERSION_CODE >= (3, 5, 0) */
+ }
- if (cur_state == WPS_STATE_REAUTH_WAIT) {
- WL_INFORM_MEM(("[%s][WPS] REAUTH link up\n", ndev->name));
- ret = BCME_OK;
- } else {
- WL_INFORM_MEM(("[%s][WPS] unexpected link up in state:%d \n",
- ndev->name, cur_state));
- ret = BCME_ERROR;
}
- WL_CFG_WPS_SYNC_UNLOCK(&cfg->wps_sync, flags);
-
- return ret;
+ return 0;
}
-static s32
-wl_wps_handle_authorize(struct net_device *ndev, u16 inst, const u8 *peer_mac)
-{
- struct bcm_cfg80211 *cfg = wl_get_cfg(ndev);
- unsigned long flags;
- u16 cur_state;
- bool wps_done = false;
- s32 ret = BCME_OK;
- WL_CFG_WPS_SYNC_LOCK(&cfg->wps_sync, flags);
- cur_state = cfg->wps_session[inst].state;
+void wl_cfg80211_clear_security(struct bcm_cfg80211 *cfg)
+{
+ struct net_device *dev = bcmcfg_to_prmry_ndev(cfg);
+ int err;
- /* For AP case, check whether call came for right peer */
- if (!peer_mac ||
- memcmp(cfg->wps_session[inst].peer_mac, peer_mac, ETH_ALEN)) {
- WL_ERR(("[WPS] macaddr mismatch\n"));
- WL_CFG_WPS_SYNC_UNLOCK(&cfg->wps_sync, flags);
- /* Mac addr not matching. Ignore. */
- return BCME_ERROR;
+ /* Clear the security settings on the primary Interface */
+ err = wldev_iovar_setint(dev, "wsec", 0);
+ if (unlikely(err)) {
+ WL_ERR(("wsec clear failed \n"));
}
-
- if (cur_state == WPS_STATE_REAUTH_WAIT) {
- /* WPS session succeeded. del session. */
- cfg->wps_session[inst].state = WPS_STATE_DONE;
- wps_done = true;
- WL_INFORM_MEM(("[%s][WPS] Authorize done (WPS DONE)\n", ndev->name));
- ret = BCME_OK;
- } else {
- WL_INFORM_MEM(("[%s][WPS] unexpected Authorize in state:%d \n",
- ndev->name, cur_state));
- ret = BCME_ERROR;
+ err = wldev_iovar_setint(dev, "auth", 0);
+ if (unlikely(err)) {
+ WL_ERR(("auth clear failed \n"));
}
-
- WL_CFG_WPS_SYNC_UNLOCK(&cfg->wps_sync, flags);
- if (wps_done) {
- wl_wps_session_del(ndev);
+ err = wldev_iovar_setint(dev, "wpa_auth", WPA_AUTH_DISABLED);
+ if (unlikely(err)) {
+ WL_ERR(("wpa_auth clear failed \n"));
}
- return ret;
}
-static s32
-wl_wps_handle_reauth(struct net_device *ndev, u16 inst, const u8 *peer_mac)
+#ifdef WL_CFG80211_P2P_DEV_IF
+void wl_cfg80211_del_p2p_wdev(struct net_device *dev)
{
- struct bcm_cfg80211 *cfg = wl_get_cfg(ndev);
- unsigned long flags;
- u16 cur_state;
- u16 mode;
- s32 ret = BCME_OK;
+ struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
+ struct wireless_dev *wdev = NULL;
- WL_CFG_WPS_SYNC_LOCK(&cfg->wps_sync, flags);
- cur_state = cfg->wps_session[inst].state;
- mode = cfg->wps_session[inst].mode;
-
- if (((mode == WL_MODE_BSS) && (cur_state == WPS_STATE_STARTED)) ||
- ((mode == WL_MODE_AP) && (cur_state == WPS_STATE_M8_SENT))) {
- /* Move to reauth wait */
- cfg->wps_session[inst].state = WPS_STATE_REAUTH_WAIT;
- /* Use ndev to find the wps instance which fired the timer */
- timer_set_private(&cfg->wps_session[inst].timer, ndev);
- WL_CFG_WPS_SYNC_UNLOCK(&cfg->wps_sync, flags);
- mod_timer(&cfg->wps_session[inst].timer,
- jiffies + msecs_to_jiffies(WL_WPS_REAUTH_TIMEOUT));
- WL_INFORM_MEM(("[%s][WPS] STATE_REAUTH_WAIT mode:%d Peer: " MACDBG "\n",
- ndev->name, mode, MAC2STRDBG(peer_mac)));
- return BCME_OK;
+ WL_DBG(("Enter \n"));
+ if (!cfg) {
+ WL_ERR(("Invalid Ptr\n"));
+ return;
} else {
- /* 802.1x cases */
- WL_DBG(("[%s][WPS] EAP-FAIL\n", ndev->name));
+ wdev = cfg->p2p_wdev;
}
- WL_CFG_WPS_SYNC_UNLOCK(&cfg->wps_sync, flags);
- return ret;
-}
-
-static s32
-wl_wps_handle_disconnect(struct net_device *ndev, u16 inst, const u8 *peer_mac)
-{
- struct bcm_cfg80211 *cfg = wl_get_cfg(ndev);
- unsigned long flags;
- u16 cur_state;
- s32 ret = BCME_OK;
- WL_CFG_WPS_SYNC_LOCK(&cfg->wps_sync, flags);
- cur_state = cfg->wps_session[inst].state;
- /* If Disconnect command comes from user space for STA/GC,
- * respond with event without waiting for event from fw as
- * it would be dropped by the WPS_SYNC code.
- */
- if (cur_state == WPS_STATE_REAUTH_WAIT) {
- if (ETHER_ISBCAST(peer_mac)) {
- WL_DBG(("[WPS] Bcast peer. Do nothing.\n"));
- } else {
- /* Notify link down */
- CFG80211_DISCONNECTED(ndev,
- WLAN_REASON_DEAUTH_LEAVING, NULL, 0,
- true, GFP_ATOMIC);
- }
- } else {
- WL_DBG(("[%s][WPS] Not valid state to report disconnected:%d",
- ndev->name, cur_state));
- ret = BCME_UNSUPPORTED;
+ if (wdev && cfg->down_disc_if) {
+ wl_cfgp2p_del_p2p_disc_if(wdev, cfg);
+ cfg->down_disc_if = FALSE;
}
- WL_CFG_WPS_SYNC_UNLOCK(&cfg->wps_sync, flags);
- return ret;
}
+#endif /* WL_CFG80211_P2P_DEV_IF */
+#ifdef GTK_OFFLOAD_SUPPORT
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 1, 0))
static s32
-wl_wps_handle_disconnect_client(struct net_device *ndev, u16 inst, const u8 *peer_mac)
+wl_cfg80211_set_rekey_data(struct wiphy *wiphy, struct net_device *dev,
+ struct cfg80211_gtk_rekey_data *data)
{
- struct bcm_cfg80211 *cfg = wl_get_cfg(ndev);
- unsigned long flags;
- u16 cur_state;
- s32 ret = BCME_OK;
- bool wps_done = false;
-
- WL_CFG_WPS_SYNC_LOCK(&cfg->wps_sync, flags);
- cur_state = cfg->wps_session[inst].state;
- /* For GO/AP, ignore disconnect client during reauth state */
- if (cur_state == WPS_STATE_REAUTH_WAIT) {
- if (ETHER_ISBCAST(peer_mac)) {
- /* If there is broadcast deauth, then mark wps session as ended */
- cfg->wps_session[inst].state = WPS_STATE_DONE;
- wps_done = true;
- WL_INFORM_MEM(("[%s][WPS] BCAST deauth. WPS stopped.\n", ndev->name));
- ret = BCME_OK;
- goto exit;
- } else if (!(memcmp(cfg->wps_session[inst].peer_mac,
- peer_mac, ETH_ALEN))) {
- WL_ERR(("[%s][WPS] Drop disconnect client\n", ndev->name));
- ret = BCME_UNSUPPORTED;
- }
- }
+ struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+ s32 err = 0;
+ gtk_keyinfo_t keyinfo;
-exit:
- WL_CFG_WPS_SYNC_UNLOCK(&cfg->wps_sync, flags);
- if (wps_done) {
- wl_wps_session_del(ndev);
+ WL_DBG(("Enter\n"));
+ if (data == NULL || cfg->p2p_net == dev) {
+ WL_ERR(("data is NULL or wrong net device\n"));
+ return -EINVAL;
}
- return ret;
-}
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 18, 0)
+ prhex("kck", (uchar *) (data->kck), RSN_KCK_LENGTH);
+ prhex("kek", (uchar *) (data->kek), RSN_KEK_LENGTH);
+ prhex("replay_ctr", (uchar *) (data->replay_ctr), RSN_REPLAY_LEN);
+#else
+ prhex("kck", (uchar *)data->kck, RSN_KCK_LENGTH);
+ prhex("kek", (uchar *)data->kek, RSN_KEK_LENGTH);
+ prhex("replay_ctr", (uchar *)data->replay_ctr, RSN_REPLAY_LEN);
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 18, 0) */
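+ /* Copy the KCK, KEK and replay counter into the keyinfo structure plumbed to firmware for offloaded GTK rekeying */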
+ bcopy(data->kck, keyinfo.KCK, RSN_KCK_LENGTH);
+ bcopy(data->kek, keyinfo.KEK, RSN_KEK_LENGTH);
+ bcopy(data->replay_ctr, keyinfo.ReplayCounter, RSN_REPLAY_LEN);
-static s32
-wl_wps_handle_connect_fail(struct net_device *ndev, u16 inst)
-{
- struct bcm_cfg80211 *cfg = wl_get_cfg(ndev);
- unsigned long flags;
- u16 cur_state;
- bool wps_done = false;
-
- WL_CFG_WPS_SYNC_LOCK(&cfg->wps_sync, flags);
- cur_state = cfg->wps_session[inst].state;
- if (cur_state == WPS_STATE_REAUTH_WAIT) {
- cfg->wps_session[inst].state = WPS_STATE_DONE;
- wps_done = true;
- WL_INFORM_MEM(("[%s][WPS] Connect fail. WPS stopped.\n",
- ndev->name));
- } else {
- WL_ERR(("[%s][WPS] Connect fail. state:%d\n",
- ndev->name, cur_state));
- }
- WL_CFG_WPS_SYNC_UNLOCK(&cfg->wps_sync, flags);
- if (wps_done) {
- wl_wps_session_del(ndev);
+ if ((err = wldev_iovar_setbuf(dev, "gtk_key_info", &keyinfo, sizeof(keyinfo),
+ cfg->ioctl_buf, WLC_IOCTL_SMLEN, &cfg->ioctl_buf_sync)) < 0) {
+ WL_ERR(("seting gtk_key_info failed code=%d\n", err));
+ return err;
}
- return BCME_OK;
+ WL_DBG(("Exit\n"));
+ return err;
}
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 1, 0) */
+#endif /* GTK_OFFLOAD_SUPPORT */
-static s32
-wl_wps_handle_m8_sent(struct net_device *ndev, u16 inst, const u8 *peer_mac)
+#if defined(WL_SUPPORT_AUTO_CHANNEL)
+int
+wl_cfg80211_set_spect(struct net_device *dev, int spect)
{
- struct bcm_cfg80211 *cfg = wl_get_cfg(ndev);
- unsigned long flags;
- u16 cur_state;
- s32 ret = BCME_OK;
+ struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
+ int wlc_down = 1;
+ int wlc_up = 1;
+ int err = BCME_OK;
+
+ if (!wl_get_drv_status_all(cfg, CONNECTED)) {
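+ /* Apply the spect setting only when no connection is active; the ioctl is bracketed by WLC_DOWN/WLC_UP so it takes effect on the next up */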
+ err = wldev_ioctl_set(dev, WLC_DOWN, &wlc_down, sizeof(wlc_down));
+ if (err) {
+ WL_ERR(("%s: WLC_DOWN failed: code: %d\n", __func__, err));
+ return err;
+ }
- WL_CFG_WPS_SYNC_LOCK(&cfg->wps_sync, flags);
- cur_state = cfg->wps_session[inst].state;
+ err = wldev_ioctl_set(dev, WLC_SET_SPECT_MANAGMENT, &spect, sizeof(spect));
+ if (err) {
+ WL_ERR(("%s: error setting spect: code: %d\n", __func__, err));
+ return err;
+ }
- if (cur_state == WPS_STATE_STARTED) {
- /* Move to M8 sent state */
- cfg->wps_session[inst].state = WPS_STATE_M8_SENT;
- WL_CFG_WPS_SYNC_UNLOCK(&cfg->wps_sync, flags);
- return BCME_OK;
- } else {
- /* 802.1x cases */
- WL_DBG(("[%s][WPS] Not valid state to send M8\n", ndev->name));
+ err = wldev_ioctl_set(dev, WLC_UP, &wlc_up, sizeof(wlc_up));
+ if (err) {
+ WL_ERR(("%s: WLC_UP failed: code: %d\n", __func__, err));
+ return err;
+ }
}
- WL_CFG_WPS_SYNC_UNLOCK(&cfg->wps_sync, flags);
- return ret;
+ return err;
}
-static s32
-wl_wps_session_update(struct net_device *ndev, u16 state, const u8 *peer_mac)
+int
+wl_cfg80211_get_sta_channel(struct net_device *dev)
{
- s32 inst;
- u16 mode;
- struct bcm_cfg80211 *cfg = wl_get_cfg(ndev);
- s32 ret = BCME_ERROR;
- unsigned long flags;
+ struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
+ int channel = 0;
- WL_CFG_WPS_SYNC_LOCK(&cfg->wps_sync, flags);
- /* Get current instance for the given ndev */
- inst = wl_get_wps_inst_match(cfg, ndev);
- if (inst == BCME_ERROR) {
- /* No active WPS session. Do Nothing. */
- WL_DBG(("[%s][WPS] No matching instance.\n", ndev->name));
- WL_CFG_WPS_SYNC_UNLOCK(&cfg->wps_sync, flags);
- return BCME_NOTFOUND;
+ if (wl_get_drv_status(cfg, CONNECTED, dev)) {
+ channel = cfg->channel;
+ }
+ return channel;
+}
+#endif /* WL_SUPPORT_AUTO_CHANNEL */
+#ifdef P2P_LISTEN_OFFLOADING
+s32
+wl_cfg80211_p2plo_deinit(struct bcm_cfg80211 *cfg)
+{
+ s32 bssidx;
+ int ret = 0;
+ int p2plo_pause = 0;
+ if (!cfg || !cfg->p2p) {
+ WL_ERR(("Wl %p or cfg->p2p %p is null\n",
+ cfg, cfg ? cfg->p2p : 0));
+ return 0;
}
- mode = cfg->wps_session[inst].mode;
- WL_CFG_WPS_SYNC_UNLOCK(&cfg->wps_sync, flags);
-
- WL_DBG(("[%s][WPS] state:%d mode:%d Peer: " MACDBG "\n",
- ndev->name, state, mode, MAC2STRDBG(peer_mac)));
-
- switch (state) {
- case WPS_STATE_M8_RECVD:
- {
- /* Occasionally, due to race condition between ctrl
- * and data path, deauth ind is recvd before EAP-FAIL.
- * Ignore deauth ind before EAP-FAIL
- * So move to REAUTH WAIT on receiving M8 on GC and
- * ignore deauth ind before EAP-FAIL till 'x' timeout.
- * Kickoff a timer to monitor reauth status.
- */
- if (mode == WL_MODE_BSS) {
- ret = wl_wps_handle_reauth(ndev, inst, peer_mac);
- } else {
- /* Nothing to be done for AP/GO mode */
- ret = BCME_OK;
- }
- break;
- }
- case WPS_STATE_M8_SENT:
- {
- /* Mantain the M8 sent state to verify
- * EAP-FAIL sent is valid
- */
- if (mode == WL_MODE_AP) {
- ret = wl_wps_handle_m8_sent(ndev, inst, peer_mac);
- } else {
- /* Nothing to be done for STA/GC mode */
- ret = BCME_OK;
- }
- break;
- }
- case WPS_STATE_EAP_FAIL:
- {
- /* Move to REAUTH WAIT following EAP-FAIL TX on GO/AP.
- * Kickoff a timer to monitor reauth status
- */
- if (mode == WL_MODE_AP) {
- ret = wl_wps_handle_reauth(ndev, inst, peer_mac);
- } else {
- /* Nothing to be done for STA/GC mode */
- ret = BCME_OK;
- }
- break;
- }
- case WPS_STATE_LINKDOWN:
- {
- if (mode == WL_MODE_BSS) {
- ret = wl_wps_handle_sta_linkdown(ndev, inst);
- } else if (mode == WL_MODE_AP) {
- /* Take action only for matching peer mac */
- if (!memcmp(cfg->wps_session[inst].peer_mac, peer_mac, ETH_ALEN)) {
- ret = wl_wps_handle_peersta_linkdown(ndev, inst, peer_mac);
- }
- }
- break;
- }
- case WPS_STATE_LINKUP:
- {
- if (mode == WL_MODE_BSS) {
- wl_wps_handle_sta_linkup(ndev, inst);
- } else if (mode == WL_MODE_AP) {
- /* Take action only for matching peer mac */
- if (!memcmp(cfg->wps_session[inst].peer_mac, peer_mac, ETH_ALEN)) {
- wl_wps_handle_peersta_linkup(ndev, inst, peer_mac);
- }
- }
- break;
- }
- case WPS_STATE_DISCONNECT_CLIENT:
- {
- /* Disconnect STA/GC command from user space */
- if (mode == WL_MODE_AP) {
- ret = wl_wps_handle_disconnect_client(ndev, inst, peer_mac);
- } else {
- WL_ERR(("[WPS] Unsupported mode %d\n", mode));
- }
- break;
- }
- case WPS_STATE_DISCONNECT:
- {
- /* Disconnect command on STA/GC interface */
- if (mode == WL_MODE_BSS) {
- ret = wl_wps_handle_disconnect(ndev, inst, peer_mac);
- }
- break;
- }
- case WPS_STATE_CONNECT_FAIL:
- {
- if (mode == WL_MODE_BSS) {
- ret = wl_wps_handle_connect_fail(ndev, inst);
- } else {
- WL_ERR(("[WPS] Unsupported mode %d\n", mode));
- }
- break;
- }
- case WPS_STATE_AUTHORIZE:
- {
- if (mode == WL_MODE_AP) {
- /* Take action only for matching peer mac */
- if (!memcmp(cfg->wps_session[inst].peer_mac, peer_mac, ETH_ALEN)) {
- wl_wps_handle_authorize(ndev, inst, peer_mac);
- } else {
- WL_INFORM_MEM(("[WPS] Authorize Request for wrong peer\n"));
- }
- }
- break;
- }
- default:
- WL_ERR(("[WPS] Unsupported state:%d mode:%d\n", state, mode));
- ret = BCME_ERROR;
+ bssidx = wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_DEVICE);
+ ret = wldev_iovar_setbuf_bsscfg(bcmcfg_to_prmry_ndev(cfg),
+ "p2po_stop", (void*)&p2plo_pause, sizeof(p2plo_pause),
+ cfg->ioctl_buf, WLC_IOCTL_SMLEN, bssidx, &cfg->ioctl_buf_sync);
+ if (ret < 0) {
+ WL_ERR(("p2po_stop Failed :%d\n", ret));
}
- return ret;
+ return ret;
}
-
-#define EAP_EXP_ATTRIB_DATA_OFFSET 14
-void
-wl_handle_wps_states(struct net_device *ndev, u8 *pkt, u16 len, bool direction)
+s32
+wl_cfg80211_p2plo_listen_start(struct net_device *dev, u8 *buf, int len)
{
- eapol_header_t *eapol_hdr;
- bool tx_packet = direction;
- u16 eapol_type;
- u16 mode;
- u8 *peer_mac;
-
- if (!ndev || !pkt) {
- WL_ERR(("[WPS] Invalid arg\n"));
- return;
+ struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
+ s32 bssidx = wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_DEVICE);
+ wl_p2plo_listen_t p2plo_listen;
+ int ret = -EAGAIN;
+ int channel = 0;
+ int period = 0;
+ int interval = 0;
+ int count = 0;
+ if (WL_DRV_STATUS_SENDING_AF_FRM_EXT(cfg)) {
+ WL_ERR(("Sending Action Frames. Try it again.\n"));
+ goto exit;
}
- if (len < (ETHER_HDR_LEN + EAPOL_HDR_LEN)) {
- WL_ERR(("[WPS] Invalid len\n"));
- return;
+ if (wl_get_drv_status_all(cfg, SCANNING)) {
+ WL_ERR(("Scanning already\n"));
+ goto exit;
}
- eapol_hdr = (eapol_header_t *)pkt;
- eapol_type = eapol_hdr->type;
+ if (wl_get_drv_status(cfg, SCAN_ABORTING, dev)) {
+ WL_ERR(("Scanning being aborted\n"));
+ goto exit;
+ }
- peer_mac = tx_packet ? eapol_hdr->eth.ether_dhost :
- eapol_hdr->eth.ether_shost;
- /*
- * The implementation assumes only one WPS session would be active
- * per interface at a time. Even for hostap, the wps_pin session
- * is limited to one enrollee/client at a time. A session is marked
- * started on WSC_START and gets cleared from below contexts
- * a) Deauth/link down before reaching EAP-FAIL state. (Fail case)
- * b) Link up following EAP-FAIL. (success case)
- * c) Link up timeout after EAP-FAIL. (Fail case)
- */
+ if (wl_get_p2p_status(cfg, DISC_IN_PROGRESS)) {
+ WL_ERR(("p2p listen offloading already running\n"));
+ goto exit;
+ }
- if (eapol_type == EAP_PACKET) {
- wl_eap_header_t *eap;
-
- if (len > sizeof(*eap)) {
- eap = (wl_eap_header_t *)(pkt + ETHER_HDR_LEN + EAPOL_HDR_LEN);
- if (eap->type == EAP_EXPANDED_TYPE) {
- wl_eap_exp_t *exp = (wl_eap_exp_t *)eap->data;
- if (eap->length > EAP_EXP_HDR_MIN_LENGTH) {
- /* opcode is at fixed offset */
- u8 opcode = exp->opcode;
- u16 eap_len = ntoh16(eap->length);
-
- WL_DBG(("[%s][WPS] EAP EXPANDED packet. opcode:%x len:%d\n",
- ndev->name, opcode, eap_len));
- if (opcode == EAP_WSC_MSG) {
- const u8 *msg;
- const u8* parse_buf = exp->data;
- /* Check if recvd pkt is fragmented */
- if ((!tx_packet) &&
- (exp->flags &
- EAP_EXP_FLAGS_FRAGMENTED_DATA)) {
- if ((eap_len - EAP_EXP_ATTRIB_DATA_OFFSET)
- > 2) {
- parse_buf +=
- EAP_EXP_FRAGMENT_LEN_OFFSET;
- eap_len -=
- EAP_EXP_FRAGMENT_LEN_OFFSET;
- WL_DBG(("Rcvd EAP"
- " fragmented pkt\n"));
- } else {
- /* If recvd pkt is fragmented
- * and does not have
- * length field drop the packet.
- */
- return;
- }
- }
+ /* Just in case if it is not enabled */
+ if ((ret = wl_cfgp2p_enable_discovery(cfg, dev, NULL, 0)) < 0) {
+ WL_ERR(("cfgp2p_enable discovery failed"));
+ goto exit;
+ }
- msg = wl_find_attribute(parse_buf,
- (eap_len - EAP_EXP_ATTRIB_DATA_OFFSET),
- EAP_ATTRIB_MSGTYPE);
- if (unlikely(!msg)) {
- WL_ERR(("[WPS] ATTRIB MSG not found!\n"));
- } else if ((*msg == EAP_WSC_MSG_M8) &&
- !tx_packet) {
- WL_INFORM_MEM(("[%s][WPS] M8\n",
- ndev->name));
- wl_wps_session_update(ndev,
- WPS_STATE_M8_RECVD, peer_mac);
- } else if ((*msg == EAP_WSC_MSG_M8) &&
- tx_packet) {
- WL_INFORM_MEM(("[%s][WPS] M8 Sent\n",
- ndev->name));
- wl_wps_session_update(ndev,
- WPS_STATE_M8_SENT, peer_mac);
- } else {
- WL_DBG(("[%s][WPS] EAP WSC MSG: 0x%X\n",
- ndev->name, *msg));
- }
- } else if (opcode == EAP_WSC_START) {
- /* WSC session started. WSC_START - Tx from GO/AP.
- * Session will be deleted on successful link up or
- * on failure (deauth context)
- */
- mode = tx_packet ? WL_MODE_AP : WL_MODE_BSS;
- wl_wps_session_add(ndev, mode, peer_mac);
- WL_INFORM_MEM(("[%s][WPS] WSC_START Mode:%d\n",
- ndev->name, mode));
- } else if (opcode == EAP_WSC_DONE) {
- /* WSC session done. TX on STA/GC. RX on GO/AP
- * On devices where config file save fails, it may
- * return WPS_NAK with config_error:0. But the
- * connection would still proceed. Hence don't let
- * state machine depend on WSC DONE.
- */
- WL_INFORM_MEM(("[%s][WPS] WSC_DONE\n", ndev->name));
- }
- }
- }
+ bzero(&p2plo_listen, sizeof(wl_p2plo_listen_t));
- if (eap->code == EAP_CODE_FAILURE) {
- /* EAP_FAIL */
- WL_INFORM_MEM(("[%s][WPS] EAP_FAIL\n", ndev->name));
- wl_wps_session_update(ndev,
- WPS_STATE_EAP_FAIL, peer_mac);
- }
+ if (len) {
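+ /* The listen parameters arrive as a single space-separated string: "<channel> <period> <interval> <count>" */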
+ sscanf(buf, " %10d %10d %10d %10d", &channel, &period, &interval, &count);
+ if ((channel == 0) || (period == 0) ||
+ (interval == 0) || (count == 0)) {
+ WL_ERR(("Wrong argument %d/%d/%d/%d \n",
+ channel, period, interval, count));
+ ret = -EAGAIN;
+ goto exit;
}
- }
-}
-#endif /* WL_WPS_SYNC */
-
-s32
-wl_cfg80211_sup_event_handler(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev,
- const wl_event_msg_t *event, void *data)
-{
- int err = BCME_OK;
- u32 status = ntoh32(event->status);
- struct net_device *ndev = bcmcfg_to_prmry_ndev(cfg);
- u32 reason = ntoh32(event->reason);
+ p2plo_listen.period = period;
+ p2plo_listen.interval = interval;
+ p2plo_listen.count = count;
- if (!wl_get_drv_status(cfg, CFG80211_CONNECT, ndev)) {
- /* Join attempt via non-cfg80211 interface.
- * Don't send resultant events to cfg80211
- * layer
- */
- WL_INFORM_MEM(("Event received in non-cfg80211"
- " connect state. Ignore\n"));
- return BCME_OK;
+ WL_ERR(("channel:%d period:%d, interval:%d count:%d\n",
+ channel, period, interval, count));
+ } else {
+ WL_ERR(("Argument len is wrong.\n"));
+ ret = -EAGAIN;
+ goto exit;
+ }
+
+ if ((ret = wldev_iovar_setbuf_bsscfg(dev, "p2po_listen_channel", (void*)&channel,
+ sizeof(channel), cfg->ioctl_buf, WLC_IOCTL_SMLEN,
+ bssidx, &cfg->ioctl_buf_sync)) < 0) {
+ WL_ERR(("p2po_listen_channel Failed :%d\n", ret));
+ goto exit;
}
- if ((status == WLC_SUP_KEYED || status == WLC_SUP_KEYXCHANGE_WAIT_G1) &&
- reason == WLC_E_SUP_OTHER) {
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 15, 0))
- /* NL80211_CMD_PORT_AUTHORIZED supported above >= 4.15 */
- cfg80211_port_authorized(ndev, (u8 *)wl_read_prof(cfg, ndev, WL_PROF_BSSID),
- GFP_KERNEL);
- WL_INFORM_MEM(("4way HS finished. port authorized event sent\n"));
-#elif ((LINUX_VERSION_CODE > KERNEL_VERSION(3, 14, 0)) || \
- defined(WL_VENDOR_EXT_SUPPORT))
- err = wl_cfgvendor_send_async_event(bcmcfg_to_wiphy(cfg), ndev,
- BRCM_VENDOR_EVENT_PORT_AUTHORIZED, NULL, 0);
- WL_INFORM_MEM(("4way HS finished. port authorized event sent\n"));
-#else
- /* not supported in kernel <= 3,14,0 */
-#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 15, 0) */
- } else if (status < WLC_SUP_KEYXCHANGE_WAIT_G1 && (reason != WLC_E_SUP_OTHER &&
- reason != WLC_E_SUP_PTK_UPDATE)) {
- /* if any failure seen while 4way HS, should send NL80211_CMD_DISCONNECT */
- WL_ERR(("4way HS error. status:%d, reason:%d\n", status, reason));
- CFG80211_DISCONNECTED(ndev, 0, NULL, 0, false, GFP_KERNEL);
+ if ((ret = wldev_iovar_setbuf_bsscfg(dev, "p2po_listen", (void*)&p2plo_listen,
+ sizeof(wl_p2plo_listen_t), cfg->ioctl_buf, WLC_IOCTL_SMLEN,
+ bssidx, &cfg->ioctl_buf_sync)) < 0) {
+ WL_ERR(("p2po_listen Failed :%d\n", ret));
+ goto exit;
}
- return err;
+ wl_set_p2p_status(cfg, DISC_IN_PROGRESS);
+exit:
+ return ret;
}
-
-#ifdef WL_BCNRECV
-static s32
-wl_bcnrecv_aborted_event_handler(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev,
- const wl_event_msg_t *e, void *data)
+s32
+wl_cfg80211_p2plo_listen_stop(struct net_device *dev)
{
- s32 status = ntoh32(e->status);
- struct net_device *ndev = bcmcfg_to_prmry_ndev(cfg);
- /* Abort fakeapscan, when Roam is in progress */
- if (status == WLC_E_STATUS_RXBCN_ABORT) {
- wl_android_bcnrecv_stop(ndev, WL_BCNRECV_ROAMABORT);
- } else {
- WL_ERR(("UNKNOWN STATUS. status:%d\n", status));
+ struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
+ s32 bssidx = wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_DEVICE);
+ int ret = -EAGAIN;
+
+ if ((ret = wldev_iovar_setbuf_bsscfg(dev, "p2po_stop", NULL,
+ 0, cfg->ioctl_buf, WLC_IOCTL_SMLEN,
+ bssidx, &cfg->ioctl_buf_sync)) < 0) {
+ WL_ERR(("p2po_stop Failed :%d\n", ret));
+ goto exit;
}
- return BCME_OK;
+
+exit:
+ return ret;
}
-#endif /* WL_BCNRECV */
+#endif /* P2P_LISTEN_OFFLOADING */
-#ifdef WL_MBO
-static s32
-wl_mbo_event_handler(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev,
- const wl_event_msg_t *e, void *data)
+u64
+wl_cfg80211_get_new_roc_id(struct bcm_cfg80211 *cfg)
{
- s32 err = 0;
- wl_event_mbo_t *mbo_evt = (wl_event_mbo_t *)data;
- wl_event_mbo_cell_nw_switch_t *cell_sw_evt = NULL;
- wl_btm_event_type_data_t *evt_data = NULL;
-
- WL_INFORM(("MBO: Evt %u\n", mbo_evt->type));
-
- if (mbo_evt->type == WL_MBO_E_CELLULAR_NW_SWITCH) {
- cell_sw_evt = (wl_event_mbo_cell_nw_switch_t *)mbo_evt->data;
- BCM_REFERENCE(cell_sw_evt);
- SUPP_EVENT(("CTRL-EVENT-CELLULAR-SWITCH", "reason %d cur_assoc_time_left %u "
- "reassoc_delay %u\n", cell_sw_evt->reason,
- cell_sw_evt->assoc_time_remain, cell_sw_evt->reassoc_delay));
- } else if (mbo_evt->type == WL_MBO_E_BTM_RCVD) {
- evt_data = (wl_btm_event_type_data_t *)mbo_evt->data;
- if (evt_data->version != WL_BTM_EVENT_DATA_VER_1) {
- WL_ERR(("version mismatch. rcvd %u expected %u\n",
- evt_data->version, WL_BTM_EVENT_DATA_VER_1));
- return -1;
- }
- SUPP_EVENT(("CTRL-EVENT-BRCM-BTM-REQ-RCVD", "reason=%u\n",
- evt_data->transition_reason));
- } else {
- WL_INFORM(("UNKNOWN EVENT. type:%u\n", mbo_evt->type));
+ u64 id = 0;
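+ /* Hand out a monotonically increasing remain-on-channel cookie, skipping 0 and (with P2P listen offload) the reserved P2PO_COOKIE value */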
+ id = ++cfg->last_roc_id;
+#ifdef P2P_LISTEN_OFFLOADING
+ if (id == P2PO_COOKIE) {
+ id = ++cfg->last_roc_id;
}
- return err;
+#endif /* P2P_LISTEN_OFFLOADING */
+ if (id == 0)
+ id = ++cfg->last_roc_id;
+ return id;
}
-#endif /* WL_MBO */
-#ifdef WL_CAC_TS
-static s32
-wl_cfg80211_cac_event_handler(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev,
- const wl_event_msg_t *e, void *data)
+#if defined(SUPPORT_RANDOM_MAC_SCAN)
+int
+wl_cfg80211_set_random_mac(struct net_device *dev, bool enable)
{
- u32 event = ntoh32(e->event_type);
- s32 status = ntoh32(e->status);
- s32 reason = ntoh32(e->reason);
-
- BCM_REFERENCE(reason);
+ struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
+ int ret;
- if (event == WLC_E_ADDTS_IND) {
- /* The supp log format of adding ts_delay in success case needs to be maintained */
- if (status == WLC_E_STATUS_SUCCESS) {
- uint *ts_delay = (uint *)data;
- BCM_REFERENCE(ts_delay);
- SUPP_EVENT(("CTRL-EVENT-CAC-ADDTS", "status=%d reason=%d ts_delay=%u\n",
- status, reason, *ts_delay));
- } else {
- SUPP_EVENT(("CTRL-EVENT-CAC-ADDTS", "status=%d reason=%d\n",
- status, reason));
- }
- } else if (event == WLC_E_DELTS_IND) {
- SUPP_EVENT(("CTRL-EVENT-CAC-DELTS", "status=%d reason=%d\n", status, reason));
+ if (cfg->random_mac_enabled == enable) {
+ WL_ERR(("Random MAC already %s\n", enable ? "Enabled" : "Disabled"));
+ return BCME_OK;
}
- return BCME_OK;
-}
-#endif /* WL_CAC_TS */
-
-#if defined(WL_MBO) || defined(WL_OCE)
-static s32
-wl_bssid_prune_event_handler(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev,
- const wl_event_msg_t *e, void *data)
-{
- s32 err = 0;
- uint reason = 0;
- wl_bssid_pruned_evt_info_t *evt_info = (wl_bssid_pruned_evt_info_t *)data;
-
- if (evt_info->version == WL_BSSID_PRUNE_EVT_VER_1) {
- if (evt_info->reason == WLC_E_PRUNE_ASSOC_RETRY_DELAY) {
- /* MBO assoc retry delay */
- reason = WIFI_PRUNE_ASSOC_RETRY_DELAY;
- SUPP_EVENT(("CTRL-EVENT-BRCM-BSSID-PRUNED", "ssid=%s bssid=" MACF
- " reason=%u timeout_val=%u(ms)\n", evt_info->SSID,
- ETHER_TO_MACF(evt_info->BSSID), reason, evt_info->time_remaining));
- } else if (evt_info->reason == WLC_E_PRUNE_RSSI_ASSOC_REJ) {
- /* OCE RSSI-based assoc rejection */
- reason = WIFI_PRUNE_RSSI_ASSOC_REJ;
- SUPP_EVENT(("CTRL-EVENT-BRCM-BSSID-PRUNED", "ssid=%s bssid=" MACF
- " reason=%u timeout_val=%u(ms) rssi_threshold=%d(dBm)\n",
- evt_info->SSID, ETHER_TO_MACF(evt_info->BSSID),
- reason, evt_info->time_remaining, evt_info->rssi_threshold));
- } else {
- /* Invalid other than the assoc retry delay/RSSI assoc rejection
- * in the current handler
- */
- BCM_REFERENCE(reason);
- WL_INFORM(("INVALID. reason:%u\n", evt_info->reason));
- }
+ if (enable) {
+ ret = wl_cfg80211_random_mac_enable(dev);
} else {
- WL_INFORM(("version mismatch. rcvd %u expected %u\n", evt_info->version,
- WL_BSSID_PRUNE_EVT_VER_1));
+ ret = wl_cfg80211_random_mac_disable(dev);
+ }
+
+ if (!ret) {
+ cfg->random_mac_enabled = enable;
}
- return err;
-}
-#endif /* WL_MBO || WL_OCE */
-#ifdef RTT_SUPPORT
-static s32
-wl_cfg80211_rtt_event_handler(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev,
- const wl_event_msg_t *e, void *data)
-{
- dhd_pub_t *dhdp = (dhd_pub_t *)(cfg->pub);
- wl_event_msg_t event;
- (void)memcpy_s(&event, sizeof(wl_event_msg_t),
- e, sizeof(wl_event_msg_t));
- return dhd_rtt_event_handler(dhdp, &event, data);
+ return ret;
}
-#endif /* RTT_SUPPORT */
-void
-wl_print_verinfo(struct bcm_cfg80211 *cfg)
+int
+wl_cfg80211_random_mac_enable(struct net_device *dev)
{
- char *ver_ptr;
- uint32 alloc_len = MOD_PARAM_INFOLEN;
+ u8 random_mac[ETH_ALEN] = {0, };
+ u8 rand_bytes[3] = {0, };
+ s32 err = BCME_ERROR;
+ struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
- if (!cfg) {
- WL_ERR(("cfg is NULL\n"));
- return;
+ if (wl_get_drv_status_all(cfg, CONNECTED) || wl_get_drv_status_all(cfg, CONNECTING) ||
+ wl_get_drv_status_all(cfg, AP_CREATED) || wl_get_drv_status_all(cfg, AP_CREATING)) {
+ WL_ERR(("fail to Set random mac, current state is wrong\n"));
+ return err;
}
- ver_ptr = (char *)MALLOCZ(cfg->osh, alloc_len);
- if (!ver_ptr) {
- WL_ERR(("Failed to alloc ver_ptr\n"));
- return;
- }
+ memcpy(random_mac, bcmcfg_to_prmry_ndev(cfg)->dev_addr, ETH_ALEN);
+ get_random_bytes(&rand_bytes, sizeof(rand_bytes));
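+ /* Keep the vendor OUI of the primary MAC and randomize only the lower three bytes; 0x00 and 0xff are avoided in the last byte */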
- if (!dhd_os_get_version(bcmcfg_to_prmry_ndev(cfg),
- TRUE, &ver_ptr, alloc_len)) {
- WL_ERR(("DHD Version: %s\n", ver_ptr));
+ if (rand_bytes[2] == 0x0 || rand_bytes[2] == 0xff) {
+ rand_bytes[2] = 0xf0;
}
- if (!dhd_os_get_version(bcmcfg_to_prmry_ndev(cfg),
- FALSE, &ver_ptr, alloc_len)) {
- WL_ERR(("F/W Version: %s\n", ver_ptr));
+ memcpy(&random_mac[3], rand_bytes, sizeof(rand_bytes));
+
+ err = wldev_iovar_setbuf_bsscfg(bcmcfg_to_prmry_ndev(cfg), "cur_etheraddr",
+ random_mac, ETH_ALEN, cfg->ioctl_buf, WLC_IOCTL_SMLEN, 0, &cfg->ioctl_buf_sync);
+
+ if (err != BCME_OK) {
+ WL_ERR(("failed to set random generate MAC address\n"));
+ } else {
+ WL_ERR(("set mac " MACDBG " to " MACDBG "\n",
+ MAC2STRDBG((const u8 *)bcmcfg_to_prmry_ndev(cfg)->dev_addr),
+ MAC2STRDBG((const u8 *)&random_mac)));
+ WL_ERR(("random MAC enable done"));
}
- MFREE(cfg->osh, ver_ptr, alloc_len);
+ return err;
}
-#if defined(WL_DISABLE_HE_SOFTAP) || defined(WL_DISABLE_HE_P2P)
-typedef struct {
- uint16 id;
- uint16 len;
- uint32 val;
-} he_xtlv_v32;
- static bool
-wl_he_get_uint_cb(void *ctx, uint16 *id, uint16 *len)
+int
+wl_cfg80211_random_mac_disable(struct net_device *dev)
{
- he_xtlv_v32 *v32 = ctx;
+ s32 err = BCME_ERROR;
+ struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
- *id = v32->id;
- *len = v32->len;
+ WL_ERR(("set original mac " MACDBG "\n",
+ MAC2STRDBG((const u8 *)bcmcfg_to_prmry_ndev(cfg)->dev_addr)));
- return FALSE;
+ err = wldev_iovar_setbuf_bsscfg(bcmcfg_to_prmry_ndev(cfg), "cur_etheraddr",
+ bcmcfg_to_prmry_ndev(cfg)->dev_addr, ETH_ALEN,
+ cfg->ioctl_buf, WLC_IOCTL_SMLEN, 0, &cfg->ioctl_buf_sync);
+
+ if (err != BCME_OK) {
+ WL_ERR(("failed to set original MAC address\n"));
+ } else {
+ WL_ERR(("random MAC disable done\n"));
+ }
+
+ return err;
}
+#endif /* SUPPORT_RANDOM_MAC_SCAN */
- static void
-wl_he_pack_uint_cb(void *ctx, uint16 id, uint16 len, uint8 *buf)
+#ifdef WLTDLS
+static s32
+wl_cfg80211_tdls_config(struct bcm_cfg80211 *cfg, enum wl_tdls_config state, bool auto_mode)
{
- he_xtlv_v32 *v32 = ctx;
+ struct net_device *ndev = bcmcfg_to_prmry_ndev(cfg);
+ int err = 0;
+ struct net_info *iter, *next;
+ int update_reqd = 0;
+ int enable = 0;
+ dhd_pub_t *dhdp;
+ dhdp = (dhd_pub_t *)(cfg->pub);
- BCM_REFERENCE(id);
- BCM_REFERENCE(len);
+ /*
+	 * TDLS needs to be enabled only if we have a single STA/GC
+ * connection.
+ */
- v32->val = htod32(v32->val);
+ WL_DBG(("Enter state:%d\n", state));
- switch (v32->len) {
- case sizeof(uint8):
- *buf = (uint8)v32->val;
- break;
- case sizeof(uint16):
- store16_ua(buf, (uint16)v32->val);
- break;
- case sizeof(uint32):
- store32_ua(buf, v32->val);
- break;
- default:
- /* ASSERT(0); */
- break;
+ if (!cfg->tdls_supported) {
+ /* FW doesn't support tdls. Do nothing */
+ return -ENODEV;
}
-}
-int wl_cfg80211_set_he_mode(struct net_device *dev, struct bcm_cfg80211 *cfg,
- s32 bssidx, u32 interface_type, bool set)
-{
- bcm_xtlv_t read_he_xtlv;
- uint8 se_he_xtlv[32];
- int se_he_xtlv_len = sizeof(se_he_xtlv);
- he_xtlv_v32 v32;
- u32 he_feature = 0;
- s32 err = 0;
- u32 he_interface = 0;
+ /* Protect tdls config session */
+ mutex_lock(&cfg->tdls_sync);
- read_he_xtlv.id = WL_HE_CMD_FEATURES;
- read_he_xtlv.len = 0;
- err = wldev_iovar_getbuf_bsscfg(dev, "he", &read_he_xtlv, sizeof(read_he_xtlv),
- cfg->ioctl_buf, WLC_IOCTL_SMLEN, bssidx, NULL);
- if (err < 0) {
- if (err == BCME_UNSUPPORTED) {
- /* HE not supported. Do nothing. */
- return BCME_OK;
+ if ((state == TDLS_STATE_TEARDOWN)) {
+ /* Host initiated TDLS tear down */
+ err = dhd_tdls_enable(ndev, false, auto_mode, NULL);
+ goto exit;
+ } else if (state == TDLS_STATE_AP_CREATE) {
+ /* We don't support tdls while AP/GO is operational */
+ update_reqd = true;
+ enable = false;
+ } else if ((state == TDLS_STATE_CONNECT) || (state == TDLS_STATE_IF_CREATE)) {
+ if (wl_get_drv_status_all(cfg,
+ CONNECTED) >= TDLS_MAX_IFACE_FOR_ENABLE) {
+ /* For STA/GC connect command request, disable
+ * tdls if we have any concurrent interfaces
+ * operational.
+ */
+ WL_DBG(("Interface limit restriction. disable tdls.\n"));
+ update_reqd = true;
+ enable = false;
+ }
+ } else if ((state == TDLS_STATE_DISCONNECT) ||
+ (state == TDLS_STATE_AP_DELETE) ||
+ (state == TDLS_STATE_SETUP) ||
+ (state == TDLS_STATE_IF_DELETE)) {
+		/* Re-enable the tdls connection only if we have at most a
+		 * single STA/GC connection.
+ */
+ if (wl_get_drv_status_all(cfg,
+ CONNECTED) == 0) {
+ /* If there are no interfaces connected, enable tdls */
+ update_reqd = true;
+ enable = true;
+ } else if (wl_get_drv_status_all(cfg,
+ CONNECTED) == TDLS_MAX_IFACE_FOR_ENABLE) {
+ /* We have one interface in CONNECTED state.
+			 * Verify whether it is a non-AP interface before
+ * we enable back tdls.
+ */
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wcast-qual"
+#endif
+ for_each_ndev(cfg, iter, next) {
+ if ((iter->ndev) && (wl_get_drv_status(cfg, CONNECTED, ndev)) &&
+ wl_get_mode_by_netdev(cfg, ndev) == WL_MODE_AP) {
+ WL_DBG(("AP/GO operational. Can't enable tdls. \n"));
+ err = -ENOTSUPP;
+ goto exit;
+ }
+ }
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic pop
+#endif
+ /* No AP/GO found. Enable back tdls */
+ update_reqd = true;
+ enable = true;
+ } else {
+ WL_DBG(("Concurrent connection mode. Can't enable tdls. \n"));
+ err = -ENOTSUPP;
+ goto exit;
}
- WL_ERR(("HE get failed. error=%d\n", err));
} else {
- he_feature = *(int*)cfg->ioctl_buf;
- he_feature = dtoh32(he_feature);
+ WL_ERR(("Unknown tdls state:%d \n", state));
+ err = -EINVAL;
+ goto exit;
}
- v32.id = WL_HE_CMD_FEATURES;
- v32.len = sizeof(s32);
- if (interface_type == WL_IF_TYPE_P2P_DISC) {
- he_interface = WL_HE_FEATURES_HE_P2P;
- } else if (interface_type == WL_IF_TYPE_AP) {
- he_interface = WL_HE_FEATURES_HE_AP;
- } else {
- WL_ERR(("HE request for Invalid interface type"));
- err = BCME_BADARG;
- return err;
- }
+ if (update_reqd == true) {
+ if (dhdp->tdls_enable == enable) {
+ WL_ERR(("No change in tdls state. Do nothing."
+ " tdls_enable:%d\n", enable));
+ goto exit;
+ }
+ err = wldev_iovar_setint(ndev, "tdls_enable", enable);
+ if (unlikely(err)) {
+ WL_ERR(("tdls_enable setting failed. err:%d\n", err));
+ goto exit;
+ } else {
+ WL_DBG(("set tdls_enable: %d done\n", enable));
+ /* Update the dhd state variable to be in sync */
+ dhdp->tdls_enable = enable;
+ if (state == TDLS_STATE_SETUP) {
+ /* For host initiated setup, apply TDLS params
+ * Don't propagate errors up for param config
+ * failures
+ */
+ dhd_tdls_enable(ndev, true, auto_mode, NULL);
- if (set) {
- v32.val = (he_feature | he_interface);
+ }
+ }
} else {
- v32.val = (he_feature & ~he_interface);
- }
-
- err = bcm_pack_xtlv_buf((void *)&v32, se_he_xtlv, sizeof(se_he_xtlv),
- BCM_XTLV_OPTION_ALIGN32, wl_he_get_uint_cb, wl_he_pack_uint_cb,
- &se_he_xtlv_len);
- if (err != BCME_OK) {
- WL_ERR(("failed to pack he settvl=%d\n", err));
+ WL_DBG(("Skip tdls config. state:%d update_reqd:%d "
+ "current_status:%d \n",
+ state, update_reqd, dhdp->tdls_enable));
}
- err = wldev_iovar_setbuf_bsscfg(dev, "he", &se_he_xtlv, sizeof(se_he_xtlv),
- cfg->ioctl_buf, WLC_IOCTL_SMLEN, bssidx, &cfg->ioctl_buf_sync);
- if (err < 0) {
- WL_ERR(("failed to set he features, error=%d\n", err));
- }
- WL_INFORM(("Set HE[%d] done\n", set));
+exit:
+ mutex_unlock(&cfg->tdls_sync);
return err;
}
-#endif /* WL_DISABLE_HE_SOFTAP || WL_DISABLE_HE_P2P */
+#endif /* WLTDLS */
-/* Get the concurrency mode */
-int wl_cfg80211_get_concurrency_mode(struct bcm_cfg80211 *cfg)
+struct net_device* wl_get_ap_netdev(struct bcm_cfg80211 *cfg, char *ifname)
{
struct net_info *iter, *next;
- uint cmode = CONCURRENCY_MODE_NONE;
- u32 connected_cnt = 0;
- u32 pre_channel = 0, channel = 0;
- u32 pre_band = 0;
- u32 chanspec = 0;
- u32 band = 0;
+ struct net_device *ndev = NULL;
- connected_cnt = wl_get_drv_status_all(cfg, CONNECTED);
- if (connected_cnt <= 1) {
- return cmode;
- }
- GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wcast-qual"
+#endif
for_each_ndev(cfg, iter, next) {
if (iter->ndev) {
- if (wl_get_drv_status(cfg, CONNECTED, iter->ndev)) {
- if (wldev_iovar_getint(iter->ndev, "chanspec",
- (s32 *)&chanspec) == BCME_OK) {
- channel = wf_chspec_ctlchan(
- wl_chspec_driver_to_host(chanspec));
- band = (channel <= CH_MAX_2G_CHANNEL) ?
- IEEE80211_BAND_2GHZ : IEEE80211_BAND_5GHZ;
- }
- if ((!pre_channel && channel)) {
- pre_band = band;
- pre_channel = channel;
- } else if (pre_channel) {
- if ((pre_band == band) && (pre_channel == channel)) {
- cmode = CONCURRENCY_SCC_MODE;
- goto exit;
- } else if ((pre_band == band) && (pre_channel != channel)) {
- cmode = CONCURRENCY_VSDB_MODE;
- goto exit;
- } else if (pre_band != band) {
- cmode = CONCURRENCY_RSDB_MODE;
- goto exit;
- }
+ if (strncmp(iter->ndev->name, ifname, strlen(iter->ndev->name)) == 0) {
+ if (iter->ndev->ieee80211_ptr->iftype == NL80211_IFTYPE_AP) {
+ ndev = iter->ndev;
+ break;
}
}
}
}
-#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == \
- 4 && __GNUC_MINOR__ >= 6))
-_Pragma("GCC diagnostic pop")
-#endif // endif
-exit:
- return cmode;
-}
-#ifdef WL_CHAN_UTIL
-static s32
-wl_cfg80211_bssload_report_event_handler(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev,
- const wl_event_msg_t *e, void *data)
-{
- s32 err = BCME_OK;
- struct sk_buff *skb = NULL;
- s32 status = ntoh32(e->status);
- u8 chan_use_percentage = 0;
-#if (defined(CONFIG_ARCH_MSM) && defined(SUPPORT_WDEV_CFG80211_VENDOR_EVENT_ALLOC)) || \
- LINUX_VERSION_CODE >= KERNEL_VERSION(4, 1, 0)
- struct net_device *ndev = bcmcfg_to_prmry_ndev(cfg);
-#endif /* (defined(CONFIG_ARCH_MSM) && defined(SUPPORT_WDEV_CFG80211_VENDOR_EVENT_ALLOC)) || */
- /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 1, 0) */
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 14, 0))
- struct wiphy *wiphy = bcmcfg_to_wiphy(cfg);
- uint len;
- gfp_t kflags;
-#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 14, 0) */
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic pop
+#endif
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 14, 0))
- len = CU_ATTR_HDR_LEN + sizeof(u8);
- kflags = in_atomic() ? GFP_ATOMIC : GFP_KERNEL;
-#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 14, 0) */
+ return ndev;
+}
-#if (defined(CONFIG_ARCH_MSM) && defined(SUPPORT_WDEV_CFG80211_VENDOR_EVENT_ALLOC)) || \
- LINUX_VERSION_CODE >= KERNEL_VERSION(4, 1, 0)
- skb = cfg80211_vendor_event_alloc(wiphy, ndev_to_wdev(ndev), len,
- BRCM_VENDOR_EVENT_CU, kflags);
-#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 14, 0))
- skb = cfg80211_vendor_event_alloc(wiphy, len, BRCM_VENDOR_EVENT_CU, kflags);
-#else
- /* No support exist */
-#endif /* (defined(CONFIG_ARCH_MSM) && defined(SUPPORT_WDEV_CFG80211_VENDOR_EVENT_ALLOC)) || */
- /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 1, 0) */
- if (!skb) {
- WL_ERR(("skb alloc failed"));
- return -ENOMEM;
- }
+struct net_device*
+wl_get_netdev_by_name(struct bcm_cfg80211 *cfg, char *ifname)
+{
+ struct net_info *iter, *next;
+ struct net_device *ndev = NULL;
- if ((status == WLC_E_STATUS_SUCCESS) && data) {
- wl_bssload_t *bssload_report = (wl_bssload_t *)data;
- chan_use_percentage = (bssload_report->chan_util * 100) / 255;
- WL_DBG(("ChannelUtilization=%hhu\n", chan_use_percentage));
- err = nla_put_u8(skb, CU_ATTR_PERCENTAGE, chan_use_percentage);
- if (err < 0) {
- WL_ERR(("Failed to put CU_ATTR_PERCENTAGE, err:%d\n", err));
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wcast-qual"
+#endif
+ for_each_ndev(cfg, iter, next) {
+ if (iter->ndev) {
+ if (strncmp(iter->ndev->name, ifname, IFNAMSIZ) == 0) {
+ ndev = iter->ndev;
+ break;
+ }
}
}
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic pop
+#endif
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 14, 0))
- cfg80211_vendor_event(skb, kflags);
-#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 14, 0) */
-
- return err;
+ return ndev;
}
-#define WL_CHAN_UTIL_DEFAULT_INTERVAL 3000
-#define WL_CHAN_UTIL_THRESH_MIN 15
-#define WL_CHAN_UTIL_THRESH_INTERVAL 10
-#ifndef CUSTOM_CU_INTERVAL
-#define CUSTOM_CU_INTERVAL WL_CHAN_UTIL_DEFAULT_INTERVAL
-#endif /* CUSTOM_CU_INTERVAL */
+#ifdef SUPPORT_AP_HIGHER_BEACONRATE
+#define WLC_RATE_FLAG 0x80
+#define RATE_MASK 0x7f
-static s32
-wl_cfg80211_start_bssload_report(struct net_device *ndev)
+int wl_set_ap_beacon_rate(struct net_device *dev, int val, char *ifname)
{
- s32 err = BCME_OK;
- wl_bssload_cfg_t blcfg;
- u8 i;
- struct bcm_cfg80211 *cfg;
+ struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
+ dhd_pub_t *dhdp;
+ wl_rateset_args_t rs;
+ int error = BCME_ERROR, i;
+ struct net_device *ndev = NULL;
- if (!ndev) {
- return -ENODEV;
- }
+ dhdp = (dhd_pub_t *)(cfg->pub);
- cfg = wl_get_cfg(ndev);
- if (!cfg) {
- return -ENODEV;
+ if (dhdp && !(dhdp->op_mode & DHD_FLAG_HOSTAP_MODE)) {
+ WL_ERR(("Not Hostapd mode\n"));
+ return BCME_NOTAP;
}
- /* Typecasting to void as the buffer size is same as the memset size */
- (void)memset_s(&blcfg, sizeof(wl_bssload_cfg_t), 0, sizeof(wl_bssload_cfg_t));
- /* Set default report interval 3 sec and 8 threshhold levels between 15 to 85% */
- blcfg.rate_limit_msec = CUSTOM_CU_INTERVAL;
- blcfg.num_util_levels = MAX_BSSLOAD_LEVELS;
- for (i = 0; i < MAX_BSSLOAD_LEVELS; i++) {
- blcfg.util_levels[i] = (((WL_CHAN_UTIL_THRESH_MIN +
- (i * WL_CHAN_UTIL_THRESH_INTERVAL)) * 255) / 100);
- }
+ ndev = wl_get_ap_netdev(cfg, ifname);
- err = wldev_iovar_setbuf(ndev, "bssload_report_event", &blcfg,
- sizeof(wl_bssload_cfg_t), cfg->ioctl_buf, WLC_IOCTL_SMLEN, &cfg->ioctl_buf_sync);
- if (unlikely(err)) {
- WL_ERR(("Set event_msgs error (%d)\n", err));
+ if (ndev == NULL) {
+ WL_ERR(("No softAP interface named %s\n", ifname));
+ return BCME_NOTAP;
}
- return err;
-}
-#endif /* WL_CHAN_UTIL */
-
-s32
-wl_cfg80211_config_suspend_events(struct net_device *ndev, bool enable)
-{
- s8 iovbuf[WL_EVENTING_MASK_LEN + 12];
- s8 eventmask[WL_EVENTING_MASK_LEN];
- s32 err = 0;
- struct bcm_cfg80211 *cfg;
-
- if (!ndev) {
- return -ENODEV;
+ bzero(&rs, sizeof(wl_rateset_args_t));
+ error = wldev_iovar_getbuf(ndev, "rateset", NULL, 0,
+ &rs, sizeof(wl_rateset_args_t), NULL);
+ if (error < 0) {
+ WL_ERR(("get rateset failed = %d\n", error));
+ return error;
}
- cfg = wl_get_cfg(ndev);
- if (!cfg) {
- return -ENODEV;
+ if (rs.count < 1) {
+ WL_ERR(("Failed to get rate count\n"));
+ return BCME_ERROR;
}
- mutex_lock(&cfg->event_sync);
- err = wldev_iovar_getbuf(ndev, "event_msgs", NULL, 0, iovbuf, sizeof(iovbuf), NULL);
- if (unlikely(err)) {
- WL_ERR(("Get event_msgs error (%d)\n", err));
- goto eventmsg_out;
- }
+	/* The host delivers the target rate in units of 500 kbps. Converting to
+	 * 1 Mbps units would need float parsing (atof) because of the 5.5 Mbps
+	 * basic rate, so the comparison is done in 500 kbps units instead.
+	 */
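+	/* The request is accepted only if the exact rate is found in the
+	 * current rateset with the basic-rate flag set; force_bcn_rspec is
+	 * then used to fix the beacon rate to that value.
+	 */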
+ for (i = 0; i < rs.count && i < WL_NUMRATES; i++)
+ if (rs.rates[i] & WLC_RATE_FLAG)
+ if ((rs.rates[i] & RATE_MASK) == val)
+ break;
- (void)memcpy_s(eventmask, WL_EVENTING_MASK_LEN, iovbuf, WL_EVENTING_MASK_LEN);
- /* Add set/clear of event mask under feature specific flags */
- if (enable) {
- WL_DBG(("%s: Enabling events on resume\n", __FUNCTION__));
-#ifdef WL_CHAN_UTIL
- setbit(eventmask, WLC_E_BSS_LOAD);
-#endif /* WL_CHAN_UTIL */
+ /* Valid rate has been delivered as an argument */
+ if (i < rs.count && i < WL_NUMRATES) {
+ error = wldev_iovar_setint(ndev, "force_bcn_rspec", val);
+ if (error < 0) {
+ WL_ERR(("set beacon rate failed = %d\n", error));
+ return BCME_ERROR;
+ }
} else {
- WL_DBG(("%s: Disabling events before suspend\n", __FUNCTION__));
-#ifdef WL_CHAN_UTIL
- clrbit(eventmask, WLC_E_BSS_LOAD);
-#endif /* WL_CHAN_UTIL */
- }
-
- err = wldev_iovar_setbuf(ndev, "event_msgs", eventmask, WL_EVENTING_MASK_LEN, iovbuf,
- sizeof(iovbuf), NULL);
- if (unlikely(err)) {
- WL_ERR(("Set event_msgs error (%d)\n", err));
- goto eventmsg_out;
+		WL_ERR(("Rate is invalid\n"));
+ return BCME_BADARG;
}
-eventmsg_out:
- mutex_unlock(&cfg->event_sync);
- return err;
+ return BCME_OK;
}
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 12, 0))
int
-wl_cfg80211_channel_switch(struct wiphy *wiphy, struct net_device *dev,
- struct cfg80211_csa_settings *params)
+wl_get_ap_basic_rate(struct net_device *dev, char* command, char *ifname, int total_len)
{
- s32 err = BCME_OK;
- s32 chan = 0;
- u32 band = 0;
- u32 bw = WL_CHANSPEC_BW_20;
- chanspec_t chspec = 0;
- wl_chan_switch_t csa_arg;
- struct cfg80211_chan_def *chandef = ¶ms->chandef;
- struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
- struct net_device *primary_dev = bcmcfg_to_prmry_ndev(cfg);
+ struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
+ dhd_pub_t *dhdp;
+ wl_rateset_args_t rs;
+ int error = BCME_ERROR;
+ int i, bytes_written = 0;
+ struct net_device *ndev = NULL;
- dev = ndev_to_wlc_ndev(dev, cfg);
- chan = ieee80211_frequency_to_channel(chandef->chan->center_freq);
- band = chandef->chan->band;
+ dhdp = (dhd_pub_t *)(cfg->pub);
- WL_ERR(("netdev_ifidx(%d), target channel(%d) target bandwidth(%d),"
- " mode(%d), count(%d)\n", dev->ifindex, chan, chandef->width,
- params->block_tx, params->count));
+ if (!(dhdp->op_mode & DHD_FLAG_HOSTAP_MODE)) {
+ WL_ERR(("Not Hostapd mode\n"));
+ return BCME_NOTAP;
+ }
- if (wl_get_mode_by_netdev(cfg, dev) != WL_MODE_AP) {
- WL_ERR(("Channel Switch doesn't support on "
- "the non-SoftAP mode\n"));
- return -EINVAL;
+ ndev = wl_get_ap_netdev(cfg, ifname);
+
+ if (ndev == NULL) {
+ WL_ERR(("No softAP interface named %s\n", ifname));
+ return BCME_NOTAP;
}
- /* Check if STA is trying to associate with an AP */
- if (wl_get_drv_status(cfg, CONNECTING, primary_dev)) {
- WL_ERR(("Connecting is in progress\n"));
- return BCME_BUSY;
+ bzero(&rs, sizeof(wl_rateset_args_t));
+ error = wldev_iovar_getbuf(ndev, "rateset", NULL, 0,
+ &rs, sizeof(wl_rateset_args_t), NULL);
+ if (error < 0) {
+ WL_ERR(("get rateset failed = %d\n", error));
+ return error;
}
- if (chan == cfg->ap_oper_channel) {
- WL_ERR(("Channel %d is same as current operating channel,"
- " so skip\n", chan));
- return BCME_OK;
+ if (rs.count < 1) {
+ WL_ERR(("Failed to get rate count\n"));
+ return BCME_ERROR;
}
- if (band == IEEE80211_BAND_5GHZ) {
-#ifdef APSTA_RESTRICTED_CHANNEL
- if (chan != DEFAULT_5G_SOFTAP_CHANNEL) {
- WL_ERR(("Invalid 5G Channel, chan=%d\n", chan));
- return -EINVAL;
- }
-#endif /* APSTA_RESTRICTED_CHANNEL */
- err = wl_get_bandwidth_cap(primary_dev, band, &bw);
- if (err < 0) {
- WL_ERR(("Failed to get bandwidth information,"
- " err=%d\n", err));
- return err;
- }
- } else if (band == IEEE80211_BAND_2GHZ) {
-#ifdef APSTA_RESTRICTED_CHANNEL
- dhd_pub_t *dhdp = (dhd_pub_t *)(cfg->pub);
- u32 *sta_chan = (u32 *)wl_read_prof(cfg,
- primary_dev, WL_PROF_CHAN);
+ /* Delivers basic rate in the unit of 500kbps to host */
+ for (i = 0; i < rs.count && i < WL_NUMRATES; i++)
+ if (rs.rates[i] & WLC_RATE_FLAG)
+			bytes_written += snprintf(command + bytes_written, total_len - bytes_written,
+ "%d ", rs.rates[i] & RATE_MASK);
- /* In 2GHz STA/SoftAP concurrent mode, the operating channel
- * of STA and SoftAP should be confgiured to the same 2GHz
- * channel. Otherwise, it is an invalid configuration.
- */
- if (DHD_OPMODE_STA_SOFTAP_CONCURR(dhdp) &&
- wl_get_drv_status(cfg, CONNECTED, primary_dev) &&
- sta_chan && (*sta_chan != chan)) {
- WL_ERR(("Invalid 2G Channel in case of STA/SoftAP"
- " concurrent mode, sta_chan=%d, chan=%d\n",
- *sta_chan, chan));
- return -EINVAL;
- }
-#endif /* APSTA_RESTRICTED_CHANNEL */
- bw = WL_CHANSPEC_BW_20;
- } else {
- WL_ERR(("invalid band (%d)\n", band));
- return -EINVAL;
+ /* Remove last space in the command buffer */
+ if (bytes_written) {
+ command[bytes_written - 1] = '\0';
+ bytes_written--;
}
- chspec = wf_channel2chspec(chan, bw);
- if (!wf_chspec_valid(chspec)) {
- WL_ERR(("Invalid chanspec 0x%x\n", chspec));
- return -EINVAL;
- }
+ return bytes_written;
- /* Send CSA to associated STAs */
- memset(&csa_arg, 0, sizeof(wl_chan_switch_t));
- csa_arg.mode = params->block_tx;
- csa_arg.count = params->count;
- csa_arg.chspec = chspec;
- csa_arg.frame_type = CSA_BROADCAST_ACTION_FRAME;
- csa_arg.reg = 0;
+}
+#endif /* SUPPORT_AP_HIGHER_BEACONRATE */
- err = wldev_iovar_setbuf(dev, "csa", &csa_arg, sizeof(wl_chan_switch_t),
- cfg->ioctl_buf, WLC_IOCTL_SMLEN, &cfg->ioctl_buf_sync);
- if (err < 0) {
- WL_ERR(("Failed to switch channel, err=%d\n", err));
+#ifdef SUPPORT_AP_RADIO_PWRSAVE
+static int
+_wl_update_ap_rps_params(struct net_device *dev)
+{
+ struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
+ rpsnoa_iovar_params_t iovar;
+ u8 smbuf[WLC_IOCTL_SMLEN];
+
+ if (!dev)
+ return BCME_BADARG;
+
+ memset(&iovar, 0, sizeof(iovar));
+ memset(smbuf, 0, sizeof(smbuf));
+
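+	/* Build a WL_RPSNOA_CMD_PARAMS request for all bands from the cached
+	 * ap_rps_info settings and push it with the "rpsnoa" iovar.
+	 */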
+ iovar.hdr.ver = RADIO_PWRSAVE_VERSION;
+ iovar.hdr.subcmd = WL_RPSNOA_CMD_PARAMS;
+ iovar.hdr.len = sizeof(iovar);
+ iovar.param->band = WLC_BAND_ALL;
+ iovar.param->level = cfg->ap_rps_info.level;
+ iovar.param->stas_assoc_check = cfg->ap_rps_info.sta_assoc_check;
+ iovar.param->pps = cfg->ap_rps_info.pps;
+ iovar.param->quiet_time = cfg->ap_rps_info.quiet_time;
+
+ if (wldev_iovar_setbuf(dev, "rpsnoa", &iovar, sizeof(iovar),
+ smbuf, sizeof(smbuf), NULL)) {
+		WL_ERR(("Failed to set rpsnoa params\n"));
+ return BCME_ERROR;
}
- return err;
+ return BCME_OK;
}
-#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 12, 0) */
-#ifdef WL_WIPSEVT
int
-wl_cfg80211_wips_event_ext(wl_wips_event_info_t *wips_event)
+wl_get_ap_rps(struct net_device *dev, char* command, char *ifname, int total_len)
{
- s32 err = BCME_OK;
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 14, 0))
- struct sk_buff *skb;
- gfp_t kflags;
- struct bcm_cfg80211 *cfg;
- struct net_device *ndev;
- struct wiphy *wiphy;
-
- cfg = wl_cfg80211_get_bcmcfg();
- if (!cfg || !cfg->wdev) {
- WL_ERR(("WIPS evt invalid arg\n"));
- return err;
- }
+ struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
+ dhd_pub_t *dhdp;
+ int error = BCME_ERROR;
+ int bytes_written = 0;
+ struct net_device *ndev = NULL;
+ rpsnoa_iovar_t iovar;
+ u8 smbuf[WLC_IOCTL_SMLEN];
+ u32 chanspec = 0;
+ u8 idx = 0;
+ u8 val;
- ndev = bcmcfg_to_prmry_ndev(cfg);
- wiphy = bcmcfg_to_wiphy(cfg);
+ dhdp = (dhd_pub_t *)(cfg->pub);
- kflags = in_atomic() ? GFP_ATOMIC : GFP_KERNEL;
- skb = CFG80211_VENDOR_EVENT_ALLOC(wiphy, ndev_to_wdev(ndev),
- BRCM_VENDOR_WIPS_EVENT_BUF_LEN, BRCM_VENDOR_EVENT_WIPS, kflags);
+ if (!dhdp) {
+ error = BCME_NOTUP;
+ goto fail;
+ }
- if (!skb) {
- WL_ERR(("skb alloc failed"));
- return BCME_NOMEM;
+ if (!(dhdp->op_mode & DHD_FLAG_HOSTAP_MODE)) {
+ WL_ERR(("Not Hostapd mode\n"));
+ error = BCME_NOTAP;
+ goto fail;
}
- err = nla_put_u16(skb, WIPS_ATTR_DEAUTH_CNT, wips_event->misdeauth);
- if (unlikely(err)) {
- WL_ERR(("nla_put_u16 WIPS_ATTR_DEAUTH_CNT failed\n"));
+ ndev = wl_get_ap_netdev(cfg, ifname);
+
+ if (ndev == NULL) {
+ WL_ERR(("No softAP interface named %s\n", ifname));
+ error = BCME_NOTAP;
goto fail;
}
- err = nla_put(skb, WIPS_ATTR_DEAUTH_BSSID, ETHER_ADDR_LEN, &wips_event->bssid);
- if (unlikely(err)) {
- WL_ERR(("nla_put WIPS_ATTR_DEAUTH_BSSID failed\n"));
+
+ memset(&iovar, 0, sizeof(iovar));
+ memset(smbuf, 0, sizeof(smbuf));
+
+ iovar.hdr.ver = RADIO_PWRSAVE_VERSION;
+ iovar.hdr.subcmd = WL_RPSNOA_CMD_STATUS;
+ iovar.hdr.len = sizeof(iovar);
+ iovar.data->band = WLC_BAND_ALL;
+
+ error = wldev_iovar_getbuf(ndev, "rpsnoa", &iovar, sizeof(iovar),
+ smbuf, sizeof(smbuf), NULL);
+ if (error < 0) {
+ WL_ERR(("get ap radio pwrsave failed = %d\n", error));
goto fail;
}
- err = nla_put_s16(skb, WIPS_ATTR_CURRENT_RSSI, wips_event->current_RSSI);
- if (unlikely(err)) {
- WL_ERR(("nla_put_u16 WIPS_ATTR_CURRENT_RSSI failed\n"));
+
+	/* The RSDB event is not handled reliably, so query the AP chanspec
+	 * directly from the firmware instead.
+	 */
+ error = wldev_iovar_getint(ndev, "chanspec", (s32 *)&chanspec);
+ if (error < 0) {
+ WL_ERR(("get chanspec from AP failed = %d\n", error));
goto fail;
}
- err = nla_put_s16(skb, WIPS_ATTR_DEAUTH_RSSI, wips_event->deauth_RSSI);
- if (unlikely(err)) {
- WL_ERR(("nla_put_u16 WIPS_ATTR_DEAUTH_RSSI failed\n"));
+
+ chanspec = wl_chspec_driver_to_host(chanspec);
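+	/* The rpsnoa status data is indexed per band: pick index 0 for a
+	 * 2.4 GHz AP and index 1 for a 5 GHz AP based on the chanspec.
+	 */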
+ if (CHSPEC_IS2G(chanspec))
+ idx = 0;
+ else if (CHSPEC_IS5G(chanspec))
+ idx = 1;
+ else {
+ error = BCME_BADCHAN;
goto fail;
}
- cfg80211_vendor_event(skb, kflags);
- return err;
+ val = ((rpsnoa_iovar_t *)smbuf)->data[idx].value;
+ bytes_written += snprintf(command + bytes_written, total_len, "%d", val);
+ error = bytes_written;
-fail:
- if (skb) {
- nlmsg_free(skb);
- }
-#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 14, 0) */
- return err;
+fail:
+ return error;
}
int
-wl_cfg80211_wips_event(uint16 misdeauth, char* bssid)
+wl_set_ap_rps(struct net_device *dev, bool enable, char *ifname)
{
- s32 err = BCME_OK;
- wl_wips_event_info_t wips_event;
+ struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
+ dhd_pub_t *dhdp;
+ struct net_device *ndev = NULL;
+ rpsnoa_iovar_t iovar;
+ u8 smbuf[WLC_IOCTL_SMLEN];
+ int ret = BCME_OK;
- wips_event.misdeauth = misdeauth;
- memcpy(&wips_event.bssid, bssid, ETHER_ADDR_LEN);
- wips_event.current_RSSI = 0;
- wips_event.deauth_RSSI = 0;
+ dhdp = (dhd_pub_t *)(cfg->pub);
- err = wl_cfg80211_wips_event_ext(&wips_event);
- return err;
-}
-#endif /* WL_WIPSEVT */
+ if (!dhdp) {
+ ret = BCME_NOTUP;
+ goto exit;
+ }
-bool wl_cfg80211_check_in_progress(struct net_device *dev)
-{
- /* TODO: Check for cfg status like scan in progress,
- * four way handshake, etc before entering Deep Sleep.
- */
- return TRUE;
-}
+ if (!(dhdp->op_mode & DHD_FLAG_HOSTAP_MODE)) {
+ WL_ERR(("Not Hostapd mode\n"));
+ ret = BCME_NOTAP;
+ goto exit;
+ }
-#ifdef SUPPORT_AP_SUSPEND
-void
-wl_set_ap_suspend_error_handler(struct net_device *ndev, bool suspend)
-{
- struct bcm_cfg80211 *cfg = wl_get_cfg(ndev);
- dhd_pub_t *dhdp = (dhd_pub_t *)(cfg->pub);
+ ndev = wl_get_ap_netdev(cfg, ifname);
- if (wl_get_drv_status(cfg, READY, ndev)) {
- /* IF dongle is down due to previous hang or other conditions, sending
- * one more hang notification is not needed.
- */
- if (dhd_query_bus_erros(dhdp)) {
- return;
+ if (ndev == NULL) {
+ WL_ERR(("No softAP interface named %s\n", ifname));
+ ret = BCME_NOTAP;
+ goto exit;
+ }
+
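+	/* When enabling, push the cached parameters first, then toggle the
+	 * feature for all bands with the WL_RPSNOA_CMD_ENABLE subcommand.
+	 */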
+ if (cfg->ap_rps_info.enable != enable) {
+ cfg->ap_rps_info.enable = enable;
+ if (enable) {
+ ret = _wl_update_ap_rps_params(ndev);
+ if (ret) {
+				WL_ERR(("Failed to update rpsnoa params\n"));
+ goto exit;
+ }
}
- dhdp->iface_op_failed = TRUE;
-#if defined(DHD_FW_COREDUMP)
- if (dhdp->memdump_enabled) {
- dhdp->memdump_type = DUMP_TYPE_IFACE_OP_FAILURE;
- dhd_bus_mem_dump(dhdp);
+ memset(&iovar, 0, sizeof(iovar));
+ memset(smbuf, 0, sizeof(smbuf));
+
+ iovar.hdr.ver = RADIO_PWRSAVE_VERSION;
+ iovar.hdr.subcmd = WL_RPSNOA_CMD_ENABLE;
+ iovar.hdr.len = sizeof(iovar);
+ iovar.data->band = WLC_BAND_ALL;
+ iovar.data->value = (int16)enable;
+
+ ret = wldev_iovar_setbuf(ndev, "rpsnoa", &iovar, sizeof(iovar),
+ smbuf, sizeof(smbuf), NULL);
+ if (ret) {
+			WL_ERR(("Failed to enable AP radio power save\n"));
+ goto exit;
}
-#endif /* DHD_FW_COREDUMP */
- WL_ERR(("Notify hang event to upper layer \n"));
- dhdp->hang_reason = suspend ?
- HANG_REASON_BSS_DOWN_FAILURE : HANG_REASON_BSS_UP_FAILURE;
- net_os_send_hang_message(ndev);
+ cfg->ap_rps_info.enable = enable;
}
+exit:
+ return ret;
}
-#define MAX_AP_RESUME_TIME 5000
int
-wl_set_ap_suspend(struct net_device *dev, bool suspend, char *ifname)
+wl_update_ap_rps_params(struct net_device *dev, ap_rps_info_t* rps, char *ifname)
{
struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
dhd_pub_t *dhdp;
struct net_device *ndev = NULL;
- int ret = BCME_OK;
- bool is_bssup = FALSE;
- int bssidx;
- unsigned long start_j;
- int time_to_sleep = MAX_AP_RESUME_TIME;
dhdp = (dhd_pub_t *)(cfg->pub);
- if (!dhdp) {
+ if (!dhdp)
return BCME_NOTUP;
- }
if (!(dhdp->op_mode & DHD_FLAG_HOSTAP_MODE)) {
WL_ERR(("Not Hostapd mode\n"));
return BCME_NOTAP;
}
- if ((bssidx = wl_get_bssidx_by_wdev(cfg, ndev->ieee80211_ptr)) < 0) {
- WL_ERR(("Find p2p index from wdev(%p) failed\n", ndev->ieee80211_ptr));
- return BCME_NOTFOUND;
- }
+ if (!rps)
+ return BCME_BADARG;
- is_bssup = wl_cfg80211_bss_isup(ndev, bssidx);
- if (is_bssup && suspend) {
- wl_clr_drv_status(cfg, AP_CREATED, ndev);
- wl_clr_drv_status(cfg, CONNECTED, ndev);
+ if (rps->pps < RADIO_PWRSAVE_PPS_MIN)
+ return BCME_BADARG;
- if ((ret = wl_cfg80211_bss_up(cfg, ndev, bssidx, 0)) < 0) {
- WL_ERR(("AP suspend error %d, suspend %d\n", ret, suspend));
- ret = BCME_NOTDOWN;
- goto exit;
- }
- } else if (!is_bssup && !suspend) {
- /* Abort scan before starting AP again */
- wl_cfg80211_scan_abort(cfg);
+ if (rps->level < RADIO_PWRSAVE_LEVEL_MIN ||
+ rps->level > RADIO_PWRSAVE_LEVEL_MAX)
+ return BCME_BADARG;
- if ((ret = wl_cfg80211_bss_up(cfg, ndev, bssidx, 1)) < 0) {
- WL_ERR(("AP resume error %d, suspend %d\n", ret, suspend));
- ret = BCME_NOTUP;
- goto exit;
- }
+ if (rps->quiet_time < RADIO_PWRSAVE_QUIETTIME_MIN)
+ return BCME_BADARG;
- while (TRUE) {
- start_j = get_jiffies_64();
- /* Wait for Linkup event to mark successful AP bring up */
- ret = wait_event_interruptible_timeout(cfg->netif_change_event,
- wl_get_drv_status(cfg, AP_CREATED, ndev),
- msecs_to_jiffies(time_to_sleep));
- if (ret == -ERESTARTSYS) {
- WL_ERR(("waitqueue was interrupted by a signal\n"));
- time_to_sleep -= jiffies_to_msecs(get_jiffies_64() - start_j);
- if (time_to_sleep <= 0) {
- WL_ERR(("time to sleep hits 0\n"));
- ret = BCME_NOTUP;
- goto exit;
- }
- } else if (ret == 0 || !wl_get_drv_status(cfg, AP_CREATED, ndev)) {
- WL_ERR(("AP resume failed!\n"));
- ret = BCME_NOTUP;
- goto exit;
- } else {
- wl_set_drv_status(cfg, CONNECTED, ndev);
- ret = BCME_OK;
- break;
- }
+ if (rps->sta_assoc_check > RADIO_PWRSAVE_ASSOCCHECK_MAX ||
+ rps->sta_assoc_check < RADIO_PWRSAVE_ASSOCCHECK_MIN)
+ return BCME_BADARG;
+
+ cfg->ap_rps_info.pps = rps->pps;
+ cfg->ap_rps_info.level = rps->level;
+ cfg->ap_rps_info.quiet_time = rps->quiet_time;
+ cfg->ap_rps_info.sta_assoc_check = rps->sta_assoc_check;
+
+ if (cfg->ap_rps_info.enable) {
+ if (_wl_update_ap_rps_params(ndev)) {
+			WL_ERR(("Failed to update rpsnoa params\n"));
+ return BCME_ERROR;
}
- } else {
- /* bssup + resume or bssdown + suspend,
- * So, returns OK
- */
- ret = BCME_OK;
}
-exit:
- if (ret != BCME_OK)
- wl_set_ap_suspend_error_handler(bcmcfg_to_prmry_ndev(cfg), suspend);
- return ret;
+ return BCME_OK;
}
-#endif /* SUPPORT_AP_SUSPEND */
-#ifdef SUPPORT_SOFTAP_ELNA_BYPASS
-int wl_set_softap_elna_bypass(struct net_device *dev, char *ifname, int enable)
+void
+wl_cfg80211_init_ap_rps(struct bcm_cfg80211 *cfg)
{
- struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
- struct net_device *ifdev = NULL;
- char iobuf[WLC_IOCTL_SMLEN];
- int err = BCME_OK;
- int iftype = 0;
-
- memset(iobuf, 0, WLC_IOCTL_SMLEN);
+ cfg->ap_rps_info.enable = FALSE;
+ cfg->ap_rps_info.sta_assoc_check = RADIO_PWRSAVE_STAS_ASSOC_CHECK;
+ cfg->ap_rps_info.pps = RADIO_PWRSAVE_PPS;
+ cfg->ap_rps_info.quiet_time = RADIO_PWRSAVE_QUIET_TIME;
+ cfg->ap_rps_info.level = RADIO_PWRSAVE_LEVEL;
+}
+#endif /* SUPPORT_AP_RADIO_PWRSAVE */
- /* Check the interface type */
- ifdev = wl_get_netdev_by_name(cfg, ifname);
- if (ifdev == NULL) {
- WL_ERR(("%s: Could not find net_device for ifname:%s\n", __FUNCTION__, ifname));
- err = BCME_BADARG;
- goto fail;
- }
+int
+wl_cfg80211_iface_count(struct net_device *dev)
+{
+ struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
+ struct net_info *iter, *next;
+ int iface_count = 0;
- iftype = ifdev->ieee80211_ptr->iftype;
- if (iftype == NL80211_IFTYPE_AP) {
- err = wldev_iovar_setint(ifdev, "softap_elnabypass", enable);
- if (unlikely(err)) {
- WL_ERR(("%s: Failed to set softap_elnabypass, err=%d\n",
- __FUNCTION__, err));
+ /* Return the count of network interfaces (skip netless p2p discovery
+ * interface)
+ */
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wcast-qual"
+#endif
+ for_each_ndev(cfg, iter, next) {
+ if (iter->ndev) {
+ iface_count++;
}
- } else {
- WL_ERR(("%s: softap_elnabypass should control in SoftAP mode only\n",
- __FUNCTION__));
- err = BCME_BADARG;
}
-fail:
- return err;
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic pop
+#endif
+ return iface_count;
}
-int wl_get_softap_elna_bypass(struct net_device *dev, char *ifname, void *param)
-{
- struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
- int *enable = (int*)param;
- struct net_device *ifdev = NULL;
- char iobuf[WLC_IOCTL_SMLEN];
- int err = BCME_OK;
- int iftype = 0;
-
- memset(iobuf, 0, WLC_IOCTL_SMLEN);
- /* Check the interface type */
- ifdev = wl_get_netdev_by_name(cfg, ifname);
- if (ifdev == NULL) {
- WL_ERR(("%s: Could not find net_device for ifname:%s\n", __FUNCTION__, ifname));
- err = BCME_BADARG;
- goto fail;
- }
+#ifdef WBTEXT
+static bool wl_cfg80211_wbtext_check_bssid_list(struct bcm_cfg80211 *cfg, struct ether_addr *ea)
+{
+ wl_wbtext_bssid_t *bssid = NULL;
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wcast-qual"
+#endif
- iftype = ifdev->ieee80211_ptr->iftype;
- if (iftype == NL80211_IFTYPE_AP) {
- err = wldev_iovar_getint(ifdev, "softap_elnabypass", enable);
- if (unlikely(err)) {
- WL_ERR(("%s: Failed to get softap_elnabypass, err=%d\n",
- __FUNCTION__, err));
+ /* check duplicate */
+ list_for_each_entry(bssid, &cfg->wbtext_bssid_list, list) {
+ if (!memcmp(bssid->ea.octet, ea, ETHER_ADDR_LEN)) {
+ return FALSE;
}
- } else {
- WL_ERR(("%s: softap_elnabypass should control in SoftAP mode only\n",
- __FUNCTION__));
- err = BCME_BADARG;
}
-fail:
- return err;
+ return TRUE;
}
-#endif /* SUPPORT_SOFTAP_ELNA_BYPASS */
-#ifdef SUPPORT_AP_BWCTRL
-#define OPER_MODE_ENABLE (1 << 8)
-static int op2bw[] = {20, 40, 80, 160};
-
-static int
-wl_get_ap_he_mode(struct net_device *ndev, struct bcm_cfg80211 *cfg, bool *he)
+static bool wl_cfg80211_wbtext_add_bssid_list(struct bcm_cfg80211 *cfg, struct ether_addr *ea)
{
- bcm_xtlv_t read_he_xtlv;
- int ret = 0;
- u8 he_enab = 0;
- u32 he_feature = 0;
- *he = FALSE;
-
- /* Check he enab first */
- read_he_xtlv.id = WL_HE_CMD_ENAB;
- read_he_xtlv.len = 0;
+ wl_wbtext_bssid_t *bssid = NULL;
+ char eabuf[ETHER_ADDR_STR_LEN];
- ret = wldev_iovar_getbuf(ndev, "he", &read_he_xtlv, sizeof(read_he_xtlv),
- cfg->ioctl_buf, WLC_IOCTL_SMLEN, NULL);
- if (ret < 0) {
- if (ret == BCME_UNSUPPORTED) {
- /* HE not supported */
- ret = BCME_OK;
- } else {
- WL_ERR(("HE ENAB get failed. ret=%d\n", ret));
- }
- goto exit;
- } else {
- he_enab = *(u8*)cfg->ioctl_buf;
+ bssid = kmalloc(sizeof(wl_wbtext_bssid_t), GFP_KERNEL);
+ if (bssid == NULL) {
+ WL_ERR(("alloc failed\n"));
+ return FALSE;
}
- if (!he_enab) {
- goto exit;
- }
+ memcpy(bssid->ea.octet, ea, ETHER_ADDR_LEN);
- /* Then check BIT3 of he features */
- read_he_xtlv.id = WL_HE_CMD_FEATURES;
- read_he_xtlv.len = 0;
+ INIT_LIST_HEAD(&bssid->list);
+ list_add_tail(&bssid->list, &cfg->wbtext_bssid_list);
- ret = wldev_iovar_getbuf(ndev, "he", &read_he_xtlv, sizeof(read_he_xtlv),
- cfg->ioctl_buf, WLC_IOCTL_SMLEN, NULL);
- if (ret < 0) {
- WL_ERR(("HE FEATURE get failed. error=%d\n", ret));
- goto exit;
- } else {
- he_feature = *(int*)cfg->ioctl_buf;
- he_feature = dtoh32(he_feature);
- }
+ WL_DBG(("add wbtext bssid : %s\n", bcm_ether_ntoa(ea, eabuf)));
- if (he_feature & WL_HE_FEATURES_HE_AP) {
- WL_DBG(("HE is enabled in AP\n"));
- *he = TRUE;
- }
-exit:
- return ret;
+ return TRUE;
}
-static void
-wl_update_apchan_bwcap(struct bcm_cfg80211 *cfg, struct net_device *ndev, chanspec_t chanspec)
+static void wl_cfg80211_wbtext_clear_bssid_list(struct bcm_cfg80211 *cfg)
{
- struct net_device *dev = bcmcfg_to_prmry_ndev(cfg);
- struct wireless_dev *wdev = ndev_to_wdev(dev);
- struct wiphy *wiphy = wdev->wiphy;
- int ret = BCME_OK;
- u32 bw_cap;
- u32 ctl_chan;
- chanspec_t chanbw = WL_CHANSPEC_BW_20;
+ wl_wbtext_bssid_t *bssid = NULL;
+ char eabuf[ETHER_ADDR_STR_LEN];
- /* Update channel in profile */
- ctl_chan = wf_chspec_ctlchan(chanspec);
- wl_update_prof(cfg, ndev, NULL, &ctl_chan, WL_PROF_CHAN);
-
- /* BW cap is only updated in 5GHz */
- if (ctl_chan <= CH_MAX_2G_CHANNEL)
- return;
-
- /* Get WL BW CAP */
- ret = wl_get_bandwidth_cap(bcmcfg_to_prmry_ndev(cfg),
- IEEE80211_BAND_5GHZ, &bw_cap);
- if (ret < 0) {
- WL_ERR(("get bw_cap failed = %d\n", ret));
- goto exit;
+ while (!list_empty(&cfg->wbtext_bssid_list)) {
+ bssid = list_entry(cfg->wbtext_bssid_list.next, wl_wbtext_bssid_t, list);
+ if (bssid) {
+ WL_DBG(("clear wbtext bssid : %s\n", bcm_ether_ntoa(&bssid->ea, eabuf)));
+ list_del(&bssid->list);
+ kfree(bssid);
+ }
}
-
- chanbw = CHSPEC_BW(channel_to_chanspec(wiphy,
- ndev, wf_chspec_ctlchan(chanspec), bw_cap));
-
-exit:
- cfg->bw_cap_5g = bw2cap[chanbw >> WL_CHANSPEC_BW_SHIFT];
- WL_INFORM_MEM(("supported bw cap is:0x%x\n", cfg->bw_cap_5g));
-
}
-int
-wl_rxchain_to_opmode_nss(int rxchain)
+static void wl_cfg80211_wbtext_update_rcc(struct bcm_cfg80211 *cfg, struct net_device *dev)
{
- /*
- * Nss 1 -> 0, Nss 2 -> 1
- * This is from operating mode field
- * in 8.4.1.50 of 802.11ac-2013
- */
- /* TODO : Nss 3 ? */
- if (rxchain == 3)
- return (1 << 4);
- else
- return 0;
-}
+ struct wl_connect_info *conn_info = wl_to_conn(cfg);
+ bcm_tlv_t * cap_ie = NULL;
+ bool req_sent = FALSE;
+ struct wl_profile *profile;
-int
-wl_update_opmode(struct net_device *ndev, u32 bw)
-{
- int ret = BCME_OK;
- int oper_mode;
- int rxchain;
+ WL_DBG(("Enter\n"));
- ret = wldev_iovar_getint(ndev, "rxchain", (s32 *)&rxchain);
- if (ret < 0) {
- WL_ERR(("get rxchain failed = %d\n", ret));
- goto exit;
+ profile = wl_get_profile_by_netdev(cfg, dev);
+ if (!profile) {
+ WL_ERR(("no profile exists\n"));
+ return;
}
- oper_mode = bw;
- oper_mode |= wl_rxchain_to_opmode_nss(rxchain);
- /* Enable flag */
- oper_mode |= OPER_MODE_ENABLE;
+ if (wl_cfg80211_wbtext_check_bssid_list(cfg,
+ (struct ether_addr *)&profile->bssid) == FALSE) {
+ WL_DBG(("already updated\n"));
+ return;
+ }
- ret = wldev_iovar_setint(ndev, "oper_mode", oper_mode);
- if (ret < 0) {
- WL_ERR(("set oper_mode failed = %d\n", ret));
- goto exit;
+ /* first, check NBR bit in RRM IE */
+ if ((cap_ie = bcm_parse_tlvs(conn_info->resp_ie, conn_info->resp_ie_len,
+ DOT11_MNG_RRM_CAP_ID)) != NULL) {
+ if (isset(cap_ie->data, DOT11_RRM_CAP_NEIGHBOR_REPORT)) {
+ req_sent = wl_cfg80211_wbtext_send_nbr_req(cfg, dev, profile);
+ }
}
-exit:
- return ret;
+ /* if RRM nbr was not supported, check BTM bit in extend cap. IE */
+ if (!req_sent) {
+ if ((cap_ie = bcm_parse_tlvs(conn_info->resp_ie, conn_info->resp_ie_len,
+ DOT11_MNG_EXT_CAP_ID)) != NULL) {
+ if (cap_ie->len >= DOT11_EXTCAP_LEN_BSSTRANS &&
+ isset(cap_ie->data, DOT11_EXT_CAP_BSSTRANS_MGMT)) {
+ wl_cfg80211_wbtext_send_btm_query(cfg, dev, profile);
+ }
+ }
+ }
}
-int
-wl_set_ap_bw(struct net_device *dev, u32 bw, char *ifname)
+static bool wl_cfg80211_wbtext_send_nbr_req(struct bcm_cfg80211 *cfg, struct net_device *dev,
+ struct wl_profile *profile)
{
- struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
- dhd_pub_t *dhdp;
- struct net_device *ndev = NULL;
- int ret = BCME_OK;
- u32 *channel;
- bool he;
-
- dhdp = (dhd_pub_t *)(cfg->pub);
-
- if (!dhdp) {
- return BCME_NOTUP;
- }
-
- if (!(dhdp->op_mode & DHD_FLAG_HOSTAP_MODE)) {
- WL_ERR(("Not Hostapd mode\n"));
- return BCME_NOTAP;
- }
+ int error = -1;
+ char *smbuf = NULL;
+ struct wl_connect_info *conn_info = wl_to_conn(cfg);
+ bcm_tlv_t * rrm_cap_ie = NULL;
+ wlc_ssid_t *ssid = NULL;
+ bool ret = FALSE;
- ndev = wl_get_ap_netdev(cfg, ifname);
+ WL_DBG(("Enter\n"));
- if (ndev == NULL) {
- WL_ERR(("No softAP interface named %s\n", ifname));
- return BCME_NOTAP;
+ /* check RRM nbr bit in extend cap. IE of assoc response */
+ if ((rrm_cap_ie = bcm_parse_tlvs(conn_info->resp_ie, conn_info->resp_ie_len,
+ DOT11_MNG_RRM_CAP_ID)) != NULL) {
+ if (!isset(rrm_cap_ie->data, DOT11_RRM_CAP_NEIGHBOR_REPORT)) {
+ WL_DBG(("AP doesn't support neighbor report\n"));
+ return FALSE;
+ }
}
- if (bw > DOT11_OPER_MODE_160MHZ) {
- WL_ERR(("BW is too big %d\n", bw));
- return BCME_BADARG;
+ smbuf = (char *) kmalloc(WLC_IOCTL_MAXLEN, GFP_KERNEL);
+ if (smbuf == NULL) {
+		WL_ERR(("failed to allocate memory\n"));
+ goto nbr_req_out;
}
- channel = (u32 *)wl_read_prof(cfg, ndev, WL_PROF_CHAN);
- if (*channel <= CH_MAX_2G_CHANNEL) {
- WL_ERR(("current channel is %d, not supported\n", *channel));
- ret = BCME_BADCHAN;
- goto exit;
+ ssid = (wlc_ssid_t *) kzalloc(sizeof(wlc_ssid_t), GFP_KERNEL);
+ if (ssid == NULL) {
+		WL_ERR(("failed to allocate memory\n"));
+ goto nbr_req_out;
}
- if ((DHD_OPMODE_STA_SOFTAP_CONCURR(dhdp) &&
- wl_get_drv_status(cfg, CONNECTED, bcmcfg_to_prmry_ndev(cfg))) ||
- cfg->nan_enable) {
- WL_ERR(("BW control in concurrent mode is not supported\n"));
- return BCME_BUSY;
- }
+ ssid->SSID_len = MIN(profile->ssid.SSID_len, DOT11_MAX_SSID_LEN);
+ memcpy(ssid->SSID, profile->ssid.SSID, ssid->SSID_len);
- /* When SCAN is on going either in STA or in AP, return BUSY */
- if (wl_get_drv_status_all(cfg, SCANNING)) {
- WL_ERR(("STA is SCANNING, not support BW control\n"));
- return BCME_BUSY;
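+	/* Ask the AP for a neighbor report for the current SSID; on success
+	 * the BSSID is recorded so the same AP is not queried again.
+	 */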
+ error = wldev_iovar_setbuf(dev, "rrm_nbr_req", ssid,
+ sizeof(wlc_ssid_t), smbuf, WLC_IOCTL_MAXLEN, NULL);
+ if (error == BCME_OK) {
+ ret = wl_cfg80211_wbtext_add_bssid_list(cfg,
+ (struct ether_addr *)&profile->bssid);
+ } else {
+ WL_ERR(("failed to send neighbor report request, error=%d\n", error));
}
- /* When SCANABORT is on going either in STA or in AP, return BUSY */
- if (wl_get_drv_status_all(cfg, SCAN_ABORTING)) {
- WL_ERR(("STA is SCAN_ABORTING, not support BW control\n"));
- return BCME_BUSY;
+nbr_req_out:
+ if (ssid) {
+ kfree(ssid);
}
- /* When CONNECTION is on going in STA, return BUSY */
- if (wl_get_drv_status(cfg, CONNECTING, bcmcfg_to_prmry_ndev(cfg))) {
- WL_ERR(("STA is CONNECTING, not support BW control\n"));
- return BCME_BUSY;
+ if (smbuf) {
+ kfree(smbuf);
}
+ return ret;
+}
- /* BW control in AX mode needs more verification */
- ret = wl_get_ap_he_mode(ndev, cfg, &he);
- if (ret == BCME_OK && he) {
- WL_ERR(("BW control in HE mode is not supported\n"));
- return BCME_UNSUPPORTED;
- }
- if (ret < 0) {
- WL_ERR(("Check AX mode is failed\n"));
- goto exit;
- }
+static bool wl_cfg80211_wbtext_send_btm_query(struct bcm_cfg80211 *cfg, struct net_device *dev,
+ struct wl_profile *profile)
- if ((!WL_BW_CAP_160MHZ(cfg->bw_cap_5g) && (bw == DOT11_OPER_MODE_160MHZ)) ||
- (!WL_BW_CAP_80MHZ(cfg->bw_cap_5g) && (bw >= DOT11_OPER_MODE_80MHZ)) ||
- (!WL_BW_CAP_40MHZ(cfg->bw_cap_5g) && (bw >= DOT11_OPER_MODE_40MHZ)) ||
- (!WL_BW_CAP_20MHZ(cfg->bw_cap_5g) && (bw >= DOT11_OPER_MODE_20MHZ))) {
- WL_ERR(("bw_cap %x does not support bw = %d\n", cfg->bw_cap_5g, bw));
- ret = BCME_BADARG;
- goto exit;
- }
+{
+ int error = -1;
+ bool ret = FALSE;
- WL_DBG(("Updating AP BW to %d\n", op2bw[bw]));
+ WL_DBG(("Enter\n"));
- ret = wl_update_opmode(ndev, bw);
- if (ret < 0) {
- WL_ERR(("opmode set failed = %d\n", ret));
- goto exit;
+ error = wldev_iovar_setbuf(dev, "wnm_bsstrans_query", NULL,
+ 0, cfg->ioctl_buf, WLC_IOCTL_SMLEN, &cfg->ioctl_buf_sync);
+ if (error == BCME_OK) {
+ ret = wl_cfg80211_wbtext_add_bssid_list(cfg,
+ (struct ether_addr *)&profile->bssid);
+ } else {
+ WL_ERR(("%s: failed to set BTM query, error=%d\n", __FUNCTION__, error));
}
-
-exit:
return ret;
}
-int
-wl_get_ap_bw(struct net_device *dev, char* command, char *ifname, int total_len)
+static void wl_cfg80211_wbtext_set_wnm_maxidle(struct bcm_cfg80211 *cfg, struct net_device *dev)
{
- struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
- dhd_pub_t *dhdp;
- struct net_device *ndev = NULL;
- int ret = BCME_OK;
- u32 chanspec = 0;
- u32 bw = DOT11_OPER_MODE_20MHZ;
- int bytes_written = 0;
+ keepalives_max_idle_t keepalive = {0, 0, 0, 0};
+ s32 bssidx, error;
+ int wnm_maxidle = 0;
+ struct wl_connect_info *conn_info = wl_to_conn(cfg);
- dhdp = (dhd_pub_t *)(cfg->pub);
+	/* Does the AP advertise a WNM BSS max idle period? */
+ if (bcm_parse_tlvs(conn_info->resp_ie, conn_info->resp_ie_len,
+ DOT11_MNG_BSS_MAX_IDLE_PERIOD_ID) != NULL) {
+ error = wldev_iovar_getint(dev, "wnm_maxidle", &wnm_maxidle);
+ if (error < 0) {
+ WL_ERR(("failed to get wnm max idle period : %d\n", error));
+ }
+ }
- if (!dhdp) {
- return BCME_NOTUP;
+ WL_DBG(("wnm max idle period : %d\n", wnm_maxidle));
+
+	/* If wnm maxidle has a valid period, set it as the keepalive */
+ if (wnm_maxidle > 0) {
+ keepalive.keepalive_count = 1;
}
- if (!(dhdp->op_mode & DHD_FLAG_HOSTAP_MODE)) {
- WL_ERR(("Not Hostapd mode\n"));
- return BCME_NOTAP;
+ if ((bssidx = wl_get_bssidx_by_wdev(cfg, dev->ieee80211_ptr)) >= 0) {
+ error = wldev_iovar_setbuf_bsscfg(dev, "wnm_keepalives_max_idle", &keepalive,
+ sizeof(keepalives_max_idle_t), cfg->ioctl_buf, WLC_IOCTL_SMLEN,
+ bssidx, &cfg->ioctl_buf_sync);
+ if (error < 0) {
+ WL_ERR(("set wnm_keepalives_max_idle failed : %d\n", error));
+ }
}
+}
- ndev = wl_get_ap_netdev(cfg, ifname);
+static int
+wl_cfg80211_recv_nbr_resp(struct net_device *dev, uint8 *body, int body_len)
+{
+ dot11_rm_action_t *rm_rep;
+ bcm_tlv_t *tlvs;
+ int tlv_len, i, error;
+ dot11_neighbor_rep_ie_t *nbr_rep_ie;
+ chanspec_t ch;
+ wl_roam_channel_list_t channel_list;
+ char iobuf[WLC_IOCTL_SMLEN];
- if (ndev == NULL) {
- WL_ERR(("No softAP interface named %s\n", ifname));
- return BCME_NOTAP;
+ if (body_len < DOT11_RM_ACTION_LEN) {
+ WL_ERR(("Received Neighbor Report frame with incorrect length %d\n",
+ body_len));
+ return BCME_ERROR;
}
- ret = wldev_iovar_getint(ndev, "chanspec", (s32 *)&chanspec);
- if (ret < 0) {
- WL_ERR(("get chanspec from AP failed = %d\n", ret));
- goto exit;
- }
+ rm_rep = (dot11_rm_action_t *)body;
+ WL_DBG(("received neighbor report (token = %d)\n", rm_rep->token));
- chanspec = wl_chspec_driver_to_host(chanspec);
+ tlvs = (bcm_tlv_t *)&rm_rep->data[0];
- if (CHSPEC_IS20(chanspec)) {
- bw = DOT11_OPER_MODE_20MHZ;
- } else if (CHSPEC_IS40(chanspec)) {
- bw = DOT11_OPER_MODE_40MHZ;
- } else if (CHSPEC_IS80(chanspec)) {
- bw = DOT11_OPER_MODE_80MHZ;
- } else if (CHSPEC_IS_BW_160_WIDE(chanspec)) {
- bw = DOT11_OPER_MODE_160MHZ;
- } else {
- WL_ERR(("chanspec error %x\n", chanspec));
- ret = BCME_BADCHAN;
- goto exit;
+ tlv_len = body_len - DOT11_RM_ACTION_LEN;
+
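+	/* Walk the neighbor report elements; each reported channel is added
+	 * to the firmware roam scan channel (RCC) list if there is room and
+	 * it is not present already.
+	 */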
+ while (tlvs && tlvs->id == DOT11_MNG_NEIGHBOR_REP_ID) {
+ nbr_rep_ie = (dot11_neighbor_rep_ie_t *)tlvs;
+
+ if (nbr_rep_ie->len < DOT11_NEIGHBOR_REP_IE_FIXED_LEN) {
+ WL_ERR(("malformed Neighbor Report element with length %d\n",
+ nbr_rep_ie->len));
+ tlvs = bcm_next_tlv(tlvs, &tlv_len);
+ continue;
+ }
+
+ ch = CH20MHZ_CHSPEC(nbr_rep_ie->channel);
+ WL_DBG(("ch:%d, bssid:%02x:%02x:%02x:%02x:%02x:%02x\n",
+ ch, nbr_rep_ie->bssid.octet[0], nbr_rep_ie->bssid.octet[1],
+ nbr_rep_ie->bssid.octet[2], nbr_rep_ie->bssid.octet[3],
+ nbr_rep_ie->bssid.octet[4], nbr_rep_ie->bssid.octet[5]));
+
+ /* get RCC list */
+ error = wldev_iovar_getbuf(dev, "roamscan_channels", 0, 0,
+ (void *)&channel_list, sizeof(channel_list), NULL);
+ if (error) {
+ WL_ERR(("Failed to get roamscan channels, error = %d\n", error));
+ return BCME_ERROR;
+ }
+
+ /* update RCC */
+ if (channel_list.n < MAX_ROAM_CHANNEL) {
+ for (i = 0; i < channel_list.n; i++) {
+ if (channel_list.channels[i] == ch) {
+ break;
+ }
+ }
+ if (i == channel_list.n) {
+ channel_list.channels[channel_list.n] = ch;
+ channel_list.n++;
+ }
+ }
+
+ /* set RCC list */
+ error = wldev_iovar_setbuf(dev, "roamscan_channels", &channel_list,
+ sizeof(channel_list), iobuf, sizeof(iobuf), NULL);
+ if (error) {
+ WL_DBG(("Failed to set roamscan channels, error = %d\n", error));
+ }
+
+ tlvs = bcm_next_tlv(tlvs, &tlv_len);
}
- bytes_written += snprintf(command + bytes_written, total_len,
- "bw=%d", bw);
- ret = bytes_written;
-exit:
- return ret;
+ return BCME_OK;
}
+#endif /* WBTEXT */
+#ifdef SUPPORT_SET_CAC
static void
-wl_restore_ap_bw(struct bcm_cfg80211 *cfg)
+wl_cfg80211_set_cac(struct bcm_cfg80211 *cfg, int enable)
{
- int ret = BCME_OK;
- u32 bw;
- bool he = FALSE;
- struct net_info *iter, *next;
- struct net_device *ndev = NULL;
- u32 *channel;
+ int ret = 0;
+ dhd_pub_t *dhd = (dhd_pub_t *)(cfg->pub);
- if (!cfg) {
+ WL_DBG(("cac enable %d, op_mode 0x%04x\n", enable, dhd->op_mode));
+ if (!dhd) {
+ WL_ERR(("dhd is NULL\n"));
return;
}
+ if (enable && ((dhd->op_mode & DHD_FLAG_HOSTAP_MODE) ||
+ (dhd->op_mode & DHD_FLAG_P2P_GC_MODE) ||
+ (dhd->op_mode & DHD_FLAG_P2P_GO_MODE))) {
+ WL_ERR(("op_mode 0x%04x\n", dhd->op_mode));
+ enable = 0;
+ }
+ if ((ret = dhd_wl_ioctl_set_intiovar(dhd, "cac", enable,
+ WLC_SET_VAR, TRUE, 0)) < 0) {
+ WL_ERR(("Failed set CAC, ret=%d\n", ret));
+ } else {
+ WL_DBG(("CAC set successfully\n"));
+ }
+ return;
+}
+#endif /* SUPPORT_SET_CAC */
- GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
- for_each_ndev(cfg, iter, next) {
- GCC_DIAGNOSTIC_POP();
- if (iter->ndev) {
- if (iter->ndev->ieee80211_ptr->iftype == NL80211_IFTYPE_AP) {
- channel = (u32 *)wl_read_prof(cfg, iter->ndev, WL_PROF_CHAN);
- if (*channel > CH_MAX_2G_CHANNEL) {
- ndev = iter->ndev;
- break;
- }
+#ifdef SUPPORT_RSSI_LOGGING
+int
+wl_get_rssi_per_ant(struct net_device *dev, char *ifname, char *peer_mac, void *param)
+{
+ struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
+ wl_rssi_ant_mimo_t *get_param = (wl_rssi_ant_mimo_t *)param;
+ rssi_ant_param_t *set_param = NULL;
+ struct net_device *ifdev = NULL;
+ char iobuf[WLC_IOCTL_SMLEN];
+ int err = BCME_OK;
+ int iftype = 0;
+
+ memset(iobuf, 0, WLC_IOCTL_SMLEN);
+
+ /* Check the interface type */
+ ifdev = wl_get_netdev_by_name(cfg, ifname);
+ if (ifdev == NULL) {
+ WL_ERR(("Could not find net_device for ifname:%s\n", ifname));
+ err = BCME_BADARG;
+ goto fail;
+ }
+
+ iftype = ifdev->ieee80211_ptr->iftype;
+ if (iftype == NL80211_IFTYPE_AP || iftype == NL80211_IFTYPE_P2P_GO) {
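+		/* For AP/GO interfaces a peer MAC is mandatory so the firmware
+		 * knows which associated STA to report per-antenna RSSI for.
+		 */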
+ if (peer_mac) {
+			set_param = (rssi_ant_param_t *)kzalloc(sizeof(rssi_ant_param_t), GFP_KERNEL);
+			if (!set_param) {
+				err = BCME_NOMEM;
+				goto fail;
+			}
+			err = wl_cfg80211_ether_atoe(peer_mac, &set_param->ea);
+ if (!err) {
+ WL_ERR(("Invalid Peer MAC format\n"));
+ err = BCME_BADARG;
+ goto fail;
}
+ } else {
+ WL_ERR(("Peer MAC is not provided for iftype %d\n", iftype));
+ err = BCME_BADARG;
+ goto fail;
}
}
- if (!ndev) {
- return;
+ err = wldev_iovar_getbuf(ifdev, "phy_rssi_ant", peer_mac ?
+ (void *)&(set_param->ea) : NULL, peer_mac ? ETHER_ADDR_LEN : 0,
+ (void *)iobuf, sizeof(iobuf), NULL);
+ if (unlikely(err)) {
+ WL_ERR(("Failed to get rssi info, err=%d\n", err));
+ } else {
+ memcpy(get_param, iobuf, sizeof(wl_rssi_ant_mimo_t));
+ if (get_param->count == 0) {
+ WL_ERR(("Not supported on this chip\n"));
+ err = BCME_UNSUPPORTED;
+ }
}
- /* BW control in AX mode not allowed */
- ret = wl_get_ap_he_mode(bcmcfg_to_prmry_ndev(cfg), cfg, &he);
- if (ret == BCME_OK && he) {
- return;
- }
- if (ret < 0) {
- WL_ERR(("Check AX mode is failed\n"));
- return;
+fail:
+ if (set_param) {
+ kfree(set_param);
}
- if (WL_BW_CAP_160MHZ(cfg->bw_cap_5g)) {
- bw = DOT11_OPER_MODE_160MHZ;
- } else if (WL_BW_CAP_80MHZ(cfg->bw_cap_5g)) {
- bw = DOT11_OPER_MODE_80MHZ;
- } else if (WL_BW_CAP_40MHZ(cfg->bw_cap_5g)) {
- bw = DOT11_OPER_MODE_40MHZ;
+ return err;
+}
+
+int
+wl_get_rssi_logging(struct net_device *dev, void *param)
+{
+ rssilog_get_param_t *get_param = (rssilog_get_param_t *)param;
+ char iobuf[WLC_IOCTL_SMLEN];
+ int err = BCME_OK;
+
+ memset(iobuf, 0, WLC_IOCTL_SMLEN);
+ memset(get_param, 0, sizeof(*get_param));
+ err = wldev_iovar_getbuf(dev, "rssilog", NULL, 0, (void *)iobuf,
+ sizeof(iobuf), NULL);
+ if (err) {
+ WL_ERR(("Failed to get rssi logging info, err=%d\n", err));
} else {
- return;
+ memcpy(get_param, iobuf, sizeof(*get_param));
}
- WL_DBG(("Restoring AP BW to %d\n", op2bw[bw]));
+ return err;
+}
- ret = wl_update_opmode(ndev, bw);
- if (ret < 0) {
- WL_ERR(("bw restore failed = %d\n", ret));
- return;
+int
+wl_set_rssi_logging(struct net_device *dev, void *param)
+{
+ struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
+ rssilog_set_param_t *set_param = (rssilog_set_param_t *)param;
+ int err;
+
+ err = wldev_iovar_setbuf(dev, "rssilog", set_param,
+ sizeof(*set_param), cfg->ioctl_buf, WLC_IOCTL_SMLEN,
+ &cfg->ioctl_buf_sync);
+ if (err) {
+ WL_ERR(("Failed to set rssi logging param, err=%d\n", err));
}
+
+ return err;
}
-#endif /* SUPPORT_AP_BWCTRL */
+#endif /* SUPPORT_RSSI_LOGGING */
s32
wl_cfg80211_autochannel(struct net_device *dev, char* command, int total_len)
} else if (cfg->autochannel == 2) {
bytes_written = snprintf(command, total_len, "2g=%d 5g=%d",
cfg->best_2g_ch, cfg->best_5g_ch);
- WL_TRACE(("%s: command result is %s\n", __FUNCTION__, command));
+ ANDROID_TRACE(("%s: command result is %s\n", __FUNCTION__, command));
ret = bytes_written;
}
return ret;
}
-int
+static int
wl_cfg80211_check_in4way(struct bcm_cfg80211 *cfg,
struct net_device *dev, uint action, enum wl_ext_status status, void *context)
{
dhd_pub_t *dhdp = (dhd_pub_t *)(cfg->pub);
struct wl_security *sec;
s32 bssidx = -1;
- int ret = 0, cur_eapol_status, ifidx;
- int max_wait_time, max_wait_cnt;
- int suppressed = 0;
+ int ret = 0;
+ int max_wait_gc_time = dhdp->conf->max_wait_gc_time;
+
+ if (!(dhdp->conf->in4way & action))
+ return 0;
mutex_lock(&cfg->in4way_sync);
- action = action & dhdp->conf->in4way;
- WL_DBG(("status=%d, action=0x%x, in4way=0x%x\n", status, action, dhdp->conf->in4way));
+ WL_DBG(("status=%d, action=0x%x\n", status, action));
- cur_eapol_status = dhdp->conf->eapol_status;
switch (status) {
case WL_EXT_STATUS_SCAN:
- wldev_ioctl(dev, WLC_GET_SCANSUPPRESS, &suppressed, sizeof(int), false);
- if (suppressed) {
- WL_ERR(("scan suppressed\n"));
- ret = -EBUSY;
- break;
- }
- if (action & NO_SCAN_IN4WAY) {
+ if (action & (NO_SCAN_IN4WAY|NO_BTC_IN4WAY)) {
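+		/* While the 4-way handshake is in progress (handshaking 1..3),
+		 * reject new scan requests with -EBUSY.
+		 */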
if (cfg->handshaking > 0 && cfg->handshaking <= 3) {
- WL_ERR(("return -EBUSY cnt %d\n", cfg->handshaking));
+ WL_ERR(("%s: return -EBUSY cnt %d\n",
+ __FUNCTION__, cfg->handshaking));
cfg->handshaking++;
ret = -EBUSY;
break;
}
}
break;
- case WL_EXT_STATUS_DISCONNECTING:
- if (cur_eapol_status >= EAPOL_STATUS_4WAY_START &&
- cur_eapol_status < EAPOL_STATUS_4WAY_DONE) {
- WL_ERR(("WPA failed at %d\n", cur_eapol_status));
- dhdp->conf->eapol_status = EAPOL_STATUS_NONE;
- } else if (cur_eapol_status >= EAPOL_STATUS_WSC_START &&
- cur_eapol_status < EAPOL_STATUS_WSC_DONE) {
- WL_ERR(("WPS failed at %d\n", cur_eapol_status));
- dhdp->conf->eapol_status = EAPOL_STATUS_NONE;
- }
- if (action & (NO_SCAN_IN4WAY|NO_BTC_IN4WAY)) {
- if (cfg->handshaking) {
- if ((action & NO_BTC_IN4WAY) && cfg->btc_mode) {
- WL_TRACE(("status=%d, restore btc_mode %d\n",
- status, cfg->btc_mode));
- wldev_iovar_setint(dev, "btc_mode", cfg->btc_mode);
- }
- cfg->handshaking = 0;
- }
- }
- if (action & WAIT_DISCONNECTED) {
- max_wait_time = 200;
- max_wait_cnt = 20;
- cfg->disconnected_jiffies = jiffies;
- while (!time_after(jiffies,
- cfg->disconnected_jiffies + msecs_to_jiffies(max_wait_time)) &&
- max_wait_cnt) {
- WL_TRACE(("status=%d, max_wait_cnt=%d waiting...\n",
- status, max_wait_cnt));
- mutex_unlock(&cfg->in4way_sync);
- OSL_SLEEP(50);
- mutex_lock(&cfg->in4way_sync);
- max_wait_cnt--;
- }
- wake_up_interruptible(&dhdp->conf->event_complete);
- }
- break;
case WL_EXT_STATUS_CONNECTING:
if (action & (NO_SCAN_IN4WAY|NO_BTC_IN4WAY)) {
bssidx = wl_get_bssidx_by_wdev(cfg, dev->ieee80211_ptr);
sec = wl_read_prof(cfg, dev, WL_PROF_SEC);
if ((sec->wpa_versions & (NL80211_WPA_VERSION_1 | NL80211_WPA_VERSION_2)) &&
bssidx == 0) {
- dhdp->conf->eapol_status = EAPOL_STATUS_4WAY_START;
cfg->handshaking = 1;
- if (action & NO_BTC_IN4WAY) {
- ret = wldev_iovar_getint(dev, "btc_mode", &cfg->btc_mode);
- if (!ret && cfg->btc_mode) {
- WL_TRACE(("status=%d, disable current btc_mode %d\n",
- status, cfg->btc_mode));
- wldev_iovar_setint(dev, "btc_mode", 0);
- }
- }
- }
- }
- if (action & WAIT_DISCONNECTED) {
- max_wait_time = 200;
- max_wait_cnt = 10;
- while (!time_after(jiffies,
- cfg->disconnected_jiffies + msecs_to_jiffies(max_wait_time)) &&
- max_wait_cnt) {
- WL_TRACE(("status=%d, max_wait_cnt=%d waiting...\n",
- status, max_wait_cnt));
- mutex_unlock(&cfg->in4way_sync);
- OSL_SLEEP(50);
- mutex_lock(&cfg->in4way_sync);
- max_wait_cnt--;
+ if (action & NO_BTC_IN4WAY)
+ wldev_iovar_setint(dev, "btc_mode", 0);
}
- wake_up_interruptible(&dhdp->conf->event_complete);
}
break;
- case WL_EXT_STATUS_CONNECTED:
- ifidx = dhd_net2idx(dhdp->info, dev);
- if (dev->ieee80211_ptr->iftype == NL80211_IFTYPE_STATION && ifidx >= 0) {
- dhd_conf_set_wme(cfg->pub, ifidx, 0);
- wake_up_interruptible(&dhdp->conf->event_complete);
- }
- else if (dev->ieee80211_ptr->iftype == NL80211_IFTYPE_P2P_CLIENT) {
- dhd_conf_set_mchan_bw(cfg->pub, WL_P2P_IF_CLIENT, -1);
- }
- break;
- case WL_EXT_STATUS_DISCONNECTED:
- if (cur_eapol_status >= EAPOL_STATUS_4WAY_START &&
- cur_eapol_status < EAPOL_STATUS_4WAY_DONE) {
- WL_ERR(("WPA failed at %d\n", cur_eapol_status));
- dhdp->conf->eapol_status = EAPOL_STATUS_NONE;
- } else if (cur_eapol_status >= EAPOL_STATUS_WSC_START &&
- cur_eapol_status < EAPOL_STATUS_WSC_DONE) {
- WL_ERR(("WPS failed at %d\n", cur_eapol_status));
- dhdp->conf->eapol_status = EAPOL_STATUS_NONE;
- }
- if (action & (NO_SCAN_IN4WAY|NO_BTC_IN4WAY)) {
- if (cfg->handshaking) {
- if ((action & NO_BTC_IN4WAY) && cfg->btc_mode) {
- WL_TRACE(("status=%d, restore btc_mode %d\n",
- status, cfg->btc_mode));
- wldev_iovar_setint(dev, "btc_mode", cfg->btc_mode);
- }
- cfg->handshaking = 0;
- }
- }
- if (action & WAIT_DISCONNECTED) {
- cfg->disconnected_jiffies = jiffies;
- }
- wake_up_interruptible(&dhdp->conf->event_complete);
- break;
- case WL_EXT_STATUS_ADD_KEY:
- dhdp->conf->eapol_status = EAPOL_STATUS_4WAY_DONE;
- if (action & (NO_SCAN_IN4WAY|NO_BTC_IN4WAY)) {
- if (cfg->handshaking) {
- if ((action & NO_BTC_IN4WAY) && cfg->btc_mode) {
- WL_TRACE(("status=%d, restore btc_mode %d\n",
- status, cfg->btc_mode));
- wldev_iovar_setint(dev, "btc_mode", cfg->btc_mode);
- }
- cfg->handshaking = 0;
- }
- }
- wake_up_interruptible(&dhdp->conf->event_complete);
- break;
- case WL_EXT_STATUS_AP_ENABLED:
- ifidx = dhd_net2idx(dhdp->info, dev);
- if (dev->ieee80211_ptr->iftype == NL80211_IFTYPE_AP && ifidx >= 0) {
- dhd_conf_set_wme(cfg->pub, ifidx, 1);
- }
- else if (dev->ieee80211_ptr->iftype == NL80211_IFTYPE_P2P_GO) {
- dhd_conf_set_mchan_bw(cfg->pub, WL_P2P_IF_GO, -1);
- }
- break;
- case WL_EXT_STATUS_DELETE_STA:
+ case WL_EXT_STATUS_DELETE_GC:
if ((action & DONT_DELETE_GC_AFTER_WPS) &&
(dev->ieee80211_ptr->iftype == NL80211_IFTYPE_P2P_GO)) {
u8* mac_addr = context;
- if (mac_addr && memcmp(ðer_bcast, mac_addr, ETHER_ADDR_LEN) &&
- cur_eapol_status == EAPOL_STATUS_WSC_DONE) {
+			if (memcmp(&ether_bcast, mac_addr, ETHER_ADDR_LEN) &&
+ dhdp->conf->eapol_status == EAPOL_STATUS_WPS_DONE) {
u32 timeout;
- max_wait_time = 300;
WL_TRACE(("status=%d, wps_done=%d, waiting %dms ...\n",
- status, cfg->wps_done, max_wait_time));
+ status, cfg->wps_done, max_wait_gc_time));
mutex_unlock(&cfg->in4way_sync);
timeout = wait_event_interruptible_timeout(cfg->wps_done_event,
- cfg->wps_done, msecs_to_jiffies(max_wait_time));
+ cfg->wps_done, msecs_to_jiffies(max_wait_gc_time));
mutex_lock(&cfg->in4way_sync);
WL_TRACE(("status=%d, wps_done=%d, timeout=%d\n",
status, cfg->wps_done, timeout));
}
}
break;
- case WL_EXT_STATUS_STA_DISCONNECTED:
+ case WL_EXT_STATUS_GC_DISCONNECTED:
if ((action & DONT_DELETE_GC_AFTER_WPS) &&
(dev->ieee80211_ptr->iftype == NL80211_IFTYPE_P2P_GO) &&
- cur_eapol_status == EAPOL_STATUS_WSC_DONE) {
+ dhdp->conf->eapol_status == EAPOL_STATUS_WPS_DONE) {
WL_TRACE(("status=%d, wps_done=%d => 0\n", status, cfg->wps_done));
cfg->wps_done = FALSE;
}
break;
- case WL_EXT_STATUS_STA_CONNECTED:
+ case WL_EXT_STATUS_GC_CONNECTED:
if ((action & DONT_DELETE_GC_AFTER_WPS) &&
(dev->ieee80211_ptr->iftype == NL80211_IFTYPE_P2P_GO) &&
- cur_eapol_status == EAPOL_STATUS_WSC_DONE) {
+ dhdp->conf->eapol_status == EAPOL_STATUS_WPS_DONE) {
WL_TRACE(("status=%d, wps_done=%d => 1\n", status, cfg->wps_done));
cfg->wps_done = TRUE;
wake_up_interruptible(&cfg->wps_done_event);
}
break;
+ case WL_EXT_STATUS_DISCONNECTED:
+ case WL_EXT_STATUS_4WAY_DONE:
+ if (action & (NO_SCAN_IN4WAY|NO_BTC_IN4WAY)) {
+ if (cfg->handshaking) {
+ if (action & NO_BTC_IN4WAY)
+ wldev_iovar_setint(dev, "btc_mode", 1);
+ cfg->handshaking = 0;
+ }
+ }
+ break;
default:
WL_ERR(("Unknown action=0x%x, status=%d\n", action, status));
}
/*
* Linux cfg80211 driver
*
- * Copyright (C) 1999-2019, Broadcom.
- *
+ * Copyright (C) 1999-2017, Broadcom Corporation
+ *
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
* under the terms of the GNU General Public License version 2 (the "GPL"),
* available at http://www.broadcom.com/licenses/GPLv2.php, with the
* following added to such license:
- *
+ *
* As a special exception, the copyright holders of this software give you
* permission to link this software with independent modules, and to copy and
* distribute the resulting executable under terms of your choice, provided that
* the license of that module. An independent module is a module which is not
* derived from this software. The special exception does not apply to any
* modifications of the software.
- *
+ *
* Notwithstanding the above, under no circumstances may you combine this
* software in any way with any other Broadcom software provided under a license
* other than the GPL, without Broadcom's express prior written consent.
*
* <<Broadcom-WL-IPTag/Open:>>
*
- * $Id: wl_cfg80211.h 825255 2019-06-13 12:26:42Z $
+ * $Id: wl_cfg80211.h 710862 2017-07-14 07:43:59Z $
*/
/**
#include <linux/wireless.h>
#include <net/cfg80211.h>
#include <linux/rfkill.h>
-#include <osl.h>
+
#include <dngl_stats.h>
#include <dhd.h>
-
-#define WL_CFG_DRV_LOCK(lock, flags) (flags) = osl_spin_lock(lock)
-#define WL_CFG_DRV_UNLOCK(lock, flags) osl_spin_unlock((lock), (flags))
-
-#define WL_CFG_WPS_SYNC_LOCK(lock, flags) (flags) = osl_spin_lock(lock)
-#define WL_CFG_WPS_SYNC_UNLOCK(lock, flags) osl_spin_unlock((lock), (flags))
-
-#define WL_CFG_NET_LIST_SYNC_LOCK(lock, flags) (flags) = osl_spin_lock(lock)
-#define WL_CFG_NET_LIST_SYNC_UNLOCK(lock, flags) osl_spin_unlock((lock), (flags))
-
-#define WL_CFG_EQ_LOCK(lock, flags) (flags) = osl_spin_lock(lock)
-#define WL_CFG_EQ_UNLOCK(lock, flags) osl_spin_unlock((lock), (flags))
-
-#define WL_CFG_BAM_LOCK(lock, flags) (flags) = osl_spin_lock(lock)
-#define WL_CFG_BAM_UNLOCK(lock, flags) osl_spin_unlock((lock), (flags))
-
-#define WL_CFG_VNDR_OUI_SYNC_LOCK(lock, flags) (flags) = osl_spin_lock(lock)
-#define WL_CFG_VNDR_OUI_SYNC_UNLOCK(lock, flags) osl_spin_unlock((lock), (flags))
-
#include <wl_cfgp2p.h>
#include <wl_android.h>
-#ifdef WL_NAN
-#include <wl_cfgnan.h>
-#endif /* WL_NAN */
struct wl_conf;
struct wl_iface;
struct bcm_cfg80211;
struct wl_security;
struct wl_ibss;
-#if !defined(WL_CLIENT_SAE) && (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 17, 0))
-#define WL_CLIENT_SAE
-#endif
-#if defined(WL_SAE) && (LINUX_VERSION_CODE < KERNEL_VERSION(3, 14, 0))
-#error "Can not support WL_SAE befor kernel 3.14"
-#endif
-#if defined(WL_CLIENT_SAE) && (LINUX_VERSION_CODE < KERNEL_VERSION(4, 17, 0))
-#error "Can not support WL_CLIENT_SAE before kernel 4.17"
-#endif
-#if defined(WL_CLIENT_SAE) && defined(WL_SAE)
-#error "WL_SAE is for dongle-offload and WL_CLIENT_SAE is for wpa_supplicant. Please choose one."
-#endif
-
-#if defined(WL_CLIENT_SAE)
-#ifndef WL_ASSOC_MGR_CMD_SEND_AUTH
-#define WL_ASSOC_MGR_CMD_SEND_AUTH 3
-#endif /* WL_ASSOC_MGR_CMD_SEND_AUTH */
-#endif
-
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 17, 0) && !defined(WL_SCAN_TYPE))
-#define WL_SCAN_TYPE
-#endif /* WL_SCAN_TYPE */
-
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 18, 0)) && !defined(WL_FILS_ROAM_OFFLD)
-#define WL_FILS_ROAM_OFFLD
-#endif // endif
-
-#ifdef WL_SAE
-#define IS_AKM_SAE(akm) (akm == WLAN_AKM_SUITE_SAE)
-#else
-#define IS_AKM_SAE(akm) FALSE
-#endif // endif
-#ifdef WL_OWE
-#define IS_AKM_OWE(akm) (akm == WLAN_AKM_SUITE_OWE)
-#else
-#define IS_AKM_OWE(akm) FALSE
-#endif // endif
#define htod32(i) (i)
#define htod16(i) (i)
#define WL_DBG_INFO (1 << 1)
#define WL_DBG_ERR (1 << 0)
-#ifndef WAIT_FOR_DISCONNECT_MAX
-#define WAIT_FOR_DISCONNECT_MAX 10
-#endif /* WAIT_FOR_DISCONNECT_MAX */
-#define WAIT_FOR_DISCONNECT_STATE_SYNC 10
-
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 7, 0))
-/* Newer kernels use defines from nl80211.h */
-#define IEEE80211_BAND_2GHZ NL80211_BAND_2GHZ
-#define IEEE80211_BAND_5GHZ NL80211_BAND_5GHZ
-#define IEEE80211_BAND_60GHZ NL80211_BAND_60GHZ
-#define IEEE80211_NUM_BANDS NUM_NL80211_BANDS
-#endif /* LINUX_VER >= 4.7 */
-
#ifdef DHD_LOG_DUMP
-extern void dhd_log_dump_write(int type, char *binary_data,
- int binary_len, const char *fmt, ...);
+extern void dhd_log_dump_write(int type, const char *fmt, ...);
extern char *dhd_log_dump_get_timestamp(void);
#ifndef _DHD_LOG_DUMP_DEFINITIONS_
-#define DHD_LOG_DUMP_WRITE(fmt, ...) \
- dhd_log_dump_write(DLD_BUF_TYPE_GENERAL, NULL, 0, fmt, ##__VA_ARGS__)
-#define DHD_LOG_DUMP_WRITE_EX(fmt, ...) \
- dhd_log_dump_write(DLD_BUF_TYPE_SPECIAL, NULL, 0, fmt, ##__VA_ARGS__)
+#define DLD_BUF_TYPE_GENERAL 0
+#define DLD_BUF_TYPE_SPECIAL 1
+#define DHD_LOG_DUMP_WRITE(fmt, ...) dhd_log_dump_write(DLD_BUF_TYPE_GENERAL, fmt, ##__VA_ARGS__)
+#define DHD_LOG_DUMP_WRITE_EX(fmt, ...) dhd_log_dump_write(DLD_BUF_TYPE_SPECIAL, fmt, ##__VA_ARGS__)
#endif /* !_DHD_LOG_DUMP_DEFINITIONS_ */
#endif /* DHD_LOG_DUMP */
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)) || (defined(CONFIG_ARCH_MSM) && \
- defined(CFG80211_DISCONNECTED_V2))
-#define CFG80211_DISCONNECTED(dev, reason, ie, len, loc_gen, gfp) \
- cfg80211_disconnected(dev, reason, ie, len, loc_gen, gfp);
-#elif (LINUX_VERSION_CODE < KERNEL_VERSION(4, 2, 0))
-#define CFG80211_DISCONNECTED(dev, reason, ie, len, loc_gen, gfp) \
- BCM_REFERENCE(loc_gen); \
- cfg80211_disconnected(dev, reason, ie, len, gfp);
-#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)) */
-
/* 0 invalidates all debug messages. default is 1 */
#define WL_DBG_LEVEL 0xFF
-#define CFG80211_INFO_TEXT "[dhd] CFG80211-INFO) "
-#define CFG80211_ERROR_TEXT "[dhd] CFG80211-ERROR) "
+#ifdef CUSTOMER_HW4_DEBUG
+#define CFG80211_ERROR_TEXT "CFG80211-INFO2) "
+#else
+#define CFG80211_ERROR_TEXT "CFG80211-ERROR) "
+#endif /* CUSTOMER_HW4_DEBUG */
#if defined(DHD_DEBUG)
#ifdef DHD_LOG_DUMP
-#define WL_ERR_MSG(x, args...) \
+#define WL_ERR(args) \
do { \
if (wl_dbg_level & WL_DBG_ERR) { \
- printk(KERN_INFO CFG80211_ERROR_TEXT "%s : " x, __func__, ## args); \
+ printk(KERN_INFO CFG80211_ERROR_TEXT "%s : ", __func__); \
+ printk args; \
DHD_LOG_DUMP_WRITE("[%s] %s: ", dhd_log_dump_get_timestamp(), __func__); \
- DHD_LOG_DUMP_WRITE(x, ## args); \
- } \
-} while (0)
-#define WL_ERR(x) WL_ERR_MSG x
-#define WL_ERR_KERN_MSG(x, args...) \
-do { \
- if (wl_dbg_level & WL_DBG_ERR) { \
- printk(KERN_INFO CFG80211_ERROR_TEXT "%s : " x, __func__, ## args); \
+ DHD_LOG_DUMP_WRITE args; \
} \
} while (0)
-#define WL_ERR_KERN(x) WL_ERR_KERN_MSG x
-#define WL_ERR_MEM_MSG(x, args...) \
+#define WL_ERR_MEM(args) \
do { \
if (wl_dbg_level & WL_DBG_ERR) { \
DHD_LOG_DUMP_WRITE("[%s] %s: ", dhd_log_dump_get_timestamp(), __func__); \
- DHD_LOG_DUMP_WRITE(x, ## args); \
- } \
-} while (0)
-#define WL_ERR_MEM(x) WL_ERR_MEM_MSG x
-#define WL_INFORM_MEM_MSG(x, args...) \
-do { \
- if (wl_dbg_level & WL_DBG_INFO) { \
- printk(KERN_INFO CFG80211_INFO_TEXT "%s : " x, __func__, ## args); \
- DHD_LOG_DUMP_WRITE("[%s] %s: ", dhd_log_dump_get_timestamp(), __func__); \
- DHD_LOG_DUMP_WRITE(x, ## args); \
+ DHD_LOG_DUMP_WRITE args; \
} \
} while (0)
-#define WL_INFORM_MEM(x) WL_INFORM_MEM_MSG x
#define WL_ERR_EX(args) \
do { \
if (wl_dbg_level & WL_DBG_ERR) { \
- printk(KERN_INFO CFG80211_ERROR_TEXT "%s : " x, __func__, ## args); \
+ printk(KERN_INFO CFG80211_ERROR_TEXT "%s : ", __func__); \
+ printk args; \
DHD_LOG_DUMP_WRITE_EX("[%s] %s: ", dhd_log_dump_get_timestamp(), __func__); \
DHD_LOG_DUMP_WRITE_EX args; \
} \
} while (0)
-#define WL_MEM(args) \
-do { \
- DHD_LOG_DUMP_WRITE("[%s] %s: ", dhd_log_dump_get_timestamp(), __func__); \
- DHD_LOG_DUMP_WRITE args; \
-} while (0)
#else
-#define WL_ERR_MSG(x, args...) \
+#define WL_ERR(args) \
do { \
if (wl_dbg_level & WL_DBG_ERR) { \
- printk(KERN_INFO CFG80211_ERROR_TEXT "%s : " x, __func__, ## args); \
- } \
+ printk(KERN_INFO CFG80211_ERROR_TEXT "%s : ", __func__); \
+ printk args; \
+ } \
} while (0)
-#define WL_ERR(x) WL_ERR_MSG x
-#define WL_ERR_KERN(args) WL_ERR(args)
#define WL_ERR_MEM(args) WL_ERR(args)
-#define WL_INFORM_MEM(args) WL_INFORM(args)
#define WL_ERR_EX(args) WL_ERR(args)
-#define WL_MEM(args) WL_DBG(args)
#endif /* DHD_LOG_DUMP */
#else /* defined(DHD_DEBUG) */
-#define WL_ERR_MSG(x, args...) \
+#define WL_ERR(args) \
do { \
if ((wl_dbg_level & WL_DBG_ERR) && net_ratelimit()) { \
- printk(KERN_INFO CFG80211_ERROR_TEXT "%s : " x, __func__, ## args); \
- } \
+ printk(KERN_INFO CFG80211_ERROR_TEXT "%s : ", __func__); \
+ printk args; \
+ } \
} while (0)
-#define WL_ERR(x) WL_ERR_MSG x
-#define WL_ERR_KERN(args) WL_ERR(args)
#define WL_ERR_MEM(args) WL_ERR(args)
-#define WL_INFORM_MEM(args) WL_INFORM(args)
#define WL_ERR_EX(args) WL_ERR(args)
-#define WL_MEM(args) WL_DBG(args)
#endif /* defined(DHD_DEBUG) */
-#define WL_PRINT_RATE_LIMIT_PERIOD 4000000000u /* 4s in units of ns */
-#define WL_ERR_RLMT(args) \
-do { \
- if (wl_dbg_level & WL_DBG_ERR) { \
- static uint64 __err_ts = 0; \
- static uint32 __err_cnt = 0; \
- uint64 __cur_ts = 0; \
- __cur_ts = local_clock(); \
- if (__err_ts == 0 || (__cur_ts > __err_ts && \
- (__cur_ts - __err_ts > WL_PRINT_RATE_LIMIT_PERIOD))) { \
- __err_ts = __cur_ts; \
- WL_ERR(args); \
- WL_ERR(("[Repeats %u times]\n", __err_cnt)); \
- __err_cnt = 0; \
- } else { \
- ++__err_cnt; \
- } \
- } \
-} while (0)
-
#ifdef WL_INFORM
#undef WL_INFORM
-#endif // endif
+#endif
-#define WL_INFORM_MSG(x, args...) \
+#define WL_INFORM(args) \
do { \
if (wl_dbg_level & WL_DBG_INFO) { \
- printk(KERN_INFO "[dhd] CFG80211-INFO) %s : " x, __func__, ## args); \
- } \
+ printk(KERN_INFO "CFG80211-INFO) %s : ", __func__); \
+ printk args; \
+ } \
} while (0)
-#define WL_INFORM(x) WL_INFORM_MSG x
+
#ifdef WL_SCAN
#undef WL_SCAN
-#endif // endif
-#define WL_SCAN_MSG(x, args...) \
+#endif
+#define WL_SCAN(args) \
do { \
if (wl_dbg_level & WL_DBG_SCAN) { \
- printk(KERN_INFO "[dhd] CFG80211-SCAN) %s :" x, __func__, ## args); \
+ printk(KERN_INFO "CFG80211-SCAN) %s :", __func__); \
+ printk args; \
} \
} while (0)
-#define WL_SCAN(x) WL_SCAN_MSG x
#ifdef WL_TRACE
#undef WL_TRACE
-#endif // endif
-#define WL_TRACE_MSG(x, args...) \
+#endif
+#define WL_TRACE(args) \
do { \
if (wl_dbg_level & WL_DBG_TRACE) { \
- printk(KERN_INFO "[dhd] CFG80211-TRACE) %s :" x, __func__, ## args); \
+ printk(KERN_INFO "CFG80211-TRACE) %s :", __func__); \
+ printk args; \
} \
} while (0)
-#define WL_TRACE(x) WL_TRACE_MSG x
#ifdef WL_TRACE_HW4
#undef WL_TRACE_HW4
-#endif // endif
+#endif
+#ifdef CUSTOMER_HW4_DEBUG
+#define WL_TRACE_HW4(args) \
+do { \
+ if (wl_dbg_level & WL_DBG_ERR) { \
+ printk(KERN_INFO "CFG80211-TRACE) %s : ", __func__); \
+ printk args; \
+ } \
+} while (0)
+#else
#define WL_TRACE_HW4 WL_TRACE
+#endif /* CUSTOMER_HW4_DEBUG */
#if (WL_DBG_LEVEL > 0)
-#define WL_DBG_MSG(x, args...) \
+#define WL_DBG(args) \
do { \
if (wl_dbg_level & WL_DBG_DBG) { \
- printk(KERN_INFO "[dhd] CFG80211-DEBUG) %s :" x, __func__, ## args); \
+ printk(KERN_INFO "CFG80211-DEBUG) %s :", __func__); \
+ printk args; \
} \
} while (0)
-#define WL_DBG(x) WL_DBG_MSG x
#else /* !(WL_DBG_LEVEL > 0) */
#define WL_DBG(args)
#endif /* (WL_DBG_LEVEL > 0) */
#define IEEE80211_BAND_5GHZ NL80211_BAND_5GHZ
#define IEEE80211_NUM_BANDS NUM_NL80211_BANDS
#endif
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 8, 0))
+#ifdef WLMESH
+#undef WLMESH
+#endif
+#endif
#define WL_SCAN_RETRY_MAX 3
#define WL_NUM_PMKIDS_MAX MAXPMKID
#define WL_MED_DWELL_TIME 400
#define WL_MIN_DWELL_TIME 100
#define WL_LONG_DWELL_TIME 1000
-#define IFACE_MAX_CNT 5
+#define IFACE_MAX_CNT 4
#define WL_SCAN_CONNECT_DWELL_TIME_MS 200
#define WL_SCAN_JOIN_PROBE_INTERVAL_MS 20
#define WL_SCAN_JOIN_ACTIVE_DWELL_TIME_MS 320
#define WL_AF_TX_EXTRA_TIME_MAX 200
#define WL_SCAN_TIMER_INTERVAL_MS 10000 /* Scan timeout */
-#ifdef WL_NAN
-#define WL_SCAN_TIMER_INTERVAL_MS_NAN 15000 /* Scan timeout */
-#endif /* WL_NAN */
-#define WL_CHANNEL_SYNC_RETRY 5
-#define WL_INVALID -1
+#define WL_CHANNEL_SYNC_RETRY 5
+#define WL_INVALID -1
#ifdef DHD_LOSSLESS_ROAMING
#define WL_ROAM_TIMEOUT_MS 1000 /* Roam timeout */
-#endif // endif
+#endif
/* Bring down SCB Timeout to 20secs from 60secs default */
#ifndef WL_SCB_TIMEOUT
#define WL_SCB_TIMEOUT 20
-#endif // endif
-
-#if defined(ROAM_ENABLE) || defined(ROAM_CHANNEL_CACHE)
-#define ESCAN_CHANNEL_CACHE
-#endif // endif
+#endif
#ifndef WL_SCB_ACTIVITY_TIME
#define WL_SCB_ACTIVITY_TIME 5
-#endif // endif
+#endif
#ifndef WL_SCB_MAX_PROBE
#define WL_SCB_MAX_PROBE 3
-#endif // endif
-
-#ifndef WL_PSPRETEND_RETRY_LIMIT
-#define WL_PSPRETEND_RETRY_LIMIT 1
-#endif // endif
+#endif
#ifndef WL_MIN_PSPRETEND_THRESHOLD
#define WL_MIN_PSPRETEND_THRESHOLD 2
-#endif // endif
+#endif
/* Cipher suites */
-#ifndef WLAN_CIPHER_SUITE_PMK
-#define WLAN_CIPHER_SUITE_PMK 0x00904C00
-#endif /* WLAN_CIPHER_SUITE_PMK */
+#define WLAN_CIPHER_SUITE_PMK 0x00904C00
#ifndef WLAN_AKM_SUITE_FT_8021X
-#define WLAN_AKM_SUITE_FT_8021X 0x000FAC03
+#define WLAN_AKM_SUITE_FT_8021X 0x000FAC03
#endif /* WLAN_AKM_SUITE_FT_8021X */
#ifndef WLAN_AKM_SUITE_FT_PSK
-#define WLAN_AKM_SUITE_FT_PSK 0x000FAC04
+#define WLAN_AKM_SUITE_FT_PSK 0x000FAC04
#endif /* WLAN_AKM_SUITE_FT_PSK */
-#define WLAN_AKM_SUITE_SAE_SHA256 0x000FAC08
-
-#ifndef WLAN_AKM_SUITE_8021X_SUITE_B
-#define WLAN_AKM_SUITE_8021X_SUITE_B 0x000FAC0B
-#define WLAN_AKM_SUITE_8021X_SUITE_B_192 0x000FAC0C
-#endif /* WLAN_AKM_SUITE_8021X_SUITE_B */
-
-/* TODO: even in upstream linux(v5.0), FT-1X-SHA384 isn't defined and supported yet.
- * need to revisit here to sync correct name later.
- */
-#define WLAN_AKM_SUITE_FT_8021X_SHA384 0x000FAC0D
-
-#define WL_AKM_SUITE_SHA256_1X 0x000FAC05
-#define WL_AKM_SUITE_SHA256_PSK 0x000FAC06
-
-#ifndef WLAN_AKM_SUITE_FILS_SHA256
-#define WLAN_AKM_SUITE_FILS_SHA256 0x000FAC0E
-#define WLAN_AKM_SUITE_FILS_SHA384 0x000FAC0F
-#define WLAN_AKM_SUITE_FT_FILS_SHA256 0x000FAC10
-#define WLAN_AKM_SUITE_FT_FILS_SHA384 0x000FAC11
-#endif /* WLAN_AKM_SUITE_FILS_SHA256 */
-
-#define MIN_VENDOR_EXTN_IE_LEN 2
-#ifdef WL_OWE
-#ifndef WLAN_AKM_SUITE_OWE
-#define WLAN_AKM_SUITE_OWE 0X000FAC12
-#endif /* WPA_KEY_MGMT_OWE */
-#endif /* WL_OWE */
-
/*
* BRCM local.
* Use a high number that's unlikely to clash with linux upstream for a while until we can
#define WL_WOWLAN_PKT_FILTER_ID_FIRST 201
#define WL_WOWLAN_PKT_FILTER_ID_LAST (WL_WOWLAN_PKT_FILTER_ID_FIRST + \
WL_WOWLAN_MAX_PATTERNS - 1)
-#define IBSS_COALESCE_DEFAULT 1
-#define IBSS_INITIAL_SCAN_ALLOWED_DEFAULT 1
#ifdef WLTDLS
#define TDLS_TUNNELED_PRB_REQ "\x7f\x50\x6f\x9a\04"
#define TDLS_MAX_IFACE_FOR_ENABLE 1
#endif /* WLTDLS */
-#ifndef FILS_INDICATION_IE_TAG_FIXED_LEN
-#define FILS_INDICATION_IE_TAG_FIXED_LEN 2
-#endif // endif
-
-#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == \
- 4 && __GNUC_MINOR__ >= 6))
-#define BCM_SET_LIST_FIRST_ENTRY(entry, ptr, type, member) \
-GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST(); \
-(entry) = list_first_entry((ptr), type, member); \
-GCC_DIAGNOSTIC_POP(); \
-
-#define BCM_SET_CONTAINER_OF(entry, ptr, type, member) \
-GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST(); \
-entry = container_of((ptr), type, member); \
-GCC_DIAGNOSTIC_POP(); \
-
-#else
-#define BCM_SET_LIST_FIRST_ENTRY(entry, ptr, type, member) \
-(entry) = list_first_entry((ptr), type, member); \
-
-#define BCM_SET_CONTAINER_OF(entry, ptr, type, member) \
-entry = container_of((ptr), type, member); \
-
-#endif /* STRICT_GCC_WARNINGS */
/* driver status */
enum wl_status {
*/
WL_STATUS_FAKE_REMAINING_ON_CHANNEL,
#endif /* WL_CFG80211_VSDB_PRIORITIZE_SCAN_REQUEST */
- WL_STATUS_NESTED_CONNECT,
- WL_STATUS_CFG80211_CONNECT
+ WL_STATUS_NESTED_CONNECT
};
-typedef enum wl_iftype {
- WL_IF_TYPE_STA = 0,
- WL_IF_TYPE_AP = 1,
-#ifdef WLMESH_CFG80211
- WL_IF_TYPE_MESH = 2,
-#endif /* WLMESH_CFG80211 */
- WL_IF_TYPE_NAN_NMI = 3,
- WL_IF_TYPE_NAN = 4,
- WL_IF_TYPE_P2P_GO = 5,
- WL_IF_TYPE_P2P_GC = 6,
- WL_IF_TYPE_P2P_DISC = 7,
- WL_IF_TYPE_IBSS = 8,
- WL_IF_TYPE_MONITOR = 9,
- WL_IF_TYPE_AIBSS = 10,
- WL_IF_TYPE_MAX
-} wl_iftype_t;
-
-typedef enum wl_interface_state {
- WL_IF_CREATE_REQ,
- WL_IF_CREATE_DONE,
- WL_IF_DELETE_REQ,
- WL_IF_DELETE_DONE,
- WL_IF_CHANGE_REQ,
- WL_IF_CHANGE_DONE,
- WL_IF_STATE_MAX, /* Retain as last one */
-} wl_interface_state_t;
+enum wl_ext_status {
+ WL_EXT_STATUS_DISCONNECTED = 0,
+ WL_EXT_STATUS_SCAN,
+ WL_EXT_STATUS_CONNECTING,
+ WL_EXT_STATUS_CONNECTED,
+ WL_EXT_STATUS_DELETE_GC,
+ WL_EXT_STATUS_GC_DISCONNECTED,
+ WL_EXT_STATUS_GC_CONNECTED,
+ WL_EXT_STATUS_4WAY_START,
+ WL_EXT_STATUS_4WAY_DONE
+};
/* wi-fi mode */
enum wl_mode {
- WL_MODE_BSS = 0,
- WL_MODE_IBSS = 1,
- WL_MODE_AP = 2,
- WL_MODE_NAN = 4,
-#ifdef WLMESH_CFG80211
- WL_MODE_MESH = 5,
-#endif /* WLMESH_CFG80211 */
- WL_MODE_MAX
+ WL_MODE_BSS,
+ WL_MODE_IBSS,
+ WL_MODE_AP,
+#ifdef WLMESH
+ WL_MODE_MESH
+#endif
};
/* driver profile list */
WL_PROF_BSSID,
WL_PROF_ACT,
WL_PROF_BEACONINT,
- WL_PROF_DTIMPERIOD,
- WL_PROF_LATEST_BSSID
+ WL_PROF_DTIMPERIOD
};
/* donlge escan state */
TDLS_STATE_SETUP,
TDLS_STATE_TEARDOWN,
TDLS_STATE_IF_CREATE,
- TDLS_STATE_IF_DELETE,
- TDLS_STATE_NMI_CREATE
+ TDLS_STATE_IF_DELETE
};
/* beacon / probe_response */
struct list_head eq_list;
u32 etype;
wl_event_msg_t emsg;
- u32 datalen;
s8 edata[1];
};
u8 assoc_req_ie[VNDR_IES_BUF_LEN];
u8 assoc_res_ie[VNDR_IES_BUF_LEN];
u8 beacon_ie[VNDR_IES_MAX_BUF_LEN];
- u8 disassoc_ie[VNDR_IES_BUF_LEN];
u32 probe_req_ie_len;
u32 probe_res_ie_len;
u32 assoc_req_ie_len;
u32 assoc_res_ie_len;
u32 beacon_ie_len;
- u32 disassoc_ie_len;
} wl_bss_vndr_ies_t;
typedef struct wl_cfgbss {
u8 *wpa_ie;
u8 *rsn_ie;
u8 *wps_ie;
- u8 *fils_ind_ie;
bool security_mode;
struct wl_bss_vndr_ies ies; /* Common for STA, P2P GC, GO, AP, P2P Disc Interface */
} wl_cfgbss_t;
u16 beacon_interval;
u8 dtim_period;
bool active;
- u8 latest_bssid[ETHER_ADDR_LEN];
};
-struct wl_wps_ie {
- uint8 id; /* IE ID: 0xDD */
- uint8 len; /* IE length */
- uint8 OUI[3]; /* WiFi WPS specific OUI */
- uint8 oui_type; /* Vendor specific OUI Type */
- uint8 attrib[1]; /* variable length attributes */
-} __attribute__ ((packed));
-typedef struct wl_wps_ie wl_wps_ie_t;
-
-struct wl_eap_msg {
- uint16 attrib;
- uint16 len;
- uint8 type;
-} __attribute__ ((packed));
-typedef struct wl_eap_msg wl_eap_msg_t;
-
-struct wl_eap_exp {
- uint8 OUI[3];
- uint32 oui_type;
- uint8 opcode;
- u8 flags;
- u8 data[1];
-} __attribute__ ((packed));
-typedef struct wl_eap_exp wl_eap_exp_t;
-
struct net_info {
struct net_device *ndev;
struct wireless_dev *wdev;
struct wl_profile profile;
- wl_iftype_t iftype;
+ s32 mode;
s32 roam_off;
unsigned long sme_state;
bool pm_restore;
s32 pm;
s32 bssidx;
wl_cfgbss_t bss;
- u8 ifidx;
+ u32 ulb_bw;
struct list_head list; /* list of all net_info structure */
};
-#ifdef WL_BCNRECV
-/* PERIODIC Beacon receive for detecting FakeAPs */
-typedef struct wl_bcnrecv_result {
- uint8 SSID[DOT11_MAX_SSID_LEN]; /**< SSID String */
- struct ether_addr BSSID; /**< Network BSSID */
- uint8 channel; /**< Channel */
- uint16 beacon_interval;
- uint32 timestamp[2]; /**< Beacon Timestamp */
- uint64 system_time;
-} wl_bcnrecv_result_t;
-
-typedef struct wl_bcnrecv_info {
- uint bcnrecv_state; /* TO know the fakeap state */
-} wl_bcnrecv_info_t;
-
-typedef enum wl_bcnrecv_state {
- BEACON_RECV_IDLE = 0,
- BEACON_RECV_STARTED,
- BEACON_RECV_STOPPED,
- BEACON_RECV_SUSPENDED
-} wl_bcnrecv_state_t;
-
-typedef enum wl_bcnrecv_reason {
- WL_BCNRECV_INVALID = 0,
- WL_BCNRECV_USER_TRIGGER,
- WL_BCNRECV_SUSPEND,
- WL_BCNRECV_SCANBUSY,
- WL_BCNRECV_CONCURRENCY,
- WL_BCNRECV_LISTENBUSY,
- WL_BCNRECV_ROAMABORT,
- WL_BCNRECV_HANG
-} wl_bcnrecv_reason_t;
-
-typedef enum wl_bcnrecv_status {
- WL_BCNRECV_STARTED = 0,
- WL_BCNRECV_STOPPED,
- WL_BCNRECV_ABORTED,
- WL_BCNRECV_SUSPENDED,
- WL_BCNRECV_MAX
-} wl_bcnrecv_status_t;
-
-typedef enum wl_bcnrecv_attr_type {
- BCNRECV_ATTR_STATUS = 1,
- BCNRECV_ATTR_REASON,
- BCNRECV_ATTR_BCNINFO
-} wl_bcnrecv_attr_type_t;
-#endif /* WL_BCNRECV */
-#ifdef WL_CHAN_UTIL
-#define CU_ATTR_PERCENTAGE 1
-#define CU_ATTR_HDR_LEN 30
-#endif /* WL_CHAN_UTIL */
-
/* association inform */
-#define MAX_REQ_LINE 1024u
+#define MAX_REQ_LINE 1024
struct wl_connect_info {
u8 req_ie[MAX_REQ_LINE];
- u32 req_ie_len;
+ s32 req_ie_len;
u8 resp_ie[MAX_REQ_LINE];
- u32 resp_ie_len;
-};
-#define WL_MAX_FILS_KEY_LEN 64
-
-struct wl_fils_info {
- u8 fils_kek[WL_MAX_FILS_KEY_LEN];
- u32 fils_kek_len;
- u8 fils_pmk[WL_MAX_FILS_KEY_LEN];
- u32 fils_pmk_len;
- u8 fils_pmkid[WL_MAX_FILS_KEY_LEN];
- u16 fils_erp_next_seq_num;
- bool fils_roam_disabled;
- u32 fils_bcn_timeout_cache;
+ s32 resp_ie_len;
};
/* firmware /nvram downloading controller */
u32 resp_len;
};
-#define MIN_PMKID_LIST_V3_FW_MAJOR 13
-#define MIN_PMKID_LIST_V3_FW_MINOR 0
-
-#define MIN_PMKID_LIST_V2_FW_MAJOR 12
-#define MIN_PMKID_LIST_V2_FW_MINOR 0
-
-#define MIN_ESCAN_PARAM_V2_FW_MAJOR 14
-#define MIN_ESCAN_PARAM_V2_FW_MINOR 0
-
/* wpa2 pmk list */
struct wl_pmk_list {
- pmkid_list_v3_t pmkids;
- pmkid_v3_t foo[MAXPMKID - 1];
+ pmkid_list_t pmkids;
+ pmkid_t foo[MAXPMKID - 1];
};
-#define KEY_PERM_PMK 0xFFFFFFFF
-
#ifdef DHD_MAX_IFS
#define WL_MAX_IFS DHD_MAX_IFS
#else
#define WL_MAX_IFS 16
-#endif // endif
+#endif
-#define MAC_RAND_BYTES 3
#define ESCAN_BUF_SIZE (64 * 1024)
struct escan_info {
#ifndef CONFIG_DHD_USE_STATIC_BUF
#error STATIC_WL_PRIV_STRUCT should be used with CONFIG_DHD_USE_STATIC_BUF
#endif /* CONFIG_DHD_USE_STATIC_BUF */
-#ifdef DUAL_ESCAN_RESULT_BUFFER
- u8 *escan_buf[2];
-#else
u8 *escan_buf;
-#endif /* DUAL_ESCAN_RESULT_BUFFER */
-#else
-#ifdef DUAL_ESCAN_RESULT_BUFFER
- u8 escan_buf[2][ESCAN_BUF_SIZE];
#else
u8 escan_buf[ESCAN_BUF_SIZE];
-#endif /* DUAL_ESCAN_RESULT_BUFFER */
#endif /* STATIC_WL_PRIV_STRUCT */
-#ifdef DUAL_ESCAN_RESULT_BUFFER
- u8 cur_sync_id;
- u8 escan_type[2];
-#endif /* DUAL_ESCAN_RESULT_BUFFER */
struct wiphy *wiphy;
struct net_device *ndev;
};
};
struct parsed_ies {
- const wpa_ie_fixed_t *wps_ie;
+ wpa_ie_fixed_t *wps_ie;
u32 wps_ie_len;
- const wpa_ie_fixed_t *wpa_ie;
+ wpa_ie_fixed_t *wpa_ie;
u32 wpa_ie_len;
- const bcm_tlv_t *wpa2_ie;
+ bcm_tlv_t *wpa2_ie;
u32 wpa2_ie_len;
- const bcm_tlv_t *fils_ind_ie;
- u32 fils_ind_ie_len;
};
+
#ifdef P2P_LISTEN_OFFLOADING
typedef struct {
uint16 period; /* listen offload period */
#ifdef WL11U
/* Max length of Interworking element */
-#define IW_IES_MAX_BUF_LEN 8
-#endif // endif
-#ifdef WLFBT
-#define FBT_KEYLEN 32
-#endif // endif
+#define IW_IES_MAX_BUF_LEN 9
+#endif
#define MAX_EVENT_BUF_NUM 16
typedef struct wl_eventmsg_buf {
u16 num;
uint8 role;
} wl_if_event_info;
+
#ifdef SUPPORT_AP_RADIO_PWRSAVE
typedef struct ap_rps_info {
bool enable;
- int sta_assoc_check;
+ bool sta_assoc_check;
int pps;
int quiet_time;
int level;
} ap_rps_info_t;
#endif /* SUPPORT_AP_RADIO_PWRSAVE */
-#ifdef SUPPORT_RSSI_SUM_REPORT
+#ifdef SUPPORT_RSSI_LOGGING
#define RSSILOG_FLAG_FEATURE_SW 0x1
#define RSSILOG_FLAG_REPORT_READY 0x2
typedef struct rssilog_set_param {
int8 rssi_sum;
int8 PAD[3];
} wl_rssi_ant_mimo_t;
-#endif /* SUPPORT_RSSI_SUM_REPORT */
-
-/* MBO-OCE prune event reason codes */
-#if defined(WL_MBO) || defined(WL_OCE)
-typedef enum wl_prune_evt_reason {
- WIFI_PRUNE_UNSPECIFIED = 0, /* Unspecified event reason code */
- WIFI_PRUNE_ASSOC_RETRY_DELAY = 1, /* MBO assoc retry delay */
- WIFI_PRUNE_RSSI_ASSOC_REJ = 2 /* OCE RSSI-based assoc rejection */
-} wl_prune_evt_reason_t;
-#endif /* WL_MBO || WL_OCE */
-
-#ifdef WL_MBO
-typedef struct wl_event_mbo wl_event_mbo_t;
-typedef struct wl_event_mbo_cell_nw_switch wl_event_mbo_cell_nw_switch_t;
-typedef struct wl_btm_event_type_data wl_btm_event_type_data_t;
-#endif /* WL_MBO */
-
-#if defined(WL_MBO) || defined(WL_OCE)
-typedef struct wl_bssid_prune_evt_info wl_bssid_pruned_evt_info_t;
-#endif /* WL_MBO || WL_OCE */
-
-#ifdef WL_NAN
-#define NAN_MAX_NDI 1u
-typedef struct wl_ndi_data
-{
- u8 ifname[IFNAMSIZ];
- u8 in_use;
- u8 created;
- struct net_device *nan_ndev;
-} wl_ndi_data_t;
+#endif /* SUPPORT_RSSI_LOGGING */
+
+#if defined(DHD_ENABLE_BIGDATA_LOGGING)
+#define GET_BSS_INFO_LEN 90
+#endif /* DHD_ENABLE_BIGDATA_LOGGING */
-typedef struct wl_nancfg
-{
- wl_nan_ver_t version;
- wl_ndi_data_t ndi[NAN_MAX_NDI];
- struct mutex nan_sync;
- uint8 svc_inst_id_mask[NAN_SVC_INST_SIZE];
- uint8 inst_id_start;
- /* wait queue and condition variable for nan event */
- bool nan_event_recvd;
- wait_queue_head_t nan_event_wait;
- nan_stop_reason_code_t disable_reason;
- bool mac_rand;
- int range_type;
- uint8 max_ndp_count; /* Max no. of NDPs */
- nan_ndp_peer_t *nan_ndp_peer_info;
- nan_data_path_id ndp_id[NAN_MAX_NDP_PEER];
-} wl_nancfg_t;
-
-#ifdef WL_NANP2P
-#define WL_CFG_P2P_DISC_BIT 0x1u
-#define WL_CFG_NAN_DISC_BIT 0x2u
-#define WL_NANP2P_CONC_SUPPORT (WL_CFG_P2P_DISC_BIT | WL_CFG_NAN_DISC_BIT)
-#endif /* WL_NAN2P */
-#endif /* WL_NAN */
-
-#ifdef WL_IFACE_MGMT
-#define WL_IFACE_NOT_PRESENT -1
-
-typedef enum iface_conc_policy {
- WL_IF_POLICY_DEFAULT = 0,
- WL_IF_POLICY_FCFS = 1,
- WL_IF_POLICY_LP = 2,
- WL_IF_POLICY_ROLE_PRIORITY = 3,
- WL_IF_POLICY_CUSTOM = 4,
- WL_IF_POLICY_INVALID
-} iface_conc_policy_t;
-
-typedef struct iface_mgmt_data {
- uint8 policy;
- uint8 priority[WL_IF_TYPE_MAX];
-} iface_mgmt_data_t;
-#endif /* WL_IFACE_MGMT */
-
-#ifdef WL_WPS_SYNC
-#define EAP_PACKET 0
-#define EAP_EXPANDED_TYPE 254
-#define EAP_EXP_OPCODE_OFFSET 7
-#define EAP_EXP_FRAGMENT_LEN_OFFSET 2
-#define EAP_EXP_FLAGS_FRAGMENTED_DATA 2
-#define EAP_EXP_FLAGS_MORE_DATA 1
-#define EAPOL_EAP_HDR_LEN 5
-#define EAP_EXP_HDR_MIN_LENGTH (EAPOL_EAP_HDR_LEN + EAP_EXP_OPCODE_OFFSET)
-#define EAP_ATTRIB_MSGTYPE 0x1022
-#define EAP_WSC_UPNP 0
-#define EAP_WSC_START 1
-#define EAP_WSC_ACK 2
-#define EAP_WSC_NACK 3
-#define EAP_WSC_MSG 4
-#define EAP_WSC_DONE 5
-#define EAP_WSC_MSG_M8 12
-#define EAP_CODE_FAILURE 4
-#define WL_WPS_REAUTH_TIMEOUT 10000
-
-struct wl_eap_header {
- unsigned char code; /* EAP code */
- unsigned char id; /* Current request ID */
- unsigned short length; /* Length including header */
- unsigned char type; /* EAP type (optional) */
- unsigned char data[1]; /* Type data (optional) */
-} __attribute__ ((packed));
-typedef struct wl_eap_header wl_eap_header_t;
-
-typedef enum wl_wps_state {
- WPS_STATE_IDLE = 0,
- WPS_STATE_STARTED,
- WPS_STATE_M8_SENT,
- WPS_STATE_M8_RECVD,
- WPS_STATE_EAP_FAIL,
- WPS_STATE_REAUTH_WAIT,
- WPS_STATE_LINKUP,
- WPS_STATE_LINKDOWN,
- WPS_STATE_DISCONNECT,
- WPS_STATE_DISCONNECT_CLIENT,
- WPS_STATE_CONNECT_FAIL,
- WPS_STATE_AUTHORIZE,
- WPS_STATE_DONE,
- WPS_STATE_INVALID
-} wl_wps_state_t;
-
-#define WPS_MAX_SESSIONS 2
-typedef struct wl_wps_session {
- bool in_use;
- timer_list_compat_t timer;
- struct net_device *ndev;
- wl_wps_state_t state;
- u16 mode;
- u8 peer_mac[ETHER_ADDR_LEN];
-} wl_wps_session_t;
-#endif /* WL_WPS_SYNC */
-
-#ifndef WL_STATIC_IFNAME_PREFIX
-#define WL_STATIC_IFNAME_PREFIX "wlan%d"
-#endif /* WL_STATIC_IFNAME */
-
-typedef struct buf_data {
- u32 ver; /* version of struct */
- u32 len; /* Total len */
- /* size of each buffer in case of split buffers (0 - single buffer). */
- u32 buf_threshold;
- const void *data_buf[1]; /* array of user space buffer pointers. */
-} buf_data_t;
/* private data of cfg80211 interface */
struct bcm_cfg80211 {
struct completion wait_next_af;
struct mutex usr_sync; /* maily for up/down synchronization */
struct mutex if_sync; /* maily for iface op synchronization */
- struct mutex scan_sync; /* scan sync from different scan contexts */
+ struct mutex scan_complete; /* serialize scan_complete call */
struct wl_scan_results *bss_list;
struct wl_scan_results *scan_results;
struct wl_ie *ie;
#else
struct wl_ie ie;
-#endif // endif
+#endif
/* association information container */
#if defined(STATIC_WL_PRIV_STRUCT)
struct wl_connect_info *conn_info;
#else
struct wl_connect_info conn_info;
-#endif // endif
+#endif
#ifdef DEBUGFS_CFG80211
struct dentry *debugfs;
#endif /* DEBUGFS_CFG80211 */
bool scan_tried; /* indicates if first scan attempted */
#if defined(BCMSDIO) || defined(BCMDBUS)
bool wlfc_on;
-#endif // endif
+#endif
bool vsdb_mode;
#define WL_ROAM_OFF_ON_CONCURRENT 0x0001
#define WL_ROAM_REVERT_STATUS 0x0002
struct p2p_info *p2p;
bool p2p_supported;
void *btcoex_info;
- timer_list_compat_t scan_timeout; /* Timer for catch scan event timeout */
-#ifdef WL_CFG80211_GON_COLLISION
- u8 block_gon_req_tx_count;
- u8 block_gon_req_rx_count;
-#endif /* WL_CFG80211_GON_COLLISION */
+ struct timer_list scan_timeout; /* Timer for catch scan event timeout */
#if defined(P2P_IE_MISSING_FIX)
bool p2p_prb_noti;
-#endif // endif
+#endif
s32(*state_notifier) (struct bcm_cfg80211 *cfg,
struct net_info *_net_info, enum wl_status state, bool set);
unsigned long interrested_state;
#endif /* WL11U */
bool sched_scan_running; /* scheduled scan req status */
struct cfg80211_sched_scan_request *sched_scan_req; /* scheduled scan req */
-#ifdef WL_HOST_BAND_MGMT
- u8 curr_band;
-#endif /* WL_HOST_BAND_MGMT */
bool scan_suppressed;
- timer_list_compat_t scan_supp_timer;
+ struct timer_list scan_supp_timer;
struct work_struct wlan_work;
struct mutex event_sync; /* maily for up/down synchronization */
bool disable_roam_event;
bcm_struct_cfgdev *ibss_cfgdev; /* For AIBSS */
#endif /* WLAIBSS_MCHAN */
bool bss_pending_op; /* indicate where there is a pending IF operation */
-#ifdef WLFBT
- uint8 fbt_key[FBT_KEYLEN];
-#endif // endif
int roam_offload;
-#ifdef WL_NAN
- bool nan_enable;
- nan_svc_inst_t nan_inst_ctrl[NAN_ID_CTRL_SIZE];
- struct ether_addr initiator_ndi;
- uint8 nan_dp_state;
- bool nan_init_state; /* nan initialization state */
- wait_queue_head_t ndp_if_change_event;
- uint8 support_5g;
- u8 nan_nmi_mac[ETH_ALEN];
- u8 nan_dp_count;
- wl_nancfg_t nancfg;
- struct delayed_work nan_disable;
-#ifdef WL_NAN_DISC_CACHE
- int nan_disc_count;
- nan_disc_result_cache *nan_disc_cache;
- nan_svc_info_t svc_info[NAN_MAX_SVC_INST];
- nan_ranging_inst_t nan_ranging_info[NAN_MAX_RANGING_INST];
-#endif /* WL_NAN_DISC_CACHE */
-#ifdef WL_NANP2P
- uint8 conc_disc;
- bool nan_p2p_supported;
-#endif /* WL_NANP2P */
-#endif /* WL_NAN */
-#ifdef WL_IFACE_MGMT
- iface_mgmt_data_t iface_data;
-#endif /* WL_IFACE_MGMT */
+#ifdef WL_CFG80211_P2P_DEV_IF
+ bool down_disc_if;
+#endif /* WL_CFG80211_P2P_DEV_IF */
#ifdef P2PLISTEN_AP_SAMECHN
bool p2p_resp_apchn_status;
#endif /* P2PLISTEN_AP_SAMECHN */
#endif /* QOS_MAP_SET */
struct ether_addr last_roamed_addr;
bool rcc_enabled; /* flag for Roam channel cache feature */
+#if defined(DHD_ENABLE_BIGDATA_LOGGING)
+ char bss_info[GET_BSS_INFO_LEN];
+ wl_event_msg_t event_auth_assoc;
+ u32 assoc_reject_status;
+ u32 roam_count;
+#endif /* DHD_ENABLE_BIGDATA_LOGGING */
u16 ap_oper_channel;
+#if defined(SUPPORT_RANDOM_MAC_SCAN)
+ bool random_mac_enabled;
+#endif /* SUPPORT_RANDOM_MAC_SCAN */
#ifdef DHD_LOSSLESS_ROAMING
- timer_list_compat_t roam_timeout; /* Timer for catch roam timeout */
-#endif // endif
-#ifndef DUAL_ESCAN_RESULT_BUFFER
+ struct timer_list roam_timeout; /* Timer for catch roam timeout */
+#endif
uint16 escan_sync_id_cntr;
-#endif // endif
#ifdef WLTDLS
uint8 tdls_supported;
struct mutex tdls_sync; /* protect tdls config operations */
#endif /* WLTDLS */
#ifdef MFP
- const uint8 *bip_pos;
+ uint8 *bip_pos;
int mfp_mode;
#endif /* MFP */
- uint8 vif_count; /* Virtual Interface count */
#ifdef SUPPORT_AP_RADIO_PWRSAVE
ap_rps_info_t ap_rps_info;
#endif /* SUPPORT_AP_RADIO_PWRSAVE */
- u16 vif_macaddr_mask;
- osl_t *osh;
+#ifdef WBTEXT
+ struct list_head wbtext_bssid_list;
+#endif /* WBTEXT */
struct list_head vndr_oui_list;
- spinlock_t vndr_oui_sync; /* to protect vndr_oui_list */
- bool rssi_sum_report;
- int rssi; /* previous RSSI (backup) of get_station */
- uint64 scan_enq_time;
- uint64 scan_deq_time;
- uint64 scan_hdlr_cmplt_time;
- uint64 scan_cmplt_time;
- uint64 wl_evt_deq_time;
- uint64 wl_evt_hdlr_entry_time;
- uint64 wl_evt_hdlr_exit_time;
-#ifdef WL_WPS_SYNC
- wl_wps_session_t wps_session[WPS_MAX_SESSIONS];
- spinlock_t wps_sync; /* to protect wps states (and others if needed) */
-#endif /* WL_WPS_SYNC */
- struct wl_fils_info fils_info;
- uint8 scanmac_enabled;
-#ifdef WL_BCNRECV
- /* structure used for fake ap detection info */
- struct mutex bcn_sync; /* mainly for bcn resume/suspend synchronization */
- wl_bcnrecv_info_t bcnrecv_info;
-#endif /* WL_BCNRECV */
- struct net_device *static_ndev;
- uint8 static_ndev_state;
- bool hal_started;
- wl_wlc_version_t wlc_ver;
- bool scan_params_v2;
-#ifdef SUPPORT_AP_BWCTRL
- u32 bw_cap_5g;
-#endif /* SUPPORT_AP_BWCTRL */
+
+#ifdef STAT_REPORT
+ void *stat_report_info;
+#endif
+#ifdef WLMESH
+ char sae_password[SAE_MAX_PASSWD_LEN];
+ uint sae_password_len;
+#endif /* WLMESH */
#if defined(RSSIAVG)
wl_rssi_cache_ctrl_t g_rssi_cache_ctrl;
wl_rssi_cache_ctrl_t g_connected_rssi_cache_ctrl;
int best_2g_ch;
int best_5g_ch;
uint handshaking;
- int btc_mode;
bool wps_done;
wait_queue_head_t wps_done_event;
struct mutex in4way_sync;
- ulong disconnected_jiffies;
-};
-#define WL_STATIC_IFIDX (DHD_MAX_IFS + DHD_MAX_STATIC_IFS - 1)
-enum static_ndev_states {
- NDEV_STATE_NONE,
- NDEV_STATE_OS_IF_CREATED,
- NDEV_STATE_FW_IF_CREATED,
- NDEV_STATE_FW_IF_FAILED,
- NDEV_STATE_FW_IF_DELETED
};
-#define IS_CFG80211_STATIC_IF(cfg, ndev) \
- ((cfg && (cfg->static_ndev == ndev)) ? true : false)
-#define IS_CFG80211_STATIC_IF_ACTIVE(cfg) \
- ((cfg && cfg->static_ndev && \
- (cfg->static_ndev_state & NDEV_STATE_FW_IF_CREATED)) ? true : false)
-#define IS_CFG80211_STATIC_IF_NAME(cfg, name) \
- (cfg && cfg->static_ndev && \
- !strncmp(cfg->static_ndev->name, name, strlen(name)))
-
-#ifdef WL_SAE
-typedef struct wl_sae_key_info {
- uint8 peer_mac[ETHER_ADDR_LEN];
- uint16 pmk_len;
- uint16 pmkid_len;
- const uint8 *pmk;
- const uint8 *pmkid;
-} wl_sae_key_info_t;
-#endif /* WL_SAE */
-
-typedef enum wl_concurrency_mode {
- CONCURRENCY_MODE_NONE = 0,
- CONCURRENCY_SCC_MODE,
- CONCURRENCY_VSDB_MODE,
- CONCURRENCY_RSDB_MODE
-} wl_concurrency_mode_t;
-
-typedef struct wl_wips_event_info {
- uint32 timestamp;
- struct ether_addr bssid;
- uint16 misdeauth;
- int16 current_RSSI;
- int16 deauth_RSSI;
-} wl_wips_event_info_t;
-
-s32 wl_iftype_to_mode(wl_iftype_t iftype);
+
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == \
+ 4 && __GNUC_MINOR__ >= 6))
+#define GCC_DIAGNOSTIC_PUSH() \
+_Pragma("GCC diagnostic push") \
+_Pragma("GCC diagnostic ignored \"-Wcast-qual\"")
+#define GCC_DIAGNOSTIC_POP() \
+_Pragma("GCC diagnostic pop")
+#else
+#define GCC_DIAGNOSTIC_PUSH()
+#define GCC_DIAGNOSTIC_POP()
+#endif /* STRICT_GCC_WARNINGS */
#define BCM_LIST_FOR_EACH_ENTRY_SAFE(pos, next, head, member) \
list_for_each_entry_safe((pos), (next), (head), member)
extern int ioctl_version;
-static inline wl_bss_info_t *next_bss(struct wl_scan_results *list, wl_bss_info_t *bss)
+static inline struct wl_bss_info *next_bss(struct wl_scan_results *list, struct wl_bss_info *bss)
{
return bss = bss ?
- (wl_bss_info_t *)((uintptr) bss + dtoh32(bss->length)) : list->bss_info;
+ (struct wl_bss_info *)((uintptr) bss + dtoh32(bss->length)) : list->bss_info;
}
static inline void
struct net_info *_net_info, *next;
unsigned long int flags;
int idx = 0;
- WL_CFG_NET_LIST_SYNC_LOCK(&cfg->net_list_sync, flags);
- GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
+ spin_lock_irqsave(&cfg->net_list_sync, flags);
+ GCC_DIAGNOSTIC_PUSH();
BCM_LIST_FOR_EACH_ENTRY_SAFE(_net_info, next,
&cfg->net_list, list) {
- GCC_DIAGNOSTIC_POP();
- WL_INFORM_MEM(("wl_probe_wdev_all: net_list[%d] bssidx: %d\n",
- idx++, _net_info->bssidx));
+ WL_ERR(("%s: net_list[%d] bssidx: %d, "
+ "ndev: %p, wdev: %p \n", __FUNCTION__,
+ idx++, _net_info->bssidx,
+ _net_info->ndev, _net_info->wdev));
}
- WL_CFG_NET_LIST_SYNC_UNLOCK(&cfg->net_list_sync, flags);
+ GCC_DIAGNOSTIC_POP();
+ spin_unlock_irqrestore(&cfg->net_list_sync, flags);
return;
}
static inline struct net_info *
-wl_get_netinfo_by_fw_idx(struct bcm_cfg80211 *cfg, s32 bssidx, u8 ifidx)
+wl_get_netinfo_by_bssidx(struct bcm_cfg80211 *cfg, s32 bssidx)
{
struct net_info *_net_info, *next, *info = NULL;
unsigned long int flags;
- WL_CFG_NET_LIST_SYNC_LOCK(&cfg->net_list_sync, flags);
- GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
+ spin_lock_irqsave(&cfg->net_list_sync, flags);
+ GCC_DIAGNOSTIC_PUSH();
BCM_LIST_FOR_EACH_ENTRY_SAFE(_net_info, next, &cfg->net_list, list) {
- GCC_DIAGNOSTIC_POP();
- if ((bssidx >= 0) && (_net_info->bssidx == bssidx) &&
- (_net_info->ifidx == ifidx)) {
+ if ((bssidx >= 0) && (_net_info->bssidx == bssidx)) {
info = _net_info;
break;
}
}
- WL_CFG_NET_LIST_SYNC_UNLOCK(&cfg->net_list_sync, flags);
+ GCC_DIAGNOSTIC_POP();
+ spin_unlock_irqrestore(&cfg->net_list_sync, flags);
return info;
}
unsigned long int flags;
#ifdef DHD_IFDEBUG
- WL_INFORM_MEM(("dealloc_netinfo enter wdev=%p \n", OSL_OBFUSCATE_BUF(wdev)));
-#endif // endif
- WL_CFG_NET_LIST_SYNC_LOCK(&cfg->net_list_sync, flags);
- GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
+ WL_ERR(("dealloc_netinfo enter wdev=%p \n", wdev));
+#endif
+ spin_lock_irqsave(&cfg->net_list_sync, flags);
+ GCC_DIAGNOSTIC_PUSH();
BCM_LIST_FOR_EACH_ENTRY_SAFE(_net_info, next, &cfg->net_list, list) {
- GCC_DIAGNOSTIC_POP();
if (wdev && (_net_info->wdev == wdev)) {
wl_cfgbss_t *bss = &_net_info->bss;
- if (bss->wpa_ie) {
- MFREE(cfg->osh, bss->wpa_ie, bss->wpa_ie[1]
- + WPA_RSN_IE_TAG_FIXED_LEN);
- bss->wpa_ie = NULL;
- }
-
- if (bss->rsn_ie) {
- MFREE(cfg->osh, bss->rsn_ie,
- bss->rsn_ie[1] + WPA_RSN_IE_TAG_FIXED_LEN);
- bss->rsn_ie = NULL;
- }
-
- if (bss->wps_ie) {
- MFREE(cfg->osh, bss->wps_ie, bss->wps_ie[1] + 2);
- bss->wps_ie = NULL;
- }
+ kfree(bss->wpa_ie);
+ bss->wpa_ie = NULL;
+ kfree(bss->rsn_ie);
+ bss->rsn_ie = NULL;
+ kfree(bss->wps_ie);
+ bss->wps_ie = NULL;
list_del(&_net_info->list);
cfg->iface_cnt--;
- MFREE(cfg->osh, _net_info, sizeof(struct net_info));
+ kfree(_net_info);
}
}
- WL_CFG_NET_LIST_SYNC_UNLOCK(&cfg->net_list_sync, flags);
+ GCC_DIAGNOSTIC_POP();
+ spin_unlock_irqrestore(&cfg->net_list_sync, flags);
#ifdef DHD_IFDEBUG
- WL_INFORM_MEM(("dealloc_netinfo exit iface_cnt=%d \n", cfg->iface_cnt));
-#endif // endif
+ WL_ERR(("dealloc_netinfo exit iface_cnt=%d \n", cfg->iface_cnt));
+#endif
}
static inline s32
wl_alloc_netinfo(struct bcm_cfg80211 *cfg, struct net_device *ndev,
- struct wireless_dev * wdev, wl_iftype_t iftype, bool pm_block, u8 bssidx, u8 ifidx)
+ struct wireless_dev * wdev, s32 mode, bool pm_block, u8 bssidx)
{
struct net_info *_net_info;
s32 err = 0;
unsigned long int flags;
#ifdef DHD_IFDEBUG
- WL_INFORM_MEM(("alloc_netinfo enter bssidx=%d wdev=%p\n",
- bssidx, OSL_OBFUSCATE_BUF(wdev)));
-#endif // endif
+ WL_ERR(("alloc_netinfo enter bssidx=%d wdev=%p ndev=%p\n", bssidx, wdev, ndev));
+#endif
/* Check whether there is any duplicate entry for the
- * same bssidx && ifidx.
+ * same bssidx *
*/
- if ((_net_info = wl_get_netinfo_by_fw_idx(cfg, bssidx, ifidx))) {
+ if ((_net_info = wl_get_netinfo_by_bssidx(cfg, bssidx))) {
/* We have a duplicate entry for the same bssidx
* already present which shouldn't have been the case.
* Attempt recovery.
*/
- WL_ERR(("Duplicate entry for bssidx=%d ifidx=%d present."
- " Can't add new entry\n", bssidx, ifidx));
+ WL_ERR(("Duplicate entry for bssidx=%d present\n", bssidx));
wl_probe_wdev_all(cfg);
#ifdef DHD_DEBUG
ASSERT(0);
#endif /* DHD_DEBUG */
- return -EINVAL;
+ WL_ERR(("Removing the Dup entry for bssidx=%d \n", bssidx));
+ wl_dealloc_netinfo_by_wdev(cfg, _net_info->wdev);
}
if (cfg->iface_cnt == IFACE_MAX_CNT)
return -ENOMEM;
- _net_info = (struct net_info *)MALLOCZ(cfg->osh, sizeof(struct net_info));
+ _net_info = kzalloc(sizeof(struct net_info), GFP_KERNEL);
if (!_net_info)
err = -ENOMEM;
else {
- _net_info->iftype = iftype;
+ _net_info->mode = mode;
_net_info->ndev = ndev;
_net_info->wdev = wdev;
_net_info->pm_restore = 0;
_net_info->pm_block = pm_block;
_net_info->roam_off = WL_INVALID;
_net_info->bssidx = bssidx;
- _net_info->ifidx = ifidx;
- WL_CFG_NET_LIST_SYNC_LOCK(&cfg->net_list_sync, flags);
+ spin_lock_irqsave(&cfg->net_list_sync, flags);
cfg->iface_cnt++;
list_add(&_net_info->list, &cfg->net_list);
- WL_CFG_NET_LIST_SYNC_UNLOCK(&cfg->net_list_sync, flags);
+ spin_unlock_irqrestore(&cfg->net_list_sync, flags);
}
#ifdef DHD_IFDEBUG
- WL_DBG(("alloc_netinfo exit iface_cnt=%d \n", cfg->iface_cnt));
-#endif // endif
+ WL_ERR(("alloc_netinfo exit iface_cnt=%d \n", cfg->iface_cnt));
+#endif
return err;
}
struct net_info *_net_info, *next;
unsigned long int flags;
- WL_CFG_NET_LIST_SYNC_LOCK(&cfg->net_list_sync, flags);
- GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
+ spin_lock_irqsave(&cfg->net_list_sync, flags);
+ GCC_DIAGNOSTIC_PUSH();
BCM_LIST_FOR_EACH_ENTRY_SAFE(_net_info, next, &cfg->net_list, list) {
wl_cfgbss_t *bss = &_net_info->bss;
- GCC_DIAGNOSTIC_POP();
-
- if (bss->wpa_ie) {
- MFREE(cfg->osh, bss->wpa_ie, bss->wpa_ie[1]
- + WPA_RSN_IE_TAG_FIXED_LEN);
- bss->wpa_ie = NULL;
- }
- if (bss->rsn_ie) {
- MFREE(cfg->osh, bss->rsn_ie, bss->rsn_ie[1]
- + WPA_RSN_IE_TAG_FIXED_LEN);
- bss->rsn_ie = NULL;
- }
-
- if (bss->wps_ie) {
- MFREE(cfg->osh, bss->wps_ie, bss->wps_ie[1] + 2);
- bss->wps_ie = NULL;
- }
-
- if (bss->fils_ind_ie) {
- MFREE(cfg->osh, bss->fils_ind_ie, bss->fils_ind_ie[1]
- + FILS_INDICATION_IE_TAG_FIXED_LEN);
- bss->fils_ind_ie = NULL;
- }
+ kfree(bss->wpa_ie);
+ bss->wpa_ie = NULL;
+ kfree(bss->rsn_ie);
+ bss->rsn_ie = NULL;
+ kfree(bss->wps_ie);
+ bss->wps_ie = NULL;
list_del(&_net_info->list);
- if (_net_info->wdev) {
- MFREE(cfg->osh, _net_info->wdev, sizeof(struct wireless_dev));
- }
- MFREE(cfg->osh, _net_info, sizeof(struct net_info));
+ if (_net_info->wdev)
+ kfree(_net_info->wdev);
+ kfree(_net_info);
}
cfg->iface_cnt = 0;
- WL_CFG_NET_LIST_SYNC_UNLOCK(&cfg->net_list_sync, flags);
+ GCC_DIAGNOSTIC_POP();
+ spin_unlock_irqrestore(&cfg->net_list_sync, flags);
}
static inline u32
wl_get_status_all(struct bcm_cfg80211 *cfg, s32 status)
u32 cnt = 0;
unsigned long int flags;
- WL_CFG_NET_LIST_SYNC_LOCK(&cfg->net_list_sync, flags);
- GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
+ spin_lock_irqsave(&cfg->net_list_sync, flags);
+ GCC_DIAGNOSTIC_PUSH();
BCM_LIST_FOR_EACH_ENTRY_SAFE(_net_info, next, &cfg->net_list, list) {
- GCC_DIAGNOSTIC_POP();
if (_net_info->ndev &&
test_bit(status, &_net_info->sme_state))
cnt++;
}
- WL_CFG_NET_LIST_SYNC_UNLOCK(&cfg->net_list_sync, flags);
+ GCC_DIAGNOSTIC_POP();
+ spin_unlock_irqrestore(&cfg->net_list_sync, flags);
return cnt;
}
static inline void
struct net_info *_net_info, *next;
unsigned long int flags;
- WL_CFG_NET_LIST_SYNC_LOCK(&cfg->net_list_sync, flags);
- GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
+ spin_lock_irqsave(&cfg->net_list_sync, flags);
+ GCC_DIAGNOSTIC_PUSH();
BCM_LIST_FOR_EACH_ENTRY_SAFE(_net_info, next, &cfg->net_list, list) {
- GCC_DIAGNOSTIC_POP();
switch (op) {
case 1:
break; /* set all status is not allowed */
* Release the spinlock before calling notifier. Else there
* will be nested calls
*/
- WL_CFG_NET_LIST_SYNC_UNLOCK(&cfg->net_list_sync, flags);
+ spin_unlock_irqrestore(&cfg->net_list_sync, flags);
clear_bit(status, &_net_info->sme_state);
if (cfg->state_notifier &&
test_bit(status, &(cfg->interrested_state)))
break; /* unknown operation */
}
}
- WL_CFG_NET_LIST_SYNC_UNLOCK(&cfg->net_list_sync, flags);
+ GCC_DIAGNOSTIC_POP();
+ spin_unlock_irqrestore(&cfg->net_list_sync, flags);
}
static inline void
wl_set_status_by_netdev(struct bcm_cfg80211 *cfg, s32 status,
struct net_info *_net_info, *next;
unsigned long int flags;
- if (status >= BITS_PER_LONG) {
- /* max value for shift operation is
- * (BITS_PER_LONG -1) for unsigned long.
- * if status crosses BIT_PER_LONG, the variable
- * sme_state should be correspondingly updated.
- */
- ASSERT(0);
- return;
- }
-
- WL_CFG_NET_LIST_SYNC_LOCK(&cfg->net_list_sync, flags);
- GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
+ spin_lock_irqsave(&cfg->net_list_sync, flags);
+ GCC_DIAGNOSTIC_PUSH();
BCM_LIST_FOR_EACH_ENTRY_SAFE(_net_info, next, &cfg->net_list, list) {
if (ndev && (_net_info->ndev == ndev)) {
- GCC_DIAGNOSTIC_POP();
switch (op) {
case 1:
/*
* Release the spinlock before calling notifier. Else there
* will be nested calls
*/
- WL_CFG_NET_LIST_SYNC_UNLOCK(&cfg->net_list_sync, flags);
+ spin_unlock_irqrestore(&cfg->net_list_sync, flags);
set_bit(status, &_net_info->sme_state);
if (cfg->state_notifier &&
test_bit(status, &(cfg->interrested_state)))
* Release the spinlock before calling notifier. Else there
* will be nested calls
*/
- WL_CFG_NET_LIST_SYNC_UNLOCK(&cfg->net_list_sync, flags);
+ spin_unlock_irqrestore(&cfg->net_list_sync, flags);
clear_bit(status, &_net_info->sme_state);
if (cfg->state_notifier &&
test_bit(status, &(cfg->interrested_state)))
}
}
- WL_CFG_NET_LIST_SYNC_UNLOCK(&cfg->net_list_sync, flags);
+ GCC_DIAGNOSTIC_POP();
+ spin_unlock_irqrestore(&cfg->net_list_sync, flags);
}
wl_cfgbss_t *bss = NULL;
unsigned long int flags;
- WL_CFG_NET_LIST_SYNC_LOCK(&cfg->net_list_sync, flags);
- GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
+ spin_lock_irqsave(&cfg->net_list_sync, flags);
+ GCC_DIAGNOSTIC_PUSH();
BCM_LIST_FOR_EACH_ENTRY_SAFE(_net_info, next, &cfg->net_list, list) {
- GCC_DIAGNOSTIC_POP();
if (wdev && (_net_info->wdev == wdev)) {
bss = &_net_info->bss;
break;
}
}
+ GCC_DIAGNOSTIC_POP();
- WL_CFG_NET_LIST_SYNC_UNLOCK(&cfg->net_list_sync, flags);
+ spin_unlock_irqrestore(&cfg->net_list_sync, flags);
return bss;
}
u32 stat = 0;
unsigned long int flags;
- WL_CFG_NET_LIST_SYNC_LOCK(&cfg->net_list_sync, flags);
- GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
+ spin_lock_irqsave(&cfg->net_list_sync, flags);
+ GCC_DIAGNOSTIC_PUSH();
BCM_LIST_FOR_EACH_ENTRY_SAFE(_net_info, next, &cfg->net_list, list) {
- GCC_DIAGNOSTIC_POP();
if (ndev && (_net_info->ndev == ndev)) {
stat = test_bit(status, &_net_info->sme_state);
break;
}
}
- WL_CFG_NET_LIST_SYNC_UNLOCK(&cfg->net_list_sync, flags);
+ GCC_DIAGNOSTIC_POP();
+ spin_unlock_irqrestore(&cfg->net_list_sync, flags);
return stat;
}
s32 mode = -1;
unsigned long int flags;
- WL_CFG_NET_LIST_SYNC_LOCK(&cfg->net_list_sync, flags);
- GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
+ spin_lock_irqsave(&cfg->net_list_sync, flags);
+ GCC_DIAGNOSTIC_PUSH();
BCM_LIST_FOR_EACH_ENTRY_SAFE(_net_info, next, &cfg->net_list, list) {
- GCC_DIAGNOSTIC_POP();
- if (_net_info->ndev && (_net_info->ndev == ndev)) {
- mode = wl_iftype_to_mode(_net_info->iftype);
+ if (ndev && (_net_info->ndev == ndev)) {
+ mode = _net_info->mode;
break;
}
}
- WL_CFG_NET_LIST_SYNC_UNLOCK(&cfg->net_list_sync, flags);
+ GCC_DIAGNOSTIC_POP();
+ spin_unlock_irqrestore(&cfg->net_list_sync, flags);
return mode;
}
+static inline void
+wl_set_mode_by_netdev(struct bcm_cfg80211 *cfg, struct net_device *ndev,
+ s32 mode)
+{
+ struct net_info *_net_info, *next;
+ unsigned long int flags;
+
+ spin_lock_irqsave(&cfg->net_list_sync, flags);
+ GCC_DIAGNOSTIC_PUSH();
+ BCM_LIST_FOR_EACH_ENTRY_SAFE(_net_info, next, &cfg->net_list, list) {
+ if (ndev && (_net_info->ndev == ndev))
+ _net_info->mode = mode;
+ }
+ GCC_DIAGNOSTIC_POP();
+ spin_unlock_irqrestore(&cfg->net_list_sync, flags);
+}
+
static inline s32
wl_get_bssidx_by_wdev(struct bcm_cfg80211 *cfg, struct wireless_dev *wdev)
{
s32 bssidx = -1;
unsigned long int flags;
- WL_CFG_NET_LIST_SYNC_LOCK(&cfg->net_list_sync, flags);
- GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
+ spin_lock_irqsave(&cfg->net_list_sync, flags);
+ GCC_DIAGNOSTIC_PUSH();
BCM_LIST_FOR_EACH_ENTRY_SAFE(_net_info, next, &cfg->net_list, list) {
- GCC_DIAGNOSTIC_POP();
if (_net_info->wdev && (_net_info->wdev == wdev)) {
bssidx = _net_info->bssidx;
break;
}
}
- WL_CFG_NET_LIST_SYNC_UNLOCK(&cfg->net_list_sync, flags);
+ GCC_DIAGNOSTIC_POP();
+ spin_unlock_irqrestore(&cfg->net_list_sync, flags);
return bssidx;
}
static inline struct wireless_dev *
-wl_get_wdev_by_fw_idx(struct bcm_cfg80211 *cfg, s32 bssidx, s32 ifidx)
+wl_get_wdev_by_bssidx(struct bcm_cfg80211 *cfg, s32 bssidx)
{
struct net_info *_net_info, *next;
struct wireless_dev *wdev = NULL;
if (bssidx < 0)
return NULL;
- WL_CFG_NET_LIST_SYNC_LOCK(&cfg->net_list_sync, flags);
- GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
+ spin_lock_irqsave(&cfg->net_list_sync, flags);
+ GCC_DIAGNOSTIC_PUSH();
BCM_LIST_FOR_EACH_ENTRY_SAFE(_net_info, next, &cfg->net_list, list) {
- GCC_DIAGNOSTIC_POP();
- if ((_net_info->bssidx == bssidx) && (_net_info->ifidx == ifidx)) {
+ if (_net_info->bssidx == bssidx) {
wdev = _net_info->wdev;
break;
}
}
- WL_CFG_NET_LIST_SYNC_UNLOCK(&cfg->net_list_sync, flags);
+ GCC_DIAGNOSTIC_POP();
+ spin_unlock_irqrestore(&cfg->net_list_sync, flags);
return wdev;
}
struct wl_profile *prof = NULL;
unsigned long int flags;
- WL_CFG_NET_LIST_SYNC_LOCK(&cfg->net_list_sync, flags);
- GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
+ spin_lock_irqsave(&cfg->net_list_sync, flags);
+ GCC_DIAGNOSTIC_PUSH();
BCM_LIST_FOR_EACH_ENTRY_SAFE(_net_info, next, &cfg->net_list, list) {
- GCC_DIAGNOSTIC_POP();
if (ndev && (_net_info->ndev == ndev)) {
prof = &_net_info->profile;
break;
}
}
- WL_CFG_NET_LIST_SYNC_UNLOCK(&cfg->net_list_sync, flags);
+ GCC_DIAGNOSTIC_POP();
+ spin_unlock_irqrestore(&cfg->net_list_sync, flags);
return prof;
}
static inline struct net_info *
struct net_info *_net_info, *next, *info = NULL;
unsigned long int flags;
- WL_CFG_NET_LIST_SYNC_LOCK(&cfg->net_list_sync, flags);
- GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
+ spin_lock_irqsave(&cfg->net_list_sync, flags);
+ GCC_DIAGNOSTIC_PUSH();
BCM_LIST_FOR_EACH_ENTRY_SAFE(_net_info, next, &cfg->net_list, list) {
- GCC_DIAGNOSTIC_POP();
if (ndev && (_net_info->ndev == ndev)) {
info = _net_info;
break;
}
}
- WL_CFG_NET_LIST_SYNC_UNLOCK(&cfg->net_list_sync, flags);
+ GCC_DIAGNOSTIC_POP();
+ spin_unlock_irqrestore(&cfg->net_list_sync, flags);
return info;
}
struct net_info *_net_info, *next, *info = NULL;
unsigned long int flags;
- WL_CFG_NET_LIST_SYNC_LOCK(&cfg->net_list_sync, flags);
- GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
+ spin_lock_irqsave(&cfg->net_list_sync, flags);
+ GCC_DIAGNOSTIC_PUSH();
BCM_LIST_FOR_EACH_ENTRY_SAFE(_net_info, next, &cfg->net_list, list) {
- GCC_DIAGNOSTIC_POP();
if (wdev && (_net_info->wdev == wdev)) {
info = _net_info;
break;
}
}
- WL_CFG_NET_LIST_SYNC_UNLOCK(&cfg->net_list_sync, flags);
+ GCC_DIAGNOSTIC_POP();
+ spin_unlock_irqrestore(&cfg->net_list_sync, flags);
return info;
}
-static inline char *
-wl_iftype_to_str(int wl_iftype)
-{
- switch (wl_iftype) {
- case (WL_IF_TYPE_STA):
- return "WL_IF_TYPE_STA";
- case (WL_IF_TYPE_AP):
- return "WL_IF_TYPE_AP";
- case (WL_IF_TYPE_NAN_NMI):
- return "WL_IF_TYPE_NAN_NMI";
- case (WL_IF_TYPE_NAN):
- return "WL_IF_TYPE_NAN";
- case (WL_IF_TYPE_P2P_GO):
- return "WL_IF_TYPE_P2P_GO";
- case (WL_IF_TYPE_P2P_GC):
- return "WL_IF_TYPE_P2P_GC";
- case (WL_IF_TYPE_P2P_DISC):
- return "WL_IF_TYPE_P2P_DISC";
- case (WL_IF_TYPE_IBSS):
- return "WL_IF_TYPE_IBSS";
- case (WL_IF_TYPE_MONITOR):
- return "WL_IF_TYPE_MONITOR";
- case (WL_IF_TYPE_AIBSS):
- return "WL_IF_TYPE_AIBSS";
- default:
- return "WL_IF_TYPE_UNKNOWN";
- }
-}
-
-#define is_discovery_iface(iface) (((iface == WL_IF_TYPE_P2P_DISC) || \
- (iface == WL_IF_TYPE_NAN_NMI)) ? 1 : 0)
#define is_p2p_group_iface(wdev) (((wdev->iftype == NL80211_IFTYPE_P2P_GO) || \
(wdev->iftype == NL80211_IFTYPE_P2P_CLIENT)) ? 1 : 0)
#define bcmcfg_to_wiphy(cfg) (cfg->wdev->wiphy)
#define ndev_to_wdev(ndev) (ndev->ieee80211_ptr)
#define wdev_to_ndev(wdev) (wdev->netdev)
-#ifdef WL_BLOCK_P2P_SCAN_ON_STA
-#define IS_P2P_IFACE(wdev) (wdev && \
- ((wdev->iftype == NL80211_IFTYPE_P2P_DEVICE) || \
- (wdev->iftype == NL80211_IFTYPE_P2P_GO) || \
- (wdev->iftype == NL80211_IFTYPE_P2P_CLIENT)))
-#endif /* WL_BLOCK_P2P_SCAN_ON_STA */
-
-#define IS_STA_IFACE(wdev) (wdev && \
- (wdev->iftype == NL80211_IFTYPE_STATION))
-
-#define IS_AP_IFACE(wdev) (wdev && \
- (wdev->iftype == NL80211_IFTYPE_AP))
-
#if defined(WL_ENABLE_P2P_IF)
#define ndev_to_wlc_ndev(ndev, cfg) ((ndev == cfg->p2p_net) ? \
bcmcfg_to_prmry_ndev(cfg) : ndev)
{ \
struct wireless_dev *wdev = cfgdev_to_wdev(cfgdev); \
struct net_device *netdev = wdev ? wdev->netdev : NULL; \
- WL_DBG(("wdev_ptr:%p ndev_ptr:%p ifname:%s iftype:%d\n", OSL_OBFUSCATE_BUF(wdev), \
- OSL_OBFUSCATE_BUF(netdev), \
+ WL_DBG(("wdev_ptr:%p ndev_ptr:%p ifname:%s iftype:%d\n", wdev, netdev, \
netdev ? netdev->name : "NULL (non-ndev device)", \
wdev ? wdev->iftype : 0xff)); \
}
#else
#define wl_to_ie(w) (&w->ie)
#define wl_to_conn(w) (&w->conn_info)
-#endif // endif
-#define wl_to_fils_info(w) (&w->fils_info)
+#endif
#define wiphy_from_scan(w) (w->escan_info.wiphy)
#define wl_get_drv_status_all(cfg, stat) \
(wl_get_status_all(cfg, WL_STATUS_ ## stat))
* In addtion to that, wpa_version is WPA_VERSION_1
*/
#define is_wps_conn(_sme) \
- ((wl_cfgp2p_find_wpsie(_sme->ie, _sme->ie_len) != NULL) && \
+ ((wl_cfgp2p_find_wpsie((u8 *)_sme->ie, _sme->ie_len) != NULL) && \
(!_sme->crypto.n_ciphers_pairwise) && \
(!_sme->crypto.cipher_group))
-#ifdef WLFBT
-#if defined(WLAN_AKM_SUITE_FT_8021X) && defined(WLAN_AKM_SUITE_FT_PSK)
-#define IS_AKM_SUITE_FT(sec) (sec->wpa_auth == WLAN_AKM_SUITE_FT_8021X || \
- sec->wpa_auth == WLAN_AKM_SUITE_FT_PSK)
-#elif defined(WLAN_AKM_SUITE_FT_8021X)
-#define IS_AKM_SUITE_FT(sec) (sec->wpa_auth == WLAN_AKM_SUITE_FT_8021X)
-#elif defined(WLAN_AKM_SUITE_FT_PSK)
-#define IS_AKM_SUITE_FT(sec) (sec->wpa_auth == WLAN_AKM_SUITE_FT_PSK)
-#else
-#define IS_AKM_SUITE_FT(sec) ({BCM_REFERENCE(sec); FALSE;})
-#endif /* WLAN_AKM_SUITE_FT_8021X && WLAN_AKM_SUITE_FT_PSK */
-#else
#define IS_AKM_SUITE_FT(sec) ({BCM_REFERENCE(sec); FALSE;})
-#endif /* WLFBT */
#define IS_AKM_SUITE_CCKM(sec) ({BCM_REFERENCE(sec); FALSE;})
extern void wl_cfg80211_event(struct net_device *ndev, const wl_event_msg_t *e,
void *data);
-extern s32 wl_cfg80211_handle_critical_events(struct bcm_cfg80211 *cfg,
- const wl_event_msg_t * e);
-
void wl_cfg80211_set_parent_dev(void *dev);
struct device *wl_cfg80211_get_parent_dev(void);
-struct bcm_cfg80211 *wl_cfg80211_get_bcmcfg(void);
-void wl_cfg80211_set_bcmcfg(struct bcm_cfg80211 *cfg);
/* clear IEs */
extern s32 wl_cfg80211_clear_mgmt_vndr_ies(struct bcm_cfg80211 *cfg);
-extern s32 wl_cfg80211_clear_per_bss_ies(struct bcm_cfg80211 *cfg, struct wireless_dev *wdev);
-extern void wl_cfg80211_clear_p2p_disc_ies(struct bcm_cfg80211 *cfg);
-#ifdef WL_STATIC_IF
-extern int32 wl_cfg80211_update_iflist_info(struct bcm_cfg80211 *cfg, struct net_device *ndev,
- int ifidx, uint8 *addr, int bssidx, char *name, int if_state);
-#endif /* WL_STATIC_IF */
+extern s32 wl_cfg80211_clear_per_bss_ies(struct bcm_cfg80211 *cfg, s32 bssidx);
+
extern s32 wl_cfg80211_up(struct net_device *net);
extern s32 wl_cfg80211_down(struct net_device *net);
-extern void wl_cfg80211_sta_ifdown(struct net_device *net);
extern s32 wl_cfg80211_notify_ifadd(struct net_device * dev, int ifidx, char *name, uint8 *mac,
- uint8 bssidx, uint8 role);
+ uint8 bssidx);
extern s32 wl_cfg80211_notify_ifdel(struct net_device * dev, int ifidx, char *name, uint8 *mac,
uint8 bssidx);
extern s32 wl_cfg80211_notify_ifchange(struct net_device * dev, int ifidx, char *name, uint8 *mac,
int ifidx, struct net_device* ndev, bool rtnl_lock_reqd);
extern int wl_cfg80211_remove_if(struct bcm_cfg80211 *cfg,
int ifidx, struct net_device* ndev, bool rtnl_lock_reqd);
-extern void wl_cfg80211_cleanup_if(struct net_device *dev);
+extern int wl_cfg80211_scan_stop(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev);
+extern void wl_cfg80211_scan_abort(struct bcm_cfg80211 *cfg);
extern bool wl_cfg80211_is_concurrent_mode(struct net_device * dev);
-extern void wl_cfg80211_disassoc(struct net_device *ndev, uint32 reason);
-extern void wl_cfg80211_del_all_sta(struct net_device *ndev, uint32 reason);
extern void* wl_cfg80211_get_dhdp(struct net_device * dev);
extern bool wl_cfg80211_is_p2p_active(struct net_device * dev);
extern bool wl_cfg80211_is_roam_offload(struct net_device * dev);
extern s32 wl_cfg80211_set_p2p_ps(struct net_device *net, char* buf, int len);
extern s32 wl_cfg80211_set_p2p_ecsa(struct net_device *net, char* buf, int len);
extern s32 wl_cfg80211_increase_p2p_bw(struct net_device *net, char* buf, int len);
-extern bool wl_cfg80211_check_vif_in_use(struct net_device *ndev);
+#ifdef WLMESH
+extern s32 wl_cfg80211_set_sae_password(struct net_device *net, char* buf, int len);
+#endif
+#ifdef WL11ULB
+extern s32 wl_cfg80211_set_ulb_mode(struct net_device *dev, int mode);
+extern s32 wl_cfg80211_set_ulb_bw(struct net_device *dev,
+ u32 ulb_bw, char *ifname);
+#endif /* WL11ULB */
#ifdef P2PLISTEN_AP_SAMECHN
extern s32 wl_cfg80211_set_p2p_resp_ap_chn(struct net_device *net, s32 enable);
#endif /* P2PLISTEN_AP_SAMECHN */
extern s32 wl_update_wiphybands(struct bcm_cfg80211 *cfg, bool notify);
extern s32 wl_cfg80211_if_is_group_owner(void);
extern chanspec_t wl_chspec_host_to_driver(chanspec_t chanspec);
-extern chanspec_t wl_ch_host_to_driver(u16 channel);
+extern chanspec_t wl_ch_host_to_driver(struct bcm_cfg80211 *cfg, s32 bssidx, u16 channel);
extern s32 wl_set_tx_power(struct net_device *dev,
enum nl80211_tx_power_setting type, s32 dbm);
extern s32 wl_get_tx_power(struct net_device *dev, s32 *dbm);
extern s32 wl_add_remove_eventmsg(struct net_device *ndev, u16 event, bool add);
extern void wl_stop_wait_next_action_frame(struct bcm_cfg80211 *cfg, struct net_device *ndev,
u8 bsscfgidx);
-#ifdef WL_HOST_BAND_MGMT
-extern s32 wl_cfg80211_set_band(struct net_device *ndev, int band);
-#endif /* WL_HOST_BAND_MGMT */
extern void wl_cfg80211_add_to_eventbuffer(wl_eventmsg_buf_t *ev, u16 event, bool set);
extern s32 wl_cfg80211_apply_eventbuffer(struct net_device *ndev,
struct bcm_cfg80211 *cfg, wl_eventmsg_buf_t *ev);
extern void get_primary_mac(struct bcm_cfg80211 *cfg, struct ether_addr *mac);
extern void wl_cfg80211_update_power_mode(struct net_device *dev);
+extern void wl_cfg80211_set_passive_scan(struct net_device *dev, char *command);
extern void wl_terminate_event_handler(struct net_device *dev);
+#if defined(DHD_ENABLE_BIGDATA_LOGGING)
+extern s32 wl_cfg80211_get_bss_info(struct net_device *dev, char* cmd, int total_len);
+extern s32 wl_cfg80211_get_connect_failed_status(struct net_device *dev, char* cmd, int total_len);
+#endif /* DHD_ENABLE_BIGDATA_LOGGING */
extern struct bcm_cfg80211 *wl_get_cfg(struct net_device *ndev);
extern s32 wl_cfg80211_set_if_band(struct net_device *ndev, int band);
-extern s32 wl_cfg80211_set_country_code(struct net_device *dev, char *country_code,
- bool notify, bool user_enforced, int revinfo);
-extern bool wl_cfg80211_is_hal_started(struct bcm_cfg80211 *cfg);
-#ifdef WL_WIPSEVT
-extern int wl_cfg80211_wips_event(uint16 misdeauth, char* bssid);
-extern int wl_cfg80211_wips_event_ext(wl_wips_event_info_t *wips_event);
-#endif /* WL_WIPSEVT */
#define SCAN_BUF_CNT 2
#define SCAN_BUF_NEXT 1
#define WL_SCANTYPE_LEGACY 0x1
#define WL_SCANTYPE_P2P 0x2
-#ifdef DUAL_ESCAN_RESULT_BUFFER
-#define wl_escan_set_sync_id(a, b) ((a) = (b)->escan_info.cur_sync_id)
-#define wl_escan_set_type(a, b) ((a)->escan_info.escan_type\
- [((a)->escan_info.cur_sync_id)%SCAN_BUF_CNT] = (b))
-static inline wl_scan_results_t *wl_escan_get_buf(struct bcm_cfg80211 *cfg, bool aborted)
-{
- u8 index;
- if (aborted) {
- if (cfg->escan_info.escan_type[0] == cfg->escan_info.escan_type[1])
- index = (cfg->escan_info.cur_sync_id + 1)%SCAN_BUF_CNT;
- else
- index = (cfg->escan_info.cur_sync_id)%SCAN_BUF_CNT;
- }
- else
- index = (cfg->escan_info.cur_sync_id)%SCAN_BUF_CNT;
-
- return (wl_scan_results_t *)cfg->escan_info.escan_buf[index];
-}
-static inline int wl_escan_check_sync_id(s32 status, u16 result_id, u16 wl_id)
-{
- if (result_id != wl_id) {
- WL_ERR(("ESCAN sync id mismatch :status :%d "
- "cur_sync_id:%d coming sync_id:%d\n",
- status, wl_id, result_id));
- return -1;
- }
- else
- return 0;
-}
-static inline void wl_escan_print_sync_id(s32 status, u16 result_id, u16 wl_id)
-{
- if (result_id != wl_id) {
- WL_ERR(("ESCAN sync id mismatch :status :%d "
- "cur_sync_id:%d coming sync_id:%d\n",
- status, wl_id, result_id));
- }
-}
-#define wl_escan_increment_sync_id(a, b) ((a)->escan_info.cur_sync_id += b)
-#define wl_escan_init_sync_id(a) ((a)->escan_info.cur_sync_id = 0)
-#else
#define wl_escan_set_sync_id(a, b) ((a) = htod16((b)->escan_sync_id_cntr++))
#define wl_escan_set_type(a, b)
#define wl_escan_get_buf(a, b) ((wl_scan_results_t *) (a)->escan_info.escan_buf)
#define wl_escan_print_sync_id(a, b, c)
#define wl_escan_increment_sync_id(a, b)
#define wl_escan_init_sync_id(a)
-#endif /* DUAL_ESCAN_RESULT_BUFFER */
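The removed DUAL_ESCAN_RESULT_BUFFER helpers ping-pong between SCAN_BUF_CNT result buffers keyed off cur_sync_id, falling back to the other slot when a scan was aborted and both slots hold the same scan type. A standalone sketch of that index selection, assuming two buffers and using simplified stand-ins for the escan_info fields:

#define SCAN_BUF_CNT 2

/* Simplified stand-ins for the escan_info fields used by the removed helper. */
struct escan_state {
	unsigned short cur_sync_id;
	int escan_type[SCAN_BUF_CNT];
	void *escan_buf[SCAN_BUF_CNT];
};

static void *escan_pick_buf(struct escan_state *es, int aborted)
{
	unsigned int index = es->cur_sync_id % SCAN_BUF_CNT;

	/* On abort, switch to the other buffer only when both slots carry the
	 * same scan type, matching the removed wl_escan_get_buf() logic.
	 */
	if (aborted && es->escan_type[0] == es->escan_type[1])
		index = (es->cur_sync_id + 1) % SCAN_BUF_CNT;

	return es->escan_buf[index];
}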
extern void wl_cfg80211_ibss_vsie_set_buffer(struct net_device *dev, vndr_ie_setbuf_t *ibss_vsie,
int ibss_vsie_len);
extern s32 wl_cfg80211_ibss_vsie_delete(struct net_device *dev);
bcm_struct_cfgdev *cfgdev, s32 bssidx, s32 pktflag,
const u8 *vndr_ie, u32 vndr_ie_len);
-#ifdef WLFBT
-extern int wl_cfg80211_get_fbt_key(struct net_device *dev, uint8 *key, int total_len);
-#endif // endif
/* Action frame specific functions */
extern u8 wl_get_action_category(void *frame, u32 frame_len);
#define IDLE_TOKEN_IDX 12
#endif /* WL_SUPPORT_ACS */
-#ifdef BCMWAPI_WPI
-#define is_wapi(cipher) (cipher == WLAN_CIPHER_SUITE_SMS4) ? 1 : 0
-#endif /* BCMWAPI_WPI */
extern int wl_cfg80211_get_ioctl_version(void);
extern int wl_cfg80211_enable_roam_offload(struct net_device *dev, int enable);
extern s32 wl_cfg80211_dfs_ap_move(struct net_device *ndev, char *data,
char *command, int total_len);
+#ifdef WBTEXT
+extern s32 wl_cfg80211_wbtext_set_default(struct net_device *ndev);
+extern s32 wl_cfg80211_wbtext_config(struct net_device *ndev, char *data,
+ char *command, int total_len);
+extern int wl_cfg80211_wbtext_weight_config(struct net_device *ndev, char *data,
+ char *command, int total_len);
+extern int wl_cfg80211_wbtext_table_config(struct net_device *ndev, char *data,
+ char *command, int total_len);
+extern s32 wl_cfg80211_wbtext_delta_config(struct net_device *ndev, char *data,
+ char *command, int total_len);
+#endif /* WBTEXT */
extern s32 wl_cfg80211_get_chanspecs_2g(struct net_device *ndev,
void *buf, s32 buflen);
extern s32 wl_cfg80211_get_chanspecs_5g(struct net_device *ndev,
struct net_device *wl_cfg80211_post_ifcreate(struct net_device *ndev,
wl_if_event_info *event, u8 *addr, const char *name, bool rtnl_lock_reqd);
-extern s32 wl_cfg80211_post_ifdel(struct net_device *ndev, bool rtnl_lock_reqd, s32 ifidx);
+extern s32 wl_cfg80211_post_ifdel(struct net_device *ndev, bool rtnl_lock_reqd);
+#if defined(WL_VIRTUAL_APSTA)
+extern int wl_cfg80211_interface_create(struct net_device *dev, char *name);
+extern int wl_cfg80211_interface_delete(struct net_device *dev, char *name);
#if defined(PKT_FILTER_SUPPORT) && defined(APSTA_BLOCK_ARP_DURING_DHCP)
extern void wl_cfg80211_block_arp(struct net_device *dev, int enable);
#endif /* PKT_FILTER_SUPPORT && APSTA_BLOCK_ARP_DURING_DHCP */
+#endif /* defined (WL_VIRTUAL_APSTA) */
-#ifdef WLTDLS
-extern s32 wl_cfg80211_tdls_config(struct bcm_cfg80211 *cfg,
- enum wl_tdls_config state, bool tdls_mode);
-extern s32 wl_tdls_event_handler(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev,
- const wl_event_msg_t *e, void *data);
-#endif /* WLTDLS */
-
-#ifdef WL_NAN
-extern int wl_cfgvendor_send_nan_event(struct wiphy * wiphy,
- struct net_device *dev, int event_id,
- nan_event_data_t *nan_event_data);
-extern int wl_cfgnan_init(struct bcm_cfg80211 *cfg);
-extern int wl_cfgnan_deinit(struct bcm_cfg80211 *cfg, uint8 busstate);
-extern bool wl_cfgnan_check_state(struct bcm_cfg80211 *cfg);
-#ifdef RTT_SUPPORT
-extern s32 wl_cfgvendor_send_as_rtt_legacy_event(struct wiphy *wiphy,
- struct net_device *dev, wl_nan_ev_rng_rpt_ind_t *range_res,
- uint32 status);
-#endif /* RTT_SUPPORT */
-#ifdef WL_NANP2P
-extern int wl_cfg80211_set_iface_conc_disc(struct net_device *ndev,
- uint8 arg_val);
-extern uint8 wl_cfg80211_get_iface_conc_disc(struct net_device *ndev);
-#endif /* WL_NANP2P */
-#endif /* WL_NAN */
#ifdef WL_CFG80211_P2P_DEV_IF
extern void wl_cfg80211_del_p2p_wdev(struct net_device *dev);
#endif /* WL_CFG80211_P2P_DEV_IF */
#if defined(WL_SUPPORT_AUTO_CHANNEL)
extern int wl_cfg80211_set_spect(struct net_device *dev, int spect);
-extern int wl_cfg80211_get_sta_channel(struct bcm_cfg80211 *cfg);
+extern int wl_cfg80211_get_sta_channel(struct net_device *dev);
#endif /* WL_SUPPORT_AUTO_CHANNEL */
-#ifdef WL_CFG80211_SYNC_GON
-#define WL_DRV_STATUS_SENDING_AF_FRM_EXT(cfg) \
- (wl_get_drv_status_all(cfg, SENDING_ACT_FRM) || \
- wl_get_drv_status_all(cfg, WAITING_NEXT_ACT_FRM_LISTEN))
-#else
-#define WL_DRV_STATUS_SENDING_AF_FRM_EXT(cfg) wl_get_drv_status_all(cfg, SENDING_ACT_FRM)
-#endif /* WL_CFG80211_SYNC_GON */
#ifdef P2P_LISTEN_OFFLOADING
-extern s32 wl_cfg80211_p2plo_deinit(struct bcm_cfg80211 *cfg);
+extern s32 wl_cfg80211_p2plo_listen_start(struct net_device *dev, u8 *buf, int len);
+extern s32 wl_cfg80211_p2plo_listen_stop(struct net_device *dev);
#endif /* P2P_LISTEN_OFFLOADING */
-/* Function to flush the FW log buffer content */
-extern void wl_flush_fw_log_buffer(struct net_device *dev, uint32 logset_mask);
-
#define RETURN_EIO_IF_NOT_UP(wlpriv) \
do { \
struct net_device *checkSysUpNDev = bcmcfg_to_prmry_ndev(wlpriv); \
#define P2PO_COOKIE 65535
u64 wl_cfg80211_get_new_roc_id(struct bcm_cfg80211 *cfg);
+#if defined(SUPPORT_RANDOM_MAC_SCAN)
+int wl_cfg80211_set_random_mac(struct net_device *dev, bool enable);
+int wl_cfg80211_random_mac_enable(struct net_device *dev);
+int wl_cfg80211_random_mac_disable(struct net_device *dev);
+#endif /* SUPPORT_RANDOM_MAC_SCAN */
#ifdef SUPPORT_AP_HIGHER_BEACONRATE
int wl_set_ap_beacon_rate(struct net_device *dev, int val, char *ifname);
int wl_get_ap_basic_rate(struct net_device *dev, char* command, char *ifname, int total_len);
int wl_update_ap_rps_params(struct net_device *dev, ap_rps_info_t* rps, char *ifname);
void wl_cfg80211_init_ap_rps(struct bcm_cfg80211 *cfg);
#endif /* SUPPORT_AP_RADIO_PWRSAVE */
-#ifdef SUPPORT_RSSI_SUM_REPORT
+#ifdef SUPPORT_RSSI_LOGGING
int wl_get_rssi_logging(struct net_device *dev, void *param);
int wl_set_rssi_logging(struct net_device *dev, void *param);
int wl_get_rssi_per_ant(struct net_device *dev, char *ifname, char *peer_mac, void *param);
-#endif /* SUPPORT_RSSI_SUM_REPORT */
+#endif /* SUPPORT_RSSI_LOGGING */
int wl_cfg80211_iface_count(struct net_device *dev);
struct net_device* wl_get_ap_netdev(struct bcm_cfg80211 *cfg, char *ifname);
-void wl_cfg80211_cleanup_virtual_ifaces(struct bcm_cfg80211 *cfg, bool rtnl_lock_reqd);
-#ifdef WL_IFACE_MGMT
-extern int wl_cfg80211_set_iface_policy(struct net_device *ndev, char *arg, int len);
-extern uint8 wl_cfg80211_get_iface_policy(struct net_device *ndev);
-extern s32 wl_cfg80211_handle_if_role_conflict(struct bcm_cfg80211 *cfg, wl_iftype_t new_wl_iftype);
-s32 wl_cfg80211_data_if_mgmt(struct bcm_cfg80211 *cfg, wl_iftype_t new_wl_iftype);
-s32 wl_cfg80211_disc_if_mgmt(struct bcm_cfg80211 *cfg, wl_iftype_t new_wl_iftype,
- bool *disable_nan, bool *disable_p2p);
-s32 wl_cfg80211_handle_discovery_config(struct bcm_cfg80211 *cfg, wl_iftype_t new_wl_iftype);
-wl_iftype_t wl_cfg80211_get_sec_iface(struct bcm_cfg80211 *cfg);
-bool wl_cfg80211_is_associated_discovery(struct bcm_cfg80211 *cfg, wl_iftype_t new_wl_iftype);
-#endif /* WL_IFACE_MGMT */
-struct wireless_dev * wl_cfg80211_add_if(struct bcm_cfg80211 *cfg, struct net_device *primary_ndev,
- wl_iftype_t wl_iftype, const char *name, u8 *mac);
-extern s32 wl_cfg80211_del_if(struct bcm_cfg80211 *cfg, struct net_device *primary_ndev,
- struct wireless_dev *wdev, char *name);
-s32 _wl_cfg80211_del_if(struct bcm_cfg80211 *cfg, struct net_device *primary_ndev,
- struct wireless_dev *wdev, char *ifname);
-s32 wl_cfg80211_delete_iface(struct bcm_cfg80211 *cfg, wl_iftype_t sec_data_if_type);
-
-#ifdef WL_STATIC_IF
-extern struct net_device *wl_cfg80211_register_static_if(struct bcm_cfg80211 *cfg,
- u16 iftype, char *ifname);
-extern void wl_cfg80211_unregister_static_if(struct bcm_cfg80211 * cfg);
-extern s32 wl_cfg80211_static_if_open(struct net_device *net);
-extern s32 wl_cfg80211_static_if_close(struct net_device *net);
-extern struct net_device * wl_cfg80211_post_static_ifcreate(struct bcm_cfg80211 *cfg,
- wl_if_event_info *event, u8 *addr, s32 iface_type);
-extern s32 wl_cfg80211_post_static_ifdel(struct bcm_cfg80211 *cfg, struct net_device *ndev);
-#endif /* WL_STATIC_IF */
-extern struct wireless_dev *wl_cfg80211_get_wdev_from_ifname(struct bcm_cfg80211 *cfg,
- const char *name);
struct net_device* wl_get_netdev_by_name(struct bcm_cfg80211 *cfg, char *ifname);
-extern s32 wl_get_vif_macaddr(struct bcm_cfg80211 *cfg, u16 wl_iftype, u8 *mac_addr);
-extern s32 wl_release_vif_macaddr(struct bcm_cfg80211 *cfg, u8 *mac_addr, u16 wl_iftype);
-extern int wl_cfg80211_ifstats_counters(struct net_device *dev, wl_if_stats_t *if_stats);
-extern s32 wl_cfg80211_set_dbg_verbose(struct net_device *ndev, u32 level);
-extern int wl_cfg80211_deinit_p2p_discovery(struct bcm_cfg80211 * cfg);
-extern int wl_cfg80211_set_frameburst(struct bcm_cfg80211 *cfg, bool enable);
-extern int wl_cfg80211_determine_p2p_rsdb_mode(struct bcm_cfg80211 *cfg);
-extern uint8 wl_cfg80211_get_bus_state(struct bcm_cfg80211 *cfg);
-#ifdef WL_WPS_SYNC
-void wl_handle_wps_states(struct net_device *ndev, u8 *dump_data, u16 len, bool direction);
-#endif /* WL_WPS_SYNC */
-extern int wl_features_set(u8 *array, uint8 len, u32 ftidx);
-extern void *wl_read_prof(struct bcm_cfg80211 *cfg, struct net_device *ndev, s32 item);
-extern s32 wl_cfg80211_sup_event_handler(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev,
- const wl_event_msg_t *event, void *data);
-extern s32 wl_inform_bss(struct bcm_cfg80211 *cfg);
-extern void wl_cfg80211_cancel_scan(struct bcm_cfg80211 *cfg);
-extern s32 wl_notify_escan_complete(struct bcm_cfg80211 *cfg,
- struct net_device *ndev, bool aborted, bool fw_abort);
-extern s32 cfg80211_to_wl_iftype(uint16 type, uint16 *role, uint16 *mode);
-extern s32 wl_cfg80211_net_attach(struct net_device *primary_ndev);
-extern void wl_print_verinfo(struct bcm_cfg80211 *cfg);
-extern const u8 *wl_find_attribute(const u8 *buf, u16 len, u16 element_id);
-extern int wl_cfg80211_get_concurrency_mode(struct bcm_cfg80211 *cfg);
-#if defined(WL_DISABLE_HE_SOFTAP) || defined(WL_DISABLE_HE_P2P)
-int wl_cfg80211_set_he_mode(struct net_device *dev, struct bcm_cfg80211 *cfg,
- s32 bssidx, u32 interface_type, bool set);
-#endif /* WL_DISABLE_HE_SOFTAP || WL_DISABLE_HE_P2P */
-extern s32 wl_cfg80211_config_suspend_events(struct net_device *ndev, bool enable);
-#ifdef SUPPORT_AP_SUSPEND
-extern int wl_set_ap_suspend(struct net_device *dev, bool enable, char *ifname);
-#endif /* SUPPORT_AP_SUSPEND */
-#ifdef SUPPORT_SOFTAP_ELNA_BYPASS
-int wl_set_softap_elna_bypass(struct net_device *dev, char *ifname, int enable);
-int wl_get_softap_elna_bypass(struct net_device *dev, char *ifname, void *param);
-#endif /* SUPPORT_SOFTAP_ELNA_BYPASS */
-#ifdef SUPPORT_AP_BWCTRL
-extern int wl_set_ap_bw(struct net_device *dev, u32 bw, char *ifname);
-extern int wl_get_ap_bw(struct net_device *dev, char* command, char *ifname, int total_len);
-#endif /* SUPPORT_AP_BWCTRL */
-bool wl_cfg80211_check_in_progress(struct net_device *dev);
+int wl_cfg80211_get_vndr_ouilist(struct bcm_cfg80211 *cfg, uint8 *buf, int max_cnt);
s32 wl_cfg80211_autochannel(struct net_device *dev, char* command, int total_len);
-int wl_cfg80211_check_in4way(struct bcm_cfg80211 *cfg,
- struct net_device *dev, uint action, enum wl_ext_status status, void *context);
#endif /* _wl_cfg80211_h_ */
/*
* Linux cfg80211 driver - Dongle Host Driver (DHD) related
*
- * Copyright (C) 1999-2019, Broadcom.
- *
+ * Copyright (C) 1999-2017, Broadcom Corporation
+ *
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
* under the terms of the GNU General Public License version 2 (the "GPL"),
* available at http://www.broadcom.com/licenses/GPLv2.php, with the
* following added to such license:
- *
+ *
* As a special exception, the copyright holders of this software give you
* permission to link this software with independent modules, and to copy and
* distribute the resulting executable under terms of your choice, provided that
* the license of that module. An independent module is a module which is not
* derived from this software. The special exception does not apply to any
* modifications of the software.
- *
+ *
* Notwithstanding the above, under no circumstances may you combine this
* software in any way with any other Broadcom software provided under a license
* other than the GPL, without Broadcom's express prior written consent.
*
* <<Broadcom-WL-IPTag/Open:>>
*
- * $Id: wl_cfg_btcoex.c 814554 2019-04-11 23:06:22Z $
+ * $Id: wl_cfg_btcoex.c 699163 2017-05-12 05:18:23Z $
*/
#include <net/rtnetlink.h>
extern uint dhd_pkt_filter_enable;
extern uint dhd_master_mode;
extern void dhd_pktfilter_offload_enable(dhd_pub_t * dhd, char *arg, int enable, int master_mode);
-#endif // endif
+#endif
struct btcoex_info {
- timer_list_compat_t timer;
+ struct timer_list timer;
u32 timer_ms;
u32 timer_on;
u32 ts_dhcp_start; /* ms ts record time stats */
/* T2 turn off SCO/SCO suppression is (timeout) */
#define BT_DHCP_FLAG_FORCE_TIME 5500
-#define BTCOEXMODE "BTCOEXMODE"
-#define POWERMODE "POWERMODE"
-
enum wl_cfg80211_btcoex_status {
BT_DHCP_IDLE,
BT_DHCP_START,
} var;
int error;
- bzero(&var, sizeof(var));
- error = bcm_mkiovar(name, (char *)(®), sizeof(reg), (char *)(&var), sizeof(var.buf));
- if (error == 0) {
- return BCME_BUFTOOSHORT;
- }
+ bcm_mkiovar(name, (char *)(®), sizeof(reg),
+ (char *)(&var), sizeof(var.buf));
error = wldev_ioctl_get(dev, WLC_GET_VAR, (char *)(&var), sizeof(var.buf));
*retval = dtoh32(var.val);
dev_wlc_bufvar_set(struct net_device *dev, char *name, char *buf, int len)
{
char ioctlbuf_local[WLC_IOCTL_SMLEN];
- int ret;
- ret = bcm_mkiovar(name, buf, len, ioctlbuf_local, sizeof(ioctlbuf_local));
- if (ret == 0)
- return BCME_BUFTOOSHORT;
- return (wldev_ioctl_set(dev, WLC_SET_VAR, ioctlbuf_local, ret));
-}
+ bcm_mkiovar(name, buf, len, ioctlbuf_local, sizeof(ioctlbuf_local));
+ return (wldev_ioctl_set(dev, WLC_SET_VAR, ioctlbuf_local, sizeof(ioctlbuf_local)));
+}
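Both iovar helpers above pack a "name\0payload" blob before issuing WLC_GET_VAR/WLC_SET_VAR; the newer variant checks bcm_mkiovar()'s return value (0 meaning the destination was too short) and passes the real length to the ioctl, while the restored variant passes sizeof() of the scratch buffer. A hedged, self-contained sketch of that buffer layout, using a hypothetical pack_iovar() rather than the real bcm_mkiovar():

#include <string.h>

/* Hypothetical helper with bcm_mkiovar-like behaviour: copy "name\0" followed by
 * the raw payload into dst, returning the total length or 0 if dst is too small.
 */
static unsigned int pack_iovar(const char *name, const void *param,
                               unsigned int plen, void *dst, unsigned int dlen)
{
	unsigned int nlen = (unsigned int)strlen(name) + 1; /* include the NUL terminator */

	if (nlen + plen > dlen)
		return 0;	/* caller should treat 0 as "buffer too short" */

	memcpy(dst, name, nlen);
	memcpy((char *)dst + nlen, param, plen);
	return nlen + plen;
}

A caller would forward the returned length, not the scratch-buffer size, as the ioctl payload length, which is the practical difference between the two dev_wlc_bufvar_set() versions in this hunk.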
/*
set named driver variable to uint register value and return error indication
calling example: dev_wlc_intvar_set_reg(dev, "btc_params",66, value)
{
char reg_addr[8];
- bzero(reg_addr, sizeof(reg_addr));
+ memset(reg_addr, 0, sizeof(reg_addr));
memcpy((char *)®_addr[0], (char *)addr, 4);
memcpy((char *)®_addr[4], (char *)val, 4);
#if defined(BT_DHCP_USE_FLAGS)
char buf_flag7_dhcp_on[8] = { 7, 00, 00, 00, 0x1, 0x0, 0x00, 0x00 };
char buf_flag7_default[8] = { 7, 00, 00, 00, 0x0, 0x00, 0x00, 0x00};
-#endif // endif
+#endif
+
#if defined(BT_DHCP_eSCO_FIX)
/* set = 1, save & turn on 0 - off & restore prev settings */
set_btc_esco_params(dev, set);
-#endif // endif
+#endif
#if defined(BT_DHCP_USE_FLAGS)
WL_TRACE(("WI-FI priority boost via bt flags, set:%d\n", set));
dev_wlc_bufvar_set(dev, "btc_flags",
(char *)&buf_flag7_default[0],
sizeof(buf_flag7_default));
-#endif // endif
+#endif
}
-static void wl_cfg80211_bt_timerfunc(ulong data)
+static void wl_cfg80211_bt_timerfunc(
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 15, 0)
+ struct timer_list *t
+#else
+ unsigned long data
+#endif
+)
{
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 15, 0)
+ struct btcoex_info *bt_local = from_timer(bt_local, t, timer);
+#else
struct btcoex_info *bt_local = (struct btcoex_info *)data;
+#endif
WL_TRACE(("Enter\n"));
bt_local->timer_on = 0;
schedule_work(&bt_local->work);
{
struct btcoex_info *btcx_inf;
- GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wcast-qual"
+#endif
btcx_inf = container_of(work, struct btcoex_info, work);
- GCC_DIAGNOSTIC_POP();
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic pop
+#endif
if (btcx_inf->timer_on) {
btcx_inf->timer_on = 0;
btco_inf->ts_dhcp_ok = 0;
/* Set up timer for BT */
btco_inf->timer_ms = 10;
- init_timer_compat(&btco_inf->timer, wl_cfg80211_bt_timerfunc, btco_inf);
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 15, 0)
+ timer_setup(&btco_inf->timer, wl_cfg80211_bt_timerfunc, 0);
+#else
+ init_timer(&btco_inf->timer);
+ btco_inf->timer.data = (ulong)btco_inf;
+ btco_inf->timer.function = wl_cfg80211_bt_timerfunc;
+#endif
btco_inf->dev = ndev;
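The timer hunks above replace the driver's timer_list_compat wrapper with an explicit split at Linux 4.15, where kernel timers dropped the data cookie: newer kernels use timer_setup() and recover the container with from_timer(), older ones still fill .data/.function after init_timer(). A minimal standalone version of that compatibility pattern (the context struct and names are illustrative, not part of the driver):

#include <linux/timer.h>
#include <linux/jiffies.h>
#include <linux/version.h>

struct demo_ctx {
	struct timer_list timer;
	int fired;
};

#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 15, 0)
static void demo_timer_cb(struct timer_list *t)
{
	/* Recover the enclosing context from the embedded timer_list. */
	struct demo_ctx *ctx = from_timer(ctx, t, timer);
	ctx->fired = 1;
}
#else
static void demo_timer_cb(unsigned long data)
{
	/* Pre-4.15: the context pointer travels in the timer's data cookie. */
	struct demo_ctx *ctx = (struct demo_ctx *)data;
	ctx->fired = 1;
}
#endif

static void demo_timer_init(struct demo_ctx *ctx)
{
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 15, 0)
	timer_setup(&ctx->timer, demo_timer_cb, 0);
#else
	init_timer(&ctx->timer);
	ctx->timer.data = (unsigned long)ctx;
	ctx->timer.function = demo_timer_cb;
#endif
	mod_timer(&ctx->timer, jiffies + msecs_to_jiffies(10));
}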
struct btcoex_info *btco_inf = btcoex_info_loc;
char powermode_val = 0;
- uint8 cmd_len = 0;
char buf_reg66va_dhcp_on[8] = { 66, 00, 00, 00, 0x10, 0x27, 0x00, 0x00 };
char buf_reg41va_dhcp_on[8] = { 41, 00, 00, 00, 0x33, 0x00, 0x00, 0x00 };
char buf_reg68va_dhcp_on[8] = { 68, 00, 00, 00, 0x90, 0x01, 0x00, 0x00 };
char buf_flag7_default[8] = { 7, 00, 00, 00, 0x0, 0x00, 0x00, 0x00};
/* Figure out powermode 1 or 0 command */
- cmd_len = sizeof(BTCOEXMODE);
- powermode_val = command[cmd_len];
+ strncpy((char *)&powermode_val, command + strlen("BTCOEXMODE") +1, 1);
- if (powermode_val == '1') {
+ if (strnicmp((char *)&powermode_val, "1", strlen("1")) == 0) {
WL_TRACE_HW4(("DHCP session starts\n"));
+
#ifdef PKT_FILTER_SUPPORT
dhd->dhcp_in_progress = 1;
-#if defined(APSTA_BLOCK_ARP_DURING_DHCP)
- if (DHD_OPMODE_STA_SOFTAP_CONCURR(dhd)) {
+#if defined(WL_VIRTUAL_APSTA) && defined(APSTA_BLOCK_ARP_DURING_DHCP)
+ if ((dhd->op_mode & DHD_FLAG_CONCURR_STA_HOSTAP_MODE) ==
+ DHD_FLAG_CONCURR_STA_HOSTAP_MODE) {
/* Block ARP frames while DHCP of STA interface is in
* progress in case of STA/SoftAP concurrent mode
*/
wl_cfg80211_block_arp(dev, TRUE);
} else
-#endif /* APSTA_BLOCK_ARP_DURING_DHCP */
+#endif /* WL_VIRTUAL_APSTA && APSTA_BLOCK_ARP_DURING_DHCP */
if (dhd->early_suspended) {
WL_TRACE_HW4(("DHCP in progressing , disable packet filter!!!\n"));
dhd_enable_packet_filter(0, dhd);
btco_inf->bt_state = BT_DHCP_START;
btco_inf->timer_on = 1;
- mod_timer(&btco_inf->timer,
- timer_expires(&btco_inf->timer));
+ mod_timer(&btco_inf->timer, btco_inf->timer.expires);
WL_TRACE(("enable BT DHCP Timer\n"));
}
}
WL_ERR(("was called w/o DHCP OFF. Continue\n"));
}
}
- else if (powermode_val == '2') {
+ else if (strnicmp((char *)&powermode_val, "2", strlen("2")) == 0) {
+
+
#ifdef PKT_FILTER_SUPPORT
dhd->dhcp_in_progress = 0;
WL_TRACE_HW4(("DHCP is complete \n"));
-#if defined(APSTA_BLOCK_ARP_DURING_DHCP)
- if (DHD_OPMODE_STA_SOFTAP_CONCURR(dhd)) {
+#if defined(WL_VIRTUAL_APSTA) && defined(APSTA_BLOCK_ARP_DURING_DHCP)
+ if ((dhd->op_mode & DHD_FLAG_CONCURR_STA_HOSTAP_MODE) ==
+ DHD_FLAG_CONCURR_STA_HOSTAP_MODE) {
/* Unblock ARP frames */
wl_cfg80211_block_arp(dev, FALSE);
} else
-#endif /* APSTA_BLOCK_ARP_DURING_DHCP */
+#endif /* WL_VIRTUAL_APSTA && APSTA_BLOCK_ARP_DURING_DHCP */
if (dhd->early_suspended) {
/* Enable packet filtering */
WL_TRACE_HW4(("DHCP is complete , enable packet filter!!!\n"));
WL_ERR(("Unkwown yet power setting, ignored\n"));
}
- return (snprintf(command, sizeof("OK"), "OK") + 1);
+ snprintf(command, 3, "OK");
+
+ return (strlen("OK"));
}
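wl_cfg80211_set_btcoex_dhcp() above distinguishes "BTCOEXMODE 1" (DHCP starting) from "BTCOEXMODE 2" (DHCP done) by looking at the single character after the keyword; the restored code reads it via strncpy()+strnicmp(), the removed code by direct indexing. A small sketch of the same parse, assuming the command always carries a separator and one digit after the keyword (constants and names are illustrative):

#include <string.h>

#define BTCOEX_DHCP_NONE  0
#define BTCOEX_DHCP_START 1
#define BTCOEX_DHCP_DONE  2

/* Parse "BTCOEXMODE <digit>" and map it to a DHCP phase. */
static int parse_btcoex_mode(const char *command)
{
	char mode;

	if (strncmp(command, "BTCOEXMODE", strlen("BTCOEXMODE")) != 0)
		return BTCOEX_DHCP_NONE;

	/* Character right after "BTCOEXMODE " -- the same offset the driver uses. */
	mode = command[strlen("BTCOEXMODE") + 1];
	if (mode == '1')
		return BTCOEX_DHCP_START;
	if (mode == '2')
		return BTCOEX_DHCP_DONE;
	return BTCOEX_DHCP_NONE;
}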
/*
* Neighbor Awareness Networking
*
- * Copyright (C) 1999-2019, Broadcom.
- *
+ * Copyright (C) 1999-2017, Broadcom Corporation
+ *
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
* under the terms of the GNU General Public License version 2 (the "GPL"),
* available at http://www.broadcom.com/licenses/GPLv2.php, with the
* following added to such license:
- *
+ *
* As a special exception, the copyright holders of this software give you
* permission to link this software with independent modules, and to copy and
* distribute the resulting executable under terms of your choice, provided that
* the license of that module. An independent module is a module which is not
* derived from this software. The special exception does not apply to any
* modifications of the software.
- *
+ *
* Notwithstanding the above, under no circumstances may you combine this
* software in any way with any other Broadcom software provided under a license
* other than the GPL, without Broadcom's express prior written consent.
*
- *
* <<Broadcom-WL-IPTag/Open:>>
*
- * $Id: wl_cfgnan.c 825970 2019-06-18 05:28:31Z $
- */
-
-#ifdef WL_NAN
-#include <bcmutils.h>
-#include <bcmendian.h>
-#include <bcmwifi_channels.h>
-#include <nan.h>
-#include <bcmiov.h>
-#include <net/rtnetlink.h>
-
-#include <wl_cfg80211.h>
-#include <wl_cfgscan.h>
-#include <wl_android.h>
-#include <wl_cfgnan.h>
-
-#include <dngl_stats.h>
-#include <dhd.h>
-#ifdef RTT_SUPPORT
-#include <dhd_rtt.h>
-#endif /* RTT_SUPPORT */
-#include <wl_cfgvendor.h>
-#include <bcmbloom.h>
-#include <wl_cfgp2p.h>
-#ifdef RTT_SUPPORT
-#include <dhd_rtt.h>
-#endif /* RTT_SUPPORT */
-#include <bcmstdlib_s.h>
-
-#define NAN_RANGE_REQ_EVNT 1
-#define NAN_RAND_MAC_RETRIES 10
-#define NAN_SCAN_DWELL_TIME_DELTA_MS 10
-
-#ifdef WL_NAN_DISC_CACHE
-/* Disc Cache Parameters update Flags */
-#define NAN_DISC_CACHE_PARAM_SDE_CONTROL 0x0001
-
-static int wl_cfgnan_cache_disc_result(struct bcm_cfg80211 *cfg, void * data,
- u16 *disc_cache_update_flags);
-static int wl_cfgnan_remove_disc_result(struct bcm_cfg80211 * cfg, uint8 local_subid);
-static nan_disc_result_cache * wl_cfgnan_get_disc_result(struct bcm_cfg80211 *cfg,
- uint8 remote_pubid, struct ether_addr *peer);
-#endif /* WL_NAN_DISC_CACHE */
-static int wl_cfgnan_clear_disc_cache(struct bcm_cfg80211 *cfg, wl_nan_instance_id_t sub_id);
-static int wl_cfgnan_set_if_addr(struct bcm_cfg80211 *cfg);
-
-static int wl_cfgnan_get_capability(struct net_device *ndev,
- struct bcm_cfg80211 *cfg, nan_hal_capabilities_t *capabilities);
-
-static int32 wl_cfgnan_notify_disc_with_ranging(struct bcm_cfg80211 *cfg,
- nan_ranging_inst_t *rng_inst, nan_event_data_t *nan_event_data, uint32 distance);
-
-static void wl_cfgnan_disc_result_on_geofence_cancel(struct bcm_cfg80211 *cfg,
- nan_ranging_inst_t *rng_inst);
-
-static void wl_cfgnan_clear_nan_event_data(struct bcm_cfg80211 *cfg,
- nan_event_data_t *nan_event_data);
-
-void wl_cfgnan_data_remove_peer(struct bcm_cfg80211 *cfg,
- struct ether_addr *peer_addr);
-
-static void wl_cfgnan_send_stop_event(struct bcm_cfg80211 *cfg);
-
-static void wl_cfgnan_terminate_ranging_session(struct bcm_cfg80211 *cfg,
- nan_ranging_inst_t *ranging_inst);
-
-#ifdef RTT_SUPPORT
-static s32 wl_cfgnan_clear_peer_ranging(struct bcm_cfg80211 * cfg,
- struct ether_addr * peer, int reason);
-#endif /* RTT_SUPPORT */
-
-static const char *nan_role_to_str(u8 role)
-{
- switch (role) {
- C2S(WL_NAN_ROLE_AUTO)
- C2S(WL_NAN_ROLE_NON_MASTER_NON_SYNC)
- C2S(WL_NAN_ROLE_NON_MASTER_SYNC)
- C2S(WL_NAN_ROLE_MASTER)
- C2S(WL_NAN_ROLE_ANCHOR_MASTER)
- default:
- return "WL_NAN_ROLE_UNKNOWN";
- }
-}
-
-static const char *nan_event_to_str(u16 cmd)
-{
- switch (cmd) {
- C2S(WL_NAN_EVENT_START)
- C2S(WL_NAN_EVENT_DISCOVERY_RESULT)
- C2S(WL_NAN_EVENT_TERMINATED)
- C2S(WL_NAN_EVENT_RECEIVE)
- C2S(WL_NAN_EVENT_MERGE)
- C2S(WL_NAN_EVENT_STOP)
- C2S(WL_NAN_EVENT_PEER_DATAPATH_IND)
- C2S(WL_NAN_EVENT_DATAPATH_ESTB)
- C2S(WL_NAN_EVENT_SDF_RX)
- C2S(WL_NAN_EVENT_DATAPATH_END)
- C2S(WL_NAN_EVENT_RNG_REQ_IND)
- C2S(WL_NAN_EVENT_RNG_RPT_IND)
- C2S(WL_NAN_EVENT_RNG_TERM_IND)
- C2S(WL_NAN_EVENT_TXS)
- C2S(WL_NAN_EVENT_INVALID)
-
- default:
- return "WL_NAN_EVENT_UNKNOWN";
- }
-}
-
-static int wl_cfgnan_execute_ioctl(struct net_device *ndev,
- struct bcm_cfg80211 *cfg, bcm_iov_batch_buf_t *nan_buf,
- uint16 nan_buf_size, uint32 *status, uint8 *resp_buf,
- uint16 resp_buf_len);
-int
-wl_cfgnan_generate_inst_id(struct bcm_cfg80211 *cfg, uint8 *p_inst_id)
-{
- s32 ret = BCME_OK;
- uint8 i = 0;
- if (p_inst_id == NULL) {
- WL_ERR(("Invalid arguments\n"));
- ret = -EINVAL;
- goto exit;
- }
-
- if (cfg->nancfg.inst_id_start == NAN_ID_MAX) {
- WL_ERR(("Consumed all IDs, resetting the counter\n"));
- cfg->nancfg.inst_id_start = 0;
- }
-
- for (i = cfg->nancfg.inst_id_start; i < NAN_ID_MAX; i++) {
- if (isclr(cfg->nancfg.svc_inst_id_mask, i)) {
- setbit(cfg->nancfg.svc_inst_id_mask, i);
- *p_inst_id = i + 1;
- cfg->nancfg.inst_id_start = *p_inst_id;
- WL_DBG(("Instance ID=%d\n", *p_inst_id));
- goto exit;
- }
- }
- WL_ERR(("Allocated maximum IDs\n"));
- ret = BCME_NORESOURCE;
-exit:
- return ret;
-}
-
-int
-wl_cfgnan_remove_inst_id(struct bcm_cfg80211 *cfg, uint8 inst_id)
-{
- s32 ret = BCME_OK;
- WL_DBG(("%s: Removing svc instance id %d\n", __FUNCTION__, inst_id));
- clrbit(cfg->nancfg.svc_inst_id_mask, inst_id-1);
- return ret;
-}
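The removed wl_cfgnan_generate_inst_id()/wl_cfgnan_remove_inst_id() pair hands out NAN service instance IDs from a bitmap: scan from inst_id_start for a clear bit, set it, and report bit+1; freeing clears bit id-1. A self-contained sketch of that allocator with plain C bit operations in place of the Broadcom isclr/setbit/clrbit macros (an ID_MAX of 255 is assumed here to mirror NAN_ID_MAX, not confirmed by this patch):

#include <stdint.h>

#define ID_MAX 255	/* assumed maximum, standing in for NAN_ID_MAX */

struct id_pool {
	uint8_t mask[(ID_MAX + 7) / 8];
	uint8_t start;	/* rotating search start, like inst_id_start */
};

static int id_alloc(struct id_pool *p, uint8_t *out_id)
{
	unsigned int i;

	if (p->start == ID_MAX)
		p->start = 0;	/* consumed all IDs once: restart the scan */

	for (i = p->start; i < ID_MAX; i++) {
		if (!(p->mask[i / 8] & (1u << (i % 8)))) {
			p->mask[i / 8] |= (1u << (i % 8));
			*out_id = (uint8_t)(i + 1);	/* IDs are 1-based */
			p->start = *out_id;
			return 0;
		}
	}
	return -1;	/* pool exhausted */
}

static void id_free(struct id_pool *p, uint8_t id)
{
	unsigned int i = id - 1;

	p->mask[i / 8] &= ~(1u << (i % 8));
}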
-s32 wl_cfgnan_parse_sdea_data(osl_t *osh, const uint8 *p_attr,
- uint16 len, nan_event_data_t *tlv_data)
-{
- const wifi_nan_svc_desc_ext_attr_t *nan_svc_desc_ext_attr = NULL;
- uint8 offset;
- s32 ret = BCME_OK;
-
- /* service descriptor ext attributes */
- nan_svc_desc_ext_attr = (const wifi_nan_svc_desc_ext_attr_t *)p_attr;
-
- /* attribute ID */
- WL_TRACE(("> attr id: 0x%02x\n", nan_svc_desc_ext_attr->id));
-
- /* attribute length */
- WL_TRACE(("> attr len: 0x%x\n", nan_svc_desc_ext_attr->len));
- if (nan_svc_desc_ext_attr->instance_id == tlv_data->pub_id) {
- tlv_data->sde_control_flag = nan_svc_desc_ext_attr->control;
- }
- offset = sizeof(*nan_svc_desc_ext_attr);
- if (offset > len) {
- WL_ERR(("Invalid event buffer len\n"));
- ret = BCME_BUFTOOSHORT;
- goto fail;
- }
- p_attr += offset;
- len -= offset;
-
- if (tlv_data->sde_control_flag & NAN_SC_RANGE_LIMITED) {
- WL_TRACE(("> svc_control: range limited present\n"));
- }
- if (tlv_data->sde_control_flag & NAN_SDE_CF_SVC_UPD_IND_PRESENT) {
- WL_TRACE(("> svc_control: sdea svc specific info present\n"));
- tlv_data->sde_svc_info.dlen = (p_attr[1] | (p_attr[2] << 8));
- WL_TRACE(("> sdea svc info len: 0x%02x\n", tlv_data->sde_svc_info.dlen));
- if (!tlv_data->sde_svc_info.dlen ||
- tlv_data->sde_svc_info.dlen > NAN_MAX_SERVICE_SPECIFIC_INFO_LEN) {
- /* must be able to handle null msg which is not error */
- tlv_data->sde_svc_info.dlen = 0;
- WL_ERR(("sde data length is invalid\n"));
- ret = BCME_BADLEN;
- goto fail;
- }
-
- if (tlv_data->sde_svc_info.dlen > 0) {
- tlv_data->sde_svc_info.data = MALLOCZ(osh, tlv_data->sde_svc_info.dlen);
- if (!tlv_data->sde_svc_info.data) {
- WL_ERR(("%s: memory allocation failed\n", __FUNCTION__));
- tlv_data->sde_svc_info.dlen = 0;
- ret = BCME_NOMEM;
- goto fail;
- }
- /* advance read pointer, consider sizeof of Service Update Indicator */
- offset = sizeof(tlv_data->sde_svc_info.dlen) - 1;
- if (offset > len) {
- WL_ERR(("Invalid event buffer len\n"));
- ret = BCME_BUFTOOSHORT;
- goto fail;
- }
- p_attr += offset;
- len -= offset;
- ret = memcpy_s(tlv_data->sde_svc_info.data, tlv_data->sde_svc_info.dlen,
- p_attr, tlv_data->sde_svc_info.dlen);
- if (ret != BCME_OK) {
- WL_ERR(("Failed to copy sde_svc_info\n"));
- goto fail;
- }
- } else {
- /* must be able to handle null msg which is not error */
- tlv_data->sde_svc_info.dlen = 0;
- WL_DBG(("%s: sdea svc info length is zero, null info data\n",
- __FUNCTION__));
- }
- }
- return ret;
-fail:
- if (tlv_data->sde_svc_info.data) {
- MFREE(osh, tlv_data->sde_svc_info.data,
- tlv_data->sde_svc_info.dlen);
- tlv_data->sde_svc_info.data = NULL;
- }
-
- WL_DBG(("Parse SDEA event data, status = %d\n", ret));
- return ret;
-}
-
-/*
- * This attribute contains some mandatory fields and some optional fields
- * depending on the content of the service discovery request.
- */
-s32
-wl_cfgnan_parse_sda_data(osl_t *osh, const uint8 *p_attr,
- uint16 len, nan_event_data_t *tlv_data)
-{
- uint8 svc_control = 0, offset = 0;
- s32 ret = BCME_OK;
- const wifi_nan_svc_descriptor_attr_t *nan_svc_desc_attr = NULL;
-
- /* service descriptor attributes */
- nan_svc_desc_attr = (const wifi_nan_svc_descriptor_attr_t *)p_attr;
- /* attribute ID */
- WL_TRACE(("> attr id: 0x%02x\n", nan_svc_desc_attr->id));
-
- /* attribute length */
- WL_TRACE(("> attr len: 0x%x\n", nan_svc_desc_attr->len));
-
- /* service ID */
- ret = memcpy_s(tlv_data->svc_name, sizeof(tlv_data->svc_name),
- nan_svc_desc_attr->svc_hash, NAN_SVC_HASH_LEN);
- if (ret != BCME_OK) {
- WL_ERR(("Failed to copy svc_hash_name:\n"));
- return ret;
- }
- WL_TRACE(("> svc_hash_name: " MACDBG "\n", MAC2STRDBG(tlv_data->svc_name)));
-
- /* local instance ID */
- tlv_data->local_inst_id = nan_svc_desc_attr->instance_id;
- WL_TRACE(("> local instance id: 0x%02x\n", tlv_data->local_inst_id));
-
- /* requestor instance ID */
- tlv_data->requestor_id = nan_svc_desc_attr->requestor_id;
- WL_TRACE(("> requestor id: 0x%02x\n", tlv_data->requestor_id));
-
- /* service control */
- svc_control = nan_svc_desc_attr->svc_control;
- if ((svc_control & NAN_SVC_CONTROL_TYPE_MASK) == NAN_SC_PUBLISH) {
- WL_TRACE(("> Service control type: NAN_SC_PUBLISH\n"));
- } else if ((svc_control & NAN_SVC_CONTROL_TYPE_MASK) == NAN_SC_SUBSCRIBE) {
- WL_TRACE(("> Service control type: NAN_SC_SUBSCRIBE\n"));
- } else if ((svc_control & NAN_SVC_CONTROL_TYPE_MASK) == NAN_SC_FOLLOWUP) {
- WL_TRACE(("> Service control type: NAN_SC_FOLLOWUP\n"));
- }
- offset = sizeof(*nan_svc_desc_attr);
- if (offset > len) {
- WL_ERR(("Invalid event buffer len\n"));
- ret = BCME_BUFTOOSHORT;
- goto fail;
- }
- p_attr += offset;
- len -= offset;
-
- /*
- * optional fields:
- * must be in order following by service descriptor attribute format
- */
-
- /* binding bitmap */
- if (svc_control & NAN_SC_BINDING_BITMAP_PRESENT) {
- uint16 bitmap = 0;
- WL_TRACE(("> svc_control: binding bitmap present\n"));
-
- /* Copy binding bitmap */
- ret = memcpy_s(&bitmap, sizeof(bitmap),
- p_attr, NAN_BINDING_BITMAP_LEN);
- if (ret != BCME_OK) {
- WL_ERR(("Failed to copy bit map\n"));
- return ret;
- }
- WL_TRACE(("> sc binding bitmap: 0x%04x\n", bitmap));
-
- if (NAN_BINDING_BITMAP_LEN > len) {
- WL_ERR(("Invalid event buffer len\n"));
- ret = BCME_BUFTOOSHORT;
- goto fail;
- }
- p_attr += NAN_BINDING_BITMAP_LEN;
- len -= NAN_BINDING_BITMAP_LEN;
- }
-
- /* matching filter */
- if (svc_control & NAN_SC_MATCHING_FILTER_PRESENT) {
- WL_TRACE(("> svc_control: matching filter present\n"));
-
- tlv_data->tx_match_filter.dlen = *p_attr++;
- WL_TRACE(("> matching filter len: 0x%02x\n",
- tlv_data->tx_match_filter.dlen));
-
- if (!tlv_data->tx_match_filter.dlen ||
- tlv_data->tx_match_filter.dlen > MAX_MATCH_FILTER_LEN) {
- tlv_data->tx_match_filter.dlen = 0;
- WL_ERR(("tx match filter length is invalid\n"));
- ret = -EINVAL;
- goto fail;
- }
- tlv_data->tx_match_filter.data =
- MALLOCZ(osh, tlv_data->tx_match_filter.dlen);
- if (!tlv_data->tx_match_filter.data) {
- WL_ERR(("%s: memory allocation failed\n", __FUNCTION__));
- tlv_data->tx_match_filter.dlen = 0;
- ret = -ENOMEM;
- goto fail;
- }
- ret = memcpy_s(tlv_data->tx_match_filter.data, tlv_data->tx_match_filter.dlen,
- p_attr, tlv_data->tx_match_filter.dlen);
- if (ret != BCME_OK) {
- WL_ERR(("Failed to copy tx match filter data\n"));
- goto fail;
- }
- /* advance read pointer */
- offset = tlv_data->tx_match_filter.dlen;
- if (offset > len) {
- WL_ERR(("Invalid event buffer\n"));
- ret = BCME_BUFTOOSHORT;
- goto fail;
- }
- p_attr += offset;
- len -= offset;
- }
-
- /* service response filter */
- if (svc_control & NAN_SC_SR_FILTER_PRESENT) {
- WL_TRACE(("> svc_control: service response filter present\n"));
-
- tlv_data->rx_match_filter.dlen = *p_attr++;
- WL_TRACE(("> sr match filter len: 0x%02x\n",
- tlv_data->rx_match_filter.dlen));
-
- if (!tlv_data->rx_match_filter.dlen ||
- tlv_data->rx_match_filter.dlen > MAX_MATCH_FILTER_LEN) {
- tlv_data->rx_match_filter.dlen = 0;
- WL_ERR(("%s: sr matching filter length is invalid\n",
- __FUNCTION__));
- ret = BCME_BADLEN;
- goto fail;
- }
- tlv_data->rx_match_filter.data =
- MALLOCZ(osh, tlv_data->rx_match_filter.dlen);
- if (!tlv_data->rx_match_filter.data) {
- WL_ERR(("%s: memory allocation failed\n", __FUNCTION__));
- tlv_data->rx_match_filter.dlen = 0;
- ret = BCME_NOMEM;
- goto fail;
- }
-
- ret = memcpy_s(tlv_data->rx_match_filter.data, tlv_data->rx_match_filter.dlen,
- p_attr, tlv_data->rx_match_filter.dlen);
- if (ret != BCME_OK) {
- WL_ERR(("Failed to copy rx match filter data\n"));
- goto fail;
- }
-
- /* advance read pointer */
- offset = tlv_data->rx_match_filter.dlen;
- if (offset > len) {
- WL_ERR(("Invalid event buffer len\n"));
- ret = BCME_BUFTOOSHORT;
- goto fail;
- }
- p_attr += offset;
- len -= offset;
- }
-
- /* service specific info */
- if (svc_control & NAN_SC_SVC_INFO_PRESENT) {
- WL_TRACE(("> svc_control: svc specific info present\n"));
-
- tlv_data->svc_info.dlen = *p_attr++;
- WL_TRACE(("> svc info len: 0x%02x\n", tlv_data->svc_info.dlen));
-
- if (!tlv_data->svc_info.dlen ||
- tlv_data->svc_info.dlen > NAN_MAX_SERVICE_SPECIFIC_INFO_LEN) {
- /* must be able to handle null msg which is not error */
- tlv_data->svc_info.dlen = 0;
- WL_ERR(("sde data length is invalid\n"));
- ret = BCME_BADLEN;
- goto fail;
- }
-
- if (tlv_data->svc_info.dlen > 0) {
- tlv_data->svc_info.data =
- MALLOCZ(osh, tlv_data->svc_info.dlen);
- if (!tlv_data->svc_info.data) {
- WL_ERR(("%s: memory allocation failed\n", __FUNCTION__));
- tlv_data->svc_info.dlen = 0;
- ret = BCME_NOMEM;
- goto fail;
- }
- ret = memcpy_s(tlv_data->svc_info.data, tlv_data->svc_info.dlen,
- p_attr, tlv_data->svc_info.dlen);
- if (ret != BCME_OK) {
- WL_ERR(("Failed to copy svc info\n"));
- goto fail;
- }
-
- /* advance read pointer */
- offset = tlv_data->svc_info.dlen;
- if (offset > len) {
- WL_ERR(("Invalid event buffer len\n"));
- ret = BCME_BUFTOOSHORT;
- goto fail;
- }
- p_attr += offset;
- len -= offset;
- } else {
- /* must be able to handle null msg which is not error */
- tlv_data->svc_info.dlen = 0;
- WL_TRACE(("%s: svc info length is zero, null info data\n",
- __FUNCTION__));
- }
- }
-
- /*
- * discovery range limited:
- * If set to 1, the pub/sub msg is limited in range to close proximity.
- * If set to 0, the pub/sub msg is not limited in range.
- * Valid only when the message is either of a publish or a sub.
- */
- if (svc_control & NAN_SC_RANGE_LIMITED) {
- if (((svc_control & NAN_SVC_CONTROL_TYPE_MASK) == NAN_SC_PUBLISH) ||
- ((svc_control & NAN_SVC_CONTROL_TYPE_MASK) == NAN_SC_SUBSCRIBE)) {
- WL_TRACE(("> svc_control: range limited present\n"));
- } else {
- WL_TRACE(("range limited is only valid on pub or sub\n"));
- }
-
- /* TODO: send up */
-
- /* advance read pointer */
- p_attr++;
- }
- return ret;
-fail:
- if (tlv_data->tx_match_filter.data) {
- MFREE(osh, tlv_data->tx_match_filter.data,
- tlv_data->tx_match_filter.dlen);
- tlv_data->tx_match_filter.data = NULL;
- }
- if (tlv_data->rx_match_filter.data) {
- MFREE(osh, tlv_data->rx_match_filter.data,
- tlv_data->rx_match_filter.dlen);
- tlv_data->rx_match_filter.data = NULL;
- }
- if (tlv_data->svc_info.data) {
- MFREE(osh, tlv_data->svc_info.data,
- tlv_data->svc_info.dlen);
- tlv_data->svc_info.data = NULL;
- }
-
- WL_DBG(("Parse SDA event data, status = %d\n", ret));
- return ret;
-}
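The SDA/SDEA parsers above follow one defensive pattern when walking variable-length attributes: compute the size of the next field, bail out with BCME_BUFTOOSHORT if it would overrun the remaining length, then advance the read pointer and shrink the residual length together. A reduced sketch of that cursor discipline; the 1-byte type plus 2-byte little-endian length framing below is an assumption for illustration, not the exact NAN attribute layout:

#include <stdint.h>
#include <stddef.h>

/* Walk a buffer of [type:1][len:2 LE][value:len] records, calling cb per record.
 * Returns 0 on success, -1 if any record would run past the end of the buffer.
 */
static int walk_attrs(const uint8_t *buf, size_t len,
                      void (*cb)(uint8_t type, const uint8_t *val, uint16_t vlen))
{
	while (len >= 3) {
		uint8_t type = buf[0];
		uint16_t vlen = (uint16_t)(buf[1] | (buf[2] << 8));
		size_t rec = 3 + (size_t)vlen;

		if (rec > len)
			return -1;	/* truncated record: stop, never read past the end */

		cb(type, buf + 3, vlen);
		buf += rec;		/* advance the cursor ... */
		len -= rec;		/* ... and shrink the residual length in lockstep */
	}
	return len ? -1 : 0;
}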
-
-static s32
-wl_cfgnan_parse_sd_attr_data(osl_t *osh, uint16 len, const uint8 *data,
- nan_event_data_t *tlv_data, uint16 type) {
- const uint8 *p_attr = data;
- uint16 offset = 0;
- s32 ret = BCME_OK;
- const wl_nan_event_disc_result_t *ev_disc = NULL;
- const wl_nan_event_replied_t *ev_replied = NULL;
- const wl_nan_ev_receive_t *ev_fup = NULL;
-
- /*
- * Mapping wifi_nan_svc_descriptor_attr_t, and svc controls are optional.
- */
- if (type == WL_NAN_XTLV_SD_DISC_RESULTS) {
- u8 iter;
- ev_disc = (const wl_nan_event_disc_result_t *)p_attr;
-
- WL_DBG((">> WL_NAN_XTLV_RESULTS: Discovery result\n"));
-
- tlv_data->pub_id = (wl_nan_instance_id_t)ev_disc->pub_id;
- tlv_data->sub_id = (wl_nan_instance_id_t)ev_disc->sub_id;
- tlv_data->publish_rssi = ev_disc->publish_rssi;
- ret = memcpy_s(&tlv_data->remote_nmi, ETHER_ADDR_LEN,
- &ev_disc->pub_mac, ETHER_ADDR_LEN);
- if (ret != BCME_OK) {
- WL_ERR(("Failed to copy remote nmi\n"));
- goto fail;
- }
-
- WL_TRACE(("publish id: %d\n", ev_disc->pub_id));
- WL_TRACE(("subscribe d: %d\n", ev_disc->sub_id));
- WL_TRACE(("publish mac addr: " MACDBG "\n",
- MAC2STRDBG(ev_disc->pub_mac.octet)));
- WL_TRACE(("publish rssi: %d\n", (int8)ev_disc->publish_rssi));
- WL_TRACE(("attribute no: %d\n", ev_disc->attr_num));
- WL_TRACE(("attribute len: %d\n", (uint16)ev_disc->attr_list_len));
-
- /* advance to the service descricptor */
- offset = OFFSETOF(wl_nan_event_disc_result_t, attr_list[0]);
- if (offset > len) {
- WL_ERR(("Invalid event buffer len\n"));
- ret = BCME_BUFTOOSHORT;
- goto fail;
- }
- p_attr += offset;
- len -= offset;
-
- iter = ev_disc->attr_num;
- while (iter) {
- if ((uint8)*p_attr == NAN_ATTR_SVC_DESCRIPTOR) {
- WL_TRACE(("> attr id: 0x%02x\n", (uint8)*p_attr));
- ret = wl_cfgnan_parse_sda_data(osh, p_attr, len, tlv_data);
- if (unlikely(ret)) {
- WL_ERR(("wl_cfgnan_parse_sda_data failed,"
- "error = %d \n", ret));
- goto fail;
- }
- }
-
- if ((uint8)*p_attr == NAN_ATTR_SVC_DESC_EXTENSION) {
- WL_TRACE(("> attr id: 0x%02x\n", (uint8)*p_attr));
- ret = wl_cfgnan_parse_sdea_data(osh, p_attr, len, tlv_data);
- if (unlikely(ret)) {
- WL_ERR(("wl_cfgnan_parse_sdea_data failed,"
- "error = %d \n", ret));
- goto fail;
- }
- }
- offset = (sizeof(*p_attr) +
- sizeof(ev_disc->attr_list_len) +
- (p_attr[1] | (p_attr[2] << 8)));
- if (offset > len) {
- WL_ERR(("Invalid event buffer len\n"));
- ret = BCME_BUFTOOSHORT;
- goto fail;
- }
- p_attr += offset;
- len -= offset;
- iter--;
- }
- } else if (type == WL_NAN_XTLV_SD_FUP_RECEIVED) {
- uint8 iter;
- ev_fup = (const wl_nan_ev_receive_t *)p_attr;
-
- WL_TRACE((">> WL_NAN_XTLV_SD_FUP_RECEIVED: Transmit follow-up\n"));
-
- tlv_data->local_inst_id = (wl_nan_instance_id_t)ev_fup->local_id;
- tlv_data->requestor_id = (wl_nan_instance_id_t)ev_fup->remote_id;
- tlv_data->fup_rssi = ev_fup->fup_rssi;
- ret = memcpy_s(&tlv_data->remote_nmi, ETHER_ADDR_LEN,
- &ev_fup->remote_addr, ETHER_ADDR_LEN);
- if (ret != BCME_OK) {
- WL_ERR(("Failed to copy remote nmi\n"));
- goto fail;
- }
-
- WL_TRACE(("local id: %d\n", ev_fup->local_id));
- WL_TRACE(("remote id: %d\n", ev_fup->remote_id));
- WL_TRACE(("peer mac addr: " MACDBG "\n",
- MAC2STRDBG(ev_fup->remote_addr.octet)));
- WL_TRACE(("peer rssi: %d\n", (int8)ev_fup->fup_rssi));
- WL_TRACE(("attribute no: %d\n", ev_fup->attr_num));
- WL_TRACE(("attribute len: %d\n", ev_fup->attr_list_len));
-
- /* advance to the service descriptor which is attr_list[0] */
- offset = OFFSETOF(wl_nan_ev_receive_t, attr_list[0]);
- if (offset > len) {
- WL_ERR(("Invalid event buffer len\n"));
- ret = BCME_BUFTOOSHORT;
- goto fail;
- }
- p_attr += offset;
- len -= offset;
-
- iter = ev_fup->attr_num;
- while (iter) {
- if ((uint8)*p_attr == NAN_ATTR_SVC_DESCRIPTOR) {
- WL_TRACE(("> attr id: 0x%02x\n", (uint8)*p_attr));
- ret = wl_cfgnan_parse_sda_data(osh, p_attr, len, tlv_data);
- if (unlikely(ret)) {
- WL_ERR(("wl_cfgnan_parse_sda_data failed,"
- "error = %d \n", ret));
- goto fail;
- }
- }
-
- if ((uint8)*p_attr == NAN_ATTR_SVC_DESC_EXTENSION) {
- WL_TRACE(("> attr id: 0x%02x\n", (uint8)*p_attr));
- ret = wl_cfgnan_parse_sdea_data(osh, p_attr, len, tlv_data);
- if (unlikely(ret)) {
- WL_ERR(("wl_cfgnan_parse_sdea_data failed,"
- "error = %d \n", ret));
- goto fail;
- }
- }
- offset = (sizeof(*p_attr) +
- sizeof(ev_fup->attr_list_len) +
- (p_attr[1] | (p_attr[2] << 8)));
- if (offset > len) {
- WL_ERR(("Invalid event buffer len\n"));
- ret = BCME_BUFTOOSHORT;
- goto fail;
- }
- p_attr += offset;
- len -= offset;
- iter--;
- }
- } else if (type == WL_NAN_XTLV_SD_SDF_RX) {
- /*
- * SDF followed by nan2_pub_act_frame_t and wifi_nan_svc_descriptor_attr_t,
- * and svc controls are optional.
- */
- const nan2_pub_act_frame_t *nan_pub_af =
- (const nan2_pub_act_frame_t *)p_attr;
-
- WL_TRACE((">> WL_NAN_XTLV_SD_SDF_RX\n"));
-
- /* nan2_pub_act_frame_t */
- WL_TRACE(("pub category: 0x%02x\n", nan_pub_af->category_id));
- WL_TRACE(("pub action: 0x%02x\n", nan_pub_af->action_field));
- WL_TRACE(("nan oui: %2x-%2x-%2x\n",
- nan_pub_af->oui[0], nan_pub_af->oui[1], nan_pub_af->oui[2]));
- WL_TRACE(("oui type: 0x%02x\n", nan_pub_af->oui_type));
- WL_TRACE(("oui subtype: 0x%02x\n", nan_pub_af->oui_sub_type));
-
- offset = sizeof(*nan_pub_af);
- if (offset > len) {
- WL_ERR(("Invalid event buffer len\n"));
- ret = BCME_BUFTOOSHORT;
- goto fail;
- }
- p_attr += offset;
- len -= offset;
- } else if (type == WL_NAN_XTLV_SD_REPLIED) {
- ev_replied = (const wl_nan_event_replied_t *)p_attr;
-
- WL_TRACE((">> WL_NAN_XTLV_SD_REPLIED: Replied Event\n"));
-
- tlv_data->pub_id = (wl_nan_instance_id_t)ev_replied->pub_id;
- tlv_data->sub_id = (wl_nan_instance_id_t)ev_replied->sub_id;
- tlv_data->sub_rssi = ev_replied->sub_rssi;
- ret = memcpy_s(&tlv_data->remote_nmi, ETHER_ADDR_LEN,
- &ev_replied->sub_mac, ETHER_ADDR_LEN);
- if (ret != BCME_OK) {
- WL_ERR(("Failed to copy remote nmi\n"));
- goto fail;
- }
-
- WL_TRACE(("publish id: %d\n", ev_replied->pub_id));
- WL_TRACE(("subscribe d: %d\n", ev_replied->sub_id));
- WL_TRACE(("Subscriber mac addr: " MACDBG "\n",
- MAC2STRDBG(ev_replied->sub_mac.octet)));
- WL_TRACE(("subscribe rssi: %d\n", (int8)ev_replied->sub_rssi));
- WL_TRACE(("attribute no: %d\n", ev_replied->attr_num));
- WL_TRACE(("attribute len: %d\n", (uint16)ev_replied->attr_list_len));
-
- /* advance to the service descriptor which is attr_list[0] */
- offset = OFFSETOF(wl_nan_event_replied_t, attr_list[0]);
- if (offset > len) {
- WL_ERR(("Invalid event buffer len\n"));
- ret = BCME_BUFTOOSHORT;
- goto fail;
- }
- p_attr += offset;
- len -= offset;
- ret = wl_cfgnan_parse_sda_data(osh, p_attr, len, tlv_data);
- if (unlikely(ret)) {
- WL_ERR(("wl_cfgnan_parse_sdea_data failed,"
- "error = %d \n", ret));
- }
- }
-
-fail:
- return ret;
-}
-
-/* Based on each case of tlv type id, fill into tlv data */
-int
-wl_cfgnan_set_vars_cbfn(void *ctx, const uint8 *data, uint16 type, uint16 len)
-{
- nan_parse_event_ctx_t *ctx_tlv_data = ((nan_parse_event_ctx_t *)(ctx));
- nan_event_data_t *tlv_data = ((nan_event_data_t *)(ctx_tlv_data->nan_evt_data));
- int ret = BCME_OK;
-
- NAN_DBG_ENTER();
- if (!data || !len) {
- WL_ERR(("data length is invalid\n"));
- ret = BCME_ERROR;
- goto fail;
- }
-
- switch (type) {
- /*
- * Need to parse service descript attributes including service control,
- * when Follow up or Discovery result come
- */
- case WL_NAN_XTLV_SD_FUP_RECEIVED:
- case WL_NAN_XTLV_SD_DISC_RESULTS: {
- ret = wl_cfgnan_parse_sd_attr_data(ctx_tlv_data->cfg->osh,
- len, data, tlv_data, type);
- break;
- }
- case WL_NAN_XTLV_SD_SVC_INFO: {
- tlv_data->svc_info.data =
- MALLOCZ(ctx_tlv_data->cfg->osh, len);
- if (!tlv_data->svc_info.data) {
- WL_ERR(("%s: memory allocation failed\n", __FUNCTION__));
- tlv_data->svc_info.dlen = 0;
- ret = BCME_NOMEM;
- goto fail;
- }
- tlv_data->svc_info.dlen = len;
- ret = memcpy_s(tlv_data->svc_info.data, tlv_data->svc_info.dlen,
- data, tlv_data->svc_info.dlen);
- if (ret != BCME_OK) {
- WL_ERR(("Failed to copy svc info data\n"));
- goto fail;
- }
- break;
- }
- default:
- WL_ERR(("Not available for tlv type = 0x%x\n", type));
- ret = BCME_ERROR;
- break;
- }
-fail:
- NAN_DBG_EXIT();
- return ret;
-}
-
-int
-wl_cfg_nan_check_cmd_len(uint16 nan_iov_len, uint16 data_size,
- uint16 *subcmd_len)
-{
- s32 ret = BCME_OK;
-
- if (subcmd_len != NULL) {
- *subcmd_len = OFFSETOF(bcm_iov_batch_subcmd_t, data) +
- ALIGN_SIZE(data_size, 4);
- if (*subcmd_len > nan_iov_len) {
- WL_ERR(("%s: Buf short, requested:%d, available:%d\n",
- __FUNCTION__, *subcmd_len, nan_iov_len));
- ret = BCME_NOMEM;
- }
- } else {
- WL_ERR(("Invalid subcmd_len\n"));
- ret = BCME_ERROR;
- }
- return ret;
-}
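wl_cfg_nan_check_cmd_len() above sizes each batched subcommand as the header up to its data member plus the payload rounded to a 4-byte boundary, and reports BCME_NOMEM when that exceeds the remaining iov space. A simplified sketch of the same arithmetic with generic names (the struct below is illustrative, not the real bcm_iov_batch_subcmd_t):

#include <stdint.h>
#include <stddef.h>

#define ALIGN4(x)  (((x) + 3u) & ~3u)	/* round up to a 4-byte boundary */

struct subcmd_hdr {
	uint16_t id;
	uint16_t len;
	uint32_t options;
	uint8_t  data[];	/* payload starts here */
};

/* Return the padded on-wire size of one subcommand, or 0 if it will not fit. */
static uint16_t subcmd_total_len(uint16_t avail, uint16_t data_size)
{
	uint16_t total = (uint16_t)(offsetof(struct subcmd_hdr, data) + ALIGN4(data_size));

	return (total <= avail) ? total : 0;
}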
-
-int
-wl_cfgnan_config_eventmask(struct net_device *ndev, struct bcm_cfg80211 *cfg,
- uint8 event_ind_flag, bool disable_events)
-{
- bcm_iov_batch_buf_t *nan_buf = NULL;
- s32 ret = BCME_OK;
- uint16 nan_buf_size = NAN_IOCTL_BUF_SIZE;
- uint16 subcmd_len;
- uint32 status;
- bcm_iov_batch_subcmd_t *sub_cmd = NULL;
- bcm_iov_batch_subcmd_t *sub_cmd_resp = NULL;
- uint8 event_mask[WL_NAN_EVMASK_EXTN_LEN];
- wl_nan_evmask_extn_t *evmask;
- uint16 evmask_cmd_len;
- uint8 resp_buf[NAN_IOCTL_BUF_SIZE];
-
- NAN_DBG_ENTER();
-
- /* same src and dest len here */
- (void)memset_s(event_mask, WL_NAN_EVMASK_EXTN_VER, 0, WL_NAN_EVMASK_EXTN_VER);
- evmask_cmd_len = OFFSETOF(wl_nan_evmask_extn_t, evmask) +
- WL_NAN_EVMASK_EXTN_LEN;
- ret = wl_add_remove_eventmsg(ndev, WLC_E_NAN, true);
- if (unlikely(ret)) {
- WL_ERR((" nan event enable failed, error = %d \n", ret));
- goto fail;
- }
-
- nan_buf = MALLOCZ(cfg->osh, nan_buf_size);
- if (!nan_buf) {
- WL_ERR(("%s: memory allocation failed\n", __func__));
- ret = BCME_NOMEM;
- goto fail;
- }
-
- nan_buf->version = htol16(WL_NAN_IOV_BATCH_VERSION);
- nan_buf->count = 0;
- nan_buf_size -= OFFSETOF(bcm_iov_batch_buf_t, cmds[0]);
- sub_cmd = (bcm_iov_batch_subcmd_t*)(uint8 *)(&nan_buf->cmds[0]);
-
- ret = wl_cfg_nan_check_cmd_len(nan_buf_size,
- evmask_cmd_len, &subcmd_len);
- if (unlikely(ret)) {
- WL_ERR(("nan_sub_cmd check failed\n"));
- goto fail;
- }
-
- sub_cmd->id = htod16(WL_NAN_CMD_CFG_EVENT_MASK);
- sub_cmd->len = sizeof(sub_cmd->u.options) + evmask_cmd_len;
- sub_cmd->u.options = htol32(BCM_XTLV_OPTION_ALIGN32);
- evmask = (wl_nan_evmask_extn_t *)sub_cmd->data;
- evmask->ver = WL_NAN_EVMASK_EXTN_VER;
- evmask->len = WL_NAN_EVMASK_EXTN_LEN;
- nan_buf_size -= subcmd_len;
- nan_buf->count = 1;
-
- if (disable_events) {
- WL_DBG(("Disabling all nan events..except stop event\n"));
- setbit(event_mask, NAN_EVENT_MAP(WL_NAN_EVENT_STOP));
- } else {
- /*
- * Android framework event mask configuration.
- */
- nan_buf->is_set = false;
- memset(resp_buf, 0, sizeof(resp_buf));
- ret = wl_cfgnan_execute_ioctl(ndev, cfg, nan_buf, nan_buf_size, &status,
- (void*)resp_buf, NAN_IOCTL_BUF_SIZE);
- if (unlikely(ret) || unlikely(status)) {
- WL_ERR(("get nan event mask failed ret %d status %d \n",
- ret, status));
- goto fail;
- }
- sub_cmd_resp = &((bcm_iov_batch_buf_t *)(resp_buf))->cmds[0];
- evmask = (wl_nan_evmask_extn_t *)sub_cmd_resp->data;
-
- /* check the response buff */
- /* same src and dest len here */
- (void)memcpy_s(&event_mask, WL_NAN_EVMASK_EXTN_LEN,
- (uint8*)&evmask->evmask, WL_NAN_EVMASK_EXTN_LEN);
-
- if (event_ind_flag) {
- if (CHECK_BIT(event_ind_flag, WL_NAN_EVENT_DIC_MAC_ADDR_BIT)) {
- WL_DBG(("Need to add disc mac addr change event\n"));
- }
- /* BIT2 - Disable nan cluster join indication (OTA). */
- if (CHECK_BIT(event_ind_flag, WL_NAN_EVENT_JOIN_EVENT)) {
- clrbit(event_mask, NAN_EVENT_MAP(WL_NAN_EVENT_MERGE));
- }
- }
-
- setbit(event_mask, NAN_EVENT_MAP(WL_NAN_EVENT_DISCOVERY_RESULT));
- setbit(event_mask, NAN_EVENT_MAP(WL_NAN_EVENT_RECEIVE));
- setbit(event_mask, NAN_EVENT_MAP(WL_NAN_EVENT_TERMINATED));
- setbit(event_mask, NAN_EVENT_MAP(WL_NAN_EVENT_STOP));
- setbit(event_mask, NAN_EVENT_MAP(WL_NAN_EVENT_TXS));
- setbit(event_mask, NAN_EVENT_MAP(WL_NAN_EVENT_PEER_DATAPATH_IND));
- setbit(event_mask, NAN_EVENT_MAP(WL_NAN_EVENT_DATAPATH_ESTB));
- setbit(event_mask, NAN_EVENT_MAP(WL_NAN_EVENT_DATAPATH_END));
- setbit(event_mask, NAN_EVENT_MAP(WL_NAN_EVENT_RNG_REQ_IND));
- setbit(event_mask, NAN_EVENT_MAP(WL_NAN_EVENT_RNG_TERM_IND));
- setbit(event_mask, NAN_EVENT_MAP(WL_NAN_EVENT_DISC_CACHE_TIMEOUT));
- /* Disable below events by default */
- clrbit(event_mask, NAN_EVENT_MAP(WL_NAN_EVENT_PEER_SCHED_UPD_NOTIF));
- clrbit(event_mask, NAN_EVENT_MAP(WL_NAN_EVENT_RNG_RPT_IND));
- clrbit(event_mask, NAN_EVENT_MAP(WL_NAN_EVENT_DW_END));
- }
-
- nan_buf->is_set = true;
- evmask = (wl_nan_evmask_extn_t *)sub_cmd->data;
- /* same src and dest len here */
- (void)memcpy_s((uint8*)&evmask->evmask, WL_NAN_EVMASK_EXTN_LEN,
- &event_mask, WL_NAN_EVMASK_EXTN_LEN);
-
- nan_buf_size = (NAN_IOCTL_BUF_SIZE - nan_buf_size);
- ret = wl_cfgnan_execute_ioctl(ndev, cfg, nan_buf, nan_buf_size, &status,
- (void*)resp_buf, NAN_IOCTL_BUF_SIZE);
- if (unlikely(ret) || unlikely(status)) {
- WL_ERR(("set nan event mask failed ret %d status %d \n", ret, status));
- goto fail;
- }
- WL_DBG(("set nan event mask successfull\n"));
-
-fail:
- if (nan_buf) {
- MFREE(cfg->osh, nan_buf, NAN_IOCTL_BUF_SIZE);
- }
- NAN_DBG_EXIT();
- return ret;
-}
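wl_cfgnan_config_eventmask() above is a read-modify-write sequence: issue the event-mask iovar as a GET, set or clear individual bits in the returned mask, then push it back with is_set true. A compressed sketch of that flow, with hypothetical fw_get_event_mask()/fw_set_event_mask() stubs standing in for the batched iovar transport (bit numbers and mask width are illustrative):

#include <stdint.h>
#include <string.h>

#define EVMASK_LEN 8	/* assumed mask width; the driver uses WL_NAN_EVMASK_EXTN_LEN */

/* Stand-in "firmware" state so the sketch is self-contained. */
static uint8_t fw_mask[EVMASK_LEN];

static int fw_get_event_mask(uint8_t mask[EVMASK_LEN])
{
	memcpy(mask, fw_mask, EVMASK_LEN);
	return 0;
}

static int fw_set_event_mask(const uint8_t mask[EVMASK_LEN])
{
	memcpy(fw_mask, mask, EVMASK_LEN);
	return 0;
}

static void mask_set(uint8_t *m, unsigned int bit) { m[bit / 8] |=  (1u << (bit % 8)); }
static void mask_clr(uint8_t *m, unsigned int bit) { m[bit / 8] &= ~(1u << (bit % 8)); }

/* Read-modify-write: fetch the current mask, adjust a few bits, write it back. */
static int configure_event_mask(int drop_merge_events)
{
	uint8_t mask[EVMASK_LEN];
	int err = fw_get_event_mask(mask);

	if (err)
		return err;

	mask_set(mask, 3);	/* enable an event of interest (bit numbers illustrative) */
	if (drop_merge_events)
		mask_clr(mask, 5);	/* silence a noisy periodic indication */

	return fw_set_event_mask(mask);
}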
-
-static int
-wl_cfgnan_set_nan_avail(struct net_device *ndev,
- struct bcm_cfg80211 *cfg, nan_avail_cmd_data *cmd_data, uint8 avail_type)
-{
- bcm_iov_batch_buf_t *nan_buf = NULL;
- s32 ret = BCME_OK;
- uint16 nan_buf_size = NAN_IOCTL_BUF_SIZE;
- uint16 subcmd_len;
- bcm_iov_batch_subcmd_t *sub_cmd = NULL;
- wl_nan_iov_t *nan_iov_data = NULL;
- wl_avail_t *avail = NULL;
- wl_avail_entry_t *entry; /* used for filling entry structure */
- uint8 *p; /* tracking pointer */
- uint8 i;
- u32 status;
- int c;
- char ndc_id[ETHER_ADDR_LEN] = { 0x50, 0x6f, 0x9a, 0x01, 0x0, 0x0 };
- dhd_pub_t *dhdp = wl_cfg80211_get_dhdp(ndev);
- char *a = WL_AVAIL_BIT_MAP;
- uint8 resp_buf[NAN_IOCTL_BUF_SIZE];
-
- NAN_DBG_ENTER();
-
- /* Do not disturb avail if dam is supported */
- if (FW_SUPPORTED(dhdp, autodam)) {
- WL_DBG(("DAM is supported, avail modification not allowed\n"));
- return ret;
- }
-
- if (avail_type < WL_AVAIL_LOCAL || avail_type > WL_AVAIL_TYPE_MAX) {
- WL_ERR(("Invalid availability type\n"));
- ret = BCME_USAGE_ERROR;
- goto fail;
- }
-
- nan_buf = MALLOCZ(cfg->osh, nan_buf_size);
- if (!nan_buf) {
- WL_ERR(("%s: memory allocation failed\n", __func__));
- ret = BCME_NOMEM;
- goto fail;
- }
-
- nan_iov_data = MALLOCZ(cfg->osh, sizeof(*nan_iov_data));
- if (!nan_iov_data) {
- WL_ERR(("%s: memory allocation failed\n", __func__));
- ret = BCME_NOMEM;
- goto fail;
- }
-
- nan_iov_data->nan_iov_len = NAN_IOCTL_BUF_SIZE;
- nan_buf->version = htol16(WL_NAN_IOV_BATCH_VERSION);
- nan_buf->count = 0;
- nan_iov_data->nan_iov_buf = (uint8 *)(&nan_buf->cmds[0]);
- nan_iov_data->nan_iov_len -= OFFSETOF(bcm_iov_batch_buf_t, cmds[0]);
-
- sub_cmd = (bcm_iov_batch_subcmd_t*)(nan_iov_data->nan_iov_buf);
- ret = wl_cfg_nan_check_cmd_len(nan_iov_data->nan_iov_len,
- sizeof(*avail), &subcmd_len);
- if (unlikely(ret)) {
- WL_ERR(("nan_sub_cmd check failed\n"));
- goto fail;
- }
- avail = (wl_avail_t *)sub_cmd->data;
-
- /* populate wl_avail_type */
- avail->flags = avail_type;
- if (avail_type == WL_AVAIL_RANGING) {
- ret = memcpy_s(&avail->addr, ETHER_ADDR_LEN,
- &cmd_data->peer_nmi, ETHER_ADDR_LEN);
- if (ret != BCME_OK) {
- WL_ERR(("Failed to copy peer nmi\n"));
- goto fail;
- }
- }
-
- sub_cmd->len = sizeof(sub_cmd->u.options) + subcmd_len;
- sub_cmd->id = htod16(WL_NAN_CMD_CFG_AVAIL);
- sub_cmd->u.options = htol32(BCM_XTLV_OPTION_ALIGN32);
-
- nan_buf->is_set = false;
- nan_buf->count++;
- nan_iov_data->nan_iov_len -= subcmd_len;
- nan_buf_size = (NAN_IOCTL_BUF_SIZE - nan_iov_data->nan_iov_len);
-
- WL_TRACE(("Read wl nan avail status\n"));
-
- memset_s(resp_buf, sizeof(resp_buf), 0, sizeof(resp_buf));
- ret = wl_cfgnan_execute_ioctl(ndev, cfg, nan_buf, nan_buf_size, &status,
- (void*)resp_buf, NAN_IOCTL_BUF_SIZE);
- if (unlikely(ret)) {
- WL_ERR(("\n Get nan avail failed ret %d, status %d \n", ret, status));
- goto fail;
- }
-
- if (status == BCME_NOTFOUND) {
- nan_buf->count = 0;
- nan_iov_data->nan_iov_buf = (uint8 *)(&nan_buf->cmds[0]);
- nan_iov_data->nan_iov_len -= OFFSETOF(bcm_iov_batch_buf_t, cmds[0]);
-
- sub_cmd = (bcm_iov_batch_subcmd_t*)(nan_iov_data->nan_iov_buf);
-
- avail = (wl_avail_t *)sub_cmd->data;
- p = avail->entry;
-
- /* populate wl_avail fields */
- avail->length = OFFSETOF(wl_avail_t, entry);
- avail->flags = avail_type;
- avail->num_entries = 0;
- avail->id = 0;
- entry = (wl_avail_entry_t*)p;
- entry->flags = WL_AVAIL_ENTRY_COM;
-
- /* set default values for optional parameters */
- entry->start_offset = 0;
- entry->u.band = 0;
-
- if (cmd_data->avail_period) {
- entry->period = cmd_data->avail_period;
- } else {
- entry->period = WL_AVAIL_PERIOD_1024;
- }
-
- if (cmd_data->duration != NAN_BAND_INVALID) {
- entry->flags |= (3 << WL_AVAIL_ENTRY_USAGE_SHIFT) |
- (cmd_data->duration << WL_AVAIL_ENTRY_BIT_DUR_SHIFT);
- } else {
- entry->flags |= (3 << WL_AVAIL_ENTRY_USAGE_SHIFT) |
- (WL_AVAIL_BIT_DUR_16 << WL_AVAIL_ENTRY_BIT_DUR_SHIFT);
- }
- entry->bitmap_len = 0;
-
- if (avail_type == WL_AVAIL_LOCAL) {
- entry->flags |= 1 << WL_AVAIL_ENTRY_CHAN_SHIFT;
- /* Check for 5g support, based on that choose 5g channel */
- if (cfg->support_5g) {
- entry->u.channel_info =
- htod32(wf_channel2chspec(WL_AVAIL_CHANNEL_5G,
- WL_AVAIL_BANDWIDTH_5G));
- } else {
- entry->u.channel_info =
- htod32(wf_channel2chspec(WL_AVAIL_CHANNEL_2G,
- WL_AVAIL_BANDWIDTH_2G));
- }
- entry->flags = htod16(entry->flags);
- }
-
- if (cfg->support_5g) {
- a = WL_5G_AVAIL_BIT_MAP;
- }
-
- /* point to bitmap value for processing */
- if (cmd_data->bmap) {
- for (c = (WL_NAN_EVENT_CLEAR_BIT-1); c >= 0; c--) {
- i = cmd_data->bmap >> c;
- if (i & 1) {
- setbit(entry->bitmap, (WL_NAN_EVENT_CLEAR_BIT-c-1));
- }
- }
- } else {
- for (i = 0; i < strlen(WL_AVAIL_BIT_MAP); i++) {
- if (*a == '1') {
- setbit(entry->bitmap, i);
- }
- a++;
- }
- }
-
- /* account for partially filled most significant byte */
- entry->bitmap_len = ((WL_NAN_EVENT_CLEAR_BIT) + NBBY - 1) / NBBY;
- if (avail_type == WL_AVAIL_NDC) {
- ret = memcpy_s(&avail->addr, ETHER_ADDR_LEN,
- ndc_id, ETHER_ADDR_LEN);
- if (ret != BCME_OK) {
- WL_ERR(("Failed to copy ndc id\n"));
- goto fail;
- }
- } else if (avail_type == WL_AVAIL_RANGING) {
- ret = memcpy_s(&avail->addr, ETHER_ADDR_LEN,
- &cmd_data->peer_nmi, ETHER_ADDR_LEN);
- if (ret != BCME_OK) {
- WL_ERR(("Failed to copy peer nmi\n"));
- goto fail;
- }
- }
-
- /* update wl_avail and populate wl_avail_entry */
- entry->length = OFFSETOF(wl_avail_entry_t, bitmap) + entry->bitmap_len;
- avail->num_entries++;
- avail->length += entry->length;
- /* advance pointer for next entry */
- p += entry->length;
-
- /* convert to dongle endianness */
- entry->length = htod16(entry->length);
- entry->start_offset = htod16(entry->start_offset);
- entry->u.channel_info = htod32(entry->u.channel_info);
- entry->flags = htod16(entry->flags);
- /* update avail_len only if
- * there are avail entries
- */
- if (avail->num_entries) {
- nan_iov_data->nan_iov_len -= avail->length;
- avail->length = htod16(avail->length);
- avail->flags = htod16(avail->flags);
- }
-
- sub_cmd->id = htod16(WL_NAN_CMD_CFG_AVAIL);
- sub_cmd->len = sizeof(sub_cmd->u.options) + avail->length;
- sub_cmd->u.options = htol32(BCM_XTLV_OPTION_ALIGN32);
-
- nan_buf->is_set = true;
- nan_buf->count++;
-
- /* Reduce the iov_len size by subcmd_len */
- nan_iov_data->nan_iov_len -= subcmd_len;
- nan_buf_size = (NAN_IOCTL_BUF_SIZE - nan_iov_data->nan_iov_len);
-
- ret = wl_cfgnan_execute_ioctl(ndev, cfg, nan_buf, nan_buf_size, &status,
- (void*)resp_buf, NAN_IOCTL_BUF_SIZE);
- if (unlikely(ret) || unlikely(status)) {
- WL_ERR(("\n set nan avail failed ret %d status %d \n", ret, status));
- ret = status;
- goto fail;
- }
- } else if (status == BCME_OK) {
- WL_DBG(("Avail type [%d] found to be configured\n", avail_type));
- } else {
- WL_ERR(("set nan avail failed ret %d status %d \n", ret, status));
- }
-
-fail:
- if (nan_buf) {
- MFREE(cfg->osh, nan_buf, NAN_IOCTL_BUF_SIZE);
- }
- if (nan_iov_data) {
- MFREE(cfg->osh, nan_iov_data, sizeof(*nan_iov_data));
- }
-
- NAN_DBG_EXIT();
- return ret;
-}
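/*
 * Editor's sketch (not part of the patch): the else-branch above packs a
 * character bit map such as WL_AVAIL_BIT_MAP ("1111...") into entry->bitmap
 * and rounds bitmap_len up to whole bytes. This is a minimal standalone
 * illustration with hypothetical names; the driver uses its own setbit()
 * and NBBY definitions.
 */
#include <stdio.h>
#include <string.h>
#include <stdint.h>

#define SKETCH_NBBY 8	/* bits per byte, mirrors NBBY */

/* set bit 'i' in a byte array, LSB-first within each byte (like setbit()) */
static void sketch_setbit(uint8_t *bitmap, unsigned int i)
{
	bitmap[i / SKETCH_NBBY] |= (uint8_t)(1u << (i % SKETCH_NBBY));
}

int main(void)
{
	const char *map = "1111111111111111";	/* stand-in for WL_AVAIL_BIT_MAP */
	uint8_t bitmap[8] = {0};
	size_t i, nbits = strlen(map);
	/* round up to whole bytes, as in ((bits) + NBBY - 1) / NBBY */
	size_t bitmap_len = (nbits + SKETCH_NBBY - 1) / SKETCH_NBBY;

	for (i = 0; i < nbits; i++) {
		if (map[i] == '1')
			sketch_setbit(bitmap, i);
	}
	printf("bitmap_len=%zu first byte=0x%02x\n", bitmap_len, bitmap[0]);
	return 0;
}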
-
-static int
-wl_cfgnan_config_control_flag(struct net_device *ndev, struct bcm_cfg80211 *cfg,
- uint32 flag, uint32 *status, bool set)
-{
- bcm_iov_batch_buf_t *nan_buf = NULL;
- s32 ret = BCME_OK;
- uint16 nan_iov_start, nan_iov_end;
- uint16 nan_buf_size = NAN_IOCTL_BUF_SIZE;
- uint16 subcmd_len;
- bcm_iov_batch_subcmd_t *sub_cmd = NULL;
- bcm_iov_batch_subcmd_t *sub_cmd_resp = NULL;
- wl_nan_iov_t *nan_iov_data = NULL;
- uint32 cfg_ctrl;
- uint8 resp_buf[NAN_IOCTL_BUF_SIZE];
-
- NAN_DBG_ENTER();
- WL_INFORM_MEM(("%s: Modifying nan ctrl flag %x val %d",
- __FUNCTION__, flag, set));
- nan_buf = MALLOCZ(cfg->osh, nan_buf_size);
- if (!nan_buf) {
- WL_ERR(("%s: memory allocation failed\n", __func__));
- ret = BCME_NOMEM;
- goto fail;
- }
-
- nan_iov_data = MALLOCZ(cfg->osh, sizeof(*nan_iov_data));
- if (!nan_iov_data) {
- WL_ERR(("%s: memory allocation failed\n", __func__));
- ret = BCME_NOMEM;
- goto fail;
- }
-
- nan_iov_data->nan_iov_len = nan_iov_start = NAN_IOCTL_BUF_SIZE;
- nan_buf->version = htol16(WL_NAN_IOV_BATCH_VERSION);
- nan_buf->count = 0;
- nan_iov_data->nan_iov_buf = (uint8 *)(&nan_buf->cmds[0]);
- nan_iov_data->nan_iov_len -= OFFSETOF(bcm_iov_batch_buf_t, cmds[0]);
- sub_cmd = (bcm_iov_batch_subcmd_t*)(nan_iov_data->nan_iov_buf);
-
- ret = wl_cfg_nan_check_cmd_len(nan_iov_data->nan_iov_len,
- sizeof(cfg_ctrl), &subcmd_len);
- if (unlikely(ret)) {
- WL_ERR(("nan_sub_cmd check failed\n"));
- goto fail;
- }
-
- sub_cmd->id = htod16(WL_NAN_CMD_CFG_NAN_CONFIG);
- sub_cmd->len = sizeof(sub_cmd->u.options) + sizeof(cfg_ctrl);
- sub_cmd->u.options = htol32(BCM_XTLV_OPTION_ALIGN32);
-
- nan_buf->is_set = false;
- nan_buf->count++;
-
- /* Reduce the iov_len size by subcmd_len */
- nan_iov_data->nan_iov_len -= subcmd_len;
- nan_iov_end = nan_iov_data->nan_iov_len;
- nan_buf_size = (nan_iov_start - nan_iov_end);
-
- memset_s(resp_buf, sizeof(resp_buf), 0, sizeof(resp_buf));
- ret = wl_cfgnan_execute_ioctl(ndev, cfg, nan_buf, nan_buf_size, status,
- (void*)resp_buf, NAN_IOCTL_BUF_SIZE);
- if (unlikely(ret) || unlikely(*status)) {
- WL_ERR(("get nan cfg ctrl failed ret %d status %d \n", ret, *status));
- goto fail;
- }
- sub_cmd_resp = &((bcm_iov_batch_buf_t *)(resp_buf))->cmds[0];
-
- /* check the response buff */
- cfg_ctrl = (*(uint32 *)&sub_cmd_resp->data[0]);
- if (set) {
- cfg_ctrl |= flag;
- } else {
- cfg_ctrl &= ~flag;
- }
- ret = memcpy_s(sub_cmd->data, sizeof(cfg_ctrl),
- &cfg_ctrl, sizeof(cfg_ctrl));
- if (ret != BCME_OK) {
- WL_ERR(("Failed to copy cfg ctrl\n"));
- goto fail;
- }
-
- nan_buf->is_set = true;
- ret = wl_cfgnan_execute_ioctl(ndev, cfg, nan_buf, nan_buf_size, status,
- (void*)resp_buf, NAN_IOCTL_BUF_SIZE);
- if (unlikely(ret) || unlikely(*status)) {
- WL_ERR(("set nan cfg ctrl failed ret %d status %d \n", ret, *status));
- goto fail;
- }
- WL_DBG(("set nan cfg ctrl successfull\n"));
-fail:
- if (nan_buf) {
- MFREE(cfg->osh, nan_buf, NAN_IOCTL_BUF_SIZE);
- }
- if (nan_iov_data) {
- MFREE(cfg->osh, nan_iov_data, sizeof(*nan_iov_data));
- }
-
- NAN_DBG_EXIT();
- return ret;
-}
-
-static int
-wl_cfgnan_get_iovars_status(void *ctx, const uint8 *data, uint16 type, uint16 len)
-{
- bcm_iov_batch_buf_t *b_resp = (bcm_iov_batch_buf_t *)ctx;
- uint32 status;
- /* if all tlvs are parsed, we should not be here */
- if (b_resp->count == 0) {
- return BCME_BADLEN;
- }
-
- /* cbfn params may be used in f/w */
- if (len < sizeof(status)) {
- return BCME_BUFTOOSHORT;
- }
-
- /* the first 4 bytes carry the status */
- if (memcpy_s(&status, sizeof(status),
- data, sizeof(uint32)) != BCME_OK) {
- WL_ERR(("Failed to copy status\n"));
- goto exit;
- }
-
- status = dtoh32(status);
-
- /* If status is non zero */
- if (status != BCME_OK) {
- printf("cmd type %d failed, status: %04x\n", type, status);
- goto exit;
- }
-
- if (b_resp->count > 0) {
- b_resp->count--;
- }
-
- if (!b_resp->count) {
- status = BCME_IOV_LAST_CMD;
- }
-exit:
- return status;
-}
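/*
 * Editor's sketch (not part of the patch): the callback above reads the first
 * four bytes of every batched sub-command response as a little-endian status
 * word (memcpy_s followed by dtoh32). A portable standalone equivalent, with
 * illustrative names only:
 */
#include <stdint.h>
#include <stddef.h>

static int32_t sketch_read_le32_status(const uint8_t *data, size_t len)
{
	if (len < 4)
		return -1;	/* caller would map this to BCME_BUFTOOSHORT */
	return (int32_t)((uint32_t)data[0] |
	                 ((uint32_t)data[1] << 8) |
	                 ((uint32_t)data[2] << 16) |
	                 ((uint32_t)data[3] << 24));
}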
-
-static int
-wl_cfgnan_execute_ioctl(struct net_device *ndev, struct bcm_cfg80211 *cfg,
- bcm_iov_batch_buf_t *nan_buf, uint16 nan_buf_size, uint32 *status,
- uint8 *resp_buf, uint16 resp_buf_size)
-{
- int ret = BCME_OK;
- uint16 tlvs_len;
- int res = BCME_OK;
- bcm_iov_batch_buf_t *p_resp = NULL;
- char *iov = "nan";
- int max_resp_len = WLC_IOCTL_MAXLEN;
-
- WL_DBG(("Enter:\n"));
- if (nan_buf->is_set) {
- ret = wldev_iovar_setbuf(ndev, "nan", nan_buf, nan_buf_size,
- resp_buf, resp_buf_size, NULL);
- p_resp = (bcm_iov_batch_buf_t *)(resp_buf + strlen(iov) + 1);
- } else {
- ret = wldev_iovar_getbuf(ndev, "nan", nan_buf, nan_buf_size,
- resp_buf, resp_buf_size, NULL);
- p_resp = (bcm_iov_batch_buf_t *)(resp_buf);
- }
- if (unlikely(ret)) {
- WL_ERR((" nan execute ioctl failed, error = %d \n", ret));
- goto fail;
- }
-
- p_resp->is_set = nan_buf->is_set;
- tlvs_len = max_resp_len - OFFSETOF(bcm_iov_batch_buf_t, cmds[0]);
-
- /* Extract the tlvs and print their resp in cb fn */
- res = bcm_unpack_xtlv_buf((void *)p_resp, (const uint8 *)&p_resp->cmds[0],
- tlvs_len, BCM_IOV_CMD_OPT_ALIGN32, wl_cfgnan_get_iovars_status);
-
- if (res == BCME_IOV_LAST_CMD) {
- res = BCME_OK;
- }
-fail:
- *status = res;
- WL_DBG((" nan ioctl ret %d status %d \n", ret, *status));
- return ret;
-
-}
-
-static int
-wl_cfgnan_if_addr_handler(void *p_buf, uint16 *nan_buf_size,
- struct ether_addr *if_addr)
-{
- /* nan if addr sub-command */
- s32 ret = BCME_OK;
- uint16 subcmd_len;
-
- NAN_DBG_ENTER();
-
- if (p_buf != NULL) {
- bcm_iov_batch_subcmd_t *sub_cmd = (bcm_iov_batch_subcmd_t*)(p_buf);
-
- ret = wl_cfg_nan_check_cmd_len(*nan_buf_size,
- sizeof(*if_addr), &subcmd_len);
- if (unlikely(ret)) {
- WL_ERR(("nan_sub_cmd check failed\n"));
- goto fail;
- }
-
- /* Fill the sub_command block */
- sub_cmd->id = htod16(WL_NAN_CMD_CFG_IF_ADDR);
- sub_cmd->len = sizeof(sub_cmd->u.options) + sizeof(*if_addr);
- sub_cmd->u.options = htol32(BCM_XTLV_OPTION_ALIGN32);
- ret = memcpy_s(sub_cmd->data, sizeof(*if_addr),
- (uint8 *)if_addr, sizeof(*if_addr));
- if (ret != BCME_OK) {
- WL_ERR(("Failed to copy if addr\n"));
- goto fail;
- }
-
- *nan_buf_size -= subcmd_len;
- } else {
- WL_ERR(("nan_iov_buf is NULL\n"));
- ret = BCME_ERROR;
- goto fail;
- }
-
-fail:
- NAN_DBG_EXIT();
- return ret;
-}
-
-static int
-wl_cfgnan_get_ver(struct net_device *ndev, struct bcm_cfg80211 *cfg)
-{
- bcm_iov_batch_buf_t *nan_buf = NULL;
- s32 ret = BCME_OK;
- uint16 nan_buf_size = NAN_IOCTL_BUF_SIZE;
- wl_nan_ver_t *nan_ver = NULL;
- uint16 subcmd_len;
- uint32 status;
- bcm_iov_batch_subcmd_t *sub_cmd = NULL;
- bcm_iov_batch_subcmd_t *sub_cmd_resp = NULL;
- uint8 resp_buf[NAN_IOCTL_BUF_SIZE];
-
- NAN_DBG_ENTER();
- nan_buf = MALLOCZ(cfg->osh, nan_buf_size);
- if (!nan_buf) {
- WL_ERR(("%s: memory allocation failed\n", __func__));
- ret = BCME_NOMEM;
- goto fail;
- }
-
- nan_buf->version = htol16(WL_NAN_IOV_BATCH_VERSION);
- nan_buf->count = 0;
- nan_buf_size -= OFFSETOF(bcm_iov_batch_buf_t, cmds[0]);
- sub_cmd = (bcm_iov_batch_subcmd_t*)(uint8 *)(&nan_buf->cmds[0]);
-
- ret = wl_cfg_nan_check_cmd_len(nan_buf_size,
- sizeof(*nan_ver), &subcmd_len);
- if (unlikely(ret)) {
- WL_ERR(("nan_sub_cmd check failed\n"));
- goto fail;
- }
-
- nan_ver = (wl_nan_ver_t *)sub_cmd->data;
- sub_cmd->id = htod16(WL_NAN_CMD_GLB_NAN_VER);
- sub_cmd->len = sizeof(sub_cmd->u.options) + sizeof(*nan_ver);
- sub_cmd->u.options = htol32(BCM_XTLV_OPTION_ALIGN32);
- nan_buf_size -= subcmd_len;
- nan_buf->count = 1;
-
- nan_buf->is_set = false;
- bzero(resp_buf, sizeof(resp_buf));
- nan_buf_size = NAN_IOCTL_BUF_SIZE - nan_buf_size;
-
- ret = wl_cfgnan_execute_ioctl(ndev, cfg, nan_buf, nan_buf_size, &status,
- (void*)resp_buf, NAN_IOCTL_BUF_SIZE);
- if (unlikely(ret) || unlikely(status)) {
- WL_ERR(("get nan ver failed ret %d status %d \n",
- ret, status));
- goto fail;
- }
-
- sub_cmd_resp = &((bcm_iov_batch_buf_t *)(resp_buf))->cmds[0];
- nan_ver = ((wl_nan_ver_t *)&sub_cmd_resp->data[0]);
- if (!nan_ver) {
- ret = BCME_NOTFOUND;
- WL_ERR(("nan_ver not found: err = %d\n", ret));
- goto fail;
- }
- cfg->nancfg.version = *nan_ver;
- WL_INFORM_MEM(("Nan Version is %d\n", cfg->nancfg.version));
-
-fail:
- if (nan_buf) {
- MFREE(cfg->osh, nan_buf, NAN_IOCTL_BUF_SIZE);
- }
- NAN_DBG_EXIT();
- return ret;
-
-}
-
-static int
-wl_cfgnan_set_if_addr(struct bcm_cfg80211 *cfg)
-{
- s32 ret = BCME_OK;
- uint16 nan_buf_size = NAN_IOCTL_BUF_SIZE;
- uint32 status;
- uint8 resp_buf[NAN_IOCTL_BUF_SIZE];
- struct ether_addr if_addr;
- uint8 buf[NAN_IOCTL_BUF_SIZE];
- bcm_iov_batch_buf_t *nan_buf = (bcm_iov_batch_buf_t*)buf;
- bool rand_mac = cfg->nancfg.mac_rand;
-
- nan_buf->version = htol16(WL_NAN_IOV_BATCH_VERSION);
- nan_buf->count = 0;
- nan_buf_size -= OFFSETOF(bcm_iov_batch_buf_t, cmds[0]);
- if (rand_mac) {
- RANDOM_BYTES(if_addr.octet, 6);
- /* restore mcast and local admin bits to 0 and 1 */
- ETHER_SET_UNICAST(if_addr.octet);
- ETHER_SET_LOCALADDR(if_addr.octet);
- } else {
- /* Use primary MAC with the locally administered bit for the
- * NAN NMI I/F
- */
- if (wl_get_vif_macaddr(cfg, WL_IF_TYPE_NAN_NMI,
- if_addr.octet) != BCME_OK) {
- ret = -EINVAL;
- WL_ERR(("Failed to get mac addr for NMI\n"));
- goto fail;
- }
- }
- WL_INFORM_MEM(("%s: NMI " MACDBG "\n",
- __FUNCTION__, MAC2STRDBG(if_addr.octet)));
- ret = wl_cfgnan_if_addr_handler(&nan_buf->cmds[0],
- &nan_buf_size, &if_addr);
- if (unlikely(ret)) {
- WL_ERR(("Nan if addr handler sub_cmd set failed\n"));
- goto fail;
- }
- nan_buf->count++;
- nan_buf->is_set = true;
- nan_buf_size = NAN_IOCTL_BUF_SIZE - nan_buf_size;
- memset_s(resp_buf, sizeof(resp_buf), 0, sizeof(resp_buf));
- ret = wl_cfgnan_execute_ioctl(bcmcfg_to_prmry_ndev(cfg), cfg,
- nan_buf, nan_buf_size, &status,
- (void*)resp_buf, NAN_IOCTL_BUF_SIZE);
- if (unlikely(ret) || unlikely(status)) {
- WL_ERR(("nan if addr handler failed ret %d status %d\n",
- ret, status));
- goto fail;
- }
- ret = memcpy_s(cfg->nan_nmi_mac, ETH_ALEN,
- if_addr.octet, ETH_ALEN);
- if (ret != BCME_OK) {
- WL_ERR(("Failed to copy nmi addr\n"));
- goto fail;
- }
- return ret;
-fail:
- if (!rand_mac) {
- wl_release_vif_macaddr(cfg, if_addr.octet, WL_IF_TYPE_NAN_NMI);
- }
-
- return ret;
-}
-
-static int
-wl_cfgnan_init_handler(void *p_buf, uint16 *nan_buf_size, bool val)
-{
- /* nan init sub-command */
- s32 ret = BCME_OK;
- uint16 subcmd_len;
-
- NAN_DBG_ENTER();
-
- if (p_buf != NULL) {
- bcm_iov_batch_subcmd_t *sub_cmd = (bcm_iov_batch_subcmd_t*)(p_buf);
-
- ret = wl_cfg_nan_check_cmd_len(*nan_buf_size,
- sizeof(val), &subcmd_len);
- if (unlikely(ret)) {
- WL_ERR(("nan_sub_cmd check failed\n"));
- goto fail;
- }
-
- /* Fill the sub_command block */
- sub_cmd->id = htod16(WL_NAN_CMD_CFG_NAN_INIT);
- sub_cmd->len = sizeof(sub_cmd->u.options) + sizeof(uint8);
- sub_cmd->u.options = htol32(BCM_XTLV_OPTION_ALIGN32);
- ret = memcpy_s(sub_cmd->data, sizeof(uint8),
- (uint8*)&val, sizeof(uint8));
- if (ret != BCME_OK) {
- WL_ERR(("Failed to copy init value\n"));
- goto fail;
- }
-
- *nan_buf_size -= subcmd_len;
- } else {
- WL_ERR(("nan_iov_buf is NULL\n"));
- ret = BCME_ERROR;
- goto fail;
- }
-
-fail:
- NAN_DBG_EXIT();
- return ret;
-}
-
-static int
-wl_cfgnan_enable_handler(wl_nan_iov_t *nan_iov_data, bool val)
-{
- /* nan enable */
- s32 ret = BCME_OK;
- bcm_iov_batch_subcmd_t *sub_cmd = NULL;
- uint16 subcmd_len;
-
- NAN_DBG_ENTER();
-
- sub_cmd = (bcm_iov_batch_subcmd_t*)(nan_iov_data->nan_iov_buf);
-
- ret = wl_cfg_nan_check_cmd_len(nan_iov_data->nan_iov_len,
- sizeof(val), &subcmd_len);
- if (unlikely(ret)) {
- WL_ERR(("nan_sub_cmd check failed\n"));
- return ret;
- }
-
- /* Fill the sub_command block */
- sub_cmd->id = htod16(WL_NAN_CMD_CFG_NAN_ENAB);
- sub_cmd->len = sizeof(sub_cmd->u.options) + sizeof(uint8);
- sub_cmd->u.options = htol32(BCM_XTLV_OPTION_ALIGN32);
- ret = memcpy_s(sub_cmd->data, sizeof(uint8),
- (uint8*)&val, sizeof(uint8));
- if (ret != BCME_OK) {
- WL_ERR(("Failed to copy enab value\n"));
- return ret;
- }
-
- nan_iov_data->nan_iov_len -= subcmd_len;
- nan_iov_data->nan_iov_buf += subcmd_len;
- NAN_DBG_EXIT();
- return ret;
-}
-
-static int
-wl_cfgnan_warmup_time_handler(nan_config_cmd_data_t *cmd_data,
- wl_nan_iov_t *nan_iov_data)
-{
- /* wl nan warm_up_time */
- s32 ret = BCME_OK;
- bcm_iov_batch_subcmd_t *sub_cmd = NULL;
- wl_nan_warmup_time_ticks_t *wup_ticks = NULL;
- uint16 subcmd_len;
- NAN_DBG_ENTER();
-
- sub_cmd = (bcm_iov_batch_subcmd_t*)(nan_iov_data->nan_iov_buf);
- wup_ticks = (wl_nan_warmup_time_ticks_t *)sub_cmd->data;
-
- ret = wl_cfg_nan_check_cmd_len(nan_iov_data->nan_iov_len,
- sizeof(*wup_ticks), &subcmd_len);
- if (unlikely(ret)) {
- WL_ERR(("nan_sub_cmd check failed\n"));
- return ret;
- }
- /* Fill the sub_command block */
- sub_cmd->id = htod16(WL_NAN_CMD_CFG_WARMUP_TIME);
- sub_cmd->len = sizeof(sub_cmd->u.options) +
- sizeof(*wup_ticks);
- sub_cmd->u.options = htol32(BCM_XTLV_OPTION_ALIGN32);
- *wup_ticks = cmd_data->warmup_time;
-
- nan_iov_data->nan_iov_len -= subcmd_len;
- nan_iov_data->nan_iov_buf += subcmd_len;
-
- NAN_DBG_EXIT();
- return ret;
-}
-
-static int
-wl_cfgnan_set_election_metric(nan_config_cmd_data_t *cmd_data,
- wl_nan_iov_t *nan_iov_data, uint32 nan_attr_mask)
-{
- s32 ret = BCME_OK;
- bcm_iov_batch_subcmd_t *sub_cmd = NULL;
- wl_nan_election_metric_config_t *metrics = NULL;
- uint16 subcmd_len;
- NAN_DBG_ENTER();
-
- sub_cmd =
- (bcm_iov_batch_subcmd_t*)(nan_iov_data->nan_iov_buf);
- ret = wl_cfg_nan_check_cmd_len(nan_iov_data->nan_iov_len,
- sizeof(*metrics), &subcmd_len);
- if (unlikely(ret)) {
- WL_ERR(("nan_sub_cmd check failed\n"));
- goto fail;
- }
-
- metrics = (wl_nan_election_metric_config_t *)sub_cmd->data;
-
- if (nan_attr_mask & NAN_ATTR_RAND_FACTOR_CONFIG) {
- metrics->random_factor = (uint8)cmd_data->metrics.random_factor;
- }
-
- if ((!cmd_data->metrics.master_pref) ||
- (cmd_data->metrics.master_pref > NAN_MAXIMUM_MASTER_PREFERENCE)) {
- WL_TRACE(("Master Pref is 0 or greater than 254, hence sending random value\n"));
- /* Master pref for mobile devices can be from 1 - 127 as per Spec AppendixC */
- metrics->master_pref = (RANDOM32()%(NAN_MAXIMUM_MASTER_PREFERENCE/2)) + 1;
- } else {
- metrics->master_pref = (uint8)cmd_data->metrics.master_pref;
- }
- sub_cmd->id = htod16(WL_NAN_CMD_ELECTION_METRICS_CONFIG);
- sub_cmd->len = sizeof(sub_cmd->u.options) +
- sizeof(*metrics);
- sub_cmd->u.options = htol32(BCM_XTLV_OPTION_ALIGN32);
-
- nan_iov_data->nan_iov_len -= subcmd_len;
- nan_iov_data->nan_iov_buf += subcmd_len;
-
-fail:
- NAN_DBG_EXIT();
- return ret;
-}
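/*
 * Editor's sketch (not part of the patch): the fallback above picks a random
 * master preference in 1..127 when NAN_MAXIMUM_MASTER_PREFERENCE is 254,
 * i.e. RANDOM32() % (254 / 2) + 1. A standalone worked example using rand()
 * as a stand-in for RANDOM32(); the constant value is an assumption here.
 */
#include <stdio.h>
#include <stdlib.h>
#include <time.h>

int main(void)
{
	const unsigned int max_pref = 254;	/* assumed NAN_MAXIMUM_MASTER_PREFERENCE */
	unsigned int pref;

	srand((unsigned int)time(NULL));
	pref = ((unsigned int)rand() % (max_pref / 2)) + 1;	/* yields 1..127 */
	printf("master_pref = %u\n", pref);
	return 0;
}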
-
-static int
-wl_cfgnan_set_rssi_proximity(nan_config_cmd_data_t *cmd_data,
- wl_nan_iov_t *nan_iov_data, uint32 nan_attr_mask)
-{
- s32 ret = BCME_OK;
- bcm_iov_batch_subcmd_t *sub_cmd = NULL;
- wl_nan_rssi_notif_thld_t *rssi_notif_thld = NULL;
- uint16 subcmd_len;
-
- NAN_DBG_ENTER();
- sub_cmd = (bcm_iov_batch_subcmd_t*)(nan_iov_data->nan_iov_buf);
-
- rssi_notif_thld = (wl_nan_rssi_notif_thld_t *)sub_cmd->data;
-
- ret = wl_cfg_nan_check_cmd_len(nan_iov_data->nan_iov_len,
- sizeof(*rssi_notif_thld), &subcmd_len);
- if (unlikely(ret)) {
- WL_ERR(("nan_sub_cmd check failed\n"));
- return ret;
- }
- if (nan_attr_mask & NAN_ATTR_RSSI_PROXIMITY_2G_CONFIG) {
- rssi_notif_thld->bcn_rssi_2g =
- cmd_data->rssi_attr.rssi_proximity_2dot4g_val;
- } else {
- /* Keep the default RSSI threshold of -70 dBm */
- rssi_notif_thld->bcn_rssi_2g = NAN_DEF_RSSI_NOTIF_THRESH;
- }
-
- if (nan_attr_mask & NAN_ATTR_RSSI_PROXIMITY_5G_CONFIG) {
- rssi_notif_thld->bcn_rssi_5g =
- cmd_data->rssi_attr.rssi_proximity_5g_val;
- } else {
- /* Keep the default RSSI threshold of -70 dBm */
- rssi_notif_thld->bcn_rssi_5g = NAN_DEF_RSSI_NOTIF_THRESH;
- }
-
- sub_cmd->id = htod16(WL_NAN_CMD_SYNC_BCN_RSSI_NOTIF_THRESHOLD);
- sub_cmd->len = htod16(sizeof(sub_cmd->u.options) + sizeof(*rssi_notif_thld));
- sub_cmd->u.options = htod32(BCM_XTLV_OPTION_ALIGN32);
-
- nan_iov_data->nan_iov_len -= subcmd_len;
- nan_iov_data->nan_iov_buf += subcmd_len;
-
- NAN_DBG_EXIT();
- return ret;
-}
-
-static int
-wl_cfgnan_set_rssi_mid_or_close(nan_config_cmd_data_t *cmd_data,
- wl_nan_iov_t *nan_iov_data, uint32 nan_attr_mask)
-{
- s32 ret = BCME_OK;
- bcm_iov_batch_subcmd_t *sub_cmd = NULL;
- wl_nan_rssi_thld_t *rssi_thld = NULL;
- uint16 subcmd_len;
-
- NAN_DBG_ENTER();
- sub_cmd = (bcm_iov_batch_subcmd_t*)(nan_iov_data->nan_iov_buf);
- rssi_thld = (wl_nan_rssi_thld_t *)sub_cmd->data;
-
- ret = wl_cfg_nan_check_cmd_len(nan_iov_data->nan_iov_len,
- sizeof(*rssi_thld), &subcmd_len);
- if (unlikely(ret)) {
- WL_ERR(("nan_sub_cmd check failed\n"));
- return ret;
- }
-
- /*
- * Default RSSI mid value is -75 dBm for both 2G and 5G.
- * Default RSSI close value is -60 dBm for both 2G and 5G.
- */
- if (nan_attr_mask & NAN_ATTR_RSSI_MIDDLE_2G_CONFIG) {
- rssi_thld->rssi_mid_2g =
- cmd_data->rssi_attr.rssi_middle_2dot4g_val;
- } else {
- rssi_thld->rssi_mid_2g = NAN_DEF_RSSI_MID;
- }
-
- if (nan_attr_mask & NAN_ATTR_RSSI_MIDDLE_5G_CONFIG) {
- rssi_thld->rssi_mid_5g =
- cmd_data->rssi_attr.rssi_middle_5g_val;
- } else {
- rssi_thld->rssi_mid_5g = NAN_DEF_RSSI_MID;
- }
-
- if (nan_attr_mask & NAN_ATTR_RSSI_CLOSE_CONFIG) {
- rssi_thld->rssi_close_2g =
- cmd_data->rssi_attr.rssi_close_2dot4g_val;
- } else {
- rssi_thld->rssi_close_2g = NAN_DEF_RSSI_CLOSE;
- }
-
- if (nan_attr_mask & NAN_ATTR_RSSI_CLOSE_5G_CONFIG) {
- rssi_thld->rssi_close_5g =
- cmd_data->rssi_attr.rssi_close_5g_val;
- } else {
- rssi_thld->rssi_close_5g = NAN_DEF_RSSI_CLOSE;
- }
-
- sub_cmd->id = htod16(WL_NAN_CMD_ELECTION_RSSI_THRESHOLD);
- sub_cmd->len = htod16(sizeof(sub_cmd->u.options) + sizeof(*rssi_thld));
- sub_cmd->u.options = htod32(BCM_XTLV_OPTION_ALIGN32);
-
- nan_iov_data->nan_iov_len -= subcmd_len;
- nan_iov_data->nan_iov_buf += subcmd_len;
-
- NAN_DBG_EXIT();
- return ret;
-}
-
-static int
-check_for_valid_5gchan(struct net_device *ndev, uint8 chan)
-{
- s32 ret = BCME_OK;
- uint bitmap;
- u8 ioctl_buf[WLC_IOCTL_SMLEN];
- uint32 chanspec_arg;
- NAN_DBG_ENTER();
-
- chanspec_arg = CH20MHZ_CHSPEC(chan);
- chanspec_arg = wl_chspec_host_to_driver(chanspec_arg);
- memset_s(ioctl_buf, WLC_IOCTL_SMLEN, 0, WLC_IOCTL_SMLEN);
- ret = wldev_iovar_getbuf(ndev, "per_chan_info",
- (void *)&chanspec_arg, sizeof(chanspec_arg),
- ioctl_buf, WLC_IOCTL_SMLEN, NULL);
- if (ret != BCME_OK) {
- WL_ERR(("Chaninfo for channel = %d, error %d\n", chan, ret));
- goto exit;
- }
-
- bitmap = dtoh32(*(uint *)ioctl_buf);
- if (!(bitmap & WL_CHAN_VALID_HW)) {
- WL_ERR(("Invalid channel\n"));
- ret = BCME_BADCHAN;
- goto exit;
- }
-
- if (!(bitmap & WL_CHAN_VALID_SW)) {
- WL_ERR(("Not supported in current locale\n"));
- ret = BCME_BADCHAN;
- goto exit;
- }
-exit:
- NAN_DBG_EXIT();
- return ret;
-}
-
-static int
-wl_cfgnan_set_nan_soc_chans(struct net_device *ndev, nan_config_cmd_data_t *cmd_data,
- wl_nan_iov_t *nan_iov_data, uint32 nan_attr_mask)
-{
- s32 ret = BCME_OK;
- bcm_iov_batch_subcmd_t *sub_cmd = NULL;
- wl_nan_social_channels_t *soc_chans = NULL;
- uint16 subcmd_len;
-
- NAN_DBG_ENTER();
-
- sub_cmd = (bcm_iov_batch_subcmd_t*)(nan_iov_data->nan_iov_buf);
- soc_chans =
- (wl_nan_social_channels_t *)sub_cmd->data;
-
- ret = wl_cfg_nan_check_cmd_len(nan_iov_data->nan_iov_len,
- sizeof(*soc_chans), &subcmd_len);
- if (unlikely(ret)) {
- WL_ERR(("nan_sub_cmd check failed\n"));
- return ret;
- }
-
- sub_cmd->id = htod16(WL_NAN_CMD_SYNC_SOCIAL_CHAN);
- sub_cmd->len = sizeof(sub_cmd->u.options) +
- sizeof(*soc_chans);
- sub_cmd->u.options = htol32(BCM_XTLV_OPTION_ALIGN32);
- if (nan_attr_mask & NAN_ATTR_2G_CHAN_CONFIG) {
- soc_chans->soc_chan_2g = cmd_data->chanspec[1];
- } else {
- soc_chans->soc_chan_2g = NAN_DEF_SOCIAL_CHAN_2G;
- }
-
- if (cmd_data->support_5g) {
- if (nan_attr_mask & NAN_ATTR_5G_CHAN_CONFIG) {
- soc_chans->soc_chan_5g = cmd_data->chanspec[2];
- } else {
- soc_chans->soc_chan_5g = NAN_DEF_SOCIAL_CHAN_5G;
- }
- ret = check_for_valid_5gchan(ndev, soc_chans->soc_chan_5g);
- if (ret != BCME_OK) {
- ret = check_for_valid_5gchan(ndev, NAN_DEF_SEC_SOCIAL_CHAN_5G);
- if (ret == BCME_OK) {
- soc_chans->soc_chan_5g = NAN_DEF_SEC_SOCIAL_CHAN_5G;
- } else {
- soc_chans->soc_chan_5g = 0;
- ret = BCME_OK;
- WL_ERR(("Current locale doesn't support 5G op"
- "continuing with 2G only operation\n"));
- }
- }
- } else {
- WL_DBG(("5G support is disabled\n"));
- }
- nan_iov_data->nan_iov_len -= subcmd_len;
- nan_iov_data->nan_iov_buf += subcmd_len;
-
- NAN_DBG_EXIT();
- return ret;
-}
-
-static int
-wl_cfgnan_set_nan_scan_params(struct net_device *ndev, struct bcm_cfg80211 *cfg,
- nan_config_cmd_data_t *cmd_data, uint8 band_index, uint32 nan_attr_mask)
-{
- bcm_iov_batch_buf_t *nan_buf = NULL;
- s32 ret = BCME_OK;
- uint16 nan_iov_start, nan_iov_end;
- uint16 nan_buf_size = NAN_IOCTL_BUF_SIZE;
- uint16 subcmd_len;
- bcm_iov_batch_subcmd_t *sub_cmd = NULL;
- wl_nan_iov_t *nan_iov_data = NULL;
- uint8 resp_buf[NAN_IOCTL_BUF_SIZE];
- wl_nan_scan_params_t *scan_params = NULL;
- uint32 status;
-
- NAN_DBG_ENTER();
-
- nan_buf = MALLOCZ(cfg->osh, nan_buf_size);
- if (!nan_buf) {
- WL_ERR(("%s: memory allocation failed\n", __func__));
- ret = BCME_NOMEM;
- goto fail;
- }
-
- nan_iov_data = MALLOCZ(cfg->osh, sizeof(*nan_iov_data));
- if (!nan_iov_data) {
- WL_ERR(("%s: memory allocation failed\n", __func__));
- ret = BCME_NOMEM;
- goto fail;
- }
-
- nan_iov_data->nan_iov_len = nan_iov_start = NAN_IOCTL_BUF_SIZE;
- nan_buf->version = htol16(WL_NAN_IOV_BATCH_VERSION);
- nan_buf->count = 0;
- nan_iov_data->nan_iov_buf = (uint8 *)(&nan_buf->cmds[0]);
- nan_iov_data->nan_iov_len -= OFFSETOF(bcm_iov_batch_buf_t, cmds[0]);
- sub_cmd = (bcm_iov_batch_subcmd_t*)(nan_iov_data->nan_iov_buf);
-
- ret = wl_cfg_nan_check_cmd_len(nan_iov_data->nan_iov_len,
- sizeof(*scan_params), &subcmd_len);
- if (unlikely(ret)) {
- WL_ERR(("nan_sub_cmd check failed\n"));
- goto fail;
- }
- scan_params = (wl_nan_scan_params_t *)sub_cmd->data;
-
- sub_cmd->id = htod16(WL_NAN_CMD_CFG_SCAN_PARAMS);
- sub_cmd->len = sizeof(sub_cmd->u.options) + sizeof(*scan_params);
- sub_cmd->u.options = htol32(BCM_XTLV_OPTION_ALIGN32);
-
- if (!band_index) {
- /* Fw default: Dwell time for 2G is 210 */
- if ((nan_attr_mask & NAN_ATTR_2G_DWELL_TIME_CONFIG) &&
- cmd_data->dwell_time[0]) {
- scan_params->dwell_time = cmd_data->dwell_time[0] +
- NAN_SCAN_DWELL_TIME_DELTA_MS;
- }
- /* Fw default: Scan period for 2G is 10 */
- if (nan_attr_mask & NAN_ATTR_2G_SCAN_PERIOD_CONFIG) {
- scan_params->scan_period = cmd_data->scan_period[0];
- }
- } else {
- if ((nan_attr_mask & NAN_ATTR_5G_DWELL_TIME_CONFIG) &&
- cmd_data->dwell_time[1]) {
- scan_params->dwell_time = cmd_data->dwell_time[1] +
- NAN_SCAN_DWELL_TIME_DELTA_MS;
- }
- if (nan_attr_mask & NAN_ATTR_5G_SCAN_PERIOD_CONFIG) {
- scan_params->scan_period = cmd_data->scan_period[1];
- }
- }
- scan_params->band_index = band_index;
- nan_buf->is_set = true;
- nan_buf->count++;
-
- /* Reduce the iov_len size by subcmd_len */
- nan_iov_data->nan_iov_len -= subcmd_len;
- nan_iov_end = nan_iov_data->nan_iov_len;
- nan_buf_size = (nan_iov_start - nan_iov_end);
-
- memset_s(resp_buf, sizeof(resp_buf), 0, sizeof(resp_buf));
- ret = wl_cfgnan_execute_ioctl(ndev, cfg, nan_buf, nan_buf_size, &status,
- (void*)resp_buf, NAN_IOCTL_BUF_SIZE);
- if (unlikely(ret) || unlikely(status)) {
- WL_ERR(("set nan scan params failed ret %d status %d \n", ret, status));
- goto fail;
- }
- WL_DBG(("set nan scan params successfull\n"));
-fail:
- if (nan_buf) {
- MFREE(cfg->osh, nan_buf, NAN_IOCTL_BUF_SIZE);
- }
- if (nan_iov_data) {
- MFREE(cfg->osh, nan_iov_data, sizeof(*nan_iov_data));
- }
-
- NAN_DBG_EXIT();
- return ret;
-}
-
-static int
-wl_cfgnan_set_cluster_id(nan_config_cmd_data_t *cmd_data,
- wl_nan_iov_t *nan_iov_data)
-{
- s32 ret = BCME_OK;
- bcm_iov_batch_subcmd_t *sub_cmd = NULL;
- uint16 subcmd_len;
-
- NAN_DBG_ENTER();
-
- sub_cmd = (bcm_iov_batch_subcmd_t*)(nan_iov_data->nan_iov_buf);
-
- ret = wl_cfg_nan_check_cmd_len(nan_iov_data->nan_iov_len,
- (sizeof(cmd_data->clus_id) - sizeof(uint8)), &subcmd_len);
- if (unlikely(ret)) {
- WL_ERR(("nan_sub_cmd check failed\n"));
- return ret;
- }
-
- cmd_data->clus_id.octet[0] = 0x50;
- cmd_data->clus_id.octet[1] = 0x6F;
- cmd_data->clus_id.octet[2] = 0x9A;
- cmd_data->clus_id.octet[3] = 0x01;
- WL_TRACE(("cluster_id = " MACDBG "\n", MAC2STRDBG(cmd_data->clus_id.octet)));
-
- sub_cmd->id = htod16(WL_NAN_CMD_CFG_CID);
- sub_cmd->len = sizeof(sub_cmd->u.options) + sizeof(cmd_data->clus_id);
- sub_cmd->u.options = htol32(BCM_XTLV_OPTION_ALIGN32);
- ret = memcpy_s(sub_cmd->data, sizeof(cmd_data->clus_id),
- (uint8 *)&cmd_data->clus_id,
- sizeof(cmd_data->clus_id));
- if (ret != BCME_OK) {
- WL_ERR(("Failed to copy clus id\n"));
- return ret;
- }
-
- nan_iov_data->nan_iov_len -= subcmd_len;
- nan_iov_data->nan_iov_buf += subcmd_len;
-
- NAN_DBG_EXIT();
- return ret;
-}
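/*
 * Editor's sketch (not part of the patch): the helper above always installs
 * the fixed NAN cluster-ID prefix 50:6F:9A:01; only the last two octets vary
 * (compared later in the start handler as octet[4]/octet[5]). A standalone
 * builder from a 16-bit cluster value; the high/low byte ordering shown here
 * is illustrative, not taken from the driver.
 */
#include <stdio.h>
#include <stdint.h>

static void sketch_build_cluster_id(uint16_t cluster, uint8_t id[6])
{
	id[0] = 0x50;
	id[1] = 0x6F;
	id[2] = 0x9A;
	id[3] = 0x01;
	id[4] = (uint8_t)(cluster >> 8);	/* illustrative: cluster high byte */
	id[5] = (uint8_t)(cluster & 0xFF);	/* illustrative: cluster low byte */
}

int main(void)
{
	uint8_t id[6];

	sketch_build_cluster_id(0x0000, id);
	printf("%02X:%02X:%02X:%02X:%02X:%02X\n",
	       id[0], id[1], id[2], id[3], id[4], id[5]);
	return 0;
}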
-
-static int
-wl_cfgnan_set_hop_count_limit(nan_config_cmd_data_t *cmd_data,
- wl_nan_iov_t *nan_iov_data)
-{
- s32 ret = BCME_OK;
- bcm_iov_batch_subcmd_t *sub_cmd = NULL;
- wl_nan_hop_count_t *hop_limit = NULL;
- uint16 subcmd_len;
-
- NAN_DBG_ENTER();
-
- sub_cmd = (bcm_iov_batch_subcmd_t*)(nan_iov_data->nan_iov_buf);
- hop_limit = (wl_nan_hop_count_t *)sub_cmd->data;
-
- ret = wl_cfg_nan_check_cmd_len(nan_iov_data->nan_iov_len,
- sizeof(*hop_limit), &subcmd_len);
- if (unlikely(ret)) {
- WL_ERR(("nan_sub_cmd check failed\n"));
- return ret;
- }
-
- *hop_limit = cmd_data->hop_count_limit;
- sub_cmd->id = htod16(WL_NAN_CMD_CFG_HOP_LIMIT);
- sub_cmd->len = sizeof(sub_cmd->u.options) + sizeof(*hop_limit);
- sub_cmd->u.options = htol32(BCM_XTLV_OPTION_ALIGN32);
-
- nan_iov_data->nan_iov_len -= subcmd_len;
- nan_iov_data->nan_iov_buf += subcmd_len;
-
- NAN_DBG_EXIT();
- return ret;
-}
-
-static int
-wl_cfgnan_set_sid_beacon_val(nan_config_cmd_data_t *cmd_data,
- wl_nan_iov_t *nan_iov_data, uint32 nan_attr_mask)
-{
- s32 ret = BCME_OK;
- bcm_iov_batch_subcmd_t *sub_cmd = NULL;
- wl_nan_sid_beacon_control_t *sid_beacon = NULL;
- uint16 subcmd_len;
-
- NAN_DBG_ENTER();
-
- sub_cmd = (bcm_iov_batch_subcmd_t*)(nan_iov_data->nan_iov_buf);
-
- ret = wl_cfg_nan_check_cmd_len(nan_iov_data->nan_iov_len,
- sizeof(*sid_beacon), &subcmd_len);
- if (unlikely(ret)) {
- WL_ERR(("nan_sub_cmd check failed\n"));
- return ret;
- }
-
- sid_beacon = (wl_nan_sid_beacon_control_t *)sub_cmd->data;
- sid_beacon->sid_enable = cmd_data->sid_beacon.sid_enable;
- /* Need to have separate flag for sub beacons
- * sid_beacon->sub_sid_enable = cmd_data->sid_beacon.sub_sid_enable;
- */
- if (nan_attr_mask & NAN_ATTR_SID_BEACON_CONFIG) {
- /* Limit for number of publish SIDs to be included in Beacons */
- sid_beacon->sid_count = cmd_data->sid_beacon.sid_count;
- }
- if (nan_attr_mask & NAN_ATTR_SUB_SID_BEACON_CONFIG) {
- /* Limit for number of subscribe SIDs to be included in Beacons */
- sid_beacon->sub_sid_count = cmd_data->sid_beacon.sub_sid_count;
- }
- sub_cmd->id = htod16(WL_NAN_CMD_CFG_SID_BEACON);
- sub_cmd->len = sizeof(sub_cmd->u.options) +
- sizeof(*sid_beacon);
- sub_cmd->u.options = htol32(BCM_XTLV_OPTION_ALIGN32);
-
- nan_iov_data->nan_iov_len -= subcmd_len;
- nan_iov_data->nan_iov_buf += subcmd_len;
- NAN_DBG_EXIT();
- return ret;
-}
-
-static int
-wl_cfgnan_set_nan_oui(nan_config_cmd_data_t *cmd_data,
- wl_nan_iov_t *nan_iov_data)
-{
- s32 ret = BCME_OK;
- bcm_iov_batch_subcmd_t *sub_cmd = NULL;
- uint16 subcmd_len;
-
- NAN_DBG_ENTER();
-
- sub_cmd = (bcm_iov_batch_subcmd_t*)(nan_iov_data->nan_iov_buf);
-
- ret = wl_cfg_nan_check_cmd_len(nan_iov_data->nan_iov_len,
- sizeof(cmd_data->nan_oui), &subcmd_len);
- if (unlikely(ret)) {
- WL_ERR(("nan_sub_cmd check failed\n"));
- return ret;
- }
-
- sub_cmd->id = htod16(WL_NAN_CMD_CFG_OUI);
- sub_cmd->len = sizeof(sub_cmd->u.options) + sizeof(cmd_data->nan_oui);
- sub_cmd->u.options = htol32(BCM_XTLV_OPTION_ALIGN32);
- ret = memcpy_s(sub_cmd->data, sizeof(cmd_data->nan_oui),
- (uint32 *)&cmd_data->nan_oui,
- sizeof(cmd_data->nan_oui));
- if (ret != BCME_OK) {
- WL_ERR(("Failed to copy nan oui\n"));
- return ret;
- }
-
- nan_iov_data->nan_iov_len -= subcmd_len;
- nan_iov_data->nan_iov_buf += subcmd_len;
- NAN_DBG_EXIT();
- return ret;
-}
-
-static int
-wl_cfgnan_set_awake_dws(struct net_device *ndev, nan_config_cmd_data_t *cmd_data,
- wl_nan_iov_t *nan_iov_data, struct bcm_cfg80211 *cfg, uint32 nan_attr_mask)
-{
- s32 ret = BCME_OK;
- bcm_iov_batch_subcmd_t *sub_cmd = NULL;
- wl_nan_awake_dws_t *awake_dws = NULL;
- uint16 subcmd_len;
- NAN_DBG_ENTER();
-
- sub_cmd =
- (bcm_iov_batch_subcmd_t*)(nan_iov_data->nan_iov_buf);
- ret = wl_cfg_nan_check_cmd_len(nan_iov_data->nan_iov_len,
- sizeof(*awake_dws), &subcmd_len);
- if (unlikely(ret)) {
- WL_ERR(("nan_sub_cmd check failed\n"));
- return ret;
- }
-
- awake_dws = (wl_nan_awake_dws_t *)sub_cmd->data;
-
- if (nan_attr_mask & NAN_ATTR_2G_DW_CONFIG) {
- awake_dws->dw_interval_2g = cmd_data->awake_dws.dw_interval_2g;
- if (!awake_dws->dw_interval_2g) {
- /* Set 2G awake dw value to fw default value 1 */
- awake_dws->dw_interval_2g = NAN_SYNC_DEF_AWAKE_DW;
- }
- } else {
- /* Set 2G awake dw value to fw default value 1 */
- awake_dws->dw_interval_2g = NAN_SYNC_DEF_AWAKE_DW;
- }
-
- if (cfg->support_5g) {
- if (nan_attr_mask & NAN_ATTR_5G_DW_CONFIG) {
- awake_dws->dw_interval_5g = cmd_data->awake_dws.dw_interval_5g;
- if (!awake_dws->dw_interval_5g) {
- /* disable 5g beacon ctrls */
- ret = wl_cfgnan_config_control_flag(ndev, cfg,
- WL_NAN_CTRL_DISC_BEACON_TX_5G,
- &(cmd_data->status), 0);
- if (unlikely(ret) || unlikely(cmd_data->status)) {
- WL_ERR((" nan control set config handler,"
- " ret = %d status = %d \n",
- ret, cmd_data->status));
- goto fail;
- }
- ret = wl_cfgnan_config_control_flag(ndev, cfg,
- WL_NAN_CTRL_SYNC_BEACON_TX_5G,
- &(cmd_data->status), 0);
- if (unlikely(ret) || unlikely(cmd_data->status)) {
- WL_ERR((" nan control set config handler,"
- " ret = %d status = %d \n",
- ret, cmd_data->status));
- goto fail;
- }
- }
- } else {
- /* Set 5G awake dw value to fw default value 1 */
- awake_dws->dw_interval_5g = NAN_SYNC_DEF_AWAKE_DW;
- ret = wl_cfgnan_config_control_flag(ndev, cfg,
- WL_NAN_CTRL_DISC_BEACON_TX_5G |
- WL_NAN_CTRL_SYNC_BEACON_TX_5G,
- &(cmd_data->status), TRUE);
- if (unlikely(ret) || unlikely(cmd_data->status)) {
- WL_ERR((" nan control set config handler, ret = %d"
- " status = %d \n", ret, cmd_data->status));
- goto fail;
- }
- }
- }
-
- sub_cmd->id = htod16(WL_NAN_CMD_SYNC_AWAKE_DWS);
- sub_cmd->len = sizeof(sub_cmd->u.options) +
- sizeof(*awake_dws);
- sub_cmd->u.options = htol32(BCM_XTLV_OPTION_ALIGN32);
-
- nan_iov_data->nan_iov_len -= subcmd_len;
- nan_iov_data->nan_iov_buf += subcmd_len;
-
-fail:
- NAN_DBG_EXIT();
- return ret;
-}
-
-int
-wl_cfgnan_start_handler(struct net_device *ndev, struct bcm_cfg80211 *cfg,
- nan_config_cmd_data_t *cmd_data, uint32 nan_attr_mask)
-{
- s32 ret = BCME_OK;
- uint16 nan_buf_size = NAN_IOCTL_BUF_SIZE;
- bcm_iov_batch_buf_t *nan_buf = NULL;
- wl_nan_iov_t *nan_iov_data = NULL;
- dhd_pub_t *dhdp = wl_cfg80211_get_dhdp(ndev);
- uint8 resp_buf[NAN_IOCTL_BUF_SIZE];
- int i;
- s32 timeout = 0;
- nan_hal_capabilities_t capabilities;
-
- NAN_DBG_ENTER();
-
- /* Protect discovery creation. Ensure proper mutex precedence:
- * if if_sync and nan_mutex are taken in the same context, nan_mutex
- * must be acquired after if_sync.
- */
- mutex_lock(&cfg->if_sync);
- NAN_MUTEX_LOCK();
-
- if (!dhdp->up) {
- WL_ERR(("bus is already down, hence blocking nan start\n"));
- ret = BCME_ERROR;
- NAN_MUTEX_UNLOCK();
- mutex_unlock(&cfg->if_sync);
- goto fail;
- }
-
-#ifdef WL_IFACE_MGMT
- if ((ret = wl_cfg80211_handle_if_role_conflict(cfg, WL_IF_TYPE_NAN_NMI)) != BCME_OK) {
- WL_ERR(("Conflicting iface is present, cant support nan\n"));
- NAN_MUTEX_UNLOCK();
- mutex_unlock(&cfg->if_sync);
- goto fail;
- }
-#endif /* WL_IFACE_MGMT */
-
- WL_INFORM_MEM(("Initializing NAN\n"));
- ret = wl_cfgnan_init(cfg);
- if (ret != BCME_OK) {
- WL_ERR(("failed to initialize NAN[%d]\n", ret));
- NAN_MUTEX_UNLOCK();
- mutex_unlock(&cfg->if_sync);
- goto fail;
- }
-
- ret = wl_cfgnan_get_ver(ndev, cfg);
- if (ret != BCME_OK) {
- WL_ERR(("failed to Nan IOV version[%d]\n", ret));
- NAN_MUTEX_UNLOCK();
- mutex_unlock(&cfg->if_sync);
- goto fail;
- }
-
- /* set nmi addr */
- ret = wl_cfgnan_set_if_addr(cfg);
- if (ret != BCME_OK) {
- WL_ERR(("Failed to set nmi address \n"));
- NAN_MUTEX_UNLOCK();
- mutex_unlock(&cfg->if_sync);
- goto fail;
- }
- cfg->nancfg.nan_event_recvd = false;
- NAN_MUTEX_UNLOCK();
- mutex_unlock(&cfg->if_sync);
-
- for (i = 0; i < NAN_MAX_NDI; i++) {
- /* Create NDI using the information provided by user space */
- if (cfg->nancfg.ndi[i].in_use && !cfg->nancfg.ndi[i].created) {
- ret = wl_cfgnan_data_path_iface_create_delete_handler(ndev, cfg,
- cfg->nancfg.ndi[i].ifname,
- NAN_WIFI_SUBCMD_DATA_PATH_IFACE_CREATE, dhdp->up);
- if (ret) {
- WL_ERR(("failed to create ndp interface [%d]\n", ret));
- goto fail;
- }
- cfg->nancfg.ndi[i].created = true;
- }
- }
-
- nan_buf = MALLOCZ(cfg->osh, nan_buf_size);
- if (!nan_buf) {
- WL_ERR(("%s: memory allocation failed\n", __func__));
- ret = BCME_NOMEM;
- goto fail;
- }
-
- nan_iov_data = MALLOCZ(cfg->osh, sizeof(*nan_iov_data));
- if (!nan_iov_data) {
- WL_ERR(("%s: memory allocation failed\n", __func__));
- ret = BCME_NOMEM;
- goto fail;
- }
-
- nan_iov_data->nan_iov_len = NAN_IOCTL_BUF_SIZE;
- nan_buf->version = htol16(WL_NAN_IOV_BATCH_VERSION);
- nan_buf->count = 0;
- nan_iov_data->nan_iov_buf = (uint8 *)(&nan_buf->cmds[0]);
- nan_iov_data->nan_iov_len -= OFFSETOF(bcm_iov_batch_buf_t, cmds[0]);
-
- if (nan_attr_mask & NAN_ATTR_SYNC_DISC_2G_BEACON_CONFIG) {
- /* config sync/discovery beacons on 2G band */
- /* 2g is mandatory */
- if (!cmd_data->beacon_2g_val) {
- WL_ERR(("Invalid NAN config...2G is mandatory\n"));
- ret = BCME_BADARG;
- goto fail;
- }
- ret = wl_cfgnan_config_control_flag(ndev, cfg,
- WL_NAN_CTRL_DISC_BEACON_TX_2G | WL_NAN_CTRL_SYNC_BEACON_TX_2G,
- &(cmd_data->status), TRUE);
- if (unlikely(ret) || unlikely(cmd_data->status)) {
- WL_ERR((" nan control set config handler, ret = %d status = %d \n",
- ret, cmd_data->status));
- goto fail;
- }
- }
- if (nan_attr_mask & NAN_ATTR_SYNC_DISC_5G_BEACON_CONFIG) {
- /* config sync/discovery beacons on 5G band */
- ret = wl_cfgnan_config_control_flag(ndev, cfg,
- WL_NAN_CTRL_DISC_BEACON_TX_5G | WL_NAN_CTRL_SYNC_BEACON_TX_5G,
- &(cmd_data->status), cmd_data->beacon_5g_val);
- if (unlikely(ret) || unlikely(cmd_data->status)) {
- WL_ERR((" nan control set config handler, ret = %d status = %d \n",
- ret, cmd_data->status));
- goto fail;
- }
- }
- /* Setting warm up time */
- cmd_data->warmup_time = 1;
- if (cmd_data->warmup_time) {
- ret = wl_cfgnan_warmup_time_handler(cmd_data, nan_iov_data);
- if (unlikely(ret)) {
- WL_ERR(("warm up time handler sub_cmd set failed\n"));
- goto fail;
- }
- nan_buf->count++;
- }
- /* setting master preference and random factor */
- ret = wl_cfgnan_set_election_metric(cmd_data, nan_iov_data, nan_attr_mask);
- if (unlikely(ret)) {
- WL_ERR(("election_metric sub_cmd set failed\n"));
- goto fail;
- } else {
- nan_buf->count++;
- }
-
- /* setting nan social channels */
- ret = wl_cfgnan_set_nan_soc_chans(ndev, cmd_data, nan_iov_data, nan_attr_mask);
- if (unlikely(ret)) {
- WL_ERR(("nan social channels set failed\n"));
- goto fail;
- } else {
- /* Storing 5g capability which is reqd for avail chan config. */
- cfg->support_5g = cmd_data->support_5g;
- nan_buf->count++;
- }
-
- if ((cmd_data->support_2g) && ((cmd_data->dwell_time[0]) ||
- (cmd_data->scan_period[0]))) {
- /* setting scan params */
- ret = wl_cfgnan_set_nan_scan_params(ndev, cfg, cmd_data, 0, nan_attr_mask);
- if (unlikely(ret)) {
- WL_ERR(("scan params set failed for 2g\n"));
- goto fail;
- }
- }
-
- if ((cmd_data->support_5g) && ((cmd_data->dwell_time[1]) ||
- (cmd_data->scan_period[1]))) {
- /* setting scan params */
- ret = wl_cfgnan_set_nan_scan_params(ndev, cfg, cmd_data,
- cmd_data->support_5g, nan_attr_mask);
- if (unlikely(ret)) {
- WL_ERR(("scan params set failed for 5g\n"));
- goto fail;
- }
- }
-
- /*
- * A cluster_low value matching cluster_high indicates a request
- * to join a cluster with that value.
- * If the requested cluster is not found the
- * device will start its own cluster
- */
- /* For debug purposes, force use of the configured cluster ID */
- if (!ETHER_ISNULLADDR(&cmd_data->clus_id.octet)) {
- if (cmd_data->clus_id.octet[4] == cmd_data->clus_id.octet[5]) {
- /* device will merge to configured CID only */
- ret = wl_cfgnan_config_control_flag(ndev, cfg,
- WL_NAN_CTRL_MERGE_CONF_CID_ONLY, &(cmd_data->status), true);
- if (unlikely(ret) || unlikely(cmd_data->status)) {
- WL_ERR((" nan control set config handler, ret = %d status = %d \n",
- ret, cmd_data->status));
- goto fail;
- }
- }
- /* setting cluster ID */
- ret = wl_cfgnan_set_cluster_id(cmd_data, nan_iov_data);
- if (unlikely(ret)) {
- WL_ERR(("cluster_id sub_cmd set failed\n"));
- goto fail;
- }
- nan_buf->count++;
- }
-
- /* setting rssi proximity values for 2.4GHz and 5GHz */
- ret = wl_cfgnan_set_rssi_proximity(cmd_data, nan_iov_data, nan_attr_mask);
- if (unlikely(ret)) {
- WL_ERR(("2.4GHz/5GHz rssi proximity threshold set failed\n"));
- goto fail;
- } else {
- nan_buf->count++;
- }
-
- /* setting rssi middle/close values for 2.4GHz and 5GHz */
- ret = wl_cfgnan_set_rssi_mid_or_close(cmd_data, nan_iov_data, nan_attr_mask);
- if (unlikely(ret)) {
- WL_ERR(("2.4GHz/5GHz rssi middle and close set failed\n"));
- goto fail;
- } else {
- nan_buf->count++;
- }
-
- /* setting hop count limit or threshold */
- if (nan_attr_mask & NAN_ATTR_HOP_COUNT_LIMIT_CONFIG) {
- ret = wl_cfgnan_set_hop_count_limit(cmd_data, nan_iov_data);
- if (unlikely(ret)) {
- WL_ERR(("hop_count_limit sub_cmd set failed\n"));
- goto fail;
- }
- nan_buf->count++;
- }
-
- /* setting sid beacon val */
- if ((nan_attr_mask & NAN_ATTR_SID_BEACON_CONFIG) ||
- (nan_attr_mask & NAN_ATTR_SUB_SID_BEACON_CONFIG)) {
- ret = wl_cfgnan_set_sid_beacon_val(cmd_data, nan_iov_data, nan_attr_mask);
- if (unlikely(ret)) {
- WL_ERR(("sid_beacon sub_cmd set failed\n"));
- goto fail;
- }
- nan_buf->count++;
- }
-
- /* setting nan oui */
- if (nan_attr_mask & NAN_ATTR_OUI_CONFIG) {
- ret = wl_cfgnan_set_nan_oui(cmd_data, nan_iov_data);
- if (unlikely(ret)) {
- WL_ERR(("nan_oui sub_cmd set failed\n"));
- goto fail;
- }
- nan_buf->count++;
- }
-
- /* setting nan awake dws */
- ret = wl_cfgnan_set_awake_dws(ndev, cmd_data,
- nan_iov_data, cfg, nan_attr_mask);
- if (unlikely(ret)) {
- WL_ERR(("nan awake dws set failed\n"));
- goto fail;
- } else {
- nan_buf->count++;
- }
-
- /* enable events */
- ret = wl_cfgnan_config_eventmask(ndev, cfg, cmd_data->disc_ind_cfg, false);
- if (unlikely(ret)) {
- WL_ERR(("Failed to config disc ind flag in event_mask, ret = %d\n", ret));
- goto fail;
- }
-
- /* setting nan enable sub_cmd */
- ret = wl_cfgnan_enable_handler(nan_iov_data, true);
- if (unlikely(ret)) {
- WL_ERR(("enable handler sub_cmd set failed\n"));
- goto fail;
- }
- nan_buf->count++;
- nan_buf->is_set = true;
-
- nan_buf_size -= nan_iov_data->nan_iov_len;
- memset(resp_buf, 0, sizeof(resp_buf));
- /* Reset condition variable */
- ret = wl_cfgnan_execute_ioctl(ndev, cfg, nan_buf, nan_buf_size,
- &(cmd_data->status), (void*)resp_buf, NAN_IOCTL_BUF_SIZE);
- if (unlikely(ret) || unlikely(cmd_data->status)) {
- WL_ERR((" nan start handler, enable failed, ret = %d status = %d \n",
- ret, cmd_data->status));
- goto fail;
- }
-
- timeout = wait_event_timeout(cfg->nancfg.nan_event_wait,
- cfg->nancfg.nan_event_recvd, msecs_to_jiffies(NAN_START_STOP_TIMEOUT));
- if (!timeout) {
- WL_ERR(("Timed out while Waiting for WL_NAN_EVENT_START event !!!\n"));
- ret = BCME_ERROR;
- goto fail;
- }
-
- /* If set, auto datapath confirms will be sent by FW */
- ret = wl_cfgnan_config_control_flag(ndev, cfg, WL_NAN_CTRL_AUTO_DPCONF,
- &(cmd_data->status), true);
- if (unlikely(ret) || unlikely(cmd_data->status)) {
- WL_ERR((" nan control set config handler, ret = %d status = %d \n",
- ret, cmd_data->status));
- goto fail;
- }
-
- /* By default set NAN proprietary rates */
- ret = wl_cfgnan_config_control_flag(ndev, cfg, WL_NAN_CTRL_PROP_RATE,
- &(cmd_data->status), true);
- if (unlikely(ret) || unlikely(cmd_data->status)) {
- WL_ERR((" nan proprietary rate set failed, ret = %d status = %d \n",
- ret, cmd_data->status));
- goto fail;
- }
-
- /* malloc for ndp peer list */
- if ((ret = wl_cfgnan_get_capablities_handler(ndev, cfg, &capabilities))
- == BCME_OK) {
- cfg->nancfg.max_ndp_count = capabilities.max_ndp_sessions;
- cfg->nancfg.nan_ndp_peer_info = MALLOCZ(cfg->osh,
- cfg->nancfg.max_ndp_count * sizeof(nan_ndp_peer_t));
- if (!cfg->nancfg.nan_ndp_peer_info) {
- WL_ERR(("%s: memory allocation failed\n", __func__));
- ret = BCME_NOMEM;
- goto fail;
- }
-
- } else {
- WL_ERR(("wl_cfgnan_get_capablities_handler failed, ret = %d\n", ret));
- goto fail;
- }
-
-#ifdef RTT_SUPPORT
- /* Initialize geofence cfg */
- dhd_rtt_initialize_geofence_cfg(cfg->pub);
-#endif /* RTT_SUPPORT */
-
- cfg->nan_enable = true;
- WL_INFORM_MEM(("[NAN] Enable successfull \n"));
- /* disable TDLS on NAN NMI IF create */
- wl_cfg80211_tdls_config(cfg, TDLS_STATE_NMI_CREATE, false);
-
-fail:
- /* reset condition variable */
- cfg->nancfg.nan_event_recvd = false;
- if (unlikely(ret) || unlikely(cmd_data->status)) {
- cfg->nan_enable = false;
- mutex_lock(&cfg->if_sync);
- ret = wl_cfg80211_delete_iface(cfg, WL_IF_TYPE_NAN);
- if (ret != BCME_OK) {
- WL_ERR(("failed to delete NDI[%d]\n", ret));
- }
- mutex_unlock(&cfg->if_sync);
- }
- if (nan_buf) {
- MFREE(cfg->osh, nan_buf, NAN_IOCTL_BUF_SIZE);
- }
- if (nan_iov_data) {
- MFREE(cfg->osh, nan_iov_data, sizeof(*nan_iov_data));
- }
-
- NAN_DBG_EXIT();
- return ret;
-}
-
-int
-wl_cfgnan_disable(struct bcm_cfg80211 *cfg)
-{
- s32 ret = BCME_OK;
- dhd_pub_t *dhdp = (dhd_pub_t *)(cfg->pub);
-
- NAN_DBG_ENTER();
- if ((cfg->nan_init_state == TRUE) &&
- (cfg->nan_enable == TRUE)) {
- struct net_device *ndev;
- ndev = bcmcfg_to_prmry_ndev(cfg);
-
- /* We have to remove NDIs so that P2P/Softap can work */
- ret = wl_cfg80211_delete_iface(cfg, WL_IF_TYPE_NAN);
- if (ret != BCME_OK) {
- WL_ERR(("failed to delete NDI[%d]\n", ret));
- }
-
- WL_INFORM_MEM(("Nan Disable Req, reason = %d\n", cfg->nancfg.disable_reason));
- ret = wl_cfgnan_stop_handler(ndev, cfg);
- if (ret == -ENODEV) {
- WL_ERR(("Bus is down, no need to proceed\n"));
- } else if (ret != BCME_OK) {
- WL_ERR(("failed to stop nan, error[%d]\n", ret));
- }
- ret = wl_cfgnan_deinit(cfg, dhdp->up);
- if (ret != BCME_OK) {
- WL_ERR(("failed to de-initialize NAN[%d]\n", ret));
- if (!dhd_query_bus_erros(dhdp)) {
- ASSERT(0);
- }
- }
- wl_cfgnan_disable_cleanup(cfg);
- }
- NAN_DBG_EXIT();
- return ret;
-}
-
-static void
-wl_cfgnan_send_stop_event(struct bcm_cfg80211 *cfg)
-{
- s32 ret = BCME_OK;
- nan_event_data_t *nan_event_data = NULL;
-
- NAN_DBG_ENTER();
-
- if (cfg->nancfg.disable_reason == NAN_USER_INITIATED) {
- /* do not send an event to host if the command came from host */
- goto exit;
- }
- nan_event_data = MALLOCZ(cfg->osh, sizeof(nan_event_data_t));
- if (!nan_event_data) {
- WL_ERR(("%s: memory allocation failed\n", __func__));
- ret = BCME_NOMEM;
- goto exit;
- }
- bzero(nan_event_data, sizeof(nan_event_data_t));
-
- if (cfg->nancfg.disable_reason == NAN_CONCURRENCY_CONFLICT) {
- nan_event_data->status = NAN_STATUS_UNSUPPORTED_CONCURRENCY_NAN_DISABLED;
- } else {
- nan_event_data->status = NAN_STATUS_SUCCESS;
- }
-
- ret = memcpy_s(nan_event_data->nan_reason, NAN_ERROR_STR_LEN,
- "NAN_STATUS_SUCCESS", strlen("NAN_STATUS_SUCCESS"));
- if (ret != BCME_OK) {
- WL_ERR(("Failed to copy nan reason string, ret = %d\n", ret));
- goto exit;
- }
-#if (LINUX_VERSION_CODE > KERNEL_VERSION(3, 13, 0)) || defined(WL_VENDOR_EXT_SUPPORT)
- ret = wl_cfgvendor_send_nan_event(cfg->wdev->wiphy, bcmcfg_to_prmry_ndev(cfg),
- GOOGLE_NAN_EVENT_DISABLED, nan_event_data);
- if (ret != BCME_OK) {
- WL_ERR(("Failed to send event to nan hal, (%d)\n",
- GOOGLE_NAN_EVENT_DISABLED));
- }
-#endif /* (LINUX_VERSION_CODE > KERNEL_VERSION(3, 13, 0)) || defined(WL_VENDOR_EXT_SUPPORT) */
-exit:
- if (nan_event_data) {
- MFREE(cfg->osh, nan_event_data, sizeof(nan_event_data_t));
- }
- NAN_DBG_EXIT();
- return;
-}
-
-void wl_cfgnan_disable_cleanup(struct bcm_cfg80211 *cfg)
-{
- int i = 0;
-#ifdef RTT_SUPPORT
- dhd_pub_t *dhdp = (dhd_pub_t *)(cfg->pub);
- rtt_status_info_t *rtt_status = GET_RTTSTATE(dhdp);
- rtt_target_info_t *target_info = NULL;
-
- /* Delete the geofence rtt target list */
- dhd_rtt_delete_geofence_target_list(dhdp);
- /* Cancel pending retry timer if any */
- if (delayed_work_pending(&rtt_status->rtt_retry_timer)) {
- cancel_delayed_work_sync(&rtt_status->rtt_retry_timer);
- }
- /* Remove if any pending proxd timeout for nan-rtt */
- target_info = &rtt_status->rtt_config.target_info[rtt_status->cur_idx];
- if (target_info && target_info->peer == RTT_PEER_NAN) {
- /* Cancel pending proxd timeout work if any */
- if (delayed_work_pending(&rtt_status->proxd_timeout)) {
- cancel_delayed_work_sync(&rtt_status->proxd_timeout);
- }
- }
- /* Delete if any directed nan rtt session */
- dhd_rtt_delete_nan_session(dhdp);
-#endif /* RTT_SUPPORT */
- /* Clear the NDP ID array and dp count */
- for (i = 0; i < NAN_MAX_NDP_PEER; i++) {
- cfg->nancfg.ndp_id[i] = 0;
- }
- cfg->nan_dp_count = 0;
- if (cfg->nancfg.nan_ndp_peer_info) {
- MFREE(cfg->osh, cfg->nancfg.nan_ndp_peer_info,
- cfg->nancfg.max_ndp_count * sizeof(nan_ndp_peer_t));
- cfg->nancfg.nan_ndp_peer_info = NULL;
- }
- return;
-}
-
-/*
- * Deferred nan disable work,
- * scheduled with 3sec delay in order to remove any active nan dps
- */
-void
-wl_cfgnan_delayed_disable(struct work_struct *work)
-{
- struct bcm_cfg80211 *cfg = NULL;
-
- BCM_SET_CONTAINER_OF(cfg, work, struct bcm_cfg80211, nan_disable.work);
-
- rtnl_lock();
- wl_cfgnan_disable(cfg);
- rtnl_unlock();
-}
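/*
 * Editor's sketch (not part of the patch): the deferred disable above relies
 * on the kernel delayed-work API. A minimal module-style illustration of
 * scheduling a handler roughly 3 seconds out; all names here are hypothetical
 * and nothing is taken from the driver.
 */
#include <linux/module.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>

static struct delayed_work sketch_work;

static void sketch_work_fn(struct work_struct *work)
{
	pr_info("sketch: delayed disable work ran\n");
}

static int __init sketch_init(void)
{
	INIT_DELAYED_WORK(&sketch_work, sketch_work_fn);
	schedule_delayed_work(&sketch_work, msecs_to_jiffies(3000));
	return 0;
}

static void __exit sketch_exit(void)
{
	cancel_delayed_work_sync(&sketch_work);
}

module_init(sketch_init);
module_exit(sketch_exit);
MODULE_LICENSE("GPL");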
-
-int
-wl_cfgnan_stop_handler(struct net_device *ndev,
- struct bcm_cfg80211 *cfg)
-{
- bcm_iov_batch_buf_t *nan_buf = NULL;
- s32 ret = BCME_OK;
- uint16 nan_buf_size = NAN_IOCTL_BUF_SIZE;
- wl_nan_iov_t *nan_iov_data = NULL;
- uint32 status;
- uint8 resp_buf[NAN_IOCTL_BUF_SIZE];
-
- NAN_DBG_ENTER();
- NAN_MUTEX_LOCK();
-
- if (!cfg->nan_enable) {
- WL_INFORM(("Nan is not enabled\n"));
- ret = BCME_OK;
- goto fail;
- }
-
- if (cfg->nancfg.disable_reason != NAN_BUS_IS_DOWN) {
- /*
- * The framework performs cleanup (iface removal) on the disable command,
- * so skip sending the event to prevent duplicate iface delete calls.
- */
- WL_INFORM_MEM(("[NAN] Disabling Nan events\n"));
- wl_cfgnan_config_eventmask(ndev, cfg, 0, true);
-
- nan_buf = MALLOCZ(cfg->osh, nan_buf_size);
- if (!nan_buf) {
- WL_ERR(("%s: memory allocation failed\n", __func__));
- ret = BCME_NOMEM;
- goto fail;
- }
-
- nan_iov_data = MALLOCZ(cfg->osh, sizeof(*nan_iov_data));
- if (!nan_iov_data) {
- WL_ERR(("%s: memory allocation failed\n", __func__));
- ret = BCME_NOMEM;
- goto fail;
- }
-
- nan_iov_data->nan_iov_len = NAN_IOCTL_BUF_SIZE;
- nan_buf->version = htol16(WL_NAN_IOV_BATCH_VERSION);
- nan_buf->count = 0;
- nan_iov_data->nan_iov_buf = (uint8 *)(&nan_buf->cmds[0]);
- nan_iov_data->nan_iov_len -= OFFSETOF(bcm_iov_batch_buf_t, cmds[0]);
-
- ret = wl_cfgnan_enable_handler(nan_iov_data, false);
- if (unlikely(ret)) {
- WL_ERR(("nan disable handler failed\n"));
- goto fail;
- }
- nan_buf->count++;
- nan_buf->is_set = true;
- nan_buf_size -= nan_iov_data->nan_iov_len;
- memset_s(resp_buf, sizeof(resp_buf),
- 0, sizeof(resp_buf));
- ret = wl_cfgnan_execute_ioctl(ndev, cfg, nan_buf, nan_buf_size, &status,
- (void*)resp_buf, NAN_IOCTL_BUF_SIZE);
- if (unlikely(ret) || unlikely(status)) {
- WL_ERR(("nan disable failed ret = %d status = %d\n", ret, status));
- goto fail;
- }
- /* Re-enable TDLS if the number of connected interfaces is <= 1 */
- wl_cfg80211_tdls_config(cfg, TDLS_STATE_IF_DELETE, false);
- }
-
- wl_cfgnan_send_stop_event(cfg);
-
-fail:
- /* Resetting instance ID mask */
- cfg->nancfg.inst_id_start = 0;
- memset(cfg->nancfg.svc_inst_id_mask, 0, sizeof(cfg->nancfg.svc_inst_id_mask));
- memset(cfg->svc_info, 0, NAN_MAX_SVC_INST * sizeof(nan_svc_info_t));
- cfg->nan_enable = false;
-
- if (nan_buf) {
- MFREE(cfg->osh, nan_buf, NAN_IOCTL_BUF_SIZE);
- }
- if (nan_iov_data) {
- MFREE(cfg->osh, nan_iov_data, sizeof(*nan_iov_data));
- }
-
- NAN_MUTEX_UNLOCK();
- NAN_DBG_EXIT();
- return ret;
-}
-
-int
-wl_cfgnan_config_handler(struct net_device *ndev, struct bcm_cfg80211 *cfg,
- nan_config_cmd_data_t *cmd_data, uint32 nan_attr_mask)
-{
- bcm_iov_batch_buf_t *nan_buf = NULL;
- s32 ret = BCME_OK;
- uint16 nan_buf_size = NAN_IOCTL_BUF_SIZE;
- wl_nan_iov_t *nan_iov_data = NULL;
- uint8 resp_buf[NAN_IOCTL_BUF_SIZE];
-
- NAN_DBG_ENTER();
-
- /* NAN needs to be enabled before configuring/updating params */
- if (cfg->nan_enable) {
- nan_buf = MALLOCZ(cfg->osh, nan_buf_size);
- if (!nan_buf) {
- WL_ERR(("%s: memory allocation failed\n", __func__));
- ret = BCME_NOMEM;
- goto fail;
- }
-
- nan_iov_data = MALLOCZ(cfg->osh, sizeof(*nan_iov_data));
- if (!nan_iov_data) {
- WL_ERR(("%s: memory allocation failed\n", __func__));
- ret = BCME_NOMEM;
- goto fail;
- }
-
- nan_iov_data->nan_iov_len = NAN_IOCTL_BUF_SIZE;
- nan_buf->version = htol16(WL_NAN_IOV_BATCH_VERSION);
- nan_buf->count = 0;
- nan_iov_data->nan_iov_buf = (uint8 *)(&nan_buf->cmds[0]);
- nan_iov_data->nan_iov_len -= OFFSETOF(bcm_iov_batch_buf_t, cmds[0]);
-
- /* setting sid beacon val */
- if ((nan_attr_mask & NAN_ATTR_SID_BEACON_CONFIG) ||
- (nan_attr_mask & NAN_ATTR_SUB_SID_BEACON_CONFIG)) {
- ret = wl_cfgnan_set_sid_beacon_val(cmd_data, nan_iov_data, nan_attr_mask);
- if (unlikely(ret)) {
- WL_ERR(("sid_beacon sub_cmd set failed\n"));
- goto fail;
- }
- nan_buf->count++;
- }
-
- /* setting master preference and random factor */
- if (cmd_data->metrics.random_factor ||
- cmd_data->metrics.master_pref) {
- ret = wl_cfgnan_set_election_metric(cmd_data, nan_iov_data,
- nan_attr_mask);
- if (unlikely(ret)) {
- WL_ERR(("election_metric sub_cmd set failed\n"));
- goto fail;
- } else {
- nan_buf->count++;
- }
- }
-
- /* setting hop count limit or threshold */
- if (nan_attr_mask & NAN_ATTR_HOP_COUNT_LIMIT_CONFIG) {
- ret = wl_cfgnan_set_hop_count_limit(cmd_data, nan_iov_data);
- if (unlikely(ret)) {
- WL_ERR(("hop_count_limit sub_cmd set failed\n"));
- goto fail;
- }
- nan_buf->count++;
- }
-
- /* setting rssi proximity values for 2.4GHz and 5GHz */
- ret = wl_cfgnan_set_rssi_proximity(cmd_data, nan_iov_data,
- nan_attr_mask);
- if (unlikely(ret)) {
- WL_ERR(("2.4GHz/5GHz rssi proximity threshold set failed\n"));
- goto fail;
- } else {
- nan_buf->count++;
- }
-
- /* setting nan awake dws */
- ret = wl_cfgnan_set_awake_dws(ndev, cmd_data, nan_iov_data,
- cfg, nan_attr_mask);
- if (unlikely(ret)) {
- WL_ERR(("nan awake dws set failed\n"));
- goto fail;
- } else {
- nan_buf->count++;
- }
-
- if (cmd_data->disc_ind_cfg) {
- /* Disable events */
- WL_TRACE(("Disable events based on flag\n"));
- ret = wl_cfgnan_config_eventmask(ndev, cfg,
- cmd_data->disc_ind_cfg, false);
- if (unlikely(ret)) {
- WL_ERR(("Failed to config disc ind flag in event_mask, ret = %d\n",
- ret));
- goto fail;
- }
- }
-
- if ((cfg->support_5g) && ((cmd_data->dwell_time[1]) ||
- (cmd_data->scan_period[1]))) {
- /* setting scan params */
- ret = wl_cfgnan_set_nan_scan_params(ndev, cfg,
- cmd_data, cfg->support_5g, nan_attr_mask);
- if (unlikely(ret)) {
- WL_ERR(("scan params set failed for 5g\n"));
- goto fail;
- }
- }
- if ((cmd_data->dwell_time[0]) ||
- (cmd_data->scan_period[0])) {
- ret = wl_cfgnan_set_nan_scan_params(ndev, cfg, cmd_data, 0, nan_attr_mask);
- if (unlikely(ret)) {
- WL_ERR(("scan params set failed for 2g\n"));
- goto fail;
- }
- }
- nan_buf->is_set = true;
- nan_buf_size -= nan_iov_data->nan_iov_len;
-
- if (nan_buf->count) {
- memset_s(resp_buf, sizeof(resp_buf),
- 0, sizeof(resp_buf));
- ret = wl_cfgnan_execute_ioctl(ndev, cfg, nan_buf, nan_buf_size,
- &(cmd_data->status),
- (void*)resp_buf, NAN_IOCTL_BUF_SIZE);
- if (unlikely(ret) || unlikely(cmd_data->status)) {
- WL_ERR((" nan config handler failed ret = %d status = %d\n",
- ret, cmd_data->status));
- goto fail;
- }
- } else {
- WL_DBG(("No commands to send\n"));
- }
-
- if ((!cmd_data->bmap) || (cmd_data->avail_params.duration == NAN_BAND_INVALID) ||
- (!cmd_data->chanspec[0])) {
- WL_TRACE(("mandatory arguments are not present to set avail\n"));
- ret = BCME_OK;
- } else {
- cmd_data->avail_params.chanspec[0] = cmd_data->chanspec[0];
- cmd_data->avail_params.bmap = cmd_data->bmap;
- /* 1=local, 2=peer, 3=ndc, 4=immutable, 5=response, 6=counter */
- ret = wl_cfgnan_set_nan_avail(bcmcfg_to_prmry_ndev(cfg),
- cfg, &cmd_data->avail_params, WL_AVAIL_LOCAL);
- if (unlikely(ret)) {
- WL_ERR(("Failed to set avail value with type local\n"));
- goto fail;
- }
-
- ret = wl_cfgnan_set_nan_avail(bcmcfg_to_prmry_ndev(cfg),
- cfg, &cmd_data->avail_params, WL_AVAIL_NDC);
- if (unlikely(ret)) {
- WL_ERR(("Failed to set avail value with type ndc\n"));
- goto fail;
- }
- }
- } else {
- WL_INFORM(("nan is not enabled\n"));
- }
-
-fail:
- if (nan_buf) {
- MFREE(cfg->osh, nan_buf, NAN_IOCTL_BUF_SIZE);
- }
- if (nan_iov_data) {
- MFREE(cfg->osh, nan_iov_data, sizeof(*nan_iov_data));
- }
-
- NAN_DBG_EXIT();
- return ret;
-}
-
-int
-wl_cfgnan_support_handler(struct net_device *ndev,
- struct bcm_cfg80211 *cfg, nan_config_cmd_data_t *cmd_data)
-{
- /* TODO: */
- return BCME_OK;
-}
-
-int
-wl_cfgnan_status_handler(struct net_device *ndev,
- struct bcm_cfg80211 *cfg, nan_config_cmd_data_t *cmd_data)
-{
- /* TODO: */
- return BCME_OK;
-}
-
-#ifdef WL_NAN_DISC_CACHE
-static
-nan_svc_info_t *
-wl_cfgnan_get_svc_inst(struct bcm_cfg80211 *cfg,
- wl_nan_instance_id svc_inst_id, uint8 ndp_id)
-{
- uint8 i, j;
- if (ndp_id) {
- for (i = 0; i < NAN_MAX_SVC_INST; i++) {
- for (j = 0; j < NAN_MAX_SVC_INST; j++) {
- if (cfg->svc_info[i].ndp_id[j] == ndp_id) {
- return &cfg->svc_info[i];
- }
- }
- }
- } else if (svc_inst_id) {
- for (i = 0; i < NAN_MAX_SVC_INST; i++) {
- if (cfg->svc_info[i].svc_id == svc_inst_id) {
- return &cfg->svc_info[i];
- }
- }
-
- }
- return NULL;
-}
-
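-/*
- * Return the ranging instance whose peer address matches the given peer,
- * or NULL if the peer is NULL or no instance matches.
- */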
-nan_ranging_inst_t *
-wl_cfgnan_check_for_ranging(struct bcm_cfg80211 *cfg, struct ether_addr *peer)
-{
- uint8 i;
- if (peer) {
- for (i = 0; i < NAN_MAX_RANGING_INST; i++) {
- if (!memcmp(peer, &cfg->nan_ranging_info[i].peer_addr,
- ETHER_ADDR_LEN)) {
- return &(cfg->nan_ranging_info[i]);
- }
- }
- }
- return NULL;
-}
-
-nan_ranging_inst_t *
-wl_cfgnan_get_rng_inst_by_id(struct bcm_cfg80211 *cfg, uint8 rng_id)
-{
- uint8 i;
- if (rng_id) {
- for (i = 0; i < NAN_MAX_RANGING_INST; i++) {
- if (cfg->nan_ranging_info[i].range_id == rng_id)
- {
- return &(cfg->nan_ranging_info[i]);
- }
- }
- }
- WL_ERR(("Couldn't find the ranging instance for rng_id %d\n", rng_id));
- return NULL;
-}
-
-/*
- * Find the ranging inst for the given peer;
- * if not found, create one with the given range role.
- */
-nan_ranging_inst_t *
-wl_cfgnan_get_ranging_inst(struct bcm_cfg80211 *cfg, struct ether_addr *peer,
- nan_range_role_t range_role)
-{
- nan_ranging_inst_t *ranging_inst = NULL;
- uint8 i;
-
- if (!peer) {
- WL_ERR(("Peer address is NULL"));
- goto done;
- }
-
- ranging_inst = wl_cfgnan_check_for_ranging(cfg, peer);
- if (ranging_inst) {
- goto done;
- }
- WL_TRACE(("Creating Ranging instance \n"));
-
- for (i = 0; i < NAN_MAX_RANGING_INST; i++) {
- if (cfg->nan_ranging_info[i].in_use == FALSE) {
- break;
- }
- }
-
- if (i == NAN_MAX_RANGING_INST) {
- WL_ERR(("No buffer available for the ranging instance"));
- goto done;
- }
- ranging_inst = &cfg->nan_ranging_info[i];
- memcpy(&ranging_inst->peer_addr, peer, ETHER_ADDR_LEN);
- ranging_inst->range_status = NAN_RANGING_REQUIRED;
- ranging_inst->prev_distance_mm = INVALID_DISTANCE;
- ranging_inst->range_role = range_role;
- ranging_inst->in_use = TRUE;
-
-done:
- return ranging_inst;
-}
-#endif /* WL_NAN_DISC_CACHE */
-
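-/*
- * Extract the instance id from an iovar response: the NDP id from
- * wl_nan_dp_req_ret_t for a data request, or the assigned range id for a
- * range request.
- */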
-static int
-process_resp_buf(void *iov_resp,
- uint8 *instance_id, uint16 sub_cmd_id)
-{
- int res = BCME_OK;
- NAN_DBG_ENTER();
-
- if (sub_cmd_id == WL_NAN_CMD_DATA_DATAREQ) {
- wl_nan_dp_req_ret_t *dpreq_ret = NULL;
- dpreq_ret = (wl_nan_dp_req_ret_t *)(iov_resp);
- *instance_id = dpreq_ret->ndp_id;
- WL_TRACE(("%s: Initiator NDI: " MACDBG "\n",
- __FUNCTION__, MAC2STRDBG(dpreq_ret->indi.octet)));
- } else if (sub_cmd_id == WL_NAN_CMD_RANGE_REQUEST) {
- wl_nan_range_id *range_id = NULL;
- range_id = (wl_nan_range_id *)(iov_resp);
- *instance_id = *range_id;
- WL_TRACE(("Range id: %d\n", *range_id));
- }
- WL_DBG(("instance_id: %d\n", *instance_id));
- NAN_DBG_EXIT();
- return res;
-}
-
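-/*
- * Cancel an on-going ranging session via WL_NAN_CMD_RANGE_CANCEL. The
- * extended cancel payload (range id + flags) is used when the firmware NAN
- * version supports it; otherwise only the range id is sent.
- */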
-int
-wl_cfgnan_cancel_ranging(struct net_device *ndev,
- struct bcm_cfg80211 *cfg, uint8 range_id, uint8 flags, uint32 *status)
-{
- bcm_iov_batch_buf_t *nan_buf = NULL;
- s32 ret = BCME_OK;
- uint16 nan_iov_start, nan_iov_end;
- uint16 nan_buf_size = NAN_IOCTL_BUF_SIZE;
- uint16 subcmd_len;
- bcm_iov_batch_subcmd_t *sub_cmd = NULL;
- wl_nan_iov_t *nan_iov_data = NULL;
- uint8 resp_buf[NAN_IOCTL_BUF_SIZE];
- wl_nan_range_cancel_ext_t rng_cncl;
- uint8 size_of_iov;
-
- NAN_DBG_ENTER();
-
- if (cfg->nancfg.version >= NAN_RANGE_EXT_CANCEL_SUPPORT_VER) {
- size_of_iov = sizeof(rng_cncl);
- } else {
- size_of_iov = sizeof(range_id);
- }
-
- memset_s(&rng_cncl, sizeof(rng_cncl), 0, sizeof(rng_cncl));
- rng_cncl.range_id = range_id;
- rng_cncl.flags = flags;
-
- nan_buf = MALLOCZ(cfg->osh, nan_buf_size);
- if (!nan_buf) {
- WL_ERR(("%s: memory allocation failed\n", __func__));
- ret = BCME_NOMEM;
- goto fail;
- }
-
- nan_iov_data = MALLOCZ(cfg->osh, sizeof(*nan_iov_data));
- if (!nan_iov_data) {
- WL_ERR(("%s: memory allocation failed\n", __func__));
- ret = BCME_NOMEM;
- goto fail;
- }
-
- nan_iov_data->nan_iov_len = nan_iov_start = NAN_IOCTL_BUF_SIZE;
- nan_buf->version = htol16(WL_NAN_IOV_BATCH_VERSION);
- nan_buf->count = 0;
- nan_iov_data->nan_iov_buf = (uint8 *)(&nan_buf->cmds[0]);
- nan_iov_data->nan_iov_len -= OFFSETOF(bcm_iov_batch_buf_t, cmds[0]);
- sub_cmd = (bcm_iov_batch_subcmd_t*)(nan_iov_data->nan_iov_buf);
-
- ret = wl_cfg_nan_check_cmd_len(nan_iov_data->nan_iov_len,
- size_of_iov, &subcmd_len);
- if (unlikely(ret)) {
- WL_ERR(("nan_sub_cmd check failed\n"));
- goto fail;
- }
-
- sub_cmd->id = htod16(WL_NAN_CMD_RANGE_CANCEL);
- sub_cmd->len = sizeof(sub_cmd->u.options) + size_of_iov;
- sub_cmd->u.options = htol32(BCM_XTLV_OPTION_ALIGN32);
-
- /* Reduce the iov_len size by subcmd_len */
- nan_iov_data->nan_iov_len -= subcmd_len;
- nan_iov_end = nan_iov_data->nan_iov_len;
- nan_buf_size = (nan_iov_start - nan_iov_end);
-
- if (size_of_iov >= sizeof(rng_cncl)) {
- (void)memcpy_s(sub_cmd->data, nan_iov_data->nan_iov_len,
- &rng_cncl, size_of_iov);
- } else {
- (void)memcpy_s(sub_cmd->data, nan_iov_data->nan_iov_len,
- &range_id, size_of_iov);
- }
-
- nan_buf->is_set = true;
- nan_buf->count++;
- memset_s(resp_buf, sizeof(resp_buf),
- 0, sizeof(resp_buf));
- ret = wl_cfgnan_execute_ioctl(ndev, cfg, nan_buf, nan_buf_size, status,
- (void*)resp_buf, NAN_IOCTL_BUF_SIZE);
- if (unlikely(ret) || unlikely(*status)) {
- WL_ERR(("Range ID %d cancel failed ret %d status %d \n", range_id, ret, *status));
- goto fail;
- }
-	WL_MEM(("Range cancel with Range ID [%d] successful\n", range_id));
-fail:
- if (nan_buf) {
- MFREE(cfg->osh, nan_buf, NAN_IOCTL_BUF_SIZE);
- }
- if (nan_iov_data) {
- MFREE(cfg->osh, nan_iov_data, sizeof(*nan_iov_data));
- }
- NAN_DBG_EXIT();
- return ret;
-}
-
-#ifdef WL_NAN_DISC_CACHE
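-/*
- * Cache (or update) host-side info for a publish/subscribe instance:
- * ranging parameters, flags, tx match filter and service hash are stored
- * in cfg->svc_info for later use by the discovery/ranging paths.
- */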
-static int
-wl_cfgnan_cache_svc_info(struct bcm_cfg80211 *cfg,
- nan_discover_cmd_data_t *cmd_data, uint16 cmd_id, bool update)
-{
- int ret = BCME_OK;
- int i;
- nan_svc_info_t *svc_info;
- uint8 svc_id = (cmd_id == WL_NAN_CMD_SD_SUBSCRIBE) ? cmd_data->sub_id :
- cmd_data->pub_id;
-
- for (i = 0; i < NAN_MAX_SVC_INST; i++) {
- if (update) {
- if (cfg->svc_info[i].svc_id == svc_id) {
- svc_info = &cfg->svc_info[i];
- break;
- } else {
- continue;
- }
- }
- if (!cfg->svc_info[i].svc_id) {
- svc_info = &cfg->svc_info[i];
- break;
- }
- }
- if (i == NAN_MAX_SVC_INST) {
-		WL_ERR(("%s: cannot accommodate ranging session\n", __FUNCTION__));
- ret = BCME_NORESOURCE;
- goto fail;
- }
- if (cmd_data->sde_control_flag & NAN_SDE_CF_RANGING_REQUIRED) {
- WL_TRACE(("%s: updating ranging info, enabling", __FUNCTION__));
- svc_info->status = 1;
- svc_info->ranging_interval = cmd_data->ranging_intvl_msec;
- svc_info->ranging_ind = cmd_data->ranging_indication;
- svc_info->ingress_limit = cmd_data->ingress_limit;
- svc_info->egress_limit = cmd_data->egress_limit;
- svc_info->ranging_required = 1;
- } else {
- WL_TRACE(("%s: updating ranging info, disabling", __FUNCTION__));
- svc_info->status = 0;
- svc_info->ranging_interval = 0;
- svc_info->ranging_ind = 0;
- svc_info->ingress_limit = 0;
- svc_info->egress_limit = 0;
- svc_info->ranging_required = 0;
- }
-
- /* Reset Range status flags on svc creation/update */
- svc_info->svc_range_status = 0;
- svc_info->flags = cmd_data->flags;
-
- if (cmd_id == WL_NAN_CMD_SD_SUBSCRIBE) {
- svc_info->svc_id = cmd_data->sub_id;
- if ((cmd_data->flags & WL_NAN_SUB_ACTIVE) &&
- (cmd_data->tx_match.dlen)) {
- ret = memcpy_s(svc_info->tx_match_filter, sizeof(svc_info->tx_match_filter),
- cmd_data->tx_match.data, cmd_data->tx_match.dlen);
- if (ret != BCME_OK) {
- WL_ERR(("Failed to copy tx match filter data\n"));
- goto fail;
- }
- svc_info->tx_match_filter_len = cmd_data->tx_match.dlen;
- }
- } else {
- svc_info->svc_id = cmd_data->pub_id;
- }
- ret = memcpy_s(svc_info->svc_hash, sizeof(svc_info->svc_hash),
- cmd_data->svc_hash.data, WL_NAN_SVC_HASH_LEN);
- if (ret != BCME_OK) {
- WL_ERR(("Failed to copy svc hash\n"));
- }
-fail:
- return ret;
-
-}
-
-static bool
-wl_cfgnan_clear_svc_from_ranging_inst(struct bcm_cfg80211 *cfg,
- nan_ranging_inst_t *ranging_inst, nan_svc_info_t *svc)
-{
- int i = 0;
- bool cleared = FALSE;
-
- if (svc && ranging_inst->in_use) {
- for (i = 0; i < MAX_SUBSCRIBES; i++) {
- if (svc == ranging_inst->svc_idx[i]) {
- ranging_inst->num_svc_ctx--;
- ranging_inst->svc_idx[i] = NULL;
- cleared = TRUE;
- /*
- * This list is maintained duplicate-free,
- * hence we can break here
- */
- break;
- }
- }
- }
- return cleared;
-}
-
-static int
-wl_cfgnan_clear_svc_from_all_ranging_inst(struct bcm_cfg80211 *cfg, uint8 svc_id)
-{
- nan_ranging_inst_t *ranging_inst;
- int i = 0;
- int ret = BCME_OK;
-
- nan_svc_info_t *svc = wl_cfgnan_get_svc_inst(cfg, svc_id, 0);
- if (!svc) {
- WL_ERR(("\n svc not found \n"));
- ret = BCME_NOTFOUND;
- goto done;
- }
- for (i = 0; i < NAN_MAX_RANGING_INST; i++) {
- ranging_inst = &(cfg->nan_ranging_info[i]);
- wl_cfgnan_clear_svc_from_ranging_inst(cfg, ranging_inst, svc);
- }
-
-done:
- return ret;
-}
-
-static int
-wl_cfgnan_ranging_clear_publish(struct bcm_cfg80211 *cfg,
- struct ether_addr *peer, uint8 svc_id)
-{
- nan_ranging_inst_t *ranging_inst = NULL;
- nan_svc_info_t *svc = NULL;
- bool cleared = FALSE;
- int ret = BCME_OK;
-
- ranging_inst = wl_cfgnan_check_for_ranging(cfg, peer);
- if (!ranging_inst || !ranging_inst->in_use) {
- goto done;
- }
-
- WL_INFORM_MEM(("Check clear Ranging for pub update, sub id = %d,"
- " range_id = %d, peer addr = " MACDBG " \n", svc_id,
- ranging_inst->range_id, MAC2STRDBG(peer)));
- svc = wl_cfgnan_get_svc_inst(cfg, svc_id, 0);
- if (!svc) {
- WL_ERR(("\n svc not found, svc_id = %d\n", svc_id));
- ret = BCME_NOTFOUND;
- goto done;
- }
-
- cleared = wl_cfgnan_clear_svc_from_ranging_inst(cfg, ranging_inst, svc);
- if (!cleared) {
-		/* An update is needed only if this svc was actually cleared */
- ret = BCME_NOTFOUND;
- goto done;
- }
-
- wl_cfgnan_terminate_ranging_session(cfg, ranging_inst);
-
-done:
- return ret;
-}
-
-#ifdef RTT_SUPPORT
-/* API to terminate/clear all directed nan-rtt sessions.
- * Can be called from framework RTT stop context
- */
-int
-wl_cfgnan_terminate_directed_rtt_sessions(struct net_device *ndev,
- struct bcm_cfg80211 *cfg)
-{
- nan_ranging_inst_t *ranging_inst;
- int i, ret = BCME_OK;
- uint32 status;
-
- for (i = 0; i < NAN_MAX_RANGING_INST; i++) {
- ranging_inst = &cfg->nan_ranging_info[i];
- if (ranging_inst->range_id && ranging_inst->range_type == RTT_TYPE_NAN_DIRECTED) {
- if (ranging_inst->range_status == NAN_RANGING_IN_PROGRESS) {
- ret = wl_cfgnan_cancel_ranging(ndev, cfg, ranging_inst->range_id,
- NAN_RNG_TERM_FLAG_IMMEDIATE, &status);
- if (unlikely(ret) || unlikely(status)) {
- WL_ERR(("nan range cancel failed ret = %d status = %d\n",
- ret, status));
- }
- }
- wl_cfgnan_reset_geofence_ranging(cfg, ranging_inst,
- RTT_SHCED_HOST_DIRECTED_TERM);
- }
- }
- return ret;
-}
-#endif /* RTT_SUPPORT */
-
-/*
- * Suspend the ongoing geofence ranging session with a peer
- * if the on-going ranging is with the given peer.
- * If peer is NULL, suspend the on-going ranging unconditionally.
- * Do nothing if:
- * - ranging is not in progress
- * - ranging is in progress but not with the given peer
- */
-int
-wl_cfgnan_suspend_geofence_rng_session(struct net_device *ndev,
- struct ether_addr *peer, int suspend_reason, u8 cancel_flags)
-{
- int ret = BCME_OK;
- uint32 status;
- nan_ranging_inst_t *ranging_inst = NULL;
- struct ether_addr* peer_addr = NULL;
- struct bcm_cfg80211 *cfg = wl_get_cfg(ndev);
-#ifdef RTT_SUPPORT
- dhd_pub_t *dhd = (struct dhd_pub *)(cfg->pub);
- rtt_geofence_target_info_t *geofence_target_info;
-
- geofence_target_info = dhd_rtt_get_geofence_current_target(dhd);
- if (!geofence_target_info) {
- WL_DBG(("No Geofencing Targets, suspend req dropped\n"));
- goto exit;
- }
- peer_addr = &geofence_target_info->peer_addr;
-
- ranging_inst = wl_cfgnan_check_for_ranging(cfg, peer_addr);
- if (dhd_rtt_get_geofence_rtt_state(dhd) == FALSE) {
- WL_DBG(("Geofencing Ranging not in progress, suspend req dropped\n"));
- goto exit;
- }
-
- if (peer && memcmp(peer_addr, peer, ETHER_ADDR_LEN)) {
- if (suspend_reason == RTT_GEO_SUSPN_HOST_NDP_TRIGGER ||
- suspend_reason == RTT_GEO_SUSPN_PEER_NDP_TRIGGER) {
- /* NDP and Ranging can coexist with different Peers */
- WL_DBG(("Geofencing Ranging not in progress with given peer,"
- " suspend req dropped\n"));
- goto exit;
- }
- }
-#endif /* RTT_SUPPORT */
-
- ASSERT((ranging_inst != NULL));
- if (ranging_inst) {
- if (ranging_inst->range_status != NAN_RANGING_IN_PROGRESS) {
- WL_DBG(("Ranging Inst with peer not in progress, "
- " suspend req dropped\n"));
- goto exit;
- }
- cancel_flags |= NAN_RNG_TERM_FLAG_IMMEDIATE;
- ret = wl_cfgnan_cancel_ranging(ndev, cfg,
- ranging_inst->range_id, cancel_flags, &status);
- if (unlikely(ret) || unlikely(status)) {
- WL_ERR(("Geofence Range suspended failed, err = %d, status = %d,"
- " range_id = %d, suspend_reason = %d, " MACDBG " \n",
- ret, status, ranging_inst->range_id,
- suspend_reason, MAC2STRDBG(peer_addr)));
- }
- ranging_inst->range_status = NAN_RANGING_REQUIRED;
- WL_INFORM_MEM(("Geofence Range suspended, range_id = %d,"
- " suspend_reason = %d, " MACDBG " \n", ranging_inst->range_id,
- suspend_reason, MAC2STRDBG(peer_addr)));
-#ifdef RTT_SUPPORT
- /* Set geofence RTT in progress state to false */
- dhd_rtt_set_geofence_rtt_state(dhd, FALSE);
-#endif /* RTT_SUPPORT */
- }
-
-exit:
- /* Post pending discovery results */
- if (ranging_inst &&
- ((suspend_reason == RTT_GEO_SUSPN_HOST_NDP_TRIGGER) ||
- (suspend_reason == RTT_GEO_SUSPN_PEER_NDP_TRIGGER))) {
- wl_cfgnan_disc_result_on_geofence_cancel(cfg, ranging_inst);
- }
-
- return ret;
-}
-
-static void
-wl_cfgnan_clear_svc_cache(struct bcm_cfg80211 *cfg,
- wl_nan_instance_id svc_id)
-{
- nan_svc_info_t *svc;
- svc = wl_cfgnan_get_svc_inst(cfg, svc_id, 0);
- if (svc) {
- WL_DBG(("clearing cached svc info for svc id %d\n", svc_id));
- memset(svc, 0, sizeof(*svc));
- }
-}
-
-/*
- * Terminate given ranging instance
- * if no pending ranging sub service
- */
-static void
-wl_cfgnan_terminate_ranging_session(struct bcm_cfg80211 *cfg,
- nan_ranging_inst_t *ranging_inst)
-{
- int ret = BCME_OK;
- uint32 status;
-#ifdef RTT_SUPPORT
- rtt_geofence_target_info_t* geofence_target = NULL;
- dhd_pub_t *dhd = (struct dhd_pub *)(cfg->pub);
- int8 index;
-#endif /* RTT_SUPPORT */
-
- if (ranging_inst->range_id == 0) {
- /* Make sure, range inst is valid in caller */
- return;
- }
-
- if (ranging_inst->num_svc_ctx != 0) {
- /*
- * Make sure to remove all svc_insts for range_inst
- * in order to cancel ranging and remove target in caller
- */
- return;
- }
-
- /* Cancel Ranging if in progress for rang_inst */
- if (ranging_inst->range_status == NAN_RANGING_IN_PROGRESS) {
- ret = wl_cfgnan_cancel_ranging(bcmcfg_to_prmry_ndev(cfg),
- cfg, ranging_inst->range_id,
- NAN_RNG_TERM_FLAG_IMMEDIATE, &status);
- if (unlikely(ret) || unlikely(status)) {
- WL_ERR(("%s:nan range cancel failed ret = %d status = %d\n",
- __FUNCTION__, ret, status));
- } else {
- WL_DBG(("Range cancelled \n"));
- /* Set geofence RTT in progress state to false */
-#ifdef RTT_SUPPORT
- dhd_rtt_set_geofence_rtt_state(dhd, FALSE);
-#endif /* RTT_SUPPORT */
- }
- }
-
-#ifdef RTT_SUPPORT
- geofence_target = dhd_rtt_get_geofence_target(dhd,
- &ranging_inst->peer_addr, &index);
- if (geofence_target) {
- dhd_rtt_remove_geofence_target(dhd, &geofence_target->peer_addr);
- WL_INFORM_MEM(("Removing Ranging Instance " MACDBG "\n",
- MAC2STRDBG(&(ranging_inst->peer_addr))));
- bzero(ranging_inst, sizeof(nan_ranging_inst_t));
- }
-#endif /* RTT_SUPPORT */
-}
-
-/*
- * Terminate all ranging sessions
- * with no pending ranging sub service
- */
-static void
-wl_cfgnan_terminate_all_obsolete_ranging_sessions(
- struct bcm_cfg80211 *cfg)
-{
- /* cancel all related ranging instances */
- uint8 i = 0;
- nan_ranging_inst_t *ranging_inst = NULL;
-
- for (i = 0; i < NAN_MAX_RANGING_INST; i++) {
- ranging_inst = &cfg->nan_ranging_info[i];
- if (ranging_inst->in_use) {
- wl_cfgnan_terminate_ranging_session(cfg, ranging_inst);
- }
- }
-
- return;
-}
-
-/*
- * Store svc_ctx for processing during RNG_RPT
- * Return BCME_OK only when svc is added
- */
-static int
-wl_cfgnan_update_ranging_svc_inst(nan_ranging_inst_t *ranging_inst,
- nan_svc_info_t *svc)
-{
- int ret = BCME_OK;
- int i = 0;
-
- for (i = 0; i < MAX_SUBSCRIBES; i++) {
- if (ranging_inst->svc_idx[i] == svc) {
- WL_DBG(("SVC Ctx for ranging already present, "
- " Duplication not supported: sub_id: %d\n", svc->svc_id));
- ret = BCME_UNSUPPORTED;
- goto done;
- }
- }
- for (i = 0; i < MAX_SUBSCRIBES; i++) {
- if (ranging_inst->svc_idx[i]) {
- continue;
- } else {
- WL_DBG(("Adding SVC Ctx for ranging..svc_id %d\n", svc->svc_id));
- ranging_inst->svc_idx[i] = svc;
- ranging_inst->num_svc_ctx++;
- ret = BCME_OK;
- goto done;
- }
- }
- if (i == MAX_SUBSCRIBES) {
- WL_ERR(("wl_cfgnan_update_ranging_svc_inst: "
- "No resource to hold Ref SVC ctx..svc_id %d\n", svc->svc_id));
- ret = BCME_NORESOURCE;
- goto done;
- }
-done:
- return ret;
-}
-
-#ifdef RTT_SUPPORT
-int
-wl_cfgnan_trigger_geofencing_ranging(struct net_device *dev,
- struct ether_addr *peer_addr)
-{
- int ret = BCME_OK;
- int err_at = 0;
- struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
- int8 index = -1;
- dhd_pub_t *dhd = (struct dhd_pub *)(cfg->pub);
- rtt_geofence_target_info_t* geofence_target;
- nan_ranging_inst_t *ranging_inst;
- ranging_inst = wl_cfgnan_check_for_ranging(cfg, peer_addr);
-
- if (!ranging_inst) {
- WL_INFORM_MEM(("Ranging Entry for peer:" MACDBG ", not found\n",
- MAC2STRDBG(peer_addr)));
- ASSERT(0);
- /* Ranging inst should have been added before adding target */
- dhd_rtt_remove_geofence_target(dhd, peer_addr);
- ret = BCME_ERROR;
- err_at = 1;
- goto exit;
- }
-
- ASSERT(ranging_inst->range_status !=
- NAN_RANGING_IN_PROGRESS);
-
- if (ranging_inst->range_status !=
- NAN_RANGING_IN_PROGRESS) {
- WL_DBG(("Trigger range request with first svc in svc list of range inst\n"));
- ret = wl_cfgnan_trigger_ranging(bcmcfg_to_prmry_ndev(cfg),
- cfg, ranging_inst, ranging_inst->svc_idx[0],
- NAN_RANGE_REQ_CMD, TRUE);
- if (ret != BCME_OK) {
-			/* Unsupported indicates a ranging session already exists with this peer */
- if (ret == BCME_BUSY) {
- /* TODO: Attempt again over a timer */
- err_at = 2;
- } else {
- /* Remove target and clean ranging inst */
- geofence_target = dhd_rtt_get_geofence_target(dhd,
- &ranging_inst->peer_addr, &index);
- if (geofence_target) {
- dhd_rtt_remove_geofence_target(dhd,
- &geofence_target->peer_addr);
- }
- bzero(ranging_inst, sizeof(nan_ranging_inst_t));
- err_at = 3;
- goto exit;
- }
- }
- } else {
-		/* already in progress. This should not happen */
- ASSERT(0);
- ret = BCME_ERROR;
- err_at = 4;
- goto exit;
- }
-
-exit:
- if (ret) {
- WL_ERR(("wl_cfgnan_trigger_geofencing_ranging: Failed to "
- "trigger ranging, peer: " MACDBG " ret"
- " = (%d), err_at = %d\n", MAC2STRDBG(peer_addr),
- ret, err_at));
- }
- return ret;
-}
-#endif /* RTT_SUPPORT */
-
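-/*
- * On a discovery result for a subscription that requires ranging, create or
- * reuse a ranging instance for the peer, add the peer as a geofence target
- * when needed and attach the service context to the ranging instance.
- */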
-static int
-wl_cfgnan_check_disc_result_for_ranging(struct bcm_cfg80211 *cfg,
- nan_event_data_t* nan_event_data)
-{
- nan_svc_info_t *svc;
- int ret = BCME_OK;
-#ifdef RTT_SUPPORT
- rtt_geofence_target_info_t geofence_target;
- dhd_pub_t *dhd = (struct dhd_pub *)(cfg->pub);
- uint8 index;
-#endif /* RTT_SUPPORT */
- bool add_target;
-
- svc = wl_cfgnan_get_svc_inst(cfg, nan_event_data->sub_id, 0);
-
- if (svc && svc->ranging_required) {
- nan_ranging_inst_t *ranging_inst;
- ranging_inst = wl_cfgnan_get_ranging_inst(cfg,
- &nan_event_data->remote_nmi,
- NAN_RANGING_ROLE_INITIATOR);
- if (!ranging_inst) {
- ret = BCME_NORESOURCE;
- goto exit;
- }
- ASSERT(ranging_inst->range_role != NAN_RANGING_ROLE_INVALID);
-
- /* For responder role, range state should be in progress only */
- ASSERT(ranging_inst->range_role == NAN_RANGING_ROLE_INITIATOR ||
- ranging_inst->range_status == NAN_RANGING_IN_PROGRESS);
-
- /*
- * On receiving a disc result with ranging required, add the target if
- * the ranging role is responder (range state must always be in progress)
- * or the ranging role is initiator and ranging is not already in progress.
- */
- add_target = ((ranging_inst->range_role == NAN_RANGING_ROLE_RESPONDER) ||
- ((ranging_inst->range_role == NAN_RANGING_ROLE_INITIATOR) &&
- (ranging_inst->range_status != NAN_RANGING_IN_PROGRESS)));
- if (add_target) {
- WL_DBG(("Add Range request to geofence target list\n"));
-#ifdef RTT_SUPPORT
- memcpy(&geofence_target.peer_addr, &nan_event_data->remote_nmi,
- ETHER_ADDR_LEN);
- /* check if target is already added */
- if (!dhd_rtt_get_geofence_target(dhd, &nan_event_data->remote_nmi, &index))
- {
- ret = dhd_rtt_add_geofence_target(dhd, &geofence_target);
- if (unlikely(ret)) {
- WL_ERR(("Failed to add geofence Tgt, ret = (%d)\n", ret));
- bzero(ranging_inst, sizeof(*ranging_inst));
- goto exit;
- } else {
- WL_INFORM_MEM(("Geofence Tgt Added:" MACDBG " sub_id:%d\n",
- MAC2STRDBG(&geofence_target.peer_addr),
- svc->svc_id));
- }
- ranging_inst->range_type = RTT_TYPE_NAN_GEOFENCE;
- }
-#endif /* RTT_SUPPORT */
- if (wl_cfgnan_update_ranging_svc_inst(ranging_inst, svc)
- != BCME_OK) {
- goto exit;
- }
-#ifdef RTT_SUPPORT
- if (ranging_inst->range_role == NAN_RANGING_ROLE_RESPONDER) {
- /* Adding RTT target while responder, leads to role concurrency */
- dhd_rtt_set_role_concurrency_state(dhd, TRUE);
- }
- else {
- /* Trigger/Reset geofence RTT */
- wl_cfgnan_reset_geofence_ranging(cfg, ranging_inst,
- RTT_SCHED_SUB_MATCH);
- }
-#endif /* RTT_SUPPORT */
- } else {
- /* Target already added, check & add svc_inst ref to rang_inst */
- wl_cfgnan_update_ranging_svc_inst(ranging_inst, svc);
- }
-		/* Disc event will be given on receiving the range_rpt event */
-		WL_TRACE(("Disc event will be given when Range RPT event is recvd"));
- } else {
- ret = BCME_UNSUPPORTED;
- }
-
-exit:
- return ret;
-}
-
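-/*
- * Return TRUE if another ranging session may be started, i.e. the number of
- * sessions currently in progress is below NAN_MAX_RANGING_SSN_ALLOWED.
- */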
-bool
-wl_cfgnan_ranging_allowed(struct bcm_cfg80211 *cfg)
-{
- int i = 0;
- uint8 rng_progress_count = 0;
- nan_ranging_inst_t *ranging_inst = NULL;
-
- for (i = 0; i < NAN_MAX_RANGING_INST; i++) {
- ranging_inst = &cfg->nan_ranging_info[i];
- if (ranging_inst->range_status == NAN_RANGING_IN_PROGRESS) {
- rng_progress_count++;
- }
- }
-
- ASSERT(rng_progress_count <= NAN_MAX_RANGING_SSN_ALLOWED);
- if (rng_progress_count == NAN_MAX_RANGING_SSN_ALLOWED) {
- return FALSE;
- }
- return TRUE;
-}
-
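-/*
- * Cancel every in-progress ranging session in which we are the responder and
- * clear the corresponding instances; returns the number of sessions cancelled.
- */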
-uint8
-wl_cfgnan_cancel_rng_responders(struct net_device *ndev,
- struct bcm_cfg80211 *cfg)
-{
- int i = 0;
- uint8 num_resp_cancelled = 0;
- int status, ret;
- nan_ranging_inst_t *ranging_inst = NULL;
-
- for (i = 0; i < NAN_MAX_RANGING_INST; i++) {
- ranging_inst = &cfg->nan_ranging_info[i];
- if (ranging_inst->range_status == NAN_RANGING_IN_PROGRESS &&
- ranging_inst->range_role == NAN_RANGING_ROLE_RESPONDER) {
- num_resp_cancelled++;
- WL_ERR((" Cancelling responder\n"));
- ret = wl_cfgnan_cancel_ranging(bcmcfg_to_prmry_ndev(cfg), cfg,
- ranging_inst->range_id, NAN_RNG_TERM_FLAG_IMMEDIATE, &status);
- if (unlikely(ret) || unlikely(status)) {
- WL_ERR(("wl_cfgnan_cancel_rng_responders: Failed to cancel"
- " existing ranging, ret = (%d)\n", ret));
- }
- WL_INFORM_MEM(("Removing Ranging Instance " MACDBG "\n",
- MAC2STRDBG(&(ranging_inst->peer_addr))));
- bzero(ranging_inst, sizeof(*ranging_inst));
- }
- }
- return num_resp_cancelled;
-}
-
-#ifdef RTT_SUPPORT
-/* ranging request event handler */
-static int
-wl_cfgnan_handle_ranging_ind(struct bcm_cfg80211 *cfg,
- wl_nan_ev_rng_req_ind_t *rng_ind)
-{
- int ret = BCME_OK;
- nan_ranging_inst_t *ranging_inst = NULL;
- uint32 status;
- uint8 cancel_flags = 0;
- bool accept = TRUE;
- nan_ranging_inst_t tmp_rng_inst;
- struct net_device *ndev = bcmcfg_to_prmry_ndev(cfg);
-
- WL_DBG(("Trigger range response\n"));
-
-	/* Check if we already have a ranging session with the peer.
-	 * If so, the policy is:
-	 * If we are already a geofence initiator or responder w.r.t. the peer,
-	 * silently tear down the current session and accept the REQ.
-	 * If we are in the direct RTT initiator role, reject.
-	 */
- ranging_inst = wl_cfgnan_check_for_ranging(cfg, &(rng_ind->peer_m_addr));
- if (ranging_inst) {
- if (ranging_inst->range_type == RTT_TYPE_NAN_GEOFENCE ||
- ranging_inst->range_role == NAN_RANGING_ROLE_RESPONDER) {
- WL_INFORM_MEM(("Already responder/geofence for the Peer, cancel current"
- " ssn and accept new one, range_type = %d, role = %d\n",
- ranging_inst->range_type, ranging_inst->range_role));
- cancel_flags = NAN_RNG_TERM_FLAG_IMMEDIATE |
- NAN_RNG_TERM_FLAG_SILIENT_TEARDOWN;
-
- if (ranging_inst->range_type == RTT_TYPE_NAN_GEOFENCE &&
- ranging_inst->range_role == NAN_RANGING_ROLE_INITIATOR) {
- wl_cfgnan_suspend_geofence_rng_session(ndev,
- &(rng_ind->peer_m_addr), RTT_GEO_SUSPN_PEER_RTT_TRIGGER,
- cancel_flags);
- } else {
- ret = wl_cfgnan_cancel_ranging(ndev, cfg,
- ranging_inst->range_id, cancel_flags, &status);
- if (unlikely(ret)) {
- WL_ERR(("wl_cfgnan_handle_ranging_ind: Failed to cancel"
- " existing ranging, ret = (%d)\n", ret));
- goto done;
- }
- }
- ranging_inst->range_status = NAN_RANGING_REQUIRED;
- ranging_inst->range_role = NAN_RANGING_ROLE_RESPONDER;
- ranging_inst->range_type = 0;
- } else {
- WL_ERR(("Reject the RNG_REQ_IND in direct rtt initiator role\n"));
- ret = BCME_BUSY;
- goto done;
- }
- } else {
- /* Check if new Ranging session is allowed */
- if (!wl_cfgnan_ranging_allowed(cfg)) {
- WL_ERR(("Cannot allow more ranging sessions \n"));
- ret = BCME_NORESOURCE;
- goto done;
- }
-
- ranging_inst = wl_cfgnan_get_ranging_inst(cfg, &rng_ind->peer_m_addr,
- NAN_RANGING_ROLE_RESPONDER);
- if (!ranging_inst) {
- WL_ERR(("Failed to create ranging instance \n"));
- ASSERT(0);
- ret = BCME_NORESOURCE;
- goto done;
- }
- }
-
-done:
- if (ret != BCME_OK) {
- /* reject the REQ using temp ranging instance */
- bzero(&tmp_rng_inst, sizeof(tmp_rng_inst));
- ranging_inst = &tmp_rng_inst;
- (void)memcpy_s(&tmp_rng_inst.peer_addr, ETHER_ADDR_LEN,
- &rng_ind->peer_m_addr, ETHER_ADDR_LEN);
- accept = FALSE;
- }
-
- ranging_inst->range_id = rng_ind->rng_id;
-
- WL_DBG(("Trigger Ranging at Responder\n"));
- ret = wl_cfgnan_trigger_ranging(ndev, cfg, ranging_inst,
- NULL, NAN_RANGE_REQ_EVNT, accept);
- if (unlikely(ret) || !accept) {
- WL_ERR(("Failed to handle range request, ret = (%d) accept %d\n",
- ret, accept));
- bzero(ranging_inst, sizeof(*ranging_inst));
- }
-
- return ret;
-}
-#endif /* RTT_SUPPORT */
-/* ranging request and response iovar handler */
-int
-wl_cfgnan_trigger_ranging(struct net_device *ndev, struct bcm_cfg80211 *cfg,
- void *ranging_ctxt, nan_svc_info_t *svc,
- uint8 range_cmd, bool accept_req)
-{
- s32 ret = BCME_OK;
- bcm_iov_batch_buf_t *nan_buf = NULL;
- wl_nan_range_req_t *range_req = NULL;
- wl_nan_range_resp_t *range_resp = NULL;
- bcm_iov_batch_subcmd_t *sub_cmd = NULL;
- uint16 nan_buf_size = NAN_IOCTL_BUF_SIZE;
- uint32 status;
- uint8 resp_buf[NAN_IOCTL_BUF_SIZE_MED];
- nan_ranging_inst_t *ranging_inst = (nan_ranging_inst_t *)ranging_ctxt;
- nan_avail_cmd_data cmd_data;
-
- NAN_DBG_ENTER();
-
- memset_s(&cmd_data, sizeof(cmd_data),
- 0, sizeof(cmd_data));
- ret = memcpy_s(&cmd_data.peer_nmi, ETHER_ADDR_LEN,
- &ranging_inst->peer_addr, ETHER_ADDR_LEN);
- if (ret != BCME_OK) {
- WL_ERR(("Failed to copy ranging peer addr\n"));
- goto fail;
- }
-
- cmd_data.avail_period = NAN_RANGING_PERIOD;
- ret = wl_cfgnan_set_nan_avail(bcmcfg_to_prmry_ndev(cfg),
- cfg, &cmd_data, WL_AVAIL_LOCAL);
- if (ret != BCME_OK) {
- WL_ERR(("Failed to set avail value with type [WL_AVAIL_LOCAL]\n"));
- goto fail;
- }
-
- ret = wl_cfgnan_set_nan_avail(bcmcfg_to_prmry_ndev(cfg),
- cfg, &cmd_data, WL_AVAIL_RANGING);
- if (unlikely(ret)) {
- WL_ERR(("Failed to set avail value with type [WL_AVAIL_RANGING]\n"));
- goto fail;
- }
-
- nan_buf = MALLOCZ(cfg->osh, nan_buf_size);
- if (!nan_buf) {
- WL_ERR(("%s: memory allocation failed\n", __func__));
- ret = BCME_NOMEM;
- goto fail;
- }
-
- nan_buf->version = htol16(WL_NAN_IOV_BATCH_VERSION);
- nan_buf->count = 0;
- nan_buf_size -= OFFSETOF(bcm_iov_batch_buf_t, cmds[0]);
-
- sub_cmd = (bcm_iov_batch_subcmd_t*)(&nan_buf->cmds[0]);
- sub_cmd->u.options = htol32(BCM_XTLV_OPTION_ALIGN32);
- if (range_cmd == NAN_RANGE_REQ_CMD) {
- sub_cmd->id = htod16(WL_NAN_CMD_RANGE_REQUEST);
- sub_cmd->len = sizeof(sub_cmd->u.options) + sizeof(wl_nan_range_req_t);
- range_req = (wl_nan_range_req_t *)(sub_cmd->data);
- /* ranging config */
- range_req->peer = ranging_inst->peer_addr;
- if (svc) {
- range_req->interval = svc->ranging_interval;
- /* Limits are in cm from host */
- range_req->ingress = svc->ingress_limit;
- range_req->egress = svc->egress_limit;
- }
- range_req->indication = NAN_RANGING_INDICATE_CONTINUOUS_MASK;
- } else {
- /* range response config */
- sub_cmd->id = htod16(WL_NAN_CMD_RANGE_RESPONSE);
- sub_cmd->len = sizeof(sub_cmd->u.options) + sizeof(wl_nan_range_resp_t);
- range_resp = (wl_nan_range_resp_t *)(sub_cmd->data);
- range_resp->range_id = ranging_inst->range_id;
- range_resp->indication = NAN_RANGING_INDICATE_CONTINUOUS_MASK;
- if (accept_req) {
- range_resp->status = NAN_RNG_REQ_ACCEPTED_BY_HOST;
- } else {
- range_resp->status = NAN_RNG_REQ_REJECTED_BY_HOST;
- }
- nan_buf->is_set = true;
- }
-
- nan_buf_size -= (sub_cmd->len +
- OFFSETOF(bcm_iov_batch_subcmd_t, u.options));
- nan_buf->count++;
-
- memset_s(resp_buf, sizeof(resp_buf), 0, sizeof(resp_buf));
- ret = wl_cfgnan_execute_ioctl(ndev, cfg, nan_buf, nan_buf_size,
- &status,
- (void*)resp_buf, NAN_IOCTL_BUF_SIZE);
- if (unlikely(ret) || unlikely(status)) {
- WL_ERR(("nan ranging failed ret = %d status = %d\n",
- ret, status));
- ret = (ret == BCME_OK) ? status : ret;
- goto fail;
- }
- WL_TRACE(("nan ranging trigger successful\n"));
- if (range_cmd == NAN_RANGE_REQ_CMD) {
- WL_MEM(("Ranging Req Triggered"
- " peer: " MACDBG ", ind : %d, ingress : %d, egress : %d\n",
- MAC2STRDBG(&ranging_inst->peer_addr), range_req->indication,
- range_req->ingress, range_req->egress));
- } else {
- WL_MEM(("Ranging Resp Triggered"
- " peer: " MACDBG ", ind : %d, ingress : %d, egress : %d\n",
- MAC2STRDBG(&ranging_inst->peer_addr), range_resp->indication,
- range_resp->ingress, range_resp->egress));
- }
-
- /* check the response buff for request */
- if (range_cmd == NAN_RANGE_REQ_CMD) {
- ret = process_resp_buf(resp_buf + WL_NAN_OBUF_DATA_OFFSET,
- &ranging_inst->range_id, WL_NAN_CMD_RANGE_REQUEST);
- WL_INFORM_MEM(("ranging instance returned %d\n", ranging_inst->range_id));
- }
- /* Preventing continuous range requests */
- ranging_inst->range_status = NAN_RANGING_IN_PROGRESS;
-
-fail:
- if (nan_buf) {
- MFREE(cfg->osh, nan_buf, NAN_IOCTL_BUF_SIZE);
- }
-
- NAN_DBG_EXIT();
- return ret;
-}
-#endif /* WL_NAN_DISC_CACHE */
-
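-/*
- * Bloom filter helpers for the NAN service response filter (SRF): allocator,
- * free routine and the hash callback, which runs CRC32 over a per-filter hash
- * index followed by the input and keeps the last two bytes of the result.
- */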
-static void *wl_nan_bloom_alloc(void *ctx, uint size)
-{
- uint8 *buf;
- BCM_REFERENCE(ctx);
-
- buf = kmalloc(size, GFP_KERNEL);
- if (!buf) {
- WL_ERR(("%s: memory allocation failed\n", __func__));
- buf = NULL;
- }
- return buf;
-}
-
-static void wl_nan_bloom_free(void *ctx, void *buf, uint size)
-{
- BCM_REFERENCE(ctx);
- BCM_REFERENCE(size);
- if (buf) {
- kfree(buf);
- }
-}
-
-static uint wl_nan_hash(void *ctx, uint index, const uint8 *input, uint input_len)
-{
- uint8* filter_idx = (uint8*)ctx;
- uint8 i = (*filter_idx * WL_NAN_HASHES_PER_BLOOM) + (uint8)index;
- uint b = 0;
-
- /* Steps 1 and 2 as explained in Section 6.2 */
- /* Concatenate index to input and run CRC32 by calling hndcrc32 twice */
- GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
- b = hndcrc32(&i, sizeof(uint8), CRC32_INIT_VALUE);
- b = hndcrc32((uint8*)input, input_len, b);
- GCC_DIAGNOSTIC_POP();
- /* Obtain the last 2 bytes of the CRC32 output */
- b &= NAN_BLOOM_CRC32_MASK;
-
- /* Step 3 is completed by bcmbloom functions */
- return b;
-}
-
-static int wl_nan_bloom_create(bcm_bloom_filter_t **bp, uint *idx, uint size)
-{
- uint i;
- int err;
-
- err = bcm_bloom_create(wl_nan_bloom_alloc, wl_nan_bloom_free,
- idx, WL_NAN_HASHES_PER_BLOOM, size, bp);
- if (err != BCME_OK) {
- goto exit;
- }
-
- /* Populate bloom filter with hash functions */
- for (i = 0; i < WL_NAN_HASHES_PER_BLOOM; i++) {
- err = bcm_bloom_add_hash(*bp, wl_nan_hash, &i);
- if (err) {
- WL_ERR(("bcm_bloom_add_hash failed\n"));
- goto exit;
- }
- }
-exit:
- return err;
-}
-
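-/*
- * Fill a publish/subscribe sub-command: the fixed wl_nan_sd_params_t fields
- * first, then the optional XTLVs (service info, match filters, SRF, security,
- * SDE control) packed into the remaining buffer space.
- */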
-static int
-wl_cfgnan_sd_params_handler(struct net_device *ndev,
- nan_discover_cmd_data_t *cmd_data, uint16 cmd_id,
- void *p_buf, uint16 *nan_buf_size)
-{
- s32 ret = BCME_OK;
- uint8 *pxtlv, *srf = NULL, *srf_mac = NULL, *srftmp = NULL;
- uint16 buflen_avail;
- bcm_iov_batch_subcmd_t *sub_cmd = (bcm_iov_batch_subcmd_t*)(p_buf);
- wl_nan_sd_params_t *sd_params = (wl_nan_sd_params_t *)sub_cmd->data;
- uint16 srf_size = 0;
- uint bloom_size, a;
- bcm_bloom_filter_t *bp = NULL;
- /* Bloom filter index default, indicates it has not been set */
- uint bloom_idx = 0xFFFFFFFF;
- uint16 bloom_len = NAN_BLOOM_LENGTH_DEFAULT;
-	/* srf_ctrl_size = bloom_len + srf_control field */
- uint16 srf_ctrl_size = bloom_len + 1;
-
- dhd_pub_t *dhdp = wl_cfg80211_get_dhdp(ndev);
- struct bcm_cfg80211 *cfg = wl_get_cfg(ndev);
- BCM_REFERENCE(cfg);
-
- NAN_DBG_ENTER();
-
- if (cmd_data->period) {
- sd_params->awake_dw = cmd_data->period;
- }
- sd_params->period = 1;
-
- if (cmd_data->ttl) {
- sd_params->ttl = cmd_data->ttl;
- } else {
- sd_params->ttl = WL_NAN_TTL_UNTIL_CANCEL;
- }
-
- sd_params->flags = 0;
- sd_params->flags = cmd_data->flags;
-
-	/* NAN service-based event suppression flags */
- if (cmd_data->recv_ind_flag) {
-		/* BIT0 - If set, host won't receive the "terminated" event */
- if (CHECK_BIT(cmd_data->recv_ind_flag, WL_NAN_EVENT_SUPPRESS_TERMINATE_BIT)) {
- sd_params->flags |= WL_NAN_SVC_CTRL_SUPPRESS_EVT_TERMINATED;
- }
-
-		/* BIT1 - If set, host won't receive the match expiry evt */
- /* TODO: Exp not yet supported */
- if (CHECK_BIT(cmd_data->recv_ind_flag, WL_NAN_EVENT_SUPPRESS_MATCH_EXP_BIT)) {
- WL_DBG(("Need to add match expiry event\n"));
- }
-		/* BIT2 - If set, host won't receive the "receive" event */
- if (CHECK_BIT(cmd_data->recv_ind_flag, WL_NAN_EVENT_SUPPRESS_RECEIVE_BIT)) {
- sd_params->flags |= WL_NAN_SVC_CTRL_SUPPRESS_EVT_RECEIVE;
- }
-		/* BIT3 - If set, host won't receive the "replied" event */
- if (CHECK_BIT(cmd_data->recv_ind_flag, WL_NAN_EVENT_SUPPRESS_REPLIED_BIT)) {
- sd_params->flags |= WL_NAN_SVC_CTRL_SUPPRESS_EVT_REPLIED;
- }
- }
- if (cmd_id == WL_NAN_CMD_SD_PUBLISH) {
- sd_params->instance_id = cmd_data->pub_id;
- if (cmd_data->service_responder_policy) {
- /* Do not disturb avail if dam is supported */
- if (FW_SUPPORTED(dhdp, autodam)) {
-			/* NAN accept policy: per-service policy.
-			 * Based on this policy (ALL/NONE), the responder side
-			 * will send ACCEPT/REJECT.
-			 * If set, the datapath response is sent automatically by FW.
- */
- sd_params->flags |= WL_NAN_SVC_CTRL_AUTO_DPRESP;
- } else {
-				WL_ERR(("svc specific auto dp resp is not"
- " supported in non-auto dam fw\n"));
- }
- }
- } else if (cmd_id == WL_NAN_CMD_SD_SUBSCRIBE) {
- sd_params->instance_id = cmd_data->sub_id;
- } else {
- ret = BCME_USAGE_ERROR;
- WL_ERR(("wrong command id = %d \n", cmd_id));
- goto fail;
- }
-
- if ((cmd_data->svc_hash.dlen == WL_NAN_SVC_HASH_LEN) &&
- (cmd_data->svc_hash.data)) {
- ret = memcpy_s((uint8*)sd_params->svc_hash,
- sizeof(sd_params->svc_hash),
- cmd_data->svc_hash.data,
- cmd_data->svc_hash.dlen);
- if (ret != BCME_OK) {
- WL_ERR(("Failed to copy svc hash\n"));
- goto fail;
- }
-#ifdef WL_NAN_DEBUG
- prhex("hashed svc name", cmd_data->svc_hash.data,
- cmd_data->svc_hash.dlen);
-#endif /* WL_NAN_DEBUG */
- } else {
- ret = BCME_ERROR;
- WL_ERR(("invalid svc hash data or length = %d\n",
- cmd_data->svc_hash.dlen));
- goto fail;
- }
-
- /* check if ranging support is present in firmware */
- if ((cmd_data->sde_control_flag & NAN_SDE_CF_RANGING_REQUIRED) &&
- !FW_SUPPORTED(dhdp, nanrange)) {
-		WL_ERR(("Service requires ranging but fw doesn't support it\n"));
- ret = BCME_UNSUPPORTED;
- goto fail;
- }
-
- /* Optional parameters: fill the sub_command block with service descriptor attr */
- sub_cmd->id = htod16(cmd_id);
- sub_cmd->u.options = htol32(BCM_XTLV_OPTION_ALIGN32);
- sub_cmd->len = sizeof(sub_cmd->u.options) +
- OFFSETOF(wl_nan_sd_params_t, optional[0]);
- pxtlv = (uint8*)&sd_params->optional[0];
-
- *nan_buf_size -= sub_cmd->len;
- buflen_avail = *nan_buf_size;
-
- if (cmd_data->svc_info.data && cmd_data->svc_info.dlen) {
- WL_TRACE(("optional svc_info present, pack it\n"));
- ret = bcm_pack_xtlv_entry(&pxtlv, nan_buf_size,
- WL_NAN_XTLV_SD_SVC_INFO,
- cmd_data->svc_info.dlen,
- cmd_data->svc_info.data, BCM_XTLV_OPTION_ALIGN32);
- if (unlikely(ret)) {
- WL_ERR(("%s: fail to pack WL_NAN_XTLV_SD_SVC_INFO\n", __FUNCTION__));
- goto fail;
- }
- }
-
- if (cmd_data->sde_svc_info.data && cmd_data->sde_svc_info.dlen) {
- WL_TRACE(("optional sdea svc_info present, pack it, %d\n",
- cmd_data->sde_svc_info.dlen));
- ret = bcm_pack_xtlv_entry(&pxtlv, nan_buf_size,
- WL_NAN_XTLV_SD_SDE_SVC_INFO,
- cmd_data->sde_svc_info.dlen,
- cmd_data->sde_svc_info.data, BCM_XTLV_OPTION_ALIGN32);
- if (unlikely(ret)) {
- WL_ERR(("%s: fail to pack sdea svc info\n", __FUNCTION__));
- goto fail;
- }
- }
-
- if (cmd_data->tx_match.dlen) {
-		WL_TRACE(("optional tx match filter present (len=%d)\n",
- cmd_data->tx_match.dlen));
- ret = bcm_pack_xtlv_entry(&pxtlv, nan_buf_size,
- WL_NAN_XTLV_CFG_MATCH_TX, cmd_data->tx_match.dlen,
- cmd_data->tx_match.data, BCM_XTLV_OPTION_ALIGN32);
- if (unlikely(ret)) {
- WL_ERR(("%s: failed on xtlv_pack for tx match filter\n", __FUNCTION__));
- goto fail;
- }
- }
-
- if (cmd_data->life_count) {
- WL_TRACE(("optional life count is present, pack it\n"));
- ret = bcm_pack_xtlv_entry(&pxtlv, nan_buf_size, WL_NAN_XTLV_CFG_SVC_LIFE_COUNT,
- sizeof(cmd_data->life_count), &cmd_data->life_count,
- BCM_XTLV_OPTION_ALIGN32);
- if (unlikely(ret)) {
- WL_ERR(("%s: failed to WL_NAN_XTLV_CFG_SVC_LIFE_COUNT\n", __FUNCTION__));
- goto fail;
- }
- }
-
- if (cmd_data->use_srf) {
- uint8 srf_control = 0;
- /* set include bit */
- if (cmd_data->srf_include == true) {
- srf_control |= 0x2;
- }
-
- if (!ETHER_ISNULLADDR(&cmd_data->mac_list.list) &&
- (cmd_data->mac_list.num_mac_addr
- < NAN_SRF_MAX_MAC)) {
- if (cmd_data->srf_type == SRF_TYPE_SEQ_MAC_ADDR) {
- /* mac list */
- srf_size = (cmd_data->mac_list.num_mac_addr
- * ETHER_ADDR_LEN) + NAN_SRF_CTRL_FIELD_LEN;
- WL_TRACE(("srf size = %d\n", srf_size));
-
- srf_mac = MALLOCZ(cfg->osh, srf_size);
- if (srf_mac == NULL) {
- WL_ERR(("%s: memory allocation failed\n", __FUNCTION__));
- ret = -ENOMEM;
- goto fail;
- }
- ret = memcpy_s(srf_mac, NAN_SRF_CTRL_FIELD_LEN,
- &srf_control, NAN_SRF_CTRL_FIELD_LEN);
- if (ret != BCME_OK) {
- WL_ERR(("Failed to copy srf control\n"));
- goto fail;
- }
- ret = memcpy_s(srf_mac+1, (srf_size - NAN_SRF_CTRL_FIELD_LEN),
- cmd_data->mac_list.list,
- (srf_size - NAN_SRF_CTRL_FIELD_LEN));
- if (ret != BCME_OK) {
- WL_ERR(("Failed to copy srf control mac list\n"));
- goto fail;
- }
- ret = bcm_pack_xtlv_entry(&pxtlv, nan_buf_size,
- WL_NAN_XTLV_CFG_SR_FILTER, srf_size, srf_mac,
- BCM_XTLV_OPTION_ALIGN32);
- if (unlikely(ret)) {
- WL_ERR(("%s: failed to WL_NAN_XTLV_CFG_SR_FILTER\n",
- __FUNCTION__));
- goto fail;
- }
- } else if (cmd_data->srf_type == SRF_TYPE_BLOOM_FILTER) {
- /* Create bloom filter */
- srf = MALLOCZ(cfg->osh, srf_ctrl_size);
- if (srf == NULL) {
- WL_ERR(("%s: memory allocation failed\n", __FUNCTION__));
- ret = -ENOMEM;
- goto fail;
- }
- /* Bloom filter */
- srf_control |= 0x1;
- /* Instance id must be from 1 to 255, 0 is Reserved */
- if (sd_params->instance_id == NAN_ID_RESERVED) {
- WL_ERR(("Invalid instance id: %d\n",
- sd_params->instance_id));
- ret = BCME_BADARG;
- goto fail;
- }
- if (bloom_idx == 0xFFFFFFFF) {
- bloom_idx = sd_params->instance_id % 4;
- } else {
- WL_ERR(("Invalid bloom_idx\n"));
- ret = BCME_BADARG;
- goto fail;
-
- }
- srf_control |= bloom_idx << 2;
-
- ret = wl_nan_bloom_create(&bp, &bloom_idx, bloom_len);
- if (unlikely(ret)) {
- WL_ERR(("%s: Bloom create failed\n", __FUNCTION__));
- goto fail;
- }
-
- srftmp = cmd_data->mac_list.list;
- for (a = 0;
- a < cmd_data->mac_list.num_mac_addr; a++) {
- ret = bcm_bloom_add_member(bp, srftmp, ETHER_ADDR_LEN);
- if (unlikely(ret)) {
- WL_ERR(("%s: Cannot add to bloom filter\n",
- __FUNCTION__));
- goto fail;
- }
- srftmp += ETHER_ADDR_LEN;
- }
-
- ret = memcpy_s(srf, NAN_SRF_CTRL_FIELD_LEN,
- &srf_control, NAN_SRF_CTRL_FIELD_LEN);
- if (ret != BCME_OK) {
- WL_ERR(("Failed to copy srf control\n"));
- goto fail;
- }
- ret = bcm_bloom_get_filter_data(bp, bloom_len,
- (srf + NAN_SRF_CTRL_FIELD_LEN),
- &bloom_size);
- if (unlikely(ret)) {
- WL_ERR(("%s: Cannot get filter data\n", __FUNCTION__));
- goto fail;
- }
- ret = bcm_pack_xtlv_entry(&pxtlv, nan_buf_size,
- WL_NAN_XTLV_CFG_SR_FILTER, srf_ctrl_size,
- srf, BCM_XTLV_OPTION_ALIGN32);
- if (ret != BCME_OK) {
- WL_ERR(("Failed to pack SR FILTER data, ret = %d\n", ret));
- goto fail;
- }
- } else {
- WL_ERR(("Invalid SRF Type = %d !!!\n",
- cmd_data->srf_type));
- goto fail;
- }
- } else {
- WL_ERR(("Invalid MAC Addr/Too many mac addr = %d !!!\n",
- cmd_data->mac_list.num_mac_addr));
- goto fail;
- }
- }
-
- if (cmd_data->rx_match.dlen) {
- WL_TRACE(("optional rx match filter is present, pack it\n"));
- ret = bcm_pack_xtlv_entry(&pxtlv, nan_buf_size,
- WL_NAN_XTLV_CFG_MATCH_RX, cmd_data->rx_match.dlen,
- cmd_data->rx_match.data, BCM_XTLV_OPTION_ALIGN32);
- if (unlikely(ret)) {
- WL_ERR(("%s: failed on xtlv_pack for rx match filter\n", __func__));
- goto fail;
- }
- }
-
- /* Security elements */
- if (cmd_data->csid) {
- WL_TRACE(("Cipher suite type is present, pack it\n"));
- ret = bcm_pack_xtlv_entry(&pxtlv, nan_buf_size,
- WL_NAN_XTLV_CFG_SEC_CSID, sizeof(nan_sec_csid_e),
- (uint8*)&cmd_data->csid, BCM_XTLV_OPTION_ALIGN32);
- if (unlikely(ret)) {
- WL_ERR(("%s: fail to pack on csid\n", __FUNCTION__));
- goto fail;
- }
- }
-
- if (cmd_data->ndp_cfg.security_cfg) {
- if ((cmd_data->key_type == NAN_SECURITY_KEY_INPUT_PMK) ||
- (cmd_data->key_type == NAN_SECURITY_KEY_INPUT_PASSPHRASE)) {
- if (cmd_data->key.data && cmd_data->key.dlen) {
- WL_TRACE(("optional pmk present, pack it\n"));
- ret = bcm_pack_xtlv_entry(&pxtlv, nan_buf_size,
- WL_NAN_XTLV_CFG_SEC_PMK, cmd_data->key.dlen,
- cmd_data->key.data, BCM_XTLV_OPTION_ALIGN32);
- if (unlikely(ret)) {
- WL_ERR(("%s: fail to pack WL_NAN_XTLV_CFG_SEC_PMK\n",
- __FUNCTION__));
- goto fail;
- }
- }
- } else {
- WL_ERR(("Invalid security key type\n"));
- ret = BCME_BADARG;
- goto fail;
- }
- }
-
- if (cmd_data->scid.data && cmd_data->scid.dlen) {
- WL_TRACE(("optional scid present, pack it\n"));
- ret = bcm_pack_xtlv_entry(&pxtlv, nan_buf_size, WL_NAN_XTLV_CFG_SEC_SCID,
- cmd_data->scid.dlen, cmd_data->scid.data, BCM_XTLV_OPTION_ALIGN32);
- if (unlikely(ret)) {
- WL_ERR(("%s: fail to pack WL_NAN_XTLV_CFG_SEC_SCID\n", __FUNCTION__));
- goto fail;
- }
- }
-
- if (cmd_data->sde_control_config) {
- ret = bcm_pack_xtlv_entry(&pxtlv, nan_buf_size,
- WL_NAN_XTLV_SD_SDE_CONTROL,
- sizeof(uint16), (uint8*)&cmd_data->sde_control_flag,
- BCM_XTLV_OPTION_ALIGN32);
- if (ret != BCME_OK) {
- WL_ERR(("%s: fail to pack WL_NAN_XTLV_SD_SDE_CONTROL\n", __FUNCTION__));
- goto fail;
- }
- }
-
- sub_cmd->len += (buflen_avail - *nan_buf_size);
-
-fail:
- if (srf) {
- MFREE(cfg->osh, srf, srf_ctrl_size);
- }
-
- if (srf_mac) {
- MFREE(cfg->osh, srf_mac, srf_size);
- }
- NAN_DBG_EXIT();
- return ret;
-}
-
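-/*
- * Compute the 32-bit aligned buffer space needed for the optional discovery
- * XTLVs so the iovar batch buffer can be sized before packing.
- */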
-static int
-wl_cfgnan_aligned_data_size_of_opt_disc_params(uint16 *data_size, nan_discover_cmd_data_t *cmd_data)
-{
- s32 ret = BCME_OK;
- if (cmd_data->svc_info.dlen)
- *data_size += ALIGN_SIZE(cmd_data->svc_info.dlen + NAN_XTLV_ID_LEN_SIZE, 4);
- if (cmd_data->sde_svc_info.dlen)
- *data_size += ALIGN_SIZE(cmd_data->sde_svc_info.dlen + NAN_XTLV_ID_LEN_SIZE, 4);
- if (cmd_data->tx_match.dlen)
- *data_size += ALIGN_SIZE(cmd_data->tx_match.dlen + NAN_XTLV_ID_LEN_SIZE, 4);
- if (cmd_data->rx_match.dlen)
- *data_size += ALIGN_SIZE(cmd_data->rx_match.dlen + NAN_XTLV_ID_LEN_SIZE, 4);
- if (cmd_data->use_srf) {
- if (cmd_data->srf_type == SRF_TYPE_SEQ_MAC_ADDR) {
- *data_size += (cmd_data->mac_list.num_mac_addr * ETHER_ADDR_LEN)
- + NAN_SRF_CTRL_FIELD_LEN;
- } else { /* Bloom filter type */
- *data_size += NAN_BLOOM_LENGTH_DEFAULT + 1;
- }
- *data_size += ALIGN_SIZE(*data_size + NAN_XTLV_ID_LEN_SIZE, 4);
- }
- if (cmd_data->csid)
- *data_size += ALIGN_SIZE(sizeof(nan_sec_csid_e) + NAN_XTLV_ID_LEN_SIZE, 4);
- if (cmd_data->key.dlen)
- *data_size += ALIGN_SIZE(cmd_data->key.dlen + NAN_XTLV_ID_LEN_SIZE, 4);
- if (cmd_data->scid.dlen)
- *data_size += ALIGN_SIZE(cmd_data->scid.dlen + NAN_XTLV_ID_LEN_SIZE, 4);
- if (cmd_data->sde_control_config)
- *data_size += ALIGN_SIZE(sizeof(uint16) + NAN_XTLV_ID_LEN_SIZE, 4);
- if (cmd_data->life_count)
- *data_size += ALIGN_SIZE(sizeof(cmd_data->life_count) + NAN_XTLV_ID_LEN_SIZE, 4);
- return ret;
-}
-
-static int
-wl_cfgnan_aligned_data_size_of_opt_dp_params(uint16 *data_size, nan_datapath_cmd_data_t *cmd_data)
-{
- s32 ret = BCME_OK;
- if (cmd_data->svc_info.dlen)
- *data_size += ALIGN_SIZE(cmd_data->svc_info.dlen + NAN_XTLV_ID_LEN_SIZE, 4);
- if (cmd_data->key.dlen)
- *data_size += ALIGN_SIZE(cmd_data->key.dlen + NAN_XTLV_ID_LEN_SIZE, 4);
- if (cmd_data->csid)
- *data_size += ALIGN_SIZE(sizeof(nan_sec_csid_e) + NAN_XTLV_ID_LEN_SIZE, 4);
-
- *data_size += ALIGN_SIZE(WL_NAN_SVC_HASH_LEN + NAN_XTLV_ID_LEN_SIZE, 4);
- return ret;
-}
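-/*
- * Query the firmware for an existing publish/subscribe instance (is_set is
- * false), so a service update only proceeds when the instance is present.
- */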
-int
-wl_cfgnan_svc_get_handler(struct net_device *ndev,
- struct bcm_cfg80211 *cfg, uint16 cmd_id, nan_discover_cmd_data_t *cmd_data)
-{
- bcm_iov_batch_subcmd_t *sub_cmd = NULL;
- uint32 instance_id;
- s32 ret = BCME_OK;
- bcm_iov_batch_buf_t *nan_buf = NULL;
-
- uint8 *resp_buf = NULL;
- uint16 data_size = WL_NAN_OBUF_DATA_OFFSET + sizeof(instance_id);
-
- NAN_DBG_ENTER();
-
- nan_buf = MALLOCZ(cfg->osh, data_size);
- if (!nan_buf) {
- WL_ERR(("%s: memory allocation failed\n", __func__));
- ret = BCME_NOMEM;
- goto fail;
- }
-
- resp_buf = MALLOCZ(cfg->osh, NAN_IOCTL_BUF_SIZE_LARGE);
- if (!resp_buf) {
- WL_ERR(("%s: memory allocation failed\n", __func__));
- ret = BCME_NOMEM;
- goto fail;
- }
- nan_buf->version = htol16(WL_NAN_IOV_BATCH_VERSION);
- nan_buf->count = 1;
- /* check if service is present */
- nan_buf->is_set = false;
- sub_cmd = (bcm_iov_batch_subcmd_t*)(&nan_buf->cmds[0]);
- if (cmd_id == WL_NAN_CMD_SD_PUBLISH) {
- instance_id = cmd_data->pub_id;
- } else if (cmd_id == WL_NAN_CMD_SD_SUBSCRIBE) {
- instance_id = cmd_data->sub_id;
- } else {
- ret = BCME_USAGE_ERROR;
- WL_ERR(("wrong command id = %u\n", cmd_id));
- goto fail;
- }
- /* Fill the sub_command block */
- sub_cmd->id = htod16(cmd_id);
- sub_cmd->len = sizeof(sub_cmd->u.options) + sizeof(instance_id);
- sub_cmd->u.options = htol32(BCM_XTLV_OPTION_ALIGN32);
-
- ret = memcpy_s(sub_cmd->data, (data_size - WL_NAN_OBUF_DATA_OFFSET),
- &instance_id, sizeof(instance_id));
- if (ret != BCME_OK) {
- WL_ERR(("Failed to copy instance id, ret = %d\n", ret));
- goto fail;
- }
-
- ret = wl_cfgnan_execute_ioctl(ndev, cfg, nan_buf, data_size,
- &(cmd_data->status), resp_buf, NAN_IOCTL_BUF_SIZE_LARGE);
-
- if (unlikely(ret) || unlikely(cmd_data->status)) {
- WL_ERR(("nan svc check failed ret = %d status = %d\n", ret, cmd_data->status));
- goto fail;
- } else {
-		WL_DBG(("nan svc check successful, proceed to update\n"));
- }
-
-fail:
- if (nan_buf) {
- MFREE(cfg->osh, nan_buf, data_size);
- }
-
- if (resp_buf) {
- MFREE(cfg->osh, resp_buf, NAN_IOCTL_BUF_SIZE_LARGE);
- }
- NAN_DBG_EXIT();
- return ret;
-
-}
-
-int
-wl_cfgnan_svc_handler(struct net_device *ndev,
- struct bcm_cfg80211 *cfg, uint16 cmd_id, nan_discover_cmd_data_t *cmd_data)
-{
- s32 ret = BCME_OK;
- bcm_iov_batch_buf_t *nan_buf = NULL;
- uint16 nan_buf_size;
- uint8 *resp_buf = NULL;
- /* Considering fixed params */
- uint16 data_size = WL_NAN_OBUF_DATA_OFFSET +
- OFFSETOF(wl_nan_sd_params_t, optional[0]);
-
- if (cmd_data->svc_update) {
- ret = wl_cfgnan_svc_get_handler(ndev, cfg, cmd_id, cmd_data);
- if (ret != BCME_OK) {
- WL_ERR(("Failed to update svc handler, ret = %d\n", ret));
- goto fail;
- } else {
- /* Ignoring any other svc get error */
- if (cmd_data->status == WL_NAN_E_BAD_INSTANCE) {
- WL_ERR(("Bad instance status, failed to update svc handler\n"));
- goto fail;
- }
- }
- }
-
- ret = wl_cfgnan_aligned_data_size_of_opt_disc_params(&data_size, cmd_data);
- if (unlikely(ret)) {
-		WL_ERR(("Failed to get aligned size of optional params\n"));
- goto fail;
- }
- nan_buf_size = data_size;
- NAN_DBG_ENTER();
-
- nan_buf = MALLOCZ(cfg->osh, data_size);
- if (!nan_buf) {
- WL_ERR(("%s: memory allocation failed\n", __func__));
- ret = BCME_NOMEM;
- goto fail;
- }
-
- resp_buf = MALLOCZ(cfg->osh, data_size + NAN_IOVAR_NAME_SIZE);
- if (!resp_buf) {
- WL_ERR(("%s: memory allocation failed\n", __func__));
- ret = BCME_NOMEM;
- goto fail;
- }
- nan_buf->version = htol16(WL_NAN_IOV_BATCH_VERSION);
- nan_buf->count = 0;
- nan_buf->is_set = true;
-
- ret = wl_cfgnan_sd_params_handler(ndev, cmd_data, cmd_id,
- &nan_buf->cmds[0], &nan_buf_size);
- if (unlikely(ret)) {
- WL_ERR((" Service discovery params handler failed, ret = %d\n", ret));
- goto fail;
- }
-
- nan_buf->count++;
- ret = wl_cfgnan_execute_ioctl(ndev, cfg, nan_buf, data_size,
- &(cmd_data->status), resp_buf, data_size + NAN_IOVAR_NAME_SIZE);
- if (cmd_data->svc_update && (cmd_data->status == BCME_DATA_NOTFOUND)) {
- /* return OK if update tlv data is not present
- * which means nothing to update
- */
- cmd_data->status = BCME_OK;
- }
- if (unlikely(ret) || unlikely(cmd_data->status)) {
- WL_ERR(("nan svc failed ret = %d status = %d\n", ret, cmd_data->status));
- goto fail;
- } else {
- WL_DBG(("nan svc successful\n"));
-#ifdef WL_NAN_DISC_CACHE
- ret = wl_cfgnan_cache_svc_info(cfg, cmd_data, cmd_id, cmd_data->svc_update);
- if (ret < 0) {
- WL_ERR(("%s: fail to cache svc info, ret=%d\n",
- __FUNCTION__, ret));
- goto fail;
- }
-#endif /* WL_NAN_DISC_CACHE */
- }
-
-fail:
- if (nan_buf) {
- MFREE(cfg->osh, nan_buf, data_size);
- }
-
- if (resp_buf) {
- MFREE(cfg->osh, resp_buf, data_size + NAN_IOVAR_NAME_SIZE);
- }
- NAN_DBG_EXIT();
- return ret;
-}
-
-int
-wl_cfgnan_publish_handler(struct net_device *ndev,
- struct bcm_cfg80211 *cfg, nan_discover_cmd_data_t *cmd_data)
-{
- int ret = BCME_OK;
-
- NAN_DBG_ENTER();
- NAN_MUTEX_LOCK();
- /*
-	 * proceed only if mandatory arguments are present - publisher id,
- * service hash
- */
- if ((!cmd_data->pub_id) || (!cmd_data->svc_hash.data) ||
- (!cmd_data->svc_hash.dlen)) {
- WL_ERR(("mandatory arguments are not present\n"));
- ret = BCME_BADARG;
- goto fail;
- }
-
- ret = wl_cfgnan_svc_handler(ndev, cfg, WL_NAN_CMD_SD_PUBLISH, cmd_data);
- if (ret < 0) {
- WL_ERR(("%s: fail to handle pub, ret=%d\n", __FUNCTION__, ret));
- goto fail;
- }
- WL_INFORM_MEM(("[NAN] Service published for instance id:%d\n", cmd_data->pub_id));
-
-fail:
- NAN_MUTEX_UNLOCK();
- NAN_DBG_EXIT();
- return ret;
-}
-
-int
-wl_cfgnan_subscribe_handler(struct net_device *ndev,
- struct bcm_cfg80211 *cfg, nan_discover_cmd_data_t *cmd_data)
-{
- int ret = BCME_OK;
-#ifdef WL_NAN_DISC_CACHE
- nan_svc_info_t *svc_info;
- uint8 upd_ranging_required;
-#endif /* WL_NAN_DISC_CACHE */
-#ifdef RTT_GEOFENCE_CONT
-#ifdef RTT_SUPPORT
- dhd_pub_t *dhd = (struct dhd_pub *)(cfg->pub);
- rtt_status_info_t *rtt_status = GET_RTTSTATE(dhd);
-#endif /* RTT_SUPPORT */
-#endif /* RTT_GEOFENCE_CONT */
-
- NAN_DBG_ENTER();
- NAN_MUTEX_LOCK();
-
- /*
- * proceed only if mandatory arguments are present - subscriber id,
- * service hash
- */
- if ((!cmd_data->sub_id) || (!cmd_data->svc_hash.data) ||
- (!cmd_data->svc_hash.dlen)) {
- WL_ERR(("mandatory arguments are not present\n"));
- ret = BCME_BADARG;
- goto fail;
- }
-
- /* Check for ranging sessions if any */
- if (cmd_data->svc_update) {
-#ifdef WL_NAN_DISC_CACHE
- svc_info = wl_cfgnan_get_svc_inst(cfg, cmd_data->sub_id, 0);
- if (svc_info) {
- wl_cfgnan_clear_svc_from_all_ranging_inst(cfg, cmd_data->sub_id);
- /* terminate ranging sessions for this svc, avoid clearing svc cache */
- wl_cfgnan_terminate_all_obsolete_ranging_sessions(cfg);
- WL_DBG(("Ranging sessions handled for svc update\n"));
- upd_ranging_required = !!(cmd_data->sde_control_flag &
- NAN_SDE_CF_RANGING_REQUIRED);
- if ((svc_info->ranging_required ^ upd_ranging_required) ||
- (svc_info->ingress_limit != cmd_data->ingress_limit) ||
- (svc_info->egress_limit != cmd_data->egress_limit)) {
- /* Clear cache info in Firmware */
- ret = wl_cfgnan_clear_disc_cache(cfg, cmd_data->sub_id);
- if (ret != BCME_OK) {
- WL_ERR(("couldn't send clear cache to FW \n"));
- goto fail;
- }
- /* Invalidate local cache info */
- wl_cfgnan_remove_disc_result(cfg, cmd_data->sub_id);
- }
- }
-#endif /* WL_NAN_DISC_CACHE */
- }
-
-#ifdef RTT_GEOFENCE_CONT
-#ifdef RTT_SUPPORT
- /* Override ranging Indication */
- if (rtt_status->geofence_cfg.geofence_cont) {
- if (cmd_data->ranging_indication !=
- NAN_RANGE_INDICATION_NONE) {
- cmd_data->ranging_indication = NAN_RANGE_INDICATION_CONT;
- }
- }
-#endif /* RTT_SUPPORT */
-#endif /* RTT_GEOFENCE_CONT */
- ret = wl_cfgnan_svc_handler(ndev, cfg, WL_NAN_CMD_SD_SUBSCRIBE, cmd_data);
- if (ret < 0) {
- WL_ERR(("%s: fail to handle svc, ret=%d\n", __FUNCTION__, ret));
- goto fail;
- }
- WL_INFORM_MEM(("[NAN] Service subscribed for instance id:%d\n", cmd_data->sub_id));
-
-fail:
- NAN_MUTEX_UNLOCK();
- NAN_DBG_EXIT();
- return ret;
-}
-
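-/*
- * Build the cancel-publish/cancel-subscribe sub-command carrying only the
- * service instance id; the caller issues the batched iovar.
- */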
-static int
-wl_cfgnan_cancel_handler(nan_discover_cmd_data_t *cmd_data,
- uint16 cmd_id, void *p_buf, uint16 *nan_buf_size)
-{
- s32 ret = BCME_OK;
-
- NAN_DBG_ENTER();
-
- if (p_buf != NULL) {
- bcm_iov_batch_subcmd_t *sub_cmd = (bcm_iov_batch_subcmd_t*)(p_buf);
- wl_nan_instance_id_t instance_id;
-
- if (cmd_id == WL_NAN_CMD_SD_CANCEL_PUBLISH) {
- instance_id = cmd_data->pub_id;
- } else if (cmd_id == WL_NAN_CMD_SD_CANCEL_SUBSCRIBE) {
- instance_id = cmd_data->sub_id;
- } else {
- ret = BCME_USAGE_ERROR;
- WL_ERR(("wrong command id = %u\n", cmd_id));
- goto fail;
- }
-
- /* Fill the sub_command block */
- sub_cmd->id = htod16(cmd_id);
- sub_cmd->len = sizeof(sub_cmd->u.options) + sizeof(instance_id);
- sub_cmd->u.options = htol32(BCM_XTLV_OPTION_ALIGN32);
- ret = memcpy_s(sub_cmd->data, *nan_buf_size,
- &instance_id, sizeof(instance_id));
- if (ret != BCME_OK) {
- WL_ERR(("Failed to copy instance id, ret = %d\n", ret));
- goto fail;
- }
- /* adjust iov data len to the end of last data record */
- *nan_buf_size -= (sub_cmd->len +
- OFFSETOF(bcm_iov_batch_subcmd_t, u.options));
- WL_INFORM_MEM(("[NAN] Service with instance id:%d cancelled\n", instance_id));
- } else {
- WL_ERR(("nan_iov_buf is NULL\n"));
- ret = BCME_ERROR;
- goto fail;
- }
-
-fail:
- NAN_DBG_EXIT();
- return ret;
-}
-
-int
-wl_cfgnan_cancel_pub_handler(struct net_device *ndev,
- struct bcm_cfg80211 *cfg, nan_discover_cmd_data_t *cmd_data)
-{
- bcm_iov_batch_buf_t *nan_buf = NULL;
- s32 ret = BCME_OK;
- uint16 nan_buf_size = NAN_IOCTL_BUF_SIZE;
- uint8 resp_buf[NAN_IOCTL_BUF_SIZE];
-
- NAN_DBG_ENTER();
- NAN_MUTEX_LOCK();
-
- nan_buf = MALLOCZ(cfg->osh, nan_buf_size);
- if (!nan_buf) {
- WL_ERR(("%s: memory allocation failed\n", __func__));
- ret = BCME_NOMEM;
- goto fail;
- }
-
- nan_buf->version = htol16(WL_NAN_IOV_BATCH_VERSION);
- nan_buf->count = 0;
- nan_buf_size -= OFFSETOF(bcm_iov_batch_buf_t, cmds[0]);
-
- /* proceed only if mandatory argument is present - publisher id */
- if (!cmd_data->pub_id) {
- WL_ERR(("mandatory argument is not present\n"));
- ret = BCME_BADARG;
- goto fail;
- }
-
-#ifdef WL_NAN_DISC_CACHE
- wl_cfgnan_clear_svc_cache(cfg, cmd_data->pub_id);
-#endif /* WL_NAN_DISC_CACHE */
- ret = wl_cfgnan_cancel_handler(cmd_data, WL_NAN_CMD_SD_CANCEL_PUBLISH,
- &nan_buf->cmds[0], &nan_buf_size);
- if (unlikely(ret)) {
- WL_ERR(("cancel publish failed\n"));
- goto fail;
- }
- nan_buf->is_set = true;
- nan_buf->count++;
-
- memset_s(resp_buf, sizeof(resp_buf), 0, sizeof(resp_buf));
- ret = wl_cfgnan_execute_ioctl(ndev, cfg, nan_buf, nan_buf_size,
- &(cmd_data->status),
- (void*)resp_buf, NAN_IOCTL_BUF_SIZE);
- if (unlikely(ret) || unlikely(cmd_data->status)) {
- WL_ERR(("nan cancel publish failed ret = %d status = %d\n",
- ret, cmd_data->status));
- goto fail;
- }
-	WL_DBG(("nan cancel publish successful\n"));
- wl_cfgnan_remove_inst_id(cfg, cmd_data->pub_id);
-fail:
- if (nan_buf) {
- MFREE(cfg->osh, nan_buf, NAN_IOCTL_BUF_SIZE);
- }
-
- NAN_MUTEX_UNLOCK();
- NAN_DBG_EXIT();
- return ret;
-}
-
-int
-wl_cfgnan_cancel_sub_handler(struct net_device *ndev,
- struct bcm_cfg80211 *cfg, nan_discover_cmd_data_t *cmd_data)
-{
- bcm_iov_batch_buf_t *nan_buf = NULL;
- s32 ret = BCME_OK;
- uint16 nan_buf_size = NAN_IOCTL_BUF_SIZE;
- uint8 resp_buf[NAN_IOCTL_BUF_SIZE];
-
- NAN_DBG_ENTER();
- NAN_MUTEX_LOCK();
-
- nan_buf = MALLOCZ(cfg->osh, nan_buf_size);
- if (!nan_buf) {
- WL_ERR(("%s: memory allocation failed\n", __func__));
- ret = BCME_NOMEM;
- goto fail;
- }
-
- nan_buf->version = htol16(WL_NAN_IOV_BATCH_VERSION);
- nan_buf->count = 0;
- nan_buf_size -= OFFSETOF(bcm_iov_batch_buf_t, cmds[0]);
-
- /* proceed only if mandatory argument is present - subscriber id */
- if (!cmd_data->sub_id) {
- WL_ERR(("mandatory argument is not present\n"));
- ret = BCME_BADARG;
- goto fail;
- }
-
-#ifdef WL_NAN_DISC_CACHE
- /* terminate ranging sessions for this svc */
- wl_cfgnan_clear_svc_from_all_ranging_inst(cfg, cmd_data->sub_id);
- wl_cfgnan_terminate_all_obsolete_ranging_sessions(cfg);
- /* clear svc cache for the service */
- wl_cfgnan_clear_svc_cache(cfg, cmd_data->sub_id);
- wl_cfgnan_remove_disc_result(cfg, cmd_data->sub_id);
-#endif /* WL_NAN_DISC_CACHE */
-
- ret = wl_cfgnan_cancel_handler(cmd_data, WL_NAN_CMD_SD_CANCEL_SUBSCRIBE,
- &nan_buf->cmds[0], &nan_buf_size);
- if (unlikely(ret)) {
- WL_ERR(("cancel subscribe failed\n"));
- goto fail;
- }
- nan_buf->is_set = true;
- nan_buf->count++;
-
- memset_s(resp_buf, sizeof(resp_buf), 0, sizeof(resp_buf));
- ret = wl_cfgnan_execute_ioctl(ndev, cfg, nan_buf, nan_buf_size,
- &(cmd_data->status),
- (void*)resp_buf, NAN_IOCTL_BUF_SIZE);
- if (unlikely(ret) || unlikely(cmd_data->status)) {
- WL_ERR(("nan cancel subscribe failed ret = %d status = %d\n",
- ret, cmd_data->status));
- goto fail;
- }
-	WL_DBG(("subscribe cancel successful\n"));
- wl_cfgnan_remove_inst_id(cfg, cmd_data->sub_id);
-fail:
- if (nan_buf) {
- MFREE(cfg->osh, nan_buf, NAN_IOCTL_BUF_SIZE);
- }
-
- NAN_MUTEX_UNLOCK();
- NAN_DBG_EXIT();
- return ret;
-}
-
-int
-wl_cfgnan_transmit_handler(struct net_device *ndev,
- struct bcm_cfg80211 *cfg, nan_discover_cmd_data_t *cmd_data)
-{
- s32 ret = BCME_OK;
- bcm_iov_batch_buf_t *nan_buf = NULL;
- wl_nan_sd_transmit_t *sd_xmit = NULL;
- bcm_iov_batch_subcmd_t *sub_cmd = NULL;
- bool is_lcl_id = FALSE;
- bool is_dest_id = FALSE;
- bool is_dest_mac = FALSE;
- uint16 buflen_avail;
- uint8 *pxtlv;
- uint16 nan_buf_size;
- uint8 *resp_buf = NULL;
- /* Considering fixed params */
- uint16 data_size = WL_NAN_OBUF_DATA_OFFSET +
- OFFSETOF(wl_nan_sd_transmit_t, opt_tlv);
- data_size = ALIGN_SIZE(data_size, 4);
- ret = wl_cfgnan_aligned_data_size_of_opt_disc_params(&data_size, cmd_data);
- if (unlikely(ret)) {
-		WL_ERR(("Failed to get aligned size of optional params\n"));
- goto fail;
- }
- NAN_DBG_ENTER();
- NAN_MUTEX_LOCK();
- nan_buf_size = data_size;
- nan_buf = MALLOCZ(cfg->osh, data_size);
- if (!nan_buf) {
- WL_ERR(("%s: memory allocation failed\n", __func__));
- ret = BCME_NOMEM;
- goto fail;
- }
-
- resp_buf = MALLOCZ(cfg->osh, data_size + NAN_IOVAR_NAME_SIZE);
- if (!resp_buf) {
- WL_ERR(("%s: memory allocation failed\n", __func__));
- ret = BCME_NOMEM;
- goto fail;
- }
-
- /* nan transmit */
- nan_buf->version = htol16(WL_NAN_IOV_BATCH_VERSION);
- nan_buf->count = 0;
- nan_buf_size -= OFFSETOF(bcm_iov_batch_buf_t, cmds[0]);
- /*
- * proceed only if mandatory arguments are present - subscriber id,
- * publisher id, mac address
- */
- if ((!cmd_data->local_id) || (!cmd_data->remote_id) ||
- ETHER_ISNULLADDR(&cmd_data->mac_addr.octet)) {
- WL_ERR(("mandatory arguments are not present\n"));
- ret = -EINVAL;
- goto fail;
- }
-
- sub_cmd = (bcm_iov_batch_subcmd_t*)(&nan_buf->cmds[0]);
- sd_xmit = (wl_nan_sd_transmit_t *)(sub_cmd->data);
-
- /* local instance id must be from 1 to 255, 0 is reserved */
- if (cmd_data->local_id == NAN_ID_RESERVED) {
- WL_ERR(("Invalid local instance id: %d\n", cmd_data->local_id));
- ret = BCME_BADARG;
- goto fail;
- }
- sd_xmit->local_service_id = cmd_data->local_id;
- is_lcl_id = TRUE;
-
- /* remote instance id must be from 1 to 255, 0 is reserved */
- if (cmd_data->remote_id == NAN_ID_RESERVED) {
- WL_ERR(("Invalid remote instance id: %d\n", cmd_data->remote_id));
- ret = BCME_BADARG;
- goto fail;
- }
-
- sd_xmit->requestor_service_id = cmd_data->remote_id;
- is_dest_id = TRUE;
-
- if (!ETHER_ISNULLADDR(&cmd_data->mac_addr.octet)) {
- ret = memcpy_s(&sd_xmit->destination_addr, ETHER_ADDR_LEN,
- &cmd_data->mac_addr, ETHER_ADDR_LEN);
- if (ret != BCME_OK) {
- WL_ERR(("Failed to copy dest mac address\n"));
- goto fail;
- }
- } else {
- WL_ERR(("Invalid ether addr provided\n"));
- ret = BCME_BADARG;
- goto fail;
- }
- is_dest_mac = TRUE;
-
- if (cmd_data->priority) {
- sd_xmit->priority = cmd_data->priority;
- }
- sd_xmit->token = cmd_data->token;
-
- if (cmd_data->recv_ind_flag) {
-		/* BIT0 - If set, host won't receive the "txs" event */
- if (CHECK_BIT(cmd_data->recv_ind_flag,
- WL_NAN_EVENT_SUPPRESS_FOLLOWUP_RECEIVE_BIT)) {
- sd_xmit->flags = WL_NAN_FUP_SUPR_EVT_TXS;
- }
- }
- /* Optional parameters: fill the sub_command block with service descriptor attr */
- sub_cmd->id = htod16(WL_NAN_CMD_SD_TRANSMIT);
- sub_cmd->len = sizeof(sub_cmd->u.options) +
- OFFSETOF(wl_nan_sd_transmit_t, opt_tlv);
- sub_cmd->u.options = htol32(BCM_XTLV_OPTION_ALIGN32);
- pxtlv = (uint8 *)&sd_xmit->opt_tlv;
-
- nan_buf_size -= (sub_cmd->len +
- OFFSETOF(bcm_iov_batch_subcmd_t, u.options));
-
- buflen_avail = nan_buf_size;
-
- if (cmd_data->svc_info.data && cmd_data->svc_info.dlen) {
- bcm_xtlv_t *pxtlv_svc_info = (bcm_xtlv_t *)pxtlv;
- ret = bcm_pack_xtlv_entry(&pxtlv, &nan_buf_size,
- WL_NAN_XTLV_SD_SVC_INFO, cmd_data->svc_info.dlen,
- cmd_data->svc_info.data, BCM_XTLV_OPTION_ALIGN32);
- if (unlikely(ret)) {
- WL_ERR(("%s: fail to pack on bcm_pack_xtlv_entry, ret=%d\n",
- __FUNCTION__, ret));
- goto fail;
- }
-
- /* 0xFF is max length for svc_info */
- if (pxtlv_svc_info->len > 0xFF) {
- WL_ERR(("Invalid service info length %d\n",
- (pxtlv_svc_info->len)));
- ret = BCME_USAGE_ERROR;
- goto fail;
- }
- sd_xmit->opt_len = (uint8)(pxtlv_svc_info->len);
- }
- if (cmd_data->sde_svc_info.data && cmd_data->sde_svc_info.dlen) {
- WL_TRACE(("optional sdea svc_info present, pack it\n"));
- ret = bcm_pack_xtlv_entry(&pxtlv, &nan_buf_size,
- WL_NAN_XTLV_SD_SDE_SVC_INFO, cmd_data->sde_svc_info.dlen,
- cmd_data->sde_svc_info.data, BCM_XTLV_OPTION_ALIGN32);
- if (unlikely(ret)) {
- WL_ERR(("%s: fail to pack sdea svc info\n", __FUNCTION__));
- goto fail;
- }
- }
-
- /* Check if all mandatory params are provided */
- if (is_lcl_id && is_dest_id && is_dest_mac) {
- nan_buf->count++;
- sub_cmd->len += (buflen_avail - nan_buf_size);
- } else {
- WL_ERR(("Missing parameters\n"));
- ret = BCME_USAGE_ERROR;
- }
- nan_buf->is_set = TRUE;
- ret = wl_cfgnan_execute_ioctl(ndev, cfg, nan_buf, data_size,
- &(cmd_data->status), resp_buf, data_size + NAN_IOVAR_NAME_SIZE);
- if (unlikely(ret) || unlikely(cmd_data->status)) {
- WL_ERR(("nan transmit failed for token %d ret = %d status = %d\n",
- sd_xmit->token, ret, cmd_data->status));
- goto fail;
- }
- WL_INFORM_MEM(("nan transmit successful for token %d\n", sd_xmit->token));
-fail:
- if (nan_buf) {
- MFREE(cfg->osh, nan_buf, data_size);
- }
- if (resp_buf) {
- MFREE(cfg->osh, resp_buf, data_size + NAN_IOVAR_NAME_SIZE);
- }
- NAN_MUTEX_UNLOCK();
- NAN_DBG_EXIT();
- return ret;
-}
-
-static int
-wl_cfgnan_get_capability(struct net_device *ndev,
- struct bcm_cfg80211 *cfg, nan_hal_capabilities_t *capabilities)
-{
- bcm_iov_batch_buf_t *nan_buf = NULL;
- s32 ret = BCME_OK;
- uint16 nan_buf_size = NAN_IOCTL_BUF_SIZE;
- wl_nan_fw_cap_t *fw_cap = NULL;
- uint16 subcmd_len;
- uint32 status;
- bcm_iov_batch_subcmd_t *sub_cmd = NULL;
- bcm_iov_batch_subcmd_t *sub_cmd_resp = NULL;
- uint8 resp_buf[NAN_IOCTL_BUF_SIZE];
- const bcm_xtlv_t *xtlv;
- uint16 type = 0;
- int len = 0;
-
- NAN_DBG_ENTER();
- nan_buf = MALLOCZ(cfg->osh, nan_buf_size);
- if (!nan_buf) {
- WL_ERR(("%s: memory allocation failed\n", __func__));
- ret = BCME_NOMEM;
- goto fail;
- }
-
- nan_buf->version = htol16(WL_NAN_IOV_BATCH_VERSION);
- nan_buf->count = 0;
- nan_buf_size -= OFFSETOF(bcm_iov_batch_buf_t, cmds[0]);
- sub_cmd = (bcm_iov_batch_subcmd_t*)(uint8 *)(&nan_buf->cmds[0]);
-
- ret = wl_cfg_nan_check_cmd_len(nan_buf_size,
- sizeof(*fw_cap), &subcmd_len);
- if (unlikely(ret)) {
- WL_ERR(("nan_sub_cmd check failed\n"));
- goto fail;
- }
-
- fw_cap = (wl_nan_fw_cap_t *)sub_cmd->data;
- sub_cmd->id = htod16(WL_NAN_CMD_GEN_FW_CAP);
- sub_cmd->len = sizeof(sub_cmd->u.options) + sizeof(*fw_cap);
- sub_cmd->u.options = htol32(BCM_XTLV_OPTION_ALIGN32);
- nan_buf_size -= subcmd_len;
- nan_buf->count = 1;
-
- nan_buf->is_set = false;
- memset(resp_buf, 0, sizeof(resp_buf));
- ret = wl_cfgnan_execute_ioctl(ndev, cfg, nan_buf, nan_buf_size, &status,
- (void*)resp_buf, NAN_IOCTL_BUF_SIZE);
- if (unlikely(ret) || unlikely(status)) {
- WL_ERR(("get nan fw cap failed ret %d status %d \n",
- ret, status));
- goto fail;
- }
-
- sub_cmd_resp = &((bcm_iov_batch_buf_t *)(resp_buf))->cmds[0];
-
- /* check the response buff */
- xtlv = ((const bcm_xtlv_t *)&sub_cmd_resp->data[0]);
- if (!xtlv) {
- ret = BCME_NOTFOUND;
- WL_ERR(("xtlv not found: err = %d\n", ret));
- goto fail;
- }
- bcm_xtlv_unpack_xtlv(xtlv, &type, (uint16*)&len, NULL, BCM_XTLV_OPTION_ALIGN32);
- do
- {
- switch (type) {
- case WL_NAN_XTLV_GEN_FW_CAP:
- if (len > sizeof(wl_nan_fw_cap_t)) {
- ret = BCME_BADARG;
- goto fail;
- }
- GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
- fw_cap = (wl_nan_fw_cap_t*)xtlv->data;
- GCC_DIAGNOSTIC_POP();
- break;
- default:
- WL_ERR(("Unknown xtlv: id %u\n", type));
- ret = BCME_ERROR;
- break;
- }
- if (ret != BCME_OK) {
- goto fail;
- }
- } while ((xtlv = bcm_next_xtlv(xtlv, &len, BCM_XTLV_OPTION_ALIGN32)));
-
- memset(capabilities, 0, sizeof(nan_hal_capabilities_t));
- capabilities->max_publishes = fw_cap->max_svc_publishes;
- capabilities->max_subscribes = fw_cap->max_svc_subscribes;
- capabilities->max_ndi_interfaces = fw_cap->max_lcl_ndi_interfaces;
- capabilities->max_ndp_sessions = fw_cap->max_ndp_sessions;
- capabilities->max_concurrent_nan_clusters = fw_cap->max_concurrent_nan_clusters;
- capabilities->max_service_name_len = fw_cap->max_service_name_len;
- capabilities->max_match_filter_len = fw_cap->max_match_filter_len;
- capabilities->max_total_match_filter_len = fw_cap->max_total_match_filter_len;
- capabilities->max_service_specific_info_len = fw_cap->max_service_specific_info_len;
- capabilities->max_app_info_len = fw_cap->max_app_info_len;
- capabilities->max_sdea_service_specific_info_len = fw_cap->max_sdea_svc_specific_info_len;
- capabilities->max_queued_transmit_followup_msgs = fw_cap->max_queued_tx_followup_msgs;
- capabilities->max_subscribe_address = fw_cap->max_subscribe_address;
- capabilities->is_ndp_security_supported = fw_cap->is_ndp_security_supported;
- capabilities->ndp_supported_bands = fw_cap->ndp_supported_bands;
- capabilities->cipher_suites_supported = fw_cap->cipher_suites_supported_mask;
-fail:
- if (nan_buf) {
- MFREE(cfg->osh, nan_buf, NAN_IOCTL_BUF_SIZE);
- }
- NAN_DBG_EXIT();
- return ret;
-}
-
-int
-wl_cfgnan_get_capablities_handler(struct net_device *ndev,
- struct bcm_cfg80211 *cfg, nan_hal_capabilities_t *capabilities)
-{
- s32 ret = BCME_OK;
- dhd_pub_t *dhdp = wl_cfg80211_get_dhdp(ndev);
-
- NAN_DBG_ENTER();
-
- /* Do not query fw about nan if feature is not supported */
- if (!FW_SUPPORTED(dhdp, nan)) {
- WL_DBG(("NAN is not supported\n"));
- return ret;
- }
-
- if (cfg->nan_init_state) {
- ret = wl_cfgnan_get_capability(ndev, cfg, capabilities);
- if (ret != BCME_OK) {
- WL_ERR(("NAN init state: %d, failed to get capability from FW[%d]\n",
- cfg->nan_init_state, ret));
- goto exit;
- }
- } else {
- /* Initialize NAN before sending iovar */
- WL_ERR(("Initializing NAN\n"));
- ret = wl_cfgnan_init(cfg);
- if (ret != BCME_OK) {
- WL_ERR(("failed to initialize NAN[%d]\n", ret));
- goto fail;
- }
-
- ret = wl_cfgnan_get_capability(ndev, cfg, capabilities);
- if (ret != BCME_OK) {
- WL_ERR(("NAN init state: %d, failed to get capability from FW[%d]\n",
- cfg->nan_init_state, ret));
- goto exit;
- }
- WL_ERR(("De-Initializing NAN\n"));
- ret = wl_cfgnan_deinit(cfg, dhdp->up);
- if (ret != BCME_OK) {
- WL_ERR(("failed to de-initialize NAN[%d]\n", ret));
- goto fail;
- }
- }
-fail:
- NAN_DBG_EXIT();
- return ret;
-exit:
-	/* Keeping backward compatibility */
- capabilities->max_concurrent_nan_clusters = MAX_CONCURRENT_NAN_CLUSTERS;
- capabilities->max_publishes = MAX_PUBLISHES;
- capabilities->max_subscribes = MAX_SUBSCRIBES;
- capabilities->max_service_name_len = MAX_SVC_NAME_LEN;
- capabilities->max_match_filter_len = MAX_MATCH_FILTER_LEN;
- capabilities->max_total_match_filter_len = MAX_TOTAL_MATCH_FILTER_LEN;
- capabilities->max_service_specific_info_len = NAN_MAX_SERVICE_SPECIFIC_INFO_LEN;
- capabilities->max_ndi_interfaces = MAX_NDI_INTERFACES;
- capabilities->max_ndp_sessions = MAX_NDP_SESSIONS;
- capabilities->max_app_info_len = MAX_APP_INFO_LEN;
- capabilities->max_queued_transmit_followup_msgs = MAX_QUEUED_TX_FOLLOUP_MSGS;
- capabilities->max_sdea_service_specific_info_len = MAX_SDEA_SVC_INFO_LEN;
- capabilities->max_subscribe_address = MAX_SUBSCRIBE_ADDRESS;
- capabilities->cipher_suites_supported = WL_NAN_CIPHER_SUITE_SHARED_KEY_128_MASK;
- capabilities->max_scid_len = MAX_SCID_LEN;
- capabilities->is_ndp_security_supported = true;
- capabilities->ndp_supported_bands = NDP_SUPPORTED_BANDS;
- ret = BCME_OK;
- NAN_DBG_EXIT();
- return ret;
-}
-
-bool wl_cfgnan_check_state(struct bcm_cfg80211 *cfg)
-{
- return cfg->nan_enable;
-}
-
-int
-wl_cfgnan_init(struct bcm_cfg80211 *cfg)
-{
- s32 ret = BCME_OK;
- uint16 nan_buf_size = NAN_IOCTL_BUF_SIZE;
- uint32 status;
- uint8 resp_buf[NAN_IOCTL_BUF_SIZE];
- uint8 buf[NAN_IOCTL_BUF_SIZE];
- bcm_iov_batch_buf_t *nan_buf = (bcm_iov_batch_buf_t*)buf;
-
- NAN_DBG_ENTER();
- if (cfg->nan_init_state) {
-		WL_ERR(("nan already initialized/nmi exists\n"));
- return BCME_OK;
- }
- nan_buf->version = htol16(WL_NAN_IOV_BATCH_VERSION);
- nan_buf->count = 0;
- nan_buf_size -= OFFSETOF(bcm_iov_batch_buf_t, cmds[0]);
- ret = wl_cfgnan_init_handler(&nan_buf->cmds[0], &nan_buf_size, true);
- if (unlikely(ret)) {
- WL_ERR(("init handler sub_cmd set failed\n"));
- goto fail;
- }
- nan_buf->count++;
- nan_buf->is_set = true;
-
- memset_s(resp_buf, sizeof(resp_buf), 0, sizeof(resp_buf));
- ret = wl_cfgnan_execute_ioctl(bcmcfg_to_prmry_ndev(cfg), cfg,
- nan_buf, nan_buf_size, &status,
- (void*)resp_buf, NAN_IOCTL_BUF_SIZE);
- if (unlikely(ret) || unlikely(status)) {
- WL_ERR(("nan init handler failed ret %d status %d\n",
- ret, status));
- goto fail;
- }
-
-#ifdef WL_NAN_DISC_CACHE
- /* malloc for disc result */
- cfg->nan_disc_cache = MALLOCZ(cfg->osh,
- NAN_MAX_CACHE_DISC_RESULT * sizeof(nan_disc_result_cache));
- if (!cfg->nan_disc_cache) {
- WL_ERR(("%s: memory allocation failed\n", __func__));
- ret = BCME_NOMEM;
- goto fail;
- }
-#endif /* WL_NAN_DISC_CACHE */
- cfg->nan_init_state = true;
- return ret;
-fail:
- NAN_DBG_EXIT();
- return ret;
-}
-
-void
-wl_cfgnan_deinit_cleanup(struct bcm_cfg80211 *cfg)
-{
- uint8 i = 0;
- cfg->nan_dp_count = 0;
- cfg->nan_init_state = false;
-#ifdef WL_NAN_DISC_CACHE
- if (cfg->nan_disc_cache) {
- for (i = 0; i < NAN_MAX_CACHE_DISC_RESULT; i++) {
- if (cfg->nan_disc_cache[i].tx_match_filter.data) {
- MFREE(cfg->osh, cfg->nan_disc_cache[i].tx_match_filter.data,
- cfg->nan_disc_cache[i].tx_match_filter.dlen);
- }
- if (cfg->nan_disc_cache[i].svc_info.data) {
- MFREE(cfg->osh, cfg->nan_disc_cache[i].svc_info.data,
- cfg->nan_disc_cache[i].svc_info.dlen);
- }
- }
- MFREE(cfg->osh, cfg->nan_disc_cache,
- NAN_MAX_CACHE_DISC_RESULT * sizeof(nan_disc_result_cache));
- cfg->nan_disc_cache = NULL;
- }
- cfg->nan_disc_count = 0;
- memset_s(cfg->svc_info, NAN_MAX_SVC_INST * sizeof(nan_svc_info_t),
- 0, NAN_MAX_SVC_INST * sizeof(nan_svc_info_t));
- memset_s(cfg->nan_ranging_info, NAN_MAX_RANGING_INST * sizeof(nan_ranging_inst_t),
- 0, NAN_MAX_RANGING_INST * sizeof(nan_ranging_inst_t));
-#endif /* WL_NAN_DISC_CACHE */
- return;
-}
-
-int
-wl_cfgnan_deinit(struct bcm_cfg80211 *cfg, uint8 busstate)
-{
- s32 ret = BCME_OK;
- uint16 nan_buf_size = NAN_IOCTL_BUF_SIZE;
- uint32 status;
- uint8 resp_buf[NAN_IOCTL_BUF_SIZE];
- uint8 buf[NAN_IOCTL_BUF_SIZE];
- bcm_iov_batch_buf_t *nan_buf = (bcm_iov_batch_buf_t*)buf;
-
- NAN_DBG_ENTER();
- NAN_MUTEX_LOCK();
-
- if (!cfg->nan_init_state) {
-		WL_ERR(("nan is not initialized/nmi doesn't exist\n"));
- ret = BCME_OK;
- goto fail;
- }
-
- if (busstate != DHD_BUS_DOWN) {
- nan_buf->version = htol16(WL_NAN_IOV_BATCH_VERSION);
- nan_buf->count = 0;
- nan_buf_size -= OFFSETOF(bcm_iov_batch_buf_t, cmds[0]);
-
- WL_DBG(("nan deinit\n"));
- ret = wl_cfgnan_init_handler(&nan_buf->cmds[0], &nan_buf_size, false);
- if (unlikely(ret)) {
- WL_ERR(("deinit handler sub_cmd set failed\n"));
- } else {
- nan_buf->count++;
- nan_buf->is_set = true;
- memset_s(resp_buf, sizeof(resp_buf), 0, sizeof(resp_buf));
- ret = wl_cfgnan_execute_ioctl(cfg->wdev->netdev, cfg,
- nan_buf, nan_buf_size, &status,
- (void*)resp_buf, NAN_IOCTL_BUF_SIZE);
- if (unlikely(ret) || unlikely(status)) {
-				WL_ERR(("nan deinit handler failed ret %d status %d\n",
- ret, status));
- }
- }
- }
- wl_cfgnan_deinit_cleanup(cfg);
-
-fail:
- if (!cfg->nancfg.mac_rand && !ETHER_ISNULLADDR(cfg->nan_nmi_mac)) {
- wl_release_vif_macaddr(cfg, cfg->nan_nmi_mac, WL_IF_TYPE_NAN_NMI);
- }
- NAN_MUTEX_UNLOCK();
- NAN_DBG_EXIT();
- return ret;
-}
-
-static int
-wl_cfgnan_get_ndi_macaddr(struct bcm_cfg80211 *cfg, u8* mac_addr)
-{
- int i = 0;
- int ret = BCME_OK;
- bool rand_mac = cfg->nancfg.mac_rand;
- BCM_REFERENCE(i);
-
- if (rand_mac) {
- /* ensure nmi != ndi */
- do {
- RANDOM_BYTES(mac_addr, ETHER_ADDR_LEN);
- /* restore mcast and local admin bits to 0 and 1 */
- ETHER_SET_UNICAST(mac_addr);
- ETHER_SET_LOCALADDR(mac_addr);
- i++;
- if (i == NAN_RAND_MAC_RETRIES) {
- break;
- }
- } while (eacmp(cfg->nan_nmi_mac, mac_addr) == 0);
-
- if (i == NAN_RAND_MAC_RETRIES) {
- if (eacmp(cfg->nan_nmi_mac, mac_addr) == 0) {
- WL_ERR(("\nCouldn't generate rand NDI which != NMI\n"));
- ret = BCME_NORESOURCE;
- goto fail;
- }
- }
- } else {
- if (wl_get_vif_macaddr(cfg, WL_IF_TYPE_NAN,
- mac_addr) != BCME_OK) {
- ret = -EINVAL;
- WL_ERR(("Failed to get mac addr for NDI\n"));
- goto fail;
- }
- }
-
-fail:
- return ret;
-}
-
-int
-wl_cfgnan_data_path_iface_create_delete_handler(struct net_device *ndev,
- struct bcm_cfg80211 *cfg, char *ifname, uint16 type, uint8 busstate)
-{
- u8 mac_addr[ETH_ALEN];
- s32 ret = BCME_OK;
- s32 idx;
- struct wireless_dev *wdev;
- NAN_DBG_ENTER();
-
- if (busstate != DHD_BUS_DOWN) {
- if (type == NAN_WIFI_SUBCMD_DATA_PATH_IFACE_CREATE) {
- if ((idx = wl_cfgnan_get_ndi_idx(cfg)) < 0) {
- WL_ERR(("No free idx for NAN NDI\n"));
- ret = BCME_NORESOURCE;
- goto fail;
- }
-
- ret = wl_cfgnan_get_ndi_macaddr(cfg, mac_addr);
- if (ret != BCME_OK) {
- WL_ERR(("Couldn't get mac addr for NDI ret %d\n", ret));
- goto fail;
- }
- wdev = wl_cfg80211_add_if(cfg, ndev, WL_IF_TYPE_NAN,
- ifname, mac_addr);
- if (!wdev) {
- ret = -ENODEV;
- WL_ERR(("Failed to create NDI iface = %s, wdev is NULL\n", ifname));
- goto fail;
- }
- /* Store the iface name to pub data so that it can be used
- * during NAN enable
- */
- wl_cfgnan_add_ndi_data(cfg, idx, ifname);
- cfg->nancfg.ndi[idx].created = true;
- /* Store nan ndev */
- cfg->nancfg.ndi[idx].nan_ndev = wdev_to_ndev(wdev);
-
- } else if (type == NAN_WIFI_SUBCMD_DATA_PATH_IFACE_DELETE) {
- ret = wl_cfg80211_del_if(cfg, ndev, NULL, ifname);
- if (ret == BCME_OK) {
- if (wl_cfgnan_del_ndi_data(cfg, ifname) < 0) {
- WL_ERR(("Failed to find matching data for ndi:%s\n",
- ifname));
- }
- } else if (ret == -ENODEV) {
- WL_INFORM(("Already deleted: %s\n", ifname));
- ret = BCME_OK;
- } else if (ret != BCME_OK) {
- WL_ERR(("failed to delete NDI[%d]\n", ret));
- }
- }
- } else {
- ret = -ENODEV;
- WL_ERR(("Bus is already down, no dev found to remove, ret = %d\n", ret));
- }
-fail:
- NAN_DBG_EXIT();
- return ret;
-}
-
-/*
- * Return the data peer from the peer list
- * matching peer_addr; NULL if not found.
- */
-nan_ndp_peer_t *
-wl_cfgnan_data_get_peer(struct bcm_cfg80211 *cfg,
- struct ether_addr *peer_addr)
-{
- uint8 i;
- nan_ndp_peer_t* peer = cfg->nancfg.nan_ndp_peer_info;
-
- if (!peer) {
- WL_ERR(("wl_cfgnan_data_get_peer: nan_ndp_peer_info is NULL\n"));
- goto exit;
- }
- for (i = 0; i < cfg->nancfg.max_ndp_count; i++) {
- if (peer[i].peer_dp_state != NAN_PEER_DP_NOT_CONNECTED &&
- (!memcmp(peer_addr, &peer[i].peer_addr, ETHER_ADDR_LEN))) {
- return &peer[i];
- }
- }
-
-exit:
- return NULL;
-}
-
-/*
- * Returns TRUE if a datapath exists
- * on the nan cfg with any peer.
- */
-bool
-wl_cfgnan_data_dp_exists(struct bcm_cfg80211 *cfg)
-{
- bool ret = FALSE;
- uint8 i;
- nan_ndp_peer_t* peer = NULL;
-
- if ((cfg->nan_init_state == FALSE) ||
- (cfg->nan_enable == FALSE)) {
- goto exit;
- }
-
- if (!cfg->nancfg.nan_ndp_peer_info) {
- goto exit;
- }
-
- peer = cfg->nancfg.nan_ndp_peer_info;
- for (i = 0; i < cfg->nancfg.max_ndp_count; i++) {
- if (peer[i].peer_dp_state != NAN_PEER_DP_NOT_CONNECTED) {
- ret = TRUE;
- break;
- }
- }
-
-exit:
- return ret;
-}
-
-/*
- * Returns TRUE if a datapath exists
- * on the nan cfg with the given peer.
- */
-bool
-wl_cfgnan_data_dp_exists_with_peer(struct bcm_cfg80211 *cfg,
- struct ether_addr *peer_addr)
-{
- bool ret = FALSE;
- nan_ndp_peer_t* peer = NULL;
-
- if ((cfg->nan_init_state == FALSE) ||
- (cfg->nan_enable == FALSE)) {
- goto exit;
- }
-
- /* check for peer exist */
- peer = wl_cfgnan_data_get_peer(cfg, peer_addr);
- if (peer) {
- ret = TRUE;
- }
-
-exit:
- return ret;
-}
-
-/*
- * As of now, this API is only used to set
- * the state to CONNECTED, if applicable.
- */
-void
-wl_cfgnan_data_set_peer_dp_state(struct bcm_cfg80211 *cfg,
- struct ether_addr *peer_addr, nan_peer_dp_state_t state)
-{
- nan_ndp_peer_t* peer = NULL;
- /* check for peer exist */
- peer = wl_cfgnan_data_get_peer(cfg, peer_addr);
- if (!peer) {
- goto end;
- }
- peer->peer_dp_state = state;
-end:
- return;
-}
-
-/* Adds peer to nan data peer list */
-void
-wl_cfgnan_data_add_peer(struct bcm_cfg80211 *cfg,
- struct ether_addr *peer_addr)
-{
- uint8 i;
- nan_ndp_peer_t* peer = NULL;
- /* check for peer exist */
- peer = wl_cfgnan_data_get_peer(cfg, peer_addr);
- if (peer) {
- peer->dp_count++;
- goto end;
- }
- peer = cfg->nancfg.nan_ndp_peer_info;
- for (i = 0; i < cfg->nancfg.max_ndp_count; i++) {
- if (peer[i].peer_dp_state == NAN_PEER_DP_NOT_CONNECTED) {
- break;
- }
- }
- if (i == NAN_MAX_NDP_PEER) {
-		WL_DBG(("DP Peer list full, Dropping add peer req\n"));
- goto end;
- }
- /* Add peer to list */
- memcpy(&peer[i].peer_addr, peer_addr, ETHER_ADDR_LEN);
- peer[i].dp_count = 1;
- peer[i].peer_dp_state = NAN_PEER_DP_CONNECTING;
-
-end:
- return;
-}
-
-/* Removes nan data peer from peer list */
-void
-wl_cfgnan_data_remove_peer(struct bcm_cfg80211 *cfg,
- struct ether_addr *peer_addr)
-{
- nan_ndp_peer_t* peer = NULL;
- /* check for peer exist */
- peer = wl_cfgnan_data_get_peer(cfg, peer_addr);
- if (!peer) {
- WL_DBG(("DP Peer not present in list, "
-			"Dropping remove peer req\n"));
- goto end;
- }
- peer->dp_count--;
- if (peer->dp_count == 0) {
- /* No more NDPs, delete entry */
- memset(peer, 0, sizeof(nan_ndp_peer_t));
- } else {
-		/* Set peer dp state to connected if any ndp still exists */
- peer->peer_dp_state = NAN_PEER_DP_CONNECTED;
- }
-end:
- return;
-}
-
-int
-wl_cfgnan_data_path_request_handler(struct net_device *ndev,
- struct bcm_cfg80211 *cfg, nan_datapath_cmd_data_t *cmd_data,
- uint8 *ndp_instance_id)
-{
- s32 ret = BCME_OK;
- bcm_iov_batch_buf_t *nan_buf = NULL;
- wl_nan_dp_req_t *datareq = NULL;
- bcm_iov_batch_subcmd_t *sub_cmd = NULL;
- uint16 buflen_avail;
- uint8 *pxtlv;
- struct wireless_dev *wdev;
- uint16 nan_buf_size;
- uint8 *resp_buf = NULL;
- /* Considering fixed params */
- uint16 data_size = WL_NAN_OBUF_DATA_OFFSET +
- OFFSETOF(wl_nan_dp_req_t, tlv_params);
- data_size = ALIGN_SIZE(data_size, 4);
-
- ret = wl_cfgnan_aligned_data_size_of_opt_dp_params(&data_size, cmd_data);
- if (unlikely(ret)) {
-		WL_ERR(("Failed to get aligned size of optional params\n"));
- goto fail;
- }
-
- nan_buf_size = data_size;
- NAN_DBG_ENTER();
-
- mutex_lock(&cfg->if_sync);
- NAN_MUTEX_LOCK();
-#ifdef WL_IFACE_MGMT
- if ((ret = wl_cfg80211_handle_if_role_conflict(cfg, WL_IF_TYPE_NAN)) < 0) {
- WL_ERR(("Conflicting iface found to be active\n"));
- ret = BCME_UNSUPPORTED;
- goto fail;
- }
-#endif /* WL_IFACE_MGMT */
-
-#ifdef RTT_SUPPORT
- /* cancel any ongoing RTT session with peer
-	 * as we do not support DP and RNG to the same peer
- */
- wl_cfgnan_clear_peer_ranging(cfg, &cmd_data->mac_addr,
- RTT_GEO_SUSPN_HOST_NDP_TRIGGER);
-#endif /* RTT_SUPPORT */
-
- nan_buf = MALLOCZ(cfg->osh, data_size);
- if (!nan_buf) {
- WL_ERR(("%s: memory allocation failed\n", __func__));
- ret = BCME_NOMEM;
- goto fail;
- }
-
- resp_buf = MALLOCZ(cfg->osh, data_size + NAN_IOVAR_NAME_SIZE);
- if (!resp_buf) {
- WL_ERR(("%s: memory allocation failed\n", __func__));
- ret = BCME_NOMEM;
- goto fail;
- }
-
- ret = wl_cfgnan_set_nan_avail(bcmcfg_to_prmry_ndev(cfg),
- cfg, &cmd_data->avail_params, WL_AVAIL_LOCAL);
- if (unlikely(ret)) {
- WL_ERR(("Failed to set avail value with type local\n"));
- goto fail;
- }
-
- ret = wl_cfgnan_set_nan_avail(bcmcfg_to_prmry_ndev(cfg),
- cfg, &cmd_data->avail_params, WL_AVAIL_NDC);
- if (unlikely(ret)) {
- WL_ERR(("Failed to set avail value with type ndc\n"));
- goto fail;
- }
-
- nan_buf->version = htol16(WL_NAN_IOV_BATCH_VERSION);
- nan_buf->count = 0;
- nan_buf_size -= OFFSETOF(bcm_iov_batch_buf_t, cmds[0]);
-
- sub_cmd = (bcm_iov_batch_subcmd_t*)(&nan_buf->cmds[0]);
- datareq = (wl_nan_dp_req_t *)(sub_cmd->data);
-
- /* setting default data path type to unicast */
- datareq->type = WL_NAN_DP_TYPE_UNICAST;
-
- if (cmd_data->pub_id) {
- datareq->pub_id = cmd_data->pub_id;
- }
-
- if (!ETHER_ISNULLADDR(&cmd_data->mac_addr.octet)) {
- ret = memcpy_s(&datareq->peer_mac, ETHER_ADDR_LEN,
- &cmd_data->mac_addr, ETHER_ADDR_LEN);
- if (ret != BCME_OK) {
- WL_ERR(("Failed to copy ether addr provided\n"));
- goto fail;
- }
- } else {
- WL_ERR(("Invalid ether addr provided\n"));
- ret = BCME_BADARG;
- goto fail;
- }
-
- /* Retrieve mac from given iface name */
- wdev = wl_cfg80211_get_wdev_from_ifname(cfg,
- (char *)cmd_data->ndp_iface);
- if (!wdev || ETHER_ISNULLADDR(wdev->netdev->dev_addr)) {
- ret = -EINVAL;
- WL_ERR(("Failed to retrieve wdev/dev addr for ndp_iface = %s\n",
- (char *)cmd_data->ndp_iface));
- goto fail;
- }
-
- if (!ETHER_ISNULLADDR(wdev->netdev->dev_addr)) {
- ret = memcpy_s(&datareq->ndi, ETHER_ADDR_LEN,
- wdev->netdev->dev_addr, ETHER_ADDR_LEN);
- if (ret != BCME_OK) {
- WL_ERR(("Failed to copy ether addr provided\n"));
- goto fail;
- }
- WL_TRACE(("%s: Retrieved ndi mac " MACDBG "\n",
- __FUNCTION__, MAC2STRDBG(datareq->ndi.octet)));
- } else {
- WL_ERR(("Invalid NDI addr retrieved\n"));
- ret = BCME_BADARG;
- goto fail;
- }
-
- datareq->ndl_qos.min_slots = NAN_NDL_QOS_MIN_SLOT_NO_PREF;
- datareq->ndl_qos.max_latency = NAN_NDL_QOS_MAX_LAT_NO_PREF;
-
- /* Fill the sub_command block */
- sub_cmd->id = htod16(WL_NAN_CMD_DATA_DATAREQ);
- sub_cmd->len = sizeof(sub_cmd->u.options) +
- OFFSETOF(wl_nan_dp_req_t, tlv_params);
- sub_cmd->u.options = htol32(BCM_XTLV_OPTION_ALIGN32);
- pxtlv = (uint8 *)&datareq->tlv_params;
-
- nan_buf_size -= (sub_cmd->len +
- OFFSETOF(bcm_iov_batch_subcmd_t, u.options));
- buflen_avail = nan_buf_size;
-
- if (cmd_data->svc_info.data && cmd_data->svc_info.dlen) {
- ret = bcm_pack_xtlv_entry(&pxtlv, &nan_buf_size,
- WL_NAN_XTLV_SD_SVC_INFO, cmd_data->svc_info.dlen,
- cmd_data->svc_info.data,
- BCM_XTLV_OPTION_ALIGN32);
- if (ret != BCME_OK) {
- WL_ERR(("unable to process svc_spec_info: %d\n", ret));
- goto fail;
- }
- datareq->flags |= WL_NAN_DP_FLAG_SVC_INFO;
- }
-
- /* Security elements */
-
- if (cmd_data->csid) {
- WL_TRACE(("Cipher suite type is present, pack it\n"));
- ret = bcm_pack_xtlv_entry(&pxtlv, &nan_buf_size,
- WL_NAN_XTLV_CFG_SEC_CSID, sizeof(nan_sec_csid_e),
- (uint8*)&cmd_data->csid, BCM_XTLV_OPTION_ALIGN32);
- if (unlikely(ret)) {
- WL_ERR(("%s: fail to pack on csid\n", __FUNCTION__));
- goto fail;
- }
- }
-
- if (cmd_data->ndp_cfg.security_cfg) {
- if ((cmd_data->key_type == NAN_SECURITY_KEY_INPUT_PMK) ||
- (cmd_data->key_type == NAN_SECURITY_KEY_INPUT_PASSPHRASE)) {
- if (cmd_data->key.data && cmd_data->key.dlen) {
- WL_TRACE(("optional pmk present, pack it\n"));
- ret = bcm_pack_xtlv_entry(&pxtlv, &nan_buf_size,
- WL_NAN_XTLV_CFG_SEC_PMK, cmd_data->key.dlen,
- cmd_data->key.data, BCM_XTLV_OPTION_ALIGN32);
- if (unlikely(ret)) {
- WL_ERR(("%s: fail to pack on WL_NAN_XTLV_CFG_SEC_PMK\n",
- __FUNCTION__));
- goto fail;
- }
- }
- } else {
- WL_ERR(("Invalid security key type\n"));
- ret = BCME_BADARG;
- goto fail;
- }
-
- if ((cmd_data->svc_hash.dlen == WL_NAN_SVC_HASH_LEN) &&
- (cmd_data->svc_hash.data)) {
- WL_TRACE(("svc hash present, pack it\n"));
- ret = bcm_pack_xtlv_entry(&pxtlv, &nan_buf_size,
- WL_NAN_XTLV_CFG_SVC_HASH, WL_NAN_SVC_HASH_LEN,
- cmd_data->svc_hash.data, BCM_XTLV_OPTION_ALIGN32);
- if (ret != BCME_OK) {
- WL_ERR(("%s: fail to pack WL_NAN_XTLV_CFG_SVC_HASH\n",
- __FUNCTION__));
- goto fail;
- }
- } else {
-#ifdef WL_NAN_DISC_CACHE
- /* check in cache */
- nan_disc_result_cache *cache;
- cache = wl_cfgnan_get_disc_result(cfg,
- datareq->pub_id, &datareq->peer_mac);
- if (!cache) {
- ret = BCME_ERROR;
- WL_ERR(("invalid svc hash data or length = %d\n",
- cmd_data->svc_hash.dlen));
- goto fail;
- }
- WL_TRACE(("svc hash present, pack it\n"));
- ret = bcm_pack_xtlv_entry(&pxtlv, &nan_buf_size,
- WL_NAN_XTLV_CFG_SVC_HASH, WL_NAN_SVC_HASH_LEN,
- cache->svc_hash, BCM_XTLV_OPTION_ALIGN32);
- if (ret != BCME_OK) {
- WL_ERR(("%s: fail to pack WL_NAN_XTLV_CFG_SVC_HASH\n",
- __FUNCTION__));
- goto fail;
- }
-#else
- ret = BCME_ERROR;
- WL_ERR(("invalid svc hash data or length = %d\n",
- cmd_data->svc_hash.dlen));
- goto fail;
-#endif /* WL_NAN_DISC_CACHE */
- }
- /* If the Data req is for secure data connection */
- datareq->flags |= WL_NAN_DP_FLAG_SECURITY;
- }
-
- sub_cmd->len += (buflen_avail - nan_buf_size);
- nan_buf->is_set = false;
- nan_buf->count++;
-
- ret = wl_cfgnan_execute_ioctl(ndev, cfg, nan_buf, data_size,
- &(cmd_data->status), resp_buf, data_size + NAN_IOVAR_NAME_SIZE);
- if (unlikely(ret) || unlikely(cmd_data->status)) {
- WL_ERR(("nan data path request handler failed, ret = %d status %d\n",
- ret, cmd_data->status));
- goto fail;
- }
-
- /* check the response buff */
- if (ret == BCME_OK) {
- ret = process_resp_buf(resp_buf + WL_NAN_OBUF_DATA_OFFSET,
- ndp_instance_id, WL_NAN_CMD_DATA_DATAREQ);
- cmd_data->ndp_instance_id = *ndp_instance_id;
- }
-	WL_INFORM_MEM(("[NAN] DP request successful (ndp_id:%d)\n",
- cmd_data->ndp_instance_id));
- /* Add peer to data ndp peer list */
- wl_cfgnan_data_add_peer(cfg, &datareq->peer_mac);
-
-fail:
- if (nan_buf) {
- MFREE(cfg->osh, nan_buf, data_size);
- }
-
- if (resp_buf) {
- MFREE(cfg->osh, resp_buf, data_size + NAN_IOVAR_NAME_SIZE);
- }
- NAN_MUTEX_UNLOCK();
- mutex_unlock(&cfg->if_sync);
- NAN_DBG_EXIT();
- return ret;
-}
-
-int
-wl_cfgnan_data_path_response_handler(struct net_device *ndev,
- struct bcm_cfg80211 *cfg, nan_datapath_cmd_data_t *cmd_data)
-{
- s32 ret = BCME_OK;
- bcm_iov_batch_buf_t *nan_buf = NULL;
- wl_nan_dp_resp_t *dataresp = NULL;
- bcm_iov_batch_subcmd_t *sub_cmd = NULL;
- uint16 buflen_avail;
- uint8 *pxtlv;
- struct wireless_dev *wdev;
- uint16 nan_buf_size;
- uint8 *resp_buf = NULL;
-
- /* Considering fixed params */
- uint16 data_size = WL_NAN_OBUF_DATA_OFFSET +
- OFFSETOF(wl_nan_dp_resp_t, tlv_params);
- data_size = ALIGN_SIZE(data_size, 4);
- ret = wl_cfgnan_aligned_data_size_of_opt_dp_params(&data_size, cmd_data);
- if (unlikely(ret)) {
-		WL_ERR(("Failed to get aligned size of optional params\n"));
- goto fail;
- }
- nan_buf_size = data_size;
-
- NAN_DBG_ENTER();
-
- mutex_lock(&cfg->if_sync);
- NAN_MUTEX_LOCK();
-#ifdef WL_IFACE_MGMT
- if ((ret = wl_cfg80211_handle_if_role_conflict(cfg, WL_IF_TYPE_NAN)) < 0) {
- WL_ERR(("Conflicting iface found to be active\n"));
- ret = BCME_UNSUPPORTED;
- goto fail;
- }
-#endif /* WL_IFACE_MGMT */
-
- nan_buf = MALLOCZ(cfg->osh, data_size);
- if (!nan_buf) {
- WL_ERR(("%s: memory allocation failed\n", __func__));
- ret = BCME_NOMEM;
- goto fail;
- }
-
- resp_buf = MALLOCZ(cfg->osh, data_size + NAN_IOVAR_NAME_SIZE);
- if (!resp_buf) {
- WL_ERR(("%s: memory allocation failed\n", __func__));
- ret = BCME_NOMEM;
- goto fail;
- }
-
- ret = wl_cfgnan_set_nan_avail(bcmcfg_to_prmry_ndev(cfg),
- cfg, &cmd_data->avail_params, WL_AVAIL_LOCAL);
- if (unlikely(ret)) {
- WL_ERR(("Failed to set avail value with type local\n"));
- goto fail;
- }
-
- ret = wl_cfgnan_set_nan_avail(bcmcfg_to_prmry_ndev(cfg),
- cfg, &cmd_data->avail_params, WL_AVAIL_NDC);
- if (unlikely(ret)) {
- WL_ERR(("Failed to set avail value with type ndc\n"));
- goto fail;
- }
-
- nan_buf->version = htol16(WL_NAN_IOV_BATCH_VERSION);
- nan_buf->count = 0;
- nan_buf_size -= OFFSETOF(bcm_iov_batch_buf_t, cmds[0]);
-
- sub_cmd = (bcm_iov_batch_subcmd_t*)(&nan_buf->cmds[0]);
- dataresp = (wl_nan_dp_resp_t *)(sub_cmd->data);
-
- /* Setting default data path type to unicast */
- dataresp->type = WL_NAN_DP_TYPE_UNICAST;
- /* Changing status value as per fw convention */
- dataresp->status = cmd_data->rsp_code ^= 1;
- dataresp->reason_code = 0;
-
- /* ndp instance id must be from 1 to 255, 0 is reserved */
- if (cmd_data->ndp_instance_id < NAN_ID_MIN ||
- cmd_data->ndp_instance_id > NAN_ID_MAX) {
- WL_ERR(("Invalid ndp instance id: %d\n", cmd_data->ndp_instance_id));
- ret = BCME_BADARG;
- goto fail;
- }
- dataresp->ndp_id = cmd_data->ndp_instance_id;
-
- /* Retrieved initiator ndi from NanDataPathRequestInd */
- if (!ETHER_ISNULLADDR(&cfg->initiator_ndi.octet)) {
- ret = memcpy_s(&dataresp->mac_addr, ETHER_ADDR_LEN,
- &cfg->initiator_ndi, ETHER_ADDR_LEN);
- if (ret != BCME_OK) {
- WL_ERR(("Failed to copy initiator ndi\n"));
- goto fail;
- }
- } else {
- WL_ERR(("Invalid ether addr retrieved\n"));
- ret = BCME_BADARG;
- goto fail;
- }
-
-	/* Interface is not mandatory when it is a reject from the framework */
- if (dataresp->status != WL_NAN_DP_STATUS_REJECTED) {
- /* Retrieve mac from given iface name */
- wdev = wl_cfg80211_get_wdev_from_ifname(cfg,
- (char *)cmd_data->ndp_iface);
- if (!wdev || ETHER_ISNULLADDR(wdev->netdev->dev_addr)) {
- ret = -EINVAL;
- WL_ERR(("Failed to retrieve wdev/dev addr for ndp_iface = %s\n",
- (char *)cmd_data->ndp_iface));
- goto fail;
- }
-
- if (!ETHER_ISNULLADDR(wdev->netdev->dev_addr)) {
- ret = memcpy_s(&dataresp->ndi, ETHER_ADDR_LEN,
- wdev->netdev->dev_addr, ETHER_ADDR_LEN);
- if (ret != BCME_OK) {
- WL_ERR(("Failed to copy responder ndi\n"));
- goto fail;
- }
- WL_TRACE(("%s: Retrieved ndi mac " MACDBG "\n",
- __FUNCTION__, MAC2STRDBG(dataresp->ndi.octet)));
- } else {
- WL_ERR(("Invalid NDI addr retrieved\n"));
- ret = BCME_BADARG;
- goto fail;
- }
- }
-
- dataresp->ndl_qos.min_slots = NAN_NDL_QOS_MIN_SLOT_NO_PREF;
- dataresp->ndl_qos.max_latency = NAN_NDL_QOS_MAX_LAT_NO_PREF;
-
- /* Fill the sub_command block */
- sub_cmd->id = htod16(WL_NAN_CMD_DATA_DATARESP);
- sub_cmd->len = sizeof(sub_cmd->u.options) +
- OFFSETOF(wl_nan_dp_resp_t, tlv_params);
- sub_cmd->u.options = htol32(BCM_XTLV_OPTION_ALIGN32);
- pxtlv = (uint8 *)&dataresp->tlv_params;
-
- nan_buf_size -= (sub_cmd->len +
- OFFSETOF(bcm_iov_batch_subcmd_t, u.options));
- buflen_avail = nan_buf_size;
-
- if (cmd_data->svc_info.data && cmd_data->svc_info.dlen) {
- ret = bcm_pack_xtlv_entry(&pxtlv, &nan_buf_size,
- WL_NAN_XTLV_SD_SVC_INFO, cmd_data->svc_info.dlen,
- cmd_data->svc_info.data,
- BCM_XTLV_OPTION_ALIGN32);
- if (ret != BCME_OK) {
- WL_ERR(("unable to process svc_spec_info: %d\n", ret));
- goto fail;
- }
- dataresp->flags |= WL_NAN_DP_FLAG_SVC_INFO;
- }
-
- /* Security elements */
- if (cmd_data->csid) {
- WL_TRACE(("Cipher suite type is present, pack it\n"));
- ret = bcm_pack_xtlv_entry(&pxtlv, &nan_buf_size,
- WL_NAN_XTLV_CFG_SEC_CSID, sizeof(nan_sec_csid_e),
- (uint8*)&cmd_data->csid, BCM_XTLV_OPTION_ALIGN32);
- if (unlikely(ret)) {
- WL_ERR(("%s: fail to pack csid\n", __FUNCTION__));
- goto fail;
- }
- }
-
- if (cmd_data->ndp_cfg.security_cfg) {
- if ((cmd_data->key_type == NAN_SECURITY_KEY_INPUT_PMK) ||
- (cmd_data->key_type == NAN_SECURITY_KEY_INPUT_PASSPHRASE)) {
- if (cmd_data->key.data && cmd_data->key.dlen) {
- WL_TRACE(("optional pmk present, pack it\n"));
- ret = bcm_pack_xtlv_entry(&pxtlv, &nan_buf_size,
- WL_NAN_XTLV_CFG_SEC_PMK, cmd_data->key.dlen,
- cmd_data->key.data, BCM_XTLV_OPTION_ALIGN32);
- if (unlikely(ret)) {
- WL_ERR(("%s: fail to pack WL_NAN_XTLV_CFG_SEC_PMK\n",
- __FUNCTION__));
- goto fail;
- }
- }
- } else {
- WL_ERR(("Invalid security key type\n"));
- ret = BCME_BADARG;
- goto fail;
- }
-
- if ((cmd_data->svc_hash.dlen == WL_NAN_SVC_HASH_LEN) &&
- (cmd_data->svc_hash.data)) {
- WL_TRACE(("svc hash present, pack it\n"));
- ret = bcm_pack_xtlv_entry(&pxtlv, &nan_buf_size,
- WL_NAN_XTLV_CFG_SVC_HASH, WL_NAN_SVC_HASH_LEN,
- cmd_data->svc_hash.data,
- BCM_XTLV_OPTION_ALIGN32);
- if (ret != BCME_OK) {
- WL_ERR(("%s: fail to pack WL_NAN_XTLV_CFG_SVC_HASH\n",
- __FUNCTION__));
- goto fail;
- }
- }
- /* If the Data resp is for secure data connection */
- dataresp->flags |= WL_NAN_DP_FLAG_SECURITY;
- }
-
- sub_cmd->len += (buflen_avail - nan_buf_size);
-
- nan_buf->is_set = false;
- nan_buf->count++;
- ret = wl_cfgnan_execute_ioctl(ndev, cfg, nan_buf, data_size,
- &(cmd_data->status), resp_buf, data_size + NAN_IOVAR_NAME_SIZE);
- if (unlikely(ret) || unlikely(cmd_data->status)) {
- WL_ERR(("nan data path response handler failed, error = %d, status %d\n",
- ret, cmd_data->status));
- goto fail;
- }
-
-	WL_INFORM_MEM(("[NAN] DP response successful (ndp_id:%d)\n", dataresp->ndp_id));
-
-fail:
- if (nan_buf) {
- MFREE(cfg->osh, nan_buf, data_size);
- }
-
- if (resp_buf) {
- MFREE(cfg->osh, resp_buf, data_size + NAN_IOVAR_NAME_SIZE);
- }
- NAN_MUTEX_UNLOCK();
- mutex_unlock(&cfg->if_sync);
-
- NAN_DBG_EXIT();
- return ret;
-}
-
-int wl_cfgnan_data_path_end_handler(struct net_device *ndev,
- struct bcm_cfg80211 *cfg, nan_data_path_id ndp_instance_id,
- int *status)
-{
- bcm_iov_batch_buf_t *nan_buf = NULL;
- wl_nan_dp_end_t *dataend = NULL;
- bcm_iov_batch_subcmd_t *sub_cmd = NULL;
- s32 ret = BCME_OK;
- uint16 nan_buf_size = NAN_IOCTL_BUF_SIZE;
- uint8 resp_buf[NAN_IOCTL_BUF_SIZE];
-
- dhd_pub_t *dhdp = wl_cfg80211_get_dhdp(ndev);
-
- NAN_DBG_ENTER();
- NAN_MUTEX_LOCK();
-
- if (!dhdp->up) {
- WL_ERR(("bus is already down, hence blocking nan dp end\n"));
- ret = BCME_OK;
- goto fail;
- }
-
- if (!cfg->nan_enable) {
- WL_ERR(("nan is not enabled, nan dp end blocked\n"));
- ret = BCME_OK;
- goto fail;
- }
-
- /* ndp instance id must be from 1 to 255, 0 is reserved */
- if (ndp_instance_id < NAN_ID_MIN ||
- ndp_instance_id > NAN_ID_MAX) {
- WL_ERR(("Invalid ndp instance id: %d\n", ndp_instance_id));
- ret = BCME_BADARG;
- goto fail;
- }
-
- nan_buf = MALLOCZ(cfg->osh, nan_buf_size);
- if (!nan_buf) {
- WL_ERR(("%s: memory allocation failed\n", __func__));
- ret = BCME_NOMEM;
- goto fail;
- }
-
- nan_buf->version = htol16(WL_NAN_IOV_BATCH_VERSION);
- nan_buf->count = 0;
- nan_buf_size -= OFFSETOF(bcm_iov_batch_buf_t, cmds[0]);
-
- sub_cmd = (bcm_iov_batch_subcmd_t*)(&nan_buf->cmds[0]);
- dataend = (wl_nan_dp_end_t *)(sub_cmd->data);
-
- /* Fill sub_cmd block */
- sub_cmd->id = htod16(WL_NAN_CMD_DATA_DATAEND);
- sub_cmd->len = sizeof(sub_cmd->u.options) +
- sizeof(*dataend);
- sub_cmd->u.options = htol32(BCM_XTLV_OPTION_ALIGN32);
-
- dataend->lndp_id = ndp_instance_id;
-
- /*
- * Currently fw requires ndp_id and reason to end the data path
- * But wifi_nan.h takes ndp_instances_count and ndp_id.
- * Will keep reason = accept always.
- */
-
- dataend->status = 1;
-
- nan_buf->is_set = true;
- nan_buf->count++;
-
- nan_buf_size -= (sub_cmd->len +
- OFFSETOF(bcm_iov_batch_subcmd_t, u.options));
- memset_s(resp_buf, sizeof(resp_buf), 0, sizeof(resp_buf));
- ret = wl_cfgnan_execute_ioctl(ndev, cfg, nan_buf, nan_buf_size,
- status, (void*)resp_buf, NAN_IOCTL_BUF_SIZE);
- if (unlikely(ret) || unlikely(*status)) {
- WL_ERR(("nan data path end handler failed, error = %d status %d\n",
- ret, *status));
- goto fail;
- }
-	WL_INFORM_MEM(("[NAN] DP end successful (ndp_id:%d)\n",
- dataend->lndp_id));
-fail:
- if (nan_buf) {
- MFREE(cfg->osh, nan_buf, NAN_IOCTL_BUF_SIZE);
- }
-
- NAN_MUTEX_UNLOCK();
- NAN_DBG_EXIT();
- return ret;
-}
-
-#ifdef WL_NAN_DISC_CACHE
-int wl_cfgnan_sec_info_handler(struct bcm_cfg80211 *cfg,
- nan_datapath_sec_info_cmd_data_t *cmd_data, nan_hal_resp_t *nan_req_resp)
-{
- s32 ret = BCME_NOTFOUND;
- /* check in cache */
- nan_disc_result_cache *disc_cache = NULL;
- nan_svc_info_t *svc_info = NULL;
-
- NAN_DBG_ENTER();
- NAN_MUTEX_LOCK();
-
- if (!cfg->nan_init_state) {
-		WL_ERR(("nan is not initialized/nmi doesn't exist\n"));
- ret = BCME_NOTENABLED;
- goto fail;
- }
-
- /* datapath request context */
- if (cmd_data->pub_id && !ETHER_ISNULLADDR(&cmd_data->mac_addr)) {
- disc_cache = wl_cfgnan_get_disc_result(cfg,
- cmd_data->pub_id, &cmd_data->mac_addr);
- WL_DBG(("datapath request: PUB ID: = %d\n",
- cmd_data->pub_id));
- if (disc_cache) {
- (void)memcpy_s(nan_req_resp->svc_hash, WL_NAN_SVC_HASH_LEN,
- disc_cache->svc_hash, WL_NAN_SVC_HASH_LEN);
- ret = BCME_OK;
- } else {
- WL_ERR(("disc_cache is NULL\n"));
- goto fail;
- }
- }
-
- /* datapath response context */
- if (cmd_data->ndp_instance_id) {
- WL_DBG(("datapath response: NDP ID: = %d\n",
- cmd_data->ndp_instance_id));
- svc_info = wl_cfgnan_get_svc_inst(cfg, 0, cmd_data->ndp_instance_id);
- /* Note: svc_info will not be present in OOB cases
- * In such case send NMI alone and let HAL handle if
- * svc_hash is mandatory
- */
- if (svc_info) {
- WL_DBG(("svc hash present, pack it\n"));
- (void)memcpy_s(nan_req_resp->svc_hash, WL_NAN_SVC_HASH_LEN,
- svc_info->svc_hash, WL_NAN_SVC_HASH_LEN);
- } else {
-			WL_INFORM_MEM(("svc_info not present, assuming OOB DP\n"));
- }
- /* Always send NMI */
- (void)memcpy_s(nan_req_resp->pub_nmi, ETHER_ADDR_LEN,
- cfg->nan_nmi_mac, ETHER_ADDR_LEN);
- ret = BCME_OK;
- }
-fail:
- NAN_MUTEX_UNLOCK();
- NAN_DBG_EXIT();
- return ret;
-}
-
-static s32 wl_nan_cache_to_event_data(nan_disc_result_cache *cache,
- nan_event_data_t *nan_event_data, osl_t *osh)
-{
- s32 ret = BCME_OK;
- NAN_DBG_ENTER();
-
- nan_event_data->pub_id = cache->pub_id;
- nan_event_data->sub_id = cache->sub_id;
- nan_event_data->publish_rssi = cache->publish_rssi;
- nan_event_data->peer_cipher_suite = cache->peer_cipher_suite;
- ret = memcpy_s(&nan_event_data->remote_nmi, ETHER_ADDR_LEN,
- &cache->peer, ETHER_ADDR_LEN);
- if (ret != BCME_OK) {
- WL_ERR(("Failed to copy cached peer nan nmi\n"));
- goto fail;
- }
-
- if (cache->svc_info.dlen && cache->svc_info.data) {
- nan_event_data->svc_info.dlen = cache->svc_info.dlen;
- nan_event_data->svc_info.data =
- MALLOCZ(osh, nan_event_data->svc_info.dlen);
- if (!nan_event_data->svc_info.data) {
- WL_ERR(("%s: memory allocation failed\n", __FUNCTION__));
- nan_event_data->svc_info.dlen = 0;
- ret = -ENOMEM;
- goto fail;
- }
- ret = memcpy_s(nan_event_data->svc_info.data, nan_event_data->svc_info.dlen,
- cache->svc_info.data, cache->svc_info.dlen);
- if (ret != BCME_OK) {
- WL_ERR(("Failed to copy cached svc info data\n"));
- goto fail;
- }
- }
- if (cache->tx_match_filter.dlen && cache->tx_match_filter.data) {
- nan_event_data->tx_match_filter.dlen = cache->tx_match_filter.dlen;
- nan_event_data->tx_match_filter.data =
- MALLOCZ(osh, nan_event_data->tx_match_filter.dlen);
- if (!nan_event_data->tx_match_filter.data) {
- WL_ERR(("%s: memory allocation failed\n", __FUNCTION__));
- nan_event_data->tx_match_filter.dlen = 0;
- ret = -ENOMEM;
- goto fail;
- }
- ret = memcpy_s(nan_event_data->tx_match_filter.data,
- nan_event_data->tx_match_filter.dlen,
- cache->tx_match_filter.data, cache->tx_match_filter.dlen);
- if (ret != BCME_OK) {
- WL_ERR(("Failed to copy cached tx match filter data\n"));
- goto fail;
- }
- }
-fail:
- NAN_DBG_EXIT();
- return ret;
-}
-#endif /* WL_NAN_DISC_CACHE */
-
-/* API to cancel the ranging with peer
-* For geofence initiator, suspend ranging.
-* For directed RTT initiator, report fail result, cancel ranging
-* and clear ranging instance
-* For responder, cancel ranging and clear ranging instance
-*/
-#ifdef RTT_SUPPORT
-static s32
-wl_cfgnan_clear_peer_ranging(struct bcm_cfg80211 *cfg,
- struct ether_addr *peer, int reason)
-{
- uint32 status = 0;
- nan_ranging_inst_t *rng_inst = NULL;
- int err = BCME_OK;
- struct net_device *ndev = bcmcfg_to_prmry_ndev(cfg);
- dhd_pub_t *dhdp = (dhd_pub_t *)(cfg->pub);
-
- rng_inst = wl_cfgnan_check_for_ranging(cfg, peer);
- if (rng_inst) {
- if (rng_inst->range_type == RTT_TYPE_NAN_GEOFENCE) {
- err = wl_cfgnan_suspend_geofence_rng_session(ndev,
- peer, reason, 0);
- } else {
- if (rng_inst->range_type == RTT_TYPE_NAN_DIRECTED) {
- dhd_rtt_handle_nan_rtt_session_end(dhdp,
- peer);
- }
- /* responder */
- err = wl_cfgnan_cancel_ranging(ndev, cfg,
- rng_inst->range_id,
- NAN_RNG_TERM_FLAG_IMMEDIATE, &status);
- bzero(rng_inst, sizeof(*rng_inst));
- }
- }
-
- if (err) {
-		WL_ERR(("Failed to stop ranging with peer, err %d\n", err));
- }
-
- return err;
-}
-#endif /* RTT_SUPPORT */
-
-static s32
-wl_nan_dp_cmn_event_data(struct bcm_cfg80211 *cfg, void *event_data,
- uint16 data_len, uint16 *tlvs_offset,
- uint16 *nan_opts_len, uint32 event_num,
- int *hal_event_id, nan_event_data_t *nan_event_data)
-{
- s32 ret = BCME_OK;
- uint8 i;
- wl_nan_ev_datapath_cmn_t *ev_dp;
- nan_svc_info_t *svc_info;
- bcm_xtlv_t *xtlv = (bcm_xtlv_t *)event_data;
-#ifdef RTT_SUPPORT
- nan_ranging_inst_t *rng_inst = NULL;
-#endif /* RTT_SUPPORT */
-
- if (xtlv->id == WL_NAN_XTLV_DATA_DP_INFO) {
- ev_dp = (wl_nan_ev_datapath_cmn_t *)xtlv->data;
- NAN_DBG_ENTER();
-
- BCM_REFERENCE(svc_info);
- BCM_REFERENCE(i);
- /* Mapping to common struct between DHD and HAL */
- WL_TRACE(("Event type: %d\n", ev_dp->type));
- nan_event_data->type = ev_dp->type;
- WL_TRACE(("pub_id: %d\n", ev_dp->pub_id));
- nan_event_data->pub_id = ev_dp->pub_id;
- WL_TRACE(("security: %d\n", ev_dp->security));
- nan_event_data->security = ev_dp->security;
-
- /* Store initiator_ndi, required for data_path_response_request */
- ret = memcpy_s(&cfg->initiator_ndi, ETHER_ADDR_LEN,
- &ev_dp->initiator_ndi, ETHER_ADDR_LEN);
- if (ret != BCME_OK) {
- WL_ERR(("Failed to copy event's initiator addr\n"));
- goto fail;
- }
- if (ev_dp->type == NAN_DP_SESSION_UNICAST) {
- WL_INFORM_MEM(("NDP ID: %d\n", ev_dp->ndp_id));
- nan_event_data->ndp_id = ev_dp->ndp_id;
- WL_TRACE(("INITIATOR_NDI: " MACDBG "\n",
- MAC2STRDBG(ev_dp->initiator_ndi.octet)));
- WL_TRACE(("RESPONDOR_NDI: " MACDBG "\n",
- MAC2STRDBG(ev_dp->responder_ndi.octet)));
- WL_TRACE(("PEER NMI: " MACDBG "\n",
- MAC2STRDBG(ev_dp->peer_nmi.octet)));
- ret = memcpy_s(&nan_event_data->remote_nmi, ETHER_ADDR_LEN,
- &ev_dp->peer_nmi, ETHER_ADDR_LEN);
- if (ret != BCME_OK) {
- WL_ERR(("Failed to copy event's peer nmi\n"));
- goto fail;
- }
- } else {
- /* type is multicast */
- WL_INFORM_MEM(("NDP ID: %d\n", ev_dp->mc_id));
- nan_event_data->ndp_id = ev_dp->mc_id;
- WL_TRACE(("PEER NMI: " MACDBG "\n",
- MAC2STRDBG(ev_dp->peer_nmi.octet)));
- ret = memcpy_s(&nan_event_data->remote_nmi, ETHER_ADDR_LEN,
- &ev_dp->peer_nmi,
- ETHER_ADDR_LEN);
- if (ret != BCME_OK) {
- WL_ERR(("Failed to copy event's peer nmi\n"));
- goto fail;
- }
- }
- *tlvs_offset = OFFSETOF(wl_nan_ev_datapath_cmn_t, opt_tlvs) +
- OFFSETOF(bcm_xtlv_t, data);
- *nan_opts_len = data_len - *tlvs_offset;
- if (event_num == WL_NAN_EVENT_PEER_DATAPATH_IND) {
- *hal_event_id = GOOGLE_NAN_EVENT_DATA_REQUEST;
-#ifdef WL_NAN_DISC_CACHE
- svc_info = wl_cfgnan_get_svc_inst(cfg, nan_event_data->pub_id, 0);
- if (svc_info) {
- for (i = 0; i < NAN_MAX_SVC_INST; i++) {
- if (!svc_info->ndp_id[i]) {
- WL_TRACE(("Found empty field\n"));
- break;
- }
- }
- if (i == NAN_MAX_SVC_INST) {
-					WL_ERR(("%s:cannot accommodate ndp id\n", __FUNCTION__));
- ret = BCME_NORESOURCE;
- goto fail;
- }
- svc_info->ndp_id[i] = nan_event_data->ndp_id;
- /* Add peer to data ndp peer list */
- wl_cfgnan_data_add_peer(cfg, &ev_dp->peer_nmi);
-#ifdef RTT_SUPPORT
- /* cancel any ongoing RTT session with peer
-			 * as we do not support DP and RNG to the same peer
- */
- wl_cfgnan_clear_peer_ranging(cfg, &ev_dp->peer_nmi,
- RTT_GEO_SUSPN_PEER_NDP_TRIGGER);
-#endif /* RTT_SUPPORT */
- ret = BCME_OK;
- }
-#endif /* WL_NAN_DISC_CACHE */
- } else if (event_num == WL_NAN_EVENT_DATAPATH_ESTB) {
- *hal_event_id = GOOGLE_NAN_EVENT_DATA_CONFIRMATION;
- if (ev_dp->role == NAN_DP_ROLE_INITIATOR) {
- ret = memcpy_s(&nan_event_data->responder_ndi, ETHER_ADDR_LEN,
- &ev_dp->responder_ndi,
- ETHER_ADDR_LEN);
- if (ret != BCME_OK) {
- WL_ERR(("Failed to copy event's responder ndi\n"));
- goto fail;
- }
- WL_TRACE(("REMOTE_NDI: " MACDBG "\n",
- MAC2STRDBG(ev_dp->responder_ndi.octet)));
- WL_TRACE(("Initiator status %d\n", nan_event_data->status));
- } else {
- ret = memcpy_s(&nan_event_data->responder_ndi, ETHER_ADDR_LEN,
- &ev_dp->initiator_ndi,
- ETHER_ADDR_LEN);
- if (ret != BCME_OK) {
- WL_ERR(("Failed to copy event's responder ndi\n"));
- goto fail;
- }
- WL_TRACE(("REMOTE_NDI: " MACDBG "\n",
- MAC2STRDBG(ev_dp->initiator_ndi.octet)));
- }
- if (ev_dp->status == NAN_NDP_STATUS_ACCEPT) {
- nan_event_data->status = NAN_DP_REQUEST_ACCEPT;
- wl_cfgnan_data_set_peer_dp_state(cfg, &ev_dp->peer_nmi,
- NAN_PEER_DP_CONNECTED);
- wl_cfgnan_update_dp_info(cfg, true, nan_event_data->ndp_id);
- } else if (ev_dp->status == NAN_NDP_STATUS_REJECT) {
- nan_event_data->status = NAN_DP_REQUEST_REJECT;
- /* Remove peer from data ndp peer list */
- wl_cfgnan_data_remove_peer(cfg, &ev_dp->peer_nmi);
-#ifdef RTT_SUPPORT
- rng_inst = wl_cfgnan_check_for_ranging(cfg, &ev_dp->peer_nmi);
- if (rng_inst) {
- /* Trigger/Reset geofence RTT */
- wl_cfgnan_reset_geofence_ranging(cfg,
- rng_inst, RTT_SCHED_DP_REJECTED);
- }
-#endif /* RTT_SUPPORT */
- } else {
- WL_ERR(("%s:Status code = %x not expected\n",
- __FUNCTION__, ev_dp->status));
- ret = BCME_ERROR;
- goto fail;
- }
- WL_TRACE(("Responder status %d\n", nan_event_data->status));
- } else if (event_num == WL_NAN_EVENT_DATAPATH_END) {
- /* Mapping to common struct between DHD and HAL */
- *hal_event_id = GOOGLE_NAN_EVENT_DATA_END;
-#ifdef WL_NAN_DISC_CACHE
- if (ev_dp->role != NAN_DP_ROLE_INITIATOR) {
- /* Only at Responder side,
- * If dp is ended,
- * clear the resp ndp id from the svc info cache
- */
- svc_info = wl_cfgnan_get_svc_inst(cfg, 0, nan_event_data->ndp_id);
- if (svc_info) {
- for (i = 0; i < NAN_MAX_SVC_INST; i++) {
- if (svc_info->ndp_id[i] == nan_event_data->ndp_id) {
- svc_info->ndp_id[i] = 0;
- }
- }
- } else {
- WL_DBG(("couldn't find entry for ndp id = %d\n",
- nan_event_data->ndp_id));
- }
- }
-#endif /* WL_NAN_DISC_CACHE */
- /* Remove peer from data ndp peer list */
- wl_cfgnan_data_remove_peer(cfg, &ev_dp->peer_nmi);
- wl_cfgnan_update_dp_info(cfg, false, nan_event_data->ndp_id);
-#ifdef RTT_SUPPORT
- WL_INFORM_MEM(("DP_END for REMOTE_NMI: " MACDBG "\n",
- MAC2STRDBG(&ev_dp->peer_nmi)));
- rng_inst = wl_cfgnan_check_for_ranging(cfg, &ev_dp->peer_nmi);
- if (rng_inst) {
- /* Trigger/Reset geofence RTT */
- WL_INFORM_MEM(("sched geofence rtt from DP_END ctx: " MACDBG "\n",
- MAC2STRDBG(&rng_inst->peer_addr)));
- wl_cfgnan_reset_geofence_ranging(cfg, rng_inst,
- RTT_SCHED_DP_END);
- }
-#endif /* RTT_SUPPORT */
- }
- } else {
-		/* Not handling other xtlv IDs as of now */
- WL_DBG(("%s:ID = 0x%02x not supported\n", __FUNCTION__, xtlv->id));
- }
-fail:
- NAN_DBG_EXIT();
- return ret;
-}
-#define IN_GEOFENCE(ingress, egress, distance) (((distance) <= (ingress)) && \
- ((distance) >= (egress)))
-#define IS_INGRESS_VAL(ingress, distance) ((distance) < (ingress))
-#define IS_EGRESS_VAL(egress, distance) ((distance) > (egress))
-
-static bool
-wl_cfgnan_check_ranging_cond(nan_svc_info_t *svc_info, uint32 distance,
- uint8 *ranging_ind, uint32 prev_distance)
-{
- uint8 svc_ind = svc_info->ranging_ind;
- bool notify = FALSE;
- bool range_rep_ev_once =
- !!(svc_info->svc_range_status & SVC_RANGE_REP_EVENT_ONCE);
- uint32 ingress_limit = svc_info->ingress_limit;
- uint32 egress_limit = svc_info->egress_limit;
-
-	WL_DBG(("wl_cfgnan_check_ranging_cond: Checking the svc ranging cond %d"
- " distance %d prev_distance %d, range_rep_ev_once %d\n",
- svc_ind, distance, prev_distance, range_rep_ev_once));
- WL_DBG(("wl_cfgnan_check_ranging_cond: Checking the SVC ingress and"
- " egress limits %d %d\n", ingress_limit, egress_limit));
- if (svc_ind & NAN_RANGE_INDICATION_CONT) {
- *ranging_ind = NAN_RANGE_INDICATION_CONT;
- notify = TRUE;
-		WL_ERR(("\n%s :Svc has continuous Ind %d\n",
- __FUNCTION__, __LINE__));
- goto done;
- }
- if (svc_ind == (NAN_RANGE_INDICATION_INGRESS |
- NAN_RANGE_INDICATION_EGRESS)) {
- if (IN_GEOFENCE(ingress_limit, egress_limit, distance)) {
- /* if not already in geofence */
- if ((range_rep_ev_once == FALSE) ||
- (!IN_GEOFENCE(ingress_limit, egress_limit,
- prev_distance))) {
- notify = TRUE;
- if (distance < ingress_limit) {
- *ranging_ind = NAN_RANGE_INDICATION_INGRESS;
- } else {
- *ranging_ind = NAN_RANGE_INDICATION_EGRESS;
- }
- WL_ERR(("\n%s :Svc has geofence Ind %d res_ind %d\n",
- __FUNCTION__, __LINE__, *ranging_ind));
- }
- }
- goto done;
- }
-
- if (svc_ind == NAN_RANGE_INDICATION_INGRESS) {
- if (IS_INGRESS_VAL(ingress_limit, distance)) {
- if ((range_rep_ev_once == FALSE) ||
- (prev_distance == INVALID_DISTANCE) ||
- !IS_INGRESS_VAL(ingress_limit, prev_distance)) {
- notify = TRUE;
- *ranging_ind = NAN_RANGE_INDICATION_INGRESS;
- WL_ERR(("\n%s :Svc has ingress Ind %d\n",
- __FUNCTION__, __LINE__));
- }
- }
- goto done;
- }
- if (svc_ind == NAN_RANGE_INDICATION_EGRESS) {
- if (IS_EGRESS_VAL(egress_limit, distance)) {
- if ((range_rep_ev_once == FALSE) ||
- (prev_distance == INVALID_DISTANCE) ||
- !IS_EGRESS_VAL(egress_limit, prev_distance)) {
- notify = TRUE;
- *ranging_ind = NAN_RANGE_INDICATION_EGRESS;
- WL_ERR(("\n%s :Svc has egress Ind %d\n",
- __FUNCTION__, __LINE__));
- }
- }
- goto done;
- }
-done:
- svc_info->svc_range_status |= SVC_RANGE_REP_EVENT_ONCE;
- return notify;
-}
-
-static int
-wl_cfgnan_event_disc_result(struct bcm_cfg80211 *cfg,
- nan_event_data_t *nan_event_data)
-{
- int ret = BCME_OK;
-#if (LINUX_VERSION_CODE > KERNEL_VERSION(3, 13, 0)) || defined(WL_VENDOR_EXT_SUPPORT)
- ret = wl_cfgvendor_send_nan_event(cfg->wdev->wiphy, bcmcfg_to_prmry_ndev(cfg),
- GOOGLE_NAN_EVENT_SUBSCRIBE_MATCH, nan_event_data);
- if (ret != BCME_OK) {
- WL_ERR(("Failed to send event to nan hal\n"));
- }
-#endif /* (LINUX_VERSION_CODE > KERNEL_VERSION(3, 13, 0)) || defined(WL_VENDOR_EXT_SUPPORT) */
- return ret;
-}
-
-static int32
-wl_cfgnan_notify_disc_with_ranging(struct bcm_cfg80211 *cfg,
- nan_ranging_inst_t *rng_inst, nan_event_data_t *nan_event_data, uint32 distance)
-{
- nan_svc_info_t *svc_info;
- bool notify_svc = FALSE;
- nan_disc_result_cache *disc_res = cfg->nan_disc_cache;
- uint8 ranging_ind = 0;
- int ret = BCME_OK;
- int i = 0, j = 0;
-
- for (i = 0; i < MAX_SUBSCRIBES; i++) {
- svc_info = rng_inst->svc_idx[i];
- if (svc_info) {
- if (nan_event_data->ranging_result_present) {
- notify_svc = wl_cfgnan_check_ranging_cond(svc_info, distance,
- &ranging_ind, rng_inst->prev_distance_mm);
- nan_event_data->ranging_ind = ranging_ind;
- } else {
- /* Report only if ranging was needed */
- notify_svc = svc_info->ranging_required;
- }
- WL_DBG(("wl_cfgnan_notify_disc_with_ranging: Ranging notify for"
- " svc_id %d, notify %d and ind %d\n",
- svc_info->svc_id, notify_svc, ranging_ind));
- } else {
- continue;
- }
- if (notify_svc) {
- for (j = 0; j < NAN_MAX_CACHE_DISC_RESULT; j++) {
- if (!memcmp(&disc_res[j].peer,
- &(rng_inst->peer_addr), ETHER_ADDR_LEN) &&
- (svc_info->svc_id == disc_res[j].sub_id)) {
- ret = wl_nan_cache_to_event_data(&disc_res[j],
- nan_event_data, cfg->osh);
- ret = wl_cfgnan_event_disc_result(cfg, nan_event_data);
-					/* If it's not match-once, clear it as the FW
-					 * will indicate again.
-					 */
- if (!(svc_info->flags & WL_NAN_MATCH_ONCE)) {
- wl_cfgnan_remove_disc_result(cfg, svc_info->svc_id);
- }
- }
- }
- }
- }
- WL_DBG(("notify_disc_with_ranging done ret %d\n", ret));
- return ret;
-}
-
-#ifdef RTT_SUPPORT
-static int32
-wl_cfgnan_handle_directed_rtt_report(struct bcm_cfg80211 *cfg,
- nan_ranging_inst_t *rng_inst, uint8 rng_id)
-{
- int ret = BCME_OK;
- uint32 status;
- dhd_pub_t *dhd = (struct dhd_pub *)(cfg->pub);
-
- ret = wl_cfgnan_cancel_ranging(bcmcfg_to_prmry_ndev(cfg), cfg,
- rng_id, NAN_RNG_TERM_FLAG_IMMEDIATE, &status);
- if (unlikely(ret) || unlikely(status)) {
- WL_ERR(("%s:nan range cancel failed ret = %d status = %d\n",
- __FUNCTION__, ret, status));
- }
- dhd_rtt_handle_nan_rtt_session_end(dhd, &rng_inst->peer_addr);
-
- wl_cfgnan_reset_geofence_ranging(cfg, rng_inst, RTT_SCHED_RNG_RPT_DIRECTED);
-
- WL_DBG(("Ongoing ranging session is cancelled \n"));
- return ret;
-}
-#endif /* RTT_SUPPORT */
-
-static void
-wl_cfgnan_disc_result_on_geofence_cancel(struct bcm_cfg80211 *cfg,
- nan_ranging_inst_t *rng_inst)
-{
- nan_event_data_t *nan_event_data = NULL;
-
- nan_event_data = MALLOCZ(cfg->osh, sizeof(*nan_event_data));
- if (!nan_event_data) {
- WL_ERR(("%s: memory allocation failed\n", __func__));
- goto exit;
- }
-
- wl_cfgnan_notify_disc_with_ranging(cfg, rng_inst, nan_event_data, 0);
-
-exit:
- wl_cfgnan_clear_nan_event_data(cfg, nan_event_data);
-
- return;
-}
-
-#ifdef RTT_SUPPORT
-void
-wl_cfgnan_process_range_report(struct bcm_cfg80211 *cfg,
- wl_nan_ev_rng_rpt_ind_t *range_res)
-{
- nan_ranging_inst_t *rng_inst = NULL;
- nan_event_data_t nan_event_data;
- dhd_pub_t *dhd = (struct dhd_pub *)(cfg->pub);
- rtt_status_info_t *rtt_status = GET_RTTSTATE(dhd);
-
- UNUSED_PARAMETER(nan_event_data);
- rng_inst = wl_cfgnan_check_for_ranging(cfg, &range_res->peer_m_addr);
- if (!rng_inst) {
- WL_ERR(("wl_cfgnan_process_range_report: No ranging instance "
- "but received RNG RPT event..check \n"));
- goto exit;
- }
-#ifdef NAN_RTT_DBG
- DUMP_NAN_RTT_INST(rng_inst);
- DUMP_NAN_RTT_RPT(range_res);
-#endif // endif
- range_res->rng_id = rng_inst->range_id;
- bzero(&nan_event_data, sizeof(nan_event_data));
- nan_event_data.ranging_result_present = 1;
- nan_event_data.range_measurement_cm = range_res->dist_mm;
- (void)memcpy_s(&nan_event_data.remote_nmi, ETHER_ADDR_LEN,
- &range_res->peer_m_addr, ETHER_ADDR_LEN);
- nan_event_data.ranging_ind = range_res->indication;
- if (rng_inst->range_type == RTT_TYPE_NAN_GEOFENCE) {
- /* check in cache and event match to host */
- wl_cfgnan_notify_disc_with_ranging(cfg, rng_inst, &nan_event_data,
- range_res->dist_mm);
- rng_inst->prev_distance_mm = range_res->dist_mm;
- /* Reset resp reject count on valid measurement */
- rng_inst->geof_retry_count = 0;
-#ifdef RTT_GEOFENCE_INTERVAL
- if (rtt_status->geofence_cfg.geofence_rtt_interval < 0) {
- ; /* Do Nothing */
- } else
-#endif /* RTT_GEOFENCE_INTERVAL */
- {
- wl_cfgnan_suspend_geofence_rng_session(bcmcfg_to_prmry_ndev(cfg),
- &rng_inst->peer_addr, RTT_GEO_SUSPN_RANGE_RES_REPORTED, 0);
- GEOFENCE_RTT_LOCK(rtt_status);
- dhd_rtt_move_geofence_cur_target_idx_to_next(dhd);
- GEOFENCE_RTT_UNLOCK(rtt_status);
- wl_cfgnan_reset_geofence_ranging(cfg,
- rng_inst, RTT_SCHED_RNG_RPT_GEOFENCE);
- }
- } else if (rng_inst->range_type == RTT_TYPE_NAN_DIRECTED) {
- wl_cfgnan_handle_directed_rtt_report(cfg, rng_inst, range_res->rng_id);
- }
-
-exit:
- return;
-}
-#endif /* RTT_SUPPORT */
-
-static void
-wl_nan_print_status(wl_nan_conf_status_t *nstatus)
-{
- printf("> enabled: %d\n", nstatus->enabled);
- printf("> Current NMI: " MACDBG "\n", MAC2STRDBG(nstatus->nmi.octet));
- printf("> Current cluster_id: " MACDBG "\n", MAC2STRDBG(nstatus->cid.octet));
-
- switch (nstatus->role) {
- case WL_NAN_ROLE_AUTO:
- printf("> role: %s (%d)\n", "auto", nstatus->role);
- break;
- case WL_NAN_ROLE_NON_MASTER_NON_SYNC:
- printf("> role: %s (%d)\n", "non-master-non-sync", nstatus->role);
- break;
- case WL_NAN_ROLE_NON_MASTER_SYNC:
- printf("> role: %s (%d)\n", "non-master-sync", nstatus->role);
- break;
- case WL_NAN_ROLE_MASTER:
- printf("> role: %s (%d)\n", "master", nstatus->role);
- break;
- case WL_NAN_ROLE_ANCHOR_MASTER:
- printf("> role: %s (%d)\n", "anchor-master", nstatus->role);
- break;
- default:
- printf("> role: %s (%d)\n", "undefined", nstatus->role);
- break;
- }
-
- printf("> social channels: %d, %d\n",
- nstatus->social_chans[0], nstatus->social_chans[1]);
- printf("> master_rank: " NMRSTR "\n", NMR2STR(nstatus->mr));
- printf("> amr : " NMRSTR "\n", NMR2STR(nstatus->amr));
- printf("> hop_count: %d\n", nstatus->hop_count);
- printf("> ambtt: %d\n", nstatus->ambtt);
-}
-
-static void
-wl_cfgnan_clear_nan_event_data(struct bcm_cfg80211 *cfg,
- nan_event_data_t *nan_event_data)
-{
- if (nan_event_data) {
- if (nan_event_data->tx_match_filter.data) {
- MFREE(cfg->osh, nan_event_data->tx_match_filter.data,
- nan_event_data->tx_match_filter.dlen);
- nan_event_data->tx_match_filter.data = NULL;
- }
- if (nan_event_data->rx_match_filter.data) {
- MFREE(cfg->osh, nan_event_data->rx_match_filter.data,
- nan_event_data->rx_match_filter.dlen);
- nan_event_data->rx_match_filter.data = NULL;
- }
- if (nan_event_data->svc_info.data) {
- MFREE(cfg->osh, nan_event_data->svc_info.data,
- nan_event_data->svc_info.dlen);
- nan_event_data->svc_info.data = NULL;
- }
- if (nan_event_data->sde_svc_info.data) {
- MFREE(cfg->osh, nan_event_data->sde_svc_info.data,
- nan_event_data->sde_svc_info.dlen);
- nan_event_data->sde_svc_info.data = NULL;
- }
- MFREE(cfg->osh, nan_event_data, sizeof(*nan_event_data));
- }
-
-}
-
-#ifdef RTT_SUPPORT
-/*
- * Triggers rtt work thread
- * if geofence rtt pending,
- * clears ranging instance
- * otherwise
+ * $Id: wl_cfgnan.c 676811 2016-12-24 20:48:46Z $
*/
-void
-wl_cfgnan_reset_geofence_ranging(struct bcm_cfg80211 *cfg,
- nan_ranging_inst_t * rng_inst, int sched_reason)
-{
- dhd_pub_t *dhd = (struct dhd_pub *)(cfg->pub);
- u8 rtt_invalid_reason = RTT_STATE_VALID;
- rtt_geofence_target_info_t *geofence_target = NULL;
- rtt_status_info_t *rtt_status = GET_RTTSTATE(dhd);
- int8 cur_idx = DHD_RTT_INVALID_TARGET_INDEX;
- int8 index = DHD_RTT_INVALID_TARGET_INDEX;
- bool geofence_state = dhd_rtt_get_geofence_rtt_state(dhd);
- bool retry = FALSE;
-
- WL_INFORM_MEM(("wl_cfgnan_reset_geofence_ranging, sched_reason = %d, cur_idx = %d, "
- "geofence_interval = %d\n", sched_reason, rtt_status->geofence_cfg.cur_target_idx,
- rtt_status->geofence_cfg.geofence_rtt_interval));
- cur_idx = dhd_rtt_get_geofence_cur_target_idx(dhd);
- if (cur_idx == -1) {
- WL_INFORM_MEM(("wl_cfgnan_reset_geofence_ranging, "
- "Removing Ranging Instance " MACDBG "\n",
- MAC2STRDBG(&(rng_inst->peer_addr))));
- bzero(rng_inst, sizeof(*rng_inst));
- /* Cancel pending retry timer if any */
- if (delayed_work_pending(&rtt_status->rtt_retry_timer)) {
- cancel_delayed_work(&rtt_status->rtt_retry_timer);
- }
- goto exit;
- }
-
- /* Get current geofencing target */
- geofence_target = dhd_rtt_get_geofence_current_target(dhd);
-
- /* get target index for cur ranging inst */
- dhd_rtt_get_geofence_target(dhd,
- &rng_inst->peer_addr, &index);
- if ((sched_reason == RTT_SCHED_RTT_RETRY_GEOFENCE) &&
- (rng_inst->range_status == NAN_RANGING_IN_PROGRESS)) {
- /* if we are already in progress with the peer
- * (responder or directed RTT initiator),
- * retry later if sched_reason = timeout
- */
- retry = TRUE;
- } else if (cur_idx == index) {
- /* Reset incoming Ranging instance */
- rng_inst->range_type = RTT_TYPE_NAN_GEOFENCE;
- rng_inst->range_status = NAN_RANGING_REQUIRED;
- rng_inst->range_role = NAN_RANGING_ROLE_INITIATOR;
- if ((sched_reason != RTT_SCHED_RNG_RPT_GEOFENCE) &&
- (sched_reason != RTT_SCHED_RTT_RETRY_GEOFENCE)) {
- rng_inst->prev_distance_mm = INVALID_DISTANCE;
- }
- } else {
- if (index == DHD_RTT_INVALID_TARGET_INDEX) {
- /* Remove incoming Ranging instance */
- WL_INFORM_MEM(("Removing Ranging Instance " MACDBG "\n",
- MAC2STRDBG(&(rng_inst->peer_addr))));
- bzero(rng_inst, sizeof(*rng_inst));
- } else {
- /* Reset incoming Ranging instance */
- rng_inst->range_type = RTT_TYPE_NAN_GEOFENCE;
- rng_inst->range_status = NAN_RANGING_REQUIRED;
- rng_inst->range_role = NAN_RANGING_ROLE_INITIATOR;
- if ((sched_reason != RTT_SCHED_RNG_RPT_GEOFENCE) &&
- (sched_reason != RTT_SCHED_RTT_RETRY_GEOFENCE)) {
- rng_inst->prev_distance_mm = INVALID_DISTANCE;
- }
- }
- /* Create range inst if not present and reset explicitly */
- rng_inst = wl_cfgnan_get_ranging_inst(cfg,
- &geofence_target->peer_addr, NAN_RANGING_ROLE_INITIATOR);
- }
-
- /* Avoid schedule if
- * already geofence running
- * or Directed RTT in progress
- * or Invalid RTT state like
- * NDP with Peer
- */
- if ((geofence_state == TRUE) ||
- (!RTT_IS_STOPPED(rtt_status)) ||
- (rtt_invalid_reason != RTT_STATE_VALID)) {
- /* Not in valid RTT state, avoid schedule */
- goto exit;
- }
-
- if ((cur_idx == 0) && ((sched_reason == RTT_SCHED_RNG_RPT_GEOFENCE) ||
- (sched_reason == RTT_SCHED_RNG_TERM))) {
- /* First Target again after all done, retry over a timer */
- retry = TRUE;
- }
-
- if (retry && (rtt_status->geofence_cfg.geofence_rtt_interval >= 0)) {
- /* Move to first target and retry over a timer */
- WL_DBG(("Retry over a timer, cur_idx = %d\n",
- rtt_status->geofence_cfg.cur_target_idx));
- /* schedule proxd retry timer */
- schedule_delayed_work(&rtt_status->rtt_retry_timer,
- msecs_to_jiffies(rtt_status->geofence_cfg.geofence_rtt_interval));
- goto exit;
-
- }
-
- /* schedule RTT */
- dhd_rtt_schedule_rtt_work_thread(dhd, sched_reason);
-
-exit:
- return;
-}
-
-static bool
-wl_check_range_role_concurrency(dhd_pub_t *dhd, nan_ranging_inst_t *rng_inst)
-{
- ASSERT(rng_inst);
- if ((dhd_rtt_get_role_concurrency_state(dhd) == TRUE) &&
- (rng_inst->num_svc_ctx > 0)) {
- return TRUE;
- } else {
- return FALSE;
- }
-}
-
-static void
-wl_cfgnan_resolve_ranging_role_concurrecny(dhd_pub_t *dhd,
- nan_ranging_inst_t *rng_inst)
-{
- /* Update rang_inst to initiator and resolve role concurrency */
- rng_inst->range_role = NAN_RANGING_ROLE_INITIATOR;
- dhd_rtt_set_role_concurrency_state(dhd, FALSE);
-}
-#endif /* RTT_SUPPORT */
-
-static bool
-wl_cfgnan_geofence_retry_check(nan_ranging_inst_t *rng_inst, uint8 reason_code)
-{
- bool geof_retry = FALSE;
-
- switch (reason_code) {
- case NAN_RNG_TERM_IDLE_TIMEOUT:
- /* Fallthrough: Keep adding more reason code if needed */
- case NAN_RNG_TERM_RNG_RESP_TIMEOUT:
- case NAN_RNG_TERM_RNG_RESP_REJ:
- case NAN_RNG_TERM_RNG_TXS_FAIL:
- if (rng_inst->geof_retry_count <
- NAN_RNG_GEOFENCE_MAX_RETRY_CNT) {
- rng_inst->geof_retry_count++;
- geof_retry = TRUE;
- }
- break;
- default:
- /* FALSE for any other case */
- break;
- }
-
- return geof_retry;
-}
-
-s32
-wl_cfgnan_notify_nan_status(struct bcm_cfg80211 *cfg,
- bcm_struct_cfgdev *cfgdev, const wl_event_msg_t *event, void *event_data)
-{
- uint16 data_len;
- uint32 event_num;
- s32 event_type;
- int hal_event_id = 0;
- nan_event_data_t *nan_event_data = NULL;
- nan_parse_event_ctx_t nan_event_ctx;
- uint16 tlvs_offset = 0;
- uint16 nan_opts_len = 0;
- uint8 *tlv_buf;
- s32 ret = BCME_OK;
- bcm_xtlv_opts_t xtlv_opt = BCM_IOV_CMD_OPT_ALIGN32;
- uint32 status;
- nan_svc_info_t *svc;
-
- UNUSED_PARAMETER(wl_nan_print_status);
- UNUSED_PARAMETER(status);
- NAN_DBG_ENTER();
- NAN_MUTEX_LOCK();
-
- if (!event || !event_data) {
- WL_ERR(("event data is NULL\n"));
- ret = -EINVAL;
- goto exit;
- }
-
- event_type = ntoh32(event->event_type);
- event_num = ntoh32(event->reason);
- data_len = ntoh32(event->datalen);
-
- if (NAN_INVALID_EVENT(event_num)) {
- WL_ERR(("unsupported event, num: %d, event type: %d\n", event_num, event_type));
- ret = -EINVAL;
- goto exit;
- }
- WL_DBG((">> Nan Event Received: %s (num=%d, len=%d)\n",
- nan_event_to_str(event_num), event_num, data_len));
-
-#ifdef WL_NAN_DEBUG
- prhex("nan_event_data:", event_data, data_len);
-#endif /* WL_NAN_DEBUG */
-
- if (!cfg->nan_init_state) {
- WL_ERR(("nan is not in initialized state, dropping nan related events\n"));
- ret = BCME_OK;
- goto exit;
- }
-
- nan_event_data = MALLOCZ(cfg->osh, sizeof(*nan_event_data));
- if (!nan_event_data) {
- WL_ERR(("%s: memory allocation failed\n", __func__));
- goto exit;
- }
-
- nan_event_ctx.cfg = cfg;
- nan_event_ctx.nan_evt_data = nan_event_data;
- /*
- * send as preformatted hex string
- * EVENT_NAN <event_type> <tlv_hex_string>
- */
- switch (event_num) {
- case WL_NAN_EVENT_START:
- case WL_NAN_EVENT_MERGE:
- case WL_NAN_EVENT_ROLE: {
- /* get nan status info as-is */
- bcm_xtlv_t *xtlv = (bcm_xtlv_t *)event_data;
- wl_nan_conf_status_t *nstatus = (wl_nan_conf_status_t *)xtlv->data;
- WL_INFORM_MEM((">> Nan Mac Event Received: %s (num=%d, len=%d)\n",
- nan_event_to_str(event_num), event_num, data_len));
- WL_INFORM_MEM(("Nan Device Role %s\n", nan_role_to_str(nstatus->role)));
- /* Mapping to common struct between DHD and HAL */
- nan_event_data->enabled = nstatus->enabled;
- ret = memcpy_s(&nan_event_data->local_nmi, ETHER_ADDR_LEN,
- &nstatus->nmi, ETHER_ADDR_LEN);
- if (ret != BCME_OK) {
- WL_ERR(("Failed to copy nmi\n"));
- goto exit;
- }
- ret = memcpy_s(&nan_event_data->clus_id, ETHER_ADDR_LEN,
- &nstatus->cid, ETHER_ADDR_LEN);
- if (ret != BCME_OK) {
- WL_ERR(("Failed to copy cluster id\n"));
- goto exit;
- }
- nan_event_data->nan_de_evt_type = event_num;
-#ifdef WL_NAN_DEBUG
- wl_nan_print_status(nstatus);
-#endif /* WL_NAN_DEBUG */
- if (event_num == WL_NAN_EVENT_START) {
- OSL_SMP_WMB();
- cfg->nancfg.nan_event_recvd = true;
- OSL_SMP_WMB();
- wake_up(&cfg->nancfg.nan_event_wait);
- }
- hal_event_id = GOOGLE_NAN_EVENT_DE_EVENT;
- break;
- }
- case WL_NAN_EVENT_TERMINATED: {
- bcm_xtlv_t *xtlv = (bcm_xtlv_t *)event_data;
- wl_nan_ev_terminated_t *pev = (wl_nan_ev_terminated_t *)xtlv->data;
-
- /* Mapping to common struct between DHD and HAL */
- WL_TRACE(("Instance ID: %d\n", pev->instance_id));
- nan_event_data->local_inst_id = pev->instance_id;
- WL_TRACE(("Service Type: %d\n", pev->svctype));
-
-#ifdef WL_NAN_DISC_CACHE
- if (pev->svctype == NAN_SC_SUBSCRIBE) {
- wl_cfgnan_remove_disc_result(cfg, pev->instance_id);
- }
-#endif /* WL_NAN_DISC_CACHE */
- /* Mapping reason code of FW to status code of framework */
- if (pev->reason == NAN_TERM_REASON_TIMEOUT ||
- pev->reason == NAN_TERM_REASON_USER_REQ ||
- pev->reason == NAN_TERM_REASON_COUNT_REACHED) {
- nan_event_data->status = NAN_STATUS_SUCCESS;
- ret = memcpy_s(nan_event_data->nan_reason,
- sizeof(nan_event_data->nan_reason),
- "NAN_STATUS_SUCCESS",
- strlen("NAN_STATUS_SUCCESS"));
- if (ret != BCME_OK) {
- WL_ERR(("Failed to copy nan_reason\n"));
- goto exit;
- }
- } else {
- nan_event_data->status = NAN_STATUS_INTERNAL_FAILURE;
- ret = memcpy_s(nan_event_data->nan_reason,
- sizeof(nan_event_data->nan_reason),
- "NAN_STATUS_INTERNAL_FAILURE",
- strlen("NAN_STATUS_INTERNAL_FAILURE"));
- if (ret != BCME_OK) {
- WL_ERR(("Failed to copy nan_reason\n"));
- goto exit;
- }
- }
-
- if (pev->svctype == NAN_SC_SUBSCRIBE) {
- hal_event_id = GOOGLE_NAN_EVENT_SUBSCRIBE_TERMINATED;
- } else {
- hal_event_id = GOOGLE_NAN_EVENT_PUBLISH_TERMINATED;
- }
-#ifdef WL_NAN_DISC_CACHE
- if (pev->reason != NAN_TERM_REASON_USER_REQ) {
- wl_cfgnan_clear_svc_from_all_ranging_inst(cfg, pev->instance_id);
- /* terminate ranging sessions */
- wl_cfgnan_terminate_all_obsolete_ranging_sessions(cfg);
- }
-#endif /* WL_NAN_DISC_CACHE */
- break;
- }
-
- case WL_NAN_EVENT_RECEIVE: {
- nan_opts_len = data_len;
- hal_event_id = GOOGLE_NAN_EVENT_FOLLOWUP;
- xtlv_opt = BCM_IOV_CMD_OPT_ALIGN_NONE;
- break;
- }
-
- case WL_NAN_EVENT_TXS: {
- bcm_xtlv_t *xtlv = (bcm_xtlv_t *)event_data;
- wl_nan_event_txs_t *txs = (wl_nan_event_txs_t *)xtlv->data;
- wl_nan_event_sd_txs_t *txs_sd = NULL;
- if (txs->status == WL_NAN_TXS_SUCCESS) {
- WL_INFORM_MEM(("TXS success for type %d token %d\n",
- txs->type, txs->host_seq));
- nan_event_data->status = NAN_STATUS_SUCCESS;
- ret = memcpy_s(nan_event_data->nan_reason,
- sizeof(nan_event_data->nan_reason),
- "NAN_STATUS_SUCCESS",
- strlen("NAN_STATUS_SUCCESS"));
- if (ret != BCME_OK) {
- WL_ERR(("Failed to copy nan_reason\n"));
- goto exit;
- }
- } else {
- /* TODO: populate status based on reason codes.
- * For now report it as no ACK, so that the app/framework can retry.
- */
- WL_INFORM_MEM(("TXS failed for type %d status %d token %d\n",
- txs->type, txs->status, txs->host_seq));
- nan_event_data->status = NAN_STATUS_NO_OTA_ACK;
- ret = memcpy_s(nan_event_data->nan_reason,
- sizeof(nan_event_data->nan_reason),
- "NAN_STATUS_NO_OTA_ACK",
- strlen("NAN_STATUS_NO_OTA_ACK"));
- if (ret != BCME_OK) {
- WL_ERR(("Failed to copy nan_reason\n"));
- goto exit;
- }
- }
- nan_event_data->reason = txs->reason_code;
- nan_event_data->token = txs->host_seq;
- if (txs->type == WL_NAN_FRM_TYPE_FOLLOWUP) {
- hal_event_id = GOOGLE_NAN_EVENT_TRANSMIT_FOLLOWUP_IND;
- xtlv = (bcm_xtlv_t *)(txs->opt_tlvs);
- if (txs->opt_tlvs_len && xtlv->id == WL_NAN_XTLV_SD_TXS) {
- txs_sd = (wl_nan_event_sd_txs_t*)xtlv->data;
- nan_event_data->local_inst_id = txs_sd->inst_id;
- } else {
- WL_ERR(("Invalid params in TX status for trasnmit followup"));
- ret = -EINVAL;
- goto exit;
- }
- } else { /* TODO: add for other frame types if required */
- ret = -EINVAL;
- goto exit;
- }
- break;
- }
-
- case WL_NAN_EVENT_DISCOVERY_RESULT: {
- nan_opts_len = data_len;
- hal_event_id = GOOGLE_NAN_EVENT_SUBSCRIBE_MATCH;
- xtlv_opt = BCM_IOV_CMD_OPT_ALIGN_NONE;
- break;
- }
-#ifdef WL_NAN_DISC_CACHE
- case WL_NAN_EVENT_DISC_CACHE_TIMEOUT: {
- bcm_xtlv_t *xtlv = (bcm_xtlv_t *)event_data;
- wl_nan_ev_disc_cache_timeout_t *cache_data =
- (wl_nan_ev_disc_cache_timeout_t *)xtlv->data;
- wl_nan_disc_expired_cache_entry_t *cache_entry = NULL;
- uint16 xtlv_len = xtlv->len;
- uint8 entry_idx = 0;
-
- if (xtlv->id == WL_NAN_XTLV_SD_DISC_CACHE_TIMEOUT) {
- xtlv_len = xtlv_len -
- OFFSETOF(wl_nan_ev_disc_cache_timeout_t, cache_exp_list);
- while ((entry_idx < cache_data->count) &&
- (xtlv_len >= sizeof(*cache_entry))) {
- cache_entry = &cache_data->cache_exp_list[entry_idx];
- /* Handle ranging cases for cache timeout */
- wl_cfgnan_ranging_clear_publish(cfg, &cache_entry->r_nmi_addr,
- cache_entry->l_sub_id);
- /* Invalidate local cache info */
- wl_cfgnan_remove_disc_result(cfg, cache_entry->l_sub_id);
- xtlv_len = xtlv_len - sizeof(*cache_entry);
- entry_idx++;
- }
- }
- break;
- }
- case WL_NAN_EVENT_RNG_REQ_IND: {
- wl_nan_ev_rng_req_ind_t *rng_ind;
- bcm_xtlv_t *xtlv = (bcm_xtlv_t *)event_data;
-
- nan_opts_len = data_len;
- rng_ind = (wl_nan_ev_rng_req_ind_t *)xtlv->data;
- xtlv_opt = BCM_IOV_CMD_OPT_ALIGN_NONE;
- WL_INFORM_MEM(("Received WL_NAN_EVENT_RNG_REQ_IND range_id %d"
- " peer:" MACDBG "\n", rng_ind->rng_id,
- MAC2STRDBG(&rng_ind->peer_m_addr)));
-#ifdef RTT_SUPPORT
- ret = wl_cfgnan_handle_ranging_ind(cfg, rng_ind);
-#endif /* RTT_SUPPORT */
- /* no need to send the event to HAL */
- goto exit;
- }
-
- case WL_NAN_EVENT_RNG_TERM_IND: {
- bcm_xtlv_t *xtlv = (bcm_xtlv_t *)event_data;
- dhd_pub_t *dhd = (struct dhd_pub *)(cfg->pub);
- nan_ranging_inst_t *rng_inst;
- wl_nan_ev_rng_term_ind_t *range_term = (wl_nan_ev_rng_term_ind_t *)xtlv->data;
-#ifdef RTT_SUPPORT
- int8 index = -1;
- rtt_geofence_target_info_t* geofence_target;
- rtt_status_info_t *rtt_status;
- int rng_sched_reason = 0;
-#endif /* RTT_SUPPORT */
- BCM_REFERENCE(dhd);
- WL_INFORM_MEM(("Received WL_NAN_EVENT_RNG_TERM_IND peer: " MACDBG ", "
- " Range ID:%d Reason Code:%d\n", MAC2STRDBG(&range_term->peer_m_addr),
- range_term->rng_id, range_term->reason_code));
- rng_inst = wl_cfgnan_get_rng_inst_by_id(cfg, range_term->rng_id);
- if (rng_inst) {
-#ifdef RTT_SUPPORT
- rng_sched_reason = RTT_SCHED_RNG_TERM;
- if (rng_inst->range_type == RTT_TYPE_NAN_DIRECTED) {
- dhd_rtt_handle_nan_rtt_session_end(dhd, &rng_inst->peer_addr);
- } else if (rng_inst->range_type == RTT_TYPE_NAN_GEOFENCE) {
- if (wl_cfgnan_geofence_retry_check(rng_inst,
- range_term->reason_code)) {
- rtt_status = GET_RTTSTATE(dhd);
- GEOFENCE_RTT_LOCK(rtt_status);
- dhd_rtt_move_geofence_cur_target_idx_to_next(dhd);
- GEOFENCE_RTT_UNLOCK(rtt_status);
- } else {
- /* Report on ranging failure */
- wl_cfgnan_disc_result_on_geofence_cancel(cfg,
- rng_inst);
- WL_TRACE(("Reset the state on terminate\n"));
- geofence_target = dhd_rtt_get_geofence_target(dhd,
- &rng_inst->peer_addr, &index);
- if (geofence_target) {
- dhd_rtt_remove_geofence_target(dhd,
- &geofence_target->peer_addr);
- }
- }
- /* Set geofence RTT in progress state to false */
- dhd_rtt_set_geofence_rtt_state(dhd, FALSE);
- }
- if (rng_inst->range_role == NAN_RANGING_ROLE_RESPONDER &&
- wl_check_range_role_concurrency(dhd, rng_inst)) {
- /* Resolve role concurrency */
- wl_cfgnan_resolve_ranging_role_concurrecny(dhd, rng_inst);
- /* Override sched reason if role concurrency just resolved */
- rng_sched_reason = RTT_SCHED_RNG_TERM_PEND_ROLE_CHANGE;
- }
- /* Reset Ranging Instance and trigger ranging if applicable */
- wl_cfgnan_reset_geofence_ranging(cfg, rng_inst, rng_sched_reason);
-#endif /* RTT_SUPPORT */
- }
- break;
- }
-#endif /* WL_NAN_DISC_CACHE */
- /*
- * Data path events data are received in common event struct,
- * Handling all the events as part of one case, hence fall through is intentional
- */
- case WL_NAN_EVENT_PEER_DATAPATH_IND:
- case WL_NAN_EVENT_DATAPATH_ESTB:
- case WL_NAN_EVENT_DATAPATH_END: {
- ret = wl_nan_dp_cmn_event_data(cfg, event_data, data_len,
- &tlvs_offset, &nan_opts_len,
- event_num, &hal_event_id, nan_event_data);
- /* Avoiding optional param parsing for DP END Event */
- if (event_num == WL_NAN_EVENT_DATAPATH_END) {
- nan_opts_len = 0;
- xtlv_opt = BCM_IOV_CMD_OPT_ALIGN_NONE;
- }
- if (unlikely(ret)) {
- WL_ERR(("nan dp common event data parse failed\n"));
- goto exit;
- }
- break;
- }
- default:
- WL_ERR_RLMT(("WARNING: unimplemented NAN APP EVENT = %d\n", event_num));
- ret = BCME_ERROR;
- goto exit;
- }
-
- if (nan_opts_len) {
- tlv_buf = (uint8 *)event_data + tlvs_offset;
- /* Extract event data tlvs and pass their resp to cb fn */
- ret = bcm_unpack_xtlv_buf((void *)&nan_event_ctx, (const uint8*)tlv_buf,
- nan_opts_len, xtlv_opt, wl_cfgnan_set_vars_cbfn);
- if (ret != BCME_OK) {
- WL_ERR(("Failed to unpack tlv data, ret=%d\n", ret));
- }
- }
-
-#ifdef WL_NAN_DISC_CACHE
- if (hal_event_id == GOOGLE_NAN_EVENT_SUBSCRIBE_MATCH) {
-#ifdef RTT_SUPPORT
- u8 rtt_invalid_reason = RTT_STATE_VALID;
- bool role_concur_state = 0;
- dhd_pub_t *dhd = (struct dhd_pub *)(cfg->pub);
-#endif /* RTT_SUPPORT */
- u16 update_flags = 0;
- WL_TRACE(("Cache disc res\n"));
- ret = wl_cfgnan_cache_disc_result(cfg, nan_event_data, &update_flags);
- if (ret) {
- WL_ERR(("Failed to cache disc result ret %d\n", ret));
- }
- if (nan_event_data->sde_control_flag & NAN_SDE_CF_RANGING_REQUIRED) {
- ret = wl_cfgnan_check_disc_result_for_ranging(cfg, nan_event_data);
- if (ret == BCME_OK) {
-#ifdef RTT_SUPPORT
- rtt_invalid_reason = dhd_rtt_invalid_states
- (bcmcfg_to_prmry_ndev(cfg), &nan_event_data->remote_nmi);
- role_concur_state = dhd_rtt_get_role_concurrency_state(dhd);
- /*
- * If instant RTT is not possible (e.g. an invalid
- * RTT state such as NDP connected/connecting, or
- * role concurrency on), send the discovery result
- * immediately; otherwise the disc result will be
- * posted on the ranging report event.
- */
- if (rtt_invalid_reason == RTT_STATE_VALID &&
- role_concur_state == FALSE) {
- /* Avoid sending disc result instantly */
- goto exit;
- }
-#endif /* RTT_SUPPORT */
- } else {
- /* TODO: should we terminate service if ranging fails ? */
- WL_INFORM_MEM(("Ranging failed or not required, " MACDBG
- " sub_id:%d , pub_id:%d\n",
- MAC2STRDBG(&nan_event_data->remote_nmi),
- nan_event_data->sub_id, nan_event_data->pub_id));
- }
- } else {
- nan_svc_info_t *svc_info = wl_cfgnan_get_svc_inst(cfg,
- nan_event_data->sub_id, 0);
- if (svc_info && svc_info->ranging_required &&
- (update_flags & NAN_DISC_CACHE_PARAM_SDE_CONTROL)) {
- wl_cfgnan_ranging_clear_publish(cfg,
- &nan_event_data->remote_nmi, nan_event_data->sub_id);
- }
- }
-
- /*
- * If tx match filter is present as part of active subscribe, keep same filter
- * values in discovery results also.
- */
- if (nan_event_data->sub_id == nan_event_data->requestor_id) {
- svc = wl_cfgnan_get_svc_inst(cfg, nan_event_data->sub_id, 0);
- if (svc && svc->tx_match_filter_len) {
- nan_event_data->tx_match_filter.dlen = svc->tx_match_filter_len;
- nan_event_data->tx_match_filter.data =
- MALLOCZ(cfg->osh, svc->tx_match_filter_len);
- if (!nan_event_data->tx_match_filter.data) {
- WL_ERR(("%s: tx_match_filter_data alloc failed\n",
- __FUNCTION__));
- nan_event_data->tx_match_filter.dlen = 0;
- ret = -ENOMEM;
- goto exit;
- }
- ret = memcpy_s(nan_event_data->tx_match_filter.data,
- nan_event_data->tx_match_filter.dlen,
- svc->tx_match_filter, svc->tx_match_filter_len);
- if (ret != BCME_OK) {
- WL_ERR(("Failed to copy tx match filter data\n"));
- goto exit;
- }
- }
- }
- }
-#endif /* WL_NAN_DISC_CACHE */
-
- WL_TRACE(("Send up %s (%d) data to HAL, hal_event_id=%d\n",
- nan_event_to_str(event_num), event_num, hal_event_id));
-#if (LINUX_VERSION_CODE > KERNEL_VERSION(3, 13, 0)) || defined(WL_VENDOR_EXT_SUPPORT)
- ret = wl_cfgvendor_send_nan_event(cfg->wdev->wiphy, bcmcfg_to_prmry_ndev(cfg),
- hal_event_id, nan_event_data);
- if (ret != BCME_OK) {
- WL_ERR(("Failed to send event to nan hal, %s (%d)\n",
- nan_event_to_str(event_num), event_num));
- }
-#endif /* (LINUX_VERSION_CODE > KERNEL_VERSION(3, 13, 0)) || defined(WL_VENDOR_EXT_SUPPORT) */
-
-exit:
- wl_cfgnan_clear_nan_event_data(cfg, nan_event_data);
-
- NAN_MUTEX_UNLOCK();
- NAN_DBG_EXIT();
- return ret;
-}
-
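/*
 * A minimal sketch of the TLV-walking pattern used above (not from the driver
 * source; hypothetical names and a simplified packed { id, len, data } layout
 * are assumed): bcm_unpack_xtlv_buf() conceptually iterates (id, length, data)
 * records and hands each one to the callback, while additionally handling the
 * alignment mode passed in xtlv_opt.
 */
#include <stddef.h>
#include <stdint.h>
#include <string.h>

typedef int (*tlv_cb_t)(void *ctx, uint16_t id, uint16_t len, const uint8_t *data);

/* Walk "len" bytes of packed { uint16 id; uint16 len; uint8 data[len]; } records. */
static int walk_tlvs(void *ctx, const uint8_t *buf, size_t len, tlv_cb_t cb)
{
	size_t off = 0;

	while (off + 4 <= len) {
		uint16_t id, dlen;

		memcpy(&id, buf + off, sizeof(id));
		memcpy(&dlen, buf + off + 2, sizeof(dlen));
		if (off + 4 + dlen > len) {
			return -1; /* truncated record */
		}
		if (cb(ctx, id, dlen, buf + off + 4) != 0) {
			break; /* callback asked to stop early */
		}
		off += 4 + dlen;
	}
	return 0;
}
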
-#ifdef WL_NAN_DISC_CACHE
-static int
-wl_cfgnan_cache_disc_result(struct bcm_cfg80211 *cfg, void * data,
- u16 *disc_cache_update_flags)
-{
- nan_event_data_t* disc = (nan_event_data_t*)data;
- int i, add_index = 0;
- int ret = BCME_OK;
- nan_disc_result_cache *disc_res = cfg->nan_disc_cache;
- *disc_cache_update_flags = 0;
-
- if (!cfg->nan_enable) {
- WL_DBG(("nan not enabled"));
- return BCME_NOTENABLED;
- }
- if (cfg->nan_disc_count == NAN_MAX_CACHE_DISC_RESULT) {
- WL_DBG(("cache full"));
- ret = BCME_NORESOURCE;
- goto done;
- }
-
- for (i = 0; i < NAN_MAX_CACHE_DISC_RESULT; i++) {
- if (!disc_res[i].valid) {
- add_index = i;
- continue;
- }
- if (!memcmp(&disc_res[i].peer, &disc->remote_nmi, ETHER_ADDR_LEN) &&
- !memcmp(disc_res[i].svc_hash, disc->svc_name, WL_NAN_SVC_HASH_LEN)) {
- WL_DBG(("cache entry already present, i = %d", i));
- /* Update needed parameters here */
- if (disc_res[i].sde_control_flag != disc->sde_control_flag) {
- disc_res[i].sde_control_flag = disc->sde_control_flag;
- *disc_cache_update_flags |= NAN_DISC_CACHE_PARAM_SDE_CONTROL;
- }
- ret = BCME_OK; /* entry already present */
- goto done;
- }
- }
- WL_DBG(("adding cache entry: add_index = %d\n", add_index));
- disc_res[add_index].valid = 1;
- disc_res[add_index].pub_id = disc->pub_id;
- disc_res[add_index].sub_id = disc->sub_id;
- disc_res[add_index].publish_rssi = disc->publish_rssi;
- disc_res[add_index].peer_cipher_suite = disc->peer_cipher_suite;
- disc_res[add_index].sde_control_flag = disc->sde_control_flag;
- ret = memcpy_s(&disc_res[add_index].peer, ETHER_ADDR_LEN,
- &disc->remote_nmi, ETHER_ADDR_LEN);
- if (ret != BCME_OK) {
- WL_ERR(("Failed to copy remote nmi\n"));
- goto done;
- }
- ret = memcpy_s(disc_res[add_index].svc_hash, WL_NAN_SVC_HASH_LEN,
- disc->svc_name, WL_NAN_SVC_HASH_LEN);
- if (ret != BCME_OK) {
- WL_ERR(("Failed to copy svc hash\n"));
- goto done;
- }
-
- if (disc->svc_info.dlen && disc->svc_info.data) {
- disc_res[add_index].svc_info.dlen = disc->svc_info.dlen;
- disc_res[add_index].svc_info.data =
- MALLOCZ(cfg->osh, disc_res[add_index].svc_info.dlen);
- if (!disc_res[add_index].svc_info.data) {
- WL_ERR(("%s: memory allocation failed\n", __FUNCTION__));
- disc_res[add_index].svc_info.dlen = 0;
- ret = BCME_NOMEM;
- goto done;
- }
- ret = memcpy_s(disc_res[add_index].svc_info.data, disc_res[add_index].svc_info.dlen,
- disc->svc_info.data, disc->svc_info.dlen);
- if (ret != BCME_OK) {
- WL_ERR(("Failed to copy svc info\n"));
- goto done;
- }
- }
- if (disc->tx_match_filter.dlen && disc->tx_match_filter.data) {
- disc_res[add_index].tx_match_filter.dlen = disc->tx_match_filter.dlen;
- disc_res[add_index].tx_match_filter.data =
- MALLOCZ(cfg->osh, disc_res[add_index].tx_match_filter.dlen);
- if (!disc_res[add_index].tx_match_filter.data) {
- WL_ERR(("%s: memory allocation failed\n", __FUNCTION__));
- disc_res[add_index].tx_match_filter.dlen = 0;
- ret = BCME_NOMEM;
- goto done;
- }
- ret = memcpy_s(disc_res[add_index].tx_match_filter.data,
- disc_res[add_index].tx_match_filter.dlen,
- disc->tx_match_filter.data, disc->tx_match_filter.dlen);
- if (ret != BCME_OK) {
- WL_ERR(("Failed to copy tx match filter\n"));
- goto done;
- }
- }
- cfg->nan_disc_count++;
- WL_DBG(("cfg->nan_disc_count = %d\n", cfg->nan_disc_count));
-
-done:
- return ret;
-}
-
-/* Sending command to FW for clearing discovery cache info in FW */
-static int
-wl_cfgnan_clear_disc_cache(struct bcm_cfg80211 *cfg, wl_nan_instance_id_t sub_id)
-{
- s32 ret = BCME_OK;
- uint16 nan_buf_size = NAN_IOCTL_BUF_SIZE;
- uint32 status;
- uint8 resp_buf[NAN_IOCTL_BUF_SIZE];
- uint8 buf[NAN_IOCTL_BUF_SIZE];
- bcm_iov_batch_buf_t *nan_buf;
- bcm_iov_batch_subcmd_t *sub_cmd;
- uint16 subcmd_len;
-
- /* Same src and dest len here */
- memset_s(buf, sizeof(buf), 0, sizeof(buf));
-
- nan_buf = (bcm_iov_batch_buf_t*)buf;
-
- nan_buf->version = htol16(WL_NAN_IOV_BATCH_VERSION);
- nan_buf->count = 0;
- nan_buf_size -= OFFSETOF(bcm_iov_batch_buf_t, cmds[0]);
-
- sub_cmd = (bcm_iov_batch_subcmd_t *)(&nan_buf->cmds[0]);
- ret = wl_cfg_nan_check_cmd_len(nan_buf_size,
- sizeof(sub_id), &subcmd_len);
- if (unlikely(ret)) {
- WL_ERR(("nan_sub_cmd check failed\n"));
- goto fail;
- }
-
- /* Fill the sub_command block */
- sub_cmd->id = htod16(WL_NAN_CMD_SD_DISC_CACHE_CLEAR);
- sub_cmd->len = sizeof(sub_cmd->u.options) + sizeof(sub_id);
- sub_cmd->u.options = htol32(BCM_XTLV_OPTION_ALIGN32);
- /* Data size len vs buffer len check is already done above.
- * So, short buffer error is impossible.
- */
- (void)memcpy_s(sub_cmd->data, (nan_buf_size - OFFSETOF(bcm_iov_batch_subcmd_t, data)),
- &sub_id, sizeof(sub_id));
- /* adjust iov data len to the end of last data record */
- nan_buf_size -= (subcmd_len);
-
- nan_buf->count++;
- nan_buf->is_set = true;
- nan_buf_size = NAN_IOCTL_BUF_SIZE - nan_buf_size;
- /* Same src and dest len here */
- memset_s(resp_buf, sizeof(resp_buf), 0, sizeof(resp_buf));
- ret = wl_cfgnan_execute_ioctl(bcmcfg_to_prmry_ndev(cfg), cfg,
- nan_buf, nan_buf_size, &status,
- (void*)resp_buf, NAN_IOCTL_BUF_SIZE);
- if (unlikely(ret) || unlikely(status)) {
- WL_ERR(("Disc cache clear handler failed ret %d status %d\n",
- ret, status));
- goto fail;
- }
-
-fail:
- return ret;
-}
-
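/*
 * A minimal sketch of the header-plus-payload sub-command packing used above
 * (not from the driver source; a hypothetical record layout is assumed, and
 * the endianness conversion and alignment options carried by the real
 * bcm_iov_batch_* structures are ignored).
 */
#include <stddef.h>
#include <stdint.h>
#include <string.h>

struct rec_hdr {
	uint16_t id;   /* sub-command id */
	uint16_t len;  /* payload length in bytes */
};

/* Append one record; returns the new used length, or 0 if it does not fit. */
static size_t pack_record(uint8_t *buf, size_t used, size_t cap,
	uint16_t id, const void *payload, uint16_t plen)
{
	struct rec_hdr hdr;

	if (used + sizeof(hdr) + plen > cap) {
		return 0;
	}
	hdr.id = id;
	hdr.len = plen;
	memcpy(buf + used, &hdr, sizeof(hdr));
	memcpy(buf + used + sizeof(hdr), payload, plen);
	return used + sizeof(hdr) + plen;
}
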
-static int wl_cfgnan_remove_disc_result(struct bcm_cfg80211 *cfg,
- uint8 local_subid)
-{
- int i;
- int ret = BCME_NOTFOUND;
- nan_disc_result_cache *disc_res = cfg->nan_disc_cache;
- if (!cfg->nan_enable) {
- WL_DBG(("nan not enabled\n"));
- ret = BCME_NOTENABLED;
- goto done;
- }
- for (i = 0; i < NAN_MAX_CACHE_DISC_RESULT; i++) {
- if ((disc_res[i].valid) && (disc_res[i].sub_id == local_subid)) {
- WL_TRACE(("make cache entry invalid\n"));
- if (disc_res[i].tx_match_filter.data) {
- MFREE(cfg->osh, disc_res[i].tx_match_filter.data,
- disc_res[i].tx_match_filter.dlen);
- }
- if (disc_res[i].svc_info.data) {
- MFREE(cfg->osh, disc_res[i].svc_info.data,
- disc_res[i].svc_info.dlen);
- }
- memset_s(&disc_res[i], sizeof(disc_res[i]), 0, sizeof(disc_res[i]));
- cfg->nan_disc_count--;
- ret = BCME_OK;
- }
- }
- WL_DBG(("couldn't find entry\n"));
-done:
- return ret;
-}
-
-static nan_disc_result_cache *
-wl_cfgnan_get_disc_result(struct bcm_cfg80211 *cfg, uint8 remote_pubid,
- struct ether_addr *peer)
-{
- int i;
- nan_disc_result_cache *disc_res = cfg->nan_disc_cache;
- if (remote_pubid) {
- for (i = 0; i < NAN_MAX_CACHE_DISC_RESULT; i++) {
- if ((disc_res[i].pub_id == remote_pubid) &&
- !memcmp(&disc_res[i].peer, peer, ETHER_ADDR_LEN)) {
- WL_DBG(("Found entry: i = %d\n", i));
- return &disc_res[i];
- }
- }
- } else {
- for (i = 0; i < NAN_MAX_CACHE_DISC_RESULT; i++) {
- if (!memcmp(&disc_res[i].peer, peer, ETHER_ADDR_LEN)) {
- WL_DBG(("Found entry: %d\n", i));
- return &disc_res[i];
- }
- }
- }
- return NULL;
-}
-#endif /* WL_NAN_DISC_CACHE */
-
-void
-wl_cfgnan_update_dp_info(struct bcm_cfg80211 *cfg, bool add,
- nan_data_path_id ndp_id)
-{
- uint8 i;
- bool match_found = false;
-#ifdef ARP_OFFLOAD_SUPPORT
- dhd_pub_t *dhd = (struct dhd_pub *)(cfg->pub);
-#endif /* ARP_OFFLOAD_SUPPORT */
- /* As of now, we don't see a need to know which NDP is active,
- * so we just keep track of NDPs via a count. If we need to know
- * the status of each NDP based on its NDP id, this implementation
- * must change to use a bit mask (see the sketch after this function).
- */
- if (!dhd) {
- WL_ERR(("dhd pub null!\n"));
- return;
- }
-
- if (add) {
- /* On first NAN DP establishment, disable ARP. */
-#ifdef ARP_OFFLOAD_SUPPORT
- if (!cfg->nan_dp_count) {
- dhd_arp_offload_set(dhd, 0);
- dhd_arp_offload_enable(dhd, false);
- }
-#endif /* ARP_OFFLOAD_SUPPORT */
- for (i = 0; i < NAN_MAX_NDP_PEER; i++) {
- if (!cfg->nancfg.ndp_id[i]) {
- WL_TRACE(("Found empty field\n"));
- break;
- }
- }
-
- if (i == NAN_MAX_NDP_PEER) {
- WL_ERR(("%s:cannot accommodate ndp id\n", __FUNCTION__));
- return;
- }
- if (ndp_id) {
- cfg->nan_dp_count++;
- cfg->nancfg.ndp_id[i] = ndp_id;
- WL_DBG(("%s:Added ndp id = [%d] at i = %d\n",
- __FUNCTION__, cfg->nancfg.ndp_id[i], i));
- }
- } else {
- ASSERT(cfg->nan_dp_count);
- if (ndp_id) {
- for (i = 0; i < NAN_MAX_NDP_PEER; i++) {
- if (cfg->nancfg.ndp_id[i] == ndp_id) {
- cfg->nancfg.ndp_id[i] = 0;
- WL_DBG(("%s:Removed ndp id = [%d] from i = %d\n",
- __FUNCTION__, ndp_id, i));
- match_found = true;
- if (cfg->nan_dp_count) {
- cfg->nan_dp_count--;
- }
- break;
- } else {
- WL_DBG(("couldn't find entry for ndp id = %d\n",
- ndp_id));
- }
- }
- if (match_found == false) {
- WL_ERR(("Received unsaved NDP Id = %d !!\n", ndp_id));
- }
- }
-
-#ifdef ARP_OFFLOAD_SUPPORT
- if (!cfg->nan_dp_count) {
- /* If NAN DP count becomes zero and if there
- * are no conflicts, enable back ARP offload.
- * As of now, the conflicting interfaces are AP
- * and P2P. But NAN + P2P/AP concurrency is not
- * supported.
- */
- dhd_arp_offload_set(dhd, dhd_arp_mode);
- dhd_arp_offload_enable(dhd, true);
- }
-#endif /* ARP_OFFLOAD_SUPPORT */
- }
- WL_INFORM_MEM(("NAN_DP_COUNT: %d\n", cfg->nan_dp_count));
-}
-
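/*
 * A minimal sketch of the bit-mask alternative mentioned in the comment inside
 * wl_cfgnan_update_dp_info() above (not from the driver source; hypothetical
 * names and a fixed 8-peer limit are assumed).
 */
#include <stdbool.h>
#include <stdint.h>

#define NDP_MAX_PEERS 8u

/* One bit per NDP id in 1..NDP_MAX_PEERS; a set bit means the NDP is active. */
static uint8_t ndp_active_mask;

static void ndp_mark(uint8_t ndp_id, bool active)
{
	if (ndp_id == 0 || ndp_id > NDP_MAX_PEERS) {
		return;
	}
	if (active) {
		ndp_active_mask |= (uint8_t)(1u << (ndp_id - 1));
	} else {
		ndp_active_mask &= (uint8_t)~(1u << (ndp_id - 1));
	}
}

static bool ndp_is_active(uint8_t ndp_id)
{
	if (ndp_id == 0 || ndp_id > NDP_MAX_PEERS) {
		return false;
	}
	return (ndp_active_mask & (1u << (ndp_id - 1))) != 0;
}
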
-bool
-wl_cfgnan_is_dp_active(struct net_device *ndev)
-{
- struct bcm_cfg80211 *cfg;
- bool nan_dp;
-
- if (!ndev || !ndev->ieee80211_ptr) {
- WL_ERR(("ndev/wdev null\n"));
- return false;
- }
-
- cfg = wiphy_priv(ndev->ieee80211_ptr->wiphy);
- nan_dp = cfg->nan_dp_count ? true : false;
-
- WL_DBG(("NAN DP status:%d\n", nan_dp));
- return nan_dp;
-}
-
-s32
-wl_cfgnan_get_ndi_idx(struct bcm_cfg80211 *cfg)
-{
- int i;
- for (i = 0; i < NAN_MAX_NDI; i++) {
- if (!cfg->nancfg.ndi[i].in_use) {
- /* Free interface, use it */
- return i;
- }
- }
- /* Don't have a free interface */
- return WL_INVALID;
-}
-
-s32
-wl_cfgnan_add_ndi_data(struct bcm_cfg80211 *cfg, s32 idx, char *name)
-{
- u16 len;
- if (!name || (idx < 0) || (idx >= NAN_MAX_NDI)) {
- return -EINVAL;
- }
-
- /* Ensure ifname string size <= IFNAMSIZ including null termination */
- len = MIN(strlen(name), (IFNAMSIZ - 1));
- strncpy(cfg->nancfg.ndi[idx].ifname, name, len);
- cfg->nancfg.ndi[idx].ifname[len] = '\0';
- cfg->nancfg.ndi[idx].in_use = true;
- cfg->nancfg.ndi[idx].created = false;
-
- /* Don't have a free interface */
- return WL_INVALID;
-}
-
-s32
-wl_cfgnan_del_ndi_data(struct bcm_cfg80211 *cfg, char *name)
-{
- u16 len;
- int i;
- if (!name) {
- return -EINVAL;
- }
-
- len = MIN(strlen(name), IFNAMSIZ);
- for (i = 0; i < NAN_MAX_NDI; i++) {
- if (strncmp(cfg->nancfg.ndi[i].ifname, name, len) == 0) {
- memset_s(&cfg->nancfg.ndi[i].ifname, IFNAMSIZ,
- 0x0, IFNAMSIZ);
- cfg->nancfg.ndi[i].in_use = false;
- cfg->nancfg.ndi[i].created = false;
- cfg->nancfg.ndi[i].nan_ndev = NULL;
- return i;
- }
- }
- return -EINVAL;
-}
-
-struct wl_ndi_data *
-wl_cfgnan_get_ndi_data(struct bcm_cfg80211 *cfg, char *name)
-{
- u16 len;
- int i;
- if (!name) {
- return NULL;
- }
-
- len = MIN(strlen(name), IFNAMSIZ);
- for (i = 0; i < NAN_MAX_NDI; i++) {
- if (strncmp(cfg->nancfg.ndi[i].ifname, name, len) == 0) {
- return &cfg->nancfg.ndi[i];
- }
- }
- return NULL;
-}
-
-s32
-wl_cfgnan_delete_ndp(struct bcm_cfg80211 *cfg,
- struct net_device *nan_ndev)
-{
- s32 ret = BCME_OK;
- uint8 i = 0;
- for (i = 0; i < NAN_MAX_NDI; i++) {
- if (cfg->nancfg.ndi[i].in_use &&
- cfg->nancfg.ndi[i].created &&
- (cfg->nancfg.ndi[i].nan_ndev == nan_ndev)) {
- WL_INFORM_MEM(("iface name: %s, cfg->nancfg.ndi[i].nan_ndev = %p"
- " and nan_ndev = %p\n",
- (char*)cfg->nancfg.ndi[i].ifname,
- cfg->nancfg.ndi[i].nan_ndev, nan_ndev));
- ret = _wl_cfg80211_del_if(cfg, nan_ndev, NULL,
- (char*)cfg->nancfg.ndi[i].ifname);
- if (ret) {
- WL_ERR(("failed to del ndi [%d]\n", ret));
- goto exit;
- }
- /* After successful delete of interface,
- * clear up the ndi data
- */
- if (wl_cfgnan_del_ndi_data(cfg,
- (char*)cfg->nancfg.ndi[i].ifname) < 0) {
- WL_ERR(("Failed to find matching data for ndi:%s\n",
- (char*)cfg->nancfg.ndi[i].ifname));
- }
- }
- }
- exit:
- return ret;
-}
-
-int
-wl_cfgnan_get_status(struct net_device *ndev, wl_nan_conf_status_t *nan_status)
-{
- bcm_iov_batch_buf_t *nan_buf = NULL;
- uint16 subcmd_len;
- bcm_iov_batch_subcmd_t *sub_cmd = NULL;
- bcm_iov_batch_subcmd_t *sub_cmd_resp = NULL;
- uint8 resp_buf[NAN_IOCTL_BUF_SIZE];
- wl_nan_conf_status_t *nstatus = NULL;
- uint32 status;
- s32 ret = BCME_OK;
- uint16 nan_buf_size = NAN_IOCTL_BUF_SIZE;
- struct bcm_cfg80211 *cfg = wl_get_cfg(ndev);
- NAN_DBG_ENTER();
-
- nan_buf = MALLOCZ(cfg->osh, NAN_IOCTL_BUF_SIZE);
- if (!nan_buf) {
- WL_ERR(("%s: memory allocation failed\n", __func__));
- ret = BCME_NOMEM;
- goto fail;
- }
-
- nan_buf->version = htol16(WL_NAN_IOV_BATCH_VERSION);
- nan_buf->count = 0;
- nan_buf_size -= OFFSETOF(bcm_iov_batch_buf_t, cmds[0]);
- sub_cmd = (bcm_iov_batch_subcmd_t*)(uint8 *)(&nan_buf->cmds[0]);
-
- ret = wl_cfg_nan_check_cmd_len(nan_buf_size,
- sizeof(*nstatus), &subcmd_len);
- if (unlikely(ret)) {
- WL_ERR(("nan_sub_cmd check failed\n"));
- goto fail;
- }
-
- nstatus = (wl_nan_conf_status_t *)sub_cmd->data;
- sub_cmd->id = htod16(WL_NAN_CMD_CFG_STATUS);
- sub_cmd->len = sizeof(sub_cmd->u.options) + sizeof(*nstatus);
- sub_cmd->u.options = htol32(BCM_XTLV_OPTION_ALIGN32);
- nan_buf_size -= subcmd_len;
- nan_buf->count = 1;
- nan_buf->is_set = false;
-
- memset_s(resp_buf, sizeof(resp_buf), 0, sizeof(resp_buf));
- ret = wl_cfgnan_execute_ioctl(ndev, cfg, nan_buf, nan_buf_size, &status,
- (void*)resp_buf, NAN_IOCTL_BUF_SIZE);
- if (unlikely(ret) || unlikely(status)) {
- WL_ERR(("get nan status failed ret %d status %d \n",
- ret, status));
- goto fail;
- }
- sub_cmd_resp = &((bcm_iov_batch_buf_t *)(resp_buf))->cmds[0];
- /* WL_NAN_CMD_CFG_STATUS return value doesn't use xtlv package */
- nstatus = ((wl_nan_conf_status_t *)&sub_cmd_resp->data[0]);
- ret = memcpy_s(nan_status, sizeof(wl_nan_conf_status_t),
- nstatus, sizeof(wl_nan_conf_status_t));
- if (ret != BCME_OK) {
- WL_ERR(("Failed to copy tx match filter\n"));
- goto fail;
- }
-
-fail:
- if (nan_buf) {
- MFREE(cfg->osh, nan_buf, NAN_IOCTL_BUF_SIZE);
- }
- NAN_DBG_EXIT();
- return ret;
-}
-#endif /* WL_NAN */
/*
* Neighbor Awareness Networking
*
- * Copyright (C) 1999-2019, Broadcom.
- *
+ * Copyright (C) 1999-2017, Broadcom Corporation
+ *
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
* under the terms of the GNU General Public License version 2 (the "GPL"),
* available at http://www.broadcom.com/licenses/GPLv2.php, with the
* following added to such license:
- *
+ *
* As a special exception, the copyright holders of this software give you
* permission to link this software with independent modules, and to copy and
* distribute the resulting executable under terms of your choice, provided that
* you also meet, for each linked independent module, the terms and conditions of
* the license of that module. An independent module is a module which is not
* derived from this software. The special exception does not apply to any
* modifications of the software.
- *
+ *
* Notwithstanding the above, under no circumstances may you combine this
* software in any way with any other Broadcom software provided under a license
* other than the GPL, without Broadcom's express prior written consent.
*
- *
* <<Broadcom-WL-IPTag/Open:>>
*
- * $Id: wl_cfgnan.h 825970 2019-06-18 05:28:31Z $
+ * $Id: wl_cfgnan.h 650782 2016-07-22 11:51:53Z $
*/
#ifndef _wl_cfgnan_h_
#define _wl_cfgnan_h_
-/* NAN structs versioning b/w DHD and HAL
-* define new version if any change in any of the shared structs
-*/
-#define NAN_HAL_VERSION_1 0x2
-
-#define NAN_EVENT_BUFFER_SIZE_LARGE 1024u
-
-#define NAN_RANGE_EXT_CANCEL_SUPPORT_VER 2
-#define WL_NAN_IOV_BATCH_VERSION 0x8000
-#define WL_NAN_AVAIL_REPEAT_INTVL 0x0200
-#define WL_NAN_AVAIL_START_INTVL 160
-#define WL_NAN_AVAIL_DURATION_INTVL 336
-#define NAN_IOCTL_BUF_SIZE 256u
-#define NAN_IOCTL_BUF_SIZE_MED 512u
-#define NAN_IOCTL_BUF_SIZE_LARGE 1024u
-#define NAN_EVENT_NAME_MAX_LEN 40u
-#define NAN_RTT_IOVAR_BUF_SIZE 1024u
+#define NAN_IOCTL_BUF_SIZE 512
+#define NAN_EVENT_NAME_MAX_LEN 40
+#define NAN_CONFIG_ATTR_MAX_LEN 24
+#define NAN_RTT_IOVAR_BUF_SIZE 1024
#define WL_NAN_EVENT_CLEAR_BIT 32
#define NAN_EVENT_MASK_ALL 0x7fffffff
-#define NAN_MAX_AWAKE_DW_INTERVAL 5
-#define NAN_MAXIMUM_ID_NUMBER 255
-#define NAN_MAXIMUM_MASTER_PREFERENCE 254
-#define NAN_ID_RESERVED 0
-#define NAN_ID_MIN 1
-#define NAN_ID_MAX 255
-#define NAN_DEF_SOCIAL_CHAN_2G 6
-#define NAN_DEF_SOCIAL_CHAN_5G 149
-#define NAN_DEF_SEC_SOCIAL_CHAN_5G 44
-#define NAN_MAX_SOCIAL_CHANNELS 3
-/* Default RSSI notification threshold: -70 dBm */
-#define NAN_DEF_RSSI_NOTIF_THRESH -70
-/* Default RSSI mid value: -75 dBm */
-#define NAN_DEF_RSSI_MID -75
-/* Default RSSI close value: -60 dBm */
-#define NAN_DEF_RSSI_CLOSE -60
-#define WL_AVAIL_BIT_MAP "1111111111111111111111111111111100000000000000000000000000000000"
-#define WL_5G_AVAIL_BIT_MAP "0000000011111111111111111111111111111111000000000000000000000000"
-#define WL_AVAIL_CHANNEL_2G 6
-#define WL_AVAIL_BANDWIDTH_2G WL_CHANSPEC_BW_20
-#define WL_AVAIL_CHANNEL_5G 149
-#define WL_AVAIL_BANDWIDTH_5G WL_CHANSPEC_BW_80
-#define NAN_RANGING_PERIOD WL_AVAIL_PERIOD_1024
-#define NAN_SYNC_DEF_AWAKE_DW 1
-#define NAN_RNG_TERM_FLAG_NONE 0
-
-#define NAN_BLOOM_LENGTH_DEFAULT 240u
-#define NAN_SRF_MAX_MAC (NAN_BLOOM_LENGTH_DEFAULT / ETHER_ADDR_LEN)
-#define NAN_SRF_CTRL_FIELD_LEN 1u
+#define NAN_MAXIMUM_ID_NUMBER 255
+#define NAN_MAXIMUM_MASTER_PREFERENCE 255
+#ifdef NAN_DP
#define MAX_IF_ADD_WAIT_TIME 1000
-#define NAN_DP_ROLE_INITIATOR 0x0001
-#define NAN_DP_ROLE_RESPONDER 0x0002
-
-#define WL_NAN_OBUF_DATA_OFFSET (OFFSETOF(bcm_iov_batch_buf_t, cmds[0]) + \
- OFFSETOF(bcm_iov_batch_subcmd_t, data[0]))
+#endif /* NAN_DP */
+#define NAN_INVALID_ID(id) (id > NAN_MAXIMUM_ID_NUMBER)
#define NAN_INVALID_ROLE(role) (role > WL_NAN_ROLE_ANCHOR_MASTER)
#define NAN_INVALID_CHANSPEC(chanspec) ((chanspec == INVCHANSPEC) || \
(chanspec == 0))
(num >= WL_NAN_EVENT_INVALID))
#define NAN_INVALID_PROXD_EVENT(num) (num != WLC_E_PROXD_NAN_EVENT)
#define NAN_EVENT_BIT(event) (1U << (event - WL_NAN_EVENT_START))
-#define NAN_EVENT_MAP(event) ((event) - WL_NAN_EVENT_START)
#define NAME_TO_STR(name) #name
#define NAN_ID_CTRL_SIZE ((NAN_MAXIMUM_ID_NUMBER/8) + 1)
-#define tolower(c) bcm_tolower(c)
-
-#define NMR2STR(a) (a)[0], (a)[1], (a)[2], (a)[3], (a)[4], (a)[5], (a)[6], (a)[7]
-#define NMRSTR "%02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x"
-
-#define NAN_DBG_ENTER() {WL_DBG(("Enter: %s\n", __FUNCTION__));}
-#define NAN_DBG_EXIT() {WL_DBG(("Exit: %s\n", __FUNCTION__));}
-
-/* Service Control Type length */
-#define NAN_SVC_CONTROL_TYPE_MASK ((1 << NAN_SVC_CONTROL_TYPE_LEN) - 1)
-
-#ifndef strtoul
-#define strtoul(nptr, endptr, base) bcm_strtoul((nptr), (endptr), (base))
-#endif // endif
-
-#define NAN_MAC_ADDR_LEN 6u
-#define NAN_DP_MAX_APP_INFO_LEN 512u
-
-#define NAN_SDE_CF_DP_REQUIRED (1 << 2)
-#define NAN_SDE_CF_DP_TYPE (1 << 3)
-#define NAN_SDE_CF_MULTICAST_TYPE (1 << 4)
-#define NAN_SDE_CF_SECURITY_REQUIRED (1 << 6)
-#define NAN_SDE_CF_RANGING_REQUIRED (1 << 7)
-#define NAN_SDE_CF_RANGE_PRESENT (1 << 8)
-
-#define CHECK_BIT(m, n) ((((m) >> (n)) & 1) ? 1 : 0)
-#define WL_NAN_EVENT_DIC_MAC_ADDR_BIT 0
-#define WL_NAN_EVENT_START_EVENT 1
-#define WL_NAN_EVENT_JOIN_EVENT 2
-
-/* Disabling svc-specific events (as part of sub & pub calls) based on the bits below */
-#define WL_NAN_EVENT_SUPPRESS_TERMINATE_BIT 0
-#define WL_NAN_EVENT_SUPPRESS_MATCH_EXP_BIT 1
-#define WL_NAN_EVENT_SUPPRESS_RECEIVE_BIT 2
-#define WL_NAN_EVENT_SUPPRESS_REPLIED_BIT 3
-
-/* Disabling transmit followup events based on the bit below */
-#define WL_NAN_EVENT_SUPPRESS_FOLLOWUP_RECEIVE_BIT 0
-
-#define C2S(x) case x: return #x;
-#define NAN_BLOOM_LENGTH_DEFAULT 240u
-#define NAN_SRF_MAX_MAC (NAN_BLOOM_LENGTH_DEFAULT / ETHER_ADDR_LEN)
-#define NAN_MAX_PMK_LEN 32u
-#define NAN_ERROR_STR_LEN 255u
-
-/* NAN related Capabilities */
-#define MAX_CONCURRENT_NAN_CLUSTERS 1
-#define MAX_PUBLISHES 8u
-#define MAX_SUBSCRIBES 8u
-#define MAX_SVC_NAME_LEN 255u
-#define MAX_MATCH_FILTER_LEN 255u
-#define MAX_TOTAL_MATCH_FILTER_LEN 510u
-#define NAN_MAX_SERVICE_SPECIFIC_INFO_LEN 255u
-#define MAX_NDI_INTERFACES 1
-#define MAX_NDP_SESSIONS 5
-#define MAX_APP_INFO_LEN 255u
-#define MAX_QUEUED_TX_FOLLOUP_MSGS 10
-#define MAX_SDEA_SVC_INFO_LEN 255u
-#define MAX_SUBSCRIBE_ADDRESS 10
-#define CIPHER_SUITE_SUPPORTED 1
-#define MAX_SCID_LEN 0
-#define IS_NDP_SECURITY_SUPPORTED true
-#define NDP_SUPPORTED_BANDS 2
-#define NAN_MAX_RANGING_INST 8u
-#define NAN_MAX_RANGING_SSN_ALLOWED 1u
-#define NAN_MAX_SVC_INST (MAX_PUBLISHES + MAX_SUBSCRIBES)
-#define NAN_SVC_INST_SIZE 32u
-#define NAN_START_STOP_TIMEOUT 5000
-#define NAN_MAX_NDP_PEER 8u
-#define NAN_DISABLE_CMD_DELAY_TIMER 4000u
-
-#ifdef WL_NAN_DEBUG
-#define NAN_MUTEX_LOCK() {WL_DBG(("Mutex Lock: Enter: %s\n", __FUNCTION__)); \
- mutex_lock(&cfg->nancfg.nan_sync);}
-#define NAN_MUTEX_UNLOCK() {mutex_unlock(&cfg->nancfg.nan_sync); \
- WL_DBG(("Mutex Unlock: Exit: %s\n", __FUNCTION__));}
-#else
-#define NAN_MUTEX_LOCK() {mutex_lock(&cfg->nancfg.nan_sync);}
-#define NAN_MUTEX_UNLOCK() {mutex_unlock(&cfg->nancfg.nan_sync);}
-#endif /* WL_NAN_DEBUG */
-#define NAN_ATTR_SUPPORT_2G_CONFIG (1<<0)
-#define NAN_ATTR_SYNC_DISC_2G_BEACON_CONFIG (1<<1)
-#define NAN_ATTR_SDF_2G_SUPPORT_CONFIG (1<<2)
-#define NAN_ATTR_SUPPORT_5G_CONFIG (1<<3)
-#define NAN_ATTR_SYNC_DISC_5G_BEACON_CONFIG (1<<4)
-#define NAN_ATTR_SDF_5G_SUPPORT_CONFIG (1<<5)
-#define NAN_ATTR_2G_DW_CONFIG (1<<6)
-#define NAN_ATTR_5G_DW_CONFIG (1<<7)
-#define NAN_ATTR_2G_CHAN_CONFIG (1<<8)
-#define NAN_ATTR_5G_CHAN_CONFIG (1<<9)
-#define NAN_ATTR_2G_DWELL_TIME_CONFIG (1<<10)
-#define NAN_ATTR_5G_DWELL_TIME_CONFIG (1<<11)
-#define NAN_ATTR_2G_SCAN_PERIOD_CONFIG (1<<12)
-#define NAN_ATTR_5G_SCAN_PERIOD_CONFIG (1<<13)
-#define NAN_ATTR_RSSI_CLOSE_CONFIG (1<<14)
-#define NAN_ATTR_RSSI_MIDDLE_2G_CONFIG (1<<15)
-#define NAN_ATTR_RSSI_PROXIMITY_2G_CONFIG (1<<16)
-#define NAN_ATTR_RSSI_CLOSE_5G_CONFIG (1<<17)
-#define NAN_ATTR_RSSI_MIDDLE_5G_CONFIG (1<<18)
-#define NAN_ATTR_RSSI_PROXIMITY_5G_CONFIG (1<<19)
-#define NAN_ATTR_RSSI_WINDOW_SIZE_CONFIG (1<<20)
-#define NAN_ATTR_HOP_COUNT_LIMIT_CONFIG (1<<21)
-#define NAN_ATTR_SID_BEACON_CONFIG (1<<22)
-#define NAN_ATTR_HOP_COUNT_FORCE_CONFIG (1<<23)
-#define NAN_ATTR_RAND_FACTOR_CONFIG (1<<24)
-#define NAN_ATTR_CLUSTER_VAL_CONFIG (1<<25)
-#define NAN_ATTR_IF_ADDR_CONFIG (1<<26)
-#define NAN_ATTR_OUI_CONFIG (1<<27)
-#define NAN_ATTR_SUB_SID_BEACON_CONFIG (1<<28)
-#define NAN_IOVAR_NAME_SIZE 4u
-#define NAN_XTLV_ID_LEN_SIZE OFFSETOF(bcm_xtlv_t, data)
-#define NAN_RANGING_INDICATE_CONTINUOUS_MASK 0x01
-#define NAN_RANGE_REQ_CMD 0
-#define NAN_RNG_REQ_ACCEPTED_BY_HOST 1
-#define NAN_RNG_REQ_REJECTED_BY_HOST 0
-
-#define NAN_RNG_GEOFENCE_MAX_RETRY_CNT 3u
-
-typedef uint32 nan_data_path_id;
-
-typedef enum nan_stop_reason_code {
- NAN_CONCURRENCY_CONFLICT = 0,
- NAN_USER_INITIATED = 1,
- NAN_BUS_IS_DOWN = 2,
- NAN_DEINITIALIZED = 3,
- NAN_COUNTRY_CODE_CHANGE = 4
-} nan_stop_reason_code_t;
-
-typedef enum nan_range_status {
- NAN_RANGING_INVALID = 0,
- NAN_RANGING_REQUIRED = 1,
- NAN_RANGING_IN_PROGRESS = 2
-} nan_range_status_t;
-
-typedef enum nan_range_role {
- NAN_RANGING_ROLE_INVALID = 0,
- NAN_RANGING_ROLE_INITIATOR = 1,
- NAN_RANGING_ROLE_RESPONDER = 2
-} nan_range_role_t;
-
-typedef struct nan_svc_inst {
- uint8 inst_id; /* publisher/subscriber id */
- uint8 inst_type; /* publisher/subscriber */
-} nan_svc_inst_t;
-
-/* Range Status Flag bits for svc info */
-#define SVC_RANGE_REP_EVENT_ONCE 0x01
-
-typedef struct nan_svc_info {
- bool valid;
- nan_data_path_id ndp_id[NAN_MAX_SVC_INST];
- uint8 svc_hash[WL_NAN_SVC_HASH_LEN]; /* service hash */
- uint8 svc_id;
- uint8 ranging_required;
- uint8 ranging_ind;
- uint8 status;
- uint32 ranging_interval;
- uint32 ingress_limit;
- uint32 egress_limit;
- uint32 flags;
- uint8 tx_match_filter[MAX_MATCH_FILTER_LEN]; /* TX match filter */
- uint8 tx_match_filter_len;
- uint8 svc_range_status; /* For managing any svc range status flags */
-} nan_svc_info_t;
-
-/* NAN Peer DP state */
-typedef enum {
- NAN_PEER_DP_NOT_CONNECTED = 0,
- NAN_PEER_DP_CONNECTING = 1,
- NAN_PEER_DP_CONNECTED = 2
-} nan_peer_dp_state_t;
-
-typedef struct nan_ndp_peer {
- uint8 peer_dp_state;
- uint8 dp_count;
- struct ether_addr peer_addr;
-} nan_ndp_peer_t;
-
-#define INVALID_DISTANCE 0xFFFFFFFF
-typedef struct nan_ranging_inst {
- uint8 range_id;
- nan_range_status_t range_status;
- struct ether_addr peer_addr;
- int range_type;
- uint8 num_svc_ctx;
- nan_svc_info_t *svc_idx[MAX_SUBSCRIBES];
- uint32 prev_distance_mm;
- nan_range_role_t range_role;
- bool in_use;
- uint8 geof_retry_count;
-} nan_ranging_inst_t;
-
-#define DUMP_NAN_RTT_INST(inst) { printf("svc instance ID %d", (inst)->svc_inst_id); \
- printf("Range ID %d", (inst)->range_id); \
- printf("range_status %d", (inst)->range_status); \
- printf("Range Type %d", (inst)->range_type); \
- printf("Peer MAC "MACDBG"\n", MAC2STRDBG((inst)->peer_addr.octet)); \
- }
-
-#define DUMP_NAN_RTT_RPT(rpt) { printf("Range ID %d", (rpt)->rng_id); \
- printf("Distance in MM %d", (rpt)->dist_mm); \
- printf("range_indication %d", (rpt)->indication); \
- printf("Peer MAC "MACDBG"\n", MAC2STRDBG((rpt)->peer_m_addr.octet)); \
- }
-/*
- * Data request Initiator/Responder
- * app/service related info
- */
-typedef struct nan_data_path_app_info {
- uint16 ndp_app_info_len;
- uint8 ndp_app_info[NAN_DP_MAX_APP_INFO_LEN];
-} nan_data_path_app_info_t;
-
-/* QoS configuration */
-typedef enum {
- NAN_DP_CONFIG_NO_QOS = 0,
- NAN_DP_CONFIG_QOS
-} nan_data_path_qos_cfg_t;
-
-/* Data request Responder's response */
-typedef enum {
- NAN_DP_REQUEST_ACCEPT = 0,
- NAN_DP_REQUEST_REJECT
-} nan_data_path_response_code_t;
-
-/* NAN DP security Configuration */
-typedef enum {
- NAN_DP_CONFIG_NO_SECURITY = 0,
- NAN_DP_CONFIG_SECURITY
-} nan_data_path_security_cfg_status_t;
-
-/* NAN Security Key Input Type */
-typedef enum {
- NAN_SECURITY_KEY_INPUT_PMK = 1,
- NAN_SECURITY_KEY_INPUT_PASSPHRASE
-} nan_security_key_input_type;
-
-/* Configuration params of Data request Initiator/Responder */
-typedef struct nan_data_path_cfg {
- /* Status Indicating Security/No Security */
- nan_data_path_security_cfg_status_t security_cfg;
- nan_data_path_qos_cfg_t qos_cfg;
-} nan_data_path_cfg_t;
-
+#define SUPP_EVENT_PREFIX "CTRL-EVENT-"
+#define EVENT_RTT_STATUS_STR "NAN-RTT-STATUS"
+
+#define TIMESTAMP_PREFIX "TSF=" /* timestamp */
+#define AMR_PREFIX "AMR=" /* anchor master rank */
+#define DISTANCE_PREFIX "DIST=" /* distance */
+#define ATTR_PREFIX "ATTR=" /* attribute */
+#define ROLE_PREFIX "ROLE=" /* role */
+#define CHAN_PREFIX "CHAN=" /* channel */
+#define BITMAP_PREFIX "BMAP=" /* bitmap */
+#define DEBUG_PREFIX "DEBUG=" /* debug enable/disable flag */
+#define DW_LEN_PREFIX "DW_LEN=" /* discovery window length */
+#define DW_INT_PREFIX "DW_INT=" /* discovery window interval */
+#define STATUS_PREFIX "STATUS=" /* status */
+#define PUB_ID_PREFIX "PUB_ID=" /* publisher id */
+#define SUB_ID_PREFIX "SUB_ID=" /* subscriber id */
+#define INSTANCE_ID_PREFIX "LOCAL_ID=" /* Instance id */
+#define REMOTE_INSTANCE_ID_PREFIX "PEER_ID=" /* Peer id */
+
+#ifdef NAN_P2P_CONFIG
+#define P2P_IE_PREFIX "P2P_IE=" /* p2p ie id */
+#define IE_EN_PREFIX "ENBLE_IE=" /* enable p2p ie */
+#endif
+#define PUB_PR_PREFIX "PUB_PR=" /* publish period */
+#define PUB_INT_PREFIX "PUB_INT=" /* publish interval (ttl) */
+#define CLUS_ID_PREFIX "CLUS_ID=" /* cluster id */
+#define IF_ADDR_PREFIX "IF_ADDR=" /* IF address */
+#define MAC_ADDR_PREFIX "MAC_ADDR=" /* mac address */
+#define SVC_HASH_PREFIX "SVC_HASH=" /* service hash */
+#define SVC_INFO_PREFIX "SVC_INFO=" /* service information */
+#define HOP_COUNT_PREFIX "HOP_COUNT=" /* hop count */
+#define MASTER_PREF_PREFIX "MASTER_PREF=" /* master preference */
+#define ACTIVE_OPTION "ACTIVE" /* Active Subscribe. */
+#define SOLICITED_OPTION "SOLICITED" /* Solicited Publish. */
+#define UNSOLICITED_OPTION "UNSOLICITED" /* Unsolicited Publish. */
+/* anchor master beacon transmission time */
+#define AMBTT_PREFIX "AMBTT="
+/* passive scan period for cluster merge */
+#define SCAN_PERIOD_PREFIX "SCAN_PERIOD="
+/* passive scan interval for cluster merge */
+#define SCAN_INTERVAL_PREFIX "SCAN_INTERVAL="
+#define BCN_INTERVAL_PREFIX "BCN_INTERVAL="
+
+#define NAN_EVENT_STR_STARTED "NAN-STARTED"
+#define NAN_EVENT_STR_JOINED "NAN-JOINED"
+#define NAN_EVENT_STR_ROLE_CHANGE "NAN-ROLE-CHANGE"
+#define NAN_EVENT_STR_SCAN_COMPLETE "NAN-SCAN-COMPLETE"
+#define NAN_EVENT_STR_SDF_RX "NAN-SDF-RX"
+#define NAN_EVENT_STR_REPLIED "NAN-REPLIED"
+#define NAN_EVENT_STR_TERMINATED "NAN-TERMINATED"
+#define NAN_EVENT_STR_FOLLOWUP_RX "NAN-FOLLOWUP-RX"
+#define NAN_EVENT_STR_STATUS_CHANGE "NAN-STATUS-CHANGE"
+#define NAN_EVENT_STR_MERGED "NAN-MERGED"
+#define NAN_EVENT_STR_STOPPED "NAN-STOPPED"
+#define NAN_EVENT_STR_P2P_RX "NAN-P2P-RX"
+#define NAN_EVENT_STR_WINDOW_BEGUN_P2P "NAN-WINDOW-BEGUN-P2P"
+#define NAN_EVENT_STR_WINDOW_BEGUN_MESH "NAN-WINDOW-BEGUN-MESH"
+#define NAN_EVENT_STR_WINDOW_BEGUN_IBSS "NAN-WINDOW-BEGUN-IBSS"
+#define NAN_EVENT_STR_WINDOW_BEGUN_RANGING "NAN-WINDOW-BEGUN-RANGING"
+#define NAN_EVENT_STR_INVALID "NAN-INVALID"
+
+#ifdef NAN_DP
enum nan_dp_states {
NAN_DP_STATE_DISABLED = 0,
NAN_DP_STATE_ENABLED = 1
};
+#endif /* NAN_DP */
-enum {
- SRF_TYPE_BLOOM_FILTER = 0,
- SRF_TYPE_SEQ_MAC_ADDR = 1
+enum nan_de_event_type {
+ NAN_EVENT_START = 0,
+ NAN_EVENT_JOIN = 1
};
-/* NAN Match indication type */
-typedef enum {
- NAN_MATCH_ALG_MATCH_ONCE = 0,
- NAN_MATCH_ALG_MATCH_CONTINUOUS = 1,
- NAN_MATCH_ALG_MATCH_NEVER = 2
-} nan_match_alg;
-
typedef struct nan_str_data {
- uint32 dlen;
- uint8 *data;
+ u8 *data;
+ u32 dlen;
} nan_str_data_t;
typedef struct nan_mac_list {
- uint32 num_mac_addr;
- uint8 *list;
+ u8 *list;
+ u32 num_mac_addr;
} nan_mac_list_t;
+typedef struct nan_config_attr {
+ char name[NAN_CONFIG_ATTR_MAX_LEN]; /* attribute name */
+ u16 type; /* attribute xtlv type */
+} nan_config_attr_t;
+
typedef struct wl_nan_sid_beacon_tune {
- uint8 sid_enable; /* flag for sending service id in beacon */
- uint8 sid_count; /* Limit for number of SIDs to be included in Beacons */
- uint8 sub_sid_enable; /* flag for sending subscribe service id in beacon */
- uint8 sub_sid_count; /* Limit for number of SUb SIDs to be included in Beacons */
+ u8 sid_enable; /* flag for sending service id in beacon */
+ u8 sid_count; /* Limit for number of SIDs to be included in Beacons */
} wl_nan_sid_beacon_ctrl_t;
-typedef struct nan_avail_cmd_data {
- chanspec_t chanspec[NAN_MAX_SOCIAL_CHANNELS]; /* channel */
- uint32 bmap; /* bitmap */
- uint8 duration;
- uint8 avail_period;
- /* peer mac address reqd for ranging avail type */
- struct ether_addr peer_nmi;
- bool no_config_avail;
-} nan_avail_cmd_data;
-
-typedef struct nan_discover_cmd_data {
- nan_str_data_t svc_info; /* service information */
- nan_str_data_t sde_svc_info; /* extended service information */
- nan_str_data_t svc_hash; /* service hash */
- nan_str_data_t rx_match; /* matching filter rx */
- nan_str_data_t tx_match; /* matching filter tx */
- nan_str_data_t key; /* Security key information */
- nan_str_data_t scid; /* security context information */
- nan_data_path_cfg_t ndp_cfg;
- struct ether_addr mac_addr; /* mac address */
+typedef struct nan_cmd_data {
+ nan_config_attr_t attr; /* set config attributes */
+ nan_str_data_t svc_hash; /* service hash */
+ nan_str_data_t svc_info; /* service information */
+ nan_str_data_t p2p_info; /* p2p information */
+ struct ether_addr mac_addr; /* mac address */
+ struct ether_addr clus_id; /* cluster id */
+ struct ether_addr if_addr; /* if addr */
+ u32 beacon_int; /* beacon interval */
+ u32 ttl; /* time to live */
+ u32 period; /* publish period */
+ u32 bmap; /* bitmap */
+ u32 role; /* role */
+ wl_nan_instance_id_t pub_id; /* publisher id */
+ wl_nan_instance_id_t sub_id; /* subscriber id */
+ wl_nan_instance_id_t local_id; /* Local id */
+ wl_nan_instance_id_t remote_id; /* Remote id */
+ uint32 flags; /* Flag bits */
+ u16 dw_len; /* discovery window length */
+ u16 master_pref; /* master preference */
+ chanspec_t chanspec; /* channel */
+ u8 debug_flag; /* debug enable/disable flag */
+ u8 life_count; /* life count of the instance */
+ u8 srf_type; /* SRF type */
+ u8 srf_include; /* SRF include */
+ u8 use_srf; /* use SRF */
+ nan_str_data_t rx_match; /* matching filter rx */
+ nan_str_data_t tx_match; /* matching filter tx */
nan_mac_list_t mac_list; /* mac list */
- wl_nan_instance_id_t pub_id; /* publisher id */
- wl_nan_instance_id_t sub_id; /* subscriber id */
- wl_nan_instance_id_t local_id; /* Local id */
- wl_nan_instance_id_t remote_id; /* Remote id */
- uint32 status;
- uint32 ttl; /* time to live */
- uint32 period; /* publish period */
- uint32 flags; /* Flag bits */
- bool sde_control_config; /* whether sde_control present */
- uint16 sde_control_flag;
- uint16 token; /* transmit fup token id */
- uint8 csid; /* cipher suite type */
- nan_security_key_input_type key_type; /* cipher suite type */
- uint8 priority; /* Priority of Transmit */
- uint8 life_count; /* life count of the instance */
- uint8 srf_type; /* SRF type */
- uint8 srf_include; /* SRF include */
- uint8 use_srf; /* use SRF */
- uint8 recv_ind_flag; /* Receive Indication Flag */
- uint8 disc_ind_cfg; /* Discovery Ind cfg */
- uint8 ranging_indication;
- uint32 ranging_intvl_msec; /* ranging interval in msec */
- uint32 ingress_limit;
- uint32 egress_limit;
- bool response;
- uint8 service_responder_policy;
- bool svc_update;
-} nan_discover_cmd_data_t;
-
-typedef struct nan_datapath_cmd_data {
- nan_avail_cmd_data avail_params; /* Avail config params */
- nan_str_data_t svc_hash; /* service hash */
- nan_str_data_t svc_info; /* service information */
- nan_str_data_t key; /* security key information */
- nan_data_path_response_code_t rsp_code;
- nan_data_path_id ndp_instance_id;
- nan_data_path_cfg_t ndp_cfg;
- wl_nan_instance_id_t pub_id; /* publisher id */
- nan_security_key_input_type key_type; /* cipher suite type */
- struct ether_addr if_addr; /* if addr */
- struct ether_addr mac_addr; /* mac address */
- chanspec_t chanspec[NAN_MAX_SOCIAL_CHANNELS]; /* channel */
- uint32 status;
- uint32 bmap; /* bitmap */
- uint16 service_instance_id;
- uint16 sde_control_flag;
- uint8 csid; /* cipher suite type */
- uint8 peer_disc_mac_addr[ETHER_ADDR_LEN];
- uint8 peer_ndi_mac_addr[ETHER_ADDR_LEN];
- uint8 num_ndp_instances;
- uint8 duration;
- char ndp_iface[IFNAMSIZ+1];
-} nan_datapath_cmd_data_t;
-
-typedef struct nan_rssi_cmd_data {
- int8 rssi_middle_2dot4g_val;
- int8 rssi_close_2dot4g_val;
- int8 rssi_proximity_2dot4g_val;
- int8 rssi_proximity_5g_val;
- int8 rssi_middle_5g_val;
- int8 rssi_close_5g_val;
- uint16 rssi_window_size; /* Window size over which rssi calculated */
-} nan_rssi_cmd_data_t;
-
-typedef struct election_metrics {
- uint8 random_factor; /* Configured random factor */
- uint8 master_pref; /* configured master preference */
-} election_metrics_t;
-
-typedef struct nan_awake_dws {
- uint8 dw_interval_2g; /* 2G DW interval */
- uint8 dw_interval_5g; /* 5G DW interval */
-} nan_awake_dws_t;
-
-typedef struct nan_config_cmd_data {
- nan_rssi_cmd_data_t rssi_attr; /* RSSI related data */
- election_metrics_t metrics;
- nan_awake_dws_t awake_dws; /* Awake DWs */
- nan_avail_cmd_data avail_params; /* Avail config params */
- nan_str_data_t p2p_info; /* p2p information */
- nan_str_data_t scid; /* security context information */
- struct ether_addr clus_id; /* cluster id */
- struct ether_addr mac_addr; /* mac address */
- wl_nan_sid_beacon_ctrl_t sid_beacon; /* sending service id in beacon */
- chanspec_t chanspec[NAN_MAX_SOCIAL_CHANNELS]; /* channel */
- uint32 status;
- uint32 bmap; /* bitmap */
- uint32 nan_oui; /* configured nan oui */
- uint32 warmup_time; /* Warm up time */
- uint8 duration;
- uint8 hop_count_limit; /* hop count limit */
- uint8 support_5g; /* To decide dual band support */
- uint8 support_2g; /* To decide dual band support */
- uint8 beacon_2g_val;
- uint8 beacon_5g_val;
- uint8 sdf_2g_val;
- uint8 sdf_5g_val;
- uint8 dwell_time[NAN_MAX_SOCIAL_CHANNELS];
- uint8 scan_period[NAN_MAX_SOCIAL_CHANNELS];
- uint8 config_cluster_val;
- uint8 disc_ind_cfg; /* Discovery Ind cfg */
- uint8 csid; /* cipher suite type */
- uint32 nmi_rand_intvl; /* nmi randomization interval */
-} nan_config_cmd_data_t;
+ uint8 hop_count_limit; /* hop count limit */
+ uint8 nan_band; /* nan band <A/B/AUTO> */
+ uint8 support_5g; /* To decide dual band support */
+ uint32 nan_oui; /* configured nan oui */
+#ifdef NAN_DP
+ struct ether_addr data_cluster_id; /* data cluster id */
+#endif /* NAN_DP */
+ wl_nan_sid_beacon_ctrl_t sid_beacon; /* sending service id in beacon */
+} nan_cmd_data_t;
+
+typedef int (nan_func_t)(struct net_device *ndev, struct bcm_cfg80211 *cfg,
+ char *cmd, int size, nan_cmd_data_t *cmd_data);
+
+typedef struct nan_cmd {
+ const char *name; /* command name */
+ nan_func_t *func; /* command handler */
+} nan_cmd_t;
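+
+/*
+ * Illustrative sketch only (not from the original sources): each private
+ * command is expected to be dispatched through a table of nan_cmd_t entries
+ * keyed by command name, e.g.
+ *
+ *	static const nan_cmd_t nan_cmd_tbl[] = {
+ *		{ "NAN_START", wl_cfgnan_start_handler },
+ *		{ "NAN_STOP",  wl_cfgnan_stop_handler },
+ *	};
+ *
+ * wl_cfgnan_cmd_handler() would match the leading token of 'cmd' against
+ * nan_cmd_tbl[i].name and call nan_cmd_tbl[i].func() with the rest of the
+ * command string. The table name and the command strings here are
+ * hypothetical; only the handler declarations below are from the header.
+ */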
typedef struct nan_event_hdr {
- uint32 flags; /* future use */
- uint16 event_subtype;
+ u16 event_subtype;
+ u32 flags; /* future use */
} nan_event_hdr_t;
-typedef struct nan_event_data {
- uint8 svc_name[WL_NAN_SVC_HASH_LEN]; /* service name */
- uint8 enabled; /* NAN Enabled */
- uint8 nan_de_evt_type; /* DE event type */
- uint8 status; /* status */
- uint8 ndp_id; /* data path instance id */
- uint8 security; /* data path security */
- uint8 type;
- uint8 attr_num;
- uint8 reason; /* reason */
- wl_nan_instance_id_t pub_id; /* publisher id */
- wl_nan_instance_id_t sub_id; /* subscriber id */
- wl_nan_instance_id_t local_inst_id; /* local instance id */
- wl_nan_instance_id_t requestor_id; /* Requestor instance id */
- int publish_rssi; /* discovery rssi value */
- int sub_rssi; /* Sub rssi value */
- int fup_rssi; /* followup rssi */
- uint16 attr_list_len; /* sizeof attributes attached to payload */
- nan_str_data_t svc_info; /* service info */
- nan_str_data_t vend_info; /* vendor info */
- nan_str_data_t sde_svc_info; /* extended service information */
- nan_str_data_t tx_match_filter; /* tx match filter */
- nan_str_data_t rx_match_filter; /* rx match filter */
- struct ether_addr local_nmi; /* local nmi */
- struct ether_addr clus_id; /* cluster id */
- struct ether_addr remote_nmi; /* remote nmi */
- struct ether_addr initiator_ndi; /* initiator_ndi */
- struct ether_addr responder_ndi; /* responder_ndi */
- uint16 token; /* transmit fup token id */
- uint8 peer_cipher_suite; /* peer cipher suite type */
- nan_str_data_t scid; /* security context information */
- char nan_reason[NAN_ERROR_STR_LEN]; /* Describe the NAN reason type */
- uint16 sde_control_flag;
- uint8 ranging_result_present;
- uint32 range_measurement_cm;
- uint32 ranging_ind;
- uint8 rng_id;
-} nan_event_data_t;
-
-/*
- * Various NAN Protocol Response code
-*/
-typedef enum {
- /* NAN Protocol Response Codes */
- NAN_STATUS_SUCCESS = 0,
- /* NAN Discovery Engine/Host driver failures */
- NAN_STATUS_INTERNAL_FAILURE = 1,
- /* NAN OTA failures */
- NAN_STATUS_PROTOCOL_FAILURE = 2,
- /* if the publish/subscribe id is invalid */
- NAN_STATUS_INVALID_PUBLISH_SUBSCRIBE_ID = 3,
- /* If we run out of resources allocated */
- NAN_STATUS_NO_RESOURCE_AVAILABLE = 4,
- /* if invalid params are passed */
- NAN_STATUS_INVALID_PARAM = 5,
- /* if the requestor instance id is invalid */
- NAN_STATUS_INVALID_REQUESTOR_INSTANCE_ID = 6,
- /* if the ndp id is invalid */
- NAN_STATUS_INVALID_NDP_ID = 7,
- /* if NAN is enabled when wifi is turned off */
- NAN_STATUS_NAN_NOT_ALLOWED = 8,
- /* if over the air ack is not received */
- NAN_STATUS_NO_OTA_ACK = 9,
- /* If NAN is already enabled and we are try to re-enable the same */
- NAN_STATUS_ALREADY_ENABLED = 10,
- /* If followup message internal queue is full */
- NAN_STATUS_FOLLOWUP_QUEUE_FULL = 11,
- /* Unsupported concurrency session enabled, NAN disabled notified */
- NAN_STATUS_UNSUPPORTED_CONCURRENCY_NAN_DISABLED = 12
-} nan_status_type_t;
-
-typedef struct {
- nan_status_type_t status;
- char nan_reason[NAN_ERROR_STR_LEN]; /* Describe the NAN reason type */
-} nan_hal_status_t;
-
-typedef struct nan_parse_event_ctx {
- struct bcm_cfg80211 *cfg;
- nan_event_data_t *nan_evt_data;
-} nan_parse_event_ctx_t;
-
-/* Capabilities info supported by FW */
-typedef struct nan_hal_capabilities {
- uint32 max_concurrent_nan_clusters;
- uint32 max_publishes;
- uint32 max_subscribes;
- uint32 max_service_name_len;
- uint32 max_match_filter_len;
- uint32 max_total_match_filter_len;
- uint32 max_service_specific_info_len;
- uint32 max_vsa_data_len;
- uint32 max_mesh_data_len;
- uint32 max_ndi_interfaces;
- uint32 max_ndp_sessions;
- uint32 max_app_info_len;
- uint32 max_queued_transmit_followup_msgs;
- uint32 ndp_supported_bands;
- uint32 cipher_suites_supported;
- uint32 max_scid_len;
- bool is_ndp_security_supported;
- uint32 max_sdea_service_specific_info_len;
- uint32 max_subscribe_address;
- uint32 ndpe_attr_supported;
-} nan_hal_capabilities_t;
+typedef struct wl_nan_tlv_data {
+ wl_nan_status_t nstatus; /* status data */
+ wl_nan_disc_params_t params; /* discovery parameters */
+ struct ether_addr mac_addr; /* peer mac address */
+ struct ether_addr clus_id; /* cluster id */
+ nan_str_data_t svc_info; /* service info */
+ nan_str_data_t vend_info; /* vendor info */
+ /* anchor master beacon transmission time */
+ u32 ambtt;
+ u32 dev_role; /* device role */
+ u16 inst_id; /* instance id */
+ u16 peer_inst_id; /* Peer instance id */
+ u16 pub_id; /* publisher id */
+ u16 sub_id; /* subscriber id */
+ u16 master_pref; /* master preference */
+ chanspec_t chanspec; /* channel */
+ u8 amr[NAN_MASTER_RANK_LEN]; /* anchor master rank */
+ u8 svc_name[WL_NAN_SVC_HASH_LEN]; /* service name */
+ u8 hop_count; /* hop count */
+ u8 enabled; /* nan status flag */
+ nan_scan_params_t scan_params; /* scan_param */
+ int reason_code; /* reason code */
+} wl_nan_tlv_data_t;
+
+typedef struct _nan_de_event_data {
+ wl_nan_cfg_status_t *nstatus;
+ u8 nan_de_evt_type;
+} nan_de_event_data_t;
typedef struct _nan_hal_resp {
- uint16 instance_id;
- uint16 subcmd;
- int32 status;
- int32 value;
- /* Identifier for the instance of the NDP */
- uint16 ndp_instance_id;
- /* Publisher NMI */
- uint8 pub_nmi[NAN_MAC_ADDR_LEN];
- /* SVC_HASH */
- uint8 svc_hash[WL_NAN_SVC_HASH_LEN];
- char nan_reason[NAN_ERROR_STR_LEN]; /* Describe the NAN reason type */
- char pad[3];
- nan_hal_capabilities_t capabilities;
+ unsigned short instance_id;
+ unsigned short subcmd;
+ int status;
+ int value;
} nan_hal_resp_t;
-typedef struct wl_nan_iov {
- uint16 nan_iov_len;
- uint8 *nan_iov_buf;
-} wl_nan_iov_t;
-
-#ifdef WL_NAN_DISC_CACHE
+#ifdef NAN_DP
+typedef struct nan_data_path_peer {
+ struct ether_addr addr; /* peer mac address */
+ chanspec_t chanspec; /* Channel Specification */
+} nan_data_path_peer_t;
+#endif /* NAN_DP */
-#define NAN_MAX_CACHE_DISC_RESULT 16
-typedef struct {
- bool valid;
- wl_nan_instance_id_t pub_id;
- wl_nan_instance_id_t sub_id;
- uint8 svc_hash[WL_NAN_SVC_HASH_LEN];
- struct ether_addr peer;
- int8 publish_rssi;
- uint8 peer_cipher_suite;
- uint8 security;
- nan_str_data_t svc_info; /* service info */
- nan_str_data_t vend_info; /* vendor info */
- nan_str_data_t sde_svc_info; /* extended service information */
- nan_str_data_t tx_match_filter; /* tx match filter */
- uint16 sde_control_flag;
-} nan_disc_result_cache;
-
-typedef struct nan_datapath_sec_info {
- nan_data_path_id ndp_instance_id;
- wl_nan_instance_id_t pub_id; /* publisher id */
- struct ether_addr mac_addr; /* mac address */
-} nan_datapath_sec_info_cmd_data_t;
-#endif /* WL_NAN_DISC_CACHE */
-
-typedef enum {
- NAN_RANGING_AUTO_RESPONSE_ENABLE = 0,
- NAN_RANGING_AUTO_RESPONSE_DISABLE
-} NanRangingAutoResponseCfg;
-
-extern int wl_cfgnan_set_vars_cbfn(void *ctx, const uint8 *tlv_buf,
+extern int wl_cfgnan_set_vars_cbfn(void *ctx, uint8 *tlv_buf,
uint16 type, uint16 len);
-extern int wl_cfgnan_config_eventmask(struct net_device *ndev, struct bcm_cfg80211 *cfg,
- uint8 event_ind_flag, bool disable_events);
+extern int wl_cfgnan_enable_events(struct net_device *ndev,
+ struct bcm_cfg80211 *cfg);
extern int wl_cfgnan_start_handler(struct net_device *ndev,
- struct bcm_cfg80211 *cfg, nan_config_cmd_data_t *cmd_data, uint32 nan_attr_mask);
-extern int wl_cfgnan_stop_handler(struct net_device *ndev, struct bcm_cfg80211 *cfg);
-extern void wl_cfgnan_delayed_disable(struct work_struct *work);
-extern int wl_cfgnan_config_handler(struct net_device *ndev,
- struct bcm_cfg80211 *cfg, nan_config_cmd_data_t *cmd_data, uint32 nan_attr_mask);
+ struct bcm_cfg80211 *cfg, char *cmd, int size, nan_cmd_data_t *cmd_data);
+extern int wl_cfgnan_stop_handler(struct net_device *ndev,
+ struct bcm_cfg80211 *cfg, char *cmd, int size, nan_cmd_data_t *cmd_data);
extern int wl_cfgnan_support_handler(struct net_device *ndev,
- struct bcm_cfg80211 *cfg, nan_config_cmd_data_t *cmd_data);
+ struct bcm_cfg80211 *cfg, char *cmd, int size, nan_cmd_data_t *cmd_data);
extern int wl_cfgnan_status_handler(struct net_device *ndev,
- struct bcm_cfg80211 *cfg, nan_config_cmd_data_t *cmd_data);
-extern int wl_cfgnan_publish_handler(struct net_device *ndev,
- struct bcm_cfg80211 *cfg, nan_discover_cmd_data_t *cmd_data);
-extern int wl_cfgnan_subscribe_handler(struct net_device *ndev,
- struct bcm_cfg80211 *cfg, nan_discover_cmd_data_t *cmd_data);
+ struct bcm_cfg80211 *cfg, char *cmd, int size, nan_cmd_data_t *cmd_data);
+extern int wl_cfgnan_pub_handler(struct net_device *ndev,
+ struct bcm_cfg80211 *cfg, char *cmd, int size, nan_cmd_data_t *cmd_data);
+extern int wl_cfgnan_p2p_ie_add_handler(struct net_device *ndev,
+ struct bcm_cfg80211 *cfg, char *cmd, int size, nan_cmd_data_t *cmd_data);
+extern int wl_cfgnan_p2p_ie_enable_handler(struct net_device *ndev,
+ struct bcm_cfg80211 *cfg, char *cmd, int size, nan_cmd_data_t *cmd_data);
+extern int wl_cfgnan_p2p_ie_del_handler(struct net_device *ndev,
+ struct bcm_cfg80211 *cfg, char *cmd, int size, nan_cmd_data_t *cmd_data);
+
+extern int wl_cfgnan_sub_handler(struct net_device *ndev,
+ struct bcm_cfg80211 *cfg, char *cmd, int size, nan_cmd_data_t *cmd_data);
extern int wl_cfgnan_cancel_pub_handler(struct net_device *ndev,
- struct bcm_cfg80211 *cfg, nan_discover_cmd_data_t *cmd_data);
+ struct bcm_cfg80211 *cfg, char *cmd, int size, nan_cmd_data_t *cmd_data);
extern int wl_cfgnan_cancel_sub_handler(struct net_device *ndev,
- struct bcm_cfg80211 *cfg, nan_discover_cmd_data_t *cmd_data);
+ struct bcm_cfg80211 *cfg, char *cmd, int size, nan_cmd_data_t *cmd_data);
extern int wl_cfgnan_transmit_handler(struct net_device *ndev,
- struct bcm_cfg80211 *cfg, nan_discover_cmd_data_t *cmd_data);
+ struct bcm_cfg80211 *cfg, char *cmd, int size, nan_cmd_data_t *cmd_data);
+extern int wl_cfgnan_set_config_handler(struct net_device *ndev,
+ struct bcm_cfg80211 *cfg, char *cmd, int size, nan_cmd_data_t *cmd_data);
+extern int wl_cfgnan_rtt_config_handler(struct net_device *ndev,
+ struct bcm_cfg80211 *cfg, char *cmd, int size, nan_cmd_data_t *cmd_data);
+extern int wl_cfgnan_rtt_find_handler(struct net_device *ndev,
+ struct bcm_cfg80211 *cfg, char *cmd, int size, nan_cmd_data_t *cmd_data);
+#ifdef WL_NAN_DEBUG
+extern int wl_cfgnan_debug_handler(struct net_device *ndev,
+ struct bcm_cfg80211 *cfg, char *cmd, int size, nan_cmd_data_t *cmd_data);
+#endif /* WL_NAN_DEBUG */
+extern int wl_cfgnan_cmd_handler(struct net_device *dev,
+ struct bcm_cfg80211 *cfg, char *cmd, int cmd_len);
extern s32 wl_cfgnan_notify_nan_status(struct bcm_cfg80211 *cfg,
bcm_struct_cfgdev *cfgdev, const wl_event_msg_t *e, void *data);
-extern int wl_cfgnan_generate_inst_id(struct bcm_cfg80211 *cfg, uint8 *p_inst_id);
-extern int wl_cfgnan_remove_inst_id(struct bcm_cfg80211 *cfg, uint8 inst_id);
-extern int bcm_xtlv_size_for_data(int dlen, bcm_xtlv_opts_t opts);
-extern int wl_cfgnan_get_capablities_handler(struct net_device *ndev,
- struct bcm_cfg80211 *cfg, nan_hal_capabilities_t *capabilities);
-
-extern int wl_cfgnan_data_path_iface_create_delete_handler(struct net_device *ndev,
- struct bcm_cfg80211 *cfg, char *ifname, uint16 type, uint8 busstate);
-extern int wl_cfgnan_data_path_request_handler(struct net_device *ndev,
- struct bcm_cfg80211 *cfg, nan_datapath_cmd_data_t *cmd_data,
- uint8 *ndp_instance_id);
-extern int wl_cfgnan_data_path_response_handler(struct net_device *ndev,
- struct bcm_cfg80211 *cfg, nan_datapath_cmd_data_t *cmd_data);
-extern int wl_cfgnan_data_path_end_handler(struct net_device *ndev,
- struct bcm_cfg80211 *cfg, nan_data_path_id ndp_instance_id,
- int *status);
-
-#ifdef WL_NAN_DISC_CACHE
-extern int wl_cfgnan_sec_info_handler(struct bcm_cfg80211 *cfg,
- nan_datapath_sec_info_cmd_data_t *cmd_data, nan_hal_resp_t *nan_req_resp);
-/* ranging quest and response iovar handler */
-extern int wl_cfgnan_trigger_ranging(struct net_device *ndev,
- struct bcm_cfg80211 *cfg, void *event_data, nan_svc_info_t *svc,
- uint8 range_req, bool accept_req);
-#endif /* WL_NAN_DISC_CACHE */
-void wl_cfgnan_disable_cleanup(struct bcm_cfg80211 *cfg);
-void wl_cfgnan_deinit_cleanup(struct bcm_cfg80211 *cfg);
-
-extern bool wl_cfgnan_is_dp_active(struct net_device *ndev);
-extern s32 wl_cfgnan_get_ndi_idx(struct bcm_cfg80211 *cfg);
-extern s32 wl_cfgnan_add_ndi_data(struct bcm_cfg80211 *cfg, s32 idx, char *name);
-extern s32 wl_cfgnan_del_ndi_data(struct bcm_cfg80211 *cfg, char *name);
-extern struct wl_ndi_data *wl_cfgnan_get_ndi_data(struct bcm_cfg80211 *cfg, char *name);
-extern int wl_cfgnan_disable(struct bcm_cfg80211 *cfg);
-extern nan_ranging_inst_t *wl_cfgnan_get_ranging_inst(struct bcm_cfg80211 *cfg,
- struct ether_addr *peer, nan_range_role_t range_role);
-extern nan_ranging_inst_t* wl_cfgnan_check_for_ranging(struct bcm_cfg80211 *cfg,
- struct ether_addr *peer);
-#ifdef RTT_SUPPORT
-extern int wl_cfgnan_trigger_geofencing_ranging(struct net_device *dev,
- struct ether_addr *peer_addr);
-#endif /* RTT_SUPPORT */
-extern int wl_cfgnan_suspend_geofence_rng_session(struct net_device *ndev,
- struct ether_addr *peer, int suspend_reason, u8 cancel_flags);
-extern nan_ndp_peer_t* wl_cfgnan_data_get_peer(struct bcm_cfg80211 *cfg,
- struct ether_addr *peer_addr);
-bool wl_cfgnan_data_dp_exists(struct bcm_cfg80211 *cfg);
-bool wl_cfgnan_data_dp_exists_with_peer(struct bcm_cfg80211 *cfg,
- struct ether_addr *peer_addr);
-extern s32 wl_cfgnan_delete_ndp(struct bcm_cfg80211 *cfg, struct net_device *nan_ndev);
-void wl_cfgnan_data_set_peer_dp_state(struct bcm_cfg80211 *cfg,
- struct ether_addr *peer_addr, nan_peer_dp_state_t state);
-#ifdef RTT_SUPPORT
-int wl_cfgnan_terminate_directed_rtt_sessions(struct net_device *ndev, struct bcm_cfg80211 *cfg);
-void wl_cfgnan_reset_geofence_ranging(struct bcm_cfg80211 *cfg,
- nan_ranging_inst_t * rng_inst, int sched_reason);
-void wl_cfgnan_process_range_report(struct bcm_cfg80211 *cfg,
- wl_nan_ev_rng_rpt_ind_t *range_res);
-#endif /* RTT_SUPPORT */
-int wl_cfgnan_cancel_ranging(struct net_device *ndev,
- struct bcm_cfg80211 *cfg, uint8 range_id, uint8 flags, uint32 *status);
-bool wl_cfgnan_ranging_allowed(struct bcm_cfg80211 *cfg);
-uint8 wl_cfgnan_cancel_rng_responders(struct net_device *ndev,
- struct bcm_cfg80211 *cfg);
-extern int wl_cfgnan_get_status(struct net_device *ndev, wl_nan_conf_status_t *nan_status);
-extern void wl_cfgnan_update_dp_info(struct bcm_cfg80211 *cfg, bool add,
- nan_data_path_id ndp_id);
-nan_status_type_t wl_cfgvendor_brcm_to_nanhal_status(int32 vendor_status);
+extern s32 wl_cfgnan_notify_proxd_status(struct bcm_cfg80211 *cfg,
+ bcm_struct_cfgdev *cfgdev, const wl_event_msg_t *e, void *data);
+extern int wl_cfgnan_generate_inst_id(struct bcm_cfg80211 *cfg,
+ uint8 inst_type, uint8 *p_inst_id);
+extern int wl_cfgnan_validate_inst_id(struct bcm_cfg80211 *cfg,
+ uint8 inst_id);
+extern int wl_cfgnan_remove_inst_id(struct bcm_cfg80211 *cfg,
+ uint8 inst_id);
+extern int wl_cfgnan_get_inst_type(struct bcm_cfg80211 *cfg,
+ uint8 inst_id, uint8 *inst_type);
typedef enum {
- NAN_ATTRIBUTE_HEADER = 100,
- NAN_ATTRIBUTE_HANDLE = 101,
- NAN_ATTRIBUTE_TRANSAC_ID = 102,
-
+ NAN_ATTRIBUTE_HEADER = 100,
+ NAN_ATTRIBUTE_HANDLE,
+ NAN_ATTRIBUTE_TRANSAC_ID,
/* NAN Enable request attributes */
- NAN_ATTRIBUTE_2G_SUPPORT = 103,
- NAN_ATTRIBUTE_5G_SUPPORT = 104,
- NAN_ATTRIBUTE_CLUSTER_LOW = 105,
- NAN_ATTRIBUTE_CLUSTER_HIGH = 106,
- NAN_ATTRIBUTE_SID_BEACON = 107,
- NAN_ATTRIBUTE_SYNC_DISC_2G_BEACON = 108,
- NAN_ATTRIBUTE_SYNC_DISC_5G_BEACON = 109,
- NAN_ATTRIBUTE_SDF_2G_SUPPORT = 110,
- NAN_ATTRIBUTE_SDF_5G_SUPPORT = 111,
- NAN_ATTRIBUTE_RSSI_CLOSE = 112,
- NAN_ATTRIBUTE_RSSI_MIDDLE = 113,
- NAN_ATTRIBUTE_RSSI_PROXIMITY = 114,
- NAN_ATTRIBUTE_HOP_COUNT_LIMIT = 115,
- NAN_ATTRIBUTE_RANDOM_TIME = 116,
- NAN_ATTRIBUTE_MASTER_PREF = 117,
- NAN_ATTRIBUTE_PERIODIC_SCAN_INTERVAL = 118,
-
- /* Nan Publish/Subscribe request attributes */
- NAN_ATTRIBUTE_PUBLISH_ID = 119,
- NAN_ATTRIBUTE_TTL = 120,
- NAN_ATTRIBUTE_PERIOD = 121,
- NAN_ATTRIBUTE_REPLIED_EVENT_FLAG = 122,
- NAN_ATTRIBUTE_PUBLISH_TYPE = 123,
- NAN_ATTRIBUTE_TX_TYPE = 124,
- NAN_ATTRIBUTE_PUBLISH_COUNT = 125,
- NAN_ATTRIBUTE_SERVICE_NAME_LEN = 126,
- NAN_ATTRIBUTE_SERVICE_NAME = 127,
- NAN_ATTRIBUTE_SERVICE_SPECIFIC_INFO_LEN = 128,
- NAN_ATTRIBUTE_SERVICE_SPECIFIC_INFO = 129,
- NAN_ATTRIBUTE_RX_MATCH_FILTER_LEN = 130,
- NAN_ATTRIBUTE_RX_MATCH_FILTER = 131,
- NAN_ATTRIBUTE_TX_MATCH_FILTER_LEN = 132,
- NAN_ATTRIBUTE_TX_MATCH_FILTER = 133,
- NAN_ATTRIBUTE_SUBSCRIBE_ID = 134,
- NAN_ATTRIBUTE_SUBSCRIBE_TYPE = 135,
- NAN_ATTRIBUTE_SERVICERESPONSEFILTER = 136,
- NAN_ATTRIBUTE_SERVICERESPONSEINCLUDE = 137,
- NAN_ATTRIBUTE_USESERVICERESPONSEFILTER = 138,
- NAN_ATTRIBUTE_SSIREQUIREDFORMATCHINDICATION = 139,
- NAN_ATTRIBUTE_SUBSCRIBE_MATCH = 140,
- NAN_ATTRIBUTE_SUBSCRIBE_COUNT = 141,
- NAN_ATTRIBUTE_MAC_ADDR = 142,
- NAN_ATTRIBUTE_MAC_ADDR_LIST = 143,
- NAN_ATTRIBUTE_MAC_ADDR_LIST_NUM_ENTRIES = 144,
- NAN_ATTRIBUTE_PUBLISH_MATCH = 145,
-
+ NAN_ATTRIBUTE_5G_SUPPORT,
+ NAN_ATTRIBUTE_CLUSTER_LOW,
+ NAN_ATTRIBUTE_CLUSTER_HIGH,
+ NAN_ATTRIBUTE_SID_BEACON,
+ NAN_ATTRIBUTE_SYNC_DISC_5G,
+ NAN_ATTRIBUTE_RSSI_CLOSE,
+ NAN_ATTRIBUTE_RSSI_MIDDLE,
+ NAN_ATTRIBUTE_RSSI_PROXIMITY,
+ NAN_ATTRIBUTE_HOP_COUNT_LIMIT,
+ NAN_ATTRIBUTE_RANDOM_TIME,
+ NAN_ATTRIBUTE_MASTER_PREF,
+ NAN_ATTRIBUTE_PERIODIC_SCAN_INTERVAL,
+ /* Nan Publish/Subscribe request Attributes */
+ NAN_ATTRIBUTE_PUBLISH_ID,
+ NAN_ATTRIBUTE_TTL,
+ NAN_ATTRIBUTE_PERIOD,
+ NAN_ATTRIBUTE_REPLIED_EVENT_FLAG,
+ NAN_ATTRIBUTE_PUBLISH_TYPE,
+ NAN_ATTRIBUTE_TX_TYPE,
+ NAN_ATTRIBUTE_PUBLISH_COUNT,
+ NAN_ATTRIBUTE_SERVICE_NAME_LEN,
+ NAN_ATTRIBUTE_SERVICE_NAME,
+ NAN_ATTRIBUTE_SERVICE_SPECIFIC_INFO_LEN,
+ NAN_ATTRIBUTE_SERVICE_SPECIFIC_INFO,
+ NAN_ATTRIBUTE_RX_MATCH_FILTER_LEN,
+ NAN_ATTRIBUTE_RX_MATCH_FILTER,
+ NAN_ATTRIBUTE_TX_MATCH_FILTER_LEN,
+ NAN_ATTRIBUTE_TX_MATCH_FILTER,
+ NAN_ATTRIBUTE_SUBSCRIBE_ID,
+ NAN_ATTRIBUTE_SUBSCRIBE_TYPE,
+ NAN_ATTRIBUTE_SERVICERESPONSEFILTER,
+ NAN_ATTRIBUTE_SERVICERESPONSEINCLUDE,
+ NAN_ATTRIBUTE_USESERVICERESPONSEFILTER,
+ NAN_ATTRIBUTE_SSIREQUIREDFORMATCHINDICATION,
+ NAN_ATTRIBUTE_SUBSCRIBE_MATCH,
+ NAN_ATTRIBUTE_SUBSCRIBE_COUNT,
+ NAN_ATTRIBUTE_MAC_ADDR,
+ NAN_ATTRIBUTE_MAC_ADDR_LIST,
+ NAN_ATTRIBUTE_MAC_ADDR_LIST_NUM_ENTRIES,
+ NAN_ATTRIBUTE_PUBLISH_MATCH,
/* Nan Event attributes */
- NAN_ATTRIBUTE_ENABLE_STATUS = 146,
- NAN_ATTRIBUTE_JOIN_STATUS = 147,
- NAN_ATTRIBUTE_ROLE = 148,
- NAN_ATTRIBUTE_MASTER_RANK = 149,
- NAN_ATTRIBUTE_ANCHOR_MASTER_RANK = 150,
- NAN_ATTRIBUTE_CNT_PEND_TXFRM = 151,
- NAN_ATTRIBUTE_CNT_BCN_TX = 152,
- NAN_ATTRIBUTE_CNT_BCN_RX = 153,
- NAN_ATTRIBUTE_CNT_SVC_DISC_TX = 154,
- NAN_ATTRIBUTE_CNT_SVC_DISC_RX = 155,
- NAN_ATTRIBUTE_AMBTT = 156,
- NAN_ATTRIBUTE_CLUSTER_ID = 157,
- NAN_ATTRIBUTE_INST_ID = 158,
- NAN_ATTRIBUTE_OUI = 159,
- NAN_ATTRIBUTE_STATUS = 160,
- NAN_ATTRIBUTE_DE_EVENT_TYPE = 161,
- NAN_ATTRIBUTE_MERGE = 162,
- NAN_ATTRIBUTE_IFACE = 163,
- NAN_ATTRIBUTE_CHANNEL = 164,
- NAN_ATTRIBUTE_PEER_ID = 165,
- NAN_ATTRIBUTE_NDP_ID = 167,
- NAN_ATTRIBUTE_SECURITY = 168,
- NAN_ATTRIBUTE_QOS = 169,
- NAN_ATTRIBUTE_RSP_CODE = 170,
- NAN_ATTRIBUTE_INST_COUNT = 171,
- NAN_ATTRIBUTE_PEER_DISC_MAC_ADDR = 172,
- NAN_ATTRIBUTE_PEER_NDI_MAC_ADDR = 173,
- NAN_ATTRIBUTE_IF_ADDR = 174,
- NAN_ATTRIBUTE_WARMUP_TIME = 175,
- NAN_ATTRIBUTE_RECV_IND_CFG = 176,
- NAN_ATTRIBUTE_RSSI_CLOSE_5G = 177,
- NAN_ATTRIBUTE_RSSI_MIDDLE_5G = 178,
- NAN_ATTRIBUTE_RSSI_PROXIMITY_5G = 179,
- NAN_ATTRIBUTE_CONNMAP = 180,
- NAN_ATTRIBUTE_24G_CHANNEL = 181,
- NAN_ATTRIBUTE_5G_CHANNEL = 182,
- NAN_ATTRIBUTE_DWELL_TIME = 183,
- NAN_ATTRIBUTE_SCAN_PERIOD = 184,
- NAN_ATTRIBUTE_RSSI_WINDOW_SIZE = 185,
- NAN_ATTRIBUTE_CONF_CLUSTER_VAL = 186,
- NAN_ATTRIBUTE_AVAIL_BIT_MAP = 187,
- NAN_ATTRIBUTE_ENTRY_CONTROL = 188,
- NAN_ATTRIBUTE_CIPHER_SUITE_TYPE = 189,
- NAN_ATTRIBUTE_KEY_TYPE = 190,
- NAN_ATTRIBUTE_KEY_LEN = 191,
- NAN_ATTRIBUTE_SCID = 192,
- NAN_ATTRIBUTE_SCID_LEN = 193,
- NAN_ATTRIBUTE_SDE_CONTROL_CONFIG_DP = 194,
- NAN_ATTRIBUTE_SDE_CONTROL_SECURITY = 195,
- NAN_ATTRIBUTE_SDE_CONTROL_DP_TYPE = 196,
- NAN_ATTRIBUTE_SDE_CONTROL_RANGE_SUPPORT = 197,
- NAN_ATTRIBUTE_NO_CONFIG_AVAIL = 198,
- NAN_ATTRIBUTE_2G_AWAKE_DW = 199,
- NAN_ATTRIBUTE_5G_AWAKE_DW = 200,
- NAN_ATTRIBUTE_RANGING_INTERVAL = 201,
- NAN_ATTRIBUTE_RANGING_INDICATION = 202,
- NAN_ATTRIBUTE_RANGING_INGRESS_LIMIT = 203,
- NAN_ATTRIBUTE_RANGING_EGRESS_LIMIT = 204,
- NAN_ATTRIBUTE_RANGING_AUTO_ACCEPT = 205,
- NAN_ATTRIBUTE_RANGING_RESULT = 206,
- NAN_ATTRIBUTE_DISC_IND_CFG = 207,
- NAN_ATTRIBUTE_RSSI_THRESHOLD_FLAG = 208,
- NAN_ATTRIBUTE_KEY_DATA = 209,
- NAN_ATTRIBUTE_SDEA_SERVICE_SPECIFIC_INFO_LEN = 210,
- NAN_ATTRIBUTE_SDEA_SERVICE_SPECIFIC_INFO = 211,
- NAN_ATTRIBUTE_REASON = 212,
- NAN_ATTRIBUTE_DWELL_TIME_5G = 215,
- NAN_ATTRIBUTE_SCAN_PERIOD_5G = 216,
- NAN_ATTRIBUTE_SVC_RESPONDER_POLICY = 217,
- NAN_ATTRIBUTE_EVENT_MASK = 218,
- NAN_ATTRIBUTE_SUB_SID_BEACON = 219,
- NAN_ATTRIBUTE_RANDOMIZATION_INTERVAL = 220,
- NAN_ATTRIBUTE_CMD_RESP_DATA = 221
+ NAN_ATTRIBUTE_ENABLE_STATUS,
+ NAN_ATTRIBUTE_JOIN_STATUS,
+ NAN_ATTRIBUTE_ROLE,
+ NAN_ATTRIBUTE_CHANNEL, /* channel */
+ NAN_ATTRIBUTE_PEER_ID,
+ NAN_ATTRIBUTE_INST_ID,
+ NAN_ATTRIBUTE_OUI,
+ NAN_ATTRIBUTE_DATA_IF_ADD, /* NAN DP Interface Address */
+ NAN_ATTRIBUTE_STATUS,
+ NAN_ATTRIBUTE_DE_EVENT_TYPE
} NAN_ATTRIBUTE;
-enum geofence_suspend_reason {
- RTT_GEO_SUSPN_HOST_DIR_RTT_TRIG = 0,
- RTT_GEO_SUSPN_PEER_RTT_TRIGGER = 1,
- RTT_GEO_SUSPN_HOST_NDP_TRIGGER = 2,
- RTT_GEO_SUSPN_PEER_NDP_TRIGGER = 3,
- RTT_GEO_SUSPN_RANGE_RES_REPORTED = 4
-};
+#define NAN_BLOOM_LENGTH_DEFAULT 240
+#define NAN_SRF_MAX_MAC (NAN_BLOOM_LENGTH_DEFAULT / ETHER_ADDR_LEN)
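+/*
+ * Worked example (illustrative, assuming the usual ETHER_ADDR_LEN of 6):
+ * 240 / 6 = 40, i.e. at most 40 MAC addresses fit in a sequential-MAC
+ * service response filter of the default bloom-filter length.
+ */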
+
+#ifdef NAN_DP
+int wl_cfgnan_data_path_open_handler(struct net_device *ndev,
+ struct bcm_cfg80211 *cfg, char *cmd, int size, nan_cmd_data_t *cmd_data);
+int wl_cfgnan_data_path_close_handler(struct net_device *ndev,
+ struct bcm_cfg80211 *cfg, char *cmd, int size, nan_cmd_data_t *cmd_data);
+#endif /* NAN_DP */
#endif /* _wl_cfgnan_h_ */
/*
* Linux cfgp2p driver
*
- * Copyright (C) 1999-2019, Broadcom.
- *
+ * Copyright (C) 1999-2017, Broadcom Corporation
+ *
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
* under the terms of the GNU General Public License version 2 (the "GPL"),
* available at http://www.broadcom.com/licenses/GPLv2.php, with the
* following added to such license:
- *
+ *
* As a special exception, the copyright holders of this software give you
* permission to link this software with independent modules, and to copy and
* distribute the resulting executable under terms of your choice, provided that
* the license of that module. An independent module is a module which is not
* derived from this software. The special exception does not apply to any
* modifications of the software.
- *
+ *
* Notwithstanding the above, under no circumstances may you combine this
* software in any way with any other Broadcom software provided under a license
* other than the GPL, without Broadcom's express prior written consent.
*
* <<Broadcom-WL-IPTag/Open:>>
*
- * $Id: wl_cfgp2p.c 819430 2019-05-13 11:38:06Z $
+ * $Id: wl_cfgp2p.c 699163 2017-05-12 05:18:23Z $
*
*/
#include <typedefs.h>
#include <asm/uaccess.h>
#include <bcmutils.h>
-#include <bcmstdlib_s.h>
#include <bcmendian.h>
#include <ethernet.h>
#include <802.11.h>
#include <wl_cfg80211.h>
#include <wl_cfgp2p.h>
-#include <wl_cfgscan.h>
#include <wldev_common.h>
#include <wl_android.h>
#include <dngl_stats.h>
#include <dhdioctl.h>
#include <wlioctl.h>
#include <dhd_cfg80211.h>
-#include <dhd_bus.h>
#include <dhd_config.h>
+#if defined(BCMPCIE) && defined(DHD_FW_COREDUMP)
+extern int dhd_bus_mem_dump(dhd_pub_t *dhd);
+#endif /* BCMPCIE && DHD_FW_COREDUMP */
+
static s8 scanparambuf[WLC_IOCTL_SMLEN];
-static bool wl_cfgp2p_has_ie(const bcm_tlv_t *ie, const u8 **tlvs, u32 *tlvs_len,
- const u8 *oui, u32 oui_len, u8 type);
+static bool
+wl_cfgp2p_has_ie(u8 *ie, u8 **tlvs, u32 *tlvs_len, const u8 *oui, u32 oui_len, u8 type);
static s32 wl_cfgp2p_cancel_listen(struct bcm_cfg80211 *cfg, struct net_device *ndev,
struct wireless_dev *wdev, bool notify);
};
#endif /* WL_ENABLE_P2P_IF */
+
bool wl_cfgp2p_is_pub_action(void *frame, u32 frame_len)
{
wifi_p2p_pub_act_frame_t *pact_frm;
bool wl_cfgp2p_find_gas_subtype(u8 subtype, u8* data, u32 len)
{
- const bcm_tlv_t *ie = (bcm_tlv_t *)data;
- const u8 *frame = NULL;
+ bcm_tlv_t *ie = (bcm_tlv_t *)data;
+ u8 *frame = NULL;
u16 id, flen;
/* Skip the first ANQP element, if the frame has an ANQP element */
- ie = bcm_parse_tlvs(ie, len, DOT11_MNG_ADVERTISEMENT_ID);
+ ie = bcm_parse_tlvs(ie, (int)len, DOT11_MNG_ADVERTISEMENT_ID);
if (ie == NULL)
return false;
- frame = (const uint8 *)ie + ie->len + TLV_HDR_LEN + GAS_RESP_LEN;
+ frame = (uint8 *)ie + ie->len + TLV_HDR_LEN + GAS_RESP_LEN;
id = ((u16) (((frame)[1] << 8) | (frame)[0]));
flen = ((u16) (((frame)[3] << 8) | (frame)[2]));
" channel=%d\n", (tx)? "TX" : "RX", channel));
}
+
}
}
s32
wl_cfgp2p_init_priv(struct bcm_cfg80211 *cfg)
{
-#ifdef WL_P2P_USE_RANDMAC
- struct ether_addr primary_mac;
-#endif /* WL_P2P_USE_RANDMAC */
- cfg->p2p = MALLOCZ(cfg->osh, sizeof(struct p2p_info));
- if (cfg->p2p == NULL) {
+ if (!(cfg->p2p = kzalloc(sizeof(struct p2p_info), GFP_KERNEL))) {
CFGP2P_ERR(("struct p2p_info allocation failed\n"));
return -ENOMEM;
}
-#ifdef WL_P2P_USE_RANDMAC
- get_primary_mac(cfg, &primary_mac);
- wl_cfgp2p_generate_bss_mac(cfg, &primary_mac);
-#endif /* WL_P2P_USE_RANDMAC */
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 15, 0)
+ cfg->p2p->cfg = cfg;
+#endif
wl_to_p2p_bss_ndev(cfg, P2PAPI_BSSCFG_PRIMARY) = bcmcfg_to_prmry_ndev(cfg);
wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_PRIMARY) = 0;
wl_to_p2p_bss_ndev(cfg, P2PAPI_BSSCFG_DEVICE) = NULL;
void
wl_cfgp2p_deinit_priv(struct bcm_cfg80211 *cfg)
{
- CFGP2P_INFO(("In\n"));
+ CFGP2P_ERR(("In\n"));
if (cfg->p2p) {
- MFREE(cfg->osh, cfg->p2p, sizeof(struct p2p_info));
+ kfree(cfg->p2p);
cfg->p2p = NULL;
}
cfg->p2p_supported = 0;
wl_cfgp2p_set_firm_p2p(struct bcm_cfg80211 *cfg)
{
struct net_device *ndev = bcmcfg_to_prmry_ndev(cfg);
+ struct ether_addr null_eth_addr = { { 0, 0, 0, 0, 0, 0 } };
s32 ret = BCME_OK;
s32 val = 0;
-#ifdef WL_P2P_USE_RANDMAC
- struct ether_addr *p2p_dev_addr = wl_to_p2p_bss_macaddr(cfg, P2PAPI_BSSCFG_DEVICE);
-#else
- struct ether_addr null_eth_addr = { { 0, 0, 0, 0, 0, 0 } };
- struct ether_addr *p2p_dev_addr = &null_eth_addr;
-#endif // endif
/* Do we have to check whether APSTA is enabled or not ? */
ret = wldev_iovar_getint(ndev, "apsta", &val);
if (ret < 0) {
* After Initializing firmware, we have to set current mac address to
* firmware for P2P device address
*/
- ret = wldev_iovar_setbuf_bsscfg(ndev, "p2p_da_override", p2p_dev_addr,
- sizeof(*p2p_dev_addr), cfg->ioctl_buf, WLC_IOCTL_MAXLEN, 0, &cfg->ioctl_buf_sync);
+ ret = wldev_iovar_setbuf_bsscfg(ndev, "p2p_da_override", &null_eth_addr,
+ sizeof(null_eth_addr), cfg->ioctl_buf, WLC_IOCTL_MAXLEN, 0, &cfg->ioctl_buf_sync);
if (ret && ret != BCME_UNSUPPORTED) {
CFGP2P_ERR(("failed to update device address ret %d\n", ret));
}
err = wldev_iovar_setbuf(ndev, "p2p_ifadd", &ifreq, sizeof(ifreq),
cfg->ioctl_buf, WLC_IOCTL_MAXLEN, &cfg->ioctl_buf_sync);
if (unlikely(err < 0)) {
- CFGP2P_ERR(("'cfg p2p_ifadd' error %d\n", err));
+ printk("'cfg p2p_ifadd' error %d\n", err);
return err;
}
s32 ret;
struct net_device *netdev = bcmcfg_to_prmry_ndev(cfg);
- CFGP2P_INFO(("------ cfg p2p_ifdis "MACDBG" dev->ifindex:%d \n",
- MAC2STRDBG(mac->octet), netdev->ifindex));
+ CFGP2P_INFO(("------primary idx %d : cfg p2p_ifdis "MACDBG"\n",
+ netdev->ifindex, MAC2STRDBG(mac->octet)));
ret = wldev_iovar_setbuf(netdev, "p2p_ifdis", mac, sizeof(*mac),
cfg->ioctl_buf, WLC_IOCTL_MAXLEN, &cfg->ioctl_buf_sync);
if (unlikely(ret < 0)) {
- CFGP2P_ERR(("'cfg p2p_ifdis' error %d\n", ret));
+ printk("'cfg p2p_ifdis' error %d\n", ret);
}
return ret;
}
wl_cfgp2p_ifdel(struct bcm_cfg80211 *cfg, struct ether_addr *mac)
{
s32 ret;
-#ifdef WL_DISABLE_HE_P2P
- s32 bssidx = 0;
-#endif /* WL_DISABLE_HE_P2P */
struct net_device *netdev = bcmcfg_to_prmry_ndev(cfg);
- CFGP2P_ERR(("------ cfg p2p_ifdel "MACDBG" dev->ifindex:%d\n",
- MAC2STRDBG(mac->octet), netdev->ifindex));
+ CFGP2P_ERR(("------primary idx %d : cfg p2p_ifdel "MACDBG"\n",
+ netdev->ifindex, MAC2STRDBG(mac->octet)));
ret = wldev_iovar_setbuf(netdev, "p2p_ifdel", mac, sizeof(*mac),
cfg->ioctl_buf, WLC_IOCTL_MAXLEN, &cfg->ioctl_buf_sync);
if (unlikely(ret < 0)) {
- CFGP2P_ERR(("'cfg p2p_ifdel' error %d\n", ret));
+ printk("'cfg p2p_ifdel' error %d\n", ret);
}
-#ifdef WL_DISABLE_HE_P2P
- if ((bssidx = wl_get_bssidx_by_wdev(cfg, netdev->ieee80211_ptr)) < 0) {
- WL_ERR(("Find index failed\n"));
- ret = BCME_ERROR;
- return ret;
- }
- WL_DBG(("Enabling back HE for P2P\n"));
- wl_cfg80211_set_he_mode(netdev, cfg, bssidx, WL_IF_TYPE_P2P_DISC, TRUE);
- if (ret < 0) {
- WL_ERR(("failed to set he features, error=%d\n", ret));
- }
-#endif /* WL_DISABLE_HE_P2P */
-
return ret;
}
err = wldev_iovar_setbuf(netdev, "p2p_ifupd", &ifreq, sizeof(ifreq),
cfg->ioctl_buf, WLC_IOCTL_MAXLEN, &cfg->ioctl_buf_sync);
if (unlikely(err < 0)) {
- CFGP2P_ERR(("'cfg p2p_ifupd' error %d\n", err));
+ printk("'cfg p2p_ifupd' error %d\n", err);
} else if (if_type == WL_P2P_IF_GO) {
cfg->p2p->p2p_go_count++;
}
return err;
}
+
/* Get the index of a created P2P BSS.
* Parameters:
* @mac : MAC address of the created BSS
if (ret == 0) {
memcpy(index, getbuf, sizeof(s32));
- CFGP2P_DBG(("---cfg p2p_if ==> %d\n", *index));
+ CFGP2P_INFO(("---cfg p2p_if ==> %d\n", *index));
}
return ret;
/* Put the WL driver into P2P Listen Mode to respond to P2P probe reqs */
discovery_mode.state = mode;
- discovery_mode.chspec = wl_ch_host_to_driver(channel);
+ discovery_mode.chspec = wl_ch_host_to_driver(cfg, bssidx, channel);
discovery_mode.dwell = listen_ms;
ret = wldev_iovar_setbuf_bsscfg(dev, "p2p_state", &discovery_mode,
sizeof(discovery_mode), cfg->ioctl_buf, WLC_IOCTL_MAXLEN,
s32 bssidx = 0;
s32 ret = BCME_OK;
- struct net_device *ndev = bcmcfg_to_prmry_ndev(cfg);
- BCM_REFERENCE(ndev);
CFGP2P_DBG(("enter\n"));
if (wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_DEVICE) > 0) {
CFGP2P_ERR(("do nothing, already initialized\n"));
- goto exit;
+ return ret;
}
ret = wl_cfgp2p_set_discovery(cfg, 1);
if (ret < 0) {
CFGP2P_ERR(("set discover error\n"));
- goto exit;
+ return ret;
}
/* Enable P2P Discovery in the WL Driver */
ret = wl_cfgp2p_get_disc_idx(cfg, &bssidx);
+
if (ret < 0) {
- goto exit;
+ return ret;
}
-
/* For the CFG80211 case, check whether the p2p_discovery interface has an allocated p2p_wdev */
if (!cfg->p2p_wdev) {
CFGP2P_ERR(("p2p_wdev is NULL.\n"));
- ret = -ENODEV;
- goto exit;
+ return BCME_NODEVICE;
}
-
- /* Once p2p also starts using interface_create iovar, the ifidx may change.
- * so that time, the ifidx returned in WLC_E_IF should be used for populating
- * the netinfo
- */
- ret = wl_alloc_netinfo(cfg, NULL, cfg->p2p_wdev, WL_IF_TYPE_STA, 0, bssidx, 0);
+ /* Make an entry in the netinfo */
+ ret = wl_alloc_netinfo(cfg, NULL, cfg->p2p_wdev, WL_MODE_BSS, 0, bssidx);
if (unlikely(ret)) {
- goto exit;
+ return ret;
}
+
wl_to_p2p_bss_ndev(cfg, P2PAPI_BSSCFG_DEVICE) =
wl_to_p2p_bss_ndev(cfg, P2PAPI_BSSCFG_PRIMARY);
wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_DEVICE) = bssidx;
wl_cfgp2p_set_discovery(cfg, 0);
wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_DEVICE) = 0;
wl_to_p2p_bss_ndev(cfg, P2PAPI_BSSCFG_DEVICE) = NULL;
- ret = 0;
- goto exit;
- }
-
- /* Clear our saved WPS and P2P IEs for the discovery BSS */
- wl_cfg80211_clear_p2p_disc_ies(cfg);
-exit:
- if (ret) {
- wl_flush_fw_log_buffer(ndev, FW_LOGSET_MASK_ALL);
+ return 0;
}
return ret;
}
}
/* Clear our saved WPS and P2P IEs for the discovery BSS */
- wl_cfg80211_clear_p2p_disc_ies(cfg);
+ wl_cfg80211_clear_per_bss_ies(cfg, bssidx);
/* Set the discovery state to SCAN */
wl_cfgp2p_set_p2p_mode(cfg, WL_P2P_DISC_ST_SCAN, 0, 0,
{
s32 ret = BCME_OK;
s32 bssidx;
- bcm_struct_cfgdev *cfgdev;
CFGP2P_DBG(("enter\n"));
- mutex_lock(&cfg->if_sync);
-#ifdef WL_IFACE_MGMT
- if ((ret = wl_cfg80211_handle_if_role_conflict(cfg, WL_IF_TYPE_P2P_DISC)) != BCME_OK) {
- WL_ERR(("secondary iface is active, p2p enable discovery is not supported\n"));
- goto exit;
- }
-#endif /* WL_IFACE_MGMT */
-
if (wl_get_p2p_status(cfg, DISCOVERY_ON)) {
- CFGP2P_DBG((" DISCOVERY is already initialized, we have nothing to do\n"));
+ CFGP2P_INFO((" DISCOVERY is already initialized, we have nothing to do\n"));
goto set_ie;
}
bssidx = wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_DEVICE);
} else if ((bssidx = wl_get_bssidx_by_wdev(cfg, cfg->p2p_wdev)) < 0) {
WL_ERR(("Find p2p index from wdev(%p) failed\n", cfg->p2p_wdev));
- ret = BCME_ERROR;
- goto exit;
+ return BCME_ERROR;
}
-#if defined(WL_CFG80211_P2P_DEV_IF)
- /* For 3.8+ kernels, pass p2p discovery wdev */
- cfgdev = cfg->p2p_wdev;
-#else
- /* Prior to 3.8 kernel, there is no netless p2p, so pass p2p0 ndev */
- cfgdev = ndev_to_cfgdev(dev);
-#endif /* WL_CFG80211_P2P_DEV_IF */
- ret = wl_cfg80211_set_mgmt_vndr_ies(cfg, cfgdev,
- bssidx, VNDR_IE_PRBREQ_FLAG, ie, ie_len);
+ ret = wl_cfg80211_set_mgmt_vndr_ies(cfg, ndev_to_cfgdev(dev),
+ bssidx,
+ VNDR_IE_PRBREQ_FLAG, ie, ie_len);
+
if (unlikely(ret < 0)) {
CFGP2P_ERR(("set probreq ie occurs error %d\n", ret));
goto exit;
}
}
exit:
- if (ret) {
- wl_flush_fw_log_buffer(dev, FW_LOGSET_MASK_ALL);
- }
- mutex_unlock(&cfg->if_sync);
return ret;
}
}
#ifdef DHD_IFDEBUG
- WL_ERR(("%s: bssidx: %d\n",
+ WL_ERR(("%s: (cfg)->p2p->bss[type].bssidx: %d\n",
__FUNCTION__, (cfg)->p2p->bss[P2PAPI_BSSCFG_DEVICE].bssidx));
-#endif // endif
+#endif
bssidx = wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_DEVICE);
if (bssidx <= 0) {
CFGP2P_ERR((" do nothing, not initialized\n"));
/* Do a scan abort to stop the driver's scan engine in case it is still
* waiting out an action frame tx dwell time.
*/
-#ifdef NOT_YET
- if (wl_get_p2p_status(cfg, SCANNING)) {
- p2pwlu_scan_abort(hdl, FALSE);
- }
-#endif // endif
wl_clr_p2p_status(cfg, DISCOVERY_ON);
ret = wl_cfgp2p_deinit_discovery(cfg);
return ret;
}
-/* Scan parameters */
-#define P2PAPI_SCAN_NPROBES 1
-#define P2PAPI_SCAN_DWELL_TIME_MS 80
-#define P2PAPI_SCAN_SOCIAL_DWELL_TIME_MS 40
-#define P2PAPI_SCAN_HOME_TIME_MS 60
-#define P2PAPI_SCAN_NPROBS_TIME_MS 30
-#define P2PAPI_SCAN_AF_SEARCH_DWELL_TIME_MS 100
s32
-wl_cfgp2p_escan(struct bcm_cfg80211 *cfg, struct net_device *dev, u16 active_scan,
+wl_cfgp2p_escan(struct bcm_cfg80211 *cfg, struct net_device *dev, u16 active,
u32 num_chans, u16 *channels,
s32 search_state, u16 action, u32 bssidx, struct ether_addr *tx_dst_addr,
p2p_scan_purpose_t p2p_scan_purpose)
s8 *memblk;
wl_p2p_scan_t *p2p_params;
wl_escan_params_t *eparams;
- wl_escan_params_v2_t *eparams_v2;
wlc_ssid_t ssid;
- u32 sync_id = 0;
- s32 nprobes = 0;
- s32 active_time = 0;
- const struct ether_addr *mac_addr = NULL;
- u32 scan_type = 0;
- struct net_device *pri_dev = NULL;
-
- pri_dev = wl_to_p2p_bss_ndev(cfg, P2PAPI_BSSCFG_PRIMARY);
+ /* Scan parameters */
+#define P2PAPI_SCAN_NPROBES 1
+#define P2PAPI_SCAN_DWELL_TIME_MS 80
+#define P2PAPI_SCAN_SOCIAL_DWELL_TIME_MS 40
+#define P2PAPI_SCAN_HOME_TIME_MS 60
+#define P2PAPI_SCAN_NPROBS_TIME_MS 30
+#define P2PAPI_SCAN_AF_SEARCH_DWELL_TIME_MS 100
+
+ struct net_device *pri_dev = wl_to_p2p_bss_ndev(cfg, P2PAPI_BSSCFG_PRIMARY);
/* Allocate scan params which need space for 3 channels and 0 ssids */
- if (cfg->scan_params_v2) {
- eparams_size = (WL_SCAN_PARAMS_V2_FIXED_SIZE +
- OFFSETOF(wl_escan_params_v2_t, params)) +
- num_chans * sizeof(eparams->params.channel_list[0]);
- } else {
- eparams_size = (WL_SCAN_PARAMS_FIXED_SIZE +
- OFFSETOF(wl_escan_params_t, params)) +
- num_chans * sizeof(eparams->params.channel_list[0]);
- }
+ eparams_size = (WL_SCAN_PARAMS_FIXED_SIZE +
+ OFFSETOF(wl_escan_params_t, params)) +
+ num_chans * sizeof(eparams->params.channel_list[0]);
memsize = sizeof(wl_p2p_scan_t) + eparams_size;
memblk = scanparambuf;
memsize, sizeof(scanparambuf)));
return -1;
}
- bzero(memblk, memsize);
- bzero(cfg->ioctl_buf, WLC_IOCTL_MAXLEN);
+ memset(memblk, 0, memsize);
+ memset(cfg->ioctl_buf, 0, WLC_IOCTL_MAXLEN);
if (search_state == WL_P2P_DISC_ST_SEARCH) {
/*
* If we are in SEARCH STATE, we don't need to set the SSID explicitly
wl_cfgp2p_set_p2p_mode(cfg, WL_P2P_DISC_ST_SEARCH, 0, 0, bssidx);
/* use null ssid */
ssid.SSID_len = 0;
- bzero(&ssid.SSID, sizeof(ssid.SSID));
+ memset(&ssid.SSID, 0, sizeof(ssid.SSID));
} else if (search_state == WL_P2P_DISC_ST_SCAN) {
/* SCAN STATE 802.11 SCAN
* WFD Supplicant has p2p_find command with (type=progressive, type= full)
wl_cfgp2p_set_p2p_mode(cfg, WL_P2P_DISC_ST_SCAN, 0, 0, bssidx);
/* use wild card ssid */
ssid.SSID_len = WL_P2P_WILDCARD_SSID_LEN;
- bzero(&ssid.SSID, sizeof(ssid.SSID));
+ memset(&ssid.SSID, 0, sizeof(ssid.SSID));
memcpy(&ssid.SSID, WL_P2P_WILDCARD_SSID, WL_P2P_WILDCARD_SSID_LEN);
} else {
CFGP2P_ERR((" invalid search state %d\n", search_state));
return -1;
}
+
/* Fill in the P2P scan structure at the start of the iovar param block */
p2p_params = (wl_p2p_scan_t*) memblk;
p2p_params->type = 'E';
+ /* Fill in the Scan structure that follows the P2P scan structure */
+ eparams = (wl_escan_params_t*) (p2p_params + 1);
+ eparams->params.bss_type = DOT11_BSSTYPE_ANY;
+ if (active)
+ eparams->params.scan_type = DOT11_SCANTYPE_ACTIVE;
+ else
+ eparams->params.scan_type = DOT11_SCANTYPE_PASSIVE;
- if (!active_scan) {
- scan_type = WL_SCANFLAGS_PASSIVE;
- }
+ if (tx_dst_addr == NULL)
+ memcpy(&eparams->params.bssid, ðer_bcast, ETHER_ADDR_LEN);
+ else
+ memcpy(&eparams->params.bssid, tx_dst_addr, ETHER_ADDR_LEN);
- if (tx_dst_addr == NULL) {
- mac_addr = ðer_bcast;
- } else {
- mac_addr = tx_dst_addr;
- }
+ if (ssid.SSID_len)
+ memcpy(&eparams->params.ssid, &ssid, sizeof(wlc_ssid_t));
+
+ eparams->params.home_time = htod32(P2PAPI_SCAN_HOME_TIME_MS);
switch (p2p_scan_purpose) {
case P2P_SCAN_SOCIAL_CHANNEL:
- active_time = P2PAPI_SCAN_SOCIAL_DWELL_TIME_MS;
+ eparams->params.active_time = htod32(P2PAPI_SCAN_SOCIAL_DWELL_TIME_MS);
break;
case P2P_SCAN_AFX_PEER_NORMAL:
case P2P_SCAN_AFX_PEER_REDUCED:
- active_time = P2PAPI_SCAN_AF_SEARCH_DWELL_TIME_MS;
+ eparams->params.active_time = htod32(P2PAPI_SCAN_AF_SEARCH_DWELL_TIME_MS);
break;
case P2P_SCAN_CONNECT_TRY:
- active_time = WL_SCAN_CONNECT_DWELL_TIME_MS;
+ eparams->params.active_time = htod32(WL_SCAN_CONNECT_DWELL_TIME_MS);
break;
- default:
- active_time = wl_get_drv_status_all(cfg, CONNECTED) ?
- -1 : P2PAPI_SCAN_DWELL_TIME_MS;
+ default:
+ if (wl_get_drv_status_all(cfg, CONNECTED))
+ eparams->params.active_time = -1;
+ else
+ eparams->params.active_time = htod32(P2PAPI_SCAN_DWELL_TIME_MS);
break;
}
- if (p2p_scan_purpose == P2P_SCAN_CONNECT_TRY) {
- nprobes = active_time /
- WL_SCAN_JOIN_PROBE_INTERVAL_MS;
- } else {
- nprobes = active_time /
- P2PAPI_SCAN_NPROBS_TIME_MS;
- }
+ if (p2p_scan_purpose == P2P_SCAN_CONNECT_TRY)
+ eparams->params.nprobes = htod32(eparams->params.active_time /
+ WL_SCAN_JOIN_PROBE_INTERVAL_MS);
+ else
+ eparams->params.nprobes = htod32((eparams->params.active_time /
+ P2PAPI_SCAN_NPROBS_TIME_MS));
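+ /*
+  * Illustrative arithmetic (not from the original sources): with the
+  * 80 ms default dwell this gives 80 / 30 = 2 probes per channel, and a
+  * 40 ms social-channel dwell gives 40 / 30 = 1 probe.
+  */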
- if (nprobes <= 0) {
- nprobes = 1;
- }
- wl_escan_set_sync_id(sync_id, cfg);
- /* Fill in the Scan structure that follows the P2P scan structure */
- if (cfg->scan_params_v2) {
- eparams_v2 = (wl_escan_params_v2_t*) (p2p_params + 1);
- eparams_v2->version = htod16(ESCAN_REQ_VERSION_V2);
- eparams_v2->action = htod16(action);
- eparams_v2->params.version = htod16(WL_SCAN_PARAMS_VERSION_V2);
- eparams_v2->params.length = htod16(sizeof(wl_scan_params_v2_t));
- eparams_v2->params.bss_type = DOT11_BSSTYPE_ANY;
- eparams_v2->params.scan_type = htod32(scan_type);
- (void)memcpy_s(&eparams_v2->params.bssid, ETHER_ADDR_LEN, mac_addr, ETHER_ADDR_LEN);
- eparams_v2->params.home_time = htod32(P2PAPI_SCAN_HOME_TIME_MS);
- eparams_v2->params.active_time = htod32(active_time);
- eparams_v2->params.nprobes = htod32(nprobes);
- eparams_v2->params.passive_time = htod32(-1);
- eparams_v2->sync_id = sync_id;
- for (i = 0; i < num_chans; i++) {
- eparams_v2->params.channel_list[i] =
- wl_ch_host_to_driver(channels[i]);
- }
- eparams_v2->params.channel_num = htod32((0 << WL_SCAN_PARAMS_NSSID_SHIFT) |
- (num_chans & WL_SCAN_PARAMS_COUNT_MASK));
- if (ssid.SSID_len)
- (void)memcpy_s(&eparams_v2->params.ssid,
- sizeof(wlc_ssid_t), &ssid, sizeof(wlc_ssid_t));
- sync_id = eparams_v2->sync_id;
- } else {
- eparams = (wl_escan_params_t*) (p2p_params + 1);
- eparams->version = htod32(ESCAN_REQ_VERSION);
- eparams->action = htod16(action);
- eparams->params.bss_type = DOT11_BSSTYPE_ANY;
- eparams->params.scan_type = htod32(scan_type);
- (void)memcpy_s(&eparams->params.bssid, ETHER_ADDR_LEN, mac_addr, ETHER_ADDR_LEN);
- eparams->params.home_time = htod32(P2PAPI_SCAN_HOME_TIME_MS);
- eparams->params.active_time = htod32(active_time);
- eparams->params.nprobes = htod32(nprobes);
- eparams->params.passive_time = htod32(-1);
- eparams->sync_id = sync_id;
- for (i = 0; i < num_chans; i++) {
- eparams->params.channel_list[i] =
- wl_ch_host_to_driver(channels[i]);
- }
- eparams->params.channel_num = htod32((0 << WL_SCAN_PARAMS_NSSID_SHIFT) |
- (num_chans & WL_SCAN_PARAMS_COUNT_MASK));
- if (ssid.SSID_len)
- (void)memcpy_s(&eparams->params.ssid,
- sizeof(wlc_ssid_t), &ssid, sizeof(wlc_ssid_t));
- sync_id = eparams->sync_id;
- }
+ if (eparams->params.nprobes <= 0)
+ eparams->params.nprobes = 1;
+ CFGP2P_DBG(("nprobes # %d, active_time %d\n",
+ eparams->params.nprobes, eparams->params.active_time));
+ eparams->params.passive_time = htod32(-1);
+ eparams->params.channel_num = htod32((0 << WL_SCAN_PARAMS_NSSID_SHIFT) |
+ (num_chans & WL_SCAN_PARAMS_COUNT_MASK));
+ for (i = 0; i < num_chans; i++) {
+ eparams->params.channel_list[i] = wl_ch_host_to_driver(cfg, bssidx, channels[i]);
+ }
+ eparams->version = htod32(ESCAN_REQ_VERSION);
+ eparams->action = htod16(action);
+ wl_escan_set_sync_id(eparams->sync_id, cfg);
wl_escan_set_type(cfg, WL_SCANTYPE_P2P);
+ CFGP2P_INFO(("SCAN CHANNELS : "));
- CFGP2P_DBG(("nprobes:%d active_time:%d\n", nprobes, active_time));
- CFGP2P_DBG(("SCAN CHANNELS : "));
- CFGP2P_DBG(("%d", channels[0]));
- for (i = 1; i < num_chans; i++) {
- CFGP2P_DBG((",%d", channels[i]));
+ for (i = 0; i < num_chans; i++) {
+ if (i == 0) CFGP2P_INFO(("%d", channels[i]));
+ else CFGP2P_INFO((",%d", channels[i]));
}
- CFGP2P_DBG(("\n"));
+
+ CFGP2P_INFO(("\n"));
ret = wldev_iovar_setbuf_bsscfg(pri_dev, "p2p_scan",
memblk, memsize, cfg->ioctl_buf, WLC_IOCTL_MAXLEN, bssidx, &cfg->ioctl_buf_sync);
- WL_MSG(dev->name, "P2P_SEARCH sync ID: %d, bssidx: %d\n", sync_id, bssidx);
- if (ret == BCME_OK) {
+ printf("%s: P2P_SEARCH sync ID: %d, bssidx: %d\n", __FUNCTION__, eparams->sync_id, bssidx);
+ if (ret == BCME_OK)
wl_set_p2p_status(cfg, SCANNING);
- }
return ret;
}
}
}
- default_chan_list = (u16 *)MALLOCZ(cfg->osh, chan_cnt * sizeof(*default_chan_list));
+ default_chan_list = kzalloc(chan_cnt * sizeof(*default_chan_list), GFP_KERNEL);
if (default_chan_list == NULL) {
CFGP2P_ERR(("channel list allocation failed \n"));
ret = -ENOMEM;
ret = wl_cfgp2p_escan(cfg, ndev, true, chan_cnt,
default_chan_list, WL_P2P_DISC_ST_SEARCH,
WL_SCAN_ACTION_START, bssidx, NULL, p2p_scan_purpose);
- MFREE(cfg->osh, default_chan_list, chan_cnt * sizeof(*default_chan_list));
+ kfree(default_chan_list);
exit:
return ret;
}
/* Check whether the given IE looks like WFA WFDisplay IE. */
#ifndef WFA_OUI_TYPE_WFD
#define WFA_OUI_TYPE_WFD 0x0a /* WiFi Display OUI TYPE */
-#endif // endif
+#endif
#define wl_cfgp2p_is_wfd_ie(ie, tlvs, len) wl_cfgp2p_has_ie(ie, tlvs, len, \
(const uint8 *)WFA_OUI, WFA_OUI_LEN, WFA_OUI_TYPE_WFD)
+
/* Is any of the tlvs the expected entry? If
* not update the tlvs buffer pointer/length.
*/
static bool
-wl_cfgp2p_has_ie(const bcm_tlv_t *ie, const u8 **tlvs, u32 *tlvs_len,
- const u8 *oui, u32 oui_len, u8 type)
+wl_cfgp2p_has_ie(u8 *ie, u8 **tlvs, u32 *tlvs_len, const u8 *oui, u32 oui_len, u8 type)
{
/* If the contents match the OUI and the type */
- if (ie->len >= oui_len + 1 &&
- !bcmp(ie->data, oui, oui_len) &&
- type == ie->data[oui_len]) {
+ if (ie[TLV_LEN_OFF] >= oui_len + 1 &&
+ !bcmp(&ie[TLV_BODY_OFF], oui, oui_len) &&
+ type == ie[TLV_BODY_OFF + oui_len]) {
return TRUE;
}
+ if (tlvs == NULL)
+ return FALSE;
/* point to the next ie */
- if (tlvs != NULL) {
- bcm_tlv_buffer_advance_past(ie, tlvs, tlvs_len);
- }
+ ie += ie[TLV_LEN_OFF] + TLV_HDR_LEN;
+ /* calculate the length of the rest of the buffer */
+ *tlvs_len -= (int)(ie - *tlvs);
+ /* update the pointer to the start of the buffer */
+ *tlvs = ie;
return FALSE;
}
-const wpa_ie_fixed_t *
-wl_cfgp2p_find_wpaie(const u8 *parse, u32 len)
+wpa_ie_fixed_t *
+wl_cfgp2p_find_wpaie(u8 *parse, u32 len)
{
- const bcm_tlv_t *ie;
+ bcm_tlv_t *ie;
- while ((ie = bcm_parse_tlvs(parse, len, DOT11_MNG_VS_ID))) {
- if (wl_cfgp2p_is_wpa_ie(ie, &parse, &len)) {
- return (const wpa_ie_fixed_t *)ie;
+ while ((ie = bcm_parse_tlvs(parse, (u32)len, DOT11_MNG_VS_ID))) {
+ if (wl_cfgp2p_is_wpa_ie((u8*)ie, &parse, &len)) {
+ return (wpa_ie_fixed_t *)ie;
}
}
return NULL;
}
-const wpa_ie_fixed_t *
-wl_cfgp2p_find_wpsie(const u8 *parse, u32 len)
+wpa_ie_fixed_t *
+wl_cfgp2p_find_wpsie(u8 *parse, u32 len)
{
- const bcm_tlv_t *ie;
+ bcm_tlv_t *ie;
- while ((ie = bcm_parse_tlvs(parse, len, DOT11_MNG_VS_ID))) {
- if (wl_cfgp2p_is_wps_ie(ie, &parse, &len)) {
- return (const wpa_ie_fixed_t *)ie;
+ while ((ie = bcm_parse_tlvs(parse, (u32)len, DOT11_MNG_VS_ID))) {
+ if (wl_cfgp2p_is_wps_ie((u8*)ie, &parse, &len)) {
+ return (wpa_ie_fixed_t *)ie;
}
}
return NULL;
}
wifi_p2p_ie_t *
-wl_cfgp2p_find_p2pie(const u8 *parse, u32 len)
+wl_cfgp2p_find_p2pie(u8 *parse, u32 len)
{
bcm_tlv_t *ie;
- while ((ie = bcm_parse_tlvs(parse, len, DOT11_MNG_VS_ID))) {
- if (wl_cfgp2p_is_p2p_ie(ie, &parse, &len)) {
+ while ((ie = bcm_parse_tlvs(parse, (int)len, DOT11_MNG_VS_ID))) {
+ if (wl_cfgp2p_is_p2p_ie((uint8*)ie, &parse, &len)) {
return (wifi_p2p_ie_t *)ie;
}
}
return NULL;
}
-const wifi_wfd_ie_t *
-wl_cfgp2p_find_wfdie(const u8 *parse, u32 len)
+wifi_wfd_ie_t *
+wl_cfgp2p_find_wfdie(u8 *parse, u32 len)
{
- const bcm_tlv_t *ie;
+ bcm_tlv_t *ie;
- while ((ie = bcm_parse_tlvs(parse, len, DOT11_MNG_VS_ID))) {
- if (wl_cfgp2p_is_wfd_ie(ie, &parse, &len)) {
- return (const wifi_wfd_ie_t *)ie;
+ while ((ie = bcm_parse_tlvs(parse, (int)len, DOT11_MNG_VS_ID))) {
+ if (wl_cfgp2p_is_wfd_ie((uint8*)ie, &parse, &len)) {
+ return (wifi_wfd_ie_t *)ie;
}
}
return NULL;
}
-
u32
wl_cfgp2p_vndr_ie(struct bcm_cfg80211 *cfg, u8 *iebuf, s32 pktflag,
- s8 *oui, s32 ie_id, const s8 *data, s32 datalen, const s8* add_del_cmd)
+ s8 *oui, s32 ie_id, s8 *data, s32 datalen, const s8* add_del_cmd)
{
vndr_ie_setbuf_t hdr; /* aligned temporary vndr_ie buffer header */
s32 iecount;
/* Validate the pktflag parameter */
if ((pktflag & ~(VNDR_IE_BEACON_FLAG | VNDR_IE_PRBRSP_FLAG |
VNDR_IE_ASSOCRSP_FLAG | VNDR_IE_AUTHRSP_FLAG |
- VNDR_IE_PRBREQ_FLAG | VNDR_IE_ASSOCREQ_FLAG |
- VNDR_IE_DISASSOC_FLAG))) {
+ VNDR_IE_PRBREQ_FLAG | VNDR_IE_ASSOCREQ_FLAG))) {
CFGP2P_ERR(("p2pwl_vndr_ie: Invalid packet flag 0x%x\n", pktflag));
return -1;
}
/* Copy the vndr_ie SET command ("add"/"del") to the buffer */
- strlcpy(hdr.cmd, add_del_cmd, sizeof(hdr.cmd));
+ strncpy(hdr.cmd, add_del_cmd, VNDR_IE_CMD_LEN - 1);
+ hdr.cmd[VNDR_IE_CMD_LEN - 1] = '\0';
/* Set the IE count - the buffer contains only 1 IE */
iecount = htod32(1);
memcpy((void *)&hdr.vndr_ie_buffer.iecount, &iecount, sizeof(s32));
- /* For vendor ID DOT11_MNG_ID_EXT_ID, need to set pkt flag to VNDR_IE_CUSTOM_FLAG */
- if (ie_id == DOT11_MNG_ID_EXT_ID) {
- pktflag = pktflag | VNDR_IE_CUSTOM_FLAG;
- }
-
/* Copy packet flags that indicate which packets will contain this IE */
pktflag = htod32(pktflag);
memcpy((void *)&hdr.vndr_ie_buffer.vndr_ie_list[0].pktflag, &pktflag,
CFGP2P_ERR((" argument is invalid\n"));
goto exit;
}
- if (!cfg->p2p) {
- CFGP2P_ERR(("p2p if does not exist\n"));
- goto exit;
- }
+
for (i = 0; i < P2PAPI_BSSCFG_MAX; i++) {
if (bssidx == wl_to_p2p_bss_bssidx(cfg, i)) {
*type = i;
* so lets do it from thread context.
*/
void
-wl_cfgp2p_listen_expired(unsigned long data)
+wl_cfgp2p_listen_expired(
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 15, 0)
+ struct timer_list *t
+#else
+ ulong data
+#endif
+)
{
wl_event_msg_t msg;
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 15, 0)
+ struct p2p_info *p2p = from_timer(p2p, t, listen_timer);
+ struct bcm_cfg80211 *cfg = p2p->cfg;
+#else
struct bcm_cfg80211 *cfg = (struct bcm_cfg80211 *) data;
+#endif
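+ /*
+  * On 4.15+ kernels the timer callback only receives the timer_list
+  * pointer, so from_timer() recovers the enclosing p2p_info and uses the
+  * cfg back-pointer stored in wl_cfgp2p_init_priv(); older kernels still
+  * pass cfg directly via the timer's data argument.
+  */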
struct net_device *ndev;
CFGP2P_DBG((" Enter\n"));
/* Irrespective of whether timer is running or not, reset
* the LISTEN state.
*/
-#ifdef NOT_YET
- wl_cfgp2p_set_p2p_mode(cfg, WL_P2P_DISC_ST_SCAN, 0, 0,
- wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_DEVICE));
-#endif /* NOT_YET */
if (timer_pending(&cfg->p2p->listen_timer)) {
del_timer_sync(&cfg->p2p->listen_timer);
if (notify) {
{
#define EXTRA_DELAY_TIME 100
s32 ret = BCME_OK;
- timer_list_compat_t *_timer;
+ struct timer_list *_timer;
s32 extra_delay;
struct net_device *netdev = bcmcfg_to_prmry_ndev(cfg);
return ret;
}
+
s32
wl_cfgp2p_discover_enable_search(struct bcm_cfg80211 *cfg, u8 enable)
{
if (wl_get_drv_status_all(cfg, SENDING_ACT_FRM)) {
if (event_type == WLC_E_ACTION_FRAME_COMPLETE) {
- CFGP2P_DBG((" WLC_E_ACTION_FRAME_COMPLETE is received : %d\n", status));
+ CFGP2P_INFO((" WLC_E_ACTION_FRAME_COMPLETE is received : %d\n", status));
if (status == WLC_E_STATUS_SUCCESS) {
wl_set_p2p_status(cfg, ACTION_TX_COMPLETED);
- CFGP2P_ACTION(("TX actfrm : ACK\n"));
+ CFGP2P_DBG(("WLC_E_ACTION_FRAME_COMPLETE : ACK\n"));
if (!cfg->need_wait_afrx && cfg->af_sent_channel) {
CFGP2P_DBG(("no need to wait next AF.\n"));
wl_stop_wait_next_action_frame(cfg, ndev, bsscfgidx);
}
else if (!wl_get_p2p_status(cfg, ACTION_TX_COMPLETED)) {
wl_set_p2p_status(cfg, ACTION_TX_NOACK);
- if (status == WLC_E_STATUS_SUPPRESS) {
- CFGP2P_ACTION(("TX actfrm : SUPPRES\n"));
- } else {
- CFGP2P_ACTION(("TX actfrm : NO ACK\n"));
- }
+ CFGP2P_INFO(("WLC_E_ACTION_FRAME_COMPLETE : NO ACK\n"));
wl_stop_wait_next_action_frame(cfg, ndev, bsscfgidx);
}
} else {
- CFGP2P_DBG((" WLC_E_ACTION_FRAME_OFFCHAN_COMPLETE is received,"
+ CFGP2P_INFO((" WLC_E_ACTION_FRAME_OFFCHAN_COMPLETE is received,"
"status : %d\n", status));
if (wl_get_drv_status_all(cfg, SENDING_ACT_FRM))
s32 timeout = 0;
wl_eventmsg_buf_t buf;
- CFGP2P_DBG(("\n"));
- CFGP2P_DBG(("channel : %u , dwell time : %u\n",
+
+ CFGP2P_INFO(("\n"));
+ CFGP2P_INFO(("channel : %u , dwell time : %u\n",
af_params->channel, af_params->dwell_time));
wl_clr_p2p_status(cfg, ACTION_TX_COMPLETED);
cfg->ioctl_buf, WLC_IOCTL_MAXLEN, bssidx, &cfg->ioctl_buf_sync);
if (ret < 0) {
- CFGP2P_ACTION(("TX actfrm : ERROR\n"));
+ CFGP2P_ERR((" sending action frame is failed\n"));
goto exit;
}
msecs_to_jiffies(af_params->dwell_time + WL_AF_TX_EXTRA_TIME_MAX));
if (timeout >= 0 && wl_get_p2p_status(cfg, ACTION_TX_COMPLETED)) {
- CFGP2P_DBG(("tx action frame operation is completed\n"));
+ CFGP2P_INFO(("tx action frame operation is completed\n"));
ret = BCME_OK;
} else if (ETHER_ISBCAST(&cfg->afx_hdl->tx_dst_addr)) {
- CFGP2P_DBG(("bcast tx action frame operation is completed\n"));
+ CFGP2P_INFO(("bcast tx action frame operation is completed\n"));
ret = BCME_OK;
} else {
ret = BCME_ERROR;
- CFGP2P_DBG(("tx action frame operation is failed\n"));
+ CFGP2P_INFO(("tx action frame operation is failed\n"));
}
/* clear status bit for action tx */
wl_clr_p2p_status(cfg, ACTION_TX_COMPLETED);
wl_clr_p2p_status(cfg, ACTION_TX_NOACK);
exit:
- CFGP2P_DBG((" via act frame iovar : status = %d\n", ret));
+ CFGP2P_INFO((" via act frame iovar : status = %d\n", ret));
bzero(&buf, sizeof(wl_eventmsg_buf_t));
wl_cfg80211_add_to_eventbuffer(&buf, WLC_E_ACTION_FRAME_OFF_CHAN_COMPLETE, false);
struct ether_addr *mac_addr = wl_to_p2p_bss_macaddr(cfg, P2PAPI_BSSCFG_DEVICE);
struct ether_addr *int_addr;
-#ifdef WL_P2P_USE_RANDMAC
- dhd_generate_mac_addr(mac_addr);
-#else
memcpy(mac_addr, primary_addr, sizeof(struct ether_addr));
mac_addr->octet[0] |= 0x02;
WL_DBG(("P2P Discovery address:"MACDBG "\n", MAC2STRDBG(mac_addr->octet)));
-#endif /* WL_P2P_USE_RANDMAC */
int_addr = wl_to_p2p_bss_macaddr(cfg, P2PAPI_BSSCFG_CONNECTION1);
memcpy(int_addr, mac_addr, sizeof(struct ether_addr));
{
struct net_device *ndev = NULL;
struct wireless_dev *wdev = NULL;
+ s32 i = 0, index = -1;
#if defined(WL_CFG80211_P2P_DEV_IF)
ndev = bcmcfg_to_prmry_ndev(cfg);
}
#endif /* WL_CFG80211_P2P_DEV_IF !defined(KEEP_WIFION_OPTION) */
+ for (i = 0; i < P2PAPI_BSSCFG_MAX; i++) {
+ index = wl_to_p2p_bss_bssidx(cfg, i);
+ if (index != WL_INVALID)
+ wl_cfg80211_clear_per_bss_ies(cfg, index);
+ }
wl_cfgp2p_deinit_priv(cfg);
return 0;
}
int iovar_len = sizeof(dongle_noa);
CFGP2P_DBG((" Enter\n"));
- bzero(&dongle_noa, sizeof(dongle_noa));
+ memset(&dongle_noa, 0, sizeof(dongle_noa));
if (wl_cfgp2p_vif_created(cfg)) {
cfg->p2p->noa.desc[0].start = 0;
return BCME_ERROR;
}
- memset_s(&csa_arg, sizeof(csa_arg), 0, sizeof(csa_arg));
csa_arg.mode = DOT11_CSA_MODE_ADVISORY;
csa_arg.count = P2P_ECSA_CNT;
csa_arg.reg = 0;
int bw;
int ret = BCME_OK;
+
sscanf(buf, "%3d", &bw);
if (bw == 0) {
algo = 0;
return BCME_OK;
}
-const u8 *
-wl_cfgp2p_retreive_p2pattrib(const void *buf, u8 element_id)
+u8 *
+wl_cfgp2p_retreive_p2pattrib(void *buf, u8 element_id)
{
- const wifi_p2p_ie_t *ie = NULL;
+ wifi_p2p_ie_t *ie = NULL;
u16 len = 0;
- const u8 *subel;
+ u8 *subel;
u8 subelt_id;
u16 subelt_len;
return 0;
}
- ie = (const wifi_p2p_ie_t*) buf;
+ ie = (wifi_p2p_ie_t*) buf;
len = ie->len;
/* Point subel to the P2P IE's subelt field.
#define P2P_GROUP_CAPAB_GO_BIT 0x01
-const u8*
-wl_cfgp2p_find_attrib_in_all_p2p_Ies(const u8 *parse, u32 len, u32 attrib)
+u8*
+wl_cfgp2p_find_attrib_in_all_p2p_Ies(u8 *parse, u32 len, u32 attrib)
{
bcm_tlv_t *ie;
- const u8* pAttrib;
- uint ie_len;
+ u8* pAttrib;
- CFGP2P_DBG(("Starting parsing parse %p attrib %d remaining len %d ", parse, attrib, len));
- ie_len = len;
- while ((ie = bcm_parse_tlvs(parse, ie_len, DOT11_MNG_VS_ID))) {
- if (wl_cfgp2p_is_p2p_ie(ie, &parse, &ie_len) == TRUE) {
+ CFGP2P_INFO(("Starting parsing parse %p attrib %d remaining len %d ", parse, attrib, len));
+ while ((ie = bcm_parse_tlvs(parse, (int)len, DOT11_MNG_VS_ID))) {
+ if (wl_cfgp2p_is_p2p_ie((uint8*)ie, &parse, &len) == TRUE) {
/* Have the P2p ie. Now check for attribute */
- if ((pAttrib = wl_cfgp2p_retreive_p2pattrib(ie, attrib)) != NULL) {
- CFGP2P_DBG(("P2P attribute %d was found at parse %p",
+ if ((pAttrib = wl_cfgp2p_retreive_p2pattrib(parse, attrib)) != NULL) {
+ CFGP2P_INFO(("P2P attribute %d was found at parse %p",
attrib, parse));
return pAttrib;
}
else {
- /* move to next IE */
- bcm_tlv_buffer_advance_past(ie, &parse, &ie_len);
-
+ parse += (ie->len + TLV_HDR_LEN);
+ len -= (ie->len + TLV_HDR_LEN);
CFGP2P_INFO(("P2P Attribute %d not found Moving parse"
- " to %p len to %d", attrib, parse, ie_len));
+ " to %p len to %d", attrib, parse, len));
}
}
else {
/* It was not p2p IE. parse will get updated automatically to next TLV */
- CFGP2P_INFO(("IT was NOT P2P IE parse %p len %d", parse, ie_len));
+ CFGP2P_INFO(("IT was NOT P2P IE parse %p len %d", parse, len));
}
}
CFGP2P_ERR(("P2P attribute %d was NOT found", attrib));
return NULL;
}
-const u8 *
+u8 *
wl_cfgp2p_retreive_p2p_dev_addr(wl_bss_info_t *bi, u32 bi_length)
{
- const u8 *capability = NULL;
+ u8 *capability = NULL;
bool p2p_go = 0;
- const u8 *ptr = NULL;
-
- if (bi->length != bi->ie_offset + bi->ie_length) {
- return NULL;
- }
+ u8 *ptr = NULL;
if ((capability = wl_cfgp2p_find_attrib_in_all_p2p_Ies(((u8 *) bi) + bi->ie_offset,
bi->ie_length, P2P_SEID_P2P_INFO)) == NULL) {
cfg->p2p_wdev = wdev;
cfg->p2p_net = net;
- WL_MSG(net->name, "P2P Interface Registered\n");
+ printk("%s: P2P Interface Registered\n", net->name);
return ret;
}
* For Android PRIV CMD handling map it to primary I/F
*/
if (cmd == SIOCDEVPRIVATE+1) {
- ret = wl_android_priv_cmd(ndev, ifr);
+ ret = wl_android_priv_cmd(ndev, ifr, cmd);
} else {
CFGP2P_ERR(("%s: IOCTL req 0x%x on p2p0 I/F. Ignoring. \n",
*/
wl_cfgp2p_del_p2p_disc_if(cfg->p2p_wdev, cfg);
#else
- dhd->hang_reason = HANG_REASON_IFACE_DEL_FAILURE;
+ dhd->hang_reason = HANG_REASON_IFACE_OP_FAILURE;
#if defined(BCMPCIE) && defined(DHD_FW_COREDUMP)
if (dhd->memdump_enabled) {
/* Load the dongle side dump to host
* memory and then BUG_ON()
*/
- dhd->memdump_type = DUMP_TYPE_IFACE_OP_FAILURE;
+ dhd->memdump_type = DUMP_TYPE_HANG_ON_IFACE_OP_FAIL;
dhd_bus_mem_dump(dhd);
}
#endif /* BCMPCIE && DHD_FW_COREDUMP */
#endif /* EXPLICIT_DISCIF_CLEANUP */
}
- wdev = (struct wireless_dev *)MALLOCZ(cfg->osh, sizeof(*wdev));
+ wdev = kzalloc(sizeof(*wdev), GFP_KERNEL);
if (unlikely(!wdev)) {
WL_ERR(("Could not allocate wireless device\n"));
return ERR_PTR(-ENOMEM);
}
- bzero(&primary_mac, sizeof(primary_mac));
+ memset(&primary_mac, 0, sizeof(primary_mac));
get_primary_mac(cfg, &primary_mac);
-#ifndef WL_P2P_USE_RANDMAC
wl_cfgp2p_generate_bss_mac(cfg, &primary_mac);
-#endif /* WL_P2P_USE_RANDMAC */
wdev->wiphy = cfg->wdev->wiphy;
wdev->iftype = NL80211_IFTYPE_P2P_DEVICE;
memcpy(wdev->address, wl_to_p2p_bss_macaddr(cfg, P2PAPI_BSSCFG_DEVICE), ETHER_ADDR_LEN);
+
/* store p2p wdev ptr for further reference. */
cfg->p2p_wdev = wdev;
printf("P2P interface registered\n");
printf("%s: wdev: %p, wdev->net: %p\n", __FUNCTION__, wdev, wdev->netdev);
+
return wdev;
}
p2p_on(cfg) = true;
#if defined(P2P_IE_MISSING_FIX)
cfg->p2p_prb_noti = false;
-#endif // endif
+#endif
- CFGP2P_DBG(("P2P interface started\n"));
+ printf("P2P interface started\n");
exit:
return ret;
wl_cfgp2p_stop_p2p_device(struct wiphy *wiphy, struct wireless_dev *wdev)
{
int ret = 0;
- struct net_device *ndev = NULL;
struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
if (!cfg)
CFGP2P_DBG(("Enter\n"));
- /* Check if cfg80211 interface is already down */
- ndev = bcmcfg_to_prmry_ndev(cfg);
- if (!wl_get_drv_status(cfg, READY, ndev)) {
- WL_DBG(("cfg80211 interface is already down\n"));
- return; /* it is even not ready */
- }
-
ret = wl_cfg80211_scan_stop(cfg, wdev);
if (unlikely(ret < 0)) {
CFGP2P_ERR(("P2P scan stop failed, ret=%d\n", ret));
}
- if (!p2p_is_on(cfg)) {
+ if (!cfg->p2p)
return;
- }
-
-#ifdef P2P_LISTEN_OFFLOADING
- wl_cfg80211_p2plo_deinit(cfg);
-#endif /* P2P_LISTEN_OFFLOADING */
/* Cancel any on-going listen */
wl_cfgp2p_cancel_listen(cfg, bcmcfg_to_prmry_ndev(cfg), wdev, TRUE);
p2p_on(cfg) = false;
- CFGP2P_DBG(("Exit. P2P interface stopped\n"));
+ printf("Exit. P2P interface stopped\n");
return;
}
{
bool rollback_lock = false;
- if (!wdev || !cfg) {
- WL_ERR(("wdev or cfg is NULL\n"));
+ if (!wdev)
return -EINVAL;
- }
- WL_INFORM(("Enter\n"));
-
- if (!cfg->p2p_wdev) {
- WL_ERR(("Already deleted p2p_wdev\n"));
- return -EINVAL;
- }
+ WL_TRACE(("Enter\n"));
+ printf("%s: wdev: %p, wdev->net: %p\n", __FUNCTION__, wdev, wdev->netdev);
if (!rtnl_is_locked()) {
rtnl_lock();
synchronize_rcu();
- MFREE(cfg->osh, wdev, sizeof(*wdev));
+ kfree(wdev);
- cfg->p2p_wdev = NULL;
+ if (cfg)
+ cfg->p2p_wdev = NULL;
- CFGP2P_ERR(("P2P interface unregistered\n"));
+ printf("P2P interface unregistered\n");
return 0;
}
/*
* Linux cfgp2p driver
*
- * Copyright (C) 1999-2019, Broadcom.
- *
+ * Copyright (C) 1999-2017, Broadcom Corporation
+ *
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
* under the terms of the GNU General Public License version 2 (the "GPL"),
* available at http://www.broadcom.com/licenses/GPLv2.php, with the
* following added to such license:
- *
+ *
* As a special exception, the copyright holders of this software give you
* permission to link this software with independent modules, and to copy and
* distribute the resulting executable under terms of your choice, provided that
* the license of that module. An independent module is a module which is not
* derived from this software. The special exception does not apply to any
* modifications of the software.
- *
+ *
* Notwithstanding the above, under no circumstances may you combine this
* software in any way with any other Broadcom software provided under a license
* other than the GPL, without Broadcom's express prior written consent.
*
* <<Broadcom-WL-IPTag/Open:>>
*
- * $Id: wl_cfgp2p.h 794110 2018-12-12 05:03:21Z $
+ * $Id: wl_cfgp2p.h 676811 2016-12-24 20:48:46Z $
*/
#ifndef _wl_cfgp2p_h_
#define _wl_cfgp2p_h_
};
struct p2p_info {
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 15, 0)
+ struct bcm_cfg80211 *cfg;
+#endif
bool on; /**< p2p on/off switch */
bool scan;
int16 search_state;
s8 vir_ifname[IFNAMSIZ];
unsigned long status;
struct p2p_bss bss[P2PAPI_BSSCFG_MAX];
- timer_list_compat_t listen_timer;
+ struct timer_list listen_timer;
wl_p2p_sched_t noa;
wl_p2p_ops_t ops;
wlc_ssid_t ssid;
#define MAX_VNDR_IE_NUMBER 10
struct parsed_vndr_ie_info {
- const char *ie_ptr;
+ char *ie_ptr;
u32 ie_len; /**< total length including id & length field */
vndr_ie_t vndrie;
};
WLP2P_STATUS_DISC_IN_PROGRESS
};
+
#define wl_to_p2p_bss_ndev(cfg, type) ((cfg)->p2p->bss[type].dev)
#define wl_to_p2p_bss_bssidx(cfg, type) ((cfg)->p2p->bss[type].bssidx)
#define wl_to_p2p_bss_macaddr(cfg, type) &((cfg)->p2p->bss[type].mac_addr)
/* dword align allocation */
#define WLC_IOCTL_MAXLEN 8192
-#define CFGP2P_ERROR_TEXT "[dhd] CFGP2P-ERROR) "
+#ifdef CUSTOMER_HW4_DEBUG
+#define CFGP2P_ERROR_TEXT "CFGP2P-INFO2) "
+#else
+#define CFGP2P_ERROR_TEXT "CFGP2P-ERROR) "
+#endif /* CUSTOMER_HW4_DEBUG */
#ifdef DHD_LOG_DUMP
-#define CFGP2P_ERR_MSG(x, args...) \
- do { \
- if (wl_dbg_level & WL_DBG_ERR) { \
- printk(KERN_INFO CFGP2P_ERROR_TEXT "%s : " x, __func__, ## args); \
- DHD_LOG_DUMP_WRITE("[%s] %s: ", dhd_log_dump_get_timestamp(), __func__); \
- DHD_LOG_DUMP_WRITE(x, ## args); \
- } \
- } while (0)
-#define CFGP2P_ERR(x) CFGP2P_ERR_MSG x
-#define CFGP2P_INFO_MSG(x, args...) \
+#define CFGP2P_ERR(args) \
do { \
- if (wl_dbg_level & WL_DBG_INFO) { \
- printk(KERN_INFO "[dhd] CFGP2P-INFO) %s : " x, __func__, ## args); \
- DHD_LOG_DUMP_WRITE("[%s] %s: ", dhd_log_dump_get_timestamp(), __func__); \
- DHD_LOG_DUMP_WRITE(x, ## args); \
- } \
- } while (0)
-#define CFGP2P_INFO(x) CFGP2P_INFO_MSG x
-#define CFGP2P_ACTION_MSG(x, args...) \
- do { \
- if (wl_dbg_level & WL_DBG_P2P_ACTION) { \
- printk(KERN_INFO "[dhd] CFGP2P-ACTION) %s :" x, __func__, ## args); \
- DHD_LOG_DUMP_WRITE("[%s] %s: ", dhd_log_dump_get_timestamp(), __func__); \
- DHD_LOG_DUMP_WRITE(x, ## args); \
+ if (wl_dbg_level & WL_DBG_ERR) { \
+ printk(KERN_INFO CFGP2P_ERROR_TEXT "%s : ", __func__); \
+ printk args; \
+ DHD_LOG_DUMP_WRITE("[%s] %s: ", \
+ dhd_log_dump_get_timestamp(), __func__); \
+ DHD_LOG_DUMP_WRITE args; \
} \
} while (0)
-#define CFGP2P_ACTION(x) CFGP2P_ACTION_MSG x
#else
-#define CFGP2P_ERR_MSG(x, args...) \
+#define CFGP2P_ERR(args) \
do { \
if (wl_dbg_level & WL_DBG_ERR) { \
- printk(KERN_INFO CFGP2P_ERROR_TEXT "%s : " x, __func__, ## args); \
+ printk(KERN_INFO CFGP2P_ERROR_TEXT "%s : ", __func__); \
+ printk args; \
} \
} while (0)
-#define CFGP2P_ERR(x) CFGP2P_ERR_MSG x
-#define CFGP2P_INFO_MSG(x, args...) \
+#endif /* DHD_LOG_DUMP */
+#define CFGP2P_INFO(args) \
do { \
if (wl_dbg_level & WL_DBG_INFO) { \
- printk(KERN_INFO "[dhd] CFGP2P-INFO) %s : " x, __func__, ## args); \
+ printk(KERN_INFO "CFGP2P-INFO) %s : ", __func__); \
+ printk args; \
} \
} while (0)
-#define CFGP2P_INFO(x) CFGP2P_INFO_MSG x
-#define CFGP2P_ACTION_MSG(x, args...) \
+#define CFGP2P_DBG(args) \
do { \
- if (wl_dbg_level & WL_DBG_P2P_ACTION) { \
- printk(KERN_INFO "[dhd] CFGP2P-ACTION) %s :" x, __func__, ## args); \
+ if (wl_dbg_level & WL_DBG_DBG) { \
+ printk(KERN_INFO "CFGP2P-DEBUG) %s :", __func__); \
+ printk args; \
} \
} while (0)
-#define CFGP2P_ACTION(x) CFGP2P_ACTION_MSG x
-#endif /* DHD_LOG_DUMP */
-#define CFGP2P_DBG_MSG(x, args...) \
+#define CFGP2P_ACTION(args) \
do { \
- if (wl_dbg_level & WL_DBG_DBG) { \
- printk(KERN_INFO "[dhd] CFGP2P-DEBUG) %s :" x, __func__, ## args); \
+ if (wl_dbg_level & WL_DBG_P2P_ACTION) { \
+ printk(KERN_INFO "CFGP2P-ACTION) %s :", __func__); \
+ printk args; \
} \
} while (0)
-#define CFGP2P_DBG(x) CFGP2P_DBG_MSG x
-
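/*
 * Illustrative aside (not part of the patch): the CFGP2P_* macros above take
 * their argument list wrapped in an extra set of parentheses, because the whole
 * list is pasted into printk()/DHD_LOG_DUMP_WRITE(). A minimal usage sketch,
 * assuming wl_dbg_level has the relevant WL_DBG_* bit set; the variables shown
 * (bssidx, channel, duration_ms) are placeholders for the example only:
 *
 *	CFGP2P_ERR(("bss index %d is invalid\n", bssidx));
 *	CFGP2P_INFO(("listen channel %d, duration %d ms\n", channel, duration_ms));
 */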
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 15, 0)
+#define INIT_TIMER(timer, func, duration, extra_delay) \
+ do { \
+ timer_setup(timer, func, 0); \
+ timer->expires = jiffies + msecs_to_jiffies(duration + extra_delay); \
+ add_timer(timer); \
+ } while (0);
+#else
#define INIT_TIMER(timer, func, duration, extra_delay) \
do { \
- init_timer_compat(timer, func, cfg); \
- timer_expires(timer) = jiffies + msecs_to_jiffies(duration + extra_delay); \
+ init_timer(timer); \
+ timer->function = func; \
+ timer->expires = jiffies + msecs_to_jiffies(duration + extra_delay); \
+ timer->data = (unsigned long) cfg; \
add_timer(timer); \
} while (0);
+#endif
#if (LINUX_VERSION_CODE <= KERNEL_VERSION(3, 0, 8))
#ifdef WL_SUPPORT_BACKPORTED_KPATCHES
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0)) && !defined(WL_CFG80211_P2P_DEV_IF)
#define WL_CFG80211_P2P_DEV_IF
+#ifdef WL_ENABLE_P2P_IF
+#undef WL_ENABLE_P2P_IF
+#endif
+
#ifdef WL_SUPPORT_BACKPORTED_KPATCHES
#undef WL_SUPPORT_BACKPORTED_KPATCHES
-#endif // endif
+#endif
#else
#ifdef WLP2P
+#ifndef WL_ENABLE_P2P_IF
/* Enable P2P network Interface if P2P support is enabled */
#define WL_ENABLE_P2P_IF
+#endif /* WL_ENABLE_P2P_IF */
#endif /* WLP2P */
#endif /* (LINUX_VERSION >= VERSION(3, 8, 0)) */
#define P2P_ECSA_CNT 50
extern void
-wl_cfgp2p_listen_expired(unsigned long data);
+wl_cfgp2p_listen_expired(
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 15, 0)
+ struct timer_list *t
+#else
+ ulong data
+#endif
+);
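/*
 * Illustrative sketch (not part of the patch): one way the dual prototype of
 * wl_cfgp2p_listen_expired() above can be serviced together with the
 * version-split INIT_TIMER macro. On 4.15+ kernels the callback receives the
 * struct timer_list pointer and recovers its owner with the kernel's
 * from_timer() helper; on older kernels the owner arrives via timer->data,
 * which INIT_TIMER sets to cfg. The p2p->cfg field used here is the one this
 * patch adds to struct p2p_info for 4.15+; the body is elided and only meant
 * to show the signature handling, not the driver's actual implementation.
 */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 15, 0)
void wl_cfgp2p_listen_expired(struct timer_list *t)
{
	struct p2p_info *p2p = from_timer(p2p, t, listen_timer);
	struct bcm_cfg80211 *cfg = p2p->cfg;

	/* ... hand the listen-complete event to the cfg80211 event path ... */
}
#else
void wl_cfgp2p_listen_expired(ulong data)
{
	struct bcm_cfg80211 *cfg = (struct bcm_cfg80211 *)data;

	/* ... hand the listen-complete event to the cfg80211 event path ... */
}
#endif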
extern bool
wl_cfgp2p_is_pub_action(void *frame, u32 frame_len);
extern bool
wl_cfgp2p_act_frm_search(struct bcm_cfg80211 *cfg, struct net_device *ndev,
s32 bssidx, s32 channel, struct ether_addr *tx_dst_addr);
-extern const wpa_ie_fixed_t *
-wl_cfgp2p_find_wpaie(const u8 *parse, u32 len);
+extern wpa_ie_fixed_t *
+wl_cfgp2p_find_wpaie(u8 *parse, u32 len);
-extern const wpa_ie_fixed_t *
-wl_cfgp2p_find_wpsie(const u8 *parse, u32 len);
+extern wpa_ie_fixed_t *
+wl_cfgp2p_find_wpsie(u8 *parse, u32 len);
extern wifi_p2p_ie_t *
-wl_cfgp2p_find_p2pie(const u8 *parse, u32 len);
+wl_cfgp2p_find_p2pie(u8 *parse, u32 len);
-extern const wifi_wfd_ie_t *
-wl_cfgp2p_find_wfdie(const u8 *parse, u32 len);
+extern wifi_wfd_ie_t *
+wl_cfgp2p_find_wfdie(u8 *parse, u32 len);
extern s32
wl_cfgp2p_set_management_ie(struct bcm_cfg80211 *cfg, struct net_device *ndev, s32 bssidx,
s32 pktflag, const u8 *vndr_ie, u32 vndr_ie_len);
extern s32
wl_cfgp2p_find_type(struct bcm_cfg80211 *cfg, s32 bssidx, s32 *type);
+
extern s32
wl_cfgp2p_listen_complete(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev,
const wl_event_msg_t *e, void *data);
extern s32
wl_cfgp2p_increase_p2p_bw(struct bcm_cfg80211 *cfg, struct net_device *ndev, char* buf, int len);
-extern const u8 *
-wl_cfgp2p_retreive_p2pattrib(const void *buf, u8 element_id);
+extern u8 *
+wl_cfgp2p_retreive_p2pattrib(void *buf, u8 element_id);
-extern const u8*
-wl_cfgp2p_find_attrib_in_all_p2p_Ies(const u8 *parse, u32 len, u32 attrib);
+extern u8*
+wl_cfgp2p_find_attrib_in_all_p2p_Ies(u8 *parse, u32 len, u32 attrib);
-extern const u8 *
+extern u8 *
wl_cfgp2p_retreive_p2p_dev_addr(wl_bss_info_t *bi, u32 bi_length);
extern s32
extern u32
wl_cfgp2p_vndr_ie(struct bcm_cfg80211 *cfg, u8 *iebuf, s32 pktflag,
- s8 *oui, s32 ie_id, const s8 *data, s32 datalen, const s8* add_del_cmd);
+ s8 *oui, s32 ie_id, s8 *data, s32 datalen, const s8* add_del_cmd);
extern int wl_cfgp2p_get_conn_idx(struct bcm_cfg80211 *cfg);
+++ /dev/null
-/*
- * Linux cfg80211 driver scan related code
- *
- * Copyright (C) 1999-2019, Broadcom.
- *
- * Unless you and Broadcom execute a separate written software license
- * agreement governing use of this software, this software is licensed to you
- * under the terms of the GNU General Public License version 2 (the "GPL"),
- * available at http://www.broadcom.com/licenses/GPLv2.php, with the
- * following added to such license:
- *
- * As a special exception, the copyright holders of this software give you
- * permission to link this software with independent modules, and to copy and
- * distribute the resulting executable under terms of your choice, provided that
- * you also meet, for each linked independent module, the terms and conditions of
- * the license of that module. An independent module is a module which is not
- * derived from this software. The special exception does not apply to any
- * modifications of the software.
- *
- * Notwithstanding the above, under no circumstances may you combine this
- * software in any way with any other Broadcom software provided under a license
- * other than the GPL, without Broadcom's express prior written consent.
- *
- *
- * <<Broadcom-WL-IPTag/Open:>>
- *
- * $Id$
- */
-/* */
-#include <typedefs.h>
-#include <linuxver.h>
-#include <osl.h>
-#include <linux/kernel.h>
-
-#include <bcmutils.h>
-#include <bcmstdlib_s.h>
-#include <bcmwifi_channels.h>
-#include <bcmendian.h>
-#include <ethernet.h>
-#include <802.11.h>
-#include <bcmiov.h>
-#include <linux/if_arp.h>
-#include <asm/uaccess.h>
-
-#include <ethernet.h>
-#include <linux/kernel.h>
-#include <linux/kthread.h>
-#include <linux/netdevice.h>
-#include <linux/sched.h>
-#include <linux/etherdevice.h>
-#include <linux/wireless.h>
-#include <linux/ieee80211.h>
-#include <linux/wait.h>
-#include <net/cfg80211.h>
-#include <net/rtnetlink.h>
-
-#include <wlioctl.h>
-#include <bcmevent.h>
-#include <wldev_common.h>
-#include <wl_cfg80211.h>
-#include <wl_cfgscan.h>
-#include <wl_cfgp2p.h>
-#include <bcmdevs.h>
-#include <wl_android.h>
-#include <dngl_stats.h>
-#include <dhd.h>
-#include <dhd_linux.h>
-#include <dhd_debug.h>
-#include <dhdioctl.h>
-#include <wlioctl.h>
-#include <dhd_cfg80211.h>
-#include <dhd_bus.h>
-#include <wl_cfgvendor.h>
-#ifdef BCMPCIE
-#include <dhd_flowring.h>
-#endif // endif
-#ifdef PNO_SUPPORT
-#include <dhd_pno.h>
-#endif /* PNO_SUPPORT */
-#ifdef RTT_SUPPORT
-#include "dhd_rtt.h"
-#endif /* RTT_SUPPORT */
-#include <dhd_config.h>
-
-#define ACTIVE_SCAN 1
-#define PASSIVE_SCAN 0
-
-#define MIN_P2P_IE_LEN 8 /* p2p_ie->OUI(3) + p2p_ie->oui_type(1) +
- * Attribute ID(1) + Length(2) + 1(Minimum length:1)
- */
-#define MAX_P2P_IE_LEN 251 /* Up To 251 */
-
-#define WPS_ATTR_REQ_TYPE 0x103a
-#define WPS_REQ_TYPE_ENROLLEE 0x01
-
-#if defined(USE_INITIAL_2G_SCAN) || defined(USE_INITIAL_SHORT_DWELL_TIME)
-#define FIRST_SCAN_ACTIVE_DWELL_TIME_MS 40
-bool g_first_broadcast_scan = TRUE;
-#endif /* USE_INITIAL_2G_SCAN || USE_INITIAL_SHORT_DWELL_TIME */
-#ifdef P2P_LISTEN_OFFLOADING
-void wl_cfg80211_cancel_p2plo(struct bcm_cfg80211 *cfg);
-#endif /* P2P_LISTEN_OFFLOADING */
-static void _wl_notify_scan_done(struct bcm_cfg80211 *cfg, bool aborted);
-
-extern int passive_channel_skip;
-
-#ifdef WL11U
-static bcm_tlv_t *
-wl_cfg80211_find_interworking_ie(const u8 *parse, u32 len)
-{
- bcm_tlv_t *ie;
-
-/* unfortunately it's too much work to get rid of the const cast - bcm_parse_tlvs
- * is used everywhere and changing its prototype to take a const qualifier needs
- * a massive change to all its callers...
- */
-
- if ((ie = bcm_parse_tlvs(parse, len, DOT11_MNG_INTERWORKING_ID))) {
- return ie;
- }
- return NULL;
-}
-
-static s32
-wl_cfg80211_clear_iw_ie(struct bcm_cfg80211 *cfg, struct net_device *ndev, s32 bssidx)
-{
- ie_setbuf_t ie_setbuf;
-
- WL_DBG(("clear interworking IE\n"));
-
- bzero(&ie_setbuf, sizeof(ie_setbuf_t));
-
- ie_setbuf.ie_buffer.iecount = htod32(1);
- ie_setbuf.ie_buffer.ie_list[0].ie_data.id = DOT11_MNG_INTERWORKING_ID;
- ie_setbuf.ie_buffer.ie_list[0].ie_data.len = 0;
-
- return wldev_iovar_setbuf_bsscfg(ndev, "ie", &ie_setbuf, sizeof(ie_setbuf),
- cfg->ioctl_buf, WLC_IOCTL_MAXLEN, bssidx, &cfg->ioctl_buf_sync);
-}
-
-static s32
-wl_cfg80211_add_iw_ie(struct bcm_cfg80211 *cfg, struct net_device *ndev, s32 bssidx, s32 pktflag,
- uint8 ie_id, uint8 *data, uint8 data_len)
-{
- s32 err = BCME_OK;
- s32 buf_len;
- ie_setbuf_t *ie_setbuf;
- ie_getbuf_t ie_getbufp;
- char getbuf[WLC_IOCTL_SMLEN];
-
- if (ie_id != DOT11_MNG_INTERWORKING_ID) {
- WL_ERR(("unsupported (id=%d)\n", ie_id));
- return BCME_UNSUPPORTED;
- }
-
- /* access network options (1 octet) is the mandatory field */
- if (!data || data_len == 0 || data_len > IW_IES_MAX_BUF_LEN) {
- WL_ERR(("wrong interworking IE (len=%d)\n", data_len));
- return BCME_BADARG;
- }
-
- /* Validate the pktflag parameter */
- if ((pktflag & ~(VNDR_IE_BEACON_FLAG | VNDR_IE_PRBRSP_FLAG |
- VNDR_IE_ASSOCRSP_FLAG | VNDR_IE_AUTHRSP_FLAG |
- VNDR_IE_PRBREQ_FLAG | VNDR_IE_ASSOCREQ_FLAG|
- VNDR_IE_CUSTOM_FLAG))) {
- WL_ERR(("invalid packet flag 0x%x\n", pktflag));
- return BCME_BADARG;
- }
-
- buf_len = sizeof(ie_setbuf_t) + data_len - 1;
-
- ie_getbufp.id = DOT11_MNG_INTERWORKING_ID;
- if (wldev_iovar_getbuf_bsscfg(ndev, "ie", (void *)&ie_getbufp,
- sizeof(ie_getbufp), getbuf, WLC_IOCTL_SMLEN, bssidx, &cfg->ioctl_buf_sync)
- == BCME_OK) {
- if (!memcmp(&getbuf[TLV_HDR_LEN], data, data_len)) {
- WL_DBG(("skip to set interworking IE\n"));
- return BCME_OK;
- }
- }
-
- /* if already set with previous values, delete it first */
- if (cfg->wl11u) {
- if ((err = wl_cfg80211_clear_iw_ie(cfg, ndev, bssidx)) != BCME_OK) {
- return err;
- }
- }
-
- ie_setbuf = (ie_setbuf_t *)MALLOCZ(cfg->osh, buf_len);
- if (!ie_setbuf) {
- WL_ERR(("Error allocating buffer for IE\n"));
- return -ENOMEM;
- }
- strlcpy(ie_setbuf->cmd, "add", sizeof(ie_setbuf->cmd));
-
- /* Buffer contains only 1 IE */
- ie_setbuf->ie_buffer.iecount = htod32(1);
- /* use VNDR_IE_CUSTOM_FLAG flags for none vendor IE . currently fixed value */
- ie_setbuf->ie_buffer.ie_list[0].pktflag = htod32(pktflag);
-
- /* Now, add the IE to the buffer */
- ie_setbuf->ie_buffer.ie_list[0].ie_data.id = DOT11_MNG_INTERWORKING_ID;
- ie_setbuf->ie_buffer.ie_list[0].ie_data.len = data_len;
- /* Returning void here as max data_len can be 8 */
- (void)memcpy_s((uchar *)&ie_setbuf->ie_buffer.ie_list[0].ie_data.data[0], sizeof(uint8),
- data, data_len);
-
- if ((err = wldev_iovar_setbuf_bsscfg(ndev, "ie", ie_setbuf, buf_len,
- cfg->ioctl_buf, WLC_IOCTL_MAXLEN, bssidx, &cfg->ioctl_buf_sync))
- == BCME_OK) {
- WL_DBG(("set interworking IE\n"));
- cfg->wl11u = TRUE;
- err = wldev_iovar_setint_bsscfg(ndev, "grat_arp", 1, bssidx);
- }
-
- MFREE(cfg->osh, ie_setbuf, buf_len);
- return err;
-}
-#endif /* WL11U */
-
-#ifdef WL_BCNRECV
-/* Beacon recv results handler sending to upper layer */
-static s32
-wl_bcnrecv_result_handler(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev,
- wl_bss_info_v109_2_t *bi, uint32 scan_status)
-{
- s32 err = BCME_OK;
- struct wiphy *wiphy = NULL;
- wl_bcnrecv_result_t *bcn_recv = NULL;
- struct osl_timespec ts;
- if (!bi) {
- WL_ERR(("%s: bi is NULL\n", __func__));
- err = BCME_NORESOURCE;
- goto exit;
- }
- if ((bi->length - bi->ie_length) < sizeof(wl_bss_info_v109_2_t)) {
- WL_ERR(("bi info version doesn't support bcn_recv attributes\n"));
- goto exit;
- }
-
- if (scan_status == WLC_E_STATUS_RXBCN) {
- wiphy = cfg->wdev->wiphy;
- if (!wiphy) {
- WL_ERR(("wiphy is NULL\n"));
- err = BCME_NORESOURCE;
- goto exit;
- }
- bcn_recv = (wl_bcnrecv_result_t *)MALLOCZ(cfg->osh, sizeof(*bcn_recv));
- if (unlikely(!bcn_recv)) {
- WL_ERR(("Failed to allocate memory\n"));
- return -ENOMEM;
- }
- /* Returning void here as copy size does not exceed dest size of SSID */
- (void)memcpy_s((char *)bcn_recv->SSID, DOT11_MAX_SSID_LEN,
- (char *)bi->SSID, DOT11_MAX_SSID_LEN);
- /* Returning void here as copy size does not exceed dest size of ETH_LEN */
- (void)memcpy_s(&bcn_recv->BSSID, ETHER_ADDR_LEN, &bi->BSSID, ETH_ALEN);
- bcn_recv->channel = wf_chspec_ctlchan(
- wl_chspec_driver_to_host(bi->chanspec));
- bcn_recv->beacon_interval = bi->beacon_period;
-
- /* kernel timestamp */
- osl_get_monotonic_boottime(&ts);
- bcn_recv->system_time = ((u64)ts.tv_sec*1000000)
- + ts.tv_nsec / 1000;
- bcn_recv->timestamp[0] = bi->timestamp[0];
- bcn_recv->timestamp[1] = bi->timestamp[1];
- if ((err = wl_android_bcnrecv_event(cfgdev_to_wlc_ndev(cfgdev, cfg),
- BCNRECV_ATTR_BCNINFO, 0, 0,
- (uint8 *)bcn_recv, sizeof(*bcn_recv)))
- != BCME_OK) {
- WL_ERR(("failed to send bcnrecv event, error:%d\n", err));
- }
- } else {
- WL_DBG(("Ignoring Escan Event:%d \n", scan_status));
- }
-exit:
- if (bcn_recv) {
- MFREE(cfg->osh, bcn_recv, sizeof(*bcn_recv));
- }
- return err;
-}
-#endif /* WL_BCNRECV */
-
-#ifdef ESCAN_BUF_OVERFLOW_MGMT
-#ifndef WL_DRV_AVOID_SCANCACHE
-static void
-wl_cfg80211_find_removal_candidate(wl_bss_info_t *bss, removal_element_t *candidate)
-{
- int idx;
- for (idx = 0; idx < BUF_OVERFLOW_MGMT_COUNT; idx++) {
- int len = BUF_OVERFLOW_MGMT_COUNT - idx - 1;
- if (bss->RSSI < candidate[idx].RSSI) {
- if (len) {
- /* In the below memcpy operation the candidate array always has the
- * buffer space available to max 'len' calculated in the for loop.
- */
- (void)memcpy_s(&candidate[idx + 1],
- (sizeof(removal_element_t) * len),
- &candidate[idx], sizeof(removal_element_t) * len);
- }
- candidate[idx].RSSI = bss->RSSI;
- candidate[idx].length = bss->length;
- (void)memcpy_s(&candidate[idx].BSSID, ETHER_ADDR_LEN,
- &bss->BSSID, ETHER_ADDR_LEN);
- return;
- }
- }
-}
-
-static void
-wl_cfg80211_remove_lowRSSI_info(wl_scan_results_t *list, removal_element_t *candidate,
- wl_bss_info_t *bi)
-{
- int idx1, idx2;
- int total_delete_len = 0;
- for (idx1 = 0; idx1 < BUF_OVERFLOW_MGMT_COUNT; idx1++) {
- int cur_len = WL_SCAN_RESULTS_FIXED_SIZE;
- wl_bss_info_t *bss = NULL;
- if (candidate[idx1].RSSI >= bi->RSSI)
- continue;
- for (idx2 = 0; idx2 < list->count; idx2++) {
- bss = bss ? (wl_bss_info_t *)((uintptr)bss + dtoh32(bss->length)) :
- list->bss_info;
- if (!bcmp(&candidate[idx1].BSSID, &bss->BSSID, ETHER_ADDR_LEN) &&
- candidate[idx1].RSSI == bss->RSSI &&
- candidate[idx1].length == dtoh32(bss->length)) {
- u32 delete_len = dtoh32(bss->length);
- WL_DBG(("delete scan info of " MACDBG " to add new AP\n",
- MAC2STRDBG(bss->BSSID.octet)));
- if (idx2 < list->count -1) {
- memmove((u8 *)bss, (u8 *)bss + delete_len,
- list->buflen - cur_len - delete_len);
- }
- list->buflen -= delete_len;
- list->count--;
- total_delete_len += delete_len;
- /* if delete_len is greater than or equal to result length */
- if (total_delete_len >= bi->length) {
- return;
- }
- break;
- }
- cur_len += dtoh32(bss->length);
- }
- }
-}
-#endif /* WL_DRV_AVOID_SCANCACHE */
-#endif /* ESCAN_BUF_OVERFLOW_MGMT */
-
-s32
-wl_escan_handler(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev,
- const wl_event_msg_t *e, void *data)
-{
- s32 err = BCME_OK;
- s32 status = ntoh32(e->status);
- wl_escan_result_t *escan_result;
- struct net_device *ndev = NULL;
-#ifndef WL_DRV_AVOID_SCANCACHE
- wl_bss_info_t *bi;
- u32 bi_length;
- const wifi_p2p_ie_t * p2p_ie;
- const u8 *p2p_dev_addr = NULL;
- wl_scan_results_t *list;
- wl_bss_info_t *bss = NULL;
- u32 i;
-#endif /* WL_DRV_AVOID_SCANCACHE */
- u16 channel;
- struct ieee80211_supported_band *band;
-
- WL_DBG((" enter event type : %d, status : %d \n",
- ntoh32(e->event_type), ntoh32(e->status)));
-
- ndev = cfgdev_to_wlc_ndev(cfgdev, cfg);
-
- mutex_lock(&cfg->scan_sync);
- /* P2P SCAN is coming from primary interface */
- if (wl_get_p2p_status(cfg, SCANNING)) {
- if (wl_get_drv_status_all(cfg, SENDING_ACT_FRM))
- ndev = cfg->afx_hdl->dev;
- else
- ndev = cfg->escan_info.ndev;
- }
- escan_result = (wl_escan_result_t *)data;
-#ifdef WL_BCNRECV
- if (cfg->bcnrecv_info.bcnrecv_state == BEACON_RECV_STARTED &&
- status == WLC_E_STATUS_RXBCN) {
- /* handle beacon recv scan results */
- wl_bss_info_v109_2_t *bi_info;
- bi_info = (wl_bss_info_v109_2_t *)escan_result->bss_info;
- err = wl_bcnrecv_result_handler(cfg, cfgdev, bi_info, status);
- goto exit;
- }
-#endif /* WL_BCNRECV */
- if (!ndev || (!wl_get_drv_status(cfg, SCANNING, ndev) && !cfg->sched_scan_running)) {
- WL_ERR_RLMT(("escan is not ready. drv_scan_status 0x%x"
- " e_type %d e_states %d\n",
- wl_get_drv_status(cfg, SCANNING, ndev),
- ntoh32(e->event_type), ntoh32(e->status)));
- goto exit;
- }
-
-#ifndef WL_DRV_AVOID_SCANCACHE
- if (status == WLC_E_STATUS_PARTIAL) {
- WL_DBG(("WLC_E_STATUS_PARTIAL \n"));
- DBG_EVENT_LOG((dhd_pub_t *)cfg->pub, WIFI_EVENT_DRIVER_SCAN_RESULT_FOUND);
- if (!escan_result) {
- WL_ERR(("Invalid escan result (NULL pointer)\n"));
- goto exit;
- }
- if ((dtoh32(escan_result->buflen) > (int)ESCAN_BUF_SIZE) ||
- (dtoh32(escan_result->buflen) < sizeof(wl_escan_result_t))) {
- WL_ERR(("Invalid escan buffer len:%d\n", dtoh32(escan_result->buflen)));
- goto exit;
- }
- if (dtoh16(escan_result->bss_count) != 1) {
- WL_ERR(("Invalid bss_count %d: ignoring\n", escan_result->bss_count));
- goto exit;
- }
- bi = escan_result->bss_info;
- if (!bi) {
- WL_ERR(("Invalid escan bss info (NULL pointer)\n"));
- goto exit;
- }
- bi_length = dtoh32(bi->length);
- if (bi_length != (dtoh32(escan_result->buflen) - WL_ESCAN_RESULTS_FIXED_SIZE)) {
- WL_ERR(("Invalid bss_info length %d: ignoring\n", bi_length));
- goto exit;
- }
-
- /* +++++ terence 20130524: skip invalid bss */
- channel =
- bi->ctl_ch ? bi->ctl_ch : CHSPEC_CHANNEL(wl_chspec_driver_to_host(bi->chanspec));
- if (channel <= CH_MAX_2G_CHANNEL)
- band = bcmcfg_to_wiphy(cfg)->bands[IEEE80211_BAND_2GHZ];
- else
- band = bcmcfg_to_wiphy(cfg)->bands[IEEE80211_BAND_5GHZ];
- if (!band) {
- WL_ERR(("No valid band\n"));
- goto exit;
- }
- if (!dhd_conf_match_channel(cfg->pub, channel))
- goto exit;
- /* ----- terence 20130524: skip invalid bss */
-
- if (wl_escan_check_sync_id(status, escan_result->sync_id,
- cfg->escan_info.cur_sync_id) < 0)
- goto exit;
-
- if (!(bcmcfg_to_wiphy(cfg)->interface_modes & BIT(NL80211_IFTYPE_ADHOC))) {
- if (dtoh16(bi->capability) & DOT11_CAP_IBSS) {
- WL_DBG(("Ignoring IBSS result\n"));
- goto exit;
- }
- }
-
- if (wl_get_drv_status_all(cfg, FINDING_COMMON_CHANNEL)) {
- p2p_dev_addr = wl_cfgp2p_retreive_p2p_dev_addr(bi, bi_length);
- if (p2p_dev_addr && !memcmp(p2p_dev_addr,
- cfg->afx_hdl->tx_dst_addr.octet, ETHER_ADDR_LEN)) {
- s32 channel = wf_chspec_ctlchan(
- wl_chspec_driver_to_host(bi->chanspec));
-
- if ((channel > MAXCHANNEL) || (channel <= 0))
- channel = WL_INVALID;
- else
- WL_ERR(("ACTION FRAME SCAN : Peer " MACDBG " found,"
- " channel : %d\n",
- MAC2STRDBG(cfg->afx_hdl->tx_dst_addr.octet),
- channel));
-
- wl_clr_p2p_status(cfg, SCANNING);
- cfg->afx_hdl->peer_chan = channel;
- complete(&cfg->act_frm_scan);
- goto exit;
- }
-
- } else {
- int cur_len = WL_SCAN_RESULTS_FIXED_SIZE;
-#ifdef ESCAN_BUF_OVERFLOW_MGMT
- removal_element_t candidate[BUF_OVERFLOW_MGMT_COUNT];
- int remove_lower_rssi = FALSE;
-
- bzero(candidate, sizeof(removal_element_t)*BUF_OVERFLOW_MGMT_COUNT);
-#endif /* ESCAN_BUF_OVERFLOW_MGMT */
-
- list = wl_escan_get_buf(cfg, FALSE);
- if (scan_req_match(cfg)) {
-#ifdef WL_HOST_BAND_MGMT
- s32 channel_band = 0;
- chanspec_t chspec;
-#endif /* WL_HOST_BAND_MGMT */
- /* p2p scan && allow only probe response */
- if ((cfg->p2p->search_state != WL_P2P_DISC_ST_SCAN) &&
- (bi->flags & WL_BSS_FLAGS_FROM_BEACON))
- goto exit;
- if ((p2p_ie = wl_cfgp2p_find_p2pie(((u8 *) bi) + bi->ie_offset,
- bi->ie_length)) == NULL) {
- WL_ERR(("Couldn't find P2PIE in probe"
- " response/beacon\n"));
- goto exit;
- }
-#ifdef WL_HOST_BAND_MGMT
- chspec = wl_chspec_driver_to_host(bi->chanspec);
- channel_band = CHSPEC2WLC_BAND(chspec);
-
- if ((cfg->curr_band == WLC_BAND_5G) &&
- (channel_band == WLC_BAND_2G)) {
- /* Avoid sending the GO results in band conflict */
- if (wl_cfgp2p_retreive_p2pattrib(p2p_ie,
- P2P_SEID_GROUP_ID) != NULL)
- goto exit;
- }
-#endif /* WL_HOST_BAND_MGMT */
- }
-#ifdef ESCAN_BUF_OVERFLOW_MGMT
- if (bi_length > ESCAN_BUF_SIZE - list->buflen)
- remove_lower_rssi = TRUE;
-#endif /* ESCAN_BUF_OVERFLOW_MGMT */
-
- for (i = 0; i < list->count; i++) {
- bss = bss ? (wl_bss_info_t *)((uintptr)bss + dtoh32(bss->length))
- : list->bss_info;
- if (!bss) {
- WL_ERR(("bss is NULL\n"));
- goto exit;
- }
-#ifdef ESCAN_BUF_OVERFLOW_MGMT
- WL_DBG(("%s("MACDBG"), i=%d bss: RSSI %d list->count %d\n",
- bss->SSID, MAC2STRDBG(bss->BSSID.octet),
- i, bss->RSSI, list->count));
-
- if (remove_lower_rssi)
- wl_cfg80211_find_removal_candidate(bss, candidate);
-#endif /* ESCAN_BUF_OVERFLOW_MGMT */
-
- if (!bcmp(&bi->BSSID, &bss->BSSID, ETHER_ADDR_LEN) &&
- (CHSPEC_BAND(wl_chspec_driver_to_host(bi->chanspec))
- == CHSPEC_BAND(wl_chspec_driver_to_host(bss->chanspec))) &&
- bi->SSID_len == bss->SSID_len &&
- !bcmp(bi->SSID, bss->SSID, bi->SSID_len)) {
-
- /* do not allow beacon data to update
- * the data received from a probe response
- */
- if (!(bss->flags & WL_BSS_FLAGS_FROM_BEACON) &&
- (bi->flags & WL_BSS_FLAGS_FROM_BEACON))
- goto exit;
-
- WL_DBG(("%s("MACDBG"), i=%d prev: RSSI %d"
- " flags 0x%x, new: RSSI %d flags 0x%x\n",
- bss->SSID, MAC2STRDBG(bi->BSSID.octet), i,
- bss->RSSI, bss->flags, bi->RSSI, bi->flags));
-
- if ((bss->flags & WL_BSS_FLAGS_RSSI_ONCHANNEL) ==
- (bi->flags & WL_BSS_FLAGS_RSSI_ONCHANNEL)) {
- /* preserve max RSSI if the measurements are
- * both on-channel or both off-channel
- */
- WL_DBG(("%s("MACDBG"), same onchan"
- ", RSSI: prev %d new %d\n",
- bss->SSID, MAC2STRDBG(bi->BSSID.octet),
- bss->RSSI, bi->RSSI));
- bi->RSSI = MAX(bss->RSSI, bi->RSSI);
- } else if ((bss->flags & WL_BSS_FLAGS_RSSI_ONCHANNEL) &&
- (bi->flags & WL_BSS_FLAGS_RSSI_ONCHANNEL) == 0) {
- /* preserve the on-channel rssi measurement
- * if the new measurement is off channel
- */
- WL_DBG(("%s("MACDBG"), prev onchan"
- ", RSSI: prev %d new %d\n",
- bss->SSID, MAC2STRDBG(bi->BSSID.octet),
- bss->RSSI, bi->RSSI));
- bi->RSSI = bss->RSSI;
- bi->flags |= WL_BSS_FLAGS_RSSI_ONCHANNEL;
- }
- if (dtoh32(bss->length) != bi_length) {
- u32 prev_len = dtoh32(bss->length);
-
- WL_DBG(("bss info replacement"
- " is occured(bcast:%d->probresp%d)\n",
- bss->ie_length, bi->ie_length));
- WL_DBG(("%s("MACDBG"), replacement!(%d -> %d)\n",
- bss->SSID, MAC2STRDBG(bi->BSSID.octet),
- prev_len, bi_length));
-
- if ((list->buflen - prev_len) + bi_length
- > ESCAN_BUF_SIZE) {
- WL_ERR(("Buffer is too small: keep the"
- " previous result of this AP\n"));
- /* Only update RSSI */
- bss->RSSI = bi->RSSI;
- bss->flags |= (bi->flags
- & WL_BSS_FLAGS_RSSI_ONCHANNEL);
- goto exit;
- }
-
- if (i < list->count - 1) {
- /* memory copy required by this case only */
- memmove((u8 *)bss + bi_length,
- (u8 *)bss + prev_len,
- list->buflen - cur_len - prev_len);
- }
- list->buflen -= prev_len;
- list->buflen += bi_length;
- }
- list->version = dtoh32(bi->version);
- /* In the check above,
- * '(dtoh32(bss->length) != bi_length)',
- * buffer overflow is avoided; bi_length
- * is already accounted for in list->buflen
- */
- if ((err = memcpy_s((u8 *)bss,
- (ESCAN_BUF_SIZE - (list->buflen - bi_length)),
- (u8 *)bi, bi_length)) != BCME_OK) {
- WL_ERR(("Failed to copy the recent bss_info."
- "err:%d recv_len:%d bi_len:%d\n", err,
- ESCAN_BUF_SIZE - (list->buflen - bi_length),
- bi_length));
- /* This scenario should never happen. If it happens,
- * set list->count to zero for recovery
- */
- list->count = 0;
- list->buflen = 0;
- ASSERT(0);
- }
- goto exit;
- }
- cur_len += dtoh32(bss->length);
- }
- if (bi_length > ESCAN_BUF_SIZE - list->buflen) {
-#ifdef ESCAN_BUF_OVERFLOW_MGMT
- wl_cfg80211_remove_lowRSSI_info(list, candidate, bi);
- if (bi_length > ESCAN_BUF_SIZE - list->buflen) {
- WL_DBG(("RSSI(" MACDBG ") is too low(%d) to add Buffer\n",
- MAC2STRDBG(bi->BSSID.octet), bi->RSSI));
- goto exit;
- }
-#else
- WL_ERR(("Buffer is too small: ignoring\n"));
- goto exit;
-#endif /* ESCAN_BUF_OVERFLOW_MGMT */
- }
- /* In the previous step a check is added to ensure that bi_length does not
- * exceed the ESCAN_BUF_SIZE
- */
- (void)memcpy_s(&(((char *)list)[list->buflen]),
- (ESCAN_BUF_SIZE - list->buflen), bi, bi_length);
- list->version = dtoh32(bi->version);
- list->buflen += bi_length;
- list->count++;
-
- /*
- * !Broadcast && number of ssid = 1 && number of channels =1
- * means specific scan to association
- */
- if (wl_cfgp2p_is_p2p_specific_scan(cfg->scan_request)) {
- WL_ERR(("P2P assoc scan fast aborted.\n"));
- wl_notify_escan_complete(cfg, cfg->escan_info.ndev, false, true);
- goto exit;
- }
- }
- }
- else if (status == WLC_E_STATUS_SUCCESS) {
- cfg->escan_info.escan_state = WL_ESCAN_STATE_IDLE;
- wl_escan_print_sync_id(status, cfg->escan_info.cur_sync_id,
- escan_result->sync_id);
-
- if (wl_get_drv_status_all(cfg, FINDING_COMMON_CHANNEL)) {
- WL_DBG(("ACTION FRAME SCAN DONE\n"));
- wl_clr_p2p_status(cfg, SCANNING);
- wl_clr_drv_status(cfg, SCANNING, cfg->afx_hdl->dev);
- if (cfg->afx_hdl->peer_chan == WL_INVALID)
- complete(&cfg->act_frm_scan);
- } else if ((likely(cfg->scan_request)) || (cfg->sched_scan_running)) {
- WL_INFORM_MEM(("ESCAN COMPLETED\n"));
- DBG_EVENT_LOG((dhd_pub_t *)cfg->pub, WIFI_EVENT_DRIVER_SCAN_COMPLETE);
- cfg->bss_list = wl_escan_get_buf(cfg, FALSE);
- if (!scan_req_match(cfg)) {
- WL_DBG(("SCAN COMPLETED: scanned AP count=%d\n",
- cfg->bss_list->count));
- }
- wl_inform_bss(cfg);
- wl_notify_escan_complete(cfg, ndev, false, false);
- }
- wl_escan_increment_sync_id(cfg, SCAN_BUF_NEXT);
- } else if ((status == WLC_E_STATUS_ABORT) || (status == WLC_E_STATUS_NEWSCAN) ||
- (status == WLC_E_STATUS_11HQUIET) || (status == WLC_E_STATUS_CS_ABORT) ||
- (status == WLC_E_STATUS_NEWASSOC)) {
- /* Dump FW preserve buffer content */
- if (status == WLC_E_STATUS_ABORT) {
- wl_flush_fw_log_buffer(ndev, FW_LOGSET_MASK_ALL);
- }
- /* Handle all cases of scan abort */
- cfg->escan_info.escan_state = WL_ESCAN_STATE_IDLE;
- wl_escan_print_sync_id(status, escan_result->sync_id,
- cfg->escan_info.cur_sync_id);
- WL_DBG(("ESCAN ABORT reason: %d\n", status));
- if (wl_get_drv_status_all(cfg, FINDING_COMMON_CHANNEL)) {
- WL_DBG(("ACTION FRAME SCAN DONE\n"));
- wl_clr_drv_status(cfg, SCANNING, cfg->afx_hdl->dev);
- wl_clr_p2p_status(cfg, SCANNING);
- if (cfg->afx_hdl->peer_chan == WL_INVALID)
- complete(&cfg->act_frm_scan);
- } else if ((likely(cfg->scan_request)) || (cfg->sched_scan_running)) {
- WL_INFORM_MEM(("ESCAN ABORTED\n"));
-
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0))
- if (p2p_scan(cfg) && cfg->scan_request &&
- (cfg->scan_request->flags & NL80211_SCAN_FLAG_FLUSH)) {
- WL_ERR(("scan list is changed"));
- cfg->bss_list = wl_escan_get_buf(cfg, FALSE);
- } else
-#endif // endif
- cfg->bss_list = wl_escan_get_buf(cfg, TRUE);
-
- if (!scan_req_match(cfg)) {
- WL_TRACE_HW4(("SCAN ABORTED: scanned AP count=%d\n",
- cfg->bss_list->count));
- }
-#ifdef DUAL_ESCAN_RESULT_BUFFER
- if (escan_result->sync_id != cfg->escan_info.cur_sync_id) {
- /* If sync_id is not matching, then the abort might have
- * come for the old scan req or for the in-driver initiated
- * scan. So do abort for scan_req for which sync_id is
- * matching.
- */
- WL_INFORM_MEM(("sync_id mismatch (%d != %d). "
- "Ignore the scan abort event.\n",
- escan_result->sync_id, cfg->escan_info.cur_sync_id));
- goto exit;
- } else {
- /* sync id is matching, abort the scan */
- WL_INFORM_MEM(("scan aborted for sync_id: %d \n",
- cfg->escan_info.cur_sync_id));
- wl_inform_bss(cfg);
- wl_notify_escan_complete(cfg, ndev, true, false);
- }
-#else
- wl_inform_bss(cfg);
- wl_notify_escan_complete(cfg, ndev, true, false);
-#endif /* DUAL_ESCAN_RESULT_BUFFER */
- } else {
- /* If there is no pending host initiated scan, do nothing */
- WL_DBG(("ESCAN ABORT: No pending scans. Ignoring event.\n"));
- }
- wl_escan_increment_sync_id(cfg, SCAN_BUF_CNT);
- } else if (status == WLC_E_STATUS_TIMEOUT) {
- WL_ERR(("WLC_E_STATUS_TIMEOUT : scan_request[%p]\n", cfg->scan_request));
- WL_ERR(("reason[0x%x]\n", e->reason));
- if (e->reason == 0xFFFFFFFF) {
- wl_notify_escan_complete(cfg, cfg->escan_info.ndev, true, true);
- }
- } else {
- WL_ERR(("unexpected Escan Event %d : abort\n", status));
- cfg->escan_info.escan_state = WL_ESCAN_STATE_IDLE;
- wl_escan_print_sync_id(status, escan_result->sync_id,
- cfg->escan_info.cur_sync_id);
- if (wl_get_drv_status_all(cfg, FINDING_COMMON_CHANNEL)) {
- WL_DBG(("ACTION FRAME SCAN DONE\n"));
- wl_clr_p2p_status(cfg, SCANNING);
- wl_clr_drv_status(cfg, SCANNING, cfg->afx_hdl->dev);
- if (cfg->afx_hdl->peer_chan == WL_INVALID)
- complete(&cfg->act_frm_scan);
- } else if ((likely(cfg->scan_request)) || (cfg->sched_scan_running)) {
- cfg->bss_list = wl_escan_get_buf(cfg, TRUE);
- if (!scan_req_match(cfg)) {
- WL_TRACE_HW4(("SCAN ABORTED(UNEXPECTED): "
- "scanned AP count=%d\n",
- cfg->bss_list->count));
- }
- wl_inform_bss(cfg);
- wl_notify_escan_complete(cfg, ndev, true, false);
- }
- wl_escan_increment_sync_id(cfg, 2);
- }
-#else /* WL_DRV_AVOID_SCANCACHE */
- err = wl_escan_without_scan_cache(cfg, escan_result, ndev, e, status);
-#endif /* WL_DRV_AVOID_SCANCACHE */
-exit:
- mutex_unlock(&cfg->scan_sync);
- return err;
-}
-
-/* Find listen channel */
-static s32 wl_find_listen_channel(struct bcm_cfg80211 *cfg,
- const u8 *ie, u32 ie_len)
-{
- const wifi_p2p_ie_t *p2p_ie;
- const u8 *end, *pos;
- s32 listen_channel;
-
- pos = (const u8 *)ie;
-
- p2p_ie = wl_cfgp2p_find_p2pie(pos, ie_len);
-
- if (p2p_ie == NULL) {
- return 0;
- }
-
- if (p2p_ie->len < MIN_P2P_IE_LEN || p2p_ie->len > MAX_P2P_IE_LEN) {
- CFGP2P_ERR(("p2p_ie->len out of range - %d\n", p2p_ie->len));
- return 0;
- }
- pos = p2p_ie->subelts;
- end = p2p_ie->subelts + (p2p_ie->len - 4);
-
- CFGP2P_DBG((" found p2p ie ! lenth %d \n",
- p2p_ie->len));
-
- while (pos < end) {
- uint16 attr_len;
- if (pos + 2 >= end) {
- CFGP2P_DBG((" -- Invalid P2P attribute"));
- return 0;
- }
- attr_len = ((uint16) (((pos + 1)[1] << 8) | (pos + 1)[0]));
-
- if (pos + 3 + attr_len > end) {
- CFGP2P_DBG(("P2P: Attribute underflow "
- "(len=%u left=%d)",
- attr_len, (int) (end - pos - 3)));
- return 0;
- }
-
- /* if the Listen Channel attribute ID is 6 and the value is valid,
- * return the listen channel
- */
- if (pos[0] == 6) {
- /* listen channel subel length format
- * 1(id) + 2(len) + 3(country) + 1(op. class) + 1(chan num)
- */
- listen_channel = pos[1 + 2 + 3 + 1];
-
- if (listen_channel == SOCIAL_CHAN_1 ||
- listen_channel == SOCIAL_CHAN_2 ||
- listen_channel == SOCIAL_CHAN_3) {
- CFGP2P_DBG((" Found my Listen Channel %d \n", listen_channel));
- return listen_channel;
- }
- }
- pos += 3 + attr_len;
- }
- return 0;
-}
-
-#ifdef WL_SCAN_TYPE
-static u32
-wl_cfgscan_map_nl80211_scan_type(struct bcm_cfg80211 *cfg, struct cfg80211_scan_request *request)
-{
- u32 scan_flags = 0;
-
- if (!request) {
- return scan_flags;
- }
-
- if (request->flags & NL80211_SCAN_FLAG_LOW_SPAN) {
- scan_flags |= WL_SCANFLAGS_LOW_SPAN;
- }
- if (request->flags & NL80211_SCAN_FLAG_HIGH_ACCURACY) {
- scan_flags |= WL_SCANFLAGS_HIGH_ACCURACY;
- }
- if (request->flags & NL80211_SCAN_FLAG_LOW_POWER) {
- scan_flags |= WL_SCANFLAGS_LOW_POWER_SCAN;
- }
- if (request->flags & NL80211_SCAN_FLAG_LOW_PRIORITY) {
- scan_flags |= WL_SCANFLAGS_LOW_PRIO;
- }
-
- WL_INFORM(("scan flags. wl:%x cfg80211:%x\n", scan_flags, request->flags));
- return scan_flags;
-}
-#endif /* WL_SCAN_TYPE */
-
-#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 14, 0))
-#define IS_RADAR_CHAN(flags) (flags & (IEEE80211_CHAN_RADAR | IEEE80211_CHAN_PASSIVE_SCAN))
-#else
-#define IS_RADAR_CHAN(flags) (flags & (IEEE80211_CHAN_RADAR | IEEE80211_CHAN_NO_IR))
-#endif // endif
-static void
-wl_cfgscan_populate_scan_channels(struct bcm_cfg80211 *cfg, u16 *channel_list,
- struct cfg80211_scan_request *request, u32 *num_channels)
-{
- u32 i = 0, j = 0;
- u32 channel;
- u32 n_channels = 0;
- u32 chanspec = 0;
-
- if (!request || !request->n_channels) {
- /* Do full channel scan */
- return;
- }
-
- n_channels = request->n_channels;
- for (i = 0; i < n_channels; i++) {
- channel = ieee80211_frequency_to_channel(request->channels[i]->center_freq);
- /* SKIP DFS channels for Secondary interface */
- if ((cfg->escan_info.ndev != bcmcfg_to_prmry_ndev(cfg)) &&
- (IS_RADAR_CHAN(request->channels[i]->flags)))
- continue;
- if (!dhd_conf_match_channel(cfg->pub, channel))
- continue;
-
- chanspec = WL_CHANSPEC_BW_20;
- if (chanspec == INVCHANSPEC) {
- WL_ERR(("Invalid chanspec! Skipping channel\n"));
- continue;
- }
-
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 6, 0))
- if (request->channels[i]->band == IEEE80211_BAND_60GHZ) {
- /* Not supported */
- continue;
- }
-#endif /* LINUX_VER >= 3.6 */
-
- if (request->channels[i]->band == IEEE80211_BAND_2GHZ) {
-#ifdef WL_HOST_BAND_MGMT
- if (cfg->curr_band == WLC_BAND_5G) {
- WL_DBG(("In 5G only mode, omit 2G channel:%d\n", channel));
- continue;
- }
-#endif /* WL_HOST_BAND_MGMT */
- chanspec |= WL_CHANSPEC_BAND_2G;
- } else {
-#ifdef WL_HOST_BAND_MGMT
- if (cfg->curr_band == WLC_BAND_2G) {
- WL_DBG(("In 2G only mode, omit 5G channel:%d\n", channel));
- continue;
- }
-#endif /* WL_HOST_BAND_MGMT */
- chanspec |= WL_CHANSPEC_BAND_5G;
- }
- channel_list[j] = channel;
- channel_list[j] &= WL_CHANSPEC_CHAN_MASK;
- channel_list[j] |= chanspec;
- WL_SCAN(("Chan : %d, Channel spec: %x \n",
- channel, channel_list[j]));
- channel_list[j] = wl_chspec_host_to_driver(channel_list[j]);
- j++;
- }
- *num_channels = j;
-
-}
-
-static void
-wl_cfgscan_populate_scan_ssids(struct bcm_cfg80211 *cfg, u8 *buf_ptr, u32 buf_len,
- struct cfg80211_scan_request *request, u32 *ssid_num)
-{
- u32 n_ssids;
- wlc_ssid_t ssid;
- int i, j = 0;
-
- if (!request || !buf_ptr) {
- /* Do full channel scan */
- return;
- }
-
- n_ssids = request->n_ssids;
- if (n_ssids > 0) {
-
- if (buf_len < (n_ssids * sizeof(wlc_ssid_t))) {
- WL_ERR(("buf len not sufficient for scan ssids\n"));
- return;
- }
-
- for (i = 0; i < n_ssids; i++) {
- bzero(&ssid, sizeof(wlc_ssid_t));
- ssid.SSID_len = MIN(request->ssids[i].ssid_len, DOT11_MAX_SSID_LEN);
- /* Returning void here, as per previous line copy length does not exceed
- * DOT11_MAX_SSID_LEN
- */
- (void)memcpy_s(ssid.SSID, DOT11_MAX_SSID_LEN, request->ssids[i].ssid,
- ssid.SSID_len);
- if (!ssid.SSID_len) {
- WL_SCAN(("%d: Broadcast scan\n", i));
- } else {
- WL_SCAN(("%d: scan for %s size =%d\n", i,
- ssid.SSID, ssid.SSID_len));
- }
- /* For the multiple-SSID case, copy each SSID into the buffer; the
- * destination pointed to below is of type wlc_ssid_t
- */
- (void)memcpy_s(buf_ptr, sizeof(wlc_ssid_t), &ssid, sizeof(wlc_ssid_t));
- buf_ptr += sizeof(wlc_ssid_t);
- j++;
- }
- } else {
- WL_SCAN(("Broadcast scan\n"));
- }
- *ssid_num = j;
-}
-
-static s32
-wl_scan_prep(struct bcm_cfg80211 *cfg, void *scan_params, u32 len,
- struct cfg80211_scan_request *request)
-{
- wl_scan_params_t *params = NULL;
- wl_scan_params_v2_t *params_v2 = NULL;
- u32 scan_type = 0;
- u32 scan_param_size = 0;
- u32 n_channels = 0;
- u32 n_ssids = 0;
- uint16 *chan_list = NULL;
- u32 channel_offset = 0;
- u32 cur_offset;
-
- if (!scan_params) {
- return BCME_ERROR;
- }
-
- if (cfg->active_scan == PASSIVE_SCAN) {
- WL_INFORM_MEM(("Enforcing passive scan\n"));
- scan_type = WL_SCANFLAGS_PASSIVE;
- }
-
- WL_DBG(("Preparing Scan request\n"));
- if (cfg->scan_params_v2) {
- params_v2 = (wl_scan_params_v2_t *)scan_params;
- scan_param_size = sizeof(wl_scan_params_v2_t);
- channel_offset = offsetof(wl_scan_params_v2_t, channel_list);
- } else {
- params = (wl_scan_params_t *)scan_params;
- scan_param_size = sizeof(wl_scan_params_t);
- channel_offset = offsetof(wl_scan_params_t, channel_list);
- }
-
- if (params_v2) {
- /* scan params ver2 */
-#if defined(WL_SCAN_TYPE)
- scan_type += wl_cfgscan_map_nl80211_scan_type(cfg, request);
-#endif /* WL_SCAN_TYPE */
-
- (void)memcpy_s(&params_v2->bssid, ETHER_ADDR_LEN, &ether_bcast, ETHER_ADDR_LEN);
- params_v2->version = htod16(WL_SCAN_PARAMS_VERSION_V2);
- params_v2->length = htod16(sizeof(wl_scan_params_v2_t));
- params_v2->bss_type = DOT11_BSSTYPE_ANY;
- params_v2->scan_type = htod32(scan_type);
- params_v2->nprobes = htod32(-1);
- params_v2->active_time = htod32(-1);
- params_v2->passive_time = htod32(-1);
- params_v2->home_time = htod32(-1);
- params_v2->channel_num = 0;
- bzero(&params_v2->ssid, sizeof(wlc_ssid_t));
- chan_list = params_v2->channel_list;
- } else {
- /* scan params ver 1 */
- if (!params) {
- ASSERT(0);
- return BCME_ERROR;
- }
- (void)memcpy_s(&params->bssid, ETHER_ADDR_LEN, &ether_bcast, ETHER_ADDR_LEN);
- params->bss_type = DOT11_BSSTYPE_ANY;
- params->scan_type = 0;
- params->nprobes = htod32(-1);
- params->active_time = htod32(-1);
- params->passive_time = htod32(-1);
- params->home_time = htod32(-1);
- params->channel_num = 0;
- bzero(&params->ssid, sizeof(wlc_ssid_t));
- chan_list = params->channel_list;
- }
-
- if (!request) {
- /* scan_request null, do scan based on base config */
- WL_DBG(("scan_request is null\n"));
- return BCME_OK;
- }
-
- WL_INFORM(("n_channels:%d n_ssids:%d\n", request->n_channels, request->n_ssids));
-
- cur_offset = channel_offset;
- /* Copy channel array if applicable */
- if ((request->n_channels > 0) && chan_list) {
- if (len >= (scan_param_size + (request->n_channels * sizeof(u16)))) {
- wl_cfgscan_populate_scan_channels(cfg,
- chan_list, request, &n_channels);
- cur_offset += (n_channels * (sizeof(u16)));
- }
- }
-
- /* Copy ssid array if applicable */
- if (request->n_ssids > 0) {
- cur_offset = (u32) roundup(cur_offset, sizeof(u32));
- if (len > (cur_offset + (request->n_ssids * sizeof(wlc_ssid_t)))) {
- u32 rem_len = len - cur_offset;
- wl_cfgscan_populate_scan_ssids(cfg,
- ((u8 *)scan_params + cur_offset), rem_len, request, &n_ssids);
- }
- }
-
- if (n_ssids || n_channels) {
- u32 channel_num =
- htod32((n_ssids << WL_SCAN_PARAMS_NSSID_SHIFT) |
- (n_channels & WL_SCAN_PARAMS_COUNT_MASK));
- if (params_v2) {
- params_v2->channel_num = channel_num;
- if (n_channels == 1) {
- params_v2->active_time = htod32(WL_SCAN_CONNECT_DWELL_TIME_MS);
- params_v2->nprobes = htod32(
- params_v2->active_time / WL_SCAN_JOIN_PROBE_INTERVAL_MS);
- }
- } else {
- params->channel_num = channel_num;
- if (n_channels == 1) {
- params->active_time = htod32(WL_SCAN_CONNECT_DWELL_TIME_MS);
- params->nprobes = htod32(
- params->active_time / WL_SCAN_JOIN_PROBE_INTERVAL_MS);
- }
- }
- }
-
- WL_INFORM(("scan_prep done. n_channels:%d n_ssids:%d\n", n_channels, n_ssids));
- return BCME_OK;
-}
-
-static s32
-wl_get_valid_channels(struct net_device *ndev, u8 *valid_chan_list, s32 size)
-{
- wl_uint32_list_t *list;
- s32 err = BCME_OK;
- if (valid_chan_list == NULL || size <= 0)
- return -ENOMEM;
-
- bzero(valid_chan_list, size);
- list = (wl_uint32_list_t *)(void *) valid_chan_list;
- list->count = htod32(WL_NUMCHANNELS);
- err = wldev_ioctl_get(ndev, WLC_GET_VALID_CHANNELS, valid_chan_list, size);
- if (err != 0) {
- WL_ERR(("get channels failed with %d\n", err));
- }
-
- return err;
-}
-
-static s32
-wl_run_escan(struct bcm_cfg80211 *cfg, struct net_device *ndev,
- struct cfg80211_scan_request *request, uint16 action)
-{
- s32 err = BCME_OK;
- u32 n_channels;
- u32 n_ssids;
- s32 params_size;
- wl_escan_params_t *eparams = NULL;
- wl_escan_params_v2_t *eparams_v2 = NULL;
- u8 *scan_params = NULL;
- u8 *params = NULL;
- u8 chan_buf[sizeof(u32)*(WL_NUMCHANNELS + 1)];
- u32 num_chans = 0;
- s32 channel;
- u32 n_valid_chan;
- s32 search_state = WL_P2P_DISC_ST_SCAN;
- u32 i, j, n_nodfs = 0;
- u16 *default_chan_list = NULL;
- wl_uint32_list_t *list;
- s32 bssidx = -1;
- struct net_device *dev = NULL;
-#if defined(USE_INITIAL_2G_SCAN) || defined(USE_INITIAL_SHORT_DWELL_TIME)
- bool is_first_init_2g_scan = false;
-#endif /* USE_INITIAL_2G_SCAN || USE_INITIAL_SHORT_DWELL_TIME */
- p2p_scan_purpose_t p2p_scan_purpose = P2P_SCAN_PURPOSE_MIN;
- u32 chan_mem = 0;
- u32 sync_id = 0;
-
- WL_DBG(("Enter \n"));
-
- /* the scan request can come in empty: perform the default full scan */
- if (!cfg) {
- err = -EINVAL;
- goto exit;
- }
-
- if (cfg->scan_params_v2) {
- params_size = (WL_SCAN_PARAMS_V2_FIXED_SIZE +
- OFFSETOF(wl_escan_params_v2_t, params));
- } else {
- params_size = (WL_SCAN_PARAMS_FIXED_SIZE + OFFSETOF(wl_escan_params_t, params));
- }
-
- if (!cfg->p2p_supported || !p2p_scan(cfg)) {
- /* LEGACY SCAN TRIGGER */
- WL_SCAN((" LEGACY E-SCAN START\n"));
-
-#if defined(USE_INITIAL_2G_SCAN) || defined(USE_INITIAL_SHORT_DWELL_TIME)
- if (!request) {
- err = -EINVAL;
- goto exit;
- }
- if (ndev == bcmcfg_to_prmry_ndev(cfg) && g_first_broadcast_scan == true) {
-#ifdef USE_INITIAL_2G_SCAN
- struct ieee80211_channel tmp_channel_list[CH_MAX_2G_CHANNEL];
- /* allow one 5G channel to add previous connected channel in 5G */
- bool allow_one_5g_channel = TRUE;
- j = 0;
- for (i = 0; i < request->n_channels; i++) {
- int tmp_chan = ieee80211_frequency_to_channel
- (request->channels[i]->center_freq);
- if (tmp_chan > CH_MAX_2G_CHANNEL) {
- if (allow_one_5g_channel)
- allow_one_5g_channel = FALSE;
- else
- continue;
- }
- if (j > CH_MAX_2G_CHANNEL) {
- WL_ERR(("Index %d exceeds max 2.4GHz channels %d"
- " and previous 5G connected channel\n",
- j, CH_MAX_2G_CHANNEL));
- break;
- }
- bcopy(request->channels[i], &tmp_channel_list[j],
- sizeof(struct ieee80211_channel));
- WL_SCAN(("channel of request->channels[%d]=%d\n", i, tmp_chan));
- j++;
- }
- if ((j > 0) && (j <= CH_MAX_2G_CHANNEL)) {
- for (i = 0; i < j; i++)
- bcopy(&tmp_channel_list[i], request->channels[i],
- sizeof(struct ieee80211_channel));
-
- request->n_channels = j;
- is_first_init_2g_scan = true;
- }
- else
- WL_ERR(("Invalid number of 2.4GHz channels %d\n", j));
-
- WL_SCAN(("request->n_channels=%d\n", request->n_channels));
-#else /* USE_INITIAL_SHORT_DWELL_TIME */
- is_first_init_2g_scan = true;
-#endif /* USE_INITIAL_2G_SCAN */
- g_first_broadcast_scan = false;
- }
-#endif /* USE_INITIAL_2G_SCAN || USE_INITIAL_SHORT_DWELL_TIME */
-
- /* if the scan request is not empty, parse the scan request parameters */
- if (request != NULL) {
- n_channels = request->n_channels;
- n_ssids = request->n_ssids;
- if (n_channels % 2)
- /* If n_channels is odd, add a pad of u16 */
- params_size += sizeof(u16) * (n_channels + 1);
- else
- params_size += sizeof(u16) * n_channels;
-
- /* Allocate space for populating ssids in wl_escan_params_t struct */
- params_size += sizeof(struct wlc_ssid) * n_ssids;
- }
- params = MALLOCZ(cfg->osh, params_size);
- if (params == NULL) {
- err = -ENOMEM;
- goto exit;
- }
-
- wl_escan_set_sync_id(sync_id, cfg);
- if (cfg->scan_params_v2) {
- eparams_v2 = (wl_escan_params_v2_t *)params;
- scan_params = (u8 *)&eparams_v2->params;
- eparams_v2->version = htod32(ESCAN_REQ_VERSION_V2);
- eparams_v2->action = htod16(action);
- eparams_v2->sync_id = sync_id;
- } else {
- eparams = (wl_escan_params_t *)params;
- scan_params = (u8 *)&eparams->params;
- eparams->version = htod32(ESCAN_REQ_VERSION);
- eparams->action = htod16(action);
- eparams->sync_id = sync_id;
- }
-
- if (wl_scan_prep(cfg, scan_params, params_size, request) < 0) {
- WL_ERR(("scan_prep failed\n"));
- err = -EINVAL;
- goto exit;
- }
-
-#if defined(USE_INITIAL_2G_SCAN) || defined(USE_INITIAL_SHORT_DWELL_TIME)
- /* Override active_time to reduce scan time if it's the first broadcast scan. */
- if (is_first_init_2g_scan) {
- if (eparams_v2) {
- eparams_v2->params.active_time = FIRST_SCAN_ACTIVE_DWELL_TIME_MS;
- } else {
- eparams->params.active_time = FIRST_SCAN_ACTIVE_DWELL_TIME_MS;
- }
- }
-#endif /* USE_INITIAL_2G_SCAN || USE_INITIAL_SHORT_DWELL_TIME */
-
- wl_escan_set_type(cfg, WL_SCANTYPE_LEGACY);
- if (params_size + sizeof("escan") >= WLC_IOCTL_MEDLEN) {
- WL_ERR(("ioctl buffer length not sufficient\n"));
- MFREE(cfg->osh, params, params_size);
- err = -ENOMEM;
- goto exit;
- }
-
- bssidx = wl_get_bssidx_by_wdev(cfg, ndev->ieee80211_ptr);
-// WL_MSG(ndev->name, "LEGACY_SCAN sync ID: %d, bssidx: %d\n", sync_id, bssidx);
- err = wldev_iovar_setbuf(ndev, "escan", params, params_size,
- cfg->escan_ioctl_buf, WLC_IOCTL_MEDLEN, NULL);
- if (unlikely(err)) {
- if (err == BCME_EPERM)
- /* Scan Not permitted at this point of time */
- WL_DBG((" Escan not permitted at this time (%d)\n", err));
- else
- WL_ERR((" Escan set error (%d)\n", err));
- } else {
- DBG_EVENT_LOG((dhd_pub_t *)cfg->pub, WIFI_EVENT_DRIVER_SCAN_REQUESTED);
- }
- MFREE(cfg->osh, params, params_size);
- }
- else if (p2p_is_on(cfg) && p2p_scan(cfg)) {
- /* P2P SCAN TRIGGER */
- s32 _freq = 0;
- n_nodfs = 0;
-
- if (request && request->n_channels) {
- num_chans = request->n_channels;
- WL_SCAN((" chann number : %d\n", num_chans));
- chan_mem = (u32)(num_chans * sizeof(*default_chan_list));
- default_chan_list = MALLOCZ(cfg->osh, chan_mem);
- if (default_chan_list == NULL) {
- WL_ERR(("channel list allocation failed \n"));
- err = -ENOMEM;
- goto exit;
- }
- if (!wl_get_valid_channels(ndev, chan_buf, sizeof(chan_buf))) {
-#ifdef P2P_SKIP_DFS
- int is_printed = false;
-#endif /* P2P_SKIP_DFS */
- list = (wl_uint32_list_t *) chan_buf;
- n_valid_chan = dtoh32(list->count);
- if (n_valid_chan > WL_NUMCHANNELS) {
- WL_ERR(("wrong n_valid_chan:%d\n", n_valid_chan));
- MFREE(cfg->osh, default_chan_list, chan_mem);
- err = -EINVAL;
- goto exit;
- }
-
- for (i = 0; i < num_chans; i++)
- {
-#ifdef WL_HOST_BAND_MGMT
- int channel_band = 0;
-#endif /* WL_HOST_BAND_MGMT */
- _freq = request->channels[i]->center_freq;
- channel = ieee80211_frequency_to_channel(_freq);
-#ifdef WL_HOST_BAND_MGMT
- channel_band = (channel > CH_MAX_2G_CHANNEL) ?
- WLC_BAND_5G : WLC_BAND_2G;
- if ((cfg->curr_band != WLC_BAND_AUTO) &&
- (cfg->curr_band != channel_band) &&
- !IS_P2P_SOCIAL_CHANNEL(channel))
- continue;
-#endif /* WL_HOST_BAND_MGMT */
-
- /* ignore DFS channels */
- if (request->channels[i]->flags &
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 14, 0))
- (IEEE80211_CHAN_NO_IR
- | IEEE80211_CHAN_RADAR))
-#else
- (IEEE80211_CHAN_RADAR
- | IEEE80211_CHAN_PASSIVE_SCAN))
-#endif // endif
- continue;
-#ifdef P2P_SKIP_DFS
- if (channel >= 52 && channel <= 144) {
- if (is_printed == false) {
- WL_ERR(("SKIP DFS CHANs(52~144)\n"));
- is_printed = true;
- }
- continue;
- }
-#endif /* P2P_SKIP_DFS */
-
- for (j = 0; j < n_valid_chan; j++) {
-					/* allow only channels supported by
-					 * the current regulatory domain
-					 */
- if (n_nodfs >= num_chans) {
- break;
- }
- if (channel == (dtoh32(list->element[j]))) {
- default_chan_list[n_nodfs++] =
- channel;
- }
- }
-
- }
- }
- if (num_chans == SOCIAL_CHAN_CNT && (
- (default_chan_list[0] == SOCIAL_CHAN_1) &&
- (default_chan_list[1] == SOCIAL_CHAN_2) &&
- (default_chan_list[2] == SOCIAL_CHAN_3))) {
- /* SOCIAL CHANNELS 1, 6, 11 */
- search_state = WL_P2P_DISC_ST_SEARCH;
- p2p_scan_purpose = P2P_SCAN_SOCIAL_CHANNEL;
- WL_DBG(("P2P SEARCH PHASE START \n"));
- } else if (((dev = wl_to_p2p_bss_ndev(cfg, P2PAPI_BSSCFG_CONNECTION1)) &&
- (wl_get_mode_by_netdev(cfg, dev) == WL_MODE_AP)) ||
- ((dev = wl_to_p2p_bss_ndev(cfg, P2PAPI_BSSCFG_CONNECTION2)) &&
- (wl_get_mode_by_netdev(cfg, dev) == WL_MODE_AP))) {
- /* If you are already a GO, then do SEARCH only */
- WL_DBG(("Already a GO. Do SEARCH Only"));
- search_state = WL_P2P_DISC_ST_SEARCH;
- num_chans = n_nodfs;
- p2p_scan_purpose = P2P_SCAN_NORMAL;
-
- } else if (num_chans == 1) {
- p2p_scan_purpose = P2P_SCAN_CONNECT_TRY;
- WL_INFORM_MEM(("Trigger p2p join scan\n"));
- } else if (num_chans == SOCIAL_CHAN_CNT + 1) {
- /* SOCIAL_CHAN_CNT + 1 takes care of the Progressive scan supported by
- * the supplicant
- */
- p2p_scan_purpose = P2P_SCAN_SOCIAL_CHANNEL;
- } else {
- WL_DBG(("P2P SCAN STATE START \n"));
- num_chans = n_nodfs;
- p2p_scan_purpose = P2P_SCAN_NORMAL;
- }
- } else {
- err = -EINVAL;
- goto exit;
- }
- err = wl_cfgp2p_escan(cfg, ndev, ACTIVE_SCAN, num_chans, default_chan_list,
- search_state, action,
- wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_DEVICE), NULL,
- p2p_scan_purpose);
-
- if (!err)
- cfg->p2p->search_state = search_state;
-
- MFREE(cfg->osh, default_chan_list, chan_mem);
- }
-exit:
- if (unlikely(err)) {
-		/* Don't print an error in case of scan suppress */
- if ((err == BCME_EPERM) && cfg->scan_suppressed)
- WL_DBG(("Escan failed: Scan Suppressed \n"));
- else
- WL_ERR(("scan error (%d)\n", err));
- }
- return err;
-}
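
For reference, wl_run_escan() above fills either a v1 or a v2 escan header around the same common scan parameters, depending on cfg->scan_params_v2. A minimal user-space sketch of that split; the struct layouts and version constants below are simplified stand-ins, not the real wlioctl definitions.

#include <stdint.h>
#include <stdio.h>

/* Simplified stand-ins for the wlioctl escan headers (assumption). */
struct escan_params_v1 { uint32_t version; uint16_t action; uint16_t sync_id; uint8_t params[64]; };
struct escan_params_v2 { uint32_t version; uint16_t action; uint16_t sync_id; uint8_t params[96]; };

#define ESCAN_REQ_VER_V1 1u
#define ESCAN_REQ_VER_V2 2u

/* Fill the version-dependent header and return a pointer to the common
 * parameter block, mirroring the v1/v2 branch in wl_run_escan(). */
static uint8_t *fill_escan_header(void *buf, int use_v2, uint16_t action, uint16_t sync_id)
{
	if (use_v2) {
		struct escan_params_v2 *p = buf;
		p->version = ESCAN_REQ_VER_V2;
		p->action = action;
		p->sync_id = sync_id;
		return p->params;
	} else {
		struct escan_params_v1 *p = buf;
		p->version = ESCAN_REQ_VER_V1;
		p->action = action;
		p->sync_id = sync_id;
		return p->params;
	}
}

int main(void)
{
	uint8_t buf[256] = {0};
	uint8_t *scan_params = fill_escan_header(buf, 1, 1 /* start */, 0x1234);

	printf("common scan params start at offset %ld\n", (long)(scan_params - buf));
	return 0;
}
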
-
-s32
-wl_do_escan(struct bcm_cfg80211 *cfg, struct wiphy *wiphy, struct net_device *ndev,
- struct cfg80211_scan_request *request)
-{
- s32 err = BCME_OK;
- s32 passive_scan;
- s32 passive_scan_time;
- s32 passive_scan_time_org;
- wl_scan_results_t *results;
- WL_SCAN(("Enter \n"));
-
- results = wl_escan_get_buf(cfg, FALSE);
- results->version = 0;
- results->count = 0;
- results->buflen = WL_SCAN_RESULTS_FIXED_SIZE;
-
- cfg->escan_info.ndev = ndev;
- cfg->escan_info.wiphy = wiphy;
- cfg->escan_info.escan_state = WL_ESCAN_STATE_SCANING;
- passive_scan = cfg->active_scan ? 0 : 1;
- err = wldev_ioctl_set(ndev, WLC_SET_PASSIVE_SCAN,
- &passive_scan, sizeof(passive_scan));
- if (unlikely(err)) {
- WL_ERR(("error (%d)\n", err));
- goto exit;
- }
-
- if (passive_channel_skip) {
-
- err = wldev_ioctl_get(ndev, WLC_GET_SCAN_PASSIVE_TIME,
- &passive_scan_time_org, sizeof(passive_scan_time_org));
- if (unlikely(err)) {
- WL_ERR(("== error (%d)\n", err));
- goto exit;
- }
-
- WL_SCAN(("PASSIVE SCAN time : %d \n", passive_scan_time_org));
-
- passive_scan_time = 0;
- err = wldev_ioctl_set(ndev, WLC_SET_SCAN_PASSIVE_TIME,
- &passive_scan_time, sizeof(passive_scan_time));
- if (unlikely(err)) {
- WL_ERR(("== error (%d)\n", err));
- goto exit;
- }
-
-		WL_SCAN(("PASSIVE SCAN SKIPPED!! (passive_channel_skip:%d) \n",
- passive_channel_skip));
- }
-
- err = wl_run_escan(cfg, ndev, request, WL_SCAN_ACTION_START);
-
- if (passive_channel_skip) {
- err = wldev_ioctl_set(ndev, WLC_SET_SCAN_PASSIVE_TIME,
- &passive_scan_time_org, sizeof(passive_scan_time_org));
- if (unlikely(err)) {
- WL_ERR(("== error (%d)\n", err));
- goto exit;
- }
-
- WL_SCAN(("PASSIVE SCAN RECOVERED!! (passive_scan_time_org:%d) \n",
- passive_scan_time_org));
- }
-
-exit:
- return err;
-}
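
The passive_channel_skip handling in wl_do_escan() is a save/zero/restore pattern around the scan trigger: read the current passive dwell time, set it to zero for this scan, then put the original value back. A small sketch of the same shape, with hypothetical getter/setter functions standing in for the WLC_GET/SET_SCAN_PASSIVE_TIME ioctls.

#include <stdio.h>

/* Hypothetical stand-ins for the WLC_GET/SET_SCAN_PASSIVE_TIME ioctls. */
static int passive_time_ms = 110;                       /* simulated device state */
static int get_passive_time(int *v) { *v = passive_time_ms; return 0; }
static int set_passive_time(int v)  { passive_time_ms = v; return 0; }

static int run_scan(void) { printf("scan with passive_time=%d ms\n", passive_time_ms); return 0; }

/* Zero the passive dwell time for this scan only, then restore it. */
static int scan_skipping_passive_dwell(void)
{
	int saved, err;

	if ((err = get_passive_time(&saved)) != 0)
		return err;
	if ((err = set_passive_time(0)) != 0)
		return err;

	err = run_scan();

	/* Restore the saved value, as wl_do_escan() does after wl_run_escan(). */
	set_passive_time(saved);
	return err;
}

int main(void) { return scan_skipping_passive_dwell(); }
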
-
-static s32
-wl_get_scan_timeout_val(struct bcm_cfg80211 *cfg)
-{
- u32 scan_timer_interval_ms = WL_SCAN_TIMER_INTERVAL_MS;
-
- /* If NAN is enabled adding +10 sec to the existing timeout value */
-#ifdef WL_NAN
- if (cfg->nan_enable) {
- scan_timer_interval_ms += WL_SCAN_TIMER_INTERVAL_MS_NAN;
- }
-#endif /* WL_NAN */
- WL_MEM(("scan_timer_interval_ms %d\n", scan_timer_interval_ms));
- return scan_timer_interval_ms;
-}
-
-#define SCAN_EBUSY_RETRY_LIMIT 20
-static s32
-wl_cfgscan_handle_scanbusy(struct bcm_cfg80211 *cfg, struct net_device *ndev, s32 err)
-{
- s32 scanbusy_err = 0;
- static u32 busy_count = 0;
-
- if (!err) {
- busy_count = 0;
- return scanbusy_err;
- }
- if (err == BCME_BUSY || err == BCME_NOTREADY) {
-		WL_ERR(("Scan err = %d, returning -EBUSY (%d)\n", err, -EBUSY));
- scanbusy_err = -EBUSY;
- } else if ((err == BCME_EPERM) && cfg->scan_suppressed) {
- WL_ERR(("Scan not permitted due to scan suppress\n"));
- scanbusy_err = -EPERM;
- } else {
- /* For all other fw errors, use a generic error code as return
- * value to cfg80211 stack
- */
- scanbusy_err = -EAGAIN;
- }
-
- if (scanbusy_err == -EBUSY) {
- /* Flush FW preserve buffer logs for checking failure */
- if (busy_count++ > (SCAN_EBUSY_RETRY_LIMIT/5)) {
- wl_flush_fw_log_buffer(ndev, FW_LOGSET_MASK_ALL);
- }
- if (busy_count > SCAN_EBUSY_RETRY_LIMIT) {
- struct ether_addr bssid;
- s32 ret = 0;
- dhd_pub_t *dhdp = (dhd_pub_t *)(cfg->pub);
- if (dhd_query_bus_erros(dhdp)) {
- return BCME_NOTREADY;
- }
- dhdp->scan_busy_occurred = TRUE;
- busy_count = 0;
- WL_ERR(("Unusual continuous EBUSY error, %d %d %d %d %d %d %d %d %d\n",
- wl_get_drv_status(cfg, SCANNING, ndev),
- wl_get_drv_status(cfg, SCAN_ABORTING, ndev),
- wl_get_drv_status(cfg, CONNECTING, ndev),
- wl_get_drv_status(cfg, CONNECTED, ndev),
- wl_get_drv_status(cfg, DISCONNECTING, ndev),
- wl_get_drv_status(cfg, AP_CREATING, ndev),
- wl_get_drv_status(cfg, AP_CREATED, ndev),
- wl_get_drv_status(cfg, SENDING_ACT_FRM, ndev),
- wl_get_drv_status(cfg, SENDING_ACT_FRM, ndev)));
-
-#if defined(DHD_DEBUG) && defined(DHD_FW_COREDUMP)
- if (dhdp->memdump_enabled) {
- dhdp->memdump_type = DUMP_TYPE_SCAN_BUSY;
- dhd_bus_mem_dump(dhdp);
- }
-#endif /* DHD_DEBUG && DHD_FW_COREDUMP */
- dhdp->hang_reason = HANG_REASON_SCAN_BUSY;
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
- dhd_os_send_hang_message(dhdp);
-#else
- WL_ERR(("%s: HANG event is unsupported\n", __FUNCTION__));
-#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27) */
-
- bzero(&bssid, sizeof(bssid));
- if ((ret = wldev_ioctl_get(ndev, WLC_GET_BSSID,
- &bssid, ETHER_ADDR_LEN)) == 0) {
-				WL_ERR(("FW is connected with " MACDBG "\n",
- MAC2STRDBG(bssid.octet)));
- } else {
- WL_ERR(("GET BSSID failed with %d\n", ret));
- }
-
- wl_cfg80211_scan_abort(cfg);
-
- } else {
- /* Hold the context for 400msec, so that 10 subsequent scans
- * can give a buffer of 4sec which is enough to
- * cover any on-going scan in the firmware
- */
- WL_DBG(("Enforcing delay for EBUSY case \n"));
- msleep(400);
- }
- } else {
- busy_count = 0;
- }
-
- return scanbusy_err;
-}
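
wl_cfgscan_handle_scanbusy() rate-limits consecutive -EBUSY results: after a few of them it flushes the firmware preserve logs, past SCAN_EBUSY_RETRY_LIMIT it escalates (memdump, hang event, scan abort), otherwise it backs off for 400 ms. A condensed sketch of just that counter logic, with the driver actions reduced to prints.

#include <stdio.h>

#define SCAN_EBUSY_RETRY_LIMIT 20

/* Returns nonzero when the caller should escalate (dump, hang, scan abort). */
static int handle_scan_busy(int err_is_busy)
{
	static unsigned int busy_count;

	if (!err_is_busy) {
		busy_count = 0;                 /* any success resets the window */
		return 0;
	}
	if (busy_count++ > SCAN_EBUSY_RETRY_LIMIT / 5)
		printf("flush firmware preserve logs\n");
	if (busy_count > SCAN_EBUSY_RETRY_LIMIT) {
		busy_count = 0;
		printf("escalate: continuous EBUSY\n");
		return 1;
	}
	printf("back off ~400 ms before the next attempt\n");
	return 0;
}

int main(void)
{
	int i;

	for (i = 0; i < 25; i++)
		handle_scan_busy(1);
	return 0;
}
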
-
-s32
-__wl_cfg80211_scan(struct wiphy *wiphy, struct net_device *ndev,
- struct cfg80211_scan_request *request,
- struct cfg80211_ssid *this_ssid)
-{
- struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
- struct cfg80211_ssid *ssids;
- struct ether_addr primary_mac;
- bool p2p_ssid;
-#ifdef WL11U
- bcm_tlv_t *interworking_ie;
-#endif // endif
- s32 err = 0;
- s32 bssidx = -1;
- s32 i;
- bool escan_req_failed = false;
- s32 scanbusy_err = 0;
-
- unsigned long flags;
-#ifdef WL_CFG80211_VSDB_PRIORITIZE_SCAN_REQUEST
- struct net_device *remain_on_channel_ndev = NULL;
-#endif // endif
-	/*
-	 * Hostapd triggers a scan before starting automatic channel selection
-	 * to collect channel characteristics. However, the firmware scan engine
-	 * doesn't support collecting channel characteristics along with a scan,
-	 * hence return scan success.
-	 */
- if (request && (scan_req_iftype(request) == NL80211_IFTYPE_AP)) {
- WL_DBG(("Scan Command on SoftAP Interface. Ignoring...\n"));
-// terence 20161023: let it scan in SoftAP mode
-// return 0;
- }
-
- ndev = ndev_to_wlc_ndev(ndev, cfg);
-
- if (WL_DRV_STATUS_SENDING_AF_FRM_EXT(cfg)) {
- WL_ERR(("Sending Action Frames. Try it again.\n"));
- return -EAGAIN;
- }
-
- WL_DBG(("Enter wiphy (%p)\n", wiphy));
- if (wl_get_drv_status_all(cfg, SCANNING)) {
- if (cfg->scan_request == NULL) {
- wl_clr_drv_status_all(cfg, SCANNING);
- WL_DBG(("<<<<<<<<<<<Force Clear Scanning Status>>>>>>>>>>>\n"));
- } else {
- WL_ERR(("Scanning already\n"));
- return -EAGAIN;
- }
- }
- if (wl_get_drv_status(cfg, SCAN_ABORTING, ndev)) {
- WL_ERR(("Scanning being aborted\n"));
- return -EAGAIN;
- }
- if (request && request->n_ssids > WL_SCAN_PARAMS_SSID_MAX) {
-		WL_ERR(("n_ssids > WL_SCAN_PARAMS_SSID_MAX\n"));
- return -EOPNOTSUPP;
- }
-#ifdef WL_BCNRECV
- /* check fakeapscan in progress then abort */
- wl_android_bcnrecv_stop(ndev, WL_BCNRECV_SCANBUSY);
-#endif /* WL_BCNRECV */
-
-#ifdef WL_CFG80211_VSDB_PRIORITIZE_SCAN_REQUEST
- mutex_lock(&cfg->scan_sync);
- remain_on_channel_ndev = wl_cfg80211_get_remain_on_channel_ndev(cfg);
- if (remain_on_channel_ndev) {
- WL_DBG(("Remain_on_channel bit is set, somehow it didn't get cleared\n"));
- wl_notify_escan_complete(cfg, remain_on_channel_ndev, true, true);
- }
- mutex_unlock(&cfg->scan_sync);
-#endif /* WL_CFG80211_VSDB_PRIORITIZE_SCAN_REQUEST */
-
-#ifdef P2P_LISTEN_OFFLOADING
- wl_cfg80211_cancel_p2plo(cfg);
-#endif /* P2P_LISTEN_OFFLOADING */
-
- if (request) { /* scan bss */
- ssids = request->ssids;
- p2p_ssid = false;
- for (i = 0; i < request->n_ssids; i++) {
- if (ssids[i].ssid_len &&
- IS_P2P_SSID(ssids[i].ssid, ssids[i].ssid_len)) {
- /* P2P Scan */
-#ifdef WL_BLOCK_P2P_SCAN_ON_STA
- if (!(IS_P2P_IFACE(request->wdev))) {
- /* P2P scan on non-p2p iface. Fail scan */
- WL_ERR(("p2p_search on non p2p iface\n"));
- goto scan_out;
- }
-#endif /* WL_BLOCK_P2P_SCAN_ON_STA */
- p2p_ssid = true;
- break;
- }
- }
- if (p2p_ssid) {
- if (cfg->p2p_supported) {
- /* p2p scan trigger */
- if (p2p_on(cfg) == false) {
- /* p2p on at the first time */
- p2p_on(cfg) = true;
- wl_cfgp2p_set_firm_p2p(cfg);
- get_primary_mac(cfg, &primary_mac);
-#ifndef WL_P2P_USE_RANDMAC
- wl_cfgp2p_generate_bss_mac(cfg, &primary_mac);
-#endif /* WL_P2P_USE_RANDMAC */
-#if defined(P2P_IE_MISSING_FIX)
- cfg->p2p_prb_noti = false;
-#endif // endif
- }
- wl_clr_p2p_status(cfg, GO_NEG_PHASE);
- WL_DBG(("P2P: GO_NEG_PHASE status cleared \n"));
- p2p_scan(cfg) = true;
- }
- } else {
- /* legacy scan trigger
- * So, we have to disable p2p discovery if p2p discovery is on
- */
- if (cfg->p2p_supported) {
- p2p_scan(cfg) = false;
-				/* If the net device is not the primary one and p2p is on,
-				 * we will do the p2p scan using P2PAPI_BSSCFG_DEVICE.
-				 */
-
- if (p2p_scan(cfg) == false) {
- if (wl_get_p2p_status(cfg, DISCOVERY_ON)) {
- err = wl_cfgp2p_discover_enable_search(cfg,
- false);
- if (unlikely(err)) {
- goto scan_out;
- }
-
- }
- }
- }
- if (!cfg->p2p_supported || !p2p_scan(cfg)) {
- if ((bssidx = wl_get_bssidx_by_wdev(cfg,
- ndev->ieee80211_ptr)) < 0) {
- WL_ERR(("Find p2p index from ndev(%p) failed\n",
- ndev));
- err = BCME_ERROR;
- goto scan_out;
- }
-#ifdef WL11U
- if (request && (interworking_ie = wl_cfg80211_find_interworking_ie(
- request->ie, request->ie_len)) != NULL) {
- if ((err = wl_cfg80211_add_iw_ie(cfg, ndev, bssidx,
- VNDR_IE_CUSTOM_FLAG, interworking_ie->id,
- interworking_ie->data,
- interworking_ie->len)) != BCME_OK) {
- WL_ERR(("Failed to add interworking IE"));
- }
- } else if (cfg->wl11u) {
-					/* we have to clear the IW IE and disable gratuitous ARP */
- wl_cfg80211_clear_iw_ie(cfg, ndev, bssidx);
- err = wldev_iovar_setint_bsscfg(ndev, "grat_arp",
- 0, bssidx);
- /* we don't care about error here
- * because the only failure case is unsupported,
- * which is fine
- */
- if (unlikely(err)) {
- WL_ERR(("Set grat_arp failed:(%d) Ignore!\n", err));
- }
- cfg->wl11u = FALSE;
- }
-#endif /* WL11U */
- if (request) {
- err = wl_cfg80211_set_mgmt_vndr_ies(cfg,
- ndev_to_cfgdev(ndev), bssidx, VNDR_IE_PRBREQ_FLAG,
- request->ie, request->ie_len);
- }
-
- if (unlikely(err)) {
-// terence 20161023: let it scan in SoftAP mode
-// goto scan_out;
- }
-
- }
- }
- } else { /* scan in ibss */
- ssids = this_ssid;
- }
-
- if (request && cfg->p2p_supported) {
- WL_TRACE_HW4(("START SCAN\n"));
- DHD_OS_SCAN_WAKE_LOCK_TIMEOUT((dhd_pub_t *)(cfg->pub),
- SCAN_WAKE_LOCK_TIMEOUT);
- DHD_DISABLE_RUNTIME_PM((dhd_pub_t *)(cfg->pub));
- }
-
- if (cfg->p2p_supported) {
- if (request && p2p_on(cfg) && p2p_scan(cfg)) {
-
- /* find my listen channel */
- cfg->afx_hdl->my_listen_chan =
- wl_find_listen_channel(cfg, request->ie,
- request->ie_len);
- err = wl_cfgp2p_enable_discovery(cfg, ndev,
- request->ie, request->ie_len);
-
- if (unlikely(err)) {
- goto scan_out;
- }
- }
- }
-
- mutex_lock(&cfg->scan_sync);
- err = wl_do_escan(cfg, wiphy, ndev, request);
- if (likely(!err)) {
- goto scan_success;
- } else {
- escan_req_failed = true;
- goto scan_out;
- }
-
-scan_success:
- wl_cfgscan_handle_scanbusy(cfg, ndev, BCME_OK);
- cfg->scan_request = request;
- wl_set_drv_status(cfg, SCANNING, ndev);
- /* Arm the timer */
- mod_timer(&cfg->scan_timeout,
- jiffies + msecs_to_jiffies(wl_get_scan_timeout_val(cfg)));
- mutex_unlock(&cfg->scan_sync);
- return 0;
-
-scan_out:
- if (escan_req_failed) {
- WL_CFG_DRV_LOCK(&cfg->cfgdrv_lock, flags);
- cfg->scan_request = NULL;
- WL_CFG_DRV_UNLOCK(&cfg->cfgdrv_lock, flags);
- mutex_unlock(&cfg->scan_sync);
- /* Handling for scan busy errors */
- scanbusy_err = wl_cfgscan_handle_scanbusy(cfg, ndev, err);
- if (scanbusy_err == BCME_NOTREADY) {
- /* In case of bus failures avoid ioctl calls */
- DHD_OS_SCAN_WAKE_UNLOCK((dhd_pub_t *)(cfg->pub));
- return -ENODEV;
- }
- err = scanbusy_err;
- }
-
- DHD_OS_SCAN_WAKE_UNLOCK((dhd_pub_t *)(cfg->pub));
- return err;
-}
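
In __wl_cfg80211_scan() the request is treated as a P2P scan when IS_P2P_SSID() matches one of the requested SSIDs. A standalone sketch of that classification, written under the assumption that the macro compares against the P2P wildcard SSID "DIRECT-"; treat the exact constant and length as assumptions.

#include <string.h>
#include <stdio.h>

/* Assumed P2P wildcard SSID used by Wi-Fi Direct discovery. */
#define P2P_WILDCARD_SSID     "DIRECT-"
#define P2P_WILDCARD_SSID_LEN 7

struct scan_ssid { const char *ssid; size_t ssid_len; };

/* Mirror of the classification loop: the request is a P2P scan if any of
 * its SSIDs is exactly the P2P wildcard SSID. */
static int request_is_p2p_scan(const struct scan_ssid *ssids, int n_ssids)
{
	int i;

	for (i = 0; i < n_ssids; i++) {
		if (ssids[i].ssid_len == P2P_WILDCARD_SSID_LEN &&
		    !memcmp(ssids[i].ssid, P2P_WILDCARD_SSID, P2P_WILDCARD_SSID_LEN))
			return 1;
	}
	return 0;
}

int main(void)
{
	struct scan_ssid req[] = { { "HomeAP", 6 }, { "DIRECT-", 7 } };

	printf("p2p scan: %d\n", request_is_p2p_scan(req, 2));
	return 0;
}
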
-
-s32
-#if defined(WL_CFG80211_P2P_DEV_IF)
-wl_cfg80211_scan(struct wiphy *wiphy, struct cfg80211_scan_request *request)
-#else
-wl_cfg80211_scan(struct wiphy *wiphy, struct net_device *ndev,
- struct cfg80211_scan_request *request)
-#endif /* WL_CFG80211_P2P_DEV_IF */
-{
- s32 err = 0;
- struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
-#if defined(WL_CFG80211_P2P_DEV_IF)
- struct net_device *ndev = wdev_to_wlc_ndev(request->wdev, cfg);
-#endif /* WL_CFG80211_P2P_DEV_IF */
-
- WL_DBG(("Enter\n"));
- RETURN_EIO_IF_NOT_UP(cfg);
-
-#ifdef DHD_IFDEBUG
-#ifdef WL_CFG80211_P2P_DEV_IF
- PRINT_WDEV_INFO(request->wdev);
-#else
- PRINT_WDEV_INFO(ndev);
-#endif /* WL_CFG80211_P2P_DEV_IF */
-#endif /* DHD_IFDEBUG */
-
- if (ndev == bcmcfg_to_prmry_ndev(cfg)) {
- if (wl_cfg_multip2p_operational(cfg)) {
- WL_ERR(("wlan0 scan failed, p2p devices are operational"));
- return -ENODEV;
- }
- }
- err = wl_cfg80211_check_in4way(cfg, ndev_to_wlc_ndev(ndev, cfg), NO_SCAN_IN4WAY,
- WL_EXT_STATUS_SCAN, NULL);
- if (err)
- return err;
-
- err = __wl_cfg80211_scan(wiphy, ndev, request, NULL);
- if (unlikely(err)) {
- WL_ERR(("scan error (%d)\n", err));
- }
-#ifdef WL_DRV_AVOID_SCANCACHE
- /* Reset roam cache after successful scan request */
-#ifdef ROAM_CHANNEL_CACHE
- if (!err) {
- reset_roam_cache(cfg);
- }
-#endif /* ROAM_CHANNEL_CACHE */
-#endif /* WL_DRV_AVOID_SCANCACHE */
- return err;
-}
-
-/* Note: This API should be invoked with scan_sync mutex
- * held so that the scan_request data structures don't
- * get modified in between.
- */
-struct wireless_dev *
-wl_get_scan_wdev(struct bcm_cfg80211 *cfg)
-{
- struct wireless_dev *wdev = NULL;
-
- if (!cfg) {
- WL_ERR(("cfg ptr null\n"));
- return NULL;
- }
-
- if (!cfg->scan_request && !cfg->sched_scan_req) {
- /* No scans in progress */
- WL_MEM(("no scan in progress \n"));
- return NULL;
- }
-
- if (cfg->scan_request) {
- wdev = GET_SCAN_WDEV(cfg->scan_request);
-#ifdef WL_SCHED_SCAN
- } else if (cfg->sched_scan_req) {
- wdev = GET_SCHED_SCAN_WDEV(cfg->sched_scan_req);
-#endif /* WL_SCHED_SCAN */
- } else {
- WL_MEM(("no scan in progress \n"));
- }
-
- return wdev;
-}
-
-void wl_cfg80211_cancel_scan(struct bcm_cfg80211 *cfg)
-{
- struct wireless_dev *wdev = NULL;
- struct net_device *ndev = NULL;
-
- mutex_lock(&cfg->scan_sync);
- if (!cfg->scan_request && !cfg->sched_scan_req) {
- /* No scans in progress */
- WL_INFORM_MEM(("No scan in progress\n"));
- goto exit;
- }
-
- wdev = wl_get_scan_wdev(cfg);
- if (!wdev) {
- WL_ERR(("No wdev present\n"));
- goto exit;
- }
-
- ndev = wdev_to_wlc_ndev(wdev, cfg);
- wl_notify_escan_complete(cfg, ndev, true, true);
- WL_INFORM_MEM(("Scan aborted! \n"));
-exit:
- mutex_unlock(&cfg->scan_sync);
-}
-
-void wl_cfg80211_scan_abort(struct bcm_cfg80211 *cfg)
-{
- void *params = NULL;
- s32 params_size = 0;
- s32 err = BCME_OK;
- struct net_device *dev = bcmcfg_to_prmry_ndev(cfg);
- u32 channel, channel_num;
-
- if (!in_atomic()) {
- /* Abort scan params only need space for 1 channel and 0 ssids */
- if (cfg->scan_params_v2) {
- params_size = WL_SCAN_PARAMS_V2_FIXED_SIZE + 1 * sizeof(uint16);
- } else {
- params_size = WL_SCAN_PARAMS_FIXED_SIZE + 1 * sizeof(uint16);
- }
- params = MALLOCZ(cfg->osh, params_size);
- if (params == NULL) {
- WL_ERR(("mem alloc failed (%d bytes)\n", params_size));
- return;
- }
-
- /* Use magic value of channel=-1 to abort scan */
- channel = htodchanspec(-1);
- channel_num = htod32((0 << WL_SCAN_PARAMS_NSSID_SHIFT) |
- (1 & WL_SCAN_PARAMS_COUNT_MASK));
- if (cfg->scan_params_v2) {
- wl_scan_params_v2_t *params_v2 = (wl_scan_params_v2_t *)params;
- params_v2->channel_list[0] = channel;
- params_v2->channel_num = channel_num;
- } else {
- wl_scan_params_t *params_v1 = (wl_scan_params_t *)params;
- params_v1->channel_list[0] = channel;
- params_v1->channel_num = channel_num;
- }
- /* Do a scan abort to stop the driver's scan engine */
- err = wldev_ioctl_set(dev, WLC_SCAN, params, params_size);
- if (err < 0) {
- /* scan abort can fail if there is no outstanding scan */
- WL_DBG(("scan abort failed. ret:%d\n", err));
- }
- MFREE(cfg->osh, params, params_size);
- }
-#ifdef WLTDLS
- if (cfg->tdls_mgmt_frame) {
- MFREE(cfg->osh, cfg->tdls_mgmt_frame, cfg->tdls_mgmt_frame_len);
- cfg->tdls_mgmt_frame = NULL;
- cfg->tdls_mgmt_frame_len = 0;
- }
-#endif /* WLTDLS */
-}
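
wl_cfg80211_scan_abort() builds a one-channel, zero-SSID request whose only channel is the magic value -1, and packs both counts into channel_num. A small sketch of that packing; the shift and mask values below are assumed to be 16 and 0xffff, matching the usual WL_SCAN_PARAMS_NSSID_SHIFT / WL_SCAN_PARAMS_COUNT_MASK definitions.

#include <stdint.h>
#include <stdio.h>

/* Assumed field layout of wl_scan_params*.channel_num:
 * low 16 bits = channel count, high 16 bits = SSID count. */
#define SCAN_PARAMS_COUNT_MASK  0x0000ffffu
#define SCAN_PARAMS_NSSID_SHIFT 16

static uint32_t pack_channel_num(uint16_t n_ssids, uint16_t n_channels)
{
	return ((uint32_t)n_ssids << SCAN_PARAMS_NSSID_SHIFT) |
	       ((uint32_t)n_channels & SCAN_PARAMS_COUNT_MASK);
}

int main(void)
{
	/* Abort request: zero SSIDs, one "channel" carrying the magic -1. */
	uint16_t abort_channel = (uint16_t)-1;
	uint32_t channel_num = pack_channel_num(0, 1);

	printf("channel_list[0]=0x%04x channel_num=0x%08x\n", abort_channel, channel_num);
	return 0;
}
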
-
-s32 wl_notify_escan_complete(struct bcm_cfg80211 *cfg,
- struct net_device *ndev,
- bool aborted, bool fw_abort)
-{
- s32 err = BCME_OK;
- unsigned long flags;
- struct net_device *dev;
- dhd_pub_t *dhdp = (dhd_pub_t *)(cfg->pub);
-
- WL_DBG(("Enter \n"));
- BCM_REFERENCE(dhdp);
-
- if (!ndev) {
- WL_ERR(("ndev is null\n"));
- err = BCME_ERROR;
- goto out;
- }
-
- if (cfg->escan_info.ndev != ndev) {
- WL_ERR(("Outstanding scan req ndev not matching (%p:%p)\n",
- cfg->escan_info.ndev, ndev));
- err = BCME_ERROR;
- goto out;
- }
-
- if (cfg->scan_request) {
- dev = bcmcfg_to_prmry_ndev(cfg);
-#if defined(WL_ENABLE_P2P_IF)
- if (cfg->scan_request->dev != cfg->p2p_net)
- dev = cfg->scan_request->dev;
-#elif defined(WL_CFG80211_P2P_DEV_IF)
- if (cfg->scan_request->wdev->iftype != NL80211_IFTYPE_P2P_DEVICE)
- dev = cfg->scan_request->wdev->netdev;
-#endif // endif
- }
- else {
-		WL_DBG(("cfg->scan_request is NULL. Internal scan scenario. "
-			"Doing scan_abort for ndev %p primary %p\n",
-			ndev, bcmcfg_to_prmry_ndev(cfg)));
- dev = ndev;
- }
- if (fw_abort && !in_atomic())
- wl_cfg80211_scan_abort(cfg);
- if (timer_pending(&cfg->scan_timeout))
- del_timer_sync(&cfg->scan_timeout);
- cfg->scan_enq_time = 0;
-#if defined(ESCAN_RESULT_PATCH)
- if (likely(cfg->scan_request)) {
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0))
- if (aborted && cfg->p2p && p2p_scan(cfg) &&
- (cfg->scan_request->flags & NL80211_SCAN_FLAG_FLUSH)) {
- WL_ERR(("scan list is changed"));
- cfg->bss_list = wl_escan_get_buf(cfg, !aborted);
- } else
-#endif // endif
- cfg->bss_list = wl_escan_get_buf(cfg, aborted);
-
- wl_inform_bss(cfg);
- }
-#endif /* ESCAN_RESULT_PATCH */
- WL_CFG_DRV_LOCK(&cfg->cfgdrv_lock, flags);
-#ifdef WL_SCHED_SCAN
- if (cfg->sched_scan_req && !cfg->scan_request) {
- if (!aborted) {
- WL_INFORM_MEM(("[%s] Report sched scan done.\n", dev->name));
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0))
- cfg80211_sched_scan_results(cfg->sched_scan_req->wiphy,
- cfg->sched_scan_req->reqid);
-#else
- cfg80211_sched_scan_results(cfg->sched_scan_req->wiphy);
-#endif /* LINUX_VER > 4.11 */
- }
-
- DBG_EVENT_LOG(dhdp, WIFI_EVENT_DRIVER_PNO_SCAN_COMPLETE);
- cfg->sched_scan_running = FALSE;
- cfg->sched_scan_req = NULL;
- }
-#endif /* WL_SCHED_SCAN */
- if (likely(cfg->scan_request)) {
- WL_INFORM_MEM(("[%s] Report scan done.\n", dev->name));
- /* scan_sync mutex is already held */
- _wl_notify_scan_done(cfg, aborted);
- cfg->scan_request = NULL;
- }
- if (p2p_is_on(cfg))
- wl_clr_p2p_status(cfg, SCANNING);
- wl_clr_drv_status(cfg, SCANNING, dev);
- wake_up_interruptible(&dhdp->conf->event_complete);
-
- DHD_OS_SCAN_WAKE_UNLOCK((dhd_pub_t *)(cfg->pub));
- DHD_ENABLE_RUNTIME_PM((dhd_pub_t *)(cfg->pub));
- WL_CFG_DRV_UNLOCK(&cfg->cfgdrv_lock, flags);
-
-out:
- return err;
-}
-
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0))
-void
-wl_cfg80211_abort_scan(struct wiphy *wiphy, struct wireless_dev *wdev)
-{
- struct bcm_cfg80211 *cfg;
-
- WL_DBG(("Enter wl_cfg80211_abort_scan\n"));
- cfg = wiphy_priv(wdev->wiphy);
-
- /* Check if any scan in progress only then abort */
- if (wl_get_drv_status_all(cfg, SCANNING)) {
- wl_cfg80211_scan_abort(cfg);
-		/* Only the scan abort is issued here. abort_scan expects the abort
-		 * status to be reported through the cfg80211_scan_done call, so we
-		 * just issue the abort request and let the scan-complete path
-		 * indicate the abort to the cfg80211 layer.
-		 */
- WL_DBG(("wl_cfg80211_abort_scan: Scan abort issued to FW\n"));
- }
-}
-#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0)) */
-
-int wl_cfg80211_scan_stop(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev)
-{
- int ret = 0;
-
- WL_TRACE(("Enter\n"));
-
- if (!cfg || !cfgdev) {
- return -EINVAL;
- }
-
- /* cancel scan and notify scan status */
- wl_cfg80211_cancel_scan(cfg);
-
- return ret;
-}
-
-/* This API is just a wrapper around the cfg80211_scan_done API and does
- * no state management. To cancel a scan, use the wl_cfg80211_cancel_scan
- * API instead.
- */
-static void
-_wl_notify_scan_done(struct bcm_cfg80211 *cfg, bool aborted)
-{
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 8, 0))
- struct cfg80211_scan_info info;
-#endif // endif
-
- if (!cfg->scan_request) {
- return;
- }
-
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 8, 0))
- memset_s(&info, sizeof(struct cfg80211_scan_info), 0, sizeof(struct cfg80211_scan_info));
- info.aborted = aborted;
- cfg80211_scan_done(cfg->scan_request, &info);
-#else
- cfg80211_scan_done(cfg->scan_request, aborted);
-#endif // endif
- cfg->scan_request = NULL;
-}
-
-#ifdef WL_DRV_AVOID_SCANCACHE
-static u32 wl_p2p_find_peer_channel(struct bcm_cfg80211 *cfg, s32 status, wl_bss_info_t *bi,
- u32 bi_length)
-{
- u32 ret;
- u8 *p2p_dev_addr = NULL;
-
- ret = wl_get_drv_status_all(cfg, FINDING_COMMON_CHANNEL);
- if (!ret) {
- return ret;
- }
- if (status == WLC_E_STATUS_PARTIAL) {
- p2p_dev_addr = wl_cfgp2p_retreive_p2p_dev_addr(bi, bi_length);
- if (p2p_dev_addr && !memcmp(p2p_dev_addr,
- cfg->afx_hdl->tx_dst_addr.octet, ETHER_ADDR_LEN)) {
- s32 channel = wf_chspec_ctlchan(
- wl_chspec_driver_to_host(bi->chanspec));
-
- if ((channel > MAXCHANNEL) || (channel <= 0)) {
- channel = WL_INVALID;
- } else {
- WL_ERR(("ACTION FRAME SCAN : Peer " MACDBG " found,"
- " channel : %d\n",
- MAC2STRDBG(cfg->afx_hdl->tx_dst_addr.octet),
- channel));
- }
- wl_clr_p2p_status(cfg, SCANNING);
- cfg->afx_hdl->peer_chan = channel;
- complete(&cfg->act_frm_scan);
- }
- } else {
- WL_INFORM_MEM(("ACTION FRAME SCAN DONE\n"));
- wl_clr_p2p_status(cfg, SCANNING);
- wl_clr_drv_status(cfg, SCANNING, cfg->afx_hdl->dev);
- if (cfg->afx_hdl->peer_chan == WL_INVALID)
- complete(&cfg->act_frm_scan);
- }
-
- return ret;
-}
-
-static s32 wl_escan_without_scan_cache(struct bcm_cfg80211 *cfg, wl_escan_result_t *escan_result,
- struct net_device *ndev, const wl_event_msg_t *e, s32 status)
-{
- s32 err = BCME_OK;
- wl_bss_info_t *bi;
- u32 bi_length;
- bool aborted = false;
- bool fw_abort = false;
- bool notify_escan_complete = false;
-
- if (wl_escan_check_sync_id(status, escan_result->sync_id,
- cfg->escan_info.cur_sync_id) < 0) {
- goto exit;
- }
-
- wl_escan_print_sync_id(status, escan_result->sync_id,
- cfg->escan_info.cur_sync_id);
-
-	if ((status != WLC_E_STATUS_TIMEOUT) && (status != WLC_E_STATUS_PARTIAL)) {
- cfg->escan_info.escan_state = WL_ESCAN_STATE_IDLE;
- }
-
- if ((likely(cfg->scan_request)) || (cfg->sched_scan_running)) {
- notify_escan_complete = true;
- }
-
- if (status == WLC_E_STATUS_PARTIAL) {
- WL_DBG(("WLC_E_STATUS_PARTIAL \n"));
- DBG_EVENT_LOG((dhd_pub_t *)cfg->pub, WIFI_EVENT_DRIVER_SCAN_RESULT_FOUND);
- if ((!escan_result) || (dtoh16(escan_result->bss_count) != 1)) {
- WL_ERR(("Invalid escan result (NULL pointer) or invalid bss_count\n"));
- goto exit;
- }
-
-		bi = escan_result->bss_info;
-		if (!bi) {
-			WL_ERR(("Invalid escan bss info (NULL pointer)\n"));
-			goto exit;
-		}
-		bi_length = dtoh32(bi->length);
-		if (bi_length != (dtoh32(escan_result->buflen) - WL_ESCAN_RESULTS_FIXED_SIZE)) {
-			WL_ERR(("Invalid bss_info length %u\n", bi_length));
-			goto exit;
-		}
-
- if (!(bcmcfg_to_wiphy(cfg)->interface_modes & BIT(NL80211_IFTYPE_ADHOC))) {
- if (dtoh16(bi->capability) & DOT11_CAP_IBSS) {
- WL_DBG(("Ignoring IBSS result\n"));
- goto exit;
- }
- }
-
- if (wl_p2p_find_peer_channel(cfg, status, bi, bi_length)) {
- goto exit;
- } else {
- if (scan_req_match(cfg)) {
-				/* p2p scan: allow only probe responses */
- if ((cfg->p2p->search_state != WL_P2P_DISC_ST_SCAN) &&
- (bi->flags & WL_BSS_FLAGS_FROM_BEACON))
- goto exit;
- }
-#ifdef ROAM_CHANNEL_CACHE
- add_roam_cache(cfg, bi);
-#endif /* ROAM_CHANNEL_CACHE */
- err = wl_inform_single_bss(cfg, bi, false);
-#ifdef ROAM_CHANNEL_CACHE
- /* print_roam_cache(); */
- update_roam_cache(cfg, ioctl_version);
-#endif /* ROAM_CHANNEL_CACHE */
-
-			/*
-			 * A non-broadcast scan with exactly one SSID and one channel
-			 * means a directed scan issued for association.
-			 */
- if (wl_cfgp2p_is_p2p_specific_scan(cfg->scan_request)) {
- WL_ERR(("P2P assoc scan fast aborted.\n"));
- aborted = false;
- fw_abort = true;
- }
- /* Directly exit from function here and
- * avoid sending notify completion to cfg80211
- */
- goto exit;
- }
- } else if (status == WLC_E_STATUS_SUCCESS) {
- if (wl_p2p_find_peer_channel(cfg, status, NULL, 0)) {
- goto exit;
- }
- WL_INFORM_MEM(("ESCAN COMPLETED\n"));
- DBG_EVENT_LOG((dhd_pub_t *)cfg->pub, WIFI_EVENT_DRIVER_SCAN_COMPLETE);
-
- /* Update escan complete status */
- aborted = false;
- fw_abort = false;
-
- } else if ((status == WLC_E_STATUS_ABORT) || (status == WLC_E_STATUS_NEWSCAN) ||
- (status == WLC_E_STATUS_11HQUIET) || (status == WLC_E_STATUS_CS_ABORT) ||
- (status == WLC_E_STATUS_NEWASSOC)) {
- /* Handle all cases of scan abort */
-
- WL_DBG(("ESCAN ABORT reason: %d\n", status));
- if (wl_p2p_find_peer_channel(cfg, status, NULL, 0)) {
- goto exit;
- }
- WL_INFORM_MEM(("ESCAN ABORTED\n"));
-
- /* Update escan complete status */
- aborted = true;
- fw_abort = false;
-
- } else if (status == WLC_E_STATUS_TIMEOUT) {
- WL_ERR(("WLC_E_STATUS_TIMEOUT : scan_request[%p]\n", cfg->scan_request));
- WL_ERR(("reason[0x%x]\n", e->reason));
- if (e->reason == 0xFFFFFFFF) {
- /* Update escan complete status */
- aborted = true;
- fw_abort = true;
- }
- } else {
- WL_ERR(("unexpected Escan Event %d : abort\n", status));
-
- if (wl_p2p_find_peer_channel(cfg, status, NULL, 0)) {
- goto exit;
- }
- /* Update escan complete status */
- aborted = true;
- fw_abort = false;
- }
-
- /* Notify escan complete status */
- if (notify_escan_complete) {
- wl_notify_escan_complete(cfg, ndev, aborted, fw_abort);
- }
-
-exit:
- return err;
-
-}
-#endif /* WL_DRV_AVOID_SCANCACHE */
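
In the WLC_E_STATUS_PARTIAL branch above, an event is accepted only if it carries exactly one BSS whose declared length fills the remainder of the event buffer. A standalone sketch of that validation, with simplified stand-ins for wl_escan_result_t and wl_bss_info_t.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Simplified stand-ins for wl_bss_info_t / wl_escan_result_t (assumption). */
struct bss_info { uint32_t length; /* variable-length data follows */ };
struct escan_result {
	uint32_t buflen;
	uint16_t bss_count;
	struct bss_info bss_info[1];
};
#define ESCAN_RESULTS_FIXED_SIZE offsetof(struct escan_result, bss_info)

/* Accept a partial escan event only if it holds exactly one BSS whose
 * declared length matches the rest of the event buffer. */
static int partial_result_is_valid(const struct escan_result *r)
{
	if (!r || r->bss_count != 1)
		return 0;
	return r->bss_info[0].length == r->buflen - ESCAN_RESULTS_FIXED_SIZE;
}

int main(void)
{
	struct escan_result r;

	r.buflen = ESCAN_RESULTS_FIXED_SIZE + sizeof(struct bss_info);
	r.bss_count = 1;
	r.bss_info[0].length = sizeof(struct bss_info);
	printf("valid: %d\n", partial_result_is_valid(&r));
	return 0;
}
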
-
-s32
-wl_notify_scan_status(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev,
- const wl_event_msg_t *e, void *data)
-{
- struct channel_info channel_inform;
- struct wl_scan_results *bss_list;
- struct net_device *ndev = NULL;
- u32 len = WL_SCAN_BUF_MAX;
- s32 err = 0;
- unsigned long flags;
-
- WL_DBG(("Enter \n"));
- if (!wl_get_drv_status(cfg, SCANNING, ndev)) {
- WL_DBG(("scan is not ready \n"));
- return err;
- }
- ndev = cfgdev_to_wlc_ndev(cfgdev, cfg);
-
- mutex_lock(&cfg->scan_sync);
- wl_clr_drv_status(cfg, SCANNING, ndev);
- bzero(&channel_inform, sizeof(channel_inform));
- err = wldev_ioctl_get(ndev, WLC_GET_CHANNEL, &channel_inform,
- sizeof(channel_inform));
- if (unlikely(err)) {
- WL_ERR(("scan busy (%d)\n", err));
- goto scan_done_out;
- }
- channel_inform.scan_channel = dtoh32(channel_inform.scan_channel);
- if (unlikely(channel_inform.scan_channel)) {
-
- WL_DBG(("channel_inform.scan_channel (%d)\n",
- channel_inform.scan_channel));
- }
- cfg->bss_list = cfg->scan_results;
- bss_list = cfg->bss_list;
- bzero(bss_list, len);
- bss_list->buflen = htod32(len);
- err = wldev_ioctl_get(ndev, WLC_SCAN_RESULTS, bss_list, len);
- if (unlikely(err) && unlikely(!cfg->scan_suppressed)) {
- WL_ERR(("%s Scan_results error (%d)\n", ndev->name, err));
- err = -EINVAL;
- goto scan_done_out;
- }
- bss_list->buflen = dtoh32(bss_list->buflen);
- bss_list->version = dtoh32(bss_list->version);
- bss_list->count = dtoh32(bss_list->count);
-
- err = wl_inform_bss(cfg);
-
-scan_done_out:
- del_timer_sync(&cfg->scan_timeout);
- WL_CFG_DRV_LOCK(&cfg->cfgdrv_lock, flags);
- if (cfg->scan_request) {
- _wl_notify_scan_done(cfg, false);
- cfg->scan_request = NULL;
- }
- WL_CFG_DRV_UNLOCK(&cfg->cfgdrv_lock, flags);
- WL_DBG(("cfg80211_scan_done\n"));
- mutex_unlock(&cfg->scan_sync);
- return err;
-}
-
-void wl_notify_scan_done(struct bcm_cfg80211 *cfg, bool aborted)
-{
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 8, 0))
- struct cfg80211_scan_info info;
-
- bzero(&info, sizeof(struct cfg80211_scan_info));
- info.aborted = aborted;
- cfg80211_scan_done(cfg->scan_request, &info);
-#else
- cfg80211_scan_done(cfg->scan_request, aborted);
-#endif // endif
-}
-
-#ifdef WL_SCHED_SCAN
-#define PNO_TIME 30
-#define PNO_REPEAT 4
-#define PNO_FREQ_EXPO_MAX 2
-static bool
-is_ssid_in_list(struct cfg80211_ssid *ssid, struct cfg80211_ssid *ssid_list, int count)
-{
- int i;
-
- if (!ssid || !ssid_list)
- return FALSE;
-
- for (i = 0; i < count; i++) {
- if (ssid->ssid_len == ssid_list[i].ssid_len) {
- if (strncmp(ssid->ssid, ssid_list[i].ssid, ssid->ssid_len) == 0)
- return TRUE;
- }
- }
- return FALSE;
-}
-
-int
-wl_cfg80211_sched_scan_start(struct wiphy *wiphy,
- struct net_device *dev,
- struct cfg80211_sched_scan_request *request)
-{
- ushort pno_time = PNO_TIME;
- int pno_repeat = PNO_REPEAT;
- int pno_freq_expo_max = PNO_FREQ_EXPO_MAX;
- wlc_ssid_ext_t ssids_local[MAX_PFN_LIST_COUNT];
- struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
- dhd_pub_t *dhdp = (dhd_pub_t *)(cfg->pub);
- struct cfg80211_ssid *ssid = NULL;
- struct cfg80211_ssid *hidden_ssid_list = NULL;
- log_conn_event_t *event_data = NULL;
- tlv_log *tlv_data = NULL;
- u32 alloc_len, tlv_len;
- u32 payload_len;
- int ssid_cnt = 0;
- int i;
- int ret = 0;
- unsigned long flags;
-
- if (!request) {
- WL_ERR(("Sched scan request was NULL\n"));
- return -EINVAL;
- }
-
- WL_DBG(("Enter \n"));
- WL_PNO((">>> SCHED SCAN START\n"));
- WL_PNO(("Enter n_match_sets:%d n_ssids:%d \n",
- request->n_match_sets, request->n_ssids));
- WL_PNO(("ssids:%d pno_time:%d pno_repeat:%d pno_freq:%d \n",
- request->n_ssids, pno_time, pno_repeat, pno_freq_expo_max));
-
- if (!request->n_ssids || !request->n_match_sets) {
- WL_ERR(("Invalid sched scan req!! n_ssids:%d \n", request->n_ssids));
- return -EINVAL;
- }
-
- bzero(&ssids_local, sizeof(ssids_local));
-
- if (request->n_ssids > 0) {
- hidden_ssid_list = request->ssids;
- }
-
- if (DBG_RING_ACTIVE(dhdp, DHD_EVENT_RING_ID)) {
- alloc_len = sizeof(log_conn_event_t) + DOT11_MAX_SSID_LEN;
- event_data = (log_conn_event_t *)MALLOC(cfg->osh, alloc_len);
- if (!event_data) {
- WL_ERR(("%s: failed to allocate log_conn_event_t with "
- "length(%d)\n", __func__, alloc_len));
- return -ENOMEM;
- }
- bzero(event_data, alloc_len);
- event_data->tlvs = NULL;
- tlv_len = sizeof(tlv_log);
- event_data->tlvs = (tlv_log *)MALLOC(cfg->osh, tlv_len);
- if (!event_data->tlvs) {
- WL_ERR(("%s: failed to allocate log_tlv with "
- "length(%d)\n", __func__, tlv_len));
- MFREE(cfg->osh, event_data, alloc_len);
- return -ENOMEM;
- }
- }
- for (i = 0; i < request->n_match_sets && ssid_cnt < MAX_PFN_LIST_COUNT; i++) {
- ssid = &request->match_sets[i].ssid;
- /* No need to include null ssid */
- if (ssid->ssid_len) {
- ssids_local[ssid_cnt].SSID_len = MIN(ssid->ssid_len,
- (uint32)DOT11_MAX_SSID_LEN);
-			/* SSID_len was capped to DOT11_MAX_SSID_LEN above, so
-			 * memcpy_s cannot fail; its return value is ignored.
-			 */
- (void)memcpy_s(ssids_local[ssid_cnt].SSID, DOT11_MAX_SSID_LEN, ssid->ssid,
- ssids_local[ssid_cnt].SSID_len);
- if (is_ssid_in_list(ssid, hidden_ssid_list, request->n_ssids)) {
- ssids_local[ssid_cnt].hidden = TRUE;
- WL_PNO((">>> PNO hidden SSID (%s) \n", ssid->ssid));
- } else {
- ssids_local[ssid_cnt].hidden = FALSE;
- WL_PNO((">>> PNO non-hidden SSID (%s) \n", ssid->ssid));
- }
-#if (LINUX_VERSION_CODE > KERNEL_VERSION(3, 15, 0))
- if (request->match_sets[i].rssi_thold != NL80211_SCAN_RSSI_THOLD_OFF) {
- ssids_local[ssid_cnt].rssi_thresh =
- (int8)request->match_sets[i].rssi_thold;
- }
-#endif /* (LINUX_VERSION_CODE > KERNEL_VERSION(3, 15, 0)) */
- ssid_cnt++;
- }
- }
-
- if (ssid_cnt) {
- if ((ret = dhd_dev_pno_set_for_ssid(dev, ssids_local, ssid_cnt,
- pno_time, pno_repeat, pno_freq_expo_max, NULL, 0)) < 0) {
- WL_ERR(("PNO setup failed!! ret=%d \n", ret));
- ret = -EINVAL;
- goto exit;
- }
-
- if (DBG_RING_ACTIVE(dhdp, DHD_EVENT_RING_ID)) {
- for (i = 0; i < ssid_cnt; i++) {
- payload_len = sizeof(log_conn_event_t);
- event_data->event = WIFI_EVENT_DRIVER_PNO_ADD;
- tlv_data = event_data->tlvs;
- /* ssid */
- tlv_data->tag = WIFI_TAG_SSID;
- tlv_data->len = ssids_local[i].SSID_len;
- (void)memcpy_s(tlv_data->value, DOT11_MAX_SSID_LEN,
- ssids_local[i].SSID, ssids_local[i].SSID_len);
- payload_len += TLV_LOG_SIZE(tlv_data);
-
- dhd_os_push_push_ring_data(dhdp, DHD_EVENT_RING_ID,
- event_data, payload_len);
- }
- }
-
- WL_CFG_DRV_LOCK(&cfg->cfgdrv_lock, flags);
- cfg->sched_scan_req = request;
- WL_CFG_DRV_UNLOCK(&cfg->cfgdrv_lock, flags);
- } else {
- ret = -EINVAL;
- }
-exit:
- if (event_data) {
- MFREE(cfg->osh, event_data->tlvs, tlv_len);
- MFREE(cfg->osh, event_data, alloc_len);
- }
- return ret;
-}
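
wl_cfg80211_sched_scan_start() above derives the PNO SSID list from request->match_sets and marks an entry hidden when the same SSID also appears in request->ssids. A reduced sketch of that matching step; the local types stand in for the cfg80211 and wlc_ssid_ext_t structures.

#include <string.h>
#include <stdio.h>

#define MAX_SSID_LEN 32

struct sched_ssid { char ssid[MAX_SSID_LEN]; size_t ssid_len; };
struct pno_ssid   { char ssid[MAX_SSID_LEN]; size_t ssid_len; int hidden; };

static int ssid_in_list(const struct sched_ssid *s, const struct sched_ssid *list, int count)
{
	int i;

	for (i = 0; i < count; i++)
		if (s->ssid_len == list[i].ssid_len &&
		    !memcmp(s->ssid, list[i].ssid, s->ssid_len))
			return 1;
	return 0;
}

/* Build the PNO list: one entry per non-empty match set, flagged hidden when
 * the SSID is also present in the probe (hidden) SSID list. */
static int build_pno_list(const struct sched_ssid *match_sets, int n_match,
			  const struct sched_ssid *hidden, int n_hidden,
			  struct pno_ssid *out, int max_out)
{
	int i, n = 0;

	for (i = 0; i < n_match && n < max_out; i++) {
		if (!match_sets[i].ssid_len)
			continue;               /* skip the null SSID */
		memset(&out[n], 0, sizeof(out[n]));
		out[n].ssid_len = match_sets[i].ssid_len;
		memcpy(out[n].ssid, match_sets[i].ssid, out[n].ssid_len);
		out[n].hidden = ssid_in_list(&match_sets[i], hidden, n_hidden);
		n++;
	}
	return n;
}

int main(void)
{
	struct sched_ssid match[] = { { "VisibleAP", 9 }, { "HiddenAP", 8 } };
	struct sched_ssid probe[] = { { "HiddenAP", 8 } };
	struct pno_ssid out[4];
	int i, n = build_pno_list(match, 2, probe, 1, out, 4);

	for (i = 0; i < n; i++)
		printf("%s hidden=%d\n", out[i].ssid, out[i].hidden);
	return 0;
}
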
-
-int
-#if (LINUX_VERSION_CODE > KERNEL_VERSION(4, 11, 0))
-wl_cfg80211_sched_scan_stop(struct wiphy *wiphy, struct net_device *dev, u64 reqid)
-#else
-wl_cfg80211_sched_scan_stop(struct wiphy *wiphy, struct net_device *dev)
-#endif /* LINUX_VER > 4.11 */
-{
- struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
- dhd_pub_t *dhdp = (dhd_pub_t *)(cfg->pub);
- unsigned long flags;
-
- WL_DBG(("Enter \n"));
- WL_PNO((">>> SCHED SCAN STOP\n"));
-
- if (dhd_dev_pno_stop_for_ssid(dev) < 0) {
- WL_ERR(("PNO Stop for SSID failed"));
- } else {
- DBG_EVENT_LOG(dhdp, WIFI_EVENT_DRIVER_PNO_REMOVE);
- }
-
- if (cfg->sched_scan_req || cfg->sched_scan_running) {
- WL_PNO((">>> Sched scan running. Aborting it..\n"));
- wl_cfg80211_cancel_scan(cfg);
- }
- WL_CFG_DRV_LOCK(&cfg->cfgdrv_lock, flags);
- cfg->sched_scan_req = NULL;
- cfg->sched_scan_running = FALSE;
- WL_CFG_DRV_UNLOCK(&cfg->cfgdrv_lock, flags);
- return 0;
-}
-#endif /* WL_SCHED_SCAN */
-
-static void wl_scan_timeout(unsigned long data)
-{
- wl_event_msg_t msg;
- struct bcm_cfg80211 *cfg = (struct bcm_cfg80211 *)data;
- struct wireless_dev *wdev = NULL;
- struct net_device *ndev = NULL;
- struct wl_scan_results *bss_list;
- wl_bss_info_t *bi = NULL;
- s32 i;
- u32 channel;
- u64 cur_time = OSL_LOCALTIME_NS();
- dhd_pub_t *dhdp = (dhd_pub_t *)(cfg->pub);
- unsigned long flags;
-#ifdef RTT_SUPPORT
- rtt_status_info_t *rtt_status = NULL;
- UNUSED_PARAMETER(rtt_status);
-#endif /* RTT_SUPPORT */
-
- UNUSED_PARAMETER(cur_time);
- WL_CFG_DRV_LOCK(&cfg->cfgdrv_lock, flags);
- if (!(cfg->scan_request)) {
- WL_ERR(("timer expired but no scan request\n"));
- WL_CFG_DRV_UNLOCK(&cfg->cfgdrv_lock, flags);
- return;
- }
-
- wdev = GET_SCAN_WDEV(cfg->scan_request);
- WL_CFG_DRV_UNLOCK(&cfg->cfgdrv_lock, flags);
-
- if (!wdev) {
- WL_ERR(("No wireless_dev present\n"));
- return;
- }
-
- if (dhd_query_bus_erros(dhdp)) {
- return;
- }
-#if defined(DHD_KERNEL_SCHED_DEBUG) && defined(DHD_FW_COREDUMP)
- if (dhdp->memdump_enabled == DUMP_MEMFILE_BUGON &&
- ((cfg->scan_deq_time < cfg->scan_enq_time) ||
- dhd_bus_query_dpc_sched_errors(dhdp))) {
- WL_ERR(("****SCAN event timeout due to scheduling problem\n"));
- /* change g_assert_type to trigger Kernel panic */
- g_assert_type = 2;
-#ifdef RTT_SUPPORT
- rtt_status = GET_RTTSTATE(dhdp);
-#endif /* RTT_SUPPORT */
- WL_ERR(("***SCAN event timeout. WQ state:0x%x scan_enq_time:"SEC_USEC_FMT
- " evt_hdlr_entry_time:"SEC_USEC_FMT" evt_deq_time:"SEC_USEC_FMT
- "\nscan_deq_time:"SEC_USEC_FMT" scan_hdlr_cmplt_time:"SEC_USEC_FMT
- " scan_cmplt_time:"SEC_USEC_FMT" evt_hdlr_exit_time:"SEC_USEC_FMT
- "\ncurrent_time:"SEC_USEC_FMT"\n", work_busy(&cfg->event_work),
- GET_SEC_USEC(cfg->scan_enq_time), GET_SEC_USEC(cfg->wl_evt_hdlr_entry_time),
- GET_SEC_USEC(cfg->wl_evt_deq_time), GET_SEC_USEC(cfg->scan_deq_time),
- GET_SEC_USEC(cfg->scan_hdlr_cmplt_time), GET_SEC_USEC(cfg->scan_cmplt_time),
- GET_SEC_USEC(cfg->wl_evt_hdlr_exit_time), GET_SEC_USEC(cur_time)));
- if (cfg->scan_enq_time) {
- WL_ERR(("Elapsed time(ns): %llu\n", (cur_time - cfg->scan_enq_time)));
- }
- WL_ERR(("lock_states:[%d:%d:%d:%d:%d:%d]\n",
- mutex_is_locked(&cfg->if_sync),
- mutex_is_locked(&cfg->usr_sync),
- mutex_is_locked(&cfg->pm_sync),
- mutex_is_locked(&cfg->scan_sync),
- spin_is_locked(&cfg->cfgdrv_lock),
- spin_is_locked(&cfg->eq_lock)));
-#ifdef RTT_SUPPORT
- WL_ERR(("RTT lock_state:[%d]\n",
- mutex_is_locked(&rtt_status->rtt_mutex)));
-#ifdef WL_NAN
- WL_ERR(("RTT and Geofence lock_states:[%d:%d]\n",
- mutex_is_locked(&cfg->nancfg.nan_sync),
- mutex_is_locked(&(rtt_status)->geofence_mutex)));
-#endif /* WL_NAN */
-#endif /* RTT_SUPPORT */
-
- /* use ASSERT() to trigger panic */
- ASSERT(0);
- }
-#endif /* DHD_KERNEL_SCHED_DEBUG && DHD_FW_COREDUMP */
- dhd_bus_intr_count_dump(dhdp);
-
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 1, 0)) && !defined(CONFIG_MODULES)
- /* Print WQ states. Enable only for in-built drivers as the symbol is not exported */
- show_workqueue_state();
-#endif /* LINUX_VER >= 4.1 && !CONFIG_MODULES */
-
- bss_list = wl_escan_get_buf(cfg, FALSE);
- if (!bss_list) {
- WL_ERR(("bss_list is null. Didn't receive any partial scan results\n"));
- } else {
- WL_ERR(("Dump scan buffer:\n"
- "scanned AP count (%d)\n", bss_list->count));
-
- bi = next_bss(bss_list, bi);
- for_each_bss(bss_list, bi, i) {
- channel = wf_chspec_ctlchan(wl_chspec_driver_to_host(bi->chanspec));
- WL_ERR(("SSID :%s Channel :%d\n", bi->SSID, channel));
- }
- }
-
- ndev = wdev_to_wlc_ndev(wdev, cfg);
- bzero(&msg, sizeof(wl_event_msg_t));
- WL_ERR(("timer expired\n"));
- dhdp->scan_timeout_occurred = TRUE;
-#ifdef BCMPCIE
- (void)dhd_pcie_dump_int_regs(dhdp);
- dhd_pcie_dump_rc_conf_space_cap(dhdp);
-#endif /* BCMPCIE */
-#if 0
- if (dhdp->memdump_enabled) {
- dhdp->memdump_type = DUMP_TYPE_SCAN_TIMEOUT;
- dhd_bus_mem_dump(dhdp);
- }
-#endif /* DHD_FW_COREDUMP */
-	/*
-	 * scan_timeout_occurred was set above to block bus transactions briefly
-	 * for memdump sanity; keeping it TRUE causes subsequent private command
-	 * errors, so clear it again here.
-	 */
- dhdp->scan_timeout_occurred = FALSE;
- msg.event_type = hton32(WLC_E_ESCAN_RESULT);
- msg.status = hton32(WLC_E_STATUS_TIMEOUT);
- msg.reason = 0xFFFFFFFF;
- wl_cfg80211_event(ndev, &msg, NULL);
-}
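
When the timer fires, wl_scan_timeout() does not complete the scan itself; it fabricates a WLC_E_ESCAN_RESULT event carrying WLC_E_STATUS_TIMEOUT and reason 0xFFFFFFFF and pushes it through the normal event path, so the escan handler performs the cleanup. A minimal sketch of that shape; the event structure, dispatch function, and numeric IDs below are illustrative stand-ins.

#include <stdint.h>
#include <string.h>
#include <stdio.h>

/* Stand-ins for wl_event_msg_t and the driver event dispatch entry point. */
struct event_msg { uint32_t event_type; uint32_t status; uint32_t reason; };

#define EV_ESCAN_RESULT   69u   /* illustrative IDs only */
#define EV_STATUS_TIMEOUT 7u

static void dispatch_event(const struct event_msg *e)
{
	printf("event %u status %u reason 0x%x\n", e->event_type, e->status, e->reason);
}

/* Scan-timeout path: synthesize an escan-result event marked as a timeout so
 * the regular escan handler runs the completion/cleanup code. */
static void on_scan_timeout(void)
{
	struct event_msg msg;

	memset(&msg, 0, sizeof(msg));
	msg.event_type = EV_ESCAN_RESULT;
	msg.status = EV_STATUS_TIMEOUT;
	msg.reason = 0xFFFFFFFFu;
	dispatch_event(&msg);
}

int main(void)
{
	on_scan_timeout();
	return 0;
}
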
-
-s32 wl_init_scan(struct bcm_cfg80211 *cfg)
-{
- int err = 0;
-
- cfg->evt_handler[WLC_E_ESCAN_RESULT] = wl_escan_handler;
- cfg->escan_info.escan_state = WL_ESCAN_STATE_IDLE;
- wl_escan_init_sync_id(cfg);
-
- /* Init scan_timeout timer */
- init_timer_compat(&cfg->scan_timeout, wl_scan_timeout, cfg);
-
- wl_cfg80211_set_bcmcfg(cfg);
-
- return err;
-}
-
-#ifdef WL_SCHED_SCAN
-/* If target scan is not reliable, set the below define to "1" to do a
- * full escan
- */
-#define FULL_ESCAN_ON_PFN_NET_FOUND 0
-static s32
-wl_notify_sched_scan_results(struct bcm_cfg80211 *cfg, struct net_device *ndev,
- const wl_event_msg_t *e, void *data)
-{
- wl_pfn_net_info_v1_t *netinfo, *pnetinfo;
- wl_pfn_net_info_v2_t *netinfo_v2, *pnetinfo_v2;
- struct wiphy *wiphy = bcmcfg_to_wiphy(cfg);
- dhd_pub_t *dhdp = (dhd_pub_t *)(cfg->pub);
- int err = 0;
- struct cfg80211_scan_request *request = NULL;
- struct cfg80211_ssid ssid[MAX_PFN_LIST_COUNT];
- struct ieee80211_channel *channel = NULL;
- int channel_req = 0;
- int band = 0;
- wl_pfn_scanresults_v1_t *pfn_result_v1 = (wl_pfn_scanresults_v1_t *)data;
- wl_pfn_scanresults_v2_t *pfn_result_v2 = (wl_pfn_scanresults_v2_t *)data;
- int n_pfn_results = 0;
- log_conn_event_t *event_data = NULL;
- tlv_log *tlv_data = NULL;
- u32 alloc_len, tlv_len;
- u32 payload_len;
- u8 tmp_buf[DOT11_MAX_SSID_LEN + 1];
-
- WL_DBG(("Enter\n"));
-
- /* These static asserts guarantee v1/v2 net_info and subnet_info are compatible
- * in size and SSID offset, allowing v1 to be used below except for the results
- * fields themselves (status, count, offset to netinfo).
- */
- STATIC_ASSERT(sizeof(wl_pfn_net_info_v1_t) == sizeof(wl_pfn_net_info_v2_t));
- STATIC_ASSERT(sizeof(wl_pfn_lnet_info_v1_t) == sizeof(wl_pfn_lnet_info_v2_t));
- STATIC_ASSERT(sizeof(wl_pfn_subnet_info_v1_t) == sizeof(wl_pfn_subnet_info_v2_t));
- STATIC_ASSERT(OFFSETOF(wl_pfn_subnet_info_v1_t, SSID) ==
- OFFSETOF(wl_pfn_subnet_info_v2_t, u.SSID));
-
- /* Extract the version-specific items */
- if (pfn_result_v1->version == PFN_SCANRESULT_VERSION_V1) {
- n_pfn_results = pfn_result_v1->count;
- pnetinfo = pfn_result_v1->netinfo;
- WL_INFORM_MEM(("PFN NET FOUND event. count:%d \n", n_pfn_results));
-
- if (n_pfn_results > 0) {
- int i;
-
- if (n_pfn_results > MAX_PFN_LIST_COUNT)
- n_pfn_results = MAX_PFN_LIST_COUNT;
-
- bzero(&ssid, sizeof(ssid));
-
- request = (struct cfg80211_scan_request *)MALLOCZ(cfg->osh,
- sizeof(*request) + sizeof(*request->channels) * n_pfn_results);
- channel = (struct ieee80211_channel *)MALLOCZ(cfg->osh,
- (sizeof(struct ieee80211_channel) * n_pfn_results));
- if (!request || !channel) {
- WL_ERR(("No memory"));
- err = -ENOMEM;
- goto out_err;
- }
-
- request->wiphy = wiphy;
-
- if (DBG_RING_ACTIVE(dhdp, DHD_EVENT_RING_ID)) {
- alloc_len = sizeof(log_conn_event_t) + DOT11_MAX_SSID_LEN +
- sizeof(uint16) + sizeof(int16);
- event_data = (log_conn_event_t *)MALLOC(cfg->osh, alloc_len);
- if (!event_data) {
- WL_ERR(("%s: failed to allocate the log_conn_event_t with "
- "length(%d)\n", __func__, alloc_len));
- goto out_err;
- }
- tlv_len = 3 * sizeof(tlv_log);
- event_data->tlvs = (tlv_log *)MALLOC(cfg->osh, tlv_len);
- if (!event_data->tlvs) {
- WL_ERR(("%s: failed to allocate the tlv_log with "
- "length(%d)\n", __func__, tlv_len));
- goto out_err;
- }
- }
-
- for (i = 0; i < n_pfn_results; i++) {
- netinfo = &pnetinfo[i];
- if (!netinfo) {
- WL_ERR(("Invalid netinfo ptr. index:%d", i));
- err = -EINVAL;
- goto out_err;
- }
- if (netinfo->pfnsubnet.SSID_len > DOT11_MAX_SSID_LEN) {
- WL_ERR(("Wrong SSID length:%d\n",
- netinfo->pfnsubnet.SSID_len));
- err = -EINVAL;
- goto out_err;
- }
-				/* SSID_len was validated above not to exceed DOT11_MAX_SSID_LEN,
-				 * and tmp_buf is DOT11_MAX_SSID_LEN + 1 bytes, so the copy and
-				 * the NUL terminator fit.
-				 */
- (void)memcpy_s(tmp_buf, DOT11_MAX_SSID_LEN,
- netinfo->pfnsubnet.SSID, netinfo->pfnsubnet.SSID_len);
- tmp_buf[netinfo->pfnsubnet.SSID_len] = '\0';
- WL_PNO((">>> SSID:%s Channel:%d \n",
- tmp_buf, netinfo->pfnsubnet.channel));
-				/* A PFN result doesn't carry all the information the
-				 * supplicant needs (e.g. IEs), so do a targeted escan and
-				 * report the sched scan results through wl_inform_single_bss
-				 * in the required format. Escan needs the request as a
-				 * cfg80211_scan_request, so for the time being build one
-				 * from the received PNO event.
-				 */
-
- ssid[i].ssid_len = netinfo->pfnsubnet.SSID_len;
- /* Returning void as ssid[i].ssid_len is limited to max of
- * DOT11_MAX_SSID_LEN
- */
- (void)memcpy_s(ssid[i].ssid, IEEE80211_MAX_SSID_LEN,
- netinfo->pfnsubnet.SSID, ssid[i].ssid_len);
- request->n_ssids++;
-
- channel_req = netinfo->pfnsubnet.channel;
- band = (channel_req <= CH_MAX_2G_CHANNEL) ? NL80211_BAND_2GHZ
- : NL80211_BAND_5GHZ;
- channel[i].center_freq =
- ieee80211_channel_to_frequency(channel_req, band);
- channel[i].band = band;
- channel[i].flags |= IEEE80211_CHAN_NO_HT40;
- request->channels[i] = &channel[i];
- request->n_channels++;
-
- if (DBG_RING_ACTIVE(dhdp, DHD_EVENT_RING_ID)) {
- payload_len = sizeof(log_conn_event_t);
- event_data->event = WIFI_EVENT_DRIVER_PNO_NETWORK_FOUND;
- tlv_data = event_data->tlvs;
-
- /* ssid */
- tlv_data->tag = WIFI_TAG_SSID;
- tlv_data->len = ssid[i].ssid_len;
- (void)memcpy_s(tlv_data->value, DOT11_MAX_SSID_LEN,
- ssid[i].ssid, ssid[i].ssid_len);
- payload_len += TLV_LOG_SIZE(tlv_data);
- tlv_data = TLV_LOG_NEXT(tlv_data);
-
- /* channel */
- tlv_data->tag = WIFI_TAG_CHANNEL;
- tlv_data->len = sizeof(uint16);
- (void)memcpy_s(tlv_data->value, sizeof(uint16),
- &channel_req, sizeof(uint16));
- payload_len += TLV_LOG_SIZE(tlv_data);
- tlv_data = TLV_LOG_NEXT(tlv_data);
-
- /* rssi */
- tlv_data->tag = WIFI_TAG_RSSI;
- tlv_data->len = sizeof(int16);
- (void)memcpy_s(tlv_data->value, sizeof(int16),
- &netinfo->RSSI, sizeof(int16));
- payload_len += TLV_LOG_SIZE(tlv_data);
- tlv_data = TLV_LOG_NEXT(tlv_data);
-
- dhd_os_push_push_ring_data(dhdp, DHD_EVENT_RING_ID,
- &event_data->event, payload_len);
- }
- }
-
- /* assign parsed ssid array */
- if (request->n_ssids)
- request->ssids = &ssid[0];
-
- if (wl_get_drv_status_all(cfg, SCANNING)) {
- /* Abort any on-going scan */
- wl_cfg80211_cancel_scan(cfg);
- }
-
- if (wl_get_p2p_status(cfg, DISCOVERY_ON)) {
- WL_PNO((">>> P2P discovery was ON. Disabling it\n"));
- err = wl_cfgp2p_discover_enable_search(cfg, false);
- if (unlikely(err)) {
- wl_clr_drv_status(cfg, SCANNING, ndev);
- goto out_err;
- }
- p2p_scan(cfg) = false;
- }
- wl_set_drv_status(cfg, SCANNING, ndev);
-#if FULL_ESCAN_ON_PFN_NET_FOUND
- WL_PNO((">>> Doing Full ESCAN on PNO event\n"));
- err = wl_do_escan(cfg, wiphy, ndev, NULL);
-#else
- WL_PNO((">>> Doing targeted ESCAN on PNO event\n"));
- err = wl_do_escan(cfg, wiphy, ndev, request);
-#endif // endif
- if (err) {
- wl_clr_drv_status(cfg, SCANNING, ndev);
- goto out_err;
- }
- DBG_EVENT_LOG(dhdp, WIFI_EVENT_DRIVER_PNO_SCAN_REQUESTED);
- cfg->sched_scan_running = TRUE;
- }
- else {
- WL_ERR(("FALSE PNO Event. (pfn_count == 0) \n"));
- }
-
- } else if (pfn_result_v2->version == PFN_SCANRESULT_VERSION_V2) {
- n_pfn_results = pfn_result_v2->count;
- pnetinfo_v2 = (wl_pfn_net_info_v2_t *)pfn_result_v2->netinfo;
-
- if (e->event_type == WLC_E_PFN_NET_LOST) {
- WL_PNO(("Do Nothing %d\n", e->event_type));
- return 0;
- }
-
- WL_INFORM_MEM(("PFN NET FOUND event. count:%d \n", n_pfn_results));
-
- if (n_pfn_results > 0) {
- int i;
-
- if (n_pfn_results > MAX_PFN_LIST_COUNT)
- n_pfn_results = MAX_PFN_LIST_COUNT;
-
- bzero(&ssid, sizeof(ssid));
-
- request = (struct cfg80211_scan_request *)MALLOCZ(cfg->osh,
- sizeof(*request) + sizeof(*request->channels) * n_pfn_results);
- channel = (struct ieee80211_channel *)MALLOCZ(cfg->osh,
- (sizeof(struct ieee80211_channel) * n_pfn_results));
- if (!request || !channel) {
- WL_ERR(("No memory"));
- err = -ENOMEM;
- goto out_err;
- }
-
- request->wiphy = wiphy;
-
- if (DBG_RING_ACTIVE(dhdp, DHD_EVENT_RING_ID)) {
- alloc_len = sizeof(log_conn_event_t) + DOT11_MAX_SSID_LEN +
- sizeof(uint16) + sizeof(int16);
- event_data = (log_conn_event_t *)MALLOC(cfg->osh, alloc_len);
- if (!event_data) {
- WL_ERR(("%s: failed to allocate the log_conn_event_t with "
- "length(%d)\n", __func__, alloc_len));
- goto out_err;
- }
- tlv_len = 3 * sizeof(tlv_log);
- event_data->tlvs = (tlv_log *)MALLOC(cfg->osh, tlv_len);
- if (!event_data->tlvs) {
- WL_ERR(("%s: failed to allocate the tlv_log with "
- "length(%d)\n", __func__, tlv_len));
- goto out_err;
- }
- }
-
- for (i = 0; i < n_pfn_results; i++) {
- netinfo_v2 = &pnetinfo_v2[i];
- if (!netinfo_v2) {
- WL_ERR(("Invalid netinfo ptr. index:%d", i));
- err = -EINVAL;
- goto out_err;
- }
- WL_PNO((">>> SSID:%s Channel:%d \n",
- netinfo_v2->pfnsubnet.u.SSID,
- netinfo_v2->pfnsubnet.channel));
-				/* A PFN result doesn't carry all the information the
-				 * supplicant needs (e.g. IEs), so do a targeted escan and
-				 * report the sched scan results through wl_inform_single_bss
-				 * in the required format. Escan needs the request as a
-				 * cfg80211_scan_request, so for the time being build one
-				 * from the received PNO event.
-				 */
- ssid[i].ssid_len = MIN(DOT11_MAX_SSID_LEN,
- netinfo_v2->pfnsubnet.SSID_len);
-				/* ssid_len is capped to DOT11_MAX_SSID_LEN above, so the
-				 * memcpy_s below cannot overflow.
-				 */
- (void)memcpy_s(ssid[i].ssid, IEEE80211_MAX_SSID_LEN,
- netinfo_v2->pfnsubnet.u.SSID, ssid[i].ssid_len);
- request->n_ssids++;
-
- channel_req = netinfo_v2->pfnsubnet.channel;
- band = (channel_req <= CH_MAX_2G_CHANNEL) ? NL80211_BAND_2GHZ
- : NL80211_BAND_5GHZ;
- channel[i].center_freq =
- ieee80211_channel_to_frequency(channel_req, band);
- channel[i].band = band;
- channel[i].flags |= IEEE80211_CHAN_NO_HT40;
- request->channels[i] = &channel[i];
- request->n_channels++;
-
- if (DBG_RING_ACTIVE(dhdp, DHD_EVENT_RING_ID)) {
- payload_len = sizeof(log_conn_event_t);
- event_data->event = WIFI_EVENT_DRIVER_PNO_NETWORK_FOUND;
- tlv_data = event_data->tlvs;
-
- /* ssid */
- tlv_data->tag = WIFI_TAG_SSID;
- tlv_data->len = netinfo_v2->pfnsubnet.SSID_len;
- (void)memcpy_s(tlv_data->value, DOT11_MAX_SSID_LEN,
- ssid[i].ssid, ssid[i].ssid_len);
- payload_len += TLV_LOG_SIZE(tlv_data);
- tlv_data = TLV_LOG_NEXT(tlv_data);
-
- /* channel */
- tlv_data->tag = WIFI_TAG_CHANNEL;
- tlv_data->len = sizeof(uint16);
- (void)memcpy_s(tlv_data->value, sizeof(uint16),
- &channel_req, sizeof(uint16));
- payload_len += TLV_LOG_SIZE(tlv_data);
- tlv_data = TLV_LOG_NEXT(tlv_data);
-
- /* rssi */
- tlv_data->tag = WIFI_TAG_RSSI;
- tlv_data->len = sizeof(int16);
-					(void)memcpy_s(tlv_data->value, sizeof(int16),
-						&netinfo_v2->RSSI, sizeof(int16));
- payload_len += TLV_LOG_SIZE(tlv_data);
- tlv_data = TLV_LOG_NEXT(tlv_data);
-
- dhd_os_push_push_ring_data(dhdp, DHD_EVENT_RING_ID,
- &event_data->event, payload_len);
- }
- }
-
- /* assign parsed ssid array */
- if (request->n_ssids)
- request->ssids = &ssid[0];
-
- if (wl_get_drv_status_all(cfg, SCANNING)) {
- /* Abort any on-going scan */
- wl_cfg80211_cancel_scan(cfg);
- }
-
- if (wl_get_p2p_status(cfg, DISCOVERY_ON)) {
- WL_PNO((">>> P2P discovery was ON. Disabling it\n"));
- err = wl_cfgp2p_discover_enable_search(cfg, false);
- if (unlikely(err)) {
- wl_clr_drv_status(cfg, SCANNING, ndev);
- goto out_err;
- }
- p2p_scan(cfg) = false;
- }
-
- wl_set_drv_status(cfg, SCANNING, ndev);
-#if FULL_ESCAN_ON_PFN_NET_FOUND
- WL_PNO((">>> Doing Full ESCAN on PNO event\n"));
- err = wl_do_escan(cfg, wiphy, ndev, NULL);
-#else
- WL_PNO((">>> Doing targeted ESCAN on PNO event\n"));
- err = wl_do_escan(cfg, wiphy, ndev, request);
-#endif // endif
- if (err) {
- wl_clr_drv_status(cfg, SCANNING, ndev);
- goto out_err;
- }
- DBG_EVENT_LOG(dhdp, WIFI_EVENT_DRIVER_PNO_SCAN_REQUESTED);
- cfg->sched_scan_running = TRUE;
- }
- else {
- WL_ERR(("FALSE PNO Event. (pfn_count == 0) \n"));
- }
- } else {
- WL_ERR(("Unsupported version %d, expected %d or %d\n", pfn_result_v1->version,
- PFN_SCANRESULT_VERSION_V1, PFN_SCANRESULT_VERSION_V2));
- return 0;
- }
-out_err:
- if (request) {
- MFREE(cfg->osh, request,
- sizeof(*request) + sizeof(*request->channels) * n_pfn_results);
- }
- if (channel) {
- MFREE(cfg->osh, channel,
- (sizeof(struct ieee80211_channel) * n_pfn_results));
- }
-
- if (event_data) {
- if (event_data->tlvs) {
- MFREE(cfg->osh, event_data->tlvs, tlv_len);
- }
- MFREE(cfg->osh, event_data, alloc_len);
- }
- return err;
-}
-#endif /* WL_SCHED_SCAN */
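
Each PFN result only reports a channel number, so the handler above reconstructs the band and center frequency before building the targeted scan request. A simplified version of that mapping; the conversion below follows the common IEEE 802.11 channel numbering and is only a sketch of what the kernel's ieee80211_channel_to_frequency() does.

#include <stdio.h>

#define CH_MAX_2G_CHANNEL 14

enum band { BAND_2GHZ, BAND_5GHZ };

/* Convert an IEEE 802.11 channel number to a band and center frequency (MHz).
 * 2.4 GHz: channels 1..13 are 2407 + 5 * ch, channel 14 is 2484.
 * 5 GHz:   channels are 5000 + 5 * ch. */
static int channel_to_freq(int channel, enum band *band)
{
	if (channel <= CH_MAX_2G_CHANNEL) {
		*band = BAND_2GHZ;
		return (channel == 14) ? 2484 : 2407 + channel * 5;
	}
	*band = BAND_5GHZ;
	return 5000 + channel * 5;
}

int main(void)
{
	enum band b;
	int freq = channel_to_freq(36, &b);

	printf("channel 36 -> band %d, %d MHz\n", (int)b, freq);
	return 0;
}
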
-
-#ifdef PNO_SUPPORT
-s32
-wl_notify_pfn_status(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev,
- const wl_event_msg_t *e, void *data)
-{
- struct net_device *ndev = NULL;
-#ifdef GSCAN_SUPPORT
- void *ptr;
- int send_evt_bytes = 0;
- u32 event = be32_to_cpu(e->event_type);
- struct wiphy *wiphy = bcmcfg_to_wiphy(cfg);
-#endif /* GSCAN_SUPPORT */
-
- WL_INFORM_MEM((">>> PNO Event\n"));
-
- if (!data) {
- WL_ERR(("Data received is NULL!\n"));
- return 0;
- }
-
- ndev = cfgdev_to_wlc_ndev(cfgdev, cfg);
-#ifdef GSCAN_SUPPORT
- ptr = dhd_dev_process_epno_result(ndev, data, event, &send_evt_bytes);
- if (ptr) {
- wl_cfgvendor_send_async_event(wiphy, ndev,
- GOOGLE_SCAN_EPNO_EVENT, ptr, send_evt_bytes);
- MFREE(cfg->osh, ptr, send_evt_bytes);
- }
- if (!dhd_dev_is_legacy_pno_enabled(ndev))
- return 0;
-#endif /* GSCAN_SUPPORT */
-
-#ifndef WL_SCHED_SCAN
- mutex_lock(&cfg->usr_sync);
- /* TODO: Use cfg80211_sched_scan_results(wiphy); */
- CFG80211_DISCONNECTED(ndev, 0, NULL, 0, false, GFP_KERNEL);
- mutex_unlock(&cfg->usr_sync);
-#else
- /* If cfg80211 scheduled scan is supported, report the pno results via sched
- * scan results
- */
- wl_notify_sched_scan_results(cfg, ndev, e, data);
-#endif /* WL_SCHED_SCAN */
- return 0;
-}
-#endif /* PNO_SUPPORT */
-
-#ifdef GSCAN_SUPPORT
-s32
-wl_notify_gscan_event(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev,
- const wl_event_msg_t *e, void *data)
-{
- s32 err = 0;
- u32 event = be32_to_cpu(e->event_type);
- void *ptr = NULL;
- int send_evt_bytes = 0;
- int event_type;
- struct net_device *ndev = cfgdev_to_wlc_ndev(cfgdev, cfg);
- struct wiphy *wiphy = bcmcfg_to_wiphy(cfg);
- u32 len = ntoh32(e->datalen);
- u32 buf_len = 0;
-
- switch (event) {
- case WLC_E_PFN_BEST_BATCHING:
- err = dhd_dev_retrieve_batch_scan(ndev);
- if (err < 0) {
- WL_ERR(("Batch retrieval already in progress %d\n", err));
- } else {
- event_type = WIFI_SCAN_THRESHOLD_NUM_SCANS;
- if (data && len) {
- event_type = *((int *)data);
- }
- wl_cfgvendor_send_async_event(wiphy, ndev,
- GOOGLE_GSCAN_BATCH_SCAN_EVENT,
- &event_type, sizeof(int));
- }
- break;
- case WLC_E_PFN_SCAN_COMPLETE:
- event_type = WIFI_SCAN_COMPLETE;
- wl_cfgvendor_send_async_event(wiphy, ndev,
- GOOGLE_SCAN_COMPLETE_EVENT,
- &event_type, sizeof(int));
- break;
- case WLC_E_PFN_BSSID_NET_FOUND:
- ptr = dhd_dev_hotlist_scan_event(ndev, data, &send_evt_bytes,
- HOTLIST_FOUND, &buf_len);
- if (ptr) {
- wl_cfgvendor_send_hotlist_event(wiphy, ndev,
- ptr, send_evt_bytes, GOOGLE_GSCAN_GEOFENCE_FOUND_EVENT);
- dhd_dev_gscan_hotlist_cache_cleanup(ndev, HOTLIST_FOUND);
- } else {
- err = -ENOMEM;
- }
- break;
- case WLC_E_PFN_BSSID_NET_LOST:
-		/* WLC_E_PFN_BSSID_NET_LOST shares its event code with
-		 * WLC_E_PFN_SCAN_ALLGONE. We currently do not use
-		 * WLC_E_PFN_SCAN_ALLGONE, so ignore it if received.
-		 */
- if (len) {
- ptr = dhd_dev_hotlist_scan_event(ndev, data, &send_evt_bytes,
- HOTLIST_LOST, &buf_len);
- if (ptr) {
- wl_cfgvendor_send_hotlist_event(wiphy, ndev,
- ptr, send_evt_bytes, GOOGLE_GSCAN_GEOFENCE_LOST_EVENT);
- dhd_dev_gscan_hotlist_cache_cleanup(ndev, HOTLIST_LOST);
- MFREE(cfg->osh, ptr, buf_len);
- } else {
- err = -ENOMEM;
- }
- } else {
- err = -EINVAL;
- }
- break;
- case WLC_E_PFN_GSCAN_FULL_RESULT:
- ptr = dhd_dev_process_full_gscan_result(ndev, data, len, &send_evt_bytes);
- if (ptr) {
- wl_cfgvendor_send_async_event(wiphy, ndev,
- GOOGLE_SCAN_FULL_RESULTS_EVENT, ptr, send_evt_bytes);
- MFREE(cfg->osh, ptr, send_evt_bytes);
- } else {
- err = -ENOMEM;
- }
- break;
- case WLC_E_PFN_SSID_EXT:
- ptr = dhd_dev_process_epno_result(ndev, data, event, &send_evt_bytes);
- if (ptr) {
- wl_cfgvendor_send_async_event(wiphy, ndev,
- GOOGLE_SCAN_EPNO_EVENT, ptr, send_evt_bytes);
- MFREE(cfg->osh, ptr, send_evt_bytes);
- } else {
- err = -ENOMEM;
- }
- break;
- default:
- WL_ERR(("Unknown event %d\n", event));
- break;
- }
- return err;
-}
-#endif /* GSCAN_SUPPORT */
-
-void wl_cfg80211_set_passive_scan(struct net_device *dev, char *command)
-{
- struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
-
- if (strcmp(command, "SCAN-ACTIVE") == 0) {
- cfg->active_scan = 1;
- } else if (strcmp(command, "SCAN-PASSIVE") == 0) {
- cfg->active_scan = 0;
- } else
- WL_ERR(("Unknown command \n"));
- return;
-}
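
wl_cfg80211_set_passive_scan() simply maps the "SCAN-ACTIVE" / "SCAN-PASSIVE" private command strings onto cfg->active_scan. A trivial model of that parsing, returning the new active_scan value:

#include <string.h>
#include <stdio.h>

/* Returns 1 for SCAN-ACTIVE, 0 for SCAN-PASSIVE, -1 for an unknown command. */
static int parse_scan_mode(const char *command)
{
	if (strcmp(command, "SCAN-ACTIVE") == 0)
		return 1;
	if (strcmp(command, "SCAN-PASSIVE") == 0)
		return 0;
	return -1;
}

int main(void)
{
	printf("SCAN-PASSIVE -> active_scan=%d\n", parse_scan_mode("SCAN-PASSIVE"));
	return 0;
}
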
+++ /dev/null
-/*
- * Header for Linux cfg80211 scan
- *
- * Copyright (C) 1999-2019, Broadcom.
- *
- * Unless you and Broadcom execute a separate written software license
- * agreement governing use of this software, this software is licensed to you
- * under the terms of the GNU General Public License version 2 (the "GPL"),
- * available at http://www.broadcom.com/licenses/GPLv2.php, with the
- * following added to such license:
- *
- * As a special exception, the copyright holders of this software give you
- * permission to link this software with independent modules, and to copy and
- * distribute the resulting executable under terms of your choice, provided that
- * you also meet, for each linked independent module, the terms and conditions of
- * the license of that module. An independent module is a module which is not
- * derived from this software. The special exception does not apply to any
- * modifications of the software.
- *
- * Notwithstanding the above, under no circumstances may you combine this
- * software in any way with any other Broadcom software provided under a license
- * other than the GPL, without Broadcom's express prior written consent.
- *
- *
- * <<Broadcom-WL-IPTag/Open:>>
- *
- * $Id$
- */
-
-#ifndef _wl_cfgscan_h_
-#define _wl_cfgscan_h_
-
-#include <linux/wireless.h>
-#include <typedefs.h>
-#include <ethernet.h>
-#include <wlioctl.h>
-#include <linux/wireless.h>
-#include <net/cfg80211.h>
-#include <linux/rfkill.h>
-#include <osl.h>
-
-#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 6, 0))
-#define GET_SCAN_WDEV(scan_request) \
- (scan_request && scan_request->dev) ? scan_request->dev->ieee80211_ptr : NULL;
-#else
-#define GET_SCAN_WDEV(scan_request) \
- scan_request ? scan_request->wdev : NULL;
-#endif // endif
-#ifdef WL_SCHED_SCAN
-#define GET_SCHED_SCAN_WDEV(scan_request) \
- (scan_request && scan_request->dev) ? scan_request->dev->ieee80211_ptr : NULL;
-#endif /* WL_SCHED_SCAN */
-
-extern s32 wl_escan_handler(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev,
- const wl_event_msg_t *e, void *data);
-extern s32 wl_do_escan(struct bcm_cfg80211 *cfg, struct wiphy *wiphy,
- struct net_device *ndev, struct cfg80211_scan_request *request);
-extern s32 __wl_cfg80211_scan(struct wiphy *wiphy, struct net_device *ndev,
- struct cfg80211_scan_request *request, struct cfg80211_ssid *this_ssid);
-#if defined(WL_CFG80211_P2P_DEV_IF)
-extern s32 wl_cfg80211_scan(struct wiphy *wiphy, struct cfg80211_scan_request *request);
-#else
-extern s32 wl_cfg80211_scan(struct wiphy *wiphy, struct net_device *ndev,
- struct cfg80211_scan_request *request);
-extern int wl_cfg80211_scan_stop(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev);
-#endif /* WL_CFG80211_P2P_DEV_IF */
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0))
-extern void wl_cfg80211_abort_scan(struct wiphy *wiphy, struct wireless_dev *wdev);
-#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0)) */
-extern void wl_cfg80211_scan_abort(struct bcm_cfg80211 *cfg);
-extern s32 wl_init_scan(struct bcm_cfg80211 *cfg);
-extern int wl_cfg80211_scan_stop(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev);
-extern s32 wl_notify_scan_status(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev,
- const wl_event_msg_t *e, void *data);
-extern void wl_cfg80211_set_passive_scan(struct net_device *dev, char *command);
-#ifdef PNO_SUPPORT
-extern s32 wl_notify_pfn_status(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev,
- const wl_event_msg_t *e, void *data);
-#endif /* PNO_SUPPORT */
-#ifdef GSCAN_SUPPORT
-extern s32 wl_notify_gscan_event(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev,
- const wl_event_msg_t *e, void *data);
-#endif /* GSCAN_SUPPORT */
-
-#ifdef WL_SCHED_SCAN
-extern int wl_cfg80211_sched_scan_start(struct wiphy *wiphy, struct net_device *dev,
- struct cfg80211_sched_scan_request *request);
-#if (LINUX_VERSION_CODE > KERNEL_VERSION(4, 11, 0))
-extern int wl_cfg80211_sched_scan_stop(struct wiphy *wiphy, struct net_device *dev, u64 reqid);
-#else
-extern int wl_cfg80211_sched_scan_stop(struct wiphy *wiphy, struct net_device *dev);
-#endif /* LINUX_VER > 4.11 */
-#endif /* WL_SCHED_SCAN */
-extern void wl_notify_scan_done(struct bcm_cfg80211 *cfg, bool aborted);
-#endif /* _wl_cfgscan_h_ */
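/*
 * Illustrative sketch, not part of the original patch: the GET_SCAN_WDEV
 * macro deleted above papers over a cfg80211 change -- before Linux 3.6 a
 * struct cfg80211_scan_request carried a net_device pointer, while newer
 * kernels embed the wireless_dev directly. A function-style equivalent,
 * assuming only that kernel-version split:
 */
#include <linux/version.h>
#include <net/cfg80211.h>

static inline struct wireless_dev *
scan_request_to_wdev(struct cfg80211_scan_request *request)
{
	if (!request)
		return NULL;
#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 6, 0))
	/* pre-3.6: reach the wireless_dev through the net_device */
	return request->dev ? request->dev->ieee80211_ptr : NULL;
#else
	/* 3.6+: the request carries the wireless_dev itself */
	return request->wdev;
#endif
}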
/*
* Linux cfg80211 Vendor Extension Code
*
- * Copyright (C) 1999-2019, Broadcom.
- *
+ * Copyright (C) 1999-2017, Broadcom Corporation
+ *
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
* under the terms of the GNU General Public License version 2 (the "GPL"),
* available at http://www.broadcom.com/licenses/GPLv2.php, with the
* following added to such license:
- *
+ *
* As a special exception, the copyright holders of this software give you
* permission to link this software with independent modules, and to copy and
* distribute the resulting executable under terms of your choice, provided that
* the license of that module. An independent module is a module which is not
* derived from this software. The special exception does not apply to any
* modifications of the software.
- *
+ *
* Notwithstanding the above, under no circumstances may you combine this
* software in any way with any other Broadcom software provided under a license
* other than the GPL, without Broadcom's express prior written consent.
*
* <<Broadcom-WL-IPTag/Open:>>
*
- * $Id: wl_cfgvendor.c 825970 2019-06-18 05:28:31Z $
+ * $Id: wl_cfgvendor.c 710862 2017-07-14 07:43:59Z $
*/
/*
#include <wldev_common.h>
#include <wl_cfg80211.h>
#include <wl_cfgp2p.h>
-#ifdef WL_NAN
-#include <wl_cfgnan.h>
-#endif /* WL_NAN */
#include <wl_android.h>
#include <wl_cfgvendor.h>
#ifdef PROP_TXSTATUS
#include <dhd_wlfc.h>
-#endif // endif
+#endif
#include <brcm_nl80211.h>
-char*
-wl_get_kernel_timestamp(void)
-{
- static char buf[32];
- u64 ts_nsec;
- unsigned long rem_nsec;
-
- ts_nsec = local_clock();
- rem_nsec = DIV_AND_MOD_U64_BY_U32(ts_nsec, NSEC_PER_SEC);
- snprintf(buf, sizeof(buf), "%5lu.%06lu",
- (unsigned long)ts_nsec, rem_nsec / NSEC_PER_USEC);
-
- return buf;
-}
+#ifdef STAT_REPORT
+#include <wl_statreport.h>
+#endif
#if (LINUX_VERSION_CODE > KERNEL_VERSION(3, 13, 0)) || defined(WL_VENDOR_EXT_SUPPORT)
-#if defined(WL_SUPP_EVENT)
-int
-wl_cfgvendor_send_supp_eventstring(const char *func_name, const char *fmt, ...)
-{
- char buf[SUPP_LOG_LEN] = {0};
- struct bcm_cfg80211 *cfg;
- struct wiphy *wiphy;
- va_list args;
- int len;
- int prefix_len;
- int rem_len;
-
- cfg = wl_cfg80211_get_bcmcfg();
- if (!cfg || !cfg->wdev) {
- WL_DBG(("supp evt invalid arg\n"));
- return BCME_OK;
- }
-
- wiphy = cfg->wdev->wiphy;
- prefix_len = snprintf(buf, SUPP_LOG_LEN, "[DHD]<%s> %s: ",
- wl_get_kernel_timestamp(), __func__);
- /* Remaining buffer len */
- rem_len = SUPP_LOG_LEN - (prefix_len + 1);
- /* Print the arg list on to the remaining part of the buffer */
- va_start(args, fmt);
- len = vsnprintf((buf + prefix_len), rem_len, fmt, args);
- va_end(args);
- if (len < 0) {
- return -EINVAL;
- }
-
- if (len > rem_len) {
- /* If return length is greater than buffer len,
- * then its truncated buffer case.
- */
- len = rem_len;
- }
-
- /* Ensure the buffer is null terminated */
- len += prefix_len;
- buf[len] = '\0';
- len++;
-
- return wl_cfgvendor_send_async_event(wiphy,
- bcmcfg_to_prmry_ndev(cfg), BRCM_VENDOR_EVENT_PRIV_STR, buf, len);
-}
-
-int
-wl_cfgvendor_notify_supp_event_str(const char *evt_name, const char *fmt, ...)
-{
- char buf[SUPP_LOG_LEN] = {0};
- struct bcm_cfg80211 *cfg;
- struct wiphy *wiphy;
- va_list args;
- int len;
- int prefix_len;
- int rem_len;
-
- cfg = wl_cfg80211_get_bcmcfg();
- if (!cfg || !cfg->wdev) {
- WL_DBG(("supp evt invalid arg\n"));
- return BCME_OK;
- }
- wiphy = cfg->wdev->wiphy;
- prefix_len = snprintf(buf, SUPP_LOG_LEN, "%s ", evt_name);
- /* Remaining buffer len */
- rem_len = SUPP_LOG_LEN - (prefix_len + 1);
- /* Print the arg list on to the remaining part of the buffer */
- va_start(args, fmt);
- len = vsnprintf((buf + prefix_len), rem_len, fmt, args);
- va_end(args);
- if (len < 0) {
- return -EINVAL;
- }
-
- if (len > rem_len) {
- /* If return length is greater than buffer len,
- * then its truncated buffer case.
- */
- len = rem_len;
- }
-
- /* Ensure the buffer is null terminated */
- len += prefix_len;
- buf[len] = '\0';
- len++;
-
- return wl_cfgvendor_send_async_event(wiphy,
- bcmcfg_to_prmry_ndev(cfg), BRCM_VENDOR_EVENT_PRIV_STR, buf, len);
-}
-#endif /* WL_SUPP_EVENT */
/*
* This API is to be used for asynchronous vendor events. This
int wl_cfgvendor_send_async_event(struct wiphy *wiphy,
struct net_device *dev, int event_id, const void *data, int len)
{
- gfp_t kflags;
+ u16 kflags;
struct sk_buff *skb;
kflags = in_atomic() ? GFP_ATOMIC : GFP_KERNEL;
/* Alloc the SKB for vendor_event */
#if (defined(CONFIG_ARCH_MSM) && defined(SUPPORT_WDEV_CFG80211_VENDOR_EVENT_ALLOC)) || \
LINUX_VERSION_CODE >= KERNEL_VERSION(4, 1, 0)
- skb = cfg80211_vendor_event_alloc(wiphy, ndev_to_wdev(dev), len, event_id, kflags);
+ skb = cfg80211_vendor_event_alloc(wiphy, NULL, len, event_id, kflags);
#else
skb = cfg80211_vendor_event_alloc(wiphy, len, event_id, kflags);
#endif /* (defined(CONFIG_ARCH_MSM) && defined(SUPPORT_WDEV_CFG80211_VENDOR_EVENT_ALLOC)) || */
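/*
 * Illustrative sketch, not from the patch: the #if ladder above exists
 * because cfg80211_vendor_event_alloc() gained a struct wireless_dev *
 * argument in mainline 4.1 (and earlier on some MSM vendor kernels). A
 * minimal "allocate, fill, send" helper, assuming only the mainline 4.1
 * split and that event_idx indexes the wiphy's registered vendor_events
 * table; send_vendor_blob() is a name invented for this sketch:
 */
#include <linux/version.h>
#include <net/cfg80211.h>
#include <net/netlink.h>

static int send_vendor_blob(struct wiphy *wiphy, struct wireless_dev *wdev,
	int event_idx, const void *data, int len, gfp_t gfp)
{
	struct sk_buff *skb;

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 1, 0))
	skb = cfg80211_vendor_event_alloc(wiphy, wdev, len, event_idx, gfp);
#else
	skb = cfg80211_vendor_event_alloc(wiphy, len, event_idx, gfp);
#endif
	if (!skb)
		return -ENOMEM;

	/* raw payload without an attribute header, mirroring the driver */
	if (nla_put_nohdr(skb, len, data)) {
		kfree_skb(skb);
		return -EMSGSIZE;
	}
	cfg80211_vendor_event(skb, gfp);	/* consumes the skb */
	return 0;
}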
static int
wl_cfgvendor_send_cmd_reply(struct wiphy *wiphy,
- const void *data, int len)
+ struct net_device *dev, const void *data, int len)
{
struct sk_buff *skb;
- int err;
/* Alloc the SKB for vendor_event */
skb = cfg80211_vendor_cmd_alloc_reply_skb(wiphy, len);
if (unlikely(!skb)) {
WL_ERR(("skb alloc failed"));
- err = -ENOMEM;
- goto exit;
+ return -ENOMEM;
}
/* Push the data to the skb */
nla_put_nohdr(skb, len, data);
- err = cfg80211_vendor_cmd_reply(skb);
-exit:
- WL_DBG(("wl_cfgvendor_send_cmd_reply status %d", err));
- return err;
+
+ return cfg80211_vendor_cmd_reply(skb);
}
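/*
 * Illustrative usage sketch with a hypothetical handler (example_doit is
 * not a function in this driver): a vendor .doit callback answers the
 * pending command by allocating a reply skb sized to the payload, copying
 * the payload in, and handing the skb to cfg80211_vendor_cmd_reply(),
 * which consumes it whether the send succeeds or fails.
 */
#include <net/cfg80211.h>
#include <net/netlink.h>

static int example_doit(struct wiphy *wiphy, struct wireless_dev *wdev,
	const void *data, int len)
{
	u32 reply = 0x1;	/* placeholder payload for the sketch */
	struct sk_buff *skb;

	skb = cfg80211_vendor_cmd_alloc_reply_skb(wiphy, sizeof(reply));
	if (!skb)
		return -ENOMEM;
	if (nla_put_nohdr(skb, sizeof(reply), &reply)) {
		kfree_skb(skb);	/* not yet owned by cfg80211 */
		return -EMSGSIZE;
	}
	return cfg80211_vendor_cmd_reply(skb);
}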
static int
reply = dhd_dev_get_feature_set(bcmcfg_to_prmry_ndev(cfg));
- err = wl_cfgvendor_send_cmd_reply(wiphy, &reply, sizeof(int));
+ err = wl_cfgvendor_send_cmd_reply(wiphy, bcmcfg_to_prmry_ndev(cfg),
+ &reply, sizeof(int));
if (unlikely(err))
WL_ERR(("Vendor Command reply failed ret:%d \n", err));
goto exit;
}
- err = nla_put_u32(skb, ANDR_WIFI_ATTRIBUTE_NUM_FEATURE_SET,
- MAX_FEATURE_SET_CONCURRRENT_GROUPS);
- if (unlikely(err)) {
- kfree_skb(skb);
- goto exit;
- }
+ nla_put_u32(skb, ANDR_WIFI_ATTRIBUTE_NUM_FEATURE_SET, MAX_FEATURE_SET_CONCURRRENT_GROUPS);
for (i = 0; i < MAX_FEATURE_SET_CONCURRRENT_GROUPS; i++) {
reply = dhd_dev_get_feature_set_matrix(bcmcfg_to_prmry_ndev(cfg), i);
if (reply != WIFI_FEATURE_INVALID) {
- err = nla_put_u32(skb, ANDR_WIFI_ATTRIBUTE_FEATURE_SET,
- reply);
- if (unlikely(err)) {
- kfree_skb(skb);
- goto exit;
- }
+ nla_put_u32(skb, ANDR_WIFI_ATTRIBUTE_FEATURE_SET, reply);
}
}
wl_cfgvendor_set_rand_mac_oui(struct wiphy *wiphy,
struct wireless_dev *wdev, const void *data, int len)
{
- int err = -EINVAL;
+ int err = 0;
struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
int type;
-
- if (!data) {
- WL_ERR(("data is not available\n"));
- goto exit;
- }
-
- if (len <= 0) {
- WL_ERR(("invalid len %d\n", len));
- goto exit;
- }
+ uint8 random_mac_oui[DOT11_OUI_LEN];
type = nla_type(data);
if (type == ANDR_WIFI_ATTRIBUTE_RANDOM_MAC_OUI) {
- if (nla_len(data) != DOT11_OUI_LEN) {
- WL_ERR(("nla_len not matched.\n"));
- goto exit;
- }
- err = dhd_dev_cfg_rand_mac_oui(bcmcfg_to_prmry_ndev(cfg), nla_data(data));
+ memcpy(random_mac_oui, nla_data(data), DOT11_OUI_LEN);
+
+ err = dhd_dev_cfg_rand_mac_oui(bcmcfg_to_prmry_ndev(cfg), random_mac_oui);
if (unlikely(err))
WL_ERR(("Bad OUI, could not set:%d \n", err));
+
+ } else {
+ err = -1;
}
-exit:
+
return err;
}
#ifdef CUSTOM_FORCE_NODFS_FLAG
wl_cfgvendor_set_nodfs_flag(struct wiphy *wiphy,
struct wireless_dev *wdev, const void *data, int len)
{
- int err = -EINVAL;
+ int err = 0;
struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
int type;
u32 nodfs;
- if (!data) {
- WL_ERR(("data is not available\n"));
- return -EINVAL;
- }
-
- if (len <= 0) {
- WL_ERR(("invalid len %d\n", len));
- return -EINVAL;
- }
-
type = nla_type(data);
if (type == ANDR_WIFI_ATTRIBUTE_NODFS_SET) {
nodfs = nla_get_u32(data);
err = dhd_dev_set_nodfs(bcmcfg_to_prmry_ndev(cfg), nodfs);
+ } else {
+ err = -1;
}
-
return err;
}
#endif /* CUSTOM_FORCE_NODFS_FLAG */
int err = BCME_ERROR, rem, type;
char country_code[WLC_CNTRY_BUF_SZ] = {0};
const struct nlattr *iter;
- struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
- struct net_device *primary_ndev = bcmcfg_to_prmry_ndev(cfg);
nla_for_each_attr(iter, data, len, rem) {
type = nla_type(iter);
switch (type) {
case ANDR_WIFI_ATTRIBUTE_COUNTRY:
- err = memcpy_s(country_code, WLC_CNTRY_BUF_SZ,
- nla_data(iter), nla_len(iter));
- if (err) {
- WL_ERR(("Failed to copy country code: %d\n", err));
- return err;
- }
+ memcpy(country_code, nla_data(iter),
+ MIN(nla_len(iter), WLC_CNTRY_BUF_SZ));
break;
default:
WL_ERR(("Unknown type: %d\n", type));
return err;
}
}
- /* country code is unique for dongle..hence using primary interface. */
- err = wl_cfg80211_set_country_code(primary_ndev, country_code, true, true, -1);
+
+ err = wldev_set_country(wdev->netdev, country_code, true, true, -1);
if (err < 0) {
WL_ERR(("Set country failed ret:%d\n", err));
}
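/*
 * Illustrative sketch: the country-code handler above uses the standard
 * netlink TLV walk. EXAMPLE_ATTR_COUNTRY and parse_country() are names
 * invented for this sketch, not identifiers from the driver headers.
 */
#include <net/netlink.h>
#include <linux/string.h>

#define EXAMPLE_ATTR_COUNTRY 1	/* hypothetical attribute id */

static int parse_country(const void *data, int len, char *ccode, int ccode_sz)
{
	const struct nlattr *attr;
	int rem;

	nla_for_each_attr(attr, data, len, rem) {
		if (nla_type(attr) != EXAMPLE_ATTR_COUNTRY)
			continue;
		if (nla_len(attr) >= ccode_sz)	/* leave room for '\0' */
			return -EINVAL;
		memcpy(ccode, nla_data(attr), nla_len(attr));
		ccode[nla_len(attr)] = '\0';
		return 0;
	}
	return -ENOENT;	/* attribute not present */
}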
wl_cfgvendor_send_hotlist_event(struct wiphy *wiphy,
struct net_device *dev, void *data, int len, wl_vendor_event_t event)
{
- gfp_t kflags;
+ u16 kflags;
const void *ptr;
struct sk_buff *skb;
int malloc_len, total, iter_cnt_to_send, cnt;
/* Alloc the SKB for vendor_event */
#if (defined(CONFIG_ARCH_MSM) && defined(SUPPORT_WDEV_CFG80211_VENDOR_EVENT_ALLOC)) || \
LINUX_VERSION_CODE >= KERNEL_VERSION(4, 1, 0)
- skb = cfg80211_vendor_event_alloc(wiphy, ndev_to_wdev(dev),
- malloc_len, event, kflags);
+ skb = cfg80211_vendor_event_alloc(wiphy, NULL, malloc_len, event, kflags);
#else
skb = cfg80211_vendor_event_alloc(wiphy, malloc_len, event, kflags);
#endif /* (defined(CONFIG_ARCH_MSM) && defined(SUPPORT_WDEV_CFG80211_VENDOR_EVENT_ALLOC)) || */
return 0;
}
+
static int
wl_cfgvendor_gscan_get_capabilities(struct wiphy *wiphy,
struct wireless_dev *wdev, const void *data, int len)
dhd_pno_gscan_capabilities_t *reply = NULL;
uint32 reply_len = 0;
+
reply = dhd_dev_pno_get_gscan(bcmcfg_to_prmry_ndev(cfg),
DHD_PNO_GET_CAPABILITIES, NULL, &reply_len);
if (!reply) {
return err;
}
- err = wl_cfgvendor_send_cmd_reply(wiphy, reply, reply_len);
+ err = wl_cfgvendor_send_cmd_reply(wiphy, bcmcfg_to_prmry_ndev(cfg),
+ reply, reply_len);
+
if (unlikely(err)) {
WL_ERR(("Vendor Command reply failed ret:%d \n", err));
}
- MFREE(cfg->osh, reply, reply_len);
+ kfree(reply);
return err;
}
if (!results) {
WL_ERR(("No results to send %d\n", err));
- err = wl_cfgvendor_send_cmd_reply(wiphy, results, 0);
+ err = wl_cfgvendor_send_cmd_reply(wiphy, bcmcfg_to_prmry_ndev(cfg),
+ results, 0);
if (unlikely(err))
WL_ERR(("Vendor Command reply failed ret:%d \n", err));
iter = results;
complete_flag = nla_reserve(skb, GSCAN_ATTRIBUTE_SCAN_RESULTS_COMPLETE,
sizeof(is_done));
-
- if (unlikely(!complete_flag)) {
- WL_ERR(("complete_flag could not be reserved"));
- kfree_skb(skb);
- dhd_dev_pno_unlock_access_batch_results(bcmcfg_to_prmry_ndev(cfg));
- return -ENOMEM;
- }
mem_needed = mem_needed - (SCAN_RESULTS_COMPLETE_FLAG_LEN + VENDOR_REPLY_OVERHEAD);
while (iter) {
is_done = 0;
break;
}
- err = nla_put_u32(skb, GSCAN_ATTRIBUTE_SCAN_ID, iter->scan_id);
- if (unlikely(err)) {
- goto fail;
- }
- err = nla_put_u8(skb, GSCAN_ATTRIBUTE_SCAN_FLAGS, iter->flag);
- if (unlikely(err)) {
- goto fail;
- }
- err = nla_put_u32(skb, GSCAN_ATTRIBUTE_CH_BUCKET_BITMASK, iter->scan_ch_bucket);
- if (unlikely(err)) {
- goto fail;
- }
+ nla_put_u32(skb, GSCAN_ATTRIBUTE_SCAN_ID, iter->scan_id);
+ nla_put_u8(skb, GSCAN_ATTRIBUTE_SCAN_FLAGS, iter->flag);
+ nla_put_u32(skb, GSCAN_ATTRIBUTE_CH_BUCKET_BITMASK, iter->scan_ch_bucket);
+
num_results_iter = iter->tot_count - iter->tot_consumed;
- err = nla_put_u32(skb, GSCAN_ATTRIBUTE_NUM_OF_RESULTS, num_results_iter);
- if (unlikely(err)) {
- goto fail;
- }
+ nla_put_u32(skb, GSCAN_ATTRIBUTE_NUM_OF_RESULTS, num_results_iter);
if (num_results_iter) {
ptr = &iter->results[iter->tot_consumed];
- err = nla_put(skb, GSCAN_ATTRIBUTE_SCAN_RESULTS,
- num_results_iter * sizeof(wifi_gscan_result_t), ptr);
- if (unlikely(err)) {
- goto fail;
- }
iter->tot_consumed += num_results_iter;
+ nla_put(skb, GSCAN_ATTRIBUTE_SCAN_RESULTS,
+ num_results_iter * sizeof(wifi_gscan_result_t), ptr);
}
nla_nest_end(skb, scan_hdr);
mem_needed -= GSCAN_BATCH_RESULT_HDR_LEN +
(num_results_iter * sizeof(wifi_gscan_result_t));
iter = iter->next;
}
- /* Cleans up consumed results and returns TRUE if all results are consumed */
+ /* Returns TRUE if all results are consumed */
is_done = dhd_dev_gscan_batch_cache_cleanup(bcmcfg_to_prmry_ndev(cfg));
memcpy(nla_data(complete_flag), &is_done, sizeof(is_done));
dhd_dev_pno_unlock_access_batch_results(bcmcfg_to_prmry_ndev(cfg));
return cfg80211_vendor_cmd_reply(skb);
-fail:
- /* Free up consumed results which will now not be sent */
- (void)dhd_dev_gscan_batch_cache_cleanup(bcmcfg_to_prmry_ndev(cfg));
- kfree_skb(skb);
- dhd_dev_pno_unlock_access_batch_results(bcmcfg_to_prmry_ndev(cfg));
- return err;
}
static int
return -EINVAL;
}
+
}
static int
int type;
bool real_time = FALSE;
- if (!data) {
- WL_ERR(("data is not available\n"));
- return -EINVAL;
- }
-
- if (len <= 0) {
- WL_ERR(("invalid len %d\n", len));
- return -EINVAL;
- }
-
type = nla_type(data);
if (type == GSCAN_ATTRIBUTE_ENABLE_FULL_SCAN_RESULTS) {
int type, tmp;
const struct nlattr *iter;
- scan_param = (gscan_scan_params_t *)MALLOCZ(cfg->osh,
- sizeof(gscan_scan_params_t));
+ scan_param = kzalloc(sizeof(gscan_scan_params_t), GFP_KERNEL);
if (!scan_param) {
WL_ERR(("Could not set GSCAN scan cfg, mem alloc failure\n"));
err = -EINVAL;
}
exit:
- MFREE(cfg->osh, scan_param, sizeof(gscan_scan_params_t));
+ kfree(scan_param);
return err;
}
return -EINVAL;
}
- hotlist_params = (gscan_hotlist_scan_params_t *)MALLOCZ(cfg->osh,
- sizeof(*hotlist_params)
- + (sizeof(struct bssid_t) * (PFN_SWC_MAX_NUM_APS - 1)));
+ hotlist_params = kzalloc(sizeof(*hotlist_params)
+ + (sizeof(struct bssid_t) * (PFN_SWC_MAX_NUM_APS - 1)),
+ GFP_KERNEL);
if (!hotlist_params) {
WL_ERR(("Cannot Malloc memory.\n"));
nla_for_each_attr(iter, data, len, tmp2) {
type = nla_type(iter);
switch (type) {
- case GSCAN_ATTRIBUTE_HOTLIST_BSSID_COUNT:
- if (nla_len(iter) != sizeof(uint32)) {
- WL_DBG(("type:%d length:%d not matching.\n",
- type, nla_len(iter)));
- err = -EINVAL;
- goto exit;
- }
- hotlist_params->nbssid = (uint16)nla_get_u32(iter);
- if ((hotlist_params->nbssid == 0) ||
- (hotlist_params->nbssid > PFN_SWC_MAX_NUM_APS)) {
- WL_ERR(("nbssid:%d exceed limit.\n",
- hotlist_params->nbssid));
- err = -EINVAL;
- goto exit;
- }
- break;
case GSCAN_ATTRIBUTE_HOTLIST_BSSIDS:
- if (hotlist_params->nbssid == 0) {
- WL_ERR(("nbssid not retrieved.\n"));
- err = -EINVAL;
- goto exit;
- }
pbssid = hotlist_params->bssid;
nla_for_each_nested(outer, iter, tmp) {
- if (j >= hotlist_params->nbssid)
- break;
nla_for_each_nested(inner, outer, tmp1) {
type = nla_type(inner);
goto exit;
}
dummy = (int8)nla_get_u8(inner);
- WL_DBG(("dummy %d\n", dummy));
break;
default:
WL_ERR(("ATTR unknown %d\n", type));
goto exit;
}
}
- j++;
- }
- if (j != hotlist_params->nbssid) {
- WL_ERR(("bssid_cnt:%d != nbssid:%d.\n", j,
- hotlist_params->nbssid));
- err = -EINVAL;
- goto exit;
+ if (++j >= PFN_SWC_MAX_NUM_APS) {
+ WL_ERR(("cap hotlist max:%d\n", j));
+ break;
+ }
}
+ hotlist_params->nbssid = j;
break;
case GSCAN_ATTRIBUTE_HOTLIST_FLUSH:
if (nla_len(iter) != sizeof(uint8)) {
WL_ERR(("type:%d length:%d not matching.\n",
- type, nla_len(iter)));
+ type, nla_len(inner)));
err = -EINVAL;
goto exit;
}
case GSCAN_ATTRIBUTE_LOST_AP_SAMPLE_SIZE:
if (nla_len(iter) != sizeof(uint32)) {
WL_ERR(("type:%d length:%d not matching.\n",
- type, nla_len(iter)));
+ type, nla_len(inner)));
err = -EINVAL;
goto exit;
}
goto exit;
}
exit:
- MFREE(cfg->osh, hotlist_params, sizeof(*hotlist_params)
- + (sizeof(struct bssid_t) * (PFN_SWC_MAX_NUM_APS - 1)));
+ kfree(hotlist_params);
return err;
}
{
int err = 0;
struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
- dhd_pno_ssid_t *ssid_elem = NULL;
+ dhd_pno_ssid_t *ssid_elem;
int tmp, tmp1, tmp2, type = 0, num = 0;
const struct nlattr *outer, *inner, *iter;
uint8 flush = FALSE, i = 0;
- wl_ssid_ext_params_t params;
+ wl_pfn_ssid_params_t params;
nla_for_each_attr(iter, data, len, tmp2) {
type = nla_type(iter);
"long %d\n",
ssid_elem->SSID_len));
err = -EINVAL;
- MFREE(cfg->osh, ssid_elem,
- num);
goto exit;
}
break;
if (!ssid_elem->SSID_len) {
WL_ERR(("Broadcast SSID is illegal for ePNO\n"));
err = -EINVAL;
- MFREE(cfg->osh, ssid_elem, num);
goto exit;
}
dhd_pno_translate_epno_fw_flags(&ssid_elem->flags);
dhd_pno_set_epno_auth_flag(&ssid_elem->wpa_auth);
- MFREE(cfg->osh, ssid_elem, num);
}
break;
case GSCAN_ATTRIBUTE_EPNO_SSID_NUM:
uint16 *reply = NULL;
uint32 reply_len = 0, num_channels, mem_needed;
struct sk_buff *skb;
- dhd_pub_t *dhdp;
- struct net_device *ndev = wdev->netdev;
-
- if (!ndev) {
- WL_ERR(("ndev null\n"));
- return -EINVAL;
- }
-
- dhdp = wl_cfg80211_get_dhdp(ndev);
- if (!dhdp) {
- WL_ERR(("dhdp null\n"));
- return -EINVAL;
- }
-
- if (!data) {
- WL_ERR(("data is not available\n"));
- return -EINVAL;
- }
-
- if (len <= 0) {
- WL_ERR(("invalid len %d\n", len));
- return -EINVAL;
- }
type = nla_type(data);
+
if (type == GSCAN_ATTRIBUTE_BAND) {
band = nla_get_u32(data);
} else {
return -EINVAL;
}
- reply = dhd_pno_get_gscan(dhdp,
+ reply = dhd_dev_pno_get_gscan(bcmcfg_to_prmry_ndev(cfg),
DHD_PNO_GET_CHANNEL_LIST, &band, &reply_len);
+
if (!reply) {
WL_ERR(("Could not get channel list\n"));
err = -EINVAL;
WL_ERR(("Vendor Command reply failed ret:%d \n", err));
}
exit:
- MFREE(cfg->osh, reply, reply_len);
+ kfree(reply);
return err;
}
#endif /* GSCAN_SUPPORT || DHD_GET_VALID_CHANNELS */
}
#endif /* RSSI_MONITOR_SUPPORT */
+#ifdef DHDTCPACK_SUPPRESS
+static int wl_cfgvendor_set_tcpack_sup_mode(struct wiphy *wiphy,
+ struct wireless_dev *wdev, const void *data, int len)
+{
+ int err = 0, tmp, type;
+ struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+ struct net_device *ndev = wdev_to_wlc_ndev(wdev, cfg);
+ uint8 enable = 0;
+ const struct nlattr *iter;
+
+ nla_for_each_attr(iter, data, len, tmp) {
+ type = nla_type(iter);
+ if (type == ANDR_WIFI_ATTRIBUTE_TCPACK_SUP_VALUE) {
+ enable = (int8)nla_get_u32(iter);
+ }
+ }
+
+ if (dhd_dev_set_tcpack_sup_mode_cfg(ndev, enable) < 0) {
+ WL_ERR(("Could not set TCP Ack Suppress mode cfg\n"));
+ err = -EINVAL;
+ }
+ return err;
+}
+#endif /* DHDTCPACK_SUPPRESS */
+
#ifdef DHD_WAKE_STATUS
static int
wl_cfgvendor_get_wake_reason_stats(struct wiphy *wiphy,
- struct wireless_dev *wdev, const void *data, int len)
+ struct wireless_dev *wdev, const void *data, int len)
{
struct net_device *ndev = wdev_to_ndev(wdev);
wake_counts_t *pwake_count_info;
int ret, mem_needed;
-#if defined(DHD_DEBUG) && defined(DHD_WAKE_EVENT_STATUS)
+#if defined(DHD_WAKE_EVENT_STATUS) && defined(DHD_DEBUG)
int flowid;
-#endif /* DHD_DEBUG && DHD_WAKE_EVENT_STATUS */
- struct sk_buff *skb = NULL;
+#endif /* DHD_WAKE_EVENT_STATUS && DHD_DEBUG */
+ struct sk_buff *skb;
dhd_pub_t *dhdp = wl_cfg80211_get_dhdp(ndev);
WL_DBG(("Recv get wake status info cmd.\n"));
skb = cfg80211_vendor_cmd_alloc_reply_skb(wiphy, mem_needed);
if (unlikely(!skb)) {
WL_ERR(("%s: can't allocate %d bytes\n", __FUNCTION__, mem_needed));
- ret = -ENOMEM;
+ return -ENOMEM;
goto exit;
}
#ifdef DHD_WAKE_EVENT_STATUS
WL_ERR(("pwake_count_info->rcwake %d\n", pwake_count_info->rcwake));
- ret = nla_put_u32(skb, WAKE_STAT_ATTRIBUTE_TOTAL_CMD_EVENT, pwake_count_info->rcwake);
- if (unlikely(ret)) {
- WL_ERR(("Failed to put Total count of CMD event, ret=%d\n", ret));
- goto exit;
- }
- ret = nla_put_u32(skb, WAKE_STAT_ATTRIBUTE_CMD_EVENT_COUNT_USED, WLC_E_LAST);
- if (unlikely(ret)) {
- WL_ERR(("Failed to put Max count of event used, ret=%d\n", ret));
- goto exit;
- }
- ret = nla_put(skb, WAKE_STAT_ATTRIBUTE_CMD_EVENT_WAKE, (WLC_E_LAST * sizeof(uint)),
+ nla_put_u32(skb, WAKE_STAT_ATTRIBUTE_TOTAL_CMD_EVENT, pwake_count_info->rcwake);
+ nla_put_u32(skb, WAKE_STAT_ATTRIBUTE_CMD_EVENT_COUNT_USED, WLC_E_LAST);
+ nla_put(skb, WAKE_STAT_ATTRIBUTE_CMD_EVENT_WAKE, (WLC_E_LAST * sizeof(uint)),
pwake_count_info->rc_event);
- if (unlikely(ret)) {
- WL_ERR(("Failed to put Event wake data, ret=%d\n", ret));
- goto exit;
- }
#ifdef DHD_DEBUG
for (flowid = 0; flowid < WLC_E_LAST; flowid++) {
if (pwake_count_info->rc_event[flowid] != 0) {
#endif /* DHD_WAKE_EVENT_STATUS */
#ifdef DHD_WAKE_RX_STATUS
WL_ERR(("pwake_count_info->rxwake %d\n", pwake_count_info->rxwake));
- ret = nla_put_u32(skb, WAKE_STAT_ATTRIBUTE_TOTAL_RX_DATA_WAKE, pwake_count_info->rxwake);
- if (unlikely(ret)) {
- WL_ERR(("Failed to put Total Wake due RX data, ret=%d\n", ret));
- goto exit;
- }
- ret = nla_put_u32(skb, WAKE_STAT_ATTRIBUTE_RX_UNICAST_COUNT, pwake_count_info->rx_ucast);
- if (unlikely(ret)) {
- WL_ERR(("Failed to put Total wake due to RX unicast, ret=%d\n", ret));
- goto exit;
- }
- ret = nla_put_u32(skb, WAKE_STAT_ATTRIBUTE_RX_MULTICAST_COUNT, pwake_count_info->rx_mcast);
- if (unlikely(ret)) {
- WL_ERR(("Failed to put Total wake due RX multicast, ret=%d\n", ret));
- goto exit;
- }
- ret = nla_put_u32(skb, WAKE_STAT_ATTRIBUTE_RX_BROADCAST_COUNT, pwake_count_info->rx_bcast);
- if (unlikely(ret)) {
- WL_ERR(("Failed to put Total wake due to RX broadcast, ret=%d\n", ret));
- goto exit;
- }
- ret = nla_put_u32(skb, WAKE_STAT_ATTRIBUTE_RX_ICMP_PKT, pwake_count_info->rx_arp);
- if (unlikely(ret)) {
- WL_ERR(("Failed to put Total wake due to ICMP pkt, ret=%d\n", ret));
- goto exit;
- }
- ret = nla_put_u32(skb, WAKE_STAT_ATTRIBUTE_RX_ICMP6_PKT, pwake_count_info->rx_icmpv6);
- if (unlikely(ret)) {
- WL_ERR(("Failed to put Total wake due ICMPV6 pkt, ret=%d\n", ret));
- goto exit;
- }
- ret = nla_put_u32(skb, WAKE_STAT_ATTRIBUTE_RX_ICMP6_RA, pwake_count_info->rx_icmpv6_ra);
- if (unlikely(ret)) {
- WL_ERR(("Failed to put Total wake due to ICMPV6_RA, ret=%d\n", ret));
- goto exit;
- }
- ret = nla_put_u32(skb, WAKE_STAT_ATTRIBUTE_RX_ICMP6_NA, pwake_count_info->rx_icmpv6_na);
- if (unlikely(ret)) {
- WL_ERR(("Failed to put Total wake due to ICMPV6_NA, ret=%d\n", ret));
- goto exit;
- }
- ret = nla_put_u32(skb, WAKE_STAT_ATTRIBUTE_RX_ICMP6_NS, pwake_count_info->rx_icmpv6_ns);
- if (unlikely(ret)) {
- WL_ERR(("Failed to put Total wake due to ICMPV6_NS, ret=%d\n", ret));
- goto exit;
- }
- ret = nla_put_u32(skb, WAKE_STAT_ATTRIBUTE_IPV4_RX_MULTICAST_ADD_CNT,
+ nla_put_u32(skb, WAKE_STAT_ATTRIBUTE_TOTAL_RX_DATA_WAKE, pwake_count_info->rxwake);
+ nla_put_u32(skb, WAKE_STAT_ATTRIBUTE_RX_UNICAST_COUNT, pwake_count_info->rx_ucast);
+ nla_put_u32(skb, WAKE_STAT_ATTRIBUTE_RX_MULTICAST_COUNT, pwake_count_info->rx_mcast);
+ nla_put_u32(skb, WAKE_STAT_ATTRIBUTE_RX_BROADCAST_COUNT, pwake_count_info->rx_bcast);
+ nla_put_u32(skb, WAKE_STAT_ATTRIBUTE_RX_ICMP_PKT, pwake_count_info->rx_arp);
+ nla_put_u32(skb, WAKE_STAT_ATTRIBUTE_RX_ICMP6_PKT, pwake_count_info->rx_icmpv6);
+ nla_put_u32(skb, WAKE_STAT_ATTRIBUTE_RX_ICMP6_RA, pwake_count_info->rx_icmpv6_ra);
+ nla_put_u32(skb, WAKE_STAT_ATTRIBUTE_RX_ICMP6_NA, pwake_count_info->rx_icmpv6_na);
+ nla_put_u32(skb, WAKE_STAT_ATTRIBUTE_RX_ICMP6_NS, pwake_count_info->rx_icmpv6_ns);
+ nla_put_u32(skb, WAKE_STAT_ATTRIBUTE_IPV4_RX_MULTICAST_ADD_CNT,
pwake_count_info->rx_multi_ipv4);
- if (unlikely(ret)) {
- WL_ERR(("Failed to put Total wake due to RX IPV4 MULTICAST, ret=%d\n", ret));
- goto exit;
- }
- ret = nla_put_u32(skb, WAKE_STAT_ATTRIBUTE_IPV6_RX_MULTICAST_ADD_CNT,
+ nla_put_u32(skb, WAKE_STAT_ATTRIBUTE_IPV6_RX_MULTICAST_ADD_CNT,
pwake_count_info->rx_multi_ipv6);
- if (unlikely(ret)) {
- WL_ERR(("Failed to put Total wake due to RX IPV6 MULTICAST, ret=%d\n", ret));
- goto exit;
- }
- ret = nla_put_u32(skb, WAKE_STAT_ATTRIBUTE_OTHER_RX_MULTICAST_ADD_CNT,
+ nla_put_u32(skb, WAKE_STAT_ATTRIBUTE_OTHER_RX_MULTICAST_ADD_CNT,
pwake_count_info->rx_multi_other);
- if (unlikely(ret)) {
- WL_ERR(("Failed to put Total wake due to Other RX Multicast, ret=%d\n", ret));
- goto exit;
- }
#endif /* #ifdef DHD_WAKE_RX_STATUS */
ret = cfg80211_vendor_cmd_reply(skb);
if (unlikely(ret)) {
WL_ERR(("Vendor cmd reply for -get wake status failed:%d \n", ret));
}
- /* On cfg80211_vendor_cmd_reply() skb is consumed and freed in case of success or failure */
- return ret;
-
exit:
- /* Free skb memory */
- if (skb) {
- kfree_skb(skb);
- }
return ret;
}
#endif /* DHD_WAKE_STATUS */
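/*
 * Illustrative sketch, not part of the patch: the removed (newer) variant
 * above checks every nla_put_u32() and frees the reply skb on failure,
 * because cfg80211 only takes ownership of the skb once
 * cfg80211_vendor_cmd_reply() is called. The attribute ids and function
 * name below are placeholders for the sketch.
 */
#include <net/cfg80211.h>
#include <net/netlink.h>

static int reply_two_counters(struct wiphy *wiphy, u32 total, u32 rx_ucast)
{
	struct sk_buff *skb;
	int ret;

	skb = cfg80211_vendor_cmd_alloc_reply_skb(wiphy,
		2 * nla_total_size(sizeof(u32)));
	if (!skb)
		return -ENOMEM;

	ret = nla_put_u32(skb, 1 /* hypothetical TOTAL attr */, total);
	if (!ret)
		ret = nla_put_u32(skb, 2 /* hypothetical RX_UNICAST attr */, rx_ucast);
	if (ret) {
		kfree_skb(skb);	/* nothing has consumed it yet */
		return ret;
	}
	return cfg80211_vendor_cmd_reply(skb);	/* consumes the skb */
}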
-#ifdef DHDTCPACK_SUPPRESS
-static int
-wl_cfgvendor_set_tcpack_sup_mode(struct wiphy *wiphy,
- struct wireless_dev *wdev, const void *data, int len)
+#ifdef RTT_SUPPORT
+void
+wl_cfgvendor_rtt_evt(void *ctx, void *rtt_data)
{
- int err = BCME_OK, type;
- struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
- struct net_device *ndev = wdev_to_wlc_ndev(wdev, cfg);
- uint8 enable = 0;
-
- if (!data) {
- WL_ERR(("data is not available\n"));
- err = BCME_BADARG;
- goto exit;
- }
-
- if (len <= 0) {
- WL_ERR(("Length of the nlattr is not valid len : %d\n", len));
- err = BCME_BADARG;
- goto exit;
- }
+ struct wireless_dev *wdev = (struct wireless_dev *)ctx;
+ struct wiphy *wiphy;
+ struct sk_buff *skb;
+ uint32 evt_complete = 0;
+ gfp_t kflags;
+ rtt_result_t *rtt_result;
+ rtt_results_header_t *rtt_header;
+ struct list_head *rtt_cache_list;
+ struct nlattr *rtt_nl_hdr;
+ wiphy = wdev->wiphy;
- type = nla_type(data);
- if (type == ANDR_WIFI_ATTRIBUTE_TCPACK_SUP_VALUE) {
- enable = (uint8) nla_get_u32(data);
- err = dhd_dev_set_tcpack_sup_mode_cfg(ndev, enable);
- if (unlikely(err)) {
- WL_ERR(("Could not set TCP Ack Suppress mode cfg: %d\n", err));
- }
- } else {
- err = BCME_BADARG;
- }
-
-exit:
- return err;
-}
-#endif /* DHDTCPACK_SUPPRESS */
-
-#if defined(WL_CFG80211) && defined(DHD_FILE_DUMP_EVENT)
-static int
-wl_cfgvendor_notify_dump_completion(struct wiphy *wiphy,
- struct wireless_dev *wdev, const void *data, int len)
-{
- struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
- dhd_pub_t *dhd_pub = cfg->pub;
- unsigned long flags = 0;
-
- WL_INFORM(("%s, [DUMP] received file dump notification from HAL\n", __FUNCTION__));
-
- DHD_GENERAL_LOCK(dhd_pub, flags);
- /* call wmb() to synchronize with the previous memory operations */
- OSL_SMP_WMB();
- DHD_BUS_BUSY_CLEAR_IN_HALDUMP(dhd_pub);
- /* Call another wmb() to make sure wait_for_dump_completion value
- * gets updated before waking up waiting context.
- */
- OSL_SMP_WMB();
- dhd_os_busbusy_wake(dhd_pub);
- DHD_GENERAL_UNLOCK(dhd_pub, flags);
-
- return BCME_OK;
-}
-#endif /* WL_CFG80211 && DHD_FILE_DUMP_EVENT */
-
-#if defined(WL_CFG80211)
-static int
-wl_cfgvendor_set_hal_started(struct wiphy *wiphy,
- struct wireless_dev *wdev, const void *data, int len)
-{
- struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
- WL_INFORM(("%s,[DUMP] HAL STARTED\n", __FUNCTION__));
-
- cfg->hal_started = true;
- return BCME_OK;
-}
-
-static int
-wl_cfgvendor_stop_hal(struct wiphy *wiphy,
- struct wireless_dev *wdev, const void *data, int len)
-{
- struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
- WL_INFORM(("%s,[DUMP] HAL STOPPED\n", __FUNCTION__));
-
- cfg->hal_started = false;
- return BCME_OK;
-}
-#endif /* WL_CFG80211 */
-
-#ifdef RTT_SUPPORT
-void
-wl_cfgvendor_rtt_evt(void *ctx, void *rtt_data)
-{
- struct wireless_dev *wdev = (struct wireless_dev *)ctx;
- struct wiphy *wiphy;
- struct sk_buff *skb = NULL;
- uint32 evt_complete = 0;
- gfp_t kflags;
- rtt_result_t *rtt_result;
- rtt_results_header_t *rtt_header;
- struct list_head *rtt_cache_list;
- struct nlattr *rtt_nl_hdr;
- int ret = BCME_OK;
- wiphy = wdev->wiphy;
-
- WL_DBG(("In\n"));
- /* Push the data to the skb */
- if (!rtt_data) {
- WL_ERR(("rtt_data is NULL\n"));
- return;
+ WL_DBG(("In\n"));
+ /* Push the data to the skb */
+ if (!rtt_data) {
+ WL_ERR(("rtt_data is NULL\n"));
+ return;
}
rtt_cache_list = (struct list_head *)rtt_data;
kflags = in_atomic() ? GFP_ATOMIC : GFP_KERNEL;
return;
}
evt_complete = 1;
- ret = nla_put_u32(skb, RTT_ATTRIBUTE_RESULTS_COMPLETE, evt_complete);
- if (ret < 0) {
- WL_ERR(("Failed to put RTT_ATTRIBUTE_RESULTS_COMPLETE\n"));
- goto free_mem;
- }
+ nla_put_u32(skb, RTT_ATTRIBUTE_RESULTS_COMPLETE, evt_complete);
cfg80211_vendor_event(skb, kflags);
return;
}
- GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wcast-qual"
+#endif
list_for_each_entry(rtt_header, rtt_cache_list, list) {
/* Alloc the SKB for vendor_event */
#if (defined(CONFIG_ARCH_MSM) && defined(SUPPORT_WDEV_CFG80211_VENDOR_EVENT_ALLOC)) || \
if (list_is_last(&rtt_header->list, rtt_cache_list)) {
evt_complete = 1;
}
- ret = nla_put_u32(skb, RTT_ATTRIBUTE_RESULTS_COMPLETE, evt_complete);
- if (ret < 0) {
- WL_ERR(("Failed to put RTT_ATTRIBUTE_RESULTS_COMPLETE\n"));
- goto free_mem;
- }
+ nla_put_u32(skb, RTT_ATTRIBUTE_RESULTS_COMPLETE, evt_complete);
rtt_nl_hdr = nla_nest_start(skb, RTT_ATTRIBUTE_RESULTS_PER_TARGET);
if (!rtt_nl_hdr) {
WL_ERR(("rtt_nl_hdr is NULL\n"));
- dev_kfree_skb_any(skb);
break;
}
- ret = nla_put(skb, RTT_ATTRIBUTE_TARGET_MAC, ETHER_ADDR_LEN,
- &rtt_header->peer_mac);
- if (ret < 0) {
- WL_ERR(("Failed to put RTT_ATTRIBUTE_TARGET_MAC, ret:%d\n", ret));
- goto free_mem;
- }
- ret = nla_put_u32(skb, RTT_ATTRIBUTE_RESULT_CNT, rtt_header->result_cnt);
- if (ret < 0) {
- WL_ERR(("Failed to put RTT_ATTRIBUTE_RESULT_CNT, ret:%d\n", ret));
- goto free_mem;
- }
+ nla_put(skb, RTT_ATTRIBUTE_TARGET_MAC, ETHER_ADDR_LEN, &rtt_header->peer_mac);
+ nla_put_u32(skb, RTT_ATTRIBUTE_RESULT_CNT, rtt_header->result_cnt);
list_for_each_entry(rtt_result, &rtt_header->result_list, list) {
- ret = nla_put(skb, RTT_ATTRIBUTE_RESULT,
+ nla_put(skb, RTT_ATTRIBUTE_RESULT,
rtt_result->report_len, &rtt_result->report);
- if (ret < 0) {
- WL_ERR(("Failed to put RTT_ATTRIBUTE_RESULT, ret:%d\n", ret));
- goto free_mem;
- }
- ret = nla_put(skb, RTT_ATTRIBUTE_RESULT_DETAIL,
- rtt_result->detail_len, &rtt_result->rtt_detail);
- if (ret < 0) {
- WL_ERR(("Failed to put RTT_ATTRIBUTE_RESULT_DETAIL, ret:%d\n",
- ret));
- goto free_mem;
- }
}
nla_nest_end(skb, rtt_nl_hdr);
cfg80211_vendor_event(skb, kflags);
}
- GCC_DIAGNOSTIC_POP();
-
- return;
-
-free_mem:
- /* Free skb memory */
- if (skb) {
- kfree_skb(skb);
- }
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic pop
+#endif
}
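/*
 * Illustrative sketch: each RTT target above is wrapped in a nested
 * attribute with nla_nest_start()/nla_nest_end(). Minimal shape of that
 * pattern; the attribute ids and put_one_target() are placeholders, not
 * driver identifiers.
 */
#include <net/netlink.h>
#include <linux/if_ether.h>

static int put_one_target(struct sk_buff *skb, const u8 *mac, u32 result_cnt)
{
	struct nlattr *nest;

	nest = nla_nest_start(skb, 10 /* hypothetical PER_TARGET attr */);
	if (!nest)
		return -EMSGSIZE;
	if (nla_put(skb, 11 /* hypothetical TARGET_MAC attr */, ETH_ALEN, mac) ||
	    nla_put_u32(skb, 12 /* hypothetical RESULT_CNT attr */, result_cnt)) {
		nla_nest_cancel(skb, nest);	/* roll back the partial nest */
		return -EMSGSIZE;
	}
	nla_nest_end(skb, nest);
	return 0;
}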
static int
wl_cfgvendor_rtt_set_config(struct wiphy *wiphy, struct wireless_dev *wdev,
const void *data, int len) {
int err = 0, rem, rem1, rem2, type;
- int target_cnt = 0;
+ int target_cnt;
rtt_config_params_t rtt_param;
rtt_target_info_t* rtt_target = NULL;
const struct nlattr *iter, *iter1, *iter2;
struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
rtt_capabilities_t capability;
- bzero(&rtt_param, sizeof(rtt_param));
+ memset(&rtt_param, 0, sizeof(rtt_param));
WL_DBG(("In\n"));
err = dhd_dev_rtt_register_noti_callback(wdev->netdev, wdev, wl_cfgvendor_rtt_evt);
type = nla_type(iter);
switch (type) {
case RTT_ATTRIBUTE_TARGET_CNT:
- if (target_cnt != 0) {
- WL_ERR(("attempt to overwrite target_cnt"));
- err = -EINVAL;
- goto exit;
- }
target_cnt = nla_get_u8(iter);
if ((target_cnt <= 0) || (target_cnt > RTT_MAX_TARGET_CNT)) {
WL_ERR(("target_cnt is not valid : %d\n",
}
rtt_param.rtt_target_cnt = target_cnt;
- rtt_param.target_info = (rtt_target_info_t *)MALLOCZ(cfg->osh,
- TARGET_INFO_SIZE(target_cnt));
+ rtt_param.target_info = kzalloc(TARGET_INFO_SIZE(target_cnt), GFP_KERNEL);
if (rtt_param.target_info == NULL) {
WL_ERR(("failed to allocate target info for (%d)\n", target_cnt));
err = BCME_NOMEM;
}
rtt_target = rtt_param.target_info;
nla_for_each_nested(iter1, iter, rem1) {
- if ((uint8 *)rtt_target >= ((uint8 *)rtt_param.target_info +
- TARGET_INFO_SIZE(target_cnt))) {
- WL_ERR(("rtt_target increased over its max size"));
- err = -EINVAL;
- goto exit;
- }
nla_for_each_nested(iter2, iter1, rem2) {
type = nla_type(iter2);
switch (type) {
case RTT_ATTRIBUTE_TARGET_MAC:
- if (nla_len(iter2) != ETHER_ADDR_LEN) {
- WL_ERR(("mac_addr length not match\n"));
- err = -EINVAL;
- goto exit;
- }
memcpy(&rtt_target->addr, nla_data(iter2),
ETHER_ADDR_LEN);
break;
err = -EINVAL;
goto exit;
}
- WL_INFORM_MEM(("Target addr %s, Channel : %s for RTT \n",
+ WL_INFORM(("Target addr %s, Channel : %s for RTT \n",
bcm_ether_ntoa((const struct ether_addr *)&rtt_target->addr,
eabuf),
wf_chspec_ntoa(rtt_target->chanspec, chanbuf)));
exit:
/* free the target info list */
if (rtt_param.target_info) {
- MFREE(cfg->osh, rtt_param.target_info,
- TARGET_INFO_SIZE(target_cnt));
+ kfree(rtt_param.target_info);
+ rtt_param.target_info = NULL;
}
return err;
}
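/*
 * Illustrative sketch: the RTT target list above is parsed with two levels
 * of nla_for_each_nested() -- an outer list attribute holding one nested
 * entry per target, each entry holding its own attributes. count_targets()
 * and the attribute id are invented for the sketch.
 */
#include <net/netlink.h>
#include <linux/if_ether.h>

static int count_targets(const struct nlattr *target_list)
{
	const struct nlattr *entry, *field;
	int rem_entry, rem_field, n = 0;

	nla_for_each_nested(entry, target_list, rem_entry) {
		nla_for_each_nested(field, entry, rem_field) {
			if (nla_type(field) == 21 /* hypothetical TARGET_MAC */ &&
			    nla_len(field) != ETH_ALEN)
				return -EINVAL;	/* malformed MAC attribute */
		}
		n++;
	}
	return n;
}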
const void *data, int len)
{
int err = 0, rem, type, target_cnt = 0;
- int target_idx = 0;
+ int target_cnt_chk = 0;
const struct nlattr *iter;
- struct ether_addr *mac_list = NULL;
+ struct ether_addr *mac_list = NULL, *mac_addr = NULL;
struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
if (len <= 0) {
}
target_cnt = nla_get_u8(iter);
if ((target_cnt > 0) && (target_cnt < RTT_MAX_TARGET_CNT)) {
- mac_list = (struct ether_addr *)MALLOCZ(cfg->osh,
- target_cnt * ETHER_ADDR_LEN);
+ mac_list = (struct ether_addr *)kzalloc(target_cnt * ETHER_ADDR_LEN,
+ GFP_KERNEL);
if (mac_list == NULL) {
WL_ERR(("failed to allocate mem for mac list\n"));
err = -EINVAL;
goto exit;
}
+ mac_addr = &mac_list[0];
} else {
/* cancel the current whole RTT process */
goto cancel;
}
break;
case RTT_ATTRIBUTE_TARGET_MAC:
- if (mac_list == NULL) {
- WL_ERR(("ATTRIBUTE_TARGET_CNT not found before "
- " ATTRIBUTE_TARGET_MAC\n"));
- err = -EINVAL;
- goto exit;
- }
-
- if (target_idx >= target_cnt) {
- WL_ERR(("More TARGET_MAC entries found, "
- "expected TARGET_CNT:%d\n", target_cnt));
- err = -EINVAL;
- goto exit;
- }
-
- if (nla_len(iter) != ETHER_ADDR_LEN) {
- WL_ERR(("Invalid TARGET_MAC ATTR len :%d\n", nla_len(iter)));
+ if (mac_addr) {
+ memcpy(mac_addr++, nla_data(iter), ETHER_ADDR_LEN);
+ target_cnt_chk++;
+ if (target_cnt_chk > target_cnt) {
+ WL_ERR(("over target count\n"));
+ err = -EINVAL;
+ goto exit;
+ }
+ break;
+ } else {
+ WL_ERR(("mac_list is NULL\n"));
err = -EINVAL;
goto exit;
}
-
- memcpy(&mac_list[target_idx], nla_data(iter), ETHER_ADDR_LEN);
- target_idx++;
-
- break;
- default:
- WL_ERR(("Uknown type : %d\n", type));
- err = -EINVAL;
- goto exit;
}
}
cancel:
- if (mac_list && dhd_dev_rtt_cancel_cfg(
- bcmcfg_to_prmry_ndev(cfg), mac_list, target_cnt) < 0) {
+ if (dhd_dev_rtt_cancel_cfg(bcmcfg_to_prmry_ndev(cfg), mac_list, target_cnt) < 0) {
WL_ERR(("Could not cancel RTT configuration\n"));
err = -EINVAL;
}
exit:
if (mac_list) {
- MFREE(cfg->osh, mac_list, target_cnt * ETHER_ADDR_LEN);
+ kfree(mac_list);
}
return err;
}
WL_ERR(("Vendor Command reply failed ret:%d \n", err));
goto exit;
}
- err = wl_cfgvendor_send_cmd_reply(wiphy, &capability, sizeof(capability));
+ err = wl_cfgvendor_send_cmd_reply(wiphy, bcmcfg_to_prmry_ndev(cfg),
+ &capability, sizeof(capability));
if (unlikely(err)) {
WL_ERR(("Vendor Command reply failed ret:%d \n", err));
WL_DBG(("Recv -get_avail_ch command \n"));
- bzero(&responder_info, sizeof(responder_info));
+ memset(&responder_info, 0, sizeof(responder_info));
err = get_responder_info(cfg, &responder_info);
if (unlikely(err)) {
WL_ERR(("Failed to get responder info:%d \n", err));
return err;
}
- err = wl_cfgvendor_send_cmd_reply(wiphy, &responder_info, sizeof(responder_info));
+ err = wl_cfgvendor_send_cmd_reply(wiphy, bcmcfg_to_prmry_ndev(cfg),
+ &responder_info, sizeof(responder_info));
+
if (unlikely(err)) {
WL_ERR(("Vendor cmd reply for -get_avail_ch failed ret:%d \n", err));
}
WL_DBG(("Recv rtt -enable_resp cmd.\n"));
- bzero(&responder_info, sizeof(responder_info));
+ memset(&responder_info, 0, sizeof(responder_info));
/*
*Passing channel as NULL until implementation
goto done;
}
done:
- err = wl_cfgvendor_send_cmd_reply(wiphy, &responder_info, sizeof(responder_info));
+ err = wl_cfgvendor_send_cmd_reply(wiphy, ndev,
+ &responder_info, sizeof(responder_info));
+
if (unlikely(err)) {
WL_ERR(("Vendor cmd reply for -enable_resp failed ret:%d \n", err));
}
int type;
uint32 lazy_roam_enable_flag;
- if (!data) {
- WL_ERR(("data is not available\n"));
- return -EINVAL;
- }
-
- if (len <= 0) {
- WL_ERR(("invaild len %d\n", len));
- return -EINVAL;
- }
-
type = nla_type(data);
if (type == GSCAN_ATTRIBUTE_LAZY_ROAM_ENABLE) {
err = dhd_dev_lazy_roam_enable(bcmcfg_to_prmry_ndev(cfg),
lazy_roam_enable_flag);
+
if (unlikely(err))
WL_ERR(("Could not enable lazy roam:%d \n", err));
- }
+ }
return err;
}
wlc_roam_exp_params_t roam_param;
const struct nlattr *iter;
- bzero(&roam_param, sizeof(roam_param));
+ memset(&roam_param, 0, sizeof(roam_param));
nla_for_each_attr(iter, data, len, tmp) {
type = nla_type(iter);
+
switch (type) {
case GSCAN_ATTRIBUTE_A_BAND_BOOST_THRESHOLD:
roam_param.a_band_boost_threshold = nla_get_u32(iter);
/* small helper function */
static wl_bssid_pref_cfg_t *
-create_bssid_pref_cfg(struct bcm_cfg80211 *cfg, uint32 num, uint32 *buf_len)
+create_bssid_pref_cfg(uint32 num)
{
+ uint32 mem_needed;
wl_bssid_pref_cfg_t *bssid_pref;
- *buf_len = sizeof(wl_bssid_pref_cfg_t);
- if (num) {
- *buf_len += (num - 1) * sizeof(wl_bssid_pref_list_t);
- }
- bssid_pref = (wl_bssid_pref_cfg_t *)MALLOC(cfg->osh, *buf_len);
-
+ mem_needed = sizeof(wl_bssid_pref_cfg_t);
+ if (num)
+ mem_needed += (num - 1) * sizeof(wl_bssid_pref_list_t);
+ bssid_pref = (wl_bssid_pref_cfg_t *) kmalloc(mem_needed, GFP_KERNEL);
return bssid_pref;
}
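/*
 * Illustrative sketch: wl_bssid_pref_cfg_t ends in a one-element array, so
 * the helper above sizes the allocation as the struct itself plus (num - 1)
 * extra list entries. The same idea with a generic struct; all names here
 * are placeholders, not driver types.
 */
#include <linux/slab.h>
#include <linux/types.h>

struct example_entry {
	u8 addr[6];
	u8 flags;
};

struct example_list {
	u32 count;
	struct example_entry entries[1];	/* really 'count' entries */
};

static struct example_list *alloc_example_list(u32 num)
{
	size_t sz = sizeof(struct example_list);

	if (num)
		sz += (num - 1) * sizeof(struct example_entry);
	return kzalloc(sz, GFP_KERNEL);	/* caller kfree()s the result */
}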
wl_bssid_pref_list_t *bssids;
int tmp, tmp1, tmp2, type;
const struct nlattr *outer, *inner, *iter;
- uint32 flush = 0, num = 0, buf_len = 0;
- uint8 bssid_found = 0, rssi_found = 0;
+ uint32 flush = 0, i = 0, num = 0;
/* Assumption: NUM attribute must come first */
nla_for_each_attr(iter, data, len, tmp2) {
type = nla_type(iter);
switch (type) {
case GSCAN_ATTRIBUTE_NUM_BSSID:
- if (num) {
- WL_ERR(("attempt overide bssid num.\n"));
- err = -EINVAL;
- goto exit;
- }
- if (nla_len(iter) != sizeof(uint32)) {
- WL_ERR(("nla_len not match\n"));
- err = -EINVAL;
- goto exit;
- }
num = nla_get_u32(iter);
- if (num == 0 || num > MAX_BSSID_PREF_LIST_NUM) {
- WL_ERR(("wrong BSSID num:%d\n", num));
+ if (num > MAX_BSSID_PREF_LIST_NUM) {
+ WL_ERR(("Too many Preferred BSSIDs!\n"));
err = -EINVAL;
goto exit;
}
- if ((bssid_pref = create_bssid_pref_cfg(cfg, num, &buf_len))
- == NULL) {
- WL_ERR(("Can't malloc memory\n"));
- err = -ENOMEM;
- goto exit;
- }
break;
case GSCAN_ATTRIBUTE_BSSID_PREF_FLUSH:
- if (nla_len(iter) != sizeof(uint32)) {
- WL_ERR(("nla_len not match\n"));
- err = -EINVAL;
- goto exit;
- }
flush = nla_get_u32(iter);
- if (flush != 1) {
- WL_ERR(("wrong flush value\n"));
- err = -EINVAL;
- goto exit;
- }
break;
case GSCAN_ATTRIBUTE_BSSID_PREF_LIST:
- if (!num || !bssid_pref) {
- WL_ERR(("bssid list count not set\n"));
- err = -EINVAL;
+ if (!num)
+ return -EINVAL;
+ if ((bssid_pref = create_bssid_pref_cfg(num)) == NULL) {
+ WL_ERR(("%s: Can't malloc memory\n", __FUNCTION__));
+ err = -ENOMEM;
goto exit;
}
- bssid_pref->count = 0;
+ bssid_pref->count = num;
bssids = bssid_pref->bssids;
nla_for_each_nested(outer, iter, tmp) {
- if (bssid_pref->count >= num) {
- WL_ERR(("too many bssid list\n"));
+ if (i >= num) {
+ WL_ERR(("CFGs don't seem right!\n"));
err = -EINVAL;
goto exit;
}
- bssid_found = 0;
- rssi_found = 0;
nla_for_each_nested(inner, outer, tmp1) {
type = nla_type(inner);
switch (type) {
- case GSCAN_ATTRIBUTE_BSSID_PREF:
- if (nla_len(inner) != ETHER_ADDR_LEN) {
- WL_ERR(("nla_len not match.\n"));
- err = -EINVAL;
- goto exit;
- }
- memcpy(&(bssids[bssid_pref->count].bssid),
- nla_data(inner), ETHER_ADDR_LEN);
- /* not used for now */
- bssids[bssid_pref->count].flags = 0;
- bssid_found = 1;
- break;
- case GSCAN_ATTRIBUTE_RSSI_MODIFIER:
- if (nla_len(inner) != sizeof(uint32)) {
- WL_ERR(("nla_len not match.\n"));
- err = -EINVAL;
- goto exit;
- }
- bssids[bssid_pref->count].rssi_factor =
- (int8) nla_get_u32(inner);
- rssi_found = 1;
- break;
- default:
- WL_ERR(("wrong type:%d\n", type));
- err = -EINVAL;
- goto exit;
- }
- if (bssid_found && rssi_found) {
- break;
+ case GSCAN_ATTRIBUTE_BSSID_PREF:
+ memcpy(&(bssids[i].bssid),
+ nla_data(inner), ETHER_ADDR_LEN);
+ /* not used for now */
+ bssids[i].flags = 0;
+ break;
+ case GSCAN_ATTRIBUTE_RSSI_MODIFIER:
+ bssids[i].rssi_factor =
+ (int8) nla_get_u32(inner);
+ break;
}
}
- bssid_pref->count++;
+ i++;
}
break;
default:
if (!bssid_pref) {
/* What if only flush is desired? */
if (flush) {
- if ((bssid_pref = create_bssid_pref_cfg(cfg, 0, &buf_len)) == NULL) {
+ if ((bssid_pref = create_bssid_pref_cfg(0)) == NULL) {
WL_ERR(("%s: Can't malloc memory\n", __FUNCTION__));
err = -ENOMEM;
goto exit;
err = dhd_dev_set_lazy_roam_bssid_pref(bcmcfg_to_prmry_ndev(cfg),
bssid_pref, flush);
exit:
- if (bssid_pref) {
- MFREE(cfg->osh, bssid_pref, buf_len);
- }
+ kfree(bssid_pref);
return err;
}
-#endif /* GSCAN_SUPPORT */
-#if defined(GSCAN_SUPPORT) || defined(ROAMEXP_SUPPORT)
+
static int
wl_cfgvendor_set_bssid_blacklist(struct wiphy *wiphy,
struct wireless_dev *wdev, const void *data, int len)
int err = 0;
int type, tmp;
const struct nlattr *iter;
- uint32 mem_needed = 0, flush = 0, num = 0;
+ uint32 mem_needed = 0, flush = 0, i = 0, num = 0;
/* Assumption: NUM attribute must come first */
nla_for_each_attr(iter, data, len, tmp) {
type = nla_type(iter);
switch (type) {
case GSCAN_ATTRIBUTE_NUM_BSSID:
- if (num != 0) {
- WL_ERR(("attempt to change BSSID num\n"));
- err = -EINVAL;
- goto exit;
- }
- if (nla_len(iter) != sizeof(uint32)) {
- WL_ERR(("not matching nla_len.\n"));
- err = -EINVAL;
- goto exit;
- }
num = nla_get_u32(iter);
- if (num == 0 || num > MAX_BSSID_BLACKLIST_NUM) {
- WL_ERR(("wrong BSSID count:%d\n", num));
+ if (num > MAX_BSSID_BLACKLIST_NUM) {
+ WL_ERR(("Too many Blacklist BSSIDs!\n"));
err = -EINVAL;
goto exit;
}
- if (!blacklist) {
- mem_needed = OFFSETOF(maclist_t, ea) +
- sizeof(struct ether_addr) * (num);
- blacklist = (maclist_t *)
- MALLOCZ(cfg->osh, mem_needed);
- if (!blacklist) {
- WL_ERR(("MALLOCZ failed.\n"));
- err = -ENOMEM;
- goto exit;
- }
- }
break;
case GSCAN_ATTRIBUTE_BSSID_BLACKLIST_FLUSH:
- if (nla_len(iter) != sizeof(uint32)) {
- WL_ERR(("not matching nla_len.\n"));
- err = -EINVAL;
- goto exit;
- }
flush = nla_get_u32(iter);
- if (flush != 1) {
- WL_ERR(("flush arg is worng:%d\n", flush));
- err = -EINVAL;
- goto exit;
- }
break;
case GSCAN_ATTRIBUTE_BLACKLIST_BSSID:
- if (num == 0 || !blacklist) {
- WL_ERR(("number of BSSIDs not received.\n"));
- err = -EINVAL;
- goto exit;
- }
- if (nla_len(iter) != ETHER_ADDR_LEN) {
- WL_ERR(("not matching nla_len.\n"));
- err = -EINVAL;
- goto exit;
- }
- if (blacklist->count >= num) {
- WL_ERR(("too many BSSIDs than expected:%d\n",
- blacklist->count));
- err = -EINVAL;
- goto exit;
+ if (num) {
+ if (!blacklist) {
+ mem_needed = sizeof(maclist_t) +
+ sizeof(struct ether_addr) * (num - 1);
+ blacklist = (maclist_t *)
+ kmalloc(mem_needed, GFP_KERNEL);
+ if (!blacklist) {
+ WL_ERR(("%s: Can't malloc %d bytes\n",
+ __FUNCTION__, mem_needed));
+ err = -ENOMEM;
+ goto exit;
+ }
+ blacklist->count = num;
+ }
+ if (i >= num) {
+ WL_ERR(("CFGs don't seem right!\n"));
+ err = -EINVAL;
+ goto exit;
+ }
+ memcpy(&(blacklist->ea[i]),
+ nla_data(iter), ETHER_ADDR_LEN);
+ i++;
}
- memcpy(&(blacklist->ea[blacklist->count]), nla_data(iter),
- ETHER_ADDR_LEN);
- blacklist->count++;
break;
- default:
- WL_ERR(("No such attribute:%d\n", type));
- break;
- }
- }
-
- if (blacklist && (blacklist->count != num)) {
- WL_ERR(("not matching bssid count:%d to expected:%d\n",
- blacklist->count, num));
- err = -EINVAL;
- goto exit;
+ default:
+ WL_ERR(("%s: No such attribute %d\n", __FUNCTION__, type));
+ break;
+ }
}
-
err = dhd_dev_set_blacklist_bssid(bcmcfg_to_prmry_ndev(cfg),
blacklist, mem_needed, flush);
exit:
- MFREE(cfg->osh, blacklist, mem_needed);
+ kfree(blacklist);
return err;
}
struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
wl_ssid_whitelist_t *ssid_whitelist = NULL;
wlc_ssid_t *ssid_elem;
- int tmp, tmp1, mem_needed = 0, type;
- const struct nlattr *iter, *iter1;
- uint32 flush = 0, num = 0;
- int ssid_found = 0;
+ int tmp, tmp2, mem_needed = 0, type;
+ const struct nlattr *inner, *iter;
+ uint32 flush = 0, i = 0, num = 0;
/* Assumption: NUM attribute must come first */
- nla_for_each_attr(iter, data, len, tmp) {
+ nla_for_each_attr(iter, data, len, tmp2) {
type = nla_type(iter);
switch (type) {
- case GSCAN_ATTRIBUTE_NUM_WL_SSID:
- if (num != 0) {
- WL_ERR(("try to change SSID num\n"));
- err = -EINVAL;
- goto exit;
- }
- if (nla_len(iter) != sizeof(uint32)) {
- WL_ERR(("not matching nla_len.\n"));
- err = -EINVAL;
- goto exit;
- }
- num = nla_get_u32(iter);
- if (num == 0 || num > MAX_SSID_WHITELIST_NUM) {
- WL_ERR(("wrong SSID count:%d\n", num));
- err = -EINVAL;
- goto exit;
- }
- mem_needed = sizeof(wl_ssid_whitelist_t) +
- sizeof(wlc_ssid_t) * num;
- ssid_whitelist = (wl_ssid_whitelist_t *)
- MALLOCZ(cfg->osh, mem_needed);
- if (ssid_whitelist == NULL) {
- WL_ERR(("failed to alloc mem\n"));
- err = -ENOMEM;
- goto exit;
- }
- break;
- case GSCAN_ATTRIBUTE_WL_SSID_FLUSH:
- if (nla_len(iter) != sizeof(uint32)) {
- WL_ERR(("not matching nla_len.\n"));
- err = -EINVAL;
- goto exit;
- }
- flush = nla_get_u32(iter);
- if (flush != 1) {
- WL_ERR(("flush arg worng:%d\n", flush));
- err = -EINVAL;
- goto exit;
- }
- break;
- case GSCAN_ATTRIBUTE_WHITELIST_SSID_ELEM:
- if (!num || !ssid_whitelist) {
- WL_ERR(("num ssid is not set!\n"));
- err = -EINVAL;
- goto exit;
- }
- if (ssid_whitelist->ssid_count >= num) {
- WL_ERR(("too many SSIDs:%d\n",
- ssid_whitelist->ssid_count));
- err = -EINVAL;
- goto exit;
- }
-
- ssid_elem = &ssid_whitelist->ssids[
- ssid_whitelist->ssid_count];
- ssid_found = 0;
- nla_for_each_nested(iter1, iter, tmp1) {
- type = nla_type(iter1);
- switch (type) {
- case GSCAN_ATTRIBUTE_WL_SSID_LEN:
- if (nla_len(iter1) != sizeof(uint32)) {
- WL_ERR(("not match nla_len\n"));
- err = -EINVAL;
- goto exit;
- }
- ssid_elem->SSID_len = nla_get_u32(iter1);
- if (ssid_elem->SSID_len >
- DOT11_MAX_SSID_LEN) {
- WL_ERR(("wrong SSID len:%d\n",
- ssid_elem->SSID_len));
- err = -EINVAL;
- goto exit;
- }
- break;
- case GSCAN_ATTRIBUTE_WHITELIST_SSID:
- if (ssid_elem->SSID_len == 0) {
- WL_ERR(("SSID_len not received\n"));
- err = -EINVAL;
- goto exit;
- }
- if (nla_len(iter1) != ssid_elem->SSID_len) {
- WL_ERR(("not match nla_len\n"));
- err = -EINVAL;
- goto exit;
- }
- memcpy(ssid_elem->SSID, nla_data(iter1),
- ssid_elem->SSID_len);
- ssid_found = 1;
- break;
+ case GSCAN_ATTRIBUTE_NUM_WL_SSID:
+ num = nla_get_u32(iter);
+ if (num > MAX_SSID_WHITELIST_NUM) {
+ WL_ERR(("Too many WL SSIDs!\n"));
+ err = -EINVAL;
+ goto exit;
}
- if (ssid_found) {
- ssid_whitelist->ssid_count++;
- break;
+ mem_needed = sizeof(wl_ssid_whitelist_t);
+ if (num)
+ mem_needed += (num - 1) * sizeof(ssid_info_t);
+ ssid_whitelist = (wl_ssid_whitelist_t *)
+ kzalloc(mem_needed, GFP_KERNEL);
+ if (ssid_whitelist == NULL) {
+ WL_ERR(("%s: Can't malloc %d bytes\n",
+ __FUNCTION__, mem_needed));
+ err = -ENOMEM;
+ goto exit;
}
- }
- break;
- default:
- WL_ERR(("No such attribute: %d\n", type));
- break;
+ ssid_whitelist->ssid_count = num;
+ break;
+ case GSCAN_ATTRIBUTE_WL_SSID_FLUSH:
+ flush = nla_get_u32(iter);
+ break;
+ case GSCAN_ATTRIBUTE_WHITELIST_SSID_ELEM:
+ if (!num || !ssid_whitelist) {
+ WL_ERR(("num ssid is not set!\n"));
+ return -EINVAL;
+ }
+ if (i >= num) {
+ WL_ERR(("CFGs don't seem right!\n"));
+ err = -EINVAL;
+ goto exit;
+ }
+ ssid_elem = &ssid_whitelist->ssids[i];
+ nla_for_each_nested(inner, iter, tmp) {
+ type = nla_type(inner);
+ switch (type) {
+ case GSCAN_ATTRIBUTE_WHITELIST_SSID:
+ memcpy(ssid_elem->SSID,
+ nla_data(inner),
+ DOT11_MAX_SSID_LEN);
+ break;
+ case GSCAN_ATTRIBUTE_WL_SSID_LEN:
+ ssid_elem->SSID_len = (uint8)
+ nla_get_u32(inner);
+ break;
+ }
+ }
+ i++;
+ break;
+ default:
+ WL_ERR(("%s: No such attribute %d\n", __FUNCTION__, type));
+ break;
}
}
- if (ssid_whitelist && (ssid_whitelist->ssid_count != num)) {
- WL_ERR(("not matching ssid count:%d to expected:%d\n",
- ssid_whitelist->ssid_count, num));
- err = -EINVAL;
- goto exit;
- }
err = dhd_dev_set_whitelist_ssid(bcmcfg_to_prmry_ndev(cfg),
ssid_whitelist, mem_needed, flush);
- if (err == BCME_UNSUPPORTED) {
- /* If firmware doesn't support feature, ignore the error
- * Android framework doesn't populate/use whitelist ssids
- * as of now, but invokes whitelist as part of roam config
- * API. so this handler cannot be compiled out. but its
- * safe to ignore.
- */
- WL_ERR(("whilelist ssid not supported. Ignore."));
- err = BCME_OK;
- }
exit:
- MFREE(cfg->osh, ssid_whitelist, mem_needed);
- return err;
-}
-#endif /* GSCAN_SUPPORT || ROAMEXP_SUPPORT */
-
-#ifdef ROAMEXP_SUPPORT
-typedef enum {
- FW_ROAMING_ENABLE = 1,
- FW_ROAMING_DISABLE,
- FW_ROAMING_PAUSE,
- FW_ROAMING_RESUME
-} fw_roaming_state_t;
-
-static int
-wl_cfgvendor_set_fw_roaming_state(struct wiphy *wiphy,
- struct wireless_dev *wdev, const void *data, int len)
-{
- fw_roaming_state_t requested_roaming_state;
- int type;
- int err = 0;
-
- if (!data) {
- WL_ERR(("data is not available\n"));
- return -EINVAL;
- }
-
- if (len <= 0) {
- WL_ERR(("invalid len %d\n", len));
- return -EINVAL;
- }
-
- /* Get the requested fw roaming state */
- type = nla_type(data);
- if (type != GSCAN_ATTRIBUTE_ROAM_STATE_SET) {
- WL_ERR(("%s: Invalid attribute %d\n", __FUNCTION__, type));
- return -EINVAL;
- }
-
- requested_roaming_state = nla_get_u32(data);
- WL_INFORM(("setting FW roaming state to %d\n", requested_roaming_state));
-
- if ((requested_roaming_state == FW_ROAMING_ENABLE) ||
- (requested_roaming_state == FW_ROAMING_RESUME)) {
- err = wldev_iovar_setint(wdev_to_ndev(wdev), "roam_off", FALSE);
- } else if ((requested_roaming_state == FW_ROAMING_DISABLE) ||
- (requested_roaming_state == FW_ROAMING_PAUSE)) {
- err = wldev_iovar_setint(wdev_to_ndev(wdev), "roam_off", TRUE);
- } else {
- err = -EINVAL;
- }
-
- return err;
-}
-
-static int
-wl_cfgvendor_fw_roam_get_capability(struct wiphy *wiphy,
- struct wireless_dev *wdev, const void *data, int len)
-{
- int err = 0;
- wifi_roaming_capabilities_t roaming_capability;
-
- /* Update max number of blacklist bssids supported */
- roaming_capability.max_blacklist_size = MAX_BSSID_BLACKLIST_NUM;
- roaming_capability.max_whitelist_size = MAX_SSID_WHITELIST_NUM;
- err = wl_cfgvendor_send_cmd_reply(wiphy, &roaming_capability,
- sizeof(roaming_capability));
- if (unlikely(err)) {
- WL_ERR(("Vendor cmd reply for fw roam capability failed ret:%d \n", err));
- }
-
+ kfree(ssid_whitelist);
return err;
}
-#endif /* ROAMEXP_SUPPORT */
+#endif /* GSCAN_SUPPORT */
static int
wl_cfgvendor_priv_string_handler(struct wiphy *wiphy,
void *buf = NULL, *cur;
int maxmsglen = PAGE_SIZE - 0x100;
struct sk_buff *reply;
- dhd_pub_t *dhdp = wl_cfg80211_get_dhdp(wdev->netdev);
-
- /* send to dongle only if we are not waiting for reload already */
- if (dhdp && dhdp->hang_was_sent) {
- WL_INFORM(("Bus down. HANG was sent up earlier\n"));
- DHD_OS_WAKE_LOCK_CTRL_TIMEOUT_ENABLE(dhdp, DHD_EVENT_TIMEOUT_MS);
- DHD_OS_WAKE_UNLOCK(dhdp);
- return OSL_ERROR(BCME_DONGLE_DOWN);
- }
-
- if (!data) {
- WL_ERR(("data is not available\n"));
- return BCME_BADARG;
- }
-
- if (len <= 0) {
- WL_ERR(("invalid len %d\n", len));
- return BCME_BADARG;
- }
- WL_DBG(("entry: cmd = %d\n", nlioc->cmd));
+ WL_ERR(("entry: cmd = %d\n", nlioc->cmd));
- if (nlioc->offset != sizeof(struct bcm_nlmsg_hdr) ||
- len <= sizeof(struct bcm_nlmsg_hdr)) {
- WL_ERR(("invalid offset %d\n", nlioc->offset));
- return BCME_BADARG;
- }
len -= sizeof(struct bcm_nlmsg_hdr);
ret_len = nlioc->len;
if (ret_len > 0 || len > 0) {
- if (len >= DHD_IOCTL_MAXLEN) {
+ if (len > DHD_IOCTL_MAXLEN) {
WL_ERR(("oversize input buffer %d\n", len));
- len = DHD_IOCTL_MAXLEN - 1;
+ len = DHD_IOCTL_MAXLEN;
}
- if (ret_len >= DHD_IOCTL_MAXLEN) {
+ if (ret_len > DHD_IOCTL_MAXLEN) {
WL_ERR(("oversize return buffer %d\n", ret_len));
- ret_len = DHD_IOCTL_MAXLEN - 1;
+ ret_len = DHD_IOCTL_MAXLEN;
}
-
payload = max(ret_len, len) + 1;
buf = vzalloc(payload);
if (!buf) {
return -ENOMEM;
}
- GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wcast-qual"
+#endif
memcpy(buf, (void *)((char *)nlioc + nlioc->offset), len);
- GCC_DIAGNOSTIC_POP();
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic pop
+#endif
*((char *)buf + len) = '\0';
}
}
cur = buf;
while (ret_len > 0) {
- msglen = ret_len > maxmsglen ? maxmsglen : ret_len;
+ msglen = nlioc->len > maxmsglen ? maxmsglen : ret_len;
ret_len -= msglen;
payload = msglen + sizeof(msglen);
reply = cfg80211_vendor_cmd_alloc_reply_skb(wiphy, payload);
struct net_device *
wl_cfgvendor_get_ndev(struct bcm_cfg80211 *cfg, struct wireless_dev *wdev,
- const char *data, unsigned long int *out_addr)
+ const void *data, unsigned long int *out_addr)
{
char *pos, *pos1;
char ifname[IFNAMSIZ + 1] = {0};
struct net_info *iter, *next;
struct net_device *ndev = NULL;
- ulong ifname_len;
*out_addr = (unsigned long int) data; /* point to command str by default */
/* check whether ifname=<ifname> is provided in the command */
WL_ERR(("command format error \n"));
return NULL;
}
-
- ifname_len = pos1 - pos;
- if (memcpy_s(ifname, (sizeof(ifname) - 1), pos, ifname_len) != BCME_OK) {
- WL_ERR(("Failed to copy data. len: %ld\n", ifname_len));
- return NULL;
- }
- GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
+ memcpy(ifname, pos, (pos1 - pos));
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wcast-qual"
+#endif
for_each_ndev(cfg, iter, next) {
if (iter->ndev) {
if (strncmp(iter->ndev->name, ifname,
}
}
}
- GCC_DIAGNOSTIC_POP();
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
+#pragma GCC diagnostic pop
+#endif
WL_ERR(("Couldn't find ifname:%s in the netinfo list \n",
ifname));
return NULL;
return ndev;
}
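/*
 * wl_cfgvendor_get_ndev() above parses an optional "ifname=<name> " prefix
 * out of a private-command string, looks the name up in the cfg80211
 * netinfo list, and leaves *out_addr pointing at the command text to run.
 * A hypothetical input illustrating the convention (the command itself is
 * only an example, not taken from this driver):
 *
 *   "ifname=wlan0 COUNTRY US"  ->  ndev of "wlan0", command "COUNTRY US"
 *
 * When no prefix is present, *out_addr keeps its default (the original
 * string) and the helper presumably falls back to a default interface; that
 * branch sits outside the lines shown here.
 */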
-#ifdef WL_SAE
-static int
-wl_cfgvendor_set_sae_password(struct wiphy *wiphy,
- struct wireless_dev *wdev, const void *data, int len)
-{
- int err = BCME_OK;
- struct net_device *net = wdev->netdev;
- struct bcm_cfg80211 *cfg = wl_get_cfg(net);
- wsec_pmk_t pmk;
- s32 bssidx;
-
- /* clear the content of pmk structure before usage */
- (void)memset_s(&pmk, sizeof(wsec_pmk_t), 0x0, sizeof(wsec_pmk_t));
-
- if ((bssidx = wl_get_bssidx_by_wdev(cfg, net->ieee80211_ptr)) < 0) {
- WL_ERR(("Find p2p index from wdev(%p) failed\n", net->ieee80211_ptr));
- return BCME_ERROR;
- }
-
- if ((len < WSEC_MIN_PSK_LEN) || (len >= WSEC_MAX_PASSPHRASE_LEN)) {
- WL_ERR(("Invalid passphrase length %d..should be >= 8 and < 256\n",
- len));
- err = BCME_BADLEN;
- goto done;
- }
- /* Set AUTH to SAE */
- err = wldev_iovar_setint_bsscfg(net, "wpa_auth", WPA3_AUTH_SAE_PSK, bssidx);
- if (unlikely(err)) {
- WL_ERR(("could not set wpa_auth (0x%x)\n", err));
- goto done;
- }
- pmk.key_len = htod16(len);
- bcopy((const u8*)data, pmk.key, len);
- pmk.flags = htod16(WSEC_PASSPHRASE);
-
- err = wldev_ioctl_set(net, WLC_SET_WSEC_PMK, &pmk, sizeof(pmk));
- if (err) {
- WL_ERR(("\n failed to set pmk %d\n", err));
- goto done;
- } else {
- WL_MEM(("sae passphrase set successfully\n"));
- }
-done:
- return err;
-}
-#endif /* WL_SAE */
-
-#ifdef BCM_PRIV_CMD_SUPPORT
-/* strlen("ifname=") + IFNAMESIZE + strlen(" ") + '\0' */
-#define ANDROID_PRIV_CMD_IF_PREFIX_LEN (7 + IFNAMSIZ + 2)
/* Max length for the reply buffer. For BRCM_ATTR_DRIVER_CMD, the reply
* would be a formatted string and reply buf would be the size of the
* string.
int err = 0;
int data_len = 0, cmd_len = 0, tmp = 0, type = 0;
struct net_device *ndev = wdev->netdev;
+ char *reply_buf = NULL;
char *cmd = NULL;
struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
int bytes_written;
struct net_device *net = NULL;
unsigned long int cmd_out = 0;
-#if defined(WL_ANDROID_PRIV_CMD_OVER_NL80211)
- u32 cmd_buf_len = WL_DRIVER_PRIV_CMD_LEN;
- char cmd_prefix[ANDROID_PRIV_CMD_IF_PREFIX_LEN + 1] = {0};
- char *cmd_buf = NULL;
- char *current_pos;
- u32 cmd_offset;
-#endif /* WL_ANDROID_PRIV_CMD_OVER_NL80211 && OEM_ANDROID */
+ u32 reply_len = WL_DRIVER_PRIV_CMD_LEN;
+
WL_DBG(("%s: Enter \n", __func__));
goto exit;
}
-#if defined(WL_ANDROID_PRIV_CMD_OVER_NL80211)
if (type == BRCM_ATTR_DRIVER_CMD) {
- if ((cmd_len >= WL_DRIVER_PRIV_CMD_LEN) ||
- (cmd_len < ANDROID_PRIV_CMD_IF_PREFIX_LEN)) {
- WL_ERR(("Unexpected command length (%u)."
- "Ignore the command\n", cmd_len));
+ if (cmd_len >= WL_DRIVER_PRIV_CMD_LEN) {
+ WL_ERR(("Unexpected command length. Ignore the command\n"));
err = -EINVAL;
goto exit;
}
-
- /* check whether there is any ifname prefix provided */
- if (memcpy_s(cmd_prefix, (sizeof(cmd_prefix) - 1),
- cmd, ANDROID_PRIV_CMD_IF_PREFIX_LEN) != BCME_OK) {
- WL_ERR(("memcpy failed for cmd buffer. len:%d\n", cmd_len));
- err = -ENOMEM;
- goto exit;
- }
-
- net = wl_cfgvendor_get_ndev(cfg, wdev, cmd_prefix, &cmd_out);
+ net = wl_cfgvendor_get_ndev(cfg, wdev, cmd, &cmd_out);
if (!cmd_out || !net) {
- WL_ERR(("ndev not found\n"));
err = -ENODEV;
goto exit;
}
-
- /* find offset of the command */
- current_pos = (char *)cmd_out;
- cmd_offset = current_pos - cmd_prefix;
-
- if (!current_pos || (cmd_offset) > ANDROID_PRIV_CMD_IF_PREFIX_LEN) {
- WL_ERR(("Invalid len cmd_offset: %u \n", cmd_offset));
- err = -EINVAL;
- goto exit;
- }
-
- /* Private command data in expected to be in str format. To ensure that
- * the data is null terminated, copy to a local buffer before use
- */
- cmd_buf = (char *)MALLOCZ(cfg->osh, cmd_buf_len);
- if (!cmd_buf) {
- WL_ERR(("memory alloc failed for %u \n", cmd_buf_len));
- err = -ENOMEM;
- goto exit;
- }
-
- /* Point to the start of command */
- if (memcpy_s(cmd_buf, (WL_DRIVER_PRIV_CMD_LEN - 1),
- (const void *)(cmd + cmd_offset),
- (cmd_len - cmd_offset - 1)) != BCME_OK) {
- WL_ERR(("memcpy failed for cmd buffer. len:%d\n", cmd_len));
+ cmd = (char *)cmd_out;
+ reply_buf = kzalloc(reply_len, GFP_KERNEL);
+ if (!reply_buf) {
+ WL_ERR(("memory alloc failed for %u \n", cmd_len));
err = -ENOMEM;
goto exit;
}
- cmd_buf[WL_DRIVER_PRIV_CMD_LEN - 1] = '\0';
-
- WL_DBG(("vendor_command: %s len: %u \n", cmd_buf, cmd_buf_len));
- bytes_written = wl_handle_private_cmd(net, cmd_buf, cmd_buf_len);
+ memcpy(reply_buf, cmd, cmd_len);
+ WL_DBG(("vendor_command: %s len: %u \n", cmd, cmd_len));
+ bytes_written = wl_handle_private_cmd(net, reply_buf, reply_len);
WL_DBG(("bytes_written: %d \n", bytes_written));
if (bytes_written == 0) {
- snprintf(cmd_buf, cmd_buf_len, "%s", "OK");
- data_len = sizeof("OK");
+ snprintf(reply_buf, reply_len, "%s", "OK");
+ data_len = strlen("OK");
} else if (bytes_written > 0) {
- if (bytes_written >= (cmd_buf_len - 1)) {
- /* Not expected */
- ASSERT(0);
- err = -EINVAL;
- goto exit;
- }
- data_len = bytes_written;
+ data_len = bytes_written > reply_len ?
+ reply_len : bytes_written;
} else {
/* -ve return value. Propagate the error back */
err = bytes_written;
goto exit;
}
- if ((data_len > 0) && (data_len < (cmd_buf_len - 1)) && cmd_buf) {
- err = wl_cfgvendor_send_cmd_reply(wiphy, cmd_buf, data_len);
- if (unlikely(err)) {
- WL_ERR(("Vendor Command reply failed ret:%d \n", err));
- } else {
- WL_DBG(("Vendor Command reply sent successfully!\n"));
- }
- } else {
- /* No data to be sent back as reply */
- WL_ERR(("Vendor_cmd: No reply expected. data_len:%u cmd_buf %p \n",
- data_len, cmd_buf));
- }
break;
}
-#endif /* WL_ANDROID_PRIV_CMD_OVER_NL80211 && OEM_ANDROID */
}
-exit:
-#if defined(WL_ANDROID_PRIV_CMD_OVER_NL80211)
- if (cmd_buf) {
- MFREE(cfg->osh, cmd_buf, cmd_buf_len);
+ if ((data_len > 0) && reply_buf) {
+ err = wl_cfgvendor_send_cmd_reply(wiphy, wdev->netdev,
+ reply_buf, data_len+1);
+ if (unlikely(err))
+ WL_ERR(("Vendor Command reply failed ret:%d \n", err));
+ else
+ WL_DBG(("Vendor Command reply sent successfully!\n"));
+ } else {
+ /* No data to be sent back as reply */
+ WL_ERR(("Vendor_cmd: No reply expected. data_len:%u reply_buf %p \n",
+ data_len, reply_buf));
}
-#endif /* WL_ANDROID_PRIV_CMD_OVER_NL80211 && OEM_ANDROID */
+
+exit:
+ if (reply_buf)
+ kfree(reply_buf);
net_os_wake_unlock(ndev);
return err;
}
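/*
 * Flow of the rewritten handler above: the BRCM_ATTR_DRIVER_CMD string is
 * copied into a kzalloc'd reply_buf of WL_DRIVER_PRIV_CMD_LEN bytes and
 * passed to wl_handle_private_cmd(); whatever that writes back (clamped to
 * reply_len, or the literal "OK" when nothing is written) is returned to
 * user space via wl_cfgvendor_send_cmd_reply() with one extra byte,
 * presumably so the terminating NUL travels with the string, and the buffer
 * is released on the common exit path.
 */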
-#endif /* BCM_PRIV_CMD_SUPPORT */
-#ifdef WL_NAN
-static const char *nan_attr_to_str(u16 cmd)
+
+#ifdef LINKSTAT_SUPPORT
+#define NUM_RATE 32
+#define NUM_PEER 1
+#define NUM_CHAN 11
+#define HEADER_SIZE sizeof(ver_len)
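/*
 * Fixed sizing assumptions for the link-stats reply assembled below: one
 * radio, a single peer, NUM_CHAN (11, presumably the 2.4 GHz channel set)
 * per-channel slots and NUM_RATE (32) rate buckets. The radio section that
 * the function body steps over amounts to the following, shown here only as
 * an illustrative helper (not driver code):
 */
#if 0 /* illustrative sketch only */
static size_t lstats_radio_section_len(void)
{
	/* wifi_radio_stat_h header plus NUM_CHAN zeroed channel-stat slots */
	return sizeof(wifi_radio_stat_h) + NUM_CHAN * sizeof(wifi_channel_stat);
}
#endif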
+static int wl_cfgvendor_lstats_get_info(struct wiphy *wiphy,
+ struct wireless_dev *wdev, const void *data, int len)
{
- switch (cmd) {
- C2S(NAN_ATTRIBUTE_HEADER)
- C2S(NAN_ATTRIBUTE_HANDLE)
- C2S(NAN_ATTRIBUTE_TRANSAC_ID)
- C2S(NAN_ATTRIBUTE_2G_SUPPORT)
- C2S(NAN_ATTRIBUTE_SDF_2G_SUPPORT)
- C2S(NAN_ATTRIBUTE_SDF_5G_SUPPORT)
- C2S(NAN_ATTRIBUTE_5G_SUPPORT)
- C2S(NAN_ATTRIBUTE_SYNC_DISC_2G_BEACON)
- C2S(NAN_ATTRIBUTE_SYNC_DISC_5G_BEACON)
- C2S(NAN_ATTRIBUTE_CLUSTER_LOW)
- C2S(NAN_ATTRIBUTE_CLUSTER_HIGH)
- C2S(NAN_ATTRIBUTE_SID_BEACON)
- C2S(NAN_ATTRIBUTE_RSSI_CLOSE)
- C2S(NAN_ATTRIBUTE_RSSI_MIDDLE)
- C2S(NAN_ATTRIBUTE_RSSI_PROXIMITY)
- C2S(NAN_ATTRIBUTE_RSSI_CLOSE_5G)
- C2S(NAN_ATTRIBUTE_RSSI_MIDDLE_5G)
- C2S(NAN_ATTRIBUTE_RSSI_PROXIMITY_5G)
- C2S(NAN_ATTRIBUTE_HOP_COUNT_LIMIT)
- C2S(NAN_ATTRIBUTE_RANDOM_TIME)
- C2S(NAN_ATTRIBUTE_MASTER_PREF)
- C2S(NAN_ATTRIBUTE_PERIODIC_SCAN_INTERVAL)
- C2S(NAN_ATTRIBUTE_PUBLISH_ID)
- C2S(NAN_ATTRIBUTE_TTL)
- C2S(NAN_ATTRIBUTE_PERIOD)
- C2S(NAN_ATTRIBUTE_REPLIED_EVENT_FLAG)
- C2S(NAN_ATTRIBUTE_PUBLISH_TYPE)
- C2S(NAN_ATTRIBUTE_TX_TYPE)
- C2S(NAN_ATTRIBUTE_PUBLISH_COUNT)
- C2S(NAN_ATTRIBUTE_SERVICE_NAME_LEN)
- C2S(NAN_ATTRIBUTE_SERVICE_NAME)
- C2S(NAN_ATTRIBUTE_SERVICE_SPECIFIC_INFO_LEN)
- C2S(NAN_ATTRIBUTE_SERVICE_SPECIFIC_INFO)
- C2S(NAN_ATTRIBUTE_RX_MATCH_FILTER_LEN)
- C2S(NAN_ATTRIBUTE_RX_MATCH_FILTER)
- C2S(NAN_ATTRIBUTE_TX_MATCH_FILTER_LEN)
- C2S(NAN_ATTRIBUTE_TX_MATCH_FILTER)
- C2S(NAN_ATTRIBUTE_SUBSCRIBE_ID)
- C2S(NAN_ATTRIBUTE_SUBSCRIBE_TYPE)
- C2S(NAN_ATTRIBUTE_SERVICERESPONSEFILTER)
- C2S(NAN_ATTRIBUTE_SERVICERESPONSEINCLUDE)
- C2S(NAN_ATTRIBUTE_USESERVICERESPONSEFILTER)
- C2S(NAN_ATTRIBUTE_SSIREQUIREDFORMATCHINDICATION)
- C2S(NAN_ATTRIBUTE_SUBSCRIBE_MATCH)
- C2S(NAN_ATTRIBUTE_SUBSCRIBE_COUNT)
- C2S(NAN_ATTRIBUTE_MAC_ADDR)
- C2S(NAN_ATTRIBUTE_MAC_ADDR_LIST)
- C2S(NAN_ATTRIBUTE_MAC_ADDR_LIST_NUM_ENTRIES)
- C2S(NAN_ATTRIBUTE_PUBLISH_MATCH)
- C2S(NAN_ATTRIBUTE_ENABLE_STATUS)
- C2S(NAN_ATTRIBUTE_JOIN_STATUS)
- C2S(NAN_ATTRIBUTE_ROLE)
- C2S(NAN_ATTRIBUTE_MASTER_RANK)
- C2S(NAN_ATTRIBUTE_ANCHOR_MASTER_RANK)
- C2S(NAN_ATTRIBUTE_CNT_PEND_TXFRM)
- C2S(NAN_ATTRIBUTE_CNT_BCN_TX)
- C2S(NAN_ATTRIBUTE_CNT_BCN_RX)
- C2S(NAN_ATTRIBUTE_CNT_SVC_DISC_TX)
- C2S(NAN_ATTRIBUTE_CNT_SVC_DISC_RX)
- C2S(NAN_ATTRIBUTE_AMBTT)
- C2S(NAN_ATTRIBUTE_CLUSTER_ID)
- C2S(NAN_ATTRIBUTE_INST_ID)
- C2S(NAN_ATTRIBUTE_OUI)
- C2S(NAN_ATTRIBUTE_STATUS)
- C2S(NAN_ATTRIBUTE_DE_EVENT_TYPE)
- C2S(NAN_ATTRIBUTE_MERGE)
- C2S(NAN_ATTRIBUTE_IFACE)
- C2S(NAN_ATTRIBUTE_CHANNEL)
- C2S(NAN_ATTRIBUTE_24G_CHANNEL)
- C2S(NAN_ATTRIBUTE_5G_CHANNEL)
- C2S(NAN_ATTRIBUTE_PEER_ID)
- C2S(NAN_ATTRIBUTE_NDP_ID)
- C2S(NAN_ATTRIBUTE_SECURITY)
- C2S(NAN_ATTRIBUTE_QOS)
- C2S(NAN_ATTRIBUTE_RSP_CODE)
- C2S(NAN_ATTRIBUTE_INST_COUNT)
- C2S(NAN_ATTRIBUTE_PEER_DISC_MAC_ADDR)
- C2S(NAN_ATTRIBUTE_PEER_NDI_MAC_ADDR)
- C2S(NAN_ATTRIBUTE_IF_ADDR)
- C2S(NAN_ATTRIBUTE_WARMUP_TIME)
- C2S(NAN_ATTRIBUTE_RECV_IND_CFG)
- C2S(NAN_ATTRIBUTE_CONNMAP)
- C2S(NAN_ATTRIBUTE_DWELL_TIME)
- C2S(NAN_ATTRIBUTE_SCAN_PERIOD)
- C2S(NAN_ATTRIBUTE_RSSI_WINDOW_SIZE)
- C2S(NAN_ATTRIBUTE_CONF_CLUSTER_VAL)
- C2S(NAN_ATTRIBUTE_CIPHER_SUITE_TYPE)
- C2S(NAN_ATTRIBUTE_KEY_TYPE)
- C2S(NAN_ATTRIBUTE_KEY_LEN)
- C2S(NAN_ATTRIBUTE_SCID)
- C2S(NAN_ATTRIBUTE_SCID_LEN)
- C2S(NAN_ATTRIBUTE_SDE_CONTROL_CONFIG_DP)
- C2S(NAN_ATTRIBUTE_SDE_CONTROL_SECURITY)
- C2S(NAN_ATTRIBUTE_SDE_CONTROL_DP_TYPE)
- C2S(NAN_ATTRIBUTE_SDE_CONTROL_RANGE_SUPPORT)
- C2S(NAN_ATTRIBUTE_NO_CONFIG_AVAIL)
- C2S(NAN_ATTRIBUTE_2G_AWAKE_DW)
- C2S(NAN_ATTRIBUTE_5G_AWAKE_DW)
- C2S(NAN_ATTRIBUTE_RSSI_THRESHOLD_FLAG)
- C2S(NAN_ATTRIBUTE_KEY_DATA)
- C2S(NAN_ATTRIBUTE_SDEA_SERVICE_SPECIFIC_INFO_LEN)
- C2S(NAN_ATTRIBUTE_SDEA_SERVICE_SPECIFIC_INFO)
- C2S(NAN_ATTRIBUTE_REASON)
- C2S(NAN_ATTRIBUTE_DISC_IND_CFG)
- C2S(NAN_ATTRIBUTE_DWELL_TIME_5G)
- C2S(NAN_ATTRIBUTE_SCAN_PERIOD_5G)
- C2S(NAN_ATTRIBUTE_SUB_SID_BEACON)
- default:
- return "NAN_ATTRIBUTE_UNKNOWN";
- }
-}
+ static char iovar_buf[WLC_IOCTL_MAXLEN];
+ struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+ int err = 0, i;
+ wifi_radio_stat *radio;
+ wifi_radio_stat_h radio_h;
+ wl_wme_cnt_t *wl_wme_cnt;
+ wl_cnt_ge40mcst_v1_t *macstat_cnt;
+ wl_cnt_wlc_t *wlc_cnt;
+ scb_val_t scbval;
+ char *output = NULL;
+ char *outdata = NULL;
+ wifi_rate_stat_v1 *p_wifi_rate_stat_v1 = NULL;
+ wifi_rate_stat *p_wifi_rate_stat = NULL;
+ uint total_len = 0;
+ wifi_iface_stat iface;
+ wlc_rev_info_t revinfo;
+#ifdef CONFIG_COMPAT
+ compat_wifi_iface_stat compat_iface;
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 6, 0))
+ int compat_task_state = in_compat_syscall();
+#else
+ int compat_task_state = is_compat_task();
+#endif
+#endif /* CONFIG_COMPAT */
-nan_hal_status_t nan_status_reasonstr_map[] = {
- {NAN_STATUS_SUCCESS, "NAN status success"},
- {NAN_STATUS_INTERNAL_FAILURE, "NAN Discovery engine failure"},
- {NAN_STATUS_PROTOCOL_FAILURE, "protocol failure"},
- {NAN_STATUS_INVALID_PUBLISH_SUBSCRIBE_ID, "invalid pub_sub ID"},
- {NAN_STATUS_NO_RESOURCE_AVAILABLE, "No space available"},
- {NAN_STATUS_INVALID_PARAM, "invalid param"},
- {NAN_STATUS_INVALID_REQUESTOR_INSTANCE_ID, "invalid req inst id"},
- {NAN_STATUS_INVALID_NDP_ID, "invalid ndp id"},
- {NAN_STATUS_NAN_NOT_ALLOWED, "Nan not allowed"},
- {NAN_STATUS_NO_OTA_ACK, "No OTA ack"},
- {NAN_STATUS_ALREADY_ENABLED, "NAN is Already enabled"},
- {NAN_STATUS_FOLLOWUP_QUEUE_FULL, "Follow-up queue full"},
- {NAN_STATUS_UNSUPPORTED_CONCURRENCY_NAN_DISABLED, "unsupported concurrency"},
-};
+ WL_INFORM(("%s: Enter \n", __func__));
+ RETURN_EIO_IF_NOT_UP(cfg);
-void
-wl_cfgvendor_add_nan_reason_str(nan_status_type_t status, nan_hal_resp_t *nan_req_resp)
-{
- int i = 0;
- int num = (int)(sizeof(nan_status_reasonstr_map)/sizeof(nan_status_reasonstr_map[0]));
- for (i = 0; i < num; i++) {
- if (nan_status_reasonstr_map[i].status == status) {
- strlcpy(nan_req_resp->nan_reason, nan_status_reasonstr_map[i].nan_reason,
- sizeof(nan_status_reasonstr_map[i].nan_reason));
- break;
- }
+ /* Get the device rev info */
+ memset(&revinfo, 0, sizeof(revinfo));
+ err = wldev_ioctl_get(bcmcfg_to_prmry_ndev(cfg), WLC_GET_REVINFO, &revinfo,
+ sizeof(revinfo));
+ if (err != BCME_OK) {
+ goto exit;
}
-}
-nan_status_type_t
-wl_cfgvendor_brcm_to_nanhal_status(int32 vendor_status)
-{
- nan_status_type_t hal_status;
- switch (vendor_status) {
- case BCME_OK:
- hal_status = NAN_STATUS_SUCCESS;
- break;
- case BCME_BUSY:
- case BCME_NOTREADY:
- hal_status = NAN_STATUS_NAN_NOT_ALLOWED;
- break;
- case BCME_BADLEN:
- case BCME_BADBAND:
- case BCME_UNSUPPORTED:
- case BCME_USAGE_ERROR:
- case BCME_BADARG:
- hal_status = NAN_STATUS_INVALID_PARAM;
- break;
- case BCME_NOMEM:
- case BCME_NORESOURCE:
- case WL_NAN_E_SVC_SUB_LIST_FULL:
- hal_status = NAN_STATUS_NO_RESOURCE_AVAILABLE;
- break;
- case WL_NAN_E_SD_TX_LIST_FULL:
- hal_status = NAN_STATUS_FOLLOWUP_QUEUE_FULL;
- break;
- case WL_NAN_E_BAD_INSTANCE:
- hal_status = NAN_STATUS_INVALID_PUBLISH_SUBSCRIBE_ID;
- break;
- default:
- WL_ERR(("%s Unknown vendor status, status = %d\n",
- __func__, vendor_status));
- /* Generic error */
- hal_status = NAN_STATUS_INTERNAL_FAILURE;
+ outdata = (void *)kzalloc(WLC_IOCTL_MAXLEN, GFP_KERNEL);
+ if (outdata == NULL) {
+ WL_ERR(("%s: alloc failed\n", __func__));
+ return -ENOMEM;
}
- return hal_status;
-}
-
-static int
-wl_cfgvendor_nan_cmd_reply(struct wiphy *wiphy, int nan_cmd,
- nan_hal_resp_t *nan_req_resp, int ret, int nan_cmd_status)
-{
- int err;
- int nan_reply;
- nan_req_resp->subcmd = nan_cmd;
- if (ret == BCME_OK) {
- nan_reply = nan_cmd_status;
- } else {
- nan_reply = ret;
- }
- nan_req_resp->status = wl_cfgvendor_brcm_to_nanhal_status(nan_reply);
- nan_req_resp->value = ret;
- err = wl_cfgvendor_send_cmd_reply(wiphy, nan_req_resp,
- sizeof(*nan_req_resp));
- /* giving more prio to ret than err */
- return (ret == 0) ? err : ret;
-}
-static void
-wl_cfgvendor_free_disc_cmd_data(struct bcm_cfg80211 *cfg,
- nan_discover_cmd_data_t *cmd_data)
-{
- if (!cmd_data) {
- WL_ERR(("Cmd_data is null\n"));
- return;
- }
- if (cmd_data->svc_info.data) {
- MFREE(cfg->osh, cmd_data->svc_info.data, cmd_data->svc_info.dlen);
- }
- if (cmd_data->svc_hash.data) {
- MFREE(cfg->osh, cmd_data->svc_hash.data, cmd_data->svc_hash.dlen);
- }
- if (cmd_data->rx_match.data) {
- MFREE(cfg->osh, cmd_data->rx_match.data, cmd_data->rx_match.dlen);
- }
- if (cmd_data->tx_match.data) {
- MFREE(cfg->osh, cmd_data->tx_match.data, cmd_data->tx_match.dlen);
- }
- if (cmd_data->mac_list.list) {
- MFREE(cfg->osh, cmd_data->mac_list.list,
- cmd_data->mac_list.num_mac_addr * ETHER_ADDR_LEN);
- }
- if (cmd_data->key.data) {
- MFREE(cfg->osh, cmd_data->key.data, NAN_MAX_PMK_LEN);
- }
- if (cmd_data->sde_svc_info.data) {
- MFREE(cfg->osh, cmd_data->sde_svc_info.data, cmd_data->sde_svc_info.dlen);
- }
- MFREE(cfg->osh, cmd_data, sizeof(*cmd_data));
-}
+ memset(&scbval, 0, sizeof(scb_val_t));
+ memset(outdata, 0, WLC_IOCTL_MAXLEN);
+ output = outdata;
-static void
-wl_cfgvendor_free_dp_cmd_data(struct bcm_cfg80211 *cfg,
- nan_datapath_cmd_data_t *cmd_data)
-{
- if (!cmd_data) {
- WL_ERR(("Cmd_data is null\n"));
- return;
- }
- if (cmd_data->svc_hash.data) {
- MFREE(cfg->osh, cmd_data->svc_hash.data, cmd_data->svc_hash.dlen);
- }
- if (cmd_data->svc_info.data) {
- MFREE(cfg->osh, cmd_data->svc_info.data, cmd_data->svc_info.dlen);
- }
- if (cmd_data->key.data) {
- MFREE(cfg->osh, cmd_data->key.data, NAN_MAX_PMK_LEN);
+ err = wldev_iovar_getbuf(bcmcfg_to_prmry_ndev(cfg), "radiostat", NULL, 0,
+ iovar_buf, WLC_IOCTL_MAXLEN, NULL);
+ if (err != BCME_OK && err != BCME_UNSUPPORTED) {
+ WL_ERR(("error (%d) - size = %zu\n", err, sizeof(wifi_radio_stat)));
+ goto exit;
}
- MFREE(cfg->osh, cmd_data, sizeof(*cmd_data));
-}
+ radio = (wifi_radio_stat *)iovar_buf;
-#define WL_NAN_EVENT_MAX_BUF 256
-#ifdef WL_NAN_DISC_CACHE
-static int
-wl_cfgvendor_nan_parse_dp_sec_info_args(struct wiphy *wiphy,
- const void *buf, int len, nan_datapath_sec_info_cmd_data_t *cmd_data)
-{
- int ret = BCME_OK;
- int attr_type;
- int rem = len;
- const struct nlattr *iter;
+ memset(&radio_h, 0, sizeof(wifi_radio_stat_h));
+ radio_h.on_time = radio->on_time;
+ radio_h.tx_time = radio->tx_time;
+ radio_h.rx_time = radio->rx_time;
+ radio_h.on_time_scan = radio->on_time_scan;
+ radio_h.on_time_nbd = radio->on_time_nbd;
+ radio_h.on_time_gscan = radio->on_time_gscan;
+ radio_h.on_time_roam_scan = radio->on_time_roam_scan;
+ radio_h.on_time_pno_scan = radio->on_time_pno_scan;
+ radio_h.on_time_hs20 = radio->on_time_hs20;
+ radio_h.num_channels = NUM_CHAN;
- NAN_DBG_ENTER();
+ memcpy(output, &radio_h, sizeof(wifi_radio_stat_h));
- nla_for_each_attr(iter, buf, len, rem) {
- attr_type = nla_type(iter);
- WL_TRACE(("attr: %s (%u)\n", nan_attr_to_str(attr_type), attr_type));
+ output += sizeof(wifi_radio_stat_h);
+ output += (NUM_CHAN * sizeof(wifi_channel_stat));
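/*
 * Layout of the reply being built in outdata: the wifi_radio_stat_h header
 * filled from the "radiostat" iovar above, then NUM_CHAN wifi_channel_stat
 * slots that are skipped here and stay zeroed from the earlier memset, with
 * the per-AC interface counters gathered next from "wme_counters".
 */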
- switch (attr_type) {
- case NAN_ATTRIBUTE_MAC_ADDR:
- ret = memcpy_s((char*)&cmd_data->mac_addr, ETHER_ADDR_LEN,
- (char*)nla_data(iter), nla_len(iter));
- if (ret != BCME_OK) {
- WL_ERR(("Failed to copy mac addr\n"));
- return ret;
- }
- break;
- case NAN_ATTRIBUTE_PUBLISH_ID:
- cmd_data->pub_id = nla_get_u16(iter);
- break;
- case NAN_ATTRIBUTE_NDP_ID:
- cmd_data->ndp_instance_id = nla_get_u32(iter);
- break;
- default:
- WL_ERR(("%s: Unknown type, %d\n", __FUNCTION__, attr_type));
- ret = BCME_BADARG;
- break;
- }
+ err = wldev_iovar_getbuf(bcmcfg_to_prmry_ndev(cfg), "wme_counters", NULL, 0,
+ iovar_buf, WLC_IOCTL_MAXLEN, NULL);
+ if (unlikely(err)) {
+ WL_ERR(("error (%d)\n", err));
+ goto exit;
}
- /* We need to call set_config_handler b/f calling start enable TBD */
- NAN_DBG_EXIT();
- return ret;
-}
-#endif /* WL_NAN_DISC_CACHE */
+ wl_wme_cnt = (wl_wme_cnt_t *)iovar_buf;
-int8 chanbuf[CHANSPEC_STR_LEN];
-static int
-wl_cfgvendor_nan_parse_datapath_args(struct wiphy *wiphy,
- const void *buf, int len, nan_datapath_cmd_data_t *cmd_data)
-{
- int ret = BCME_OK;
- int attr_type;
- int rem = len;
- const struct nlattr *iter;
- struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
- int chan;
+ COMPAT_ASSIGN_VALUE(iface, ac[WIFI_AC_VO].ac, WIFI_AC_VO);
+ COMPAT_ASSIGN_VALUE(iface, ac[WIFI_AC_VO].tx_mpdu, wl_wme_cnt->tx[AC_VO].packets);
+ COMPAT_ASSIGN_VALUE(iface, ac[WIFI_AC_VO].rx_mpdu, wl_wme_cnt->rx[AC_VO].packets);
+ COMPAT_ASSIGN_VALUE(iface, ac[WIFI_AC_VO].mpdu_lost,
+ wl_wme_cnt->tx_failed[WIFI_AC_VO].packets);
- NAN_DBG_ENTER();
+ COMPAT_ASSIGN_VALUE(iface, ac[WIFI_AC_VI].ac, WIFI_AC_VI);
+ COMPAT_ASSIGN_VALUE(iface, ac[WIFI_AC_VI].tx_mpdu, wl_wme_cnt->tx[AC_VI].packets);
+ COMPAT_ASSIGN_VALUE(iface, ac[WIFI_AC_VI].rx_mpdu, wl_wme_cnt->rx[AC_VI].packets);
+ COMPAT_ASSIGN_VALUE(iface, ac[WIFI_AC_VI].mpdu_lost,
+ wl_wme_cnt->tx_failed[WIFI_AC_VI].packets);
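/*
 * COMPAT_ASSIGN_VALUE() is assumed here to store each field into either the
 * native wifi_iface_stat ("iface") or its compat_wifi_iface_stat twin,
 * selected by compat_task_state above (in_compat_syscall() on >= 4.6
 * kernels, is_compat_task() otherwise); the macro itself is defined outside
 * the lines shown. As written above, tx/rx MPDU counts index wl_wme_cnt
 * with the firmware AC_VO/AC_VI constants while mpdu_lost indexes tx_failed
 * with the HAL WIFI_AC_* values.
 */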
- nla_for_each_attr(iter, buf, len, rem) {
- attr_type = nla_type(iter);
- WL_TRACE(("attr: %s (%u)\n", nan_attr_to_str(attr_type), attr_type));
-
- switch (attr_type) {
- case NAN_ATTRIBUTE_NDP_ID:
- if (nla_len(iter) != sizeof(uint32)) {
- ret = -EINVAL;
- goto exit;
- }
- cmd_data->ndp_instance_id = nla_get_u32(iter);
- break;
- case NAN_ATTRIBUTE_IFACE:
- if (nla_len(iter) >= sizeof(cmd_data->ndp_iface)) {
- WL_ERR(("iface_name len wrong:%d\n", nla_len(iter)));
- ret = -EINVAL;
- goto exit;
- }
- strlcpy((char *)cmd_data->ndp_iface, (char *)nla_data(iter),
- nla_len(iter));
- break;
- case NAN_ATTRIBUTE_SECURITY:
- if (nla_len(iter) != sizeof(uint8)) {
- ret = -EINVAL;
- goto exit;
- }
- cmd_data->ndp_cfg.security_cfg = nla_get_u8(iter);
- break;
- case NAN_ATTRIBUTE_QOS:
- if (nla_len(iter) != sizeof(uint8)) {
- ret = -EINVAL;
- goto exit;
- }
- cmd_data->ndp_cfg.qos_cfg = nla_get_u8(iter);
- break;
- case NAN_ATTRIBUTE_RSP_CODE:
- if (nla_len(iter) != sizeof(uint8)) {
- ret = -EINVAL;
- goto exit;
- }
- cmd_data->rsp_code = nla_get_u8(iter);
- break;
- case NAN_ATTRIBUTE_INST_COUNT:
- if (nla_len(iter) != sizeof(uint8)) {
- ret = -EINVAL;
- goto exit;
- }
- cmd_data->num_ndp_instances = nla_get_u8(iter);
- break;
- case NAN_ATTRIBUTE_PEER_DISC_MAC_ADDR:
- if (nla_len(iter) != ETHER_ADDR_LEN) {
- ret = -EINVAL;
- goto exit;
- }
- ret = memcpy_s((char*)&cmd_data->peer_disc_mac_addr,
- ETHER_ADDR_LEN, (char*)nla_data(iter), nla_len(iter));
- if (ret != BCME_OK) {
- WL_ERR(("Failed to copy peer_disc_mac_addr\n"));
- goto exit;
- }
- break;
- case NAN_ATTRIBUTE_PEER_NDI_MAC_ADDR:
- if (nla_len(iter) != ETHER_ADDR_LEN) {
- ret = -EINVAL;
- goto exit;
- }
- ret = memcpy_s((char*)&cmd_data->peer_ndi_mac_addr,
- ETHER_ADDR_LEN, (char*)nla_data(iter), nla_len(iter));
- if (ret != BCME_OK) {
- WL_ERR(("Failed to copy peer_ndi_mac_addr\n"));
- goto exit;
- }
- break;
- case NAN_ATTRIBUTE_MAC_ADDR:
- if (nla_len(iter) != ETHER_ADDR_LEN) {
- ret = -EINVAL;
- goto exit;
- }
- ret = memcpy_s((char*)&cmd_data->mac_addr, ETHER_ADDR_LEN,
- (char*)nla_data(iter), nla_len(iter));
- if (ret != BCME_OK) {
- WL_ERR(("Failed to copy mac_addr\n"));
- goto exit;
- }
- break;
- case NAN_ATTRIBUTE_IF_ADDR:
- if (nla_len(iter) != ETHER_ADDR_LEN) {
- ret = -EINVAL;
- goto exit;
- }
- ret = memcpy_s((char*)&cmd_data->if_addr, ETHER_ADDR_LEN,
- (char*)nla_data(iter), nla_len(iter));
- if (ret != BCME_OK) {
- WL_ERR(("Failed to copy if_addr\n"));
- goto exit;
- }
- break;
- case NAN_ATTRIBUTE_ENTRY_CONTROL:
- if (nla_len(iter) != sizeof(uint8)) {
- ret = -EINVAL;
- goto exit;
- }
- cmd_data->avail_params.duration = nla_get_u8(iter);
- break;
- case NAN_ATTRIBUTE_AVAIL_BIT_MAP:
- if (nla_len(iter) != sizeof(uint32)) {
- ret = -EINVAL;
- goto exit;
- }
- cmd_data->avail_params.bmap = nla_get_u32(iter);
- break;
- case NAN_ATTRIBUTE_CHANNEL: {
- if (nla_len(iter) != sizeof(uint32)) {
- ret = -EINVAL;
- goto exit;
- }
- /* take the default channel start_factor frequency */
- chan = wf_mhz2channel((uint)nla_get_u32(iter), 0);
- if (chan <= CH_MAX_2G_CHANNEL) {
- cmd_data->avail_params.chanspec[0] =
- wf_channel2chspec(chan, WL_CHANSPEC_BW_20);
- } else {
- cmd_data->avail_params.chanspec[0] =
- wf_channel2chspec(chan, WL_CHANSPEC_BW_80);
- }
- if (cmd_data->avail_params.chanspec[0] == 0) {
- WL_ERR(("Channel is not valid \n"));
- ret = -EINVAL;
- goto exit;
- }
- WL_TRACE(("valid chanspec, chanspec = 0x%04x \n",
- cmd_data->avail_params.chanspec[0]));
- break;
- }
- case NAN_ATTRIBUTE_NO_CONFIG_AVAIL:
- if (nla_len(iter) != sizeof(uint8)) {
- ret = -EINVAL;
- goto exit;
- }
- cmd_data->avail_params.no_config_avail = (bool)nla_get_u8(iter);
- break;
- case NAN_ATTRIBUTE_SERVICE_NAME_LEN: {
- if (nla_len(iter) != sizeof(uint16)) {
- ret = -EINVAL;
- goto exit;
- }
- if (cmd_data->svc_hash.dlen) {
- WL_ERR(("trying to overwrite:%d\n", attr_type));
- ret = -EINVAL;
- goto exit;
- }
- cmd_data->svc_hash.dlen = nla_get_u16(iter);
- if (cmd_data->svc_hash.dlen != WL_NAN_SVC_HASH_LEN) {
- WL_ERR(("invalid svc_hash length = %u\n", cmd_data->svc_hash.dlen));
- ret = -EINVAL;
- goto exit;
- }
- break;
- }
- case NAN_ATTRIBUTE_SERVICE_NAME:
- if ((!cmd_data->svc_hash.dlen) ||
- (nla_len(iter) != cmd_data->svc_hash.dlen)) {
- WL_ERR(("invalid svc_hash length = %d,%d\n",
- cmd_data->svc_hash.dlen, nla_len(iter)));
- ret = -EINVAL;
- goto exit;
- }
- if (cmd_data->svc_hash.data) {
- WL_ERR(("trying to overwrite:%d\n", attr_type));
- ret = -EINVAL;
- goto exit;
- }
- cmd_data->svc_hash.data =
- MALLOCZ(cfg->osh, cmd_data->svc_hash.dlen);
- if (!cmd_data->svc_hash.data) {
- WL_ERR(("failed to allocate svc_hash data, len=%d\n",
- cmd_data->svc_hash.dlen));
- ret = -ENOMEM;
- goto exit;
- }
- ret = memcpy_s(cmd_data->svc_hash.data, cmd_data->svc_hash.dlen,
- nla_data(iter), nla_len(iter));
- if (ret != BCME_OK) {
- WL_ERR(("Failed to copy svc hash data\n"));
- goto exit;
- }
- break;
- case NAN_ATTRIBUTE_SERVICE_SPECIFIC_INFO_LEN:
- if (nla_len(iter) != sizeof(uint16)) {
- ret = -EINVAL;
- goto exit;
- }
- if (cmd_data->svc_info.dlen) {
- WL_ERR(("trying to overwrite:%d\n", attr_type));
- ret = -EINVAL;
- goto exit;
- }
- cmd_data->svc_info.dlen = nla_get_u16(iter);
- if (cmd_data->svc_info.dlen > MAX_APP_INFO_LEN) {
- WL_ERR_RLMT(("Not allowed beyond :%d\n", MAX_APP_INFO_LEN));
- ret = -EINVAL;
- goto exit;
- }
- break;
- case NAN_ATTRIBUTE_SERVICE_SPECIFIC_INFO:
- if ((!cmd_data->svc_info.dlen) ||
- (nla_len(iter) != cmd_data->svc_info.dlen)) {
- WL_ERR(("failed to allocate svc info by invalid len=%d,%d\n",
- cmd_data->svc_info.dlen, nla_len(iter)));
- ret = -EINVAL;
- goto exit;
- }
- if (cmd_data->svc_info.data) {
- WL_ERR(("trying to overwrite:%d\n", attr_type));
- ret = -EINVAL;
- goto exit;
- }
- cmd_data->svc_info.data = MALLOCZ(cfg->osh, cmd_data->svc_info.dlen);
- if (cmd_data->svc_info.data == NULL) {
- WL_ERR(("failed to allocate svc info data, len=%d\n",
- cmd_data->svc_info.dlen));
- ret = -ENOMEM;
- goto exit;
- }
- ret = memcpy_s(cmd_data->svc_info.data, cmd_data->svc_info.dlen,
- nla_data(iter), nla_len(iter));
- if (ret != BCME_OK) {
- WL_ERR(("Failed to copy svc info\n"));
- goto exit;
- }
- break;
- case NAN_ATTRIBUTE_PUBLISH_ID:
- if (nla_len(iter) != sizeof(uint32)) {
- ret = -EINVAL;
- goto exit;
- }
- cmd_data->pub_id = nla_get_u32(iter);
- break;
- case NAN_ATTRIBUTE_CIPHER_SUITE_TYPE:
- if (nla_len(iter) != sizeof(uint8)) {
- ret = -EINVAL;
- goto exit;
- }
- cmd_data->csid = nla_get_u8(iter);
- WL_TRACE(("CSID = %u\n", cmd_data->csid));
- break;
- case NAN_ATTRIBUTE_KEY_TYPE:
- if (nla_len(iter) != sizeof(uint8)) {
- ret = -EINVAL;
- goto exit;
- }
- cmd_data->key_type = nla_get_u8(iter);
- WL_TRACE(("Key Type = %u\n", cmd_data->key_type));
- break;
- case NAN_ATTRIBUTE_KEY_LEN:
- if (nla_len(iter) != sizeof(uint32)) {
- ret = -EINVAL;
- goto exit;
- }
- if (cmd_data->key.dlen) {
- WL_ERR(("trying to overwrite:%d\n", attr_type));
- ret = -EINVAL;
- goto exit;
- }
- cmd_data->key.dlen = nla_get_u32(iter);
- if ((!cmd_data->key.dlen) || (cmd_data->key.dlen > WL_NAN_NCS_SK_PMK_LEN)) {
- WL_ERR(("invalid key length = %u\n", cmd_data->key.dlen));
- ret = -EINVAL;
- goto exit;
- }
- WL_TRACE(("valid key length = %u\n", cmd_data->key.dlen));
- break;
- case NAN_ATTRIBUTE_KEY_DATA:
- if ((!cmd_data->key.dlen) ||
- (nla_len(iter) != cmd_data->key.dlen)) {
- WL_ERR(("failed to allocate key data by invalid len=%d,%d\n",
- cmd_data->key.dlen, nla_len(iter)));
- ret = -EINVAL;
- goto exit;
- }
- if (cmd_data->key.data) {
- WL_ERR(("trying to overwrite key data.\n"));
- ret = -EINVAL;
- goto exit;
- }
-
- cmd_data->key.data = MALLOCZ(cfg->osh, NAN_MAX_PMK_LEN);
- if (cmd_data->key.data == NULL) {
- WL_ERR(("failed to allocate key data, len=%d\n",
- cmd_data->key.dlen));
- ret = -ENOMEM;
- goto exit;
- }
- ret = memcpy_s(cmd_data->key.data, NAN_MAX_PMK_LEN,
- nla_data(iter), nla_len(iter));
- if (ret != BCME_OK) {
- WL_ERR(("Failed to key data\n"));
- goto exit;
- }
- break;
-
- default:
- WL_ERR(("Unknown type, %d\n", attr_type));
- ret = -EINVAL;
- goto exit;
- }
- }
-exit:
- /* We need to call set_config_handler b/f calling start enable TBD */
- NAN_DBG_EXIT();
- return ret;
-}
-
-static int
-wl_cfgvendor_nan_parse_discover_args(struct wiphy *wiphy,
- const void *buf, int len, nan_discover_cmd_data_t *cmd_data)
-{
- int ret = BCME_OK;
- int attr_type;
- int rem = len;
- const struct nlattr *iter;
- struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
- u8 val_u8;
- u32 bit_flag;
- u8 flag_match;
-
- NAN_DBG_ENTER();
-
- nla_for_each_attr(iter, buf, len, rem) {
- attr_type = nla_type(iter);
- WL_TRACE(("attr: %s (%u)\n", nan_attr_to_str(attr_type), attr_type));
-
- switch (attr_type) {
- case NAN_ATTRIBUTE_TRANSAC_ID:
- if (nla_len(iter) != sizeof(uint16)) {
- ret = -EINVAL;
- goto exit;
- }
- cmd_data->token = nla_get_u16(iter);
- break;
- case NAN_ATTRIBUTE_PERIODIC_SCAN_INTERVAL:
- break;
-
- /* Nan Publish/Subscribe request Attributes */
- case NAN_ATTRIBUTE_PUBLISH_ID:
- if (nla_len(iter) != sizeof(uint16)) {
- ret = -EINVAL;
- goto exit;
- }
- cmd_data->pub_id = nla_get_u16(iter);
- cmd_data->local_id = cmd_data->pub_id;
- break;
- case NAN_ATTRIBUTE_MAC_ADDR:
- if (nla_len(iter) != ETHER_ADDR_LEN) {
- ret = -EINVAL;
- goto exit;
- }
- ret = memcpy_s((char*)&cmd_data->mac_addr, ETHER_ADDR_LEN,
- (char*)nla_data(iter), nla_len(iter));
- if (ret != BCME_OK) {
- WL_ERR(("Failed to copy mac addr\n"));
- return ret;
- }
- break;
- case NAN_ATTRIBUTE_SERVICE_SPECIFIC_INFO_LEN:
- if (nla_len(iter) != sizeof(uint16)) {
- ret = -EINVAL;
- goto exit;
- }
- if (cmd_data->svc_info.dlen) {
- WL_ERR(("trying to overwrite:%d\n", attr_type));
- ret = -EINVAL;
- goto exit;
- }
- cmd_data->svc_info.dlen = nla_get_u16(iter);
- if (cmd_data->svc_info.dlen > NAN_MAX_SERVICE_SPECIFIC_INFO_LEN) {
- WL_ERR_RLMT(("Not allowed beyond :%d\n",
- NAN_MAX_SERVICE_SPECIFIC_INFO_LEN));
- ret = -EINVAL;
- goto exit;
- }
- break;
- case NAN_ATTRIBUTE_SERVICE_SPECIFIC_INFO:
- if ((!cmd_data->svc_info.dlen) ||
- (nla_len(iter) != cmd_data->svc_info.dlen)) {
- WL_ERR(("failed to allocate svc info by invalid len=%d,%d\n",
- cmd_data->svc_info.dlen, nla_len(iter)));
- ret = -EINVAL;
- goto exit;
- }
- if (cmd_data->svc_info.data) {
- WL_ERR(("trying to overwrite:%d\n", attr_type));
- ret = -EINVAL;
- goto exit;
- }
-
- cmd_data->svc_info.data = MALLOCZ(cfg->osh, cmd_data->svc_info.dlen);
- if (cmd_data->svc_info.data == NULL) {
- WL_ERR(("failed to allocate svc info data, len=%d\n",
- cmd_data->svc_info.dlen));
- ret = -ENOMEM;
- goto exit;
- }
- ret = memcpy_s(cmd_data->svc_info.data, cmd_data->svc_info.dlen,
- nla_data(iter), nla_len(iter));
- if (ret != BCME_OK) {
- WL_ERR(("Failed to copy svc info\n"));
- return ret;
- }
- break;
- case NAN_ATTRIBUTE_SUBSCRIBE_ID:
- if (nla_len(iter) != sizeof(uint16)) {
- ret = -EINVAL;
- goto exit;
- }
- cmd_data->sub_id = nla_get_u16(iter);
- cmd_data->local_id = cmd_data->sub_id;
- break;
- case NAN_ATTRIBUTE_SUBSCRIBE_TYPE:
- if (nla_len(iter) != sizeof(uint8)) {
- ret = -EINVAL;
- goto exit;
- }
- cmd_data->flags |= nla_get_u8(iter) ? WL_NAN_SUB_ACTIVE : 0;
- break;
- case NAN_ATTRIBUTE_PUBLISH_COUNT:
- if (nla_len(iter) != sizeof(uint8)) {
- ret = -EINVAL;
- goto exit;
- }
- cmd_data->life_count = nla_get_u8(iter);
- break;
- case NAN_ATTRIBUTE_PUBLISH_TYPE: {
- if (nla_len(iter) != sizeof(uint8)) {
- ret = -EINVAL;
- goto exit;
- }
- val_u8 = nla_get_u8(iter);
- if (val_u8 == 0) {
- cmd_data->flags |= WL_NAN_PUB_UNSOLICIT;
- } else if (val_u8 == 1) {
- cmd_data->flags |= WL_NAN_PUB_SOLICIT;
- } else {
- cmd_data->flags |= WL_NAN_PUB_BOTH;
- }
- break;
- }
- case NAN_ATTRIBUTE_PERIOD: {
- if (nla_len(iter) != sizeof(uint16)) {
- ret = -EINVAL;
- goto exit;
- }
- if (nla_get_u16(iter) > NAN_MAX_AWAKE_DW_INTERVAL) {
- WL_ERR(("Invalid/Out of bound value = %u\n", nla_get_u16(iter)));
- ret = BCME_BADARG;
- break;
- }
- if (nla_get_u16(iter)) {
- cmd_data->period = 1 << (nla_get_u16(iter)-1);
- }
- break;
- }
- case NAN_ATTRIBUTE_REPLIED_EVENT_FLAG:
- break;
- case NAN_ATTRIBUTE_TTL:
- if (nla_len(iter) != sizeof(uint16)) {
- ret = -EINVAL;
- goto exit;
- }
- cmd_data->ttl = nla_get_u16(iter);
- break;
- case NAN_ATTRIBUTE_SERVICE_NAME_LEN: {
- if (nla_len(iter) != sizeof(uint16)) {
- ret = -EINVAL;
- goto exit;
- }
- if (cmd_data->svc_hash.dlen) {
- WL_ERR(("trying to overwrite:%d\n", attr_type));
- ret = -EINVAL;
- goto exit;
- }
-
- cmd_data->svc_hash.dlen = nla_get_u16(iter);
- if (cmd_data->svc_hash.dlen != WL_NAN_SVC_HASH_LEN) {
- WL_ERR(("invalid svc_hash length = %u\n", cmd_data->svc_hash.dlen));
- ret = -EINVAL;
- goto exit;
- }
- break;
- }
- case NAN_ATTRIBUTE_SERVICE_NAME:
- if ((!cmd_data->svc_hash.dlen) ||
- (nla_len(iter) != cmd_data->svc_hash.dlen)) {
- WL_ERR(("invalid svc_hash length = %d,%d\n",
- cmd_data->svc_hash.dlen, nla_len(iter)));
- ret = -EINVAL;
- goto exit;
- }
- if (cmd_data->svc_hash.data) {
- WL_ERR(("trying to overwrite:%d\n", attr_type));
- ret = -EINVAL;
- goto exit;
- }
-
- cmd_data->svc_hash.data =
- MALLOCZ(cfg->osh, cmd_data->svc_hash.dlen);
- if (!cmd_data->svc_hash.data) {
- WL_ERR(("failed to allocate svc_hash data, len=%d\n",
- cmd_data->svc_hash.dlen));
- ret = -ENOMEM;
- goto exit;
- }
- ret = memcpy_s(cmd_data->svc_hash.data, cmd_data->svc_hash.dlen,
- nla_data(iter), nla_len(iter));
- if (ret != BCME_OK) {
- WL_ERR(("Failed to copy svc hash data\n"));
- return ret;
- }
- break;
- case NAN_ATTRIBUTE_PEER_ID:
- if (nla_len(iter) != sizeof(uint32)) {
- ret = -EINVAL;
- goto exit;
- }
- cmd_data->remote_id = nla_get_u32(iter);
- break;
- case NAN_ATTRIBUTE_INST_ID:
- if (nla_len(iter) != sizeof(uint16)) {
- ret = -EINVAL;
- goto exit;
- }
- cmd_data->local_id = nla_get_u16(iter);
- break;
- case NAN_ATTRIBUTE_SUBSCRIBE_COUNT:
- if (nla_len(iter) != sizeof(uint8)) {
- ret = -EINVAL;
- goto exit;
- }
- cmd_data->life_count = nla_get_u8(iter);
- break;
- case NAN_ATTRIBUTE_SSIREQUIREDFORMATCHINDICATION: {
- if (nla_len(iter) != sizeof(uint8)) {
- ret = -EINVAL;
- goto exit;
- }
- bit_flag = (u32)nla_get_u8(iter);
- cmd_data->flags |=
- bit_flag ? WL_NAN_SUB_MATCH_IF_SVC_INFO : 0;
- break;
- }
- case NAN_ATTRIBUTE_SUBSCRIBE_MATCH:
- case NAN_ATTRIBUTE_PUBLISH_MATCH: {
- if (nla_len(iter) != sizeof(uint8)) {
- ret = -EINVAL;
- goto exit;
- }
- flag_match = nla_get_u8(iter);
-
- switch (flag_match) {
- case NAN_MATCH_ALG_MATCH_CONTINUOUS:
- /* Default fw behaviour, no need to set explicitly */
- break;
- case NAN_MATCH_ALG_MATCH_ONCE:
- cmd_data->flags |= WL_NAN_MATCH_ONCE;
- break;
- case NAN_MATCH_ALG_MATCH_NEVER:
- cmd_data->flags |= WL_NAN_MATCH_NEVER;
- break;
- default:
- WL_ERR(("invalid nan match alg = %u\n", flag_match));
- ret = -EINVAL;
- goto exit;
- }
- break;
- }
- case NAN_ATTRIBUTE_SERVICERESPONSEFILTER:
- if (nla_len(iter) != sizeof(uint8)) {
- ret = -EINVAL;
- goto exit;
- }
- cmd_data->srf_type = nla_get_u8(iter);
- break;
- case NAN_ATTRIBUTE_SERVICERESPONSEINCLUDE:
- if (nla_len(iter) != sizeof(uint8)) {
- ret = -EINVAL;
- goto exit;
- }
- cmd_data->srf_include = nla_get_u8(iter);
- break;
- case NAN_ATTRIBUTE_USESERVICERESPONSEFILTER:
- if (nla_len(iter) != sizeof(uint8)) {
- ret = -EINVAL;
- goto exit;
- }
- cmd_data->use_srf = nla_get_u8(iter);
- break;
- case NAN_ATTRIBUTE_RX_MATCH_FILTER_LEN:
- if (nla_len(iter) != sizeof(uint16)) {
- ret = -EINVAL;
- goto exit;
- }
- if (cmd_data->rx_match.dlen) {
- WL_ERR(("trying to overwrite:%d\n", attr_type));
- ret = -EINVAL;
- goto exit;
- }
- cmd_data->rx_match.dlen = nla_get_u16(iter);
- if (cmd_data->rx_match.dlen > MAX_MATCH_FILTER_LEN) {
- ret = -EINVAL;
- WL_ERR_RLMT(("Not allowed beyond %d\n", MAX_MATCH_FILTER_LEN));
- goto exit;
- }
- break;
- case NAN_ATTRIBUTE_RX_MATCH_FILTER:
- if ((!cmd_data->rx_match.dlen) ||
- (nla_len(iter) != cmd_data->rx_match.dlen)) {
- WL_ERR(("RX match filter len wrong:%d,%d\n",
- cmd_data->rx_match.dlen, nla_len(iter)));
- ret = -EINVAL;
- goto exit;
- }
- if (cmd_data->rx_match.data) {
- WL_ERR(("trying to overwrite:%d\n", attr_type));
- ret = -EINVAL;
- goto exit;
- }
- cmd_data->rx_match.data =
- MALLOCZ(cfg->osh, cmd_data->rx_match.dlen);
- if (cmd_data->rx_match.data == NULL) {
- WL_ERR(("failed to allocate LEN=[%u]\n",
- cmd_data->rx_match.dlen));
- ret = -ENOMEM;
- goto exit;
- }
- ret = memcpy_s(cmd_data->rx_match.data, cmd_data->rx_match.dlen,
- nla_data(iter), nla_len(iter));
- if (ret != BCME_OK) {
- WL_ERR(("Failed to copy rx match data\n"));
- return ret;
- }
- break;
- case NAN_ATTRIBUTE_TX_MATCH_FILTER_LEN:
- if (nla_len(iter) != sizeof(uint16)) {
- ret = -EINVAL;
- goto exit;
- }
- if (cmd_data->tx_match.dlen) {
- WL_ERR(("trying to overwrite:%d\n", attr_type));
- ret = -EINVAL;
- goto exit;
- }
- cmd_data->tx_match.dlen = nla_get_u16(iter);
- if (cmd_data->tx_match.dlen > MAX_MATCH_FILTER_LEN) {
- ret = -EINVAL;
- WL_ERR_RLMT(("Not allowed beyond %d\n", MAX_MATCH_FILTER_LEN));
- goto exit;
- }
- break;
- case NAN_ATTRIBUTE_TX_MATCH_FILTER:
- if ((!cmd_data->tx_match.dlen) ||
- (nla_len(iter) != cmd_data->tx_match.dlen)) {
- WL_ERR(("TX match filter len wrong:%d,%d\n",
- cmd_data->tx_match.dlen, nla_len(iter)));
- ret = -EINVAL;
- goto exit;
- }
- if (cmd_data->tx_match.data) {
- WL_ERR(("trying to overwrite:%d\n", attr_type));
- ret = -EINVAL;
- goto exit;
- }
- cmd_data->tx_match.data =
- MALLOCZ(cfg->osh, cmd_data->tx_match.dlen);
- if (cmd_data->tx_match.data == NULL) {
- WL_ERR(("failed to allocate LEN=[%u]\n",
- cmd_data->tx_match.dlen));
- ret = -EINVAL;
- goto exit;
- }
- ret = memcpy_s(cmd_data->tx_match.data, cmd_data->tx_match.dlen,
- nla_data(iter), nla_len(iter));
- if (ret != BCME_OK) {
- WL_ERR(("Failed to copy tx match data\n"));
- return ret;
- }
- break;
- case NAN_ATTRIBUTE_MAC_ADDR_LIST_NUM_ENTRIES:
- if (nla_len(iter) != sizeof(uint16)) {
- ret = -EINVAL;
- goto exit;
- }
- if (cmd_data->mac_list.num_mac_addr) {
- WL_ERR(("trying to overwrite:%d\n", attr_type));
- ret = -EINVAL;
- goto exit;
- }
- cmd_data->mac_list.num_mac_addr = nla_get_u16(iter);
- break;
- case NAN_ATTRIBUTE_MAC_ADDR_LIST:
- if ((!cmd_data->mac_list.num_mac_addr) ||
- (nla_len(iter) != (cmd_data->mac_list.num_mac_addr * ETHER_ADDR_LEN))) {
- WL_ERR(("wrong mac list len:%d,%d\n",
- cmd_data->mac_list.num_mac_addr, nla_len(iter)));
- ret = -EINVAL;
- goto exit;
- }
- if (cmd_data->mac_list.list) {
- WL_ERR(("trying to overwrite:%d\n", attr_type));
- ret = -EINVAL;
- goto exit;
- }
- cmd_data->mac_list.list =
- MALLOCZ(cfg->osh, (cmd_data->mac_list.num_mac_addr
- * ETHER_ADDR_LEN));
- if (cmd_data->mac_list.list == NULL) {
- WL_ERR(("failed to allocate LEN=[%u]\n",
- (cmd_data->mac_list.num_mac_addr * ETHER_ADDR_LEN)));
- ret = -ENOMEM;
- goto exit;
- }
- ret = memcpy_s(cmd_data->mac_list.list,
- (cmd_data->mac_list.num_mac_addr * ETHER_ADDR_LEN),
- nla_data(iter), nla_len(iter));
- if (ret != BCME_OK) {
- WL_ERR(("Failed to copy list of mac addresses\n"));
- return ret;
- }
- break;
- case NAN_ATTRIBUTE_TX_TYPE:
- if (nla_len(iter) != sizeof(uint8)) {
- ret = -EINVAL;
- goto exit;
- }
- val_u8 = nla_get_u8(iter);
- if (val_u8 == 0) {
- cmd_data->flags |= WL_NAN_PUB_BCAST;
- WL_TRACE(("NAN_ATTRIBUTE_TX_TYPE: flags=NAN_PUB_BCAST\n"));
- }
- break;
- case NAN_ATTRIBUTE_SDE_CONTROL_CONFIG_DP:
- if (nla_len(iter) != sizeof(uint8)) {
- ret = -EINVAL;
- goto exit;
- }
- if (nla_get_u8(iter) == 1) {
- cmd_data->sde_control_flag
- |= NAN_SDE_CF_DP_REQUIRED;
- break;
- }
- break;
- case NAN_ATTRIBUTE_SDE_CONTROL_RANGE_SUPPORT:
- if (nla_len(iter) != sizeof(uint8)) {
- ret = -EINVAL;
- goto exit;
- }
- cmd_data->sde_control_config = TRUE;
- if (nla_get_u8(iter) == 1) {
- cmd_data->sde_control_flag
- |= NAN_SDE_CF_RANGING_REQUIRED;
- break;
- }
- break;
- case NAN_ATTRIBUTE_SDE_CONTROL_DP_TYPE:
- if (nla_len(iter) != sizeof(uint8)) {
- ret = -EINVAL;
- goto exit;
- }
- if (nla_get_u8(iter) == 1) {
- cmd_data->sde_control_flag
- |= NAN_SDE_CF_MULTICAST_TYPE;
- break;
- }
- break;
- case NAN_ATTRIBUTE_SDE_CONTROL_SECURITY:
- if (nla_len(iter) != sizeof(uint8)) {
- ret = -EINVAL;
- goto exit;
- }
- if (nla_get_u8(iter) == 1) {
- cmd_data->sde_control_flag
- |= NAN_SDE_CF_SECURITY_REQUIRED;
- break;
- }
- break;
- case NAN_ATTRIBUTE_RECV_IND_CFG:
- if (nla_len(iter) != sizeof(uint8)) {
- ret = -EINVAL;
- goto exit;
- }
- cmd_data->recv_ind_flag = nla_get_u8(iter);
- break;
- case NAN_ATTRIBUTE_CIPHER_SUITE_TYPE:
- if (nla_len(iter) != sizeof(uint8)) {
- ret = -EINVAL;
- goto exit;
- }
- cmd_data->csid = nla_get_u8(iter);
- WL_TRACE(("CSID = %u\n", cmd_data->csid));
- break;
- case NAN_ATTRIBUTE_KEY_TYPE:
- if (nla_len(iter) != sizeof(uint8)) {
- ret = -EINVAL;
- goto exit;
- }
- cmd_data->key_type = nla_get_u8(iter);
- WL_TRACE(("Key Type = %u\n", cmd_data->key_type));
- break;
- case NAN_ATTRIBUTE_KEY_LEN:
- if (nla_len(iter) != sizeof(uint32)) {
- ret = -EINVAL;
- goto exit;
- }
- if (cmd_data->key.dlen) {
- WL_ERR(("trying to overwrite:%d\n", attr_type));
- ret = -EINVAL;
- goto exit;
- }
- cmd_data->key.dlen = nla_get_u32(iter);
- if ((!cmd_data->key.dlen) || (cmd_data->key.dlen > WL_NAN_NCS_SK_PMK_LEN)) {
- WL_ERR(("invalid key length = %u\n",
- cmd_data->key.dlen));
- break;
- }
- WL_TRACE(("valid key length = %u\n", cmd_data->key.dlen));
- break;
- case NAN_ATTRIBUTE_KEY_DATA:
- if (!cmd_data->key.dlen ||
- (nla_len(iter) != cmd_data->key.dlen)) {
- WL_ERR(("failed to allocate key data by invalid len=%d,%d\n",
- cmd_data->key.dlen, nla_len(iter)));
- ret = -EINVAL;
- goto exit;
- }
- if (cmd_data->key.data) {
- WL_ERR(("trying to overwrite:%d\n", attr_type));
- ret = -EINVAL;
- goto exit;
- }
-
- cmd_data->key.data = MALLOCZ(cfg->osh, NAN_MAX_PMK_LEN);
- if (cmd_data->key.data == NULL) {
- WL_ERR(("failed to allocate key data, len=%d\n",
- cmd_data->key.dlen));
- ret = -ENOMEM;
- goto exit;
- }
- ret = memcpy_s(cmd_data->key.data, NAN_MAX_PMK_LEN,
- nla_data(iter), nla_len(iter));
- if (ret != BCME_OK) {
- WL_ERR(("Failed to key data\n"));
- return ret;
- }
- break;
- case NAN_ATTRIBUTE_RSSI_THRESHOLD_FLAG:
- if (nla_len(iter) != sizeof(uint8)) {
- ret = -EINVAL;
- goto exit;
- }
- if (nla_get_u8(iter) == 1) {
- cmd_data->flags |=
- WL_NAN_RANGE_LIMITED;
- break;
- }
- break;
- case NAN_ATTRIBUTE_DISC_IND_CFG:
- if (nla_len(iter) != sizeof(uint8)) {
- ret = -EINVAL;
- goto exit;
- }
- cmd_data->disc_ind_cfg = nla_get_u8(iter);
- break;
- case NAN_ATTRIBUTE_SDEA_SERVICE_SPECIFIC_INFO_LEN:
- if (nla_len(iter) != sizeof(uint16)) {
- ret = -EINVAL;
- goto exit;
- }
- if (cmd_data->sde_svc_info.dlen) {
- WL_ERR(("trying to overwrite:%d\n", attr_type));
- ret = -EINVAL;
- goto exit;
- }
- cmd_data->sde_svc_info.dlen = nla_get_u16(iter);
- if (cmd_data->sde_svc_info.dlen > MAX_SDEA_SVC_INFO_LEN) {
- ret = -EINVAL;
- WL_ERR_RLMT(("Not allowed beyond %d\n", MAX_SDEA_SVC_INFO_LEN));
- goto exit;
- }
- break;
- case NAN_ATTRIBUTE_SDEA_SERVICE_SPECIFIC_INFO:
- if ((!cmd_data->sde_svc_info.dlen) ||
- (nla_len(iter) != cmd_data->sde_svc_info.dlen)) {
- WL_ERR(("wrong sdea info len:%d,%d\n",
- cmd_data->sde_svc_info.dlen, nla_len(iter)));
- ret = -EINVAL;
- goto exit;
- }
- if (cmd_data->sde_svc_info.data) {
- WL_ERR(("trying to overwrite:%d\n", attr_type));
- ret = -EINVAL;
- goto exit;
- }
- cmd_data->sde_svc_info.data = MALLOCZ(cfg->osh,
- cmd_data->sde_svc_info.dlen);
- if (cmd_data->sde_svc_info.data == NULL) {
- WL_ERR(("failed to allocate svc info data, len=%d\n",
- cmd_data->sde_svc_info.dlen));
- ret = -ENOMEM;
- goto exit;
- }
- ret = memcpy_s(cmd_data->sde_svc_info.data,
- cmd_data->sde_svc_info.dlen,
- nla_data(iter), nla_len(iter));
- if (ret != BCME_OK) {
- WL_ERR(("Failed to sdea info data\n"));
- return ret;
- }
- break;
- case NAN_ATTRIBUTE_SECURITY:
- if (nla_len(iter) != sizeof(uint8)) {
- ret = -EINVAL;
- goto exit;
- }
- cmd_data->ndp_cfg.security_cfg = nla_get_u8(iter);
- break;
- case NAN_ATTRIBUTE_RANGING_INTERVAL:
- if (nla_len(iter) != sizeof(uint32)) {
- ret = -EINVAL;
- goto exit;
- }
- cmd_data->ranging_intvl_msec = nla_get_u32(iter);
- break;
- case NAN_ATTRIBUTE_RANGING_INGRESS_LIMIT:
- if (nla_len(iter) != sizeof(uint32)) {
- ret = -EINVAL;
- goto exit;
- }
- cmd_data->ingress_limit = nla_get_u32(iter);
- break;
- case NAN_ATTRIBUTE_RANGING_EGRESS_LIMIT:
- if (nla_len(iter) != sizeof(uint32)) {
- ret = -EINVAL;
- goto exit;
- }
- cmd_data->egress_limit = nla_get_u32(iter);
- break;
- case NAN_ATTRIBUTE_RANGING_INDICATION:
- if (nla_len(iter) != sizeof(uint32)) {
- ret = -EINVAL;
- goto exit;
- }
- cmd_data->ranging_indication = nla_get_u32(iter);
- break;
- /* Nan accept policy: Per service basis policy
- * Based on this policy(ALL/NONE), responder side
- * will send ACCEPT/REJECT
- */
- case NAN_ATTRIBUTE_SVC_RESPONDER_POLICY:
- if (nla_len(iter) != sizeof(uint8)) {
- ret = -EINVAL;
- goto exit;
- }
- cmd_data->service_responder_policy = nla_get_u8(iter);
- break;
- default:
- WL_ERR(("Unknown type, %d\n", attr_type));
- ret = -EINVAL;
- goto exit;
- }
- }
-exit:
- /* We need to call set_config_handler b/f calling start enable TBD */
- NAN_DBG_EXIT();
- return ret;
-}
-
-static int
-wl_cfgvendor_nan_parse_args(struct wiphy *wiphy, const void *buf,
- int len, nan_config_cmd_data_t *cmd_data, uint32 *nan_attr_mask)
-{
- int ret = BCME_OK;
- int attr_type;
- int rem = len;
- const struct nlattr *iter;
- struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
- int chan;
- u8 sid_beacon = 0, sub_sid_beacon = 0;
-
- NAN_DBG_ENTER();
-
- nla_for_each_attr(iter, buf, len, rem) {
- attr_type = nla_type(iter);
- WL_TRACE(("attr: %s (%u)\n", nan_attr_to_str(attr_type), attr_type));
-
- switch (attr_type) {
- /* NAN Enable request attributes */
- case NAN_ATTRIBUTE_2G_SUPPORT:{
- if (nla_len(iter) != sizeof(uint8)) {
- ret = -EINVAL;
- goto exit;
- }
- cmd_data->support_2g = nla_get_u8(iter);
- *nan_attr_mask |= NAN_ATTR_SUPPORT_2G_CONFIG;
- break;
- }
- case NAN_ATTRIBUTE_5G_SUPPORT:{
- if (nla_len(iter) != sizeof(uint8)) {
- ret = -EINVAL;
- goto exit;
- }
- cmd_data->support_5g = nla_get_u8(iter);
- *nan_attr_mask |= NAN_ATTR_SUPPORT_5G_CONFIG;
- break;
- }
- case NAN_ATTRIBUTE_CLUSTER_LOW: {
- if (nla_len(iter) != sizeof(uint16)) {
- ret = -EINVAL;
- goto exit;
- }
- cmd_data->clus_id.octet[5] = nla_get_u16(iter);
- break;
- }
- case NAN_ATTRIBUTE_CLUSTER_HIGH: {
- if (nla_len(iter) != sizeof(uint16)) {
- ret = -EINVAL;
- goto exit;
- }
- cmd_data->clus_id.octet[4] = nla_get_u16(iter);
- break;
- }
- case NAN_ATTRIBUTE_SID_BEACON: {
- if (nla_len(iter) != sizeof(uint8)) {
- ret = -EINVAL;
- goto exit;
- }
- sid_beacon = nla_get_u8(iter);
- cmd_data->sid_beacon.sid_enable = (sid_beacon & 0x01);
- if (cmd_data->sid_beacon.sid_enable) {
- cmd_data->sid_beacon.sid_count = (sid_beacon >> 1);
- *nan_attr_mask |= NAN_ATTR_SID_BEACON_CONFIG;
- }
- break;
- }
- case NAN_ATTRIBUTE_SUB_SID_BEACON: {
- if (nla_len(iter) != sizeof(uint8)) {
- ret = -EINVAL;
- goto exit;
- }
- sub_sid_beacon = nla_get_u8(iter);
- cmd_data->sid_beacon.sub_sid_enable = (sub_sid_beacon & 0x01);
- if (cmd_data->sid_beacon.sub_sid_enable) {
- cmd_data->sid_beacon.sub_sid_count = (sub_sid_beacon >> 1);
- *nan_attr_mask |= NAN_ATTR_SUB_SID_BEACON_CONFIG;
- }
- break;
- }
- case NAN_ATTRIBUTE_SYNC_DISC_2G_BEACON:
- if (nla_len(iter) != sizeof(uint8)) {
- ret = -EINVAL;
- goto exit;
- }
- cmd_data->beacon_2g_val = nla_get_u8(iter);
- *nan_attr_mask |= NAN_ATTR_SYNC_DISC_2G_BEACON_CONFIG;
- break;
- case NAN_ATTRIBUTE_SYNC_DISC_5G_BEACON:
- if (nla_len(iter) != sizeof(uint8)) {
- ret = -EINVAL;
- goto exit;
- }
- cmd_data->beacon_5g_val = nla_get_u8(iter);
- *nan_attr_mask |= NAN_ATTR_SYNC_DISC_5G_BEACON_CONFIG;
- break;
- case NAN_ATTRIBUTE_SDF_2G_SUPPORT:
- if (nla_len(iter) != sizeof(uint8)) {
- ret = -EINVAL;
- goto exit;
- }
- cmd_data->sdf_2g_val = nla_get_u8(iter);
- *nan_attr_mask |= NAN_ATTR_SDF_2G_SUPPORT_CONFIG;
- break;
- case NAN_ATTRIBUTE_SDF_5G_SUPPORT:
- if (nla_len(iter) != sizeof(uint8)) {
- ret = -EINVAL;
- goto exit;
- }
- cmd_data->sdf_5g_val = nla_get_u8(iter);
- *nan_attr_mask |= NAN_ATTR_SDF_5G_SUPPORT_CONFIG;
- break;
- case NAN_ATTRIBUTE_HOP_COUNT_LIMIT:
- if (nla_len(iter) != sizeof(uint8)) {
- ret = -EINVAL;
- goto exit;
- }
- cmd_data->hop_count_limit = nla_get_u8(iter);
- *nan_attr_mask |= NAN_ATTR_HOP_COUNT_LIMIT_CONFIG;
- break;
- case NAN_ATTRIBUTE_RANDOM_TIME:
- if (nla_len(iter) != sizeof(uint8)) {
- ret = -EINVAL;
- goto exit;
- }
- cmd_data->metrics.random_factor = nla_get_u8(iter);
- *nan_attr_mask |= NAN_ATTR_RAND_FACTOR_CONFIG;
- break;
- case NAN_ATTRIBUTE_MASTER_PREF:
- if (nla_len(iter) != sizeof(uint8)) {
- ret = -EINVAL;
- goto exit;
- }
- cmd_data->metrics.master_pref = nla_get_u8(iter);
- break;
- case NAN_ATTRIBUTE_OUI:
- if (nla_len(iter) != sizeof(uint32)) {
- ret = -EINVAL;
- goto exit;
- }
- cmd_data->nan_oui = nla_get_u32(iter);
- *nan_attr_mask |= NAN_ATTR_OUI_CONFIG;
- WL_TRACE(("nan_oui=%d\n", cmd_data->nan_oui));
- break;
- case NAN_ATTRIBUTE_WARMUP_TIME:
- if (nla_len(iter) != sizeof(uint16)) {
- ret = -EINVAL;
- goto exit;
- }
- cmd_data->warmup_time = nla_get_u16(iter);
- break;
- case NAN_ATTRIBUTE_AMBTT:
- case NAN_ATTRIBUTE_MASTER_RANK:
- WL_DBG(("Unhandled attribute, %d\n", attr_type));
- break;
- case NAN_ATTRIBUTE_CHANNEL: {
- if (nla_len(iter) != sizeof(uint32)) {
- ret = -EINVAL;
- goto exit;
- }
- /* take the default channel start_factor frequency */
- chan = wf_mhz2channel((uint)nla_get_u32(iter), 0);
- if (chan <= CH_MAX_2G_CHANNEL) {
- cmd_data->chanspec[0] = wf_channel2chspec(chan, WL_CHANSPEC_BW_20);
- } else {
- cmd_data->chanspec[0] = wf_channel2chspec(chan, WL_CHANSPEC_BW_80);
- }
- if (cmd_data->chanspec[0] == 0) {
- WL_ERR(("Channel is not valid \n"));
- ret = -EINVAL;
- goto exit;
- }
- WL_TRACE(("valid chanspec, chanspec = 0x%04x \n",
- cmd_data->chanspec[0]));
- break;
- }
- case NAN_ATTRIBUTE_24G_CHANNEL: {
- if (nla_len(iter) != sizeof(uint32)) {
- ret = -EINVAL;
- goto exit;
- }
- /* take the default channel start_factor frequency */
- chan = wf_mhz2channel((uint)nla_get_u32(iter), 0);
- /* 20MHz as BW */
- cmd_data->chanspec[1] = wf_channel2chspec(chan, WL_CHANSPEC_BW_20);
- if (cmd_data->chanspec[1] == 0) {
- WL_ERR((" 2.4GHz Channel is not valid \n"));
- ret = -EINVAL;
- break;
- }
- *nan_attr_mask |= NAN_ATTR_2G_CHAN_CONFIG;
- WL_TRACE(("valid 2.4GHz chanspec, chanspec = 0x%04x \n",
- cmd_data->chanspec[1]));
- break;
- }
- case NAN_ATTRIBUTE_5G_CHANNEL: {
- if (nla_len(iter) != sizeof(uint32)) {
- ret = -EINVAL;
- goto exit;
- }
- /* take the default channel start_factor frequency */
- chan = wf_mhz2channel((uint)nla_get_u32(iter), 0);
- /* 20MHz as BW */
- cmd_data->chanspec[2] = wf_channel2chspec(chan, WL_CHANSPEC_BW_20);
- if (cmd_data->chanspec[2] == 0) {
- WL_ERR((" 5GHz Channel is not valid \n"));
- ret = -EINVAL;
- break;
- }
- *nan_attr_mask |= NAN_ATTR_5G_CHAN_CONFIG;
- WL_TRACE(("valid 5GHz chanspec, chanspec = 0x%04x \n",
- cmd_data->chanspec[2]));
- break;
- }
- case NAN_ATTRIBUTE_CONF_CLUSTER_VAL:
- if (nla_len(iter) != sizeof(uint8)) {
- ret = -EINVAL;
- goto exit;
- }
- cmd_data->config_cluster_val = nla_get_u8(iter);
- *nan_attr_mask |= NAN_ATTR_CLUSTER_VAL_CONFIG;
- break;
- case NAN_ATTRIBUTE_DWELL_TIME:
- if (nla_len(iter) != sizeof(uint8)) {
- ret = -EINVAL;
- goto exit;
- }
- cmd_data->dwell_time[0] = nla_get_u8(iter);
- *nan_attr_mask |= NAN_ATTR_2G_DWELL_TIME_CONFIG;
- break;
- case NAN_ATTRIBUTE_SCAN_PERIOD:
- if (nla_len(iter) != sizeof(uint16)) {
- ret = -EINVAL;
- goto exit;
- }
- cmd_data->scan_period[0] = nla_get_u16(iter);
- *nan_attr_mask |= NAN_ATTR_2G_SCAN_PERIOD_CONFIG;
- break;
- case NAN_ATTRIBUTE_DWELL_TIME_5G:
- if (nla_len(iter) != sizeof(uint8)) {
- ret = -EINVAL;
- goto exit;
- }
- cmd_data->dwell_time[1] = nla_get_u8(iter);
- *nan_attr_mask |= NAN_ATTR_5G_DWELL_TIME_CONFIG;
- break;
- case NAN_ATTRIBUTE_SCAN_PERIOD_5G:
- if (nla_len(iter) != sizeof(uint16)) {
- ret = -EINVAL;
- goto exit;
- }
- cmd_data->scan_period[1] = nla_get_u16(iter);
- *nan_attr_mask |= NAN_ATTR_5G_SCAN_PERIOD_CONFIG;
- break;
- case NAN_ATTRIBUTE_AVAIL_BIT_MAP:
- if (nla_len(iter) != sizeof(uint32)) {
- ret = -EINVAL;
- goto exit;
- }
- cmd_data->bmap = nla_get_u32(iter);
- break;
- case NAN_ATTRIBUTE_ENTRY_CONTROL:
- if (nla_len(iter) != sizeof(uint8)) {
- ret = -EINVAL;
- goto exit;
- }
- cmd_data->avail_params.duration = nla_get_u8(iter);
- break;
- case NAN_ATTRIBUTE_RSSI_CLOSE:
- if (nla_len(iter) != sizeof(uint8)) {
- ret = -EINVAL;
- goto exit;
- }
- cmd_data->rssi_attr.rssi_close_2dot4g_val = nla_get_s8(iter);
- *nan_attr_mask |= NAN_ATTR_RSSI_CLOSE_CONFIG;
- break;
- case NAN_ATTRIBUTE_RSSI_MIDDLE:
- if (nla_len(iter) != sizeof(uint8)) {
- ret = -EINVAL;
- goto exit;
- }
- cmd_data->rssi_attr.rssi_middle_2dot4g_val = nla_get_s8(iter);
- *nan_attr_mask |= NAN_ATTR_RSSI_MIDDLE_2G_CONFIG;
- break;
- case NAN_ATTRIBUTE_RSSI_PROXIMITY:
- if (nla_len(iter) != sizeof(uint8)) {
- ret = -EINVAL;
- goto exit;
- }
- cmd_data->rssi_attr.rssi_proximity_2dot4g_val = nla_get_s8(iter);
- *nan_attr_mask |= NAN_ATTR_RSSI_PROXIMITY_2G_CONFIG;
- break;
- case NAN_ATTRIBUTE_RSSI_CLOSE_5G:
- if (nla_len(iter) != sizeof(uint8)) {
- ret = -EINVAL;
- goto exit;
- }
- cmd_data->rssi_attr.rssi_close_5g_val = nla_get_s8(iter);
- *nan_attr_mask |= NAN_ATTR_RSSI_CLOSE_5G_CONFIG;
- break;
- case NAN_ATTRIBUTE_RSSI_MIDDLE_5G:
- if (nla_len(iter) != sizeof(uint8)) {
- ret = -EINVAL;
- goto exit;
- }
- cmd_data->rssi_attr.rssi_middle_5g_val = nla_get_s8(iter);
- *nan_attr_mask |= NAN_ATTR_RSSI_MIDDLE_5G_CONFIG;
- break;
- case NAN_ATTRIBUTE_RSSI_PROXIMITY_5G:
- if (nla_len(iter) != sizeof(uint8)) {
- ret = -EINVAL;
- goto exit;
- }
- cmd_data->rssi_attr.rssi_proximity_5g_val = nla_get_s8(iter);
- *nan_attr_mask |= NAN_ATTR_RSSI_PROXIMITY_5G_CONFIG;
- break;
- case NAN_ATTRIBUTE_RSSI_WINDOW_SIZE:
- if (nla_len(iter) != sizeof(uint8)) {
- ret = -EINVAL;
- goto exit;
- }
- cmd_data->rssi_attr.rssi_window_size = nla_get_u8(iter);
- *nan_attr_mask |= NAN_ATTR_RSSI_WINDOW_SIZE_CONFIG;
- break;
- case NAN_ATTRIBUTE_CIPHER_SUITE_TYPE:
- if (nla_len(iter) != sizeof(uint8)) {
- ret = -EINVAL;
- goto exit;
- }
- cmd_data->csid = nla_get_u8(iter);
- WL_TRACE(("CSID = %u\n", cmd_data->csid));
- break;
- case NAN_ATTRIBUTE_SCID_LEN:
- if (nla_len(iter) != sizeof(uint32)) {
- ret = -EINVAL;
- goto exit;
- }
- if (cmd_data->scid.dlen) {
- WL_ERR(("trying to overwrite:%d\n", attr_type));
- ret = -EINVAL;
- goto exit;
- }
- cmd_data->scid.dlen = nla_get_u32(iter);
- if (cmd_data->scid.dlen > MAX_SCID_LEN) {
- ret = -EINVAL;
- WL_ERR_RLMT(("Not allowed beyond %d\n", MAX_SCID_LEN));
- goto exit;
- }
- WL_TRACE(("valid scid length = %u\n", cmd_data->scid.dlen));
- break;
- case NAN_ATTRIBUTE_SCID:
- if (!cmd_data->scid.dlen || (nla_len(iter) != cmd_data->scid.dlen)) {
- WL_ERR(("wrong scid len:%d,%d\n", cmd_data->scid.dlen,
- nla_len(iter)));
- ret = -EINVAL;
- goto exit;
- }
- if (cmd_data->scid.data) {
- WL_ERR(("trying to overwrite:%d\n", attr_type));
- ret = -EINVAL;
- goto exit;
- }
-
- cmd_data->scid.data = MALLOCZ(cfg->osh, cmd_data->scid.dlen);
- if (cmd_data->scid.data == NULL) {
- WL_ERR(("failed to allocate scid, len=%d\n",
- cmd_data->scid.dlen));
- ret = -ENOMEM;
- goto exit;
- }
- ret = memcpy_s(cmd_data->scid.data, cmd_data->scid.dlen,
- nla_data(iter), nla_len(iter));
- if (ret != BCME_OK) {
- WL_ERR(("Failed to scid data\n"));
- return ret;
- }
- break;
- case NAN_ATTRIBUTE_2G_AWAKE_DW:
- if (nla_len(iter) != sizeof(uint32)) {
- ret = -EINVAL;
- goto exit;
- }
- if (nla_get_u32(iter) > NAN_MAX_AWAKE_DW_INTERVAL) {
- WL_ERR(("%s: Invalid/Out of bound value = %u\n",
- __FUNCTION__, nla_get_u32(iter)));
- ret = -EINVAL;
- goto exit;
- }
- if (nla_get_u32(iter)) {
- cmd_data->awake_dws.dw_interval_2g =
- 1 << (nla_get_u32(iter)-1);
- }
- *nan_attr_mask |= NAN_ATTR_2G_DW_CONFIG;
- break;
- case NAN_ATTRIBUTE_5G_AWAKE_DW:
- if (nla_len(iter) != sizeof(uint32)) {
- ret = -EINVAL;
- goto exit;
- }
- if (nla_get_u32(iter) > NAN_MAX_AWAKE_DW_INTERVAL) {
- WL_ERR(("%s: Invalid/Out of bound value = %u\n",
- __FUNCTION__, nla_get_u32(iter)));
- ret = BCME_BADARG;
- break;
- }
- if (nla_get_u32(iter)) {
- cmd_data->awake_dws.dw_interval_5g =
- 1 << (nla_get_u32(iter)-1);
- }
- *nan_attr_mask |= NAN_ATTR_5G_DW_CONFIG;
- break;
- case NAN_ATTRIBUTE_DISC_IND_CFG:
- if (nla_len(iter) != sizeof(uint8)) {
- ret = -EINVAL;
- goto exit;
- }
- cmd_data->disc_ind_cfg = nla_get_u8(iter);
- break;
- case NAN_ATTRIBUTE_MAC_ADDR:
- if (nla_len(iter) != ETHER_ADDR_LEN) {
- ret = -EINVAL;
- goto exit;
- }
- ret = memcpy_s((char*)&cmd_data->mac_addr, ETHER_ADDR_LEN,
- (char*)nla_data(iter), nla_len(iter));
- if (ret != BCME_OK) {
- WL_ERR(("Failed to copy mac addr\n"));
- return ret;
- }
- break;
- case NAN_ATTRIBUTE_RANDOMIZATION_INTERVAL:
- if (nla_len(iter) != sizeof(uint32)) {
- ret = -EINVAL;
- goto exit;
- }
- cmd_data->nmi_rand_intvl = nla_get_u8(iter);
- if (cmd_data->nmi_rand_intvl > 0) {
- cfg->nancfg.mac_rand = true;
- } else {
- cfg->nancfg.mac_rand = false;
- }
- break;
- default:
- WL_ERR(("%s: Unknown type, %d\n", __FUNCTION__, attr_type));
- ret = -EINVAL;
- goto exit;
- }
- }
-
-exit:
- /* We need to call set_config_handler before calling start enable (TBD) */
- NAN_DBG_EXIT();
- if (ret) {
- WL_ERR(("%s: Failed to parse attribute %d ret %d",
- __FUNCTION__, attr_type, ret));
- }
- return ret;
-
-}
-
-static int
-wl_cfgvendor_nan_dp_estb_event_data_filler(struct sk_buff *msg,
- nan_event_data_t *event_data) {
- int ret = BCME_OK;
- ret = nla_put_u32(msg, NAN_ATTRIBUTE_NDP_ID, event_data->ndp_id);
- if (unlikely(ret)) {
- WL_ERR(("Failed to put NDP ID, ret=%d\n", ret));
- goto fail;
- }
- /*
- * NDI mac address of the peer
- * (required to derive target ipv6 address)
- */
- ret = nla_put(msg, NAN_ATTRIBUTE_PEER_NDI_MAC_ADDR, ETH_ALEN,
- event_data->responder_ndi.octet);
- if (unlikely(ret)) {
- WL_ERR(("Failed to put resp ndi, ret=%d\n", ret));
- goto fail;
- }
- ret = nla_put_u8(msg, NAN_ATTRIBUTE_RSP_CODE, event_data->status);
- if (unlikely(ret)) {
- WL_ERR(("Failed to put response code, ret=%d\n", ret));
- goto fail;
- }
- if (event_data->svc_info.dlen && event_data->svc_info.data) {
- ret = nla_put_u16(msg, NAN_ATTRIBUTE_SERVICE_SPECIFIC_INFO_LEN,
- event_data->svc_info.dlen);
- if (unlikely(ret)) {
- WL_ERR(("Failed to put svc info len, ret=%d\n", ret));
- goto fail;
- }
- ret = nla_put(msg, NAN_ATTRIBUTE_SERVICE_SPECIFIC_INFO,
- event_data->svc_info.dlen, event_data->svc_info.data);
- if (unlikely(ret)) {
- WL_ERR(("Failed to put svc info, ret=%d\n", ret));
- goto fail;
- }
- }
-
-fail:
- return ret;
-}
-static int
-wl_cfgvendor_nan_dp_ind_event_data_filler(struct sk_buff *msg,
- nan_event_data_t *event_data) {
- int ret = BCME_OK;
-
- ret = nla_put_u16(msg, NAN_ATTRIBUTE_PUBLISH_ID,
- event_data->pub_id);
- if (unlikely(ret)) {
- WL_ERR(("Failed to put pub ID, ret=%d\n", ret));
- goto fail;
- }
- ret = nla_put_u32(msg, NAN_ATTRIBUTE_NDP_ID, event_data->ndp_id);
- if (unlikely(ret)) {
- WL_ERR(("Failed to put NDP ID, ret=%d\n", ret));
- goto fail;
- }
- /* Discovery MAC addr of the peer/initiator */
- ret = nla_put(msg, NAN_ATTRIBUTE_MAC_ADDR, ETH_ALEN,
- event_data->remote_nmi.octet);
- if (unlikely(ret)) {
- WL_ERR(("Failed to put remote NMI, ret=%d\n", ret));
- goto fail;
- }
- ret = nla_put_u8(msg, NAN_ATTRIBUTE_SECURITY, event_data->security);
- if (unlikely(ret)) {
- WL_ERR(("Failed to put security, ret=%d\n", ret));
- goto fail;
- }
- if (event_data->svc_info.dlen && event_data->svc_info.data) {
- ret = nla_put_u16(msg, NAN_ATTRIBUTE_SERVICE_SPECIFIC_INFO_LEN,
- event_data->svc_info.dlen);
- if (unlikely(ret)) {
- WL_ERR(("Failed to put svc info len, ret=%d\n", ret));
- goto fail;
- }
- ret = nla_put(msg, NAN_ATTRIBUTE_SERVICE_SPECIFIC_INFO,
- event_data->svc_info.dlen, event_data->svc_info.data);
- if (unlikely(ret)) {
- WL_ERR(("Failed to put svc info, ret=%d\n", ret));
- goto fail;
- }
- }
-
-fail:
- return ret;
-}
-
-static int
-wl_cfgvendor_nan_tx_followup_ind_event_data_filler(struct sk_buff *msg,
- nan_event_data_t *event_data) {
- int ret = BCME_OK;
- ret = nla_put_u16(msg, NAN_ATTRIBUTE_TRANSAC_ID, event_data->token);
- if (unlikely(ret)) {
- WL_ERR(("Failed to put transaction id, ret=%d\n", ret));
- goto fail;
- }
- ret = nla_put_u8(msg, NAN_ATTRIBUTE_HANDLE, event_data->local_inst_id);
- if (unlikely(ret)) {
- WL_ERR(("Failed to put handle, ret=%d\n", ret));
- goto fail;
- }
- ret = nla_put_u16(msg, NAN_ATTRIBUTE_STATUS, event_data->status);
- if (unlikely(ret)) {
- WL_ERR(("Failed to put nan status, ret=%d\n", ret));
- goto fail;
- }
- if (event_data->status == NAN_STATUS_SUCCESS) {
- ret = nla_put(msg, NAN_ATTRIBUTE_REASON,
- strlen("NAN_STATUS_SUCCESS"), event_data->nan_reason);
- if (unlikely(ret)) {
- WL_ERR(("Failed to put nan reason, ret=%d\n", ret));
- goto fail;
- }
- } else {
- ret = nla_put(msg, NAN_ATTRIBUTE_REASON,
- strlen("NAN_STATUS_NO_OTA_ACK"), event_data->nan_reason);
- if (unlikely(ret)) {
- WL_ERR(("Failed to put nan reason, ret=%d\n", ret));
- goto fail;
- }
- }
-fail:
- return ret;
-}
-
-static int
-wl_cfgvendor_nan_svc_terminate_event_filler(struct sk_buff *msg,
- struct bcm_cfg80211 *cfg, int event_id, nan_event_data_t *event_data) {
- int ret = BCME_OK;
- ret = nla_put_u8(msg, NAN_ATTRIBUTE_HANDLE, event_data->local_inst_id);
- if (unlikely(ret)) {
- WL_ERR(("Failed to put handle, ret=%d\n", ret));
- goto fail;
- }
-
- if (event_id == GOOGLE_NAN_EVENT_SUBSCRIBE_TERMINATED) {
- ret = nla_put_u16(msg, NAN_ATTRIBUTE_SUBSCRIBE_ID,
- event_data->local_inst_id);
- if (unlikely(ret)) {
- WL_ERR(("Failed to put local inst id, ret=%d\n", ret));
- goto fail;
- }
- } else {
- ret = nla_put_u16(msg, NAN_ATTRIBUTE_PUBLISH_ID,
- event_data->local_inst_id);
- if (unlikely(ret)) {
- WL_ERR(("Failed to put local inst id, ret=%d\n", ret));
- goto fail;
- }
- }
- ret = nla_put_u16(msg, NAN_ATTRIBUTE_STATUS, event_data->status);
- if (unlikely(ret)) {
- WL_ERR(("Failed to put status, ret=%d\n", ret));
- goto fail;
- }
- if (event_data->status == NAN_STATUS_SUCCESS) {
- ret = nla_put(msg, NAN_ATTRIBUTE_REASON,
- strlen("NAN_STATUS_SUCCESS"), event_data->nan_reason);
- if (unlikely(ret)) {
- WL_ERR(("Failed to put nan reason, ret=%d\n", ret));
- goto fail;
- }
- } else {
- ret = nla_put(msg, NAN_ATTRIBUTE_REASON,
- strlen("NAN_STATUS_INTERNAL_FAILURE"), event_data->nan_reason);
- if (unlikely(ret)) {
- WL_ERR(("Failed to put nan reason, ret=%d\n", ret));
- goto fail;
- }
- }
-
- ret = wl_cfgnan_remove_inst_id(cfg, event_data->local_inst_id);
- if (ret) {
- WL_ERR(("failed to free svc instance-id[%d], ret=%d, event_id = %d\n",
- event_data->local_inst_id, ret, event_id));
- goto fail;
- }
-fail:
- return ret;
-}
-
-static int
-wl_cfgvendor_nan_opt_params_filler(struct sk_buff *msg,
- nan_event_data_t *event_data) {
- int ret = BCME_OK;
- /* service specific info data */
- if (event_data->svc_info.dlen && event_data->svc_info.data) {
- ret = nla_put_u16(msg, NAN_ATTRIBUTE_SERVICE_SPECIFIC_INFO_LEN,
- event_data->svc_info.dlen);
- if (unlikely(ret)) {
- WL_ERR(("Failed to put svc info len, ret=%d\n", ret));
- goto fail;
- }
- ret = nla_put(msg, NAN_ATTRIBUTE_SERVICE_SPECIFIC_INFO,
- event_data->svc_info.dlen, event_data->svc_info.data);
- if (unlikely(ret)) {
- WL_ERR(("Failed to put svc info, ret=%d\n", ret));
- goto fail;
- }
- WL_TRACE(("svc info len = %d\n", event_data->svc_info.dlen));
- }
-
- /* sdea service specific info data */
- if (event_data->sde_svc_info.dlen && event_data->sde_svc_info.data) {
- ret = nla_put_u16(msg, NAN_ATTRIBUTE_SDEA_SERVICE_SPECIFIC_INFO_LEN,
- event_data->sde_svc_info.dlen);
- if (unlikely(ret)) {
- WL_ERR(("Failed to put sdea svc info len, ret=%d\n", ret));
- goto fail;
- }
- ret = nla_put(msg, NAN_ATTRIBUTE_SDEA_SERVICE_SPECIFIC_INFO,
- event_data->sde_svc_info.dlen,
- event_data->sde_svc_info.data);
- if (unlikely(ret)) {
- WL_ERR(("Failed to put sdea svc info, ret=%d\n", ret));
- goto fail;
- }
- WL_TRACE(("sdea svc info len = %d\n", event_data->sde_svc_info.dlen));
- }
- /* service control discovery range limit */
- /* TODO: */
-
- /* service control binding bitmap */
- /* TODO: */
-fail:
- return ret;
-}
-
-static int
-wl_cfgvendor_nan_tx_followup_event_filler(struct sk_buff *msg,
- nan_event_data_t *event_data) {
- int ret = BCME_OK;
- /* In followup pkt, instance id and requestor instance id are configured
- * from the transmitter perspective. As the event is processed with the
- * role of receiver, the local handle should use requestor instance
- * id (peer_inst_id)
- */
- WL_TRACE(("handle=%d\n", event_data->requestor_id));
- WL_TRACE(("inst id (local id)=%d\n", event_data->local_inst_id));
- WL_TRACE(("peer id (remote id)=%d\n", event_data->requestor_id));
- WL_TRACE(("peer mac addr=" MACDBG "\n",
- MAC2STRDBG(event_data->remote_nmi.octet)));
- WL_TRACE(("peer rssi: %d\n", event_data->fup_rssi));
- WL_TRACE(("attribute no: %d\n", event_data->attr_num));
- WL_TRACE(("attribute len: %d\n", event_data->attr_list_len));
-
- ret = nla_put_u8(msg, NAN_ATTRIBUTE_HANDLE, event_data->requestor_id);
- if (unlikely(ret)) {
- WL_ERR(("Failed to put handle, ret=%d\n", ret));
- goto fail;
- }
- ret = nla_put_u32(msg, NAN_ATTRIBUTE_INST_ID, event_data->local_inst_id);
- if (unlikely(ret)) {
- WL_ERR(("Failed to put local inst id, ret=%d\n", ret));
- goto fail;
- }
- ret = nla_put_u16(msg, NAN_ATTRIBUTE_PEER_ID, event_data->requestor_id);
- if (unlikely(ret)) {
- WL_ERR(("Failed to put requestor inst id, ret=%d\n", ret));
- goto fail;
- }
- ret = nla_put(msg, NAN_ATTRIBUTE_MAC_ADDR, ETHER_ADDR_LEN,
- event_data->remote_nmi.octet);
- if (unlikely(ret)) {
- WL_ERR(("Failed to put remote nmi, ret=%d\n", ret));
- goto fail;
- }
- ret = nla_put_s8(msg, NAN_ATTRIBUTE_RSSI_PROXIMITY,
- event_data->fup_rssi);
- if (unlikely(ret)) {
- WL_ERR(("Failed to put fup rssi, ret=%d\n", ret));
- goto fail;
- }
-fail:
- return ret;
-}
-
-static int
-wl_cfgvendor_nan_sub_match_event_filler(struct sk_buff *msg,
- nan_event_data_t *event_data) {
- int ret = BCME_OK;
- WL_TRACE(("handle (sub_id)=%d\n", event_data->sub_id));
- WL_TRACE(("pub id=%d\n", event_data->pub_id));
- WL_TRACE(("sub id=%d\n", event_data->sub_id));
- WL_TRACE(("pub mac addr=" MACDBG "\n",
- MAC2STRDBG(event_data->remote_nmi.octet)));
- WL_TRACE(("attr no: %d\n", event_data->attr_num));
- WL_TRACE(("attr len: %d\n", event_data->attr_list_len));
-
- ret = nla_put_u8(msg, NAN_ATTRIBUTE_HANDLE, event_data->sub_id);
- if (unlikely(ret)) {
- WL_ERR(("Failed to put handle, ret=%d\n", ret));
- goto fail;
- }
- ret = nla_put_u16(msg, NAN_ATTRIBUTE_PUBLISH_ID, event_data->pub_id);
- if (unlikely(ret)) {
- WL_ERR(("Failed to put pub id, ret=%d\n", ret));
- goto fail;
- }
- ret = nla_put_u16(msg, NAN_ATTRIBUTE_SUBSCRIBE_ID, event_data->sub_id);
- if (unlikely(ret)) {
- WL_ERR(("Failed to put Sub Id, ret=%d\n", ret));
- goto fail;
- }
- ret = nla_put(msg, NAN_ATTRIBUTE_MAC_ADDR, ETHER_ADDR_LEN,
- event_data->remote_nmi.octet);
- if (unlikely(ret)) {
- WL_ERR(("Failed to put remote NMI, ret=%d\n", ret));
- goto fail;
- }
- if (event_data->publish_rssi) {
- event_data->publish_rssi = -event_data->publish_rssi;
- ret = nla_put_u8(msg, NAN_ATTRIBUTE_RSSI_PROXIMITY,
- event_data->publish_rssi);
- if (unlikely(ret)) {
- WL_ERR(("Failed to put publish rssi, ret=%d\n", ret));
- goto fail;
- }
- }
- if (event_data->ranging_result_present) {
- ret = nla_put_u32(msg, NAN_ATTRIBUTE_RANGING_INDICATION,
- event_data->ranging_ind);
- if (unlikely(ret)) {
- WL_ERR(("Failed to put ranging ind, ret=%d\n", ret));
- goto fail;
- }
- ret = nla_put_u32(msg, NAN_ATTRIBUTE_RANGING_RESULT,
- event_data->range_measurement_cm);
- if (unlikely(ret)) {
- WL_ERR(("Failed to put range measurement cm, ret=%d\n",
- ret));
- goto fail;
- }
- }
- /*
- * handling optional service control, service response filter
- */
- if (event_data->tx_match_filter.dlen && event_data->tx_match_filter.data) {
- ret = nla_put_u16(msg, NAN_ATTRIBUTE_TX_MATCH_FILTER_LEN,
- event_data->tx_match_filter.dlen);
- if (unlikely(ret)) {
- WL_ERR(("Failed to put tx match filter len, ret=%d\n",
- ret));
- goto fail;
- }
- ret = nla_put(msg, NAN_ATTRIBUTE_TX_MATCH_FILTER,
- event_data->tx_match_filter.dlen,
- event_data->tx_match_filter.data);
- if (unlikely(ret)) {
- WL_ERR(("Failed to put tx match filter data, ret=%d\n",
- ret));
- goto fail;
- }
- WL_TRACE(("tx matching filter (%d):\n",
- event_data->tx_match_filter.dlen));
- }
-
-fail:
- return ret;
-}
-
-static int
-wl_cfgvendor_nan_de_event_filler(struct sk_buff *msg, nan_event_data_t *event_data)
-{
- int ret = BCME_OK;
- ret = nla_put_u8(msg, NAN_ATTRIBUTE_ENABLE_STATUS, event_data->enabled);
- if (unlikely(ret)) {
- WL_ERR(("Failed to put event_data->enabled, ret=%d\n", ret));
- goto fail;
- }
- ret = nla_put_u8(msg, NAN_ATTRIBUTE_DE_EVENT_TYPE,
- event_data->nan_de_evt_type);
- if (unlikely(ret)) {
- WL_ERR(("Failed to put nan_de_evt_type, ret=%d\n", ret));
- goto fail;
- }
- ret = nla_put(msg, NAN_ATTRIBUTE_CLUSTER_ID, ETH_ALEN,
- event_data->clus_id.octet);
- if (unlikely(ret)) {
- WL_ERR(("Failed to put clust id, ret=%d\n", ret));
- goto fail;
- }
- /* OOB tests require local nmi */
- ret = nla_put(msg, NAN_ATTRIBUTE_MAC_ADDR, ETH_ALEN,
- event_data->local_nmi.octet);
- if (unlikely(ret)) {
- WL_ERR(("Failed to put NMI, ret=%d\n", ret));
- goto fail;
- }
-fail:
- return ret;
-}
-
-#ifdef RTT_SUPPORT
-s32
-wl_cfgvendor_send_as_rtt_legacy_event(struct wiphy *wiphy, struct net_device *dev,
- wl_nan_ev_rng_rpt_ind_t *range_res, uint32 status)
-{
- s32 ret = BCME_OK;
- gfp_t kflags = in_atomic() ? GFP_ATOMIC : GFP_KERNEL;
- rtt_report_t *report = NULL;
- struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
- struct sk_buff *msg = NULL;
- struct nlattr *rtt_nl_hdr;
-
- NAN_DBG_ENTER();
-
- report = MALLOCZ(cfg->osh, sizeof(*report));
- if (!report) {
- WL_ERR(("%s: memory allocation failed\n", __func__));
- ret = BCME_NOMEM;
- goto exit;
- }
- if (range_res) {
- report->distance = range_res->dist_mm/10;
- ret = memcpy_s(&report->addr, ETHER_ADDR_LEN,
- &range_res->peer_m_addr, ETHER_ADDR_LEN);
- if (ret != BCME_OK) {
- WL_ERR(("Failed to copy peer_m_addr\n"));
- goto exit;
- }
- }
- report->status = (rtt_reason_t)status;
- report->type = RTT_TWO_WAY;
-
-#if (defined(CONFIG_ARCH_MSM) && defined(SUPPORT_WDEV_CFG80211_VENDOR_EVENT_ALLOC)) || \
- LINUX_VERSION_CODE >= KERNEL_VERSION(4, 1, 0)
- msg = cfg80211_vendor_event_alloc(wiphy, NULL, 100,
- GOOGLE_RTT_COMPLETE_EVENT, kflags);
-#else
- msg = cfg80211_vendor_event_alloc(wiphy, 100, GOOGLE_RTT_COMPLETE_EVENT, kflags);
-#endif /* (defined(CONFIG_ARCH_MSM) && defined(SUPPORT_WDEV_CFG80211_VENDOR_EVENT_ALLOC)) || */
- /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 1, 0) */
- if (!msg) {
- WL_ERR(("%s: fail to allocate skb for vendor event\n", __FUNCTION__));
- ret = BCME_NOMEM;
- goto exit;
- }
-
- ret = nla_put_u32(msg, RTT_ATTRIBUTE_RESULTS_COMPLETE, 1);
- if (ret < 0) {
- WL_ERR(("Failed to put RTT_ATTRIBUTE_RESULTS_COMPLETE\n"));
- goto exit;
- }
- rtt_nl_hdr = nla_nest_start(msg, RTT_ATTRIBUTE_RESULTS_PER_TARGET);
- if (!rtt_nl_hdr) {
- WL_ERR(("rtt_nl_hdr is NULL\n"));
- ret = BCME_NOMEM;
- goto exit;
- }
- ret = nla_put(msg, RTT_ATTRIBUTE_TARGET_MAC, ETHER_ADDR_LEN, &report->addr);
- if (ret < 0) {
- WL_ERR(("Failed to put RTT_ATTRIBUTE_TARGET_MAC\n"));
- goto exit;
- }
- ret = nla_put_u32(msg, RTT_ATTRIBUTE_RESULT_CNT, 1);
- if (ret < 0) {
- WL_ERR(("Failed to put RTT_ATTRIBUTE_RESULT_CNT\n"));
- goto exit;
- }
- ret = nla_put(msg, RTT_ATTRIBUTE_RESULT,
- sizeof(*report), report);
- if (ret < 0) {
- WL_ERR(("Failed to put RTT_ATTRIBUTE_RESULTS\n"));
- goto exit;
- }
- nla_nest_end(msg, rtt_nl_hdr);
- cfg80211_vendor_event(msg, kflags);
- if (report) {
- MFREE(cfg->osh, report, sizeof(*report));
- }
-
- return ret;
-exit:
- if (msg)
- dev_kfree_skb_any(msg);
- WL_ERR(("Failed to send event GOOGLE_RTT_COMPLETE_EVENT,"
- " -- Free skb, ret = %d\n", ret));
- if (report)
- MFREE(cfg->osh, report, sizeof(*report));
- NAN_DBG_EXIT();
- return ret;
-}
-#endif /* RTT_SUPPORT */
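/*
 * Editor's sketch: the RTT completion above wraps a single result inside a
 * RTT_ATTRIBUTE_RESULTS_PER_TARGET nest via nla_nest_start()/nla_nest_end().
 * A minimal, generic illustration of that netlink nesting pattern follows;
 * EXAMPLE_ATTR_* and example_put_nested_result() are hypothetical names and
 * are not part of this driver or of this patch.
 */
#if 0	/* illustrative only -- not applied by this patch */
#include <net/netlink.h>

enum { EXAMPLE_ATTR_NEST = 1, EXAMPLE_ATTR_COUNT, EXAMPLE_ATTR_VALUE };

static int example_put_nested_result(struct sk_buff *skb, u32 value)
{
	/* Open the container attribute; its length is back-filled on nest_end. */
	struct nlattr *nest = nla_nest_start(skb, EXAMPLE_ATTR_NEST);

	if (!nest)
		return -EMSGSIZE;
	if (nla_put_u32(skb, EXAMPLE_ATTR_COUNT, 1) ||
	    nla_put_u32(skb, EXAMPLE_ATTR_VALUE, value)) {
		nla_nest_cancel(skb, nest);	/* drop the partial nest */
		return -EMSGSIZE;
	}
	nla_nest_end(skb, nest);	/* close the nest and fix up its length */
	return 0;
}
#endif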
-
-static int
-wl_cfgvendor_send_nan_async_resp(struct wiphy *wiphy, struct net_device *dev,
- int event_id, u8* nan_req_resp, u16 len)
-{
- int ret = BCME_OK;
- int buf_len = NAN_EVENT_BUFFER_SIZE_LARGE;
- gfp_t kflags = in_atomic() ? GFP_ATOMIC : GFP_KERNEL;
-
- struct sk_buff *msg;
-
- NAN_DBG_ENTER();
-
- /* Allocate the skb for vendor event */
- msg = CFG80211_VENDOR_EVENT_ALLOC(wiphy, ndev_to_wdev(dev), buf_len,
- event_id, kflags);
- if (!msg) {
- WL_ERR(("%s: fail to allocate skb for vendor event\n", __FUNCTION__));
- return -ENOMEM;
- }
-
- ret = nla_put(msg, NAN_ATTRIBUTE_CMD_RESP_DATA,
- len, (u8*)nan_req_resp);
- if (unlikely(ret)) {
- WL_ERR(("Failed to put resp data, ret=%d\n",
- ret));
- goto fail;
- }
- WL_DBG(("Event sent up to hal, event_id = %d, ret = %d\n",
- event_id, ret));
- cfg80211_vendor_event(msg, kflags);
- NAN_DBG_EXIT();
- return ret;
-
-fail:
- dev_kfree_skb_any(msg);
- WL_ERR(("Event not implemented or unknown -- Free skb, event_id = %d, ret = %d\n",
- event_id, ret));
- NAN_DBG_EXIT();
- return ret;
-}
-
-int
-wl_cfgvendor_nan_send_async_disable_resp(struct wireless_dev *wdev)
-{
- int ret = BCME_OK;
- struct wiphy *wiphy = wdev->wiphy;
- nan_hal_resp_t nan_req_resp;
- bzero(&nan_req_resp, sizeof(nan_req_resp));
- nan_req_resp.status = NAN_STATUS_SUCCESS;
- nan_req_resp.value = BCME_OK;
-
- ret = wl_cfgvendor_send_nan_async_resp(wiphy, wdev->netdev,
- NAN_ASYNC_RESPONSE_DISABLED, (u8*)&nan_req_resp, sizeof(nan_req_resp));
- WL_INFORM_MEM(("[NAN] Disable done\n"));
- return ret;
-}
-
-int
-wl_cfgvendor_send_nan_event(struct wiphy *wiphy, struct net_device *dev,
- int event_id, nan_event_data_t *event_data)
-{
- int ret = BCME_OK;
- int buf_len = NAN_EVENT_BUFFER_SIZE_LARGE;
- gfp_t kflags = in_atomic() ? GFP_ATOMIC : GFP_KERNEL;
-
- struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
- struct sk_buff *msg;
-
- NAN_DBG_ENTER();
-
- /* Allocate the skb for vendor event */
- msg = CFG80211_VENDOR_EVENT_ALLOC(wiphy, ndev_to_wdev(dev), buf_len,
- event_id, kflags);
- if (!msg) {
- WL_ERR(("%s: fail to allocate skb for vendor event\n", __FUNCTION__));
- return -ENOMEM;
- }
-
- switch (event_id) {
- case GOOGLE_NAN_EVENT_DE_EVENT: {
- WL_INFORM_MEM(("[NAN] GOOGLE_NAN_DE_EVENT cluster id=" MACDBG "nmi= " MACDBG "\n",
- MAC2STRDBG(event_data->clus_id.octet),
- MAC2STRDBG(event_data->local_nmi.octet)));
- ret = wl_cfgvendor_nan_de_event_filler(msg, event_data);
- if (unlikely(ret)) {
- WL_ERR(("Failed to fill de event data, ret=%d\n", ret));
- goto fail;
- }
- break;
- }
- case GOOGLE_NAN_EVENT_SUBSCRIBE_MATCH:
- case GOOGLE_NAN_EVENT_FOLLOWUP: {
- if (event_id == GOOGLE_NAN_EVENT_SUBSCRIBE_MATCH) {
- WL_DBG(("GOOGLE_NAN_EVENT_SUBSCRIBE_MATCH\n"));
- ret = wl_cfgvendor_nan_sub_match_event_filler(msg, event_data);
- if (unlikely(ret)) {
- WL_ERR(("Failed to fill sub match event data, ret=%d\n", ret));
- goto fail;
- }
- } else if (event_id == GOOGLE_NAN_EVENT_FOLLOWUP) {
- WL_DBG(("GOOGLE_NAN_EVENT_FOLLOWUP\n"));
- ret = wl_cfgvendor_nan_tx_followup_event_filler(msg, event_data);
- if (unlikely(ret)) {
- WL_ERR(("Failed to fill sub match event data, ret=%d\n", ret));
- goto fail;
- }
- }
- ret = wl_cfgvendor_nan_opt_params_filler(msg, event_data);
- if (unlikely(ret)) {
- WL_ERR(("Failed to fill sub match event data, ret=%d\n", ret));
- goto fail;
- }
- break;
- }
-
- case GOOGLE_NAN_EVENT_DISABLED: {
- WL_INFORM_MEM(("[NAN] GOOGLE_NAN_EVENT_DISABLED\n"));
- ret = nla_put_u8(msg, NAN_ATTRIBUTE_HANDLE, 0);
- if (unlikely(ret)) {
- WL_ERR(("Failed to put handle, ret=%d\n", ret));
- goto fail;
- }
- ret = nla_put_u16(msg, NAN_ATTRIBUTE_STATUS, event_data->status);
- if (unlikely(ret)) {
- WL_ERR(("Failed to put status, ret=%d\n", ret));
- goto fail;
- }
- ret = nla_put(msg, NAN_ATTRIBUTE_REASON,
- strlen("NAN_STATUS_SUCCESS"), event_data->nan_reason);
- if (unlikely(ret)) {
- WL_ERR(("Failed to put reason code, ret=%d\n", ret));
- goto fail;
- }
- break;
- }
-
- case GOOGLE_NAN_EVENT_SUBSCRIBE_TERMINATED:
- case GOOGLE_NAN_EVENT_PUBLISH_TERMINATED: {
- WL_DBG(("GOOGLE_NAN_SVC_TERMINATED, %d\n", event_id));
- ret = wl_cfgvendor_nan_svc_terminate_event_filler(msg, cfg, event_id, event_data);
- if (unlikely(ret)) {
- WL_ERR(("Failed to fill svc terminate event data, ret=%d\n", ret));
- goto fail;
- }
- break;
- }
-
- case GOOGLE_NAN_EVENT_TRANSMIT_FOLLOWUP_IND: {
- WL_DBG(("GOOGLE_NAN_EVENT_TRANSMIT_FOLLOWUP_IND %d\n",
- GOOGLE_NAN_EVENT_TRANSMIT_FOLLOWUP_IND));
- ret = wl_cfgvendor_nan_tx_followup_ind_event_data_filler(msg, event_data);
- if (unlikely(ret)) {
- WL_ERR(("Failed to fill tx follow up ind event data, ret=%d\n", ret));
- goto fail;
- }
-
- break;
- }
-
- case GOOGLE_NAN_EVENT_DATA_REQUEST: {
- WL_INFORM_MEM(("[NAN] GOOGLE_NAN_EVENT_DATA_REQUEST\n"));
- ret = wl_cfgvendor_nan_dp_ind_event_data_filler(msg, event_data);
- if (unlikely(ret)) {
- WL_ERR(("Failed to fill dp ind event data, ret=%d\n", ret));
- goto fail;
- }
- break;
- }
-
- case GOOGLE_NAN_EVENT_DATA_CONFIRMATION: {
- WL_INFORM_MEM(("[NAN] GOOGLE_NAN_EVENT_DATA_CONFIRMATION\n"));
-
- ret = wl_cfgvendor_nan_dp_estb_event_data_filler(msg, event_data);
- if (unlikely(ret)) {
- WL_ERR(("Failed to fill dp estb event data, ret=%d\n", ret));
- goto fail;
- }
- break;
- }
-
- case GOOGLE_NAN_EVENT_DATA_END: {
- WL_INFORM_MEM(("[NAN] GOOGLE_NAN_EVENT_DATA_END\n"));
- ret = nla_put_u8(msg, NAN_ATTRIBUTE_INST_COUNT, 1);
- if (unlikely(ret)) {
- WL_ERR(("Failed to put inst count, ret=%d\n", ret));
- goto fail;
- }
- ret = nla_put_u32(msg, NAN_ATTRIBUTE_NDP_ID, event_data->ndp_id);
- if (unlikely(ret)) {
- WL_ERR(("Failed to put ndp id, ret=%d\n", ret));
- goto fail;
- }
- break;
- }
-
- default:
- goto fail;
- }
-
- cfg80211_vendor_event(msg, kflags);
- NAN_DBG_EXIT();
- return ret;
-
-fail:
- dev_kfree_skb_any(msg);
- WL_ERR(("Event not implemented or unknown -- Free skb, event_id = %d, ret = %d\n",
- event_id, ret));
- NAN_DBG_EXIT();
- return ret;
-}
-
-static int
-wl_cfgvendor_nan_req_subscribe(struct wiphy *wiphy,
- struct wireless_dev *wdev, const void * data, int len)
-{
- int ret = 0;
- nan_discover_cmd_data_t *cmd_data = NULL;
- struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
- nan_hal_resp_t nan_req_resp;
-
- NAN_DBG_ENTER();
- /* Blocking Subscribe if NAN is not enabled */
- if (!cfg->nan_enable) {
- WL_ERR(("nan is not enabled, subscribe blocked\n"));
- ret = BCME_ERROR;
- goto exit;
- }
- cmd_data = (nan_discover_cmd_data_t *)MALLOCZ(cfg->osh, sizeof(*cmd_data));
- if (!cmd_data) {
- WL_ERR(("%s: memory allocation failed\n", __func__));
- ret = BCME_NOMEM;
- goto exit;
- }
-
- bzero(&nan_req_resp, sizeof(nan_req_resp));
- ret = wl_cfgvendor_nan_parse_discover_args(wiphy, data, len, cmd_data);
- if (ret) {
- WL_ERR(("failed to parse nan disc vendor args, ret = %d\n", ret));
- goto exit;
- }
-
- if (cmd_data->sub_id == 0) {
- ret = wl_cfgnan_generate_inst_id(cfg, &cmd_data->sub_id);
- if (ret) {
- WL_ERR(("failed to generate instance-id for subscribe\n"));
- goto exit;
- }
- } else {
- cmd_data->svc_update = true;
- }
-
- ret = wl_cfgnan_subscribe_handler(wdev->netdev, cfg, cmd_data);
- if (unlikely(ret) || unlikely(cmd_data->status)) {
- WL_ERR(("failed to subscribe error[%d], status = [%d]\n",
- ret, cmd_data->status));
- wl_cfgnan_remove_inst_id(cfg, cmd_data->sub_id);
- goto exit;
- }
-
- WL_DBG(("subscriber instance id=%d\n", cmd_data->sub_id));
-
- if (cmd_data->status == WL_NAN_E_OK) {
- nan_req_resp.instance_id = cmd_data->sub_id;
- } else {
- nan_req_resp.instance_id = 0;
- }
-exit:
- ret = wl_cfgvendor_nan_cmd_reply(wiphy, NAN_WIFI_SUBCMD_REQUEST_SUBSCRIBE,
- &nan_req_resp, ret, cmd_data ? cmd_data->status : BCME_OK);
- wl_cfgvendor_free_disc_cmd_data(cfg, cmd_data);
- NAN_DBG_EXIT();
- return ret;
-}
-
-static int
-wl_cfgvendor_nan_req_publish(struct wiphy *wiphy,
- struct wireless_dev *wdev, const void * data, int len)
-{
- int ret = 0;
- nan_discover_cmd_data_t *cmd_data = NULL;
- struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
- nan_hal_resp_t nan_req_resp;
- NAN_DBG_ENTER();
-
- /* Blocking Publish if NAN is not enabled */
- if (!cfg->nan_enable) {
- WL_ERR(("nan is not enabled publish blocked\n"));
- ret = BCME_ERROR;
- goto exit;
- }
- cmd_data = (nan_discover_cmd_data_t *)MALLOCZ(cfg->osh, sizeof(*cmd_data));
- if (!cmd_data) {
- WL_ERR(("%s: memory allocation failed\n", __func__));
- ret = BCME_NOMEM;
- goto exit;
- }
-
- bzero(&nan_req_resp, sizeof(nan_req_resp));
- ret = wl_cfgvendor_nan_parse_discover_args(wiphy, data, len, cmd_data);
- if (ret) {
- WL_ERR(("failed to parse nan disc vendor args, ret = %d\n", ret));
- goto exit;
- }
-
- if (cmd_data->pub_id == 0) {
- ret = wl_cfgnan_generate_inst_id(cfg, &cmd_data->pub_id);
- if (ret) {
- WL_ERR(("failed to generate instance-id for publisher\n"));
- goto exit;
- }
- } else {
- cmd_data->svc_update = true;
- }
-
- ret = wl_cfgnan_publish_handler(wdev->netdev, cfg, cmd_data);
- if (unlikely(ret) || unlikely(cmd_data->status)) {
- WL_ERR(("failed to publish error[%d], status[%d]\n",
- ret, cmd_data->status));
- wl_cfgnan_remove_inst_id(cfg, cmd_data->pub_id);
- goto exit;
- }
-
- WL_DBG(("publisher instance id=%d\n", cmd_data->pub_id));
-
- if (cmd_data->status == WL_NAN_E_OK) {
- nan_req_resp.instance_id = cmd_data->pub_id;
- } else {
- nan_req_resp.instance_id = 0;
- }
-exit:
- ret = wl_cfgvendor_nan_cmd_reply(wiphy, NAN_WIFI_SUBCMD_REQUEST_PUBLISH,
- &nan_req_resp, ret, cmd_data ? cmd_data->status : BCME_OK);
- wl_cfgvendor_free_disc_cmd_data(cfg, cmd_data);
- NAN_DBG_EXIT();
- return ret;
-}
-
-static int
-wl_cfgvendor_nan_start_handler(struct wiphy *wiphy,
- struct wireless_dev *wdev, const void *data, int len)
-{
- int ret = 0;
- nan_config_cmd_data_t *cmd_data;
- struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
- nan_hal_resp_t nan_req_resp;
- uint32 nan_attr_mask = 0;
-
- cmd_data = (nan_config_cmd_data_t *)MALLOCZ(cfg->osh, sizeof(*cmd_data));
- if (!cmd_data) {
- WL_ERR(("%s: memory allocation failed\n", __func__));
- ret = BCME_NOMEM;
- goto exit;
- }
- NAN_DBG_ENTER();
-
- if (cfg->nan_enable) {
- WL_ERR(("nan is already enabled\n"));
- ret = BCME_OK;
- goto exit;
- }
- bzero(&nan_req_resp, sizeof(nan_req_resp));
-
- cmd_data->sid_beacon.sid_enable = NAN_SID_ENABLE_FLAG_INVALID; /* Setting to some default */
- cmd_data->sid_beacon.sid_count = NAN_SID_BEACON_COUNT_INVALID; /* Setting to some default */
-
- ret = wl_cfgvendor_nan_parse_args(wiphy, data, len, cmd_data, &nan_attr_mask);
- if (ret) {
- WL_ERR(("failed to parse nan vendor args, ret %d\n", ret));
- goto exit;
- }
-
- ret = wl_cfgnan_start_handler(wdev->netdev, cfg, cmd_data, nan_attr_mask);
- if (ret) {
- WL_ERR(("failed to start nan error[%d]\n", ret));
- goto exit;
- }
- /* Initializing Instance Id List */
- bzero(cfg->nan_inst_ctrl, NAN_ID_CTRL_SIZE * sizeof(nan_svc_inst_t));
-exit:
- ret = wl_cfgvendor_nan_cmd_reply(wiphy, NAN_WIFI_SUBCMD_ENABLE,
- &nan_req_resp, ret, cmd_data ? cmd_data->status : BCME_OK);
- if (cmd_data) {
- if (cmd_data->scid.data) {
- MFREE(cfg->osh, cmd_data->scid.data, cmd_data->scid.dlen);
- cmd_data->scid.dlen = 0;
- }
- MFREE(cfg->osh, cmd_data, sizeof(*cmd_data));
- }
- NAN_DBG_EXIT();
- return ret;
-}
-
-static int
-wl_cfgvendor_terminate_dp_rng_sessions(struct bcm_cfg80211 *cfg,
- struct wireless_dev *wdev, bool *ssn_exists)
-{
- int ret = 0;
- uint8 i = 0;
- int status = BCME_ERROR;
- nan_ranging_inst_t *ranging_inst = NULL;
-
- /* Cleanup active Data Paths If any */
- for (i = 0; i < NAN_MAX_NDP_PEER; i++) {
- if (cfg->nancfg.ndp_id[i]) {
- *ssn_exists = true;
- WL_DBG(("Found entry of ndp id = [%d], end dp associated to it\n",
- cfg->nancfg.ndp_id[i]));
- wl_cfgnan_data_path_end_handler(wdev->netdev, cfg,
- cfg->nancfg.ndp_id[i], &status);
- }
- }
-
- /* Cancel ranging sessions */
- for (i = 0; i < NAN_MAX_RANGING_INST; i++) {
- ranging_inst = &cfg->nan_ranging_info[i];
- if (ranging_inst->range_id) {
- *ssn_exists = true;
- ret = wl_cfgnan_cancel_ranging(bcmcfg_to_prmry_ndev(cfg), cfg,
- ranging_inst->range_id,
- NAN_RNG_TERM_FLAG_NONE, &status);
- if (unlikely(ret) || unlikely(status)) {
- WL_ERR(("nan range cancel failed ret = %d status = %d\n",
- ret, status));
- }
- }
- }
- return ret;
-}
-
-static int
-wl_cfgvendor_nan_stop_handler(struct wiphy *wiphy,
- struct wireless_dev *wdev, const void * data, int len)
-{
- int ret = 0;
- struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
- nan_hal_resp_t nan_req_resp;
- bool ssn_exists = false;
-
- NAN_DBG_ENTER();
-
- if (!cfg->nan_init_state) {
- WL_ERR(("nan is not initialized/nmi doesnt exists\n"));
- ret = BCME_OK;
- goto exit;
- }
-
- mutex_lock(&cfg->if_sync);
- if (cfg->nan_enable) {
- cfg->nancfg.disable_reason = NAN_USER_INITIATED;
- wl_cfgvendor_terminate_dp_rng_sessions(cfg, wdev, &ssn_exists);
- if (ssn_exists == true) {
- /*
- * Schedule nan disable with 4sec delay to make sure
- * fw cleans any active Data paths and
- * notifies the peer about the dp session terminations
- */
- WL_INFORM_MEM(("Schedule Nan Disable Req, with 4sec\n"));
- schedule_delayed_work(&cfg->nan_disable,
- msecs_to_jiffies(NAN_DISABLE_CMD_DELAY_TIMER));
- } else {
- ret = wl_cfgnan_disable(cfg);
- if (ret) {
- WL_ERR(("failed to disable nan, error[%d]\n", ret));
- }
- }
- }
- mutex_unlock(&cfg->if_sync);
- bzero(&nan_req_resp, sizeof(nan_req_resp));
-exit:
- NAN_DBG_EXIT();
- return ret;
-}
-
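/*
 * Editor's sketch: when active NDP/ranging sessions exist, the stop handler
 * above defers the real disable via schedule_delayed_work(&cfg->nan_disable,
 * msecs_to_jiffies(NAN_DISABLE_CMD_DELAY_TIMER)) so firmware can tear the
 * sessions down first. A minimal, self-contained illustration of that
 * delayed-work pattern follows; struct example_ctx and the example_* names
 * are hypothetical and are not part of this driver or of this patch.
 */
#if 0	/* illustrative only -- not applied by this patch */
#include <linux/workqueue.h>
#include <linux/jiffies.h>

struct example_ctx {
	struct delayed_work disable_work;
};

static void example_disable_fn(struct work_struct *work)
{
	struct example_ctx *ctx =
		container_of(work, struct example_ctx, disable_work.work);

	/* Heavy teardown runs here, outside the vendor-command context. */
	(void)ctx;
}

static void example_init(struct example_ctx *ctx)
{
	/* Done once, typically at attach time. */
	INIT_DELAYED_WORK(&ctx->disable_work, example_disable_fn);
}

static void example_schedule_disable(struct example_ctx *ctx)
{
	/* Fire roughly 4 seconds from now, mirroring the 4 s delay above. */
	schedule_delayed_work(&ctx->disable_work, msecs_to_jiffies(4000));
}
#endif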
-static int
-wl_cfgvendor_nan_config_handler(struct wiphy *wiphy,
- struct wireless_dev *wdev, const void *data, int len)
-{
- int ret = 0;
- nan_config_cmd_data_t *cmd_data;
- struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
- nan_hal_resp_t nan_req_resp;
- uint32 nan_attr_mask = 0;
-
- cmd_data = MALLOCZ(cfg->osh, sizeof(*cmd_data));
- if (!cmd_data) {
- WL_ERR(("%s: memory allocation failed\n", __func__));
- ret = BCME_NOMEM;
- goto exit;
- }
- NAN_DBG_ENTER();
-
- bzero(&nan_req_resp, sizeof(nan_req_resp));
-
- cmd_data->avail_params.duration = NAN_BAND_INVALID; /* Setting to some default */
- cmd_data->sid_beacon.sid_enable = NAN_SID_ENABLE_FLAG_INVALID; /* Setting to some default */
- cmd_data->sid_beacon.sid_count = NAN_SID_BEACON_COUNT_INVALID; /* Setting to some default */
-
- ret = wl_cfgvendor_nan_parse_args(wiphy, data, len, cmd_data, &nan_attr_mask);
- if (ret) {
- WL_ERR(("failed to parse nan vendor args, ret = %d\n", ret));
- goto exit;
- }
-
- ret = wl_cfgnan_config_handler(wdev->netdev, cfg, cmd_data, nan_attr_mask);
- if (ret) {
- WL_ERR(("failed in config request, nan error[%d]\n", ret));
- goto exit;
- }
-exit:
- ret = wl_cfgvendor_nan_cmd_reply(wiphy, NAN_WIFI_SUBCMD_CONFIG,
- &nan_req_resp, ret, cmd_data ? cmd_data->status : BCME_OK);
- if (cmd_data) {
- if (cmd_data->scid.data) {
- MFREE(cfg->osh, cmd_data->scid.data, cmd_data->scid.dlen);
- cmd_data->scid.dlen = 0;
- }
- MFREE(cfg->osh, cmd_data, sizeof(*cmd_data));
- }
- NAN_DBG_EXIT();
- return ret;
-}
-
-static int
-wl_cfgvendor_nan_cancel_publish(struct wiphy *wiphy,
- struct wireless_dev *wdev, const void * data, int len)
-{
- int ret = 0;
- nan_discover_cmd_data_t *cmd_data = NULL;
- struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
- nan_hal_resp_t nan_req_resp;
-
- /* Blocking Cancel_Publish if NAN is not enabled */
- if (!cfg->nan_enable) {
- WL_ERR(("nan is not enabled, cancel publish blocked\n"));
- ret = BCME_ERROR;
- goto exit;
- }
- cmd_data = (nan_discover_cmd_data_t *)MALLOCZ(cfg->osh, sizeof(*cmd_data));
- if (!cmd_data) {
- WL_ERR(("%s: memory allocation failed\n", __func__));
- ret = BCME_NOMEM;
- goto exit;
- }
- NAN_DBG_ENTER();
-
- bzero(&nan_req_resp, sizeof(nan_req_resp));
-
- ret = wl_cfgvendor_nan_parse_discover_args(wiphy, data, len, cmd_data);
- if (ret) {
- WL_ERR(("failed to parse nan disc vendor args, ret= %d\n", ret));
- goto exit;
- }
- nan_req_resp.instance_id = cmd_data->pub_id;
- WL_INFORM_MEM(("[NAN] cancel publish instance_id=%d\n", cmd_data->pub_id));
-
- ret = wl_cfgnan_cancel_pub_handler(wdev->netdev, cfg, cmd_data);
- if (ret) {
- WL_ERR(("failed to cancel publish nan instance-id[%d] error[%d]\n",
- cmd_data->pub_id, ret));
- goto exit;
- }
-exit:
- ret = wl_cfgvendor_nan_cmd_reply(wiphy, NAN_WIFI_SUBCMD_CANCEL_PUBLISH,
- &nan_req_resp, ret, cmd_data ? cmd_data->status : BCME_OK);
- wl_cfgvendor_free_disc_cmd_data(cfg, cmd_data);
- NAN_DBG_EXIT();
- return ret;
-}
-
-static int
-wl_cfgvendor_nan_cancel_subscribe(struct wiphy *wiphy,
- struct wireless_dev *wdev, const void * data, int len)
-{
- int ret = 0;
- nan_discover_cmd_data_t *cmd_data = NULL;
- struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
- nan_hal_resp_t nan_req_resp;
-
- /* Blocking Cancel_Subscribe if NAN is not enabled */
- if (!cfg->nan_enable) {
- WL_ERR(("nan is not enabled, cancel subscribe blocked\n"));
- ret = BCME_ERROR;
- goto exit;
- }
- cmd_data = MALLOCZ(cfg->osh, sizeof(*cmd_data));
- if (!cmd_data) {
- WL_ERR(("%s: memory allocation failed\n", __func__));
- ret = BCME_NOMEM;
- goto exit;
- }
- NAN_DBG_ENTER();
-
- bzero(&nan_req_resp, sizeof(nan_req_resp));
-
- ret = wl_cfgvendor_nan_parse_discover_args(wiphy, data, len, cmd_data);
- if (ret) {
- WL_ERR(("failed to parse nan disc vendor args, ret= %d\n", ret));
- goto exit;
- }
- nan_req_resp.instance_id = cmd_data->sub_id;
- WL_INFORM_MEM(("[NAN] cancel subscribe instance_id=%d\n", cmd_data->sub_id));
-
- ret = wl_cfgnan_cancel_sub_handler(wdev->netdev, cfg, cmd_data);
- if (ret) {
- WL_ERR(("failed to cancel subscribe nan instance-id[%d] error[%d]\n",
- cmd_data->sub_id, ret));
- goto exit;
- }
-exit:
- ret = wl_cfgvendor_nan_cmd_reply(wiphy, NAN_WIFI_SUBCMD_CANCEL_SUBSCRIBE,
- &nan_req_resp, ret, cmd_data ? cmd_data->status : BCME_OK);
- wl_cfgvendor_free_disc_cmd_data(cfg, cmd_data);
- NAN_DBG_EXIT();
- return ret;
-}
-
-static int
-wl_cfgvendor_nan_transmit(struct wiphy *wiphy,
- struct wireless_dev *wdev, const void * data, int len)
-{
- int ret = 0;
- nan_discover_cmd_data_t *cmd_data = NULL;
- struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
- nan_hal_resp_t nan_req_resp;
-
- /* Blocking Transmit if NAN is not enabled */
- if (!cfg->nan_enable) {
- WL_ERR(("nan is not enabled, transmit blocked\n"));
- ret = BCME_ERROR;
- goto exit;
- }
- cmd_data = (nan_discover_cmd_data_t *)MALLOCZ(cfg->osh, sizeof(*cmd_data));
- if (!cmd_data) {
- WL_ERR(("%s: memory allocation failed\n", __func__));
- ret = BCME_NOMEM;
- goto exit;
- }
- NAN_DBG_ENTER();
-
- bzero(&nan_req_resp, sizeof(nan_req_resp));
-
- ret = wl_cfgvendor_nan_parse_discover_args(wiphy, data, len, cmd_data);
- if (ret) {
- WL_ERR(("failed to parse nan disc vendor args, ret= %d\n", ret));
- goto exit;
- }
- nan_req_resp.instance_id = cmd_data->local_id;
- ret = wl_cfgnan_transmit_handler(wdev->netdev, cfg, cmd_data);
- if (ret) {
- WL_ERR(("failed to transmit-followup nan error[%d]\n", ret));
- goto exit;
- }
-exit:
- ret = wl_cfgvendor_nan_cmd_reply(wiphy, NAN_WIFI_SUBCMD_TRANSMIT,
- &nan_req_resp, ret, cmd_data ? cmd_data->status : BCME_OK);
- wl_cfgvendor_free_disc_cmd_data(cfg, cmd_data);
- NAN_DBG_EXIT();
- return ret;
-}
-
-static int
-wl_cfgvendor_nan_get_capablities(struct wiphy *wiphy,
- struct wireless_dev *wdev, const void * data, int len)
-{
- int ret = 0;
- struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
- nan_hal_resp_t nan_req_resp;
-
- NAN_DBG_ENTER();
-
- bzero(&nan_req_resp, sizeof(nan_req_resp));
- ret = wl_cfgnan_get_capablities_handler(wdev->netdev, cfg, &nan_req_resp.capabilities);
- if (ret) {
- WL_ERR(("Could not get capabilities\n"));
- ret = -EINVAL;
- goto exit;
- }
-exit:
- ret = wl_cfgvendor_nan_cmd_reply(wiphy, NAN_WIFI_SUBCMD_GET_CAPABILITIES,
- &nan_req_resp, ret, BCME_OK);
- NAN_DBG_EXIT();
- return ret;
-}
-
-static int
-wl_cfgvendor_nan_data_path_iface_create(struct wiphy *wiphy,
- struct wireless_dev *wdev, const void * data, int len)
-{
- int ret = 0;
- nan_datapath_cmd_data_t *cmd_data = NULL;
- struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
- nan_hal_resp_t nan_req_resp;
- dhd_pub_t *dhdp = wl_cfg80211_get_dhdp(wdev->netdev);
-
- if (!cfg->nan_init_state) {
- WL_ERR(("%s: NAN is not inited or Device doesn't support NAN \n", __func__));
- ret = -ENODEV;
- goto exit;
- }
-
- cmd_data = (nan_datapath_cmd_data_t *)MALLOCZ(cfg->osh, sizeof(*cmd_data));
- if (!cmd_data) {
- WL_ERR(("%s: memory allocation failed\n", __func__));
- ret = BCME_NOMEM;
- goto exit;
- }
- NAN_DBG_ENTER();
-
- bzero(&nan_req_resp, sizeof(nan_req_resp));
-
- ret = wl_cfgvendor_nan_parse_datapath_args(wiphy, data, len, cmd_data);
- if (ret) {
- WL_ERR(("failed to parse nan datapath vendor args, ret = %d\n", ret));
- goto exit;
- }
-
- if (cfg->nan_enable) { /* new framework Impl, iface create called after nan enab */
- ret = wl_cfgnan_data_path_iface_create_delete_handler(wdev->netdev,
- cfg, cmd_data->ndp_iface,
- NAN_WIFI_SUBCMD_DATA_PATH_IFACE_CREATE, dhdp->up);
- if (ret != BCME_OK) {
- WL_ERR(("failed to create iface, ret = %d\n", ret));
- goto exit;
- }
- }
-exit:
- ret = wl_cfgvendor_nan_cmd_reply(wiphy, NAN_WIFI_SUBCMD_DATA_PATH_IFACE_CREATE,
- &nan_req_resp, ret, cmd_data ? cmd_data->status : BCME_OK);
- wl_cfgvendor_free_dp_cmd_data(cfg, cmd_data);
- NAN_DBG_EXIT();
- return ret;
-}
-
-static int
-wl_cfgvendor_nan_data_path_iface_delete(struct wiphy *wiphy,
- struct wireless_dev *wdev, const void * data, int len)
-{
- int ret = 0;
- nan_datapath_cmd_data_t *cmd_data = NULL;
- struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
- nan_hal_resp_t nan_req_resp;
- dhd_pub_t *dhdp = wl_cfg80211_get_dhdp(wdev->netdev);
-
- if (cfg->nan_init_state == false) {
- WL_ERR(("%s: NAN is not inited or Device doesn't support NAN \n", __func__));
- /* Deinit has taken care of cleaning the virtual iface */
- ret = BCME_OK;
- goto exit;
- }
-
- NAN_DBG_ENTER();
- cmd_data = (nan_datapath_cmd_data_t *)MALLOCZ(cfg->osh, sizeof(*cmd_data));
- if (!cmd_data) {
- WL_ERR(("%s: memory allocation failed\n", __func__));
- ret = BCME_NOMEM;
- goto exit;
- }
- bzero(&nan_req_resp, sizeof(nan_req_resp));
- ret = wl_cfgvendor_nan_parse_datapath_args(wiphy, data, len, cmd_data);
- if (ret) {
- WL_ERR(("failed to parse nan datapath vendor args, ret = %d\n", ret));
- goto exit;
- }
-
- ret = wl_cfgnan_data_path_iface_create_delete_handler(wdev->netdev, cfg,
- (char*)cmd_data->ndp_iface,
- NAN_WIFI_SUBCMD_DATA_PATH_IFACE_DELETE, dhdp->up);
- if (ret) {
- WL_ERR(("failed to delete ndp iface [%d]\n", ret));
- goto exit;
- }
-exit:
- ret = wl_cfgvendor_nan_cmd_reply(wiphy, NAN_WIFI_SUBCMD_DATA_PATH_IFACE_DELETE,
- &nan_req_resp, ret, cmd_data ? cmd_data->status : BCME_OK);
- wl_cfgvendor_free_dp_cmd_data(cfg, cmd_data);
- NAN_DBG_EXIT();
- return ret;
-}
-
-static int
-wl_cfgvendor_nan_data_path_request(struct wiphy *wiphy,
- struct wireless_dev *wdev, const void * data, int len)
-{
- int ret = 0;
- nan_datapath_cmd_data_t *cmd_data = NULL;
- struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
- nan_hal_resp_t nan_req_resp;
- uint8 ndp_instance_id = 0;
-
- if (!cfg->nan_enable) {
- WL_ERR(("nan is not enabled, nan data path request blocked\n"));
- ret = BCME_ERROR;
- goto exit;
- }
-
- NAN_DBG_ENTER();
- cmd_data = (nan_datapath_cmd_data_t *)MALLOCZ(cfg->osh, sizeof(*cmd_data));
- if (!cmd_data) {
- WL_ERR(("%s: memory allocation failed\n", __func__));
- ret = BCME_NOMEM;
- goto exit;
- }
-
- bzero(&nan_req_resp, sizeof(nan_req_resp));
- ret = wl_cfgvendor_nan_parse_datapath_args(wiphy, data, len, cmd_data);
- if (ret) {
- WL_ERR(("failed to parse nan datapath vendor args, ret = %d\n", ret));
- goto exit;
- }
-
- ret = wl_cfgnan_data_path_request_handler(wdev->netdev, cfg,
- cmd_data, &ndp_instance_id);
- if (ret) {
- WL_ERR(("failed to request nan data path [%d]\n", ret));
- goto exit;
- }
-
- if (cmd_data->status == BCME_OK) {
- nan_req_resp.ndp_instance_id = cmd_data->ndp_instance_id;
- } else {
- nan_req_resp.ndp_instance_id = 0;
- }
-exit:
- ret = wl_cfgvendor_nan_cmd_reply(wiphy, NAN_WIFI_SUBCMD_DATA_PATH_REQUEST,
- &nan_req_resp, ret, cmd_data ? cmd_data->status : BCME_OK);
- wl_cfgvendor_free_dp_cmd_data(cfg, cmd_data);
- NAN_DBG_EXIT();
- return ret;
-}
-
-static int
-wl_cfgvendor_nan_data_path_response(struct wiphy *wiphy,
- struct wireless_dev *wdev, const void * data, int len)
-{
- int ret = 0;
- nan_datapath_cmd_data_t *cmd_data = NULL;
- struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
- nan_hal_resp_t nan_req_resp;
-
- if (!cfg->nan_enable) {
- WL_ERR(("nan is not enabled, nan data path response blocked\n"));
- ret = BCME_ERROR;
- goto exit;
- }
- NAN_DBG_ENTER();
- cmd_data = (nan_datapath_cmd_data_t *)MALLOCZ(cfg->osh, sizeof(*cmd_data));
- if (!cmd_data) {
- WL_ERR(("%s: memory allocation failed\n", __func__));
- ret = BCME_NOMEM;
- goto exit;
- }
-
- bzero(&nan_req_resp, sizeof(nan_req_resp));
- ret = wl_cfgvendor_nan_parse_datapath_args(wiphy, data, len, cmd_data);
- if (ret) {
- WL_ERR(("failed to parse nan datapath vendor args, ret = %d\n", ret));
- goto exit;
- }
- ret = wl_cfgnan_data_path_response_handler(wdev->netdev, cfg, cmd_data);
- if (ret) {
- WL_ERR(("failed to response nan data path [%d]\n", ret));
- goto exit;
- }
-exit:
- ret = wl_cfgvendor_nan_cmd_reply(wiphy, NAN_WIFI_SUBCMD_DATA_PATH_RESPONSE,
- &nan_req_resp, ret, cmd_data ? cmd_data->status : BCME_OK);
- wl_cfgvendor_free_dp_cmd_data(cfg, cmd_data);
- NAN_DBG_EXIT();
- return ret;
-}
-
-static int
-wl_cfgvendor_nan_data_path_end(struct wiphy *wiphy,
- struct wireless_dev *wdev, const void * data, int len)
-{
- int ret = 0;
- nan_datapath_cmd_data_t *cmd_data = NULL;
- struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
- nan_hal_resp_t nan_req_resp;
- int status = BCME_ERROR;
-
- NAN_DBG_ENTER();
- if (!cfg->nan_enable) {
- WL_ERR(("nan is not enabled, nan data path end blocked\n"));
- ret = BCME_OK;
- goto exit;
- }
- cmd_data = (nan_datapath_cmd_data_t *)MALLOCZ(cfg->osh, sizeof(*cmd_data));
- if (!cmd_data) {
- WL_ERR(("%s: memory allocation failed\n", __func__));
- ret = BCME_NOMEM;
- goto exit;
- }
-
- bzero(&nan_req_resp, sizeof(nan_req_resp));
- ret = wl_cfgvendor_nan_parse_datapath_args(wiphy, data, len, cmd_data);
- if (ret) {
- WL_ERR(("failed to parse nan datapath vendor args, ret = %d\n", ret));
- goto exit;
- }
- ret = wl_cfgnan_data_path_end_handler(wdev->netdev, cfg,
- cmd_data->ndp_instance_id, &status);
- if (ret) {
- WL_ERR(("failed to end nan data path [%d]\n", ret));
- goto exit;
- }
-exit:
- ret = wl_cfgvendor_nan_cmd_reply(wiphy, NAN_WIFI_SUBCMD_DATA_PATH_END,
- &nan_req_resp, ret, cmd_data ? status : BCME_OK);
- wl_cfgvendor_free_dp_cmd_data(cfg, cmd_data);
- NAN_DBG_EXIT();
- return ret;
-}
-
-#ifdef WL_NAN_DISC_CACHE
-static int
-wl_cfgvendor_nan_data_path_sec_info(struct wiphy *wiphy,
- struct wireless_dev *wdev, const void *data, int len)
-{
- int ret = 0;
- struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
- nan_hal_resp_t nan_req_resp;
- nan_datapath_sec_info_cmd_data_t *cmd_data = NULL;
- dhd_pub_t *dhdp = wl_cfg80211_get_dhdp(wdev->netdev);
-
- NAN_DBG_ENTER();
- if (!cfg->nan_enable) {
- WL_ERR(("nan is not enabled\n"));
- ret = BCME_UNSUPPORTED;
- goto exit;
- }
- cmd_data = MALLOCZ(dhdp->osh, sizeof(*cmd_data));
- if (!cmd_data) {
- WL_ERR(("%s: memory allocation failed\n", __func__));
- ret = BCME_NOMEM;
- goto exit;
- }
-
- ret = wl_cfgvendor_nan_parse_dp_sec_info_args(wiphy, data, len, cmd_data);
- if (ret) {
- WL_ERR(("failed to parse sec info args\n"));
- goto exit;
- }
-
- bzero(&nan_req_resp, sizeof(nan_req_resp));
- ret = wl_cfgnan_sec_info_handler(cfg, cmd_data, &nan_req_resp);
- if (ret) {
- WL_ERR(("failed to retrieve svc hash/pub nmi error[%d]\n", ret));
- goto exit;
- }
-exit:
- ret = wl_cfgvendor_nan_cmd_reply(wiphy, NAN_WIFI_SUBCMD_DATA_PATH_SEC_INFO,
- &nan_req_resp, ret, BCME_OK);
- if (cmd_data) {
- MFREE(dhdp->osh, cmd_data, sizeof(*cmd_data));
- }
- NAN_DBG_EXIT();
- return ret;
-}
-#endif /* WL_NAN_DISC_CACHE */
-
-static int
-wl_cfgvendor_nan_version_info(struct wiphy *wiphy,
- struct wireless_dev *wdev, const void *data, int len)
-{
- int ret = BCME_OK;
- struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
- uint32 version = NAN_HAL_VERSION_1;
-
- BCM_REFERENCE(cfg);
- WL_DBG(("Enter %s version %d\n", __FUNCTION__, version));
- ret = wl_cfgvendor_send_cmd_reply(wiphy, &version, sizeof(version));
- return ret;
-}
-
-#endif /* WL_NAN */
-
-#ifdef LINKSTAT_SUPPORT
-
-#define NUM_RATE 32
-#define NUM_PEER 1
-#define NUM_CHAN 11
-#define HEADER_SIZE sizeof(ver_len)
-
-static int wl_cfgvendor_lstats_get_bcn_mbss(char *buf, uint32 *rxbeaconmbss)
-{
- wl_cnt_info_t *cbuf = (wl_cnt_info_t *)buf;
- const void *cnt;
-
- if ((cnt = (const void *)bcm_get_data_from_xtlv_buf(cbuf->data, cbuf->datalen,
- WL_CNT_XTLV_CNTV_LE10_UCODE, NULL, BCM_XTLV_OPTION_ALIGN32)) != NULL) {
- *rxbeaconmbss = ((const wl_cnt_v_le10_mcst_t *)cnt)->rxbeaconmbss;
- } else if ((cnt = (const void *)bcm_get_data_from_xtlv_buf(cbuf->data, cbuf->datalen,
- WL_CNT_XTLV_LT40_UCODE_V1, NULL, BCM_XTLV_OPTION_ALIGN32)) != NULL) {
- *rxbeaconmbss = ((const wl_cnt_lt40mcst_v1_t *)cnt)->rxbeaconmbss;
- } else if ((cnt = (const void *)bcm_get_data_from_xtlv_buf(cbuf->data, cbuf->datalen,
- WL_CNT_XTLV_GE40_UCODE_V1, NULL, BCM_XTLV_OPTION_ALIGN32)) != NULL) {
- *rxbeaconmbss = ((const wl_cnt_ge40mcst_v1_t *)cnt)->rxbeaconmbss;
- } else if ((cnt = (const void *)bcm_get_data_from_xtlv_buf(cbuf->data, cbuf->datalen,
- WL_CNT_XTLV_GE80_UCODE_V1, NULL, BCM_XTLV_OPTION_ALIGN32)) != NULL) {
- *rxbeaconmbss = ((const wl_cnt_ge80mcst_v1_t *)cnt)->rxbeaconmbss;
- } else {
- *rxbeaconmbss = 0;
- return BCME_NOTFOUND;
- }
-
- return BCME_OK;
-}
-
-static int wl_cfgvendor_lstats_get_info(struct wiphy *wiphy,
- struct wireless_dev *wdev, const void *data, int len)
-{
- static char iovar_buf[WLC_IOCTL_MAXLEN];
- struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
- int err = 0, i;
- wifi_radio_stat *radio;
- wifi_radio_stat_h radio_h;
- wl_wme_cnt_t *wl_wme_cnt;
- const wl_cnt_wlc_t *wlc_cnt;
- scb_val_t scbval;
- char *output = NULL;
- char *outdata = NULL;
- wifi_rate_stat_v1 *p_wifi_rate_stat_v1 = NULL;
- wifi_rate_stat *p_wifi_rate_stat = NULL;
- uint total_len = 0;
- uint32 rxbeaconmbss;
- wifi_iface_stat iface;
- wlc_rev_info_t revinfo;
-#ifdef CONFIG_COMPAT
- compat_wifi_iface_stat compat_iface;
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 6, 0))
- int compat_task_state = in_compat_syscall();
-#else
- int compat_task_state = is_compat_task();
-#endif
-#endif /* CONFIG_COMPAT */
-
- WL_INFORM_MEM(("%s: Enter \n", __func__));
- RETURN_EIO_IF_NOT_UP(cfg);
-
- /* Get the device rev info */
- bzero(&revinfo, sizeof(revinfo));
- err = wldev_ioctl_get(bcmcfg_to_prmry_ndev(cfg), WLC_GET_REVINFO, &revinfo,
- sizeof(revinfo));
- if (err != BCME_OK) {
- goto exit;
- }
-
- outdata = (void *)MALLOCZ(cfg->osh, WLC_IOCTL_MAXLEN);
- if (outdata == NULL) {
- WL_ERR(("%s: alloc failed\n", __func__));
- return -ENOMEM;
- }
-
- bzero(&scbval, sizeof(scb_val_t));
- bzero(outdata, WLC_IOCTL_MAXLEN);
- output = outdata;
-
- err = wldev_iovar_getbuf(bcmcfg_to_prmry_ndev(cfg), "radiostat", NULL, 0,
- iovar_buf, WLC_IOCTL_MAXLEN, NULL);
- if (err != BCME_OK && err != BCME_UNSUPPORTED) {
- WL_ERR(("error (%d) - size = %zu\n", err, sizeof(wifi_radio_stat)));
- goto exit;
- }
- radio = (wifi_radio_stat *)iovar_buf;
-
- bzero(&radio_h, sizeof(wifi_radio_stat_h));
- radio_h.on_time = radio->on_time;
- radio_h.tx_time = radio->tx_time;
- radio_h.rx_time = radio->rx_time;
- radio_h.on_time_scan = radio->on_time_scan;
- radio_h.on_time_nbd = radio->on_time_nbd;
- radio_h.on_time_gscan = radio->on_time_gscan;
- radio_h.on_time_roam_scan = radio->on_time_roam_scan;
- radio_h.on_time_pno_scan = radio->on_time_pno_scan;
- radio_h.on_time_hs20 = radio->on_time_hs20;
- radio_h.num_channels = NUM_CHAN;
-
- memcpy(output, &radio_h, sizeof(wifi_radio_stat_h));
-
- output += sizeof(wifi_radio_stat_h);
- output += (NUM_CHAN * sizeof(wifi_channel_stat));
-
- err = wldev_iovar_getbuf(bcmcfg_to_prmry_ndev(cfg), "wme_counters", NULL, 0,
- iovar_buf, WLC_IOCTL_MAXLEN, NULL);
- if (unlikely(err)) {
- WL_ERR(("error (%d)\n", err));
- goto exit;
- }
- wl_wme_cnt = (wl_wme_cnt_t *)iovar_buf;
-
- COMPAT_ASSIGN_VALUE(iface, ac[WIFI_AC_VO].ac, WIFI_AC_VO);
- COMPAT_ASSIGN_VALUE(iface, ac[WIFI_AC_VO].tx_mpdu, wl_wme_cnt->tx[AC_VO].packets);
- COMPAT_ASSIGN_VALUE(iface, ac[WIFI_AC_VO].rx_mpdu, wl_wme_cnt->rx[AC_VO].packets);
- COMPAT_ASSIGN_VALUE(iface, ac[WIFI_AC_VO].mpdu_lost,
- wl_wme_cnt->tx_failed[WIFI_AC_VO].packets);
-
- COMPAT_ASSIGN_VALUE(iface, ac[WIFI_AC_VI].ac, WIFI_AC_VI);
- COMPAT_ASSIGN_VALUE(iface, ac[WIFI_AC_VI].tx_mpdu, wl_wme_cnt->tx[AC_VI].packets);
- COMPAT_ASSIGN_VALUE(iface, ac[WIFI_AC_VI].rx_mpdu, wl_wme_cnt->rx[AC_VI].packets);
- COMPAT_ASSIGN_VALUE(iface, ac[WIFI_AC_VI].mpdu_lost,
- wl_wme_cnt->tx_failed[WIFI_AC_VI].packets);
-
- COMPAT_ASSIGN_VALUE(iface, ac[WIFI_AC_BE].ac, WIFI_AC_BE);
- COMPAT_ASSIGN_VALUE(iface, ac[WIFI_AC_BE].tx_mpdu, wl_wme_cnt->tx[AC_BE].packets);
- COMPAT_ASSIGN_VALUE(iface, ac[WIFI_AC_BE].rx_mpdu, wl_wme_cnt->rx[AC_BE].packets);
- COMPAT_ASSIGN_VALUE(iface, ac[WIFI_AC_BE].mpdu_lost,
- wl_wme_cnt->tx_failed[WIFI_AC_BE].packets);
+ COMPAT_ASSIGN_VALUE(iface, ac[WIFI_AC_BE].ac, WIFI_AC_BE);
+ COMPAT_ASSIGN_VALUE(iface, ac[WIFI_AC_BE].tx_mpdu, wl_wme_cnt->tx[AC_BE].packets);
+ COMPAT_ASSIGN_VALUE(iface, ac[WIFI_AC_BE].rx_mpdu, wl_wme_cnt->rx[AC_BE].packets);
+ COMPAT_ASSIGN_VALUE(iface, ac[WIFI_AC_BE].mpdu_lost,
+ wl_wme_cnt->tx_failed[WIFI_AC_BE].packets);
COMPAT_ASSIGN_VALUE(iface, ac[WIFI_AC_BK].ac, WIFI_AC_BK);
COMPAT_ASSIGN_VALUE(iface, ac[WIFI_AC_BK].tx_mpdu, wl_wme_cnt->tx[AC_BK].packets);
COMPAT_ASSIGN_VALUE(iface, ac[WIFI_AC_BK].mpdu_lost,
wl_wme_cnt->tx_failed[WIFI_AC_BK].packets);
+
err = wldev_iovar_getbuf(bcmcfg_to_prmry_ndev(cfg), "counters", NULL, 0,
iovar_buf, WLC_IOCTL_MAXLEN, NULL);
if (unlikely(err)) {
}
CHK_CNTBUF_DATALEN(iovar_buf, WLC_IOCTL_MAXLEN);
+
+#ifdef STAT_REPORT
+ wl_stat_report_gather(cfg, iovar_buf);
+#endif
+
/* Translate traditional (ver <= 10) counters struct to new xtlv type struct */
err = wl_cntbuf_to_xtlv_format(NULL, iovar_buf, WLC_IOCTL_MAXLEN, revinfo.corerev);
if (err != BCME_OK) {
COMPAT_ASSIGN_VALUE(iface, ac[WIFI_AC_BE].retries, wlc_cnt->txretry);
- err = wl_cfgvendor_lstats_get_bcn_mbss(iovar_buf, &rxbeaconmbss);
- if (unlikely(err)) {
- WL_ERR(("get_bcn_mbss error (%d)\n", err));
+ if ((macstat_cnt = bcm_get_data_from_xtlv_buf(((wl_cnt_info_t *)iovar_buf)->data,
+ ((wl_cnt_info_t *)iovar_buf)->datalen,
+ WL_CNT_XTLV_CNTV_LE10_UCODE, NULL,
+ BCM_XTLV_OPTION_ALIGN32)) == NULL) {
+ if ((macstat_cnt = bcm_get_data_from_xtlv_buf(((wl_cnt_info_t *)iovar_buf)->data,
+ ((wl_cnt_info_t *)iovar_buf)->datalen,
+ WL_CNT_XTLV_GE40_UCODE_V1, NULL,
+ BCM_XTLV_OPTION_ALIGN32)) == NULL) {
+ macstat_cnt = bcm_get_data_from_xtlv_buf(((wl_cnt_info_t *)iovar_buf)->data,
+ ((wl_cnt_info_t *)iovar_buf)->datalen,
+ WL_CNT_XTLV_LT40_UCODE_V1, NULL,
+ BCM_XTLV_OPTION_ALIGN32);
+ }
+ }
+
+ if (macstat_cnt == NULL) {
+ printf("wlmTxGetAckedPackets: macstat_cnt NULL!\n");
+ err = BCME_ERROR;
goto exit;
}
goto exit;
}
- COMPAT_ASSIGN_VALUE(iface, beacon_rx, rxbeaconmbss);
+ COMPAT_ASSIGN_VALUE(iface, beacon_rx, macstat_cnt->rxbeaconmbss);
COMPAT_ASSIGN_VALUE(iface, rssi_mgmt, scbval.val);
COMPAT_ASSIGN_VALUE(iface, num_peers, NUM_PEER);
COMPAT_ASSIGN_VALUE(iface, peer_info->num_rate, NUM_RATE);
err = BCME_BADLEN;
goto exit;
}
- err = wl_cfgvendor_send_cmd_reply(wiphy, outdata, total_len);
+ err = wl_cfgvendor_send_cmd_reply(wiphy, bcmcfg_to_prmry_ndev(cfg),
+ outdata,
+ total_len);
if (unlikely(err))
WL_ERR(("Vendor Command reply failed ret:%d \n", err));
exit:
if (outdata) {
- MFREE(cfg->osh, outdata, WLC_IOCTL_MAXLEN);
+ kfree(outdata);
}
return err;
}
#endif /* LINKSTAT_SUPPORT */
-#ifdef DHD_LOG_DUMP
-static int
-wl_cfgvendor_get_buf_data(const struct nlattr *iter, struct buf_data **buf)
-{
- int ret = BCME_OK;
-
- if (nla_len(iter) != sizeof(struct buf_data)) {
- WL_ERR(("Invalid len : %d\n", nla_len(iter)));
- ret = BCME_BADLEN;
- }
- (*buf) = (struct buf_data *)nla_data(iter);
- if (!(*buf) || (((*buf)->len) <= 0) || !((*buf)->data_buf[0])) {
- WL_ERR(("Invalid buffer\n"));
- ret = BCME_ERROR;
- }
- return ret;
-}
-
-static int
-wl_cfgvendor_dbg_file_dump(struct wiphy *wiphy,
- struct wireless_dev *wdev, const void *data, int len)
+#ifdef DEBUGABILITY
+static int wl_cfgvendor_dbg_start_logging(struct wiphy *wiphy,
+ struct wireless_dev *wdev, const void *data, int len)
{
- int ret = BCME_OK, rem, type = 0;
+ int ret = BCME_OK, rem, type;
+ char ring_name[DBGRING_NAME_MAX] = {0};
+ int log_level = 0, flags = 0, time_intval = 0, threshold = 0;
const struct nlattr *iter;
- char *mem_buf = NULL;
- struct sk_buff *skb = NULL;
struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
- struct buf_data *buf;
- int pos = 0;
-
- /* Alloc the SKB for vendor_event */
- skb = cfg80211_vendor_cmd_alloc_reply_skb(wiphy, CFG80211_VENDOR_CMD_REPLY_SKB_SZ);
- if (!skb) {
- WL_ERR(("skb allocation is failed\n"));
- ret = BCME_NOMEM;
- goto exit;
- }
- WL_ERR(("%s\n", __FUNCTION__));
+ dhd_pub_t *dhd_pub = cfg->pub;
nla_for_each_attr(iter, data, len, rem) {
type = nla_type(iter);
- ret = wl_cfgvendor_get_buf_data(iter, &buf);
- if (ret)
- goto exit;
switch (type) {
- case DUMP_BUF_ATTR_MEMDUMP:
- ret = dhd_os_get_socram_dump(bcmcfg_to_prmry_ndev(cfg), &mem_buf,
- (uint32 *)(&(buf->len)));
- if (ret) {
- WL_ERR(("failed to get_socram_dump : %d\n", ret));
- goto exit;
- }
- ret = dhd_export_debug_data(mem_buf, NULL, buf->data_buf[0],
- (int)buf->len, &pos);
- break;
-
- case DUMP_BUF_ATTR_TIMESTAMP :
- ret = dhd_print_time_str(buf->data_buf[0], NULL,
- (uint32)buf->len, &pos);
- break;
-#ifdef EWP_ECNTRS_LOGGING
- case DUMP_BUF_ATTR_ECNTRS :
- ret = dhd_print_ecntrs_data(bcmcfg_to_prmry_ndev(cfg), NULL,
- buf->data_buf[0], NULL, (uint32)buf->len, &pos);
- break;
-#endif /* EWP_ECNTRS_LOGGING */
-#ifdef DHD_STATUS_LOGGING
- case DUMP_BUF_ATTR_STATUS_LOG :
- ret = dhd_print_status_log_data(bcmcfg_to_prmry_ndev(cfg), NULL,
- buf->data_buf[0], NULL, (uint32)buf->len, &pos);
- break;
-#endif /* DHD_STATUS_LOGGING */
-#ifdef EWP_RTT_LOGGING
- case DUMP_BUF_ATTR_RTT_LOG :
- ret = dhd_print_rtt_data(bcmcfg_to_prmry_ndev(cfg), NULL,
- buf->data_buf[0], NULL, (uint32)buf->len, &pos);
- break;
-#endif /* EWP_RTT_LOGGING */
- case DUMP_BUF_ATTR_DHD_DUMP :
- ret = dhd_print_dump_data(bcmcfg_to_prmry_ndev(cfg), NULL,
- buf->data_buf[0], NULL, (uint32)buf->len, &pos);
- break;
-#if defined(BCMPCIE)
- case DUMP_BUF_ATTR_EXT_TRAP :
- ret = dhd_print_ext_trap_data(bcmcfg_to_prmry_ndev(cfg), NULL,
- buf->data_buf[0], NULL, (uint32)buf->len, &pos);
- break;
-#endif /* BCMPCIE */
-#if defined(DHD_FW_COREDUMP) && defined(DNGL_EVENT_SUPPORT)
- case DUMP_BUF_ATTR_HEALTH_CHK :
- ret = dhd_print_health_chk_data(bcmcfg_to_prmry_ndev(cfg), NULL,
- buf->data_buf[0], NULL, (uint32)buf->len, &pos);
- break;
-#endif // endif
- case DUMP_BUF_ATTR_COOKIE :
- ret = dhd_print_cookie_data(bcmcfg_to_prmry_ndev(cfg), NULL,
- buf->data_buf[0], NULL, (uint32)buf->len, &pos);
- break;
-#ifdef DHD_DUMP_PCIE_RINGS
- case DUMP_BUF_ATTR_FLOWRING_DUMP :
- ret = dhd_print_flowring_data(bcmcfg_to_prmry_ndev(cfg), NULL,
- buf->data_buf[0], NULL, (uint32)buf->len, &pos);
- break;
-#endif // endif
- case DUMP_BUF_ATTR_GENERAL_LOG :
- ret = dhd_get_dld_log_dump(bcmcfg_to_prmry_ndev(cfg), NULL,
- buf->data_buf[0], NULL, (uint32)buf->len,
- DLD_BUF_TYPE_GENERAL, &pos);
- break;
-
- case DUMP_BUF_ATTR_PRESERVE_LOG :
- ret = dhd_get_dld_log_dump(bcmcfg_to_prmry_ndev(cfg), NULL,
- buf->data_buf[0], NULL, (uint32)buf->len,
- DLD_BUF_TYPE_PRESERVE, &pos);
- break;
-
- case DUMP_BUF_ATTR_SPECIAL_LOG :
- ret = dhd_get_dld_log_dump(bcmcfg_to_prmry_ndev(cfg), NULL,
- buf->data_buf[0], NULL, (uint32)buf->len,
- DLD_BUF_TYPE_SPECIAL, &pos);
- break;
-#ifdef DHD_SSSR_DUMP
- case DUMP_BUF_ATTR_SSSR_C0_D11_BEFORE :
- ret = dhd_sssr_dump_d11_buf_before(bcmcfg_to_prmry_ndev(cfg),
- buf->data_buf[0], (uint32)buf->len, 0);
- break;
-
- case DUMP_BUF_ATTR_SSSR_C0_D11_AFTER :
- ret = dhd_sssr_dump_d11_buf_after(bcmcfg_to_prmry_ndev(cfg),
- buf->data_buf[0], (uint32)buf->len, 0);
- break;
-
- case DUMP_BUF_ATTR_SSSR_C1_D11_BEFORE :
- ret = dhd_sssr_dump_d11_buf_before(bcmcfg_to_prmry_ndev(cfg),
- buf->data_buf[0], (uint32)buf->len, 1);
+ case DEBUG_ATTRIBUTE_RING_NAME:
+ strncpy(ring_name, nla_data(iter),
+ MIN(sizeof(ring_name) -1, nla_len(iter)));
break;
-
- case DUMP_BUF_ATTR_SSSR_C1_D11_AFTER :
- ret = dhd_sssr_dump_d11_buf_after(bcmcfg_to_prmry_ndev(cfg),
- buf->data_buf[0], (uint32)buf->len, 1);
+ case DEBUG_ATTRIBUTE_LOG_LEVEL:
+ log_level = nla_get_u32(iter);
break;
-
- case DUMP_BUF_ATTR_SSSR_DIG_BEFORE :
- ret = dhd_sssr_dump_dig_buf_before(bcmcfg_to_prmry_ndev(cfg),
- buf->data_buf[0], (uint32)buf->len);
+ case DEBUG_ATTRIBUTE_RING_FLAGS:
+ flags = nla_get_u32(iter);
break;
-
- case DUMP_BUF_ATTR_SSSR_DIG_AFTER :
- ret = dhd_sssr_dump_dig_buf_after(bcmcfg_to_prmry_ndev(cfg),
- buf->data_buf[0], (uint32)buf->len);
+ case DEBUG_ATTRIBUTE_LOG_TIME_INTVAL:
+ time_intval = nla_get_u32(iter);
break;
-#endif /* DHD_SSSR_DUMP */
-#ifdef DNGL_AXI_ERROR_LOGGING
- case DUMP_BUF_ATTR_AXI_ERROR:
- ret = dhd_os_get_axi_error_dump(bcmcfg_to_prmry_ndev(cfg),
- buf->data_buf[0], (uint32)buf->len);
+ case DEBUG_ATTRIBUTE_LOG_MIN_DATA_SIZE:
+ threshold = nla_get_u32(iter);
break;
-#endif /* DNGL_AXI_ERROR_LOGGING */
default:
WL_ERR(("Unknown type: %d\n", type));
- ret = BCME_ERROR;
+ ret = BCME_BADADDR;
goto exit;
}
}
- if (ret)
- goto exit;
-
- ret = nla_put_u32(skb, type, (uint32)(ret));
+ ret = dhd_os_start_logging(dhd_pub, ring_name, log_level, flags, time_intval, threshold);
if (ret < 0) {
- WL_ERR(("Failed to put type, ret:%d\n", ret));
- goto exit;
- }
- ret = cfg80211_vendor_cmd_reply(skb);
- if (ret) {
- WL_ERR(("Vendor Command reply failed ret:%d \n", ret));
+ WL_ERR(("start_logging is failed ret: %d\n", ret));
}
- return ret;
exit:
- if (skb) {
- /* Free skb memory */
- kfree_skb(skb);
+ return ret;
+}
+
+static int wl_cfgvendor_dbg_reset_logging(struct wiphy *wiphy,
+ struct wireless_dev *wdev, const void *data, int len)
+{
+ int ret = BCME_OK;
+ struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+ dhd_pub_t *dhd_pub = cfg->pub;
+
+ ret = dhd_os_reset_logging(dhd_pub);
+ if (ret < 0) {
+ WL_ERR(("reset logging is failed ret: %d\n", ret));
}
+
return ret;
}
-#endif /* DHD_LOG_DUMP */
-#ifdef DEBUGABILITY
static int
wl_cfgvendor_dbg_trigger_mem_dump(struct wiphy *wiphy,
struct wireless_dev *wdev, const void *data, int len)
{
int ret = BCME_OK;
uint32 alloc_len;
- struct sk_buff *skb = NULL;
+ struct sk_buff *skb;
struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
dhd_pub_t *dhdp = (dhd_pub_t *)(cfg->pub);
- WL_ERR(("wl_cfgvendor_dbg_trigger_mem_dump %d\n", __LINE__));
-
dhdp->memdump_type = DUMP_TYPE_CFG_VENDOR_TRIGGERED;
+
ret = dhd_os_socram_dump(bcmcfg_to_prmry_ndev(cfg), &alloc_len);
if (ret) {
WL_ERR(("failed to call dhd_os_socram_dump : %d\n", ret));
goto exit;
}
/* Alloc the SKB for vendor_event */
- skb = cfg80211_vendor_cmd_alloc_reply_skb(wiphy, CFG80211_VENDOR_CMD_REPLY_SKB_SZ);
+ skb = cfg80211_vendor_cmd_alloc_reply_skb(wiphy, 100);
if (!skb) {
WL_ERR(("skb allocation is failed\n"));
ret = BCME_NOMEM;
goto exit;
}
- ret = nla_put_u32(skb, DEBUG_ATTRIBUTE_FW_DUMP_LEN, alloc_len);
-
- if (unlikely(ret)) {
- WL_ERR(("Failed to put fw dump length, ret=%d\n", ret));
- goto exit;
- }
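+ /* the reply only carries the dump length; the dump data itself is retrieved separately via DEBUG_GET_MEM_DUMP */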
+ nla_put_u32(skb, DEBUG_ATTRIBUTE_FW_DUMP_LEN, alloc_len);
ret = cfg80211_vendor_cmd_reply(skb);
if (ret) {
WL_ERR(("Vendor Command reply failed ret:%d \n", ret));
- goto exit;
}
- return ret;
+ printk("wl_cfgvendor_dbg_trigger_mem_dump ===================ret : %d\n", ret);
+
exit:
- /* Free skb memory */
- if (skb) {
- kfree_skb(skb);
- }
return ret;
}
uintptr_t user_buf = (uintptr_t)NULL;
const struct nlattr *iter;
char *mem_buf = NULL;
- struct sk_buff *skb = NULL;
+ struct sk_buff *skb;
struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
nla_for_each_attr(iter, data, len, rem) {
* buffer length is not already initialized.
*/
if ((nla_len(iter) == sizeof(uint32)) &&
- !buf_len) {
+ !buf_len) {
buf_len = nla_get_u32(iter);
- if (buf_len <= 0) {
- ret = BCME_ERROR;
- goto exit;
- }
} else {
ret = BCME_ERROR;
goto exit;
goto exit;
}
user_buf = (uintptr_t)nla_get_u64(iter);
- if (!user_buf) {
- ret = BCME_ERROR;
- goto exit;
- }
break;
default:
WL_ERR(("Unknown type: %d\n", type));
}
}
if (buf_len > 0 && user_buf) {
-#if 0
mem_buf = vmalloc(buf_len);
if (!mem_buf) {
WL_ERR(("failed to allocate mem_buf with size : %d\n", buf_len));
ret = BCME_NOMEM;
goto exit;
}
-#endif
ret = dhd_os_get_socram_dump(bcmcfg_to_prmry_ndev(cfg), &mem_buf, &buf_len);
if (ret) {
WL_ERR(("failed to get_socram_dump : %d\n", ret));
if (in_compat_syscall())
#else
if (is_compat_task())
-#endif /* LINUX_VER >= 4.6 */
+#endif
{
void * usr_ptr = compat_ptr((uintptr_t) user_buf);
ret = copy_to_user(usr_ptr, mem_buf, buf_len);
}
}
/* Alloc the SKB for vendor_event */
- skb = cfg80211_vendor_cmd_alloc_reply_skb(wiphy, CFG80211_VENDOR_CMD_REPLY_SKB_SZ);
+ skb = cfg80211_vendor_cmd_alloc_reply_skb(wiphy, 100);
if (!skb) {
WL_ERR(("skb allocation is failed\n"));
ret = BCME_NOMEM;
goto free_mem;
}
- /* Indicate the memdump is succesfully copied */
- ret = nla_put(skb, DEBUG_ATTRIBUTE_FW_DUMP_DATA, sizeof(ret), &ret);
- if (ret < 0) {
- WL_ERR(("Failed to put DEBUG_ATTRIBUTE_FW_DUMP_DATA, ret:%d\n", ret));
- goto free_mem;
- }
+ /* Indicate the memdump is successfully copied */
+ nla_put(skb, DEBUG_ATTRIBUTE_FW_DUMP_DATA, sizeof(ret), &ret);
ret = cfg80211_vendor_cmd_reply(skb);
if (ret) {
WL_ERR(("Vendor Command reply failed ret:%d \n", ret));
}
- skb = NULL;
}
free_mem:
-// vfree(mem_buf);
- /* Free skb memory */
- if (skb) {
- kfree_skb(skb);
- }
+ vfree(mem_buf);
exit:
return ret;
}
-static int wl_cfgvendor_dbg_start_logging(struct wiphy *wiphy,
- struct wireless_dev *wdev, const void *data, int len)
+static int wl_cfgvendor_dbg_get_version(struct wiphy *wiphy,
+ struct wireless_dev *wdev, const void *data, int len)
{
int ret = BCME_OK, rem, type;
- char ring_name[DBGRING_NAME_MAX] = {0};
- int log_level = 0, flags = 0, time_intval = 0, threshold = 0;
+ int buf_len = 1024;
+ bool dhd_ver = FALSE;
+ char *buf_ptr;
const struct nlattr *iter;
+ gfp_t kflags;
struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
- dhd_pub_t *dhd_pub = cfg->pub;
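+ /* this path may run in atomic context, so pick matching GFP flags for the version buffer */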
+ kflags = in_atomic() ? GFP_ATOMIC : GFP_KERNEL;
+ buf_ptr = kzalloc(buf_len, kflags);
+ if (!buf_ptr) {
+ WL_ERR(("failed to allocate the buffer for version n"));
+ ret = BCME_NOMEM;
+ goto exit;
+ }
nla_for_each_attr(iter, data, len, rem) {
type = nla_type(iter);
switch (type) {
- case DEBUG_ATTRIBUTE_RING_NAME:
- strncpy(ring_name, nla_data(iter),
- MIN(sizeof(ring_name) -1, nla_len(iter)));
- break;
- case DEBUG_ATTRIBUTE_LOG_LEVEL:
- log_level = nla_get_u32(iter);
- break;
- case DEBUG_ATTRIBUTE_RING_FLAGS:
- flags = nla_get_u32(iter);
- break;
- case DEBUG_ATTRIBUTE_LOG_TIME_INTVAL:
- time_intval = nla_get_u32(iter);
+ case DEBUG_ATTRIBUTE_GET_DRIVER:
+ dhd_ver = TRUE;
break;
- case DEBUG_ATTRIBUTE_LOG_MIN_DATA_SIZE:
- threshold = nla_get_u32(iter);
+ case DEBUG_ATTRIBUTE_GET_FW:
+ dhd_ver = FALSE;
break;
default:
WL_ERR(("Unknown type: %d\n", type));
- ret = BCME_BADADDR;
+ ret = BCME_ERROR;
goto exit;
}
}
-
- ret = dhd_os_start_logging(dhd_pub, ring_name, log_level, flags, time_intval, threshold);
+ ret = dhd_os_get_version(bcmcfg_to_prmry_ndev(cfg), dhd_ver, &buf_ptr, buf_len);
if (ret < 0) {
- WL_ERR(("start_logging is failed ret: %d\n", ret));
+ WL_ERR(("failed to get the version %d\n", ret));
+ goto exit;
}
+ ret = wl_cfgvendor_send_cmd_reply(wiphy, bcmcfg_to_prmry_ndev(cfg),
+ buf_ptr, strlen(buf_ptr));
exit:
- return ret;
-}
-
-static int wl_cfgvendor_dbg_reset_logging(struct wiphy *wiphy,
- struct wireless_dev *wdev, const void *data, int len)
-{
- int ret = BCME_OK;
- struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
- dhd_pub_t *dhd_pub = cfg->pub;
-
- ret = dhd_os_reset_logging(dhd_pub);
- if (ret < 0) {
- WL_ERR(("reset logging is failed ret: %d\n", ret));
- }
-
+ kfree(buf_ptr);
return ret;
}
dhd_dbg_ring_status_t ring_status;
struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
dhd_pub_t *dhd_pub = cfg->pub;
- bzero(dbg_ring_status, DBG_RING_STATUS_SIZE * DEBUG_RING_ID_MAX);
+ memset(dbg_ring_status, 0, DBG_RING_STATUS_SIZE * DEBUG_RING_ID_MAX);
ring_cnt = 0;
for (ring_id = DEBUG_RING_ID_INVALID + 1; ring_id < DEBUG_RING_ID_MAX; ring_id++) {
ret = dhd_os_get_ring_status(dhd_pub, ring_id, &ring_status);
}
/* Alloc the SKB for vendor_event */
skb = cfg80211_vendor_cmd_alloc_reply_skb(wiphy,
- nla_total_size(DBG_RING_STATUS_SIZE) * ring_cnt + nla_total_size(sizeof(ring_cnt)));
+ (DBG_RING_STATUS_SIZE * ring_cnt) + 100);
if (!skb) {
WL_ERR(("skb allocation is failed\n"));
ret = BCME_NOMEM;
goto exit;
}
- /* Ignore return of nla_put_u32 and nla_put since the skb allocated
- * above has a requested size for all payload
- */
- (void)nla_put_u32(skb, DEBUG_ATTRIBUTE_RING_NUM, ring_cnt);
+ nla_put_u32(skb, DEBUG_ATTRIBUTE_RING_NUM, ring_cnt);
for (i = 0; i < ring_cnt; i++) {
- (void)nla_put(skb, DEBUG_ATTRIBUTE_RING_STATUS, DBG_RING_STATUS_SIZE,
+ nla_put(skb, DEBUG_ATTRIBUTE_RING_STATUS, DBG_RING_STATUS_SIZE,
&dbg_ring_status[i]);
}
ret = cfg80211_vendor_cmd_reply(skb);
type = nla_type(iter);
switch (type) {
case DEBUG_ATTRIBUTE_RING_NAME:
- strlcpy(ring_name, nla_data(iter), sizeof(ring_name));
+ strncpy(ring_name, nla_data(iter),
+ MIN(sizeof(ring_name) - 1, nla_len(iter)));
break;
default:
WL_ERR(("Unknown type: %d\n", type));
return ret;
}
-#endif /* DEBUGABILITY */
static int wl_cfgvendor_dbg_get_feature(struct wiphy *wiphy,
struct wireless_dev *wdev, const void *data, int len)
WL_ERR(("dbg_get_feature failed ret:%d\n", ret));
goto exit;
}
- ret = wl_cfgvendor_send_cmd_reply(wiphy, &supported_features,
- sizeof(supported_features));
+ ret = wl_cfgvendor_send_cmd_reply(wiphy, bcmcfg_to_prmry_ndev(cfg),
+ &supported_features, sizeof(supported_features));
if (ret < 0) {
WL_ERR(("wl_cfgvendor_send_cmd_reply failed ret:%d\n", ret));
goto exit;
return ret;
}
-#ifdef DEBUGABILITY
static void wl_cfgvendor_dbg_ring_send_evt(void *ctx,
const int ring_id, const void *data, const uint32 len,
const dhd_dbg_ring_status_t ring_status)
nla_put(skb, DEBUG_ATTRIBUTE_RING_DATA, len, data);
cfg80211_vendor_event(skb, kflags);
}
-#endif /* DEBUGABILITY */
-
-#ifdef DHD_LOG_DUMP
-static int wl_cfgvendor_nla_put_sssr_dump_data(struct sk_buff *skb,
- struct net_device *ndev)
-{
- int ret = BCME_OK;
-#ifdef DHD_SSSR_DUMP
- uint32 arr_len[DUMP_SSSR_ATTR_COUNT];
- int i = 0, j = 0;
-#endif /* DHD_SSSR_DUMP */
- char memdump_path[MEMDUMP_PATH_LEN];
-
- dhd_get_memdump_filename(ndev, memdump_path, MEMDUMP_PATH_LEN,
- "sssr_dump_core_0_before_SR");
- ret = nla_put_string(skb, DUMP_FILENAME_ATTR_SSSR_CORE_0_BEFORE_DUMP, memdump_path);
- if (unlikely(ret)) {
- WL_ERR(("Failed to nla put sssr core 0 before dump path, ret=%d\n", ret));
- goto exit;
- }
-
- dhd_get_memdump_filename(ndev, memdump_path, MEMDUMP_PATH_LEN,
- "sssr_dump_core_0_after_SR");
- ret = nla_put_string(skb, DUMP_FILENAME_ATTR_SSSR_CORE_0_AFTER_DUMP, memdump_path);
- if (unlikely(ret)) {
- WL_ERR(("Failed to nla put sssr core 1 after dump path, ret=%d\n", ret));
- goto exit;
- }
-
- dhd_get_memdump_filename(ndev, memdump_path, MEMDUMP_PATH_LEN,
- "sssr_dump_core_1_before_SR");
- ret = nla_put_string(skb, DUMP_FILENAME_ATTR_SSSR_CORE_1_BEFORE_DUMP, memdump_path);
- if (unlikely(ret)) {
- WL_ERR(("Failed to nla put sssr core 1 before dump path, ret=%d\n", ret));
- goto exit;
- }
-
- dhd_get_memdump_filename(ndev, memdump_path, MEMDUMP_PATH_LEN,
- "sssr_dump_core_1_after_SR");
- ret = nla_put_string(skb, DUMP_FILENAME_ATTR_SSSR_CORE_1_AFTER_DUMP, memdump_path);
- if (unlikely(ret)) {
- WL_ERR(("Failed to nla put sssr core 1 after dump path, ret=%d\n", ret));
- goto exit;
- }
-
- dhd_get_memdump_filename(ndev, memdump_path, MEMDUMP_PATH_LEN,
- "sssr_dump_dig_before_SR");
- ret = nla_put_string(skb, DUMP_FILENAME_ATTR_SSSR_DIG_BEFORE_DUMP, memdump_path);
- if (unlikely(ret)) {
- WL_ERR(("Failed to nla put sssr dig before dump path, ret=%d\n", ret));
- goto exit;
- }
-
- dhd_get_memdump_filename(ndev, memdump_path, MEMDUMP_PATH_LEN,
- "sssr_dump_dig_after_SR");
- ret = nla_put_string(skb, DUMP_FILENAME_ATTR_SSSR_DIG_AFTER_DUMP, memdump_path);
- if (unlikely(ret)) {
- WL_ERR(("Failed to nla put sssr dig after dump path, ret=%d\n", ret));
- goto exit;
- }
-
-#ifdef DHD_SSSR_DUMP
- memset(arr_len, 0, sizeof(arr_len));
- dhd_nla_put_sssr_dump_len(ndev, arr_len);
-
- for (i = 0, j = DUMP_SSSR_ATTR_START; i < DUMP_SSSR_ATTR_COUNT; i++, j++) {
- if (arr_len[i]) {
- ret = nla_put_u32(skb, j, arr_len[i]);
- if (unlikely(ret)) {
- WL_ERR(("Failed to nla put sssr dump len, ret=%d\n", ret));
- goto exit;
- }
- }
- }
-#endif /* DHD_SSSR_DUMP */
-
-exit:
- return ret;
-}
-
-static int wl_cfgvendor_nla_put_debug_dump_data(struct sk_buff *skb,
- struct net_device *ndev)
-{
- int ret = BCME_OK;
- uint32 len = 0;
- char dump_path[128];
-
- ret = dhd_get_debug_dump_file_name(ndev, NULL, dump_path, sizeof(dump_path));
- if (ret < 0) {
- WL_ERR(("%s: Failed to get debug dump filename\n", __FUNCTION__));
- goto exit;
- }
- ret = nla_put_string(skb, DUMP_FILENAME_ATTR_DEBUG_DUMP, dump_path);
- if (unlikely(ret)) {
- WL_ERR(("Failed to nla put debug dump path, ret=%d\n", ret));
- goto exit;
- }
- WL_ERR(("debug_dump path = %s%s\n", dump_path, FILE_NAME_HAL_TAG));
- wl_print_verinfo(wl_get_cfg(ndev));
-
- len = dhd_get_time_str_len();
- if (len) {
- ret = nla_put_u32(skb, DUMP_LEN_ATTR_TIMESTAMP, len);
- if (unlikely(ret)) {
- WL_ERR(("Failed to nla put time stamp length, ret=%d\n", ret));
- goto exit;
- }
- }
-
- len = dhd_get_dld_len(DLD_BUF_TYPE_GENERAL);
- if (len) {
- ret = nla_put_u32(skb, DUMP_LEN_ATTR_GENERAL_LOG, len);
- if (unlikely(ret)) {
- WL_ERR(("Failed to nla put general log length, ret=%d\n", ret));
- goto exit;
- }
- }
-#ifdef EWP_ECNTRS_LOGGING
- len = dhd_get_ecntrs_len(ndev, NULL);
- if (len) {
- ret = nla_put_u32(skb, DUMP_LEN_ATTR_ECNTRS, len);
- if (unlikely(ret)) {
- WL_ERR(("Failed to nla put ecntrs length, ret=%d\n", ret));
- goto exit;
- }
- }
-#endif /* EWP_ECNTRS_LOGGING */
- len = dhd_get_dld_len(DLD_BUF_TYPE_SPECIAL);
- if (len) {
- ret = nla_put_u32(skb, DUMP_LEN_ATTR_SPECIAL_LOG, len);
- if (unlikely(ret)) {
- WL_ERR(("Failed to nla put special log length, ret=%d\n", ret));
- goto exit;
- }
- }
- len = dhd_get_dhd_dump_len(ndev, NULL);
- if (len) {
- ret = nla_put_u32(skb, DUMP_LEN_ATTR_DHD_DUMP, len);
- if (unlikely(ret)) {
- WL_ERR(("Failed to nla put dhd dump length, ret=%d\n", ret));
- goto exit;
- }
- }
-
-#if defined(BCMPCIE)
- len = dhd_get_ext_trap_len(ndev, NULL);
- if (len) {
- ret = nla_put_u32(skb, DUMP_LEN_ATTR_EXT_TRAP, len);
- if (unlikely(ret)) {
- WL_ERR(("Failed to nla put ext trap length, ret=%d\n", ret));
- goto exit;
- }
- }
-#endif /* BCMPCIE */
-
-#if defined(DHD_FW_COREDUMP) && defined(DNGL_EVENT_SUPPORT)
- len = dhd_get_health_chk_len(ndev, NULL);
- if (len) {
- ret = nla_put_u32(skb, DUMP_LEN_ATTR_HEALTH_CHK, len);
- if (unlikely(ret)) {
- WL_ERR(("Failed to nla put health check length, ret=%d\n", ret));
- goto exit;
- }
- }
-#endif // endif
-
- len = dhd_get_dld_len(DLD_BUF_TYPE_PRESERVE);
- if (len) {
- ret = nla_put_u32(skb, DUMP_LEN_ATTR_PRESERVE_LOG, len);
- if (unlikely(ret)) {
- WL_ERR(("Failed to nla put preserve log length, ret=%d\n", ret));
- goto exit;
- }
- }
-
- len = dhd_get_cookie_log_len(ndev, NULL);
- if (len) {
- ret = nla_put_u32(skb, DUMP_LEN_ATTR_COOKIE, len);
- if (unlikely(ret)) {
- WL_ERR(("Failed to nla put cookie length, ret=%d\n", ret));
- goto exit;
- }
- }
-#ifdef DHD_DUMP_PCIE_RINGS
- len = dhd_get_flowring_len(ndev, NULL);
- if (len) {
- ret = nla_put_u32(skb, DUMP_LEN_ATTR_FLOWRING_DUMP, len);
- if (unlikely(ret)) {
- WL_ERR(("Failed to nla put flowring dump length, ret=%d\n", ret));
- goto exit;
- }
- }
-#endif // endif
-#ifdef DHD_STATUS_LOGGING
- len = dhd_get_status_log_len(ndev, NULL);
- if (len) {
- ret = nla_put_u32(skb, DUMP_LEN_ATTR_STATUS_LOG, len);
- if (unlikely(ret)) {
- WL_ERR(("Failed to nla put status log length, ret=%d\n", ret));
- goto exit;
- }
- }
-#endif /* DHD_STATUS_LOGGING */
-#ifdef EWP_RTT_LOGGING
- len = dhd_get_rtt_len(ndev, NULL);
- if (len) {
- ret = nla_put_u32(skb, DUMP_LEN_ATTR_RTT_LOG, len);
- if (unlikely(ret)) {
- WL_ERR(("Failed to nla put rtt log length, ret=%d\n", ret));
- goto exit;
- }
- }
-#endif /* EWP_RTT_LOGGING */
-exit:
- return ret;
-}
-#ifdef DNGL_AXI_ERROR_LOGGING
-static void wl_cfgvendor_nla_put_axi_error_data(struct sk_buff *skb,
- struct net_device *ndev)
-{
- int ret = 0;
- char axierrordump_path[MEMDUMP_PATH_LEN];
- int dumpsize = dhd_os_get_axi_error_dump_size(ndev);
- if (dumpsize <= 0) {
- WL_ERR(("Failed to calcuate axi error dump len\n"));
- return;
- }
- dhd_os_get_axi_error_filename(ndev, axierrordump_path, MEMDUMP_PATH_LEN);
- ret = nla_put_string(skb, DUMP_FILENAME_ATTR_AXI_ERROR_DUMP, axierrordump_path);
- if (ret) {
- WL_ERR(("Failed to put filename\n"));
- return;
- }
- ret = nla_put_u32(skb, DUMP_LEN_ATTR_AXI_ERROR, dumpsize);
- if (ret) {
- WL_ERR(("Failed to put filesize\n"));
- return;
- }
-}
-#endif /* DNGL_AXI_ERROR_LOGGING */
-
-static int wl_cfgvendor_nla_put_memdump_data(struct sk_buff *skb,
- struct net_device *ndev, const uint32 fw_len)
-{
- char memdump_path[MEMDUMP_PATH_LEN];
- int ret = BCME_OK;
-
- dhd_get_memdump_filename(ndev, memdump_path, MEMDUMP_PATH_LEN, "mem_dump");
- ret = nla_put_string(skb, DUMP_FILENAME_ATTR_MEM_DUMP, memdump_path);
- if (unlikely(ret)) {
- WL_ERR(("Failed to nla put mem dump path, ret=%d\n", ret));
- goto exit;
- }
- ret = nla_put_u32(skb, DUMP_LEN_ATTR_MEMDUMP, fw_len);
- if (unlikely(ret)) {
- WL_ERR(("Failed to nla put mem dump length, ret=%d\n", ret));
- goto exit;
- }
-
-exit:
- return ret;
-}
-static void wl_cfgvendor_dbg_send_file_dump_evt(void *ctx, const void *data,
+static void wl_cfgvendor_dbg_send_urgent_evt(void *ctx, const void *data,
const uint32 len, const uint32 fw_len)
{
struct net_device *ndev = ctx;
struct wiphy *wiphy;
gfp_t kflags;
- struct sk_buff *skb = NULL;
- struct bcm_cfg80211 *cfg;
- dhd_pub_t *dhd_pub;
- int ret = BCME_OK;
-
+ struct sk_buff *skb;
if (!ndev) {
WL_ERR(("ndev is NULL\n"));
return;
}
-
kflags = in_atomic() ? GFP_ATOMIC : GFP_KERNEL;
wiphy = ndev->ieee80211_ptr->wiphy;
/* Alloc the SKB for vendor_event */
#if (defined(CONFIG_ARCH_MSM) && defined(SUPPORT_WDEV_CFG80211_VENDOR_EVENT_ALLOC)) || \
LINUX_VERSION_CODE >= KERNEL_VERSION(4, 1, 0)
- skb = cfg80211_vendor_event_alloc(wiphy, NULL, len + CFG80211_VENDOR_EVT_SKB_SZ,
- GOOGLE_FILE_DUMP_EVENT, kflags);
+ skb = cfg80211_vendor_event_alloc(wiphy, NULL, len + 100,
+ GOOGLE_FW_DUMP_EVENT, kflags);
#else
- skb = cfg80211_vendor_event_alloc(wiphy, len + CFG80211_VENDOR_EVT_SKB_SZ,
- GOOGLE_FILE_DUMP_EVENT, kflags);
+ skb = cfg80211_vendor_event_alloc(wiphy, len + 100,
+ GOOGLE_FW_DUMP_EVENT, kflags);
#endif /* (defined(CONFIG_ARCH_MSM) && defined(SUPPORT_WDEV_CFG80211_VENDOR_EVENT_ALLOC)) || */
/* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 1, 0) */
if (!skb) {
WL_ERR(("skb alloc failed"));
return;
}
-
- cfg = wiphy_priv(wiphy);
- dhd_pub = cfg->pub;
-#ifdef DNGL_AXI_ERROR_LOGGING
- if (dhd_pub->smmu_fault_occurred) {
- wl_cfgvendor_nla_put_axi_error_data(skb, ndev);
- }
-#endif /* DNGL_AXI_ERROR_LOGGING */
-#ifdef DHD_FW_COREDUMP
- if (dhd_pub->memdump_enabled || (dhd_pub->memdump_type == DUMP_TYPE_BY_SYSDUMP))
-#else
- if ((dhd_pub->memdump_type == DUMP_TYPE_BY_SYSDUMP))
-#endif
- {
- if (((ret = wl_cfgvendor_nla_put_memdump_data(skb, ndev, fw_len)) < 0) ||
- ((ret = wl_cfgvendor_nla_put_debug_dump_data(skb, ndev)) < 0) ||
- ((ret = wl_cfgvendor_nla_put_sssr_dump_data(skb, ndev)) < 0)) {
- WL_ERR(("nla put failed\n"));
- goto done;
- }
- }
- /* TODO : Similar to above function add for debug_dump, sssr_dump, and pktlog also. */
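+ /* event payload: firmware dump length followed by the raw ring data */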
+ nla_put_u32(skb, DEBUG_ATTRIBUTE_FW_DUMP_LEN, fw_len);
+ nla_put(skb, DEBUG_ATTRIBUTE_RING_DATA, len, data);
cfg80211_vendor_event(skb, kflags);
- return;
-done:
- if (skb) {
- dev_kfree_skb_any(skb);
- }
-}
-#endif /* DHD_LOG_DUMP */
-
-static int wl_cfgvendor_dbg_get_version(struct wiphy *wiphy,
- struct wireless_dev *wdev, const void *data, int len)
-{
- int ret = BCME_OK, rem, type;
- int buf_len = 1024;
- bool dhd_ver = FALSE;
- char *buf_ptr;
- const struct nlattr *iter;
- struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
-
- buf_ptr = (char *)MALLOCZ(cfg->osh, buf_len);
- if (!buf_ptr) {
- WL_ERR(("failed to allocate the buffer for version n"));
- ret = BCME_NOMEM;
- goto exit;
- }
- nla_for_each_attr(iter, data, len, rem) {
- type = nla_type(iter);
- switch (type) {
- case DEBUG_ATTRIBUTE_GET_DRIVER:
- dhd_ver = TRUE;
- break;
- case DEBUG_ATTRIBUTE_GET_FW:
- dhd_ver = FALSE;
- break;
- default:
- WL_ERR(("Unknown type: %d\n", type));
- ret = BCME_ERROR;
- goto exit;
- }
- }
- ret = dhd_os_get_version(bcmcfg_to_prmry_ndev(cfg), dhd_ver, &buf_ptr, buf_len);
- if (ret < 0) {
- WL_ERR(("failed to get the version %d\n", ret));
- goto exit;
- }
- ret = wl_cfgvendor_send_cmd_reply(wiphy, buf_ptr, strlen(buf_ptr));
-exit:
- MFREE(cfg->osh, buf_ptr, buf_len);
- return ret;
}
+#endif /* DEBUGABILITY */
#ifdef DBG_PKT_MON
static int wl_cfgvendor_dbg_start_pkt_fate_monitoring(struct wiphy *wiphy,
goto exit;
}
- ret = nla_put_u32(skb, DEBUG_ATTRIBUTE_PKT_FATE_NUM, resp_count);
- if (ret < 0) {
- WL_ERR(("Failed to put DEBUG_ATTRIBUTE_PKT_FATE_NUM, ret:%d\n", ret));
- goto exit;
- }
+ nla_put_u32(skb, DEBUG_ATTRIBUTE_PKT_FATE_NUM, resp_count);
ret = cfg80211_vendor_cmd_reply(skb);
if (unlikely(ret)) {
WL_ERR(("vendor Command reply failed ret:%d \n", ret));
}
- return ret;
exit:
- /* Free skb memory */
- if (skb) {
- kfree_skb(skb);
- }
return ret;
}
const struct nlattr *iter;
struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
dhd_pub_t *dhd_pub = cfg->pub;
+ gfp_t kflags = in_atomic() ? GFP_ATOMIC : GFP_KERNEL;
nla_for_each_attr(iter, data, len, rem) {
type = nla_type(iter);
}
break;
case MKEEP_ALIVE_ATTRIBUTE_IP_PKT:
- if (ip_pkt) {
- ret = BCME_BADARG;
- WL_ERR(("ip_pkt already allocated\n"));
- goto exit;
- }
if (!ip_pkt_len) {
ret = BCME_BADARG;
WL_ERR(("ip packet length is 0\n"));
goto exit;
}
- ip_pkt = (u8 *)MALLOCZ(cfg->osh, ip_pkt_len);
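+ /* buffer for the keep-alive IP packet payload carried in the netlink attribute */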
+ ip_pkt = (u8 *)kzalloc(ip_pkt_len, kflags);
if (ip_pkt == NULL) {
ret = BCME_NOMEM;
WL_ERR(("Failed to allocate mem for ip packet\n"));
exit:
if (ip_pkt) {
- MFREE(cfg->osh, ip_pkt, ip_pkt_len);
+ kfree(ip_pkt);
}
return ret;
struct wireless_dev *wdev, const void *data, int len)
{
struct net_device *ndev = wdev_to_ndev(wdev);
- struct sk_buff *skb = NULL;
+ struct sk_buff *skb;
int ret, ver, max_len, mem_needed;
/* APF version */
return -ENOMEM;
}
- ret = nla_put_u32(skb, APF_ATTRIBUTE_VERSION, ver);
- if (ret < 0) {
- WL_ERR(("Failed to put APF_ATTRIBUTE_VERSION, ret:%d\n", ret));
- goto exit;
- }
- ret = nla_put_u32(skb, APF_ATTRIBUTE_MAX_LEN, max_len);
- if (ret < 0) {
- WL_ERR(("Failed to put APF_ATTRIBUTE_MAX_LEN, ret:%d\n", ret));
- goto exit;
- }
-
+ nla_put_u32(skb, APF_ATTRIBUTE_VERSION, ver);
+ nla_put_u32(skb, APF_ATTRIBUTE_MAX_LEN, max_len);
+
ret = cfg80211_vendor_cmd_reply(skb);
if (unlikely(ret)) {
WL_ERR(("vendor command reply failed, ret=%d\n", ret));
}
- return ret;
-exit:
- /* Free skb memory */
- kfree_skb(skb);
+
return ret;
}
u8 *program = NULL;
u32 program_len = 0;
int ret, tmp, type;
- struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+ gfp_t kflags;
if (len <= 0) {
WL_ERR(("Invalid len: %d\n", len));
}
break;
case APF_ATTRIBUTE_PROGRAM:
- if (unlikely(program)) {
- WL_ERR(("program already allocated\n"));
- ret = -EINVAL;
- goto exit;
- }
if (unlikely(!program_len)) {
WL_ERR(("program len is not set\n"));
ret = -EINVAL;
goto exit;
}
- if (nla_len(iter) != program_len) {
- WL_ERR(("program_len is not same\n"));
- ret = -EINVAL;
- goto exit;
- }
- program = MALLOCZ(cfg->osh, program_len);
+ kflags = in_atomic() ? GFP_ATOMIC : GFP_KERNEL;
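+ /* holds the APF program bytes passed down from user space */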
+ program = kzalloc(program_len, kflags);
if (unlikely(!program)) {
WL_ERR(("%s: can't allocate %d bytes\n",
__FUNCTION__, program_len));
exit:
if (program) {
- MFREE(cfg->osh, program, program_len);
+ kfree(program);
}
return ret;
}
}
#endif /* NDO_CONFIG_SUPPORT */
-/* for kernel >= 4.13 NL80211 wl_cfg80211_set_pmk have to be used. */
-#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 13, 0))
-static int wl_cfgvendor_set_pmk(struct wiphy *wiphy,
- struct wireless_dev *wdev, const void *data, int len)
-{
- int ret = 0;
- wsec_pmk_t pmk;
- const struct nlattr *iter;
- int rem, type;
- struct net_device *ndev = wdev_to_ndev(wdev);
- struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
- struct wl_security *sec;
-
- nla_for_each_attr(iter, data, len, rem) {
- type = nla_type(iter);
- switch (type) {
- case BRCM_ATTR_DRIVER_KEY_PMK:
- if (nla_len(iter) > sizeof(pmk.key)) {
- ret = -EINVAL;
- goto exit;
- }
- pmk.flags = 0;
- pmk.key_len = htod16(nla_len(iter));
- bcopy((uint8 *)nla_data(iter), pmk.key, len);
- break;
- default:
- WL_ERR(("Unknown type: %d\n", type));
- ret = BCME_BADARG;
- goto exit;
- }
- }
-
- sec = wl_read_prof(cfg, ndev, WL_PROF_SEC);
- if ((sec->wpa_auth == WLAN_AKM_SUITE_8021X) ||
- (sec->wpa_auth == WL_AKM_SUITE_SHA256_1X)) {
- ret = wldev_iovar_setbuf(ndev, "okc_info_pmk", pmk.key, pmk.key_len, cfg->ioctl_buf,
- WLC_IOCTL_SMLEN, &cfg->ioctl_buf_sync);
- if (ret) {
- /* could fail in case that 'okc' is not supported */
- WL_INFORM_MEM(("okc_info_pmk failed, err=%d (ignore)\n", ret));
- }
- }
-
- ret = wldev_ioctl_set(ndev, WLC_SET_WSEC_PMK, &pmk, sizeof(pmk));
- WL_INFORM_MEM(("IOVAR set_pmk ret:%d", ret));
-exit:
- return ret;
-}
-#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(4, 13, 0) */
-
-static int wl_cfgvendor_get_driver_feature(struct wiphy *wiphy,
- struct wireless_dev *wdev, const void *data, int len)
-{
- int ret = BCME_OK;
- u8 supported[(BRCM_WLAN_VENDOR_FEATURES_MAX / 8) + 1] = {0};
- struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
- dhd_pub_t *dhd_pub = cfg->pub;
- struct sk_buff *skb;
- int32 mem_needed;
-
- mem_needed = VENDOR_REPLY_OVERHEAD + NLA_HDRLEN + sizeof(supported);
-
- BCM_REFERENCE(dhd_pub);
-
-#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 13, 0))
- if (FW_SUPPORTED(dhd_pub, idsup)) {
- ret = wl_features_set(supported, sizeof(supported),
- BRCM_WLAN_VENDOR_FEATURE_KEY_MGMT_OFFLOAD);
- }
-#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(4, 13, 0) */
-
- /* Alloc the SKB for vendor_event */
- skb = cfg80211_vendor_cmd_alloc_reply_skb(wiphy, mem_needed);
- if (unlikely(!skb)) {
- WL_ERR(("skb alloc failed"));
- ret = BCME_NOMEM;
- goto exit;
- }
-
- ret = nla_put(skb, BRCM_ATTR_DRIVER_FEATURE_FLAGS, sizeof(supported), supported);
- if (ret) {
- kfree_skb(skb);
- goto exit;
- }
- ret = cfg80211_vendor_cmd_reply(skb);
-exit:
- return ret;
-}
-
static const struct wiphy_vendor_command wl_vendor_cmds [] = {
{
{
.subcmd = BRCM_VENDOR_SCMD_PRIV_STR
},
.flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV,
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 3, 0))
- .policy = VENDOR_CMD_RAW_DATA,
-#endif
.doit = wl_cfgvendor_priv_string_handler
},
-#ifdef BCM_PRIV_CMD_SUPPORT
{
{
.vendor_id = OUI_BRCM,
.subcmd = BRCM_VENDOR_SCMD_BCM_STR
},
.flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV,
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 3, 0))
- .policy = VENDOR_CMD_RAW_DATA,
-#endif
.doit = wl_cfgvendor_priv_bcm_handler
},
-#endif /* BCM_PRIV_CMD_SUPPORT */
-#ifdef WL_SAE
- {
- {
- .vendor_id = OUI_BRCM,
- .subcmd = BRCM_VENDOR_SCMD_BCM_PSK
- },
- .flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV,
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 3, 0))
- .policy = VENDOR_CMD_RAW_DATA,
-#endif
- .doit = wl_cfgvendor_set_sae_password
- },
-#endif /* WL_SAE */
#ifdef GSCAN_SUPPORT
{
{
.subcmd = GSCAN_SUBCMD_GET_CAPABILITIES
},
.flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV,
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 3, 0))
- .policy = VENDOR_CMD_RAW_DATA,
-#endif
- .doit = wl_cfgvendor_gscan_get_capabilities
- },
- {
- {
- .vendor_id = OUI_GOOGLE,
- .subcmd = GSCAN_SUBCMD_SET_CONFIG
- },
- .flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV,
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 3, 0))
- .policy = VENDOR_CMD_RAW_DATA,
-#endif
- .doit = wl_cfgvendor_set_scan_cfg
- },
- {
- {
- .vendor_id = OUI_GOOGLE,
- .subcmd = GSCAN_SUBCMD_SET_SCAN_CONFIG
- },
- .flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV,
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 3, 0))
- .policy = VENDOR_CMD_RAW_DATA,
-#endif
- .doit = wl_cfgvendor_set_batch_scan_cfg
- },
- {
- {
- .vendor_id = OUI_GOOGLE,
- .subcmd = GSCAN_SUBCMD_ENABLE_GSCAN
- },
- .flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV,
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 3, 0))
- .policy = VENDOR_CMD_RAW_DATA,
-#endif
- .doit = wl_cfgvendor_initiate_gscan
- },
- {
- {
- .vendor_id = OUI_GOOGLE,
- .subcmd = GSCAN_SUBCMD_ENABLE_FULL_SCAN_RESULTS
- },
- .flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV,
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 3, 0))
- .policy = VENDOR_CMD_RAW_DATA,
-#endif
- .doit = wl_cfgvendor_enable_full_scan_result
- },
- {
- {
- .vendor_id = OUI_GOOGLE,
- .subcmd = GSCAN_SUBCMD_SET_HOTLIST
- },
- .flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV,
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 3, 0))
- .policy = VENDOR_CMD_RAW_DATA,
-#endif
- .doit = wl_cfgvendor_hotlist_cfg
- },
- {
- {
- .vendor_id = OUI_GOOGLE,
- .subcmd = GSCAN_SUBCMD_GET_SCAN_RESULTS
- },
- .flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV,
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 3, 0))
- .policy = VENDOR_CMD_RAW_DATA,
-#endif
- .doit = wl_cfgvendor_gscan_get_batch_results
- },
-#endif /* GSCAN_SUPPORT */
-#if defined(GSCAN_SUPPORT) || defined(DHD_GET_VALID_CHANNELS)
- {
- {
- .vendor_id = OUI_GOOGLE,
- .subcmd = GSCAN_SUBCMD_GET_CHANNEL_LIST
- },
- .flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV,
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 3, 0))
- .policy = VENDOR_CMD_RAW_DATA,
-#endif
- .doit = wl_cfgvendor_gscan_get_channel_list
- },
-#endif /* GSCAN_SUPPORT || DHD_GET_VALID_CHANNELS */
-#ifdef RTT_SUPPORT
- {
- {
- .vendor_id = OUI_GOOGLE,
- .subcmd = RTT_SUBCMD_SET_CONFIG
- },
- .flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV,
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 3, 0))
- .policy = VENDOR_CMD_RAW_DATA,
-#endif
- .doit = wl_cfgvendor_rtt_set_config
- },
- {
- {
- .vendor_id = OUI_GOOGLE,
- .subcmd = RTT_SUBCMD_CANCEL_CONFIG
- },
- .flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV,
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 3, 0))
- .policy = VENDOR_CMD_RAW_DATA,
-#endif
- .doit = wl_cfgvendor_rtt_cancel_config
- },
- {
- {
- .vendor_id = OUI_GOOGLE,
- .subcmd = RTT_SUBCMD_GETCAPABILITY
- },
- .flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV,
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 3, 0))
- .policy = VENDOR_CMD_RAW_DATA,
-#endif
- .doit = wl_cfgvendor_rtt_get_capability
- },
- {
- {
- .vendor_id = OUI_GOOGLE,
- .subcmd = RTT_SUBCMD_GETAVAILCHANNEL
- },
- .flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV,
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 3, 0))
- .policy = VENDOR_CMD_RAW_DATA,
-#endif
- .doit = wl_cfgvendor_rtt_get_responder_info
- },
- {
- {
- .vendor_id = OUI_GOOGLE,
- .subcmd = RTT_SUBCMD_SET_RESPONDER
- },
- .flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV,
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 3, 0))
- .policy = VENDOR_CMD_RAW_DATA,
-#endif
- .doit = wl_cfgvendor_rtt_set_responder
- },
- {
- {
- .vendor_id = OUI_GOOGLE,
- .subcmd = RTT_SUBCMD_CANCEL_RESPONDER
- },
- .flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV,
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 3, 0))
- .policy = VENDOR_CMD_RAW_DATA,
-#endif
- .doit = wl_cfgvendor_rtt_cancel_responder
- },
-#endif /* RTT_SUPPORT */
- {
- {
- .vendor_id = OUI_GOOGLE,
- .subcmd = ANDR_WIFI_SUBCMD_GET_FEATURE_SET
- },
- .flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV,
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 3, 0))
- .policy = VENDOR_CMD_RAW_DATA,
-#endif
- .doit = wl_cfgvendor_get_feature_set
- },
- {
- {
- .vendor_id = OUI_GOOGLE,
- .subcmd = ANDR_WIFI_SUBCMD_GET_FEATURE_SET_MATRIX
- },
- .flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV,
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 3, 0))
- .policy = VENDOR_CMD_RAW_DATA,
-#endif
- .doit = wl_cfgvendor_get_feature_set_matrix
- },
- {
- {
- .vendor_id = OUI_GOOGLE,
- .subcmd = ANDR_WIFI_RANDOM_MAC_OUI
- },
- .flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV,
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 3, 0))
- .policy = VENDOR_CMD_RAW_DATA,
-#endif
- .doit = wl_cfgvendor_set_rand_mac_oui
- },
-#ifdef CUSTOM_FORCE_NODFS_FLAG
- {
- {
- .vendor_id = OUI_GOOGLE,
- .subcmd = ANDR_WIFI_NODFS_CHANNELS
- },
- .flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV,
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 3, 0))
- .policy = VENDOR_CMD_RAW_DATA,
-#endif
- .doit = wl_cfgvendor_set_nodfs_flag
- },
-#endif /* CUSTOM_FORCE_NODFS_FLAG */
- {
- {
- .vendor_id = OUI_GOOGLE,
- .subcmd = ANDR_WIFI_SET_COUNTRY
- },
- .flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV,
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 3, 0))
- .policy = VENDOR_CMD_RAW_DATA,
-#endif
- .doit = wl_cfgvendor_set_country
- },
-#ifdef LINKSTAT_SUPPORT
- {
- {
- .vendor_id = OUI_GOOGLE,
- .subcmd = LSTATS_SUBCMD_GET_INFO
- },
- .flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV,
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 3, 0))
- .policy = VENDOR_CMD_RAW_DATA,
-#endif
- .doit = wl_cfgvendor_lstats_get_info
+ .doit = wl_cfgvendor_gscan_get_capabilities
},
-#endif /* LINKSTAT_SUPPORT */
-
-#ifdef GSCAN_SUPPORT
{
{
.vendor_id = OUI_GOOGLE,
- .subcmd = GSCAN_SUBCMD_SET_EPNO_SSID
+ .subcmd = GSCAN_SUBCMD_SET_CONFIG
},
.flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV,
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 3, 0))
- .policy = VENDOR_CMD_RAW_DATA,
-#endif
- .doit = wl_cfgvendor_epno_cfg
-
+ .doit = wl_cfgvendor_set_scan_cfg
},
{
{
.vendor_id = OUI_GOOGLE,
- .subcmd = WIFI_SUBCMD_SET_LAZY_ROAM_PARAMS
+ .subcmd = GSCAN_SUBCMD_SET_SCAN_CONFIG
},
.flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV,
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 3, 0))
- .policy = VENDOR_CMD_RAW_DATA,
-#endif
- .doit = wl_cfgvendor_set_lazy_roam_cfg
-
+ .doit = wl_cfgvendor_set_batch_scan_cfg
},
{
{
.vendor_id = OUI_GOOGLE,
- .subcmd = WIFI_SUBCMD_ENABLE_LAZY_ROAM
+ .subcmd = GSCAN_SUBCMD_ENABLE_GSCAN
},
.flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV,
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 3, 0))
- .policy = VENDOR_CMD_RAW_DATA,
-#endif
- .doit = wl_cfgvendor_enable_lazy_roam
-
+ .doit = wl_cfgvendor_initiate_gscan
},
{
{
.vendor_id = OUI_GOOGLE,
- .subcmd = WIFI_SUBCMD_SET_BSSID_PREF
+ .subcmd = GSCAN_SUBCMD_ENABLE_FULL_SCAN_RESULTS
},
.flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV,
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 3, 0))
- .policy = VENDOR_CMD_RAW_DATA,
-#endif
- .doit = wl_cfgvendor_set_bssid_pref
-
+ .doit = wl_cfgvendor_enable_full_scan_result
},
-#endif /* GSCAN_SUPPORT */
-#if defined(GSCAN_SUPPORT) || defined(ROAMEXP_SUPPORT)
{
{
.vendor_id = OUI_GOOGLE,
- .subcmd = WIFI_SUBCMD_SET_SSID_WHITELIST
+ .subcmd = GSCAN_SUBCMD_SET_HOTLIST
},
.flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV,
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 3, 0))
- .policy = VENDOR_CMD_RAW_DATA,
-#endif
- .doit = wl_cfgvendor_set_ssid_whitelist
-
+ .doit = wl_cfgvendor_hotlist_cfg
},
{
{
.vendor_id = OUI_GOOGLE,
- .subcmd = WIFI_SUBCMD_SET_BSSID_BLACKLIST
+ .subcmd = GSCAN_SUBCMD_GET_SCAN_RESULTS
},
.flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV,
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 3, 0))
- .policy = VENDOR_CMD_RAW_DATA,
-#endif
- .doit = wl_cfgvendor_set_bssid_blacklist
+ .doit = wl_cfgvendor_gscan_get_batch_results
},
-#endif /* GSCAN_SUPPORT || ROAMEXP_SUPPORT */
-#ifdef ROAMEXP_SUPPORT
+#endif /* GSCAN_SUPPORT */
+#if defined(GSCAN_SUPPORT) || defined(DHD_GET_VALID_CHANNELS)
{
{
.vendor_id = OUI_GOOGLE,
- .subcmd = WIFI_SUBCMD_FW_ROAM_POLICY
+ .subcmd = GSCAN_SUBCMD_GET_CHANNEL_LIST
},
.flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV,
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 3, 0))
- .policy = VENDOR_CMD_RAW_DATA,
-#endif
- .doit = wl_cfgvendor_set_fw_roaming_state
+ .doit = wl_cfgvendor_gscan_get_channel_list
},
+#endif /* GSCAN_SUPPORT || DHD_GET_VALID_CHANNELS */
+#ifdef RTT_SUPPORT
{
{
.vendor_id = OUI_GOOGLE,
- .subcmd = WIFI_SUBCMD_ROAM_CAPABILITY
+ .subcmd = RTT_SUBCMD_SET_CONFIG
},
.flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV,
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 3, 0))
- .policy = VENDOR_CMD_RAW_DATA,
-#endif
- .doit = wl_cfgvendor_fw_roam_get_capability
+ .doit = wl_cfgvendor_rtt_set_config
},
-#endif /* ROAMEXP_SUPPORT */
{
{
.vendor_id = OUI_GOOGLE,
- .subcmd = DEBUG_GET_VER
+ .subcmd = RTT_SUBCMD_CANCEL_CONFIG
},
.flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV,
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 3, 0))
- .policy = VENDOR_CMD_RAW_DATA,
-#endif
- .doit = wl_cfgvendor_dbg_get_version
+ .doit = wl_cfgvendor_rtt_cancel_config
},
-#ifdef DHD_LOG_DUMP
{
{
.vendor_id = OUI_GOOGLE,
- .subcmd = DEBUG_GET_FILE_DUMP_BUF
+ .subcmd = RTT_SUBCMD_GETCAPABILITY
},
.flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV,
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 3, 0))
- .policy = VENDOR_CMD_RAW_DATA,
-#endif
- .doit = wl_cfgvendor_dbg_file_dump
+ .doit = wl_cfgvendor_rtt_get_capability
},
-#endif /* DHD_LOG_DUMP */
-
-#ifdef DEBUGABILITY
{
{
.vendor_id = OUI_GOOGLE,
- .subcmd = DEBUG_TRIGGER_MEM_DUMP
+ .subcmd = RTT_SUBCMD_GETAVAILCHANNEL
},
.flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV,
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 3, 0))
- .policy = VENDOR_CMD_RAW_DATA,
-#endif
- .doit = wl_cfgvendor_dbg_trigger_mem_dump
+ .doit = wl_cfgvendor_rtt_get_responder_info
},
{
{
.vendor_id = OUI_GOOGLE,
- .subcmd = DEBUG_GET_MEM_DUMP
+ .subcmd = RTT_SUBCMD_SET_RESPONDER
},
.flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV,
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 3, 0))
- .policy = VENDOR_CMD_RAW_DATA,
-#endif
- .doit = wl_cfgvendor_dbg_get_mem_dump
+ .doit = wl_cfgvendor_rtt_set_responder
},
{
{
.vendor_id = OUI_GOOGLE,
- .subcmd = DEBUG_START_LOGGING
+ .subcmd = RTT_SUBCMD_CANCEL_RESPONDER
},
.flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV,
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 3, 0))
- .policy = VENDOR_CMD_RAW_DATA,
-#endif
- .doit = wl_cfgvendor_dbg_start_logging
+ .doit = wl_cfgvendor_rtt_cancel_responder
},
+#endif /* RTT_SUPPORT */
{
{
.vendor_id = OUI_GOOGLE,
- .subcmd = DEBUG_RESET_LOGGING
+ .subcmd = ANDR_WIFI_SUBCMD_GET_FEATURE_SET
},
.flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV,
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 3, 0))
- .policy = VENDOR_CMD_RAW_DATA,
-#endif
- .doit = wl_cfgvendor_dbg_reset_logging
+ .doit = wl_cfgvendor_get_feature_set
},
{
{
.vendor_id = OUI_GOOGLE,
- .subcmd = DEBUG_GET_RING_STATUS
+ .subcmd = ANDR_WIFI_SUBCMD_GET_FEATURE_SET_MATRIX
},
.flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV,
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 3, 0))
- .policy = VENDOR_CMD_RAW_DATA,
-#endif
- .doit = wl_cfgvendor_dbg_get_ring_status
+ .doit = wl_cfgvendor_get_feature_set_matrix
},
{
{
.vendor_id = OUI_GOOGLE,
- .subcmd = DEBUG_GET_RING_DATA
+ .subcmd = ANDR_WIFI_RANDOM_MAC_OUI
},
.flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV,
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 3, 0))
- .policy = VENDOR_CMD_RAW_DATA,
-#endif
- .doit = wl_cfgvendor_dbg_get_ring_data
+ .doit = wl_cfgvendor_set_rand_mac_oui
},
-#endif /* DEBUGABILITY */
+#ifdef CUSTOM_FORCE_NODFS_FLAG
{
{
.vendor_id = OUI_GOOGLE,
- .subcmd = DEBUG_GET_FEATURE
+ .subcmd = ANDR_WIFI_NODFS_CHANNELS
},
.flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV,
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 3, 0))
- .policy = VENDOR_CMD_RAW_DATA,
-#endif
- .doit = wl_cfgvendor_dbg_get_feature
+ .doit = wl_cfgvendor_set_nodfs_flag
},
-#ifdef DBG_PKT_MON
+#endif /* CUSTOM_FORCE_NODFS_FLAG */
{
{
.vendor_id = OUI_GOOGLE,
- .subcmd = DEBUG_START_PKT_FATE_MONITORING
+ .subcmd = ANDR_WIFI_SET_COUNTRY
},
.flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV,
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 3, 0))
- .policy = VENDOR_CMD_RAW_DATA,
-#endif
- .doit = wl_cfgvendor_dbg_start_pkt_fate_monitoring
+ .doit = wl_cfgvendor_set_country
},
+#ifdef LINKSTAT_SUPPORT
{
{
.vendor_id = OUI_GOOGLE,
- .subcmd = DEBUG_GET_TX_PKT_FATES
+ .subcmd = LSTATS_SUBCMD_GET_INFO
},
.flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV,
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 3, 0))
- .policy = VENDOR_CMD_RAW_DATA,
-#endif
- .doit = wl_cfgvendor_dbg_get_tx_pkt_fates
+ .doit = wl_cfgvendor_lstats_get_info
},
+#endif /* LINKSTAT_SUPPORT */
+
+#ifdef GSCAN_SUPPORT
{
{
.vendor_id = OUI_GOOGLE,
- .subcmd = DEBUG_GET_RX_PKT_FATES
+ .subcmd = GSCAN_SUBCMD_SET_EPNO_SSID
},
.flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV,
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 3, 0))
- .policy = VENDOR_CMD_RAW_DATA,
-#endif
- .doit = wl_cfgvendor_dbg_get_rx_pkt_fates
+ .doit = wl_cfgvendor_epno_cfg
+
},
-#endif /* DBG_PKT_MON */
-#ifdef KEEP_ALIVE
{
{
.vendor_id = OUI_GOOGLE,
- .subcmd = WIFI_OFFLOAD_SUBCMD_START_MKEEP_ALIVE
+ .subcmd = WIFI_SUBCMD_SET_SSID_WHITELIST
},
.flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV,
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 3, 0))
- .policy = VENDOR_CMD_RAW_DATA,
-#endif
- .doit = wl_cfgvendor_start_mkeep_alive
+ .doit = wl_cfgvendor_set_ssid_whitelist
+
},
{
{
.vendor_id = OUI_GOOGLE,
- .subcmd = WIFI_OFFLOAD_SUBCMD_STOP_MKEEP_ALIVE
+ .subcmd = WIFI_SUBCMD_SET_LAZY_ROAM_PARAMS
},
.flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV,
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 3, 0))
- .policy = VENDOR_CMD_RAW_DATA,
-#endif
- .doit = wl_cfgvendor_stop_mkeep_alive
+ .doit = wl_cfgvendor_set_lazy_roam_cfg
+
},
-#endif /* KEEP_ALIVE */
-#ifdef WL_NAN
{
{
.vendor_id = OUI_GOOGLE,
- .subcmd = NAN_WIFI_SUBCMD_ENABLE
+ .subcmd = WIFI_SUBCMD_ENABLE_LAZY_ROAM
},
.flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV,
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 3, 0))
- .policy = VENDOR_CMD_RAW_DATA,
-#endif
- .doit = wl_cfgvendor_nan_start_handler
+ .doit = wl_cfgvendor_enable_lazy_roam
+
},
{
{
.vendor_id = OUI_GOOGLE,
- .subcmd = NAN_WIFI_SUBCMD_DISABLE
+ .subcmd = WIFI_SUBCMD_SET_BSSID_PREF
},
.flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV,
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 3, 0))
- .policy = VENDOR_CMD_RAW_DATA,
-#endif
- .doit = wl_cfgvendor_nan_stop_handler
+ .doit = wl_cfgvendor_set_bssid_pref
+
},
{
{
.vendor_id = OUI_GOOGLE,
- .subcmd = NAN_WIFI_SUBCMD_CONFIG
+ .subcmd = WIFI_SUBCMD_SET_BSSID_BLACKLIST
},
.flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV,
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 3, 0))
- .policy = VENDOR_CMD_RAW_DATA,
-#endif
- .doit = wl_cfgvendor_nan_config_handler
+ .doit = wl_cfgvendor_set_bssid_blacklist
},
+#endif /* GSCAN_SUPPORT */
+#ifdef DEBUGABILITY
{
{
.vendor_id = OUI_GOOGLE,
- .subcmd = NAN_WIFI_SUBCMD_REQUEST_PUBLISH
+ .subcmd = DEBUG_START_LOGGING
},
.flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV,
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 3, 0))
- .policy = VENDOR_CMD_RAW_DATA,
-#endif
- .doit = wl_cfgvendor_nan_req_publish
+ .doit = wl_cfgvendor_dbg_start_logging
},
{
{
.vendor_id = OUI_GOOGLE,
- .subcmd = NAN_WIFI_SUBCMD_REQUEST_SUBSCRIBE
+ .subcmd = DEBUG_RESET_LOGGING
},
.flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV,
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 3, 0))
- .policy = VENDOR_CMD_RAW_DATA,
-#endif
- .doit = wl_cfgvendor_nan_req_subscribe
+ .doit = wl_cfgvendor_dbg_reset_logging
},
{
{
.vendor_id = OUI_GOOGLE,
- .subcmd = NAN_WIFI_SUBCMD_CANCEL_PUBLISH
+ .subcmd = DEBUG_TRIGGER_MEM_DUMP
},
.flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV,
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 3, 0))
- .policy = VENDOR_CMD_RAW_DATA,
-#endif
- .doit = wl_cfgvendor_nan_cancel_publish
+ .doit = wl_cfgvendor_dbg_trigger_mem_dump
},
{
{
.vendor_id = OUI_GOOGLE,
- .subcmd = NAN_WIFI_SUBCMD_CANCEL_SUBSCRIBE
+ .subcmd = DEBUG_GET_MEM_DUMP
},
.flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV,
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 3, 0))
- .policy = VENDOR_CMD_RAW_DATA,
-#endif
- .doit = wl_cfgvendor_nan_cancel_subscribe
+ .doit = wl_cfgvendor_dbg_get_mem_dump
},
{
{
.vendor_id = OUI_GOOGLE,
- .subcmd = NAN_WIFI_SUBCMD_TRANSMIT
+ .subcmd = DEBUG_GET_VER
},
.flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV,
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 3, 0))
- .policy = VENDOR_CMD_RAW_DATA,
-#endif
- .doit = wl_cfgvendor_nan_transmit
+ .doit = wl_cfgvendor_dbg_get_version
},
{
{
.vendor_id = OUI_GOOGLE,
- .subcmd = NAN_WIFI_SUBCMD_GET_CAPABILITIES
+ .subcmd = DEBUG_GET_RING_STATUS
},
.flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV,
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 3, 0))
- .policy = VENDOR_CMD_RAW_DATA,
-#endif
- .doit = wl_cfgvendor_nan_get_capablities
+ .doit = wl_cfgvendor_dbg_get_ring_status
},
-
{
{
.vendor_id = OUI_GOOGLE,
- .subcmd = NAN_WIFI_SUBCMD_DATA_PATH_IFACE_CREATE
+ .subcmd = DEBUG_GET_RING_DATA
},
.flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV,
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 3, 0))
- .policy = VENDOR_CMD_RAW_DATA,
-#endif
- .doit = wl_cfgvendor_nan_data_path_iface_create
+ .doit = wl_cfgvendor_dbg_get_ring_data
},
{
{
.vendor_id = OUI_GOOGLE,
- .subcmd = NAN_WIFI_SUBCMD_DATA_PATH_IFACE_DELETE
+ .subcmd = DEBUG_GET_FEATURE
},
.flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV,
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 3, 0))
- .policy = VENDOR_CMD_RAW_DATA,
-#endif
- .doit = wl_cfgvendor_nan_data_path_iface_delete
+ .doit = wl_cfgvendor_dbg_get_feature
},
+#endif /* DEBUGABILITY */
+#ifdef DBG_PKT_MON
{
{
.vendor_id = OUI_GOOGLE,
- .subcmd = NAN_WIFI_SUBCMD_DATA_PATH_REQUEST
+ .subcmd = DEBUG_START_PKT_FATE_MONITORING
},
.flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV,
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 3, 0))
- .policy = VENDOR_CMD_RAW_DATA,
-#endif
- .doit = wl_cfgvendor_nan_data_path_request
+ .doit = wl_cfgvendor_dbg_start_pkt_fate_monitoring
},
{
{
.vendor_id = OUI_GOOGLE,
- .subcmd = NAN_WIFI_SUBCMD_DATA_PATH_RESPONSE
+ .subcmd = DEBUG_GET_TX_PKT_FATES
},
.flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV,
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 3, 0))
- .policy = VENDOR_CMD_RAW_DATA,
-#endif
- .doit = wl_cfgvendor_nan_data_path_response
+ .doit = wl_cfgvendor_dbg_get_tx_pkt_fates
},
{
{
.vendor_id = OUI_GOOGLE,
- .subcmd = NAN_WIFI_SUBCMD_DATA_PATH_END
+ .subcmd = DEBUG_GET_RX_PKT_FATES
},
.flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV,
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 3, 0))
- .policy = VENDOR_CMD_RAW_DATA,
-#endif
- .doit = wl_cfgvendor_nan_data_path_end
+ .doit = wl_cfgvendor_dbg_get_rx_pkt_fates
},
-#ifdef WL_NAN_DISC_CACHE
+#endif /* DBG_PKT_MON */
+#ifdef KEEP_ALIVE
{
{
.vendor_id = OUI_GOOGLE,
- .subcmd = NAN_WIFI_SUBCMD_DATA_PATH_SEC_INFO
+ .subcmd = WIFI_OFFLOAD_SUBCMD_START_MKEEP_ALIVE
},
.flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV,
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 3, 0))
- .policy = VENDOR_CMD_RAW_DATA,
-#endif
- .doit = wl_cfgvendor_nan_data_path_sec_info
+ .doit = wl_cfgvendor_start_mkeep_alive
},
-#endif /* WL_NAN_DISC_CACHE */
{
{
.vendor_id = OUI_GOOGLE,
- .subcmd = NAN_WIFI_SUBCMD_VERSION_INFO
+ .subcmd = WIFI_OFFLOAD_SUBCMD_STOP_MKEEP_ALIVE
},
.flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV,
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 3, 0))
- .policy = VENDOR_CMD_RAW_DATA,
-#endif
- .doit = wl_cfgvendor_nan_version_info
+ .doit = wl_cfgvendor_stop_mkeep_alive
},
-#endif /* WL_NAN */
+#endif /* KEEP_ALIVE */
#if defined(PKT_FILTER_SUPPORT) && defined(APF)
{
{
.subcmd = APF_SUBCMD_GET_CAPABILITIES
},
.flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV,
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 3, 0))
- .policy = VENDOR_CMD_RAW_DATA,
-#endif
.doit = wl_cfgvendor_apf_get_capabilities
},
.subcmd = APF_SUBCMD_SET_FILTER
},
.flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV,
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 3, 0))
- .policy = VENDOR_CMD_RAW_DATA,
-#endif
.doit = wl_cfgvendor_apf_set_filter
},
#endif /* PKT_FILTER_SUPPORT && APF */
.subcmd = WIFI_SUBCMD_CONFIG_ND_OFFLOAD
},
.flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV,
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 3, 0))
- .policy = VENDOR_CMD_RAW_DATA,
-#endif
.doit = wl_cfgvendor_configure_nd_offload
},
#endif /* NDO_CONFIG_SUPPORT */
.subcmd = WIFI_SUBCMD_SET_RSSI_MONITOR
},
.flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV,
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 3, 0))
- .policy = VENDOR_CMD_RAW_DATA,
-#endif
.doit = wl_cfgvendor_set_rssi_monitor
},
#endif /* RSSI_MONITOR_SUPPORT */
-#ifdef DHD_WAKE_STATUS
- {
- {
- .vendor_id = OUI_GOOGLE,
- .subcmd = DEBUG_GET_WAKE_REASON_STATS
- },
- .flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV,
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 3, 0))
- .policy = VENDOR_CMD_RAW_DATA,
-#endif
- .doit = wl_cfgvendor_get_wake_reason_stats
- },
-#endif /* DHD_WAKE_STATUS */
#ifdef DHDTCPACK_SUPPRESS
{
{
.subcmd = WIFI_SUBCMD_CONFIG_TCPACK_SUP
},
.flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV,
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 3, 0))
- .policy = VENDOR_CMD_RAW_DATA,
-#endif
.doit = wl_cfgvendor_set_tcpack_sup_mode
},
#endif /* DHDTCPACK_SUPPRESS */
-#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 13, 0))
- {
- {
- .vendor_id = OUI_BRCM,
- .subcmd = BRCM_VENDOR_SCMD_SET_PMK
- },
- .flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV,
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 3, 0))
- .policy = VENDOR_CMD_RAW_DATA,
-#endif
- .doit = wl_cfgvendor_set_pmk
- },
-#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(4, 13, 0) */
- {
- {
- .vendor_id = OUI_BRCM,
- .subcmd = BRCM_VENDOR_SCMD_GET_FEATURES
- },
- .flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV,
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 3, 0))
- .policy = VENDOR_CMD_RAW_DATA,
-#endif
- .doit = wl_cfgvendor_get_driver_feature
- },
-#if defined(WL_CFG80211) && defined(DHD_FILE_DUMP_EVENT)
- {
- {
- .vendor_id = OUI_GOOGLE,
- .subcmd = DEBUG_FILE_DUMP_DONE_IND
- },
- .flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV,
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 3, 0))
- .policy = VENDOR_CMD_RAW_DATA,
-#endif
- .doit = wl_cfgvendor_notify_dump_completion
- },
-#endif /* WL_CFG80211 && DHD_FILE_DUMP_EVENT */
-#if defined(WL_CFG80211)
- {
- {
- .vendor_id = OUI_GOOGLE,
- .subcmd = DEBUG_SET_HAL_START
- },
- .flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV,
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 3, 0))
- .policy = VENDOR_CMD_RAW_DATA,
-#endif
- .doit = wl_cfgvendor_set_hal_started
- },
+#ifdef DHD_WAKE_STATUS
{
{
.vendor_id = OUI_GOOGLE,
- .subcmd = DEBUG_SET_HAL_STOP
+ .subcmd = DEBUG_GET_WAKE_REASON_STATS
},
.flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV,
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 3, 0))
- .policy = VENDOR_CMD_RAW_DATA,
-#endif
- .doit = wl_cfgvendor_stop_hal
+ .doit = wl_cfgvendor_get_wake_reason_stats
}
-#endif /* WL_CFG80211 */
+#endif /* DHD_WAKE_STATUS */
};
static const struct nl80211_vendor_cmd_info wl_vendor_events [] = {
{ OUI_BRCM, BRCM_VENDOR_EVENT_UNSPEC },
{ OUI_BRCM, BRCM_VENDOR_EVENT_PRIV_STR },
+#ifdef GSCAN_SUPPORT
{ OUI_GOOGLE, GOOGLE_GSCAN_SIGNIFICANT_EVENT },
{ OUI_GOOGLE, GOOGLE_GSCAN_GEOFENCE_FOUND_EVENT },
{ OUI_GOOGLE, GOOGLE_GSCAN_BATCH_SCAN_EVENT },
{ OUI_GOOGLE, GOOGLE_SCAN_FULL_RESULTS_EVENT },
+#endif /* GSCAN_SUPPORT */
+#ifdef RTT_SUPPORT
{ OUI_GOOGLE, GOOGLE_RTT_COMPLETE_EVENT },
+#endif /* RTT_SUPPORT */
+#ifdef GSCAN_SUPPORT
{ OUI_GOOGLE, GOOGLE_SCAN_COMPLETE_EVENT },
{ OUI_GOOGLE, GOOGLE_GSCAN_GEOFENCE_LOST_EVENT },
{ OUI_GOOGLE, GOOGLE_SCAN_EPNO_EVENT },
+#endif /* GSCAN_SUPPORT */
{ OUI_GOOGLE, GOOGLE_DEBUG_RING_EVENT },
{ OUI_GOOGLE, GOOGLE_FW_DUMP_EVENT },
+#ifdef GSCAN_SUPPORT
{ OUI_GOOGLE, GOOGLE_PNO_HOTSPOT_FOUND_EVENT },
+#endif /* GSCAN_SUPPORT */
{ OUI_GOOGLE, GOOGLE_RSSI_MONITOR_EVENT },
{ OUI_GOOGLE, GOOGLE_MKEEP_ALIVE_EVENT },
{ OUI_GOOGLE, GOOGLE_NAN_EVENT_ENABLED},
{ OUI_GOOGLE, GOOGLE_NAN_EVENT_DISABLED},
- { OUI_GOOGLE, GOOGLE_NAN_EVENT_SUBSCRIBE_MATCH},
- { OUI_GOOGLE, GOOGLE_NAN_EVENT_REPLIED},
{ OUI_GOOGLE, GOOGLE_NAN_EVENT_PUBLISH_TERMINATED},
+ { OUI_GOOGLE, GOOGLE_NAN_EVENT_SUBSCRIBE_MATCH},
+ { OUI_GOOGLE, GOOGLE_NAN_EVENT_SUBSCRIBE_UNMATCH},
{ OUI_GOOGLE, GOOGLE_NAN_EVENT_SUBSCRIBE_TERMINATED},
{ OUI_GOOGLE, GOOGLE_NAN_EVENT_DE_EVENT},
{ OUI_GOOGLE, GOOGLE_NAN_EVENT_FOLLOWUP},
- { OUI_GOOGLE, GOOGLE_NAN_EVENT_TRANSMIT_FOLLOWUP_IND},
- { OUI_GOOGLE, GOOGLE_NAN_EVENT_DATA_REQUEST},
- { OUI_GOOGLE, GOOGLE_NAN_EVENT_DATA_CONFIRMATION},
- { OUI_GOOGLE, GOOGLE_NAN_EVENT_DATA_END},
- { OUI_GOOGLE, GOOGLE_NAN_EVENT_BEACON},
- { OUI_GOOGLE, GOOGLE_NAN_EVENT_SDF},
{ OUI_GOOGLE, GOOGLE_NAN_EVENT_TCA},
- { OUI_GOOGLE, GOOGLE_NAN_EVENT_SUBSCRIBE_UNMATCH},
- { OUI_GOOGLE, GOOGLE_NAN_EVENT_UNKNOWN},
- { OUI_GOOGLE, GOOGLE_ROAM_EVENT_START},
- { OUI_BRCM, BRCM_VENDOR_EVENT_HANGED},
- { OUI_BRCM, BRCM_VENDOR_EVENT_SAE_KEY},
- { OUI_BRCM, BRCM_VENDOR_EVENT_BEACON_RECV},
- { OUI_BRCM, BRCM_VENDOR_EVENT_PORT_AUTHORIZED},
- { OUI_GOOGLE, GOOGLE_FILE_DUMP_EVENT },
- { OUI_BRCM, BRCM_VENDOR_EVENT_CU},
- { OUI_BRCM, BRCM_VENDOR_EVENT_WIPS},
- { OUI_GOOGLE, NAN_ASYNC_RESPONSE_DISABLED}
+#ifdef NAN_DP
+ { OUI_GOOGLE, GOOGLE_NAN_EVENT_DATA_PATH_OPEN},
+#endif /* NAN_DP */
+ { OUI_GOOGLE, GOOGLE_NAN_EVENT_UNKNOWN}
};
int wl_cfgvendor_attach(struct wiphy *wiphy, dhd_pub_t *dhd)
{
- WL_INFORM_MEM(("Vendor: Register BRCM cfg80211 vendor cmd(0x%x) interface \n",
+ WL_INFORM(("Vendor: Register BRCM cfg80211 vendor cmd(0x%x) interface \n",
NL80211_CMD_VENDOR));
wiphy->vendor_commands = wl_vendor_cmds;
#ifdef DEBUGABILITY
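+ /* route debug ring records and urgent firmware dump events to the cfg80211 vendor channel */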
dhd_os_dbg_register_callback(FW_VERBOSE_RING_ID, wl_cfgvendor_dbg_ring_send_evt);
+ dhd_os_dbg_register_callback(FW_EVENT_RING_ID, wl_cfgvendor_dbg_ring_send_evt);
dhd_os_dbg_register_callback(DHD_EVENT_RING_ID, wl_cfgvendor_dbg_ring_send_evt);
+ dhd_os_dbg_register_callback(NAN_EVENT_RING_ID, wl_cfgvendor_dbg_ring_send_evt);
+ dhd_os_dbg_register_urgent_notifier(dhd, wl_cfgvendor_dbg_send_urgent_evt);
#endif /* DEBUGABILITY */
-#ifdef DHD_LOG_DUMP
- dhd_os_dbg_register_urgent_notifier(dhd, wl_cfgvendor_dbg_send_file_dump_evt);
-#endif /* DHD_LOG_DUMP */
return 0;
}
int wl_cfgvendor_detach(struct wiphy *wiphy)
{
- WL_INFORM_MEM(("Vendor: Unregister BRCM cfg80211 vendor interface \n"));
+ WL_INFORM(("Vendor: Unregister BRCM cfg80211 vendor interface \n"));
wiphy->vendor_commands = NULL;
wiphy->vendor_events = NULL;
/*
* Linux cfg80211 Vendor Extension Code
*
- * Copyright (C) 1999-2019, Broadcom.
- *
+ * Copyright (C) 1999-2017, Broadcom Corporation
+ *
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
* under the terms of the GNU General Public License version 2 (the "GPL"),
* available at http://www.broadcom.com/licenses/GPLv2.php, with the
* following added to such license:
- *
+ *
* As a special exception, the copyright holders of this software give you
* permission to link this software with independent modules, and to copy and
* distribute the resulting executable under terms of your choice, provided that
* the license of that module. An independent module is a module which is not
* derived from this software. The special exception does not apply to any
* modifications of the software.
- *
+ *
* Notwithstanding the above, under no circumstances may you combine this
* software in any way with any other Broadcom software provided under a license
* other than the GPL, without Broadcom's express prior written consent.
*
* <<Broadcom-WL-IPTag/Open:>>
*
- * $Id: wl_cfgvendor.h 825255 2019-06-13 12:26:42Z $
+ * $Id: wl_cfgvendor.h 698895 2017-05-11 02:55:17Z $
*/
+
#ifndef _wl_cfgvendor_h_
#define _wl_cfgvendor_h_
+#if ((LINUX_VERSION_CODE > KERNEL_VERSION(3, 14, 0)) || \
+ defined(CONFIG_BCMDHD_VENDOR_EXT)) && !defined(WL_VENDOR_EXT_SUPPORT)
+/* define CONFIG_BCMDHD_VENDOR_EXT in the brix kernel to enable GSCAN testing */
+#define WL_VENDOR_EXT_SUPPORT
+#endif /* LINUX_VERSION_CODE > KERNEL_VERSION(3, 14, 0) && CONFIG_BCMDHD_VENDOR_EXT */
#define OUI_BRCM 0x001018
#define OUI_GOOGLE 0x001A11
#define VENDOR_DATA_OVERHEAD (NLA_HDRLEN)
enum brcm_vendor_attr {
- BRCM_ATTR_DRIVER_CMD = 0,
- BRCM_ATTR_DRIVER_KEY_PMK = 1,
- BRCM_ATTR_DRIVER_FEATURE_FLAGS = 2,
- BRCM_ATTR_DRIVER_MAX = 3
-};
-
-enum brcm_wlan_vendor_features {
- BRCM_WLAN_VENDOR_FEATURE_KEY_MGMT_OFFLOAD = 0,
- BRCM_WLAN_VENDOR_FEATURES_MAX = 1
+ BRCM_ATTR_DRIVER_CMD,
+ BRCM_ATTR_DRIVER_MAX
};
#define SCAN_RESULTS_COMPLETE_FLAG_LEN ATTRIBUTE_U32_LEN
#define GSCAN_ATTR_SET11 110
#define GSCAN_ATTR_SET12 120
#define GSCAN_ATTR_SET13 130
-#define GSCAN_ATTR_SET14 140
-
-#define NAN_SVC_INFO_LEN 255
-#define NAN_SID_ENABLE_FLAG_INVALID 0xff
-#define NAN_SID_BEACON_COUNT_INVALID 0xff
-#define WL_NAN_DW_INTERVAL 512
-#define CFG80211_VENDOR_CMD_REPLY_SKB_SZ 100
-#define CFG80211_VENDOR_EVT_SKB_SZ 2048
typedef enum {
/* don't use 0 as a valid subcommand */
/* define all wifi calling related commands between 0x1600 and 0x16FF */
ANDROID_NL80211_SUBCMD_WIFI_OFFLOAD_RANGE_START = 0x1600,
- ANDROID_NL80211_SUBCMD_WIFI_OFFLOAD_RANGE_END = 0x16FF,
+ ANDROID_NL80211_SUBCMD_WIFI_OFFLOAD_RANGE_END = 0x16FF,
/* define all NAN related commands between 0x1700 and 0x17FF */
ANDROID_NL80211_SUBCMD_NAN_RANGE_START = 0x1700,
WIFI_SUBCMD_SET_RSSI_MONITOR,
WIFI_SUBCMD_CONFIG_ND_OFFLOAD,
WIFI_SUBCMD_CONFIG_TCPACK_SUP,
- WIFI_SUBCMD_FW_ROAM_POLICY,
- WIFI_SUBCMD_ROAM_CAPABILITY,
RTT_SUBCMD_SET_CONFIG = ANDROID_NL80211_SUBCMD_RTT_RANGE_START,
RTT_SUBCMD_CANCEL_CONFIG,
RTT_SUBCMD_GETCAPABILITY,
DEBUG_GET_TX_PKT_FATES,
DEBUG_GET_RX_PKT_FATES,
DEBUG_GET_WAKE_REASON_STATS,
- DEBUG_GET_FILE_DUMP_BUF,
- DEBUG_FILE_DUMP_DONE_IND,
- DEBUG_SET_HAL_START,
- DEBUG_SET_HAL_STOP,
WIFI_OFFLOAD_SUBCMD_START_MKEEP_ALIVE = ANDROID_NL80211_SUBCMD_WIFI_OFFLOAD_RANGE_START,
WIFI_OFFLOAD_SUBCMD_STOP_MKEEP_ALIVE,
- NAN_WIFI_SUBCMD_ENABLE = ANDROID_NL80211_SUBCMD_NAN_RANGE_START, /* 0x1700 */
- NAN_WIFI_SUBCMD_DISABLE, /* 0x1701 */
- NAN_WIFI_SUBCMD_REQUEST_PUBLISH, /* 0x1702 */
- NAN_WIFI_SUBCMD_REQUEST_SUBSCRIBE, /* 0x1703 */
- NAN_WIFI_SUBCMD_CANCEL_PUBLISH, /* 0x1704 */
- NAN_WIFI_SUBCMD_CANCEL_SUBSCRIBE, /* 0x1705 */
- NAN_WIFI_SUBCMD_TRANSMIT, /* 0x1706 */
- NAN_WIFI_SUBCMD_CONFIG, /* 0x1707 */
- NAN_WIFI_SUBCMD_TCA, /* 0x1708 */
- NAN_WIFI_SUBCMD_STATS, /* 0x1709 */
- NAN_WIFI_SUBCMD_GET_CAPABILITIES, /* 0x170A */
- NAN_WIFI_SUBCMD_DATA_PATH_IFACE_CREATE, /* 0x170B */
- NAN_WIFI_SUBCMD_DATA_PATH_IFACE_DELETE, /* 0x170C */
- NAN_WIFI_SUBCMD_DATA_PATH_REQUEST, /* 0x170D */
- NAN_WIFI_SUBCMD_DATA_PATH_RESPONSE, /* 0x170E */
- NAN_WIFI_SUBCMD_DATA_PATH_END, /* 0x170F */
- NAN_WIFI_SUBCMD_DATA_PATH_SEC_INFO, /* 0x1710 */
- NAN_WIFI_SUBCMD_VERSION_INFO, /* 0x1711 */
+ NAN_WIFI_SUBCMD_ENABLE = ANDROID_NL80211_SUBCMD_NAN_RANGE_START,
+ NAN_WIFI_SUBCMD_DISABLE,
+ NAN_WIFI_SUBCMD_REQUEST_PUBLISH,
+ NAN_WIFI_SUBCMD_REQUEST_SUBSCRIBE,
+ NAN_WIFI_SUBCMD_CANCEL_PUBLISH,
+ NAN_WIFI_SUBCMD_CANCEL_SUBSCRIBE,
+ NAN_WIFI_SUBCMD_TRANSMIT,
+#ifdef NAN_DP
+ NAN_WIFI_SUBCMD_DATA_PATH_OPEN,
+ NAN_WIFI_SUBCMD_DATA_PATH_CLOSE,
+#endif /* NAN_DP */
APF_SUBCMD_GET_CAPABILITIES = ANDROID_NL80211_SUBCMD_PKT_FILTER_RANGE_START,
APF_SUBCMD_SET_FILTER,
/* Add more sub commands here */
GSCAN_ATTRIBUTE_RSSI_HIGH,
GSCAN_ATTRIBUTE_HOSTLIST_BSSID_ELEM,
GSCAN_ATTRIBUTE_HOTLIST_FLUSH,
- GSCAN_ATTRIBUTE_HOTLIST_BSSID_COUNT,
/* remaining reserved for additional attributes */
GSCAN_ATTRIBUTE_RSSI_SAMPLE_SIZE = GSCAN_ATTR_SET6,
GSCAN_ATTRIBUTE_BSSID_PREF,
GSCAN_ATTRIBUTE_RSSI_MODIFIER,
+
/* Roam cfg */
GSCAN_ATTRIBUTE_A_BAND_BOOST_THRESHOLD = GSCAN_ATTR_SET9,
GSCAN_ATTRIBUTE_A_BAND_PENALTY_THRESHOLD,
GSCAN_ATTRIBUTE_EPNO_SAME_NETWORK_BONUS,
GSCAN_ATTRIBUTE_EPNO_SECURE_BONUS,
GSCAN_ATTRIBUTE_EPNO_5G_BONUS,
-
- /* Android O Roaming features */
- GSCAN_ATTRIBUTE_ROAM_STATE_SET = GSCAN_ATTR_SET14,
-
GSCAN_ATTRIBUTE_MAX
};
RTT_ATTRIBUTE_RESULTS_COMPLETE = 30,
RTT_ATTRIBUTE_RESULTS_PER_TARGET,
RTT_ATTRIBUTE_RESULT_CNT,
- RTT_ATTRIBUTE_RESULT,
- RTT_ATTRIBUTE_RESULT_DETAIL
+ RTT_ATTRIBUTE_RESULT
};
enum wifi_rssi_monitor_attr {
RSSI_MONITOR_ATTRIBUTE_START
};
-enum wifi_sae_key_attr {
- BRCM_SAE_KEY_ATTR_PEER_MAC,
- BRCM_SAE_KEY_ATTR_PMK,
- BRCM_SAE_KEY_ATTR_PMKID
-};
-
enum debug_attributes {
DEBUG_ATTRIBUTE_GET_DRIVER,
DEBUG_ATTRIBUTE_GET_FW,
DEBUG_ATTRIBUTE_LOG_LEVEL,
DEBUG_ATTRIBUTE_LOG_TIME_INTVAL,
DEBUG_ATTRIBUTE_LOG_MIN_DATA_SIZE,
-// DEBUG_ATTRIBUTE_DUMP_FILENAME,
DEBUG_ATTRIBUTE_FW_DUMP_LEN,
DEBUG_ATTRIBUTE_FW_DUMP_DATA,
DEBUG_ATTRIBUTE_RING_DATA,
DEBUG_ATTRIBUTE_PKT_FATE_DATA
};
-typedef enum {
- DUMP_LEN_ATTR_INVALID,
- DUMP_LEN_ATTR_MEMDUMP,
- DUMP_LEN_ATTR_SSSR_C0_D11_BEFORE,
- DUMP_LEN_ATTR_SSSR_C0_D11_AFTER,
- DUMP_LEN_ATTR_SSSR_C1_D11_BEFORE,
- DUMP_LEN_ATTR_SSSR_C1_D11_AFTER,
- DUMP_LEN_ATTR_SSSR_DIG_BEFORE,
- DUMP_LEN_ATTR_SSSR_DIG_AFTER,
- DUMP_LEN_ATTR_TIMESTAMP,
- DUMP_LEN_ATTR_GENERAL_LOG,
- DUMP_LEN_ATTR_ECNTRS,
- DUMP_LEN_ATTR_SPECIAL_LOG,
- DUMP_LEN_ATTR_DHD_DUMP,
- DUMP_LEN_ATTR_EXT_TRAP,
- DUMP_LEN_ATTR_HEALTH_CHK,
- DUMP_LEN_ATTR_PRESERVE_LOG,
- DUMP_LEN_ATTR_COOKIE,
- DUMP_LEN_ATTR_FLOWRING_DUMP,
- DUMP_LEN_ATTR_PKTLOG,
- DUMP_FILENAME_ATTR_DEBUG_DUMP,
- DUMP_FILENAME_ATTR_MEM_DUMP,
- DUMP_FILENAME_ATTR_SSSR_CORE_0_BEFORE_DUMP,
- DUMP_FILENAME_ATTR_SSSR_CORE_0_AFTER_DUMP,
- DUMP_FILENAME_ATTR_SSSR_CORE_1_BEFORE_DUMP,
- DUMP_FILENAME_ATTR_SSSR_CORE_1_AFTER_DUMP,
- DUMP_FILENAME_ATTR_SSSR_DIG_BEFORE_DUMP,
- DUMP_FILENAME_ATTR_SSSR_DIG_AFTER_DUMP,
- DUMP_FILENAME_ATTR_PKTLOG_DUMP,
- DUMP_LEN_ATTR_STATUS_LOG,
- DUMP_LEN_ATTR_AXI_ERROR,
- DUMP_FILENAME_ATTR_AXI_ERROR_DUMP,
- DUMP_LEN_ATTR_RTT_LOG
-} EWP_DUMP_EVENT_ATTRIBUTE;
-
-/* Attributes associated with DEBUG_GET_DUMP_BUF */
-typedef enum {
- DUMP_BUF_ATTR_INVALID,
- DUMP_BUF_ATTR_MEMDUMP,
- DUMP_BUF_ATTR_SSSR_C0_D11_BEFORE,
- DUMP_BUF_ATTR_SSSR_C0_D11_AFTER,
- DUMP_BUF_ATTR_SSSR_C1_D11_BEFORE,
- DUMP_BUF_ATTR_SSSR_C1_D11_AFTER,
- DUMP_BUF_ATTR_SSSR_DIG_BEFORE,
- DUMP_BUF_ATTR_SSSR_DIG_AFTER,
- DUMP_BUF_ATTR_TIMESTAMP,
- DUMP_BUF_ATTR_GENERAL_LOG,
- DUMP_BUF_ATTR_ECNTRS,
- DUMP_BUF_ATTR_SPECIAL_LOG,
- DUMP_BUF_ATTR_DHD_DUMP,
- DUMP_BUF_ATTR_EXT_TRAP,
- DUMP_BUF_ATTR_HEALTH_CHK,
- DUMP_BUF_ATTR_PRESERVE_LOG,
- DUMP_BUF_ATTR_COOKIE,
- DUMP_BUF_ATTR_FLOWRING_DUMP,
- DUMP_BUF_ATTR_PKTLOG,
- DUMP_BUF_ATTR_STATUS_LOG,
- DUMP_BUF_ATTR_AXI_ERROR,
- DUMP_BUF_ATTR_RTT_LOG
-} EWP_DUMP_CMD_ATTRIBUTE;
-
enum mkeep_alive_attributes {
MKEEP_ALIVE_ATTRIBUTE_ID,
MKEEP_ALIVE_ATTRIBUTE_IP_PKT,
MKEEP_ALIVE_ATTRIBUTE_DST_MAC_ADDR,
MKEEP_ALIVE_ATTRIBUTE_PERIOD_MSEC
};
+enum apf_attributes {
+ APF_ATTRIBUTE_VERSION,
+ APF_ATTRIBUTE_MAX_LEN,
+ APF_ATTRIBUTE_PROGRAM,
+ APF_ATTRIBUTE_PROGRAM_LEN
+};
typedef enum wl_vendor_event {
- BRCM_VENDOR_EVENT_UNSPEC = 0,
- BRCM_VENDOR_EVENT_PRIV_STR = 1,
- GOOGLE_GSCAN_SIGNIFICANT_EVENT = 2,
- GOOGLE_GSCAN_GEOFENCE_FOUND_EVENT = 3,
- GOOGLE_GSCAN_BATCH_SCAN_EVENT = 4,
- GOOGLE_SCAN_FULL_RESULTS_EVENT = 5,
- GOOGLE_RTT_COMPLETE_EVENT = 6,
- GOOGLE_SCAN_COMPLETE_EVENT = 7,
- GOOGLE_GSCAN_GEOFENCE_LOST_EVENT = 8,
- GOOGLE_SCAN_EPNO_EVENT = 9,
- GOOGLE_DEBUG_RING_EVENT = 10,
- GOOGLE_FW_DUMP_EVENT = 11,
- GOOGLE_PNO_HOTSPOT_FOUND_EVENT = 12,
- GOOGLE_RSSI_MONITOR_EVENT = 13,
- GOOGLE_MKEEP_ALIVE_EVENT = 14,
-
+ BRCM_VENDOR_EVENT_UNSPEC,
+ BRCM_VENDOR_EVENT_PRIV_STR,
+ GOOGLE_GSCAN_SIGNIFICANT_EVENT,
+ GOOGLE_GSCAN_GEOFENCE_FOUND_EVENT,
+ GOOGLE_GSCAN_BATCH_SCAN_EVENT,
+ GOOGLE_SCAN_FULL_RESULTS_EVENT,
+ GOOGLE_RTT_COMPLETE_EVENT,
+ GOOGLE_SCAN_COMPLETE_EVENT,
+ GOOGLE_GSCAN_GEOFENCE_LOST_EVENT,
+ GOOGLE_SCAN_EPNO_EVENT,
+ GOOGLE_DEBUG_RING_EVENT,
+ GOOGLE_FW_DUMP_EVENT,
+ GOOGLE_PNO_HOTSPOT_FOUND_EVENT,
+ GOOGLE_RSSI_MONITOR_EVENT,
+ GOOGLE_MKEEP_ALIVE_EVENT,
/*
* BRCM specific events should be placed after
* the Generic events so that enums don't mismatch
* between the DHD and HAL
*/
- GOOGLE_NAN_EVENT_ENABLED = 15,
- GOOGLE_NAN_EVENT_DISABLED = 16,
- GOOGLE_NAN_EVENT_SUBSCRIBE_MATCH = 17,
- GOOGLE_NAN_EVENT_REPLIED = 18,
- GOOGLE_NAN_EVENT_PUBLISH_TERMINATED = 19,
- GOOGLE_NAN_EVENT_SUBSCRIBE_TERMINATED = 20,
- GOOGLE_NAN_EVENT_DE_EVENT = 21,
- GOOGLE_NAN_EVENT_FOLLOWUP = 22,
- GOOGLE_NAN_EVENT_TRANSMIT_FOLLOWUP_IND = 23,
- GOOGLE_NAN_EVENT_DATA_REQUEST = 24,
- GOOGLE_NAN_EVENT_DATA_CONFIRMATION = 25,
- GOOGLE_NAN_EVENT_DATA_END = 26,
- GOOGLE_NAN_EVENT_BEACON = 27,
- GOOGLE_NAN_EVENT_SDF = 28,
- GOOGLE_NAN_EVENT_TCA = 29,
- GOOGLE_NAN_EVENT_SUBSCRIBE_UNMATCH = 30,
- GOOGLE_NAN_EVENT_UNKNOWN = 31,
- GOOGLE_ROAM_EVENT_START = 32,
- BRCM_VENDOR_EVENT_HANGED = 33,
- BRCM_VENDOR_EVENT_SAE_KEY = 34,
- BRCM_VENDOR_EVENT_BEACON_RECV = 35,
- BRCM_VENDOR_EVENT_PORT_AUTHORIZED = 36,
- GOOGLE_FILE_DUMP_EVENT = 37,
- BRCM_VENDOR_EVENT_CU = 38,
- BRCM_VENDOR_EVENT_WIPS = 39,
- NAN_ASYNC_RESPONSE_DISABLED = 40
+ GOOGLE_NAN_EVENT_ENABLED = 150,
+ GOOGLE_NAN_EVENT_DISABLED,
+ GOOGLE_NAN_EVENT_PUBLISH_TERMINATED,
+ GOOGLE_NAN_EVENT_SUBSCRIBE_MATCH,
+ GOOGLE_NAN_EVENT_SUBSCRIBE_UNMATCH,
+ GOOGLE_NAN_EVENT_SUBSCRIBE_TERMINATED,
+ GOOGLE_NAN_EVENT_DE_EVENT,
+ GOOGLE_NAN_EVENT_FOLLOWUP,
+ GOOGLE_NAN_EVENT_TCA,
+ GOOGLE_NAN_EVENT_DATA_IF_ADD,
+ GOOGLE_NAN_EVENT_DATA_PATH_OPEN,
+ GOOGLE_NAN_EVENT_UNKNOWN
} wl_vendor_event_t;
enum andr_wifi_attr {
ANDR_WIFI_ATTRIBUTE_ND_OFFLOAD_VALUE,
ANDR_WIFI_ATTRIBUTE_TCPACK_SUP_VALUE
};
-enum apf_attributes {
- APF_ATTRIBUTE_VERSION,
- APF_ATTRIBUTE_MAX_LEN,
- APF_ATTRIBUTE_PROGRAM,
- APF_ATTRIBUTE_PROGRAM_LEN
-};
typedef enum wl_vendor_gscan_attribute {
ATTR_START_GSCAN,
} WLAN_DRIVER_WAKE_REASON_CNT;
#endif /* DHD_WAKE_STATUS */
-#define BRCM_VENDOR_WIPS_EVENT_BUF_LEN 128
-typedef enum wl_vendor_wips_attr_type {
- WIPS_ATTR_DEAUTH_CNT = 1,
- WIPS_ATTR_DEAUTH_BSSID,
- WIPS_ATTR_CURRENT_RSSI,
- WIPS_ATTR_DEAUTH_RSSI
-} wl_vendor_wips_attr_type_t;
-
-/* Chipset roaming capabilities */
-typedef struct wifi_roaming_capabilities {
- u32 max_blacklist_size;
- u32 max_whitelist_size;
-} wifi_roaming_capabilities_t;
-
/* Capture the BRCM_VENDOR_SUBCMD_PRIV_STRINGS* here */
#define BRCM_VENDOR_SCMD_CAPA "cap"
-#define MEMDUMP_PATH_LEN 128
#if (LINUX_VERSION_CODE > KERNEL_VERSION(3, 13, 0)) || defined(WL_VENDOR_EXT_SUPPORT)
extern int wl_cfgvendor_attach(struct wiphy *wiphy, dhd_pub_t *dhd);
static INLINE int wl_cfgvendor_attach(struct wiphy *wiphy,
dhd_pub_t *dhd) { UNUSED_PARAMETER(wiphy); UNUSED_PARAMETER(dhd); return 0; }
static INLINE int wl_cfgvendor_detach(struct wiphy *wiphy) { UNUSED_PARAMETER(wiphy); return 0; }
-static INLINE int wl_cfgvendor_send_async_event(struct wiphy *wiphy,
- struct net_device *dev, int event_id, const void *data, int len)
-{ return 0; }
-static INLINE int wl_cfgvendor_send_hotlist_event(struct wiphy *wiphy,
- struct net_device *dev, void *data, int len, wl_vendor_event_t event)
-{ return 0; }
#endif /* (LINUX_VERSION_CODE > KERNEL_VERSION(3, 13, 0)) || defined(WL_VENDOR_EXT_SUPPORT) */
-#if defined(WL_SUPP_EVENT) && ((LINUX_VERSION_CODE > KERNEL_VERSION(3, 13, 0)) || \
- defined(WL_VENDOR_EXT_SUPPORT))
-extern int wl_cfgvendor_send_supp_eventstring(const char *func, const char *fmt, ...);
-int wl_cfgvendor_notify_supp_event_str(const char *evt_name, const char *fmt, ...);
-#define SUPP_LOG_LEN 256
-#define PRINT_SUPP_LOG(fmt, ...) \
- wl_cfgvendor_send_supp_eventstring(__func__, fmt, ##__VA_ARGS__);
-#define SUPP_LOG(args) PRINT_SUPP_LOG args;
-#define SUPP_EVT_LOG(evt_name, fmt, ...) \
- wl_cfgvendor_notify_supp_event_str(evt_name, fmt, ##__VA_ARGS__);
-#define SUPP_EVENT(args) SUPP_EVT_LOG args
-#else
-#define SUPP_LOG(x)
-#define SUPP_EVENT(x)
-#endif /* WL_SUPP_EVENT && (kernel > (3, 13, 0)) || WL_VENDOR_EXT_SUPPORT */
-
#ifdef CONFIG_COMPAT
#define COMPAT_ASSIGN_VALUE(normal_structure, member, value) \
do { \
normal_structure.member = value;
#endif /* CONFIG_COMPAT */
-#if (defined(CONFIG_ARCH_MSM) && defined(SUPPORT_WDEV_CFG80211_VENDOR_EVENT_ALLOC)) || \
- LINUX_VERSION_CODE >= KERNEL_VERSION(4, 1, 0)
-#define CFG80211_VENDOR_EVENT_ALLOC(wiphy, wdev, len, type, kflags) \
- cfg80211_vendor_event_alloc(wiphy, wdev, len, type, kflags);
-#else
-#define CFG80211_VENDOR_EVENT_ALLOC(wiphy, wdev, len, type, kflags) \
- cfg80211_vendor_event_alloc(wiphy, len, type, kflags);
-#endif /* (defined(CONFIG_ARCH_MSM) && defined(SUPPORT_WDEV_CFG80211_VENDOR_EVENT_ALLOC)) || */
- /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 1, 0) */
-int wl_cfgvendor_nan_send_async_disable_resp(struct wireless_dev *wdev);
-
#endif /* _wl_cfgvendor_h_ */
* Minimal debug/trace/assert driver definitions for
* Broadcom 802.11 Networking Adapter.
*
- * Copyright (C) 1999-2019, Broadcom.
- *
+ * Copyright (C) 1999-2017, Broadcom Corporation
+ *
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
* under the terms of the GNU General Public License version 2 (the "GPL"),
* available at http://www.broadcom.com/licenses/GPLv2.php, with the
* following added to such license:
- *
+ *
* As a special exception, the copyright holders of this software give you
* permission to link this software with independent modules, and to copy and
* distribute the resulting executable under terms of your choice, provided that
* the license of that module. An independent module is a module which is not
* derived from this software. The special exception does not apply to any
* modifications of the software.
- *
+ *
* Notwithstanding the above, under no circumstances may you combine this
* software in any way with any other Broadcom software provided under a license
* other than the GPL, without Broadcom's express prior written consent.
*
* <<Broadcom-WL-IPTag/Open:>>
*
- * $Id: wl_dbg.h 697380 2017-05-03 11:34:25Z $
+ * $Id: wl_dbg.h 664795 2016-10-13 20:13:32Z $
*/
+
#ifndef _wl_dbg_h_
#define _wl_dbg_h_
#if defined(EVENT_LOG_COMPILE)
#include <event_log.h>
-#endif // endif
+#endif
/* wl_msg_level is a bit vector with defs in wlioctl.h */
extern uint32 wl_msg_level;
#define wl_print_backtrace(a, b, c)
#endif /* ENABLE_CORECAPTURE */
+
#define WIFICC_CAPTURE(_reason)
#define WIFICC_LOGDEBUGIF(_flags, _args)
#define WIFICC_LOGDEBUG(_args)
#define WL_SRSCAN(args) _WL_SRSCAN args
#else
#define WL_SRSCAN(args)
-#endif // endif
+#endif
#if defined(BCMCONDITIONAL_LOGGING)
#define WL_NONE(args)
#define WL_ERROR(args) do {if (wl_msg_level & WL_ERROR_VAL) WL_PRINT(args); \
else WIFICC_LOGDEBUG(args); } while (0)
-#define WL_SCAN_ERROR(args)
-#define WL_IE_ERROR(args)
-#define WL_AMSDU_ERROR(args)
-#define WL_ASSOC_ERROR(args)
-#define KM_ERR(args)
-
#define WL_TRACE(args)
#define WL_PRHDRS_MSG(args)
#define WL_PRHDRS(i, p, f, t, r, l)
#define WL_APSTA_BSSID(args)
#define WL_BA(args)
#define WL_MBSS(args)
-#define WL_MODE_SWITCH(args)
#define WL_PROTO(args)
#define WL_CAC(args) do {if (wl_msg_level & WL_CAC_VAL) WL_PRINT(args);} while (0)
#define WL_RTDC(w, s, i, j)
#define WL_RTDC2(w, s, i, j)
#define WL_CHANINT(args)
-#define WL_BTA(args)
#define WL_P2P(args)
#define WL_ITFR(args)
#define WL_TDLS(args)
#define WL_P2PO(args)
#define WL_ROAM(args)
#define WL_WNM(args)
-#define WL_NAT(args)
#ifdef WLMSG_MESH
#define WL_MESH(args) WL_PRINT(args)
#define WL_MESH_AMPE(args)
#define WL_MESH_ROUTE(args)
#define WL_MESH_BCN(args)
-#endif // endif
-#ifdef WLMSG_NATOE
-#define WL_NAT(args) do {if (wl_msg_level2 & WL_NATOE_VAL) WL_PRINT(args);} while (0)
-#else
-#define WL_NAT(args)
-#endif // endif
+#endif
-#define WL_PFN_ERROR(args)
#define WL_AMPDU_UPDN(args)
#define WL_AMPDU_RX(args)
#define WL_PCIE(args)
#define WL_TSLOG(w, s, i, j)
#define WL_FBT(args)
-#define WL_MBO_DBG(args)
-#define WL_RANDMAC_DBG(args)
-#define WL_BAM_ERR(args)
-#define WL_ADPS(args)
-#define WL_OCE_DBG(args)
#define WL_ERROR_ON() (wl_msg_level & WL_ERROR_VAL)
#define WL_TRACE_ON() 0
#define WL_DPT_ON() 0
#define WL_WOWL_ON() 0
#define WL_SCAN_ON() (wl_msg_level2 & WL_SCAN_VAL)
-#define WL_BTA_ON() 0
#define WL_P2P_ON() 0
#define WL_ITFR_ON() 0
#define WL_MCHAN_ON() 0
#define WL_PCIE_ON() 0
#define WL_MUMIMO_ON() 0
#define WL_MESH_ON() 0
-#define WL_MBO_DBG_ON() 0
-#define WL_RANDMAC_DBG_ON() 0
-#define WL_ADPS_ON() 0
-#define WL_OCE_DBG_ON() 0
#else /* !BCMDBG */
#define WL_ERROR(args)
-#define KM_ERR(args)
-
-#define WL_AMPDU_ERR(args)
-
#define WL_TRACE(args)
#define WL_APSTA_UPDN(args)
#define WL_APSTA_RX(args)
-
#ifdef WLMSG_WSEC
-#if defined(EVENT_LOG_COMPILE) && defined(EVENT_LOG_COMPILE)
-#if defined(USE_EVENT_LOG_RA)
-#define WL_WSEC(args) EVENT_LOG_RA(EVENT_LOG_TAG_WL_WSEC_LOG, args)
-#define WL_WSEC_DUMP(args) EVENT_LOG_RA(EVENT_LOG_TAG_WL_WSEC_DUMP, args)
-#else
-#define WL_WSEC(args) EVENT_LOG_FAST_CAST_PAREN_ARGS(EVENT_LOG_TAG_WL_WSEC_LOG, args)
-#define WL_WSEC_DUMP(args) EVENT_LOG_FAST_CAST_PAREN_ARGS(EVENT_LOG_TAG_WL_WSEC_DUMP, args)
-#endif /* USE_EVENT_LOG_RA */
-#else
#define WL_WSEC(args) WL_PRINT(args)
#define WL_WSEC_DUMP(args) WL_PRINT(args)
-#endif /* EVENT_LOG_COMPILE */
#else
#define WL_WSEC(args)
#define WL_WSEC_DUMP(args)
-#endif /* WLMSG_WSEC */
-
-#ifdef WLMSG_MBO
-#if defined(EVENT_LOG_COMPILE) && defined(EVENT_LOG_COMPILE)
-#if defined(USE_EVENT_LOG_RA)
-#define WL_MBO_DBG(args) EVENT_LOG_RA(EVENT_LOG_TAG_MBO_DBG, args)
-#define WL_MBO_INFO(args) EVENT_LOG_RA(EVENT_LOG_TAG_MBO_INFO, args)
-#else
-#define WL_MBO_DBG(args) \
- EVENT_LOG_COMPACT_CAST_PAREN_ARGS(EVENT_LOG_TAG_MBO_DBG, args)
-#define WL_MBO_INFO(args) \
- EVENT_LOG_COMPACT_CAST_PAREN_ARGS(EVENT_LOG_TAG_MBO_INFO, args)
-#endif /* USE_EVENT_LOG_RA */
-#else
-#define WL_MBO_DBG(args) WL_PRINT(args)
-#define WL_MBO_INFO(args) WL_PRINT(args)
-#endif /* EVENT_LOG_COMPILE */
-#else
-#define WL_MBO_DBG(args)
-#define WL_MBO_INFO(args)
-#endif /* WLMSG_MBO */
-
-#define WL_MBO_ERR(args) WL_PRINT(args)
-
-#ifdef WLMSG_RANDMAC
-#if defined(EVENT_LOG_COMPILE) && defined(EVENT_LOG_COMPILE)
-#if defined(USE_EVENT_LOG_RA)
-#define WL_RANDMAC_DBG(args) EVENT_LOG_RA(EVENT_LOG_TAG_RANDMAC_DBG, args)
-#define WL_RANDMAC_INFO(args) EVENT_LOG_RA(EVENT_LOG_TAG_RANDMAC_INFO, args)
-#else
-#define WL_RANDMAC_DBG(args) \
- EVENT_LOG_COMPACT_CAST_PAREN_ARGS(EVENT_LOG_TAG_RANDMAC_DBG, args)
-#define WL_RANDMAC_INFO(args) \
- EVENT_LOG_COMPACT_CAST_PAREN_ARGS(EVENT_LOG_TAG_RANDMAC_INFO, args)
-#endif /* USE_EVENT_LOG_RA */
-#else
-#define WL_RANDMAC_DBG(args) WL_PRINT(args)
-#define WL_RANDMAC_INFO(args) WL_PRINT(args)
-#endif /* EVENT_LOG_COMPILE */
-#else
-#define WL_RANDMAC_DBG(args)
-#define WL_RANDMAC_INFO(args)
-#endif /* WLMSG_RANDMAC */
-
-#define WL_RANDMAC_ERR(args) WL_PRINT(args)
-
-#ifdef WLMSG_OCE
-#if defined(EVENT_LOG_COMPILE)
-#if defined(USE_EVENT_LOG_RA)
-#define WL_OCE_DBG(args) EVENT_LOG_RA(EVENT_LOG_TAG_OCE_DBG, args)
-#define WL_OCE_INFO(args) EVENT_LOG_RA(EVENT_LOG_TAG_OCE_INFO, args)
-#else
-#define WL_OCE_DBG(args) \
- EVENT_LOG_COMPACT_CAST_PAREN_ARGS(EVENT_LOG_TAG_OCE_DBG, args)
-#define WL_OCE_INFO(args) \
- EVENT_LOG_COMPACT_CAST_PAREN_ARGS(EVENT_LOG_TAG_OCE_INFO, args)
-#endif /* USE_EVENT_LOG_RA */
-#else
-#define WL_OCE_DBG(args) WL_PRINT(args)
-#define WL_OCE_INFO(args) WL_PRINT(args)
-#endif /* EVENT_LOG_COMPILE */
-#else
-#define WL_OCE_DBG(args)
-#define WL_OCE_INFO(args)
-#endif /* WLMSG_OCE */
-
-#define WL_OCE_ERR(args) WL_PRINT(args)
-
+#endif
#define WL_PCIE(args) do {if (wl_msg_level2 & WL_PCIE_VAL) WL_PRINT(args);} while (0)
#define WL_PCIE_ON() (wl_msg_level2 & WL_PCIE_VAL)
#define WL_PFN(args) do {if (wl_msg_level & WL_PFN_VAL) WL_PRINT(args);} while (0)
#define WL_PFN_ON() (wl_msg_level & WL_PFN_VAL)
-#endif // endif
-
-#ifdef WLMSG_BAM
-#if defined(EVENT_LOG_COMPILE)
-#ifdef USE_EVENT_LOG_RA
-#define WL_BAM_ERR(args) EVENT_LOG_RA(EVENT_LOG_TAG_BAM, args)
-#else
-#define WL_BAM_ERR(args) EVENT_LOG_COMPACT_CAST_PAREN_ARGS(EVENT_LOG_TAG_BAM, args)
-#endif /* USE_EVENT_LOG_RA */
-#else
-#define WL_BAM_ERR(args) WL_PRINT(args)
-#endif /* EVENT_LOG_COMPILE */
-#endif /* WLMSG_BAM */
-
-#ifdef WLMSG_WNM_BSSTRANS
-#if defined(EVENT_LOG_COMPILE)
-#if defined(USE_EVENT_LOG_RA)
-#define WL_BSSTRANS_INFO(args) EVENT_LOG_RA(EVENT_LOG_TAG_WNM_BSSTRANS_INFO, args)
-#else
-#define WL_BSSTRANS_INFO(args) \
- EVENT_LOG_COMPACT_CAST_PAREN_ARGS(EVENT_LOG_TAG_WNM_BSSTRANS_INFO, args)
-#endif /* USE_EVENT_LOG_RA */
-#else
-#define WL_BSSTRANS_INFO(args) WL_PRINT(args)
-#endif /* EVENT_LOG_COMPILE */
-#else
-#define WL_BSSTRANS_INFO(args)
-#endif /* WLMSG_WNM_BSSTRANS */
-
-#define WL_BSSTRANS_ERR(args) WL_PRINT(args)
+#endif
#define DBGERRONLY(x)
#if defined(WL_ESCAN)
+
+#include <typedefs.h>
+#include <linuxver.h>
+#include <osl.h>
+#include <dngl_stats.h>
+#include <dhd.h>
+
+#include <bcmutils.h>
#include <bcmendian.h>
+#include <ethernet.h>
+
#include <linux/if_arp.h>
#include <asm/uaccess.h>
+
+#include <wlioctl.h>
#include <wl_android.h>
+#include <wl_iw.h>
#include <wl_escan.h>
#include <dhd_config.h>
-#define ESCAN_ERROR(name, arg1, args...) \
- do { \
- if (android_msg_level & ANDROID_ERROR_LEVEL) { \
- printk(KERN_ERR "[dhd-%s] ESCAN-ERROR) %s : " arg1, name, __func__, ## args); \
- } \
- } while (0)
-#define ESCAN_TRACE(name, arg1, args...) \
+/* message levels */
+#define ESCAN_ERROR_LEVEL 0x0001
+#define ESCAN_SCAN_LEVEL 0x0002
+#define ESCAN_TRACE_LEVEL 0x0004
+
+#define ESCAN_ERROR(x) \
do { \
- if (android_msg_level & ANDROID_TRACE_LEVEL) { \
- printk(KERN_INFO "[dhd-%s] ESCAN-TRACE) %s : " arg1, name, __func__, ## args); \
+ if (iw_msg_level & ESCAN_ERROR_LEVEL) { \
+ printf(KERN_ERR "ESCAN-ERROR) %s : ", __func__); \
+ printf x; \
} \
} while (0)
-#define ESCAN_SCAN(name, arg1, args...) \
+#define ESCAN_SCAN(x) \
do { \
- if (android_msg_level & ANDROID_SCAN_LEVEL) { \
- printk(KERN_INFO "[dhd-%s] ESCAN-SCAN) %s : " arg1, name, __func__, ## args); \
+ if (iw_msg_level & ESCAN_SCAN_LEVEL) { \
+ printf(KERN_ERR "ESCAN-SCAN) %s : ", __func__); \
+ printf x; \
} \
} while (0)
-#define ESCAN_DBG(name, arg1, args...) \
+#define ESCAN_TRACE(x) \
do { \
- if (android_msg_level & ANDROID_DBG_LEVEL) { \
- printk(KERN_INFO "[dhd-%s] ESCAN-DBG) %s : " arg1, name, __func__, ## args); \
+ if (iw_msg_level & ESCAN_TRACE_LEVEL) { \
+ printf(KERN_ERR "ESCAN-TRACE) %s : ", __func__); \
+ printf x; \
} \
} while (0)
#define dtoh16(i) (i)
#define htodchanspec(i) (i)
#define dtohchanspec(i) (i)
-#define WL_EXTRA_BUF_MAX 2048
#define wl_escan_get_buf(a) ((wl_scan_results_t *) (a)->escan_buf)
} removal_element_t;
#endif /* ESCAN_BUF_OVERFLOW_MGMT */
+struct wl_escan_info *g_escan = NULL;
/* Return a new chanspec given a legacy chanspec
* Returns INVCHANSPEC on error
*/
}
if (wf_chspec_malformed(chspec)) {
- ESCAN_ERROR("wlan", "wl_chspec_from_legacy: output chanspec (0x%04X) malformed\n",
- chspec);
+ ESCAN_ERROR(("wl_chspec_from_legacy: output chanspec (0x%04X) malformed\n",
+ chspec));
return INVCHANSPEC;
}
chanspec_t lchspec;
if (wf_chspec_malformed(chspec)) {
- ESCAN_ERROR("wlan", "wl_chspec_to_legacy: input chanspec (0x%04X) malformed\n",
- chspec);
+ ESCAN_ERROR(("wl_chspec_to_legacy: input chanspec (0x%04X) malformed\n",
+ chspec));
return INVCHANSPEC;
}
} else {
/* cannot express the bandwidth */
char chanbuf[CHANSPEC_STR_LEN];
- ESCAN_ERROR("wlan", "wl_chspec_to_legacy: unable to convert chanspec %s "
- "(0x%04X) to pre-11ac format\n",
- wf_chspec_ntoa(chspec, chanbuf), chspec);
+ ESCAN_ERROR((
+ "wl_chspec_to_legacy: unable to convert chanspec %s (0x%04X) "
+ "to pre-11ac format\n",
+ wf_chspec_ntoa(chspec, chanbuf), chspec));
return INVCHANSPEC;
}
* Returns INVCHANSPEC on error
*/
static chanspec_t
-wl_ch_host_to_driver(int ioctl_ver, u16 channel)
+wl_ch_host_to_driver(int ioctl_ver, s32 bssidx, u16 channel)
{
chanspec_t chanspec;
return wl_chspec_host_to_driver(ioctl_ver, chanspec);
}
-static inline struct wl_bss_info *next_bss(struct wl_scan_results *list,
- struct wl_bss_info *bss)
+static inline struct wl_bss_info *next_bss(struct wl_scan_results *list, struct wl_bss_info *bss)
{
return bss = bss ?
(struct wl_bss_info *)((uintptr) bss + dtoh32(bss->length)) : list->bss_info;
}
-#if defined(ESCAN_RESULT_PATCH)
+#define MAC2STR(a) (a)[0], (a)[1], (a)[2], (a)[3], (a)[4], (a)[5]
+
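+/* Map an RSSI reading (dBm) to a Wireless Extensions quality level (0-5). */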
+static int
+rssi_to_qual(int rssi)
+{
+ if (rssi <= WL_IW_RSSI_NO_SIGNAL)
+ return 0;
+ else if (rssi <= WL_IW_RSSI_VERY_LOW)
+ return 1;
+ else if (rssi <= WL_IW_RSSI_LOW)
+ return 2;
+ else if (rssi <= WL_IW_RSSI_GOOD)
+ return 3;
+ else if (rssi <= WL_IW_RSSI_VERY_GOOD)
+ return 4;
+ else
+ return 5;
+}
+
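+/* On GCC >= 4.6, wrap list_first_entry()/container_of() in pragmas so the
+ * -Wcast-qual warning raised by the casts inside those macros is suppressed.
+ */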
+#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == \
+ 4 && __GNUC_MINOR__ >= 6))
+#define BCM_SET_LIST_FIRST_ENTRY(entry, ptr, type, member) \
+_Pragma("GCC diagnostic push") \
+_Pragma("GCC diagnostic ignored \"-Wcast-qual\"") \
+(entry) = list_first_entry((ptr), type, member); \
+_Pragma("GCC diagnostic pop") \
+
+#define BCM_SET_CONTAINER_OF(entry, ptr, type, member) \
+_Pragma("GCC diagnostic push") \
+_Pragma("GCC diagnostic ignored \"-Wcast-qual\"") \
+entry = container_of((ptr), type, member); \
+_Pragma("GCC diagnostic pop") \
+
+#else
+#define BCM_SET_LIST_FIRST_ENTRY(entry, ptr, type, member) \
+(entry) = list_first_entry((ptr), type, member); \
+
+#define BCM_SET_CONTAINER_OF(entry, ptr, type, member) \
+entry = container_of((ptr), type, member); \
+
+#endif /* STRICT_GCC_WARNINGS */
+
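+/* Spinlock helpers protecting the escan event queue (escan->eq_list). */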
+static unsigned long wl_lock_eq(struct wl_escan_info *escan)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&escan->eq_lock, flags);
+ return flags;
+}
+
+static void wl_unlock_eq(struct wl_escan_info *escan, unsigned long flags)
+{
+ spin_unlock_irqrestore(&escan->eq_lock, flags);
+}
+
+static void wl_init_eq(struct wl_escan_info *escan)
+{
+ spin_lock_init(&escan->eq_lock);
+ INIT_LIST_HEAD(&escan->eq_list);
+}
+
+static void wl_flush_eq(struct wl_escan_info *escan)
+{
+ struct escan_event_q *e;
+ unsigned long flags;
+
+ flags = wl_lock_eq(escan);
+ while (!list_empty_careful(&escan->eq_list)) {
+ BCM_SET_LIST_FIRST_ENTRY(e, &escan->eq_list, struct escan_event_q, eq_list);
+ list_del(&e->eq_list);
+ kfree(e);
+ }
+ wl_unlock_eq(escan, flags);
+}
+
+static struct escan_event_q *wl_deq_event(struct wl_escan_info *escan)
+{
+ struct escan_event_q *e = NULL;
+ unsigned long flags;
+
+ flags = wl_lock_eq(escan);
+ if (likely(!list_empty(&escan->eq_list))) {
+ BCM_SET_LIST_FIRST_ENTRY(e, &escan->eq_list, struct escan_event_q, eq_list);
+ list_del(&e->eq_list);
+ }
+ wl_unlock_eq(escan, flags);
+
+ return e;
+}
+
+/*
+ * push event to tail of the queue
+ */
+
static s32
-wl_escan_inform_bss(struct net_device *dev, struct wl_escan_info *escan)
+wl_enq_event(struct wl_escan_info *escan, struct net_device *ndev, u32 event,
+ const wl_event_msg_t *msg, void *data)
+{
+ struct escan_event_q *e;
+ s32 err = 0;
+ uint32 evtq_size;
+ uint32 data_len;
+ unsigned long flags;
+ gfp_t aflags;
+
+ data_len = 0;
+ if (data)
+ data_len = ntoh32(msg->datalen);
+ evtq_size = sizeof(struct escan_event_q) + data_len;
+ aflags = (in_atomic()) ? GFP_ATOMIC : GFP_KERNEL;
+ e = kzalloc(evtq_size, aflags);
+ if (unlikely(!e)) {
+ ESCAN_ERROR(("event alloc failed\n"));
+ return -ENOMEM;
+ }
+ e->etype = event;
+ memcpy(&e->emsg, msg, sizeof(wl_event_msg_t));
+ if (data)
+ memcpy(e->edata, data, data_len);
+ flags = wl_lock_eq(escan);
+ list_add_tail(&e->eq_list, &escan->eq_list);
+ wl_unlock_eq(escan, flags);
+
+ return err;
+}
+
+static void wl_put_event(struct escan_event_q *e)
+{
+ kfree(e);
+}
+
+static void wl_wakeup_event(struct wl_escan_info *escan)
+{
+ dhd_pub_t *dhd = (dhd_pub_t *)(escan->pub);
+
+ if (dhd->up && (escan->event_tsk.thr_pid >= 0)) {
+ up(&escan->event_tsk.sema);
+ }
+}
+
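+/* Event worker thread: blocks on the task semaphore, drains the event queue
+ * and dispatches each queued event to its registered handler.
+ */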
+static s32 wl_escan_event_handler(void *data)
+{
+ struct wl_escan_info *escan = NULL;
+ struct escan_event_q *e;
+ tsk_ctl_t *tsk = (tsk_ctl_t *)data;
+
+ escan = (struct wl_escan_info *)tsk->parent;
+
+ printf("tsk Enter, tsk = 0x%p\n", tsk);
+
+ while (down_interruptible (&tsk->sema) == 0) {
+ SMP_RD_BARRIER_DEPENDS();
+ if (tsk->terminated) {
+ break;
+ }
+ while (escan && (e = wl_deq_event(escan))) {
+ ESCAN_TRACE(("dev=%p, event type (%d), ifidx: %d bssidx: %d \n",
+ escan->dev, e->etype, e->emsg.ifidx, e->emsg.bsscfgidx));
+
+ if (e->emsg.ifidx > WL_MAX_IFS) {
+ ESCAN_ERROR(("Event ifidx not in range. val:%d \n", e->emsg.ifidx));
+ goto fail;
+ }
+
+ if (escan->dev && escan->evt_handler[e->etype]) {
+ dhd_pub_t *dhd = (struct dhd_pub *)(escan->pub);
+ if (dhd->busstate == DHD_BUS_DOWN) {
+ ESCAN_ERROR((": BUS is DOWN.\n"));
+ } else {
+ escan->evt_handler[e->etype](escan, &e->emsg, e->edata);
+ }
+ } else {
+ ESCAN_TRACE(("Unknown Event (%d): ignoring\n", e->etype));
+ }
+fail:
+ wl_put_event(e);
+ DHD_EVENT_WAKE_UNLOCK(escan->pub);
+ }
+ }
+ printf("%s: was terminated\n", __FUNCTION__);
+ complete_and_exit(&tsk->completed, 0);
+ return 0;
+}
+
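+/* Entry point from the DHD event path: queue the firmware event and wake the
+ * escan worker thread to process it.
+ */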
+void
+wl_escan_event(struct net_device *dev, const wl_event_msg_t * e, void *data)
+{
+ u32 event_type = ntoh32(e->event_type);
+ struct wl_escan_info *escan = g_escan;
+
+ if (!escan || !escan->dev) {
+ return;
+ }
+
+ if (escan->event_tsk.thr_pid == -1) {
+ ESCAN_ERROR(("Event handler is not created\n"));
+ return;
+ }
+
+ if (escan == NULL) {
+ ESCAN_ERROR(("Stale event ignored\n"));
+ return;
+ }
+
+ if (event_type == WLC_E_PFN_NET_FOUND) {
+ ESCAN_TRACE(("PNOEVENT: PNO_NET_FOUND\n"));
+ }
+ else if (event_type == WLC_E_PFN_NET_LOST) {
+ ESCAN_TRACE(("PNOEVENT: PNO_NET_LOST\n"));
+ }
+
+ DHD_EVENT_WAKE_LOCK(escan->pub);
+ if (likely(!wl_enq_event(escan, dev, event_type, e, data))) {
+ wl_wakeup_event(escan);
+ } else {
+ DHD_EVENT_WAKE_UNLOCK(escan->pub);
+ }
+}
+
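+/* Post-scan bookkeeping: refresh the RSSI/BSS caches from the new result list
+ * and, when autochannel is enabled, pick the best 2.4G/5G channels.
+ */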
+static s32 wl_escan_inform_bss(struct wl_escan_info *escan)
{
struct wl_scan_results *bss_list;
s32 err = 0;
bss_list = escan->bss_list;
- ESCAN_SCAN(dev->name, "scanned AP count (%d)\n", bss_list->count);
-
/* Delete disconnected cache */
#if defined(BSSCACHE)
- wl_delete_disconnected_bss_cache(&escan->g_bss_cache_ctrl,
- (u8*)&escan->disconnected_bssid);
+ wl_delete_disconnected_bss_cache(&escan->g_bss_cache_ctrl, (u8*)&escan->disconnected_bssid);
#if defined(RSSIAVG)
- wl_delete_disconnected_rssi_cache(&escan->g_rssi_cache_ctrl,
- (u8*)&escan->disconnected_bssid);
+ wl_delete_disconnected_rssi_cache(&escan->g_rssi_cache_ctrl, (u8*)&escan->disconnected_bssid);
#endif
#endif
#if defined(RSSIAVG)
wl_update_rssi_cache(&escan->g_rssi_cache_ctrl, bss_list);
if (!in_atomic())
- wl_update_connected_rssi_cache(dev, &escan->g_rssi_cache_ctrl, &rssi);
+ wl_update_connected_rssi_cache(escan->dev, &escan->g_rssi_cache_ctrl, &rssi);
#endif
#if defined(BSSCACHE)
wl_update_bss_cache(&escan->g_bss_cache_ctrl,
wl_delete_dirty_bss_cache(&escan->g_bss_cache_ctrl);
wl_reset_bss_cache(&escan->g_bss_cache_ctrl);
if (escan->autochannel)
- wl_ext_get_best_channel(dev, &escan->g_bss_cache_ctrl,
- escan->ioctl_ver, &escan->best_2g_ch, &escan->best_5g_ch);
+ wl_ext_get_best_channel(escan->dev, &escan->g_bss_cache_ctrl,
+			escan->ioctl_ver, &escan->best_2g_ch, &escan->best_5g_ch);
#else
if (escan->autochannel)
- wl_ext_get_best_channel(dev, bss_list, escan->ioctl_ver,
+ wl_ext_get_best_channel(escan->dev, bss_list, escan->ioctl_ver,
&escan->best_2g_ch, &escan->best_5g_ch);
#endif
+ ESCAN_TRACE(("scanned AP count (%d)\n", bss_list->count));
+
return err;
}
-#endif /* ESCAN_RESULT_PATCH */
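+/* Allocate a wl_scan_params_t sized for a single channel and no SSIDs;
+ * the caller owns (and must kfree) the returned buffer.
+ */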
static wl_scan_params_t *
-wl_escan_alloc_params(struct net_device *dev, struct wl_escan_info *escan,
- int channel, int nprobes, int *out_params_size)
+wl_escan_alloc_params(struct wl_escan_info *escan, int channel,
+ int nprobes, int *out_params_size)
{
wl_scan_params_t *params;
int params_size;
int num_chans;
+ int bssidx = 0;
*out_params_size = 0;
params_size = WL_SCAN_PARAMS_FIXED_SIZE + 1 * sizeof(uint16);
params = (wl_scan_params_t*) kzalloc(params_size, GFP_KERNEL);
if (params == NULL) {
- ESCAN_ERROR(dev->name, "mem alloc failed (%d bytes)\n", params_size);
+ ESCAN_ERROR(("mem alloc failed (%d bytes)\n", params_size));
return params;
}
memset(params, 0, params_size);
if (channel == -1)
params->channel_list[0] = htodchanspec(channel);
else
- params->channel_list[0] = wl_ch_host_to_driver(escan->ioctl_ver, channel);
+ params->channel_list[0] = wl_ch_host_to_driver(escan->ioctl_ver, bssidx, channel);
/* Our scan params have 1 channel and 0 ssids */
params->channel_num = htod32((0 << WL_SCAN_PARAMS_NSSID_SHIFT) |
return params;
}
-static void
-wl_escan_abort(struct net_device *dev, struct wl_escan_info *escan)
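+/* Abort any in-progress firmware scan by issuing WLC_SCAN on channel -1. */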
+static void wl_escan_abort(struct wl_escan_info *escan)
{
wl_scan_params_t *params = NULL;
s32 params_size = 0;
s32 err = BCME_OK;
if (!in_atomic()) {
/* Our scan params only need space for 1 channel and 0 ssids */
- params = wl_escan_alloc_params(dev, escan, -1, 0, ¶ms_size);
+ params = wl_escan_alloc_params(escan, -1, 0, ¶ms_size);
if (params == NULL) {
- ESCAN_ERROR(dev->name, "scan params allocation failed \n");
+ ESCAN_ERROR(("scan params allocation failed \n"));
err = -ENOMEM;
} else {
/* Do a scan abort to stop the driver's scan engine */
- err = wldev_ioctl(dev, WLC_SCAN, params, params_size, true);
+ err = wldev_ioctl(escan->dev, WLC_SCAN, params, params_size, true);
if (err < 0) {
- ESCAN_ERROR(dev->name, "scan abort failed \n");
+ ESCAN_ERROR(("scan abort failed \n"));
}
kfree(params);
}
}
}
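+/* Scan completion path: optionally abort the firmware scan, stop the timeout
+ * timer, publish results and send the SIOCGIWSCAN wireless event to user space.
+ */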
-static s32
-wl_escan_notify_complete(struct net_device *dev,
- struct wl_escan_info *escan, bool fw_abort)
+static s32 wl_notify_escan_complete(struct wl_escan_info *escan, bool fw_abort)
{
s32 err = BCME_OK;
-#if defined(WL_WIRELESS_EXT)
int cmd = 0;
#if WIRELESS_EXT > 13
union iwreq_data wrqu;
char extra[IW_CUSTOM_MAX + 1];
+
+ memset(extra, 0, sizeof(extra));
#endif
-#endif
- struct dhd_pub *dhd = dhd_get_pub(dev);
- ESCAN_TRACE(dev->name, "Enter\n");
+ ESCAN_TRACE(("Enter\n"));
+ if (!escan || !escan->dev) {
+ ESCAN_ERROR(("escan or dev is null\n"));
+ err = BCME_ERROR;
+ goto out;
+ }
if (fw_abort && !in_atomic())
- wl_escan_abort(dev, escan);
+ wl_escan_abort(escan);
if (timer_pending(&escan->scan_timeout))
del_timer_sync(&escan->scan_timeout);
-
#if defined(ESCAN_RESULT_PATCH)
escan->bss_list = wl_escan_get_buf(escan);
- wl_escan_inform_bss(dev, escan);
+ wl_escan_inform_bss(escan);
#endif /* ESCAN_RESULT_PATCH */
- escan->escan_state = ESCAN_STATE_IDLE;
- wake_up_interruptible(&dhd->conf->event_complete);
-
-#if defined(WL_WIRELESS_EXT)
#if WIRELESS_EXT > 13
#if WIRELESS_EXT > 14
cmd = SIOCGIWSCAN;
#endif
+ ESCAN_TRACE(("event WLC_E_SCAN_COMPLETE\n"));
// terence 20150224: fix "wlan0: (WE) : Wireless Event too big (65306)"
memset(&wrqu, 0, sizeof(wrqu));
- memset(extra, 0, sizeof(extra));
if (cmd) {
if (cmd == SIOCGIWSCAN) {
- wireless_send_event(dev, cmd, &wrqu, NULL);
+ wireless_send_event(escan->dev, cmd, &wrqu, NULL);
} else
- wireless_send_event(dev, cmd, &wrqu, extra);
+ wireless_send_event(escan->dev, cmd, &wrqu, extra);
}
-#endif
#endif
+out:
return err;
}
#ifdef ESCAN_BUF_OVERFLOW_MGMT
static void
-wl_escan_find_removal_candidate(struct wl_escan_info *escan,
- wl_bss_info_t *bss, removal_element_t *candidate)
+wl_cfg80211_find_removal_candidate(wl_bss_info_t *bss, removal_element_t *candidate)
{
int idx;
for (idx = 0; idx < BUF_OVERFLOW_MGMT_COUNT; idx++) {
}
static void
-wl_escan_remove_lowRSSI_info(struct net_device *dev, struct wl_escan_info *escan,
- wl_scan_results_t *list, removal_element_t *candidate, wl_bss_info_t *bi)
+wl_cfg80211_remove_lowRSSI_info(wl_scan_results_t *list, removal_element_t *candidate,
+ wl_bss_info_t *bi)
{
int idx1, idx2;
int total_delete_len = 0;
candidate[idx1].RSSI == bss->RSSI &&
candidate[idx1].length == dtoh32(bss->length)) {
u32 delete_len = dtoh32(bss->length);
- ESCAN_DBG(dev->name,
- "delete scan info of %pM to add new AP\n", &bss->BSSID);
+ ESCAN_TRACE(("delete scan info of " MACDBG " to add new AP\n",
+ MAC2STRDBG(bss->BSSID.octet)));
if (idx2 < list->count -1) {
memmove((u8 *)bss, (u8 *)bss + delete_len,
list->buflen - cur_len - delete_len);
}
#endif /* ESCAN_BUF_OVERFLOW_MGMT */
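+/* WLC_E_ESCAN_RESULT handler: accumulate partial results into the escan
+ * buffer (merging duplicate BSSIDs) and finalize the scan on completion,
+ * abort or timeout status.
+ */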
-static s32
-wl_escan_handler(struct net_device *dev, struct wl_escan_info *escan,
+static s32 wl_escan_handler(struct wl_escan_info *escan,
const wl_event_msg_t *e, void *data)
{
s32 err = BCME_OK;
u32 i;
u16 channel;
+ ESCAN_TRACE(("enter event type : %d, status : %d \n",
+ ntoh32(e->event_type), ntoh32(e->status)));
+
mutex_lock(&escan->usr_sync);
escan_result = (wl_escan_result_t *)data;
if (escan->escan_state != ESCAN_STATE_SCANING) {
- ESCAN_DBG(dev->name, "Not my scan\n");
+ ESCAN_TRACE(("Not my scan\n"));
goto exit;
}
- ESCAN_DBG(dev->name, "enter event type : %d, status : %d \n",
- ntoh32(e->event_type), ntoh32(e->status));
-
if (status == WLC_E_STATUS_PARTIAL) {
- ESCAN_DBG(dev->name, "WLC_E_STATUS_PARTIAL \n");
+ ESCAN_TRACE(("WLC_E_STATUS_PARTIAL \n"));
if (!escan_result) {
- ESCAN_ERROR(dev->name, "Invalid escan result (NULL pointer)\n");
+ ESCAN_ERROR(("Invalid escan result (NULL pointer)\n"));
goto exit;
}
if (dtoh16(escan_result->bss_count) != 1) {
- ESCAN_ERROR(dev->name, "Invalid bss_count %d: ignoring\n",
- escan_result->bss_count);
+ ESCAN_ERROR(("Invalid bss_count %d: ignoring\n", escan_result->bss_count));
goto exit;
}
bi = escan_result->bss_info;
if (!bi) {
- ESCAN_ERROR(dev->name, "Invalid escan bss info (NULL pointer)\n");
+ ESCAN_ERROR(("Invalid escan bss info (NULL pointer)\n"));
goto exit;
}
bi_length = dtoh32(bi->length);
if (bi_length != (dtoh32(escan_result->buflen) - WL_ESCAN_RESULTS_FIXED_SIZE)) {
- ESCAN_ERROR(dev->name, "Invalid bss_info length %d: ignoring\n",
- bi_length);
+ ESCAN_ERROR(("Invalid bss_info length %d: ignoring\n", bi_length));
goto exit;
}
/* +++++ terence 20130524: skip invalid bss */
channel =
- bi->ctl_ch ? bi->ctl_ch :
- CHSPEC_CHANNEL(wl_chspec_driver_to_host(escan->ioctl_ver, bi->chanspec));
+ bi->ctl_ch ? bi->ctl_ch : CHSPEC_CHANNEL(wl_chspec_driver_to_host(escan->ioctl_ver, bi->chanspec));
if (!dhd_conf_match_channel(escan->pub, channel))
goto exit;
/* ----- terence 20130524: skip invalid bss */
remove_lower_rssi = TRUE;
#endif /* ESCAN_BUF_OVERFLOW_MGMT */
- ESCAN_DBG(dev->name, "%s(%pM) RSSI %d flags 0x%x length %d\n",
- bi->SSID, &bi->BSSID, bi->RSSI, bi->flags, bi->length);
+ ESCAN_TRACE(("%s("MACDBG") RSSI %d flags 0x%x length %d\n", bi->SSID,
+ MAC2STRDBG(bi->BSSID.octet), bi->RSSI, bi->flags, bi->length));
for (i = 0; i < list->count; i++) {
bss = bss ? (wl_bss_info_t *)((uintptr)bss + dtoh32(bss->length))
: list->bss_info;
#ifdef ESCAN_BUF_OVERFLOW_MGMT
- ESCAN_DBG(dev->name,
- "%s(%pM), i=%d bss: RSSI %d list->count %d\n",
- bss->SSID, &bss->BSSID, i, bss->RSSI, list->count);
+ ESCAN_TRACE(("%s("MACDBG"), i=%d bss: RSSI %d list->count %d\n",
+ bss->SSID, MAC2STRDBG(bss->BSSID.octet),
+ i, bss->RSSI, list->count));
if (remove_lower_rssi)
- wl_escan_find_removal_candidate(escan, bss, candidate);
+ wl_cfg80211_find_removal_candidate(bss, candidate);
#endif /* ESCAN_BUF_OVERFLOW_MGMT */
if (!bcmp(&bi->BSSID, &bss->BSSID, ETHER_ADDR_LEN) &&
- (CHSPEC_BAND(wl_chspec_driver_to_host(escan->ioctl_ver, bi->chanspec))
- == CHSPEC_BAND(wl_chspec_driver_to_host(escan->ioctl_ver, bss->chanspec))) &&
- bi->SSID_len == bss->SSID_len &&
- !bcmp(bi->SSID, bss->SSID, bi->SSID_len)) {
+ (CHSPEC_BAND(wl_chspec_driver_to_host(escan->ioctl_ver, bi->chanspec))
+ == CHSPEC_BAND(wl_chspec_driver_to_host(escan->ioctl_ver, bss->chanspec))) &&
+ bi->SSID_len == bss->SSID_len &&
+ !bcmp(bi->SSID, bss->SSID, bi->SSID_len)) {
/* do not allow beacon data to update
*the data recd from a probe response
(bi->flags & WL_BSS_FLAGS_FROM_BEACON))
goto exit;
- ESCAN_DBG(dev->name,
- "%s(%pM), i=%d prev: RSSI %d flags 0x%x, "
- "new: RSSI %d flags 0x%x\n",
- bss->SSID, &bi->BSSID, i, bss->RSSI, bss->flags,
- bi->RSSI, bi->flags);
+ ESCAN_TRACE(("%s("MACDBG"), i=%d prev: RSSI %d"
+ " flags 0x%x, new: RSSI %d flags 0x%x\n",
+ bss->SSID, MAC2STRDBG(bi->BSSID.octet), i,
+ bss->RSSI, bss->flags, bi->RSSI, bi->flags));
if ((bss->flags & WL_BSS_FLAGS_RSSI_ONCHANNEL) ==
(bi->flags & WL_BSS_FLAGS_RSSI_ONCHANNEL)) {
/* preserve max RSSI if the measurements are
* both on-channel or both off-channel
*/
- ESCAN_DBG(dev->name,
- "%s(%pM), same onchan, RSSI: prev %d new %d\n",
- bss->SSID, &bi->BSSID, bss->RSSI, bi->RSSI);
+ ESCAN_TRACE(("%s("MACDBG"), same onchan"
+ ", RSSI: prev %d new %d\n",
+ bss->SSID, MAC2STRDBG(bi->BSSID.octet),
+ bss->RSSI, bi->RSSI));
bi->RSSI = MAX(bss->RSSI, bi->RSSI);
} else if ((bss->flags & WL_BSS_FLAGS_RSSI_ONCHANNEL) &&
(bi->flags & WL_BSS_FLAGS_RSSI_ONCHANNEL) == 0) {
/* preserve the on-channel rssi measurement
* if the new measurement is off channel
*/
- ESCAN_DBG(dev->name,
- "%s(%pM), prev onchan, RSSI: prev %d new %d\n",
- bss->SSID, &bi->BSSID, bss->RSSI, bi->RSSI);
+ ESCAN_TRACE(("%s("MACDBG"), prev onchan"
+ ", RSSI: prev %d new %d\n",
+ bss->SSID, MAC2STRDBG(bi->BSSID.octet),
+ bss->RSSI, bi->RSSI));
bi->RSSI = bss->RSSI;
bi->flags |= WL_BSS_FLAGS_RSSI_ONCHANNEL;
}
if (dtoh32(bss->length) != bi_length) {
u32 prev_len = dtoh32(bss->length);
- ESCAN_DBG(dev->name,
- "bss info replacement occured(bcast:%d->probresp%d)\n",
- bss->ie_length, bi->ie_length);
- ESCAN_DBG(dev->name,
- "%s(%pM), replacement!(%d -> %d)\n",
- bss->SSID, &bi->BSSID, prev_len, bi_length);
-
- if (list->buflen - prev_len + bi_length > ESCAN_BUF_SIZE) {
- ESCAN_ERROR(dev->name,
- "Buffer is too small: keep the previous result "
- "of this AP\n");
+ ESCAN_TRACE(("bss info replacement"
+ " is occured(bcast:%d->probresp%d)\n",
+ bss->ie_length, bi->ie_length));
+ ESCAN_TRACE(("%s("MACDBG"), replacement!(%d -> %d)\n",
+ bss->SSID, MAC2STRDBG(bi->BSSID.octet),
+ prev_len, bi_length));
+
+ if (list->buflen - prev_len + bi_length
+ > ESCAN_BUF_SIZE) {
+ ESCAN_ERROR(("Buffer is too small: keep the"
+ " previous result of this AP\n"));
/* Only update RSSI */
bss->RSSI = bi->RSSI;
bss->flags |= (bi->flags
}
if (bi_length > ESCAN_BUF_SIZE - list->buflen) {
#ifdef ESCAN_BUF_OVERFLOW_MGMT
- wl_escan_remove_lowRSSI_info(dev, escan, list, candidate, bi);
+ wl_cfg80211_remove_lowRSSI_info(list, candidate, bi);
if (bi_length > ESCAN_BUF_SIZE - list->buflen) {
- ESCAN_DBG(dev->name,
- "RSSI(%pM) is too low(%d) to add Buffer\n",
- &bi->BSSID, bi->RSSI);
+ ESCAN_TRACE(("RSSI(" MACDBG ") is too low(%d) to add Buffer\n",
+ MAC2STRDBG(bi->BSSID.octet), bi->RSSI));
goto exit;
}
#else
- ESCAN_ERROR(dev->name, "Buffer is too small: ignoring\n");
+ ESCAN_ERROR(("Buffer is too small: ignoring\n"));
goto exit;
#endif /* ESCAN_BUF_OVERFLOW_MGMT */
}
+ if (strlen(bi->SSID) == 0) { // terence: fix for hidden SSID
+ ESCAN_SCAN(("Skip hidden SSID %pM\n", &bi->BSSID));
+ goto exit;
+ }
+
memcpy(&(((char *)list)[list->buflen]), bi, bi_length);
list->version = dtoh32(bi->version);
list->buflen += bi_length;
}
}
else if (status == WLC_E_STATUS_SUCCESS) {
- ESCAN_DBG(dev->name, "ESCAN COMPLETED\n");
+ escan->escan_state = ESCAN_STATE_IDLE;
+ ESCAN_TRACE(("ESCAN COMPLETED\n"));
escan->bss_list = wl_escan_get_buf(escan);
- ESCAN_DBG(dev->name, "SCAN COMPLETED: scanned AP count=%d\n",
- escan->bss_list->count);
- wl_escan_notify_complete(dev, escan, false);
+ ESCAN_TRACE(("SCAN COMPLETED: scanned AP count=%d\n",
+ escan->bss_list->count));
+ wl_escan_inform_bss(escan);
+ wl_notify_escan_complete(escan, false);
} else if ((status == WLC_E_STATUS_ABORT) || (status == WLC_E_STATUS_NEWSCAN) ||
(status == WLC_E_STATUS_11HQUIET) || (status == WLC_E_STATUS_CS_ABORT) ||
(status == WLC_E_STATUS_NEWASSOC)) {
/* Handle all cases of scan abort */
- ESCAN_DBG(dev->name, "ESCAN ABORT reason: %d\n", status);
+ escan->escan_state = ESCAN_STATE_IDLE;
+ ESCAN_TRACE(("ESCAN ABORT reason: %d\n", status));
escan->bss_list = wl_escan_get_buf(escan);
- ESCAN_DBG(dev->name, "SCAN ABORT: scanned AP count=%d\n",
- escan->bss_list->count);
- wl_escan_notify_complete(dev, escan, false);
+ ESCAN_TRACE(("SCAN ABORT: scanned AP count=%d\n",
+ escan->bss_list->count));
+ wl_escan_inform_bss(escan);
+ wl_notify_escan_complete(escan, false);
} else if (status == WLC_E_STATUS_TIMEOUT) {
- ESCAN_ERROR(dev->name, "WLC_E_STATUS_TIMEOUT\n");
- ESCAN_ERROR(dev->name, "reason[0x%x]\n", e->reason);
+ ESCAN_ERROR(("WLC_E_STATUS_TIMEOUT\n"));
+ ESCAN_ERROR(("reason[0x%x]\n", e->reason));
if (e->reason == 0xFFFFFFFF) {
- wl_escan_notify_complete(dev, escan, true);
+ wl_notify_escan_complete(escan, true);
}
+ escan->escan_state = ESCAN_STATE_IDLE;
} else {
- ESCAN_ERROR(dev->name, "unexpected Escan Event %d : abort\n", status);
+ ESCAN_ERROR(("unexpected Escan Event %d : abort\n", status));
+ escan->escan_state = ESCAN_STATE_IDLE;
escan->bss_list = wl_escan_get_buf(escan);
- ESCAN_DBG(dev->name, "SCAN ABORTED(UNEXPECTED): scanned AP count=%d\n",
- escan->bss_list->count);
- wl_escan_notify_complete(dev, escan, false);
+ ESCAN_TRACE(("SCAN ABORTED(UNEXPECTED): scanned AP count=%d\n",
+ escan->bss_list->count));
+ wl_escan_inform_bss(escan);
+ wl_notify_escan_complete(escan, false);
}
exit:
mutex_unlock(&escan->usr_sync);
}
static int
-wl_escan_prep(struct net_device *dev, struct wl_escan_info *escan,
- wl_uint32_list_t *list, wl_scan_params_t *params, wlc_ssid_t *ssid, bool bcast)
+wl_escan_prep(struct wl_escan_info *escan, wl_uint32_list_t *list,
+ wl_scan_params_t *params, wlc_ssid_t *ssid)
{
int err = 0;
wl_scan_results_t *results;
u32 n_channels = 0;
uint channel;
chanspec_t chanspec;
- u32 n_ssids;
results = wl_escan_get_buf(escan);
results->version = 0;
n_channels = dtoh32(list->count);
/* Copy channel array if applicable */
- ESCAN_SCAN(dev->name, "### List of channelspecs to scan ###\n");
+ ESCAN_SCAN(("### List of channelspecs to scan ###\n"));
if (n_channels > 0) {
for (i = 0; i < n_channels; i++) {
channel = dtoh32(list->element[i]);
continue;
chanspec = WL_CHANSPEC_BW_20;
if (chanspec == INVCHANSPEC) {
- ESCAN_ERROR(dev->name, "Invalid chanspec! Skipping channel\n");
+ ESCAN_ERROR(("Invalid chanspec! Skipping channel\n"));
continue;
}
if (channel <= CH_MAX_2G_CHANNEL) {
params->channel_list[j] = channel;
params->channel_list[j] &= WL_CHANSPEC_CHAN_MASK;
params->channel_list[j] |= chanspec;
- ESCAN_SCAN(dev->name, "Chan : %d, Channel spec: %x\n",
- channel, params->channel_list[j]);
+ ESCAN_SCAN(("Chan : %d, Channel spec: %x \n",
+ channel, params->channel_list[j]));
params->channel_list[j] = wl_chspec_host_to_driver(escan->ioctl_ver,
params->channel_list[j]);
j++;
}
} else {
- ESCAN_SCAN(dev->name, "Scanning all channels\n");
+ ESCAN_SCAN(("Scanning all channels\n"));
}
if (ssid && ssid->SSID_len) {
/* Copy ssid array if applicable */
- ESCAN_SCAN(dev->name, "### List of SSIDs to scan ###\n");
+ ESCAN_SCAN(("### List of SSIDs to scan ###\n"));
offset = offsetof(wl_scan_params_t, channel_list) + n_channels * sizeof(u16);
offset = roundup(offset, sizeof(u32));
ptr = (char*)params + offset;
- if (bcast) {
- n_ssids = 2;
- ESCAN_SCAN(dev->name, "0: Broadcast scan\n");
- memset(&ssid_tmp, 0, sizeof(wlc_ssid_t));
- ssid_tmp.SSID_len = 0;
- memcpy(ptr, &ssid_tmp, sizeof(wlc_ssid_t));
- ptr += sizeof(wlc_ssid_t);
- } else {
- n_ssids = 1;
- }
+ ESCAN_SCAN(("0: Broadcast scan\n"));
+ memset(&ssid_tmp, 0, sizeof(wlc_ssid_t));
+ ssid_tmp.SSID_len = 0;
+ memcpy(ptr, &ssid_tmp, sizeof(wlc_ssid_t));
+ ptr += sizeof(wlc_ssid_t);
memset(&ssid_tmp, 0, sizeof(wlc_ssid_t));
ssid_tmp.SSID_len = ssid->SSID_len;
memcpy(ssid_tmp.SSID, ssid->SSID, ssid->SSID_len);
memcpy(ptr, &ssid_tmp, sizeof(wlc_ssid_t));
ptr += sizeof(wlc_ssid_t);
- ESCAN_SCAN(dev->name, "1: scan for %s size=%d\n",
- ssid_tmp.SSID, ssid_tmp.SSID_len);
+ ESCAN_SCAN(("1: scan for %s size=%d\n", ssid_tmp.SSID, ssid_tmp.SSID_len));
/* Adding mask to channel numbers */
params->channel_num =
- htod32((n_ssids << WL_SCAN_PARAMS_NSSID_SHIFT) |
+ htod32((2 << WL_SCAN_PARAMS_NSSID_SHIFT) |
(n_channels & WL_SCAN_PARAMS_COUNT_MASK));
}
else {
- ESCAN_SCAN(dev->name, "Broadcast scan\n");
+ ESCAN_SCAN(("Broadcast scan\n"));
}
return err;
}
-static int
-wl_escan_reset(struct wl_escan_info *escan)
+static int wl_escan_reset(struct wl_escan_info *escan)
{
if (timer_pending(&escan->scan_timeout))
del_timer_sync(&escan->scan_timeout);
return 0;
}
-static void
-wl_escan_timeout(unsigned long data)
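+/* Scan watchdog: if escan never completes, log whatever was collected and
+ * inject a synthetic WLC_E_ESCAN_RESULT event with WLC_E_STATUS_TIMEOUT.
+ */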
+static void wl_escan_timeout(
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 15, 0)
+ struct timer_list *t
+#else
+ unsigned long data
+#endif
+)
{
wl_event_msg_t msg;
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 15, 0)
+ struct wl_escan_info *escan = from_timer(escan, t, scan_timeout);
+#else
struct wl_escan_info *escan = (struct wl_escan_info *)data;
+#endif
struct wl_scan_results *bss_list;
struct wl_bss_info *bi = NULL;
s32 i;
u32 channel;
- if (!escan->dev) {
- ESCAN_ERROR("wlan", "No dev present\n");
- return;
- }
-
bss_list = wl_escan_get_buf(escan);
if (!bss_list) {
- ESCAN_ERROR(escan->dev->name,
- "bss_list is null. Didn't receive any partial scan results\n");
+ ESCAN_ERROR(("bss_list is null. Didn't receive any partial scan results\n"));
} else {
- ESCAN_ERROR(escan->dev->name, "scanned AP count (%d)\n", bss_list->count);
+ ESCAN_ERROR(("%s: scanned AP count (%d)\n", __FUNCTION__, bss_list->count));
bi = next_bss(bss_list, bi);
for_each_bss(bss_list, bi, i) {
- channel = wf_chspec_ctlchan(wl_chspec_driver_to_host(escan->ioctl_ver,
- bi->chanspec));
- ESCAN_ERROR(escan->dev->name, "SSID :%s Channel :%d\n", bi->SSID, channel);
+ channel = wf_chspec_ctlchan(wl_chspec_driver_to_host(escan->ioctl_ver, bi->chanspec));
+ ESCAN_ERROR(("SSID :%s Channel :%d\n", bi->SSID, channel));
}
}
+ if (!escan->dev) {
+ ESCAN_ERROR(("No dev present\n"));
+ return;
+ }
+
bzero(&msg, sizeof(wl_event_msg_t));
- ESCAN_ERROR(escan->dev->name, "timer expired\n");
+ ESCAN_ERROR(("timer expired\n"));
- msg.ifidx = dhd_net2idx(escan->pub->info, escan->dev);
msg.event_type = hton32(WLC_E_ESCAN_RESULT);
msg.status = hton32(WLC_E_STATUS_TIMEOUT);
msg.reason = 0xFFFFFFFF;
- wl_ext_event_send(escan->pub->event_params, &msg, NULL);
+ wl_escan_event(escan->dev, &msg, NULL);
+
+ // terence 20130729: workaround to fix out of memory in firmware
+// if (dhd_conf_get_chip(dhd_get_pub(dev)) == BCM43362_CHIP_ID) {
+// ESCAN_ERROR(("Send hang event\n"));
+// net_os_send_hang_message(dev);
+// }
}
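+/* WEXT-style scan trigger: pick up an optional ESSID from the request, build
+ * the escan parameters and start the firmware escan via the "escan" iovar.
+ */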
int
-wl_escan_set_scan(struct net_device *dev, dhd_pub_t *dhdp,
- wlc_ssid_t *ssid, uint16 channel, bool bcast)
+wl_escan_set_scan(
+ struct net_device *dev,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu,
+ char *extra
+)
{
- struct wl_escan_info *escan = dhdp->escan;
s32 err = BCME_OK;
s32 params_size = (WL_SCAN_PARAMS_FIXED_SIZE + OFFSETOF(wl_escan_params_t, params));
wl_escan_params_t *params = NULL;
scb_val_t scbval;
static int cnt = 0;
+ struct wl_escan_info *escan = g_escan;
+ wlc_ssid_t ssid;
u32 n_channels = 0;
wl_uint32_list_t *list;
u8 valid_chan_list[sizeof(u32)*(WL_NUMCHANNELS + 1)];
+ s32 val = 0;
+
+ ESCAN_TRACE(("Enter \n"));
+ if (!escan) {
+ ESCAN_ERROR(("device is not ready\n"));
+ return -EIO;
+ }
mutex_lock(&escan->usr_sync);
- if (escan->escan_state == ESCAN_STATE_DOWN) {
- ESCAN_ERROR(dev->name, "STATE is down\n");
- err = -EINVAL;
- goto exit2;
+
+ if (!escan->ioctl_ver) {
+ val = 1;
+		if ((err = wldev_ioctl(dev, WLC_GET_VERSION, &val, sizeof(int), false)) < 0) {
+ ANDROID_ERROR(("WLC_GET_VERSION failed, err=%d\n", err));
+ goto exit;
+ }
+ val = dtoh32(val);
+ if (val != WLC_IOCTL_VERSION && val != 1) {
+ ANDROID_ERROR(("Version mismatch, please upgrade. Got %d, expected %d or 1\n",
+ val, WLC_IOCTL_VERSION));
+ goto exit;
+ }
+ escan->ioctl_ver = val;
+ printf("%s: ioctl_ver=%d\n", __FUNCTION__, val);
}
- if (wl_ext_check_scan(dev, dhdp)) {
- err = -EBUSY;
+ /* default Broadcast scan */
+ memset(&ssid, 0, sizeof(ssid));
+
+#if WIRELESS_EXT > 17
+ /* check for given essid */
+ if (wrqu->data.length == sizeof(struct iw_scan_req)) {
+ if (wrqu->data.flags & IW_SCAN_THIS_ESSID) {
+ struct iw_scan_req *req = (struct iw_scan_req *)extra;
+ ssid.SSID_len = MIN(sizeof(ssid.SSID), req->essid_len);
+ memcpy(ssid.SSID, req->essid, ssid.SSID_len);
+ ssid.SSID_len = htod32(ssid.SSID_len);
+ }
+ }
+#endif
+ if (escan->escan_state == ESCAN_STATE_SCANING) {
+ ESCAN_ERROR(("Scanning already\n"));
goto exit;
}
- ESCAN_TRACE(dev->name, "Enter \n");
-
/* if scan request is not empty parse scan request paramters */
memset(valid_chan_list, 0, sizeof(valid_chan_list));
list = (wl_uint32_list_t *)(void *) valid_chan_list;
-
- if (channel) {
- list->count = htod32(1);
- list->element[0] = htod32(channel);
- } else {
- list->count = htod32(WL_NUMCHANNELS);
- err = wldev_ioctl(dev, WLC_GET_VALID_CHANNELS, valid_chan_list,
- sizeof(valid_chan_list), false);
- if (err != 0) {
- ESCAN_ERROR(dev->name, "get channels failed with %d\n", err);
- goto exit;
- }
+ list->count = htod32(WL_NUMCHANNELS);
+ err = wldev_ioctl(escan->dev, WLC_GET_VALID_CHANNELS, valid_chan_list, sizeof(valid_chan_list), false);
+ if (err != 0) {
+ ESCAN_ERROR(("%s: get channels failed with %d\n", __FUNCTION__, err));
+ goto exit;
}
-
n_channels = dtoh32(list->count);
/* Allocate space for populating ssids in wl_escan_params_t struct */
if (dtoh32(list->count) % 2)
params_size += sizeof(u16) * (n_channels + 1);
else
params_size += sizeof(u16) * n_channels;
- if (ssid && ssid->SSID_len) {
+ if (ssid.SSID_len) {
params_size += sizeof(struct wlc_ssid) * 2;
}
err = -ENOMEM;
goto exit;
}
- wl_escan_prep(dev, escan, list, ¶ms->params, ssid, bcast);
+	wl_escan_prep(escan, list, &params->params, &ssid);
params->version = htod32(ESCAN_REQ_VERSION);
params->action = htod16(WL_SCAN_ACTION_START);
wl_escan_set_sync_id(params->sync_id);
if (params_size + sizeof("escan") >= WLC_IOCTL_MEDLEN) {
- ESCAN_ERROR(dev->name, "ioctl buffer length not sufficient\n");
+ ESCAN_ERROR(("ioctl buffer length not sufficient\n"));
kfree(params);
err = -ENOMEM;
goto exit;
}
params->params.scan_type = DOT11_SCANTYPE_ACTIVE;
- ESCAN_SCAN(dev->name, "Passive scan_type %d\n", params->params.scan_type);
+ ESCAN_TRACE(("Passive scan_type %d\n", params->params.scan_type));
- WL_MSG(dev->name, "LEGACY_SCAN\n");
err = wldev_iovar_setbuf(dev, "escan", params, params_size,
escan->escan_ioctl_buf, WLC_IOCTL_MEDLEN, NULL);
if (unlikely(err)) {
if (err == BCME_EPERM)
/* Scan Not permitted at this point of time */
- ESCAN_TRACE(dev->name, "Escan not permitted at this time (%d)\n", err);
+ ESCAN_TRACE(("Escan not permitted at this time (%d)\n", err));
else
- ESCAN_ERROR(dev->name, "Escan set error (%d)\n", err);
+ ESCAN_ERROR(("Escan set error (%d)\n", err));
+ wl_escan_reset(escan);
}
kfree(params);
+exit:
if (unlikely(err)) {
/* Don't print Error incase of Scan suppress */
- if (err == BCME_EPERM)
- ESCAN_TRACE(dev->name, "Escan failed: Scan Suppressed\n");
+ if ((err == BCME_EPERM))
+ ESCAN_TRACE(("Escan failed: Scan Suppressed \n"));
else {
cnt++;
- ESCAN_ERROR(dev->name, "error (%d), cnt=%d\n", err, cnt);
+ ESCAN_ERROR(("error (%d), cnt=%d\n", err, cnt));
// terence 20140111: send disassoc to firmware
if (cnt >= 4) {
memset(&scbval, 0, sizeof(scb_val_t));
wldev_ioctl(dev, WLC_DISASSOC, &scbval, sizeof(scb_val_t), true);
- ESCAN_ERROR(dev->name, "Send disassoc to break the busy\n");
+ ESCAN_ERROR(("Send disassoc to break the busy dev=%p\n", dev));
cnt = 0;
}
}
} else {
cnt = 0;
- escan->dev = dev;
}
-exit:
- if (unlikely(err)) {
- wl_escan_reset(escan);
- }
-exit2:
mutex_unlock(&escan->usr_sync);
return err;
}
-#if defined(WL_WIRELESS_EXT)
-static int
-rssi_to_qual(int rssi)
-{
- if (rssi <= WL_IW_RSSI_NO_SIGNAL)
- return 0;
- else if (rssi <= WL_IW_RSSI_VERY_LOW)
- return 1;
- else if (rssi <= WL_IW_RSSI_LOW)
- return 2;
- else if (rssi <= WL_IW_RSSI_GOOD)
- return 3;
- else if (rssi <= WL_IW_RSSI_VERY_GOOD)
- return 4;
- else
- return 5;
-}
-
-static int
-wl_escan_merge_scan_results(struct net_device *dev, struct wl_escan_info *escan,
- struct iw_request_info *info, char *extra, wl_bss_info_t *bi, int *len, int max_size)
+int
+wl_escan_get_scan(
+ struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_point *dwrq,
+ char *extra
+)
{
s32 err = BCME_OK;
struct iw_event iwe;
- int j;
- char *event = extra, *end = extra + max_size - WE_ADD_EVENT_FIX, *value;
+ int i, j;
+ char *event = extra, *end = extra + dwrq->length, *value;
int16 rssi;
int channel;
- chanspec_t chanspec;
-
- /* overflow check cover fields before wpa IEs */
- if (event + ETHER_ADDR_LEN + bi->SSID_len + IW_EV_UINT_LEN + IW_EV_FREQ_LEN +
- IW_EV_QUAL_LEN >= end) {
- err = -E2BIG;
- goto exit;
- }
-
-#if defined(RSSIAVG)
- rssi = wl_get_avg_rssi(&escan->g_rssi_cache_ctrl, &bi->BSSID);
- if (rssi == RSSI_MINVAL)
- rssi = MIN(dtoh16(bi->RSSI), RSSI_MAXVAL);
-#else
- // terence 20150419: limit the max. rssi to -2 or the bss will be filtered out in android OS
- rssi = MIN(dtoh16(bi->RSSI), RSSI_MAXVAL);
-#endif
- chanspec = wl_chspec_driver_to_host(escan->ioctl_ver, bi->chanspec);
- channel = wf_chspec_ctlchan(chanspec);
- ESCAN_SCAN(dev->name, "BSSID %pM, channel %3d(%3d %sMHz), rssi %3d, SSID \"%s\"\n",
- &bi->BSSID, channel, CHSPEC_CHANNEL(chanspec),
- CHSPEC_IS20(chanspec)?"20":
- CHSPEC_IS40(chanspec)?"40":
- CHSPEC_IS80(chanspec)?"80":"160",
- rssi, bi->SSID);
-
- /* First entry must be the BSSID */
- iwe.cmd = SIOCGIWAP;
- iwe.u.ap_addr.sa_family = ARPHRD_ETHER;
- memcpy(iwe.u.ap_addr.sa_data, &bi->BSSID, ETHER_ADDR_LEN);
- event = IWE_STREAM_ADD_EVENT(info, event, end, &iwe, IW_EV_ADDR_LEN);
-
- /* SSID */
- iwe.u.data.length = dtoh32(bi->SSID_len);
- iwe.cmd = SIOCGIWESSID;
- iwe.u.data.flags = 1;
- event = IWE_STREAM_ADD_POINT(info, event, end, &iwe, bi->SSID);
-
- /* Mode */
- if (dtoh16(bi->capability) & (DOT11_CAP_ESS | DOT11_CAP_IBSS)) {
- iwe.cmd = SIOCGIWMODE;
- if (dtoh16(bi->capability) & DOT11_CAP_ESS)
- iwe.u.mode = IW_MODE_INFRA;
- else
- iwe.u.mode = IW_MODE_ADHOC;
- event = IWE_STREAM_ADD_EVENT(info, event, end, &iwe, IW_EV_UINT_LEN);
- }
-
- /* Channel */
- iwe.cmd = SIOCGIWFREQ;
-#if 1
- iwe.u.freq.m = wf_channel2mhz(channel, channel <= CH_MAX_2G_CHANNEL ?
- WF_CHAN_FACTOR_2_4_G : WF_CHAN_FACTOR_5_G);
-#else
- iwe.u.freq.m = wf_channel2mhz(bi->n_cap ?
- bi->ctl_ch : CHSPEC_CHANNEL(bi->chanspec),
- CHSPEC_CHANNEL(bi->chanspec) <= CH_MAX_2G_CHANNEL ?
- WF_CHAN_FACTOR_2_4_G : WF_CHAN_FACTOR_5_G);
-#endif
- iwe.u.freq.e = 6;
- event = IWE_STREAM_ADD_EVENT(info, event, end, &iwe, IW_EV_FREQ_LEN);
-
- /* Channel quality */
- iwe.cmd = IWEVQUAL;
- iwe.u.qual.qual = rssi_to_qual(rssi);
- iwe.u.qual.level = 0x100 + rssi;
- iwe.u.qual.noise = 0x100 + bi->phy_noise;
- event = IWE_STREAM_ADD_EVENT(info, event, end, &iwe, IW_EV_QUAL_LEN);
-
- wl_iw_handle_scanresults_ies(&event, end, info, bi);
-
- /* Encryption */
- iwe.cmd = SIOCGIWENCODE;
- if (dtoh16(bi->capability) & DOT11_CAP_PRIVACY)
- iwe.u.data.flags = IW_ENCODE_ENABLED | IW_ENCODE_NOKEY;
- else
- iwe.u.data.flags = IW_ENCODE_DISABLED;
- iwe.u.data.length = 0;
- event = IWE_STREAM_ADD_POINT(info, event, end, &iwe, (char *)event);
-
- /* Rates */
- if (bi->rateset.count <= sizeof(bi->rateset.rates)) {
- if (event + IW_MAX_BITRATES*IW_EV_PARAM_LEN >= end) {
- err = -E2BIG;
- goto exit;
- }
- value = event + IW_EV_LCP_LEN;
- iwe.cmd = SIOCGIWRATE;
- /* Those two flags are ignored... */
- iwe.u.bitrate.fixed = iwe.u.bitrate.disabled = 0;
- for (j = 0; j < bi->rateset.count && j < IW_MAX_BITRATES; j++) {
- iwe.u.bitrate.value = (bi->rateset.rates[j] & 0x7f) * 500000;
- value = IWE_STREAM_ADD_VALUE(info, event, value, end, &iwe,
- IW_EV_PARAM_LEN);
- }
- event = value;
- }
- *len = event - extra;
- if (*len < 0)
- ESCAN_ERROR(dev->name, "==> Wrong size\n");
-
-exit:
- return err;
-}
-
-int
-wl_escan_get_scan(struct net_device *dev, dhd_pub_t *dhdp,
- struct iw_request_info *info, struct iw_point *dwrq, char *extra)
-{
- struct wl_escan_info *escan = dhdp->escan;
- s32 err = BCME_OK;
- int i = 0;
- int len_prep = 0, len_ret = 0;
wl_bss_info_t *bi = NULL;
+ struct wl_escan_info *escan = g_escan;
struct wl_scan_results *bss_list;
- __u16 buflen_from_user = dwrq->length;
#if defined(BSSCACHE)
wl_bss_cache_t *node;
#endif
- char *buf = NULL;
- struct ether_addr cur_bssid;
- u8 ioctl_buf[WLC_IOCTL_SMLEN];
- if (!extra) {
- ESCAN_TRACE(dev->name, "extra is null\n");
+ ESCAN_TRACE(("%s: %s SIOCGIWSCAN, len=%d\n", __FUNCTION__, dev->name, dwrq->length));
+
+ if (!extra)
return -EINVAL;
- }
mutex_lock(&escan->usr_sync);
/* Check for scan in progress */
if (escan->escan_state == ESCAN_STATE_SCANING) {
- ESCAN_DBG(dev->name, "SIOCGIWSCAN GET still scanning\n");
+ ESCAN_TRACE(("%s: SIOCGIWSCAN GET still scanning\n", dev->name));
err = -EAGAIN;
goto exit;
}
if (!escan->bss_list) {
- ESCAN_ERROR(dev->name, "scan not ready\n");
+ ESCAN_ERROR(("%s: scan not ready\n", dev->name));
err = -EAGAIN;
goto exit;
}
- if (dev != escan->dev) {
- ESCAN_ERROR(dev->name, "not my scan from %s\n", escan->dev->name);
- err = -EINVAL;
- goto exit;
- }
-
- ESCAN_SCAN(dev->name, "SIOCGIWSCAN, len=%d\n", dwrq->length);
-
- wldev_iovar_getbuf(dev, "cur_etheraddr", NULL, 0, ioctl_buf, WLC_IOCTL_SMLEN, NULL);
- err = wldev_ioctl(dev, WLC_GET_BSSID, &cur_bssid, sizeof(cur_bssid), false);
- if (err != BCME_NOTASSOCIATED &&
- memcmp(ðer_null, &cur_bssid, ETHER_ADDR_LEN) &&
- memcmp(ioctl_buf, &cur_bssid, ETHER_ADDR_LEN)) {
- // merge current connected bss
- buf = kzalloc(WL_EXTRA_BUF_MAX, GFP_ATOMIC);
- if (!buf) {
- ESCAN_ERROR(dev->name, "buffer alloc failed.\n");
- err = BCME_NOMEM;
- goto exit;
- }
- *(u32 *)buf = htod32(WL_EXTRA_BUF_MAX);
- err = wldev_ioctl(dev, WLC_GET_BSS_INFO, buf, WL_EXTRA_BUF_MAX, false);
- if (unlikely(err)) {
- ESCAN_ERROR(dev->name, "Could not get bss info %d\n", err);
- goto exit;
- }
- bi = (struct wl_bss_info *)(buf + 4);
- len_prep = 0;
- err = wl_escan_merge_scan_results(dev, escan, info, extra+len_ret, bi,
- &len_prep, buflen_from_user-len_ret);
- len_ret += len_prep;
- if (err)
- goto exit;
- bi = NULL;
- }
#if defined(BSSCACHE)
bss_list = &escan->g_bss_cache_ctrl.m_cache_head->results;
#if defined(BSSCACHE)
bi = node->results.bss_info;
#endif
- if (!memcmp(&bi->BSSID, &cur_bssid, ETHER_ADDR_LEN)) {
- ESCAN_SCAN(dev->name, "skip connected AP %pM\n", &cur_bssid);
-#if defined(BSSCACHE)
- node = node->next;
+ /* overflow check covering the fields before the WPA IEs */
+ if (event + ETHER_ADDR_LEN + bi->SSID_len + IW_EV_UINT_LEN + IW_EV_FREQ_LEN +
+ IW_EV_QUAL_LEN >= end) {
+ err = -E2BIG;
+ goto exit;
+ }
+
+#if defined(RSSIAVG)
+ rssi = wl_get_avg_rssi(&escan->g_rssi_cache_ctrl, &bi->BSSID);
+ if (rssi == RSSI_MINVAL)
+ rssi = MIN(dtoh16(bi->RSSI), RSSI_MAXVAL);
+#else
+ // terence 20150419: limit the max. rssi to -2 or the bss will be filtered out in android OS
+ rssi = MIN(dtoh16(bi->RSSI), RSSI_MAXVAL);
#endif
- continue;
+ channel = wf_chspec_ctlchan(wl_chspec_driver_to_host(escan->ioctl_ver, bi->chanspec));
+ ESCAN_SCAN(("BSSID="MACSTR", channel=%d, RSSI=%d, SSID=\"%s\"\n",
+ MAC2STR(bi->BSSID.octet), channel, rssi, bi->SSID));
+
+ /* First entry must be the BSSID */
+ iwe.cmd = SIOCGIWAP;
+ iwe.u.ap_addr.sa_family = ARPHRD_ETHER;
+ memcpy(iwe.u.ap_addr.sa_data, &bi->BSSID, ETHER_ADDR_LEN);
+ event = IWE_STREAM_ADD_EVENT(info, event, end, &iwe, IW_EV_ADDR_LEN);
+
+ /* SSID */
+ iwe.u.data.length = dtoh32(bi->SSID_len);
+ iwe.cmd = SIOCGIWESSID;
+ iwe.u.data.flags = 1;
+ event = IWE_STREAM_ADD_POINT(info, event, end, &iwe, bi->SSID);
+
+ /* Mode */
+ if (dtoh16(bi->capability) & (DOT11_CAP_ESS | DOT11_CAP_IBSS)) {
+ iwe.cmd = SIOCGIWMODE;
+ if (dtoh16(bi->capability) & DOT11_CAP_ESS)
+ iwe.u.mode = IW_MODE_INFRA;
+ else
+ iwe.u.mode = IW_MODE_ADHOC;
+ event = IWE_STREAM_ADD_EVENT(info, event, end, &iwe, IW_EV_UINT_LEN);
+ }
+
+ /* Channel */
+ iwe.cmd = SIOCGIWFREQ;
+#if 1
+ iwe.u.freq.m = wf_channel2mhz(channel, channel <= CH_MAX_2G_CHANNEL ?
+ WF_CHAN_FACTOR_2_4_G : WF_CHAN_FACTOR_5_G);
+#else
+ iwe.u.freq.m = wf_channel2mhz(bi->n_cap ?
+ bi->ctl_ch : CHSPEC_CHANNEL(bi->chanspec),
+ CHSPEC_CHANNEL(bi->chanspec) <= CH_MAX_2G_CHANNEL ?
+ WF_CHAN_FACTOR_2_4_G : WF_CHAN_FACTOR_5_G);
+#endif
+ iwe.u.freq.e = 6;
+ event = IWE_STREAM_ADD_EVENT(info, event, end, &iwe, IW_EV_FREQ_LEN);
+
+ /* Channel quality */
+ iwe.cmd = IWEVQUAL;
+ iwe.u.qual.qual = rssi_to_qual(rssi);
+ iwe.u.qual.level = 0x100 + rssi;
+ iwe.u.qual.noise = 0x100 + bi->phy_noise;
+ event = IWE_STREAM_ADD_EVENT(info, event, end, &iwe, IW_EV_QUAL_LEN);
+
+ wl_iw_handle_scanresults_ies(&event, end, info, bi);
+
+ /* Encryption */
+ iwe.cmd = SIOCGIWENCODE;
+ if (dtoh16(bi->capability) & DOT11_CAP_PRIVACY)
+ iwe.u.data.flags = IW_ENCODE_ENABLED | IW_ENCODE_NOKEY;
+ else
+ iwe.u.data.flags = IW_ENCODE_DISABLED;
+ iwe.u.data.length = 0;
+ event = IWE_STREAM_ADD_POINT(info, event, end, &iwe, (char *)event);
+
+ /* Rates */
+ if (bi->rateset.count <= sizeof(bi->rateset.rates)) {
+ if (event + IW_MAX_BITRATES*IW_EV_PARAM_LEN >= end) {
+ err = -E2BIG;
+ goto exit;
+ }
+ value = event + IW_EV_LCP_LEN;
+ iwe.cmd = SIOCGIWRATE;
+ /* Those two flags are ignored... */
+ iwe.u.bitrate.fixed = iwe.u.bitrate.disabled = 0;
+ for (j = 0; j < bi->rateset.count && j < IW_MAX_BITRATES; j++) {
+ iwe.u.bitrate.value = (bi->rateset.rates[j] & 0x7f) * 500000;
+ value = IWE_STREAM_ADD_VALUE(info, event, value, end, &iwe,
+ IW_EV_PARAM_LEN);
+ }
+ event = value;
}
- len_prep = 0;
- err = wl_escan_merge_scan_results(dev, escan, info, extra+len_ret, bi,
- &len_prep, buflen_from_user-len_ret);
- len_ret += len_prep;
- if (err)
- goto exit;
#if defined(BSSCACHE)
node = node->next;
#endif
}
- if ((len_ret + WE_ADD_EVENT_FIX) < dwrq->length)
- dwrq->length = len_ret;
-
+ dwrq->length = event - extra;
dwrq->flags = 0; /* todo */
- ESCAN_SCAN(dev->name, "scanned AP count (%d)\n", i);
+ ESCAN_SCAN(("scanned AP count (%d)\n", i));
exit:
- kfree(buf);
- dwrq->length = len_ret;
mutex_unlock(&escan->usr_sync);
return err;
}
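
For context, an illustrative user-space sketch (not part of this patch): the handler above reports -E2BIG when the caller's buffer cannot hold all iw_event records and -EAGAIN while a scan is still running, so SIOCGIWSCAN callers conventionally grow the buffer and retry, then parse dwrq->length bytes of packed events. The interface name, initial size and helper name below are placeholders.

#include <errno.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/wireless.h>

/* Hypothetical helper: fetch scan results through an already-opened socket fd,
 * growing the buffer while the driver keeps answering E2BIG. */
static int example_fetch_scan_results(int skfd)
{
	struct iwreq wrq;
	char *buf = NULL, *nbuf;
	size_t buflen = IW_SCAN_MAX_DATA;	/* 4096-byte first attempt */

	memset(&wrq, 0, sizeof(wrq));
	strncpy(wrq.ifr_name, "wlan0", IFNAMSIZ - 1);

	while (buflen <= 0xffff) {		/* iw_point.length is 16 bits */
		nbuf = realloc(buf, buflen);
		if (!nbuf)
			break;
		buf = nbuf;
		wrq.u.data.pointer = buf;
		wrq.u.data.length = buflen;
		wrq.u.data.flags = 0;
		if (ioctl(skfd, SIOCGIWSCAN, &wrq) == 0) {
			/* wrq.u.data.length bytes of iw_event records are
			 * valid; walk them here. */
			free(buf);
			return 0;
		}
		if (errno != E2BIG)		/* e.g. EAGAIN: scan still running */
			break;
		buflen *= 2;			/* driver wants a bigger buffer */
	}
	free(buf);
	return -1;
}
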
-#endif /* WL_WIRELESS_EXT */
-
-#ifdef WLMESH
-bool
-wl_escan_meshid_ie(u8 *parse, u32 len, wlc_ssid_t *mesh_id)
-{
- bcm_tlv_t *ie;
-
- if((ie = bcm_parse_tlvs(parse, (int)len, DOT11_MNG_MESH_ID)) != NULL) {
- mesh_id->SSID_len = ie->len;
- if (ie->len) {
- strncpy(mesh_id->SSID, ie->data, ie->len);
- }
- return TRUE;
- }
- return FALSE;
-}
-
-bool
-wl_escan_rsn_ie(u8 *parse, u32 len)
-{
- if (bcm_parse_tlvs(parse, (u32)len, DOT11_MNG_RSN_ID)) {
- return TRUE;
- }
- return FALSE;
-}
-bool
-wl_escan_mesh_info_ie(struct net_device *dev, u8 *parse, u32 len,
- struct wl_mesh_params *mesh_info)
+s32 wl_escan_autochannel(struct net_device *dev, char* command, int total_len)
{
- bcm_tlv_t *ie;
- uchar mesh_oui[]={0x00, 0x22, 0xf4};
- int totl_len;
- uint8 *pie;
- uint max_len;
- bool found = FALSE;
-
- memset(mesh_info, 0, sizeof(struct wl_mesh_params));
- if((ie = bcm_parse_tlvs(parse, (int)len, DOT11_MNG_VS_ID)) != NULL) {
- totl_len = ie->len;
- if (!memcmp(ie->data, &mesh_oui, sizeof(mesh_oui))) {
- pie = ie->data + sizeof(mesh_oui);
- ie = (bcm_tlv_t *)pie;
- totl_len -= sizeof(mesh_oui);
- while (totl_len > 2 && ie->len) {
- if (ie->id == MESH_INFO_MASTER_BSSID && ie->len == ETHER_ADDR_LEN) {
- memcpy(&mesh_info->master_bssid, ie->data, ETHER_ADDR_LEN);
- } else if (ie->id == MESH_INFO_MASTER_CHANNEL) {
- mesh_info->master_channel = ie->data[0];
- found = TRUE;
- } else if (ie->id == MESH_INFO_HOP_CNT) {
- mesh_info->hop_cnt = ie->data[0];
- } else if (ie->id == MESH_INFO_PEER_BSSID) {
- max_len = min(MAX_HOP_LIST*ETHER_ADDR_LEN, (int)ie->len);
- memcpy(mesh_info->peer_bssid, ie->data, max_len);
- }
- totl_len -= (ie->len + 2);
- pie = ie->data + ie->len;
- ie = (bcm_tlv_t *)pie;
- }
- }
+ struct wl_escan_info *escan = g_escan;
+ int ret = 0;
+ int bytes_written = -1;
+
+ sscanf(command, "%*s %d", &escan->autochannel);
+
+ if (escan->autochannel == 0) {
+ escan->best_2g_ch = 0;
+ escan->best_5g_ch = 0;
+ } else if (escan->autochannel == 2) {
+ bytes_written = snprintf(command, total_len, "2g=%d 5g=%d",
+ escan->best_2g_ch, escan->best_5g_ch);
+ ANDROID_TRACE(("%s: command result is %s\n", __FUNCTION__, command));
+ ret = bytes_written;
}
- return found;
+ return ret;
}
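
A hedged usage sketch for the autochannel hook above (not part of the patch): the leading token of the command string is skipped by the %*s conversion, so the exact keyword below is a placeholder; mode 0 clears the cached results, mode 2 formats them back into the caller's buffer as "2g=<ch> 5g=<ch>", and other values are assumed to arm best-channel tracking for the next scan.

/* Illustrative only -- 'dev' is an already-initialized net_device. */
static void example_query_autochannel(struct net_device *dev)
{
	char cmd[64];
	s32 len;

	/* arm best-channel tracking (mode value other than 0/2 is assumed) */
	snprintf(cmd, sizeof(cmd), "AUTOCHANNEL %d", 1);
	wl_escan_autochannel(dev, cmd, sizeof(cmd));

	/* ... run a scan, then read the result back, e.g. "2g=6 5g=36" ... */
	snprintf(cmd, sizeof(cmd), "AUTOCHANNEL %d", 2);
	len = wl_escan_autochannel(dev, cmd, sizeof(cmd));
	if (len > 0)
		printf("%s: %s\n", __FUNCTION__, cmd);
}
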
-bool
-wl_escan_mesh_info(struct net_device *dev, struct wl_escan_info *escan,
- struct ether_addr *peer_bssid, struct wl_mesh_params *mesh_info)
+static s32 wl_create_event_handler(struct wl_escan_info *escan)
{
- int i = 0;
- wl_bss_info_t *bi = NULL;
- struct wl_scan_results *bss_list;
- int16 bi_rssi, bi_chan;
- wlc_ssid_t bi_meshid;
- bool is_mesh_peer = FALSE, found = FALSE;
- struct wl_mesh_params peer_mesh_info;
-
- mutex_lock(&escan->usr_sync);
-
- /* Check for scan in progress */
- if (escan->escan_state == ESCAN_STATE_SCANING) {
- ESCAN_ERROR(dev->name, "SIOCGIWSCAN GET still scanning\n");
- goto exit;
- }
- if (!escan->bss_list) {
- ESCAN_ERROR(dev->name, "scan not ready\n");
- goto exit;
- }
- if (dev != escan->dev) {
- ESCAN_ERROR(dev->name, "not my scan from %s\n", escan->dev->name);
- goto exit;
- }
+ int ret = 0;
+ ESCAN_TRACE(("Enter \n"));
- bss_list = escan->bss_list;
- bi = next_bss(bss_list, bi);
- ESCAN_SCAN(dev->name, "scanned AP/Mesh count (%d)\n", bss_list->count);
- for_each_bss(bss_list, bi, i)
- {
- memset(&bi_meshid, 0, sizeof(bi_meshid));
- is_mesh_peer = FALSE;
- bi_chan = wf_chspec_ctlchan(
- wl_chspec_driver_to_host(escan->ioctl_ver, bi->chanspec));
- bi_rssi = MIN(dtoh16(bi->RSSI), RSSI_MAXVAL);
- is_mesh_peer = wl_escan_meshid_ie(((u8*)bi)+bi->ie_offset,
- bi->ie_length, &bi_meshid);
- if (!(bi->capability & (DOT11_CAP_ESS|DOT11_CAP_IBSS)) && is_mesh_peer) {
- bool bi_sae = FALSE, bss_found = FALSE, prefer = FALSE;
- if (!memcmp(peer_bssid, &bi->BSSID, ETHER_ADDR_LEN)) {
- bi_sae = wl_escan_rsn_ie(((u8*)bi)+bi->ie_offset, bi->ie_length);
- bss_found = wl_escan_mesh_info_ie(dev, ((u8*)bi)+bi->ie_offset,
- bi->ie_length, &peer_mesh_info);
- if (bss_found) {
- memcpy(&mesh_info->master_bssid, &peer_mesh_info.master_bssid,
- ETHER_ADDR_LEN);
- mesh_info->master_channel = peer_mesh_info.master_channel;
- mesh_info->hop_cnt = peer_mesh_info.hop_cnt;
- memcpy(mesh_info->peer_bssid, peer_mesh_info.peer_bssid,
- sizeof(peer_mesh_info.peer_bssid));
- prefer = TRUE;
- found = TRUE;
- }
- }
- ESCAN_SCAN(dev->name,
- "%s[Mesh] BSSID=%pM, channel=%d, RSSI=%d, sec=%s, "
- "mbssid=%pM, mchannel=%d, hop=%d, pbssid=%pM, MeshID=\"%s\"\n",
- prefer?"*":" ", &bi->BSSID, bi_chan, bi_rssi, bi_sae?"SAE":"OPEN",
- &peer_mesh_info.master_bssid, peer_mesh_info.master_channel,
- peer_mesh_info.hop_cnt, &peer_mesh_info.peer_bssid, bi_meshid.SSID);
- }
- }
+ /* Do not use DHD in cfg driver */
+ escan->event_tsk.thr_pid = -1;
-exit:
- mutex_unlock(&escan->usr_sync);
- return found;
+ PROC_START(wl_escan_event_handler, escan, &escan->event_tsk, 0, "wl_escan_handler");
+ if (escan->event_tsk.thr_pid < 0)
+ ret = -ENOMEM;
+ return ret;
}
-bool
-wl_escan_mesh_peer(struct net_device *dev, struct wl_escan_info *escan,
- wlc_ssid_t *cur_ssid, uint16 cur_chan, bool sae,
- struct wl_mesh_params *mesh_info)
+static void wl_destroy_event_handler(struct wl_escan_info *escan)
{
- int i = 0;
- wl_bss_info_t *bi = NULL;
- struct wl_scan_results *bss_list;
- int16 bi_rssi, bi_chan, max_rssi = -100;
- uint min_hop_cnt = 255;
- wlc_ssid_t bi_meshid;
- bool is_mesh_peer = FALSE, chan_matched = FALSE, found = FALSE;
- struct wl_mesh_params peer_mesh_info;
-
- mutex_lock(&escan->usr_sync);
-
- /* Check for scan in progress */
- if (escan->escan_state == ESCAN_STATE_SCANING) {
- ESCAN_ERROR(dev->name, "SIOCGIWSCAN GET still scanning\n");
- goto exit;
- }
- if (!escan->bss_list) {
- ESCAN_ERROR(dev->name, "scan not ready\n");
- goto exit;
- }
- if (dev != escan->dev) {
- ESCAN_ERROR(dev->name, "not my scan from %s\n", escan->dev->name);
- goto exit;
- }
-
- bss_list = escan->bss_list;
- bi = next_bss(bss_list, bi);
- ESCAN_SCAN(dev->name, "scanned AP/Mesh count (%d)\n", bss_list->count);
- for_each_bss(bss_list, bi, i)
- {
- memset(&bi_meshid, 0, sizeof(bi_meshid));
- is_mesh_peer = FALSE;
- bi_chan = wf_chspec_ctlchan(
- wl_chspec_driver_to_host(escan->ioctl_ver, bi->chanspec));
- bi_rssi = MIN(dtoh16(bi->RSSI), RSSI_MAXVAL);
- is_mesh_peer = wl_escan_meshid_ie(((u8*)bi)+bi->ie_offset,
- bi->ie_length, &bi_meshid);
- if (!(bi->capability & (DOT11_CAP_ESS|DOT11_CAP_IBSS)) && is_mesh_peer) {
- bool meshid_matched = FALSE, sec_matched = FALSE, bi_sae = FALSE,
- bss_found = FALSE, prefer = FALSE;
-
- if (cur_ssid->SSID_len && cur_ssid->SSID_len == bi_meshid.SSID_len &&
- !memcmp(cur_ssid->SSID, bi_meshid.SSID, bi_meshid.SSID_len))
- meshid_matched = TRUE;
-
- bi_sae = wl_escan_rsn_ie(((u8*)bi)+bi->ie_offset, bi->ie_length);
- if (bi_sae == sae)
- sec_matched = TRUE;
-
- bss_found = wl_escan_mesh_info_ie(dev, ((u8*)bi)+bi->ie_offset, bi->ie_length,
- &peer_mesh_info);
- if (meshid_matched && sec_matched && bss_found &&
- (cur_chan == bi_chan)) {
- if (peer_mesh_info.hop_cnt < min_hop_cnt) {
- memcpy(&mesh_info->master_bssid, &peer_mesh_info.master_bssid,
- ETHER_ADDR_LEN);
- mesh_info->master_channel = peer_mesh_info.master_channel;
- mesh_info->hop_cnt = peer_mesh_info.hop_cnt;
- memcpy(mesh_info->peer_bssid, peer_mesh_info.peer_bssid,
- sizeof(peer_mesh_info.peer_bssid));
- min_hop_cnt = peer_mesh_info.hop_cnt;
- prefer = TRUE;
- chan_matched = TRUE;
- found = TRUE;
- }
- }
- else if (meshid_matched && sec_matched && bss_found &&
- (cur_chan != bi_chan) && !chan_matched) {
- if (bi_rssi > max_rssi) {
- memcpy(&mesh_info->master_bssid, &peer_mesh_info.master_bssid,
- ETHER_ADDR_LEN);
- mesh_info->master_channel = peer_mesh_info.master_channel;
- mesh_info->hop_cnt = peer_mesh_info.hop_cnt;
- memcpy(mesh_info->peer_bssid, peer_mesh_info.peer_bssid,
- sizeof(peer_mesh_info.peer_bssid));
- max_rssi = bi_rssi;
- prefer = TRUE;
- found = TRUE;
- }
- }
-
- ESCAN_SCAN(dev->name,
- "%s[Mesh] BSSID=%pM, channel=%d, RSSI=%d, sec=%s, "
- "mbssid=%pM, mchannel=%d, hop=%d, pbssid=%pM, MeshID=\"%s\"\n",
- prefer?"*":" ", &bi->BSSID, bi_chan, bi_rssi, bi_sae?"SAE":"OPEN",
- &peer_mesh_info.master_bssid, peer_mesh_info.master_channel,
- peer_mesh_info.hop_cnt, &peer_mesh_info.peer_bssid, bi_meshid.SSID);
- } else {
- ESCAN_SCAN(dev->name,
- "[AP] BSSID=%pM, channel=%d, RSSI=%d, SSID=\"%s\"\n",
- &bi->BSSID, bi_chan, bi_rssi, bi->SSID);
- }
- }
-
-exit:
- mutex_unlock(&escan->usr_sync);
- return found;
+ if (escan->event_tsk.thr_pid >= 0)
+ PROC_STOP(&escan->event_tsk);
}
-#endif /* WLMESH */
-static void
-wl_escan_deinit(struct net_device *dev, struct wl_escan_info *escan)
+static void wl_escan_deinit(struct wl_escan_info *escan)
{
- ESCAN_TRACE(dev->name, "Enter\n");
-
+ printf("%s: Enter\n", __FUNCTION__);
+ if (!escan) {
+ ESCAN_ERROR(("device is not ready\n"));
+ return;
+ }
+ wl_destroy_event_handler(escan);
+ wl_flush_eq(escan);
del_timer_sync(&escan->scan_timeout);
- escan->escan_state = ESCAN_STATE_DOWN;
+ escan->escan_state = ESCAN_STATE_IDLE;
#if defined(RSSIAVG)
wl_free_rssi_cache(&escan->g_rssi_cache_ctrl);
#endif
}
-static s32
-wl_escan_init(struct net_device *dev, struct wl_escan_info *escan)
+static s32 wl_escan_init(struct wl_escan_info *escan)
{
- ESCAN_TRACE(dev->name, "Enter\n");
-
- /* Init scan_timeout timer */
- init_timer_compat(&escan->scan_timeout, wl_escan_timeout, escan);
- escan->escan_state = ESCAN_STATE_IDLE;
-
- return 0;
-}
-
-void
-wl_escan_down(struct net_device *dev, dhd_pub_t *dhdp)
-{
- struct wl_escan_info *escan = dhdp->escan;
+ int err = 0;
- ESCAN_TRACE(dev->name, "Enter\n");
+ printf("%s: Enter\n", __FUNCTION__);
if (!escan) {
- ESCAN_ERROR(dev->name, "escan is NULL\n");
- return;
+ ESCAN_ERROR(("device is not ready\n"));
+ return -EIO;
}
- wl_escan_deinit(dev, escan);
-}
-
-int
-wl_escan_up(struct net_device *dev, dhd_pub_t *dhdp)
-{
- struct wl_escan_info *escan = dhdp->escan;
- s32 val = 0;
- int ret = -1;
+ /* Init scan_timeout timer */
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 15, 0)
+ timer_setup(&escan->scan_timeout, wl_escan_timeout, 0);
+#else
+ init_timer(&escan->scan_timeout);
+ escan->scan_timeout.data = (unsigned long) escan;
+ escan->scan_timeout.function = wl_escan_timeout;
+#endif
- ESCAN_TRACE(dev->name, "Enter\n");
- if (!escan) {
- ESCAN_ERROR(dev->name, "escan is NULL\n");
- return ret;
+ if (wl_create_event_handler(escan)) {
+ err = -ENOMEM;
+ goto err;
}
+ memset(escan->evt_handler, 0, sizeof(escan->evt_handler));
- ret = wl_escan_init(dev, escan);
- if (ret) {
- ESCAN_ERROR(dev->name, "wl_escan_init ret %d\n", ret);
- return ret;
- }
+ escan->evt_handler[WLC_E_ESCAN_RESULT] = wl_escan_handler;
+ escan->escan_state = ESCAN_STATE_IDLE;
- if (!escan->ioctl_ver) {
- val = 1;
- if ((ret = wldev_ioctl(dev, WLC_GET_VERSION, &val, sizeof(int), false) < 0)) {
- ESCAN_ERROR(dev->name, "WLC_GET_VERSION failed, ret=%d\n", ret);
- return ret;
- }
- val = dtoh32(val);
- if (val != WLC_IOCTL_VERSION && val != 1) {
- ESCAN_ERROR(dev->name,
- "Version mismatch, please upgrade. Got %d, expected %d or 1\n",
- val, WLC_IOCTL_VERSION);
- return ret;
- }
- escan->ioctl_ver = val;
- }
+ mutex_init(&escan->usr_sync);
return 0;
+err:
+ wl_escan_deinit(escan);
+ return err;
}
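
For reference, the version-conditional timer setup in wl_escan_init() implies two different callback signatures. The sketch below is illustrative only (it is not the driver's actual wl_escan_timeout body) and shows the shape each branch expects.

#include <linux/timer.h>
#include <linux/version.h>

#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 15, 0)
/* timer_setup() passes the timer itself; recover the container with from_timer() */
static void example_escan_timeout(struct timer_list *t)
{
	struct wl_escan_info *escan = from_timer(escan, t, scan_timeout);

	/* hypothetical handling: flag the scan as finished */
	escan->escan_state = ESCAN_STATE_IDLE;
}
#else
/* init_timer() path: the escan pointer arrives through the .data field */
static void example_escan_timeout(unsigned long data)
{
	struct wl_escan_info *escan = (struct wl_escan_info *)data;

	escan->escan_state = ESCAN_STATE_IDLE;
}
#endif
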
-int
-wl_escan_event_dettach(struct net_device *dev, dhd_pub_t *dhdp)
+void wl_escan_detach(dhd_pub_t *dhdp)
{
- struct wl_escan_info *escan = dhdp->escan;
- int ret = -1;
-
- if (!escan) {
- ESCAN_ERROR(dev->name, "escan is NULL\n");
- return ret;
- }
+ struct wl_escan_info *escan = g_escan;
- wl_ext_event_deregister(dev, dhdp, WLC_E_ESCAN_RESULT, wl_escan_handler);
-
- return 0;
-}
-
-int
-wl_escan_event_attach(struct net_device *dev, dhd_pub_t *dhdp)
-{
- struct wl_escan_info *escan = dhdp->escan;
- int ret = -1;
+ printf("%s: Enter\n", __FUNCTION__);
if (!escan) {
- ESCAN_ERROR(dev->name, "escan is NULL\n");
- return ret;
+ ESCAN_ERROR(("device is not ready\n"));
+ return;
}
- ret = wl_ext_event_register(dev, dhdp, WLC_E_ESCAN_RESULT, wl_escan_handler,
- escan, PRIO_EVENT_ESCAN);
- if (ret) {
- ESCAN_ERROR(dev->name, "wl_ext_event_register err %d\n", ret);
- }
+ wl_escan_deinit(escan);
- return ret;
-}
-
-void
-wl_escan_detach(struct net_device *dev, dhd_pub_t *dhdp)
-{
- struct wl_escan_info *escan = dhdp->escan;
-
- ESCAN_TRACE(dev->name, "Enter\n");
-
- if (!escan)
- return;
-
- wl_escan_deinit(dev, escan);
if (escan->escan_ioctl_buf) {
kfree(escan->escan_ioctl_buf);
escan->escan_ioctl_buf = NULL;
}
- wl_ext_event_deregister(dev, dhdp, WLC_E_ESCAN_RESULT, wl_escan_handler);
-
DHD_OS_PREFREE(dhdp, escan, sizeof(struct wl_escan_info));
- dhdp->escan = NULL;
+ g_escan = NULL;
}
int
wl_escan_attach(struct net_device *dev, dhd_pub_t *dhdp)
{
struct wl_escan_info *escan = NULL;
- int ret = 0;
- ESCAN_TRACE(dev->name, "Enter\n");
+ printf("%s: Enter\n", __FUNCTION__);
- escan = (struct wl_escan_info *)DHD_OS_PREALLOC(dhdp,
- DHD_PREALLOC_WL_ESCAN, sizeof(struct wl_escan_info));
+ if (!dev)
+ return 0;
+ escan = (wl_escan_info_t *)DHD_OS_PREALLOC(dhdp, DHD_PREALLOC_WL_ESCAN_INFO,
+ sizeof(struct wl_escan_info));
if (!escan)
return -ENOMEM;
+ g_escan = escan;
memset(escan, 0, sizeof(struct wl_escan_info));
- dhdp->escan = escan;
-
/* we only care about main interface so save a global here */
+ escan->dev = dev;
escan->pub = dhdp;
- escan->escan_state = ESCAN_STATE_DOWN;
+ escan->escan_state = ESCAN_STATE_IDLE;
escan->escan_ioctl_buf = (void *)kzalloc(WLC_IOCTL_MAXLEN, GFP_KERNEL);
if (unlikely(!escan->escan_ioctl_buf)) {
- ESCAN_ERROR(dev->name, "Ioctl buf alloc failed\n");
- ret = -ENOMEM;
- goto exit;
+ ESCAN_ERROR(("Ioctl buf alloc failed\n"));
+ goto err;
}
- ret = wl_escan_init(dev, escan);
- if (ret) {
- ESCAN_ERROR(dev->name, "wl_escan_init err %d\n", ret);
- goto exit;
- }
- mutex_init(&escan->usr_sync);
+ wl_init_eq(escan);
+ wl_escan_init(escan);
return 0;
-
-exit:
- wl_escan_detach(dev, dhdp);
- return ret;
+err:
+ wl_escan_detach(dhdp);
+ return -ENOMEM;
}
#endif /* WL_ESCAN */
-
-#ifndef _wl_escan_
-#define _wl_escan_
-#include <linuxver.h>
-#include <wl_iw.h>
-
-#define ESCAN_BUF_SIZE (64 * 1024)
-
-#define WL_ESCAN_TIMER_INTERVAL_MS 10000 /* Scan timeout */
-
-/* donlge escan state */
-enum escan_state {
- ESCAN_STATE_DOWN,
- ESCAN_STATE_IDLE,
- ESCAN_STATE_SCANING
-};
-
-typedef struct wl_escan_info {
- struct net_device *dev;
- dhd_pub_t *pub;
- timer_list_compat_t scan_timeout; /* Timer for catch scan event timeout */
- int escan_state;
- int ioctl_ver;
- u8 escan_buf[ESCAN_BUF_SIZE];
- struct wl_scan_results *bss_list;
- struct ether_addr disconnected_bssid;
- u8 *escan_ioctl_buf;
- struct mutex usr_sync; /* maily for up/down synchronization */
- int autochannel;
- int best_2g_ch;
- int best_5g_ch;
-#if defined(RSSIAVG)
- wl_rssi_cache_ctrl_t g_rssi_cache_ctrl;
- wl_rssi_cache_ctrl_t g_connected_rssi_cache_ctrl;
-#endif
-#if defined(BSSCACHE)
- wl_bss_cache_ctrl_t g_bss_cache_ctrl;
-#endif
-} wl_escan_info_t;
-
-#if defined(WLMESH)
-enum mesh_info_id {
- MESH_INFO_MASTER_BSSID = 1,
- MESH_INFO_MASTER_CHANNEL,
- MESH_INFO_HOP_CNT,
- MESH_INFO_PEER_BSSID
-};
-
-#define MAX_HOP_LIST 10
-typedef struct wl_mesh_params {
- struct ether_addr master_bssid;
- uint16 master_channel;
- uint hop_cnt;
- struct ether_addr peer_bssid[MAX_HOP_LIST];
- uint16 scan_channel;
-} wl_mesh_params_t;
-bool wl_escan_mesh_info(struct net_device *dev,
- struct wl_escan_info *escan, struct ether_addr *peer_bssid,
- struct wl_mesh_params *mesh_info);
-bool wl_escan_mesh_peer(struct net_device *dev,
- struct wl_escan_info *escan, wlc_ssid_t *cur_ssid, uint16 cur_chan, bool sae,
- struct wl_mesh_params *mesh_info);
-#endif /* WLMESH */
-
-int wl_escan_set_scan(struct net_device *dev, dhd_pub_t *dhdp,
- wlc_ssid_t *ssid, uint16 channel, bool bcast);
-int wl_escan_get_scan(struct net_device *dev, dhd_pub_t *dhdp,
- struct iw_request_info *info, struct iw_point *dwrq, char *extra);
-int wl_escan_attach(struct net_device *dev, dhd_pub_t *dhdp);
-void wl_escan_detach(struct net_device *dev, dhd_pub_t *dhdp);
-int wl_escan_event_attach(struct net_device *dev, dhd_pub_t *dhdp);
-int wl_escan_event_dettach(struct net_device *dev, dhd_pub_t *dhdp);
-int wl_escan_up(struct net_device *dev, dhd_pub_t *dhdp);
-void wl_escan_down(struct net_device *dev, dhd_pub_t *dhdp);
-
-#endif /* _wl_escan_ */
-
+
+#ifndef _wl_escan_
+#define _wl_escan_
+
+#include <linux/wireless.h>
+#include <wl_iw.h>
+#include <dngl_stats.h>
+#include <dhd.h>
+#include <linux/time.h>
+
+#ifdef DHD_MAX_IFS
+#define WL_MAX_IFS DHD_MAX_IFS
+#else
+#define WL_MAX_IFS 16
+#endif
+
+#define ESCAN_BUF_SIZE (64 * 1024)
+
+#define WL_ESCAN_TIMER_INTERVAL_MS 10000 /* Scan timeout */
+
+/* event queue for the escan main event handler */
+struct escan_event_q {
+ struct list_head eq_list;
+ u32 etype;
+ wl_event_msg_t emsg;
+ s8 edata[1];
+};
+
+/* dongle escan state */
+enum escan_state {
+ ESCAN_STATE_IDLE,
+ ESCAN_STATE_SCANING
+};
+
+struct wl_escan_info;
+
+typedef s32(*ESCAN_EVENT_HANDLER) (struct wl_escan_info *escan,
+ const wl_event_msg_t *e, void *data);
+
+typedef struct wl_escan_info {
+ struct net_device *dev;
+ dhd_pub_t *pub;
+ struct timer_list scan_timeout; /* Timer to catch scan event timeout */
+ int escan_state;
+ int ioctl_ver;
+
+ char ioctlbuf[WLC_IOCTL_SMLEN];
+ u8 escan_buf[ESCAN_BUF_SIZE];
+ struct wl_scan_results *bss_list;
+ struct wl_scan_results *scan_results;
+ struct ether_addr disconnected_bssid;
+ u8 *escan_ioctl_buf;
+ spinlock_t eq_lock; /* for event queue synchronization */
+ struct list_head eq_list; /* used for event queue */
+ tsk_ctl_t event_tsk; /* task of main event handler thread */
+ ESCAN_EVENT_HANDLER evt_handler[WLC_E_LAST];
+ struct mutex usr_sync; /* mainly for up/down synchronization */
+ int autochannel;
+ int best_2g_ch;
+ int best_5g_ch;
+#if defined(RSSIAVG)
+ wl_rssi_cache_ctrl_t g_rssi_cache_ctrl;
+ wl_rssi_cache_ctrl_t g_connected_rssi_cache_ctrl;
+#endif
+#if defined(BSSCACHE)
+ wl_bss_cache_ctrl_t g_bss_cache_ctrl;
+#endif
+} wl_escan_info_t;
+
+void wl_escan_event(struct net_device *dev, const wl_event_msg_t * e, void *data);
+
+int wl_escan_set_scan(
+ struct net_device *dev,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu,
+ char *extra
+);
+int wl_escan_get_scan(struct net_device *dev, struct iw_request_info *info,
+ struct iw_point *dwrq, char *extra);
+s32 wl_escan_autochannel(struct net_device *dev, char* command, int total_len);
+int wl_escan_attach(struct net_device *dev, dhd_pub_t *dhdp);
+void wl_escan_detach(dhd_pub_t *dhdp);
+
+#endif /* _wl_escan_ */
+
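
A minimal sketch (assuming only the prototypes above; the wrapper names and handler wiring are illustrative, not part of this patch) of how the escan entry points slot into a Wireless Extensions handler pair and the driver lifecycle:

/* SIOCSIWSCAN: wl_escan_set_scan() already has the iw_handler shape */
static int example_siocsiwscan(struct net_device *dev,
	struct iw_request_info *info, union iwreq_data *wrqu, char *extra)
{
	return wl_escan_set_scan(dev, info, wrqu, extra);
}

/* SIOCGIWSCAN: results are packed into 'extra' as iw_event records and
 * wrqu->data.length is updated to the number of bytes written */
static int example_siocgiwscan(struct net_device *dev,
	struct iw_request_info *info, union iwreq_data *wrqu, char *extra)
{
	return wl_escan_get_scan(dev, info, &wrqu->data, extra);
}

/* Lifecycle (placement is an assumption): wl_escan_attach(net, dhdp) during
 * interface bring-up, wl_escan_detach(dhdp) on teardown, and
 * wl_escan_event(net, e, data) fed from the firmware event path. */
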
+++ /dev/null
-
-#if defined(WL_EXT_IAPSTA) || defined(USE_IW)
-#include <bcmendian.h>
-#include <wl_android.h>
-#include <dhd_config.h>
-
-#define EVENT_ERROR(name, arg1, args...) \
- do { \
- if (android_msg_level & ANDROID_ERROR_LEVEL) { \
- printk(KERN_ERR "[dhd-%s] EVENT-ERROR) %s : " arg1, name, __func__, ## args); \
- } \
- } while (0)
-#define EVENT_TRACE(name, arg1, args...) \
- do { \
- if (android_msg_level & ANDROID_TRACE_LEVEL) { \
- printk(KERN_INFO "[dhd-%s] EVENT-TRACE) %s : " arg1, name, __func__, ## args); \
- } \
- } while (0)
-#define EVENT_DBG(name, arg1, args...) \
- do { \
- if (android_msg_level & ANDROID_DBG_LEVEL) { \
- printk(KERN_INFO "[dhd-%s] EVENT-DBG) %s : " arg1, name, __func__, ## args); \
- } \
- } while (0)
-
-#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == \
- 4 && __GNUC_MINOR__ >= 6))
-#define BCM_SET_LIST_FIRST_ENTRY(entry, ptr, type, member) \
-_Pragma("GCC diagnostic push") \
-_Pragma("GCC diagnostic ignored \"-Wcast-qual\"") \
-(entry) = list_first_entry((ptr), type, member); \
-_Pragma("GCC diagnostic pop") \
-
-#define BCM_SET_CONTAINER_OF(entry, ptr, type, member) \
-_Pragma("GCC diagnostic push") \
-_Pragma("GCC diagnostic ignored \"-Wcast-qual\"") \
-entry = container_of((ptr), type, member); \
-_Pragma("GCC diagnostic pop") \
-
-#else
-#define BCM_SET_LIST_FIRST_ENTRY(entry, ptr, type, member) \
-(entry) = list_first_entry((ptr), type, member); \
-
-#define BCM_SET_CONTAINER_OF(entry, ptr, type, member) \
-entry = container_of((ptr), type, member); \
-
-#endif /* STRICT_GCC_WARNINGS */
-
-#ifdef DHD_MAX_IFS
-#define WL_MAX_IFS DHD_MAX_IFS
-#else
-#define WL_MAX_IFS 16
-#endif
-
-/* event queue for cfg80211 main event */
-struct wl_event_q {
- struct list_head eq_list;
- u32 etype;
- wl_event_msg_t emsg;
- s8 edata[1];
-};
-
-typedef s32(*EXT_EVENT_HANDLER) (struct net_device *dev, void *cb_argu,
- const wl_event_msg_t *e, void *data);
-
-typedef struct event_handler_list {
- struct event_handler_list *next;
- struct net_device *dev;
- uint32 etype;
- EXT_EVENT_HANDLER cb_func;
- void *cb_argu;
- wl_event_prio_t prio;
-} event_handler_list_t;
-
-typedef struct event_handler_head {
- event_handler_list_t *evt_head;
-} event_handler_head_t;
-
-typedef struct wl_event_params {
- dhd_pub_t *pub;
- struct net_device *dev[WL_MAX_IFS];
- struct event_handler_head evt_head;
- struct list_head eq_list; /* used for event queue */
- spinlock_t eq_lock; /* for event queue synchronization */
- struct workqueue_struct *event_workq; /* workqueue for event */
- struct work_struct event_work; /* work item for event */
- struct mutex event_sync;
-} wl_event_params_t;
-
-static unsigned long
-wl_ext_event_lock_eq(struct wl_event_params *event_params)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&event_params->eq_lock, flags);
- return flags;
-}
-
-static void
-wl_ext_event_unlock_eq(struct wl_event_params *event_params, unsigned long flags)
-{
- spin_unlock_irqrestore(&event_params->eq_lock, flags);
-}
-
-static void
-wl_ext_event_init_eq_lock(struct wl_event_params *event_params)
-{
- spin_lock_init(&event_params->eq_lock);
-}
-
-static void
-wl_ext_event_init_eq(struct wl_event_params *event_params)
-{
- wl_ext_event_init_eq_lock(event_params);
- INIT_LIST_HEAD(&event_params->eq_list);
-}
-
-static void
-wl_ext_event_flush_eq(struct wl_event_params *event_params)
-{
- struct wl_event_q *e;
- unsigned long flags;
-
- flags = wl_ext_event_lock_eq(event_params);
- while (!list_empty_careful(&event_params->eq_list)) {
- BCM_SET_LIST_FIRST_ENTRY(e, &event_params->eq_list, struct wl_event_q, eq_list);
- list_del(&e->eq_list);
- kfree(e);
- }
- wl_ext_event_unlock_eq(event_params, flags);
-}
-
-/*
-* retrieve first queued event from head
-*/
-
-static struct wl_event_q *
-wl_ext_event_deq_event(struct wl_event_params *event_params)
-{
- struct wl_event_q *e = NULL;
- unsigned long flags;
-
- flags = wl_ext_event_lock_eq(event_params);
- if (likely(!list_empty(&event_params->eq_list))) {
- BCM_SET_LIST_FIRST_ENTRY(e, &event_params->eq_list, struct wl_event_q, eq_list);
- list_del(&e->eq_list);
- }
- wl_ext_event_unlock_eq(event_params, flags);
-
- return e;
-}
-
-/*
- * push event to tail of the queue
- */
-
-static s32
-wl_ext_event_enq_event(struct wl_event_params *event_params, u32 event,
- const wl_event_msg_t *msg, void *data)
-{
- struct wl_event_q *e;
- s32 err = 0;
- uint32 evtq_size;
- uint32 data_len;
- unsigned long flags;
- gfp_t aflags;
-
- data_len = 0;
- if (data)
- data_len = ntoh32(msg->datalen);
- evtq_size = sizeof(struct wl_event_q) + data_len;
- aflags = (in_atomic()) ? GFP_ATOMIC : GFP_KERNEL;
- e = kzalloc(evtq_size, aflags);
- if (unlikely(!e)) {
- EVENT_ERROR("wlan", "event alloc failed\n");
- return -ENOMEM;
- }
- e->etype = event;
- memcpy(&e->emsg, msg, sizeof(wl_event_msg_t));
- if (data)
- memcpy(e->edata, data, data_len);
- flags = wl_ext_event_lock_eq(event_params);
- list_add_tail(&e->eq_list, &event_params->eq_list);
- wl_ext_event_unlock_eq(event_params, flags);
-
- return err;
-}
-
-static void
-wl_ext_event_put_event(struct wl_event_q *e)
-{
- kfree(e);
-}
-
-static void
-wl_ext_event_handler(struct work_struct *work_data)
-{
- struct wl_event_params *event_params = NULL;
- struct wl_event_q *e;
- struct net_device *dev = NULL;
- struct event_handler_list *evt_node;
- dhd_pub_t *dhd;
- unsigned long flags = 0;
-
- BCM_SET_CONTAINER_OF(event_params, work_data, struct wl_event_params, event_work);
- DHD_EVENT_WAKE_LOCK(event_params->pub);
- while ((e = wl_ext_event_deq_event(event_params))) {
- if (e->emsg.ifidx >= DHD_MAX_IFS) {
- EVENT_ERROR("wlan", "ifidx=%d not in range\n", e->emsg.ifidx);
- goto fail;
- }
- dev = event_params->dev[e->emsg.ifidx];
- if (!dev) {
- EVENT_DBG("wlan", "ifidx=%d dev not ready\n", e->emsg.ifidx);
- goto fail;
- }
- dhd = dhd_get_pub(dev);
- if (e->etype > WLC_E_LAST) {
- EVENT_TRACE(dev->name, "Unknown Event (%d): ignoring\n", e->etype);
- goto fail;
- }
- DHD_GENERAL_LOCK(dhd, flags);
- if (DHD_BUS_CHECK_DOWN_OR_DOWN_IN_PROGRESS(dhd)) {
- EVENT_ERROR(dev->name, "BUS is DOWN.\n");
- DHD_GENERAL_UNLOCK(dhd, flags);
- goto fail;
- }
- DHD_GENERAL_UNLOCK(dhd, flags);
- EVENT_DBG(dev->name, "event type (%d)\n", e->etype);
- mutex_lock(&event_params->event_sync);
- evt_node = event_params->evt_head.evt_head;
- for (;evt_node;) {
- if (evt_node->dev == dev &&
- (evt_node->etype == e->etype || evt_node->etype == WLC_E_LAST))
- evt_node->cb_func(dev, evt_node->cb_argu, &e->emsg, e->edata);
- evt_node = evt_node->next;
- }
- mutex_unlock(&event_params->event_sync);
-fail:
- wl_ext_event_put_event(e);
- }
- DHD_EVENT_WAKE_UNLOCK(event_params->pub);
-}
-
-void
-wl_ext_event_send(void *params, const wl_event_msg_t * e, void *data)
-{
- struct wl_event_params *event_params = params;
- u32 event_type = ntoh32(e->event_type);
-
- if (event_params == NULL) {
- EVENT_ERROR("wlan", "Stale event %d(%s) ignored\n",
- event_type, bcmevent_get_name(event_type));
- return;
- }
-
- if (event_params->event_workq == NULL) {
- EVENT_ERROR("wlan", "Event handler is not created %d(%s)\n",
- event_type, bcmevent_get_name(event_type));
- return;
- }
-
- if (likely(!wl_ext_event_enq_event(event_params, event_type, e, data))) {
- queue_work(event_params->event_workq, &event_params->event_work);
- }
-}
-
-static s32
-wl_ext_event_create_handler(struct wl_event_params *event_params)
-{
- int ret = 0;
- EVENT_TRACE("wlan", "Enter\n");
-
- /* Allocate workqueue for event */
- if (!event_params->event_workq) {
- event_params->event_workq = alloc_workqueue("ext_eventd", WQ_MEM_RECLAIM | WQ_HIGHPRI, 0);
- }
-
- if (!event_params->event_workq) {
- EVENT_ERROR("wlan", "event_workq alloc_workqueue failed\n");
- ret = -ENOMEM;
- } else {
- INIT_WORK(&event_params->event_work, wl_ext_event_handler);
- }
- return ret;
-}
-
-static void
-wl_ext_event_free(struct wl_event_params *event_params)
-{
- struct event_handler_list *node, *cur, **evt_head;
-
- evt_head = &event_params->evt_head.evt_head;
- node = *evt_head;
-
- for (;node;) {
- EVENT_TRACE(node->dev->name, "Free etype=%d\n", node->etype);
- cur = node;
- node = cur->next;
- kfree(cur);
- }
- *evt_head = NULL;
-}
-
-static void
-wl_ext_event_destroy_handler(struct wl_event_params *event_params)
-{
- if (event_params && event_params->event_workq) {
- cancel_work_sync(&event_params->event_work);
- destroy_workqueue(event_params->event_workq);
- event_params->event_workq = NULL;
- }
-}
-
-int
-wl_ext_event_register(struct net_device *dev, dhd_pub_t *dhd, uint32 event,
- void *cb_func, void *data, wl_event_prio_t prio)
-{
- struct wl_event_params *event_params = dhd->event_params;
- struct event_handler_list *node, *leaf, *node_prev, **evt_head;
- int ret = 0;
-
- if (event_params) {
- mutex_lock(&event_params->event_sync);
- evt_head = &event_params->evt_head.evt_head;
- node = *evt_head;
- for (;node;) {
- if (node->dev == dev && node->etype == event && node->cb_func == cb_func) {
- EVENT_TRACE(dev->name, "skip event %d\n", event);
- mutex_unlock(&event_params->event_sync);
- return 0;
- }
- node = node->next;
- }
- leaf = kmalloc(sizeof(event_handler_list_t), GFP_KERNEL);
- if (!leaf) {
- EVENT_ERROR(dev->name, "Memory alloc failure %d for event %d\n",
- (int)sizeof(event_handler_list_t), event);
- mutex_unlock(&event_params->event_sync);
- return -ENOMEM;
- }
- leaf->next = NULL;
- leaf->dev = dev;
- leaf->etype = event;
- leaf->cb_func = cb_func;
- leaf->cb_argu = data;
- leaf->prio = prio;
- if (*evt_head == NULL) {
- *evt_head = leaf;
- } else {
- node = *evt_head;
- node_prev = NULL;
- for (;node;) {
- if (node->prio <= prio) {
- leaf->next = node;
- if (node_prev)
- node_prev->next = leaf;
- else
- *evt_head = leaf;
- break;
- } else if (node->next == NULL) {
- node->next = leaf;
- break;
- }
- node_prev = node;
- node = node->next;
- }
- }
- EVENT_TRACE(dev->name, "event %d registered\n", event);
- mutex_unlock(&event_params->event_sync);
- } else {
- EVENT_ERROR(dev->name, "event_params not ready %d\n", event);
- ret = -ENODEV;
- }
-
- return ret;
-}
-
-void
-wl_ext_event_deregister(struct net_device *dev, dhd_pub_t *dhd,
- uint32 event, void *cb_func)
-{
- struct wl_event_params *event_params = dhd->event_params;
- struct event_handler_list *node, *prev, **evt_head;
- int tmp = 0;
-
- if (event_params) {
- mutex_lock(&event_params->event_sync);
- evt_head = &event_params->evt_head.evt_head;
- node = *evt_head;
- prev = node;
- for (;node;) {
- if (node->dev == dev && node->etype == event && node->cb_func == cb_func) {
- if (node == *evt_head) {
- tmp = 1;
- *evt_head = node->next;
- } else {
- tmp = 0;
- prev->next = node->next;
- }
- EVENT_TRACE(dev->name, "event %d deregistered\n", event);
- kfree(node);
- if (tmp == 1) {
- node = *evt_head;
- prev = node;
- } else {
- node = prev->next;
- }
- continue;
- }
- prev = node;
- node = node->next;
- }
- mutex_unlock(&event_params->event_sync);
- } else {
- EVENT_ERROR(dev->name, "event_params not ready %d\n", event);
- }
-}
-
-static s32
-wl_ext_event_init_priv(struct wl_event_params *event_params)
-{
- s32 err = 0;
-
- mutex_init(&event_params->event_sync);
- wl_ext_event_init_eq(event_params);
- if (wl_ext_event_create_handler(event_params))
- return -ENOMEM;
-
- return err;
-}
-
-static void
-wl_ext_event_deinit_priv(struct wl_event_params *event_params)
-{
- wl_ext_event_destroy_handler(event_params);
- wl_ext_event_flush_eq(event_params);
- wl_ext_event_free(event_params);
-}
-
-int
-wl_ext_event_attach_netdev(struct net_device *net, int ifidx, uint8 bssidx)
-{
- struct dhd_pub *dhd = dhd_get_pub(net);
- struct wl_event_params *event_params = dhd->event_params;
-
- EVENT_TRACE(net->name, "ifidx=%d, bssidx=%d\n", ifidx, bssidx);
- if (event_params && ifidx < WL_MAX_IFS) {
- event_params->dev[ifidx] = net;
- }
-
- return 0;
-}
-
-int
-wl_ext_event_dettach_netdev(struct net_device *net, int ifidx)
-{
- struct dhd_pub *dhd = dhd_get_pub(net);
- struct wl_event_params *event_params = dhd->event_params;
-
- EVENT_TRACE(net->name, "ifidx=%d\n", ifidx);
- if (event_params && ifidx < WL_MAX_IFS) {
- event_params->dev[ifidx] = NULL;
- }
-
- return 0;
-}
-
-s32
-wl_ext_event_attach(struct net_device *dev, dhd_pub_t *dhdp)
-{
- struct wl_event_params *event_params = NULL;
- s32 err = 0;
-
- event_params = kmalloc(sizeof(wl_event_params_t), GFP_KERNEL);
- if (!event_params) {
- EVENT_ERROR(dev->name, "Failed to allocate memory (%zu)\n",
- sizeof(wl_event_params_t));
- return -ENOMEM;
- }
- dhdp->event_params = event_params;
- memset(event_params, 0, sizeof(wl_event_params_t));
- event_params->pub = dhdp;
-
- err = wl_ext_event_init_priv(event_params);
- if (err) {
- EVENT_ERROR(dev->name, "Failed to wl_ext_event_init_priv (%d)\n", err);
- goto ext_attach_out;
- }
-
- return err;
-ext_attach_out:
- wl_ext_event_dettach(dhdp);
- return err;
-}
-
-void
-wl_ext_event_dettach(dhd_pub_t *dhdp)
-{
- struct wl_event_params *event_params = dhdp->event_params;
-
- if (event_params) {
- wl_ext_event_deinit_priv(event_params);
- kfree(event_params);
- dhdp->event_params = NULL;
- }
-}
-#endif
#ifdef WL_NAN
#include <wlioctl_utils.h>
#endif
-#include <wl_iw.h>
#include <wl_android.h>
#ifdef WL_ESCAN
#include <wl_escan.h>
#endif
-#include <dhd_config.h>
-uint iw_msg_level = WL_ERROR_LEVEL;
+typedef const struct si_pub si_t;
-#define WL_ERROR_MSG(x, args...) \
- do { \
- if (iw_msg_level & WL_ERROR_LEVEL) { \
- printk(KERN_ERR "[dhd] WEXT-ERROR) %s : " x, __func__, ## args); \
- } \
- } while (0)
-#define WL_TRACE_MSG(x, args...) \
- do { \
- if (iw_msg_level & WL_TRACE_LEVEL) { \
- printk(KERN_INFO "[dhd] WEXT-TRACE) %s : " x, __func__, ## args); \
- } \
- } while (0)
-#define WL_SCAN_MSG(x, args...) \
- do { \
- if (iw_msg_level & WL_SCAN_LEVEL) { \
- printk(KERN_INFO "[dhd] WEXT-SCAN) %s : " x, __func__, ## args); \
- } \
- } while (0)
-#define WL_WSEC_MSG(x, args...) \
- do { \
- if (iw_msg_level & WL_WSEC_LEVEL) { \
- printk(KERN_INFO "[dhd] WEXT-WSEC) %s : " x, __func__, ## args); \
- } \
- } while (0)
-#define WL_ERROR(x) WL_ERROR_MSG x
-#define WL_TRACE(x) WL_TRACE_MSG x
-#define WL_SCAN(x) WL_SCAN_MSG x
-#define WL_WSEC(x) WL_WSEC_MSG x
-
-#ifdef BCMWAPI_WPI
-/* these items should evetually go into wireless.h of the linux system headfile dir */
-#ifndef IW_ENCODE_ALG_SM4
-#define IW_ENCODE_ALG_SM4 0x20
-#endif
+/* message levels */
+#define WL_ERROR_LEVEL 0x0001
+#define WL_SCAN_LEVEL 0x0002
+#define WL_ASSOC_LEVEL 0x0004
+#define WL_INFORM_LEVEL 0x0008
+#define WL_WSEC_LEVEL 0x0010
+#define WL_PNO_LEVEL 0x0020
+#define WL_COEX_LEVEL 0x0040
+#define WL_SOFTAP_LEVEL 0x0080
+#define WL_TRACE_LEVEL 0x0100
-#ifndef IW_AUTH_WAPI_ENABLED
-#define IW_AUTH_WAPI_ENABLED 0x20
-#endif
+uint iw_msg_level = WL_ERROR_LEVEL;
-#ifndef IW_AUTH_WAPI_VERSION_1
-#define IW_AUTH_WAPI_VERSION_1 0x00000008
-#endif
+#define WL_ERROR(x) do {if (iw_msg_level & WL_ERROR_LEVEL) printf x;} while (0)
+#define WL_SCAN(x) do {if (iw_msg_level & WL_SCAN_LEVEL) printf x;} while (0)
+#define WL_ASSOC(x) do {if (iw_msg_level & WL_ASSOC_LEVEL) printf x;} while (0)
+#define WL_INFORM(x) do {if (iw_msg_level & WL_INFORM_LEVEL) printf x;} while (0)
+#define WL_WSEC(x) do {if (iw_msg_level & WL_WSEC_LEVEL) printf x;} while (0)
+#define WL_PNO(x) do {if (iw_msg_level & WL_PNO_LEVEL) printf x;} while (0)
+#define WL_COEX(x) do {if (iw_msg_level & WL_COEX_LEVEL) printf x;} while (0)
+#define WL_SOFTAP(x) do {if (iw_msg_level & WL_SOFTAP_LEVEL) printf x;} while (0)
+#define WL_TRACE(x) do {if (iw_msg_level & WL_TRACE_LEVEL) printf x;} while (0)
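
A small hedged sketch of how these level bits are meant to be used (the helper name is hypothetical; how iw_msg_level is exposed at runtime, e.g. as a module parameter, is not shown in this patch):

static void example_enable_scan_tracing(void)
{
	/* OR extra level bits into iw_msg_level to raise verbosity at runtime */
	iw_msg_level |= WL_SCAN_LEVEL | WL_WSEC_LEVEL;
	WL_SCAN(("scan tracing enabled, iw_msg_level=0x%x\n", iw_msg_level));
}
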
-#ifndef IW_AUTH_CIPHER_SMS4
-#define IW_AUTH_CIPHER_SMS4 0x00000020
-#endif
+#include <wl_iw.h>
-#ifndef IW_AUTH_KEY_MGMT_WAPI_PSK
-#define IW_AUTH_KEY_MGMT_WAPI_PSK 4
-#endif
-
-#ifndef IW_AUTH_KEY_MGMT_WAPI_CERT
-#define IW_AUTH_KEY_MGMT_WAPI_CERT 8
-#endif
-#endif /* BCMWAPI_WPI */
/* Broadcom extensions to WEXT, linux upstream has obsoleted WEXT */
#ifndef IW_AUTH_KEY_MGMT_FT_802_1X
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
#include <linux/rtnetlink.h>
#endif
+#if defined(SOFTAP)
+struct net_device *ap_net_dev = NULL;
+tsk_ctl_t ap_eth_ctl; /* apsta AP netdev waiter thread */
+#endif /* SOFTAP */
extern bool wl_iw_conn_status_str(uint32 event_type, uint32 status,
uint32 reason, char* stringBuf, uint buflen);
#define IW_EVENT_IDX(cmd) ((cmd) - IWEVFIRST)
#endif /* WIRELESS_EXT < 19 */
-
#ifndef WL_ESCAN
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0))
#define DAEMONIZE(a) do { \
typedef struct iscan_info {
struct net_device *dev;
- timer_list_compat_t timer;
+ struct timer_list timer;
uint32 timer_ms;
uint32 timer_on;
int iscan_state;
long sysioc_pid;
struct semaphore sysioc_sem;
struct completion sysioc_exited;
+
char ioctlbuf[WLC_IOCTL_SMLEN];
} iscan_info_t;
+iscan_info_t *g_iscan = NULL;
static void wl_iw_timerfunc(ulong data);
static void wl_iw_set_event_mask(struct net_device *dev);
static int wl_iw_iscan(iscan_info_t *iscan, wlc_ssid_t *ssid, uint16 action);
-#endif /* !WL_ESCAN */
-
-struct pmk_list {
- pmkid_list_t pmkids;
- pmkid_t foo[MAXPMKID - 1];
-};
-
-typedef struct wl_wext_info {
- struct net_device *dev;
- dhd_pub_t *dhd;
- struct delayed_work pm_enable_work;
- struct mutex pm_sync;
- struct wl_conn_info conn_info;
- struct pmk_list pmk_list;
-#ifndef WL_ESCAN
- struct iscan_info iscan;
-#endif
-} wl_wext_info_t;
+#endif /* WL_ESCAN */
/* priv_link becomes netdev->priv and is the link between netdev and wlif struct */
typedef struct priv_link {
}
#endif /* WIRELESS_EXT > 12 */
-static void
-wl_iw_update_connect_status(struct net_device *dev, enum wl_ext_status status)
-{
-#ifndef WL_CFG80211
- struct dhd_pub *dhd = dhd_get_pub(dev);
- int cur_eapol_status = 0;
- int wpa_auth = 0;
- int error = -EINVAL;
- wl_wext_info_t *wext_info = NULL;
-
- if (!dhd || !dhd->conf)
- return;
- wext_info = dhd->wext_info;
- cur_eapol_status = dhd->conf->eapol_status;
-
- if (status == WL_EXT_STATUS_CONNECTING) {
-#ifdef WL_EXT_IAPSTA
- wl_ext_add_remove_pm_enable_work(dev, TRUE);
-#endif /* WL_EXT_IAPSTA */
- if ((error = dev_wlc_intvar_get(dev, "wpa_auth", &wpa_auth))) {
- WL_ERROR(("wpa_auth get error %d\n", error));
- return;
- }
- if (wpa_auth & (WPA_AUTH_PSK|WPA2_AUTH_PSK))
- dhd->conf->eapol_status = EAPOL_STATUS_4WAY_START;
- else
- dhd->conf->eapol_status = EAPOL_STATUS_NONE;
- } else if (status == WL_EXT_STATUS_ADD_KEY) {
- dhd->conf->eapol_status = EAPOL_STATUS_4WAY_DONE;
- wake_up_interruptible(&dhd->conf->event_complete);
- } else if (status == WL_EXT_STATUS_DISCONNECTING) {
-#ifdef WL_EXT_IAPSTA
- wl_ext_add_remove_pm_enable_work(dev, FALSE);
-#endif /* WL_EXT_IAPSTA */
- if (cur_eapol_status >= EAPOL_STATUS_4WAY_START &&
- cur_eapol_status < EAPOL_STATUS_4WAY_DONE) {
- WL_ERROR(("WPA failed at %d\n", cur_eapol_status));
- dhd->conf->eapol_status = EAPOL_STATUS_NONE;
- } else if (cur_eapol_status >= EAPOL_STATUS_WSC_START &&
- cur_eapol_status < EAPOL_STATUS_WSC_DONE) {
- WL_ERROR(("WPS failed at %d\n", cur_eapol_status));
- dhd->conf->eapol_status = EAPOL_STATUS_NONE;
- }
- } else if (status == WL_EXT_STATUS_DISCONNECTED) {
- if (cur_eapol_status >= EAPOL_STATUS_4WAY_START &&
- cur_eapol_status < EAPOL_STATUS_4WAY_DONE) {
- WL_ERROR(("WPA failed at %d\n", cur_eapol_status));
- dhd->conf->eapol_status = EAPOL_STATUS_NONE;
- wake_up_interruptible(&dhd->conf->event_complete);
- } else if (cur_eapol_status >= EAPOL_STATUS_WSC_START &&
- cur_eapol_status < EAPOL_STATUS_WSC_DONE) {
- WL_ERROR(("WPS failed at %d\n", cur_eapol_status));
- dhd->conf->eapol_status = EAPOL_STATUS_NONE;
- }
- }
-#endif
- return;
-}
-
int
wl_iw_send_priv_event(
struct net_device *dev,
bzero(&bssid, sizeof(struct sockaddr));
if ((error = dev_wlc_ioctl(dev, WLC_REASSOC, &bssid, ETHER_ADDR_LEN))) {
- WL_ERROR(("WLC_REASSOC failed (%d)\n", error));
+ WL_ERROR(("%s: WLC_REASSOC failed (%d)\n", __FUNCTION__, error));
return error;
}
return 0;
}
-#define DHD_CHECK(dhd, dev) \
- if (!dhd) { \
- WL_ERROR (("[dhd-%s] %s: dhd is NULL\n", dev->name, __FUNCTION__)); \
- return -ENODEV; \
- } \
-
static int
wl_iw_set_freq(
struct net_device *dev,
{
int error, chan;
uint sf = 0;
- struct dhd_pub *dhd = dhd_get_pub(dev);
- wl_wext_info_t *wext_info = NULL;
WL_TRACE(("%s: SIOCSIWFREQ\n", dev->name));
- DHD_CHECK(dhd, dev);
- wext_info = dhd->wext_info;
/* Setting by channel number */
if (fwrq->e == 0 && fwrq->m < MAXCHANNEL) {
}
chan = wf_mhz2channel(fwrq->m, sf);
}
- if (wext_info)
- wext_info->conn_info.channel = chan;
- WL_MSG(dev->name, "chan=%d\n", chan);
+ WL_ERROR(("%s: chan=%d\n", __FUNCTION__, chan));
chan = htod32(chan);
if ((error = dev_wlc_ioctl(dev, WLC_SET_CHANNEL, &chan, sizeof(chan)))) {
- WL_ERROR(("WLC_SET_CHANNEL failed (%d).\n", error));
+ WL_ERROR(("%s: WLC_SET_CHANNEL failed (%d).\n", __FUNCTION__, error));
return error;
}
)
{
int infra = 0, ap = 0, error = 0;
- struct dhd_pub *dhd = dhd_get_pub(dev);
- wl_wext_info_t *wext_info = NULL;
WL_TRACE(("%s: SIOCSIWMODE\n", dev->name));
- DHD_CHECK(dhd, dev);
- wext_info = dhd->wext_info;
- if (wext_info) {
- memset(&wext_info->conn_info.ssid, 0, sizeof(wlc_ssid_t));
- memset(&wext_info->conn_info.bssid, 0, sizeof(struct ether_addr));
- wext_info->conn_info.channel = 0;
- }
switch (*uwrq) {
case IW_MODE_MASTER:
)
{
int error = -EINVAL;
- struct dhd_pub *dhd = dhd_get_pub(dev);
- wl_wext_info_t *wext_info = NULL;
WL_TRACE(("%s: SIOCSIWAP\n", dev->name));
- DHD_CHECK(dhd, dev);
- wext_info = dhd->wext_info;
+
if (awrq->sa_family != ARPHRD_ETHER) {
- WL_ERROR(("Invalid Header...sa_family\n"));
+ WL_ERROR(("%s: Invalid Header...sa_family\n", __FUNCTION__));
return -EINVAL;
}
if (ETHER_ISBCAST(awrq->sa_data) || ETHER_ISNULLADDR(awrq->sa_data)) {
scb_val_t scbval;
bzero(&scbval, sizeof(scb_val_t));
- WL_MSG(dev->name, "WLC_DISASSOC\n");
+ WL_ERROR(("%s: WLC_DISASSOC\n", __FUNCTION__));
if ((error = dev_wlc_ioctl(dev, WLC_DISASSOC, &scbval, sizeof(scb_val_t)))) {
- WL_ERROR(("WLC_DISASSOC failed (%d).\n", error));
+ WL_ERROR(("%s: WLC_DISASSOC failed (%d).\n", __FUNCTION__, error));
}
- wl_iw_update_connect_status(dev, WL_EXT_STATUS_DISCONNECTING);
return 0;
}
/* WL_ASSOC(("Assoc to %s\n", bcm_ether_ntoa((struct ether_addr *)&(awrq->sa_data),
* eabuf)));
*/
/* Reassociate to the specified AP */
- if (wext_info)
- memcpy(&wext_info->conn_info.bssid, awrq->sa_data, ETHER_ADDR_LEN);
- if (wext_info && wext_info->conn_info.ssid.SSID_len) {
- if ((error = wl_ext_connect(dev, &wext_info->conn_info)))
- return error;
- } else {
- if ((error = dev_wlc_ioctl(dev, WLC_REASSOC, awrq->sa_data, ETHER_ADDR_LEN))) {
- WL_ERROR(("WLC_REASSOC failed (%d).\n", error));
- return error;
- }
- WL_MSG(dev->name, "join BSSID="MACSTR"\n", MAC2STR((u8 *)awrq->sa_data));
+ if ((error = dev_wlc_ioctl(dev, WLC_REASSOC, awrq->sa_data, ETHER_ADDR_LEN))) {
+ WL_ERROR(("%s: WLC_REASSOC failed (%d).\n", __FUNCTION__, error));
+ return error;
}
- wl_iw_update_connect_status(dev, WL_EXT_STATUS_CONNECTING);
+ WL_ERROR(("%s: join BSSID="MACSTR"\n", __FUNCTION__, MAC2STR((u8 *)awrq->sa_data)));
return 0;
}
if (mlme->cmd == IW_MLME_DISASSOC) {
scbval.val = htod32(scbval.val);
- WL_MSG(dev->name, "WLC_DISASSOC\n");
error = dev_wlc_ioctl(dev, WLC_DISASSOC, &scbval, sizeof(scb_val_t));
}
else if (mlme->cmd == IW_MLME_DEAUTH) {
scbval.val = htod32(scbval.val);
- WL_MSG(dev->name, "WLC_SCB_DEAUTHENTICATE_FOR_REASON\n");
error = dev_wlc_ioctl(dev, WLC_SCB_DEAUTHENTICATE_FOR_REASON, &scbval,
sizeof(scb_val_t));
}
else {
- WL_ERROR(("Invalid ioctl data.\n"));
+ WL_ERROR(("%s: Invalid ioctl data.\n", __FUNCTION__));
return error;
}
- wl_iw_update_connect_status(dev, WL_EXT_STATUS_DISCONNECTING);
return error;
}
{
wl_scan_results_t *list;
iscan_buf_t * buf;
- iscan_info_t *iscan;
+ iscan_info_t *iscan = g_iscan;
struct sockaddr *addr = (struct sockaddr *) extra;
struct iw_quality qual[IW_MAX_AP];
wl_bss_info_t *bi = NULL;
int i;
int16 rssi;
- struct dhd_pub *dhd = dhd_get_pub(dev);
- wl_wext_info_t *wext_info = NULL;
WL_TRACE(("%s: SIOCGIWAPLIST\n", dev->name));
- DHD_CHECK(dhd, dev);
- wext_info = dhd->wext_info;
- iscan = &wext_info->iscan;
if (!extra)
return -EINVAL;
return 0;
}
-#endif
static int
wl_iw_iscan_set_scan(
char *extra
)
{
- struct dhd_pub *dhd = dhd_get_pub(dev);
- wl_wext_info_t *wext_info = NULL;
wlc_ssid_t ssid;
-#ifndef WL_ESCAN
- iscan_info_t *iscan;
-#endif
+ iscan_info_t *iscan = g_iscan;
- DHD_CHECK(dhd, dev);
- wext_info = dhd->wext_info;
-#ifdef WL_ESCAN
- /* default Broadcast scan */
- memset(&ssid, 0, sizeof(ssid));
-#if WIRELESS_EXT > 17
- /* check for given essid */
- if (wrqu->data.length == sizeof(struct iw_scan_req)) {
- if (wrqu->data.flags & IW_SCAN_THIS_ESSID) {
- struct iw_scan_req *req = (struct iw_scan_req *)extra;
- ssid.SSID_len = MIN(sizeof(ssid.SSID), req->essid_len);
- memcpy(ssid.SSID, req->essid, ssid.SSID_len);
- ssid.SSID_len = htod32(ssid.SSID_len);
- }
- }
-#endif
- return wl_escan_set_scan(dev, dhd, &ssid, 0, TRUE);
-#else
- iscan = &wext_info->iscan;
WL_TRACE(("%s: SIOCSIWSCAN iscan=%p\n", dev->name, iscan));
/* use backup if our thread is not successful */
iscan->timer_on = 1;
return 0;
-#endif
}
+#endif /* WL_ESCAN */
#if WIRELESS_EXT > 17
static bool
}
#endif /* WIRELESS_EXT > 17 */
-#ifdef BCMWAPI_WPI
-static inline int _wpa_snprintf_hex(char *buf, size_t buf_size, const u8 *data,
- size_t len, int uppercase)
-{
- size_t i;
- char *pos = buf, *end = buf + buf_size;
- int ret;
- if (buf_size == 0)
- return 0;
- for (i = 0; i < len; i++) {
- ret = snprintf(pos, end - pos, uppercase ? "%02X" : "%02x",
- data[i]);
- if (ret < 0 || ret >= end - pos) {
- end[-1] = '\0';
- return pos - buf;
- }
- pos += ret;
- }
- end[-1] = '\0';
- return pos - buf;
-}
-
-/**
- * wpa_snprintf_hex - Print data as a hex string into a buffer
- * @buf: Memory area to use as the output buffer
- * @buf_size: Maximum buffer size in bytes (should be at least 2 * len + 1)
- * @data: Data to be printed
- * @len: Length of data in bytes
- * Returns: Number of bytes written
- */
-static int
-wpa_snprintf_hex(char *buf, size_t buf_size, const u8 *data, size_t len)
-{
- return _wpa_snprintf_hex(buf, buf_size, data, len, 0);
-}
-#endif /* BCMWAPI_WPI */
#ifndef WL_ESCAN
static
#if WIRELESS_EXT > 17
struct iw_event iwe;
char *event;
-#ifdef BCMWAPI_WPI
- char *buf;
- int custom_event_len;
-#endif
event = *event_p;
if (bi->ie_length) {
break;
}
}
-
-#ifdef BCMWAPI_WPI
- ptr = ((uint8 *)bi) + sizeof(wl_bss_info_t);
- ptr_len = bi->ie_length;
-
- while ((ie = bcm_parse_tlvs(ptr, ptr_len, DOT11_MNG_WAPI_ID))) {
- WL_TRACE(("found a WAPI IE...\n"));
-#ifdef WAPI_IE_USE_GENIE
- iwe.cmd = IWEVGENIE;
- iwe.u.data.length = ie->len + 2;
- event = IWE_STREAM_ADD_POINT(info, event, end, &iwe, (char *)ie);
-#else /* using CUSTOM event */
- iwe.cmd = IWEVCUSTOM;
- custom_event_len = strlen("wapi_ie=") + 2*(ie->len + 2);
- iwe.u.data.length = custom_event_len;
-
- buf = kmalloc(custom_event_len+1, GFP_KERNEL);
- if (buf == NULL)
- {
- WL_ERROR(("malloc(%d) returned NULL...\n", custom_event_len));
- break;
- }
- memcpy(buf, "wapi_ie=", 8);
- wpa_snprintf_hex(buf + 8, 2+1, &(ie->id), 1);
- wpa_snprintf_hex(buf + 10, 2+1, &(ie->len), 1);
- wpa_snprintf_hex(buf + 12, 2*ie->len+1, ie->data, ie->len);
- event = IWE_STREAM_ADD_POINT(info, event, end, &iwe, buf);
- kfree(buf);
-#endif /* WAPI_IE_USE_GENIE */
- break;
- }
-#endif /* BCMWAPI_WPI */
*event_p = event;
}
int16 rssi;
int channel;
- WL_TRACE(("%s SIOCGIWSCAN\n", dev->name));
+ WL_TRACE(("%s: %s SIOCGIWSCAN\n", __FUNCTION__, dev->name));
if (!extra)
return -EINVAL;
// terence 20150419: limit the max. rssi to -2 or the bss will be filtered out in android OS
rssi = MIN(dtoh16(bi->RSSI), RSSI_MAXVAL);
channel = (bi->ctl_ch == 0) ? CHSPEC_CHANNEL(bi->chanspec) : bi->ctl_ch;
- WL_SCAN(("BSSID="MACSTR", channel=%d, RSSI=%d, SSID=\"%s\"\n",
- MAC2STR(bi->BSSID.octet), channel, rssi, bi->SSID));
+ WL_SCAN(("%s: BSSID="MACSTR", channel=%d, RSSI=%d, SSID=\"%s\"\n",
+ __FUNCTION__, MAC2STR(bi->BSSID.octet), channel, rssi, bi->SSID));
/* First entry must be the BSSID */
iwe.cmd = SIOCGIWAP;
return 0;
}
-#endif /* WL_ESCAN */
static int
wl_iw_iscan_get_scan(
char *extra
)
{
- struct dhd_pub *dhd = dhd_get_pub(dev);
- wl_wext_info_t *wext_info = NULL;
-#ifndef WL_ESCAN
wl_scan_results_t *list;
struct iw_event iwe;
wl_bss_info_t *bi = NULL;
int ii, j;
int apcnt;
char *event = extra, *end = extra + dwrq->length, *value;
+ iscan_info_t *iscan = g_iscan;
iscan_buf_t * p_buf;
int16 rssi;
int channel;
- iscan_info_t *iscan;
-#endif
- DHD_CHECK(dhd, dev);
- wext_info = dhd->wext_info;
-#ifdef WL_ESCAN
- return wl_escan_get_scan(dev, dhd, info, dwrq, extra);
-#else
- WL_TRACE(("%s SIOCGIWSCAN\n", dev->name));
+ WL_TRACE(("%s: %s SIOCGIWSCAN\n", __FUNCTION__, dev->name));
if (!extra)
return -EINVAL;
/* use backup if our thread is not successful */
- iscan = &wext_info->iscan;
if ((!iscan) || (iscan->sysioc_pid < 0)) {
return wl_iw_get_scan(dev, info, dwrq, extra);
}
// terence 20150419: limit the max. rssi to -2 or the bss will be filtered out in android OS
rssi = MIN(dtoh16(bi->RSSI), RSSI_MAXVAL);
channel = (bi->ctl_ch == 0) ? CHSPEC_CHANNEL(bi->chanspec) : bi->ctl_ch;
- WL_SCAN(("BSSID="MACSTR", channel=%d, RSSI=%d, SSID=\"%s\"\n",
- MAC2STR(bi->BSSID.octet), channel, rssi, bi->SSID));
+ WL_SCAN(("%s: BSSID="MACSTR", channel=%d, RSSI=%d, SSID=\"%s\"\n",
+ __FUNCTION__, MAC2STR(bi->BSSID.octet), channel, rssi, bi->SSID));
/* First entry must be the BSSID */
iwe.cmd = SIOCGIWAP;
dwrq->length = event - extra;
dwrq->flags = 0; /* todo */
- WL_SCAN(("apcnt=%d\n", apcnt));
+ WL_SCAN(("%s: apcnt=%d\n", __FUNCTION__, apcnt));
return 0;
-#endif
}
+#endif /* WL_ESCAN */
#endif /* WIRELESS_EXT > 13 */
{
wlc_ssid_t ssid;
int error;
- struct dhd_pub *dhd = dhd_get_pub(dev);
- wl_wext_info_t *wext_info = NULL;
WL_TRACE(("%s: SIOCSIWESSID\n", dev->name));
- DHD_CHECK(dhd, dev);
- wext_info = dhd->wext_info;
/* default Broadcast SSID */
memset(&ssid, 0, sizeof(ssid));
memcpy(ssid.SSID, extra, ssid.SSID_len);
ssid.SSID_len = htod32(ssid.SSID_len);
- if (wext_info) {
- memcpy(wext_info->conn_info.ssid.SSID, ssid.SSID, ssid.SSID_len);
- wext_info->conn_info.ssid.SSID_len = ssid.SSID_len;
- }
- if (wext_info && memcmp(ðer_null, &wext_info->conn_info.bssid, ETHER_ADDR_LEN)) {
- if ((error = wl_ext_connect(dev, &wext_info->conn_info)))
- return error;
- } else {
- if ((error = dev_wlc_ioctl(dev, WLC_SET_SSID, &ssid, sizeof(ssid)))) {
- WL_ERROR(("WLC_SET_SSID failed (%d).\n", error));
- return error;
- }
- WL_MSG(dev->name, "join SSID=\"%s\"\n", ssid.SSID);
+ if ((error = dev_wlc_ioctl(dev, WLC_SET_SSID, &ssid, sizeof(ssid)))) {
+ WL_ERROR(("%s: WLC_SET_SSID failed (%d).\n", __FUNCTION__, error));
+ return error;
}
- wl_iw_update_connect_status(dev, WL_EXT_STATUS_CONNECTING);
+ WL_ERROR(("%s: join SSID=%s\n", __FUNCTION__, ssid.SSID));
}
/* If essid null then it is "iwconfig <interface> essid off" command */
else {
scb_val_t scbval;
bzero(&scbval, sizeof(scb_val_t));
- WL_MSG(dev->name, "WLC_DISASSOC\n");
+ WL_ERROR(("%s: WLC_DISASSOC\n", __FUNCTION__));
if ((error = dev_wlc_ioctl(dev, WLC_DISASSOC, &scbval, sizeof(scb_val_t)))) {
- WL_ERROR(("WLC_DISASSOC failed (%d).\n", error));
+ WL_ERROR(("%s: WLC_DISASSOC failed (%d).\n", __FUNCTION__, error));
return error;
}
- wl_iw_update_connect_status(dev, WL_EXT_STATUS_DISCONNECTING);
}
return 0;
}
return -EINVAL;
if ((error = dev_wlc_ioctl(dev, WLC_GET_SSID, &ssid, sizeof(ssid)))) {
- WL_ERROR(("Error getting the SSID %d\n", error));
+ WL_ERROR(("Error getting the SSID\n"));
return error;
}
char *extra
)
{
-#if defined(BCMWAPI_WPI)
- uchar buf[WLC_IOCTL_SMLEN] = {0};
- uchar *p = buf;
- int wapi_ie_size;
-
- WL_TRACE(("%s: SIOCSIWGENIE\n", dev->name));
-
- if (extra[0] == DOT11_MNG_WAPI_ID)
- {
- wapi_ie_size = iwp->length;
- memcpy(p, extra, iwp->length);
- dev_wlc_bufvar_set(dev, "wapiie", buf, wapi_ie_size);
- }
- else
-#endif
dev_wlc_bufvar_set(dev, "wpaie", extra, iwp->length);
return 0;
bcopy(keystring, pmk.key, len);
pmk.flags = htod16(WSEC_PASSPHRASE);
- WL_WSEC(("set key %s\n", keystring));
+ WL_WSEC(("%s: set key %s\n", __FUNCTION__, keystring));
error = dev_wlc_ioctl(dev, WLC_SET_WSEC_PMK, &pmk, sizeof(pmk));
if (error) {
- WL_ERROR(("WLC_SET_WSEC_PMK error %d\n", error));
+ WL_ERROR(("%s: WLC_SET_WSEC_PMK error %d\n", __FUNCTION__, error));
return error;
}
}
case IW_ENCODE_ALG_CCMP:
key.algo = CRYPTO_ALGO_AES_CCM;
break;
-#ifdef BCMWAPI_WPI
- case IW_ENCODE_ALG_SM4:
- key.algo = CRYPTO_ALGO_SMS4;
- if (iwe->ext_flags & IW_ENCODE_EXT_GROUP_KEY) {
- key.flags &= ~WL_PRIMARY_KEY;
- }
- break;
-#endif
default:
break;
}
error = dev_wlc_ioctl(dev, WLC_SET_KEY, &key, sizeof(key));
if (error)
return error;
- wl_iw_update_connect_status(dev, WL_EXT_STATUS_ADD_KEY);
}
return 0;
}
-/* wpa2 pmk list */
+
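+/* wpa2 PMKID cache: foo[] extends pmkids.pmkid[] so the static storage holds up to MAXPMKID entries */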
+struct {
+ pmkid_list_t pmkids;
+ pmkid_t foo[MAXPMKID-1];
+} pmkid_list;
static int
wl_iw_set_pmksa(
struct net_device *dev,
char *extra
)
{
- struct pmk_list *pmk_list = NULL;
struct iw_pmksa *iwpmksa;
uint i;
char eabuf[ETHER_ADDR_STR_LEN];
- pmkid_t *pmkid_array = NULL;
- struct dhd_pub *dhd = dhd_get_pub(dev);
- wl_wext_info_t *wext_info = NULL;
+ pmkid_t * pmkid_array = pmkid_list.pmkids.pmkid;
WL_TRACE(("%s: SIOCSIWPMKSA\n", dev->name));
- DHD_CHECK(dhd, dev);
- wext_info = dhd->wext_info;
- pmk_list = &wext_info->pmk_list;
- if (pmk_list)
- pmkid_array = pmk_list->pmkids.pmkid;
iwpmksa = (struct iw_pmksa *)extra;
bzero((char *)eabuf, ETHER_ADDR_STR_LEN);
if (iwpmksa->cmd == IW_PMKSA_FLUSH) {
WL_TRACE(("wl_iw_set_pmksa - IW_PMKSA_FLUSH\n"));
- bzero((char *)pmk_list, sizeof(struct pmk_list));
+ bzero((char *)&pmkid_list, sizeof(pmkid_list));
}
if (iwpmksa->cmd == IW_PMKSA_REMOVE) {
pmkid_list_t pmkid, *pmkidptr;
WL_TRACE(("%02x ", pmkidptr->pmkid[0].PMKID[j]));
WL_TRACE(("\n"));
}
- for (i = 0; i < pmk_list->pmkids.npmkid; i++)
+ for (i = 0; i < pmkid_list.pmkids.npmkid; i++)
if (!bcmp(&iwpmksa->bssid.sa_data[0], &pmkid_array[i].BSSID,
ETHER_ADDR_LEN))
break;
- for (; i < pmk_list->pmkids.npmkid; i++) {
+ for (; i < pmkid_list.pmkids.npmkid; i++) {
bcopy(&pmkid_array[i+1].BSSID,
&pmkid_array[i].BSSID,
ETHER_ADDR_LEN);
&pmkid_array[i].PMKID,
WPA2_PMKID_LEN);
}
- pmk_list->pmkids.npmkid--;
+ pmkid_list.pmkids.npmkid--;
}
if (iwpmksa->cmd == IW_PMKSA_ADD) {
bcopy(&iwpmksa->bssid.sa_data[0],
- &pmkid_array[pmk_list->pmkids.npmkid].BSSID,
+ &pmkid_array[pmkid_list.pmkids.npmkid].BSSID,
ETHER_ADDR_LEN);
- bcopy(&iwpmksa->pmkid[0], &pmkid_array[pmk_list->pmkids.npmkid].PMKID,
+ bcopy(&iwpmksa->pmkid[0], &pmkid_array[pmkid_list.pmkids.npmkid].PMKID,
WPA2_PMKID_LEN);
{
uint j;
uint k;
- k = pmk_list->pmkids.npmkid;
+ k = pmkid_list.pmkids.npmkid;
BCM_REFERENCE(k);
WL_TRACE(("wl_iw_set_pmksa,IW_PMKSA_ADD - PMKID: %s = ",
bcm_ether_ntoa(&pmkid_array[k].BSSID,
WL_TRACE(("%02x ", pmkid_array[k].PMKID[j]));
WL_TRACE(("\n"));
}
- pmk_list->pmkids.npmkid++;
+ pmkid_list.pmkids.npmkid++;
}
- WL_TRACE(("PRINTING pmkid LIST - No of elements %d\n", pmk_list->pmkids.npmkid));
- for (i = 0; i < pmk_list->pmkids.npmkid; i++) {
+ WL_TRACE(("PRINTING pmkid LIST - No of elements %d\n", pmkid_list.pmkids.npmkid));
+ for (i = 0; i < pmkid_list.pmkids.npmkid; i++) {
uint j;
WL_TRACE(("PMKID[%d]: %s = ", i,
bcm_ether_ntoa(&pmkid_array[i].BSSID,
WL_TRACE(("%02x ", pmkid_array[i].PMKID[j]));
printf("\n");
}
- dev_wlc_bufvar_set(dev, "pmkid_info", (char *)pmk_list, sizeof(struct pmk_list));
+ WL_TRACE(("\n"));
+ dev_wlc_bufvar_set(dev, "pmkid_info", (char *)&pmkid_list, sizeof(pmkid_list));
return 0;
}
val = WPA_AUTH_PSK | WPA_AUTH_UNSPECIFIED;
else if (paramval & IW_AUTH_WPA_VERSION_WPA2)
val = WPA2_AUTH_PSK | WPA2_AUTH_UNSPECIFIED;
-#ifdef BCMWAPI_WPI
- else if (paramval & IW_AUTH_WAPI_VERSION_1)
- val = WAPI_AUTH_UNSPECIFIED;
-#endif
- WL_TRACE(("%d: setting wpa_auth to 0x%0x\n", __LINE__, val));
+ WL_TRACE(("%s: %d: setting wpa_auth to 0x%0x\n", __FUNCTION__, __LINE__, val));
if ((error = dev_wlc_intvar_set(dev, "wpa_auth", val)))
return error;
break;
}
if ((error = dev_wlc_intvar_get(dev, "wsec", &val))) {
- WL_ERROR(("wsec error %d\n", error));
+ WL_ERROR(("%s: wsec error %d\n", __FUNCTION__, error));
return error;
}
- WL_WSEC(("get wsec=0x%x\n", val));
+ WL_WSEC(("%s: get wsec=0x%x\n", __FUNCTION__, val));
cipher_combined = iw->gwsec | iw->pwsec;
val &= ~(WEP_ENABLED | TKIP_ENABLED | AES_ENABLED);
val |= TKIP_ENABLED;
if (cipher_combined & IW_AUTH_CIPHER_CCMP)
val |= AES_ENABLED;
-#ifdef BCMWAPI_WPI
- val &= ~SMS4_ENABLED;
- if (cipher_combined & IW_AUTH_CIPHER_SMS4)
- val |= SMS4_ENABLED;
-#endif
if (iw->privacy_invoked && !val) {
- WL_WSEC(("%s: 'Privacy invoked' TRUE but clearing wsec, assuming "
- "we're a WPS enrollee\n", dev->name));
+ WL_WSEC(("%s: %s: 'Privacy invoked' TRUE but clearing wsec, assuming "
+ "we're a WPS enrollee\n", dev->name, __FUNCTION__));
if ((error = dev_wlc_intvar_set(dev, "is_WPS_enrollee", TRUE))) {
WL_WSEC(("Failed to set iovar is_WPS_enrollee\n"));
return error;
}
}
- WL_WSEC(("set wsec=0x%x\n", val));
+ WL_WSEC(("%s: set wsec=0x%x\n", __FUNCTION__, val));
if ((error = dev_wlc_intvar_set(dev, "wsec", val))) {
- WL_ERROR(("wsec error %d\n", error));
+ WL_ERROR(("%s: wsec error %d\n", __FUNCTION__, error));
return error;
}
* handshake.
*/
if (dev_wlc_intvar_get(dev, "fbt_cap", &fbt_cap) == 0) {
- WL_WSEC(("get fbt_cap=0x%x\n", fbt_cap));
+ WL_WSEC(("%s: get fbt_cap=0x%x\n", __FUNCTION__, fbt_cap));
if (fbt_cap == WLC_FBT_CAP_DRV_4WAY_AND_REASSOC) {
if ((paramid == IW_AUTH_CIPHER_PAIRWISE) && (val & AES_ENABLED)) {
if ((error = dev_wlc_intvar_set(dev, "sup_wpa", 1))) {
- WL_ERROR(("sup_wpa 1 error %d\n", error));
+ WL_ERROR(("%s: sup_wpa 1 error %d\n", __FUNCTION__, error));
return error;
}
}
else if (val == 0) {
if ((error = dev_wlc_intvar_set(dev, "sup_wpa", 0))) {
- WL_ERROR(("sup_wpa 0 error %d\n", error));
+ WL_ERROR(("%s: sup_wpa 0 error %d\n", __FUNCTION__, error));
return error;
}
}
case IW_AUTH_KEY_MGMT:
if ((error = dev_wlc_intvar_get(dev, "wpa_auth", &val))) {
- WL_ERROR(("wpa_auth error %d\n", error));
+ WL_ERROR(("%s: wpa_auth error %d\n", __FUNCTION__, error));
return error;
}
- WL_WSEC(("get wpa_auth to %d\n", val));
+ WL_WSEC(("%s: get wpa_auth to %d\n", __FUNCTION__, val));
if (val & (WPA_AUTH_PSK | WPA_AUTH_UNSPECIFIED)) {
if (paramval & (IW_AUTH_KEY_MGMT_FT_PSK | IW_AUTH_KEY_MGMT_PSK))
if (paramval & (IW_AUTH_KEY_MGMT_FT_802_1X | IW_AUTH_KEY_MGMT_FT_PSK))
val |= WPA2_AUTH_FT;
}
-#ifdef BCMWAPI_WPI
- if (paramval & (IW_AUTH_KEY_MGMT_WAPI_PSK | IW_AUTH_KEY_MGMT_WAPI_CERT))
- val = WAPI_AUTH_UNSPECIFIED;
-#endif
- WL_TRACE(("%d: setting wpa_auth to %d\n", __LINE__, val));
+ WL_TRACE(("%s: %d: setting wpa_auth to %d\n", __FUNCTION__, __LINE__, val));
if ((error = dev_wlc_intvar_set(dev, "wpa_auth", val)))
return error;
break;
case IW_AUTH_80211_AUTH_ALG:
/* open shared */
- WL_MSG(dev->name, "Setting the D11auth %d\n", paramval);
+ WL_ERROR(("Setting the D11auth %d\n", paramval));
if (paramval & IW_AUTH_ALG_OPEN_SYSTEM)
val = 0;
else if (paramval & IW_AUTH_ALG_SHARED_KEY)
case IW_AUTH_WPA_ENABLED:
if (paramval == 0) {
val = 0;
- WL_TRACE(("%d: setting wpa_auth to %d\n", __LINE__, val));
+ WL_TRACE(("%s: %d: setting wpa_auth to %d\n", __FUNCTION__, __LINE__, val));
error = dev_wlc_intvar_set(dev, "wpa_auth", val);
return error;
}
#if WIRELESS_EXT > 17
case IW_AUTH_ROAMING_CONTROL:
- WL_TRACE(("IW_AUTH_ROAMING_CONTROL\n"));
+ WL_TRACE(("%s: IW_AUTH_ROAMING_CONTROL\n", __FUNCTION__));
/* driver control or user space app control */
break;
#endif /* WIRELESS_EXT > 17 */
-#ifdef BCMWAPI_WPI
-
- case IW_AUTH_WAPI_ENABLED:
- if ((error = dev_wlc_intvar_get(dev, "wsec", &val)))
- return error;
- if (paramval) {
- val |= SMS4_ENABLED;
- if ((error = dev_wlc_intvar_set(dev, "wsec", val))) {
- WL_ERROR(("setting wsec to 0x%0x returned error %d\n",
- val, error));
- return error;
- }
- if ((error = dev_wlc_intvar_set(dev, "wpa_auth", WAPI_AUTH_UNSPECIFIED))) {
- WL_ERROR(("setting wpa_auth(%d) returned %d\n",
- WAPI_AUTH_UNSPECIFIED,
- error));
- return error;
- }
- }
-
- break;
-
-#endif /* BCMWAPI_WPI */
default:
break;
#if WIRELESS_EXT > 17
case IW_AUTH_ROAMING_CONTROL:
- WL_ERROR(("IW_AUTH_ROAMING_CONTROL\n"));
+ WL_ERROR(("%s: IW_AUTH_ROAMING_CONTROL\n", __FUNCTION__));
/* driver control or user space app control */
break;
(iw_handler) wl_iw_iscan_get_aplist, /* SIOCGIWAPLIST */
#endif
#if WIRELESS_EXT > 13
+#ifdef WL_ESCAN
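+ /* under WL_ESCAN the scan ioctls are served by the e-scan path instead of the legacy iscan thread */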
+ (iw_handler) wl_escan_set_scan, /* SIOCSIWSCAN */
+ (iw_handler) wl_escan_get_scan, /* SIOCGIWSCAN */
+#else
(iw_handler) wl_iw_iscan_set_scan, /* SIOCSIWSCAN */
(iw_handler) wl_iw_iscan_get_scan, /* SIOCGIWSCAN */
+#endif
#else /* WIRELESS_EXT > 13 */
(iw_handler) NULL, /* SIOCSIWSCAN */
(iw_handler) NULL, /* SIOCGIWSCAN */
char *extra = NULL;
size_t token_size = 1;
int max_tokens = 0, ret = 0;
-#ifndef WL_ESCAN
- struct dhd_pub *dhd = dhd_get_pub(dev);
- wl_wext_info_t *wext_info = NULL;
- iscan_info_t *iscan;
-
- DHD_CHECK(dhd, dev);
- wext_info = dhd->wext_info;
- iscan = &wext_info->iscan;
-#endif
if (cmd < SIOCIWFIRST ||
IW_IOCTL_IDX(cmd) >= ARRAYSIZE(wl_iw_handler) ||
#if WIRELESS_EXT > 13
case SIOCGIWSCAN:
#ifndef WL_ESCAN
- if (iscan)
+ if (g_iscan)
max_tokens = wrq->u.data.length;
else
#endif
#endif /* IW_CUSTOM_MAX */
void
-wl_iw_event(struct net_device *dev, struct wl_wext_info *wext_info,
- wl_event_msg_t *e, void* data)
+wl_iw_event(struct net_device *dev, wl_event_msg_t *e, void* data)
{
#if WIRELESS_EXT > 13
union iwreq_data wrqu;
uint32 datalen = ntoh32(e->datalen);
uint32 status = ntoh32(e->status);
uint32 reason = ntoh32(e->reason);
-#ifndef WL_ESCAN
- iscan_info_t *iscan = &wext_info->iscan;
-#endif
memset(&wrqu, 0, sizeof(wrqu));
memset(extra, 0, sizeof(extra));
case WLC_E_REASSOC_IND:
cmd = IWEVREGISTERED;
break;
- case WLC_E_DEAUTH:
- case WLC_E_DISASSOC:
- wl_iw_update_connect_status(dev, WL_EXT_STATUS_DISCONNECTED);
- WL_MSG_RLMT(dev->name, &e->addr, ETHER_ADDR_LEN,
- "disconnected with "MACSTR", event %d, reason %d\n",
- MAC2STR((u8 *)wrqu.addr.sa_data), event_type, reason);
- break;
case WLC_E_DEAUTH_IND:
case WLC_E_DISASSOC_IND:
cmd = SIOCGIWAP;
- WL_MSG(dev->name, "disconnected with "MACSTR", event %d, reason %d\n",
- MAC2STR((u8 *)wrqu.addr.sa_data), event_type, reason);
+ wrqu.data.length = strlen(extra);
bzero(wrqu.addr.sa_data, ETHER_ADDR_LEN);
bzero(&extra, ETHER_ADDR_LEN);
- wl_iw_update_connect_status(dev, WL_EXT_STATUS_DISCONNECTED);
break;
case WLC_E_LINK:
cmd = SIOCGIWAP;
+ wrqu.data.length = strlen(extra);
if (!(flags & WLC_EVENT_MSG_LINK)) {
- WL_MSG(dev->name, "Link Down with "MACSTR", reason=%d\n",
+ printf("%s: Link Down with "MACSTR", reason=%d\n", __FUNCTION__,
MAC2STR((u8 *)wrqu.addr.sa_data), reason);
bzero(wrqu.addr.sa_data, ETHER_ADDR_LEN);
bzero(&extra, ETHER_ADDR_LEN);
- wl_iw_update_connect_status(dev, WL_EXT_STATUS_DISCONNECTED);
} else {
- WL_MSG(dev->name, "Link UP with "MACSTR"\n",
+ printf("%s: Link UP with "MACSTR"\n", __FUNCTION__,
MAC2STR((u8 *)wrqu.addr.sa_data));
}
break;
}
#endif /* WIRELESS_EXT > 17 */
-#ifndef WL_ESCAN
+#ifdef WL_ESCAN
+ case WLC_E_ESCAN_RESULT:
+ WL_TRACE(("event WLC_E_ESCAN_RESULT\n"));
+ wl_escan_event(dev, e, data);
+ break;
+#else
+
case WLC_E_SCAN_COMPLETE:
#if WIRELESS_EXT > 14
cmd = SIOCGIWSCAN;
WL_TRACE(("event WLC_E_SCAN_COMPLETE\n"));
// terence 20150224: fix "wlan0: (WE) : Wireless Event too big (65306)"
memset(&wrqu, 0, sizeof(wrqu));
- if ((iscan) && (iscan->sysioc_pid >= 0) &&
- (iscan->iscan_state != ISCAN_STATE_IDLE))
- up(&iscan->sysioc_sem);
+ if ((g_iscan) && (g_iscan->sysioc_pid >= 0) &&
+ (g_iscan->iscan_state != ISCAN_STATE_IDLE))
+ up(&g_iscan->sysioc_sem);
break;
#endif
if (cmd) {
#ifndef WL_ESCAN
if (cmd == SIOCGIWSCAN) {
- if ((!iscan) || (iscan->sysioc_pid < 0)) {
+ if ((!g_iscan) || (g_iscan->sysioc_pid < 0)) {
wireless_send_event(dev, cmd, &wrqu, NULL);
};
} else
break;
}
default:
- WL_ERROR(("%d: Unsupported type %d\n", __LINE__, type));
+ WL_ERROR(("%s %d: Unsupported type %d\n", __FUNCTION__, __LINE__, type));
break;
}
return res;
phy_noise = 0;
if ((res = dev_wlc_ioctl(dev, WLC_GET_PHY_NOISE, &phy_noise, sizeof(phy_noise)))) {
- WL_TRACE(("WLC_GET_PHY_NOISE error=%d\n", res));
+ WL_ERROR(("%s: WLC_GET_PHY_NOISE error=%d\n", __FUNCTION__, res));
goto done;
}
memset(&scb_val, 0, sizeof(scb_val));
if ((res = dev_wlc_ioctl(dev, WLC_GET_RSSI, &scb_val, sizeof(scb_val_t)))) {
- WL_TRACE(("WLC_GET_RSSI error=%d\n", res));
+ WL_ERROR(("%s: WLC_GET_RSSI error=%d\n", __FUNCTION__, res));
goto done;
}
memset(&revinfo, 0, sizeof(revinfo));
res = dev_wlc_ioctl(dev, WLC_GET_REVINFO, &revinfo, sizeof(revinfo));
if (res) {
- WL_ERROR(("WLC_GET_REVINFO failed %d\n", res));
+ WL_ERROR(("%s: WLC_GET_REVINFO failed %d\n", __FUNCTION__, res));
goto done;
}
corerev = dtoh32(revinfo.corerev);
#ifdef WL_NAN
res = wl_cntbuf_to_xtlv_format(NULL, cntinfo, MAX_WLIW_IOCTL_LEN, corerev);
if (res) {
- WL_ERROR(("wl_cntbuf_to_xtlv_format failed %d\n", res));
+ WL_ERROR(("%s: wl_cntbuf_to_xtlv_format failed %d\n", __FUNCTION__, res));
goto done;
}
#ifndef WL_ESCAN
static void
-wl_iw_timerfunc(ulong data)
+wl_iw_timerfunc(
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 15, 0)
+ struct timer_list *t
+#else
+ unsigned long data
+#endif
+)
{
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 15, 0)
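+ /* kernels >= 4.15 dropped timer_list.data, so recover the enclosing iscan_info_t with from_timer() */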
+ iscan_info_t *iscan = from_timer(iscan, t, timer);
+#else
iscan_info_t *iscan = (iscan_info_t *)data;
+#endif
iscan->timer_on = 0;
if (iscan->iscan_state != ISCAN_STATE_IDLE) {
WL_TRACE(("timer trigger\n"));
uint32 status;
iscan_info_t *iscan = (iscan_info_t *)data;
- WL_MSG("wlan", "thread Enter\n");
+ printf("%s: thread Enter\n", __FUNCTION__);
DAEMONIZE("iscan_sysioc");
status = WL_SCAN_RESULTS_PARTIAL;
break;
}
}
- WL_MSG("wlan", "was terminated\n");
+ printf("%s: was terminated\n", __FUNCTION__);
complete_and_exit(&iscan->sysioc_exited, 0);
}
-#endif /* !WL_ESCAN */
-
-void
-wl_iw_detach(struct net_device *dev, dhd_pub_t *dhdp)
-{
- wl_wext_info_t *wext_info = dhdp->wext_info;
-#ifndef WL_ESCAN
- iscan_buf_t *buf;
- iscan_info_t *iscan;
-#endif
- if (!wext_info)
- return;
-
-#ifndef WL_ESCAN
- iscan = &wext_info->iscan;
- if (iscan->sysioc_pid >= 0) {
- KILL_PROC(iscan->sysioc_pid, SIGTERM);
- wait_for_completion(&iscan->sysioc_exited);
- }
-
- while (iscan->list_hdr) {
- buf = iscan->list_hdr->next;
- kfree(iscan->list_hdr);
- iscan->list_hdr = buf;
- }
-#endif
- wl_ext_event_deregister(dev, dhdp, WLC_E_LAST, wl_iw_event);
- if (wext_info) {
- kfree(wext_info);
- dhdp->wext_info = NULL;
- }
-}
+#endif /* WL_ESCAN */
int
-wl_iw_attach(struct net_device *dev, dhd_pub_t *dhdp)
+wl_iw_attach(struct net_device *dev, void * dhdp)
{
- wl_wext_info_t *wext_info = NULL;
- int ret = 0;
#ifndef WL_ESCAN
iscan_info_t *iscan = NULL;
-#endif
+
+ printf("%s: Enter\n", __FUNCTION__);
if (!dev)
return 0;
- WL_TRACE(("Enter\n"));
- wext_info = (void *)kzalloc(sizeof(struct wl_wext_info), GFP_KERNEL);
- if (!wext_info)
+ iscan = kmalloc(sizeof(iscan_info_t), GFP_KERNEL);
+ if (!iscan)
return -ENOMEM;
- memset(wext_info, 0, sizeof(wl_wext_info_t));
- wext_info->dev = dev;
- wext_info->dhd = dhdp;
- wext_info->conn_info.bssidx = 0;
- dhdp->wext_info = (void *)wext_info;
-
-#ifndef WL_ESCAN
- iscan = &wext_info->iscan;
+ memset(iscan, 0, sizeof(iscan_info_t));
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0))
iscan->kthread = NULL;
#endif
iscan->sysioc_pid = -1;
/* we only care about the main interface, so save a global here */
+ g_iscan = iscan;
iscan->dev = dev;
iscan->iscan_state = ISCAN_STATE_IDLE;
/* Set up the timer */
iscan->timer_ms = 2000;
- init_timer_compat(&iscan->timer, wl_iw_timerfunc, iscan);
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 15, 0)
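+ /* timer_setup() replaces init_timer() here; the callback now receives the timer pointer rather than a data cookie */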
+ timer_setup(&iscan->timer, wl_iw_timerfunc, 0);
+#else
+ init_timer(&iscan->timer);
+ iscan->timer.data = (ulong)iscan;
+ iscan->timer.function = wl_iw_timerfunc;
+#endif
sema_init(&iscan->sysioc_sem, 0);
init_completion(&iscan->sysioc_exited);
#else
iscan->sysioc_pid = kernel_thread(_iscan_sysioc_thread, iscan, 0);
#endif
- if (iscan->sysioc_pid < 0) {
- ret = -ENOMEM;
- goto exit;
- }
-#endif
- ret = wl_ext_event_register(dev, dhdp, WLC_E_LAST, wl_iw_event, dhdp->wext_info,
- PRIO_EVENT_WEXT);
- if (ret) {
- WL_ERROR(("wl_ext_event_register err %d\n", ret));
- goto exit;
- }
-
- return ret;
-exit:
- wl_iw_detach(dev, dhdp);
- return ret;
+ if (iscan->sysioc_pid < 0)
+ return -ENOMEM;
+#endif /* WL_ESCAN */
+ return 0;
}
-void
-wl_iw_down(struct net_device *dev, dhd_pub_t *dhdp)
+void wl_iw_detach(void)
{
- wl_wext_info_t *wext_info = NULL;
-
- if (dhdp) {
- wext_info = dhdp->wext_info;
- } else {
- WL_ERROR (("dhd is NULL\n"));
+#ifndef WL_ESCAN
+ iscan_buf_t *buf;
+ iscan_info_t *iscan = g_iscan;
+ if (!iscan)
return;
+ if (iscan->sysioc_pid >= 0) {
+ KILL_PROC(iscan->sysioc_pid, SIGTERM);
+ wait_for_completion(&iscan->sysioc_exited);
}
-}
-
-int
-wl_iw_up(struct net_device *dev, dhd_pub_t *dhdp)
-{
- wl_wext_info_t *wext_info = NULL;
- int ret = 0;
-
- if (dhdp) {
- wext_info = dhdp->wext_info;
- } else {
- WL_ERROR (("dhd is NULL\n"));
- return -ENODEV;
- }
-
- return ret;
-}
-s32
-wl_iw_autochannel(struct net_device *dev, char* command, int total_len)
-{
- struct dhd_pub *dhd = dhd_get_pub(dev);
- wl_wext_info_t *wext_info = NULL;
- int ret = 0;
-#ifdef WL_ESCAN
- int bytes_written = -1;
-#endif
-
- DHD_CHECK(dhd, dev);
- wext_info = dhd->wext_info;
-#ifdef WL_ESCAN
- sscanf(command, "%*s %d", &dhd->escan->autochannel);
- if (dhd->escan->autochannel == 0) {
- dhd->escan->best_2g_ch = 0;
- dhd->escan->best_5g_ch = 0;
- } else if (dhd->escan->autochannel == 2) {
- bytes_written = snprintf(command, total_len, "2g=%d 5g=%d",
- dhd->escan->best_2g_ch, dhd->escan->best_5g_ch);
- WL_TRACE(("command result is %s\n", command));
- ret = bytes_written;
+ while (iscan->list_hdr) {
+ buf = iscan->list_hdr->next;
+ kfree(iscan->list_hdr);
+ iscan->list_hdr = buf;
}
+ kfree(iscan);
+ g_iscan = NULL;
#endif
-
- return ret;
}
#endif /* USE_IW */
#include <typedefs.h>
#include <ethernet.h>
#include <wlioctl.h>
-#include <dngl_stats.h>
-#include <dhd.h>
-
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 0, 0)
-#define get_ds() (KERNEL_DS)
-#endif
#define WL_SCAN_PARAMS_SSID_MAX 10
#define GET_SSID "SSID="
} wl_iw_t;
struct wl_ctrl {
- timer_list_compat_t *timer;
+ struct timer_list *timer;
struct net_device *dev;
long sysioc_pid;
struct semaphore sysioc_sem;
#endif /* WIRELESS_EXT > 12 */
extern int wl_iw_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
+extern void wl_iw_event(struct net_device *dev, wl_event_msg_t *e, void* data);
extern int wl_iw_get_wireless_stats(struct net_device *dev, struct iw_statistics *wstats);
+int wl_iw_attach(struct net_device *dev, void * dhdp);
int wl_iw_send_priv_event(struct net_device *dev, char *flag);
#ifdef WL_ESCAN
int wl_iw_handle_scanresults_ies(char **event_p, char *end,
struct iw_request_info *info, wl_bss_info_t *bi);
#endif
-int wl_iw_attach(struct net_device *dev, dhd_pub_t *dhdp);
-void wl_iw_detach(struct net_device *dev, dhd_pub_t *dhdp);
-int wl_iw_up(struct net_device *dev, dhd_pub_t *dhdp);
-void wl_iw_down(struct net_device *dev, dhd_pub_t *dhdp);
-s32 wl_iw_autochannel(struct net_device *dev, char* command, int total_len);
-
-/* message levels */
-#define WL_ERROR_LEVEL (1 << 0)
-#define WL_TRACE_LEVEL (1 << 1)
-#define WL_INFO_LEVEL (1 << 2)
-#define WL_SCAN_LEVEL (1 << 3)
-#define WL_WSEC_LEVEL (1 << 4)
+
+void wl_iw_detach(void);
#define CSCAN_COMMAND "CSCAN "
#define CSCAN_TLV_PREFIX 'S'
/*
* Broadcom Dongle Host Driver (DHD), Linux monitor network interface
*
- * Copyright (C) 1999-2019, Broadcom.
- *
+ * Copyright (C) 1999-2017, Broadcom Corporation
+ *
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
* under the terms of the GNU General Public License version 2 (the "GPL"),
* available at http://www.broadcom.com/licenses/GPLv2.php, with the
* following added to such license:
- *
+ *
* As a special exception, the copyright holders of this software give you
* permission to link this software with independent modules, and to copy and
* distribute the resulting executable under terms of your choice, provided that
* the license of that module. An independent module is a module which is not
* derived from this software. The special exception does not apply to any
* modifications of the software.
- *
+ *
* Notwithstanding the above, under no circumstances may you combine this
* software in any way with any other Broadcom software provided under a license
* other than the GPL, without Broadcom's express prior written consent.
*/
#ifndef DHD_MAX_IFS
#define DHD_MAX_IFS 16
-#endif // endif
+#endif
#define MON_PRINT(format, ...) printk("DHD-MON: %s " format, __func__, ##__VA_ARGS__)
#define MON_TRACE MON_PRINT
.ndo_set_rx_mode = dhd_mon_if_set_multicast_list,
#else
.ndo_set_multicast_list = dhd_mon_if_set_multicast_list,
-#endif // endif
+#endif
.ndo_set_mac_address = dhd_mon_if_change_mac,
};
/*
* Linux roam cache
*
- * Copyright (C) 1999-2019, Broadcom.
- *
+ * Copyright (C) 1999-2017, Broadcom Corporation
+ *
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
* under the terms of the GNU General Public License version 2 (the "GPL"),
* available at http://www.broadcom.com/licenses/GPLv2.php, with the
* following added to such license:
- *
+ *
* As a special exception, the copyright holders of this software give you
* permission to link this software with independent modules, and to copy and
* distribute the resulting executable under terms of your choice, provided that
* the license of that module. An independent module is a module which is not
* derived from this software. The special exception does not apply to any
* modifications of the software.
- *
+ *
* Notwithstanding the above, under no circumstances may you combine this
* software in any way with any other Broadcom software provided under a license
* other than the GPL, without Broadcom's express prior written consent.
*
* <<Broadcom-WL-IPTag/Open:>>
*
- * $Id: wl_roam.c 798173 2019-01-07 09:23:21Z $
+ * $Id: wl_roam.c 662434 2016-09-29 12:21:46Z $
*/
-
-#include <typedefs.h>
-#include <osl.h>
-#include <bcmwifi_channels.h>
-#include <wlioctl.h>
-#include <bcmutils.h>
-#ifdef WL_CFG80211
-#include <wl_cfg80211.h>
-#endif // endif
-#include <wldev_common.h>
-#include <bcmstdlib_s.h>
-
-#ifdef ESCAN_CHANNEL_CACHE
-#define MAX_ROAM_CACHE 200
-#define MAX_SSID_BUFSIZE 36
-
-#define ROAMSCAN_MODE_NORMAL 0
-#define ROAMSCAN_MODE_WES 1
-
-typedef struct {
- chanspec_t chanspec;
- int ssid_len;
- char ssid[MAX_SSID_BUFSIZE];
-} roam_channel_cache;
-
-static int n_roam_cache = 0;
-static int roam_band = WLC_BAND_AUTO;
-static roam_channel_cache roam_cache[MAX_ROAM_CACHE];
-static uint band2G, band5G, band_bw;
-
-#ifdef ROAM_CHANNEL_CACHE
-int init_roam_cache(struct bcm_cfg80211 *cfg, int ioctl_ver)
-{
- int err;
- struct net_device *dev = bcmcfg_to_prmry_ndev(cfg);
- s32 mode;
-
- /* Check support in firmware */
- err = wldev_iovar_getint(dev, "roamscan_mode", &mode);
- if (err && (err == BCME_UNSUPPORTED)) {
- /* If firmware doesn't support, return error. Else proceed */
- WL_ERR(("roamscan_mode iovar failed. %d\n", err));
- return err;
- }
-
-#ifdef D11AC_IOTYPES
- if (ioctl_ver == 1) {
- /* legacy chanspec */
- band2G = WL_LCHANSPEC_BAND_2G;
- band5G = WL_LCHANSPEC_BAND_5G;
- band_bw = WL_LCHANSPEC_BW_20 | WL_LCHANSPEC_CTL_SB_NONE;
- } else {
- band2G = WL_CHANSPEC_BAND_2G;
- band5G = WL_CHANSPEC_BAND_5G;
- band_bw = WL_CHANSPEC_BW_20;
- }
-#else
- band2G = WL_CHANSPEC_BAND_2G;
- band5G = WL_CHANSPEC_BAND_5G;
- band_bw = WL_CHANSPEC_BW_20 | WL_CHANSPEC_CTL_SB_NONE;
-#endif /* D11AC_IOTYPES */
-
- n_roam_cache = 0;
- roam_band = WLC_BAND_AUTO;
-
- return 0;
-}
-#endif /* ROAM_CHANNEL_CACHE */
-
-#ifdef ESCAN_CHANNEL_CACHE
-void set_roam_band(int band)
-{
- roam_band = band;
-}
-
-void reset_roam_cache(struct bcm_cfg80211 *cfg)
-{
- if (!cfg->rcc_enabled) {
- return;
- }
-
- n_roam_cache = 0;
-}
-
-void add_roam_cache(struct bcm_cfg80211 *cfg, wl_bss_info_t *bi)
-{
- int i;
- uint8 channel;
- char chanbuf[CHANSPEC_STR_LEN];
-
- if (!cfg->rcc_enabled) {
- return;
- }
-
- if (n_roam_cache >= MAX_ROAM_CACHE)
- return;
-
- for (i = 0; i < n_roam_cache; i++) {
- if ((roam_cache[i].ssid_len == bi->SSID_len) &&
- (roam_cache[i].chanspec == bi->chanspec) &&
- (memcmp(roam_cache[i].ssid, bi->SSID, bi->SSID_len) == 0)) {
- /* identical one found, just return */
- return;
- }
- }
-
- roam_cache[n_roam_cache].ssid_len = bi->SSID_len;
- channel = wf_chspec_ctlchan(bi->chanspec);
- WL_DBG(("CHSPEC = %s, CTL %d\n", wf_chspec_ntoa_ex(bi->chanspec, chanbuf), channel));
- roam_cache[n_roam_cache].chanspec =
- (channel <= CH_MAX_2G_CHANNEL ? band2G : band5G) | band_bw | channel;
- (void)memcpy_s(roam_cache[n_roam_cache].ssid, bi->SSID_len, bi->SSID, bi->SSID_len);
-
- n_roam_cache++;
-}
-
-static bool is_duplicated_channel(const chanspec_t *channels, int n_channels, chanspec_t new)
-{
- int i;
-
- for (i = 0; i < n_channels; i++) {
- if (channels[i] == new)
- return TRUE;
- }
-
- return FALSE;
-}
-
-int get_roam_channel_list(int target_chan,
- chanspec_t *channels, int n_channels, const wlc_ssid_t *ssid, int ioctl_ver)
-{
- int i, n = 1;
- char chanbuf[CHANSPEC_STR_LEN];
-
- /* first index is filled with the given target channel */
- if (target_chan) {
- channels[0] = (target_chan & WL_CHANSPEC_CHAN_MASK) |
- (target_chan <= CH_MAX_2G_CHANNEL ? band2G : band5G) | band_bw;
- } else {
- /* If target channel is not provided, set the index to 0 */
- n = 0;
- }
-
- WL_DBG((" %s: %03d 0x%04X\n", __FUNCTION__, target_chan, channels[0]));
-
- for (i = 0; i < n_roam_cache; i++) {
- chanspec_t ch = roam_cache[i].chanspec;
- bool is_2G = ioctl_ver == 1 ? LCHSPEC_IS2G(ch) : CHSPEC_IS2G(ch);
- bool is_5G = ioctl_ver == 1 ? LCHSPEC_IS5G(ch) : CHSPEC_IS5G(ch);
- bool band_match = ((roam_band == WLC_BAND_AUTO) ||
- ((roam_band == WLC_BAND_2G) && is_2G) ||
- ((roam_band == WLC_BAND_5G) && is_5G));
-
- ch = CHSPEC_CHANNEL(ch) | (is_2G ? band2G : band5G) | band_bw;
- if ((roam_cache[i].ssid_len == ssid->SSID_len) &&
- band_match && !is_duplicated_channel(channels, n, ch) &&
- (memcmp(roam_cache[i].ssid, ssid->SSID, ssid->SSID_len) == 0)) {
- /* match found, add it */
- WL_DBG(("%s: Chanspec = %s\n", __FUNCTION__,
- wf_chspec_ntoa_ex(ch, chanbuf)));
- channels[n++] = ch;
- if (n >= n_channels) {
- WL_ERR(("Too many roam scan channels\n"));
- return n;
- }
- }
- }
-
- return n;
-}
-#endif /* ESCAN_CHANNEL_CACHE */
-
-#ifdef ROAM_CHANNEL_CACHE
-void print_roam_cache(struct bcm_cfg80211 *cfg)
-{
- int i;
-
- if (!cfg->rcc_enabled) {
- return;
- }
-
- WL_DBG((" %d cache\n", n_roam_cache));
-
- for (i = 0; i < n_roam_cache; i++) {
- roam_cache[i].ssid[roam_cache[i].ssid_len] = 0;
- WL_DBG(("0x%02X %02d %s\n", roam_cache[i].chanspec,
- roam_cache[i].ssid_len, roam_cache[i].ssid));
- }
-}
-
-static void add_roamcache_channel(wl_roam_channel_list_t *channels, chanspec_t ch)
-{
- int i;
-
- if (channels->n >= MAX_ROAM_CHANNEL) /* buffer full */
- return;
-
- for (i = 0; i < channels->n; i++) {
- if (channels->channels[i] == ch) /* already in the list */
- return;
- }
-
- channels->channels[i] = ch;
- channels->n++;
-
- WL_DBG((" RCC: %02d 0x%04X\n",
- ch & WL_CHANSPEC_CHAN_MASK, ch));
-}
-
-void update_roam_cache(struct bcm_cfg80211 *cfg, int ioctl_ver)
-{
- int error, i, prev_channels;
- wl_roam_channel_list_t channel_list;
- char iobuf[WLC_IOCTL_SMLEN];
- struct net_device *dev = bcmcfg_to_prmry_ndev(cfg);
- wlc_ssid_t ssid;
-
- if (!cfg->rcc_enabled) {
- return;
- }
-
- if (!wl_get_drv_status(cfg, CONNECTED, dev)) {
- WL_DBG(("Not associated\n"));
- return;
- }
-
- /* need to read out the current cache list
- as the firmware may change dynamically
- */
- error = wldev_iovar_getbuf(dev, "roamscan_channels", 0, 0,
- (void *)&channel_list, sizeof(channel_list), NULL);
- if (error) {
- WL_ERR(("Failed to get roamscan channels, error = %d\n", error));
- return;
- }
-
- error = wldev_get_ssid(dev, &ssid);
- if (error) {
- WL_ERR(("Failed to get SSID, err=%d\n", error));
- return;
- }
-
- prev_channels = channel_list.n;
- for (i = 0; i < n_roam_cache; i++) {
- chanspec_t ch = roam_cache[i].chanspec;
- bool is_2G = ioctl_ver == 1 ? LCHSPEC_IS2G(ch) : CHSPEC_IS2G(ch);
- bool is_5G = ioctl_ver == 1 ? LCHSPEC_IS5G(ch) : CHSPEC_IS5G(ch);
- bool band_match = ((roam_band == WLC_BAND_AUTO) ||
- ((roam_band == WLC_BAND_2G) && is_2G) ||
- ((roam_band == WLC_BAND_5G) && is_5G));
-
- if ((roam_cache[i].ssid_len == ssid.SSID_len) &&
- band_match && (memcmp(roam_cache[i].ssid, ssid.SSID, ssid.SSID_len) == 0)) {
- /* match found, add it */
- ch = CHSPEC_CHANNEL(ch) | (is_2G ? band2G : band5G) | band_bw;
- add_roamcache_channel(&channel_list, ch);
- }
- }
- if (prev_channels != channel_list.n) {
- /* channel list updated */
- error = wldev_iovar_setbuf(dev, "roamscan_channels", &channel_list,
- sizeof(channel_list), iobuf, sizeof(iobuf), NULL);
- if (error) {
- WL_ERR(("Failed to update roamscan channels, error = %d\n", error));
- }
- }
-
- WL_DBG(("%d AP, %d cache item(s), err=%d\n", n_roam_cache, channel_list.n, error));
-}
-
-void wl_update_roamscan_cache_by_band(struct net_device *dev, int band)
-{
- int i, error, ioctl_ver, wes_mode;
- wl_roam_channel_list_t chanlist_before, chanlist_after;
- char iobuf[WLC_IOCTL_SMLEN];
-
- roam_band = band;
-
- error = wldev_iovar_getint(dev, "roamscan_mode", &wes_mode);
- if (error) {
- WL_ERR(("Failed to get roamscan mode, error = %d\n", error));
- return;
- }
-
- ioctl_ver = wl_cfg80211_get_ioctl_version();
- /* in case of WES mode, update channel list by band based on the cache in DHD */
- if (wes_mode) {
- int n = 0;
- chanlist_before.n = n_roam_cache;
-
- for (n = 0; n < n_roam_cache; n++) {
- chanspec_t ch = roam_cache[n].chanspec;
- bool is_2G = ioctl_ver == 1 ? LCHSPEC_IS2G(ch) : CHSPEC_IS2G(ch);
- chanlist_before.channels[n] = CHSPEC_CHANNEL(ch) |
- (is_2G ? band2G : band5G) | band_bw;
- }
- } else {
- if (band == WLC_BAND_AUTO) {
- return;
- }
- error = wldev_iovar_getbuf(dev, "roamscan_channels", 0, 0,
- (void *)&chanlist_before, sizeof(wl_roam_channel_list_t), NULL);
- if (error) {
- WL_ERR(("Failed to get roamscan channels, error = %d\n", error));
- return;
- }
- }
- chanlist_after.n = 0;
- /* filtering by the given band */
- for (i = 0; i < chanlist_before.n; i++) {
- chanspec_t chspec = chanlist_before.channels[i];
- bool is_2G = ioctl_ver == 1 ? LCHSPEC_IS2G(chspec) : CHSPEC_IS2G(chspec);
- bool is_5G = ioctl_ver == 1 ? LCHSPEC_IS5G(chspec) : CHSPEC_IS5G(chspec);
- bool band_match = ((band == WLC_BAND_AUTO) ||
- ((band == WLC_BAND_2G) && is_2G) ||
- ((band == WLC_BAND_5G) && is_5G));
- if (band_match) {
- chanlist_after.channels[chanlist_after.n++] = chspec;
- }
- }
-
- if (wes_mode) {
- /* need to set ROAMSCAN_MODE_NORMAL to update roamscan_channels,
- * otherwise, it won't be updated
- */
- wldev_iovar_setint(dev, "roamscan_mode", ROAMSCAN_MODE_NORMAL);
-
- error = wldev_iovar_setbuf(dev, "roamscan_channels", &chanlist_after,
- sizeof(wl_roam_channel_list_t), iobuf, sizeof(iobuf), NULL);
- if (error) {
- WL_ERR(("Failed to update roamscan channels, error = %d\n", error));
- }
- wldev_iovar_setint(dev, "roamscan_mode", ROAMSCAN_MODE_WES);
- } else {
- if (chanlist_before.n == chanlist_after.n) {
- return;
- }
- error = wldev_iovar_setbuf(dev, "roamscan_channels", &chanlist_after,
- sizeof(wl_roam_channel_list_t), iobuf, sizeof(iobuf), NULL);
- if (error) {
- WL_ERR(("Failed to update roamscan channels, error = %d\n", error));
- }
- }
-}
-#endif /* ROAM_CHANNEL_CACHE */
-#endif /* ESCAN_CHANNEL_CACHE */
--- /dev/null
+/*
+ * Forward declarations for commonly used wl driver structs
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.broadcom.com/licenses/GPLv2.php, with the
+ * following added to such license:
+ *
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions of
+ * the license of that module. An independent module is a module which is not
+ * derived from this software. The special exception does not apply to any
+ * modifications of the software.
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a license
+ * other than the GPL, without Broadcom's express prior written consent.
+ *
+ *
+ * <<Broadcom-WL-IPTag/Open:>>
+ *
+ * $Id: wlc_types.h 665242 2016-10-17 05:59:26Z $
+ */
+
+#ifndef _wlc_types_h_
+#define _wlc_types_h_
+#include <wlioctl.h>
+
+/* Version of WLC interface to be returned as a part of wl_wlc_version structure.
+ * WLC_API_VERSION_MINOR is currently not in use.
+ */
+#define WLC_API_VERSION_MAJOR 8
+#define WLC_API_VERSION_MINOR 0
+
+/* forward declarations */
+
+typedef struct wlc_info wlc_info_t;
+typedef struct wlcband wlcband_t;
+typedef struct wlc_cmn_info wlc_cmn_info_t;
+typedef struct wlc_assoc_info wlc_assoc_info_t;
+typedef struct wlc_pm_info wlc_pm_info_t;
+
+typedef struct wlc_bsscfg wlc_bsscfg_t;
+typedef struct wlc_mbss_info wlc_mbss_info_t;
+typedef struct wlc_spt wlc_spt_t;
+typedef struct scb scb_t;
+typedef struct scb_iter scb_iter_t;
+typedef struct vndr_ie_listel vndr_ie_listel_t;
+typedef struct wlc_if wlc_if_t;
+typedef struct wl_if wl_if_t;
+typedef struct led_info led_info_t;
+typedef struct bmac_led bmac_led_t;
+typedef struct bmac_led_info bmac_led_info_t;
+typedef struct seq_cmds_info wlc_seq_cmds_info_t;
+typedef struct ota_test_info ota_test_info_t;
+typedef struct wlc_ccx ccx_t;
+typedef struct wlc_ccx_rm ccx_rm_t;
+typedef struct apps_wlc_psinfo apps_wlc_psinfo_t;
+typedef struct scb_module scb_module_t;
+typedef struct ba_info ba_info_t;
+typedef struct wlc_frminfo wlc_frminfo_t;
+typedef struct amsdu_info amsdu_info_t;
+typedef struct txq_info txq_info_t;
+typedef struct txq txq_t;
+typedef struct cram_info cram_info_t;
+typedef struct wlc_extlog_info wlc_extlog_info_t;
+typedef struct wlc_txq_info wlc_txq_info_t;
+typedef struct wlc_hrt_info wlc_hrt_info_t;
+typedef struct wlc_hrt_to wlc_hrt_to_t;
+typedef struct wlc_cac wlc_cac_t;
+typedef struct ampdu_tx_info ampdu_tx_info_t;
+typedef struct ampdu_rx_info ampdu_rx_info_t;
+typedef struct wlc_ratesel_info wlc_ratesel_info_t;
+typedef struct ratesel_info ratesel_info_t;
+typedef struct wlc_ap_info wlc_ap_info_t;
+typedef struct cs_info cs_info_t;
+typedef struct wlc_scan_info wlc_scan_info_t;
+typedef struct wlc_scan_cmn_info wlc_scan_cmn_t;
+typedef struct tdls_info tdls_info_t;
+typedef struct dls_info dls_info_t;
+typedef struct l2_filter_info l2_filter_info_t;
+typedef struct wlc_auth_info wlc_auth_info_t;
+typedef struct wlc_sup_info wlc_sup_info_t;
+typedef struct wlc_fbt_info wlc_fbt_info_t;
+typedef struct wlc_assoc_mgr_info wlc_assoc_mgr_info_t;
+typedef struct wlc_ccxsup_info wlc_ccxsup_info_t;
+typedef struct wlc_psta_info wlc_psta_info_t;
+typedef struct wlc_mcnx_info wlc_mcnx_info_t;
+typedef struct wlc_p2p_info wlc_p2p_info_t;
+typedef struct wlc_cxnoa_info wlc_cxnoa_info_t;
+typedef struct mchan_info mchan_info_t;
+typedef struct wlc_mchan_context wlc_mchan_context_t;
+typedef struct bta_info bta_info_t;
+typedef struct wowl_info wowl_info_t;
+typedef struct wowlpf_info wowlpf_info_t;
+typedef struct wlc_plt_info wlc_plt_pub_t;
+typedef struct antsel_info antsel_info_t;
+typedef struct bmac_pmq bmac_pmq_t;
+typedef struct wmf_info wmf_info_t;
+typedef struct wlc_rrm_info wlc_rrm_info_t;
+typedef struct rm_info rm_info_t;
+
+struct d11init;
+
+typedef struct wlc_dpc_info wlc_dpc_info_t;
+
+typedef struct wlc_11h_info wlc_11h_info_t;
+typedef struct wlc_tpc_info wlc_tpc_info_t;
+typedef struct wlc_csa_info wlc_csa_info_t;
+typedef struct wlc_quiet_info wlc_quiet_info_t;
+typedef struct cca_info cca_info_t;
+typedef struct itfr_info itfr_info_t;
+
+typedef struct wlc_wnm_info wlc_wnm_info_t;
+typedef struct wlc_11d_info wlc_11d_info_t;
+typedef struct wlc_cntry_info wlc_cntry_info_t;
+
+typedef struct wlc_dfs_info wlc_dfs_info_t;
+
+typedef struct bsscfg_module bsscfg_module_t;
+
+typedef struct wlc_prot_info wlc_prot_info_t;
+typedef struct wlc_prot_g_info wlc_prot_g_info_t;
+typedef struct wlc_prot_n_info wlc_prot_n_info_t;
+typedef struct wlc_prot_obss_info wlc_prot_obss_info_t;
+typedef struct wlc_obss_dynbw wlc_obss_dynbw_t;
+typedef struct wlc_11u_info wlc_11u_info_t;
+typedef struct wlc_probresp_info wlc_probresp_info_t;
+typedef struct wlc_wapi_info wlc_wapi_info_t;
+
+typedef struct wlc_tbtt_info wlc_tbtt_info_t;
+typedef struct wlc_nic_info wlc_nic_info_t;
+
+typedef struct wlc_bssload_info wlc_bssload_info_t;
+
+typedef struct wlc_pcb_info wlc_pcb_info_t;
+typedef struct wlc_txc_info wlc_txc_info_t;
+
+typedef struct wlc_trf_mgmt_ctxt wlc_trf_mgmt_ctxt_t;
+typedef struct wlc_trf_mgmt_info wlc_trf_mgmt_info_t;
+
+typedef struct wlc_net_detect_ctxt wlc_net_detect_ctxt_t;
+
+typedef struct wlc_powersel_info wlc_powersel_info_t;
+typedef struct powersel_info powersel_info_t;
+
+typedef struct wlc_lpc_info wlc_lpc_info_t;
+typedef struct lpc_info lpc_info_t;
+typedef struct rate_lcb_info rate_lcb_info_t;
+typedef struct wlc_txbf_info wlc_txbf_info_t;
+typedef struct wlc_murx_info wlc_murx_info_t;
+
+typedef struct wlc_olpc_eng_info_t wlc_olpc_eng_info_t;
+/* used by olpc to register for callbacks from stf */
+typedef void (*wlc_stf_txchain_evt_notify)(wlc_info_t *wlc);
+
+typedef struct wlc_rfc wlc_rfc_t;
+typedef struct wlc_pktc_info wlc_pktc_info_t;
+
+typedef struct wlc_mfp_info wlc_mfp_info_t;
+
+typedef struct wlc_mdns_info wlc_mdns_info_t;
+
+typedef struct wlc_macfltr_info wlc_macfltr_info_t;
+typedef struct wlc_bmon_info wlc_bmon_info_t;
+
+typedef struct wlc_nar_info wlc_nar_info_t;
+typedef struct wlc_bs_data_info wlc_bs_data_info_t;
+
+typedef struct wlc_keymgmt wlc_keymgmt_t;
+typedef struct wlc_key wlc_key_t;
+typedef struct wlc_key_info wlc_key_info_t;
+
+typedef struct wlc_hw wlc_hw_t;
+typedef struct wlc_hw_info wlc_hw_info_t;
+typedef struct wlc_hwband wlc_hwband_t;
+
+typedef struct wlc_rx_stall_info wlc_rx_stall_info_t;
+
+typedef struct wlc_rmc_info wlc_rmc_info_t;
+
+typedef struct wlc_iem_info wlc_iem_info_t;
+
+typedef struct wlc_ier_info wlc_ier_info_t;
+typedef struct wlc_ier_reg wlc_ier_reg_t;
+
+typedef struct wlc_ht_info wlc_ht_info_t;
+typedef struct wlc_obss_info wlc_obss_info_t;
+typedef struct wlc_vht_info wlc_vht_info_t;
+typedef struct wlc_akm_info wlc_akm_info_t;
+typedef struct wlc_srvsdb_info wlc_srvsdb_info_t;
+
+typedef struct wlc_bss_info wlc_bss_info_t;
+
+typedef struct wlc_hs20_info wlc_hs20_info_t;
+typedef struct wlc_pmkid_info wlc_pmkid_info_t;
+typedef struct wlc_btc_info wlc_btc_info_t;
+
+typedef struct wlc_txh_info wlc_txh_info_t;
+typedef union wlc_txd wlc_txd_t;
+
+typedef struct wlc_staprio_info wlc_staprio_info_t;
+typedef struct wlc_stamon_info wlc_stamon_info_t;
+typedef struct wlc_monitor_info wlc_monitor_info_t;
+
+typedef struct wlc_debug_crash_info wlc_debug_crash_info_t;
+
+typedef struct wlc_nan_info wlc_nan_info_t;
+typedef struct wlc_tsmap_info wlc_tsmap_info_t;
+
+typedef struct wlc_wds_info wlc_wds_info_t;
+typedef struct okc_info okc_info_t;
+typedef struct wlc_aibss_info wlc_aibss_info_t;
+typedef struct wlc_ipfo_info wlc_ipfo_info_t;
+typedef struct wlc_stats_info wlc_stats_info_t;
+
+typedef struct wlc_pps_info wlc_pps_info_t;
+
+typedef struct duration_info duration_info_t;
+
+typedef struct wlc_pdsvc_info wlc_pdsvc_info_t;
+
+/* For LTE Coex */
+typedef struct wlc_ltecx_info wlc_ltecx_info_t;
+
+typedef struct wlc_probresp_mac_filter_info wlc_probresp_mac_filter_info_t;
+
+typedef struct wlc_ltr_info wlc_ltr_info_t;
+
+typedef struct bwte_info bwte_info_t;
+
+typedef struct tbow_info tbow_info_t;
+
+typedef struct wlc_modesw_info wlc_modesw_info_t;
+
+typedef struct wlc_pm_mute_tx_info wlc_pm_mute_tx_t;
+
+typedef struct wlc_bcntrim_info wlc_bcntrim_info_t;
+
+typedef struct wlc_smfs_info wlc_smfs_info_t;
+typedef struct wlc_misc_info wlc_misc_info_t;
+typedef struct wlc_ulb_info wlc_ulb_info_t;
+
+typedef struct wlc_eventq wlc_eventq_t;
+typedef struct wlc_event wlc_event_t;
+typedef struct wlc_ulp_info wlc_ulp_info_t;
+
+typedef struct wlc_bsscfg_psq_info wlc_bsscfg_psq_info_t;
+typedef struct wlc_bsscfg_viel_info wlc_bsscfg_viel_info_t;
+
+typedef struct wlc_txmod_info wlc_txmod_info_t;
+typedef struct tx_path_node tx_path_node_t;
+
+typedef struct wlc_linkstats_info wlc_linkstats_info_t;
+typedef struct wlc_lq_info wlc_lq_info_t;
+typedef struct chanim_info chanim_info_t;
+
+typedef struct wlc_mesh_info wlc_mesh_info_t;
+typedef struct wlc_wlfc_info wlc_wlfc_info_t;
+
+typedef struct wlc_frag_info wlc_frag_info_t;
+typedef struct wlc_bss_list wlc_bss_list_t;
+
+typedef struct wlc_msch_info wlc_msch_info_t;
+typedef struct wlc_msch_req_handle wlc_msch_req_handle_t;
+
+typedef struct wlc_randmac_info wlc_randmac_info_t;
+
+typedef struct wlc_chanctxt wlc_chanctxt_t;
+typedef struct wlc_chanctxt_info wlc_chanctxt_info_t;
+typedef struct wlc_sta_info wlc_sta_info_t;
+
+typedef struct health_check_info health_check_info_t;
+typedef struct wlc_act_frame_info wlc_act_frame_info_t;
+typedef struct nan_sched_req_handle nan_sched_req_handle_t;
+
+typedef struct wlc_qos_info wlc_qos_info_t;
+
+typedef struct wlc_assoc wlc_assoc_t;
+typedef struct wlc_roam wlc_roam_t;
+typedef struct wlc_pm_st wlc_pm_st_t;
+typedef struct wlc_wme wlc_wme_t;
+
+typedef struct wlc_link_qual wlc_link_qual_t;
+
+typedef struct wlc_rsdb_info wlc_rsdb_info_t;
+
+typedef struct wlc_asdb wlc_asdb_t;
+
+typedef struct rsdb_common_info rsdb_cmn_info_t;
+
+typedef struct wlc_macdbg_info wlc_macdbg_info_t;
+typedef struct wlc_rspec_info wlc_rspec_info_t;
+typedef struct wlc_ndis_info wlc_ndis_info_t;
+
+typedef struct wlc_join_pref wlc_join_pref_t;
+
+typedef struct wlc_scan_utils wlc_scan_utils_t;
+#ifdef ACKSUPR_MAC_FILTER
+typedef struct wlc_addrmatch_info wlc_addrmatch_info_t;
+#endif /* ACKSUPR_MAC_FILTER */
+
+typedef struct cca_ucode_counts cca_ucode_counts_t;
+typedef struct cca_chan_qual cca_chan_qual_t;
+
+typedef struct wlc_perf_utils wlc_perf_utils_t;
+typedef struct wlc_test_info wlc_test_info_t;
+
+typedef struct chanswitch_times chanswitch_times_t;
+typedef struct wlc_dump_info wlc_dump_info_t;
+
+typedef struct wlc_stf wlc_stf_t;
+
+typedef sta_info_v4_t sta_info_t;
+typedef struct wl_roam_prof_band_v2 wl_roam_prof_band_t;
+typedef struct wl_roam_prof_v2 wl_roam_prof_t;
+
+/* Interface version mapping for versioned pfn structures */
+#undef PFN_SCANRESULT_VERSION
+#define PFN_SCANRESULT_VERSION PFN_SCANRESULT_VERSION_V2
+#define PFN_SCANRESULTS_VERSION PFN_SCANRESULTS_VERSION_V2
+#define PFN_LBEST_SCAN_RESULT_VERSION PFN_LBEST_SCAN_RESULT_VERSION_V2
+typedef wl_pfn_subnet_info_v2_t wl_pfn_subnet_info_t;
+typedef wl_pfn_net_info_v2_t wl_pfn_net_info_t;
+typedef wl_pfn_lnet_info_v2_t wl_pfn_lnet_info_t;
+typedef wl_pfn_lscanresults_v2_t wl_pfn_lscanresults_t;
+typedef wl_pfn_scanresults_v2_t wl_pfn_scanresults_t;
+typedef wl_pfn_scanresult_v2_t wl_pfn_scanresult_t;
+
+typedef wl_dfs_ap_move_status_v2_t wl_dfs_ap_move_status_t;
+
+#endif /* _wlc_types_h_ */
/*
* Common function shared by Linux WEXT, cfg80211 and p2p drivers
*
- * Copyright (C) 1999-2019, Broadcom.
- *
+ * Copyright (C) 1999-2017, Broadcom Corporation
+ *
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
* under the terms of the GNU General Public License version 2 (the "GPL"),
* available at http://www.broadcom.com/licenses/GPLv2.php, with the
* following added to such license:
- *
+ *
* As a special exception, the copyright holders of this software give you
* permission to link this software with independent modules, and to copy and
* distribute the resulting executable under terms of your choice, provided that
* the license of that module. An independent module is a module which is not
* derived from this software. The special exception does not apply to any
* modifications of the software.
- *
+ *
* Notwithstanding the above, under no circumstances may you combine this
* software in any way with any other Broadcom software provided under a license
* other than the GPL, without Broadcom's express prior written consent.
*
* <<Broadcom-WL-IPTag/Open:>>
*
- * $Id: wldev_common.c 786015 2018-10-24 08:21:53Z $
+ * $Id: wldev_common.c 699163 2017-05-12 05:18:23Z $
*/
#include <osl.h>
#include <bcmutils.h>
#ifdef WL_CFG80211
#include <wl_cfg80211.h>
-#include <wl_cfgscan.h>
-#endif /* WL_CFG80211 */
+#endif
#include <dhd_config.h>
#define htod32(i) (i)
#define htodchanspec(i) (i)
#define dtohchanspec(i) (i)
-#define WLDEV_ERROR_MSG(x, args...) \
- do { \
- printk(KERN_INFO "[dhd] WLDEV-ERROR) " x, ## args); \
+#define WLDEV_ERROR(args) \
+ do { \
+ printk(KERN_ERR "WLDEV-ERROR) "); \
+ printk args; \
} while (0)
-#define WLDEV_ERROR(x) WLDEV_ERROR_MSG x
-#define WLDEV_INFO_MSG(x, args...) \
- do { \
- printk(KERN_INFO "[dhd] WLDEV-INFO) " x, ## args); \
+#define WLDEV_INFO(args) \
+ do { \
+ printk(KERN_INFO "WLDEV-INFO) "); \
+ printk args; \
} while (0)
-#define WLDEV_INFO(x) WLDEV_INFO_MSG x
extern int dhd_ioctl_entry_local(struct net_device *net, wl_ioctl_t *ioc, int cmd);
struct net_device *dev, u32 cmd, void *arg, u32 len, u32 set)
{
s32 ret = 0;
- struct wl_ioctl ioc;
+ struct wl_ioctl ioc;
+
memset(&ioc, 0, sizeof(ioc));
ioc.cmd = cmd;
ioc.buf = arg;
ioc.len = len;
ioc.set = set;
- ret = dhd_ioctl_entry_local(dev, (wl_ioctl_t *)&ioc, cmd);
+
+ ret = dhd_ioctl_entry_local(dev, &ioc, cmd);
return ret;
}
+
/*
SET commands :
cast buffer to non-const and call the GET function
#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wcast-qual"
-#endif // endif
+#endif
return wldev_ioctl(dev, cmd, (void *)arg, len, 1);
#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
#pragma GCC diagnostic pop
-#endif // endif
+#endif
}
+
s32 wldev_ioctl_get(
struct net_device *dev, u32 cmd, void *arg, u32 len)
{
* wl_iw, wl_cfg80211 and wl_cfgp2p
*/
static s32 wldev_mkiovar(
- const s8 *iovar_name, const s8 *param, s32 paramlen,
+ const s8 *iovar_name, s8 *param, s32 paramlen,
s8 *iovar_buf, u32 buflen)
{
s32 iolen = 0;
s32 wldev_iovar_getbuf(
struct net_device *dev, s8 *iovar_name,
- const void *param, s32 paramlen, void *buf, s32 buflen, struct mutex* buf_sync)
+ void *param, s32 paramlen, void *buf, s32 buflen, struct mutex* buf_sync)
{
s32 ret = 0;
if (buf_sync) {
return ret;
}
+
s32 wldev_iovar_setbuf(
struct net_device *dev, s8 *iovar_name,
- const void *param, s32 paramlen, void *buf, s32 buflen, struct mutex* buf_sync)
+ void *param, s32 paramlen, void *buf, s32 buflen, struct mutex* buf_sync)
{
s32 ret = 0;
s32 iovar_len;
sizeof(iovar_buf), NULL);
}
+
s32 wldev_iovar_getint(
struct net_device *dev, s8 *iovar, s32 *pval)
{
* wl_iw, wl_cfg80211 and wl_cfgp2p
*/
s32 wldev_mkiovar_bsscfg(
- const s8 *iovar_name, const s8 *param, s32 paramlen,
+ const s8 *iovar_name, s8 *param, s32 paramlen,
s8 *iovar_buf, s32 buflen, s32 bssidx)
{
const s8 *prefix = "bsscfg:";
u32 iolen;
/* initialize buffer */
- if (!iovar_buf || buflen <= 0)
+ if (!iovar_buf || buflen == 0)
return BCME_BADARG;
memset(iovar_buf, 0, buflen);
namelen = (u32) strlen(iovar_name) + 1; /* length of iovar name + null */
iolen = prefixlen + namelen + sizeof(u32) + paramlen;
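+ /* buflen is signed: reject negative values explicitly, since the (u32) cast below would otherwise wrap */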
- if (iolen > (u32)buflen) {
+ if (buflen < 0 || iolen > (u32)buflen)
+ {
WLDEV_ERROR(("%s: buffer is too short\n", __FUNCTION__));
return BCME_BUFTOOSHORT;
}
}
s32 wldev_iovar_setbuf_bsscfg(
- struct net_device *dev, const s8 *iovar_name,
- const void *param, s32 paramlen,
- void *buf, s32 buflen, s32 bsscfg_idx, struct mutex* buf_sync)
+ struct net_device *dev, s8 *iovar_name,
+ void *param, s32 paramlen, void *buf, s32 buflen, s32 bsscfg_idx, struct mutex* buf_sync)
{
s32 ret = 0;
s32 iovar_len;
sizeof(iovar_buf), bssidx, NULL);
}
+
s32 wldev_iovar_getint_bsscfg(
struct net_device *dev, s8 *iovar, s32 *pval, s32 bssidx)
{
buf = NULL;
return error;
}
- bss = (wl_bss_info_t*)(buf + 4);
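+ /* the first 4 bytes of the ioctl buffer carry its length; the wl_bss_info record starts right after */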
+ bss = (struct wl_bss_info *)(buf + 4);
chanspec = wl_chspec_driver_to_host(bss->chanspec);
band = chanspec & WL_CHANSPEC_BAND_MASK;
{
int error = -1;
wl_country_t cspec = {{0}, 0, {0}};
+ wl_country_t cur_cspec = {{0}, 0, {0}}; /* current ccode */
scb_val_t scbval;
+ char smbuf[WLC_IOCTL_SMLEN];
#ifdef WL_CFG80211
struct wireless_dev *wdev = ndev_to_wdev(dev);
struct wiphy *wiphy = wdev->wiphy;
struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
-#endif /* WL_CFG80211 */
+#endif
if (!country_code)
return error;
bzero(&scbval, sizeof(scb_val_t));
- error = wldev_iovar_getbuf(dev, "country", NULL, 0, &cspec, sizeof(cspec), NULL);
+ error = wldev_iovar_getbuf(dev, "country", NULL, 0, &cur_cspec, sizeof(wl_country_t), NULL);
if (error < 0) {
WLDEV_ERROR(("%s: get country failed = %d\n", __FUNCTION__, error));
return error;
}
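+ /* build the requested country spec and let the config layer translate it before comparing with the current code */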
+ cspec.rev = revinfo;
+ memcpy(cspec.country_abbrev, country_code, WLC_CNTRY_BUF_SZ);
+ memcpy(cspec.ccode, country_code, WLC_CNTRY_BUF_SZ);
+ error = dhd_conf_map_country_list(dhd_get_pub(dev), &cspec);
+ if (error)
+ dhd_get_customized_country_code(dev, (char *)&cspec.country_abbrev, &cspec);
+
+ WLDEV_INFO(("%s: Current country %s rev %d\n",
+ __FUNCTION__, cur_cspec.ccode, cur_cspec.rev));
+
if ((error < 0) ||
dhd_force_country_change(dev) ||
- (strncmp(country_code, cspec.ccode, WLC_CNTRY_BUF_SZ) != 0)) {
+ (strncmp(cspec.ccode, cur_cspec.ccode, WLC_CNTRY_BUF_SZ) != 0)) {
+ if ((user_enforced)
#ifdef WL_CFG80211
- if ((user_enforced) && (wl_get_drv_status(cfg, CONNECTED, dev)))
-#else
- if (user_enforced)
-#endif /* WL_CFG80211 */
- {
+ && (wl_get_drv_status(cfg, CONNECTED, dev))
+#endif
+ ) {
bzero(&scbval, sizeof(scb_val_t));
error = wldev_ioctl_set(dev, WLC_DISASSOC,
&scbval, sizeof(scb_val_t));
}
}
-#ifdef WL_CFG80211
- wl_cfg80211_scan_abort(cfg);
-#endif
-
- cspec.rev = revinfo;
- strlcpy(cspec.country_abbrev, country_code, WLC_CNTRY_BUF_SZ);
- strlcpy(cspec.ccode, country_code, WLC_CNTRY_BUF_SZ);
- error = dhd_conf_map_country_list(dhd_get_pub(dev), &cspec);
- if (error)
- dhd_get_customized_country_code(dev, (char *)&cspec.country_abbrev, &cspec);
- error = dhd_conf_set_country(dhd_get_pub(dev), &cspec);
+ error = wldev_iovar_setbuf(dev, "country", &cspec, sizeof(cspec),
+ smbuf, sizeof(smbuf), NULL);
if (error < 0) {
WLDEV_ERROR(("%s: set country for %s as %s rev %d failed\n",
__FUNCTION__, country_code, cspec.ccode, cspec.rev));
/*
* Common function shared by Linux WEXT, cfg80211 and p2p drivers
*
- * Copyright (C) 1999-2019, Broadcom.
- *
+ * Copyright (C) 1999-2017, Broadcom Corporation
+ *
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
* under the terms of the GNU General Public License version 2 (the "GPL"),
* available at http://www.broadcom.com/licenses/GPLv2.php, with the
* following added to such license:
- *
+ *
* As a special exception, the copyright holders of this software give you
* permission to link this software with independent modules, and to copy and
* distribute the resulting executable under terms of your choice, provided that
* the license of that module. An independent module is a module which is not
* derived from this software. The special exception does not apply to any
* modifications of the software.
- *
+ *
* Notwithstanding the above, under no circumstances may you combine this
* software in any way with any other Broadcom software provided under a license
* other than the GPL, without Broadcom's express prior written consent.
*
* <<Broadcom-WL-IPTag/Open:>>
*
- * $Id: wldev_common.h 813004 2019-04-03 07:16:21Z $
+ * $Id: wldev_common.h 699163 2017-05-12 05:18:23Z $
*/
#ifndef __WLDEV_COMMON_H__
#define __WLDEV_COMMON_H__
*/
s32 wldev_iovar_getbuf(
struct net_device *dev, s8 *iovar_name,
- const void *param, s32 paramlen, void *buf, s32 buflen, struct mutex* buf_sync);
+ void *param, s32 paramlen, void *buf, s32 buflen, struct mutex* buf_sync);
/** Set named IOVARs, this function calls wl_dev_ioctl with
* WLC_SET_VAR IOCTL code
*/
s32 wldev_iovar_setbuf(
struct net_device *dev, s8 *iovar_name,
- const void *param, s32 paramlen, void *buf, s32 buflen, struct mutex* buf_sync);
+ void *param, s32 paramlen, void *buf, s32 buflen, struct mutex* buf_sync);
s32 wldev_iovar_setint(
struct net_device *dev, s8 *iovar, s32 val);
*/
s32 wldev_mkiovar_bsscfg(
- const s8 *iovar_name, const s8 *param, s32 paramlen,
+ const s8 *iovar_name, s8 *param, s32 paramlen,
s8 *iovar_buf, s32 buflen, s32 bssidx);
/** Retrieve named and bsscfg indexed IOVARs, this function calls wl_dev_ioctl with
* WLC_SET_VAR IOCTL code
*/
s32 wldev_iovar_setbuf_bsscfg(
- struct net_device *dev, const s8 *iovar_name, const void *param, s32 paramlen,
+ struct net_device *dev, s8 *iovar_name, void *param, s32 paramlen,
void *buf, s32 buflen, s32 bsscfg_idx, struct mutex* buf_sync);
s32 wldev_iovar_getint_bsscfg(
extern int net_os_set_dtim_skip(struct net_device *dev, int val);
extern int net_os_set_suspend_disable(struct net_device *dev, int val);
extern int net_os_set_suspend(struct net_device *dev, int val, int force);
-extern int net_os_set_suspend_bcn_li_dtim(struct net_device *dev, int val);
extern int net_os_set_max_dtim_enable(struct net_device *dev, int val);
-#ifdef DISABLE_DTIM_IN_SUSPEND
-extern int net_os_set_disable_dtim_in_suspend(struct net_device *dev, int val);
-#endif /* DISABLE_DTIM_IN_SUSPEND */
extern int wl_parse_ssid_list_tlv(char** list_str, wlc_ssid_ext_t* ssid,
int max, int *bytes_left);